diff --git a/.coin-or/projDesc.xml b/.coin-or/projDesc.xml index dd009f994bf..0c8942ea955 100644 --- a/.coin-or/projDesc.xml +++ b/.coin-or/projDesc.xml @@ -107,7 +107,7 @@ Carl D. Laird, Chair, Pyomo Management Committee, claird at andrew dot cmu dot e license file directly. --> - https://github.com/Pyomo/pyomo/blob/main/LICENSE.txt + https://github.com/Pyomo/pyomo/blob/main/LICENSE.md @@ -227,8 +227,8 @@ Carl D. Laird, Chair, Pyomo Management Committee, claird at andrew dot cmu dot e Use explicit overrides to disable use of automated version reporting. --> - 6.4.2 - 6.4.2 + 6.6.1 + 6.6.1 @@ -287,7 +287,7 @@ Carl D. Laird, Chair, Pyomo Management Committee, claird at andrew dot cmu dot e Any - Python 3.7, 3.8, 3.9, 3.10 + Python 3.7, 3.8, 3.9, 3.10, 3.11 diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000000..6d3e6401c5b --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,46 @@ +# PURPOSE: This file will ignore the included SHAs when +# running `git blame`. +# USAGE: To use this file, you can do one of the following: +# 1) git blame --ignore-revs-file .git-blame-ignore-revs +# 2) git config blame.ignoreRevsFile .git-blame-ignore-revs + +# Most of these will be ignoring application +# of black to the repository +49fb44e8acb90b3019dbc21241515ae9789b9798 +ce67d33762114de1bd31033b791ba38aca0360a4 +6e8e1e879aa33e8917ab9ecb46371031e39c49ee +01d84aad72719f77270bc5fb15984663cc2692d3 +38d7587c51c518ebf77948de43f1e7e4714b4b70 +3fa9f893fc98e9c48de7652095ab815bb2a51310 +bbc492a8f88a09868ba24ef99b8550cfb700d563 +b991efa2916b8f2b1cdadde0f8bcc848b715db8f +7302ab9b2cbbe0792b7227c1a65298935021b874 +c79e1e94f7ad28f6ddf83c3eec4db1071898b613 +d0f5081da6621e45df1bed1543ebcca2fee7d2cf +bf37b8701ada7517193d9f270cc14235326ff95d +8d69faf286c623f02f48ab3bf0aa76a39bed158c +09687ab6297cb8cc2577030a8751d3b39470ef69 +3acc0f41b699b57d0020fdbe44078bf36631a093 +733289d6f80e2c7ea4d500be20919ffca80c452a +7102497c843e0687c4817b78f3939b1e1fff18d1 +72b2dbfe34e59f60b2668e2b65bc1a439fb50b3e +3d8a9a058a548cee7716fea47d9a0e0712acb52f +ec2101738756789aa82a547013e9a345310409a7 +8f53d6c3d82a42bd2324e3def60f54802f6394c1 +2895da9ccb6248f41dcfe34ff23b9c5243ca2ff1 +5a58691b11b6265af22bf83c4c9a88d6e76ec079 +7c30d21fee6d305f30d3bc21fd7dff067951f662 +d1338fa2e9015f59a0505c1561d8e12924187fd1 +6ef8840eeb88978fa244c4837057e7a058756dda +597df3cf410d84f9a9f2aa91b905ef02d60d3013 +f74ee5e4488e5df1dc6033226441c26e1d4a6d08 +765b2fd671952bccf7f3ea78821a9267436429e0 +4e811287b5695e92f8b860497f7f3336d10de787 +ab66ddc816fbc7748b8ff7f5ccf29ed248e8f808 +0b7b099f4578250b65cdba874dfd3a491e6007fe +69aaac0180bd4bb2088086d412f31f4a592298dd +ed13c8c65d6c3f56973887744be1c62a5d4756de +0d93f98aa608f892df404ad8015885d26e09bb55 +63a3c602a00a2b747fc308c0571bbe33e55a3731 +363a16a609f519b3edfdfcf40c66d6de7ac135af + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 56271b333d2..66aff69938d 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -4,6 +4,9 @@ about: Suggest adding an enhancement of a current feature or a new feature in Py labels: enhancement --- + + + ## Summary diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f6da4169dc5..4bd8e88bfed 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -1,3 +1,12 @@ + + + + + + + + + ## Fixes # . ## Summary/Motivation: diff --git a/.github/workflows/release_wheel_creation.yml b/.github/workflows/release_wheel_creation.yml index a3a2f6f138b..2c71584c5b8 100644 --- a/.github/workflows/release_wheel_creation.yml +++ b/.github/workflows/release_wheel_creation.yml @@ -10,23 +10,28 @@ on: description: Git Hash (Optional) required: false +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + env: PYOMO_SETUP_ARGS: --with-distributable-extensions jobs: manylinux: - name: ${{ matrix.TARGET }}/wheel_creation + name: ${{ matrix.TARGET }}/${{ matrix.wheel-version }}_wheel_creation runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: + wheel-version: ['cp37-cp37m', 'cp38-cp38', 'cp39-cp39', 'cp310-cp310', 'cp311-cp311'] os: [ubuntu-latest] include: - os: ubuntu-latest TARGET: manylinux - python-version: [3.7] + python-version: [3.8] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: @@ -35,10 +40,11 @@ jobs: run: | python -m pip install --upgrade pip pip install twine wheel setuptools pybind11 + # TODO: Update the manylinux builder to next tagged release - name: Build manylinux Python wheels - uses: RalfG/python-wheels-manylinux-build@v0.4.0-manylinux2010_x86_64 + uses: RalfG/python-wheels-manylinux-build@a1e012c58ed3960f81b7ed2759a037fb0ad28e2d with: - python-versions: 'cp37-cp37m cp38-cp38 cp39-cp39 cp310-cp310' + python-versions: ${{ matrix.wheel-version }} build-requirements: 'cython pybind11' package-path: '' pip-wheel-args: '' @@ -51,56 +57,11 @@ jobs: run: | sudo rm -rfv dist/*-linux_x86_64.whl - name: Upload artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: manylinux-wheels path: dist - manylinuxaarch64: - if: ${{ false }} - name: ${{ matrix.TARGET }}/wheel_creation - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ubuntu-latest] - include: - - os: ubuntu-latest - TARGET: manylinuxaarch64 - python-version: [3.7] - steps: - - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v3 - with: - python-version: ${{ matrix.python-version }} - - uses: docker/setup-qemu-action@v1 - name: Set up QEMU - - name: Install dependencies - run: | - python -m pip install --upgrade pip - pip install twine wheel setuptools pybind11 - - name: Build manylinux Python wheels - uses: RalfG/python-wheels-manylinux-build@v0.4.0-manylinux2014_aarch64 - with: - python-versions: 'cp37-cp37m cp38-cp38 cp39-cp39' - build-requirements: 'cython' - package-path: '' - pip-wheel-args: '' - # When locally testing, --no-deps flag is necessary (PyUtilib dependency will trigger an error otherwise) - - name: Consolidate wheels - run: | - sudo test -d dist || mkdir -v dist - sudo find . -name \*.whl | grep -v /dist/ | xargs -n1 -i mv -v "{}" dist/ - - name: Delete linux wheels - run: | - sudo rm -rfv dist/*-linux_aarch64.whl - - name: Upload artifact - uses: actions/upload-artifact@v2 - with: - name: manylinux-aarch64-wheels - path: dist - generictarball: name: ${{ matrix.TARGET }} runs-on: ${{ matrix.os }} @@ -111,9 +72,9 @@ jobs: include: - os: ubuntu-latest TARGET: generic_tarball - python-version: [3.7] + python-version: [3.8] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: @@ -126,7 +87,7 @@ jobs: run: | python setup.py --without-cython sdist --format=gztar - name: Upload artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: generictarball path: dist @@ -141,9 +102,9 @@ jobs: include: - os: macos-latest TARGET: osx - python-version: [ 3.7, 3.8, 3.9, '3.10' ] + python-version: [ 3.7, 3.8, 3.9, '3.10', '3.11' ] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: @@ -157,7 +118,7 @@ jobs: python setup.py --with-cython --with-distributable-extensions sdist --format=gztar bdist_wheel - name: Upload artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: osx-wheels path: dist @@ -172,9 +133,9 @@ jobs: include: - os: windows-latest TARGET: win - python-version: [ 3.7, 3.8, 3.9, '3.10' ] + python-version: [ 3.7, 3.8, 3.9, '3.10', '3.11' ] steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v3 with: @@ -191,7 +152,7 @@ jobs: $env:PYTHONWARNINGS="ignore::UserWarning" Invoke-Expression "python setup.py --with-cython --with-distributable-extensions sdist --format=gztar bdist_wheel" - name: Upload artifact - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: win-wheels path: dist diff --git a/.github/workflows/test_branches.yml b/.github/workflows/test_branches.yml index f986f6f93a5..6fd56708520 100644 --- a/.github/workflows/test_branches.yml +++ b/.github/workflows/test_branches.yml @@ -10,6 +10,10 @@ on: description: Git Hash (Optional) required: false +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + defaults: run: shell: bash -l {0} @@ -19,10 +23,31 @@ env: PYTHON_CORE_PKGS: wheel PYPI_ONLY: z3-solver PYPY_EXCLUDE: scipy numdifftools seaborn statsmodels - CACHE_VER: v210812.0 + CACHE_VER: v221013.1 NEOS_EMAIL: tests@pyomo.org + SRC_REF: ${{ github.head_ref || github.ref }} jobs: + lint: + name: lint/style-and-typos + runs-on: ubuntu-latest + steps: + - name: Checkout Pyomo source + uses: actions/checkout@v3 + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + - name: Black Formatting Check + run: | + pip install black + black . -S -C --check --diff --exclude examples/pyomobook/python-ch/BadIndent.py + - name: Spell Check + uses: crate-ci/typos@master + with: + config: ./.github/workflows/typos.toml + + build: name: ${{ matrix.TARGET }}/${{ matrix.python }}${{ matrix.other }} runs-on: ${{ matrix.os }} @@ -31,23 +56,23 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python: ['3.10'] + python: ['3.11'] other: [""] category: [""] include: - os: ubuntu-latest - python: '3.10' + python: '3.11' TARGET: linux PYENV: pip - os: macos-latest - python: 3.8 + python: '3.10' TARGET: osx PYENV: pip - os: windows-latest - python: 3.7 + python: 3.9 TARGET: win PYENV: conda PACKAGES: glpk @@ -70,15 +95,7 @@ jobs: PACKAGES: mpi4py openmpi - os: ubuntu-latest - python: 3.7 - other: /slim - slim: 1 - skip_doctest: 1 - TARGET: linux - PYENV: pip - - - os: ubuntu-latest - python: 3.8 + python: '3.10' other: /cython setup_options: --with-cython skip_doctest: 1 @@ -121,31 +138,32 @@ jobs: # the 5 GB GitHub allows. # #- name: Conda package cache - # uses: actions/cache@v2 + # uses: actions/cache@v3 # if: matrix.PYENV == 'conda' # id: conda-cache # with: # path: cache/conda # key: conda-${{env.CACHE_VER}}.0-${{runner.os}}-${{matrix.python}} - - name: Pip package cache - uses: actions/cache@v2 - if: matrix.PYENV == 'pip' - id: pip-cache - with: - path: cache/pip - key: pip-${{env.CACHE_VER}}.0-${{runner.os}}-${{matrix.python}} + #- name: Pip package cache + # uses: actions/cache@v3 + # if: matrix.PYENV == 'pip' + # id: pip-cache + # with: + # path: cache/pip + # key: pip-${{env.CACHE_VER}}.0-${{runner.os}}-${{matrix.python}} - - name: OS package cache - uses: actions/cache@v2 - if: matrix.TARGET != 'osx' - id: os-cache - with: - path: cache/os - key: pkg-${{env.CACHE_VER}}.0-${{runner.os}} + #- name: OS package cache + # uses: actions/cache@v3 + # if: matrix.TARGET != 'osx' + # id: os-cache + # with: + # path: cache/os + # key: pkg-${{env.CACHE_VER}}.0-${{runner.os}} - name: TPL package download cache - uses: actions/cache@v2 + uses: actions/cache@v3 + if: ${{ ! matrix.slim }} id: download-cache with: path: cache/download @@ -196,7 +214,7 @@ jobs: - name: Set up Python ${{ matrix.python }} if: matrix.PYENV == 'pip' - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python }} @@ -206,7 +224,7 @@ jobs: with: auto-update-conda: true python-version: ${{ matrix.python }} - channels: conda-forge + channels: conda-forge,gurobi,ibmdecisionoptimization,fico-xpress channel-priority: strict # GitHub actions is very fragile when it comes to setting up various @@ -247,7 +265,7 @@ jobs: python -m pip install --cache-dir cache/pip pymysql || \ python -m pip install --cache-dir cache/pip pymysql if test -z "${{matrix.slim}}"; then - python -m pip install --cache-dir cache/pip cplex \ + python -m pip install --cache-dir cache/pip cplex docplex \ || echo "WARNING: CPLEX Community Edition is not available" python -m pip install --cache-dir cache/pip \ -i https://pypi.gurobi.com gurobipy \ @@ -264,10 +282,19 @@ jobs: - name: Install Python packages (conda) if: matrix.PYENV == 'conda' run: | + # Set up environment mkdir -p $GITHUB_WORKSPACE/cache/conda conda config --set always_yes yes conda config --set auto_update_conda false conda config --prepend pkgs_dirs $GITHUB_WORKSPACE/cache/conda + # Try to install mamba + conda install -q -y -n base conda-libmamba-solver || MAMBA_FAILED=1 + if test -z "$MAMBA_FAILED"; then + echo "*** Activating the mamba environment solver ***" + conda config --set solver libmamba + fi + # Print environment info + echo "*** CONDA environment: ***" conda info conda config --show-sources conda config --show channels @@ -298,22 +325,34 @@ jobs: CONDA_DEPENDENCIES="$CONDA_DEPENDENCIES $PKG" fi done - conda install -q -y -c conda-forge $CONDA_DEPENDENCIES + echo "*** Install Pyomo dependencies ***" + conda install -q -y $CONDA_DEPENDENCIES if test -z "${{matrix.slim}}"; then - conda install -q -y -c ibmdecisionoptimization 'cplex>=12.10' \ + echo "*** Install CPLEX ***" + conda install -q -y 'cplex>=12.10' docplex \ || echo "WARNING: CPLEX Community Edition is not available" - conda install -q -y -c gurobi gurobi \ + echo "*** Install Gurobi ***" + conda install -q -y gurobi \ || echo "WARNING: Gurobi is not available" - conda install -q -y -c fico-xpress xpress \ + echo "*** Install Xpress ***" + conda install -q -y xpress \ || echo "WARNING: Xpress Community Edition is not available" - for PKG in cyipopt pymumps; do - conda install -q -y -c conda-forge $PKG \ + for PKG in cyipopt pymumps scip; do + echo "*** Install $PKG ***" + conda install -q -y $PKG \ || echo "WARNING: $PKG is not available" done # TODO: This is a hack to stop test_qt.py from running until we # can better troubleshoot why it fails on GHA for QTPACKAGE in qt pyqt; do - conda remove $QTPACKAGE || echo "$QTPACKAGE not in this environment" + # Because conda is insane, removing packages can cause + # unrelated packages to be updated (breaking version + # specifications specified previously, e.g., in + # setup.py). There doesn't appear to be a good + # workaround, so we will just force-remove (recognizing + # that it may break other conda cruft). + conda remove --force-remove $QTPACKAGE \ + || echo "$QTPACKAGE not in this environment" done fi # Re-try Pyomo (optional) dependencies with pip @@ -398,6 +437,8 @@ jobs: echo "DYLD_LIBRARY_PATH=${env:DYLD_LIBRARY_PATH}:$GAMS_DIR" ` Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append $INSTALLER = "${env:DOWNLOAD_DIR}/gams_install.exe" + # We are pinning to 29.1.0 because a license is required for + # versions after this in order to run in demo mode. $URL = "https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0" if ( "${{matrix.TARGET}}" -eq "win" ) { $URL = "$URL/windows/windows_x64_64.exe" @@ -503,7 +544,8 @@ jobs: run: | echo "" echo "Clone Pyomo-model-libraries..." - git clone -b main https://github.com/Pyomo/pyomo-model-libraries.git + URL=https://github.com/Pyomo/pyomo-model-libraries.git + git clone -b ${SRC_REF##*/} $URL || git clone -b main $URL echo "" echo "Install Pyomo..." echo "" @@ -513,6 +555,15 @@ jobs: echo "" echo "PYOMO_CONFIG_DIR=${GITHUB_WORKSPACE}/config" >> $GITHUB_ENV + # this has to be done after Pyomo is installed because highspy + # depends on pyomo's find_library function + - name: Install HiGHS + if: ${{ ! matrix.slim }} + shell: bash + run: | + $PYTHON_EXE -m pip install --cache-dir cache/pip highspy \ + || echo "WARNING: highspy is not available" + - name: Set up coverage tracking run: | if test "${{matrix.TARGET}}" == win; then @@ -537,7 +588,7 @@ jobs: echo "" echo "Pyomo download-extensions" echo "" - pyomo download-extensions + pyomo download-extensions || exit 1 echo "" echo "Pyomo build-extensions" echo "" @@ -553,12 +604,8 @@ jobs: - name: Run Pyomo tests if: matrix.mpi == 0 run: | - CATEGORY= - for cat in ${{matrix.category}}; do - CATEGORY+=" -m $cat" - done $PYTHON_EXE -m pytest -v \ - -W ignore::Warning $CATEGORY \ + -W ignore::Warning ${{matrix.category}} \ pyomo `pwd`/pyomo-model-libraries \ `pwd`/examples/pyomobook --junitxml="TEST-pyomo.xml" @@ -586,7 +633,7 @@ jobs: coverage xml -i - name: Record build artifacts - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: ${{github.job}}_${{env.GHA_JOBGROUP}}-${{env.GHA_JOBNAME}} path: | @@ -606,7 +653,7 @@ jobs: uses: actions/checkout@v3 - name: Set up Python 3.8 - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: 3.8 @@ -640,7 +687,7 @@ jobs: cover: name: process-coverage-${{ matrix.TARGET }} needs: build - if: always() # run even if a build job fails + if: ${{ false }} # turn off for branches runs-on: ${{ matrix.os }} timeout-minutes: 10 strategy: @@ -661,20 +708,20 @@ jobs: uses: actions/checkout@v3 # We need the source for .codecov.yml and running "coverage xml" - - name: Pip package cache - uses: actions/cache@v2 - id: pip-cache - with: - path: cache/pip - key: pip-${{env.CACHE_VER}}.0-${{runner.os}}-3.8 + #- name: Pip package cache + # uses: actions/cache@v3 + # id: pip-cache + # with: + # path: cache/pip + # key: pip-${{env.CACHE_VER}}.0-${{runner.os}}-3.8 - name: Download build artifacts - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: path: artifacts - name: Set up Python 3.8 - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: 3.8 @@ -769,9 +816,10 @@ jobs: - name: Upload codecov reports if: github.repository_owner == 'Pyomo' || github.ref != 'refs/heads/main' - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 with: files: coverage.xml + token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }} flags: ${{ matrix.TARGET }} fail_ci_if_error: true @@ -780,9 +828,10 @@ jobs: if: | hashFiles('coverage-other.xml') != '' && (github.repository_owner == 'Pyomo' || github.ref != 'refs/heads/main') - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 with: files: coverage-other.xml + token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }}/other flags: ${{ matrix.TARGET }},other fail_ci_if_error: true diff --git a/.github/workflows/test_pr_and_main.yml b/.github/workflows/test_pr_and_main.yml index beb874ebc69..9a8888cef1c 100644 --- a/.github/workflows/test_pr_and_main.yml +++ b/.github/workflows/test_pr_and_main.yml @@ -13,6 +13,10 @@ on: description: Git Hash (Optional) required: false +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + defaults: run: shell: bash -l {0} @@ -22,19 +26,37 @@ env: PYTHON_CORE_PKGS: wheel PYPI_ONLY: z3-solver PYPY_EXCLUDE: scipy numdifftools seaborn statsmodels - CACHE_VER: v210812.0 + CACHE_VER: v221013.1 NEOS_EMAIL: tests@pyomo.org + SRC_REF: ${{ github.head_ref || github.ref }} jobs: + lint: + name: lint/style-and-typos + runs-on: ubuntu-latest + steps: + - name: Checkout Pyomo source + uses: actions/checkout@v3 + - name: Black Formatting Check + run: | + pip install black + black . -S -C --check --diff --exclude examples/pyomobook/python-ch/BadIndent.py + - name: Spell Check + uses: crate-ci/typos@master + with: + config: ./.github/workflows/typos.toml + + build: name: ${{ matrix.TARGET }}/${{ matrix.python }}${{ matrix.other }} + needs: lint # the linter job is a prerequisite for PRs runs-on: ${{ matrix.os }} timeout-minutes: 120 strategy: fail-fast: false matrix: os: [ubuntu-latest, macos-latest, windows-latest] - python: [3.7, 3.8, 3.9, '3.10'] + python: [3.7, 3.8, 3.9, '3.10', '3.11'] other: [""] category: [""] @@ -70,15 +92,15 @@ jobs: PACKAGES: mpi4py openmpi - os: ubuntu-latest - python: 3.7 - other: /slim - slim: 1 + python: 3.11 + other: /singletest + category: "-m 'neos or importtest'" skip_doctest: 1 TARGET: linux PYENV: pip - os: ubuntu-latest - python: 3.8 + python: '3.10' other: /cython setup_options: --with-cython skip_doctest: 1 @@ -86,10 +108,17 @@ jobs: PYENV: pip PACKAGES: cython - - os: ubuntu-latest + - os: windows-latest python: 3.8 - other: /singletest - category: neos + other: /pip + skip_doctest: 1 + TARGET: win + PYENV: pip + + - os: ubuntu-latest + python: 3.7 + other: /slim + slim: 1 skip_doctest: 1 TARGET: linux PYENV: pip @@ -107,13 +136,6 @@ jobs: TARGET: linux PYENV: pip - - os: windows-latest - python: 3.8 - other: /pip - skip_doctest: 1 - TARGET: win - PYENV: pip - steps: - name: Checkout Pyomo source uses: actions/checkout@v3 @@ -142,31 +164,32 @@ jobs: # the 5 GB GitHub allows. # #- name: Conda package cache - # uses: actions/cache@v2 + # uses: actions/cache@v3 # if: matrix.PYENV == 'conda' # id: conda-cache # with: # path: cache/conda # key: conda-${{env.CACHE_VER}}.0-${{runner.os}}-${{matrix.python}} - - name: Pip package cache - uses: actions/cache@v2 - if: matrix.PYENV == 'pip' - id: pip-cache - with: - path: cache/pip - key: pip-${{env.CACHE_VER}}.0-${{runner.os}}-${{matrix.python}} + #- name: Pip package cache + # uses: actions/cache@v3 + # if: matrix.PYENV == 'pip' + # id: pip-cache + # with: + # path: cache/pip + # key: pip-${{env.CACHE_VER}}.0-${{runner.os}}-${{matrix.python}} - - name: OS package cache - uses: actions/cache@v2 - if: matrix.TARGET != 'osx' - id: os-cache - with: - path: cache/os - key: pkg-${{env.CACHE_VER}}.0-${{runner.os}} + #- name: OS package cache + # uses: actions/cache@v3 + # if: matrix.TARGET != 'osx' + # id: os-cache + # with: + # path: cache/os + # key: pkg-${{env.CACHE_VER}}.0-${{runner.os}} - name: TPL package download cache - uses: actions/cache@v2 + uses: actions/cache@v3 + if: ${{ ! matrix.slim }} id: download-cache with: path: cache/download @@ -217,7 +240,7 @@ jobs: - name: Set up Python ${{ matrix.python }} if: matrix.PYENV == 'pip' - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: ${{ matrix.python }} @@ -227,7 +250,7 @@ jobs: with: auto-update-conda: true python-version: ${{ matrix.python }} - channels: conda-forge + channels: conda-forge,gurobi,ibmdecisionoptimization,fico-xpress channel-priority: strict # GitHub actions is very fragile when it comes to setting up various @@ -268,7 +291,7 @@ jobs: python -m pip install --cache-dir cache/pip pymysql || \ python -m pip install --cache-dir cache/pip pymysql if test -z "${{matrix.slim}}"; then - python -m pip install --cache-dir cache/pip cplex \ + python -m pip install --cache-dir cache/pip cplex docplex \ || echo "WARNING: CPLEX Community Edition is not available" python -m pip install --cache-dir cache/pip \ -i https://pypi.gurobi.com gurobipy \ @@ -285,10 +308,19 @@ jobs: - name: Install Python packages (conda) if: matrix.PYENV == 'conda' run: | + # Set up environment mkdir -p $GITHUB_WORKSPACE/cache/conda conda config --set always_yes yes conda config --set auto_update_conda false conda config --prepend pkgs_dirs $GITHUB_WORKSPACE/cache/conda + # Try to install mamba + conda install -q -y -n base conda-libmamba-solver || MAMBA_FAILED=1 + if test -z "$MAMBA_FAILED"; then + echo "*** Activating the mamba environment solver ***" + conda config --set solver libmamba + fi + # Print environment info + echo "*** CONDA environment: ***" conda info conda config --show-sources conda config --show channels @@ -319,22 +351,34 @@ jobs: CONDA_DEPENDENCIES="$CONDA_DEPENDENCIES $PKG" fi done - conda install -q -y -c conda-forge $CONDA_DEPENDENCIES + echo "*** Install Pyomo dependencies ***" + conda install -q -y $CONDA_DEPENDENCIES if test -z "${{matrix.slim}}"; then - conda install -q -y -c ibmdecisionoptimization 'cplex>=12.10' \ + echo "*** Install CPLEX ***" + conda install -q -y 'cplex>=12.10' docplex \ || echo "WARNING: CPLEX Community Edition is not available" - conda install -q -y -c gurobi gurobi \ + echo "*** Install Gurobi ***" + conda install -q -y gurobi \ || echo "WARNING: Gurobi is not available" - conda install -q -y -c fico-xpress xpress \ + echo "*** Install Xpress ***" + conda install -q -y xpress \ || echo "WARNING: Xpress Community Edition is not available" - for PKG in cyipopt pymumps; do - conda install -q -y -c conda-forge $PKG \ + for PKG in cyipopt pymumps scip; do + echo "*** Install $PKG ***" + conda install -q -y $PKG \ || echo "WARNING: $PKG is not available" done # TODO: This is a hack to stop test_qt.py from running until we # can better troubleshoot why it fails on GHA for QTPACKAGE in qt pyqt; do - conda remove $QTPACKAGE || echo "$QTPACKAGE not in this environment" + # Because conda is insane, removing packages can cause + # unrelated packages to be updated (breaking version + # specifications specified previously, e.g., in + # setup.py). There doesn't appear to be a good + # workaround, so we will just force-remove (recognizing + # that it may break other conda cruft). + conda remove --force-remove $QTPACKAGE \ + || echo "$QTPACKAGE not in this environment" done fi # Re-try Pyomo (optional) dependencies with pip @@ -419,6 +463,8 @@ jobs: echo "DYLD_LIBRARY_PATH=${env:DYLD_LIBRARY_PATH}:$GAMS_DIR" ` Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append $INSTALLER = "${env:DOWNLOAD_DIR}/gams_install.exe" + # We are pinning to 29.1.0 because a license is required for + # versions after this in order to run in demo mode. $URL = "https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0" if ( "${{matrix.TARGET}}" -eq "win" ) { $URL = "$URL/windows/windows_x64_64.exe" @@ -524,7 +570,8 @@ jobs: run: | echo "" echo "Clone Pyomo-model-libraries..." - git clone -b main https://github.com/Pyomo/pyomo-model-libraries.git + URL=https://github.com/Pyomo/pyomo-model-libraries.git + git clone -b ${SRC_REF##*/} $URL || git clone -b main $URL echo "" echo "Install Pyomo..." echo "" @@ -534,6 +581,15 @@ jobs: echo "" echo "PYOMO_CONFIG_DIR=${GITHUB_WORKSPACE}/config" >> $GITHUB_ENV + # this has to be done after Pyomo is installed because highspy + # depends on pyomo's find_library function + - name: Install HiGHS + if: ${{ ! matrix.slim }} + shell: bash + run: | + $PYTHON_EXE -m pip install --cache-dir cache/pip highspy \ + || echo "WARNING: highspy is not available" + - name: Set up coverage tracking run: | if test "${{matrix.TARGET}}" == win; then @@ -558,7 +614,7 @@ jobs: echo "" echo "Pyomo download-extensions" echo "" - pyomo download-extensions + pyomo download-extensions || exit 1 echo "" echo "Pyomo build-extensions" echo "" @@ -574,12 +630,8 @@ jobs: - name: Run Pyomo tests if: matrix.mpi == 0 run: | - CATEGORY= - for cat in ${{matrix.category}}; do - CATEGORY+=" -m $cat" - done $PYTHON_EXE -m pytest -v \ - -W ignore::Warning $CATEGORY \ + -W ignore::Warning ${{matrix.category}} \ pyomo `pwd`/pyomo-model-libraries \ `pwd`/examples/pyomobook --junitxml="TEST-pyomo.xml" @@ -607,7 +659,7 @@ jobs: coverage xml -i - name: Record build artifacts - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: ${{github.job}}_${{env.GHA_JOBGROUP}}-${{env.GHA_JOBNAME}} path: | @@ -620,6 +672,7 @@ jobs: bare-python-env: name: linux/3.8/bare-env + needs: lint # the linter job is a prerequisite for PRs runs-on: ubuntu-latest timeout-minutes: 10 steps: @@ -627,7 +680,7 @@ jobs: uses: actions/checkout@v3 - name: Set up Python 3.8 - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: 3.8 @@ -682,20 +735,20 @@ jobs: uses: actions/checkout@v3 # We need the source for .codecov.yml and running "coverage xml" - - name: Pip package cache - uses: actions/cache@v2 - id: pip-cache - with: - path: cache/pip - key: pip-${{env.CACHE_VER}}.0-${{runner.os}}-3.8 + #- name: Pip package cache + # uses: actions/cache@v3 + # id: pip-cache + # with: + # path: cache/pip + # key: pip-${{env.CACHE_VER}}.0-${{runner.os}}-3.8 - name: Download build artifacts - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: path: artifacts - name: Set up Python 3.8 - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: 3.8 @@ -790,9 +843,10 @@ jobs: - name: Upload codecov reports if: github.repository_owner == 'Pyomo' || github.ref != 'refs/heads/main' - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 with: files: coverage.xml + token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }} flags: ${{ matrix.TARGET }} fail_ci_if_error: true @@ -801,9 +855,10 @@ jobs: if: | hashFiles('coverage-other.xml') != '' && (github.repository_owner == 'Pyomo' || github.ref != 'refs/heads/main') - uses: codecov/codecov-action@v2 + uses: codecov/codecov-action@v3 with: files: coverage-other.xml + token: ${{ secrets.PYOMO_CODECOV_TOKEN }} name: ${{ matrix.TARGET }}/other flags: ${{ matrix.TARGET }},other fail_ci_if_error: true diff --git a/.github/workflows/typos.toml b/.github/workflows/typos.toml new file mode 100644 index 00000000000..23f94fc8afd --- /dev/null +++ b/.github/workflows/typos.toml @@ -0,0 +1,43 @@ +[files] +extend-exclude = ["*.eps"] + +[default.extend-words] +# Ignore IDAES +IDAES = "IDAES" +idaes = "idaes" +# Ignore datas +Datas = "Datas" +datas = "datas" +# Ignore ND +ND = "ND" +nd = "nd" +# Ignore INOUT +inout = "inout" +INOUT= "INOUT" +# Ignore MAYBEE from examples +MAYBEE = "MAYBEE" +# Ignore conext for constraint-external +conext = "conext" +# Ignore AFE (affine expression) from MOSEK vernacular +afe = "afe" +# Ignore Fo from PyNumero tests +Fo = "Fo" +# Ignore ba from contrib +ba = "ba" +# Ignore complimentarity from interior_point +complimentarity = "complimentarity" +# Ignore from "number of inputs" from core +nin = "nin" +# sisser CUTEr instance +sisser = "sisser" +# LAF +LAF = "LAF" +# caf +caf = "caf" +# WRONLY +WRONLY = "WRONLY" +# Ignore the name Hax +Hax = "Hax" +# Big Sur +Sur = "Sur" +# AS NEEDED: Add More Words Below diff --git a/.gitignore b/.gitignore index cea795d481b..09069552990 100644 --- a/.gitignore +++ b/.gitignore @@ -18,7 +18,7 @@ doc/OnlineDocs/**/*.spy pyomo/dataportal/parse_table_datacmds.py gurobi.log -# Results from nosetests --with-coverage +# Results from pytest --with-coverage .coverage *.cover diff --git a/.jenkins.sh b/.jenkins.sh index 36f950b9e9c..f31fef99377 100644 --- a/.jenkins.sh +++ b/.jenkins.sh @@ -11,7 +11,7 @@ # # CATEGORY: the category to pass to pytest # -# TEST_SUITES: Paths (module or directory) to be passed to nosetests to +# TEST_SUITES: Paths (module or directory) to be passed to pytest to # run. (defaults to "pyomo '$WORKSPACE/pyomo-model-libraries'") # # SLIM: If nonempty, then the virtualenv will only have pip, setuptools, @@ -86,9 +86,13 @@ if test -z "$MODE" -o "$MODE" == setup; then echo "#" echo "# Installing pyomo modules" echo "#" - pushd "$WORKSPACE/pyutilib" || echo "PyUtilib not found" - python setup.py develop || echo "PyUtilib failed - skipping." - popd + if test -d "$WORKSPACE/pyutilib"; then + pushd "$WORKSPACE/pyutilib" + python setup.py develop || echo "PyUtilib failed - skipping." + popd + else + echo "PyUtilib not found; skipping" + fi pushd "$WORKSPACE/pyomo" || exit 1 python setup.py develop $PYOMO_SETUP_ARGS || exit 1 popd diff --git a/.readthedocs.yaml b/.readthedocs.yaml index ffcba32f400..dc77164f866 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -5,6 +5,11 @@ # Required version: 2 +build: + os: ubuntu-22.04 + tools: + python: "3.8" + sphinx: configuration: doc/OnlineDocs/conf.py @@ -12,7 +17,6 @@ formats: all # Set the version of Python and requirements required to build the docs python: - version: 3 install: - method: pip path: . diff --git a/AUTHORS.txt b/AUTHORS.txt deleted file mode 100644 index 789467f30a5..00000000000 --- a/AUTHORS.txt +++ /dev/null @@ -1 +0,0 @@ -See www.pyomo.org diff --git a/CHANGELOG.txt b/CHANGELOG.md similarity index 87% rename from CHANGELOG.txt rename to CHANGELOG.md index 0cae7c774fd..1bf751268fa 100644 --- a/CHANGELOG.txt +++ b/CHANGELOG.md @@ -1,9 +1,263 @@ -=============== Pyomo CHANGELOG =============== + +------------------------------------------------------------------------------- +Pyomo 6.6.1 (30 May 2023) +------------------------------------------------------------------------------- + +- General + - Update cmake builder for recent setuptools (#2847) + - Fixing minor formatting for 6.6.0 release changes (#2842) + - Silence deprecation warnings (#2854) +- Core + - Update indentation handling in `config.StringFormatter` (#2853) + - Restore slice API broken by #2829 (#2849) + - Resolve handling of {}**0 in `LinearRepn`/`QuadraticRepn` (#2857) +- Solver Interfaces + - NL writer: resolve error identifying vars in indexed SOS (#2852) + - Manage Gurobi environments in GurobiDirect (#2680) +- Contributed Packages + - cp: fix handling fixed BooleanVars in logical-to-disjunctive walker (#2850) + - FBBT: Fix typo when handling GeneralExpression objects (#2848) + - MindtPy: add support for cyipopt (#2830) + +------------------------------------------------------------------------------- +Pyomo 6.6.0 (24 May 2023) +------------------------------------------------------------------------------- + +- General + - Remove `pyomo check`/`pyomo.checker` module (#2753) + - Improve formatting of docstrings generated from `ConfigDict` (#2754) + - Deprecate `add_docstring_list` (#2755) + - Reapply `black` to previously completed directories (#2775) + - Improve formatting for `DeveloperError`, `MouseTrap` messages (#2805) +- Core + - Bugfix: component indexes specified as lists (#2765) + - Remove the use of weakrefs in `SymbolMap` (#2791) + - Improve conversions between Pyomo and Sympy expressions (#2806) + - Rework expression generation to leverage multiple dispatch (#2722) + - Improve robustness of `calculate_variable_from_constraint()` (#2812) + - Add support for infix Boolean logical operators (#2835) + - Improvements to Pyomo component iteration (#2829) +- Documentation + - Copyright and Book Updates (#2750) + - Link documentation in incidence_analysis README (#2759) + - Update ReadtheDocs Configuration (#2780) + - Correct import in community.rst (#2792) + - Remove instructions for python <= 3.0 (#2822) +- Solvers Interfaces + - NEOS: fix typo in `kestrelAMPL.kill()` argument (#2758) + - Better handling of mutable parameters in HiGHS interface (#2763) + - Improve linear data structure in NL writer (#2769) + - Bugfix for shared named expressions in NL writer (#2790) + - Resolve NPV constants in `LinearExpressions` in NL writer (#2811) + - GAMS/Baron: ensure negative numbers are parenthesized (#2833) + - Release LP version 2 (LPv2) writer (#2823, #2840) +- Testing + - Rework Upload of Coverage Reports (#2761) + - Update constant for visitor tests for python 3.11.2 (#2799) + - Auto-Linting: Spelling Black Style Checker (#2800, #2818) + - Skip MOSEK tests on NEOS (due to unknown NEOS error) (#2839) +- GDP + - Add `gdp.bound_pretransformation` (#2824) +- Contributed Packages + - APPSI: Improve logging consistency across solvers (#2787) + - APPSI: Update `available` method in APPSI-Gurobi interface (#2828) + - DoE: Release version 2 (#2794) + - incidence_analysis: Remove strict usage of PyomoNLP (#2752) + - incidence_analysis: Test `IndexedBlock` (#2789) + - incidence_analysis: Use standard repn for incidence graph generation (#2834) + - Parmest: Update for pandas 2.0.0 release (#2795) + - piecewise: Add contrib.piecewise package (#2708, #2768, #2766, #2797, #2798, + #2826) + - PyNumero: Refactor CyIpopt interface to subclass `cyipopt.Problem` (#2760) + - PyNumero: Fix CyIpopt interface when `load_solutions=False` (#2820) + - PyROS: Fixes to PyROS Separation Routine (#2815) + - PyROS: Fixes to Coefficient Matching and Timing Functionalities (#2837) + +------------------------------------------------------------------------------- +Pyomo 6.5.0 (16 Feb 2023) +------------------------------------------------------------------------------- + +- General + - Apply `black` to enforce PEP8 standards in certain modules (#2737, #2738, + #2733, #2732, #2731, #2728, #2730, #2729, #2720, #2721, #2719, #2718) + - Add Developers' call information to README (#2665) + - Deprecate `pyomo.checker` module (#2734) + - Warn when infeasibility tools will not log output (#2666) + - Separate identification from logging in `pyomo.util.infeasible.log_*` (#2669) + - Convert subprocess timeout parameters to module attributes (#2672) + - Resolve consistency issues in the Bunch class (#2685) + - Remove GSL downloader from `download-extensions` (#2725) + - Update enhancement GitHub issue template to link to wiki (#2739) + - Add deprecation warning to `pyomo` command (#2740) + - Require `version=` for all deprecation utilities (#2744) + - Fix `pyomo --version` version string (#2743) +- Core + - Fix minor typo in set.py (#2679) + - Fix bugs in scaling transformation (#2678) + - Rework handling of 'dimensionless' units in Pyomo (#2691) +- Solver Interfaces + - Switch default NL writer to nlv2 and bug fixes (#2676, #2710, #2726) + - Enable MOSEK10 warm-start flag and relocate var initialization (#2647) + - Fix handling of POW in Baron writer (#2693) + - Update GAMS license check to avoid exception when not available (#2697) +- Documentation + - Fix incorrect documentation for sending options to solvers (#2688) + - Fix Sphinx warnings (#2712) + - Document Python Version Support policy (#2735) + - Document deprecation and removal of functionality (#2741) + - Document docstring formatting requirements (#2742) +- Testing + - Skip failing Baron tests (#2694) + - Remove residual `nose` references (#2736) + - Update GHA setup-python version (#2705) + - Improve GHA conda setup performance (#2701) + - Add unit test for QCQO problems with MOSEK (#2682) +- DAE + - Fix typo in `__init__.py` (#2683) + - Add `active` filter to flattener (#2643) +- GDP + - Add GDP-to-MIP transformation base class (#2687) +- Contributed Packages + - DoE: New module for model-based design of experiments (#2294, #2711, #2527) + - FBBT: Add tolerances to tests (#2675) + - GDPopt: Switch a LBB test to use Gurobi as MINLP solver (#2686) + - incidence_analysis: Add `plot` method to `IncidenceGraphInterface` (#2716) + - incidence_analysis: Refactor to cache a graph instead of a matrix (#2715) + - incidence_analysis: Add documentation and update API (#2727, #2745) + - incidence_analysis: Add logging solve_strongly_connected_components (#2723) + - MindtPy: Refactor to improve extensibility and maintainability (#2654) + - Parmest: Suppress mpi-sppy output in import (#2692) + - PyNumero: Add tee argument to Pyomo-SciPy square solvers (#2668) + - PyNumero: Support implicit function solvers in ExternalPyomoModel (#2652) + - PyROS: Fix user_time and wallclock_time bug (#2670) + - PyROS: More judicious enforcement of PyROS Solver time limit (#2660, #2706) + - PyROS: Update documentation (#2698, #2707) + - PyROS: Adjust routine for loading DR polishing model solutions (#2700) + - Viewer: Update to support PySide6 and display units and domain (#2689) + +------------------------------------------------------------------------------- +Pyomo 6.4.4 (9 Dec 2022) +------------------------------------------------------------------------------- + +- General + - Convert `txt` to `md` files (`CHANGELOG`, `LICENSE`, `RELEASE`) (#2635) + - Parallelize build of manylinux wheels (#2636) + - Update source for Jenkins status badge (#2639, #2640) + - Update relocated_module_attribute to work with cythonized modules (#2644) + - Add utility methods to HierarchicalTimer (#2651) +- Core + - Fix preservation of stale flags through clone/pickle (#2633) + - Add support for local suffixes in scaling transformation (#2619) +- Solver Interfaces + - Fix handling of nonconvex MIQCP problems in Xpress (#2625) +- Testing + - Update GitHub actions to cancel jobs when new changes are pushed (#2634) + - Remove requirement for a `pyutilib` directory in Jenkins driver (#2637) + - Enable GitHub actions build on Windows Python 3.11 (#2638) + - Add build services infrastructure status badge (#2646) + - Add version upper bound on MOSEK warmstart test skip (#2649) + - Improve compare.py handling of nosetests/pytest output (#2661) +- GDP + - Add option to use multiple-bigm only on bound constraints (#2624) + - Add logical_to_disjunctive and replace uses of logical_to_linear (#2627) +- Contributed Packages + - FBBT: Fix bug with ExternalFunction expressions (#2657) + - PyROS: Fix uncertain param bounds evaluation for FactorModelSet (#2620) + - PyROS: Add origin attribute to BudgetSet (#2645) + - PyROS: Fix UncertaintySet.bounding_problem method (#2659) + ------------------------------------------------------------------------------- -Pyomo 6.4.2 17 Aug 2022 +Pyomo 6.4.3 (28 Nov 2022) +------------------------------------------------------------------------------- + +- General + - Update PauseGC to work in nested contexts (#2507) + - Simplify deepcopy/pickle logic to speed up model clone (#2510) + - Fix generate_standard_repn to handle unexpected NPV expressions (#2511) + - Add thread safe proxies for PauseGC, TempFileManager singletons (#2514) + - Fix ConstructionTimer bug for components indexed by nonfinite sets (#2518) + - Add calculate_variable_from_constraint differentiation mode option (#2549) + - Update URL for downloading GSL and GJH (#2556, #2588) + - Update logic for retrying failed downloads (#2569) + - Add support and testing for Python 3.11 (#2596, #2618) + - Update deprecation utilities to improve user messages (#2606) +- Core + - Refactor expression hierarchy, add RelationalExpression base class (#2499) + - Support cloning individual blocks (#2504) + - Block performance improvements (#2508) + - Add support for creating a slice to a single ComponentData object (#2509) + - Fix missing import of value in pyomo.core.base.external (#2525) + - Improve handling of restricted words on Blocks (#2535) + - Improve Reference() performance (#2537) + - Fix mapping gradient/hessian for external functions with string args (#2539) + - Fix bug for sum_product(Var, Param, Param) (#2551) + - Add deprecation path for expression objects moved to relational_expr (#2554) + - Exception when setting value of Expression to non-numeric expression (#2567) + - Improve deepcopy performance (#2628) +- Documentation + - Fix examples in working_models.rst (#2502) +- Solver Interfaces + - Improve SCIP results object (#2462) + - Improve warning message when LP writer raises KeyError (#2497) + - Fix Gurobi work limit bug (#2530) + - Updates and fixes for the NLv2 writer (#2540, #2622, #2568) + - Fix Xpress when stopped due to MAXTIME or MAXNODES (#2553) + - Add support for MOSEK 10 affine conic constraints (#2557) + - Fix missing explicit space in GAMS put command (#2578) + - Fix GAMS logfile storage location (#2580) + - LP writer performance improvements (#2583, #2585) + - Update handling of MOSEK Env and Python module (#2591) + - Release MOSEK license when optimize raises a mosek.Error (#2593) + - Update list of allowable characters in CPLEX filenames (#2597) +- Testing + - Update performance driver to be usable outside of Pyomo (#2505) + - Update the performance test driver (#2538) + - Reduce amount of environment code cached in GitHub actions (#2565) + - Update GitHub actions versions from v2 to v3 (#2566) + - Allow nan to compare equal in assertStructuredAlmostEqual() (#2582) + - Add test utilities for comparing expressions (#2590) + - Skip a test in MOSEK 10 due to a bug in warm starting MIQPs (#2614) + - Update skipped CBC test that works with CBC 2.10.6 (#2615) + - Add SCIP to GitHub actions environment (#2602) +- GDP + - Use OrderedSet instead of list in GDPTree to improve performance (#2516) + - Reduce calls to logical_to_linear in GDP transformations (#2519) + - Add utility for gathering BigM values after transformation (#2520) + - Add tighter logical constraints in transformations of nested GDPs (#2550) + - Fix pickling of transformed GDP models (#2576) + - Add multiple-bigM transformation (#2592) + - Improve performance of BigM transformation (#2605) + - Remove weakref mapping Disjunctions to their algebraic_constraint (#2617) +- Contributed Packages + - APPSI: Fix exception raised by appsi_gurobi during Python shutdown (#2498) + - APPSI: Improve handling of Gurobi results (#2517) + - APPSI: Add interface to HiGHS solver (#2561) + - APPSI: Only release Gurobi license after deleting all instances (#2599) + - APPSI: Patch IntEnum to preserve pre-3.11 formatting (#2607) + - CP: New package for constraint programming (#2570, #2612) + - GDPopt: Add warning when reporting results from LBB (#2534) + - GDPopt: Delete dummy objective when we're done using it (#2552) + - GDPopt: Add enumerate solution approach (#2559, #2575) + - IIS: Add package for computing the IIS of an infeasible Pyomo model (#2512) + - MindtPy: Fix bug in termination condition (#2587) + - MindtPy: Fix bug in checking absolute and relative gap (#2608) + - MPC: Data structures/utils for rolling horizon dynamic optimization (#2477) + - Parmest: Solve square problem to initialize regression problem (#2438) + - Parmest: Return ContinuousSet values from theta_est() (#2464) + - PyNumero: Fix NumPy deprecation warnings (#2521) + - PyNumero: Add interfaces to SciPy square solvers (#2523) + - PyNumero: Check AmplInterface availability in SciPy solver tests (#2594) + - PyNumero: Add ProjectedExtendedNLP class (#2601) + - PyNumero: Add interface to SciPy scalar Newton solver (#2603) + - PyROS: Rewrite UncertaintySet docstrings/improve validation (#2488) + - PyROS: Updates to subproblem initialization and solver call routines (#2515) + - PyROS: Fix collection of sub-solver solve times (#2543) + +------------------------------------------------------------------------------- +Pyomo 6.4.2 (17 Aug 2022) ------------------------------------------------------------------------------- - General @@ -62,7 +316,7 @@ Pyomo 6.4.2 17 Aug 2022 - PyROS: Update exception handling parsing BARON lower/upper bounds (#2486) ------------------------------------------------------------------------------- -Pyomo 6.4.1 13 May 2022 +Pyomo 6.4.1 (13 May 2022) ------------------------------------------------------------------------------- - General @@ -100,7 +354,7 @@ Pyomo 6.4.1 13 May 2022 (#2353, #2371) ------------------------------------------------------------------------------- -Pyomo 6.4.0 16 Mar 2022 +Pyomo 6.4.0 (16 Mar 2022) ------------------------------------------------------------------------------- - General @@ -124,11 +378,11 @@ Pyomo 6.4.0 16 Mar 2022 constant expressions (#2324) - PyNumero: Improve coverage of mpi block matrix tests (#2318) - PyNumero: Skip bound/domain validation in ExternalPyomoModel (#2323) - - PyNumero: Remove deprecated useage of numpy.bool (#2339) + - PyNumero: Remove deprecated usage of numpy.bool (#2339) - PyROS: Fix variable default initialization (#2331) ------------------------------------------------------------------------------- -Pyomo 6.3.0 23 Feb 2022 +Pyomo 6.3.0 (23 Feb 2022) ------------------------------------------------------------------------------- ADVANCE CHANGE NOTICE @@ -208,7 +462,7 @@ ADVANCE CHANGE NOTICE - TrustRegion: New implementation of Trust Region Framework (#2238, #2279) ------------------------------------------------------------------------------- -Pyomo 6.2 17 Nov 2021 +Pyomo 6.2 (17 Nov 2021) ------------------------------------------------------------------------------- - General @@ -280,7 +534,7 @@ Pyomo 6.2 17 Nov 2021 - PyROS: Add uncertain variable bounds detection (#2159) ------------------------------------------------------------------------------- -Pyomo 6.1.2 20 Aug 2021 +Pyomo 6.1.2 (20 Aug 2021) ------------------------------------------------------------------------------- - General @@ -295,14 +549,14 @@ Pyomo 6.1.2 20 Aug 2021 - MindtPy: Support gurobi_persistent in LP/NLP-based B&B algorithm (#2071) ------------------------------------------------------------------------------- -Pyomo 6.1.1 17 Aug 2021 +Pyomo 6.1.1 (17 Aug 2021) ------------------------------------------------------------------------------- - General - Adding missing __init__.py files across Pyomo (#2086) ------------------------------------------------------------------------------- -Pyomo 6.1 17 Aug 2021 +Pyomo 6.1 (17 Aug 2021) ------------------------------------------------------------------------------- - General @@ -364,7 +618,7 @@ Pyomo 6.1 17 Aug 2021 ------------------------------------------------------------------------------- -Pyomo 6.0.1 4 Jun 2021 +Pyomo 6.0.1 (4 Jun 2021) ------------------------------------------------------------------------------- - General @@ -379,7 +633,7 @@ Pyomo 6.0.1 4 Jun 2021 - GDPopt: Fix implicit conversion warnings (#2002) ------------------------------------------------------------------------------- -Pyomo 6.0 20 May 2021 +Pyomo 6.0 (20 May 2021) ------------------------------------------------------------------------------- BACKWARDS COMPATIBILITY WARNINGS @@ -508,7 +762,7 @@ BACKWARDS COMPATIBILITY WARNINGS - MindtPy: general improvements and add feasibility pump (#1847) ------------------------------------------------------------------------------- -Pyomo 5.7.3 29 Jan 2021 +Pyomo 5.7.3 (29 Jan 2021) ------------------------------------------------------------------------------- ADVANCE CHANGE NOTICE: @@ -555,7 +809,7 @@ ADVANCE CHANGE NOTICE: - Parmest: update pairwise plot to use the covariance matrix (#1774) ------------------------------------------------------------------------------- -Pyomo 5.7.2 17 Dec 2020 +Pyomo 5.7.2 (17 Dec 2020) ------------------------------------------------------------------------------- - General @@ -645,7 +899,7 @@ Pyomo 5.7.2 17 Dec 2020 misc updates (#1632, #1653, #1610, #1667, #1681, #1705, #1724) ------------------------------------------------------------------------------- -Pyomo 5.7.1 15 Sep 2020 +Pyomo 5.7.1 (15 Sep 2020) ------------------------------------------------------------------------------- - General @@ -707,7 +961,7 @@ Pyomo 5.7.1 15 Sep 2020 - Add integer arithmetic option to FME transformation (#1594) ------------------------------------------------------------------------------- -Pyomo 5.7.0 19 Jun 2020 +Pyomo 5.7.0 (19 Jun 2020) ------------------------------------------------------------------------------- - General @@ -793,13 +1047,13 @@ Pyomo 5.7.0 19 Jun 2020 - Add basic interior point algorithm based on PyNumero (#1450, #1505, #1495) ------------------------------------------------------------------------------- -Pyomo 5.6.9 18 Mar 2020 +Pyomo 5.6.9 (18 Mar 2020) ------------------------------------------------------------------------------- - General - Fix bug and improve output formatting in pyomo.util.infeasible (#1226, #1234) - Add 'version' and 'remove_in' arguments to deprecation_warning (#1231) - - Change NoArgumentGiven to a class and standardize useage (#1236) + - Change NoArgumentGiven to a class and standardize usage (#1236) - Update GSL URL to track change in AMPL SSL certificate (#1245) - Clean up setup.py (#1227) - Remove legacy build/test/distribution scripts (#1263) @@ -819,7 +1073,7 @@ Pyomo 5.6.9 18 Mar 2020 - Raise error on failed Param validation (#1272) - Fix return value for component decorator (#1296) - Change mult. order in taylor_series_expansion for numpy compatibility (#1329) - - Deprecate 'Any' being the defalt Param domain (#1266) + - Deprecate 'Any' being the default Param domain (#1266) - Solver Interfaces - Update CPLEX direct interface to support CPLEX 12.10 (#1276) - Shorten GAMS ShortNameLabeler symbols (#1338) @@ -863,7 +1117,7 @@ Pyomo 5.6.9 18 Mar 2020 - Fix Benders MPI logic bug and expand parallel test coverage (#1278) ------------------------------------------------------------------------------- -Pyomo 5.6.8 13 Dec 2019 +Pyomo 5.6.8 (13 Dec 2019) ------------------------------------------------------------------------------- - General @@ -896,7 +1150,7 @@ Pyomo 5.6.8 13 Dec 2019 - Add test skipping to trust region tests requiring IPOPT (#1220) ------------------------------------------------------------------------------- -Pyomo 5.6.7 7 Nov 2019 +Pyomo 5.6.7 (7 Nov 2019) ------------------------------------------------------------------------------- - General @@ -942,7 +1196,7 @@ Pyomo 5.6.7 7 Nov 2019 - Add documentation for user interface to LinearExpression (#1120) ------------------------------------------------------------------------------- -Pyomo 5.6.6 21 Jun 2019 +Pyomo 5.6.6 (21 Jun 2019) ------------------------------------------------------------------------------- - Core @@ -959,7 +1213,7 @@ Pyomo 5.6.6 21 Jun 2019 support for AbstractModels (#955, #1054, #1066) ------------------------------------------------------------------------------- -Pyomo 5.6.5 10 Jun 2019 +Pyomo 5.6.5 (10 Jun 2019) ------------------------------------------------------------------------------- - General @@ -972,14 +1226,14 @@ Pyomo 5.6.5 10 Jun 2019 - Benders cut generator component (#1028) ------------------------------------------------------------------------------- -Pyomo 5.6.4 24 May 2019 +Pyomo 5.6.4 (24 May 2019) ------------------------------------------------------------------------------- - General - Resolve project description rendering on PyPI ------------------------------------------------------------------------------- -Pyomo 5.6.3 24 May 2019 +Pyomo 5.6.3 (24 May 2019) ------------------------------------------------------------------------------- - General @@ -1005,7 +1259,7 @@ Pyomo 5.6.3 24 May 2019 - GDPbb improvements and cleanup (#982) ------------------------------------------------------------------------------- -Pyomo 5.6.2 1 May 2019 +Pyomo 5.6.2 (1 May 2019) ------------------------------------------------------------------------------- - General @@ -1062,7 +1316,7 @@ Pyomo 5.6.2 1 May 2019 - Network updates - Fix sequential decomposition when ports contain References (#975) - Contributed Packages - - ParmEst updates to make API more flexible, updated examples, documentation + - Parmest updates to make API more flexible, updated examples, documentation and tests (#814) - GDPopt algorithm enhancements, cut generation bug fix, add example to tests, time limit option support (#805, #826, #852, #970) @@ -1090,7 +1344,7 @@ Pyomo 5.6.2 1 May 2019 - Show how to activate/deactivate constraints (#932) ------------------------------------------------------------------------------- -Pyomo 5.6.1 18 Jan 2019 +Pyomo 5.6.1 (18 Jan 2019) ------------------------------------------------------------------------------- - General @@ -1106,7 +1360,7 @@ Pyomo 5.6.1 18 Jan 2019 - Add support for RangeSet in GDP transformations (#803) ------------------------------------------------------------------------------- -Pyomo 5.6 19 Dec 2018 +Pyomo 5.6 (19 Dec 2018) ------------------------------------------------------------------------------- - General @@ -1189,7 +1443,7 @@ Pyomo 5.6 19 Dec 2018 - PySP updates - Python 3.7 support (#463) - Fix bugs in finance example (#564, #578) - - Added a wrapper for PySP to create a scripting interace (#689, #727, #737) + - Added a wrapper for PySP to create a scripting interface (#689, #727, #737) - Bug fixes (#736, #788) - New packages: - DataPortal: @@ -1206,7 +1460,7 @@ Pyomo 5.6 19 Dec 2018 - New packages: - Preprocessing transformation for variable aggregation (#533, #617) - Compute disjunctive variable bounds (#481) - - ParmEst package for parameter estimation (#706, #733, #769, #781) + - Parmest package for parameter estimation (#706, #733, #769, #781) - PyNumero package for numerical optimization (#725, #775) - sensitivity_toolbox for interfacing with sIPOPT (#766) - PETSc AMPL wrapper (#774) @@ -1231,14 +1485,14 @@ Pyomo 5.6 19 Dec 2018 - Update examples (#436) ------------------------------------------------------------------------------- -Pyomo 5.5.1 26 Oct 2018 +Pyomo 5.5.1 (26 Oct 2018) ------------------------------------------------------------------------------- - General - Adding support for Python 3.7 ------------------------------------------------------------------------------- -Pyomo 5.5 14 Apr 2018 +Pyomo 5.5 (14 Apr 2018) ------------------------------------------------------------------------------- - Move preprocessing transformations to contrib (#426) @@ -1273,25 +1527,25 @@ Pyomo 5.5 14 Apr 2018 - Documentation updates (#425) ------------------------------------------------------------------------------- -Pyomo 5.4.3 2 Mar 2018 +Pyomo 5.4.3 (2 Mar 2018) ------------------------------------------------------------------------------- - Another fix in the release process. ------------------------------------------------------------------------------- -Pyomo 5.4.2 2 Mar 2018 +Pyomo 5.4.2 (2 Mar 2018) ------------------------------------------------------------------------------- - Misc fix in the release process. ------------------------------------------------------------------------------- -Pyomo 5.4.1 28 Feb 2018 +Pyomo 5.4.1 (28 Feb 2018) ------------------------------------------------------------------------------- - Misc version increment to support pypi idiosyncrasies. ------------------------------------------------------------------------------- -Pyomo 5.4 28 Feb 2018 +Pyomo 5.4 (28 Feb 2018) ------------------------------------------------------------------------------- ======= @@ -1337,7 +1591,7 @@ Pyomo 5.4 28 Feb 2018 - Logging overhaul and support for timing concrete models (#245) ------------------------------------------------------------------------------- -Pyomo 5.3 21 Oct 2017 +Pyomo 5.3 (21 Oct 2017) ------------------------------------------------------------------------------- - Removed testing for Python 3.4 @@ -1361,7 +1615,7 @@ Pyomo 5.3 21 Oct 2017 - Tracking changes in pyutilib.th ------------------------------------------------------------------------------- -Pyomo 5.2 14 May 2017 +Pyomo 5.2 (14 May 2017) ------------------------------------------------------------------------------- - Resolved timeout issues running NEOS solvers @@ -1406,13 +1660,13 @@ Pyomo 5.2 14 May 2017 - Updating the bilinear transform to avoid creating a Set `index`. ------------------------------------------------------------------------------- -Pyomo 5.1.1 8 Jan 2017 +Pyomo 5.1.1 (8 Jan 2017) ------------------------------------------------------------------------------- - Monkeypatch to resolve (#95) ------------------------------------------------------------------------------- -Pyomo 5.1 4 Jan 2017 +Pyomo 5.1 (4 Jan 2017) ------------------------------------------------------------------------------- - Added a CONOPT plugin to handle a custom SOL file output (#88) @@ -1436,13 +1690,13 @@ Pyomo 5.1 4 Jan 2017 - Removed support for OpenOpt ------------------------------------------------------------------------------- -Pyomo 5.0.1 16 Nov 2016 +Pyomo 5.0.1 (16 Nov 2016) ------------------------------------------------------------------------------- - Updating PyUtilib dependency ------------------------------------------------------------------------------- -Pyomo 5.0 15 Nov 2016 +Pyomo 5.0 (15 Nov 2016) ------------------------------------------------------------------------------- - Added examples used in the Pyomo book to the Pyomo software repos @@ -2107,7 +2361,7 @@ Pyomo 2.4.3199 - ASL solver interface can now be specified with the form --solver=asl:PICO -- Usability enchancements +- Usability enhancements - Numerous bug fixes. - Updated messages to provide clearer indication of modeling errors @@ -2330,7 +2584,7 @@ Pyomo 1.1 indexed by one or more sets. - A revision to Pyomo semantics. Now, expressions are not evaluated - when performing arithemetic operations (plus, times, etc). + when performing arithmetic operations (plus, times, etc). - A major rework of how component attributes are managed for NumericValue objects and subclasses of this class. This was driven @@ -2390,11 +2644,11 @@ Pyomo 1.1 the validation function: we want to allow the validation function to refer to the value as if it were set. - - Depricating the use of the expression factory for algebraic expression + - Deprecating the use of the expression factory for algebraic expression types. These are now launched directly from the generate_expression() function. - - Adding support for specifing options when launching solvers. For example: + - Adding support for specifying options when launching solvers. For example: results = self.pico.solve(currdir+"bell3a.mps", options="maxCPUMinutes=0.1") diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6c7b8fdb3a1..5907c0d36ff 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,8 @@ Contributing to Pyomo Online Documentation -------------------- -Detailed contribution guidelines may be found in our [online documentation](http://pyomo.readthedocs.io/en/latest/contribution_guide.html). +Detailed contribution guidelines may be found in our +[online documentation](http://pyomo.readthedocs.io/en/latest/contribution_guide.html). Pull Requests ------------- diff --git a/LICENSE.txt b/LICENSE.md similarity index 99% rename from LICENSE.txt rename to LICENSE.md index 103764561df..192d315e4b5 100644 --- a/LICENSE.txt +++ b/LICENSE.md @@ -1,3 +1,6 @@ +LICENSE +======= + Copyright (c) 2008-2022 National Technology and Engineering Solutions of Sandia, LLC . Under the terms of Contract DE-NA0003525 with National Technology and Engineering Solutions of Sandia, LLC , the U.S. diff --git a/MANIFEST.in b/MANIFEST.in index 3e677d0574e..c28ab72d11a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,2 +1,2 @@ include README.md -include LICENSE.txt +include LICENSE.md diff --git a/README.md b/README.md index 655523d8741..b35ac20ed98 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,8 @@ [![Github Actions Status](https://github.com/Pyomo/pyomo/workflows/GitHub%20CI/badge.svg?event=push)](https://github.com/Pyomo/pyomo/actions?query=event%3Apush+workflow%3A%22GitHub+CI%22) -[![Jenkins Status](https://img.shields.io/jenkins/s/https/software.sandia.gov/downloads/pub/pyomo/jenkins/Pyomo_trunk.svg?logo=jenkins&logoColor=white)](https://jenkins-srn.sandia.gov/job/Pyomo_trunk) +[![Jenkins Status](https://github.com/Pyomo/jenkins-status/blob/main/pyomo_main.svg)](https://pyomo-jenkins.sandia.gov/) [![codecov](https://codecov.io/gh/Pyomo/pyomo/branch/main/graph/badge.svg)](https://codecov.io/gh/Pyomo/pyomo) [![Documentation Status](https://readthedocs.org/projects/pyomo/badge/?version=latest)](http://pyomo.readthedocs.org/en/latest/) +[![Build services](https://github.com/Pyomo/jenkins-status/blob/main/pyomo_services.svg)](https://pyomo-jenkins.sandia.gov/) [![GitHub contributors](https://img.shields.io/github/contributors/pyomo/pyomo.svg)](https://github.com/pyomo/pyomo/graphs/contributors) [![Merged PRs](https://img.shields.io/github/issues-pr-closed-raw/pyomo/pyomo.svg?label=merged+PRs)](https://github.com/pyomo/pyomo/pulls?q=is:pr+is:merged) @@ -26,6 +27,7 @@ including: - Generalized disjunctive programming - Differential algebraic equations - Mathematical programming with equilibrium constraints + - Constraint programming Pyomo supports analysis and scripting within a full-featured programming language. Further, Pyomo has also proven an effective framework for @@ -44,13 +46,19 @@ subproblems using Python parallel communication libraries. Pyomo was formerly released as the Coopr software library. -Pyomo is available under the BSD License, see the LICENSE.txt file. +Pyomo is available under the BSD License - see the +[`LICENSE.md`](https://github.com/Pyomo/pyomo/blob/main/LICENSE.md) file. Pyomo is currently tested with the following Python implementations: -* CPython: 3.7, 3.8, 3.9, 3.10 +* CPython: 3.7, 3.8, 3.9, 3.10, 3.11 * PyPy: 3.7, 3.8, 3.9 +_Testing and support policy_: + +At the time of the first Pyomo release after the end-of-life of a minor Python +version, we will remove testing for that Python version. + ### Installation #### PyPI [![PyPI version](https://img.shields.io/pypi/v/pyomo.svg?maxAge=3600)](https://pypi.org/project/Pyomo/) [![PyPI downloads](https://img.shields.io/pypi/dm/pyomo.svg?maxAge=21600)](https://pypistats.org/packages/pyomo) @@ -77,7 +85,11 @@ To get help from the Pyomo community ask a question on one of the following: Pyomo development moved to this repository in June, 2016 from Sandia National Laboratories. Developer discussions are hosted by -[google groups](https://groups.google.com/forum/#!forum/pyomo-developers). +[Google Groups](https://groups.google.com/forum/#!forum/pyomo-developers). + +The Pyomo Development team holds weekly coordination meetings on +Tuesdays 12:30 - 14:00 (MT). Please contact wg-pyomo@sandia.gov to +request call-in information. By contributing to this software project, you are agreeing to the following terms and conditions for your contributions: diff --git a/RELEASE.txt b/RELEASE.md similarity index 52% rename from RELEASE.txt rename to RELEASE.md index 10def0707e9..53de39654c9 100644 --- a/RELEASE.txt +++ b/RELEASE.md @@ -1,4 +1,4 @@ -We are pleased to announce the release of Pyomo 6.4.2. +We are pleased to announce the release of Pyomo 6.6.1. Pyomo is a collection of Python software packages that supports a diverse set of optimization capabilities for formulating and analyzing @@ -9,16 +9,29 @@ The following are highlights of the 6.0 release series: - Improved stability and robustness of core Pyomo code and solver interfaces - Integration of Boolean variables into GDP - Integration of NumPy support into the Pyomo expression system - - Added support for Python 3.10 + - Implemented a more performant and robust expression generation system + - Implemented a more performant NL file writer (NLv2) + - Implemented a more performant LP file writer (LPv2) + - Applied [PEP8 standards](https://peps.python.org/pep-0008/) throughout the + codebase + - Added support for Python 3.10, 3.11 - Removed support for Python 3.6 + - Removed the `pyomo check` command - New packages: - APPSI (Auto-Persistent Pyomo Solver Interfaces) + - CP (Constraint programming models and solver interfaces) + - DoE (Model based design of experiments) - External grey box models + - IIS (Standard interface to solver IIS capabilities) + - MPC (Data structures/utils for rolling horizon dynamic optimization) + - piecewise (Modeling with and reformulating multivariate piecewise linear + functions) - PyROS (Pyomo Robust Optimization Solver) - Structural model analysis - Rewrite of the TrustRegion Solver -A full list of updates and changes is available in the CHANGELOG.txt +A full list of updates and changes is available in the +[`CHANGELOG.md`](https://github.com/Pyomo/pyomo/blob/main/CHANGELOG.md). Enjoy! @@ -27,7 +40,6 @@ Enjoy! - https://www.pyomo.org ------------ About Pyomo ----------- @@ -35,11 +47,15 @@ The Pyomo home page provides resources for Pyomo users: * https://www.pyomo.org +Detailed documentation is hosted on Read the Docs: + + * https://pyomo.readthedocs.io/en/stable/ + Pyomo development is hosted at GitHub: * https://github.com/Pyomo -Get help at +Get help at: * StackOverflow: https://stackoverflow.com/questions/tagged/pyomo * Pyomo Forum: https://groups.google.com/group/pyomo-forum/ diff --git a/conftest.py b/conftest.py index 70a3ccebec5..df5b0f31e59 100644 --- a/conftest.py +++ b/conftest.py @@ -11,6 +11,7 @@ import pytest + def pytest_runtest_setup(item): """ This method overrides pytest's default behavior for marked tests. @@ -45,8 +46,9 @@ def pytest_runtest_setup(item): elif markeroption: return elif item_markers: - if (not set(implicit_markers).issubset(item_markers) - and not item_markers.issubset(set(extended_implicit_markers))): + if not set(implicit_markers).issubset( + item_markers + ) and not item_markers.issubset(set(extended_implicit_markers)): pytest.skip('SKIPPED: Only running default, solver, and unmarked tests.') diff --git a/doc/OnlineDocs/README.md b/doc/OnlineDocs/README.md new file mode 100644 index 00000000000..a2d4e5997dc --- /dev/null +++ b/doc/OnlineDocs/README.md @@ -0,0 +1,27 @@ +Preview Changes Locally +------------------------ + +1. Install Sphinx + + ```bash + $ pip install sphinx sphinx_rtd_theme sphinx_copybutton + ``` + + **NOTE**: You may get a warning about the `dot` command if you do not have + `graphviz` installed. + +1. Build the documentation + + ```bash + $ make html # Option 1 + $ make latexpdf # Option 2 + ``` + +1. View `_build/html/index.html` in your browser + +Test Changes Locally +-------------------- + + ```bash + $ make -C doc/OnlineDocs doctest -d # from the pyomo root folder + ``` diff --git a/doc/OnlineDocs/README.txt b/doc/OnlineDocs/README.txt deleted file mode 100644 index 237dc8d3fcf..00000000000 --- a/doc/OnlineDocs/README.txt +++ /dev/null @@ -1,32 +0,0 @@ -GETTING STARTED ---------------- - -0. Install Sphinx - - pip install sphinx sphinx_rtd_theme - -1. Edit documentation - - vi *.rst - -2. Build the documentation - - make html - -or - - make latexpdf - -NOTE: If the local python is not on your path, then you may need to -invoke 'make' differently. For example, using the PyUtilib 'lbin' command: - - lbin make html - -3. Admire your work - - cd _build/html - open index.html - -4. Repeat - - GOTO STEP 1 diff --git a/doc/OnlineDocs/_static/theme_overrides.css b/doc/OnlineDocs/_static/theme_overrides.css index faadd069f12..43d48693e03 100644 --- a/doc/OnlineDocs/_static/theme_overrides.css +++ b/doc/OnlineDocs/_static/theme_overrides.css @@ -13,7 +13,7 @@ code.descname { font-weight: bold !important; color: black; } -/* method argument lists shoult *not* be bold, argument names in black */ +/* method argument lists should *not* be bold, argument names in black */ dl.py.method dt { font-weight: normal; } diff --git a/doc/OnlineDocs/advanced_topics/flattener/index.rst b/doc/OnlineDocs/advanced_topics/flattener/index.rst new file mode 100644 index 00000000000..377de5233ec --- /dev/null +++ b/doc/OnlineDocs/advanced_topics/flattener/index.rst @@ -0,0 +1,44 @@ +"Flattening" a Pyomo model +========================== + +.. autosummary:: + + pyomo.dae.flatten + +.. toctree:: + :maxdepth: 1 + + motivation.rst + reference.rst + +What does it mean to flatten a model? +------------------------------------- +When accessing components in a block-structured model, we use +``component_objects`` or ``component_data_objects`` to access all objects +of a specific ``Component`` or ``ComponentData`` type. +The generated objects may be thought of as a "flattened" representation +of the model, as they may be accessed without any knowledge of the model's +block structure. +These methods are very useful, but it is still challenging to use them +to access specific components. +Specifically, we often want to access "all components indexed by some set," +or "all component data at a particular index of this set." +In addition, we often want to generate the components in a block that +is indexed by our particular set, as these components may be thought of as +"implicitly indexed" by this set. +The ``pyomo.dae.flatten`` module aims to address this use case by providing +utilities to generate all components indexed, explicitly or implicitly, by +user-provided sets. + +**When we say "flatten a model," we mean "generate all components in the model, +preserving all user-specified indexing sets."** + +Data structures +--------------- +The components returned are either ``ComponentData`` objects, for components +not indexed by any of the provided sets, or references-to-slices, for +components indexed, explicitly or implicitly, by the provided sets. +Slices are necessary as they can encode "implicit indexing" -- where a +component is contained in an indexed block. It is natural to return references +to these slices, so they may be accessed and manipulated like any other +component. diff --git a/doc/OnlineDocs/advanced_topics/flattener/motivation.rst b/doc/OnlineDocs/advanced_topics/flattener/motivation.rst new file mode 100644 index 00000000000..046d888a215 --- /dev/null +++ b/doc/OnlineDocs/advanced_topics/flattener/motivation.rst @@ -0,0 +1,26 @@ +Motivation +========== + +The ``pyomo.dae.flatten`` module was originally developed to assist with +dynamic optimization. A very common operation in dynamic or multi-period +optimization is to initialize all time-indexed variables to their values +at a specific time point. However, for variables indexed by time and +arbitrary other indexing sets, this is difficult to do in a way that does +does not depend on the variable we are initializing. Things get worse +when we consider that a time index can exist on a parent block rather +than the component itself. + +By "reshaping" time-indexed variables in a model into references indexed +only by time, the ``flatten_dae_components`` function allows us to perform +operations that depend on knowledge of time indices without knowing +anything about the variables that we are operating on. + +This "flattened representation" of a model turns out to be useful for +dynamic optimization in a variety of other contexts. Examples include +constructing a tracking objective function and plotting results. +This representation is also useful in cases where we want to preserve +indexing along more than one set, as in PDE-constrained optimization. +The ``flatten_components_along_sets`` function allows partitioning +components while preserving multiple indexing sets. +In such a case, time and space-indexed data for a given variable is useful +for purposes such as initialization, visualization, and stability analysis. diff --git a/doc/OnlineDocs/advanced_topics/flattener/reference.rst b/doc/OnlineDocs/advanced_topics/flattener/reference.rst new file mode 100644 index 00000000000..22c7b67e1f6 --- /dev/null +++ b/doc/OnlineDocs/advanced_topics/flattener/reference.rst @@ -0,0 +1,14 @@ +API reference +============= + +.. autosummary:: + + pyomo.dae.flatten.slice_component_along_sets + pyomo.dae.flatten.flatten_components_along_sets + pyomo.dae.flatten.flatten_dae_components + +.. autofunction:: pyomo.dae.flatten.slice_component_along_sets + +.. autofunction:: pyomo.dae.flatten.flatten_components_along_sets + +.. autofunction:: pyomo.dae.flatten.flatten_dae_components diff --git a/doc/OnlineDocs/advanced_topics/index.rst b/doc/OnlineDocs/advanced_topics/index.rst index e727dfc0d67..d5293bfa40c 100644 --- a/doc/OnlineDocs/advanced_topics/index.rst +++ b/doc/OnlineDocs/advanced_topics/index.rst @@ -7,3 +7,5 @@ Advanced Topics persistent_solvers.rst units_container.rst linearexpression.rst + flattener/index.rst + sos_constraints.rst diff --git a/doc/OnlineDocs/advanced_topics/linearexpression.rst b/doc/OnlineDocs/advanced_topics/linearexpression.rst index abadc7869da..a320b66590f 100644 --- a/doc/OnlineDocs/advanced_topics/linearexpression.rst +++ b/doc/OnlineDocs/advanced_topics/linearexpression.rst @@ -39,4 +39,4 @@ syntax. This example creates two constraints that are the same: .. warning:: The lists that are passed to ``LinearModel`` are not copied, so caution must - be excercised if they are modified after the component is constructed. + be exercised if they are modified after the component is constructed. diff --git a/doc/OnlineDocs/advanced_topics/sos_constraints.rst b/doc/OnlineDocs/advanced_topics/sos_constraints.rst new file mode 100644 index 00000000000..b536b3f0b26 --- /dev/null +++ b/doc/OnlineDocs/advanced_topics/sos_constraints.rst @@ -0,0 +1,288 @@ +Special Ordered Sets (SOS) +========================== + +Pyomo allows users to declare special ordered sets (SOS) within their problems. +These are sets of variables among which only a certain number of variables can +be non-zero, and those that are must be adjacent according to a given order. + +Special ordered sets of types 1 (SOS1) and 2 (SOS2) are the classic ones, but +the concept can be generalised: a SOS of type N cannot have more than N of its +members taking non-zero values, and those that do must be adjacent in the set. +These can be useful for modelling and computational performance purposes. + +By explicitly declaring these, users can keep their formulations and respective +solving times shorter than they would otherwise, since the logical constraints +that enforce the SOS do not need to be implemented within the model and are +instead (ideally) handled algorithmically by the solver. + +Special ordered sets can be declared one by one or indexed via other sets. + +Non-indexed Special Ordered Sets +-------------------------------- + +A single SOS of type **N** involving all members of a pyomo Var component can +be declared in one line: + +.. currentmodule:: pyomo.environ + +.. testcode:: + + # import pyomo + import pyomo.environ as pyo + # declare the model + model = pyo.AbstractModel() + # the type of SOS + N = 1 # or 2, 3, ... + # the set that indexes the variables + model.A = pyo.Set() + # the variables under consideration + model.x = pyo.Var(model.A) + # the sos constraint + model.mysos = pyo.SOSConstraint(var=model.x, sos=N) + +In the example above, the weight of each variable is determined automatically +based on their position/order in the pyomo Var component (``model.x``). + +Alternatively, the weights can be specified through a pyomo Param component +(``model.mysosweights``) indexed by the set also indexing the variables +(``model.A``): + +.. doctest:: + :hide: + + >>> model = pyo.AbstractModel() + +.. testcode:: + + # the set that indexes the variables + model.A = pyo.Set() + # the variables under consideration + model.x = pyo.Var(model.A) + # the weights for each variable used in the sos constraints + model.mysosweights = pyo.Param(model.A) + # the sos constraint + model.mysos = pyo.SOSConstraint( + var=model.x, + sos=N, + weights=model.mysosweights + ) + +Indexed Special Ordered Sets +---------------------------- + +Multiple SOS of type **N** involving members of a pyomo Var component +(``model.x``) can be created using two additional sets (``model.A`` and +``model.mysosvarindexset``): + +.. doctest:: + :hide: + + >>> model = pyo.AbstractModel() + +.. testcode:: + + # the set that indexes the variables + model.A = pyo.Set() + # the variables under consideration + model.x = pyo.Var(model.A) + # the set indexing the sos constraints + model.B = pyo.Set() + # the sets containing the variable indexes for each constraint + model.mysosvarindexset = pyo.Set(model.B) + # the sos constraints + model.mysos = pyo.SOSConstraint( + model.B, + var=model.x, + sos=N, + index=model.mysosvarindexset + ) + +In the example above, the weights are determined automatically from the +position of the variables. Alternatively, they can be specified through a pyomo +Param component (``model.mysosweights``) and an additional set (``model.C``): + +.. doctest:: + :hide: + + >>> model = pyo.AbstractModel() + +.. testcode:: + + # the set that indexes the variables + model.A = pyo.Set() + # the variables under consideration + model.x = pyo.Var(model.A) + # the set indexing the sos constraints + model.B = pyo.Set() + # the sets containing the variable indexes for each constraint + model.mysosvarindexset = pyo.Set(model.B) + # the set that indexes the variables used in the sos constraints + model.C = pyo.Set(within=model.A) + # the weights for each variable used in the sos constraints + model.mysosweights = pyo.Param(model.C) + # the sos constraints + model.mysos = pyo.SOSConstraint( + model.B, + var=model.x, + sos=N, + index=model.mysosvarindexset, + weights=model.mysosweights, + ) + +Declaring Special Ordered Sets using rules +------------------------------------------ + +Arguably the best way to declare an SOS is through rules. This option allows +users to specify the variables and weights through a method provided via the +``rule`` parameter. If this parameter is used, users must specify a method that +returns one of the following options: + +- a list of the variables in the SOS, whose respective weights are then determined based on their position; + +- a tuple of two lists, the first for the variables in the SOS and the second for the respective weights; + +- or, pyomo.environ.SOSConstraint.Skip, if the SOS is not to be declared. + +If one is content on having the weights determined based on the position of the +variables, then the following example using the ``rule`` parameter is sufficient: + +.. doctest:: + :hide: + + >>> model = pyo.AbstractModel() + +.. testcode:: + + # the set that indexes the variables + model.A = pyo.Set() + # the variables under consideration + model.x = pyo.Var(model.A, domain=pyo.NonNegativeReals) + # the rule method creating the constraint + def rule_mysos(m): + return [m.x[a] for a in m.x] + # the sos constraint(s) + model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=N) + + +If the weights must be determined in some other way, then the following example +illustrates how they can be specified for each member of the SOS using the ``rule`` parameter: + +.. doctest:: + :hide: + + >>> model = pyo.AbstractModel() + +.. testcode:: + + # the set that indexes the variables + model.A = pyo.Set() + # the variables under consideration + model.x = pyo.Var(model.A, domain=pyo.NonNegativeReals) + # the rule method creating the constraint + def rule_mysos(m): + var_list = [m.x[a] for a in m.x] + weight_list = [i+1 for i in range(len(var_list))] + return (var_list, weight_list) + # the sos constraint(s) + model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=N) + +The ``rule`` parameter also allows users to create SOS comprising variables +from different pyomo Var components, as shown below: + +.. doctest:: + :hide: + + >>> model = pyo.AbstractModel() + +.. testcode:: + + # the set that indexes the x variables + model.A = pyo.Set() + # the set that indexes the y variables + model.B = pyo.Set() + # the set that indexes the SOS constraints + model.C = pyo.Set() + # the x variables, which will be used in the constraints + model.x = pyo.Var(model.A, domain=pyo.NonNegativeReals) + # the y variables, which will be used in the constraints + model.y = pyo.Var(model.B, domain=pyo.NonNegativeReals) + # the x variable indices for each constraint + model.mysosindex_x = pyo.Set(model.C) + # the y variable indices for each constraint + model.mysosindex_y = pyo.Set(model.C) + # the weights for the x variable indices + model.mysosweights_x = pyo.Param(model.A) + # the weights for the y variable indices + model.mysosweights_y = pyo.Param(model.B) + # the rule method with which each constraint c is built + def rule_mysos(m, c): + var_list = [m.x[a] for a in m.mysosindex_x[c]] + var_list.extend([m.y[b] for b in m.mysosindex_y[c]]) + weight_list = [m.mysosweights_x[a] for a in m.mysosindex_x[c]] + weight_list.extend([m.mysosweights_y[b] for b in m.mysosindex_y[c]]) + return (var_list, weight_list) + # the sos constraint(s) + model.mysos = pyo.SOSConstraint( + model.C, + rule=rule_mysos, + sos=N + ) + +Compatible solvers +------------------ + +Not all LP/MILP solvers are compatible with SOS declarations and Pyomo might +not be ready to interact with all those that are. The following is a list of +solvers known to be compatible with special ordered sets through Pyomo: + +- CBC +- SCIP +- Gurobi +- CPLEX + +Please note that declaring an SOS is no guarantee that a solver will use it as +such in the end. Some solvers, namely Gurobi and CPLEX, might reformulate +problems with explicit SOS declarations, if they perceive that to be useful. + +Full example with non-indexed SOS constraint +-------------------------------------------- + +.. doctest:: + :hide: + + >>> model = pyo.AbstractModel() + +.. testcode:: + + import pyomo.environ as pyo + from pyomo.opt import check_available_solvers + from math import isclose + N = 1 + model = pyo.ConcreteModel() + model.x = pyo.Var([1], domain=pyo.NonNegativeReals, bounds=(0,40)) + model.A = pyo.Set(initialize=[1,2,4,6]) + model.y = pyo.Var(model.A, domain=pyo.NonNegativeReals, bounds=(0,2)) + model.OBJ = pyo.Objective( + expr=(1*model.x[1]+ + 2*model.y[1]+ + 3*model.y[2]+ + -0.1*model.y[4]+ + 0.5*model.y[6]) + ) + model.ConstraintYmin = pyo.Constraint( + expr = (model.x[1]+ + model.y[1]+ + model.y[2]+ + model.y[6] >= 0.25 + ) + ) + model.mysos = pyo.SOSConstraint( + var=model.y, + sos=N + ) + solver_name = 'scip' + solver_available = bool(check_available_solvers(solver_name)) + if solver_available: + opt = pyo.SolverFactory(solver_name) + opt.solve(model, tee=False) + assert isclose(pyo.value(model.OBJ), 0.05, abs_tol=1e-3) diff --git a/doc/OnlineDocs/bibliography.rst b/doc/OnlineDocs/bibliography.rst index e4e76f66adb..6cbb96d3bfb 100644 --- a/doc/OnlineDocs/bibliography.rst +++ b/doc/OnlineDocs/bibliography.rst @@ -9,7 +9,7 @@ Bibliography .. [GAMS] http://www.gams.com -.. [GRCSPaper] Isenberg, NM, Akula, P, Eslick, JC, Bhattacharyya, D, +.. [Isenberg_et_al] Isenberg, NM, Akula, P, Eslick, JC, Bhattacharyya, D, Miller, DC, Gounaris, CE. A generalized cuttingâ€set approach for nonlinear robust optimization in process systems engineering. AIChE J. 2021; 67:e17175. DOI `10.1002/aic.17175 @@ -34,6 +34,12 @@ Bibliography 2nd Edition. Springer Optimization and Its Applications, Vol 67. Springer, 2017. +.. [PyomoBookIII] Bynum, Michael L., Gabriel A. Hackebeil, + William E. Hart, Carl D. Laird, Bethany L. Nicholson, + John D. Siirola, Jean-Paul Watson, and David L. Woodruff. + Pyomo - Optimization Modeling in Python, 3rd Edition. + Vol. 67. Springer, 2021. + .. [PyomoJournal] William E. Hart, Jean-Paul Watson, David L. Woodruff. "Pyomo: modeling and solving mathematical programs in Python," Mathematical Programming Computation, Volume diff --git a/doc/OnlineDocs/citing_pyomo.rst b/doc/OnlineDocs/citing_pyomo.rst index 8064e56770b..458a1fe6ab7 100644 --- a/doc/OnlineDocs/citing_pyomo.rst +++ b/doc/OnlineDocs/citing_pyomo.rst @@ -4,9 +4,10 @@ Citing Pyomo Pyomo ----- +Bynum, Michael L., Gabriel A. Hackebeil, William E. Hart, Carl D. Laird, Bethany L. Nicholson, John D. Siirola, Jean-Paul Watson, and David L. Woodruff. Pyomo - Optimization Modeling in Python, 3rd Edition. Springer, 2021. + Hart, William E., Jean-Paul Watson, and David L. Woodruff. "Pyomo: modeling and solving mathematical programs in Python." Mathematical Programming Computation 3, no. 3 (2011): 219-260. -Hart, William E., Carl Laird, Jean-Paul Watson, David L. Woodruff, Gabriel A. Hackebeil, Bethany L. Nicholson, and John D. Siirola. Pyomo – Optimization Modeling in Python. Springer, 2017. PySP ---- diff --git a/doc/OnlineDocs/conf.py b/doc/OnlineDocs/conf.py index 03da0c471e4..d8939cf61dd 100644 --- a/doc/OnlineDocs/conf.py +++ b/doc/OnlineDocs/conf.py @@ -30,9 +30,11 @@ try: print("Regenerating SPY files...") from strip_examples import generate_spy_files + generate_spy_files(os.path.abspath('tests')) - generate_spy_files(os.path.abspath(os.path.join( - 'library_reference','kernel','examples'))) + generate_spy_files( + os.path.abspath(os.path.join('library_reference', 'kernel', 'examples')) + ) finally: sys.path.pop(0) @@ -68,12 +70,13 @@ 'sphinx.ext.inheritance_diagram', 'sphinx.ext.autosummary', 'sphinx.ext.doctest', + 'sphinx.ext.todo', 'sphinx_copybutton', #'sphinx.ext.githubpages', ] viewcode_follow_imported_members = True -#napoleon_include_private_with_doc = True +# napoleon_include_private_with_doc = True copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: " copybutton_prompt_is_regexp = True @@ -92,7 +95,7 @@ # General information about the project. project = u'Pyomo' -copyright = u'2017, Sandia National Laboratories' +copyright = u'2008-2023, Sandia National Laboratories' author = u'Pyomo Developers' # The version info for the project you're documenting, acts as replacement for @@ -101,6 +104,7 @@ # # The short X.Y version. import pyomo.version + version = pyomo.version.__version__ # The full version, including alpha/beta/rc tags. release = pyomo.version.__version__ @@ -121,7 +125,7 @@ pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False +todo_include_todos = True # If true, doctest flags (comments looking like # doctest: FLAG, ...) at # the ends of lines and markers are removed for all code @@ -137,20 +141,14 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -#html_theme = 'alabaster' +# html_theme = 'alabaster' on_rtd = os.environ.get('READTHEDOCS', None) == 'True' html_theme = 'sphinx_rtd_theme' -# Force HTML4: If we don't explicitly force HTML4, then the background -# of the Parameters/Returns/Return type headers is shaded the same as the -# method prototype (tested 15 April 21 with Sphinx=3.5.4 and -# sphinx-rtd-theme=0.5.2). -html4_writer = True -#html5_writer = True - if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme + html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme @@ -163,9 +161,7 @@ # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] -html_css_files = [ - 'theme_overrides.css', -] +html_css_files = ['theme_overrides.css'] html_favicon = "../logos/pyomo/favicon.ico" @@ -182,15 +178,12 @@ # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -199,20 +192,14 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). -latex_documents = [ - (master_doc, 'pyomo.tex', 'Pyomo Documentation', - 'Pyomo', 'manual'), -] +latex_documents = [(master_doc, 'pyomo.tex', 'Pyomo Documentation', 'Pyomo', 'manual')] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'pyomo', 'Pyomo Documentation', - [author], 1) -] +man_pages = [(master_doc, 'pyomo', 'Pyomo Documentation', [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -221,27 +208,41 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'pyomo', 'Pyomo Documentation', - author, 'Pyomo', 'One line description of project.', - 'Miscellaneous'), + ( + master_doc, + 'pyomo', + 'Pyomo Documentation', + author, + 'Pyomo', + 'One line description of project.', + 'Miscellaneous', + ) ] -#autodoc_member_order = 'bysource' -#autodoc_member_order = 'groupwise' +# autodoc_member_order = 'bysource' +# autodoc_member_order = 'groupwise' # -- Check which conditional dependencies are available ------------------ # Used for skipping certain doctests from sphinx.ext.doctest import doctest + doctest_default_flags = ( - doctest.ELLIPSIS + doctest.NORMALIZE_WHITESPACE + - doctest.IGNORE_EXCEPTION_DETAIL + doctest.DONT_ACCEPT_TRUE_FOR_1 + doctest.ELLIPSIS + + doctest.NORMALIZE_WHITESPACE + + doctest.IGNORE_EXCEPTION_DETAIL + + doctest.DONT_ACCEPT_TRUE_FOR_1 ) + + class IgnoreResultOutputChecker(doctest.OutputChecker): IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT') + def check_output(self, want, got, optionflags): if optionflags & self.IGNORE_RESULT: return True return super().check_output(want, got, optionflags) + + doctest.OutputChecker = IgnoreResultOutputChecker doctest_global_setup = ''' @@ -265,7 +266,7 @@ def check_output(self, want, got, optionflags): import pyomo.opt as _opt # Not using SolverFactory to check solver availability because -# as of June 2020 there is no way to supress warnings when +# as of June 2020 there is no way to suppress warnings when # solvers are not available ipopt_available = bool(_opt.check_available_solvers('ipopt')) sipopt_available = bool(_opt.check_available_solvers('ipopt_sens')) diff --git a/doc/OnlineDocs/contributed_packages/community.rst b/doc/OnlineDocs/contributed_packages/community.rst index 08d3bb36a3c..b110107e604 100644 --- a/doc/OnlineDocs/contributed_packages/community.rst +++ b/doc/OnlineDocs/contributed_packages/community.rst @@ -101,9 +101,10 @@ We'll first use a model from `Allman et al, 2019`_ : .. _Allman et al, 2019: https://doi.org/10.1007/s11081-019-09450-5 .. doctest:: + :skipif: not networkx_available Required Imports - >>> from pyomo.contrib.community_detection.detection import detect_communities, detect_communities, CommunityMap, generate_model_graph + >>> from pyomo.contrib.community_detection.detection import detect_communities, CommunityMap, generate_model_graph >>> from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet >>> from pyomo.core import ConcreteModel, Var, Constraint >>> import networkx as nx @@ -143,26 +144,33 @@ have unintended consequences): ``new_community_map = copy.deepcopy(community_map Let's take a closer look at the actual community map object generated by `detect_communities`: .. doctest:: + :skipif: not networkx_available :hide: - >>> if community_map_object[0] == (['c3', 'c4', 'c5'], ['x3', 'x4']): - ... community_map_object[0], community_map_object[1] = community_map_object[1], community_map_object[0] + >>> from pyomo.common.formatting import tostr + >>> if tostr(community_map_object[0]) == "([c3, c4, c5], [x3, x4])": + ... _ = community_map_object.community_map + ... _[0], _[1] = _[1], _[0] .. doctest:: + :skipif: not networkx_available - >>> print(community_map_object) #doctest:+SKIP + >>> print(community_map_object) {0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])} Printing a community map object is made to be user-friendly (by showing the community map with components replaced by their strings). However, if the default Pyomo representation of components is desired, then the -community_map attribute or the repr() function can be used: +community_map attribute or the `repr()` function can be used: .. doctest:: + :skipif: not networkx_available - >>> print(community_map_object.community_map) # or print(repr(community_map_object)) # doctest: +SKIP - {0: ([, ], [, ]), 1: ([, , ], [, ])} + >>> print(community_map_object.community_map) + {0: ([, ], [, ]), 1: ([, , ], [, ])} + >>> print(repr(community_map_object)) + {0: ([, ], [, ]), 1: ([, , ], [, ])} `generate_structured_model` method of CommunityMap objects It may be useful to create a new model based on the communities found in the model - we can use the @@ -171,6 +179,7 @@ community_map attribute or the repr() function can be used: take a look at the example below: .. doctest:: + :skipif: not networkx_available Use the CommunityMap object made from the first code example >>> structured_model = community_map_object.generate_structured_model() # doctest: +SKIP @@ -262,7 +271,7 @@ community_map attribute or the repr() function can be used: so. Let's take a look at how this can be done in the following example: .. doctest:: - :skipif: not matplotlib_available + :skipif: not matplotlib_available or not networkx_available Create a CommunityMap object (so we can demonstrate the visualize_model_graph method) >>> community_map_object = cmo = detect_communities(model, type_of_community_map='bipartite', random_seed=seed) @@ -295,7 +304,7 @@ An example of the two separate graphs created for these two function calls is sh .. _Duran & Grossmann, 1986: https://dx.doi.org/10.1007/BF02592064 .. doctest:: - :skipif: not matplotlib_available + :skipif: not matplotlib_available or not networkx_available Define the model >>> model = EightProcessFlowsheet() @@ -337,6 +346,7 @@ We can see an example for the three separate graphs created by these three funct For this example, we will only need the NetworkX graph of the model and the number-to-component mapping. .. doctest:: + :skipif: not networkx_available Define the model >>> model = decode_model_1() diff --git a/doc/OnlineDocs/contributed_packages/doe/CCSI-license.txt b/doc/OnlineDocs/contributed_packages/doe/CCSI-license.txt new file mode 100644 index 00000000000..4b0dadd9e06 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/doe/CCSI-license.txt @@ -0,0 +1,43 @@ +# Pyomo.DoE was originally developed as part of the Carbon Capture Simulation for Industry +# Impact (CCSI2) project under the following license: +# +# *** License Agreement *** +# +# Pyomo.DoE Copyright (c) 2022, by the software owners: TRIAD National Security, LLC., Lawrence +# Livermore National Security, LLC., Lawrence Berkeley National Laboratory, +# Pacific Northwest National Laboratory, Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, University of Toledo, +# West Virginia University, et al. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without modification, are permitted provided +# that the following conditions are met: +# (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the +# following disclaimer. +# (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and +# the following disclaimer in the documentation and/or other materials provided with the distribution. +# (3) Neither the name of the Carbon Capture Simulation for Industry Impact, +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, The University of Pittsburgh, +# U.S. Dept. of Energy nor the names of its contributors may be used to endorse or promote products +# derived from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY +# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL +# THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# +# You are under no obligation whatsoever to provide any bug fixes, patches, or upgrades to the features, +# functionality or performance of the source code ("Enhancements") to anyone; however, if you choose to +# make your Enhancements available either publicly, or directly to Lawrence Berkeley National Laboratory, +# without imposing a separate written license agreement for such Enhancements, then you hereby grant +# the following license: a non-exclusive, royalty-free perpetual license to install, use, modify, prepare +# derivative works, incorporate into other computer software, distribute, and sublicense such +# enhancements or derivative works thereof, in binary and source code form. +# +# Lead Developers: Jialu Wang and Alexander Dowling, University of Notre Dame diff --git a/doc/OnlineDocs/contributed_packages/doe/doe.rst b/doc/OnlineDocs/contributed_packages/doe/doe.rst new file mode 100644 index 00000000000..354a9916e9b --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/doe/doe.rst @@ -0,0 +1,291 @@ +Pyomo.DoE +========= + +**Pyomo.DoE** (Pyomo Design of Experiments) is a Python library for model-based design of experiments using science-based models. + +Pyomo.DoE was developed by **Jialu Wang** and **Alexander W. Dowling** at the University of Notre Dame as part of the `Carbon Capture Simulation for Industry Impact (CCSI2) `_. +project, funded through the U.S. Department Of Energy Office of Fossil Energy. + +If you use Pyomo.DoE, please cite: + +[Wang and Dowling, 2022] Wang, Jialu, and Alexander W. Dowling. +"Pyomo.DOE: An openâ€source package for modelâ€based design of experiments in Python." +AIChE Journal 68.12 (2022): e17813. `https://doi.org/10.1002/aic.17813` + +Methodology Overview +--------------------- + +Model-based Design of Experiments (MBDoE) is a technique to maximize the information gain of experiments by directly using science-based models with physically meaningful parameters. It is one key component in the model calibration and uncertainty quantification workflow shown below: + +.. figure:: flowchart.png + :scale: 25 % + + The exploratory analysis, parameter estimation, uncertainty analysis, and MBDoE are combined into an iterative framework to select, refine, and calibrate science-based mathematical models with quantified uncertainty. Currently, Pyomo.DoE focuses on increasing parameter precision. + +Pyomo.DoE provides the exploratory analysis and MBDoE capabilities to the Pyomo ecosystem. The user provides one Pyomo model, a set of parameter nominal values, +the allowable design spaces for design variables, and the assumed observation error model. +During exploratory analysis, Pyomo.DoE checks if the model parameters can be inferred from the postulated measurements or preliminary data. +MBDoE then recommends optimized experimental conditions for collecting more data. +Parameter estimation packages such as `Parmest `_ can perform parameter estimation using the available data to infer values for parameters, +and facilitate an uncertainty analysis to approximate the parameter covariance matrix. +If the parameter uncertainties are sufficiently small, the workflow terminates and returns the final model with quantified parametric uncertainty. +If not, MBDoE recommends optimized experimental conditions to generate new data. + +Below is an overview of the type of optimization models Pyomo.DoE can accommodate: + +* Pyomo.DoE is suitable for optimization models of **continuous** variables +* Pyomo.DoE can handle **equality constraints** defining state variables +* Pyomo.DoE supports (Partial) Differential-Algebraic Equations (PDAE) models via Pyomo.DAE +* Pyomo.DoE also supports models with only algebraic constraints + +The general form of a DAE problem that can be passed into Pyomo.DoE is shown below: + +.. math:: + \begin{align*} + & \dot{\mathbf{x}}(t) = \mathbf{f}(\mathbf{x}(t), \mathbf{z}(t), \mathbf{y}(t), \mathbf{u}(t), \overline{\mathbf{w}}, \boldsymbol{\theta}) \\ + & \mathbf{g}(\mathbf{x}(t), \mathbf{z}(t), \mathbf{y}(t), \mathbf{u}(t), \overline{\mathbf{w}},\boldsymbol{\theta})=\mathbf{0} \\ + & \mathbf{y} =\mathbf{h}(\mathbf{x}(t), \mathbf{z}(t), \mathbf{u}(t), \overline{\mathbf{w}},\boldsymbol{\theta}) \\ + & \mathbf{f}^{\mathbf{0}}\left(\dot{\mathbf{x}}\left(t_{0}\right), \mathbf{x}\left(t_{0}\right), \mathbf{z}(t_0), \mathbf{y}(t_0), \mathbf{u}\left(t_{0}\right), \overline{\mathbf{w}}, \boldsymbol{\theta})\right)=\mathbf{0} \\ + & \mathbf{g}^{\mathbf{0}}\left( \mathbf{x}\left(t_{0}\right),\mathbf{z}(t_0), \mathbf{y}(t_0), \mathbf{u}\left(t_{0}\right), \overline{\mathbf{w}}, \boldsymbol{\theta}\right)=\mathbf{0}\\ + &\mathbf{y}^{\mathbf{0}}\left(t_{0}\right)=\mathbf{h}\left(\mathbf{x}\left(t_{0}\right),\mathbf{z}(t_0), \mathbf{u}\left(t_{0}\right), \overline{\mathbf{w}}, \boldsymbol{\theta}\right) + \end{align*} + +where: + +* :math:`\boldsymbol{\theta} \in \mathbb{R}^{N_p}` are unknown model parameters. +* :math:`\mathbf{x} \subseteq \mathcal{X}` are dynamic state variables which characterize trajectory of the system, :math:`\mathcal{X} \in \mathbb{R}^{N_x \times N_t}`. +* :math:`\mathbf{z} \subseteq \mathcal{Z}` are algebraic state variables, :math:`\mathcal{Z} \in \mathbb{R}^{N_z \times N_t}`. +* :math:`\mathbf{u} \subseteq \mathcal{U}` are time-varying decision variables, :math:`\mathcal{U} \in \mathbb{R}^{N_u \times N_t}`. +* :math:`\overline{\mathbf{w}} \in \mathbb{R}^{N_w}` are time-invariant decision variables. +* :math:`\mathbf{y} \subseteq \mathcal{Y}` are measurement response variables, :math:`\mathcal{Y} \in \mathbb{R}^{N_r \times N_t}`. +* :math:`\mathbf{f}(\cdot)` are differential equations. +* :math:`\mathbf{g}(\cdot)` are algebraic equations. +* :math:`\mathbf{h}(\cdot)` are measurement functions. +* :math:`\mathbf{t} \in \mathbb{R}^{N_t \times 1}` is a union of all time sets. + +.. note:: + * Parameters and design variables should be defined as Pyomo ``Var`` components on the model to use ``direct_kaug`` mode, and can be defined as Pyomo ``Param`` object if not using ``direct_kaug``. + +Based on the above notation, the form of the MBDoE problem addressed in Pyomo.DoE is shown below: + +.. math:: + \begin{equation} + \begin{aligned} + \underset{\boldsymbol{\varphi}}{\max} \quad & \Psi (\mathbf{M}(\mathbf{\hat{y}}, \boldsymbol{\varphi})) \\ + \text{s.t.} \quad & \mathbf{M}(\boldsymbol{\hat{\theta}}, \boldsymbol{\varphi}) = \sum_r^{N_r} \sum_{r'}^{N_r} \tilde{\sigma}_{(r,r')}\mathbf{Q}_r^\mathbf{T} \mathbf{Q}_{r'} + \mathbf{V}^{-1}_{\boldsymbol{\theta}}(\boldsymbol{\hat{\theta}}) \\ + & \dot{\mathbf{x}}(t) = \mathbf{f}(\mathbf{x}(t), \mathbf{z}(t), \mathbf{y}(t), \mathbf{u}(t), \overline{\mathbf{w}}, \boldsymbol{\theta}) \\ + & \mathbf{g}(\mathbf{x}(t), \mathbf{z}(t), \mathbf{y}(t), \mathbf{u}(t), \overline{\mathbf{w}},\boldsymbol{\theta})=\mathbf{0} \\ + & \mathbf{y} =\mathbf{h}(\mathbf{x}(t), \mathbf{z}(t), \mathbf{u}(t), \overline{\mathbf{w}},\boldsymbol{\theta}) \\ + & \mathbf{f}^{\mathbf{0}}\left(\dot{\mathbf{x}}\left(t_{0}\right), \mathbf{x}\left(t_{0}\right), \mathbf{z}(t_0), \mathbf{y}(t_0), \mathbf{u}\left(t_{0}\right), \overline{\mathbf{w}}, \boldsymbol{\theta})\right)=\mathbf{0} \\ + & \mathbf{g}^{\mathbf{0}}\left( \mathbf{x}\left(t_{0}\right),\mathbf{z}(t_0), \mathbf{y}(t_0), \mathbf{u}\left(t_{0}\right), \overline{\mathbf{w}}, \boldsymbol{\theta}\right)=\mathbf{0}\\ + &\mathbf{y}^{\mathbf{0}}\left(t_{0}\right)=\mathbf{h}\left(\mathbf{x}\left(t_{0}\right),\mathbf{z}(t_0), \mathbf{u}\left(t_{0}\right), \overline{\mathbf{w}}, \boldsymbol{\theta}\right) + \end{aligned} + \end{equation} + +where: + +* :math:`\boldsymbol{\varphi}` are design variables, which are manipulated to maximize the information content of experiments. It should consist of one or more of :math:`\mathbf{u}(t), \mathbf{y}^{\mathbf{0}}({t_0}),\overline{\mathbf{w}}`. With a proper model formulation, the timepoints for control or measurements :math:`\mathbf{t}` can also be degrees of freedom. +* :math:`\mathbf{M}` is the Fisher information matrix (FIM), estimated as the inverse of the covariance matrix of parameter estimates :math:`\boldsymbol{\hat{\theta}}`. A large FIM indicates more information contained in the experiment for parameter estimation. +* :math:`\mathbf{Q}` is the dynamic sensitivity matrix, containing the partial derivatives of :math:`\mathbf{y}` with respect to :math:`\boldsymbol{\theta}`. +* :math:`\Psi` is the design criteria to measure FIM. +* :math:`\mathbf{V}_{\boldsymbol{\theta}}(\boldsymbol{\hat{\theta}})^{-1}` is the FIM of previous experiments. + +Pyomo.DoE provides four design criteria :math:`\Psi` to measure the size of FIM: + +.. list-table:: Pyomo.DoE design criteria + :header-rows: 1 + :class: tight-table + + * - Design criterion + - Computation + - Geometrical meaning + * - A-optimality + - :math:`\text{trace}({\mathbf{M}})` + - Dimensions of the enclosing box of the confidence ellipse + * - D-optimality + - :math:`\text{det}({\mathbf{M}})` + - Volume of the confidence ellipse + * - E-optimality + - :math:`\text{min eig}({\mathbf{M}})` + - Size of the longest axis of the confidence ellipse + * - Modified E-optimality + - :math:`\text{cond}({\mathbf{M}})` + - Ratio of the longest axis to the shortest axis of the confidence ellipse + +In order to solve problems of the above, Pyomo.DoE implements the 2-stage stochastic program. Please see Wang and Dowling (2022) for details. + +Pyomo.DoE Required Inputs +-------------------------------- +The required inputs to the Pyomo.DoE solver are the following: + +* A function that creates the process model +* Dictionary of parameters and their nominal value +* A measurement object +* A design variables object +* A Numpy ``array`` containing the Prior FIM +* Optimization solver + +Below is a list of arguments that Pyomo.DoE expects the user to provide. + +parameter_dict : ``dictionary`` + A ``dictionary`` of parameter names and values. If they are an indexed variable, put the variable name and index in a nested ``Dictionary``. + +design_variables: ``DesignVariables`` + A ``DesignVariables`` of design variables, provided by the DesignVariables class. + If this design var is independent of time (constant), set the time to [0] + +measurement_variables : ``MeasurementVariables`` + A ``MeasurementVariables`` of the measurements, provided by the MeasurementVariables class. + +create_model : ``function`` + A ``function`` returning a deterministic process model. + +prior_FIM : ``array`` + An ``array`` defining the Fisher information matrix (FIM) for prior experiments, default is a zero matrix. + +Pyomo.DoE Solver Interface +--------------------------- + +.. figure:: uml.png + :scale: 25 % + + +.. autoclass:: pyomo.contrib.doe.doe.DesignOfExperiments + :members: __init__, stochastic_program, compute_FIM, run_grid_search + +.. Note:: + ``stochastic_program()`` includes the following steps: + #. Build two-stage stochastic programming optimization model where scenarios correspond to finite difference approximations for the Jacobian of the response variables with respect to calibrated model parameters + #. Fix the experiment design decisions and solve a square (i.e., zero degrees of freedom) instance of the two-stage DOE problem. This step is for initialization. + #. Unfix the experiment design decisions and solve the two-stage DOE problem. + +.. autoclass:: pyomo.contrib.doe.measurements.MeasurementVariables + :members: __init__, add_variables + +.. autoclass:: pyomo.contrib.doe.measurements.DesignVariables + :members: __init__, add_variables + +.. autoclass:: pyomo.contrib.doe.scenario.ScenarioGenerator + :special-members: __init__ + +.. autoclass:: pyomo.contrib.doe.result.FisherResults + :members: __init__, result_analysis + +.. autoclass:: pyomo.contrib.doe.result.GridSearchResult + :special-members: __init__ + + +Pyomo.DoE Usage Example +----------------------- + +We illustrate the use of Pyomo.DoE using a reaction kinetics example (Wang and Dowling, 2022). +The Arrhenius equations model the temperature dependence of the reaction rate coefficient :math:`k_1, k_2`. Assuming a first-order reaction mechanism gives the reaction rate model. Further, we assume only species A is fed to the reactor. + + +.. math:: + \begin{equation} + \begin{aligned} + k_1 & = A_1 e^{-\frac{E_1}{RT}} \\ + k_2 & = A_2 e^{-\frac{E_2}{RT}} \\ + \frac{d{C_A}}{dt} & = -k_1{C_A} \\ + \frac{d{C_B}}{dt} & = k_1{C_A} - k_2{C_B} \\ + C_{A0}& = C_A + C_B + C_C \\ + C_B(t_0) & = 0 \\ + C_C(t_0) & = 0 \\ + \end{aligned} + \end{equation} + + + +:math:`C_A(t), C_B(t), C_C(t)` are the time-varying concentrations of the species A, B, C, respectively. +:math:`k_1, k_2` are the rates for the two chemical reactions using an Arrhenius equation with activation energies :math:`E_1, E_2` and pre-exponential factors :math:`A_1, A_2`. +The goal of MBDoE is to optimize the experiment design variables :math:`\boldsymbol{\varphi} = (C_{A0}, T(t))`, where :math:`C_{A0},T(t)` are the initial concentration of species A and the time-varying reactor temperature, to maximize the precision of unknown model parameters :math:`\boldsymbol{\theta} = (A_1, E_1, A_2, E_2)` by measuring :math:`\mathbf{y}(t)=(C_A(t), C_B(t), C_C(t))`. +The observation errors are assumed to be independent both in time and across measurements with a constant standard deviation of 1 M for each species. + + +Step 0: Import Pyomo and the Pyomo.DoE module +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. doctest:: + + >>> # === Required import === + >>> import pyomo.environ as pyo + >>> from pyomo.dae import ContinuousSet, DerivativeVar + >>> from pyomo.contrib.doe import DesignOfExperiments, MeasurementVariables, DesignVariables + >>> import numpy as np + +Step 1: Define the Pyomo process model +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The process model for the reaction kinetics problem is shown below. + +.. literalinclude:: ../../../../pyomo/contrib/doe/examples/reactor_kinetics.py + :language: python + :pyobject: create_model + +.. literalinclude:: ../../../../pyomo/contrib/doe/examples/reactor_kinetics.py + :language: python + :pyobject: disc_for_measure + +.. note:: + The model requires at least two options: "block" and "global". Both options requires the pass of a created empty Pyomo model. + With "global" option, only design variables and their time sets need to be defined; + With "block" option, a full model needs to be defined. + + +Step 2: Define the inputs for Pyomo.DoE +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. literalinclude:: ../../../../pyomo/contrib/doe/examples/reactor_compute_FIM.py + :language: python + :start-at: # Control time set + :end-before: ### Compute + + +Step 3: Compute the FIM of a square MBDoE problem +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This method computes an MBDoE optimization problem with no degree of freedom. + +This method can be accomplished by two modes, ``direct_kaug`` and ``sequential_finite``. +``direct_kaug`` mode requires the installation of the solver `k_aug `_. + +.. literalinclude:: ../../../../pyomo/contrib/doe/examples/reactor_compute_FIM.py + :language: python + :start-after: ### Compute the FIM + :end-before: # test result + +Step 4: Exploratory analysis (Enumeration) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Exploratory analysis is suggested to enumerate the design space to check if the problem is identifiable, +i.e., ensure that D-, E-optimality metrics are not small numbers near zero, and Modified E-optimality is not a big number. + +Pyomo.DoE accomplishes the exploratory analysis with the ``run_grid_search`` function. +It allows users to define any number of design decisions. Heatmaps can be drawn by two design variables, fixing other design variables. +1D curve can be drawn by one design variable, fixing all other variables. +The function ``run_grid_search`` enumerates over the design space, each MBDoE problem accomplished by ``compute_FIM`` method. +Therefore, ``run_grid_search`` supports only two modes: ``sequential_finite`` and ``direct_kaug``. + +.. literalinclude:: ../../../../pyomo/contrib/doe/examples/reactor_compute_FIM.py + :language: python + :pyobject: main + +Successful run of the above code shows the following figure: + +.. figure:: grid-1.png + :scale: 35 % + +A heatmap shows the change of the objective function, a.k.a. the experimental information content, in the design region. Horizontal and vertical axes are two design variables, while the color of each grid shows the experimental information content. Taking the Fig. Reactor case - A optimality as example, A-optimality shows that the most informative region is around $C_{A0}=5.0$ M, $T=300.0$ K, while the least informative region is around $C_{A0}=1.0$ M, $T=700.0$ K. + +Step 5: Gradient-based optimization +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Pyomo.DoE accomplishes gradient-based optimization with the ``stochastic_program`` function for A- and D-optimality design. + +This function solves twice: It solves the square version of the MBDoE problem first, and then unfixes the design variables as degree of freedoms and solves again. In this way the optimization problem can be well initialized. + +.. literalinclude:: ../../../../pyomo/contrib/doe/examples/reactor_compute_FIM.py + :language: python + :pyobject: main + + diff --git a/doc/OnlineDocs/contributed_packages/doe/flowchart.png b/doc/OnlineDocs/contributed_packages/doe/flowchart.png new file mode 100644 index 00000000000..2e66566d2f6 Binary files /dev/null and b/doc/OnlineDocs/contributed_packages/doe/flowchart.png differ diff --git a/doc/OnlineDocs/contributed_packages/doe/grid-1.png b/doc/OnlineDocs/contributed_packages/doe/grid-1.png new file mode 100644 index 00000000000..6c96bbab7e0 Binary files /dev/null and b/doc/OnlineDocs/contributed_packages/doe/grid-1.png differ diff --git a/doc/OnlineDocs/contributed_packages/doe/reactor.png b/doc/OnlineDocs/contributed_packages/doe/reactor.png new file mode 100644 index 00000000000..7664493fd81 Binary files /dev/null and b/doc/OnlineDocs/contributed_packages/doe/reactor.png differ diff --git a/doc/OnlineDocs/contributed_packages/doe/uml.png b/doc/OnlineDocs/contributed_packages/doe/uml.png new file mode 100644 index 00000000000..e5280987722 Binary files /dev/null and b/doc/OnlineDocs/contributed_packages/doe/uml.png differ diff --git a/doc/OnlineDocs/contributed_packages/incidence/api.rst b/doc/OnlineDocs/contributed_packages/incidence/api.rst new file mode 100644 index 00000000000..38bf0be125b --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/api.rst @@ -0,0 +1,14 @@ +.. _incidence_api: + +API Reference +============= + +.. toctree:: + incidence.rst + config.rst + interface.rst + matching.rst + connected.rst + triangularize.rst + dulmage_mendelsohn.rst + scc_solver.rst diff --git a/doc/OnlineDocs/contributed_packages/incidence/config.rst b/doc/OnlineDocs/contributed_packages/incidence/config.rst new file mode 100644 index 00000000000..06e4f5c5626 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/config.rst @@ -0,0 +1,5 @@ +Incidence Options +================= + +.. automodule:: pyomo.contrib.incidence_analysis.config + :members: diff --git a/doc/OnlineDocs/contributed_packages/incidence/connected.rst b/doc/OnlineDocs/contributed_packages/incidence/connected.rst new file mode 100644 index 00000000000..4cf60f62eba --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/connected.rst @@ -0,0 +1,5 @@ +Weakly Connected Components +=========================== + +.. automodule:: pyomo.contrib.incidence_analysis.connected + :members: diff --git a/doc/OnlineDocs/contributed_packages/incidence/dulmage_mendelsohn.rst b/doc/OnlineDocs/contributed_packages/incidence/dulmage_mendelsohn.rst new file mode 100644 index 00000000000..6fe2bd59324 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/dulmage_mendelsohn.rst @@ -0,0 +1,5 @@ +Dulmage-Mendelsohn Partition +============================ + +.. automodule:: pyomo.contrib.incidence_analysis.dulmage_mendelsohn + :members: diff --git a/doc/OnlineDocs/contributed_packages/incidence/incidence.rst b/doc/OnlineDocs/contributed_packages/incidence/incidence.rst new file mode 100644 index 00000000000..ebf481c00a7 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/incidence.rst @@ -0,0 +1,5 @@ +Incident Variables +================== + +.. automodule:: pyomo.contrib.incidence_analysis.incidence + :members: diff --git a/doc/OnlineDocs/contributed_packages/incidence/index.rst b/doc/OnlineDocs/contributed_packages/incidence/index.rst new file mode 100644 index 00000000000..ab0e07f6abc --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/index.rst @@ -0,0 +1,19 @@ +Incidence Analysis +================== + +Tools for constructing and analyzing the incidence graph of variables +and constraints. + +This documentation contains the following resources: + +.. toctree:: + :maxdepth: 1 + + overview.rst + tutorial.rst + api.rst + +If you are wondering what Incidence Analysis is and would like to learn more, +please see :ref:`incidence_overview`. If you already know what +Incidence Analysis is and are here for reference, see :ref:`incidence_tutorial` +or :ref:`incidence_api` as needed. diff --git a/doc/OnlineDocs/contributed_packages/incidence/interface.rst b/doc/OnlineDocs/contributed_packages/incidence/interface.rst new file mode 100644 index 00000000000..29c92d8193c --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/interface.rst @@ -0,0 +1,5 @@ +Pyomo Interfaces +================ + +.. automodule:: pyomo.contrib.incidence_analysis.interface + :members: diff --git a/doc/OnlineDocs/contributed_packages/incidence/matching.rst b/doc/OnlineDocs/contributed_packages/incidence/matching.rst new file mode 100644 index 00000000000..1941c7116cd --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/matching.rst @@ -0,0 +1,5 @@ +Maximum Matching +================ + +.. automodule:: pyomo.contrib.incidence_analysis.matching + :members: diff --git a/doc/OnlineDocs/contributed_packages/incidence/overview.rst b/doc/OnlineDocs/contributed_packages/incidence/overview.rst new file mode 100644 index 00000000000..544740cae57 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/overview.rst @@ -0,0 +1,46 @@ +.. _incidence_overview: + +Overview +======== + +What is Incidence Analysis? +--------------------------- + +A Pyomo extension for constructing the bipartite incidence graph of variables +and constraints, and an interface to useful algorithms for analyzing or +decomposing this graph. + +Why is Incidence Analysis useful? +--------------------------------- + +It can identify the source of certain types of singularities in a system of +variables and constraints. These singularities often violate assumptions made +while modeling a physical system or assumptions required for an optimization +solver to guarantee convergence. In particular, interior point methods used for +nonlinear local optimization require the Jacobian of equality constraints (and +active inequalities) to be full row rank, and this package implements the +Dulmage-Mendelsohn partition, which can be used to determine if this Jacobian +is structurally rank-deficient. + +Who develops and maintains Incidence Analysis? +---------------------------------------------- + +This extension was developed by Robert Parker while a PhD student in +Professor Biegler's lab at Carnegie Mellon University, with guidance +from Bethany Nicholson and John Siirola at Sandia. + +How can I cite Incidence Analysis? +---------------------------------- + +We are working on a journal article about Incidence Analysis and the underlying +methods. In the meantime, if you use Incidence Analysis in your research, you +may cite the following conference paper: + +.. code-block:: bibtex + + @inproceedings{Parker2023Dulmage, + title={{An application of the Dulmage-Mendelsohn partition to the analysis of a discretized dynamic chemical looping combustion reactor model}}, + author={Robert Parker and Chinedu Okoli and Bethany Nicholson and John Siirola and Lorenz Biegler}, + booktitle={Proceedings of FOCAPO/CPC 2023}, + year={2023} + } diff --git a/doc/OnlineDocs/contributed_packages/incidence/scc_solver.rst b/doc/OnlineDocs/contributed_packages/incidence/scc_solver.rst new file mode 100644 index 00000000000..35f494af1a1 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/scc_solver.rst @@ -0,0 +1,5 @@ +Block Triangular Decomposition Solver +===================================== + +.. automodule:: pyomo.contrib.incidence_analysis.scc_solver + :members: diff --git a/doc/OnlineDocs/contributed_packages/incidence/triangularize.rst b/doc/OnlineDocs/contributed_packages/incidence/triangularize.rst new file mode 100644 index 00000000000..a051086a859 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/triangularize.rst @@ -0,0 +1,5 @@ +Block Triangularization +======================= + +.. automodule:: pyomo.contrib.incidence_analysis.triangularize + :members: diff --git a/doc/OnlineDocs/contributed_packages/incidence/tutorial.bt.rst b/doc/OnlineDocs/contributed_packages/incidence/tutorial.bt.rst new file mode 100644 index 00000000000..6710c0dbb50 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/tutorial.bt.rst @@ -0,0 +1,107 @@ +Debugging a numeric singularity using block triangularization +============================================================= + +We start with some imports. To debug a *numeric* singularity, we will need +``PyomoNLP`` from :ref:`pynumero` to get the constraint Jacobian, +and will need NumPy to compute condition numbers. + +.. doctest:: + :skipif: not scipy_available or not asl_available or not networkx_available + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP + >>> from pyomo.contrib.incidence_analysis import IncidenceGraphInterface + >>> import numpy as np + +We now build the model we would like to debug. Compared to the model in +:ref:`incidence_tutorial_dm`, we have converted the sum equation to use a sum +over component flow rates rather than a sum over mass fractions. + +.. doctest:: + :skipif: not scipy_available or not asl_available or not networkx_available + + >>> m = pyo.ConcreteModel() + >>> m.components = pyo.Set(initialize=[1, 2, 3]) + >>> m.x = pyo.Var(m.components, initialize=1.0/3.0) + >>> m.flow_comp = pyo.Var(m.components, initialize=10.0) + >>> m.flow = pyo.Var(initialize=30.0) + >>> m.density = pyo.Var(initialize=1.0) + >>> # This equation is new! + >>> m.sum_flow_eqn = pyo.Constraint( + ... expr=sum(m.flow_comp[j] for j in m.components) == m.flow + ... ) + >>> m.holdup_eqn = pyo.Constraint(m.components, expr={ + ... j: m.x[j]*m.density - 1 == 0 for j in m.components + ... }) + >>> m.density_eqn = pyo.Constraint( + ... expr=1/m.density - sum(1/m.x[j] for j in m.components) == 0 + ... ) + >>> m.flow_eqn = pyo.Constraint(m.components, expr={ + ... j: m.x[j]*m.flow - m.flow_comp[j] == 0 for j in m.components + ... }) + +We now construct the incidence graph and check unmatched variables and +constraints to validate structural nonsingularity. + +.. doctest:: + :skipif: not scipy_available or not asl_available or not networkx_available + + >>> igraph = IncidenceGraphInterface(m, include_inequality=False) + >>> var_dmp, con_dmp = igraph.dulmage_mendelsohn() + >>> print(len(var_dmp.unmatched)) + 0 + >>> print(len(con_dmp.unmatched)) + 0 + +Our system is structurally nonsingular. Now we check whether we are numerically +nonsingular (well-conditioned) by checking the condition number. +Admittedly, deciding if a matrix is "singular" by looking at its condition +number is somewhat of an art. We might define "numerically singular" as having a +condition number greater than the inverse of machine precision (approximately +``1e16``), but poorly conditioned matrices can cause problems even if they don't +meet this definition. Here we use ``1e10`` as a somewhat arbitrary condition +number threshold to indicate a problem in our system. + +.. doctest:: + :skipif: not scipy_available or not asl_available or not networkx_available + + >>> # PyomoNLP requires exactly one objective function + >>> m._obj = pyo.Objective(expr=0.0) + >>> nlp = PyomoNLP(m) + >>> cond_threshold = 1e10 + >>> cond = np.linalg.cond(nlp.evaluate_jacobian_eq().toarray()) + >>> print(cond > cond_threshold) + True + +The system is poorly conditioned. Now we can check diagonal blocks of a block +triangularization to determine which blocks are causing the poor conditioning. + +.. code-block:: python + + >>> var_blocks, con_blocks = igraph.block_triangularize() + >>> for i, (vblock, cblock) in enumerate(zip(var_blocks, con_blocks)): + ... submatrix = nlp.extract_submatrix_jacobian(vblock, cblock) + ... cond = np.linalg.cond(submatrix.toarray()) + ... print(f"block {i}: {cond}") + ... if cond > cond_threshold: + ... for var in vblock: + ... print(f" {var.name}") + ... for con in cblock: + ... print(f" {con.name}") + block 0: 24.492504515710433 + block 1: 1.2480741394486336e+17 + flow + flow_comp[1] + flow_comp[2] + flow_comp[3] + sum_flow_eqn + flow_eqn[1] + flow_eqn[2] + flow_eqn[3] + +We see that the second block is causing the singularity, and that this block +contains the sum equation that we modified for this example. This suggests that +converting this equation to sum over flow rates rather than mass fractions just +converted a structural singularity to a numeric singularity, and didn't really +solve our problem. To see a fix that *does* resolve the singularity, see +:ref:`incidence_tutorial_dm`. diff --git a/doc/OnlineDocs/contributed_packages/incidence/tutorial.btsolve.rst b/doc/OnlineDocs/contributed_packages/incidence/tutorial.btsolve.rst new file mode 100644 index 00000000000..1ff0b6c5afe --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/tutorial.btsolve.rst @@ -0,0 +1,72 @@ +Solving a square system with a block triangular decomposition +============================================================= + +We start with imports. The key function from Incidence Analysis we will use is +``solve_strongly_connected_components``. + +.. doctest:: + :skipif: not networkx_available or not scipy_available or not asl_available + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.incidence_analysis import ( + ... solve_strongly_connected_components + ... ) + +Now we construct the model we would like to solve. This is a model with the +same structure as the "fixed model" in :ref:`incidence_tutorial_dm`. + +.. doctest:: + :skipif: not networkx_available or not scipy_available or not asl_available + + >>> m = pyo.ConcreteModel() + >>> m.components = pyo.Set(initialize=[1, 2, 3]) + >>> m.x = pyo.Var(m.components, initialize=1.0/3.0) + >>> m.flow_comp = pyo.Var(m.components, initialize=10.0) + >>> m.flow = pyo.Var(initialize=30.0) + >>> m.dens_bulk = pyo.Var(initialize=1.0) + >>> m.dens_skel = pyo.Var(initialize=1.0) + >>> m.porosity = pyo.Var(initialize=0.25) + >>> m.velocity = pyo.Param(initialize=1.0) + >>> m.holdup = pyo.Param( + ... m.components, initialize={j: 1.0+j/10.0 for j in m.components} + ... ) + >>> m.sum_eqn = pyo.Constraint( + ... expr=sum(m.x[j] for j in m.components) - 1 == 0 + ... ) + >>> m.holdup_eqn = pyo.Constraint(m.components, expr={ + ... j: m.x[j]*m.dens_bulk - m.holdup[j] == 0 for j in m.components + ... }) + >>> m.dens_skel_eqn = pyo.Constraint( + ... expr=1/m.dens_skel - sum(1e-3/m.x[j] for j in m.components) == 0 + ... ) + >>> m.dens_bulk_eqn = pyo.Constraint( + ... expr=m.dens_bulk == (1 - m.porosity)*m.dens_skel + ... ) + >>> m.flow_eqn = pyo.Constraint(m.components, expr={ + ... j: m.x[j]*m.flow - m.flow_comp[j] == 0 for j in m.components + ... }) + >>> m.flow_dens_eqn = pyo.Constraint( + ... expr=m.flow == m.velocity*m.dens_bulk + ... ) + +Solving via a block triangular decomposition is useful in cases where the full +model does not converge when considered simultaneously by a Newton solver. +In this case, we specify a solver to use for the diagonal blocks and call +``solve_strongly_connected_components``. + +.. doctest:: + :skipif: not networkx_available or not scipy_available or not asl_available + + >>> # Suppose a solve like this does not converge + >>> # pyo.SolverFactory("scipy.fsolve").solve(m) + + >>> # We solve via block-triangular decomposition + >>> solver = pyo.SolverFactory("scipy.fsolve") + >>> res_list = solve_strongly_connected_components(m, solver=solver) + +We can now display the variable values at the solution: + +.. code-block:: python + + for var in m.component_objects(pyo.Var): + var.pprint() diff --git a/doc/OnlineDocs/contributed_packages/incidence/tutorial.dm.rst b/doc/OnlineDocs/contributed_packages/incidence/tutorial.dm.rst new file mode 100644 index 00000000000..c14861e9fc8 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/tutorial.dm.rst @@ -0,0 +1,191 @@ +.. _incidence_tutorial_dm: + +Debugging a structural singularity with the Dulmage-Mendelsohn partition +======================================================================== + +We start with some imports and by creating a Pyomo model we would like +to debug. Usually the model is much larger and more complicated than this. +This particular system appeared when debugging a dynamic 1-D partial +differential-algebraic equation (PDAE) model representing a chemical looping +combustion reactor. + +.. doctest:: + :skipif: not scipy_available or not networkx_available or not asl_available + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.incidence_analysis import IncidenceGraphInterface + + >>> m = pyo.ConcreteModel() + >>> m.components = pyo.Set(initialize=[1, 2, 3]) + >>> m.x = pyo.Var(m.components, initialize=1.0/3.0) + >>> m.flow_comp = pyo.Var(m.components, initialize=10.0) + >>> m.flow = pyo.Var(initialize=30.0) + >>> m.density = pyo.Var(initialize=1.0) + >>> m.sum_eqn = pyo.Constraint( + ... expr=sum(m.x[j] for j in m.components) - 1 == 0 + ... ) + >>> m.holdup_eqn = pyo.Constraint(m.components, expr={ + ... j: m.x[j]*m.density - 1 == 0 for j in m.components + ... }) + >>> m.density_eqn = pyo.Constraint( + ... expr=1/m.density - sum(1/m.x[j] for j in m.components) == 0 + ... ) + >>> m.flow_eqn = pyo.Constraint(m.components, expr={ + ... j: m.x[j]*m.flow - m.flow_comp[j] == 0 for j in m.components + ... }) + +To check this model for structural singularity, we apply the Dulmage-Mendelsohn +partition. ``var_dm_partition`` and ``con_dm_partition`` are named tuples +with fields for each of the four subsets defined by the partition: +``unmatched``, ``overconstrained``, ``square``, and ``underconstrained``. + +.. doctest:: + :skipif: not scipy_available or not networkx_available or not asl_available + + >>> igraph = IncidenceGraphInterface(m) + >>> # Make sure we have a square system + >>> print(len(igraph.variables)) + 8 + >>> print(len(igraph.constraints)) + 8 + >>> var_dm_partition, con_dm_partition = igraph.dulmage_mendelsohn() + +If any variables or constraints are unmatched, the (Jacobian of the) model +is structurally singular. + +.. code-block:: python + + >>> # Note that the unmatched variables/constraints are not mathematically + >>> # unique and could change with implementation! + >>> for var in var_dm_partition.unmatched: + ... print(var.name) + flow_comp[1] + >>> for con in con_dm_partition.unmatched: + ... print(con.name) + density_eqn + +This model has one unmatched constraint and one unmatched variable, so it is +structurally singular. However, the unmatched variable and constraint are not +unique. For example, ``flow_comp[2]`` could have been unmatched instead of +``flow_comp[1]``. The exact variables and constraints that are unmatched depends +on both the order in which variables are identified in Pyomo expressions and +the implementation of the matching algorithm. For a given implementation, +however, these variables and constraints should be deterministic. + +Unique subsets of variables and constraints that are useful when debugging a +structural singularity are the underconstrained and overconstrained subsystems. +The variables in the underconstrained subsystem are contained in the +``unmatched`` and ``underconstrained`` fields of the ``var_dm_partition`` named tuple, +while the constraints are contained in the ``underconstrained`` field of the +``con_dm_partition`` named tuple. +The variables in the overconstrained subsystem are contained in the +``overconstrained`` field of the ``var_dm_partition`` named tuple, while the constraints +are contained in the ``overconstrained`` and ``unmatched`` fields of the +``con_dm_partition`` named tuple. + +We now construct the underconstrained and overconstrained subsystems: + +.. doctest:: + :skipif: not scipy_available or not networkx_available or not asl_available + + >>> uc_var = var_dm_partition.unmatched + var_dm_partition.underconstrained + >>> uc_con = con_dm_partition.underconstrained + >>> oc_var = var_dm_partition.overconstrained + >>> oc_con = con_dm_partition.overconstrained + con_dm_partition.unmatched + +And display the variables and constraints contained in each: + +.. code-block:: python + + >>> # Note that while these variables/constraints are uniquely determined, + >>> # their order is not! + + >>> # Overconstrained subsystem + >>> for var in oc_var: + >>> print(var.name) + x[1] + density + x[2] + x[3] + >>> for con in oc_con: + >>> print(con.name) + sum_eqn + holdup_eqn[1] + holdup_eqn[2] + holdup_eqn[3] + density_eqn + + >>> # Underconstrained subsystem + >>> for var in uc_var: + >>> print(var.name) + flow_comp[1] + flow + flow_comp[2] + flow_comp[3] + >>> for con in uc_con: + >>> print(con.name) + flow_eqn[1] + flow_eqn[2] + flow_eqn[3] + +At this point we must use our intuition about the system being modeled to +identify "what is causing" the singularity. Looking at the under and over- +constrained systems, it appears that we are missing an equation to calculate +``flow``, the total flow rate, and that ``density`` is over-specified as it +is computed by both the bulk density equation and one of the component density +equations. + +With this knowledge, we can eventually figure out (a) that we need an equation +to calculate ``flow`` from density and (b) that our "bulk density equation" +is actually a *skeletal* density equation. Admittedly, this is difficult to +figure out without the full context behind this particular system. + +The following code constructs a new version of the model and verifies that it +is structurally nonsingular: + +.. doctest:: + :skipif: not scipy_available or not networkx_available or not asl_available + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.incidence_analysis import IncidenceGraphInterface + + >>> m = pyo.ConcreteModel() + >>> m.components = pyo.Set(initialize=[1, 2, 3]) + >>> m.x = pyo.Var(m.components, initialize=1.0/3.0) + >>> m.flow_comp = pyo.Var(m.components, initialize=10.0) + >>> m.flow = pyo.Var(initialize=30.0) + >>> m.dens_bulk = pyo.Var(initialize=1.0) + >>> m.dens_skel = pyo.Var(initialize=1.0) + >>> m.porosity = pyo.Var(initialize=0.25) + >>> m.velocity = pyo.Param(initialize=1.0) + >>> m.sum_eqn = pyo.Constraint( + ... expr=sum(m.x[j] for j in m.components) - 1 == 0 + ... ) + >>> m.holdup_eqn = pyo.Constraint(m.components, expr={ + ... j: m.x[j]*m.dens_bulk - 1 == 0 for j in m.components + ... }) + >>> m.dens_skel_eqn = pyo.Constraint( + ... expr=1/m.dens_skel - sum(1/m.x[j] for j in m.components) == 0 + ... ) + >>> m.dens_bulk_eqn = pyo.Constraint( + ... expr=m.dens_bulk == (1 - m.porosity)*m.dens_skel + ... ) + >>> m.flow_eqn = pyo.Constraint(m.components, expr={ + ... j: m.x[j]*m.flow - m.flow_comp[j] == 0 for j in m.components + ... }) + >>> m.flow_dens_eqn = pyo.Constraint( + ... expr=m.flow == m.velocity*m.dens_bulk + ... ) + + >>> igraph = IncidenceGraphInterface(m, include_inequality=False) + >>> print(len(igraph.variables)) + 10 + >>> print(len(igraph.constraints)) + 10 + >>> var_dm_partition, con_dm_partition = igraph.dulmage_mendelsohn() + + >>> # There are now no unmatched variables or equations + >>> print(len(var_dm_partition.unmatched)) + 0 + >>> print(len(con_dm_partition.unmatched)) + 0 diff --git a/doc/OnlineDocs/contributed_packages/incidence/tutorial.rst b/doc/OnlineDocs/contributed_packages/incidence/tutorial.rst new file mode 100644 index 00000000000..4b22fc16c53 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/incidence/tutorial.rst @@ -0,0 +1,14 @@ +.. _incidence_tutorial: + +Incidence Analysis Tutorial +=========================== + +This tutorial walks through examples of the most common use cases for +Incidence Analysis: + +.. toctree:: + :maxdepth: 1 + + tutorial.dm.rst + tutorial.bt.rst + tutorial.btsolve.rst diff --git a/doc/OnlineDocs/contributed_packages/index.rst b/doc/OnlineDocs/contributed_packages/index.rst index 997fe589425..f893753780e 100644 --- a/doc/OnlineDocs/contributed_packages/index.rst +++ b/doc/OnlineDocs/contributed_packages/index.rst @@ -16,9 +16,12 @@ Contributed packages distributed with Pyomo: :maxdepth: 1 community.rst + doe/doe.rst gdpopt.rst iis.rst + incidence/index.rst mindtpy.rst + mpc/index.rst multistart.rst preprocessing.rst parmest/index.rst diff --git a/doc/OnlineDocs/contributed_packages/mindtpy.rst b/doc/OnlineDocs/contributed_packages/mindtpy.rst index 507216045ed..a850a42c740 100644 --- a/doc/OnlineDocs/contributed_packages/mindtpy.rst +++ b/doc/OnlineDocs/contributed_packages/mindtpy.rst @@ -3,7 +3,7 @@ MindtPy Solver The Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo (MindtPy) solver allows users to solve Mixed-Integer Nonlinear Programs (MINLP) using decomposition algorithms. -These decomposition algorithms usually rely on the solution of Mixed-Intger Linear Programs +These decomposition algorithms usually rely on the solution of Mixed-Integer Linear Programs (MILP) and Nonlinear Programs (NLP). The following algorithms are currently available in MindtPy: @@ -18,6 +18,10 @@ The following algorithms are currently available in MindtPy: Usage and early implementation details for MindtPy can be found in the PSE 2018 paper Bernal et al., (`ref `_, `preprint `_). +This solver implementation has been developed by `David Bernal `_ +and `Zedong Peng `_ as part of research efforts at the `Bernal Research Group +`_ and the `Grossmann Research Group `_ +at Purdue University and Carnegie Mellon University. .. _Duran & Grossmann, 1986: https://dx.doi.org/10.1007/BF02592064 .. _Westerlund & Petterson, 1995: http://dx.doi.org/10.1016/0098-1354(95)87027-X @@ -120,7 +124,7 @@ LP/NLP Based Branch-and-Bound ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ MindtPy also supports single-tree implementation of Outer-Approximation (OA) algorithm, which is known as LP/NLP based branch-and-bound algorithm originally described in [`Quesada & Grossmann, 1992`_]. -The LP/NLP based branch-and-bound algorithm in MindtPy is implemeted based on the LazyConstraintCallback function in commercial solvers. +The LP/NLP based branch-and-bound algorithm in MindtPy is implemented based on the LazyConstraintCallback function in commercial solvers. .. _Quesada & Grossmann, 1992: https://www.sciencedirect.com/science/article/abs/pii/0098135492800288 @@ -181,7 +185,7 @@ A usage example for OA with solution pool is as follows: >>> pyo.SolverFactory('mindtpy').solve(model, ... strategy='OA', - ... mip_solver='cplex_peristent', + ... mip_solver='cplex_persistent', ... nlp_solver='ipopt', ... solution_pool=True, ... num_solution_iteration=10, # default=5 @@ -310,6 +314,6 @@ Report a Bug If you find a bug in MindtPy, we will be grateful if you could - submit an `issue`_ in Pyomo repository -- directly contact David Bernal and Zedong Peng . +- directly contact David Bernal and Zedong Peng . .. _issue: https://github.com/Pyomo/pyomo/issues diff --git a/doc/OnlineDocs/contributed_packages/mpc/examples.rst b/doc/OnlineDocs/contributed_packages/mpc/examples.rst new file mode 100644 index 00000000000..95204192358 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/mpc/examples.rst @@ -0,0 +1,6 @@ +Examples +======== + +Please see ``pyomo/contrib/mpc/examples/cstr/run_openloop.py`` and +``pyomo/contrib/mpc/examples/cstr/run_mpc.py`` for examples of some simple +use cases. diff --git a/doc/OnlineDocs/contributed_packages/mpc/faq.rst b/doc/OnlineDocs/contributed_packages/mpc/faq.rst new file mode 100644 index 00000000000..e42e7184696 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/mpc/faq.rst @@ -0,0 +1,16 @@ +Frequently asked questions +========================== + +#. Why not use Pandas DataFrames? + +Pandas DataFrames are a natural data structure for storing "columns" of +time series data. These columns, or individual time series, could each represent +the data for a single variable. This is very similar to the TimeSeriesData +class introduced in this package. +The reason a new data structure is introduced is primarily that a DataFrame +does not provide any utility for converting labels into a consistent format, +as TimeSeriesData does by accepting variables, strings, slices, etc. +as keys and converting them into the form of a time-indexed ComponentUID. +Also, DataFrames do not have convenient analogs for scalar data and +time interval data, which this package provides as the ScalarData +and IntervalData classes with very similar APIs to TimeSeriesData. diff --git a/doc/OnlineDocs/contributed_packages/mpc/index.rst b/doc/OnlineDocs/contributed_packages/mpc/index.rst new file mode 100644 index 00000000000..b93abf223e2 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/mpc/index.rst @@ -0,0 +1,12 @@ +MPC +=== + +This package contains data structures and utilities for dynamic optimization +and rolling horizon applications, e.g. model predictive control. + +.. toctree:: + :maxdepth: 1 + + overview.rst + examples.rst + faq.rst diff --git a/doc/OnlineDocs/contributed_packages/mpc/overview.rst b/doc/OnlineDocs/contributed_packages/mpc/overview.rst new file mode 100644 index 00000000000..f5dbe85e523 --- /dev/null +++ b/doc/OnlineDocs/contributed_packages/mpc/overview.rst @@ -0,0 +1,210 @@ +Overview +======== + +What does this package contain? +------------------------------- + +#. Data structures for values and time series data associated with time-indexed variables (or parameters, or named expressions). Examples are setpoint values associated with a subset of state variables or time series data from a simulation + +#. Utilities for loading and extracting this data into and from variables in a model + +#. Utilities for constructing components from this data (expressions, constraints, and objectives) that are useful for dynamic optimization + +What is the goal of this package? +--------------------------------- + +This package was written to help developers of Pyomo-based dynamic optimization +case studies, especially rolling horizon dynamic optimization case studies, +write scripts that are small, legible, and maintainable. +It does this by providing utilities for mundane data-management and model +construction tasks, allowing the developer to focus on their application. + +Why is this package useful? +--------------------------- + +First, it is not normally easy to extract "flattened" time series data, +in which all indexing structure other than time-indexing has been +flattened to yield a set of one-dimensional arrays, from a Pyomo model. +This is an extremely convenient data structure to have for plotting, +analysis, initialization, and manipulation of dynamic models. +If all variables are indexed by time and only time, this data is relatively +easy to obtain. +The first issue comes up when dealing with components that are indexed by +time in addition to some other set(s). For example: + +.. doctest:: + + >>> import pyomo.environ as pyo + + >>> m = pyo.ConcreteModel() + >>> m.time = pyo.Set(initialize=[0, 1, 2]) + >>> m.comp = pyo.Set(initialize=["A", "B"]) + >>> m.var = pyo.Var(m.time, m.comp, initialize=1.0) + + >>> t0 = m.time.first() + >>> data = { + ... m.var[t0, j].name: [m.var[i, j].value for i in m.time] + ... for j in m.comp + ... } + >>> data + {'var[0,A]': [1.0, 1.0, 1.0], 'var[0,B]': [1.0, 1.0, 1.0]} + +To generate data in this form, we need to (a) know that our variable is indexed +by time and ``m.comp`` and (b) arbitrarily select a time index ``t0`` to +generate a unique key for each time series. +This gets more difficult when blocks and time-indexed blocks are used as well. +The first difficulty can be alleviated using +``flatten_dae_components`` from ``pyomo.dae.flatten``: + +.. doctest:: + + >>> import pyomo.environ as pyo + >>> from pyomo.dae.flatten import flatten_dae_components + + >>> m = pyo.ConcreteModel() + >>> m.time = pyo.Set(initialize=[0, 1, 2]) + >>> m.comp = pyo.Set(initialize=["A", "B"]) + >>> m.var = pyo.Var(m.time, m.comp, initialize=1.0) + + >>> t0 = m.time.first() + >>> scalar_vars, dae_vars = flatten_dae_components(m, m.time, pyo.Var) + >>> data = {var[t0].name: list(var[:].value) for var in dae_vars} + >>> data + {'var[0,A]': [1.0, 1.0, 1.0], 'var[0,B]': [1.0, 1.0, 1.0]} + +Addressing the arbitrary ``t0`` index requires us to ask what key we +would like to use to identify each time series in our data structure. +The key should uniquely correspond to a component, or "sub-component" +that is indexed only by time. A slice, e.g. ``m.var[:, "A"]`` seems +natural. However, Pyomo provides a better data structure that can +be constructed from a component, slice, or string, called +``ComponentUID``. Being constructable from a string is important as +we may want to store or serialize this data in a form that is agnostic +of any particular ``ConcreteModel`` object. +We can now generate our data structure as: + +.. doctest:: + + >>> data = { + ... pyo.ComponentUID(var.referent): list(var[:].value) + ... for var in dae_vars + ... } + >>> data + {var[*,A]: [1.0, 1.0, 1.0], var[*,B]: [1.0, 1.0, 1.0]} + +This is the structure of the underlying dictionary in the ``TimeSeriesData`` +class provided by this package. We can generate this data using this package +as: + +.. doctest:: + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.mpc import DynamicModelInterface + + >>> m = pyo.ConcreteModel() + >>> m.time = pyo.Set(initialize=[0, 1, 2]) + >>> m.comp = pyo.Set(initialize=["A", "B"]) + >>> m.var = pyo.Var(m.time, m.comp, initialize=1.0) + + >>> # Construct a helper class for interfacing model with data + >>> helper = DynamicModelInterface(m, m.time) + + >>> # Generates a TimeSeriesData object + >>> series_data = helper.get_data_at_time() + + >>> # Get the underlying dictionary + >>> data = series_data.get_data() + >>> data + {var[*,A]: [1.0, 1.0, 1.0], var[*,B]: [1.0, 1.0, 1.0]} + +The first value proposition of this package is that ``DynamicModelInterface`` +and ``TimeSeriesData`` provide wrappers to ease loading and extraction of data +via ``flatten_dae_components`` and ``ComponentUID``. + +The second difficulty addressed by this package is that of extracting and +loading data between (potentially) different models. +For instance, in model predictive control, we often want to extract data from +a particular time point in a plant model and load it into a controller model +as initial conditions. This can be done as follows: + +.. doctest:: + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.mpc import DynamicModelInterface + + >>> m1 = pyo.ConcreteModel() + >>> m1.time = pyo.Set(initialize=[0, 1, 2]) + >>> m1.comp = pyo.Set(initialize=["A", "B"]) + >>> m1.var = pyo.Var(m1.time, m1.comp, initialize=1.0) + + >>> m2 = pyo.ConcreteModel() + >>> m2.time = pyo.Set(initialize=[0, 1, 2]) + >>> m2.comp = pyo.Set(initialize=["A", "B"]) + >>> m2.var = pyo.Var(m2.time, m2.comp, initialize=2.0) + + >>> # Construct helper objects + >>> m1_helper = DynamicModelInterface(m1, m1.time) + >>> m2_helper = DynamicModelInterface(m2, m2.time) + + >>> # Extract data from final time point of m2 + >>> tf = m2.time.last() + >>> tf_data = m2_helper.get_data_at_time(tf) + + >>> # Load data into initial time point of m1 + >>> t0 = m1.time.first() + >>> m1_helper.load_data(tf_data, time_points=t0) + + >>> # Get TimeSeriesData object + >>> series_data = m1_helper.get_data_at_time() + >>> # Get underlying dictionary + >>> series_data.get_data() + {var[*,A]: [2.0, 1.0, 1.0], var[*,B]: [2.0, 1.0, 1.0]} + +.. note:: + + Here we rely on the fact that our variable has the same name in + both models. + +Finally, this package provides methods for constructing components like +tracking cost expressions and piecewise-constant constraints from the +provided data structures. For example, the following code constructs +a tracking cost expression. + +.. doctest:: + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.mpc import DynamicModelInterface + + >>> m = pyo.ConcreteModel() + >>> m.time = pyo.Set(initialize=[0, 1, 2]) + >>> m.comp = pyo.Set(initialize=["A", "B"]) + >>> m.var = pyo.Var(m.time, m.comp, initialize=1.0) + + >>> # Construct helper object + >>> helper = DynamicModelInterface(m, m.time) + + >>> # Construct data structure for setpoints + >>> setpoint = {m.var[:, "A"]: 0.5, m.var[:, "B"]: 2.0} + >>> var_set, tr_cost = helper.get_penalty_from_target(setpoint) + >>> m.setpoint_idx = var_set + >>> m.tracking_cost = tr_cost + >>> m.tracking_cost.pprint() + tracking_cost : Size=6, Index=tracking_cost_index + Key : Expression + (0, 0) : (var[0,A] - 0.5)**2 + (0, 1) : (var[1,A] - 0.5)**2 + (0, 2) : (var[2,A] - 0.5)**2 + (1, 0) : (var[0,B] - 2.0)**2 + (1, 1) : (var[1,B] - 2.0)**2 + (1, 2) : (var[2,B] - 2.0)**2 + + +These methods will hopefully allow developers to declutter dynamic optimization +scripts and pay more attention to the application of the optimization problem +rather than the setup of the optimization problem. + +Who develops and maintains this package? +---------------------------------------- + +This package was developed by Robert Parker while a PhD student in Larry +Biegler's group at CMU, with guidance from Bethany Nicholson and John Siirola. diff --git a/doc/OnlineDocs/contributed_packages/parmest/driver.rst b/doc/OnlineDocs/contributed_packages/parmest/driver.rst index 79db50603a4..28238928b83 100644 --- a/doc/OnlineDocs/contributed_packages/parmest/driver.rst +++ b/doc/OnlineDocs/contributed_packages/parmest/driver.rst @@ -156,3 +156,14 @@ expression which is used to define "SecondStageCost". The objective function can be used to customize data points and weights that are used in parameter estimation. +Suggested initialization procedure for parameter estimation problems +-------------------------------------------------------------------- + +To check the quality of initial guess values provided for the fitted parameters, we suggest solving a +square instance of the problem prior to solving the parameter estimation problem using the following steps: + +1. Create :class:`~pyomo.contrib.parmest.parmest.Estimator` object. To initialize the parameter estimation solve from the square problem solution, set optional argument ``solver_options = {bound_push: 1e-8}``. + +2. Call :class:`~pyomo.contrib.parmest.parmest.Estimator.objective_at_theta` with optional argument ``(initialize_parmest_model=True)``. Different initial guess values for the fitted parameters can be provided using optional argument `theta_values` (**Pandas Dataframe**) + +3. Solve parameter estimation problem by calling :class:`~pyomo.contrib.parmest.parmest.Estimator.theta_est` diff --git a/doc/OnlineDocs/contributed_packages/parmest/index.rst b/doc/OnlineDocs/contributed_packages/parmest/index.rst index 0d53d80fe2c..2bf4942e632 100644 --- a/doc/OnlineDocs/contributed_packages/parmest/index.rst +++ b/doc/OnlineDocs/contributed_packages/parmest/index.rst @@ -10,8 +10,8 @@ Citation for parmest If you use parmest, please cite [ParmestPaper]_ -Index of parmest documenation -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Index of parmest documentation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. toctree:: :maxdepth: 2 diff --git a/doc/OnlineDocs/contributed_packages/preprocessing.rst b/doc/OnlineDocs/contributed_packages/preprocessing.rst index e6802886468..fd26f2bf6db 100644 --- a/doc/OnlineDocs/contributed_packages/preprocessing.rst +++ b/doc/OnlineDocs/contributed_packages/preprocessing.rst @@ -20,7 +20,7 @@ later be deprecated or combined, depending on their usefulness. var_aggregator.VariableAggregator bounds_to_vars.ConstraintToVarBoundTransform induced_linearity.InducedLinearity - constraint_tightener.TightenContraintFromVars + constraint_tightener.TightenConstraintFromVars deactivate_trivial_constraints.TrivialConstraintDeactivator detect_fixed_vars.FixedVarDetector equality_propagate.FixedVarPropagator @@ -93,7 +93,7 @@ Constraint Bounds Tightener This transformation was developed by `Sunjeev Kale `_ at Carnegie Mellon University. -.. autoclass:: pyomo.contrib.preprocessing.plugins.constraint_tightener.TightenContraintFromVars +.. autoclass:: pyomo.contrib.preprocessing.plugins.constraint_tightener.TightenConstraintFromVars :members: apply_to, create_using Trivial Constraint Deactivation diff --git a/doc/OnlineDocs/contributed_packages/pynumero/index.rst b/doc/OnlineDocs/contributed_packages/pynumero/index.rst index be82d843091..6ff8b29f812 100644 --- a/doc/OnlineDocs/contributed_packages/pynumero/index.rst +++ b/doc/OnlineDocs/contributed_packages/pynumero/index.rst @@ -1,3 +1,5 @@ +.. _pynumero: + PyNumero ======== diff --git a/doc/OnlineDocs/contributed_packages/pynumero/installation.rst b/doc/OnlineDocs/contributed_packages/pynumero/installation.rst index 983d4565aa3..9ac6961d2de 100644 --- a/doc/OnlineDocs/contributed_packages/pynumero/installation.rst +++ b/doc/OnlineDocs/contributed_packages/pynumero/installation.rst @@ -10,11 +10,14 @@ https://github.com/Pyomo/pyomo/blob/main/pyomo/contrib/pynumero/build.py and https://github.com/Pyomo/pyomo/blob/main/pyomo/contrib/pynumero/src/CMakeLists.txt. +Note that you will need a C++ compiler and CMake installed to build the +PyNumero libraries. + Method 1 -------- One way to build PyNumero extensions is with the pyomo -download-extensions and build-extensions subcommands. Note that +`download-extensions` and `build-extensions` subcommands. Note that this approach will build PyNumero without support for the HSL linear solvers. :: @@ -27,6 +30,18 @@ Method 2 If you want PyNumero support for the HSL solvers and you have an IPOPT compilation for your machine, you can build PyNumero using the build script :: - cd pyomo/contrib/pynumero/ - python build.py -DBUILD_ASL=ON -DBUILD_MA27=ON -DIPOPT_DIR= + python -m pyomo.contrib.pynumero.build -DBUILD_ASL=ON -DBUILD_MA27=ON -DIPOPT_DIR= + +Method 3 +-------- + +You can build the PyNumero libraries from source using `cmake`. This +generally works best when building from a source distribution of Pyomo. +Assuming that you are starting in the root of the Pyomo source +distribution, you can follow the normal CMake build process :: + mkdir build + cd build + ccmake ../pyomo/contrib/pynumero/src + make + make install diff --git a/doc/OnlineDocs/contributed_packages/pynumero/tutorial.mpi_blocks.rst b/doc/OnlineDocs/contributed_packages/pynumero/tutorial.mpi_blocks.rst index 2c97320be4e..b9cb1d5db7a 100644 --- a/doc/OnlineDocs/contributed_packages/pynumero/tutorial.mpi_blocks.rst +++ b/doc/OnlineDocs/contributed_packages/pynumero/tutorial.mpi_blocks.rst @@ -40,7 +40,7 @@ Note that blocks should only be set if the process/rank owns that block. The operations performed with `MPIBlockVector` are identical to the -same operations peformed with `BlockVector` (or even NumPy arrays), +same operations performed with `BlockVector` (or even NumPy arrays), except that the operations are now performed in parallel. `MPIBlockMatrix` construction is very similar. Consider the following diff --git a/doc/OnlineDocs/contributed_packages/pyros.rst b/doc/OnlineDocs/contributed_packages/pyros.rst index aeb7a293997..4ef57fbf26c 100644 --- a/doc/OnlineDocs/contributed_packages/pyros.rst +++ b/doc/OnlineDocs/contributed_packages/pyros.rst @@ -2,10 +2,11 @@ PyROS Solver ############ -PyROS (Pyomo Robust Optimization Solver) is a metasolver capability within Pyomo for solving non-convex, -two-stage optimization models using adjustable robust optimization. +PyROS (Pyomo Robust Optimization Solver) is a Pyomo-based meta-solver +for non-convex, two-stage adjustable robust optimization problems. -It was developed by **Natalie M. Isenberg** and **Chrysanthos E. Gounaris** of Carnegie Mellon University, +It was developed by **Natalie M. Isenberg**, **Jason A. F. Sherman**, +and **Chrysanthos E. Gounaris** of Carnegie Mellon University, in collaboration with **John D. Siirola** of Sandia National Labs. The developers gratefully acknowledge support from the U.S. Department of Energy's `Institute for the Design of Advanced Energy Systems (IDAES) `_. @@ -13,88 +14,116 @@ The developers gratefully acknowledge support from the U.S. Department of Energy Methodology Overview ----------------------------- -Below is an overview of the type of optimization models PyROS can accomodate. +Below is an overview of the type of optimization models PyROS can accommodate. -* PyROS is suitable for optimization models of **continuous variables** that may feature non-linearities (including **non-convexities**) in both the variables and uncertain parameters. -* PyROS can handle **equality constraints** defining state variables, including implicit state variables that cannot be eliminated via reformulation. -* PyROS allows for **two-stage** optimization problems that may feature both first-stage and second-stage degrees of freedom. +* PyROS is suitable for optimization models of **continuous variables** + that may feature non-linearities (including **non-convexities**) in + both the variables and uncertain parameters. +* PyROS can handle **equality constraints** defining state variables, + including implicit state variables that cannot be eliminated via + reformulation. +* PyROS allows for **two-stage** optimization problems that may + feature both first-stage and second-stage degrees of freedom. -The general form of a deterministic optimization problem that can be passed into PyROS is shown below: +PyROS is designed to operate on deterministic models of the general form + +.. _deterministic-model: .. math:: - \begin{align*} - \displaystyle \min_{\substack{x \in \mathcal{X}, \\ z \in \mathbb{R}^n, y\in\mathbb{R}^a}} & ~~ f_1\left(x\right) + f_2\left(x,z,y; q^0\right) & \\ - \displaystyle \text{s.t.} \quad \: & ~~ g_i\left(x, z, y; q^0\right) \leq 0 & \forall i \in \mathcal{I} \\ - & ~~ h_j\left(x,z,y; q^0\right) = 0 & \forall j \in \mathcal{J} \\ - \end{align*} + \begin{array}{clll} + \displaystyle \min_{\substack{x \in \mathcal{X}, \\ z \in \mathbb{R}^{n_z}, y\in\mathbb{R}^{n_y}}} & ~~ f_1\left(x\right) + f_2(x,z,y; q^{\text{nom}}) & \\ + \displaystyle \text{s.t.} & ~~ g_i(x, z, y; q^{\text{nom}}) \leq 0 & \forall\,i \in \mathcal{I} \\ + & ~~ h_j(x,z,y; q^{\text{nom}}) = 0 & \forall\,j \in \mathcal{J} \\ + \end{array} where: -* :math:`x \in \mathcal{X}` are the "design" variables (i.e., first-stage degrees of freedom), where :math:`\mathcal{X} \subseteq \mathbb{R}^m` is the feasible space defined by the model constraints that only reference these variables -* :math:`z \in \mathbb{R}^n` are the "control" variables (i.e., second-stage degrees of freedom) -* :math:`y \in \mathbb{R}^a` are the "state" variables -* :math:`q \in \mathbb{R}^w` is the vector of parameters that we shall later consider to be uncertain, and :math:`q^0` is the vector of nominal values associated with those. -* :math:`f_1\left(x\right)` are the terms of the objective function that depend only on design variables -* :math:`f_2\left(x, z, y; q\right)` are the terms of the objective function that depend on control and/or state variables -* :math:`g_i\left(x, z, y; q\right)` is the :math:`i^\text{th}` inequality constraint in set :math:`\mathcal{I}` (see Note) -* :math:`h_j\left(x, z, y; q\right)` is the :math:`j^\text{th}` equality constraint in set :math:`\mathcal{J}` (see Note) +* :math:`x \in \mathcal{X}` are the "design" variables + (i.e., first-stage degrees of freedom), + where :math:`\mathcal{X} \subseteq \mathbb{R}^{n_x}` is the feasible space defined by the model constraints + (including variable bounds specifications) referencing :math:`x` only. +* :math:`z \in \mathbb{R}^{n_z}` are the "control" variables + (i.e., second-stage degrees of freedom) +* :math:`y \in \mathbb{R}^{n_y}` are the "state" variables +* :math:`q \in \mathbb{R}^{n_q}` is the vector of model parameters considered + uncertain, and :math:`q^{\text{nom}}` is the vector of nominal values + associated with those. +* :math:`f_1\left(x\right)` are the terms of the objective function that depend + only on design variables +* :math:`f_2\left(x, z, y; q\right)` are the terms of the objective function + that depend on all variables and the uncertain parameters +* :math:`g_i\left(x, z, y; q\right)` is the :math:`i^\text{th}` + inequality constraint function in set :math:`\mathcal{I}` + (see :ref:`Note `) +* :math:`h_j\left(x, z, y; q\right)` is the :math:`j^\text{th}` + equality constraint function in set :math:`\mathcal{J}` + (see :ref:`Note `) + +.. _var-bounds-to-ineqs: .. note:: - * Applicable bounds on variables :math:`z` and/or :math:`y` are assumed to have been incorporated in the set of inequality constraints :math:`\mathcal{I}`. - * A key requirement of PyROS is that each value of :math:`\left(x, z, q \right)` maps to a unique value of :math:`y`, a property that is assumed to be properly enforced by the system of equality constraints :math:`\mathcal{J}`. If such unique mapping does not hold, then the selection of 'state' (i.e., not degree of freedom) variables :math:`y` is incorrect, and one or more of the :math:`y` variables should be appropriately redesignated to be part of either :math:`x` or :math:`z`. + PyROS accepts models in which bounds are directly imposed on + ``Var`` objects representing components of the variables :math:`z` + and :math:`y`. These models are cast to + :ref:`the form above ` + by reformulating the bounds as inequality constraints. -In order to cast the robust optimization counterpart formulation of the above model, we shall now assume that the uncertain parameters may attain -any realization from within an uncertainty set :math:`\mathcal{Q} \subseteq \mathbb{R}^w`, such that :math:`q^0 \in \mathcal{Q}`. -The set :math:`\mathcal{Q}` is assumed to be closed and bounded, while it can be **either continuous or discrete**. +.. _unique-mapping: -Based on the above notation, the form of the robust counterpart addressed in PyROS is shown below: +.. note:: + A key requirement of PyROS is that each value of :math:`\left(x, z, q \right)` + maps to a unique value of :math:`y`, a property that is assumed to + be properly enforced by the system of equality constraints + :math:`\mathcal{J}`. + If the mapping is not unique, then the selection of 'state' + (i.e., not degree of freedom) variables :math:`y` is incorrect, + and one or more of the :math:`y` variables should be appropriately + redesignated to be part of either :math:`x` or :math:`z`. + +In order to cast the robust optimization counterpart of the +:ref:`deterministic model `, +we now assume that the uncertain parameters may attain +any realization in a compact uncertainty set +:math:`\mathcal{Q} \subseteq \mathbb{R}^{n_q}` containing +the nominal value :math:`q^{\text{nom}}`. +The set :math:`\mathcal{Q}` may be **either continuous or discrete**. + +Based on the above notation, the form of the robust counterpart addressed by PyROS is .. math:: - \begin{align*} + \begin{array}{ccclll} \displaystyle \min_{x \in \mathcal{X}} & \displaystyle \max_{q \in \mathcal{Q}} - & \displaystyle \min_{z \in \mathbb{R}^n, y \in \mathbb{R}^a} \ \ & \displaystyle ~~ f_1\left(x\right) + f_2\left(x, z, y, q\right) & & \\ - & & \text{s.t.} \quad \:& \displaystyle ~~ g_i\left(x, z, y, q\right) \leq 0 & & \forall i \in \mathcal{I}\\ - & & & \displaystyle ~~ h_j\left(x, z, y, q\right) = 0 & & \forall j \in \mathcal{J} - \end{align*} + & \displaystyle \min_{\substack{z \in \mathbb{R}^{n_z},\\y \in \mathbb{R}^{n_y}}} \ \ & \displaystyle ~~ f_1\left(x\right) + f_2\left(x, z, y, q\right) \\ + & & \text{s.t.}~ & \displaystyle ~~ g_i\left(x, z, y, q\right) \leq 0 & & \forall\, i \in \mathcal{I}\\ + & & & \displaystyle ~~ h_j\left(x, z, y, q\right) = 0 & & \forall\,j \in \mathcal{J} + \end{array} -In order to solve problems of the above type, PyROS implements the -Generalized Robust Cutting-Set algorithm developed in [GRCSPaper]_. +PyROS solves problems of this form using the +Generalized Robust Cutting-Set algorithm developed in [Isenberg_et_al]_. When using PyROS, please consider citing the above paper. PyROS Required Inputs ----------------------------- -The required inputs to the PyROS solver are the following: +The required inputs to the PyROS solver are: -* The determinisitic optimization model +* The deterministic optimization model * List of first-stage ("design") variables * List of second-stage ("control") variables -* List of parameters to be considered uncertain +* List of parameters considered uncertain * The uncertainty set -* Subordinate local and global NLP optimization solvers - -Below is a list of arguments that PyROS expects the user to provide when calling the ``solve`` command. -Note how all but the ``model`` argument **must** be specified as ``kwargs``. - -model : ``ConcreteModel`` - A ``ConcreteModel`` object representing the deterministic model. -first_stage_variables : ``list(Var)`` - A list of Pyomo ``Var`` objects representing the first-stage degrees of freedom (design variables) in ``model``. -second_stage_variables : ``list(Var)`` - A list of Pyomo ``Var`` objects representing second-stage degrees of freedom (control variables) in ``model``. -uncertain_params : ``list(Param)`` - A list of Pyomo ``Param`` objects in ``deterministic_model`` to be considered uncertain. These specified ``Param`` objects must have the property ``mutable=True``. -uncertainty_set : ``UncertaintySet`` - A PyROS ``UncertaintySet`` object representing uncertainty in the space of those parameters listed in the ``uncertain_params`` object. -local_solver : ``Solver`` - A Pyomo ``Solver`` instance for a local NLP optimization solver. -global_solver : ``Solver`` - A Pyomo ``Solver`` instance for a global NLP optimization solver. +* Subordinate local and global nonlinear programming (NLP) solvers + +These are more elaborately presented in the +:ref:`Solver Interface ` section. .. note:: - Any variables in the model not specified to be first- or second-stage variables are automatically considered to be state variables. + Any variables in the model not specified to be first-stage or second-stage + variables are automatically considered to be state variables. + +.. _solver-interface: PyROS Solver Interface ----------------------------- @@ -103,107 +132,161 @@ PyROS Solver Interface :members: solve .. note:: - Solving the master problems globally (via option ``solve_masters_globally=True``) is one of the requirements to guarantee robust optimality; - solving the master problems locally can only lead to a robust feasible solution. + Upon successful convergence of PyROS, the solution returned is + certified to be robust optimal only if: -.. note:: - Selecting worst-case objective (via option ``objective_focus=ObjectiveType.worst_case``) is one of the requirements to guarantee robust optimality; - selecting nominal objective can only lead to a robust feasible solution, - albeit one that has optimized the sum of first- and (nominal) second-stage objectives. + 1. master problems are solved to global optimality + (by specifying ``solve_master_globally=True``) + 2. a worst-case objective focus is chosen + (by specifying ``objective_focus=ObjectiveType.worst_case``) -.. note:: - To utilize option ``p_robustness``, a dictionary of the following form must be supplied via the ``kwarg``: - There must be a key (``str``) called 'rho', which maps to a non-negative value, where '1+rho' defines a bound - for the ratio of the objective that any scenario may exhibit compared to the nominal objective. + Otherwise, the solution returned is certified to only be robust feasible. PyROS Uncertainty Sets ----------------------------- -PyROS contains pre-implemented ``UncertaintySet`` specializations for many types of commonly used uncertainty sets. -Additional capabilities for intersecting multiple PyROS ``UncertaintySet`` objects so as to create custom sets are also provided -via the ``IntersectionSet`` class. Custom user-specified sets can also be defined via the base ``UncertaintySet`` class. - -Mathematical representations of the sets are shown below, followed by the class descriptions. - -.. list-table:: PyROS Uncertainty Sets +Uncertainty sets are represented by subclasses of +the :class:`~pyomo.contrib.pyros.uncertainty_sets.UncertaintySet` +abstract base class. +PyROS provides a suite of pre-implemented subclasses representing +commonly used uncertainty sets. +Custom user-defined uncertainty set types may be implemented by +subclassing the +:class:`~pyomo.contrib.pyros.uncertainty_sets.UncertaintySet` class. +The intersection of a sequence of concrete +:class:`~pyomo.contrib.pyros.uncertainty_sets.UncertaintySet` +instances can be easily constructed by instantiating the pre-implemented +:class:`~pyomo.contrib.pyros.uncertainty_sets.IntersectionSet` +subclass. + +The table that follows provides mathematical definitions of +the various abstract and pre-implemented +:class:`~pyomo.contrib.pyros.uncertainty_sets.UncertaintySet` subclasses. + +.. _table-uncertsets: + +.. list-table:: Mathematical definitions of PyROS uncertainty sets of dimension :math:`n`. :header-rows: 1 :class: tight-table * - Uncertainty Set Type - - Set Representation - * - ``BoxSet`` - - :math:`Q_X = \left\{q \in \mathbb{R}^n : q^\ell \leq q \leq q^u\right\} \\ q^\ell \in \mathbb{R}^n \\ q^u \in \mathbb{R}^n : \left\{q^\ell \leq q^u\right\}` - * - ``CardinalitySet`` - - :math:`Q_C = \left\{q \in \mathbb{R}^n : q = q^0 + (\hat{q} \circ \xi) \text{ for some } \xi \in \Xi_C\right\}\\ \Xi_C = \left\{\xi \in [0, 1]^n : \displaystyle\sum_{i=1}^{n} \xi_i \leq \Gamma\right\} \\ \Gamma \in [0, n] \\ \hat{q} \in \mathbb{R}^{n}_{+} \\ q^0 \in \mathbb{R}^n` - * - ``BudgetSet`` - - :math:`Q_B = \left\{q \in \mathbb{R}^n_+: \displaystyle\sum_{i \in B_\ell} q_i \leq b_\ell \ \forall \ell \in \left\{1,\ldots,L\right\} \right\} \\ b_\ell \in \mathbb{R}^{L}_+` - * - ``FactorModelSet`` - - :math:`Q_F = \left\{q \in \mathbb{R}^n: \displaystyle q = q^0 + \Psi \xi \text{ for some }\xi \in \Xi_F\right\} \\ \Xi_F = \left\{ \xi \in \left[-1, 1\right]^F, \left\lvert \displaystyle \sum_{f=1}^{F} \xi_f\right\rvert \leq \beta F \right\} \\ \beta \in [0,1] \\ \Psi \in \mathbb{R}^{n \times F}_+ \\ q^0 \in \mathbb{R}^n` - * - ``PolyhedralSet`` - - :math:`Q_P = \left\{q \in \mathbb{R}^n: \displaystyle A q \leq b \right\} \\ A \in \mathbb{R}^{m \times n} \\ b \in \mathbb{R}^{m} \\ q^0 \in \mathbb{R}^n: {Aq^0 \leq b}` - * - ``AxisAlignedEllipsoidalSet`` - - :math:`Q_A = \left\{q \in \mathbb{R}^n: \displaystyle \sum\limits_{i=1 : \atop \left\{ \alpha_i > 0 \right\} } \left(\frac{q_i - q_i^0}{\alpha_i} \right)^2 \leq 1 , \quad q_i = q^0_i \quad \forall i : \left\{\alpha_i=0\right\}\right\} \\ \alpha \in \mathbb{R}^n_+, \\ q^0 \in \mathbb{R}^n` - * - ``EllipsoidalSet`` - - :math:`Q_E = \left\{q \in \mathbb{R}^n: \displaystyle q = q^0 + P^{1/2} \xi \text{ for some } \xi \in \Xi_E \right\} \\ \Xi_E = \left\{\xi \in \mathbb{R} : \xi^T\xi \leq s \right\} \\ P \in \mathbb{S}^{n\times n}_+ \\ s \in \mathbb{R}_+ \\ q^0 \in \mathbb{R}^n` - * - ``UncertaintySet`` - - :math:`Q_U = \left\{q \in \mathbb{R}^n: \displaystyle g_i(q) \leq 0 \quad \forall i \in \left\{1,\ldots,m \right\}\right\} \\ m \in \mathbb{N}_+ \\ g_i : \mathbb{R}^n \mapsto \mathbb{R} \, \forall i \in \left\{1,\ldots,m\right\}, \\ q^0 \in \mathbb{R}^n : \left\{g_i(q^0) \leq 0 \ \forall i \in \left\{1,\ldots,m\right\}\right\}` - * - ``DiscreteScenariosSet`` - - :math:`Q_D = \left\{q^s : s = 0,\ldots,D \right\} \\ D \in \mathbb{N} \\ q^s \in \mathbb{R}^n \forall s \in \left\{ 0,\ldots,D\right\}` - * - ``IntersectionSet`` - - :math:`Q_I = \left\{q \in \mathbb{R}^n: \displaystyle q \in \bigcap_{i \in \left\{1,\ldots,m\right\}} Q_i\right\} \\ Q_i \subset \mathbb{R}^n \quad \forall i \in \left\{1,\ldots,m\right\}` + - Input Data + - Mathematical Definition + * - :class:`~pyomo.contrib.pyros.uncertainty_sets.BoxSet` + - :math:`\begin{array}{l} q ^{\text{L}} \in \mathbb{R}^{n}, \\ q^{\text{U}} \in \mathbb{R}^{n} \end{array}` + - :math:`\{q \in \mathbb{R}^n \mid q^\mathrm{L} \leq q \leq q^\mathrm{U}\}` + * - :class:`~pyomo.contrib.pyros.uncertainty_sets.CardinalitySet` + - :math:`\begin{array}{l} q^{0} \in \mathbb{R}^{n}, \\ \hat{q} \in \mathbb{R}_{+}^{n}, \\ \Gamma \in [0, n] \end{array}` + - :math:`\left\{ q \in \mathbb{R}^{n} \middle| \begin{array}{l} q = q^{0} + \hat{q} \circ \xi \\ \displaystyle \sum_{i=1}^{n} \xi_{i} \leq \Gamma \\ \xi \in [0, 1]^{n} \end{array} \right\}` + * - :class:`~pyomo.contrib.pyros.uncertainty_sets.BudgetSet` + - :math:`\begin{array}{l} q^{0} \in \mathbb{R}^{n}, \\ b \in \mathbb{R}_{+}^{L}, \\ B \in \{0, 1\}^{L \times n} \end{array}` + - :math:`\left\{ q \in \mathbb{R}^{n} \middle| \begin{array}{l} \begin{pmatrix} B \\ -I \end{pmatrix} q \leq \begin{pmatrix} b + Bq^{0} \\ -q^{0} \end{pmatrix} \end{array} \right\}` + * - :class:`~pyomo.contrib.pyros.uncertainty_sets.FactorModelSet` + - :math:`\begin{array}{l} q^{0} \in \mathbb{R}^{n}, \\ \Psi \in \mathbb{R}^{n \times F}, \\ \beta \in [0, 1] \end{array}` + - :math:`\left\{ q \in \mathbb{R}^{n} \middle| \begin{array}{l} q = q^{0} + \Psi \xi \\ \displaystyle\bigg| \sum_{j=1}^{F} \xi_{j} \bigg| \leq \beta F \\ \xi \in [-1, 1]^{F} \\ \end{array} \right\}` + * - :class:`~pyomo.contrib.pyros.uncertainty_sets.PolyhedralSet` + - :math:`\begin{array}{l} A \in \mathbb{R}^{m \times n}, \\ b \in \mathbb{R}^{m}\end{array}` + - :math:`\{q \in \mathbb{R}^{n} \mid A q \leq b\}` + * - :class:`~pyomo.contrib.pyros.uncertainty_sets.AxisAlignedEllipsoidalSet` + - :math:`\begin{array}{l} q^0 \in \mathbb{R}^{n}, \\ \alpha \in \mathbb{R}_{+}^{n} \end{array}` + - :math:`\left\{ q \in \mathbb{R}^{n} \middle| \begin{array}{l} \displaystyle\sum_{\substack{i = 1: \\ \alpha_{i} > 0}}^{n} \left(\frac{q_{i} - q_{i}^{0}}{\alpha_{i}}\right)^2 \leq 1 \\ q_{i} = q_{i}^{0} \,\forall\,i : \alpha_{i} = 0 \end{array} \right\}` + * - :class:`~pyomo.contrib.pyros.uncertainty_sets.EllipsoidalSet` + - :math:`\begin{array}{l} q^0 \in \mathbb{R}^n, \\ P \in \mathbb{S}_{++}^{n}, \\ s \in \mathbb{R}_{+} \end{array}` + - :math:`\{q \in \mathbb{R}^{n} \mid (q - q^{0})^{\intercal} P^{-1} (q - q^{0}) \leq s\}` + * - :class:`~pyomo.contrib.pyros.uncertainty_sets.UncertaintySet` + - :math:`g: \mathbb{R}^{n} \to \mathbb{R}^{m}` + - :math:`\{q \in \mathbb{R}^{n} \mid g(q) \leq 0\}` + * - :class:`~pyomo.contrib.pyros.uncertainty_sets.DiscreteScenarioSet` + - :math:`q^{1}, q^{2},\dots , q^{S} \in \mathbb{R}^{n}` + - :math:`\{q^{1}, q^{2}, \dots , q^{S}\}` + * - :class:`~pyomo.contrib.pyros.uncertainty_sets.IntersectionSet` + - :math:`\mathcal{Q}_{1}, \mathcal{Q}_{2}, \dots , \mathcal{Q}_{m} \subset \mathbb{R}^{n}` + - :math:`\displaystyle \bigcap_{i=1}^{m} \mathcal{Q}_{i}` .. note:: - Each of the PyROS uncertainty set classes inherits from the ``UncertaintySet`` base class. + Each of the PyROS uncertainty set classes inherits from the + :class:`~pyomo.contrib.pyros.uncertainty_sets.UncertaintySet` + abstract base class. PyROS Uncertainty Set Classes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ .. autoclass:: pyomo.contrib.pyros.uncertainty_sets.BoxSet - :special-members: __init__, parameter_bounds, dim, point_in_set + :show-inheritance: + :special-members: bounds, type, parameter_bounds, dim, point_in_set .. autoclass:: pyomo.contrib.pyros.uncertainty_sets.CardinalitySet - :special-members: __init__, parameter_bounds, dim, point_in_set + :show-inheritance: + :special-members: origin, positive_deviation, gamma, type, parameter_bounds, dim, point_in_set .. autoclass:: pyomo.contrib.pyros.uncertainty_sets.BudgetSet - :special-members: __init__, parameter_bounds, dim, point_in_set + :show-inheritance: + :special-members: coefficients_mat, rhs_vec, origin, budget_membership_mat, budget_rhs_vec, type, parameter_bounds, dim, point_in_set .. autoclass:: pyomo.contrib.pyros.uncertainty_sets.FactorModelSet - :special-members: __init__, parameter_bounds, dim, point_in_set + :show-inheritance: + :special-members: origin, number_of_factors, psi_mat, beta, type, parameter_bounds, dim, point_in_set .. autoclass:: pyomo.contrib.pyros.uncertainty_sets.PolyhedralSet - :special-members: __init__, parameter_bounds, dim, point_in_set + :show-inheritance: + :special-members: coefficients_mat, rhs_vec, type, parameter_bounds, dim, point_in_set .. autoclass:: pyomo.contrib.pyros.uncertainty_sets.AxisAlignedEllipsoidalSet - :special-members: __init__, parameter_bounds, dim, point_in_set + :show-inheritance: + :special-members: center, half_lengths, type, parameter_bounds, dim, point_in_set .. autoclass:: pyomo.contrib.pyros.uncertainty_sets.EllipsoidalSet - :special-members: __init__, parameter_bounds, dim, point_in_set + :show-inheritance: + :special-members: center, shape_matrix, scale, type, parameter_bounds, dim, point_in_set .. autoclass:: pyomo.contrib.pyros.uncertainty_sets.UncertaintySet - :special-members: __init__, parameter_bounds, dim, point_in_set + :show-inheritance: + :special-members: parameter_bounds, dim, point_in_set .. autoclass:: pyomo.contrib.pyros.uncertainty_sets.DiscreteScenarioSet - :special-members: __init__, parameter_bounds, dim, point_in_set + :show-inheritance: + :special-members: scenarios, type, parameter_bounds, dim, point_in_set .. autoclass:: pyomo.contrib.pyros.uncertainty_sets.IntersectionSet - :special-members: __init__, parameter_bounds, dim, point_in_set + :show-inheritance: + :special-members: all_sets, type, parameter_bounds, dim, point_in_set PyROS Usage Example ----------------------------- -We will use an example to illustrate the usage of PyROS. The problem we will use is called *hydro* and comes from the GAMS example problem database in `The GAMS Model Library `_. The model was converted to Pyomo format via the `GAMS Convert tool `_. - -This model is a QCQP with 31 variables. Of these variables, 13 represent degrees of freedom, with the additional 18 being state variables. -The model features 6 linear inequality constraints, 6 linear equality constraints, 6 non-linear (quadratic) equalities, and a quadratic objective. -We have augmented this model by converting one objective coefficient, two constraint coefficients, and one constraint right-hand side into Param objects so that they can be considered uncertain later on. +In this section, we illustrate the usage of PyROS with a modeling example. +The deterministic problem of interest is called *hydro* +(available `here `_), +a QCQP taken from the +`GAMS Model Library `_. +We have converted the model to Pyomo format using the +`GAMS Convert tool `_. + +The *hydro* model features 31 variables, +of which 13 are degrees of freedom and 18 are state variables. +Moreover, there are +6 linear inequality constraints, +12 linear equality constraints, +6 non-linear (quadratic) equality constraints, +and a quadratic objective. +We have extended this model by converting one objective coefficient, +two constraint coefficients, and one constraint right-hand side +into ``Param`` objects so that they can be considered uncertain later on. .. note:: - Per our analysis, the *hydro* problem satisfies the requirement that each value of :math:`\left(x, z, q \right)` maps to a unique value of :math:`y`, which indicates a proper partition of variables between (first- or second-stage) degrees of freedom and state variables. + Per our analysis, the *hydro* problem satisfies the requirement that + each value of :math:`\left(x, z, q \right)` maps to a unique + value of :math:`y`, which, in accordance with + :ref:`our earlier note `, + indicates a proper partitioning of the model variables + into (first-stage and second-stage) degrees of freedom and + state variables. Step 0: Import Pyomo and the PyROS Module ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -In anticipation of using the PyROS solver and building the deterministic Pyomo model: +In anticipation of using the PyROS solver and building the deterministic Pyomo +model: .. doctest:: @@ -220,14 +303,24 @@ Step 1: Define the Deterministic Problem The deterministic Pyomo model for *hydro* is shown below. .. note:: - Primitive data (Python literals) that have been hard-coded within a deterministic model cannot be later considered uncertain, unless they are first converted to ``Param`` objects within the ``ConcreteModel`` object. - Furthermore, any ``Param`` object that is to be later considered uncertain must have the property ``mutable=True``. + Primitive data (Python literals) that have been hard-coded within a + deterministic model cannot be later considered uncertain, + unless they are first converted to ``Param`` objects within + the ``ConcreteModel`` object. + Furthermore, any ``Param`` object that is to be later considered + uncertain must have the property ``mutable=True``. .. note:: - In case modifying the ``mutable`` property inside the deterministic model object itself is not straight-forward in your context, - you may consider adding the following statement **after** ``import pyomo.environ as pyo`` but **before** defining the model object: - ``pyo.Param.DefaultMutable = True``. Note how this sets the default ``mutable`` property in all ``Param`` objects in the ensuing model instance to ``True``; - consequently, this solution will not work with ``Param`` objects for which the ``mutable=False`` property was explicitly enabled inside the model object. + In case modifying the ``mutable`` property inside the deterministic + model object itself is not straightforward in your context, + you may consider adding the following statement **after** + ``import pyomo.environ as pyo`` but **before** defining the model + object: ``pyo.Param.DefaultMutable = True``. + For all ``Param`` objects declared after this statement, + the attribute ``mutable`` is set to ``True`` by default. + Hence, non-mutable ``Param`` objects are now declared by + explicitly passing the argument ``mutable=False`` to the + ``Param`` constructor. .. doctest:: @@ -310,25 +403,46 @@ The deterministic Pyomo model for *hydro* is shown below. Step 2: Define the Uncertainty ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -First, we need to collect into a list those ``Param`` objects of our model that represent potentially uncertain parameters. For purposes of our example, we shall assume uncertainty in the model parameters ``(m.p[0], m.p[1], m.p[2], m.p[3])``, for which we can conveniently utilize the ``m.p`` object (itself an indexed ``Param`` object). +First, we need to collect into a list those ``Param`` objects of our model +that represent potentially uncertain parameters. +For the purposes of our example, we shall assume uncertainty in the model +parameters ``[m.p[0], m.p[1], m.p[2], m.p[3]]``, for which we can +conveniently utilize the object ``m.p`` (itself an indexed ``Param`` object). .. doctest:: >>> # === Specify which parameters are uncertain === - >>> uncertain_parameters = [m.p] # We can pass IndexedParams this way to PyROS, or as an expanded list per index + >>> # We can pass IndexedParams this way to PyROS, + >>> # or as an expanded list per index + >>> uncertain_parameters = [m.p] .. note:: - Any ``Param`` object that is to be considered uncertain by PyROS must have the property ``mutable=True``. - -PyROS will seek to identify solutions that remain feasible for any realization of these parameters included in an uncertainty set. To that end, we need to construct an ``UncertaintySet`` object. In our example, let us utilize the ``BoxSet`` constructor to specify an uncertainty set of simple hyper-rectangular geometry. For this, we will assume each parameter value is uncertain within a percentage of its nominal value. Constructing this specific ``UncertaintySet`` object can be done as follows. + Any ``Param`` object that is to be considered uncertain by PyROS + must have the property ``mutable=True``. + +PyROS will seek to identify solutions that remain feasible for any +realization of these parameters included in an uncertainty set. +To that end, we need to construct an +:class:`~pyomo.contrib.pyros.uncertainty_sets.UncertaintySet` +object. +In our example, let us utilize the +:class:`~pyomo.contrib.pyros.uncertainty_sets.BoxSet` +constructor to specify +an uncertainty set of simple hyper-rectangular geometry. +For this, we will assume each parameter value is uncertain within a +percentage of its nominal value. Constructing this specific +:class:`~pyomo.contrib.pyros.uncertainty_sets.UncertaintySet` +object can be done as follows: .. doctest:: >>> # === Define the pertinent data === >>> relative_deviation = 0.15 - >>> bounds = [(nominal_values[i] - relative_deviation*nominal_values[i], - ... nominal_values[i] + relative_deviation*nominal_values[i]) - ... for i in range(4)] + >>> bounds = [ + ... (nominal_values[i] - relative_deviation*nominal_values[i], + ... nominal_values[i] + relative_deviation*nominal_values[i]) + ... for i in range(4) + ... ] >>> # === Construct the desirable uncertainty set === >>> box_uncertainty_set = pyros.BoxSet(bounds=bounds) @@ -336,7 +450,10 @@ PyROS will seek to identify solutions that remain feasible for any realization o Step 3: Solve with PyROS ^^^^^^^^^^^^^^^^^^^^^^^^^^ -PyROS requires the user to supply one local and one global NLP solver to be used for solving sub-problems. For convenience, we shall have PyROS invoke BARON as both the local and the global NLP solver. +PyROS requires the user to supply one local and one global NLP solver to use +for solving sub-problems. +For convenience, we shall have PyROS invoke BARON as both the local and the +global NLP solver: .. doctest:: :skipif: not (baron.available() and baron.license_is_valid()) @@ -346,60 +463,81 @@ PyROS requires the user to supply one local and one global NLP solver to be used >>> global_solver = pyo.SolverFactory('baron') .. note:: - Additional solvers to be used as backup can be designated during the ``solve`` statement via the config options ``backup_local_solvers`` and ``backup_global_solvers`` presented above. - -The final step in solving a model with PyROS is to designate the remaining required inputs, namely ``first_stage_variables`` and ``second_stage_variables``. Below, we present two separate cases. + Additional NLP optimizers can be automatically used in the event the primary + subordinate local or global optimizer passed + to the PyROS :meth:`~pyomo.contrib.pyros.PyROS.solve` method + does not successfully solve a subproblem to an appropriate termination + condition. These alternative solvers are provided through the optional + keyword arguments ``backup_local_solvers`` and ``backup_global_solvers``. + +The final step in solving a model with PyROS is to construct the +remaining required inputs, namely +``first_stage_variables`` and ``second_stage_variables``. +Below, we present two separate cases. PyROS Termination Conditions """"""""""""""""""""""""""""" -PyROS will return one of six termination conditions upon completion. These termination conditions are tabulated below. - -.. tabularcolumns:: |c|c|c| - -+---------------------------------------------------+----------------------------------------------------------------+ -| **Termination Condition** | **Description** | -+---------------------------------------------------+----------------------------------------------------------------+ -| ``pyrosTerminationCondition.robust_optimal`` | The final solution is robust optimal | -+---------------------------------------------------+----------------------------------------------------------------+ -| ``pyrosTerminationCondition.robust_feasible`` | The final solution is robust feasible | -+---------------------------------------------------+----------------------------------------------------------------+ -| ``pyrosTerminationCondition.robust_infeasible`` | The posed problem is robust infeasible | -+---------------------------------------------------+----------------------------------------------------------------+ -| ``pyrosTerminationCondition.max_iter`` | Maximum number of GRCS iteration reached | -+---------------------------------------------------+----------------------------------------------------------------+ -| ``pyrosTerminationCondition.time_out`` | Maximum number of time reached | -+---------------------------------------------------+----------------------------------------------------------------+ -| ``pyrosTerminationCondition.subsolver_error`` | Unacceptable return status(es) from a user-supplied sub-solver| -+---------------------------------------------------+----------------------------------------------------------------+ +PyROS will return one of six termination conditions upon completion. +These termination conditions are defined through the +:class:`~pyomo.contrib.pyros.util.pyrosTerminationCondition` enumeration +and tabulated below. + +.. table:: PyROS termination conditions. + + +----------------------------------------------------------------------------------+----------------------------------------------------------------+ + | Termination Condition | Description | + +==================================================================================+================================================================+ + | :attr:`~pyomo.contrib.pyros.util.pyrosTerminationCondition.robust_optimal` | The final solution is robust optimal | + +----------------------------------------------------------------------------------+----------------------------------------------------------------+ + | :attr:`~pyomo.contrib.pyros.util.pyrosTerminationCondition.robust_feasible` | The final solution is robust feasible | + +----------------------------------------------------------------------------------+----------------------------------------------------------------+ + | :attr:`~pyomo.contrib.pyros.util.pyrosTerminationCondition.robust_infeasible` | The posed problem is robust infeasible | + +----------------------------------------------------------------------------------+----------------------------------------------------------------+ + | :attr:`~pyomo.contrib.pyros.util.pyrosTerminationCondition.max_iter` | Maximum number of GRCS iteration reached | + +----------------------------------------------------------------------------------+----------------------------------------------------------------+ + | :attr:`~pyomo.contrib.pyros.util.pyrosTerminationCondition.time_out` | Maximum number of time reached | + +----------------------------------------------------------------------------------+----------------------------------------------------------------+ + | :attr:`~pyomo.contrib.pyros.util.pyrosTerminationCondition.subsolver_error` | Unacceptable return status(es) from a user-supplied sub-solver| + +----------------------------------------------------------------------------------+----------------------------------------------------------------+ A Single-Stage Problem """"""""""""""""""""""""" -If we choose to designate all variables as either design or state variables, without any control variables (i.e., all degrees of freedom are first-stage), we can use PyROS to solve the single-stage problem as shown below. In particular, let us instruct PyROS that variables ``m.x1`` through ``m.x6``, ``m.x19`` through ``m.x24``, and ``m.x31`` correspond to first-stage degrees of freedom. +If we choose to designate all variables as either design or state variables, +without any control variables (i.e., all degrees of freedom are first-stage), +we can use PyROS to solve the single-stage problem as shown below. +In particular, let us instruct PyROS that variables +``m.x1`` through ``m.x6``, ``m.x19`` through ``m.x24``, and ``m.x31`` +correspond to first-stage degrees of freedom. + +.. _single-stage-problem: .. doctest:: :skipif: not (baron.available() and baron.license_is_valid()) - >>> # === Designate which variables correspond to first- and second-stage degrees of freedom === - >>> first_stage_variables =[m.x1, m.x2, m.x3, m.x4, m.x5, m.x6, - ... m.x19, m.x20, m.x21, m.x22, m.x23, m.x24, m.x31] + >>> # === Designate which variables correspond to first-stage + >>> # and second-stage degrees of freedom === + >>> first_stage_variables =[ + ... m.x1, m.x2, m.x3, m.x4, m.x5, m.x6, + ... m.x19, m.x20, m.x21, m.x22, m.x23, m.x24, m.x31, + ... ] >>> second_stage_variables = [] >>> # The remaining variables are implicitly designated to be state variables >>> # === Call PyROS to solve the robust optimization problem === - >>> results_1 = pyros_solver.solve(model = m, - ... first_stage_variables = first_stage_variables, - ... second_stage_variables = second_stage_variables, - ... uncertain_params = uncertain_parameters, - ... uncertainty_set = box_uncertainty_set, - ... local_solver = local_solver, - ... global_solver= global_solver, - ... options = { - ... "objective_focus": pyros.ObjectiveType.worst_case, - ... "solve_master_globally": True, - ... "load_solution":False - ... }) + >>> results_1 = pyros_solver.solve( + ... model=m, + ... first_stage_variables=first_stage_variables, + ... second_stage_variables=second_stage_variables, + ... uncertain_params=uncertain_parameters, + ... uncertainty_set=box_uncertainty_set, + ... local_solver=local_solver, + ... global_solver=global_solver, + ... objective_focus=pyros.ObjectiveType.worst_case, + ... solve_master_globally=True, + ... load_solution=False, + ... ) =========================================================================================== PyROS: Pyomo Robust Optimization Solver ... =========================================================================================== @@ -413,32 +551,58 @@ If we choose to designate all variables as either design or state variables, wit >>> objective = results_1.final_objective_value >>> # === Print some results === >>> single_stage_final_objective = round(objective,-1) - >>> print("Final objective value: %s" % single_stage_final_objective) + >>> print(f"Final objective value: {single_stage_final_objective}") Final objective value: 48367380.0 - >>> print("PyROS termination condition: %s" % termination_condition) + >>> print(f"PyROS termination condition: {termination_condition}") PyROS termination condition: pyrosTerminationCondition.robust_optimal PyROS Results Object """"""""""""""""""""""""""" -The results object returned by PyROS allows you to query the following information from the solve call: -total iterations of the algorithm ``iterations``, CPU time ``time``, the GRCS algorithm termination condition ``pyros_termination_condition``, -and the final objective function value ``final_objective_value``. If the option ``load_solution`` = ``True`` (default), the variables in the model will be -loaded to the solution determined by PyROS and can be obtained by querying the model variables. Note that in the results obtained above, we set ``load_solution`` = ``False``. -This is to ensure that the next set of runs shown here can utilize the original deterministic model, as the initial point can affect the performance of sub-solvers. +The results object returned by PyROS allows you to query the following information +from the solve call: + +* ``iterations``: total iterations of the algorithm +* ``time``: total wallclock time (or elapsed time) in seconds +* ``pyros_termination_condition``: the GRCS algorithm termination condition +* ``final_objective_value``: the final objective function value. + +The :ref:`preceding code snippet ` +demonstrates how to retrieve this information. + +If we pass ``load_solution=True`` (the default setting) +to the :meth:`~pyomo.contrib.pyros.PyROS.solve` method, +then the solution at which PyROS terminates will be loaded to +the variables of the original deterministic model. +Note that in the :ref:`preceding code snippet `, +we set ``load_solution=False`` to ensure the next set of runs shown here can +utilize the initial point loaded to the original deterministic model, +as the initial point may affect the performance of sub-solvers. .. note:: - The reported ``final_objective_value`` and final model variable values depend on the selection of the option ``objective_focus``. - The ``final_objective_value`` is the sum of first-stage and second-stage objective functions. - If ``objective_focus = ObjectiveType.nominal``, second-stage objective and variables are evaluated at the nominal realization of the uncertain parameters, :math:`q^0`. - If ``objective_focus = ObjectiveType.worst_case``, second-stage objective and variables are evaluated at the worst-case realization of the uncertain parameters, :math:`q^{k^\ast}` where :math:`k^\ast = argmax_{k \in \mathcal{K}} f_2(x,z^k,y^k,q^k)` . - -An example of how to query these values on the previously obtained results is shown in the code above. - + The reported ``final_objective_value`` and final model variable values + depend on the selection of the option ``objective_focus``. + The ``final_objective_value`` is the sum of first-stage + and second-stage objective functions. + If ``objective_focus = ObjectiveType.nominal``, + second-stage objective and variables are evaluated at + the nominal realization of the uncertain parameters, :math:`q^{\text{nom}}`. + If ``objective_focus = ObjectiveType.worst_case``, second-stage objective + and variables are evaluated at the worst-case realization + of the uncertain parameters, :math:`q^{k^\ast}` + where :math:`k^\ast = \mathrm{argmax}_{k \in \mathcal{K}}~f_2(x,z^k,y^k,q^k)`. A Two-Stage Problem """""""""""""""""""""" -For this next set of runs, we will assume that some of the previously designated first-stage degrees of freedom are in fact second-stage ones. PyROS handles second-stage degrees of freedom via the use of decision rules, which is controlled with the config option ``decision_rule_order`` presented above. Here, we shall select affine decision rules by setting ``decision_rule_order`` to the value of `1`. +For this next set of runs, we will +assume that some of the previously designated first-stage degrees of +freedom are in fact second-stage degrees of freedom. +PyROS handles second-stage degrees of freedom via the use of polynomial +decision rules, of which the degree is controlled through the +optional keyword argument ``decision_rule_order`` to the PyROS +:meth:`~pyomo.contrib.pyros.PyROS.solve` method. +In this example, we select affine decision rules by setting +``decision_rule_order=1``: .. doctest:: :skipif: not (baron.available() and baron.license_is_valid()) @@ -449,97 +613,127 @@ For this next set of runs, we will assume that some of the previously designated >>> # The remaining variables are implicitly designated to be state variables >>> # === Call PyROS to solve the robust optimization problem === - >>> results_2 = pyros_solver.solve(model = m, - ... first_stage_variables = first_stage_variables, - ... second_stage_variables = second_stage_variables, - ... uncertain_params = uncertain_parameters, - ... uncertainty_set = box_uncertainty_set, - ... local_solver = local_solver, - ... global_solver = global_solver, - ... options = { - ... "objective_focus": pyros.ObjectiveType.worst_case, - ... "solve_master_globally": True, - ... "decision_rule_order": 1 - ... }) + >>> results_2 = pyros_solver.solve( + ... model=m, + ... first_stage_variables=first_stage_variables, + ... second_stage_variables=second_stage_variables, + ... uncertain_params=uncertain_parameters, + ... uncertainty_set=box_uncertainty_set, + ... local_solver=local_solver, + ... global_solver=global_solver, + ... objective_focus=pyros.ObjectiveType.worst_case, + ... solve_master_globally=True, + ... decision_rule_order=1, + ... ) =========================================================================================== PyROS: Pyomo Robust Optimization Solver ... ... INFO: Robust optimal solution identified. Exiting PyROS. - >>> # === Compare final objective to the singe-stage solution - >>> two_stage_final_objective = round(pyo.value(results_2.final_objective_value),-1) - >>> percent_difference = 100 * (two_stage_final_objective - single_stage_final_objective)/(single_stage_final_objective) - >>> print("Percent objective change relative to constant decision rules objective: %.2f %%" % percent_difference) + >>> # === Compare final objective to the single-stage solution + >>> two_stage_final_objective = round( + ... pyo.value(results_2.final_objective_value), + ... -1, + ... ) + >>> percent_difference = 100 * ( + ... two_stage_final_objective - single_stage_final_objective + ... ) / (single_stage_final_objective) + >>> print("Percent objective change relative to constant decision rules " + ... f"objective: {percent_difference:.2f}") Percent objective change relative to constant decision rules objective: -24... -In this example, when we compare the final objective value in the case of constant decision rules (no second-stage recourse) -and affine decision rules, we see there is a ~25% decrease in total objective value. +For this example, we notice a ~25% decrease in the final objective +value when switching from a static decision rule (no second-stage recourse) +to an affine decision rule. The Price of Robustness """""""""""""""""""""""" -Using appropriately constructed hierarchies, PyROS allows for the facile comparison of robust optimal objectives across sets to determine the "price of robustness." -For the set we considered here, the ``BoxSet``, we can create such a hierarchy via an array of ``relative_deviation`` parameters to define the size of these uncertainty sets. -We can then loop through this array and call PyROS within a loop to identify robust solutions in light of each of the specified ``BoxSet`` objects. +In conjunction with standard Python control flow tools, +PyROS facilitates a "price of robustness" analysis for a model of interest +through the evaluation and comparison of the robust optimal +objective function value across any appropriately constructed hierarchy +of uncertainty sets. +In this example, we consider a sequence of +box uncertainty sets centered on the nominal uncertain +parameter realization, such that each box is parameterized +by a real value specifying a relative box size. +To this end, we construct an iterable called ``relative_deviation_list`` +whose entries are ``float`` values representing the relative sizes. +We then loop through ``relative_deviation_list`` so that for each relative +size, the corresponding robust optimal objective value +can be evaluated by creating an appropriate +:class:`~pyomo.contrib.pyros.uncertainty_sets.BoxSet` +instance and invoking the PyROS solver: .. code:: >>> # This takes a long time to run and therefore is not a doctest - >>> # === An array of maximum relative deviations from the nominal uncertain parameter values to utilize in constructing box sets + >>> # === An array of maximum relative deviations from the nominal uncertain + >>> # parameter values to utilize in constructing box sets >>> relative_deviation_list = [0.00, 0.10, 0.20, 0.30, 0.40] >>> # === Final robust optimal objectives >>> robust_optimal_objectives = [] >>> for relative_deviation in relative_deviation_list: # doctest: +SKIP - ... bounds = [(nominal_values[i] - relative_deviation*nominal_values[i], - ... nominal_values[i] + relative_deviation*nominal_values[i]) - ... for i in range(4)] - ... box_uncertainty_set = pyros.BoxSet(bounds = bounds) - ... results = pyros_solver.solve(model = m, - ... first_stage_variables = first_stage_variables, - ... second_stage_variables = second_stage_variables, - ... uncertain_params = uncertain_parameters, - ... uncertainty_set = box_uncertainty_set, - ... local_solver = local_solver, - ... global_solver = global_solver, - ... options = { - ... "objective_focus": pyros.ObjectiveType.worst_case, - ... "solve_master_globally": True, - ... "decision_rule_order": 1 - ... }) - ... if results.pyros_termination_condition != pyros.pyrosTerminationCondition.robust_optimal: - ... print("This instance didn't solve to robust optimality.") - ... robust_optimal_objective.append("-----") - ... else: - ... robust_optimal_objectives.append(str(results.final_objective_value)) + ... bounds = [ + ... (nominal_values[i] - relative_deviation*nominal_values[i], + ... nominal_values[i] + relative_deviation*nominal_values[i]) + ... for i in range(4) + ... ] + ... box_uncertainty_set = pyros.BoxSet(bounds = bounds) + ... results = pyros_solver.solve( + ... model=m, + ... first_stage_variables=first_stage_variables, + ... second_stage_variables=second_stage_variables, + ... uncertain_params=uncertain_parameters, + ... uncertainty_set= box_uncertainty_set, + ... local_solver=local_solver, + ... global_solver=global_solver, + ... objective_focus=pyros.ObjectiveType.worst_case, + ... solve_master_globally=True, + ... decision_rule_order=1, + ... ) + ... is_robust_optimal = ( + ... results.pyros_termination_condition + ... == pyros.pyrosTerminationCondition.robust_optimal + ... ) + ... if not is_robust_optimal: + ... print(f"Instance for relative deviation: {relative_deviation} " + ... "not solved to robust optimality.") + ... robust_optimal_objectives.append("-----") + ... else: + ... robust_optimal_objectives.append(str(results.final_objective_value)) For this example, we obtain the following price of robustness results: -.. tabularcolumns:: |c|c|c| - -+------------------------------------------+------------------------------+-----------------------------+ -| **Uncertainty Set Size (+/-)** :sup:`o` | **Robust Optimal Objective** | **% Increase** :sup:`x` | -+------------------------------------------+------------------------------+-----------------------------+ -| 0.00 | 35,837,659.18 | 0.00 % | -+------------------------------------------+------------------------------+-----------------------------+ -| 0.10 | 36,135,191.59 | 0.82 % | -+------------------------------------------+------------------------------+-----------------------------+ -| 0.20 | 36,437,979.81 | 1.64 % | -+------------------------------------------+------------------------------+-----------------------------+ -| 0.30 | 43,478,190.92 | 17.57 % | -+------------------------------------------+------------------------------+-----------------------------+ -| 0.40 | ``robust_infeasible`` | :math:`\text{-----}` | -+------------------------------------------+------------------------------+-----------------------------+ - -Note how, in the case of the last uncertainty set, we were able to utilize PyROS to show the robust infeasibility of the problem. +.. table:: Price of robustness results. + + +------------------------------------------+------------------------------+-----------------------------+ + | Uncertainty Set Size (+/-) :sup:`o` | Robust Optimal Objective | % Increase :sup:`x` | + +==========================================+==============================+=============================+ + | 0.00 | 35,837,659.18 | 0.00 % | + +------------------------------------------+------------------------------+-----------------------------+ + | 0.10 | 36,135,191.59 | 0.82 % | + +------------------------------------------+------------------------------+-----------------------------+ + | 0.20 | 36,437,979.81 | 1.64 % | + +------------------------------------------+------------------------------+-----------------------------+ + | 0.30 | 43,478,190.92 | 17.57 % | + +------------------------------------------+------------------------------+-----------------------------+ + | 0.40 | ``robust_infeasible`` | :math:`\text{-----}` | + +------------------------------------------+------------------------------+-----------------------------+ + +Notice that PyROS was successfully able to determine the robust +infeasibility of the problem under the largest uncertainty set. :sup:`o` **Relative Deviation from Nominal Realization** :sup:`x` **Relative to Deterministic Optimal Objective** -This clearly illustrates the impact that the uncertainty set size can have on the robust optimal objective values. -Price of robustness studies like this are easily implemented using PyROS. +This example clearly illustrates the potential impact of the uncertainty +set size on the robust optimal objective function value +and demonstrates the ease of implementing a price of robustness study +for a given optimization problem under uncertainty. -.. warning:: +.. note:: - PyROS is still under a beta release. Please provide feedback and/or - report any problems by opening an issue on the Pyomo - `GitHub page `_. + Please provide feedback and/or report any problems by opening an issue on + the `Pyomo GitHub page `_. diff --git a/doc/OnlineDocs/contribution_guide.rst b/doc/OnlineDocs/contribution_guide.rst index 2ebb3e1ce70..10670627546 100644 --- a/doc/OnlineDocs/contribution_guide.rst +++ b/doc/OnlineDocs/contribution_guide.rst @@ -21,18 +21,38 @@ we encourage modular/targeted commits with descriptive commit messages. Coding Standards ++++++++++++++++ - * Required: 4 space indentation (no tabs) - * Desired: PEP8 - * No use of __author__ + * Required: `black `_ + * No use of ``__author__`` * Inside ``pyomo.contrib``: Contact information for the contribution maintainer (such as a Github ID) should be included in the Sphinx documentation -Sphinx-compliant documentation is required for: +The first step of Pyomo's GitHub Actions workflow is to run +`black `_ and a +`spell-checker `_ to ensure style +guide compliance and minimize typos. Before opening a pull request, please +run: + +:: + + # Auto-apply correct formatting + pip install black + black -S -C --exclude examples/pyomobook/python-ch/BadIndent.py + # Find typos in files + conda install typos + typos --config .github/workflows/typos.toml + +If the spell-checker returns a failure for a word that is spelled correctly, +please add the word to the ``.github/workflows/typos.toml`` file. + +Online Pyomo documentation is generated using `Sphinx `_ +with the ``napoleon`` extension enabled. For API documentation we use of one of these +`supported styles for docstrings `_, +but we prefer the NumPy standard. Whichever you choose, we require compliant docstrings for: * Modules * Public and Private Classes - * Public and Private Functions + * Public and Private Functions We also encourage you to include examples, especially for new features and contributions to ``pyomo.contrib``. @@ -41,7 +61,7 @@ Testing +++++++ Pyomo uses `unittest `_, -`nose `_, +`pytest `_, `GitHub Actions `_, and Jenkins for testing and continuous integration. Submitted code should include @@ -54,7 +74,7 @@ merged. The Pyomo main branch provides a Github Actions workflow (configured in the ``.github/`` directory) that will test any changes pushed to a branch with a subset of the complete test harness that includes -multiple virtual machines (ubuntu, mac-os, windows) +multiple virtual machines (``ubuntu``, ``mac-os``, ``windows``) and multiple Python versions. For existing forks, fetch and merge your fork (and branches) with Pyomo's main. For new forks, you will need to enable GitHub Actions in the 'Actions' tab on your fork. @@ -65,10 +85,27 @@ may be opened by including '[WIP]' at the beginning of the PR title. This allows your code changes to be tested by the full suite of Pyomo's automatic testing infrastructure. Any pull requests marked '[WIP]' will not be -reviewed or merged by the core development team. In addition, any +reviewed or merged by the core development team. However, any '[WIP]' pull request left open for an extended period of time without active development may be marked 'stale' and closed. +Python Version Support +++++++++++++++++++++++ + +By policy, Pyomo supports and tests the currently supported Python versions, +as can be seen on `Status of Python Versions `_. +It is expected that tests will pass for all of the supported and tested +versions of Python, unless otherwise stated. + +At the time of the first Pyomo release after the end-of-life of a minor Python +version, we will remove testing and support for that Python version. + +This will also result in a bump in the minor Pyomo version. + +For example, assume Python 3.A is declared end-of-life while Pyomo is on +version 6.3.Y. After the release of Pyomo 6.3.(Y+1), Python 3.A will be removed, +and the next Pyomo release will be 6.4.0. + Working on Forks and Branches ----------------------------- @@ -251,13 +288,14 @@ Setting up your development environment After cloning your fork, you will want to install Pyomo from source. -Step 1 (recommended): Create a new conda environment. +Step 1 (recommended): Create a new ``conda`` environment. :: conda create --name pyomodev -You may change the environment name from ``pyomodev`` as you see fit. Then activate the environment: +You may change the environment name from ``pyomodev`` as you see fit. +Then activate the environment: :: @@ -265,9 +303,11 @@ You may change the environment name from ``pyomodev`` as you see fit. Then activ Step 2 (optional): Install PyUtilib -The hard dependency on PyUtilib was removed in Pyomo 6.0.0. There is still a soft dependency for any code related to ``pyomo.dataportal.plugins.sheet``. +The hard dependency on PyUtilib was removed in Pyomo 6.0.0. There is still a +soft dependency for any code related to ``pyomo.dataportal.plugins.sheet``. -If your contribution requires PyUtilib, you will likely need the main branch of PyUtilib to contribute. Clone a copy of the repository in a new directory: +If your contribution requires PyUtilib, you will likely need the main branch of +PyUtilib to contribute. Clone a copy of the repository in a new directory: :: @@ -287,7 +327,10 @@ Finally, move to the directory containing the clone of your Pyomo fork and run: python setup.py develop -These commands register the cloned code with the active python environment (``pyomodev``). This way, your changes to the source code for ``pyomo`` are automatically used by the active environment. You can create another conda environment to switch to alternate versions of pyomo (e.g., stable). +These commands register the cloned code with the active python environment +(``pyomodev``). This way, your changes to the source code for ``pyomo`` are +automatically used by the active environment. You can create another conda +environment to switch to alternate versions of pyomo (e.g., stable). Review Process -------------- @@ -331,9 +374,11 @@ of third-party contributions that enhance Pyomo's core functionality. The are two ways that ``pyomo.contrib`` can be used to integrate third-party packages: -* ``pyomo.contrib`` can provide wrappers for separate Python packages, thereby allowing these packages to be imported as subpackages of pyomo. +* ``pyomo.contrib`` can provide wrappers for separate Python packages, thereby + allowing these packages to be imported as subpackages of pyomo. -* ``pyomo.contrib`` can include contributed packages that are developed and maintained outside of the Pyomo developer team. +* ``pyomo.contrib`` can include contributed packages that are developed and + maintained outside of the Pyomo developer team. Including contrib packages in the Pyomo source tree provides a convenient mechanism for defining new functionality that can be diff --git a/doc/OnlineDocs/developer_reference/deprecation.rst b/doc/OnlineDocs/developer_reference/deprecation.rst new file mode 100644 index 00000000000..7fc5ec2b0ff --- /dev/null +++ b/doc/OnlineDocs/developer_reference/deprecation.rst @@ -0,0 +1,62 @@ +Deprecation and Removal of Functionality +======================================== + +During the course of development, there may be cases where it becomes +necessary to deprecate or remove functionality from the standard Pyomo +offering. + +Deprecation +----------- + +We offer a set of tools to help with deprecation in +``pyomo.common.deprecation``. + +By policy, when deprecating or moving an existing capability, one of the +following utilities should be leveraged. Each has a required +``version`` argument that should be set to current development version (e.g., +``"6.6.2.dev0"``). This version will be updated to the next actual +release as part of the Pyomo release process. The current development version +can be found by running ``pyomo --version`` on your local fork/branch. + +.. currentmodule:: pyomo.common.deprecation + +.. autosummary:: + + deprecated + deprecation_warning + relocated_module + relocated_module_attribute + RenamedClass + +.. autodecorator:: pyomo.common.deprecation.deprecated + :noindex: + +.. autofunction:: pyomo.common.deprecation.deprecation_warning + :noindex: + +.. autofunction:: pyomo.common.deprecation.relocated_module + :noindex: + +.. autofunction:: pyomo.common.deprecation.relocated_module_attribute + :noindex: + +.. autoclass:: pyomo.common.deprecation.RenamedClass + :noindex: + + +Removal +------- + +By policy, functionality should be deprecated with reasonable +warning, pending extenuating circumstances. The functionality should +be deprecated, following the information above. + +If the functionality is documented in the most recent +edition of [`Pyomo - Optimization Modeling in Python`_], it may not be removed +until the next major version release. + +.. _Pyomo - Optimization Modeling in Python: https://doi.org/10.1007/978-3-030-68928-5 + +For other functionality, it is preferred that ample time is given +before removing the functionality. At minimum, significant functionality +removal will result in a minor version bump. diff --git a/doc/OnlineDocs/developer_reference/expressions/design.rst b/doc/OnlineDocs/developer_reference/expressions/design.rst index f21e677cfc2..0d90b09953c 100644 --- a/doc/OnlineDocs/developer_reference/expressions/design.rst +++ b/doc/OnlineDocs/developer_reference/expressions/design.rst @@ -15,7 +15,7 @@ Most Pyomo expression trees have the following form 1. Interior nodes are objects that inherit from the :class:`ExpressionBase ` class. These objects typically have one or more child nodes. Linear expression nodes do not have child nodes, but they are treated as interior nodes in the expression tree because they references other leaf nodes. -2. Leaf nodes are numeric values, parameter components and variable components, which represent the *inputs* to the expresion. +2. Leaf nodes are numeric values, parameter components and variable components, which represent the *inputs* to the expression. Expression Classes ------------------ @@ -41,7 +41,7 @@ logical relationships, which are summarized in the following table: ==================== ==================================== ======================================================================================== Operation Example Pyomo Class ==================== ==================================== ======================================================================================== -exernal function ``myfunc(x,y,z)`` :class:`ExternalFunctionExpression ` +external function ``myfunc(x,y,z)`` :class:`ExternalFunctionExpression ` logical if-then-else ``Expr_if(IF=x, THEN=y, ELSE=z)`` :class:`Expr_ifExpression ` intrinsic function ``sin(x)`` :class:`UnaryFunctionExpression ` absolute function ``abs(x)`` :class:`AbsExpression ` @@ -119,7 +119,7 @@ the named expression. .. note:: The named expression classes are not implemented as sub-classes - of :class:`ExpressionBase `. + of :class:`NumericExpression `. This reflects design constraints related to the fact that these are modeling components that belong to class hierarchies other than the expression class hierarchy, and Pyomo's design prohibits @@ -149,7 +149,7 @@ Pyomo does not have a binary sum expression class. Instead, it has an ``n``-ary summation class, :class:`SumExpression `. This expression class treats sums as ``n``-ary sums for efficiency reasons; many large -optimization models contain large sums. But note tht this class +optimization models contain large sums. But note that this class maintains the immutability property described above. This class shares an underlying list of arguments with other :class:`SumExpression ` objects. A particular diff --git a/doc/OnlineDocs/developer_reference/expressions/index.rst b/doc/OnlineDocs/developer_reference/expressions/index.rst index 62220d300b0..769639d50eb 100644 --- a/doc/OnlineDocs/developer_reference/expressions/index.rst +++ b/doc/OnlineDocs/developer_reference/expressions/index.rst @@ -10,7 +10,7 @@ Pyomo Expressions This documentation does not explicitly reference objects in pyomo.core.kernel. While the Pyomo5 expression system works with pyomo.core.kernel objects, the documentation of these - documents was not sufficient to appropriately descibe the use + documents was not sufficient to appropriately describe the use of kernel objects in expressions. Pyomo supports the declaration of symbolic expressions that represent diff --git a/doc/OnlineDocs/developer_reference/expressions/performance.rst b/doc/OnlineDocs/developer_reference/expressions/performance.rst index e8d7fa2679f..2ed5dd0744b 100644 --- a/doc/OnlineDocs/developer_reference/expressions/performance.rst +++ b/doc/OnlineDocs/developer_reference/expressions/performance.rst @@ -50,7 +50,7 @@ For example, consider the following quadratic polynomial: .. literalinclude:: ../../tests/expr/performance_loop3.spy This quadratic polynomial is treated as a nonlinear expression -unless the expression is explicilty processed to identify quadratic +unless the expression is explicitly processed to identify quadratic terms. This *lazy* identification of of quadratic terms allows Pyomo to tailor the search for quadratic terms only when they are explicitly needed. diff --git a/doc/OnlineDocs/developer_reference/index.rst b/doc/OnlineDocs/developer_reference/index.rst index e8a0e29d122..8c29150015c 100644 --- a/doc/OnlineDocs/developer_reference/index.rst +++ b/doc/OnlineDocs/developer_reference/index.rst @@ -10,4 +10,5 @@ scripts using Pyomo. :maxdepth: 1 config.rst + deprecation.rst expressions/index.rst diff --git a/doc/OnlineDocs/errors.rst b/doc/OnlineDocs/errors.rst index 784754797b3..162c2e10257 100644 --- a/doc/OnlineDocs/errors.rst +++ b/doc/OnlineDocs/errors.rst @@ -177,7 +177,7 @@ this warning (and an exception from the converter): ... TypeError: Cannot create a Set from data that does not support __contains__... ERROR (E2001): 5 is not a valid domain. Variable domains must be an instance - of a Pyomo Set or convertable to a Pyomo Set. + of a Pyomo Set or convertible to a Pyomo Set. See also https://pyomo.readthedocs.io/en/stable/errors.html#e2001 diff --git a/doc/OnlineDocs/index.rst b/doc/OnlineDocs/index.rst index b3d6e754de0..ef986a3429f 100644 --- a/doc/OnlineDocs/index.rst +++ b/doc/OnlineDocs/index.rst @@ -18,6 +18,7 @@ with a diverse set of optimization capabilities. solving_pyomo_models.rst working_models.rst working_abstractmodels/index.rst + model_transformations/index.rst modeling_extensions/index.rst tutorial_examples.rst model_debugging/index.rst diff --git a/doc/OnlineDocs/installation.rst b/doc/OnlineDocs/installation.rst index 700cc7011fe..f833f1c4db5 100644 --- a/doc/OnlineDocs/installation.rst +++ b/doc/OnlineDocs/installation.rst @@ -3,9 +3,11 @@ Installation Pyomo currently supports the following versions of Python: -* CPython: 3.7, 3.8, 3.9, 3.10 +* CPython: 3.7, 3.8, 3.9, 3.10, 3.11 * PyPy: 3 +At the time of the first Pyomo release after the end-of-life of a minor Python +version, Pyomo will remove testing for that Python version. Using CONDA ~~~~~~~~~~~ diff --git a/doc/OnlineDocs/library_reference/aml/index.rst b/doc/OnlineDocs/library_reference/aml/index.rst index d104e3dceb8..f06ca35b087 100644 --- a/doc/OnlineDocs/library_reference/aml/index.rst +++ b/doc/OnlineDocs/library_reference/aml/index.rst @@ -20,6 +20,7 @@ through the `pyomo.environ` namespace. Constraint ExternalFunction Reference + SOSConstraint AML Component Documentation @@ -77,3 +78,8 @@ AML Component Documentation :members: :inherited-members: +.. autoclass:: SOSConstraint + :show-inheritance: + :members: + :inherited-members: + diff --git a/doc/OnlineDocs/library_reference/appsi/appsi.rst b/doc/OnlineDocs/library_reference/appsi/appsi.rst index 1881a8db67c..e26e4b0e82a 100644 --- a/doc/OnlineDocs/library_reference/appsi/appsi.rst +++ b/doc/OnlineDocs/library_reference/appsi/appsi.rst @@ -81,8 +81,26 @@ attribute. For example: Installation ------------ +There are a few ways to install Appsi listed below. + +Option1: + +.. code-block:: + + pyomo build-extensions + +Option2: .. code-block:: cd pyomo/contrib/appsi/ python build.py + +Option3: + +.. code-block:: + + python + >>> from pyomo.contrib.appsi.build import build_appsi + >>> build_appsi() + diff --git a/doc/OnlineDocs/library_reference/appsi/appsi.solvers.gurobi.rst b/doc/OnlineDocs/library_reference/appsi/appsi.solvers.gurobi.rst index cd83eed6e89..9e0af041410 100644 --- a/doc/OnlineDocs/library_reference/appsi/appsi.solvers.gurobi.rst +++ b/doc/OnlineDocs/library_reference/appsi/appsi.solvers.gurobi.rst @@ -1,6 +1,47 @@ Gurobi ====== + +Handling Gurobi licenses through the APPSI interface +---------------------------------------------------- + +In order to obtain performance benefits when re-solving a Pyomo model +with Gurobi repeatedly, Pyomo has to keep a reference to a gurobipy +model between calls to +:py:meth:`~pyomo.contrib.appsi.solvers.gurobi.Gurobi.solve()`. Depending +on the Gurobi license type, this may "consume" a license as long as +any APPSI-Gurobi interface exists (i.e., has not been garbage +collected). To release a Gurobi license for other processes, use the +:py:meth:`~pyomo.contrib.appsi.solvers.gurobi.Gurobi.release_license()` +method as shown below. Note that +:py:meth:`~pyomo.contrib.appsi.solvers.gurobi.Gurobi.release_license()` +must be called on every instance for this to actually release the +license. However, releasing the license will delete the gurobipy model +which will have to be reconstructed from scratch the next time +:py:meth:`~pyomo.contrib.appsi.solvers.gurobi.Gurobi.solve()` is +called, negating any performance benefit of the persistent solver +interface. + +.. code-block:: python + + >>> opt = appsi.solvers.Gurobi() # doctest: +SKIP + >>> results = opt.solve(model) # doctest: +SKIP + >>> opt.release_license() # doctest: +SKIP + + +Also note that both the +:py:meth:`~pyomo.contrib.appsi.solvers.gurobi.Gurobi.available()` and +:py:meth:`~pyomo.contrib.appsi.solvers.gurobi.Gurobi.solve()` methods +will construct a gurobipy model, thereby (depending on the type of +license) "consuming" a license. The +:py:meth:`~pyomo.contrib.appsi.solvers.gurobi.Gurobi.available()` +method has to do this so that the availability does not change between +calls to +:py:meth:`~pyomo.contrib.appsi.solvers.gurobi.Gurobi.available()` and +:py:meth:`~pyomo.contrib.appsi.solvers.gurobi.Gurobi.solve()`, leading +to unexpected errors. + + .. autoclass:: pyomo.contrib.appsi.solvers.gurobi.GurobiResults :members: :inherited-members: diff --git a/doc/OnlineDocs/library_reference/appsi/appsi.solvers.highs.rst b/doc/OnlineDocs/library_reference/appsi/appsi.solvers.highs.rst new file mode 100644 index 00000000000..f2f72d0ad85 --- /dev/null +++ b/doc/OnlineDocs/library_reference/appsi/appsi.solvers.highs.rst @@ -0,0 +1,14 @@ +HiGHS +===== + +.. autoclass:: pyomo.contrib.appsi.solvers.highs.HighsResults + :members: + :inherited-members: + :undoc-members: + :show-inheritance: + +.. autoclass:: pyomo.contrib.appsi.solvers.highs.Highs + :members: + :inherited-members: + :undoc-members: + :show-inheritance: diff --git a/doc/OnlineDocs/library_reference/appsi/appsi.solvers.rst b/doc/OnlineDocs/library_reference/appsi/appsi.solvers.rst index 5b5664290bf..1c598d95628 100644 --- a/doc/OnlineDocs/library_reference/appsi/appsi.solvers.rst +++ b/doc/OnlineDocs/library_reference/appsi/appsi.solvers.rst @@ -12,3 +12,4 @@ Solvers appsi.solvers.ipopt appsi.solvers.cplex appsi.solvers.cbc + appsi.solvers.highs diff --git a/doc/OnlineDocs/library_reference/common/config.rst b/doc/OnlineDocs/library_reference/common/config.rst index bca74a76694..7a400b26ce3 100644 --- a/doc/OnlineDocs/library_reference/common/config.rst +++ b/doc/OnlineDocs/library_reference/common/config.rst @@ -12,11 +12,21 @@ Core classes ConfigList ConfigValue +Utilities +~~~~~~~~~ + +.. autosummary:: + + document_kwargs_from_configdict + + Domain validators ~~~~~~~~~~~~~~~~~ .. autosummary:: + Bool + Integer PositiveInt NegativeInt NonNegativeInt @@ -27,6 +37,8 @@ Domain validators NonNegativeFloat In InEnum + ListOf + Module Path PathList DynamicImplicitDomain @@ -50,10 +62,10 @@ Domain validators :members: :undoc-members: -.. autoclass:: DynamicImplicitDomain - :members: - :undoc-members: +.. autodecorator:: document_kwargs_from_configdict +.. autofunction:: Bool +.. autofunction:: Integer .. autofunction:: PositiveInt .. autofunction:: NegativeInt .. autofunction:: NonNegativeInt @@ -64,5 +76,8 @@ Domain validators .. autofunction:: NonNegativeFloat .. autoclass:: In .. autoclass:: InEnum +.. autoclass:: ListOf +.. autoclass:: Module .. autoclass:: Path .. autoclass:: PathList +.. autoclass:: DynamicImplicitDomain diff --git a/doc/OnlineDocs/library_reference/common/errors.rst b/doc/OnlineDocs/library_reference/common/errors.rst new file mode 100644 index 00000000000..7b2bd01fe32 --- /dev/null +++ b/doc/OnlineDocs/library_reference/common/errors.rst @@ -0,0 +1,6 @@ +pyomo.common.errors +=================== + +.. automodule:: pyomo.common.errors + :members: + :member-order: bysource diff --git a/doc/OnlineDocs/library_reference/common/index.rst b/doc/OnlineDocs/library_reference/common/index.rst index bb14ff84660..c9c99008250 100644 --- a/doc/OnlineDocs/library_reference/common/index.rst +++ b/doc/OnlineDocs/library_reference/common/index.rst @@ -1,7 +1,7 @@ Common Utilities ================ -Pyomo provides a set of general-purpose utilites through +Pyomo provides a set of general-purpose utilities through ``pyomo.common``. These utilities are self-contained and do not import or rely on any other parts of Pyomo. @@ -11,6 +11,7 @@ or rely on any other parts of Pyomo. config.rst dependencies.rst deprecation.rst + errors.rst fileutils.rst formatting.rst tempfiles.rst diff --git a/doc/OnlineDocs/library_reference/expressions/classes.rst b/doc/OnlineDocs/library_reference/expressions/classes.rst index 0402bbe465c..9374272f501 100644 --- a/doc/OnlineDocs/library_reference/expressions/classes.rst +++ b/doc/OnlineDocs/library_reference/expressions/classes.rst @@ -4,12 +4,12 @@ Core Classes The following are the two core classes documented here: * :class:`NumericValue` - * :class:`ExpressionBase` + * :class:`NumericExpression` The remaining classes are the public classes for expressions, which developers may need to know about. The methods for these classes are not documented because they are described in the -:class:`ExpressionBase` class. +:class:`NumericExpression` class. Sets with Expression Types -------------------------- @@ -21,15 +21,15 @@ Pyomo expressions. .. autodata:: pyomo.core.expr.numvalue.native_types .. autodata:: pyomo.core.expr.numvalue.nonpyomo_leaf_types -NumericValue and ExpressionBase -------------------------------- +NumericValue and NumericExpression +---------------------------------- .. autoclass:: pyomo.core.expr.numvalue.NumericValue :members: :special-members: :private-members: -.. autoclass:: pyomo.core.expr.current.ExpressionBase +.. autoclass:: pyomo.core.expr.current.NumericExpression :members: :show-inheritance: :special-members: diff --git a/doc/OnlineDocs/library_reference/kernel/examples/aml_example.py b/doc/OnlineDocs/library_reference/kernel/examples/aml_example.py index fb8fd2748c8..146048a6046 100644 --- a/doc/OnlineDocs/library_reference/kernel/examples/aml_example.py +++ b/doc/OnlineDocs/library_reference/kernel/examples/aml_example.py @@ -1,5 +1,6 @@ # @Import_Syntax import pyomo.environ as aml + # @Import_Syntax datafile = None @@ -7,8 +8,7 @@ # @AbstractModels m = aml.AbstractModel() # ... define model ... -instance = \ - m.create_instance(datafile) +instance = m.create_instance(datafile) # @AbstractModels del datafile @@ -19,29 +19,27 @@ # @ConcreteModels - # @Sets_1 -m.s = aml.Set(initialize=[1,2], - ordered=True) +m.s = aml.Set(initialize=[1, 2], ordered=True) # @Sets_1 # @Sets_2 # [1,2,3] -m.q = aml.RangeSet(1,3) +m.q = aml.RangeSet(1, 3) # @Sets_2 - # @Parameters_single -m.p = aml.Param(mutable=True, - initialize=0) +m.p = aml.Param(mutable=True, initialize=0) + + # @Parameters_single # @Parameters_dict # pd[1] = 0, pd[2] = 1 def pd_(m, i): return m.s.ord(i) - 1 -m.pd = aml.Param(m.s, - mutable=True, - rule=pd_) + + +m.pd = aml.Param(m.s, mutable=True, rule=pd_) # @Parameters_dict # @Parameters_list @@ -53,36 +51,37 @@ def pd_(m, i): # @Parameters_list - # @Variables_single -m.v = aml.Var(initialize=1.0, - bounds=(1,4)) +m.v = aml.Var(initialize=1.0, bounds=(1, 4)) # @Variables_single # @Variables_dict -m.vd = aml.Var(m.s, - bounds=(None,9)) +m.vd = aml.Var(m.s, bounds=(None, 9)) + # @Variables_dict # @Variables_list # used 1-based indexing def vl_(m, i): return (i, None) + + m.vl = aml.VarList(bounds=vl_) for j in m.q: m.vl.add() # @Variables_list # @Constraints_single -m.c = aml.Constraint(expr=\ - sum(m.vd.values()) <= 9) +m.c = aml.Constraint(expr=sum(m.vd.values()) <= 9) + + # @Constraints_single # @Constraints_dict -def cd_(m,i,j): +def cd_(m, i, j): return m.vd[i] == j -m.cd = aml.Constraint(m.s, - m.q, - rule=cd_) + + +m.cd = aml.Constraint(m.s, m.q, rule=cd_) # @Constraints_dict @@ -90,23 +89,21 @@ def cd_(m,i,j): # uses 1-based indexing m.cl = aml.ConstraintList() for j in m.q: - m.cl.add( - aml.inequality( - -5, - m.vl[j]-m.v, - 5)) + m.cl.add(aml.inequality(-5, m.vl[j] - m.v, 5)) # @Constraints_list - # @Expressions_single m.e = aml.Expression(expr=-m.v) + + # @Expressions_single # @Expressions_dict def ed_(m, i): return -m.vd[i] -m.ed = aml.Expression(m.s, - rule=ed_) + + +m.ed = aml.Expression(m.s, rule=ed_) # @Expressions_dict # @Expressions_list @@ -117,15 +114,17 @@ def ed_(m, i): # @Expressions_list - # @Objectives_single m.o = aml.Objective(expr=-m.v) + + # @Objectives_single # @Objectives_dict def od_(m, i): return -m.vd[i] -m.od = aml.Objective(m.s, - rule=od_) + + +m.od = aml.Objective(m.s, rule=od_) # @Objectives_dict # @Objectives_list # uses 1-based indexing @@ -136,14 +135,11 @@ def od_(m, i): # @Objectives_list - # @SOS_single -m.sos1 = aml.SOSConstraint( - var=m.vl, - level=1) -m.sos2 = aml.SOSConstraint( - var=m.vd, - level=2) +m.sos1 = aml.SOSConstraint(var=m.vl, level=1) +m.sos2 = aml.SOSConstraint(var=m.vd, level=2) + + # @SOS_single # @SOS_dict def sd_(m, i): @@ -152,10 +148,9 @@ def sd_(m, i): elif i == 2: t = list(m.vl.values()) return t -m.sd = aml.SOSConstraint( - [1,2], - rule=sd_, - level=1) + + +m.sd = aml.SOSConstraint([1, 2], rule=sd_, level=1) # @SOS_dict # @SOS_list @@ -166,10 +161,8 @@ def sd_(m, i): # @SOS_list - # @Suffix_single -m.dual = aml.Suffix( - direction=aml.Suffix.IMPORT) +m.dual = aml.Suffix(direction=aml.Suffix.IMPORT) # @Suffix_single # @Suffix_dict # @@ -178,20 +171,12 @@ def sd_(m, i): # @Suffix_dict - # @Piecewise_1d -breakpoints = [1,2,3,4] -values = [1,2,1,2] +breakpoints = [1, 2, 3, 4] +values = [1, 2, 1, 2] m.f = aml.Var() -m.pw = aml.Piecewise( - m.f, - m.v, - pw_pts=breakpoints, - f_rule=values, - pw_constr_type='EQ') +m.pw = aml.Piecewise(m.f, m.v, pw_pts=breakpoints, f_rule=values, pw_constr_type='EQ') # @Piecewise_1d - m.pprint() - diff --git a/doc/OnlineDocs/library_reference/kernel/examples/conic.py b/doc/OnlineDocs/library_reference/kernel/examples/conic.py index be3fcb4d7df..9282bc67f9a 100644 --- a/doc/OnlineDocs/library_reference/kernel/examples/conic.py +++ b/doc/OnlineDocs/library_reference/kernel/examples/conic.py @@ -1,24 +1,22 @@ # @Class import pyomo.kernel as pmo + m = pmo.block() m.x1 = pmo.variable(lb=0) m.x2 = pmo.variable() m.r = pmo.variable(lb=0) -m.q = pmo.conic.primal_exponential( - x1=m.x1, - x2=m.x2, - r=m.r) +m.q = pmo.conic.primal_exponential(x1=m.x1, x2=m.x2, r=m.r) # @Class del m # @Domain import pyomo.kernel as pmo import math + m = pmo.block() m.x = pmo.variable(lb=0) m.y = pmo.variable(lb=0) m.b = pmo.conic.primal_exponential.as_domain( - x1=math.sqrt(2)*m.x, - x2=2.0, - r=2*(m.x + m.y)) + x1=math.sqrt(2) * m.x, x2=2.0, r=2 * (m.x + m.y) +) # @Domain diff --git a/doc/OnlineDocs/library_reference/kernel/examples/kernel_example.py b/doc/OnlineDocs/library_reference/kernel/examples/kernel_example.py index 997ffeafcad..1caf064bb2a 100644 --- a/doc/OnlineDocs/library_reference/kernel/examples/kernel_example.py +++ b/doc/OnlineDocs/library_reference/kernel/examples/kernel_example.py @@ -1,14 +1,18 @@ # @Import_Syntax import pyomo.kernel as pmo + # @Import_Syntax data = None + # @AbstractModels def create(data): instance = pmo.block() # ... define instance ... return instance + + instance = create(data) # @AbstractModels del data @@ -19,9 +23,8 @@ def create(data): # @ConcreteModels - # @Sets_1 -m.s = [1,2] +m.s = [1, 2] # @Sets_1 # @Sets_2 @@ -30,7 +33,6 @@ def create(data): # @Sets_2 - # @Parameters_single m.p = pmo.parameter(0) @@ -38,7 +40,7 @@ def create(data): # @Parameters_dict # pd[1] = 0, pd[2] = 1 m.pd = pmo.parameter_dict() -for k,i in enumerate(m.s): +for k, i in enumerate(m.s): m.pd[i] = pmo.parameter(k) @@ -48,16 +50,12 @@ def create(data): # pl[0] = 0, pl[0] = 1, ... m.pl = pmo.parameter_list() for j in m.q: - m.pl.append( - pmo.parameter(j)) + m.pl.append(pmo.parameter(j)) # @Parameters_list - # @Variables_single -m.v = pmo.variable(value=1, - lb=1, - ub=4) +m.v = pmo.variable(value=1, lb=1, ub=4) # @Variables_single # @Variables_dict m.vd = pmo.variable_dict() @@ -68,77 +66,60 @@ def create(data): # used 0-based indexing m.vl = pmo.variable_list() for j in m.q: - m.vl.append( - pmo.variable(lb=i)) + m.vl.append(pmo.variable(lb=i)) # @Variables_list - # @Constraints_single -m.c = pmo.constraint( - sum(m.vd.values()) <= 9) +m.c = pmo.constraint(sum(m.vd.values()) <= 9) # @Constraints_single # @Constraints_dict m.cd = pmo.constraint_dict() for i in m.s: for j in m.q: - m.cd[i,j] = \ - pmo.constraint( - body=m.vd[i], - rhs=j) + m.cd[i, j] = pmo.constraint(body=m.vd[i], rhs=j) # @Constraints_dict # @Constraints_list # uses 0-based indexing m.cl = pmo.constraint_list() for j in m.q: - m.cl.append( - pmo.constraint( - lb=-5, - body=m.vl[j]-m.v, - ub=5)) + m.cl.append(pmo.constraint(lb=-5, body=m.vl[j] - m.v, ub=5)) # @Constraints_list - # @Expressions_single m.e = pmo.expression(-m.v) # @Expressions_single # @Expressions_dict m.ed = pmo.expression_dict() for i in m.s: - m.ed[i] = \ - pmo.expression(-m.vd[i]) + m.ed[i] = pmo.expression(-m.vd[i]) # @Expressions_dict # @Expressions_list # uses 0-based indexed m.el = pmo.expression_list() for j in m.q: - m.el.append( - pmo.expression(-m.vl[j])) + m.el.append(pmo.expression(-m.vl[j])) # @Expressions_list - # @Objectives_single m.o = pmo.objective(-m.v) # @Objectives_single # @Objectives_dict m.od = pmo.objective_dict() for i in m.s: - m.od[i] = \ - pmo.objective(-m.vd[i]) + m.od[i] = pmo.objective(-m.vd[i]) # @Objectives_dict # @Objectives_list # uses 0-based indexing m.ol = pmo.objective_list() for j in m.q: - m.ol.append( - pmo.objective(-m.vl[j])) + m.ol.append(pmo.objective(-m.vl[j])) # @Objectives_list - # @SOS_single m.sos1 = pmo.sos1(m.vd.values()) @@ -153,46 +134,30 @@ def create(data): m.sd[2] = pmo.sos1(m.vl) - - - - - # @SOS_dict # @SOS_list # uses 0-based indexing m.sl = pmo.sos_list() for i in m.s: - m.sl.append(pmo.sos1( - [m.vl[i], m.vd[i]])) + m.sl.append(pmo.sos1([m.vl[i], m.vd[i]])) # @SOS_list - # @Suffix_single -m.dual = pmo.suffix( - direction=pmo.suffix.IMPORT) +m.dual = pmo.suffix(direction=pmo.suffix.IMPORT) # @Suffix_single # @Suffix_dict m.suffixes = pmo.suffix_dict() -m.suffixes['dual'] = pmo.suffix( - direction=pmo.suffix.IMPORT) +m.suffixes['dual'] = pmo.suffix(direction=pmo.suffix.IMPORT) # @Suffix_dict - # @Piecewise_1d -breakpoints = [1,2,3,4] -values = [1,2,1,2] +breakpoints = [1, 2, 3, 4] +values = [1, 2, 1, 2] m.f = pmo.variable() -m.pw = pmo.piecewise( - breakpoints, - values, - input=m.v, - output=m.f, - bound='eq') +m.pw = pmo.piecewise(breakpoints, values, input=m.v, output=m.f, bound='eq') # @Piecewise_1d - pmo.pprint(m) diff --git a/doc/OnlineDocs/library_reference/kernel/examples/kernel_subclassing.py b/doc/OnlineDocs/library_reference/kernel/examples/kernel_subclassing.py index 5333fa7faff..c21c6dc890b 100644 --- a/doc/OnlineDocs/library_reference/kernel/examples/kernel_subclassing.py +++ b/doc/OnlineDocs/library_reference/kernel/examples/kernel_subclassing.py @@ -1,8 +1,10 @@ import pyomo.kernel + # @Nonnegative class NonNegativeVariable(pyomo.kernel.variable): """A non-negative variable.""" + __slots__ = () def __init__(self, **kwds): @@ -19,45 +21,57 @@ def __init__(self, **kwds): def lb(self): # calls the base class property getter return pyomo.kernel.variable.lb.fget(self) + @lb.setter def lb(self, lb): if lb < 0: raise ValueError("lower bound must be non-negative") # calls the base class property setter pyomo.kernel.variable.lb.fset(self, lb) + + # @Nonnegative + # @Point class Point(pyomo.kernel.variable_tuple): """A 3-dimensional point in Cartesian space with the z coordinate restricted to non-negative values.""" + __slots__ = () def __init__(self): super(Point, self).__init__( - (pyomo.kernel.variable(), - pyomo.kernel.variable(), - NonNegativeVariable())) + (pyomo.kernel.variable(), pyomo.kernel.variable(), NonNegativeVariable()) + ) + @property def x(self): return self[0] + @property def y(self): return self[1] + @property def z(self): return self[2] + + # @Point + # @SOC class SOC(pyomo.kernel.constraint): """A convex second-order cone constraint""" + __slots__ = () def __init__(self, point): assert isinstance(point.z, NonNegativeVariable) - super(SOC, self).__init__( - point.x**2 + point.y**2 <= point.z**2) + super(SOC, self).__init__(point.x**2 + point.y**2 <= point.z**2) + + # @SOC # @Usage diff --git a/doc/OnlineDocs/library_reference/kernel/examples/transformer.py b/doc/OnlineDocs/library_reference/kernel/examples/transformer.py index 63f5e851213..3d8449a191d 100644 --- a/doc/OnlineDocs/library_reference/kernel/examples/transformer.py +++ b/doc/OnlineDocs/library_reference/kernel/examples/transformer.py @@ -3,45 +3,53 @@ import pympler.asizeof + def _fmt(num, suffix='B'): """format memory output""" if num is None: return "" - for unit in ['','K','M','G','T','P','E','Z']: + for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 1000.0: return "%3.1f %s%s" % (num, unit, suffix) num /= 1000.0 return "%.1f %s%s" % (num, 'Yi', suffix) + # @kernel class Transformer(pyomo.kernel.block): def __init__(self): - super(Transformer,self).__init__() + super(Transformer, self).__init__() self._a = pyomo.kernel.parameter() self._v_in = pyomo.kernel.expression() self._v_out = pyomo.kernel.expression() - self._c = pyomo.kernel.constraint( - self._a * self._v_out == self._v_in) + self._c = pyomo.kernel.constraint(self._a * self._v_out == self._v_in) + def set_ratio(self, a): assert a > 0 self._a.value = a + def connect_v_in(self, v_in): self._v_in.expr = v_in + def connect_v_out(self, v_out): self._v_out.expr = v_out + + # @kernel print(_fmt(pympler.asizeof.asizeof(Transformer()))) + # @aml def Transformer(): b = pyomo.environ.Block(concrete=True) b._a = pyomo.environ.Param(mutable=True) b._v_in = pyomo.environ.Expression() b._v_out = pyomo.environ.Expression() - b._c = pyomo.environ.Constraint(expr=\ - b._a * b._v_out == b._v_in) + b._c = pyomo.environ.Constraint(expr=b._a * b._v_out == b._v_in) return b + + # @aml print(_fmt(pympler.asizeof.asizeof(Transformer()))) diff --git a/doc/OnlineDocs/library_reference/solvers/gurobi_direct.rst b/doc/OnlineDocs/library_reference/solvers/gurobi_direct.rst new file mode 100644 index 00000000000..21cb79e5531 --- /dev/null +++ b/doc/OnlineDocs/library_reference/solvers/gurobi_direct.rst @@ -0,0 +1,18 @@ +GurobiDirect +============ + +.. currentmodule:: pyomo.solvers.plugins.solvers.gurobi_direct + +Methods +------- + +.. autosummary:: + + GurobiDirect.available + GurobiDirect.close + GurobiDirect.close_global + GurobiDirect.solve + GurobiDirect.version + +.. autoclass:: GurobiDirect + :members: available, close, close_global, solve, version diff --git a/doc/OnlineDocs/library_reference/solvers/index.rst b/doc/OnlineDocs/library_reference/solvers/index.rst index b2e215c3070..400032df076 100644 --- a/doc/OnlineDocs/library_reference/solvers/index.rst +++ b/doc/OnlineDocs/library_reference/solvers/index.rst @@ -6,5 +6,6 @@ Solver Interfaces gams.rst cplex_persistent.rst + gurobi_direct.rst gurobi_persistent.rst xpress_persistent.rst diff --git a/doc/OnlineDocs/model_transformations/index.rst b/doc/OnlineDocs/model_transformations/index.rst new file mode 100644 index 00000000000..462538128e7 --- /dev/null +++ b/doc/OnlineDocs/model_transformations/index.rst @@ -0,0 +1,7 @@ +Model Transformations +===================== + +.. toctree:: + :maxdepth: 1 + + scaling.rst diff --git a/doc/OnlineDocs/model_transformations/scaling.rst b/doc/OnlineDocs/model_transformations/scaling.rst new file mode 100644 index 00000000000..180f1e0205b --- /dev/null +++ b/doc/OnlineDocs/model_transformations/scaling.rst @@ -0,0 +1,41 @@ +Model Scaling Transformation +============================ + +Good scaling of models can greatly improve the numerical properties of a problem and thus increase reliability and convergence. The ``core.scale_model`` transformation allows users to separate scaling of a model from the declaration of the model variables and constraints which allows for models to be written in more natural forms and to be scaled and rescaled as required without having to rewrite the model code. + +.. autoclass:: pyomo.core.plugins.transform.scaling.ScaleModel + :members: + +Setting Scaling Factors +----------------------- + +Scaling factors for components in a model are declared using :ref:`Suffixes`, as shown in the example above. In order to define a scaling factor for a component, a ``Suffix`` named ``scaling_factor`` must first be created to hold the scaling factor(s). Scaling factor suffixes can be declared at any level of the model hierarchy, but scaling factors declared on the higher-level ``models`` or ``Blocks`` take precedence over those declared at lower levels. + +Scaling suffixes are dict-like where each key is a Pyomo component and the value is the scaling factor to be applied to that component. + +In the case of indexed components, scaling factors can either be declared for an individual index or for the indexed component as a whole (with scaling factors for individual indices taking precedence over overall scaling factors). + +.. note:: + + In the case that a scaling factor is declared for a component on at multiple levels of the hierarchy, the highest level scaling factor will be applied. + +.. note:: + + It is also possible (but not encouraged) to define a "default" scaling factor to be applied to any component for which a specific scaling factor has not been declared by setting a entry in a Suffix with a key of ``None``. In this case, the default value declared closest to the component to be scaled will be used (i.e., the first default value found when walking up the model hierarchy). + +Applying Model Scaling +---------------------- + +The ``core.scale_model`` transformation provides two approaches for creating a scaled model. + +In-Place Scaling +**************** + +The ``apply_to(model)`` method can be used to apply scaling directly to an existing model. When using this method, all the variables, constraints and objectives within the target model are replaced with new scaled components and the appropriate scaling factors applied. The model can then be sent to a solver as usual, however the results will be in terms of the scaled components and must be un-scaled by the user. + +Creating a New Scaled Model +*************************** + +Alternatively, the ``create_using(model)`` method can be used to create a new, scaled version of the model which can be solved. In this case, a clone of the original model is generated with the variables, constraints and objectives replaced by scaled equivalents. Users can then send the scaled model to a solver after which the ``propagate_solution`` method can be used to map the scaled solution back onto the original model for further analysis. + +The advantage of this approach is that the original model is maintained separately from the scaled model, which facilitates rescaling and other manipulation of the original model after a solution has been found. The disadvantage of this approach is that cloning the model may result in memory issues when dealing with larger models. diff --git a/doc/OnlineDocs/modeling_extensions/dae.rst b/doc/OnlineDocs/modeling_extensions/dae.rst index 19ac3c15593..703e83f4f14 100644 --- a/doc/OnlineDocs/modeling_extensions/dae.rst +++ b/doc/OnlineDocs/modeling_extensions/dae.rst @@ -446,7 +446,7 @@ a number of finite element points which is less than the number of points already included in the :py:class:`ContinuousSet` then the transformation will ignore the specified number and proceed with the larger set of points. Discretization points will never be removed from a -:py:class:`ContinousSet` during the discretization. +:py:class:`ContinuousSet` during the discretization. The following code is a Python script applying the backward difference method. The code also shows how to add a constraint to a discretized model. @@ -583,7 +583,7 @@ In the above example, the ``reduce_collocation_points`` function restricts the variable ``model.u`` to have only **1** free collocation point per finite element, thereby enforcing a piecewise constant profile. :numref:`Fig. %s ` shows the solution profile before and -after appling +after applying the ``reduce_collocation_points`` function. .. _reduce_points_fig: @@ -837,8 +837,8 @@ the corresponding values for the dynamic variable profiles. - Need to provide initial conditions for dynamic states by setting the value or using fix() -Specifying Time-Varing Inputs -***************************** +Specifying Time-Varying Inputs +****************************** The :py:class:`Simulator` supports simulation of a system of ODE's or DAE's with time-varying parameters or control inputs. Time-varying inputs can be specified using a Pyomo ``Suffix``. We currently only support diff --git a/doc/OnlineDocs/modeling_extensions/gdp/concepts.rst b/doc/OnlineDocs/modeling_extensions/gdp/concepts.rst index 3f571f988d2..95629bc48fd 100644 --- a/doc/OnlineDocs/modeling_extensions/gdp/concepts.rst +++ b/doc/OnlineDocs/modeling_extensions/gdp/concepts.rst @@ -115,7 +115,7 @@ These logical propositions can include: .. |equiv| replace:: :math:`Y_1 \Leftrightarrow Y_2` .. |land| replace:: :math:`Y_1 \land Y_2` .. |lor| replace:: :math:`Y_1 \lor Y_2` -.. |xor| replace:: :math:`Y_1 \underline{\lor} Y_2` +.. |xor| replace:: :math:`Y_1 \veebar Y_2` .. |impl| replace:: :math:`Y_1 \Rightarrow Y_2` +-----------------+---------+-------------+-------------+-------------+ diff --git a/doc/OnlineDocs/modeling_extensions/gdp/index.rst b/doc/OnlineDocs/modeling_extensions/gdp/index.rst index a5e47333377..0c8529c60cb 100644 --- a/doc/OnlineDocs/modeling_extensions/gdp/index.rst +++ b/doc/OnlineDocs/modeling_extensions/gdp/index.rst @@ -54,7 +54,7 @@ These can be expressed as a disjunction as follows: \text{constraints} \\ \text{for }\textit{else} \end{gathered}\right] \\ - Y_1 \underline{\vee} Y_2 + Y_1 \veebar Y_2 \end{gather*} Here, if the Boolean :math:`Y_1` is ``True``, then the constraints in the first disjunct are enforced; otherwise, the constraints in the second disjunct are enforced. diff --git a/doc/OnlineDocs/modeling_extensions/gdp/modeling.rst b/doc/OnlineDocs/modeling_extensions/gdp/modeling.rst index 9627698634e..b70e37d5935 100644 --- a/doc/OnlineDocs/modeling_extensions/gdp/modeling.rst +++ b/doc/OnlineDocs/modeling_extensions/gdp/modeling.rst @@ -86,7 +86,7 @@ When the ``Disjunction`` object constructor is passed a list of lists, the outer By default, Pyomo.GDP ``Disjunction`` objects enforce an implicit "exactly one" relationship among the selection of the disjuncts (generalization of exclusive-OR). That is, exactly one of the ``Disjunct`` indicator variables should take a ``True`` value. - This can be seen as an implicit logical proposition, in our example, :math:`Y_1 \underline{\lor} Y_2`. + This can be seen as an implicit logical proposition, in our example, :math:`Y_1 \veebar Y_2`. Logical Propositions ==================== @@ -112,7 +112,7 @@ Using these Boolean variables, we can define ``LogicalConstraint`` objects, anal .. doctest:: - >>> m.p = LogicalConstraint(expr=m.Y[1].implies(land(m.Y[2], m.Y[3])).lor(m.Y[4])) + >>> m.p = LogicalConstraint(expr=m.Y[1].implies(m.Y[2] & m.Y[3]) | m.Y[4]) >>> m.p.pprint() p : Size=1, Index=None, Active=True Key : Body : Active @@ -126,19 +126,24 @@ Pyomo.GDP logical expression system supported operators and their usage are list +--------------+------------------------+-----------------------------------+--------------------------------+ | Operator | Operator | Method | Function | +==============+========================+===================================+================================+ -| Conjunction | | :code:`Y[1].land(Y[2])` | :code:`land(Y[1],Y[2])` | +| Negation | :code:`~Y[1]` | | :code:`lnot(Y[1])` | +--------------+------------------------+-----------------------------------+--------------------------------+ -| Disjunction | | :code:`Y[1].lor(Y[2])` | :code:`lor(Y[1],Y[2])` | +| Conjunction | :code:`Y[1] & Y[2]` | :code:`Y[1].land(Y[2])` | :code:`land(Y[1],Y[2])` | +--------------+------------------------+-----------------------------------+--------------------------------+ -| Negation | :code:`~Y[1]` | | :code:`lnot(Y[1])` | +| Disjunction | :code:`Y[1] | Y[2]` | :code:`Y[1].lor(Y[2])` | :code:`lor(Y[1],Y[2])` | +--------------+------------------------+-----------------------------------+--------------------------------+ -| Exclusive OR | | :code:`Y[1].xor(Y[2])` | :code:`xor(Y[1], Y[2])` | +| Exclusive OR | :code:`Y[1] ^ Y[2]` | :code:`Y[1].xor(Y[2])` | :code:`xor(Y[1], Y[2])` | +--------------+------------------------+-----------------------------------+--------------------------------+ | Implication | | :code:`Y[1].implies(Y[2])` | :code:`implies(Y[1], Y[2])` | +--------------+------------------------+-----------------------------------+--------------------------------+ | Equivalence | | :code:`Y[1].equivalent_to(Y[2])` | :code:`equivalent(Y[1], Y[2])` | +--------------+------------------------+-----------------------------------+--------------------------------+ +.. note:: + + We omit support for some infix operators, e.g. :code:`Y[1] >> Y[2]`, due to concerns about non-intuitive Python operator precedence. + That is :code:`Y[1] | Y[2] >> Y[3]` would translate to :math:`Y_1 \lor (Y_2 \Rightarrow Y_3)` rather than :math:`(Y_1 \lor Y_2) \Rightarrow Y_3` + In addition, the following constraint-programming-inspired operators are provided: ``exactly``, ``atmost``, and ``atleast``. These predicates enforce, respectively, that exactly, at most, or at least N of their ``BooleanVar`` arguments are ``True``. @@ -148,26 +153,22 @@ Usage: - :code:`atmost(3, Y)` - :code:`exactly(3, Y)` -.. note:: - - We omit support for most infix operators, e.g. :code:`Y[1] >> Y[2]`, due to concerns about non-intuitive Python operator precedence. - That is :code:`Y[1] | Y[2] >> Y[3]` would translate to :math:`Y_1 \lor (Y_2 \Rightarrow Y_3)` rather than :math:`(Y_1 \lor Y_2) \Rightarrow Y_3` - .. doctest:: >>> m = ConcreteModel() >>> m.my_set = RangeSet(4) >>> m.Y = BooleanVar(m.my_set) >>> m.p = LogicalConstraint(expr=atleast(3, m.Y)) + >>> m.p.pprint() + p : Size=1, Index=None, Active=True + Key : Body : Active + None : atleast(3: [Y[1], Y[2], Y[3], Y[4]]) : True >>> TransformationFactory('core.logical_to_linear').apply_to(m) - >>> m.logic_to_linear.transformed_constraints.pprint() # constraint auto-generated by transformation + >>> # constraint auto-generated by transformation + >>> m.logic_to_linear.transformed_constraints.pprint() transformed_constraints : Size=1, Index=logic_to_linear.transformed_constraints_index, Active=True Key : Lower : Body : Upper : Active 1 : 3.0 : Y_asbinary[1] + Y_asbinary[2] + Y_asbinary[3] + Y_asbinary[4] : +Inf : True - >>> m.p.pprint() - p : Size=1, Index=None, Active=False - Key : Body : Active - None : atleast(3: [Y[1], Y[2], Y[3], Y[4]]) : False We elaborate on the ``logical_to_linear`` transformation :ref:`on the next page `. @@ -223,8 +224,8 @@ Here, we demonstrate this capability with a toy example: \min~&x\\ \text{s.t.}~&\left[\begin{gathered}Y_1\\x \geq 2\end{gathered}\right] \vee \left[\begin{gathered}Y_2\\x \geq 3\end{gathered}\right]\\ &\left[\begin{gathered}Y_3\\x \leq 8\end{gathered}\right] \vee \left[\begin{gathered}Y_4\\x = 2.5\end{gathered}\right] \\ - &Y_1 \underline{\vee} Y_2\\ - &Y_3 \underline{\vee} Y_4\\ + &Y_1 \veebar Y_2\\ + &Y_3 \veebar Y_4\\ &Y_1 \Rightarrow Y_4 .. doctest:: @@ -288,8 +289,8 @@ Composition of standard operators .. code:: - m.p = LogicalConstraint(expr=lor(m.Y[1], m.Y[2]).implies( - land(m.Y[3], ~m.Y[4], m.Y[5].lor(m.Y[6]))) + m.p = LogicalConstraint(expr=(m.Y[1] | m.Y[2]).implies( + m.Y[3] & ~m.Y[4] & (m.Y[5] | m.Y[6])) ) Expressions within CP-type operators @@ -359,7 +360,7 @@ In the ``logical_to_linear`` transformation, we automatically convert these spec Additional Examples =================== -The following models all work and are equivalent for :math:`\left[x = 0\right] \underline{\lor} \left[y = 0\right]`: +The following models all work and are equivalent for :math:`\left[x = 0\right] \veebar \left[y = 0\right]`: .. doctest:: diff --git a/doc/OnlineDocs/modeling_extensions/gdp/solving.rst b/doc/OnlineDocs/modeling_extensions/gdp/solving.rst index 8f5e1ccd250..2f3076862e6 100644 --- a/doc/OnlineDocs/modeling_extensions/gdp/solving.rst +++ b/doc/OnlineDocs/modeling_extensions/gdp/solving.rst @@ -29,15 +29,25 @@ Logical constraints .. note:: - Historically it was required to convert logical propositions to - algebraic form prior to use of the MI(N)LP reformulations and the - GDPopt solver. However, this is mathematically incorrect since these - reformulations convert logical formulations to algebraic formulations. - It is therefore recommended to use both the MI(N)LP reformulations - and GDPopt directly to transform or solve GDPs that include logical - propositions. + Historically users needed to explicitly convert logical propositions + to algebraic form prior to invoking the GDP MI(N)LP reformulations + or the GDPopt solver. However, this is mathematically incorrect + since the GDP MI(N)LP reformulations themselves convert logical + formulations to algebraic formulations. The current recommended + practice is to pass the entire (mixed logical / algebraic) model to + the MI(N)LP reformulations or GDPopt directly. + +There are several approaches to convert logical constraints into +algebraic form. + +Conjunctive Normal Form +^^^^^^^^^^^^^^^^^^^^^^^ -The following transforms logical propositions on the model to algebraic form: +The first transformation (`core.logical_to_linear`) leverages the +`sympy` package to generate the conjunctive normal form of the logical +constraints and then adds the equivalent as a list algebraic +constraints. The following transforms logical propositions on the model +to algebraic form: .. code:: @@ -61,6 +71,29 @@ Following solution of the GDP model, values of the Boolean variables may be upda .. autofunction:: pyomo.core.plugins.transform.logical_to_linear.update_boolean_vars_from_binary +Factorable Programming +^^^^^^^^^^^^^^^^^^^^^^ + +The second transformation (`contrib.logical_to_disjunctive`) leverages +ideas from factorable programming to first generate an equivalent set of +"factored" logical constraints form by traversing each logical +proposition and replacing each logical operator with an additional +Boolean variable and then adding the "simple" logical constraint that +equates the new Boolean variable with the single logical operator. + +The resulting "simple" logical constraints are converted to either MIP +or GDP form: if the constraint contains only Boolean variables, then +then MIP representation is emitted. Logical constraints with mixed +integer-Boolean arguments (e.g., `atmost`, `atleast`, `exactly`, etc.) +are converted to a disjunctive representation. + +As this transformation both avoids the conversion into `sympy` and only +requires a single traversal of each logical constraint, +`contrib.logical_to_disjunctive` is significantly faster than +`core.logical_to_linear` at the cost of a larger model. In practice, +the cost of the larger model is negated by the effectiveness of the MIP +presolve in most solvers. + Reformulation to MI(N)LP ------------------------ @@ -77,7 +110,6 @@ By default, the BM transformation will estimate reasonably tight M values for yo For nonlinear models where finite expression bounds may be inferred from variable bounds, the BM transformation may also be able to automatically compute M values for you. For all other models, you will need to provide the M values through a "BigM" Suffix, or through the `bigM` argument to the transformation. We will raise a ``GDP_Error`` for missing M values. -We implement the multiple-parameter Big-M (MBM) approach described in literature\ [#gdp-mbm]_. To apply the BM reformulation within a python script, use: @@ -87,6 +119,27 @@ To apply the BM reformulation within a python script, use: From the Pyomo command line, include the ``--transform pyomo.gdp.bigm`` option. +Multiple Big-M (MBM) Reformulation +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We also implement the multiple-parameter Big-M (MBM) approach described in literature\ [#gdp-mbm]_. +By default, the MBM transformation will solve continuous subproblems in order to calculate M values. +This process can be time-consuming, so the transformation also provides a method to export the M values used as a dictionary and allows for M values to be provided through the `bigM` argument. + +For example, to apply the transformation and store the M values, use: + +.. code:: + + mbigm = TransformationFactory('gdp.mbigm') + mbigm.apply_to(model) + + # These can be stored... + M_values = mbigm.get_all_M_values(model) + # ...so that in future runs, you can write: + mbigm.apply_to(m, bigM=M_values) + +From the Pyomo command line, include the ``--transform pyomo.gdp.mbigm`` option. + Hull Reformulation (HR) ^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/doc/OnlineDocs/pyomo_modeling_components/Expressions.rst b/doc/OnlineDocs/pyomo_modeling_components/Expressions.rst index 0e5b5200b5d..f0558621316 100644 --- a/doc/OnlineDocs/pyomo_modeling_components/Expressions.rst +++ b/doc/OnlineDocs/pyomo_modeling_components/Expressions.rst @@ -2,7 +2,7 @@ Expressions =========== In this section, we use the word "expression" in two ways: first in the -general sense of the word and second to desribe a class of Pyomo objects +general sense of the word and second to describe a class of Pyomo objects that have the name ``Expression`` as described in the subsection on expression objects. diff --git a/doc/OnlineDocs/pyomo_modeling_components/Parameters.rst b/doc/OnlineDocs/pyomo_modeling_components/Parameters.rst index 194555b74dc..90bcff4dc27 100644 --- a/doc/OnlineDocs/pyomo_modeling_components/Parameters.rst +++ b/doc/OnlineDocs/pyomo_modeling_components/Parameters.rst @@ -78,7 +78,7 @@ Parameter values can be checked by a validation function. In the following example, the every value of the parameter ``T`` (indexed by ``model.A``) is checked to be greater than 3.14159. If a value is provided that is less than -that, the model instantation will be terminated and an error message +that, the model instantiation will be terminated and an error message issued. The validation function should be written so as to return ``True`` if the data is valid and ``False`` otherwise. diff --git a/doc/OnlineDocs/pyomo_modeling_components/Sets.rst b/doc/OnlineDocs/pyomo_modeling_components/Sets.rst index 2f71401c2b4..f9a692fcb10 100644 --- a/doc/OnlineDocs/pyomo_modeling_components/Sets.rst +++ b/doc/OnlineDocs/pyomo_modeling_components/Sets.rst @@ -125,7 +125,7 @@ Note that the element number starts with 1 and not 0: None : 1 : Any : 10 : {3, 5, 7, 9, 11, 13, 15, 17, 19, 21} Additional information about iterators for set initialization is in the -[PyomoBookII]_ book. +[PyomoBookIII]_ book. .. note:: diff --git a/doc/OnlineDocs/pyomo_modeling_components/Suffixes.rst b/doc/OnlineDocs/pyomo_modeling_components/Suffixes.rst index 58c4de9fbc2..e45fe2d74b7 100644 --- a/doc/OnlineDocs/pyomo_modeling_components/Suffixes.rst +++ b/doc/OnlineDocs/pyomo_modeling_components/Suffixes.rst @@ -388,6 +388,7 @@ Suffix component with an IMPORT_EXPORT direction. ipopt = pyo.SolverFactory('ipopt') + The difference in performance can be seen by examining Ipopt's iteration log with and without warm starting: @@ -405,13 +406,9 @@ log with and without warm starting: iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls 0 1.6109693e+01 1.12e+01 5.28e-01 -1.0 0.00e+00 - 0.00e+00 0.00e+00 0 1 1.6982239e+01 7.30e-01 1.02e+01 -1.0 6.11e-01 - 7.19e-02 1.00e+00f 1 - 2 1.7318411e+01 3.60e-02 5.05e-01 -1.0 1.61e-01 - 1.00e+00 1.00e+00h 1 - 3 1.6849424e+01 2.78e-01 6.68e-02 -1.7 2.85e-01 - 7.94e-01 1.00e+00h 1 - 4 1.7051199e+01 4.71e-03 2.78e-03 -1.7 6.06e-02 - 1.00e+00 1.00e+00h 1 - 5 1.7011979e+01 7.19e-03 8.50e-03 -3.8 3.66e-02 - 9.45e-01 9.98e-01h 1 - 6 1.7014271e+01 1.74e-05 9.78e-06 -3.8 3.33e-03 - 1.00e+00 1.00e+00h 1 - 7 1.7014021e+01 1.23e-07 1.82e-07 -5.7 2.69e-04 - 1.00e+00 1.00e+00h 1 - 8 1.7014017e+01 1.77e-11 2.52e-11 -8.6 3.32e-06 - 1.00e+00 1.00e+00h 1 + 2 1.7318411e+01 ... + ... + 8 1.7014017e+01 ... Number of Iterations....: 8 ... @@ -441,7 +438,7 @@ log with and without warm starting: iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls 0 1.7014032e+01 2.00e-06 4.07e-06 -6.0 0.00e+00 - 0.00e+00 0.00e+00 0 1 1.7014019e+01 3.65e-12 1.00e-11 -6.0 2.50e-01 - 1.00e+00 1.00e+00h 1 - 2 1.7014017e+01 4.48e-12 6.42e-12 -9.0 1.92e-06 - 1.00e+00 1.00e+00h 1 + 2 1.7014017e+01 ... Number of Iterations....: 2 ... diff --git a/doc/OnlineDocs/pyomo_overview/math_modeling.rst b/doc/OnlineDocs/pyomo_overview/math_modeling.rst index 20769524863..ccacca8d58d 100644 --- a/doc/OnlineDocs/pyomo_overview/math_modeling.rst +++ b/doc/OnlineDocs/pyomo_overview/math_modeling.rst @@ -3,7 +3,7 @@ Mathematical Modeling This section provides an introduction to Pyomo: Python Optimization Modeling Objects. A more complete description is contained in the -[PyomoBookII]_ book. Pyomo supports the formulation and analysis of +[PyomoBookIII]_ book. Pyomo supports the formulation and analysis of mathematical models for complex optimization applications. This capability is commonly associated with commercially available algebraic modeling languages (AMLs) such as [AMPL]_, [AIMMS]_, and [GAMS]_. @@ -72,8 +72,8 @@ solvers to analyze a model introduces additional complexities. Pyomo is an AML that extends Python to include objects for mathematical -modeling. [PyomoBookI]_, [PyomoBookII]_, and [PyomoJournal]_ compare -Pyomo with other AMLs. Although many good AMLs have been developed for +modeling. [PyomoBookI]_, [PyomoBookII]_, [PyomoBookIII]_, and [PyomoJournal]_ +compare Pyomo with other AMLs. Although many good AMLs have been developed for optimization models, the following are motivating factors for the development of Pyomo: diff --git a/doc/OnlineDocs/pyomo_overview/simple_examples.rst b/doc/OnlineDocs/pyomo_overview/simple_examples.rst index 3806d573ac3..4358c87b678 100644 --- a/doc/OnlineDocs/pyomo_overview/simple_examples.rst +++ b/doc/OnlineDocs/pyomo_overview/simple_examples.rst @@ -58,7 +58,6 @@ One way to implement this in Pyomo is as shown as follows: .. testcode:: - from __future__ import division import pyomo.environ as pyo model = pyo.AbstractModel() @@ -103,21 +102,8 @@ One way to implement this in Pyomo is as shown as follows: indented and the end of the indentation is used by Python to signal the end of the definition. -We will now examine the lines in this example. The first import line is -used to ensure that ``int`` or ``long`` division arguments are converted -to floating point values before division is performed. - -.. testcode:: - - from __future__ import division - -In Python versions before 3.0, division returns the floor of the -mathematical result of division if arguments are ``int`` or ``long``. -This import line avoids unexpected behavior when developing mathematical -models with integer values in Python 2.x (and is not necessary in Python -3.x). - -The next import line that is required in every Pyomo model. Its purpose +We will now examine the lines in this example. +The first import line is required in every Pyomo model. Its purpose is to make the symbols used by Pyomo known to Python. .. testcode:: diff --git a/doc/OnlineDocs/tests/data/ABCD2.py b/doc/OnlineDocs/tests/data/ABCD2.py index 15b4e46b04e..65a46415368 100644 --- a/doc/OnlineDocs/tests/data/ABCD2.py +++ b/doc/OnlineDocs/tests/data/ABCD2.py @@ -2,13 +2,13 @@ model = AbstractModel() -model.Z = Set(initialize=[('A1','B1',1), ('A2','B2',2), ('A3','B3',3)]) -#model.Z = Set(dimen=3) +model.Z = Set(initialize=[('A1', 'B1', 1), ('A2', 'B2', 2), ('A3', 'B3', 3)]) +# model.Z = Set(dimen=3) model.D = Param(model.Z) instance = model.create_instance('ABCD2.dat') -print('Z '+str(sorted(list(instance.Z.data())))) +print('Z ' + str(sorted(list(instance.Z.data())))) print('D') for key in sorted(instance.D.keys()): - print(name(instance.D,key)+" "+str(value(instance.D[key]))) + print(name(instance.D, key) + " " + str(value(instance.D[key]))) diff --git a/doc/OnlineDocs/tests/data/ABCD3.py b/doc/OnlineDocs/tests/data/ABCD3.py index b3b63af6c16..48797ced5bb 100644 --- a/doc/OnlineDocs/tests/data/ABCD3.py +++ b/doc/OnlineDocs/tests/data/ABCD3.py @@ -7,7 +7,7 @@ instance = model.create_instance('ABCD3.dat') -print('Z '+str(sorted(list(instance.Z.data())))) +print('Z ' + str(sorted(list(instance.Z.data())))) print('D') for key in sorted(instance.D.keys()): - print(name(instance.D,key)+" "+str(value(instance.D[key]))) + print(name(instance.D, key) + " " + str(value(instance.D[key]))) diff --git a/doc/OnlineDocs/tests/data/ABCD4.py b/doc/OnlineDocs/tests/data/ABCD4.py index 6ab89a84188..20f6a21c011 100644 --- a/doc/OnlineDocs/tests/data/ABCD4.py +++ b/doc/OnlineDocs/tests/data/ABCD4.py @@ -7,7 +7,7 @@ instance = model.create_instance('ABCD4.dat') -print('Z '+str(sorted(list(instance.Z.data())))) +print('Z ' + str(sorted(list(instance.Z.data())))) print('Y') for key in sorted(instance.Y.keys()): - print(name(instance.Y,key)+" "+str(value(instance.Y[key]))) + print(name(instance.Y, key) + " " + str(value(instance.Y[key]))) diff --git a/doc/OnlineDocs/tests/data/ABCD5.py b/doc/OnlineDocs/tests/data/ABCD5.py index 39b3ff667fa..58461af056b 100644 --- a/doc/OnlineDocs/tests/data/ABCD5.py +++ b/doc/OnlineDocs/tests/data/ABCD5.py @@ -10,10 +10,10 @@ instance = model.create_instance('ABCD5.dat') -print('Z '+str(sorted(list(instance.Z.data())))) +print('Z ' + str(sorted(list(instance.Z.data())))) print('Y') for key in sorted(instance.Y.keys()): - print(name(instance.Y,key)+" "+str(value(instance.Y[key]))) + print(name(instance.Y, key) + " " + str(value(instance.Y[key]))) print('W') for key in sorted(instance.W.keys()): - print(name(instance.W,key)+" "+str(value(instance.W[key]))) + print(name(instance.W, key) + " " + str(value(instance.W[key]))) diff --git a/doc/OnlineDocs/tests/data/ABCD6.py b/doc/OnlineDocs/tests/data/ABCD6.py index dc9c156458e..961408dbc7e 100644 --- a/doc/OnlineDocs/tests/data/ABCD6.py +++ b/doc/OnlineDocs/tests/data/ABCD6.py @@ -7,7 +7,7 @@ instance = model.create_instance('ABCD6.dat') -print('Z '+str(sorted(list(instance.Z.data())))) +print('Z ' + str(sorted(list(instance.Z.data())))) print('D') for key in sorted(instance.D.keys()): - print(name(instance.D,key)+" "+str(value(instance.D[key]))) + print(name(instance.D, key) + " " + str(value(instance.D[key]))) diff --git a/doc/OnlineDocs/tests/data/ABCD7.py b/doc/OnlineDocs/tests/data/ABCD7.py index 0f9f8967210..a97e764fa5a 100644 --- a/doc/OnlineDocs/tests/data/ABCD7.py +++ b/doc/OnlineDocs/tests/data/ABCD7.py @@ -10,10 +10,10 @@ try: instance = model.create_instance('ABCD7.dat') except pyomo.common.errors.ApplicationError as e: - print("ERROR "+str(e)) + print("ERROR " + str(e)) sys.exit(1) -print('Z '+str(sorted(list(instance.Z.data())))) +print('Z ' + str(sorted(list(instance.Z.data())))) print('Y') for key in sorted(instance.Y.keys()): - print(name(instance.Y,key)+" "+str(value(instance.Y[key]))) + print(name(instance.Y, key) + " " + str(value(instance.Y[key]))) diff --git a/doc/OnlineDocs/tests/data/ABCD8.py b/doc/OnlineDocs/tests/data/ABCD8.py index d9d00f0275a..9bcd950c681 100644 --- a/doc/OnlineDocs/tests/data/ABCD8.py +++ b/doc/OnlineDocs/tests/data/ABCD8.py @@ -10,10 +10,10 @@ try: instance = model.create_instance('ABCD8.dat') except pyomo.common.errors.ApplicationError as e: - print("ERROR "+str(e)) + print("ERROR " + str(e)) sys.exit(1) -print('Z '+str(sorted(list(instance.Z.data())))) +print('Z ' + str(sorted(list(instance.Z.data())))) print('Y') for key in sorted(instance.Y.keys()): - print(name(instance.Y,key)+" "+str(value(instance.Y[key]))) + print(name(instance.Y, key) + " " + str(value(instance.Y[key]))) diff --git a/doc/OnlineDocs/tests/data/ABCD9.py b/doc/OnlineDocs/tests/data/ABCD9.py index 1322432ee59..29fcb6426db 100644 --- a/doc/OnlineDocs/tests/data/ABCD9.py +++ b/doc/OnlineDocs/tests/data/ABCD9.py @@ -10,10 +10,10 @@ try: instance = model.create_instance('ABCD9.dat') except pyomo.common.errors.ApplicationError as e: - print("ERROR "+str(e)) + print("ERROR " + str(e)) sys.exit(1) -print('Z '+str(sorted(list(instance.Z.data())))) +print('Z ' + str(sorted(list(instance.Z.data())))) print('Y') for key in sorted(instance.Y.keys()): - print(instance.Y[key]+" "+str(value(instance.Y[key]))) + print(instance.Y[key] + " " + str(value(instance.Y[key]))) diff --git a/doc/OnlineDocs/tests/data/diet1.py b/doc/OnlineDocs/tests/data/diet1.py index b7eea483daa..ef0d8096350 100644 --- a/doc/OnlineDocs/tests/data/diet1.py +++ b/doc/OnlineDocs/tests/data/diet1.py @@ -2,7 +2,7 @@ from pyomo.environ import * infinity = float('inf') -MAX_FOOD_SUPPLY = 20.0 # There is a finite food supply +MAX_FOOD_SUPPLY = 20.0 # There is a finite food supply model = AbstractModel() @@ -11,8 +11,12 @@ model.FOOD = Set() model.cost = Param(model.FOOD, within=PositiveReals) model.f_min = Param(model.FOOD, within=NonNegativeReals, default=0.0) -def f_max_validate (model, value, j): + + +def f_max_validate(model, value, j): return model.f_max[j] > model.f_min[j] + + model.f_max = Param(model.FOOD, validate=f_max_validate, default=MAX_FOOD_SUPPLY) model.NUTR = Set() @@ -22,29 +26,50 @@ def f_max_validate (model, value, j): # -------------------------------------------------------- + def Buy_bounds(model, i): return (model.f_min[i], model.f_max[i]) + + model.Buy = Var(model.FOOD, bounds=Buy_bounds, within=NonNegativeIntegers) # -------------------------------------------------------- + def Total_Cost_rule(model): return sum(model.cost[j] * model.Buy[j] for j in model.FOOD) + + model.Total_Cost = Objective(rule=Total_Cost_rule, sense=minimize) # -------------------------------------------------------- + def Entree_rule(model): - entrees = ['Cheeseburger', 'Ham Sandwich', 'Hamburger', 'Fish Sandwich', 'Chicken Sandwich'] + entrees = [ + 'Cheeseburger', + 'Ham Sandwich', + 'Hamburger', + 'Fish Sandwich', + 'Chicken Sandwich', + ] return sum(model.Buy[e] for e in entrees) >= 1 + + model.Entree = Constraint(rule=Entree_rule) + def Side_rule(model): sides = ['Fries', 'Sausage Biscuit'] return sum(model.Buy[s] for s in sides) >= 1 + + model.Side = Constraint(rule=Side_rule) + def Drink_rule(model): drinks = ['Lowfat Milk', 'Orange Juice'] return sum(model.Buy[d] for d in drinks) >= 1 + + model.Drink = Constraint(rule=Drink_rule) diff --git a/doc/OnlineDocs/tests/data/import1.tab.py b/doc/OnlineDocs/tests/data/import1.tab.py index aef3dae30fc..c9164ab73ec 100644 --- a/doc/OnlineDocs/tests/data/import1.tab.py +++ b/doc/OnlineDocs/tests/data/import1.tab.py @@ -10,4 +10,4 @@ print('Y') keys = instance.Y.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.Y[key]))) + print(str(key) + " " + str(value(instance.Y[key]))) diff --git a/doc/OnlineDocs/tests/data/import2.tab.py b/doc/OnlineDocs/tests/data/import2.tab.py index 9ede9049f75..d03f053d090 100644 --- a/doc/OnlineDocs/tests/data/import2.tab.py +++ b/doc/OnlineDocs/tests/data/import2.tab.py @@ -7,8 +7,8 @@ instance = model.create_instance('import2.tab.dat') -print('A '+str(sorted(list(instance.A.data())))) +print('A ' + str(sorted(list(instance.A.data())))) print('Y') keys = instance.Y.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.Y[key]))) + print(str(key) + " " + str(value(instance.Y[key]))) diff --git a/doc/OnlineDocs/tests/data/import3.tab.py b/doc/OnlineDocs/tests/data/import3.tab.py index f5be9b19393..e86557677ee 100644 --- a/doc/OnlineDocs/tests/data/import3.tab.py +++ b/doc/OnlineDocs/tests/data/import3.tab.py @@ -6,4 +6,4 @@ instance = model.create_instance('import3.tab.dat') -print('A '+str(sorted(list(instance.A.data())))) +print('A ' + str(sorted(list(instance.A.data())))) diff --git a/doc/OnlineDocs/tests/data/import4.tab.py b/doc/OnlineDocs/tests/data/import4.tab.py index fd84afdccc9..93df9c761ab 100644 --- a/doc/OnlineDocs/tests/data/import4.tab.py +++ b/doc/OnlineDocs/tests/data/import4.tab.py @@ -6,4 +6,4 @@ instance = model.create_instance('import4.tab.dat') -print('C '+str(sorted(list(instance.C.data())))) +print('C ' + str(sorted(list(instance.C.data())))) diff --git a/doc/OnlineDocs/tests/data/import5.tab.py b/doc/OnlineDocs/tests/data/import5.tab.py index 413c741d8f4..1d20476a16f 100644 --- a/doc/OnlineDocs/tests/data/import5.tab.py +++ b/doc/OnlineDocs/tests/data/import5.tab.py @@ -6,4 +6,4 @@ instance = model.create_instance('import5.tab.dat') -print('B '+str(list(sorted(instance.B.data())))) +print('B ' + str(list(sorted(instance.B.data())))) diff --git a/doc/OnlineDocs/tests/data/import6.tab.py b/doc/OnlineDocs/tests/data/import6.tab.py index 8823047aa49..8a1ab232f86 100644 --- a/doc/OnlineDocs/tests/data/import6.tab.py +++ b/doc/OnlineDocs/tests/data/import6.tab.py @@ -6,4 +6,4 @@ instance = model.create_instance('import6.tab.dat') -print('p '+str(value(instance.p))) +print('p ' + str(value(instance.p))) diff --git a/doc/OnlineDocs/tests/data/import7.tab.py b/doc/OnlineDocs/tests/data/import7.tab.py index 7887266f9f4..747d884be31 100644 --- a/doc/OnlineDocs/tests/data/import7.tab.py +++ b/doc/OnlineDocs/tests/data/import7.tab.py @@ -4,14 +4,14 @@ model.I = Set(initialize=['I1', 'I2', 'I3', 'I4']) model.A = Set(initialize=['A1', 'A2', 'A3']) -model.U = Param(model.I,model.A) +model.U = Param(model.I, model.A) # BUG: This should cause an error -#model.U = Param(model.A,model.I) +# model.U = Param(model.A,model.I) instance = model.create_instance('import7.tab.dat') -print('I '+str(sorted(list(instance.I.data())))) -print('A '+str(sorted(list(instance.A.data())))) +print('I ' + str(sorted(list(instance.I.data())))) +print('A ' + str(sorted(list(instance.A.data())))) print('U') for key in sorted(instance.U.keys()): - print(name(instance.U,key)+" "+str(value(instance.U[key]))) + print(name(instance.U, key) + " " + str(value(instance.U[key]))) diff --git a/doc/OnlineDocs/tests/data/import8.tab.py b/doc/OnlineDocs/tests/data/import8.tab.py index 4790a2f1053..b7866d7a3e5 100644 --- a/doc/OnlineDocs/tests/data/import8.tab.py +++ b/doc/OnlineDocs/tests/data/import8.tab.py @@ -4,12 +4,12 @@ model.I = Set(initialize=['I1', 'I2', 'I3', 'I4']) model.A = Set(initialize=['A1', 'A2', 'A3']) -model.U = Param(model.A,model.I) +model.U = Param(model.A, model.I) instance = model.create_instance('import8.tab.dat') -print('A '+str(sorted(list(instance.A.data())))) -print('I '+str(sorted(list(instance.I.data())))) +print('A ' + str(sorted(list(instance.A.data())))) +print('I ' + str(sorted(list(instance.I.data())))) print('U') for key in sorted(instance.U.keys()): - print(name(instance.U,key)+" "+str(value(instance.U[key]))) + print(name(instance.U, key) + " " + str(value(instance.U[key]))) diff --git a/doc/OnlineDocs/tests/data/param2.py b/doc/OnlineDocs/tests/data/param2.py index ce5fa62b89b..f46f05ceebc 100644 --- a/doc/OnlineDocs/tests/data/param2.py +++ b/doc/OnlineDocs/tests/data/param2.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) diff --git a/doc/OnlineDocs/tests/data/param2a.py b/doc/OnlineDocs/tests/data/param2a.py index 62261118a2e..4557f63d841 100644 --- a/doc/OnlineDocs/tests/data/param2a.py +++ b/doc/OnlineDocs/tests/data/param2a.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) diff --git a/doc/OnlineDocs/tests/data/param3.py b/doc/OnlineDocs/tests/data/param3.py index f3339743888..149155ce67d 100644 --- a/doc/OnlineDocs/tests/data/param3.py +++ b/doc/OnlineDocs/tests/data/param3.py @@ -14,12 +14,12 @@ print('B') keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.C[key]))) + print(str(key) + " " + str(value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.D[key]))) + print(str(key) + " " + str(value(instance.D[key]))) diff --git a/doc/OnlineDocs/tests/data/param3a.py b/doc/OnlineDocs/tests/data/param3a.py index c3e03d7884a..0e99cad0c7a 100644 --- a/doc/OnlineDocs/tests/data/param3a.py +++ b/doc/OnlineDocs/tests/data/param3a.py @@ -14,12 +14,12 @@ print('B') keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.C[key]))) + print(str(key) + " " + str(value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.D[key]))) + print(str(key) + " " + str(value(instance.D[key]))) diff --git a/doc/OnlineDocs/tests/data/param3b.py b/doc/OnlineDocs/tests/data/param3b.py index 0482e834b12..deda175ea12 100644 --- a/doc/OnlineDocs/tests/data/param3b.py +++ b/doc/OnlineDocs/tests/data/param3b.py @@ -14,12 +14,12 @@ print('B') keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.C[key]))) + print(str(key) + " " + str(value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.D[key]))) + print(str(key) + " " + str(value(instance.D[key]))) diff --git a/doc/OnlineDocs/tests/data/param3c.py b/doc/OnlineDocs/tests/data/param3c.py index 30ef74eb713..4056dc8107d 100644 --- a/doc/OnlineDocs/tests/data/param3c.py +++ b/doc/OnlineDocs/tests/data/param3c.py @@ -14,12 +14,12 @@ print('B') keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.C[key]))) + print(str(key) + " " + str(value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.D[key]))) + print(str(key) + " " + str(value(instance.D[key]))) diff --git a/doc/OnlineDocs/tests/data/param4.py b/doc/OnlineDocs/tests/data/param4.py index d02f4b5af7b..1190dae8dec 100644 --- a/doc/OnlineDocs/tests/data/param4.py +++ b/doc/OnlineDocs/tests/data/param4.py @@ -12,4 +12,4 @@ print('B') keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) diff --git a/doc/OnlineDocs/tests/data/param5.py b/doc/OnlineDocs/tests/data/param5.py index 2243f8aff57..69f6cc46552 100644 --- a/doc/OnlineDocs/tests/data/param5.py +++ b/doc/OnlineDocs/tests/data/param5.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) diff --git a/doc/OnlineDocs/tests/data/param5a.py b/doc/OnlineDocs/tests/data/param5a.py index 3695d42500c..303b92f9f2e 100644 --- a/doc/OnlineDocs/tests/data/param5a.py +++ b/doc/OnlineDocs/tests/data/param5a.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) diff --git a/doc/OnlineDocs/tests/data/param6.py b/doc/OnlineDocs/tests/data/param6.py index 4b3dd053341..c3e4b25d144 100644 --- a/doc/OnlineDocs/tests/data/param6.py +++ b/doc/OnlineDocs/tests/data/param6.py @@ -14,12 +14,12 @@ keys = instance.B.keys() print('B') for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.C[key]))) + print(str(key) + " " + str(value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.D[key]))) + print(str(key) + " " + str(value(instance.D[key]))) diff --git a/doc/OnlineDocs/tests/data/param6a.py b/doc/OnlineDocs/tests/data/param6a.py index be68eaa7947..07e8280cc18 100644 --- a/doc/OnlineDocs/tests/data/param6a.py +++ b/doc/OnlineDocs/tests/data/param6a.py @@ -14,12 +14,12 @@ keys = instance.B.keys() print('B') for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.C[key]))) + print(str(key) + " " + str(value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.D[key]))) + print(str(key) + " " + str(value(instance.D[key]))) diff --git a/doc/OnlineDocs/tests/data/param7a.py b/doc/OnlineDocs/tests/data/param7a.py index b5edaf1d58e..3bb68b3f3b7 100644 --- a/doc/OnlineDocs/tests/data/param7a.py +++ b/doc/OnlineDocs/tests/data/param7a.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) diff --git a/doc/OnlineDocs/tests/data/param7b.py b/doc/OnlineDocs/tests/data/param7b.py index 60956f69737..6e5c857851f 100644 --- a/doc/OnlineDocs/tests/data/param7b.py +++ b/doc/OnlineDocs/tests/data/param7b.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) diff --git a/doc/OnlineDocs/tests/data/param8a.py b/doc/OnlineDocs/tests/data/param8a.py index c9c63f5807b..57c9b08ca43 100644 --- a/doc/OnlineDocs/tests/data/param8a.py +++ b/doc/OnlineDocs/tests/data/param8a.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(value(instance.B[key]))) + print(str(key) + " " + str(value(instance.B[key]))) diff --git a/doc/OnlineDocs/tests/data/set1.py b/doc/OnlineDocs/tests/data/set1.py index 6d40f7b6746..5248e9d5dc9 100644 --- a/doc/OnlineDocs/tests/data/set1.py +++ b/doc/OnlineDocs/tests/data/set1.py @@ -10,4 +10,4 @@ print(sorted(list(instance.A.data()))) print(sorted((instance.B.data()))) -print(sorted(list((instance.C.data())), key=lambda x:x if type(x) is str else str(x))) +print(sorted(list((instance.C.data())), key=lambda x: x if type(x) is str else str(x))) diff --git a/doc/OnlineDocs/tests/data/set3.py b/doc/OnlineDocs/tests/data/set3.py index a94e881c205..d58e0c0dd43 100644 --- a/doc/OnlineDocs/tests/data/set3.py +++ b/doc/OnlineDocs/tests/data/set3.py @@ -6,10 +6,14 @@ model.A = Set() model.B = Set(model.A) # @decl -#model.C = Set(model.A,model.A) +# model.C = Set(model.A,model.A) instance = model.create_instance('set3.dat') -print(sorted(list(instance.A.data()), key=lambda x:x if type(x) is str else str(x))) -print(sorted(list(instance.B[1].data()), key=lambda x:x if type(x) is str else str(x))) -print(sorted(list(instance.B['aaa'].data()), key=lambda x:x if type(x) is str else str(x))) +print(sorted(list(instance.A.data()), key=lambda x: x if type(x) is str else str(x))) +print(sorted(list(instance.B[1].data()), key=lambda x: x if type(x) is str else str(x))) +print( + sorted( + list(instance.B['aaa'].data()), key=lambda x: x if type(x) is str else str(x) + ) +) diff --git a/doc/OnlineDocs/tests/data/set5.py b/doc/OnlineDocs/tests/data/set5.py index bfbe8594a94..35acd4e4317 100644 --- a/doc/OnlineDocs/tests/data/set5.py +++ b/doc/OnlineDocs/tests/data/set5.py @@ -9,5 +9,5 @@ instance = model.create_instance('set5.dat') -for tpl in sorted(list(instance.A.data()), key=lambda x:tuple(map(str,x))): +for tpl in sorted(list(instance.A.data()), key=lambda x: tuple(map(str, x))): print(tpl) diff --git a/doc/OnlineDocs/tests/data/table0.py b/doc/OnlineDocs/tests/data/table0.py index 8d4af8f066b..af7f634bd34 100644 --- a/doc/OnlineDocs/tests/data/table0.py +++ b/doc/OnlineDocs/tests/data/table0.py @@ -2,7 +2,7 @@ model = AbstractModel() -model.A = Set(initialize=['A1','A2','A3']) +model.A = Set(initialize=['A1', 'A2', 'A3']) model.M = Param(model.A) instance = model.create_instance('table0.dat') diff --git a/doc/OnlineDocs/tests/data/table0.ul.py b/doc/OnlineDocs/tests/data/table0.ul.py index 5b2c88e17cf..213407b071c 100644 --- a/doc/OnlineDocs/tests/data/table0.ul.py +++ b/doc/OnlineDocs/tests/data/table0.ul.py @@ -2,7 +2,7 @@ model = AbstractModel() -model.A = Set(initialize=['A1','A2','A3']) +model.A = Set(initialize=['A1', 'A2', 'A3']) model.M = Param(model.A) instance = model.create_instance('table0.ul.dat') diff --git a/doc/OnlineDocs/tests/data/table1.py b/doc/OnlineDocs/tests/data/table1.py index 1ca81708a8b..1f86508c60a 100644 --- a/doc/OnlineDocs/tests/data/table1.py +++ b/doc/OnlineDocs/tests/data/table1.py @@ -2,7 +2,7 @@ model = AbstractModel() -model.A = Set(initialize=['A1','A2','A3']) +model.A = Set(initialize=['A1', 'A2', 'A3']) model.M = Param(model.A) instance = model.create_instance('table1.dat') diff --git a/doc/OnlineDocs/tests/data/table2.py b/doc/OnlineDocs/tests/data/table2.py index 32170e0d6aa..d7708b9277f 100644 --- a/doc/OnlineDocs/tests/data/table2.py +++ b/doc/OnlineDocs/tests/data/table2.py @@ -2,8 +2,8 @@ model = AbstractModel() -model.A = Set(initialize=['A1','A2','A3']) -model.B = Set(initialize=['B1','B2','B3']) +model.A = Set(initialize=['A1', 'A2', 'A3']) +model.B = Set(initialize=['B1', 'B2', 'B3']) model.M = Param(model.A) model.N = Param(model.A, model.B) diff --git a/doc/OnlineDocs/tests/data/table3.py b/doc/OnlineDocs/tests/data/table3.py index bb5c6620657..fa871a4f79c 100644 --- a/doc/OnlineDocs/tests/data/table3.py +++ b/doc/OnlineDocs/tests/data/table3.py @@ -3,7 +3,7 @@ model = AbstractModel() model.A = Set() -model.B = Set(initialize=['B1','B2','B3']) +model.B = Set(initialize=['B1', 'B2', 'B3']) model.Z = Set(dimen=2) model.M = Param(model.A) diff --git a/doc/OnlineDocs/tests/data/table3.ul.py b/doc/OnlineDocs/tests/data/table3.ul.py index 5af0de791be..713d36b9f3a 100644 --- a/doc/OnlineDocs/tests/data/table3.ul.py +++ b/doc/OnlineDocs/tests/data/table3.ul.py @@ -3,7 +3,7 @@ model = AbstractModel() model.A = Set() -model.B = Set(initialize=['B1','B2','B3']) +model.B = Set(initialize=['B1', 'B2', 'B3']) model.Z = Set(dimen=2) model.M = Param(model.A) diff --git a/doc/OnlineDocs/tests/data/table7.py b/doc/OnlineDocs/tests/data/table7.py index 3ba276c34a9..f8f8e769b2e 100644 --- a/doc/OnlineDocs/tests/data/table7.py +++ b/doc/OnlineDocs/tests/data/table7.py @@ -2,7 +2,7 @@ model = AbstractModel() -model.A = Set(initialize=['A1','A2','A3']) +model.A = Set(initialize=['A1', 'A2', 'A3']) model.M = Param(model.A) model.Z = Set(dimen=2) diff --git a/doc/OnlineDocs/tests/dataportal/PP_sqlite.py b/doc/OnlineDocs/tests/dataportal/PP_sqlite.py index 8d178681b84..9c6fc5ddc0b 100644 --- a/doc/OnlineDocs/tests/dataportal/PP_sqlite.py +++ b/doc/OnlineDocs/tests/dataportal/PP_sqlite.py @@ -23,20 +23,18 @@ c.execute('DROP TABLE IF EXISTS ' + table) conn.commit() -c.execute(''' +c.execute( + ''' CREATE TABLE PPtable ( A text not null, B text not null, PP float not null ) -''') +''' +) conn.commit() -data = [ - ("A1", "B1", 4.3), - ("A2", "B2", 4.4), - ("A3", "B3", 4.5) -] +data = [("A1", "B1", 4.3), ("A2", "B2", 4.4), ("A3", "B3", 4.5)] for row in data: c.execute('''INSERT INTO PPtable VALUES (?,?,?)''', row) conn.commit() diff --git a/doc/OnlineDocs/tests/dataportal/dataportal_tab.py b/doc/OnlineDocs/tests/dataportal/dataportal_tab.py index 49610e7396b..d1a75196c99 100644 --- a/doc/OnlineDocs/tests/dataportal/dataportal_tab.py +++ b/doc/OnlineDocs/tests/dataportal/dataportal_tab.py @@ -50,7 +50,7 @@ # @param2 model = AbstractModel() data = DataPortal() -model.A = Set(initialize=['A1','A2','A3']) +model.A = Set(initialize=['A1', 'A2', 'A3']) model.y = Param(model.A) data.load(filename='Y.tab', param=model.y) instance = model.create_instance(data) @@ -60,10 +60,10 @@ # @param4 model = AbstractModel() data = DataPortal() -model.A = Set(initialize=['A1','A2','A3']) +model.A = Set(initialize=['A1', 'A2', 'A3']) model.x = Param(model.A) model.w = Param(model.A) -data.load(filename='XW.tab', param=(model.x,model.w)) +data.load(filename='XW.tab', param=(model.x, model.w)) instance = model.create_instance(data) # @param4 instance.pprint() @@ -83,8 +83,7 @@ data = DataPortal() model.A = Set() model.w = Param(model.A) -data.load(filename='XW.tab', select=('A','W'), - param=model.w, index=model.A) +data.load(filename='XW.tab', select=('A', 'W'), param=model.w, index=model.A) instance = model.create_instance(data) # @param5 instance.pprint() @@ -92,11 +91,10 @@ # @param6 model = AbstractModel() data = DataPortal() -model.A = Set(initialize=['A1','A2','A3']) -model.I = Set(initialize=['I1','I2','I3','I4']) +model.A = Set(initialize=['A1', 'A2', 'A3']) +model.I = Set(initialize=['I1', 'I2', 'I3', 'I4']) model.u = Param(model.I, model.A) -data.load(filename='U.tab', param=model.u, - format='array') +data.load(filename='U.tab', param=model.u, format='array') instance = model.create_instance(data) # @param6 instance.pprint() @@ -104,11 +102,10 @@ # @param7 model = AbstractModel() data = DataPortal() -model.A = Set(initialize=['A1','A2','A3']) -model.I = Set(initialize=['I1','I2','I3','I4']) +model.A = Set(initialize=['A1', 'A2', 'A3']) +model.I = Set(initialize=['I1', 'I2', 'I3', 'I4']) model.t = Param(model.A, model.I) -data.load(filename='U.tab', param=model.t, - format='transposed_array') +data.load(filename='U.tab', param=model.t, format='transposed_array') instance = model.create_instance(data) # @param7 instance.pprint() @@ -126,7 +123,7 @@ # @param9 model = AbstractModel() data = DataPortal() -model.A = Set(initialize=['A1','A2','A3','A4']) +model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.y = Param(model.A) data.load(filename='Y.tab', param=model.y) instance = model.create_instance(data) @@ -149,8 +146,8 @@ model.A = Set() model.B = Set() model.q = Param(model.A, model.B) -data.load(filename='PP.tab', param=model.q, index=(model.A,model.B)) -#instance = model.create_instance(data) +data.load(filename='PP.tab', param=model.q, index=(model.A, model.B)) +# instance = model.create_instance(data) # @param11 # -------------------------------------------------- # @concrete1 @@ -169,17 +166,17 @@ model = ConcreteModel() model.z = Param(initialize=data['z']) -model.y = Param(['A1','A2','A3'], initialize=data['y']) +model.y = Param(['A1', 'A2', 'A3'], initialize=data['y']) # @concrete2 model.pprint() # -------------------------------------------------- # @getitem data = DataPortal() data.load(filename='A.tab', set="A", format="set") -print(data['A']) #['A1', 'A2', 'A3'] +print(data['A']) # ['A1', 'A2', 'A3'] data.load(filename='Z.tab', param="z", format="param") -print(data['z']) #1.1 +print(data['z']) # 1.1 data.load(filename='Y.tab', param="y", format="table") for key in sorted(data['y']): @@ -191,8 +188,7 @@ data = DataPortal() model.A = Set(dimen=2) model.p = Param(model.A) -data.load(filename='excel.xls', range='PPtable', - param=model.p, index=model.A) +data.load(filename='excel.xls', range='PPtable', param=model.p, index=model.A) instance = model.create_instance(data) # @excel1 instance.pprint() @@ -202,7 +198,7 @@ data = DataPortal() model.A = Set(dimen=2) model.p = Param(model.A) -#data.load(filename='excel.xls', range='AX2:AZ5', +# data.load(filename='excel.xls', range='AX2:AZ5', # param=model.p, index=model.A) instance = model.create_instance(data) # @excel2 @@ -213,15 +209,20 @@ data = DataPortal() model.A = Set(dimen=2) model.p = Param(model.A) -data.load(filename='PP.sqlite', using='sqlite3', - table='PPtable', - param=model.p, index=model.A) +data.load( + filename='PP.sqlite', using='sqlite3', table='PPtable', param=model.p, index=model.A +) instance = model.create_instance(data) # @db1 data = DataPortal() -data.load(filename='PP.sqlite', using='sqlite3', - table='PPtable', - param=model.p, index=model.A, text_factory=str) +data.load( + filename='PP.sqlite', + using='sqlite3', + table='PPtable', + param=model.p, + index=model.A, + text_factory=str, +) instance = model.create_instance(data) instance.pprint() # -------------------------------------------------- @@ -230,15 +231,24 @@ data = DataPortal() model.A = Set() model.p = Param(model.A) -data.load(filename='PP.sqlite', using='sqlite3', - query="SELECT A,PP FROM PPtable", - param=model.p, index=model.A) +data.load( + filename='PP.sqlite', + using='sqlite3', + query="SELECT A,PP FROM PPtable", + param=model.p, + index=model.A, +) instance = model.create_instance(data) # @db2 data = DataPortal() -data.load(filename='PP.sqlite', using='sqlite3', - query="SELECT A,PP FROM PPtable", - param=model.p, index=model.A, text_factory=str) +data.load( + filename='PP.sqlite', + using='sqlite3', + query="SELECT A,PP FROM PPtable", + param=model.p, + index=model.A, + text_factory=str, +) instance = model.create_instance(data) instance.pprint() # -------------------------------------------------- @@ -248,17 +258,24 @@ data = DataPortal() model.A = Set() model.p = Param(model.A) - data.load(filename="Driver={MySQL ODBC 5.2 UNICODE Driver}; Database=Pyomo; Server=localhost; User=pyomo;", - using='pypyodbc', - query="SELECT A,PP FROM PPtable", - param=model.p, index=model.A) + data.load( + filename="Driver={MySQL ODBC 5.2 UNICODE Driver}; Database=Pyomo; Server=localhost; User=pyomo;", + using='pypyodbc', + query="SELECT A,PP FROM PPtable", + param=model.p, + index=model.A, + ) instance = model.create_instance(data) # @db3 data = DataPortal() - data.load(filename="Driver={MySQL ODBC 5.2 UNICODE Driver}; Database=Pyomo; Server=localhost; User=pyomo;", - using='pypyodbc', - query="SELECT A,PP FROM PPtable", - param=model.p, index=model.A, text_factory=str) + data.load( + filename="Driver={MySQL ODBC 5.2 UNICODE Driver}; Database=Pyomo; Server=localhost; User=pyomo;", + using='pypyodbc', + query="SELECT A,PP FROM PPtable", + param=model.p, + index=model.A, + text_factory=str, + ) instance = model.create_instance(data) instance.pprint() # -------------------------------------------------- @@ -298,11 +315,9 @@ model.C = Set(dimen=2) data = DataPortal() data.load(filename='C.tab', set=model.C, namespace='ns1') -data.load(filename='D.tab', set=model.C, namespace='ns2', - format='set_array') +data.load(filename='D.tab', set=model.C, namespace='ns2', format='set_array') instance1 = model.create_instance(data, namespaces=['ns1']) instance2 = model.create_instance(data, namespaces=['ns2']) # @namespaces1 instance1.pprint() instance2.pprint() - diff --git a/doc/OnlineDocs/tests/dataportal/param_initialization.py b/doc/OnlineDocs/tests/dataportal/param_initialization.py index e46b641c43b..5567b01f284 100644 --- a/doc/OnlineDocs/tests/dataportal/param_initialization.py +++ b/doc/OnlineDocs/tests/dataportal/param_initialization.py @@ -9,14 +9,17 @@ # Initialize with a dictionary # @decl2 -model.b = Param([1,2,3], initialize={1:1, 2:2, 3:3}) +model.b = Param([1, 2, 3], initialize={1: 1, 2: 2, 3: 3}) # @decl2 + # Initialize with a function that returns native Python data # @decl3 def c(model): - return {1:1, 2:2, 3:3} -model.c = Param([1,2,3], initialize=c) + return {1: 1, 2: 2, 3: 3} + + +model.c = Param([1, 2, 3], initialize=c) # @decl3 model.pprint(verbose=True) diff --git a/doc/OnlineDocs/tests/dataportal/set_initialization.py b/doc/OnlineDocs/tests/dataportal/set_initialization.py index 14b034a0119..aa7b426fa82 100644 --- a/doc/OnlineDocs/tests/dataportal/set_initialization.py +++ b/doc/OnlineDocs/tests/dataportal/set_initialization.py @@ -5,15 +5,15 @@ # Initialize with a list, tuple or set # @decl2 -model.A = Set(initialize=[2,3,5]) -model.B = Set(initialize=set([2,3,5])) -model.C = Set(initialize=(2,3,5)) +model.A = Set(initialize=[2, 3, 5]) +model.B = Set(initialize=set([2, 3, 5])) +model.C = Set(initialize=(2, 3, 5)) # @decl2 # Initialize with a generator # @decl3 model.D = Set(initialize=range(9)) -model.E = Set(initialize=(i for i in model.B if i%2 == 0)) +model.E = Set(initialize=(i for i in model.B if i % 2 == 0)) # @decl3 # Initialize with a numpy @@ -22,20 +22,23 @@ model.F = Set(initialize=f) # @decl4 + # Initialize with a function that returns native Python data # @decl5 def g(model): - return [2,3,5] + return [2, 3, 5] + + model.G = Set(initialize=g) # @decl5 # Initialize an indexed set with a dictionary # @decl6 H_init = {} -H_init[2] = [1,3,5] -H_init[3] = [2,4,6] -H_init[4] = [3,5,7] -model.H = Set([2,3,4],initialize=H_init) +H_init[2] = [1, 3, 5] +H_init[3] = [2, 4, 6] +H_init[4] = [3, 5, 7] +model.H = Set([2, 3, 4], initialize=H_init) # @decl6 model.pprint(verbose=True) diff --git a/doc/OnlineDocs/tests/expr/design.py b/doc/OnlineDocs/tests/expr/design.py index 7cc8d9698c3..b122a5f2bf3 100644 --- a/doc/OnlineDocs/tests/expr/design.py +++ b/doc/OnlineDocs/tests/expr/design.py @@ -1,6 +1,6 @@ from pyomo.environ import * -#--------------------------------------------- +# --------------------------------------------- # @categories m = ConcreteModel() m.p = Param(default=10, mutable=False) @@ -11,18 +11,18 @@ # @categories m.pprint() -#--------------------------------------------- +# --------------------------------------------- # @named_expression M = ConcreteModel() M.v = Var() M.w = Var() -M.e = Expression(expr=2*M.v) -f = M.e + 3 # f == 2*v + 3 -M.e += M.w # f == 2*v + 3 + w +M.e = Expression(expr=2 * M.v) +f = M.e + 3 # f == 2*v + 3 +M.e += M.w # f == 2*v + 3 + w # @named_expression -#--------------------------------------------- +# --------------------------------------------- # @cm1 M = ConcreteModel() M.x = Var(range(5)) @@ -39,7 +39,7 @@ print(e) -#--------------------------------------------- +# --------------------------------------------- # @cm2 M = ConcreteModel() M.x = Var(range(5)) @@ -51,4 +51,3 @@ # @cm2 print("cm2") print(e) - diff --git a/doc/OnlineDocs/tests/expr/index.py b/doc/OnlineDocs/tests/expr/index.py index 1cfc954aa52..9c9c79bf7be 100644 --- a/doc/OnlineDocs/tests/expr/index.py +++ b/doc/OnlineDocs/tests/expr/index.py @@ -1,11 +1,10 @@ from pyomo.environ import * -#--------------------------------------------- +# --------------------------------------------- # @simple M = ConcreteModel() M.v = Var() -e = M.v*2 +e = M.v * 2 # @simple print(e) - diff --git a/doc/OnlineDocs/tests/expr/managing.py b/doc/OnlineDocs/tests/expr/managing.py index 99faba46721..0a2709fe96f 100644 --- a/doc/OnlineDocs/tests/expr/managing.py +++ b/doc/OnlineDocs/tests/expr/managing.py @@ -3,14 +3,14 @@ import math import copy -#--------------------------------------------- +# --------------------------------------------- # @ex1 from pyomo.core.expr import current as EXPR M = ConcreteModel() M.x = Var() -e = sin(M.x) + 2*M.x +e = sin(M.x) + 2 * M.x # sin(x) + 2*x print(EXPR.expression_to_string(e)) @@ -19,7 +19,7 @@ print(EXPR.expression_to_string(e, verbose=True)) # @ex1 -#--------------------------------------------- +# --------------------------------------------- # @ex2 from pyomo.core.expr import current as EXPR @@ -27,13 +27,13 @@ M.x = Var() M.y = Var() -e = sin(M.x) + 2*M.y +e = sin(M.x) + 2 * M.y # sin(x1) + 2*x2 print(EXPR.expression_to_string(e, labeler=NumericLabeler('x'))) # @ex2 -#--------------------------------------------- +# --------------------------------------------- # @ex3 from pyomo.core.expr import current as EXPR @@ -41,13 +41,13 @@ M.x = Var() M.y = Var() -e = sin(M.x) + 2*M.y + M.x*M.y - 3 +e = sin(M.x) + 2 * M.y + M.x * M.y - 3 # -3 + 2*y + sin(x) + x*y print(EXPR.expression_to_string(e, standardize=True)) # @ex3 -#--------------------------------------------- +# --------------------------------------------- # @ex4 from pyomo.core.expr import current as EXPR @@ -59,31 +59,31 @@ e1 = sin(M.x) e2 = e1.clone() total = counter.count - start - assert(total == 1) + assert total == 1 # @ex4 -#--------------------------------------------- +# --------------------------------------------- # @ex5 M = ConcreteModel() M.x = Var() -M.x.value = math.pi/2.0 +M.x.value = math.pi / 2.0 val = value(M.x) -assert(isclose(val, math.pi/2.0)) +assert isclose(val, math.pi / 2.0) # @ex5 # @ex6 val = M.x() -assert(isclose(val, math.pi/2.0)) +assert isclose(val, math.pi / 2.0) # @ex6 -#--------------------------------------------- +# --------------------------------------------- # @ex7 M = ConcreteModel() M.x = Var() val = value(M.x, exception=False) -assert(val is None) +assert val is None # @ex7 -#--------------------------------------------- +# --------------------------------------------- # @ex8 from pyomo.core.expr import current as EXPR @@ -91,12 +91,12 @@ M.x = Var() M.p = Param(mutable=True) -e = M.p+M.x +e = M.p + M.x s = set([type(M.p)]) -assert(list(EXPR.identify_components(e, s)) == [M.p]) +assert list(EXPR.identify_components(e, s)) == [M.p] # @ex8 -#--------------------------------------------- +# --------------------------------------------- # @ex9 from pyomo.core.expr import current as EXPR @@ -104,20 +104,22 @@ M.x = Var() M.y = Var() -e = M.x+M.y +e = M.x + M.y M.y.value = 1 M.y.fixed = True -assert(set(id(v) for v in EXPR.identify_variables(e)) == set([id(M.x), id(M.y)])) -assert(set(id(v) for v in EXPR.identify_variables(e, include_fixed=False)) == set([id(M.x)])) +assert set(id(v) for v in EXPR.identify_variables(e)) == set([id(M.x), id(M.y)]) +assert set(id(v) for v in EXPR.identify_variables(e, include_fixed=False)) == set( + [id(M.x)] +) # @ex9 -#--------------------------------------------- +# --------------------------------------------- # @visitor1 from pyomo.core.expr import current as EXPR -class SizeofVisitor(EXPR.SimpleExpressionVisitor): +class SizeofVisitor(EXPR.SimpleExpressionVisitor): def __init__(self): self.counter = 0 @@ -126,9 +128,12 @@ def visit(self, node): def finalize(self): return self.counter + + # @visitor1 -#--------------------------------------------- + +# --------------------------------------------- # @visitor2 def sizeof_expression(expr): # @@ -139,16 +144,18 @@ def sizeof_expression(expr): # Compute the value using the :func:`xbfs` search method. # return visitor.xbfs(expr) + + # @visitor2 -#--------------------------------------------- +# --------------------------------------------- # @visitor3 from pyomo.core.expr import current as EXPR -class CloneVisitor(EXPR.ExpressionValueVisitor): +class CloneVisitor(EXPR.ExpressionValueVisitor): def __init__(self): - self.memo = {'__block_scope__': { id(None): False }} + self.memo = {'__block_scope__': {id(None): False}} def visit(self, node, values): # @@ -160,14 +167,19 @@ def visiting_potential_leaf(self, node): # # Clone leaf nodes in the expression tree # - if node.__class__ in native_numeric_types or\ - node.__class__ not in pyomo5_expression_types:\ + if ( + node.__class__ in native_numeric_types + or node.__class__ not in pyomo5_expression_types + ): return True, copy.deepcopy(node, self.memo) return False, None + + # @visitor3 -#--------------------------------------------- + +# --------------------------------------------- # @visitor4 def clone_expression(expr): # @@ -175,18 +187,20 @@ def clone_expression(expr): # visitor = CloneVisitor() # - # Clone the expression using the :func:`dfs_postorder_stack` + # Clone the expression using the :func:`dfs_postorder_stack` # search method. # return visitor.dfs_postorder_stack(expr) + + # @visitor4 -#--------------------------------------------- +# --------------------------------------------- # @visitor5 from pyomo.core.expr import current as EXPR -class ScalingVisitor(EXPR.ExpressionReplacementVisitor): +class ScalingVisitor(EXPR.ExpressionReplacementVisitor): def __init__(self, scale): super(ScalingVisitor, self).__init__() self.scale = scale @@ -199,21 +213,24 @@ def visiting_potential_leaf(self, node): return True, node if node.is_variable_type(): - return True, self.scale[id(node)]*node + return True, self.scale[id(node)] * node if isinstance(node, EXPR.LinearExpression): node_ = copy.deepcopy(node) node_.constant = node.constant node_.linear_vars = copy.copy(node.linear_vars) node_.linear_coefs = [] - for i,v in enumerate(node.linear_vars): - node_.linear_coefs.append( node.linear_coefs[i]*self.scale[id(v)] ) + for i, v in enumerate(node.linear_vars): + node_.linear_coefs.append(node.linear_coefs[i] * self.scale[id(v)]) return True, node_ return False, None + + # @visitor5 -#--------------------------------------------- + +# --------------------------------------------- # @visitor6 def scale_expression(expr, scale): # @@ -221,26 +238,27 @@ def scale_expression(expr, scale): # visitor = ScalingVisitor(scale) # - # Scale the expression using the :func:`dfs_postorder_stack` + # Scale the expression using the :func:`dfs_postorder_stack` # search method. # return visitor.dfs_postorder_stack(expr) + + # @visitor6 -#--------------------------------------------- +# --------------------------------------------- # @visitor7 M = ConcreteModel() M.x = Var(range(5)) M.p = Param(range(5), mutable=True) -scale={} +scale = {} for i in M.x: - scale[id(M.x[i])] = M.p[i] + scale[id(M.x[i])] = M.p[i] e = quicksum(M.x[i] for i in M.x) -f = scale_expression(e,scale) +f = scale_expression(e, scale) # p[0]*x[0] + p[1]*x[1] + p[2]*x[2] + p[3]*x[3] + p[4]*x[4] print(f) # @visitor7 - diff --git a/doc/OnlineDocs/tests/expr/overview.py b/doc/OnlineDocs/tests/expr/overview.py index 7cd33925c6c..6207a4c4288 100644 --- a/doc/OnlineDocs/tests/expr/overview.py +++ b/doc/OnlineDocs/tests/expr/overview.py @@ -1,6 +1,6 @@ from pyomo.environ import * -#--------------------------------------------- +# --------------------------------------------- # @example1 M = ConcreteModel() M.x = Var(range(100)) @@ -17,46 +17,46 @@ # @example1 print(e) -#--------------------------------------------- +# --------------------------------------------- # @example2 M = ConcreteModel() M.p = Param(initialize=3) -M.q = 1/M.p +M.q = 1 / M.p M.x = Var(range(100)) # The value M.q is cloned every time it is used. e = 0 for i in range(100): - e = e + M.x[i]*M.q + e = e + M.x[i] * M.q # @example2 print(e) -#--------------------------------------------- +# --------------------------------------------- # @tree1 M = ConcreteModel() M.v = Var() -e = f = 2*M.v +e = f = 2 * M.v # @tree1 print(e) -#--------------------------------------------- +# --------------------------------------------- # @tree2 M = ConcreteModel() M.v = Var() -e = 2*M.v +e = 2 * M.v f = e + 3 # @tree2 print(e) print(f) -#--------------------------------------------- +# --------------------------------------------- # @tree3 M = ConcreteModel() M.v = Var() -e = 2*M.v +e = 2 * M.v f = e + 3 g = e + 4 # @tree3 @@ -64,13 +64,13 @@ print(f) print(g) -#--------------------------------------------- +# --------------------------------------------- # @tree4 M = ConcreteModel() M.v = Var() M.w = Var() -e = 2*M.v +e = 2 * M.v f = e + 3 e += M.w @@ -78,16 +78,15 @@ print(e) print(f) -#--------------------------------------------- +# --------------------------------------------- # @tree5 M = ConcreteModel() M.v = Var() M.w = Var() -M.e = Expression(expr=2*M.v) +M.e = Expression(expr=2 * M.v) f = M.e + 3 M.e += M.w # @tree5 print(M.e) - diff --git a/doc/OnlineDocs/tests/expr/performance.py b/doc/OnlineDocs/tests/expr/performance.py index 7ece93c195e..53ac5bb4f9e 100644 --- a/doc/OnlineDocs/tests/expr/performance.py +++ b/doc/OnlineDocs/tests/expr/performance.py @@ -1,6 +1,6 @@ from pyomo.environ import * -#--------------------------------------------- +# --------------------------------------------- # @loop1 M = ConcreteModel() M.x = Var(range(5)) @@ -11,19 +11,19 @@ # @loop1 print(s) -#--------------------------------------------- +# --------------------------------------------- # @loop2 s = sum(M.x[i] for i in range(5)) # @loop2 print(s) -#--------------------------------------------- +# --------------------------------------------- # @loop3 -s = sum(M.x[i] for i in range(5))**2 +s = sum(M.x[i] for i in range(5)) ** 2 # @loop3 print(s) -#--------------------------------------------- +# --------------------------------------------- # @prod M = ConcreteModel() M.x = Var(range(5)) @@ -42,30 +42,30 @@ print(e2) print(e3) -#--------------------------------------------- +# --------------------------------------------- # @quicksum M = ConcreteModel() M.x = Var(range(5)) # Summation using the Python sum() function -e1 = sum(M.x[i]**2 for i in M.x) +e1 = sum(M.x[i] ** 2 for i in M.x) # Summation using the Pyomo quicksum function -e2 = quicksum(M.x[i]**2 for i in M.x) +e2 = quicksum(M.x[i] ** 2 for i in M.x) # @quicksum print(e1) print(e2) -#--------------------------------------------- +# --------------------------------------------- # @warning M = ConcreteModel() M.x = Var(range(5)) -e = quicksum(M.x[i]**2 if i > 0 else M.x[i] for i in range(5)) +e = quicksum(M.x[i] ** 2 if i > 0 else M.x[i] for i in range(5)) # @warning print(e) -#--------------------------------------------- +# --------------------------------------------- # @sum_product1 M = ConcreteModel() M.z = RangeSet(5) @@ -85,7 +85,7 @@ print(e2) print(e3) -#--------------------------------------------- +# --------------------------------------------- # @sum_product2 # Sum the product of x_i/y_i e1 = sum_product(M.x, denom=M.y) @@ -95,4 +95,3 @@ # @sum_product2 print(e1) print(e2) - diff --git a/doc/OnlineDocs/tests/expr/quicksum.py b/doc/OnlineDocs/tests/expr/quicksum.py index 23b6c3473e3..a1ad9660664 100644 --- a/doc/OnlineDocs/tests/expr/quicksum.py +++ b/doc/OnlineDocs/tests/expr/quicksum.py @@ -9,7 +9,7 @@ M.x = Var(M.A) start = time.time() -e = sum( (M.x[i] - 1)**M.p[i] for i in M.A) +e = sum((M.x[i] - 1) ** M.p[i] for i in M.A) print("sum: %f" % (time.time() - start)) start = time.time() @@ -17,7 +17,7 @@ print("repn: %f" % (time.time() - start)) start = time.time() -e = quicksum( (M.x[i] - 1)**M.p[i] for i in M.A) +e = quicksum((M.x[i] - 1) ** M.p[i] for i in M.A) print("quicksum: %f" % (time.time() - start)) start = time.time() diff --git a/doc/OnlineDocs/tests/scripting/AbstractSuffixes.py b/doc/OnlineDocs/tests/scripting/AbstractSuffixes.py index fa7d4fae584..20a4cc20581 100644 --- a/doc/OnlineDocs/tests/scripting/AbstractSuffixes.py +++ b/doc/OnlineDocs/tests/scripting/AbstractSuffixes.py @@ -1,17 +1,24 @@ from pyomo.environ import * model = AbstractModel() -model.I = RangeSet(1,4) +model.I = RangeSet(1, 4) model.x = Var(model.I) + + def c_rule(m, i): return m.x[i] >= i + + model.c = Constraint(model.I, rule=c_rule) + def foo_rule(m): - return ((m.x[i], 3.0*i) for i in m.I) + return ((m.x[i], 3.0 * i) for i in m.I) + + model.foo = Suffix(rule=foo_rule) # instantiate the model inst = model.create_instance() for i in inst.I: - print (i, inst.foo[inst.x[i]]) + print(i, inst.foo[inst.x[i]]) diff --git a/doc/OnlineDocs/tests/scripting/Isinglebuild.py b/doc/OnlineDocs/tests/scripting/Isinglebuild.py index bee8d105c9b..00f79c9a750 100644 --- a/doc/OnlineDocs/tests/scripting/Isinglebuild.py +++ b/doc/OnlineDocs/tests/scripting/Isinglebuild.py @@ -10,13 +10,15 @@ model.NodesOut = Set(model.Nodes, within=model.Nodes, initialize=[]) model.NodesIn = Set(model.Nodes, within=model.Nodes, initialize=[]) + def Populate_In_and_Out(model): # loop over the arcs and put the end points in the appropriate places - for (i,j) in model.Arcs: + for i, j in model.Arcs: model.NodesIn[j].add(i) model.NodesOut[i].add(j) -model.In_n_Out = BuildAction(rule = Populate_In_and_Out) + +model.In_n_Out = BuildAction(rule=Populate_In_and_Out) model.Flow = Var(model.Arcs, domain=NonNegativeReals) model.FlowCost = Param(model.Arcs) @@ -24,14 +26,22 @@ def Populate_In_and_Out(model): model.Demand = Param(model.Nodes) model.Supply = Param(model.Nodes) + def Obj_rule(model): return summation(model.FlowCost, model.Flow) + + model.Obj = Objective(rule=Obj_rule, sense=minimize) + def FlowBalance_rule(model, node): - return model.Supply[node] \ - + sum(model.Flow[i, node] for i in model.NodesIn[node]) \ - - model.Demand[node] \ - - sum(model.Flow[node, j] for j in model.NodesOut[node]) \ - == 0 + return ( + model.Supply[node] + + sum(model.Flow[i, node] for i in model.NodesIn[node]) + - model.Demand[node] + - sum(model.Flow[node, j] for j in model.NodesOut[node]) + == 0 + ) + + model.FlowBalance = Constraint(model.Nodes, rule=FlowBalance_rule) diff --git a/doc/OnlineDocs/tests/scripting/NodesIn_init.py b/doc/OnlineDocs/tests/scripting/NodesIn_init.py index a91f690d840..4a90029baa3 100644 --- a/doc/OnlineDocs/tests/scripting/NodesIn_init.py +++ b/doc/OnlineDocs/tests/scripting/NodesIn_init.py @@ -1,7 +1,9 @@ def NodesIn_init(model, node): retval = [] - for (i,j) in model.Arcs: + for i, j in model.Arcs: if j == node: retval.append(i) return retval + + model.NodesIn = Set(model.Nodes, initialize=NodesIn_init) diff --git a/doc/OnlineDocs/tests/scripting/Z_init.py b/doc/OnlineDocs/tests/scripting/Z_init.py index 6bcf99cd35a..426de6f7d08 100644 --- a/doc/OnlineDocs/tests/scripting/Z_init.py +++ b/doc/OnlineDocs/tests/scripting/Z_init.py @@ -1,5 +1,7 @@ def Z_init(model, i): if i > 10: return Set.End - return 2*i+1 + return 2 * i + 1 + + model.Z = Set(initialize=Z_init) diff --git a/doc/OnlineDocs/tests/scripting/abstract2.py b/doc/OnlineDocs/tests/scripting/abstract2.py index 9bf0ab6cf38..7eb444914db 100644 --- a/doc/OnlineDocs/tests/scripting/abstract2.py +++ b/doc/OnlineDocs/tests/scripting/abstract2.py @@ -15,14 +15,18 @@ # the next line declares a variable indexed by the set J model.x = Var(model.J, domain=NonNegativeReals) + def obj_expression(model): return summation(model.c, model.x) + model.OBJ = Objective(rule=obj_expression) + def ax_constraint_rule(model, i): # return the expression for the constraint for i - return sum(model.a[i,j] * model.x[j] for j in model.J) >= model.b[i] + return sum(model.a[i, j] * model.x[j] for j in model.J) >= model.b[i] + # the next line creates one constraint for each member of the set model.I model.AxbConstraint = Constraint(model.I, rule=ax_constraint_rule) diff --git a/doc/OnlineDocs/tests/scripting/abstract2piece.py b/doc/OnlineDocs/tests/scripting/abstract2piece.py index 132d5ce060d..225ec0d1a64 100644 --- a/doc/OnlineDocs/tests/scripting/abstract2piece.py +++ b/doc/OnlineDocs/tests/scripting/abstract2piece.py @@ -8,7 +8,7 @@ model.I = Set() model.J = Set() -Topx = 6.1 # range of x variables +Topx = 6.1 # range of x variables model.a = Param(model.I, model.J) model.b = Param(model.I) @@ -21,23 +21,31 @@ # to avoid warnings, we set breakpoints at or beyond the bounds PieceCnt = 100 bpts = [] -for i in range(PieceCnt+2): - bpts.append(float((i*Topx)/PieceCnt)) +for i in range(PieceCnt + 2): + bpts.append(float((i * Topx) / PieceCnt)) + def f4(model, j, xp): # we not need j, but it is passed as the index for the constraint return xp**4 -model.ComputeObj = Piecewise(model.J, model.y, model.x, pw_pts=bpts, pw_constr_type='EQ', f_rule=f4) + +model.ComputeObj = Piecewise( + model.J, model.y, model.x, pw_pts=bpts, pw_constr_type='EQ', f_rule=f4 +) + def obj_expression(model): return summation(model.c, model.y) + model.OBJ = Objective(rule=obj_expression) + def ax_constraint_rule(model, i): # return the expression for the constraint for i - return sum(model.a[i,j] * model.x[j] for j in model.J) >= model.b[i] + return sum(model.a[i, j] * model.x[j] for j in model.J) >= model.b[i] + # the next line creates one constraint for each member of the set model.I model.AxbConstraint = Constraint(model.I, rule=ax_constraint_rule) diff --git a/doc/OnlineDocs/tests/scripting/abstract2piecebuild.py b/doc/OnlineDocs/tests/scripting/abstract2piecebuild.py index 85480c8ac0b..1f00cdb0265 100644 --- a/doc/OnlineDocs/tests/scripting/abstract2piecebuild.py +++ b/doc/OnlineDocs/tests/scripting/abstract2piecebuild.py @@ -12,43 +12,55 @@ model.b = Param(model.I) model.c = Param(model.J) -model.Topx = Param(default=6.1) # range of x variables +model.Topx = Param(default=6.1) # range of x variables model.PieceCnt = Param(default=100) # the next line declares a variable indexed by the set J -model.x = Var(model.J, domain=NonNegativeReals, bounds=(0,model.Topx)) +model.x = Var(model.J, domain=NonNegativeReals, bounds=(0, model.Topx)) model.y = Var(model.J, domain=NonNegativeReals) # to avoid warnings, we set breakpoints beyond the bounds # we are using a dictionary so that we can have different # breakpoints for each index. But we won't. model.bpts = {} + + # @Function_valid_declaration def bpts_build(model, j): -# @Function_valid_declaration + # @Function_valid_declaration model.bpts[j] = [] - for i in range(model.PieceCnt+2): - model.bpts[j].append(float((i*model.Topx)/model.PieceCnt)) -# The object model.BuildBpts is not refered to again; + for i in range(model.PieceCnt + 2): + model.bpts[j].append(float((i * model.Topx) / model.PieceCnt)) + + +# The object model.BuildBpts is not referred to again; # the only goal is to trigger the action at build time # @BuildAction_example model.BuildBpts = BuildAction(model.J, rule=bpts_build) # @BuildAction_example + def f4(model, j, xp): # we not need j in this example, but it is passed as the index for the constraint return xp**4 -model.ComputePieces = Piecewise(model.J, model.y, model.x, pw_pts=model.bpts, pw_constr_type='EQ', f_rule=f4) + +model.ComputePieces = Piecewise( + model.J, model.y, model.x, pw_pts=model.bpts, pw_constr_type='EQ', f_rule=f4 +) + def obj_expression(model): return summation(model.c, model.y) + model.OBJ = Objective(rule=obj_expression) + def ax_constraint_rule(model, i): # return the expression for the constraint for i - return sum(model.a[i,j] * model.x[j] for j in model.J) >= model.b[i] + return sum(model.a[i, j] * model.x[j] for j in model.J) >= model.b[i] + # the next line creates one constraint for each member of the set model.I model.AxbConstraint = Constraint(model.I, rule=ax_constraint_rule) diff --git a/doc/OnlineDocs/tests/scripting/block_iter_example.py b/doc/OnlineDocs/tests/scripting/block_iter_example.py index 89cfa77b835..680e0d1728b 100644 --- a/doc/OnlineDocs/tests/scripting/block_iter_example.py +++ b/doc/OnlineDocs/tests/scripting/block_iter_example.py @@ -1,13 +1,16 @@ # written by jds, adapted for doc by dlw from pyomo.environ import * -# simple way to get arbitrary, unique values for each thing +# simple way to get arbitrary, unique values for each thing val_iter = 0 + + def get_val(*args, **kwds): global val_iter val_iter += 1 return val_iter + model = ConcreteModel() model.I = RangeSet(3) model.x = Var(initialize=get_val) @@ -17,10 +20,13 @@ def get_val(*args, **kwds): model.b.a = Var(initialize=get_val) model.b.b = Var(model.I, initialize=get_val) -def c_rule(b,i): + +def c_rule(b, i): b.c = Var(initialize=get_val) b.d = Var(b.model().I, initialize=get_val) -model.c = Block([1,2], rule=c_rule) + + +model.c = Block([1, 2], rule=c_rule) model.pprint() @@ -30,5 +36,5 @@ def c_rule(b,i): v.pprint() for v_data in model.component_data_objects(Var, descend_into=True): - print("Found: "+v_data.name+", value = "+str(value(v_data))) + print("Found: " + v_data.name + ", value = " + str(value(v_data))) # @compprintloop diff --git a/doc/OnlineDocs/tests/scripting/concrete1.py b/doc/OnlineDocs/tests/scripting/concrete1.py index 5da68c14e38..1c1f1517e17 100644 --- a/doc/OnlineDocs/tests/scripting/concrete1.py +++ b/doc/OnlineDocs/tests/scripting/concrete1.py @@ -3,8 +3,8 @@ model = ConcreteModel() -model.x = Var([1,2], domain=NonNegativeReals) +model.x = Var([1, 2], domain=NonNegativeReals) -model.OBJ = Objective(expr = 2*model.x[1] + 3*model.x[2]) +model.OBJ = Objective(expr=2 * model.x[1] + 3 * model.x[2]) -model.Constraint1 = Constraint(expr = 3*model.x[1] + 4*model.x[2] >= 1) +model.Constraint1 = Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1) diff --git a/doc/OnlineDocs/tests/scripting/doubleA.py b/doc/OnlineDocs/tests/scripting/doubleA.py index 77970841cdc..12a07944db3 100644 --- a/doc/OnlineDocs/tests/scripting/doubleA.py +++ b/doc/OnlineDocs/tests/scripting/doubleA.py @@ -1,3 +1,5 @@ def doubleA_init(model): - return (i*2 for i in model.A) + return (i * 2 for i in model.A) + + model.C = Set(initialize=DoubleA_init) diff --git a/doc/OnlineDocs/tests/scripting/driveabs2.py b/doc/OnlineDocs/tests/scripting/driveabs2.py index 257ea669480..67ab7468864 100644 --- a/doc/OnlineDocs/tests/scripting/driveabs2.py +++ b/doc/OnlineDocs/tests/scripting/driveabs2.py @@ -23,14 +23,14 @@ # @Access_all_dual # display all duals -print ("Duals") +print("Duals") for c in instance.component_objects(pyo.Constraint, active=True): - print (" Constraint",c) + print(" Constraint", c) for index in c: - print (" ", index, instance.dual[c[index]]) + print(" ", index, instance.dual[c[index]]) # @Access_all_dual # @Access_one_dual # access one dual -print ("Dual for Film=", instance.dual[instance.AxbConstraint['Film']]) +print("Dual for Film=", instance.dual[instance.AxbConstraint['Film']]) # @Access_one_dual diff --git a/doc/OnlineDocs/tests/scripting/driveconc1.py b/doc/OnlineDocs/tests/scripting/driveconc1.py index 74acd353084..ca5d6fc1593 100644 --- a/doc/OnlineDocs/tests/scripting/driveconc1.py +++ b/doc/OnlineDocs/tests/scripting/driveconc1.py @@ -13,14 +13,11 @@ # so the solver plugin will know which suffixes to collect model.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT) -results = opt.solve(model) # also load results to model +results = opt.solve(model) # also load results to model # display all duals -print ("Duals") +print("Duals") for c in model.component_objects(pyo.Constraint, active=True): - print (" Constraint",c) + print(" Constraint", c) for index in c: - print (" ", index, model.dual[c[index]]) - - - + print(" ", index, model.dual[c[index]]) diff --git a/doc/OnlineDocs/tests/scripting/iterative1.py b/doc/OnlineDocs/tests/scripting/iterative1.py index 6bdddb5ffd9..61b0fd3828e 100644 --- a/doc/OnlineDocs/tests/scripting/iterative1.py +++ b/doc/OnlineDocs/tests/scripting/iterative1.py @@ -2,6 +2,7 @@ # iterative1.py import pyomo.environ as pyo from pyomo.opt import SolverFactory + # @Import_symbols_for_pyomo # @Call_SolverFactory_with_argument @@ -17,8 +18,12 @@ model = pyo.AbstractModel() model.n = pyo.Param(default=4) model.x = pyo.Var(pyo.RangeSet(model.n), within=pyo.Binary) + + def o_rule(model): return pyo.summation(model.x) + + model.o = pyo.Objective(rule=o_rule) # @Create_base_model # @Create_empty_constraint_list @@ -39,20 +44,20 @@ def o_rule(model): # Iterate to eliminate the previously found solution # @Assign_integers for i in range(5): -# @Assign_integers -# @Iteratively_assign_and_test + # @Assign_integers + # @Iteratively_assign_and_test expr = 0 for j in instance.x: if pyo.value(instance.x[j]) == 0: expr += instance.x[j] else: - expr += (1-instance.x[j]) -# @Iteratively_assign_and_test -# @Add_expression_constraint - instance.c.add( expr >= 1 ) -# @Add_expression_constraint -# @Find_and_display_solution + expr += 1 - instance.x[j] + # @Iteratively_assign_and_test + # @Add_expression_constraint + instance.c.add(expr >= 1) + # @Add_expression_constraint + # @Find_and_display_solution results = opt.solve(instance) - print ("\n===== iteration",i) + print("\n===== iteration", i) instance.display() # @Find_and_display_solution diff --git a/doc/OnlineDocs/tests/scripting/iterative2.py b/doc/OnlineDocs/tests/scripting/iterative2.py index 6758de13221..e559a2c8400 100644 --- a/doc/OnlineDocs/tests/scripting/iterative2.py +++ b/doc/OnlineDocs/tests/scripting/iterative2.py @@ -13,8 +13,12 @@ model = pyo.AbstractModel() model.n = pyo.Param(default=4) model.x = pyo.Var(pyo.RangeSet(model.n), within=pyo.Binary) + + def o_rule(model): return pyo.summation(model.x) + + model.o = pyo.Objective(rule=o_rule) model.c = pyo.ConstraintList() diff --git a/doc/OnlineDocs/tests/scripting/noiteration1.py b/doc/OnlineDocs/tests/scripting/noiteration1.py index 47e2b5769e4..be9fb529855 100644 --- a/doc/OnlineDocs/tests/scripting/noiteration1.py +++ b/doc/OnlineDocs/tests/scripting/noiteration1.py @@ -13,8 +13,12 @@ model = pyo.ConcreteModel() model.n = pyo.Param(default=4) model.x = pyo.Var(pyo.RangeSet(model.n), within=pyo.Binary) + + def o_rule(model): return pyo.summation(model.x) + + model.o = pyo.Objective(rule=o_rule) model.c = pyo.ConstraintList() @@ -23,5 +27,4 @@ def o_rule(model): if pyo.value(model.x[2]) == 0: print("The second index has a zero") else: - print("x[2]=",pyo.value(model.x[2])) - + print("x[2]=", pyo.value(model.x[2])) diff --git a/doc/OnlineDocs/tests/scripting/parallel.py b/doc/OnlineDocs/tests/scripting/parallel.py index 85ecda6824b..cf9b55d9605 100644 --- a/doc/OnlineDocs/tests/scripting/parallel.py +++ b/doc/OnlineDocs/tests/scripting/parallel.py @@ -5,7 +5,9 @@ rank = MPI.COMM_WORLD.Get_rank() size = MPI.COMM_WORLD.Get_size() -assert size == 2, 'This example only works with 2 processes; please us mpirun -np 2 python -m mpi4py parallel.py' +assert ( + size == 2 +), 'This example only works with 2 processes; please us mpirun -np 2 python -m mpi4py parallel.py' # Create a solver opt = pyo.SolverFactory('cplex_direct') diff --git a/doc/OnlineDocs/tests/scripting/spy4Constraints.py b/doc/OnlineDocs/tests/scripting/spy4Constraints.py index dfcca5d8375..ac42b4d38b3 100644 --- a/doc/OnlineDocs/tests/scripting/spy4Constraints.py +++ b/doc/OnlineDocs/tests/scripting/spy4Constraints.py @@ -3,33 +3,48 @@ Code snippets for Constraints.rst in testable form """ from pyomo.environ import * + model = ConcreteModel() # @Inequality_constraints_2expressions model.x = Var() + def aRule(model): - return model.x >= 2 + return model.x >= 2 + + model.Boundx = Constraint(rule=aRule) + def bRule(model): - return (2, model.x, None) + return (2, model.x, None) + + model.boundx = Constraint(rule=bRule) # @Inequality_constraints_2expressions model = ConcreteModel() -model.J = Set(initialize=['butter','scones']) +model.J = Set(initialize=['butter', 'scones']) model.x = Var(model.J) + + # @Constraint_example def teaOKrule(model): - return(model.x['butter'] + model.x['scones'] == 3) + return model.x['butter'] + model.x['scones'] == 3 + + model.TeaConst = Constraint(rule=teaOKrule) # @Constraint_example # @Passing_elements_crossproduct -model.A = RangeSet(1,10) +model.A = RangeSet(1, 10) model.a = Param(model.A, within=PositiveReals) model.ToBuy = Var(model.A) + + def bud_rule(model, i): - return model.a[i]*model.ToBuy[i] <= i + return model.a[i] * model.ToBuy[i] <= i + + aBudget = Constraint(model.A, rule=bud_rule) # @Passing_elements_crossproduct diff --git a/doc/OnlineDocs/tests/scripting/spy4Expressions.py b/doc/OnlineDocs/tests/scripting/spy4Expressions.py index 5a1ab565d59..d4a5cad321a 100644 --- a/doc/OnlineDocs/tests/scripting/spy4Expressions.py +++ b/doc/OnlineDocs/tests/scripting/spy4Expressions.py @@ -3,6 +3,7 @@ Code snippets for Expressions.rst in testable form """ from pyomo.environ import * + model = ConcreteModel() # @Buildup_expression_switch @@ -13,11 +14,14 @@ model.d = Param() model.x = Var(model.A, domain=Boolean) + def pi_rule(model): accexpr = summation(model.c, model.x) if switch >= 2: accexpr = accexpr - model.d return accexpr >= 0.5 + + PieSlice = Constraint(rule=pi_rule) # @Buildup_expression_switch @@ -27,64 +31,76 @@ def pi_rule(model): model.d = Param() model.x = Var(model.A, domain=Boolean) + def pi_rule(model): accexpr = summation(model.c, model.x) if model.d >= 2: # NOT in an abstract model!! accexpr = accexpr - model.d return accexpr >= 0.5 + + PieSlice = Constraint(rule=pi_rule) # @Abstract_wrong_usage # @Declare_piecewise_constraints -#model.pwconst = Piecewise(indexes, yvar, xvar, **Keywords) -#model.pwconst = Piecewise(yvar,xvar,**Keywords) +# model.pwconst = Piecewise(indexes, yvar, xvar, **Keywords) +# model.pwconst = Piecewise(yvar,xvar,**Keywords) # @Declare_piecewise_constraints + # @f_rule_Function_examples # A function that changes with index -def f(model,j,x): - if (j == 2): - return x**2 + 1.0 - else: - return x**2 + 5.0 +def f(model, j, x): + if j == 2: + return x**2 + 1.0 + else: + return x**2 + 5.0 + # A nonlinear function -f = lambda model,x : exp(x) + value(model.p) +f = lambda model, x: exp(x) + value(model.p) # A step function -f = [0,0,1,1,2,2] +f = [0, 0, 1, 1, 2, 2] # @f_rule_Function_examples # @Keyword_assignment_example -kwds = {'pw_constr_type':'EQ','pw_repn':'SOS2','sense':maximize,'force_pw':True} +kwds = {'pw_constr_type': 'EQ', 'pw_repn': 'SOS2', 'sense': maximize, 'force_pw': True} # @Keyword_assignment_example # @Expression_objects_illustration model = ConcreteModel() model.x = Var(initialize=1.0) -def _e(m,i): - return m.x*i -model.e = Expression([1,2,3], rule=_e) + + +def _e(m, i): + return m.x * i + + +model.e = Expression([1, 2, 3], rule=_e) instance = model.create_instance() -print (value(instance.e[1])) # -> 1.0 -print (instance.e[1]()) # -> 1.0 -print (instance.e[1].value) # -> a pyomo expression object +print(value(instance.e[1])) # -> 1.0 +print(instance.e[1]()) # -> 1.0 +print(instance.e[1].value) # -> a pyomo expression object # Change the underlying expression instance.e[1].value = instance.x**2 -#... solve -#... load results +# ... solve +# ... load results # print the value of the expression given the loaded optimal solution -print (value(instance.e[1])) +print(value(instance.e[1])) # @Expression_objects_illustration + # @Define_python_function def f(x, p): return x + p + + # @Define_python_function # @Generate_new_expression diff --git a/doc/OnlineDocs/tests/scripting/spy4PyomoCommand.py b/doc/OnlineDocs/tests/scripting/spy4PyomoCommand.py index fff7b1d777b..c03ee1e5039 100644 --- a/doc/OnlineDocs/tests/scripting/spy4PyomoCommand.py +++ b/doc/OnlineDocs/tests/scripting/spy4PyomoCommand.py @@ -3,6 +3,7 @@ Code snippets for PyomoCommand.rst in testable form """ from pyomo.environ import * + model = ConcreteModel() model.I = RangeSet(3) model.J = RangeSet(3) @@ -10,11 +11,13 @@ model.x = Var(model.J) model.b = Param(model.I, default=1.0) + # @Troubleshooting_printed_command def ax_constraint_rule(model, i): - # return the expression for the constraint for i - print ("ax_constraint_rule was called for i=",str(i)) - return sum(model.a[i,j] * model.x[j] for j in model.J) >= model.b[i] + # return the expression for the constraint for i + print("ax_constraint_rule was called for i=", str(i)) + return sum(model.a[i, j] * model.x[j] for j in model.J) >= model.b[i] + # the next line creates one constraint for each member of the set model.I model.AxbConstraint = Constraint(model.I, rule=ax_constraint_rule) diff --git a/doc/OnlineDocs/tests/scripting/spy4Variables.py b/doc/OnlineDocs/tests/scripting/spy4Variables.py index 4a302d87ea8..802226247c5 100644 --- a/doc/OnlineDocs/tests/scripting/spy4Variables.py +++ b/doc/OnlineDocs/tests/scripting/spy4Variables.py @@ -3,9 +3,10 @@ Code snippets for Variables.rst in testable form """ from pyomo.environ import * + model = ConcreteModel() # @Declare_singleton_variable -model.LumberJack = Var(within=NonNegativeReals, bounds=(0,6), initialize=1.5) +model.LumberJack = Var(within=NonNegativeReals, bounds=(0, 6), initialize=1.5) # @Declare_singleton_variable # @Assign_value @@ -14,9 +15,13 @@ # @Declare_bounds model.A = Set(initialize=['Scones', 'Tea']) -lb = {'Scones':2, 'Tea':4} -ub = {'Scones':5, 'Tea':7} +lb = {'Scones': 2, 'Tea': 4} +ub = {'Scones': 5, 'Tea': 7} + + def fb(model, i): - return (lb[i], ub[i]) + return (lb[i], ub[i]) + + model.PriceToCharge = Var(model.A, domain=PositiveIntegers, bounds=fb) # @Declare_bounds diff --git a/doc/OnlineDocs/tests/scripting/spy4scripts.py b/doc/OnlineDocs/tests/scripting/spy4scripts.py index 40c69a6364c..48ba923d09c 100644 --- a/doc/OnlineDocs/tests/scripting/spy4scripts.py +++ b/doc/OnlineDocs/tests/scripting/spy4scripts.py @@ -9,7 +9,7 @@ import pyomo.environ as pyo instance = pyo.ConcreteModel() -instance.I = pyo.Set(initialize=[1,2,3]) +instance.I = pyo.Set(initialize=[1, 2, 3]) instance.sigma = pyo.Param(mutable=True, initialize=2.3) instance.Theta = pyo.Param(instance.I, mutable=True) for i in instance.I: @@ -28,7 +28,7 @@ instance.ParamName.value = NewVal # @Assign_value_to_unindexed_parametername_2 -instance.x = pyo.Var([1,2,3], initialize=0) +instance.x = pyo.Var([1, 2, 3], initialize=0) instance.y = pyo.Var() # @Set_upper&lower_bound @@ -45,43 +45,51 @@ instance.y.fixed = True # @Equivalent_form_of_instance.x.fix(2) -model=ConcreteModel() -model.obj1 = pyo.Objective(expr = 0) -model.obj2 = pyo.Objective(expr = 0) +model = ConcreteModel() +model.obj1 = pyo.Objective(expr=0) +model.obj2 = pyo.Objective(expr=0) # @Pass_multiple_objectives_to_solver model.obj1.deactivate() model.obj2.activate() # @Pass_multiple_objectives_to_solver + # @Listing_arguments def pyomo_preprocess(options=None): - if options == None: - print ("No command line options were given.") - else: - print ("Command line arguments were: %s" % options) + if options == None: + print("No command line options were given.") + else: + print("Command line arguments were: %s" % options) + + # @Listing_arguments # @Provide_dictionary_for_arbitrary_keywords def pyomo_preprocess(**kwds): - options = kwds.get('options',None) - if options == None: - print ("No command line options were given.") - else: - print ("Command line arguments were: %s" % options) + options = kwds.get('options', None) + if options == None: + print("No command line options were given.") + else: + print("Command line arguments were: %s" % options) + + # @Provide_dictionary_for_arbitrary_keywords + # @Pyomo_preprocess_argument def pyomo_preprocess(options=None): pass + + # @Pyomo_preprocess_argument # @Display_all_variables&values for v in instance.component_objects(pyo.Var, active=True): - print("Variable",v) + print("Variable", v) for index in v: - print (" ",index, pyo.value(v[index])) + print(" ", index, pyo.value(v[index])) # @Display_all_variables&values # @Display_all_variables&values_data @@ -90,41 +98,47 @@ def pyomo_preprocess(options=None): # @Display_all_variables&values_data -instance.iVar = pyo.Var([1,2,3], initialize=1, domain=pyo.Boolean) +instance.iVar = pyo.Var([1, 2, 3], initialize=1, domain=pyo.Boolean) instance.sVar = pyo.Var(initialize=1, domain=pyo.Boolean) # dlw may 2018: the next snippet does not trigger any fixing ("active?") # @Fix_all_integers&values for var in instance.component_data_objects(pyo.Var, active=True): if var.domain is pyo.IntegerSet or var.domain is pyo.BooleanSet: - print ("fixing "+str(v)) - var.fixed = True # fix the current value + print("fixing " + str(v)) + var.fixed = True # fix the current value # @Fix_all_integers&values + # @Include_definition_in_modelfile def pyomo_print_results(options, instance, results): for v in instance.component_objects(pyo.Var, active=True): - print ("Variable "+str(v)) + print("Variable " + str(v)) varobject = getattr(instance, v) for index in varobject: - print (" ",index, varobject[index].value) + print(" ", index, varobject[index].value) + + # @Include_definition_in_modelfile # @Print_parameter_name&value for parmobject in instance.component_objects(pyo.Param, active=True): - print ("Parameter "+str(parmobject.name)) + print("Parameter " + str(parmobject.name)) for index in parmobject: - print (" ",index, parmobject[index].value) + print(" ", index, parmobject[index].value) # @Print_parameter_name&value + # @Include_definition_output_constraints&duals def pyomo_print_results(options, instance, results): # display all duals - print ("Duals") + print("Duals") for c in instance.component_objects(pyo.Constraint, active=True): - print (" Constraint",c) + print(" Constraint", c) cobject = getattr(instance, c) for index in cobject: - print (" ", index, instance.dual[cobject[index]]) + print(" ", index, instance.dual[cobject[index]]) + + # @Include_definition_output_constraints&duals """ @@ -171,7 +185,7 @@ def pyomo_print_results(options, instance, results): # @Add_option_to_solver # @Add_multiple_options_to_solver -results = optimizer.solve(instance, options="threads=4", tee=True) +results = optimizer.solve(instance, options={'threads' : 4}, tee=True) # @Add_multiple_options_to_solver # @Set_path_to_solver_executable diff --git a/doc/OnlineDocs/tests/strip_examples.py b/doc/OnlineDocs/tests/strip_examples.py index aaadfc4632c..045af6b87cc 100644 --- a/doc/OnlineDocs/tests/strip_examples.py +++ b/doc/OnlineDocs/tests/strip_examples.py @@ -10,7 +10,7 @@ # # @block # print("END HERE") # -# If this file was foo.py, then a file foo_block.spy is created, which +# If this file was foo.py, then a file foo_block.spy is created, which # contains the lines between the lines starting with "# @". # # Additionally, the file foo.spy is created, which strips all lines @@ -24,13 +24,14 @@ import os import os.path + def f(root, file): if not file.endswith('.py'): return prefix = os.path.splitext(file)[0] - #print([root, file, prefix]) - OUTPUT = open(root+'/'+prefix+'.spy','w') - INPUT = open(root+'/'+file,'r') + # print([root, file, prefix]) + OUTPUT = open(root + '/' + prefix + '.spy', 'w') + INPUT = open(root + '/' + file, 'r') flag = False block_name = None for line in INPUT: @@ -39,10 +40,13 @@ def f(root, file): if flag is False: block_name = tmp[3:] flag = True - OUTPUT_ = open(root+'/'+prefix+'_%s.spy' % block_name,'w') + OUTPUT_ = open(root + '/' + prefix + '_%s.spy' % block_name, 'w') else: if block_name != tmp[3:]: - print("ERROR parsing file '%s': Started block '%s' but ended with '%s'" % (root+'/'+file, block_name, tmp[3:])) + print( + "ERROR parsing file '%s': Started block '%s' but ended with '%s'" + % (root + '/' + file, block_name, tmp[3:]) + ) sys.exit(1) flag = False block_name is None diff --git a/doc/OnlineDocs/tests/test_examples.py b/doc/OnlineDocs/tests/test_examples.py index f41db8644ac..0ee6a249c38 100644 --- a/doc/OnlineDocs/tests/test_examples.py +++ b/doc/OnlineDocs/tests/test_examples.py @@ -8,16 +8,17 @@ try: import yaml - yaml_available=True + + yaml_available = True except: - yaml_available=False + yaml_available = False # Find all *.txt files, and use them to define baseline tests currdir = os.path.dirname(os.path.abspath(__file__)) datadir = currdir -testdirs = [currdir, ] +testdirs = [currdir] -solver_dependencies = { +solver_dependencies = { 'Test_nonlinear_ch': { 'test_rosen_pyomo_rosen': 'ipopt', 'test_react_design_run_pyomo_reactor_table': 'ipopt', @@ -28,18 +29,14 @@ 'test_disease_est_run_disease_callback': 'ipopt', 'test_deer_run_deer': 'ipopt', }, - 'Test_mpec_ch': { - 'test_mpec_ch_path1': 'path', - }, - 'Test_dae_ch': { - 'test_run_path_constraint_tester': 'ipopt', - }, + 'Test_mpec_ch': {'test_mpec_ch_path1': 'path'}, + 'Test_dae_ch': {'test_run_path_constraint_tester': 'ipopt'}, } -package_dependencies = { +package_dependencies = { 'Test_data': { - 'test_data_ABCD9': ['pyodbc',], - 'test_data_ABCD8': ['pyodbc',], - 'test_data_ABCD7': ['win32com',], + 'test_data_ABCD9': ['pyodbc'], + 'test_data_ABCD8': ['pyodbc'], + 'test_data_ABCD7': ['win32com'], }, 'Test_dataportal': { 'test_dataportal_dataportal_tab': ['xlrd'], @@ -52,10 +49,13 @@ only_book_tests = set(['Test_nonlinear_ch', 'Test_scripts_ch']) + def _check_available(name): from pyomo.opt.base.solvers import check_available_solvers + return bool(check_available_solvers(name)) + def check_skip(tfname_, name): # # Skip if YAML isn't installed @@ -85,12 +85,15 @@ def check_skip(tfname_, name): # Return a boolean if the test should be skipped # if tfname_ in solver_dependencies: - if name in solver_dependencies[tfname_] and \ - not solver_available[solver_dependencies[tfname_][name]]: + if ( + name in solver_dependencies[tfname_] + and not solver_available[solver_dependencies[tfname_][name]] + ): # Skip the test because a solver is not available - # print('Skipping %s because of missing solver' %(name)) + # print('Skipping %s because of missing solver' %(name)) return 'Solver "%s" is not available' % ( - solver_dependencies[tfname_][name], ) + solver_dependencies[tfname_][name], + ) if tfname_ in package_dependencies: if name in package_dependencies[tfname_]: packages_ = package_dependencies[tfname_][name] @@ -104,7 +107,8 @@ def check_skip(tfname_, name): return "Package%s %s %s not available" % ( 's' if len(_missing) > 1 else '', ", ".join(_missing), - 'are' if len(_missing) > 1 else 'is',) + 'are' if len(_missing) > 1 else 'is', + ) return False @@ -112,135 +116,154 @@ def filter(line): # Ignore certain text when comparing output with baseline # Ipopt 3.12.4 puts BACKSPACE (chr(8) / ^H) into the output. - line = line.strip(" \n\t"+chr(8)) + line = line.strip(" \n\t" + chr(8)) if not line: return True - for field in ( '[', - 'password:', - 'http:', - 'Job ', - 'Importing module', - 'Function', - 'File',): + for field in ( + '[', + 'password:', + 'http:', + 'Job ', + 'Importing module', + 'Function', + 'File', + ): if line.startswith(field): return True - for field in ( 'Total CPU', - 'Ipopt', - 'Status: optimal', - 'Status: feasible', - 'time:', - 'Time:', - 'with format cpxlp', - 'usermodel = >> for var in instance.component_data_objects(pyo.Var, active=True): @@ -479,7 +479,7 @@ blocks) is as follows (this particular snippet assumes that instead of .. literalinclude:: tests/scripting/block_iter_example_compprintloop.spy :language: python -.. _ParmAccess: +.. _ParamAccess: Accessing Parameter Values -------------------------- diff --git a/doc/logos/doe/PyomoDoE-lg.png b/doc/logos/doe/PyomoDoE-lg.png new file mode 100644 index 00000000000..88bc36ce617 Binary files /dev/null and b/doc/logos/doe/PyomoDoE-lg.png differ diff --git a/doc/logos/doe/PyomoDoE-md.png b/doc/logos/doe/PyomoDoE-md.png new file mode 100644 index 00000000000..2612d31653a Binary files /dev/null and b/doc/logos/doe/PyomoDoE-md.png differ diff --git a/doc/logos/doe/PyomoDoE-sm.png b/doc/logos/doe/PyomoDoE-sm.png new file mode 100644 index 00000000000..9649ef80dda Binary files /dev/null and b/doc/logos/doe/PyomoDoE-sm.png differ diff --git a/doc/logos/doe/PyomoDoE.ai b/doc/logos/doe/PyomoDoE.ai new file mode 100644 index 00000000000..ec3708ccfdb --- /dev/null +++ b/doc/logos/doe/PyomoDoE.ai @@ -0,0 +1,1608 @@ +%PDF-1.6 %âãÏÓ +1 0 obj <>/OCGs[22 0 R]>>/Pages 3 0 R/Type/Catalog>> endobj 2 0 obj <>stream + + + + + application/pdf + + + PyomoDOE + + + 2022-09-15T15:11:38-04:00 + 2022-09-15T15:11:38-04:00 + 2022-09-15T15:11:38-04:00 + Adobe Illustrator 26.5 (Macintosh) + + + + 256 + 116 + JPEG + /9j/4AAQSkZJRgABAgEASABIAAD/7QAsUGhvdG9zaG9wIDMuMAA4QklNA+0AAAAAABAASAAAAAEA AQBIAAAAAQAB/+4ADkFkb2JlAGTAAAAAAf/bAIQABgQEBAUEBgUFBgkGBQYJCwgGBggLDAoKCwoK DBAMDAwMDAwQDA4PEA8ODBMTFBQTExwbGxscHx8fHx8fHx8fHwEHBwcNDA0YEBAYGhURFRofHx8f Hx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8fHx8f/8AAEQgAdAEAAwER AAIRAQMRAf/EAaIAAAAHAQEBAQEAAAAAAAAAAAQFAwIGAQAHCAkKCwEAAgIDAQEBAQEAAAAAAAAA AQACAwQFBgcICQoLEAACAQMDAgQCBgcDBAIGAnMBAgMRBAAFIRIxQVEGE2EicYEUMpGhBxWxQiPB UtHhMxZi8CRygvElQzRTkqKyY3PCNUQnk6OzNhdUZHTD0uIIJoMJChgZhJRFRqS0VtNVKBry4/PE 1OT0ZXWFlaW1xdXl9WZ2hpamtsbW5vY3R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo+Ck5SVlpeYmZ qbnJ2en5KjpKWmp6ipqqusra6voRAAICAQIDBQUEBQYECAMDbQEAAhEDBCESMUEFURNhIgZxgZEy obHwFMHR4SNCFVJicvEzJDRDghaSUyWiY7LCB3PSNeJEgxdUkwgJChgZJjZFGidkdFU38qOzwygp 0+PzhJSktMTU5PRldYWVpbXF1eX1RlZmdoaWprbG1ub2R1dnd4eXp7fH1+f3OEhYaHiImKi4yNjo +DlJWWl5iZmpucnZ6fkqOkpaanqKmqq6ytrq+v/aAAwDAQACEQMRAD8A9Na5otprOmy2N0PhfdJB 9pHH2WHyynPhjkiYlu0+eWKYkHiGqWGpaPfy2FwWjkiOxUkKwPR19jnLZccscjEvXYckMsRIdVJN W1RDVLydT4iRx/HIjLMdT82RwwPQfJWTzF5gQUTU7tR4CeQf8bZIajIP4pfMsTpsR/hj8grr5v8A M6mo1O4r7uSPuOTGsy/ziwOiw/zQrJ5682KKDUpKe4Q/rU5Ia7N/OYHs/B/NZB5W89eYJr1n1G5+ s2kYAeP041PxdwUVTUUyyPauSEgZG49eThavs/EI+kVL4vTYZopolliYPG4qrDoQc6SExIAjcF5+ USDRX5JDsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirG/O3lRNd 0/nCAuo24Jt36ch1MbHwPbwOYWt0nix2+ocnP0Gs8GW/0nn+t4vJHJHI0cilJEJV0YUIINCCDnME UaL1QIIsLcCXYq7FWQeW46QTSfzMF/4EV/42zHz9HD1R3AZv5X136nKLS4b/AEWQ/Cx/YY/wObLs rX+EeCX0H7C6jV6fiHEObN86t1DsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdirsVdi rsVdirsVefecV/MTQueoaJftf6WvxSWssUcksI+fEO6e9ajv45gZ/GhvE3F22k/L5fTOPDLv3osS tfzw8zx0FxaWk6+IWRG+8OR+GYse0Z9QHYS7FxHkZBKdc84Wev6gt19QFhdSCkzLJzSRh0NCq8TT bqa5g6uYyHiAo9XM0umlijw8XEOiFzBch2KuxVk2gRlNPDf78dmH/Ef+Ncxs3NwNQfUmWUtDMPKm veqq6fct+9UfuHP7QH7J9x2zpuye0OIeHM79P1Or1mnr1Dl1ZNm+de7FXYq7FXYq7FXYq7FXYq7F XYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXnfnv8qLTVfU1HRAttqRq0lv9mKY96fyOfHoe/jmv 1OiEt47F3Gi7UMPTPeP2h4teWd3ZXUlrdxNBcwnjJE4owOaiUSDRekhMSFg2Ez0689ZODn96v4jx zFnCkEIzK0OxVlukJw06AeK8v+CJP8cxMn1F12Y3IovK2ptHZHDoSrKaqw2IIwgkGwpFs/8AL2tr qVtxkIF3EP3q/wAw/nH8c7Hs7XDPGj9Y5/rdLqdP4Z2+kptmycV2KuxV2KuxV2KpV5p8x2Plry9f a7fJLJaafEZpo4ArSFQQPhDMi137sMVeS/8AQ3f5bf8AVt1n/kRa/wDZTimkXpv/ADlb+V15crDO mo6ejED17m3jMYr3PoSzPt/q4rSe+e/z48peS7mxj1Gy1C7ttTtxdWGoWKW8ttNGf5HeeMkioJ26 EeOKGdaLrGn61pFnq2nSiaxvoknt5B3VxUVHYjoR2OKsC8+fn95I8l+Yf0Bfw319qIjSSVLCOGQR mTdI39SWI8ytGoB0IxVNvOH5u+TvJ+n2Vz5hkms7y/iWaHSeAkvACBUOiMyLxOxJelQaE4q8+l/5 y98giSkek6q8f87Jbqff4fWb9eKaZj5D/Pn8vvOmoppmnTz2mpygmCyvYxG8nEFm4MjSRkgCtOVc UIfz9/zkB5N8keYX0LVrPUZ7tIo5jJaxQPHxkFRvJNE1f9jirI/y7/Mfy/5+0WXVtFWeKKCdreaC 6VEmV1VWqVR5BxYNsa4qnHmLXtP8v6Ffa3qLFbLT4Xnm40LEIK8UBKgsx+FRXriryvSf+cqfy91P VLPTbfT9WWe+njtomkhtggeZwiliLhjSrb0GKaZ757/Mryh5HtIrjzBeGF7jl9VtYlMk0vCnLig7 CvViB74oeZz/APOXnkFXpBpOqyJ/MyW6b+wEzYpplfkf/nIH8u/OGpxaVZTXFjqU5It7a/jWMyEV PFHjeWPltsC1T2xQ9JxV2Kse83+R9H8zW3G6X0r2MUgvUA5r7N/MvsfopmPn08cg35uZpNbPCduX c8I8x+Vtb8sagIr2Oikk290m8cgH8p/Wp3zR59PKBqT1Om1UM0bj8lW1uUuIg42PRl8DmBKNFuIV cihmdqnC2hT+VFH3DMKXMurkbJVcixdiqM0tr6O8ilswfWDAL4Gu3E/PM/QYcxmJYxy69HH1E4CN SZxrWvWWi2KXeoBlR2EdIxz+MqWp2/lO+d3p8E8poc3nNRqI4hxS5MG1H88/LtvO1vCsYnGxjuLi KJxUVFUBc5sYdlH+KQcCXaZIuEJEfj3pBqv546lCsdxbmzMSyIXhRuTNGDVxyLNSqilaZXrtHHDi uAlOdj8bOb2NknqdQIZSMeOibO3Tbc+dfC3pFp+YnkW5tYbhdf0+NZkVxHLdQJIvIVo6l6qw7jNe McyLo/JypgRkRY2ZBHJHLGskbB43AZHUgqVIqCCOoOQQuxVhH53f+Sn8z/8AMG3/ABJcVeT/APOL HlHyprXk/VrjWdFsNTnj1Axxy3ltDO6p6EZ4q0isQKmtMUlr/nKPyB5M0Tyfp2raNpFrpl82pJbO 1pGsCtFJBM7BkQKpo0S022xUI/QfIZ/ML/nGfRbUgPrVkl1LpEx6iS3upo1iqf2ZI14eHQ9sVYd+ R/52R+S/LWv6Dr1f9xscl3o1vISrG45cHtPEcpGD9NvjOKt/849+Sb/zz56vPPnmGtzbWFybjm4+ GfUHPNQB/LDUPQdPhHTFUN+bEMfmj/nJW30S8Pq2Au9N09kqy/uGWOSZARuPilfpir6Rh/Kr8s4o FhXyppJRRxBeygd6e7shYn3JxQ+Y/wA2NF0ryJ+eemS6DAtjaJJYahHbxV4I3q0cKvZWMZPHpv8A RilX/wCch7W3u/z0trS5qbe4SwimC/a4O3Fqe9DiqN/5xu1288pfmhqfkvUz6Yv2ltHQn4VvbIsV I7fEodff4cVLNv8AnLXzgbHyvp/le3kpPq8vr3ag/wDHtbEFVYf5cpUj/VOKh4Tp/lyfQfPfku2u AVubptMvpVIoR9auBIgI8RGVBr3xVnP/ADkG36d/PbT9EuWYWqDT7Dbslw4dyN+v7/FX0da/lR+W VtAkEflXSmRBQGWzgmf6XkV3P0nFD5j/AOcgvLuleRvzQ0u78u2iWEBtrfUY4YfgRbiKeQHgKnj/ AHSnbFL7GxQ7FXYqhdU0rT9VspLLUIFuLaUfFGw79iD1BHYjIzgJCjybMWWUJcUTReK+b/y51Lyz M+oafzvNH6yGlZIl/wCLAOoH8w+mmaPV6ExFjeL02i7Sjl9Mtp/ek1vxnMfA1EhABHuaZqTs58jQ ZvmvdUqRW8sv2Rt4npmXptDlzfSNu/o05c8IcyjIrGNd3+M+HbOh0vY2OG8/Ufsdbl10pbR2DI/L VvbmZpWZfVTaOLv7tTNwAAKDh3bH/wA6J7tdCtoLLgbySR3gEponJE4gtQE0HPNr2VE8UiO51Pa0 o1ES5W8R0vyDo8ETyaiv6Q1Cdi9xdSFgSzbniAdhm7hjA97qcuumT6fTEcgzS4/I7yLe6LDcgXdp JJAjv6E1QWdRX+9WXuc1s9TMTMel07bHllwCXWr+xLLX8nPy4s7y2tLiGa5uLosLdZ5nHIoKt/de mOhyE8wiQCdzyTCefJCU4j0wq/K9g+jookiiSKMUjjUKg8AooM0hLtV2KsI/O7/yU/mf/mDb/iS4 q+Zfyg8v/nRqeiXsvkPUvqWnJc8LqP11irP6amtGVq/ARillGrfkT+f3m24gi8z6xDNbwmqNdXbS Rx12YpFGjDkR7D54q+j/ACX5XtvKvlXTPL9tIZotOhERmI4l3JLSPxFacnYmmKHyj/zlH5X03Rfz GW7sV9P9NWwvrqIAcROZHjdl/wBfgGP+VU98Uh9XeS/Kem+U/LNhoGnL/o9lGFaQijSSH4pJW/yn ck4ofMHm4fUv+croGuDwRtX01g3akscHE1/2W+KX1zih8jf85GgX354afZwGswhsLdgNyHklZgKD fpIMUhr8/v8Ayflj/wBu7/iYxVGf85K6Dd+VPzL0rztpg9P6+0dyHHRb2yZa1p2ZAh9zyxUJPqt2 v5yfnvaR2wY6M7QxANUFLG1T1J6ntybnT3YYqmn50Iif85GaIiKFRZdJVVUUAAlUAADFVP8AN6f6 h/zkxp17NRYorzSJ6saDhGYqknsPgOKvrbFD5K/5y1m+s/mNpVlCOcsemRAgHfnLcTUWnyofpxSH 1rih2KuxV2KuIBBBFQdiDirzjzN5D0q21GO+0qVLYtIr3Gn/ALHWpeOgPD3U7eFOh1Os7M494bF2 +n7VIiYz3259V0VjEm7fG3v0+7BpuxsWPeXqP2fJxMutnLYbBdeXAtbKe548xBG8nAbV4KTT8M3M I7gBwiUi8jedrTzdp099a2728cExgKyEMSQoaop/rZfqMHhmrtjGVs58v2kE9yzyGrQ0ZE8ff6Mx 2TG/zcmBu9Ohrukcj0/12Uf8aZu+yBtI+50PbMvVEe9gGbh0qPi1D80bKB0i0ee/0uVVNn+4eTim 3AqYvip02P4Zxms1mWOonUbje3w93zfTezeyNDl0eMTycOTh9Rvv3r1dw2Ydrt75unvYr3Ubee1l tjygrC8IjIPKoqPbqc1Gp1WXJISltXJ63s3s3R6fGceKpCf1WQbfRH5f+b4fNHlyG+qFvY/3N9EP 2ZlG5A/lf7Q+7tmwwZeON9Xju09CdNmMf4eY937GSZc69jP5maBqHmHyHrei6cFa+vrYxW6u3BSx IO7Hp0xViX/OPn5eeZPI/lnUdP15Ikubm9+sRCGQSDh6SJuR7qcVep4q7FXg3/OQf5O+dPPHmbTt Q0GKB7a2svq8pmmEZ5+q77AjpRhil7zih4r+d/5BXnnbV4vMWgXsVpq6RLDcQXHJY5RGSUdZEDsr gHj0odundSw2L8uv+cqIIDbR+YZDENgTqBZqDpR2Bf8AHFU0/Lj/AJxx802/nK182eeNUiu7i1mW 7WBJJLmaW4j3jaaaQL9hgG25VpTFbRP5r/kx538y/mra+ZdLit20uL6nzaSYI/7hgX+EjFXov50f l/L558i3Ok2gT9KQyJdaa0h4qJozQgt25Rsy4oYb/wA49/kvrXke81XVvMSQjUriNLWyWFxIFhrz lJIH7bKn3e+KUF+ZP5O+dPMH5xad5q06KBtJtpLB5WeYLJS2kDSUQjwG2Kpx+ef5E3Pn27tNZ0a8 hs9ZtYvq8iXHIRTRKxdPjQMyspZv2TWvamKsEg/Lf/nKa0hNrB5hkaEDgpOolzxGw4tIC4/DFUb5 K/5xt85T+brTzL591iO7a2mjuZIBLLd3E7xEFEllkCgKCBWhbbbbritvo/FDsVdirsVQ2prKbCb0 mKOF5AqaH4dz+GKsMJJNTuT1OFDsVRulWFtfXD21yvOF42DLUitaDt88bpUD+WP5daf5d8sNZS2j 288l7ezOjSFyUNy6W7VDEb2yR/x3rl2oymcr8vx9qgUjNKuvq19HITRCeD/6rf065SrFvzSlD+ZU Qf7qto1PzLO3/G2dD2UKxH3/AKnm+1zeX/N/WxawtHvL63tI/t3EiRL83YD+ObDJPhiSejrscOKQ iOpfQMUSRRJFGOMcahUHgFFBnHE2bL2oFCguwJWrFGjMyoFZ92IABPzxpJJL5I/6G7/Mn/q26N/y Iuv+ynFad/0N3+ZP/Vt0b/kRdf8AZTitO/6G7/Mn/q26N/yIuv8AspxWnf8AQ3f5k/8AVt0b/kRd f9lOK0m+h/8AOYOtJMi67oFtPCTSSSxkkhZR3ISUzBvlyHzxWn0H5J89eXPOmiJq+hXBltyeE0Tj jNDIBUxypU8W+mh7EjFCf4q7FXYq7FXYq8c/PP8AOvX/AMvdW0yz0yxtbuO+geaRrn1KqVfiAvB0 xS8y/wChvvO//Vm0z7rj/qritO/6G+87/wDVm0z7rj/qritO/wChvvO//Vm0z7rj/qritO/6G+87 /wDVm0z7rj/qritJh5d/5yt85ap5g0zTJtI05Ir67gtpHQT8lWaRUJWshFQGxWn1Bih2KuxV2KuI BBB3B2IxVg9zCYbiSI/sMV+44UKeKo3Sb2KzujLKGK8StFoTUkeJGKpz/iaw/wB9y/cv/NWBNsUu bq3tYTNO4jjXqT+oeJycIGRqIsteTJGAuRoMPv31DzNrkktnbySyyBFVBuQqIE5Mei1413OdLgAw YgJkPLZzLUZSYA7s68o/l3+jLqLUdRmD3cVWigj+wpIIqzH7R37fjms1naPGDGI2dtouzPDInI+r uZvmqdu7FXYq/NbFk9o/KL/nH2z8/wDlR9dm1qTT3W6ktvQSBZRSNUblyLp15+GKGbf9Cc6b/wBT RN/0iL/1VxW1O4/5w4tTH/o/mp1k7epZBlO3TadafPFbeG/mN+Xus+Q/Mj6JqbpMxjWe2uoq8JYX JAYBtweSkEeI+nFWef8AOLPmifS/zIXSC5+p67BJDJGT8PrQI00T/MBXUf62Kl9i4odirsVdirsV fLH/ADmF/wApJ5e/5g5f+TuKQ8P8t+XdV8x63a6LpMQm1C8YrbxsyoCVUufiYhRspxS9C/6Fl/OD /q1Q/wDSXb/814ot3/Qsv5wf9WqH/pLt/wDmvFbd/wBCy/nB/wBWqH/pLt/+a8VtNPKv/OOn5r6f 5n0e/utMhS2tL22nncXUDERxSq7Ggep2GKvsHFDsVdirsVdirz+fW7bUPMeuWkRHPTbiOBwO/K3j ev8AwbMv0ZbPGYiJ7x+liCqZWl2KoO+1KO2KxIjXF3J/c2sYLOx+Qrtl+DTyyHbYDmegcbUamOPz keQHMqun+QdU1WZbzzBN6EQ3SyiI5AeBO4X8T8szfzePCOHELP8AOLgjRZM54sxofzQznTtL0/Tb cW9jAkEQ6hRuT4sTux9zmuyZZTNyNu0xYo4xURQRWVtjsVdirsVfmtiyfYf/ADif/wCSvl/7adx/ ybixQXs2KHYq+af+cxtPQSeV9QWnqMLu3kPchTE6du3JsUh4v+U921p+ZvlaZdq6paRE1p8Msyxt 19nxV97ahqFlp1jPf306W1nbI0txPIQqIiipZicUPD/MX/OXPk+yneHRNKutWCEgTyMtpE/ulRLJ T/WQfLFNMbk/5zHvi5MflWJU7K16zH7xCv6sVpNNJ/5zE0eWVV1fy3cWsXR5bW4S4Pz4Olv/AMSx WnuHlTzb5f8ANejRaxoV2t3ZSkqWAKsjr9pJEajKw8D8+mKHzh/zmF/yknl7/mDl/wCTuKQ8+/ID /wAm/wCW/wDjNL/1DyYq+6MUOxV2KuxV2KuxV2KuxVZNPDBGZJpFjjHVmIA/HITnGIuRoK+Zvy08 2fX/AMy/MRZ6xa3JPcw77Fo5WZAPlE7fdm812GsMf6Lj45eovYM07esgt9R1S0E+jCO4gZmj+trJ GyKyMUcActyrAjKs85w5QMj8vv8A0fYwlxEen5qdv5S8w2kjSwxt60n95MJl5t8zyGaLVR1+U77R 6CJAA+F/abPmwx4IwNj6j16qrp5xt/iY3Zp1ozSD8C2YRjrYfz/tLcti81a/bNxkkD0/YlQV/AK2 CPauogaJv3hbTWz89ISFvLYr4vEa/wDCt/XM/D26P44/JbZBY6vp18P9GnV27odnH+xO+bfBq8eX 6Tf3pReZKuxV+a2LJ9h/84n/APkr5f8Atp3H/JuLFBezYodir59/5zC/5Rvy9/zGS/8AJrFIfOnk X/lN/L3/AG07P/qITFX1B/zllrs9j+XtppkLlDq16iTgGnKGBWlKn/noEOKh84flh+XWoef/ADQN Ds7hLQJC9zdXUilxHChVSQgK8iWkUAVGKvc4/wDnDrSBCRJ5muGm2o62qKvv8JkY/wDDYrbyv84v ySvvy5FjdDUV1PTb93jjm9IwyRyIA3F15SA1U7EN2OwxVmn/ADiDr80PmbWtBZz9XvLRbxEPQS28 ipt4Flm3+WKld/zmF/yknl7/AJg5f+TuKh8+4pdirsVdir6V/wCcNf8Apr/+3d/2NYoL6UxQ7FXE gCp2A6nFUluddnuJGttGh+tSg0e4O0Kf7Lv/AJ9c1mTXSmeHAOI9/wDCFY/5y0ySx8qazrWrXj3F xa2c8sUanigkCHgB82p0Aw6bsk5csTmkZG+XT8fJjLYPlHyfrA0bzPpupM3GO3nUzN4RN8EnT/IY 53uox8cDHycWJos189/mxqOvSto/l1ZIbGU+kZEB9e5LGnFQPiVW/l6nv4ZhaXQCHqnz+5nPJewe 2fkR5T8zeWvJz2uuhYWuZzdW1nWskKuigrJ2BPHlxHTvvsNfr80JzuLbjiQN3o+YLY7FVOe2t514 TxJKv8rqGH45CeOMxUgCqS33k3S7irW/K2kP8vxJ/wACf4HNXn7GxT+n0n7EUxjUfLuq6afVK84l NRPETt8+65o9R2flw78x3hUbpPnG8tysd7W4h6c/92D6f2vpzK0nbE4bT9Uft/atsxs761vIRNbS CSM9x1B8COxzpMOeGSPFE2Evzdy1k+w/+cT/APyV8v8A207j/k3FigvZsUOxV88/85iTquieW7c/ akubiQfJI0B/4mMUh89/l9E83n3y1ClOcmq2SLXpVrhAMVe8/wDOZMzCDylDQcXa/cnvVBbgf8Tx UJJ/zh/AG83a7PXePT1QD/XmU/8AGmKl9WYoeHf85eRofy70uUj411eJVPs1tcE/8RGKQ8l/5xZZ l/NeEAkBrK5DAHqKKaH6RipZJ/zmF/yknl7/AJg5f+TuKh5V+VHlvTPMv5haNoeqK7WF7JIk6xtw YhYXcUYdN1GKvp//AKFa/Kf/AJZ7z/pJb+mKLd/0K1+U/wDyz3n/AEkt/TFbd/0K1+U//LPef9JL f0xW2X+QPyt8p+Q/r/8Ah+OZP0j6X1n1pTLX0OfClRt/etirLsVWzTRQxNLKwSNBVmORnMRFnkqW yWtzqZ/0nlBYfs2w+F5PeQjoP8n78wpYpZ/q9OPu6n3/AKvmqYwwxQxrHEgjjXZUUUA+7M2EBEUB QV5t/wA5E6qLH8s7qCtG1G4gtVPfZ/XP/CwkZsOzoXlHk15Ts+XvLvlvWvMeqxaXo9s11dy/sr9l VHV3Y7Ko7k5v8mWMBcjs4wBL6n/K78mNF8mxJfXfC/8AMLD47sj4IajdYFPTwLn4j7DbOf1Wtll2 G0XJhjAejZhNjsVdirsVdirsVY/rXlG1uw01nS3uOpXpGx9wOh+WafW9kwyeqHpl9hRTE4LjU9Gv jTlDMu0kbfZYe47jOfhky6bJ3S7lfEWdwzfYf/OJ/wD5K+X/ALadx/ybixQXs2KHYq+SP+csPNth q3m7TdGsp0uE0WCT6y8bBlW4uGHOOo/aVIkr86dRikMH/I7TG1H82fLUAFfTuxcn2FqjT13/AOMe KvYP+cyLZ2tPKlyK8I5L6NttqyCBhv8A88zioY9/ziDeInnXWbMkBp9O9RQep9KdAaf8jMVL6wxQ 8H/5y+vETyPo1kSOc2piZV7kQ28qkj2/fDFIeY/84p2zzfmmZFrS20+4lagrsWjj38N5MVKf/wDO YX/KSeXv+YOX/k7ioeafkpqum6V+aOg6hqVzHZ2NvLIZrmZgkaAwSKCzHYbkDFX2J/yt78r/APqa dM/6SY/64od/yt78r/8AqadM/wCkmP8Arirv+Vvflf8A9TTpn/STH/XFVS2/NX8trq5itrfzLp0t xO6xwxJcRlmdzxVVAO5JNMVZViqk1uJJhJJ8Sx7xJ2B/mPv4eGVnHcrPTkqrliuxV5n+cXkbXfO9 3oWi2JFtpsLzXWpX8gqkdAqRqq1BdyGeg+8jM7R544hKR59GvJEnZlvkzyN5e8n6WNP0e34cqG5u noZpmH7Uj0367DoO2Y+bPLIbkyjEBP8AKWTsVdirsVdirsVdirsVS/WNFtdTg4SjjMo/dTDqp/iP bMPWaKGeNHn0KvzmzMZJlp/mbzJptubfTtVvLK3LFzDb3EsScjsW4oyiu2Kor/HXnf8A6mHU/wDp MuP+a8UKN15u813cZiutav7iM1BSW5mdd9jszHFKU4q+m/8AnFv8rdW0+7uPOWtWr2okhNvpEMyl ZGWQgyT8Tuo4rxSvUE9qHFBZ3/zkd5Hv/NX5esdMga41LSZ1vYYIxWSSMKySog7ni/OnU8aDFAfH Gi65rOganFqWkXcthqFuSI54jxYVFGB8QRsQcWTPo/8AnJL840hMf6dVz0WRrS0LD/klQ/SDiimH ea/O/mvzbdx3fmLUpdQmhBEPPiqRhqFuEaBEWtBXiuKvoL/nEnyTqFnBqvmy9gaGK9jSz00uCpkj DepLIK/sFggU96HFSkf/ADmF/wApJ5e/5g5f+TuKh8+4pdirsVdiqeeRf+U38vf9tOz/AOohMUP0 NxQ7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq7FXYq+ff+sQP+XP/uYYpd/1iB/y5/8AcwxV3/WIH/Ln /wBzDFUZZ/8AQpPpH0v0Xx5b+t9Z5VoP9+/FT8MVZf5O/wCVBfW4v8MfoD9If7p9P6v9a6/sep+9 +7FD0fFXYq8w/MD/AKF8+vTf4v8A0P8ApOv+kdPrdf8Aiz6v++r/AK2KvO2/6Ey5GtK13p+nKfRT bFLLvKn/AEK59aT9D/oX6zyHp/XeXLltTh9e7+FO+KvZF48Rwpxp8NOlO1MUMM8+/wDKpfrdr/jj 9FfW/Tb6n+kvT5+ny+Lhz7csVYt/1i9/37X/AE74pd/1i9/37X/Tvirv+sXv+/a/6d8Vd/1i9/37 X/TviqJ03/oW39I2v6O/w7+kPWj+p+l6HqetyHp8Kb8uVKYof//Z + + + + uuid:648047da-72d6-b74b-a5aa-806a33643ff0 + xmp.did:26665edd-e4a0-4a03-aa39-7964ff2d7809 + uuid:5D20892493BFDB11914A8590D31508C8 + proof:pdf + + xmp.iid:77568db9-71f9-43f7-85eb-4960f6a3a2ff + xmp.did:77568db9-71f9-43f7-85eb-4960f6a3a2ff + uuid:5D20892493BFDB11914A8590D31508C8 + default + + + + + converted + from application/pdf to <unknown> + + + saved + xmp.iid:D27F11740720681191099C3B601C4548 + 2008-04-17T14:19:15+05:30 + Adobe Illustrator CS4 + + + / + + + + + converted + from application/pdf to <unknown> + + + converted + from application/pdf to <unknown> + + + saved + xmp.iid:F97F1174072068118D4ED246B3ADB1C6 + 2008-05-15T16:23:06-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:FA7F1174072068118D4ED246B3ADB1C6 + 2008-05-15T17:10:45-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:EF7F117407206811A46CA4519D24356B + 2008-05-15T22:53:33-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:F07F117407206811A46CA4519D24356B + 2008-05-15T23:07:07-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:F77F117407206811BDDDFD38D0CF24DD + 2008-05-16T10:35:43-07:00 + Adobe Illustrator CS4 + + + / + + + + + converted + from application/pdf to <unknown> + + + saved + xmp.iid:F97F117407206811BDDDFD38D0CF24DD + 2008-05-16T10:40:59-07:00 + Adobe Illustrator CS4 + + + / + + + + + converted + from application/vnd.adobe.illustrator to <unknown> + + + saved + xmp.iid:FA7F117407206811BDDDFD38D0CF24DD + 2008-05-16T11:26:55-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:FB7F117407206811BDDDFD38D0CF24DD + 2008-05-16T11:29:01-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:FC7F117407206811BDDDFD38D0CF24DD + 2008-05-16T11:29:20-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:FD7F117407206811BDDDFD38D0CF24DD + 2008-05-16T11:30:54-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:FE7F117407206811BDDDFD38D0CF24DD + 2008-05-16T11:31:22-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:B233668C16206811BDDDFD38D0CF24DD + 2008-05-16T12:23:46-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:B333668C16206811BDDDFD38D0CF24DD + 2008-05-16T13:27:54-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:B433668C16206811BDDDFD38D0CF24DD + 2008-05-16T13:46:13-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:F77F11740720681197C1BF14D1759E83 + 2008-05-16T15:47:57-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:F87F11740720681197C1BF14D1759E83 + 2008-05-16T15:51:06-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:F97F11740720681197C1BF14D1759E83 + 2008-05-16T15:52:22-07:00 + Adobe Illustrator CS4 + + + / + + + + + converted + from application/vnd.adobe.illustrator to application/vnd.adobe.illustrator + + + saved + xmp.iid:FA7F117407206811B628E3BF27C8C41B + 2008-05-22T13:28:01-07:00 + Adobe Illustrator CS4 + + + / + + + + + converted + from application/vnd.adobe.illustrator to application/vnd.adobe.illustrator + + + saved + xmp.iid:FF7F117407206811B628E3BF27C8C41B + 2008-05-22T16:23:53-07:00 + Adobe Illustrator CS4 + + + / + + + + + converted + from application/vnd.adobe.illustrator to application/vnd.adobe.illustrator + + + saved + xmp.iid:07C3BD25102DDD1181B594070CEB88D9 + 2008-05-28T16:45:26-07:00 + Adobe Illustrator CS4 + + + / + + + + + converted + from application/vnd.adobe.illustrator to application/vnd.adobe.illustrator + + + saved + xmp.iid:F87F1174072068119098B097FDA39BEF + 2008-06-02T13:25:25-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:F77F117407206811BB1DBF8F242B6F84 + 2008-06-09T14:58:36-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:F97F117407206811ACAFB8DA80854E76 + 2008-06-11T14:31:27-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:0180117407206811834383CD3A8D2303 + 2008-06-11T22:37:35-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:F77F117407206811818C85DF6A1A75C3 + 2008-06-27T14:40:42-07:00 + Adobe Illustrator CS4 + + + / + + + + + saved + xmp.iid:00801174072068118F62EC69E7D04E31 + 2010-03-30T08:48:15-06:00 + Adobe Illustrator CS4 + / + + + saved + xmp.iid:26665edd-e4a0-4a03-aa39-7964ff2d7809 + 2022-09-06T15:17:39-04:00 + Adobe Illustrator 24.3 (Macintosh) + / + + + + Document + Print + AIRobin + False + False + 1 + + 500.000000 + 300.000000 + Pixels + + + + Cyan + Magenta + Yellow + Black + + + + + + Default Swatch Group + 0 + + + + White + RGB + PROCESS + 255 + 255 + 255 + + + Black + RGB + PROCESS + 35 + 31 + 32 + + + CMYK Red + RGB + PROCESS + 236 + 28 + 36 + + + CMYK Yellow + RGB + PROCESS + 255 + 241 + 0 + + + CMYK Green + RGB + PROCESS + 0 + 165 + 81 + + + CMYK Cyan + RGB + PROCESS + 0 + 173 + 238 + + + CMYK Blue + RGB + PROCESS + 46 + 49 + 145 + + + CMYK Magenta + RGB + PROCESS + 235 + 0 + 139 + + + C=15 M=100 Y=90 K=10 + RGB + PROCESS + 190 + 30 + 45 + + + C=0 M=90 Y=85 K=0 + RGB + PROCESS + 238 + 64 + 54 + + + C=0 M=80 Y=95 K=0 + RGB + PROCESS + 240 + 90 + 40 + + + C=0 M=50 Y=100 K=0 + RGB + PROCESS + 246 + 146 + 30 + + + C=0 M=35 Y=85 K=0 + RGB + PROCESS + 250 + 175 + 64 + + + C=5 M=0 Y=90 K=0 + RGB + PROCESS + 249 + 236 + 49 + + + C=20 M=0 Y=100 K=0 + RGB + PROCESS + 214 + 222 + 35 + + + C=50 M=0 Y=100 K=0 + RGB + PROCESS + 139 + 197 + 63 + + + C=75 M=0 Y=100 K=0 + RGB + PROCESS + 55 + 179 + 74 + + + C=85 M=10 Y=100 K=10 + RGB + PROCESS + 0 + 147 + 69 + + + C=90 M=30 Y=95 K=30 + RGB + PROCESS + 0 + 104 + 56 + + + C=75 M=0 Y=75 K=0 + RGB + PROCESS + 41 + 180 + 115 + + + C=80 M=10 Y=45 K=0 + RGB + PROCESS + 0 + 166 + 156 + + + C=70 M=15 Y=0 K=0 + RGB + PROCESS + 38 + 169 + 224 + + + C=85 M=50 Y=0 K=0 + RGB + PROCESS + 27 + 117 + 187 + + + C=100 M=95 Y=5 K=0 + RGB + PROCESS + 43 + 56 + 143 + + + C=100 M=100 Y=25 K=25 + RGB + PROCESS + 38 + 34 + 97 + + + C=75 M=100 Y=0 K=0 + RGB + PROCESS + 101 + 45 + 144 + + + C=50 M=100 Y=0 K=0 + RGB + PROCESS + 144 + 39 + 142 + + + C=35 M=100 Y=35 K=10 + RGB + PROCESS + 158 + 31 + 99 + + + C=10 M=100 Y=50 K=0 + RGB + PROCESS + 217 + 28 + 92 + + + C=0 M=95 Y=20 K=0 + RGB + PROCESS + 236 + 41 + 123 + + + C=25 M=25 Y=40 K=0 + RGB + PROCESS + 193 + 180 + 154 + + + C=40 M=45 Y=50 K=5 + RGB + PROCESS + 154 + 132 + 121 + + + C=50 M=50 Y=60 K=25 + RGB + PROCESS + 113 + 101 + 88 + + + C=55 M=60 Y=65 K=40 + RGB + PROCESS + 90 + 74 + 66 + + + C=25 M=40 Y=65 K=0 + RGB + PROCESS + 195 + 153 + 107 + + + C=30 M=50 Y=75 K=10 + RGB + PROCESS + 168 + 124 + 79 + + + C=35 M=60 Y=80 K=25 + RGB + PROCESS + 138 + 93 + 59 + + + C=40 M=65 Y=90 K=35 + RGB + PROCESS + 117 + 76 + 40 + + + C=40 M=70 Y=100 K=50 + RGB + PROCESS + 96 + 56 + 19 + + + C=50 M=70 Y=80 K=70 + RGB + PROCESS + 59 + 35 + 20 + + + R=245 G=187 B=58 + PROCESS + 100.000000 + RGB + 245 + 187 + 58 + + + R=236 G=123 B=49 + PROCESS + 100.000000 + RGB + 236 + 123 + 49 + + + R=213 G=86 B=43 + PROCESS + 100.000000 + RGB + 213 + 86 + 43 + + + R=127 G=214 B=246 + PROCESS + 100.000000 + RGB + 127 + 214 + 246 + + + R=191 G=234 B=251 + PROCESS + 100.000000 + RGB + 191 + 234 + 251 + + + + + + Grays + 1 + + + + C=0 M=0 Y=0 K=100 + RGB + PROCESS + 35 + 31 + 32 + + + C=0 M=0 Y=0 K=90 + RGB + PROCESS + 64 + 64 + 65 + + + C=0 M=0 Y=0 K=80 + RGB + PROCESS + 88 + 89 + 91 + + + C=0 M=0 Y=0 K=70 + RGB + PROCESS + 109 + 110 + 112 + + + C=0 M=0 Y=0 K=60 + RGB + PROCESS + 128 + 129 + 132 + + + C=0 M=0 Y=0 K=50 + RGB + PROCESS + 146 + 148 + 151 + + + C=0 M=0 Y=0 K=40 + RGB + PROCESS + 166 + 168 + 171 + + + C=0 M=0 Y=0 K=30 + RGB + PROCESS + 187 + 189 + 191 + + + C=0 M=0 Y=0 K=20 + RGB + PROCESS + 208 + 210 + 211 + + + C=0 M=0 Y=0 K=10 + RGB + PROCESS + 230 + 231 + 232 + + + C=0 M=0 Y=0 K=5 + RGB + PROCESS + 241 + 241 + 242 + + + + + + Brights + 1 + + + + C=0 M=100 Y=100 K=0 + RGB + PROCESS + 236 + 28 + 36 + + + C=0 M=75 Y=100 K=0 + RGB + PROCESS + 241 + 101 + 34 + + + C=0 M=10 Y=95 K=0 + RGB + PROCESS + 255 + 221 + 21 + + + C=85 M=10 Y=100 K=0 + RGB + PROCESS + 0 + 161 + 75 + + + C=100 M=90 Y=0 K=0 + RGB + PROCESS + 34 + 64 + 153 + + + C=60 M=90 Y=0 K=0 + RGB + PROCESS + 127 + 63 + 151 + + + + + + + Adobe PDF library 16.07 + + + + + + + + + + + + + + + + + + + + + + + + + endstream endobj 3 0 obj <> endobj 5 0 obj <>/Resources<>/ExtGState<>/Properties<>>>/Thumb 27 0 R/TrimBox[0.0 0.0 500.0 300.0]/Type/Page>> endobj 24 0 obj <>stream +H‰´WÍŽ$· ¾÷SÔ ´V"EJºfläd†y€†<œ} ßG©ºª»Ç“݃±Øé"«Ä~¤¾üãmûòÓ[ÞþöÃÛvùã’7Íy3ü¿òá¿¿^þ¹ýçòå헼ݾn9ùèøÛ¥â¯ä¾m_oxýw¼þ××Ë[Ù2þ•MJçV¶>’>¶Ûû…oÞ/ת©¨mꩱ]Knxh›–Ô­‚®5µáx(Iºm· >©©Vݤ'¯ÃFªÝIÊ U;>—” z.¿]~¾ÀB¨ÈIµáoñ1í<ì3ˆPÓ­Œ°³ö¹'¡Ð+~¿xIpãZÜ5Ðÿ^ +†PAíBCž¸¥QÚ&Â/ûI¼&­/i(Å™¥Œ÷×6ݸËg¨C=TŸe—Q“–ë›%órQ {§ýWëi xWï°– q¸¶] ö‡JáŸZžu +wiP^‘8-vÖÙ"¤&Q>²ÏS=‰gP2 L~q‰qª&[1bäñç ‘!t" ,hrjÐtrÁi}CĨ©½(q$[JmÍrV¢=©ÛVYt^zGËH½9*ª¡Wx‚òè,•ÝŒ^üÞ#ý3òH:2Ò‡RïßÍ0‡ó}ÚIÈ „Å™Å84d°Œ¨ž–Z¦•°)Ê ]0¢X{;S¨ô5–zï ÁJ?ƒRël)§˜kI™»K^*ˆ¢|EÖ ƒzÑ +hc(`°±¹Sÿö"ˆ g߀è†QN&IbCGÐóÑîšUNä‹ÇÅjÊn*rÞ"TŽF[?À·ßhW¥]% +GûGvåR)càS„äTœxÓ§aŠ¤#¸ØêAî!¨@• +*î¹Ï‡j• jØC `ÅÔ;Ú®lõõ¬‰yc0VšÚ`Þ‚`ÔdÀT$Øԧ͊^£ ¢Õae‹€ŒÔëKD:­D2_­l³ Y¬Dþ“59ô4ß^-_zù(K˜˜nÏgHevfE¤àmoôyš;`5£o>І^+ôÈ({ðê«ÇÈÈ•øô í›¼8åøÚ(ïÒêg>Ì‘!+òZ?þF™ƒ¬Œ§cô⧮؆ÏÑŸ‹‚º•Ä‘BÆ™˜ƒšfÙc÷M‘Ç;åW“è)£ªx¤m x: ub .äÁ=èÂO†ì +­¤01«q·KÞé4Âœ‡CôÌûÎ@ÊkäšÓHCNÆA²à¼î4ñ‹ˆÇNO´&,­©`›€‰`9,wøà¯8 o°œ,Ó]‚/' Ðãßž±ý25ÏñnÜ] ª;1ŶAváèÍ\Ûõ|Cߟ¯šÍf9=.ï_kK† Ëh~-ûã&Ð{ 0¯u:X´Z‹.)³W?à‡û[™ý|Š±º# +Åš¤m,†bá£W‹®˜›ª–=H.Ñ<ˆ—:³€©±7 ƒ"QT&;§qò?pro§y¡aJž€È©{åäžSl6LçPÃl]†ß8ìŠN(}®æŽRV +¼u9ã~’Ï„ŠØq Ô•EÔ‚-á=p)ôsà{¹?Ó´ùÝÎ`†˜6V< f„(ñ¹ü_z”ѣ氌ØáâvQäAï´ºªH‡ÇÒG¹•ó5èÍC­¶èjî;LÝý“k0}‹F!¦Åþ/)Åc^pòs«¾0ïž +46Ñÿnœ…¨&CÄ<ÐIàò¤Q·Š¢m‹†uR&Ž`îDq…¥¨áòLË*œ%Ѧ;YÀÎGi§¤‰D+ ¶U• ¥°»•ø°¾cà_… ÕHo9ÈÈ€Öã5ñ,„9>î.-Êƨ¸E•ZYº£~ñ†Kxaê‰æŒ‹Q×íçµT÷)`#±ÁÇ)édÀ*$DÎHžc|{.ogÜúGà‘Ï @À}¥¹•›h µ¤j -¼±+ q³ -*Œ·,=°NͱsÐØ ®¸}þñ`Þžð„:Œý…ƒ¬±¯lµ<÷kbï 0m]$ç¥ìä Á9=jš‡ÆXŒá–raRyûœÒå.´ˆp%`ô¹,•C4|*cÑœIgÚçK}ÐÙE#܃N k5°S#±=sr»Ü9‡_Ú¢^9Ú[ì_yŤ³¸‰¾eR3¸EÕçØ$ØWÂÛh‰žÇVš#x2 +OÎ… uÞ ‚:Î}qœBO {8ù¶äª®EÏ[›šû+% enQgaºµ½5ÊøÞQöÊq÷}ðþçèn't·gt·Oѽ>£ûx@w}@÷òïò×Ã{;û}ïõx—3¼—3¾û'øÞð½=ã{û ßë3¾—€ïßðõàóø¿ߟ¾?#|ÿ áÛ—ïFxÜ<¸—³/aõ Þ©-À§jrî­;mè¬úJçÓ3MmÝÏ!ØÙ™¹6afçC)''?>g÷/_9‡Æ‹Ã+ã°r?óÂùÓ¥¯Åå0×|<Ü È”ÙámM›;®À¢+W}k6a1šª÷ƒDÛõ>ë`q„¥Ì&´:¢Ô:ÇG f©O4QosÍ [æÊ5î„ô}Q·‹!íNódT´ÍDÆ­>ûCà @žÇ-f(ê¹ÈAÞG×çÁ:  +1Fè´Â‡e20©·@“:$ÜYµzƈ¦ü|@ä;¯¥6$¯™ Å™ÈÉÐز ÿíB¤„7XZN+‚P¾Sl.c´v†XØÉ£ŒJl1r¸Üà-„÷5å€Qlx1ϵ:ÂFlQÈĈ ãý5m&pópL)ÀèÐ)½- ÅÜ-¨=j–XÐfÈÃÈE£8,"wç0êPÞ`L=_©©u]©FÄŒoR¿Góçgî +•„qؽœ¦ ä¸•°K`¡O x„eÖ°vqÓA ³@XøðŠiAÝ×MjØÄPË‹};Y4ÂÄýÇœË+JÂh ð±z8Šü î¨[…«h]6¹)^j8 ­ïªidW_'„yÃß/˜z³‹†GìO9hUÀÔ†mÄë\ ¶ +1€Œ&2-°µö¾ +<‡~AØj)¿¨»³û[4•ÓIs l  ;Ý£1”›Œýâ +ƒÜ'€(´ÂÞîíNF´2G·³ßwÆ„¶zÛðó?çø endstream endobj 27 0 obj <>stream +8;Z\r0lFl_$q8W?4bu$g(W^$RaX>.I3ogA,Ub86JYWr2L]mqK4s#MGT`E#nf:7\L@ +#ss`Un&K>R=C[?*L@>'ClJBPibN+YL%*]EqpQ6K5Bn>'?>hKY7Rh.nB4>USZ%(Kh* +M!MEqmmm(pG?"6I,"qP=hk*4XJsB/UA[IP"iKp"jo>$1$Yg4=1&fZBA@:EO.S^4nt29nU&aZA5`5@\3C0oK7 endstream endobj 28 0 obj [/Indexed/DeviceRGB 255 29 0 R] endobj 29 0 obj <>stream +8;X]O>EqN@%''O_@%e@?J;%+8(9e>X=MR6S?i^YgA3=].HDXF.R$lIL@"pJ+EP(%0 +b]6ajmNZn*!='OQZeQ^Y*,=]?C.B+\Ulg9dhD*"iC[;*=3`oP1[!S^)?1)IZ4dup` +E1r!/,*0[*9.aFIR2&b-C#soRZ7Dl%MLY\.?d>Mn +6%Q2oYfNRF$$+ON<+]RUJmC0InDZ4OTs0S!saG>GGKUlQ*Q?45:CI&4J'_2j$XKrcYp0n+Xl_nU*O( +l[$6Nn+Z_Nq0]s7hs]`XX1nZ8&94a\~> endstream endobj 22 0 obj <> endobj 30 0 obj [/View/Design] endobj 31 0 obj <>>> endobj 26 0 obj <> endobj 25 0 obj [/ICCBased 32 0 R] endobj 32 0 obj <>stream +H‰œ–yTSwÇoÉž•°Ãc [€°5la‘QIBHØADED„ª•2ÖmtFOE.®c­Ö}êÒõ0êè8´׎8GNg¦Óïï÷9÷wïïÝß½÷ó '¥ªµÕ0 Ö ÏJŒÅb¤  + 2y­.-;!à’ÆK°ZÜ ü‹ž^i½"LÊÀ0ðÿ‰-×é @8(”µrœ;q®ª7èLöœy¥•&†Qëñq¶4±jž½ç|æ9ÚÄ +V³)gB£0ñiœWו8#©8wÕ©•õ8_Å٥ʨQãüÜ«QÊj@é&»A)/ÇÙgº>'K‚óÈtÕ;\ú” Ó¥$ÕºF½ZUnÀÜå˜(4TŒ%)ë«”ƒ0C&¯”阤Z£“i˜¿óœ8¦Úbx‘ƒE¡ÁÁBÑ;…ú¯›¿P¦ÞÎӓ̹žAü om?çW= +€x¯Íú·¶Ò-Œ¯Àòæ[›Ëû0ñ¾¾øÎ}ø¦y)7ta¾¾õõõ>j¥ÜÇTÐ7úŸ¿@ï¼ÏÇtÜ›ò`qÊ2™±Ê€™ê&¯®ª6ê±ZL®Ä„?â_øóyxg)Ë”z¥ÈçL­UáíÖ*ÔuµSkÿSeØO4?׸¸c¯¯Ø°.òò· åÒR´ ßÞô-•’2ð5ßáÞüÜÏ ú÷Sá>Ó£V­š‹“då`r£¾n~ÏôY &à+`œ;ÂA4ˆÉ 䀰ÈA9Ð=¨- t°lÃ`;»Á~pŒƒÁ ðGp| ®[`Lƒ‡`<¯ "A ˆ YA+äùCb(Š‡R¡,¨*T2B-Ð +¨ꇆ¡Ðnè÷ÐQètº}MA ï —0Óal»Á¾°ŽSàx ¬‚kà&¸^Á£ð>ø0|>_ƒ'á‡ð,ÂG!"F$H:Rˆ”!z¤éF‘Qd?r 9‹\A&‘GÈ ”ˆrQ ¢áhš‹ÊÑ´íE‡Ñ]èaô4zBgÐ×Á–àE#H ‹*B=¡‹0HØIøˆp†p0MxJ$ùD1„˜D, V›‰½Ä­ÄÄãÄKÄ»ÄY‰dEò"EÒI2’ÔEÚBÚGúŒt™4MzN¦‘Èþär!YKî ’÷?%_&ß#¿¢°(®”0J:EAi¤ôQÆ(Ç()Ó”WT6U@ æP+¨íÔ!ê~êêmêæD ¥eÒÔ´å´!ÚïhŸÓ¦h/èº']B/¢éëèÒÓ¿¢?a0nŒhF!ÃÀXÇØÍ8ÅøšñÜŒkæc&5S˜µ™˜6»lö˜Iaº2c˜K™MÌAæ!æEæ#…åÆ’°d¬VÖë(ëk–Íe‹Øél »—½‡}Ž}ŸCâ¸qâ9 +N'çÎ)Î].ÂuæJ¸rî +î÷ wšGä xR^¯‡÷[ÞoÆœchžgÞ`>bþ‰ù$á»ñ¥ü*~ÿ ÿ:ÿ¥…EŒ…ÒbÅ~‹ËÏ,m,£-•–Ý–,¯Y¾´Â¬â­*­6X[ݱF­=­3­ë­·YŸ±~dó ·‘ÛtÛ´¹i ÛzÚfÙ6Û~`{ÁvÖÎÞ.ÑNg·Åî”Ý#{¾}´}…ý€ý§ö¸‘j‡‡ÏþŠ™c1X6„Æfm“Ž;'_9 œr:œ8Ýq¦:‹ËœœO:ϸ8¸¤¹´¸ìu¹éJq»–»nv=ëúÌMà–ï¶ÊmÜí¾ÀR 4 ö +n»3Ü£ÜkÜGݯz=Ä•[=¾ô„=ƒ<Ë=GTB(É/ÙSòƒ,]6*›-•–¾W:#—È7Ë*¢ŠÊe¿ò^YDYÙ}U„j£êAyTù`ù#µD=¬þ¶"©b{ųÊôÊ+¬Ê¯: !kJ4Gµm¥ötµ}uCõ%—®K7YV³©fFŸ¢ßY Õ.©=bàá?SŒîÆ•Æ©ºÈº‘ºçõyõ‡Ø Ú† žkï5%4ý¦m–7Ÿlqlio™Z³lG+ÔZÚz²Í¹­³mzyâò]íÔöÊö?uøuôw|¿"űN»ÎåwW&®ÜÛe֥ﺱ*|ÕöÕèjõê‰5k¶¬yÝ­èþ¢Ç¯g°ç‡^yïkEk‡Öþ¸®lÝD_p߶õÄõÚõ×7DmØÕÏîoê¿»1mãál {àûMśΠnßLÝlÜ<9”úO¤[þ˜¸™$™™üšhšÕ›B›¯œœ‰œ÷dÒž@ž®ŸŸ‹Ÿú i Ø¡G¡¶¢&¢–££v£æ¤V¤Ç¥8¥©¦¦‹¦ý§n§à¨R¨Ä©7©©ªª««u«é¬\¬Ð­D­¸®-®¡¯¯‹°°u°ê±`±Ö²K²Â³8³®´%´œµµŠ¶¶y¶ð·h·à¸Y¸Ñ¹J¹Âº;ºµ».»§¼!¼›½½¾ +¾„¾ÿ¿z¿õÀpÀìÁgÁãÂ_ÂÛÃXÃÔÄQÄÎÅKÅÈÆFÆÃÇAÇ¿È=ȼÉ:ɹÊ8Ê·Ë6˶Ì5̵Í5͵Î6ζÏ7ϸÐ9кÑ<ѾÒ?ÒÁÓDÓÆÔIÔËÕNÕÑÖUÖØ×\×àØdØèÙlÙñÚvÚûÛ€ÜÜŠÝÝ–ÞÞ¢ß)߯à6à½áDáÌâSâÛãcãëäsäüå„æ æ–çç©è2è¼éFéÐê[êåëpëûì†ííœî(î´ï@ïÌðXðåñrñÿòŒóó§ô4ôÂõPõÞömöû÷Šøø¨ù8ùÇúWúçûwüü˜ý)ýºþKþÜÿmÿÿ ÷„óû endstream endobj 7 0 obj <> endobj 16 0 obj <> endobj 17 0 obj <>stream +%!PS-Adobe-3.0 %%Creator: Adobe Illustrator(R) 24.0 %%AI8_CreatorVersion: 26.5.0 %%For: (Kristina Davis) () %%Title: (PyomoDOE.ai) %%CreationDate: 9/15/22 3:11 PM %%Canvassize: 16383 %%BoundingBox: 59 70 397 221 %%HiResBoundingBox: 59.4353000000001 70.9548995583682 396.623100000001 220.928 %%DocumentProcessColors: Cyan Magenta Yellow Black %AI5_FileFormat 14.0 %AI12_BuildNumber: 223 %AI3_ColorUsage: Color %AI7_ImageSettings: 0 %%RGBProcessColor: 0.498039215803146 0.839215695858002 0.964705884456635 (R=127 G=214 B=246) %%+ 0.749019622802734 0.917647063732147 0.984313726425171 (R=191 G=234 B=251) %%+ 0.835294127464294 0.337254911661148 0.168627455830574 (R=213 G=86 B=43) %%+ 0.925490200519562 0.482352942228317 0.192156866192818 (R=236 G=123 B=49) %%+ 0.960784316062927 0.733333349227905 0.227450981736183 (R=245 G=187 B=58) %%+ 0 0 0 ([Registration]) %AI3_Cropmarks: 0 0 500 300 %AI3_TemplateBox: 250.5 149.5 250.5 149.5 %AI3_TileBox: -38 -206 538 528 %AI3_DocumentPreview: None %AI5_ArtSize: 14400 14400 %AI5_RulerUnits: 6 %AI24_LargeCanvasScale: 1 %AI9_ColorModel: 1 %AI5_ArtFlags: 0 0 0 1 0 0 1 0 0 %AI5_TargetResolution: 800 %AI5_NumLayers: 1 %AI17_Begin_Content_if_version_gt:24 4 %AI10_OpenToVie: -1213.02976175998 764.38503815438 0.751786916543069 0 8033.52634516619 8344.11967269269 2547 1303 18 1 0 6 45 0 0 0 1 1 0 1 1 0 1 %AI17_Alternate_Content %AI9_OpenToView: -1213.02976175998 764.38503815438 0.751786916543069 2547 1303 18 1 0 6 45 0 0 0 1 1 0 1 1 0 1 %AI17_End_Versioned_Content %AI5_OpenViewLayers: 7 %AI17_Begin_Content_if_version_gt:24 4 %AI17_Alternate_Content %AI17_End_Versioned_Content %%PageOrigin:0 0 %AI7_GridSettings: 72 8 72 8 1 0 0.800000011920929 0.800000011920929 0.800000011920929 0.899999976158142 0.899999976158142 0.899999976158142 %AI9_Flatten: 1 %AI12_CMSettings: 00.MS %%EndComments endstream endobj 18 0 obj <>stream +%AI24_ZStandard_Data(µ/ýXü¼ +@®T.ÐŒhҊ꼸 ¤èØëŽôå%×DÄï†'’ì&ÛàSu>(K=Œš¥Þ gùÔDI$FÒ£?o¾|‘ÿ÷ïçß×ÏÇ{ûäÉ/Nܽ]=ýÜ<5êÓ¦K“ööîæ¾6Ã… :[–ô2®!îÝ^%Mj›4™Ò¬«•Š—N!›‰ ’b(‰aÁÐr Þ‰7~¼'·¿ÇÏ×ßçï÷ÿ'_ÞüyôéA’DÉ&Mœ\¸0ággæeådäcã´hÍš-û뻫››{k›)R£E‰¾Öï©):Y’prjf"B„ $èظȈˆH=Z´ˆ¾ºº¶²®ªª¦¢š¦L‰ò¤I“%ß³sS3óÒ2aB„ ,HªÑ‘ºã׌vÈ’!cÅŠ¡¨ÇÆÆbd\\TTLLÌh|dãKõ6~m ‰×À'6Á†Ÿã³óþfô¥#"ºf ïßš¡½.~jÅ`oà#µ×Òwµ‘ï0ÃõŽùîu±3•µñôqÓoóþú•±óRý3Z³“­•ûþ0C³Ñ‘ÿìµ3°°P X±bèa×@ ›Ì«ê¾Öʘf¾¾Ï~˜Ázi¾‡Ÿæ¯‡ Ê,,” &$@8, + +  €Ð…2($4L$BB@*  +L¨p€ +<È‹† Iñà ,d¡"0AÃC1$ñð `€Ñ°€PHh0€Ñ@c0˜ˆxˆ0Š "$, *LqÊñC4Î.‘þ4Ð9}÷ßuÿ°6ÎrS EXX¨C4úÓ°6~ ˆ( "@Tˆ( •ž|E&&*€h0 e€Æƒ4°'€7( "ÂaCÃp"&DP ÁÅÃÂA‚……Z°€±`¡‚†c4@8,&$°€Á@""íÁh0€ˆp˜P(‚C@ÊEcA HD,$ 0 ‹…„YX(ÃŽd@ဈ„ °èjÝßÇåáȆaþ¶‡l»Ïxv˜qÆ©%çyë[ú÷>›úaõ |ycÙáš^¾Ú/î•žçu»¢ý>:²zº¯:â§áaå@/Oì­êÊBwèÇŒZp†#‚bBDö1ûp7ÒØKƒU¼åÃ0 Ã0¾ýÈ6T?gÃ0 Ãx vX¡Úae‡•~ßò»§V·º–íwh¬Î‡ýéžœö¨§¬xºxnƵ@±X( $8(ÀX< ÂÁ]UU•{Y)?U†‹œSJùÒeK–+‰&z¾ècñ÷j_Ô¦Iwozî#šô˜¬’{Uî)Ε^¾Ž0*ôƒ\ˆªÓ—ê“ñ”–¹âX°\õ¥Gõ–Ì»‘·ÎWâ+vµ@áP€CÃ0 Ã8=¬v{Uñ9õ³+ö¾c~\cO>¬”Õ6Û?Yµù°Jßß6´6ôçÃÐ0£D, E„†ôºØz²ú~Ÿá2®º§â)jzº±2f&«/ªö^6Þ‘g<§‡ý–½º‡l«o™ßìúŒz~¬‡Ç}x®Öú˜Ïɹܙ¸¹j¦ª¦}‡:ëûòZ3¹›¨ªzúÿÉ–˜Üìv¼‹Œo‹§¨v\{¬‡gfèñaFjf¼þuú¶ùñšáu¦®æaF2«ï·¥"îaÿåaÆ没&ïªþöò§Ý¯mãaF¢íÚw¢§úµ??Ÿ[këaõÀ¹O¹êóòãÇ q¾”2Õ£GÎxô$ƒL½}"×åÏ7æìárÎ5òS 缸7HùË”¦¦mNŽ2dÉ.9âqãÞ…¯åO­wñj–6æëê\_ŒmIS|-¢…·u{Y^µ´d‡ük/IÑzA—p{ÿlJ?|²õ»ô!Læ5u1”DKDyâÄO„—Øé'Oê*%I“âºqíÎbXn/RŒŸb'Ç©ÜõÄ”;æW¾¾òó·ÿ›?a})ßÕ¬ÕÕZMELyjzŠÒU[&Dé˜B!MšÔN$FЧ?ÊÿçGµ¨áBî¼7gg—66¶Y'NŒN!ìÌ›ŒVfÍ’£GË«g-X(& *80Š ʼnN©‰bvh‡©öϾg|˜±¬ÙŸhxxox|ëû®ï‡~Š½Üæëßïvxæ¾w˜q˜Ñyy˜ÑŸ›½¶þ»ŒÚo}†ø«©Üë¬xûçþnÍŒ˜ïï‡åfd'fœÞï›ê˜ë̇¯ly˜áÛ¨þxfŒ|‡ÕD°P +„Ï÷ó0C í¸5Ø8å¾óuþÃŒÛEÖµCÃ<ãµÖÞå=µæeëîU3;¬èérû_‘bP:;L]îóû#WÎÜ9té,Qk¹,‘Ÿ5/Ô§ bÆ ;7/kÚ³†w7nrJ9Ç¢B”Œšýt :È „.Ù¥L{Šè¡tÒA¦Oq,毥E‹NÙ*äß›W 4xaBËÓÜäO’%ËäI”)MOQSUWù•hAš´”suwy{}É–5{mZãcädåZfnv~&\ØðaĈ[_cgkosw{“.mú4êÔæçèéêëìíîÆ#/ógK‘Røò(-‹e°8ö™ÒÄK TÐ#G)Jt(tÎ)‡œ?~øìÑSyðܱ“ 2ÆCÌ7mبI“Í™TP9Å”RHù⥠—-Z²`I¤Žürô{W¬XAäC +!䉓&ö™nÌ›¨ìþiÝÚŒæýmo¿zý禑!tLŸõBä²wVë¹°ç‘/¬„Ž)†:SJù2„0QÆ5Ïë}¿Õ\?bh~05™hùº1ªslƒæ$G’hBÎq)¤˜d&¥ÁeSnÜ$;ÖmNƒa"Þ"åä6×—:uæ$»dvxY{ýçŤKD­EGÇ~R ƒU„˜šÊÅíïI¾˜’UÌAe¡²*¬¶·b'oÏÑ¢#ÁÌM6ˆÒ—¬aîtíR?\L}µ“dìðTêrûŸzRL*†;Ûÿ5ܱDvÃÃŒW>WÞLóÖ÷\WM3Ç_v7u]îFÌçìleÔÅåCæ4ZUÛ1ó¾ÕüÖ™}OÛ”Q}7[óðSÒ$Ê”hÖd÷ä¹S ¡¥ üü’;US7=DýÖÎEFDF?ÛܾxŒkؾö¿îÎÝoªhÍ«xŸ¦»ÚŽØˆþÈŒ÷÷`ïÿû£¹/ï3RÌIæ˜cZÛ‹Á ”P1ÜÿI«ª{Ø¡jøóóR«?ª]þ,Ô©õ‹­ÙâÚÊKè5:¥KŸÂŠþÛÞøVÒSÈ7k¢Öqs´¸9¯2UȧO_|+Jœø°Y¤šlØÆU¨Ž)䫹^\_¢ÊYÖ×SueÓ”ð´a"kj˜©‡ÛnœvlfÆÙí÷¬‡þ|{ÍÊûo¸«§žûzümª}ÖÎë­n~©÷άÆoÉöi¦jy¬f†œÈ–Éì×~f‹mœìýŸvöp1”–=r拾ãÏ°í½sÃK4Þµ\|¼Óö»³uÔ_ìíÔ?T ã_EãÛ7ÎÎtÕäk_ÜÃŒ·5w}v·ûø66{½÷3CË=´ͳç45D;ìQ/Ó²ñ»ý/ÑþÓ89½³»«Ùaérù“bªª_¬&æô9TYˆÞ”N‚¤•¼©O…¼¾ *ßúâCÇòÞ‹l´HyB<Ïõ²NÎËJÒ?¡A‚$ÏÎu9}/åìäöìþ:ˆ»êª¾H1ÙRÌùß3u:‘JFȧ:ô¼ÜåE¬$&ù,­Õeª”•SÅ„zæm¶³qk§êµþ3·¯éaêÈŠh×y÷n§úæצýæ}û{Í؆ÊîéŸÍާ列k«¬ñÊR¡S–Õ奾˜ò—ml†QÕRÆuH³^BÇò^¼Z—PRo•*¯ª*«”3eº2•éM¼iª\iÅKfIŸ)Š…6áSë… CÞ››ñ¾œt4Å{öââåÍyAç˜2æå~ƒ 2£J×÷‚ bتr¾ÎKVÈóæID²¼7×rwû›OýÈ }Þž/ÿÉç©&FÅëÒ%;% }1˜"ÕwËMŠ)”Zµõí|kv˜å¤ðp« D…2ëŒksuù‹¶õù9/¶½â§‘×ùt¨§³çaÆm¶!žÙ7^3fcoª¿¢uc»©t¶°^¡‚J‹&^*t ©òDÈ$¯+]†Ž)ä½K CŠT™CŽŸ6µFM1ÇÊÌ¢cJ‘/†²Òâf5N[½»Ë³ST‡ˆ¨L¿s´KÙ+‘ãâ*;L2kIÊÓÔ5ÿ\ + !,YÊô×-æ^žóµúî¦;g¢]ŸþÛ¿£sÛsÇ“#³Ì7Cn¾Ç;·ß{³Coóç}7n4lMî}e;WÔCý¾ä~Ìtõoã7ÍLûˆFÒЊaóî?}" --³ÓÕÛ6›™¨Ô²¶¡Ð!cfjc`A(†C‚¹pLï€nj<”\@™J£IÄ(ˆ¢Œ1ÆÀ0F† tã2À1~„ º[©³ð‘„œK»_ :ÅŸãã±ÙwÿßfÂkU%0^çïð + nªÑ×þÖÙ—jòü|7k>p-îgTt©),y¤{GE[Î ç”  +5J'—f+“$«]rº¡Á[®zjO\3uÆ°\¸äÞEïÝw\= þrñ¥öìë‹1{/æãòÃÒ"cÍEN^Ü/FÞ{1¿²Ó¡Íû6†j¦‡·ƒôÆzÞX$™^bY¨º¿?h:!Ž<æ¦ÎyœkëõY A\÷ÕÞ:ÌaÐ"þ"ÆhÇlŠv`>f¹L-y¸î5pìl7zщŽS°b Ï´‚£MüŠ`mpc“[øÖAÐøë‘áw—…SŸéM®}c¢ˆÊ b)¤³îFUü|Ç©ïGP~µ¢6…9{N“ŽΪ;bR + SM šõØaIËNéñÂ{‰>†[jAÖ©b*¯@µGá‰ã]ê/¦ OÁš£Þt\ØøðØ'níê8J2΃J¿ÃŒž²'Ò~gJÀ<­`žDšL]Ü…»yãƘz§ä–p¿Ê•øÄ&p4„bÄî5€¨ˆ~‡z§Å +¹âI‡5Ú2Õ]ËËI@α»{Žƒ„=hAˆ<…‡`4á®»+ó˜pä3‰ *•]k¿Á‡“º-#.õô0ɼ@5¥L–Z:a ¦‚(²#ðœèåè_'1ÒÙ.xn¦•Hm£íïGÙ†Ò1zD¸g³W§ Ýõq ¡¸T,ˆ)IGrUËIõ6NAWýDôü>;Bü¼~’„ÌAv-Ò`Ÿµ•û†sB–§N“1pÛO ‹TÓµãÊI„©»Ï1–Hvù4ËÆÆ7Xíjd¸p7‚ê§Qúrã|{XRU&u|'uP43ecâþ±qÔ4¼P=Ï£ ý©áܳJ^”ÿ/eY0uÞ€ùÊæ–Ì^Ÿz÷!RMй!±Ê9¢ ‰Hbò¾DR‰a‘ a ;Ù06áb&Í +V>üÐ…UÐ +»Å¨¼0<„C>ôm>\ÚQàJ©HDƒ­ ªßšB&–D!"´{&¬€Ç¤ò‘II—ÌÞ™IÙOL=4&˜0IÕSýÝþ,(G^¬ÁçT_@bk™©ˆ +yí`·Î"®²§Çg€ï¦éÖuäLÍnmAÆs£åÖ†vàÌ0›U„Tñ`y°örÙ´©«–iªŒ +¤}JZM4Ééë&Ä¥%Šò£¹ñs^€‡áÖñK t7„ã8ÅzLÉL³¤4?Bâ¦Vc2*î„è]¢ Õ½Ń—ìÍ«]Iúíß”MÕä+ÓKu-$#ºë§u#©k™}Ž¯g#¤¤Ú§£eD´)½á9þEáÜk¶‚u<šLoäIõ†ê„@{hô!90êÝhcÈZ¹2ˆ¤+~’jIæ\Úe›A槔‚ÿ qèaJ—ƒ|¿ùÐ’‹e[ÎÓt©¼äRˆ@ƒá"?4&èÁTÚ +ÂS,P‡6¿kS +õT³Ñ@\fñîCÔsѬr«YñG­b°‚°Ó•,9½‡à †ð#‰•|ï”|å<Ÿq²­õ¡u=ÍqÂÓd»y@ǬUþ”¢ô òçyÒ§MzùEìëpL &ðG8È×iõ§¾¡Pz†:ÆÆø‰xeù ÊXÉkgkñÎþå½Híe~ðÎ,¿¶OçM²°š°¥[Ì]§à˜‚†²´ÒóϹÉç .5;ßæÐûh–Ê'_ršÜtB&žWïÛ#2ٔ؂„O(ù¡R°©é6ä^C°Gº5©0  }!çÎmĸ5bÐs§4”B#LYeLF ¹n*€/ì¸dM6k]KðÃZ"VÀ\Àž,H¶lQûµÖçH¥yü%v¿Ç8^<´Þãà¤Uic‹ÅPü=M¼œXñ|ÁdØ÷@±ïÍŒ}?‰»òÝa~QLSù”~Jq†È.éÙ +àçý¹ŽM…—4 žûz÷tžXŽg½sǪ–ƒ6Äy¤Ó; ‚ͭײÃLŸ²ì—%FÝiæ}¢JÌ¡"!8º¤½ž§àÒ©]׶8Ý7ˆí±ö B|o°QÝÇ00"à.Ô.Îç?¡EwÊ~d +0÷{çú3~ô¡²0¸•ý›¬z~¾ÿX’èÚÍ¥Ìñ}2¸4êN!JJ»Úuã‹ò3¢”„‰+šaÖq^H*ŒOÔ)—Xõ›jÒDD…» ð%U†ï½Ö§ÖXòêòÒi¦·LˆÊ,»dÞ!F)ƒ@”Ï,&ëêwÆžÒKÊNÑ‚]X=ÎÔ!øÎû KøQM`ÛIÕúCtè Ñâ^‰ƒL«–¯*†( U½_pEþ²›…)Šq¨­53 ŸÔ´×ꂇFäûèˆq‡…,KÔ€d­4 ¡6x[gš/- âk%¬Â>1°*ºÍòOmj’(h¦£ZCóIuöÏøÊ($wUÆmF4w@M‰#Bœ½B³¿7`(¾(ÓÐ(ƒ™tŠâN_>à0þÐ=þØîîCÃiã+Äûü(àn~˜×rWhH¦ÙVn.ŸÏÏÙçïkÚZsWô4Z 90›»u¤òߧGwb=¡6/ýBXPß/ÚÕH ‰~úã[{•+’Ù­Jb*ÝH¿¢_‡ªôµñ8ã¸Ûѯ÷+hí QãWE*Wahê8˜%Dp©pzȬ«"‹÷ù§ãwh ˆvs–×Ó\¡î–AÉ™r0nò°E”÷30VÁÂÜ”ôàþt‰CÉg)ɵŽ¸+üC!ãTY€)â¸î˜0ÍlY®»™¬tŵþŠ’tw»¡»ÒÝ)ÀúO†˜š{ÚD- å´è³ZÎDÉú`2_0PbØ—ÕtJ!J¥2ÉìB‘ +·ï ”Î~Õê`Ú +Ë +8D6w“†SÄŠdõMä×îaKt.€3þ±'Œ yÑÔ¹§ï£‘ + ýƒ+L‡°›üktèV_‚!kCV|Axœyš}*äpÿuô‹Ë¤&jDð™V/9†—å ¡ÿaXRw?ݧSÃkç Û² §•@¾Jôc©ƒM|yOÏIºjÀµ“LH1-ŠU %°ì,<~,ó êgL›Ø;}ÊÁ {¢×FIR¸ïCá¢"·1DU ·’)ð„õ2áj€’DãRÊÊ\Ì^ B\öÐÊ^!mbç^ÃØ3™•!Ûû5öC«“QᢜX彋õšßTȬªª˜÷àטØBŽÌÀ„a2id⧷ú!âmY¦ý¡Mˆú¨†[Úô>·—»‰'ÑB~y dtÀÛuÎ<‡ÖTÞNª›º|’ôfš…X¼ÞJÁõWŽðhp ÿ‡&eܸéêhÙ¦ðȨgÈåÕs2ÂJãmlc—{!ŒX´· +)y¥=vGOÚK¢^E¦âuK±D4®,5½ÍÊPx3\`Ü%uò,iø6ez컣aÍÅ €¡Ã÷ jý#@^Þ¸„Œ¡#šz“‰ì­Úv•£F¼àäúr+W¢šeH7./'ùÈîÊÖjAãÒmà@FxïGW=Ñý¡2w~¼î¿h{÷iõìF°JûbßÜŸVKÝ9O±V6o+˜Z”ûÜL0ôצìýJ Ó…af„t‡À°M åÒï!G³€7¯ë‘¡ô‘J)!²} "1÷Ù—§Ö{ “|»Äµ.Îö§ø[IK(F@,‚R÷Ã<*›bãì/{ëz±.s‰çªk†‚,á?GΛ EžØ•èŽXØa½á†(MrŠv{<¹Â7Ca?¿S¿J±ˆJÞ¢¾»Iš¼îÉû,Íéo?ÕS0¤HÏ*L°´Q£y·k¢;ÔÛ…?b7Vײ¶Ð²›Êdcå +” •M4è9µtS8EÒž¾äÓ÷9h°”­‡Èµa®#okµ|Ö¨3ЂDüÍE¸_ÿ¡ˆÖk¸öAÙeþs†’y5ðƒ¸V¹RÞâ + ܾ6ýr’zsQb¥g@·Á yCª¥…´C"Ñt„êpv}¸g"¡Ū!WÒ¡Ôi›‹¢\1p!³ˆ2m&.hx^=X‹ Öµíšy¸jäC¤|fêúø˜ìø ц^ð›¬AU4 …gçJŸÁÈÓý¶¿hSÚH¼ü{P®Ü«Ý»ÉИ]i.«3»ÿt1¦[Õ¼ç¨h¹Ú]ðn”3¥‡‰Bò0YtÍt—ôRùãF€2l)HÕ±¯³?¾B~›´~R°§´ÏÔ‘·õ[Æ$sI„·¡“³™wlCËf.ëÚT¶} +p3¬‡ˆ>0Ô‹’i0 +†‰qbàµß KŒ’Mb™q(I6Æ<¸ýz™fp1Ì IϦpGã*f/%ÈÔÖø*‰º$ Z/×ÁHòqvØ" xÍÕ5=Õ›Ÿíð´ýmÿ+ªÖF´2#*´H¡Ñ*1 +»ÛÓð…!E†5˲3ÖŒBgW,p@Oh…8¦»fò`b‹NsYÙ¾OâE4³$I5úÈKê<ˆþE²üÌ“^ð}ZΚôÉç,j;΢Ú7‹Þ6‹îÛÉzbÒ «‰ %sÐÏk2¾»ÿƒòX6§ôad:ÖKŽ[ÙvVè”Û§aZ)³c®3Û1ÑvŠ”1T5dGÅûù‘#¼YGcO„ýQy52[ƒ€ Õš|H¦ƒ^ûbh(Öo-'»$c¾ZåZ¾ûiCã}Ê€Ú(½è¹±øåÓ“©D?Å~PAóE_àÎî|÷G¤#„Ú ºƒÌ¤ã;µohúH,¼/‰;!?nc!áôÀ)÷0µ›ÓªwLn)V\ÿ½³R +$˜œ{£È½‰½†_Ì”þYgšò”ìÄyPÞ¸zµ\«v’C鉸ÚÃ?zº˜„@(ñ\ ©¤G +2¬‘! +´B AMiÆéŽP {-²Û±2zÐã¬àÊ'¼¦æ¶ÅÔm Њ àãAÿsqÎöPŽ þ)ðáÐÎ6kZçÙ¿ †àE¯ž‘äs˜¢3ÂþþÀÓ +˜°‘1 %˜l$ÙC,G)2.û¬ëÂnàËm’â";@÷WØ ‘ <] ' a„¹4ܧÓùù×# êQïéÑn0•Ž +›‹B„"þé`ÓžÚî†L:¦tô‡ðY!±Ô‡SîÛ²UÞ2`2¤N‘ÑÊÆÇÇÊä]©ý†cò§2Un ¯*0<&3t43´{fhái†ŠÇ$Q]3|:Ldz÷ƒÂ úÓ2CTcöÀáé@X,ë÷°«‡hEÂõ÷x—´«^&Ë~̲/„„ªpG„«×译û¶G÷’O"Š_NÐå‹åÙ A+il8Eó)Š$2äOcPÅykîE˜ºÍ›5äDzà!†6‚;áÝÀB·š\w(ÇøzLÉħ¼*„Bæ“p‰ë¢—-‰¸Ck³ëÍ`AúmuúÄÞÆ+¨%‘b +ŸȀÆ1\^'¼ÆÚg¦Ng„ùÔh v(Ë8Äw’!óŸö-›»e9rœn±@ILãÀ¥_&ÃW) ùgߤ öG¦+e–leTú#9D`SBu¬ÙhNüÊþàœxØ ï}<¯ü¿}jä/(r +¥Í‡ŸIèOšþj…¶_eâ¾.óÓ‘!(ü"‡}§Ë|Y¯/6­™¡_äâß?-Éêók´ƒ]jl ¤¾aŸ.|3T'öp Z…ÐÁ°×Hâ¼Ñ´}¸ÂzARÒ‘,ÑúÀÝkåŸYŸS š~Ÿ{„;ãV表ˆÓ -À.ØÍ¢˜ÈÚR=NnÁªgŒHJ§“yÀ]s©cÅköÏÔMhª¦ÿáåÝ׈šŠïîƒC·æ +ØÌ¥íٓƯ'¥EI2I·(0ÐÜ—Eèo“l¦óÀò0` *‰ÙuvÏÈ$Âh«gd»lЛuóÒ3¶HkàÓõY ñ¦3¹‘lööænX€-2Ûö™\4Ì3ix–ü!ûè+¨X‚§k0R»NxËj­Œ‘¡¾x|žÄ_Jx?é?€0cÎÙ‡2x÷“}tbŸÈ„‘®0v¯>×'“8ê[;ƒ' ÒpUi¹­;t‰{Äñ|PÃkxyÖ|Àc>¸–Ÿä!©Ïˆ¦• ã’[ñ<÷BX…dß$CêTÐ]ìL§hˆ$xcµÇÏ%Vi6ÁÞý ñiæ1ÝØ»C#¸ÔíöGƒ…S̾‘Øâ6†@Sû˜ìeð?ïF +Fs{¹Ä#3H້ç7)ÕšÀ8ihËå¯ð]{6Û»•œ`ÙD¡Fdf¾¶÷ßÔØÿ=.'3¶;Ý=kQÜz·j„™´\áÁ6 ZtFasrù϶KÞ%W1UJÇÌÕÔ¶v”[òˆãpÿ¯¼t,Ö^ Ô¾ÛŒ2+‘Ì´Ždʜƹ¸9™øßÂÑ’®€"†U\(ÆžÃeà±ÖÉœ«œìS'?'_¯î«ÒÁÄV÷½ÊW…\0@’W ù P]Á®p0q/Zâ‹e™O^)àsјJ’$P9nî@…ø Ô˜ñ€v>ÅÅ^‹AÕ ˜;n}“j)Õ£·-Ï +™æÛbÇÜÈVD¦&Õô\ûvCbwþøpµCsµ”œ"°<Ò­½í ¼“ Ó@¢IÒ.¢Aˆ!àBJN•L¿R0+ˆ›µA&_ôß‹§oõ¨X¹ÀWB<§’»}@+Kê°HŠ=’ä6ò¤”7|õVO½Ù@»ç¸dÓ§ù=”Ò+ØZ´![Ö´àNú$2”$Œ6Pß<Š„`G#ÜD'_s¦.f ŸIÕçO¨ sÇËPyIÝøtqŒ +‰à2E 6àc~S6Y2/²Þ+›Ìèˆæ¡ãó +^2\…Ê Lèâ1ÀKïæ"±ûÖô?§ÑáSù5D:0“Îߦ#ÇÓ™ÛA-E/xsÄO†£d×CÅ>‡±¨ /3¾!¦;!^8ˆª±+±{ÛqJ®Í»+p·h%d?ã’= åC¦WÇäœ)£„I%‡c\‚Œ€)â‘#FŠÆNlØö>˜è!óäù#±‰áNŵT1Ê<èkœÑYÍÅøy]ÃŽ>º CB¶^p6#š[ £*ãJ.Â(h÷Œ¼@?Q¾¥v‰÷÷ˆ>UxbÞUiØȸ[øïu”+bܸ*Ûæ4ºk?%a‘)Žc§H7"FYÑxKŒx)G0·pͲž‘Ö9}åë=¤h=£ÍQuìNIl@Œ˜sšäÄŠM-¯ªªð­ð ДCU/¡ÞÞ‚%lŒÐwd&Õ[–¾ÍŠëxÿ k"™Ü„›‰Ð›˜†‰Öc%¥›0H+ê¤pbp0$îåw̃öê +²Ãé•&}U_œ­/U”l"X1 HoŠÍ‡e ¨Ü´˜¿;ð™ÂµîØtŽ¾Ó}‘Ãö±ÆŸdì²­0—\‚'á~-] ·è‰àE\F9@R.J’ûcÏá–"Yùötr¼¹Gî›L¼ôr)2@— .’5Ò‘Q Ù{b^!/æ$vÚ :é‘2äaì›È*á_•ò'îÿÍè º%@ÐD˜þ5¾Ý𗥌Åó!À@KöáéRj'áÎþG£@ž·kkiRyQMS„\¥IÌÛ[ôø¿æ î3<=ÿÚ.ÜÁæËO1ù :Ê +øê"xa{o‹K-<Ü"ï˜'Fuä)Sè”\ï_‚œ˜hÆfCÓAf «ŒœÒ¿†}¦â{0ÌnÐ=@ÖáCyq§DåŽdð!ª#?øŽß$2e ®•ÀEDyË™HSíŽ5ó¢½aŒ*÷¼ár™xa™È¶ lH¾ÎÝë¾y+êN‹<''ˆś»Ra›x¹ß- ‘puÏë%ñž4Â[Ï']'¨Ææ§vU‰Áír#Ál6´Lª‰P‚m¢%òv/¾dˆŠ=¨;4¨ŠŠÒ ŸúTd…d³ð#ú ƒ,Æ‹PÑàâäÙ5™5ÃÓQªÂöP±¸S;f©PÝ:Ð’C¡â%5ó$o¾²-ó„ðPy"ÎÖCÔ}Ê’Ÿ¦]úiÊø)Þû”¿}:"¬Ÿ~«#¸ r@p±½á*¸« œjÅïè‚“ið¢ý,{ÎPps n±ø(íCu6à±À[~£®á­¬zrWjŒo§ŽRY¢÷­éõ;u]YcÞœ"Ь(9Z6‡ »º*ïJÖ·èY–á—éÄÐÍnõ‡ ”,$×zñ6eÒ£ U®7ܱ“oÎ'ìŽK¶ˆX`S¥"Ú1’¢æÜÒTºVSÔ»Þ¢”ïýèÔN·ÿì— Dâw­„‘U¤ _Ù†D*Cy. +’ÐƸ*µZ„>€Ì +DnEl>ä @ËÝk +”é•F…’;|Mp 9ÈîN$W¥Ì›™“œ9S—‘9rÀ,{d¶ú=çÉŸ2‘¼cc’šéëCÎ>´ûÝ':HMÑ›5ñ‡íwGÍÀ³V™½‰Š² ÙÆAý0 Gc7W£?¢2aÇ ÑDµ× ºzÃ-ø:Ñ€6§ùß´"tâÙm Y¡;ƒ¢õ®‚`:2Ä_®BÝzAþÓ®­Fk£á™Ã”ãe…5(€€6w˜œ¥AgejT!ÌLçÆ× ªµçç(nLÿ­Mc oÉ!y$$…´ZHrlyé i3`³-Óµ°Lm¬<¢ÊË‘ÃaGTEexôØ{BRHâ(Py’ï1]³$»`L†mcÀÓ½?é› j]¨%uë(âû`˜K·ŒîKœ@„¡ÁÁˆiåÛ|nó;9»èÆƨ^2Ã2.!³QôÕµâU4áB3×Z¥ŸCðTsûzˆ¨?é+VWݤ}Òíc_wÑÜCµ=³KGí›]Ö±9¢ËlÌ^œ·ÒZ!‚ #õ HËZMRJÖ¢2[²OŸº)§Ng:‡-9G`8P1Üÿs’r•úá-ÚÀŒ™làWÒmU6¡,Õôõ›š(¹:Ê:ôÏD–Ñ?S™J•¼çÇ8»Æ–²SñQȧâ61 4ûsdóâFhÁÅyÈ1©¡MíÙD¾ùL÷àã‚Sòç`´ÚIÜa,`ÉWøXÖ?+”AÃl‚Æ#Iô»Ð$è +¿-_öB½(C’~킧7• ~Ó”^æœ ÷,¸ÊZÞŒ2â\¦JÞŒÓ!gÙèêÕeª]nœà¬úÂ2|ßA'-íßꊋ$绀zvJíNs¹,]øKƒ¶eÍŸzÇlµÈ↙ûõ¹E:LÙœne™†…ó,ÛD,doŠÞÒÍ\±@^OÞ4°·4ˆ +fÛ„™¦žœ— ñ_ïÓKªPÍ{ФRÜ = ö~¾ ´Ä ë€_Oþó–L¥Š@wä§(¡yM)¶ ±”ùòiADá›ß—£žô?c’\:ãëmÌC +FÃy%…ÇÉz¯K¯åŠWð^è(š¬;¤À¹®(°f/Á?teí{N–;À].´ks§¨s¿’Od^bàœú ˜]Þ~ÐñA>:-Óç-½€5óa]çñ6 (>Gþ¬ ]9Eìäe…Á´æ›4Å\ÇD6…¡ó ºÇñ³{l¸ ±¢¢qê” RN+ô + U¶$ ô'(£®W¡ß# YiJ òÙXBI8rß$(¸]/Õ Lêùfµ$¨L_YÑ”DÕ›Ù(^г˜®XxtÔŠ@^a‡¡²áKµú]!1Ù©VaË}W\8 Õ3ÅØ…ýUˆ¯—JØ £¿&ÊБï¹?Šgòò ”Å¿ÑÀ‰¨á«Lõü“óÏÎhÅ€ª£ñ$»_"ðÓ¢H¡¢wékº~Ú* éCr½5y_Ø5IØ$ÒTñ¾vdú‚s¿;í>h‡ƒqÊὶ¼HxØv Iœ á^†RC§´×'mû„9tM‡mÄŽZŸm_Êï¾HV$M¢Aa,fìó7Cb}!…RÄJHÃXðAÒý¸²¤q2]®F>NÑ°$ ðÎOÞtMb, §ë ‡;„*Wª ¾pŒUQ‡Œ2 ©¢ÚÝSⱑc?¥X(7ᄉÃR†Š `ØDë|ìdÌ¡3ìq&t8F̸Ê/‘ ‡OŠ°;‘"Ìñ2BHQ˜-‰©e[Q}¬Øâø-! âïAÃØ¥y`0ò[ˆä–ÿ6Gì ÊÜÏs[;NYd0mÚìý‚©c[O7îw?L©¬y Æž> Æ(vÕ—©ÇB<ñ Û¼[tðXWÇu‘ ŸÞã¯Ò‘uUÓá“梯iå-±¢Ï².¨k¡º5$ëÕ[ïØõ21‘c#§—[ážV¸Ä©½£¨U’x ºà(8oì éÜ”J£"|¤0Èd8´¢¯&vP†cU%Ä/ŽÙïkãŠl!ªö†Ã°þq;¢$Æ“Qß=Á ¡‘ §*b WfP…ŠQ[žÿZYYk¼þçTkXü€Â³B’— h'¼Ë½4-üfš××5Þ xaüÆÈô l«Ð‰"Žúõ’[*þ?_Ö8;ÇS•ðXaA¦d¸ö«Xnçpô‡´"¼’DŠHH7 +6ZGï,ÏÉ/Nž#Îeð"Qãæzÿu}ý‹³Y],EÇb®l7m}çô°2¥ÒÖµ´YñõÇ7έB7b{¿‚|þ»†À•\—iD*CFi#-fP8p…ÏÚ'±µ.z)…©Žy)8üAU@æ +oÜÂÁ½=šb®IN-ñ?†ýžá?¬¶›¸%”&Zcô”:?E0šþ¯r`DQÞ­²ˆ1Büa<Å«ñ— yˆ½oQU§ºÜ”&eDEÀf >ÁŸ“!¡É Yô“q6×ê*wsŸÍ"‚&eÔÒ⮵X Žð&<åìôeï²OžÝ_vñ—øQž !ç’ˆ?a€Ú½=#Zb™Cä¸6Ä«ÓýŸ}s!ò÷˜ ¼FÄ ˜çù‡?÷•z„âàÁÜÙ bD÷ÖÄõÑÏã‚¡!ž‡¯€ýâBaÌÇ\/N¯×%xëö²ÀÖÖºÜÉÐZ÷øt÷×qÐ{Ë=že½šç`¹h‘Ÿª•©rP›ý(Z\oyòQ® Ú ø8nžy{\ÜD'Ÿø:˜q¦Þ¡¡R…«ÞžYÆt;XŒØþ·î~K9 tf~ÌÉßèý`¼pŠ÷n,Ø ”¤š\ìDªé¦FP@"1$oõÑIçÀÚÖDIÔÀ-£p£dÝ>ú“Η›É ä—ô¹J…u‹ °.ÒQÕÜ Õt=єԎǖ– bDm5.UX3ùjŠ9Jq;Ò~ùÊ6¥Î¤Ïô_êNàn ¶D~;,pä õä«È(€0-A¬c86šIeñû‘¿Õ(j—›ŒW;ý€9”~•·>#׬É.z¤öÃé †¾|ÅOõÛZàÈ€cMIt:‘ŽR¨Nyíâ3äÛ¸­´'Ð’[Ø +ºœ€Ròóþ°»¤‡Ql³MVV,ji{ Dhsg3ûxÐ @N¥…¬Q…’ùª†/vb xQa²Åp.?äûe0—F¶oK , +zS×­#­²vÍY’¬™ëÕäy{A¢NžøAeiÀ{;+Ñ2œg¯&þøDuÎ^ØçÌs%ŒC¶öÅrHš׌ðþCKSlƒÚ¼*NfU]‘Æn¯¬0ÆNªË†ÃŠÌI•Ñ÷\õaÓXoN~1ZW(¥l°í]ê4|óHtrl?)¶ƒ¿Ak|ïo`Ü 4[QCæ€öl¬ã‹ìvÄZkÍÁÕCÏ©e¬+Ø<ðk RŸŽþcë–‰Â0DСºÇê¸] мÆn!ù~Çzsçu«=o¶/d>/Ø×±üí؆è+ü¼sDB9}øHø&°p2 n¨_²gõÂù¼¨/¿ÎÑÄć_pJ5ªGɺÀ V1?DO/*¶E ƨ+dT½#Ël¤¢.R=/ göt¤Îp`¸åbÝ©ÉC+Q6´rÛp°hÄŠî<³Uà)ÎnœsLnôV“ò#Ç_È|«í¢Ž#~XwŸªpè(ä2ÏZ¦ÚËcGM.óŽ™jì¥Jv(u9Ô*GÊT%ª<Ÿ½8¨JÍ@Õéô©… óÖ@xö«Ûý*N‰Ûv…¨W€¿‚ºqÕhY-1 ™Ä +GP<¯©ˆ²Ã/(h‰kí +´ªç3*®t±ŒS?Áƒ „$$ŵëô“ÄA•³zÎQQÝ„ ªt-†ˆPYc©AY“-Ì’Q r¦JR¡Å)’70S”©²<:…hN¹0wW"AåDƱ~ãc¤u£Ø«Œ´Î¦ao´á2å-R…©AQû¾-À•ô¯õc®)©ËOÈm¡²çÊ‘"pÀctCAÇZ‘Di2à eQ<ÙÌ›D¦´ahžI~òéÀ@Ép È4E¿ÜBWs AŒÕR‘9AðŽÄ\+Ä5pS©BÃEqUºàÜ퇯ÂiåõppÉ8ºäºYK=v¶¹n}ˆP÷™&Ù«“_&iƒ»‚âvï¢jw!âeÓÇ¡±N +†˜ÂÁðD×{ž¦ãþšÊðx€C0eqïAêzd¢ÑÕKÇË}–Òâ¾Ér–u<;o ¦Î9Ž2`&Eµ‚“{¹5‡ ‘.%Æ6êä«VMà &¨±¡!–€I[¾FàÙGgân¸>}îšKwÊ"ß5`Wf6Mgå­Èæ_î!FÉÞ‚pý³ …ËzD1ˆÞlÂ%É~,U¨q/dä èÃ$+†S¾ dq üCL·&c>¶ #”lûÐNÂ{{®RSŒpNãƺàر½‡eŒÑe~ú×)N»C—;]ø»¬‚p» +÷B$‡V˜¾dT{÷-0¹ÐÈ:‰€×g1aî ól˜" !"ß)ˆÏÞöñð!qsH»Õ*«:}>Vï*A§æÎ?!sVe±²G1‹‚ä£Ê¾)z<-%—±F@ˆ1D-Ôƒ\&Tþà#/xÆ`º;êz0ÌÈ'xbɲ±9vj0ƒ*“šI¦Eþ@ëÂÉz8\ø*A"4¾ÌÍû"mÂ󱟌Øc#Æ&3ä +¼9*dôš2Ktvû3EG7 üæÿ?±é¥݀ǣ_Œ +VZ¸ 7z7¤Úð«1_˜¼G7䯟õÚ0Ý+LU%úhZíÏÝ“GÕ‡°¬¾QðXKkSDÖÇ¥ºŽv@Ž$PÃÖjR[*µ22³ÉhS`¥Tp÷Ö6U 쌶Xœb¾ÍŽÄà…µ'@_¾U`Q#QØßÞ 5º9Òkp—3ã••M«=¢êtÿ&p wí]ÿւ‹ñ+(aIß;žzê¾Ô.)™¬½_àKƒšçè*50œF29R0ÍàšÕ‰åib)93µ:%óã:‰‚ä,LÍȬKKîë™!Â.«35Å.•ù²¾¨×»Ä^uÏ»—pGü 1.·8–de²‘àš·Ù @¯Z’™“¬ŸüñòMÕÓ£L ’ÒÜUa&Ý)¦c>’/ñIZ·h5¥¦ +{Ë?CÁ`˜”–†Û.Pzz¿&} ùwðßúð¢/{sLµwŒöâà5°üªZÇvè‹_ÈÜ€TV ÂIS4êÈà<á‘_îY³ZûÛ”Ë)3̶¯<ûrQ™1l?ï#5Œ`Sëï©}ôš"š¥AÔ<×ñS™ÊKÏÿ7¿)Ú\@àY¥© ›C¹‘”¡ìEzŸ7jßäÓ‡o²’]ÿ”ð»[ ,‹ô%9G±êñj$Í +Š’ë-<.¿<¤åƒçò<Ç!Qã˜* ˆFgý+^̈ +À!ìLíKíú~KK‰T¸^ƒ¸/=p»úúÒlB›³ÿËü"cPv™ú6­¿×'ö¯}½Ðy?7IÛ§Šö•G¾¢IF™xÑ;oÒº­’¶ÞgÝ›KäE“Ø?/ó¯¶A±=!œ{|Å&×ÙzÂ_aœÝ´Ú2‹™–öx»77>YrT[=šövIµÜŸ¥Ž™=0,{ç$§*†˜òÖW«F°ezÓìô S»KðD'Çp´58÷SìèëWÓó‡k©t°eôññMÞ^²#4µM5ÂN/Øg_Þ^¿ÇÏ!yI|$ðàßl6ýfÒåoúÍÞqõf¥Dß LÅîùÁj.» KTV’!š£Ç  Ì<=€`t×û5YDªóïÔuŽ d1)l sÁÔšÞ3˜Å2A’ä´mœŸ…ªŸ‹#ü£=þ­É„¤H¯Í¸×Ö®éöŒ™­wSÜ»ÚX—ïý&ˆúŒê[™À¦OÕ“†íøÁ˜öÿ¶¨6Ç]†û2󇛄ÜúóÙPàýã9¿“;…øXG:>ay«_ÌîG‘eþ{з¼ú[Ÿ¦ù,Ù¨`­ÿûø$:ö¬NÿùÇ@,Èj’V,àpJRl¢!î²ûD¢;…÷@BgU‡e­m¶| WvïL6…[˜a^ˆ16Á=ÍgÌçÑ^›öÒYìè8ã­\F©ËÛÒ<>Ÿ«üïäx´¬†dŽ tBvV<²¦…Ôú=]#TB2æÝ8~i‚yx²FãËË%åÉ>¢Ô[šü +רµ+Ë‹¤O˜wª¦¸uú-\÷£«PÑÑßБ/±ÑaÔ<»}fæžfKÚÑm2ö›¢Ô ùì$ty·]©Òçâ¯E¢Má[—Ò!£™$Ñ°NèþÜ‘sðXWbDb¦<ÑwÞâ¡#f‘§ªè°F”28YÒ>ï$ñÀ»Q®d±::¿$™JìÃIi¾$Lû”J|t¹‚vÜTsI›ZIK—GHCwYW4“YÏ,ëñŽSÓ»Ûq,í6H#cy3¾†rÆ&ïf,¹’ƒÆ,0p¤1ÑOˆšaãªH`9 D"âF™w‰2" F≉"f>áà1‰¢'ŒVY9§»)5v3Ž ’]ïænzfùÿ°S¦fù˜Ù±ÕTSÕê+g)FÑ*¦"êá‹Ò-Ã3è>xÅŒ,òs'RÑìïÝØé3,›²e'KF¼Kc˜öeÍn|sG×.‰nÔ1ª6›UHÇ1Îè;(“065tÊÂá ºÂö H<ÜP¼X8u÷®°3ˆ4ä¤9LœÚôu•feüoœ»)D§‘ÒWV¼Ïaë3ƒî¬ }rgÈCG•xUó4vl*‰m=ñ¯´“ÊŠ:µý¨9ÏÒ\UÍø²ðeL 2‘ˆxˆ¤!zp©D5UN›‹Ù"™û¶e–9̦ˆÞ^ª2#ý&Ÿ8èæ]§˜-zf¢Ñ}2ª2&p#¡x€j™qxœ,3Ð@E>$=@ µìðŠâFâÌ8Ì' ²aL ³JØ‘†Â¸qw¦£ÒAD(Æ!MO¼‡LÜVvVU#b ìBáð'–!"Õª"aŠHLˆ€ˆ€p€0 ‡3ã ΡȇÄC‚Ü´*l8DÜp¨‡l lăða(B‘ñ„0¬Fü@Š+E„= ‘uäQ…]…7™dôÏP^ó34e3t¥—/ËfSr¶òh3“iKKßüiiÆ«›Žl^”ä<ªé³®ýYïž™£yö4…EGTy,EøêÎ €Q[ÂtWHã³ß[ÝÉŒeOÛü·åó.éXY%_様¾ãÜ›I÷ú^ÎVÄ~(©&SYÊ«ºÌUù÷æWˆ‰ÖšåÎàT0‘ÈøR«~”‡ƒxWWEVÕ4tÓ €Ñ‘ü%ëäD„âááá!1q‰ŒÀNÄC=@DxŽ‰¼3;*c’ÓìJ}õ¯3Óžþ#šM•Z#sÄÁÂæŒÜ2›*ºû¨Ee"²Ò¤ÇLXÜU꣯¤<$¶0™œçØ@$mÖh×±!)'%—7¬\^_‹é •Ø¢h7%‰vB‡9õNˆ.-3ZÒhtr¶_2/Ö™#³Xû§‰Ù®NÿIÚ°ä¦C3¦üÒPdm}³‘_’ÐW)Ã"»KÈ“B™Ø`2`,Ž^¯)è÷)d}¥‹Ã'ËäYÎdž#©,3=64%¹üíI¥‹liÒ™”–ó¨ÌšËä¤Çåá úʯŸd&U^trÓå{2yDI³¥L±Rðu7N¹óeì´l +áÅjDR§+wuTy§¸¾ŠOÆ?±ÁN?Ÿ‘ܵÀx€ÁPT!óLfaûɘk¤†âL—ºt•P+pæ=Ý`døOèî†Ã$- å:Õ`,påžj8 -dS ǯ#xUÇY–¤Q8j6© FƒË'à,Ý`P8È\’Wu˜äM5 hp BâC Ž1ÇL% ƒ‡ +,Á!‚ +F8püh)'³ÅEND)Ê¿Æä´¨o³Ù7ó'-ærÝ,ÖÛ?“.ÎÈäµ»e\×’%[‰5k¼$™$%%uîÉؤ:³É¥=¡ZÓ®ä͹„ªÍ +GìlYlº¤õSb•c&h«3AÂÒH6˜™Y<ô¤^“&G,‹NRétŠÓélV eë‘D²©TæÓdŽØ»ƒÆâ&‹Ÿºœr +–êî¼W‡Äj9cßm¬X‘/K,k(½úÝc$-]‘I +óJ°°ŽJ°¢#©ƒÆ‚~)¾-SÆ êQ£“¼ãêß}&yG©*§ƒÒ#y‡ÕøŒFÒŽßQ²uCrÇ]+¶¸›¤Ë©£ãÅÛPŠ¬øn’º£0Åt¸$=ëHJÙqi&„ +— ^ÙA? ÖÍŒåõy$­£\“`5Bäbt°ÀX˜¹Ä‡tWý‹Õ)ÅŠžªƒ†‚S¬­‘+²¸,06YâC“ÉûrYO,›trWÕ´K»:¹«¢ú?HbyT&}dÒőɤ”ÊE]MՋ|OψÕý™t÷!}&©B”5$22|«ð4t¼ž Y=+|劎#<™ö{ê~X–ÝD@JÙLÁE¥¯tϳÅFTU,½)õ㮯¹ÛÓrF¡XzðWA#Â@â  Œ +8X`Hà@ó‰ï„YÉÉ4ÉÄKú8)YyÛ]—üß|•üå¬6íg¡ ÑhôMп¦ šŒ¶Ïgr¾œ§PÎv¾’óù.ó•ß‘r¡Ø?:ÏØ”mµÑ>6ؾôõíBΔÖdžñÕ™L¾ÇÎ,Þ>BÏ!? …Ü#©NL¾I¦Æ˸Êß©Œkü©¾Ê~ÿþTòés*?UŽïOGåL$y?5ŸÊlM¦²·³›j&vŒ²÷A—‘±¿& –æa½ë_¹þ¨\8שëñb-ZÍú)£ëoÍÃë§NV²¾¾»T"µÕ"±š”Â4™*:©NT_UKu +Éö½ZT=6\ÕÅ÷eË+–NëÊ”QÚ¶?™Ê²ûRæµ}Û²›è„ú©¦ÇB­M½ezô§;-›L-å·OóéôÓNQeúËÔ´Šþ+Ûá)‡J–“}SErºÑÈi*ë—;::YZÕècFdž©"KѳRôC¬•]™LGš˜nª)Ñ$¦¬LeKJÇ‘UJ¯#)LR"J)5]Õ}·æu×±[êWu®ÕÜÓ‘š£xn&/ʧJ<¯ØÖ¹kjLuI¦fHZΑÎÏ°rPhþ;=qLhÞ,*Ä;rMèNh~ˆ?Ík„æI2sîúŸ¶û[J^Òý$o—X»lIþ$Y&?Þ#™Ï:lÞ$è”›û“àÕQžåžyË=+÷9É•FîžØ`Crï‘ÜÓJì¯]^õêÔSœø楟i}%>]áKü)[Útü™2ƒ÷=ôV6Ò'Õ¤Uú£‘Ò±S·$¡ãÇ(3Õâ¡Ð_H¯|Ò³EWUè)Ÿ¿%ÏJ;-O£Ü¯<ÍòäOó%}ôWÙË03S2§W&™yOËÍ^ í#Ø‘)«àè”ËWÉåÈeéòS9›®ä?K,Ý4ž˜™UOÌjÄ?–S$Ÿ#c‹Rª¬òYI&©¤’ßHŸwå¾ +ÅN£¤;ygÝ¥/ÍÊIœÙ¼±‰G’f“æwÞ’¾ e¾L²73H&”}eËÑ2I—lO&Ós¼ÇŽåf&Ç—ä8”8lnJ‰O[ˆG´•à³D‚g’ßå°&‰˜(é›\þ?rr%½N1ì¼ÞKÚ}îC(9Î1´±¤™Š!a^aöÌj~«k–SÒ6bæŠ1±ÁhΣ­Hn”Gƒ%¿kyYŽüžgyi•»,ù"Éü¼|ÞÏ?%X—$Á–Ü8‚t¢”;çUʹ,É&–äîìÈm2CtÅ®.Y±›Zìj¦øKɪÎV÷Or¤ç‘^ZKèêCò¢Ìá¯4«ñxd®²9uÌÛm²<}sÑ$;rK¤AI»rIf¥íd’–)yD$÷UY™Í.Iצrª’ôQmD0GI’5ÆLKÚ$‹²d*Ë°’w2U£‚œÔÈKÆ™2U63°“È}œAi½nd†—xºÁ²¾×#Eè™ t‰í.íÑ Tpψ Á1E $`ƒ ‹" àˆá , ‚Ø@ÈÁ 00!áÀ‰ \ˆ@‚ÌP ,,`˜ $X˜à 00A+H¸ (â €`!*`#sa + X¨€ &Hp,.H¸Ð€ *P¡‚ + €¡BëÀ1#Ç@¸€¡,\¸ð "˜`!¡Â.8P‚…&H`1‹ +&`¨€ €á (<¨Á d¸p¡‚ 8ÈÀñ T!‚ *4@ˆ`‚ +(lÀÀ!‚ À 0p@`X€pa‚„ˆ@  *½I•Ž`08ì`àaŒbq +Ä`qw÷‡H®Švœ•U§®j¸¸+@bP4“&dkBQ3Ÿ¡(TÛ#‚± ³èªreÛ|ó%vH=¹–×%wO¦vBTW”soÉ]NÔºdé´¥ñL§‰­iý§{ˆpªŒ­S‹ŽŒ_›)±fõcj‘.ÑñÐscÆÚ5Ž]†LÃÁ¢ÉK'Ù¼– +QzYÛnyæjLÊ®ëi=£ÙçRNÕ#2!"áHÛ.Ð-þÎ#%QžNtµ–µ>¡¶”mˆìf*Ó$\`Rµ¥j©%êÐtZêôyês¬G†”Du%I‰d“dó!õè(SHŽèèΪìb÷´:,BC9Â##£'ˆNeTtBU5eDFc&uu³\U#è7/ä¤2!«‚Ø+SµbÛ$Ö3qa_bÖ*c¥”]5+W¯FWG®’£«O®d²Íœ†54›³½ú®uÕäua®‡ªêÙ5ä_Õ\{ˆ`L(²™f^=TT8Bgꔜt„¾“£×‘%=ét¬]‰<¥’§-½¯IsÓ™…t'X7AÃ…YÚ˜L˜Fä)±)ÛeXU*VXËSæb^[™>3³=]Fxru¶²Êä’-sôÔgΟËùÝœ/QÐÐЈ5š‰m·²\ÎÖÈ[åV¬tS'š/o?[Ú6[ÌîGlH=™©›Ù™ý›Øn«J¦|[¼4Á"bö•<ù,Ùgf4˜Ÿ^f-ÛJt¹OWV•uLS=+^kÏ·æ\#Ó¥zõŠÕJ~8ˆÈÀ€ %ÒËXÁÑBÚÌ~ûimÛ[dvú¾ÔÅÛ·ìîßÖŒ“ô¾uJÕšµ³,¢%ï*‹Íl}ßz[Þwd\WÆoëäs!’÷µ1K(éB™í+m Éû¾­OƒÆ’[_¶.®j]Ò•¿Õª&Je>[“Íž˜3k)Ys’œdÖîÎØ{ïð%æ—J k®™[(5‰-lGÊ™b{ÇãHÙ>ìki̹Ôηµözͺ¯c×<]éî²õâê’ñ¯ØÛ ç~¸ÀÌÄ‹ÅbÛ¶c›bc¯“Òþ Û^óJ½¶P¦Øi‰dz"ãÁቇ—½îXW,ÛGXdÂó•ñlûi+ºu·V-u¢ÁñBÛ}wº|ÙTÞ%}ÌN`L¨ÈH48^/_5ô>³°dVÌ*RÙ÷œÄk=„20€T¤¨K><™l’:-Ëïòò—ù›ä)0È3«u>Ø‹±*X¤,f v<6“ñMƒ±LUj¬wylß¾‹=U—ûÓF0 ðB Ö¨,Ï,Ïí–½D±«X‹Áaž]ží/ÝŸySù`õð‡g”ó!f"&"’ÌqÐó¹Q[œÛõ‚4Ï1ÖÓ‚VicbO´'¡™IÎÐOº•!æþspf:èéÍSkHs‡äƒS”ÏÇ‚)¢ÃJ©„>iéXB÷6¡/Å„EfEcRd$FTi3‘#AIˆ>ž "¡ ‹&’êÑXµç""bñ©l¨5Šô,uŽˆ  ã` …ÂQÁ^Ç€\‚DÄXHÈ“GQE$#7ªÉ×ëµ{Š$þ¤Ëõc’ ÞÏ‹ì¦lô×µy‹Ãq³Fa/Ì—Ü6¯Ÿ/»÷ß,ñÕ¶à@(Í;K8¤Xlm n‚†±c ]N¹+s¨Žf•ÏQö4ú_gýyŽÚ{ùȸ/}|åN_ÓÝ+Kÿ ÇñZÌêgŒ-ç}€èu…}?|M=…»‰¯4úhõÙÒöü«UœÿÈ÷ø-¿²^\büQ`v yk¡TTŸ-u.žçî¾ÎB_éÑþŒÞbäÂÉ÷×3Èt)|W®RKVd_R.˜™7ôýå0@WL7/èz(ÔU:úßÛVz í?üêc‰Ü—«Ñ/šüȉ4šÂã®ï¯öÜaìá­¶Ògtû ?1vÙ]Tx¢)LíÕaÔo‹nšž÷÷·­8ÓQ 5‰3<ã±ôõú—žFIœ ¢¨#só¨ÛæžRœfÈÓEüS·"8wØàËàЪc/âFN5ÄoÀ•cñ.âÓx5+»8ŽÊ Ÿ™©ƒ|x`÷Ôá^w¨žQ£Tã¾ÀÖ4Ù€¹ZÍ`Z×ôÍr26’¼¬(‘|ÖÀë RÝm¥_Ýr;&$· Õ…u;¬ìϬéú(4f4 '…@¿á ÍU¿² qS¸Õ "HŽB$Æ€P”¥A¾åȳõ~ÔútgÍ4Y‡„X£ã>³,~Ÿ¼>«®Z-?”ß©bµ\´º–,Šlœ›©‡E) (ðŽªÓ›RÝjÆÀŽ.=2ÿ?'ç& I“xÇ„i¼¤D°ÇRx^jµ)ýû&Í2h‘ô*CêÆ&·Gk½+ňº×Œatbº™EÅ*"˜ð+b8KNBW°ÐQ}Ç”BÞ›þ¤Q@¡ðs[Í÷ óa»=˜ª9µ?IÆ£ÅîÎSìl”éIgú£tPíëxeªulSáA¡# €%¨MzÅü'©jO¹Œ¢KÒ³^ïø×PÊŠDÎtD2Ù VlÌæ-çkj–À3ϘÌÁé| bnìKÊÊ%5[ñ,ojKÒŸoY#Ri5[&‰É8,‚VK “ÁIÊ—#Ç1"û5Èðcÿ +V!É9Ny;V4~ñn Q°¼Xi,î˜I)MqNìŠHüF“¢MîáÊLXŠpÿ{w° ¥ØBúÂ-!:8î¾ KIp‡òÇΠ‡[ò cdŠ d©÷C3ùQcí[èc+“ÓÓ*õ¬Øj£¬‘Òóeóˆyìï +Oîm¸wú#Ê.F­ קûG¢»oœnÕ0. …d)¹X'dM|8Aœ¯,ÁùBß ón,^¹™à˜:›¨¶ÃÖȼáÙS†}Œ–ŽTÕŽm³±˜æÔF{oA»*žÝJÏ6Ã,f83âD¬l²óŒC +{½oò²+qDÌÑTK f=,`¿Ÿlù${=^¨¸.3˜ &þApQ׶àpµ°-Xk À" o¬+zÐZµ:²•ýª#TÕ”ê†ÎOé—¢$iJSê¿Cßq¾ (JiµøYa&Qs–†«Øä§Ë¢Ól»éžiz 2c"½¤–QR>K'xJæ4_ÑóAÉøˆqôÐŒXmº1hÃ-*³lG³0"fv(Åê)§Ì gBtèå™0!àD Ö’„ìÏ$á'ÕLõž1nÕž ‚Çzj¡xn@wJ¾ÎmªÓž¡l3Ç,³¡EÞlê6\96cT\¬é§ÀÐpÎ çS…q'ˆ÷bô8A˃ Ó÷²|ýAâ2æO ùZìl +$î¹\Gì³+£˜iHˆŠ¬Â¥ôNË(:D6P:Žv’ú\ä5$ÃJâ:è’H$eaEâ©~*$± +Ò=ÿ£!}ôƒz´óUèÄPGÇäà]ÿCßXÆÙXNj”…g|÷¶!&CCÄ(ξXV.Ê-öéŠZŠ›E½<ñeÞc¿¿í e8‚!–0"^7ðCòð›±"L·‡Â¦’µ¸ ‰BÃ7r]jvAV ‘¹1¯&¸A³àB8D¸ñ(o`O Kîf¦:9"Æ_Õvª:ˆ `qb>tD-‚m€¸ ˆ”ôþ³Ij@‚øÌäëS ³0ß'üÿV¢ßžîÖ/¬‚Ãè‡`^­rlü"Oòî$Ðv×û W?_[œÌYmÞÄ.`9H 9 ‹Û]xÿ¸UþR¨Bc³î¸W¨6£fúQeËñ×±¥µˆtñ¢j§Â§-¡n(½°æìQRᬧO A¡žAÉÂH¦_fF*W†’™ßH‡)^Û-DÒëÕöWÆxóczƒ=ÄÙ'EÅÈ—ÕØ¿ð¼GîJìèRó¸b‘·{4¶Yž–âÎ#^—ý-$›ä`²bôŒ…ÛµÇ~­è‘‘ËY!ÀULUi mÑj ü¸ÓŒ…©jL:¥D[pØ,ZƒäTàüŸ~Á  ¿:¨ã%èMÞ¨_LÝUMÏMPÀ]&e9aëB&æb¤àʶH= e­bvТ¸²Ö äÕb=XÙg<´º¢ý>¹ï¿Êlªº”CV÷Sl× iê.•¢‰}TâQT)êÇ”Ÿ*ÐþäÏç)W:!pÞäišhdò¸—0+óFËÒ*ÝÖl[¶ïñJ’b€IWwHôÄG$ÇË›Ñp¼,å¿AmD|ʄ8ré.lƒ\‚v\@´¬?[ø¡øòÍêcõqݵ‡bÕ6œU<Ã5ŒRü:̤t`çPsZ@Ê ‡_…³IoèñÛM6„œ5“h³§)K¢å§oèL;E¡.q’ùÛŒ¾ÑFó!‚¡uùrww¡ƒË¥±…ðo> 3ù5¼¸´Êõ¤·‚ÏBllNÑ&%ÑhCI…¢øäVœ²QþÐLLÆÉe\ràEÉËŒdÿ925ŒèÅò€X ´NîâˆÑèúj&[GÖ©FíúE©¢ÖA‰Þ;lQ,ÖM­0{²`8äå÷ u¦ ŒH€ŠsÿŸïëë£øôëŸDæC¶Ö3ëò#ð\c;ì²:lÙ šÛ øÞ$MÆ+²›æýz ’ùvåD·ØÑ•ßE î´f‰¬ïTiÕg9•Ú\«òêÖL×;vŒe¤+©äSšÆ)Xùz.…&ZöÔH/HÇdž% ö¨‡%ÚÐSC×YŽhÌèÝGóô3k¢äYÅìlñœØNN?/ä&Ÿ•Ÿ2¾¹.@hHÔ^ø£ˆQüB¹¹ìÞÙ2r’Êc;Þ%–u9¢<!<ƒL‰,^d."¯ˆw–ƒ0¤%Õ\âž“_g×B*FrÅÊNÆð,L¨õÚ„ ÝT©?&/cÁÒf@a‚ Ì×· eÚíozŠÑ ¿´¾7Tε8Âè-›P’±½K¬^̱§ +z³±>à»kã*ž•B-“˜ŠSw.Q»}ýº;(Õ]—üÜ쟳IÕ³^!~ÚqUä®j„[>Ìöô¾òF‹çv#·ÑiÛɱÑÂkFÕ¤MS[¦›6‡î‰´‚¶ÀÐnz`Ï€ gtÇLÑ2{¢–•”½|‘Uµc÷ŠŒ­Ê•acbΰa£QÄ7‘oQB÷ê—”ŠH_O)¯¡h×Ó}®£‰ë ¼"è–ñ¾Ö2IZ÷ðÁ‚–)%=œÝ†°^äU +³»¦yÒ«Ī¡Ò™î«-†Êª,€À)¶¥:©Z{Qéñ­Zµ'~òž4í°¦SPÊ5-"LTÃ`bY[Âa%æ¿mpMY±¤4ü,m­ã²HWÞŽŽd£ï £kZÑ Ÿ iDÈ’ÌarAè –VP4 T.Ê ~P?wWŸv€Ï317Õ£ÊSíï”î—´cÕaÏç°ñ#(ÇÝÄAM¿Ñtn™`Àðh/¦FB¤ÁÔž1£ÌàeŒÒ1ÀøƒïÁÀ*¿Ðh¼€r.Œ–ì”[þ<¥¼ þzceŒS¹Né_•Š”íFW>á$Nr3@ö¬ ;&·äú#G„1’ev¤ÀÈ×é •G»Ø>H"öj@µº8b¢]j´;‡5°á³,ƒV·`¤ÀT{‡QØn{+ÒbâZA°öÚ öˆAG-KÇœÁ”=Îq,Òâh`cKi¤Ä(ê ( ðÇ©Qµ?%¦wý±ç4áÿ‹õÏ°º&ðªÆ  +O¬û)Vÿ"ôÙI{G„lõ pп:ì©öy”NÎ8ôÜð9vÑæÿñÿç7?õÊ ˜Ê0_gÉ­—Ü=ðsŽÉ6É[Œ\éŽ$âk$æB’ò^‹ºÑrd¡î‹"…õr·™:tl™qJ&óøË`èFyzó; àm›èIž‹¢A ™¹]Ûw”†¸Á·4¦QcGÔªY¶¥ÆÉAŸ@ã:B©``¿¢@f¡IêFŠž"ü^µqhUU,äÆþ PéoÇà0#oÀ­å¾%ù ¸GÑYrWÝ.Fv°÷¢`^iòÉZááÏκþTÅ /¼ÌÏMÈL> §™­:çè² o†\Ÿj”aÂÒ~âM)Þæúºx.t±œP?ô6iK¤À‘â•Û»ð³ ØŸâ­ßÑ6u¶VV)ÇæVPî8â¶A§ŒæÆÞ4L‰M'‚RÝ#„vÍJÕC1ÞŠÀîêÂY¾—BÁ”yHž¨mɘ‘f¥Tç¿÷ ê2ÂÏOˆxkä7¹gÐÈeo)p!9Ïň'HJê] ¯Ñ>Øîzôc\ ÐaòÅ.¢C€æ€ï}R¥×¨ø$±Ô„Zk²± nú~ëýI¢ôµû̼ ¨«ÐXÿ°…Ôõa.ú€¬Húý°vH8@üFé1á Ä3eÁmܾ â³ï ÐE7¨|Ø +Í&a¹ØŸj·Áq*ðµ?yCRS¸>&œ +§¨‹ƒUD/Ôjv*$+cº…·jÇ£ yü+Ôû6Å–óA=³:Î[D;\¢U#“â`ßß2kº––ª·(&'‚k¬—ÇæÁ!Ž¯‹«0¯)E|‘9a«;¯’ÈP|¥l|áÊL Ö¼Oüj<„þ6†<~4Åo²ä‹{GT±¯ž R==yy¢‡ökz:øÿÔ‚g¨äH5Ó€B-µžîóŒä½× ç£@Q‰C_ŠN˜œ°ù^Ñ;äƒ$ëyˆnta+þÓX¾Å·Ìb¾¶m +b&\=žTå[)vQô®#íxͦØNr8—ãÝ£)¥Kíf¿ 6bÜ3vwŠE™è@Å%“†%Hõ|)õ¥n”.Ó6§tÙíÅ{?¹JÃ_æ'ðßQýʧkeÔÏYë¯R0‘ø\=Ùå¹µ‡ÛxÝeòa^%D«€—Òç‘ÌBESbþˆušŸ@½¾£Î[Ÿð4&|÷Wû>˧U}‘š2}¨DoLdžL— úàÕbÕ.²ÎR{oâÇ®AÔþäf4ÐAfºCÚž29óÀèý¼â¥eG ÝåùÁ{<¨Þ°ƒ¿;¶x ]xCîÅp}£.,q¹})¢`nˆR¹d7Æo«MÂüЄ…æ*BÀÉu”C&6I ¼®²•xÁƒ5G´øðiôÛ—NU% w/³PõÓ{ »^czµ:/üzRDãö35¦Sô>+’*B£~#xÅ„û‚ƒ÷~€æRä·7Ù"®òÕbÂÕƒ“sâ^ÎíG_~¡8ÿ£Aèö¹RXXä(8v<*ÿt¦;ÎQ뀟w´á"hOIð@µ3$^°4ØiЉ°jX)æ—äªX˜?mQâLn Buï(!ÏÃ_ÔH†‡»3³9§ÑáVîýX*À3º:¶Üµ“3ü<ó^­³„qùÞˆ›Ê Y8„D:Ïô¤øôåshœEy:^6üJëŒÃ÷Âx Y«í»³@"P£ñ/ÎBõìUx‹W£YD®ˆõ¥‡²‰ö’U½¬ ä.݈ ×Ãt5ÚèÂÖ§W×û&åIô=òÜDC)Õ¡A4z‰UyÈØ©Pé)æ/Cc¤ z÷žç |Ì[]®$’?€† ˜ºDðÌ ±·UЇaì,„2—¬þìË‘tkÛF˜£æ51GÁ“ÁE+}ú£vø¬ü”ÀõØWå7 ”0Ó +V¿P²§òØ?ëoé¾Øxäh48òN»¯ç{áÕÒ3¦êƒ jÝŽß½]_ó²ÙNšQ×øˆÉ&HQ[îL9¦LI#‘WÖ”waœ±ù3µÔ¬Vú©+²¨éRæ%$)7÷ýͲ<³W–„ê~ê"—ÓùaD¼ÈšËä‘ÓAÏÃù)èèŸZÆBu™|íó=:RE âm¡õÉB+8:  )T|v:7àþ0¶vƒ^ +Ú8Ž¶ÍVç Ébéx ÁÃÒ÷ØÁ?Œ&M šÙCZƒ +™c‘rñõÌnCøæ³›H =6ˆ›f‘·ù}úT_!5A¨Äøs‰ðj„´7öÕ*"]qî®(æ¹õz,ƒrw2±"ĨeÞf—‰ ,~¥3vÌ„?é°Ìá¢d„ z·Oç¦Ô¥cVù¢Ù¬*»%9=Ç,Ãçã W°ÂŽbgÃw/ô ’ +5óëÃ`Ô\^e/Hž.7˜k¦»Rœ4úÔ@Îö“h©&mÀe)Ž½é|)ÎÕýÍW†¨¬-?¹È…ã%1ž‰·Lù¸H˜‰=¦Ç¶Šï%ŸšFÍÇCz$ۃ˙K†‚˜H"¤êQ~aV SC©ôLdíO="”~ ‡¶<ÞÖ_ç ¦¾ï›¡ûc¡BluÈu{j&76O¿>¶ +9:Ýóí§ìàÝÅ8Û¿‰à•:^*v^–ƒ/>š¨ýS4i b¢FvÓþgóê¨];¼Úí(H=±e¯€!ì )âbK]l>uK/šTÁiÊbD}¾)Àég3)\bjØΠÊ/FÜ¿+†­;R v¼Ö–J†‹à˜´€(Œšßç¢/•ô|‚Тë”mÈ_Ã`‡‹™Çæc‡ ‘¤„ Ãã—5ëƒê3t"xUÅ|®aïç´?íxTñ~#;Æ µuQßX¿Ge<ê‘Ç€gÿ(’O…Ò%§H†v[æ@À:†!Ša‚Zªqƒþ7¼(`t×ä6\é»N¨Àô¡¾c‚3MŠaèבY‰È,¦NÕÛ‰6ëV¬~@ö~º™N©]ó-×psú ›¶ †¨õM†"æøÈY[<¹T1ÕMÛaSLŽžB˜‘þ¤Q— +"RxEËGÔG 3ò,jÇK˜ëÓèWbÌèzï¡ÌxÃ*þçÇÜ=>+*m”õ'fR®ÛXúô6—ªuƒ^äwãÎLþU” !×*€Ó'ŠÁSQN8Y«¿ëJn@æxšÖ—tcÁ·!1Qþ,z–×J‰ÀË£<«},µòPÊ#N’y©2{âdCtHKäò¶† cKh”0… ®ßž×§½”@ë¡ ÁÌ‹ü­ˆZNäÐAnôbP)6: fJODÔ5Œ·„o¼'Ãóƒ "´‚;LD2yAjÔb]“rK¹Æµf'aªŠû‚J™£™,ÈÚr‹ëõéEÇcðþQwrÀ—ªê)fk¯Ñ“läæ0Kwv›)D`’&=Úö\ˆ+Égn­“..)sgþ¥«š_ó,~–ª~9ª#Ž·êàw¯ÇYlFòn|8—n·È\'¸JžÊŽ%‡Z¡Nû¥t\Ÿ‹ËCí.QÝ6ÈQ¿Êèÿýß~ž×ùÖkêJÒ¦à‰Œ_ÃZ$ðR&<jë J‡'óÚ“Æï{‹~9ñWAϧðè±6N)éâ“s“¶aìÙ¬¾oβŸ{+oYz\l³up.¬³1’Ðh‡äp—~±ZÀÉFX"§;í#jR$hºD×À²IÐú²´¿:,õpú#éfFép‘j²žbtÏ:©ÚOîZa~J˜ü=G#^1ˆ Âu_V»¤E9>H ºh§ëÞêÈ)?5×Iˆ[Lg†¿¾"Av““vË\š*²Ý›ýÉE"Þ‚ÁH¨-Ù>­/gJÁH!î@Ät«tqŽl€Ÿs†'½6Ž{l»€ã× íD¾=ý»2•]–›tøàˆºi—zO¨¾R¹ø6­I3°±ìmê‚W-ÉܨPheÕÊI!±3èƒÎgvûÀxˆ€vô{p% +BˆÂôCZØÍÊŸ?ö¾Æ"#±·«êø€+ˆ²Í·` +…5"V Änë€%ŸË30QGY¯¸-uÿ¡sòáW•§Õw6Ù—CœL¹èéÜëoȤŸ¼ÌÆj5ý×M$óh1û}å+ v)¦ÉZîéË"5B3vI|Á…µê”í[ì¿IOðuOÃ0“_zQŠrÈ ÙzEb¥X9—'ë±0[ÅA†·%­Ñ KP9òVDÊÙ±¹¸ÏÓÅð‡ö‘•™9.à{à|7èÁtCE‹µTÒ¸(cà–++Ó§Œ}™0™½SøglŒõ·±ä®bÁHöç9qÕräeë)O5¡‰óTÌò’s,84Ýà?†P%~¯¡ðK~ân‡ÙÖÐÌÀ0˜¨R… æ²SCêáÒ¿ BnÃ`Ú˜QÖ'²Ü¹ç^TvM:np ù¹9\åPãjÇÅc¨Ò[®Iì Ÿ‡ÎâwÐõ O°’Ñæ" mÅY¦ ¥,SD8Éý +N‡ró0«È]tSaM“[ÿ+¬±e>ˆ#QólþÁF®Æ <Õzææ4ÓÀGòÓ&&4H±5¾þ ·7wvÆ7áMjOm¦½­Èl2è-ÿ~¬§Àpb 9R[1Åd$Š-æÜ9L*eƒõå¡|»¬É®kŒ%+äHÖ…Bw•&™/¡¶Ocíµ)®ÅÌæ˜|_ œ­à=ïl¨hUgc¸ÔYê8ÉKÄõA`³?M#Ÿ&F¼ÂåÖKÜëœÄ FH¿¶vbOö#`>~.‚²ÈuQ9ê¦ /Ç™ªPóiïTð|C ë±°¿“=Ðb¡«°©zC¸¸€Y÷“/ÎP™ ›ËÖð8v™–ûÊ3#˜[$F»´Œ+1ç¬oâ¸y[u• vÂLáø3ßdú/ภu¢áb¿³ÆõŸ> —Áâ½àö3Tµ’-¼§äò÷,ªM¡ +Ã*03Í US4D&á®aò; à‰ûˆzÀW²â‡fƒêH"J"Ž… ôÓEí‘\) góžNd½. ŠI¹#®uùGIÍ–ý§wæˆ>Ü"D3¶‹ @ãnHËΫ%¥Ž/v“Zqü\–2qÛ°4²³V²‰“Dt Ál„F$tŠ¦‘ľÆ:»·C!Pp„)—c6_üÛ#2Œ‚¦0©_L£Šgê(gÃ#SC±È/wø.°k†ZÈ.ê§ÝÚ8é.BÉ­£«DäK:DSÿ ceéáF[<‚ uQMÇþªœ¯4kh“WG5:öQGSí>GµÝÙ¯sòk€¡z ?v‰.Æ:œéñÃPoÏiˆ#Ì,€×Y$=ˆ£e!ÑÆv" 5%ð p +²ßë˵PR«Nµó²Ãû~ª³iK)â«&N&šr²8 ^j1×£m­Äߤfßôå2aÅFg¡OöÇXcsó(ÿF‚prCâϳ³NEF‡¶Za;•mýæãCˆ@Âi§úˆý¯÷‡ŠJEJ.~!” z˜‘w”ÅÙMªš£%>ÞÃ6bã³)%‚©kœe‡äXbA´{q‚ù‹*ëâØêY¹¥3\BÝ°"ëÖ›$éÍaÎkåÁ·þ¬`ÆqjÒÂÔ*ë½Æ@0=^íÌ7ùÜÇJÙ€Ù4àçB]|^Jp-Àì_½ÐïŽÚs›St…$­9Lþ©azTÞ‡+è˜wÓÉÛ8heÍ5 œKVÅ×A%\ˆ|*³,lÝy6 +g—”UÝÚÝêñõxô­uŽJe‰é·¾â‡Bú¢Þp*.2Cs=<ž401‚ +T jbKó¿Ø«M«×K´3ñ;" YÅ”0¶’‘g˜ÉGÖïxµB¦µ5›Èƃ8>þUòN[p:í‡Í„Ú(ƒ±ù4]gÜ‚gLGa꼯§—ÓÔÝÞ8ÊÆ'pÊ[ +/°…OÓÀüæ¹ô˸mRxp¾k’Ч-ÍžG¡,:!7û¤“1„ÛNµ¾$|zZ^Ç ϸäqÌ%M¶Âk)ÝÙÕVƒŸºÄ_…Íøgˆµ©²ÖeàDy°œ k²ZkÓ +“EÑbù‰?͘ˆÔ\}ö Ó`Eâz|¿;Ì~.ðâeN1ˆåG£o~ͽXÆ ‡Xú¹Œ…:L?ü +ù¦þÿ£I kn­ø¶3²[G¬!À?L~ÇOv‘ƒ´Ê/2ã\FI¾áÈ„1X¢p­<[òËñqq†ãtœ°Ö½š@­Î…I¥a'/ò«H¹6aÂNo¶H­‘– )¤€0ÆŸíä2 ÙØir§¤ Be¿cƒ%ŒhÏñWÞPµŸím¦óÆf[lÕ?¦þ3'(ú®§Æ +¢z¯Lí¶¥È‘7î›Ì8}‰:ü~€Ÿo0‰·§ Ž¾¥×~$/5~¦+Z$Âð.ÄŠW;?€Ëåõ–’t/.vpâ"•·}‡[‡¸JÚA£Ö¿NôéºnË ›É­nêÓ¯›dT„y`Í|ÈÛØõK—Ô 0c`Ñ^-‡ª3Ó•ŸøLÁyâÒÄÉ~PQœv'hÅOà›•0‘/XÔÊ”ë‡@,Êd¿gH=ÐÛzÌ1ÛÅ(ë/ÅÉñGc|~cÛ@D’ ´ç˜ž@žiU¯YCÅ…®rµÀ7Ì #hÌöÿZU»d¸µ@7íÅ·ùa'Ümóc`o×=2_7‚EÒO 9ØÞTf¾12©—M~£Ððä#:2•§ÂHè—’_ ²è4ÊÒéNÊ¡oA¯ž+ÏÏ°_*÷7î¹–´ Ÿ<+K +l·ÃŠE®í1#`ò_TÐç«Åq\ÚT˜›€X?ÕC8.¸htwiœ|,¹Ù¦CÜÙíîsøîJ½ ¡Oñwm°’œ·ÖKróùÄ$¤k’ñu½òkÕ‘Î=ò©^„\«Vž€9+¾ÝíÓÆ¬Ý +o ¾¢0öòý…ð ë…IöÃý–’ñÊÚ1çq¶bn¸[’q~¼ù{µY¨`ʇð€%sUè÷ +Í";˜é ŽÅoo²˜<ã´v×|.d(2W¤ŒDRÌ@.à1îQ"ŽÐ½› ­=…ÁÌkžþƒ¼ŒÙÊÔåêL›þ”†¶™¥î6LÏ=G$Þk¶]ÑM`']ª_t±NÏæ÷6˜‚“Š ˆâè!5©På­ÅšäÌ‹Œ\W±'¯¨|$c•„ŠèæŒÆKø™r‰ÌÀnˆªÏˆÑ|‘XùNÅgp3;;·ÂÓÅrÆ)øC†u›—üÇM¸·ep%ã¯mjÃnÏS]‰Í§•½N‘úØ—e-/BkZ-ÝètÕVêÃ}<¼ó$KÀ˜ƒî*S)¿W$ Ÿk°ÀÃ#ŸŠ-@%§˜ÐiµæEò˹—Ð6èФë,š?ùU–X9”×öåid30ø“GG烴 Êû™ð7lÖe„<ÌÐSF9€;¨ø° ëJ)/ŠÈ×ÒøÇ‘ÇCد,_ÆrÁ-…ôçã9ÎÍá-‘…‰î!-ÂF ä{vAħ¥áóÀs2!Ì,F;Ý”!46rÿgº@AuXž +«åܺþ¶˜ê“BÚé&Ѹ̜%„ÁÐ!Næš@L +èUÛ*ÄÐõ®} n[úDþ|€#ƒ LÄ¢4'Œ¶bVïl»©¥Õ—–H­ŒA ZŒAW¢ã_2ÈòÏzê~ls¶Øœ¢ðËz&EÞ +q"w6#˜ðÐjÃ7 Æå› +#y” ×q`2ö*¿í£25H“nQÙ¸€{Z£LeP:ëSmøŽ)‹Ç¨«d)Žº¼<}²)ªA ü³Û\üĉޤ̻~ÏS¸júìRo­,<Ò1Ô˜siž0ë7$µxXô¦[($¤¿O™Dá®S¨±0ÐVõ|­ðËë€vKðÜÅŸ âaÆâ,Þ$á@,œ’ýëO)†¹["{V«ž…)4ט»qÿÕ™ƒühA o.ˆ ¾€@ä×1õÍ Š8¨á»sú±ßŸµâÙ\OÔŒlïbžþ P,Ž„|$@m½¯ÕfsSŸ"@XÆ:îøXÄŽ`G¼íj/º±î¨Óx‘¸ÉÐÑÚ +%+|Í=[ôÊÊ»!Ç€.¼èÇC‚h˜Æ Ãè…-ÍŽg&ùÞ{‘Œ}‘ãÄbÑÂàQ×Sà«‘òÄuö5"}õتF0¿aÏqÓ䉹‹8*®2õ;‚Â3¿0[ä–¯˜(è±S™UÜo¹#Ô¢\+_l¿æh*J¾ö·i‰ßÌõˆýOKƒ!ú˜gµUµ¯Dþ´ŒÖ±½7ãâçÃ.W+_"]½\œÃŽX‰Òû»I˜™ÝÁI‡¶ÙÕ7›õ$;&—6bbZHЦ©*Ó:+Z‰qRÐKÔ:<2@#tû%„Nt.¥Â-†BB3˜ad^¸F±¿:ÆHQYö(o술VMp¸²)ÑǤ7.£9º‹`‡KoYýŠjcåÜרyý’e¡\À¢ð·›¦áø„ð¾nù9«ÂwN,«Žêˆˆ8|à–ë"NÒwb°|‘Äëü@O¦I¡)Ša y=ÕŽêO=Ý–.¥dwIÈɹ¹P­c| ÄcÆy| Ÿ–i_wŒÎm’¯@i +KÌíå÷>X=Ä~íðöÌ"ŸíYí29·êMÌË<߸óù2ãðNß36O!ÎÃœ'ǦÕ1ßa‹'—ªmk£À ×¾á±Û»NÖlûa¼õàÇ"t³à!£Fù¤³-Tºj_.”3aÁ­!†—Á•‰*Ò‚¿ˆ­ìõr2P/Æ æÅ +ŠIS¼!"ÚÕv=ÑÁ7½6[áE +!&!Û½²ã*º’DƒÃn¿° Hz)‡0¹ÕOèµSeŠ²Ìúv9 ô¢ôQmµX».¯Þ¨7Œÿw5º–èæs«â +V”• [(§ DÔf÷"ƒ»¯¦CÕ´Ö•óçâW¢(ÜÉ1I`Mÿðè€TFŽÖG6$Ý=~‹µ†¶ äói/;ù®Ù5õGh>6b⣼8O}ñ¹ÿÇÎYàL¹2ØÈ“*‚ä4 Þ+ã° iéÕ€Šd)ã%Ä/ŽÂ…‡ O—'ØÕꕘ"H? ¾yC¿Šc¼¸æa׎¤—K"›üh$%Ôfg¤Y ¹@Vå/ô½¶Ñ}µ~Ä9¡,\˜Ê³áÏÚELd7>¹‰+–ƒép÷èw-nõfŸŠì<#µ%áP¨¹M¨×Äç÷£e¡×3}~ìGʺ¯S²VÊqZ‰‚¾3¯ HäæXÉ„ãºÿ†3:Ÿ—Z¿A{ÌÇ\nâ"Ùgö”4¿H%e*ëñVŒZcìj<ø6•ÚV»2Åf˜ý™£ÓBõ˜µ€Å¸ë’«y£äRCѵ¬¯UË̱£Ï«Â ˆç¸¨ F˜€e/„<¶…å}GâúQgiø*6õBY¿2¼ÂXämMèEÞÒ™,ÿÙËXÂ; +’à'œdÓDZûÖƒx˜DJÿõƒþ:È–P^{]Y‘¯ªh]­Ù:BAÆß#—QÒ½R©µ¿º3Eù +£6”Kœ…ØpVšz­p jkÞ’¢J}³PˆÀ¶5½}§ÑZwMl¼È®¡‡ª!°iÏ|Ý©l(f„ßÄÍX8k\tCÂl4$8å»TXh^QÇV‚0×w}‹¹°³¼'<åq¿Â¼ÞJºQÀ‘ËjÐëÛÚDž”MSâ¢ÞˆE“×린‰ŒUjŠÇÐEÉÄ~Ó÷kçEœŽ>Q>Ž× ÀÉ'5:‚3vØL8s¯~˜ÊNõÛä’®Œ6™G’à aæ ÍK½÷. ‚š~ÕÀ™p1ïÚ?!‰®5Ç¡» ¡ªà?\¦]zÈÔ1í!÷Ó¤G8)& VmU¾ù·”EY$(-MK9nê&Ò@ºß ˆÊÁ°™‡cß”öæ< ½‘ 䉼&³Ð…¼­ÒØXµZB÷“UÈœ¢¦ìuØ"R¢¿\Qš©Ú~rñ(£ž‚¾˜XŒ q¦µ!10 (Ø@â¿»Vaø|QlGª9Ö°¬tl:˜ðå*4Sƒì°óŒ°k SrÁmž„m®q,2À•ÆÃ$B·hè]èˆfT¤V²rÖñy°LswAˆ;§1¡ …ÀÓSíBµÐh€×ódTóÍašìKËÖÖçxµ]³.è?Àh—ºICh£âü+¶Ç5íÅYÌ¢tj#¸ïù”ý§«ü)IÛÊT齦ó¨‰EÂEÄ–6 X•£oN¨ $m*Vã8‹·]DIR¬Æ•†6ôŠŒèŽñ¬Þy:F{nM²?†AÑ”,Ê þ† GpIÖ8”†Íaj *±—Xþ‚L0 SL>šD‹ 7A”ÝìK„Œ"äeA·\B0RpA4¢E¨Ùœd§P›kí!  ŒJäºY«c¶N~„FP7èÈ4„‡@µNL½} /hÈ\"ëÂêw¹>]¢ –ß®Ó2ÔÉßJ2‹0u¶yî£!Ï- uî-˜mcØ ¤(ûûÈ—âçâqlCø›ˆÆ;¡{øß"õkUYù—âµÌ×yj6›%½¦x‹GÉ¢y9Ö2oùA]·Ï¾îòª˜ð§bYÉÀô‚ùG´À´ðhÁE0€–ŒÃR Ü‹žËrMrÞ'û_ÃÍpWËfÇù^=½ …î¡qyŸFX=À^¥Ô*Usœ@ÈtôÔûÀVF˜ø‚ ø7’W‰_c/'¡ ™õ7}¡þ•€ÆàkµœíïÐ#ð:ìÍI €¬` Î`_ì|b”Ô {÷JQß){Qó­ylQD|Œ$!í9Ý#p¥Òë +jê~>°Þo›Mâú!ÂRqøCyÑKƒuÒTõòan4ØÒù„"ÝFk14Wõ«QÑ-@(¿Å%Þ ÞÜÍ@Ò·v½"|D¦(QQeß×oÑï’u7ŸJ׋ªÍ u)ÉÌTör]>µ´]ŽzÏVL- ërÌ3!ÎE†9÷Ę H©Æ ¾4o‘EbO ü€•ÔÔ|Òì!ÁÃÙ•ûü’ÈVv"Õ`Y>Ú³ŒÕߧBð``VñùõÇdm ÛbxCPFÊ;À9äx;QFè~j‚i¥q$ +O=Çè)µ`rÛX-äÔ +%[ïô¹+aêå°Ó3ãAÖ©rÙâJ˜5#Pe˜ÕÍ™¢ƒ‘aÛƧ€exÈÃéq9ËaÉI|!NŒËo™g±ûŠRÔVÖ:JyFŸÇ‰NåeÀ#ë£'á¨Ç¦ +ÜO”B&UØåØo¦s~{^J1½cƒöß"ûÅ(MÑ”ó§ÇAÒ¸Ëú×dÿü$ˆÍÓE,˜ÐcHÅýþA†!8jçç7ÛcÒ +ç`¿,wÿàÏÏ÷YƒGé`ÁM܉ÉÑI¯€ aiuãÚLê,I®-þà<‡ ÉBîÏ…šQdo&á²uÊ-^qµ£˜®48‡…/©óŠ×k ®1‹€Xœ¹mðúÑ¿,±Éò߬†ümW(FÒ~3ЈÅÿƒœL€žÃ8ô×#«™ƒT%Œ4=´¬[`{μ âߺí \‰åÂH‚FCæÍãgS@D$?æ~w0PæÆûÔÈIJ´wéÛZ*ñûUÜj¡«%70õ43©c—׌æˆwZ$ݲˆ -ò/©E÷üty”§Eíd¿ö¬ð1ù’zeAµ0&.°*à7- û°lID&­:¡1æ"˜~9«íËBLÉ؇+ûᎽ[‡Î(õÂqä<DAry|Þÿ5,84v›cÊœ…ñá)öÍßÖ°‚Vt8—@Äus]K«µÚ­/4“—%Y¸tÑ:•uc®1êkþÝÖÉ×t-Çðå3 úÅDôÅ?LÿNqÿytJ5¢<ôôŽ{0£Ü€s–(V#§4£´±Ü|°ôƒùBRÀœæ(,Æ@ Z0È‚BMÕ'ÃÇ]É'âú$N£ßÁ …7`ª\ñÜa‡d_ŠM\±£ˆÎ]<,µ! *zŸŽöÔëÛ¸ oVêo‚È%ê”Ï®¢ܵeÏ­ø [¨à„|ŽóÄ|"Añ£‹‚ Í¥Ho1” ¾ëΦ:òåSNqˆŽÎñuJy^‡ŽVð$þƒ+=‚X ŸÔ„p¥û©6SÖH>õ*p@,…æmTø8¼Qž¤›ˆJ•O/¶}š&*M ‰ÁÚóAVJž»‹S©Z¾$T‚¢X&n‰ŽZ+&¾òÛWîö¦àFÕC‡ŒÇµ:T$0ó¹ +Ž6»¾òb'Ds°Â}í>H7Áoº-–{ez Í"r©J€ÅU^a"ë”þ VLÙt "yÅqo±LE̽2•‰3¼(UYCÔ8t)¶ò« _pY÷wg¶}~%v1[œæï9Çò̯úc½ôVÉÃ01]03,aRÌ_¡ï‹£ÙD·Ô,]-ΩP)’³Ó ‘œ7¯–ܱ ƒd2Õ"_°Äò–Šw9NZP'w“¨©'Tò'kAºwÏ0è…bJ¸&ÐÉ«÷õ<äW%– ‘í{ +|–çBáûg%6->Df”P £$9þ”›±šƒÙ7XŠàiX + ›@—–Á×çÓ™.오`:)1 …ËJz×[ôЫ PÃçÓ+D“ÐTж%, °í^Þ\€*0ûªcâí±‚Í¾Þ®NàÀ¦ÜÁ¢É‹=]FæVƒß#ž‚%’,„ Á‹[oþTЮ'Ž°OstÊ,GW ¢À‘ç9ªÃᆠï®Ã( 4 R¸÷”?àþ1@5(¢ º(ZygzüËÇ>.7`>¸}Ÿ´\õ/×qnú‘¾tÆ$çA¯KÖ_uqÖ"Ĉ6R I×ä@<·¯BÆ#›‰ýHP”#  3Ϥre6ƲÛH¢´©Ò¬£¶Ã#F~0ú¨èî››ÊÝÚîdß6= *‚õa[ª‰ ÑrÆvöÉ74.‚šSP™†P°çÉ<ú f¡Y¸w_LU úXÜâîñŒsBOÌ6‰‚np|ãÿë ÁŽ›ÝAsÖ¡¢Ä­ +XýW›ÃŽo|bÛ+˜üÓô^¢ “V”fÆ2üJO]èhkUP?@ðþ¿ +^þ/Ð| †¨L@‡;ù6ôt¿ÃÊÓÍ/Ób}R¾ádóµeLb'ÝÃÕIqÄuÃ|?9+þ`ÒŒ¼w•§wÎmö½¬3‘`¹ó/+¹ÁÂS亩؉Ìx® Z«L¸~SràÏÂãC‡Ÿ±è>L%ØJR{Ýw’¶?¸Õ~ÛNÃ&= À5wUÀ±²2œ|-Î’F{[~Ql4k {ÆÈãì¢Ðô¯ï%=%òP‡ÂKý6x%·u{° ãýbßßð?âw%ÁÉ—GPõ5Ø>B¨ªš°Z…y«Ö>Þ”EL;±ò»â¡ÔéÒ#£wÈë^—+AæêåÕ9.–ž4zjÙ¤æH "ìÉQXâé$&õLårÌ]a¶:-È2ƒBÍ&Ÿõîf¨&nNs6¿ªwŠZñië1äS8«ON-p Ó„€êU˜ ýQN—»½Ä]®kö”7råÞû¶öéø·{±j4ðòië°Ö¨<¹žaóõˆ¿¡÷­•aýÉ?Œ—ÞcÑ<’s>Éã’·ÇÕ&_ ØP>–iÂÇ$ΘMì>ù5¥†ç³ +uæhÒ«–é8¬L¹HÑÜÈC§Š>ÚdwI1Õ} Ø‹—§D(\iw¤~¨ª蹦‚70×;*yI»wBå(÷°?óÍÍ{>.V‡ð¢œ'nòd}è8‘r—„Æ1½øÑãa ç¡lE×M¹ôr‹ +°ÂÀãé17—ä³èñÿ‘@xÞÎÛo´”ú@…¦áÀæó¹VB©¿b×E 8æ]¾/©§s{Bÿ‚¬„廡Æ>gß$jà¿£²X—¾l%Všô·ÜŽýÂn• ¬èèâj; šØ[¾ànö!í/yñ›-mÌΩwô‚³Ý^¶[úWÿKx§@@„òƒ›VNeEo#ö0 @µåh–’CÞU Xké†ä‹@ææ{ø?·KEÙOcEª“¹CãY¯¤¢­¢\•b¯Þ³ÔãËe}÷êÓW9®3”dé~‹o;ep¨T:ŒGûE%³‡–AÅNuŽÕ÷KØWºvÍFx&öl©¿:ÝR —„„GåKXÝÇJŸ/ÏÎ8~.‘ÉBpuf®Æ Âr]83ñ ±|£/掕±ƒÙ­DN Æñš¨b®½è –²¥|lø>×  =×ítåUÊ^á§tC[•y˜–˜’Ú=ºH¥$™åuÚ¥¦´ÿL"~Õ)ÅUZ”°ü”Z“$SI$;Eôf+1¢™!Æ|Sz½˜X‹ ÌM<’g 7|Ÿ;H£Ý…m3,·ßaɹ,š)”'îËÄîÏh6kÚŠrªt 7îJŠˆÇÌ2(ùf*k‰]²Ÿ—ú5;¹ñƒÂyœ¶¹P9 Ü–n’… Y¾N?ÿ“¦«ñ:kIf¶“E/Íúœ}â¾8•Š}À«qØôvsÊ„röåHm?ѽ +×&#·G›EÇ =RG‘O´öËw½_+‡Þüq BÏ&Õ +Õî8Æše·ÒŽÊØ`ÿ§·Œã;¹ˆvÜ™]ý!p©º3T·”dwßFÙÒÞ§'ò[¥õú¤Ž@?øeÞã#tÙ˜3äXøýÂ(|C“ðà¨Ï.zÔfÉN_„ƺáÁhÜìºæøsŸ?gõçéþ¤†Añˆ¹I6IÀð#Ÿ?BU”®%çZt<¹–ãh$V„‡C²æüqs†å:kËø`<­µ<NÉœ½„ôyµÛúÓ¥-à²JÊÒڰЄj¬Ó´#Ù,Ra#½€ +èZÑM̶‡ÑþdÞªî–V)5=~š´j$bƒ884ð±ã/‚&~‹fÄŽh>aŸã5ïߘ“²ü¨hì#F¤}$ð8’ˆÎ¿Õÿ®½Ãí·ÝÔJ"bIdÅ]×<Îð§ +\Zt°ôäõS0g9›¸ç€ Oé{“Bþ,8¾(t˜fúWrª™²É®ülAé·”6êžrnêÏ}$ÝæU÷dæʤîn%P›™dw‘Š¼b’²,@¸ÛõЖÙBs€U[B@¡ Pv*D‰Þ/¬»‰¿Äê{c/É&,kÖåCÙFDC?®b]³ËÉ¢’îÎœf£Ù/ ºâp᧕â+é2jådÙh‘QÞñ‹IîæÊᢨ×÷Nš%K·¤ }<Ÿâ¡ƒGê7 +Jh…“SIƒê»T‚ /¶šÀÙ~k] ÿÒZ!âæ|QváHóChv²¦DŽ§îû¬.”¼É½×´kÊ97¥9Ü)Ç9§b éÂ*"®¾©<1Ém‚V.Üm–t=Žˆþ•¥z™çH6a¥òÑ̨̪›\¦ŠŽý(Rœ±ë he\ÄJÍí¨cTÿ˜Q@—mû!÷„•Â•Ë{E`ïaê qŒØÉ„ÁL°íokë^&ƒR>Ü ¤&ü[vœ~%´°zMüªŽ×ræN @D•‹²9F–‹þÄ·{ nrÓœÚ!7«Ì6RJ³êßií6Gæ÷¦¤;¡â觢?±‚Œ˜`6Ó3¿²ˆ&zánwœåækàÂË9}LÔÚ›óÜ}è·R¸®EqkL81&(fÄZx{ƒ(óŽ¡[½‹$›³¢m)ºV +Ù ÷Û6‚%>BûH”/&õN0™b"Q'aYÚê4äÓ´,`qì·§ÊÚOécùÏæÃ5PÙ>9œ‘˜€³’àøȽƒÑ³ª!Äø¹¨ÝÐk‰› ÞCƒƒWÿsçÒò.1€,P²ð‡ÙÈÁþœ#E½²­RWL®úë:Ð »Ì}¿}š#ŸUɆÆÌ}!›J¡4“°Xį¸F†õÞÖÐ’¿Þ³Ú"d/>턬\ñ:»b›˜vˉ‡š¤EF?ßžne™¯ê©ˆÐmÀŠ*¢Š$B«4°|S´eX"Ï­Näg* n,|BÝ­x:ú@vÇŠìvÿÎà¢ãB±†:Ç6À €šÒ¶Ü{¶žÏrëèœi3`…7d3xZj€ÌSlôœ‰âPÀ˜FŽü)öæîÁËáa‚$\iYÍÁ„W.nœvA¤UÌ3zf‹ ÁƒÙ˜Z¡dd“žÀ¸1ôŸâè’ÌÔåìï¡kþK6ñûößçFæߪò„=ÊøkÄ׃¶ÌYE ¡w+9±¦‚ú¿B×3É ’Myûðmî2 ¼~˜?Mx^SûMö︘Í(8?Ùâä %Åäö“éÏ„¡ +”@0Èd}àzÛÜö“Ç*J’Ðìƒ}Ç8ânI¾+°†‘½zŽÂ Ž¾Éa1J”ë»/ºø±Ð3œ]:ËOCl]«QB\'(«#.#½}MÄ*Iµ“ ¡Lé'KvÍî bÓsu ¼[N7ÙˆtèžgØQC’±ÃMÕCU*Sp÷4Bµî4-KAÎãl#¯d$òUó\î,CØeðï Ð+p÷ÅûuZp­löЂ×M‚ՇΥí’àiô(’eÊ7ÂD9[ q9Ëõ^Ù‹2•Àpsq±`Ez7ÃB;ÞþèZD*ýœ(NârñTâ|sPšLuõ[´‡ŠSÍ4`@ÈÚrá=‰m[ßðŒ9nAs7²‹ ÅÐËÏÍ$ ½Ïý"mºîQq¥Í7šM‘ÒÞP,²T"õ…*Ø+Œa”ÍoŠ/¢ÚoEv³’D¦‘½ÓAcš0ýý:—™@0Ðj˜ct¿ûjü3~X.àYŠ ÷ çœh?so« T…宿–-{*õŠ‰êÆâ“ìEšÍÛŒŒ·ÁLÁ¡³Â1@ôU–N$ƒ"±Ft2ˉ;4½rä dÊŒµrºõ‰yJe±­õ ¾ˆ [±ƒ” Ó!‰lhIÓ^ªžb +ID&@©€n—ïR¢ D ÌdQ¢èõT—@æ*ÿ¢p@éÉLî¤_ÜŠÇ8‹yéÝÆþuÁƒÚ‘ˆÃ;(/C‹àšYíYžÚÖŸ8i/ kS•güÀÉÊA¯Ô¶ˆ•*½à™­®†u¤WžØ÷*h!¾¼N@󵾫 œXí¼ŸäôjBéâ¤{aн(ƒè·Bü¬Óñª†fA¬â= +!Èj#ŸÈ['U½^«™:{vçJýµ¹¶Ùê-øí­FÉß’ò™i­Á?’Â,ÜáPiÖ7Ñ)‰IN…Zæ'pmô÷ªŽýa¿3LænC弣ˆƒø§pÔ1s¢ Óš|WŸ Ù Çô9ê¡òÞ©”æ”ÀØbS´49›(v¯ªMèsHT<8=Þ^£^‡è¿aã…Pà *>kê¹!"§ K@#$ s$ ÷âˆ;½Ô”rS‘Y/Kþ¼Z¥š¡uîÌS˜ÚX•rë*è-ܘZØënR%aò xßé÷á5ÛÌB߶¸k Ä¥à±ðʽåú8lXÛÅ–‹ªv'áíVÞ‹C¬§Ð4jÂÌÀ›•/*AU qcúˆÔÎDº|“ݺ¨Èm³µ +üsqÀ×i'–Y9âÍ:Åü>Éþ'¶%œÉÑ]}÷Q!¡1 _¹M3äAÝ0y;‘^]ˆ0¸`[Ã9:{¸T$=·Hûe€‘©)Ö®Pß×2}Ö`ILf€±Àën*¤r.ÓÖX-Õ+O»)Õ + eBqù`_ÀBÄøâQK¸ÇñpL¸hŸßvfgÉ8¸: W }‰ÁËS¯Qè†(¸=S$´| Ö˜ ìNõøX.AÎ…ÉÆEÇ0˜å±9…¦51çÊQ·Îp&N<Á‰CIõ~ò–äLè%U^%Dšv‘”71ùíT?EíHx=J´—Ù{͘q¢_ñ°h°Àf<—+>Ú:Ò›Á&6Ÿ=ý^åeNB)¨¬G›B‹>U2BSâGÕ¹j÷àëöShᤃm¹“©>’§õ QµP»ÆøÿÎgaô&Ìñg«²fÒfFe 4 .?‚{ŸÓ± vÌä™8§p¢˜‰!9Æôëæ0N‹.÷Jïrí<†i…ôWÄÄøãÛgàùÇÄÛ|9¶¶Ú¢6O4„@éÈÛœ\îFiÚû¶À**€JZ|ˤÈNBÛ€> ëPoi÷Ùó¥¿WBy¤ÝäF<(Šûl—E÷'J¥r +a:,pðQnÓ¾Ÿ&8}€˜`„Ô:»ôm!Þ^‹Îã÷Yê¾u#LÓiiÁ<ÅÍ,V0 6÷Ç['Ñ£wB»Ó8ÀÂàO Æß5˜BV{»{xÄÐàºhn…ô¸èTÔ àžÛëxv¤è±N»nnìLƒ“öÿð«Ï(oÄöÊý0…™äZí*ÍJé²~Z’Š‘Õ”’Ÿˆ±½¦ZÓŽ¤¬œÇ\‚B ƒMþüQ[åÀ*Žñ…Xµ «€ËM × ûa ¢Eï]9Ê/ô7àb7dm'$€ÄrúRž¤ €¶¾ÓG¦~6¼Ï‚\ÚÂeÍò‚ Ù#C„ì%ã—ƒiý +xY-¬–ç1¢–)Nf‘m§¡›q©ÜÒ6Žµ· 4žø…‰%ÄpíŽmüëGý‰³ÌзÚK°d Phðò$;“ ®w–$úw.³W:ÎH'ÁR :ã’@rì®ÓØä0€Š¾Š™GêŽÛÖÁk·u´¨ð¶ Ì«¾Q–m$äàŒûï¦ÄÑ µY$פÂHi@ölþçB– +×CjENJ§û%M…qÁeü¶O…ÙgÈJ¸1ꉣIãR»¡ßA>¬æé ³îåLþOʱl­Ã]æË•çc% 4»P¾²ˆ´šL†ßa-Ž’—Ã⣰)Ïk-þ÷†k!œ‚A~ygÈ7?~|¦ën’xh”«ƒBðÅ…sã"ÂMŽúœs=ÊæSüBÂïЀâ)vŠ_1ÖÃ1Ó¤ßDåÐËš˜}͹hŸX¦wÞðžlIá#^fÜd㳸ï¦òR×[Þ”Tßg¿ÁZûí[æ ¢AICï»?©hW¾káº4¶‹}¾"^§ô^²Qè›öÜ6 Š#³’yCŸQ&C:ÂòÜ%—• iQœK?„Ë~‡Žüèk¼\zïP¡ÆØ@vÊäHVInO ¬U ·(B/{!2oÓù/œzû¹õ ”kïM=Ñ´|—´ ÉeΠ{8.®îñE8ðQ•ÜÕ‹›‚ñméa×O›ÇiŸÔË,<ò–:cXÁK½ŠýSZN–Ða©ÆýP°Ye£ÐîKÊz©ÑÿQ·/Mã8íÑâEø5Bhë*{9žè³M¹-DwŽ¯‰$ªä£øÎ(X‘ðåcÛ 2­Ó™q¥dZl?=]µP²®avÆŽšlóöëÛÚÏ®­ÞE‹~ýPîËjܳ2¬Â´ù@?J;Ú‡Ô‡Ç\hAô䊳Z$éÌt¼Fpx"±žÞúàŒWDg†Ò^QÖNÍB¼§†Ö;_<"Ó7¬kE»~¨‰½².LO…Æ…Je4äÍtjÌ…¡€WŽÁ’îy#Ò…»¼Î嘓Y¹Áþ±~C+m=®V©WÆkyáv¯Q³ÑÔ¸h".ÇîÐ`ÝFS…)u<9ýqí¢¡Ð,¿¥S²£¾Æ[£w Ì¿P›R çPY>ÖÅWX:$…ª@—¹$0è784]Ôý# •‹&çb:Ì’ÃÂ&¨oËYî:à•¢<ÀôHÊmÎÒ§~—èXlàîà×?´¨Ðœ/¿l¿'P¤úßÁrt”ÒÆNÀúk:,˜˜ù¯8q–ªºZé†ÂÇ»ë0^LèFÎ'ƒdàÛã…`E+£º—¬Fá†ÛãLºñÔ×\¦&2àf˜ê8™ ÅçÝÊ÷`JÉP +†Þ ý5Çb!Ën~dEö{UÌ®ðÀJßÝ‹I»É!\‘rbëv‹¾²_ Ð]äSù]I²s¦ïAA€ `’ó ØN”ðØú  ¨Ý[ƒ(óú}&¯Ì\kZÂÛ‹G _vÀû¸zƾ$9Áþí³?—ù×uf¸°ÊnÄ ©·Šz'îTôžÙÍ/¨Èà툳Ÿ²Ü‚ë½õÍ>ÓM)bWhŠYÜ°À—åÀ­%9.OvaâRÙŸÔ~‘çþÒhµN'O º'ISþcêõzÜõ¡…"#zÐÈ”¼¼Ë;ÓÄ©Ï*åúDZŠÐSjáèÅBGy†ÓËY4™Ï-xçZŒ§¦|ÃQ3{” ï[r·G¸y˜z° ©œÞWØúXàÓ÷cëI·4KØJ‡yÈÆVnc_j¡ù9L/)®ñŽ°ÌæëÒíÙ³UôE½å@kI¾ÞPÁV†m´ÒHk¨Õ…4WÀz5ûo?³ƒc¹Þñ©Äq¸A[ð°-µ‹Ãm\²F7”Fɇ)%BÙ!Ïú¼FеI¨L:ê™Üéè!¢9=Š– }”>—âÖ±\‰’c0¹1”9O 8E+| +Èt¢4„°„m0ÏÙ3Œ¿.ä×í%ËS¯Ë—5øŽ<æ2œ,¦³£ö¯/ÕiÆó]§/FÉH¬˜+ÄUÿ÷·îX¼0u²`ƒhÕE¯•ˆÒ¸[kÈŠDCI +é-Á¤”z]d'Êèû$ÝÃÊ‘\€—ð¸æ‹úœ¸nåäÜÔýˆTdÐáö@qˆŠ¹;ýͧQAÈ©üwKÌ¿É Þ¸@ö)å“|9 Üð!+‡ˆÒ[Õð¿7p£ôšµ]ÀØÀº@· ½€ÝÝݽ¹6W(T(§Ñ õ¸r·Q·å÷¾ÍÉ–­öâi2/®a¦ÄÞnæU¶í7c÷Ǹ8 Ô‰¸uÆ× ‘ïŠÚ»¿:Cët_" M9q×ϲ—A}ÅE{]}{` ££D}q`,øAŒ†)ÿ´&’G³9‘†R¡9ÎÀ™8D*ð ECpL&0ùåÑ@yDD& +gÚ4€#™h8ÀšÊG2Ñð !i(T$äP& ŒˆÈD¡ ( *Êu‰¤2O"1‰ PQy8MÄ%°h,h4•f¡ñ8º³^ey/(ABâ‘8$“™Iy*O¤y,4 +H<É‚4Ï‚®=•ä<`°œ©P-Î*7­áŠÁŒÑp˜ºðEžIy‹‚=o—ø7æ ©Ýe3(óR2‹‚>9üCŒ¯T3•1•v{ÕƒTÔ¸Û(Ð(°D@‡¸Wé$̸«Ó¢ÁsPi4‰ +£±WU,c0ÜÚ²Ü3C¾Õ*ýõ­õñŽUy Â|³öÙÙq«u«LO/][òí¥­yxÅC¬|µþµI¤–Ù™¥V}˜lqÈŽÙ¯â7§Âg.çq§;sÔÂ|s¥\" ¥êÖÝuo9·„Õ›öþ2ȸ.«»zTþ´KdY +@%™H–‰Í¼¬–[÷4ó™ï×hºiÏÓk´,îtyÖÛõbͽôô‡pm©.VíÙæ,´ˆi¿&1›mò‚Vã’p˜¿¨(ãÚ"!9áÅ1QÛÓí¯ÀÍ}cšV¿î1¡!R…:ª!®¢Ü5Çp ^‡mZÁ–úhæ\‘^‹àUv?{½ÌÈk;.kÒøõB;FšN‹ž§À° +‰îo|Ë9” 3´V­ 3ÒÇíÐÒRÔõÙʺÃvÏåžæxûgÏÈtzÀ²i¶S4÷Ö؆|tÁý¡Ã(ößN“@.… +HP‹«Jô upœ7σËPº/nÖȜψ~„çPËìè²Ý‹ŽA@°¦WQÎŽù{ÅUšg³`{lM€Ú9Âåa9zê¶âôƒ–SŸÍ ¸¦ª)ªk*eÛ¦[4ÓÀ¢ò ­š¸ÃѶ(J}ÁŠCôdLLçCž£­-÷ï®°þB?}Nº|o ›…ÍFÙºÀÇd¢âÚnÐÜb/ɹ™wh¼ˆöZÑ°Ñy½[ó¤q©‰…=˜×d’LQ®“Ù¼Þ¥®¤/rTYe‚òîVC­Ð²q+Îi õ€¯‹Ï¢ ts"þ§X”Ïj“WÚãNœù¢ +𜾧MÛæêÙ¥=ê7ϪÑ[[d0 * $ŸMR¼–Kt˜,Y»ÖÎî4‚/·[vÈóiak'ô6¯óN˜_î!~hD<ñá4¢ÄgÁ"¤X,ñ¶ë, $ò™1zRRA|®€åYâœ9J½a£¢6Ò œ »VR·á-e‘Ú(¯1|bänûØ1>R⬠VÝ)Pº@Rv[KÙŠþ2V”[×¥lbÍ—è>ÄùÔ«°èDF(â²™Ýz¸EdºlŒAb¾µ% óQ1ż3j›ì„,‚Æ[#QÞ— ²* +j«ˆÒ*ºÄ¬rË UÏAÃݵ²sTýñ²Ô zš$k1 ŒˆÇ8m’Ud‹•£¶Ø°²ƒˆk÷”R[UhO@þ„}i¬2n¾Åï­¨­™&8‰YèÒ2:Çÿ’vðJ_ÓùPûÀòkômͪùH£Cw0_ù’.¥¨w8º®×áF–*›¹< ëÖhdŸ»p­=ënh¹%BÜ,ÊEñJ²‘úã×ò¹^È-Ñ™›4IüÐ5K\štfíý »¨™¥úTõËÌÄ»ù*ÞÍ>î-Òö qÑIràsFRû@ +ËŽn±âÈþ IèrÑÑ$¯ú©€Îôúd¨Øãy/§Ì[J4)’å×vF¶ óCÜ¿Üã» SÁ­ Ý fNÓOÃÂÇËm5Þ ËTÉ4A¸|½[a‡þ¤w[$ªuo1Á¬Â¶1s ‘=’‘°ó°²¥Õ“L¿úNÙWjzÆ) +Ζ„¢xà¿hߢP¯õ¸|X¸ƒƒƒ +iW2`º—£ïN¼6cVÍ¡ÇhÌL5ßQÓ sO`â±NÒ!ËZ2`Wa±0ʨmr1‹ý\ØwlÎ RL ²Òª2ˆÁ¶§ÐÑ?ì¾ûÎ éDöVÀdöÿin>ö8F¡ì¼NÔsÀ¨Zæâ¢Ûô¦ë'Ø^ð ÙcÚYWàl5L˜u­¶—>fºZåAÞ!m%°n;ÒMœ‘.‘MHäïlꟾ»¯ò癄ëËò»ärM +m¼éßA§¸*¼xu«¦—ËÂzr$ðC ¸Ã>¢jièyŒ(µõ–øÍnÅa¤¿ý +$Ú%·bZ-1b™hß°àž+ôcoÜôÖrì¥÷´p·ÊÏó›NzÎ…¦x´C‚ÆÆø²¢¿Ã>ßÎbYyX4?(~r-qXqØØ’‰ËVˆ—ú®ž‚I¥:Á½ ÚL^7^epWžÉJGp!~äð´ô…:}Vo:ì/Î`‹miQÐk4R߀¸Žß¾”[ÐäÛ€ºµ:ÖHÂñ•è\ODÚçñß“RxWéXƒý>‰ö$üF*geŸU—VfÁ-ø0„ó¶vŒ´VÃ5v¸ +ÖTšâ²ÃÆföAŠJoL,qg"‘Ãعâõìï1.–TfpŠÝº»…r…å˜H±G³¦^q´p£»6Ñ•„¾=Ô&7jFýìÞ°—KITŸ—h˜îcÆÄQ±ë- ~Áž!(Ýæú ³è³Ê±œ;9'ÝgQ\Ãò7ÐVõÊ; ¾ñ”ñ̆ ßSñ!¹X¾F¢×LÆΖªb©$¡å%¶á£2’—béÁA ŒGŽìŒ…«`»‹íì\@ +9_¢ž³ áý3@Æb ÚãlW>3¿Ê¡ùÃ=‰ÏfÜa4ùÔŒÁRÚ¦h!ßâp-xËÕó1$·R4»^ŸI§þu‘?öœÏâ¸=ƒÊêò <«+†o P¶ÃK2Æì@«f½‚7Ì+ÿ!{b~|ý6fÈÿ· >ö—W6þÈ.^4.þ¤«úxl»£½ßTïéA0®¹]Û߸$îuÿ½wÝ@=¹RçÊAÿòï+êýBxz§¡UâBYóÁ3”F,´Ô&U8‘§ Y9˜Â7šbç}ƒHfœ¸&¢‡‚ði\/ÈjNQ“K6O*|6‚Õ'm€Pgš~µ¢Ì{ã® :w +^›Åç‡dBù÷¤¯¿®ú•ðpIâÕqjb–‰LÅ~p ÎÔÕý€r‘¬2¯,2{ÉÝîþŽ}jZá1¯d„ÒJµE{g’… f<Þ§æ$–W Rî®rÖ:‰j‘cWhÊ,cX[é~Œ7üª ’®;=Úç$¿ÄÏÊôœAV~¥˜ÊÀÍaáoR“ZÝ…·¬ÒÈ¿ëú¯EA¼"p™ì—1¸Ñe·Ž[†€½•ã‹àZ† ­n:ÃY„Ô7tóå>z£õX€&/+C#Éîq©{MÚÆ¥Ã<^@Íû §Äá­ïî–anÀ_sÆLS6ºÜ†/ví°ÐàŒo#Õ!ž+Le@þâj$cP·ôs5øÖÒ~êžVíYAÝýw7:á0A¶‡.ç…˜Gë¿45%‡î /lÏ’~ºÑÈxeMî¶iíõ[¸5¾}ËÃׄzÑØtUÛ®Jn›¾!µµËÿ—:Ü)´–ÓWã£)õ]‘ß癑’o2¨¼µCr[¢xEl µ°#ŒÂ¦à*Õ/¹m·ëÇ  d=—È,9F~C©j$¿m²Bùíä5£\™Ö„(5!SUé-ô7·0’:`Õ÷0ŽS¢E÷nð²2.NšÙl‚™f Ö; ÍF…CMõ`ü†ÊLßG¥vRŠ•o-‚È™^{Ç|ꇗY€1³r–õ!Ï3Ì qP§>–·6BÅÝÞJäïHŒ+økÏø*%’¿iDèQê(}+TG77?Û‰H€ÆŽT‘ü°—쯯§ÃñW +d«ƒiàÏ@ÒY›¥²²±Rzr¢úW©#ð7Ñêf€[›Vâ² éø©:yÖMãý¸sNèW.“N<£÷Û)'FüìÚåÛ†jâx;bbyÒßtð’~’÷fA€5-ûÂVÜÊⲠ+²ß'ú €)Z*X4 ߆ëÞS[56oH†»,dž‰cÔ9W)ßÐ-fçØö‡Q_YUï4wØïàØ~)hé¾´¾ …µËn(sÛ›e0DaÛ©%è›*¿IÆåþ³…DÆwuQ”Ú˜“iç.#îÂͽžpî×jîáLsöº´Ç]y ;+¨N7å2Ê«På† N¿©¦úÊAU\m¸§øéÕ**Üð}(Pä·^²æ´ÞC™!,ÿ§ ͧÑP¥®7øÆBpnš[7LŠµ¿•4Sos@ ‰¢[²µ„äWÒ}£ÒóII?‚X?4ÉË©¤H/—' ‡á&$¨†S>"™NK¡§œkÖ=ô) +`æÎrÓß;½ÉÎ#–„øÝRB³M¯Õ–¦<3hèŽúõ”v­á™vkƒI¾G„Üü½rõ\ò—d ®öš( ê)5ÉÔ[¯üü:[%Éü§ä®z¦†:¤.°­ ß÷øËaãó¨Þ®H|æ°#-fsê„E D8SšŽºaÞú˜ˆXPðL,ŠI’u¬°ñ®–†pPÂÒ¨ò·ÏDUß4|„ †¥“4”2x÷)’žqÛ0ìQÔJUø}¥X\á… +Øhzb &Ccy~νogm7¼á6´*¸„pƒ9¾€™"ð<ÔzGT AŸ`ÛãÎó£$àÕž~ôøÿ³›û;½ß¬õª.ô¦$gOO<[Ù'¢)óßú?·µK.ll=ƒ#ÛÕ!Æ°bZô·!ëT¢©˜Ìp*Þ÷¢=G¥;{¡#X ‹ƒB+õ²'€¼Z»¬¿—ÔjèpÆu‰êãês{ì³4›Þ>Ñ‘êåe“FOg {[mžï§ß  >QjhÕ&^›y’Ñ·ï›>¬ÊÝËÇiD~…w®â¨Å¢RƒL¼úZð×][3ôWœpÅFPòšÐÅ&îÝ8§GŽˆùñz9üèùg£«ñŸj¤–æ™®ßökòJEZˆWgÏ‘–†Þ±ÃI1Ñú/‰¬hC Ã$´y%çZKñ½ÝMI>ßò¼Jûˆ._“«2˜Sô*ô>ù‰ÃÑEi ˜¡Ô³ðˆÿ¬5¿@\ð.þ}LBV¾|ÂJ¡p x4Æ<Žj€®hd ê^-iWƒ–6ïhžbÕyö®oR‰Ñë¾dÅ}1š'w†(ê‡iÊ- ÁðÇ6PÅ΋´mjTIªÍivÒˆ9±‹Yƒraœ?ã„á Ö‰"ÙÏ["Ü3H»"~¯PëUbÉ“%<¨öYgÞ,†çR=BüË@H¥‘aOŒ(l¹1öLNT&ößI7Ïõ…ž*fFÒ–_ʼE¶ƒÀغ´µÿ ¥Êjí+”’±öÜ‹ž¾âˆnßUã2Åqªua›^~ÒPwÓÀ{FÇò«U0ˆé¢CøQ´‡³¹£,Šh44°«J¥`ˆ5¾c“ûÃå?ÄθB,  +‹2D*£ „••±ÕÇ7š’Zo"³MAÕþö-F“îE–¢Jö$c`!’M³»ýí[²‹.†“2Ä­ê/+”‰ "]d¼Ú±B6ëT„Ñ¢…Þ简rçü¤výeŽ E»/÷NLuï°[|9¾®V<7sl)˜¾¨ ;6$œZ΢,U«kïûÖ³}We·‘>²Ö¸¯-´í 9ìxÃ_ä.¼ð='IêÀN©XbF¯òßá©a̵G2×>ÛÔããÈ.Ø/…(éÅ Ø# cZ\C™(#ÙØ0'·_‚õD®ë—ϲ•V¦h<¼Sö@ç½t<²¹ts-sÚi͉ºØ×}gËiJ`eò#ñ LÙ8góÙß}ð¡ @Ì:âHt\Ô &(ißJë/4q;Û%QLY*‘c¤f-wکø*Nñ&EÃßxFR-œNi­°óë(ü«êþmë=¥‘Õ +;:=ëöà¤Ãüx9R^t´£ÖÀÏM¹Ðy;Œ?šñ/_WÚ¶êŸ3 ÃrïÏ2üA…I>²,6Még¸Ë~ÈÎqnahAø Ò8d[¡Áøãô’ÖØáòh8¾5ïÿ¦Dº—ð#Eù\­bG¹„¦,È;m*€é¨óÝ¢ŠýùL&9_b›rbŸŸœìh †^ä!úl +x –ÖZ?INöITªo[ÉÀŽ4—'Eð¢ú£AÙ&AYz3“&:ôžäåvFâg¦êI‰… A°ø!€B´E1Šâ‚¥ñr÷Ó•L>?`Ëcm÷@ ¬2òº!#IÛÈë/ŒÜŒq{+ÉjÂظ¡´Y`Cçý›$M§šµPŒêì^€ö ÝI?½­^ÅC§?XJ°¡â“l¹V=@rÚ<æ½+ÌàtDÇןðÏp‰©PÁx´uDĤ‘xÖÛK€Z¹ê?ë„1L]¶ÝD4ùëé%w¼XÝv` ¥|·)3Ǭ­ ½œù,F…BO±jžȧРý˜OÉV[[ƒL°Ñº›&$Pl·¨©.ͤ k[7Àeœ^¶¾@mÁ¸IÅl0ó ¶­e4$ºÆ/-G‚EïÁˆjŠøÿb®¿p¥õ}¾6ÙHð8‰»Ó՜ġMl;‹<ª)¹µšëÀÊèÄ ×PêÇÙÂu›â=@„k”{þN¹ë™‚¹úM¶ÑˆhN_×½6¾ÿv€Š¥‡Zß³šLêY…¥¾±¢ŠÁe¿ ®‰´VQ + 3à¼8Ö–¬2äs'zï¶-=L™‚ô Û²þ¸#ýA ÍœÿI­˜ÏÌŸ 6Í›éaÙ' œÀƒ}¶cÖQtt /™ðlnoú¶¼hílÊnfkÔB{í7ëû ƼðE1×He;+d7ˤ39¡q¬áânì˜5& µ|{%Àäÿ“øÛKaK•~vÆ}Óã ð:i>Û +šÍFjm,ë"X–]ŠSÏŽÑs$ þ +¤«ŸÅIï¢-5~ü1=&賃8T†ZK“pW¤ªS,jX &¸Þ?O +Þ‡l¼ùL0 qš Ÿ†ßÄÛöZÄóŽ§7®dûK ”»¾FV·%òÿÓsçö€ Î” Ø8klu@ ë¶ ZðÙlÕLÀ½Õ <ªýï|Æ+žóý¯—L›fòI3Tÿ÷Šy¾T³]¡¹BUÿôyQ¢Ðæ,„s¸Hp0oÕ°ÚæžJéYºñÅYµé³Bã\—JìÚh'½¶hXà×SœF‘$n~|…+& Âd—s½U.ÇÕÎéÀã¬|Ã"ž%Òs(F9ƒi~s¹¬×Ø(µ¿è_žŒ†ÀÉþ0§:ý`0KzÜ]GËÕÜçñ!¢Ëe/ydåØùh;ÌcØ«;ÄÅa=)¹d-\ð¨=toS&IÓâ-fÙ Ý~Ðdö~­b°båѵÀ‰êÉ|&OÝa„`ì +À”è0˜w–×7Wh³¾ñR¥ôôÏ~ò^ CÂÜ÷'á&E rZv ȸ¡Á?_OÐÀòý±þ€ÿ‰~Güðdþ;Ë¢£ÌßÉ—¦g†—"*{“!&Ò…ÆV—„²ÞÈÕèª"?°]O\œ‚”êD]E ¶[@—ˆ~6ÖÌÅIÎÀù‡/0 $WE¼Ø[qÊ—í\eR8;cÓ«GCHNºIn,}1çœO @浊Svh-Q,µS¶Z„@6n/Í&kSÃ:9ïæ¡8VE³’ó›û,½| bŸ°FYxÕøŒé¿Žý.ÝVuŠ[¹Ûâ1þD\5ZMÄê4 Ž†3ûçÊr¸O”‹S«XõjƒQLo±Pp;¹|03Ç„M~¿…µ¹0gN`tD÷˜™Qía‰äÅæ}¬1š€ãw+ÐpH’<2nÀT+ÅÿO0ƒv§5ÕõSÕdI$dCí°$ÄÀÕ â–Qz A Ézñа .­´Ò>i©Ñ¶·Œ*Mµîu8$»–W¦]%§í=‹¾ˆD³!˜Þ¬A0¥—¢1—oú—ïs±­½*¸¯ïV•çÂI±ï¢¹ +„” "½HÈÅX9iù%YŒ65[#˽C"ãåÑQÑfRZÿ%4µVΙ֮å¡Ëî«ktd»4ù•ˆk˜V÷£Mo•šµäÅ:ý]þiYWd$ÝD4ÕÒ;LTsêØ‘¦6IñFêÿ¸F¢ž-ú;K꺺ô¸73r9kn‡gÕâ!ÿ)[ÝΛeh{òmçåS›KX§¦?jŽ®õK¸þú¯ŸLS_Û4ÚVMUk-#§&ûÍÌ*¼]¿ÏHo¬òÁDÌ[º +m]J»yIú©[çRº ¡Ï~æGNŽ”5-óªÅÂÑÒÏb ±ìõóE¢jÔ””}Lꆢ+øÕ¤z•8ïÈh#R2ÕñÉ\{xXJ§á=ÐU˜øTvÂ;gkƒ’:VT¾zÖsñUN:EQpmU§6T&êQ$H|¶“5jˆÄ5ªEñ?œ]R]«;ÝJìò¾\úYÛçÓ˜Ëf4³¤î»Ð*ö©.åš”L-×8*«Ë›2"%ŽÉ«L±†ïöê:߇z¥gX?ž’ÙY9gFkÂ2[–½r,¸šk´aÕ¾ÔrÌ6O¿˜D§v%”²÷¾/=¿rÒÉ>D35Ó_®ù„´¤wG¹ªdhNBBŸe­dvwF\OÑKËÞR1ýÝ”ªM—ì +o8ZòߥåsÕ‡¼f¦“X橤º¦Þ4U¤ç$4©§–‘Q*ÙV¶öõ–êê.%=U¤—¦*}“ä°¶2iNŸ‹›DÇ]£×bÒ.¡Å§UU«×"á@\•jcªKÔ²éIj:ûºª±]ss“\TFÓúªùRïÊl÷ ì=°Š.!˜ËÁƒÓr9sösi\š³ØåîI;QR¶q­¥”ŠDÍsÎg;iOC|U£bµóBÊ!‹m}žU¦"–-h´or]oÃ<º›Îά2óÈjyhcÙ¯ðPÓ?—bÊÉŒê·E´ÓMB <°äÀ@@04""ÑŽ‰¹”*Rߦ6&‡˜0_š¸Häì™Ædš’:­œ¥±­Y¹#Î^Ðå9ïT³|ï"!v×(Õ¡ÓmXðÛó¡’íþŠÐŠGE97|—d‹¼¸FU;÷ñ¾V¥kZ¯„7”FˆªwfÔU¼£´Á¤—Õnêý›¶Þž–ª5vL2|Ùe›µ¦ªÂDC4U9éÎÉn—Ñ‘J_vyé«ÛÅò|OëF.:“’yÇ¿—-‘Žzÿ²ÙŒš·§˜§32ÕTÝÞY…–T6ßÕýDTû»»lc¤Ud«Ï£Õ÷¥µ×/ÙhuÔ;5Bü_gYfô×EˆPx x`Ð0Ä»íÁÒ–ÆÓ;åª9¯œ ±²­KN2 :6{¤£9=ƶ˜µ@И¬¡K~ÁºU÷|ãÐçúUË5+K(~ ±Û[ÓpËyhºên‘±pïê¬|JXBh°БùÀ <àz ÆÎbÖáÐúÖjΚ97l,yjÖÐÕÝeÙ·’Yƒ71"jÊæ(xEbuç6ždÙcø±QéñÒ#iȦF‘ âdy©óSCŽ†èÖMÚÆâŃ¥©­òg£ßœçVr-ªaDºõ2ã¡ÚÇ/[Ék 7MÍÒJs§uêO†vÛ}´\Ú¡9‡l“¶vÏ>ûsÊèŽ4Oµn>‰PêV^³+_Òz¯•[—ö¬îå !éÚžTK¹8E¦'3Òo)Ú¹,/]žB-¤²Ä)Û–‡'kÚimæÖÍ.š½DZ<Ý\Ì£¶4ÄW¤*#³q Ƕã}öº]¯‹–nêêLJy¸…v’QIÚ…Gx›iÝî‹FõéN¹i“!¦ÍTo²H8 KßR¡ê¦n•é­‡hÒ¢¯ËµCë*I+ÿ Íôh-ï˜:ÈìIôVvða*ªŸ‡ +=´ãṨÕh‘ø\ÙÇÜ:7î"ñ1ËíØöðM +ÒÚÞ}4§ò(Q‘¤üoαY‘ððÏEâz;»éûf׊‡£kXP6tae’ÿY£¥®O+Iï&TMãËQ©Înf:¿ÈôI‡ø"T4ejÖgGÔ;ˆëB°;DšHeV$~Eâ%¶Ek­D—ª¼¡DýpênH÷æâ—„»XBçŽö³EÂórPò&G‹Òíá‚ 5Û´a5G+ÛüÇKîFê¤Ö04±44¬‘yÛšá·3I©ßÔu\$ì6Y;ÄUYjÍæ¢(Õ D•h’MëÇÃünÚÎMÐÓj˜`Ø´uY=ÓÄÅÔYY™¦àœ¡Sÿ»@ø’[eu4 + b¤[•Íri8ÔU&÷®¡GaáR»žüùŸµ¡Z÷¼õÜ;"[_¯¼éì§KÖÐáòxºHŒZ=ÖbFWÞ0 KyU\5›ÕÍ6*ÒähõØ(jPêlœ‰Ýצ +„ÞÜe¹Ï~Õ©+sv;wrø9É?ÇÎ$o7wò£W0Ô|ìÕ‡ÆÄ<þóåèý¶)&8@ì¬~è³Ý¬¡D´Ë\_÷‡Hù#¦ â/‰öú/´6g½ WdÓ [\¸ºdд44É…ƒhA*.È–Å…tÎ + KH/<ôÒ{QÓEÂâ¾üÆ‹„ÌÇ!ÑàízÅ6×9”¢ endstream endobj 19 0 obj <>stream +.¼Å…²5O²ô$QÐ(ŠyXX³CD,Hs(M©×Ø•C ®¡Éeá y±"Qd„(ëb‚äʉfßÍ¿î½BµW­櫸 ¯9‹–ŽêXŽp¨TÖ.dZJ¤.+¡Zæ­å«~*ÂRÍ[Ã;­kUïŽl\8üô¢²X<¬­SW–ù³´+ ‘Ô˜¶ μ!£qáPW……Kж˜à¯}]'}¹î’Q“lÈw‰¤qˆj:YBÄ+‚¦8ê¼ba!X\ÀüGá0Ê’Š­÷Š÷±W þ'—¹§9wv¼"!÷ÛÉuü +)³»©+.ÜîW$ÄwR5ä +Y?{qAmÈ?ôrò/ê‹}èDo¼µq Ö ,$\ÄBBâ5‹ b—re©™dã´YÄÌ£RÌcY8 ,.¬YVø°¬?íŸgÁðç}dñ ùá,ÕU"Ûß|_V*žÖÙöú‰ ]€ +¨0<¼ªí^C_<;-ï6h€˜óá«-çK2¨Fª2ËmH7Z¤«¯"1%ëÚ *YZLŒ¥[$È=šrNºô„IEÂT^QÅÓ Ü×\hfTe¶RU+™³ÂXK.$˜Éï¬ÆV³‰¢íVýysÒ™Ötnýù"ùÑV0ü\D?Úe[v¹ÝÊnù¥*³Ù’m…ÙÛAÏš ¯³¯^ bWHÀBÁsÑâëëÞ ²féÑ3£Ò’eªËg£ ¹ílÏpi窦Y ä'¯o +:ÅV0˜Øn’WÃOsÙ´²Î%³è4èÎù}¹óÐ$Z£ñ*9\Z¸s³ëcß°1†KzF£yšæ–ü)ã|œñ—ÃÇÑ»Q«-î½^„莚“MzÍIŽ´žŸ;¤§!6Š ÓÇSÞµ[Y=nllb1QwˆÌ’DTË? + VrÇÉsaŽÝ(εì + Æ1tÉâõdžÂÃ×ݳ"B»æy£½÷UúüêO­êL‚zwLŽOæðŸ:õ\ı÷Çnç?«q ¼/¯`p!.ÿÀïäQ¿.)ÏŒF¼U¬,›¶E¶ú½}SÓyGd_s´š›Zj‰%†r•I”gÅ;²–™Ñª~dü4©£¨Ùœð5¿kb+š˜ZD@1-‚˜Z§cën-"by. Ä%ÖÑØÍZhµ[‚ ­gÁ`U_›3Ì:Ñ]ŸñÝ×ÕœeCq1eå‡ µtX ОhˆyyQo {Ã~¸‡h¦º‘¨'—×,wÃp•¥¶f‘¦¶`ÊݵoG­fÁ —èƒ|‡ +ޥŃâ8Õ2Â*iŸúˆ`õG¸u«>®…Š¶|Ï¿ôPõï Íný.Ùþ·pï^oÍ®ŽFuÒ5­ý©;­ÇÅ3›–Ok(ÞájÞ¨ª{xêÄ-µœ-ÚâØ°œhœ/’Ïv©tµ4¢Ûrs>j¿B·²›¸Çg{g|®]_è]ïBB.®’yH­\·ffkCÕŠXf§1oç’]»UŠ‹ii=z½GVWŠ«åR²*<_q+‹æÊ·r]qsÍnI¹T”ëµÖYîÉoÑzÜ[÷$ˆÙ×\¨w•C¾S3“<'‹š¾rkF˜{çLgzÉÒrkuÇÈ–‹KgÒ¬\5g¢M¾êô\µË)¢ç’M§ÈIŠ¹¸jeÙ\f½¦Êð¸z—ývx²úëÊJKe¸ôD¼M[áÏ÷Йª‰k´'ÍÿÚéM§ó3JÜ\:õÜB‚¨®¸u×Ép2írœ-’•3‚fµϧ¸Wû\1/¯cë1ûa×îgüûÿ —æGزòÎ5«k}ø {/RÝÉBuZ‡Tf¥üü¬šxZ$6uRSI‡”ÕæÐ"±©ª*™ƒö9¤Ö9lÇwÔî) Sg¸rÊò g‘H4ÝÄ‚Anó‡ö‡œÞ™a¢Yb‘0(yÄk:ñ†e([u6åv\á ºò Ä'§±´ ,•—™™y™™•9¯%ôõ6]•š>Ôk¦üõßÕfJ³wñ^Çœ‡Sù±g%B;s '‡ááÝœ­G‡,åÞoà¡ æØ@©ÄDË"AjÊëñV¨´WßíhâJ ñBLÇMiµ¾óaß\³q-q1çÌŠ‰h‡&ª6³ :{Ô­HàM]Ö.ô¨{3Ëÿ”ݸ*Ò¡³ÞuÒsÔ(;;T&#C“ÔºžïÙaûýhh[»f=êvB“ «*iµEB¬†ÚUƒö_ck];ø"á±Ë\¼£Úý~Qþê y!U!%PŽdšY†W“W$îž™—z§³»û^j÷‚ȆŽî¿"qYýëœUvb†W$.¯Ì'š×ªº¾˜½z6ô3Á{¤;~%¶.­©ª´‰2Ùâ_ (IõZô<ßsö‚÷–lÙ¦!žŸv6C ß%éÒ¥Þ¤öÌ^Û£«y×tú” ÄÑÎWzßu¥VçåR$qN®§T$Ö9¸›¶$“Û"5½a^2ë}ÑDoèRGßkãŸáw‡©Má(hH£@x¾ÐðwŠZíå5LïXY˜‚ÃRÉ0¿Ïû‰zœÔQ²ÇÞÔ±ót×j:å×)zÌêߤt&Ùà¡ïîĵ¿„’¨‚K¦Î9ºÕxoOòd_Xð âJéÚQ5Ñ_¡³EB»»ï5Ç"Ag…X$ê(6ĬM¤Í㘠ٲÊ5åD*¦*x:x¤*9Û:i•çUÖ©Š õŠ +ÁÏïTïžéz(GaZâ–¾,ǵ¸>Íq9‘*ׇ¨ÑÝ!ñù•i¹™d½!˜gø + n§Xƒ²nÀ pà@¨Š8( à±€@ +P¨ @¨ +D`‚QA +8ðÃÃ'PaB²È¶€8@ÁB&8PP€T (X€Â$ AqàBq )Äö‹ˆˆ$@@@E @Á + +¨08Ta 8D4à@ +&$€¨Á Á‰*˜à ÂA00 "€0hÀ €Px˜€…D L Pp¸Pa(h°·]ɵv%&$@*ÊáI… È»k p€¨Èe +&p€@RcaÀ€„„Á…T… ÔK(%Ógr8l‚Z¹ + œ‰™Ÿ‚%%ED7P8Ð@1âƒD¨0 @AÂÀ ‡ … +L@A…(P@Á &HP0aa0Ä&`€XÒ*Lp€p bˆ &X€@¦‚¤„8@PP@A$˜p$ˆ@pH€žå4  hâ@ƒ5ˆä4 ÂÀ aA† œ"4,hXа aÁC† Š‡† 4,H4,hXð† 4,hXPLp@† Š "4,؆ 4,€à\TPATPAT@))ÀAœ’…ƒdáP…–¼ƒœ^­ÎÚYΩ₆²pÐ\„Õ´¼[¶™ÇžÎ5?\DÞÎ"„ŠR¥¹aY·j¼¡¾;*‡gÅà Þ +„àoçCªä¾^ÐBà}¬N«öws»yø‹:Z<(j¥iªÌ÷‘f…>>àG°AÒ-“ÕšÓ592QM:Jžãu§8¨wjg{sôQ@(¥:v9 +DŽû:GÐß&U1U’y¡ÚåÓÓ¤ÒUT³Agâ-gÓÃQ ìäÌ15tŒ1<ÄPGHS{¥ukÝ(" n-μZ t˜&ÞæZ ò¨ÃP~¼—Z7±†-·ê‚—SxsnþA\·s_-±%`‚‡   … $(hЀ˜¾J¹––¦¸¸Iyã<(ƒðÀ˜¢‚! ‚;¨Qó8¯5ñ¨fIÒ /Úy¿tQw½E¢û!:êX6&z/ÏÁ»ê]ìž©£@TUi™ÑäØù8OÅCNphXа aA­;£ïZ ïZÌÛT»ºün.i¢î90xЫT\Ú]Ë_}ŒáA‰Sõ"±vb'?¯ñ¸I¯ñ–Ké›R«·tÔOç˜?4VÖê¦Ï§Ø®}çdŸs¹µj=ü1;Å$Ÿjnùrl¯:KWšeê²ËHb&½£”€)*UN IéžÙÝCp°»š6$ò¡}Q-­îжÛåÝʇG¦tª”;D¤jû³æƒ$­N–µI:(gûuÂírKK D +( @ Á@Áá$xà0Á] +&°@¸a@  +  "ˆÀak.@ƒ¤aAñ\Ï%Ù÷Ê®#?H ! À¢”Cè+ÔZ[ÞäU³µ±d!AÒ!u-´,³lØ°ÒÓ=HÀ„™DT¼²©;®’è‘Dª™µC½š¤ED•+át(ªíHTCËq ’ÆçHÆqî$sìZ!õ·«&בú¸ÃЮ{ˆw¯lóLZ­¢…VGZ+÷EhzŠø—ÕzBÜ«µ¹y}š½Poün¡¤½E\ZÍ!³qMmWÏ64S®ÝêjjhLßçxw—½Ÿv•x»Î½­–ŠO¹F´ƒ.ÊÛ(Ón”›öe}qÒ>¤Ü,ã-Un=ï:ÜÊÅ\GŸR~MÂÙp׎ð¸5„—x\"W-K‡QîåÝäáèÚÊO3ç^“t{®‘K‡T®=õ&)]™©©•wN²ÊÅ3œ%kRê ÁÄû(:Eýó(ኇ‡jÄ"š^I@¼UÁüÝUSqÔ4Óª(¯ ¢«úÿæ÷ñq §*yhs/ïøõô•ÕÕ;RѦOg‹«Rx´“qé_´Û­ëí¹Ê³Ì£AÓA ™‰0mjaji^ñq¥ø•P¾d@;¹Ê²´³æœä_ÖÜq`éÖ£áÑjœ¶mu ktªfÖ@£qÚ˜Y]®™>h›'!’Z7çy§¸Þßéè¬K¥{wá^yUkkiq°¼r/hÑ+/³ú\I™êW\ Ôr}sõZÙü”Užfk“—5–wM1+$\ú¨°u,’)F“Vúoðdï8¿B—ïŒxN»q¸jJ‡¯<Ÿ7QhJ¶©½-½’ËÉÛóÖÅ8Z ̘f'þ ÎÑs|8ʤy¥Õâ\0Ïôß¡Ü_Þ#ƒ—è s°@¤âÎÊéÉõëNspˆãâiYŠšéù8Þɳý†o£r§ÏN˳ºÒ(”`ªK¸ëNâÑIk?ÛñUÞAé3<ã8Äq8ð¦KC+"$æpâå{ò”…rª'Ê6çFVÞäJíXÊÔ²±ø"ñ¦Ë¨d¯¬ª–2E32£@ †„báØŒP[?ýÄJH9 bŒRÈ@€ ¦šSPÆa[½ŠýYh´ö`e­w++s’ösf†éL¼4to Š¯ŸÇ g¾ª¿Piß+˜ï]Pÿ¾E#°å:bŽÝÙhxû%%üí»Wîd´?R{]gý}%Ò4öÒ¤Ø7ê˾µhZ/ç ‡9oÐqlOq[õs2»>aVìÎ$£µ+1RcYy'Ùžî@oÊõÞx‡(s¢0îU]ìÚw_û'|ŒÊ>`•z9 Û~@Q:ËÊœ¤vô¡¯ ¬R…q/îb߀ÔÞ˜ôµoÂÙ(Û¢6Å©öop¡=LY›T{e&¯+xyYúC¶šö¨©q¡Ñ‡|ÆZE:gÁ¢/9Ùæï}ödÇÀè]ßj )ú’^¨©zñÍ¿ûµP¨G:eoWõ·„„}8B¹µ8RçÓ½öí´"&iþ¿Ê¦ŒtöR“ïíEyŸ„øÇ2Ê…AÜÞÏætï»ôfÈ;ü9C¬DíOÅu]?éêCQò½œ¿©R5étÏ „”Á°eoÓú‘íèîi–—Êf‚g99˨ ®»./†eŸ§¡¬-¼ÒQ’XÎÀBš?dPª-(¦œ¬†Páʜɋ&WåfAhÓHØ:t­?G¯´<€(¡ÇßÄý’DY²S¢ÝÖéýÐÝÔ—–«ÆûQÞh®¹­ßdft«çœ,q*Šµ¤Nó¬v¡„Êpxö7N+`TPunØÕ{ë¾Þg¥ + +ÅŠÈ1\AazíG‚ÐLZfåWZwžD]Ävø0úÒõ îÛ=£,ŠÑk=¿3oó¢“0 ÔòI`Ž“ÇkúѺ˜î õê+£¿Æ5÷×ETÊPr–pgq-»Ï áRÎí“`ѧ.&É%@—˜L·C©ƒ<Á”µG.`£+”@ÑáœHðû™€ °~q¯Z[†º†±°E)°§*@æ&¥‹ èÖ~ÌÎ8ýØ߶¥ý6Je0ÛW˜È7„¡ò>U‰Sa;=gßnAY´c$³·ÑîÊH;&õoãÄR ‘Á"ߤ¡p‹ÒVæªÛ4“¨˜o¢FÊn×9ÅÂ܇ +Ãq¶i°í>Óô5·åHsÄíOÍŽQ­DSð‘§_owpM­úµÛjnÈÅãb êÈcIUê(×Rdóäþ x½ „›V$븀¤ ]¢Ê;qR4Ž®Ä`Ò —ß}ûÕÒowñTMæ1P¾.à—Â#î=@õ³Ú62v3x9ºý¯’A¡¢—6˜ñIÄ€â 08›Êij$ 9#át‰‡Š÷8dÅ/¿$yzŠ­)ÐÐ9®Zq›Qîå •T(—x÷“ô(8(Û¥£±5û4ËŒëê³yüéTV:Ì@b¿À@R2F½q]ãKèòœO“¡ëÙ4.&àÿñíOAûm+ #™Ø»º=¬ùÎ,Ñá‹OèØAÃ:Š›åŠ7ëI¯úhA›Ø8XÆ +‰Ô&ÁG}™Œ\æÂKá“K Ì”™–.–BÅÄ·7ýYç˜bfâWNj—>“nàá %uA†H$UäÇšþˆ‰ôc|Ž»³@)>ºDò¨µ!_ÔQ ~G ‰ÛÌÞºiÔ¦2:áq=´L1LŒ( +B¯ÛD•XE¢·²ˆú€@t’ã<ôΡö£ñ£ý²28†BãR…ŠR)v2û rÐÙ­y ŠÝ¢? õn¿21£ÕCÍÌ<±ùs Ÿø/ü<±Ø§ÿÑçÔç† Ÿ›½øÂàÓ¹üï$eÉ?âeó´Ö|ZÕÐÕù¨â£dšxÂáŽw^àOeþúp3˜õ â^ÔÇ:oÅ¡N[KçÜKVÓ]Ceª³bçÄ_õõ‘ZÍú •÷örLC§Ó#(71Â1n g­À¹Vúæü€7»ï».w,ÜüÛ´û@Ü ›Ö_pzÍ̃îšçHÕd{s<iüVfÀÍZaÐDùë™/¿q8òl"è…¬=6] òliĦDÎçô”-Z¦(öÓ~‡ÌF¼…„i3”Ä(ú€X¶:&&ë°„OkèY tbμƒ^®Ç:”5ÃÇ’òvÒ­Ý6ê~\>­ÿ¾T9eÔž•ÝñÇ2˜ÓõFŠ²„(³²>ó$£^£ŸÆ™¤zÆ(ñªˆU9]B®ì·O³\ìTVU—3†Ÿí6Ó.å‚÷õsu—7X AQÕ©è-—ÉgBÚ¶lÔ’nu·'Äɽm…$u;©á!v:m»¹*Ìõ€$ˆQo##e\M´â#ª:õ‚EzF>V%{ª\dØêïÖ%¾XN| g(÷¨OØÞ«23Ìn°z!¨ä½âWŠŒ4v%6TNCѨ[ÁÑb­®‰) +¨_ˆiMÌPeûænÀfY[ÂæÐØ,þ3df’H9êµ ‰1^”à–ìy9“x›Mï…|ÊP ‹Èò‹úO•oXz˜Ò»¢“RvSdí­¡†¶†tùUâCOË]÷Ót×ð½ 0wƳ~æx#Ÿ®C\¢p-¨.-—<ì¾½ÑÈA œ‡ˆPÃÂÔT#òèã?5q3Ákå‡ò=jnãWËÞHÆRrô‰²¤±á]^u© ¯ˆ•()†ƒ›>UTÉ5eR§-µªbá+9ÈÕÒž9œæcS MþHdˆS’¼ìö®íš:ÁKNŠûÑ‘.ã´8Å÷ ¥’ ³65JF1÷3¬„X╾²Þí¡SYåP”ÏFêa£K‚Cx¼v¾r³ˆäï"D¼Sb&HY)pE²n¸o&yѽ‰%ºÔßÃ$®µXÝ“¬ó +‚É0$| ëÑ/x7ŠãU÷]9 Fú|I÷±dD74÷Cöz¤©Ä‘cìmZšÉ›?ÏfêßdΣ:»RÁ‚î9©×VH¯Ec¸ 7Ÿé£ÁÎfÞ ó'aNŒHO/È°ù™³€ƒ2Õ‘Z~ŒüpçŒ(ØÜGÇ` ×”à\Ÿ7Ò˜°K;n#sPsÏ6ö‹F3áQæ„™iÆJ?R· >ÆÝ.|uòvè¼iœ›\úrßøŒaÕœ7¯|y€{âd¤òÒ™‰õå™{»“ (G]A4wž\î¶nà õ(òèX•*%IÎâ?‘˜mF][¥ÚË~©£!ëaQsq2¤s)H;¶: ùå´=Ûu h² œJ‡k÷ª£ssDna8Ît†µ&//N‘¿H;Mü88k}8‹\QßEÑà‰ÍB£q_ÝyÉOóÛ ÂÒ%»+ †¨ÚïOEŒäþ½¤3ÅuöÖÂð¬kÞñ¾0P„õl÷/QÝÌ!dk$+úä’”MÔ¡ËŠQ–Ï⦊‹>š³}›üŽÖ$8´0a¸Åž|ûˆ:[d2°DTœÄU˜HÕ ³\Ùü0o¢3X +À|¬ƒ³Ò÷`‘â‹ö}7àYKOxî\«(@˜`,NôäÔª¬eñ<„ëå«ó讄³b G™µ*Þ`æl^ró€ýp±ª*z„-­n‡ül‰HWáò’¶qæ׊Nx;Gž\ã9Ì´®¥ÚÀù€gçøLÓÔ¯_©®ªù½ä…ÂTöïõ±ybH§Xªì»høÒAÍî+"f½!J‹«{þ˜–ªÎA€dƒíŸÚ}m‰Ä¶êñjAc½Kð͉D³¹3N‚±?ßùÂI sîëø› ûeŽ Þ³.lUtå„>ÅEô§â¨žß¸3‹ê^ßÅT‡ÎÂ]$ê.rèE3·V׌Ø]mÍŠ™KTò٧ݼ‰‘¯ áf$[ +~% hËoé²Õôàƒ":JSù ‰´u<Ÿiº–Žj¥"î*W±ª$5-ø+€»OØ–7w–- ÀÛ‹‘º\ôEÒËq?>´l[*L$ý”Ó¢¬Ã4&gÌ/YFU’/=Z½ƒBȽûó— vQ¡q®U€¯µ m TÈô„´®$³­ŽFä*êYµa¬^—š-‰0ÆÐBÈZfš|Ö0¿¶MD.XPÝlÀ³-Òø¤ª'2VKߨ}¸<×…cW…ÒZÔJÆñD{÷ÚH#ð#Õæ0ŸIFÙSðž'¿µlò^«Í)ܵ#¿‹v¾¨D,m;>•ù1Ç+·žÏh<(­´9!ó¶9©Ê-R(¡ŠSâ)]7d%Þä„é»aŠ•žÌÁ½b‹yˆ4“w^|–`A‹¶àIÊÄgBý†‰Ê!2Ìœug™£‡ „\ºi$†ø*ÚMó`¸(øz<ØröЋa€³›ºòâ~D4b½O°¸k·½·3‡;½)8_==ªU-Ý/Ïü·ˆ´–RÃÀÅ)`¬nÄ3aHë˜É¶œ¿¾"n†B„ò'Ë·+J$ +|¼™~1¦º8¬Ø>dBš‡Îà ¥3fף˘®§D¬N‘Güh€~–=*»þØØEJi0]ÓC ,Ôéèp]®õÒ6!&KP˜nJYT¡ƒJ¢9 ( ­y\zÆk´k}ÑkíÞDw'é&gqInFö;‰ÈÂçfý»ßR¤“©«Ì„#Œa ä©1¶ùd%âñ®ÛÄ|ƒ(Û• 'çýYOÊAÈÙúÅFh%5äÎÛ/ÇKZ7uþÆÀò†3JµòÝø„á€)b*‡Ò/WàœGã­êÌ/"¾>”â=&Ì Ö)èþ$xE‘ÕgGºhá8ˆŒÈú–S|¶=2*ÎõœL¼jÓ2ÂƸf«\%$rÊ^ÂO é9¨Àûg¤ß;-Jd Ì×>Rr ú¦ßÜј‡¾ngB¥™¢Àu²„…*ÌÇh œ%u]À€À%}PóÑë‘Fv5s +{ûÉS¯2s|Ò•¬èrd’²{~kù©DKú¯UOž¯üfÜ£ +˜áÍó:¢þ¢bd`êÐÿÑÓ€‰Èw‘¦A`#ú‰ ÂSH‰Op9¹Ü”¤Žk檘²R5÷Eïþcp%˜Òkd´2òiv˜Ÿ!i¸0ÓÞ¾`ç¸×‚ö7SÛX>÷8؆V*¥0ªL𻈾٩綎[cx¿F'! †qQ.Eqíåcš:Y»Ú(:ð£óÉ &æ5퉆¥†#ݲ-ë„ÁéÓ§`Cvõ‚•¥‰Úÿý¹v–T‚UN*ÙÉí½8'‹÷‘0‡â+Zðšô_€wÁ+ræ`"TvÒƲS;›ß_ ÀÇ4“H㩆´¤mÿ+7Ä$÷- ¯¥q¦¼w 7h¬Y˜MßB‚ÐRʲ í¿ÚÌ·Eúy8¿º¬=b£Ž3ÑÔj2BÑ/™p|¦éµtÅ2 °o‰¦5¯?ÝÙÙ$GéxJ¾âIYÒ3‘ÿ„ð;ÙÌZ½$ØBh ‹ ë#OÇ1JIú BÂÁ@#œpr¬ W~Ô¹ƒÚ«Ì;î—£Tÿ ©žòšVÒÏx}6æ˜{‚ÍŽn¸(4˜+ˆ¿ Ë ª€ÆÏP‚¤Ôh]ZÆ!Óy»N4))°ÙOÑΚáÎSv®blW¢¬âõ0Ìß&%r-yX×NY6ʉ€ø0U†Á{8;ÏêÓÑœv`†(ÊY¤Å_e³$éhäÌÊî,.ˆÊâO>ØÃœ>ÚC‰³±äÜ1Ðià€ ÒùÏ q¸X×Þj°vÿàDÿrبÒ~€{~ ý]4ûNtœ¤à_íKæbþâæþðYƒjk?iÄ7“hXדÛƒâÔÉ^Þ­  U0Ê(»~1í\Ò‹¥e ^¦ÇØÙöÆòZ;#Hpj3äš kµ(G‰ðRªÐ]C×Áª1‘‰¬¸8ÍÝÐArmA¼ HIú„·kqLDcS›·¢ôõ `7iI‡aE!Íõq<¬`0”l¿ŸÀgï•Â9¢ªøq¥"ÖA\PjÀœ;kH…Õô\™‹àŒCwŒ5‹ÎÕ´3DA.¢¸àµª˜“×€òš‚๱е¦´ó÷Í ¶woŠ¾AïÙEt˜Á‘òz‚Õ}LŠR¼Ž24ÄWõ@®š‡T$‰±¸U'LˆóH$¯ +HÿãÞ¤*ÝGâ]×<¯,~ÌÍçp(0Žª¼i‰(+‚ä­aR1F´.öZ"ö´ñï¢WA¼Ri¤äΖ}¯!ǹÚÛ)+™èš„ÝrExç/”cHgpÅ`I| Ô{™n! 7l;üýFt›†%5ìX0’Rf—B¶°dBPHð(‡ö²ã¤ óuˆ[) c®f£Ÿü͆²LB°²c¦ÜÌ«Ÿauùk·;Õ³TÁ2×\4ÍuDP&9ÉøÌ8¯0u9GfÂ'ÜgC7lÓ’/"„\rÁ%@´¦“M˜jd]jGó‰ UÉZõÜq£¶Êà;ŒZ³ +}²¡d’…õ´¨˜LÒ8*­»7ìA—T^‚N¯!nÀñ _·ò9ÀÊb/KÔ$ÂÛÞ÷ݧãXœX“i +ßxÜÚ¾'u9Õ8SãæIª ã\7ñp"à2fj`ÒÂŽy‰«í‰ ½û<÷¡Ö 2uã´—i¨®ÛvÜÃxhÍ ±§§Ø&æjÐSl> ‡s3• ¿QGÃßÐ":‡ä´U€:U¢Rµìa@æ˜E +÷ön‚W äà +Jý(¿…lÃÓcÉðɘ—Èø¼ “¦#Ìв›£LQzšfÅ/t5V‰Ð‡h¨°4_PÓKÜ(Ž-‘¾¡¾\—ÔÅØ]8u«ª›<¹ü>¾~P#ÉýâØÖâÆË·oÃæ̃ÄN8T³h¥š£­<ÃO«xÁš=ï{$`.pðÉ2ìCvl¥ÿBD·ºÄ×ñÀ©Á@NO&«âoÿ×Éɘ¹×’ð“ + æZ +&Äù„Ž fž|x yŽô²Iþ)< ™äU`Ôìö ü½¸Š ¸M÷Þ婘¼j^Ðj½ðEÔôûAÃ*Sß3Ñ«ùÔj§ KühlEÝ ïN Î>s1RHY· +­Ôµ/„€l™S7uú¤í?ý'ž“¤™jöÚkJ×уHY$Ju®MRÖ,`…ÕëÈïÈsÉSÆ‹òA •ÕÂûËZ7úæ¨êrž;샗÷Ê@µÒP¥%t> ¦îWº]´û‰ºhü&“ò¬.QWúi^×é‚4ðk›MW$û³ L(ÀH°¡’ȇ,í°W3%9†c1ô'JÔÔVN:¥ÿ:bì>”0‡ +w*mÙÊ 1Ê_cÈäòG«8ÿq¤¼Ø=eI±E)ÌH;(B¥zq¹dZy£¤Š»Dßçøhµ¨xž xÞ¿"’Ÿ'Y4k q­*?NMþOðÒžA7FpÅ›½<&ƒ@ÿ ®V›>?Ò“Þ¼´Ó„}'`òí Å‚&÷8’1ò¨Ý à”»èEwJ +C]Úþù Ü°k”ƒ +—juæ­ˆo&êÎ)ó~Í^r†ßÞ˜¥yÒÚqȇú?ªÉW)õ¨(sŸsÐèÑ}µ÷\FU6V=Öª”.桽뒱j"£w’Xuì7‡Ïºå)ʈ‘8r™ü¾pD »ÞÈÏʪ§,’çˆ' ï$C5Á>nÅíȜɈï¨ï‰°”bs¹(ÊäÝB“òQÁ¢iò_ë w–FqêÞ)Ê›…„KQ¡xs7èooý6ýk•˜£ÿ¥\Ñ¿<yR™²3µyäXgŽ4u ꇢ„(€M“›<·!5ñRRYN®ôDes;ò˜bɯ±'“žK‡þÓƒy;TXÊ‘°yÔM;x®&œÍ|›Ô#H©v‡q+TG6àå% Rº´¯ÔÃèÙ4áY{ –J¼Ö—Ú¹êS±½ï„:Åð§‹;nÈ£DÝð؇±L£Œ)™€¨é3ŒkžÔ¬» üq#ùŸ;¾ŽÙµþaŠÊâQ~r—( +  ôQÄüŽÞùëýò1l7üÌ<¨ãÑc1¿R$¿ð {_›Äí÷`)#”¾ª¢Ýs–Ãa“ÍÝÿ’÷#h‹/¦Núpñ‹yœöƒÏÔÆÎm·@`îÎR†âÛn"Z”§2òUЈ­W»Ìã¼oj! ôÔÐŒ]^FI˜RÝÃW5vª–y¹±rklÐgÔåïçë“—=çûX®‹jó&@(–@€T€=õÓC¬,rcý]"þ‚H³¡uÒ2M0çNÐaXA8N¯´³ŠÎ›dhmûr åQ¢ Nƒ:_LN»\èÝ„tšòû³8ÖÚ8õ1–ß1*Išo˜ö²ÍF»!æì4íPxÛ0¥ÚÕ9í«AY~?=âyõÛ×u™¥1Î÷Ú¤íGaÖlÁ­3dõ&¿þ$õ9qBXLmM£’ âð>ŸÌì#ìÅÚúRÃzwœÎµ©Z“Íû8|ßõºðIwf×ë£ÃRßRw°=ú{E?("E2K좵†¶ž§œ‡ÝÒ7pØ#”-Mæ«,Å`òÙï]iÁ Ñi;Á9t°gÁ4Àߥ濧›÷‘x.„öKÉmÀ_ÃÇbœ\è¢%?ÝŽ¾0V>’/í¾CÓd5%É R’q– £Ú6^­[齓90â™ÑS-‰E 1£à|Ï®gíA02 ®¦ këkAnuçn|\BTQx,8'§a«åûÛØZÀ®Q÷rˆ,ÈÏv~J¶Ëi.‰"dýÕ‰|VÙÀÁmõã@¾#ÈÐd˵¦(j ýýZ +³­.ãtÂÜñ)Ó|ùé§ÔÆÇR§Þª€HÁà×a¸Ñ–æè˜ED ÊZ ê±×e(וÉæÇåÊñƒ³´\qxÔ.‰@#† ÉŸß)° ³Yœá- }´çÐé#|³™ÈI90kmCÇŽÊÂ^wnvhÂ1”¾| (¾Píp¨›Ç;‰åÅv7q_£ÊØ¡ÛaJ5±½ŠêÄbk Yí‰y^¦ÓØÁ:àनåÈJ6æWƒñ<ȵŠûGð¯/õ5ÉIßØðÓ^=”ÖTÒÊÅTµN?/~Q‚cÐËÊàñøšÙçXÉÐÅwJ”R¤°äQP2$Y‚ð%ÔO–ú¢3{RJŠü˜}1yê·œü›ý` +~èNö +ŒxÆ$|–Ì› ‹"Ô²Š á?Õ¤eº-“…i-ØÅÆAJùa…ÊÉËEp0w[e…–€ÇJíêFö½¡'³“lõPC…§v’g*ÁԸܨ $v2`¡DP¯xaâ‘ÖÜB:bUY?’–(´Eœ x‰W‚ _î<Èë–É–­]°A'׊@¾®N6 +ck*®Ý:»A +Âí$ó ÏûŒ8Üø@ûFüÌÞlºtÃÅafÂQ“ˆ ÈkŽ÷ïÝI@:ÊJõFîK¡Ü©ÍëÛªFßÁ­RÎ}Ƥ:;c»ü•¿\µÒdï-ÍIü*˜a«â³æ¿.d³DY?ק3Óyú¡ÙzÄThiñ!äðäha¨«zX¯½›UuîÌ“3ãà¿fßyˆËÚôÃzâЛëÔV¤Éû¡Í™›¢êœØC[¦ªòs`ƒvF£¯RE¯‰)Œ"ÿÊ‚Tô”àè67hogÚlNôSŠü7lI2‡! ÎQÂ#âiHGâ0Á¡Þ¤‹^‹(t¼„^æ;”H^Ž©B˜‚ 7sÚ€Ì^K½Ö³`}sxS4@SÂ6Iðë…áÄ>7®h£Ø\€:n,±™B¥K‚×vTÇ1í`s‚‚ÒÆŒe½µT3¸sÒ¬À»â €.@¿Ï3Æ¥$ÍÄøáü…jж‘|± i<õl¶ö€X)3:'•MC§™ƒÃuÍXçÐÊßÇ뺰¡¯Çô6ô§ùw¢•?̉G +ˆï.žH¤GSÞ 7v+‹á_„]u-Øa"Ç"7¢©ƒGš*˜s6°˜e/-õ6ˆŽpÙã‡ãlBOîø˜ U–*ä×OSX$0«øâ/ ´ £˜ ¸;óÙo¨c•ŠNy‚õ‰á9ƒ~úɯm¯¾Á\3Â; mÙçuxkÑ•LWÔö9–ŠG.1"˜ +¦6û³ pO¡8ÊSz®ß›ÂÍ7Á¦ÐNm¡2€ïW[&+_<\êcß?»°ZqZ)Sw_[JdÐÊKcIè…L¸Ë#±ÝùB +žß” †àuBÌO5ÃþôŠÂ©ÜجoÓi•5!C¤Öñ¬U¸ÙjòϘP=M† fN¢ªËeÁÁÖøÿdIØ©ü–Ö'³2ÈžUs«xÏÀ߬@§ùèj±î¶Ì”p;ÒÙ!͵‰–ýP™þ©ƒYLÏ:kL³ÿíЛ«6z1O²e7{`‘£‘úéÉQ}C(¸Øí¢£µã0¶g+åé-P}°¯¦Dï±JÚÀä…IbÙSÕ³vRþ=Ü7B‹ú<Ù&\5þX-ð%>¨ÂÀFõ#™Œ["ãëÎÛBþÞ…ôùÖ~㺤_K–ÒU‹§á£Êéò…V$…dЖÉцðjAÚíI¸*+H="ó`m¨ë€Ä—¤ÂÌ”uï´zE×h‚sO4ê0¸•P)DCâ¼ð1rçø¶¤¬{ØV¥ƒÆS6âfanÏ¿~Xïè¸|³àoP ’îå çgìÄf?Nh×2ƒÖ†XÐ hÁ8X à Óˆý9›.xØš`†U…­A4=ó;z£Ÿ¥eY h¨Aª8£dω&Ö¥,¯khi'p +6l ×b8ÑoÖR©Æ )CK_võýnvÒD„IŸ¾FMBWfΡ࿒¡ ê¼V‡è^._4ŸŠn (3Âß¹95(íÑú‹AÚµœ_3;»"ä¢n$Cl¼ã厢õ®»÷Kyׯv£bѤËÍz*œìP=_¤râ|÷Ä;¤¶l/¹9ãúO!âx¤7òQœYÊÂN¬ŠË§9ejoß2…˜)¦ÏŒÒ­ªŽó’ec̯Ž«g…{“!Ü/XVà1s—ÜÂo 1)½&—B@M‰š,ÎÙ´â÷PÙ`Gc™Éš:„F<Ö­ÙtÊVDæè´Þ%1J°€XÜa:aÎia¸ +Q8·m¹KlÅ5Ý6T|Öv!± æLƒß¼Pr`ýFn¤&ýµÄÅ'JU1|fcq¢YW8×83ƒ·èŽõ’Ð n°ðÓò*Bƒ"? EàÖG;`œSJAa5½ÿ™ÏKK¸Á9_¥%§—"Jøâú„²×m5…€ç@UêEgxŒq!((Ùä;åIK¥·7¨z\u™f ¹<ˆ}5 ¥!–ÀûMm!lšó—èkáÅ-DÚ–‹«gþ€?Ï>ÜÂù9€ámIÊdþÅlÊ[-¬ªkóÕ@Üÿ!¨aüŠð òôfÎìÿ À‚Àxf’`Ð6 éUZ8Bvj\Sôë%G±‘'Žþ®l(q[wÀT¦ëÔmå#„*¹iØ,ëäƒ8n…oz!çr,äuó  OÃi|ÌÀ›ÁkoìúùõÑÜpŒÄ¢i—Zì’ ‰4t‰Ü#ãFÐ7B +75öb‚ži¥Ùñ„ŠÞŸ*÷9•ŒkcR€—Ç í¨xñ²ÇF®‹§s?ÔEhwÌ™c’kâñ“=óA¯?k¥{\Ìÿ‹oKϬw$÷™PÝ +bXÄâéÔÜ[}Å4!uVPáÁ‡(Á¦×¼Ý|bcuÊÝÑZ ̈ÍBß­kç¾¼;‰ùЀ—”ñȘk–¤H8KˆÐ,*jºF£v˜AWƒÙUºj…»ÍÛyÿÄ}`€ „ãbbÝž±¬ð;Î +¸Ÿò«¢U¾FØÀô ,Ý Pyô<µÇWIÖ¸‚ ë çr—è=©ìŸ@åPQAàN†`¼ØöfÅÔQ•Å7òãeSäŒ)ÖÉ¡=)´;¾åµ³ôþAOh¦Ñ4Ax!ÊÜWðó}ðÈ'Ù"0iÚi7šgöÛÖN¢Æk`AR m)_6óµ|«º}Ò o¨‘7+¨š)^…«ÃˆÂXîÇý±ïj•„TÝO\…øËžÀh80Vö½ P­G„ŒÚ#I6|°:2‡aiNZÝGyi÷¥éØDQºÈE‘Ž²àç²cº£þ/ߎ8Ó/<Ît!HÜí û‘<&¥æ¢!Ç Çm:?D!ÿ·Ù¨áI ¯œ¿;%Ö[u˜‘NÝè)áÚ +^•Ü×O.$¶•ØÍ1oÁQø'§­mkXäøC ‚¨[¿¶"J,êßË%-!ocÜõ)ß°âr#öÂíÂZûlІœÙî˜QUmd.ág(.̯êÁˆœ"_ÈO?û!ç4F@Tm‘ •1ÑÛ!Ï2@í~} ‘˜þjœì§)¸º”©…šw’OÅÐäéa¨-\yÂ0†Ã4>Ï“ñskÁ`ê;Ï-ߎºªC’ñÉx™~Ñr0¸Îq7ð  +K:4?ô§pSÊÞxt\l}y»&ÂËyO¼é‰·™ÓÚæ ¥ya¡Á“ÜzZÆÿ–È•ÖÃñ`"´p±EÌGÒ$iìU\,²‡r Ec¬,qξۼ…ðm rãˆðÌr¤Yf8SÊEüù>v-ŸiøÒ÷‚‡l0ÜB®A}{ÃrôêÆp9 €ç±tC˜¯–1U—KK1V/¥Ði?¶}Ì>ˆ)Ô‚nÊdœ“_=²aÏ +lœéˆNCǬfç[la€¾‚¡¨zÔˆªÝv°Ô³å‡ýpTxßÔb”ÖlSéÔ5­ù¿ãbÑÓÞÐÎ' v„§š¿HjÙþ¯X´™l,™¬]|7„ò½˜{âØMãâ9½Ëf-jÂü÷¼¾q|à&&€ºv °sV2šLƦ”ÌÎ;Þp +¯~ôœŠ•~¬TÔä½KYÊÀ óîÂøâR¹žº¥¢/½–õÒ0@Ñ¡à©cß ÜE軆O.¤7µT¹/|z{3ðì×ÐXQ¨ðþ ;Äë@gô­¤G맟RZçK]–¯Ø¯Å¢=®Ã +ã 1öAXç=zÁƒé $t ‹^ó¸ÅÓµAøŽI!@^YIc‰äI}¹7ðFíB y°ët©r«`G ®6‹˜Y‚e /XÀj°Æ”qþIPÔÄÁòê5uJh¶#ÍØñçP3<õsë7ñgÇ7õŒÊK ZŠ¡ƒÕ_ |'ÏÊàëÁ8ÔSƒM˜,)Qçzg!ƒ¸8‰2kæk»Ë1•=(Ö­çPYbg¼á¥?7» AèVb ¶‘a–4È´N}P-œîúpTHˆT)Øænð7J g& 9¢ã˜2‰‰XínÌuñ’bHÅ“yM3ñLâ•–`ËÊX[Ê¥­B©²¿ç†#¶ƒ®à\ €«[¦¿:7~ +ȯbI÷êþ9Ö²…ˆ'莂—ýØrÃv¬jÕäj-.&»õpBáE~/há ?íkÎ^H˜e]A‚®¨P·”‡51e÷ Áň¸[CNÌ$·h¶„Ôí#¦Û¹½Ðö7þÄhÝŲ•“=}3†™5ätü\W6¡B…ƒæ¥77åÆPï^ÛðÉ:wŠ>YË¥´¹$ Eˆ/WëãöâYï‘#ûmþt€œš_!¸†ìñôø]i ñ¿3o­bw>X†¤õf«á#Ÿ™£¥¿y4,·§Œ¾{ë;6‚­‚;âÿ–Yó#3Në':V„ ©Ài"_Ÿ# +³ï˲Ô^ +©”J ·eȬ‰ýÆ69ˆn–€›\âÁì3è÷rxvf<¬tÙól.‹‚¢%°¢‡Èâ¾S)Ö³3ª£mœÝH:$7Šs©r^p ,X«ê¦ªt{âz<.‰Ü>}tù™šÇiÃ!|Q•D¨vÁ¤ó¯E|ÛîBŽ?"ü³ÕgÑq\aþªStmî –Ãju mÕA3lèêLM;±ÃÇ\àU7e3ÓÉùòÉ]Ýy „b¤˜?pÉ4þþúÜÔ·¹OÌð¯QõqU_fd<ƒ~iÉšAèåÝçAý}Å ' +ƒÊOÐ!”Ê< CËÈèt3•Ö ÕLWÀÆ—JôçEñ˜WíÚÁEAKä*ÝÂh±CrvóŽcÚ(ÈfAIIk™kÍôA_÷¢9ŒÁäêQ¾IT—¦Å:ê¡wX•šwÄ¿&Æ ÿãN H-ƒÄ`_FN’ã?`¦EãO•$¥ü¤ù*ï°íg´ã€÷GEIæ#—P $1rª£&L[Ȭ·\P…dnûê˜äKBT¡%Ž8|h0ÉšËà}i<ß•‹®>ø8 ‹?)z>ƒj—éB˜òÔ/ÆÒº‹ãz(õ>K8)Ì1x3_~äSÉïÁ!¬N"ÒÓÐljrš ÿˆ +vv›þÕ»‰¡Qƒ0n%‘s/‡Hýä÷b~§Ÿ½Ÿz2K3>_¸Ó[ó \›qŠ"&\¯i/ý'½_Â2ÙDêo”´ibz<ó/Ú…x’ÿø‹eìðû慨‰¢5Ëp|,/½Ìl\”F:’÷º{Áù +æ†'¯ÚîfÒ[kôÁØKeÙtX(â-s@ „ëUyXl´ƒ_öÝ“~` ¹/‡µ©¦ØÆʹP³§ ò—©D׸ëŠÑƒ6ÄaS(F „²¸¢Zò£XnJ|©wýf×+‡|;5J˜¢\\ +Ôy"†hdn#ùx‹;ši¬¸ÿ„!ƒe‰˜-¤ÌtÀ;œ²µ›âÅ.i¹Sb96…zÝ/&_&ºv.@e뾎ªÑMðd´•¨»ýŠâkàåärÇ, žê·ðÞªz¶‚M!ù ^‘õÿ±_ÈW€Ó¤#¹qg%ŽUYïXÍÚ@µvN´ý=úW#€¹ŠÎÙ|£tB«^^—:ËÓhXŒ'HRá +G(§Z||Zèëýøé#œÃiÈRYA;Šq?ô‚ãæû+‚ÂÔ «ï‚z$ÿäÖÔXîµjŽç·¡Í”‡§À55é>ýE J +®&É %‰€ƒ˜Ðêd° 8üé2 lí_0PBØ_ óB&Â|ý(—¿~ì(6€-8²B|¶ùFÌ؃£lÄDà§1¹¾TO‡/Ùe1i;~0I/©TUÃf•A—,„L½Zp› T /×y”-3MÄ\‹Ø~„˜ØxÊ î¥v­Ï"Q°$ø[!x¥l”,ª)ØhøCª !I¡¸ÐŨfk|?àâšTÏžhS΄ +ÎI÷øÖÅhœ?r¨ë„Òo)¹U ÊceŠ{¢SÏ ØêtCÍ-Y+¹q‰ílX§+øz:ÖLO˜Œ?”¡ŒÑûÙ—n\ôØÔ•X‹ Õ +‚+Éyü¸œ§ÉA2ÆÜ6 m±¡²‘´«>‹àd^÷êî>!HúË*“|Œî± ùñ•ÌÒ’ÄMÕå +pQhÃ1(ç¡ z\˜‹(ÍOHÑòDw%20Fï(Õæ÷5ðyG˜ÚÙ&ÈW˜I7Ÿ€ûmd\‡ÚKCGЦÒº„Ý2dF,çí~Û‚òèoÕœ®ž0Ä+ܶ¤€!$G¯¡Ýèùº6Ùú˜Mõ4Tâ¹'Ë«ÒD€ñ”£ì§'EeÇÃ%ˆ´Sól5Ò[áÌ1øf;Å/×½؇™Zk¸år¼¦vM/ô›ÐùñT„y`Óe'Æ0¥xAz¬OXMd]¢!|…—C5(L%§ °BMÁ‹ÂTOmp{Ígä•VPø5²”i>¤PÍEsQfs€‡šL¡ `:~ó´Ÿ)«‰ì>̾ºÇz› é'Ím~(ÍéÀ/lh©•þËï~-âø ¶šPéC9Ûeo‚ +füI~´l³\ÆØ ÙL¶ü1[°ŽÞ2Æ¥A%´Phà–0¤<—&@–‚Á¡=2áQ @5÷EA©ò—À¦)¥x¦ €_Cõrꮢ÷h:²+§lƒ{R\¹y<~ÓÒ§ëØÆ1¿7 r°$qnL¿]XKåÄuã“š“?¤e‰0mx{ZèguæÑ0¿º7íY$-…Uu0õ-´o¾ëðåœ×º)5»é%ˆ"q„†³±í6SznL%Œ£g F›aÿGuô¾: øiÐ#cb xÑÞ>‚È’†bB $$À®¬Åe÷ÿ)þÃFdÒ©øƒ«®NVÕ …â‹tý¸„k£GU?°ÇLO? ¸Î¸ŠÏÇgiA¬v(œòwyô…l©–š a ²c´ø¸èAãrT”]3*Ö²·úê­´BHÄ r)Þ +£æ[ØN uåš.RU°°?pJ—S¡±ÉJ{Ü9ñôCôêî±ÄÃp¬ql:‹ýYs*Çõç™ÃÎ3 ôL™Ž o)W`s“¹Ç)]ý[=ËfŠ8óAà]¯íÑuÉN¿ +™õ<Ì£"”7ð¯S:ì‹'+úMýôÕ/—ɪdž;ü÷ÓÏL@ñ> õQd LDhä\Ï»b-įøg3œ5&ÎRüLsÃ&¹¦Ç˜+­¶Vc:íí$¯Å4fm¥ŒaÆ€-r¼ d¦Û³%E=pû©8´eÑœ<&ÿ¿ç~u›˜Þ¥5‘"¦ò+C(R;Õ‡kßòM¤>çlj©¹÷VJcŸMÑ,”߶@''íàRuR W(î²Gµ|+ »Í}bŽZãKýùFsŠ‡¦)©eÿТk[H"=†ÒQ5–\b>c`<|'x,Hya,Îgõ®:ÙR`P¬·YÉ€Ô»yÚRvx^ë"–~ÉCb’—¹¿çý¿ ¥4À©Á?÷òùzÇñ®Odî1õ|8«|/mä ˆh+J)õDr€‘>.IËÖo\—¯Å1Bàý_pévÐVIÍ‘¥µ3׋§‰Á·žþɇ}ã7õJ,±Š{šÍŒÙòSï ]ÀàÅLûû9‚%\ 2¶ÐVIíÇâ*¯Ù«Û¾^ÏÀ†öØÖX%?‹,—Šå¡ú#¸Iy´uEDOðÖdÙe•§Ä8~ Ó^N@IL´ µ?‚\C!—2ÎpgôÛÓ&=} \¨Á²¹b»w¢g$‰ÞX¦‹Hu|,\¦áÆ{$K)Y¿eL•‘éÿ¿•ÞMðyÖm4‹?g!l– ,&GÀâTjWgàT°Í4ì?cè!Þ¢·+é{(#žuÀ?nN'à{“P´2i!M®ß + ø…Ìob©0.ÁÔöß,²(•q6Ðuè„F¤®>N"ÄüŸ¼¾º¹Xß‹šJÁ­ ðeº“¹Õ½‰pH—‡¶céÛáØÿ27JÐk£Õâ[–š¯s.Jl“ýó1}‡ádñŠ÷!‡-žzIcBgÁ”ϼB´bÏDÝ9b¹NÂzÈ‚t°°×çD¥µ,‹œZïØ%AÇ¥0ÜÞ‚¢n$J½¬|9=G&-I8ºí¯º@Z_61ßìáìû!£ÔßËÊ<þôÝm.a¬YÃŒcÜä¡w«—Ld£þÊž‹m0¼Ú¤µ’V3^v<ÝùyžÖÎ}¡ibW³>ÜÜÈã{VÙ¼ά¾ºPýA +þ[ÀOZ]w Y¼êÂÁá¥UF›µ\êœWïàÁ»½'¯+6€¸ÐŽä™»ñbк<7 +Š²3‘±Ê¼ ™·Öçxy/Q¿VBìZFÊòÚiFÄD?2‰½H@*ú‚j=¸±Êá]r¸Ó:=OfâLQØž¢6hT¶Ìºóˆ¸rÏÚ+¿OçÑi›…nPÚJM6ñ:„X¹%øøÜ!΀EbÅ…ZJ]Xø‰`¬)€ÝéÄZ‹P;¥x–óÔq vJë'‡_F ÈÓûbÝ]œÝg¢~P•ÜKDÂL«%Œ›zIYŽÚ/^ÍèÉ8h£*ã^»£ãi|™ÜŽ½ÚÀ§³ƒ} gf±§d*êÙú õv$Ù¢îŒMlQt˜#Kšx]®mŽ@!$Ó§ 7KÆiv'0h–S¦Êù­íz±¿™y¡EUg:p§K­›±P’Ç@è –|uÄÈ0.üÆ?.eû§nß-DÑô$P%qŒ* +S†!´Ý÷¯|öŽm×®D‹á×z%en¡ÝúU¡OXf£¹Y½ÿ­ÑI_ÀòŽavÑ>°$Ô/— ™±HV/«¡"$#̦q~›e»åH»V[I¶æ– P—Õ(ôÐÛ—”¨i®0w¯øÚ†Áa¶NF_”ƒxGY’óQ’`ÿ±yÉ›ŒNðwœÁþj•3~­‚²¬*92Bô¾,ñÂNrM {7².É`lJæ­3‹ ˆèš]bèS”bå'àŽ‰z›Â?TŽ™*2ÃÏ/¹ –»C-Aö¯Ó¾8,¯¾8ó±ëÇÍÌ +ÇÜŸ ÊX£²çN?%Z)·Q#ð·TX‚†z‚¤æÍœJñgê™ø9eüͬ§ï<í-HSg,p]q»úy°µzDâåUÔÏa:àÉOüûq‹À/À.†WºEþ&=( +ÆUc*[É +X Ø +)ŽØ•ŸÂ¼µ>H>ix"ŒÂÛ `祶Æ#9] ¨Ñ* mõRa?N%ÍZÍn*…P  ’ˆì‰ìáf,ýK=!.Iæ÷ùÅI¦œt;• bÆ-Vœû|m¯ƒ +tÊ_C¢QâáHáå5tÑ~ Î|Ʀ#\Á$¢ƒ4A”)Õ¤XÒê¶}PçU½û¬=?<é²ÚC–Tô|¥ƒ2h8Z•À©ZaX¿AJÌ5’+6Yµ?ðà9R0;ucÊô°‡`íz¯øáËq6v¤_´T‡á¯Ö ÷­úï‘oR}¢éŠ”8à®UpHª½ìÜ ²šÁ €Ä3vöq*SÌN¥™Œ.öÎÝa™«jùÓr^=}Yž¿J±9䱜¢êùkßxcpœ1h¡Ì™]§]–_AQ>»pC)ÀØÏìF™SÃ߸è©s0H° $÷(‘Ë,Žº™eÔüïDÍä„(ê5@„Û‡HBzu©Qú.ˆáè©Äx ›£Ùû¥â§ô†§m²¡nÒƒŒO‹ßÓ»|™ùP§Eð^0@ +}Ä…Q[q¶[HÖdÆ[í’óÈÇy½R2¦y$ GKÛ±6ÌU– ¥jkH!÷‹- PVýæ³w‚6³7õTBš—. tì~bµUœ;žË½ìç5‡ ÖZaŠÉ¨À£ÿ‰/ZFsÃÔE +YUn[ÆvÁJ²©Ú¤«œ7ƒ‹ÿC^±²^ã·;V¸2Æ +«¶Õy0”°)%3°¿ +ôQó†Éˆú1ßb1+Æ¡2ìéfeÎ]mºì(©DêXOòÊ`jϾP€ÒCvÙ§*#³äÅ­±"µÚëBRŠi’ÛH¹úS3>ß,á¬ÖþJoyc)äk]¾¾Z&[mFÊFÇøyº#"d€ýy5Moɽ ¿†åqhÅx˜d‹`DW.Ñ’R¶RJ¨\Lÿ¤Õ;åm¼Æppto†*Šíä K'¸ÀʬæŽs™B¢9E ¼óã5ÂÍÏXÇä•õÿ’ÿ¦ÉñD|ÛáQÝÖ-Ï_«• +ÿýµBfôkäÈã•ôMìñ#Œ"ìµÓšÇ;ÛjäèE.`‘ûáSß<¡à-^*’ ÉQÜ–L—øòmå2I'±9½éÖ’ŸTRlf0v<Hö°! _4«EeFíÜâÓŸì"¶pLO×ßr6*R›çLtØß:"ê‘w“IÛ&rà +‡NÑf•9Wá? ð÷¤æÊ[F8½™û Æ` Dc¼Qˆá,û¿cãÞK¸¥/ÞÃCRôRLñßS#-+S¯±¶C½FöD‘ÖÚ„\«´´€Øxér„ødÍšºbÍßC×?Èt²Öî4…±Üc2ñŸ]äÖ©Œº¯n&·­Ríì=é6ö7Þº:*évx»±F­v¢¶HY¦ ì5ÍǵvÈìæÕ¦”dÙgx’}ݱ?$î’£1Yï š“Ì3 +–ÅÕbx¡°¢òDwŽÎ?ׄÀý®H6›™¨š1¹]@nf·ÅA®Ï‰[ ôð"d€ÆY©½G&xR\;.ÌjXDN.äáQÐkìþ dIT»OÏ廈‘/·pµ[+‹z2ÜU-¡C”ï +~¤D!§<å6Br‚gÔ-˜#ÎúäÆôNg²âíjîUÉ°ðùïx.ó<¦KPk]*g´4{|]´ç«¯):{³óiÓjï1¤WmV»ž7_B@}üiF¡KW¯±ëheUºz!‘A)äë†Ð$µÍñ œ„ky’¬È`/CäXçÆ&"\Øw»¤¨Äk§ÂˆÑá9©DOµ_wþÜ»•Pž5`ÿ!"=´jLrÝcÜ¡‰[ÏèMôú†€gkÒœ&LØó­ýÑ”÷.–ܲ»à±­¬û0Õ¨­ˆ_³ÛɆkÞÀƒI7Ó§//20gòBâ™ÑQp:á5 +(.AÂÕé¸Étã°ã-§ +å*{§¼@ÿÇQ·VÓ¦RÃx3t™¾»aÙB$‰.FPk\õói.Äú5él˜,™›ÑaÜ•m^8njR¼Ã¢;w½Í-ÆíKŠ5Ûj%­Ãœä·Ãì‰é'vYÙ㸠+4!*hXX§.¤o€ÅZ‹WòÆË5bkcê“z»û¿øÓU‘åÆyuË&ãíã#‹óFÀ¡‰ž]1Y—?*ûÑôÊæ9.¤9w-ÙKã,<Ñj°ë!`^þט5¹µ†„ƒ5=ú`²‹€H1†k .!Ä8ÛíðãÀ¬zÐiñ +D+fGâ8ö2á{üIÓßÄä&È\Ñ.…—Ì,Å«ùá/çÚbÌUžÈžÆA4Ãt€ÊäccHéZÀq“á÷!Ù÷  Š;záAÒVj$¸Þ0ÔkñJîÕÓµQJsƒKaâ>2;@äÀsqÅÌýÿadÈôw¼U`—b»×ƒF? ž!†×}ìës)†]çñ^O`FÝÂuËè¡8IÙ»Q—Zj;Åò}ŸçG®wmÕ%rÄç øº+¿[#O9ÐØP„³IÙÁÛ¼,\5éˆN¡â¤+ÅX3d°f{p&yÔæ×4<KŽ‡ˆk–쿶çû®‘È4ÜjÜÒr“¥©Ñ)åD%}ZlºÝ J”Éœœ¡hD[T?f•.ƒÊîŒéÉ¢u=É­ãÊóDëìÕ‰f-NŸö°k¹A}‘L1¦d~°”'ô¡ç٬î<¢Ø£¸¤Ÿ·ÕdK;´=ôj|ë1› *¢ùÁ£>­J>lÆ*£ññVX„¸Õ1>º z´ÛEìK‚[8±ÿÖjF GŠÿ_ÆL´>£4¦|áZ%¼ºï´Y;rF Šë#ª¤Mê&÷l¸¥ •ˆ*½óÑR²°ÝŒ¿…ìo¢T¡;š}Á^}é"'šG¨ÌY{ÑšY£Ø¨Ìkh"€Ì¸¤~ø¤ëK +gó•÷JuƒEžS)¦l¥øCY¼á{ç0•[ÓÃÈÂ÷Âò4´N¨ +¼á¡dÙYh¬©¿.}ëÊoI'E5l²äûYÁ܈ø» ˆ7éøOÖC§P8š$!™^â;w%Všh“½×w‰Væþ’³¾¥2éÔ”M$æŽ`‘¡^åÍøÍeúYúÈg8GŠY,‰<÷±·{Íå<9¦qj3[+gBÛKQ)0Eb5*଎nâÌD£ þ¢ÉÞÙ/í«ãI|Îò—N +†É ŠŽ€qÞ‚ÚãÀþS4ÜÚ¬èŸS-&Õ§º¨ŒúsÒp ÆÙMiÒ^Á'„Þ=çwŠÓÄOÓ¨¡zx!½nʇ1¾gúÅ(¨%W[-‡ž¥0«Ç/+Ýà-Ž• +ðà·̋ÏMË”vuäõ•«*el'•À< X2or(¦Ešœ¦àÊLצh¹`Wð ‘$/7úÖÐÄ‹û7¡›« ù’ÉÊC ªc„æ,óUñ×ç\È 4¨d·Š7&5ÚF4,_臀œ” }uÏ:éLÝ¿Ånš¿ºm¨X³;ÄÊ]²˜ÇÙ7.ËT&rÅNŠnAˆÛVo³)MÉP¬³~*ð +ÈÜHþ_¥T#OƒDí”…éJ 2&yÜf’dPVHÏJ7±Ý6YÙÄA¼ö÷´î©ƒ™ð_}(%Dggï'é‚þA‘ÉÇÙ–B§/Õñ«´H‰{t4€X*¥ò_çæ³T´edŒm¯câ + ®ŠcÇCq9N@³I|gãm•9 øì;H³™æs]Á±s60¾0ˆ‰cäÅa±€È|8nƒâl—¯âæíÚ‰¨îƒ¹*}„ál­¸iOæõ'» +v]JS=R’Û‹£wL=? 'c¹ÿS~vbÅQZ:«ÐD%€)ñ,°‹¤ƒYwÖbiËTi y—Ê®¿Þû®vH‰B¼¡{]µ3‡«îè[,ÀÃø²— ¿ÊG©ƒû㇛2SCÞðG+­é+Lòîv+¢GYCNðÕP„Ô½msC1ó)sSÀ No=QÒ!' -)Ö«kj_‘­€SR(^g9ã&8ÛT¦Çmêøjgç&Ç2¤Û±ë¡ æÚGj/å@°GS*³D•ÙðÀÃéilÏBíÞb¡’§µ`ѹ*haÝc¾‘Žé™âã ÂÇ œnÅK±¸/4¡ “f/ ¡|@Zå;ìDÌè{†óä;_*±žž}ëƒjíaš…)¦{VýÙC»´Ü“O¶ÃtþÞ+•IIÇEë/á»ÝÎIùåá?w#~»°’ TÝ’«]ö[ÉH‚‰JáW±>  ßš™Ú`]¾¡aþd+6´îçÍ2'_MØaq¢Ôø¢&¡ÎZ¯V¶ÄœvÝFmHÕˬÃáÁjûÜ ß~çòsíwxY€/=M‰¸Æݽgö^Ñe‹Ä«þ‘óÉßÐvÉ£maQ›—%½•ÒgnŠ²…¦q~úõ&¢Œ¦˜pð/3OPçïŒ +Kæ¯Æ®ZÞr쟓™( w¬7_áÚ%Åk}Ý(›çSuAv¥{9ø* 6c%R—r©§ß²}"µZƒäàžÃh«½L"›>Ï\{?GíKÎ2Ï£"ߥ!©á‚[E€òÃv&=·)Û¶?z«­ìä·Õ܉¾,„“Ïâ)‰&³mQA#Â`^pø†ú¾J{žYÂù“‚R“œBƘ%n¤"Ã3 Æÿ¡*tÓÍtœž0ê2ÝX°úèPУC¢¥:š³JÑ¥úá1ïÌiÚµ.*L©ã!äqÅÙpѱ ûÔ+åÿqýå®öð¢€õ1Fêð½¬R Ïb±\?Ù‹¯‡b$’)_×½a9í&d€>q¸“bì—^°-YŠ2;JX¶,”šãWQÉô’Fôäm‰âóÛÑ-®bŠ>nwLc×êJ27³Daýµâòù ¼äxf¿Ísè :¹V’ÔRèJþÿÖðJqòÖ3#b´ƒÖ–ØCiŧð]R¬§gÑج쥥G„ÝÛc’lA½j]ê ;¢jß#èÙ¶{qÒÃöª±Çl¿É™ÁVõ|eTWD¼8°wÍO é$ÉÍülÁaŒÔvÈ¡DÕø 2À.nöí†(ëúËŒõ«{_Ñi%ú™Jð£p´à°nÔq,/èC2ü8KaÂØhšÓ'<×0³…v˺ˆn`“QâÌŠÛ>Yg7Ç“ §ŽxYrNÜýd¾ðåòEˆ éYeb÷ñ*ÅÏ­ÍHûñŒ¡™ÑVcüNù‹-ÍX “!`¯™Fägý0JñoæU!@€äÊ÷2ž•žñ[t kÇåW7nº ¸úu8C wüÁó òߪ%Æg +6G-ŽŽÃ¾šWQ®Z7†sÐÇZ+rÌĘ/6¡z‡îѹù*øFfuf\´vóáða!­Åî3 o ²U;qJÎÉÅVFù²År9:…Ñ åá’é`ª}®ÖÞÙÙsj(Šöñ{gç×.w”/±È:í4Ž›?3Ubo0të^X’œöÖ gªõÑ: Ýq¿uQPêQoiö¦×J4ó‡Š 1ŽÚÕVñ†_žJ»›û²`^rÔµ/E<±¯øRoÔx/ñv*š¯–hª -ô–*æKÝÙ€ÞŒ³ÀJ½WˆW'$ ÷"…2«–yæ•ßµD]Æá½yÁP{E[ņxãéå\ ÉÙÂqZ‡úéG²4vÞ)oì'òfå7ȽuEðùßîBgû-ÀAÔá½*nŸR“w ÄÃP©F.ıìþÑnóY¦ 8ˆÉ\–ê´)«ÑJL•d`­l4ƒ<ŽÞâ¸`U2í;–_Ôj¥¹H¡žäˆU’OõcG6V£ö|*{ 4å2Ë=k5m•Ø†²u5m™Ê6WO2çyèǤ(K1ó;Bó“–Å¥âÜÖ`ä–ýj¦•Z±JòËé0S_ØSç/]ÇïÚ¨€âÒ +^XÜÙh;d¯Võ”äצ¯äýQîŽÖ?P¥{ öNϺäb\n¹µ•Á˜–/«Ýç(äk†.2û~RwMGï±lž¦V,ìÞÄFArpÅðÛ=áôþ@0Û‡ÜÝm 0›¬— >òY)!’ÿø,Íê.Èè<ÓÓô”P'ÖF™ÈTCÐ\¶a¿B¿Ëæ¾çŽW†Ý¬ +¾f Dn·Ez-³g0}aÈFÂÌ„'eÀ)„Á£ÃvÆE©œ|D'DÌû!0å•´2Ò:Ìóãñ´çnØ–.ŸH'ybåZŸ[ÿ¨Ö:•[kóñ_DN ·ñc`YF`${’£î]ŒQ¼ÓpìPA”&ƶeSùv¨ß² +äiÕ4Å’ÂíTKâ‘È¿ÿÐã¡õyÂA¹]!rõçÒFË*Õ¸}%ý”ÓÌãv +O ÁVIAg‚¹=Kž…&“Єü.j>óìƒeŒÃåMÝ¥X-¡¸½}YG¿O³* +:¥¨a {´ ¡^ùÐ¥Gâ_lüSmàž}%•T¥ñîÃdZo€oÒµ‚ž•V!@iHȳ4q`Pôœs¹fV˜ÞÌÞs¹‘3Îe,ëxÞýûþAb7š{„[;ûͺÑ5Ïe*øSåMkÐ!¯’r¹øÊÙ‹Bl‹?v 4EÃ{­š5ÿ&CÁ@¼-–h¡‡B°Áy¤ÙßÅŸ°l´¦f8_ª5Lê~¥dª"ï\o+½)ÖDìûíÒäq!ÊT­ ¿ +÷@q@4›áñ2.E½û)YUü7×0VL2³={†-!–’Ÿoµûd½¤ásŒÓ$ d’ŽœqI«Ð'W˜ +k) P·UŽ&DS ¥µ¡ä1Ñ,ŸY¸]âp]Nûð}«(öýuîÝmx¤'þ [%b:T2z„ +’Ç6gèÌí?ÍrÓI¦Þ©¤À˜“¶òhÃÐxñ’ƒq Â:3<*ÙÅ<|¥.„q‰;°wt„yW¤‹N|LÉaÐQ¢AeÇ|è…éöÇ3O$«Ü"iéJ$²r¯iÒo±Ñ’´ø4QÝ×’óËË%ž!¨îiІšçmI·qˆ<æBuTQÈ +gÌö U•áµ°!"Òøå"]ÃAm²›´Èm¶m%asGà·añQÙŽw`–“Þj¾§Äˆýéµ€.ÝP†8a¹A–*n–wEºñ©¬ïri‘BM¨Xc}° uÄÀø,Ïxý†X³‹Öç;Ý£‹ø-º%™J3õ6Ñ7s¤¢ñ+ßыжÒ]S?¢cÍÚcIž¾doÑ“aU›ÒÚ„qßK1*µ•›AîG‡sŠ/°´>†Ò!Àú¡ƒ© $z~­ìL‡öŠ†\’ÉøgÊà&Iã9Vϧ-ñb¤Ðg§,0Q‹w]•J„×kB£ + +¯€ +i .Œ5zÐR2Ýgå]g¬áÄÅ“¬='áyD¢^’žD¢äÓbž§=MÉÿõ”·Ž“§,[¹Üƒáw‡‰6 +¶³<˜JHÊÀKdByχc✖iäeýñcö&ëó|®>-Al¼ø°(?HÑïã!âe¸Å‹‰Óã¦5eÄY”©wçÅ~Mlˆ_Ø…’óªŒEe¼ÓìQ<),–= ÙúŽ±hµ_~^¹Œ¦ÎPÞzy‡uÉ‹74d‘kÂEM ¿‹é¹­tD e¥ê¤çÈÙ>Ƈòø™Ö¿´Í낵ëBkûJ{›Šùñâs%ñ|“%„ohF-"š%Ï4Šf»£M›æóÏ“òXý·¥´€©˜fÌ‹”ZÖ½xl¥ñr.‰ rOj,‚\fyé>¦Õˆ˜*וI3Ë™<ª²6©Ÿ = Üg]É;–T×€ý³š<ÿO|oÙ1œ'ØÆPÇ« ¤àßïRôÒõpXP5•™ó’}ÔÒ`fBØ¡ç/ˆ"nÁ²ÇÍ`Õ5ò•³qâ"ZèGäÕ‰Hç)6ISúS¥Ç$ñqg‹Ilù’P˜¼k•‹¸²|F÷QÚ5¦!ÀQ ÇÊü2áZö Qq5Q¼ ˆ áe­ÙoK@ZL:Ä™>©þÕn/Ë9\•ðvÊå2yñSúÑfÑRTGWŽR¤©%•¬Á–Ê‚ ‘X´¤0ü¢øÔ ä[Ý5ʹËYï3o2[W3ÓĦè_æº.ŠJ=È mƒŽ=YIF/Pf~~0L¬Ï°Õ%/]¨J(­–¹PBHµkOˆP]v  ]ã¯Z +Ø8)´­]­ŸÖ +´áVЬFo‘.RHtAø\¾ +“„û¦6G,w6lÕrÂFÀj)ÄG+¦šF¶R'à>vb°êÁ­1÷š`ôbjcÄ &;•ôá°¸Ÿôu»…’ìØ4O=,~™9t»@Zî5Q'´ ßjZE³ðV5Ò-/¢7’O²H5™ž•LB#X{i\3ò¡…Ãʵ[é¡õ69ÉwªšZ#po;»¾/%G*…¼Á+Öú:!b7 +¨fž¨ ­t %üɔЭ¥íã¥×âÝ»Ä"êç×­¸‘+Nåñ¡ÚwyN„¥×—På(I6!B›u6 + è?[[é2'iÀúÃF.(`{¯‡¹ÕÓ›Ò{ŒXRÛô~r÷˜ˆ_ë•aÁ+؆Ûû~¯p¸¼Ø2ÌÀ›iõ+©.*C²Ñ)Yoá +W·[´sUkð4©Tµ »ÐÆ¥ÎÃo²þxøƬÝðtOÙ9K½[,2Š7MÊ&¡OyP¶Æ`æ@QÞµN"yQ2êƒÈü PZ²WVæƒÙ@­üמU5Æ÷8ÊŸåRXyþ }o 4–ÔÞx&Å•ºÎŠ&2.]6ÈCþGjæ'«Óí ˆ`¦4;Fb”’güã‚èÂ. +A‰AVƒÓô×+@ÖgφmI×FMG=jÚ»÷Bû‡³Q‚⯧sÈsĖᨒ©9DÔü–ú•HvÕ„Û +å+_Œf0GÔA„Q…È WþQ°³ªæÉSkq`Û ÕR5P#h5!â =¥Š…Jˆ e¨²Ú uAìKÎH~!]m®^R¤È%ä€p(ÈÓãQ9Þ*L‘YQ¸Ç(7ÄÖ‡8Y˜~A/ËÊ60MÍÒ¨ñÓ!—<4Kà&¹!ò¾%ÖMÆå-ÓxûÙU"ߺŸRZ„³¤¹P{ÏàêO€Ù¦!ô%[ÝÒoÊ#GÐrOâ,®\®âÐgm+"—5„åø„­}¢/üá$™t9âÓ+wH[?ÌYö<-8ÇS´9ÁÔº>y•´|"‡Šj5k¬W(«Tï @Ÿ*½•×ØÎ’xÒ«ßìÈÏl¸d¬ÿÚ-"£“sM‚ƒ,–"•„ÑÀ--hHdgì7’^?ô‡–çëTBW4ê!K’¯â +)î(_Û–+Á5ª¯N‹ePMðÑkZð{mN¥E6—dXácú«Ìµ,¤+½¾;\ 7 Ü Ááù•t'`]jÓwç2ù^«áIÙ!>¾b¿Q~WiÂú$¶TiŒ'‰ QNBY¿;YSGèn±Q¯aO´IXiÖ¹°˜IÚ3Ô> §&>þ²x[ç;ä¦}»ae°IM›P6¼?¹ÛJNG*Tמô5 +ѨG0—îá¨ÈR*b­[Åñõ·HŠ­œI´ÞY櫽Á3fè.ÁB¢V£L †¸<]Þ­9ê‹ÑôäÇ6«v^ï_€ó7 åp™íÃݬ˜¢J©•yš3Áû7uU„¬,a=‚RõW`HméÈ!¢Ao$/ü‰3$£88Eõw›:žÔiÀjYÕ†lhCnþøù¸Ä£:ÛC&‘žtŸ¡×r³48RîÐ%Ίî¹ áô0Ü„ÿ5<µçYË.HpÞú);æóÄÑ~Úîzs‘Éí"çïdù½„Lj€Â°®F­Ì,{bpZ´$3æVª>6öô‚ñèÿìã¼ç۔߷ٌøÖôºQ>ÿ’´ Aç*T|d*6Hâ4z½ ã&ŽcñH['&yÚAÇâ<%]5#Ï‘ãg„š½²™ WÀÓÁÕ 0c_ü o˜™ú¡Iù§/W?a(|ž" ‹xu%Qƒ[‡dÉCWýÁÝ.9c¦"œ;â·•ÇK¶T_îïQåߣóñ(‘ÝGKMÆ(É]~"ºú„¸ÈkäËSòup“Îþ$p_çc¶¾3CFqv@$È`$¹LÉ Õ© p§\" +¨t ¤^<Ù6yÍ Œ tR7l„ ‹,“ +DO ýS¸`ºÅÐ&©çt,~– +ݧÒmk„ü=‚Ž¤f3ý·;äÑd»·{°Êå;'Õˆzg”Í Ø‚HÈ–óãžäucîõÊ«]%“£%JŒ6jõÆìs_¥¡œÜ½aμÀ2渵>¬ËÉDÛd{è’<щ Ô:™}UÝø9 óYÅØADªD–{.ªoVHS´äÖ[‚î`f+pdiKéãCàz±ƒnÔ¿ªƒsÄ3ŒÌ»:<© ”7ðÅ8Hõé{*)`¼`íŽzäÙrû&EÛ^͹ñ¨ƒ_Ïô‚Èû•dú>@à O7Ѧïü$ÚÏóm3¿ËWúØJ˜ÌK½0àK\ê¾”ŸN{ SO°“^~‰FâÐ]ý +О/™5A ëY¬Ò0Ë}Q£”ÐÇ}ú^“ópÎÜú&¶ßÒ+¸Äó‡è(ö„Äõü­I¢‡…£çñÅ;½°ÃTÝÖ=Û±ÝßÒºÔO J—%úáÎ'eþȆº½;è1Õ€|@Ù¤%ä¯\†°ÎŒ­ÂçJ†BK|Á5 Æ‡—|ƒß´y +¬%~!Ú/¥¤0ï5‚&oUÅM. ÚdÙÁÉÑk +D3Hó,“­Ç¯êµ­D9ŽmeÒà²DµmˆmL‰™ÑÙ&PK_*.žÚ +v˜Øv_Ò»/Æ}Ô ñíÚD¡W”ç«ÐþÊ•êÞHÿ•EŒ€äÁˆ9±™ŒìQqȦ U蘗­P6C¦dÂý‰Ü‡p)õH°¿sºC׉ʙ^áÈ÷%d½väàkZ è±1TÔéA˜Üûó9”_Éè YÏF÷îŠ/ dkÔDS;XžÅ.ö7HÃà+€øS¸IqßIñïµòfyÏÌN¦W»[˜—åÕ}Z! +Ͻ1þ-a„ŠîjcEm'éþ˜`ÿ˜>ðX¹±l.'ÆIz¼ÐÅÐ(ÜèœL¤ê;P·¤ƒž  Š0*çDÖÕ­ŒoÞÍ ®(hÉM¤+[Ï a(ID±Ãá”6ë'Ns TLôQÃé%:;ßC·Iõ÷&×G*‰ ã¹0ËJõ„¤ÝRdQ{êFÿÂŒ?q°Ô&ÊX›ª(DDÚ `ÌÃ-;Úó-Œ‚$Aª=ß&ÒR*—LFd@Há Ù +æ +‚RðÎÜ ìÕf—å+MmU›«¶0˜µÉñ»Êñ`Ûö°w Ñ@̲Êi‚—$ØrÛ®¨€¾Uäcçx5Õf{JIk–ó^b/V;­:ËñSá‘à{î½Ú1¶e¨µ•²ÎjŒmá\ú»@}ÉÉþ{(n.Xa$ÁÚ²Íe¨£NSo,Bü¢ J°ÊrmÌͬ²öÁr£X·ÔÀ+sÙ“@°ÞTÏh„®rI ÏU.Ý2‚˜f‹©§CÂN–Ñ(rÁKÀl‹ +N–ü‚+MÍÑ\•S“×ÎœƒçÜG0ÇkÊÜr¬lí¨›ûlÛ9Y–./f-ol°ÊÜ ERžWY>צ6æ›"V…`Ä#ü’^øegU$Ïîk™W–¹/K[cAk ³m¬äã8Ò\¶&³®júŒy´ºj ’U¶UììªbÛ®¨¯ÅØÚi¶rÈ’ü·g+Gs/Ú¶+À: V—mçÜ+ˆ×™`ÏVÎã¾ØøÅôx¹/+Ç«©ms–Ìi7rÄ:Pù›ŒÑ\+k"Cu…älå¬-;{cl…Ý*ñû¶–Q»wykLµ°³ +ß^Ô’‚äXeNE4ÏVÎ̺¥‘KÆÆ—v °rf¶Us!ÍeÝu.3g Çk€@ Ëk ù‘`ä™sŽ±éÚÂTÒzS=­{9rš‰×ÙÍÄÞtV1í:BOz‘·¹+ âVaA™9×&Xgµ}eVÈؘ&”™“&˜s¯-öº6A¼Î²*¼sîdNÄü²̶*ÈÂ2—§Â^Voolf[¦Î¬¬"˜XI‚5@¼Êk pž9i± Ã\  +Jÿ¿8[9êŸð£Ô8>e«œ ¾Iåë8ªb¬\·gŒ¼«ŒÕ‹!#¦†ž1û¢—ír#\^·ˆ×6'Ú¤€‡åèò:ž.Åê ›ãáò^vfå$zIí*ò‚-·9xµ*ìm/®ÑX‘ ~I²um)˜ÉifSÛx]öÖ-Ä:Ïjçuw¬Â²«ÆsØ/Ð<€½±3üγ•s{çù…y‘‡áÞÜ €ýc[Ìd¾ÚboëÎë(VtÀ!Çj—÷¸½³œ‹¹d×€Pg5][ø¦ÊŽ ÁÃåÉó:Z[˜ÊÆju^oÜåÉ—Wº¼†ËëÔYMgw½åboZöÌéu³Ìr( qóNŽãÞÞ‡¡8–¤XŠ¥Èwø½øÅîGò~ä{û_–]Ç’ì;ÜNî‘û^nOŠ|÷ÏIpâXîÍ=ç¿ ·/wî²ó?úRäû‘Ž`eKÿù÷#7ÃñcèŽ#sÆÆ”ïIïÇ0äeßàþÿ}é7Åþ½Hþ’—âßßïÐo‘ ÇÜ} Ið‡‹áÑûÑsvqs>ŽeÉwÙý&K/öÝKÑóñóí{)öð»óÞý¸{'Ë’ü¾KÒïÞÿEÿËýÿ&Eñ‡\ü»ÏÇß}éwÈG/þñcøC‘ûrüä·÷û—œ$»øÇ߻ɑÿ‘÷qr$¹¸yÙ;ÃðNrq,ÿÈÉOz’$EÞÅÑ{ÿÇN~/þnQ ÇÏ»ÿÿsÞGrý‡¤/½èÃNŽÜw1,I’÷‘ü#/yÇòûÎEþw'ÉQÜœpABÀ»È{þ9;9zq‡á÷½óqä$)î—¾ENö0üáæÝ‹?ùÞÛ¥ÿãØE±ô%'ÇM~NòMr¾{ÙËÎ?ç\ôaHz^Š¡/ǽ7)’cXò’û½øÃÍùÞ¾ÿΆ"ûÈKr÷ÎKÏwç ß¿ü°€¥Úœ=’ã8þ>’¥¸}èwÿ¤¸G¿ùîÿ~‹äÇíË—¡ßŸǿɿDz‡%ß{äÜâ(òñóЗbßãçá8îÿýÉró‘1,IO’~Üd/KŠãîœüŸE^z’ÜÝ÷vÒwîGOŠÜ÷1·èÇÝEò‹¾ôbÈ÷èC±ÜÛbÙÅŽâÇ°‡½ ù8Ž¢/}8ŽÛ‡üŸoÏ{~¾ÿEÒe¸Cñsñï·(Š¾ï0²|1Ò|r“aYúû±ôåÈÅñwr1ÿþ_ì[ìåæüûðoÑ|‡å8’}¿(Ž¼s±¹¹8rÏÿ.ÿHŽüG6‚†[ I2yèŽIqìeèÅÞË’spdØ}I ~t¿‹ß‹¿ïÜ|‡]$K± ;¸7/ÇîEòÿ¾{éɲÿîÿؽÿ¼y/Ãqÿ°“eI’ïïI.z²žìž Éò{²w~ÿýp±“%ï¼—Ü“d¸É?öïwèÅrÿ-úño2ô¡X’~w?öñ—e)n‘—ã矇dè½HòPIñ—= ýßàï‘à÷pìž“ÿ—œìåØKòóqü{0’üÍÃÿÁ=~q‡þïñsq—£Ø{/;9†¾Ã?z²üãXöíË2ì#çœ G²ä?’üq,ûÿdÙ;Y~.~±¸Iñ“¡÷?·çc÷ü‹åæ!ïå.Gp Iþ/–œ,ÇpÜì¿ Å±oïÿßü£÷‘Ž`—}“¡'ÅÞ¹H–bÉ9½ç;üã'Ë‘ G_nÿ½Å‘{¿G^v_’—]$ÇŠèc=ï{oO†ŸÜžä½‹üc9îv¾÷Hn^†üï¿{E#ÉENŠdéÅ_v?î’÷-òÝÁHGðÛ$Ëq—$ÙûÞÝ¿;û†$ù½Ø?/Ë>–¼ïÍ÷"ç[$ûK²ô{Ñ‹óîɲ#Á'#)Á.#ÁŽ rìœä¢X’!éÅ’oïC1lÙvI öæ‘ŽàóŠø»—^ Ã2¹–{oï¿'E.þÐo¾×\KéåGßÿß;äd8ú²‡béK2ä_,÷ÇÒûÝÇ°,U¨/ÁM†ÿ°ûpä?äÝw‘‹`ù9ç›$=÷&É?’ãX–â÷þœäžÁ‘äá&½l 6©Š°ÅÔ–‚èÔufk'D§Æ@]7­ó¼¶*HæV¸¨…oìˆ5·9u¶¦ª˜ +¿àžã5@«²‹å˜Ìma¬ +·1Ù&øc¸ÃQ$½Øÿ(Ž¥ˆßš~ã”oXgX°¹@ŽW7~Q<»*+SE(î²—…ý핹êÓÖÔµÎêüÆ&S*6Í f ðÞXøEo9÷cªÅ]cñ +ra6¿Å/J0ÇÖv¶Šöv¤Îª,$ã4Àƒ¶§%ÔY•!Î €G·›;@X‡ –Zkð`« d­µöº…`•½±5ýd&ƒŽÎ†€¥4@vns,Ve®:0q‹º`W¼Î°*;G =‚%X†%ÿ»oqä lA‚äœoAï‰qžµ*F¿í‹’eÒ;ÈϾÃq,4|¼í¦ððº4BbpmM9.’Æ¡‡#”bŽ„„KVa}•Z%TÂ&¥mÖ^öɇPL6,éy˜dHA7$ŒÁ@Èӧà O˜Lo,mÝ4¨Më2zE–(G1\)FCªÄ‰-¢…‰®‘ˆ$Ž‹ˆyqR”P +qT,pÓ±Ÿø4JßCÑËŒ§òÏ +Û÷å‚—C¹Zù @…ƒ/¥ #$~èܽnNƒ¤qü¤P²¼†2‚|éõQÿJ„¢wGiÂ\G«¬_@ª§Z覷•×ìß!ÒûÉâ¼na\þ¢Æûø–xg«^KŒ"¼‘ðpîuDÆã6FÇÝ߇`û”ÒL›š5Û¢~ɶJ~CNbÃZ \ã +³FKShøÙñi´09´ñ 2²‰ ˆ× ­«iALi£‚Çž¦ èu#\›€-Ì ‚å·-Ǩ‰@£sZ½~õMŒwÖn'œçdœ ÇÈøLÁ°‘ÜãdƒÝÿŠ-<ˆ•ZäØgÁZäÊBõ2 OÆÒ‚ ±ðºQ4˜?ì |žƒí4È„*r ^7°é„Ûˆ¶b¤pÉ"„ ÂÐ +DˆØN‚e0„Æ“DÂìñ‚dõF>oø2å9#Ž„ŒMj’Ð@(Ä£A,æ×eH<`Á„Û—*±ƒžy̘~ƒ \zä Ús5x¨;5 ™æÒÀ µ¢AØ‘}ÌŠ3h3¬Ì Ü´,ƒ‘¨†p©x2€¨s°râà!ÇÀxÝ­Ãýà¡vöàåÉŒÁ– ß „ª!Å0èP` §fôÓ³Æb8}?¯Æ¥7Ù›žÕ·¦9¦l¦ 0J¦G"L/>é¥Wm¨¥Çj¥½®q èÊâEéÁ@ΤK¨]Òa'3Òì éŒhéÄ'ñh—§rô™cl4þdô +ö0z‚Yt!6T´‹ BuªpPôÇ݉¦ELýј¦O†ˆöº9{è–.94&´”Úw&¤Ž]|Ô-ÂBÔˆák±”óº0¼^G™ëè¶u„¥5t‚õ@eÅЦ–i¡)ƒÐ§ÁÇF¡ßQ%¡U­¡/ÛqÐ^÷?ƒÖµÑÓ°¯´']´ƒehCk=€¤%äÉ×2Nâ€M¾ Ak­Y=` µ¢t(4åIèK‘˜5­Ñ¸¢uxJµÀÈúk4ƒ'zÀ³Ó¡¤~Ř¥Až°:ù1?/³^¡|5/g1:5¡Ö°JÛÇ3±øô3¹1Ž0ï·ßNZŠ×2²Lª +• Ê7€#3BjsæVù`äÌ£SâªÛ~³MÃ6àqL(–™@e”Èk +š«Ûèu³ŒÆ¢îd1:wŒgóÓ­1`@,Wð¢Ç©|cL‚›pN§ò905Å6¢øÉX –í½8›AæuŸPØn²9l”—¸¢\¨×P‰ØÌôkáh Âë  +Å ‘‰N‘@Šrá gåIÉ(DE™ƒÖ®Öúá°6­uksÐ:Ö´àu®Oký´öqAÀ¦˜•Z0`À¯ûÌ[­AaèJH1H"Õˆâ$.À†DÜôÚU!íŸI{MÎÏë|Ëç6©‡9ÌR•£«°Ǥ+(‹'ãH'ˆÑ x…„ãÁ3—B,ƒM‚Uj°( ˆ°f¨åöEÒä—1¬Ö)¿ÔZ«œùª0J¸xE-ÎÐZË ˜þ¸A’Y´¢µÖž…ÖÚ{lÞE4ÐZà +­õâ¯#EZëÙˆ§b%­³ú¢Øz€#°t‹¹¹™ëª9€Ëâü‚p2ü²mŒÂÄ10¯r¼ÊÔš Ê0ã-(ÃòêÎj;eØ}c  «´Õe¸eØÃ@~g1ÜÊÀÙÁr28»³WÀ-Þb¼]eïMÃ`;Ëí,Ãp;Ë1 ƒ1 s±ç¸ü0ð1 ¯Ø8€ d«ËÎÖ` Ão ŒaŒaXÂÞXÕÛÌÂdö€qûÂòãfÛ(‹gweã¸mPÇj´ø][µ…gPÇÜXLeq¬„Õ6^¼ ,Ž­ìlfgØ;°ºl;[e;Ž±+M…ã Û-8›½²uma íFƒÑÙ’‚‚Gga­ü9€Ûå̬ÂÞ8v²3vvWYF\oo\‹½îÛ±Å/æø¼½±åußÎ_Øù;'  ÆÊ¥ØÌüâõ›æ'Â8†o2¯ ¹Ç^s]ßæþÖâØÓ§$ñ¶JäÀxrHjJ×Zcm &x¢‡l¨Ìßó§ABÓc«¸äu£9p^ÂEÔa?ž¶ B’e¯Ra{]9{‚‘lË~iˆ§@8^¹Øë:ŸØãC%B“;†L EldN — Š‡”½îô5Z1L7ºˆ¡ÐX±×-85T¸âÄ(lìèÁŠäGËœ4eíáqò²#w94è‹ùåHŽIš¼Š½®&Š"_w„ á‡d»*äZH]]‘ŠÙæë# áÑ7?\õ`Ïj'·DF³ÝA®vjÀÔ6 ¸NC³šD÷:Ì©òêªoã&_Gfu‘ÓåP¤` Li¶—o~pÕ½0û¦ùæLj}’t!™lF¯uÏ‹¢ª}’Ôs|̉ùWµ_›‘SP-OU΂k> O£ÔÁ|>Æ Cðp +ª…袄ùê£Fñz;ž +®~/U%m3,td¸%( É%š$Hˈå€ÐäÆD´RN•È…ô*½.Ö­ Œv‚ñ¬>ûUÑDÍišL¶,íÉÉà!(ìã«c[[3Ý Úà ùš~eÍe}hß=ÍOf¶YuúÌb8ŒŸ‰h»R" õoM¸&T¦›,Þ±l€¶ejfóÆT\żÎ4{Ș¦ˆa “ÌDX”É©#ÐD@=™&uËÔجj½lÜE_¹L˜x}5TÌ5rDÆU)•@WI1²­™€þ´¢¶Æ–ô²8£P‰sx,¯3€,Í2aØj%ÀzèJÇ"‰• kÉT-šß¨òŒ«8‰ÊMàu,HNÍ!0cU-@v++±ÒÔXx}©XÇFC=ß5Àˆ¨Š9‹O8•f´óäuሒ8 T 3SǬuL<Òú&Ûƒ—B“‘@ɱQQJSLQ‚Íféƒp€H ‰$}«&Á‰Ok5.£’amŒ4%ÏM$i&Ñ8à%¢ j D ñ›…¼ÎC”ŸÐൡ܌¥Cskvr+IDPkt°UpŸEn úôÝJ?Kü¤+ÈÍã!ü“GUOO6~ý  t:["uP +kvLš':*S£8#Ž×Qº+ÂIÅ$‚3³…Ú¦TWÑÆÔˆldÕ±1WSMê‘Fš°šOÏ%4ñ¥È 1U)3HœsCZdh­â†1+5ƃR º)7&N|¸K'æ±.^÷y™K‹düK$áÐyþÀ,%Ûê±”µÆÃ"œ–ÔMÁVN½*U +’RaMC£2ê¼àô¬X)~4©=ŽeÎ×تrû·À¾ªy÷ÌH^ˆÎQ™ž†F×`|žäjg§]„&#–Çà‡ ÎôM¸m)´œóˆ™%ŠHB^çuÉÂë^çu¾ò±âÊ&Í&¬Ù"¸uáô;Õ霦Š‘èzÆâ'Gã: L©0ø²„.¸JxNFGfõGJ†€MZiÁ½®E»p¯ó:¯óºD†ã µµð:¯[<$A 21$$bcÝë¸Äf~zƒ±O¯B.C›-›Ð-ÉÐ Š"áe®…®´L MÎ^‰Œ[èø´×Yhèl«¼nR ˜Bü«[$Ø5Šm³NcW +šn0a§ÏÉ (t¶1g˜-§Åsnh”ı9Pšm¯‹µ$Ãeæîu^s#34~‰ÀÔU[=& +W¬'¼î0Ú>ºb/µKQï À†YÊyÝ@ýÎdC5öá¥zÇ íuTg¡eþdhÓŸ =²ô….ØÌO`ÜBJðA$C?¡O{ÀاYµ…> >­‘¹Ú¶Ÿž >í94º‹Ò!60x¯[„,ѹ!E8×–oí8T ÑQ³%jäx[;KŠ{Î*ŠÒQEY¯'àuïšm¯ã^Ó^·j¡¤Ó•¢Þ±ÂU“BÁëpŸŠ‘ö¯{ʱÙå*-˜J&æ¿þO-¬ èCqÓíu©HcW®’Y€åæ+÷`£ (œšoJò‰¥ IŘw‚CL“Ø`aÑ ¦W(£TÃBcý1çF27&œåTá\Ô\êu™xž…1çD1gÙ±#Mâ•ÖÏNHT¢3³‚E ÐkÿÉzþÂiÁx‡µòü7òÚIº¸Ì œÚØ¢EèulªãÓ{2Mq ¥½Ÿ¼.BcÄ8"ÖœÇ$6°5öhUá·‚‡Ùëöéb~ˆ*ƒAXrÞ"ý¯Óé¥e‘š¥…0%ís0 —¥ö_Mš]–Ldcš¬&yo²Øá…Ù¦¬4Û^*’èó!Ù4 gÍ.Ë ‚©×õÓrúŸL¤³&À~™ šù²½óK^˜ýI†©o®NبÍ)sÞÛéuqê› —¥±QÖëéÚ@&PªZáˆ0¸Ñ—ëçSÒi‹\.é*-l^gr¦Gîq<‹ÂÆfÓÂÛÔ"³·Lz6NÜ„³ L%„@Ø(^Ç®ÇDÄqDÒ“ŸpY›q‹yþÐ8œ4ð†Ê¢“VŽÕ)±ëÕí÷A‹2 r£S#ª2)bàA:S¢ƒiòjà܉­òR+“w²ªðBºÖ Áü„fƒìŬÁrc²Us®P¦öbºGËÑŠ+‹Y)ÑSçEñ¼DÌ™˜¼‹Só?Ijñ„bçdËӓ¢œ*}ƒ4"—…Zeº×Í.ëOâùÒNâÑ4A'o§AøNc4`¥´{ª"ù¥JŽïJŽ•Ê0‘”H9Í(0AVÍŽ\œ1¼r,²N9!:¯m)^»æL[”[\6†Å²ìI¥´l{*ÛÁ€@¾T" …í§Ë–MhMU;K>â97¥(í¸êóšÉ`ímzMæ–8jÈ¡ðó9ØÌ6Y?íM•Ji—¥‚3ºäxæYäƒYËÅ Ç5Ïç²=/á²-'’e›’mÙ“0öøÚ¥ëÎ…¥Ð ‡$ŽSדmέ€3¯üL1Ž@ÂëµÂöïútÁÇñ1òây"ðÁ³Çº}~j ñ¡."qq™rÆ¥Ô²¡Úh¶ùȄìà`œÆaGÏ–¨Á 5Î~Ålk(ìé½ÎpvϹZ9‘O|œ.‡!ßÑ*…ò2Œƒ(ˆŠ] ±@8ÐÝè T¾pÜêbâVÜê4à³oÀgNÁ© œ‚S&Š¢¸Ãl›¾’×½¦×T +ŽÊ–SÁQ›¼î’ÂFè ñÂìá…Ù‡ ¼ñ¼ÞÀ‰HIâ½×mà /IüMD\YÈi¸æ¦é¸²ÓpÍaÃ%^sÓl¸$—/k¸#›óîæÕê¯Î[ý¥k^7è0^iTZ#eV´iUáó"eV|¥•X//¥§o7€PÚS•¢{]ªz Óý&ƒ¬ÍY:(ÁY…GÁA¥0$*ä!WYmån–\¨R!Aä¯\Œ@œ*Ärô%ãòjÈ,T*|5ÄSÔ>È"U¢jáRë½¾Mƒ‚¢1‚jH"æœÄ””²¤©QBª€L’ÅëZ“Biq©¦öRû˜q<:‘ü4$k‡!dçJ O—¢€&´ËáOJ¤ÙÈ ÑA™v¢)]ÞÌäv¾éBQ:šê\(Ê¢³ +eØE†]:ˆÎvN†¢È0»@„…ò®‹Ç­¾Ë@凮)( Üa¶ÀfN™ p +N]^ÓÅôŸM^÷g“_S¢„*p“SÁÑ×ôšÜΡXÖ½¬\ÖXÖzÝ tÝ@s„š#/ÌF/ÌF]êºG^‡ðÚë6^ÅÔ^IâgCòi>®,øœ†K47 ®,x¯Óܺwü†K4·ÁÀQ𚆭,ønN4ó†‡ožY\¦gšÇкlºP”ŽÁPD*GØG$3B2Ë-gÎæ—pÓ:p‘Ù"‡Òf~!¯»ÈXD÷‹ì‚ŠzLÚpá [T\‡™ÕbQ;¯ƒ)³ÒªøJ+µªpÍHùJ+ά*•þzúvoŒ¦^D‘&þ©àè!LE»2¿ŽÏëN‡âµ7ê ½Qäâ‚'˜K©R1m<¢”Úa?† „.[,\®TøJ=¡G=²±lPe%Z•‘ˆ ãAîD&4„Â2‚(7À›¦—¶¬X ÑXè(6w¼r1*z]J…:L J™"‰ç@Hæ¡Ë,+”=Ì èòIÒœA3HÒ蓤#ÍJHOš嶔qEZq…C™“‘É"Ž3ÕlÄdn™~z§TÅx†á|`ùÖþ!­†~‚¶jšôðM‹VÒi†Ù°äß‹ù•à5ô¢öW,»ÊÑÊøŽUu +ÀóÃfŽ-†+d ÷¸°ËyLN€Ï[“Iã¡ÿ}‘«Éð.¼C­ÃivØLU{ò¥ãÏõ^K2dJ¨×\wNüØ'[ÛU[¤$Æõªˆ>Ð%@~ZñÈØ^7ê=di{ý' >08±%nOÊŒóÝžÓP­¡µýeOPò–’„~]Ì´@¾N59&)Jý'ñ P·UÐM6%ׂÃorûß¡ðæ$~= N\@î@ Ç‘FôrFº[¸4s7«=-«eU5’_}LÖ`7LsDØ"™0[¢ù‘5šT9ú+Uœ7¼†ÝIïИ½)óõƒ×ª/TgH 5Žº Ørýo-é.þ4h7½UjÓ࣌¨—âýWÍ sß–‹p¼šÙbDQz<-)"i5¥ÈÚˆônA°—ºiÝ +ÄIML QïhCo "iŒN~Ï¥Õeõ’ÓþN‚GI0èfåO,5Õ ³I~5+¸9ÓTŒÙËø2U¤à ]¬×¶ôåZõµœØñ{¥p=läœVü˜«$à-“+4}öÇ°u ð¢ñôFðÒ¯ö…µGê+`X#'8o(¬C^–E‘Ùê%f~”i¸0e°¡Ì’•ÃI\+ÿ= +Pì$oP ò«Vc1ô³ÝNØ æêÙï…:EšpQÈ(iE3ï:U:ëp·…U¼É¢inžœ•Þñô‚gà$´ÍŠQÚ)ž>iÙOQ]Ðð—FÅ8é_‡E” ž´™ˆ'¹Í e<†Ìˆ'BýÉ.Ò¦b˜|ò ©ø,65·ÊU)=_Á!žŒV®w«¯Ã™+èC ÄS[]ÔPY_בœ)Ó\õB«¹žŸ`¨¿l%‡Ì¢íÖ:³dëÐÏÌ0wï¨>1np0eß–‘¥æe„©Lð¸ÇowK]ŽþÔÅ\pssœ-DNëD=šÌùòÌýb"^fÞÙ¶‰b˜]±´âþt•ÄÛ«Ê=áM—ñÑ1¥¾ÜÄI.,€@n]F|`' 6@!|‡0>rŒ¶ÿ^É`˜õfiö²â·Îz9#³Ã¬“þVÁÉŽ[†€ßöo ŽX½ÉK„²¼¤˜£0”@KVX,ã³Ní¶†7e¬çx(³¨7ÇÕ(&k²ÃD +Tlñ"°xıÊ6¿±¾vÑ|–¹Šâ"r>ã:æ–1“ÓkfÄD,‡-NÍ×P¬Í‘0 1劚b†¹nãI25"r)–GlU"iÒ«ñ4ÝEfÚ#GñgS Zqf¹&ž³ïø¼¯ôxZ<Ö ¾ö‚4!(–‡²ÌÁýñÄ•MÚˆX§ð;ûßTã H¾XDb#Õã‰íKÅ£jÓ?Ö~{ÇyTšæ{Άl;«Ò—dÛ,±ë*ìà'11Ek6¯¿rÆÁÄëñRÄ;yi]`  éd4:˜PHªs@!ÄÔ3¥$/vÝ}²v)•fôÈF¿ÏîÛf‚EÌû†™A/žº 9¬ é:iÍ Ã_´õÿÑ‚•ÝT(§ŸXÄÙÝÕQ¥;¢rNî»”‘–)/Ïɯ×ß¾ÒuP4•ãfƒ-rQ¼%à €£ãî°êžÓ1ìêuÐêꘖ‹ë4¨‡ˆß(ö&¡)¡aÜ}G?âè#À€o`|%Ò"¡þŽ–0üƒn¬šÇ¾DEõ!Y\Êø=ñ6R‚)â0öúU'Çh'K nèÔµJ\>ù©CzÀ°K°€&ÁøT;Í¥J‰lv8œ _TË^|p@@é5†§É2o(ÏûvAfŸ<Œ¾cWÞòg0ìŒv†ÀSN¶Pק3ÄãäôbT‹Ô8kK9!ߟ÷'§'‚R}§± è©n¼ ›[1DXFŸW \{"… úplxXŽ!6*ÆFQ€E½ë·T œ½UÆÔÈÝ(SDO#¼R¸ûü¤¹‰-´¶wñXΖªy ~G +ÿæz¡}7á†ø¨2–¾ƒþý]%˜ç„<=(µinžêÈ™üëbž-пÕ5o 4Édnêì}ÏEœ”…enJÑ‘ÊwSúknbº ‚Ä¿¬u¦´P}\ÙŒb{Ú²“à‹º’ã_ËM²;.&ö +vð¯I–á•Kÿfø‘³9e- þE_ÿ®Ý¬sØçuA.~Úÿ&Âלª?ti'Nü«{ö:§ùU£1–~îzyË5ú·rÿ-ýsRð¯5V&€”.T™LNø03FAú>(dΙ–ÿæ ¬,1{Úh-ZI×¢ ñ%Ë3¬9„g÷o¡Qm¬jê0¥m³x33d R3ö¯5×Ð…'ž]=XQ§øŽ…+ßí¾Çw¦ºp€ZM¾”1£Å„8}ü‹Dåo +ªå‡±ÚG#=ÆúdkÄ+^¯Ia³³ç;¥§ü©òè.¦£Ý=*ûËGþß¿£­·ö·1†étÙ%$6â'YUš|%c*sÿ­µG'£ëž© +²VÝÃÐa˜YËÚ$[Ï4IqÔdZRkû÷²ˆÛ"Ê]·J\T!Pç[Kˆa‚o"í ¸Šá:ÑT´q,4QÅ‹d!€6“Šÿÿ9¨²òÁžô}ñ ÍQD¿¦ Ο‚cݦ^¿Á˜rÀuÁ3L¡Ò‚õ@ØY–Èý`,ਥ^aÓCcrHh0ó*DÈÁ3eí,y+`mûCôJ#/«‚…z²å2JC½®aÁ¨¬ïCê(óô9Ô‹ž)ð.V™4Ä€z%éT …‘nA©ÓÏ€zç-<ñ“IRo¿‰á4⸱‘âS<öµˆ½ð»zšj IÀdŽ«ñ±Al,oúó±‹/izÕ²i¶ßŠ+ºßßMµó hÖfâËÿ +ƒã‚§FŒ:Nl}›‰ÏŸp66æ¡RÞ8‰·~c\@VŒ¾|é¬C£¾D•¯Æ8c€ºK*ØH¿ý,Ð(ƒeCÒÉ] +ôËÏé÷DЫ’6Ÿ´\\³¹ÁX +dÎ4õí²Å@Ÿ(ÄY –£“ºh}wÉŒ7èÍ!YKöVfíø…þQƒý¿»‡³“ù%kJ¨úÌî϶+÷¨~Íyí¢ﲤÛuÞ]­!¤­ ÍT7_ž xWG®õ»0®Î,Í"‚ôåàóÝ]ô¸@üµxe”;4ýIb‚H^ZbÙ»ë^âËÙC,9×Δ,(éÝÕ€è{%=áÀxWD`¤&†`¿»ˆs«œr+‹±hL°êðûOÐÈôðêÈ[gøôÍû4Cq¢¥~­E3Fâ†JŸÿt(«X99@‡r í5Íð×lŽœ0ËŒÒ.ÖJKì™Í´jÙX‘j‰‡-3h~{“ðŒÂ\5èÕr +ÆŸ§†>n0Ån;Ã%³(•OLñï• ŸMTTg`nµË‘NhÚ½RèºBUq]%Êöa/"„[®‚‘)ƒI¾ |#n'†>Ó;ÎliʽcÐÏŒ®eñN•}i1á´ScRžvQ¯Û­ŠÕGà¿g‡O Çâ+‰&IsU‘fsr[d_ƒgX¾{î«çÙ0u¸ˆÁ¿vl¬¾ M|ÆSsÊn$Ðýh;9Ûv¯°`¤ûT;`#˜¹]+Ü8þ§×µÏÔßɘÕ·B½È7ôº4~K)–lI¸áC¥µzhË‹ù}Ðë·ÝÕõ ÖÁÚºñ0ªŸ; Ð]ãÞ­›‚M®¥’5ìê¡’ÖvË{×AÑò1J«<¬zçÑk]w{cë¤Eˇî§\ͧÌ/Q¨3šè‹&²íe³í~õ­I½Ód†L…DnïÜŃ(Þc­YÑ×çXh_mL€wôTÖ4¾ž´§¢'=h©Û@òõËS¼¼MÉ—Èv´¡åUT¼ ‘vp±p·Ð´ùžù®9 ¸#¹~!(Óæ•ÐÏß`D¶+èùXèÈ!¨¢zHò¡J÷êßÎ$37f™Ä=zÈlÖ_ÙϨöÖP+Þ['NpP +€Pz \ ±½ì ”_Ñ¥õè^Ä^ˆ;SMÑ…±eBŸþ¡¬>ÁïG7A«d+SÞ²Hd@«°©dUr8º¾ +·¤\b¸…<º¬Ä—…wEw‘/ýΣ{¦ï¢›¨îðD¿Œ¾‚}÷v†¢÷UËê@«ÙMØ.dÖTmÚ×ÐÛû™VÒ‰ôp€¾â.Kñȹ fZňËÀ>CDÂõ¾â™-wFìS©b™EŽ1W|ÊŠµv˜AqŸ¡’aºFÌV(FƒGWÞAÊ>rý×|x <)ž"/ó®–Ë…®¡Är(ª(æ‘+&îbI UɈœ—¡Üö cf >ˆ¼FÄßp#ÿ'\ÒÙIuÁÄAÞü|I–ÏfÙÂÔ†Ð+C&†7à8G܉Çô¡ çCæŽdˆžHÚ‰})Q_¯ÃöÁBÅ€o‚›)’!ªýE0úØ‹nß±Š{$Ím—XÙÜT{yãÂ95ßͨb…ïÓÜLÏ;’šéúñ—4ÌëÔ±Sô·Lö½ðÀêê ¡P…<äÝeÕ¥Š¨dÿEî½%4Óä…æR ñ1&ööR“À++G6Y¨MfK+;9bA΂ß.£gÆ”àa×+O|÷µRF>2eŸƒDsè¼Ï3{¹0®lÌew*[³@²­/Ϧ Ý7Ó•ÒÎÅXÛÎ3ݪ@Õç÷hóL€ 1]•Â«”F$ÓU4TµÎØ,G5¾Ââþ*õwÐKOy]ÓÅ{¯¼WY ΋d¦^Uz§:Dyò}°Tºö䉂éFCF¡)Ž(fžfI˜¹­Ò¡|d-ùg uÙÁ;:*ð+ pùyÅʪJÅ~.Æ—žbôz‚þ·ð ×þÛ;q¦­z¨iža9M†[Ymºú1«1@-ïqÙ]ÙºlÄD8‹„œ¬v¨ÔµÄ8¼fO€ …Ã3ÓûH8Ù,PùyR–Çœ™Þ·!>1†¡{<*ìÚ¯õÕµg³ ·ÿŠæ¥¨fz¿ëðBP/ן]rìæ‘\ÛQ â Ê­“·ñ¸½Ç°_Æ<ßÈ̶Ûç|Þ%?vó äÎìhÚì>wíLlÃ|Öb,Là5)OF2·eÜ=”ŒiÒ…¯s9w(ÚcŽz +q¤E)¦Æpô›²'÷/nê@º+´Â íôÊ@ˆf#± öÊOE`¡QÜqbмkÁeße"àöèK(è¤',‹Öů$ÇÇd²îB|cÏ7d‰ÖFk¹$„W ÎhÞËÐ]«çŒz=WBºøü^Α ‚4ïö?GcúŒïf¥þÑŒýª÷®jlÅ’ü‚º 5™¿?6Óûž•j¡Û;è¢â‡=t^pÙ2!|Âv¬Ò’­³½“g̺Y†U-€/¡b ê}e+0“¸¨ÔÞZÈýØiZØQJ­)0Æ^ þ¬§ƒÓІZ/2²ž4;`ˆ´ýµÓ|´]X 7{ó³±.x1Ž=Ðݪ©ÀöcÍ®LVàþ åº:kC—ï¹0(o?€ç†{5bã‚ùOyë;/1ðt³Høp³Sù-«†Ã?·caÊgÇ´r€¦ÇÍŠ_óç-‘©y5]NàX+^h^Ö®×ðšê9Æ—'ÎÀå$à~Ìb}m#ÄzÇxZò¶³ÔêÆÂ2õ€Á†¾ÖÔ©g^N¡qFC°bgù“ |dFh†ñä¯æé £x-Ã:jüÒÎf¤‡ýÊ…–£%"â6ïø Æ]ÌœO Æó¬RNCò¹Í`ò%¶?îtBôC&Óøý& ñµ,/a5™ÁPk®ñóþð—9íÄ&”•[3ãôp8_Ș!ö×3ˆìÉ”×®lìHѲªÕ¤Ë¨måjú²£‘ï!„¾{åБQaT’J=Hr¼S[Y^%ÈØÌ¥D×áiXÇŒâpþýj¡õu´Å‹À…ÅžuÖ£sêˆå2GÃÏŽñ2wM0ÿIÒ[Òâ>¼ô6:úÑĬ¢êKÄjÚƒi"2Þ‘˜÷41Ñ¥µt©© n~ž–)µRvFͲá“ 6¯F€ó¶` Üþã”4)!¾XGphÿŽš›ÍÔ¼®Âñýl¼b¿«;˜~r>œ´¯wE³˜ã UCŸŒ£ùû^‰Z¹ŸPL³!+ðM©’¹¯ …¥™Æ¨›#T A©Ÿ:zΤŸY"¤”º‘ÃÍÆy “5Ò‹e?QVQg^ó˜ùÀFrrˆ,QNvžtÚû)—àIoîö.§àý0—>Ø;xÀ°ö}|üTš™CÿÞÛ¾ÃnzšqÄ5%zx„y¿1ç‹x( B³, ëPï+ˆCSÆMf¤ô…ÃêO1‰öæ8Õ]¡üvÈ)Bçæ>·ÐðZ/w’Í)§=øZ~ Į̀fÆ3Â=í;‘¦‹â1TgÚc/ÜF®›œ¢¹å}²¬ÃžcáÍ®¨ôc6r“ßX§êüȘ OxÒ‡÷ /“P:·¼¼ßgPûÀ~Á»þAþ÷«üÐeâ,Æ +ͪmYgãùó7“¥¡\Ö™ieÁ—À!ë\Ʋ†UÝNÀœ8¸èfƒ×Ö]Ô£UfÊäŠd1P˜Ø‘wïC”öfôvè©|»×Ó•DäÀãŒNŒ‹¥ï°À,kꨥ`ü)‡Pˆ)­GÌÓvãõ=– RùYE¯úšWû¬úïìÑâvZ[ øD|ü+šìNÝà'—Œ!jðpõªj– CPrM-‡[ Bü¾œA½æF ÍÖ¹6ÏeQkåT_ÎcHk¥ýR€@\ àEØ2:ÑÐ:Ѧ9ŠÉ®Ûe zÌ}ý„îÎ\rýVê”ÇÀ½Tðäß¼-;“&ØÄø%&8üªÇç©«F¨ª)zœÊÑ€ô£šlƒ¹6é‚Ïä“–=áà¤ófV5¿;#¦×Ng-\âÖ øñ—ÙÄ1Ç*÷‰SÔ_)YákEìxj°™ / “f]È!*6Îåhõ•tx1”ã.HgL/z›¬¶êðMö:S63 _Ƴ^‰ŽÖ¯¥»2µ/y¦¯ê›™x+q©ÈØìsd1hé4œHÖ%jøm~_ßì6e¿ Ç ’ߊ,Ž6Ác$>Éâ„¿wÔæU©'¬¢åúø$½úß ¬ÚÅ—Øbh,î6ö t×Sœ»Š)Œ6K´ªe÷Òd:õßÜrX¯äÖŠ4k¤ˆûЂÎJ­î) ©5òÆ 6%éF†«ÀϧÁµJ¶9çnèpILÙ–Á¦h”l];|ÛmàŽp#,ïâ䲎|‹ß@LV{xØlI/ň›©)—·£‡ó6 ªø5 wÚVÝ %QüÖIÅÞ½ÔuíyÏwâÛ›¾ß7·Cj ‰ýá~†“j[¯nÓj‹òjƒü ±ÇŸUO­µåoD¸^,¡)@YSÏÂñín×D1ÃRšôç0ƒƒ|àÊb^Èç“E Xªi›Bý8?åÖuk†ÚëP¶ô3D”y±o]†ë&½2¾Öéa—‡û©v2PÂþÒÈÞãn¶‘ÆÓ•GµK¬xiO9ŸF!H7…£Éó\]Vƒ’`a8‚òÃÓ +f…â­d,M_™±„t,ïm/ ÍC)€ŒvñHzA¦Ý³PAÌoæâ½Tqðàüã,M¾‹µ LÞEq—nEÒ$Rß ë6„š!ùEwr‚"ÑÕIs}pw5²!ô* æ Щdš¦ASª5V Jƒˆ’ UŽ‰/¬¹;Ls +X•×p¸öøþ:æ62òà®FˆÅMtÓ7\­‘I±¢±yÚQšt——ò&?Ìï’–}ïì¶Ï±ˆ6M~>‘—ÇÑf[',á9\~QÎåY6=Øè› F‚xJ²CÄ@A YêP•9ؽ=…:SQ|… “p_kû΃ÏNqìáƒ;Ð[ˆm™¨ÁÒßúìÇ5œòТ#§¡‰†(‹² кà鶚«äÜÚ—K—‰Å2qsĨäà/m°Má£\ÜD9@ÿ*JN>–5sKRÊP9…¸ü;sëù ¿‘RƒÎ†8ÜÅøóKÄw&{À¡…%\u¨ÈM|Ô]èñ˜,Q&!lÞÚÖ<$/£s¸ŒBŽZR'grbq  +Q¿yþŸGŸS—·“2ð©ÅõÂFA1Ë=”ãÉüô6–æøÕ­„“î-ÌÇv4Q/~ä?dÿµ¿àŠ&Ô6c5õX‡¸²éå®xësÅâÉ‹×N/ëÁ?Z1yÕø /ÔLB‡ÅªÕtƒÍjwyÙ€?ËÓ ò€®ÊzgÈÅ 3"”NA’”y°mÚ…,·Ì¬ðÕ6¸2«¶7Þ¬°¨‘G;Ũ¼ÓËÒG‘ºÅ¦VdÇ’^чÍ‘âQÕk¿ÍyØã²±ˆ·ÎÞÕý4 º×€¿¹Ãá¾ë¦%«H“)  jÕÅ #ÔÆm\i€ëä$Kós€S ¿ S€Ñg¢~W!íË4·úaã ÿtîãhŽLØ}XAÝ_ûˆõŒr/kjkeq½ø2[Ùœ÷5lO|Ö N¸Çt„ +ërÉPêp.m®Ô„Û1UM…&L0=;üˆ¸ ‡¿=fJ!ûò˜)±4ù„šp•½ Øí +Sz‘‰!¥†Ñ,sÐ ™*­ÆF“ÇÉæKaV룅K[Ë÷é+Â9¿ E'áMš—1o8ÛÎTGÁl (òu,þk$½±\YF£·QííY¸K̹Ȥú ÌDÓV£+Ôžà É"d;4\“Na§Ÿ¡æÑzv™/­^Ññ©#ç|ÜBša˜ÉnsÕ¼¢…„¢òk–´RÏlXŠ3M¢4ýèY©áï_u³Å¥<ýÚPFÁ>íÄÝ&®–'%´6¿‹WijeʺsâÚ(Ù9KZrQ¯kº(æ,é#ÇÐ.Í-0ö‰ +çÖ`õùª$Óâ8ÙÆ^Ê Ïdà÷gÔ5“…%¾)—Ì¥TP¥va1Gœ¶¶ˆ¯°RÜ9Öˆ†E öÈÖxÎ…7H‰AÊ.Õ]‹¦¯Ÿ™x ôi—9f.ÏØž Ì•UÚ™u·”®åmë7HžG/3ßÛq¹`ñÍæéeñat1ö’l­†ü±€£Œý³PÓF­s#5H)B`YF“;ƒ &#‡G×>ß¾d²éÁƒ‘çËW£YÙOI¢§ß»”"aã#O·ö„£AG¦¯u•,l&¤&ùcU¯=™*U%,EÈÎ’X&»Øß:”³.›Vêþq$?â”t¨qqòÇáUlƒâ Ô°wÈ», +Ç&×}²TÇ Ø>¶ŠŠlÒTE–.y†¸ïH¯ |ÂAVÓ@2#•çØ3 ¢¯JÂZ‘³`…Ž£DM‘.GGèrÌÇ 2Ý#úë¼}4±K”¯÷Ù±¶Üê§|¢cÓY’½½\Ú%¾Kb¹ÄΰNpIšMO +Bç»RʉÈ+µ2ÂÊ•Ê<%NóFÄ„( 1Ó¿FX99¢UùT¸ˆ¢ÃXœI_º`¨ž’p¦Ê¥aѬ‘YÔ’éÑ­V%ŠÙKºáîÔÏÈí"NFh&¶¡ËNG¦×²½A×Þ°²§˜F¶vòœé—´¨„ñˆê]ö1/u¶R1_gI +ZÖî­È5„˜“O«º#×ìRD“{;@çŒv²kfŒk†Ô2X‘„¸Åʸ# É@î½zÖ,.`©Mš{¢ÿï(jëîÚ5[6áüy©—”'"´# ?+½0õP·)Äzó¿x‚/[¼"Öÿˆ`}ÅàZ“Ÿœ£hFçïRë•)txMÆ ¬­~¡¨îê +-¦Añ½ýgK¨§VKÓ£äÅÀøí×ÇÃyñ”È 1½^y&‹¸ìÌ^BTLo*e…F#‹ÅäJó&™³\;OT龨zÁ$SÎÖœòdÏ:®Q¿[þJ/a{qÔNyî¦sã6©ÛfàÝr^ÈÀì“eI…“v¡Ç†ùÁIP›¡>oX8óÝ]Ègþ§` ìi×Áv'£ " +S›ÁA$ÝÚÂ6ò2ðÅÌ +tˆê[z¡3H¸w +ݾS¦­†+_¿ŠÓ‘?ÿ"û ù`Æ5ûf:.ö´žŽ /­Ïg|œ£×¢TwQÅäés‚¼ÉfˆF%¡–‡—šÍÍ·ED‡Ö Ä8Ülä8©Sie…¹ Éh'éÒÇ°qWÛ5´ Ëñ­t\k DCÕ²²wñ€éF–bÑ«-òðœûe…¶aÆlúU‡`ÌÊë˜J„KÌÞøjÒp-$r¼Nh† ¦§£Pó/èÓ@¬§ÂA›ú™W„;Ð~½•Öxås® ¨çQä4ÊûçÖnR¡èMé|ÝKHDëóº9}ó7bB¹óƒá¶1%Ê®§AÙÊ£%,Ù +º»üÚnùZsÖ²t5C”‚!æÁiåkØÓj‘ˆñµ‰>+.Ä™¥ô˜e”AÑ$ß}(ã<2+&Q/ÓÄD,/‰Òúcþ ¾ÀìÖŸÝømŒ©/¦oËðº,¥'¿ ¿B7ãï½aØ>(˜å¯.¨½þ§Ê^;´EKЉ4.Š9 "eœƒ9%Äûgrs«¡Uà3ž6F&†T Á¦µÐÎ)WO·N`xŸ¿bƒnhVþ +‹GÖ3ß‘ª ÿÛVoªHÅvT†Z:­1®¾"CôŒó Ü:Œž˜LÈçƒcw3j<ö~C£=ãÔÄZæ:mÞÏ}1šÐcIi\K÷ê—ú1A _ ,2ˆ `xQ¬®¸ P†awDÆñÎJ¬¸¹4¯ar`ܽTjÍm¬e?ƒ¥8ew(Ï2vË\àÛÎèWA֧à ¿D®¬U«vîTï½u¦ £dhC)+e9ÅÊTV ”ØC‚‚ÎÁ6ý‰{3:f„ÚÎ&4[]‡ÉAÅ2qéÉ—X“ëÛ"6ðzë¥JDçubÚºþQU¤# W&¢TÛxž|åªù¬ +ÊŽKY ŒeÄÍ›r¶9ÖúH4ð2ÒW2Q^„b8ýµœ¿<P…ɾr#ï*_òðw˜,…xÖãÏ +œ«¶Mè^ì@Ï u1à%½Ù; ÜËÄö_îIžy—’àrý¿6 +j{€d?L±enöuoT’€BËËP¾cm³zÈ``mŒšÓ+ÆÌbÉá äÜÀå#6¦œ³‚ê(ê­7üꬖUxÈ ÅSƯ– IJ:rΪöõ<ԙIJQFùnŽpûÂ%ï)ØLI„=‘AE>—†¨Î Ú[ƒÒ”»1‰‘ƒ…;[mò·Ô¢³<Ùø3+³M]Mm^ºÏÌj³øÃÅ⣣GàOñEúºMè9²fZEøЧ™Æ×d‘í5q“µ’¥ü|qŽ²3P¹%ê&óYý/ Å ¿¯S8zaÆÝŇ7¡8³ù¼–jb´´Ëðe\DÀŽrÔ¤i +7…9¶žêy7­¯?9‰g@p}dÖ%51;'á¡+½¿E9 + —7b½,ã¸7Ä'è Ǫ@­+A}]_‰Ñ“ò«Âúm7õ!m¹®¡J¤ÕÛ¥KØ¡ž‚^ +>qFÕ Wõá²äÊóÿèºyQŠi{VÉg¿/cB'|gå­à³Üç¼{7ÍG9x,¶8Ñõ@Ę¼‡‡ì‚÷ãꮆƒ3ÝW˨ƒ™¨JjËðƒRÛ÷pË:ñhn£iÔ½|åD¨³º)Äp=äD§û‰}ìtÑÐçÝ$¤/>CzøçêçãmŠ•ad¶ +¯éZ¯ ³†E¥ñ‚{Ï XÇÐ¥UªÖ[ն惑ÿdè*BfZ9ç±}]ÔÆ ×€±¢=?‰é^þ‡6“(ê%¶º¸>¦ç^“\9&çxðx ­? Ç¥øe‚<̺M'ÞT&Š¡ö'…RˆSVª&¡…ß(ØyÙ¢Ÿ÷ðe½(Øåæ£òÈü‡@6_/ )ïpH°ߪ~tl–=Δ¯°@ãåŽcÂàìÁó—ŠÞ šçêQLŒŽ6‚u3Á´I÷Íͬ6ür>k*²`¿|H*I§ƒ†_…¶ÆY(w9O¡ç,ñ­^Ï•œD©( ÷ð[õ8VÁgÎ}}€â ¬bC&—~j4ýv:›'ƒu Õ‰â«QíVYµË'kO~P‘“r͆ “-µcCUÑoÕþz +€àF* ÕC,Pk66Ù?ãq£.Ö½ø•,Î×(°.à™uçÐᄉ‘ÜôåßàV¶¼‹!lû±(aâ¥9肤҃™ÃA6 „.¨ª3a°¶M/`+"‡Û°ƒ. rèŒ×¯yà“(þ+÷¾D‰ ”ÐRâÐÔÀÏ>Ä¡·[>º#Ü_7~¯T¶‹ŠØu¹]Êœ/L(8L8\RÖžT‡À±2éd³æ%ÏqkaÀN&ã¾DË”~.ÁX +¸Ó³Ä£~º— sžÍN¦J)èاê6'ºøt@ÇE§V¬+u³‚s k'Z§$(Š-Ú¥óf/z·:‰:¬o$ |ô¬4BŽ‘ ƒ¡`Hrñ•/ß®å¬//fX–ñJ¢ldÆŽœ<80/óXß‹“y?³µ‚!›'àš‹;˜@‡Ž÷ÚÒdFôz8á^®C³»ãi_À3Û*áq‡1òüª"l +¥ÖæüÊ ãÒ~^£ `¿Â©©ÏÜÏÈ“\DbÝT½>°®b#BK ß}@ÿÄzןü¸ Âfó0N8¯0Ç™ÝØQ¶`PU¢d¦PÔyNvãeÒ=¤C¸ãœÿ³C²•›>BšY¼é^áŸDØfÂLÿþ}L.õä:8§ÿ‰lP"yàé›ÄÄŒ¥þht˜–±²é2éÈͬ·w™Fɧ¶îˆÅ]y¨òÉ.2Ѯ雹5»¹’^Bmøøÿzœ¥pkîàlüªB}Š•Æ//ò„)n.˜§)‰²KYM,ÒÎ(¥U­[FëâuЖçϱ;¤cÉ› +…ž¥<àœ)VÒžR .Á­uïótÖ/Å ËCY¿ +Õ•‡#HŠÈ©[}˜:“sµÛ®XFË°žiŸNKIa–9Å~]žde\c6ï>t†žáÂ䃯RDí§!ü2¬^pi¢)B²¢j>Â?áéeÊz2å=¬Å¥1½Fã˜Ó_‚ªrŠÞ^.SÃîF®©nñ5Îí›á`Ýj9MÊgSE&ä[c=¯”iÖQ°¯ðˆ­ÇøÊåƒ,ô‘|ç…Žò­°ø•·W.lŸ)´©ÈõcO«xœõç$u©BíD£9ìMUa¼73µ? UF?îùó(sç¥ÄvŠÝw%C‡—™÷¦ Ë|¸jä9lT4¹Zèќˀܯ†ï–A‡’b¾³â5³,d`/ã¦)˜s˜Ž3B[ —ƒN§v¬èRÈ*C£¿EÌ£KÁÜ°¢Yq=­ÍPO^ ò=€6sRùJžs¨Èi)èTÀËʮ۔à‹.EͫǪpéõÎ]¨÷ÀÍû(_ºö†¾'--¢>b¥#Ì×û˜ðÛ²´†ªŒ¬ò +bÛÓ®Ž´ 1x+Z°o“Ú¢“Š~Ó¬K/ùe‡d Ø–^ +ó5uXçÑÙNê%ÄòzèH"Æj”@þ÷g¼óÞQ~è ™ zÌz1Dq1 +ªðLá0À"GF>ºHÀjgP1âqX‡Ë'Ÿ8'*ðÓBež¥—™F€ÂÞ&£á[vCMèA¸9™–pq­·‰ËG{¡b|m€Ü!ø¦&~Ú¾7c6¼ÛëVUh”S¡$$ ÍÆXûÝ =9«Ã(æ}óãp+2„íhàbP‹ßD’©ÕíõDˆ´ßÕ>F—|ÎTi2Z䣋[rvUnN9n±{á´Å¤ûÐIôÄ•º&¦8tÀîàDÜ´ä° ¬uù«†x´µ¶*–tUÜ(¸> 1Êp°ÂßL@ÇÊý'¨1‚Á4íÅt‘ +ÇÌ¢\úà(ºbÄ:âÝ18%îÂQñŸõ`GWOo9¶PÃâfôæhÚR&Ï›yÑ…£¦l‘oâcUºÕ²¢4ط̹m(gñ„Lâ!¾ä83-[tíÓý/:"ar(j¡É»D²½˜Ž ­4á…ï•3ç£ÆÔ]ŠÊÛ“º›û¬WG¶À‹=p¢Ýè¯-Ú.˜_Xô²Gu•îP¯B=1u‡<¶º å“Ål=<Ç{Ö ® ‡CGÆ ô–Å£Š%ù´Tsq^bA¦¬ÍV¦ôŽÀ&q«oŒ‘ ~w. ¢FOÑQ©æ3'“ámvœL£†È„ +SÓ»T›¯éuŠÙ§) ƨÆï®P‰I°òR¦Ñä‚$â ¼³ÎÜñPß~½#~¼ïwÍÂ7wy1NràV‚r7]I´—V_D5ò›ÞÙ#vÑw/Çëí'$ÛIèÑC- tè6+ þ„EŽ §WV‘{©D=@CƲ‹RÓŠµT»"ÀMDœS_D…ÖK?_ÅõqƒRXd¡‡§›<×µÁVÀk©ÄÀ¢öRã‹B›øÃa‹“‰eѵâÊÿn§½ë OæËìþPEkqøÑ>†O¥ƒQ3ÒXÅ`8ªÛþÅñ¡¾wC`ðc ìЫ¿š+D« ƒ´VŸ‰¿„SkÆ R"ÌåÕ˜)#ÙùÄ8<!,i¦×Ó<:´`BÁ«g/ë©4:ÍÚ/=÷hàð1¾k¥Î>£Ÿ¾Š2©9@Ãʺ ½}2ê¦~÷Èiùã%9„ï§%=Ï&£ÿŸ~<ôŠ›8©ôbjtÌç©3*Æ÷žƒhÐP»WMù~@q¯4ߊ!_) $W9`Þ\ú¯w}=‘ë©»/NȲ‘›’’Ùõ¾  YÀëð”àuð fÖ¹çÁ$ÐȃI ‘CÆCŠ<œá!/k7t¤7˜:Œ”“KÁK P)À°á†ŠŒO6( RŸlpXÍ4èDLyY + `E ¬à“ BL¥‹ +CÊÈ¡Q.‹-ˆP¼/+„4'±œ\#'øò ÀÁl6…#’*5RxYHÑ)”(…£RA*B´,—b#˜Pròp@ ÂÄh%„ÌGB Š~dÎÓrp¥¶+wYŸš¬QÕ‚nHA ¬0¢@t4 a¨…‚ èr¡–Jnl>„£ŽË1‡• !Ô¬&|^V*"ÆQ`€‰¤&> *ƒ«í\Vç&KD‚ÌKÃhK´òY•*ƒ%G>‹K@£àÄÓ2¤A¥:f:*£Ã›|¹pÁ`’ ‚G áAæ"xÌ…ËHRÓAJ¹Œf0˜:q\F—åY@ˆŽP +®Ž‚¼'E +3‹±òØt&ŠAG´â‘SA§¥5`®P‰‰%@~îbt7£ ñ0*¤Ð‚I ¶,²¸`ÉPØæဨ(¹`ØODŠé¥Ä!Õá$ÐM¨øHQ20 /'+‘ô8FG1`‚ ’ÈÇI£SyIê8R<4¤ ‹•8(PPBÈ…Ù|¸ŒBÌaåD:¥ƒ<Õ¬|RLT +¨¤lH˜b›ÑyCåT 5&4˜„à1¨ø †O¨“J¸u$<ZB”ÂAªÂ´º +Ìø0T˜4lN2&—•[@y põ!0RàÄyRÜôò0*a¨•lTÎW6—‹z2 H aÒ~0(”6V×ÊL*&¤•Å†I8!3™p4ÀFÁ ã¢ÀžP2,  +&£QH +ÅÁ 'Â$P«ÈjY^8Bò$M^ +LPd"pBQ3! B ˜ ãx1ËI„ÂH‚¤4¤!C2Ô1`‚¡Ï†4NFà RÎ$DqR¢.(Ÿ¼¬„ ù¹ZÓ†‰fÂ[Jp˜8ú8F £k ¦áÕ-+Ÿ•ì$: (dtVða2 ú1ºUÊJ$ñd%’§1:F1ºQÆèBlB@ÆÛ˜¤ $uÉ1:›˜/P›ð. F£¡ .kT‚ø$@4Ðt0èQFÇP$ +†ƒ¼¬P‰4:„Òè<ž”ñð˜0¥>:N Ï“XJ#+71àC‚*QP(ÐÊJ$J<ºËªÑX8HP–Gw@p0ÀÁ Œ $u¢–•H†RR$)zùÝ×¼ œdT.FwY + #'Œðx@@'HCRHˆ8¤h<Šad±Á±R’¨hð8NöA6£RGMƒÇé²6g \tº:J.ΆÝbÔAsÒ$ ”!á4™F +RcÆ AS0f,0 nJ Ä`S³1ž’Ëæ².Kæau—UP`0êhŒDËŠéu,>Ó‚ÅKˆ:,=.&–.a L-xY¥’< €°•LXN°óâÂQ’‰I}òcUÊ(}H$؈$uYT†&TDËB-Lh 5Cu$n˜G£ÄcåØ’DQÑ”nB$ZÁÁ/«T€$Ul1BxñH(±‡)]Öe9EÉJ¦â²*ZH¨*!.Ë@Tšð‘ö@(Æ©”BŠ‹ÑA‹ GÉf¦JW'–:Á†‘„•ƒyQ¸,—Ë‚ðhÐe]ÅÁHÔ!0y6F:B›ƒ‚˜"ÊÅT3‚w€"ɘR4L7«KJ!‹ÆÊŠ+@G óÑ0„—•â°r4"#¸r* 6*wY'*wY—õp"YHpP}<TªËÂXé”é¢Iˆ80ÑG[ãEDuªñqiŒ• $D  ‡jÀ@%(,F7\2Å8„ð²lôFgýõÆ—¼hÁpþ¶µîeþeª¨˜þ ­˜O_ÇfÆŽœFlÞxê¨l­ +ήX0ºüùÏó:Ùgbsà jó¶÷ËŽ‘ÓÝ<yŸ]âíÿ³æíšÃIæÆF~´*8‡­}ïØzЗó™¹ç:¶LŽæjcnžß˜gûKÖÿ¹9ÿì¼g9p²ubû¢.*kÎi¾ŽéÉÿ¥£^KÞ>÷ÌöÎßr3¯ý·Õoó¾qòt“wõ~ѯÕ[Y’œL´d(8äx¬ +NK‘9]—•"s"A´H¸¬Ëº¬ËŠ‘`*ú\„:)†ꄈš¨ò¢NË*G*T*V:—5ðùT€F÷¹,Ÿˆ‡À;Y‰Ȁ쉀‡Ôy<"žƒ‘›…ƒËZ%\0˜\VèãbÀ «‡æ©,8`^fRNV" xA¨1ù¼|ŒîXF Уr1:oˆùä Š ˆ˜aU©V1 ‰ 1èÔ`œÜ\%{²á +*UÊIÀ(yƒÝ©!Õ6£† +æÓà™¹I¸¬››•¼ë ¦¤± bÁ–”|¹ÔõÁ(EDÌTŒP +’Œ)è`\Å; +‚h ˆ.ë²®ë²HZBq©Š9ðÁŽ&âÀyt<`lÀB0$P Ɉç¤ÓZò8a¼<*@ÊjTHq°è ‡TÈÃÃÕ'5P ¨&ˆ¸ÉB…‘¦.ñdŒŠ¦bÒr’1ÉTx@àÍ áÁ‹&a•PRÁN5 Î€-¡ŽÍÁ*eKö:J7¤’D]°4¥Úä—u*À@’&ŠÅ(#ÀK‰+¨é\E +I¨£0.Ÿsaá(uLSAÆiåh^J)’Å(ã“Ђ),£s­/+†@Câ Vœ7kjù\V[pÊ$¡`3*‰€< +'v12Q¨*J:ž™é²D—1já(}0E ÒX€$²À +W5# vB¼@@ºz †\–ç²H—µ¡€:(T†:š!#ê  © u>1 !RÉN + +¤.n¦¡rí¢1X &\xB(Cò¦‘g­PŒR—å7º¬eØ@uJxj°é¤LZ¸‘b#q‡ “’zH–O¾ÌÐTŒZć‹Iæ„ÀC^ÖÁ…‡IY€’ƒí¢r5‰>^BÔQ DÕ(q1i§ä4q«“ ¤¤2æá£TZ©…`PÉ‹PH¼\Öe]§ ŽqÀ†È›vz'£¤$ +¦ƒ0-¦Ë’!`£Ž¢R +&•9h,cL¶T1ó,U}5&—Å%Öe]–LÅe‘õBuh u£ © u +ܳr2*Ž’E£‰È÷„$ŽÀ]4FjÔ‚¨æ5%’ŒRÁ)…454—uY—幬RéêP‰P@‰HÀ‰€¨“ ŸŒ™”ü˜ÒÅôÀRð—UÑ%#ƒ©_8Eâ`@V0J'‰Û ¡ÈLÏeبF¥ “›‡‚‘öh1¹y8ŽËR… Õ¡ žÂT‚LJªF§ŒXÑTŒ\ã!SzÊŽ..Kåѱaª‘¸,†ƒìP-k‘Xn\‰ e€+ 3 §ÎÄ 4 L\Ê@À(´2hÐË’0]¥›„“Æè¦ÅÄb³`€yîD¤DP&d2eT”4œè€1!  ည ”z JR£a‚0I\Éa¦b$¡âà(E¸Œ4ãÓ%OÅJ$/kµ1raê€XqP^+ø\'™1á^LaÑŒ:LQj蔇4±¤@¦à!?:PPú° (ÀQÒ05Hi> +Œ:£—&bbZL—Õ1U¸˜H !‰ƒ(xX¹ QQQ¡fRd8X!ÎÈ" Q¢T†üD@| >Í0qÂ7Áš¸B8Q"s!¬!Ù4 .•@VóZZÍ4\ÇGC¥fÖ å_ÖŠyÎcÀeÄ^F·½|Œnãs @¦b4â¹Ëz•A'œ¨ÇØ´ŽŠ +õi ŽTÆ.KEŠ0 äñ!N„I »ø(aÒ‰TÐqJ#Ÿ.0y—Õ!\à¥àåACF²$C.$…$/+„I Ž pZõ`hÆ=˜úˆœ@ý{í5f±u[׆«(þËÌêÍœ]ôtdî¨­]6lïÖæþçŸ_ÝyõRÿZ껺3¶MgÜìºìûˆÈnÎß’5ÆwGý¼¶«®©ìÙôŸâ#kn®ª-ÿX¢Z(øžÇ©œ¬rº){÷þþ)ÛSŽþ÷bãn·n[}†§›ÍWûP±]^kž˜Ùí›/~ú6LewÊÁÞån¯}ªˆý›éŸ½í›ý'‡_1û·»nìN³aovs[¾ägwì뎋î¸Íð´›ùÔŠáxï]ÃôìþOEdí…ÝÕÕóöXžŸêÝõé3×ë¼ãÛKGEtNÖ¸ÏÜïS³qò¶õ5_ÿ–ª—Ý‹|÷œõÃnôý䨲ÿq³#cÆ~öý´P8¸Ùš7ü=½åÕSÖÝ W·7;û>ÑÓá©…²íü”Ó~=¹÷úØ¢rô‘9[¿?¾ªêcÛ[eÊ­‡­m™Ÿ›¦²b{·ní»ù³…ÂÞZ>²ë·Æº™©œÖUß[ÓkV>ÝåT å÷´—Ÿ½·Ö[æ!㹶WåDoìê¯ÚßÍøÙîñfŸ2ni¡8®"+îߺ¶?³ó"·mÉz­Î’½Ó¢ÐkäÛ?fÍó·µcô¼Ë?}w즸löÓëmüÊ{ÇÜó7‘½ñb¯çyöŸ³7mOŽc6mÿäs¦ŽÜ–ÉØósÙÏ6“Ù5Þ½7§¶cÚŽÌ©ÍÖ“‚ã«­ñÿ7ršþ+:»–VLsôÿüD€AùfNÎ3»#Çÿ>æqrºÝ+¯·edî¾×™œê­µÓ‘㌽}»ÚÝð–³uïµ6d†þª÷êéœúÿ¬ÛZsÓÄmݵ=sй5ÌõöÆ–³÷ÿt7›±U‘-'Þ¶«nZ­%zv b{à Œ.f6]ÝcØÉ|ÞÛ å¯¡&‡WÙ÷½a3ê·cnÆœœÝ|_çäönˆœ‡zÚ<‘ƒìݽ“óÚšÞ{6fînOÄlʼ;ݳ™›ñ[·çþ·7²ì¿¼Ýít<ìmåWv­û8•“ãMqŸï•5Îå{þW~öƹ¾Í›•ÙoZeçcfÙ”;³9#jr8‡“$+–^æc½ïÇìªÇür—c¼5r¸m3mª~èÛV5Ÿ}9msïÙö©Ãø.kŽV̵Özüú}ǬYñYÛ¯®Â`+£ÆÜ6róä|FÔ¶¿ÇúòS¿¶æå>g2fÌî–Íì.£zßj2b;²öhQ¬*ãK+¦1¯ÿ–ÄÆdÆÁ{÷X¯ñrÛÿlEÌÌm ¯Ÿµÿ•ïw{ÿôE• +LŠŒI*!A.<$¿1bczÙRSÑ%/(30‰*‡öß»í¦ðÚ_›)j˜¨RLn2`L\l\d"Xd\Th°‘ Ã"cbƒ¢¢‘àbc™O;¶ûm:çáµE|äæ¿•bSàå¶Åæóvdäð9žïê¢EÁIlªy¼Øù÷ºŒ9é¯5ëÝæt}Vmë¯ÉëÌhQp8±mÓÖìöóMmäìjãë ¦÷|û±÷Ü[&çós#²ó;·CG2â¶Î÷o­ÉËî·½,9My_‘»¡ó¿Ï¸ßî^ÙvŠÊ¥á{›½õu¦³æfã³êât]×e©°˜Ð`¦eÅQQÍجTT,Ž +LV,–JÆÅDµ¡Âb‚q]–HDqšy€AQ–¾î×°6mÞëÖì}¿?M䶈Ø0ñ;[ãþ½'k"Þë·dù­Ýù^cÜÉÁÖîÙ 6ãfê3íNddím϶qöÑÕ³>¦£¶lÝz‰»ÎœÓ¢Xnª­±ÛysîCVÎO¿üû?DñÎïlÝœ31ñØ—¡ß#ãÎóv¿¶—-7O[¯¾âºb¿zS½Å^ü]eïmz½Ïüܲ]KeÞZ{{7*óÛö?¿ÿw÷köžœlÛ箬Ùò»-²¦«îÌxÌÔ³­j2ÃöÙˆyÙ¹­µÓ} +W†Ü¹-7;¹7?ïÚçþ¿)*ç½7Û+*¿ÿgkîfª;õ±Q[ѱ[:?óewßêý±þÕ=ÜDlúßþSõ¸åfk~˜-Û2/¶Íãf¨˜×T•Y£>^cïoT %7ñÔï¼}klšÇô”ÓžýÍzù¸üŸÍ³åŸâ¶eÖümŒÞÝn}ÛvoÛnÌÛ«+Þî?:#~r[óÆ»NNv·ætÿüÆmÓ·é·æª6_ü[]^E¼Þý䦘§VÌm¶†èù¸êÈÚå¥÷zÛ×Íuü[w^vý^Öˆú‹}úÜúݶUo9éyÙ~{‡¬y—Û7eûšÍ¿Ì¶q2S»ùµ%gO[vÃV½nŸzúÉØ‘ýûÚ±ûäd7õmê­ŒÍò³á7÷²öi¡tnko®¾f>*ò.Kç𩾪v¶UÆÎùTÝݶ¨·|êÉšëbó?Î_÷z+ãÆ9U e[ö)ªŸ*G[ò¶ÖY»#>æ)rcc¦©šm}ÿZjcžÈÁüöúÝøžBYÿ’á»û3û)3né­ëé ïú.•1f¢+&7ÝÆ°ó›åÆpÜï󿶋Š  äý¦ìιíݬµ>íkúßžÿóÝ3³rò/þ¢n¶of­q*ë¦Êmke–Móü˜5ǾíÔDÇdíËìwÈ‹m]÷Qï²õûýb·u¾äÍg쇭íšÛ/7[7¼¦Ú‹ùM79;¹—ï°å±ÖÊì§í’¯5ÖZ±-»®æ'KWm÷üžÍÇÚÕBi|­}û÷UÖw¶ÏßÝm½_VlüfßÎÍÿ\nÏ÷úíËÝX6…i÷¾–ÉÚïí UUÿ¸‘µUŽ¦·ËÕÌtV^å8{þýßv2Ö;Nl®®·‰÷ÚÍzñù[¾ê>*çwãwæ\ÕÓ\m÷yÛÐo›bã3®6ÓmκM±å5Ì–«œïmû­Ý?[÷Þ6æ­«Åwã}Ÿ³²V}ïÇßeJö_îqÓýßäOf©ÊiªíùìW¹õ¶t–Ž®ÎîM›+'çŸ'®:÷²EÁÞZ;*ßi®jŸZ(œvدíÑß™ªr¼uîí]_v˦˪úÊ­-O×[9ˆxþ¯¯ê»Ë¨wˆÌx¸xÎZ£s§î­·ömMÞ[Æ}­ïÑÿ×÷Ÿ1¶ÄãM¼ÃÏ>ozÍzÉþýÜœwíš7vÌdæîy¬í>¦obú±Æú̺ëïÌ홳r|][=›®ó>»7æü¬­©û²ßüínüÇZŒÚ¨R†d™XТ$t + Õ$ÁC PÈ£B¹h ¢å€žJ&>,0“„A±@0‹Ã ¢ Š¢ Y”ÂŒ²â(HþE\e`9] Ú7^| –›ª†PËG=DîÛY<ÒDy¯pÿ#]8kG—ã!?bØG.Ú¾7Ÿ¿Ll߆5‘ª—TòèµÉV‡h…Øšâ1êßïÀ:ˆ¥lOó´^¦«Ä`(X……¾9> þ´òOw¬†PÅìsZ¼§X磞~©;QCu£h›Ÿ“ªÐçÊ x#'Û€Õ7IŠöÌ—L!È=Ü#ÕdÏ.ÙöÆÁV&¥¢TÑGûÁTÍË~½}:æýåk +@Ó¯Í==Be‰0Ñ”5 H‡V9?õ¯{RãïóÖ¯Ú9@J„.:8yã^Yaö5Bi.K£{#Ì0Añ5oèr¿j0#ˆ¨§Š¦1€ðøfŠ:´”‡Õò)ªÁÀv +õ8€ËXÀexµï-¨gäö@Ë>z…Mß–ÿ~Âf`f ŒŽG„©g;ëR@Ï)†Tè”âlB[üñ ÝúÅœÕÀ¿Eçe¬IˆOóÍÂJ÷ŽÕ­¢ +öit.«]ÊŠ;†:Ð;–:OÁjµ Ëô¾W×´õ öð Î/ÓÑ]Xœ(*4$¬Óž–:t?Ò.yßJ¸­'þd¯ð]>Þ;Š„\ %¨ñ¢MKÍÉö2>ƒ¦Le½ =Í…¦[DÔý6ÇP§G“ö(siºŽ#k†îˆÐ%{9óî—YæDÄŠNjƒu8Sæ™pëî\É7¸òEê2õ:sÈh±a!-$z=POf”bi¼jÚ[œ ´9œÅSAžKïÂñh¸rÓþ ,fÐ9ªv™=ÃäMQh­MÇíÝ`AMÈnqI“ÕËýáæJqåGKR*&*|- ‘@íƒ'Ö¾ÐYž±®ê]»@Y˜Î„=Ÿç_ž÷ý€Ý‘¢¤GkJˆlÛ¼ =$âÙ›ÖôD@ñ!¤H$L×M(tÊ:²MíGW"Û÷Qæ9>è¼ ¦µÂaª[ÚýÒ¯ž°l»Ih5åde‚zßzÛˆ‚òNŽh5&kï @ÀP„fÝbô~îKðW‹Cl’šªÇ9>ÝÐé§RhÙ€ïf§ýÙ^•¦úv §y’üà¨QøZCR$ó5è4½q¡u Lª$ˆ¬õ#+[Ýà7ý‹qŽßj,š“©å©v?'¬•{”é½/g= ûc«°ÇRØâJ£€ÊÂõ„:ZÆvá†ùÓ×wÄöi_ãkÜû¼4lÒjЪáy¤ÕŸS~ó7{Á,¥L+šYc‹'èŠgZ7; +ÒK„¼vß² G{E·x ­Þ‡ÈVѯйeľ³àÒ&´a]CÀ±Ð*H4FXÁëŠÖ Òèj8èx¸ÕBÌR_0‰Ö úZ VßzcÜŒ±O.ZòŠ;…çJ@ë^ƒxu†7ÛyÐE_Y¬Å—ÝŠÖÛ&Π'Z:"ÖÎ@¯EëÊ­7ÔaáðDï~Ùx ô‹7ShõÌ%áFºŠ£ÅD˜À/êJ„ÒAq0@퉶u=\3ˆó +z†J–c,|«ˆÏ?‹«;â pi=ê ™Ûnµ)â–øᙧ_ô4Að9Çw×u¯¹ì ²ÔEØ7HjÁeÖB"f÷±­ÙgŸF T%mÏDˆ|ÊH„"”Hä‘ëhú´‚H\©ü¨¿Æ­qB+ +¦Lp…M%ŸûêÃÍbš/sõaBhôù‘¥gH%Üo3+SaO"w6èƒ<ç+XÊbvuÛ|úÃ?ì¢? ¬ßm¾¤¤%êÂFfôjùTVºÃáÅ€܈¢×4à®ÖÎ?7$B]^îV$vÔ K׌Òù=ÅüÈL©FAfe•L9k°¸kY56£ìÖÊ3Y‹‹Ž]ê ÀOŒyy}ðU§O%È=‰Óú·×f%HßRªm0WÛO‘À5…· É`šüµAÏö%_묞Ûfÿhé\˜¦¿Ÿ7§q‘Ï#y¸Y ³d¿ÊàãQ.%VJÄ‘ÖÛXî67ô<þÝ»bÖ°pé}`±àù-Ù}?\¥>#Š´—±(ŸoRCAqÑL)©É°R(PL‘˜.âçHÊ£Ö¹7­„å<]Úw'ÂÙý]pù‹‹ ++YE¼/¥C~‰Û€„póåkFÿ)6+|{ Añuœ§»¼˜2‘\tQô„λÄO†€¡ÏÂ=þâùFÆDú›ƒ´`&+å°ã¤ä…wÞM0,8=›G:)b'`œ !ÍBYµpò<®ÿØÑôÿzvÝmÇ˳áŠùîZù°ÈýÞYHƒeÙ_´þ±Á´ º™0¾;¬ô7+~,‘Á/#8¥äW=HÛþ4 T§£:¡h9yf=ö¨ÙW. endstream endobj 20 0 obj <>stream +8Ǻ‡.Q§ùŽAô`NõÝ%}n7qáÁó¢åok²ç}ZÃðEß6 æ‰ï2 É%)`b”üÃ,JC1B¸oƒ¶£‘Þ¾€rìÈšNÀÓ;ç{§pÇÃ7·0D±‘@ûiDJŠ-¦¼&£‡Š–@ÜÁåw¹¦ÚÚƒæÅQV—z,P§Ú{[ËþÁztœ¬<‘w¶•ÍP”æ0¸8®¥”ÊVDÕ—*½ld…íäpAסUX„ÀƬ€5í½ò,$t²Ù£¸BðÇÓÔäë¢GÇÜH¢‡ÆÕ²kå¿}ìªH~¶~s±x<°«!<%çcf„Ý¥¥pçg]_I´Ž|Á`QƾݓW]%¯–¾ôÐ¥u-ÒËå†ÄtÒ¿{}¬ÔUN›©&¤/uÙHn’B.7£_ßÅ!Â9¬Zb- <±"‘È’­À»ËK‹Þº#Éë.1˜•’;7PüŸ[aA¾`î"6u^E±¢œ)k~1%ÙÚÔåµµôgU~‚pNùÒy*1M¹›ŧO¤2k¦Ò8>°N†¨M²rœ´-ÔÞ˜,$…‡A'éšEfŽÊ88õ€_ží˜£3‰ÚÕ|¨¥QôüO +­|ÎeZí÷tR0Ü>J;obÑ0³gf ÜAÛø€Ùš h³ü©KàbÑ/E7.¸3¢öµjÌ8'^ ¥˜§*R 5œ£[Évýn éÐÈcçˆNRÇòxœø¥¿ý¦–*(=Î5͹•¶b#A‰Zq°”ðP4Å-û*ÿ¸=mû¢6ÓõÖ ç7‡Œ‰J¦ +î*?Ý{¶*C7ï7Xžˆ$…Ÿ‚çTpqêçL™wf¹;ÞÂøør{¿JÍí +üá;,(gíØ{qöôקcœÀUàaG! [¿~&6k Î8Y¡R~ø‡îm¿Ë;¸qìOpàÝeëà­Œš7´DcÅ-Ã) "X‹ˆ®¥å×ákÕïc¢ +-½Pš7‰¯‹¢UL  \ŠÄl1(—Ç×±0†®ªу%”zm°ÚýQ"ÐäžÕÞ1BÄ 'ÅŸ¡Ô<ÆTe¼í âîånÅ?\êÛÜ—³¬‘0d¬™t;êFºÞö¾Ä(²® t`‡ÔÁÁÁ'ë€L+^ùf«bÖñr΀ÆÅ~ +ùè;µöEH„Î…% øдø,siìëWeäv„Û·ìh³óÞk†§.±¤ÐbĤ¼Âœ_ð§Dׂ>½«ã) +,äéJ¤”ÎèÃ_ AÆÏÌžª²=]³·í£© wn¸r eؾˆ”0RqrFI¥Ë_›Ð^“ÊÚY a÷Ùˆu(ÂX ˆ‚œhÓ9mYAõØÁMx“jãØÌKü:ã3.rÎÓ(6'Š1óHµC·å¦âRšãÚC +8ÚÞpw'E±|Ç—¥_HòMØë.4gæ8’³S[ð¦V¢â¬'â _,«(S'©Cä/½Žñm€t÷-¯*e¯ªmÙ z¼&úKo Ÿ÷Üê2*Y1&å¾õ#ÌÏ%IìAÁNdÖP/JQìoqœFNÑQŒÇ/õÙcLBµª8ÍÒÂ" aë”õ"Ž2çmˆ5X¬Õ°éZàèE¡a§^²ït¿éÖšH%Ñd,gÛP´|àä9h ½[¥Ñ' £ÿð“LÚ u’hü*Ùýb±o€H³ÙdyM¿Û*ýž¦³sà:74ë"B¸¡çfëÜ´7ÁÃySm19ª]=MµX(O=iÂC7wù*½EŽ&6o{ašGµÏ Ÿƒª´)Or¡'%"{Ô'ÊÓÐÓí¢Qž$}z"6Õ:¬<©=1s +aå)©éIàpeå)èIs,ÑíìmãLÿ¼&7{¸ÛÙ¤F¾‘+kÚJkÎXrAe]å¹Ùȴ⺉·à‘Vî-døòìèòå0¶j{ÝáÎhÔ²Sut9žÊS7*,¯õ1 !Œ²ì¤g%%k¯1؆3±L˜ÿ‹,ž©NC‰l f<ÏöD@¨l&d¨Kèüð[‘zž½wƒz—vÎ3‚r@b¢1 ˜Z8ýØõÍ.üÀ­jšÁ{ø&…‚C€jž*¡þeÚ¼®¡3?÷êqƒÛ5OuñJ‚¼½5¨®ÜpcºðÂQ•Èù†ç-;À¸O,ÉND­8ã¿)┉l'×FNœ™'LŠ¬ÆZ- Ή™'ø>Å5î ŒËn{‡À»Ñ6Õ¬åCaRåH¼Ž4¹li²(2I(ª†IN„„¾Âl]ìþ^d î.§ŸQe5êƒ åÌù¼ól`)è“Û7÷÷sàHcŒðÔKºA -ÇäA¨J䪋Hî0à¬s‡³?1N°o–­7Άyëo +KϤê›ßâ[BÞ2·åRtª¬gïXk˜_À¿õG@\/âLt a}Ä6”®â{Èk¾¥(Ç´iv€ãLýÎÄ ýϱžŒ‰ORxF‚ÎN•~­Ñ~Ž–pe†ÔÉ=^¶vG›È+ÕS)G8ìÐ0ìGæô}¸qGf®lä2jGözˆÛèÙ;ówwdµúÈfv$ Ž÷ÌŠ¾ æçé,¥ˆÉÖ0ØëPBìîH¦!+æÊÚ|¼j1,ßÃ.C Û[ØÑ“ `Xék“îO †‡v „yºÁ°•A ;63,Ö¾ª–Žs [¦vüBŽ*XÅ°³ » 1ÌÖÛóúÅ0TÀ°›ÔUÃÚ› 0Ì\M‹vglpS͈±-Œaµ’§;9±.5ô¥Yr“IEŸÅÿKVTiU^?ô³¡ ‚ {ñˆá¨nÇ"i¶™Ãql<Q†:úø”$óhÈ¡ø‡#ñã>Ý~õ¸#Dsòºð@C;û‰w ÙTäóÚ àí4 ùPœ´ë~íÈ¥.–›íc[UÕÝ©w³IÑã, +0^v¤6ÒÁñ‘Öhèÿf>£Ï#3uÈ*q[G0Ôf»Ôn.I†õ;àQÆ0Æô߯Èç8V4îcÒCd†Å°xèýÙUZ0SEà:‡KLîqiÁå’ƒÒÌ ùŒNrO¥‹ÈÖ;åÝ 4CbáêwX9bô¹Îôy£2P"òˆNô¹òYúÔÝ%ò#ò½iÀ”¹S3Ö¯?ÚŒf¦>¤¶fÒÞtèÐŽ|Í€éîà]3ƒ~PkNË»<¿‘ÔgºÁÝê@ËR¯–Ð|0x{-˜å_ö˜‘&¼Ð?*•…$t Jb„Ý=‡C ®Ÿ8*  +2å)LÕæÌàèîìÃ{G‚Ü­eGeÀ±Õ³¡±,HCªn{i.Õ‡w?jPº{-¼{·´áð1m/JA‡·V‡aºÙ¡hÛþ~Œào21¶Uào›½Ô6a,áæÚYäVzEÒö‘œòÈ+Q˜l.ÝHðN^ÛQÊ:ÙšT@››ïd¬Šy+tÛågX”€¶aiš/ê·› s6K0&ŠïD‚˜y¨M0d¢¿ÕöÿÆDΛç·³œ\ÖÄâôŠ?|BàZ’•3zßÂûÇ“Ñ k‘Äì‰SðɘÊö /ÂÀ¯–ÓQïF8H†z7_s£ŽÞŠ®å"xÏܱ–·ãŸÉæIC‹Rbíôî&ÚÎ,kgB^DPõí‚ktÄ8ß8ÄÁ·ŽB111KcÂRY˛ÙÐažM€n¶Šàª†ð` Ï–ºã25ñÜV‘”q:qKTYÆöR|®‚íôdö‹é¤VnŠ &âIÓy2•5†t<±@çPVÍ“Ìý‘]\dÙPEÐ=¥l"LÍôµ§™N¢¯Ìù¢3Ýíõ6V3ØžFDVqrÁÒGÆåû9ù2Ì#‰¬ z@ŽƒæaËä£üˆ0‘Úpšlœ/„gÍ}¶Š~™ ©*BGŒHÞŸk™&Œqå´‚²§åõ´Âd<¼ÂÙeGâÑõ?TÚQâ­MB»8ó%j}¥¿uÎAQÜ®Áìdhv% Sa×êåõôü°õ¯„x§~Þ`<‘×Ø£|Ù—6Ù‰ö0¡fÔ¼fc­o÷µ·¶USàl™¡ff(ñüî + 0ÙŠup’æŒcÚõÞ`»Iľîâý­£è}…°DqvQèŽ>tLoщ¿W\·ïoµ+°z"kTÍ&Ùâå°ìØ/r‡DqLH—¾s²"Û¹†×ÿJ3о¡wMcÆxöü܃ÎäÿïµX×÷C§žÉ4RœIîUjËh‚½Èk.f˜ ‹6RÍ3,>I¹ÕˆÕ&e†ÞÝ&ú_9¡BH>³&×øµ&5×ït_tV @Iªx¬ ícÿÆ9âgËbÕ¶Ù‘aûê×”?„Åa;oäB_>ËhgL¹ŒŸäÀŒ¡[VÐêðA駺¦‡34—À‡i·Þ|ŸföZJ¤Îk1z ¤$‹Sû`Uõ‚áÍ–¸Cêe‘ WP‡‚~•ÿØaÈ ²ÓÚöŸTŒ´ò:U!*nYŠ2'dUͶºTlÝ$&uć›mÓ²ËåŸkÓjÑVæý}ä‹6ãNúåjå£ëÉv³4+à Ù²)>¦ñ± õx[ +Š®/·5´†dû è³šÏÄiÓ#%Cž;B¯Ö—Ñ0ø& »5g‘Eû1ØOøíŠð¤ª:ñÛ6«R\=ì,Úcðõ—“ª‰ sÅ]°©â˜f“¡fÑÉ–42Pc+k~ .^4V¾NCÆ šNXí¼Of8,Ÿ²ö÷ŽQ¹š¶}̈&Ly?R2ÇNLÌÑ +z…qÖÉÀòtÑ™…’ø·Úm¬Š¾x´èÎ'° ©GF˜ý3õ}í¦€Iìf°LMQ Uâ/ 2Íõ”­þóuLJ]«¾UM ‡ôçAa|«”»wF™´.œ/¼0žáå‚î(ÈxÎe=°îöÂmq½¼iUO2†¤w(ß+°ÚxÁôWcWx¥Í`Zš%O™Íž¢ôDÈ ^DB&áB«¦íêž2@X”¦÷ +´c›§&8 my! dä–½‰é–¦ɶßlìê– ++Î +¹²?üÚÂI\/˜€à,èÈ>ô¥#!K§cG.Ò—EpqãPNó×;X™-FlÙ›61íY½@Ð=Ý®“Ʀ¯~<3Uû¬pÝ5 qÜ?d³{¨Þ~SƒÁÛªD7¼öý”Fñ”Ü•«ÆàóÊ&lÐK‹„:®ö³„´•¦ãߦoªþÞžÕ}‘ø#€4’e;cÆžà*šÑ¨Adb¦xý'±h;Õ» É7)ž T¹AŽ´t©­É±|Ý~r;Bû +|:UIågü>îËhud’;ÊeíO_ n´$n†U‘B9TÖiґ¸ʥf<ÙHpv锾®t@Ÿ¥ªË%ÎÐá…­i™D¿%*‰)û ¿>Ϫ½°j–,=£ŸSˆ[Õ`L¨Ã »Ó€<ç“H¸‘{ÃKm+³_…ˆ§Ê˜‰Q KT™©yÞƒÈÊœH+€Ñd'9QV2&-Þ{\ìôŽüêœçœ²ÀÙ÷ݶííÓ¹a¦·“ÑA`Ë8óÚÐ¥­›Ñ xUQE(¨˜Âv¼Vaöݘ¯þmbŒa|Kɘo¹àz/ó©J넪'®D|^`±¿Ã ”@M• TÉ õR•Ë%ÿRɶ$MgKo»g@%BØJ)çÀC :ƒ}I¿;µ(6Ž„‰§)¦f¬‰B\3ëb\³.ˆeíÜs0'Xº,¾ùÎᙪ«òOàÔèØ c³Þ“Xˆµ´L‹ÎBq‰E˜›f„Ôsëó1®%!¬æÆdŒ—¯^¸pïØK§M[ÞV»š¿pÔÄð¼h¥+sæÌ`œÿm‘¯ Œù2°µˆ˜j=ÅTÓÏeÿÔÞFIì58·K+ý|Y9–Q)…LÁ"yKµÔÍn9i5yYÄËöÌ|@}ôga@ŽIZ<5ãÃ…íYså6Ög31lvj!Ä‘ŠF¬» ÈÁ•UçÛÍÄÖ]t+ŽX Ë÷ïP®®ß,¯Ó¦>‹«ÿµ³‹:MåoGx^ÚUJV`d¡ÿ¤³7*¶îTÍ”O¿ÐD"‘Ÿ½ü܉ýÒ“fq™>Ab©®]Nf¤I@§8’ÆØ–QEÓïc_svTDHBI‹×ÒüÅÈ. ÍÄ#Äૃ˜z1ÉIgÉ]Dü¶J?­eßA‡ª +²‰}·È ÅSå%nQ\›aá–k:µßî# +¶:‚KÜ£0þºßf‘mùE0jR_¼ß*ÈMýhå 6õBkÉNn +ÂK€át*ž+:BÈeõV¢àÖÓu±VJ.é-… M2ön ·¼ ¦ŠÎ›Ékõ%xär~=Lʆd.DµQüN¢mœ×Hâ‹ž È,hmɹ?ªæ’¨jU Áq9O×}Úš%!ùÇ m#ç„Ö·C€šb±W‹K…¹/ò…d1"YTn‰¦Ð`–%ø Ó%n$ä7µ)U^MLè×Ë­fn-møœÙ‰_%õnÿ»—Ë™qŸc–Õ +Û¬jH,‰ŠøÓ&ÓË«Zê%RÜ¢Þ¤¢²Ý‡”˜fóhÒ\E 1åÉœ€2Î(°ŒW%áÁ;ŽûMºNÏ'=º +)CRVÕÔ/$ÿ8ê“Jq•!͉>A +Û¬’ Sêo$›¹Úõòù¤–B;ƒÿ3Œn_(Nœ3I8ëDf¶O›«ôݵ©D¢FñðîYÇÁ{ gt8ò~¶w¢%ƒ”XKº£¹SqÍ‘ÚÓT’•nrŠìœxF›ïÙ+¼³íÕBd­"+f?™½+ßp¼ÎŠÆ¢ñÍ—XAr@Ì•@@{Tá2‚"+ø‚ ˜\øê.h¬ùßÖ­hG Ȇô +Ú_ƒ ŒÛ6Bo0︻qg»Cƒ·=¹¨ær·2ñøq…f'Î|šJ9@ ïn ïÅÑbÄŠ †L°»“CËZÿZIsíà}E +ï\HI¡Ÿ”áG Æ݈1¢±’¸ËfDiEâÜHw_cx‡2›Ž»Yd‡öŽ„ð>ÍDÖî:q ŸZ‚vYA ÞIîÇÞƒÍ × +Déî&À%ÚžíÉÚa¦<ÆÃY!R\"¹÷6[WÚ† ¥h•¹Íæ–q³Z«³"þ8`ÇuS¼†’Èf*½â¸ç_D£ïæZ¹¢íŸ +JY¾¸MKg¬ßKáPGàè0£¶·*@ yvžÓ­®—¤ó¦h*¢ ·É9mk;iÛ”R]Økžhèí"ËŽ:xz&=ÍÿÐðfª}4=D7 ⇽vÁ®…fÍÁ†–qF¢’' ÿÐM @Ç!š…Å® IöŠœŠæ7Îwg9š×Al­¢ëâ˜ÂÔ’Z{ÏîéÕ§é·Sðïó`,;ÙJà8sŠØøD ™†åO¡Úç'×{ñZÁ]ô"¢Z_-hé|›RÀBÔ)ζ¿ ËZ¬çËÉʯóÖž|`ó%õ#\d endstream endobj 23 0 obj [22 0 R] endobj 33 0 obj <> endobj xref +0 34 +0000000004 65535 f +0000000016 00000 n +0000000147 00000 n +0000064628 00000 n +0000000000 00000 f +0000064679 00000 n +0000000000 00000 f +0000071625 00000 n +0000000000 00000 f +0000000000 00000 f +0000000000 00000 f +0000000000 00000 f +0000000000 00000 f +0000000000 00000 f +0000000000 00000 f +0000000000 00000 f +0000071698 00000 n +0000071894 00000 n +0000073776 00000 n +0000139364 00000 n +0000204952 00000 n +0000000000 00000 f +0000068642 00000 n +0000211727 00000 n +0000065070 00000 n +0000068942 00000 n +0000068829 00000 n +0000067569 00000 n +0000068081 00000 n +0000068129 00000 n +0000068713 00000 n +0000068744 00000 n +0000068977 00000 n +0000211752 00000 n +trailer <<24FADF2300F342B29EEA9E9F7E9F1E1D>]>> startxref 211940 %%EOF \ No newline at end of file diff --git a/doc/logos/doe/PyomoDoE.eps b/doc/logos/doe/PyomoDoE.eps new file mode 100644 index 00000000000..2d36bf64df7 Binary files /dev/null and b/doc/logos/doe/PyomoDoE.eps differ diff --git a/doc/logos/doe/PyomoDoE.pdf b/doc/logos/doe/PyomoDoE.pdf new file mode 100644 index 00000000000..ab5c17fca8d Binary files /dev/null and b/doc/logos/doe/PyomoDoE.pdf differ diff --git a/doc/logos/doe/PyomoDoE_noTXT-150.png b/doc/logos/doe/PyomoDoE_noTXT-150.png new file mode 100644 index 00000000000..1fe64eec4ae Binary files /dev/null and b/doc/logos/doe/PyomoDoE_noTXT-150.png differ diff --git a/doc/logos/doe/PyomoDoE_noTXT-300.png b/doc/logos/doe/PyomoDoE_noTXT-300.png new file mode 100644 index 00000000000..315e51b0b08 Binary files /dev/null and b/doc/logos/doe/PyomoDoE_noTXT-300.png differ diff --git a/doc/logos/doe/PyomoDoE_noTXT-600.png b/doc/logos/doe/PyomoDoE_noTXT-600.png new file mode 100644 index 00000000000..f6e2cb4c72b Binary files /dev/null and b/doc/logos/doe/PyomoDoE_noTXT-600.png differ diff --git a/doc/logos/doe/PyomoDoE_noTXT-72.png b/doc/logos/doe/PyomoDoE_noTXT-72.png new file mode 100644 index 00000000000..efc56ea9dab Binary files /dev/null and b/doc/logos/doe/PyomoDoE_noTXT-72.png differ diff --git a/examples/dae/Heat_Conduction.py b/examples/dae/Heat_Conduction.py index c928ee2e899..11f35fddd13 100644 --- a/examples/dae/Heat_Conduction.py +++ b/examples/dae/Heat_Conduction.py @@ -15,37 +15,49 @@ from pyomo.dae import * m = ConcreteModel() -m.time = ContinuousSet(bounds=(0,1)) -m.x = ContinuousSet(bounds=(0,10)) -m.y = ContinuousSet(bounds=(0,5)) -m.T = Var(m.x,m.y,m.time) -m.u = Var(m.x,m.y,m.time) +m.time = ContinuousSet(bounds=(0, 1)) +m.x = ContinuousSet(bounds=(0, 10)) +m.y = ContinuousSet(bounds=(0, 5)) +m.T = Var(m.x, m.y, m.time) +m.u = Var(m.x, m.y, m.time) m.T0 = Param(initialize=5) -m.TD = Param(m.x,m.y,initialize=25) +m.TD = Param(m.x, m.y, initialize=25) m.Ux0 = Param(initialize=10) m.Uy5 = Param(initialize=15) -m.dTdx = DerivativeVar(m.T,wrt=m.x) -m.d2Tdx2 = DerivativeVar(m.T,wrt=(m.x,m.x)) -m.dTdy = DerivativeVar(m.T,wrt=m.y) -m.d2Tdy2 = DerivativeVar(m.T,wrt=(m.y,m.y)) -m.dTdt = DerivativeVar(m.T,wrt=m.time) +m.dTdx = DerivativeVar(m.T, wrt=m.x) +m.d2Tdx2 = DerivativeVar(m.T, wrt=(m.x, m.x)) +m.dTdy = DerivativeVar(m.T, wrt=m.y) +m.d2Tdy2 = DerivativeVar(m.T, wrt=(m.y, m.y)) +m.dTdt = DerivativeVar(m.T, wrt=m.time) -def _heateq(m,i,j,k): - return m.d2Tdx2[i,j,k] + m.d2Tdy2[i,j,k] + m.u[i,j,k] == m.dTdt[i,j,k] -m.heateq = Constraint(m.x,m.y,m.time,rule=_heateq) -def _initT(m,i,j): - return m.T[i,j,0] == m.T0 -m.initT = Constraint(m.x,m.y,rule=_initT) +def _heateq(m, i, j, k): + return m.d2Tdx2[i, j, k] + m.d2Tdy2[i, j, k] + m.u[i, j, k] == m.dTdt[i, j, k] -def _xbound(m,j,k): - return m.dTdx[0,j,k] == m.Ux0 -m.xbound = Constraint(m.y,m.time,rule=_xbound) -def _ybound(m,i,k): - return m.dTdy[i,5,k] == m.Uy5 -m.ybound = Constraint(m.x,m.time,rule=_ybound) +m.heateq = Constraint(m.x, m.y, m.time, rule=_heateq) + + +def _initT(m, i, j): + return m.T[i, j, 0] == m.T0 + + +m.initT = Constraint(m.x, m.y, rule=_initT) + + +def _xbound(m, j, k): + return m.dTdx[0, j, k] == m.Ux0 + + +m.xbound = Constraint(m.y, m.time, rule=_xbound) + + +def _ybound(m, i, k): + return m.dTdy[i, 5, k] == m.Uy5 + + +m.ybound = Constraint(m.x, m.time, rule=_ybound) # def _intExp(m,i,j): # return m.T[i,j,1] - m.TD[i,j] diff --git a/examples/dae/Optimal_Control.py b/examples/dae/Optimal_Control.py index f3b1b8bed16..ed44d5eeb59 100644 --- a/examples/dae/Optimal_Control.py +++ b/examples/dae/Optimal_Control.py @@ -11,20 +11,20 @@ # Sample Problem 1 (Ex 1 from Dynopt Guide) # -# min X2(tf) -# s.t. X1_dot = u X1(0) = 1 -# X2_dot = X1^2 + u^2 X2(0) = 0 -# tf = 1 +# min X2(tf) +# s.t. X1_dot = u X1(0) = 1 +# X2_dot = X1^2 + u^2 X2(0) = 0 +# tf = 1 from pyomo.environ import * from pyomo.dae import * m = ConcreteModel() -m.t = ContinuousSet(bounds=(0,1)) +m.t = ContinuousSet(bounds=(0, 1)) -m.x1 = Var(m.t, bounds=(0,1)) -m.x2 = Var(m.t, bounds=(0,1)) +m.x1 = Var(m.t, bounds=(0, 1)) +m.x2 = Var(m.t, bounds=(0, 1)) m.u = Var(m.t, initialize=0) m.x1dot = DerivativeVar(m.x1) @@ -32,21 +32,29 @@ m.obj = Objective(expr=m.x2[1]) -def _x1dot(M,i): - if i == 0: - return Constraint.Skip - return M.x1dot[i] == M.u[i] + +def _x1dot(M, i): + if i == 0: + return Constraint.Skip + return M.x1dot[i] == M.u[i] + + m.x1dotcon = Constraint(m.t, rule=_x1dot) -def _x2dot(M,i): - if i == 0: - return Constraint.Skip - return M.x2dot[i] == M.x1[i]**2 + M.u[i]**2 + +def _x2dot(M, i): + if i == 0: + return Constraint.Skip + return M.x2dot[i] == M.x1[i] ** 2 + M.u[i] ** 2 + + m.x2dotcon = Constraint(m.t, rule=_x2dot) + def _init(M): - yield M.x1[0] == 1 - yield M.x2[0] == 0 - yield ConstraintList.End -m.init_conditions = ConstraintList(rule=_init) + yield M.x1[0] == 1 + yield M.x2[0] == 0 + yield ConstraintList.End + +m.init_conditions = ConstraintList(rule=_init) diff --git a/examples/dae/PDE_example.py b/examples/dae/PDE_example.py index 187e3ce52ed..6cb7eb4a7fe 100644 --- a/examples/dae/PDE_example.py +++ b/examples/dae/PDE_example.py @@ -16,33 +16,45 @@ m = ConcreteModel() m.pi = Param(initialize=3.1416) -m.t = ContinuousSet(bounds=(0,2)) -m.x = ContinuousSet(bounds=(0,1)) -m.u = Var(m.x,m.t) +m.t = ContinuousSet(bounds=(0, 2)) +m.x = ContinuousSet(bounds=(0, 1)) +m.u = Var(m.x, m.t) -m.dudx = DerivativeVar(m.u,wrt=m.x) -m.dudx2 = DerivativeVar(m.u,wrt=(m.x,m.x)) -m.dudt = DerivativeVar(m.u,wrt=m.t) +m.dudx = DerivativeVar(m.u, wrt=m.x) +m.dudx2 = DerivativeVar(m.u, wrt=(m.x, m.x)) +m.dudt = DerivativeVar(m.u, wrt=m.t) -def _pde(m,i,j): - if i == 0 or i == 1 or j == 0 : + +def _pde(m, i, j): + if i == 0 or i == 1 or j == 0: return Constraint.Skip - return m.pi**2*m.dudt[i,j] == m.dudx2[i,j] -m.pde = Constraint(m.x,m.t,rule=_pde) + return m.pi**2 * m.dudt[i, j] == m.dudx2[i, j] + + +m.pde = Constraint(m.x, m.t, rule=_pde) + -def _initcon(m,i): +def _initcon(m, i): if i == 0 or i == 1: return Constraint.Skip - return m.u[i,0] == sin(m.pi*i) -m.initcon = Constraint(m.x,rule=_initcon) + return m.u[i, 0] == sin(m.pi * i) + + +m.initcon = Constraint(m.x, rule=_initcon) + -def _lowerbound(m,j): - return m.u[0,j] == 0 -m.lowerbound = Constraint(m.t,rule=_lowerbound) +def _lowerbound(m, j): + return m.u[0, j] == 0 -def _upperbound(m,j): - return m.pi*exp(-j)+m.dudx[1,j] == 0 -m.upperbound = Constraint(m.t,rule=_upperbound) + +m.lowerbound = Constraint(m.t, rule=_lowerbound) + + +def _upperbound(m, j): + return m.pi * exp(-j) + m.dudx[1, j] == 0 + + +m.upperbound = Constraint(m.t, rule=_upperbound) m.obj = Objective(expr=1) @@ -54,27 +66,27 @@ def _upperbound(m,j): # Discretize using Finite Difference and Collocation discretizer = TransformationFactory('dae.finite_difference') discretizer2 = TransformationFactory('dae.collocation') -discretizer.apply_to(m,nfe=25,wrt=m.x,scheme='BACKWARD') -discretizer2.apply_to(m,nfe=20,ncp=3,wrt=m.t) +discretizer.apply_to(m, nfe=25, wrt=m.x, scheme='BACKWARD') +discretizer2.apply_to(m, nfe=20, ncp=3, wrt=m.t) # Discretize using Finite Difference Method # discretizer = TransformationFactory('dae.finite_difference') # discretizer.apply_to(m,nfe=25,wrt=m.x,scheme='BACKWARD') # discretizer.apply_to(m,nfe=20,wrt=m.t,scheme='BACKWARD') -solver=SolverFactory('ipopt') -results = solver.solve(m,tee=True) +solver = SolverFactory('ipopt') +results = solver.solve(m, tee=True) x = [] t = [] u = [] for i in sorted(m.x): - temp=[] + temp = [] tempx = [] for j in sorted(m.t): tempx.append(i) - temp.append(value(m.u[i,j])) + temp.append(value(m.u[i, j])) x.append(tempx) t.append(sorted(m.t)) u.append(temp) @@ -83,9 +95,10 @@ def _upperbound(m,j): import numpy import matplotlib.pyplot as plt from mpl_toolkits.mplot3d.axes3d import Axes3D + fig = plt.figure() -ax = fig.add_subplot(1,1,1,projection='3d') +ax = fig.add_subplot(1, 1, 1, projection='3d') ax.set_xlabel('Distance x') ax.set_ylabel('Time t') -p = ax.plot_wireframe(x,t,u,rstride=1,cstride=1) +p = ax.plot_wireframe(x, t, u, rstride=1, cstride=1) fig.show() diff --git a/examples/dae/Parameter_Estimation.py b/examples/dae/Parameter_Estimation.py index 5cb4e756644..7ee2f112b94 100644 --- a/examples/dae/Parameter_Estimation.py +++ b/examples/dae/Parameter_Estimation.py @@ -12,52 +12,64 @@ # Sample Problem 2: Parameter Estimation # (Ex 5 from Dynopt Guide) # -# min sum((X1(ti)-X1_meas(ti))^2) -# s.t. X1_dot = X2 X1(0) = p1 -# X2_dot = 1-2*X2-X1 X2(0) = p2 -# -1.5 <= p1,p2 <= 1.5 -# tf = 6 +# min sum((X1(ti)-X1_meas(ti))^2) +# s.t. X1_dot = X2 X1(0) = p1 +# X2_dot = 1-2*X2-X1 X2(0) = p2 +# -1.5 <= p1,p2 <= 1.5 +# tf = 6 # from pyomo.environ import * from pyomo.dae import * model = AbstractModel() -model.t = ContinuousSet() -model.MEAS_t = Set(within=model.t) # Measurement times, must be subset of t +model.t = ContinuousSet() +model.MEAS_t = Set(within=model.t) # Measurement times, must be subset of t model.x1_meas = Param(model.MEAS_t) model.x1 = Var(model.t) model.x2 = Var(model.t) -model.p1 = Var(bounds=(-1.5,1.5)) -model.p2 = Var(bounds=(-1.5,1.5)) +model.p1 = Var(bounds=(-1.5, 1.5)) +model.p2 = Var(bounds=(-1.5, 1.5)) -model.x1dot = DerivativeVar(model.x1,wrt=model.t) +model.x1dot = DerivativeVar(model.x1, wrt=model.t) model.x2dot = DerivativeVar(model.x2) + def _init_conditions(model): - yield model.x1[0] == model.p1 - yield model.x2[0] == model.p2 + yield model.x1[0] == model.p1 + yield model.x2[0] == model.p2 + + model.init_conditions = ConstraintList(rule=_init_conditions) # Alternate way to declare initial conditions -#def _initx1(model): -# return model.x1[0] == model.p1 -#model.initx1 = Constraint(rule=_initx1) +# def _initx1(model): +# return model.x1[0] == model.p1 +# model.initx1 = Constraint(rule=_initx1) + +# def _initx2(model): +# return model.x2[0] == model.p2 +# model.initx2 = Constraint(rule=_initx2) + + +def _x1dot(model, i): + return model.x1dot[i] == model.x2[i] -#def _initx2(model): -# return model.x2[0] == model.p2 -#model.initx2 = Constraint(rule=_initx2) -def _x1dot(model,i): - return model.x1dot[i] == model.x2[i] model.x1dotcon = Constraint(model.t, rule=_x1dot) -def _x2dot(model,i): - return model.x2dot[i] == 1-2*model.x2[i]-model.x1[i] + +def _x2dot(model, i): + return model.x2dot[i] == 1 - 2 * model.x2[i] - model.x1[i] + + model.x2dotcon = Constraint(model.t, rule=_x2dot) + def _obj(model): - return sum((model.x1[i]-model.x1_meas[i])**2 for i in model.MEAS_t) + return sum((model.x1[i] - model.x1_meas[i]) ** 2 for i in model.MEAS_t) + + model.obj = Objective(rule=_obj) diff --git a/examples/dae/Path_Constraint.py b/examples/dae/Path_Constraint.py index f28994ce3c9..866b4b3b90a 100644 --- a/examples/dae/Path_Constraint.py +++ b/examples/dae/Path_Constraint.py @@ -25,7 +25,7 @@ m = ConcreteModel() m.tf = Param(initialize=1) -m.t = ContinuousSet(bounds=(0,m.tf)) +m.t = ContinuousSet(bounds=(0, m.tf)) m.u = Var(m.t, initialize=0) m.x1 = Var(m.t) @@ -38,32 +38,47 @@ m.obj = Objective(expr=m.x3[m.tf]) + def _x1dot(m, t): if t == 0: return Constraint.Skip return m.dx1[t] == m.x2[t] + + m.x1dotcon = Constraint(m.t, rule=_x1dot) + def _x2dot(m, t): if t == 0: return Constraint.Skip - return m.dx2[t] == -m.x2[t]+m.u[t] + return m.dx2[t] == -m.x2[t] + m.u[t] + + m.x2dotcon = Constraint(m.t, rule=_x2dot) + def _x3dot(m, t): if t == 0: return Constraint.Skip - return m.dx3[t] == m.x1[t]**2+m.x2[t]**2+0.005*m.u[t]**2 + return m.dx3[t] == m.x1[t] ** 2 + m.x2[t] ** 2 + 0.005 * m.u[t] ** 2 + + m.x3dotcon = Constraint(m.t, rule=_x3dot) + def _con(m, t): - return m.x2[t]-8*(t-0.5)**2+0.5 <= 0 + return m.x2[t] - 8 * (t - 0.5) ** 2 + 0.5 <= 0 + + m.con = Constraint(m.t, rule=_con) + def _init(m): yield m.x1[0] == 0 yield m.x2[0] == -1 yield m.x3[0] == 0 + + m.init_conditions = ConstraintList(rule=_init) diff --git a/examples/dae/ReactionKinetics.py b/examples/dae/ReactionKinetics.py index 8d1fb6055d0..ef760820c4b 100644 --- a/examples/dae/ReactionKinetics.py +++ b/examples/dae/ReactionKinetics.py @@ -27,15 +27,17 @@ colloc = TransformationFactory('dae.collocation') solver = SolverFactory('ipopt') + class Reaction(object): - """ A simple class to hold the stoichiometry of a single reaction + """A simple class to hold the stoichiometry of a single reaction Reaction data is stored in two dictionaries: reactants: a map of reactant species name -> stoichiometric coefficient products: a map of product species name -> stoichiometric coefficient """ + def __init__(self, name, reactants, products=None): - """ Define a reaction. The reaction can be specified either as + """Define a reaction. The reaction can be specified either as a text string: Reaction("2*A + B -> C + 3*D") @@ -66,13 +68,13 @@ def _parse(self, _in): _in = _in.split('+') for x in _in: coef, species = self._parseTerm(x) - ans[species] = ans.get(species,0) + coef + ans[species] = ans.get(species, 0) + coef return ans - + def _parseTerm(self, x): if isinstance(x, str): if '*' in x: - coef, species = x.split('*',1) + coef, species = x.split('*', 1) coef = float(coef) else: coef, species = 1, x @@ -81,36 +83,43 @@ def _parseTerm(self, x): coef = float(coef) return coef, species.strip() + class ReactionNetwork(object): - """ A simple object to hold sets of reactions. """ + """A simple object to hold sets of reactions.""" + def __init__(self): self.reactions = {} def add(self, rxn): - """ Add a single reaction to the reaction network. """ + """Add a single reaction to the reaction network.""" if rxn.name in self.reactions: - raise RuntimeError("Duplicate reaction %s:\n\told=%s\n\tnew=%s" % - rxn.name, self.reactions[rxn.name], rxn) + raise RuntimeError( + "Duplicate reaction %s:\n\told=%s\n\tnew=%s" % rxn.name, + self.reactions[rxn.name], + rxn, + ) self.reactions[rxn.name] = rxn def add_reversible(self, rxn): - """ Add a pair of reactions to the reaction network. + """Add a pair of reactions to the reaction network. This model implements reversible reactions through an explicit pair of forward and reverse reactions. """ self.add(rxn) - tmp = Reaction( name= rxn.name+'_r', - reactants= [(b,a) for a,b in rxn.products.items()], - products= [(b,a) for a,b in rxn.reactants.items()] ) + tmp = Reaction( + name=rxn.name + '_r', + reactants=[(b, a) for a, b in rxn.products.items()], + products=[(b, a) for a, b in rxn.reactants.items()], + ) self.add(tmp) def species(self): """Return the set of all species appearing int he Reaction Network""" ans = set() for rxn in self.reactions.values(): - ans.update( rxn.reactants ) - ans.update( rxn.products ) + ans.update(rxn.reactants) + ans.update(rxn.products) return sorted(ans) @@ -122,40 +131,40 @@ def create_kinetic_model(rxnNet, time): model.rxnNetwork = rxnNet - model.SPECIES = Set( initialize=rxnNet.species() ) - model.REACTIONS = Set( initialize=rxnNet.reactions.keys() ) + model.SPECIES = Set(initialize=rxnNet.species()) + model.REACTIONS = Set(initialize=rxnNet.reactions.keys()) try: maxTime = max(time) times = time except TypeError: maxTime = time times = [time] - model.TIME = ContinuousSet( bounds=(0,maxTime), initialize=times ) + model.TIME = ContinuousSet(bounds=(0, maxTime), initialize=times) - model.c = Var( model.TIME, model.SPECIES, bounds=(0,None) ) - model.dcdt = DerivativeVar( model.c, wrt=model.TIME ) + model.c = Var(model.TIME, model.SPECIES, bounds=(0, None)) + model.dcdt = DerivativeVar(model.c, wrt=model.TIME) - model.k = Var( model.REACTIONS, bounds=(0,None) ) - model.rate = Var( model.TIME, model.REACTIONS ) + model.k = Var(model.REACTIONS, bounds=(0, None)) + model.rate = Var(model.TIME, model.REACTIONS) def reaction_rate(m, t, r): rhs = m.k[r] for s, coef in m.rxnNetwork.reactions[r].reactants.items(): - rhs *= m.c[t,s]**coef - return m.rate[t,r] == rhs - model.reaction_rate = Constraint( model.TIME, model.REACTIONS, - rule=reaction_rate ) + rhs *= m.c[t, s] ** coef + return m.rate[t, r] == rhs + + model.reaction_rate = Constraint(model.TIME, model.REACTIONS, rule=reaction_rate) def stoichiometry(m, t, s): rhs = 0 for r in m.REACTIONS: if s in m.rxnNetwork.reactions[r].reactants: - rhs -= m.rate[t,r] * m.rxnNetwork.reactions[r].reactants[s] + rhs -= m.rate[t, r] * m.rxnNetwork.reactions[r].reactants[s] if s in m.rxnNetwork.reactions[r].products: - rhs += m.rate[t,r] * m.rxnNetwork.reactions[r].products[s] - return m.dcdt[t,s] == rhs - model.stoichiometry = Constraint( model.TIME, model.SPECIES, - rule=stoichiometry ) + rhs += m.rate[t, r] * m.rxnNetwork.reactions[r].products[s] + return m.dcdt[t, s] == rhs + + model.stoichiometry = Constraint(model.TIME, model.SPECIES, rule=stoichiometry) return model @@ -172,20 +181,20 @@ def simple_simulation_model(): """Run a simple simulation model for 2*A -> B -> C.""" rxns = ReactionNetwork() - rxns.add( Reaction("AtoB", "2*A -> B") ) - rxns.add( Reaction("BtoC", "B -> C") ) + rxns.add(Reaction("AtoB", "2*A -> B")) + rxns.add(Reaction("BtoC", "B -> C")) - model = create_kinetic_model(rxns, 60*60) + model = create_kinetic_model(rxns, 60 * 60) - A1 = 1.32e19 # L / mol*s - A2 = 1.09e13 # 1/s + A1 = 1.32e19 # L / mol*s + A2 = 1.09e13 # 1/s Ea1 = 140000 # J/mol Ea2 = 100000 # J/mol - R = 8.314 # J / K*mol - T = 330 # K + R = 8.314 # J / K*mol + T = 330 # K - model.k['AtoB'].fix( A1 * exp( -Ea1 / (R*T) ) ) - model.k['BtoC'].fix( A2 * exp( -Ea2 / (R*T) ) ) + model.k['AtoB'].fix(A1 * exp(-Ea1 / (R * T))) + model.k['BtoC'].fix(A2 * exp(-Ea2 / (R * T))) model.c[0, 'A'].fix(1) model.c[0, 'B'].fix(0) @@ -198,12 +207,16 @@ def simple_simulation_model(): if plt is not None: _tmp = sorted(model.c.items()) for _i, _x in enumerate('ABC'): - plt.plot([x[0][0] for x in _tmp if x[0][1] == _x], - [value(x[1]) for x in _tmp if x[0][1] == _x], - 'bgr'[_i]+'*', label=_x) + plt.plot( + [x[0][0] for x in _tmp if x[0][1] == _x], + [value(x[1]) for x in _tmp if x[0][1] == _x], + 'bgr'[_i] + '*', + label=_x, + ) plt.legend() plt.show() + # # This example is based on # @@ -217,21 +230,22 @@ def simple_optimization_model(): concentration of the intermediate, "B".""" rxns = ReactionNetwork() - rxns.add( Reaction("AtoB", "2*A -> B") ) - rxns.add( Reaction("BtoC", "B -> C") ) + rxns.add(Reaction("AtoB", "2*A -> B")) + rxns.add(Reaction("BtoC", "B -> C")) - model = create_kinetic_model(rxns, 60*60) + model = create_kinetic_model(rxns, 60 * 60) - A1 = 1.32e19 # L / mol*s - A2 = 1.09e13 # 1/s + A1 = 1.32e19 # L / mol*s + A2 = 1.09e13 # 1/s Ea1 = 140000 # J/mol Ea2 = 100000 # J/mol - R = 8.314 # J / K*mol - model.T = Var(bounds=(0,None), initialize=330) # K + R = 8.314 # J / K*mol + model.T = Var(bounds=(0, None), initialize=330) # K def compute_k(m): - yield m.k['AtoB'] == A1 * exp( -Ea1 / (R*m.T) ) - yield m.k['BtoC'] == A2 * exp( -Ea2 / (R*m.T) ) + yield m.k['AtoB'] == A1 * exp(-Ea1 / (R * m.T)) + yield m.k['BtoC'] == A2 * exp(-Ea2 / (R * m.T)) + model.compute_k = ConstraintList(rule=compute_k) # initial conditions @@ -241,24 +255,27 @@ def compute_k(m): fdiff.apply_to(model, nfe=100) - model.obj = Objective( sense=maximize, - expr=model.c[max(model.TIME), 'B']) + model.obj = Objective(sense=maximize, expr=model.c[max(model.TIME), 'B']) results = solver.solve(model, tee=True) if plt is not None: for _i, _x in enumerate('ABC'): - plt.plot([x.index()[0] for x in model.c[:,_x]], - [value(x) for x in model.c[:,_x]], - 'bgr'[_i]+'*', label=_x) + plt.plot( + [x.index()[0] for x in model.c[:, _x]], + [value(x) for x in model.c[:, _x]], + 'bgr'[_i] + '*', + label=_x, + ) plt.legend() plt.show() + def create_regression_model(b, t): rxns = ReactionNetwork() - rxns.add_reversible( Reaction( "k_1", "TG + MeOH -> DG + FAME" ) ) - rxns.add_reversible( Reaction( "k_2", "DG + MeOH -> MG + FAME" ) ) - rxns.add_reversible( Reaction( "k_3", "MG + MeOH -> Glycerol + FAME" ) ) + rxns.add_reversible(Reaction("k_1", "TG + MeOH -> DG + FAME")) + rxns.add_reversible(Reaction("k_2", "DG + MeOH -> MG + FAME")) + rxns.add_reversible(Reaction("k_3", "MG + MeOH -> Glycerol + FAME")) data = b.model().data[t] key = b.model().key @@ -266,17 +283,22 @@ def create_regression_model(b, t): model = create_kinetic_model(rxns, data.keys()) model.T = Param(initialize=t) - model.error = Var(bounds=(0,None)) + model.error = Var(bounds=(0, None)) model.compute_error = Constraint( - expr = model.error == sum( - (( model.c[t,key[i]] - x ) / max(data[_t][i] for _t in data) )**2 - for t in data for i,x in enumerate(data[t]) ) ) + expr=model.error + == sum( + ((model.c[t, key[i]] - x) / max(data[_t][i] for _t in data)) ** 2 + for t in data + for i, x in enumerate(data[t]) + ) + ) return model + def regression_model(): - """ Develop a simple parameter estimation model to identify either + """Develop a simple parameter estimation model to identify either rate coedfficients (if regress_Ea is False), or the activation energy (if regress_Ea is True).""" @@ -286,53 +308,56 @@ def regression_model(): # model = ConcreteModel() - model.key = key = ('MeOH','TG','DG','MG','FAME','Glycerol') + model.key = key = ('MeOH', 'TG', 'DG', 'MG', 'FAME', 'Glycerol') model.data = data = { 150: { - 0: (2.833,6.84E-02,0.00,0.00,0.00,0.00,), - 256: (2.807,4.75E-02,1.51E-02,3.71E-03,2.60E-02,8.18E-04,), - 613: (2.795,3.92E-02,1.98E-02,5.83E-03,3.83E-02,1.60E-03,), - 1228: (2.772,2.95E-02,2.83E-02,9.78E-03,6.07E-02,2.30E-03,), - 1433: (2.762,2.40E-02,3.13E-02,1.49E-02,7.08E-02,4.48E-03,), - 1633: (2.747,1.74E-02,2.02E-02,2.16E-02,8.57E-02,6.23E-03,), - 1933: (2.715,1.03E-02,9.10E-03,2.83E-02,1.18E-01,6.97E-03,), - 2623: (2.699,7.49E-03,7.87E-03,2.34E-02,1.34E-01,9.83E-03,), - 3028: (2.676,3.04E-03,6.56E-03,1.58E-02,1.57E-01,1.68E-02,), - 9000: (2.639,0.00,0.00,0.00,1.94E-01,6.06E-02,), }, + 0: (2.833, 6.84e-02, 0.00, 0.00, 0.00, 0.00), + 256: (2.807, 4.75e-02, 1.51e-02, 3.71e-03, 2.60e-02, 8.18e-04), + 613: (2.795, 3.92e-02, 1.98e-02, 5.83e-03, 3.83e-02, 1.60e-03), + 1228: (2.772, 2.95e-02, 2.83e-02, 9.78e-03, 6.07e-02, 2.30e-03), + 1433: (2.762, 2.40e-02, 3.13e-02, 1.49e-02, 7.08e-02, 4.48e-03), + 1633: (2.747, 1.74e-02, 2.02e-02, 2.16e-02, 8.57e-02, 6.23e-03), + 1933: (2.715, 1.03e-02, 9.10e-03, 2.83e-02, 1.18e-01, 6.97e-03), + 2623: (2.699, 7.49e-03, 7.87e-03, 2.34e-02, 1.34e-01, 9.83e-03), + 3028: (2.676, 3.04e-03, 6.56e-03, 1.58e-02, 1.57e-01, 1.68e-02), + 9000: (2.639, 0.00, 0.00, 0.00, 1.94e-01, 6.06e-02), + }, 210: { - 0: (2.835,6.78E-02,0.00,0.00,0.00,0.00,), - 130:(2.806,3.56E-02,1.96E-02,1.92E-02,3.35E-02,0.00,), - 160:(2.755,3.42E-02,1.49E-02,2.54E-02,4.17E-02,0.00,), - 190:(2.735,2.92E-02,1.38E-02,2.83E-02,5.67E-02,0.00,), - 220:(2.715,2.20E-02,1.40E-02,2.80E-02,7.97E-02,4.37E-03,), - 250:(2.698,1.70E-02,7.89E-03,3.12E-02,1.05E-01,1.24E-02,), - 280:(2.675,1.29E-02,5.45E-03,2.78E-02,1.28E-01,2.23E-02,), - 340:(2.659,7.02E-03,5.90E-03,1.56E-02,1.58E-01,3.99E-02,), - 400:(2.648,3.65E-03,5.13E-03,7.92E-03,1.75E-01,5.17E-02,), - 460:(2.641,2.66E-03,5.04E-03,4.64E-03,1.79E-01,5.61E-02,), - 520:(2.637,1.49E-03,3.57E-03,2.48E-03,1.86E-01,6.09E-02,), - 580:(2.633,3.35E-04,4.96E-04,1.84E-03,1.95E-01,6.58E-02,), - 640:(2.632,2.49E-04,2.40E-04,1.44E-03,1.98E-01,6.65E-02,), - 700:(2.630,2.31E-04,2.90E-05,1.28E-03,2.00E-01,6.69E-02,), - 760:(2.630,0.00,0.00,7.61E-04,2.02E-01,6.77E-02,), } - } - - model.experiment = Block( data.keys(), rule=create_regression_model ) - - model.obj = Objective( sense=minimize, - expr=sum(b.error for b in model.experiment[:]) ) - _experiments = list( model.experiment.values() ) + 0: (2.835, 6.78e-02, 0.00, 0.00, 0.00, 0.00), + 130: (2.806, 3.56e-02, 1.96e-02, 1.92e-02, 3.35e-02, 0.00), + 160: (2.755, 3.42e-02, 1.49e-02, 2.54e-02, 4.17e-02, 0.00), + 190: (2.735, 2.92e-02, 1.38e-02, 2.83e-02, 5.67e-02, 0.00), + 220: (2.715, 2.20e-02, 1.40e-02, 2.80e-02, 7.97e-02, 4.37e-03), + 250: (2.698, 1.70e-02, 7.89e-03, 3.12e-02, 1.05e-01, 1.24e-02), + 280: (2.675, 1.29e-02, 5.45e-03, 2.78e-02, 1.28e-01, 2.23e-02), + 340: (2.659, 7.02e-03, 5.90e-03, 1.56e-02, 1.58e-01, 3.99e-02), + 400: (2.648, 3.65e-03, 5.13e-03, 7.92e-03, 1.75e-01, 5.17e-02), + 460: (2.641, 2.66e-03, 5.04e-03, 4.64e-03, 1.79e-01, 5.61e-02), + 520: (2.637, 1.49e-03, 3.57e-03, 2.48e-03, 1.86e-01, 6.09e-02), + 580: (2.633, 3.35e-04, 4.96e-04, 1.84e-03, 1.95e-01, 6.58e-02), + 640: (2.632, 2.49e-04, 2.40e-04, 1.44e-03, 1.98e-01, 6.65e-02), + 700: (2.630, 2.31e-04, 2.90e-05, 1.28e-03, 2.00e-01, 6.69e-02), + 760: (2.630, 0.00, 0.00, 7.61e-04, 2.02e-01, 6.77e-02), + }, + } + + model.experiment = Block(data.keys(), rule=create_regression_model) + + model.obj = Objective( + sense=minimize, expr=sum(b.error for b in model.experiment[:]) + ) + _experiments = list(model.experiment.values()) # initializations from the paper for _e in _experiments: - _e.k['k_1'] = 7.58e-7 + _e.k['k_1'] = 7.58e-7 _e.k['k_1_r'] = 0 - _e.k['k_2'] = 2.20e-7 + _e.k['k_2'] = 2.20e-7 _e.k['k_2_r'] = 0 - _e.k['k_3'] = 2.15e-7 + _e.k['k_3'] = 2.15e-7 _e.k['k_3_r'] = 0 - #fdiff.apply_to(model, nfe=100) + # fdiff.apply_to(model, nfe=100) colloc.apply_to(model, nfe=100, ncp=3) # Note that the two experiments are not linked at this point, so @@ -345,17 +370,19 @@ def regression_model(): # independent regression as the starting point) regress_Ea = True if regress_Ea: - model.Kset = Set(initialize=['k_1','k_2','k_3',]) - model.Ea = Var(model.Kset, bounds=(0,None), initialize=0) - model.A = Var(model.Kset, bounds=(0,None), initialize=0) + model.Kset = Set(initialize=['k_1', 'k_2', 'k_3']) + model.Ea = Var(model.Kset, bounds=(0, None), initialize=0) + model.A = Var(model.Kset, bounds=(0, None), initialize=0) model.R = Param(initialize=8.314) for _e in _experiments: _e.k.fix() + def compute_k(e, _k): m = e.model() # k11' == k_mt + k_11 * (C_DG + C_MG) / C_TG_0 - #return e.k[_k] == m.A[_k] * exp( -m.Ea[_k] / ( m.R * e.T ) ) - return log(e.k[_k]) == log(m.A[_k]) - m.Ea[_k] / ( m.R * e.T ) + # return e.k[_k] == m.A[_k] * exp( -m.Ea[_k] / ( m.R * e.T ) ) + return log(e.k[_k]) == log(m.A[_k]) - m.Ea[_k] / (m.R * e.T) + _e.compute_k = Constraint(model.Kset, rule=compute_k) solver.solve(model, tee=True) @@ -372,23 +399,31 @@ def compute_k(e, _k): ax2 = plt.twinx() for _i, _x in enumerate(key): _ax = ax2 if _x == 'MeOH' else ax - _ax.plot( [ t for t in data[T].keys() ], - [ data[T][t][_i] for t in data[T].keys() ], - 'mkrgbc'[_i]+'x' ) + _ax.plot( + [t for t in data[T].keys()], + [data[T][t][_i] for t in data[T].keys()], + 'mkrgbc'[_i] + 'x', + ) for _i, _x in enumerate(key): _ax = ax2 if _x == 'MeOH' else ax - _ax.plot([ x.index()[0] for x in model.experiment[T].c[:,_x] ], - [ value(x) for x in model.experiment[T].c[:,_x] ], - 'mkrgbc'[_i]+'-') + _ax.plot( + [x.index()[0] for x in model.experiment[T].c[:, _x]], + [value(x) for x in model.experiment[T].c[:, _x]], + 'mkrgbc'[_i] + '-', + ) plt.show() - + + if __name__ == "__main__": import sys + if len(sys.argv) != 2 or sys.argv[1] not in '123': - print("""ERROR: expected a model to run: + print( + """ERROR: expected a model to run: 1 - simple simulation model 2 - simple (final value) optimization model - 3 - kinetic parameter regression model""") + 3 - kinetic parameter regression model""" + ) sys.exit(1) if '1' in sys.argv[1]: diff --git a/examples/dae/car_example.py b/examples/dae/car_example.py index 82143669a46..a157159cf6c 100644 --- a/examples/dae/car_example.py +++ b/examples/dae/car_example.py @@ -16,15 +16,15 @@ m = ConcreteModel() -m.R = Param(initialize=0.001) # Friction factor -m.L = Param(initialize=100.0) # Final position +m.R = Param(initialize=0.001) # Friction factor +m.L = Param(initialize=100.0) # Final position -m.tau = ContinuousSet(bounds=(0,1)) # Unscaled time -m.time = Var(m.tau) # Scaled time +m.tau = ContinuousSet(bounds=(0, 1)) # Unscaled time +m.time = Var(m.tau) # Scaled time m.tf = Var() -m.x = Var(m.tau,bounds=(0,m.L+50)) -m.v = Var(m.tau,bounds=(0,None)) -m.a = Var(m.tau, bounds=(-3.0,1.0),initialize=0) +m.x = Var(m.tau, bounds=(0, m.L + 50)) +m.v = Var(m.tau, bounds=(0, None)) +m.a = Var(m.tau, bounds=(-3.0, 1.0), initialize=0) m.dtime = DerivativeVar(m.time) m.dx = DerivativeVar(m.x) @@ -32,65 +32,77 @@ m.obj = Objective(expr=m.tf) -def _ode1(m,i): - if i == 0 : + +def _ode1(m, i): + if i == 0: return Constraint.Skip return m.dx[i] == m.tf * m.v[i] + + m.ode1 = Constraint(m.tau, rule=_ode1) -def _ode2(m,i): - if i == 0 : + +def _ode2(m, i): + if i == 0: return Constraint.Skip - return m.dv[i] == m.tf*(m.a[i] - m.R*m.v[i]**2) + return m.dv[i] == m.tf * (m.a[i] - m.R * m.v[i] ** 2) + + m.ode2 = Constraint(m.tau, rule=_ode2) -def _ode3(m,i): + +def _ode3(m, i): if i == 0: return Constraint.Skip return m.dtime[i] == m.tf + + m.ode3 = Constraint(m.tau, rule=_ode3) + def _init(m): yield m.x[0] == 0 yield m.x[1] == m.L yield m.v[0] == 0 yield m.v[1] == 0 yield m.time[0] == 0 + + m.initcon = ConstraintList(rule=_init) discretizer = TransformationFactory('dae.finite_difference') -discretizer.apply_to(m,nfe=15,scheme='BACKWARD') +discretizer.apply_to(m, nfe=15, scheme='BACKWARD') solver = SolverFactory('ipopt') -solver.solve(m,tee=True) +solver.solve(m, tee=True) -print("final time = %6.2f" %(value(m.tf))) +print("final time = %6.2f" % (value(m.tf))) x = [] v = [] a = [] -time=[] +time = [] for i in m.tau: time.append(value(m.time[i])) x.append(value(m.x[i])) v.append(value(m.v[i])) a.append(value(m.a[i])) - + import matplotlib.pyplot as plt plt.subplot(131) -plt.plot(time,x,label='x') +plt.plot(time, x, label='x') plt.title('location') plt.xlabel('time') plt.subplot(132) -plt.plot(time,v,label='v') +plt.plot(time, v, label='v') plt.xlabel('time') plt.title('velocity') plt.subplot(133) -plt.plot(time,a,label='a') +plt.plot(time, a, label='a') plt.xlabel('time') plt.title('acceleration') diff --git a/examples/dae/disease_DAE.py b/examples/dae/disease_DAE.py index 143b8bfabc2..59e598aa504 100644 --- a/examples/dae/disease_DAE.py +++ b/examples/dae/disease_DAE.py @@ -9,42 +9,44 @@ years = 20 beta_py = 26 fepr = 1 -fepy = beta_py*fepr +fepy = beta_py * fepr fe = fepy * years -step = 365.0/fepy +step = 365.0 / fepy model = AbstractModel() # Define unindexed parameters -model.P_GAMMA = Param(default=1.0/14.0) +model.P_GAMMA = Param(default=1.0 / 14.0) model.P_NUM_BETA = Param(default=beta_py) model.P_FEPY = Param(default=fepy) model.P_FE = Param(default=fe) model.P_STEP = Param(default=step) -model.P_TRI = Param(default=beta_py*years) +model.P_TRI = Param(default=beta_py * years) model.P_FEPR = Param(default=fepr) model.I_OBJ_WT = Param(default=0.995) model.PHI_OBJ_WT = Param(default=0.005) # Define sets -model.S_BETA = RangeSet(1,model.P_NUM_BETA) -model.S_FE = RangeSet(1,model.P_FE) +model.S_BETA = RangeSet(1, model.P_NUM_BETA) +model.S_FE = RangeSet(1, model.P_FE) def _TIME_init(model): - return (model.P_STEP*i for i in model.S_FE) -model.TIME = ContinuousSet(initialize=_TIME_init, bounds=(0,None)) + return (model.P_STEP * i for i in model.S_FE) -model.S_TRI = RangeSet(1,model.P_TRI) + +model.TIME = ContinuousSet(initialize=_TIME_init, bounds=(0, None)) + +model.S_TRI = RangeSet(1, model.P_TRI) # Define indexed parameters beta_ndx = {} if (beta_py > 26) or (fepr > 1): - for i in range(1,fe+1): - beta_ndx[i] = (((i+1)/fepr)-1)%beta_py+1 + for i in range(1, fe + 1): + beta_ndx[i] = (((i + 1) / fepr) - 1) % beta_py + 1 else: - for i in range(1,fe+1): - beta_ndx[i] = ((i-1)%beta_py)+1 + for i in range(1, fe + 1): + beta_ndx[i] = ((i - 1) % beta_py) + 1 model.P_BETA_NDX = Param(model.S_FE, initialize=beta_ndx, default=1.0) model.P_POP = Param(default=1.0e6) @@ -52,7 +54,7 @@ def _TIME_init(model): model.P_REP_CASES = Param(model.S_TRI, default=10.0) model.P_BIRTHS = Param(model.S_FE, default=100.0) model.P_DATA_WTS = Param(model.S_TRI, default=1.0) -model.P_ALL_CASES = Param(model.S_TRI, default = 10.0) +model.P_ALL_CASES = Param(model.S_TRI, default=10.0) # Define initialization parameters and rules model.init_S_bar = Param(default=1.0e5) @@ -72,106 +74,149 @@ def _TIME_init(model): model.init_Idot = Param(model.S_FE, default=1.0) model.init_phidot = Param(model.S_FE, default=1.0) -model.init_beta_patt = Param(model.S_BETA, default = 0.0) -model.init_beta_int = Param(default = 1.0) +model.init_beta_patt = Param(model.S_BETA, default=0.0) +model.init_beta_int = Param(default=1.0) + + def _init_S_bar(model): return model.init_S_bar + + def _init_beta_bar(model): return model.init_beta_bar + + def _init_I_init(model): return model.init_I_init + + def _init_S_init(model): return model.init_S_init -def _init_beta(model,i): + + +def _init_beta(model, i): return model.init_beta[i] -def _init_beta_pos(model,i): + + +def _init_beta_pos(model, i): return model.init_beta_pos[i] -def _init_beta_neg(model,i): + + +def _init_beta_neg(model, i): return model.init_beta_neg[i] -def _init_eps_I(model,i): + + +def _init_eps_I(model, i): return model.init_eps_I[i] -def _init_eps_phi(model,i): + + +def _init_eps_phi(model, i): return model.init_eps_phi[i] -def _init_S(model,i): - if i==0: + +def _init_S(model, i): + if i == 0: return model.init_S_init fe = model.TIME.get_upper_element_boundary(i) j = model.TIME._fe.index(fe) return model.init_S[j] -def _init_I(model,i): - if i==0: + + +def _init_I(model, i): + if i == 0: return model.init_I_init fe = model.TIME.get_upper_element_boundary(i) j = model.TIME._fe.index(fe) return model.init_I[j] -def _init_phi(model,i): - if i==0: + + +def _init_phi(model, i): + if i == 0: return 0 fe = model.TIME.get_upper_element_boundary(i) j = model.TIME._fe.index(fe) return model.init_phi[j] -def _init_Sdot(model,i): - if i==0: + + +def _init_Sdot(model, i): + if i == 0: return 1 fe = model.TIME.get_upper_element_boundary(i) j = model.TIME._fe.index(fe) - return model.P_STEP*model.init_Sdot[j] -def _init_Idot(model,i): - if i==0: + return model.P_STEP * model.init_Sdot[j] + + +def _init_Idot(model, i): + if i == 0: return 1 fe = model.TIME.get_upper_element_boundary(i) j = model.TIME._fe.index(fe) - return model.P_STEP*model.init_Idot[j] -def _init_phidot(model,i): - if i==0: + return model.P_STEP * model.init_Idot[j] + + +def _init_phidot(model, i): + if i == 0: return 1 fe = model.TIME.get_upper_element_boundary(i) j = model.TIME._fe.index(fe) - return model.P_STEP*model.init_phidot[j] + return model.P_STEP * model.init_phidot[j] -def _init_beta_patt(model,i): + +def _init_beta_patt(model, i): return model.init_beta_patt[i] + + def _init_beta_int(model): return model.init_beta_int -def _people_bounds(model,i): + +def _people_bounds(model, i): return (0.0, model.P_POP) + + def _init_people_bounds(model): return (0.0, model.P_POP) + # Define unindexed variables -model.S_bar = Var(initialize=_init_S_bar, bounds=(0,None)) -model.beta_bar = Var(initialize=_init_beta_bar, bounds=(0.05,5)) +model.S_bar = Var(initialize=_init_S_bar, bounds=(0, None)) +model.beta_bar = Var(initialize=_init_beta_bar, bounds=(0.05, 5)) model.I_init = Var(initialize=_init_I_init, bounds=_init_people_bounds) model.S_init = Var(initialize=_init_S_init, bounds=_init_people_bounds) model.phi_init = Param(default=0.0) # Define indexed variables -model.beta = Var(model.S_BETA, initialize=_init_beta, bounds=(0.01,5)) +model.beta = Var(model.S_BETA, initialize=_init_beta, bounds=(0.01, 5)) -model.beta_pos = Var(model.S_BETA, initialize=_init_beta_pos, bounds=(0,None)) -model.beta_neg = Var(model.S_BETA, initialize=_init_beta_neg, bounds=(0,None)) -model.beta_patt = Var(model.S_BETA, initialize=_init_beta_patt, bounds=(-5,5)) -model.beta_int = Var(initialize = _init_beta_int, bounds=(0.01,5.0)) -model.beta_c = Var(initialize = 1.0) -model.alpha = Var(initialize = 0.05, bounds=(-1.0,1.0)) +model.beta_pos = Var(model.S_BETA, initialize=_init_beta_pos, bounds=(0, None)) +model.beta_neg = Var(model.S_BETA, initialize=_init_beta_neg, bounds=(0, None)) +model.beta_patt = Var(model.S_BETA, initialize=_init_beta_patt, bounds=(-5, 5)) +model.beta_int = Var(initialize=_init_beta_int, bounds=(0.01, 5.0)) +model.beta_c = Var(initialize=1.0) +model.alpha = Var(initialize=0.05, bounds=(-1.0, 1.0)) model.eps_I = Var(model.S_FE, initialize=_init_eps_I) model.eps_phi = Var(model.S_TRI, initialize=_init_eps_phi) model.S = Var(model.TIME, initialize=_init_S, bounds=_people_bounds) model.I = Var(model.TIME, initialize=_init_I, bounds=_people_bounds) -model.phi = Var(model.TIME, initialize=_init_phi, bounds=(0,None)) +model.phi = Var(model.TIME, initialize=_init_phi, bounds=(0, None)) model.Sdot = DerivativeVar(model.S, initialize=_init_Sdot) model.Idot = DerivativeVar(model.I, initialize=_init_Idot) -model.phidot = DerivativeVar(model.phi, initialize=_init_phidot, bounds=(-10,None)) +model.phidot = DerivativeVar(model.phi, initialize=_init_phidot, bounds=(-10, None)) + def _obj_rule(model): - return (model.I_OBJ_WT*sum(model.eps_I[i]**2 for i in model.S_FE) + \ - model.PHI_OBJ_WT*sum(model.P_DATA_WTS[i]*model.eps_phi[i]**2 for i in model.S_TRI)) + return model.I_OBJ_WT * sum( + model.eps_I[i] ** 2 for i in model.S_FE + ) + model.PHI_OBJ_WT * sum( + model.P_DATA_WTS[i] * model.eps_phi[i] ** 2 for i in model.S_TRI + ) + + model.obj = Objective(rule=_obj_rule) + ######################## # Initial Conditions ######################## @@ -179,57 +224,101 @@ def _init_conditions(model): yield model.I[0] == model.I_init yield model.S[0] == model.S_init yield model.phi[0] == model.phi_init + + model.init_conditions = ConstraintList(rule=_init_conditions) -def _reported_cases(model,i): + +def _reported_cases(model, i): if i == 1: if model.P_DATA_WTS[i] > 0.1: - return model.P_REP_CASES[i]== model.P_REP_FRAC[i]*( model.phi[model.TIME._fe[i*model.P_FEPR]] - model.phi_init ) + model.eps_phi[i] + return ( + model.P_REP_CASES[i] + == model.P_REP_FRAC[i] + * (model.phi[model.TIME._fe[i * model.P_FEPR]] - model.phi_init) + + model.eps_phi[i] + ) else: return Constraint.Skip else: if model.P_DATA_WTS[i] > 0.1: - return model.P_REP_CASES[i]== model.P_REP_FRAC[i]*( model.phi[model.TIME._fe[i*model.P_FEPR]] - model.phi[model.TIME._fe[(i-1)*model.P_FEPR]] ) + model.eps_phi[i] + return ( + model.P_REP_CASES[i] + == model.P_REP_FRAC[i] + * ( + model.phi[model.TIME._fe[i * model.P_FEPR]] + - model.phi[model.TIME._fe[(i - 1) * model.P_FEPR]] + ) + + model.eps_phi[i] + ) else: return Constraint.Skip + + model.con_reported_cases = Constraint(model.S_TRI, rule=_reported_cases) + def _beta_bar(model): - return (model.beta_bar, sum(model.beta[i] for i in model.S_BETA)/len(model.S_BETA)) + return ( + model.beta_bar, + sum(model.beta[i] for i in model.S_BETA) / len(model.S_BETA), + ) + + model.con_beta_bar = Constraint(rule=_beta_bar) -def _phidot_eq(model,i): + +def _phidot_eq(model, i): if i == 0: return Constraint.Skip fe = model.TIME.get_upper_element_boundary(i) j = model.TIME._fe.index(fe) - return model.phidot[i] == model.eps_I[j] \ - + (model.beta[model.P_BETA_NDX[j]]*model.I[i]*model.S[i])/model.P_POP + return ( + model.phidot[i] + == model.eps_I[j] + + (model.beta[model.P_BETA_NDX[j]] * model.I[i] * model.S[i]) / model.P_POP + ) + + model.phidot_eq = Constraint(model.TIME, rule=_phidot_eq) -def _Idot_eq(model,i): + +def _Idot_eq(model, i): if i == 0: return Constraint.Skip - return model.Idot[i] == model.phidot[i] - model.P_GAMMA*model.I[i] + return model.Idot[i] == model.phidot[i] - model.P_GAMMA * model.I[i] + + model.Idot_eq = Constraint(model.TIME, rule=_Idot_eq) -def _Sdot_eq(model,i): + +def _Sdot_eq(model, i): if i == 0: return Constraint.Skip fe = model.TIME.get_upper_element_boundary(i) j = model.TIME._fe.index(fe) return model.Sdot[i] == -model.phidot[i] + model.P_BIRTHS[j] + + model.Sdot_eq = Constraint(model.TIME, rule=_Sdot_eq) + def _scaled_beta(model, i): - return (model.beta[i], model.beta_c * model.beta_patt[i]) + return (model.beta[i], model.beta_c * model.beta_patt[i]) + + model.con_city_varying_beta = Constraint(model.S_BETA, rule=_scaled_beta) + def _mean_patt(model): - return (1.0, sum_product(model.beta_patt)/len(model.S_BETA)) + return (1.0, sum_product(model.beta_patt) / len(model.S_BETA)) + + model.con_mean_patt = Constraint(rule=_mean_patt) + def _beta_c(model): return (0.75, model.beta_c, 1.5) + + model.con_beta_c = Constraint(rule=_beta_c) - diff --git a/examples/dae/distill_DAE.py b/examples/dae/distill_DAE.py index f882eb87288..cdfd543f9a8 100644 --- a/examples/dae/distill_DAE.py +++ b/examples/dae/distill_DAE.py @@ -12,85 +12,120 @@ from pyomo.environ import * from pyomo.dae import * -model = AbstractModel() +model = AbstractModel() -model.Feed = Param(initialize = 24.0/60.0) -model.x_Feed = Param(initialize = 0.5) -model.D = Param(initialize = model.x_Feed*model.Feed) -model.vol = Param(initialize = 1.6) -model.atray = Param(initialize = 0.25) -model.acond = Param(initialize = 0.5) -model.areb = Param(initialize = 1.0) +model.Feed = Param(initialize=24.0 / 60.0) +model.x_Feed = Param(initialize=0.5) +model.D = Param(initialize=model.x_Feed * model.Feed) +model.vol = Param(initialize=1.6) +model.atray = Param(initialize=0.25) +model.acond = Param(initialize=0.5) +model.areb = Param(initialize=1.0) model.S_TRAYS = Set(dimen=1) -model.S_RECTIFICATION = Set(within = model.S_TRAYS) -model.S_STRIPPING = Set(within = model.S_TRAYS) +model.S_RECTIFICATION = Set(within=model.S_TRAYS) +model.S_STRIPPING = Set(within=model.S_TRAYS) model.x0 = Param(model.S_TRAYS) -model.t = ContinuousSet(initialize=range(1,52)) +model.t = ContinuousSet(initialize=range(1, 52)) # Alternatively you could simply specify bounds on the # ContinuousSet and let the finite element points be generated # automatically. # model.t = ContinuousSet(bounds=(1,51)) model.y = Var(model.S_TRAYS, model.t) -def x_init_rule(m,n,ti): + + +def x_init_rule(m, n, ti): return value(m.x0[n]) + + model.x = Var(model.S_TRAYS, model.t, initialize=x_init_rule) model.dx = DerivativeVar(model.x) -model.rr = Var(model.t,initialize=3.0) -model.L = Var(model.t,initialize=0.6) -model.V = Var(model.t,initialize=0.8) -model.FL = Var(model.t,initialize=1) -model.u1 = Var(model.t,initialize=3.0, bounds=(1,5)) +model.rr = Var(model.t, initialize=3.0) +model.L = Var(model.t, initialize=0.6) +model.V = Var(model.t, initialize=0.8) +model.FL = Var(model.t, initialize=1) +model.u1 = Var(model.t, initialize=3.0, bounds=(1, 5)) + +model.alpha = Param(initialize=1000) +model.rho = Param(initialize=1) +model.u1_ref = Param(initialize=2.0) +model.y1_ref = Param(initialize=0.895814) -model.alpha = Param(initialize = 1000) -model.rho = Param(initialize = 1) -model.u1_ref = Param(initialize = 2.0) -model.y1_ref = Param(initialize = 0.895814) -### +### # Model constraints -### -def reflux_ratio_rule(m,t): +### +def reflux_ratio_rule(m, t): return m.rr[t] == m.u1[t] + + model.reflux_ratio = Constraint(model.t, rule=reflux_ratio_rule) -def flowrate_rectificaiton_rule(m,t): - return m.L[t] == m.rr[t]*m.D + +def flowrate_rectificaiton_rule(m, t): + return m.L[t] == m.rr[t] * m.D + + model.flowrate_rectificaiton = Constraint(model.t, rule=flowrate_rectificaiton_rule) -def vapor_column_rule(m,t): - return m.V[t] == m.L[t]+m.D + +def vapor_column_rule(m, t): + return m.V[t] == m.L[t] + m.D + + model.vapor_column = Constraint(model.t, rule=vapor_column_rule) -def flowrate_stripping_rule(m,t): + +def flowrate_stripping_rule(m, t): return m.FL[t] == m.Feed + m.L[t] + + model.flowrate_stripping = Constraint(model.t, rule=flowrate_stripping_rule) -def mole_frac_balance_rule(m,n,t): - return m.y[n,t] == m.x[n,t]*m.vol/(1+((m.vol-1)*m.x[n,t])) -model.mole_frac_balance = Constraint(model.S_TRAYS, model.t, rule=mole_frac_balance_rule) -def _diffeq(m,n,t): - +def mole_frac_balance_rule(m, n, t): + return m.y[n, t] == m.x[n, t] * m.vol / (1 + ((m.vol - 1) * m.x[n, t])) + + +model.mole_frac_balance = Constraint( + model.S_TRAYS, model.t, rule=mole_frac_balance_rule +) + + +def _diffeq(m, n, t): if t == 1: return Constraint.Skip if n == 1: - return m.dx[n,t] == 1/m.acond*m.V[t]*(m.y[n+1,t]-m.x[n,t]) + return m.dx[n, t] == 1 / m.acond * m.V[t] * (m.y[n + 1, t] - m.x[n, t]) elif n in m.S_RECTIFICATION: - return m.dx[n,t] == 1/m.atray*(m.L[t]*(m.x[n-1,t]-m.x[n,t])-m.V[t]*(m.y[n,t]-m.y[n+1,t])) + return m.dx[n, t] == 1 / m.atray * ( + m.L[t] * (m.x[n - 1, t] - m.x[n, t]) - m.V[t] * (m.y[n, t] - m.y[n + 1, t]) + ) elif n == 17: - return m.dx[n,t] == 1/m.atray*(m.Feed*m.x_Feed+m.L[t]*m.x[n-1,t]-m.FL[t]*m.x[n,t]-m.V[t]*(m.y[n,t]-m.y[n+1,t])) + return m.dx[n, t] == 1 / m.atray * ( + m.Feed * m.x_Feed + + m.L[t] * m.x[n - 1, t] + - m.FL[t] * m.x[n, t] + - m.V[t] * (m.y[n, t] - m.y[n + 1, t]) + ) elif n in m.S_STRIPPING: - return m.dx[n,t] == 1/m.atray*(m.FL[t]*(m.x[n-1,t]-m.x[n,t])-m.V[t]*(m.y[n,t]-m.y[n+1,t])) - else : - return m.dx[n,t] == 1/m.areb*(m.FL[t]*m.x[n-1,t]-(m.Feed-m.D)*m.x[n,t]-m.V[t]*m.y[n,t]) + return m.dx[n, t] == 1 / m.atray * ( + m.FL[t] * (m.x[n - 1, t] - m.x[n, t]) - m.V[t] * (m.y[n, t] - m.y[n + 1, t]) + ) + else: + return m.dx[n, t] == 1 / m.areb * ( + m.FL[t] * m.x[n - 1, t] - (m.Feed - m.D) * m.x[n, t] - m.V[t] * m.y[n, t] + ) + + model.diffeq = Constraint(model.S_TRAYS, model.t, rule=_diffeq) -def _init_rule(m,n): - return m.x[n,1] == m.x0[n] -model.init_rule = Constraint(model.S_TRAYS, rule=_init_rule) - +def _init_rule(m, n): + return m.x[n, 1] == m.x0[n] + + +model.init_rule = Constraint(model.S_TRAYS, rule=_init_rule) diff --git a/examples/dae/dynamic_scheduling.py b/examples/dae/dynamic_scheduling.py index 571a1fbbd9b..13cabeb5bcf 100644 --- a/examples/dae/dynamic_scheduling.py +++ b/examples/dae/dynamic_scheduling.py @@ -8,7 +8,7 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -# +# # This is a toy example for scheduling a sequence of reactions taking # place in a single reactor. It combines the Pyomo DAE and GDP # packages and includes modeling concepts from the DAE car example and @@ -20,77 +20,101 @@ m = ConcreteModel() -m.products = Set(initialize=['A','B']) -m.AthenB = Set(initialize=[0,1]) +m.products = Set(initialize=['A', 'B']) +m.AthenB = Set(initialize=[0, 1]) -m.tau = ContinuousSet(bounds=[0,1]) # Unscaled Time +m.tau = ContinuousSet(bounds=[0, 1]) # Unscaled Time -m.k = Param(m.products, initialize={'A':1, 'B':5}) # Reaction Rates +m.k = Param(m.products, initialize={'A': 1, 'B': 5}) # Reaction Rates # Cost of having 'A' or 'B' in the final product stream -m.cost = Param(m.products, initialize={'A':15000, 'B':20000}) +m.cost = Param(m.products, initialize={'A': 15000, 'B': 20000}) -m.tstart = Var(m.products,bounds=(0,None)) # Start Time -m.tproc = Var(m.products,bounds=(0,None)) # Processing Time -m.time = Var(m.products, m.tau, bounds=(0,None)) # Scaled time over each job -m.totaltime = Var() # Total job time +m.tstart = Var(m.products, bounds=(0, None)) # Start Time +m.tproc = Var(m.products, bounds=(0, None)) # Processing Time +m.time = Var(m.products, m.tau, bounds=(0, None)) # Scaled time over each job +m.totaltime = Var() # Total job time -m.c = Var(m.products, m.tau, bounds=(0,None)) +m.c = Var(m.products, m.tau, bounds=(0, None)) m.dc = DerivativeVar(m.c, wrt=m.tau) m.dtime = DerivativeVar(m.time, wrt=m.tau) # Initial concentrations -m.c['A',0].fix(4) -m.c['B',0].fix(3) +m.c['A', 0].fix(4) +m.c['B', 0].fix(3) + # Reaction kinetics -def _diffeq(m,p,t): - return m.dc[p,t] == -m.tproc[p]*m.k[p]*m.c[p,t] +def _diffeq(m, p, t): + return m.dc[p, t] == -m.tproc[p] * m.k[p] * m.c[p, t] + + m.diffeq = Constraint(m.products, m.tau, rule=_diffeq) # Initial time -m.time['A',0].fix(0) -m.time['B',0].fix(0) +m.time['A', 0].fix(0) +m.time['B', 0].fix(0) -# Bound on the final concentration of reactants + +# Bound on the final concentration of reactants def _finalc(m, p): - return m.c[p,1] <= 0.001 + return m.c[p, 1] <= 0.001 + + m.finalc = Constraint(m.products, rule=_finalc) + # Scaled time -def _diffeqtime(m,p,t): - return m.dtime[p,t] == m.tproc[p] +def _diffeqtime(m, p, t): + return m.dtime[p, t] == m.tproc[p] + + m.diffeqtime = Constraint(m.products, m.tau, rule=_diffeqtime) + # No clash disjuncts def _noclash(disjunct, AthenB): model = disjunct.model() if AthenB: - e = model.tstart['A']+model.tproc['A'] <= model.tstart['B'] + e = model.tstart['A'] + model.tproc['A'] <= model.tstart['B'] disjunct.c = Constraint(expr=e) else: - e = model.tstart['B']+model.tproc['B'] <= model.tstart['A'] + e = model.tstart['B'] + model.tproc['B'] <= model.tstart['A'] disjunct.c = Constraint(expr=e) + + m.noclash = Disjunct(m.AthenB, rule=_noclash) + # Define the disjunctions: either job I occurs before K or K before I def _disj(model): return [model.noclash[AthenB] for AthenB in model.AthenB] + + m.disj = Disjunction(rule=_disj) + # Due Time def _duetime(m): - return m.tstart['B']+m.tproc['B'] <= 2.0 + return m.tstart['B'] + m.tproc['B'] <= 2.0 + + m.duetime = Constraint(rule=_duetime) + # Feasibility def _feas(m, p): - return m.totaltime >= m.tstart[p]+m.tproc[p] + return m.totaltime >= m.tstart[p] + m.tproc[p] + + m.feas = Constraint(m.products, rule=_feas) + # Objective def _obj(m): - return m.totaltime + sum(m.cost[p]*m.c[p,1] for p in m.products) + return m.totaltime + sum(m.cost[p] * m.c[p, 1] for p in m.products) + + m.obj = Objective(rule=_obj) # Discretize model @@ -103,19 +127,19 @@ def _obj(m): # Solve the model solver = SolverFactory('couenne') -solver.solve(m,tee=True) +solver.solve(m, tee=True) # Plot the results import matplotlib.pyplot as plt -timeA = [value(m.time['A',i])+value(m.tstart['A']) for i in m.tau] -timeB = [value(m.time['B',i])+value(m.tstart['B']) for i in m.tau] +timeA = [value(m.time['A', i]) + value(m.tstart['A']) for i in m.tau] +timeB = [value(m.time['B', i]) + value(m.tstart['B']) for i in m.tau] -concA = [value(m.c['A',i]) for i in m.tau] -concB = [value(m.c['B',i]) for i in m.tau] +concA = [value(m.c['A', i]) for i in m.tau] +concB = [value(m.c['B', i]) for i in m.tau] -plt.plot(timeA,concA,'r',label='Reactant A') -plt.plot(timeB,concB,'b',label='Reactant B') +plt.plot(timeA, concA, 'r', label='Reactant A') +plt.plot(timeB, concB, 'b', label='Reactant B') plt.legend(loc='best') plt.xlabel('Time') plt.ylabel('Concentration in Reactor') diff --git a/examples/dae/laplace_BVP.py b/examples/dae/laplace_BVP.py index 255a65df350..6b2e2841575 100644 --- a/examples/dae/laplace_BVP.py +++ b/examples/dae/laplace_BVP.py @@ -13,70 +13,88 @@ from pyomo.dae import * m = ConcreteModel() -m.x = ContinuousSet(bounds=(0,1)) -m.y = ContinuousSet(bounds=(0,1)) -m.u = Var(m.x,m.y) +m.x = ContinuousSet(bounds=(0, 1)) +m.y = ContinuousSet(bounds=(0, 1)) +m.u = Var(m.x, m.y) -m.dudx = DerivativeVar(m.u,wrt=(m.x,m.x)) -m.dudy = DerivativeVar(m.u,wrt=(m.y,m.y)) +m.dudx = DerivativeVar(m.u, wrt=(m.x, m.x)) +m.dudy = DerivativeVar(m.u, wrt=(m.y, m.y)) -def _lowerY(m,i): + +def _lowerY(m, i): if i == 0 or i == 1: return Constraint.Skip - return m.u[i,0] == 1 -m.lowerY = Constraint(m.x,rule=_lowerY) + return m.u[i, 0] == 1 + + +m.lowerY = Constraint(m.x, rule=_lowerY) -def _upperY(m,i): + +def _upperY(m, i): if i == 0 or i == 1: return Constraint.Skip - return m.u[i,1] == 2 -m.upperY = Constraint(m.x,rule=_upperY) + return m.u[i, 1] == 2 + -def _lowerX(m,j): +m.upperY = Constraint(m.x, rule=_upperY) + + +def _lowerX(m, j): if j == 0 or j == 1: return Constraint.Skip - return m.u[0,j] == 1 -m.lowerX = Constraint(m.y,rule=_lowerX) + return m.u[0, j] == 1 + + +m.lowerX = Constraint(m.y, rule=_lowerX) + -def _upperX(m,j): +def _upperX(m, j): if j == 0 or j == 1: return Constraint.Skip - return m.u[1,j] == 2 -m.upperX = Constraint(m.y,rule=_upperX) + return m.u[1, j] == 2 -def _laplace(m,i,j): + +m.upperX = Constraint(m.y, rule=_upperX) + + +def _laplace(m, i, j): if i == 0 or i == 1: return Constraint.Skip if j == 0 or j == 1: return Constraint.Skip - return m.dudx[i,j] + m.dudy[i,j] == 0 -m.laplace = Constraint(m.x,m.y,rule=_laplace) + return m.dudx[i, j] + m.dudy[i, j] == 0 + + +m.laplace = Constraint(m.x, m.y, rule=_laplace) + def _dummy(m): return 1.0 + + m.obj = Objective(rule=_dummy) discretizer = TransformationFactory('dae.finite_difference') -discretizer.apply_to(m,nfe=20,wrt=m.y,scheme='FORWARD') -discretizer.apply_to(m,nfe=20,wrt=m.x,scheme='CENTRAL') +discretizer.apply_to(m, nfe=20, wrt=m.y, scheme='FORWARD') +discretizer.apply_to(m, nfe=20, wrt=m.x, scheme='CENTRAL') -solver=SolverFactory('ipopt') +solver = SolverFactory('ipopt') -results = solver.solve(m,tee=True) +results = solver.solve(m, tee=True) -#disc.u.pprint() +# disc.u.pprint() x = [] y = [] u = [] for i in sorted(m.x): - temp=[] + temp = [] tempx = [] for j in sorted(m.y): tempx.append(i) - temp.append(value(m.u[i,j])) + temp.append(value(m.u[i, j])) x.append(tempx) y.append(sorted(m.y)) u.append(temp) @@ -85,7 +103,8 @@ def _dummy(m): import numpy import matplotlib.pyplot as plt from mpl_toolkits.mplot3d.axes3d import Axes3D + fig = plt.figure() -ax = fig.add_subplot(1,1,1,projection='3d') -p = ax.plot_wireframe(x,y,u,rstride=1,cstride=1) +ax = fig.add_subplot(1, 1, 1, projection='3d') +p = ax.plot_wireframe(x, y, u, rstride=1, cstride=1) fig.show() diff --git a/examples/dae/run_Optimal_Control.py b/examples/dae/run_Optimal_Control.py index 7c767da2235..2523bd8c607 100644 --- a/examples/dae/run_Optimal_Control.py +++ b/examples/dae/run_Optimal_Control.py @@ -19,17 +19,17 @@ # Discretize model using Orthogonal Collocation discretizer = TransformationFactory('dae.collocation') -discretizer.apply_to(m,nfe=20,ncp=3,scheme='LAGRANGE-RADAU') -discretizer.reduce_collocation_points(m,var=m.u,ncp=1,contset=m.t) +discretizer.apply_to(m, nfe=20, ncp=3, scheme='LAGRANGE-RADAU') +discretizer.reduce_collocation_points(m, var=m.u, ncp=1, contset=m.t) -solver=SolverFactory('ipopt') +solver = SolverFactory('ipopt') -results = solver.solve(m,tee=True) +results = solver.solve(m, tee=True) x1 = [] x2 = [] u = [] -t=[] +t = [] print(sorted(m.t)) @@ -41,9 +41,9 @@ import matplotlib.pyplot as plt -plt.plot(t,x1) -plt.plot(t,x2) +plt.plot(t, x1) +plt.plot(t, x2) plt.show() -plt.plot(t,u) +plt.plot(t, u) plt.show() diff --git a/examples/dae/run_Parameter_Estimation.py b/examples/dae/run_Parameter_Estimation.py index 2e850eb5c9d..a319000cb59 100644 --- a/examples/dae/run_Parameter_Estimation.py +++ b/examples/dae/run_Parameter_Estimation.py @@ -22,16 +22,16 @@ # Discretize model using Orthogonal Collocation discretizer = TransformationFactory('dae.collocation') -discretizer.apply_to(instance,nfe=8,ncp=5) +discretizer.apply_to(instance, nfe=8, ncp=5) -solver=SolverFactory('ipopt') +solver = SolverFactory('ipopt') -results = solver.solve(instance,tee=True) +results = solver.solve(instance, tee=True) x1 = [] x1_meas = [] -t=[] -t_meas=[] +t = [] +t_meas = [] print(sorted(instance.t)) @@ -42,11 +42,11 @@ for i in sorted(instance.t): t.append(i) x1.append(value(instance.x1[i])) - + import matplotlib.pyplot as plt -plt.plot(t,x1) -plt.plot(t_meas,x1_meas,'o') +plt.plot(t, x1) +plt.plot(t_meas, x1_meas, 'o') plt.xlabel('t') plt.ylabel('x') plt.title('Dynamic Parameter Estimation Using Collocation') diff --git a/examples/dae/run_Path_Constraint.py b/examples/dae/run_Path_Constraint.py index 4da6e59b006..17a576a57d8 100644 --- a/examples/dae/run_Path_Constraint.py +++ b/examples/dae/run_Path_Constraint.py @@ -19,22 +19,28 @@ # Discretize model using Orthogonal Collocation discretizer = TransformationFactory('dae.collocation') -discretizer.apply_to(m,nfe=7,ncp=6,scheme='LAGRANGE-RADAU') -discretizer.reduce_collocation_points(m,var=m.u,ncp=1,contset=m.t) +discretizer.apply_to(m, nfe=7, ncp=6, scheme='LAGRANGE-RADAU') +discretizer.reduce_collocation_points(m, var=m.u, ncp=1, contset=m.t) results = SolverFactory('ipopt').solve(m, tee=True) + def plotter(subplot, x, *series, **kwds): plt.subplot(subplot) - for i,y in enumerate(series): - plt.plot(list(x), [value(y[t]) for t in x], - 'brgcmk'[i%6]+kwds.get('points','')) - plt.title(kwds.get('title','')) - plt.legend(tuple(y.name for y in series), frameon=True, edgecolor='k').draw_frame(True) + for i, y in enumerate(series): + plt.plot( + list(x), [value(y[t]) for t in x], 'brgcmk'[i % 6] + kwds.get('points', '') + ) + plt.title(kwds.get('title', '')) + plt.legend(tuple(y.name for y in series), frameon=True, edgecolor='k').draw_frame( + True + ) plt.xlabel(x.name) - plt.gca().set_xlim([0,1]) + plt.gca().set_xlim([0, 1]) + import matplotlib.pyplot as plt + plotter(121, m.t, m.x1, m.x2, m.x3, title='Differential Variables') plotter(122, m.t, m.u, title='Control Variables', points='o-') plt.show() diff --git a/examples/dae/run_disease.py b/examples/dae/run_disease.py index 9f6f7811eb0..139046d434e 100644 --- a/examples/dae/run_disease.py +++ b/examples/dae/run_disease.py @@ -5,12 +5,16 @@ instance = model.create_instance('disease.dat') discretizer = TransformationFactory('dae.collocation') -discretizer.apply_to(instance,nfe=520,ncp=3) +discretizer.apply_to(instance, nfe=520, ncp=3) + def _S_bar(model): - return model.S_bar == sum(model.S[i] for i in model.TIME if i != 0)/(len(model.TIME)-1) -instance.con_S_bar = Constraint(rule=_S_bar) + return model.S_bar == sum(model.S[i] for i in model.TIME if i != 0) / ( + len(model.TIME) - 1 + ) -solver=SolverFactory('ipopt') -results = solver.solve(instance,tee=True) +instance.con_S_bar = Constraint(rule=_S_bar) + +solver = SolverFactory('ipopt') +results = solver.solve(instance, tee=True) diff --git a/examples/dae/run_distill.py b/examples/dae/run_distill.py index df449277fa0..d9ececf34fc 100644 --- a/examples/dae/run_distill.py +++ b/examples/dae/run_distill.py @@ -17,7 +17,7 @@ # Discretize using Finite Difference Approach discretizer = TransformationFactory('dae.finite_difference') -discretizer.apply_to(instance,nfe=50,scheme='BACKWARD') +discretizer.apply_to(instance, nfe=50, scheme='BACKWARD') # Discretize using Orthogonal Collocation # discretizer = TransformationFactory('dae.collocation') @@ -30,27 +30,32 @@ # discretized to ensure that we include all the discretization points # when we take the sum. + def obj_rule(m): - return m.alpha*sum((m.y[1,i] - m.y1_ref)**2 for i in m.t if i != 1) + m.rho*sum((m.u1[i] - m.u1_ref)**2 for i in m.t if i!=1) -instance.OBJ = Objective(rule=obj_rule) + return m.alpha * sum( + (m.y[1, i] - m.y1_ref) ** 2 for i in m.t if i != 1 + ) + m.rho * sum((m.u1[i] - m.u1_ref) ** 2 for i in m.t if i != 1) + + +instance.OBJ = Objective(rule=obj_rule) -solver=SolverFactory('ipopt') +solver = SolverFactory('ipopt') -results = solver.solve(instance,tee=True) +results = solver.solve(instance, tee=True) # If you have matplotlib you can use the following code to plot the # results -t = [] -x5 = [] +t = [] +x5 = [] x20 = [] -for i in sorted(instance.t): - x5.append(value(instance.x[5,i])) - x20.append(value(instance.x[20,i])) +for i in sorted(instance.t): + x5.append(value(instance.x[5, i])) + x20.append(value(instance.x[20, i])) t.append(i) import matplotlib.pyplot as plt -plt.plot(t,x5) -plt.plot(t,x20) +plt.plot(t, x5) +plt.plot(t, x20) plt.show() diff --git a/examples/dae/run_stochpdegas_automatic.py b/examples/dae/run_stochpdegas_automatic.py index 7df16b0e844..dd710588406 100644 --- a/examples/dae/run_stochpdegas_automatic.py +++ b/examples/dae/run_stochpdegas_automatic.py @@ -9,65 +9,126 @@ # discretize model discretizer = TransformationFactory('dae.finite_difference') -discretizer.apply_to(instance,nfe=1,wrt=instance.DIS,scheme='FORWARD') -discretizer.apply_to(instance,nfe=47,wrt=instance.TIME,scheme='BACKWARD') +discretizer.apply_to(instance, nfe=1, wrt=instance.DIS, scheme='FORWARD') +discretizer.apply_to(instance, nfe=47, wrt=instance.TIME, scheme='BACKWARD') # What it should be to match description in paper -#discretizer.apply_to(instance,nfe=48,wrt=instance.TIME,scheme='BACKWARD') +# discretizer.apply_to(instance,nfe=48,wrt=instance.TIME,scheme='BACKWARD') TimeStep = instance.TIME.at(2) - instance.TIME.at(1) -def supcost_rule(m,k): - return sum(m.cs*m.s[k,j,t]*(TimeStep) for j in m.SUP for t in m.TIME.get_finite_elements()) -instance.supcost = Expression(instance.SCEN,rule=supcost_rule) -def boostcost_rule(m,k): - return sum(m.ce*m.pow[k,j,t]*(TimeStep) for j in m.LINK_A for t in m.TIME.get_finite_elements()) -instance.boostcost = Expression(instance.SCEN,rule=boostcost_rule) +def supcost_rule(m, k): + return sum( + m.cs * m.s[k, j, t] * (TimeStep) + for j in m.SUP + for t in m.TIME.get_finite_elements() + ) -def trackcost_rule(m,k): - return sum(m.cd*(m.dem[k,j,t]-m.stochd[k,j,t])**2.0 for j in m.DEM for t in m.TIME.get_finite_elements()) -instance.trackcost = Expression(instance.SCEN,rule=trackcost_rule) -def sspcost_rule(m,k): - return sum(m.cT*(m.px[k,i,m.TIME.last(),j]-m.px[k,i,m.TIME.first(),j])**2.0 for i in m.LINK for j in m.DIS) -instance.sspcost = Expression(instance.SCEN,rule=sspcost_rule) +instance.supcost = Expression(instance.SCEN, rule=supcost_rule) -def ssfcost_rule(m,k): - return sum(m.cT*(m.fx[k,i,m.TIME.last(),j]-m.fx[k,i,m.TIME.first(),j])**2.0 for i in m.LINK for j in m.DIS) -instance.ssfcost = Expression(instance.SCEN,rule=ssfcost_rule) -def cost_rule(m,k): - return 1e-6*(m.supcost[k] + m.boostcost[k] + m.trackcost[k] + m.sspcost[k] + m.ssfcost[k]) -instance.cost = Expression(instance.SCEN,rule=cost_rule) +def boostcost_rule(m, k): + return sum( + m.ce * m.pow[k, j, t] * (TimeStep) + for j in m.LINK_A + for t in m.TIME.get_finite_elements() + ) + + +instance.boostcost = Expression(instance.SCEN, rule=boostcost_rule) + + +def trackcost_rule(m, k): + return sum( + m.cd * (m.dem[k, j, t] - m.stochd[k, j, t]) ** 2.0 + for j in m.DEM + for t in m.TIME.get_finite_elements() + ) + + +instance.trackcost = Expression(instance.SCEN, rule=trackcost_rule) + + +def sspcost_rule(m, k): + return sum( + m.cT * (m.px[k, i, m.TIME.last(), j] - m.px[k, i, m.TIME.first(), j]) ** 2.0 + for i in m.LINK + for j in m.DIS + ) + + +instance.sspcost = Expression(instance.SCEN, rule=sspcost_rule) + + +def ssfcost_rule(m, k): + return sum( + m.cT * (m.fx[k, i, m.TIME.last(), j] - m.fx[k, i, m.TIME.first(), j]) ** 2.0 + for i in m.LINK + for j in m.DIS + ) + + +instance.ssfcost = Expression(instance.SCEN, rule=ssfcost_rule) + + +def cost_rule(m, k): + return 1e-6 * ( + m.supcost[k] + m.boostcost[k] + m.trackcost[k] + m.sspcost[k] + m.ssfcost[k] + ) + + +instance.cost = Expression(instance.SCEN, rule=cost_rule) + def mcost_rule(m): - return (1.0/m.S)*sum(m.cost[k] for k in m.SCEN) + return (1.0 / m.S) * sum(m.cost[k] for k in m.SCEN) + + instance.mcost = Expression(rule=mcost_rule) -def eqcvar_rule(m,k): - return m.cost[k] - m.nu <= m.phi[k]; -instance.eqcvar = Constraint(instance.SCEN,rule=eqcvar_rule) + +def eqcvar_rule(m, k): + return m.cost[k] - m.nu <= m.phi[k] + + +instance.eqcvar = Constraint(instance.SCEN, rule=eqcvar_rule) + def obj_rule(m): - return (1.0-m.cvar_lambda)*m.mcost + m.cvar_lambda*m.cvarcost + return (1.0 - m.cvar_lambda) * m.mcost + m.cvar_lambda * m.cvarcost + + instance.obj = Objective(rule=obj_rule) -endTime = time.time()-start +endTime = time.time() - start print('model creation time = %s' % (endTime,)) for i in instance.SCEN: - print("Scenario %s = %s" % ( - i, sum(sum(0.5*value(instance.pow[i,j,k]) - for j in instance.LINK_A) - for k in instance.TIME.get_finite_elements()) )) + print( + "Scenario %s = %s" + % ( + i, + sum( + sum(0.5 * value(instance.pow[i, j, k]) for j in instance.LINK_A) + for k in instance.TIME.get_finite_elements() + ), + ) + ) -solver=SolverFactory('ipopt') -results = solver.solve(instance,tee=True) +solver = SolverFactory('ipopt') +results = solver.solve(instance, tee=True) for i in instance.SCEN: - print("Scenario %s = %s" % ( - i, sum(sum(0.5*value(instance.pow[i,j,k]) - for j in instance.LINK_A) - for k in instance.TIME.get_finite_elements()) )) + print( + "Scenario %s = %s" + % ( + i, + sum( + sum(0.5 * value(instance.pow[i, j, k]) for j in instance.LINK_A) + for k in instance.TIME.get_finite_elements() + ), + ) + ) diff --git a/examples/dae/simulator_dae_example.py b/examples/dae/simulator_dae_example.py index 8847ab35d50..ef6484be6c6 100644 --- a/examples/dae/simulator_dae_example.py +++ b/examples/dae/simulator_dae_example.py @@ -38,15 +38,17 @@ def create_model(): def _diffeq1(m, t): return m.dza[t] == -m.p1 * m.za[t] + m.p2 * m.zb[t] + m.diffeq1 = Constraint(m.t, rule=_diffeq1) def _diffeq2(m, t): - return m.dzb[t] == m.p1 * m.za[t] - \ - (m.p2 + m.p3) * m.zb[t] + m.p4 * m.zc[t] + return m.dzb[t] == m.p1 * m.za[t] - (m.p2 + m.p3) * m.zb[t] + m.p4 * m.zc[t] + m.diffeq2 = Constraint(m.t, rule=_diffeq2) def _algeq1(m, t): return m.za[t] + m.zb[t] + m.zc[t] == 1 + m.algeq1 = Constraint(m.t, rule=_algeq1) return m @@ -90,6 +92,7 @@ def plot_result(m, sim, tsim, profiles): plt.legend(loc='best') plt.show() + if __name__ == "__main__": model = create_model() sim, tsim, profiles = simulate_model(model) diff --git a/examples/dae/simulator_dae_multindex_example.py b/examples/dae/simulator_dae_multindex_example.py index 497cfc3650c..d1a97fec79f 100644 --- a/examples/dae/simulator_dae_multindex_example.py +++ b/examples/dae/simulator_dae_multindex_example.py @@ -28,6 +28,7 @@ def _p1_init(m, t): if t >= 0.5: return 1.0 return 4.0 + m.p1 = Param(m.t, initialize=4.0, default=_p1_init) m.p2 = Param(initialize=2.0) m.p3 = Param(initialize=40.0) @@ -50,15 +51,17 @@ def _p1_init(m, t): def _diffeq1(m, t): return m.dza[t] == -m.p1[t] * m.za[t] + m.p2 * m.zb[t] + m.diffeq1 = Constraint(m.t, rule=_diffeq1) def _diffeq2(m, t): - return m.dzb[t] == m.p1[t] * m.za[t] - \ - (m.p2 + m.p3) * m.zb[t] + m.p4 * m.zc[t] + return m.dzb[t] == m.p1[t] * m.za[t] - (m.p2 + m.p3) * m.zb[t] + m.p4 * m.zc[t] + m.diffeq2 = Constraint(m.t, rule=_diffeq2) def _algeq1(m, t): return m.za[t] + m.zb[t] + m.zc[t] == 1 + m.algeq1 = Constraint(m.t, rule=_algeq1) return m @@ -66,8 +69,9 @@ def _algeq1(m, t): def simulate_model(m): # Simulate the model using casadi sim = Simulator(m, package='casadi') - tsim, profiles = sim.simulate(numpoints=100, integrator='idas', - varying_inputs=m.var_input) + tsim, profiles = sim.simulate( + numpoints=100, integrator='idas', varying_inputs=m.var_input + ) # Discretize model using Orthogonal Collocation discretizer = TransformationFactory('dae.collocation') @@ -103,6 +107,7 @@ def plot_results(m, sim, tsim, profiles): plt.legend(loc='best') plt.show() + if __name__ == "__main__": model = create_model() sim, tsim, profiles = simulate_model(model) diff --git a/examples/dae/simulator_ode_example.py b/examples/dae/simulator_ode_example.py index b5f7c85fe0d..bf600cf163e 100644 --- a/examples/dae/simulator_ode_example.py +++ b/examples/dae/simulator_ode_example.py @@ -32,10 +32,12 @@ def create_model(): def _diffeq1(m, t): return m.domegadt[t] == -m.b * m.omega[t] - m.c * sin(m.theta[t]) + m.diffeq1 = Constraint(m.t, rule=_diffeq1) def _diffeq2(m, t): return m.dthetadt[t] == m.omega[t] + m.diffeq2 = Constraint(m.t, rule=_diffeq2) return m @@ -78,6 +80,7 @@ def plot_result(m, sim, tsim, profiles): plt.legend(loc='best') plt.show() + if __name__ == "__main__": model = create_model() sim, tsim, profiles = simulate_model(model) diff --git a/examples/dae/simulator_ode_multindex_example.py b/examples/dae/simulator_ode_multindex_example.py index fc42d276d76..fa2623f4cc2 100644 --- a/examples/dae/simulator_ode_multindex_example.py +++ b/examples/dae/simulator_ode_multindex_example.py @@ -15,7 +15,6 @@ def create_model(): - m = ConcreteModel() m.t = ContinuousSet(bounds=(0.0, 20.0)) @@ -24,12 +23,14 @@ def _b_default(m, t): if t >= 15: return 0.025 return 0.25 + m.b = Param(m.t, initialize=0.25, default=_b_default) def _c_default(m, t): if t >= 7: return 50 return 5 + m.c = Param(m.t, initialize=5.0, default=_c_default) m.omega = Var(m.t) @@ -43,12 +44,13 @@ def _c_default(m, t): m.theta[0] = 3.14 - 0.1 def _diffeq1(m, t): - return m.domegadt[t] == -m.b[t] * m.omega[t] - \ - m.c[t] * sin(m.theta[t]) + return m.domegadt[t] == -m.b[t] * m.omega[t] - m.c[t] * sin(m.theta[t]) + m.diffeq1 = Constraint(m.t, rule=_diffeq1) def _diffeq2(m, t): return m.dthetadt[t] == m.omega[t] + m.diffeq2 = Constraint(m.t, rule=_diffeq2) b_profile = {0: 0.25, 15: 0.025} @@ -62,19 +64,18 @@ def _diffeq2(m, t): def simulate_model(m): - if False: # Simulate the model using casadi sim = Simulator(m, package='casadi') - tsim, profiles = sim.simulate(numpoints=200, - integrator='cvodes', - varying_inputs=m.var_input) + tsim, profiles = sim.simulate( + numpoints=200, integrator='cvodes', varying_inputs=m.var_input + ) else: # Simulate the model using scipy sim = Simulator(m, package='scipy') - tsim, profiles = sim.simulate(numpoints=200, - integrator='vode', - varying_inputs=m.var_input) + tsim, profiles = sim.simulate( + numpoints=200, integrator='vode', varying_inputs=m.var_input + ) # Discretize model using Orthogonal Collocation discretizer = TransformationFactory('dae.collocation') @@ -102,6 +103,7 @@ def plot_result(m, sim, tsim, profiles): plt.legend(loc='best') plt.show() + if __name__ == "__main__": model = create_model() sim, tsim, profiles = simulate_model(model) diff --git a/examples/dae/stochpdegas_automatic.py b/examples/dae/stochpdegas_automatic.py index beb4099c453..3cd5c34f011 100644 --- a/examples/dae/stochpdegas_automatic.py +++ b/examples/dae/stochpdegas_automatic.py @@ -1,7 +1,7 @@ # stochastic pde model for natural gas network # victor m. zavala / 2013 -#from __future__ import division +# from __future__ import division from pyomo.environ import * from pyomo.dae import * @@ -10,134 +10,161 @@ # sets model.TF = Param(within=NonNegativeReals) + + def _tinit(m): - return [0.5,value(m.TF)] + return [0.5, value(m.TF)] # What it should be to match description in paper - #return [0,value(m.TF)] + # return [0,value(m.TF)] + + model.TIME = ContinuousSet(initialize=_tinit) -model.DIS = ContinuousSet(bounds=(0.0,1.0)) +model.DIS = ContinuousSet(bounds=(0.0, 1.0)) model.S = Param(within=PositiveIntegers) -model.SCEN = RangeSet(1,model.S) +model.SCEN = RangeSet(1, model.S) # links model.LINK = Set(dimen=1) model.lstartloc = Param(model.LINK, within=Any) model.lendloc = Param(model.LINK, within=Any) -model.ldiam = Param(model.LINK,within=PositiveReals,mutable=True) -model.llength = Param(model.LINK,within=PositiveReals,mutable=True) +model.ldiam = Param(model.LINK, within=PositiveReals, mutable=True) +model.llength = Param(model.LINK, within=PositiveReals, mutable=True) model.ltype = Param(model.LINK, within=Any) + def link_a_init_rule(m): return (l for l in m.LINK if m.ltype[l] == "a") + + model.LINK_A = Set(initialize=link_a_init_rule) + def link_p_init_rule(m): return (l for l in m.LINK if m.ltype[l] == "p") + + model.LINK_P = Set(initialize=link_p_init_rule) # nodes model.NODE = Set() -model.pmin = Param(model.NODE,within=PositiveReals,mutable=True) -model.pmax = Param(model.NODE,within=PositiveReals,mutable=True) +model.pmin = Param(model.NODE, within=PositiveReals, mutable=True) +model.pmax = Param(model.NODE, within=PositiveReals, mutable=True) # supply model.SUP = Set() model.sloc = Param(model.SUP, within=Any) -model.smin = Param(model.SUP,within=NonNegativeReals,mutable=True) -model.smax = Param(model.SUP,within=NonNegativeReals,mutable=True) -model.scost = Param(model.SUP,within=NonNegativeReals) +model.smin = Param(model.SUP, within=NonNegativeReals, mutable=True) +model.smax = Param(model.SUP, within=NonNegativeReals, mutable=True) +model.scost = Param(model.SUP, within=NonNegativeReals) # demand model.DEM = Set() model.dloc = Param(model.DEM, within=Any) -model.d = Param(model.DEM, within=PositiveReals,mutable=True) +model.d = Param(model.DEM, within=PositiveReals, mutable=True) # physical data -model.eps = Param(initialize=0.025,within=PositiveReals) -model.z = Param(initialize=0.80,within=PositiveReals) -model.rhon = Param(initialize=0.72,within=PositiveReals) -model.R = Param(initialize=8314.0,within=PositiveReals) -model.M = Param(initialize=18.0,within=PositiveReals) -model.pi = Param(initialize=3.14,within=PositiveReals) -model.nu2 = Param(within=PositiveReals,mutable=True) -model.lam = Param(model.LINK,within=PositiveReals,mutable=True) -model.A = Param(model.LINK,within=NonNegativeReals,mutable=True) -model.Tgas = Param(initialize=293.15,within=PositiveReals) -model.Cp = Param(initialize=2.34,within=PositiveReals) -model.Cv = Param(initialize=1.85,within=PositiveReals) -model.gam = Param(initialize=model.Cp/model.Cv, within=PositiveReals) -model.om = Param(initialize=(model.gam-1.0)/model.gam,within=PositiveReals) +model.eps = Param(initialize=0.025, within=PositiveReals) +model.z = Param(initialize=0.80, within=PositiveReals) +model.rhon = Param(initialize=0.72, within=PositiveReals) +model.R = Param(initialize=8314.0, within=PositiveReals) +model.M = Param(initialize=18.0, within=PositiveReals) +model.pi = Param(initialize=3.14, within=PositiveReals) +model.nu2 = Param(within=PositiveReals, mutable=True) +model.lam = Param(model.LINK, within=PositiveReals, mutable=True) +model.A = Param(model.LINK, within=NonNegativeReals, mutable=True) +model.Tgas = Param(initialize=293.15, within=PositiveReals) +model.Cp = Param(initialize=2.34, within=PositiveReals) +model.Cv = Param(initialize=1.85, within=PositiveReals) +model.gam = Param(initialize=model.Cp / model.Cv, within=PositiveReals) +model.om = Param(initialize=(model.gam - 1.0) / model.gam, within=PositiveReals) # scaling and constants -model.ffac = Param(within=PositiveReals,initialize=(1.0e+6*model.rhon)/(24.0*3600.0)) -model.ffac2 = Param(within=PositiveReals,initialize=(3600.0)/(1.0e+4*model.rhon)) -model.pfac = Param(within=PositiveReals,initialize=1.0e+5) -model.pfac2 = Param(within=PositiveReals,initialize=1.0e-5) -model.dfac = Param(within=PositiveReals,initialize=1.0e-3) -model.lfac = Param(within=PositiveReals,initialize=1.0e+3) - -model.c1 = Param(model.LINK,within=PositiveReals,mutable=True) -model.c2 = Param(model.LINK,within=PositiveReals,mutable=True) -model.c3 = Param(model.LINK,within=PositiveReals,mutable=True) -model.c4 = Param(within=PositiveReals,mutable=True) +model.ffac = Param( + within=PositiveReals, initialize=(1.0e6 * model.rhon) / (24.0 * 3600.0) +) +model.ffac2 = Param(within=PositiveReals, initialize=(3600.0) / (1.0e4 * model.rhon)) +model.pfac = Param(within=PositiveReals, initialize=1.0e5) +model.pfac2 = Param(within=PositiveReals, initialize=1.0e-5) +model.dfac = Param(within=PositiveReals, initialize=1.0e-3) +model.lfac = Param(within=PositiveReals, initialize=1.0e3) + +model.c1 = Param(model.LINK, within=PositiveReals, mutable=True) +model.c2 = Param(model.LINK, within=PositiveReals, mutable=True) +model.c3 = Param(model.LINK, within=PositiveReals, mutable=True) +model.c4 = Param(within=PositiveReals, mutable=True) # cost factors -model.ce = Param(initialize=0.1,within=NonNegativeReals) -model.cd = Param(initialize=1.0e+6,within=NonNegativeReals) -model.cT = Param(initialize=1.0e+6,within=NonNegativeReals) -model.cs = Param(initialize=0.0,within=NonNegativeReals) +model.ce = Param(initialize=0.1, within=NonNegativeReals) +model.cd = Param(initialize=1.0e6, within=NonNegativeReals) +model.cT = Param(initialize=1.0e6, within=NonNegativeReals) +model.cs = Param(initialize=0.0, within=NonNegativeReals) model.TDEC = Param(within=PositiveReals) # define stochastic info -model.rand_d = Param(model.SCEN,model.DEM,within=NonNegativeReals,mutable=True) +model.rand_d = Param(model.SCEN, model.DEM, within=NonNegativeReals, mutable=True) + # convert units for input data def rescale_rule(m): - for i in m.LINK: - m.ldiam[i] = m.ldiam[i]*m.dfac - m.llength[i] = m.llength[i]*m.lfac + m.ldiam[i] = m.ldiam[i] * m.dfac + m.llength[i] = m.llength[i] * m.lfac # m.dx[i] = m.llength[i]/float(m.DIS.last()) for i in m.SUP: - m.smin[i] = m.smin[i]*m.ffac*m.ffac2 # from scmx106/day to kg/s and then to scmx10-4/hr - m.smax[i] = m.smax[i]*m.ffac*m.ffac2 # from scmx106/day to kg/s and then to scmx10-4/hr + m.smin[i] = ( + m.smin[i] * m.ffac * m.ffac2 + ) # from scmx106/day to kg/s and then to scmx10-4/hr + m.smax[i] = ( + m.smax[i] * m.ffac * m.ffac2 + ) # from scmx106/day to kg/s and then to scmx10-4/hr for i in m.DEM: - m.d[i] = m.d[i]*m.ffac*m.ffac2 + m.d[i] = m.d[i] * m.ffac * m.ffac2 for i in m.NODE: - m.pmin[i] = m.pmin[i]*m.pfac*m.pfac2 # from bar to Pascals and then to bar - m.pmax[i] = m.pmax[i]*m.pfac*m.pfac2 # from bar to Pascals and then to bar + m.pmin[i] = m.pmin[i] * m.pfac * m.pfac2 # from bar to Pascals and then to bar + m.pmax[i] = m.pmax[i] * m.pfac * m.pfac2 # from bar to Pascals and then to bar + + model.rescale = BuildAction(rule=rescale_rule) -def compute_constants(m): +def compute_constants(m): for i in m.LINK: - m.lam[i] = (2.0*log10(3.7*m.ldiam[i]/(m.eps*m.dfac)))**(-2.0) - m.A[i] = (1.0/4.0)*m.pi*m.ldiam[i]*m.ldiam[i] - m.nu2 = m.gam*m.z*m.R*m.Tgas/m.M - m.c1[i] = (m.pfac2/m.ffac2)*(m.nu2/m.A[i]) - m.c2[i] = m.A[i]*(m.ffac2/m.pfac2) - m.c3[i] = m.A[i]*(m.pfac2/m.ffac2)*(8.0*m.lam[i]*m.nu2)/(m.pi*m.pi*(m.ldiam[i]**5.0)) - m.c4 = (1/m.ffac2)*(m.Cp*m.Tgas) + m.lam[i] = (2.0 * log10(3.7 * m.ldiam[i] / (m.eps * m.dfac))) ** (-2.0) + m.A[i] = (1.0 / 4.0) * m.pi * m.ldiam[i] * m.ldiam[i] + m.nu2 = m.gam * m.z * m.R * m.Tgas / m.M + m.c1[i] = (m.pfac2 / m.ffac2) * (m.nu2 / m.A[i]) + m.c2[i] = m.A[i] * (m.ffac2 / m.pfac2) + m.c3[i] = ( + m.A[i] + * (m.pfac2 / m.ffac2) + * (8.0 * m.lam[i] * m.nu2) + / (m.pi * m.pi * (m.ldiam[i] ** 5.0)) + ) + m.c4 = (1 / m.ffac2) * (m.Cp * m.Tgas) + model.compute_constants = BuildAction(rule=compute_constants) + # set stochastic demands def compute_demands_rule(m): - for k in m.SCEN: for j in m.DEM: if k == 2: - m.rand_d[k,j] = 1.1*m.d[j] + m.rand_d[k, j] = 1.1 * m.d[j] elif k == 1: - m.rand_d[k,j] = 1.2*m.d[j] + m.rand_d[k, j] = 1.2 * m.d[j] else: - m.rand_d[k,j] = 1.3*m.d[j] + m.rand_d[k, j] = 1.3 * m.d[j] + + model.compute_demands = BuildAction(rule=compute_demands_rule) -def stochd_init(m,k,j,t): + +def stochd_init(m, k, j, t): # What it should be to match description in paper # if t < m.TDEC: # return m.d[j] @@ -145,152 +172,264 @@ def stochd_init(m,k,j,t): # return m.rand_d[k,j] # if t >= m.TDEC+5: # return m.d[j] - if t < m.TDEC+1: + if t < m.TDEC + 1: return m.d[j] - if t >= m.TDEC+1 and t < m.TDEC+1+4.5: - return m.rand_d[k,j] - if t >= m.TDEC+1+4.5: + if t >= m.TDEC + 1 and t < m.TDEC + 1 + 4.5: + return m.rand_d[k, j] + if t >= m.TDEC + 1 + 4.5: return m.d[j] -model.stochd = Param(model.SCEN,model.DEM,model.TIME,within=PositiveReals,mutable=True,default=stochd_init) + +model.stochd = Param( + model.SCEN, + model.DEM, + model.TIME, + within=PositiveReals, + mutable=True, + default=stochd_init, +) + # define temporal variables -def p_bounds_rule(m,k,j,t): - return (value(m.pmin[j]),value(m.pmax[j])) -def p_init(m,k,j,t): +def p_bounds_rule(m, k, j, t): + return (value(m.pmin[j]), value(m.pmax[j])) + + +def p_init(m, k, j, t): return (value(m.pmax[j]) + value(m.pmin[j])) / 2 -model.p = Var(model.SCEN, model.NODE, model.TIME, bounds=p_bounds_rule, initialize=p_init) -model.dp = Var(model.SCEN,model.LINK_A,model.TIME,bounds=(0.0,100.0), initialize=10.0) -model.fin = Var(model.SCEN,model.LINK,model.TIME,bounds=(1.0,500.0),initialize=100.0) -model.fout = Var(model.SCEN,model.LINK,model.TIME,bounds=(1.0,500.0),initialize=100.0) - -def s_bounds_rule(m,k,j,t): - return (0.01,value(m.smax[j])) -model.s = Var(model.SCEN,model.SUP,model.TIME,bounds=s_bounds_rule,initialize=10.0) -model.dem = Var(model.SCEN,model.DEM,model.TIME,initialize=100.0) -model.pow = Var(model.SCEN,model.LINK_A,model.TIME,bounds=(0.0,3000.0),initialize=1000.0) -model.slack = Var(model.SCEN,model.LINK,model.TIME,model.DIS,bounds=(0.0,None),initialize=10.0) + + +model.p = Var( + model.SCEN, model.NODE, model.TIME, bounds=p_bounds_rule, initialize=p_init +) +model.dp = Var( + model.SCEN, model.LINK_A, model.TIME, bounds=(0.0, 100.0), initialize=10.0 +) +model.fin = Var( + model.SCEN, model.LINK, model.TIME, bounds=(1.0, 500.0), initialize=100.0 +) +model.fout = Var( + model.SCEN, model.LINK, model.TIME, bounds=(1.0, 500.0), initialize=100.0 +) + + +def s_bounds_rule(m, k, j, t): + return (0.01, value(m.smax[j])) + + +model.s = Var(model.SCEN, model.SUP, model.TIME, bounds=s_bounds_rule, initialize=10.0) +model.dem = Var(model.SCEN, model.DEM, model.TIME, initialize=100.0) +model.pow = Var( + model.SCEN, model.LINK_A, model.TIME, bounds=(0.0, 3000.0), initialize=1000.0 +) +model.slack = Var( + model.SCEN, model.LINK, model.TIME, model.DIS, bounds=(0.0, None), initialize=10.0 +) # define spatio-temporal variables -model.px = Var(model.SCEN,model.LINK,model.TIME,model.DIS,bounds=(10.0,100.0),initialize=50.0) -model.fx = Var(model.SCEN,model.LINK,model.TIME,model.DIS,bounds=(1.0,100.0),initialize=100.0) +model.px = Var( + model.SCEN, model.LINK, model.TIME, model.DIS, bounds=(10.0, 100.0), initialize=50.0 +) +model.fx = Var( + model.SCEN, model.LINK, model.TIME, model.DIS, bounds=(1.0, 100.0), initialize=100.0 +) # define derivatives -model.dpxdt = DerivativeVar(model.px,wrt=model.TIME,initialize=0) -model.dpxdx = DerivativeVar(model.px,wrt=model.DIS,initialize=0) -model.dfxdt = DerivativeVar(model.fx,wrt=model.TIME,initialize=0) -model.dfxdx = DerivativeVar(model.fx,wrt=model.DIS,initialize=0) +model.dpxdt = DerivativeVar(model.px, wrt=model.TIME, initialize=0) +model.dpxdx = DerivativeVar(model.px, wrt=model.DIS, initialize=0) +model.dfxdt = DerivativeVar(model.fx, wrt=model.TIME, initialize=0) +model.dfxdx = DerivativeVar(model.fx, wrt=model.DIS, initialize=0) # ----------- MODEL -------------- + # compressor equations -def powereq_rule(m,j,i,t): - return m.pow[j,i,t] == m.c4*m.fin[j,i,t]*(((m.p[j,m.lstartloc[i],t]+m.dp[j,i,t])/m.p[j,m.lstartloc[i],t])**m.om - 1.0) -model.powereq = Constraint(model.SCEN,model.LINK_A,model.TIME,rule=powereq_rule) +def powereq_rule(m, j, i, t): + return m.pow[j, i, t] == m.c4 * m.fin[j, i, t] * ( + ((m.p[j, m.lstartloc[i], t] + m.dp[j, i, t]) / m.p[j, m.lstartloc[i], t]) + ** m.om + - 1.0 + ) + + +model.powereq = Constraint(model.SCEN, model.LINK_A, model.TIME, rule=powereq_rule) # cvar model model.cvar_lambda = Param(within=NonNegativeReals) model.nu = Var(initialize=100.0) -model.phi = Var(model.SCEN,bounds=(0.0,None),initialize=100.0) +model.phi = Var(model.SCEN, bounds=(0.0, None), initialize=100.0) def cvarcost_rule(m): - return (1.0/m.S)*sum((m.phi[k]/(1.0-0.95) + m.nu) for k in m.SCEN) + return (1.0 / m.S) * sum((m.phi[k] / (1.0 - 0.95) + m.nu) for k in m.SCEN) + + model.cvarcost = Expression(rule=cvarcost_rule) + # node balances -def nodeeq_rule(m,k,i,t): - return sum(m.fout[k,j,t] for j in m.LINK if m.lendloc[j]==i) + \ - sum(m.s[k,j,t] for j in m.SUP if m.sloc[j]==i) - \ - sum(m.fin[k,j,t] for j in m.LINK if m.lstartloc[j]==i) - \ - sum(m.dem[k,j,t] for j in m.DEM if m.dloc[j]==i) == 0.0 -model.nodeeq = Constraint(model.SCEN,model.NODE,model.TIME,rule=nodeeq_rule) +def nodeeq_rule(m, k, i, t): + return ( + sum(m.fout[k, j, t] for j in m.LINK if m.lendloc[j] == i) + + sum(m.s[k, j, t] for j in m.SUP if m.sloc[j] == i) + - sum(m.fin[k, j, t] for j in m.LINK if m.lstartloc[j] == i) + - sum(m.dem[k, j, t] for j in m.DEM if m.dloc[j] == i) + == 0.0 + ) + + +model.nodeeq = Constraint(model.SCEN, model.NODE, model.TIME, rule=nodeeq_rule) + # boundary conditions flow -def flow_start_rule(m,j,i,t): - return m.fx[j,i,t,m.DIS.first()] == m.fin[j,i,t] -model.flow_start = Constraint(model.SCEN,model.LINK,model.TIME,rule=flow_start_rule) +def flow_start_rule(m, j, i, t): + return m.fx[j, i, t, m.DIS.first()] == m.fin[j, i, t] + + +model.flow_start = Constraint(model.SCEN, model.LINK, model.TIME, rule=flow_start_rule) + + +def flow_end_rule(m, j, i, t): + return m.fx[j, i, t, m.DIS.last()] == m.fout[j, i, t] + + +model.flow_end = Constraint(model.SCEN, model.LINK, model.TIME, rule=flow_end_rule) -def flow_end_rule(m,j,i,t): - return m.fx[j,i,t,m.DIS.last()] == m.fout[j,i,t] -model.flow_end = Constraint(model.SCEN,model.LINK,model.TIME,rule=flow_end_rule) # First PDE for gas network model -def flow_rule(m,j,i,t,k): +def flow_rule(m, j, i, t, k): if t == m.TIME.first() or k == m.DIS.last(): - return Constraint.Skip # Do not apply pde at initial time or final location - return m.dpxdt[j,i,t,k]/3600 + m.c1[i]/m.llength[i]*m.dfxdx[j,i,t,k] == 0 -model.flow = Constraint(model.SCEN,model.LINK,model.TIME,model.DIS,rule=flow_rule) + return Constraint.Skip # Do not apply pde at initial time or final location + return ( + m.dpxdt[j, i, t, k] / 3600 + m.c1[i] / m.llength[i] * m.dfxdx[j, i, t, k] == 0 + ) + + +model.flow = Constraint(model.SCEN, model.LINK, model.TIME, model.DIS, rule=flow_rule) + # Second PDE for gas network model -def press_rule(m,j,i,t,k): +def press_rule(m, j, i, t, k): if t == m.TIME.first() or k == m.DIS.last(): - return Constraint.Skip # Do not apply pde at initial time or final location - return m.dfxdt[j,i,t,k]/3600 == -m.c2[i]/m.llength[i]*m.dpxdx[j,i,t,k] - m.slack[j,i,t,k] -model.press = Constraint(model.SCEN,model.LINK,model.TIME,model.DIS,rule=press_rule) + return Constraint.Skip # Do not apply pde at initial time or final location + return ( + m.dfxdt[j, i, t, k] / 3600 + == -m.c2[i] / m.llength[i] * m.dpxdx[j, i, t, k] - m.slack[j, i, t, k] + ) + + +model.press = Constraint(model.SCEN, model.LINK, model.TIME, model.DIS, rule=press_rule) + -def slackeq_rule(m,j,i,t,k): +def slackeq_rule(m, j, i, t, k): if t == m.TIME.last(): return Constraint.Skip - return m.slack[j,i,t,k]*m.px[j,i,t,k] == m.c3[i]*m.fx[j,i,t,k]*m.fx[j,i,t,k] -model.slackeq = Constraint(model.SCEN,model.LINK,model.TIME,model.DIS,rule=slackeq_rule) + return ( + m.slack[j, i, t, k] * m.px[j, i, t, k] + == m.c3[i] * m.fx[j, i, t, k] * m.fx[j, i, t, k] + ) + + +model.slackeq = Constraint( + model.SCEN, model.LINK, model.TIME, model.DIS, rule=slackeq_rule +) + # boundary conditions pressure, passive links -def presspas_start_rule(m,j,i,t): - return m.px[j,i,t,m.DIS.first()] == m.p[j,m.lstartloc[i],t] -model.presspas_start = Constraint(model.SCEN,model.LINK_P,model.TIME,rule=presspas_start_rule) +def presspas_start_rule(m, j, i, t): + return m.px[j, i, t, m.DIS.first()] == m.p[j, m.lstartloc[i], t] + + +model.presspas_start = Constraint( + model.SCEN, model.LINK_P, model.TIME, rule=presspas_start_rule +) + + +def presspas_end_rule(m, j, i, t): + return m.px[j, i, t, m.DIS.last()] == m.p[j, m.lendloc[i], t] + + +model.presspas_end = Constraint( + model.SCEN, model.LINK_P, model.TIME, rule=presspas_end_rule +) -def presspas_end_rule(m,j,i,t): - return m.px[j,i,t,m.DIS.last()] == m.p[j,m.lendloc[i],t] -model.presspas_end = Constraint(model.SCEN,model.LINK_P,model.TIME,rule=presspas_end_rule) # boundary conditions pressure, active links -def pressact_start_rule(m,j,i,t): - return m.px[j,i,t,m.DIS.first()] == m.p[j,m.lstartloc[i],t]+m.dp[j,i,t] -model.pressact_start = Constraint(model.SCEN,model.LINK_A,model.TIME,rule=pressact_start_rule) +def pressact_start_rule(m, j, i, t): + return m.px[j, i, t, m.DIS.first()] == m.p[j, m.lstartloc[i], t] + m.dp[j, i, t] + + +model.pressact_start = Constraint( + model.SCEN, model.LINK_A, model.TIME, rule=pressact_start_rule +) + + +def pressact_end_rule(m, j, i, t): + return m.px[j, i, t, m.DIS.last()] == m.p[j, m.lendloc[i], t] + + +model.pressact_end = Constraint( + model.SCEN, model.LINK_A, model.TIME, rule=pressact_end_rule +) -def pressact_end_rule(m,j,i,t): - return m.px[j,i,t,m.DIS.last()] == m.p[j,m.lendloc[i],t] -model.pressact_end = Constraint(model.SCEN,model.LINK_A,model.TIME,rule=pressact_end_rule) # fix pressure at supply nodes -def suppres_rule(m,k,j,t): - return m.p[k,m.sloc[j],t] == m.pmin[m.sloc[j]] -model.suppres = Constraint(model.SCEN,model.SUP,model.TIME,rule=suppres_rule) +def suppress_rule(m, k, j, t): + return m.p[k, m.sloc[j], t] == m.pmin[m.sloc[j]] + + +model.suppress = Constraint(model.SCEN, model.SUP, model.TIME, rule=suppress_rule) + # discharge pressure for compressors -def dispress_rule(m,j,i,t): - return m.p[j,m.lstartloc[i],t]+m.dp[j,i,t] <= m.pmax[m.lstartloc[i]] -model.dispress = Constraint(model.SCEN,model.LINK_A,model.TIME,rule=dispress_rule) +def dispress_rule(m, j, i, t): + return m.p[j, m.lstartloc[i], t] + m.dp[j, i, t] <= m.pmax[m.lstartloc[i]] + + +model.dispress = Constraint(model.SCEN, model.LINK_A, model.TIME, rule=dispress_rule) + # ss constraints -def flow_ss_rule(m,j,i,k): +def flow_ss_rule(m, j, i, k): if k == m.DIS.last(): return Constraint.Skip - return m.dfxdx[j,i,m.TIME.first(),k]/m.llength[i] == 0.0 -model.flow_ss = Constraint(model.SCEN,model.LINK,model.DIS,rule=flow_ss_rule) + return m.dfxdx[j, i, m.TIME.first(), k] / m.llength[i] == 0.0 + + +model.flow_ss = Constraint(model.SCEN, model.LINK, model.DIS, rule=flow_ss_rule) -def pres_ss_rule(m,j,i,k): + +def pres_ss_rule(m, j, i, k): if k == m.DIS.last(): return Constraint.Skip - return 0.0 == - m.c2[i]/m.llength[i]*m.dpxdx[j,i,m.TIME.first(),k] - m.slack[j,i,m.TIME.first(),k]; -model.pres_ss = Constraint(model.SCEN,model.LINK,model.DIS,rule=pres_ss_rule) + return ( + 0.0 + == -m.c2[i] / m.llength[i] * m.dpxdx[j, i, m.TIME.first(), k] + - m.slack[j, i, m.TIME.first(), k] + ) + + +model.pres_ss = Constraint(model.SCEN, model.LINK, model.DIS, rule=pres_ss_rule) + # non-anticipativity constraints -def nonantdq_rule(m,j,i,t): +def nonantdq_rule(m, j, i, t): if j == 1: return Constraint.Skip - if t >= m.TDEC+1: + if t >= m.TDEC + 1: return Constraint.Skip - return m.dp[j,i,t] == m.dp[1,i,t] + return m.dp[j, i, t] == m.dp[1, i, t] + -model.nonantdq = Constraint(model.SCEN,model.LINK_A,model.TIME,rule=nonantdq_rule) +model.nonantdq = Constraint(model.SCEN, model.LINK_A, model.TIME, rule=nonantdq_rule) -def nonantde_rule(m,j,i,t): + +def nonantde_rule(m, j, i, t): if j == 1: return Constraint.Skip - if t >= m.TDEC+1: + if t >= m.TDEC + 1: return Constraint.Skip - return m.dem[j,i,t] == m.dem[1,i,t] + return m.dem[j, i, t] == m.dem[1, i, t] + -model.nonantde = Constraint(model.SCEN,model.DEM,model.TIME,rule=nonantde_rule) +model.nonantde = Constraint(model.SCEN, model.DEM, model.TIME, rule=nonantde_rule) diff --git a/examples/doc/samples/__init__.py b/examples/doc/samples/__init__.py index 6ac6c575ea9..3115f06ef53 100644 --- a/examples/doc/samples/__init__.py +++ b/examples/doc/samples/__init__.py @@ -1 +1 @@ -# Dummy file for nose tests +# Dummy file for pytest diff --git a/examples/doc/samples/case_studies/deer/DeerProblem.py b/examples/doc/samples/case_studies/deer/DeerProblem.py index b94e92d5b2d..0b6b7252aaa 100644 --- a/examples/doc/samples/case_studies/deer/DeerProblem.py +++ b/examples/doc/samples/case_studies/deer/DeerProblem.py @@ -6,24 +6,24 @@ model = AbstractModel() -model.p1 = Param(); -model.p2 = Param(); -model.p3 = Param(); -model.p4 = Param(); -model.p5 = Param(); -model.p6 = Param(); -model.p7 = Param(); -model.p8 = Param(); -model.p9 = Param(); -model.ps = Param(); - -model.f = Var(initialize = 20, within=PositiveReals) -model.d = Var(initialize = 20, within=PositiveReals) -model.b = Var(initialize = 20, within=PositiveReals) - -model.hf = Var(initialize = 20, within=PositiveReals) -model.hd = Var(initialize = 20, within=PositiveReals) -model.hb = Var(initialize = 20, within=PositiveReals) +model.p1 = Param() +model.p2 = Param() +model.p3 = Param() +model.p4 = Param() +model.p5 = Param() +model.p6 = Param() +model.p7 = Param() +model.p8 = Param() +model.p9 = Param() +model.ps = Param() + +model.f = Var(initialize=20, within=PositiveReals) +model.d = Var(initialize=20, within=PositiveReals) +model.b = Var(initialize=20, within=PositiveReals) + +model.hf = Var(initialize=20, within=PositiveReals) +model.hd = Var(initialize=20, within=PositiveReals) +model.hb = Var(initialize=20, within=PositiveReals) model.br = Var(initialize=1.5, within=PositiveReals) @@ -31,33 +31,62 @@ def obj_rule(amodel): - return 10*amodel.hb + amodel.hd + amodel.hf + return 10 * amodel.hb + amodel.hd + amodel.hf + + model.obj = Objective(rule=obj_rule, sense=maximize) + def f_bal_rule(amodel): - return amodel.f == amodel.p1*amodel.br*(amodel.p2/10.0*amodel.f + amodel.p3*amodel.d)-amodel.hf + return ( + amodel.f + == amodel.p1 * amodel.br * (amodel.p2 / 10.0 * amodel.f + amodel.p3 * amodel.d) + - amodel.hf + ) + + model.f_bal = Constraint(rule=f_bal_rule) + def d_bal_rule(amodel): - return amodel.d == amodel.p4*amodel.d + amodel.p5/2.0*amodel.f - amodel.hd + return amodel.d == amodel.p4 * amodel.d + amodel.p5 / 2.0 * amodel.f - amodel.hd + + model.d_bal = Constraint(rule=d_bal_rule) + def b_bal_rule(amodel): - return amodel.b == amodel.p6*amodel.b + amodel.p5/2.0*amodel.f - amodel.hb + return amodel.b == amodel.p6 * amodel.b + amodel.p5 / 2.0 * amodel.f - amodel.hb + + model.b_bal = Constraint(rule=b_bal_rule) + def food_cons_rule(amodel): - return amodel.c == amodel.p7*amodel.b + amodel.p8*amodel.d + amodel.p9*amodel.f + return ( + amodel.c == amodel.p7 * amodel.b + amodel.p8 * amodel.d + amodel.p9 * amodel.f + ) + + model.food_cons = Constraint(rule=food_cons_rule) + def supply_rule(amodel): return amodel.c <= amodel.ps + + model.supply = Constraint(rule=supply_rule) + def birth_rule(amodel): - return amodel.br == 1.1 + 0.8*(amodel.ps - amodel.c)/amodel.ps + return amodel.br == 1.1 + 0.8 * (amodel.ps - amodel.c) / amodel.ps + + model.birth = Constraint(rule=birth_rule) + def proc_rule(amodel): - return amodel.b >= 1.0/5.0*(0.4*amodel.f + amodel.d) + return amodel.b >= 1.0 / 5.0 * (0.4 * amodel.f + amodel.d) + + model.proc = Constraint(rule=proc_rule) diff --git a/examples/doc/samples/case_studies/diet/DietProblem.py b/examples/doc/samples/case_studies/diet/DietProblem.py index f72a574f2b8..f070201c28e 100644 --- a/examples/doc/samples/case_studies/diet/DietProblem.py +++ b/examples/doc/samples/case_studies/diet/DietProblem.py @@ -5,25 +5,33 @@ model.foods = Set() model.nutrients = Set() model.costs = Param(model.foods) -model.min_nutrient=Param(model.nutrients) -model.max_nutrient=Param(model.nutrients) -model.volumes=Param(model.foods) -model.max_volume=Param() -model.nutrient_value=Param(model.nutrients, model.foods) -model.amount=Var(model.foods, within = NonNegativeReals) +model.min_nutrient = Param(model.nutrients) +model.max_nutrient = Param(model.nutrients) +model.volumes = Param(model.foods) +model.max_volume = Param() +model.nutrient_value = Param(model.nutrients, model.foods) +model.amount = Var(model.foods, within=NonNegativeReals) + def costRule(model): - return sum(model.costs[n]*model.amount[n] for n in model.foods) + return sum(model.costs[n] * model.amount[n] for n in model.foods) + + +model.cost = Objective(rule=costRule) -model.cost=Objective(rule=costRule) def volumeRule(model): - return sum(model.volumes[n]*model.amount[n] for n in model.foods) <= model.max_volume + return ( + sum(model.volumes[n] * model.amount[n] for n in model.foods) <= model.max_volume + ) + model.volume = Constraint(rule=volumeRule) + def nutrientRule(model, n): - value = sum(model.nutrient_value[n,f]*model.amount[f] for f in model.foods) + value = sum(model.nutrient_value[n, f] * model.amount[f] for f in model.foods) return (model.min_nutrient[n], value, model.max_nutrient[n]) + model.nutrientConstraint = Constraint(model.nutrients, rule=nutrientRule) diff --git a/examples/doc/samples/case_studies/diet/DietProblem.tex b/examples/doc/samples/case_studies/diet/DietProblem.tex index 7f08ff900e0..d933e097d88 100644 --- a/examples/doc/samples/case_studies/diet/DietProblem.tex +++ b/examples/doc/samples/case_studies/diet/DietProblem.tex @@ -60,7 +60,7 @@ \subsection*{Build the model} We restrict our domain to the non-negative reals. If we accepted negative numbers than the model could tell us to buy negative amounts of food, which is an unrealistic---and thus useless---model. We could further restrict the domain to the integers to make it more realistic, but that would make the problem much harder for little gain: if this model is used on a large scale than the difference between the integer solution and the non-integer solution is often irrelevant. -At this point we must start defining the rules associated with our paramaters and variables. We begin with the most important rule, the cost rule, which will tell the model to try and minimize the overall cost. Logically, the total cost is going to be the sum of how much is spent on each food, and that value in turn is going to be determined by the cost of the food and how much of it is purchased. For example, if three \$5 hamburgers and two \$1 apples are purchased, than the total cost would be $3 \cdot 5 + 2 \cdot 1 = 17$. Note that this process is the same as taking the dot product of the amounts vector and the costs vector. +At this point we must start defining the rules associated with our parameters and variables. We begin with the most important rule, the cost rule, which will tell the model to try and minimize the overall cost. Logically, the total cost is going to be the sum of how much is spent on each food, and that value in turn is going to be determined by the cost of the food and how much of it is purchased. For example, if three \$5 hamburgers and two \$1 apples are purchased, than the total cost would be $3 \cdot 5 + 2 \cdot 1 = 17$. Note that this process is the same as taking the dot product of the amounts vector and the costs vector. To input this, we must define the cost rule, which we creatively call costRule as @@ -84,7 +84,7 @@ \subsection*{Build the model} model.volume = Constraint(rule=volumeRule) \end{verbatim} -Note that here we have a constraint instead of an objective. This requires that the rule returns true, but otherwsie the value is irrelevant. While objective looks for the least value, constraints just require that a value works. +Note that here we have a constraint instead of an objective. This requires that the rule returns true, but otherwise the value is irrelevant. While objective looks for the least value, constraints just require that a value works. Finally, we need to add the constraint that ensures we obtain proper amounts of each nutrient. This one is a bit more complicated for two reasons: the value needs to be within a range, rather than just greater than or less than another value, and nutrient\_value was a two dimensional variable. It's easy to fix the first problem in a myriad of ways; the way we will do it involves defining another variable and checking if that is in the proper range. To solve the second problem, we give the rule an index in addition to the model as an input. The code will be @@ -99,21 +99,21 @@ \subsection*{Build the model} The rule itself will act much like the previous rules, but by adding an index into the constraint we will cycle through each of the nutrients. Essentially, what we have done is compressed many ``nutrient rules,'' each of which acts the same, into one rule that will look at each nutrient individually. -At this point, we have finished creating the model file. We have defined our sets, paramaters and variables. We also defined the objective of the model and constraints that must be accepted for a solution. All that's left now is to build a data file. +At this point, we have finished creating the model file. We have defined our sets, parameters and variables. We also defined the objective of the model and constraints that must be accepted for a solution. All that's left now is to build a data file. \subsection*{Data entry} -Much like with the model, we begin wtih the two main sets we're looking at: foods and nutrients. For brevety, we'll only look at three foods and three nutrients. Note that ``vc'' is just shorthand for vitamin c. +Much like with the model, we begin with the two main sets we're looking at: foods and nutrients. For brevety, we'll only look at three foods and three nutrients. Note that ``vc'' is just shorthand for vitamin c. \begin{verbatim} set foods := steak apple rice; set nutrients := calories protein vc; \end{verbatim} -To define the set just put set [name] := [elements of set]; where the elements of the set are seperated by a single space. +To define the set just put set [name] := [elements of set]; where the elements of the set are separated by a single space. -We now define a paramater without an associated set. In this case, it is the paramater ``max\_volume''. To do this, simply input +We now define a parameter without an associated set. In this case, it is the parameter ``max\_volume''. To do this, simply input \begin{verbatim} param: max_volume := 400; @@ -122,7 +122,7 @@ \subsection*{Data entry} \noindent It is worth pointing out that for this example the volumes are all fairly arbitrary. -The parameters representing the costs, volumes, nutrient minimums and nutrient maximums can all be input in compareable fashions with the main difference being which set the paramater is over. In the code, the first line defins what parameter is being looked at and each subsequent line gives a member of the appropriate set and a value associated with it. +The parameters representing the costs, volumes, nutrient minimums and nutrient maximums can all be input in comparable fashions with the main difference being which set the parameter is over. In the code, the first line defines what parameter is being looked at and each subsequent line gives a member of the appropriate set and a value associated with it. \begin{verbatim} param: costs := @@ -158,7 +158,7 @@ \subsection*{Data entry} vc 0 30 0; \end{verbatim} -The amount of spaces between each element is irrelevent (as long as there is at least one) so the matrix should be formatted for ease of reading. +The amount of spaces between each element is irrelevant (as long as there is at least one) so the matrix should be formatted for ease of reading. Now that we have finished both the model and the data file save them both. It's convention to give the model file a .py extension and the data file a .dat extension. diff --git a/examples/doc/samples/case_studies/diet/README.txt b/examples/doc/samples/case_studies/diet/README.txt index 255d65adadb..c30e963dc27 100644 --- a/examples/doc/samples/case_studies/diet/README.txt +++ b/examples/doc/samples/case_studies/diet/README.txt @@ -77,7 +77,7 @@ model.amount=Var(model.foods, within = NonNegativeReals) We restrict our domain to the non-negative reals. If we accepted negative numbers than the model could tell us to buy negative amounts of food, which is an unrealistic--and thus useless--model. We could further restrict the domain to the integers to make it more realistic, but that would make the problem much harder for little gain: if this model is used on a large scale than the difference between the integer solution and the non-integer solution is often irrelevant. -At this point we must start defining the rules associated with our paramaters and variables. We begin with the most important rule, the cost rule, which will tell the model to try and minimize the overall cost. Logically, the total cost is going to be the sum of how much is spent on each food, and that value in turn is going to be determined by the cost of the food and how much of it is purchased. For example, if three !$5 hamburgers and two !$1 apples are purchased, than the total cost would be 3*5 + 2*1 = 17. Note that this process is the same as taking the dot product of the amounts vector and the costs vector. +At this point we must start defining the rules associated with our parameters and variables. We begin with the most important rule, the cost rule, which will tell the model to try and minimize the overall cost. Logically, the total cost is going to be the sum of how much is spent on each food, and that value in turn is going to be determined by the cost of the food and how much of it is purchased. For example, if three !$5 hamburgers and two !$1 apples are purchased, than the total cost would be 3*5 + 2*1 = 17. Note that this process is the same as taking the dot product of the amounts vector and the costs vector. To input this, we must define the cost rule, which we creatively call costRule as @@ -105,7 +105,7 @@ def volumeRule(model): model.volume = Constraint(rule=volumeRule) }}} -Note that here we have a constraint instead of an objective. This requires that the rule returns true, but otherwsie the value is irrelevant. While objective looks for the least value, constraints just require that a value works. +Note that here we have a constraint instead of an objective. This requires that the rule returns true, but otherwise the value is irrelevant. While objective looks for the least value, constraints just require that a value works. Finally, we need to add the constraint that ensures we obtain proper amounts of each nutrient. This one is a bit more complicated for two reasons: the value needs to be within a range, rather than just greater than or less than another value, and nutrient_value was a two dimensional variable. It's easy to fix the first problem in a myriad of ways; the way we will do it involves defining another variable and checking if that is in the proper range. To solve the second problem, we give the rule an index in addition to the model as an input. The code will be @@ -121,27 +121,27 @@ model.nutrientConstraint = Constraint(model.nutrients, rule=nutrientRule) The rule itself will act much like the previous rules, but by adding an index into the constraint we will cycle through each of the nutrients. Essentially, what we have done is compressed many "nutrient rules," each of which acts the same, into one rule that will look at each nutrient individually. -At this point, we have finished creating the model file. We have defined our sets, paramaters and variables. We also defined the objective of the model and constraints that must be accepted for a solution. Make sure to save this as a .py file. Now, all that's left now is to build a data file. +At this point, we have finished creating the model file. We have defined our sets, parameters and variables. We also defined the objective of the model and constraints that must be accepted for a solution. Make sure to save this as a .py file. Now, all that's left now is to build a data file. == Data entry == -Much like with the model, we begin wtih the two main sets we're looking at: foods and nutrients. For brevety, we'll only look at three foods and three nutrients. Note that "vc" is just shorthand for vitamin c. +Much like with the model, we begin with the two main sets we're looking at: foods and nutrients. For brevety, we'll only look at three foods and three nutrients. Note that "vc" is just shorthand for vitamin c. {{{ set foods := steak apple rice; set nutrients := calories protein vc; }}} -To define the set just put set [name] := [elements of set]; where the elements of the set are seperated by a single space. +To define the set just put set [name] := [elements of set]; where the elements of the set are separated by a single space. -We now define a paramater without an associated set. In this case, it is the paramater "max_volume". To do this, simply input +We now define a parameter without an associated set. In this case, it is the parameter "max_volume". To do this, simply input {{{ param: max_volume := 400; }}} It is worth pointing out that for this example the volumes are all fairly arbitrary. -The parameters representing the costs, volumes, nutrient minimums and nutrient maximums can all be input in compareable fashions with the main difference being which set the paramater is over. In the code, the first line defins what parameter is being looked at and each subsequent line gives a member of the appropriate set and a value associated with it. +The parameters representing the costs, volumes, nutrient minimums and nutrient maximums can all be input in comparable fashions with the main difference being which set the parameter is over. In the code, the first line defines what parameter is being looked at and each subsequent line gives a member of the appropriate set and a value associated with it. {{{ param: costs := @@ -177,7 +177,7 @@ protein 40 1 5 vc 0 30 0; }}} -The amount of spaces between each element is irrelevent (as long as there is at least one) so the matrix should be formatted for ease of reading. +The amount of spaces between each element is irrelevant (as long as there is at least one) so the matrix should be formatted for ease of reading. Now that we have finished both the model and the data file save them both. It's convention to give the model file a .py extension and the data file a .dat extension. diff --git a/examples/doc/samples/case_studies/disease_est/DiseaseEstimation.py b/examples/doc/samples/case_studies/disease_est/DiseaseEstimation.py index b9aa480c8a6..c685a6ee67f 100644 --- a/examples/doc/samples/case_studies/disease_est/DiseaseEstimation.py +++ b/examples/doc/samples/case_studies/disease_est/DiseaseEstimation.py @@ -7,26 +7,42 @@ model.P_REP_CASES = Param(model.S_SI) model.P_POP = Param() -model.I = Var(model.S_SI, bounds=(0,model.P_POP), initialize=1) -model.S = Var(model.S_SI, bounds=(0,model.P_POP), initialize=300) +model.I = Var(model.S_SI, bounds=(0, model.P_POP), initialize=1) +model.S = Var(model.S_SI, bounds=(0, model.P_POP), initialize=300) model.beta = Var(bounds=(0.05, 70)) model.alpha = Var(bounds=(0.5, 1.5)) model.eps_I = Var(model.S_SI, initialize=0.0) + def _objective(model): - return sum((model.eps_I[i])**2 for i in model.S_SI) + return sum((model.eps_I[i]) ** 2 for i in model.S_SI) + + model.objective = Objective(rule=_objective, sense=minimize) + def _InfDynamics(model, i): if i != 1: - return model.I[i] == (model.beta*model.S[i-1]*model.I[i-1]**model.alpha)/model.P_POP + return ( + model.I[i] + == (model.beta * model.S[i - 1] * model.I[i - 1] ** model.alpha) + / model.P_POP + ) + + model.InfDynamics = Constraint(model.S_SI, rule=_InfDynamics) + def _SusDynamics(model, i): if i != 1: - return model.S[i] == model.S[i-1] - model.I[i] + return model.S[i] == model.S[i - 1] - model.I[i] + + model.SusDynamics = Constraint(model.S_SI, rule=_SusDynamics) + def _Data(model, i): - return model.P_REP_CASES[i] == model.I[i]+model.eps_I[i] + return model.P_REP_CASES[i] == model.I[i] + model.eps_I[i] + + model.Data = Constraint(model.S_SI, rule=_Data) diff --git a/examples/doc/samples/case_studies/max_flow/MaxFlow.py b/examples/doc/samples/case_studies/max_flow/MaxFlow.py index c8e25dd968e..c6eb42ccf7d 100644 --- a/examples/doc/samples/case_studies/max_flow/MaxFlow.py +++ b/examples/doc/samples/case_studies/max_flow/MaxFlow.py @@ -3,7 +3,7 @@ model = AbstractModel() model.nodes = Set() -model.arcs = Set(within=model.nodes*model.nodes) +model.arcs = Set(within=model.nodes * model.nodes) model.sources = Set(within=model.nodes) model.sinks = Set(within=model.nodes) model.upperBound = Param(model.arcs) @@ -11,52 +11,38 @@ model.demand = Param(model.sinks) model.amount = Var(model.arcs, within=NonNegativeReals) + def totalRule(model): - expression = sum( - model.amount[i,j] - for (i, j) in model.arcs - if j in model.sinks - ) + expression = sum(model.amount[i, j] for (i, j) in model.arcs if j in model.sinks) return expression + model.maxFlow = Objective(rule=totalRule, sense=maximize) + def maxRule(model, arcIn, arcOut): - constraint_equation = (model.amount[arcIn, arcOut] <= model.upperBound[arcIn, arcOut]) + constraint_equation = model.amount[arcIn, arcOut] <= model.upperBound[arcIn, arcOut] return constraint_equation + model.loadOnArc = Constraint(model.arcs, rule=maxRule) + def flowRule(model, node): if node in model.sources: - flow_out = sum( - model.amount[i,j] - for (i,j) in model.arcs - if i == node - ) - constraint_equation = ( flow_out <= model.supply[node] ) + flow_out = sum(model.amount[i, j] for (i, j) in model.arcs if i == node) + constraint_equation = flow_out <= model.supply[node] elif node in model.sinks: - flow_in = sum( - model.amount[i,j] - for (i,j) in model.arcs - if j == node - ) - constraint_equation = (flow_in >= model.demand[node]) + flow_in = sum(model.amount[i, j] for (i, j) in model.arcs if j == node) + constraint_equation = flow_in >= model.demand[node] else: - amountIn = sum( - model.amount[i,j] - for (i,j) in model.arcs - if j == node - ) - amountOut = sum( - model.amount[i,j] - for (i,j) in model.arcs - if i == node - ) - constraint_equation = ( amountIn == amountOut ) + amountIn = sum(model.amount[i, j] for (i, j) in model.arcs if j == node) + amountOut = sum(model.amount[i, j] for (i, j) in model.arcs if i == node) + constraint_equation = amountIn == amountOut return constraint_equation + model.flow = Constraint(model.nodes, rule=flowRule) diff --git a/examples/doc/samples/case_studies/max_flow/MaxFlow.tex b/examples/doc/samples/case_studies/max_flow/MaxFlow.tex index 2ff2fb662f1..417b0093652 100644 --- a/examples/doc/samples/case_studies/max_flow/MaxFlow.tex +++ b/examples/doc/samples/case_studies/max_flow/MaxFlow.tex @@ -14,7 +14,7 @@ \section*{Max Flow} A panda is about to give birth at the zoo! Officials anticipate that attendance will skyrocket to see the new, adorable baby panda. There's one particular residential area called ``Home'' that is full of panda loving families and there's a fear that this inflated number of people visiting the zoo will overload the public transportation system. It will be especially bad in the evening since the zoo closes about the same time as rush hour, so everyone will be trying to find spaced on the already crowded buses and subways. As a city planner, you were given a map of routes from the zoo to Home (shown below), along with the estimated number of families that could go on each route. Additionally, it was estimated that $16$ families from Home will visit each day, and it's your task to figure out if this will overload the public transportation system, and, if it does, how could the system be improved? -This is another kind of network flow problem, called a maximum flow problem. Here, what we're concerned about is the upper bound on the amount of people that can move through the system. This is a special case of network flow problems, but it's also one of the most relevent and easily applicable kinds of problems. Unlike previous examples, we're not trying to minimize cost; instead we're trying to find the maximum amount of flow, which is defined by the number of objects moving thrugh the system. In this case, our flow is how many people can travel from the zoo to Home. +This is another kind of network flow problem, called a maximum flow problem. Here, what we're concerned about is the upper bound on the amount of people that can move through the system. This is a special case of network flow problems, but it's also one of the most relevant and easily applicable kinds of problems. Unlike previous examples, we're not trying to minimize cost; instead we're trying to find the maximum amount of flow, which is defined by the number of objects moving through the system. In this case, our flow is how many people can travel from the zoo to Home. Before we begin the implementation, it's worth briefly covering some key points of a network. Typically, the locations we're looking at are called ``nodes'' and the paths between them are ``arcs,'' which should be familiar to anyone who has with graph theory. Additionally, nodes that have an excess supply are called ``sources'' while nodes with an excess of demand are ``sinks.'' All other nodes should have a net change of zero---anything that flows in should also flow out. In this example, our source is the zoo and our sink is Home, while all the locations between the two should not supply any people and no people should stop at them. @@ -84,7 +84,7 @@ \subsection*{Build the model} model.maxFlow = Objective(rule=totalRule, sense=maximize) \end{verbatim} -Another important element of this model is the maximum amount that can travel on each route. Without that, the max flow problem would be trivial: the answer is always infinity! To avoid this situation, we need to ensure that the amount traveling along each arc is less than or equal to the upper bound of how much can move along the arc. One part that deserves note is that we give the constraint the set of arcs as an argument, but in defining the rule we give it two arguements: arcIn and arcOut. The reason for this is because the set of arcs is itself a set of tuples, so when we feed it in as an argument we're actually supplying two arguements. The final result will be this: +Another important element of this model is the maximum amount that can travel on each route. Without that, the max flow problem would be trivial: the answer is always infinity! To avoid this situation, we need to ensure that the amount traveling along each arc is less than or equal to the upper bound of how much can move along the arc. One part that deserves note is that we give the constraint the set of arcs as an argument, but in defining the rule we give it two arguments: arcIn and arcOut. The reason for this is because the set of arcs is itself a set of tuples, so when we feed it in as an argument we're actually supplying two arguments. The final result will be this: \begin{verbatim} def maxRule(arcIn, arcOut, model): @@ -211,7 +211,7 @@ \subsection*{Data entry} set sinks := Home; \end{verbatim} -We also need to define the supply and demand parameters. These are especially easy because the sets they're over only have one element. Remember, for this example the supply is abitrarily large and demand is zero so we won't accidentally create an infeasible model. +We also need to define the supply and demand parameters. These are especially easy because the sets they're over only have one element. Remember, for this example the supply is arbitrarily large and demand is zero so we won't accidentally create an infeasible model. \begin{verbatim} param: supply := @@ -221,7 +221,7 @@ \subsection*{Data entry} Home 0; \end{verbatim} -Finally, we create the upper bound parameter. This, is a paramater over tuples, but we must omit the parentheses and commas that usually denote such a tuple. Otherwise, it's formated the same way as the above parameters. +Finally, we create the upper bound parameter. This, is a parameter over tuples, but we must omit the parentheses and commas that usually denote such a tuple. Otherwise, it's formatted the same way as the above parameters. \begin{verbatim} param: upperBound := diff --git a/examples/doc/samples/case_studies/max_flow/README.txt b/examples/doc/samples/case_studies/max_flow/README.txt index 4c7f3009508..985a702d623 100644 --- a/examples/doc/samples/case_studies/max_flow/README.txt +++ b/examples/doc/samples/case_studies/max_flow/README.txt @@ -4,7 +4,7 @@ A panda is about to give birth at the zoo! Officials anticipate that attendance [[Image(MaxFlow.png)]] -This is another kind of network flow problem, called a maximum flow problem. Here, what we're concerned about is the upper bound on the amount of people that can move through the system. This is a special case of network flow problems, but it's also one of the most relevent and easily applicable kinds of problems. Unlike previous examples, we're not trying to minimize cost; instead we're trying to find the maximum amount of flow, which is defined by the number of objects moving thrugh the system. In this case, our flow is how many people can travel from the zoo to Home. +This is another kind of network flow problem, called a maximum flow problem. Here, what we're concerned about is the upper bound on the amount of people that can move through the system. This is a special case of network flow problems, but it's also one of the most relevant and easily applicable kinds of problems. Unlike previous examples, we're not trying to minimize cost; instead we're trying to find the maximum amount of flow, which is defined by the number of objects moving through the system. In this case, our flow is how many people can travel from the zoo to Home. Before we begin the implementation, it's worth briefly covering some key points of a network. Typically, the locations we're looking at are called "nodes" and the paths between them are "arcs," which should be familiar to anyone who has with graph theory. Additionally, nodes that have an excess supply are called "sources" while nodes with an excess of demand are "sinks." All other nodes should have a net change of zero--anything that flows in should also flow out. In this example, our source is the zoo and our sink is Home, while all the locations between the two should not supply any people and no people should stop at them. @@ -79,7 +79,7 @@ def totalRule(model): model.maxFlow = Objective(rule=totalRule, sense=maximize) }}} -Another important element of this model is the maximum amount that can travel on each route. Without that, the max flow problem would be trivial: the answer is always infinity! To avoid this situation, we need to ensure that the amount traveling along each arc is less than or equal to the upper bound of how much can move along the arc. One part that deserves note is that we give the constraint the set of arcs as an argument, but in defining the rule we give it two arguements: arcIn and arcOut. The reason for this is because the set of arcs is itself a set of tuples, so when we feed it in as an argument we're actually supplying two arguements. The final result will be this: +Another important element of this model is the maximum amount that can travel on each route. Without that, the max flow problem would be trivial: the answer is always infinity! To avoid this situation, we need to ensure that the amount traveling along each arc is less than or equal to the upper bound of how much can move along the arc. One part that deserves note is that we give the constraint the set of arcs as an argument, but in defining the rule we give it two arguments: arcIn and arcOut. The reason for this is because the set of arcs is itself a set of tuples, so when we feed it in as an argument we're actually supplying two arguments. The final result will be this: {{{ #!python @@ -212,7 +212,7 @@ set sources := Zoo; set sinks := Home; }}} -We also need to define the supply and demand parameters. These are especially easy because the sets they're over only have one element. Remember, for this example the supply is abitrarily large and demand is zero so we won't accidentally create an infeasible model. +We also need to define the supply and demand parameters. These are especially easy because the sets they're over only have one element. Remember, for this example the supply is arbitrarily large and demand is zero so we won't accidentally create an infeasible model. {{{ param: supply := @@ -222,7 +222,7 @@ param: demand := Home 0; }}} -Finally, we create the upper bound parameter. This, is a paramater over tuples, but we must omit the parentheses and commas that usually denote such a tuple. Otherwise, it's formated the same way as the above parameters. +Finally, we create the upper bound parameter. This, is a parameter over tuples, but we must omit the parentheses and commas that usually denote such a tuple. Otherwise, it's formatted the same way as the above parameters. {{{ param: upperBound := diff --git a/examples/doc/samples/case_studies/network_flow/README.txt b/examples/doc/samples/case_studies/network_flow/README.txt index 5a976be0de3..67043f311f3 100644 --- a/examples/doc/samples/case_studies/network_flow/README.txt +++ b/examples/doc/samples/case_studies/network_flow/README.txt @@ -2,7 +2,7 @@ Network flow problems are an important subset of linear programming. The idea is we have a set of objects (stores, blogs, nerves in a human body), that can transmit something (goods, information, messages to and from the brain) to other objects in the set. The connections between objects need not be two-ways: just because object A transmits to object B does not mean B will transmit to A. A good example of a network flow problem is the Transportation Problem also found in the Pyomo directory. While a simplified example, it is essentially a problem of moving goods throughout the set of locations so that demand can be satisfied at a few locations. -In that vein, we do a compareable to begin looking at more complex network flow problems. We are looking at a company that produces shoes. There are three factories, Albany, Albuquerque and Atlanta, each with a certain supply, and these factories produce goods that they store in two warehouses, one in Boise and one in Boston, until they need to be shipped to the three stores in Casper, Charleston and Chicago, each with some value for their demand. The warehouses may have supply, demand or neither. To make matters more complicated, two of the mills can ship directly to the factories. Another factor to take into account is that each route has a maximum amount that can be shipped along it, which is a realistic condition (if being shipped by train, for example, only so many trains can run in a period of time). Also, the companies being contracted to do the shipping require a minimum amount along each route to be shipped. Finally, it costs different amounts per volume of shoes to ship along any route (to pay for space on trains, or gas for cars, for example). All of this must be taken into account to create a realistic model that will minimize the total cost for shipping shoes from the factories to the stores. +In that vein, we do a comparable to begin looking at more complex network flow problems. We are looking at a company that produces shoes. There are three factories, Albany, Albuquerque and Atlanta, each with a certain supply, and these factories produce goods that they store in two warehouses, one in Boise and one in Boston, until they need to be shipped to the three stores in Casper, Charleston and Chicago, each with some value for their demand. The warehouses may have supply, demand or neither. To make matters more complicated, two of the mills can ship directly to the factories. Another factor to take into account is that each route has a maximum amount that can be shipped along it, which is a realistic condition (if being shipped by train, for example, only so many trains can run in a period of time). Also, the companies being contracted to do the shipping require a minimum amount along each route to be shipped. Finally, it costs different amounts per volume of shoes to ship along any route (to pay for space on trains, or gas for cars, for example). All of this must be taken into account to create a realistic model that will minimize the total cost for shipping shoes from the factories to the stores. The image below shows all the locations, routes and their associated costs in this model. @@ -109,7 +109,7 @@ def supplyDemandRule(nn, model): model.supplyDemand = Constraint(model.places, rule=supplyDemandRule) }}} -What we did was just construct a few additional variables; this wasn't required but makes reading and understanding the code much, much simpler. The variable "amountIn" looks at each route to find ones that end in this location, then adds the amount on each of those routes together to determine how much flows to each location. The "amountOut" variable fucntions similarly for the flow out. Then we just create an "input" and "output" and ensure they're equal. Like in some of the previous constraints, we feed the set of places into the constraint as an arguement so it will index over that set, and thus this rule functions for all the places in our network. +What we did was just construct a few additional variables; this wasn't required but makes reading and understanding the code much, much simpler. The variable "amountIn" looks at each route to find ones that end in this location, then adds the amount on each of those routes together to determine how much flows to each location. The "amountOut" variable functions similarly for the flow out. Then we just create an "input" and "output" and ensure they're equal. Like in some of the previous constraints, we feed the set of places into the constraint as an arguement so it will index over that set, and thus this rule functions for all the places in our network. We have now finished creating the model. Save this as a .py file before continuing. diff --git a/examples/doc/samples/case_studies/network_flow/networkFlow1.py b/examples/doc/samples/case_studies/network_flow/networkFlow1.py index 00c5c35b37d..adfaab4476b 100644 --- a/examples/doc/samples/case_studies/network_flow/networkFlow1.py +++ b/examples/doc/samples/case_studies/network_flow/networkFlow1.py @@ -3,7 +3,7 @@ model = AbstractModel() model.places = Set() -model.routes = Set(within=model.places*model.places) +model.routes = Set(within=model.places * model.places) model.supply = Param(model.places) model.demand = Param(model.places) model.cost = Param(model.routes) @@ -12,24 +12,29 @@ model.amount = Var(model.routes, within=NonNegativeReals) model.excess = Var(model.places, within=NonNegativeReals) + def costRule(model): - return sum(model.cost[n]*model.amount[n] for n in model.routes) + return sum(model.cost[n] * model.amount[n] for n in model.routes) + model.costTotal = Objective(rule=costRule) + def loadRule(model, i, j): - return (model.minimum[i,j], model.amount[i,j], model.maximum[i,j]) + return (model.minimum[i, j], model.amount[i, j], model.maximum[i, j]) + model.loadOnRoad = Constraint(model.routes, rule=loadRule) -def supplyDemandRule(model, nn): - amountIn = sum(model.amount[i,j] for (i,j) in model.routes if j == nn) - amountOut = sum(model.amount[i,j] for (i,j) in model.routes if i == nn) +def supplyDemandRule(model, nn): + amountIn = sum(model.amount[i, j] for (i, j) in model.routes if j == nn) + amountOut = sum(model.amount[i, j] for (i, j) in model.routes if i == nn) - input = amountIn + model.supply[nn] + input = amountIn + model.supply[nn] output = amountOut + model.demand[nn] + model.excess[nn] return input == output + model.supplyDemand = Constraint(model.places, rule=supplyDemandRule) diff --git a/examples/doc/samples/case_studies/network_flow/networkFlow1.tex b/examples/doc/samples/case_studies/network_flow/networkFlow1.tex index 4c55e85e2ce..274320f917a 100644 --- a/examples/doc/samples/case_studies/network_flow/networkFlow1.tex +++ b/examples/doc/samples/case_studies/network_flow/networkFlow1.tex @@ -68,7 +68,7 @@ \section*{Network Flow 1} Boston Chicago .2; Network flow problems are a very important type of linear programming problem. The idea is we have a set of objects (stores, blogs, nerves in a human body), that can transmit something (goods, information, messages to and from the brain) to other objects in the set. The connections between objects need not be two-ways: just because object A transmits to object B does not mean B will transmit to A. A good example of a network flow problem is the Transportation Problem also found in the Pyomo directory. While a simplified example, it is essentially a problem of moving goods throughout the set of locations so that demand can be satisfied at a few locations. -In that vein, we do a compareable example to begin. We are looking at a company that produces grain. There are three mills in the area with a certain supply, and these mills produce goods that they store in two warehouses until they need to be shipped to the three factories with some value for their demand. The warehouses have no supply or demand. To make matters more complicated, two of the mills ship directly to the factories. Another factor to take into account is that each route has a maximum amount that can be shipped along it, which is a realistic condition (if being shipped by train, for example, only so many trains can run in a period of time). Worse, a company is being contracted to do the shipping, and they require a minimum amount along each route that we must also take into account. Finally, it costs a certain amount per volume of grain to ship along any route (to pay for space on trains, or gas for cars, for example). All of this must be taken into account to create a realistic model that will minimize the total cost for shipping grain from the mill to the factory. +In that vein, we do a comparable example to begin. We are looking at a company that produces grain. There are three mills in the area with a certain supply, and these mills produce goods that they store in two warehouses until they need to be shipped to the three factories with some value for their demand. The warehouses have no supply or demand. To make matters more complicated, two of the mills ship directly to the factories. Another factor to take into account is that each route has a maximum amount that can be shipped along it, which is a realistic condition (if being shipped by train, for example, only so many trains can run in a period of time). Worse, a company is being contracted to do the shipping, and they require a minimum amount along each route that we must also take into account. Finally, it costs a certain amount per volume of grain to ship along any route (to pay for space on trains, or gas for cars, for example). All of this must be taken into account to create a realistic model that will minimize the total cost for shipping grain from the mill to the factory. The image below shows all the routes in this model, along with their associated costs. @@ -172,7 +172,7 @@ \subsection*{Build the model} model.supplyDemand = Constraint(model.places, rule=supplyDemandRule) \end{verbatim} -What we did was just construct a few additional variables; this wasn't required but makes reading and understanding the code much, much simpler. The variable ``amountIn'' looks at each route to find ones that end in this location, then adds the amount on each of those routes together to determine how much flows to each location. The ``amountOut'' variable fucntions similarly for the flow out. Then we just create an ``input'' and ``output'' and ensure they're equal. As in some of the previous constraints, we feed the set of places into the constraint as an arguement so it will index over that set, and thus this rule functions for all the places in our network. +What we did was just construct a few additional variables; this wasn't required but makes reading and understanding the code much, much simpler. The variable ``amountIn'' looks at each route to find ones that end in this location, then adds the amount on each of those routes together to determine how much flows to each location. The ``amountOut'' variable functions similarly for the flow out. Then we just create an ``input'' and ``output'' and ensure they're equal. As in some of the previous constraints, we feed the set of places into the constraint as an arguement so it will index over that set, and thus this rule functions for all the places in our network. We have now finished creating the model. Save this as a .py file before continuing. diff --git a/examples/doc/samples/case_studies/rosen/Rosenbrock.py b/examples/doc/samples/case_studies/rosen/Rosenbrock.py index 02b9136b992..9677cea95dd 100644 --- a/examples/doc/samples/case_studies/rosen/Rosenbrock.py +++ b/examples/doc/samples/case_studies/rosen/Rosenbrock.py @@ -4,12 +4,15 @@ model = AbstractModel() # @:intro # @vars: -model.x = Var(initialize = 1.5) -model.y = Var(initialize = 1.5) +model.x = Var(initialize=1.5) +model.y = Var(initialize=1.5) + + # @:vars # @obj: def rosenbrock(amodel): - return (1.0-amodel.x)**2 \ - + 100.0*(amodel.y - amodel.x**2)**2 + return (1.0 - amodel.x) ** 2 + 100.0 * (amodel.y - amodel.x**2) ** 2 + + model.obj = Objective(rule=rosenbrock, sense=minimize) # @:obj diff --git a/examples/doc/samples/case_studies/transportation/README.txt b/examples/doc/samples/case_studies/transportation/README.txt index b0fde2a90ad..8087666b83b 100644 --- a/examples/doc/samples/case_studies/transportation/README.txt +++ b/examples/doc/samples/case_studies/transportation/README.txt @@ -13,7 +13,7 @@ from pyomo.core import * model = AbstractModel() }}} -The next step is to define our sets that are being acted upon. The warehouses have a supply and the stores have demand, so those two make sense as sets (with supply and demand parameters that rely on them). Each route will have an associated cost, so we could create a set of routes. However, it's more efficent (and easier to read) to make the costs a parameter over two set, the warehouses and the stores, so we will only need to define those two sets. +The next step is to define our sets that are being acted upon. The warehouses have a supply and the stores have demand, so those two make sense as sets (with supply and demand parameters that rely on them). Each route will have an associated cost, so we could create a set of routes. However, it's more efficient (and easier to read) to make the costs a parameter over two set, the warehouses and the stores, so we will only need to define those two sets. {{{ #!python @@ -29,7 +29,7 @@ model.supply = Param(model.warehouses) model.demand = Param(model.stores) model.costs = Param(model.warehouses, model.stores) }}} -Note once again that the costs are a parameter of two dimesnions since it takes in two arguments. +Note once again that the costs are a parameter of two dimensions since it takes in two arguments. We have one final addition to make: the variable to solve for. We're looking for the amount of goods to be sent from each warehouse to each store, so we create a variable over the set of warehouses and the set of stores that determines how much will be shipped from each warehouse to each store. This is slightly different than in the diet problem, where our variable was just over one set, but the difference is minor in the implementation. @@ -90,7 +90,7 @@ set warehouses := quick brown fox; set stores := jumps over the lazy dog; }}} -Now we need to define the supply and demand parameters on these two sets. Fortunately, they're done very similarly to each other with the main difference being which set the parameter is indexed over. Otherwise, the notation is simple: put "param: [name of parameter] :=" on the first line, than an elment of the set and its associated value on each subsequent line. Don't forget to end with a semi-colon. +Now we need to define the supply and demand parameters on these two sets. Fortunately, they're done very similarly to each other with the main difference being which set the parameter is indexed over. Otherwise, the notation is simple: put "param: [name of parameter] :=" on the first line, than an element of the set and its associated value on each subsequent line. Don't forget to end with a semi-colon. {{{ param: supply := @@ -121,7 +121,7 @@ Now that we're done inputting the data, save this file as a .dat file. Once tha == Solution == -On Linux, through the comand line input "pyomo [filename].py [filename].dat". If using the files from the Pyomo page, for example, input "pyomo transportation.py transportation.dat" and let the program run. +On Linux, through the command line input "pyomo [filename].py [filename].dat". If using the files from the Pyomo page, for example, input "pyomo transportation.py transportation.dat" and let the program run. Pretty quickly the program will crash saying the model is infeasible. What happened? Look back at the data used, specifically the supply and demand data. In total, we have 11000 supply, but we have 11500 demand--our demand exceeds our supply. Thus, it's impossible for this model to work: we can't supply all of the stores to meet their demand. This means the model is infeasible. diff --git a/examples/doc/samples/case_studies/transportation/transportation.py b/examples/doc/samples/case_studies/transportation/transportation.py index 5630aa086b8..26fcb5f0b66 100644 --- a/examples/doc/samples/case_studies/transportation/transportation.py +++ b/examples/doc/samples/case_studies/transportation/transportation.py @@ -7,23 +7,32 @@ model.supply = Param(model.warehouses) model.demand = Param(model.stores) model.costs = Param(model.warehouses, model.stores) -model.amounts = Var(model.warehouses, model.stores, within = NonNegativeReals) +model.amounts = Var(model.warehouses, model.stores, within=NonNegativeReals) + def costRule(model): return sum( - model.costs[n,i] * model.amounts[n,i] + model.costs[n, i] * model.amounts[n, i] for n in model.warehouses for i in model.stores ) -model.cost=Objective(rule=costRule) + +model.cost = Objective(rule=costRule) + def minDemandRule(model, store): return sum(model.amounts[i, store] for i in model.warehouses) >= model.demand[store] + model.demandConstraint = Constraint(model.stores, rule=minDemandRule) + def maxSupplyRule(model, warehouse): - return sum(model.amounts[warehouse, j] for j in model.stores) <= model.supply[warehouse] + return ( + sum(model.amounts[warehouse, j] for j in model.stores) + <= model.supply[warehouse] + ) + model.supplyConstraint = Constraint(model.warehouses, rule=maxSupplyRule) diff --git a/examples/doc/samples/case_studies/transportation/transportationProblem.tex b/examples/doc/samples/case_studies/transportation/transportationProblem.tex index 83dbfe38c4d..ca3caf2d751 100644 --- a/examples/doc/samples/case_studies/transportation/transportationProblem.tex +++ b/examples/doc/samples/case_studies/transportation/transportationProblem.tex @@ -15,7 +15,7 @@ \subsection*{Build the model} model = AbstractModel() \end{verbatim} -The next step is to define our sets that are being acted upon. The warehouses have a supply and the stores have demand, so those two make sense as sets (with supply and demand parameters that rely on them). Each route will have an associated cost, so we could create a set of routes. However, it's more efficent (and easier to read) to make the costs a parameter over two set, the warehouses and the stores, so we will only need to define those two sets. +The next step is to define our sets that are being acted upon. The warehouses have a supply and the stores have demand, so those two make sense as sets (with supply and demand parameters that rely on them). Each route will have an associated cost, so we could create a set of routes. However, it's more efficient (and easier to read) to make the costs a parameter over two set, the warehouses and the stores, so we will only need to define those two sets. \begin{verbatim} model.warehouses = Set() @@ -31,7 +31,7 @@ \subsection*{Build the model} \end{verbatim} \noindent -Note once again that the costs are a parameter of two dimesnions since it takes in two arguments. +Note once again that the costs are a parameter of two dimensions since it takes in two arguments. We have one final addition to make: the variable to solve for. We're looking for the amount of goods to be sent from each warehouse to each store, so we create a variable over the set of warehouses and the set of stores that determines how much will be shipped from each warehouse to each store. This is slightly different than in the diet problem, where our variable was just over one set, but the difference is minor in the implementation. @@ -93,7 +93,7 @@ \subsection*{Data Entry} set stores := jumps over the lazy dog; \end{verbatim} -Now we need to define the supply and demand parameters on these two sets. Fortunately, they're done very similarly to each other with the main difference being which set the parameter is indexed over. Otherwise, the notation is simple: put ``param: [name of parameter] :='' on the first line, than an elment of the set and its associated value on each subsequent line. Don't forget to end with a semi-colon. +Now we need to define the supply and demand parameters on these two sets. Fortunately, they're done very similarly to each other with the main difference being which set the parameter is indexed over. Otherwise, the notation is simple: put ``param: [name of parameter] :='' on the first line, than an element of the set and its associated value on each subsequent line. Don't forget to end with a semi-colon. \begin{verbatim} param: supply := @@ -126,7 +126,7 @@ \subsection*{Data Entry} \subsection*{Solution} -On Linux, through the comand line input ``pyomo [filename].py [filename].dat''. If using the files from the Pyomo page, for example, input ``pyomo transportation.py transportation.dat'' and let the program run. +On Linux, through the command line input ``pyomo [filename].py [filename].dat''. If using the files from the Pyomo page, for example, input ``pyomo transportation.py transportation.dat'' and let the program run. Pretty quickly the program will crash saying the model is infeasible. What happened? Look back at the data used, specifically the supply and demand data. In total, we have $11000$ supply, but we have $11500$ demand---our demand exceeds our supply. Thus, it's impossible for this model to work: we can't supply all of the stores to meet their demand. This means the model is infeasible. diff --git a/examples/doc/samples/comparisons/cutstock/README.txt b/examples/doc/samples/comparisons/cutstock/README.txt index b73b579beee..efd34354cd1 100644 --- a/examples/doc/samples/comparisons/cutstock/README.txt +++ b/examples/doc/samples/comparisons/cutstock/README.txt @@ -11,7 +11,7 @@ interfaces. == Model Files == -Auxilliary files: +Auxiliary files: * [source:pyomo.data.samples/trunk/pyomo/data/samples/comparisons/cutstock/cutstock_util.py cutstock_util.py] - Utilities functions used in these examples Model files: diff --git a/examples/doc/samples/comparisons/cutstock/cutstock_cplex.py b/examples/doc/samples/comparisons/cutstock/cutstock_cplex.py index 3a86d8d5102..796c39810f8 100644 --- a/examples/doc/samples/comparisons/cutstock/cutstock_cplex.py +++ b/examples/doc/samples/comparisons/cutstock/cutstock_cplex.py @@ -1,5 +1,5 @@ import cplex -from cutstock_util import* +from cutstock_util import * from cplex.exceptions import CplexSolverError # Reading in Data using the cutstock_util @@ -13,10 +13,10 @@ CutsInPattern = getCutsInPattern() ######################################## -indA = range(patcount+1) +indA = range(patcount + 1) indA[0] = "SheetsCut" for i in range(patcount): - indA[i+1] = Patterns[i] + indA[i + 1] = Patterns[i] valA = [1] + [-1 for i in range(patcount)] @@ -27,8 +27,8 @@ for p in range(patcount): if CutsInPattern[c][p] >= 1: count += 1 - indP[c] = range(count+1) - valP[c] = range(count+1) + indP[c] = range(count + 1) + valP[c] = range(count + 1) count = 0 for p in range(patcount): if CutsInPattern[c][p] >= 1: @@ -41,27 +41,41 @@ cpx = cplex.Cplex() # Variable definition -cpx.variables.add(names = ["SheetsCut"], lb = [0], ub = [cplex.infinity]) -cpx.variables.add(names = ["TotalCost"], lb = [0], ub = [cplex.infinity], obj = [1]) -cpx.variables.add(names = Patterns) -cpx.variables.add(names = Cuts) +cpx.variables.add(names=["SheetsCut"], lb=[0], ub=[cplex.infinity]) +cpx.variables.add(names=["TotalCost"], lb=[0], ub=[cplex.infinity], obj=[1]) +cpx.variables.add(names=Patterns) +cpx.variables.add(names=Cuts) -#objective +# objective cpx.objective.set_sense(cpx.objective.sense.minimize) -#Constraints -cpx.linear_constraints.add(lin_expr = [cplex.SparsePair(ind = ["SheetsCut", "TotalCost"], val = [-PriceSheet, 1.0])], senses = ["E"], rhs = [0]) -cpx.linear_constraints.add(lin_expr = [cplex.SparsePair(ind = ["SheetsCut"],val = [1.0])], senses = ["L"], rhs = [SheetsAvail]) -cpx.linear_constraints.add(lin_expr = [cplex.SparsePair(ind = indA,val = valA)], senses = ["E"], rhs = [0]) +# Constraints +cpx.linear_constraints.add( + lin_expr=[cplex.SparsePair(ind=["SheetsCut", "TotalCost"], val=[-PriceSheet, 1.0])], + senses=["E"], + rhs=[0], +) +cpx.linear_constraints.add( + lin_expr=[cplex.SparsePair(ind=["SheetsCut"], val=[1.0])], + senses=["L"], + rhs=[SheetsAvail], +) +cpx.linear_constraints.add( + lin_expr=[cplex.SparsePair(ind=indA, val=valA)], senses=["E"], rhs=[0] +) for c in range(cutcount): - cpx.linear_constraints.add(lin_expr = [cplex.SparsePair(ind = indP[c],val = valP[c])], senses = ["E"], rhs = [CutDemand[c]]) + cpx.linear_constraints.add( + lin_expr=[cplex.SparsePair(ind=indP[c], val=valP[c])], + senses=["E"], + rhs=[CutDemand[c]], + ) -#cpx.write("CutStock.lp") +# cpx.write("CutStock.lp") cpx.solve() numcols = cpx.variables.get_num() -x = cpx.solution.get_values() -print cpx.solution.status[cpx.solution.get_status()] -print "Objective value = ", cpx.solution.get_objective_value() +x = cpx.solution.get_values() +print(cpx.solution.status[cpx.solution.get_status()]) +print("Objective value = ", cpx.solution.get_objective_value()) for j in range(numcols): if x[j] >= 1: - print "Var:", j ,"Value=", x[j] + print("Var:", j, "Value=", x[j]) diff --git a/examples/doc/samples/comparisons/cutstock/cutstock_grb.py b/examples/doc/samples/comparisons/cutstock/cutstock_grb.py index 2ca93b66a9d..4fa4556fc96 100644 --- a/examples/doc/samples/comparisons/cutstock/cutstock_grb.py +++ b/examples/doc/samples/comparisons/cutstock/cutstock_grb.py @@ -1,5 +1,5 @@ from gurobipy import * -from cutstock_util import* +from cutstock_util import * # Reading in Data using the cutstock_util cutcount = getCutCount() @@ -14,9 +14,9 @@ m = Model("CutStock") -#Defining Variables -SheetsCut = m.addVar(0, GRB.INFINITY, 0, GRB.CONTINUOUS,"SheetsCut") -TotalCost = m.addVar(0, GRB.INFINITY, 1, GRB.CONTINUOUS,"TotCost") +# Defining Variables +SheetsCut = m.addVar(0, GRB.INFINITY, 0, GRB.CONTINUOUS, "SheetsCut") +TotalCost = m.addVar(0, GRB.INFINITY, 1, GRB.CONTINUOUS, "TotCost") PatternCount = [] for i in range(patcount): @@ -28,30 +28,30 @@ newvar = m.addVar(0, GRB.INFINITY, 0, GRB.CONTINUOUS, Cuts[j]) ExcessCuts += [newvar] -#Objective Sense +# Objective Sense m.ModelSense = 1 -#Update model to integrate variables +# Update model to integrate variables m.update() -#Defining Constraints -m.addConstr(LinExpr(PriceSheet, SheetsCut), GRB.EQUAL, TotalCost,"TotCostCalc") -m.addConstr(LinExpr(1, SheetsCut), GRB.LESS_EQUAL, SheetsAvail,"RawAvail") +# Defining Constraints +m.addConstr(LinExpr(PriceSheet, SheetsCut), GRB.EQUAL, TotalCost, "TotCostCalc") +m.addConstr(LinExpr(1, SheetsCut), GRB.LESS_EQUAL, SheetsAvail, "RawAvail") sheetsB = LinExpr() for i in range(patcount): sheetsB.addTerms(1, PatternCount[i]) -m.addConstr(sheetsB, GRB.EQUAL, SheetsCut,"Sheets") +m.addConstr(sheetsB, GRB.EQUAL, SheetsCut, "Sheets") for c in range(cutcount): cutReqB = LinExpr() - cutReqB.addTerms(-1,ExcessCuts[c]) + cutReqB.addTerms(-1, ExcessCuts[c]) for p in range(patcount): - cutReqB.addTerms(CutsInPattern[c][p],PatternCount[p]) - m.addConstr(cutReqB, GRB.EQUAL, CutDemand[c],"CutReq_") + cutReqB.addTerms(CutsInPattern[c][p], PatternCount[p]) + m.addConstr(cutReqB, GRB.EQUAL, CutDemand[c], "CutReq_") m.update() -#m.write("CutStock.lp") +# m.write("CutStock.lp") m.optimize() -print m.ObjVal +print(m.ObjVal) diff --git a/examples/doc/samples/comparisons/cutstock/cutstock_lpsolve.py b/examples/doc/samples/comparisons/cutstock/cutstock_lpsolve.py index a0a1c6de6df..658ee006c30 100644 --- a/examples/doc/samples/comparisons/cutstock/cutstock_lpsolve.py +++ b/examples/doc/samples/comparisons/cutstock/cutstock_lpsolve.py @@ -1,5 +1,5 @@ from lpsolve55 import * -from cutstock_util import* +from cutstock_util import * # Reading in Data using the cutstock_util cutcount = getCutCount() @@ -23,40 +23,40 @@ else: ObjCoeff[i] = 0 -#Arrays for constraints +# Arrays for constraints TotCostB = range(varcount) for i in TotCostB: TotCostB[i] = 0 TotCostB[0] = -PriceSheet -TotCostB[1] = 1 +TotCostB[1] = 1 RawAvailB = range(varcount) for i in RawAvailB: RawAvailB[i] = 0 RawAvailB[0] = 1 - + SheetsB = range(varcount) for i in SheetsB: SheetsB[i] = 0 SheetsB[0] = 1 for i in range(patcount): - SheetsB[i+PatCountStart] = -1 + SheetsB[i + PatCountStart] = -1 CutReqB = [[0 for col in range(varcount)] for row in range(cutcount)] for i in range(cutcount): for j in range(patcount): - CutReqB[i][j+PatCountStart] = CutsInPattern[i][j] - CutReqB[i][patcount+PatCountStart+i] = -1 + CutReqB[i][j + PatCountStart] = CutsInPattern[i][j] + CutReqB[i][patcount + PatCountStart + i] = -1 ################################################### - + lp = lpsolve('make_lp', 0, varcount) ret = lpsolve('set_lp_name', lp, 'CutStock') lpsolve('set_verbose', 'CutStock', IMPORTANT) -#Define Objective +# Define Objective ret = lpsolve('set_obj_fn', 'CutStock', ObjCoeff) -#Define Constraints +# Define Constraints ret = lpsolve('add_constraint', 'CutStock', TotCostB, EQ, 0) ret = lpsolve('add_constraint', 'CutStock', RawAvailB, LE, SheetsAvail) ret = lpsolve('add_constraint', 'CutStock', SheetsB, EQ, 0) @@ -64,13 +64,9 @@ ret = lpsolve('add_constraint', 'CutStock', CutReqB[i], EQ, CutDemand[i]) lpsolve('solve', 'CutStock') -#ret = lpsolve('write_lp', 'CutStock', 'cutstock.lp') +# ret = lpsolve('write_lp', 'CutStock', 'cutstock.lp') lpsolve('solve', 'CutStock') -statuscode = lpsolve('get_status', 'CutStock') -print lpsolve('get_statustext', 'CutStock', statuscode) -print lpsolve('get_objective', 'CutStock') -print lpsolve('get_variables', 'CutStock')[0] - - - - +statuscode = lpsolve('get_status', 'CutStock') +print(lpsolve('get_statustext', 'CutStock', statuscode)) +print(lpsolve('get_objective', 'CutStock')) +print(lpsolve('get_variables', 'CutStock')[0]) diff --git a/examples/doc/samples/comparisons/cutstock/cutstock_pulpor.py b/examples/doc/samples/comparisons/cutstock/cutstock_pulpor.py index ecf7f9b7aec..2f2506ba3d6 100644 --- a/examples/doc/samples/comparisons/cutstock/cutstock_pulpor.py +++ b/examples/doc/samples/comparisons/cutstock/cutstock_pulpor.py @@ -1,5 +1,5 @@ from pulp import * -from cutstock_util import* +from cutstock_util import * # Reading in Data using the cutstock_util cutcount = getCutCount() @@ -12,33 +12,35 @@ CutsInPattern = getCutsInPattern() ######################################## -#Dictionary for PulpOR -CutsInPattern = makeDict([Cuts,Patterns],CutsInPattern) -CutDemand = makeDict([Cuts],CutDemand) +# Dictionary for PulpOR +CutsInPattern = makeDict([Cuts, Patterns], CutsInPattern) +CutDemand = makeDict([Cuts], CutDemand) prob = LpProblem("CutStock Problem", LpMinimize) -#Defining Variables -SheetsCut = LpVariable("SheetsCut",0) -TotalCost = LpVariable("TotalCost",0) -PatternCount = LpVariable.dicts("PatternCount",Patterns, lowBound = 0) -ExcessCuts = LpVariable.dicts("ExcessCuts",Cuts, lowBound = 0) +# Defining Variables +SheetsCut = LpVariable("SheetsCut", 0) +TotalCost = LpVariable("TotalCost", 0) +PatternCount = LpVariable.dicts("PatternCount", Patterns, lowBound=0) +ExcessCuts = LpVariable.dicts("ExcessCuts", Cuts, lowBound=0) -#objective -prob += TotalCost,"" +# objective +prob += TotalCost, "" -#Constraints -prob += TotalCost == PriceSheet*SheetsCut,"TotCost" -prob += SheetsCut <= SheetsAvail,"RawAvail" +# Constraints +prob += TotalCost == PriceSheet * SheetsCut, "TotCost" +prob += SheetsCut <= SheetsAvail, "RawAvail" prob += PatternCount == SheetsCut, "Sheets" for c in Cuts: - prob += lpSum([CutsInPattern[c][p]*PatternCount[p] for p in Patterns]) == CutDemand[c] + ExcessCuts[c],"CutReq" + str(c) + prob += lpSum( + [CutsInPattern[c][p] * PatternCount[p] for p in Patterns] + ) == CutDemand[c] + ExcessCuts[c], "CutReq" + str(c) -#prob.writeLP("CutStock.lp") +# prob.writeLP("CutStock.lp") prob.solve() -print "Status:", LpStatus[prob.status] -print "Minimum total cost:", prob.objective.value() +print("Status:", LpStatus[prob.status]) +print("Minimum total cost:", prob.objective.value()) for v in prob.variables(): if v.varValue > 0: - print v.name, "=", v.varValue + print(v.name, "=", v.varValue) diff --git a/examples/doc/samples/comparisons/cutstock/cutstock_pyomo.py b/examples/doc/samples/comparisons/cutstock/cutstock_pyomo.py index 2c22b00cf1c..a67ebdd0675 100644 --- a/examples/doc/samples/comparisons/cutstock/cutstock_pyomo.py +++ b/examples/doc/samples/comparisons/cutstock/cutstock_pyomo.py @@ -1,6 +1,6 @@ from pyomo.core import * import pyomo.opt -from cutstock_util import* +from cutstock_util import * # Reading in Data using the cutstock_util cutcount = getCutCount() @@ -12,7 +12,7 @@ CutDemand = getCutDemand() CutsInPattern = getCutsInPattern() ######################################## -#CutsInPattern = makeDict([Cuts,Patterns],CutsInPattern) +# CutsInPattern = makeDict([Cuts,Patterns],CutsInPattern) tmp = {} for i in range(len(Cuts)): tmp[Cuts[i]] = {} @@ -20,7 +20,7 @@ tmp[Cuts[i]][Patterns[j]] = CutsInPattern[i][j] CutsInPattern = tmp ######################################## -#CutDemand = makeDict([Cuts],CutDemand) +# CutDemand = makeDict([Cuts],CutDemand) tmp = {} for i in range(len(Cuts)): tmp[Cuts[i]] = CutDemand[i] @@ -28,31 +28,35 @@ model = ConcreteModel(name="CutStock Problem") -#Defining Variables +# Defining Variables model.SheetsCut = Var() model.TotalCost = Var() -model.PatternCount = Var(Patterns, bounds=(0,None)) -model.ExcessCuts = Var(Cuts, bounds=(0,None)) +model.PatternCount = Var(Patterns, bounds=(0, None)) +model.ExcessCuts = Var(Cuts, bounds=(0, None)) -#objective -model.objective = Objective(expr=1.0*model.TotalCost) +# objective +model.objective = Objective(expr=1.0 * model.TotalCost) -#Constraints -model.TotCost = Constraint(expr = model.TotalCost == PriceSheet* model.SheetsCut) -model.RawAvail = Constraint(expr = model.SheetsCut <= SheetsAvail) -model.Sheets = Constraint(expr = summation(model.PatternCount) == model.SheetsCut) +# Constraints +model.TotCost = Constraint(expr=model.TotalCost == PriceSheet * model.SheetsCut) +model.RawAvail = Constraint(expr=model.SheetsCut <= SheetsAvail) +model.Sheets = Constraint(expr=summation(model.PatternCount) == model.SheetsCut) model.CutReq = Constraint(Cuts) for c in Cuts: - model.CutReq.add(c, expr=sum(CutsInPattern[c][p]*model.PatternCount[p] for p in Patterns) == CutDemand[c] + model.ExcessCuts[c]) + model.CutReq.add( + c, + expr=sum(CutsInPattern[c][p] * model.PatternCount[p] for p in Patterns) + == CutDemand[c] + model.ExcessCuts[c], + ) instance = model.create() opt = pyomo.opt.SolverFactory('glpk') results = opt.solve(instance) instance.load(results) -print "Status:", results.solver.status -print "Minimum total cost:", value(instance.objective) +print("Status:", results.solver.status) +print("Minimum total cost:", value(instance.objective)) for v in instance.variables(): var = instance.variable(v) if value(var) > 0: - print v, "=", value(var) + print(v, "=", value(var)) diff --git a/examples/doc/samples/comparisons/cutstock/cutstock_util.py b/examples/doc/samples/comparisons/cutstock/cutstock_util.py index 46c6c63f92a..1cd8c61922f 100644 --- a/examples/doc/samples/comparisons/cutstock/cutstock_util.py +++ b/examples/doc/samples/comparisons/cutstock/cutstock_util.py @@ -5,7 +5,8 @@ def getCutCount(): cutCount += 1 fout1.close() return cutCount - + + def getPatCount(): patCount = 0 fout2 = open('Waste.csv', 'r') @@ -14,28 +15,33 @@ def getPatCount(): fout2.close() return patCount + def getPriceSheetData(): return 28 - + + def getSheetsAvail(): return 2000 - + + def getCuts(): cutcount = getCutCount() Cuts = range(cutcount) for i in range(cutcount): - nstr = str(i+1) + nstr = str(i + 1) Cuts[i] = 'w' + nstr return Cuts + def getPatterns(): patcount = getPatCount() Patterns = range(patcount) for j in range(patcount): - pstr = str(j+1) + pstr = str(j + 1) Patterns[j] = 'P' + pstr - return Patterns - + return Patterns + + def getCutDemand(): i = 0 cutcount = getCutCount() @@ -48,6 +54,7 @@ def getCutDemand(): fout1.close() return CutDemand + def getCutsInPattern(): cutcount = getCutCount() patcount = getPatCount() @@ -59,12 +66,12 @@ def getCutsInPattern(): pstr = lstr[0] wstr = lstr[1] cstr = lstr[2] - pstr = pstr.replace("P","") - wstr = wstr.replace("w","") + pstr = pstr.replace("P", "") + wstr = wstr.replace("w", "") cstr = cstr.rstrip("\n") p = int(pstr) w = int(wstr) c = int(cstr) - CutsInPattern[w-1][p-1] = c + CutsInPattern[w - 1][p - 1] = c fout2.close() return CutsInPattern diff --git a/examples/doc/samples/comparisons/sched/pyomo/sched.py b/examples/doc/samples/comparisons/sched/pyomo/sched.py index 7d1a077ac62..627bc083fbe 100644 --- a/examples/doc/samples/comparisons/sched/pyomo/sched.py +++ b/examples/doc/samples/comparisons/sched/pyomo/sched.py @@ -11,42 +11,71 @@ model.ntasks = Param(model.TASKS, within=NonNegativeReals) model.minp = Param(model.TASKS, within=NonNegativeReals) + def maxp_valid(value, i, Model): return Model.maxp[i] >= Model.minp[i] + + model.maxp = Param(model.TASKS, validate=maxp_valid) model.x = Var(model.TASKS, model.PEOPLE, model.SLOTS, within=Binary) model.xts = Var(model.TASKS, model.SLOTS, within=Binary) model.xtp = Var(model.TASKS, model.PEOPLE, within=Binary) + def rule1_rule(t, s, Model): - return sum(Model.x[t,p,s] for p in Model.PEOPLE) >= Model.xts[t,s] + return sum(Model.x[t, p, s] for p in Model.PEOPLE) >= Model.xts[t, s] + + model.rule1 = Constraint(model.TASKS, model.SLOTS) + def rule2_rule(t, p, s, Model): - return Model.x[t,p,s] <= Model.xts[t,s] + return Model.x[t, p, s] <= Model.xts[t, s] + + model.rule2 = Constraint(model.TASKS, model.PEOPLE, model.SLOTS) + def rule3_rule(t, p, Model): - return sum(Model.x[t,p,s] for s in Model.SLOTS) == Model.xtp[t,p] + return sum(Model.x[t, p, s] for s in Model.SLOTS) == Model.xtp[t, p] + + model.rule3 = Constraint(model.TASKS, model.PEOPLE) + def rule4_rule(t, Model): - return sum(Model.xts[t,s] for s in Model.SLOTS) == Model.ntasks[t] + return sum(Model.xts[t, s] for s in Model.SLOTS) == Model.ntasks[t] + + model.rule4 = Constraint(model.TASKS) + def rule5_rule(t, Model): - return Model.minp[t] <= sum(Model.xtp[t,p] for p in Model.PEOPLE) <= Model.maxp[t] + return Model.minp[t] <= sum(Model.xtp[t, p] for p in Model.PEOPLE) <= Model.maxp[t] + + model.rule5 = Constraint(model.TASKS) + def rule6_rule(s, Model): - return sum(Model.xts[t,s] for t in Model.TASKS) <= Model.nrooms[s] + return sum(Model.xts[t, s] for t in Model.TASKS) <= Model.nrooms[s] + + model.rule6 = Constraint(model.SLOTS) + def rule7_rule(p, s, Model): - return sum(Model.x[t,p,s] for t in Model.TASKS) == 1 + return sum(Model.x[t, p, s] for t in Model.TASKS) == 1 + + model.rule7 = Constraint(model.PEOPLE, model.SLOTS) + def z_rule(Model): - return sum(Model.amt[t,p] * Model.xtp[t,p] for t in Model.TASKS for p in Model.PEOPLE) + return sum( + Model.amt[t, p] * Model.xtp[t, p] for t in Model.TASKS for p in Model.PEOPLE + ) + + model.z = Objective(sense=maximize) diff --git a/examples/doc/samples/scripts/__init__.py b/examples/doc/samples/scripts/__init__.py index 6ac6c575ea9..3115f06ef53 100644 --- a/examples/doc/samples/scripts/__init__.py +++ b/examples/doc/samples/scripts/__init__.py @@ -1 +1 @@ -# Dummy file for nose tests +# Dummy file for pytest diff --git a/examples/doc/samples/scripts/s1/knapsack.py b/examples/doc/samples/scripts/s1/knapsack.py index b6066f8b5ce..642e0faaaed 100644 --- a/examples/doc/samples/scripts/s1/knapsack.py +++ b/examples/doc/samples/scripts/s1/knapsack.py @@ -13,10 +13,16 @@ model.x = Var(model.ITEMS, within=Binary) + def value_rule(model): - return sum(model.v[i]*model.x[i] for i in model.ITEMS) + return sum(model.v[i] * model.x[i] for i in model.ITEMS) + + model.value = Objective(sense=maximize, rule=value_rule) + def weight_rule(model): - return sum(model.w[i]*model.x[i] for i in model.ITEMS) <= model.limit + return sum(model.w[i] * model.x[i] for i in model.ITEMS) <= model.limit + + model.weight = Constraint(rule=weight_rule) diff --git a/examples/doc/samples/scripts/s1/script.out b/examples/doc/samples/scripts/s1/script.out index 330dd3b9567..f6abc26188a 100644 --- a/examples/doc/samples/scripts/s1/script.out +++ b/examples/doc/samples/scripts/s1/script.out @@ -9,9 +9,9 @@ Problem: Lower bound: 25.0 Upper bound: 25.0 Number of objectives: 1 - Number of constraints: 2 - Number of variables: 5 - Number of nonzeros: 5 + Number of constraints: 1 + Number of variables: 4 + Number of nonzeros: 4 Sense: maximize # ---------------------------------------------------------- # Solver Information diff --git a/examples/doc/samples/scripts/s1/script.py b/examples/doc/samples/scripts/s1/script.py index 137669c2062..02b6b406922 100644 --- a/examples/doc/samples/scripts/s1/script.py +++ b/examples/doc/samples/scripts/s1/script.py @@ -1,9 +1,11 @@ from pyomo.core import * import pyomo.opt import pyomo.environ + # # Import model import knapsack + # # Create the model instance instance = knapsack.model.create_instance("knapsack.dat") @@ -12,7 +14,7 @@ opt = pyomo.opt.SolverFactory("glpk") # # Optimize -results = opt.solve(instance,symbolic_solver_labels=True) +results = opt.solve(instance, symbolic_solver_labels=True) instance.solutions.store_to(results) # # Write the output diff --git a/examples/doc/samples/scripts/s2/knapsack.py b/examples/doc/samples/scripts/s2/knapsack.py index 268b12aa331..a7d693f5d35 100644 --- a/examples/doc/samples/scripts/s2/knapsack.py +++ b/examples/doc/samples/scripts/s2/knapsack.py @@ -13,16 +13,25 @@ model.x = Var(model.ITEMS, within=PercentFraction) + def value_rule(model): - return sum(model.v[i]*model.x[i] for i in model.ITEMS) + return sum(model.v[i] * model.x[i] for i in model.ITEMS) + + model.value = Objective(sense=maximize, rule=value_rule) + def weight_rule(model): - return sum(model.w[i]*model.x[i] for i in model.ITEMS) <= model.limit + return sum(model.w[i] * model.x[i] for i in model.ITEMS) <= model.limit + + model.weight = Constraint(rule=weight_rule) + # This constraint is not active, to illustrate how zero dual values are # handled by the pyomo command. def W_rule(model): - return sum(model.w[i]*model.x[i] for i in model.ITEMS) <= 2*model.limit + return sum(model.w[i] * model.x[i] for i in model.ITEMS) <= 2 * model.limit + + model.W = Constraint(rule=W_rule) diff --git a/examples/doc/samples/scripts/s2/script.py b/examples/doc/samples/scripts/s2/script.py index c60743ef5b9..88de1dec680 100644 --- a/examples/doc/samples/scripts/s2/script.py +++ b/examples/doc/samples/scripts/s2/script.py @@ -1,9 +1,11 @@ from pyomo.core import * import pyomo.opt import pyomo.environ + # # Import model import knapsack + # # Create the model instance instance = knapsack.model.create_instance("knapsack.dat") @@ -21,19 +23,19 @@ # Print the results i = 0 for sol in results.solution: - print("Solution "+str(i)) + print("Solution " + str(i)) # print(sorted(sol.variable.keys())) for var in sorted(sol.variable.keys()): - print(" Variable "+str(var)) - print(" "+str(sol.variable[var]['Value'])) - #for key in sorted(sol.variable[var].keys()): - #print(' '+str(key)+' '+str(sol.variable[var][key])) + print(" Variable " + str(var)) + print(" " + str(sol.variable[var]['Value'])) + # for key in sorted(sol.variable[var].keys()): + # print(' '+str(key)+' '+str(sol.variable[var][key])) # for con in sorted(sol.constraint.keys()): - print(" Constraint "+str(con)) + print(" Constraint " + str(con)) for key in sorted(sol.constraint[con].keys()): - print(' '+str(key)+' '+str(sol.constraint[con][key])) + print(' ' + str(key) + ' ' + str(sol.constraint[con][key])) # i += 1 # @@ -41,4 +43,4 @@ print("") print("Dual Values") for con in sorted(results.solution(0).constraint.keys()): - print(str(con)+' '+str(results.solution(0).constraint[con]["Dual"])) + print(str(con) + ' ' + str(results.solution(0).constraint[con]["Dual"])) diff --git a/examples/doc/samples/scripts/test_scripts.py b/examples/doc/samples/scripts/test_scripts.py index ee2e0d23f0a..ca0c8a7cc4e 100644 --- a/examples/doc/samples/scripts/test_scripts.py +++ b/examples/doc/samples/scripts/test_scripts.py @@ -14,23 +14,25 @@ import sys import subprocess from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.core import pyomo.common.unittest as unittest try: import yaml - yaml_available=True + + yaml_available = True except ImportError: - yaml_available=False + yaml_available = False @unittest.skipIf(not yaml_available, "PyYaml is not installed") -@unittest.skipIf(not pyomo.common.Executable("glpsol"), - "The 'glpsol' executable is not available") +@unittest.skipIf( + not pyomo.common.Executable("glpsol"), "The 'glpsol' executable is not available" +) class Test(unittest.TestCase): - def setUp(self): self.cwd = currdir os.chdir(self.cwd) @@ -39,23 +41,24 @@ def tearDown(self): os.chdir(self.cwd) def run_script(self, test, yaml_available=False): - cwd = self.cwd+os.sep+test+os.sep + cwd = self.cwd + os.sep + test + os.sep os.chdir(cwd) - with open(cwd+os.sep+'script.log', 'w') as f: - subprocess.run([sys.executable, 'script.py'], - stdout=f, stderr=f, cwd=cwd) + with open(cwd + os.sep + 'script.log', 'w') as f: + subprocess.run([sys.executable, 'script.py'], stdout=f, stderr=f, cwd=cwd) if yaml_available: - with open(cwd+'script.log', 'r') as f1: - with open(cwd+'script.out', 'r') as f2: + with open(cwd + 'script.log', 'r') as f1: + with open(cwd + 'script.out', 'r') as f2: baseline = yaml.full_load(f1) output = yaml.full_load(f2) - self.assertStructuredAlmostEqual(output, baseline, - allow_second_superset=True) + self.assertStructuredAlmostEqual( + output, baseline, allow_second_superset=True + ) else: _log = os.path.join(cwd, 'script.log') _out = os.path.join(cwd, 'script.out') - self.assertTrue(cmp(_log, _out), - msg="Files %s and %s differ" % (_log, _out)) + self.assertTrue( + cmp(_log, _out), msg="Files %s and %s differ" % (_log, _out) + ) def test_s1(self): self.run_script('s1', True) diff --git a/examples/doc/samples/update.py b/examples/doc/samples/update.py index 1f824a4421d..9eae2f4b694 100644 --- a/examples/doc/samples/update.py +++ b/examples/doc/samples/update.py @@ -7,13 +7,14 @@ import glob import os.path + def get_title(fname): INPUT = open(fname, 'r') for line in INPUT: sline = line.strip() - #print sline - #print sline[0:2] - #print '.%s.' % sline[-2:] + # print sline + # print sline[0:2] + # print '.%s.' % sline[-2:] if sline[0:2] == '= ' and sline[-2:] == ' =': tmp = sline[2:-2] tmp.strip() @@ -22,7 +23,7 @@ def get_title(fname): OUTPUT = open('TRAC.txt', 'w') -print >>OUTPUT, """{{{ +print >> OUTPUT, """{{{ #!comment ; ; Trac examples generated automatically by the update.py script @@ -36,41 +37,50 @@ def get_title(fname): """ -print >>OUTPUT, '== Case Studies ==' -print >>OUTPUT, '' -print >>OUTPUT, """The following links provide case studies that illustrate the use of Pyomo to formulate and analyze optimization models.""" -print >>OUTPUT, '' +print >> OUTPUT, '== Case Studies ==' +print >> OUTPUT, '' +print >> OUTPUT, """The following links provide case studies that illustrate the use of Pyomo to formulate and analyze optimization models.""" +print >> OUTPUT, '' for Dir in glob.glob('case_studies/*'): dir = os.path.basename(Dir) fname = 'case_studies/%s/README.txt' % dir if os.path.exists(fname): - print >>OUTPUT, " * [wiki:Documentation/PyomoGallery/CaseStudies/%s %s]" % (dir, get_title(fname)) - print >>OUTPUT, "{{{\n#!comment\n[[Include(source:pyomo.data.samples/trunk/pyomo/data/samples/case_studies/%s/README.txt, text/x-trac-wiki)]]\n}}}" % dir + print >> OUTPUT, " * [wiki:Documentation/PyomoGallery/CaseStudies/%s %s]" % ( + dir, + get_title(fname), + ) + print >> OUTPUT, "{{{\n#!comment\n[[Include(source:pyomo.data.samples/trunk/pyomo/data/samples/case_studies/%s/README.txt, text/x-trac-wiki)]]\n}}}" % dir -print >>OUTPUT, '' -print >>OUTPUT, '== Pyomo Scripts ==' -print >>OUTPUT, '' -print >>OUTPUT, """The following links describe examples that show how to execute Pyomo functionality with Python scripts.""" -print >>OUTPUT, '' +print >> OUTPUT, '' +print >> OUTPUT, '== Pyomo Scripts ==' +print >> OUTPUT, '' +print >> OUTPUT, """The following links describe examples that show how to execute Pyomo functionality with Python scripts.""" +print >> OUTPUT, '' for Dir in glob.glob('scripts/*'): dir = os.path.basename(Dir) fname = 'scripts/%s/README.txt' % dir if os.path.exists(fname): - print >>OUTPUT, " * [wiki:Documentation/PyomoGallery/Scripts/%s %s]" % (dir, get_title(fname)) - print >>OUTPUT, "{{{\n#!comment\n[[Include(source:pyomo.data.samples/trunk/pyomo/data/samples/scripts/%s/README.txt, text/x-trac-wiki)]]\n}}}" % dir + print >> OUTPUT, " * [wiki:Documentation/PyomoGallery/Scripts/%s %s]" % ( + dir, + get_title(fname), + ) + print >> OUTPUT, "{{{\n#!comment\n[[Include(source:pyomo.data.samples/trunk/pyomo/data/samples/scripts/%s/README.txt, text/x-trac-wiki)]]\n}}}" % dir -print >>OUTPUT, '' -print >>OUTPUT, '== Modeling Comparisons ==' -print >>OUTPUT, '' -print >>OUTPUT, """The following links provide documentation of optimization models that can be used to compare and contrast Pyomo with other optimization modeling tools. Note that the list of [wiki:Documentation/RelatedProjects related projects] summarizes Python software frameworks that provide optimization functionality that is similar to Pyomo.""" -print >>OUTPUT, '' +print >> OUTPUT, '' +print >> OUTPUT, '== Modeling Comparisons ==' +print >> OUTPUT, '' +print >> OUTPUT, """The following links provide documentation of optimization models that can be used to compare and contrast Pyomo with other optimization modeling tools. Note that the list of [wiki:Documentation/RelatedProjects related projects] summarizes Python software frameworks that provide optimization functionality that is similar to Pyomo.""" +print >> OUTPUT, '' for Dir in glob.glob('comparisons/*'): dir = os.path.basename(Dir) fname = 'comparisons/%s/README.txt' % dir if os.path.exists(fname): - print >>OUTPUT, " * [wiki:Documentation/PyomoGallery/ModelingComparisons/%s %s]" % (dir, get_title(fname)) - print >>OUTPUT, "{{{\n#!comment\n[[Include(source:pyomo.data.samples/trunk/pyomo/data/samples/comparisons/%s/README.txt, text/x-trac-wiki)]]\n}}}" % dir + print >> OUTPUT, " * [wiki:Documentation/PyomoGallery/ModelingComparisons/%s %s]" % ( + dir, + get_title(fname), + ) + print >> OUTPUT, "{{{\n#!comment\n[[Include(source:pyomo.data.samples/trunk/pyomo/data/samples/comparisons/%s/README.txt, text/x-trac-wiki)]]\n}}}" % dir diff --git a/examples/gdp/batchProcessing.py b/examples/gdp/batchProcessing.py index 7ac536b2eb9..f0980dd5034 100644 --- a/examples/gdp/batchProcessing.py +++ b/examples/gdp/batchProcessing.py @@ -10,6 +10,7 @@ because the _opt file is different (It has hard-coded bigM parameters so that each constraint has the "optimal" bigM).''' + def build_model(): model = AbstractModel() @@ -17,9 +18,10 @@ def build_model(): model.BigM = Suffix(direction=Suffix.LOCAL) model.BigM[None] = 1000 - ## Constants from GAMS - StorageTankSizeFactor = 2*5 # btw, I know 2*5 is 10... I don't know why it's written this way in GAMS? + StorageTankSizeFactor = ( + 2 * 5 + ) # btw, I know 2*5 is 10... I don't know why it's written this way in GAMS? StorageTankSizeFactorByProd = 3 MinFlow = -log(10000) VolumeLB = log(300) @@ -31,7 +33,6 @@ def build_model(): # TODO: YOU ARE HERE. YOU HAVEN'T ACTUALLY MADE THESE THE BOUNDS YET, NOR HAVE YOU FIGURED OUT WHOSE # BOUNDS THEY ARE. AND THERE ARE MORE IN GAMS. - ########## # Sets ########## @@ -43,12 +44,11 @@ def build_model(): # TODO: this seems like an over-complicated way to accomplish this task... def filter_out_last(model, j): return j != model.STAGES.last() - model.STAGESExceptLast = Set(initialize=model.STAGES, filter=filter_out_last) + model.STAGESExceptLast = Set(initialize=model.STAGES, filter=filter_out_last) # TODO: these aren't in the formulation?? - #model.STORAGE_TANKS = Set() - + # model.STORAGE_TANKS = Set() ############### # Parameters @@ -66,8 +66,9 @@ def filter_out_last(model, j): # These are hard-coded in the GAMS file, hence the defaults model.StorageTankSizeFactor = Param(model.STAGES, default=StorageTankSizeFactor) - model.StorageTankSizeFactorByProd = Param(model.PRODUCTS, model.STAGES, - default=StorageTankSizeFactorByProd) + model.StorageTankSizeFactorByProd = Param( + model.PRODUCTS, model.STAGES, default=StorageTankSizeFactorByProd + ) # TODO: bonmin wasn't happy and I think it might have something to do with this? # or maybe issues with convexity or a lack thereof... I don't know yet. @@ -86,7 +87,6 @@ def get_log_coeffs(model, k): model.unitsInPhaseUB = Param(model.STAGES, default=UnitsInPhaseUB) model.unitsOutOfPhaseUB = Param(model.STAGES, default=UnitsOutOfPhaseUB) - ################ # Variables ################ @@ -115,17 +115,24 @@ def get_log_coeffs(model, k): # must be the log ones. def get_volume_bounds(model, j): return (model.volumeLB[j], model.volumeUB[j]) + model.volume_log = Var(model.STAGES, bounds=get_volume_bounds) model.batchSize_log = Var(model.PRODUCTS, model.STAGES) model.cycleTime_log = Var(model.PRODUCTS) + def get_unitsOutOfPhase_bounds(model, j): return (0, model.unitsOutOfPhaseUB[j]) + model.unitsOutOfPhase_log = Var(model.STAGES, bounds=get_unitsOutOfPhase_bounds) + def get_unitsInPhase_bounds(model, j): return (0, model.unitsInPhaseUB[j]) + model.unitsInPhase_log = Var(model.STAGES, bounds=get_unitsInPhase_bounds) + def get_storageTankSize_bounds(model, j): return (model.storageTankSizeLB[j], model.storageTankSizeUB[j]) + # TODO: these bounds make it infeasible... model.storageTankSize_log = Var(model.STAGES, bounds=get_storageTankSize_bounds) @@ -138,31 +145,55 @@ def get_storageTankSize_bounds(model, j): ############### def get_cost_rule(model): - return model.Alpha1 * sum(exp(model.unitsInPhase_log[j] + model.unitsOutOfPhase_log[j] + \ - model.Beta1 * model.volume_log[j]) for j in model.STAGES) +\ - model.Alpha2 * sum(exp(model.Beta2 * model.storageTankSize_log[j]) for j in model.STAGESExceptLast) - model.min_cost = Objective(rule=get_cost_rule) + return model.Alpha1 * sum( + exp( + model.unitsInPhase_log[j] + + model.unitsOutOfPhase_log[j] + + model.Beta1 * model.volume_log[j] + ) + for j in model.STAGES + ) + model.Alpha2 * sum( + exp(model.Beta2 * model.storageTankSize_log[j]) + for j in model.STAGESExceptLast + ) + model.min_cost = Objective(rule=get_cost_rule) ############## # Constraints ############## def processing_capacity_rule(model, j, i): - return model.volume_log[j] >= log(model.ProductSizeFactor[i, j]) + model.batchSize_log[i, j] - \ - model.unitsInPhase_log[j] - model.processing_capacity = Constraint(model.STAGES, model.PRODUCTS, rule=processing_capacity_rule) + return ( + model.volume_log[j] + >= log(model.ProductSizeFactor[i, j]) + + model.batchSize_log[i, j] + - model.unitsInPhase_log[j] + ) + + model.processing_capacity = Constraint( + model.STAGES, model.PRODUCTS, rule=processing_capacity_rule + ) def processing_time_rule(model, j, i): - return model.cycleTime_log[i] >= log(model.ProcessingTime[i, j]) - model.batchSize_log[i, j] - \ - model.unitsOutOfPhase_log[j] - model.processing_time = Constraint(model.STAGES, model.PRODUCTS, rule=processing_time_rule) + return ( + model.cycleTime_log[i] + >= log(model.ProcessingTime[i, j]) + - model.batchSize_log[i, j] + - model.unitsOutOfPhase_log[j] + ) + + model.processing_time = Constraint( + model.STAGES, model.PRODUCTS, rule=processing_time_rule + ) def finish_in_time_rule(model): - return model.HorizonTime >= sum(model.ProductionAmount[i]*exp(model.cycleTime_log[i]) \ - for i in model.PRODUCTS) - model.finish_in_time = Constraint(rule=finish_in_time_rule) + return model.HorizonTime >= sum( + model.ProductionAmount[i] * exp(model.cycleTime_log[i]) + for i in model.PRODUCTS + ) + model.finish_in_time = Constraint(rule=finish_in_time_rule) ############### # Disjunctions @@ -170,55 +201,84 @@ def finish_in_time_rule(model): def storage_tank_selection_disjunct_rule(disjunct, selectStorageTank, j): model = disjunct.model() + def volume_stage_j_rule(disjunct, i): - return model.storageTankSize_log[j] >= log(model.StorageTankSizeFactor[j]) + \ - model.batchSize_log[i, j] + return ( + model.storageTankSize_log[j] + >= log(model.StorageTankSizeFactor[j]) + model.batchSize_log[i, j] + ) + def volume_stage_jPlus1_rule(disjunct, i): - return model.storageTankSize_log[j] >= log(model.StorageTankSizeFactor[j]) + \ - model.batchSize_log[i, j+1] + return ( + model.storageTankSize_log[j] + >= log(model.StorageTankSizeFactor[j]) + model.batchSize_log[i, j + 1] + ) + def batch_size_rule(disjunct, i): - return inequality(-log(model.StorageTankSizeFactorByProd[i,j]), - model.batchSize_log[i,j] - model.batchSize_log[i, j+1], - log(model.StorageTankSizeFactorByProd[i,j])) + return inequality( + -log(model.StorageTankSizeFactorByProd[i, j]), + model.batchSize_log[i, j] - model.batchSize_log[i, j + 1], + log(model.StorageTankSizeFactorByProd[i, j]), + ) + def no_batch_rule(disjunct, i): - return model.batchSize_log[i,j] - model.batchSize_log[i,j+1] == 0 + return model.batchSize_log[i, j] - model.batchSize_log[i, j + 1] == 0 if selectStorageTank: - disjunct.volume_stage_j = Constraint(model.PRODUCTS, rule=volume_stage_j_rule) - disjunct.volume_stage_jPlus1 = Constraint(model.PRODUCTS, - rule=volume_stage_jPlus1_rule) + disjunct.volume_stage_j = Constraint( + model.PRODUCTS, rule=volume_stage_j_rule + ) + disjunct.volume_stage_jPlus1 = Constraint( + model.PRODUCTS, rule=volume_stage_jPlus1_rule + ) disjunct.batch_size = Constraint(model.PRODUCTS, rule=batch_size_rule) else: # The formulation says 0, but GAMS has this constant. # 04/04: Francisco says volume should be free: # disjunct.no_volume = Constraint(expr=model.storageTankSize_log[j] == MinFlow) disjunct.no_batch = Constraint(model.PRODUCTS, rule=no_batch_rule) - model.storage_tank_selection_disjunct = Disjunct([0,1], model.STAGESExceptLast, - rule=storage_tank_selection_disjunct_rule) + + model.storage_tank_selection_disjunct = Disjunct( + [0, 1], model.STAGESExceptLast, rule=storage_tank_selection_disjunct_rule + ) def select_storage_tanks_rule(model, j): - return [model.storage_tank_selection_disjunct[selectTank, j] for selectTank in [0,1]] - model.select_storage_tanks = Disjunction(model.STAGESExceptLast, rule=select_storage_tanks_rule) + return [ + model.storage_tank_selection_disjunct[selectTank, j] + for selectTank in [0, 1] + ] + + model.select_storage_tanks = Disjunction( + model.STAGESExceptLast, rule=select_storage_tanks_rule + ) # though this is a disjunction in the GAMs model, it is more efficiently formulated this way: # TODO: what on earth is k? def units_out_of_phase_rule(model, j): - return model.unitsOutOfPhase_log[j] == sum(model.LogCoeffs[k] * model.outOfPhase[j,k] \ - for k in model.PARALLELUNITS) + return model.unitsOutOfPhase_log[j] == sum( + model.LogCoeffs[k] * model.outOfPhase[j, k] for k in model.PARALLELUNITS + ) + model.units_out_of_phase = Constraint(model.STAGES, rule=units_out_of_phase_rule) def units_in_phase_rule(model, j): - return model.unitsInPhase_log[j] == sum(model.LogCoeffs[k] * model.inPhase[j,k] \ - for k in model.PARALLELUNITS) + return model.unitsInPhase_log[j] == sum( + model.LogCoeffs[k] * model.inPhase[j, k] for k in model.PARALLELUNITS + ) + model.units_in_phase = Constraint(model.STAGES, rule=units_in_phase_rule) # and since I didn't do the disjunction as a disjunction, we need the XORs: def units_out_of_phase_xor_rule(model, j): - return sum(model.outOfPhase[j,k] for k in model.PARALLELUNITS) == 1 - model.units_out_of_phase_xor = Constraint(model.STAGES, rule=units_out_of_phase_xor_rule) + return sum(model.outOfPhase[j, k] for k in model.PARALLELUNITS) == 1 + + model.units_out_of_phase_xor = Constraint( + model.STAGES, rule=units_out_of_phase_xor_rule + ) def units_in_phase_xor_rule(model, j): - return sum(model.inPhase[j,k] for k in model.PARALLELUNITS) == 1 + return sum(model.inPhase[j, k] for k in model.PARALLELUNITS) == 1 + model.units_in_phase_xor = Constraint(model.STAGES, rule=units_in_phase_xor_rule) return model @@ -227,5 +287,7 @@ def units_in_phase_xor_rule(model, j): if __name__ == "__main__": m = build_model().create_instance('batchProcessing1.dat') TransformationFactory('gdp.bigm').apply_to(m) - SolverFactory('gams').solve(m, solver='baron', tee=True, add_options=['option optcr=1e-6;']) + SolverFactory('gams').solve( + m, solver='baron', tee=True, add_options=['option optcr=1e-6;'] + ) m.min_cost.display() diff --git a/examples/gdp/constrained_layout/cons_layout_model.py b/examples/gdp/constrained_layout/cons_layout_model.py index b12cf1463d1..10595db4c22 100644 --- a/examples/gdp/constrained_layout/cons_layout_model.py +++ b/examples/gdp/constrained_layout/cons_layout_model.py @@ -11,64 +11,157 @@ """ from __future__ import division -from pyomo.environ import (ConcreteModel, Objective, Param, - RangeSet, Set, Var) +from pyomo.environ import ConcreteModel, Objective, Param, RangeSet, Set, Var, value +# Constrained layout model examples. These are from Nicolas Sawaya (2006). +# Format: rect_lengths, rect_heights, circ_xvals, circ_yvals, circ_rvals (as dicts), +# sep_penalty_matrix (as nested array) +# Note that only the strict upper triangle of sep_penalty_matrix is used +constrained_layout_model_examples = { + "CLay0203": { + 'rect_lengths': {1: 5, 2: 7, 3: 3}, + 'rect_heights': {1: 6, 2: 5, 3: 3}, + 'circ_xvals': {1: 15, 2: 50}, + 'circ_yvals': {1: 10, 2: 80}, + 'circ_rvals': {1: 6, 2: 5}, + 'sep_penalty_matrix': [[0, 300, 240], [0, 0, 100]], + }, + "CLay0204": { + 'rect_lengths': {1: 5, 2: 7, 3: 3, 4: 2}, + 'rect_heights': {1: 6, 2: 5, 3: 3, 4: 3}, + 'circ_xvals': {1: 15, 2: 50}, + 'circ_yvals': {1: 10, 2: 80}, + 'circ_rvals': {1: 6, 2: 10}, + 'sep_penalty_matrix': [[0, 300, 240, 210], [0, 0, 100, 150], [0, 0, 0, 120]], + }, + "CLay0205": { + 'rect_lengths': {1: 5, 2: 7, 3: 3, 4: 2, 5: 9}, + 'rect_heights': {1: 6, 2: 5, 3: 3, 4: 3, 5: 7}, + 'circ_xvals': {1: 15, 2: 50}, + 'circ_yvals': {1: 10, 2: 80}, + 'circ_rvals': {1: 6, 2: 10}, + 'sep_penalty_matrix': [ + [0, 300, 240, 210, 50], + [0, 0, 100, 150, 30], + [0, 0, 0, 120, 25], + [0, 0, 0, 0, 60], + ], + }, + "CLay0303": { + 'rect_lengths': {1: 5, 2: 7, 3: 3}, + 'rect_heights': {1: 6, 2: 5, 3: 3}, + 'circ_xvals': {1: 15, 2: 50, 3: 30}, + 'circ_yvals': {1: 10, 2: 80, 3: 50}, + 'circ_rvals': {1: 6, 2: 5, 3: 4}, + 'sep_penalty_matrix': [[0, 300, 240], [0, 0, 100]], + }, + "CLay0304": { + 'rect_lengths': {1: 5, 2: 7, 3: 3, 4: 2}, + 'rect_heights': {1: 6, 2: 5, 3: 3, 4: 3}, + 'circ_xvals': {1: 15, 2: 50, 3: 30}, + 'circ_yvals': {1: 10, 2: 80, 3: 50}, + 'circ_rvals': {1: 6, 2: 5, 3: 4}, + 'sep_penalty_matrix': [[0, 300, 240, 210], [0, 0, 100, 150], [0, 0, 0, 120]], + }, + "CLay0305": { + 'rect_lengths': {1: 5, 2: 7, 3: 3, 4: 2, 5: 9}, + 'rect_heights': {1: 6, 2: 5, 3: 3, 4: 3, 5: 7}, + 'circ_xvals': {1: 15, 2: 50, 3: 30}, + 'circ_yvals': {1: 10, 2: 80, 3: 50}, + 'circ_rvals': {1: 6, 2: 10, 3: 4}, + 'sep_penalty_matrix': [ + [0, 300, 240, 210, 50], + [0, 0, 100, 150, 30], + [0, 0, 0, 120, 25], + [0, 0, 0, 0, 60], + ], + }, +} -def build_constrained_layout_model(): + +def build_constrained_layout_model( + params=constrained_layout_model_examples['CLay0203'], metric="l1" +): """Build the model.""" + + # Ensure the caller passed good data + assert len(params) == 6 + + # Get all the parameters out + rect_lengths = params['rect_lengths'] + rect_heights = params['rect_heights'] + circ_xvals = params['circ_xvals'] + circ_yvals = params['circ_yvals'] + circ_rvals = params['circ_rvals'] + sep_penalty_matrix = params['sep_penalty_matrix'] + + assert len(rect_lengths) == len( + rect_heights + ), "There should be the same number of rectangle lengths and heights." + assert ( + len(circ_xvals) == len(circ_yvals) == len(circ_rvals) + ), "There should be the same number of circle x values, y values, and radii" + for row in sep_penalty_matrix: + assert len(row) == len( + sep_penalty_matrix[0] + ), "Matrix rows should have the same length" + assert metric in ["l1", "l2"], 'Metric options are "l1" and "l2"' + m = ConcreteModel(name="2-D constrained layout") - m.rectangles = RangeSet(3, doc="Three rectangles") - m.circles = RangeSet(2, doc="Two circles") - - m.rect_length = Param( - m.rectangles, initialize={1: 5, 2: 7, 3: 3}, - doc="Rectangle length") - m.rect_height = Param( - m.rectangles, initialize={1: 6, 2: 5, 3: 3}, - doc="Rectangle height") - - m.circle_x = Param(m.circles, initialize={1: 15, 2: 50}, - doc="x-coordinate of circle center") - m.circle_y = Param(m.circles, initialize={1: 10, 2: 80}, - doc="y-coordinate of circle center") - m.circle_r = Param(m.circles, initialize={1: 6, 2: 5}, - doc="radius of circle") + m.rectangles = RangeSet(len(rect_lengths), doc=f"{len(rect_lengths)} rectangles") + m.circles = RangeSet(len(circ_xvals), doc=f"{len(circ_xvals)} circles") + + m.rect_length = Param(m.rectangles, initialize=rect_lengths, doc="Rectangle length") + m.rect_height = Param(m.rectangles, initialize=rect_heights, doc="Rectangle height") + + m.circle_x = Param( + m.circles, initialize=circ_xvals, doc="x-coordinate of circle center" + ) + m.circle_y = Param( + m.circles, initialize=circ_yvals, doc="y-coordinate of circle center" + ) + m.circle_r = Param(m.circles, initialize=circ_rvals, doc="radius of circle") @m.Param(m.rectangles, doc="Minimum feasible x value for rectangle") def x_min(m, rect): return min( m.circle_x[circ] - m.circle_r[circ] + m.rect_length[rect] / 2 - for circ in m.circles) + for circ in m.circles + ) @m.Param(m.rectangles, doc="Maximum feasible x value for rectangle") def x_max(m, rect): return max( m.circle_x[circ] + m.circle_r[circ] - m.rect_length[rect] / 2 - for circ in m.circles) + for circ in m.circles + ) @m.Param(m.rectangles, doc="Minimum feasible y value for rectangle") def y_min(m, rect): return min( m.circle_y[circ] - m.circle_r[circ] + m.rect_height[rect] / 2 - for circ in m.circles) + for circ in m.circles + ) @m.Param(m.rectangles, doc="Maximum feasible y value for rectangle") def y_max(m, rect): return max( m.circle_y[circ] + m.circle_r[circ] - m.rect_height[rect] / 2 - for circ in m.circles) + for circ in m.circles + ) - m.ordered_rect_pairs = Set( - initialize=m.rectangles * m.rectangles, - filter=lambda _, r1, r2: r1 != r2) - m.rect_pairs = Set(initialize=[ - (r1, r2) for r1, r2 in m.ordered_rect_pairs - if r1 < r2]) + m.rect_pairs = Set( + initialize=m.rectangles * m.rectangles, filter=lambda _, r1, r2: r1 < r2 + ) m.rect_sep_penalty = Param( - m.rect_pairs, initialize={(1, 2): 300, (1, 3): 240, (2, 3): 100}, - doc="Penalty for separation distance between two rectangles.") + m.rect_pairs, + # 0-based vs 1-based indices... + initialize={ + (r1, r2): sep_penalty_matrix[r1 - 1][r2 - 1] for r1, r2 in m.rect_pairs + }, + doc="Penalty for separation distance between two rectangles.", + ) def x_bounds(m, rect): return m.x_min[rect], m.x_max[rect] @@ -77,47 +170,64 @@ def y_bounds(m, rect): return m.y_min[rect], m.y_max[rect] m.rect_x = Var( - m.rectangles, doc="x-coordinate of rectangle center", - bounds=x_bounds) + m.rectangles, doc="x-coordinate of rectangle center", bounds=x_bounds + ) m.rect_y = Var( - m.rectangles, doc="y-coordinate of rectangle center", - bounds=y_bounds) - m.dist_x = Var( - m.rect_pairs, doc="x-axis separation between rectangle pair") - m.dist_y = Var( - m.rect_pairs, doc="y-axis separation between rectangle pair") - - m.min_dist_cost = Objective( - expr=sum(m.rect_sep_penalty[r1, r2] * - (m.dist_x[r1, r2] + m.dist_y[r1, r2]) - for (r1, r2) in m.rect_pairs)) - - @m.Constraint(m.ordered_rect_pairs, - doc="x-distance between rectangles") - def dist_x_defn(m, r1, r2): - return m.dist_x[ - tuple(sorted([r1, r2]))] >= m.rect_x[r2] - m.rect_x[r1] - - @m.Constraint(m.ordered_rect_pairs, - doc="y-distance between rectangles") - def dist_y_defn(m, r1, r2): - return m.dist_y[ - tuple(sorted([r1, r2]))] >= m.rect_y[r2] - m.rect_y[r1] + m.rectangles, doc="y-coordinate of rectangle center", bounds=y_bounds + ) + m.dist_x = Var(m.rect_pairs, doc="x-axis separation between rectangle pair") + m.dist_y = Var(m.rect_pairs, doc="y-axis separation between rectangle pair") + + if metric == "l2": + m.min_dist_cost = Objective( + expr=sum( + m.rect_sep_penalty[r1, r2] + * (m.dist_x[r1, r2] ** 2 + m.dist_y[r1, r2] ** 2) ** 0.5 + for (r1, r2) in m.rect_pairs + ) + ) + # l1 distance used in the paper + else: + m.min_dist_cost = Objective( + expr=sum( + m.rect_sep_penalty[r1, r2] * (m.dist_x[r1, r2] + m.dist_y[r1, r2]) + for (r1, r2) in m.rect_pairs + ) + ) + + # Ensure the dist_x and dist_y are greater than the positive and negative + # signed distances. + @m.Constraint(m.rect_pairs, doc="x-distance between rectangles") + def dist_x_defn_1(m, r1, r2): + return m.dist_x[r1, r2] >= m.rect_x[r2] - m.rect_x[r1] + + @m.Constraint(m.rect_pairs, doc="x-distance between rectangles") + def dist_x_defn_2(m, r1, r2): + return m.dist_x[r1, r2] >= m.rect_x[r1] - m.rect_x[r2] + + @m.Constraint(m.rect_pairs, doc="y-distance between rectangles") + def dist_y_defn_1(m, r1, r2): + return m.dist_y[r1, r2] >= m.rect_y[r2] - m.rect_y[r1] + + @m.Constraint(m.rect_pairs, doc="y-distance between rectangles") + def dist_y_defn_2(m, r1, r2): + return m.dist_y[r1, r2] >= m.rect_y[r1] - m.rect_y[r2] @m.Disjunction( m.rect_pairs, doc="Make sure that none of the rectangles overlap in " - "either the x or y dimensions.") + "either the x or y dimensions.", + ) def no_overlap(m, r1, r2): return [ - m.rect_x[r1] + m.rect_length[r1] / 2 <= ( - m.rect_x[r2] - m.rect_length[r2] / 2), - m.rect_y[r1] + m.rect_height[r1] / 2 <= ( - m.rect_y[r2] - m.rect_height[r2] / 2), - m.rect_x[r2] + m.rect_length[r2] / 2 <= ( - m.rect_x[r1] - m.rect_length[r1] / 2), - m.rect_y[r2] + m.rect_height[r2] / 2 <= ( - m.rect_y[r1] - m.rect_height[r1] / 2), + m.rect_x[r1] + m.rect_length[r1] / 2 + <= (m.rect_x[r2] - m.rect_length[r2] / 2), + m.rect_y[r1] + m.rect_height[r1] / 2 + <= (m.rect_y[r2] - m.rect_height[r2] / 2), + m.rect_x[r2] + m.rect_length[r2] / 2 + <= (m.rect_x[r1] - m.rect_length[r1] / 2), + m.rect_y[r2] + m.rect_height[r2] / 2 + <= (m.rect_y[r1] - m.rect_height[r1] / 2), ] @m.Disjunction(m.rectangles, doc="Each rectangle must be in a circle.") @@ -125,23 +235,111 @@ def rectangle_in_circle(m, r): return [ [ # Rectangle lower left corner in circle - (m.rect_x[r] - m.rect_length[r] / 2 - m.circle_x[c]) ** 2 + - (m.rect_y[r] + m.rect_height[r] / 2 - m.circle_y[c]) ** 2 + (m.rect_x[r] - m.rect_length[r] / 2 - m.circle_x[c]) ** 2 + + (m.rect_y[r] + m.rect_height[r] / 2 - m.circle_y[c]) ** 2 <= m.circle_r[c] ** 2, # rectangle upper left corner in circle - (m.rect_x[r] - m.rect_length[r] / 2 - m.circle_x[c]) ** 2 + - (m.rect_y[r] - m.rect_height[r] / 2 - m.circle_y[c]) ** 2 + (m.rect_x[r] - m.rect_length[r] / 2 - m.circle_x[c]) ** 2 + + (m.rect_y[r] - m.rect_height[r] / 2 - m.circle_y[c]) ** 2 <= m.circle_r[c] ** 2, # rectangle lower right corner in circle - (m.rect_x[r] + m.rect_length[r] / 2 - m.circle_x[c]) ** 2 + - (m.rect_y[r] + m.rect_height[r] / 2 - m.circle_y[c]) ** 2 + (m.rect_x[r] + m.rect_length[r] / 2 - m.circle_x[c]) ** 2 + + (m.rect_y[r] + m.rect_height[r] / 2 - m.circle_y[c]) ** 2 <= m.circle_r[c] ** 2, # rectangle upper right corner in circle - (m.rect_x[r] + m.rect_length[r] / 2 - m.circle_x[c]) ** 2 + - (m.rect_y[r] - m.rect_height[r] / 2 - m.circle_y[c]) ** 2 + (m.rect_x[r] + m.rect_length[r] / 2 - m.circle_x[c]) ** 2 + + (m.rect_y[r] - m.rect_height[r] / 2 - m.circle_y[c]) ** 2 <= m.circle_r[c] ** 2, ] for c in m.circles ] return m + + +def draw_model(m, title=None): + """Draw a model using matplotlib to illustrate what's going on. Pass 'title' argument to give chart a title""" + + # matplotlib setup + import matplotlib as mpl + import matplotlib.pyplot as plt + + fig, ax = plt.subplots(1, 1, figsize=(10, 10)) + ax.set_xlabel("x") + ax.set_ylabel("y") + + # Hardcode these bounds since I'm not sure the best way to do it automatically + ax.set_xlim([0, 100]) + ax.set_ylim([0, 100]) + + if title is not None: + plt.title(title) + + for c in m.circles: + print( + f"drawing circle {c}: x={m.circle_x[c]}, y={m.circle_y[c]}, r={m.circle_r[c]}" + ) + # no idea about colors + ax.add_patch( + mpl.patches.Circle( + (m.circle_x[c], m.circle_y[c]), + radius=m.circle_r[c], + facecolor="#0d98e6", + edgecolor="#000000", + ) + ) + ax.text( + m.circle_x[c], + m.circle_y[c] + m.circle_r[c] + 1.5, + f"Circle {c}", + horizontalalignment="center", + ) + + for r in m.rectangles: + print( + f"drawing rectangle {r}: x={value(m.rect_x[r])}, y={value(m.rect_y[r])} (center), L={m.rect_length[r]}, H={m.rect_height[r]}" + ) + ax.add_patch( + mpl.patches.Rectangle( + ( + value(m.rect_x[r]) - m.rect_length[r] / 2, + value(m.rect_y[r]) - m.rect_height[r] / 2, + ), + m.rect_length[r], + m.rect_height[r], + facecolor="#fbec68", + edgecolor="#000000", + ) + ) + ax.text( + value(m.rect_x[r]), + value(m.rect_y[r]), + f"R{r}", + horizontalalignment="center", + verticalalignment="center", + fontsize=8, + ) + + plt.show() + + +# Solve some example problems and draw some pictures +if __name__ == "__main__": + from pyomo.environ import SolverFactory, TransformationFactory + + # Set up a solver, for example scip and bigm works + solver = SolverFactory("scip") + transformer = TransformationFactory("gdp.bigm") + + # Do all of the possible problems + for d in ["l1", "l2"]: + for key in constrained_layout_model_examples.keys(): + print(f"Solving example problem: {key}") + model = build_constrained_layout_model( + constrained_layout_model_examples[key], metric=d + ) + transformer.apply_to(model) + solver.solve(model) + print(f"Found objective function value: {model.min_dist_cost()}") + draw_model(model, title=(f"{key} ({d} distance)")) + print() diff --git a/examples/gdp/disease_model.py b/examples/gdp/disease_model.py index a1f52c4d7d3..bc3e69600ec 100644 --- a/examples/gdp/disease_model.py +++ b/examples/gdp/disease_model.py @@ -21,53 +21,1619 @@ from pyomo.gdp import * import math + def build_model(): # import data - pop = [ 15.881351, 15.881339, 15.881320, 15.881294, 15.881261, 15.881223, 15.881180, 15.881132, 15.881079, 15.881022, 15.880961, 15.880898, 15.880832, 15.880764, 15.880695, 15.880624, 15.880553, 15.880480, 15.880409, 15.880340, 15.880270, 15.880203, 15.880138, 15.880076, 15.880016, 15.879960, 15.879907, 15.879852, 15.879799, 15.879746, 15.879693, 15.879638, 15.879585, 15.879531, 15.879477, 15.879423, 15.879370, 15.879315, 15.879262, 15.879209, 15.879155, 15.879101, 15.879048, 15.878994, 15.878940, 15.878886, 15.878833, 15.878778, 15.878725, 15.878672, 15.878618, 15.878564, 15.878510, 15.878457, 15.878402, 15.878349, 15.878295, 15.878242, 15.878187, 15.878134, 15.878081, 15.878026, 15.877973, 15.877919, 15.877864, 15.877811, 15.877758, 15.877704, 15.877650, 15.877596, 15.877543, 15.877488, 15.877435, 15.877381, 15.877326, 15.877273, 15.877220, 15.877166, 15.877111, 15.877058, 15.877005, 15.876950, 15.876896, 15.876843, 15.876789, 15.876735, 15.876681, 15.876628, 15.876573, 15.876520, 15.876466, 15.876411, 15.876358, 15.876304, 15.876251, 15.876196, 15.876143, 15.876089, 15.876034, 15.875981, 15.875927, 15.875872, 15.875819, 15.875765, 15.875712, 15.875657, 15.875604, 15.875550, 15.875495, 15.875442, 15.875388, 15.875335, 15.875280, 15.875226, 15.875173, 15.875118, 15.875064, 15.875011, 15.874956, 15.874902, 15.874849, 15.874795, 15.874740, 15.874687, 15.874633, 15.874578, 15.874525, 15.874471, 15.874416, 15.874363, 15.874309, 15.874256, 15.874201, 15.874147, 15.874094, 15.874039, 15.873985, 15.873931, 15.873878, 15.873823, 15.873769, 15.873716, 15.873661, 15.873607, 15.873554, 15.873499, 15.873445, 15.873391, 15.873338, 15.873283, 15.873229, 15.873175, 15.873121, 15.873067, 15.873013, 15.872960, 15.872905, 15.872851, 15.872797, 15.872742, 15.872689, 15.872635, 15.872580, 15.872526, 15.872473, 15.872419, 15.872364, 15.872310, 15.872256, 15.872202, 15.872148, 15.872094, 15.872039, 15.871985, 15.871932, 15.871878, 15.871823, 15.871769, 15.871715, 15.871660, 15.871607, 15.871553, 15.871499, 15.871444, 15.871390, 15.871337, 15.871282, 15.871228, 15.871174, 15.871119, 15.871065, 15.871012, 15.870958, 15.870903, 15.870849, 15.870795, 15.870740, 15.870686, 15.870633, 15.870577, 15.870524, 15.870470, 15.870416, 15.870361, 15.870307, 15.870253, 15.870198, 15.870144, 15.870091, 15.870037, 15.869982, 15.869928, 15.869874, 15.869819, 15.869765, 15.869711, 15.869656, 15.869602, 15.869548, 15.869495, 15.869439, 15.869386, 15.869332, 15.869277, 15.869223, 15.869169, 15.869114, 15.869060, 15.869006, 15.868952, 15.868897, 15.868843, 15.868789, 15.868734, 15.868679, 15.868618, 15.868556, 15.868489, 15.868421, 15.868351, 15.868280, 15.868208, 15.868134, 15.868063, 15.867991, 15.867921, 15.867852, 15.867785, 15.867721, 15.867659, 15.867601, 15.867549, 15.867499, 15.867455, 15.867416, 15.867383, 15.867357, 15.867338, 15.867327, 15.867321, 15.867327, 15.867338, 15.867359, 15.867386, 15.867419, 15.867459, 15.867505, 15.867555, 15.867610, 15.867671, 15.867734, 15.867801, 15.867869, 15.867941, 15.868012, 15.868087, 15.868161, 15.868236, 15.868310, 15.868384, 15.868457, 15.868527, 15.868595, 15.868661, 15.868722, 15.868780, 15.868837, 15.868892, 15.868948, 15.869005, 15.869061, 15.869116, 15.869173, 15.869229, 15.869284, 15.869341, 15.869397, 15.869452, 15.869509, 15.869565, 15.869620, 15.869677, 15.869733, 15.869788, 15.869845, 15.869901, 15.869956, 15.870012, 15.870069, 15.870124, 15.870180, 15.870237, 15.870292, 15.870348, 15.870405, 15.870461, 15.870516, 15.870572, 15.870629, 15.870684, 15.870740, 15.870796, 15.870851, 15.870908, 15.870964, 15.871019, 15.871076, 15.871132, 15.871187, 15.871243, 15.871300, 15.871355, 15.871411, 15.871467, 15.871522, 15.871579, 15.871635, 15.871691, 15.871746, 15.871802, 15.871859, 15.871914, 15.871970, 15.872026, 15.872081, 15.872138, 15.872194, 15.872249, 15.872305, 15.872361, 15.872416, 15.872473, 15.872529, 15.872584, 15.872640, 15.872696, 15.872751, 15.872807, 15.872864, 15.872919, 15.872975, 15.873031, 15.873087, 15.873142, 15.873198, 15.873255, 15.873310, 15.873366, 15.873422, 15.873477, 15.873533, 15.873589, 15.873644, 15.873700, 15.873757, 15.873811, 15.873868, 15.873924, 15.873979, 15.874035, 15.874091, 15.874146, 15.874202, 15.874258, 15.874313, 15.874369, 15.874425, 15.874481, 15.874536, 15.874592] + pop = [ + 15.881351, + 15.881339, + 15.881320, + 15.881294, + 15.881261, + 15.881223, + 15.881180, + 15.881132, + 15.881079, + 15.881022, + 15.880961, + 15.880898, + 15.880832, + 15.880764, + 15.880695, + 15.880624, + 15.880553, + 15.880480, + 15.880409, + 15.880340, + 15.880270, + 15.880203, + 15.880138, + 15.880076, + 15.880016, + 15.879960, + 15.879907, + 15.879852, + 15.879799, + 15.879746, + 15.879693, + 15.879638, + 15.879585, + 15.879531, + 15.879477, + 15.879423, + 15.879370, + 15.879315, + 15.879262, + 15.879209, + 15.879155, + 15.879101, + 15.879048, + 15.878994, + 15.878940, + 15.878886, + 15.878833, + 15.878778, + 15.878725, + 15.878672, + 15.878618, + 15.878564, + 15.878510, + 15.878457, + 15.878402, + 15.878349, + 15.878295, + 15.878242, + 15.878187, + 15.878134, + 15.878081, + 15.878026, + 15.877973, + 15.877919, + 15.877864, + 15.877811, + 15.877758, + 15.877704, + 15.877650, + 15.877596, + 15.877543, + 15.877488, + 15.877435, + 15.877381, + 15.877326, + 15.877273, + 15.877220, + 15.877166, + 15.877111, + 15.877058, + 15.877005, + 15.876950, + 15.876896, + 15.876843, + 15.876789, + 15.876735, + 15.876681, + 15.876628, + 15.876573, + 15.876520, + 15.876466, + 15.876411, + 15.876358, + 15.876304, + 15.876251, + 15.876196, + 15.876143, + 15.876089, + 15.876034, + 15.875981, + 15.875927, + 15.875872, + 15.875819, + 15.875765, + 15.875712, + 15.875657, + 15.875604, + 15.875550, + 15.875495, + 15.875442, + 15.875388, + 15.875335, + 15.875280, + 15.875226, + 15.875173, + 15.875118, + 15.875064, + 15.875011, + 15.874956, + 15.874902, + 15.874849, + 15.874795, + 15.874740, + 15.874687, + 15.874633, + 15.874578, + 15.874525, + 15.874471, + 15.874416, + 15.874363, + 15.874309, + 15.874256, + 15.874201, + 15.874147, + 15.874094, + 15.874039, + 15.873985, + 15.873931, + 15.873878, + 15.873823, + 15.873769, + 15.873716, + 15.873661, + 15.873607, + 15.873554, + 15.873499, + 15.873445, + 15.873391, + 15.873338, + 15.873283, + 15.873229, + 15.873175, + 15.873121, + 15.873067, + 15.873013, + 15.872960, + 15.872905, + 15.872851, + 15.872797, + 15.872742, + 15.872689, + 15.872635, + 15.872580, + 15.872526, + 15.872473, + 15.872419, + 15.872364, + 15.872310, + 15.872256, + 15.872202, + 15.872148, + 15.872094, + 15.872039, + 15.871985, + 15.871932, + 15.871878, + 15.871823, + 15.871769, + 15.871715, + 15.871660, + 15.871607, + 15.871553, + 15.871499, + 15.871444, + 15.871390, + 15.871337, + 15.871282, + 15.871228, + 15.871174, + 15.871119, + 15.871065, + 15.871012, + 15.870958, + 15.870903, + 15.870849, + 15.870795, + 15.870740, + 15.870686, + 15.870633, + 15.870577, + 15.870524, + 15.870470, + 15.870416, + 15.870361, + 15.870307, + 15.870253, + 15.870198, + 15.870144, + 15.870091, + 15.870037, + 15.869982, + 15.869928, + 15.869874, + 15.869819, + 15.869765, + 15.869711, + 15.869656, + 15.869602, + 15.869548, + 15.869495, + 15.869439, + 15.869386, + 15.869332, + 15.869277, + 15.869223, + 15.869169, + 15.869114, + 15.869060, + 15.869006, + 15.868952, + 15.868897, + 15.868843, + 15.868789, + 15.868734, + 15.868679, + 15.868618, + 15.868556, + 15.868489, + 15.868421, + 15.868351, + 15.868280, + 15.868208, + 15.868134, + 15.868063, + 15.867991, + 15.867921, + 15.867852, + 15.867785, + 15.867721, + 15.867659, + 15.867601, + 15.867549, + 15.867499, + 15.867455, + 15.867416, + 15.867383, + 15.867357, + 15.867338, + 15.867327, + 15.867321, + 15.867327, + 15.867338, + 15.867359, + 15.867386, + 15.867419, + 15.867459, + 15.867505, + 15.867555, + 15.867610, + 15.867671, + 15.867734, + 15.867801, + 15.867869, + 15.867941, + 15.868012, + 15.868087, + 15.868161, + 15.868236, + 15.868310, + 15.868384, + 15.868457, + 15.868527, + 15.868595, + 15.868661, + 15.868722, + 15.868780, + 15.868837, + 15.868892, + 15.868948, + 15.869005, + 15.869061, + 15.869116, + 15.869173, + 15.869229, + 15.869284, + 15.869341, + 15.869397, + 15.869452, + 15.869509, + 15.869565, + 15.869620, + 15.869677, + 15.869733, + 15.869788, + 15.869845, + 15.869901, + 15.869956, + 15.870012, + 15.870069, + 15.870124, + 15.870180, + 15.870237, + 15.870292, + 15.870348, + 15.870405, + 15.870461, + 15.870516, + 15.870572, + 15.870629, + 15.870684, + 15.870740, + 15.870796, + 15.870851, + 15.870908, + 15.870964, + 15.871019, + 15.871076, + 15.871132, + 15.871187, + 15.871243, + 15.871300, + 15.871355, + 15.871411, + 15.871467, + 15.871522, + 15.871579, + 15.871635, + 15.871691, + 15.871746, + 15.871802, + 15.871859, + 15.871914, + 15.871970, + 15.872026, + 15.872081, + 15.872138, + 15.872194, + 15.872249, + 15.872305, + 15.872361, + 15.872416, + 15.872473, + 15.872529, + 15.872584, + 15.872640, + 15.872696, + 15.872751, + 15.872807, + 15.872864, + 15.872919, + 15.872975, + 15.873031, + 15.873087, + 15.873142, + 15.873198, + 15.873255, + 15.873310, + 15.873366, + 15.873422, + 15.873477, + 15.873533, + 15.873589, + 15.873644, + 15.873700, + 15.873757, + 15.873811, + 15.873868, + 15.873924, + 15.873979, + 15.874035, + 15.874091, + 15.874146, + 15.874202, + 15.874258, + 15.874313, + 15.874369, + 15.874425, + 15.874481, + 15.874536, + 15.874592, + ] - logIstar = [7.943245, 8.269994, 8.517212, 8.814208, 9.151740, 9.478472, 9.559847, 9.664087, 9.735378, 9.852583, 9.692265, 9.498807, 9.097634, 8.388878, 7.870516, 7.012956, 6.484941, 5.825368, 5.346815, 5.548361, 5.706732, 5.712617, 5.709714, 5.696888, 5.530087, 5.826563, 6.643563, 7.004292, 7.044663, 7.190259, 7.335926, 7.516861, 7.831779, 8.188895, 8.450204, 8.801436, 8.818379, 8.787658, 8.601685, 8.258338, 7.943364, 7.425585, 7.062834, 6.658307, 6.339600, 6.526984, 6.679178, 6.988758, 7.367331, 7.746694, 8.260558, 8.676522, 9.235582, 9.607778, 9.841917, 10.081571, 10.216090, 10.350366, 10.289668, 10.248842, 10.039504, 9.846343, 9.510392, 9.190923, 8.662465, 7.743221, 7.128458, 5.967898, 5.373883, 5.097497, 4.836570, 5.203345, 5.544798, 5.443047, 5.181152, 5.508669, 6.144130, 6.413744, 6.610423, 6.748885, 6.729511, 6.789841, 6.941034, 7.093516, 7.307039, 7.541077, 7.644803, 7.769145, 7.760187, 7.708017, 7.656795, 7.664983, 7.483828, 6.887324, 6.551093, 6.457449, 6.346064, 6.486300, 6.612378, 6.778753, 6.909477, 7.360570, 8.150303, 8.549044, 8.897572, 9.239323, 9.538751, 9.876531, 10.260911, 10.613536, 10.621510, 10.661115, 10.392899, 10.065536, 9.920090, 9.933097, 9.561691, 8.807713, 8.263463, 7.252184, 6.669083, 5.877763, 5.331878, 5.356563, 5.328469, 5.631146, 6.027497, 6.250717, 6.453919, 6.718444, 7.071636, 7.348905, 7.531528, 7.798226, 8.197941, 8.578809, 8.722964, 8.901152, 8.904370, 8.889865, 8.881902, 8.958903, 8.721281, 8.211509, 7.810624, 7.164607, 6.733688, 6.268503, 5.905983, 5.900432, 5.846547, 6.245427, 6.786271, 7.088480, 7.474295, 7.650063, 7.636703, 7.830990, 8.231516, 8.584816, 8.886908, 9.225216, 9.472778, 9.765505, 9.928623, 10.153033, 10.048574, 9.892620, 9.538818, 8.896100, 8.437584, 7.819738, 7.362598, 6.505880, 5.914972, 6.264584, 6.555019, 6.589319, 6.552029, 6.809771, 7.187616, 7.513918, 8.017712, 8.224957, 8.084474, 8.079148, 8.180991, 8.274269, 8.413748, 8.559599, 8.756090, 9.017927, 9.032720, 9.047983, 8.826873, 8.366489, 8.011876, 7.500830, 7.140406, 6.812626, 6.538719, 6.552218, 6.540129, 6.659927, 6.728530, 7.179692, 7.989210, 8.399173, 8.781128, 9.122303, 9.396378, 9.698512, 9.990104, 10.276543, 10.357284, 10.465869, 10.253833, 10.018503, 9.738407, 9.484367, 9.087025, 8.526409, 8.041126, 7.147168, 6.626706, 6.209446, 5.867231, 5.697439, 5.536769, 5.421413, 5.238297, 5.470136, 5.863007, 6.183083, 6.603569, 6.906278, 7.092324, 7.326612, 7.576052, 7.823430, 7.922775, 8.041677, 8.063403, 8.073229, 8.099726, 8.168522, 8.099041, 8.011404, 7.753147, 6.945211, 6.524244, 6.557723, 6.497742, 6.256247, 5.988794, 6.268093, 6.583316, 7.106842, 8.053929, 8.508237, 8.938915, 9.311863, 9.619753, 9.931745, 10.182361, 10.420978, 10.390829, 10.389230, 10.079342, 9.741479, 9.444561, 9.237448, 8.777687, 7.976436, 7.451502, 6.742856, 6.271545, 5.782289, 5.403089, 5.341954, 5.243509, 5.522993, 5.897001, 6.047042, 6.100738, 6.361727, 6.849562, 7.112544, 7.185346, 7.309412, 7.423746, 7.532142, 7.510318, 7.480175, 7.726362, 8.061117, 8.127072, 8.206166, 8.029634, 7.592953, 7.304869, 7.005394, 6.750019, 6.461377, 6.226432, 6.287047, 6.306452, 6.783694, 7.450957, 7.861692, 8.441530, 8.739626, 8.921994, 9.168961, 9.428077, 9.711664, 10.032714, 10.349937, 10.483985, 10.647475, 10.574038, 10.522431, 10.192246, 9.756246, 9.342511, 8.872072, 8.414189, 7.606582, 7.084701, 6.149903, 5.517257, 5.839429, 6.098090, 6.268935, 6.475965, 6.560543, 6.598942, 6.693938, 6.802531, 6.934345, 7.078370, 7.267736, 7.569640, 7.872204, 8.083603, 8.331226, 8.527144, 8.773523, 8.836599, 8.894303, 8.808326, 8.641717, 8.397901, 7.849034, 7.482899, 7.050252, 6.714103, 6.900603, 7.050765, 7.322905, 7.637986, 8.024340, 8.614505, 8.933591, 9.244008, 9.427410, 9.401385, 9.457744, 9.585068, 9.699673, 9.785478, 9.884559, 9.769732, 9.655075, 9.423071, 9.210198, 8.786654, 8.061787, 7.560976, 6.855829, 6.390707, 5.904006, 5.526631, 5.712303, 5.867027, 5.768367, 5.523352, 5.909118, 6.745543, 6.859218 ] + logIstar = [ + 7.943245, + 8.269994, + 8.517212, + 8.814208, + 9.151740, + 9.478472, + 9.559847, + 9.664087, + 9.735378, + 9.852583, + 9.692265, + 9.498807, + 9.097634, + 8.388878, + 7.870516, + 7.012956, + 6.484941, + 5.825368, + 5.346815, + 5.548361, + 5.706732, + 5.712617, + 5.709714, + 5.696888, + 5.530087, + 5.826563, + 6.643563, + 7.004292, + 7.044663, + 7.190259, + 7.335926, + 7.516861, + 7.831779, + 8.188895, + 8.450204, + 8.801436, + 8.818379, + 8.787658, + 8.601685, + 8.258338, + 7.943364, + 7.425585, + 7.062834, + 6.658307, + 6.339600, + 6.526984, + 6.679178, + 6.988758, + 7.367331, + 7.746694, + 8.260558, + 8.676522, + 9.235582, + 9.607778, + 9.841917, + 10.081571, + 10.216090, + 10.350366, + 10.289668, + 10.248842, + 10.039504, + 9.846343, + 9.510392, + 9.190923, + 8.662465, + 7.743221, + 7.128458, + 5.967898, + 5.373883, + 5.097497, + 4.836570, + 5.203345, + 5.544798, + 5.443047, + 5.181152, + 5.508669, + 6.144130, + 6.413744, + 6.610423, + 6.748885, + 6.729511, + 6.789841, + 6.941034, + 7.093516, + 7.307039, + 7.541077, + 7.644803, + 7.769145, + 7.760187, + 7.708017, + 7.656795, + 7.664983, + 7.483828, + 6.887324, + 6.551093, + 6.457449, + 6.346064, + 6.486300, + 6.612378, + 6.778753, + 6.909477, + 7.360570, + 8.150303, + 8.549044, + 8.897572, + 9.239323, + 9.538751, + 9.876531, + 10.260911, + 10.613536, + 10.621510, + 10.661115, + 10.392899, + 10.065536, + 9.920090, + 9.933097, + 9.561691, + 8.807713, + 8.263463, + 7.252184, + 6.669083, + 5.877763, + 5.331878, + 5.356563, + 5.328469, + 5.631146, + 6.027497, + 6.250717, + 6.453919, + 6.718444, + 7.071636, + 7.348905, + 7.531528, + 7.798226, + 8.197941, + 8.578809, + 8.722964, + 8.901152, + 8.904370, + 8.889865, + 8.881902, + 8.958903, + 8.721281, + 8.211509, + 7.810624, + 7.164607, + 6.733688, + 6.268503, + 5.905983, + 5.900432, + 5.846547, + 6.245427, + 6.786271, + 7.088480, + 7.474295, + 7.650063, + 7.636703, + 7.830990, + 8.231516, + 8.584816, + 8.886908, + 9.225216, + 9.472778, + 9.765505, + 9.928623, + 10.153033, + 10.048574, + 9.892620, + 9.538818, + 8.896100, + 8.437584, + 7.819738, + 7.362598, + 6.505880, + 5.914972, + 6.264584, + 6.555019, + 6.589319, + 6.552029, + 6.809771, + 7.187616, + 7.513918, + 8.017712, + 8.224957, + 8.084474, + 8.079148, + 8.180991, + 8.274269, + 8.413748, + 8.559599, + 8.756090, + 9.017927, + 9.032720, + 9.047983, + 8.826873, + 8.366489, + 8.011876, + 7.500830, + 7.140406, + 6.812626, + 6.538719, + 6.552218, + 6.540129, + 6.659927, + 6.728530, + 7.179692, + 7.989210, + 8.399173, + 8.781128, + 9.122303, + 9.396378, + 9.698512, + 9.990104, + 10.276543, + 10.357284, + 10.465869, + 10.253833, + 10.018503, + 9.738407, + 9.484367, + 9.087025, + 8.526409, + 8.041126, + 7.147168, + 6.626706, + 6.209446, + 5.867231, + 5.697439, + 5.536769, + 5.421413, + 5.238297, + 5.470136, + 5.863007, + 6.183083, + 6.603569, + 6.906278, + 7.092324, + 7.326612, + 7.576052, + 7.823430, + 7.922775, + 8.041677, + 8.063403, + 8.073229, + 8.099726, + 8.168522, + 8.099041, + 8.011404, + 7.753147, + 6.945211, + 6.524244, + 6.557723, + 6.497742, + 6.256247, + 5.988794, + 6.268093, + 6.583316, + 7.106842, + 8.053929, + 8.508237, + 8.938915, + 9.311863, + 9.619753, + 9.931745, + 10.182361, + 10.420978, + 10.390829, + 10.389230, + 10.079342, + 9.741479, + 9.444561, + 9.237448, + 8.777687, + 7.976436, + 7.451502, + 6.742856, + 6.271545, + 5.782289, + 5.403089, + 5.341954, + 5.243509, + 5.522993, + 5.897001, + 6.047042, + 6.100738, + 6.361727, + 6.849562, + 7.112544, + 7.185346, + 7.309412, + 7.423746, + 7.532142, + 7.510318, + 7.480175, + 7.726362, + 8.061117, + 8.127072, + 8.206166, + 8.029634, + 7.592953, + 7.304869, + 7.005394, + 6.750019, + 6.461377, + 6.226432, + 6.287047, + 6.306452, + 6.783694, + 7.450957, + 7.861692, + 8.441530, + 8.739626, + 8.921994, + 9.168961, + 9.428077, + 9.711664, + 10.032714, + 10.349937, + 10.483985, + 10.647475, + 10.574038, + 10.522431, + 10.192246, + 9.756246, + 9.342511, + 8.872072, + 8.414189, + 7.606582, + 7.084701, + 6.149903, + 5.517257, + 5.839429, + 6.098090, + 6.268935, + 6.475965, + 6.560543, + 6.598942, + 6.693938, + 6.802531, + 6.934345, + 7.078370, + 7.267736, + 7.569640, + 7.872204, + 8.083603, + 8.331226, + 8.527144, + 8.773523, + 8.836599, + 8.894303, + 8.808326, + 8.641717, + 8.397901, + 7.849034, + 7.482899, + 7.050252, + 6.714103, + 6.900603, + 7.050765, + 7.322905, + 7.637986, + 8.024340, + 8.614505, + 8.933591, + 9.244008, + 9.427410, + 9.401385, + 9.457744, + 9.585068, + 9.699673, + 9.785478, + 9.884559, + 9.769732, + 9.655075, + 9.423071, + 9.210198, + 8.786654, + 8.061787, + 7.560976, + 6.855829, + 6.390707, + 5.904006, + 5.526631, + 5.712303, + 5.867027, + 5.768367, + 5.523352, + 5.909118, + 6.745543, + 6.859218, + ] - deltaS = [ 9916.490263 ,12014.263380 ,13019.275755 ,12296.373612 ,8870.995603 ,1797.354574 ,-6392.880771 ,-16150.825387 ,-27083.245106 ,-40130.421462 ,-50377.169958 ,-57787.717468 ,-60797.223427 ,-59274.041897 ,-55970.213230 ,-51154.650927 ,-45877.841034 ,-40278.553775 ,-34543.967175 ,-28849.633641 ,-23192.776605 ,-17531.130740 ,-11862.021829 ,-6182.456792 ,-450.481090 ,5201.184400 ,10450.773882 ,15373.018272 ,20255.699431 ,24964.431669 ,29470.745887 ,33678.079947 ,37209.808930 ,39664.432393 ,41046.735479 ,40462.982011 ,39765.070209 ,39270.815830 ,39888.077002 ,42087.276604 ,45332.012929 ,49719.128772 ,54622.190928 ,59919.718626 ,65436.341097 ,70842.911460 ,76143.747430 ,81162.358574 ,85688.102884 ,89488.917734 ,91740.108470 ,91998.787916 ,87875.986012 ,79123.877908 ,66435.611045 ,48639.250610 ,27380.282817 ,2166.538464 ,-21236.428084 ,-43490.803535 ,-60436.624080 ,-73378.401966 ,-80946.278268 ,-84831.969493 ,-84696.627286 ,-81085.365407 ,-76410.847049 ,-70874.415387 ,-65156.276464 ,-59379.086883 ,-53557.267619 ,-47784.164830 ,-42078.001172 ,-36340.061427 ,-30541.788202 ,-24805.281435 ,-19280.817165 ,-13893.690606 ,-8444.172221 ,-3098.160839 ,2270.908649 ,7594.679295 ,12780.079247 ,17801.722109 ,22543.091206 ,26897.369814 ,31051.285734 ,34933.809557 ,38842.402859 ,42875.230152 ,47024.395356 ,51161.516122 ,55657.298307 ,60958.155424 ,66545.635029 ,72202.930397 ,77934.761905 ,83588.207792 ,89160.874522 ,94606.115027 ,99935.754968 ,104701.404975 ,107581.670606 ,108768.440311 ,107905.700480 ,104062.148863 ,96620.281684 ,83588.443029 ,61415.088182 ,27124.031692 ,-7537.285321 ,-43900.451653 ,-70274.062783 ,-87573.481475 ,-101712.148408 ,-116135.719087 ,-124187.225446 ,-124725.278371 ,-122458.145590 ,-117719.918256 ,-112352.138605 ,-106546.806030 ,-100583.803012 ,-94618.253238 ,-88639.090897 ,-82725.009842 ,-76938.910669 ,-71248.957807 ,-65668.352795 ,-60272.761991 ,-55179.538428 ,-50456.021161 ,-46037.728058 ,-42183.912670 ,-39522.184006 ,-38541.255303 ,-38383.665728 ,-39423.998130 ,-40489.466130 ,-41450.406768 ,-42355.156592 ,-43837.562085 ,-43677.262972 ,-41067.896944 ,-37238.628465 ,-32230.392026 ,-26762.766062 ,-20975.163308 ,-15019.218554 ,-9053.105545 ,-3059.663132 ,2772.399618 ,8242.538397 ,13407.752291 ,18016.047539 ,22292.125752 ,26616.583347 ,30502.564253 ,33153.890890 ,34216.684448 ,33394.220786 ,29657.417791 ,23064.375405 ,12040.831532 ,-2084.921068 ,-21390.235970 ,-38176.615985 ,-51647.714482 ,-59242.564959 ,-60263.150854 ,-58599.245165 ,-54804.972560 ,-50092.112608 ,-44465.812552 ,-38533.096297 ,-32747.104307 ,-27130.082610 ,-21529.632955 ,-15894.611939 ,-10457.566933 ,-5429.042583 ,-903.757828 ,2481.947589 ,5173.789976 ,8358.768202 ,11565.584635 ,14431.147931 ,16951.619820 ,18888.807708 ,20120.884465 ,20222.141242 ,18423.168124 ,16498.668271 ,14442.624242 ,14070.038273 ,16211.370808 ,19639.815904 ,24280.360465 ,29475.380079 ,35030.793540 ,40812.325095 ,46593.082382 ,52390.906885 ,58109.310860 ,63780.896094 ,68984.456561 ,72559.442320 ,74645.487900 ,74695.219755 ,72098.143876 ,66609.929889 ,56864.971296 ,41589.295266 ,19057.032104 ,-5951.329863 ,-34608.796853 ,-56603.801584 ,-72678.838057 ,-83297.070856 ,-90127.593511 ,-92656.040614 ,-91394.995510 ,-88192.056842 ,-83148.833075 ,-77582.587173 ,-71750.440823 ,-65765.369857 ,-59716.101820 ,-53613.430067 ,-47473.832358 ,-41287.031890 ,-35139.919259 ,-29097.671507 ,-23178.836760 ,-17486.807388 ,-12046.775779 ,-6802.483422 ,-1867.556171 ,2644.380534 ,6615.829501 ,10332.557518 ,13706.737038 ,17017.991307 ,20303.136670 ,23507.386461 ,26482.194102 ,29698.585356 ,33196.305757 ,37385.914179 ,42872.996212 ,48725.617879 ,54564.488527 ,60453.841604 ,66495.146265 ,72668.620416 ,78723.644870 ,84593.136677 ,89974.936239 ,93439.798630 ,95101.207834 ,94028.126381 ,89507.925620 ,80989.846001 ,66944.274744 ,47016.422041 ,19932.783790 ,-6198.433172 ,-32320.379400 ,-49822.852084 ,-60517.553414 ,-66860.548269 ,-70849.714105 ,-71058.721556 ,-67691.947812 ,-63130.703822 ,-57687.607311 ,-51916.952488 ,-45932.054982 ,-39834.909941 ,-33714.535713 ,-27564.443333 ,-21465.186188 ,-15469.326408 ,-9522.358787 ,-3588.742161 ,2221.802073 ,7758.244339 ,13020.269708 ,18198.562827 ,23211.338588 ,28051.699645 ,32708.577247 ,37413.795242 ,42181.401920 ,46462.499633 ,49849.582315 ,53026.578940 ,55930.600705 ,59432.642178 ,64027.356857 ,69126.843653 ,74620.328837 ,80372.056070 ,86348.152766 ,92468.907239 ,98568.998246 ,104669.511588 ,110445.790143 ,115394.348973 ,119477.553152 ,121528.574511 ,121973.674087 ,121048.017786 ,118021.473181 ,112151.993711 ,102195.999157 ,85972.731130 ,61224.719621 ,31949.279603 ,-3726.022971 ,-36485.298619 ,-67336.469799 ,-87799.366129 ,-98865.713558 ,-104103.651120 ,-105068.402300 ,-103415.820781 ,-99261.356633 ,-94281.850081 ,-88568.701325 ,-82625.711921 ,-76766.776770 ,-70998.803524 ,-65303.404499 ,-59719.198305 ,-54182.230439 ,-48662.904657 ,-43206.731668 ,-37732.701095 ,-32375.478519 ,-27167.508567 ,-22197.211891 ,-17722.869502 ,-13925.135219 ,-10737.893027 ,-8455.327914 ,-7067.008358 ,-7086.991191 ,-7527.693561 ,-8378.025732 ,-8629.383998 ,-7854.586079 ,-5853.040657 ,-1973.225485 ,2699.850783 ,8006.098287 ,13651.734934 ,19139.318072 ,24476.645420 ,29463.480336 ,33899.078820 ,37364.528796 ,38380.214949 ,37326.585649 ,33428.470616 ,27441.000494 ,21761.126583 ,15368.408081 ,7224.234078 ,-2702.217396 ,-14109.682505 ,-27390.915614 ,-38569.562393 ,-47875.155339 ,-53969.121872 ,-57703.473001 ,-57993.198171 ,-54908.391840 ,-50568.410328 ,-45247.622563 ,-39563.224328 ,-33637.786521 ,-27585.345413 ,-21572.074797 ,-15597.363909 ,-9577.429076 ,-3475.770622 ,2520.378408 ,8046.881775 ,13482.345595 ] + deltaS = [ + 9916.490263, + 12014.263380, + 13019.275755, + 12296.373612, + 8870.995603, + 1797.354574, + -6392.880771, + -16150.825387, + -27083.245106, + -40130.421462, + -50377.169958, + -57787.717468, + -60797.223427, + -59274.041897, + -55970.213230, + -51154.650927, + -45877.841034, + -40278.553775, + -34543.967175, + -28849.633641, + -23192.776605, + -17531.130740, + -11862.021829, + -6182.456792, + -450.481090, + 5201.184400, + 10450.773882, + 15373.018272, + 20255.699431, + 24964.431669, + 29470.745887, + 33678.079947, + 37209.808930, + 39664.432393, + 41046.735479, + 40462.982011, + 39765.070209, + 39270.815830, + 39888.077002, + 42087.276604, + 45332.012929, + 49719.128772, + 54622.190928, + 59919.718626, + 65436.341097, + 70842.911460, + 76143.747430, + 81162.358574, + 85688.102884, + 89488.917734, + 91740.108470, + 91998.787916, + 87875.986012, + 79123.877908, + 66435.611045, + 48639.250610, + 27380.282817, + 2166.538464, + -21236.428084, + -43490.803535, + -60436.624080, + -73378.401966, + -80946.278268, + -84831.969493, + -84696.627286, + -81085.365407, + -76410.847049, + -70874.415387, + -65156.276464, + -59379.086883, + -53557.267619, + -47784.164830, + -42078.001172, + -36340.061427, + -30541.788202, + -24805.281435, + -19280.817165, + -13893.690606, + -8444.172221, + -3098.160839, + 2270.908649, + 7594.679295, + 12780.079247, + 17801.722109, + 22543.091206, + 26897.369814, + 31051.285734, + 34933.809557, + 38842.402859, + 42875.230152, + 47024.395356, + 51161.516122, + 55657.298307, + 60958.155424, + 66545.635029, + 72202.930397, + 77934.761905, + 83588.207792, + 89160.874522, + 94606.115027, + 99935.754968, + 104701.404975, + 107581.670606, + 108768.440311, + 107905.700480, + 104062.148863, + 96620.281684, + 83588.443029, + 61415.088182, + 27124.031692, + -7537.285321, + -43900.451653, + -70274.062783, + -87573.481475, + -101712.148408, + -116135.719087, + -124187.225446, + -124725.278371, + -122458.145590, + -117719.918256, + -112352.138605, + -106546.806030, + -100583.803012, + -94618.253238, + -88639.090897, + -82725.009842, + -76938.910669, + -71248.957807, + -65668.352795, + -60272.761991, + -55179.538428, + -50456.021161, + -46037.728058, + -42183.912670, + -39522.184006, + -38541.255303, + -38383.665728, + -39423.998130, + -40489.466130, + -41450.406768, + -42355.156592, + -43837.562085, + -43677.262972, + -41067.896944, + -37238.628465, + -32230.392026, + -26762.766062, + -20975.163308, + -15019.218554, + -9053.105545, + -3059.663132, + 2772.399618, + 8242.538397, + 13407.752291, + 18016.047539, + 22292.125752, + 26616.583347, + 30502.564253, + 33153.890890, + 34216.684448, + 33394.220786, + 29657.417791, + 23064.375405, + 12040.831532, + -2084.921068, + -21390.235970, + -38176.615985, + -51647.714482, + -59242.564959, + -60263.150854, + -58599.245165, + -54804.972560, + -50092.112608, + -44465.812552, + -38533.096297, + -32747.104307, + -27130.082610, + -21529.632955, + -15894.611939, + -10457.566933, + -5429.042583, + -903.757828, + 2481.947589, + 5173.789976, + 8358.768202, + 11565.584635, + 14431.147931, + 16951.619820, + 18888.807708, + 20120.884465, + 20222.141242, + 18423.168124, + 16498.668271, + 14442.624242, + 14070.038273, + 16211.370808, + 19639.815904, + 24280.360465, + 29475.380079, + 35030.793540, + 40812.325095, + 46593.082382, + 52390.906885, + 58109.310860, + 63780.896094, + 68984.456561, + 72559.442320, + 74645.487900, + 74695.219755, + 72098.143876, + 66609.929889, + 56864.971296, + 41589.295266, + 19057.032104, + -5951.329863, + -34608.796853, + -56603.801584, + -72678.838057, + -83297.070856, + -90127.593511, + -92656.040614, + -91394.995510, + -88192.056842, + -83148.833075, + -77582.587173, + -71750.440823, + -65765.369857, + -59716.101820, + -53613.430067, + -47473.832358, + -41287.031890, + -35139.919259, + -29097.671507, + -23178.836760, + -17486.807388, + -12046.775779, + -6802.483422, + -1867.556171, + 2644.380534, + 6615.829501, + 10332.557518, + 13706.737038, + 17017.991307, + 20303.136670, + 23507.386461, + 26482.194102, + 29698.585356, + 33196.305757, + 37385.914179, + 42872.996212, + 48725.617879, + 54564.488527, + 60453.841604, + 66495.146265, + 72668.620416, + 78723.644870, + 84593.136677, + 89974.936239, + 93439.798630, + 95101.207834, + 94028.126381, + 89507.925620, + 80989.846001, + 66944.274744, + 47016.422041, + 19932.783790, + -6198.433172, + -32320.379400, + -49822.852084, + -60517.553414, + -66860.548269, + -70849.714105, + -71058.721556, + -67691.947812, + -63130.703822, + -57687.607311, + -51916.952488, + -45932.054982, + -39834.909941, + -33714.535713, + -27564.443333, + -21465.186188, + -15469.326408, + -9522.358787, + -3588.742161, + 2221.802073, + 7758.244339, + 13020.269708, + 18198.562827, + 23211.338588, + 28051.699645, + 32708.577247, + 37413.795242, + 42181.401920, + 46462.499633, + 49849.582315, + 53026.578940, + 55930.600705, + 59432.642178, + 64027.356857, + 69126.843653, + 74620.328837, + 80372.056070, + 86348.152766, + 92468.907239, + 98568.998246, + 104669.511588, + 110445.790143, + 115394.348973, + 119477.553152, + 121528.574511, + 121973.674087, + 121048.017786, + 118021.473181, + 112151.993711, + 102195.999157, + 85972.731130, + 61224.719621, + 31949.279603, + -3726.022971, + -36485.298619, + -67336.469799, + -87799.366129, + -98865.713558, + -104103.651120, + -105068.402300, + -103415.820781, + -99261.356633, + -94281.850081, + -88568.701325, + -82625.711921, + -76766.776770, + -70998.803524, + -65303.404499, + -59719.198305, + -54182.230439, + -48662.904657, + -43206.731668, + -37732.701095, + -32375.478519, + -27167.508567, + -22197.211891, + -17722.869502, + -13925.135219, + -10737.893027, + -8455.327914, + -7067.008358, + -7086.991191, + -7527.693561, + -8378.025732, + -8629.383998, + -7854.586079, + -5853.040657, + -1973.225485, + 2699.850783, + 8006.098287, + 13651.734934, + 19139.318072, + 24476.645420, + 29463.480336, + 33899.078820, + 37364.528796, + 38380.214949, + 37326.585649, + 33428.470616, + 27441.000494, + 21761.126583, + 15368.408081, + 7224.234078, + -2702.217396, + -14109.682505, + -27390.915614, + -38569.562393, + -47875.155339, + -53969.121872, + -57703.473001, + -57993.198171, + -54908.391840, + -50568.410328, + -45247.622563, + -39563.224328, + -33637.786521, + -27585.345413, + -21572.074797, + -15597.363909, + -9577.429076, + -3475.770622, + 2520.378408, + 8046.881775, + 13482.345595, + ] - beta_set = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26] + beta_set = [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + ] - #from new_data_set import * + # from new_data_set import * # declare model name model = ConcreteModel() # declare constants - bpy = 26 # biweeks per year - years = 15 # years of data - bigM = 50.0 # big M for disjunction constraints + bpy = 26 # biweeks per year + years = 15 # years of data + bigM = 50.0 # big M for disjunction constraints # declare sets - model.S_meas = RangeSet(1,bpy*years) - model.S_meas_small = RangeSet(1,bpy*years-1) - model.S_beta = RangeSet(1,bpy) + model.S_meas = RangeSet(1, bpy * years) + model.S_meas_small = RangeSet(1, bpy * years - 1) + model.S_beta = RangeSet(1, bpy) # define variable bounds - def _gt_zero(m,i): - return (0.0,1e7) + def _gt_zero(m, i): + return (0.0, 1e7) + def _beta_bounds(m): - return (None,5.0) + return (None, 5.0) # define variables # log of estimated cases - #model.logI = Var(model.S_meas, bounds=_gt_zero) - model.logI = Var(model.S_meas, bounds=(0.001,1e7)) + # model.logI = Var(model.S_meas, bounds=_gt_zero) + model.logI = Var(model.S_meas, bounds=(0.001, 1e7)) # log of transmission parameter beta - #model.logbeta = Var(model.S_beta, bounds=_gt_zero) - model.logbeta = Var(model.S_beta, bounds=(0.0001,5)) + # model.logbeta = Var(model.S_beta, bounds=_gt_zero) + model.logbeta = Var(model.S_beta, bounds=(0.0001, 5)) # binary variable y over all betas - #model.y = Var(model.S_beta, within=Binary) + # model.y = Var(model.S_beta, within=Binary) # low value of beta - #model.logbeta_low = Var(bounds=_beta_bounds) - model.logbeta_low = Var(bounds=(0.0001,5)) + # model.logbeta_low = Var(bounds=_beta_bounds) + model.logbeta_low = Var(bounds=(0.0001, 5)) # high value of beta - #model.logbeta_high = Var(bounds=_beta_bounds) - model.logbeta_high = Var(bounds=(0.0001,5)) + # model.logbeta_high = Var(bounds=_beta_bounds) + model.logbeta_high = Var(bounds=(0.0001, 5)) # dummy variables model.p = Var(model.S_meas, bounds=_gt_zero) model.n = Var(model.S_meas, bounds=_gt_zero) @@ -79,7 +1645,7 @@ def _beta_bounds(m): # changes in susceptible population profile from susceptible reconstruction deltaS = deltaS # mean susceptibles - #meanS = 1.04e6 + # meanS = 1.04e6 meanS = 8.65e5 # log of measured population logN = pop @@ -90,36 +1656,47 @@ def _beta_bounds(m): def _obj_rule(m): expr = sum(m.p[i] + m.n[i] for i in m.S_meas) return expr + model.obj = Objective(rule=_obj_rule, sense=minimize) # define constraints - def _logSIR(m,i): - expr = m.logI[i+1] - ( m.logbeta[beta_set[i-1]] + m.logI[i] + math.log(deltaS[i-1] + meanS) - logN[i-1] ) + def _logSIR(m, i): + expr = m.logI[i + 1] - ( + m.logbeta[beta_set[i - 1]] + + m.logI[i] + + math.log(deltaS[i - 1] + meanS) + - logN[i - 1] + ) return (0.0, expr) + model.logSIR = Constraint(model.S_meas_small, rule=_logSIR) # objective function constraint - def _p_n_const(m,i): - expr = logIstar[i-1] - m.logI[i] - m.p[i] + m.n[i] + def _p_n_const(m, i): + expr = logIstar[i - 1] - m.logI[i] - m.p[i] + m.n[i] return (0.0, expr) - model.p_n_const = Constraint(model.S_meas,rule=_p_n_const) + + model.p_n_const = Constraint(model.S_meas, rule=_p_n_const) # disjuncts model.BigM = Suffix() - model.y = RangeSet(0,1) + model.y = RangeSet(0, 1) + def _high_low(disjunct, i, y): model = disjunct.model() if y: - disjunct.c = Constraint(expr=model.logbeta_high - model.logbeta[i]== 0.0) + disjunct.c = Constraint(expr=model.logbeta_high - model.logbeta[i] == 0.0) else: disjunct.c = Constraint(expr=model.logbeta[i] - model.logbeta_low == 0.0) model.BigM[disjunct.c] = bigM + model.high_low = Disjunct(model.S_beta, model.y, rule=_high_low) # disjunctions def _disj(model, i): - return [model.high_low[i,j] for j in model.y] + return [model.high_low[i, j] for j in model.y] + model.disj = Disjunction(model.S_beta, rule=_disj) return model @@ -152,5 +1729,7 @@ def lowbeta_L(m,i): if __name__ == "__main__": m = build_model() TransformationFactory('gdp.bigm').apply_to(m) - SolverFactory('gams').solve(m, solver='baron', tee=True, add_options=['option optcr=1e-6;']) + SolverFactory('gams').solve( + m, solver='baron', tee=True, add_options=['option optcr=1e-6;'] + ) m.obj.display() diff --git a/examples/gdp/eight_process/eight_proc_logical.py b/examples/gdp/eight_process/eight_proc_logical.py index 12d3399c3a8..7e183dfc397 100644 --- a/examples/gdp/eight_process/eight_proc_logical.py +++ b/examples/gdp/eight_process/eight_proc_logical.py @@ -25,10 +25,23 @@ from __future__ import division from pyomo.core.expr.logical_expr import land, lor -from pyomo.core.plugins.transform.logical_to_linear import update_boolean_vars_from_binary +from pyomo.core.plugins.transform.logical_to_linear import ( + update_boolean_vars_from_binary, +) from pyomo.environ import ( - ConcreteModel, Constraint, ConstraintList, NonNegativeReals, - Objective, Param, RangeSet, Reference, Var, exp, minimize, LogicalConstraint, ) + ConcreteModel, + Constraint, + ConstraintList, + NonNegativeReals, + Objective, + Param, + RangeSet, + Reference, + Var, + exp, + minimize, + LogicalConstraint, +) from pyomo.gdp import Disjunct, Disjunction from pyomo.opt import SolverFactory @@ -44,9 +57,9 @@ def build_eight_process_flowsheet(): no_unit_zero_flows = { 1: (2, 3), 2: (4, 5), - 3: (9, ), + 3: (9,), 4: (12, 14), - 5: (15, ), + 5: (15,), 6: (19, 20), 7: (21, 22), 8: (10, 17, 18), @@ -60,28 +73,58 @@ def build_eight_process_flowsheet(): def fixed_cost_bounds(m, unit): return (0, m.CF[unit]) + m.yCF = Var(m.units, initialize=0, bounds=fixed_cost_bounds) # VARIABLE COST COEFF FOR PROCESS UNITS - STREAMS # Format: stream #: cost - variable_cost = {3: -10, 5: -15, 9: -40, 19: 25, 21: 35, 25: -35, - 17: 80, 14: 15, 10: 15, 2: 1, 4: 1, 18: -65, 20: -60, - 22: -80} + variable_cost = { + 3: -10, + 5: -15, + 9: -40, + 19: 25, + 21: 35, + 25: -35, + 17: 80, + 14: 15, + 10: 15, + 2: 1, + 4: 1, + 18: -65, + 20: -60, + 22: -80, + } CV = m.CV = Param(m.streams, initialize=variable_cost, default=0) # initial point information for stream flows - initX = {2: 2, 3: 1.5, 6: 0.75, 7: 0.5, 8: 0.5, 9: 0.75, 11: 1.5, - 12: 1.34, 13: 2, 14: 2.5, 17: 2, 18: 0.75, 19: 2, 20: 1.5, - 23: 1.7, 24: 1.5, 25: 0.5} + initX = { + 2: 2, + 3: 1.5, + 6: 0.75, + 7: 0.5, + 8: 0.5, + 9: 0.75, + 11: 1.5, + 12: 1.34, + 13: 2, + 14: 2.5, + 17: 2, + 18: 0.75, + 19: 2, + 20: 1.5, + 23: 1.7, + 24: 1.5, + 25: 0.5, + } """Variable declarations""" # FLOWRATES OF PROCESS STREAMS - m.flow = Var(m.streams, domain=NonNegativeReals, initialize=initX, - bounds=(0, 10)) + m.flow = Var(m.streams, domain=NonNegativeReals, initialize=initX, bounds=(0, 10)) # OBJECTIVE FUNCTION CONSTANT TERM CONSTANT = m.constant = Param(initialize=122.0) """Constraint definitions""" + # INPUT-OUTPUT RELATIONS FOR process units 1 through 8 @m.Disjunct(m.units) def use_unit(disj, unit): @@ -113,11 +156,9 @@ def use_unit_or_not(m, unit): # Mass balance equations m.massbal1 = Constraint(expr=m.flow[13] == m.flow[19] + m.flow[21]) - m.massbal2 = Constraint( - expr=m.flow[17] == m.flow[9] + m.flow[16] + m.flow[25]) + m.massbal2 = Constraint(expr=m.flow[17] == m.flow[9] + m.flow[16] + m.flow[25]) m.massbal3 = Constraint(expr=m.flow[11] == m.flow[12] + m.flow[15]) - m.massbal4 = Constraint( - expr=m.flow[3] + m.flow[5] == m.flow[6] + m.flow[11]) + m.massbal4 = Constraint(expr=m.flow[3] + m.flow[5] == m.flow[6] + m.flow[11]) m.massbal5 = Constraint(expr=m.flow[6] == m.flow[7] + m.flow[8]) m.massbal6 = Constraint(expr=m.flow[23] == m.flow[20] + m.flow[22]) m.massbal7 = Constraint(expr=m.flow[23] == m.flow[14] + m.flow[24]) @@ -133,19 +174,20 @@ def use_unit_or_not(m, unit): # logical propositions m.use1or2 = LogicalConstraint(expr=m.Y[1].xor(m.Y[2])) m.use1or2implies345 = LogicalConstraint( - expr=lor(m.Y[1], m.Y[2]).implies(lor(m.Y[3], m.Y[4], m.Y[5]))) + expr=lor(m.Y[1], m.Y[2]).implies(lor(m.Y[3], m.Y[4], m.Y[5])) + ) m.use4implies6or7 = LogicalConstraint(expr=m.Y[4].implies(lor(m.Y[6], m.Y[7]))) m.use3implies8 = LogicalConstraint(expr=m.Y[3].implies(m.Y[8])) m.use6or7implies4 = LogicalConstraint(expr=lor(m.Y[6], m.Y[7]).implies(m.Y[4])) m.use6or7 = LogicalConstraint(expr=m.Y[6].xor(m.Y[7])) """Profit (objective) function definition""" - m.profit = Objective(expr=sum( - m.yCF[unit] - for unit in m.units) + - sum(m.flow[stream] * CV[stream] - for stream in m.streams) + CONSTANT, - sense=minimize) + m.profit = Objective( + expr=sum(m.yCF[unit] for unit in m.units) + + sum(m.flow[stream] * CV[stream] for stream in m.streams) + + CONSTANT, + sense=minimize, + ) """Bound definitions""" # x (flow) upper bounds @@ -161,6 +203,7 @@ def use_unit_or_not(m, unit): if __name__ == "__main__": m = build_eight_process_flowsheet() from pyomo.environ import TransformationFactory + TransformationFactory('core.logical_to_linear').apply_to(m) SolverFactory('gdpopt.loa').solve(m, tee=True) update_boolean_vars_from_binary(m) diff --git a/examples/gdp/eight_process/eight_proc_model.py b/examples/gdp/eight_process/eight_proc_model.py index 3053ba53de2..d4bd4dbd102 100644 --- a/examples/gdp/eight_process/eight_proc_model.py +++ b/examples/gdp/eight_process/eight_proc_model.py @@ -24,8 +24,17 @@ """ from __future__ import division -from pyomo.environ import (ConcreteModel, Constraint, NonNegativeReals, - Objective, Param, RangeSet, Var, exp, minimize) +from pyomo.environ import ( + ConcreteModel, + Constraint, + NonNegativeReals, + Objective, + Param, + RangeSet, + Var, + exp, + minimize, +) from pyomo.gdp import Disjunction @@ -45,24 +54,53 @@ def build_eight_process_flowsheet(): def fixed_cost_bounds(m, unit): return (0, m.CF[unit]) + m.yCF = Var(m.units, initialize=0, bounds=fixed_cost_bounds) # VARIABLE COST COEFF FOR PROCESS UNITS - STREAMS # Format: stream #: cost - variable_cost = {3: -10, 5: -15, 9: -40, 19: 25, 21: 35, 25: -35, - 17: 80, 14: 15, 10: 15, 2: 1, 4: 1, 18: -65, 20: -60, - 22: -80} + variable_cost = { + 3: -10, + 5: -15, + 9: -40, + 19: 25, + 21: 35, + 25: -35, + 17: 80, + 14: 15, + 10: 15, + 2: 1, + 4: 1, + 18: -65, + 20: -60, + 22: -80, + } CV = m.CV = Param(m.streams, initialize=variable_cost, default=0) # initial point information for stream flows - initX = {2: 2, 3: 1.5, 6: 0.75, 7: 0.5, 8: 0.5, 9: 0.75, 11: 1.5, - 12: 1.34, 13: 2, 14: 2.5, 17: 2, 18: 0.75, 19: 2, 20: 1.5, - 23: 1.7, 24: 1.5, 25: 0.5} + initX = { + 2: 2, + 3: 1.5, + 6: 0.75, + 7: 0.5, + 8: 0.5, + 9: 0.75, + 11: 1.5, + 12: 1.34, + 13: 2, + 14: 2.5, + 17: 2, + 18: 0.75, + 19: 2, + 20: 1.5, + 23: 1.7, + 24: 1.5, + 25: 0.5, + } """Variable declarations""" # FLOWRATES OF PROCESS STREAMS - m.flow = Var(m.streams, domain=NonNegativeReals, initialize=initX, - bounds=(0, 10)) + m.flow = Var(m.streams, domain=NonNegativeReals, initialize=initX, bounds=(0, 10)) # OBJECTIVE FUNCTION CONSTANT TERM CONSTANT = m.constant = Param(initialize=122.0) @@ -71,77 +109,82 @@ def fixed_cost_bounds(m, unit): m.use_unit_1or2 = Disjunction( expr=[ # use unit 1 disjunct - [m.yCF[1] == m.CF[1], - exp(m.flow[3]) - 1 == m.flow[2], - m.flow[4] == 0, - m.flow[5] == 0], + [ + m.yCF[1] == m.CF[1], + exp(m.flow[3]) - 1 == m.flow[2], + m.flow[4] == 0, + m.flow[5] == 0, + ], # use unit 2 disjunct - [m.yCF[2] == m.CF[2], - exp(m.flow[5] / 1.2) - 1 == m.flow[4], - m.flow[2] == 0, - m.flow[3] == 0] - ]) + [ + m.yCF[2] == m.CF[2], + exp(m.flow[5] / 1.2) - 1 == m.flow[4], + m.flow[2] == 0, + m.flow[3] == 0, + ], + ] + ) m.use_unit_3ornot = Disjunction( expr=[ # Use unit 3 disjunct - [m.yCF[3] == m.CF[3], - 1.5 * m.flow[9] + m.flow[10] == m.flow[8]], + [m.yCF[3] == m.CF[3], 1.5 * m.flow[9] + m.flow[10] == m.flow[8]], # No unit 3 disjunct - [m.flow[9] == 0, - m.flow[10] == m.flow[8]] - ]) + [m.flow[9] == 0, m.flow[10] == m.flow[8]], + ] + ) m.use_unit_4or5ornot = Disjunction( expr=[ # Use unit 4 disjunct - [m.yCF[4] == m.CF[4], - 1.25 * (m.flow[12] + m.flow[14]) == m.flow[13], - m.flow[15] == 0], + [ + m.yCF[4] == m.CF[4], + 1.25 * (m.flow[12] + m.flow[14]) == m.flow[13], + m.flow[15] == 0, + ], # Use unit 5 disjunct - [m.yCF[5] == m.CF[5], - m.flow[15] == 2 * m.flow[16], - m.flow[12] == 0, - m.flow[14] == 0], + [ + m.yCF[5] == m.CF[5], + m.flow[15] == 2 * m.flow[16], + m.flow[12] == 0, + m.flow[14] == 0, + ], # No unit 4 or 5 disjunct - [m.flow[15] == 0, - m.flow[12] == 0, - m.flow[14] == 0] - ]) + [m.flow[15] == 0, m.flow[12] == 0, m.flow[14] == 0], + ] + ) m.use_unit_6or7ornot = Disjunction( expr=[ # use unit 6 disjunct - [m.yCF[6] == m.CF[6], - exp(m.flow[20] / 1.5) - 1 == m.flow[19], - m.flow[21] == 0, - m.flow[22] == 0], - # use unit 7 disjunct - [m.yCF[7] == m.CF[7], - exp(m.flow[22]) - 1 == m.flow[21], - m.flow[19] == 0, - m.flow[20] == 0], - # No unit 6 or 7 disjunct - [m.flow[21] == 0, + [ + m.yCF[6] == m.CF[6], + exp(m.flow[20] / 1.5) - 1 == m.flow[19], + m.flow[21] == 0, m.flow[22] == 0, + ], + # use unit 7 disjunct + [ + m.yCF[7] == m.CF[7], + exp(m.flow[22]) - 1 == m.flow[21], m.flow[19] == 0, - m.flow[20] == 0] - ]) + m.flow[20] == 0, + ], + # No unit 6 or 7 disjunct + [m.flow[21] == 0, m.flow[22] == 0, m.flow[19] == 0, m.flow[20] == 0], + ] + ) m.use_unit_8ornot = Disjunction( expr=[ # use unit 8 disjunct - [m.yCF[8] == m.CF[8], - exp(m.flow[18]) - 1 == m.flow[10] + m.flow[17]], + [m.yCF[8] == m.CF[8], exp(m.flow[18]) - 1 == m.flow[10] + m.flow[17]], # no unit 8 disjunct - [m.flow[10] == 0, - m.flow[17] == 0, - m.flow[18] == 0] - ]) + [m.flow[10] == 0, m.flow[17] == 0, m.flow[18] == 0], + ] + ) # Mass balance equations m.massbal1 = Constraint(expr=m.flow[13] == m.flow[19] + m.flow[21]) - m.massbal2 = Constraint( - expr=m.flow[17] == m.flow[9] + m.flow[16] + m.flow[25]) + m.massbal2 = Constraint(expr=m.flow[17] == m.flow[9] + m.flow[16] + m.flow[25]) m.massbal3 = Constraint(expr=m.flow[11] == m.flow[12] + m.flow[15]) - m.massbal4 = Constraint( - expr=m.flow[3] + m.flow[5] == m.flow[6] + m.flow[11]) + m.massbal4 = Constraint(expr=m.flow[3] + m.flow[5] == m.flow[6] + m.flow[11]) m.massbal5 = Constraint(expr=m.flow[6] == m.flow[7] + m.flow[8]) m.massbal6 = Constraint(expr=m.flow[23] == m.flow[20] + m.flow[22]) m.massbal7 = Constraint(expr=m.flow[23] == m.flow[14] + m.flow[24]) @@ -154,20 +197,24 @@ def fixed_cost_bounds(m, unit): # pure integer constraints m.use4implies6or7 = Constraint( - expr=m.use_unit_6or7ornot.disjuncts[0].binary_indicator_var + - m.use_unit_6or7ornot.disjuncts[1].binary_indicator_var - - m.use_unit_4or5ornot.disjuncts[0].binary_indicator_var == 0) + expr=m.use_unit_6or7ornot.disjuncts[0].binary_indicator_var + + m.use_unit_6or7ornot.disjuncts[1].binary_indicator_var + - m.use_unit_4or5ornot.disjuncts[0].binary_indicator_var + == 0 + ) m.use3implies8 = Constraint( expr=m.use_unit_3ornot.disjuncts[0].binary_indicator_var - - m.use_unit_8ornot.disjuncts[0].binary_indicator_var <= 0) + - m.use_unit_8ornot.disjuncts[0].binary_indicator_var + <= 0 + ) """Profit (objective) function definition""" - m.profit = Objective(expr=sum( - m.yCF[unit] - for unit in m.units) + - sum(m.flow[stream] * CV[stream] - for stream in m.streams) + CONSTANT, - sense=minimize) + m.profit = Objective( + expr=sum(m.yCF[unit] for unit in m.units) + + sum(m.flow[stream] * CV[stream] for stream in m.streams) + + CONSTANT, + sense=minimize, + ) """Bound definitions""" # x (flow) upper bounds diff --git a/examples/gdp/eight_process/eight_proc_verbose_model.py b/examples/gdp/eight_process/eight_proc_verbose_model.py index d26d484b461..78da347e564 100644 --- a/examples/gdp/eight_process/eight_proc_verbose_model.py +++ b/examples/gdp/eight_process/eight_proc_verbose_model.py @@ -6,8 +6,17 @@ """ from __future__ import division -from pyomo.environ import (ConcreteModel, Constraint, NonNegativeReals, - Objective, Param, RangeSet, Var, exp, minimize) +from pyomo.environ import ( + ConcreteModel, + Constraint, + NonNegativeReals, + Objective, + Param, + RangeSet, + Var, + exp, + minimize, +) from pyomo.gdp import Disjunct, Disjunction @@ -27,20 +36,48 @@ def build_eight_process_flowsheet(): # VARIABLE COST COEFF FOR PROCESS UNITS - STREAMS # Format: stream #: cost - variable_cost = {3: -10, 5: -15, 9: -40, 19: 25, 21: 35, 25: -35, - 17: 80, 14: 15, 10: 15, 2: 1, 4: 1, 18: -65, 20: -60, - 22: -80} + variable_cost = { + 3: -10, + 5: -15, + 9: -40, + 19: 25, + 21: 35, + 25: -35, + 17: 80, + 14: 15, + 10: 15, + 2: 1, + 4: 1, + 18: -65, + 20: -60, + 22: -80, + } CV = m.CV = Param(m.streams, initialize=variable_cost, default=0) # initial point information for stream flows - initX = {2: 2, 3: 1.5, 6: 0.75, 7: 0.5, 8: 0.5, 9: 0.75, 11: 1.5, - 12: 1.34, 13: 2, 14: 2.5, 17: 2, 18: 0.75, 19: 2, 20: 1.5, - 23: 1.7, 24: 1.5, 25: 0.5} + initX = { + 2: 2, + 3: 1.5, + 6: 0.75, + 7: 0.5, + 8: 0.5, + 9: 0.75, + 11: 1.5, + 12: 1.34, + 13: 2, + 14: 2.5, + 17: 2, + 18: 0.75, + 19: 2, + 20: 1.5, + 23: 1.7, + 24: 1.5, + 25: 0.5, + } """Variable declarations""" # FLOWRATES OF PROCESS STREAMS - m.flow = Var(m.streams, domain=NonNegativeReals, initialize=initX, - bounds=(0, 10)) + m.flow = Var(m.streams, domain=NonNegativeReals, initialize=initX, bounds=(0, 10)) # OBJECTIVE FUNCTION CONSTANT TERM CONSTANT = m.constant = Param(initialize=122.0) @@ -51,21 +88,18 @@ def build_eight_process_flowsheet(): m.use_unit1.no_unit2_flow1 = Constraint(expr=m.flow[4] == 0) m.use_unit1.no_unit2_flow2 = Constraint(expr=m.flow[5] == 0) m.use_unit2 = Disjunct() - m.use_unit2.inout2 = Constraint( - expr=exp(m.flow[5] / 1.2) - 1 == m.flow[4]) + m.use_unit2.inout2 = Constraint(expr=exp(m.flow[5] / 1.2) - 1 == m.flow[4]) m.use_unit2.no_unit1_flow1 = Constraint(expr=m.flow[2] == 0) m.use_unit2.no_unit1_flow2 = Constraint(expr=m.flow[3] == 0) m.use_unit3 = Disjunct() - m.use_unit3.inout3 = Constraint( - expr=1.5 * m.flow[9] + m.flow[10] == m.flow[8]) + m.use_unit3.inout3 = Constraint(expr=1.5 * m.flow[9] + m.flow[10] == m.flow[8]) m.no_unit3 = Disjunct() m.no_unit3.no_unit3_flow1 = Constraint(expr=m.flow[9] == 0) m.no_unit3.flow_pass_through = Constraint(expr=m.flow[10] == m.flow[8]) m.use_unit4 = Disjunct() - m.use_unit4.inout4 = Constraint( - expr=1.25 * (m.flow[12] + m.flow[14]) == m.flow[13]) + m.use_unit4.inout4 = Constraint(expr=1.25 * (m.flow[12] + m.flow[14]) == m.flow[13]) m.use_unit4.no_unit5_flow = Constraint(expr=m.flow[15] == 0) m.use_unit5 = Disjunct() m.use_unit5.inout5 = Constraint(expr=m.flow[15] == 2 * m.flow[16]) @@ -77,8 +111,7 @@ def build_eight_process_flowsheet(): m.no_unit4or5.no_unit4_flow2 = Constraint(expr=m.flow[14] == 0) m.use_unit6 = Disjunct() - m.use_unit6.inout6 = Constraint( - expr=exp(m.flow[20] / 1.5) - 1 == m.flow[19]) + m.use_unit6.inout6 = Constraint(expr=exp(m.flow[20] / 1.5) - 1 == m.flow[19]) m.use_unit6.no_unit7_flow1 = Constraint(expr=m.flow[21] == 0) m.use_unit6.no_unit7_flow2 = Constraint(expr=m.flow[22] == 0) m.use_unit7 = Disjunct() @@ -92,8 +125,7 @@ def build_eight_process_flowsheet(): m.no_unit6or7.no_unit6_flow2 = Constraint(expr=m.flow[20] == 0) m.use_unit8 = Disjunct() - m.use_unit8.inout8 = Constraint( - expr=exp(m.flow[18]) - 1 == m.flow[10] + m.flow[17]) + m.use_unit8.inout8 = Constraint(expr=exp(m.flow[18]) - 1 == m.flow[10] + m.flow[17]) m.no_unit8 = Disjunct() m.no_unit8.no_unit8_flow1 = Constraint(expr=m.flow[10] == 0) m.no_unit8.no_unit8_flow2 = Constraint(expr=m.flow[17] == 0) @@ -101,11 +133,9 @@ def build_eight_process_flowsheet(): # Mass balance equations m.massbal1 = Constraint(expr=m.flow[13] == m.flow[19] + m.flow[21]) - m.massbal2 = Constraint( - expr=m.flow[17] == m.flow[9] + m.flow[16] + m.flow[25]) + m.massbal2 = Constraint(expr=m.flow[17] == m.flow[9] + m.flow[16] + m.flow[25]) m.massbal3 = Constraint(expr=m.flow[11] == m.flow[12] + m.flow[15]) - m.massbal4 = Constraint( - expr=m.flow[3] + m.flow[5] == m.flow[6] + m.flow[11]) + m.massbal4 = Constraint(expr=m.flow[3] + m.flow[5] == m.flow[6] + m.flow[11]) m.massbal5 = Constraint(expr=m.flow[6] == m.flow[7] + m.flow[8]) m.massbal6 = Constraint(expr=m.flow[23] == m.flow[20] + m.flow[22]) m.massbal7 = Constraint(expr=m.flow[23] == m.flow[14] + m.flow[24]) @@ -118,29 +148,36 @@ def build_eight_process_flowsheet(): # pure integer constraints m.use1or2 = Disjunction(expr=[m.use_unit1, m.use_unit2]) - m.use4or5maybe = Disjunction( - expr=[m.use_unit4, m.use_unit5, m.no_unit4or5]) + m.use4or5maybe = Disjunction(expr=[m.use_unit4, m.use_unit5, m.no_unit4or5]) m.use4or5 = Constraint( - expr=m.use_unit4.indicator_var + m.use_unit5.indicator_var <= 1) - m.use6or7maybe = Disjunction( - expr=[m.use_unit6, m.use_unit7, m.no_unit6or7]) + expr=m.use_unit4.indicator_var + m.use_unit5.indicator_var <= 1 + ) + m.use6or7maybe = Disjunction(expr=[m.use_unit6, m.use_unit7, m.no_unit6or7]) m.use4implies6or7 = Constraint( - expr=m.use_unit6.indicator_var + m.use_unit7.indicator_var - - m.use_unit4.indicator_var == 0) + expr=m.use_unit6.indicator_var + + m.use_unit7.indicator_var + - m.use_unit4.indicator_var + == 0 + ) m.use3maybe = Disjunction(expr=[m.use_unit3, m.no_unit3]) m.either3ornot = Constraint( - expr=m.use_unit3.indicator_var + m.no_unit3.indicator_var == 1) + expr=m.use_unit3.indicator_var + m.no_unit3.indicator_var == 1 + ) m.use8maybe = Disjunction(expr=[m.use_unit8, m.no_unit8]) m.use3implies8 = Constraint( - expr=m.use_unit3.indicator_var - m.use_unit8.indicator_var <= 0) + expr=m.use_unit3.indicator_var - m.use_unit8.indicator_var <= 0 + ) """Profit (objective) function definition""" - m.profit = Objective(expr=sum( - getattr(m, 'use_unit%s' % (unit,)).indicator_var * CF[unit] - for unit in m.units) + - sum(m.flow[stream] * CV[stream] - for stream in m.streams) + CONSTANT, - sense=minimize) + m.profit = Objective( + expr=sum( + getattr(m, 'use_unit%s' % (unit,)).indicator_var * CF[unit] + for unit in m.units + ) + + sum(m.flow[stream] * CV[stream] for stream in m.streams) + + CONSTANT, + sense=minimize, + ) """Bound definitions""" # x (flow) upper bounds diff --git a/examples/gdp/jobshop-nodisjuncts.py b/examples/gdp/jobshop-nodisjuncts.py index 92bb76ff860..bc656dc4717 100644 --- a/examples/gdp/jobshop-nodisjuncts.py +++ b/examples/gdp/jobshop-nodisjuncts.py @@ -30,40 +30,47 @@ # Aldo Vecchietti, LogMIP User's Manual, http://www.logmip.ceride.gov.ar/, 2007 # + def build_model(): model = AbstractModel() model.JOBS = Set(ordered=True) model.STAGES = Set(ordered=True) - model.I_BEFORE_K = RangeSet(0,1) + model.I_BEFORE_K = RangeSet(0, 1) # Task durations model.tau = Param(model.JOBS, model.STAGES, default=0) # Total Makespan (this will be the objective) model.ms = Var() + # Start time of each job def t_bounds(model, I): return (0, sum(value(model.tau[idx]) for idx in model.tau)) - model.t = Var( model.JOBS, within=NonNegativeReals, bounds=t_bounds ) + + model.t = Var(model.JOBS, within=NonNegativeReals, bounds=t_bounds) # Auto-generate the L set (potential collisions between 2 jobs at any stage. def _L_filter(model, I, K, J): - return I < K and model.tau[I,J] and model.tau[K,J] - model.L = Set( initialize=model.JOBS * model.JOBS * model.STAGES, - dimen=3, filter=_L_filter) + return I < K and model.tau[I, J] and model.tau[K, J] + + model.L = Set( + initialize=model.JOBS * model.JOBS * model.STAGES, dimen=3, filter=_L_filter + ) # Makespan is greater than the start time of every job + that job's # total duration def _feas(model, I): - return model.ms >= model.t[I] + sum(model.tau[I,M] for M in model.STAGES) + return model.ms >= model.t[I] + sum(model.tau[I, M] for M in model.STAGES) + model.Feas = Constraint(model.JOBS, rule=_feas) # Define the disjunctions: either job I occurs before K or K before I def _disj(model, I, K, J): - lhs = model.t[I] + sum([M= model.t[I] + sum(model.tau[I,M] for M in model.STAGES) + return model.ms >= model.t[I] + sum(model.tau[I, M] for M in model.STAGES) + model.Feas = Constraint(model.JOBS, rule=_feas) # Disjunctions to prevent clashes at a stage: This creates a set of @@ -64,17 +70,19 @@ def _feas(model, I): # K occurs before job I. def _NoClash(disjunct, I, K, J, IthenK): model = disjunct.model() - lhs = model.t[I] + sum([M= model.DemandLB[j,t] - model.demand_LB = Constraint(model.Products, model.TimePeriods, rule=demand_LB_rule) + return model.FlowRate[j, t] >= model.DemandLB[j, t] + model.demand_LB = Constraint(model.Products, model.TimePeriods, rule=demand_LB_rule) # FIXED PRICE CONTRACT @@ -439,25 +551,36 @@ def demand_LB_rule(model, j, t): def FP_contract_disjunct_rule(disjunct, j, t, buy): model = disjunct.model() if buy: - disjunct.c = Constraint(expr=model.AmountPurchased_FP[j,t] <= MAX_AMOUNT_FP) + disjunct.c = Constraint( + expr=model.AmountPurchased_FP[j, t] <= MAX_AMOUNT_FP + ) else: - disjunct.c = Constraint(expr=model.AmountPurchased_FP[j,t] == 0) - model.FP_contract_disjunct = Disjunct(model.RawMaterials, model.TimePeriods, - model.BuyFPContract, rule=FP_contract_disjunct_rule) + disjunct.c = Constraint(expr=model.AmountPurchased_FP[j, t] == 0) + + model.FP_contract_disjunct = Disjunct( + model.RawMaterials, + model.TimePeriods, + model.BuyFPContract, + rule=FP_contract_disjunct_rule, + ) # Fixed price disjunction def FP_contract_rule(model, j, t): - return [model.FP_contract_disjunct[j,t,buy] for buy in model.BuyFPContract] - model.FP_disjunction = Disjunction(model.RawMaterials, model.TimePeriods, - rule=FP_contract_rule) + return [model.FP_contract_disjunct[j, t, buy] for buy in model.BuyFPContract] + + model.FP_disjunction = Disjunction( + model.RawMaterials, model.TimePeriods, rule=FP_contract_rule + ) - # cost constraint for fixed price contract (independent contraint) + # cost constraint for fixed price contract (independent constraint) def FP_contract_cost_rule(model, j, t): - return model.Cost_FP[j,t] == model.AmountPurchased_FP[j,t] * \ - model.Prices[j,t] - model.FP_contract_cost = Constraint(model.RawMaterials, model.TimePeriods, - rule=FP_contract_cost_rule) + return ( + model.Cost_FP[j, t] == model.AmountPurchased_FP[j, t] * model.Prices[j, t] + ) + model.FP_contract_cost = Constraint( + model.RawMaterials, model.TimePeriods, rule=FP_contract_cost_rule + ) # DISCOUNT CONTRACT @@ -466,41 +589,61 @@ def discount_contract_disjunct_rule(disjunct, j, t, buy): model = disjunct.model() if buy == 'BelowMin': disjunct.belowMin = Constraint( - expr=model.AmountPurchasedBelowMin_Discount[j,t] <= \ - model.MinAmount_Discount[j,t]) + expr=model.AmountPurchasedBelowMin_Discount[j, t] + <= model.MinAmount_Discount[j, t] + ) disjunct.aboveMin = Constraint( - expr=model.AmountPurchasedAboveMin_Discount[j,t] == 0) + expr=model.AmountPurchasedAboveMin_Discount[j, t] == 0 + ) elif buy == 'AboveMin': disjunct.belowMin = Constraint( - expr=model.AmountPurchasedBelowMin_Discount[j,t] == \ - model.MinAmount_Discount[j,t]) + expr=model.AmountPurchasedBelowMin_Discount[j, t] + == model.MinAmount_Discount[j, t] + ) disjunct.aboveMin = Constraint( - expr=model.AmountPurchasedAboveMin_Discount[j,t] >= 0) + expr=model.AmountPurchasedAboveMin_Discount[j, t] >= 0 + ) elif buy == 'NotSelected': disjunct.belowMin = Constraint( - expr=model.AmountPurchasedBelowMin_Discount[j,t] == 0) + expr=model.AmountPurchasedBelowMin_Discount[j, t] == 0 + ) disjunct.aboveMin = Constraint( - expr=model.AmountPurchasedAboveMin_Discount[j,t] == 0) + expr=model.AmountPurchasedAboveMin_Discount[j, t] == 0 + ) else: raise RuntimeError("Unrecognized choice for discount contract: %s" % buy) - model.discount_contract_disjunct = Disjunct(model.RawMaterials, model.TimePeriods, - model.BuyDiscountContract, rule=discount_contract_disjunct_rule) + + model.discount_contract_disjunct = Disjunct( + model.RawMaterials, + model.TimePeriods, + model.BuyDiscountContract, + rule=discount_contract_disjunct_rule, + ) # Discount contract disjunction def discount_contract_rule(model, j, t): - return [model.discount_contract_disjunct[j,t,buy] \ - for buy in model.BuyDiscountContract] - model.discount_contract = Disjunction(model.RawMaterials, model.TimePeriods, - rule=discount_contract_rule) + return [ + model.discount_contract_disjunct[j, t, buy] + for buy in model.BuyDiscountContract + ] + + model.discount_contract = Disjunction( + model.RawMaterials, model.TimePeriods, rule=discount_contract_rule + ) # cost constraint for discount contract (independent constraint) def discount_cost_rule(model, j, t): - return model.Cost_Discount[j,t] == model.RegPrice_Discount[j,t] * \ - model.AmountPurchasedBelowMin_Discount[j,t] + \ - model.DiscountPrice_Discount[j,t] * model.AmountPurchasedAboveMin_Discount[j,t] - model.discount_cost = Constraint(model.RawMaterials, model.TimePeriods, - rule=discount_cost_rule) - + return ( + model.Cost_Discount[j, t] + == model.RegPrice_Discount[j, t] + * model.AmountPurchasedBelowMin_Discount[j, t] + + model.DiscountPrice_Discount[j, t] + * model.AmountPurchasedAboveMin_Discount[j, t] + ) + + model.discount_cost = Constraint( + model.RawMaterials, model.TimePeriods, rule=discount_cost_rule + ) # BULK CONTRACT @@ -509,98 +652,142 @@ def bulk_contract_disjunct_rule(disjunct, j, t, buy): model = disjunct.model() if buy == 'BelowMin': disjunct.amount = Constraint( - expr=model.AmountPurchased_Bulk[j,t] <= model.MinAmount_Bulk[j,t]) + expr=model.AmountPurchased_Bulk[j, t] <= model.MinAmount_Bulk[j, t] + ) disjunct.price = Constraint( - expr=model.Cost_Bulk[j,t] == model.RegPrice_Bulk[j,t] * \ - model.AmountPurchased_Bulk[j,t]) + expr=model.Cost_Bulk[j, t] + == model.RegPrice_Bulk[j, t] * model.AmountPurchased_Bulk[j, t] + ) elif buy == 'AboveMin': disjunct.amount = Constraint( - expr=model.AmountPurchased_Bulk[j,t] >= model.MinAmount_Bulk[j,t]) + expr=model.AmountPurchased_Bulk[j, t] >= model.MinAmount_Bulk[j, t] + ) disjunct.price = Constraint( - expr=model.Cost_Bulk[j,t] == model.DiscountPrice_Bulk[j,t] * \ - model.AmountPurchased_Bulk[j,t]) + expr=model.Cost_Bulk[j, t] + == model.DiscountPrice_Bulk[j, t] * model.AmountPurchased_Bulk[j, t] + ) elif buy == 'NotSelected': - disjunct.amount = Constraint(expr=model.AmountPurchased_Bulk[j,t] == 0) - disjunct.price = Constraint(expr=model.Cost_Bulk[j,t] == 0) + disjunct.amount = Constraint(expr=model.AmountPurchased_Bulk[j, t] == 0) + disjunct.price = Constraint(expr=model.Cost_Bulk[j, t] == 0) else: raise RuntimeError("Unrecognized choice for bulk contract: %s" % buy) - model.bulk_contract_disjunct = Disjunct(model.RawMaterials, model.TimePeriods, - model.BuyBulkContract, rule=bulk_contract_disjunct_rule) + + model.bulk_contract_disjunct = Disjunct( + model.RawMaterials, + model.TimePeriods, + model.BuyBulkContract, + rule=bulk_contract_disjunct_rule, + ) # Bulk contract disjunction def bulk_contract_rule(model, j, t): - return [model.bulk_contract_disjunct[j,t,buy] for buy in model.BuyBulkContract] - model.bulk_contract = Disjunction(model.RawMaterials, model.TimePeriods, - rule=bulk_contract_rule) + return [ + model.bulk_contract_disjunct[j, t, buy] for buy in model.BuyBulkContract + ] + model.bulk_contract = Disjunction( + model.RawMaterials, model.TimePeriods, rule=bulk_contract_rule + ) # FIXED DURATION CONTRACT def FD_1mo_contract(disjunct, j, t): - model = disjunct.model() - disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] >= \ - MIN_AMOUNT_FD_1MONTH) - disjunct.price1 = Constraint(expr=model.Cost_FD[j,t] == \ - model.Prices_Length[j,1,t] * model.AmountPurchased_FD[j,t]) + model = disjunct.model() + disjunct.amount1 = Constraint( + expr=model.AmountPurchased_FD[j, t] >= MIN_AMOUNT_FD_1MONTH + ) + disjunct.price1 = Constraint( + expr=model.Cost_FD[j, t] + == model.Prices_Length[j, 1, t] * model.AmountPurchased_FD[j, t] + ) + model.FD_1mo_contract = Disjunct( - model.RawMaterials, model.TimePeriods, rule=FD_1mo_contract) + model.RawMaterials, model.TimePeriods, rule=FD_1mo_contract + ) def FD_2mo_contract(disjunct, j, t): - model = disjunct.model() - disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] >= \ - model.MinAmount_Length[j,2]) - disjunct.price1 = Constraint(expr=model.Cost_FD[j,t] == \ - model.Prices_Length[j,2,t] * model.AmountPurchased_FD[j,t]) - # only enforce these if we aren't in the last time period - if t < model.TimePeriods[-1]: - disjunct.amount2 = Constraint(expr=model.AmountPurchased_FD[j, t+1] >= \ - model.MinAmount_Length[j,2]) - disjunct.price2 = Constraint(expr=model.Cost_FD[j,t+1] == \ - model.Prices_Length[j,2,t] * model.AmountPurchased_FD[j, t+1]) + model = disjunct.model() + disjunct.amount1 = Constraint( + expr=model.AmountPurchased_FD[j, t] >= model.MinAmount_Length[j, 2] + ) + disjunct.price1 = Constraint( + expr=model.Cost_FD[j, t] + == model.Prices_Length[j, 2, t] * model.AmountPurchased_FD[j, t] + ) + # only enforce these if we aren't in the last time period + if t < model.TimePeriods[-1]: + disjunct.amount2 = Constraint( + expr=model.AmountPurchased_FD[j, t + 1] >= model.MinAmount_Length[j, 2] + ) + disjunct.price2 = Constraint( + expr=model.Cost_FD[j, t + 1] + == model.Prices_Length[j, 2, t] * model.AmountPurchased_FD[j, t + 1] + ) + model.FD_2mo_contract = Disjunct( - model.RawMaterials, model.TimePeriods, rule=FD_2mo_contract) + model.RawMaterials, model.TimePeriods, rule=FD_2mo_contract + ) def FD_3mo_contract(disjunct, j, t): - model = disjunct.model() - # NOTE: I think there is a mistake in the GAMS file in line 327. - # they use the bulk minamount rather than the length one. - #I am doing the same here for validation purposes. - disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] >= \ - model.MinAmount_Bulk[j,3]) - disjunct.cost1 = Constraint(expr=model.Cost_FD[j,t] == \ - model.Prices_Length[j,3,t] * model.AmountPurchased_FD[j,t]) - # check we aren't in one of the last two time periods - if t < model.TimePeriods[-1]: - disjunct.amount2 = Constraint(expr=model.AmountPurchased_FD[j,t+1] >= \ - model.MinAmount_Length[j,3]) - disjunct.cost2 = Constraint(expr=model.Cost_FD[j,t+1] == \ - model.Prices_Length[j,3,t] * model.AmountPurchased_FD[j,t+1]) - if t < model.TimePeriods[-2]: - disjunct.amount3 = Constraint(expr=model.AmountPurchased_FD[j,t+2] >= \ - model.MinAmount_Length[j,3]) - disjunct.cost3 = Constraint(expr=model.Cost_FD[j,t+2] == \ - model.Prices_Length[j,3,t] * model.AmountPurchased_FD[j,t+2]) + model = disjunct.model() + # NOTE: I think there is a mistake in the GAMS file in line 327. + # they use the bulk minamount rather than the length one. + # I am doing the same here for validation purposes. + disjunct.amount1 = Constraint( + expr=model.AmountPurchased_FD[j, t] >= model.MinAmount_Bulk[j, 3] + ) + disjunct.cost1 = Constraint( + expr=model.Cost_FD[j, t] + == model.Prices_Length[j, 3, t] * model.AmountPurchased_FD[j, t] + ) + # check we aren't in one of the last two time periods + if t < model.TimePeriods[-1]: + disjunct.amount2 = Constraint( + expr=model.AmountPurchased_FD[j, t + 1] >= model.MinAmount_Length[j, 3] + ) + disjunct.cost2 = Constraint( + expr=model.Cost_FD[j, t + 1] + == model.Prices_Length[j, 3, t] * model.AmountPurchased_FD[j, t + 1] + ) + if t < model.TimePeriods[-2]: + disjunct.amount3 = Constraint( + expr=model.AmountPurchased_FD[j, t + 2] >= model.MinAmount_Length[j, 3] + ) + disjunct.cost3 = Constraint( + expr=model.Cost_FD[j, t + 2] + == model.Prices_Length[j, 3, t] * model.AmountPurchased_FD[j, t + 2] + ) + model.FD_3mo_contract = Disjunct( - model.RawMaterials, model.TimePeriods, rule=FD_3mo_contract) + model.RawMaterials, model.TimePeriods, rule=FD_3mo_contract + ) def FD_no_contract(disjunct, j, t): model = disjunct.model() - disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] == 0) - disjunct.cost1 = Constraint(expr=model.Cost_FD[j,t] == 0) + disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j, t] == 0) + disjunct.cost1 = Constraint(expr=model.Cost_FD[j, t] == 0) if t < model.TimePeriods[-1]: - disjunct.amount2 = Constraint(expr=model.AmountPurchased_FD[j,t+1] == 0) - disjunct.cost2 = Constraint(expr=model.Cost_FD[j,t+1] == 0) + disjunct.amount2 = Constraint(expr=model.AmountPurchased_FD[j, t + 1] == 0) + disjunct.cost2 = Constraint(expr=model.Cost_FD[j, t + 1] == 0) if t < model.TimePeriods[-2]: - disjunct.amount3 = Constraint(expr=model.AmountPurchased_FD[j,t+2] == 0) - disjunct.cost3 = Constraint(expr=model.Cost_FD[j,t+2] == 0) + disjunct.amount3 = Constraint(expr=model.AmountPurchased_FD[j, t + 2] == 0) + disjunct.cost3 = Constraint(expr=model.Cost_FD[j, t + 2] == 0) + model.FD_no_contract = Disjunct( - model.RawMaterials, model.TimePeriods, rule=FD_no_contract) + model.RawMaterials, model.TimePeriods, rule=FD_no_contract + ) def FD_contract(model, j, t): - return [ model.FD_1mo_contract[j,t], model.FD_2mo_contract[j,t], - model.FD_3mo_contract[j,t], model.FD_no_contract[j,t], ] - model.FD_contract = Disjunction(model.RawMaterials, model.TimePeriods, - rule=FD_contract) + return [ + model.FD_1mo_contract[j, t], + model.FD_2mo_contract[j, t], + model.FD_3mo_contract[j, t], + model.FD_no_contract[j, t], + ] + + model.FD_contract = Disjunction( + model.RawMaterials, model.TimePeriods, rule=FD_contract + ) return model @@ -608,5 +795,7 @@ def FD_contract(model, j, t): if __name__ == "__main__": m = build_model().create_instance('medTermPurchasing_Literal_Hull.dat') TransformationFactory('gdp.bigm').apply_to(m) - SolverFactory('gams').solve(m, solver='baron', tee=True, add_options=['option optcr=1e-6;']) + SolverFactory('gams').solve( + m, solver='baron', tee=True, add_options=['option optcr=1e-6;'] + ) m.profit.display() diff --git a/examples/gdp/nine_process/small_process.py b/examples/gdp/nine_process/small_process.py index 38b45a7de41..7f96f32c65c 100644 --- a/examples/gdp/nine_process/small_process.py +++ b/examples/gdp/nine_process/small_process.py @@ -19,60 +19,78 @@ def build_model(): m.x = Var(m.streams, bounds=(0, 50), initialize=5) m.stage1_split = Constraint(expr=m.x[1] == m.x[2] + m.x[4]) - m.first_stage = Disjunction(expr=[ - [ - # Unit 1 - m.x[2] == exp(m.x[3]) - 1, - m.x[4] == 0, m.x[5] == 0 - ], - [ - # Unit 2 - m.x[5] == log(m.x[4] + 1), - m.x[2] == 0, m.x[3] == 0 + m.first_stage = Disjunction( + expr=[ + [ + # Unit 1 + m.x[2] == exp(m.x[3]) - 1, + m.x[4] == 0, + m.x[5] == 0, + ], + [ + # Unit 2 + m.x[5] == log(m.x[4] + 1), + m.x[2] == 0, + m.x[3] == 0, + ], ] - ]) + ) m.stage1_mix = Constraint(expr=m.x[3] + m.x[5] == m.x[6]) m.stage2_split = Constraint(expr=m.x[6] == sum(m.x[i] for i in (7, 9, 11, 13))) - m.second_stage = Disjunction(expr=[ - [ - # Unit 3 - m.x[8] == 2 * log(m.x[7]) + 3, - m.x[7] >= 0.2, - ] + [m.x[i] == 0 for i in (9, 10, 11, 12, 14, 15)], - [ - # Unit 4 - m.x[10] == 1.8 * log(m.x[9] + 4), - ] + [m.x[i] == 0 for i in (7, 8, 11, 12, 14, 15)], - [ - # Unit 5 - m.x[12] == 1.2 * log(m.x[11]) + 2, - m.x[11] >= 0.001, - ] + [m.x[i] == 0 for i in (7, 8, 9, 10, 14, 15)], - [ - # Unit 6 - m.x[15] == sqrt(m.x[14] - 3) * m.x[23] + 1, - m.x[14] >= 5, m.x[14] <= 20, - ] + [m.x[i] == 0 for i in (7, 8, 9, 10, 11, 12)] - ]) + m.second_stage = Disjunction( + expr=[ + [ + # Unit 3 + m.x[8] == 2 * log(m.x[7]) + 3, + m.x[7] >= 0.2, + ] + + [m.x[i] == 0 for i in (9, 10, 11, 12, 14, 15)], + [ + # Unit 4 + m.x[10] + == 1.8 * log(m.x[9] + 4) + ] + + [m.x[i] == 0 for i in (7, 8, 11, 12, 14, 15)], + [ + # Unit 5 + m.x[12] == 1.2 * log(m.x[11]) + 2, + m.x[11] >= 0.001, + ] + + [m.x[i] == 0 for i in (7, 8, 9, 10, 14, 15)], + [ + # Unit 6 + m.x[15] == sqrt(m.x[14] - 3) * m.x[23] + 1, + m.x[14] >= 5, + m.x[14] <= 20, + ] + + [m.x[i] == 0 for i in (7, 8, 9, 10, 11, 12)], + ] + ) m.stage2_special_mix = Constraint(expr=m.x[14] == m.x[13] + m.x[23]) m.stage2_mix = Constraint(expr=sum(m.x[i] for i in (8, 10, 12, 15)) == m.x[16]) m.stage3_split = Constraint(expr=m.x[16] == sum(m.x[i] for i in (17, 19, 21))) - m.third_stage = Disjunction(expr=[ - [ - # Unit 7 - m.x[18] == m.x[17] * 0.9, - ] + [m.x[i] == 0 for i in (19, 20, 21, 22)], - [ - # Unit 8 - m.x[20] == log(m.x[19] ** 1.5) + 2, - m.x[19] >= 1, - ] + [m.x[i] == 0 for i in (17, 18, 21, 22)], - [ - # Unit 9 - m.x[22] == log(m.x[21] + sqrt(m.x[21])) + 1, - m.x[21] >= 4, - ] + [m.x[i] == 0 for i in (17, 18, 19, 20)] - ]) + m.third_stage = Disjunction( + expr=[ + [ + # Unit 7 + m.x[18] + == m.x[17] * 0.9 + ] + + [m.x[i] == 0 for i in (19, 20, 21, 22)], + [ + # Unit 8 + m.x[20] == log(m.x[19] ** 1.5) + 2, + m.x[19] >= 1, + ] + + [m.x[i] == 0 for i in (17, 18, 21, 22)], + [ + # Unit 9 + m.x[22] == log(m.x[21] + sqrt(m.x[21])) + 1, + m.x[21] >= 4, + ] + + [m.x[i] == 0 for i in (17, 18, 19, 20)], + ] + ) m.stage3_special_split = Constraint(expr=m.x[22] == m.x[23] + m.x[24]) m.stage3_mix = Constraint(expr=m.x[25] == sum(m.x[i] for i in (18, 20, 24))) @@ -87,106 +105,138 @@ def build_nonexclusive_model(): m.x = Var(m.streams, bounds=(0, 50), initialize=5) m.stage1_split = Constraint(expr=m.x[1] == m.x[2] + m.x[4]) - m.unit1 = Disjunction(expr=[ - [ - # Unit 1 - m.x[2] == exp(m.x[3]) - 1, - ], - [ - # No Unit 1 - m.x[2] == 0, m.x[3] == 0 + m.unit1 = Disjunction( + expr=[ + [ + # Unit 1 + m.x[2] + == exp(m.x[3]) - 1 + ], + [ + # No Unit 1 + m.x[2] == 0, + m.x[3] == 0, + ], ] - ]) - m.unit2 = Disjunction(expr=[ - [ - # Unit 2 - m.x[5] == log(m.x[4] + 1), - ], - [ - # No Unit 2 - m.x[4] == 0, m.x[5] == 0 + ) + m.unit2 = Disjunction( + expr=[ + [ + # Unit 2 + m.x[5] + == log(m.x[4] + 1) + ], + [ + # No Unit 2 + m.x[4] == 0, + m.x[5] == 0, + ], ] - ]) + ) m.stage1_mix = Constraint(expr=m.x[3] + m.x[5] == m.x[6]) m.stage2_split = Constraint(expr=m.x[6] == sum(m.x[i] for i in (7, 9, 11, 13))) - m.unit3 = Disjunction(expr=[ - [ - # Unit 3 - m.x[8] == 2 * log(m.x[7]) + 3, - m.x[7] >= 0.2, - ], - [ - # No Unit 3 - m.x[7] == 0, m.x[8] == 0 + m.unit3 = Disjunction( + expr=[ + [ + # Unit 3 + m.x[8] == 2 * log(m.x[7]) + 3, + m.x[7] >= 0.2, + ], + [ + # No Unit 3 + m.x[7] == 0, + m.x[8] == 0, + ], ] - ]) - m.unit4 = Disjunction(expr=[ - [ - # Unit 4 - m.x[10] == 1.8 * log(m.x[9] + 4), - ], - [ - # No Unit 4 - m.x[9] == 0, m.x[10] == 0 + ) + m.unit4 = Disjunction( + expr=[ + [ + # Unit 4 + m.x[10] + == 1.8 * log(m.x[9] + 4) + ], + [ + # No Unit 4 + m.x[9] == 0, + m.x[10] == 0, + ], ] - ]) - m.unit5 = Disjunction(expr=[ - [ - # Unit 5 - m.x[12] == 1.2 * log(m.x[11]) + 2, - m.x[11] >= 0.001, - ], - [ - # No Unit 5 - m.x[11] == 0, m.x[12] == 0 + ) + m.unit5 = Disjunction( + expr=[ + [ + # Unit 5 + m.x[12] == 1.2 * log(m.x[11]) + 2, + m.x[11] >= 0.001, + ], + [ + # No Unit 5 + m.x[11] == 0, + m.x[12] == 0, + ], ] - ]) - m.unit6 = Disjunction(expr=[ - [ - # Unit 6 - m.x[15] == sqrt(m.x[14] - 3) * m.x[23] + 1, - m.x[14] >= 5, m.x[14] <= 20, - ], - [ - # No Unit 6 - m.x[14] == 0, m.x[15] == 0 + ) + m.unit6 = Disjunction( + expr=[ + [ + # Unit 6 + m.x[15] == sqrt(m.x[14] - 3) * m.x[23] + 1, + m.x[14] >= 5, + m.x[14] <= 20, + ], + [ + # No Unit 6 + m.x[14] == 0, + m.x[15] == 0, + ], ] - ]) + ) m.stage2_special_mix = Constraint(expr=m.x[14] == m.x[13] + m.x[23]) m.stage2_mix = Constraint(expr=sum(m.x[i] for i in (8, 10, 12, 15)) == m.x[16]) m.stage3_split = Constraint(expr=m.x[16] == sum(m.x[i] for i in (17, 19, 21))) - m.unit7 = Disjunction(expr=[ - [ - # Unit 7 - m.x[18] == m.x[17] * 0.9, - ], - [ - # No Unit 7 - m.x[17] == 0, m.x[18] == 0 + m.unit7 = Disjunction( + expr=[ + [ + # Unit 7 + m.x[18] + == m.x[17] * 0.9 + ], + [ + # No Unit 7 + m.x[17] == 0, + m.x[18] == 0, + ], ] - ]) - m.unit8 = Disjunction(expr=[ - [ - # Unit 8 - m.x[20] == log(m.x[19] ** 1.5) + 2, - m.x[19] >= 1, - ], - [ - # No Unit 8 - m.x[19] == 0, m.x[20] == 0 + ) + m.unit8 = Disjunction( + expr=[ + [ + # Unit 8 + m.x[20] == log(m.x[19] ** 1.5) + 2, + m.x[19] >= 1, + ], + [ + # No Unit 8 + m.x[19] == 0, + m.x[20] == 0, + ], ] - ]) - m.unit9 = Disjunction(expr=[ - [ - # Unit 9 - m.x[22] == log(m.x[21] + sqrt(m.x[21])) + 1, - m.x[21] >= 4, - ], - [ - # No Unit 9 - m.x[21] == 0, m.x[22] == 0 + ) + m.unit9 = Disjunction( + expr=[ + [ + # Unit 9 + m.x[22] == log(m.x[21] + sqrt(m.x[21])) + 1, + m.x[21] >= 4, + ], + [ + # No Unit 9 + m.x[21] == 0, + m.x[22] == 0, + ], ] - ]) + ) m.stage3_special_split = Constraint(expr=m.x[22] == m.x[23] + m.x[24]) m.stage3_mix = Constraint(expr=m.x[25] == sum(m.x[i] for i in (18, 20, 24))) @@ -197,9 +247,11 @@ def build_nonexclusive_model(): if __name__ == '__main__': from pyomo.environ import SolverFactory + m = build_model() result = SolverFactory('gdpopt.gloa').solve( - m, tee=True, + m, + tee=True, mip_solver='gams', nlp_solver='gams', nlp_solver_args=dict(add_options=['option optcr=0.01;']), @@ -212,7 +264,8 @@ def build_nonexclusive_model(): m = build_nonexclusive_model() result = SolverFactory('gdpopt.gloa').solve( - m, tee=True, + m, + tee=True, mip_solver='gams', nlp_solver='gams', nlp_solver_args=dict(add_options=['option optcr=0.01;']), diff --git a/examples/gdp/simple1.py b/examples/gdp/simple1.py index 2073a5bc4f3..f7c77b111f0 100644 --- a/examples/gdp/simple1.py +++ b/examples/gdp/simple1.py @@ -1,4 +1,4 @@ -# Example: modeling a complementarity condition as a +# Example: modeling a complementarity condition as a # disjunction # # This model does not work with existing transformations. @@ -7,13 +7,13 @@ from pyomo.core import * from pyomo.gdp import * -def build_model(): +def build_model(): model = ConcreteModel() # x >= 0 _|_ y>=0 - model.x = Var(bounds=(0,None)) - model.y = Var(bounds=(0,None)) + model.x = Var(bounds=(0, None)) + model.y = Var(bounds=(0, None)) # Two conditions def _d(disjunct, flag): @@ -24,14 +24,16 @@ def _d(disjunct, flag): else: # y == 0 disjunct.c = Constraint(expr=model.y == 0) - model.d = Disjunct([0,1], rule=_d) + + model.d = Disjunct([0, 1], rule=_d) # Define the disjunction def _c(model): return [model.d[0], model.d[1]] + model.c = Disjunction(rule=_c) - model.C = Constraint(expr=model.x+model.y <= 1) + model.C = Constraint(expr=model.x + model.y <= 1) - model.o = Objective(expr=2*model.x+3*model.y, sense=maximize) + model.o = Objective(expr=2 * model.x + 3 * model.y, sense=maximize) return model diff --git a/examples/gdp/simple2.py b/examples/gdp/simple2.py index fbb3ffa190c..6bcc7bbf747 100644 --- a/examples/gdp/simple2.py +++ b/examples/gdp/simple2.py @@ -1,4 +1,4 @@ -# Example: modeling a complementarity condition as a +# Example: modeling a complementarity condition as a # disjunction # # Specifying variable bounds @@ -6,12 +6,13 @@ from pyomo.core import * from pyomo.gdp import * + def build_model(): model = ConcreteModel() # x >= 0 _|_ y>=0 - model.x = Var(bounds=(0,100)) - model.y = Var(bounds=(0,100)) + model.x = Var(bounds=(0, 100)) + model.y = Var(bounds=(0, 100)) # Two conditions def _d(disjunct, flag): @@ -22,14 +23,16 @@ def _d(disjunct, flag): else: # y == 0 disjunct.c = Constraint(expr=model.y == 0) - model.d = Disjunct([0,1], rule=_d) + + model.d = Disjunct([0, 1], rule=_d) # Define the disjunction def _c(model): return [model.d[0], model.d[1]] + model.c = Disjunction(rule=_c) - model.C = Constraint(expr=model.x+model.y <= 1) + model.C = Constraint(expr=model.x + model.y <= 1) - model.o = Objective(expr=2*model.x+3*model.y, sense=maximize) - return model \ No newline at end of file + model.o = Objective(expr=2 * model.x + 3 * model.y, sense=maximize) + return model diff --git a/examples/gdp/simple3.py b/examples/gdp/simple3.py index 73dc27be6a2..6b3d6ec46c4 100644 --- a/examples/gdp/simple3.py +++ b/examples/gdp/simple3.py @@ -1,4 +1,4 @@ -# Example: modeling a complementarity condition as a +# Example: modeling a complementarity condition as a # disjunction # # Specifying BigM suffix values for the gdp.bigm transformation @@ -6,12 +6,13 @@ from pyomo.core import * from pyomo.gdp import * + def build_model(): model = ConcreteModel() # x >= 0 _|_ y>=0 - model.x = Var(bounds=(0,None)) - model.y = Var(bounds=(0,None)) + model.x = Var(bounds=(0, None)) + model.y = Var(bounds=(0, None)) # Two conditions def _d(disjunct, flag): @@ -24,15 +25,17 @@ def _d(disjunct, flag): disjunct.c = Constraint(expr=model.y == 0) disjunct.BigM = Suffix() disjunct.BigM[disjunct.c] = 1 - model.d = Disjunct([0,1], rule=_d) + + model.d = Disjunct([0, 1], rule=_d) # Define the disjunction def _c(model): return [model.d[0], model.d[1]] + model.c = Disjunction(rule=_c) - model.C = Constraint(expr=model.x+model.y <= 1) + model.C = Constraint(expr=model.x + model.y <= 1) - model.o = Objective(expr=2*model.x+3*model.y, sense=maximize) + model.o = Objective(expr=2 * model.x + 3 * model.y, sense=maximize) return model diff --git a/examples/gdp/small_lit/basic_step.py b/examples/gdp/small_lit/basic_step.py index 89cf0ffc0b0..16d134500e7 100644 --- a/examples/gdp/small_lit/basic_step.py +++ b/examples/gdp/small_lit/basic_step.py @@ -1,7 +1,7 @@ """ Example from Section 3.2 in paper of Pseudo Basic Steps Ref: - Pseude basic steps: bound improvement guarantess from Lagrangian + Pseudo basic steps: bound improvement guarantees from Lagrangian decomposition in convex disjunctive programming Papageorgiou and Trespalacios, 2017 @@ -13,37 +13,51 @@ from pyomo.gdp import * from pyomo.gdp.basic_step import apply_basic_step + def build_gdp_model(): model = ConcreteModel() - model.x1 = Var(bounds=(-1,6), initialize=0) - model.x2 = Var(bounds=( 0,7), initialize=3.5) - - model.objective = Objective(expr=0.2*model.x1 + model.x2) - model.disjunction_set = RangeSet(1,3) - - model.disjuncts = Disjunct([1,2,3],[1,2]) - model.disjuncts[1,1].c = Constraint(expr=model.x1**2 + (1/4)*(model.x2 - 5)**2 <= 1) - model.disjuncts[2,1].c = Constraint(expr=model.x1**2 + (1/4)*(model.x2 - 2)**2 <= 1) - model.disjuncts[3,1].c = Constraint(expr=model.x1**2 + (1/4)*(model.x2 - 3.5)**2 <= 1) - - model.disjuncts[1,2].c = Constraint(expr=(model.x1 - 5)**2 + (1/4)*(model.x2 - 2)**2 <= 1) - model.disjuncts[2,2].c = Constraint(expr=(model.x1 - 5)**2 + (1/4)*(model.x2 - 5)**2 <= 1) - model.disjuncts[3,2].c = Constraint(expr=(model.x1 - 5)**2 + (1/4)*(model.x2 - 3.5)**2 <= 1) + model.x1 = Var(bounds=(-1, 6), initialize=0) + model.x2 = Var(bounds=(0, 7), initialize=3.5) + + model.objective = Objective(expr=0.2 * model.x1 + model.x2) + model.disjunction_set = RangeSet(1, 3) + + model.disjuncts = Disjunct([1, 2, 3], [1, 2]) + model.disjuncts[1, 1].c = Constraint( + expr=model.x1**2 + (1 / 4) * (model.x2 - 5) ** 2 <= 1 + ) + model.disjuncts[2, 1].c = Constraint( + expr=model.x1**2 + (1 / 4) * (model.x2 - 2) ** 2 <= 1 + ) + model.disjuncts[3, 1].c = Constraint( + expr=model.x1**2 + (1 / 4) * (model.x2 - 3.5) ** 2 <= 1 + ) + + model.disjuncts[1, 2].c = Constraint( + expr=(model.x1 - 5) ** 2 + (1 / 4) * (model.x2 - 2) ** 2 <= 1 + ) + model.disjuncts[2, 2].c = Constraint( + expr=(model.x1 - 5) ** 2 + (1 / 4) * (model.x2 - 5) ** 2 <= 1 + ) + model.disjuncts[3, 2].c = Constraint( + expr=(model.x1 - 5) ** 2 + (1 / 4) * (model.x2 - 3.5) ** 2 <= 1 + ) @model.Disjunction(model.disjunction_set, xor=True) - def disjunctions(model,i): - return [model.disjuncts[i,1], model.disjuncts[i,2]] + def disjunctions(model, i): + return [model.disjuncts[i, 1], model.disjuncts[i, 2]] return model + def solve_base_model(): m_base = build_gdp_model() m_hull = TransformationFactory('gdp.hull').create_using(m_base) - #m_bigm = TransformationFactory('gdp.bigm').create_using(m_base, bigM=100) + # m_bigm = TransformationFactory('gdp.bigm').create_using(m_base, bigM=100) solver = SolverFactory('gams') solver.solve(m_hull, solver='baron') - #m_hull.pprint() + # m_hull.pprint() m_hull.objective.display() m_hull.x1.display() m_hull.x2.display() @@ -51,13 +65,13 @@ def solve_base_model(): def solve_basic_step_model(): m_base = build_gdp_model() - m_base.BS = apply_basic_step([m_base.disjunctions[1],m_base.disjunctions[2]]) + m_base.BS = apply_basic_step([m_base.disjunctions[1], m_base.disjunctions[2]]) # crux to pprint component - #with open('pprint.log','w') as outputfile: + # with open('pprint.log','w') as outputfile: # m_base.disjunctions.pprint(outputfile) - #m_bs_hull = TransformationFactory('gdp.hull').create_using(m_base) + # m_bs_hull = TransformationFactory('gdp.hull').create_using(m_base) m_bigm = TransformationFactory('gdp.bigm').create_using(m_base, bigM=100) m_bigm.pprint() @@ -68,6 +82,7 @@ def solve_basic_step_model(): m_bigm.x1.display() m_bigm.x2.display() + if __name__ == '__main__': print('################################') print('[1] Sanity check: solving base model') diff --git a/examples/gdp/small_lit/contracts_problem.py b/examples/gdp/small_lit/contracts_problem.py index 3c80f4915c1..500fe15cb2a 100644 --- a/examples/gdp/small_lit/contracts_problem.py +++ b/examples/gdp/small_lit/contracts_problem.py @@ -13,9 +13,20 @@ from pyomo.core.expr.logical_expr import lor from pyomo.environ import ( - ConcreteModel, Constraint, Set, RangeSet, Param, - Objective, Var, NonNegativeReals, Block, - TransformationFactory, SolverFactory, LogicalConstraint, BooleanVar) + ConcreteModel, + Constraint, + Set, + RangeSet, + Param, + Objective, + Var, + NonNegativeReals, + Block, + TransformationFactory, + SolverFactory, + LogicalConstraint, + BooleanVar, +) from pyomo.gdp import Disjunct @@ -35,33 +46,45 @@ def build_model(): m.max_q_idx = RangeSet(m.T_max) # Randomly generated parameters - m.D = Param(m.T, doc='demand', - initialize=dict((t, randint(50, 100)) for t in m.T)) - m.alpha = Param(m.T, doc='storage cost', - initialize=dict((t, randint(5, 20)) for t in m.T)) - m.gamma = Param(m.T, doc='base buying cost', - initialize=dict((t, randint(10, 30)) for t in m.T)) - m.beta_B = Param(m.T, doc='bulk discount', - initialize=dict((t, randint(50, 500)/1000) for t in m.T)) - - m.F_B_lo = Param(m.T, doc='bulk minimum purchase amount', - initialize=dict((t, randint(50, 100)) for t in m.T)) - - m.beta_L = Param(m.T, m.max_q_idx, - initialize=dict(((t, q), randint(10, 999)/1000) - for t in m.T for q in m.max_q_idx), - doc='long-term discount') - m.F_L_lo = Param(m.T, m.max_q_idx, - initialize=dict(((t, q), randint(50, 100)) - for t in m.T for q in m.max_q_idx), - doc='long-term minimum purchase amount') + m.D = Param(m.T, doc='demand', initialize=dict((t, randint(50, 100)) for t in m.T)) + m.alpha = Param( + m.T, doc='storage cost', initialize=dict((t, randint(5, 20)) for t in m.T) + ) + m.gamma = Param( + m.T, doc='base buying cost', initialize=dict((t, randint(10, 30)) for t in m.T) + ) + m.beta_B = Param( + m.T, + doc='bulk discount', + initialize=dict((t, randint(50, 500) / 1000) for t in m.T), + ) + + m.F_B_lo = Param( + m.T, + doc='bulk minimum purchase amount', + initialize=dict((t, randint(50, 100)) for t in m.T), + ) + + m.beta_L = Param( + m.T, + m.max_q_idx, + initialize=dict( + ((t, q), randint(10, 999) / 1000) for t in m.T for q in m.max_q_idx + ), + doc='long-term discount', + ) + m.F_L_lo = Param( + m.T, + m.max_q_idx, + initialize=dict(((t, q), randint(50, 100)) for t in m.T for q in m.max_q_idx), + doc='long-term minimum purchase amount', + ) # Contract choices 'standard', 'bulk' and long term contracts '0','1',... time_time_choices = [(t1, str(t2)) for t1, t2 in m.T * m.T if t2 <= m.T_max - t1] time_special_choices = [(t, s) for t in m.T for s in {'S', 'B', '0'}] m.contract_time_choices = Set(initialize=time_time_choices + time_special_choices) - m.disjunct_choices = Set( - initialize=['S', 'B', *[str(t) for t in range(m.T_max)]]) + m.disjunct_choices = Set(initialize=['S', 'B', *[str(t) for t in range(m.T_max)]]) m.disjuncts = Disjunct(m.contract_time_choices) m.Y = BooleanVar(m.contract_time_choices) for t, c in m.contract_time_choices: @@ -69,45 +92,57 @@ def build_model(): # Create disjuncts for contracts in each timeset for t in m.T: - m.disjuncts[t, 'S'].cost = Constraint(expr=m.c[t] == m.gamma[t]*m.x[t]) + m.disjuncts[t, 'S'].cost = Constraint(expr=m.c[t] == m.gamma[t] * m.x[t]) m.disjuncts[t, 'B'].cost = Constraint( - expr=m.c[t] == (1-m.beta_B[t])*m.gamma[t]*m.x[t]) - m.disjuncts[t, 'B'].amount = Constraint( - expr=m.x[t] >= m.F_B_lo[t]) + expr=m.c[t] == (1 - m.beta_B[t]) * m.gamma[t] * m.x[t] + ) + m.disjuncts[t, 'B'].amount = Constraint(expr=m.x[t] >= m.F_B_lo[t]) m.disjuncts[t, '0'].c = Constraint(expr=0 <= m.c[t]) - for q in range(1, m.T_max-t+1): - m.disjuncts[t, str(q)].t_idx = RangeSet(t, t+q) + for q in range(1, m.T_max - t + 1): + m.disjuncts[t, str(q)].t_idx = RangeSet(t, t + q) m.disjuncts[t, str(q)].cost = Constraint(m.disjuncts[t, str(q)].t_idx) m.disjuncts[t, str(q)].amount = Constraint(m.disjuncts[t, str(q)].t_idx) for t_ in m.disjuncts[t, str(q)].t_idx: - m.disjuncts[t, str(q)].cost[t_] =\ - m.c[t_] == (1-m.beta_L[t, q])*m.gamma[t]*m.x[t_] - m.disjuncts[t, str(q)].amount[t_] =\ - m.x[t_] >= m.F_L_lo[t, q] + m.disjuncts[t, str(q)].cost[t_] = ( + m.c[t_] == (1 - m.beta_L[t, q]) * m.gamma[t] * m.x[t_] + ) + m.disjuncts[t, str(q)].amount[t_] = m.x[t_] >= m.F_L_lo[t, q] # Create disjunctions @m.Disjunction(m.T, xor=True) def disjunctions(m, t): - return [m.disjuncts[t, 'S'], m.disjuncts[t, 'B'], m.disjuncts[t, '0'], - *[m.disjuncts[t, str(q)] for q in range(1, m.T_max-t+1)]] + return [ + m.disjuncts[t, 'S'], + m.disjuncts[t, 'B'], + m.disjuncts[t, '0'], + *[m.disjuncts[t, str(q)] for q in range(1, m.T_max - t + 1)], + ] # Connect the disjuncts indicator variables using logical expressions - m.logical_blocks = Block(range(1, m.T_max+1)) + m.logical_blocks = Block(range(1, m.T_max + 1)) # Enforce absence of existing long-term contract - m.logical_blocks[1].not_y_1_0 = LogicalConstraint(expr=~m.Y[1, '0'], doc="no pre-existing long-term contract") + m.logical_blocks[1].not_y_1_0 = LogicalConstraint( + expr=~m.Y[1, '0'], doc="no pre-existing long-term contract" + ) # Long-term contract implies '0'-disjunct in following timesteps - for t in range(2, m.T_max+1): + for t in range(2, m.T_max + 1): m.logical_blocks[t].equiv = LogicalConstraint( - expr=m.Y[t, '0'].equivalent_to(lor(m.Y[t_, str(q)] for t_ in range(1, t) for q in range(t - t_, m.T_max - t_ + 1))) + expr=m.Y[t, '0'].equivalent_to( + lor( + m.Y[t_, str(q)] + for t_ in range(1, t) + for q in range(t - t_, m.T_max - t_ + 1) + ) + ) ) # Objective function - m.objective = Objective(expr=sum(m.alpha[t]*m.s[t]+m.c[t] for t in m.T)) + m.objective = Objective(expr=sum(m.alpha[t] * m.s[t] + m.c[t] for t in m.T)) # Global constraints m.demand_satisfaction = Constraint(m.T) @@ -116,7 +151,7 @@ def disjunctions(m, t): m.material_balance = Constraint(m.T) for t in m.T: - m.material_balance[t]=m.s[t] == (m.s[t-1] if t>1 else 0) + m.x[t] - m.f[t] + m.material_balance[t] = m.s[t] == (m.s[t - 1] if t > 1 else 0) + m.x[t] - m.f[t] return m @@ -137,13 +172,28 @@ def pprint_result(model): # Find activated disjunct/contract in each timestep choice = filter( lambda y: model.disjuncts[t, y].indicator_var.value == 1.0, - model.disjunct_choices) + model.disjunct_choices, + ) choices.append(next(iter(choice))) try: from pandas import DataFrame + df = DataFrame( - columns=['choice', 'base_cost', 'reduction', 'reduced_cost', 'spending', 'stock', 'storage_cost', 'min_purchase', 'purchased', 'feed', 'demand']) + columns=[ + 'choice', + 'base_cost', + 'reduction', + 'reduced_cost', + 'spending', + 'stock', + 'storage_cost', + 'min_purchase', + 'purchased', + 'feed', + 'demand', + ] + ) df.choice = choices df.stock = [model.s[t].value for t in model.T] df.storage_cost = [model.alpha[t] for t in model.T] @@ -162,22 +212,24 @@ def pprint_result(model): df.loc[t, 'min_purchase'] = 0 df.loc[t, 'reduced_cost'] = model.gamma[t] df.loc[t, 'base_cost'] = model.gamma[t] - t = t+1 + t = t + 1 elif df.loc[t, 'choice'] == 'B': df.loc[t, 'reduction'] = model.beta_B[t] df.loc[t, 'min_purchase'] = model.F_B_lo[t] - df.loc[t, 'reduced_cost'] = (1-model.beta_B[t])*model.gamma[t] + df.loc[t, 'reduced_cost'] = (1 - model.beta_B[t]) * model.gamma[t] df.loc[t, 'base_cost'] = model.gamma[t] - t = t+1 + t = t + 1 elif int(df.loc[t, 'choice']) == 0: - t = t+1 + t = t + 1 else: q = int(df.loc[t, 'choice']) t_contract = t - for t_ in range(t, t+q+1): + for t_ in range(t, t + q + 1): df.loc[t_, 'reduction'] = model.beta_L[t_contract, q] df.loc[t_, 'min_purchase'] = model.F_L_lo[t_contract, q] - df.loc[t_, 'reduced_cost'] = (1-model.beta_L[t_contract, q])*model.gamma[t_contract] + df.loc[t_, 'reduced_cost'] = ( + 1 - model.beta_L[t_contract, q] + ) * model.gamma[t_contract] df.loc[t_, 'base_cost'] = model.gamma[t_contract] t = t + q + 1 print(df) diff --git a/examples/gdp/small_lit/ex1_Lee.py b/examples/gdp/small_lit/ex1_Lee.py index 2aae500b299..ddd2e1c3d2f 100644 --- a/examples/gdp/small_lit/ex1_Lee.py +++ b/examples/gdp/small_lit/ex1_Lee.py @@ -6,8 +6,15 @@ """ -from pyomo.environ import (ConcreteModel, Constraint, NonNegativeReals, - Objective, SolverFactory, Var, minimize) +from pyomo.environ import ( + ConcreteModel, + Constraint, + NonNegativeReals, + Objective, + SolverFactory, + Var, + minimize, +) from pyomo.gdp import Disjunct, Disjunction @@ -22,18 +29,18 @@ def build_model(): m.y3 = Disjunct() m.y1.constr1 = Constraint(expr=m.x1**2 + m.x2**2 - 1 <= 0) m.y1.constr2 = Constraint(expr=m.c == 2) - m.y2.constr1 = Constraint(expr=(m.x1 - 4)**2 + (m.x2 - 1)**2 - 1 <= 0) + m.y2.constr1 = Constraint(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2 - 1 <= 0) m.y2.constr2 = Constraint(expr=m.c == 1) - m.y3.constr1 = Constraint(expr=(m.x1 - 2)**2 + (m.x2 - 4)**2 - 1 <= 0) + m.y3.constr1 = Constraint(expr=(m.x1 - 2) ** 2 + (m.x2 - 4) ** 2 - 1 <= 0) m.y3.constr2 = Constraint(expr=m.c == 3) m.GPD123 = Disjunction(expr=[m.y1, m.y2, m.y3]) - m.obj = Objective(expr=(m.x1 - 3)**2 + (m.x2 - 2)**2 + m.c, sense=minimize) + m.obj = Objective(expr=(m.x1 - 3) ** 2 + (m.x2 - 2) ** 2 + m.c, sense=minimize) return m + if __name__ == "__main__": model = build_model() results = SolverFactory('gdpopt.loa').solve(model, tee=True) print(results) - diff --git a/examples/gdp/small_lit/ex_633_trespalacios.py b/examples/gdp/small_lit/ex_633_trespalacios.py index fc62111bbf3..b281e009d1f 100644 --- a/examples/gdp/small_lit/ex_633_trespalacios.py +++ b/examples/gdp/small_lit/ex_633_trespalacios.py @@ -29,22 +29,34 @@ def build_simple_nonconvex_gdp(): m.x2 = Var(bounds=(0, 3), doc="variable x2") m.obj = Objective(expr=5 + 0.2 * m.x1 - m.x2, doc="Minimize objective") - m.disjunction1 = Disjunction(expr=[ - [m.x2 <= 0.4*exp(m.x1/2.0), - m.x2 <= 0.5*(m.x1 - 2.5)**2 + 0.3, - m.x2 <= 6.5/(m.x1/0.3 + 2.0) + 1.0], - [m.x2 <= 0.3*exp(m.x1/1.8), - m.x2 <= 0.7*(m.x1/1.2 - 2.1)**2 + 0.3, - m.x2 <= 6.5/(m.x1/0.8 + 1.1)] - ]) - m.disjunction2 = Disjunction(expr=[ - [m.x2 <= 0.9*exp(m.x1/2.1), - m.x2 <= 1.3*(m.x1/1.5 - 1.8)**2 + 0.3, - m.x2 <= 6.5/(m.x1/0.8 + 1.1)], - [m.x2 <= 0.4*exp(m.x1/1.5), - m.x2 <= 1.2*(m.x1 - 2.5)**2 + 0.3, - m.x2 <= 6.0/(m.x1/0.6 + 1.0) + 0.5] - ]) + m.disjunction1 = Disjunction( + expr=[ + [ + m.x2 <= 0.4 * exp(m.x1 / 2.0), + m.x2 <= 0.5 * (m.x1 - 2.5) ** 2 + 0.3, + m.x2 <= 6.5 / (m.x1 / 0.3 + 2.0) + 1.0, + ], + [ + m.x2 <= 0.3 * exp(m.x1 / 1.8), + m.x2 <= 0.7 * (m.x1 / 1.2 - 2.1) ** 2 + 0.3, + m.x2 <= 6.5 / (m.x1 / 0.8 + 1.1), + ], + ] + ) + m.disjunction2 = Disjunction( + expr=[ + [ + m.x2 <= 0.9 * exp(m.x1 / 2.1), + m.x2 <= 1.3 * (m.x1 / 1.5 - 1.8) ** 2 + 0.3, + m.x2 <= 6.5 / (m.x1 / 0.8 + 1.1), + ], + [ + m.x2 <= 0.4 * exp(m.x1 / 1.5), + m.x2 <= 1.2 * (m.x1 - 2.5) ** 2 + 0.3, + m.x2 <= 6.0 / (m.x1 / 0.6 + 1.0) + 0.5, + ], + ] + ) return m diff --git a/examples/gdp/small_lit/nonconvex_HEN.py b/examples/gdp/small_lit/nonconvex_HEN.py index 193bb475f4f..61c24c3187a 100644 --- a/examples/gdp/small_lit/nonconvex_HEN.py +++ b/examples/gdp/small_lit/nonconvex_HEN.py @@ -8,72 +8,99 @@ """ -from pyomo.environ import (ConcreteModel, Constraint, NonNegativeReals, - Objective, RangeSet, SolverFactory, - TransformationFactory, Var) +from pyomo.environ import ( + ConcreteModel, + Constraint, + NonNegativeReals, + Objective, + RangeSet, + SolverFactory, + TransformationFactory, + Var, +) def build_gdp_model(): - # PARAMETERS - T1_lo, T1_up = 350., 400. - T2_lo, T2_up = 450., 500. + T1_lo, T1_up = 350.0, 400.0 + T2_lo, T2_up = 450.0, 500.0 - U = {'1':1.5, '2':0.5, '3':1} - FCP = {'hot':10.0, 'cold':7.5} - T_in = {'hot':500., 'cold':350., 'cooling':300., 'steam':600.} - T_out = {'hot':340., 'cold':560., 'cooling':320., 'steam':600.} - Cost = {'cooling':20., 'steam':80.} + U = {'1': 1.5, '2': 0.5, '3': 1} + FCP = {'hot': 10.0, 'cold': 7.5} + T_in = {'hot': 500.0, 'cold': 350.0, 'cooling': 300.0, 'steam': 600.0} + T_out = {'hot': 340.0, 'cold': 560.0, 'cooling': 320.0, 'steam': 600.0} + Cost = {'cooling': 20.0, 'steam': 80.0} # VARIABLES m = ConcreteModel() m.T1 = Var(domain=NonNegativeReals, bounds=(T1_lo, T1_up)) m.T2 = Var(domain=NonNegativeReals, bounds=(T2_lo, T2_up)) - m.exchangers = RangeSet(1,3) - m.A = Var(m.exchangers, domain=NonNegativeReals, bounds=(1e-4,50)) - m.CP = Var(m.exchangers, domain=NonNegativeReals, bounds=(0,600*(50**0.6)+2*46500)) + m.exchangers = RangeSet(1, 3) + m.A = Var(m.exchangers, domain=NonNegativeReals, bounds=(1e-4, 50)) + m.CP = Var( + m.exchangers, domain=NonNegativeReals, bounds=(0, 600 * (50**0.6) + 2 * 46500) + ) # Note that A_lo=0 leads to an exception in MC++ if using gdpopt with strategy 'GLOA' # The exception occurs when constructing McCormick relaxations # OBJECTIVE m.objective = Objective( - expr=(sum(m.CP[i] for i in m.exchangers) - + FCP['hot']*(m.T1-T_out['hot'])*Cost['cooling'] - + FCP['cold']*(T_out['cold']-m.T2)*Cost['steam']) + expr=( + sum(m.CP[i] for i in m.exchangers) + + FCP['hot'] * (m.T1 - T_out['hot']) * Cost['cooling'] + + FCP['cold'] * (T_out['cold'] - m.T2) * Cost['steam'] + ) ) # GLOBAL CONSTRAINTS m.constr1 = Constraint( - expr=FCP['hot']*(T_in['hot']-m.T1) == m.A[1]*U['1']*((T_in['hot']-m.T2)+(m.T1-T_in['cold']))/2. + expr=FCP['hot'] * (T_in['hot'] - m.T1) + == m.A[1] * U['1'] * ((T_in['hot'] - m.T2) + (m.T1 - T_in['cold'])) / 2.0 ) - m.constr2 = Constraint( # Note the error in the paper in constraint 2 - expr=FCP['hot']*(m.T1-T_out['hot']) == m.A[2]*U['2']*((T_out['hot']-T_in['cooling'])+(m.T1-T_out['cooling']))/2. + m.constr2 = Constraint( # Note the error in the paper in constraint 2 + expr=FCP['hot'] * (m.T1 - T_out['hot']) + == m.A[2] + * U['2'] + * ((T_out['hot'] - T_in['cooling']) + (m.T1 - T_out['cooling'])) + / 2.0 ) m.constr3 = Constraint( - expr=FCP['cold']*(T_out['cold']-m.T2) == m.A[3]*U['3']*((T_out['steam']-m.T2)+(T_in['steam']-T_out['cold']))/2. + expr=FCP['cold'] * (T_out['cold'] - m.T2) + == m.A[3] + * U['3'] + * ((T_out['steam'] - m.T2) + (T_in['steam'] - T_out['cold'])) + / 2.0 ) m.constr4 = Constraint( - expr=FCP['hot']*(T_in['hot']-m.T1) == FCP['cold']*(m.T2-T_in['cold']) + expr=FCP['hot'] * (T_in['hot'] - m.T1) == FCP['cold'] * (m.T2 - T_in['cold']) ) # DISJUNCTIONS @m.Disjunction(m.exchangers) def exchanger_disjunction(m, disjctn): return [ - [m.CP[disjctn] == 2750*(m.A[disjctn]**0.6)+3000, - 0. <= m.A[disjctn], m.A[disjctn] <= 10.], - [m.CP[disjctn] == 1500*(m.A[disjctn]**0.6)+15000, - 10. <= m.A[disjctn], m.A[disjctn] <= 25.], - [m.CP[disjctn] == 600*(m.A[disjctn]**0.6)+46500, - 25. <= m.A[disjctn], m.A[disjctn] <= 50.] + [ + m.CP[disjctn] == 2750 * (m.A[disjctn] ** 0.6) + 3000, + 0.0 <= m.A[disjctn], + m.A[disjctn] <= 10.0, + ], + [ + m.CP[disjctn] == 1500 * (m.A[disjctn] ** 0.6) + 15000, + 10.0 <= m.A[disjctn], + m.A[disjctn] <= 25.0, + ], + [ + m.CP[disjctn] == 600 * (m.A[disjctn] ** 0.6) + 46500, + 25.0 <= m.A[disjctn], + m.A[disjctn] <= 50.0, + ], ] return m if __name__ == "__main__": - # Decide whether to reformulate as MINLP and what method to use reformulation = True reformulation_method = 'hull' @@ -83,10 +110,18 @@ def exchanger_disjunction(m, disjctn): if reformulation: if reformulation_method == 'bigm': - TransformationFactory('gdp.bigm').apply_to(model,bigM=600*(50**0.6)+2*46500) + TransformationFactory('gdp.bigm').apply_to( + model, bigM=600 * (50**0.6) + 2 * 46500 + ) elif reformulation_method == 'hull': TransformationFactory('gdp.hull').apply_to(model) - res = SolverFactory('gams').solve(model, tee=True, solver='baron', add_options=['option optcr = 0;'], keepfiles=True) + res = SolverFactory('gams').solve( + model, + tee=True, + solver='baron', + add_options=['option optcr = 0;'], + keepfiles=True, + ) else: # Note: MC++ needs to be properly installed to use strategy GLOA res = SolverFactory('gdpopt.gloa').solve(model, tee=True) diff --git a/examples/gdp/stickies.py b/examples/gdp/stickies.py index e050da57b7d..75beb911415 100644 --- a/examples/gdp/stickies.py +++ b/examples/gdp/stickies.py @@ -21,14 +21,13 @@ def build_model(): model = AbstractModel() - model.BigM = Suffix(direction=Suffix.LOCAL) model.BigM[None] = 1000 DATFILE = "stickies1.dat" ####################### - #Sets + # Sets ####################### # J @@ -39,18 +38,22 @@ def build_model(): model.BadComponents = Set() # N: total nodes in the system model.Nodes = Set() - # S: possibe screens + # S: possible screens model.Screens = Set() def screen_node_filter(model, s, n): return s != n - model.ScreenNodePairs = Set(initialize=model.Screens * model.Nodes, dimen=2, - filter=screen_node_filter) + + model.ScreenNodePairs = Set( + initialize=model.Screens * model.Nodes, dimen=2, filter=screen_node_filter + ) def screen_filter(model, s, sprime): return s != sprime - model.ScreenPairs = Set(initialize = model.Screens * model.Screens, dimen=2, - filter=screen_filter) + + model.ScreenPairs = Set( + initialize=model.Screens * model.Screens, dimen=2, filter=screen_filter + ) ###################### # Parameters @@ -76,22 +79,25 @@ def screen_filter(model, s, sprime): model.StickiesWeight = Param() model.CostWeight = Param() - ## Bounds on variables # F_s^{in, lo} and F_s^{in, up} (f_in_up(s), f_in_lo(s)) def flow_ub_rule(model, s): return sum(model.InitialComponentFlow[k] for k in model.Components) + model.ScreenFlowLB = Param(model.Screens) model.ScreenFlowUB = Param(model.Screens, initialize=flow_ub_rule) # m_in_lo(ss, k): lower bound of individual flow into nodes. model.InletComponentFlowLB = Param(model.Components, model.Nodes, default=0) + def component_flow_ub_rule(model, k, n): return model.InitialComponentFlow[k] + # m_in_up(ss, k) - model.InletComponentFlowUB = Param(model.Components, model.Nodes, - initialize=component_flow_ub_rule) + model.InletComponentFlowUB = Param( + model.Components, model.Nodes, initialize=component_flow_ub_rule + ) # r_lo(s) model.RejectRateLB = Param(model.Screens) @@ -99,134 +105,186 @@ def component_flow_ub_rule(model, k, n): model.RejectRateUB = Param(model.Screens) # m_rej_lo(s, k) - model.RejectedComponentFlowLB = Param(model.Components, model.Screens, - default=0) + model.RejectedComponentFlowLB = Param(model.Components, model.Screens, default=0) + def rejected_component_flow_bound(model, k, s): - return model.InitialComponentFlow[k]*(model.RejectRateUB[s]**\ - model.AcceptanceFactor[s, k]) + return model.InitialComponentFlow[k] * ( + model.RejectRateUB[s] ** model.AcceptanceFactor[s, k] + ) + # m_rej_up(s, k) - model.RejectedComponentFlowUB = Param(model.Components, model.Screens, - initialize=rejected_component_flow_bound) + model.RejectedComponentFlowUB = Param( + model.Components, model.Screens, initialize=rejected_component_flow_bound + ) # m_acc_lo(s, k): lower bound of accepted individual flow - model.AcceptedComponentFlowLB = Param(model.Components, model.Screens, - default=0) + model.AcceptedComponentFlowLB = Param(model.Components, model.Screens, default=0) + def accepted_component_flow_bound(model, k, s): - return model.InitialComponentFlow[k]*(1 - model.RejectRateLB[s]**\ - model.AcceptanceFactor[s, k]) + return model.InitialComponentFlow[k] * ( + 1 - model.RejectRateLB[s] ** model.AcceptanceFactor[s, k] + ) + # m_acc_up(s, k) - model.AcceptedComponentFlowUB = Param(model.Components, model.Screens, - initialize=accepted_component_flow_bound) + model.AcceptedComponentFlowUB = Param( + model.Components, model.Screens, initialize=accepted_component_flow_bound + ) ###################### # Variables ###################### # c_s, C(s), cost of selecting screen - model.screenCost = Var(model.Screens, within=NonNegativeReals)#, bounds=get_screen_cost_bounds) + model.screenCost = Var( + model.Screens, within=NonNegativeReals + ) # , bounds=get_screen_cost_bounds) # total inlet flow into screen s (f_s, F_IN(s)) # NOTE: the upper bound is enforced globally. The lower bound is enforced in # the first disjunction (to match GAMS) def get_inlet_flow_bounds(model, s): return (0, model.ScreenFlowUB[s]) - model.inletScreenFlow = Var(model.Screens, within=NonNegativeReals, - bounds=get_inlet_flow_bounds) + + model.inletScreenFlow = Var( + model.Screens, within=NonNegativeReals, bounds=get_inlet_flow_bounds + ) # inlet flow of component j into node n, (f_{n,j}^I, M_IN) def get_inlet_component_flow_bounds(model, j, n): return (model.InletComponentFlowLB[j, n], model.InletComponentFlowUB[j, n]) - model.inletComponentFlow = Var(model.Components, model.Nodes, - within=NonNegativeReals, - bounds=get_inlet_component_flow_bounds) + + model.inletComponentFlow = Var( + model.Components, + model.Nodes, + within=NonNegativeReals, + bounds=get_inlet_component_flow_bounds, + ) # accepted flow of component j from screen s (f_{s, j}^A) def get_accepted_component_flow_bounds(model, j, s): - return (model.AcceptedComponentFlowLB[j, s], - model.AcceptedComponentFlowUB[j, s]) - model.acceptedComponentFlow = Var(model.Components, model.Screens, - within=NonNegativeReals, - bounds=get_accepted_component_flow_bounds) + return ( + model.AcceptedComponentFlowLB[j, s], + model.AcceptedComponentFlowUB[j, s], + ) + + model.acceptedComponentFlow = Var( + model.Components, + model.Screens, + within=NonNegativeReals, + bounds=get_accepted_component_flow_bounds, + ) + # rejected flow of component j from screen s (f_{s,j}^R) def rej_component_flow_bounds(model, k, s): - return (model.RejectedComponentFlowLB[k, s], - model.RejectedComponentFlowUB[k, s]) - model.rejectedComponentFlow = Var(model.Components, model.Screens, - within=NonNegativeReals, - bounds=rej_component_flow_bounds) + return ( + model.RejectedComponentFlowLB[k, s], + model.RejectedComponentFlowUB[k, s], + ) + + model.rejectedComponentFlow = Var( + model.Components, + model.Screens, + within=NonNegativeReals, + bounds=rej_component_flow_bounds, + ) # accepted flow of component j from screen s to node n (m_{s,n,j}^A) def get_accepted_node_flow_bounds(model, j, s, n): return (0, model.AcceptedComponentFlowUB[j, s]) - model.acceptedNodeFlow = Var(model.Components, model.Screens, model.Nodes, - within=NonNegativeReals, - bounds=get_accepted_node_flow_bounds) + + model.acceptedNodeFlow = Var( + model.Components, + model.Screens, + model.Nodes, + within=NonNegativeReals, + bounds=get_accepted_node_flow_bounds, + ) # rejected flow of component j from screen s to node n (m_{s,n,j}^R) def get_rejected_node_flow_bounds(model, j, s, n): return (0, model.RejectedComponentFlowUB[j, s]) - model.rejectedNodeFlow = Var(model.Components, model.Screens, model.Nodes, - within=NonNegativeReals, - bounds=get_rejected_node_flow_bounds) + + model.rejectedNodeFlow = Var( + model.Components, + model.Screens, + model.Nodes, + within=NonNegativeReals, + bounds=get_rejected_node_flow_bounds, + ) # flow of component j from source to node n (m_{s,j}^0) def get_src_flow_bounds(model, j, n): return (0, model.InitialComponentFlow[j]) - model.flowFromSource = Var(model.Components, model.Nodes, - within=NonNegativeReals) + + model.flowFromSource = Var(model.Components, model.Nodes, within=NonNegativeReals) # reject rate of screen s (r_s) def get_rej_rate_bounds(model, s): return (model.RejectRateLB[s], model.RejectRateUB[s]) - model.rejectRate = Var(model.Screens, within=NonNegativeReals, - bounds=get_rej_rate_bounds) + model.rejectRate = Var( + model.Screens, within=NonNegativeReals, bounds=get_rej_rate_bounds + ) ###################### # Objective ###################### def calc_cost_rule(model): - lostFiberCost = model.FiberWeight * sum(model.inletComponentFlow[j,'SNK'] \ - for j in model.GoodComponents) - stickiesCost = model.StickiesWeight * sum(model.inletComponentFlow[j,'PRD']\ - for j in model.BadComponents) - screenCost = model.CostWeight * sum(model.screenCost[s] \ - for s in model.Screens) + lostFiberCost = model.FiberWeight * sum( + model.inletComponentFlow[j, 'SNK'] for j in model.GoodComponents + ) + stickiesCost = model.StickiesWeight * sum( + model.inletComponentFlow[j, 'PRD'] for j in model.BadComponents + ) + screenCost = model.CostWeight * sum(model.screenCost[s] for s in model.Screens) return lostFiberCost + stickiesCost + screenCost - model.min_cost = Objective(rule=calc_cost_rule) + model.min_cost = Objective(rule=calc_cost_rule) ###################### # Constraints ###################### def stickies_bound_rule(model, j): - return sum(model.inletComponentFlow[j,'PRD'] for j in model.BadComponents) \ + return ( + sum(model.inletComponentFlow[j, 'PRD'] for j in model.BadComponents) <= model.AcceptedLeftover[j] * model.InitialComponentFlow[j] + ) + model.stickies_bound = Constraint(model.BadComponents, rule=stickies_bound_rule) def inlet_flow_rule(model, s, j): - return model.inletComponentFlow[j,s] == model.acceptedComponentFlow[j,s] + \ - model.rejectedComponentFlow[j, s] - model.inlet_flow = Constraint(model.Screens, model.Components, - rule=inlet_flow_rule) + return ( + model.inletComponentFlow[j, s] + == model.acceptedComponentFlow[j, s] + model.rejectedComponentFlow[j, s] + ) + + model.inlet_flow = Constraint(model.Screens, model.Components, rule=inlet_flow_rule) def total_inlet_flow_rule(model, s): - return model.inletScreenFlow[s] == sum(model.inletComponentFlow[j, s] \ - for j in model.Components) + return model.inletScreenFlow[s] == sum( + model.inletComponentFlow[j, s] for j in model.Components + ) + model.total_inlet_flow = Constraint(model.Screens, rule=total_inlet_flow_rule) def inlet_flow_balance_rule(model, n, j): - return model.inletComponentFlow[j, n] == model.flowFromSource[j, n] + \ - sum(model.acceptedNodeFlow[j, s, n] + model.rejectedNodeFlow[j, s, n] \ - for s in model.Screens if s != n) - model.inlet_flow_balance = Constraint(model.Nodes, model.Components, - rule=inlet_flow_balance_rule) + return model.inletComponentFlow[j, n] == model.flowFromSource[j, n] + sum( + model.acceptedNodeFlow[j, s, n] + model.rejectedNodeFlow[j, s, n] + for s in model.Screens + if s != n + ) + + model.inlet_flow_balance = Constraint( + model.Nodes, model.Components, rule=inlet_flow_balance_rule + ) def source_flow_rule(model, j): - return model.InitialComponentFlow[j] == sum(model.flowFromSource[j, n] \ - for n in model.Nodes) + return model.InitialComponentFlow[j] == sum( + model.flowFromSource[j, n] for n in model.Nodes + ) + model.source_flow = Constraint(model.Components, rule=source_flow_rule) ################# @@ -235,108 +293,130 @@ def source_flow_rule(model, j): def screen_disjunct_rule(disjunct, selectScreen, s): model = disjunct.model() + def rejected_flow_rule(disjunct, j): - return model.rejectedComponentFlow[j,s] == \ - model.inletComponentFlow[j,s]* \ - (model.rejectRate[s]**model.AcceptanceFactor[s, j]) + return model.rejectedComponentFlow[j, s] == model.inletComponentFlow[ + j, s + ] * (model.rejectRate[s] ** model.AcceptanceFactor[s, j]) if selectScreen: - disjunct.inlet_flow_bounds = Constraint(expr=model.ScreenFlowLB[s] <= \ - model.inletScreenFlow[s])# <= \ - #model.ScreenFlowUB[s]) - disjunct.rejected_flow = Constraint(model.Components, - rule=rejected_flow_rule) - disjunct.screen_cost = Constraint(expr=model.screenCost[s] == \ - model.ScreenCostCoeff1[s]* \ - (model.inletScreenFlow[s]** \ - model.ExpScreenCostCoeff[s]) + \ - model.ScreenCostCoeff2[s]* \ - (1 - model.rejectRate[s])) + disjunct.inlet_flow_bounds = Constraint( + expr=model.ScreenFlowLB[s] <= model.inletScreenFlow[s] + ) # <= \ + # model.ScreenFlowUB[s]) + disjunct.rejected_flow = Constraint( + model.Components, rule=rejected_flow_rule + ) + disjunct.screen_cost = Constraint( + expr=model.screenCost[s] + == model.ScreenCostCoeff1[s] + * (model.inletScreenFlow[s] ** model.ExpScreenCostCoeff[s]) + + model.ScreenCostCoeff2[s] * (1 - model.rejectRate[s]) + ) else: disjunct.no_flow = Constraint(expr=model.inletScreenFlow[s] == 0) disjunct.no_cost = Constraint(expr=model.screenCost[s] == 0) - model.screen_selection_disjunct = Disjunct([0,1], model.Screens, - rule=screen_disjunct_rule) + + model.screen_selection_disjunct = Disjunct( + [0, 1], model.Screens, rule=screen_disjunct_rule + ) def screen_disjunction_rule(model, s): - return [model.screen_selection_disjunct[selectScreen, s] \ - for selectScreen in [0,1]] - model.screen_disjunction = Disjunction(model.Screens, - rule=screen_disjunction_rule) + return [ + model.screen_selection_disjunct[selectScreen, s] for selectScreen in [0, 1] + ] + model.screen_disjunction = Disjunction(model.Screens, rule=screen_disjunction_rule) def accepted_flow_disjunct_rule(disjunct, s, n, acceptFlow): model = disjunct.model() + def flow_balance_rule(disjunct, j): - return model.acceptedNodeFlow[j, s, n] == \ - model.acceptedComponentFlow[j, s] + return model.acceptedNodeFlow[j, s, n] == model.acceptedComponentFlow[j, s] + def no_flow_rule(disjunct, j): return model.acceptedNodeFlow[j, s, n] == 0 if acceptFlow: - disjunct.flow_balance = Constraint(model.Components, - rule=flow_balance_rule) + disjunct.flow_balance = Constraint(model.Components, rule=flow_balance_rule) else: disjunct.no_flow = Constraint(model.Components, rule=no_flow_rule) - model.flow_acceptance_disjunct = Disjunct(model.ScreenNodePairs, [0,1], - rule=accepted_flow_disjunct_rule) + + model.flow_acceptance_disjunct = Disjunct( + model.ScreenNodePairs, [0, 1], rule=accepted_flow_disjunct_rule + ) def flow_acceptance_disjunction_rule(model, s, n): - return [model.flow_acceptance_disjunct[s, n, acceptFlow] \ - for acceptFlow in [0,1]] - model.flow_acceptance_disjunction = Disjunction(model.ScreenNodePairs, - rule=flow_acceptance_disjunction_rule) + return [ + model.flow_acceptance_disjunct[s, n, acceptFlow] for acceptFlow in [0, 1] + ] + model.flow_acceptance_disjunction = Disjunction( + model.ScreenNodePairs, rule=flow_acceptance_disjunction_rule + ) def rejected_flow_disjunct_rule(disjunct, s, n, rejectFlow): model = disjunct.model() + def flow_balance_rule(disjunct, j): - return model.rejectedNodeFlow[j, s, n] == \ - model.rejectedComponentFlow[j, s] + return model.rejectedNodeFlow[j, s, n] == model.rejectedComponentFlow[j, s] + def no_reject_rule(disjunct, j): return model.rejectedNodeFlow[j, s, n] == 0 if rejectFlow: - disjunct.flow_balance = Constraint(model.Components, - rule=flow_balance_rule) + disjunct.flow_balance = Constraint(model.Components, rule=flow_balance_rule) else: disjunct.no_reject = Constraint(model.Components, rule=no_reject_rule) - model.flow_rejection_disjunct = Disjunct(model.ScreenNodePairs, [0,1], - rule=rejected_flow_disjunct_rule) + + model.flow_rejection_disjunct = Disjunct( + model.ScreenNodePairs, [0, 1], rule=rejected_flow_disjunct_rule + ) def rejected_flow_disjunction_rule(model, s, n): - return [model.flow_rejection_disjunct[s, n, rejectFlow] \ - for rejectFlow in [0,1]] - model.flow_rejection_disjunction = Disjunction(model.ScreenNodePairs, - rule=rejected_flow_disjunction_rule) + return [ + model.flow_rejection_disjunct[s, n, rejectFlow] for rejectFlow in [0, 1] + ] + model.flow_rejection_disjunction = Disjunction( + model.ScreenNodePairs, rule=rejected_flow_disjunction_rule + ) def flow_from_source_disjunct_rule(disjunct, n): model = disjunct.model() + def sourceFlow_balance_rule1(disjunct, j): # this doesn't match the formulation, but it matches GAMS: return model.flowFromSource[j, n] >= model.InitialComponentFlowLB[j] # this would be the formulation version: - #return model.flowFromSource[j, n] == model.InitialComponentFlow[j] + # return model.flowFromSource[j, n] == model.InitialComponentFlow[j] + def sourceFlow_balance_rule2(disjunct, j): return model.flowFromSource[j, n] <= model.InitialComponentFlow[j] + def no_sourceFlow_rule(disjunct, j, nprime): return model.flowFromSource[j, nprime] == 0 - disjunct.flow_balance1 = Constraint(model.Components, - rule=sourceFlow_balance_rule1) - disjunct.flow_balance2 = Constraint(model.Components, - rule=sourceFlow_balance_rule2) - disjunct.no_flow = Constraint(model.Components, model.Nodes - [n], - rule=no_sourceFlow_rule) - model.flow_from_source_disjunct = Disjunct(model.Nodes, - rule=flow_from_source_disjunct_rule) + disjunct.flow_balance1 = Constraint( + model.Components, rule=sourceFlow_balance_rule1 + ) + disjunct.flow_balance2 = Constraint( + model.Components, rule=sourceFlow_balance_rule2 + ) + disjunct.no_flow = Constraint( + model.Components, model.Nodes - [n], rule=no_sourceFlow_rule + ) + + model.flow_from_source_disjunct = Disjunct( + model.Nodes, rule=flow_from_source_disjunct_rule + ) def flow_from_source_disjunction_rule(model): return [model.flow_from_source_disjunct[n] for n in model.Nodes] - model.flow_from_source_disjunction = Disjunction( - rule=flow_from_source_disjunction_rule) + model.flow_from_source_disjunction = Disjunction( + rule=flow_from_source_disjunction_rule + ) ###################### # Boolean Constraints @@ -348,47 +428,75 @@ def flow_from_source_disjunction_rule(model): # These are the GAMS versions of the logical constraints, which is not # what appears in the formulation: def log1_rule(model, s): - return model.screen_selection_disjunct[1, s].binary_indicator_var == \ - sum(model.flow_acceptance_disjunct[s, n, 1].binary_indicator_var \ - for n in model.Nodes if s != n) + return model.screen_selection_disjunct[1, s].binary_indicator_var == sum( + model.flow_acceptance_disjunct[s, n, 1].binary_indicator_var + for n in model.Nodes + if s != n + ) + model.log1 = Constraint(model.Screens, rule=log1_rule) def log2_rule(model, s): - return model.screen_selection_disjunct[1, s].binary_indicator_var == \ - sum(model.flow_rejection_disjunct[s, n, 1].binary_indicator_var \ - for n in model.Nodes if s != n) + return model.screen_selection_disjunct[1, s].binary_indicator_var == sum( + model.flow_rejection_disjunct[s, n, 1].binary_indicator_var + for n in model.Nodes + if s != n + ) + model.log2 = Constraint(model.Screens, rule=log2_rule) def log3_rule(model, s): - return model.screen_selection_disjunct[1, s].binary_indicator_var >= \ - sum(model.flow_acceptance_disjunct[s, sprime, 1].binary_indicator_var \ - for sprime in model.Screens if s != sprime) + return model.screen_selection_disjunct[1, s].binary_indicator_var >= sum( + model.flow_acceptance_disjunct[s, sprime, 1].binary_indicator_var + for sprime in model.Screens + if s != sprime + ) + model.log3 = Constraint(model.Screens, rule=log3_rule) def log4_rule(model, s): - return model.screen_selection_disjunct[1, s].binary_indicator_var >= \ - sum(model.flow_rejection_disjunct[s, sprime, 1].binary_indicator_var \ - for sprime in model.Screens if s != sprime) + return model.screen_selection_disjunct[1, s].binary_indicator_var >= sum( + model.flow_rejection_disjunct[s, sprime, 1].binary_indicator_var + for sprime in model.Screens + if s != sprime + ) + model.log4 = Constraint(model.Screens, rule=log4_rule) def log6_rule(model, s, sprime): - return model.flow_acceptance_disjunct[s, sprime, 1].binary_indicator_var + \ - model.flow_acceptance_disjunct[sprime, s, 1].binary_indicator_var <= 1 + return ( + model.flow_acceptance_disjunct[s, sprime, 1].binary_indicator_var + + model.flow_acceptance_disjunct[sprime, s, 1].binary_indicator_var + <= 1 + ) + model.log6 = Constraint(model.ScreenPairs, rule=log6_rule) def log7_rule(model, s, sprime): - return model.flow_rejection_disjunct[s, sprime, 1].binary_indicator_var + \ - model.flow_rejection_disjunct[sprime, s, 1].binary_indicator_var <= 1 + return ( + model.flow_rejection_disjunct[s, sprime, 1].binary_indicator_var + + model.flow_rejection_disjunct[sprime, s, 1].binary_indicator_var + <= 1 + ) + model.log7 = Constraint(model.ScreenPairs, rule=log7_rule) def log8_rule(model, s, n): - return model.flow_acceptance_disjunct[s, n, 1].binary_indicator_var + \ - model.flow_rejection_disjunct[s, n, 1].binary_indicator_var <= 1 + return ( + model.flow_acceptance_disjunct[s, n, 1].binary_indicator_var + + model.flow_rejection_disjunct[s, n, 1].binary_indicator_var + <= 1 + ) + model.log8 = Constraint(model.ScreenNodePairs, rule=log8_rule) def log9_rule(model, s, sprime): - return model.flow_acceptance_disjunct[s, sprime, 1].binary_indicator_var + \ - model.flow_rejection_disjunct[sprime, s, 1].binary_indicator_var <= 1 + return ( + model.flow_acceptance_disjunct[s, sprime, 1].binary_indicator_var + + model.flow_rejection_disjunct[sprime, s, 1].binary_indicator_var + <= 1 + ) + model.log9 = Constraint(model.ScreenPairs, rule=log9_rule) # These are the above logical constraints implemented correctly (I think) @@ -408,7 +516,6 @@ def log9_rule(model, s, sprime): # model.flow_existence2 = Constraint(model.ScreenNodePairs, # rule=flow_existence_rule2) - # # YA_{s,s'} v YR_{s',s} implies Y_s # def screen_flow_existence_rule1(model, s, sprime): # return model.screen_selection_disjunct[1, s].indicator_var >= \ @@ -422,7 +529,6 @@ def log9_rule(model, s, sprime): # model.screen_flow_existence2 = Constraint(model.ScreenPairs, # rule=screen_flow_existence_rule2) - # # YA_{s', s} XOR YA_{s, s'} # def accept_rule1(model, s, sprime): # return 1 <= model.flow_acceptance_disjunct[s, sprime, 1].indicator_var + \ @@ -444,7 +550,6 @@ def log9_rule(model, s, sprime): # model.flow_acceptance_disjunct[s, sprime, 1].indicator_var # model.accept4 = Constraint(model.ScreenPairs, rule=accept_rule4) - # # YR_{s', s} XOR YR_{s, s'} # def reject_rule1(model, s, sprime): # return 1 <= model.flow_rejection_disjunct[s, sprime, 1].indicator_var + \ @@ -466,7 +571,6 @@ def log9_rule(model, s, sprime): # model.flow_rejection_disjunct[s, sprime, 1].indicator_var # model.reject4 = Constraint(model.ScreenPairs, rule=reject_rule4) - # # YA_{s,n} XOR YR_{s,n} # def accept_or_reject_rule1(model, s, n): # return 1 <= model.flow_acceptance_disjunct[s, n, 1].indicator_var + \ @@ -496,8 +600,8 @@ def log9_rule(model, s, sprime): # fix the variables they fix in GAMS for s in instance.Screens: - instance.flow_acceptance_disjunct[s,'SNK',1].indicator_var.fix(False) - instance.flow_rejection_disjunct[s,'PRD',1].indicator_var.fix(False) + instance.flow_acceptance_disjunct[s, 'SNK', 1].indicator_var.fix(False) + instance.flow_rejection_disjunct[s, 'PRD', 1].indicator_var.fix(False) ################################################################################## ## for validation: Fix all the indicator variables to see if we get same objective @@ -511,7 +615,6 @@ def log9_rule(model, s, sprime): # instance.screen_selection_disjunct[1,'S5'].indicator_var.fix(0) # instance.screen_selection_disjunct[1,'S6'].indicator_var.fix(0) - # instance.flow_acceptance_disjunct['S1','S2',1].indicator_var.fix(0) # instance.flow_acceptance_disjunct['S1','S3',1].indicator_var.fix(0) # instance.flow_acceptance_disjunct['S1','S4',1].indicator_var.fix(0) diff --git a/examples/gdp/strip_packing/stripPacking.py b/examples/gdp/strip_packing/stripPacking.py index a55e012a7ac..0e8902c5ee4 100644 --- a/examples/gdp/strip_packing/stripPacking.py +++ b/examples/gdp/strip_packing/stripPacking.py @@ -17,9 +17,12 @@ # width of strip model.StripWidth = Param() + # upperbound on length (default is sum of lengths of rectangles) def sumLengths(model): return sum(model.Lengths[i] for i in model.RECTANGLES) + + model.LengthUB = Param(initialize=sumLengths) # rectangle relations @@ -32,27 +35,41 @@ def sumLengths(model): # length of strip (this will be the objective) model.Lt = Var(within=NonNegativeReals) + # generate the list of possible rectangle conflicts (which are any pair) def rec_pairs_filter(model, i, j): return i < j -model.RectanglePairs = Set(initialize=model.RECTANGLES * model.RECTANGLES, - dimen=2, filter=rec_pairs_filter) + + +model.RectanglePairs = Set( + initialize=model.RECTANGLES * model.RECTANGLES, dimen=2, filter=rec_pairs_filter +) + # strip length constraint def strip_ends_after_last_rec_rule(model, i): return model.Lt >= model.x[i] + model.Lengths[i] -model.strip_ends_after_last_rec = Constraint(model.RECTANGLES, - rule=strip_ends_after_last_rec_rule) + + +model.strip_ends_after_last_rec = Constraint( + model.RECTANGLES, rule=strip_ends_after_last_rec_rule +) + # constraints to prevent rectangles from going off strip def no_recs_off_end_rule(model, i): return inequality(0, model.x[i], model.LengthUB - model.Lengths[i]) + + model.no_recs_off_end = Constraint(model.RECTANGLES, rule=no_recs_off_end_rule) + def no_recs_off_bottom_rule(model, i): return inequality(model.Heights[i], model.y[i], model.StripWidth) -model.no_recs_off_bottom = Constraint(model.RECTANGLES, - rule=no_recs_off_bottom_rule) + + +model.no_recs_off_bottom = Constraint(model.RECTANGLES, rule=no_recs_off_bottom_rule) + # Disjunctions to prevent overlap between rectangles def no_overlap_disjunct_rule(disjunct, i, j, recRelation): @@ -70,14 +87,20 @@ def no_overlap_disjunct_rule(disjunct, i, j, recRelation): elif recRelation == 'Below': disjunct.c = Constraint(expr=model.y[j] - model.Heights[j] >= model.y[i]) else: - raise RuntimeError("Unrecognized rectangle relationship: %s" - % recRelation) -model.no_overlap_disjunct = Disjunct(model.RectanglePairs, model.RecRelations, - rule=no_overlap_disjunct_rule) + raise RuntimeError("Unrecognized rectangle relationship: %s" % recRelation) + + +model.no_overlap_disjunct = Disjunct( + model.RectanglePairs, model.RecRelations, rule=no_overlap_disjunct_rule +) + def no_overlap(model, i, j): - return [model.no_overlap_disjunct[i, j, direction] \ - for direction in model.RecRelations] + return [ + model.no_overlap_disjunct[i, j, direction] for direction in model.RecRelations + ] + + model.disj = Disjunction(model.RectanglePairs, rule=no_overlap) # minimize length diff --git a/examples/gdp/strip_packing/strip_packing_8rect.py b/examples/gdp/strip_packing/strip_packing_8rect.py index 9fb96500f03..eba3c82dc05 100644 --- a/examples/gdp/strip_packing/strip_packing_8rect.py +++ b/examples/gdp/strip_packing/strip_packing_8rect.py @@ -13,8 +13,16 @@ from __future__ import division -from pyomo.environ import (ConcreteModel, NonNegativeReals, Objective, Param, - Set, SolverFactory, TransformationFactory, Var) +from pyomo.environ import ( + ConcreteModel, + NonNegativeReals, + Objective, + Param, + Set, + SolverFactory, + TransformationFactory, + Var, +) # x and y are flipped from article @@ -26,55 +34,62 @@ def build_rect_strip_packing_model(): # Width and Length of each rectangle model.rect_width = Param( - model.rectangles, initialize={0: 3, 1: 3, 2: 2, 3: 2, 4: 3, 5: 5, - 6: 7, 7: 7}) + model.rectangles, initialize={0: 3, 1: 3, 2: 2, 3: 2, 4: 3, 5: 5, 6: 7, 7: 7} + ) # parameter indexed by each rectangle # same as height? model.rect_length = Param( - model.rectangles, initialize={0: 4, 1: 3, 2: 2, 3: 2, 4: 3, 5: 3, - 6: 4, 7: 4}) + model.rectangles, initialize={0: 4, 1: 3, 2: 2, 3: 2, 4: 3, 5: 3, 6: 4, 7: 4} + ) - model.strip_width = Param( - initialize=10, doc="Available width of the strip") + model.strip_width = Param(initialize=10, doc="Available width of the strip") # upperbound on length (default is sum of lengths of rectangles) model.max_length = Param( initialize=sum(model.rect_length[i] for i in model.rectangles), doc="maximum length of the strip (if all rectangles were arranged " - "lengthwise)") + "lengthwise)", + ) # x (length) and y (width) coordinates of each of the rectangles - model.x = Var(model.rectangles, - bounds=(0, model.max_length), - doc="rectangle corner x-position (position across length)") + model.x = Var( + model.rectangles, + bounds=(0, model.max_length), + doc="rectangle corner x-position (position across length)", + ) def w_bounds(m, i): return (0, m.strip_width - m.rect_width[i]) - model.y = Var(model.rectangles, - bounds=w_bounds, - doc="rectangle corner y-position (position down width)") - model.strip_length = Var( - within=NonNegativeReals, doc="Length of strip required.") + model.y = Var( + model.rectangles, + bounds=w_bounds, + doc="rectangle corner y-position (position down width)", + ) + + model.strip_length = Var(within=NonNegativeReals, doc="Length of strip required.") def rec_pairs_filter(model, i, j): return i < j + model.overlap_pairs = Set( initialize=model.rectangles * model.rectangles, - dimen=2, filter=rec_pairs_filter, - doc="set of possible rectangle conflicts") + dimen=2, + filter=rec_pairs_filter, + doc="set of possible rectangle conflicts", + ) @model.Constraint(model.rectangles) def strip_ends_after_last_rec(model, i): return model.strip_length >= model.x[i] + model.rect_length[i] - model.total_length = Objective(expr=model.strip_length, - doc="Minimize length") + model.total_length = Objective(expr=model.strip_length, doc="Minimize length") @model.Disjunction( model.overlap_pairs, doc="Make sure that none of the rectangles on the strip overlap in " - "either the x or y dimensions.") + "either the x or y dimensions.", + ) def no_overlap(m, i, j): return [ m.x[i] + m.rect_length[i] <= m.x[j], diff --git a/examples/gdp/strip_packing/strip_packing_concrete.py b/examples/gdp/strip_packing/strip_packing_concrete.py index b83c45cd40c..4fa6172a8d1 100644 --- a/examples/gdp/strip_packing/strip_packing_concrete.py +++ b/examples/gdp/strip_packing/strip_packing_concrete.py @@ -11,8 +11,7 @@ """ from __future__ import division -from pyomo.environ import (ConcreteModel, NonNegativeReals, Objective, Param, - Set, Var) +from pyomo.environ import ConcreteModel, NonNegativeReals, Objective, Param, Set, Var def build_rect_strip_packing_model(): @@ -21,58 +20,63 @@ def build_rect_strip_packing_model(): model.rectangles = Set(ordered=True, initialize=[0, 1, 2, 3]) # Width and Length of each rectangle - model.rect_width = Param( - model.rectangles, initialize={0: 6, 1: 3, 2: 4, 3: 2}) - model.rect_length = Param( - model.rectangles, initialize={0: 6, 1: 8, 2: 5, 3: 3}) + model.rect_width = Param(model.rectangles, initialize={0: 6, 1: 3, 2: 4, 3: 2}) + model.rect_length = Param(model.rectangles, initialize={0: 6, 1: 8, 2: 5, 3: 3}) - model.strip_width = Param( - initialize=10, doc="Available width of the strip") + model.strip_width = Param(initialize=10, doc="Available width of the strip") # upperbound on length (default is sum of lengths of rectangles) model.max_length = Param( initialize=sum(model.rect_length[i] for i in model.rectangles), doc="maximum length of the strip (if all rectangles were arranged " - "lengthwise)") + "lengthwise)", + ) # x (length) and y (width) coordinates of each of the rectangles - model.x = Var(model.rectangles, - bounds=(0, model.max_length), - doc="rectangle corner x-position (position down length)") + model.x = Var( + model.rectangles, + bounds=(0, model.max_length), + doc="rectangle corner x-position (position down length)", + ) def w_bounds(m, i): return (0, m.strip_width - m.rect_width[i]) - model.y = Var(model.rectangles, - bounds=w_bounds, - doc="rectangle corner y-position (position across width)") - model.strip_length = Var( - within=NonNegativeReals, doc="Length of strip required.") + model.y = Var( + model.rectangles, + bounds=w_bounds, + doc="rectangle corner y-position (position across width)", + ) + + model.strip_length = Var(within=NonNegativeReals, doc="Length of strip required.") def rec_pairs_filter(model, i, j): return i < j + model.overlap_pairs = Set( initialize=model.rectangles * model.rectangles, - dimen=2, filter=rec_pairs_filter, - doc="set of possible rectangle conflicts") + dimen=2, + filter=rec_pairs_filter, + doc="set of possible rectangle conflicts", + ) @model.Constraint(model.rectangles) def strip_ends_after_last_rec(model, i): return model.strip_length >= model.x[i] + model.rect_length[i] - model.total_length = Objective(expr=model.strip_length, - doc="Minimize length") + model.total_length = Objective(expr=model.strip_length, doc="Minimize length") @model.Disjunction( model.overlap_pairs, doc="Make sure that none of the rectangles on the strip overlap in " - "either the x or y dimensions.") + "either the x or y dimensions.", + ) def no_overlap(m, i, j): return [ - m.x[i] + m.rect_length[i] <= m.x[j],# i left of j - m.x[j] + m.rect_length[j] <= m.x[i],# i right of j - m.y[i] + m.rect_width[i] <= m.y[j],# i below j - m.y[j] + m.rect_width[j] <= m.y[i],#i above j + m.x[i] + m.rect_length[i] <= m.x[j], # i left of j + m.x[j] + m.rect_length[j] <= m.x[i], # i right of j + m.y[i] + m.rect_width[i] <= m.y[j], # i below j + m.y[j] + m.rect_width[j] <= m.y[i], # i above j ] return model diff --git a/examples/gdp/two_rxn_lee/two_rxn_model.py b/examples/gdp/two_rxn_lee/two_rxn_model.py index a3ab0d1f0c3..9057ef8c006 100644 --- a/examples/gdp/two_rxn_lee/two_rxn_model.py +++ b/examples/gdp/two_rxn_lee/two_rxn_model.py @@ -1,8 +1,8 @@ """Two reactor model from literature. See README.md.""" from __future__ import division -from pyomo.core import (ConcreteModel, Constraint, Objective, Param, Var, - maximize) +from pyomo.core import ConcreteModel, Constraint, Objective, Param, Var, maximize + # from pyomo.environ import * # NOQA from pyomo.gdp import Disjunction @@ -13,47 +13,68 @@ def build_model(use_mccormick=False): m.F = Var(bounds=(0, 8), doc="Flow into reactor") m.X = Var(bounds=(0, 1), doc="Reactor conversion") m.d = Param(initialize=2, doc="Max product demand") - m.c = Param([1, 2, 'I', 'II'], doc="Costs", initialize={ - 1: 2, # Value of product - 2: 0.2, # Cost of raw material - 'I': 2.5, # Cost of reactor I - 'II': 1.5 # Cost of reactor II - }) - m.alpha = Param(['I', 'II'], doc="Reactor coefficient", - initialize={'I': -8, 'II': -10}) - m.beta = Param(['I', 'II'], doc="Reactor coefficient", - initialize={'I': 9, 'II': 15}) - m.X_LB = Param(['I', 'II'], doc="Reactor conversion lower bound", - initialize={'I': 0.2, 'II': 0.7}) - m.X_UB = Param(['I', 'II'], doc="Reactor conversion upper bound", - initialize={'I': 0.95, 'II': 0.99}) + m.c = Param( + [1, 2, 'I', 'II'], + doc="Costs", + initialize={ + 1: 2, # Value of product + 2: 0.2, # Cost of raw material + 'I': 2.5, # Cost of reactor I + 'II': 1.5, # Cost of reactor II + }, + ) + m.alpha = Param( + ['I', 'II'], doc="Reactor coefficient", initialize={'I': -8, 'II': -10} + ) + m.beta = Param( + ['I', 'II'], doc="Reactor coefficient", initialize={'I': 9, 'II': 15} + ) + m.X_LB = Param( + ['I', 'II'], + doc="Reactor conversion lower bound", + initialize={'I': 0.2, 'II': 0.7}, + ) + m.X_UB = Param( + ['I', 'II'], + doc="Reactor conversion upper bound", + initialize={'I': 0.95, 'II': 0.99}, + ) m.C_rxn = Var(bounds=(1.5, 2.5), doc="Cost of reactor") - m.reactor_choice = Disjunction(expr=[ - # Disjunct 1: Choose reactor I - [m.F == m.alpha['I'] * m.X + m.beta['I'], - m.X_LB['I'] <= m.X, - m.X <= m.X_UB['I'], - m.C_rxn == m.c['I']], - # Disjunct 2: Choose reactor II - [m.F == m.alpha['II'] * m.X + m.beta['II'], - m.X_LB['II'] <= m.X, - m.X <= m.X_UB['II'], - m.C_rxn == m.c['II']] - ], xor=True) + m.reactor_choice = Disjunction( + expr=[ + # Disjunct 1: Choose reactor I + [ + m.F == m.alpha['I'] * m.X + m.beta['I'], + m.X_LB['I'] <= m.X, + m.X <= m.X_UB['I'], + m.C_rxn == m.c['I'], + ], + # Disjunct 2: Choose reactor II + [ + m.F == m.alpha['II'] * m.X + m.beta['II'], + m.X_LB['II'] <= m.X, + m.X <= m.X_UB['II'], + m.C_rxn == m.c['II'], + ], + ], + xor=True, + ) if use_mccormick: m.P = Var(bounds=(0, 8), doc="McCormick approximation of F*X") m.mccormick_1 = Constraint( expr=m.P <= m.F.lb * m.X + m.F * m.X.ub - m.F.lb * m.X.ub, - doc="McCormick overestimator") + doc="McCormick overestimator", + ) m.mccormick_2 = Constraint( expr=m.P <= m.F.ub * m.X + m.F * m.X.lb - m.F.ub * m.X.lb, - doc="McCormick underestimator") + doc="McCormick underestimator", + ) m.max_demand = Constraint(expr=m.P <= m.d, doc="product demand") - m.profit = Objective( - expr=m.c[1] * m.P - m.c[2] * m.F - m.C_rxn, sense=maximize) + m.profit = Objective(expr=m.c[1] * m.P - m.c[2] * m.F - m.C_rxn, sense=maximize) else: m.max_demand = Constraint(expr=m.F * m.X <= m.d, doc="product demand") m.profit = Objective( - expr=m.c[1] * m.F * m.X - m.c[2] * m.F - m.C_rxn, sense=maximize) + expr=m.c[1] * m.F * m.X - m.c[2] * m.F - m.C_rxn, sense=maximize + ) return m diff --git a/examples/kernel/blocks.py b/examples/kernel/blocks.py index 3420d239a6b..7036981dcc8 100644 --- a/examples/kernel/blocks.py +++ b/examples/kernel/blocks.py @@ -7,50 +7,47 @@ # define a simple optimization model b = pmo.block() b.x = pmo.variable() -b.c = pmo.constraint(expr= b.x >= 1) -b.o = pmo.objective(expr= b.x) +b.c = pmo.constraint(expr=b.x >= 1) +b.o = pmo.objective(expr=b.x) # define an optimization model with indexed containers b = pmo.block() b.p = pmo.parameter() -b.plist = pmo.parameter_list(pmo.parameter() - for i in range(10)) -b.pdict = pmo.parameter_dict(((i,j), pmo.parameter()) - for i in range(10) - for j in range(10)) +b.plist = pmo.parameter_list(pmo.parameter() for i in range(10)) +b.pdict = pmo.parameter_dict( + ((i, j), pmo.parameter()) for i in range(10) for j in range(10) +) b.x = pmo.variable() -b.xlist = pmo.variable_list(pmo.variable() - for i in range(10)) -b.xdict = pmo.variable_dict(((i,j), pmo.variable()) - for i in range(10) - for j in range(10)) +b.xlist = pmo.variable_list(pmo.variable() for i in range(10)) +b.xdict = pmo.variable_dict( + ((i, j), pmo.variable()) for i in range(10) for j in range(10) +) b.c = pmo.constraint(b.x >= 1) -b.clist = pmo.constraint_list( - pmo.constraint(b.xlist[i] >= i) - for i in range(10)) +b.clist = pmo.constraint_list(pmo.constraint(b.xlist[i] >= i) for i in range(10)) b.cdict = pmo.constraint_dict( - ((i,j), pmo.constraint(b.xdict[i,j] >= i * j)) + ((i, j), pmo.constraint(b.xdict[i, j] >= i * j)) for i in range(10) - for j in range(10)) + for j in range(10) +) -b.o = pmo.objective( - b.x + sum(b.xlist) + sum(b.xdict.values())) +b.o = pmo.objective(b.x + sum(b.xlist) + sum(b.xdict.values())) # # Define a custom block # + class Widget(pmo.block): def __init__(self, p, input=None): super(Widget, self).__init__() self.p = pmo.parameter(value=p) self.input = pmo.expression(expr=input) self.output = pmo.variable() - self.c = pmo.constraint( - self.output == self.input**2 / self.p) + self.c = pmo.constraint(self.output == self.input**2 / self.p) + b = pmo.block() b.x = pmo.variable() diff --git a/examples/kernel/conic.py b/examples/kernel/conic.py index 9de5c106a0f..a2a787794a4 100644 --- a/examples/kernel/conic.py +++ b/examples/kernel/conic.py @@ -4,27 +4,23 @@ # Specialized Conic Constraints # -c = pmo.conic.quadratic( - r=pmo.variable(lb=0), - x=[pmo.variable(), pmo.variable()]) +c = pmo.conic.quadratic(r=pmo.variable(lb=0), x=[pmo.variable(), pmo.variable()]) assert not c.has_lb() assert c.has_ub() and (c.ub == 0) assert c.check_convexity_conditions() print(c.body) c = pmo.conic.rotated_quadratic( - r1=pmo.variable(lb=0), - r2=pmo.variable(lb=0), - x=[pmo.variable(), pmo.variable()]) + r1=pmo.variable(lb=0), r2=pmo.variable(lb=0), x=[pmo.variable(), pmo.variable()] +) assert not c.has_lb() assert c.has_ub() and (c.ub == 0) assert c.check_convexity_conditions() print(c.body) c = pmo.conic.primal_exponential( - r=pmo.variable(lb=0), - x1=pmo.variable(lb=0), - x2=pmo.variable()) + r=pmo.variable(lb=0), x1=pmo.variable(lb=0), x2=pmo.variable() +) assert not c.has_lb() assert c.has_ub() and (c.ub == 0) assert c.check_convexity_conditions() @@ -34,16 +30,16 @@ r1=pmo.variable(lb=0), r2=pmo.variable(lb=0), x=[pmo.variable(), pmo.variable()], - alpha=0.5) + alpha=0.5, +) assert not c.has_lb() assert c.has_ub() and (c.ub == 0) assert c.check_convexity_conditions() print(c.body) c = pmo.conic.dual_exponential( - r=pmo.variable(lb=0), - x1=pmo.variable(), - x2=pmo.variable(ub=0)) + r=pmo.variable(lb=0), x1=pmo.variable(), x2=pmo.variable(ub=0) +) assert not c.has_lb() assert c.has_ub() and (c.ub == 0) assert c.check_convexity_conditions() @@ -53,7 +49,8 @@ r1=pmo.variable(lb=0), r2=pmo.variable(lb=0), x=[pmo.variable(), pmo.variable()], - alpha=0.5) + alpha=0.5, +) assert not c.has_lb() assert c.has_ub() and (c.ub == 0) assert c.check_convexity_conditions() @@ -67,8 +64,8 @@ # b = pmo.conic.quadratic.as_domain( - r=0.5*pmo.variable(lb=0), - x=[pmo.variable() + 1, 1.5, None, None]) + r=0.5 * pmo.variable(lb=0), x=[pmo.variable() + 1, 1.5, None, None] +) assert type(b.q) is pmo.conic.quadratic assert type(b.c) is pmo.constraint_tuple assert len(b.c) == 3 diff --git a/examples/kernel/constraints.py b/examples/kernel/constraints.py index 809efa4f690..6495ad12f63 100644 --- a/examples/kernel/constraints.py +++ b/examples/kernel/constraints.py @@ -8,7 +8,7 @@ c = pmo.constraint(v == 1) -c = pmo.constraint(expr= v == 1) +c = pmo.constraint(expr=v == 1) c = pmo.constraint(body=v, rhs=1) @@ -22,7 +22,7 @@ c = pmo.constraint(v <= 1) -c = pmo.constraint(expr= v <= 1) +c = pmo.constraint(expr=v <= 1) c = pmo.constraint(body=v, ub=1) @@ -32,7 +32,7 @@ c = pmo.constraint(v >= 1) -c = pmo.constraint(expr= v >= 1) +c = pmo.constraint(expr=v >= 1) c = pmo.constraint(body=v, lb=1) @@ -46,7 +46,7 @@ c = pmo.constraint((0, v, 1)) -c = pmo.constraint(expr= (0, v, 1)) +c = pmo.constraint(expr=(0, v, 1)) c = pmo.constraint(lb=0, body=v, ub=1) diff --git a/examples/kernel/containers.py b/examples/kernel/containers.py index f15c41b4e19..9b525e87af6 100644 --- a/examples/kernel/containers.py +++ b/examples/kernel/containers.py @@ -4,14 +4,13 @@ # List containers # -vl = pmo.variable_list( - pmo.variable() for i in range(10)) +vl = pmo.variable_list(pmo.variable() for i in range(10)) cl = pmo.constraint_list() for i in range(10): cl.append(pmo.constraint(vl[-1] == 1)) -cl.insert(0, pmo.constraint(vl[0]**2 >= 1)) +cl.insert(0, pmo.constraint(vl[0] ** 2 >= 1)) del cl[0] @@ -19,18 +18,16 @@ # Dict containers # -vd = pmo.variable_dict( - ((str(i), pmo.variable()) for i in range(10))) +vd = pmo.variable_dict(((str(i), pmo.variable()) for i in range(10))) -cd = pmo.constraint_dict( - (i, pmo.constraint(v == 1)) for i,v in vd.items()) +cd = pmo.constraint_dict((i, pmo.constraint(v == 1)) for i, v in vd.items()) cd = pmo.constraint_dict() for i, v in vd.items(): cd[i] = pmo.constraint(v == 1) cd = pmo.constraint_dict() -cd.update((i, pmo.constraint()) for i,v in vd.items()) +cd.update((i, pmo.constraint()) for i, v in vd.items()) cd[None] = pmo.constraint() diff --git a/examples/kernel/expressions.py b/examples/kernel/expressions.py index be083b82208..1756e5d3fd4 100644 --- a/examples/kernel/expressions.py +++ b/examples/kernel/expressions.py @@ -10,7 +10,7 @@ assert e() == None assert e.expr == None -e = pmo.expression(expr= v**2 + 1) +e = pmo.expression(expr=v**2 + 1) assert e() == 5 assert pmo.value(e) == 5 assert pmo.value(e.expr) == 5 @@ -19,8 +19,8 @@ e.expr = v - 1 assert pmo.value(e) == 1 -esub = pmo.expression(expr= v + 1) -e = pmo.expression(expr= esub + 1) +esub = pmo.expression(expr=v + 1) +e = pmo.expression(expr=esub + 1) assert pmo.value(esub) == 3 assert pmo.value(e) == 4 @@ -47,6 +47,6 @@ assert pmo.value(c.lb) == 0 # the following will result in an error -#e = pmo.expression() -#c = pmo.constraint() -#c.lb = e +# e = pmo.expression() +# c = pmo.constraint() +# c.lb = e diff --git a/examples/kernel/mosek/geometric1.py b/examples/kernel/mosek/geometric1.py index b75f0ed5da3..b5ec59541c4 100644 --- a/examples/kernel/mosek/geometric1.py +++ b/examples/kernel/mosek/geometric1.py @@ -2,84 +2,75 @@ import pyomo.kernel as pmo -def solve_nonlinear(Aw, Af, alpha, beta, gamma, delta): +def solve_nonlinear(Aw, Af, alpha, beta, gamma, delta): m = pmo.block() m.h = pmo.variable(lb=0) m.w = pmo.variable(lb=0) m.d = pmo.variable(lb=0) - m.c = pmo.constraint_tuple([ - pmo.constraint(body=2*(m.h*m.w + m.h*m.d), ub=Aw), - pmo.constraint(body=m.w*m.d, - ub=Af), - pmo.constraint(lb=alpha, - body=m.h/m.w, - ub=beta), - pmo.constraint(lb=gamma, - body=m.d/m.w, - ub=delta)]) - - m.o = pmo.objective(m.h * m.w * m.d, - sense=pmo.maximize) - - m.h.value, m.w.value, m.d.value = (1,1,1) + m.c = pmo.constraint_tuple( + [ + pmo.constraint(body=2 * (m.h * m.w + m.h * m.d), ub=Aw), + pmo.constraint(body=m.w * m.d, ub=Af), + pmo.constraint(lb=alpha, body=m.h / m.w, ub=beta), + pmo.constraint(lb=gamma, body=m.d / m.w, ub=delta), + ] + ) + + m.o = pmo.objective(m.h * m.w * m.d, sense=pmo.maximize) + + m.h.value, m.w.value, m.d.value = (1, 1, 1) ipopt = pmo.SolverFactory("ipopt") result = ipopt.solve(m) assert str(result.solver.termination_condition) == "optimal" print("nonlinear solution:") - print("h: {0:.4f}, w: {1:.4f}, d: {2:.4f}".\ - format(m.h(), m.w(), m.d())) - print("volume: {0: .5f}".\ - format(m.o())) + print("h: {0:.4f}, w: {1:.4f}, d: {2:.4f}".format(m.h(), m.w(), m.d())) + print("volume: {0: .5f}".format(m.o())) print("") -def solve_conic(Aw, Af, alpha, beta, gamma, delta): +def solve_conic(Aw, Af, alpha, beta, gamma, delta): m = pmo.block() m.x = pmo.variable() m.y = pmo.variable() m.z = pmo.variable() - m.k = pmo.block_tuple([ - pmo.conic.primal_exponential.\ - as_domain(r=None, - x1=1, - x2=m.x + m.y + pmo.log(2.0/Aw)), - pmo.conic.primal_exponential.\ - as_domain(r=None, - x1=1, - x2=m.x + m.z + pmo.log(2.0/Aw))]) - - m.c = pmo.constraint_tuple([ - pmo.constraint(body=m.k[0].r + m.k[1].r, - ub=1), - pmo.constraint(body=m.y + m.z, ub=pmo.log(Af)), - pmo.constraint(lb=pmo.log(alpha), - body=m.x - m.y, - ub=pmo.log(beta)), - pmo.constraint(lb=pmo.log(gamma), - body=m.z - m.y, - ub=pmo.log(delta))]) - - m.o = pmo.objective(m.x + m.y + m.z, - sense=pmo.maximize) + m.k = pmo.block_tuple( + [ + pmo.conic.primal_exponential.as_domain( + r=None, x1=1, x2=m.x + m.y + pmo.log(2.0 / Aw) + ), + pmo.conic.primal_exponential.as_domain( + r=None, x1=1, x2=m.x + m.z + pmo.log(2.0 / Aw) + ), + ] + ) + + m.c = pmo.constraint_tuple( + [ + pmo.constraint(body=m.k[0].r + m.k[1].r, ub=1), + pmo.constraint(body=m.y + m.z, ub=pmo.log(Af)), + pmo.constraint(lb=pmo.log(alpha), body=m.x - m.y, ub=pmo.log(beta)), + pmo.constraint(lb=pmo.log(gamma), body=m.z - m.y, ub=pmo.log(delta)), + ] + ) + + m.o = pmo.objective(m.x + m.y + m.z, sense=pmo.maximize) mosek = pmo.SolverFactory("mosek_direct") result = mosek.solve(m) assert str(result.solver.termination_condition) == "optimal" h, w, d = pmo.exp(m.x()), pmo.exp(m.y()), pmo.exp(m.z()) print("conic solution:") - print("h: {0:.4f}, w: {1:.4f}, d: {2:.4f}".\ - format(h, w, d)) - print("volume: {0: .5f}".\ - format(h*w*d)) + print("h: {0:.4f}, w: {1:.4f}, d: {2:.4f}".format(h, w, d)) + print("volume: {0: .5f}".format(h * w * d)) print("") + if __name__ == "__main__": - Aw, Af, alpha, beta, gamma, delta = \ - 200.0, 50.0, 2.0, 10.0, 2.0, 10.0 + Aw, Af, alpha, beta, gamma, delta = 200.0, 50.0, 2.0, 10.0, 2.0, 10.0 solve_nonlinear(Aw, Af, alpha, beta, gamma, delta) solve_conic(Aw, Af, alpha, beta, gamma, delta) diff --git a/examples/kernel/mosek/geometric2.py b/examples/kernel/mosek/geometric2.py index 8a6d5074ce3..84825c0a39b 100644 --- a/examples/kernel/mosek/geometric2.py +++ b/examples/kernel/mosek/geometric2.py @@ -3,36 +3,34 @@ import pyomo.kernel as pmo -def solve_nonlinear(): +def solve_nonlinear(): m = pmo.block() m.x = pmo.variable() m.y = pmo.variable() m.z = pmo.variable() - m.c = pmo.constraint_tuple([ - pmo.constraint(body=0.1*pmo.sqrt(m.x) + (2.0/m.y), - ub=1), - pmo.constraint(body=(1.0/m.z) + (m.y/(m.x**2)), - ub=1)]) + m.c = pmo.constraint_tuple( + [ + pmo.constraint(body=0.1 * pmo.sqrt(m.x) + (2.0 / m.y), ub=1), + pmo.constraint(body=(1.0 / m.z) + (m.y / (m.x**2)), ub=1), + ] + ) - m.o = pmo.objective(m.x + (m.y**2)*m.z, - sense=pmo.minimize) + m.o = pmo.objective(m.x + (m.y**2) * m.z, sense=pmo.minimize) - m.x.value, m.y.value, m.z.value = (1,1,1) + m.x.value, m.y.value, m.z.value = (1, 1, 1) ipopt = pmo.SolverFactory("ipopt") result = ipopt.solve(m) assert str(result.solver.termination_condition) == "optimal" print("nonlinear solution:") - print("x: {0:.4f}, y: {1:.4f}, z: {2:.4f}".\ - format(m.x(), m.y(), m.z())) - print("objective: {0: .5f}".\ - format(m.o())) + print("x: {0:.4f}, y: {1:.4f}, z: {2:.4f}".format(m.x(), m.y(), m.z())) + print("objective: {0: .5f}".format(m.o())) print("") -def solve_conic(): +def solve_conic(): m = pmo.block() m.t = pmo.variable() @@ -40,57 +38,44 @@ def solve_conic(): m.v = pmo.variable() m.w = pmo.variable() - m.k = pmo.block_tuple([ - # exp(u-t) + exp(2v + w - t) <= 1 - pmo.conic.primal_exponential.\ - as_domain(r=None, - x1=1, - x2=m.u - m.t), - pmo.conic.primal_exponential.\ - as_domain(r=None, - x1=1, - x2=2*m.v + m.w - m.t), - # exp(0.5u + log(0.1)) + exp(-v + log(2)) <= 1 - pmo.conic.primal_exponential.\ - as_domain(r=None, - x1=1, - x2=0.5*m.u + pmo.log(0.1)), - pmo.conic.primal_exponential.\ - as_domain(r=None, - x1=1, - x2=-m.v + pmo.log(2)), - # exp(-w) + exp(v-2u) <= 1 - pmo.conic.primal_exponential.\ - as_domain(r=None, - x1=1, - x2=-m.w), - pmo.conic.primal_exponential.\ - as_domain(r=None, - x1=1, - x2=m.v - 2*m.u)]) - - m.c = pmo.constraint_tuple([ - pmo.constraint(body=m.k[0].r + m.k[1].r, - ub=1), - pmo.constraint(body=m.k[2].r + m.k[3].r, - ub=1), - pmo.constraint(body=m.k[4].r + m.k[5].r, - ub=1)]) - - m.o = pmo.objective(m.t, - sense=pmo.minimize) + m.k = pmo.block_tuple( + [ + # exp(u-t) + exp(2v + w - t) <= 1 + pmo.conic.primal_exponential.as_domain(r=None, x1=1, x2=m.u - m.t), + pmo.conic.primal_exponential.as_domain( + r=None, x1=1, x2=2 * m.v + m.w - m.t + ), + # exp(0.5u + log(0.1)) + exp(-v + log(2)) <= 1 + pmo.conic.primal_exponential.as_domain( + r=None, x1=1, x2=0.5 * m.u + pmo.log(0.1) + ), + pmo.conic.primal_exponential.as_domain(r=None, x1=1, x2=-m.v + pmo.log(2)), + # exp(-w) + exp(v-2u) <= 1 + pmo.conic.primal_exponential.as_domain(r=None, x1=1, x2=-m.w), + pmo.conic.primal_exponential.as_domain(r=None, x1=1, x2=m.v - 2 * m.u), + ] + ) + + m.c = pmo.constraint_tuple( + [ + pmo.constraint(body=m.k[0].r + m.k[1].r, ub=1), + pmo.constraint(body=m.k[2].r + m.k[3].r, ub=1), + pmo.constraint(body=m.k[4].r + m.k[5].r, ub=1), + ] + ) + + m.o = pmo.objective(m.t, sense=pmo.minimize) mosek = pmo.SolverFactory("mosek_direct") result = mosek.solve(m) assert str(result.solver.termination_condition) == "optimal" x, y, z = pmo.exp(m.u()), pmo.exp(m.v()), pmo.exp(m.w()) print("conic solution:") - print("x: {0:.4f}, y: {1:.4f}, z: {2:.4f}".\ - format(x, y, z)) - print("objective: {0: .5f}".\ - format(x + (y**2)*z)) + print("x: {0:.4f}, y: {1:.4f}, z: {2:.4f}".format(x, y, z)) + print("objective: {0: .5f}".format(x + (y**2) * z)) print("") + if __name__ == "__main__": solve_nonlinear() solve_conic() diff --git a/examples/kernel/mosek/maximum_volume_cuboid.py b/examples/kernel/mosek/maximum_volume_cuboid.py new file mode 100644 index 00000000000..92e210cf400 --- /dev/null +++ b/examples/kernel/mosek/maximum_volume_cuboid.py @@ -0,0 +1,124 @@ +from scipy.spatial import ConvexHull +from mpl_toolkits.mplot3d import Axes3D +from mpl_toolkits.mplot3d.art3d import Poly3DCollection +import matplotlib.pyplot as plt +import itertools +import numpy as np + +import pyomo.kernel as pmo + +# Vertices of a regular icosahedron with edge length 2 +f = (1 + np.sqrt(5)) / 2 +icosahedron = np.array( + [ + [0, 1, f], + [0, -1, f], + [0, 1, -f], + [0, -1, -f], + [1, f, 0], + [1, -f, 0], + [-1, f, 0], + [-1, -f, 0], + [f, 0, 1], + [-f, 0, 1], + [f, 0, -1], + [-f, 0, -1], + ] +) +print(f"Volume of the icosahedron = {2.18169699*8}") + + +def convex_hull_constraint(model, p_v, c_v, v_index): + A = np.vstack( + ( + np.eye(len(model.p)), # p-variable coefficients + np.diag(c_v), # x-variable coefficients + p_v, + ) + ) # u-variable coefficients + A = np.transpose(A) + # Sum(u_i) = 1 + row = [0] * len(list(model.p) + list(model.x)) + [1] * len(model.u[v_index]) + A = np.vstack([A, row]) + # x + var_vector = list(model.p) + list(model.x) + list(model.u[v_index]) + # b + b = np.array([0] * A.shape[0]) + b[-1] = 1 + + # Matrix constraint ( Ax = b ) + return pmo.matrix_constraint(A, rhs=b, x=var_vector) + + +def pyomo_maxVolCuboid(vertices): + m, n = len(vertices), len(vertices[0]) + + model = pmo.block() + model.cuboid_vertices = list(itertools.product([0, 1], repeat=n)) + + # Variables + model.x = pmo.variable_list(pmo.variable(lb=0.0) for i in range(n)) + model.p = pmo.variable_list(pmo.variable() for i in range(n)) + model.t = pmo.variable() + + model.u = pmo.variable_list( + pmo.variable_list(pmo.variable(lb=0.0) for j in range(m)) for i in range(2**n) + ) + + # Maximize: (volume_of_cuboid)**1/n + model.cuboid_volume = pmo.objective(model.t, sense=-1) + # Cone: Geometric-mean conic constraint + model.geo_cone = pmo.conic.primal_geomean(r=model.x, x=model.t) + + # K : Convex hull formed by the vertices of the polyhedron + model.conv_hull = pmo.constraint_list() + for i in range(2**n): + model.conv_hull.append( + convex_hull_constraint(model, vertices, model.cuboid_vertices[i], i) + ) + + opt = pmo.SolverFactory("mosek") + result = opt.solve(model, tee=True) + + _x = np.array([x.value for x in model.x]) + _p = np.array([p.value for p in model.p]) + cuboid_vertices = np.array([_p + e * _x for e in model.cuboid_vertices]) + return cuboid_vertices + + +cuboid = pyomo_maxVolCuboid(icosahedron) + + +# Make an interactive 3-D plot + + +def inscribed_cuboid_plot(icosahedron, cuboid): + fig = plt.figure(figsize=(5, 5)) + ax = fig.add_subplot(111, projection="3d") + + ico_hull = ConvexHull(icosahedron) + for s in ico_hull.simplices: + tri = Poly3DCollection([icosahedron[s]]) + tri.set_edgecolor('black') + tri.set_alpha(0.3) + tri.set_facecolor('red') + ax.add_collection3d(tri) + ax.scatter( + icosahedron[:, 0], icosahedron[:, 1], icosahedron[:, 2], color='darkred' + ) + + cub_hull = ConvexHull(cuboid) + for s in cub_hull.simplices: + tri = Poly3DCollection([cuboid[s]]) + # tri.set_edgecolor('black') + tri.set_alpha(0.8) + tri.set_facecolor('blue') + ax.add_collection3d(tri) + + ax.set_xlim(-2, 2) + ax.set_ylim(-2, 2) + ax.set_zlim(-2, 2) + plt.show() + + +inscribed_cuboid_plot(icosahedron, cuboid) diff --git a/examples/kernel/mosek/power1.py b/examples/kernel/mosek/power1.py index acbf2633f54..7274b587dae 100644 --- a/examples/kernel/mosek/power1.py +++ b/examples/kernel/mosek/power1.py @@ -2,33 +2,31 @@ import pyomo.kernel as pmo -def solve_nonlinear(): +def solve_nonlinear(): m = pmo.block() m.x = pmo.variable(lb=0) m.y = pmo.variable(lb=0) m.z = pmo.variable(lb=0) - m.c = pmo.constraint(body=m.x + m.y + 0.5*m.z, - rhs=2) + m.c = pmo.constraint(body=m.x + m.y + 0.5 * m.z, rhs=2) - m.o = pmo.objective((m.x**0.2)*(m.y**0.8) + (m.z**0.4) - m.x, - sense=pmo.maximize) + m.o = pmo.objective( + (m.x**0.2) * (m.y**0.8) + (m.z**0.4) - m.x, sense=pmo.maximize + ) - m.x.value, m.y.value, m.z.value = (1,1,1) + m.x.value, m.y.value, m.z.value = (1, 1, 1) ipopt = pmo.SolverFactory("ipopt") result = ipopt.solve(m) assert str(result.solver.termination_condition) == "optimal" print("nonlinear solution:") - print("x: {0:.4f}, y: {1:.4f}, z: {2:.4f}".\ - format(m.x(), m.y(), m.z())) - print("objective: {0: .5f}".\ - format(m.o())) + print("x: {0:.4f}, y: {1:.4f}, z: {2:.4f}".format(m.x(), m.y(), m.z())) + print("objective: {0: .5f}".format(m.o())) print("") -def solve_conic(): +def solve_conic(): m = pmo.block() m.x = pmo.variable(lb=0) @@ -39,32 +37,26 @@ def solve_conic(): m.q = pmo.variable() m.r = pmo.variable(lb=0) - m.k = pmo.block_tuple([ - pmo.conic.primal_power.as_domain(r1=m.x, - r2=m.y, - x=[None], - alpha=0.2), - pmo.conic.primal_power.as_domain(r1=m.z, - r2=1, - x=[None], - alpha=0.4)]) + m.k = pmo.block_tuple( + [ + pmo.conic.primal_power.as_domain(r1=m.x, r2=m.y, x=[None], alpha=0.2), + pmo.conic.primal_power.as_domain(r1=m.z, r2=1, x=[None], alpha=0.4), + ] + ) - m.c = pmo.constraint(body=m.x + m.y + 0.5*m.z, - rhs=2) + m.c = pmo.constraint(body=m.x + m.y + 0.5 * m.z, rhs=2) - m.o = pmo.objective(m.k[0].x[0] + m.k[1].x[0] - m.x, - sense=pmo.maximize) + m.o = pmo.objective(m.k[0].x[0] + m.k[1].x[0] - m.x, sense=pmo.maximize) mosek = pmo.SolverFactory("mosek_direct") result = mosek.solve(m) assert str(result.solver.termination_condition) == "optimal" print("conic solution:") - print("x: {0:.4f}, y: {1:.4f}, z: {2:.4f}".\ - format(m.x(), m.y(), m.z())) - print("objective: {0: .5f}".\ - format(m.o())) + print("x: {0:.4f}, y: {1:.4f}, z: {2:.4f}".format(m.x(), m.y(), m.z())) + print("objective: {0: .5f}".format(m.o())) print("") + if __name__ == "__main__": solve_nonlinear() solve_conic() diff --git a/examples/kernel/mosek/semidefinite.py b/examples/kernel/mosek/semidefinite.py new file mode 100644 index 00000000000..44ab7c95a68 --- /dev/null +++ b/examples/kernel/mosek/semidefinite.py @@ -0,0 +1,118 @@ +# Source: https://docs.mosek.com/latest/pythonfusion/tutorial-sdo-shared.html#doc-tutorial-sdo + +# This examples illustrates SDP formulations in Pyomo using +# the MOSEK interface. The following functions construct the +# same problem, but in the primal and dual forms respectively. +# +# Read more about SDP duality in the MOSEK modeling cookbook. + +import numpy as np +import pyomo.kernel as pmo + +from pyomo.core import sum_product + +# GENERAL SDP (PRIMAL FORM) +# min + c*x +# s.t. + a_i*x = b_i i = 1,...,m +# x \in K ; barX \in PSD_CONE + + +def primal_sdo1(): + # Problem data + d = 3 + n = int(d * (d + 1) / 2) + + # PSD matrices + # NOTE: As the matrices are symmetric (required) + # we only specify lower-triangular part + # and the off-diagonal elements are doubled. + barC = [2, 2, 0, 2, 2, 2] + barA1 = [1, 0, 0, 1, 0, 1] + barA2 = [1, 2, 2, 1, 2, 1] + + model = pmo.block() + + # VARIABLES + model.x = pmo.variable_list(pmo.variable() for i in range(d)) + model.X = pmo.variable_list(pmo.variable() for i in range(n)) + + # CONSTRAINTS + # Linear + model.c1 = pmo.constraint( + sum_product(barA1, model.X, index=list(range(n))) + model.x[0] == 1 + ) + model.c2 = pmo.constraint( + sum_product(barA2, model.X, index=list(range(n))) + model.x[1] + model.x[2] + == 0.5 + ) + # Conic + model.quad_cone = pmo.conic.quadratic(r=model.x[0], x=model.x[1:]) + # Off-diagonal elements need to be scaled by sqrt(2) in SVEC_PSD domain + scale = [1, np.sqrt(2), np.sqrt(2), 1, np.sqrt(2), 1] + model.psd_cone = pmo.conic.svec_psdcone.as_domain( + x=[model.X[i] * scale[i] for i in range(n)] + ) + + # OBJECTIVE + model.obj = pmo.objective( + sum_product(barC, model.X, index=list(range(n))) + model.x[0] + ) + + msk = pmo.SolverFactory('mosek') + results = msk.solve(model, tee=True) + + return results + + +# GENERAL SDP (DUAL FORM) +# +# max. b*y +# s.t. barC - sum(y_i, barA_i) \in PSD_CONE +# c - A*y \in K +# +# NOTE: the PSD constraint here is in the LMI (linear-matrix-inequality) form + + +def dual_sdo1(): + # Problem data + d = 3 + n = int(d * (d + 1) / 2) + + c = [1, 0, 0] + a_T = [[1, 0], [0, 1], [0, 1]] + + # PSD matrices + barC = [2, np.sqrt(2), 0, 2, np.sqrt(2), 2] + barA1 = [1, 0, 0, 1, 0, 1] + barA2 = [1, np.sqrt(2), np.sqrt(2), 1, np.sqrt(2), 1] + + model = pmo.block() + + # VARIABLES + model.y = pmo.variable_list(pmo.variable() for i in range(2)) + + # CONSTRAINTS + e1 = pmo.expression_list( + pmo.expression(barC[i] - model.y[0] * barA1[i] - model.y[1] * barA2[i]) + for i in range(n) + ) + model.psd_cone = pmo.conic.svec_psdcone.as_domain(x=e1) + + e2 = pmo.expression_list( + pmo.expression(c[i] - sum_product(a_T[i], model.y, index=[0, 1])) + for i in range(3) + ) + model.quad_cone = pmo.conic.quadratic.as_domain(r=e2[0], x=e2[1:]) + + # OBJECTIVE + model.obj = pmo.objective(model.y[0] + 0.5 * model.y[1], sense=-1) + + msk = pmo.SolverFactory('mosek') + results = msk.solve(model, tee=True) + + return results + + +if __name__ == '__main__': + primal_sdo1() + dual_sdo1() diff --git a/examples/kernel/objectives.py b/examples/kernel/objectives.py index 4eb97a2c317..7d87671ef8d 100644 --- a/examples/kernel/objectives.py +++ b/examples/kernel/objectives.py @@ -10,7 +10,7 @@ assert o() == None assert o.expr == None -o = pmo.objective(expr= v**2 + 1) +o = pmo.objective(expr=v**2 + 1) assert o() == 5 assert pmo.value(o) == 5 assert pmo.value(o.expr) == 5 @@ -19,8 +19,8 @@ o.expr = v - 1 assert pmo.value(o) == 1 -osub = pmo.objective(expr= v + 1) -o = pmo.objective(expr= osub + 1) +osub = pmo.objective(expr=v + 1) +o = pmo.objective(expr=osub + 1) assert pmo.value(osub) == 3 assert pmo.value(o) == 4 diff --git a/examples/kernel/parameters.py b/examples/kernel/parameters.py index 09f98c638b5..55b230add6b 100644 --- a/examples/kernel/parameters.py +++ b/examples/kernel/parameters.py @@ -16,7 +16,7 @@ assert pmo.value(p - 1) == 3 v = pmo.variable() -c = pmo.constraint((p-1, v, p+1)) +c = pmo.constraint((p - 1, v, p + 1)) assert pmo.value(c.lb) == 3 assert pmo.value(c.ub) == 5 diff --git a/examples/kernel/piecewise_functions.py b/examples/kernel/piecewise_functions.py index 2cfe80019f5..528d4c16791 100644 --- a/examples/kernel/piecewise_functions.py +++ b/examples/kernel/piecewise_functions.py @@ -4,17 +4,12 @@ # Piecewise linear constraints # -breakpoints = [1,2,3,4] -values = [1,2,1,2] +breakpoints = [1, 2, 3, 4] +values = [1, 2, 1, 2] x = pmo.variable(lb=1, ub=4) y = pmo.variable() -p = pmo.piecewise(breakpoints, - values, - input=x, - output=y, - repn='sos2', - bound='eq') +p = pmo.piecewise(breakpoints, values, input=x, output=y, repn='sos2', bound='eq') # change the input and output variables z = pmo.variable(lb=1, ub=4) @@ -35,21 +30,16 @@ assert p(2.5) == 1.5 assert p(4) == 2 -breakpoints = [pmo.parameter(1), - pmo.parameter(2), - pmo.parameter(3), - pmo.parameter(None)] -values = [pmo.parameter(1), - pmo.parameter(2), - pmo.parameter(1), - pmo.parameter(None)] -p = pmo.piecewise(breakpoints, - values, - input=x, - output=y, - repn='sos2', - bound='eq', - validate=False) +breakpoints = [ + pmo.parameter(1), + pmo.parameter(2), + pmo.parameter(3), + pmo.parameter(None), +] +values = [pmo.parameter(1), pmo.parameter(2), pmo.parameter(1), pmo.parameter(None)] +p = pmo.piecewise( + breakpoints, values, input=x, output=y, repn='sos2', bound='eq', validate=False +) # change the function parameters and # validate that the inputs are correct @@ -79,8 +69,4 @@ m.o = pmo.objective(m.y) -m.pw = pmo.piecewise(breakpoints, - function_points, - input=m.x, - output=m.y, - repn='inc') +m.pw = pmo.piecewise(breakpoints, function_points, input=m.x, output=m.y, repn='inc') diff --git a/examples/kernel/piecewise_nd_functions.py b/examples/kernel/piecewise_nd_functions.py index 639a065b53b..847bb5f4a84 100644 --- a/examples/kernel/piecewise_nd_functions.py +++ b/examples/kernel/piecewise_nd_functions.py @@ -12,16 +12,19 @@ # Set to True to show 3d plots show_plots = False + def f(x, y, package=pmo): - return (-20 * package.exp( - -2.0 * package.sqrt(0.5 * (x**2 + y**2))) - - package.exp( - 0.5 * (package.cos(2*np.pi*x) + \ - package.cos(2*np.pi*y))) + \ - np.e + 20.0) + return ( + -20 * package.exp(-2.0 * package.sqrt(0.5 * (x**2 + y**2))) + - package.exp(0.5 * (package.cos(2 * np.pi * x) + package.cos(2 * np.pi * y))) + + np.e + + 20.0 + ) + def g(x, y, package=pmo): - return (x-3)**2 + (y-1)**2 + return (x - 3) ** 2 + (y - 1) ** 2 + m = pmo.block() m.x = pmo.variable(lb=-5, ub=5) @@ -43,19 +46,11 @@ def g(x, y, package=pmo): pw_xarray, pw_yarray = np.transpose(tri.points) fvals = f(pw_xarray, pw_yarray, package=np) -pw_f = pmo.piecewise_nd(tri, - fvals, - input=[m.x,m.y], - output=m.z, - bound='lb') +pw_f = pmo.piecewise_nd(tri, fvals, input=[m.x, m.y], output=m.z, bound='lb') m.approx.pw_f = pw_f gvals = g(pw_xarray, pw_yarray, package=np) -pw_g = pmo.piecewise_nd(tri, - gvals, - input=[m.x,m.y], - output=m.z, - bound='eq') +pw_g = pmo.piecewise_nd(tri, gvals, input=[m.x, m.y], output=m.z, bound='eq') m.approx.pw_g = pw_g # @@ -70,14 +65,10 @@ def g(x, y, package=pmo): assert str(status.solver.status) == "ok" assert str(status.solver.termination_condition) == "optimal" -print("Approximate f value at MIP solution: %s" - % (pw_f((m.x.value, m.y.value)))) -print("Approximate g value at MIP solution: %s" - % (pw_g((m.x.value, m.y.value)))) -print("Real f value at MIP solution: %s" - % (f(m.x.value, m.y.value))) -print("Real g value at MIP solution: %s" - % (g(m.x.value, m.y.value))) +print("Approximate f value at MIP solution: %s" % (pw_f((m.x.value, m.y.value)))) +print("Approximate g value at MIP solution: %s" % (pw_g((m.x.value, m.y.value)))) +print("Real f value at MIP solution: %s" % (f(m.x.value, m.y.value))) +print("Real g value at MIP solution: %s" % (g(m.x.value, m.y.value))) # # Solve the real nonlinear model using a local solver @@ -89,13 +80,10 @@ def g(x, y, package=pmo): status = ipopt.solve(m) assert str(status.solver.status) == "ok" assert str(status.solver.termination_condition) == "optimal" -print("Real f value at NL solution: %s" - % (f(m.x.value, m.y.value))) -print("Real g value at NL solution: %s" - % (f(m.x.value, m.y.value))) +print("Real f value at NL solution: %s" % (f(m.x.value, m.y.value))) +print("Real g value at NL solution: %s" % (f(m.x.value, m.y.value))) if show_plots: - import matplotlib.pylab as plt import mpl_toolkits.mplot3d @@ -105,8 +93,7 @@ def g(x, y, package=pmo): fig = plt.figure() ax = fig.gca(projection='3d') - ax.plot_trisurf(pw_xarray, pw_yarray, fvals, - color='yellow', alpha=0.5) + ax.plot_trisurf(pw_xarray, pw_yarray, fvals, color='yellow', alpha=0.5) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') @@ -118,8 +105,7 @@ def g(x, y, package=pmo): fig = plt.figure() ax = fig.gca(projection='3d') - ax.plot_trisurf(pw_xarray, pw_yarray, gvals, - color='blue', alpha=0.5) + ax.plot_trisurf(pw_xarray, pw_yarray, gvals, color='blue', alpha=0.5) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') @@ -137,18 +123,13 @@ def g(x, y, package=pmo): fig = plt.figure() ax = fig.gca(projection='3d') - ax.scatter(m.x.value, m.y.value, f(m.x.value, m.y.value), - color='black', s=2**6) - ax.plot_surface(xarray, yarray, fvals, - linewidth=0, cmap=plt.cm.jet, - alpha=0.6) + ax.scatter(m.x.value, m.y.value, f(m.x.value, m.y.value), color='black', s=2**6) + ax.plot_surface(xarray, yarray, fvals, linewidth=0, cmap=plt.cm.jet, alpha=0.6) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') - ax.plot_surface(xarray, yarray, gvals, - linewidth=0, cmap=plt.cm.jet, - alpha=0.6) + ax.plot_surface(xarray, yarray, gvals, linewidth=0, cmap=plt.cm.jet, alpha=0.6) ax.set_xlabel('x') ax.set_ylabel('y') ax.set_zlabel('z') diff --git a/examples/kernel/special_ordered_sets.py b/examples/kernel/special_ordered_sets.py index 5bf5ed5ac15..9526a551c12 100644 --- a/examples/kernel/special_ordered_sets.py +++ b/examples/kernel/special_ordered_sets.py @@ -8,23 +8,23 @@ # Special Ordered Sets (Type 1) # -s = pmo.sos([v1,v2]) +s = pmo.sos([v1, v2]) assert s.level == 1 -assert s.weights == (1,2) +assert s.weights == (1, 2) assert len(s.variables) == 2 assert v1 in s assert v2 in s -s = pmo.sos([v1,v2], level=1) +s = pmo.sos([v1, v2], level=1) assert s.level == 1 -assert s.weights == (1,2) +assert s.weights == (1, 2) assert len(s.variables) == 2 assert v1 in s assert v2 in s -s = pmo.sos1([v1,v2]) +s = pmo.sos1([v1, v2]) assert s.level == 1 -assert s.weights == (1,2) +assert s.weights == (1, 2) assert len(s.variables) == 2 assert v1 in s assert v2 in s @@ -33,16 +33,16 @@ # Special Ordered Sets (Type 2) # -s = pmo.sos([v1,v2], level=2) +s = pmo.sos([v1, v2], level=2) assert s.level == 2 -assert s.weights == (1,2) +assert s.weights == (1, 2) assert len(s.variables) == 2 assert v1 in s assert v2 in s -s = pmo.sos2([v1,v2]) +s = pmo.sos2([v1, v2]) assert s.level == 2 -assert s.weights == (1,2) +assert s.weights == (1, 2) assert len(s.variables) == 2 assert v1 in s assert v2 in s @@ -51,9 +51,9 @@ # Special Ordered Sets (Type n) # -s = pmo.sos([v1,v2,v3], level=3) +s = pmo.sos([v1, v2, v3], level=3) assert s.level == 3 -assert s.weights == (1,2,3) +assert s.weights == (1, 2, 3) assert len(s.variables) == 3 assert v1 in s assert v2 in s @@ -64,22 +64,20 @@ # # using known values -s = pmo.sos([v1,v2], weights=[1.2,2.5]) -assert s.weights == (1.2,2.5) +s = pmo.sos([v1, v2], weights=[1.2, 2.5]) +assert s.weights == (1.2, 2.5) -# using paramters -p = pmo.parameter_list( - pmo.parameter() for i in range(2)) -s = pmo.sos([v1,v2], weights=[p[0]**2, p[1]**2]) +# using parameters +p = pmo.parameter_list(pmo.parameter() for i in range(2)) +s = pmo.sos([v1, v2], weights=[p[0] ** 2, p[1] ** 2]) assert len(s.weights) == 2 p[0].value = 1 p[1].value = 2 assert tuple(pmo.value(w) for w in s.weights) == (1, 4) # using data expressions -d = pmo.expression_list( - pmo.data_expression() for i in range(2)) -s = pmo.sos([v1,v2], weights=d) +d = pmo.expression_list(pmo.data_expression() for i in range(2)) +s = pmo.sos([v1, v2], weights=d) assert len(s.weights) == 2 d[0].expr = p[0] + 1 d[1].expr = p[0] + p[1] @@ -93,15 +91,11 @@ m = pmo.block() -m.z = pmo.variable_list( - pmo.variable(lb=0) - for i in range(len(domain))) +m.z = pmo.variable_list(pmo.variable(lb=0) for i in range(len(domain))) m.y = pmo.variable() m.o = pmo.objective(m.y, sense=pmo.maximize) -m.c1 = pmo.constraint( - m.y == sum(v*z for v,z in zip(m.z, domain))) -m.c2 = pmo.constraint( - sum(m.z) == 1) +m.c1 = pmo.constraint(m.y == sum(v * z for v, z in zip(m.z, domain))) +m.c2 = pmo.constraint(sum(m.z) == 1) m.s = pmo.sos1(m.z) diff --git a/examples/kernel/suffixes.py b/examples/kernel/suffixes.py index 480d2d11863..39caa5b8652 100644 --- a/examples/kernel/suffixes.py +++ b/examples/kernel/suffixes.py @@ -7,8 +7,8 @@ # collect dual information when the model is solved b = pmo.block() b.x = pmo.variable() -b.c = pmo.constraint(expr= b.x >= 1) -b.o = pmo.objective(expr= b.x) +b.c = pmo.constraint(expr=b.x >= 1) +b.o = pmo.objective(expr=b.x) b.dual = pmo.suffix(direction=pmo.suffix.IMPORT) # suffixes behave as dictionaries that map diff --git a/examples/kernel/variables.py b/examples/kernel/variables.py index f67ffd99482..7ab571245a1 100644 --- a/examples/kernel/variables.py +++ b/examples/kernel/variables.py @@ -8,14 +8,12 @@ v = pmo.variable(domain=pmo.Reals) -v = pmo.variable(domain=pmo.NonNegativeReals, - ub=10) +v = pmo.variable(domain=pmo.NonNegativeReals, ub=10) -v = pmo.variable(domain_type=pmo.RealSet, - lb=1) +v = pmo.variable(domain_type=pmo.RealSet, lb=1) # error (because domain lower bound is finite) -#v = pmo.variable(domain=pmo.NonNegativeReals, +# v = pmo.variable(domain=pmo.NonNegativeReals, # lb=1) # @@ -26,14 +24,12 @@ v = pmo.variable(domain=pmo.Integers) -v = pmo.variable(domain=pmo.NonNegativeIntegers, - ub=10) +v = pmo.variable(domain=pmo.NonNegativeIntegers, ub=10) -v = pmo.variable(domain_type=pmo.IntegerSet, - lb=1) +v = pmo.variable(domain_type=pmo.IntegerSet, lb=1) # error (because domain upper bound is finite) -#v = pmo.variable(domain=pmo.NegativeIntegers, +# v = pmo.variable(domain=pmo.NegativeIntegers, # ub=10) # diff --git a/examples/mpec/bard1.py b/examples/mpec/bard1.py index 7ff231fe77d..dbe666a7004 100644 --- a/examples/mpec/bard1.py +++ b/examples/mpec/bard1.py @@ -17,13 +17,21 @@ model.y = Var(within=NonNegativeReals) # ... multipliers -model.l = Var([1,2,3]) - -model.f = Objective(expr=(model.x - 5)**2 + (2*model.y + 1)**2) - -model.KKT = Constraint(expr=2*(model.y-1) - 1.5*model.x + model.l[1] - model.l[2]*0.5 + model.l[3] == 0) - -model.lin_1 = Complementarity(expr=complements(0 <= 3*model.x - model.y - 3, model.l[1] >= 0)) -model.lin_2 = Complementarity(expr=complements(0 <= - model.x + 0.5*model.y + 4, model.l[2] >= 0)) -model.lin_3 = Complementarity(expr=complements(0 <= - model.x - model.y + 7, model.l[3] >= 0)) - +model.l = Var([1, 2, 3]) + +model.f = Objective(expr=(model.x - 5) ** 2 + (2 * model.y + 1) ** 2) + +model.KKT = Constraint( + expr=2 * (model.y - 1) - 1.5 * model.x + model.l[1] - model.l[2] * 0.5 + model.l[3] + == 0 +) + +model.lin_1 = Complementarity( + expr=complements(0 <= 3 * model.x - model.y - 3, model.l[1] >= 0) +) +model.lin_2 = Complementarity( + expr=complements(0 <= -model.x + 0.5 * model.y + 4, model.l[2] >= 0) +) +model.lin_3 = Complementarity( + expr=complements(0 <= -model.x - model.y + 7, model.l[3] >= 0) +) diff --git a/examples/mpec/df.py b/examples/mpec/df.py index dd6f8f4e1c0..41984992bdd 100644 --- a/examples/mpec/df.py +++ b/examples/mpec/df.py @@ -24,12 +24,12 @@ from pyomo.mpec import * M = ConcreteModel() -M.x = Var(bounds=(-1,2)) +M.x = Var(bounds=(-1, 2)) M.y = Var() -M.o = Objective(expr=(M.x - 1 - M.y)**2) +M.o = Objective(expr=(M.x - 1 - M.y) ** 2) M.c1 = Constraint(expr=M.x**2 <= 2) -M.c2 = Constraint(expr=(M.x - 1)**2 + (M.y - 1)**2 <= 3) +M.c2 = Constraint(expr=(M.x - 1) ** 2 + (M.y - 1) ** 2 <= 3) M.c3 = Complementarity(expr=complements(M.y - M.x**2 + 1 >= 0, M.y >= 0)) model = M diff --git a/examples/mpec/indexed.py b/examples/mpec/indexed.py index a96b93bda88..b69d5093477 100644 --- a/examples/mpec/indexed.py +++ b/examples/mpec/indexed.py @@ -17,12 +17,13 @@ model = ConcreteModel() -model.x = Var(RangeSet(1,n)) +model.x = Var(RangeSet(1, n)) + +model.f = Objective(expr=sum(i * (model.x[i] - 1) ** 2 for i in range(1, n + 1))) -model.f = Objective(expr=sum(i*(model.x[i]-1)**2 - for i in range(1,n+1))) def compl_(model, i): - return complements(model.x[i] >= 0, model.x[i+1] >= 0) -model.compl = Complementarity(RangeSet(1,n-1), rule=compl_) + return complements(model.x[i] >= 0, model.x[i + 1] >= 0) + +model.compl = Complementarity(RangeSet(1, n - 1), rule=compl_) diff --git a/examples/mpec/linear1.py b/examples/mpec/linear1.py index fd9c37c007d..eba04759ae3 100644 --- a/examples/mpec/linear1.py +++ b/examples/mpec/linear1.py @@ -19,10 +19,9 @@ a = 100 model = ConcreteModel() -model.x1 = Var(bounds=(-2,2)) -model.x2 = Var(bounds=(-1,1)) +model.x1 = Var(bounds=(-2, 2)) +model.x2 = Var(bounds=(-1, 1)) -model.f = Objective(expr=- model.x1 - 2*model.x2) +model.f = Objective(expr=-model.x1 - 2 * model.x2) model.c = Complementarity(expr=complements(model.x1 >= 0, model.x2 >= 0)) - diff --git a/examples/mpec/munson1.py b/examples/mpec/munson1.py index 7bb7dfc478a..debdf709db9 100644 --- a/examples/mpec/munson1.py +++ b/examples/mpec/munson1.py @@ -25,15 +25,10 @@ model.x2 = Var() model.x3 = Var() -model.f1 = Complementarity(expr=\ - complements(model.x1 >= 0, \ - model.x1 + 2*model.x2 + 3*model.x3 >= 1)) +model.f1 = Complementarity( + expr=complements(model.x1 >= 0, model.x1 + 2 * model.x2 + 3 * model.x3 >= 1) +) -model.f2 = Complementarity(expr=\ - complements(model.x2 >= 0, \ - model.x2 - model.x3 >= -1)) - -model.f3 = Complementarity(expr=\ - complements(model.x3 >= 0, \ - model.x1 + model.x2 >= -1)) +model.f2 = Complementarity(expr=complements(model.x2 >= 0, model.x2 - model.x3 >= -1)) +model.f3 = Complementarity(expr=complements(model.x3 >= 0, model.x1 + model.x2 >= -1)) diff --git a/examples/mpec/munson1.yml b/examples/mpec/munson1.yml index 91de39f18e4..02fb0e09884 100644 --- a/examples/mpec/munson1.yml +++ b/examples/mpec/munson1.yml @@ -15,8 +15,8 @@ model: # index of components when transforming the # model. Anything less than 1 disables # index sorting. Anything greater than 1 - # additionaly sorts by component name to - # override declartion order. + # additionally sorts by component name to + # override declaration order. # optimization model is setup). runtime: logging: verbose # Logging level: quiet, warning, info, diff --git a/examples/mpec/munson1a.py b/examples/mpec/munson1a.py index 61a145f13b1..519db4e6ec2 100644 --- a/examples/mpec/munson1a.py +++ b/examples/mpec/munson1a.py @@ -27,15 +27,10 @@ model.x2 = Var() model.x3 = Var() -model.f1 = Complementarity(expr=\ - complements(model.x1 >= 0, \ - model.x1 + 2*model.x2 + 3*model.x3 >= 1)) +model.f1 = Complementarity( + expr=complements(model.x1 >= 0, model.x1 + 2 * model.x2 + 3 * model.x3 >= 1) +) -model.f2 = Complementarity(expr=\ - complements(model.x2 >= 0, \ - model.x2 - model.x3 >= -1)) - -model.f3 = Complementarity(expr=\ - complements(model.x3 >= 0, \ - model.x1 + model.x2 >= -1)) +model.f2 = Complementarity(expr=complements(model.x2 >= 0, model.x2 - model.x3 >= -1)) +model.f3 = Complementarity(expr=complements(model.x3 >= 0, model.x1 + model.x2 >= -1)) diff --git a/examples/mpec/munson1b.py b/examples/mpec/munson1b.py index 59848a5d955..ff2b7b51294 100644 --- a/examples/mpec/munson1b.py +++ b/examples/mpec/munson1b.py @@ -27,15 +27,10 @@ model.x2 = Var() model.x3 = Var() -model.f1 = Complementarity(expr=\ - complements(model.x1 <= 0, \ - - model.x1 - 2*model.x2 - 3*model.x3 >= 1)) +model.f1 = Complementarity( + expr=complements(model.x1 <= 0, -model.x1 - 2 * model.x2 - 3 * model.x3 >= 1) +) -model.f2 = Complementarity(expr=\ - complements(model.x2 <= 0, \ - - model.x2 + model.x3 >= -1)) - -model.f3 = Complementarity(expr=\ - complements(model.x3 <= 0, \ - - model.x1 - model.x2 >= -1)) +model.f2 = Complementarity(expr=complements(model.x2 <= 0, -model.x2 + model.x3 >= -1)) +model.f3 = Complementarity(expr=complements(model.x3 <= 0, -model.x1 - model.x2 >= -1)) diff --git a/examples/mpec/munson1c.py b/examples/mpec/munson1c.py index 1e53d1c15d9..2592b25c515 100644 --- a/examples/mpec/munson1c.py +++ b/examples/mpec/munson1c.py @@ -27,15 +27,10 @@ model.x2 = Var() model.x3 = Var() -model.f1 = Complementarity(expr=\ - complements(model.x1 >= 0, \ - - model.x1 - 2*model.x2 - 3*model.x3 <= -1)) +model.f1 = Complementarity( + expr=complements(model.x1 >= 0, -model.x1 - 2 * model.x2 - 3 * model.x3 <= -1) +) -model.f2 = Complementarity(expr=\ - complements(model.x2 >= 0, \ - - model.x2 + model.x3 <= 1)) - -model.f3 = Complementarity(expr=\ - complements(model.x3 >= 0, \ - - model.x1 - model.x2 <= 1)) +model.f2 = Complementarity(expr=complements(model.x2 >= 0, -model.x2 + model.x3 <= 1)) +model.f3 = Complementarity(expr=complements(model.x3 >= 0, -model.x1 - model.x2 <= 1)) diff --git a/examples/mpec/munson1d.py b/examples/mpec/munson1d.py index d9e304f71d3..0fb08ce73fb 100644 --- a/examples/mpec/munson1d.py +++ b/examples/mpec/munson1d.py @@ -27,15 +27,10 @@ model.x2 = Var() model.x3 = Var() -model.f1 = Complementarity(expr=\ - complements(model.x1 <= 0, \ - model.x1 + 2*model.x2 + 3*model.x3 <= -1)) +model.f1 = Complementarity( + expr=complements(model.x1 <= 0, model.x1 + 2 * model.x2 + 3 * model.x3 <= -1) +) -model.f2 = Complementarity(expr=\ - complements(model.x2 <= 0, \ - model.x2 - model.x3 <= 1)) - -model.f3 = Complementarity(expr=\ - complements(model.x3 <= 0, \ - model.x1 + model.x2 <= 1)) +model.f2 = Complementarity(expr=complements(model.x2 <= 0, model.x2 - model.x3 <= 1)) +model.f3 = Complementarity(expr=complements(model.x3 <= 0, model.x1 + model.x2 <= 1)) diff --git a/examples/mpec/scholtes4.py b/examples/mpec/scholtes4.py index 330b1f129b4..904729780cf 100644 --- a/examples/mpec/scholtes4.py +++ b/examples/mpec/scholtes4.py @@ -13,8 +13,8 @@ model = ConcreteModel() -z_init = {1:0, 2:1} -model.z = Var([1,2], within=NonNegativeReals, initialize=z_init) +z_init = {1: 0, 2: 1} +model.z = Var([1, 2], within=NonNegativeReals, initialize=z_init) model.z3 = Var(initialize=0) model.objf = Objective(expr=model.z[1] + model.z[2] - model.z3) @@ -24,4 +24,3 @@ model.lin2 = Constraint(expr=-4 * model.z[2] + model.z3 <= 0) model.compl = Complementarity(expr=complements(0 <= model.z[1], model.z[2] >= 0)) - diff --git a/examples/performance/dae/run_stochpdegas1_automatic.py b/examples/performance/dae/run_stochpdegas1_automatic.py index 577b35b4446..993e22c7c86 100644 --- a/examples/performance/dae/run_stochpdegas1_automatic.py +++ b/examples/performance/dae/run_stochpdegas1_automatic.py @@ -9,73 +9,134 @@ # discretize model discretizer = TransformationFactory('dae.finite_difference') -discretizer.apply_to(instance,nfe=1,wrt=instance.DIS,scheme='FORWARD') -discretizer.apply_to(instance,nfe=47,wrt=instance.TIME,scheme='BACKWARD') +discretizer.apply_to(instance, nfe=1, wrt=instance.DIS, scheme='FORWARD') +discretizer.apply_to(instance, nfe=47, wrt=instance.TIME, scheme='BACKWARD') # What it should be to match description in paper -#discretizer.apply_to(instance,nfe=48,wrt=instance.TIME,scheme='BACKWARD') +# discretizer.apply_to(instance,nfe=48,wrt=instance.TIME,scheme='BACKWARD') -TimeStep = instance.TIME[2]-instance.TIME[1] +TimeStep = instance.TIME[2] - instance.TIME[1] -def supcost_rule(m,k): - return sum(m.cs*m.s[k,j,t]*(TimeStep) for j in m.SUP for t in m.TIME.get_finite_elements()) -instance.supcost = Expression(instance.SCEN,rule=supcost_rule) -def boostcost_rule(m,k): - return sum(m.ce*m.pow[k,j,t]*(TimeStep) for j in m.LINK_A for t in m.TIME.get_finite_elements()) -instance.boostcost = Expression(instance.SCEN,rule=boostcost_rule) +def supcost_rule(m, k): + return sum( + m.cs * m.s[k, j, t] * (TimeStep) + for j in m.SUP + for t in m.TIME.get_finite_elements() + ) -def trackcost_rule(m,k): - return sum(m.cd*(m.dem[k,j,t]-m.stochd[k,j,t])**2.0 for j in m.DEM for t in m.TIME.get_finite_elements()) -instance.trackcost = Expression(instance.SCEN,rule=trackcost_rule) -def sspcost_rule(m,k): - return sum(m.cT*(m.px[k,i,m.TIME.last(),j]-m.px[k,i,m.TIME.first(),j])**2.0 for i in m.LINK for j in m.DIS) -instance.sspcost = Expression(instance.SCEN,rule=sspcost_rule) +instance.supcost = Expression(instance.SCEN, rule=supcost_rule) -def ssfcost_rule(m,k): - return sum(m.cT*(m.fx[k,i,m.TIME.last(),j]-m.fx[k,i,m.TIME.first(),j])**2.0 for i in m.LINK for j in m.DIS) -instance.ssfcost = Expression(instance.SCEN,rule=ssfcost_rule) -def cost_rule(m,k): - return 1e-6*(m.supcost[k] + m.boostcost[k] + m.trackcost[k] + m.sspcost[k] + m.ssfcost[k]) -instance.cost = Expression(instance.SCEN,rule=cost_rule) +def boostcost_rule(m, k): + return sum( + m.ce * m.pow[k, j, t] * (TimeStep) + for j in m.LINK_A + for t in m.TIME.get_finite_elements() + ) + + +instance.boostcost = Expression(instance.SCEN, rule=boostcost_rule) + + +def trackcost_rule(m, k): + return sum( + m.cd * (m.dem[k, j, t] - m.stochd[k, j, t]) ** 2.0 + for j in m.DEM + for t in m.TIME.get_finite_elements() + ) + + +instance.trackcost = Expression(instance.SCEN, rule=trackcost_rule) + + +def sspcost_rule(m, k): + return sum( + m.cT * (m.px[k, i, m.TIME.last(), j] - m.px[k, i, m.TIME.first(), j]) ** 2.0 + for i in m.LINK + for j in m.DIS + ) + + +instance.sspcost = Expression(instance.SCEN, rule=sspcost_rule) + + +def ssfcost_rule(m, k): + return sum( + m.cT * (m.fx[k, i, m.TIME.last(), j] - m.fx[k, i, m.TIME.first(), j]) ** 2.0 + for i in m.LINK + for j in m.DIS + ) + + +instance.ssfcost = Expression(instance.SCEN, rule=ssfcost_rule) + + +def cost_rule(m, k): + return 1e-6 * ( + m.supcost[k] + m.boostcost[k] + m.trackcost[k] + m.sspcost[k] + m.ssfcost[k] + ) + + +instance.cost = Expression(instance.SCEN, rule=cost_rule) + def mcost_rule(m): - return (1.0/m.S)*sum(m.cost[k] for k in m.SCEN) + return (1.0 / m.S) * sum(m.cost[k] for k in m.SCEN) + + instance.mcost = Expression(rule=mcost_rule) -def eqcvar_rule(m,k): - return m.cost[k] - m.nu <= m.phi[k]; -instance.eqcvar = Constraint(instance.SCEN,rule=eqcvar_rule) + +def eqcvar_rule(m, k): + return m.cost[k] - m.nu <= m.phi[k] + + +instance.eqcvar = Constraint(instance.SCEN, rule=eqcvar_rule) + def obj_rule(m): - return (1.0-m.cvar_lambda)*m.mcost + m.cvar_lambda*m.cvarcost + return (1.0 - m.cvar_lambda) * m.mcost + m.cvar_lambda * m.cvarcost + + instance.obj = Objective(rule=obj_rule) -endTime = time.time()-start +endTime = time.time() - start print('%f seconds required to construct' % endTime) import sys + start = time.time() instance.write(sys.argv[1]) -endTime = time.time()-start +endTime = time.time() - start print('%f seconds required to write file %s' % (endTime, sys.argv[1])) if False: for i in instance.SCEN: - print("Scenario %s = %s" % ( - i, sum(sum(0.5*value(instance.pow[i,j,k]) - for j in instance.LINK_A) - for k in instance.TIME.get_finite_elements()) )) - - - solver=SolverFactory('ipopt') - results = solver.solve(instance,tee=True) + print( + "Scenario %s = %s" + % ( + i, + sum( + sum(0.5 * value(instance.pow[i, j, k]) for j in instance.LINK_A) + for k in instance.TIME.get_finite_elements() + ), + ) + ) + + solver = SolverFactory('ipopt') + results = solver.solve(instance, tee=True) for i in instance.SCEN: - print("Scenario %s = %s" % ( - i, sum(sum(0.5*value(instance.pow[i,j,k]) - for j in instance.LINK_A) - for k in instance.TIME.get_finite_elements()) )) + print( + "Scenario %s = %s" + % ( + i, + sum( + sum(0.5 * value(instance.pow[i, j, k]) for j in instance.LINK_A) + for k in instance.TIME.get_finite_elements() + ), + ) + ) diff --git a/examples/performance/dae/stochpdegas1_automatic.py b/examples/performance/dae/stochpdegas1_automatic.py index 4a948559726..cd0153eee61 100644 --- a/examples/performance/dae/stochpdegas1_automatic.py +++ b/examples/performance/dae/stochpdegas1_automatic.py @@ -1,7 +1,7 @@ # stochastic pde model for natural gas network # victor m. zavala / 2013 -#from __future__ import division +# from __future__ import division from pyomo.environ import * from pyomo.dae import * @@ -10,285 +10,420 @@ # sets model.TF = Param(within=NonNegativeReals) + + def _tinit(m): - return [0.5,value(m.TF)] + return [0.5, value(m.TF)] # What it should be to match description in paper - #return [0,value(m.TF)] + # return [0,value(m.TF)] + + model.TIME = ContinuousSet(initialize=_tinit) -model.DIS = ContinuousSet(bounds=(0.0,1.0)) +model.DIS = ContinuousSet(bounds=(0.0, 1.0)) model.S = Param(within=PositiveIntegers) -model.SCEN = RangeSet(1,model.S) +model.SCEN = RangeSet(1, model.S) # links model.LINK = Set() model.lstartloc = Param(model.LINK) model.lendloc = Param(model.LINK) -model.ldiam = Param(model.LINK,within=PositiveReals,mutable=True) -model.llength = Param(model.LINK,within=PositiveReals,mutable=True) +model.ldiam = Param(model.LINK, within=PositiveReals, mutable=True) +model.llength = Param(model.LINK, within=PositiveReals, mutable=True) model.ltype = Param(model.LINK) + def link_a_init_rule(m): return (l for l in m.LINK if m.ltype[l] == "a") + + model.LINK_A = Set(initialize=link_a_init_rule) + def link_p_init_rule(m): return (l for l in m.LINK if m.ltype[l] == "p") + + model.LINK_P = Set(initialize=link_p_init_rule) # nodes model.NODE = Set() -model.pmin = Param(model.NODE,within=PositiveReals,mutable=True) -model.pmax = Param(model.NODE,within=PositiveReals,mutable=True) +model.pmin = Param(model.NODE, within=PositiveReals, mutable=True) +model.pmax = Param(model.NODE, within=PositiveReals, mutable=True) # supply model.SUP = Set() model.sloc = Param(model.SUP) -model.smin = Param(model.SUP,within=NonNegativeReals,mutable=True) -model.smax = Param(model.SUP,within=NonNegativeReals,mutable=True) -model.scost = Param(model.SUP,within=NonNegativeReals) +model.smin = Param(model.SUP, within=NonNegativeReals, mutable=True) +model.smax = Param(model.SUP, within=NonNegativeReals, mutable=True) +model.scost = Param(model.SUP, within=NonNegativeReals) # demand model.DEM = Set() model.dloc = Param(model.DEM) -model.d = Param(model.DEM, within=PositiveReals,mutable=True) +model.d = Param(model.DEM, within=PositiveReals, mutable=True) # physical data -model.eps = Param(initialize=0.025,within=PositiveReals) -model.z = Param(initialize=0.80,within=PositiveReals) -model.rhon = Param(initialize=0.72,within=PositiveReals) -model.R = Param(initialize=8314.0,within=PositiveReals) -model.M = Param(initialize=18.0,within=PositiveReals) -model.pi = Param(initialize=3.14,within=PositiveReals) -model.nu2 = Param(within=PositiveReals,mutable=True) -model.lam = Param(model.LINK,within=PositiveReals,mutable=True) -model.A = Param(model.LINK,within=NonNegativeReals,mutable=True) -model.Tgas = Param(initialize=293.15,within=PositiveReals) -model.Cp = Param(initialize=2.34,within=PositiveReals) -model.Cv = Param(initialize=1.85,within=PositiveReals) -model.gam = Param(initialize=model.Cp/model.Cv, within=PositiveReals) -model.om = Param(initialize=(model.gam-1.0)/model.gam,within=PositiveReals) +model.eps = Param(initialize=0.025, within=PositiveReals) +model.z = Param(initialize=0.80, within=PositiveReals) +model.rhon = Param(initialize=0.72, within=PositiveReals) +model.R = Param(initialize=8314.0, within=PositiveReals) +model.M = Param(initialize=18.0, within=PositiveReals) +model.pi = Param(initialize=3.14, within=PositiveReals) +model.nu2 = Param(within=PositiveReals, mutable=True) +model.lam = Param(model.LINK, within=PositiveReals, mutable=True) +model.A = Param(model.LINK, within=NonNegativeReals, mutable=True) +model.Tgas = Param(initialize=293.15, within=PositiveReals) +model.Cp = Param(initialize=2.34, within=PositiveReals) +model.Cv = Param(initialize=1.85, within=PositiveReals) +model.gam = Param(initialize=model.Cp / model.Cv, within=PositiveReals) +model.om = Param(initialize=(model.gam - 1.0) / model.gam, within=PositiveReals) # scaling and constants -model.ffac = Param(within=PositiveReals,initialize=(1.0e+6*model.rhon)/(24.0*3600.0)) -model.ffac2 = Param(within=PositiveReals,initialize=(3600.0)/(1.0e+4*model.rhon)) -model.pfac = Param(within=PositiveReals,initialize=1.0e+5) -model.pfac2 = Param(within=PositiveReals,initialize=1.0e-5) -model.dfac = Param(within=PositiveReals,initialize=1.0e-3) -model.lfac = Param(within=PositiveReals,initialize=1.0e+3) - -model.c1 = Param(model.LINK,within=PositiveReals,mutable=True) -model.c2 = Param(model.LINK,within=PositiveReals,mutable=True) -model.c3 = Param(model.LINK,within=PositiveReals,mutable=True) -model.c4 = Param(within=PositiveReals,mutable=True) +model.ffac = Param( + within=PositiveReals, initialize=(1.0e6 * model.rhon) / (24.0 * 3600.0) +) +model.ffac2 = Param(within=PositiveReals, initialize=(3600.0) / (1.0e4 * model.rhon)) +model.pfac = Param(within=PositiveReals, initialize=1.0e5) +model.pfac2 = Param(within=PositiveReals, initialize=1.0e-5) +model.dfac = Param(within=PositiveReals, initialize=1.0e-3) +model.lfac = Param(within=PositiveReals, initialize=1.0e3) + +model.c1 = Param(model.LINK, within=PositiveReals, mutable=True) +model.c2 = Param(model.LINK, within=PositiveReals, mutable=True) +model.c3 = Param(model.LINK, within=PositiveReals, mutable=True) +model.c4 = Param(within=PositiveReals, mutable=True) # cost factors -model.ce = Param(initialize=0.1,within=NonNegativeReals) -model.cd = Param(initialize=1.0e+6,within=NonNegativeReals) -model.cT = Param(initialize=1.0e+6,within=NonNegativeReals) -model.cs = Param(initialize=0.0,within=NonNegativeReals) +model.ce = Param(initialize=0.1, within=NonNegativeReals) +model.cd = Param(initialize=1.0e6, within=NonNegativeReals) +model.cT = Param(initialize=1.0e6, within=NonNegativeReals) +model.cs = Param(initialize=0.0, within=NonNegativeReals) model.TDEC = Param(within=PositiveReals) # define stochastic info -model.rand_d = Param(model.SCEN,model.DEM,within=NonNegativeReals,mutable=True) +model.rand_d = Param(model.SCEN, model.DEM, within=NonNegativeReals, mutable=True) + # convert units for input data def rescale_rule(m): - for i in m.LINK: - m.ldiam[i] = m.ldiam[i]*m.dfac - m.llength[i] = m.llength[i]*m.lfac + m.ldiam[i] = m.ldiam[i] * m.dfac + m.llength[i] = m.llength[i] * m.lfac # m.dx[i] = m.llength[i]/float(m.DIS.last()) for i in m.SUP: - m.smin[i] = m.smin[i]*m.ffac*m.ffac2 # from scmx106/day to kg/s and then to scmx10-4/hr - m.smax[i] = m.smax[i]*m.ffac*m.ffac2 # from scmx106/day to kg/s and then to scmx10-4/hr + m.smin[i] = ( + m.smin[i] * m.ffac * m.ffac2 + ) # from scmx106/day to kg/s and then to scmx10-4/hr + m.smax[i] = ( + m.smax[i] * m.ffac * m.ffac2 + ) # from scmx106/day to kg/s and then to scmx10-4/hr for i in m.DEM: - m.d[i] = m.d[i]*m.ffac*m.ffac2 + m.d[i] = m.d[i] * m.ffac * m.ffac2 for i in m.NODE: - m.pmin[i] = m.pmin[i]*m.pfac*m.pfac2 # from bar to Pascals and then to bar - m.pmax[i] = m.pmax[i]*m.pfac*m.pfac2 # from bar to Pascals and then to bar + m.pmin[i] = m.pmin[i] * m.pfac * m.pfac2 # from bar to Pascals and then to bar + m.pmax[i] = m.pmax[i] * m.pfac * m.pfac2 # from bar to Pascals and then to bar + + model.rescale = BuildAction(rule=rescale_rule) + def compute_constants(m): - for i in m.LINK: - m.lam[i] = (2.0*log10(3.7*m.ldiam[i]/(m.eps*m.dfac)))**(-2.0) - m.A[i] = (1.0/4.0)*m.pi*m.ldiam[i]*m.ldiam[i] - m.nu2 = m.gam*m.z*m.R*m.Tgas/m.M - m.c1[i] = (m.pfac2/m.ffac2)*(m.nu2/m.A[i]) - m.c2[i] = m.A[i]*(m.ffac2/m.pfac2) - m.c3[i] = m.A[i]*(m.pfac2/m.ffac2)*(8.0*m.lam[i]*m.nu2)/(m.pi*m.pi*(m.ldiam[i]**5.0)) - m.c4 = (1/m.ffac2)*(m.Cp*m.Tgas) + m.lam[i] = (2.0 * log10(3.7 * m.ldiam[i] / (m.eps * m.dfac))) ** (-2.0) + m.A[i] = (1.0 / 4.0) * m.pi * m.ldiam[i] * m.ldiam[i] + m.nu2 = m.gam * m.z * m.R * m.Tgas / m.M + m.c1[i] = (m.pfac2 / m.ffac2) * (m.nu2 / m.A[i]) + m.c2[i] = m.A[i] * (m.ffac2 / m.pfac2) + m.c3[i] = ( + m.A[i] + * (m.pfac2 / m.ffac2) + * (8.0 * m.lam[i] * m.nu2) + / (m.pi * m.pi * (m.ldiam[i] ** 5.0)) + ) + m.c4 = (1 / m.ffac2) * (m.Cp * m.Tgas) + model.compute_constants = BuildAction(rule=compute_constants) + # set stochastic demands def compute_demands_rule(m): - for k in m.SCEN: for j in m.DEM: if k == 2: - m.rand_d[k,j] = 1.1*m.d[j] + m.rand_d[k, j] = 1.1 * m.d[j] elif k == 1: - m.rand_d[k,j] = 1.2*m.d[j] + m.rand_d[k, j] = 1.2 * m.d[j] else: - m.rand_d[k,j] = 1.3*m.d[j] + m.rand_d[k, j] = 1.3 * m.d[j] + + model.compute_demands = BuildAction(rule=compute_demands_rule) -def stochd_init(m,k,j,t): + +def stochd_init(m, k, j, t): # What it should be to match description in paper # if t < m.TDEC: # return m.d[j] # if t >= m.TDEC and t < m.TDEC+5: # return m.rand_d[k,j] # if t >= m.TDEC+5: - # return m.d[j] - if t < m.TDEC+1: + # return m.d[j] + if t < m.TDEC + 1: + return m.d[j] + if t >= m.TDEC + 1 and t < m.TDEC + 1 + 4.5: + return m.rand_d[k, j] + if t >= m.TDEC + 1 + 4.5: return m.d[j] - if t >= m.TDEC+1 and t < m.TDEC+1+4.5: - return m.rand_d[k,j] - if t >= m.TDEC+1+4.5: - return m.d[j] -model.stochd = Param(model.SCEN,model.DEM,model.TIME,within=PositiveReals,mutable=True,default=stochd_init) + +model.stochd = Param( + model.SCEN, + model.DEM, + model.TIME, + within=PositiveReals, + mutable=True, + default=stochd_init, +) + # define temporal variables -def p_bounds_rule(m,k,j,t): - return (value(m.pmin[j]),value(m.pmax[j])) +def p_bounds_rule(m, k, j, t): + return (value(m.pmin[j]), value(m.pmax[j])) + + model.p = Var(model.SCEN, model.NODE, model.TIME, bounds=p_bounds_rule, initialize=50.0) -model.dp = Var(model.SCEN,model.LINK_A,model.TIME,bounds=(0.0,100.0), initialize=10.0) -model.fin = Var(model.SCEN,model.LINK,model.TIME,bounds=(1.0,500.0),initialize=100.0) -model.fout = Var(model.SCEN,model.LINK,model.TIME,bounds=(1.0,500.0),initialize=100.0) - -def s_bounds_rule(m,k,j,t): - return (0.01,value(m.smax[j])) -model.s = Var(model.SCEN,model.SUP,model.TIME,bounds=s_bounds_rule,initialize=10.0) -model.dem = Var(model.SCEN,model.DEM,model.TIME,initialize=100.0) -model.pow = Var(model.SCEN,model.LINK_A,model.TIME,bounds=(0.0,3000.0),initialize=1000.0) -model.slack = Var(model.SCEN,model.LINK,model.TIME,model.DIS,bounds=(0.0,None),initialize=10.0) - +model.dp = Var( + model.SCEN, model.LINK_A, model.TIME, bounds=(0.0, 100.0), initialize=10.0 +) +model.fin = Var( + model.SCEN, model.LINK, model.TIME, bounds=(1.0, 500.0), initialize=100.0 +) +model.fout = Var( + model.SCEN, model.LINK, model.TIME, bounds=(1.0, 500.0), initialize=100.0 +) + + +def s_bounds_rule(m, k, j, t): + return (0.01, value(m.smax[j])) + + +model.s = Var(model.SCEN, model.SUP, model.TIME, bounds=s_bounds_rule, initialize=10.0) +model.dem = Var(model.SCEN, model.DEM, model.TIME, initialize=100.0) +model.pow = Var( + model.SCEN, model.LINK_A, model.TIME, bounds=(0.0, 3000.0), initialize=1000.0 +) +model.slack = Var( + model.SCEN, model.LINK, model.TIME, model.DIS, bounds=(0.0, None), initialize=10.0 +) + # define spatio-temporal variables -model.px = Var(model.SCEN,model.LINK,model.TIME,model.DIS,bounds=(10.0,100.0),initialize=50.0) -model.fx = Var(model.SCEN,model.LINK,model.TIME,model.DIS,bounds=(1.0,100.0),initialize=100.0) +model.px = Var( + model.SCEN, model.LINK, model.TIME, model.DIS, bounds=(10.0, 100.0), initialize=50.0 +) +model.fx = Var( + model.SCEN, model.LINK, model.TIME, model.DIS, bounds=(1.0, 100.0), initialize=100.0 +) # define derivatives -model.dpxdt = DerivativeVar(model.px,wrt=model.TIME,initialize=0) -model.dpxdx = DerivativeVar(model.px,wrt=model.DIS,initialize=0) -model.dfxdt = DerivativeVar(model.fx,wrt=model.TIME,initialize=0) -model.dfxdx = DerivativeVar(model.fx,wrt=model.DIS,initialize=0) +model.dpxdt = DerivativeVar(model.px, wrt=model.TIME, initialize=0) +model.dpxdx = DerivativeVar(model.px, wrt=model.DIS, initialize=0) +model.dfxdt = DerivativeVar(model.fx, wrt=model.TIME, initialize=0) +model.dfxdx = DerivativeVar(model.fx, wrt=model.DIS, initialize=0) # ----------- MODEL -------------- + # compressor equations -def powereq_rule(m,j,i,t): - return m.pow[j,i,t] == m.c4*m.fin[j,i,t]*(((m.p[j,m.lstartloc[i],t]+m.dp[j,i,t])/m.p[j,m.lstartloc[i],t])**m.om - 1.0) -model.powereq = Constraint(model.SCEN,model.LINK_A,model.TIME,rule=powereq_rule) +def powereq_rule(m, j, i, t): + return m.pow[j, i, t] == m.c4 * m.fin[j, i, t] * ( + ((m.p[j, m.lstartloc[i], t] + m.dp[j, i, t]) / m.p[j, m.lstartloc[i], t]) + ** m.om + - 1.0 + ) + + +model.powereq = Constraint(model.SCEN, model.LINK_A, model.TIME, rule=powereq_rule) -# cvar model +# cvar model model.cvar_lambda = Param(within=NonNegativeReals) model.nu = Var(initialize=100.0) -model.phi = Var(model.SCEN,bounds=(0.0,None),initialize=100.0) +model.phi = Var(model.SCEN, bounds=(0.0, None), initialize=100.0) def cvarcost_rule(m): - return (1.0/m.S)*sum((m.phi[k]/(1.0-0.95) + m.nu) for k in m.SCEN) + return (1.0 / m.S) * sum((m.phi[k] / (1.0 - 0.95) + m.nu) for k in m.SCEN) + + model.cvarcost = Expression(rule=cvarcost_rule) + # node balances -def nodeeq_rule(m,k,i,t): - return sum(m.fout[k,j,t] for j in m.LINK if m.lendloc[j]==i) + \ - sum(m.s[k,j,t] for j in m.SUP if m.sloc[j]==i) - \ - sum(m.fin[k,j,t] for j in m.LINK if m.lstartloc[j]==i) - \ - sum(m.dem[k,j,t] for j in m.DEM if m.dloc[j]==i) == 0.0 -model.nodeeq = Constraint(model.SCEN,model.NODE,model.TIME,rule=nodeeq_rule) - +def nodeeq_rule(m, k, i, t): + return ( + sum(m.fout[k, j, t] for j in m.LINK if m.lendloc[j] == i) + + sum(m.s[k, j, t] for j in m.SUP if m.sloc[j] == i) + - sum(m.fin[k, j, t] for j in m.LINK if m.lstartloc[j] == i) + - sum(m.dem[k, j, t] for j in m.DEM if m.dloc[j] == i) + == 0.0 + ) + + +model.nodeeq = Constraint(model.SCEN, model.NODE, model.TIME, rule=nodeeq_rule) + + # boundary conditions flow -def flow_start_rule(m,j,i,t): - return m.fx[j,i,t,m.DIS.first()] == m.fin[j,i,t] -model.flow_start = Constraint(model.SCEN,model.LINK,model.TIME,rule=flow_start_rule) +def flow_start_rule(m, j, i, t): + return m.fx[j, i, t, m.DIS.first()] == m.fin[j, i, t] + + +model.flow_start = Constraint(model.SCEN, model.LINK, model.TIME, rule=flow_start_rule) + + +def flow_end_rule(m, j, i, t): + return m.fx[j, i, t, m.DIS.last()] == m.fout[j, i, t] + + +model.flow_end = Constraint(model.SCEN, model.LINK, model.TIME, rule=flow_end_rule) -def flow_end_rule(m,j,i,t): - return m.fx[j,i,t,m.DIS.last()] == m.fout[j,i,t] -model.flow_end = Constraint(model.SCEN,model.LINK,model.TIME,rule=flow_end_rule) # First PDE for gas network model -def flow_rule(m,j,i,t,k): - if t == m.TIME.first() or k == m.DIS.last(): - return Constraint.Skip # Do not apply pde at initial time or final location - return m.dpxdt[j,i,t,k]/3600 + m.c1[i]/m.llength[i]*m.dfxdx[j,i,t,k] == 0 -model.flow = Constraint(model.SCEN,model.LINK,model.TIME,model.DIS,rule=flow_rule) +def flow_rule(m, j, i, t, k): + if t == m.TIME.first() or k == m.DIS.last(): + return Constraint.Skip # Do not apply pde at initial time or final location + return ( + m.dpxdt[j, i, t, k] / 3600 + m.c1[i] / m.llength[i] * m.dfxdx[j, i, t, k] == 0 + ) + + +model.flow = Constraint(model.SCEN, model.LINK, model.TIME, model.DIS, rule=flow_rule) + # Second PDE for gas network model -def press_rule(m,j,i,t,k): +def press_rule(m, j, i, t, k): if t == m.TIME.first() or k == m.DIS.last(): - return Constraint.Skip # Do not apply pde at initial time or final location - return m.dfxdt[j,i,t,k]/3600 == -m.c2[i]/m.llength[i]*m.dpxdx[j,i,t,k] - m.slack[j,i,t,k] -model.press = Constraint(model.SCEN,model.LINK,model.TIME,model.DIS,rule=press_rule) + return Constraint.Skip # Do not apply pde at initial time or final location + return ( + m.dfxdt[j, i, t, k] / 3600 + == -m.c2[i] / m.llength[i] * m.dpxdx[j, i, t, k] - m.slack[j, i, t, k] + ) -def slackeq_rule(m,j,i,t,k): + +model.press = Constraint(model.SCEN, model.LINK, model.TIME, model.DIS, rule=press_rule) + + +def slackeq_rule(m, j, i, t, k): if t == m.TIME.last(): return Constraint.Skip - return m.slack[j,i,t,k]*m.px[j,i,t,k] == m.c3[i]*m.fx[j,i,t,k]*m.fx[j,i,t,k] -model.slackeq = Constraint(model.SCEN,model.LINK,model.TIME,model.DIS,rule=slackeq_rule) + return ( + m.slack[j, i, t, k] * m.px[j, i, t, k] + == m.c3[i] * m.fx[j, i, t, k] * m.fx[j, i, t, k] + ) + + +model.slackeq = Constraint( + model.SCEN, model.LINK, model.TIME, model.DIS, rule=slackeq_rule +) + # boundary conditions pressure, passive links -def presspas_start_rule(m,j,i,t): - return m.px[j,i,t,m.DIS.first()] == m.p[j,m.lstartloc[i],t] -model.presspas_start = Constraint(model.SCEN,model.LINK_P,model.TIME,rule=presspas_start_rule) +def presspas_start_rule(m, j, i, t): + return m.px[j, i, t, m.DIS.first()] == m.p[j, m.lstartloc[i], t] + + +model.presspas_start = Constraint( + model.SCEN, model.LINK_P, model.TIME, rule=presspas_start_rule +) + + +def presspas_end_rule(m, j, i, t): + return m.px[j, i, t, m.DIS.last()] == m.p[j, m.lendloc[i], t] + + +model.presspas_end = Constraint( + model.SCEN, model.LINK_P, model.TIME, rule=presspas_end_rule +) -def presspas_end_rule(m,j,i,t): - return m.px[j,i,t,m.DIS.last()] == m.p[j,m.lendloc[i],t] -model.presspas_end = Constraint(model.SCEN,model.LINK_P,model.TIME,rule=presspas_end_rule) # boundary conditions pressure, active links -def pressact_start_rule(m,j,i,t): - return m.px[j,i,t,m.DIS.first()] == m.p[j,m.lstartloc[i],t]+m.dp[j,i,t] -model.pressact_start = Constraint(model.SCEN,model.LINK_A,model.TIME,rule=pressact_start_rule) - -def pressact_end_rule(m,j,i,t): - return m.px[j,i,t,m.DIS.last()] == m.p[j,m.lendloc[i],t] -model.pressact_end = Constraint(model.SCEN,model.LINK_A,model.TIME,rule=pressact_end_rule) - +def pressact_start_rule(m, j, i, t): + return m.px[j, i, t, m.DIS.first()] == m.p[j, m.lstartloc[i], t] + m.dp[j, i, t] + + +model.pressact_start = Constraint( + model.SCEN, model.LINK_A, model.TIME, rule=pressact_start_rule +) + + +def pressact_end_rule(m, j, i, t): + return m.px[j, i, t, m.DIS.last()] == m.p[j, m.lendloc[i], t] + + +model.pressact_end = Constraint( + model.SCEN, model.LINK_A, model.TIME, rule=pressact_end_rule +) + + # fix pressure at supply nodes -def suppres_rule(m,k,j,t): - return m.p[k,m.sloc[j],t] == m.pmin[m.sloc[j]] -model.suppres = Constraint(model.SCEN,model.SUP,model.TIME,rule=suppres_rule) +def suppress_rule(m, k, j, t): + return m.p[k, m.sloc[j], t] == m.pmin[m.sloc[j]] + + +model.suppress = Constraint(model.SCEN, model.SUP, model.TIME, rule=suppress_rule) + # discharge pressure for compressors -def dispress_rule(m,j,i,t): - return m.p[j,m.lstartloc[i],t]+m.dp[j,i,t] <= m.pmax[m.lstartloc[i]] -model.dispress = Constraint(model.SCEN,model.LINK_A,model.TIME,rule=dispress_rule) +def dispress_rule(m, j, i, t): + return m.p[j, m.lstartloc[i], t] + m.dp[j, i, t] <= m.pmax[m.lstartloc[i]] + + +model.dispress = Constraint(model.SCEN, model.LINK_A, model.TIME, rule=dispress_rule) + # ss constraints -def flow_ss_rule(m,j,i,k): +def flow_ss_rule(m, j, i, k): if k == m.DIS.last(): return Constraint.Skip - return m.dfxdx[j,i,m.TIME.first(),k]/m.llength[i] == 0.0 -model.flow_ss = Constraint(model.SCEN,model.LINK,model.DIS,rule=flow_ss_rule) + return m.dfxdx[j, i, m.TIME.first(), k] / m.llength[i] == 0.0 + + +model.flow_ss = Constraint(model.SCEN, model.LINK, model.DIS, rule=flow_ss_rule) -def pres_ss_rule(m,j,i,k): + +def pres_ss_rule(m, j, i, k): if k == m.DIS.last(): return Constraint.Skip - return 0.0 == - m.c2[i]/m.llength[i]*m.dpxdx[j,i,m.TIME.first(),k] - m.slack[j,i,m.TIME.first(),k]; -model.pres_ss = Constraint(model.SCEN,model.LINK,model.DIS,rule=pres_ss_rule) + return ( + 0.0 + == -m.c2[i] / m.llength[i] * m.dpxdx[j, i, m.TIME.first(), k] + - m.slack[j, i, m.TIME.first(), k] + ) + + +model.pres_ss = Constraint(model.SCEN, model.LINK, model.DIS, rule=pres_ss_rule) + # non-anticipativity constraints -def nonantdq_rule(m,j,i,t): +def nonantdq_rule(m, j, i, t): if j == 1: return Constraint.Skip - if t >= m.TDEC+1: + if t >= m.TDEC + 1: return Constraint.Skip - return m.dp[j,i,t] == m.dp[1,i,t] + return m.dp[j, i, t] == m.dp[1, i, t] + -model.nonantdq = Constraint(model.SCEN,model.LINK_A,model.TIME,rule=nonantdq_rule) +model.nonantdq = Constraint(model.SCEN, model.LINK_A, model.TIME, rule=nonantdq_rule) -def nonantde_rule(m,j,i,t): + +def nonantde_rule(m, j, i, t): if j == 1: return Constraint.Skip - if t >= m.TDEC+1: + if t >= m.TDEC + 1: return Constraint.Skip - return m.dem[j,i,t] == m.dem[1,i,t] + return m.dem[j, i, t] == m.dem[1, i, t] + -model.nonantde = Constraint(model.SCEN,model.DEM,model.TIME,rule=nonantde_rule) +model.nonantde = Constraint(model.SCEN, model.DEM, model.TIME, rule=nonantde_rule) diff --git a/examples/performance/jump/clnlbeam.py b/examples/performance/jump/clnlbeam.py index 9ff2abf4aa2..d2ceda790ec 100644 --- a/examples/performance/jump/clnlbeam.py +++ b/examples/performance/jump/clnlbeam.py @@ -3,35 +3,48 @@ model = AbstractModel() model.N = Param(within=PositiveIntegers) -model.h = 1.0/model.N +model.h = 1.0 / model.N -model.VarIdx = RangeSet(model.N+1) +model.VarIdx = RangeSet(model.N + 1) -model.t = Var(model.VarIdx, bounds=(-1.0,1.0), initialize=lambda m,i: 0.05*cos(i*m.h)) -model.x = Var(model.VarIdx, bounds=(-0.05,0.05), initialize=lambda m,i: 0.05*cos(i*m.h)) +model.t = Var( + model.VarIdx, bounds=(-1.0, 1.0), initialize=lambda m, i: 0.05 * cos(i * m.h) +) +model.x = Var( + model.VarIdx, bounds=(-0.05, 0.05), initialize=lambda m, i: 0.05 * cos(i * m.h) +) model.u = Var(model.VarIdx, initialize=0.01) alpha = 350 + def c_rule(m): ex = 0 for i in m.VarIdx: - if i == m.N+1: + if i == m.N + 1: continue - ex += 0.5*m.h*(m.u[i+1]**2+m.u[i]**2) + 0.5*alpha*m.h*(cos(m.t[i+1])+cos(m.t[i])) + ex += 0.5 * m.h * (m.u[i + 1] ** 2 + m.u[i] ** 2) + 0.5 * alpha * m.h * ( + cos(m.t[i + 1]) + cos(m.t[i]) + ) return ex + model.c = Objective(rule=c_rule) + def cons1_rule(m, i): - if i == m.N+1: + if i == m.N + 1: return Constraint.Skip - return m.x[i+1] - m.x[i] - (0.5*m.h)*(sin(m.t[i+1])+sin(m.t[i])) == 0 + return m.x[i + 1] - m.x[i] - (0.5 * m.h) * (sin(m.t[i + 1]) + sin(m.t[i])) == 0 + + model.cons1 = Constraint(model.VarIdx, rule=cons1_rule) -def cons2_rule(m,i): - if i == m.N+1: + +def cons2_rule(m, i): + if i == m.N + 1: return Constraint.Skip - return m.t[i+1] - m.t[i] - (0.5*m.h)*m.u[i+1] - (0.5*m.h)*m.u[i] == 0 -model.cons2 = Constraint(model.VarIdx, rule=cons2_rule) + return m.t[i + 1] - m.t[i] - (0.5 * m.h) * m.u[i + 1] - (0.5 * m.h) * m.u[i] == 0 + +model.cons2 = Constraint(model.VarIdx, rule=cons2_rule) diff --git a/examples/performance/jump/facility.py b/examples/performance/jump/facility.py index 8b4acb75e70..6832e8d32ac 100644 --- a/examples/performance/jump/facility.py +++ b/examples/performance/jump/facility.py @@ -2,8 +2,8 @@ model = AbstractModel() -model.G = 25 #Param(within=PositiveIntegers) -model.F = 25 #Param(within=PositiveIntegers) +model.G = 25 # Param(within=PositiveIntegers) +model.F = 25 # Param(within=PositiveIntegers) model.Grid = RangeSet(0, model.G) model.Facs = RangeSet(1, model.F) @@ -15,28 +15,46 @@ model.s = Var(model.Grid, model.Grid, model.Facs, bounds=(0.0, None)) model.r = Var(model.Grid, model.Grid, model.Facs, model.Dims) + def obj_rule(mod): - return 1.0*mod.d + return 1.0 * mod.d + + model.obj = Objective(rule=obj_rule) + def assmt_rule(mod, i, j): - return sum([mod.z[i,j,f] for f in mod.Facs]) == 1 + return sum([mod.z[i, j, f] for f in mod.Facs]) == 1 + + model.assmt = Constraint(model.Grid, model.Grid, rule=assmt_rule) -M = 2*1.414 -def quadrhs_rule(mod,i,j,f): - return mod.s[i,j,f] == mod.d + M*(1 - mod.z[i,j,f]) +M = 2 * 1.414 + + +def quadrhs_rule(mod, i, j, f): + return mod.s[i, j, f] == mod.d + M * (1 - mod.z[i, j, f]) + + model.quadrhs = Constraint(model.Grid, model.Grid, model.Facs, rule=quadrhs_rule) -def quaddistk1_rule(mod,i,j,f): - return mod.r[i,j,f,1] == (1.0*i)/mod.G - mod.y[f,1] + +def quaddistk1_rule(mod, i, j, f): + return mod.r[i, j, f, 1] == (1.0 * i) / mod.G - mod.y[f, 1] + + model.quaddistk1 = Constraint(model.Grid, model.Grid, model.Facs, rule=quaddistk1_rule) -def quaddistk2_rule(mod,i,j,f): - return mod.r[i,j,f,2] == (1.0*j)/mod.G - mod.y[f,2] + +def quaddistk2_rule(mod, i, j, f): + return mod.r[i, j, f, 2] == (1.0 * j) / mod.G - mod.y[f, 2] + + model.quaddistk2 = Constraint(model.Grid, model.Grid, model.Facs, rule=quaddistk2_rule) -def quaddist_rule(mod,i,j,f): - return mod.r[i,j,f,1]**2 + mod.r[i,j,f,2]**2 <= mod.s[i,j,f]**2 -model.quaddist = Constraint(model.Grid, model.Grid, model.Facs, rule=quaddist_rule) +def quaddist_rule(mod, i, j, f): + return mod.r[i, j, f, 1] ** 2 + mod.r[i, j, f, 2] ** 2 <= mod.s[i, j, f] ** 2 + + +model.quaddist = Constraint(model.Grid, model.Grid, model.Facs, rule=quaddist_rule) diff --git a/examples/performance/jump/lqcp.py b/examples/performance/jump/lqcp.py index 10cb49cb262..bb3e66b36f5 100644 --- a/examples/performance/jump/lqcp.py +++ b/examples/performance/jump/lqcp.py @@ -4,9 +4,9 @@ model.n = 1000 model.m = 1000 -model.dx = 1.0/model.n +model.dx = 1.0 / model.n model.T = 1.58 -model.dt = model.T/model.n +model.dt = model.T / model.n model.h2 = model.dx**2 model.a = 0.001 @@ -16,32 +16,59 @@ model.y = Var(model.ms, model.ns, bounds=(0.0, 1.0)) model.u = Var(model.ms, bounds=(-1.0, 1.0)) -def yt(j,dx): - return 0.5*(1 - (j*dx)*(j*dx)) + +def yt(j, dx): + return 0.5 * (1 - (j * dx) * (j * dx)) + def rule(model): - return 0.25*model.dx*( - (model.y[model.m,0] - yt(0,model.dx))**2 + - 2*sum( (model.y[model.m,j] - yt(j,model.dx))**2 for j in range(1,model.n)) + - (model.y[model.m,model.n] - yt(model.n,model.dx))**2 - ) + 0.25*model.a*model.dt*( - 2 * sum( model.u[i]**2 for i in range(1,model.m)) + - model.u[model.m]**2 + return 0.25 * model.dx * ( + (model.y[model.m, 0] - yt(0, model.dx)) ** 2 + + 2 + * sum((model.y[model.m, j] - yt(j, model.dx)) ** 2 for j in range(1, model.n)) + + (model.y[model.m, model.n] - yt(model.n, model.dx)) ** 2 + ) + 0.25 * model.a * model.dt * ( + 2 * sum(model.u[i] ** 2 for i in range(1, model.m)) + model.u[model.m] ** 2 ) + + model.obj = Objective(rule=rule) + def pde_rule(model, i, j): - return (model.y[i+1,j] - model.y[i,j])/model.dt == 0.5*(model.y[i,j-1] - 2*model.y[i,j] + model.y[i,j+1] + model.y[i+1,j-1] - 2*model.y[i+1,j] + model.y[i+1,j+1])/model.h2 -model.pde = Constraint(RangeSet(0,model.n-1), RangeSet(1,model.n-1), rule=pde_rule) + return (model.y[i + 1, j] - model.y[i, j]) / model.dt == 0.5 * ( + model.y[i, j - 1] + - 2 * model.y[i, j] + + model.y[i, j + 1] + + model.y[i + 1, j - 1] + - 2 * model.y[i + 1, j] + + model.y[i + 1, j + 1] + ) / model.h2 + + +model.pde = Constraint( + RangeSet(0, model.n - 1), RangeSet(1, model.n - 1), rule=pde_rule +) + def ic_rule(model, j): - return model.y[0,j] == 0 + return model.y[0, j] == 0 + + model.ic = Constraint(model.ns, rule=ic_rule) + def bc1_rule(model, i): - return model.y[i, 2] - 4*model.y[i, 1] + 3*model.y[i,0] == 0 -model.bc1 = Constraint(RangeSet(1,model.n), rule=bc1_rule) + return model.y[i, 2] - 4 * model.y[i, 1] + 3 * model.y[i, 0] == 0 + + +model.bc1 = Constraint(RangeSet(1, model.n), rule=bc1_rule) + def bc2_rule(model, i): - return model.y[i,model.n-2] - 4*model.y[i,model.n-1] + 3*model.y[i,model.n-0] == (2*model.dx)*(model.u[i] - model.y[i,model.n-0]) -model.bc2 = Constraint(RangeSet(1,model.n), rule=bc2_rule) + return model.y[i, model.n - 2] - 4 * model.y[i, model.n - 1] + 3 * model.y[ + i, model.n - 0 + ] == (2 * model.dx) * (model.u[i] - model.y[i, model.n - 0]) + + +model.bc2 = Constraint(RangeSet(1, model.n), rule=bc2_rule) diff --git a/examples/performance/jump/opf_66200bus.py b/examples/performance/jump/opf_66200bus.py index ef20ed11940..f3e1822fbfb 100644 --- a/examples/performance/jump/opf_66200bus.py +++ b/examples/performance/jump/opf_66200bus.py @@ -1,15 +1,18 @@ from pyomo.environ import * + class Bus: pass + class Branch: pass + bus = [] busmap = {} busfile = open("IEEE66200.bus", "r") -for i,line in enumerate(busfile): +for i, line in enumerate(busfile): sp = line.split() b = Bus() busmap[sp[0]] = i @@ -40,7 +43,7 @@ class Branch: branchfile = open("IEEE66200.branch", "r") branch = [] -for i,line in enumerate(branchfile): +for i, line in enumerate(branchfile): sp = line.split() b = Branch() b.frm = busmap[sp[1]] @@ -57,14 +60,14 @@ class Branch: b.def_max = float(sp[12]) b.g = b.r / (b.r**2 + b.x**2) b.b = -b.x / (b.r**2 + b.x**2) - b.def_min *= 3.14159/180 - b.def_max *= 3.14159/180 - b.def0 *= -3.14159/180 + b.def_min *= 3.14159 / 180 + b.def_max *= 3.14159 / 180 + b.def0 *= -3.14159 / 180 branch.append(b) -bus_voltage_min = {0 : 0.85, 1 : 0.85, 2 : 0.92, 3 : 0.99} -bus_voltage_max = {0 : 1.15, 1 : 1.15, 2 : 1.08, 3 : 1.01} +bus_voltage_min = {0: 0.85, 1: 0.85, 2: 0.92, 3: 0.99} +bus_voltage_max = {0: 1.15, 1: 1.15, 2: 1.08, 3: 1.01} branch_tap_min = 0.85 branch_tap_max = 1.15 @@ -74,108 +77,244 @@ class Branch: nbus = len(bus) nbranch = len(branch) -in_lines = [ [] for i in range(nbus) ] -out_lines = [ [] for i in range(nbus) ] +in_lines = [[] for i in range(nbus)] +out_lines = [[] for i in range(nbus)] for i in range(nbranch): b = branch[i] out_lines[b.frm].append(i) in_lines[b.to].append(i) - assert(b.to >= 0 and b.to < nbus) + assert b.to >= 0 and b.to < nbus model = ConcreteModel() -model.bus_voltage = Var(range(nbus),bounds = lambda model,i : (bus_voltage_min[bus[i].bustype], bus_voltage_max[bus[i].bustype]), initialize=1) -model.bus_b_shunt = Var(range(nbus),bounds = lambda model,i : (bus[i].b_shunt_min, bus[i].b_shunt_max), initialize = lambda model,i : bus[i].b_shunt0) +model.bus_voltage = Var( + range(nbus), + bounds=lambda model, i: ( + bus_voltage_min[bus[i].bustype], + bus_voltage_max[bus[i].bustype], + ), + initialize=1, +) +model.bus_b_shunt = Var( + range(nbus), + bounds=lambda model, i: (bus[i].b_shunt_min, bus[i].b_shunt_max), + initialize=lambda model, i: bus[i].b_shunt0, +) model.bus_angle = Var(range(nbus), initialize=0) -model.branch_tap = Var(range(nbranch), bounds=(branch_tap_min, branch_tap_max), initialize=1) -model.branch_def = Var(range(nbranch), bounds=lambda model,i: (branch[i].def_min, branch[i].def_max), initialize = lambda model,i : branch[i].def0) +model.branch_tap = Var( + range(nbranch), bounds=(branch_tap_min, branch_tap_max), initialize=1 +) +model.branch_def = Var( + range(nbranch), + bounds=lambda model, i: (branch[i].def_min, branch[i].def_max), + initialize=lambda model, i: branch[i].def0, +) + def Gself(k): - return bus[k].g_shunt + sum(branch[i].g*model.branch_tap[i]**2 for i in out_lines[k]) + sum(branch[i].g for i in in_lines[k]) + return ( + bus[k].g_shunt + + sum(branch[i].g * model.branch_tap[i] ** 2 for i in out_lines[k]) + + sum(branch[i].g for i in in_lines[k]) + ) + def Gout(i): - return (-branch[i].g*cos(model.branch_def[i])+branch[i].b*sin(model.branch_def[i]))*model.branch_tap[i] + return ( + -branch[i].g * cos(model.branch_def[i]) + branch[i].b * sin(model.branch_def[i]) + ) * model.branch_tap[i] + def Gin(i): - return (-branch[i].g*cos(model.branch_def[i])-branch[i].b*sin(model.branch_def[i]))*model.branch_tap[i] + return ( + -branch[i].g * cos(model.branch_def[i]) - branch[i].b * sin(model.branch_def[i]) + ) * model.branch_tap[i] + def Bself(k): - return model.bus_b_shunt[k] + sum(branch[i].b*model.branch_tap[i]**2 + branch[i].c/2 for i in out_lines[k]) + sum(branch[i].b + branch[i].c/2 for i in in_lines[k]) + return ( + model.bus_b_shunt[k] + + sum( + branch[i].b * model.branch_tap[i] ** 2 + branch[i].c / 2 + for i in out_lines[k] + ) + + sum(branch[i].b + branch[i].c / 2 for i in in_lines[k]) + ) + def Bin(i): - return (branch[i].g*sin(model.branch_def[i])-branch[i].b*cos(model.branch_def[i]))*model.branch_tap[i] + return ( + branch[i].g * sin(model.branch_def[i]) - branch[i].b * cos(model.branch_def[i]) + ) * model.branch_tap[i] + def Bout(i): - return (-branch[i].g*sin(model.branch_def[i])-branch[i].b*cos(model.branch_def[i]))*model.branch_tap[i] - -model.obj = Objective(expr = sum( \ - (bus[k].p_load + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + \ - Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm])) \ - for i in in_lines[k]) + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + \ - Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to])) \ - for i in out_lines[k]) + \ - model.bus_voltage[k]**2*Gself(k) )**2 \ - for k in range(nbus) if bus[k].bustype == 2 or bus[k].bustype == 3)) + return ( + -branch[i].g * sin(model.branch_def[i]) - branch[i].b * cos(model.branch_def[i]) + ) * model.branch_tap[i] + + +model.obj = Objective( + expr=sum( + ( + bus[k].p_load + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + + Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + + Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + + model.bus_voltage[k] ** 2 * Gself(k) + ) + ** 2 + for k in range(nbus) + if bus[k].bustype == 2 or bus[k].bustype == 3 + ) +) + def p_load_rule(model, k): if bus[k].bustype != 0: return Constraint.Skip - - return bus[k].p_gen - bus[k].p_load - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm]) + Bin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to]) + Bout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) - \ - model.bus_voltage[k]**2*Gself(k) == 0 + + return ( + bus[k].p_gen + - bus[k].p_load + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + + Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + + Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + - model.bus_voltage[k] ** 2 * Gself(k) + == 0 + ) + model.p_load_constr = Constraint(range(nbus), rule=p_load_rule) + def q_load_rule(model, k): if bus[k].bustype != 0: return Constraint.Skip - - return bus[k].q_gen - bus[k].q_load - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm]) - Bin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to]) - Bout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) \ - + model.bus_voltage[k]**2*Bself(k) == 0 + + return ( + bus[k].q_gen + - bus[k].q_load + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + - Bin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + - Bout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + + model.bus_voltage[k] ** 2 * Bself(k) + == 0 + ) + model.q_load_constr = Constraint(range(nbus), rule=q_load_rule) + def q_inj_rule(model, k): if not (bus[k].bustype == 2 or bus[k].bustype == 3): return Constraint.Skip - - return (bus[k].q_min, \ - bus[k].q_load + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm]) - Bin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to]) - Bout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) - \ - model.bus_voltage[k]**2*Bself(k), \ - bus[k].q_max) + + return ( + bus[k].q_min, + bus[k].q_load + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + - Bin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + - Bout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + - model.bus_voltage[k] ** 2 * Bself(k), + bus[k].q_max, + ) + model.q_inj_rule = Constraint(range(nbus), rule=q_inj_rule) + def p_inj_rule(model, k): if not (bus[k].bustype == 2 or bus[k].bustype == 3): return Constraint.Skip - - return (0, \ - bus[k].p_load + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm]) + Bin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to]) + Bout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) + \ - model.bus_voltage[k]**2*Gself(k),\ - p_gen_upper*bus[k].p_gen) + + return ( + 0, + bus[k].p_load + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + + Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + + Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + + model.bus_voltage[k] ** 2 * Gself(k), + p_gen_upper * bus[k].p_gen, + ) + model.p_inj_rule = Constraint(range(nbus), rule=p_inj_rule) diff --git a/examples/performance/jump/opf_6620bus.py b/examples/performance/jump/opf_6620bus.py index d89fc4985c2..64348ae931e 100644 --- a/examples/performance/jump/opf_6620bus.py +++ b/examples/performance/jump/opf_6620bus.py @@ -1,15 +1,18 @@ from pyomo.environ import * + class Bus: pass + class Branch: pass + bus = [] busmap = {} busfile = open("IEEE6620.bus", "r") -for i,line in enumerate(busfile): +for i, line in enumerate(busfile): sp = line.split() b = Bus() busmap[sp[0]] = i @@ -40,7 +43,7 @@ class Branch: branchfile = open("IEEE6620.branch", "r") branch = [] -for i,line in enumerate(branchfile): +for i, line in enumerate(branchfile): sp = line.split() b = Branch() b.frm = busmap[sp[1]] @@ -57,14 +60,14 @@ class Branch: b.def_max = float(sp[12]) b.g = b.r / (b.r**2 + b.x**2) b.b = -b.x / (b.r**2 + b.x**2) - b.def_min *= 3.14159/180 - b.def_max *= 3.14159/180 - b.def0 *= -3.14159/180 + b.def_min *= 3.14159 / 180 + b.def_max *= 3.14159 / 180 + b.def0 *= -3.14159 / 180 branch.append(b) -bus_voltage_min = {0 : 0.85, 1 : 0.85, 2 : 0.92, 3 : 0.99} -bus_voltage_max = {0 : 1.15, 1 : 1.15, 2 : 1.08, 3 : 1.01} +bus_voltage_min = {0: 0.85, 1: 0.85, 2: 0.92, 3: 0.99} +bus_voltage_max = {0: 1.15, 1: 1.15, 2: 1.08, 3: 1.01} branch_tap_min = 0.85 branch_tap_max = 1.15 @@ -74,108 +77,244 @@ class Branch: nbus = len(bus) nbranch = len(branch) -in_lines = [ [] for i in range(nbus) ] -out_lines = [ [] for i in range(nbus) ] +in_lines = [[] for i in range(nbus)] +out_lines = [[] for i in range(nbus)] for i in range(nbranch): b = branch[i] out_lines[b.frm].append(i) in_lines[b.to].append(i) - assert(b.to >= 0 and b.to < nbus) + assert b.to >= 0 and b.to < nbus model = ConcreteModel() -model.bus_voltage = Var(range(nbus),bounds = lambda model,i : (bus_voltage_min[bus[i].bustype], bus_voltage_max[bus[i].bustype]), initialize=1) -model.bus_b_shunt = Var(range(nbus),bounds = lambda model,i : (bus[i].b_shunt_min, bus[i].b_shunt_max), initialize = lambda model,i : bus[i].b_shunt0) +model.bus_voltage = Var( + range(nbus), + bounds=lambda model, i: ( + bus_voltage_min[bus[i].bustype], + bus_voltage_max[bus[i].bustype], + ), + initialize=1, +) +model.bus_b_shunt = Var( + range(nbus), + bounds=lambda model, i: (bus[i].b_shunt_min, bus[i].b_shunt_max), + initialize=lambda model, i: bus[i].b_shunt0, +) model.bus_angle = Var(range(nbus), initialize=0) -model.branch_tap = Var(range(nbranch), bounds=(branch_tap_min, branch_tap_max), initialize=1) -model.branch_def = Var(range(nbranch), bounds=lambda model,i: (branch[i].def_min, branch[i].def_max), initialize = lambda model,i : branch[i].def0) +model.branch_tap = Var( + range(nbranch), bounds=(branch_tap_min, branch_tap_max), initialize=1 +) +model.branch_def = Var( + range(nbranch), + bounds=lambda model, i: (branch[i].def_min, branch[i].def_max), + initialize=lambda model, i: branch[i].def0, +) + def Gself(k): - return bus[k].g_shunt + sum(branch[i].g*model.branch_tap[i]**2 for i in out_lines[k]) + sum(branch[i].g for i in in_lines[k]) + return ( + bus[k].g_shunt + + sum(branch[i].g * model.branch_tap[i] ** 2 for i in out_lines[k]) + + sum(branch[i].g for i in in_lines[k]) + ) + def Gout(i): - return (-branch[i].g*cos(model.branch_def[i])+branch[i].b*sin(model.branch_def[i]))*model.branch_tap[i] + return ( + -branch[i].g * cos(model.branch_def[i]) + branch[i].b * sin(model.branch_def[i]) + ) * model.branch_tap[i] + def Gin(i): - return (-branch[i].g*cos(model.branch_def[i])-branch[i].b*sin(model.branch_def[i]))*model.branch_tap[i] + return ( + -branch[i].g * cos(model.branch_def[i]) - branch[i].b * sin(model.branch_def[i]) + ) * model.branch_tap[i] + def Bself(k): - return model.bus_b_shunt[k] + sum(branch[i].b*model.branch_tap[i]**2 + branch[i].c/2 for i in out_lines[k]) + sum(branch[i].b + branch[i].c/2 for i in in_lines[k]) + return ( + model.bus_b_shunt[k] + + sum( + branch[i].b * model.branch_tap[i] ** 2 + branch[i].c / 2 + for i in out_lines[k] + ) + + sum(branch[i].b + branch[i].c / 2 for i in in_lines[k]) + ) + def Bin(i): - return (branch[i].g*sin(model.branch_def[i])-branch[i].b*cos(model.branch_def[i]))*model.branch_tap[i] + return ( + branch[i].g * sin(model.branch_def[i]) - branch[i].b * cos(model.branch_def[i]) + ) * model.branch_tap[i] + def Bout(i): - return (-branch[i].g*sin(model.branch_def[i])-branch[i].b*cos(model.branch_def[i]))*model.branch_tap[i] - -model.obj = Objective(expr = sum( \ - (bus[k].p_load + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + \ - Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm])) \ - for i in in_lines[k]) + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + \ - Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to])) \ - for i in out_lines[k]) + \ - model.bus_voltage[k]**2*Gself(k) )**2 \ - for k in range(nbus) if bus[k].bustype == 2 or bus[k].bustype == 3)) + return ( + -branch[i].g * sin(model.branch_def[i]) - branch[i].b * cos(model.branch_def[i]) + ) * model.branch_tap[i] + + +model.obj = Objective( + expr=sum( + ( + bus[k].p_load + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + + Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + + Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + + model.bus_voltage[k] ** 2 * Gself(k) + ) + ** 2 + for k in range(nbus) + if bus[k].bustype == 2 or bus[k].bustype == 3 + ) +) + def p_load_rule(model, k): if bus[k].bustype != 0: return Constraint.Skip - - return bus[k].p_gen - bus[k].p_load - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm]) + Bin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to]) + Bout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) - \ - model.bus_voltage[k]**2*Gself(k) == 0 + + return ( + bus[k].p_gen + - bus[k].p_load + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + + Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + + Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + - model.bus_voltage[k] ** 2 * Gself(k) + == 0 + ) + model.p_load_constr = Constraint(range(nbus), rule=p_load_rule) + def q_load_rule(model, k): if bus[k].bustype != 0: return Constraint.Skip - - return bus[k].q_gen - bus[k].q_load - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm]) - Bin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to]) - Bout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) \ - + model.bus_voltage[k]**2*Bself(k) == 0 + + return ( + bus[k].q_gen + - bus[k].q_load + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + - Bin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + - Bout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + + model.bus_voltage[k] ** 2 * Bself(k) + == 0 + ) + model.q_load_constr = Constraint(range(nbus), rule=q_load_rule) + def q_inj_rule(model, k): if not (bus[k].bustype == 2 or bus[k].bustype == 3): return Constraint.Skip - - return (bus[k].q_min, \ - bus[k].q_load + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm]) - Bin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to]) - Bout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) - \ - model.bus_voltage[k]**2*Bself(k), \ - bus[k].q_max) + + return ( + bus[k].q_min, + bus[k].q_load + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + - Bin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + - Bout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + - model.bus_voltage[k] ** 2 * Bself(k), + bus[k].q_max, + ) + model.q_inj_rule = Constraint(range(nbus), rule=q_inj_rule) + def p_inj_rule(model, k): if not (bus[k].bustype == 2 or bus[k].bustype == 3): return Constraint.Skip - - return (0, \ - bus[k].p_load + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm]) + Bin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to]) + Bout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) + \ - model.bus_voltage[k]**2*Gself(k),\ - p_gen_upper*bus[k].p_gen) + + return ( + 0, + bus[k].p_load + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + + Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + + Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + + model.bus_voltage[k] ** 2 * Gself(k), + p_gen_upper * bus[k].p_gen, + ) + model.p_inj_rule = Constraint(range(nbus), rule=p_inj_rule) diff --git a/examples/performance/jump/opf_662bus.py b/examples/performance/jump/opf_662bus.py index e205287f385..6ff97c577e3 100644 --- a/examples/performance/jump/opf_662bus.py +++ b/examples/performance/jump/opf_662bus.py @@ -1,15 +1,18 @@ from pyomo.environ import * + class Bus: pass + class Branch: pass + bus = [] busmap = {} busfile = open("IEEE662.bus", "r") -for i,line in enumerate(busfile): +for i, line in enumerate(busfile): sp = line.split() b = Bus() busmap[sp[0]] = i @@ -40,7 +43,7 @@ class Branch: branchfile = open("IEEE662.branch", "r") branch = [] -for i,line in enumerate(branchfile): +for i, line in enumerate(branchfile): sp = line.split() b = Branch() b.frm = busmap[sp[1]] @@ -57,14 +60,14 @@ class Branch: b.def_max = float(sp[12]) b.g = b.r / (b.r**2 + b.x**2) b.b = -b.x / (b.r**2 + b.x**2) - b.def_min *= 3.14159/180 - b.def_max *= 3.14159/180 - b.def0 *= -3.14159/180 + b.def_min *= 3.14159 / 180 + b.def_max *= 3.14159 / 180 + b.def0 *= -3.14159 / 180 branch.append(b) -bus_voltage_min = {0 : 0.85, 1 : 0.85, 2 : 0.92, 3 : 0.99} -bus_voltage_max = {0 : 1.15, 1 : 1.15, 2 : 1.08, 3 : 1.01} +bus_voltage_min = {0: 0.85, 1: 0.85, 2: 0.92, 3: 0.99} +bus_voltage_max = {0: 1.15, 1: 1.15, 2: 1.08, 3: 1.01} branch_tap_min = 0.85 branch_tap_max = 1.15 @@ -74,108 +77,244 @@ class Branch: nbus = len(bus) nbranch = len(branch) -in_lines = [ [] for i in range(nbus) ] -out_lines = [ [] for i in range(nbus) ] +in_lines = [[] for i in range(nbus)] +out_lines = [[] for i in range(nbus)] for i in range(nbranch): b = branch[i] out_lines[b.frm].append(i) in_lines[b.to].append(i) - assert(b.to >= 0 and b.to < nbus) + assert b.to >= 0 and b.to < nbus model = ConcreteModel() -model.bus_voltage = Var(range(nbus),bounds = lambda model,i : (bus_voltage_min[bus[i].bustype], bus_voltage_max[bus[i].bustype]), initialize=1) -model.bus_b_shunt = Var(range(nbus),bounds = lambda model,i : (bus[i].b_shunt_min, bus[i].b_shunt_max), initialize = lambda model,i : bus[i].b_shunt0) +model.bus_voltage = Var( + range(nbus), + bounds=lambda model, i: ( + bus_voltage_min[bus[i].bustype], + bus_voltage_max[bus[i].bustype], + ), + initialize=1, +) +model.bus_b_shunt = Var( + range(nbus), + bounds=lambda model, i: (bus[i].b_shunt_min, bus[i].b_shunt_max), + initialize=lambda model, i: bus[i].b_shunt0, +) model.bus_angle = Var(range(nbus), initialize=0) -model.branch_tap = Var(range(nbranch), bounds=(branch_tap_min, branch_tap_max), initialize=1) -model.branch_def = Var(range(nbranch), bounds=lambda model,i: (branch[i].def_min, branch[i].def_max), initialize = lambda model,i : branch[i].def0) +model.branch_tap = Var( + range(nbranch), bounds=(branch_tap_min, branch_tap_max), initialize=1 +) +model.branch_def = Var( + range(nbranch), + bounds=lambda model, i: (branch[i].def_min, branch[i].def_max), + initialize=lambda model, i: branch[i].def0, +) + def Gself(k): - return bus[k].g_shunt + sum(branch[i].g*model.branch_tap[i]**2 for i in out_lines[k]) + sum(branch[i].g for i in in_lines[k]) + return ( + bus[k].g_shunt + + sum(branch[i].g * model.branch_tap[i] ** 2 for i in out_lines[k]) + + sum(branch[i].g for i in in_lines[k]) + ) + def Gout(i): - return (-branch[i].g*cos(model.branch_def[i])+branch[i].b*sin(model.branch_def[i]))*model.branch_tap[i] + return ( + -branch[i].g * cos(model.branch_def[i]) + branch[i].b * sin(model.branch_def[i]) + ) * model.branch_tap[i] + def Gin(i): - return (-branch[i].g*cos(model.branch_def[i])-branch[i].b*sin(model.branch_def[i]))*model.branch_tap[i] + return ( + -branch[i].g * cos(model.branch_def[i]) - branch[i].b * sin(model.branch_def[i]) + ) * model.branch_tap[i] + def Bself(k): - return model.bus_b_shunt[k] + sum(branch[i].b*model.branch_tap[i]**2 + branch[i].c/2 for i in out_lines[k]) + sum(branch[i].b + branch[i].c/2 for i in in_lines[k]) + return ( + model.bus_b_shunt[k] + + sum( + branch[i].b * model.branch_tap[i] ** 2 + branch[i].c / 2 + for i in out_lines[k] + ) + + sum(branch[i].b + branch[i].c / 2 for i in in_lines[k]) + ) + def Bin(i): - return (branch[i].g*sin(model.branch_def[i])-branch[i].b*cos(model.branch_def[i]))*model.branch_tap[i] + return ( + branch[i].g * sin(model.branch_def[i]) - branch[i].b * cos(model.branch_def[i]) + ) * model.branch_tap[i] + def Bout(i): - return (-branch[i].g*sin(model.branch_def[i])-branch[i].b*cos(model.branch_def[i]))*model.branch_tap[i] - -model.obj = Objective(expr = sum( \ - (bus[k].p_load + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + \ - Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm])) \ - for i in in_lines[k]) + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + \ - Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to])) \ - for i in out_lines[k]) + \ - model.bus_voltage[k]**2*Gself(k) )**2 \ - for k in range(nbus) if bus[k].bustype == 2 or bus[k].bustype == 3)) + return ( + -branch[i].g * sin(model.branch_def[i]) - branch[i].b * cos(model.branch_def[i]) + ) * model.branch_tap[i] + + +model.obj = Objective( + expr=sum( + ( + bus[k].p_load + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + + Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + + Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + + model.bus_voltage[k] ** 2 * Gself(k) + ) + ** 2 + for k in range(nbus) + if bus[k].bustype == 2 or bus[k].bustype == 3 + ) +) + def p_load_rule(model, k): if bus[k].bustype != 0: return Constraint.Skip - - return bus[k].p_gen - bus[k].p_load - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm]) + Bin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to]) + Bout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) - \ - model.bus_voltage[k]**2*Gself(k) == 0 + + return ( + bus[k].p_gen + - bus[k].p_load + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + + Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + + Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + - model.bus_voltage[k] ** 2 * Gself(k) + == 0 + ) + model.p_load_constr = Constraint(range(nbus), rule=p_load_rule) + def q_load_rule(model, k): if bus[k].bustype != 0: return Constraint.Skip - - return bus[k].q_gen - bus[k].q_load - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm]) - Bin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) - \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to]) - Bout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) \ - + model.bus_voltage[k]**2*Bself(k) == 0 + + return ( + bus[k].q_gen + - bus[k].q_load + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + - Bin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + - sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + - Bout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + + model.bus_voltage[k] ** 2 * Bself(k) + == 0 + ) + model.q_load_constr = Constraint(range(nbus), rule=q_load_rule) + def q_inj_rule(model, k): if not (bus[k].bustype == 2 or bus[k].bustype == 3): return Constraint.Skip - - return (bus[k].q_min, \ - bus[k].q_load + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm]) - Bin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to]) - Bout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) - \ - model.bus_voltage[k]**2*Bself(k), \ - bus[k].q_max) + + return ( + bus[k].q_min, + bus[k].q_load + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + - Bin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + - Bout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + - model.bus_voltage[k] ** 2 * Bself(k), + bus[k].q_max, + ) + model.q_inj_rule = Constraint(range(nbus), rule=q_inj_rule) + def p_inj_rule(model, k): if not (bus[k].bustype == 2 or bus[k].bustype == 3): return Constraint.Skip - - return (0, \ - bus[k].p_load + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].frm] * \ - (Gin(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].frm]) + Bin(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].frm])) for i in in_lines[k]) + \ - sum( model.bus_voltage[k]*model.bus_voltage[branch[i].to] * \ - (Gout(i)*cos(model.bus_angle[k]-model.bus_angle[branch[i].to]) + Bout(i)*sin(model.bus_angle[k]-model.bus_angle[branch[i].to])) for i in out_lines[k]) + \ - model.bus_voltage[k]**2*Gself(k),\ - p_gen_upper*bus[k].p_gen) + + return ( + 0, + bus[k].p_load + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].frm] + * ( + Gin(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + + Bin(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].frm]) + ) + for i in in_lines[k] + ) + + sum( + model.bus_voltage[k] + * model.bus_voltage[branch[i].to] + * ( + Gout(i) * cos(model.bus_angle[k] - model.bus_angle[branch[i].to]) + + Bout(i) * sin(model.bus_angle[k] - model.bus_angle[branch[i].to]) + ) + for i in out_lines[k] + ) + + model.bus_voltage[k] ** 2 * Gself(k), + p_gen_upper * bus[k].p_gen, + ) + model.p_inj_rule = Constraint(range(nbus), rule=p_inj_rule) diff --git a/examples/performance/misc/bilinear1_100.py b/examples/performance/misc/bilinear1_100.py index d6bc3e5cc5e..e68fbba6283 100644 --- a/examples/performance/misc/bilinear1_100.py +++ b/examples/performance/misc/bilinear1_100.py @@ -1,20 +1,21 @@ from pyomo.environ import * -def create_model(N): +def create_model(N): model = ConcreteModel() model.A = RangeSet(N) - model.x = Var(model.A, bounds=(1,2)) + model.x = Var(model.A, bounds=(1, 2)) - expr=0 + expr = 0 for i in model.A: - if not (i+1) in model.A: + if not (i + 1) in model.A: continue - expr += i*(model.x[i]*model.x[i+1]+1) + expr += i * (model.x[i] * model.x[i + 1] + 1) model.obj = Objective(expr=expr) return model + def pyomo_create_model(options=None, model_options=None): return create_model(100) diff --git a/examples/performance/misc/bilinear1_100000.py b/examples/performance/misc/bilinear1_100000.py index 6034cac2931..924d7233d24 100644 --- a/examples/performance/misc/bilinear1_100000.py +++ b/examples/performance/misc/bilinear1_100000.py @@ -1,20 +1,21 @@ from pyomo.environ import * -def create_model(N): +def create_model(N): model = ConcreteModel() model.A = RangeSet(N) - model.x = Var(model.A, bounds=(1,2)) + model.x = Var(model.A, bounds=(1, 2)) - expr=0 + expr = 0 for i in model.A: - if not (i+1) in model.A: + if not (i + 1) in model.A: continue - expr += i*(model.x[i]*model.x[i+1]+1) + expr += i * (model.x[i] * model.x[i + 1] + 1) model.obj = Objective(expr=expr) return model + def pyomo_create_model(options=None, model_options=None): return create_model(100000) diff --git a/examples/performance/misc/bilinear2_100.py b/examples/performance/misc/bilinear2_100.py index 2997631e727..4dd9f9ead57 100644 --- a/examples/performance/misc/bilinear2_100.py +++ b/examples/performance/misc/bilinear2_100.py @@ -1,20 +1,21 @@ from pyomo.environ import * -def create_model(N): +def create_model(N): model = ConcreteModel() model.A = RangeSet(N) - model.x = Var(model.A, bounds=(1,2)) + model.x = Var(model.A, bounds=(1, 2)) with nonlinear_expression as expr: for i in model.A: - if not (i+1) in model.A: + if not (i + 1) in model.A: continue - expr += i*(model.x[i]*model.x[i+1]+1) + expr += i * (model.x[i] * model.x[i + 1] + 1) model.obj = Objective(expr=expr) return model + def pyomo_create_model(options=None, model_options=None): return create_model(100) diff --git a/examples/performance/misc/bilinear2_100000.py b/examples/performance/misc/bilinear2_100000.py index c999ab5f081..90eeaf82271 100644 --- a/examples/performance/misc/bilinear2_100000.py +++ b/examples/performance/misc/bilinear2_100000.py @@ -1,20 +1,21 @@ from pyomo.environ import * -def create_model(N): +def create_model(N): model = ConcreteModel() model.A = RangeSet(N) - model.x = Var(model.A, bounds=(1,2)) + model.x = Var(model.A, bounds=(1, 2)) with nonlinear_expression as expr: for i in model.A: - if not (i+1) in model.A: + if not (i + 1) in model.A: continue - expr += i*(model.x[i]*model.x[i+1]+1) + expr += i * (model.x[i] * model.x[i + 1] + 1) model.obj = Objective(expr=expr) return model + def pyomo_create_model(options=None, model_options=None): return create_model(100000) diff --git a/examples/performance/misc/diag1_100.py b/examples/performance/misc/diag1_100.py index 7f791d18f2d..e47a9179974 100644 --- a/examples/performance/misc/diag1_100.py +++ b/examples/performance/misc/diag1_100.py @@ -1,19 +1,22 @@ from pyomo.environ import * + def create_model(N): model = ConcreteModel() model.A = RangeSet(N) model.x = Var(model.A) - expr=sum(i*model.x[i] for i in model.A) + expr = sum(i * model.x[i] for i in model.A) model.obj = Objective(expr=expr) def c_rule(model, i): - return (N-i+1)*model.x[i] >= N + return (N - i + 1) * model.x[i] >= N + model.c = Constraint(model.A) return model + def pyomo_create_model(options=None, model_options=None): return create_model(100) diff --git a/examples/performance/misc/diag1_100000.py b/examples/performance/misc/diag1_100000.py index c690f375dad..a110c0d9d67 100644 --- a/examples/performance/misc/diag1_100000.py +++ b/examples/performance/misc/diag1_100000.py @@ -1,19 +1,22 @@ from pyomo.environ import * + def create_model(N): model = ConcreteModel() model.A = RangeSet(N) model.x = Var(model.A) - expr=sum(i*model.x[i] for i in model.A) + expr = sum(i * model.x[i] for i in model.A) model.obj = Objective(expr=expr) def c_rule(model, i): - return (N-i+1)*model.x[i] >= N + return (N - i + 1) * model.x[i] >= N + model.c = Constraint(model.A) return model + def pyomo_create_model(options=None, model_options=None): return create_model(100000) diff --git a/examples/performance/misc/diag2_100.py b/examples/performance/misc/diag2_100.py index abe47d625cf..fe820e8590b 100644 --- a/examples/performance/misc/diag2_100.py +++ b/examples/performance/misc/diag2_100.py @@ -1,19 +1,22 @@ from pyomo.environ import * + def create_model(N): model = ConcreteModel() model.A = RangeSet(N) model.x = Var(model.A) - expr = Sum(i*model.x[i] for i in model.A) + expr = Sum(i * model.x[i] for i in model.A) model.obj = Objective(expr=expr) def c_rule(model, i): - return (N-i+1)*model.x[i] >= N + return (N - i + 1) * model.x[i] >= N + model.c = Constraint(model.A) return model + def pyomo_create_model(options=None, model_options=None): return create_model(100) diff --git a/examples/performance/misc/diag2_100000.py b/examples/performance/misc/diag2_100000.py index 100e378caae..38563de57b9 100644 --- a/examples/performance/misc/diag2_100000.py +++ b/examples/performance/misc/diag2_100000.py @@ -1,19 +1,22 @@ from pyomo.environ import * + def create_model(N): model = ConcreteModel() model.A = RangeSet(N) model.x = Var(model.A) - expr = Sum(i*model.x[i] for i in model.A) + expr = Sum(i * model.x[i] for i in model.A) model.obj = Objective(expr=expr) def c_rule(model, i): - return (N-i+1)*model.x[i] >= N + return (N - i + 1) * model.x[i] >= N + model.c = Constraint(model.A) return model + def pyomo_create_model(options=None, model_options=None): return create_model(100000) diff --git a/examples/performance/misc/set1.py b/examples/performance/misc/set1.py index 33c9301a53f..53227a3ee73 100644 --- a/examples/performance/misc/set1.py +++ b/examples/performance/misc/set1.py @@ -4,16 +4,22 @@ model.d = Param(default=10) + def A_rule(model): - return range(0,value(model.d)) + return range(0, value(model.d)) + + model.A = Set() + def B_rule(model): - return range(1,value(model.d)+1) + return range(1, value(model.d) + 1) + + model.B = Set() -if 1>0: - model.X1 = model.A*model.B +if 1 > 0: + model.X1 = model.A * model.B model.X2 = model.A | model.B model.X3 = model.A ^ model.B model.X4 = model.B - model.A @@ -22,17 +28,17 @@ def B_rule(model): model.Y = Set(initialize=model.B - model.A) model.Y.add('foo') -if 1>0: +if 1 > 0: instance = model.create() - print "X1", len(instance.X1) - print instance.X1.data() - print "X2", len(instance.X2) - print instance.X2.data() - print "X3", len(instance.X3) - print instance.X3.data() - print "X4", len(instance.X4) - print instance.X4.data() - print "X5", len(instance.X5) - print instance.X5.data() - - print instance.Y.data() + print("X1", len(instance.X1)) + print(instance.X1.data()) + print("X2", len(instance.X2)) + print(instance.X2.data()) + print("X3", len(instance.X3)) + print(instance.X3.data()) + print("X4", len(instance.X4)) + print(instance.X4.data()) + print("X5", len(instance.X5)) + print(instance.X5.data()) + + print(instance.Y.data()) diff --git a/examples/performance/misc/sparse1.py b/examples/performance/misc/sparse1.py index 0026f9fc94f..264862760f9 100644 --- a/examples/performance/misc/sparse1.py +++ b/examples/performance/misc/sparse1.py @@ -9,13 +9,16 @@ def f(N): M.A = Set(initialize=range(N)) M.x = Var() M.o = Objective(expr=M.x) + def rule(m, i): if i == 3 or i == 5: return M.x >= i return Constraint.Skip + M.c = Constraint(M.A, rule=rule) return M + # # Generation of this model is slow because set M.A is big # diff --git a/examples/performance/pmedian/pmedian1.py b/examples/performance/pmedian/pmedian1.py index dbb883e59bf..3d3f6c5407f 100644 --- a/examples/performance/pmedian/pmedian1.py +++ b/examples/performance/pmedian/pmedian1.py @@ -12,6 +12,7 @@ from pyomo.environ import * + def pyomo_create_model(options=None, model_options=None): import random @@ -21,34 +22,47 @@ def pyomo_create_model(options=None, model_options=None): model.N = Param(within=PositiveIntegers) - model.Locations = RangeSet(1,model.N) + model.Locations = RangeSet(1, model.N) - model.P = Param(within=RangeSet(1,model.N)) + model.P = Param(within=RangeSet(1, model.N)) model.M = Param(within=PositiveIntegers) - model.Customers = RangeSet(1,model.M) + model.Customers = RangeSet(1, model.M) - model.d = Param(model.Locations, model.Customers, initialize=lambda n, m, model : random.uniform(1.0,2.0), within=Reals) + model.d = Param( + model.Locations, + model.Customers, + initialize=lambda n, m, model: random.uniform(1.0, 2.0), + within=Reals, + ) - model.x = Var(model.Locations, model.Customers, bounds=(0.0,1.0), initialize=0.0) + model.x = Var(model.Locations, model.Customers, bounds=(0.0, 1.0), initialize=0.0) model.y = Var(model.Locations, bounds=(0.0, 1.0), initialize=0.0) def rule(model): - return sum( model.d[n,m]*model.x[n,m] for n in model.Locations for m in model.Customers ) + return sum( + model.d[n, m] * model.x[n, m] + for n in model.Locations + for m in model.Customers + ) + model.obj = Objective(rule=rule) def rule(model, m): - return (sum( model.x[n,m] for n in model.Locations ), 1.0) + return (sum(model.x[n, m] for n in model.Locations), 1.0) + model.single_x = Constraint(model.Customers, rule=rule) - def rule(model, n,m): - return (None, model.x[n,m] - model.y[n], 0.0) + def rule(model, n, m): + return (None, model.x[n, m] - model.y[n], 0.0) + model.bound_y = Constraint(model.Locations, model.Customers, rule=rule) def rule(model): - return (sum( model.y[n] for n in model.Locations ) - model.P, 0.0) + return (sum(model.y[n] for n in model.Locations) - model.P, 0.0) + model.num_facilities = Constraint(rule=rule) return model diff --git a/examples/performance/pmedian/pmedian2.py b/examples/performance/pmedian/pmedian2.py index 006bbf317ff..434ded6dcbc 100644 --- a/examples/performance/pmedian/pmedian2.py +++ b/examples/performance/pmedian/pmedian2.py @@ -12,6 +12,7 @@ from pyomo.environ import * + def pyomo_create_model(options=None, model_options=None): import random @@ -21,34 +22,47 @@ def pyomo_create_model(options=None, model_options=None): model.N = Param(within=PositiveIntegers) - model.Locations = RangeSet(1,model.N) + model.Locations = RangeSet(1, model.N) - model.P = Param(within=RangeSet(1,model.N)) + model.P = Param(within=RangeSet(1, model.N)) model.M = Param(within=PositiveIntegers) - model.Customers = RangeSet(1,model.M) + model.Customers = RangeSet(1, model.M) - model.d = Param(model.Locations, model.Customers, initialize=lambda n, m, model : random.uniform(1.0,2.0), within=Reals) + model.d = Param( + model.Locations, + model.Customers, + initialize=lambda n, m, model: random.uniform(1.0, 2.0), + within=Reals, + ) - model.x = Var(model.Locations, model.Customers, bounds=(0.0,1.0), initialize=0.0) + model.x = Var(model.Locations, model.Customers, bounds=(0.0, 1.0), initialize=0.0) model.y = Var(model.Locations, bounds=(0.0, 1.0), initialize=0.0) def rule(model): - return Sum(model.d[n,m]*model.x[n,m] for n in model.Locations for m in model.Customers) + return Sum( + model.d[n, m] * model.x[n, m] + for n in model.Locations + for m in model.Customers + ) + model.obj = Objective(rule=rule) def rule(model, m): - return (Sum(model.x[n,m] for n in model.Locations), 1.0) + return (Sum(model.x[n, m] for n in model.Locations), 1.0) + model.single_x = Constraint(model.Customers, rule=rule) - def rule(model, n,m): - return (None, model.x[n,m] - model.y[n], 0.0) + def rule(model, n, m): + return (None, model.x[n, m] - model.y[n], 0.0) + model.bound_y = Constraint(model.Locations, model.Customers, rule=rule) def rule(model): return (Sum(model.y[n] for n in model.Locations) - model.P, 0.0) + model.num_facilities = Constraint(rule=rule) return model diff --git a/examples/pyomo/amplbook2/diet.py b/examples/pyomo/amplbook2/diet.py index 3b1cb313adc..8cdffefa20f 100644 --- a/examples/pyomo/amplbook2/diet.py +++ b/examples/pyomo/amplbook2/diet.py @@ -27,29 +27,44 @@ model.f_min = Param(model.FOOD, within=NonNegativeReals) -def f_max_valid (model, value, j): + +def f_max_valid(model, value, j): return value > model.f_min[j] + + model.f_max = Param(model.FOOD, validate=f_max_valid) model.n_min = Param(model.NUTR, within=NonNegativeReals) + def paramn_max(model, value, i): return value > model.n_min[i] + + model.n_max = Param(model.NUTR, validate=paramn_max) model.amt = Param(model.NUTR, model.FOOD, within=NonNegativeReals) + def Buy_bounds(model, i): - return (model.f_min[i],model.f_max[i]) + return (model.f_min[i], model.f_max[i]) + + model.Buy = Var(model.FOOD, bounds=Buy_bounds) + def Objective_rule(model): return sum_product(model.cost, model.Buy) + + model.totalcost = Objective(rule=Objective_rule) + def Diet_rule(model, i): expr = 0 for j in model.FOOD: - expr = expr + model.amt[i,j] * model.Buy[j] + expr = expr + model.amt[i, j] * model.Buy[j] return (model.n_min[i], expr, model.n_max[i]) + + model.Diet = Constraint(model.NUTR, rule=Diet_rule) diff --git a/examples/pyomo/amplbook2/dieti.py b/examples/pyomo/amplbook2/dieti.py index 261ece5da24..0934dcf83c6 100644 --- a/examples/pyomo/amplbook2/dieti.py +++ b/examples/pyomo/amplbook2/dieti.py @@ -27,29 +27,44 @@ model.f_min = Param(model.FOOD, within=NonNegativeReals) -def f_max_valid (model, value, j): + +def f_max_valid(model, value, j): return value > model.f_min[j] + + model.f_max = Param(model.FOOD, validate=f_max_valid) model.n_min = Param(model.NUTR, within=NonNegativeReals) -def paramn_max (model, value, i): + +def paramn_max(model, value, i): return value > model.n_min[i] + + model.n_max = Param(model.NUTR, validate=paramn_max) model.amt = Param(model.NUTR, model.FOOD, within=NonNegativeReals) -def Buy_bounds(model,i): - return (model.f_min[i],model.f_max[i]) + +def Buy_bounds(model, i): + return (model.f_min[i], model.f_max[i]) + + model.Buy = Var(model.FOOD, bounds=Buy_bounds, domain=Integers) + def Objective_rule(model): return sum_product(model.cost, model.Buy) + + model.totalcost = Objective(rule=Objective_rule) + def Diet_rule(model, i): expr = 0 for j in model.FOOD: - expr = expr + model.amt[i,j] * model.Buy[j] + expr = expr + model.amt[i, j] * model.Buy[j] return (model.n_min[i], expr, model.n_max[i]) + + model.Diet = Constraint(model.NUTR, rule=Diet_rule) diff --git a/examples/pyomo/amplbook2/econ2min.py b/examples/pyomo/amplbook2/econ2min.py index 4022f6559ce..0d27df780bb 100644 --- a/examples/pyomo/amplbook2/econ2min.py +++ b/examples/pyomo/amplbook2/econ2min.py @@ -28,31 +28,55 @@ # *********************************** -model.cost = Param(model.ACT, within=PositiveReals, doc='cost per unit of each activity') +model.cost = Param( + model.ACT, within=PositiveReals, doc='cost per unit of each activity' +) -model.demand = Param(model.PROD, within=NonNegativeReals, doc='units of demand for each product') +model.demand = Param( + model.PROD, within=NonNegativeReals, doc='units of demand for each product' +) -model.io = Param(model.PROD, model.ACT, within=NonNegativeReals, doc='units of each product from 1 unit of each activity') +model.io = Param( + model.PROD, + model.ACT, + within=NonNegativeReals, + doc='units of each product from 1 unit of each activity', +) -model.level_min = Param(model.ACT, within=NonNegativeReals, doc='min allowed level for each activity') +model.level_min = Param( + model.ACT, within=NonNegativeReals, doc='min allowed level for each activity' +) -model.level_max = Param(model.ACT, within=NonNegativeReals, doc='max allowed level for each activity') +model.level_max = Param( + model.ACT, within=NonNegativeReals, doc='max allowed level for each activity' +) # *********************************** + def Level_bounds(model, i): return (model.level_min[i], model.level_max[i]) + + model.Level = Var(model.ACT, bounds=Level_bounds, doc='level for each activity') # *********************************** + def Total_Cost_rule(model): return sum_product(model.cost, model.Level) + + model.Total_Cost = Objective(rule=Total_Cost_rule, doc='minimize total cost') + def Demand_rule(model, i): expr = 0 for j in model.ACT: - expr += model.io[i,j] * model.Level[j] + expr += model.io[i, j] * model.Level[j] return model.demand[i] < expr -model.Demand = Constraint(model.PROD, rule=Demand_rule, doc='total level for each activity exceeds demand') + + +model.Demand = Constraint( + model.PROD, rule=Demand_rule, doc='total level for each activity exceeds demand' +) diff --git a/examples/pyomo/amplbook2/econmin.py b/examples/pyomo/amplbook2/econmin.py index 173e58d8e11..84e41107ff2 100644 --- a/examples/pyomo/amplbook2/econmin.py +++ b/examples/pyomo/amplbook2/econmin.py @@ -39,13 +39,19 @@ # *********************************** + def Total_Cost_rule(model): return sum_product(model.cost, model.Level) + + model.Total_Cost = Objective(rule=Total_Cost_rule) + def Demand_rule(model, i): expr = 0 for j in model.ACT: - expr += model.io[i,j] * model.Level[j] + expr += model.io[i, j] * model.Level[j] return expr > model.demand[i] + + model.Demand = Constraint(model.PROD, rule=Demand_rule) diff --git a/examples/pyomo/amplbook2/prod.py b/examples/pyomo/amplbook2/prod.py index 066f6a3b113..74e456e013f 100644 --- a/examples/pyomo/amplbook2/prod.py +++ b/examples/pyomo/amplbook2/prod.py @@ -27,17 +27,26 @@ # Variables model.X = Var(model.P) + # Objective def Objective_rule(model): - return sum([model.c[j]*model.X[j] for j in model.P]) + return sum([model.c[j] * model.X[j] for j in model.P]) + + model.Total_Profit = Objective(rule=Objective_rule, sense=maximize) + # Time Constraint def Time_rule(model): return sum_product(model.X, denom=model.a) <= model.b + + model.Time = Constraint(rule=Time_rule) + # Limit Constraint def Limit_rule(model, j): return (0, model.X[j], model.u[j]) + + model.Limit = Constraint(model.P, rule=Limit_rule) diff --git a/examples/pyomo/amplbook2/steel.py b/examples/pyomo/amplbook2/steel.py index 2fe276220e5..43bea775526 100644 --- a/examples/pyomo/amplbook2/steel.py +++ b/examples/pyomo/amplbook2/steel.py @@ -22,7 +22,7 @@ model.PROD = Set() -model.rate = Param(model.PROD,within=PositiveReals) +model.rate = Param(model.PROD, within=PositiveReals) model.avail = Param(within=NonNegativeReals) @@ -30,20 +30,30 @@ model.market = Param(model.PROD, within=NonNegativeReals) -def Make_bounds(model,i): - return (0,model.market[i]) + +def Make_bounds(model, i): + return (0, model.market[i]) + + model.Make = Var(model.PROD, bounds=Make_bounds) + def Objective_rule(model): return sum_product(model.profit, model.Make) + + model.Total_Profit = Objective(rule=Objective_rule, sense=maximize) + def Time_rule(model): ans = 0 for p in model.PROD: - ans = ans + (1.0/model.rate[p]) * model.Make[p] + ans = ans + (1.0 / model.rate[p]) * model.Make[p] return ans < model.avail + def XTime_rule(model): - return sum_product(model.Make, denom=(model.rate,) ) < model.avail -#model.Time = Constraint(rule=Time_rule) + return sum_product(model.Make, denom=(model.rate,)) < model.avail + + +# model.Time = Constraint(rule=Time_rule) diff --git a/examples/pyomo/amplbook2/steel3.py b/examples/pyomo/amplbook2/steel3.py index f0681a4723b..e9e494b6a1a 100644 --- a/examples/pyomo/amplbook2/steel3.py +++ b/examples/pyomo/amplbook2/steel3.py @@ -32,14 +32,23 @@ model.market = Param(model.PROD, within=NonNegativeReals) + def Make_bounds(model, i): - return (model.commit[i],model.market[i]) + return (model.commit[i], model.market[i]) + + model.Make = Var(model.PROD, bounds=Make_bounds) + def Objective_rule(model): return sum_product(model.profit, model.Make) + + model.totalprofit = Objective(rule=Objective_rule, sense=maximize) + def Time_rule(model): return sum_product(model.Make, denom=(model.rate)) < model.avail + + model.Time = Constraint(rule=Time_rule) diff --git a/examples/pyomo/amplbook2/steel4.py b/examples/pyomo/amplbook2/steel4.py index d88c5210128..b6709e478e9 100644 --- a/examples/pyomo/amplbook2/steel4.py +++ b/examples/pyomo/amplbook2/steel4.py @@ -34,17 +34,26 @@ model.market = Param(model.PROD, within=NonNegativeReals) -def Make_bounds(model,i): - return (model.commit[i],model.market[i]) + +def Make_bounds(model, i): + return (model.commit[i], model.market[i]) + + model.Make = Var(model.PROD, bounds=Make_bounds) -def Objective_rule (model): + +def Objective_rule(model): return sum_product(model.profit, model.Make) + + model.Total_Profit = Objective(rule=Objective_rule, sense=maximize) + def Timelim_rule(model, s): timeexpr = 0 for p in model.PROD: - timeexpr = timeexpr + (1.0/model.rate[p,s]) * model.Make[p] + timeexpr = timeexpr + (1.0 / model.rate[p, s]) * model.Make[p] return timeexpr < model.avail[s] + + model.Time = Constraint(model.STAGE, rule=Timelim_rule) diff --git a/examples/pyomo/benders/master.py b/examples/pyomo/benders/master.py index 69aa55b67e3..a457bf28b06 100644 --- a/examples/pyomo/benders/master.py +++ b/examples/pyomo/benders/master.py @@ -54,8 +54,11 @@ # projected revenue/ton, per scenario. model.revenue = Param(model.PROD, model.WEEKS, model.SCEN, within=NonNegativeReals) + def prob_validator(model, value, s): return (value >= 0) and (value <= 1.0) + + model.prob = Param(model.SCEN, validate=prob_validator) ############################################################################## @@ -64,8 +67,11 @@ def prob_validator(model, value, s): model.Inv1 = Var(model.PROD, within=NonNegativeReals) + def sell_bounds(model, p): return (0, model.market[p, 1]) + + model.Sell1 = Var(model.PROD, within=NonNegativeReals, bounds=sell_bounds) model.Min_Stage2_Profit = Var(within=NonNegativeReals) @@ -74,32 +80,52 @@ def sell_bounds(model, p): model.CUTS = Set(within=PositiveIntegers, ordered=True) -model.time_price = Param(model.TWOPLUSWEEKS, model.SCEN, \ - model.CUTS, default=0.0, mutable=True) +model.time_price = Param( + model.TWOPLUSWEEKS, model.SCEN, model.CUTS, default=0.0, mutable=True +) + +model.bal2_price = Param(model.PROD, model.SCEN, model.CUTS, default=0.0, mutable=True) -model.bal2_price = Param(model.PROD, model.SCEN, model.CUTS, \ - default=0.0, mutable=True) +model.sell_lim_price = Param( + model.PROD, model.TWOPLUSWEEKS, model.SCEN, model.CUTS, default=0.0, mutable=True +) -model.sell_lim_price = Param(model.PROD, model.TWOPLUSWEEKS, \ - model.SCEN, model.CUTS, \ - default=0.0, mutable=True) def time1_rule(model): - return (None, sum([1.0 / model.rate[p] * model.Make1[p] for p in model.PROD]) - model.avail[1], 0.0) + return ( + None, + sum([1.0 / model.rate[p] * model.Make1[p] for p in model.PROD]) + - model.avail[1], + 0.0, + ) + + model.Time1 = Constraint(rule=time1_rule) + def balance1_rule(model, p): - return model.Make1[p] + model.inv0[p] - (model.Sell1[p] + model.Inv1[p]) == 0.0 + return model.Make1[p] + model.inv0[p] - (model.Sell1[p] + model.Inv1[p]) == 0.0 + + model.Balance1 = Constraint(model.PROD, rule=balance1_rule) # cuts are generated on-the-fly, so no rules are necessary. model.Cut_Defn = ConstraintList() + def expected_profit_rule(model): - return sum([model.prob[s] * model.revenue[p, 1, s] * model.Sell1[p] - \ - model.prob[s] * model.prodcost[p] * model.Make1[p] - \ - model.prob[s] * model.invcost[p] * model.Inv1[p] \ - for p in model.PROD for s in model.SCEN]) + \ - model.Min_Stage2_Profit + return ( + sum( + [ + model.prob[s] * model.revenue[p, 1, s] * model.Sell1[p] + - model.prob[s] * model.prodcost[p] * model.Make1[p] + - model.prob[s] * model.invcost[p] * model.Inv1[p] + for p in model.PROD + for s in model.SCEN + ] + ) + + model.Min_Stage2_Profit + ) + model.Expected_Profit = Objective(rule=expected_profit_rule, sense=maximize) diff --git a/examples/pyomo/benders/subproblem.py b/examples/pyomo/benders/subproblem.py index 089ccb7f065..886f71ff321 100644 --- a/examples/pyomo/benders/subproblem.py +++ b/examples/pyomo/benders/subproblem.py @@ -28,17 +28,26 @@ # number of weeks model.T = Param(within=PositiveIntegers) + # derived set containing all valid week indices and subsets of interest. def weeks_rule(model): return list(sequence(model.T())) + + model.WEEKS = Set(initialize=weeks_rule, within=PositiveIntegers) + def two_plus_weeks_rule(model): return list(sequence(2, model.T())) + + model.TWOPLUSWEEKS = Set(initialize=two_plus_weeks_rule, within=PositiveIntegers) + def three_plus_weeks_rule(model): return list(sequence(3, model.T())) + + model.THREEPLUSWEEKS = Set(initialize=three_plus_weeks_rule, within=PositiveIntegers) # tons per hour produced @@ -59,9 +68,12 @@ def three_plus_weeks_rule(model): # projected revenue/ton model.revenue = Param(model.PROD, model.WEEKS, within=NonNegativeReals) + # scenario probability def unit_interval_validate(model, value): return (value >= 0.0) and (value <= 1.0) + + model.prob = Param(validate=unit_interval_validate) # inventory at end of first period. @@ -73,28 +85,58 @@ def unit_interval_validate(model, value): # tons inventoried model.Inv = Var(model.PROD, model.TWOPLUSWEEKS, domain=NonNegativeReals) + # tons sold def sell_bounds(model, p, t): return (0, model.market[p, t]) -model.Sell = Var(model.PROD, model.TWOPLUSWEEKS, within=NonNegativeReals, bounds=sell_bounds) + + +model.Sell = Var( + model.PROD, model.TWOPLUSWEEKS, within=NonNegativeReals, bounds=sell_bounds +) + def time_rule(model, t): - return sum([(1.0 / model.rate[p]) * model.Make[p, t] for p in model.PROD]) - model.avail[t] <= 0.0 + return ( + sum([(1.0 / model.rate[p]) * model.Make[p, t] for p in model.PROD]) + - model.avail[t] + <= 0.0 + ) + + model.Time = Constraint(model.TWOPLUSWEEKS, rule=time_rule) + def balance2_rule(model, p): - return (model.Make[p, 2] + model.inv1[p]) - (model.Sell[p, 2] + model.Inv[p, 2]) == 0.0 + return (model.Make[p, 2] + model.inv1[p]) - ( + model.Sell[p, 2] + model.Inv[p, 2] + ) == 0.0 + + model.Balance2 = Constraint(model.PROD, rule=balance2_rule) + def balance_rule(model, p, t): - return (model.Make[p, t] + model.Inv[p, t-1]) - (model.Sell[p, t] + model.Inv[p, t]) == 0.0 + return (model.Make[p, t] + model.Inv[p, t - 1]) - ( + model.Sell[p, t] + model.Inv[p, t] + ) == 0.0 + + model.Balance = Constraint(model.PROD, model.THREEPLUSWEEKS, rule=balance_rule) + # the manual distribution of model.prob is ugly, but at the moment necessary; Pyomo # expression simplification will be significantly improved in the near-term future. def exp_stage2_profit_rule(model): - return sum([model.prob * model.revenue[p, t] * model.Sell[p, t] - \ - model.prob * model.prodcost[p] * model.Make[p, t] - \ - model.prob * model.invcost[p] * model.Inv[p, t] \ - for p in model.PROD for t in model.TWOPLUSWEEKS]) + return sum( + [ + model.prob * model.revenue[p, t] * model.Sell[p, t] + - model.prob * model.prodcost[p] * model.Make[p, t] + - model.prob * model.invcost[p] * model.Inv[p, t] + for p in model.PROD + for t in model.TWOPLUSWEEKS + ] + ) + + model.Exp_Stage2_Profit = Objective(rule=exp_stage2_profit_rule, sense=maximize) diff --git a/examples/pyomo/callbacks/sc.py b/examples/pyomo/callbacks/sc.py index b6477c1bc29..ce32b0a1074 100644 --- a/examples/pyomo/callbacks/sc.py +++ b/examples/pyomo/callbacks/sc.py @@ -14,8 +14,9 @@ import math import random -def print_model_stats(options,model): - print("-"*40) + +def print_model_stats(options, model): + print("-" * 40) if options is None: print("DEFAULT") else: @@ -26,22 +27,23 @@ def print_model_stats(options,model): colc = {} for i in model.J: colc[i] = 0 - for (i,j) in model.S: - rowc[i] += 1 - colc[j] += 1 + for i, j in model.S: + rowc[i] += 1 + colc[j] += 1 print("Row Counts") s = 0.0 for i in sorted(rowc): s += rowc[i] - print("Average: %s" % str(s/len(rowc))) + print("Average: %s" % str(s / len(rowc))) print("Col Counts") s = 0.0 for i in sorted(colc): s += colc[i] - print("Average: %s" % str(s/len(colc))) + print("Average: %s" % str(s / len(colc))) print("I %d" % len(model.I)) print("J %d" % len(model.J)) - print("-"*40) + print("-" * 40) + def pyomo_create_model(options=None, model_options=None): if model_options is None: @@ -70,15 +72,17 @@ def pyomo_create_model(options=None, model_options=None): p = int(math.ceil(m * 0.7)) else: p = int(math.ceil(m * model_options.rho)) + # def S_rule(model): ans = set() - for j in range(1,n+1): - tmp = list(range(1,m+1)) - random.shuffle( tmp ) - for i in range(0,p): - ans.add( (tmp[i], j) ) + for j in range(1, n + 1): + tmp = list(range(1, m + 1)) + random.shuffle(tmp) + for i in range(0, p): + ans.add((tmp[i], j)) return ans + elif model_options.type == 'fixed_element_coverage': # # p - fixed number of sets that cover each element @@ -90,41 +94,47 @@ def S_rule(model): p = int(math.ceil(n * 0.4)) else: p = int(math.ceil(n * model_options.rho)) + # def S_rule(model): ans = set() - for i in range(1,m+1): - tmp = list(range(1,n+1)) - random.shuffle( tmp ) - for j in range(0,p): - ans.add( (i, tmp[j]) ) + for i in range(1, m + 1): + tmp = list(range(1, n + 1)) + random.shuffle(tmp) + for j in range(0, p): + ans.add((i, tmp[j])) return ans + elif model_options.type == 'fixed_probability': # # rho - probability of selecting element for a set # rho = 0.3 if model_options.rho is None else model_options.rho + # def S_rule(model): ans = set() - for j in range(1,n+1): - for i in range(1,m+1): - if random.uniform(0,1) < rho: - ans.add( (i, j) ) + for j in range(1, n + 1): + for i in range(1, m + 1): + if random.uniform(0, 1) < rho: + ans.add((i, j)) return ans + elif model_options.type == 'fixed_fill': # # rho - |S|/(I*J) # rho = 0.3 if model_options.rho is None else model_options.rho + # def S_rule(model): ans = set() - for j in range(1,n+1): - for i in range(1,m+1): - if random.uniform(0,1) < rho: - ans.add( (i, j) ) + for j in range(1, n + 1): + for i in range(1, m + 1): + if random.uniform(0, 1) < rho: + ans.add((i, j)) return ans + # # CREATE MODEL # @@ -133,15 +143,19 @@ def S_rule(model): # (i,j) in S if element i in set j # model.S = Set(dimen=2, initialize=S_rule) + # # Dynamically create the I and J index sets, since # some rows or columns of S may not be populated. # def I_rule(model): - return set((i for (i,j) in model.S)) + return set((i for (i, j) in model.S)) + model.I = Set(initialize=I_rule) + def J_rule(model): - return set((j for (i,j) in model.S)) + return set((j for (i, j) in model.S)) + model.J = Set(initialize=J_rule) # # Weights @@ -151,11 +165,13 @@ def J_rule(model): # Set selection binary variables # model.x = Var(model.J, within=Binary) + # # Objective # def cost_rule(model): return sum_product(model.w, model.x) + model.cost = Objective(rule=cost_rule) # @@ -164,23 +180,26 @@ def cost_rule(model): def cover_rule(model, i): expr = 0 for j in model.x: - if (i,j) in model.S: + if (i, j) in model.S: expr += model.x[j] # # WEH - this check is not needed, since I is constructed dynamically # - #if expr is 0: - #return Constraint.Skip + # if expr is 0: + # return Constraint.Skip return expr >= 1 + model.cover = Constraint(model.I, rule=cover_rule) # print_model_stats(model_options, model) return model + def test_model(options=None): model = pyomo_create_model(model_options=options) - #print_model_stats(options, model) + # print_model_stats(options, model) + if __name__ == '__main__': test_model() @@ -209,4 +228,3 @@ def test_model(options=None): options.rho = 0.1 test_model(options) # - diff --git a/examples/pyomo/callbacks/sc_callback.py b/examples/pyomo/callbacks/sc_callback.py index e43537215bf..0dae9e1befc 100644 --- a/examples/pyomo/callbacks/sc_callback.py +++ b/examples/pyomo/callbacks/sc_callback.py @@ -13,16 +13,17 @@ from pyomo.core import * from sc import * + @pyomo_callback('solve-callback') def solve_callback(solver, model): - print "CB-Solve" + print("CB-Solve") + @pyomo_callback('cut-callback') def cut_callback(solver, model): - print "CB-Cut" + print("CB-Cut") + @pyomo_callback('node-callback') def node_callback(solver, model): - print "CB-Node" - - + print("CB-Node") diff --git a/examples/pyomo/callbacks/sc_script.py b/examples/pyomo/callbacks/sc_script.py index 946b38dfb02..8e4ade21b51 100644 --- a/examples/pyomo/callbacks/sc_script.py +++ b/examples/pyomo/callbacks/sc_script.py @@ -14,14 +14,21 @@ from pyomo.core import * import sc + model = sc.pyomo_create_model() + def solve_callback(solver, model): - print "CB-Solve" + print("CB-Solve") + + def cut_callback(solver, model): - print "CB-Cut" + print("CB-Cut") + + def node_callback(solver, model): - print "CB-Node" + print("CB-Node") + instance = model.create() @@ -31,5 +38,4 @@ def node_callback(solver, model): opt.set_callback('solve-callback', solve_callback) results = opt.solve(instance, tee=True) -print results - +print(results) diff --git a/examples/pyomo/callbacks/scalability/run.py b/examples/pyomo/callbacks/scalability/run.py index bc5a891ccb9..8465e3f5019 100644 --- a/examples/pyomo/callbacks/scalability/run.py +++ b/examples/pyomo/callbacks/scalability/run.py @@ -15,8 +15,8 @@ random.seed(2384792387) -nsets = [i*1000 for i in range(1,8,4)] -nelts = [i*1000 for i in range(1,61,10)] +nsets = [i * 1000 for i in range(1, 8, 4)] +nelts = [i * 1000 for i in range(1, 61, 10)] seeds = [random.getrandbits(32) for i in range(10)] for seed in seeds: @@ -25,7 +25,14 @@ for m in nsets: for n in nelts: fname = 'scover_%d_%d_%d' % (n, m, seed) - print 'fname',fname - pyomo.scripting.convert.pyomo2lp(args=['--model-options','n=%d m=%d seed=%d type=fixed_element_coverage rho=0.1' % (n,m,seed), '--save-model','%s.lp' % fname, os.path.abspath('../sc.py')]) - - + print('fname', fname) + pyomo.scripting.convert.pyomo2lp( + args=[ + '--model-options', + 'n=%d m=%d seed=%d type=fixed_element_coverage rho=0.1' + % (n, m, seed), + '--save-model', + '%s.lp' % fname, + os.path.abspath('../sc.py'), + ] + ) diff --git a/examples/pyomo/callbacks/tsp.py b/examples/pyomo/callbacks/tsp.py index c1d214cca06..d3e28a98d3f 100644 --- a/examples/pyomo/callbacks/tsp.py +++ b/examples/pyomo/callbacks/tsp.py @@ -16,6 +16,7 @@ import math from pyomo.core import * + def pyomo_create_model(options=None, model_options=None): model = ConcreteModel() # @@ -40,27 +41,37 @@ def pyomo_create_model(options=None, model_options=None): model.N = Param(within=PositiveIntegers, initialize=N) # # Index set for points - model.POINTS = RangeSet(1,model.N) + model.POINTS = RangeSet(1, model.N) + # # (x,y) location def x_rule(model, i): - return x[i-1] + return x[i - 1] + model.x = Param(model.POINTS) + def y_rule(model, i): - return y[i-1] + return y[i - 1] + model.y = Param(model.POINTS) + # # Derived data # # # All points are connected def LINKS_rule(model): - return set([(i,j) for i in model.POINTS for j in model.POINTS if i= quantity + cs.demand[width] = ( + initial_patterns[i + 1][width] * cs.pattern[i + 1] >= quantity + ) cs.obj = pyo.Objective(expr=pyo.quicksum(cs.pattern.values()), sense=pyo.minimize) @@ -70,19 +73,23 @@ def create_base_cutting_stock(demand, W): ks.widths = pyo.Var(demand.keys(), within=pyo.NonNegativeIntegers) - ks.knapsack = pyo.Constraint(expr=pyo.quicksum(width*ks.widths[width] for width in demand) <= W) + ks.knapsack = pyo.Constraint( + expr=pyo.quicksum(width * ks.widths[width] for width in demand) <= W + ) # blank objective, set by the dual values of cs ks.obj = pyo.Objective(expr=0, sense=pyo.maximize) return cs, ks, initial_patterns -def solve_cutting_stock(demand, W, solver, iterations=30): +def solve_cutting_stock(demand, W, solver, iterations=30): cs, ks, patterns = create_base_cutting_stock(demand, W) if '_persistent' not in solver: - raise RuntimeError('solver must be a string for pyo.SolverFactory and persistent') + raise RuntimeError( + 'solver must be a string for pyo.SolverFactory and persistent' + ) cs_s = pyo.SolverFactory(solver) ks_s = pyo.SolverFactory(solver) @@ -91,12 +98,11 @@ def solve_cutting_stock(demand, W, solver, iterations=30): ks_s.set_instance(ks) for _ in range(iterations): - cs_s.solve() - duals = { width : cs.dual[cs.demand[width]] for width in demand } + duals = {width: cs.dual[cs.demand[width]] for width in demand} - ks.obj.expr = sum(duals[width]*ks.widths[width] for width in demand) + ks.obj.expr = sum(duals[width] * ks.widths[width] for width in demand) ks_s.set_objective(ks.obj) @@ -108,7 +114,7 @@ def solve_cutting_stock(demand, W, solver, iterations=30): # else we'll add the column from ks new_pattern_var = cs.pattern.add() - np_widths = [] + np_widths = [] np_constraints = [] pattern = dict() @@ -123,7 +129,7 @@ def solve_cutting_stock(demand, W, solver, iterations=30): patterns[len(cs.pattern)] = pattern - cs_s.add_column(cs, new_pattern_var, 1., np_constraints, np_widths) + cs_s.add_column(cs, new_pattern_var, 1.0, np_constraints, np_widths) # heuristically solve the cutting stock problem with integer restrictions # to get an integer feasible solution @@ -137,18 +143,20 @@ def solve_cutting_stock(demand, W, solver, iterations=30): return cs, patterns + if __name__ == '__main__': import sys + solver = sys.argv[1] cs, patterns = solve_cutting_stock(demand, W, solver) - print('Sheets Required: '+str(int(pyo.value(cs.obj)))) + print('Sheets Required: ' + str(int(pyo.value(cs.obj)))) print('Repetition\tPattern') for idx, var in cs.pattern.items(): quantity = int(pyo.value(var)) if quantity > 0: - print_str = str(quantity)+'\t\t' + print_str = str(quantity) + '\t\t' for width, number in patterns[idx].items(): - print_str += str(int(number))+':'+str(int(width))+', ' + print_str += str(int(number)) + ':' + str(int(width)) + ', ' print(print_str[:-2]) diff --git a/examples/pyomo/concrete/Whiskas.py b/examples/pyomo/concrete/Whiskas.py index 8ae0b03b891..9bc8dd87e9d 100644 --- a/examples/pyomo/concrete/Whiskas.py +++ b/examples/pyomo/concrete/Whiskas.py @@ -16,53 +16,82 @@ Ingredients = ['CHICKEN', 'BEEF', 'MUTTON', 'RICE', 'WHEAT', 'GEL'] # A dictionary of the costs of each of the Ingredients is created -costs = {'CHICKEN': 0.013, - 'BEEF': 0.008, - 'MUTTON': 0.010, - 'RICE': 0.002, - 'WHEAT': 0.005, - 'GEL': 0.001} +costs = { + 'CHICKEN': 0.013, + 'BEEF': 0.008, + 'MUTTON': 0.010, + 'RICE': 0.002, + 'WHEAT': 0.005, + 'GEL': 0.001, +} # A dictionary of the protein percent in each of the Ingredients is created -proteinPercent = {'CHICKEN': 0.100, - 'BEEF': 0.200, - 'MUTTON': 0.150, - 'RICE': 0.000, - 'WHEAT': 0.040, - 'GEL': 0.000} +proteinPercent = { + 'CHICKEN': 0.100, + 'BEEF': 0.200, + 'MUTTON': 0.150, + 'RICE': 0.000, + 'WHEAT': 0.040, + 'GEL': 0.000, +} # A dictionary of the fat percent in each of the Ingredients is created -fatPercent = {'CHICKEN': 0.080, - 'BEEF': 0.100, - 'MUTTON': 0.110, - 'RICE': 0.010, - 'WHEAT': 0.010, - 'GEL': 0.000} +fatPercent = { + 'CHICKEN': 0.080, + 'BEEF': 0.100, + 'MUTTON': 0.110, + 'RICE': 0.010, + 'WHEAT': 0.010, + 'GEL': 0.000, +} # A dictionary of the fibre percent in each of the Ingredients is created -fibrePercent = {'CHICKEN': 0.001, - 'BEEF': 0.005, - 'MUTTON': 0.003, - 'RICE': 0.100, - 'WHEAT': 0.150, - 'GEL': 0.000} +fibrePercent = { + 'CHICKEN': 0.001, + 'BEEF': 0.005, + 'MUTTON': 0.003, + 'RICE': 0.100, + 'WHEAT': 0.150, + 'GEL': 0.000, +} # A dictionary of the salt percent in each of the Ingredients is created -saltPercent = {'CHICKEN': 0.002, - 'BEEF': 0.005, - 'MUTTON': 0.007, - 'RICE': 0.002, - 'WHEAT': 0.008, - 'GEL': 0.000} +saltPercent = { + 'CHICKEN': 0.002, + 'BEEF': 0.005, + 'MUTTON': 0.007, + 'RICE': 0.002, + 'WHEAT': 0.008, + 'GEL': 0.000, +} model = ConcreteModel(name="The Whiskas Problem") -model.ingredient_vars = Var(Ingredients, bounds=(0,None), doc="The amount of each ingredient that is used") +model.ingredient_vars = Var( + Ingredients, bounds=(0, None), doc="The amount of each ingredient that is used" +) -model.obj = Objective(expr=sum(costs[i]*model.ingredient_vars[i] for i in Ingredients), doc="Total Cost of Ingredients per can") +model.obj = Objective( + expr=sum(costs[i] * model.ingredient_vars[i] for i in Ingredients), + doc="Total Cost of Ingredients per can", +) -model.c0 = Constraint(expr=sum(model.ingredient_vars[i] for i in Ingredients) == 100, doc="PercentagesSum") -model.c1 = Constraint(expr=sum(proteinPercent[i] * model.ingredient_vars[i] for i in Ingredients) >= 8.0, doc="ProteinRequirement") -model.c2 = Constraint(expr=sum(fatPercent[i] * model.ingredient_vars[i] for i in Ingredients) >= 6.0, doc="FatRequirement") -model.c3 = Constraint(expr=sum(fibrePercent[i] * model.ingredient_vars[i] for i in Ingredients) <= 2.0, doc="FibreRequirement") -model.c4 = Constraint(expr=sum(saltPercent[i] * model.ingredient_vars[i] for i in Ingredients) <= 0.4, doc="SaltRequirement") +model.c0 = Constraint( + expr=sum(model.ingredient_vars[i] for i in Ingredients) == 100, doc="PercentagesSum" +) +model.c1 = Constraint( + expr=sum(proteinPercent[i] * model.ingredient_vars[i] for i in Ingredients) >= 8.0, + doc="ProteinRequirement", +) +model.c2 = Constraint( + expr=sum(fatPercent[i] * model.ingredient_vars[i] for i in Ingredients) >= 6.0, + doc="FatRequirement", +) +model.c3 = Constraint( + expr=sum(fibrePercent[i] * model.ingredient_vars[i] for i in Ingredients) <= 2.0, + doc="FibreRequirement", +) +model.c4 = Constraint( + expr=sum(saltPercent[i] * model.ingredient_vars[i] for i in Ingredients) <= 0.4, + doc="SaltRequirement", +) diff --git a/examples/pyomo/concrete/knapsack-abstract.py b/examples/pyomo/concrete/knapsack-abstract.py index e5efc203ce4..bbef95f7810 100644 --- a/examples/pyomo/concrete/knapsack-abstract.py +++ b/examples/pyomo/concrete/knapsack-abstract.py @@ -27,29 +27,27 @@ model.x = Var(model.ITEMS, within=Binary) + def value_rule(model): - return sum(model.v[i]*model.x[i] for i in model.ITEMS) + return sum(model.v[i] * model.x[i] for i in model.ITEMS) + + model.value = Objective(sense=maximize, rule=value_rule) + def weight_rule(model): - return sum(model.w[i]*model.x[i] for i in model.ITEMS) <= model.limit + return sum(model.w[i] * model.x[i] for i in model.ITEMS) <= model.limit + + model.weight = Constraint(rule=weight_rule) if __name__ == '__main__': data = { - 'ITEMS': {None:('hammer','wrench','screwdriver','towel')}, - 'v': {'hammer': 8, - 'wrench': 3, - 'screwdriver': 6, - 'towel': 11, - }, - 'w': { 'hammer': 5, - 'wrench': 7, - 'screwdriver': 4, - 'towel': 3, - }, - 'limit': {None:14}, + 'ITEMS': {None: ('hammer', 'wrench', 'screwdriver', 'towel')}, + 'v': {'hammer': 8, 'wrench': 3, 'screwdriver': 6, 'towel': 11}, + 'w': {'hammer': 5, 'wrench': 7, 'screwdriver': 4, 'towel': 3}, + 'limit': {None: 14}, } - inst = model.create_instance(data={None:data}) + inst = model.create_instance(data={None: data}) inst.pprint() diff --git a/examples/pyomo/concrete/knapsack-concrete.py b/examples/pyomo/concrete/knapsack-concrete.py index 5adbd8df71b..cd115ab40a3 100644 --- a/examples/pyomo/concrete/knapsack-concrete.py +++ b/examples/pyomo/concrete/knapsack-concrete.py @@ -15,8 +15,8 @@ from pyomo.environ import * -v = {'hammer':8, 'wrench':3, 'screwdriver':6, 'towel':11} -w = {'hammer':5, 'wrench':7, 'screwdriver':4, 'towel':3} +v = {'hammer': 8, 'wrench': 3, 'screwdriver': 6, 'towel': 11} +w = {'hammer': 5, 'wrench': 7, 'screwdriver': 4, 'towel': 3} limit = 14 @@ -26,6 +26,6 @@ M.x = Var(M.ITEMS, within=Binary) -M.value = Objective(expr=sum(v[i]*M.x[i] for i in M.ITEMS), sense=maximize) +M.value = Objective(expr=sum(v[i] * M.x[i] for i in M.ITEMS), sense=maximize) -M.weight = Constraint(expr=sum(w[i]*M.x[i] for i in M.ITEMS) <= limit) +M.weight = Constraint(expr=sum(w[i] * M.x[i] for i in M.ITEMS) <= limit) diff --git a/examples/pyomo/concrete/rosen.py b/examples/pyomo/concrete/rosen.py index efa50f637ce..a8e8a175127 100644 --- a/examples/pyomo/concrete/rosen.py +++ b/examples/pyomo/concrete/rosen.py @@ -4,8 +4,6 @@ M = ConcreteModel() M.x = Var() M.y = Var() -M.o = Objective( - expr=(M.x-1)**2 + \ - 100*(M.y-M.x**2)**2) +M.o = Objective(expr=(M.x - 1) ** 2 + 100 * (M.y - M.x**2) ** 2) model = M diff --git a/examples/pyomo/concrete/sodacan.py b/examples/pyomo/concrete/sodacan.py index 10cb295e137..3c0cfd3aab2 100644 --- a/examples/pyomo/concrete/sodacan.py +++ b/examples/pyomo/concrete/sodacan.py @@ -3,9 +3,7 @@ from math import pi M = ConcreteModel() -M.r = Var(bounds=(0,None)) -M.h = Var(bounds=(0,None)) -M.o = Objective(expr=\ - 2*pi*M.r*(M.r + M.h)) -M.c = Constraint(expr=\ - pi*M.h*M.r**2 == 355) +M.r = Var(bounds=(0, None)) +M.h = Var(bounds=(0, None)) +M.o = Objective(expr=2 * pi * M.r * (M.r + M.h)) +M.c = Constraint(expr=pi * M.h * M.r**2 == 355) diff --git a/examples/pyomo/concrete/sodacan_fig.py b/examples/pyomo/concrete/sodacan_fig.py index 1f25651e732..bf9ae476b4c 100644 --- a/examples/pyomo/concrete/sodacan_fig.py +++ b/examples/pyomo/concrete/sodacan_fig.py @@ -10,22 +10,23 @@ R_ = np.arange(0.25, 10, 0.25) H_ = np.arange(0.25, 10, 0.25) R, H = np.meshgrid(R_, H_) -Z = 2*pi*R*(R+H) -surf = ax.plot_surface(R, H, Z, rstride=1, cstride=1, cmap=cm.hot, - linewidth=0, antialiased=False) +Z = 2 * pi * R * (R + H) +surf = ax.plot_surface( + R, H, Z, rstride=1, cstride=1, cmap=cm.hot, linewidth=0, antialiased=False +) ax.set_xlabel("r") ax.set_ylabel("h") ax.set_zlim(0, 1200) ax.zaxis.set_major_locator(LinearLocator(10)) -#ax.zaxis.set_major_formatter(FormatStrFormatter(' %.02f')) +# ax.zaxis.set_major_formatter(FormatStrFormatter(' %.02f')) -#fig.colorbar(surf, shrink=0.5, aspect=5) +# fig.colorbar(surf, shrink=0.5, aspect=5) -H_ = 355/(pi*R_*R_) +H_ = 355 / (pi * R_ * R_) valid = np.where(H_ < 10.1) -Z_ = R_+H_ -Z_ = 2*pi*R_*Z_ +Z_ = R_ + H_ +Z_ = 2 * pi * R_ * Z_ ax.plot(R_[valid], H_[valid], Z_[valid], label='parametric curve') plt.show() diff --git a/examples/pyomo/concrete/sp.py b/examples/pyomo/concrete/sp.py index 6c0f5281cd3..edc2d68b170 100644 --- a/examples/pyomo/concrete/sp.py +++ b/examples/pyomo/concrete/sp.py @@ -1,19 +1,25 @@ # sp.py from pyomo.environ import * -from sp_data import * # define c, b, h, and d +from sp_data import * # define c, b, h, and d -scenarios = range(1,6) +scenarios = range(1, 6) M = ConcreteModel() M.x = Var(within=NonNegativeReals) + def b_rule(B, i): - B.y = Var() - B.l = Constraint(expr=B.y >= (c-b)*M.x + b*d[i]) - B.u = Constraint(expr=B.y >= (c+h)*M.x + h*d[i]) - return B + B.y = Var() + B.l = Constraint(expr=B.y >= (c - b) * M.x + b * d[i]) + B.u = Constraint(expr=B.y >= (c + h) * M.x + h * d[i]) + return B + + M.B = Block(scenarios, rule=b_rule) + def o_rule(M): - return sum(M.B[i].y for i in scenarios)/5.0 + return sum(M.B[i].y for i in scenarios) / 5.0 + + M.o = Objective(rule=o_rule) diff --git a/examples/pyomo/concrete/sp_data.py b/examples/pyomo/concrete/sp_data.py index 080ff4f3e0e..58210126819 100644 --- a/examples/pyomo/concrete/sp_data.py +++ b/examples/pyomo/concrete/sp_data.py @@ -1,4 +1,4 @@ -c=1.0 -b=1.5 -h=0.1 -d = {1:15, 2:60, 3:72, 4:78, 5:82} +c = 1.0 +b = 1.5 +h = 0.1 +d = {1: 15, 2: 60, 3: 72, 4: 78, 5: 82} diff --git a/examples/pyomo/connectors/network_flow.py b/examples/pyomo/connectors/network_flow.py index 7a5b92a603f..cb75ca7ecf2 100644 --- a/examples/pyomo/connectors/network_flow.py +++ b/examples/pyomo/connectors/network_flow.py @@ -12,14 +12,16 @@ from pyomo.core import * + def pipe_rule(pipe, i): m = pipe.model() pipe.flow = Var() - pipe.pIn = Var( within=NonNegativeReals ) - pipe.pOut = Var( within=NonNegativeReals ) - pipe.pDrop = Constraint( expr=pipe.pIn - pipe.pOut == - m.friction*m.pipe_length[i]*pipe.flow ) - + pipe.pIn = Var(within=NonNegativeReals) + pipe.pOut = Var(within=NonNegativeReals) + pipe.pDrop = Constraint( + expr=pipe.pIn - pipe.pOut == m.friction * m.pipe_length[i] * pipe.flow + ) + pipe.IN = Connector() pipe.IN.add(-pipe.flow, "flow") pipe.IN.add(pipe.pIn, "pressure") @@ -28,45 +30,48 @@ def pipe_rule(pipe, i): pipe.OUT.add(pipe.flow) pipe.OUT.add(pipe.pOut, "pressure") + def node_rule(node, i): def _mass_balance(node, flows): return node.model().demands[i] == sum_product(flows) node.flow = VarList() - node.pressure = Var( within=NonNegativeReals ) + node.pressure = Var(within=NonNegativeReals) node.port = Connector() - #node.port.add(node.flow, + # node.port.add(node.flow, # aggregate=lambda n,v: n.model().demands[id] == sum_product(v)) - node.port.add( node.flow, aggregate=_mass_balance ) - node.port.add( node.pressure ) + node.port.add(node.flow, aggregate=_mass_balance) + node.port.add(node.pressure) def _src_rule(model, pipe): - return model.nodes[ value(model.pipe_links[pipe, 0]) ].port == \ - model.pipes[pipe].IN + return model.nodes[value(model.pipe_links[pipe, 0])].port == model.pipes[pipe].IN + def _sink_rule(model, pipe): - return model.nodes[ value(model.pipe_links[pipe, 1]) ].port == \ - model.pipes[pipe].OUT + return model.nodes[value(model.pipe_links[pipe, 1])].port == model.pipes[pipe].OUT model = AbstractModel() model.PIPES = Set() model.NODES = Set() -model.friction = Param( within=NonNegativeReals ) -model.pipe_length = Param( model.PIPES, within=NonNegativeReals ) -model.pipe_links = Param( model.PIPES, [0,1] ) -model.demands = Param( model.NODES, within=Reals, default=0 ) +model.friction = Param(within=NonNegativeReals) +model.pipe_length = Param(model.PIPES, within=NonNegativeReals) +model.pipe_links = Param(model.PIPES, [0, 1]) +model.demands = Param(model.NODES, within=Reals, default=0) -model.pipes = Block( model.PIPES, rule=pipe_rule ) -model.nodes = Block( model.NODES, rule=node_rule ) +model.pipes = Block(model.PIPES, rule=pipe_rule) +model.nodes = Block(model.NODES, rule=node_rule) # Connect the network -model.network_src = Constraint(model.PIPES, rule=_src_rule) +model.network_src = Constraint(model.PIPES, rule=_src_rule) model.network_sink = Constraint(model.PIPES, rule=_sink_rule) + # Solve so the minimum pressure in the network is 0 def _obj(model): return sum(model.nodes[n].pressure for n in model.NODES) -model.obj = Objective( rule=_obj ) + + +model.obj = Objective(rule=_obj) diff --git a/examples/pyomo/connectors/network_flow_proposed.py b/examples/pyomo/connectors/network_flow_proposed.py index bee2cc4c2d6..ed603ff6626 100644 --- a/examples/pyomo/connectors/network_flow_proposed.py +++ b/examples/pyomo/connectors/network_flow_proposed.py @@ -12,62 +12,67 @@ from pyomo.core import * + def pipe_rule(model, pipe, id): - pipe.length = Param( within=NonNegativeReals ) + pipe.length = Param(within=NonNegativeReals) pipe.flow = Var() - pipe.pIn = Var( within=NonNegativeReals ) - pipe.pOut = Var( within=NonNegativeReals ) - pipe.pDrop = Constraint( expr=pipe.pIn - pipe.pOut == - model.friction*model.pipe_length[id]*pipe.flow ) - + pipe.pIn = Var(within=NonNegativeReals) + pipe.pOut = Var(within=NonNegativeReals) + pipe.pDrop = Constraint( + expr=pipe.pIn - pipe.pOut == model.friction * model.pipe_length[id] * pipe.flow + ) + pipe.IN = Connector() - pipe.IN.add(-1*pipe.flow, "flow") + pipe.IN.add(-1 * pipe.flow, "flow") pipe.IN.add(pipe.pIn, "pressure") pipe.OUT = Connector() pipe.OUT.add(pipe.flow) pipe.OUT.add(pipe.pOut, "pressure") + def node_rule(model, node, id): def _mass_balance(model, node, flows): return node.demand == sum_product(flows) - node.demand = Param( within=Reals, default=0 ) + node.demand = Param(within=Reals, default=0) node.flow = VarList() - node.pressure = Var( within=NonNegativeReals ) + node.pressure = Var(within=NonNegativeReals) node.port = Connector() - #node.port.add( node.flow, + # node.port.add( node.flow, # aggregate=lambda m,n,v: m.demands[id] == sum_product(v) ) - node.port.add( node.flow, aggregate=_mass_balance ) - node.port.add( node.pressure ) + node.port.add(node.flow, aggregate=_mass_balance) + node.port.add(node.pressure) def _src_rule(model, pipe): - return model.nodes[ value(model.pipe_links[pipe, 0]) ].port == \ - model.pipes[pipe].IN + return model.nodes[value(model.pipe_links[pipe, 0])].port == model.pipes[pipe].IN + def _sink_rule(model, pipe): - return model.nodes[ value(model.pipe_links[pipe, 1]) ].port == \ - model.pipes[pipe].OUT + return model.nodes[value(model.pipe_links[pipe, 1])].port == model.pipes[pipe].OUT model = AbstractModel() model.PIPES = Set() model.NODES = Set() -model.friction = Param( within=NonNegativeReals ) -model.pipe_links = Param( model.PIPES, RangeSet(2) ) +model.friction = Param(within=NonNegativeReals) +model.pipe_links = Param(model.PIPES, RangeSet(2)) -model.pipes = Block( model.PIPES, rule=pipe_rule ) -model.nodes = Block( model.NODES, rule=node_rule ) +model.pipes = Block(model.PIPES, rule=pipe_rule) +model.nodes = Block(model.NODES, rule=node_rule) # Connect the network -model.network_src = Constraint(model.PIPES, rule=_src_rule) +model.network_src = Constraint(model.PIPES, rule=_src_rule) model.network_sink = Constraint(model.PIPES, rule=_sink_rule) + # Solve so the minimum pressure in the network is 0 def _obj(model): return sum(model.nodes[n].pressure for n in model.NODES) -model.obj = Objective( rule=_obj ) + + +model.obj = Objective(rule=_obj) diff --git a/examples/pyomo/core/block1.py b/examples/pyomo/core/block1.py index 42f86708b79..96f8114f19c 100644 --- a/examples/pyomo/core/block1.py +++ b/examples/pyomo/core/block1.py @@ -17,5 +17,4 @@ model.b = Block() model.b.x = Var() -model.o = Objective(expr=(model.x-1.0)**2 + (model.b.x - 2.0)**2) - +model.o = Objective(expr=(model.x - 1.0) ** 2 + (model.b.x - 2.0) ** 2) diff --git a/examples/pyomo/core/integrality1.py b/examples/pyomo/core/integrality1.py index 5ddc01c5f8f..db81805555f 100644 --- a/examples/pyomo/core/integrality1.py +++ b/examples/pyomo/core/integrality1.py @@ -16,8 +16,8 @@ M.x2 = Var(within=Boolean) M.x3 = Var(within=Boolean) -M.o = Objective(expr=M.x1+M.x2+M.x3) -M.c1 = Constraint(expr=4*M.x1+M.x2 >= 1) -M.c2 = Constraint(expr=M.x2+4*M.x3 >= 1) +M.o = Objective(expr=M.x1 + M.x2 + M.x3) +M.c1 = Constraint(expr=4 * M.x1 + M.x2 >= 1) +M.c2 = Constraint(expr=M.x2 + 4 * M.x3 >= 1) -model=M +model = M diff --git a/examples/pyomo/core/integrality2.py b/examples/pyomo/core/integrality2.py index 661df5e3b3a..2d85c9f2455 100644 --- a/examples/pyomo/core/integrality2.py +++ b/examples/pyomo/core/integrality2.py @@ -12,10 +12,10 @@ from pyomo.environ import * M = ConcreteModel() -M.x = Var([1,2,3], within=Boolean) +M.x = Var([1, 2, 3], within=Boolean) M.o = Objective(expr=sum_product(M.x)) -M.c1 = Constraint(expr=4*M.x[1]+M.x[2] >= 1) -M.c2 = Constraint(expr=M.x[2]+4*M.x[3] >= 1) +M.c1 = Constraint(expr=4 * M.x[1] + M.x[2] >= 1) +M.c2 = Constraint(expr=M.x[2] + 4 * M.x[3] >= 1) -model=M +model = M diff --git a/examples/pyomo/core/simple.py b/examples/pyomo/core/simple.py index 2ba94471232..d0359c143bf 100644 --- a/examples/pyomo/core/simple.py +++ b/examples/pyomo/core/simple.py @@ -13,12 +13,10 @@ M = ConcreteModel() M.x1 = Var() -M.x2 = Var(bounds=(-1,1)) -M.x3 = Var(bounds=(1,2)) -M.o = Objective( - expr=M.x1**2 + (M.x2*M.x3)**4 + \ - M.x1*M.x3 + \ - M.x2*sin(M.x1+M.x3) + M.x2) +M.x2 = Var(bounds=(-1, 1)) +M.x3 = Var(bounds=(1, 2)) +M.o = Objective( + expr=M.x1**2 + (M.x2 * M.x3) ** 4 + M.x1 * M.x3 + M.x2 * sin(M.x1 + M.x3) + M.x2 +) model = M - diff --git a/examples/pyomo/core/t1.py b/examples/pyomo/core/t1.py index b5e4e49c2af..4135049d4be 100644 --- a/examples/pyomo/core/t1.py +++ b/examples/pyomo/core/t1.py @@ -11,18 +11,17 @@ from pyomo.environ import * -def pyomo_create_model(options, model_options): +def pyomo_create_model(options, model_options): model = ConcreteModel() model.x1 = Var(within=NonNegativeReals) model.x2 = Var(within=NonNegativeReals) model.x3 = Var(within=NonNegativeReals) - model.o = Objective(expr=6*model.x1 + 4*model.x2 + 2*model.x3, sense=minimize) + model.o = Objective(expr=6 * model.x1 + 4 * model.x2 + 2 * model.x3, sense=minimize) - model.c1 = Constraint(expr=4*model.x1 + 2*model.x2 + model.x3 >= 5) + model.c1 = Constraint(expr=4 * model.x1 + 2 * model.x2 + model.x3 >= 5) model.c2 = Constraint(expr=model.x1 + model.x2 >= 3) model.c3 = Constraint(expr=model.x2 + model.x3 >= 4) return model - diff --git a/examples/pyomo/core/t2.py b/examples/pyomo/core/t2.py index 81128d6537c..5d687917fba 100644 --- a/examples/pyomo/core/t2.py +++ b/examples/pyomo/core/t2.py @@ -11,17 +11,17 @@ from pyomo.environ import * -def pyomo_create_model(options, model_options): +def pyomo_create_model(options, model_options): model = ConcreteModel() model.x1 = Var(within=NonNegativeReals) model.x2 = Var(within=NonPositiveReals) model.x3 = Var(within=Reals) - model.o = Objective(expr=model.x1 + 2*model.x2 + 3*model.x3, sense=maximize) + model.o = Objective(expr=model.x1 + 2 * model.x2 + 3 * model.x3, sense=maximize) - model.c1 = Constraint(expr= - model.x1 + 3*model.x2 == 5) - model.c2 = Constraint(expr=2*model.x1 - model.x2 + 3*model.x3 >= 6) + model.c1 = Constraint(expr=-model.x1 + 3 * model.x2 == 5) + model.c2 = Constraint(expr=2 * model.x1 - model.x2 + 3 * model.x3 >= 6) model.c3 = Constraint(expr=model.x3 <= 4) return model diff --git a/examples/pyomo/core/t5.py b/examples/pyomo/core/t5.py index f67956c9e17..38605751015 100644 --- a/examples/pyomo/core/t5.py +++ b/examples/pyomo/core/t5.py @@ -15,17 +15,16 @@ # from pyomo.environ import * -def pyomo_create_model(options, model_options): +def pyomo_create_model(options, model_options): model = ConcreteModel() model.x1 = Var(within=NonNegativeReals) model.x2 = Var(within=NonNegativeReals) - model.o = Objective(expr=3*model.x1 + 2.5*model.x2, sense=maximize) + model.o = Objective(expr=3 * model.x1 + 2.5 * model.x2, sense=maximize) - model.c1 = Constraint(expr=4.44*model.x1 <= 100) - model.c2 = Constraint(expr=6.67*model.x2 <= 100) - model.c3 = Constraint(expr=4*model.x1 + 2.86*model.x2 <= 100) - model.c4 = Constraint(expr=3*model.x1 + 6*model.x2 <= 100) + model.c1 = Constraint(expr=4.44 * model.x1 <= 100) + model.c2 = Constraint(expr=6.67 * model.x2 <= 100) + model.c3 = Constraint(expr=4 * model.x1 + 2.86 * model.x2 <= 100) + model.c4 = Constraint(expr=3 * model.x1 + 6 * model.x2 <= 100) return model - diff --git a/examples/pyomo/diet/diet-sqlite.py b/examples/pyomo/diet/diet-sqlite.py index 5be5c83832a..e8963485294 100644 --- a/examples/pyomo/diet/diet-sqlite.py +++ b/examples/pyomo/diet/diet-sqlite.py @@ -23,7 +23,8 @@ c.execute('DROP TABLE IF EXISTS ' + table) conn.commit() -c.execute(''' +c.execute( + ''' CREATE TABLE Food ( FOOD text not null, cost float not null, @@ -31,7 +32,8 @@ f_max float, PRIMARY KEY (FOOD) ) -''') +''' +) conn.commit() Food_data = [ @@ -43,20 +45,22 @@ ("Fries, small", 0.77, None, None), ("Sausage McMuffin", 1.29, None, None), ("1% Lowfat Milk", 0.60, None, None), - ("Orange Juice", 0.72, None, None) + ("Orange Juice", 0.72, None, None), ] for row in Food_data: c.execute('''INSERT INTO Food VALUES (?,?,?,?)''', row) conn.commit() -c.execute(''' +c.execute( + ''' CREATE TABLE Nutr ( NUTR text not null, n_min float, n_max float, PRIMARY KEY (NUTR) ) -''') +''' +) Nutr_data = [ ("Cal", 2000.0, None), @@ -65,13 +69,14 @@ ("VitA", 100.0, None), ("VitC", 100.0, None), ("Calc", 100.0, None), - ("Iron", 100.0, None) + ("Iron", 100.0, None), ] for row in Nutr_data: c.execute('''INSERT INTO Nutr VALUES (?,?,?)''', row) conn.commit() -c.execute(''' +c.execute( + ''' CREATE TABLE Amount ( NUTR text not null, FOOD varchar not null, @@ -80,73 +85,74 @@ FOREIGN KEY (NUTR) REFERENCES Nutr (NUTR), FOREIGN KEY (FOOD) REFERENCES Food (FOOD) ) -''') +''' +) conn.commit() Amount_data = [ - ('Cal','Quarter Pounder w Cheese','510'), - ('Carbo','Quarter Pounder w Cheese','34'), - ('Protein','Quarter Pounder w Cheese','28'), - ('VitA','Quarter Pounder w Cheese','15'), - ('VitC','Quarter Pounder w Cheese','6'), - ('Calc','Quarter Pounder w Cheese','30'), - ('Iron','Quarter Pounder w Cheese','20'), - ('Cal','McLean Deluxe w Cheese','370'), - ('Carbo','McLean Deluxe w Cheese','35'), - ('Protein','McLean Deluxe w Cheese','24'), - ('VitA','McLean Deluxe w Cheese','15'), - ('VitC','McLean Deluxe w Cheese','10'), - ('Calc','McLean Deluxe w Cheese','20'), - ('Iron','McLean Deluxe w Cheese','20'), - ('Cal','Big Mac','500'), - ('Carbo','Big Mac','42'), - ('Protein','Big Mac','25'), - ('VitA','Big Mac','6'), - ('VitC','Big Mac','2'), - ('Calc','Big Mac','25'), - ('Iron','Big Mac','20'), - ('Cal','Filet-O-Fish','370'), - ('Carbo','Filet-O-Fish','38'), - ('Protein','Filet-O-Fish','14'), - ('VitA','Filet-O-Fish','2'), - ('VitC','Filet-O-Fish','0'), - ('Calc','Filet-O-Fish','15'), - ('Iron','Filet-O-Fish','10'), - ('Cal','McGrilled Chicken','400'), - ('Carbo','McGrilled Chicken','42'), - ('Protein','McGrilled Chicken','31'), - ('VitA','McGrilled Chicken','8'), - ('VitC','McGrilled Chicken','15'), - ('Calc','McGrilled Chicken','15'), - ('Iron','McGrilled Chicken','8'), - ('Cal','Fries, small','220'), - ('Carbo','Fries, small','26'), - ('Protein','Fries, small','3'), - ('VitA','Fries, small','0'), - ('VitC','Fries, small','15'), - ('Calc','Fries, small','0'), - ('Iron','Fries, small','2'), - ('Cal','Sausage McMuffin','345'), - ('Carbo','Sausage McMuffin','27'), - ('Protein','Sausage McMuffin','15'), - ('VitA','Sausage McMuffin','4'), - ('VitC','Sausage McMuffin','0'), - ('Calc','Sausage McMuffin','20'), - ('Iron','Sausage McMuffin','15'), - ('Cal','1% Lowfat Milk','110'), - ('Carbo','1% Lowfat Milk','12'), - ('Protein','1% Lowfat Milk','9'), - ('VitA','1% Lowfat Milk','10'), - ('VitC','1% Lowfat Milk','4'), - ('Calc','1% Lowfat Milk','30'), - ('Iron','1% Lowfat Milk','0'), - ('Cal','Orange Juice','80'), - ('Carbo','Orange Juice','20'), - ('Protein','Orange Juice','1'), - ('VitA','Orange Juice','2'), - ('VitC','Orange Juice','120'), - ('Calc','Orange Juice','2'), - ('Iron','Orange Juice','2') + ('Cal', 'Quarter Pounder w Cheese', '510'), + ('Carbo', 'Quarter Pounder w Cheese', '34'), + ('Protein', 'Quarter Pounder w Cheese', '28'), + ('VitA', 'Quarter Pounder w Cheese', '15'), + ('VitC', 'Quarter Pounder w Cheese', '6'), + ('Calc', 'Quarter Pounder w Cheese', '30'), + ('Iron', 'Quarter Pounder w Cheese', '20'), + ('Cal', 'McLean Deluxe w Cheese', '370'), + ('Carbo', 'McLean Deluxe w Cheese', '35'), + ('Protein', 'McLean Deluxe w Cheese', '24'), + ('VitA', 'McLean Deluxe w Cheese', '15'), + ('VitC', 'McLean Deluxe w Cheese', '10'), + ('Calc', 'McLean Deluxe w Cheese', '20'), + ('Iron', 'McLean Deluxe w Cheese', '20'), + ('Cal', 'Big Mac', '500'), + ('Carbo', 'Big Mac', '42'), + ('Protein', 'Big Mac', '25'), + ('VitA', 'Big Mac', '6'), + ('VitC', 'Big Mac', '2'), + ('Calc', 'Big Mac', '25'), + ('Iron', 'Big Mac', '20'), + ('Cal', 'Filet-O-Fish', '370'), + ('Carbo', 'Filet-O-Fish', '38'), + ('Protein', 'Filet-O-Fish', '14'), + ('VitA', 'Filet-O-Fish', '2'), + ('VitC', 'Filet-O-Fish', '0'), + ('Calc', 'Filet-O-Fish', '15'), + ('Iron', 'Filet-O-Fish', '10'), + ('Cal', 'McGrilled Chicken', '400'), + ('Carbo', 'McGrilled Chicken', '42'), + ('Protein', 'McGrilled Chicken', '31'), + ('VitA', 'McGrilled Chicken', '8'), + ('VitC', 'McGrilled Chicken', '15'), + ('Calc', 'McGrilled Chicken', '15'), + ('Iron', 'McGrilled Chicken', '8'), + ('Cal', 'Fries, small', '220'), + ('Carbo', 'Fries, small', '26'), + ('Protein', 'Fries, small', '3'), + ('VitA', 'Fries, small', '0'), + ('VitC', 'Fries, small', '15'), + ('Calc', 'Fries, small', '0'), + ('Iron', 'Fries, small', '2'), + ('Cal', 'Sausage McMuffin', '345'), + ('Carbo', 'Sausage McMuffin', '27'), + ('Protein', 'Sausage McMuffin', '15'), + ('VitA', 'Sausage McMuffin', '4'), + ('VitC', 'Sausage McMuffin', '0'), + ('Calc', 'Sausage McMuffin', '20'), + ('Iron', 'Sausage McMuffin', '15'), + ('Cal', '1% Lowfat Milk', '110'), + ('Carbo', '1% Lowfat Milk', '12'), + ('Protein', '1% Lowfat Milk', '9'), + ('VitA', '1% Lowfat Milk', '10'), + ('VitC', '1% Lowfat Milk', '4'), + ('Calc', '1% Lowfat Milk', '30'), + ('Iron', '1% Lowfat Milk', '0'), + ('Cal', 'Orange Juice', '80'), + ('Carbo', 'Orange Juice', '20'), + ('Protein', 'Orange Juice', '1'), + ('VitA', 'Orange Juice', '2'), + ('VitC', 'Orange Juice', '120'), + ('Calc', 'Orange Juice', '2'), + ('Iron', 'Orange Juice', '2'), ] for row in Amount_data: c.execute('''INSERT INTO Amount VALUES (?,?,?)''', row) diff --git a/examples/pyomo/diet/diet1.py b/examples/pyomo/diet/diet1.py index 3eee2689e70..1fd61ca268c 100644 --- a/examples/pyomo/diet/diet1.py +++ b/examples/pyomo/diet/diet1.py @@ -28,9 +28,13 @@ model.f_min = Param(model.FOOD, within=NonNegativeReals, default=0.0) -MAX_FOOD_SUPPLY = 20.0 # McDonald's doesn't stock infinite food -def f_max_validate (model, value, j): +MAX_FOOD_SUPPLY = 20.0 # McDonald's doesn't stock infinite food + + +def f_max_validate(model, value, j): return model.f_max[j] > model.f_min[j] + + model.f_max = Param(model.FOOD, validate=f_max_validate, default=MAX_FOOD_SUPPLY) # Unneeded vars - they're in the .dat file, so we list them here @@ -41,29 +45,50 @@ def f_max_validate (model, value, j): # -------------------------------------------------------- + def Buy_bounds(model, i): return (model.f_min[i], model.f_max[i]) + + model.Buy = Var(model.FOOD, bounds=Buy_bounds, within=NonNegativeIntegers) # -------------------------------------------------------- + def Total_Cost_rule(model): return sum(model.cost[j] * model.Buy[j] for j in model.FOOD) + + model.Total_Cost = Objective(rule=Total_Cost_rule, sense=minimize) # -------------------------------------------------------- + def Entree_rule(model): - entrees = ['Quarter Pounder w Cheese', 'McLean Deluxe w Cheese', 'Big Mac', 'Filet-O-Fish', 'McGrilled Chicken'] + entrees = [ + 'Quarter Pounder w Cheese', + 'McLean Deluxe w Cheese', + 'Big Mac', + 'Filet-O-Fish', + 'McGrilled Chicken', + ] return sum(model.Buy[e] for e in entrees) >= 1 + + model.Entree = Constraint(rule=Entree_rule) + def Side_rule(model): sides = ['Fries, small', 'Sausage McMuffin'] return sum(model.Buy[s] for s in sides) >= 1 + + model.Side = Constraint(rule=Side_rule) + def Drink_rule(model): drinks = ['1% Lowfat Milk', 'Orange Juice'] return sum(model.Buy[d] for d in drinks) >= 1 + + model.Drink = Constraint(rule=Drink_rule) diff --git a/examples/pyomo/diet/diet2.py b/examples/pyomo/diet/diet2.py index e497bf3f292..526dbcef484 100644 --- a/examples/pyomo/diet/diet2.py +++ b/examples/pyomo/diet/diet2.py @@ -29,47 +29,63 @@ model.f_min = Param(model.FOOD, within=NonNegativeReals, default=0.0) -def f_max_validate (model, value, j): + +def f_max_validate(model, value, j): return model.f_max[j] > model.f_min[j] -model.f_max = Param(model.FOOD, validate=f_max_validate, - default=infinity) + + +model.f_max = Param(model.FOOD, validate=f_max_validate, default=infinity) model.n_min = Param(model.NUTR, within=NonNegativeReals, default=0.0) -def n_max_validate (model, value, j): + +def n_max_validate(model, value, j): return value > model.n_min[j] -model.n_max = Param(model.NUTR, validate=n_max_validate, - default=infinity) + + +model.n_max = Param(model.NUTR, validate=n_max_validate, default=infinity) model.amt = Param(model.NUTR, model.FOOD, within=NonNegativeReals) # -------------------------------------------------------- + def Buy_bounds(model, i): - return (model.f_min[i],model.f_max[i]) + return (model.f_min[i], model.f_max[i]) + + model.Buy = Var(model.FOOD, bounds=Buy_bounds, within=NonNegativeIntegers) # -------------------------------------------------------- + def Total_Cost_rule(model): ans = 0 for j in model.FOOD: ans = ans + model.cost[j] * model.Buy[j] return ans + + model.Total_Cost = Objective(rule=Total_Cost_rule, sense=minimize) + def Nutr_Amt_rule(model, i): ans = 0 for j in model.FOOD: - ans = ans + model.amt[i,j] * model.Buy[j] + ans = ans + model.amt[i, j] * model.Buy[j] return ans -#model.Nutr_Amt = Objective(model.NUTR, rule=Nutr_Amt_rule) + + +# model.Nutr_Amt = Objective(model.NUTR, rule=Nutr_Amt_rule) # -------------------------------------------------------- + def Diet_rule(model, i): expr = 0 for j in model.FOOD: - expr = expr + model.amt[i,j] * model.Buy[j] + expr = expr + model.amt[i, j] * model.Buy[j] return (model.n_min[i], expr, model.n_max[i]) + + model.Diet = Constraint(model.NUTR, rule=Diet_rule) diff --git a/examples/pyomo/draft/api.py b/examples/pyomo/draft/api.py index fed2bd67ccb..5b506882d9b 100644 --- a/examples/pyomo/draft/api.py +++ b/examples/pyomo/draft/api.py @@ -59,7 +59,7 @@ # # Creating an array of sets, which are indexed with another set # -model.C = Set(model.A,model.A) +model.C = Set(model.A, model.A) # # Option 'initialize' indicates how values in the set will be constructed. # This option behaves differently depending on the type of data provided: @@ -68,32 +68,44 @@ # (3) a function can be used to initial a set (perhaps using model # information. # -model.A = Set(initialize=[1,4,9]) -model.B = Set(model.A, initialize={1:[1,4,9], 2:[2,5,10]}) +model.A = Set(initialize=[1, 4, 9]) +model.B = Set(model.A, initialize={1: [1, 4, 9], 2: [2, 5, 10]}) + + def f(model): - return range(0,10) + return range(0, 10) + + model.A = Set(initialize=f) + + # # Option 'ordered' specifies whether the set elements are ordered # This option allows for more sophisticated construction rules # def f(model, i): - if i==10: + if i == 10: return Set.End - if i==0: + if i == 0: return 1 else: - return model.A[i-1] * (i+1) + return model.A[i - 1] * (i + 1) + + model.A = Set(ordered=True, initialize=f) # # Option 'within' specifies a set that is used to validate set elements # model.B = Set(within=model.A) + + # # Option 'validate' specifies a function that is used to validate set elements # def f(model, value): return value in model.A + + model.B = Set(validate=f) # # Option 'dimen' specifies the arity of the data in the set @@ -124,8 +136,8 @@ def f(model, value): # # add() - adds data to a set # -instance.A.add(1,3,5) -instance.A[i].add(1,3,5) +instance.A.add(1, 3, 5) +instance.A[i].add(1, 3, 5) # # remove() - removes data from a set, throwing an exception if the data does # not exist @@ -142,26 +154,26 @@ def f(model, value): # Set iteration # for val in instance.A: - print val + print(val) # # Set comparisons # -instance.A < instance.B # True if A is strict subset of B -instance.A <= instance.B # True if A is a subset of B -instance.A == instance.B # True if A equals B -instance.A >= instance.B # True if A is a superset of B -instance.A > instance.B # True if A is a strict superset of B +instance.A < instance.B # True if A is strict subset of B +instance.A <= instance.B # True if A is a subset of B +instance.A == instance.B # True if A equals B +instance.A >= instance.B # True if A is a superset of B +instance.A > instance.B # True if A is a strict superset of B # # Set membership # -val in instance.A # True if 'val' is in A +val in instance.A # True if 'val' is in A # # Set operations # -instance.A | instance.B # Set union -instance.A & instance.B # Set intersection -instance.A ^ instance.B # Set symmetric difference -instance.A - instance.B # Set difference +instance.A | instance.B # Set union +instance.A & instance.B # Set intersection +instance.A ^ instance.B # Set symmetric difference +instance.A - instance.B # Set difference # # Set cross product - define a new set that is the cross-product of # two or more sets @@ -170,8 +182,8 @@ def f(model, value): # # Ordered set operations # -instance.A[j] # returns the j'th member of ordered set A -instance.A[i][j] # returns the j'th member of ordered set A[i] +instance.A[j] # returns the j'th member of ordered set A +instance.A[i][j] # returns the j'th member of ordered set A[i] # # keys() - returns the indices of the set array # @@ -208,20 +220,24 @@ def f(model, value): # # Array of parameters # -model.Z = Param(model.A,model.B) +model.Z = Param(model.A, model.B) # # Option 'initialize' specifies values used to construct the parameter # model.Z = Param(initialize=9) -model.Z = Param(model.A,initialize={1:1, 2:4, 3:9}) -model.Z = Param(model.A,initialize=2) +model.Z = Param(model.A, initialize={1: 1, 2: 4, 3: 9}) +model.Z = Param(model.A, initialize=2) + + # # Option 'initialize' can also specify a function used to construct the # parameter # def f(model, i): - return 3*i -model.Z = Param(model.A,initialize=f) + return 3 * i + + +model.Z = Param(model.A, initialize=f) # # Option 'default' specifies values used for a parameter if no value # has been set. Note that for scalar parameters this has the same @@ -229,16 +245,20 @@ def f(model, i): # 'fills in' parameter values that have not been initialized. # model.Z = Param(default=9.0) -model.Z = Param(model.A,default=9.0) +model.Z = Param(model.A, default=9.0) # # Option 'within' specifies a set that is used to validate parameters # model.Z = Param(within=model.A) + + # # Option 'validate' specifies a function that is used to validate parameters # def f(model, value): return value in model.A + + model.Z = Param(validate=f) # #### @@ -302,20 +322,24 @@ def f(model, value): # # Array of variables # -model.x = Var(model.A,model.B) +model.x = Var(model.A, model.B) # # Option 'initialize' specifies the initial values of variables # model.x = Var(initialize=9) -model.x = Var(model.A,initialize={1:1, 2:4, 3:9}) -model.x = Var(model.A,initialize=2) +model.x = Var(model.A, initialize={1: 1, 2: 4, 3: 9}) +model.x = Var(model.A, initialize=2) + + # # Option 'initialize' can specify a function used to construct the initial # variable values # def f(model, i): - return 3*i -model.x = Var(model.A,initialize=f) + return 3 * i + + +model.x = Var(model.A, initialize=f) # # Option 'within' specifies a set that is used to constrain variables # @@ -325,9 +349,13 @@ def f(model, i): # Simple bounds can be specified, or a function that defines bounds for # different variables. # -model.x = Var(bounds=(0.0,1.0)) +model.x = Var(bounds=(0.0, 1.0)) + + def f(model, i): return (model.x_low[i], model._x_high[i]) + + model.x = Var(bounds=f) # #### @@ -381,13 +409,12 @@ def f(model, i): # # Bounds # -instance.x.setlb(0.0) # Set a variable lower bound -instance.x.setub(1.0) # Set a variable upper bound +instance.x.setlb(0.0) # Set a variable lower bound +instance.x.setub(1.0) # Set a variable upper bound # # Fixed - variables that are fixed (and thus not optimized) # -instance.x.fixed = True # Fixes this variable value - +instance.x.fixed = True # Fixes this variable value ## ------------------------------------------------------------------------- @@ -412,15 +439,19 @@ def f(model, i): # # Array of objectives # -model.obj = Objective(model.A,model.B) +model.obj = Objective(model.A, model.B) # # Option 'rule' can specify a function used to construct the objective # expression # model.Z = Param(model.A) model.x = Var(model.A) + + def f(model, i): return model.Z[i] * model.A[i] + + model.obj = Objective(model.A, rule=f) # # Option 'sense' specifies whether the objective is maximized or minimized @@ -467,7 +498,6 @@ def f(model, i): tmp = value(instance.obj)[2] - ## ------------------------------------------------------------------------- ## ## Constraint objects @@ -488,17 +518,23 @@ def f(model, i): # # Array of constraint # -model.con = Constraint(model.A,model.B) +model.con = Constraint(model.A, model.B) # # Option 'rule' can specify a function used to construct the constraint # expression # model.Z = Param(model.A) model.x = Var(model.A) + + def f(model, i): expr = model.Z[i] * model.A[i] return (0, expr, 1) + + model.con = Constraint(model.A, rule=f) + + # # Note: the constructor rule must include the specification of bounds # information for the constraint. This can be done in one of two ways. First, @@ -509,6 +545,8 @@ def f(model, i): def f(model, i): expr = model.Z[i] * model.A[i] return (expr, 0) + + # # Second, the constructor rule can augment the expression to include # bound information. For example, the previous rule can be rewritten as @@ -518,6 +556,8 @@ def f(model, i): expr = expr >= 0 expr = expr <= 1 return expr + + # # The following illustrate the type of bounds information that can be # specified: @@ -528,6 +568,7 @@ def f(model, i): # expr = expr == val Equality constraint # + # # If the constructor rule returns Constraint.Skip, then the constraint index # is ignored. Alternatively, a constructor rule can return a dictionary @@ -539,8 +580,11 @@ def f1(model, i): return Constraint.Skip expr = model.Z[i] * model.A[i] return (0, expr, 1) + + model.con1 = Constraint(model.A, rule=f1) + def f2(model): res = {} for i in model.A: @@ -548,6 +592,8 @@ def f2(model): expr = model.Z[i] * model.A[i] res[i] = (0, expr, 1) return res + + model.con2 = Constraint(model.A, rule=f2) #### @@ -583,4 +629,3 @@ def f2(model): # tmp = instance.con.value[2] tmp = value(instance.con)[2] - diff --git a/examples/pyomo/draft/bpack.py b/examples/pyomo/draft/bpack.py index 763a5927088..697ce531013 100644 --- a/examples/pyomo/draft/bpack.py +++ b/examples/pyomo/draft/bpack.py @@ -20,22 +20,28 @@ # # Set I # -model.I = RangeSet(1,model.N) +model.I = RangeSet(1, model.N) # # Variable b # model.b = Var(model.I, domain=Boolean) + + # # Objective zot # def costrule(model): ans = 0 for i in model.I: -# ans += (-1 - .02*i)*model.b[i] - ans += (1 + .02*i)*model.b[i] + # ans += (-1 - .02*i)*model.b[i] + ans += (1 + 0.02 * i) * model.b[i] return ans -#model.zot = Objective(rule=costrule) + + +# model.zot = Objective(rule=costrule) model.zot = Objective(rule=costrule, sense=maximize) + + # # Set w_ind # @@ -47,11 +53,13 @@ def w_ind_rule(model): j = i i9 = i + 9 while j <= i9: - ans.add((i,j)) + ans.add((i, j)) j += 1 i += 1 return ans -model.w_ind = Set(initialize=w_ind_rule,dimen=2) + + +model.w_ind = Set(initialize=w_ind_rule, dimen=2) # # Parameter w # @@ -59,11 +67,13 @@ def w_ind_rule(model): # # Set rhs_ind # -model.rhs_ind = RangeSet(1,model.N-9) +model.rhs_ind = RangeSet(1, model.N - 9) # # Parameter rhs # model.rhs = Param(model.rhs_ind) + + # # Constraint bletch # @@ -72,8 +82,10 @@ def bletch_rule(model, i): j = i i9 = i + 9 while j <= i9: - ans += model.w[i,j]*model.b[j] + ans += model.w[i, j] * model.b[j] j += 1 ans = ans < model.rhs[i] return ans + + model.bletch = Constraint(model.rhs_ind, rule=bletch_rule) diff --git a/examples/pyomo/draft/diet2.py b/examples/pyomo/draft/diet2.py index 2a6c8b112f9..9e4d2c5d9c4 100644 --- a/examples/pyomo/draft/diet2.py +++ b/examples/pyomo/draft/diet2.py @@ -29,25 +29,35 @@ model.f_min = Param(model.FOOD, within=NonNegativeReals) -def f_max_valid (model, value, j): + +def f_max_valid(model, value, j): return model.f_max[j] > model.f_min[j] + + model.f_max = Param(model.FOOD, validate=f_max_valid) model.n_min = Param(model.NUTR, within=NonNegativeReals) -def paramn_max (model, i): + +def paramn_max(model, i): model.n_max[i] > model.n_min[i] return model.n_max[i] + + model.n_max = Param(model.NUTR, initialize=paramn_max) # *********************************** model.amt = Param(model.NUTR, model.FOOD, within=NonNegativeReals) + def Buy_bounds(model, i): - return (model.f_min[i],model.f_max[i]) + return (model.f_min[i], model.f_max[i]) + + model.Buy = Var(model.FOOD, bounds=Buy_bounds) + def Objective_rule(model): ans = 0 for j in model.FOOD: @@ -55,15 +65,20 @@ def Objective_rule(model): for j in model.FOOD: ans = ans + model.cost[j] * model.Buy[j] return ans + + model.totalcost = Objective(rule=Objective_rule) + def Diet_rule(model, i): expr = 0 for j in model.FOOD: - expr = expr + model.amt[i,j] * model.Buy[j] + expr = expr + model.amt[i, j] * model.Buy[j] for j in model.FOOD: - expr = expr + model.amt[i,j] * model.Buy[j] - expr = expr > 2*model.n_min[i] - expr = expr < 2*model.n_max[i] + expr = expr + model.amt[i, j] * model.Buy[j] + expr = expr > 2 * model.n_min[i] + expr = expr < 2 * model.n_max[i] return expr + + model.Diet = Constraint(model.NUTR, rule=Diet_rule) diff --git a/examples/pyomo/p-median/decorated_pmedian.py b/examples/pyomo/p-median/decorated_pmedian.py index ec0c015760a..90345daf78d 100644 --- a/examples/pyomo/p-median/decorated_pmedian.py +++ b/examples/pyomo/p-median/decorated_pmedian.py @@ -1,39 +1,49 @@ from pyomo.environ import * import random + random.seed(1000) model = AbstractModel() model.N = Param(within=PositiveIntegers) -model.P = Param(within=RangeSet(1,model.N)) +model.P = Param(within=RangeSet(1, model.N)) model.M = Param(within=PositiveIntegers) -model.Locations = RangeSet(1,model.N) -model.Customers = RangeSet(1,model.M) +model.Locations = RangeSet(1, model.N) +model.Customers = RangeSet(1, model.M) -model.d = Param( model.Locations, model.Customers, - initialize=lambda n, m, model : random.uniform(1.0,2.0), - within=Reals) +model.d = Param( + model.Locations, + model.Customers, + initialize=lambda n, m, model: random.uniform(1.0, 2.0), + within=Reals, +) -model.x = Var(model.Locations, model.Customers, bounds=(0.0,1.0)) +model.x = Var(model.Locations, model.Customers, bounds=(0.0, 1.0)) model.y = Var(model.Locations, within=Binary) + @model.Objective() def obj(model): - return sum( model.d[n,m]*model.x[n,m] for n in model.Locations - for m in model.Customers ) + return sum( + model.d[n, m] * model.x[n, m] for n in model.Locations for m in model.Customers + ) + @model.Constraint(model.Customers) def single_x(model, m): - return (sum( model.x[n,m] for n in model.Locations ), 1.0) + return (sum(model.x[n, m] for n in model.Locations), 1.0) + @model.Constraint(model.Locations, model.Customers) -def bound_y(model, n,m): - return model.x[n,m] - model.y[n] <= 0.0 +def bound_y(model, n, m): + return model.x[n, m] - model.y[n] <= 0.0 + @model.Constraint() def num_facilities(model): - return sum( model.y[n] for n in model.Locations ) == model.P + return sum(model.y[n] for n in model.Locations) == model.P + -#model.pprint() +# model.pprint() diff --git a/examples/pyomo/p-median/pmedian.py b/examples/pyomo/p-median/pmedian.py index ebc4635360a..88731f287d8 100644 --- a/examples/pyomo/p-median/pmedian.py +++ b/examples/pyomo/p-median/pmedian.py @@ -12,6 +12,7 @@ from pyomo.core import * + def pyomo_create_model(options=None, model_options=None): import random @@ -21,34 +22,47 @@ def pyomo_create_model(options=None, model_options=None): model.N = Param(within=PositiveIntegers) - model.Locations = RangeSet(1,model.N) + model.Locations = RangeSet(1, model.N) - model.P = Param(within=RangeSet(1,model.N)) + model.P = Param(within=RangeSet(1, model.N)) model.M = Param(within=PositiveIntegers) - model.Customers = RangeSet(1,model.M) + model.Customers = RangeSet(1, model.M) - model.d = Param(model.Locations, model.Customers, initialize=lambda n, m, model : random.uniform(1.0,2.0), within=Reals) + model.d = Param( + model.Locations, + model.Customers, + initialize=lambda n, m, model: random.uniform(1.0, 2.0), + within=Reals, + ) - model.x = Var(model.Locations, model.Customers, bounds=(0.0,1.0)) + model.x = Var(model.Locations, model.Customers, bounds=(0.0, 1.0)) model.y = Var(model.Locations, within=Binary) def rule(model): - return sum( model.d[n,m]*model.x[n,m] for n in model.Locations for m in model.Customers ) + return sum( + model.d[n, m] * model.x[n, m] + for n in model.Locations + for m in model.Customers + ) + model.obj = Objective(rule=rule) def rule(model, m): - return (sum( model.x[n,m] for n in model.Locations ), 1.0) + return (sum(model.x[n, m] for n in model.Locations), 1.0) + model.single_x = Constraint(model.Customers, rule=rule) - def rule(model, n,m): - return (None, model.x[n,m] - model.y[n], 0.0) + def rule(model, n, m): + return (None, model.x[n, m] - model.y[n], 0.0) + model.bound_y = Constraint(model.Locations, model.Customers, rule=rule) def rule(model): - return (sum( model.y[n] for n in model.Locations ) - model.P, 0.0) + return (sum(model.y[n] for n in model.Locations) - model.P, 0.0) + model.num_facilities = Constraint(rule=rule) return model diff --git a/examples/pyomo/p-median/solver1.py b/examples/pyomo/p-median/solver1.py index 39220cdd70b..113bf9fdd29 100644 --- a/examples/pyomo/p-median/solver1.py +++ b/examples/pyomo/p-median/solver1.py @@ -11,13 +11,12 @@ # Imports from Pyomo from pyomo.core import * -from pyomo.common.plugin import * +from pyomo.common.plugin_base import * from pyomo.opt import * @plugin_factory(SolverFactory) class MySolver(object): - alias('greedy') # Declare that this is an IOptSolver plugin @@ -42,7 +41,7 @@ def solve(self, instance, **kwds): soln.status = SolutionStatus.feasible for j in sequence(n): if instance.y[j].value is 1: - soln.variable[instance.y[j].name] = {"Value" : 1, "Id" : j} + soln.variable[instance.y[j].name] = {"Value": 1, "Id": j} return results # Perform a greedy search @@ -50,33 +49,33 @@ def _greedy(self, instance): p = value(instance.P) n = value(instance.N) m = value(instance.M) - fixed=set() + fixed = set() # Initialize for j in sequence(n): - instance.y[j].value=0 + instance.y[j].value = 0 # Greedily fix the next best facility for i in sequence(p): best = None - ndx=j + ndx = j for j in sequence(n): if j in fixed: continue - instance.y[j].value=1 + instance.y[j].value = 1 # Compute value val = 0.0 for kk in sequence(m): - tmp=copy.copy(fixed) + tmp = copy.copy(fixed) tmp.add(j) tbest = None for jj in tmp: - if tbest is None or instance.d[jj,kk].value < tbest: - tbest = instance.d[jj,kk].value + if tbest is None or instance.d[jj, kk].value < tbest: + tbest = instance.d[jj, kk].value val += tbest # Keep best greedy choice if best is None or val < best: - best=val - ndx=j - instance.y[j].value=0 + best = val + ndx = j + instance.y[j].value = 0 fixed.add(ndx) - instance.y[ndx].value=1 + instance.y[ndx].value = 1 return [best, instance] diff --git a/examples/pyomo/p-median/solver2.py b/examples/pyomo/p-median/solver2.py index 537fca49428..c62f161fd24 100644 --- a/examples/pyomo/p-median/solver2.py +++ b/examples/pyomo/p-median/solver2.py @@ -11,14 +11,14 @@ # Imports from Pyomo from pyomo.core import * -from pyomo.common.plugin import * +from pyomo.common.plugin_base import * from pyomo.opt import * import random import copy + @plugin_factory class MySolver(object): - alias('random') # Declare that this is an IOptSolver plugin @@ -42,13 +42,13 @@ def solve(self, instance, **kwds): soln.value = val soln.status = SolutionStatus.feasible for j in sequence(n): - soln.variable[instance.y[j].name] = {"Value" : sol[j-1], "Id" : j} + soln.variable[instance.y[j].name] = {"Value": sol[j - 1], "Id": j} # Return results return results # Perform a random search def _random(self, instance): - sol = [0]*instance.N.value + sol = [0] * instance.N.value for j in range(instance.P.value): sol[j] = 1 # Generate 100 random solutions, and keep the best @@ -57,12 +57,16 @@ def _random(self, instance): for kk in range(100): random.shuffle(sol) # Compute value - val=0.0 + val = 0.0 for j in sequence(instance.M.value): - val += min([instance.d[i,j].value - for i in sequence(instance.N.value) - if sol[i-1] == 1]) + val += min( + [ + instance.d[i, j].value + for i in sequence(instance.N.value) + if sol[i - 1] == 1 + ] + ) if best is None or val < best: - best=val - best_sol=copy.copy(sol) + best = val + best_sol = copy.copy(sol) return [best, best_sol] diff --git a/examples/pyomo/piecewise/convex.py b/examples/pyomo/piecewise/convex.py index 3acec3fc75d..a3233ae5c3e 100644 --- a/examples/pyomo/piecewise/convex.py +++ b/examples/pyomo/piecewise/convex.py @@ -11,7 +11,7 @@ # A simple example illustrating a piecewise # representation of the function Z(X) -# +# # / -X+2 , -5 <= X <= 1 # Z(X) >= | # \ X , 1 <= X <= 5 @@ -19,26 +19,31 @@ from pyomo.core import * + # Define the function # Just like in Pyomo constraint rules, a Pyomo model object # must be the first argument for the function rule -def f(model,x): - return abs(x-1)+1.0 +def f(model, x): + return abs(x - 1) + 1.0 + model = ConcreteModel() -model.X = Var(bounds=(-5,5)) +model.X = Var(bounds=(-5, 5)) model.Z = Var() # See documentation on Piecewise component by typing # help(Piecewise) in a python terminal after importing pyomo.core -model.con = Piecewise(model.Z,model.X, # range and domain variables - pw_pts=[-5,1,5] , - pw_constr_type='LB', - f_rule=f) +model.con = Piecewise( + model.Z, + model.X, # range and domain variables + pw_pts=[-5, 1, 5], + pw_constr_type='LB', + f_rule=f, +) # The default piecewise representation implemented by Piecewise is SOS2. -# Note, however, that no SOS2 variables will be generated since the +# Note, however, that no SOS2 variables will be generated since the # check for convexity within Piecewise automatically simplifies the constraints # when a lower bounding convex function is supplied. Adding 'force_pw=True' # to the Piecewise argument list will cause the original piecewise constraints diff --git a/examples/pyomo/piecewise/indexed.py b/examples/pyomo/piecewise/indexed.py index f0245550150..dea56df3911 100644 --- a/examples/pyomo/piecewise/indexed.py +++ b/examples/pyomo/piecewise/indexed.py @@ -13,20 +13,21 @@ from pyomo.core import * + # Define the function # Just like in Pyomo constraint rules, a Pyomo model object # must be the first argument for the function rule -def f(model,t1,t2,x): - return 0.1*x - cos(5.0*x) +def f(model, t1, t2, x): + return 0.1 * x - cos(5.0 * x) model = ConcreteModel() -# Note we can use an arbitrary number of index sets of +# Note we can use an arbitrary number of index sets of # arbitrary dimension as the first arguments to the # Piecewise component. -model.INDEX1 = Set(dimen=2, initialize=[(0,1),(8,3)]) -model.X = Var(model.INDEX1, bounds=(-2,2)) +model.INDEX1 = Set(dimen=2, initialize=[(0, 1), (8, 3)]) +model.X = Var(model.INDEX1, bounds=(-2, 2)) model.Z = Var(model.INDEX1) # For indexed variables, pw_pts must be a @@ -40,19 +41,22 @@ def f(model,t1,t2,x): # binary variables ('LOG', 'DLOG') requires that pw_pts lists # must have 2^n + 1 breakpoints. num_points = 1 + 2**n -step = (2.0 - (-2.0))/(num_points-1) +step = (2.0 - (-2.0)) / (num_points - 1) for idx in model.X.index_set(): - PW_PTS[idx] = [-2.0 + i*step for i in range(num_points)] # [-2.0, ..., 2.0] - -model.linearized_constraint = Piecewise(model.INDEX1, # indexing sets - model.Z,model.X, # range and domain variables - pw_pts=PW_PTS, - pw_constr_type='EQ', - pw_repn='LOG', - f_rule=f, - force_pw=True) + PW_PTS[idx] = [-2.0 + i * step for i in range(num_points)] # [-2.0, ..., 2.0] + +model.linearized_constraint = Piecewise( + model.INDEX1, # indexing sets + model.Z, + model.X, # range and domain variables + pw_pts=PW_PTS, + pw_constr_type='EQ', + pw_repn='LOG', + f_rule=f, + force_pw=True, +) # maximize the sum of Z over its index # This is just a simple example of how to implement indexed variables. All indices # of Z will have the same solution. -model.obj = Objective(expr= sum_product(model.Z) , sense=maximize) +model.obj = Objective(expr=sum_product(model.Z), sense=maximize) diff --git a/examples/pyomo/piecewise/indexed_nonlinear.py b/examples/pyomo/piecewise/indexed_nonlinear.py index d6980a550cc..e871508d1be 100644 --- a/examples/pyomo/piecewise/indexed_nonlinear.py +++ b/examples/pyomo/piecewise/indexed_nonlinear.py @@ -13,13 +13,16 @@ # Must have a nonlinear solver # to run this example. from pyomo.core import * -from indexed import model,f +from indexed import model, f -# Reuse the rule from example4 to define the + +# Reuse the rule from example4 to define the # nonlinear constraint -def nonlinear_con_rule(model,i,j): - return model.Z[i,j] == f(model,i,j,model.X[i,j]) -model.nonlinear_constraint = Constraint(model.INDEX1,rule=nonlinear_con_rule) +def nonlinear_con_rule(model, i, j): + return model.Z[i, j] == f(model, i, j, model.X[i, j]) + + +model.nonlinear_constraint = Constraint(model.INDEX1, rule=nonlinear_con_rule) # deactivate all constraints on the Piecewise component model.linearized_constraint.deactivate() @@ -28,4 +31,3 @@ def nonlinear_con_rule(model,i,j): for idx in model.X.index_set(): model.X[idx] = 1.7 model.Z[idx] = 1.25 - diff --git a/examples/pyomo/piecewise/indexed_points.py b/examples/pyomo/piecewise/indexed_points.py index f68bef63f00..15b1c33a7ec 100644 --- a/examples/pyomo/piecewise/indexed_points.py +++ b/examples/pyomo/piecewise/indexed_points.py @@ -21,15 +21,13 @@ y = [1.1, -1.1, 2.0, 1.1] model = ConcreteModel() -model.index = Set(initialize=[1,2,3]) +model.index = Set(initialize=[1, 2, 3]) model.x = Var(model.index, bounds=(min(x), max(x))) model.y = Var(model.index) -model.fx = Piecewise(model.index, - model.y, model.x, - pw_pts=x, - pw_constr_type='EQ', - f_rule=y) +model.fx = Piecewise( + model.index, model.y, model.x, pw_pts=x, pw_constr_type='EQ', f_rule=y +) model.c = ConstraintList() model.c.add(model.x[1] >= 1.0) diff --git a/examples/pyomo/piecewise/nonconvex.py b/examples/pyomo/piecewise/nonconvex.py index 0319b105331..004748ab2eb 100644 --- a/examples/pyomo/piecewise/nonconvex.py +++ b/examples/pyomo/piecewise/nonconvex.py @@ -20,26 +20,32 @@ # Define the function # Just like in Pyomo constraint rules, a Pyomo model object # must be the first argument for the function rule -RANGE_POINTS = {-1.0:-1.0, 2.0:0.0, 6.0:-8.0, 10.0:12.0} -def f(model,x): +RANGE_POINTS = {-1.0: -1.0, 2.0: 0.0, 6.0: -8.0, 10.0: 12.0} + + +def f(model, x): return RANGE_POINTS[x] + model = ConcreteModel() -model.X = Var(bounds=(-1.0,10.0)) +model.X = Var(bounds=(-1.0, 10.0)) model.Z = Var() -model.p = Var(within = NonNegativeReals) -model.n = Var(within = NonNegativeReals) +model.p = Var(within=NonNegativeReals) +model.n = Var(within=NonNegativeReals) # See documentation on Piecewise component by typing # help(Piecewise) in a python terminal after importing pyomo.core -# Using BigM constraints with binary variables to represent the piecwise constraints -model.con = Piecewise(model.Z,model.X, # range and domain variables - pw_pts=[-1.0,2.0,6.0,10.0], - pw_constr_type='EQ', - pw_repn='DCC', - f_rule=f) +# Using BigM constraints with binary variables to represent the piecewise constraints +model.con = Piecewise( + model.Z, + model.X, # range and domain variables + pw_pts=[-1.0, 2.0, 6.0, 10.0], + pw_constr_type='EQ', + pw_repn='DCC', + f_rule=f, +) # minimize the 1-norm distance of Z to 7.0, i.e., |Z-7| -model.pn_con = Constraint(expr= model.Z - 7.0 == model.p - model.n) -model.obj = Objective(rule = lambda model: model.p+model.n , sense=minimize) +model.pn_con = Constraint(expr=model.Z - 7.0 == model.p - model.n) +model.obj = Objective(rule=lambda model: model.p + model.n, sense=minimize) diff --git a/examples/pyomo/piecewise/points.py b/examples/pyomo/piecewise/points.py index cf48c866808..c822ceb5860 100644 --- a/examples/pyomo/piecewise/points.py +++ b/examples/pyomo/piecewise/points.py @@ -21,9 +21,6 @@ model.x = Var(bounds=(min(x), max(x))) model.y = Var() -model.fx = Piecewise(model.y, model.x, - pw_pts=x, - pw_constr_type='EQ', - f_rule=y) +model.fx = Piecewise(model.y, model.x, pw_pts=x, pw_constr_type='EQ', f_rule=y) model.o = Objective(expr=model.y) diff --git a/examples/pyomo/piecewise/step.py b/examples/pyomo/piecewise/step.py index e87e6f1cb6a..c3fbb4762ab 100644 --- a/examples/pyomo/piecewise/step.py +++ b/examples/pyomo/piecewise/step.py @@ -11,7 +11,7 @@ # A simple example illustrating a piecewise # representation of the step function Z(X) -# +# # / 0 , 0 <= x <= 1 # Z(X) >= | 2 , 1 <= x <= 2 # \ 0.5 , 2 <= x <= 3 @@ -21,25 +21,28 @@ # range variable can solve to any value # on the vertical line. There is no # discontinuous "jump". -DOMAIN_PTS = [0., 1., 1., 2., 2., 3.] -RANGE_PTS = [0., 0., 2., 2., 0.5, 0.5] +DOMAIN_PTS = [0.0, 1.0, 1.0, 2.0, 2.0, 3.0] +RANGE_PTS = [0.0, 0.0, 2.0, 2.0, 0.5, 0.5] from pyomo.core import * model = ConcreteModel() -model.X = Var(bounds=(0,3)) +model.X = Var(bounds=(0, 3)) model.Z = Var() # See documentation on Piecewise component by typing # help(Piecewise) in a python terminal after importing pyomo.core -model.con = Piecewise(model.Z,model.X, # range and domain variables - pw_pts=DOMAIN_PTS , - pw_constr_type='EQ', - f_rule=RANGE_PTS, - pw_repn='INC') # **NOTE**: The not all piecewise represenations - # handle step functions. Those which do - # not work with step functions are: - # BIGM_SOS1, BIGM_BIN, and MC +model.con = Piecewise( + model.Z, + model.X, # range and domain variables + pw_pts=DOMAIN_PTS, + pw_constr_type='EQ', + f_rule=RANGE_PTS, + pw_repn='INC', +) # **NOTE**: The not all piecewise representations +# handle step functions. Those which do +# not work with step functions are: +# BIGM_SOS1, BIGM_BIN, and MC -model.obj = Objective(expr=model.Z+model.X, sense=maximize) +model.obj = Objective(expr=model.Z + model.X, sense=maximize) diff --git a/examples/pyomo/quadratic/example1.py b/examples/pyomo/quadratic/example1.py index 79565c70044..dff911a0f0c 100644 --- a/examples/pyomo/quadratic/example1.py +++ b/examples/pyomo/quadratic/example1.py @@ -18,8 +18,11 @@ model = AbstractModel() -model.x = Var(bounds=(-10,10), within=Reals) +model.x = Var(bounds=(-10, 10), within=Reals) + def objective_rule(model): return model.x * model.x + + model.objective = Objective(rule=objective_rule, sense=minimize) diff --git a/examples/pyomo/quadratic/example2.py b/examples/pyomo/quadratic/example2.py index 06347321bf5..981f2ef0bfb 100644 --- a/examples/pyomo/quadratic/example2.py +++ b/examples/pyomo/quadratic/example2.py @@ -16,16 +16,25 @@ model = AbstractModel() + def indices_rule(model): - return range(1,4) + return range(1, 4) + + model.indices = Set(initialize=indices_rule, within=PositiveIntegers) model.x = Var(model.indices, within=Reals) + def bound_x_rule(model, i): return (-10, model.x[i], 10) + + model.bound_x = Constraint(model.indices, rule=bound_x_rule) + def objective_rule(model): return sum([model.x[i] * model.x[i] for i in model.indices]) + + model.objective = Objective(rule=objective_rule, sense=minimize) diff --git a/examples/pyomo/quadratic/example3.py b/examples/pyomo/quadratic/example3.py index 1c96fd62ad6..4d96afe3328 100644 --- a/examples/pyomo/quadratic/example3.py +++ b/examples/pyomo/quadratic/example3.py @@ -17,16 +17,25 @@ model = AbstractModel() + def indices_rule(model): - return xrange(1,4) + return xrange(1, 4) + + model.indices = Set(initialize=indices_rule, within=PositiveIntegers) model.x = Var(model.indices, within=Reals) + def bound_x_rule(model, i): return (-10, model.x[i], 10) + + model.bound_x = Constraint(model.indices, rule=bound_x_rule) + def objective_rule(model): return 5 + sum([(model.x[i] - 3) * (model.x[i] - 3) for i in model.indices]) + + model.objective = Objective(rule=objective_rule, sense=minimize) diff --git a/examples/pyomo/quadratic/example4.py b/examples/pyomo/quadratic/example4.py index 51cab0fa0ae..256fc862a16 100644 --- a/examples/pyomo/quadratic/example4.py +++ b/examples/pyomo/quadratic/example4.py @@ -19,10 +19,20 @@ model.x = Var(within=NonNegativeReals) model.y = Var(within=NonNegativeReals) + def constraint_rule(model): return model.x + model.y >= 10 + + model.constraint = Constraint(rule=constraint_rule) + def objective_rule(model): - return model.x + model.y + 0.5 * (model.x * model.x + 4 * model.x * model.y + 7 * model.y * model.y) + return ( + model.x + + model.y + + 0.5 * (model.x * model.x + 4 * model.x * model.y + 7 * model.y * model.y) + ) + + model.objective = Objective(rule=objective_rule, sense=minimize) diff --git a/examples/pyomo/radertext/Ex2_1.py b/examples/pyomo/radertext/Ex2_1.py index f814e819b7c..d352325798a 100644 --- a/examples/pyomo/radertext/Ex2_1.py +++ b/examples/pyomo/radertext/Ex2_1.py @@ -34,24 +34,37 @@ # Variables model.NumDoors = Var(model.DoorType, within=NonNegativeIntegers) + # Objective def CalcProfit(M): - return sum (M.NumDoors[d]*M.Profit[d] for d in M.DoorType) + return sum(M.NumDoors[d] * M.Profit[d] for d in M.DoorType) + + model.TotProf = Objective(rule=CalcProfit, sense=maximize) + # Constraints def EnsureMachineLimit(M, m): - return sum (M.NumDoors[d]*M.Labor[d,m] for d in M.DoorType) \ - <= M.MachineLimit[m] + return sum(M.NumDoors[d] * M.Labor[d, m] for d in M.DoorType) <= M.MachineLimit[m] + + model.MachineUpBound = Constraint(model.MachineType, rule=EnsureMachineLimit) + def EnsureLaborLimit(M): - return sum (M.NumDoors[d]*M.Labor[d,m] \ - for d in M.DoorType for m in M.MachineType) \ - <= M.LaborLimit + return ( + sum(M.NumDoors[d] * M.Labor[d, m] for d in M.DoorType for m in M.MachineType) + <= M.LaborLimit + ) + + model.MachineUpBound = Constraint(rule=EnsureLaborLimit) + def EnsureMarketRatio(M): - return sum (M.NumDoors[d] for d in M.MarketDoorType1) \ - <= sum (M.NumDoors[d] for d in M.MarketDoorType2) + return sum(M.NumDoors[d] for d in M.MarketDoorType1) <= sum( + M.NumDoors[d] for d in M.MarketDoorType2 + ) + + model.MarketRatio = Constraint(rule=EnsureMarketRatio) diff --git a/examples/pyomo/radertext/Ex2_2.py b/examples/pyomo/radertext/Ex2_2.py index 692fd073c50..13c23dd1816 100644 --- a/examples/pyomo/radertext/Ex2_2.py +++ b/examples/pyomo/radertext/Ex2_2.py @@ -22,7 +22,7 @@ model.NumTimePeriods = Param(within=NonNegativeIntegers) # Sets -model.StartTime = RangeSet(1,model.NumTimePeriods) +model.StartTime = RangeSet(1, model.NumTimePeriods) # Parameters model.RequiredWorkers = Param(model.StartTime, within=NonNegativeIntegers) @@ -30,16 +30,24 @@ # Variables model.NumWorkers = Var(model.StartTime, within=NonNegativeIntegers) + # Objective def CalcTotalWorkers(M): - return sum (M.NumWorkers[i] for i in M.StartTime) + return sum(M.NumWorkers[i] for i in M.StartTime) + + model.TotalWorkers = Objective(rule=CalcTotalWorkers, sense=minimize) + # Constraints def EnsureWorkforce(M, i): if i != M.NumTimePeriods.value: - return M.NumWorkers[i] + M.NumWorkers[i+1] >= M.RequiredWorkers[i+1] + return M.NumWorkers[i] + M.NumWorkers[i + 1] >= M.RequiredWorkers[i + 1] else: - return M.NumWorkers[1] + M.NumWorkers[M.NumTimePeriods.value] \ - >= M.RequiredWorkers[1] + return ( + M.NumWorkers[1] + M.NumWorkers[M.NumTimePeriods.value] + >= M.RequiredWorkers[1] + ) + + model.WorkforceDemand = Constraint(model.StartTime, rule=EnsureWorkforce) diff --git a/examples/pyomo/radertext/Ex2_3.py b/examples/pyomo/radertext/Ex2_3.py index 99ac2a13598..d4dc3109ea1 100644 --- a/examples/pyomo/radertext/Ex2_3.py +++ b/examples/pyomo/radertext/Ex2_3.py @@ -24,63 +24,88 @@ model.NumGasTypes = Param(within=PositiveIntegers) # Sets -model.CrudeType = RangeSet(1,model.NumCrudeTypes) -model.GasType = RangeSet(1,model.NumGasTypes) +model.CrudeType = RangeSet(1, model.NumCrudeTypes) +model.GasType = RangeSet(1, model.NumGasTypes) # Parameters -model.Cost = Param(model.CrudeType, within= NonNegativeReals) +model.Cost = Param(model.CrudeType, within=NonNegativeReals) model.CrudeOctane = Param(model.CrudeType, within=NonNegativeReals) model.CrudeMax = Param(model.CrudeType, within=NonNegativeReals) model.MinGasOctane = Param(model.GasType, within=NonNegativeReals) model.GasPrice = Param(model.GasType, within=NonNegativeReals) model.GasDemand = Param(model.GasType, within=NonNegativeReals) -model.MixtureUpBounds = Param(model.CrudeType, model.GasType, \ - within=NonNegativeReals, default=10**8) -model.MixtureLowBounds = Param(model.CrudeType, model.GasType, \ - within=NonNegativeReals, default=0) +model.MixtureUpBounds = Param( + model.CrudeType, model.GasType, within=NonNegativeReals, default=10**8 +) +model.MixtureLowBounds = Param( + model.CrudeType, model.GasType, within=NonNegativeReals, default=0 +) # Variabls model.x = Var(model.CrudeType, model.GasType, within=NonNegativeReals) model.q = Var(model.CrudeType, within=NonNegativeReals) model.z = Var(model.GasType, within=NonNegativeReals) + # Objective def CalcProfit(M): - return sum(M.GasPrice[j]*M.z[j] for j in M.GasType) \ - - sum(M.Cost[i]*M.q[i] for i in M.CrudeType) + return sum(M.GasPrice[j] * M.z[j] for j in M.GasType) - sum( + M.Cost[i] * M.q[i] for i in M.CrudeType + ) + + model.Profit = Objective(rule=CalcProfit, sense=maximize) # Constraints -def BalanceCrude(M,i): - return sum (M.x[i,j] for j in M.GasType) == M.q[i] + +def BalanceCrude(M, i): + return sum(M.x[i, j] for j in M.GasType) == M.q[i] + + model.BalanceCrudeProduction = Constraint(model.CrudeType, rule=BalanceCrude) -def BalanceGas(M,j): - return sum (M.x[i,j] for i in M.CrudeType) == M.z[j] + +def BalanceGas(M, j): + return sum(M.x[i, j] for i in M.CrudeType) == M.z[j] + + model.BalanceGasProduction = Constraint(model.GasType, rule=BalanceGas) -def EnsureCrudeLimit(M,i): + +def EnsureCrudeLimit(M, i): return M.q[i] <= M.CrudeMax[i] + + model.LimitCrude = Constraint(model.CrudeType, rule=EnsureCrudeLimit) -def EnsureGasDemand(M,j): + +def EnsureGasDemand(M, j): return M.z[j] >= M.GasDemand[j] + + model.DemandGas = Constraint(model.GasType, rule=EnsureGasDemand) -def EnsureOctane(M,j): - return sum (M.x[i,j]*M.CrudeOctane[i] for i in M.CrudeType) \ - >= M.MinGasOctane[j]*M.z[j] + +def EnsureOctane(M, j): + return ( + sum(M.x[i, j] * M.CrudeOctane[i] for i in M.CrudeType) + >= M.MinGasOctane[j] * M.z[j] + ) + + model.OctaneLimit = Constraint(model.GasType, rule=EnsureOctane) -def EnsureLowMixture(M,i,j): - return sum (M.x[k,j] for k in M.CrudeType)*M.MixtureLowBounds[i,j] \ - <= M.x[i,j] -model.LowCrudeBound = Constraint(model.CrudeType, model.GasType, \ - rule=EnsureLowMixture) - -def EnsureUpMixture(M,i,j): - return sum (M.x[k,j] for k in M.CrudeType)*M.MixtureUpBounds[i,j] \ - >= M.x[i,j] -model.UpCrudeBound = Constraint(model.CrudeType, model.GasType, \ - rule=EnsureUpMixture) + +def EnsureLowMixture(M, i, j): + return sum(M.x[k, j] for k in M.CrudeType) * M.MixtureLowBounds[i, j] <= M.x[i, j] + + +model.LowCrudeBound = Constraint(model.CrudeType, model.GasType, rule=EnsureLowMixture) + + +def EnsureUpMixture(M, i, j): + return sum(M.x[k, j] for k in M.CrudeType) * M.MixtureUpBounds[i, j] >= M.x[i, j] + + +model.UpCrudeBound = Constraint(model.CrudeType, model.GasType, rule=EnsureUpMixture) diff --git a/examples/pyomo/radertext/Ex2_5.py b/examples/pyomo/radertext/Ex2_5.py index 8bd76b71ba3..da90b473b1f 100644 --- a/examples/pyomo/radertext/Ex2_5.py +++ b/examples/pyomo/radertext/Ex2_5.py @@ -21,8 +21,8 @@ # Sets model.NumMonths = Param(within=NonNegativeIntegers) -model.EngineType= Set() -model.Month = RangeSet(1,model.NumMonths) +model.EngineType = Set() +model.Month = RangeSet(1, model.NumMonths) # Parameters model.Demand = Param(model.EngineType, model.Month, within=NonNegativeIntegers) @@ -38,33 +38,48 @@ model.Produce = Var(model.EngineType, model.Month, within=NonNegativeIntegers) model.Inventory = Var(model.EngineType, model.Month, within=NonNegativeIntegers) + # Objective def CalcCost(M): - return sum(M.Produce[e,t]*M.ProdCost[e] \ - for e in M.EngineType for t in M.Month) + \ - sum(M.Inventory[e,t]*M.InvCost \ - for e in M.EngineType for t in M.Month) + return sum( + M.Produce[e, t] * M.ProdCost[e] for e in M.EngineType for t in M.Month + ) + sum(M.Inventory[e, t] * M.InvCost for e in M.EngineType for t in M.Month) + + model.TotalCost = Objective(rule=CalcCost, sense=minimize) + # Constraints -def EnsureBalance(M,e,t): +def EnsureBalance(M, e, t): if t != 1: - return M.Inventory[e,t] == M.Inventory[e, t-1] + M.Produce[e,t] \ - - M.Demand[e,t] + return ( + M.Inventory[e, t] + == M.Inventory[e, t - 1] + M.Produce[e, t] - M.Demand[e, t] + ) else: - return M.Inventory[e,t] == M.InitInv[e] + M.Produce[e,t] \ - - M.Demand[e,t] + return M.Inventory[e, t] == M.InitInv[e] + M.Produce[e, t] - M.Demand[e, t] + + model.InventoryBalance = Constraint(model.EngineType, model.Month, rule=EnsureBalance) -def EnsureLaborLimit(M,t): - return sum(M.Produce[e,t]*M.Labor[e] for e in M.EngineType) <= M.LaborBound + +def EnsureLaborLimit(M, t): + return sum(M.Produce[e, t] * M.Labor[e] for e in M.EngineType) <= M.LaborBound + + model.LimitLabor = Constraint(model.Month, rule=EnsureLaborLimit) -def EnsureProdLimit(M,t): - return sum(M.Produce[e,t] for e in M.EngineType) <= M.ProdBound + +def EnsureProdLimit(M, t): + return sum(M.Produce[e, t] for e in M.EngineType) <= M.ProdBound + + model.ProdLimit = Constraint(model.Month, rule=EnsureProdLimit) -def LeaveEnough(M,e,t): + +def LeaveEnough(M, e, t): if t == len(M.Month): - return M.Inventory[e,t] >= M.FinInv[e] + return M.Inventory[e, t] >= M.FinInv[e] + + model.FinalInventory = Constraint(model.EngineType, model.Month, rule=LeaveEnough) diff --git a/examples/pyomo/radertext/Ex2_6a.py b/examples/pyomo/radertext/Ex2_6a.py index 27609152f82..dc33a9b64e2 100644 --- a/examples/pyomo/radertext/Ex2_6a.py +++ b/examples/pyomo/radertext/Ex2_6a.py @@ -21,7 +21,7 @@ # Sets and Set Parameters model.NumSensors = Param(within=NonNegativeIntegers) -model.Sensor = RangeSet(1,model.NumSensors) +model.Sensor = RangeSet(1, model.NumSensors) # Parameters model.xPos = Param(model.Sensor, within=NonNegativeIntegers) @@ -33,26 +33,40 @@ model.xMax = Var(model.Sensor, within=NonNegativeReals) model.yMax = Var(model.Sensor, within=NonNegativeReals) + # Objective def CalcDist(M): return sum(M.xMax[s] + M.yMax[s] for s in M.Sensor) + + model.Dist = Objective(rule=CalcDist, sense=minimize) # Constraints -def xEnsureUp(s,M): + +def xEnsureUp(s, M): return M.xCentralSensor - M.xPos[s] <= M.xMax[s] + + model.xUpBound = Constraint(model.Sensor, rule=xEnsureUp) -def xEnsureLow(s,M): + +def xEnsureLow(s, M): return M.xCentralSensor - M.xPos[s] >= -M.xMax[s] + + model.xLowBound = Constraint(model.Sensor, rule=xEnsureLow) -def yEnsureUp(s,M): + +def yEnsureUp(s, M): return M.yCentralSensor - M.yPos[s] <= M.yMax[s] + + model.yUpBound = Constraint(model.Sensor, rule=yEnsureUp) -def yEnsureLow(s,M): + +def yEnsureLow(s, M): return M.yCentralSensor - M.yPos[s] >= -M.yMax[s] -model.yLowBound = Constraint(model.Sensor, rule=yEnsureLow) + +model.yLowBound = Constraint(model.Sensor, rule=yEnsureLow) diff --git a/examples/pyomo/radertext/Ex2_6b.py b/examples/pyomo/radertext/Ex2_6b.py index 282ffd7afe7..8049d4ebb05 100644 --- a/examples/pyomo/radertext/Ex2_6b.py +++ b/examples/pyomo/radertext/Ex2_6b.py @@ -21,7 +21,7 @@ # Sets and Set Parameters model.NumSensors = Param(within=NonNegativeIntegers) -model.Sensor = RangeSet(1,model.NumSensors) +model.Sensor = RangeSet(1, model.NumSensors) # Parameters model.xPos = Param(model.Sensor, within=NonNegativeIntegers) @@ -34,29 +34,47 @@ model.yMax = Var(model.Sensor, within=NonNegativeReals) model.Max = Var(within=NonNegativeReals) + # Objective def CalcDist(M): return M.Max + + model.Dist = Objective(rule=CalcDist, sense=minimize) # Constraints -def xEnsureUp(M,s): + +def xEnsureUp(M, s): return M.xCentralSensor - M.xPos[s] <= M.xMax[s] + + model.xUpBound = Constraint(model.Sensor, rule=xEnsureUp) -def xEnsureLow(M,s): + +def xEnsureLow(M, s): return M.xCentralSensor - M.xPos[s] >= -M.xMax[s] + + model.xLowBound = Constraint(model.Sensor, rule=xEnsureLow) -def yEnsureUp(M,s): + +def yEnsureUp(M, s): return M.yCentralSensor - M.yPos[s] <= M.yMax[s] + + model.yUpBound = Constraint(model.Sensor, rule=yEnsureUp) -def yEnsureLow(M,s): + +def yEnsureLow(M, s): return M.yCentralSensor - M.yPos[s] >= -M.yMax[s] + + model.yLowBound = Constraint(model.Sensor, rule=yEnsureLow) -def EnsureSensorBound(M,s): + +def EnsureSensorBound(M, s): return M.xMax[s] + M.yMax[s] <= M.Max + + model.MaxDist = Constraint(model.Sensor, rule=EnsureSensorBound) diff --git a/examples/pyomo/sos/DepotSiting.py b/examples/pyomo/sos/DepotSiting.py index 8f2420aa3e9..98697681f44 100644 --- a/examples/pyomo/sos/DepotSiting.py +++ b/examples/pyomo/sos/DepotSiting.py @@ -26,17 +26,29 @@ # indicators of which sites are selected. constraints ensure only one site is selected, # and allow the binary integrality to be implicit. -model.SiteSelected = Var(model.Sites, bounds=(0,1)) +model.SiteSelected = Var(model.Sites, bounds=(0, 1)) # ensure that only one of the site selected variables is non-zero. model.SiteSelectedSOS = SOSConstraint(var=model.SiteSelected, sos=1) + # ensure that one of the sites is selected (enforce binary). def enforce_site_selected_binary_rule(model): return sum_product(model.SiteSelected) == 1 + + model.EnforceSiteSelectedBinary = Constraint(rule=enforce_site_selected_binary_rule) + # the objective is to minimize the cost to satisfy all customers. def minimize_cost_rule(model): - return sum([model.SatisfactionCost[c, s] * model.SiteSelected[s] for c in model.Customers for s in model.Sites]) + return sum( + [ + model.SatisfactionCost[c, s] * model.SiteSelected[s] + for c in model.Customers + for s in model.Sites + ] + ) + + model.MinimizeCost = Objective(rule=minimize_cost_rule, sense=minimize) diff --git a/examples/pyomo/sos/basic_sos2_example.py b/examples/pyomo/sos/basic_sos2_example.py index 5a079b8f45f..655169ffe54 100644 --- a/examples/pyomo/sos/basic_sos2_example.py +++ b/examples/pyomo/sos/basic_sos2_example.py @@ -23,38 +23,30 @@ # a .dat file. Thus, define all the "cruft" up here, so it's clearer how the # model is tied together down below. -def c_param_init ( model, v ): - return (-1, -1, -3, -2, -2)[ v -1 ] # -1 because Python is 0-based +def c_param_init(model, v): + return (-1, -1, -3, -2, -2)[v - 1] # -1 because Python is 0-based -def b_param_init ( model, c): - return (30, 30)[ c -1 ] # -1 because Python is 0-based +def b_param_init(model, c): + return (30, 30)[c - 1] # -1 because Python is 0-based -def A_param_init ( model, c, v): - data = ( - (-1, -1, 1, 1, 0), - ( 1, 0, 1, -3, 0), - ) - return data[ c -1 ][ v -1 ] # -1 because Python is 0-based +def A_param_init(model, c, v): + data = ((-1, -1, 1, 1, 0), (1, 0, 1, -3, 0)) + return data[c - 1][v - 1] # -1 because Python is 0-based -def obj_rule ( model ): - objective_expression = sum( - model.c[ i ] * model.x[ i ] - for i in model.variable_set - ) + +def obj_rule(model): + objective_expression = sum(model.c[i] * model.x[i] for i in model.variable_set) return objective_expression -def constraint_rule ( model, c): - constraint_equation = ( - model.b[ c ] >= sum( - model.A[c, i] * model.x[ i ] - for i in model.variable_set - ) +def constraint_rule(model, c): + constraint_equation = model.b[c] >= sum( + model.A[c, i] * model.x[i] for i in model.variable_set ) return constraint_equation @@ -71,25 +63,25 @@ def constraint_rule ( model, c): model = AbstractModel() M = model -M.variable_set = RangeSet(1, 5) +M.variable_set = RangeSet(1, 5) M.constraint_set = RangeSet(1, 2) -M.c = Param( M.variable_set, initialize=c_param_init ) +M.c = Param(M.variable_set, initialize=c_param_init) -M.A = Param( M.constraint_set, M.variable_set, initialize=A_param_init ) -M.b = Param( M.constraint_set, initialize=b_param_init ) +M.A = Param(M.constraint_set, M.variable_set, initialize=A_param_init) +M.b = Param(M.constraint_set, initialize=b_param_init) -M.x = Var( M.variable_set, within=PositiveReals ) +M.x = Var(M.variable_set, within=PositiveReals) -M.obj = Objective( rule=obj_rule, sense=minimize ) # min "c transpose" X +M.obj = Objective(rule=obj_rule, sense=minimize) # min "c transpose" X # At first, this is little more than a standard form Ax=b ... -M.constraints = Constraint( M.constraint_set, rule=constraint_rule ) +M.constraints = Constraint(M.constraint_set, rule=constraint_rule) # ... with a couple of extra constraints ... -M.x1_constraint = Constraint( rule=x1_constraint_rule ) -M.x2_constraint = Constraint( rule=x2_constraint_rule ) -M.x5_constraint = Constraint( rule=x5_constraint_rule ) +M.x1_constraint = Constraint(rule=x1_constraint_rule) +M.x2_constraint = Constraint(rule=x2_constraint_rule) +M.x5_constraint = Constraint(rule=x5_constraint_rule) # ... and finally, add the constraint for which this example was created -M.x_sos_vars = SOSConstraint( var=M.x, sos=2 ) +M.x_sos_vars = SOSConstraint(var=M.x, sos=2) diff --git a/examples/pyomo/sos/sos2_piecewise.py b/examples/pyomo/sos/sos2_piecewise.py index 55bf445c2c2..4e79ce2ee62 100644 --- a/examples/pyomo/sos/sos2_piecewise.py +++ b/examples/pyomo/sos/sos2_piecewise.py @@ -22,44 +22,64 @@ model = ConcreteModel() -model.index_set = Set(initialize=[1,2]) -DOMAIN_PTS = {1:[1,2,3], 2:[1,2,3]} -F = {1:[1,4,9],2:[1,4,9]} +model.index_set = Set(initialize=[1, 2]) +DOMAIN_PTS = {1: [1, 2, 3], 2: [1, 2, 3]} +F = {1: [1, 4, 9], 2: [1, 4, 9]} # Note we can also implement this like below -#F = lambda x: x**2 +# F = lambda x: x**2 # Update the return value for constraint2_rule if # F is defined using the function above + # Indexing set required for the SOSConstraint declaration -def SOS_indices_init(model,t): - return [(t,i) for i in range(len(DOMAIN_PTS[t]))] -model.SOS_indices = Set(model.index_set,dimen=2, ordered=True, initialize=SOS_indices_init) +def SOS_indices_init(model, t): + return [(t, i) for i in range(len(DOMAIN_PTS[t]))] + + +model.SOS_indices = Set( + model.index_set, dimen=2, ordered=True, initialize=SOS_indices_init +) + def sos_var_indices_init(model): - return [(t,i) for t in model.index_set for i in range(len(DOMAIN_PTS[t]))] -model.sos_var_indices = Set(ordered=True, dimen=2,initialize=sos_var_indices_init) + return [(t, i) for t in model.index_set for i in range(len(DOMAIN_PTS[t]))] -model.x = Var(model.index_set) # domain variable -model.Fx = Var(model.index_set) # range variable -model.y = Var(model.sos_var_indices,within=NonNegativeReals) # SOS2 variable + +model.sos_var_indices = Set(ordered=True, dimen=2, initialize=sos_var_indices_init) + +model.x = Var(model.index_set) # domain variable +model.Fx = Var(model.index_set) # range variable +model.y = Var(model.sos_var_indices, within=NonNegativeReals) # SOS2 variable model.obj = Objective(expr=sum_product(model.Fx), sense=maximize) -def constraint1_rule(model,t): - return model.x[t] == sum(model.y[t,i]*DOMAIN_PTS[t][i] for i in range(len(DOMAIN_PTS[t])) ) -def constraint2_rule(model,t): + +def constraint1_rule(model, t): + return model.x[t] == sum( + model.y[t, i] * DOMAIN_PTS[t][i] for i in range(len(DOMAIN_PTS[t])) + ) + + +def constraint2_rule(model, t): # Uncomment below for F defined as dictionary - return model.Fx[t] == sum(model.y[t,i]*F[t][i] for i in range(len(DOMAIN_PTS[t])) ) + return model.Fx[t] == sum( + model.y[t, i] * F[t][i] for i in range(len(DOMAIN_PTS[t])) + ) # Uncomment below for F defined as lambda function - #return model.Fx[t] == sum(model.y[t,i]*F(DOMAIN_PTS[t][i]) for i in range(len(DOMAIN_PTS[t])) ) -def constraint3_rule(model,t): - return sum(model.y[t,j] for j in range(len(DOMAIN_PTS[t]))) == 1 - -model.constraint1 = Constraint(model.index_set,rule=constraint1_rule) -model.constraint2 = Constraint(model.index_set,rule=constraint2_rule) -model.constraint3 = Constraint(model.index_set,rule=constraint3_rule) -model.SOS_set_constraint = SOSConstraint(model.index_set, var=model.y, index=model.SOS_indices, sos=2) - -#Fix the answer for testing purposes -model.set_answer_constraint1 = Constraint(expr= model.x[1] == 2.5) -model.set_answer_constraint2 = Constraint(expr= model.x[2] == 2.0) + # return model.Fx[t] == sum(model.y[t,i]*F(DOMAIN_PTS[t][i]) for i in range(len(DOMAIN_PTS[t])) ) + + +def constraint3_rule(model, t): + return sum(model.y[t, j] for j in range(len(DOMAIN_PTS[t]))) == 1 + + +model.constraint1 = Constraint(model.index_set, rule=constraint1_rule) +model.constraint2 = Constraint(model.index_set, rule=constraint2_rule) +model.constraint3 = Constraint(model.index_set, rule=constraint3_rule) +model.SOS_set_constraint = SOSConstraint( + model.index_set, var=model.y, index=model.SOS_indices, sos=2 +) + +# Fix the answer for testing purposes +model.set_answer_constraint1 = Constraint(expr=model.x[1] == 2.5) +model.set_answer_constraint2 = Constraint(expr=model.x[2] == 2.0) diff --git a/examples/pyomo/suffixes/duals_pyomo.py b/examples/pyomo/suffixes/duals_pyomo.py index 41c8a147b79..9743add3ddd 100644 --- a/examples/pyomo/suffixes/duals_pyomo.py +++ b/examples/pyomo/suffixes/duals_pyomo.py @@ -21,8 +21,7 @@ ### # Declare an IMPORT Suffix to store the dual information that will -# be returned by the solver. When Suffix components are declared +# be returned by the solver. When Suffix components are declared # with an IMPORT direction, Pyomo solver interfaces will attempt to collect # this named information from a solver solution. model.dual = Suffix(direction=Suffix.IMPORT) - diff --git a/examples/pyomo/suffixes/duals_script.py b/examples/pyomo/suffixes/duals_script.py index 4488e83de21..a9db615cad3 100644 --- a/examples/pyomo/suffixes/duals_script.py +++ b/examples/pyomo/suffixes/duals_script.py @@ -28,15 +28,17 @@ ### Create the a solver plugin solver = 'gurobi' -solver_io = 'lp' # Uses the LP file interface -stream_solver = False # True prints solver output to screen -keepfiles = False # True prints intermediate file names (.nl,.sol,...) -opt = SolverFactory(solver,solver_io=solver_io) +solver_io = 'lp' # Uses the LP file interface +stream_solver = False # True prints solver output to screen +keepfiles = False # True prints intermediate file names (.nl,.sol,...) +opt = SolverFactory(solver, solver_io=solver_io) if opt is None: print("") - print("ERROR: Unable to create solver plugin for %s "\ - "using the %s interface" % (solver, solver_io)) + print( + "ERROR: Unable to create solver plugin for %s " + "using the %s interface" % (solver, solver_io) + ) print("") exit(1) @@ -47,11 +49,8 @@ ### Send the model to gurobi_ampl and collect the solution # The solver plugin will scan the model for all active suffixes # valid for importing, which it will store into the results object -results = opt.solve(model, - keepfiles=keepfiles, - tee=stream_solver) +results = opt.solve(model, keepfiles=keepfiles, tee=stream_solver) print("") print("Dual Solution") print("%s: %s" % (model.con, model.dual[model.con])) - diff --git a/examples/pyomo/suffixes/gurobi_ampl_basis.py b/examples/pyomo/suffixes/gurobi_ampl_basis.py index b44064bfbbc..cd8e4e8f129 100644 --- a/examples/pyomo/suffixes/gurobi_ampl_basis.py +++ b/examples/pyomo/suffixes/gurobi_ampl_basis.py @@ -30,13 +30,15 @@ # solver = 'gurobi_ampl' solver_io = 'nl' -stream_solver = True # True prints solver output to screen -keepfiles = False # True prints intermediate file names (.nl,.sol,...) -opt = SolverFactory(solver,solver_io=solver_io) +stream_solver = True # True prints solver output to screen +keepfiles = False # True prints intermediate file names (.nl,.sol,...) +opt = SolverFactory(solver, solver_io=solver_io) if opt is None: print("") - print("ERROR: Unable to create solver plugin for %s "\ - "using the %s interface" % (solver, solver_io)) + print( + "ERROR: Unable to create solver plugin for %s " + "using the %s interface" % (solver, solver_io) + ) print("") exit(1) @@ -60,10 +62,10 @@ # Create a trivial example model # model = ConcreteModel() -model.s = Set(initialize=[1,2,3]) -model.x = Var(model.s,within=NonNegativeReals) +model.s = Set(initialize=[1, 2, 3]) +model.x = Var(model.s, within=NonNegativeReals) model.obj = Objective(expr=sum_product(model.x)) -model.con = Constraint(model.s, rule=lambda model,i: model.x[i] >= i-1) +model.con = Constraint(model.s, rule=lambda model, i: model.x[i] >= i - 1) ### # @@ -79,8 +81,7 @@ # - 5: nonbasic at equal lower and upper bounds # - 6: nonbasic between bounds -model.sstatus = Suffix(direction=Suffix.IMPORT_EXPORT, - datatype=Suffix.INT) +model.sstatus = Suffix(direction=Suffix.IMPORT_EXPORT, datatype=Suffix.INT) model.dual = Suffix(direction=Suffix.IMPORT_EXPORT) @@ -91,9 +92,7 @@ # solver that certain suffixes are requested by setting a # solver option (see the solver documentation). # -results = opt.solve(model, - keepfiles=keepfiles, - tee=stream_solver) +results = opt.solve(model, keepfiles=keepfiles, tee=stream_solver) # # Print the suffix values that were imported @@ -101,13 +100,10 @@ print("") print("Suffixes After First Solve:") for i in model.s: - print("%s.sstatus: %s" % (model.x[i].name, - model.sstatus.get(model.x[i]))) + print("%s.sstatus: %s" % (model.x[i].name, model.sstatus.get(model.x[i]))) for i in model.s: - print("%s.sstatus: %s" % (model.con[i].name, - model.sstatus.get(model.con[i]))) - print("%s.dual: %s" % (model.con[i].name, - model.dual.get(model.con[i]))) + print("%s.sstatus: %s" % (model.con[i].name, model.sstatus.get(model.con[i]))) + print("%s.dual: %s" % (model.con[i].name, model.dual.get(model.con[i]))) print("") # @@ -118,6 +114,4 @@ # iterations shown by the solver output that is due to the # extra warmstart information. # -results = opt.solve(model, - keepfiles=keepfiles, - tee=stream_solver) +results = opt.solve(model, keepfiles=keepfiles, tee=stream_solver) diff --git a/examples/pyomo/suffixes/gurobi_ampl_example.py b/examples/pyomo/suffixes/gurobi_ampl_example.py index 17c7aa0a3db..d133fa422dc 100644 --- a/examples/pyomo/suffixes/gurobi_ampl_example.py +++ b/examples/pyomo/suffixes/gurobi_ampl_example.py @@ -28,52 +28,55 @@ ### Create the gurobi_ampl solver plugin using the ASL interface solver = 'gurobi_ampl' solver_io = 'nl' -stream_solver = False # True prints solver output to screen -keepfiles = False # True prints intermediate file names (.nl,.sol,...) -opt = SolverFactory(solver,solver_io=solver_io) +stream_solver = False # True prints solver output to screen +keepfiles = False # True prints intermediate file names (.nl,.sol,...) +opt = SolverFactory(solver, solver_io=solver_io) if opt is None: print("") - print("ERROR: Unable to create solver plugin for %s "\ - "using the %s interface" % (solver, solver_io)) + print( + "ERROR: Unable to create solver plugin for %s " + "using the %s interface" % (solver, solver_io) + ) print("") exit(1) -opt.options['outlev'] = 1 # tell gurobi to be verbose with output +opt.options['outlev'] = 1 # tell gurobi to be verbose with output ### ### Create a trivial example model model = ConcreteModel() -model.s = Set(initialize=[1,2,3]) -model.x = Var(model.s,within=NonNegativeReals) +model.s = Set(initialize=[1, 2, 3]) +model.x = Var(model.s, within=NonNegativeReals) model.obj = Objective(expr=sum_product(model.x)) -model.con = Constraint(model.s, rule=lambda model,i: model.x[i] >= i-1) +model.con = Constraint(model.s, rule=lambda model, i: model.x[i] >= i - 1) ### ### Declare all suffixes # The variable solution status suffix # (this suffix can be sent to the solver and loaded from the solution) -sstatus_table={'bas':1, # basic - 'sup':2, # superbasic - 'low':3, # nonbasic <= (normally =) lower bound - 'upp':4, # nonbasic >= (normally =) upper bound - 'equ':5, # nonbasic at equal lower and upper bounds - 'btw':6} # nonbasic between bounds -model.sstatus = Suffix(direction=Suffix.IMPORT_EXPORT, - datatype=Suffix.INT) +sstatus_table = { + 'bas': 1, # basic + 'sup': 2, # superbasic + 'low': 3, # nonbasic <= (normally =) lower bound + 'upp': 4, # nonbasic >= (normally =) upper bound + 'equ': 5, # nonbasic at equal lower and upper bounds + 'btw': 6, +} # nonbasic between bounds +model.sstatus = Suffix(direction=Suffix.IMPORT_EXPORT, datatype=Suffix.INT) model.dual = Suffix(direction=Suffix.IMPORT_EXPORT) # Report the best known bound on the objective function model.bestbound = Suffix(direction=Suffix.IMPORT) # A few Gurobi variable solution sensitivity suffixes -model.senslblo = Suffix(direction=Suffix.IMPORT) # smallest variable lower bound -model.senslbhi = Suffix(direction=Suffix.IMPORT) # greatest variable lower bound -model.sensublo = Suffix(direction=Suffix.IMPORT) # smallest variable upper bound -model.sensubhi = Suffix(direction=Suffix.IMPORT) # greatest variable upper bound +model.senslblo = Suffix(direction=Suffix.IMPORT) # smallest variable lower bound +model.senslbhi = Suffix(direction=Suffix.IMPORT) # greatest variable lower bound +model.sensublo = Suffix(direction=Suffix.IMPORT) # smallest variable upper bound +model.sensubhi = Suffix(direction=Suffix.IMPORT) # greatest variable upper bound # A Gurobi constraint solution sensitivity suffix -model.sensrhshi = Suffix(direction=Suffix.IMPORT) # greatest right-hand side value +model.sensrhshi = Suffix(direction=Suffix.IMPORT) # greatest right-hand side value ### # Tell gurobi_ampl to report solution sensitivities @@ -84,28 +87,30 @@ # Set one of the sstatus suffix values, which will be sent to the solver model.sstatus[model.x[1]] = sstatus_table['low'] + def print_model_suffixes(model): # print all suffix values for all model components in a nice table - print("\t",end='') - for name,suffix in active_import_suffix_generator(model): - print("%10s" % (name),end='') + print("\t", end='') + for name, suffix in active_import_suffix_generator(model): + print("%10s" % (name), end='') print("") for i in model.s: - print(model.x[i].name+"\t",end='') - for name,suffix in active_import_suffix_generator(model): - print("%10s" % (suffix.get(model.x[i])),end='') + print(model.x[i].name + "\t", end='') + for name, suffix in active_import_suffix_generator(model): + print("%10s" % (suffix.get(model.x[i])), end='') print("") for i in model.s: - print(model.con[i].name+"\t",end='') - for name,suffix in active_import_suffix_generator(model): - print("%10s" % (suffix.get(model.con[i])),end='') + print(model.con[i].name + "\t", end='') + for name, suffix in active_import_suffix_generator(model): + print("%10s" % (suffix.get(model.con[i])), end='') print("") - print(model.obj.name+"\t",end='') - for name,suffix in active_import_suffix_generator(model): - print("%10s" % (suffix.get(model.obj)),end='') + print(model.obj.name + "\t", end='') + for name, suffix in active_import_suffix_generator(model): + print("%10s" % (suffix.get(model.obj)), end='') print("") print("") + print("") print("Suffixes Before Solve:") print_model_suffixes(model) @@ -113,12 +118,9 @@ def print_model_suffixes(model): ### Send the model to gurobi_ampl and collect the solution # The solver plugin will scan the model for all active suffixes # valid for importing, which it will store into the results object -results = opt.solve(model, - keepfiles=keepfiles, - tee=stream_solver) +results = opt.solve(model, keepfiles=keepfiles, tee=stream_solver) ### print("") print("Suffixes After Solve:") print_model_suffixes(model) - diff --git a/examples/pyomo/suffixes/gurobi_ampl_iis.py b/examples/pyomo/suffixes/gurobi_ampl_iis.py index cac666202ad..ccba226db78 100644 --- a/examples/pyomo/suffixes/gurobi_ampl_iis.py +++ b/examples/pyomo/suffixes/gurobi_ampl_iis.py @@ -28,14 +28,16 @@ ### Create the gurobi_ampl solver plugin using the ASL interface solver = 'gurobi_ampl' solver_io = 'nl' -stream_solver = False # True prints solver output to screen -keepfiles = False # True prints intermediate file names (.nl,.sol,...) -opt = SolverFactory(solver,solver_io=solver_io) +stream_solver = False # True prints solver output to screen +keepfiles = False # True prints intermediate file names (.nl,.sol,...) +opt = SolverFactory(solver, solver_io=solver_io) if opt is None: print("") - print("ERROR: Unable to create solver plugin for %s "\ - "using the %s interface" % (solver, solver_io)) + print( + "ERROR: Unable to create solver plugin for %s " + "using the %s interface" % (solver, solver_io) + ) print("") exit(1) @@ -43,7 +45,7 @@ opt.options['outlev'] = 1 # tell gurobi to find an iis table for the infeasible model -opt.options['iisfind'] = 1 # tell gurobi to be verbose with output +opt.options['iisfind'] = 1 # tell gurobi to be verbose with output ### Create a trivial and infeasible example model model = ConcreteModel() @@ -59,11 +61,9 @@ ### Send the model to gurobi_ampl and collect the solution # The solver plugin will scan the model for all active suffixes # valid for importing, which it will store into the results object -results = opt.solve(model, - keepfiles=keepfiles, - tee=stream_solver) +results = opt.solve(model, keepfiles=keepfiles, tee=stream_solver) print("") print("IIS Results") for component, value in model.iis.items(): - print(component.name+" "+str(value)) + print(component.name + " " + str(value)) diff --git a/examples/pyomo/suffixes/ipopt_scaling.py b/examples/pyomo/suffixes/ipopt_scaling.py index b1a6733580d..c192a98dd98 100644 --- a/examples/pyomo/suffixes/ipopt_scaling.py +++ b/examples/pyomo/suffixes/ipopt_scaling.py @@ -26,14 +26,16 @@ ### Create the ipopt solver plugin using the ASL interface solver = 'ipopt' solver_io = 'nl' -stream_solver = False # True prints solver output to screen -keepfiles = False # True prints intermediate file names (.nl,.sol,...) -opt = SolverFactory(solver,solver_io=solver_io) +stream_solver = False # True prints solver output to screen +keepfiles = False # True prints intermediate file names (.nl,.sol,...) +opt = SolverFactory(solver, solver_io=solver_io) if opt is None: print("") - print("ERROR: Unable to create solver plugin for %s "\ - "using the %s interface" % (solver, solver_io)) + print( + "ERROR: Unable to create solver plugin for %s " + "using the %s interface" % (solver, solver_io) + ) print("") exit(1) ### @@ -44,15 +46,21 @@ ### Create the example model model = ConcreteModel() -model.s = Set(initialize=[1,2,3]) -model.y = Var(bounds=(1,5),initialize=1.0) -model.x = Var(model.s,bounds=(1,5),initialize=5.0) -model.obj = Objective(expr=model.y*model.x[3]*(model.y+model.x[1]+model.x[2]) + model.x[2]) -model.inequality = Constraint(expr=model.y*model.x[1]*model.x[2]*model.x[3] >= 25.0) -model.equality = Constraint(expr=model.y**2 + model.x[1]**2 + model.x[2]**2 + model.x[3]**2 == 40.0) +model.s = Set(initialize=[1, 2, 3]) +model.y = Var(bounds=(1, 5), initialize=1.0) +model.x = Var(model.s, bounds=(1, 5), initialize=5.0) +model.obj = Objective( + expr=model.y * model.x[3] * (model.y + model.x[1] + model.x[2]) + model.x[2] +) +model.inequality = Constraint( + expr=model.y * model.x[1] * model.x[2] * model.x[3] >= 25.0 +) +model.equality = Constraint( + expr=model.y**2 + model.x[1] ** 2 + model.x[2] ** 2 + model.x[3] ** 2 == 40.0 +) ### -### Declare the scaling_factor suffix +### Declare the scaling_factor suffix model.scaling_factor = Suffix(direction=Suffix.EXPORT) # set objective scaling factor model.scaling_factor[model.obj] = 4.23 @@ -66,7 +74,7 @@ ### ### Send the model to ipopt and collect the solution -results = opt.solve(model,keepfiles=keepfiles,tee=stream_solver) +results = opt.solve(model, keepfiles=keepfiles, tee=stream_solver) ### model.pprint() diff --git a/examples/pyomo/suffixes/ipopt_warmstart.py b/examples/pyomo/suffixes/ipopt_warmstart.py index d6379cb9cec..6975bbaaa62 100644 --- a/examples/pyomo/suffixes/ipopt_warmstart.py +++ b/examples/pyomo/suffixes/ipopt_warmstart.py @@ -30,30 +30,36 @@ ### Create the ipopt solver plugin using the ASL interface solver = 'ipopt' solver_io = 'nl' -stream_solver = False # True prints solver output to screen -keepfiles = False # True prints intermediate file names (.nl,.sol,...) -opt = SolverFactory(solver,solver_io=solver_io) +stream_solver = False # True prints solver output to screen +keepfiles = False # True prints intermediate file names (.nl,.sol,...) +opt = SolverFactory(solver, solver_io=solver_io) if opt is None: print("") - print("ERROR: Unable to create solver plugin for %s "\ - "using the %s interface" % (solver, solver_io)) + print( + "ERROR: Unable to create solver plugin for %s " + "using the %s interface" % (solver, solver_io) + ) print("") exit(1) ### ### Create the example model model = ConcreteModel() -model.x1 = Var(bounds=(1,5),initialize=1.0) -model.x2 = Var(bounds=(1,5),initialize=5.0) -model.x3 = Var(bounds=(1,5),initialize=5.0) -model.x4 = Var(bounds=(1,5),initialize=1.0) -model.obj = Objective(expr=model.x1*model.x4*(model.x1+model.x2+model.x3) + model.x3) -model.inequality = Constraint(expr=model.x1*model.x2*model.x3*model.x4 >= 25.0) -model.equality = Constraint(expr=model.x1**2 + model.x2**2 + model.x3**2 + model.x4**2 == 40.0) +model.x1 = Var(bounds=(1, 5), initialize=1.0) +model.x2 = Var(bounds=(1, 5), initialize=5.0) +model.x3 = Var(bounds=(1, 5), initialize=5.0) +model.x4 = Var(bounds=(1, 5), initialize=1.0) +model.obj = Objective( + expr=model.x1 * model.x4 * (model.x1 + model.x2 + model.x3) + model.x3 +) +model.inequality = Constraint(expr=model.x1 * model.x2 * model.x3 * model.x4 >= 25.0) +model.equality = Constraint( + expr=model.x1**2 + model.x2**2 + model.x3**2 + model.x4**2 == 40.0 +) ### -### Declare all suffixes +### Declare all suffixes # Ipopt bound multipliers (obtained from solution) model.ipopt_zL_out = Suffix(direction=Suffix.IMPORT) model.ipopt_zU_out = Suffix(direction=Suffix.IMPORT) @@ -67,24 +73,23 @@ ### Send the model to ipopt and collect the solution print("") print("INITIAL SOLVE") -results = opt.solve(model,keepfiles=keepfiles,tee=stream_solver) +results = opt.solve(model, keepfiles=keepfiles, tee=stream_solver) ### ### Print Solution -print(" %7s %12s %12s" % ("Value","ipopt_zL_out","ipopt_zU_out")) -for v in [model.x1,model.x2,model.x3,model.x4]: - print("%s %7g %12g %12g" % (v, - value(v), - model.ipopt_zL_out[v], - model.ipopt_zU_out[v])) -print("inequality.dual = "+str(model.dual[model.inequality])) -print("equality.dual = "+str(model.dual[model.equality])) +print(" %7s %12s %12s" % ("Value", "ipopt_zL_out", "ipopt_zU_out")) +for v in [model.x1, model.x2, model.x3, model.x4]: + print( + "%s %7g %12g %12g" % (v, value(v), model.ipopt_zL_out[v], model.ipopt_zU_out[v]) + ) +print("inequality.dual = " + str(model.dual[model.inequality])) +print("equality.dual = " + str(model.dual[model.equality])) ### ### Set Ipopt options for warm-start # The current values on the ipopt_zU_out and -# ipopt_zL_out suffixes will be used as initial +# ipopt_zL_out suffixes will be used as initial # conditions for the bound multipliers to solve # the new problem model.ipopt_zL_in.update(model.ipopt_zL_out) @@ -96,20 +101,19 @@ ### ### Send the model and suffix data to ipopt and collect the solution -print("") +print("") print("WARM-STARTED SOLVE") # The solver plugin will scan the model for all active suffixes # valid for importing, which it will store into the results object -results = opt.solve(model,keepfiles=keepfiles,tee=stream_solver) +results = opt.solve(model, keepfiles=keepfiles, tee=stream_solver) ### ### Print Solution -print(" %7s %12s %12s" % ("Value","ipopt_zL_out","ipopt_zU_out")) -for v in [model.x1,model.x2,model.x3,model.x4]: - print("%s %7g %12g %12g" % (v, - value(v), - model.ipopt_zL_out[v], - model.ipopt_zU_out[v])) -print("inequality.dual = "+str(model.dual[model.inequality])) -print("equality.dual = "+str(model.dual[model.equality])) +print(" %7s %12s %12s" % ("Value", "ipopt_zL_out", "ipopt_zU_out")) +for v in [model.x1, model.x2, model.x3, model.x4]: + print( + "%s %7g %12g %12g" % (v, value(v), model.ipopt_zL_out[v], model.ipopt_zU_out[v]) + ) +print("inequality.dual = " + str(model.dual[model.inequality])) +print("equality.dual = " + str(model.dual[model.equality])) ### diff --git a/examples/pyomo/suffixes/sipopt_hicks.py b/examples/pyomo/suffixes/sipopt_hicks.py index 0c610f1de8a..dbf4e07b8f7 100644 --- a/examples/pyomo/suffixes/sipopt_hicks.py +++ b/examples/pyomo/suffixes/sipopt_hicks.py @@ -20,7 +20,7 @@ # Execution of this script requires that the ipopt_sens # solver (distributed with Ipopt) is in the current search # path for executables on this system. Optionally required -# are the numpy and matplotlib python modules (needed for +# are the numpy and matplotlib python modules (needed for # viewing results). import pyomo.environ @@ -30,9 +30,9 @@ ### Create the ipopt_sens solver plugin using the ASL interface solver = 'ipopt_sens' solver_io = 'nl' -stream_solver = False # True prints solver output to screen -keepfiles = False # True prints intermediate file names (.nl,.sol,...) -opt = SolverFactory(solver,solver_io=solver_io) +stream_solver = False # True prints solver output to screen +keepfiles = False # True prints intermediate file names (.nl,.sol,...) +opt = SolverFactory(solver, solver_io=solver_io) ### if opt is None: @@ -58,7 +58,7 @@ n = 5.0 alpha1 = 1.0e6 alpha2 = 2.0e3 -alpha3 = 1.e-3 +alpha3 = 1.0e-3 c_des = 0.0944 t_des = 0.7766 u_des = 340.0 @@ -66,65 +66,129 @@ t_init = 0.7293 u_init = 390.0 theta = 20.0 -yc = tc/(jj*cf) -yf = tf/(jj*cf) +yc = tc / (jj * cf) +yf = tf / (jj * cf) model = ConcreteModel() model.c_init_var = Var() model.t_init_var = Var() -model.cdot = Var(FE,CP) -model.tdot = Var(FE,CP) +model.cdot = Var(FE, CP) +model.tdot = Var(FE, CP) + + def c_init_rule(m, i, j): - return float(i)/nfe*(c_des-c_init) + c_init -model.c = Var(FE,CP, within=NonNegativeReals, initialize=c_init_rule) + return float(i) / nfe * (c_des - c_init) + c_init + + +model.c = Var(FE, CP, within=NonNegativeReals, initialize=c_init_rule) + + def t_init_rule(m, i, j): - return float(i)/nfe*(t_des-t_init) + t_init -model.t = Var(FE,CP, within=NonNegativeReals, initialize=t_init_rule) -model.u = Var(FE, within=NonNegativeReals, initialize=1.0) + return float(i) / nfe * (t_des - t_init) + t_init + + +model.t = Var(FE, CP, within=NonNegativeReals, initialize=t_init_rule) +model.u = Var(FE, within=NonNegativeReals, initialize=1.0) a_init = {} -a_init[0,0] = 0.19681547722366 -a_init[0,1] = 0.39442431473909 -a_init[0,2] = 0.37640306270047 -a_init[1,0] = -0.06553542585020 -a_init[1,1] = 0.29207341166523 -a_init[1,2] = 0.51248582618842 -a_init[2,0] = 0.02377097434822 -a_init[2,1] = -0.04154875212600 -a_init[2,2] = 0.11111111111111 +a_init[0, 0] = 0.19681547722366 +a_init[0, 1] = 0.39442431473909 +a_init[0, 2] = 0.37640306270047 +a_init[1, 0] = -0.06553542585020 +a_init[1, 1] = 0.29207341166523 +a_init[1, 2] = 0.51248582618842 +a_init[2, 0] = 0.02377097434822 +a_init[2, 1] = -0.04154875212600 +a_init[2, 2] = 0.11111111111111 + +model.a = Param(FE, CP, initialize=a_init) -model.a = Param(FE,CP, initialize=a_init) +h = [1.0 / nfe] * nfe -h = [1.0/nfe]*nfe def cdot_ode_rule(m, i, j): - return m.cdot[i,j] == (1.0-m.c[i,j])/theta-k10*exp(-n/m.t[i,j])*m.c[i,j] + return ( + m.cdot[i, j] + == (1.0 - m.c[i, j]) / theta - k10 * exp(-n / m.t[i, j]) * m.c[i, j] + ) + + model.cdot_ode = Constraint(FE, CP, rule=cdot_ode_rule) + def tdot_ode_rule(m, i, j): - return m.tdot[i,j] == (yf-m.t[i,j])/theta+k10*exp(-n/m.t[i,j])*m.c[i,j]-alpha*m.u[i]*(m.t[i,j]-yc) + return m.tdot[i, j] == (yf - m.t[i, j]) / theta + k10 * exp(-n / m.t[i, j]) * m.c[ + i, j + ] - alpha * m.u[i] * (m.t[i, j] - yc) + + model.tdot_ode = Constraint(FE, CP, rule=tdot_ode_rule) -def fecolc_rule(m, i,j): - if i==0: - return m.c[i,j] == m.c_init_var + time*h[i]*sum(m.a[k,j]*m.cdot[i,k] for k in CP) + +def fecolc_rule(m, i, j): + if i == 0: + return m.c[i, j] == m.c_init_var + time * h[i] * sum( + m.a[k, j] * m.cdot[i, k] for k in CP + ) else: - return m.c[i,j] == m.c[i-1,ncp-1] + time*h[i]*sum(m.a[k,j]*m.cdot[i,k] for k in CP) + return m.c[i, j] == m.c[i - 1, ncp - 1] + time * h[i] * sum( + m.a[k, j] * m.cdot[i, k] for k in CP + ) + + model.fecolc = Constraint(FE, CP, rule=fecolc_rule) -model.c_init_def = Constraint(expr= model.c_init_var == c_init) -model.t_init_def = Constraint(expr= model.t_init_var == t_init) +model.c_init_def = Constraint(expr=model.c_init_var == c_init) +model.t_init_def = Constraint(expr=model.t_init_var == t_init) + def fecolt_rule(m, i, j): - if i==0: - return m.t[i,j] == m.t_init_var + time*h[i]*sum(m.a[k,j]*m.tdot[i,k] for k in CP) + if i == 0: + return m.t[i, j] == m.t_init_var + time * h[i] * sum( + m.a[k, j] * m.tdot[i, k] for k in CP + ) else: - return m.t[i,j] == m.t[i-1,ncp-1] + time*h[i]*sum(m.a[k,j]*m.tdot[i,k] for k in CP) + return m.t[i, j] == m.t[i - 1, ncp - 1] + time * h[i] * sum( + m.a[k, j] * m.tdot[i, k] for k in CP + ) + + model.fecolt = Constraint(FE, CP, rule=fecolt_rule) + def obj_rule(m): - return \ - sum(h[i]*sum((alpha1*(m.c[i,j]-c_des)**2+ alpha2*(m.t[i,j]-t_des)**2+alpha3*(m.u[i]-u_des)**2 )*m.a[j,ncp-1] for j in CP) for i in range(2,nfe)) + \ - h[0]*sum((alpha1*((m.c_init_var+time*h[0]*sum( m.a[k,j]*m.cdot[0,k] for k in CP)) - c_des)**2 + alpha2*((m.t_init_var+time*h[0]*sum(m.a[k,j]*m.tdot[0,k]for k in CP))-t_des)**2 + alpha3*(m.u[0]-u_des)**2)*m.a[j,ncp-1] for j in CP) + return sum( + h[i] + * sum( + ( + alpha1 * (m.c[i, j] - c_des) ** 2 + + alpha2 * (m.t[i, j] - t_des) ** 2 + + alpha3 * (m.u[i] - u_des) ** 2 + ) + * m.a[j, ncp - 1] + for j in CP + ) + for i in range(2, nfe) + ) + h[0] * sum( + ( + alpha1 + * ( + (m.c_init_var + time * h[0] * sum(m.a[k, j] * m.cdot[0, k] for k in CP)) + - c_des + ) + ** 2 + + alpha2 + * ( + (m.t_init_var + time * h[0] * sum(m.a[k, j] * m.tdot[0, k] for k in CP)) + - t_des + ) + ** 2 + + alpha3 * (m.u[0] - u_des) ** 2 + ) + * m.a[j, ncp - 1] + for j in CP + ) + + model.cost = Objective(rule=obj_rule) ### @@ -132,24 +196,24 @@ def obj_rule(m): model.sens_state_0 = Suffix(direction=Suffix.EXPORT) model.sens_state_1 = Suffix(direction=Suffix.EXPORT) model.sens_state_value_1 = Suffix(direction=Suffix.EXPORT) -model.sens_sol_state_1 = Suffix(direction=Suffix.IMPORT) -model.sens_init_constr = Suffix(direction=Suffix.EXPORT) +model.sens_sol_state_1 = Suffix(direction=Suffix.IMPORT) +model.sens_init_constr = Suffix(direction=Suffix.EXPORT) ### ### set sIPOPT data opt.options['run_sens'] = 'yes' model.sens_state_0[model.c_init_var] = 1 model.sens_state_0[model.t_init_var] = 2 -model.sens_state_1[model.c[4,0]] = 1 -model.sens_state_1[model.t[4,0]] = 2 -model.sens_state_value_1[model.c[4,0]] = 0.135 -model.sens_state_value_1[model.t[4,0]] = 0.745 +model.sens_state_1[model.c[4, 0]] = 1 +model.sens_state_1[model.t[4, 0]] = 2 +model.sens_state_value_1[model.c[4, 0]] = 0.135 +model.sens_state_value_1[model.t[4, 0]] = 0.745 model.sens_init_constr[model.c_init_def] = 1 model.sens_init_constr[model.t_init_def] = 1 ### ### Send the model to ipopt_sens and collect the solution -results = opt.solve(model,keepfiles=keepfiles,tee=stream_solver) +results = opt.solve(model, keepfiles=keepfiles, tee=stream_solver) ### # Plot the results @@ -162,43 +226,46 @@ def obj_rule(m): print("") exit(1) + def collocation_points(n_fe, n_cp, h): t = 0.0 r1 = 0.15505102572168 r2 = 0.64494897427832 r3 = 1.0 for i in range(n_fe): - yield t+h[i]*r1 - yield t+h[i]*r2 - yield t+h[i]*r3 - t += h[i] + yield t + h[i] * r1 + yield t + h[i] * r2 + yield t + h[i] * r3 + t += h[i] + def collocation_idx(n_fe, n_cp): for i in range(n_fe): - yield i,0 - yield i,1 - yield i,2 + yield i, 0 + yield i, 1 + yield i, 2 + times = np.array([i for i in collocation_points(nfe, ncp, h)]) -cnominal = np.zeros((nfe*ncp,1)) -cperturbed = np.zeros((nfe*ncp,1)) -tnominal = np.zeros((nfe*ncp,1)) -tperturbed = np.zeros((nfe*ncp,1)) -for k,(i,j) in enumerate(collocation_idx(nfe, ncp)): - cnominal[k] = value(model.c[i,j]) - tnominal[k] = value(model.t[i,j]) - cperturbed[k] = value(model.sens_sol_state_1[model.c[i,j]]) - tperturbed[k] = value(model.sens_sol_state_1[model.t[i,j]]) - -plt.subplot(2,1,1) +cnominal = np.zeros((nfe * ncp, 1)) +cperturbed = np.zeros((nfe * ncp, 1)) +tnominal = np.zeros((nfe * ncp, 1)) +tperturbed = np.zeros((nfe * ncp, 1)) +for k, (i, j) in enumerate(collocation_idx(nfe, ncp)): + cnominal[k] = value(model.c[i, j]) + tnominal[k] = value(model.t[i, j]) + cperturbed[k] = value(model.sens_sol_state_1[model.c[i, j]]) + tperturbed[k] = value(model.sens_sol_state_1[model.t[i, j]]) + +plt.subplot(2, 1, 1) plt.plot(times, cnominal, label='c_nominal') -#plt.hold(True) +# plt.hold(True) plt.plot(times, cperturbed, label='c_perturbed') -plt.xlim([min(times),max(times)]) +plt.xlim([min(times), max(times)]) plt.legend(loc=0) -plt.subplot(2,1,2) +plt.subplot(2, 1, 2) plt.plot(times, tnominal, label='t_nominal') plt.plot(times, tperturbed, label='t_perturbed') -plt.xlim([min(times),max(times)]) +plt.xlim([min(times), max(times)]) plt.legend(loc=0) plt.show() diff --git a/examples/pyomo/suffixes/sipopt_parametric.py b/examples/pyomo/suffixes/sipopt_parametric.py index d18f750392c..29bba934bd8 100644 --- a/examples/pyomo/suffixes/sipopt_parametric.py +++ b/examples/pyomo/suffixes/sipopt_parametric.py @@ -28,9 +28,9 @@ ### Create the ipopt_sens solver plugin using the ASL interface solver = 'ipopt_sens' solver_io = 'nl' -stream_solver = True # True prints solver output to screen -keepfiles = False # True prints intermediate file names (.nl,.sol,...) -opt = SolverFactory(solver,solver_io=solver_io) +stream_solver = True # True prints solver output to screen +keepfiles = False # True prints intermediate file names (.nl,.sol,...) +opt = SolverFactory(solver, solver_io=solver_io) ### if opt is None: @@ -40,9 +40,9 @@ exit(1) ### Set this data -nominal_eta1 = 4.5 +nominal_eta1 = 4.5 perturbed_eta1 = 4.0 -nominal_eta2 = 1.0 +nominal_eta2 = 1.0 perturbed_eta2 = 1.0 ### Create the model @@ -55,9 +55,11 @@ model.eta1 = Var() model.eta2 = Var() # constraints + objective -model.const1 = Constraint(expr=6*model.x1+3*model.x2+2*model.x3 - model.eta1 == 0) -model.const2 = Constraint(expr=model.eta2*model.x1+model.x2-model.x3-1 == 0) -model.cost = Objective(expr=model.x1**2 + model.x2**2 + model.x3**2) +model.const1 = Constraint( + expr=6 * model.x1 + 3 * model.x2 + 2 * model.x3 - model.eta1 == 0 +) +model.const2 = Constraint(expr=model.eta2 * model.x1 + model.x2 - model.x3 - 1 == 0) +model.cost = Objective(expr=model.x1**2 + model.x2**2 + model.x3**2) model.consteta1 = Constraint(expr=model.eta1 == nominal_eta1) model.consteta2 = Constraint(expr=model.eta2 == nominal_eta2) ### @@ -66,8 +68,8 @@ model.sens_state_0 = Suffix(direction=Suffix.EXPORT) model.sens_state_1 = Suffix(direction=Suffix.EXPORT) model.sens_state_value_1 = Suffix(direction=Suffix.EXPORT) -model.sens_sol_state_1 = Suffix(direction=Suffix.IMPORT) -model.sens_init_constr = Suffix(direction=Suffix.EXPORT) +model.sens_sol_state_1 = Suffix(direction=Suffix.IMPORT) +model.sens_init_constr = Suffix(direction=Suffix.EXPORT) ### ### set sIPOPT data diff --git a/examples/pyomo/transform/scaling_ex.py b/examples/pyomo/transform/scaling_ex.py index 4de84c036bc..a5960393e75 100644 --- a/examples/pyomo/transform/scaling_ex.py +++ b/examples/pyomo/transform/scaling_ex.py @@ -15,22 +15,25 @@ # create the original unscaled model ### model = pe.ConcreteModel() -model.x = pe.Var([1,2,3], bounds=(-10,10), initialize=5.0) -model.z = pe.Var(bounds=(10,20)) +model.x = pe.Var([1, 2, 3], bounds=(-10, 10), initialize=5.0) +model.z = pe.Var(bounds=(10, 20)) model.obj = pe.Objective(expr=model.z + model.x[1]) # demonstrate scaling of duals as well model.dual = pe.Suffix(direction=pe.Suffix.IMPORT) model.rc = pe.Suffix(direction=pe.Suffix.IMPORT) - + + def con_rule(m, i): if i == 1: - return m.x[1] + 2*m.x[2] + 1*m.x[3] == 4.0 + return m.x[1] + 2 * m.x[2] + 1 * m.x[3] == 4.0 if i == 2: - return m.x[1] + 2*m.x[2] + 2*m.x[3] == 5.0 + return m.x[1] + 2 * m.x[2] + 2 * m.x[3] == 5.0 if i == 3: - return m.x[1] + 3.0*m.x[2] + 1*m.x[3] == 5.0 -model.con = pe.Constraint([1,2,3], rule=con_rule) + return m.x[1] + 3.0 * m.x[2] + 1 * m.x[3] == 5.0 + + +model.con = pe.Constraint([1, 2, 3], rule=con_rule) model.zcon = pe.Constraint(expr=model.z >= model.x[2]) ### @@ -40,7 +43,7 @@ def con_rule(m, i): model.scaling_factor[model.obj] = 2.0 model.scaling_factor[model.x] = 0.5 model.scaling_factor[model.z] = -10.0 -model.scaling_factor[model.con[1]] = 0.5 +model.scaling_factor[model.con[1]] = 0.5 model.scaling_factor[model.con[2]] = 2.0 model.scaling_factor[model.con[3]] = -5.0 model.scaling_factor[model.zcon] = -3.0 @@ -67,7 +70,7 @@ def con_rule(m, i): if compare_solutions: # compare the solution of the original model with a clone of the # original that has a backmapped solution from the scaled model - + # solve the original (unscaled) model original_model = model.clone() pe.SolverFactory('glpk').solve(original_model) @@ -90,6 +93,3 @@ def con_rule(m, i): bv = cuid.find_component_on(backmapped_unscaled_model) print('%s\t%.16f\t%.16f' % (v.local_name, pe.value(v), pe.value(bv))) print('=====================================================') - - - diff --git a/examples/pyomo/tutorials/data.py b/examples/pyomo/tutorials/data.py index cf2a5d61972..d065c9ff9bc 100644 --- a/examples/pyomo/tutorials/data.py +++ b/examples/pyomo/tutorials/data.py @@ -56,7 +56,7 @@ # # An indexed set # -model.G = Set(model.A,model.B) +model.G = Set(model.A, model.B) # # A simple set # @@ -95,8 +95,8 @@ # # Initializing a parameter with two indices # -model.U = Param(model.I,model.A) -model.T = Param(model.A,model.I) +model.U = Param(model.I, model.A) +model.T = Param(model.A, model.I) # # Initializing a parameter with missing data # @@ -131,4 +131,3 @@ ## instance = model.create_instance("data.dat") instance.pprint() - diff --git a/examples/pyomo/tutorials/excel.py b/examples/pyomo/tutorials/excel.py index 6e8f86d2d24..127db722c07 100644 --- a/examples/pyomo/tutorials/excel.py +++ b/examples/pyomo/tutorials/excel.py @@ -19,8 +19,8 @@ ## # # Pyomo makes a fundamental distinction between an abstract model and a -# problem instance. The Pyomo AbstractModel() class is used to manage the -# declaration of model components (e.g. sets and variables), and to +# problem instance. The Pyomo AbstractModel() class is used to manage the +# declaration of model components (e.g. sets and variables), and to # generate a problem instance. # model = AbstractModel() @@ -51,12 +51,12 @@ # # An indexed set -# +# model.F = Set(model.A) # # An indexed set -# -model.G = Set(model.A,model.B) +# +model.G = Set(model.A, model.B) # # A simple set # @@ -90,8 +90,8 @@ # # Initializing a parameter with two indices # -model.U = Param(model.I,model.A) -model.T = Param(model.A,model.I) +model.U = Param(model.I, model.A) +model.T = Param(model.A, model.I) # # Initializing a parameter with missing data # @@ -108,12 +108,12 @@ model.P = Param(model.J, within=Reals) model.PP = Param(model.J, within=Reals) model.O = Param(model.J, within=Reals) - + ## -## Process an input file and confirm that we get appropriate +## Process an input file and confirm that we get appropriate ## set instances. ## -#model.pprint() +# model.pprint() data = DataPortal(model=model) data.load(filename="excel.xls", range="Atable", format='set', set='A') @@ -124,19 +124,18 @@ data.load(filename="excel.xls", range="Itable", format='set', set='I') data.load(filename="excel.xls", range="Zparam", format='param', param='Z') data.load(filename="excel.xls", range="Ytable", index='A', param='Y') -data.load(filename="excel.xls", range="XWtable", index='A', param=['X','W']) +data.load(filename="excel.xls", range="XWtable", index='A', param=['X', 'W']) data.load(filename="excel.xls", range="Ttable", param='T', format='transposed_array') data.load(filename="excel.xls", range="Utable", param='U', format='array') data.load(filename="excel.xls", range="Stable", index='A', param='S') -data.load(filename="excel.xls", range="RQtable", index='H', param=('R','Q')) -data.load(filename="excel.xls", range="POtable", index='J', param=('P','O')) -data.load(filename="excel.xls", range="PPtable", index=('A','B'), param="PP") +data.load(filename="excel.xls", range="RQtable", index='H', param=('R', 'Q')) +data.load(filename="excel.xls", range="POtable", index='J', param=('P', 'O')) +data.load(filename="excel.xls", range="PPtable", index=('A', 'B'), param="PP") -#try: +# try: # data.read() -#except pyomo.ApplicationError: +# except pyomo.ApplicationError: # sys.exit(0) - + instance = model.create_instance(data) instance.pprint() - diff --git a/examples/pyomo/tutorials/param.py b/examples/pyomo/tutorials/param.py index d3f1af8d1f5..ba31975ab4b 100644 --- a/examples/pyomo/tutorials/param.py +++ b/examples/pyomo/tutorials/param.py @@ -42,7 +42,8 @@ # # Initializing a parameter with two indices # -model.X = Param(model.A,model.B) +model.X = Param(model.A, model.B) + ## ## Parameter Data @@ -56,7 +57,9 @@ def W_init(model, i, j): # # Create the value of model.W[i,j] # - return i*j + return i * j + + model.W = Param(model.A, model.B, initialize=W_init) # # Note that the parameter model.W is not created when this object is @@ -64,13 +67,13 @@ def W_init(model, i, j): # problem instance. # # The _initialize_ option can also be used to specify the values in -# a parameter. These default values may be overriden by later construction +# a parameter. These default values may be overridden by later construction # steps, or by data in an input file: # -V_init={} -V_init[1]=1 -V_init[2]=2 -V_init[3]=9 +V_init = {} +V_init[1] = 1 +V_init[2] = 2 +V_init[3] = 9 model.V = Param(model.B, initialize=V_init) # # Note that parameter V is initialized with a dictionary, which maps @@ -84,6 +87,8 @@ def W_init(model, i, j): # option: # model.T = Param(within=model.B) + + # # Note that the default domain for parameters is Reals, the set of floating # point values. @@ -93,6 +98,8 @@ def W_init(model, i, j): # def S_validate(model, value): return value in model.A + + model.S = Param(validate=S_validate) ## @@ -103,16 +110,16 @@ def S_validate(model, value): # example, the instance Param(model.A,model.B) declares a parameter indexed # over sets A and B. However, not all of these values are necessarily # declared in a model. The default value for all parameters not declared -# is zero. This default can be overriden with the _default_ option. +# is zero. This default can be overridden with the _default_ option. # # The following example illustrates how a parameter can be declared where # every parameter value is nonzero, but the parameter is stored with a sparse # representation. # -R_init={} -R_init[2,1]=1 -R_init[2,2]=1 -R_init[2,3]=1 +R_init = {} +R_init[2, 1] = 1 +R_init[2, 2] = 1 +R_init[2, 3] = 1 model.R = Param(model.A, model.B, default=99.0, initialize=R_init) # # Note that the parameter default value can also be specified in an input diff --git a/examples/pyomo/tutorials/set.py b/examples/pyomo/tutorials/set.py index 4fbfdfe3f2b..78f2656d739 100644 --- a/examples/pyomo/tutorials/set.py +++ b/examples/pyomo/tutorials/set.py @@ -32,7 +32,7 @@ # to the Set() object: # model.B = Set() -model.C = Set(model.A,model.B) +model.C = Set(model.A, model.B) # # Set declarations can also use standard set operations to declare # a set in a constructive fashion: @@ -51,6 +51,7 @@ # model.Hsub = Set(within=model.A * model.B) + ## ## Data for Simple Sets ## @@ -60,12 +61,14 @@ # element: # def I_init(model): - ans=[] + ans = [] for a in model.A: for b in model.B: - ans.append( (a,b) ) + ans.append((a, b)) return ans -model.I = Set(within=model.A*model.B, initialize=I_init) + + +model.I = Set(within=model.A * model.B, initialize=I_init) # # Note that the set model.I is not created when this set object is # constructed. Instead, I_init() is called during the construction of a @@ -75,31 +78,37 @@ def I_init(model): # model.J = Set() model.J.construct() -model.J.add(1,4,9) +model.J.add(1, 4, 9) # # The _initialize_ option can also be used to specify the values in -# a set. These default values may be overriden by later construction +# a set. These default values may be overridden by later construction # steps, or by data in an input file: # -model.K = Set(initialize=[1,4,9]) -model.K_2 = Set(initialize=[(1,4),(9,16)],dimen=2) +model.K = Set(initialize=[1, 4, 9]) +model.K_2 = Set(initialize=[(1, 4), (9, 16)], dimen=2) # # Validation of set data is supported in two different ways. First, a # superset can be specified with the _within_ option: # model.L = Set(within=model.A) + + # # Validation of set data can also be performed with the _validate_ option, # which is a function that returns True if a data belongs in this set: # def M_validate(model, value): return value in model.A + + model.M = Set(validate=M_validate) # # Although the _within_ option is convenient, it can force the creation of # a temporary set. For example, consider the declaration # -model.N = Set(within=model.A*model.B) +model.N = Set(within=model.A * model.B) + + # # In this example, the cross-product of sets A and B is needed to validate # the members of set C. Pyomo creates this set implicitly and uses @@ -108,8 +117,11 @@ def M_validate(model, value): # def O_validate(model, value): return value[0] in model.A and value[1] in model.B + + model.O = Set(validate=O_validate) + ## ## Data for Set Arrays ## @@ -119,8 +131,10 @@ def O_validate(model, value): # array index: # def P_init(model, i, j): - return range(0,i*j) -model.P = Set(model.B,model.B,initialize=P_init) + return range(0, i * j) + + +model.P = Set(model.B, model.B, initialize=P_init) # # A set array CANNOT be explicitly constructed by adding set elements # to individual arrays. For example, the following is invalid: @@ -146,24 +160,29 @@ def P_init(model, i, j): # a set array. These default values are defined in a dictionary, which # specifies how each array element is initialized: # -R_init={} -R_init[2] = [1,3,5] -R_init[3] = [2,4,6] -R_init[4] = [3,5,7] -model.R = Set(model.B,initialize=R_init) +R_init = {} +R_init[2] = [1, 3, 5] +R_init[3] = [2, 4, 6] +R_init[4] = [3, 5, 7] +model.R = Set(model.B, initialize=R_init) # # Validation of a set array is supported with the _within_ option. The # elements of all sets in the array must be in this set: # model.S = Set(model.B, within=model.A) + + # # Validation of set arrays can also be performed with the _validate_ option. # This is applied to all sets in the array: # def T_validate(model, value): return value in model.A + + model.T = Set(model.B, validate=M_validate) + ## ## Set options ## @@ -178,13 +197,17 @@ def T_validate(model, value): # called repeatedly to construct each element in the set: # def U_init(model, z): - if z==6: + if z == 6: return Set.End - if z==1: + if z == 1: return 1 else: - return model.U[z-1]*z + return model.U[z - 1] * z + + model.U = Set(ordered=True, initialize=U_init) + + # # This example can be generalized to array sets. Note that in this case # we can use ordered sets to to index the array, thereby guaranteeing that @@ -193,12 +216,14 @@ def U_init(model, z): # (inclusive). # def V_init(model, z, i): - if z==6: + if z == 6: return Set.End - if i==1: + if i == 1: return z - return model.V[i-1][z]+z-1 -model.V = Set(RangeSet(1,4), initialize=V_init, ordered=True) + return model.V[i - 1][z] + z - 1 + + +model.V = Set(RangeSet(1, 4), initialize=V_init, ordered=True) ## ## Process an input file and confirm that we get appropriate diff --git a/examples/pyomo/tutorials/table.py b/examples/pyomo/tutorials/table.py index a3221343422..16951352ee1 100644 --- a/examples/pyomo/tutorials/table.py +++ b/examples/pyomo/tutorials/table.py @@ -19,8 +19,8 @@ ## # # Pyomo makes a fundamental distinction between an abstract model and a -# problem instance. The Pyomo AbstractModel() class is used to manage the -# declaration of model components (e.g. sets and variables), and to +# problem instance. The Pyomo AbstractModel() class is used to manage the +# declaration of model components (e.g. sets and variables), and to # generate a problem instance. # model = AbstractModel() @@ -51,12 +51,12 @@ # # An indexed set -# +# model.F = Set(model.A) # # An indexed set -# -model.G = Set(model.A,model.B) +# +model.G = Set(model.A, model.B) # # A simple set # @@ -90,8 +90,8 @@ # # Initializing a parameter with two indices # -model.U = Param(model.I,model.A) -model.T = Param(model.A,model.I) +model.U = Param(model.I, model.A) +model.T = Param(model.A, model.I) # # Initializing a parameter with missing data # @@ -108,9 +108,9 @@ model.P = Param(model.J, within=Reals) model.PP = Param(model.J, within=Reals) model.O = Param(model.J, within=Reals) - + ## -## Process an input file and confirm that we get appropriate +## Process an input file and confirm that we get appropriate ## set instances. ## data = DataPortal() @@ -122,14 +122,13 @@ data.load(filename="tab/I.tab", format='set', set='I') data.load(filename="tab/Z.tab", format='param', param="Z") data.load(filename="tab/Y.tab", index='A', param='Y') -data.load(filename="tab/XW.tab", index='A', param=['X','W']) +data.load(filename="tab/XW.tab", index='A', param=['X', 'W']) data.load(filename="tab/T.tab", param="T", format="transposed_array") data.load(filename="tab/U.tab", param="U", format="array") data.load(filename="tab/S.tab", index='A', param='S') -data.load(filename="tab/RQ.tab", index="H", param=["R","Q"]) -data.load(filename="tab/PO.tab", index="J", param=["P","O"]) +data.load(filename="tab/RQ.tab", index="H", param=["R", "Q"]) +data.load(filename="tab/PO.tab", index="J", param=["P", "O"]) data.load(filename="tab/PP.tab", param="PP") instance = model.create_instance(data) instance.pprint() - diff --git a/examples/pyomobook/abstract-ch/AbstHLinScript.py b/examples/pyomobook/abstract-ch/AbstHLinScript.py index 54dd0e280a3..adf700bfd5c 100644 --- a/examples/pyomobook/abstract-ch/AbstHLinScript.py +++ b/examples/pyomobook/abstract-ch/AbstHLinScript.py @@ -12,26 +12,34 @@ model.b = pyo.Param() model.u = pyo.Param(model.A) + def xbounds_rule(model, i): return (0, model.u[i]) + + model.x = pyo.Var(model.A, bounds=xbounds_rule) + def obj_rule(model): - return sum(model.h[i] * (1 - model.u[i]/model.d[i]**2) * model.x[i] for i in model.A) + return sum( + model.h[i] * (1 - model.u[i] / model.d[i] ** 2) * model.x[i] for i in model.A + ) + model.z = pyo.Objective(rule=obj_rule, sense=pyo.maximize) + def budget_rule(model): return pyo.summation(model.c, model.x) <= model.b + model.budgetconstr = pyo.Constraint(rule=budget_rule) # @tail: opt = pyo.SolverFactory('glpk') instance = model.create_instance("AbstractH.dat") -results = opt.solve(instance) # solves and updates instance +results = opt.solve(instance) # solves and updates instance instance.display() # @:tail - diff --git a/examples/pyomobook/abstract-ch/AbstractH.py b/examples/pyomobook/abstract-ch/AbstractH.py index b436fef1d8b..da9f0a4931c 100644 --- a/examples/pyomobook/abstract-ch/AbstractH.py +++ b/examples/pyomobook/abstract-ch/AbstractH.py @@ -11,16 +11,25 @@ model.b = pyo.Param() model.u = pyo.Param(model.A) + def xbounds_rule(model, i): return (0, model.u[i]) + + model.x = pyo.Var(model.A, bounds=xbounds_rule) + def obj_rule(model): - return sum(model.h[i] * \ - (model.x[i] - (model.x[i]/model.d[i])**2) \ - for i in model.A) + return sum( + model.h[i] * (model.x[i] - (model.x[i] / model.d[i]) ** 2) for i in model.A + ) + + model.z = pyo.Objective(rule=obj_rule, sense=pyo.maximize) + def budget_rule(model): - return sum(model.c[i]*model.x[i] for i in model.A) <= model.b + return sum(model.c[i] * model.x[i] for i in model.A) <= model.b + + model.budgetconstr = pyo.Constraint(rule=budget_rule) diff --git a/examples/pyomobook/abstract-ch/AbstractHLinear.py b/examples/pyomobook/abstract-ch/AbstractHLinear.py index e175a376e40..575487d3e95 100644 --- a/examples/pyomobook/abstract-ch/AbstractHLinear.py +++ b/examples/pyomobook/abstract-ch/AbstractHLinear.py @@ -11,19 +11,28 @@ model.b = pyo.Param() model.u = pyo.Param(model.A) + def xbounds_rule(model, i): return (0, model.u[i]) + + model.x = pyo.Var(model.A, bounds=xbounds_rule) + # @obj: def obj_rule(model): - return sum(model.h[i] * \ - (1 - model.u[i]/model.d[i]**2) * model.x[i] \ - for i in model.A) + return sum( + model.h[i] * (1 - model.u[i] / model.d[i] ** 2) * model.x[i] for i in model.A + ) + + # @:obj model.z = pyo.Objective(rule=obj_rule, sense=pyo.maximize) + def budget_rule(model): return pyo.summation(model.c, model.x) <= model.b + + model.budgetconstr = pyo.Constraint(rule=budget_rule) diff --git a/examples/pyomobook/abstract-ch/abstract5.py b/examples/pyomobook/abstract-ch/abstract5.py index c2df442f6d7..3a06256dff8 100644 --- a/examples/pyomobook/abstract-ch/abstract5.py +++ b/examples/pyomobook/abstract-ch/abstract5.py @@ -11,11 +11,16 @@ model.x = pyo.Var(model.N, within=pyo.NonNegativeReals) + def obj_rule(model): - return sum(model.c[i]*model.x[i] for i in model.N) + return sum(model.c[i] * model.x[i] for i in model.N) + + model.obj = pyo.Objective(rule=obj_rule) + def con_rule(model, m): - return sum(model.a[i,m]*model.x[i] for i in model.N) \ - >= model.b[m] + return sum(model.a[i, m] * model.x[i] for i in model.N) >= model.b[m] + + model.con = pyo.Constraint(model.M, rule=con_rule) diff --git a/examples/pyomobook/abstract-ch/abstract6.py b/examples/pyomobook/abstract-ch/abstract6.py index f35971ccb72..d11a4652f64 100644 --- a/examples/pyomobook/abstract-ch/abstract6.py +++ b/examples/pyomobook/abstract-ch/abstract6.py @@ -11,11 +11,16 @@ Model.x = pyo.Var(Model.N, within=pyo.NonNegativeReals) + def obj_rule(Model): - return sum(Model.c[i]*Model.x[i] for i in Model.N) + return sum(Model.c[i] * Model.x[i] for i in Model.N) + + Model.obj = pyo.Objective(rule=obj_rule) + def con_rule(Model, m): - return sum(Model.a[i,m]*Model.x[i] for i in Model.N) \ - >= Model.b[m] + return sum(Model.a[i, m] * Model.x[i] for i in Model.N) >= Model.b[m] + + Model.con = pyo.Constraint(Model.M, rule=con_rule) diff --git a/examples/pyomobook/abstract-ch/abstract7.py b/examples/pyomobook/abstract-ch/abstract7.py index 9b87353eb59..2fd5d467d3e 100644 --- a/examples/pyomobook/abstract-ch/abstract7.py +++ b/examples/pyomobook/abstract-ch/abstract7.py @@ -4,72 +4,97 @@ import sys from os.path import dirname, abspath + # @preprocess: def pyomo_preprocess(options=None): print("Here are the options that were provided:") if options is not None: options.display() + + # @:preprocess + # @create_model: def pyomo_create_model(options=None, model_options=None): sys.path.append(abspath(dirname(__file__))) abstract6 = __import__('abstract6') sys.path.remove(abspath(dirname(__file__))) return abstract6.Model + + # @:create_model + # @create_modeldata: def pyomo_create_dataportal(options=None, model=None): data = pyo.DataPortal(model=model) data.load(filename='abstract6.dat') return data + + # @:create_modeldata + # @print_model: def pyomo_print_model(options=None, model=None): if options['runtime']['logging']: model.pprint() + + # @:print_model + # @modify_instance: -def pyomo_modify_instance(options=None, model=None, - instance=None): +def pyomo_modify_instance(options=None, model=None, instance=None): instance.x[1].value = 0.0 instance.x[1].fixed = True + + # @:modify_instance + # @print_instance: def pyomo_print_instance(options=None, instance=None): if options['runtime']['logging']: instance.pprint() + + # @:print_instance + # @save_instance: def pyomo_save_instance(options=None, instance=None): - OUTPUT = open('abstract7.pyomo','w') + OUTPUT = open('abstract7.pyomo', 'w') OUTPUT.write(str(pickle.dumps(instance))) OUTPUT.close() + + # @:save_instance + # @print_results: -def pyomo_print_results(options=None, instance=None, - results=None): +def pyomo_print_results(options=None, instance=None, results=None): print(results) + + # @:print_results + # @save_results: -def pyomo_save_results(options=None, instance=None, - results=None): - OUTPUT = open('abstract7.results','w') +def pyomo_save_results(options=None, instance=None, results=None): + OUTPUT = open('abstract7.results', 'w') OUTPUT.write(str(results)) OUTPUT.close() + + # @:save_results + # @postprocess: -def pyomo_postprocess(options=None, instance=None, - results=None): - instance.solutions.load_from(results, \ - allow_consistent_values_for_fixed_vars=True) - print("Solution value "+str(pyo.value(instance.obj))) +def pyomo_postprocess(options=None, instance=None, results=None): + instance.solutions.load_from(results, allow_consistent_values_for_fixed_vars=True) + print("Solution value " + str(pyo.value(instance.obj))) + + # @:postprocess diff --git a/examples/pyomobook/abstract-ch/bad1.py b/examples/pyomobook/abstract-ch/bad1.py deleted file mode 100644 index 0589c2065da..00000000000 --- a/examples/pyomobook/abstract-ch/bad1.py +++ /dev/null @@ -1,13 +0,0 @@ -# bad1.py -import pyomo.environ as pyo - -model = pyo.AbstractModel() -model.A = pyo.Set(initialize=[1,2,3]) -model.x = pyo.Var(model.A) - -def x_rule(M): - return sum(M.x[i] for i in model.A) >= 0 -model.c = pyo.Constraint(rule=x_rule) - -instance = model.create_instance() -instance.pprint() diff --git a/examples/pyomobook/abstract-ch/bad2.py b/examples/pyomobook/abstract-ch/bad2.py deleted file mode 100644 index 02bc62f8f45..00000000000 --- a/examples/pyomobook/abstract-ch/bad2.py +++ /dev/null @@ -1,17 +0,0 @@ -# bad2.py -import pyomo.environ as pyo - -model = pyo.AbstractModel() -model.q = pyo.Param(initialize=0, mutable=True) -model.A = pyo.Set(initialize=[1,2,3]) -model.x = pyo.Var(model.A) - -def x_rule(model): - if model.q > 0: - return sum(model.x[i] for i in model.A) >= 1 - else: - return sum(model.x[i] for i in model.A) >= 0 -model.c = pyo.Constraint(rule=x_rule) - -instance = model.create_instance() -instance.pprint() diff --git a/examples/pyomobook/abstract-ch/buildactions.py b/examples/pyomobook/abstract-ch/buildactions.py index c675b09635f..ad918e2b5f2 100644 --- a/examples/pyomobook/abstract-ch/buildactions.py +++ b/examples/pyomobook/abstract-ch/buildactions.py @@ -3,34 +3,52 @@ model = pyo.AbstractModel() -model.N = pyo.Set() # Set of warehouses -model.M = pyo.Set() # Set of customers -model.d = pyo.Param(model.N,model.M) +model.N = pyo.Set() # Set of warehouses +model.M = pyo.Set() # Set of customers +model.d = pyo.Param(model.N, model.M) model.P = pyo.Param() -model.x = pyo.Var(model.N, model.M, bounds=(0,1)) +model.x = pyo.Var(model.N, model.M, bounds=(0, 1)) model.y = pyo.Var(model.N, within=pyo.Binary) + def checkPN_rule(model): return model.P <= len(model.N) + + model.checkPN = pyo.BuildCheck(rule=checkPN_rule) + def obj_rule(model): - return sum(model.d[n,m]*model.x[n,m] for n in model.N for m in model.M) + return sum(model.d[n, m] * model.x[n, m] for n in model.N for m in model.M) + + model.obj = pyo.Objective(rule=obj_rule) + def one_per_cust_rule(model, m): - return sum(model.x[n,m] for n in model.N) == 1 + return sum(model.x[n, m] for n in model.N) == 1 + + model.one_per_cust = pyo.Constraint(model.M, rule=one_per_cust_rule) + def warehouse_active_rule(model, n, m): - return model.x[n,m] <= model.y[n] + return model.x[n, m] <= model.y[n] + + model.warehouse_active = pyo.Constraint(model.N, model.M, rule=warehouse_active_rule) + def num_warehouses_rule(model): return sum(model.y[n] for n in model.N) <= model.P + + model.num_warehouses = pyo.Constraint(rule=num_warehouses_rule) + def printM_rule(model): model.M.pprint() + + model.printM = pyo.BuildAction(rule=printM_rule) diff --git a/examples/pyomobook/abstract-ch/concrete1.py b/examples/pyomobook/abstract-ch/concrete1.py index a01ef4476cf..0ad41c79ea3 100644 --- a/examples/pyomobook/abstract-ch/concrete1.py +++ b/examples/pyomobook/abstract-ch/concrete1.py @@ -3,6 +3,6 @@ model = pyo.ConcreteModel() model.x_1 = pyo.Var(within=pyo.NonNegativeIntegers) model.x_2 = pyo.Var(within=pyo.NonNegativeIntegers) -model.obj = pyo.Objective(expr=model.x_1 + 2*model.x_2) -model.con1 = pyo.Constraint(expr=3*model.x_1 + 4*model.x_2 >= 1) -model.con2 = pyo.Constraint(expr=2*model.x_1 + 5*model.x_2 >= 2) +model.obj = pyo.Objective(expr=model.x_1 + 2 * model.x_2) +model.con1 = pyo.Constraint(expr=3 * model.x_1 + 4 * model.x_2 >= 1) +model.con2 = pyo.Constraint(expr=2 * model.x_1 + 5 * model.x_2 >= 2) diff --git a/examples/pyomobook/abstract-ch/concrete2.py b/examples/pyomobook/abstract-ch/concrete2.py index 30ca1e78c92..6aee434d556 100644 --- a/examples/pyomobook/abstract-ch/concrete2.py +++ b/examples/pyomobook/abstract-ch/concrete2.py @@ -1,8 +1,7 @@ import pyomo.environ as pyo model = pyo.ConcreteModel() -model.x = pyo.Var([1,2], within=pyo.NonNegativeReals) -model.obj = pyo.Objective(expr=model.x[1] + 2*model.x[2]) -model.con1 = pyo.Constraint(expr=3*model.x[1] + 4*model.x[2]>=1) -model.con2 = pyo.Constraint(expr=2*model.x[1] + 5*model.x[2]>=2) - +model.x = pyo.Var([1, 2], within=pyo.NonNegativeReals) +model.obj = pyo.Objective(expr=model.x[1] + 2 * model.x[2]) +model.con1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1) +model.con2 = pyo.Constraint(expr=2 * model.x[1] + 5 * model.x[2] >= 2) diff --git a/examples/pyomobook/abstract-ch/diet1.py b/examples/pyomobook/abstract-ch/diet1.py index eece699292f..eb8b071cdb5 100644 --- a/examples/pyomobook/abstract-ch/diet1.py +++ b/examples/pyomobook/abstract-ch/diet1.py @@ -2,7 +2,7 @@ import pyomo.environ as pyo infinity = float('inf') -MAX_FOOD_SUPPLY = 20.0 # There is a finite food supply +MAX_FOOD_SUPPLY = 20.0 # There is a finite food supply model = pyo.AbstractModel() @@ -11,8 +11,12 @@ model.FOOD = pyo.Set() model.cost = pyo.Param(model.FOOD, within=pyo.PositiveReals) model.f_min = pyo.Param(model.FOOD, within=pyo.NonNegativeReals, default=0.0) -def f_max_validate (model, value, j): + + +def f_max_validate(model, value, j): return model.f_max[j] > model.f_min[j] + + model.f_max = pyo.Param(model.FOOD, validate=f_max_validate, default=MAX_FOOD_SUPPLY) model.NUTR = pyo.Set() @@ -22,29 +26,50 @@ def f_max_validate (model, value, j): # -------------------------------------------------------- + def Buy_bounds(model, i): return (model.f_min[i], model.f_max[i]) + + model.Buy = pyo.Var(model.FOOD, bounds=Buy_bounds, within=pyo.NonNegativeIntegers) # -------------------------------------------------------- + def Total_Cost_rule(model): return sum(model.cost[j] * model.Buy[j] for j in model.FOOD) + + model.Total_Cost = pyo.Objective(rule=Total_Cost_rule, sense=pyo.minimize) # -------------------------------------------------------- + def Entree_rule(model): - entrees = ['Cheeseburger', 'Ham Sandwich', 'Hamburger', 'Fish Sandwich', 'Chicken Sandwich'] + entrees = [ + 'Cheeseburger', + 'Ham Sandwich', + 'Hamburger', + 'Fish Sandwich', + 'Chicken Sandwich', + ] return sum(model.Buy[e] for e in entrees) >= 1 + + model.Entree = pyo.Constraint(rule=Entree_rule) + def Side_rule(model): sides = ['Fries', 'Sausage Biscuit'] return sum(model.Buy[s] for s in sides) >= 1 + + model.Side = pyo.Constraint(rule=Side_rule) + def Drink_rule(model): drinks = ['Lowfat Milk', 'Orange Juice'] return sum(model.Buy[d] for d in drinks) >= 1 + + model.Drink = pyo.Constraint(rule=Drink_rule) diff --git a/examples/pyomobook/abstract-ch/param2.py b/examples/pyomobook/abstract-ch/param2.py index a747c945523..d51cbeffe84 100644 --- a/examples/pyomobook/abstract-ch/param2.py +++ b/examples/pyomobook/abstract-ch/param2.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) diff --git a/examples/pyomobook/abstract-ch/param2a.py b/examples/pyomobook/abstract-ch/param2a.py index c0adf31e0f4..fe928eb4197 100644 --- a/examples/pyomobook/abstract-ch/param2a.py +++ b/examples/pyomobook/abstract-ch/param2a.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) diff --git a/examples/pyomobook/abstract-ch/param3.py b/examples/pyomobook/abstract-ch/param3.py index 45233df17c9..64efba5c5ad 100644 --- a/examples/pyomobook/abstract-ch/param3.py +++ b/examples/pyomobook/abstract-ch/param3.py @@ -14,12 +14,12 @@ print('B') keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.C[key]))) + print(str(key) + " " + str(pyo.value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.D[key]))) + print(str(key) + " " + str(pyo.value(instance.D[key]))) diff --git a/examples/pyomobook/abstract-ch/param3a.py b/examples/pyomobook/abstract-ch/param3a.py index 0cb63a261e8..857d96f8318 100644 --- a/examples/pyomobook/abstract-ch/param3a.py +++ b/examples/pyomobook/abstract-ch/param3a.py @@ -14,12 +14,12 @@ print('B') keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.C[key]))) + print(str(key) + " " + str(pyo.value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.D[key]))) + print(str(key) + " " + str(pyo.value(instance.D[key]))) diff --git a/examples/pyomobook/abstract-ch/param3b.py b/examples/pyomobook/abstract-ch/param3b.py index bf0d2f01d01..655694c33dd 100644 --- a/examples/pyomobook/abstract-ch/param3b.py +++ b/examples/pyomobook/abstract-ch/param3b.py @@ -14,12 +14,12 @@ print('B') keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.C[key]))) + print(str(key) + " " + str(pyo.value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.D[key]))) + print(str(key) + " " + str(pyo.value(instance.D[key]))) diff --git a/examples/pyomobook/abstract-ch/param3c.py b/examples/pyomobook/abstract-ch/param3c.py index b6dec1d6c7d..7d58b8b6a39 100644 --- a/examples/pyomobook/abstract-ch/param3c.py +++ b/examples/pyomobook/abstract-ch/param3c.py @@ -14,12 +14,12 @@ print('B') keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.C[key]))) + print(str(key) + " " + str(pyo.value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.D[key]))) + print(str(key) + " " + str(pyo.value(instance.D[key]))) diff --git a/examples/pyomobook/abstract-ch/param4.py b/examples/pyomobook/abstract-ch/param4.py index 7c2a4831219..c902b9034ad 100644 --- a/examples/pyomobook/abstract-ch/param4.py +++ b/examples/pyomobook/abstract-ch/param4.py @@ -12,4 +12,4 @@ print('B') keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) diff --git a/examples/pyomobook/abstract-ch/param5.py b/examples/pyomobook/abstract-ch/param5.py index d95fb008edd..488e1debda8 100644 --- a/examples/pyomobook/abstract-ch/param5.py +++ b/examples/pyomobook/abstract-ch/param5.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) diff --git a/examples/pyomobook/abstract-ch/param5a.py b/examples/pyomobook/abstract-ch/param5a.py index cfb96982542..7e814b917cc 100644 --- a/examples/pyomobook/abstract-ch/param5a.py +++ b/examples/pyomobook/abstract-ch/param5a.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) diff --git a/examples/pyomobook/abstract-ch/param6.py b/examples/pyomobook/abstract-ch/param6.py index 2dea55274a9..d9c49a548b2 100644 --- a/examples/pyomobook/abstract-ch/param6.py +++ b/examples/pyomobook/abstract-ch/param6.py @@ -14,12 +14,12 @@ keys = instance.B.keys() print('B') for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.C[key]))) + print(str(key) + " " + str(pyo.value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.D[key]))) + print(str(key) + " " + str(pyo.value(instance.D[key]))) diff --git a/examples/pyomobook/abstract-ch/param6a.py b/examples/pyomobook/abstract-ch/param6a.py index e8053c21d24..e9aca384ee6 100644 --- a/examples/pyomobook/abstract-ch/param6a.py +++ b/examples/pyomobook/abstract-ch/param6a.py @@ -14,12 +14,12 @@ keys = instance.B.keys() print('B') for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) print('C') keys = instance.C.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.C[key]))) + print(str(key) + " " + str(pyo.value(instance.C[key]))) print('D') keys = instance.D.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.D[key]))) + print(str(key) + " " + str(pyo.value(instance.D[key]))) diff --git a/examples/pyomobook/abstract-ch/param7a.py b/examples/pyomobook/abstract-ch/param7a.py index 06f72298f1a..2a18cceabf6 100644 --- a/examples/pyomobook/abstract-ch/param7a.py +++ b/examples/pyomobook/abstract-ch/param7a.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) diff --git a/examples/pyomobook/abstract-ch/param7b.py b/examples/pyomobook/abstract-ch/param7b.py index 142fabf12d5..acf02ddd62f 100644 --- a/examples/pyomobook/abstract-ch/param7b.py +++ b/examples/pyomobook/abstract-ch/param7b.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) diff --git a/examples/pyomobook/abstract-ch/param8a.py b/examples/pyomobook/abstract-ch/param8a.py index 8c22d7e7da0..e68378961ed 100644 --- a/examples/pyomobook/abstract-ch/param8a.py +++ b/examples/pyomobook/abstract-ch/param8a.py @@ -11,4 +11,4 @@ keys = instance.B.keys() for key in sorted(keys): - print(str(key)+" "+str(pyo.value(instance.B[key]))) + print(str(key) + " " + str(pyo.value(instance.B[key]))) diff --git a/examples/pyomobook/abstract-ch/postprocess_fn.py b/examples/pyomobook/abstract-ch/postprocess_fn.py index acae5543a5d..f96a5b4dac1 100644 --- a/examples/pyomobook/abstract-ch/postprocess_fn.py +++ b/examples/pyomobook/abstract-ch/postprocess_fn.py @@ -1,7 +1,7 @@ import csv -def pyomo_postprocess(options=None, instance=None, - results=None): + +def pyomo_postprocess(options=None, instance=None, results=None): # # Collect the data # @@ -12,8 +12,7 @@ def pyomo_postprocess(options=None, instance=None, data[i] = {} for var in results.solution[i].variable: vars.add(var) - data[i][var] = \ - results.solution[i].variable[var]['Value'] + data[i][var] = results.solution[i].variable[var]['Value'] for obj in results.solution[i].objective: f[i] = results.solution[i].objective[obj]['Value'] break @@ -25,15 +24,14 @@ def pyomo_postprocess(options=None, instance=None, rows = [] vars = list(vars) vars.sort() - rows.append(['obj']+vars) + rows.append(['obj'] + vars) for i in range(len(results.solution)): row = [f[i]] for var in vars: - row.append( data[i].get(var,None) ) + row.append(data[i].get(var, None)) rows.append(row) print("Creating results file results.csv") OUTPUT = open('results.csv', 'w') writer = csv.writer(OUTPUT) writer.writerows(rows) OUTPUT.close() - diff --git a/examples/pyomobook/abstract-ch/pyomo.AbstractHLinear.txt b/examples/pyomobook/abstract-ch/pyomo.AbstractHLinear.txt index d66e08e3025..3dda4f152cf 100644 --- a/examples/pyomobook/abstract-ch/pyomo.AbstractHLinear.txt +++ b/examples/pyomobook/abstract-ch/pyomo.AbstractHLinear.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 3.83388751714678 Upper bound: 3.83388751714678 Number of objectives: 1 - Number of constraints: 2 - Number of variables: 3 - Number of nonzeros: 3 + Number of constraints: 1 + Number of variables: 2 + Number of nonzeros: 2 Sense: maximize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.abstract5.ns1.txt b/examples/pyomobook/abstract-ch/pyomo.abstract5.ns1.txt index 74197655219..d7970652d9d 100644 --- a/examples/pyomobook/abstract-ch/pyomo.abstract5.ns1.txt +++ b/examples/pyomobook/abstract-ch/pyomo.abstract5.ns1.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 0.8 Upper bound: 0.8 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.abstract5.ns2.txt b/examples/pyomobook/abstract-ch/pyomo.abstract5.ns2.txt index e7bea7e6c97..e82f37fbb43 100644 --- a/examples/pyomobook/abstract-ch/pyomo.abstract5.ns2.txt +++ b/examples/pyomobook/abstract-ch/pyomo.abstract5.ns2.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 8.0 Upper bound: 8.0 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.abstract5.ns3.txt b/examples/pyomobook/abstract-ch/pyomo.abstract5.ns3.txt index 844807b880a..6e8a8d8c52b 100644 --- a/examples/pyomobook/abstract-ch/pyomo.abstract5.ns3.txt +++ b/examples/pyomobook/abstract-ch/pyomo.abstract5.ns3.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 0.8 Upper bound: 0.8 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.abstract6.txt b/examples/pyomobook/abstract-ch/pyomo.abstract6.txt index 57dbe066e67..2afa2f4ba7d 100644 --- a/examples/pyomobook/abstract-ch/pyomo.abstract6.txt +++ b/examples/pyomobook/abstract-ch/pyomo.abstract6.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 0.8 Upper bound: 0.8 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.abstract7.txt b/examples/pyomobook/abstract-ch/pyomo.abstract7.txt index 9d669b54b39..d6039afd555 100644 --- a/examples/pyomobook/abstract-ch/pyomo.abstract7.txt +++ b/examples/pyomobook/abstract-ch/pyomo.abstract7.txt @@ -13,7 +13,7 @@ model: save file: None save format: None symbolic solver labels: false - file determinism: 1 + file determinism: None transform: [] preprocess: [] runtime: @@ -61,9 +61,9 @@ Problem: Lower bound: 0.8 Upper bound: 0.8 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 2 - Number of nonzeros: 3 + Number of constraints: 2 + Number of variables: 1 + Number of nonzeros: 2 Sense: minimize Solver: - Status: ok diff --git a/examples/pyomobook/abstract-ch/pyomo.bad1.sh b/examples/pyomobook/abstract-ch/pyomo.bad1.sh deleted file mode 100755 index b7947495d41..00000000000 --- a/examples/pyomobook/abstract-ch/pyomo.bad1.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -# @cmd: -pyomo check bad1.py -# @:cmd -rm -f results.yml results.json diff --git a/examples/pyomobook/abstract-ch/pyomo.bad2.sh b/examples/pyomobook/abstract-ch/pyomo.bad2.sh deleted file mode 100755 index b487a2007bd..00000000000 --- a/examples/pyomobook/abstract-ch/pyomo.bad2.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -# @cmd: -pyomo check bad2.py -# @:cmd -rm -f results.yml results.json diff --git a/examples/pyomobook/abstract-ch/pyomo.diet1.txt b/examples/pyomobook/abstract-ch/pyomo.diet1.txt index f7240f72443..412df0c0015 100644 --- a/examples/pyomobook/abstract-ch/pyomo.diet1.txt +++ b/examples/pyomobook/abstract-ch/pyomo.diet1.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 2.81 Upper bound: 2.81 Number of objectives: 1 - Number of constraints: 4 - Number of variables: 10 - Number of nonzeros: 10 + Number of constraints: 3 + Number of variables: 9 + Number of nonzeros: 9 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.model3.sh b/examples/pyomobook/abstract-ch/pyomo.model3.sh index 94d1a4b500d..246d4d38ab3 100755 --- a/examples/pyomobook/abstract-ch/pyomo.model3.sh +++ b/examples/pyomobook/abstract-ch/pyomo.model3.sh @@ -3,5 +3,5 @@ # @cmd: pyomo convert --output=concrete1.nl concrete1.py # @:cmd -diff concrete1.nl concrete1-ref.nl +python -m pyomo.repn.tests.nl_diff concrete1.nl concrete1-ref.nl rm -f results.yml results.json concrete1.nl diff --git a/examples/pyomobook/abstract-ch/pyomo.solve1.txt b/examples/pyomobook/abstract-ch/pyomo.solve1.txt index c5f90d5f921..a62da58cb59 100644 --- a/examples/pyomobook/abstract-ch/pyomo.solve1.txt +++ b/examples/pyomobook/abstract-ch/pyomo.solve1.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 1.0 Upper bound: 1.0 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.solve2.txt b/examples/pyomobook/abstract-ch/pyomo.solve2.txt index 4e554d7358d..5c46682b3e1 100644 --- a/examples/pyomobook/abstract-ch/pyomo.solve2.txt +++ b/examples/pyomobook/abstract-ch/pyomo.solve2.txt @@ -23,9 +23,9 @@ Problem: Lower bound: 1.0 Upper bound: 1.0 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.solve3.txt b/examples/pyomobook/abstract-ch/pyomo.solve3.txt index 9e361d10c66..1d79652eb2a 100644 --- a/examples/pyomobook/abstract-ch/pyomo.solve3.txt +++ b/examples/pyomobook/abstract-ch/pyomo.solve3.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 0.8 Upper bound: 0.8 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.solve4.txt b/examples/pyomobook/abstract-ch/pyomo.solve4.txt index 035ebe70973..d59cc97c6bd 100644 --- a/examples/pyomobook/abstract-ch/pyomo.solve4.txt +++ b/examples/pyomobook/abstract-ch/pyomo.solve4.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 1.0 Upper bound: 1.0 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.solve5.txt b/examples/pyomobook/abstract-ch/pyomo.solve5.txt index 3013c10a4dd..a38ece741b7 100644 --- a/examples/pyomobook/abstract-ch/pyomo.solve5.txt +++ b/examples/pyomobook/abstract-ch/pyomo.solve5.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 0.8 Upper bound: 0.8 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/pyomo.wl_abstract.txt b/examples/pyomobook/abstract-ch/pyomo.wl_abstract.txt index e2e538ad1c5..7fc707e6d22 100644 --- a/examples/pyomobook/abstract-ch/pyomo.wl_abstract.txt +++ b/examples/pyomobook/abstract-ch/pyomo.wl_abstract.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 2745.0 Upper bound: 2745.0 Number of objectives: 1 - Number of constraints: 18 - Number of variables: 16 - Number of nonzeros: 40 + Number of constraints: 17 + Number of variables: 15 + Number of nonzeros: 39 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/abstract-ch/set1.py b/examples/pyomobook/abstract-ch/set1.py index 1e7c5e7b22e..ee281bd10bd 100644 --- a/examples/pyomobook/abstract-ch/set1.py +++ b/examples/pyomobook/abstract-ch/set1.py @@ -10,4 +10,4 @@ print(sorted(list(instance.A.data()))) print(sorted((instance.B.data()))) -print(sorted(list((instance.C.data())), key=lambda x:x if type(x) is str else str(x))) +print(sorted(list((instance.C.data())), key=lambda x: x if type(x) is str else str(x))) diff --git a/examples/pyomobook/abstract-ch/set3.py b/examples/pyomobook/abstract-ch/set3.py index a1d9a01774a..7661963d19d 100644 --- a/examples/pyomobook/abstract-ch/set3.py +++ b/examples/pyomobook/abstract-ch/set3.py @@ -6,10 +6,14 @@ model.A = pyo.Set() model.B = pyo.Set(model.A) # @:decl -model.C = pyo.Set(model.A,model.A) +model.C = pyo.Set(model.A, model.A) instance = model.create_instance('set3.dat') -print(sorted(list(instance.A.data()), key=lambda x:x if type(x) is str else str(x))) -print(sorted(list(instance.B[1].data()), key=lambda x:x if type(x) is str else str(x))) -print(sorted(list(instance.B['aaa'].data()), key=lambda x:x if type(x) is str else str(x))) +print(sorted(list(instance.A.data()), key=lambda x: x if type(x) is str else str(x))) +print(sorted(list(instance.B[1].data()), key=lambda x: x if type(x) is str else str(x))) +print( + sorted( + list(instance.B['aaa'].data()), key=lambda x: x if type(x) is str else str(x) + ) +) diff --git a/examples/pyomobook/abstract-ch/set5.py b/examples/pyomobook/abstract-ch/set5.py index bd95871c95e..9f79870d3ff 100644 --- a/examples/pyomobook/abstract-ch/set5.py +++ b/examples/pyomobook/abstract-ch/set5.py @@ -9,5 +9,5 @@ instance = model.create_instance('set5.dat') -for tpl in sorted(list(instance.A.data()), key=lambda x:tuple(map(str,x))): +for tpl in sorted(list(instance.A.data()), key=lambda x: tuple(map(str, x))): print(tpl) diff --git a/examples/pyomobook/abstract-ch/unknown-ref.lp b/examples/pyomobook/abstract-ch/unknown-ref.lp index e3210465ec9..2ba5956008a 100644 --- a/examples/pyomobook/abstract-ch/unknown-ref.lp +++ b/examples/pyomobook/abstract-ch/unknown-ref.lp @@ -1,29 +1,26 @@ \* Source Pyomo model name=unknown *\ min -x3: -+1 x1 -+2 x2 +x1: ++1 x2 ++2 x3 s.t. c_l_x4_: -+3 x1 -+4 x2 ++3 x2 ++4 x3 >= 1 c_l_x5_: -+2 x1 -+5 x2 ++2 x2 ++5 x3 >= 2 -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 - bounds - 0 <= x1 <= +inf 0 <= x2 <= +inf + 0 <= x3 <= +inf general - x1 x2 + x3 end diff --git a/examples/pyomobook/abstract-ch/wl_abstract.py b/examples/pyomobook/abstract-ch/wl_abstract.py index 40b1729d696..f35a5327bfb 100644 --- a/examples/pyomobook/abstract-ch/wl_abstract.py +++ b/examples/pyomobook/abstract-ch/wl_abstract.py @@ -7,28 +7,40 @@ model.M = pyo.Set() # @:setdecl # @paramdecl: -model.d = pyo.Param(model.N,model.M) +model.d = pyo.Param(model.N, model.M) model.P = pyo.Param() # @:paramdecl # @vardecl: -model.x = pyo.Var(model.N, model.M, bounds=(0,1)) +model.x = pyo.Var(model.N, model.M, bounds=(0, 1)) model.y = pyo.Var(model.N, within=pyo.Binary) # @:vardecl + def obj_rule(model): - return sum(model.d[n,m]*model.x[n,m] for n in model.N for m in model.M) + return sum(model.d[n, m] * model.x[n, m] for n in model.N for m in model.M) + + model.obj = pyo.Objective(rule=obj_rule) + # @deliver: def one_per_cust_rule(model, m): - return sum(model.x[n,m] for n in model.N) == 1 + return sum(model.x[n, m] for n in model.N) == 1 + + model.one_per_cust = pyo.Constraint(model.M, rule=one_per_cust_rule) # @:deliver + def warehouse_active_rule(model, n, m): - return model.x[n,m] <= model.y[n] + return model.x[n, m] <= model.y[n] + + model.warehouse_active = pyo.Constraint(model.N, model.M, rule=warehouse_active_rule) + def num_warehouses_rule(model): return sum(model.y[n] for n in model.N) <= model.P + + model.num_warehouses = pyo.Constraint(rule=num_warehouses_rule) diff --git a/examples/pyomobook/abstract-ch/wl_abstract_script.py b/examples/pyomobook/abstract-ch/wl_abstract_script.py index b070e01820b..0b042405714 100644 --- a/examples/pyomobook/abstract-ch/wl_abstract_script.py +++ b/examples/pyomobook/abstract-ch/wl_abstract_script.py @@ -6,26 +6,38 @@ model.N = pyo.Set() model.M = pyo.Set() -model.d = pyo.Param(model.N,model.M) +model.d = pyo.Param(model.N, model.M) model.P = pyo.Param() -model.x = pyo.Var(model.N, model.M, bounds=(0,1)) +model.x = pyo.Var(model.N, model.M, bounds=(0, 1)) model.y = pyo.Var(model.N, within=pyo.Binary) + def obj_rule(model): - return sum(model.d[n,m]*model.x[n,m] for n in model.N for m in model.M) + return sum(model.d[n, m] * model.x[n, m] for n in model.N for m in model.M) + + model.obj = pyo.Objective(rule=obj_rule) + def one_per_cust_rule(model, m): - return sum(model.x[n,m] for n in model.N) == 1 + return sum(model.x[n, m] for n in model.N) == 1 + + model.one_per_cust = pyo.Constraint(model.M, rule=one_per_cust_rule) + def warehouse_active_rule(model, n, m): - return model.x[n,m] <= model.y[n] + return model.x[n, m] <= model.y[n] + + model.warehouse_active = pyo.Constraint(model.N, model.M, rule=warehouse_active_rule) + def num_warehouses_rule(model): return sum(model.y[n] for n in model.N) <= model.P + + model.num_warehouses = pyo.Constraint(rule=num_warehouses_rule) # @abstractsolve: diff --git a/examples/pyomobook/blocks-ch/blocks_gen.py b/examples/pyomobook/blocks-ch/blocks_gen.py index 5b98366febc..109e881cad5 100644 --- a/examples/pyomobook/blocks-ch/blocks_gen.py +++ b/examples/pyomobook/blocks-ch/blocks_gen.py @@ -10,22 +10,31 @@ model = pyo.ConcreteModel() model.TIME = pyo.Set(initialize=time, ordered=True) model.GEN_UNITS = pyo.Set(initialize=genset, ordered=True) -def generator_rule(b,g): + + +def generator_rule(b, g): m = b.model() b.MaxPower = pyo.Param(within=pyo.NonNegativeReals, initialize=maxpower) b.RampLimit = pyo.Param(within=pyo.NonNegativeReals, initialize=ramplimit) - b.Power = pyo.Var(m.TIME, bounds=(0,b.MaxPower), initialize=gennom) + b.Power = pyo.Var(m.TIME, bounds=(0, b.MaxPower), initialize=gennom) b.UnitOn = pyo.Var(m.TIME, within=pyo.Binary) + def limit_ramp(_b, t): if t == min(_b.model().TIME): return pyo.Constraint.Skip - return pyo.inequality(-_b.RampLimit, _b.Power[t] - _b.Power[t-1], _b.RampLimit) + return pyo.inequality( + -_b.RampLimit, _b.Power[t] - _b.Power[t - 1], _b.RampLimit + ) + b.limit_ramp = pyo.Constraint(m.TIME, rule=limit_ramp) - b.CostCoef = pyo.Param([1,2]) - def Cost(_b,t): - return sum(_b.CostCoef[i]*_b.Power[t]**i for i in _b.CostCoef) + b.CostCoef = pyo.Param([1, 2]) + + def Cost(_b, t): + return sum(_b.CostCoef[i] * _b.Power[t] ** i for i in _b.CostCoef) + b.Cost = pyo.Expression(m.TIME, rule=Cost) - + + model.Generator = pyo.Block(model.GEN_UNITS, rule=generator_rule) # @:usepassedblock @@ -37,24 +46,32 @@ def Cost(_b,t): model.TIME = pyo.Set(initialize=time, ordered=True) model.GEN_UNITS = pyo.Set(initialize=genset, ordered=True) -def generator_rule(b,g): + +def generator_rule(b, g): m = b.model() gen = pyo.Block(concrete=True) gen.MaxPower = pyo.Param(within=pyo.NonNegativeReals, initialize=maxpower) gen.RampLimit = pyo.Param(within=pyo.NonNegativeReals, initialize=ramplimit) - gen.Power = pyo.Var(m.TIME, bounds=(0,gen.MaxPower), initialize=gennom) + gen.Power = pyo.Var(m.TIME, bounds=(0, gen.MaxPower), initialize=gennom) gen.UnitOn = pyo.Var(m.TIME, within=pyo.Binary) + def limit_ramp(_b, t): if t == m.TIME.first(): return pyo.Constraint.Skip - return pyo.inequality(-_b.RampLimit, _b.Power[t] - _b.Power[t-1], _b.RampLimit) + return pyo.inequality( + -_b.RampLimit, _b.Power[t] - _b.Power[t - 1], _b.RampLimit + ) + gen.limit_ramp = pyo.Constraint(m.TIME, rule=limit_ramp) - gen.CostCoef = pyo.Param([1,2]) - def Cost(_b,t): - return sum(_b.CostCoef[i]*_b.Power[t]**i for i in _b.CostCoef) + gen.CostCoef = pyo.Param([1, 2]) + + def Cost(_b, t): + return sum(_b.CostCoef[i] * _b.Power[t] ** i for i in _b.CostCoef) + gen.Cost = pyo.Expression(m.TIME, rule=Cost) return gen + model.Generator = pyo.Block(model.GEN_UNITS, rule=generator_rule) # @:buildnewblock diff --git a/examples/pyomobook/blocks-ch/blocks_intro.py b/examples/pyomobook/blocks-ch/blocks_intro.py index bff67528f71..ad3ceaa4349 100644 --- a/examples/pyomobook/blocks-ch/blocks_intro.py +++ b/examples/pyomobook/blocks-ch/blocks_intro.py @@ -9,29 +9,29 @@ model.b.I = pyo.RangeSet(model.P) model.b.x = pyo.Var(model.b.I) model.b.y = pyo.Var(model.S) -model.b.b = pyo.Block([1,2]) +model.b.b = pyo.Block([1, 2]) model.b.b[1].x = pyo.Var() model.b.b[2].x = pyo.Var() # @:hierarchy # @hierarchyprint: -print(model.x.local_name) # x -print(model.x.name) # x -print(model.b.x.local_name) # x -print(model.b.x.name) # b.x -print(model.b.b[1].x.local_name) # x -print(model.b.b[1].x.name) # b.b[1].x +print(model.x.local_name) # x +print(model.x.name) # x +print(model.b.x.local_name) # x +print(model.b.x.name) # b.x +print(model.b.b[1].x.local_name) # x +print(model.b.b[1].x.name) # b.b[1].x # @:hierarchyprint # @hierarchymove: model.b.b[1].x.parent_component() # is model.b.b[1].x -model.b.b[1].x.parent_block() # is model.b.b[1] -model.b.b[1].x.model() # is model -model.b.b[1].component('x') # is model.b.b[1].x -model.b.x[1].parent_component() # is model.b.x -model.b.x[1].parent_block() # is model.b -model.b.x[1].model() # is model -model.b.component('x') # is model.b.x +model.b.b[1].x.parent_block() # is model.b.b[1] +model.b.b[1].x.model() # is model +model.b.b[1].component('x') # is model.b.b[1].x +model.b.x[1].parent_component() # is model.b.x +model.b.x[1].parent_block() # is model.b +model.b.x[1].model() # is model +model.b.component('x') # is model.b.x # @:hierarchymove model = None @@ -53,11 +53,14 @@ model.P = pyo.Param(initialize=3) model.T = pyo.RangeSet(model.P) + def xyb_rule(b, t): b.x = pyo.Var() b.I = pyo.RangeSet(t) b.y = pyo.Var(b.I) b.c = pyo.Constraint(expr=b.x == 1 - sum(b.y[i] for i in b.I)) + + model.xyb = pyo.Block(model.T, rule=xyb_rule) # @:blockrule model.pprint() @@ -66,14 +69,20 @@ def xyb_rule(b, t): model = pyo.ConcreteModel() model.P = pyo.Param(initialize=3) model.T = pyo.RangeSet(model.P) + + # @blockrule2: def xyb_rule(b, t): b.x = pyo.Var() b.I = pyo.RangeSet(t) b.y = pyo.Var(b.I, initialize=1.0) + def _b_c_rule(_b): return _b.x == 1.0 - sum(_b.y[i] for i in _b.I) + b.c = pyo.Constraint(rule=_b_c_rule) + + model.xyb = pyo.Block(model.T, rule=xyb_rule) # @:blockrule2 model.pprint() @@ -85,8 +94,8 @@ def _b_c_rule(_b): # @blockvalues1: for t in model.xyb: - for i in model.xyb[t].y: - print("%s %f" % (model.xyb[t].y[i], pyo.value(model.xyb[t].y[i]))) + for i in model.xyb[t].y: + print("%s %f" % (model.xyb[t].y[i], pyo.value(model.xyb[t].y[i]))) # @:blockvalues1 # @blockvalues2: diff --git a/examples/pyomobook/blocks-ch/blocks_lotsizing.py b/examples/pyomobook/blocks-ch/blocks_lotsizing.py index e5f42c199d4..fe0717d8c7c 100644 --- a/examples/pyomobook/blocks-ch/blocks_lotsizing.py +++ b/examples/pyomobook/blocks-ch/blocks_lotsizing.py @@ -1,16 +1,17 @@ import pyomo.environ as pyo model = pyo.ConcreteModel() -model.T = pyo.RangeSet(5) # time periods +model.T = pyo.RangeSet(5) # time periods -i0 = 5.0 # initial inventory -c = 4.6 # setup cost -h_pos = 0.7 # inventory holding cost -h_neg = 1.2 # shortage cost -P = 5.0 # maximum production amount +i0 = 5.0 # initial inventory +c = 4.6 # setup cost +h_pos = 0.7 # inventory holding cost +h_neg = 1.2 # shortage cost +P = 5.0 # maximum production amount # demand during period t -d = {1: 5.0, 2:7.0, 3:6.2, 4:3.1, 5:1.7} +d = {1: 5.0, 2: 7.0, 3: 6.2, 4: 3.1, 5: 1.7} + # @blockrule: # create a block for a single time period @@ -27,19 +28,29 @@ def lotsizing_block_rule(b, t): b.inventory = pyo.Constraint(expr=b.i == b.i0 + b.x - d[t]) b.pos_neg = pyo.Constraint(expr=b.i == b.i_pos - b.i_neg) b.prod_indicator = pyo.Constraint(expr=b.x <= P * b.y) + + model.lsb = pyo.Block(model.T, rule=lotsizing_block_rule) # @:blockrule + # link the inventory variables between blocks def i_linking_rule(m, t): if t == m.T.first(): return m.lsb[t].i0 == i0 - return m.lsb[t].i0 == m.lsb[t-1].i + return m.lsb[t].i0 == m.lsb[t - 1].i + + model.i_linking = pyo.Constraint(model.T, rule=i_linking_rule) + # construct the objective function over all the blocks def obj_rule(m): - return sum(c*m.lsb[t].y + h_pos*m.lsb[t].i_pos + h_neg*m.lsb[t].i_neg for t in m.T) + return sum( + c * m.lsb[t].y + h_pos * m.lsb[t].i_pos + h_neg * m.lsb[t].i_neg for t in m.T + ) + + model.obj = pyo.Objective(rule=obj_rule) ### solve the problem @@ -48,4 +59,4 @@ def obj_rule(m): # print the results for t in model.T: - print('Period: {0}, Prod. Amount: {1}'.format(t, pyo.value(model.lsb[t].x))) + print('Period: {0}, Prod. Amount: {1}'.format(t, pyo.value(model.lsb[t].x))) diff --git a/examples/pyomobook/blocks-ch/lotsizing.py b/examples/pyomobook/blocks-ch/lotsizing.py index 9d73ef9a72d..47ea265246e 100644 --- a/examples/pyomobook/blocks-ch/lotsizing.py +++ b/examples/pyomobook/blocks-ch/lotsizing.py @@ -1,16 +1,16 @@ import pyomo.environ as pyo model = pyo.ConcreteModel() -model.T = pyo.RangeSet(5) # time periods +model.T = pyo.RangeSet(5) # time periods -i0 = 5.0 # initial inventory -c = 4.6 # setup cost -h_pos = 0.7 # inventory holding cost -h_neg = 1.2 # shortage cost -P = 5.0 # maximum production amount +i0 = 5.0 # initial inventory +c = 4.6 # setup cost +h_pos = 0.7 # inventory holding cost +h_neg = 1.2 # shortage cost +P = 5.0 # maximum production amount # demand during period t -d = {1: 5.0, 2:7.0, 3:6.2, 4:3.1, 5:1.7} +d = {1: 5.0, 2: 7.0, 3: 6.2, 4: 3.1, 5: 1.7} # @vars: # define the variables @@ -21,25 +21,37 @@ model.i_neg = pyo.Var(model.T, domain=pyo.NonNegativeReals) # @:vars + # define the inventory relationships def inventory_rule(m, t): if t == m.T.first(): return m.i[t] == i0 + m.x[t] - d[t] - return m.i[t] == m.i[t-1] + m.x[t] - d[t] + return m.i[t] == m.i[t - 1] + m.x[t] - d[t] + + model.inventory = pyo.Constraint(model.T, rule=inventory_rule) + def pos_neg_rule(m, t): return m.i[t] == m.i_pos[t] - m.i_neg[t] + + model.pos_neg = pyo.Constraint(model.T, rule=pos_neg_rule) + # create the big-M constraint for the production indicator variable -def prod_indicator_rule(m,t): - return m.x[t] <= P*m.y[t] +def prod_indicator_rule(m, t): + return m.x[t] <= P * m.y[t] + + model.prod_indicator = pyo.Constraint(model.T, rule=prod_indicator_rule) + # define the cost function def obj_rule(m): - return sum(c*m.y[t] + h_pos*m.i_pos[t] + h_neg*m.i_neg[t] for t in m.T) + return sum(c * m.y[t] + h_pos * m.i_pos[t] + h_neg * m.i_neg[t] for t in m.T) + + model.obj = pyo.Objective(rule=obj_rule) # solve the problem @@ -48,5 +60,4 @@ def obj_rule(m): # print the results for t in model.T: - print('Period: {0}, Prod. Amount: {1}'.format(t, pyo.value(model.x[t]))) - + print('Period: {0}, Prod. Amount: {1}'.format(t, pyo.value(model.x[t]))) diff --git a/examples/pyomobook/blocks-ch/lotsizing_no_time.py b/examples/pyomobook/blocks-ch/lotsizing_no_time.py index 5abc8baf24b..901467a0cbb 100644 --- a/examples/pyomobook/blocks-ch/lotsizing_no_time.py +++ b/examples/pyomobook/blocks-ch/lotsizing_no_time.py @@ -1,17 +1,17 @@ import pyomo.environ as pyo model = pyo.ConcreteModel() -model.T = pyo.RangeSet(5) # time periods +model.T = pyo.RangeSet(5) # time periods model.S = pyo.RangeSet(5) -i0 = 5.0 # initial inventory -c = 4.6 # setup cost -h_pos = 0.7 # inventory holding cost -h_neg = 1.2 # shortage cost -P = 5.0 # maximum production amount +i0 = 5.0 # initial inventory +c = 4.6 # setup cost +h_pos = 0.7 # inventory holding cost +h_neg = 1.2 # shortage cost +P = 5.0 # maximum production amount # demand during period t -d = {1: 5.0, 2:7.0, 3:6.2, 4:3.1, 5:1.7} +d = {1: 5.0, 2: 7.0, 3: 6.2, 4: 3.1, 5: 1.7} # @vars: # define the variables diff --git a/examples/pyomobook/blocks-ch/lotsizing_uncertain.py b/examples/pyomobook/blocks-ch/lotsizing_uncertain.py index 3c9e09bd79a..6d16de7e3a7 100644 --- a/examples/pyomobook/blocks-ch/lotsizing_uncertain.py +++ b/examples/pyomobook/blocks-ch/lotsizing_uncertain.py @@ -1,23 +1,23 @@ import pyomo.environ as pyo model = pyo.ConcreteModel() -model.T = pyo.RangeSet(5) # time periods +model.T = pyo.RangeSet(5) # time periods model.S = pyo.RangeSet(5) -i0 = 5.0 # initial inventory -c = 4.6 # setup cost -h_pos = 0.7 # inventory holding cost -h_neg = 1.2 # shortage cost -P = 5.0 # maximum production amount +i0 = 5.0 # initial inventory +c = 4.6 # setup cost +h_pos = 0.7 # inventory holding cost +h_neg = 1.2 # shortage cost +P = 5.0 # maximum production amount # demand during period t -d = {1: 5.0, 2:7.0, 3:6.2, 4:3.1, 5:1.7} +d = {1: 5.0, 2: 7.0, 3: 6.2, 4: 3.1, 5: 1.7} # @vars: # define the variables model.y = pyo.Var(model.T, model.S, domain=pyo.Binary) model.x = pyo.Var(model.T, model.S, domain=pyo.NonNegativeReals) -model.i = pyo.Var(model.T, model.S,) +model.i = pyo.Var(model.T, model.S) model.i_pos = pyo.Var(model.T, model.S, domain=pyo.NonNegativeReals) model.i_neg = pyo.Var(model.T, model.S, domain=pyo.NonNegativeReals) # @:vars diff --git a/examples/pyomobook/dae-ch/dae_tester_model.py b/examples/pyomobook/dae-ch/dae_tester_model.py index a6664f74575..9e0da9f4a62 100644 --- a/examples/pyomobook/dae-ch/dae_tester_model.py +++ b/examples/pyomobook/dae-ch/dae_tester_model.py @@ -3,7 +3,7 @@ import pyomo.dae as dae m = pyo.ConcreteModel() -m.t = dae.ContinuousSet(bounds=(0,1)) +m.t = dae.ContinuousSet(bounds=(0, 1)) m.x1 = pyo.Var(m.t) # @second-deriv: @@ -20,8 +20,8 @@ print(len(m.x1)) m = pyo.ConcreteModel() -m.t1 = dae.ContinuousSet(bounds=(0,1)) -m.t2 = dae.ContinuousSet(bounds=(0,1)) +m.t1 = dae.ContinuousSet(bounds=(0, 1)) +m.t2 = dae.ContinuousSet(bounds=(0, 1)) m.x1 = pyo.Var(m.t1, m.t2) m.dx1dt2 = dae.DerivativeVar(m.x1, wrt=(m.t1, m.t2)) @@ -39,8 +39,8 @@ print(len(m.x1)) m = pyo.ConcreteModel() -m.t1 = dae.ContinuousSet(bounds=(0,1)) -m.t2 = dae.ContinuousSet(bounds=(0,1)) +m.t1 = dae.ContinuousSet(bounds=(0, 1)) +m.t2 = dae.ContinuousSet(bounds=(0, 1)) m.x1 = pyo.Var(m.t1, m.t2) m.dx1dt2 = dae.DerivativeVar(m.x1, wrt=(m.t1, m.t2)) @@ -58,14 +58,14 @@ print(len(m.x1)) m = pyo.ConcreteModel() -m.t1 = dae.ContinuousSet(bounds=(0,1)) -m.t2 = dae.ContinuousSet(bounds=(0,1)) +m.t1 = dae.ContinuousSet(bounds=(0, 1)) +m.t2 = dae.ContinuousSet(bounds=(0, 1)) m.x1 = pyo.Var(m.t1, m.t2) m.dx1dt2 = dae.DerivativeVar(m.x1, wrt=(m.t1, m.t2)) # @finite-colloc: -# Apply a combination of finite difference and +# Apply a combination of finite difference and # collocation schemes discretizer1 = pyo.TransformationFactory('dae.finite_difference') discretizer2 = pyo.TransformationFactory('dae.collocation') diff --git a/examples/pyomobook/dae-ch/path_constraint.py b/examples/pyomobook/dae-ch/path_constraint.py index d0d96740b68..5fe41dd132d 100644 --- a/examples/pyomobook/dae-ch/path_constraint.py +++ b/examples/pyomobook/dae-ch/path_constraint.py @@ -22,13 +22,14 @@ # @import: import pyomo.environ as pyo import pyomo.dae as dae + # @:import m = pyo.ConcreteModel() # @contset: m.tf = pyo.Param(initialize=1) -m.t = dae.ContinuousSet(bounds=(0,m.tf)) +m.t = dae.ContinuousSet(bounds=(0, m.tf)) # @:contset # @vardecl: @@ -42,25 +43,37 @@ m.dx3 = dae.DerivativeVar(m.x3) # @:vardecl + # @diffeq: def _x1dot(m, t): return m.dx1[t] == m.x2[t] + + m.x1dotcon = pyo.Constraint(m.t, rule=_x1dot) + def _x2dot(m, t): - return m.dx2[t] == -m.x2[t] + m.u[t] + return m.dx2[t] == -m.x2[t] + m.u[t] + + m.x2dotcon = pyo.Constraint(m.t, rule=_x2dot) + def _x3dot(m, t): - return m.dx3[t] == m.x1[t]**2 + m.x2[t]**2 + 0.005*m.u[t]**2 + return m.dx3[t] == m.x1[t] ** 2 + m.x2[t] ** 2 + 0.005 * m.u[t] ** 2 + + m.x3dotcon = pyo.Constraint(m.t, rule=_x3dot) # @:diffeq # @objpath: m.obj = pyo.Objective(expr=m.x3[m.tf]) + def _con(m, t): - return m.x2[t] - 8*(t - 0.5)**2 + 0.5 <= 0 + return m.x2[t] - 8 * (t - 0.5) ** 2 + 0.5 <= 0 + + m.con = pyo.Constraint(m.t, rule=_con) # @:objpath diff --git a/examples/pyomobook/dae-ch/plot_path_constraint.py b/examples/pyomobook/dae-ch/plot_path_constraint.py index 56197b0dab3..4c04bc1b6b6 100644 --- a/examples/pyomobook/dae-ch/plot_path_constraint.py +++ b/examples/pyomobook/dae-ch/plot_path_constraint.py @@ -1,15 +1,17 @@ # @plot_path: def plotter(subplot, x, *y, **kwds): plt.subplot(subplot) - for i,_y in enumerate(y): - plt.plot(list(x), [value(_y[t]) for t in x], 'brgcmk'[i%6]) + for i, _y in enumerate(y): + plt.plot(list(x), [value(_y[t]) for t in x], 'brgcmk'[i % 6]) if kwds.get('points', False): plt.plot(list(x), [value(_y[t]) for t in x], 'o') - plt.title(kwds.get('title','')) + plt.title(kwds.get('title', '')) plt.legend(tuple(_y.name for _y in y)) plt.xlabel(x.name) + import matplotlib.pyplot as plt + plotter(121, m.t, m.x1, m.x2, title='Differential Variables') plotter(122, m.t, m.u, title='Control Variable', points=True) plt.show() diff --git a/examples/pyomobook/dae-ch/run_path_constraint.py b/examples/pyomobook/dae-ch/run_path_constraint.py index 983658198f7..b819d6a7127 100644 --- a/examples/pyomobook/dae-ch/run_path_constraint.py +++ b/examples/pyomobook/dae-ch/run_path_constraint.py @@ -5,12 +5,11 @@ # Discretize model using Orthogonal Collocation # @disc: discretizer = pyo.TransformationFactory('dae.collocation') -discretizer.apply_to(m,nfe=7,ncp=6,scheme='LAGRANGE-RADAU') +discretizer.apply_to(m, nfe=7, ncp=6, scheme='LAGRANGE-RADAU') # @:disc # @reduce: discretizer.reduce_collocation_points(m, var=m.u, ncp=1, contset=m.t) # @:reduce -solver=pyo.SolverFactory('ipopt') +solver = pyo.SolverFactory('ipopt') solver.solve(m, tee=True) - diff --git a/examples/pyomobook/gdp-ch/gdp_uc.py b/examples/pyomobook/gdp-ch/gdp_uc.py index bcb15e75683..2495ed9bef1 100644 --- a/examples/pyomobook/gdp-ch/gdp_uc.py +++ b/examples/pyomobook/gdp-ch/gdp_uc.py @@ -16,66 +16,84 @@ model.StartUpRampLimit = pyo.Param(model.GENERATORS, within=pyo.NonNegativeReals) model.ShutDownRampLimit = pyo.Param(model.GENERATORS, within=pyo.NonNegativeReals) -def Power_bound(m,g,t): + +def Power_bound(m, g, t): return (0, m.MaxPower[g]) + + model.Power = pyo.Var(model.GENERATORS, model.TIME, bounds=Power_bound) + def GenOn(b, g, t): m = b.model() b.power_limit = pyo.Constraint( - expr=pyo.inequality(m.MinPower[g], m.Power[g,t], m.MaxPower[g]) ) + expr=pyo.inequality(m.MinPower[g], m.Power[g, t], m.MaxPower[g]) + ) if t == m.TIME.first(): return b.ramp_limit = pyo.Constraint( - expr=pyo.inequality(-m.RampDownLimit[g], - m.Power[g,t] - m.Power[g,t-1], - m.RampUpLimit[g]) + expr=pyo.inequality( + -m.RampDownLimit[g], m.Power[g, t] - m.Power[g, t - 1], m.RampUpLimit[g] + ) ) + + model.GenOn = Disjunct(model.GENERATORS, model.TIME, rule=GenOn) + def GenOff(b, g, t): m = b.model() - b.power_limit = pyo.Constraint( - expr=m.Power[g,t] == 0 ) + b.power_limit = pyo.Constraint(expr=m.Power[g, t] == 0) if t == m.TIME.first(): return - b.ramp_limit = pyo.Constraint( - expr=m.Power[g,t-1] <= m.ShutDownRampLimit[g] ) + b.ramp_limit = pyo.Constraint(expr=m.Power[g, t - 1] <= m.ShutDownRampLimit[g]) + + model.GenOff = Disjunct(model.GENERATORS, model.TIME, rule=GenOff) + def GenStartUp(b, g, t): m = b.model() - b.power_limit = pyo.Constraint( - expr=m.Power[g,t] <= m.StartUpRampLimit[g] ) + b.power_limit = pyo.Constraint(expr=m.Power[g, t] <= m.StartUpRampLimit[g]) + + model.GenStartup = Disjunct(model.GENERATORS, model.TIME, rule=GenStartUp) # @:disjuncts + # @disjunction: def bind_generators(m, g, t): - return [m.GenOn[g, t], m.GenOff[g, t], m.GenStartup[g, t]] -model.bind_generators = Disjunction( - model.GENERATORS, model.TIME, rule=bind_generators) + return [m.GenOn[g, t], m.GenOff[g, t], m.GenStartup[g, t]] + + +model.bind_generators = Disjunction(model.GENERATORS, model.TIME, rule=bind_generators) # @:disjunction + # @logic: def onState(m, g, t): if t == m.TIME.first(): return pyo.LogicalConstraint.Skip - return m.GenOn[g, t].indicator_var.implies(pyo.lor( - m.GenOn[g, t-1].indicator_var, - m.GenStartup[g, t-1].indicator_var)) -model.onState = pyo.LogicalConstraint( - model.GENERATORS, model.TIME, rule=onState) + return m.GenOn[g, t].indicator_var.implies( + pyo.lor(m.GenOn[g, t - 1].indicator_var, m.GenStartup[g, t - 1].indicator_var) + ) + + +model.onState = pyo.LogicalConstraint(model.GENERATORS, model.TIME, rule=onState) + def startupState(m, g, t): if t == m.TIME.first(): return pyo.LogicalConstraint.Skip - return m.GenStartup[g, t].indicator_var.implies( - m.GenOff[g, t-1].indicator_var) + return m.GenStartup[g, t].indicator_var.implies(m.GenOff[g, t - 1].indicator_var) + + model.startupState = pyo.LogicalConstraint( - model.GENERATORS, model.TIME, rule=startupState) + model.GENERATORS, model.TIME, rule=startupState +) # @:logic + # # Fictitious objective to form a legal LP file # @@ -83,6 +101,7 @@ def startupState(m, g, t): def obj(m): return sum(m.Power[g, t] for g in m.GENERATORS for t in m.TIME) + @model.Constraint(model.GENERATORS) def nontrivial(m, g): - return sum(m.Power[g, t] for t in m.TIME) >= len(m.TIME)/2 * m.MinPower[g] + return sum(m.Power[g, t] for t in m.TIME) >= len(m.TIME) / 2 * m.MinPower[g] diff --git a/examples/pyomobook/gdp-ch/pyomo.gdp_uc.txt b/examples/pyomobook/gdp-ch/pyomo.gdp_uc.txt index 5330f32b4e4..477336d48ba 100644 --- a/examples/pyomobook/gdp-ch/pyomo.gdp_uc.txt +++ b/examples/pyomobook/gdp-ch/pyomo.gdp_uc.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 45.0 Upper bound: 45.0 Number of objectives: 1 - Number of constraints: 59 - Number of variables: 25 - Number of nonzeros: 125 + Number of constraints: 58 + Number of variables: 24 + Number of nonzeros: 124 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/gdp-ch/scont.py b/examples/pyomobook/gdp-ch/scont.py index 82ca4051985..76597326700 100644 --- a/examples/pyomobook/gdp-ch/scont.py +++ b/examples/pyomobook/gdp-ch/scont.py @@ -2,35 +2,42 @@ import pyomo.environ as pyo from pyomo.gdp import Disjunct, Disjunction -L = [1,2,3] -U = [2,4,6] -index = [0,1,2] +L = [1, 2, 3] +U = [2, 4, 6] +index = [0, 1, 2] model = pyo.ConcreteModel() -model.x = pyo.Var(index, within=pyo.Reals, bounds=(0,20)) -model.x_nonzero = pyo.Var(index, bounds=(0,1)) +model.x = pyo.Var(index, within=pyo.Reals, bounds=(0, 20)) +model.x_nonzero = pyo.Var(index, bounds=(0, 1)) + # Each disjunction is a semi-continuous variable # x[k] == 0 or L[k] <= x[k] <= U[k] def d_0_rule(d, k): m = d.model() d.c = pyo.Constraint(expr=m.x[k] == 0) + + model.d_0 = Disjunct(index, rule=d_0_rule) + def d_nonzero_rule(d, k): m = d.model() d.c = pyo.Constraint(expr=pyo.inequality(L[k], m.x[k], U[k])) d.count = pyo.Constraint(expr=m.x_nonzero[k] == 1) + + model.d_nonzero = Disjunct(index, rule=d_nonzero_rule) + def D_rule(m, k): return [m.d_0[k], m.d_nonzero[k]] + + model.D = Disjunction(index, rule=D_rule) # Minimize the number of x variables that are nonzero -model.o = pyo.Objective( - expr=sum(model.x_nonzero[k] for k in index)) +model.o = pyo.Objective(expr=sum(model.x_nonzero[k] for k in index)) # Satisfy a demand that is met by these variables -model.c = pyo.Constraint( - expr=sum(model.x[k] for k in index) >= 7) +model.c = pyo.Constraint(expr=sum(model.x[k] for k in index) >= 7) diff --git a/examples/pyomobook/gdp-ch/scont2.py b/examples/pyomobook/gdp-ch/scont2.py index 9ec593a9a7a..94e510b358a 100644 --- a/examples/pyomobook/gdp-ch/scont2.py +++ b/examples/pyomobook/gdp-ch/scont2.py @@ -1,11 +1,15 @@ import pyomo.environ as pyo import scont + model = scont.model + # @action: def transform_gdp(m): - xfrm = pyo.TransformationFactory('gdp.bigm') - xfrm.apply_to(m) + xfrm = pyo.TransformationFactory('gdp.bigm') + xfrm.apply_to(m) + + model.transform_gdp = pyo.BuildAction(rule=transform_gdp) # @:action diff --git a/examples/pyomobook/gdp-ch/scont_script.py b/examples/pyomobook/gdp-ch/scont_script.py index 89b6c70b5dc..22c9b88ad0c 100644 --- a/examples/pyomobook/gdp-ch/scont_script.py +++ b/examples/pyomobook/gdp-ch/scont_script.py @@ -13,4 +13,5 @@ print(status) import verify_scont + verify_scont.verify_model(model) diff --git a/examples/pyomobook/gdp-ch/scont_script.txt b/examples/pyomobook/gdp-ch/scont_script.txt index 410b6c9e898..3275db20f38 100644 --- a/examples/pyomobook/gdp-ch/scont_script.txt +++ b/examples/pyomobook/gdp-ch/scont_script.txt @@ -4,17 +4,17 @@ Problem: Lower bound: 2.0 Upper bound: 2.0 Number of objectives: 1 - Number of constraints: 23 - Number of variables: 13 - Number of nonzeros: 40 + Number of constraints: 22 + Number of variables: 12 + Number of nonzeros: 39 Sense: minimize Solver: - Status: ok Termination condition: optimal Statistics: Branch and bound: - Number of bounded subproblems: 5 - Number of created subproblems: 5 + Number of bounded subproblems: 7 + Number of created subproblems: 7 Error rc: 0 Time: 0.006493806838989258 Solution: diff --git a/examples/pyomobook/gdp-ch/verify_scont.py b/examples/pyomobook/gdp-ch/verify_scont.py index 07e0202f931..db44024fe66 100644 --- a/examples/pyomobook/gdp-ch/verify_scont.py +++ b/examples/pyomobook/gdp-ch/verify_scont.py @@ -1,33 +1,39 @@ import os + def verify(obj, x, iv): assert obj == 2 for i in range(3): - assert sorted([iv[i,0],iv[i,1]]) == [0,1] - assert iv[i,0] == (0 if x[i] else 1) - assert iv[i,1] == (1 if x[i] else 0) + assert sorted([iv[i, 0], iv[i, 1]]) == [0, 1] + assert iv[i, 0] == (0 if x[i] else 1) + assert iv[i, 1] == (1 if x[i] else 0) assert sum(x.values()) >= 7 fname = os.path.basename(__file__) if fname.endswith('.pyc'): fname = fname[:-1] print("%s: OK: result validated" % (fname,)) + def verify_file(fname): import yaml - ans = yaml.load(open(fname,'r'), Loader=yaml.FullLoader) + + ans = yaml.load(open(fname, 'r'), Loader=yaml.FullLoader) assert ans['Solution'][0]['number of solutions'] == 1 obj = ans['Solution'][1]['Objective']['o']['Value'] - ZERO={'Value':0} + ZERO = {'Value': 0} x = {} iv = {} for i in range(3): - x[i] = ans['Solution'][1]['Variable'].get('x[%s]'%i, ZERO)['Value'] - iv[i,0] = ans['Solution'][1]['Variable'].get( - 'd_0[%s].binary_indicator_var'%i, ZERO)['Value'] - iv[i,1] = ans['Solution'][1]['Variable'].get( - 'd_nonzero[%s].binary_indicator_var'%i, ZERO)['Value'] + x[i] = ans['Solution'][1]['Variable'].get('x[%s]' % i, ZERO)['Value'] + iv[i, 0] = ans['Solution'][1]['Variable'].get( + 'd_0[%s].binary_indicator_var' % i, ZERO + )['Value'] + iv[i, 1] = ans['Solution'][1]['Variable'].get( + 'd_nonzero[%s].binary_indicator_var' % i, ZERO + )['Value'] verify(obj, x, iv) + def verify_model(model): assert len(model.solutions) == 1 obj = model.o() @@ -35,10 +41,12 @@ def verify_model(model): iv = {} for i in range(3): x[i] = model.x[i]() - iv[i,0] = model.d_0[i].binary_indicator_var() - iv[i,1] = model.d_nonzero[i].binary_indicator_var() + iv[i, 0] = model.d_0[i].binary_indicator_var() + iv[i, 1] = model.d_nonzero[i].binary_indicator_var() verify(obj, x, iv) + if __name__ == '__main__': import sys + verify_file(sys.argv[1]) diff --git a/examples/pyomobook/intro-ch/abstract5.py b/examples/pyomobook/intro-ch/abstract5.py index cd3a6f60370..2184ed7b3aa 100644 --- a/examples/pyomobook/intro-ch/abstract5.py +++ b/examples/pyomobook/intro-ch/abstract5.py @@ -10,11 +10,16 @@ model.x = pyo.Var(model.N, within=pyo.NonNegativeReals) + def obj_rule(model): - return sum(model.c[i]*model.x[i] for i in model.N) + return sum(model.c[i] * model.x[i] for i in model.N) + + model.obj = pyo.Objective(rule=obj_rule) + def con_rule(model, m): - return sum(model.a[m,i]*model.x[i] for i in model.N) \ - >= model.b[m] + return sum(model.a[m, i] * model.x[i] for i in model.N) >= model.b[m] + + model.con = pyo.Constraint(model.M, rule=con_rule) diff --git a/examples/pyomobook/intro-ch/coloring_concrete.py b/examples/pyomobook/intro-ch/coloring_concrete.py index c08471c6e17..107a31668c4 100644 --- a/examples/pyomobook/intro-ch/coloring_concrete.py +++ b/examples/pyomobook/intro-ch/coloring_concrete.py @@ -7,21 +7,42 @@ # # Define data for the graph of interest. -vertices = set(['Ar', 'Bo', 'Br', 'Ch', 'Co', 'Ec', - 'FG', 'Gu', 'Pa', 'Pe', 'Su', 'Ur', 'Ve']) - -edges = set([('FG','Su'), ('FG','Br'), ('Su','Gu'), - ('Su','Br'), ('Gu','Ve'), ('Gu','Br'), - ('Ve','Co'), ('Ve','Br'), ('Co','Ec'), - ('Co','Pe'), ('Co','Br'), ('Ec','Pe'), - ('Pe','Ch'), ('Pe','Bo'), ('Pe','Br'), - ('Ch','Ar'), ('Ch','Bo'), ('Ar','Ur'), - ('Ar','Br'), ('Ar','Pa'), ('Ar','Bo'), - ('Ur','Br'), ('Bo','Pa'), ('Bo','Br'), - ('Pa','Br')]) +vertices = set( + ['Ar', 'Bo', 'Br', 'Ch', 'Co', 'Ec', 'FG', 'Gu', 'Pa', 'Pe', 'Su', 'Ur', 'Ve'] +) + +edges = set( + [ + ('FG', 'Su'), + ('FG', 'Br'), + ('Su', 'Gu'), + ('Su', 'Br'), + ('Gu', 'Ve'), + ('Gu', 'Br'), + ('Ve', 'Co'), + ('Ve', 'Br'), + ('Co', 'Ec'), + ('Co', 'Pe'), + ('Co', 'Br'), + ('Ec', 'Pe'), + ('Pe', 'Ch'), + ('Pe', 'Bo'), + ('Pe', 'Br'), + ('Ch', 'Ar'), + ('Ch', 'Bo'), + ('Ar', 'Ur'), + ('Ar', 'Br'), + ('Ar', 'Pa'), + ('Ar', 'Bo'), + ('Ur', 'Br'), + ('Bo', 'Pa'), + ('Bo', 'Br'), + ('Pa', 'Br'), + ] +) ncolors = 4 -colors = range(1, ncolors+1) +colors = range(1, ncolors + 1) # Python import statement @@ -37,23 +58,20 @@ # Each node is colored with one color model.node_coloring = pyo.ConstraintList() for v in vertices: - model.node_coloring.add( - sum(model.x[v,c] for c in colors) == 1) + model.node_coloring.add(sum(model.x[v, c] for c in colors) == 1) # Nodes that share an edge cannot be colored the same model.edge_coloring = pyo.ConstraintList() -for v,w in edges: +for v, w in edges: for c in colors: - model.edge_coloring.add( - model.x[v,c] + model.x[w,c] <= 1) + model.edge_coloring.add(model.x[v, c] + model.x[w, c] <= 1) # Provide a lower bound on the minimum number of colors # that are needed model.min_coloring = pyo.ConstraintList() for v in vertices: for c in colors: - model.min_coloring.add( - model.y >= c * model.x[v,c]) + model.min_coloring.add(model.y >= c * model.x[v, c]) # Minimize the number of colors that are needed model.obj = pyo.Objective(expr=model.y) diff --git a/examples/pyomobook/intro-ch/concrete1.py b/examples/pyomobook/intro-ch/concrete1.py index eaf190f38bb..a39ca1d41cd 100644 --- a/examples/pyomobook/intro-ch/concrete1.py +++ b/examples/pyomobook/intro-ch/concrete1.py @@ -3,6 +3,6 @@ model = pyo.ConcreteModel() model.x_1 = pyo.Var(within=pyo.NonNegativeReals) model.x_2 = pyo.Var(within=pyo.NonNegativeReals) -model.obj = pyo.Objective(expr=model.x_1 + 2*model.x_2) -model.con1 = pyo.Constraint(expr=3*model.x_1 + 4*model.x_2 >= 1) -model.con2 = pyo.Constraint(expr=2*model.x_1 + 5*model.x_2 >= 2) +model.obj = pyo.Objective(expr=model.x_1 + 2 * model.x_2) +model.con1 = pyo.Constraint(expr=3 * model.x_1 + 4 * model.x_2 >= 1) +model.con2 = pyo.Constraint(expr=2 * model.x_1 + 5 * model.x_2 >= 2) diff --git a/examples/pyomobook/intro-ch/concrete1_generic.py b/examples/pyomobook/intro-ch/concrete1_generic.py index 712d218b854..de648470469 100644 --- a/examples/pyomobook/intro-ch/concrete1_generic.py +++ b/examples/pyomobook/intro-ch/concrete1_generic.py @@ -5,11 +5,16 @@ model.x = pyo.Var(mydata.N, within=pyo.NonNegativeReals) + def obj_rule(model): - return sum(mydata.c[i]*model.x[i] for i in mydata.N) + return sum(mydata.c[i] * model.x[i] for i in mydata.N) + + model.obj = pyo.Objective(rule=obj_rule) + def con_rule(model, m): - return sum(mydata.a[m,i]*model.x[i] for i in mydata.N) \ - >= mydata.b[m] + return sum(mydata.a[m, i] * model.x[i] for i in mydata.N) >= mydata.b[m] + + model.con = pyo.Constraint(mydata.M, rule=con_rule) diff --git a/examples/pyomobook/intro-ch/mydata.py b/examples/pyomobook/intro-ch/mydata.py index 9a001ced84e..83aa26bacd9 100644 --- a/examples/pyomobook/intro-ch/mydata.py +++ b/examples/pyomobook/intro-ch/mydata.py @@ -1,5 +1,5 @@ -N = [1,2] -M = [1,2] -c = {1:1, 2:2} -a = {(1,1):3, (1,2):4, (2,1):2, (2,2):5} -b = {1:1, 2:2} +N = [1, 2] +M = [1, 2] +c = {1: 1, 2: 2} +a = {(1, 1): 3, (1, 2): 4, (2, 1): 2, (2, 2): 5} +b = {1: 1, 2: 2} diff --git a/examples/pyomobook/intro-ch/pyomo.abstract5.txt b/examples/pyomobook/intro-ch/pyomo.abstract5.txt index e54f1751609..a9a4d0864f7 100644 --- a/examples/pyomobook/intro-ch/pyomo.abstract5.txt +++ b/examples/pyomobook/intro-ch/pyomo.abstract5.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 0.8 Upper bound: 0.8 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/intro-ch/pyomo.concrete1.txt b/examples/pyomobook/intro-ch/pyomo.concrete1.txt index e69da978e1b..eaebb25be6b 100644 --- a/examples/pyomobook/intro-ch/pyomo.concrete1.txt +++ b/examples/pyomobook/intro-ch/pyomo.concrete1.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 0.8 Upper bound: 0.8 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/intro-ch/pyomo.concrete1_generic.txt b/examples/pyomobook/intro-ch/pyomo.concrete1_generic.txt index 1a3320eebbe..317a71d56f8 100644 --- a/examples/pyomobook/intro-ch/pyomo.concrete1_generic.txt +++ b/examples/pyomobook/intro-ch/pyomo.concrete1_generic.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 0.8 Upper bound: 0.8 Number of objectives: 1 - Number of constraints: 3 - Number of variables: 3 - Number of nonzeros: 5 + Number of constraints: 2 + Number of variables: 2 + Number of nonzeros: 4 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/mpec-ch/ex1a.py b/examples/pyomobook/mpec-ch/ex1a.py index cec5cc539f9..30cd2842556 100644 --- a/examples/pyomobook/mpec-ch/ex1a.py +++ b/examples/pyomobook/mpec-ch/ex1a.py @@ -6,11 +6,13 @@ model = pyo.ConcreteModel() -model.x = pyo.Var( range(1,n+1) ) +model.x = pyo.Var(range(1, n + 1)) + +model.f = pyo.Objective(expr=sum(i * (model.x[i] - 1) ** 2 for i in range(1, n + 1))) -model.f = pyo.Objective(expr=sum(i*(model.x[i]-1)**2 - for i in range(1,n+1)) ) def compl_(model, i): - return complements(model.x[i] >= 0, model.x[i+1] >= 0) -model.compl = Complementarity( range(1,n), rule=compl_ ) + return complements(model.x[i] >= 0, model.x[i + 1] >= 0) + + +model.compl = Complementarity(range(1, n), rule=compl_) diff --git a/examples/pyomobook/mpec-ch/ex1b.py b/examples/pyomobook/mpec-ch/ex1b.py index 8ae412ce5bd..9592c81c4f6 100644 --- a/examples/pyomobook/mpec-ch/ex1b.py +++ b/examples/pyomobook/mpec-ch/ex1b.py @@ -6,13 +6,12 @@ model = pyo.ConcreteModel() -model.x = pyo.Var( range(1,n+1) ) +model.x = pyo.Var(range(1, n + 1)) -model.f = pyo.Objective(expr=sum(i*(model.x[i]-1)**2 - for i in range(1,n+1)) ) +model.f = pyo.Objective(expr=sum(i * (model.x[i] - 1) ** 2 for i in range(1, n + 1))) model.compl = ComplementarityList() -model.compl.add(complements(model.x[1]>=0, model.x[2]>=0)) -model.compl.add(complements(model.x[2]>=0, model.x[3]>=0)) -model.compl.add(complements(model.x[3]>=0, model.x[4]>=0)) -model.compl.add(complements(model.x[4]>=0, model.x[5]>=0)) +model.compl.add(complements(model.x[1] >= 0, model.x[2] >= 0)) +model.compl.add(complements(model.x[2] >= 0, model.x[3] >= 0)) +model.compl.add(complements(model.x[3] >= 0, model.x[4] >= 0)) +model.compl.add(complements(model.x[4] >= 0, model.x[5] >= 0)) diff --git a/examples/pyomobook/mpec-ch/ex1c.py b/examples/pyomobook/mpec-ch/ex1c.py index f255f13aa08..aad9c9b0d47 100644 --- a/examples/pyomobook/mpec-ch/ex1c.py +++ b/examples/pyomobook/mpec-ch/ex1c.py @@ -6,14 +6,16 @@ model = pyo.ConcreteModel() -model.x = pyo.Var( range(1,n+1) ) +model.x = pyo.Var(range(1, n + 1)) + +model.f = pyo.Objective(expr=sum(i * (model.x[i] - 1) ** 2 for i in range(1, n + 1))) -model.f = pyo.Objective(expr=sum(i*(model.x[i]-1)**2 - for i in range(1,n+1)) ) def compl_(model): yield complements(model.x[1] >= 0, model.x[2] >= 0) yield complements(model.x[2] >= 0, model.x[3] >= 0) yield complements(model.x[3] >= 0, model.x[4] >= 0) yield complements(model.x[4] >= 0, model.x[5] >= 0) -model.compl = ComplementarityList( rule=compl_ ) + + +model.compl = ComplementarityList(rule=compl_) diff --git a/examples/pyomobook/mpec-ch/ex1d.py b/examples/pyomobook/mpec-ch/ex1d.py index 7607513e18f..fa5247ff831 100644 --- a/examples/pyomobook/mpec-ch/ex1d.py +++ b/examples/pyomobook/mpec-ch/ex1d.py @@ -6,13 +6,15 @@ model = pyo.ConcreteModel() -model.x = pyo.Var( range(1,n+1) ) +model.x = pyo.Var(range(1, n + 1)) + +model.f = pyo.Objective(expr=sum(i * (model.x[i] - 1) ** 2 for i in range(1, n + 1))) -model.f = pyo.Objective(expr=sum(i*(model.x[i]-1)**2 - for i in range(1,n+1)) ) def compl_(model, i): if i == n: return Complementarity.Skip - return complements(model.x[i] >= 0, model.x[i+1] >= 0) -model.compl = Complementarity( range(1,n+1), rule=compl_ ) + return complements(model.x[i] >= 0, model.x[i + 1] >= 0) + + +model.compl = Complementarity(range(1, n + 1), rule=compl_) diff --git a/examples/pyomobook/mpec-ch/ex1e.py b/examples/pyomobook/mpec-ch/ex1e.py index 9d3ffcc5487..bf714411396 100644 --- a/examples/pyomobook/mpec-ch/ex1e.py +++ b/examples/pyomobook/mpec-ch/ex1e.py @@ -6,11 +6,10 @@ model = pyo.ConcreteModel() -model.x = pyo.Var( range(1,n+1) ) +model.x = pyo.Var(range(1, n + 1)) -model.f = pyo.Objective(expr=sum(i*(model.x[i]-1)**2 - for i in range(1,n+1)) ) +model.f = pyo.Objective(expr=sum(i * (model.x[i] - 1) ** 2 for i in range(1, n + 1))) model.compl = ComplementarityList( - rule=(complements(model.x[i] >= 0, model.x[i+1] >= 0) - for i in range(1,n)) ) + rule=(complements(model.x[i] >= 0, model.x[i + 1] >= 0) for i in range(1, n)) +) diff --git a/examples/pyomobook/mpec-ch/ex2.py b/examples/pyomobook/mpec-ch/ex2.py index 93919398b2c..c192ccc7a34 100644 --- a/examples/pyomobook/mpec-ch/ex2.py +++ b/examples/pyomobook/mpec-ch/ex2.py @@ -8,11 +8,9 @@ model.x = pyo.Var(within=pyo.NonNegativeReals) model.y = pyo.Var(within=pyo.NonNegativeReals) -model.f1 = pyo.Objective(expr=2*model.x - model.y) +model.f1 = pyo.Objective(expr=2 * model.x - model.y) -model.compl = Complementarity( - expr=complements(0 <= model.y, - model.y >= model.x)) +model.compl = Complementarity(expr=complements(0 <= model.y, model.y >= model.x)) # @transform: xfrm = pyo.TransformationFactory("mpec.simple_nonlinear") diff --git a/examples/pyomobook/mpec-ch/munson1.py b/examples/pyomobook/mpec-ch/munson1.py index 3287a8269c1..c7d171eb416 100644 --- a/examples/pyomobook/mpec-ch/munson1.py +++ b/examples/pyomobook/mpec-ch/munson1.py @@ -9,14 +9,10 @@ model.x2 = pyo.Var() model.x3 = pyo.Var() -model.f1 = Complementarity(expr=complements( - model.x1 >= 0, - model.x1 + 2*model.x2 + 3*model.x3 >= 1)) +model.f1 = Complementarity( + expr=complements(model.x1 >= 0, model.x1 + 2 * model.x2 + 3 * model.x3 >= 1) +) -model.f2 = Complementarity(expr=complements( - model.x2 >= 0, - model.x2 - model.x3 >= -1)) +model.f2 = Complementarity(expr=complements(model.x2 >= 0, model.x2 - model.x3 >= -1)) -model.f3 = Complementarity(expr=complements( - model.x3 >= 0, - model.x1 + model.x2 >= -1)) +model.f3 = Complementarity(expr=complements(model.x3 >= 0, model.x1 + model.x2 >= -1)) diff --git a/examples/pyomobook/mpec-ch/ralph1.py b/examples/pyomobook/mpec-ch/ralph1.py index 121b08b5d9c..1d44a303b84 100644 --- a/examples/pyomobook/mpec-ch/ralph1.py +++ b/examples/pyomobook/mpec-ch/ralph1.py @@ -4,11 +4,9 @@ model = pyo.ConcreteModel() -model.x = pyo.Var( within=pyo.NonNegativeReals ) -model.y = pyo.Var( within=pyo.NonNegativeReals ) +model.x = pyo.Var(within=pyo.NonNegativeReals) +model.y = pyo.Var(within=pyo.NonNegativeReals) -model.f1 = pyo.Objective( expr=2*model.x - model.y ) +model.f1 = pyo.Objective(expr=2 * model.x - model.y) -model.compl = Complementarity( - expr=complements(0 <= model.y, - model.y >= model.x) ) +model.compl = Complementarity(expr=complements(0 <= model.y, model.y >= model.x)) diff --git a/examples/pyomobook/nonlinear-ch/deer/DeerProblem.py b/examples/pyomobook/nonlinear-ch/deer/DeerProblem.py index 6841e3b81b5..c076a7f4687 100644 --- a/examples/pyomobook/nonlinear-ch/deer/DeerProblem.py +++ b/examples/pyomobook/nonlinear-ch/deer/DeerProblem.py @@ -3,24 +3,24 @@ model = pyo.AbstractModel() -model.p1 = pyo.Param(); -model.p2 = pyo.Param(); -model.p3 = pyo.Param(); -model.p4 = pyo.Param(); -model.p5 = pyo.Param(); -model.p6 = pyo.Param(); -model.p7 = pyo.Param(); -model.p8 = pyo.Param(); -model.p9 = pyo.Param(); -model.ps = pyo.Param(); - -model.f = pyo.Var(initialize = 20, within=pyo.PositiveReals) -model.d = pyo.Var(initialize = 20, within=pyo.PositiveReals) -model.b = pyo.Var(initialize = 20, within=pyo.PositiveReals) - -model.hf = pyo.Var(initialize = 20, within=pyo.PositiveReals) -model.hd = pyo.Var(initialize = 20, within=pyo.PositiveReals) -model.hb = pyo.Var(initialize = 20, within=pyo.PositiveReals) +model.p1 = pyo.Param() +model.p2 = pyo.Param() +model.p3 = pyo.Param() +model.p4 = pyo.Param() +model.p5 = pyo.Param() +model.p6 = pyo.Param() +model.p7 = pyo.Param() +model.p8 = pyo.Param() +model.p9 = pyo.Param() +model.ps = pyo.Param() + +model.f = pyo.Var(initialize=20, within=pyo.PositiveReals) +model.d = pyo.Var(initialize=20, within=pyo.PositiveReals) +model.b = pyo.Var(initialize=20, within=pyo.PositiveReals) + +model.hf = pyo.Var(initialize=20, within=pyo.PositiveReals) +model.hd = pyo.Var(initialize=20, within=pyo.PositiveReals) +model.hb = pyo.Var(initialize=20, within=pyo.PositiveReals) model.br = pyo.Var(initialize=1.5, within=pyo.PositiveReals) @@ -28,35 +28,58 @@ def obj_rule(m): - return 10*m.hb + m.hd + m.hf + return 10 * m.hb + m.hd + m.hf + + model.obj = pyo.Objective(rule=obj_rule, sense=pyo.maximize) + def f_bal_rule(m): - return m.f == m.p1*m.br*(m.p2/10.0*m.f + m.p3*m.d) - m.hf + return m.f == m.p1 * m.br * (m.p2 / 10.0 * m.f + m.p3 * m.d) - m.hf + + model.f_bal = pyo.Constraint(rule=f_bal_rule) + def d_bal_rule(m): - return m.d == m.p4*m.d + m.p5/2.0*m.f - m.hd + return m.d == m.p4 * m.d + m.p5 / 2.0 * m.f - m.hd + + model.d_bal = pyo.Constraint(rule=d_bal_rule) + def b_bal_rule(m): - return m.b == m.p6*m.b + m.p5/2.0*m.f - m.hb + return m.b == m.p6 * m.b + m.p5 / 2.0 * m.f - m.hb + + model.b_bal = pyo.Constraint(rule=b_bal_rule) + def food_cons_rule(m): - return m.c == m.p7*m.b + m.p8*m.d + m.p9*m.f + return m.c == m.p7 * m.b + m.p8 * m.d + m.p9 * m.f + + model.food_cons = pyo.Constraint(rule=food_cons_rule) + def supply_rule(m): return m.c <= m.ps + + model.supply = pyo.Constraint(rule=supply_rule) + def birth_rule(m): - return m.br == 1.1 + 0.8*(m.ps - m.c)/m.ps + return m.br == 1.1 + 0.8 * (m.ps - m.c) / m.ps + + model.birth = pyo.Constraint(rule=birth_rule) + def minbuck_rule(m): - return m.b >= 1.0/5.0*(0.4*m.f + m.d) + return m.b >= 1.0 / 5.0 * (0.4 * m.f + m.d) + + model.minbuck = pyo.Constraint(rule=minbuck_rule) # create the ConcreteModel @@ -65,4 +88,3 @@ def minbuck_rule(m): pyo.assert_optimal_termination(status) instance.pprint() - diff --git a/examples/pyomobook/nonlinear-ch/disease_est/disease_estimation.py b/examples/pyomobook/nonlinear-ch/disease_est/disease_estimation.py index 6ea87cc069b..4eb859dc349 100644 --- a/examples/pyomobook/nonlinear-ch/disease_est/disease_estimation.py +++ b/examples/pyomobook/nonlinear-ch/disease_est/disease_estimation.py @@ -8,31 +8,46 @@ model.P_REP_CASES = pyo.Param(model.S_SI) model.P_POP = pyo.Param() -model.I = pyo.Var(model.S_SI, bounds=(0,model.P_POP), initialize=1) -model.S = pyo.Var(model.S_SI, bounds=(0,model.P_POP), initialize=300) +model.I = pyo.Var(model.S_SI, bounds=(0, model.P_POP), initialize=1) +model.S = pyo.Var(model.S_SI, bounds=(0, model.P_POP), initialize=300) model.beta = pyo.Var(bounds=(0.05, 70)) model.alpha = pyo.Var(bounds=(0.5, 1.5)) model.eps_I = pyo.Var(model.S_SI, initialize=0.0) + def _objective(model): - return sum((model.eps_I[i])**2 for i in model.S_SI) + return sum((model.eps_I[i]) ** 2 for i in model.S_SI) + + model.objective = pyo.Objective(rule=_objective, sense=pyo.minimize) + def _InfDynamics(model, i): if i != 1: - return model.I[i] == (model.beta * model.S[i-1] * model.I[i-1]**model.alpha)/model.P_POP + return ( + model.I[i] + == (model.beta * model.S[i - 1] * model.I[i - 1] ** model.alpha) + / model.P_POP + ) return pyo.Constraint.Skip + model.InfDynamics = pyo.Constraint(model.S_SI, rule=_InfDynamics) + def _SusDynamics(model, i): if i != 1: - return model.S[i] == model.S[i-1] - model.I[i] + return model.S[i] == model.S[i - 1] - model.I[i] return pyo.Constraint.Skip + + model.SusDynamics = pyo.Constraint(model.S_SI, rule=_SusDynamics) + def _Data(model, i): - return model.P_REP_CASES[i] == model.I[i]+model.eps_I[i] + return model.P_REP_CASES[i] == model.I[i] + model.eps_I[i] + + model.Data = pyo.Constraint(model.S_SI, rule=_Data) # create the ConcreteModel diff --git a/examples/pyomobook/nonlinear-ch/multimodal/multimodal_init1.py b/examples/pyomobook/nonlinear-ch/multimodal/multimodal_init1.py index 896828ed785..c435cafc3d5 100644 --- a/examples/pyomobook/nonlinear-ch/multimodal/multimodal_init1.py +++ b/examples/pyomobook/nonlinear-ch/multimodal/multimodal_init1.py @@ -3,11 +3,14 @@ from math import pi model = pyo.ConcreteModel() -model.x = pyo.Var(initialize = 0.25, bounds=(0,4)) -model.y = pyo.Var(initialize = 0.25, bounds=(0,4)) +model.x = pyo.Var(initialize=0.25, bounds=(0, 4)) +model.y = pyo.Var(initialize=0.25, bounds=(0, 4)) + def multimodal(m): - return (2-pyo.cos(pi*m.x)-pyo.cos(pi*m.y)) * (m.x**2) * (m.y**2) + return (2 - pyo.cos(pi * m.x) - pyo.cos(pi * m.y)) * (m.x**2) * (m.y**2) + + model.obj = pyo.Objective(rule=multimodal, sense=pyo.minimize) status = pyo.SolverFactory('ipopt').solve(model) diff --git a/examples/pyomobook/nonlinear-ch/multimodal/multimodal_init2.py b/examples/pyomobook/nonlinear-ch/multimodal/multimodal_init2.py index f3fecdc22b2..aa0dbae1e66 100644 --- a/examples/pyomobook/nonlinear-ch/multimodal/multimodal_init2.py +++ b/examples/pyomobook/nonlinear-ch/multimodal/multimodal_init2.py @@ -3,12 +3,15 @@ model = pyo.ConcreteModel() # @init: -model.x = pyo.Var(initialize = 2.1, bounds=(0,4)) -model.y = pyo.Var(initialize = 2.1, bounds=(0,4)) +model.x = pyo.Var(initialize=2.1, bounds=(0, 4)) +model.y = pyo.Var(initialize=2.1, bounds=(0, 4)) # @:init + def multimodal(m): - return (2-pyo.cos(pi*m.x)-pyo.cos(pi*m.y)) * (m.x**2) * (m.y**2) + return (2 - pyo.cos(pi * m.x) - pyo.cos(pi * m.y)) * (m.x**2) * (m.y**2) + + model.obj = pyo.Objective(rule=multimodal, sense=pyo.minimize) status = pyo.SolverFactory('ipopt').solve(model) diff --git a/examples/pyomobook/nonlinear-ch/react_design/ReactorDesign.py b/examples/pyomobook/nonlinear-ch/react_design/ReactorDesign.py index 6b3eef5e519..814b4a5938e 100644 --- a/examples/pyomobook/nonlinear-ch/react_design/ReactorDesign.py +++ b/examples/pyomobook/nonlinear-ch/react_design/ReactorDesign.py @@ -1,6 +1,7 @@ import pyomo.environ import pyomo.environ as pyo + def create_model(k1, k2, k3, caf): # create the concrete model model = pyo.ConcreteModel() @@ -13,30 +14,38 @@ def create_model(k1, k2, k3, caf): model.cd = pyo.Var(initialize=1000.0, within=pyo.PositiveReals) # create the objective - model.obj = pyo.Objective(expr = model.cb, sense=pyo.maximize) + model.obj = pyo.Objective(expr=model.cb, sense=pyo.maximize) # create the constraints - model.ca_bal = pyo.Constraint(expr = (0 == model.sv * caf \ - - model.sv * model.ca - k1 * model.ca \ - - 2.0 * k3 * model.ca ** 2.0)) - - model.cb_bal = pyo.Constraint(expr=(0 == -model.sv * model.cb \ - + k1 * model.ca - k2 * model.cb)) - - model.cc_bal = pyo.Constraint(expr=(0 == -model.sv * model.cc \ - + k2 * model.cb)) - - model.cd_bal = pyo.Constraint(expr=(0 == -model.sv * model.cd \ - + k3 * model.ca ** 2.0)) + model.ca_bal = pyo.Constraint( + expr=( + 0 + == model.sv * caf + - model.sv * model.ca + - k1 * model.ca + - 2.0 * k3 * model.ca**2.0 + ) + ) + + model.cb_bal = pyo.Constraint( + expr=(0 == -model.sv * model.cb + k1 * model.ca - k2 * model.cb) + ) + + model.cc_bal = pyo.Constraint(expr=(0 == -model.sv * model.cc + k2 * model.cb)) + + model.cd_bal = pyo.Constraint( + expr=(0 == -model.sv * model.cd + k3 * model.ca**2.0) + ) return model -if __name__ =='__main__': + +if __name__ == '__main__': # solve a single instance of the problem - k1 = 5.0/6.0 # min^-1 - k2 = 5.0/3.0 # min^-1 - k3 = 1.0/6000.0 # m^3/(gmol min) - caf = 10000.0 # gmol/m^3 + k1 = 5.0 / 6.0 # min^-1 + k2 = 5.0 / 3.0 # min^-1 + k3 = 1.0 / 6000.0 # m^3/(gmol min) + caf = 10000.0 # gmol/m^3 m = create_model(k1, k2, k3, caf) status = pyo.SolverFactory('ipopt').solve(m) diff --git a/examples/pyomobook/nonlinear-ch/react_design/ReactorDesignTable.py b/examples/pyomobook/nonlinear-ch/react_design/ReactorDesignTable.py index 4c0d84754a8..a242c85fbc2 100644 --- a/examples/pyomobook/nonlinear-ch/react_design/ReactorDesignTable.py +++ b/examples/pyomobook/nonlinear-ch/react_design/ReactorDesignTable.py @@ -2,21 +2,20 @@ from ReactorDesign import create_model # set the data (native Python data) -k1 = 5.0/6.0 # min^-1 -k2 = 5.0/3.0 # min^-1 -k3 = 1.0/6000.0 # m^3/(gmol min) +k1 = 5.0 / 6.0 # min^-1 +k2 = 5.0 / 3.0 # min^-1 +k3 = 1.0 / 6000.0 # m^3/(gmol min) # solve the model for different values of caf and report results print('{:>10s}\t{:>10s}\t{:>10s}'.format('CAf', 'SV', 'CB')) -for cafi in range(1,11): - caf = cafi*1000.0 # gmol/m^3 +for cafi in range(1, 11): + caf = cafi * 1000.0 # gmol/m^3 # create the model with the new data # note, we could do this more efficiently with # mutable parameters m = create_model(k1, k2, k3, caf) - + # solve the problem status = pyo.SolverFactory('ipopt').solve(m) - print("{:10g}\t{:10g}\t{:10g}".\ - format(caf, pyo.value(m.sv), pyo.value(m.cb))) + print("{:10g}\t{:10g}\t{:10g}".format(caf, pyo.value(m.sv), pyo.value(m.cb))) diff --git a/examples/pyomobook/nonlinear-ch/rosen/rosenbrock.py b/examples/pyomobook/nonlinear-ch/rosen/rosenbrock.py index 4724f1be191..e1633e2df69 100644 --- a/examples/pyomobook/nonlinear-ch/rosen/rosenbrock.py +++ b/examples/pyomobook/nonlinear-ch/rosen/rosenbrock.py @@ -6,9 +6,11 @@ model.x = pyo.Var(initialize=1.5) model.y = pyo.Var(initialize=1.5) + def rosenbrock(model): - return (1.0 - model.x)**2 \ - + 100.0*(model.y - model.x**2)**2 + return (1.0 - model.x) ** 2 + 100.0 * (model.y - model.x**2) ** 2 + + model.obj = pyo.Objective(rule=rosenbrock, sense=pyo.minimize) status = pyo.SolverFactory('ipopt').solve(model) diff --git a/examples/pyomobook/optimization-ch/ConcHLinScript.py b/examples/pyomobook/optimization-ch/ConcHLinScript.py index 7095069ed48..8481a83afbf 100644 --- a/examples/pyomobook/optimization-ch/ConcHLinScript.py +++ b/examples/pyomobook/optimization-ch/ConcHLinScript.py @@ -1,24 +1,25 @@ # ConcHLinScript.py - Linear (H) as a script import pyomo.environ as pyo + def IC_model_linear(A, h, d, c, b, u): model = pyo.ConcreteModel(name="Linear (H)") def x_bounds(m, i): - return (0,u[i]) + return (0, u[i]) model.x = pyo.Var(A, bounds=x_bounds) def obj_rule(model): - return sum(h[i]*(1 - u[i]/d[i]**2) * model.x[i] for i in A) + return sum(h[i] * (1 - u[i] / d[i] ** 2) * model.x[i] for i in A) model.z = pyo.Objective(rule=obj_rule, sense=pyo.maximize) - model.budgetconstr = \ - pyo.Constraint(expr = sum(c[i] * model.x[i] for i in A) <= b) + model.budgetconstr = pyo.Constraint(expr=sum(c[i] * model.x[i] for i in A) <= b) return model + # Main script # @main: @@ -32,7 +33,7 @@ def obj_rule(model): model = IC_model_linear(A, h, d, c, b, u) opt = pyo.SolverFactory('glpk') -results = opt.solve(model) # solves and updates model +results = opt.solve(model) # solves and updates model pyo.assert_optimal_termination(results) model.display() diff --git a/examples/pyomobook/optimization-ch/ConcreteH.py b/examples/pyomobook/optimization-ch/ConcreteH.py index dfb08816d98..1bf2a9446c1 100644 --- a/examples/pyomobook/optimization-ch/ConcreteH.py +++ b/examples/pyomobook/optimization-ch/ConcreteH.py @@ -3,23 +3,25 @@ # @fct: import pyomo.environ as pyo -def IC_model(A, h, d, c, b, u): - model = pyo.ConcreteModel(name = "(H)") +def IC_model(A, h, d, c, b, u): + model = pyo.ConcreteModel(name="(H)") def x_bounds(m, i): - return (0,u[i]) + return (0, u[i]) + model.x = pyo.Var(A, bounds=x_bounds) def z_rule(model): - return sum(h[i] * (model.x[i] - (model.x[i]/d[i])**2) - for i in A) + return sum(h[i] * (model.x[i] - (model.x[i] / d[i]) ** 2) for i in A) + model.z = pyo.Objective(rule=z_rule, sense=pyo.maximize) - model.budgetconstr = pyo.Constraint(\ - expr = sum(c[i]*model.x[i] for i in A) <= b) - + model.budgetconstr = pyo.Constraint(expr=sum(c[i] * model.x[i] for i in A) <= b) + return model + + # @:fct if __name__ == "__main__": diff --git a/examples/pyomobook/optimization-ch/ConcreteHLinear.py b/examples/pyomobook/optimization-ch/ConcreteHLinear.py index 8a4210ded07..0b42d5e2187 100644 --- a/examples/pyomobook/optimization-ch/ConcreteHLinear.py +++ b/examples/pyomobook/optimization-ch/ConcreteHLinear.py @@ -10,18 +10,22 @@ b = 12 u = {'I_C_Scoops': 100, 'Peanuts': 40.6} + def x_bounds(m, i): - return (0,u[i]) + return (0, u[i]) + + model.x = pyo.Var(A, bounds=x_bounds) + # @obj: def obj_rule(model): - return sum(h[i]*(1 - u[i]/d[i]**2) * model.x[i] \ - for i in A) + return sum(h[i] * (1 - u[i] / d[i] ** 2) * model.x[i] for i in A) + + # @:obj model.z = pyo.Objective(rule=obj_rule, sense=pyo.maximize) -model.budgetconstr = \ - pyo.Constraint(expr = sum(c[i]*model.x[i] for i in A) <= b) +model.budgetconstr = pyo.Constraint(expr=sum(c[i] * model.x[i] for i in A) <= b) model.pprint() diff --git a/examples/pyomobook/optimization-ch/IC_model_dict.py b/examples/pyomobook/optimization-ch/IC_model_dict.py index 17e5d2293e0..4c54ef83701 100644 --- a/examples/pyomobook/optimization-ch/IC_model_dict.py +++ b/examples/pyomobook/optimization-ch/IC_model_dict.py @@ -3,11 +3,12 @@ # @fct: import pyomo.environ as pyo + def IC_model_dict(ICD): # ICD is a dictionary with the data for the problem - model = pyo.ConcreteModel(name = "(H)") - + model = pyo.ConcreteModel(name="(H)") + model.A = pyo.Set(initialize=ICD["A"]) model.h = pyo.Param(model.A, initialize=ICD["h"]) @@ -18,20 +19,24 @@ def IC_model_dict(ICD): def xbounds_rule(model, i): return (0, model.u[i]) + model.x = pyo.Var(model.A, bounds=xbounds_rule) def obj_rule(model): - return sum(model.h[i] * \ - (model.x[i] - (model.x[i]/model.d[i])**2)\ - for i in model.A) - model.z = pyo.Objective(rule=obj_rule,sense=pyo.maximize) + return sum( + model.h[i] * (model.x[i] - (model.x[i] / model.d[i]) ** 2) for i in model.A + ) + + model.z = pyo.Objective(rule=obj_rule, sense=pyo.maximize) def budget_rule(model): - return sum(model.c[i]*model.x[i]\ - for i in model.A) <= model.b + return sum(model.c[i] * model.x[i] for i in model.A) <= model.b + model.budgetconstr = pyo.Constraint(rule=budget_rule) return model + + # @:fct if __name__ == "__main__": diff --git a/examples/pyomobook/overview-ch/pyomo.wl_abstract.txt b/examples/pyomobook/overview-ch/pyomo.wl_abstract.txt index 7a2fcb43646..d5d977f3d3e 100644 --- a/examples/pyomobook/overview-ch/pyomo.wl_abstract.txt +++ b/examples/pyomobook/overview-ch/pyomo.wl_abstract.txt @@ -22,9 +22,9 @@ Problem: Lower bound: 2745.0 Upper bound: 2745.0 Number of objectives: 1 - Number of constraints: 18 - Number of variables: 16 - Number of nonzeros: 40 + Number of constraints: 17 + Number of variables: 15 + Number of nonzeros: 39 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/examples/pyomobook/overview-ch/var_obj_con_snippet.py b/examples/pyomobook/overview-ch/var_obj_con_snippet.py index 78cc0461c98..49bb7c1276b 100644 --- a/examples/pyomobook/overview-ch/var_obj_con_snippet.py +++ b/examples/pyomobook/overview-ch/var_obj_con_snippet.py @@ -3,7 +3,7 @@ model = pyo.ConcreteModel() # @body: model.x = pyo.Var() -model.y = pyo.Var(bounds=(-2,4)) +model.y = pyo.Var(bounds=(-2, 4)) model.z = pyo.Var(initialize=1.0, within=pyo.NonNegativeReals) model.obj = pyo.Objective(expr=model.x**2 + model.y + model.z) diff --git a/examples/pyomobook/overview-ch/wl_abstract.py b/examples/pyomobook/overview-ch/wl_abstract.py index 40b1729d696..f35a5327bfb 100644 --- a/examples/pyomobook/overview-ch/wl_abstract.py +++ b/examples/pyomobook/overview-ch/wl_abstract.py @@ -7,28 +7,40 @@ model.M = pyo.Set() # @:setdecl # @paramdecl: -model.d = pyo.Param(model.N,model.M) +model.d = pyo.Param(model.N, model.M) model.P = pyo.Param() # @:paramdecl # @vardecl: -model.x = pyo.Var(model.N, model.M, bounds=(0,1)) +model.x = pyo.Var(model.N, model.M, bounds=(0, 1)) model.y = pyo.Var(model.N, within=pyo.Binary) # @:vardecl + def obj_rule(model): - return sum(model.d[n,m]*model.x[n,m] for n in model.N for m in model.M) + return sum(model.d[n, m] * model.x[n, m] for n in model.N for m in model.M) + + model.obj = pyo.Objective(rule=obj_rule) + # @deliver: def one_per_cust_rule(model, m): - return sum(model.x[n,m] for n in model.N) == 1 + return sum(model.x[n, m] for n in model.N) == 1 + + model.one_per_cust = pyo.Constraint(model.M, rule=one_per_cust_rule) # @:deliver + def warehouse_active_rule(model, n, m): - return model.x[n,m] <= model.y[n] + return model.x[n, m] <= model.y[n] + + model.warehouse_active = pyo.Constraint(model.N, model.M, rule=warehouse_active_rule) + def num_warehouses_rule(model): return sum(model.y[n] for n in model.N) <= model.P + + model.num_warehouses = pyo.Constraint(rule=num_warehouses_rule) diff --git a/examples/pyomobook/overview-ch/wl_abstract_script.py b/examples/pyomobook/overview-ch/wl_abstract_script.py index b070e01820b..0b042405714 100644 --- a/examples/pyomobook/overview-ch/wl_abstract_script.py +++ b/examples/pyomobook/overview-ch/wl_abstract_script.py @@ -6,26 +6,38 @@ model.N = pyo.Set() model.M = pyo.Set() -model.d = pyo.Param(model.N,model.M) +model.d = pyo.Param(model.N, model.M) model.P = pyo.Param() -model.x = pyo.Var(model.N, model.M, bounds=(0,1)) +model.x = pyo.Var(model.N, model.M, bounds=(0, 1)) model.y = pyo.Var(model.N, within=pyo.Binary) + def obj_rule(model): - return sum(model.d[n,m]*model.x[n,m] for n in model.N for m in model.M) + return sum(model.d[n, m] * model.x[n, m] for n in model.N for m in model.M) + + model.obj = pyo.Objective(rule=obj_rule) + def one_per_cust_rule(model, m): - return sum(model.x[n,m] for n in model.N) == 1 + return sum(model.x[n, m] for n in model.N) == 1 + + model.one_per_cust = pyo.Constraint(model.M, rule=one_per_cust_rule) + def warehouse_active_rule(model, n, m): - return model.x[n,m] <= model.y[n] + return model.x[n, m] <= model.y[n] + + model.warehouse_active = pyo.Constraint(model.N, model.M, rule=warehouse_active_rule) + def num_warehouses_rule(model): return sum(model.y[n] for n in model.N) <= model.P + + model.num_warehouses = pyo.Constraint(rule=num_warehouses_rule) # @abstractsolve: diff --git a/examples/pyomobook/overview-ch/wl_concrete.py b/examples/pyomobook/overview-ch/wl_concrete.py index de3e76cd15c..29316304f0a 100644 --- a/examples/pyomobook/overview-ch/wl_concrete.py +++ b/examples/pyomobook/overview-ch/wl_concrete.py @@ -2,28 +2,33 @@ # ConcreteModel version of warehouse location problem import pyomo.environ as pyo + def create_warehouse_model(N, M, d, P): model = pyo.ConcreteModel(name="(WL)") - model.x = pyo.Var(N, M, bounds=(0,1)) + model.x = pyo.Var(N, M, bounds=(0, 1)) model.y = pyo.Var(N, within=pyo.Binary) def obj_rule(mdl): - return sum(d[n,m]*mdl.x[n,m] for n in N for m in M) + return sum(d[n, m] * mdl.x[n, m] for n in N for m in M) + model.obj = pyo.Objective(rule=obj_rule) -# @deliver: + # @deliver: def demand_rule(mdl, m): - return sum(mdl.x[n,m] for n in N) == 1 + return sum(mdl.x[n, m] for n in N) == 1 + model.demand = pyo.Constraint(M, rule=demand_rule) -# @:deliver + # @:deliver def warehouse_active_rule(mdl, n, m): - return mdl.x[n,m] <= mdl.y[n] + return mdl.x[n, m] <= mdl.y[n] + model.warehouse_active = pyo.Constraint(N, M, rule=warehouse_active_rule) def num_warehouses_rule(mdl): return sum(mdl.y[n] for n in N) <= P + model.num_warehouses = pyo.Constraint(rule=num_warehouses_rule) return model diff --git a/examples/pyomobook/overview-ch/wl_concrete_script.py b/examples/pyomobook/overview-ch/wl_concrete_script.py index 8bf8b8e0669..278937f5aed 100644 --- a/examples/pyomobook/overview-ch/wl_concrete_script.py +++ b/examples/pyomobook/overview-ch/wl_concrete_script.py @@ -1,8 +1,8 @@ -# wl_concrete_script.py +# wl_concrete_script.py # Solve an instance of the warehouse location problem # Import Pyomo environment and model -import pyomo.environ as pyo +import pyomo.environ as pyo from wl_concrete import create_warehouse_model # Establish the data for this model (this could also be @@ -11,18 +11,20 @@ N = ['Harlingen', 'Memphis', 'Ashland'] M = ['NYC', 'LA', 'Chicago', 'Houston'] -d = {('Harlingen', 'NYC'): 1956, \ - ('Harlingen', 'LA'): 1606, \ - ('Harlingen', 'Chicago'): 1410, \ - ('Harlingen', 'Houston'): 330, \ - ('Memphis', 'NYC'): 1096, \ - ('Memphis', 'LA'): 1792, \ - ('Memphis', 'Chicago'): 531, \ - ('Memphis', 'Houston'): 567, \ - ('Ashland', 'NYC'): 485, \ - ('Ashland', 'LA'): 2322, \ - ('Ashland', 'Chicago'): 324, \ - ('Ashland', 'Houston'): 1236 } +d = { + ('Harlingen', 'NYC'): 1956, + ('Harlingen', 'LA'): 1606, + ('Harlingen', 'Chicago'): 1410, + ('Harlingen', 'Houston'): 330, + ('Memphis', 'NYC'): 1096, + ('Memphis', 'LA'): 1792, + ('Memphis', 'Chicago'): 531, + ('Memphis', 'Houston'): 567, + ('Ashland', 'NYC'): 485, + ('Ashland', 'LA'): 2322, + ('Ashland', 'Chicago'): 324, + ('Ashland', 'Houston'): 1236, +} P = 2 # Create the Pyomo model @@ -34,5 +36,5 @@ pyo.assert_optimal_termination(res) # @output: -model.y.pprint() # Print the optimal warehouse locations +model.y.pprint() # Print the optimal warehouse locations # @:output diff --git a/examples/pyomobook/overview-ch/wl_excel.py b/examples/pyomobook/overview-ch/wl_excel.py index 6183eb987c0..1c4ad997225 100644 --- a/examples/pyomobook/overview-ch/wl_excel.py +++ b/examples/pyomobook/overview-ch/wl_excel.py @@ -8,7 +8,7 @@ N = list(df.index.map(str)) M = list(df.columns.map(str)) -d = {(r, c):df.at[r,c] for r in N for c in M} +d = {(r, c): df.at[r, c] for r in N for c in M} P = 2 # create the Pyomo model @@ -19,5 +19,5 @@ solver.solve(model) # @output: -model.y.pprint() # print the optimal warehouse locations +model.y.pprint() # print the optimal warehouse locations # @:output diff --git a/examples/pyomobook/overview-ch/wl_list.py b/examples/pyomobook/overview-ch/wl_list.py index a419429a0f5..64db76be548 100644 --- a/examples/pyomobook/overview-ch/wl_list.py +++ b/examples/pyomobook/overview-ch/wl_list.py @@ -7,37 +7,39 @@ N = ['Harlingen', 'Memphis', 'Ashland'] M = ['NYC', 'LA', 'Chicago', 'Houston'] # @:data -d = {('Harlingen', 'NYC'): 1956, \ - ('Harlingen', 'LA'): 1606, \ - ('Harlingen', 'Chicago'): 1410, \ - ('Harlingen', 'Houston'): 330, \ - ('Memphis', 'NYC'): 1096, \ - ('Memphis', 'LA'): 1792, \ - ('Memphis', 'Chicago'): 531, \ - ('Memphis', 'Houston'): 567, \ - ('Ashland', 'NYC'): 485, \ - ('Ashland', 'LA'): 2322, \ - ('Ashland', 'Chicago'): 324, \ - ('Ashland', 'Houston'): 1236 } +d = { + ('Harlingen', 'NYC'): 1956, + ('Harlingen', 'LA'): 1606, + ('Harlingen', 'Chicago'): 1410, + ('Harlingen', 'Houston'): 330, + ('Memphis', 'NYC'): 1096, + ('Memphis', 'LA'): 1792, + ('Memphis', 'Chicago'): 531, + ('Memphis', 'Houston'): 567, + ('Ashland', 'NYC'): 485, + ('Ashland', 'LA'): 2322, + ('Ashland', 'Chicago'): 324, + ('Ashland', 'Houston'): 1236, +} P = 2 # @vars: -model.x = pyo.Var(N, M, bounds=(0,1)) +model.x = pyo.Var(N, M, bounds=(0, 1)) model.y = pyo.Var(N, within=pyo.Binary) # @:vars # @obj: -model.obj = pyo.Objective(expr=sum(d[n,m]*model.x[n,m] for n in N for m in M)) +model.obj = pyo.Objective(expr=sum(d[n, m] * model.x[n, m] for n in N for m in M)) # @:obj # @conslist: model.demand = pyo.ConstraintList() for m in M: - model.demand.add(sum(model.x[n,m] for n in N) == 1) + model.demand.add(sum(model.x[n, m] for n in N) == 1) model.warehouse_active = pyo.ConstraintList() for n in N: for m in M: - model.warehouse_active.add(model.x[n,m] <= model.y[n]) + model.warehouse_active.add(model.x[n, m] <= model.y[n]) # @:conslist # @scalarcon: diff --git a/examples/pyomobook/overview-ch/wl_mutable.py b/examples/pyomobook/overview-ch/wl_mutable.py index 281b46b7ab5..e5c4f5e9dbb 100644 --- a/examples/pyomobook/overview-ch/wl_mutable.py +++ b/examples/pyomobook/overview-ch/wl_mutable.py @@ -1,29 +1,34 @@ # wl_mutable.py: warehouse location problem with mutable param import pyomo.environ as pyo + def create_warehouse_model(N, M, d, P): model = pyo.ConcreteModel(name="(WL)") - model.x = pyo.Var(N, M, bounds=(0,1)) + model.x = pyo.Var(N, M, bounds=(0, 1)) model.y = pyo.Var(N, within=pyo.Binary) model.P = pyo.Param(initialize=P, mutable=True) def obj_rule(mdl): - return sum(d[n,m]*mdl.x[n,m] for n in N for m in M) + return sum(d[n, m] * mdl.x[n, m] for n in N for m in M) + model.obj = pyo.Objective(rule=obj_rule) -# @deliver: + # @deliver: def demand_rule(mdl, m): - return sum(mdl.x[n,m] for n in N) == 1 + return sum(mdl.x[n, m] for n in N) == 1 + model.demand = pyo.Constraint(M, rule=demand_rule) -# @:deliver + # @:deliver def warehouse_active_rule(mdl, n, m): - return mdl.x[n,m] <= mdl.y[n] + return mdl.x[n, m] <= mdl.y[n] + model.warehouse_active = pyo.Constraint(N, M, rule=warehouse_active_rule) def num_warehouses_rule(mdl): return sum(mdl.y[n] for n in N) <= mdl.P + model.num_warehouses = pyo.Constraint(rule=num_warehouses_rule) return model diff --git a/examples/pyomobook/overview-ch/wl_mutable_excel.py b/examples/pyomobook/overview-ch/wl_mutable_excel.py index a1a27a0a1f4..0906fbb25b3 100644 --- a/examples/pyomobook/overview-ch/wl_mutable_excel.py +++ b/examples/pyomobook/overview-ch/wl_mutable_excel.py @@ -8,7 +8,7 @@ N = list(df.index.map(str)) M = list(df.columns.map(str)) -d = {(r, c):df.at[r,c] for r in N for c in M} +d = {(r, c): df.at[r, c] for r in N for c in M} P = 2 # create the Pyomo model @@ -18,10 +18,8 @@ solver = pyo.SolverFactory('glpk') # loop over values for mutable parameter P -for n in range(1,10): +for n in range(1, 10): model.P = n res = solver.solve(model) pyo.assert_optimal_termination(res) - print('# warehouses:', n, \ - 'delivery cost:', pyo.value(model.obj)) - + print('# warehouses:', n, 'delivery cost:', pyo.value(model.obj)) diff --git a/examples/pyomobook/overview-ch/wl_scalar.py b/examples/pyomobook/overview-ch/wl_scalar.py index 11aa3c3f13b..ac10fbe8265 100644 --- a/examples/pyomobook/overview-ch/wl_scalar.py +++ b/examples/pyomobook/overview-ch/wl_scalar.py @@ -4,35 +4,41 @@ model = pyo.ConcreteModel() # @vars: -model.x_Harlingen_NYC = pyo.Var(bounds=(0,1)) -model.x_Harlingen_LA = pyo.Var(bounds=(0,1)) -model.x_Harlingen_Chicago = pyo.Var(bounds=(0,1)) -model.x_Harlingen_Houston = pyo.Var(bounds=(0,1)) -model.x_Memphis_NYC = pyo.Var(bounds=(0,1)) -model.x_Memphis_LA = pyo.Var(bounds=(0,1)) -#... +model.x_Harlingen_NYC = pyo.Var(bounds=(0, 1)) +model.x_Harlingen_LA = pyo.Var(bounds=(0, 1)) +model.x_Harlingen_Chicago = pyo.Var(bounds=(0, 1)) +model.x_Harlingen_Houston = pyo.Var(bounds=(0, 1)) +model.x_Memphis_NYC = pyo.Var(bounds=(0, 1)) +model.x_Memphis_LA = pyo.Var(bounds=(0, 1)) +# ... # @:vars -model.x_Memphis_Chicago = pyo.Var(bounds=(0,1)) -model.x_Memphis_Houston = pyo.Var(bounds=(0,1)) -model.x_Ashland_NYC = pyo.Var(bounds=(0,1)) -model.x_Ashland_LA = pyo.Var(bounds=(0,1)) -model.x_Ashland_Chicago = pyo.Var(bounds=(0,1)) -model.x_Ashland_Houston = pyo.Var(bounds=(0,1)) +model.x_Memphis_Chicago = pyo.Var(bounds=(0, 1)) +model.x_Memphis_Houston = pyo.Var(bounds=(0, 1)) +model.x_Ashland_NYC = pyo.Var(bounds=(0, 1)) +model.x_Ashland_LA = pyo.Var(bounds=(0, 1)) +model.x_Ashland_Chicago = pyo.Var(bounds=(0, 1)) +model.x_Ashland_Houston = pyo.Var(bounds=(0, 1)) model.y_Harlingen = pyo.Var(within=pyo.Binary) -model.y_Memphis= pyo.Var(within=pyo.Binary) -model.y_Ashland= pyo.Var(within=pyo.Binary) +model.y_Memphis = pyo.Var(within=pyo.Binary) +model.y_Ashland = pyo.Var(within=pyo.Binary) P = 2 # @cons: -model.one_warehouse_for_NYC = pyo.Constraint(expr=model.x_Harlingen_NYC + model.x_Memphis_NYC + model.x_Ashland_NYC == 1) +model.one_warehouse_for_NYC = pyo.Constraint( + expr=model.x_Harlingen_NYC + model.x_Memphis_NYC + model.x_Ashland_NYC == 1 +) -model.one_warehouse_for_LA = pyo.Constraint(expr=model.x_Harlingen_LA + model.x_Memphis_LA + model.x_Ashland_LA == 1) -#... +model.one_warehouse_for_LA = pyo.Constraint( + expr=model.x_Harlingen_LA + model.x_Memphis_LA + model.x_Ashland_LA == 1 +) +# ... # @:cons # @maxY: -model.maxY = pyo.Constraint(expr=model.y_Harlingen + model.y_Memphis + model.y_Ashland <= P) +model.maxY = pyo.Constraint( + expr=model.y_Harlingen + model.y_Memphis + model.y_Ashland <= P +) # @:maxY model.pprint() diff --git a/examples/pyomobook/performance-ch/SparseSets.py b/examples/pyomobook/performance-ch/SparseSets.py index 0586feb9b5f..90d097b53aa 100644 --- a/examples/pyomobook/performance-ch/SparseSets.py +++ b/examples/pyomobook/performance-ch/SparseSets.py @@ -6,8 +6,11 @@ model.K = pyo.Set() model.V = pyo.Set(model.K) + def kv_init(m): - return ((k,v) for k in m.K for v in m.V[k]) + return ((k, v) for k in m.K for v in m.V[k]) + + model.KV = pyo.Set(dimen=2, initialize=kv_init) model.a = pyo.Param(model.I, model.K) @@ -18,6 +21,9 @@ def kv_init(m): # include a constraint that looks like this: # x[i,k,v] <= a[i,k]*y[i], for i in I, k in K, v in V[k] -def c1Rule(m,i,k,v): - return m.x[i,k,v] <= m.a[i,k]*m.y[i] + +def c1Rule(m, i, k, v): + return m.x[i, k, v] <= m.a[i, k] * m.y[i] + + model.c1 = pyo.Constraint(model.I, model.KV, rule=c1Rule) diff --git a/examples/pyomobook/performance-ch/lin_expr.py b/examples/pyomobook/performance-ch/lin_expr.py index 728b2678a58..75f4e70ec2a 100644 --- a/examples/pyomobook/performance-ch/lin_expr.py +++ b/examples/pyomobook/performance-ch/lin_expr.py @@ -12,7 +12,7 @@ timer.tic() for i in range(N2): - e = sum(i*m.x[i] for i in range(N1)) + e = sum(i * m.x[i] for i in range(N1)) timer.toc('created expression with sum function') for i in range(N2): diff --git a/examples/pyomobook/performance-ch/persistent.py b/examples/pyomobook/performance-ch/persistent.py index 70808272866..98207909cb6 100644 --- a/examples/pyomobook/performance-ch/persistent.py +++ b/examples/pyomobook/performance-ch/persistent.py @@ -1,10 +1,11 @@ # @model: import pyomo.environ as pyo + m = pyo.ConcreteModel() m.x = pyo.Var() m.y = pyo.Var() m.obj = pyo.Objective(expr=m.x**2 + m.y**2) -m.c = pyo.Constraint(expr=m.y >= -2*m.x + 5) +m.c = pyo.Constraint(expr=m.y >= -2 * m.x + 5) # @:model # @creation: @@ -44,7 +45,7 @@ m = pyo.ConcreteModel() m.x = pyo.Var() m.y = pyo.Var() -m.c = pyo.Constraint(expr=m.y >= -2*m.x + 5) +m.c = pyo.Constraint(expr=m.y >= -2 * m.x + 5) opt = pyo.SolverFactory('gurobi_persistent') opt.set_instance(m) # WRONG: @@ -57,7 +58,7 @@ m = pyo.ConcreteModel() m.x = pyo.Var() m.y = pyo.Var() -m.c = pyo.Constraint(expr=m.y >= -2*m.x + 5) +m.c = pyo.Constraint(expr=m.y >= -2 * m.x + 5) opt = pyo.SolverFactory('gurobi_persistent') opt.set_instance(m) # Correct: @@ -72,7 +73,7 @@ m.x = pyo.Var() m.y = pyo.Var() m.obj = pyo.Objective(expr=m.x**2 + m.y**2) -m.c = pyo.Constraint(expr=m.y >= -2*m.x + 5) +m.c = pyo.Constraint(expr=m.y >= -2 * m.x + 5) opt = pyo.SolverFactory('gurobi_persistent') opt.set_instance(m) m.x.setlb(1.0) @@ -95,7 +96,7 @@ m.x = pyo.Var() m.y = pyo.Var() m.obj = pyo.Objective(expr=m.x**2 + m.y**2) -m.c = pyo.Constraint(expr=m.y >= -2*m.x + 5) +m.c = pyo.Constraint(expr=m.y >= -2 * m.x + 5) opt = pyo.SolverFactory('gurobi_persistent') opt.set_instance(m) results = opt.solve(save_results=False) diff --git a/examples/pyomobook/performance-ch/wl.py b/examples/pyomobook/performance-ch/wl.py index 8175661e97d..34c8a73f36e 100644 --- a/examples/pyomobook/performance-ch/wl.py +++ b/examples/pyomobook/performance-ch/wl.py @@ -1,6 +1,6 @@ # wl.py # define a script to demonstrate performance profiling and improvements # @imports: -import pyomo.environ as pyo # import pyomo environment +import pyomo.environ as pyo # import pyomo environment import cProfile import pstats import io @@ -9,52 +9,60 @@ from pyomo.core.expr.numeric_expr import LinearExpression import matplotlib.pyplot as plt import numpy as np + np.random.seed(0) # @:imports + # @model_func: def create_warehouse_model(num_locations=50, num_customers=50): N = list(range(num_locations)) # warehouse locations M = list(range(num_customers)) # customers - d = dict() # distances from warehouse locations to customers + d = dict() # distances from warehouse locations to customers for n in N: for m in M: d[n, m] = np.random.randint(low=1, high=100) max_num_warehouses = 2 model = pyo.ConcreteModel(name="(WL)") - model.P = pyo.Param(initialize=max_num_warehouses, - mutable=True) + model.P = pyo.Param(initialize=max_num_warehouses, mutable=True) model.x = pyo.Var(N, M, bounds=(0, 1)) model.y = pyo.Var(N, bounds=(0, 1)) def obj_rule(mdl): - return sum(d[n,m]*mdl.x[n,m] for n in N for m in M) + return sum(d[n, m] * mdl.x[n, m] for n in N for m in M) + model.obj = pyo.Objective(rule=obj_rule) def demand_rule(mdl, m): - return sum(mdl.x[n,m] for n in N) == 1 + return sum(mdl.x[n, m] for n in N) == 1 + model.demand = pyo.Constraint(M, rule=demand_rule) def warehouse_active_rule(mdl, n, m): - return mdl.x[n,m] <= mdl.y[n] + return mdl.x[n, m] <= mdl.y[n] + model.warehouse_active = pyo.Constraint(N, M, rule=warehouse_active_rule) def num_warehouses_rule(mdl): return sum(mdl.y[n] for n in N) <= model.P + model.num_warehouses = pyo.Constraint(rule=num_warehouses_rule) return model + + # @:model_func + # @model_linear_expr: def create_warehouse_linear_expr(num_locations=50, num_customers=50): N = list(range(num_locations)) # warehouse locations M = list(range(num_customers)) # customers - d = dict() # distances from warehouse locations to customers + d = dict() # distances from warehouse locations to customers for n in N: for m in M: d[n, m] = np.random.randint(low=1, high=100) @@ -63,29 +71,38 @@ def create_warehouse_linear_expr(num_locations=50, num_customers=50): model = pyo.ConcreteModel(name="(WL)") model.P = pyo.Param(initialize=max_num_warehouses, mutable=True) - model.x = pyo.Var(N, M, bounds=(0,1)) + model.x = pyo.Var(N, M, bounds=(0, 1)) model.y = pyo.Var(N, bounds=(0, 1)) def obj_rule(mdl): - return sum(d[n,m]*mdl.x[n,m] for n in N for m in M) + return sum(d[n, m] * mdl.x[n, m] for n in N for m in M) + model.obj = pyo.Objective(rule=obj_rule) def demand_rule(mdl, m): - return sum(mdl.x[n,m] for n in N) == 1 + return sum(mdl.x[n, m] for n in N) == 1 + model.demand = pyo.Constraint(M, rule=demand_rule) def warehouse_active_rule(mdl, n, m): - expr = LinearExpression(constant=0, linear_coefs=[1, -1], linear_vars=[mdl.x[n,m], mdl.y[n]]) + expr = LinearExpression( + constant=0, linear_coefs=[1, -1], linear_vars=[mdl.x[n, m], mdl.y[n]] + ) return expr <= 0 + model.warehouse_active = pyo.Constraint(N, M, rule=warehouse_active_rule) def num_warehouses_rule(mdl): return sum(mdl.y[n] for n in N) <= model.P + model.num_warehouses = pyo.Constraint(rule=num_warehouses_rule) return model + + # @:model_linear_expr + # @print_c_profiler: def print_c_profiler(pr, lines_to_print=15): s = io.StringIO() @@ -96,15 +113,21 @@ def print_c_profiler(pr, lines_to_print=15): stats = pstats.Stats(pr, stream=s).sort_stats('tottime') stats.print_stats(lines_to_print) print(s.getvalue()) + + # @:print_c_profiler + # @solve_warehouse_location: def solve_warehouse_location(m): opt = pyo.SolverFactory('gurobi') res = opt.solve(m) assert_optimal_termination(res) + + # @:solve_warehouse_location + # @solve_parametric: def solve_parametric(): m = create_warehouse_model(num_locations=50, num_customers=50) @@ -116,8 +139,11 @@ def solve_parametric(): res = opt.solve(m) assert_optimal_termination(res) obj_values.append(res.problem.lower_bound) + + # @:solve_parametric + # @parametric_persistent: def solve_parametric_persistent(): m = create_warehouse_model(num_locations=50, num_customers=50) @@ -132,6 +158,8 @@ def solve_parametric_persistent(): res = opt.solve(save_results=False) assert_optimal_termination(res) obj_values.append(res.problem.lower_bound) + + # @:parametric_persistent # @report_timing: @@ -178,9 +206,9 @@ def solve_parametric_persistent(): # @:time_parametric_persistent # @profile_parametric_persistent: -#pr = cProfile.Profile() -#pr.enable() -#solve_parametric_persistent() -#pr.disable() -#print_c_profiler(pr) +# pr = cProfile.Profile() +# pr.enable() +# solve_parametric_persistent() +# pr.disable() +# print_c_profiler(pr) # @:profile_parametric_persistent diff --git a/examples/pyomobook/performance-ch/wl_output.txt b/examples/pyomobook/performance-ch/wl_output.txt index efb1045b496..e8a3d8c49d4 100644 --- a/examples/pyomobook/performance-ch/wl_output.txt +++ b/examples/pyomobook/performance-ch/wl_output.txt @@ -8,17 +8,17 @@ Building model 0 seconds to construct Set OrderedSimpleSet; 1 index total 0 seconds to construct Set SetProduct_OrderedSet; 1 index total 0 seconds to construct Set SetProduct_OrderedSet; 1 index total - 0.09 seconds to construct Var x; 40000 indicies total + 0.09 seconds to construct Var x; 40000 indices total 0 seconds to construct Set OrderedSimpleSet; 1 index total - 0 seconds to construct Var y; 200 indicies total + 0 seconds to construct Var y; 200 indices total 0.19 seconds to construct Objective obj; 1 index total 0 seconds to construct Set OrderedSimpleSet; 1 index total - 0.11 seconds to construct Constraint demand; 200 indicies total + 0.11 seconds to construct Constraint demand; 200 indices total 0 seconds to construct Set OrderedSimpleSet; 1 index total 0 seconds to construct Set OrderedSimpleSet; 1 index total 0 seconds to construct Set SetProduct_OrderedSet; 1 index total 0 seconds to construct Set SetProduct_OrderedSet; 1 index total - 0.62 seconds to construct Constraint warehouse_active; 40000 indicies total + 0.62 seconds to construct Constraint warehouse_active; 40000 indices total 0 seconds to construct Constraint num_warehouses; 1 index total # @:report_timing @@ -32,17 +32,17 @@ Building model with LinearExpression 0 seconds to construct Set OrderedSimpleSet; 1 index total 0 seconds to construct Set SetProduct_OrderedSet; 1 index total 0 seconds to construct Set SetProduct_OrderedSet; 1 index total - 0.12 seconds to construct Var x; 40000 indicies total + 0.12 seconds to construct Var x; 40000 indices total 0 seconds to construct Set OrderedSimpleSet; 1 index total - 0 seconds to construct Var y; 200 indicies total + 0 seconds to construct Var y; 200 indices total 0.17 seconds to construct Objective obj; 1 index total 0 seconds to construct Set OrderedSimpleSet; 1 index total - 0.11 seconds to construct Constraint demand; 200 indicies total + 0.11 seconds to construct Constraint demand; 200 indices total 0 seconds to construct Set OrderedSimpleSet; 1 index total 0 seconds to construct Set OrderedSimpleSet; 1 index total 0 seconds to construct Set SetProduct_OrderedSet; 1 index total 0 seconds to construct Set SetProduct_OrderedSet; 1 index total - 0.49 seconds to construct Constraint warehouse_active; 40000 indicies total + 0.49 seconds to construct Constraint warehouse_active; 40000 indices total 0 seconds to construct Constraint num_warehouses; 1 index total # @:report_timing_with_lin_expr diff --git a/examples/pyomobook/pyomo-components-ch/con_declaration.py b/examples/pyomobook/pyomo-components-ch/con_declaration.py index cb8e768b5cd..7775c1b26a0 100644 --- a/examples/pyomobook/pyomo-components-ch/con_declaration.py +++ b/examples/pyomobook/pyomo-components-ch/con_declaration.py @@ -3,8 +3,8 @@ model = pyo.ConcreteModel() # @decl1: -model.x = pyo.Var([1,2], initialize=1.0) -model.diff = pyo.Constraint(expr=model.x[2]-model.x[1] <= 7.5) +model.x = pyo.Var([1, 2], initialize=1.0) +model.diff = pyo.Constraint(expr=model.x[2] - model.x[1] <= 7.5) # @:decl1 model.pprint() @@ -12,9 +12,13 @@ model = pyo.ConcreteModel() # @decl2: -model.x = pyo.Var([1,2], initialize=1.0) +model.x = pyo.Var([1, 2], initialize=1.0) + + def diff_rule(model): return model.x[2] - model.x[1] <= 7.5 + + model.diff = pyo.Constraint(rule=diff_rule) # @:decl2 @@ -23,15 +27,18 @@ def diff_rule(model): model = pyo.ConcreteModel() # @decl3: -N = [1,2,3] +N = [1, 2, 3] -a = {1:1, 2:3.1, 3:4.5} -b = {1:1, 2:2.9, 3:3.1} +a = {1: 1, 2: 3.1, 3: 4.5} +b = {1: 1, 2: 2.9, 3: 3.1} model.y = pyo.Var(N, within=pyo.NonNegativeReals, initialize=0.0) + def CoverConstr_rule(model, i): return a[i] * model.y[i] >= b[i] + + model.CoverConstr = pyo.Constraint(N, rule=CoverConstr_rule) # @:decl3 @@ -40,16 +47,18 @@ def CoverConstr_rule(model, i): model = pyo.ConcreteModel() # @decl6: -TimePeriods = [1,2,3,4,5] +TimePeriods = [1, 2, 3, 4, 5] LastTimePeriod = 5 model.StartTime = pyo.Var(TimePeriods, initialize=1.0) + def Pred_rule(model, t): if t == LastTimePeriod: return pyo.Constraint.Skip else: - return model.StartTime[t] <= model.StartTime[t+1] + return model.StartTime[t] <= model.StartTime[t + 1] + model.Pred = pyo.Constraint(TimePeriods, rule=Pred_rule) # @:decl6 @@ -64,17 +73,16 @@ def Pred_rule(model, t): model.c1 = pyo.Constraint(expr=model.y - model.x <= 7.5) model.c2 = pyo.Constraint(expr=-2.5 <= model.y - model.x) -model.c3 = pyo.Constraint( - expr=pyo.inequality(-3.0, model.y - model.x, 7.0)) +model.c3 = pyo.Constraint(expr=pyo.inequality(-3.0, model.y - model.x, 7.0)) -print(pyo.value(model.c1.body)) # 0.0 +print(pyo.value(model.c1.body)) # 0.0 -print(model.c1.lslack()) # inf -print(model.c1.uslack()) # 7.5 -print(model.c2.lslack()) # 2.5 -print(model.c2.uslack()) # inf -print(model.c3.lslack()) # 3.0 -print(model.c3.uslack()) # 7.0 +print(model.c1.lslack()) # inf +print(model.c1.uslack()) # 7.5 +print(model.c2.lslack()) # 2.5 +print(model.c2.uslack()) # inf +print(model.c3.lslack()) # 3.0 +print(model.c3.uslack()) # 7.0 # @:slack model.display() diff --git a/examples/pyomobook/pyomo-components-ch/examples.py b/examples/pyomobook/pyomo-components-ch/examples.py index f12b31c20e5..6ba96792e28 100644 --- a/examples/pyomobook/pyomo-components-ch/examples.py +++ b/examples/pyomobook/pyomo-components-ch/examples.py @@ -4,14 +4,18 @@ # -------------------------------------------------- # @indexed1: model = pyo.ConcreteModel() -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) model.B = pyo.Set(initialize=['Q', 'R']) model.x = pyo.Var() model.y = pyo.Var(model.A, model.B) model.o = pyo.Objective(expr=model.x) model.c = pyo.Constraint(expr=model.x >= 0) + + def d_rule(model, a): return a * model.x <= 0 + + model.d = pyo.Constraint(model.A, rule=d_rule) # @:indexed1 diff --git a/examples/pyomobook/pyomo-components-ch/expr_declaration.py b/examples/pyomobook/pyomo-components-ch/expr_declaration.py index cf802618218..8974a4d406a 100644 --- a/examples/pyomobook/pyomo-components-ch/expr_declaration.py +++ b/examples/pyomobook/pyomo-components-ch/expr_declaration.py @@ -13,8 +13,12 @@ # @decl2: model.x = pyo.Var() model.e1 = pyo.Expression(expr=model.x + 1) + + def e2_rule(model): return model.x + 2 + + model.e2 = pyo.Expression(rule=e2_rule) # @:decl2 @@ -24,13 +28,17 @@ def e2_rule(model): model = pyo.ConcreteModel() # @decl3: -N = [1,2,3] +N = [1, 2, 3] model.x = pyo.Var(N) + + def e_rule(model, i): if i == 1: return pyo.Expression.Skip else: - return model.x[i]**2 + return model.x[i] ** 2 + + model.e = pyo.Expression(N, rule=e_rule) # @:decl3 @@ -41,8 +49,8 @@ def e_rule(model, i): # @decl4: model.x = pyo.Var() -model.e = pyo.Expression(expr=(model.x - 1.0)**2) -model.o = pyo.Objective(expr=0.1*model.e + model.x) +model.e = pyo.Expression(expr=(model.x - 1.0) ** 2) +model.o = pyo.Objective(expr=0.1 * model.e + model.x) model.c = pyo.Constraint(expr=model.e <= 1.0) # @:decl4 @@ -50,13 +58,13 @@ def e_rule(model, i): # @decl5: model.x.set_value(2.0) -print(pyo.value(model.e)) # 1.0 -print(pyo.value(model.o)) # 2.1 +print(pyo.value(model.e)) # 1.0 +print(pyo.value(model.o)) # 2.1 print(pyo.value(model.c.body)) # 1.0 -model.e.set_value((model.x - 2.0)**2) -print(pyo.value(model.e)) # 0.0 -print(pyo.value(model.o)) # 2.0 +model.e.set_value((model.x - 2.0) ** 2) +print(pyo.value(model.e)) # 0.0 +print(pyo.value(model.o)) # 2.0 print(pyo.value(model.c.body)) # 0.0 # @:decl5 diff --git a/examples/pyomobook/pyomo-components-ch/obj_declaration.py b/examples/pyomobook/pyomo-components-ch/obj_declaration.py index 9b18fed7094..2c26c2b3363 100644 --- a/examples/pyomobook/pyomo-components-ch/obj_declaration.py +++ b/examples/pyomobook/pyomo-components-ch/obj_declaration.py @@ -13,14 +13,17 @@ print('declexprrule') # @declexprrule: -model.x = pyo.Var([1,2], initialize=1.0) +model.x = pyo.Var([1, 2], initialize=1.0) + +model.b = pyo.Objective(expr=model.x[1] + 2 * model.x[2]) -model.b = pyo.Objective(expr=model.x[1] + 2*model.x[2]) def m_rule(model): expr = model.x[1] - expr += 2*model.x[2] + expr += 2 * model.x[2] return expr + + model.c = pyo.Objective(rule=m_rule) # @:declexprrule model.display() @@ -32,17 +35,25 @@ def m_rule(model): # @declmulti: A = ['Q', 'R', 'S'] model.x = pyo.Var(A, initialize=1.0) + + def d_rule(model, i): - return model.x[i]**2 + return model.x[i] ** 2 + + model.d = pyo.Objective(A, rule=d_rule) # @:declmulti print('declskip') + + # @declskip: def e_rule(model, i): if i == 'R': return pyo.Objective.Skip - return model.x[i]**2 + return model.x[i] ** 2 + + model.e = pyo.Objective(A, rule=e_rule) # @:declskip model.display() @@ -53,10 +64,10 @@ def e_rule(model, i): print('value') # @value: A = ['Q', 'R'] -model.x = pyo.Var(A, initialize={'Q':1.5, 'R':2.5}) -model.o = pyo.Objective(expr=model.x['Q'] + 2*model.x['R']) -print(model.o.expr) # x[Q] + 2*x[R] -print(model.o.sense) # minimize +model.x = pyo.Var(A, initialize={'Q': 1.5, 'R': 2.5}) +model.o = pyo.Objective(expr=model.x['Q'] + 2 * model.x['R']) +print(model.o.expr) # x[Q] + 2*x[R] +print(model.o.sense) # minimize print(pyo.value(model.o)) # 6.5 # @:value diff --git a/examples/pyomobook/pyomo-components-ch/param_declaration.py b/examples/pyomobook/pyomo-components-ch/param_declaration.py index d854d94d23d..a9d3256abfe 100644 --- a/examples/pyomobook/pyomo-components-ch/param_declaration.py +++ b/examples/pyomobook/pyomo-components-ch/param_declaration.py @@ -7,11 +7,12 @@ # @:decl1 # @decl3: -model.A = pyo.Set(initialize=[1,2,3]) -model.B = pyo.Set(initialize=['A','B']) -model.U = pyo.Param(model.A, initialize={1:10, 2:20, 3:30}) -model.T = pyo.Param(model.A, model.B, - initialize={(1,'A'):10, (2,'B'):20, (3,'A'):30}) +model.A = pyo.Set(initialize=[1, 2, 3]) +model.B = pyo.Set(initialize=['A', 'B']) +model.U = pyo.Param(model.A, initialize={1: 10, 2: 20, 3: 30}) +model.T = pyo.Param( + model.A, model.B, initialize={(1, 'A'): 10, (2, 'B'): 20, (3, 'A'): 30} +) # @:decl3 model.pprint() diff --git a/examples/pyomobook/pyomo-components-ch/param_initialization.py b/examples/pyomobook/pyomo-components-ch/param_initialization.py index af3b6103d53..11c257d2c31 100644 --- a/examples/pyomobook/pyomo-components-ch/param_initialization.py +++ b/examples/pyomobook/pyomo-components-ch/param_initialization.py @@ -1,35 +1,41 @@ import pyomo.environ as pyo model = pyo.ConcreteModel() -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) + # @decl3b: def X_init(model, i, j): - return i*j + return i * j + + model.X = pyo.Param(model.A, model.A, initialize=X_init) # @:decl3b + # @decl3c: def XX_init(model, i, j): - if i==1 or j==1: - return i*j - return i*j + model.XX[i-1,j-1] + if i == 1 or j == 1: + return i * j + return i * j + model.XX[i - 1, j - 1] + + model.XX = pyo.Param(model.A, model.A, initialize=XX_init) # @:decl3c # @decl4: -model.B = pyo.Set(initialize=[1,2,3]) -w={} +model.B = pyo.Set(initialize=[1, 2, 3]) +w = {} w[1] = 10 w[3] = 30 model.W = pyo.Param(model.B, initialize=w) # @:decl4 # @decl5: -u={} -u[1,1] = 10 -u[2,2] = 20 -u[3,3] = 30 +u = {} +u[1, 1] = 10 +u[2, 2] = 20 +u[3, 3] = 30 model.U = pyo.Param(model.A, model.A, initialize=u, default=0) # @:decl5 @@ -43,18 +49,18 @@ def XX_init(model, i, j): # -------------------------------------------------- # @special1: model = pyo.ConcreteModel() -model.p = pyo.Param([1,2,3], initialize={1:1.42, 3:3.14}) -model.q = pyo.Param([1,2,3], initialize={1:1.42, 3:3.14}, default=0) +model.p = pyo.Param([1, 2, 3], initialize={1: 1.42, 3: 3.14}) +model.q = pyo.Param([1, 2, 3], initialize={1: 1.42, 3: 3.14}, default=0) # Demonstrating the len() function -print(len(model.p)) # 2 -print(len(model.q)) # 3 +print(len(model.p)) # 2 +print(len(model.q)) # 3 # Demonstrating the 'in' operator (checks against component keys) -print(2 in model.p) # False -print(2 in model.q) # True +print(2 in model.p) # False +print(2 in model.q) # True # Demonstrating iteration over component keys -print([key for key in model.p]) # [1, 3] -print([key for key in model.q]) # [1, 2, 3] +print([key for key in model.p]) # [1, 3] +print([key for key in model.q]) # [1, 2, 3] # @:special1 diff --git a/examples/pyomobook/pyomo-components-ch/param_misc.py b/examples/pyomobook/pyomo-components-ch/param_misc.py index 467a1cae373..baf76cc7c03 100644 --- a/examples/pyomobook/pyomo-components-ch/param_misc.py +++ b/examples/pyomobook/pyomo-components-ch/param_misc.py @@ -2,22 +2,22 @@ # @mutable1: model = pyo.ConcreteModel() -p = {1:1, 2:4, 3:9} +p = {1: 1, 2: 4, 3: 9} -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) model.p = pyo.Param(model.A, initialize=p) model.x = pyo.Var(model.A, within=pyo.NonNegativeReals) -model.o = pyo.Objective(expr=sum(model.p[i]*model.x[i] for i in model.A)) +model.o = pyo.Objective(expr=sum(model.p[i] * model.x[i] for i in model.A)) # @:mutable1 model.pprint() # @mutable2: model = pyo.ConcreteModel() -p = {1:1, 2:4, 3:9} +p = {1: 1, 2: 4, 3: 9} -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) model.p = pyo.Param(model.A, initialize=p, mutable=True) model.x = pyo.Var(model.A, within=pyo.NonNegativeReals) @@ -27,4 +27,3 @@ model.p[3] = 3.14 # @:mutable2 model.pprint() - diff --git a/examples/pyomobook/pyomo-components-ch/param_validation.py b/examples/pyomobook/pyomo-components-ch/param_validation.py index 019d6b1d92f..c82657c8d0f 100644 --- a/examples/pyomobook/pyomo-components-ch/param_validation.py +++ b/examples/pyomobook/pyomo-components-ch/param_validation.py @@ -6,16 +6,23 @@ model.Z = pyo.Param(within=pyo.Reals) # @:decl1 + # @decl2: def Y_validate(model, value): return value in pyo.Reals + + model.Y = pyo.Param(validate=Y_validate) # @:decl2 # @decl3: -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) + + def X_validate(model, value, i): return value > i + + model.X = pyo.Param(model.A, validate=X_validate) # @:decl3 diff --git a/examples/pyomobook/pyomo-components-ch/rangeset.py b/examples/pyomobook/pyomo-components-ch/rangeset.py index 237031469be..d5e1015064c 100644 --- a/examples/pyomobook/pyomo-components-ch/rangeset.py +++ b/examples/pyomobook/pyomo-components-ch/rangeset.py @@ -7,11 +7,11 @@ # @:decl1 # @decl3: -model.C = pyo.RangeSet(5,10) +model.C = pyo.RangeSet(5, 10) # @:decl3 # @decl4: -model.D = pyo.RangeSet(2.5,11,1.5) +model.D = pyo.RangeSet(2.5, 11, 1.5) # @:decl4 instance = model.create_instance() diff --git a/examples/pyomobook/pyomo-components-ch/set_declaration.py b/examples/pyomobook/pyomo-components-ch/set_declaration.py index 5555c02993b..1a507d4f588 100644 --- a/examples/pyomobook/pyomo-components-ch/set_declaration.py +++ b/examples/pyomobook/pyomo-components-ch/set_declaration.py @@ -14,7 +14,7 @@ model.A = pyo.Set() model.B = pyo.Set() model.C = pyo.Set(model.A) -model.D = pyo.Set(model.A,model.B) +model.D = pyo.Set(model.A, model.B) # @:decl2 model = pyo.AbstractModel() @@ -23,8 +23,8 @@ model = pyo.AbstractModel() # @decl6: -model.E = pyo.Set([1,2,3]) -f = set([1,2,3]) +model.E = pyo.Set([1, 2, 3]) +f = set([1, 2, 3]) model.F = pyo.Set(f) # @:decl6 @@ -36,10 +36,10 @@ # @decl3: model.A = pyo.Set() model.B = pyo.Set() -model.G = model.A | model.B # set union -model.H = model.B & model.A # set intersection -model.I = model.A - model.B # set difference -model.J = model.A ^ model.B # set exclusive-or +model.G = model.A | model.B # set union +model.H = model.B & model.A # set intersection +model.I = model.A - model.B # set difference +model.J = model.A ^ model.B # set exclusive-or # @:decl3 instance = model.create_instance('set_declaration.dat') diff --git a/examples/pyomobook/pyomo-components-ch/set_initialization.py b/examples/pyomobook/pyomo-components-ch/set_initialization.py index 6d91f25e9da..89dbaa713db 100644 --- a/examples/pyomobook/pyomo-components-ch/set_initialization.py +++ b/examples/pyomobook/pyomo-components-ch/set_initialization.py @@ -3,35 +3,42 @@ model = pyo.ConcreteModel() # @decl2: -model.B = pyo.Set(initialize=[2,3,4]) -model.C = pyo.Set(initialize=[(1,4),(9,16)]) +model.B = pyo.Set(initialize=[2, 3, 4]) +model.C = pyo.Set(initialize=[(1, 4), (9, 16)]) # @:decl2 # @decl6: F_init = {} -F_init[2] = [1,3,5] -F_init[3] = [2,4,6] -F_init[4] = [3,5,7] -model.F = pyo.Set([2,3,4],initialize=F_init) +F_init[2] = [1, 3, 5] +F_init[3] = [2, 4, 6] +F_init[4] = [3, 5, 7] +model.F = pyo.Set([2, 3, 4], initialize=F_init) # @:decl6 + # @decl8: def J_init(model, i, j): - return range(0,i*j) -model.J = pyo.Set(model.B,model.B, initialize=J_init) + return range(0, i * j) + + +model.J = pyo.Set(model.B, model.B, initialize=J_init) # @:decl8 # @decl12: -model.P = pyo.Set(initialize=[1,2,3,5,7]) +model.P = pyo.Set(initialize=[1, 2, 3, 5, 7]) + + def filter_rule(model, x): return x not in model.P -model.Q = pyo.Set(initialize=range(1,10), filter=filter_rule) + + +model.Q = pyo.Set(initialize=range(1, 10), filter=filter_rule) # @:decl12 # @decl20: -model.R = pyo.Set([1,2,3]) +model.R = pyo.Set([1, 2, 3]) model.R[1] = [1] -model.R[2] = [1,2] +model.R[2] = [1, 2] # @:decl20 model.pprint(verbose=True) diff --git a/examples/pyomobook/pyomo-components-ch/set_misc.py b/examples/pyomobook/pyomo-components-ch/set_misc.py index 73ef80baac1..9a795b196b8 100644 --- a/examples/pyomobook/pyomo-components-ch/set_misc.py +++ b/examples/pyomobook/pyomo-components-ch/set_misc.py @@ -2,48 +2,48 @@ model = pyo.ConcreteModel() # @len: -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) -print(len(model.A)) # 3 +print(len(model.A)) # 3 # @:len model = pyo.ConcreteModel() # @data: model.A = pyo.Set(initialize=[1, 2, 3]) model.B = pyo.Set(initialize=[3, 2, 1], ordered=True) -model.C = pyo.Set(model.A, initialize={1:[1], 2:[1, 2]}) +model.C = pyo.Set(model.A, initialize={1: [1], 2: [1, 2]}) -print(type(model.A.data()) is tuple) # True -print(type(model.B.data()) is tuple) # True -print(type(model.C.data()) is dict) # True -print(sorted(model.A.data())) # [1, 2, 3] +print(type(model.A.data()) is tuple) # True +print(type(model.B.data()) is tuple) # True +print(type(model.C.data()) is dict) # True +print(sorted(model.A.data())) # [1, 2, 3] for index in sorted(model.C.data().keys()): - print(sorted(model.C.data()[index])) + print(sorted(model.C.data()[index])) # [1] # [1, 2] # @:data model = pyo.ConcreteModel() # @special: -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) # Test if an element is in the set -print(1 in model.A) # True +print(1 in model.A) # True # Test if sets are equal -print([1, 2] == model.A) # False +print([1, 2] == model.A) # False # Test if sets are not equal -print([1, 2] != model.A) # True +print([1, 2] != model.A) # True # Test if a set is a subset of or equal to the set -print([1, 2] <= model.A) # True +print([1, 2] <= model.A) # True # Test if a set is a subset of the set -print([1, 2] < model.A) # True +print([1, 2] < model.A) # True # Test if a set is a superset of the set -print([1, 2, 3] > model.A) # False +print([1, 2, 3] > model.A) # False # Test if a set is a superset of or equal to the set print([1, 2, 3] >= model.A) # True @@ -52,9 +52,9 @@ model = pyo.ConcreteModel() # @iter: model.A = pyo.Set(initialize=[1, 2, 3]) -model.C = pyo.Set(model.A, initialize={1:[1], 2:[1, 2]}) +model.C = pyo.Set(model.A, initialize={1: [1], 2: [1, 2]}) -print(sorted(e for e in model.A)) # [1, 2, 3] +print(sorted(e for e in model.A)) # [1, 2, 3] for index in model.C: print(sorted(e for e in model.C[index])) # [1] @@ -65,21 +65,20 @@ # @ordered: model.A = pyo.Set(initialize=[3, 2, 1], ordered=True) -print(model.A.first()) # 3 -print(model.A.last()) # 1 -print(model.A.next(2)) # 1 -print(model.A.prev(2)) # 3 -print(model.A.nextw(1)) # 3 -print(model.A.prevw(3)) # 1 +print(model.A.first()) # 3 +print(model.A.last()) # 1 +print(model.A.next(2)) # 1 +print(model.A.prev(2)) # 3 +print(model.A.nextw(1)) # 3 +print(model.A.prevw(3)) # 1 # @:ordered model = pyo.ConcreteModel() # @ordered2: model.A = pyo.Set(initialize=[3, 2, 1], ordered=True) -print(model.A.ord(3)) # 1 -print(model.A.ord(1)) # 3 -print(model.A[1]) # 3 -print(model.A[3]) # 1 +print(model.A.ord(3)) # 1 +print(model.A.ord(1)) # 3 +print(model.A[1]) # 3 +print(model.A[3]) # 1 # @:ordered2 - diff --git a/examples/pyomobook/pyomo-components-ch/set_validation.py b/examples/pyomobook/pyomo-components-ch/set_validation.py index 2e1d90b0039..a55dfc9ab7c 100644 --- a/examples/pyomobook/pyomo-components-ch/set_validation.py +++ b/examples/pyomobook/pyomo-components-ch/set_validation.py @@ -7,9 +7,12 @@ model.B = pyo.Set(within=model.A) # @:decl1 + # @decl2: def C_validate(model, value): return value in model.A + + model.C = pyo.Set(validate=C_validate) # @:decl2 diff --git a/examples/pyomobook/pyomo-components-ch/suffix_declaration.py b/examples/pyomobook/pyomo-components-ch/suffix_declaration.py index 455ab6e33c5..650669ef5a6 100644 --- a/examples/pyomobook/pyomo-components-ch/suffix_declaration.py +++ b/examples/pyomobook/pyomo-components-ch/suffix_declaration.py @@ -11,8 +11,7 @@ # @suffixdecl: # Export integer data -model.priority = pyo.Suffix(direction=pyo.Suffix.EXPORT, - datatype=pyo.Suffix.INT) +model.priority = pyo.Suffix(direction=pyo.Suffix.EXPORT, datatype=pyo.Suffix.INT) # Export and import floating point data model.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT_EXPORT) @@ -25,20 +24,23 @@ model.x = pyo.Var() model.c = pyo.Constraint(expr=model.x >= 1) + def foo_rule(m): - return ((m.x, 2.0), (m.c, 3.0)) + return ((m.x, 2.0), (m.c, 3.0)) + + model.foo = pyo.Suffix(initialize=foo_rule) # @:suffixinitrule model.pprint() -del foo_rule # Needed to avoid implicit rule warning in next example +del foo_rule # Needed to avoid implicit rule warning in next example print('') print('*** suffix1 ***') # @suffix1: model = pyo.ConcreteModel() model.x = pyo.Var() -model.y = pyo.Var([1,2,3]) +model.y = pyo.Var([1, 2, 3]) model.foo = pyo.Suffix() # @:suffix1 print('suffix1a') @@ -50,7 +52,7 @@ def foo_rule(m): model.x.set_suffix_value(model.foo, 2.0) # Get the value of suffix 'foo' for model.x -print(model.x.get_suffix_value('foo')) # 2.0 +print(model.x.get_suffix_value('foo')) # 2.0 # @:suffix1a print('suffix1b') # @suffix1b: @@ -61,17 +63,17 @@ def foo_rule(m): model.y[2].set_suffix_value(model.foo, 4.0) # Get the value of suffix 'foo' for model.y -print(model.y.get_suffix_value(model.foo)) # None -print(model.y[1].get_suffix_value(model.foo)) # 3.0 -print(model.y[2].get_suffix_value(model.foo)) # 4.0 -print(model.y[3].get_suffix_value(model.foo)) # 3.0 +print(model.y.get_suffix_value(model.foo)) # None +print(model.y[1].get_suffix_value(model.foo)) # 3.0 +print(model.y[2].get_suffix_value(model.foo)) # 4.0 +print(model.y[3].get_suffix_value(model.foo)) # 3.0 # @:suffix1b # @suffix1d: model.y[3].clear_suffix_value(model.foo) -print(model.y.get_suffix_value(model.foo)) # None -print(model.y[1].get_suffix_value(model.foo)) # 3.0 -print(model.y[2].get_suffix_value(model.foo)) # 4.0 -print(model.y[3].get_suffix_value(model.foo)) # None +print(model.y.get_suffix_value(model.foo)) # None +print(model.y[1].get_suffix_value(model.foo)) # 3.0 +print(model.y[2].get_suffix_value(model.foo)) # 4.0 +print(model.y[3].get_suffix_value(model.foo)) # None # @:suffix1d diff --git a/examples/pyomobook/pyomo-components-ch/var_declaration.py b/examples/pyomobook/pyomo-components-ch/var_declaration.py index 1f48e1502a6..538cbea1842 100644 --- a/examples/pyomobook/pyomo-components-ch/var_declaration.py +++ b/examples/pyomobook/pyomo-components-ch/var_declaration.py @@ -8,17 +8,21 @@ # @:initscalarvar # for testing -print('3.14 =', pyo.value(model.x)) # 3.14 +print('3.14 =', pyo.value(model.x)) # 3.14 model = None model = pyo.ConcreteModel() # @dictruleinit: -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) model.x = pyo.Var(model.A, initialize=3.14) -model.y = pyo.Var(model.A, initialize={1:1.5, 2:4.5, 3:5.5}) +model.y = pyo.Var(model.A, initialize={1: 1.5, 2: 4.5, 3: 5.5}) + + def z_init_rule(m, i): return float(i) + 0.5 + + model.z = pyo.Var(model.A, initialize=z_init_rule) # @:dictruleinit @@ -41,7 +45,7 @@ def z_init_rule(m, i): model = pyo.ConcreteModel() # @domaindecl: -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) model.y = pyo.Var(within=model.A) model.r = pyo.Var(domain=pyo.Reals) model.w = pyo.Var(within=pyo.Boolean) @@ -57,9 +61,13 @@ def z_init_rule(m, i): model = pyo.ConcreteModel() # @domaindeclrule: -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) + + def s_domain(model, i): - return pyo.RangeSet(i, i+1, 1) # (start, end, step) + return pyo.RangeSet(i, i + 1, 1) # (start, end, step) + + model.s = pyo.Var(model.A, domain=s_domain) # @:domaindeclrule @@ -71,13 +79,17 @@ def s_domain(model, i): model = pyo.ConcreteModel() # @declbounds: -model.A = pyo.Set(initialize=[1,2,3]) -model.a = pyo.Var(bounds=(0.0,None)) +model.A = pyo.Set(initialize=[1, 2, 3]) +model.a = pyo.Var(bounds=(0.0, None)) + +lower = {1: 2.5, 2: 4.5, 3: 6.5} +upper = {1: 3.5, 2: 4.5, 3: 7.5} + -lower = {1:2.5, 2:4.5, 3:6.5} -upper = {1:3.5, 2:4.5, 3:7.5} def f(model, i): return (lower[i], upper[i]) + + model.b = pyo.Var(model.A, bounds=f) # @:declbounds @@ -92,27 +104,31 @@ def f(model, i): model = pyo.ConcreteModel() # @declinit: -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) model.za = pyo.Var(initialize=9.5, within=pyo.NonNegativeReals) -model.zb = pyo.Var(model.A, initialize={1:1.5, 2:4.5, 3:5.5}) +model.zb = pyo.Var(model.A, initialize={1: 1.5, 2: 4.5, 3: 5.5}) model.zc = pyo.Var(model.A, initialize=2.1) -print(pyo.value(model.za)) # 9.5 -print(pyo.value(model.zb[3])) # 5.5 -print(pyo.value(model.zc[3])) # 2.1 +print(pyo.value(model.za)) # 9.5 +print(pyo.value(model.zb[3])) # 5.5 +print(pyo.value(model.zc[3])) # 2.1 # @:declinit model = None model = pyo.ConcreteModel() # @declinitrule: -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) + + def g(model, i): - return 3*i + return 3 * i + + model.m = pyo.Var(model.A, initialize=g) -print(pyo.value(model.m[1])) # 3 -print(pyo.value(model.m[3])) # 9 +print(pyo.value(model.m[1])) # 3 +print(pyo.value(model.m[3])) # 9 # @:declinitrule model = None @@ -120,16 +136,16 @@ def g(model, i): print("varattrib") # @varattribdecl: -model.A = pyo.Set(initialize=[1,2,3]) +model.A = pyo.Set(initialize=[1, 2, 3]) model.za = pyo.Var(initialize=9.5, within=pyo.NonNegativeReals) -model.zb = pyo.Var(model.A, initialize={1:1.5, 2:4.5, 3:5.5}) +model.zb = pyo.Var(model.A, initialize={1: 1.5, 2: 4.5, 3: 5.5}) model.zc = pyo.Var(model.A, initialize=2.1) # @:varattribdecl # @varattribvaluebounds: -print(pyo.value(model.zb[2])) # 4.5 -print(model.za.lb) # 0 -print(model.za.ub) # None +print(pyo.value(model.zb[2])) # 4.5 +print(model.za.lb) # 0 +print(model.za.ub) # None # @:varattribvaluebounds # @varassign: @@ -139,11 +155,9 @@ def g(model, i): # @varfixed: model.zb.fix(3.0) -print(model.zb[1].fixed) # True -print(model.zb[2].fixed) # True +print(model.zb[1].fixed) # True +print(model.zb[2].fixed) # True model.zc[2].fix(3.0) -print(model.zc[1].fixed) # False -print(model.zc[2].fixed) # True +print(model.zc[1].fixed) # False +print(model.zc[2].fixed) # True # @:varfixed - - diff --git a/examples/pyomobook/python-ch/class.py b/examples/pyomobook/python-ch/class.py index 97bb44b1464..562cef07ea7 100644 --- a/examples/pyomobook/python-ch/class.py +++ b/examples/pyomobook/python-ch/class.py @@ -1,20 +1,25 @@ # class.py + # @all: class IntLocker: sint = None + def __init__(self, i): self.set_value(i) + def set_value(self, i): if type(i) is not int: print("Error: %d is not integer." % i) else: self.sint = i + def pprint(self): - print("The Int Locker has "+str(self.sint)) - + print("The Int Locker has " + str(self.sint)) + + a = IntLocker(3) -a.pprint() # prints: The Int Locker has 3 +a.pprint() # prints: The Int Locker has 3 a.set_value(5) -a.pprint() # prints: The Int Locker has 5 +a.pprint() # prints: The Int Locker has 5 # @:all diff --git a/examples/pyomobook/python-ch/ctob.py b/examples/pyomobook/python-ch/ctob.py index d05f6f03cb7..e418d27f103 100644 --- a/examples/pyomobook/python-ch/ctob.py +++ b/examples/pyomobook/python-ch/ctob.py @@ -1,14 +1,18 @@ -# An example of a silly decorator to change 'c' to 'b' -# in the return value of a function. +# An example of a silly decorator to change 'c' to 'b' +# in the return value of a function. + def ctob_decorate(func): - def func_wrapper(*args, **kwargs): - retval = func(*args, **kwargs).replace('c','b') - return retval.replace('C','B') - return func_wrapper + def func_wrapper(*args, **kwargs): + retval = func(*args, **kwargs).replace('c', 'b') + return retval.replace('C', 'B') + + return func_wrapper + @ctob_decorate def Last_Words(): return "Flying Circus" + print(Last_Words()) # prints: Flying Birbus diff --git a/examples/pyomobook/python-ch/example2.py b/examples/pyomobook/python-ch/example2.py index 4c0cb94c0ad..da7d14e24ae 100644 --- a/examples/pyomobook/python-ch/example2.py +++ b/examples/pyomobook/python-ch/example2.py @@ -2,4 +2,5 @@ print("Hello World") import sys + sys.stdin.readline() diff --git a/examples/pyomobook/python-ch/functions.py b/examples/pyomobook/python-ch/functions.py index 15657a5eb78..7948c5e55df 100644 --- a/examples/pyomobook/python-ch/functions.py +++ b/examples/pyomobook/python-ch/functions.py @@ -1,20 +1,23 @@ # functions.py + # @all: def Apply(f, a): r = [] for i in range(len(a)): r.append(f(a[i])) return r - + + def SqifOdd(x): # if x is odd, 2*int(x/2) is not x # due to integer divide of x/2 - if 2*int(x/2) == x: + if 2 * int(x / 2) == x: return x else: - return x*x - + return x * x + + ShortList = range(4) B = Apply(SqifOdd, ShortList) print(B) diff --git a/examples/pyomobook/python-ch/iterate.py b/examples/pyomobook/python-ch/iterate.py index b59cd2c7814..3a3422b2a09 100644 --- a/examples/pyomobook/python-ch/iterate.py +++ b/examples/pyomobook/python-ch/iterate.py @@ -1,7 +1,7 @@ # iterate.py # @all: -D = {'Mary':231} +D = {'Mary': 231} D['Bob'] = 123 D['Alice'] = 331 D['Ted'] = 987 @@ -11,8 +11,8 @@ continue if i == 'John': print("Loop ends. Cleese alert!") - break; - print(i+" "+str(D[i])) + break + print(i + " " + str(D[i])) else: print("Cleese is not in the list.") # @:all diff --git a/examples/pyomobook/scripts-ch/attributes.py b/examples/pyomobook/scripts-ch/attributes.py index b182a4c2ffd..643162082b6 100644 --- a/examples/pyomobook/scripts-ch/attributes.py +++ b/examples/pyomobook/scripts-ch/attributes.py @@ -22,7 +22,3 @@ # print the value of a particular variable print(pyo.value(model.y['Harlingen'])) - - - - diff --git a/examples/pyomobook/scripts-ch/prob_mod_ex.py b/examples/pyomobook/scripts-ch/prob_mod_ex.py index 6712fbed5b1..6d610e9b44a 100644 --- a/examples/pyomobook/scripts-ch/prob_mod_ex.py +++ b/examples/pyomobook/scripts-ch/prob_mod_ex.py @@ -1,52 +1,51 @@ import pyomo.environ as pyo model = pyo.ConcreteModel() -model.x = pyo.Var(bounds=(0,5)) -model.y = pyo.Var(bounds=(0,1)) +model.x = pyo.Var(bounds=(0, 5)) +model.y = pyo.Var(bounds=(0, 1)) model.con = pyo.Constraint(expr=model.x + model.y == 1.0) -model.obj = pyo.Objective(expr=model.y-model.x) +model.obj = pyo.Objective(expr=model.y - model.x) # solve the problem # @solver: solver = pyo.SolverFactory('glpk') # @:solver solver.solve(model) -print(pyo.value(model.x)) # 1.0 -print(pyo.value(model.y)) # 0.0 +print(pyo.value(model.x)) # 1.0 +print(pyo.value(model.y)) # 0.0 # add a constraint -model.con2 = pyo.Constraint(expr=4.0*model.x + model.y == 2.0) +model.con2 = pyo.Constraint(expr=4.0 * model.x + model.y == 2.0) solver.solve(model) -print(pyo.value(model.x)) # 0.33 -print(pyo.value(model.y)) # 0.66 +print(pyo.value(model.x)) # 0.33 +print(pyo.value(model.y)) # 0.66 # deactivate a constraint model.con.deactivate() solver.solve(model) -print(pyo.value(model.x)) # 0.5 -print(pyo.value(model.y)) # 0.0 +print(pyo.value(model.x)) # 0.5 +print(pyo.value(model.y)) # 0.0 # re-activate a constraint model.con.activate() solver.solve(model) -print(pyo.value(model.x)) # 0.33 -print(pyo.value(model.y)) # 0.66 +print(pyo.value(model.x)) # 0.33 +print(pyo.value(model.y)) # 0.66 # delete a constraint del model.con2 solver.solve(model) -print(pyo.value(model.x)) # 1.0 -print(pyo.value(model.y)) # 0.0 +print(pyo.value(model.x)) # 1.0 +print(pyo.value(model.y)) # 0.0 # fix a variable model.x.fix(0.5) solver.solve(model) -print(pyo.value(model.x)) # 0.5 -print(pyo.value(model.y)) # 0.5 +print(pyo.value(model.x)) # 0.5 +print(pyo.value(model.y)) # 0.5 # unfix a variable model.x.unfix() solver.solve(model) -print(pyo.value(model.x)) # 1.0 -print(pyo.value(model.y)) # 0.0 - +print(pyo.value(model.x)) # 1.0 +print(pyo.value(model.y)) # 0.0 diff --git a/examples/pyomobook/scripts-ch/sudoku/sudoku.py b/examples/pyomobook/scripts-ch/sudoku/sudoku.py index f2515252eb8..ea0c0044e1d 100644 --- a/examples/pyomobook/scripts-ch/sudoku/sudoku.py +++ b/examples/pyomobook/scripts-ch/sudoku/sudoku.py @@ -4,73 +4,78 @@ # the list (row,col) entries subsq_to_row_col = dict() -subsq_to_row_col[1] = [(i,j) for i in range(1,4) for j in range(1,4)] -subsq_to_row_col[2] = [(i,j) for i in range(1,4) for j in range(4,7)] -subsq_to_row_col[3] = [(i,j) for i in range(1,4) for j in range(7,10)] +subsq_to_row_col[1] = [(i, j) for i in range(1, 4) for j in range(1, 4)] +subsq_to_row_col[2] = [(i, j) for i in range(1, 4) for j in range(4, 7)] +subsq_to_row_col[3] = [(i, j) for i in range(1, 4) for j in range(7, 10)] -subsq_to_row_col[4] = [(i,j) for i in range(4,7) for j in range(1,4)] -subsq_to_row_col[5] = [(i,j) for i in range(4,7) for j in range(4,7)] -subsq_to_row_col[6] = [(i,j) for i in range(4,7) for j in range(7,10)] +subsq_to_row_col[4] = [(i, j) for i in range(4, 7) for j in range(1, 4)] +subsq_to_row_col[5] = [(i, j) for i in range(4, 7) for j in range(4, 7)] +subsq_to_row_col[6] = [(i, j) for i in range(4, 7) for j in range(7, 10)] + +subsq_to_row_col[7] = [(i, j) for i in range(7, 10) for j in range(1, 4)] +subsq_to_row_col[8] = [(i, j) for i in range(7, 10) for j in range(4, 7)] +subsq_to_row_col[9] = [(i, j) for i in range(7, 10) for j in range(7, 10)] -subsq_to_row_col[7] = [(i,j) for i in range(7,10) for j in range(1,4)] -subsq_to_row_col[8] = [(i,j) for i in range(7,10) for j in range(4,7)] -subsq_to_row_col[9] = [(i,j) for i in range(7,10) for j in range(7,10)] # creates the sudoku model for a 10x10 board, where the # input board is a list of fixed numbers specified in # (row, col, val) tuples. def create_sudoku_model(board): - model = pyo.ConcreteModel() # store the starting board for the model model.board = board # create sets for rows columns and squares - model.ROWS = pyo.RangeSet(1,9) - model.COLS = pyo.RangeSet(1,9) - model.SUBSQUARES = pyo.RangeSet(1,9) - model.VALUES = pyo.RangeSet(1,9) + model.ROWS = pyo.RangeSet(1, 9) + model.COLS = pyo.RangeSet(1, 9) + model.SUBSQUARES = pyo.RangeSet(1, 9) + model.VALUES = pyo.RangeSet(1, 9) # create the binary variables to define the values model.y = pyo.Var(model.ROWS, model.COLS, model.VALUES, within=pyo.Binary) # fix variables based on the current board - for (r,c,v) in board: - model.y[r,c,v].fix(1) + for r, c, v in board: + model.y[r, c, v].fix(1) # create the objective - this is a feasibility problem # so we just make it a constant - model.obj = pyo.Objective(expr= 1.0) + model.obj = pyo.Objective(expr=1.0) -# @row_col_cons: + # @row_col_cons: # exactly one number in each row def _RowCon(model, r, v): - return sum(model.y[r,c,v] for c in model.COLS) == 1 + return sum(model.y[r, c, v] for c in model.COLS) == 1 + model.RowCon = pyo.Constraint(model.ROWS, model.VALUES, rule=_RowCon) # exactly one number in each column def _ColCon(model, c, v): - return sum(model.y[r,c,v] for r in model.ROWS) == 1 + return sum(model.y[r, c, v] for r in model.ROWS) == 1 + model.ColCon = pyo.Constraint(model.COLS, model.VALUES, rule=_ColCon) -# @:row_col_cons + # @:row_col_cons -# @subsq_con: + # @subsq_con: # exactly one number in each subsquare def _SqCon(model, s, v): - return sum(model.y[r,c,v] for (r,c) in subsq_to_row_col[s]) == 1 + return sum(model.y[r, c, v] for (r, c) in subsq_to_row_col[s]) == 1 + model.SqCon = pyo.Constraint(model.SUBSQUARES, model.VALUES, rule=_SqCon) -# @:subsq_con + # @:subsq_con -# @num_con: + # @num_con: # exactly one number in each cell def _ValueCon(model, r, c): - return sum(model.y[r,c,v] for v in model.VALUES) == 1 + return sum(model.y[r, c, v] for v in model.VALUES) == 1 + model.ValueCon = pyo.Constraint(model.ROWS, model.COLS, rule=_ValueCon) -# @:num_con + # @:num_con return model + # use this function to add a new integer cut to the model. def add_integer_cut(model): # add the ConstraintList to store the IntegerCuts if @@ -84,18 +89,24 @@ def add_integer_cut(model): for r in model.ROWS: for c in model.COLS: for v in model.VALUES: - if not model.y[r,c,v].fixed: + if not model.y[r, c, v].fixed: # check if the binary variable is on or off # note, it may not be exactly 1 - if pyo.value(model.y[r,c,v]) >= 0.5: - cut_expr += (1.0 - model.y[r,c,v]) + if pyo.value(model.y[r, c, v]) >= 0.5: + cut_expr += 1.0 - model.y[r, c, v] else: - cut_expr += model.y[r,c,v] + cut_expr += model.y[r, c, v] model.IntegerCuts.add(cut_expr >= 1) + # prints the current solution stored in the model def print_solution(model): for r in model.ROWS: - print(' '.join(str(v) for c in model.COLS - for v in model.VALUES - if pyo.value(model.y[r,c,v]) >= 0.5)) + print( + ' '.join( + str(v) + for c in model.COLS + for v in model.VALUES + if pyo.value(model.y[r, c, v]) >= 0.5 + ) + ) diff --git a/examples/pyomobook/scripts-ch/sudoku/sudoku_run.py b/examples/pyomobook/scripts-ch/sudoku/sudoku_run.py index ce8bed4ed4e..266362308fa 100644 --- a/examples/pyomobook/scripts-ch/sudoku/sudoku_run.py +++ b/examples/pyomobook/scripts-ch/sudoku/sudoku_run.py @@ -1,29 +1,47 @@ -from pyomo.opt import (SolverFactory, - TerminationCondition) -from sudoku import (create_sudoku_model, - print_solution, - add_integer_cut) +from pyomo.opt import SolverFactory, TerminationCondition +from sudoku import create_sudoku_model, print_solution, add_integer_cut # define the board -board = [(1,1,5),(1,2,3),(1,5,7), \ - (2,1,6),(2,4,1),(2,5,9),(2,6,5), \ - (3,2,9),(3,3,8),(3,8,6), \ - (4,1,8),(4,5,6),(4,9,3), \ - (5,1,4),(5,4,8),(5,6,3),(5,9,1), \ - (6,1,7),(6,5,2),(6,9,6), \ - (7,2,6),(7,7,2),(7,8,8), \ - (8,4,4),(8,5,1),(8,6,9),(8,9,5), \ - (9,5,8),(9,8,7),(9,9,9)] +board = [ + (1, 1, 5), + (1, 2, 3), + (1, 5, 7), + (2, 1, 6), + (2, 4, 1), + (2, 5, 9), + (2, 6, 5), + (3, 2, 9), + (3, 3, 8), + (3, 8, 6), + (4, 1, 8), + (4, 5, 6), + (4, 9, 3), + (5, 1, 4), + (5, 4, 8), + (5, 6, 3), + (5, 9, 1), + (6, 1, 7), + (6, 5, 2), + (6, 9, 6), + (7, 2, 6), + (7, 7, 2), + (7, 8, 8), + (8, 4, 4), + (8, 5, 1), + (8, 6, 9), + (8, 9, 5), + (9, 5, 8), + (9, 8, 7), + (9, 9, 9), +] model = create_sudoku_model(board) solution_count = 0 while 1: - with SolverFactory("glpk") as opt: results = opt.solve(model) - if results.solver.termination_condition != \ - TerminationCondition.optimal: + if results.solver.termination_condition != TerminationCondition.optimal: print("All board solutions have been found") break diff --git a/examples/pyomobook/scripts-ch/sudoku/sudoku_run.txt b/examples/pyomobook/scripts-ch/sudoku/sudoku_run.txt index 53c8162ff00..65bc61accd2 100644 --- a/examples/pyomobook/scripts-ch/sudoku/sudoku_run.txt +++ b/examples/pyomobook/scripts-ch/sudoku/sudoku_run.txt @@ -1,5 +1,3 @@ -WARNING: Constant objective detected, replacing with a placeholder to prevent - solver failure. Solution #1 5 3 4 6 7 8 9 1 2 6 7 2 1 9 5 3 4 8 @@ -10,6 +8,4 @@ Solution #1 9 6 1 5 3 7 2 8 4 2 8 7 4 1 9 6 3 5 3 4 5 2 8 6 1 7 9 -WARNING: Constant objective detected, replacing with a placeholder to prevent - solver failure. All board solutions have been found diff --git a/examples/pyomobook/scripts-ch/value_expression.py b/examples/pyomobook/scripts-ch/value_expression.py index 4b44d272d39..51c07500ea8 100644 --- a/examples/pyomobook/scripts-ch/value_expression.py +++ b/examples/pyomobook/scripts-ch/value_expression.py @@ -5,13 +5,10 @@ # unexpected expression instead of value a = model.u - 1 -print(a) # "u - 1" -print(type(a)) # +print(a) # "u - 1" +print(type(a)) # # correct way to access the value b = pyo.value(model.u) - 1 -print(b) # 1.0 -print(type(b)) # - - - +print(b) # 1.0 +print(type(b)) # diff --git a/examples/pyomobook/scripts-ch/value_expression.txt b/examples/pyomobook/scripts-ch/value_expression.txt index dce057630c9..746303ade1b 100644 --- a/examples/pyomobook/scripts-ch/value_expression.txt +++ b/examples/pyomobook/scripts-ch/value_expression.txt @@ -1,4 +1,4 @@ u - 1 - + 1.0 diff --git a/examples/pyomobook/scripts-ch/warehouse_cuts.py b/examples/pyomobook/scripts-ch/warehouse_cuts.py index 6bd868c1224..c6516e796af 100644 --- a/examples/pyomobook/scripts-ch/warehouse_cuts.py +++ b/examples/pyomobook/scripts-ch/warehouse_cuts.py @@ -1,9 +1,11 @@ import warnings + warnings.filterwarnings("ignore") # The following import/use is needed to prevent matplotlib from using # the X-backend on *nix platforms, which would fail when run in # automated testing environments or when $DISPLAY is not set. import matplotlib + matplotlib.use('agg') # @all: import json @@ -28,7 +30,7 @@ term_cond = results.solver.termination_condition print('') print('--- Solver Status: {0} ---'.format(term_cond)) - + if pyo.check_optimal_termination(results): # look at the solution print('Optimal Obj. Value = {0}'.format(pyo.value(model.obj))) @@ -41,13 +43,13 @@ expr1 = sum(model.y[i] for i in WH_True) expr2 = sum(model.y[i] for i in WH_False) model.integer_cuts.add( - sum(model.y[i] for i in WH_True) \ - - sum(model.y[i] for i in WH_False) \ - <= len(WH_True)-1) + sum(model.y[i] for i in WH_True) - sum(model.y[i] for i in WH_False) + <= len(WH_True) - 1 + ) else: done = True -x = range(1, len(objective_values)+1) +x = range(1, len(objective_values) + 1) plt.bar(x, objective_values, align='center') plt.gca().set_xticks(x) plt.xlabel('Solution Number') diff --git a/examples/pyomobook/scripts-ch/warehouse_load_solutions.py b/examples/pyomobook/scripts-ch/warehouse_load_solutions.py index 8719112813c..790333a0e64 100644 --- a/examples/pyomobook/scripts-ch/warehouse_load_solutions.py +++ b/examples/pyomobook/scripts-ch/warehouse_load_solutions.py @@ -17,10 +17,13 @@ # @load_solutions: from pyomo.opt import SolverStatus, TerminationCondition + # Wait to load the solution into the model until # after the solver status is checked results = solver.solve(model, load_solutions=False) -if (results.solver.status == SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal): +if (results.solver.status == SolverStatus.ok) and ( + results.solver.termination_condition == TerminationCondition.optimal +): # Manually load the solution into the model model.solutions.load_from(results) else: diff --git a/examples/pyomobook/scripts-ch/warehouse_model.py b/examples/pyomobook/scripts-ch/warehouse_model.py index 49c3b4ecdfc..f5983d3cd89 100644 --- a/examples/pyomobook/scripts-ch/warehouse_model.py +++ b/examples/pyomobook/scripts-ch/warehouse_model.py @@ -1,5 +1,6 @@ import pyomo.environ as pyo + def create_wl_model(data, P): # create the model model = pyo.ConcreteModel(name="(WL)") @@ -7,23 +8,29 @@ def create_wl_model(data, P): model.CUST = data['CUST'] model.dist = data['dist'] model.P = P - model.x = pyo.Var(model.WH, model.CUST, bounds=(0,1)) + model.x = pyo.Var(model.WH, model.CUST, bounds=(0, 1)) model.y = pyo.Var(model.WH, within=pyo.Binary) def obj_rule(m): - return sum(m.dist[w][c]*m.x[w,c] for w in m.WH for c in m.CUST) + return sum(m.dist[w][c] * m.x[w, c] for w in m.WH for c in m.CUST) + model.obj = pyo.Objective(rule=obj_rule) def one_per_cust_rule(m, c): - return sum(m.x[w,c] for w in m.WH) == 1 + return sum(m.x[w, c] for w in m.WH) == 1 + model.one_per_cust = pyo.Constraint(model.CUST, rule=one_per_cust_rule) def warehouse_active_rule(m, w, c): - return m.x[w,c] <= m.y[w] - model.warehouse_active = pyo.Constraint(model.WH, model.CUST, rule=warehouse_active_rule) + return m.x[w, c] <= m.y[w] + + model.warehouse_active = pyo.Constraint( + model.WH, model.CUST, rule=warehouse_active_rule + ) def num_warehouses_rule(m): return sum(m.y[w] for w in m.WH) <= m.P + model.num_warehouses = pyo.Constraint(rule=num_warehouses_rule) return model diff --git a/examples/pyomobook/scripts-ch/warehouse_print.py b/examples/pyomobook/scripts-ch/warehouse_print.py index 933824c5470..e0e2f961345 100644 --- a/examples/pyomobook/scripts-ch/warehouse_print.py +++ b/examples/pyomobook/scripts-ch/warehouse_print.py @@ -32,7 +32,7 @@ # @:printloopset # @printslicing: -for v in model.x['Ashland',:]: +for v in model.x['Ashland', :]: print('{0} = {1}'.format(v, pyo.value(v))) # @:printslicing diff --git a/examples/pyomobook/scripts-ch/warehouse_solver_options.py b/examples/pyomobook/scripts-ch/warehouse_solver_options.py index 0f60873e13d..c8eaf11a0f3 100644 --- a/examples/pyomobook/scripts-ch/warehouse_solver_options.py +++ b/examples/pyomobook/scripts-ch/warehouse_solver_options.py @@ -28,4 +28,5 @@ # @:script import os + os.remove('warehouse.log') diff --git a/examples/pyomobook/strip_examples.py b/examples/pyomobook/strip_examples.py index 7c6c95b01c3..0a65eef7c04 100644 --- a/examples/pyomobook/strip_examples.py +++ b/examples/pyomobook/strip_examples.py @@ -3,23 +3,24 @@ import os import os.path + def f(file): base, name = os.path.split(file) prefix = os.path.splitext(name)[0] if prefix.endswith('_strip'): return - with open(base+'/'+prefix+'_strip.py','w') as OUTPUT, \ - open(file,'r') as INPUT: + with open(base + '/' + prefix + '_strip.py', 'w') as OUTPUT, open( + file, 'r' + ) as INPUT: for line in INPUT: if line[0] == '#' and '@' in line: continue OUTPUT.write(line) -for file in glob.glob(os.path.abspath(os.path.dirname(__file__))+'/*/*.py'): +for file in glob.glob(os.path.abspath(os.path.dirname(__file__)) + '/*/*.py'): f(file) -for file in glob.glob(os.path.abspath(os.path.dirname(__file__))+'/*/*/*.py'): +for file in glob.glob(os.path.abspath(os.path.dirname(__file__)) + '/*/*/*.py'): f(file) - diff --git a/examples/pyomobook/test_book_examples.py b/examples/pyomobook/test_book_examples.py index 3d599ffe080..1aa2a3ed6b3 100644 --- a/examples/pyomobook/test_book_examples.py +++ b/examples/pyomobook/test_book_examples.py @@ -25,6 +25,7 @@ from pyomo.common.tee import capture_output import pyomo.environ as pyo + def gurobi_fully_licensed(): m = pyo.ConcreteModel() m.x = pyo.Var(list(range(2001)), within=pyo.NonNegativeReals) @@ -36,15 +37,21 @@ def gurobi_fully_licensed(): except: return False + parameterized, param_available = attempt_import('parameterized') if not param_available: raise unittest.SkipTest('Parameterized is not available.') +# Needed for testing (switches the matplotlib backend): +from pyomo.common.dependencies import matplotlib_available + +bool(matplotlib_available) + # Find all *.txt files, and use them to define baseline tests currdir = this_file_dir() datadir = currdir -solver_dependencies = { +solver_dependencies = { # abstract_ch 'test_abstract_ch_wl_abstract_script': ['glpk'], 'test_abstract_ch_pyomo_wl_abstract': ['glpk'], @@ -63,26 +70,21 @@ def gurobi_fully_licensed(): 'test_abstract_ch_pyomo_AbstractH': ['ipopt'], 'test_abstract_ch_AbstHLinScript': ['glpk'], 'test_abstract_ch_pyomo_AbstractHLinear': ['glpk'], - # blocks_ch 'test_blocks_ch_lotsizing': ['glpk'], 'test_blocks_ch_blocks_lotsizing': ['glpk'], - # dae_ch 'test_dae_ch_run_path_constraint_tester': ['ipopt'], - # gdp_ch 'test_gdp_ch_pyomo_gdp_uc': ['glpk'], 'test_gdp_ch_pyomo_scont': ['glpk'], 'test_gdp_ch_pyomo_scont2': ['glpk'], 'test_gdp_ch_scont_script': ['glpk'], - # intro_ch' 'test_intro_ch_pyomo_concrete1_generic': ['glpk'], 'test_intro_ch_pyomo_concrete1': ['glpk'], 'test_intro_ch_pyomo_coloring_concrete': ['glpk'], 'test_intro_ch_pyomo_abstract5': ['glpk'], - # mpec_ch 'test_mpec_ch_path1': ['path'], 'test_mpec_ch_nlp_ex1b': ['ipopt'], @@ -94,7 +96,6 @@ def gurobi_fully_licensed(): 'test_mpec_ch_nlp2': ['ipopt'], 'test_mpec_ch_nlp3': ['ipopt'], 'test_mpec_ch_mip1': ['glpk'], - # nonlinear_ch 'test_rosen_rosenbrock': ['ipopt'], 'test_react_design_ReactorDesign': ['ipopt'], @@ -103,7 +104,6 @@ def gurobi_fully_licensed(): 'test_multimodal_multimodal_init2': ['ipopt'], 'test_disease_est_disease_estimation': ['ipopt'], 'test_deer_DeerProblem': ['ipopt'], - # scripts_ch 'test_sudoku_sudoku_run': ['glpk'], 'test_scripts_ch_warehouse_script': ['glpk'], @@ -111,43 +111,35 @@ def gurobi_fully_licensed(): 'test_scripts_ch_warehouse_cuts': ['glpk'], 'test_scripts_ch_prob_mod_ex': ['glpk'], 'test_scripts_ch_attributes': ['glpk'], - # optimization_ch 'test_optimization_ch_ConcHLinScript': ['glpk'], - # overview_ch 'test_overview_ch_wl_mutable_excel': ['glpk'], 'test_overview_ch_wl_excel': ['glpk'], 'test_overview_ch_wl_concrete_script': ['glpk'], 'test_overview_ch_wl_abstract_script': ['glpk'], 'test_overview_ch_pyomo_wl_abstract': ['glpk'], - # performance_ch 'test_performance_ch_wl': ['gurobi', 'gurobi_persistent', 'gurobi_license'], 'test_performance_ch_persistent': ['gurobi_persistent'], } -package_dependencies = { +package_dependencies = { # abstract_ch' 'test_abstract_ch_pyomo_solve4': ['yaml'], 'test_abstract_ch_pyomo_solve5': ['yaml'], - # gdp_ch 'test_gdp_ch_pyomo_scont': ['yaml'], 'test_gdp_ch_pyomo_scont2': ['yaml'], 'test_gdp_ch_pyomo_gdp_uc': ['sympy'], - # overview_ch' 'test_overview_ch_wl_excel': ['pandas', 'xlrd'], 'test_overview_ch_wl_mutable_excel': ['pandas', 'xlrd'], - # scripts_ch' 'test_scripts_ch_warehouse_cuts': ['matplotlib'], - # performance_ch' - 'test_performance_ch_wl': ['numpy','matplotlib'], + 'test_performance_ch_wl': ['numpy', 'matplotlib'], } - # # Initialize the availability data # @@ -155,7 +147,7 @@ def gurobi_fully_licensed(): available_solvers = check_available_solvers(*solvers_used) if gurobi_fully_licensed(): available_solvers.append('gurobi_license') -solver_available = {solver_:(solver_ in available_solvers) for solver_ in solvers_used} +solver_available = {solver_: (solver_ in available_solvers) for solver_ in solvers_used} package_available = {} package_modules = {} @@ -182,7 +174,8 @@ def check_skip(name): return "Solver%s %s %s not available" % ( 's' if len(_missing) > 1 else '', ", ".join(_missing), - 'are' if len(_missing) > 1 else 'is',) + 'are' if len(_missing) > 1 else 'is', + ) if name in package_dependencies: packages_ = package_dependencies[name] @@ -195,53 +188,63 @@ def check_skip(name): return "Package%s %s %s not available" % ( 's' if len(_missing) > 1 else '', ", ".join(_missing), - 'are' if len(_missing) > 1 else 'is',) + 'are' if len(_missing) > 1 else 'is', + ) # This is a hack, xlrd dropped support for .xlsx files in 2.0.1 which # causes problems with older versions of Pandas<=1.1.5 so skipping # tests requiring both these packages when incompatible versions are found - if 'pandas' in package_dependencies[name] and 'xlrd' in package_dependencies[name]: - if check_min_version(package_modules['xlrd'], '2.0.1') and \ - not check_min_version(package_modules['pandas'], '1.1.6'): + if ( + 'pandas' in package_dependencies[name] + and 'xlrd' in package_dependencies[name] + ): + if check_min_version( + package_modules['xlrd'], '2.0.1' + ) and not check_min_version(package_modules['pandas'], '1.1.6'): return "Incompatible versions of xlrd and pandas" return False + def filter(line): """ Ignore certain text when comparing output with baseline """ - for field in ( '[', - 'password:', - 'http:', - 'Job ', - 'Importing module', - 'Function', - 'File', - 'Matplotlib', - '-------', - '=======', - ' ^'): + for field in ( + '[', + 'password:', + 'http:', + 'Job ', + 'Importing module', + 'Function', + 'File', + 'Matplotlib', + '-------', + '=======', + ' ^', + ): if line.startswith(field): return True - for field in ( 'Total CPU', - 'Ipopt', - 'license', - 'Status: optimal', - 'Status: feasible', - 'time:', - 'Time:', - 'with format cpxlp', - 'usermodel = >sys.stderr, err diff --git a/pyomo/checker/doc/recompile.rb b/pyomo/checker/doc/recompile.rb deleted file mode 100644 index 236a1e878d5..00000000000 --- a/pyomo/checker/doc/recompile.rb +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env ruby - -require 'rb-inotify' - -notifier = INotify::Notifier.new - -notifier.watch(".", :modify) do |event| - if event.name.end_with? ".tex" - puts "#{`date`.chomp}: Modified #{event.name}; recompiling..." - `pdflatex #{event.name} > /dev/null 2>&1` - end -end - -notifier.run diff --git a/pyomo/checker/hooks.py b/pyomo/checker/hooks.py deleted file mode 100644 index 4f287185a64..00000000000 --- a/pyomo/checker/hooks.py +++ /dev/null @@ -1,25 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -__all__ = ['IPreCheckHook', 'IPostCheckHook'] - -from pyomo.common.plugin import Interface - -class IPreCheckHook(Interface): - - def precheck(self, runner, script, info): - pass - - -class IPostCheckHook(Interface): - - def postcheck(self, runner, script, info): - pass diff --git a/pyomo/checker/plugins/__init__.py b/pyomo/checker/plugins/__init__.py deleted file mode 100644 index e2375cf8a85..00000000000 --- a/pyomo/checker/plugins/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -def load(): - import pyomo.checker.plugins.checker - import pyomo.checker.plugins.function - import pyomo.checker.plugins.model - import pyomo.checker.plugins.checkers diff --git a/pyomo/checker/plugins/checker.py b/pyomo/checker/plugins/checker.py deleted file mode 100644 index 9df7e1d8bda..00000000000 --- a/pyomo/checker/plugins/checker.py +++ /dev/null @@ -1,139 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import sys -import re -import textwrap - -from pyomo.common.plugin import SingletonPlugin, implements, ExtensionPoint -from pyomo.checker import IModelChecker, IPreCheckHook, IPostCheckHook - - -class PyomoModelChecker(SingletonPlugin): - - implements(IModelChecker, inherit=True) - - _prehooks = ExtensionPoint(IPreCheckHook) - _posthooks = ExtensionPoint(IPostCheckHook) - - def __init__(self): - self._currentRunner = None - self._currentScript = None - - def _check(self, runner, script, info): - self._runner = runner - self._script = script - - for prehook in self._prehooks: - prehook.precheck(runner, script, info) - - try: - self.check(runner, script, info) - except Exception: - e = sys.exc_info()[1] - print(self.checkerLabel() + "ERROR during check call!") - raise e - - for posthook in self._posthooks: - posthook.postcheck(runner, script, info) - - self._runner = None - self._script = None - - def check(self, runner, script, info): - # Should be `pass` - checkers are not guaranteed to call - # superclass when running their own check() methods - pass - - def _beginChecking(self, runner, script): - self._currentRunner = runner - self._currentScript = script - - try: - self.beginChecking(runner, script) - except Exception: - print(self.checkerLabel() + "ERROR during pre-check call!") - - def beginChecking(self, runner, script): - pass - - def _endChecking(self, runner, script): - try: - self.endChecking(runner, script) - except Exception: - print(self.checkerLabel() + "ERROR during pre-check call!") - - self._currentRunner = None - self._currentScript = None - - def endChecking(self, runner, script): - pass - - def _checkerName(self): - match = re.search(r"", str(self.__class__)) - return match.group(1).split(".")[-1] - - def _checkerPackage(self): - match = re.search(r"", str(self.__class__)) - return match.group(1).split(".")[-3] - - def checkerLabel(self): - return "[" + self._checkerPackage() + "::" + self._checkerName() + "] " - - def checkerDoc(self): - return "" - - def problem(self, message = "Error", runner = None, script = None, lineno = None): - if script is None: - script = self._currentScript - if runner is None: - runner = self._currentRunner - - output = self.checkerLabel() - - if script is not None: - output += script.filename() + ":" - if lineno is not None: - output += str(lineno) + ":" - else: - output += ":" - - output += " " + message - - print(output) - - try: - if runner.verbose: - if len(self.checkerDoc()) > 0: - lines = textwrap.dedent(self.checkerDoc()).split("\n") - lines = filter((lambda x : len(x) > 0), lines) - for line in lines: - print(self.checkerLabel() + line) - print - except Exception: - print(self.checkerLabel() + "ERROR during verbose info generation") - print - - -class ImmediateDataChecker(PyomoModelChecker): - pass - - -class IterativeDataChecker(PyomoModelChecker): - pass - - -class ImmediateTreeChecker(PyomoModelChecker): - pass - - -class IterativeTreeChecker(PyomoModelChecker): - pass diff --git a/pyomo/checker/plugins/checkers/__init__.py b/pyomo/checker/plugins/checkers/__init__.py deleted file mode 100644 index be77f8b8416..00000000000 --- a/pyomo/checker/plugins/checkers/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import pyomo.checker.plugins.checkers.sample -import pyomo.checker.plugins.checkers.py3k -import pyomo.checker.plugins.checkers.model diff --git a/pyomo/checker/plugins/checkers/model/__init__.py b/pyomo/checker/plugins/checkers/model/__init__.py deleted file mode 100644 index 718ca644784..00000000000 --- a/pyomo/checker/plugins/checkers/model/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import pyomo.checker.plugins.checkers.model.model -import pyomo.checker.plugins.checkers.model.imports -import pyomo.checker.plugins.checkers.model.rule -import pyomo.checker.plugins.checkers.model.conditional -import pyomo.checker.plugins.checkers.model.assignment diff --git a/pyomo/checker/plugins/checkers/model/_rulebase.py b/pyomo/checker/plugins/checkers/model/_rulebase.py deleted file mode 100644 index dd681e038c2..00000000000 --- a/pyomo/checker/plugins/checkers/model/_rulebase.py +++ /dev/null @@ -1,32 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import ast - -from pyomo.checker.plugins.checker import IterativeTreeChecker -from pyomo.checker.plugins.function import FunctionTrackerHook - - -class _ModelRuleChecker(IterativeTreeChecker, FunctionTrackerHook): - - def check(self, runner, script, info): - if isinstance(info, ast.Call): - if hasattr(info.func,'id') and info.func.id in ['Objective', 'Constraint', 'Var', 'Param', 'Set']: - for keyword in info.keywords: - if keyword.arg == 'rule': - if isinstance(keyword.value, ast.Name): - funcname = keyword.value.id - if funcname in script.functionDefs: - funcdef = script.functionDefs[funcname] - self.checkBody(funcdef) - - def checkBody(self, funcdef): - pass diff --git a/pyomo/checker/plugins/checkers/model/assignment.py b/pyomo/checker/plugins/checkers/model/assignment.py deleted file mode 100644 index 29f8ce5bf75..00000000000 --- a/pyomo/checker/plugins/checkers/model/assignment.py +++ /dev/null @@ -1,59 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import ast -import pyomo.common.plugin - -from pyomo.checker.plugins.checker import IterativeTreeChecker -from pyomo.checker.plugins.model import ModelTrackerHook - - -class ArrayValue(IterativeTreeChecker, ModelTrackerHook): - - pyomo.common.plugin.alias('model.array_value', 'Check if assigning a value to an array of variables') - - varArrays = {} - - def checkerDoc(self): - return """\ - Assigning a value to an array of variables does nothing. - """ - - def checkVarArray(self, script, node): - """Check for the creation of a new VarArray; store name if created""" - - if isinstance(node.value, ast.Call): - if isinstance(node.value.func, ast.Name): - if node.value.func.id == 'Var': - if len(node.value.args) > 0: - for target in node.targets: - if isinstance(target, ast.Attribute): - if isinstance(target.value, ast.Name): - if target.value.id in script.modelVars: - if target.value.id not in self.varArrays: - self.varArrays[target.value.id] = [] - self.varArrays[target.value.id].append(target.attr) - - def checkArrayValue(self, script, node): - for target in node.targets: - if isinstance(target, ast.Attribute): - if isinstance(target.value, ast.Attribute): - if isinstance(target.value.value, ast.Name): - if target.value.value.id in script.modelVars: - if target.value.value.id in self.varArrays: - if target.value.attr in self.varArrays[target.value.value.id]: - if target.attr == 'value': - self.problem("Assigning value to variable array {0}.{1}".format(target.value.value.id, target.value.attr), lineno = node.lineno) - - def check(self, runner, script, info): - if isinstance(info, ast.Assign): - self.checkVarArray(script, info) - self.checkArrayValue(script, info) diff --git a/pyomo/checker/plugins/checkers/model/conditional.py b/pyomo/checker/plugins/checkers/model/conditional.py deleted file mode 100644 index ff5db5a36f2..00000000000 --- a/pyomo/checker/plugins/checkers/model/conditional.py +++ /dev/null @@ -1,95 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import ast -import pyomo.common.plugin - -from pyomo.checker.plugins.model import ModelTrackerHook -from pyomo.checker.plugins.checkers.model._rulebase import _ModelRuleChecker - - -class ModelValue(_ModelRuleChecker, ModelTrackerHook): - - pyomo.common.plugin.alias('model.value', 'Check if comparisons are done using the "value()" function.') - - def checkerDoc(self): - return """\ - Comparisons done on model objects should generally be wrapped in - a call to value(). The comparison alone will not produce a True/False - result, but instead generate an expression for later use in a model. - """ - - def check(self, runner, script, info): - # call superclass to execute checkBody() as necessary - _ModelRuleChecker.check(self, runner, script, info) - - # also check global If statements - if isinstance(info, ast.If): - self.checkCompare(info.test, script = script) - - def checkBody(self, funcdef): - """Check the body of a function definition for model comparisons local - to its scope (i.e. using its model argument).""" - - if not isinstance(funcdef.args.args[0], ast.Name): - return - modelArg = funcdef.args.args[0].id - - for bodyNode in funcdef.body: - for node in ast.walk(bodyNode): - if isinstance(node, ast.If): - self.checkCompare(node.test, modelName = modelArg) - - def checkCompare(self, compare, modelName = None, script = None): - """Check an AST Compare node - iterate for Attribute nodes and match - against modelName argument. Recurse for script's model defs.""" - - if modelName is None and script is None: - return - - if modelName is not None: - valueCallArgs = [] - generatorExps = [] - for node in ast.walk(compare): - if isinstance(node, ast.Attribute): - if isinstance(node.value, ast.Name): - if node.value.id == modelName: - wrapped = self.checkWrapped(node, valueCallArgs, generatorExps) - if not wrapped: - self.problem("Comparison on attribute {0}.{1} not wrapped in value()".format(modelName, node.attr), lineno=compare.lineno) - elif isinstance(node, ast.Call): - if isinstance(node.func, ast.Name): - if node.func.id == 'value': - valueCallArgs.append(node.args) - elif isinstance(node, ast.GeneratorExp): - generatorExps.append(node) - - if script is not None: - for name in script.modelVars: - self.checkCompare(compare, modelName = name) - - def checkWrapped(self, attrNode, valueCallArgs, generatorExps): - """check if the given attribute node has been 'wrapped', either - in a value() call or as part of the iterator in a generator - expression""" - for i in range(len(valueCallArgs)): - for j in range(len(valueCallArgs[i])): - # i = call idx (to return), j = arg idx - argNode = valueCallArgs[i][j] - for subnode in ast.walk(argNode): - if subnode is attrNode: - return True - for genExp in generatorExps: - for generator in genExp.generators: - for subnode in ast.walk(generator.iter): - if subnode is attrNode: - return True - return False diff --git a/pyomo/checker/plugins/checkers/model/imports.py b/pyomo/checker/plugins/checkers/model/imports.py deleted file mode 100644 index b1bd2c68cf3..00000000000 --- a/pyomo/checker/plugins/checkers/model/imports.py +++ /dev/null @@ -1,54 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import ast -import pyomo.common.plugin - -from pyomo.checker.plugins.checker import IterativeTreeChecker - - -class Imports(IterativeTreeChecker): - """ - Check that an import for the pyomo.core or pyomo.environ packages - exists somewhere within the initial imports block - """ - - pyomo.common.plugin.alias('model.imports', 'Check if pyomo.core or pyomo.environ has been imported.') - - def beginChecking(self, runner, script): - self.pyomoImported = False - - def endChecking(self, runner, script): - if not self.pyomoImported: - self.problem("The model script never imports pyomo.core or pyomo.environ.") - - def checkerDoc(self): - return """\ - You may have trouble creating model components. - Consider adding the following statement at the - top of your model file: - from pyomo.environ import * - """ - - def check(self, runner, script, info): - if isinstance(info, ast.Import): - for name in info.names: - if isinstance(name, ast.alias): - if name.name == 'pyomo.core': - self.pyomoImported = True - elif name.name == 'pyomo.environ': - self.pyomoImported = True - - if isinstance(info, ast.ImportFrom): - if info.module == 'pyomo.core': - self.pyomoImported = True - elif info.module == 'pyomo.environ': - self.pyomoImported = True diff --git a/pyomo/checker/plugins/checkers/model/model.py b/pyomo/checker/plugins/checkers/model/model.py deleted file mode 100644 index 0637e51fefd..00000000000 --- a/pyomo/checker/plugins/checkers/model/model.py +++ /dev/null @@ -1,95 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import ast -import pyomo.common.plugin - -from pyomo.checker.plugins.checker import IterativeTreeChecker - - -class ModelName(IterativeTreeChecker): - - pyomo.common.plugin.alias('model.model_name', 'Check that the "model" variable is assigned with a Pyomo model.') - - def beginChecking(self, runner, script): - self.modelAssigned = False - - def endChecking(self, runner, script): - if not self.modelAssigned: - self.problem("Global object 'model' is never assigned.") - - def checkerDoc(self): - return """\ - Pyomo will be unable to execute this model - file without additional arguments. - """ - - def checkTarget(self, node): - if isinstance(node, ast.Name) and node.id == 'model': - self.modelAssigned = True - - def check(self, runner, script, info): - # If assigning, check target name - if isinstance(info, ast.Assign): - for target in info.targets: - self.checkTarget(target) - - # Handle multiple assignment - if isinstance(target, ast.Tuple): - for elt in target.elts: - self.checkTarget(elt) - - -class ModelCreate(IterativeTreeChecker): - - pyomo.common.plugin.alias('model.create', 'Check if a Pyomo model class is being assigned to a variable.') - - def getTargetStrings(self, assign): - ls = [] - for target in assign.targets: - if isinstance(target, ast.Name): - ls.append(target.id) - elif isinstance(target, ast.Tuple): - ls.extend(list(map((lambda x: x.id), target.elts))) # TODO probably not resilient - return ls - - def checkerDoc(self): - return """\ - Usually, developers create an instance of a Pyomo model class. - It is rare that the class itself needs to be assigned to a - variable, e.g.: - x = ConcreteModel => x = ConcreteModel() - """ - - def check(self, runner, script, info): - if isinstance(info, ast.Assign): - if 'model' in self.getTargetStrings(info): - if isinstance(info.value, ast.Name): - if info.value.id in ['Model', 'AbstractModel', 'ConcreteModel']: - self.problem("Possible incorrect assignment of " + info.value.id + " class instead of instance", lineno = info.lineno) - - -class DeprecatedModel(IterativeTreeChecker): - - pyomo.common.plugin.alias('model.Model_class', 'Check if the deprecated Model class is being used.') - - def checkerDoc(self): - return """\ - The Model class is no longer supported as an object that can be - created - instead, use the ConcreteModel or AbstractModel class. - """ - - def check(self, runner, script, info): - if isinstance(info, ast.Assign): - if isinstance(info.value, ast.Call): - if isinstance(info.value.func, ast.Name): - if info.value.func.id == 'Model': - self.problem("Deprecated use of Model class", lineno = info.value.func.lineno) diff --git a/pyomo/checker/plugins/checkers/model/rule.py b/pyomo/checker/plugins/checkers/model/rule.py deleted file mode 100644 index 60e8eb8b681..00000000000 --- a/pyomo/checker/plugins/checkers/model/rule.py +++ /dev/null @@ -1,112 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import sys -import ast -import pyomo.common.plugin - -from pyomo.checker.plugins.model import ModelTrackerHook -from pyomo.checker.plugins.checker import IterativeTreeChecker -from pyomo.checker.plugins.checkers.model._rulebase import _ModelRuleChecker - - -if sys.version_info < (3,0): - def arg_name(arg): - return arg.id -else: - def arg_name(arg): - return arg.arg - - -if False: - # WEH: I don't think we should complain about this. - - class ModelShadowing(IterativeTreeChecker, ModelTrackerHook): - - pyomo.common.plugin.alias('model.rule.shadowing', 'Ignoring for now') - - def checkerDoc(self): - return """\ - Reusing the name of your model variable in a rule may lead to problems where - the variable shadows the global value. In your rule definitions, - consider changing the name of the model argument. - """ - - def check(self, runner, script, info): - if isinstance(info, ast.FunctionDef): - for arg in info.args.args: - if isinstance(arg, ast.Name): - if arg.id in script.modelVars: - self.problem("Function {0} may shadow model variable {1}".format(info.name, arg.id), lineno=info.lineno) - - -class ModelAccess(IterativeTreeChecker, ModelTrackerHook): - - pyomo.common.plugin.alias('model.rule.model_access', 'Check that a rule does not reference a global model instance.') - - def checkerDoc(self): - return """\ - Within model rules, you should access the instance of the model that - is passed in to the function, rather than the global model instance. - For example: - def rule(m, i): - return m.x[i] >= 10.0 # not model.x[i] - """ - - def check(self, runner, script, info): - if isinstance(info, ast.FunctionDef): - attrNodes = [x for x in list(ast.walk(info)) if isinstance(x, ast.Attribute)] - for attrNode in attrNodes: - if attrNode.value.id in script.modelVars: - args = getattr(script, 'functionArgs', []) - if len(args) > 0 and not attrNode.value.id in list(arg_name(arg) for arg in args[-1].args): - # NOTE: this probably will not catch arguments defined as keyword arguments. - self.problem("Expression '{0}.{1}' may access a model variable that is outside of the function scope".format(attrNode.value.id, attrNode.attr), lineno=attrNode.lineno) - - -class ModelArgument(_ModelRuleChecker): - - pyomo.common.plugin.alias('model.rule.model_argument', 'Check that the model instance is the first argument for a rule.') - - def checkerDoc(self): - return """\ - Model rule functions must have the model as the first argument in - the function definition. For example, change: - def con_rule(i, model): # ... - To: - def con_rule(model, i): # ... - """ - - def checkBody(self, funcdef): - for bodyNode in funcdef.body: - for node in ast.walk(bodyNode): - if isinstance(node, ast.Attribute): - if isinstance(node.value, ast.Name): - if node.value.id != arg_name(funcdef.args.args[0]): - self.problem("Model variable '{0}' is used in the rule, but this variable is not first argument in the rule argument list".format(node.value.id), lineno=funcdef.lineno) - - -class NoneReturn(_ModelRuleChecker): - - pyomo.common.plugin.alias('model.rule.none_return', 'Check that a rule does not return the value None.') - - def checkerDoc(self): - return """\ - Model rule functions may not return None. - """ - - def checkBody(self, funcdef): - """Look for statements of the format 'return None'""" - for node in ast.walk(funcdef): - if isinstance(node, ast.Return): - if isinstance(node.value, ast.Name): - if node.value.id == 'None': - self.problem("Cannot return None from model rule {0}".format(funcdef.name), lineno=funcdef.lineno) diff --git a/pyomo/checker/plugins/checkers/py3k/printing.py b/pyomo/checker/plugins/checkers/py3k/printing.py deleted file mode 100644 index 6e3a86c84de..00000000000 --- a/pyomo/checker/plugins/checkers/py3k/printing.py +++ /dev/null @@ -1,36 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import re -import pyomo.common.plugin -from pyomo.checker.plugins.checker import IterativeDataChecker - - -class PrintParens(IterativeDataChecker): - - pyomo.common.plugin.alias('py3k.print_parens', 'Check if print statements have parentheses.') - - def __init__(self): - self.current_lineno = 0 - - def check(self, runner, script, info): - self.current_lineno = info[0] - line = info[1] - if re.search(r"print[^\(]", line) is not None: - self.problem("Print statements in Python 3.x require parentheses", lineno = info[0]) - - def checkerDoc(self): - return """\ - In Python 3, 'print' changed from a language keyword to a function. - As such, developers need to surround the arguments to 'print' with - parentheses, e.g.: - print "Hello" => print("Hello") - """ diff --git a/pyomo/checker/plugins/checkers/py3k/range.py b/pyomo/checker/plugins/checkers/py3k/range.py deleted file mode 100644 index a2c48444322..00000000000 --- a/pyomo/checker/plugins/checkers/py3k/range.py +++ /dev/null @@ -1,32 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import ast -import pyomo.common.plugin -from pyomo.checker.plugins.checker import IterativeTreeChecker - - -class XRange(IterativeTreeChecker): - - pyomo.common.plugin.alias('py3k.xrange', 'Check if the xrange() function is used.') - - def check(self, runner, script, info): - if isinstance(info, ast.Name): - if info.id == 'xrange': - self.problem("'xrange' function was removed in Python 3.") - - def checkerDoc(self): - return """\ - In Python 3, 'xrange' was removed in favor of 'range', which was - reimplemented more efficiently. Please change your uses of 'xrange' - into 'range', e.g.: - xrange(1,10) => range(1,10) - """ diff --git a/pyomo/checker/plugins/checkers/sample/__init__.py b/pyomo/checker/plugins/checkers/sample/__init__.py deleted file mode 100644 index eed73f43029..00000000000 --- a/pyomo/checker/plugins/checkers/sample/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import pyomo.checker.plugins.checkers.sample.printing diff --git a/pyomo/checker/plugins/function.py b/pyomo/checker/plugins/function.py deleted file mode 100644 index 0099276af6f..00000000000 --- a/pyomo/checker/plugins/function.py +++ /dev/null @@ -1,53 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import ast - -from pyomo.common.plugin import SingletonPlugin, implements -from pyomo.checker.hooks import IPreCheckHook, IPostCheckHook - - -class FunctionTrackerHook(SingletonPlugin): - - implements(IPreCheckHook) - implements(IPostCheckHook) - - def precheck(self, runner, script, info): - # create models dict if nonexistent - if getattr(script, 'functionDefs', None) is None: - script.functionDefs = {} - # create function argument stack if nonexistent - if getattr(script, 'functionArgs', None) is None: - script.functionArgs = [] - - # add new function definitions - if isinstance(info, ast.FunctionDef): - script.functionDefs[info.name] = info - script.functionArgs.append(info.args) - - # update function def dictionary with assignments - elif isinstance(info, ast.Assign): - if isinstance(info.value, ast.Name): - if info.value.id in script.functionDefs: - for target in info.targets: - if isinstance(target, ast.Name): - script.functionDefs[target.id] = script.functionDefs[info.value.id] - else: - for target in info.targets: - if isinstance(target, ast.Name): - if target.id in script.functionDefs: - del script.functionDefs[target.id] - - def postcheck(self, runner, script, info): - """Remove function args from the stack""" - if isinstance(info, ast.FunctionDef): - script.functionArgs.pop() - diff --git a/pyomo/checker/plugins/model.py b/pyomo/checker/plugins/model.py deleted file mode 100644 index 48421b085e9..00000000000 --- a/pyomo/checker/plugins/model.py +++ /dev/null @@ -1,48 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import ast - -from pyomo.common.plugin import SingletonPlugin, implements -from pyomo.checker.hooks import IPreCheckHook - - -class ModelTrackerHook(SingletonPlugin): - - implements(IPreCheckHook) - - def precheck(self, runner, script, info): - # create models dict if nonexistent - if getattr(script, 'modelVars', None) is None: - script.modelVars = {} - - # parse AST node - if isinstance(info, ast.Assign): - if isinstance(info.value, ast.Call): - if isinstance(info.value.func, ast.Name): - if info.value.func.id.endswith("Model"): - for target in info.targets: - if isinstance(target, ast.Name): - script.modelVars[target.id] = info.value.func.id - elif isinstance(target, ast.Tuple): - for elt in target.elts: - if isinstance(elt, ast.Name): - script.modelVars[elt.id] = info.value.func.id - else: - for target in info.targets: - if isinstance(target, ast.Name): - if target.id in script.modelVars: - del script.modelVars[target.id] - elif isinstance(target, ast.Tuple): - for elt in target.elts: - if isinstance(elt, ast.Name): - if elt.id in script.modelVars: - del script.modelVars[elt.id] diff --git a/pyomo/checker/runner.py b/pyomo/checker/runner.py deleted file mode 100644 index ba49cf44463..00000000000 --- a/pyomo/checker/runner.py +++ /dev/null @@ -1,134 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import ast - -from pyomo.common.plugin import ExtensionPoint -from pyomo.checker.checker import IModelChecker -from pyomo.checker.script import ModelScript - - -class CheckingNodeVisitor(ast.NodeVisitor): - - def __init__(self, runner, script, tc = [], dc = [], pt = ""): - """ - @param tc iterative tree checkers - @param dc iterative data checkers - @param pt program text - """ - - super(CheckingNodeVisitor, self).__init__() - - self.runner = runner - self.script = script - self.treeCheckers = tc - self.dataCheckers = dc - self.programLines = pt.split("\n") - self.running_lineno = 0 - - def sendBegin(self): - for checker in self.treeCheckers + self.dataCheckers: - checker._beginChecking(self.runner, self.script) - - def sendEnd(self): - for checker in self.treeCheckers + self.dataCheckers: - checker._endChecking(self.runner, self.script) - - def generic_visit(self, node): - if 'lineno' in dir(node): - current_lineno = node.lineno - if current_lineno > self.running_lineno: - self.running_lineno = current_lineno - for checker in self.dataCheckers: - checker._check(self.runner, self.script, (current_lineno, self.programLines[current_lineno - 1])) - - for checker in self.treeCheckers: - checker._check(self.runner, self.script, node) - - super(CheckingNodeVisitor, self).generic_visit(node) - - -class ModelCheckRunner(object): - - _checkers = ExtensionPoint(IModelChecker) - - def __init__(self): - self.scripts = [] - - def run(self, *args, **kwargs): - from pyomo.checker.plugins.checker import ImmediateDataChecker, IterativeDataChecker, ImmediateTreeChecker, IterativeTreeChecker - - # Get args - script = kwargs.pop("script", None) - verbose = kwargs.pop("verbose", False) - checkers = kwargs.pop("checkers", {}) - - # Store args as necessary - self.verbose = verbose - - # Add script, if given - if script is not None: - self.addScript(ModelScript(script)) - - # Enable listed checkers - if checkers == {}: - print("WARNING: No checkers enabled!") - for c in self._checkers(all=True): - if c._checkerPackage() in checkers: - if c._checkerName() in checkers[c._checkerPackage()]: - c.enable() - else: - c.disable() - else: - c.disable() - - # Show checkers if requested - if False: - printable = {} - for c in self._checkers(): - if c._checkerPackage() not in printable: - printable[c._checkerPackage()] = [c._checkerName()] - else: - printable[c._checkerPackage()].append(c._checkerName()) - - for package in printable: - print("{0}: {1}".format(package, " ".join(printable[package]))) - print("") - - # Pre-partition checkers - immDataCheckers = [c for c in self._checkers if isinstance(c, ImmediateDataChecker)] - iterDataCheckers = [c for c in self._checkers if isinstance(c, IterativeDataChecker)] - immTreeCheckers = [c for c in self._checkers if isinstance(c, ImmediateTreeChecker)] - iterTreeCheckers = [c for c in self._checkers if isinstance(c, IterativeTreeChecker)] - - for script in self.scripts: - # Read in the script and call data checkers - data = script.read() - for checker in immDataCheckers: - checker._beginChecking(self, script) - checker._check(self, script, data) - checker._endChecking(self, script) - - # Get the data into a parse tree - tree = ast.parse(data) - for checker in immTreeCheckers: - checker._beginChecking(self, script) - checker._check(self, script, tree) - checker._endChecking(self, script) - - # Start walking the tree, calling checkers along the way - visitor = CheckingNodeVisitor(self, script, tc=iterTreeCheckers, dc=iterDataCheckers, pt = data) - visitor.sendBegin() - visitor.visit(tree) - visitor.sendEnd() - - def addScript(self, script): - self.scripts.append(script) diff --git a/pyomo/checker/script.py b/pyomo/checker/script.py deleted file mode 100644 index 7d53fabbd40..00000000000 --- a/pyomo/checker/script.py +++ /dev/null @@ -1,34 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -class ModelScript(object): - - def __init__(self, filename=None, text=None): - if filename is not None: - self._filename = filename - elif text is not None: - self._filename = None - self._text = text - else: - raise ValueError("Must provide either a script file or text data") - - def read(self): - if self._filename is not None: - with open(self._filename, 'r') as f: - return f.read() - else: - return self._text - - def filename(self): - if self._filename is not None: - return self._filename - else: - return "" diff --git a/pyomo/checker/tests/examples.yml b/pyomo/checker/tests/examples.yml deleted file mode 100644 index f00e56477e4..00000000000 --- a/pyomo/checker/tests/examples.yml +++ /dev/null @@ -1,72 +0,0 @@ -py3k: - PrintParens: - 1: - problems: 1 - 2: - problems: 2 - XRange: - 1: - problems: 1 -model: - Imports: - missing: - problems: 1 - wrong: - problems: 1 - ModelName: - missing: - problems: 1 - ModelCreate: - nocall: - problems: 1 - lines: [13] - DeprecatedModel: - wrong: - problems: 1 - lines: [11] - ModelAccess: - global: - problems: 1 - lines: [17] - global2: - problems: 2 - lines: [18, 20] - ModelArgument: - norule: - problems: 0 - firstarg: - problems: 0 - lastarg: - problems: 1 - lines: [17] - ModelValue: - globalif: - problems: 1 - lines: [16] - multiif: - problems: 2 - lines: [16, 22] - repeatif: - problems: 4 - lines: [16, 20, 23, 24] - ruleif: - problems: 1 - lines: [17] - globallistcomp: - problems: 1 - lines: [17] - rulelistcomp: - problems: 1 - lines: [18] - NoneReturn: - wrong: - problems: 1 - lines: [16] - ArrayValue: - nomodel: - problems: 0 - varonly: - problems: 0 - wrong: - problems: 2 - lines: [18, 19] diff --git a/pyomo/checker/tests/examples/model/ArrayValue_nomodel.py b/pyomo/checker/tests/examples/model/ArrayValue_nomodel.py deleted file mode 100644 index 57b99ec6cbe..00000000000 --- a/pyomo/checker/tests/examples/model/ArrayValue_nomodel.py +++ /dev/null @@ -1,18 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import Var - -class Foo: - pass -anotherObject = Foo() -anotherObject.x = Var([10]) -anotherObject.x.value = 42 diff --git a/pyomo/checker/tests/examples/model/ArrayValue_varonly.py b/pyomo/checker/tests/examples/model/ArrayValue_varonly.py deleted file mode 100644 index bdc5fd4dcd3..00000000000 --- a/pyomo/checker/tests/examples/model/ArrayValue_varonly.py +++ /dev/null @@ -1,19 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import AbstractModel, Var, NonNegativeReals - -model = AbstractModel() -model.w = Var(within=NonNegativeReals) -model.x = Var() - -model.w.value = 42 -model.x.value = 42 diff --git a/pyomo/checker/tests/examples/model/ArrayValue_wrong.py b/pyomo/checker/tests/examples/model/ArrayValue_wrong.py deleted file mode 100644 index 4a7ccd5abef..00000000000 --- a/pyomo/checker/tests/examples/model/ArrayValue_wrong.py +++ /dev/null @@ -1,20 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import AbstractModel, Var, RangeSet - -model = AbstractModel() -model.y = Var([10]) -model.s = RangeSet(10) -model.z = Var(model.s) - -model.y.value = 42 -model.z.value = 42 diff --git a/pyomo/checker/tests/examples/model/DeprecatedModel_wrong.py b/pyomo/checker/tests/examples/model/DeprecatedModel_wrong.py deleted file mode 100644 index c3f4645d918..00000000000 --- a/pyomo/checker/tests/examples/model/DeprecatedModel_wrong.py +++ /dev/null @@ -1,12 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -model = Model() diff --git a/pyomo/checker/tests/examples/model/Imports_wrong.py b/pyomo/checker/tests/examples/model/Imports_wrong.py deleted file mode 100644 index 9320e403e95..00000000000 --- a/pyomo/checker/tests/examples/model/Imports_wrong.py +++ /dev/null @@ -1,11 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - diff --git a/pyomo/checker/tests/examples/model/ModelAccess_global.py b/pyomo/checker/tests/examples/model/ModelAccess_global.py deleted file mode 100644 index ebe56a6168e..00000000000 --- a/pyomo/checker/tests/examples/model/ModelAccess_global.py +++ /dev/null @@ -1,19 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import ConcreteModel, Var, Constraint - -model = ConcreteModel() -model.X = Var() - -def c_rule(m): - return model.X >= 10.0 # wrongly access global 'model' -model.C = Constraint(rule=c_rule) diff --git a/pyomo/checker/tests/examples/model/ModelAccess_global2.py b/pyomo/checker/tests/examples/model/ModelAccess_global2.py deleted file mode 100644 index 5fd8778fb61..00000000000 --- a/pyomo/checker/tests/examples/model/ModelAccess_global2.py +++ /dev/null @@ -1,22 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import ConcreteModel, Var, Constraint - -model = ConcreteModel() -model.X = Var() - -def c_rule(m): - try: - return model.X >= 10.0 - except Exception: - return model.X >= 20.0 -model.C = Constraint(rule=c_rule) diff --git a/pyomo/checker/tests/examples/model/ModelArgument_lastarg.py b/pyomo/checker/tests/examples/model/ModelArgument_lastarg.py deleted file mode 100644 index 59fa605bc6f..00000000000 --- a/pyomo/checker/tests/examples/model/ModelArgument_lastarg.py +++ /dev/null @@ -1,20 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import ConcreteModel, RangeSet, Var, Constraint - -model = ConcreteModel() -model.S = RangeSet(10) -model.X = Var(model.S) - -def C_rule(i, m): - return m.X[i] >= 10.0 -model.C = Constraint(rule=C_rule) diff --git a/pyomo/checker/tests/examples/model/ModelCreate_nocall.py b/pyomo/checker/tests/examples/model/ModelCreate_nocall.py deleted file mode 100644 index 6f9bbd4b764..00000000000 --- a/pyomo/checker/tests/examples/model/ModelCreate_nocall.py +++ /dev/null @@ -1,14 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import AbstractModel - -model = AbstractModel diff --git a/pyomo/checker/tests/examples/model/ModelValue_globallistcomp.py b/pyomo/checker/tests/examples/model/ModelValue_globallistcomp.py deleted file mode 100644 index 59bcd7903a0..00000000000 --- a/pyomo/checker/tests/examples/model/ModelValue_globallistcomp.py +++ /dev/null @@ -1,21 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import AbstractModel, RangeSet, Var, value - -model = AbstractModel() -model.S = RangeSet(10) -model.X = Var(model.S) - -if sum(model.X[i] for i in model.S) <= 10.0: - pass -if sum(value(model.X[i]) for i in model.S) <= 10.0: - pass diff --git a/pyomo/checker/tests/examples/model/ModelValue_multiif.py b/pyomo/checker/tests/examples/model/ModelValue_multiif.py deleted file mode 100644 index ec79dd17374..00000000000 --- a/pyomo/checker/tests/examples/model/ModelValue_multiif.py +++ /dev/null @@ -1,29 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import AbstractModel, Var, Constraint, value - -model = AbstractModel() -model.X = Var() - -if model.X >= 10.0: - pass -if value(model.X) >= 10.0: - pass - -def c_rule(m): - if m.X >= 10.0: - pass - if value(m.X) >= 10.0: - pass - return m.X >= 10.0 - -model.C = Constraint(rule=c_rule) diff --git a/pyomo/checker/tests/examples/model/ModelValue_ruleif.py b/pyomo/checker/tests/examples/model/ModelValue_ruleif.py deleted file mode 100644 index 0164f0265c3..00000000000 --- a/pyomo/checker/tests/examples/model/ModelValue_ruleif.py +++ /dev/null @@ -1,24 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import AbstractModel, Var, Constraint, value - -model = AbstractModel() -model.X = Var() - -def c_rule(m): - if m.X >= 10.0: - pass - if value(m.X) >= 10.0: - pass - return m.X >= 10.0 - -model.C = Constraint(rule=c_rule) diff --git a/pyomo/checker/tests/examples/model/ModelValue_rulelistcomp.py b/pyomo/checker/tests/examples/model/ModelValue_rulelistcomp.py deleted file mode 100644 index 3e107fe06f0..00000000000 --- a/pyomo/checker/tests/examples/model/ModelValue_rulelistcomp.py +++ /dev/null @@ -1,24 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import AbstractModel, RangeSet, Var, Constraint, value - -model = AbstractModel() -model.S = RangeSet(10) -model.X = Var(model.S) - -def c_rule(m, i): - if sum(m.X[i] for i in m.S) <= 10.0: - pass - if sum(value(m.X[i]) for i in m.S) <= 10.0: - pass - return sum(m.X[i] for i in m.S) <= 10.0 -model.C = Constraint(rule=c_rule) diff --git a/pyomo/checker/tests/examples/model/NoneReturn_wrong.py b/pyomo/checker/tests/examples/model/NoneReturn_wrong.py deleted file mode 100644 index 7b253f703fc..00000000000 --- a/pyomo/checker/tests/examples/model/NoneReturn_wrong.py +++ /dev/null @@ -1,19 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from pyomo.environ import AbstractModel, Var, Constraint - -model = AbstractModel() -model.X = Var() - -def c_rule(m): - return None -model.C = Constraint(rule=c_rule) diff --git a/pyomo/checker/tests/examples/py3k/PrintParens_1.py b/pyomo/checker/tests/examples/py3k/PrintParens_1.py deleted file mode 100644 index 065eb3e7f6c..00000000000 --- a/pyomo/checker/tests/examples/py3k/PrintParens_1.py +++ /dev/null @@ -1,12 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -print "Hello, world!" diff --git a/pyomo/checker/tests/examples/py3k/PrintParens_2.py b/pyomo/checker/tests/examples/py3k/PrintParens_2.py deleted file mode 100644 index 7f69ed5f480..00000000000 --- a/pyomo/checker/tests/examples/py3k/PrintParens_2.py +++ /dev/null @@ -1,13 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -print "Hello, world!" -print "Hello, world!" diff --git a/pyomo/checker/tests/examples/py3k/XRange_1.py b/pyomo/checker/tests/examples/py3k/XRange_1.py deleted file mode 100644 index 28f6412149c..00000000000 --- a/pyomo/checker/tests/examples/py3k/XRange_1.py +++ /dev/null @@ -1,13 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - - -xrange(10) diff --git a/pyomo/checker/tests/test_examples.py b/pyomo/checker/tests/test_examples.py deleted file mode 100644 index a9c52ff1926..00000000000 --- a/pyomo/checker/tests/test_examples.py +++ /dev/null @@ -1,86 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import sys -import os - -import pyomo.common.unittest as unittest - -from pyomo.checker import ModelCheckRunner -from pyomo.checker.plugins.checker import PyomoModelChecker - -from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args - -currdir = os.path.dirname(os.path.abspath(__file__)) -exdir = os.path.join(currdir, "examples") - -def createTestMethod(defs, package, checkerName, key): - def testMethod(obj, name): - import pyomo.environ - runner = ModelCheckRunner() - path = os.path.join(exdir, package, "{0}_{1}.py".format(checkerName, key)) - runner.run(script = path, checkers = {package:[checkerName]}) - - checker = runner._checkers()[0] - pc = checker.problemCount - lns = checker.linenos - checker.resetProblemCount() - obj.assertEqual(defs[package][checkerName][key]['problems'], pc) - if 'lines' in defs[package][checkerName][key]: - obj.assertEqual(sorted(lns), sorted(defs[package][checkerName][key]['lines'])) - return testMethod - - -def assignTests(cls): - defs = yaml.load(open(os.path.join(currdir, 'examples.yml'), 'r'), - **yaml_load_args) - - for package in defs: - for checkerName in defs[package]: - for key in defs[package][checkerName]: - attrName = "{0}_{1}_{2}".format(package, checkerName, key) - cls.add_fn_test(name=attrName, fn=createTestMethod(defs, package, checkerName, key)) - #setattr(cls, attrName, createTestMethod(defs, package, checkerName, key)) - - -class ExampleTest(unittest.TestCase): - """ - Test an example script, provided in the 'scripts' directory. - """ - - def setUp(self): - def mockProblem(self, message = "Error", runner = None, script = None, lineno = None): - self.problemCount += 1 - if lineno is not None: - self.linenos.append(lineno) - def resetProblemCount(self): - self.problemCount = 0 - self.linenos = [] - PyomoModelChecker.problem_ = PyomoModelChecker.problem - PyomoModelChecker.problem = mockProblem - PyomoModelChecker.problemCount = 0 - PyomoModelChecker.linenos = [] - PyomoModelChecker.resetProblemCount = resetProblemCount - - def tearDown(self): - PyomoModelChecker.problem = PyomoModelChecker.problem_ - del PyomoModelChecker.problemCount - del PyomoModelChecker.resetProblemCount - -if yaml_available: - # Disable test for py3k. For some reason, this messes up nose - if not (sys.version_info[0:2] >= (3,0)): - assignTests(ExampleTest) - - -if __name__ == "__main__": - unittest.main() - diff --git a/pyomo/checker/tests/test_runner.py b/pyomo/checker/tests/test_runner.py deleted file mode 100644 index 77ac6183ef1..00000000000 --- a/pyomo/checker/tests/test_runner.py +++ /dev/null @@ -1,112 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import os - -import pyomo.common.unittest as unittest - -from pyomo.checker import ModelCheckRunner, ModelScript -from pyomo.checker.plugins.checker import IModelChecker, ImmediateDataChecker, ImmediateTreeChecker, IterativeDataChecker, IterativeTreeChecker - - -currdir = os.path.dirname(os.path.abspath(__file__)) - -class MockChecker(object): - checkCount = 0 - def check(self, runner, script, info): - self.checkCount += 1 - def _checkerPackage(self): - return 'mock' - def resetCount(self): - self.checkCount = 0 - -# Multiple inheritance, because otherwise something weird happens with the plugin system -# MockChecker MUST BE specified first so that its overridden methods take precedence -# See http://www.python.org/download/releases/2.3/mro/ - -class MockImmediateDataChecker(MockChecker, ImmediateDataChecker): pass -class MockImmediateTreeChecker(MockChecker, ImmediateTreeChecker): pass -class MockIterativeDataChecker(MockChecker, IterativeDataChecker): pass -class MockIterativeTreeChecker(MockChecker, IterativeTreeChecker): pass - -class RunnerTest(unittest.TestCase): - """ - Test the ModelCheckRunner class. - """ - - testScripts = [ - "print('Hello, world!')\n", - "import sys\nsys.stdout.write('Hello, world!\\n')\n" - "for i in range(10):\n\tprint(i)\n" - ] - - def test_init(self): - "Check that a ModelCheckRunner instantiates properly" - - runner = ModelCheckRunner() - - self.assertEqual([], runner.scripts) - self.assertTrue(len(runner._checkers(all=True)) > 0) - for c in runner._checkers: - self.assertTrue(IModelChecker in c._implements) - - def test_addScript(self): - "Check that a runner handles its script list properly" - - runner = ModelCheckRunner() - expectedScriptCount = 0 - - for text in self.testScripts: - self.assertEqual(expectedScriptCount, len(runner.scripts)) - - script = ModelScript(text = text) - runner.addScript(script) - expectedScriptCount += 1 - - self.assertEqual(expectedScriptCount, len(runner.scripts)) - self.assertTrue(script in runner.scripts) - - def test_run_immediate(self): - "Check that a runner calls check() on an immediate checker" - - for text in self.testScripts: - - runner = ModelCheckRunner() - script = ModelScript(text = text) - runner.addScript(script) - - runner.run(checkers = {'mock':['MockImmediateDataChecker', 'MockImmediateTreeChecker']}) - - for klass in [MockImmediateDataChecker, MockImmediateTreeChecker]: - mockChecker = list(filter((lambda c : c.__class__ == klass), runner._checkers()))[0] - self.assertEqual(1, mockChecker.checkCount) - mockChecker.resetCount() - - def test_run_iterative(self): - "Check that a runner calls check() on an iterative checker" - - for text in self.testScripts: - - runner = ModelCheckRunner() - script = ModelScript(text = text) - runner.addScript(script) - - runner.run(checkers = {'mock':['MockIterativeDataChecker', 'MockIterativeTreeChecker']}) - - for klass in [MockIterativeDataChecker, MockIterativeTreeChecker]: - mockChecker = list(filter((lambda c : c.__class__ == klass), runner._checkers()))[0] - self.assertTrue(mockChecker.checkCount >= 1) - mockChecker.resetCount() - - -if __name__ == "__main__": - unittest.main() - diff --git a/pyomo/checker/tests/test_script.py b/pyomo/checker/tests/test_script.py deleted file mode 100644 index 9a1835b3414..00000000000 --- a/pyomo/checker/tests/test_script.py +++ /dev/null @@ -1,60 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import os -import tempfile - -import pyomo.common.unittest as unittest - -from pyomo.checker import ModelScript - -currdir = os.path.dirname(os.path.abspath(__file__)) - -class ScriptTest(unittest.TestCase): - """ - Test the ModelScript class. Checks both raw text and file-based - interfaces (using the tempfile module). - """ - - testScripts = [ - "print('Hello, world!')\n", - "import sys\nsys.stdout.write('Hello, world!\\n')\n" - "for i in range(10):\n\tprint(i)\n" - ] - - def testScriptText(self): - "Check ModelScript handling of raw text scripts" - - for text in self.testScripts: - script = ModelScript(text = text) - self.assertEqual(text, script.read()) - self.assertEqual("", script.filename()) - - def testScriptFile(self): - "Check ModelScript handling of file-based scripts" - - for text in self.testScripts: - file, filename = tempfile.mkstemp() - - with os.fdopen(file, 'w') as f: - f.write(text) - - script = ModelScript(filename = filename) - - self.assertEqual(text, script.read()) - self.assertEqual(filename, script.filename()) - - os.unlink(filename) - - -if __name__ == "__main__": - unittest.main() - diff --git a/pyomo/common/__init__.py b/pyomo/common/__init__.py index d3ed8c62c63..563974b5617 100644 --- a/pyomo/common/__init__.py +++ b/pyomo/common/__init__.py @@ -17,9 +17,12 @@ from .factory import Factory from .fileutils import ( - Executable, Library, + Executable, + Library, # The following will be deprecated soon - register_executable, registered_executable, unregister_executable + register_executable, + registered_executable, + unregister_executable, ) from . import config, dependencies, shutdown, timing from .deprecation import deprecated diff --git a/pyomo/common/_command.py b/pyomo/common/_command.py index 33c9f538047..ae633648ace 100644 --- a/pyomo/common/_command.py +++ b/pyomo/common/_command.py @@ -22,25 +22,27 @@ registry = {} + # # Decorate functions that are Pyomo commands # def pyomo_command(name=None, doc=None): # def wrap(fn): - if name is None: #pragma:nocover + if name is None: # pragma:nocover logger.error("Error applying decorator. No command name!") return - if doc is None: #pragma:nocover + if doc is None: # pragma:nocover logger.error("Error applying decorator. No command documentation!") return # global registry registry[name] = doc return fn + # return wrap -def get_pyomo_commands(): #pragma:nocover +def get_pyomo_commands(): # pragma:nocover return registry diff --git a/pyomo/common/_common.py b/pyomo/common/_common.py index 786c69377e0..21a5ddcc7bc 100644 --- a/pyomo/common/_common.py +++ b/pyomo/common/_common.py @@ -25,7 +25,8 @@ except NameError: old_help = None -def help(thing=None): #pragma:nocover + +def help(thing=None): # pragma:nocover if not thing is None and hasattr(thing, '__help__'): print(thing.__help__) else: @@ -33,8 +34,9 @@ def help(thing=None): #pragma:nocover raise NameError("Builtin 'help' is not available") old_help(thing) + try: __builtins__['help'] = help -except: #pragma:nocover +except: # pragma:nocover # If this fails, then just die silently. pass diff --git a/pyomo/common/autoslots.py b/pyomo/common/autoslots.py new file mode 100644 index 00000000000..1b55a818b83 --- /dev/null +++ b/pyomo/common/autoslots.py @@ -0,0 +1,344 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import types +from collections import namedtuple +from copy import deepcopy +from weakref import ref as _weakref_ref + +_autoslot_info = namedtuple( + '_autoslot_info', ['has_dict', 'slots', 'slot_mappers', 'field_mappers'] +) + + +def _deepcopy_tuple(obj, memo, _id): + ans = [] + unchanged = True + for item in obj: + new_item = fast_deepcopy(item, memo) + ans.append(new_item) + if new_item is not item: + unchanged = False + if unchanged: + # Python does not duplicate "unchanged" tuples (i.e. allows the + # original objecct to be returned from deepcopy()). We will + # preserve that behavior here. + # + # It also appears to be faster *not* to cache the fact that this + # particular tuple was unchanged by the deepcopy (Note: the + # standard library also does not cache the unchanged tuples in + # the memo) + # + # memo[_id] = obj + return obj + memo[_id] = ans = tuple(ans) + return ans + + +def _deepcopy_list(obj, memo, _id): + # Two steps here because a list can include itself + memo[_id] = ans = [] + ans.extend(fast_deepcopy(x, memo) for x in obj) + return ans + + +def _deepcopy_dict(obj, memo, _id): + # Two steps here because a dict can include itself + memo[_id] = ans = {} + for key, val in obj.items(): + ans[fast_deepcopy(key, memo)] = fast_deepcopy(val, memo) + return ans + + +def _deepcopier(obj, memo, _id): + return deepcopy(obj, memo) + + +_atomic_types = { + int, + float, + bool, + complex, + bytes, + str, + type, + range, + type(None), + types.BuiltinFunctionType, + types.FunctionType, +} + +_deepcopy_mapper = {tuple: _deepcopy_tuple, list: _deepcopy_list, dict: _deepcopy_dict} + + +def fast_deepcopy(obj, memo): + """A faster implementation of copy.deepcopy() + + Python's default implementation of deepcopy has several features that + are slower than they need to be. This is an implementation of + deepcopy that provides special handling to circumvent some of the + slowest parts of deepcopy(). + + """ + if obj.__class__ in _atomic_types: + return obj + _id = id(obj) + if _id in memo: + return memo[_id] + else: + return _deepcopy_mapper.get(obj.__class__, _deepcopier)(obj, memo, _id) + + +class AutoSlots(type): + """Metaclass to automatically collect `__slots__` for generic pickling + + The class `__slots__` are collected in reverse MRO order. + + Any fields that require special handling are handled through + callbacks specified through the `__autoslot_mappers__` class + attribute. `__autoslot_mappers__` should be a `dict` that maps the + field name (either `__slot__` or regular `__dict__` entry) to a + function with the signature: + + mapper(encode: bool, val: Any) -> Any + + The value from the object field (or state) is passed to the mapper + function, and the function returns the corrected value. + `__getstate__` calls the mapper with `encode=True`, and + `__setstate__` calls the mapper with `encode=False`. + `__autoslot_mappers__` class attributes are collected and combined + in reverse MRO order (so duplicate mappers in more derived classes + will replace mappers defined in base classes). + + :py:class:`AutoSlots` defines several common mapper functions, including: + + - :py:meth:`AutoSlots.weakref_mapper` + - :py:meth:`AutoSlots.weakref_sequence_mapper` + - :py:meth:`AutoSlots.encode_as_none` + + Result + ~~~~~~ + + This metaclass will add a `__auto_slots__` class attribute to the + class (and all derived classes). This attribute is an instance of a + :py:class:`_autoslot_info` named 4-tuple: + + (has_dict, slots, slot_mappers, field_mappers) + + has_dict: bool + True if this class has a `__dict__` attribute (that would need to + be pickled in addition to the `__slots__`) + + slots: tuple + Tuple of all slots declared for this class (the union of any + slots declared locally with all slots declared on any base class) + + slot_mappers: dict + Dict mapping index in `slots` to a function with signature + `mapper(encode: bool, val: Any)` that can be used to encode or + decode that slot + + field_mappers: dict + Dict mapping field name in `__dict__` to a function with signature + `mapper(encode: bool, val: Any)` that can be used to encode or + decode that field value. + + """ + + _ignore_slots = {'__weakref__', '__dict__'} + + def __init__(cls, name, bases, classdict): + super().__init__(name, bases, classdict) + AutoSlots.collect_autoslots(cls) + + @staticmethod + def collect_autoslots(cls): + has_dict = '__dict__' in dir(cls.__mro__[0]) + + slots = [] + seen = set() + for c in reversed(cls.__mro__): + for slot in getattr(c, '__slots__', ()): + if slot in seen: + continue + if slot in AutoSlots._ignore_slots: + continue + seen.add(slot) + slots.append(slot) + slots = tuple(slots) + + slot_mappers = {} + dict_mappers = {} + for c in reversed(cls.__mro__): + for slot, mapper in getattr(c, '__autoslot_mappers__', {}).items(): + if slot in seen: + slot_mappers[slots.index(slot)] = mapper + else: + dict_mappers[slot] = mapper + + cls.__auto_slots__ = _autoslot_info(has_dict, slots, slot_mappers, dict_mappers) + + @staticmethod + def weakref_mapper(encode, val): + """__autoslot_mappers__ mapper for fields that contain weakrefs + + This mapper expects to be passed a field containing either a + weakref or None. It will resolve the weakref to a hard + reference when generating a state, and then convert the hard + reference back to a weakref when restoring the state. + + """ + if val is None: + return val + if encode: + return val() + else: + return _weakref_ref(val) + + @staticmethod + def weakref_sequence_mapper(encode, val): + """__autoslot_mappers__ mapper for fields with sequences of weakrefs + + This mapper expects to be passed a field that is a sequence of + weakrefs. It will resolve all weakrefs when generating a state, + and then convert the hard references back to a weakref when + restoring the state. + + """ + if val is None: + return val + if encode: + return val.__class__(v() for v in val) + else: + return val.__class__(_weakref_ref(v) for v in val) + + @staticmethod + def encode_as_none(encode, val): + """__autoslot_mappers__ mapper that will replace fields with None + + This mapper will encode the field as None (regardless of the + current field value). No mapping occurs when restoring a state. + + """ + if encode: + return None + else: + return val + + class Mixin(object): + """Mixin class to configure a class hierarchy to use AutoSlots + + Inheriting from this class will set up the automatic generation + of the `__auto_slots__` class attribute, and define the standard + implementations for `__deepcopy__`, `__getstate__`, and + `__setstate__`. + + """ + + __slots__ = () + + def __init_subclass__(cls, **kwds): + """Automatically define `__auto_slots__` on derived subclasses + + This accomplishes the same thing as the AutoSlots metaclass + without incurring the overhead / runtime penalty of using a + metaclass. + + """ + super().__init_subclass__(**kwds) + AutoSlots.collect_autoslots(cls) + + def __deepcopy__(self, memo): + """Default implementation of `__deepcopy__` based on `__getstate__` + + This defines a default implementation of `__deepcopy__` that + leverages :py:meth:`__getstate__` and :py:meth:`__setstate__` + to duplicate an object. Having a default `__deepcopy__` + implementation shortcuts significant logic in + :py:func:`copy.deepcopy()`, thereby speeding up deepcopy + operations. + + """ + # Note: this implementation avoids deepcopying the temporary + # 'state' list, significantly speeding things up. + memo[id(self)] = ans = self.__class__.__new__(self.__class__) + ans.__setstate__( + [fast_deepcopy(field, memo) for field in self.__getstate__()] + ) + return ans + + def __getstate__(self): + """Generic implementation of `__getstate__` + + This implementation will collect the slots (in order) and + then the `__dict__` (if necessary) and place everything into a + `list`. This standard format is significantly faster to + generate and deepcopy (when compared to a `dict`), although + it can be more fragile (changing the number of slots can + cause a pickle to no longer be loadable) + + Derived classes should not overload this method to provide + special handling for fields (e.g., to resolve weak + references). Instead, special field handlers should be + declared via the `__autoslot_mappers__` class attribute (see + :py:class:`AutoSlots`) + + """ + slots = [getattr(self, attr) for attr in self.__auto_slots__.slots] + # Map (encode) the slot values + for idx, mapper in self.__auto_slots__.slot_mappers.items(): + slots[idx] = mapper(True, slots[idx]) + # Copy and add the fields from __dict__ (if present) + if self.__auto_slots__.has_dict: + fields = dict(self.__dict__) + # Map (encode) any field values. It is not an error if + # the field if not present. + for name, mapper in self.__auto_slots__.field_mappers.items(): + if name in fields: + fields[name] = mapper(True, fields[name]) + slots.append(fields) + return slots + + def __setstate__(self, state): + """Generic implementation of `__setstate__` + + Restore the state generated by :py:meth:`__getstate__()` + + Derived classes should not overload this method to provide + special handling for fields (e.g., to restore weak + references). Instead, special field handlers should be + declared via the `__autoslot_mappers__` class attribute (see + :py:class:`AutoSlots`) + + """ + # Map (decode) the slot values + for idx, mapper in self.__auto_slots__.slot_mappers.items(): + state[idx] = mapper(False, state[idx]) + # + # Note: per the Python data model docs, we explicitly set the + # attribute using object.__setattr__() instead of setting + # self.__dict__[key] = val. + # + # Restore the slots + setter = object.__setattr__ + for attr, val in zip(self.__auto_slots__.slots, state): + setter(self, attr, val) + # If this class is not fully slotized, then pull off the + # __dict__ fields and map their values (if necessary) + if self.__auto_slots__.has_dict: + fields = state[-1] + for name, mapper in self.__auto_slots__.field_mappers.items(): + if name in fields: + fields[name] = mapper(False, fields[name]) + # Note that it appears to be faster to clear()/update() + # than to simplify assign to __dict__. + self.__dict__.clear() + self.__dict__.update(fields) diff --git a/pyomo/common/backports.py b/pyomo/common/backports.py index c6fd5db015c..a945bd89c2a 100644 --- a/pyomo/common/backports.py +++ b/pyomo/common/backports.py @@ -9,6 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + # backport of contextlib.nullcontext for supporting Python < 3.7 class nullcontext(object): def __init__(self, enter_result=None): diff --git a/pyomo/common/cmake_builder.py b/pyomo/common/cmake_builder.py index 743e8c5debe..71358c29fb2 100644 --- a/pyomo/common/cmake_builder.py +++ b/pyomo/common/cmake_builder.py @@ -19,18 +19,23 @@ import pyomo.common.envvar as envvar from pyomo.common.fileutils import this_file_dir, find_executable + def handleReadonly(function, path, excinfo): excvalue = excinfo[1] if excvalue.errno == errno.EACCES: - os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777 + os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # 0777 function(path) else: raise -def build_cmake_project(targets, package_name=None, description=None, - user_args=[], parallel=None): - import distutils.core + +def build_cmake_project( + targets, package_name=None, description=None, user_args=[], parallel=None +): + # Note: setuptools must be imported before distutils to avoid + # warnings / errors with recent setuptools distributions from setuptools import Extension + import distutils.core from distutils.command.build_ext import build_ext class _CMakeBuild(build_ext, object): @@ -57,8 +62,7 @@ def _cmake_build_target(self, cmake_ext): # --parallel was only added in cmake 3.12. Use an # environment variable so that we don't have to bump # the minimum cmake version. - os.environ['CMAKE_BUILD_PARALLEL_LEVEL'] = str( - cmake_ext.parallel) + os.environ['CMAKE_BUILD_PARALLEL_LEVEL'] = str(cmake_ext.parallel) cmake = find_executable('cmake') if cmake is None: @@ -69,11 +73,19 @@ def _cmake_build_target(self, cmake_ext): # harness should take care of dependencies and this # will prevent repeated builds in MSVS # - #self.spawn(['cmake', '--build', '.', + # self.spawn(['cmake', '--build', '.', # '--config', cmake_config]) - self.spawn([cmake, '--build', '.', - '--target', 'install', - '--config', cmake_config]) + self.spawn( + [ + cmake, + '--build', + '.', + '--target', + 'install', + '--config', + cmake_config, + ] + ) finally: # Restore stderr sys.stderr.flush() @@ -85,7 +97,8 @@ class CMakeExtension(Extension, object): def __init__(self, target_dir, user_args, parallel): # don't invoke the original build_ext for this special extension super(CMakeExtension, self).__init__( - self.__class__.__qualname__, sources=[]) + self.__class__.__qualname__, sources=[] + ) self.target_dir = target_dir self.user_args = user_args self.parallel = parallel diff --git a/pyomo/common/collections/__init__.py b/pyomo/common/collections/__init__.py index 0c15d543caf..9ffd1e931f6 100644 --- a/pyomo/common/collections/__init__.py +++ b/pyomo/common/collections/__init__.py @@ -10,9 +10,7 @@ # ___________________________________________________________________________ -from collections.abc import ( - MutableMapping, MutableSet, Mapping, Set, Sequence -) +from collections.abc import MutableMapping, MutableSet, Mapping, Set, Sequence from collections import UserDict from .orderedset import OrderedDict, OrderedSet diff --git a/pyomo/common/collections/bunch.py b/pyomo/common/collections/bunch.py index b0974abcc9b..f19e4ad64e3 100644 --- a/pyomo/common/collections/bunch.py +++ b/pyomo/common/collections/bunch.py @@ -4,7 +4,7 @@ # Copyright (c) 2008-2022 # National Technology and Engineering Solutions of Sandia, LLC # Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -17,106 +17,148 @@ # ___________________________________________________________________________ import shlex +from collections.abc import Mapping class Bunch(dict): - """ - A class that can be used to store a bunch of data dynamically. - This class allows all other attributes to have a default value of None. - This borrows the output formatting ideas from the + """A class that can be used to store a bunch of data dynamically. + + This class allows for unspecified attributes to have a default value + of None. This borrows the output formatting ideas from the ActiveState Code Container (recipe 496697). + + For historical reasons, attributes / keys are stored in the + underlying dict unless they begin with an underscore, in which case + they are stored as object attributes. + """ def __init__(self, *args, **kw): + self._name_ = self.__class__.__name__ for arg in args: + if not isinstance(arg, str): + raise TypeError("Bunch() positional arguments must be strings") for item in shlex.split(arg): - r = item.find('=') - if r != -1: - try: - val = eval(item[r + 1:]) - except: - val = item[r + 1:] - kw[item[:r]] = val - dict.__init__(self, kw) - self.__dict__.update(kw) - if not '_name_' in kw: - self._name_ = self.__class__.__name__ + item = item.split('=', 1) + if len(item) != 2: + raise ValueError( + "Bunch() positional arguments must be space separated " + f"strings of form 'key=value', got '{item[0]}'" + ) + + # Historically, this used 'exec'. That is unsafe in + # this context (because anyone can pass arguments to a + # Bunch). While not strictly backwards compatible, + # Pyomo was not using this for anything past parsing + # None/float/int values. We will explicitly parse those + # values + try: + val = float(item[1]) + if int(val) == val: + val = int(val) + item[1] = val + except: + if item[1].strip() == 'None': + item[1] = None + self[item[0]] = item[1] + for k, v in kw.items(): + self[k] = v def update(self, d): """ The update is specialized for JSON-like data. This - recursively replaces dictionaries with Container objects. + recursively replaces dictionaries with Bunch objects. """ - for k in d: - if type(d[k]) is dict: - tmp = Bunch() - tmp.update(d[k]) - self.__setattr__(k, tmp) - elif type(d[k]) is list: - val = [] - for i in d[k]: - if type(i) is dict: - tmp = Bunch() - tmp.update(i) - val.append(tmp) - else: - val.append(i) - self.__setattr__(k, val) + + def _replace_dict_in_list(lst): + ans = [] + for v in lst: + if type(v) is dict: + ans.append(Bunch()) + ans[-1].update(v) + elif type(v) is list: + ans.append(_replace_dict_in_list(v)) + else: + ans.append(v) + return ans + + if isinstance(d, Mapping): + item_iter = d.items() + else: + item_iter = d + for k, v in item_iter: + if type(v) is dict: + self[k] = Bunch() + self[k].update(v) + elif type(v) is list: + self[k] = _replace_dict_in_list(v) else: - self.__setattr__(k, d[k]) + self[k] = v def set_name(self, name): self._name_ = name + def __getitem__(self, name): + if not isinstance(name, str): + raise ValueError(f'Bunch keys must be str (got {type(name).__name__})') + # Map through Python's standard getattr functionality (which + # will resolve known attributes without hitting __getattr__) + return getattr(self, name) + def __setitem__(self, name, val): - self.__setattr__(name, val) + if not isinstance(name, str): + raise ValueError(f'Bunch keys must be str (got {type(name).__name__})') + setattr(self, name, val) - def __getitem__(self, name): - return self.__getattr__(name) + def __delitem__(self, name): + if not isinstance(name, str): + raise ValueError(f'Bunch keys must be str (got {type(name).__name__})') + delattr(self, name) + + def __getattr__(self, name): + if name[0] == '_': + raise AttributeError(f"Unknown attribute '{name}'") + return self.get(name, None) def __setattr__(self, name, val): - if name[0] != '_': - dict.__setitem__(self, name, val) - self.__dict__[name] = val + if name[0] == '_': + super().__setattr__(name, val) + else: + super().__setitem__(name, val) - def __getattr__(self, name): - try: - return dict.__getitem__(self, name) - except: - if name[0] == '_': - raise AttributeError("Unknown attribute %s" % name) - return None + def __delattr__(self, name): + if name[0] == '_': + super().__delattr__(name) + else: + super().__delitem__(name) def __repr__(self): - attrs = sorted("%s = %r" % (k, v) for k, v in self.__dict__.items() - if not k.startswith("_")) + attrs = sorted( + "%s = %r" % (k, v) for k, v in self.items() if not k.startswith("_") + ) return "%s(%s)" % (self.__class__.__name__, ", ".join(attrs)) - def __str__(self): - return self.as_string() - def __str__(self, nesting=0, indent=''): attrs = [] indentation = indent + " " * nesting - for k, v in self.__dict__.items(): - if not k.startswith("_"): - text = [indentation, k, ":"] - if isinstance(v, Bunch): - if len(v) > 0: - text.append('\n') - text.append(v.__str__(nesting + 1)) - elif isinstance(v, list): - if len(v) == 0: - text.append(' []') - else: - for v_ in v: - text.append('\n' + indentation + "-") - if isinstance(v_, Bunch): - text.append('\n' + v_.__str__(nesting + 1)) - else: - text.append(" " + repr(v_)) + for k, v in self.items(): + text = [indentation, k, ":"] + if isinstance(v, Bunch): + if len(v) > 0: + text.append('\n') + text.append(v.__str__(nesting + 1)) + elif isinstance(v, list): + if len(v) == 0: + text.append(' []') else: - text.append(' ' + repr(v)) - attrs.append("".join(text)) + for v_ in v: + text.append('\n' + indentation + "-") + if isinstance(v_, Bunch): + text.append('\n' + v_.__str__(nesting + 1)) + else: + text.append(" " + repr(v_)) + else: + text.append(' ' + repr(v)) + attrs.append("".join(text)) attrs.sort() return "\n".join(attrs) diff --git a/pyomo/common/collections/component_map.py b/pyomo/common/collections/component_map.py index e1b9d57f0a2..ceb4174ecca 100644 --- a/pyomo/common/collections/component_map.py +++ b/pyomo/common/collections/component_map.py @@ -11,9 +11,19 @@ from collections.abc import MutableMapping as collections_MutableMapping from collections.abc import Mapping as collections_Mapping +from pyomo.common.autoslots import AutoSlots -class ComponentMap(collections_MutableMapping): +def _rebuild_ids(encode, val): + if encode: + return val + else: + # object id() may have changed after unpickling, + # so we rebuild the dictionary keys + return {id(obj): (obj, v) for obj, v in val.values()} + + +class ComponentMap(AutoSlots.Mixin, collections_MutableMapping): """ This class is a replacement for dict that allows Pyomo modeling components to be used as entry keys. The @@ -37,54 +47,20 @@ class ComponentMap(collections_MutableMapping): components for which it contains map entries (e.g., as part of a block). *** """ + __slots__ = ("_dict",) + __autoslot_mappers__ = {'_dict': _rebuild_ids} + def __init__(self, *args, **kwds): # maps id(obj) -> (obj,val) self._dict = {} # handle the dict-style initialization scenarios self.update(*args, **kwds) - # - # This method must be defined for deepcopy/pickling - # because this class relies on Python ids. - # - def __setstate__(self, state): - # *** Temporary hack to allow this class to be used - # *** in inheritance chains for both the old and new - # *** component hierarchies. - for cls in self.__class__.__mro__: - if cls.__name__ == "ICategorizedObject": - super(ComponentMap, self).__setstate__(state) - break - - # object id() may have changed after unpickling, - # so we rebuild the dictionary keys - self._dict = \ - {id(obj):(obj,val) \ - for obj, val in state['_dict'].values()} - - def __getstate__(self): - # *** Temporary hack to allow this class to be used - # *** in inheritance chains for both the old and new - # *** component hierarchies. - try: - super(ComponentMap, self).__getstate__ - except AttributeError: - state = {} - else: - state = super(ComponentMap, self).__getstate__() - for cls in self.__class__.__mro__: - if cls.__name__ == "ICategorizedObject": - break - else: - for i in ComponentMap.__slots__: - state[i] = getattr(self, i) - return state - def __str__(self): """String representation of the mapping.""" - tmp = {str(c)+" (id="+str(id(c))+")":v for c,v in self.items()} - return "ComponentMap("+str(tmp)+")" + tmp = {str(c) + " (id=" + str(id(c)) + ")": v for c, v in self.items()} + return "ComponentMap(" + str(tmp) + ")" # # Implement MutableMapping abstract methods @@ -94,23 +70,19 @@ def __getitem__(self, obj): try: return self._dict[id(obj)][1] except KeyError: - raise KeyError("Component with id '%s': %s" - % (id(obj), str(obj))) + raise KeyError("Component with id '%s': %s" % (id(obj), str(obj))) def __setitem__(self, obj, val): - self._dict[id(obj)] = (obj,val) + self._dict[id(obj)] = (obj, val) def __delitem__(self, obj): try: del self._dict[id(obj)] except KeyError: - raise KeyError("Component with id '%s': %s" - % (id(obj), str(obj))) + raise KeyError("Component with id '%s': %s" % (id(obj), str(obj))) def __iter__(self): - return (obj \ - for obj, val in \ - self._dict.values()) + return (obj for obj, val in self._dict.values()) def __len__(self): return self._dict.__len__() @@ -126,10 +98,9 @@ def __len__(self): def __eq__(self, other): if not isinstance(other, collections_Mapping): return False - return {(type(key), id(key)):val - for key, val in self.items()} == \ - {(type(key), id(key)):val - for key, val in other.items()} + return {(type(key), id(key)): val for key, val in self.items()} == { + (type(key), id(key)): val for key, val in other.items() + } def __ne__(self, other): return not (self == other) @@ -162,5 +133,3 @@ def setdefault(self, key, default=None): else: self[key] = default return default - - diff --git a/pyomo/common/collections/component_set.py b/pyomo/common/collections/component_set.py index 76780ee5a04..0b16acd00be 100644 --- a/pyomo/common/collections/component_set.py +++ b/pyomo/common/collections/component_set.py @@ -36,28 +36,29 @@ class ComponentSet(collections_MutableSet): deepcopied/pickled unless it is done so along with its component entries (e.g., as part of a block). *** """ + __slots__ = ("_data",) + def __init__(self, *args): self._data = dict() if len(args) > 0: if len(args) > 1: raise TypeError( "%s expected at most 1 arguments, " - "got %s" % (self.__class__.__name__, - len(args))) + "got %s" % (self.__class__.__name__, len(args)) + ) self.update(args[0]) def __str__(self): """String representation of the mapping.""" tmp = [] for objid, obj in self._data.items(): - tmp.append(str(obj)+" (id="+str(objid)+")") - return "ComponentSet("+str(tmp)+")" + tmp.append(str(obj) + " (id=" + str(objid) + ")") + return "ComponentSet(" + str(tmp) + ")" def update(self, args): """Update a set with the union of itself and others.""" - self._data.update((id(obj), obj) - for obj in args) + self._data.update((id(obj), obj) for obj in args) # # This method must be defined for deepcopy/pickling @@ -67,7 +68,7 @@ def __setstate__(self, state): # object id() may have changed after unpickling, # so we rebuild the dictionary keys assert len(state) == 1 - self._data = {id(obj):obj for obj in state['_data']} + self._data = {id(obj): obj for obj in state['_data']} def __getstate__(self): return {'_data': tuple(self._data.values())} @@ -105,10 +106,9 @@ def discard(self, val): def __eq__(self, other): if not isinstance(other, collections_Set): return False - return set((type(val), id(val)) - for val in self) == \ - set((type(val), id(val)) - for val in other) + return set((type(val), id(val)) for val in self) == set( + (type(val), id(val)) for val in other + ) def __ne__(self, other): return not (self == other) @@ -127,5 +127,4 @@ def remove(self, val): try: del self._data[id(val)] except KeyError: - raise KeyError("Component with id '%s': %s" - % (id(val), str(val))) + raise KeyError("Component with id '%s': %s" % (id(val), str(val))) diff --git a/pyomo/common/collections/orderedset.py b/pyomo/common/collections/orderedset.py index 19fc1030a62..448939c8822 100644 --- a/pyomo/common/collections/orderedset.py +++ b/pyomo/common/collections/orderedset.py @@ -14,7 +14,7 @@ class OrderedSet(MutableSet): - __slots__ = ('_dict') + __slots__ = ('_dict',) def __init__(self, iterable=None): # TODO: Starting in Python 3.7, dict is ordered (and is faster @@ -32,7 +32,6 @@ def __str__(self): """String representation of the mapping.""" return "OrderedSet(%s)" % (', '.join(repr(x) for x in self)) - def update(self, iterable): for val in iterable: self.add(val) diff --git a/pyomo/common/config.py b/pyomo/common/config.py index ed7e4e7760b..1b44d555b91 100644 --- a/pyomo/common/config.py +++ b/pyomo/common/config.py @@ -28,23 +28,30 @@ import ply.lex import re import sys -from textwrap import wrap +import textwrap import types from pyomo.common.collections import Sequence, Mapping -from pyomo.common.deprecation import deprecated, relocated_module_attribute +from pyomo.common.deprecation import ( + deprecated, + deprecation_warning, + relocated_module_attribute, +) +from pyomo.common.errors import DeveloperError from pyomo.common.fileutils import import_file +from pyomo.common.formatting import wrap_reStructuredText from pyomo.common.modeling import NOTSET -logger = logging.getLogger('pyomo.common.config') +logger = logging.getLogger(__name__) relocated_module_attribute( - 'PYOMO_CONFIG_DIR', 'pyomo.common.envvar.PYOMO_CONFIG_DIR', - version='6.1') + 'PYOMO_CONFIG_DIR', 'pyomo.common.envvar.PYOMO_CONFIG_DIR', version='6.1' +) USER_OPTION = 0 -ADVANCED_OPTION = 1 -DEVELOPER_OPTION = 2 +ADVANCED_OPTION = 10 +DEVELOPER_OPTION = 20 + def Bool(val): """Domain validator for bool-like objects. @@ -68,8 +75,8 @@ def Bool(val): v = int(val) if v in {0, 1}: return bool(v) - raise ValueError( - "Expected Boolean, but received %s" % (val,)) + raise ValueError("Expected Boolean, but received %s" % (val,)) + def Integer(val): """Domain validation function admitting integers @@ -83,10 +90,10 @@ def Integer(val): ans = int(val) # We want to give an error for floating point numbers... if ans != float(val): - raise ValueError( - "Expected integer, but received %s" % (val,)) + raise ValueError("Expected integer, but received %s" % (val,)) return ans + def PositiveInt(val): """Domain validation function admitting strictly positive integers @@ -97,10 +104,10 @@ def PositiveInt(val): ans = int(val) # We want to give an error for floating point numbers... if ans != float(val) or ans <= 0: - raise ValueError( - "Expected positive int, but received %s" % (val,)) + raise ValueError("Expected positive int, but received %s" % (val,)) return ans + def NegativeInt(val): """Domain validation function admitting strictly negative integers @@ -110,10 +117,10 @@ def NegativeInt(val): """ ans = int(val) if ans != float(val) or ans >= 0: - raise ValueError( - "Expected negative int, but received %s" % (val,)) + raise ValueError("Expected negative int, but received %s" % (val,)) return ans + def NonPositiveInt(val): """Domain validation function admitting integers <= 0 @@ -123,10 +130,10 @@ def NonPositiveInt(val): """ ans = int(val) if ans != float(val) or ans > 0: - raise ValueError( - "Expected non-positive int, but received %s" % (val,)) + raise ValueError("Expected non-positive int, but received %s" % (val,)) return ans + def NonNegativeInt(val): """Domain validation function admitting integers >= 0 @@ -136,10 +143,10 @@ def NonNegativeInt(val): """ ans = int(val) if ans != float(val) or ans < 0: - raise ValueError( - "Expected non-negative int, but received %s" % (val,)) + raise ValueError("Expected non-negative int, but received %s" % (val,)) return ans + def PositiveFloat(val): """Domain validation function admitting strictly positive numbers @@ -150,10 +157,10 @@ def PositiveFloat(val): """ ans = float(val) if ans <= 0: - raise ValueError( - "Expected positive float, but received %s" % (val,)) + raise ValueError("Expected positive float, but received %s" % (val,)) return ans + def NegativeFloat(val): """Domain validation function admitting strictly negative numbers @@ -164,10 +171,10 @@ def NegativeFloat(val): """ ans = float(val) if ans >= 0: - raise ValueError( - "Expected negative float, but received %s" % (val,)) + raise ValueError("Expected negative float, but received %s" % (val,)) return ans + def NonPositiveFloat(val): """Domain validation function admitting numbers less than or equal to 0 @@ -178,10 +185,10 @@ def NonPositiveFloat(val): """ ans = float(val) if ans > 0: - raise ValueError( - "Expected non-positive float, but received %s" % (val,)) + raise ValueError("Expected non-positive float, but received %s" % (val,)) return ans + def NonNegativeFloat(val): """Domain validation function admitting numbers greater than or equal to 0 @@ -192,8 +199,7 @@ def NonNegativeFloat(val): """ ans = float(val) if ans < 0: - raise ValueError( - "Expected non-negative float, but received %s" % (val,)) + raise ValueError("Expected non-negative float, but received %s" % (val,)) return ans @@ -214,7 +220,7 @@ class In(object): values are passed to ``domain.__contains__()``, and if ``True`` is returned, the value is accepted and returned. - cast: callable, optional + cast: Callable, optional A callable object. If specified, incoming values are first passed to `cast`, and the resulting object is checked for membership in `domain` @@ -231,8 +237,12 @@ def __new__(cls, domain=None, cast=None): # Convenience: enum.Enum supported __contains__ through Python # 3.7. If the domain is an Enum and cast is not specified, # automatically return an InEnum to handle casting and validation - if cls is In and cast is None and inspect.isclass(domain) \ - and issubclass(domain, enum.Enum): + if ( + cls is In + and cast is None + and inspect.isclass(domain) + and issubclass(domain, enum.Enum) + ): return InEnum(domain) return super(In, cls).__new__(cls) @@ -270,6 +280,7 @@ class InEnum(object): The enum that incoming values should be mapped to """ + def __init__(self, domain): self._domain = domain @@ -283,8 +294,7 @@ def __call__(self, value): return self._domain[value] except KeyError: pass - raise ValueError("%r is not a valid %s" % ( - value, self._domain.__name__)) + raise ValueError("%r is not a valid %s" % (value, self._domain.__name__)) def domain_name(self): return f'InEnum[{self._domain.__name__}]' @@ -311,6 +321,7 @@ class ListOf(object): no tokenization is performed. """ + def __init__(self, itemtype, domain=None, string_lexer=NOTSET): self.itemtype = itemtype if domain is None: @@ -321,8 +332,7 @@ def __init__(self, itemtype, domain=None, string_lexer=NOTSET): self.string_lexer = _default_string_list_lexer else: self.string_lexer = string_lexer - self.__name__ = 'ListOf(%s)' % ( - getattr(self.domain, '__name__', self.domain),) + self.__name__ = 'ListOf(%s)' % (getattr(self.domain, '__name__', self.domain),) def __call__(self, value): if isinstance(value, str) and self.string_lexer is not None: @@ -337,7 +347,7 @@ def domain_name(self): class Module(object): - """ Domain validator for modules. + """Domain validator for modules. Modules can be specified as module objects, by module name, or by the path to the module's file. If specified by path, the @@ -350,21 +360,25 @@ class Module(object): Parameters ---------- - basePath: None, str, ConfigValue + basePath : None, str, ConfigValue The base path that will be prepended to any non-absolute path values provided. If None, defaults to :py:attr:`Path.BasePath`. - expandPath: bool + expandPath : bool If True, then the value will be expanded and normalized. If False, the string representation of the value will be used unchanged. If None, expandPath will defer to the (negated) value of :py:attr:`Path.SuppressPathExpansion`. + Examples + -------- + The following code shows the three ways you can specify a module: by file name, by module name, or by module object. Regardless of how the module is specified, what is stored in the configuration is a module object. .. doctest:: + >>> from pyomo.common.config import ( ... ConfigDict, ConfigValue, Module ... ) @@ -372,6 +386,7 @@ class Module(object): >>> config.declare('my_module', ConfigValue( ... domain=Module(), ... )) + >>> # Set using file path >>> config.my_module = '../../pyomo/common/tests/config_plugin.py' >>> # Set using python module name, as a string @@ -379,7 +394,9 @@ class Module(object): >>> # Set using an imported module object >>> import os.path >>> config.my_module = os.path + """ + def __init__(self, basePath=None, expandPath=None): self.basePath = basePath self.expandPath = expandPath @@ -426,6 +443,7 @@ class Path(object): value of :py:attr:`Path.SuppressPathExpansion` """ + BasePath = None SuppressPathExpansion = False @@ -459,16 +477,21 @@ def __call__(self, path): if path and path[:6].lower() == '${cwd}': path = os.getcwd() + path[6:] - ans = os.path.normpath(os.path.abspath(os.path.join( - os.path.expanduser(os.path.expandvars(base)), - os.path.expanduser(os.path.expandvars(path))))) + ans = os.path.normpath( + os.path.abspath( + os.path.join( + os.path.expanduser(os.path.expandvars(base)), + os.path.expanduser(os.path.expandvars(path)), + ) + ) + ) return ans class PathList(Path): """Domain validator for a list of path-like objects. - This will admit any iterable or object convertable to a string. + This will admit any iterable or object convertible to a string. Iterable objects (other than strings) will have each member normalized using :py:class:`Path`. Other types will be passed to :py:class:`Path`, returning a list with the single resulting path. @@ -489,9 +512,9 @@ class PathList(Path): def __call__(self, data): if hasattr(data, "__iter__") and not isinstance(data, str): - return [ super(PathList, self).__call__(i) for i in data ] + return [super(PathList, self).__call__(i) for i in data] else: - return [ super(PathList, self).__call__(data) ] + return [super(PathList, self).__call__(data)] class DynamicImplicitDomain(object): @@ -503,7 +526,7 @@ class DynamicImplicitDomain(object): ``pyomo/common/tests/config_plugin.py``: .. literalinclude:: /../../pyomo/common/tests/config_plugin.py - :lines: 10- + :start-at: import .. doctest:: :hide: @@ -543,6 +566,7 @@ class DynamicImplicitDomain(object): (ConfigValue, ConfigList, or ConfigDict) """ + def __init__(self, callback): self.callback = callback @@ -550,20 +574,6 @@ def __call__(self, key, value): return self.callback(key, value) -def add_docstring_list(docstring, configdict, indent_by=4): - """Returns the docstring with a formatted configuration arguments listing.""" - return docstring + (" " * indent_by).join( - configdict.generate_documentation( - block_start="Keyword Arguments\n-----------------\n", - block_end="", - item_start="%s\n", - item_body=" %s", - item_end="", - indent_spacing=0, - width=256 - ).splitlines(True)) - - # Note: Enum uses a metaclass to work its magic. To get a deprecation # warning when creating a subclass of ConfigEnum, we need to decorate # the __new__ method here (doing the normal trick of letting the class @@ -574,12 +584,13 @@ def add_docstring_list(docstring, configdict, indent_by=4): # the original __new__ to generate the class docstring. @deprecated() class ConfigEnum(enum.Enum): - - @deprecated("The ConfigEnum base class is deprecated. " - "Directly inherit from enum.Enum and then use " - "In() or InEnum() as the ConfigValue 'domain' for " - "validation and int/string type conversions.", - version='6.0') + @deprecated( + "The ConfigEnum base class is deprecated. " + "Directly inherit from enum.Enum and then use " + "In() or InEnum() as the ConfigValue 'domain' for " + "validation and int/string type conversions.", + version='6.0', + ) def __new__(cls, value, *args): member = object.__new__(cls) member._value_ = value @@ -616,7 +627,7 @@ def from_enum_or_string(cls, arg): .. doctest:: >>> from pyomo.common.config import ( - ... ConfigDict, ConfigList, ConfigValue, In, + ... ConfigDict, ConfigList, ConfigValue ... ) >>> config = ConfigDict() >>> config.declare('filename', ConfigValue( @@ -700,7 +711,7 @@ def from_enum_or_string(cls, arg): Module Path PathList - + DynamicImplicitDomain Configuring class hierarchies ============================= @@ -1013,16 +1024,18 @@ class will still create ``c`` instances that only have the single """ + def _dump(*args, **kwds): try: from yaml import dump except ImportError: - #dump = lambda x,**y: str(x) + # dump = lambda x,**y: str(x) # YAML uses lowercase True/False def dump(x, **args): if type(x) is bool: return str(x).lower() return str(x) + assert '_dump' in globals() globals()['_dump'] = dump return dump(*args, **kwds) @@ -1034,6 +1047,7 @@ def _munge_name(name, space_to_dash=True): name = re.sub(r'_', '-', name) return re.sub(r'[^a-zA-Z0-9-_]', '_', name) + def _domain_name(domain): if domain is None: return "" @@ -1046,8 +1060,10 @@ def _domain_name(domain): else: return None + _leadingSpace = re.compile('^([ \t]*)') + def _strip_indentation(doc): if not doc: return doc @@ -1071,8 +1087,7 @@ def _value2string(prefix, value, obj): if value is not None: try: _data = value._data if value is obj else value - if getattr(builtins, _data.__class__.__name__, None - ) is not None: + if getattr(builtins, _data.__class__.__name__, None) is not None: _str += _dump(_data, default_flow_style=True).rstrip() if _str.endswith("..."): _str = _str[:-3].rstrip() @@ -1082,6 +1097,7 @@ def _value2string(prefix, value, obj): _str += str(type(_data)) return _str.rstrip() + def _value2yaml(prefix, value, obj): _str = prefix if value is not None: @@ -1102,14 +1118,17 @@ def __init__(self, obj): def __call__(self, arg): logging.error( -"""%s '%s' was pickled with an unpicklable domain. + """%s '%s' was pickled with an unpicklable domain. The domain was stripped and lost during the pickle process. Setting new values on the restored object cannot be mapped into the correct domain. -""" % ( self._type, self._name)) +""" + % (self._type, self._name) + ) return arg -def _picklable(field,obj): + +def _picklable(field, obj): ftype = type(field) # If the field is a type (class, etc), cache the 'known' status of # the actual field type and not the generic 'type' class @@ -1142,33 +1161,33 @@ def _picklable(field,obj): _picklable.known[ftype] = False return _UnpickleableDomain(obj) + _picklable.known = {} # The "picklability" of some types is not categorically "knowable" # (e.g., functions can be pickled, but only if they are declared at the # module scope) -_picklable.unknowable_types = {type, types.FunctionType,} +_picklable.unknowable_types = {type, types.FunctionType} _store_bool = {'store_true', 'store_false'} + def _build_lexer(literals=''): # Ignore whitespace (space, tab, linefeed, and comma) t_ignore = " \t\r," - tokens = [ - "STRING", # quoted string - "WORD", # unquoted string - ] + tokens = ["STRING", "WORD"] # quoted string # unquoted string # A "string" is a proper quoted string _quoted_str = r"'(?:[^'\\]|\\.)*'" - _general_str = "|".join([_quoted_str, _quoted_str.replace("'",'"')]) + _general_str = "|".join([_quoted_str, _quoted_str.replace("'", '"')]) + @ply.lex.TOKEN(_general_str) def t_STRING(t): t.value = t.value[1:-1] return t # A "word" contains no whitesspace or commas - @ply.lex.TOKEN(r'[^' + repr(t_ignore+literals) + r']+') + @ply.lex.TOKEN(r'[^' + repr(t_ignore + literals) + r']+') def t_WORD(t): t.value = t.value return t @@ -1177,11 +1196,13 @@ def t_WORD(t): def t_error(t): # Note this parser does not allow "\n", so lexpos is the # column number - raise IOError("ERROR: Token '%s' Line %s Column %s" - % (t.value, t.lineno, t.lexpos+1)) + raise IOError( + "ERROR: Token '%s' Line %s Column %s" % (t.value, t.lineno, t.lexpos + 1) + ) return ply.lex.lex() + def _default_string_list_lexer(value): """Simple string tokenizer for lists of words. @@ -1201,8 +1222,10 @@ def _default_string_list_lexer(value): break yield tok.value + _default_string_list_lexer._lex = None + def _default_string_dict_lexer(value): """Simple string tokenizer for dict data. @@ -1223,39 +1246,359 @@ def _default_string_dict_lexer(value): break sep = _lex.token() if not sep: - raise ValueError( - "Expected ':' or '=' but encountered end of string") + raise ValueError("Expected ':' or '=' but encountered end of string") if sep.type not in ':=': raise ValueError( f"Expected ':' or '=' but found '{sep.value}' at " - f"Line {sep.lineno} Column {sep.lexpos+1}") + f"Line {sep.lineno} Column {sep.lexpos+1}" + ) val = _lex.token() if not val: raise ValueError( f"Expected value following '{sep.type}' " - f"but encountered end of string") + f"but encountered end of string" + ) yield key.value, val.value + _default_string_dict_lexer._lex = None +def _formatter_str_to_callback(pattern, formatter): + "Wrapper function that converts formatter strings to callback functions" + + if not pattern: + pattern = '' + if '%s' in pattern: + cb = lambda self, indent, obj: self.out.write(indent + pattern % obj.name()) + elif pattern: + cb = lambda self, indent, obj: self.out.write(indent + pattern) + else: + cb = lambda self, indent, obj: None + return types.MethodType(cb, formatter) + + +def _formatter_str_to_item_callback(pattern, formatter): + "Wrapper function that converts item formatter strings to callback functions" + + if not pattern: + pattern = '' + if '%s' in pattern: + _item_body_formatter = lambda doc: pattern % (doc,) + else: + _item_body_formatter = lambda doc: pattern + + def _item_body_cb(self, indent, obj): + _doc = obj._doc or obj._description or "" + if not _doc: + return '' + wraplines = '\n ' not in _doc + _doc = _item_body_formatter(_doc).rstrip() + if not _doc: + return '' + _indent = indent + ' ' * self.indent_spacing + if wraplines: + doc_lines = textwrap.wrap( + _doc, self.width, initial_indent=_indent, subsequent_indent=_indent + ) + self.out.write(('\n'.join(doc_lines)).rstrip() + '\n') + elif _doc.lstrip() == _doc: + self.out.write(_indent + _doc + '\n') + else: + self.out.write(_doc + '\n') + + return types.MethodType(_item_body_cb, formatter) + + +class ConfigFormatter(object): + def _initialize(self, indent_spacing, width, visibility): + self.out = io.StringIO() + self.indent_spacing = indent_spacing + self.width = width + self.visibility = visibility + + def _block_start(self, indent, obj): + pass + + def _block_end(self, indent, obj): + pass + + def _item_start(self, indent, obj): + pass + + def _item_body(self, indent, obj): + pass + + def _item_end(self, indent, obj): + pass + + def _finalize(self): + return self.out.getvalue() + + def generate(self, config, indent_spacing=2, width=78, visibility=None): + self._initialize(indent_spacing, width, visibility) + level = [] + lastObj = config + indent = '' + for lvl, pre, val, obj in config._data_collector(1, '', visibility, True): + if len(level) < lvl: + while len(level) < lvl - 1: + level.append(None) + level.append(lastObj) + self._block_start(indent, lastObj) + indent += ' ' * indent_spacing + while len(level) > lvl: + _last = level.pop() + if _last is not None: + indent = indent[:-indent_spacing] + self._block_end(indent, _last) + + lastObj = obj + self._item_start(indent, obj) + self._item_body(indent, obj) + self._item_end(indent, obj) + while level: + _last = level.pop() + if _last is not None: + indent = indent[:-indent_spacing] + self._block_end(indent, _last) + return self._finalize() + + +class String_ConfigFormatter(ConfigFormatter): + def __init__(self, block_start, block_end, item_start, item_body, item_end): + self._block_start = _formatter_str_to_callback(block_start, self) + self._block_end = _formatter_str_to_callback(block_end, self) + self._item_start = _formatter_str_to_callback(item_start, self) + self._item_end = _formatter_str_to_callback(item_end, self) + self._item_body = _formatter_str_to_item_callback(item_body, self) + + +class LaTeX_ConfigFormatter(String_ConfigFormatter): + def __init__(self): + super().__init__( + "\\begin{description}[topsep=0pt,parsep=0.5em,itemsep=-0.4em]\n", + "\\end{description}\n", + "\\item[{%s}]\\hfill\n", + "\\\\%s", + "", + ) + + +class numpydoc_ConfigFormatter(ConfigFormatter): + def _initialize(self, *args): + super()._initialize(*args) + self.wrapper = textwrap.TextWrapper(width=self.width) + + def _item_body(self, indent, obj): + typeinfo = ', '.join( + filter( + None, + [ + 'dict' if isinstance(obj, ConfigDict) else obj.domain_name(), + 'optional' + if obj._default is None + else f'default={repr(obj._default)}', + ], + ) + ) + # Note that numpydoc / ReST specifies that the colon in + # definition lists be surrounded by spaces (i.e., " : "). + # However, as of numpydoc (1.1.0) / Sphinx (3.4.3) / napoleon + # (0.7), things aren't really geared for nested lists of + # parameters. Definition lists omit the colon, and + # sub-definitions are rendered as normal definition sections + # (without the special formatting applied to Parameters lists), + # leading to less readable docs. As they tolerate omitting the + # space before the colon at the top level (which at lower levels + # causes nested definition lists to NOT omit the colon), we will + # generate non-standard ReST and omit the preceding space: + self.out.write(f'\n{indent}{obj.name()}: {typeinfo}\n') + self.wrapper.initial_indent = indent + ' ' * self.indent_spacing + self.wrapper.subsequent_indent = indent + ' ' * self.indent_spacing + vis = "" + if self.visibility is None and obj._visibility >= ADVANCED_OPTION: + vis = "[ADVANCED option]" + if obj._visibility >= DEVELOPER_OPTION: + vis = "[DEVELOPER option]" + itemdoc = wrap_reStructuredText( + '\n\n'.join( + filter( + None, [vis, inspect.cleandoc(obj._doc or obj._description or "")] + ) + ), + self.wrapper, + ) + if itemdoc: + self.out.write(itemdoc + '\n') + + def _finalize(self): + return inspect.cleandoc(self.out.getvalue()) + + +ConfigFormatter.formats = { + 'latex': LaTeX_ConfigFormatter, + 'numpydoc': numpydoc_ConfigFormatter, +} + + +@deprecated( + "add_docstring_list is deprecated. Please use the " + "@document_kwargs_from_configdict() decorator.", + version='6.6.0', +) +def add_docstring_list(docstring, configdict, indent_by=4): + """Returns the docstring with a formatted configuration arguments listing.""" + section = 'Keyword Arguments' + return ( + inspect.cleandoc(docstring) + + '\n' + + section + + '\n' + + '-' * len(section) + + '\n' + + configdict.generate_documentation( + indent_spacing=indent_by, width=256, visibility=0, format='numpydoc' + ) + ) + + +class document_kwargs_from_configdict(object): + """Decorator to append the documentation of a ConfigDict to the docstring + + This adds the documentation of the specified :py:class:`ConfigDict` + (using the :py:class:`numpydoc_ConfigFormatter` formatter) to the + decorated object's docstring. + + Parameters + ---------- + config : ConfigDict or str + the :py:class:`ConfigDict` to document. If a ``str``, then the + :py:class:`ConfigDict` is obtained by retrieving the named + attribute from the decorated object (thereby enabling + documenting class objects whose ``__init__`` keyword arguments + are processed by a :py:class:`ConfigDict` class attribute) + + section : str + the section header to preface config documentation with + + indent_spacing : int + number of spaces to indent each block of documentation + + width : int + total documentation width in characters (for wrapping paragraphs) + + doc : str, optional + the initial docstring to append the ConfigDict documentation to. + If None, then the decorated object's ``__doc__`` will be used. + + Examples + -------- + + >>> from pyomo.common.config import ( + ... ConfigDict, ConfigValue, document_kwargs_from_configdict + ... ) + >>> class MyClass(object): + ... CONFIG = ConfigDict() + ... CONFIG.declare('iterlim', ConfigValue( + ... default=3000, + ... domain=int, + ... doc="Iteration limit. Specify None for no limit" + ... )) + ... CONFIG.declare('tee', ConfigValue( + ... domain=bool, + ... doc="If True, stream the solver output to the console" + ... )) + ... + ... @document_kwargs_from_configdict(CONFIG) + ... def solve(self, **kwargs): + ... config = self.CONFIG(kwargs) + ... # ... + ... + >>> help(MyClass.solve) + Help on function solve: + + solve(self, **kwargs) + Keyword Arguments + ----------------- + iterlim: int, default=3000 + Iteration limit. Specify None for no limit + + tee: bool, optional + If True, stream the solver output to the console + + """ + + def __init__( + self, + config, + section='Keyword Arguments', + indent_spacing=4, + width=78, + visibility=None, + doc=None, + ): + if '\n' not in section: + section += '\n' + '-' * len(section) + '\n' + self.config = config + self.section = section + self.indent_spacing = indent_spacing + self.width = width + self.visibility = visibility + self.doc = doc + + def __call__(self, fcn): + if isinstance(self.config, str): + self.config = getattr(fcn, self.config) + if self.doc is not None: + doc = inspect.cleandoc(self.doc) + elif fcn.__doc__: + doc = inspect.cleandoc(fcn.__doc__) + else: + doc = "" + if doc: + if not doc.endswith('\n'): + doc += '\n\n' + else: + doc += '\n' + fcn.__doc__ = ( + doc + + f'{self.section}' + + self.config.generate_documentation( + indent_spacing=self.indent_spacing, + width=self.width, + visibility=self.visibility, + format='numpydoc', + ) + ) + return fcn + + class ConfigBase(object): - __slots__ = ('_parent', '_name', '_userSet', '_userAccessed', '_data', - '_default', '_domain', '_description', '_doc', '_visibility', - '_argparse') + __slots__ = ( + '_parent', + '_name', + '_userSet', + '_userAccessed', + '_data', + '_default', + '_domain', + '_description', + '_doc', + '_visibility', + '_argparse', + ) # This just needs to be any singleton-like object; we use it so that # we can tell if an argument is provided (and we can't use None as # None is a valid user-specified argument). Making it a class helps # when Config objects are pickled. - class NoArgument(object): pass - - def __init__(self, - default=None, - domain=None, - description=None, - doc=None, - visibility=0): + class NoArgument(object): + pass + + def __init__( + self, default=None, domain=None, description=None, doc=None, visibility=0 + ): self._parent = None self._name = None self._userSet = False @@ -1285,12 +1628,7 @@ def __getstate__(self): # can allocate the state dictionary. If it is not, then we call # the super-class's __getstate__ (since that class is NOT # 'object'). - _base = super() - if hasattr(_base, '__getstate__'): - state = _base.__getstate__() - else: - state = {} - state.update((key, getattr(self, key)) for key in ConfigBase.__slots__) + state = {key: getattr(self, key) for key in ConfigBase.__slots__} state['_domain'] = _picklable(state['_domain'], self) state['_parent'] = None return state @@ -1302,11 +1640,18 @@ def __setstate__(self, state): # of setting self.__dict__[key] = val. object.__setattr__(self, key, val) - def __call__(self, value=NOTSET, default=NOTSET, - domain=NOTSET, description=NOTSET, - doc=NOTSET, visibility=NOTSET, - implicit=NOTSET, implicit_domain=NOTSET, - preserve_implicit=False): + def __call__( + self, + value=NOTSET, + default=NOTSET, + domain=NOTSET, + description=NOTSET, + doc=NOTSET, + visibility=NOTSET, + implicit=NOTSET, + implicit_domain=NOTSET, + preserve_implicit=False, + ): # We will pass through overriding arguments to the constructor. # This way if the constructor does special processing of any of # the arguments (like implicit_domain), we don't have to repeat @@ -1321,16 +1666,14 @@ def __call__(self, value=NOTSET, default=NOTSET, assert default is NOTSET else: fields += ('domain',) - kwds['default'] = ( - self.value() if default is NOTSET else default - ) + kwds['default'] = self.value() if default is NOTSET else default assert implicit is NOTSET assert implicit_domain is NOTSET for field in fields: if type(field) is tuple: field, attr = field else: - attr = '_'+field + attr = '_' + field if locals()[field] is NOTSET: kwds[field] = getattr(self, attr, NOTSET) else: @@ -1400,9 +1743,11 @@ def _cast(self, value): _dom = self._domain.__name__ else: _dom = type(self._domain) - raise ValueError("invalid value for configuration '%s':\n" - "\tFailed casting %s\n\tto %s\n\tError: %s" % - (self.name(True), value, _dom, err)) + raise ValueError( + "invalid value for configuration '%s':\n" + "\tFailed casting %s\n\tto %s\n\tError: %s" + % (self.name(True), value, _dom, err) + ) else: return value @@ -1436,7 +1781,8 @@ def declare_as_argument(self, *args, **kwds): raise TypeError( "You cannot specify an argparse default value with " "ConfigBase.declare_as_argument(). The default value is " - "supplied automatically from the Config definition.") + "supplied automatically from the Config definition." + ) if 'action' not in kwds and self._domain is bool: if not self._default: @@ -1458,7 +1804,6 @@ def declare_as_argument(self, *args, **kwds): return self def initialize_argparse(self, parser): - def _get_subparser_or_group(_parser, name): # Note: strings also have a 'title()' method. We are # looking for things that look like argparse @@ -1466,15 +1811,16 @@ def _get_subparser_or_group(_parser, name): # is insufficient: it needs to be a string attribute as # well if isinstance(name, argparse._ActionsContainer): - #hasattr(_group, 'title') and \ + # hasattr(_group, 'title') and \ # isinstance(_group.title, str): return 2, name if not isinstance(name, str): raise RuntimeError( 'Unknown datatype (%s) for argparse group on ' - 'configuration definition %s' % - (type(name).__name__, obj.name(True))) + 'configuration definition %s' + % (type(name).__name__, obj.name(True)) + ) try: for _grp in _parser._subparsers._group_actions: @@ -1501,14 +1847,17 @@ def _process_argparse_def(obj, _args, _kwds): if not _issub and _idx < len(_group) - 1: raise RuntimeError( "Could not find argparse subparser '%s' for " - "Config item %s" % (_grp, obj.name(True))) + "Config item %s" % (_grp, obj.name(True)) + ) else: _issub, _parser = _get_subparser_or_group(_parser, _group) if 'dest' not in _kwds: _kwds['dest'] = 'CONFIGBLOCK.' + obj.name(True) - if ( 'metavar' not in _kwds - and _kwds.get('action','') not in _store_bool - and obj._domain is not None ): + if ( + 'metavar' not in _kwds + and _kwds.get('action', '') not in _store_bool + and obj._domain is not None + ): _kwds['metavar'] = obj.domain_name().upper() _parser.add_argument(*_args, default=argparse.SUPPRESS, **_kwds) @@ -1534,18 +1883,21 @@ def import_argparse(self, parsed_args): del parsed_args.__dict__[_dest] return parsed_args - def display(self, content_filter=None, indent_spacing=2, ostream=None, - visibility=None): + def display( + self, content_filter=None, indent_spacing=2, ostream=None, visibility=None + ): if content_filter not in ConfigDict.content_filters: - raise ValueError("unknown content filter '%s'; valid values are %s" - % (content_filter, ConfigDict.content_filters)) + raise ValueError( + "unknown content filter '%s'; valid values are %s" + % (content_filter, ConfigDict.content_filters) + ) _blocks = [] if ostream is None: - ostream=sys.stdout + ostream = sys.stdout for lvl, prefix, value, obj in self._data_collector(0, "", visibility): _str = _value2string(prefix, value, obj) - _blocks[lvl:] = [' ' * indent_spacing * lvl + _str + "\n",] + _blocks[lvl:] = [' ' * indent_spacing * lvl + _str + "\n"] if content_filter == 'userdata' and not obj._userSet: continue for i, v in enumerate(_blocks): @@ -1563,20 +1915,24 @@ def generate_yaml_template(self, indent_spacing=2, width=78, visibility=0): if lvl not in level_info: level_info[lvl] = {'data': [], 'off': 0, 'line': 0, 'over': 0} level_info[lvl]['data'].append( - (_str.find(':') + 2, len(_str), len(obj._description or ""))) + (_str.find(':') + 2, len(_str), len(obj._description or "")) + ) for lvl in sorted(level_info): indent = lvl * indent_spacing _ok = width - indent - len(comment) - minDocWidth - offset = \ - max( val if val < _ok else key - for key,val,doc in level_info[lvl]['data'] ) + offset = max( + val if val < _ok else key for key, val, doc in level_info[lvl]['data'] + ) offset += indent + len(comment) - over = sum(1 for key, val, doc in level_info[lvl]['data'] - if doc + offset > width) + over = sum( + 1 for key, val, doc in level_info[lvl]['data'] if doc + offset > width + ) if len(level_info[lvl]['data']) - over > 0: - line = max(offset + doc - for key, val, doc in level_info[lvl]['data'] - if offset + doc <= width) + line = max( + offset + doc + for key, val, doc in level_info[lvl]['data'] + if offset + doc <= width + ) else: line = width level_info[lvl]['off'] = offset @@ -1617,90 +1973,64 @@ def generate_yaml_template(self, indent_spacing=2, width=78, visibility=0): os.write(_str + '\n' + ' ' * field) os.write(comment) txtArea = max(width - field - len(comment), minDocWidth) - os.write(("\n" + ' ' * field + comment).join( - wrap( - obj._description, txtArea, subsequent_indent=' '))) + os.write( + ("\n" + ' ' * field + comment).join( + textwrap.wrap(obj._description, txtArea, subsequent_indent=' ') + ) + ) os.write('\n') return os.getvalue() - def generate_documentation( - self, block_start=None, block_end=None, - item_start=None, item_body=None, item_end=None, - indent_spacing=2, width=78, visibility=0, - format='latex'): - _formats = ConfigBase.generate_documentation.formats - if block_start is None: - block_start = _formats.get(format, {}).get('block_start','') - if block_end is None: - block_end = _formats.get(format, {}).get('block_end','') - if item_start is None: - item_start = _formats.get(format, {}).get('item_start','') - if item_body is None: - item_body = _formats.get(format, {}).get('item_body','') - if item_end is None: - item_end = _formats.get(format, {}).get('item_end','') + self, + block_start=None, + block_end=None, + item_start=None, + item_body=None, + item_end=None, + indent_spacing=2, + width=78, + visibility=None, + format='latex', + ): + if isinstance(format, str): + formatter = ConfigFormatter.formats.get(format, None) + if formatter is None: + raise ValueError(f"Unrecognized documentation formatter, '{format}'") + formatter = formatter() + else: + # Assume everything not a str is a valid formatter object. + formatter = format + + deprecated_args = (block_start, block_end, item_start, item_end) + if any(arg is not None for arg in deprecated_args): + names = ('block_start', 'block_end', 'item_start', 'item_end') + for arg, name in zip(deprecated_args, names): + if arg is None: + continue + deprecation_warning( + f"Overriding '{name}' by passing strings to " + "generate_documentation is deprecated. Create an instance of a " + "StringConfigFormatter and pass it as the 'format' argument.", + version='6.6.0', + ) + setattr( + formatter, "_" + name, _formatter_str_to_callback(arg, formatter) + ) + if item_body is not None: + deprecation_warning( + f"Overriding 'item_body' by passing strings to " + "generate_documentation is deprecated. Create an instance of a " + "StringConfigFormatter and pass it as the 'format' argument.", + version='6.6.0', + ) + setattr( + formatter, + "_item_body", + _formatter_str_to_item_callback(item_body, formatter), + ) - os = io.StringIO() - level = [] - lastObj = self - indent = '' - for lvl, pre, val, obj in self._data_collector(1, '', visibility, True): - if len(level) < lvl: - while len(level) < lvl - 1: - level.append(None) - level.append(lastObj) - if '%s' in block_start: - os.write(indent + block_start % lastObj.name()) - elif block_start: - os.write(indent + block_start) - indent += ' ' * indent_spacing - while len(level) > lvl: - _last = level.pop() - if _last is not None: - indent = indent[:-1 * indent_spacing] - if '%s' in block_end: - os.write(indent + block_end % _last.name()) - elif block_end: - os.write(indent + block_end) - - lastObj = obj - if '%s' in item_start: - os.write(indent + item_start % obj.name()) - elif item_start: - os.write(indent + item_start) - _doc = obj._doc or obj._description or "" - if _doc: - _wrapLines = '\n ' not in _doc - if '%s' in item_body: - _doc = item_body % (_doc,) - elif _doc: - _doc = item_body - if _wrapLines: - doc_lines = wrap( - _doc, - width, - initial_indent=indent + ' ' * indent_spacing, - subsequent_indent=indent + ' ' * indent_spacing) - else: - doc_lines = (_doc,) - # Write things out - os.writelines('\n'.join(doc_lines)) - if not doc_lines[-1].endswith("\n"): - os.write('\n') - if '%s' in item_end: - os.write(indent + item_end % obj.name()) - elif item_end: - os.write(indent + item_end) - while level: - _last = level.pop() - if _last is not None: - indent = indent[:-1 * indent_spacing] - if '%s' in block_end: - os.write(indent + block_end % _last.name()) - else: - os.write(indent + block_end) - return os.getvalue() + return formatter.generate(self, indent_spacing, width, visibility) def user_values(self): if self._userSet: @@ -1716,17 +2046,6 @@ def unused_user_values(self): if obj._userSet and not obj._userAccessed: yield obj -ConfigBase.generate_documentation.formats = { - 'latex': { - 'block_start': "\\begin{description}[" - "topsep=0pt,parsep=0.5em,itemsep=-0.4em]\n", - 'block_end': "\\end{description}\n", - 'item_start': "\\item[{%s}]\\hfill\n", - 'item_body': "\\\\%s", - 'item_end': "", - } -} - class ConfigValue(ConfigBase): """Store and manipulate a single configuration value. @@ -1737,7 +2056,7 @@ class ConfigValue(ConfigBase): The default value that this ConfigValue will take if no value is provided. - domain: callable, optional + domain: Callable, optional The domain can be any callable that accepts a candidate value and returns the value converted to the desired type, optionally performing any data validation. The result will be stored into @@ -1820,6 +2139,7 @@ class MarkImmutable(object): >>> locker.release_lock() """ + def __init__(self, *args): self._targets = args self._locked = [] @@ -1830,7 +2150,8 @@ def lock(self): for cfg in self._targets: if type(cfg) is not ConfigValue: raise ValueError( - 'Only ConfigValue instances can be marked immutable.') + 'Only ConfigValue instances can be marked immutable.' + ) cfg.__class__ = ImmutableConfigValue self._locked.append(cfg) except: @@ -1863,7 +2184,7 @@ class ConfigList(ConfigBase, Sequence): otherwise the default is cast to the domain and forms a default list with a single element. - domain: callable, optional + domain: Callable, optional The domain can be any callable that accepts a candidate value and returns the value converted to the desired type, optionally performing any data validation. The result will be stored / @@ -1915,7 +2236,7 @@ def __getitem__(self, key): def get(self, key, default=NOTSET): # Note: get() is borrowed from ConfigDict for cases where we - # want the raw stored object (and to aviod the implicit + # want the raw stored object (and to avoid the implicit # conversion of ConfigValue members to their stored data). try: val = self._data[key] @@ -1932,7 +2253,7 @@ def __setitem__(self, key, val): # As a result, *this* list doesn't change when someone tries to # change an element; instead, the *element* gets its _userSet # flag set. - #self._userSet = True + # self._userSet = True self._data[key].set_value(val) def __len__(self): @@ -1956,8 +2277,7 @@ def set_value(self, value): try: if isinstance(value, str): value = list(_default_string_list_lexer(value)) - if (type(value) is list) or \ - isinstance(value, ConfigList): + if (type(value) is list) or isinstance(value, ConfigList): for val in value: self.append(val) else: @@ -1971,7 +2291,7 @@ def reset(self): ConfigBase.reset(self) # Because the base reset() calls set_value, any deefault list # entries will get their userSet flag set. This is wrong, as - # reset() should conceptually reset teh object to it's default + # reset() should conceptually reset the object to it's default # state (e.g., before the user ever had a chance to mess with # things). As the list could contain a ConfigDict, this is a # recursive operation to put the userSet values back. @@ -1989,10 +2309,9 @@ def append(self, value=NOTSET): # Adding something to the container should not change the # userSet on the container (see Pyomo/pyomo#352; now # Pyomo/pysp#8 for justification) - #self._userSet = True + # self._userSet = True - @deprecated("ConfigList.add() has been deprecated. Use append()", - version='5.7.2') + @deprecated("ConfigList.add() has been deprecated. Use append()", version='5.7.2') def add(self, value=NOTSET): "Append the specified value to the list, casting as necessary." return self.append(value) @@ -2008,8 +2327,9 @@ def _data_collector(self, level, prefix, visibility=None, docMode=False): # somewhat redundant, and worse, if the list is empty, then # no documentation is generated at all!) yield (level, prefix, None, self) - subDomain = self._domain._data_collector(level + 1, '- ', - visibility, docMode) + subDomain = self._domain._data_collector( + level + 1, '- ', visibility, docMode + ) # Pop off the (empty) block entry next(subDomain) for v in subDomain: @@ -2041,10 +2361,10 @@ class ConfigDict(ConfigBase, Mapping): implicit: bool, optional If True, the ConfigDict will allow "implicitly" declared keys, that is, keys can be stored into the ConfigDict that - were not prevously declared using :py:meth:`declare` or + were not previously declared using :py:meth:`declare` or :py:meth:`declare_from`. - implicit_domain: callable, optional + implicit_domain: Callable, optional The domain that will be used for any implicitly-declared keys. Follows the same rules as :py:meth:`ConfigValue`'s `domain`. @@ -2061,22 +2381,30 @@ class ConfigDict(ConfigBase, Mapping): content_filters = {None, 'all', 'userdata'} - __slots__ = ('_decl_order', '_declared', '_implicit_declaration', - '_implicit_domain') + __slots__ = ( + '_decl_order', + '_declared', + '_implicit_declaration', + '_implicit_domain', + ) _all_slots = set(__slots__ + ConfigBase.__slots__) - def __init__(self, - description=None, - doc=None, - implicit=False, - implicit_domain=None, - visibility=0): + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): self._decl_order = [] self._declared = set() self._implicit_declaration = implicit - if ( implicit_domain is None - or type(implicit_domain) is DynamicImplicitDomain - or isinstance(implicit_domain, ConfigBase) ): + if ( + implicit_domain is None + or type(implicit_domain) is DynamicImplicitDomain + or isinstance(implicit_domain, ConfigBase) + ): self._implicit_domain = implicit_domain else: self._implicit_domain = ConfigValue(None, domain=implicit_domain) @@ -2103,7 +2431,7 @@ def __dir__(self): def __getitem__(self, key): self._userAccessed = True - _key = str(key).replace(' ','_') + _key = str(key).replace(' ', '_') if isinstance(self._data[_key], ConfigValue): return self._data[_key].value() else: @@ -2111,7 +2439,7 @@ def __getitem__(self, key): def get(self, key, default=NOTSET): self._userAccessed = True - _key = str(key).replace(' ','_') + _key = str(key).replace(' ', '_') if _key in self._data: return self._data[_key] if default is NOTSET: @@ -2126,7 +2454,7 @@ def get(self, key, default=NOTSET): def setdefault(self, key, default=NOTSET): self._userAccessed = True - _key = str(key).replace(' ','_') + _key = str(key).replace(' ', '_') if _key in self._data: return self._data[_key] if default is NOTSET: @@ -2135,24 +2463,24 @@ def setdefault(self, key, default=NOTSET): return self.add(key, default) def __setitem__(self, key, val): - _key = str(key).replace(' ','_') + _key = str(key).replace(' ', '_') if _key not in self._data: self.add(key, val) else: self._data[_key].set_value(val) - #self._userAccessed = True + # self._userAccessed = True def __delitem__(self, key): # Note that this will produce a KeyError if the key is not valid # for this ConfigDict. - _key = str(key).replace(' ','_') + _key = str(key).replace(' ', '_') del self._data[_key] # Clean up the other data structures self._decl_order.remove(_key) self._declared.discard(_key) def __contains__(self, key): - _key = str(key).replace(' ','_') + _key = str(key).replace(' ', '_') return _key in self._data def __len__(self): @@ -2165,7 +2493,7 @@ def __getattr__(self, name): # Note: __getattr__ is only called after all "usual" attribute # lookup methods have failed. So, if we get here, we already # know that key is not a __slot__ or a method, etc... - #if name in ConfigDict._all_slots: + # if name in ConfigDict._all_slots: # return super(ConfigDict,self).__getattribute__(name) _name = name.replace(' ', '_') if _name not in self._data: @@ -2179,15 +2507,17 @@ def __setattr__(self, name, value): ConfigDict.__setitem__(self, name, value) def __delattr__(self, name): - _key = str(name).replace(' ','_') + _key = str(name).replace(' ', '_') if _key in self._data: del self[_key] elif _key in dir(self): - raise AttributeError("'%s' object attribute '%s' is read-only" % - (type(self).__name__, name)) + raise AttributeError( + "'%s' object attribute '%s' is read-only" % (type(self).__name__, name) + ) else: - raise AttributeError("'%s' object has no attribute '%s'" % - (type(self).__name__, name)) + raise AttributeError( + "'%s' object has no attribute '%s'" % (type(self).__name__, name) + ) def keys(self): return iter(self) @@ -2202,18 +2532,15 @@ def items(self): for key in self._decl_order: yield (self._data[key]._name, self[key]) - @deprecated('The iterkeys method is deprecated. Use dict.keys().', - version='6.0') + @deprecated('The iterkeys method is deprecated. Use dict.keys().', version='6.0') def iterkeys(self): return self.keys() - @deprecated('The itervalues method is deprecated. Use dict.keys().', - version='6.0') + @deprecated('The itervalues method is deprecated. Use dict.keys().', version='6.0') def itervalues(self): return self.values() - @deprecated('The iteritems method is deprecated. Use dict.keys().', - version='6.0') + @deprecated('The iteritems method is deprecated. Use dict.keys().', version='6.0') def iteritems(self): return self.items() @@ -2223,12 +2550,14 @@ def _add(self, name, config): if config._parent is not None: raise ValueError( "config '%s' is already assigned to ConfigDict '%s'; " - "cannot reassign to '%s'" % - (name, config._parent.name(True), self.name(True))) + "cannot reassign to '%s'" + % (name, config._parent.name(True), self.name(True)) + ) if _name in self._data: raise ValueError( - "duplicate config '%s' defined for ConfigDict '%s'" % - (name, self.name(True))) + "duplicate config '%s' defined for ConfigDict '%s'" + % (name, self.name(True)) + ) self._data[_name] = config self._decl_order.append(_name) config._parent = self @@ -2243,23 +2572,25 @@ def declare(self, name, config): def declare_from(self, other, skip=None): if not isinstance(other, ConfigDict): - raise ValueError( - "ConfigDict.declare_from() only accepts other ConfigDicts") + raise ValueError("ConfigDict.declare_from() only accepts other ConfigDicts") # Note that we duplicate ["other()"] other so that this # ConfigDict's entries are independent of the other's for key in other.keys(): if skip and key in skip: continue if key in self: - raise ValueError("ConfigDict.declare_from passed a block " - "with a duplicate field, '%s'" % (key,)) + raise ValueError( + "ConfigDict.declare_from passed a block " + "with a duplicate field, '%s'" % (key,) + ) self.declare(key, other.get(key)()) def add(self, name, config): if not self._implicit_declaration: - raise ValueError("Key '%s' not defined in ConfigDict '%s'" - " and Dict disallows implicit entries" % - (name, self.name(True))) + raise ValueError( + "Key '%s' not defined in ConfigDict '%s'" + " and Dict disallows implicit entries" % (name, self.name(True)) + ) if self._implicit_domain is None: if isinstance(config, ConfigBase): @@ -2274,24 +2605,27 @@ def add(self, name, config): # Adding something to the container should not change the # userSet on the container (see Pyomo/pyomo#352; now # Pyomo/pysp#8 for justification) - #self._userSet = True + # self._userSet = True return ans def value(self, accessValue=True): if accessValue: self._userAccessed = True - return { cfg._name: cfg.value(accessValue) - for cfg in map(self._data.__getitem__, self._decl_order) } + return { + cfg._name: cfg.value(accessValue) + for cfg in map(self._data.__getitem__, self._decl_order) + } def set_value(self, value, skip_implicit=False): if value is None: return self if isinstance(value, str): value = dict(_default_string_dict_lexer(value)) - if (type(value) is not dict) and \ - (not isinstance(value, ConfigDict)): - raise ValueError("Expected dict value for %s.set_value, found %s" % - (self.name(True), type(value).__name__)) + if (type(value) is not dict) and (not isinstance(value, ConfigDict)): + raise ValueError( + "Expected dict value for %s.set_value, found %s" + % (self.name(True), type(value).__name__) + ) if not value: return self _implicit = [] @@ -2311,8 +2645,9 @@ def set_value(self, value, skip_implicit=False): else: raise ValueError( "key '%s' not defined for ConfigDict '%s' and " - "implicit (undefined) keys are not allowed" % - (key, self.name(True))) + "implicit (undefined) keys are not allowed" + % (key, self.name(True)) + ) # If the set_value fails part-way through the new values, we # want to restore a deterministic state. That is, either @@ -2345,6 +2680,7 @@ def _keep(self, key): else: del self._data[key] return keep + # this is an in-place slice of a list... self._decl_order[:] = [x for x in self._decl_order if _keep(self, x)] self._userAccessed = False @@ -2359,9 +2695,8 @@ def _data_collector(self, level, prefix, visibility=None, docMode=False): level += 1 for key in self._decl_order: cfg = self._data[key] - for v in cfg._data_collector( - level, cfg._name + ': ', visibility, docMode): - yield v + yield from cfg._data_collector(level, cfg._name + ': ', visibility, docMode) + # Backwards compatibility: ConfigDict was originally named ConfigBlock. ConfigBlock = ConfigDict diff --git a/pyomo/common/dependencies.py b/pyomo/common/dependencies.py index da03de32a48..266074201b0 100644 --- a/pyomo/common/dependencies.py +++ b/pyomo/common/dependencies.py @@ -14,21 +14,22 @@ import importlib import logging import sys +import warnings -from .deprecation import ( - deprecated, deprecation_warning, in_testing_environment, -) +from .deprecation import deprecated, deprecation_warning, in_testing_environment +from .errors import DeferredImportError from . import numeric_types -class DeferredImportError(ImportError): - pass + +SUPPRESS_DEPENDENCY_WARNINGS = False + class ModuleUnavailable(object): - """Mock object that raises a DeferredImportError upon attribute access + """Mock object that raises :py:class:`.DeferredImportError` upon attribute access This object is returned by :py:func:`attempt_import()` in lieu of the module in the case that the module import fails. Any attempts - to access attributes on this object will raise a DeferredImportError + to access attributes on this object will raise a :py:class:`.DeferredImportError` exception. Parameters @@ -64,14 +65,13 @@ class ModuleUnavailable(object): def __init__(self, name, message, version_error, import_error, package): self.__name__ = name - self._moduleunavailable_info_ = ( - message, version_error, import_error, package, - ) + self._moduleunavailable_info_ = (message, version_error, import_error, package) def __getattr__(self, attr): if attr in ModuleUnavailable._getattr_raises_attributeerror: - raise AttributeError("'%s' object has no attribute '%s'" - % (type(self).__name__, attr)) + raise AttributeError( + "'%s' object has no attribute '%s'" % (type(self).__name__, attr) + ) raise DeferredImportError(self._moduleunavailable_message()) def __getstate__(self): @@ -99,12 +99,12 @@ def _moduleunavailable_message(self, msg=None): "failed to import: %s" % (self.__name__, _pkg_str, _imp) ) else: - msg = "%s (import raised %s)" % (msg, _imp,) + msg = "%s (import raised %s)" % (msg, _imp) if _ver: if not msg or not str(msg): msg = "The %s module %s" % (self.__name__, _ver) else: - msg = "%s (%s)" % (msg, _ver,) + msg = "%s (%s)" % (msg, _ver) return msg def log_import_warning(self, logger='pyomo', msg=None): @@ -131,14 +131,15 @@ class DeferredImportModule(object): ``defer_check=True``. Any attempts to access attributes on this object will trigger the actual module import and return either the appropriate module attribute or else if the module import fails, - raise a DeferredImportError exception. + raise a :py:class:`.DeferredImportError` exception. """ + def __init__(self, indicator, deferred_submodules, submodule_name): self._indicator_flag = indicator self._submodule_name = submodule_name - self.__file__ = None # Disable nose's coverage of this module - self.__spec__ = None # Indicate that this is not a "real" module + self.__file__ = None # Disable coverage of this module + self.__spec__ = None # Indicate that this is not a "real" module if not deferred_submodules: return @@ -147,12 +148,16 @@ def __init__(self, indicator, deferred_submodules, submodule_name): for name in deferred_submodules: if not name.startswith(submodule_name + '.'): continue - _local_name = name[(1+len(submodule_name)):] + _local_name = name[(1 + len(submodule_name)) :] if '.' in _local_name: continue - setattr(self, _local_name, DeferredImportModule( - indicator, deferred_submodules, - submodule_name + '.' + _local_name)) + setattr( + self, + _local_name, + DeferredImportModule( + indicator, deferred_submodules, submodule_name + '.' + _local_name + ), + ) def __getattr__(self, attr): self._indicator_flag.resolve() @@ -175,6 +180,83 @@ def mro(self): return [DeferredImportModule, object] +def UnavailableClass(unavailable_module): + """Function to generate an "unavailable" base class + + This function returns a custom class that wraps the + :py:class:`ModuleUnavailable` instance returned by + :py:func:`attempt_import` when the target module is not available. + Any attempt to instantiate this class (or a class derived from it) + or access a class attribute will raise the + :py:class:`.DeferredImportError` from the wrapped + :py:class:`ModuleUnavailable` object. + + Parameters + ---------- + unavailable_module: ModuleUnavailable + The :py:class:`ModuleUnavailable` instance (from + :py:func:`attempt_import`) to use to generate the + :py:class:`.DeferredImportError`. + + Example + ------- + + Declaring a class that inherits from an optional dependency: + + .. doctest:: + + >>> from pyomo.common.dependencies import attempt_import, UnavailableClass + >>> bogus, bogus_available = attempt_import('bogus_unavailable_class') + >>> class MyPlugin(bogus.plugin if bogus_available else UnavailableClass(bogus)): + ... pass + + Attempting to instantiate the derived class generates an exception + when the module is unavailable: + + .. doctest:: + + >>> MyPlugin() + Traceback (most recent call last): + ... + pyomo.common.dependencies.DeferredImportError: The class 'MyPlugin' cannot be + created because a needed optional dependency was not found (import raised + ModuleNotFoundError: No module named 'bogus_unavailable_class') + + As does attempting to access class attributes on the derived class: + + .. doctest:: + + >>> MyPlugin.create_instance() + Traceback (most recent call last): + ... + pyomo.common.dependencies.DeferredImportError: The class attribute + 'MyPlugin.create_instance' is not available because a needed optional + dependency was not found (import raised ModuleNotFoundError: No module + named 'bogus_unavailable_class') + + """ + + class UnavailableMeta(type): + def __getattr__(cls, name): + raise DeferredImportError( + unavailable_module._moduleunavailable_message( + f"The class attribute '{cls.__name__}.{name}' is not available " + "because a needed optional dependency was not found" + ) + ) + + class UnavailableBase(metaclass=UnavailableMeta): + def __new__(cls, *args, **kwargs): + raise DeferredImportError( + unavailable_module._moduleunavailable_message( + f"The class '{cls.__name__}' cannot be created because a " + "needed optional dependency was not found" + ) + ) + + return UnavailableBase + + class _DeferredImportIndicatorBase(object): def __and__(self, other): return _DeferredAnd(self, other) @@ -194,11 +276,11 @@ class DeferredImportIndicator(_DeferredImportIndicatorBase): This object serves as a placeholder for the Boolean indicator if a deferred module import was successful. Casting this instance to - bool will cause the import to be attempted. The actual import logic - is here and not in the DeferredImportModule to reduce the number of - attributes on the DeferredImportModule. + `bool` will cause the import to be attempted. The actual import logic + is here and not in the :py:class:`DeferredImportModule` to reduce the number of + attributes on the :py:class:`DeferredImportModule`. - ``DeferredImportIndicator`` supports limited logical expressions + :py:class:`DeferredImportIndicator` supports limited logical expressions using the ``&`` (and) and ``|`` (or) binary operators. Creating these expressions does not trigger the import of the corresponding :py:class:`DeferredImportModule` instances, although casting the @@ -207,9 +289,17 @@ class DeferredImportIndicator(_DeferredImportIndicatorBase): """ - def __init__(self, name, error_message, catch_exceptions, - minimum_version, original_globals, callback, importer, - deferred_submodules): + def __init__( + self, + name, + error_message, + catch_exceptions, + minimum_version, + original_globals, + callback, + importer, + deferred_submodules, + ): self._names = [name] for _n in tuple(self._names): if '.' in _n: @@ -256,8 +346,7 @@ def resolve(self): # If this module was not found, then we need to check for # deferred submodules and resolve them as well - if self._deferred_submodules and \ - type(self._module) is ModuleUnavailable: + if self._deferred_submodules and type(self._module) is ModuleUnavailable: info = self._module._moduleunavailable_info_ for submod in self._deferred_submodules: refmod = self._module @@ -265,8 +354,11 @@ def resolve(self): try: refmod = getattr(refmod, name) except DeferredImportError: - setattr(refmod, name, ModuleUnavailable( - refmod.__name__+submod, *info)) + setattr( + refmod, + name, + ModuleUnavailable(refmod.__name__ + submod, *info), + ) refmod = getattr(refmod, name) # Replace myself in the original globals() where I was @@ -281,11 +373,10 @@ def resolve(self): self.replace_self_in_globals(_frame.f_globals) def replace_self_in_globals(self, _globals): - for k,v in _globals.items(): + for k, v in _globals.items(): if v is self: _globals[k] = self._available - elif v.__class__ is DeferredImportModule and \ - v._indicator_flag is self: + elif v.__class__ is DeferredImportModule and v._indicator_flag is self: if v._submodule_name is None: _globals[k] = self._module else: @@ -325,6 +416,7 @@ def check_min_version(module, min_version): if check_min_version._parser is None: try: from packaging import version as _version + _parser = _version.parse except ImportError: # pkg_resources is an order of magnitude slower to import than @@ -338,13 +430,22 @@ def check_min_version(module, min_version): version = getattr(module, '__version__', '0.0.0') return _parser(min_version) <= _parser(version) + check_min_version._parser = None -def attempt_import(name, error_message=None, only_catch_importerror=None, - minimum_version=None, alt_names=None, callback=None, - importer=None, defer_check=True, deferred_submodules=None, - catch_exceptions=None): +def attempt_import( + name, + error_message=None, + only_catch_importerror=None, + minimum_version=None, + alt_names=None, + callback=None, + importer=None, + defer_check=True, + deferred_submodules=None, + catch_exceptions=None, +): """Attempt to import the specified module. This will attempt to import the specified module, returning a @@ -424,8 +525,8 @@ def attempt_import(name, error_message=None, only_catch_importerror=None, defer_check: bool, optional If True (the default), then the attempted import is deferred until the first use of either the module or the availability - flag. The method will return instances of DeferredImportModule - and DeferredImportIndicator. + flag. The method will return instances of :py:class:`DeferredImportModule` + and :py:class:`DeferredImportIndicator`. deferred_submodules: Iterable[str], optional If provided, an iterable of submodule names within this module @@ -453,16 +554,22 @@ def attempt_import(name, error_message=None, only_catch_importerror=None, """ if alt_names is not None: - deprecation_warning('alt_names=%s no longer needs to be specified ' - 'and is ignored' % (alt_names,), version='6.0') + deprecation_warning( + 'alt_names=%s no longer needs to be specified ' + 'and is ignored' % (alt_names,), + version='6.0', + ) if only_catch_importerror is not None: deprecation_warning( "only_catch_importerror is deprecated. Pass exceptions to " - "catch using the catch_exceptions argument", version='5.7.3') + "catch using the catch_exceptions argument", + version='5.7.3', + ) if catch_exceptions is not None: - raise ValueError("Cannot specify both only_catch_importerror " - "and catch_exceptions") + raise ValueError( + "Cannot specify both only_catch_importerror and catch_exceptions" + ) if only_catch_importerror: catch_exceptions = (ImportError,) else: @@ -478,7 +585,9 @@ def attempt_import(name, error_message=None, only_catch_importerror=None, deprecation_warning( 'attempt_import(): deferred_submodules takes an iterable ' 'and not a mapping (the alt_names supplied by the mapping ' - 'are no longer needed and are ignored).', version='6.0') + 'are no longer needed and are ignored).', + version='6.0', + ) deferred_submodules = list(deferred_submodules) # Ensures all names begin with '.' @@ -508,12 +617,12 @@ def attempt_import(name, error_message=None, only_catch_importerror=None, original_globals=inspect.currentframe().f_back.f_globals, callback=callback, importer=importer, - deferred_submodules=deferred) + deferred_submodules=deferred, + ) return DeferredImportModule(indicator, deferred, None), indicator if deferred_submodules: - raise ValueError( - "deferred_submodules is only valid if defer_check==True") + raise ValueError("deferred_submodules is only valid if defer_check==True") return _perform_import( name=name, @@ -526,30 +635,38 @@ def attempt_import(name, error_message=None, only_catch_importerror=None, ) -def _perform_import(name, error_message, minimum_version, callback, - importer, catch_exceptions, package): +def _perform_import( + name, error_message, minimum_version, callback, importer, catch_exceptions, package +): import_error = None version_error = None try: - if importer is None: - module = importlib.import_module(name) - else: - module = importer() - if ( minimum_version is None - or check_min_version(module, minimum_version) ): + with warnings.catch_warnings(): + # Temporarily suppress all warnings: we assume we are + # importing a third-party package here and we don't want to + # see them? + if SUPPRESS_DEPENDENCY_WARNINGS and not name.startswith('pyomo.'): + warnings.resetwarnings() + warnings.simplefilter("ignore") + if importer is None: + module = importlib.import_module(name) + else: + module = importer() + if minimum_version is None or check_min_version(module, minimum_version): if callback is not None: callback(module, True) return module, True else: version = getattr(module, '__version__', 'UNKNOWN') - version_error = ( - "version %s does not satisfy the minimum version %s" - % (version, minimum_version)) + version_error = "version %s does not satisfy the minimum version %s" % ( + version, + minimum_version, + ) except catch_exceptions as e: import_error = "%s: %s" % (type(e).__name__, e) module = ModuleUnavailable( - name, error_message, version_error, import_error, package, + name, error_message, version_error, import_error, package ) if callback is not None: callback(module, False) @@ -557,9 +674,9 @@ def _perform_import(name, error_message, minimum_version, callback, def declare_deferred_modules_as_importable(globals_dict): - """Make all DeferredImportModules in ``globals_dict`` importable + """Make all :py:class:`DeferredImportModules` in ``globals_dict`` importable - This function will go throught the specified ``globals_dict`` + This function will go throughout the specified ``globals_dict`` dictionary and add any instances of :py:class:`DeferredImportModule` that it finds (and any of their deferred submodules) to ``sys.modules`` so that the modules can be imported through the @@ -597,15 +714,19 @@ def declare_deferred_modules_as_importable(globals_dict): """ _global_name = globals_dict['__name__'] + '.' - deferred = list((k, v) for k, v in globals_dict.items() - if type(v) is DeferredImportModule ) + deferred = list( + (k, v) for k, v in globals_dict.items() if type(v) is DeferredImportModule + ) while deferred: name, mod = deferred.pop(0) mod.__path__ = None mod.__spec__ = None sys.modules[_global_name + name] = mod - deferred.extend((name + '.' + k, v) for k, v in mod.__dict__.items() - if type(v) is DeferredImportModule ) + deferred.extend( + (name + '.' + k, v) + for k, v in mod.__dict__.items() + if type(v) is DeferredImportModule + ) # @@ -613,16 +734,20 @@ def declare_deferred_modules_as_importable(globals_dict): # yaml_load_args = {} + + def _finalize_yaml(module, available): # Recent versions of PyYAML issue warnings if the Loader argument is # not set if available and hasattr(module, 'SafeLoader'): yaml_load_args['Loader'] = module.SafeLoader + def _finalize_scipy(module, available): if available: # Import key subpackages that we will want to assume are present import scipy.stats + # As of scipy 1.6.0, importing scipy.stats causes the following # to be automatically imported. However, we will still # explicitly import them here to guard against potential future @@ -631,11 +756,13 @@ def _finalize_scipy(module, available): import scipy.sparse import scipy.spatial + def _finalize_pympler(module, available): if available: # Import key subpackages that we will want to assume are present import pympler.muppy + def _finalize_matplotlib(module, available): if not available: return @@ -647,33 +774,51 @@ def _finalize_matplotlib(module, available): module.use('Agg') import matplotlib.pyplot + def _finalize_numpy(np, available): if not available: return - numeric_types.RegisterBooleanType(np.bool_) - for t in (np.int_, np.intc, np.intp, - np.int8, np.int16, np.int32, np.int64, - np.uint8, np.uint16, np.uint32, np.uint64): + numeric_types.RegisterLogicalType(np.bool_) + for t in ( + np.int_, + np.intc, + np.intp, + np.int8, + np.int16, + np.int32, + np.int64, + np.uint8, + np.uint16, + np.uint32, + np.uint64, + ): numeric_types.RegisterIntegerType(t) - numeric_types.RegisterBooleanType(t) - for t in (np.float_, np.float16, np.float32, np.float64, np.ndarray): + # We have deprecated RegisterBooleanType, so we will mock up the + # registration here (to bypass the deprecation warning) until we + # finally remove all support for it + numeric_types._native_boolean_types.add(t) + for t in (np.float_, np.float16, np.float32, np.float64): numeric_types.RegisterNumericType(t) - numeric_types.RegisterBooleanType(t) + # We have deprecated RegisterBooleanType, so we will mock up the + # registration here (to bypass the deprecation warning) until we + # finally remove all support for it + numeric_types._native_boolean_types.add(t) -yaml, yaml_available = attempt_import( - 'yaml', callback=_finalize_yaml) -pympler, pympler_available = attempt_import( - 'pympler', callback=_finalize_pympler) -numpy, numpy_available = attempt_import( - 'numpy', callback=_finalize_numpy) -scipy, scipy_available = attempt_import( - 'scipy', callback=_finalize_scipy, - deferred_submodules=['stats', 'sparse', 'spatial', 'integrate']) +dill, dill_available = attempt_import('dill') +mpi4py, mpi4py_available = attempt_import('mpi4py') networkx, networkx_available = attempt_import('networkx') +numpy, numpy_available = attempt_import('numpy', callback=_finalize_numpy) pandas, pandas_available = attempt_import('pandas') -dill, dill_available = attempt_import('dill') +plotly, plotly_available = attempt_import('plotly') +pympler, pympler_available = attempt_import('pympler', callback=_finalize_pympler) pyutilib, pyutilib_available = attempt_import('pyutilib') +scipy, scipy_available = attempt_import( + 'scipy', + callback=_finalize_scipy, + deferred_submodules=['stats', 'sparse', 'spatial', 'integrate'], +) +yaml, yaml_available = attempt_import('yaml', callback=_finalize_yaml) # Note that matplotlib.pyplot can generate a runtime error on OSX when # not installed as a Framework (as is the case in the CI systems) diff --git a/pyomo/common/deprecation.py b/pyomo/common/deprecation.py index 0e548218466..2e39083770d 100644 --- a/pyomo/common/deprecation.py +++ b/pyomo/common/deprecation.py @@ -33,7 +33,7 @@ _doc_flag = '.. deprecated::' -def _default_msg(obj, user_msg, version, remove_in): +def default_deprecation_msg(obj, user_msg, version, remove_in): """Generate the default deprecation message. See deprecated() function for argument details. @@ -52,10 +52,14 @@ def _default_msg(obj, user_msg, version, remove_in): _qual = getattr(obj, '__qualname__', '') or '' if _qual.endswith('.__init__') or _qual.endswith('.__new__'): - _obj = ' class' - - user_msg = 'This%s has been deprecated and may be removed in a ' \ - 'future release.' % (_obj,) + _obj = f' class ({_qual.rsplit(".", 1)[0]})' + elif _qual and _obj: + _obj += f' ({_qual})' + + user_msg = ( + 'This%s has been deprecated and may be removed in a ' + 'future release.' % (_obj,) + ) comment = [] if version: comment.append('deprecated in %s' % (version,)) @@ -68,11 +72,16 @@ def _default_msg(obj, user_msg, version, remove_in): def _deprecation_docstring(obj, msg, version, remove_in): - if version is None: # or version in ('','tbd','TBD'): - raise DeveloperError("@deprecated missing initial version") + # Note that _deprecation_docstring is guaranteed to be called by + # @deprecated in all situations where we would be creating a + # meaningful deprecation message (classes, functions, and methods), + # so this is a convenient place to check that the version is + # specified. + if version is None: + raise DeveloperError("@deprecated(): missing 'version' argument") return ( - '%s %s\n %s\n' - % (_doc_flag, version, _default_msg(obj, msg, None, remove_in)) + f'{_doc_flag} {version}\n' + f' {default_deprecation_msg(obj, msg, None, remove_in)}\n' ) @@ -86,8 +95,8 @@ def _wrap_class(cls, msg, logger, version, remove_in): if _flagIdx >= 0: _doc = _funcDoc[_flagIdx:] break - # Note: test msg is not None to revert back to the user-supplied - # message. Checking the fields is still useful as it lets us know + # Note: test 'msg is not None' to revert back to the user-supplied + # message. Checking the fields above is still useful as it lets us know # if there is already a deprecation message on either new or init. if msg is not None or _doc is None: _doc = _deprecation_docstring(cls, msg, version, remove_in) @@ -100,22 +109,25 @@ def _wrap_class(cls, msg, logger, version, remove_in): # find the "most derived" implementation of either __new__ or # __init__ and wrap that (breaking ties in favor of __init__) field = '__init__' - for c in cls.__mro__: - for f in ('__init__', '__new__'): + for c in reversed(cls.__mro__): + for f in ('__new__', '__init__'): if getattr(c, f, None) is not getattr(cls, f, None): field = f - setattr(cls, field, _wrap_func( - getattr(cls, field), msg, logger, version, remove_in)) + setattr( + cls, field, _wrap_func(getattr(cls, field), msg, logger, version, remove_in) + ) return cls def _wrap_func(func, msg, logger, version, remove_in): - message = _default_msg(func, msg, version, remove_in) + message = default_deprecation_msg(func, msg, version, remove_in) - @functools.wraps(func, assigned=( - '__module__', '__name__', '__qualname__', '__annotations__')) + @functools.wraps( + func, assigned=('__module__', '__name__', '__qualname__', '__annotations__') + ) def wrapper(*args, **kwargs): - deprecation_warning(message, logger) + cf = _find_calling_frame(1) + deprecation_warning(message, logger, version='', calling_frame=cf) return func(*args, **kwargs) wrapper.__doc__ = 'DEPRECATED.\n\n' @@ -147,12 +159,12 @@ def in_testing_environment(): """ - return any(mod in sys.modules for mod in ( - 'nose', 'nose2', 'pytest', 'sphinx')) + return any(mod in sys.modules for mod in ('nose', 'nose2', 'pytest', 'sphinx')) -def deprecation_warning(msg, logger=None, version=None, - remove_in=None, calling_frame=None): +def deprecation_warning( + msg, logger=None, version=None, remove_in=None, calling_frame=None +): """Standardized formatter for deprecation warnings This is a standardized routine for formatting deprecation warnings @@ -166,8 +178,9 @@ def deprecation_warning(msg, logger=None, version=None, version (str): [required] the version in which the decorated object was deprecated. General practice is to set version - to '' or 'TBD' during development and update it to the - actual release as part of the release process. + to the current development version (from `pyomo --version`) + during development and update it to the actual release as + part of the release process. remove_in (str): the version in which the decorated object will be removed from the code. @@ -175,7 +188,16 @@ def deprecation_warning(msg, logger=None, version=None, calling_frame (frame): the original frame context that triggered the deprecation warning. + Example + ------- + >>> from pyomo.common.deprecation import deprecation_warning + >>> deprecation_warning('This functionality is deprecated.', version='1.2.3') + WARNING: DEPRECATED: This functionality is deprecated. (deprecated in 1.2.3) ... + """ + if version is None: + raise DeveloperError("deprecation_warning() missing 'version' argument") + if logger is None: if calling_frame is not None: cf = calling_frame @@ -184,15 +206,18 @@ def deprecation_warning(msg, logger=None, version=None, # function/method that called deprecation_warning cf = _find_calling_frame(1) if cf is not None: - logger = cf.f_globals.get('__package__', None) + logger = cf.f_globals.get('__name__', None) if logger is not None and not logger.startswith('pyomo'): logger = None if logger is None: logger = 'pyomo' + if isinstance(logger, str): + logger = logging.getLogger(logger) msg = textwrap.fill( - 'DEPRECATED: %s' % (_default_msg(None, msg, version, remove_in),), - width=70) + f'DEPRECATED: {default_deprecation_msg(None, msg, version, remove_in)}', + width=70, + ) if calling_frame is None: # The useful thing to let the user know is what called the # function that generated the deprecation warning. The current @@ -210,7 +235,8 @@ def deprecation_warning(msg, logger=None, version=None, return deprecation_warning.emitted_warnings.add(msg) - logging.getLogger(logger).warning(msg) + logger.warning(msg) + if in_testing_environment(): deprecation_warning.emitted_warnings = None @@ -219,7 +245,7 @@ def deprecation_warning(msg, logger=None, version=None, def deprecated(msg=None, logger=None, version=None, remove_in=None): - """Decorator to indicate that a function, method or class is deprecated. + """Decorator to indicate that a function, method, or class is deprecated. This decorator will cause a warning to be logged when the wrapped function or method is called, or when the deprecated class is @@ -236,95 +262,56 @@ def deprecated(msg=None, logger=None, version=None, remove_in=None): version (str): [required] the version in which the decorated object was deprecated. General practice is to set version - to '' or 'TBD' during development and update it to the - actual release as part of the release process. + to the current development version (from `pyomo --version`) + during development and update it to the actual release as + part of the release process. remove_in (str): the version in which the decorated object will be removed from the code. + Example + ------- + >>> from pyomo.common.deprecation import deprecated + >>> @deprecated(version='1.2.3') + ... def sample_function(x): + ... return 2*x + >>> sample_function(5) + WARNING: DEPRECATED: This function (sample_function) has been deprecated and + may be removed in a future release. (deprecated in 1.2.3) ... + 10 + """ + def wrap(obj): if inspect.isclass(obj): return _wrap_class(obj, msg, logger, version, remove_in) else: return _wrap_func(obj, msg, logger, version, remove_in) + return wrap -def _import_object(name, target, version, remove_in): +def _import_object(name, target, version, remove_in, msg): from importlib import import_module - modname, targetname = target.rsplit('.',1) + + modname, targetname = target.rsplit('.', 1) _object = getattr(import_module(modname), targetname) - if inspect.isclass(_object): - _type = 'class' - elif inspect.isfunction(_object): - _type = 'function' - else: - _type = 'attribute' - deprecation_warning( - "the '%s' %s has been moved to '%s'. Please update your import." - % (name, _type, target), version=version, remove_in=remove_in) + if msg is None: + if inspect.isclass(_object): + _type = 'class' + elif inspect.isfunction(_object): + _type = 'function' + else: + _type = 'attribute' + msg = ( + f"the '{name}' {_type} has been moved to '{target}'." + " Please update your import." + ) + deprecation_warning(msg, version=version, remove_in=remove_in) return _object -class _ModuleGetattrBackport_27(object): - """Backport for support of module.__getattr__ - - Beginning in Python 3.7, modules support the declaration of a - module-scoped __getattr__ and __dir__ to allow for the dynamic - resolution of module attributes. This class wraps the module class - and implements `__getattr__`. As it declares no local - attributes, all module attribute accesses incur a slight runtime - penalty (one extra function call). - - """ - def __init__(self, module): - # Wrapped module needs to be a local attribute. Everything else - # is delegated to the inner module type - super(_ModuleGetattrBackport_27, self).__setattr__( - '_wrapped_module', module) - - def __getattr__(self, name): - try: - return getattr(self._wrapped_module, name) - except AttributeError: - info = self._wrapped_module.__relocated_attrs__.get(name, None) - if info is not None: - target_obj = _import_object(name, *info) - setattr(self, name, target_obj) - return target_obj - raise - - def __dir__(self): - return dir(self._wrapped_module) - - def __setattr__(self, name, val): - setattr(self._wrapped_module, name, val) - -class _ModuleGetattrBackport_35(types.ModuleType): - """Backport for support of module.__getattr__ - - Beginning in Python 3.7, modules support the declaration of a - module-scoped __getattr__ and __dir__ to allow for the dynamic - resolution of module attributes. This class derives from - types.ModuleType and implements `__getattr__`. As it is a direct - replacement for types.ModuleType (i.e., we can reassign the already - loaded module to this type, it is more efficient that the - ModuleGetattrBackport_27 class which must wrap the already loaded - module. - - """ - def __getattr__(self, name): - info = self.__relocated_attrs__.get(name, None) - if info is not None: - target_obj = _import_object(name, *info) - setattr(self, name, target_obj) - return target_obj - raise AttributeError("module '%s' has no attribute '%s'" - % (self.__name__, name)) - -def relocated_module(new_name, msg=None, logger=None, - version=None, remove_in=None): +def relocated_module(new_name, msg=None, logger=None, version=None, remove_in=None): """Provide a deprecation path for moved / renamed modules Upon import, the old module (that called `relocated_module()`) will @@ -346,10 +333,10 @@ def relocated_module(new_name, msg=None, logger=None, pyomo package, or "pyomo") version: str [required] - The version in which the module was renamed or moved. - General practice is to set version to '' or 'TBD' during - development and update it to the actual release as part of the - release process. + The version in which the module was renamed or moved. General + practice is to set version to the current development version + (from `pyomo --version`) during development and update it to the + actual release as part of the release process. remove_in: str The version in which the module will be removed from the code. @@ -364,6 +351,7 @@ def relocated_module(new_name, msg=None, logger=None, """ from importlib import import_module + new_module = import_module(new_name) # The relevant module (the one being deprecated) is the one that @@ -376,19 +364,23 @@ def relocated_module(new_name, msg=None, logger=None, cf = cf.f_back if cf is not None: importer = cf.f_back.f_globals['__name__'].split('.')[0] - while cf is not None and \ - cf.f_globals['__name__'].split('.')[0] == importer: + while cf is not None and cf.f_globals['__name__'].split('.')[0] == importer: cf = cf.f_back if cf is None: cf = _find_calling_frame(1) sys.modules[old_name] = new_module if msg is None: - msg = f"The '{old_name}' module has been moved to '{new_name}'. " \ - 'Please update your import.' + msg = ( + f"The '{old_name}' module has been moved to '{new_name}'. " + 'Please update your import.' + ) deprecation_warning(msg, logger, version, remove_in, cf) -def relocated_module_attribute(local, target, version, remove_in=None): + +def relocated_module_attribute( + local, target, version, remove_in=None, msg=None, f_globals=None +): """Provide a deprecation path for moved / renamed module attributes This function declares that a local module attribute has been moved @@ -397,53 +389,59 @@ def relocated_module_attribute(local, target, version, remove_in=None): object from the new location (on request), as well as emitting the deprecation warning. - It contains backports of the __getattr__ functionality for earlier - versions of Python (although the implementation for 3.5+ is more - efficient that the implementation for 2.7+) - Parameters ---------- local: str The original (local) name of the relocated attribute + target: str The new absolute import name of the relocated attribute + version: str The Pyomo version when this move was released (passed to deprecation_warning) + remove_in: str The Pyomo version when this deprecation path will be removed (passed to deprecation_warning) + + msg: str + If not None, then this specifies a custom deprecation message to + be emitted when the attribute is accessed from its original + location. + """ - _module = sys.modules[inspect.currentframe().f_back.f_globals['__name__']] - if not hasattr(_module, '__relocated_attrs__'): - _module.__relocated_attrs__ = {} - if sys.version_info >= (3,7): - _relocated = _module.__relocated_attrs__ - _mod_getattr = getattr(_module, '__getattr__', None) - def __getattr__(name): - info = _relocated.get(name, None) - if info is not None: - target_obj = _import_object(name, *info) - setattr(_module, name, target_obj) - return target_obj - elif _mod_getattr is not None: - return _mod_getattr(name) - raise AttributeError("module '%s' has no attribute '%s'" - % (_module.__name__, name)) - _module.__getattr__ = __getattr__ - elif sys.version_info >= (3,5): - # If you run across a case where this assertion fails - # (because someone else has messed with the module type), we - # could add logic to use the _ModuleGetattrBackport_27 class - # to wrap the module. However, as I believe that this will - # never happen in Pyomo, it is not worth adding unused - # functionality at this point - assert _module.__class__ is types.ModuleType - _module.__class__ = _ModuleGetattrBackport_35 - else: # sys.version_info >= (2,7): - _module = sys.modules[_module.__name__] \ - = _ModuleGetattrBackport_27(_module) - _module.__relocated_attrs__[local] = (target, version, remove_in) + if version is None: + raise DeveloperError("relocated_module_attribute(): missing 'version' argument") + # Historical note: This method only works for Python >= 3.7. There + # were backports to previous Python interpreters, but were removed + # after SHA 4e04819aaeefc2c08b7718460918885e12343451 + if f_globals is None: + f_globals = inspect.currentframe().f_back.f_globals + if f_globals['__name__'].startswith('importlib.'): + raise DeveloperError( + "relocated_module_attribute() called from a cythonized " + "module without passing f_globals" + ) + _relocated = f_globals.get('__relocated_attrs__', None) + if _relocated is None: + f_globals['__relocated_attrs__'] = _relocated = {} + _mod_getattr = f_globals.get('__getattr__', None) + + def __getattr__(name): + info = _relocated.get(name, None) + if info is not None: + target_obj = _import_object(name, *info) + f_globals[name] = target_obj + return target_obj + elif _mod_getattr is not None: + return _mod_getattr(name) + raise AttributeError( + "module '%s' has no attribute '%s'" % (f_globals['__name__'], name) + ) + + f_globals['__getattr__'] = __getattr__ + _relocated[local] = (target, version, remove_in, msg) class RenamedClass(type): @@ -488,43 +486,49 @@ class RenamedClass(type): True """ + def __new__(cls, name, bases, classdict, *args, **kwargs): new_class = classdict.get('__renamed__new_class__', None) if new_class is not None: + def __renamed__new__(cls, *args, **kwargs): - cls.__renamed__warning__( - "Instantiating class '%s'." % (cls.__name__,)) + cls.__renamed__warning__("Instantiating class '%s'." % (cls.__name__,)) return new_class(*args, **kwargs) + classdict['__new__'] = __renamed__new__ def __renamed__warning__(msg): version = classdict.get('__renamed__version__') remove_in = classdict.get('__renamed__remove_in__') deprecation_warning( - "%s The class '%s' has been renamed to '%s'." % ( - msg, name, new_class.__name__), - version=version, remove_in=remove_in, - calling_frame=_find_calling_frame(1)) + "%s The class '%s' has been renamed to '%s'." + % (msg, name, new_class.__name__), + version=version, + remove_in=remove_in, + calling_frame=_find_calling_frame(1), + ) + classdict['__renamed__warning__'] = __renamed__warning__ - if '__renamed__version__' not in classdict: - raise TypeError( + if not classdict.get('__renamed__version__'): + raise DeveloperError( "Declaring class '%s' using the RenamedClass metaclass, " "but without specifying the __renamed__version__ class " - "attribute" % (name,)) + "attribute" % (name,) + ) renamed_bases = [] for base in bases: new_class = getattr(base, '__renamed__new_class__', None) if new_class is not None: base.__renamed__warning__( - "Declaring class '%s' derived from '%s'." % ( - name, base.__name__,)) + "Declaring class '%s' derived from '%s'." % (name, base.__name__) + ) base = new_class # Flag that this class is derived from a renamed class classdict.setdefault('__renamed__new_class__', None) # Avoid duplicates (in case someone does a diamond between - # the renamed class and [a class dervied from] the new + # the renamed class and [a class derived from] the new # class) if base not in renamed_bases: renamed_bases.append(base) @@ -537,26 +541,33 @@ def __renamed__warning__(msg): renamed_bases.append(new_class) if new_class is None and '__renamed__new_class__' not in classdict: - if not any(hasattr(base, '__renamed__new_class__') for mro in - itertools.chain.from_iterable( - base.__mro__ for base in renamed_bases)): + if not any( + hasattr(base, '__renamed__new_class__') + for mro in itertools.chain.from_iterable( + base.__mro__ for base in renamed_bases + ) + ): raise TypeError( "Declaring class '%s' using the RenamedClass metaclass, " "but without specifying the __renamed__new_class__ class " - "attribute" % (name,)) + "attribute" % (name,) + ) return super().__new__( - cls, name, tuple(renamed_bases), classdict, *args, **kwargs) + cls, name, tuple(renamed_bases), classdict, *args, **kwargs + ) def __instancecheck__(cls, instance): # Note: the warning is issued by subclasscheck - return any(cls.__subclasscheck__(c) - for c in {type(instance), instance.__class__}) + return any( + cls.__subclasscheck__(c) for c in {type(instance), instance.__class__} + ) def __subclasscheck__(cls, subclass): if hasattr(cls, '__renamed__warning__'): cls.__renamed__warning__( - "Checking type relative to '%s'." % (cls.__name__,)) + "Checking type relative to '%s'." % (cls.__name__,) + ) if subclass is cls: return True elif getattr(cls, '__renamed__new_class__') is not None: diff --git a/pyomo/common/download.py b/pyomo/common/download.py index e1c9c34e879..79d5302a58e 100644 --- a/pyomo/common/download.py +++ b/pyomo/common/download.py @@ -15,6 +15,7 @@ import os import platform import re +import shutil import sys import subprocess @@ -25,6 +26,7 @@ from pyomo.common.dependencies import attempt_import request = attempt_import('urllib.request')[0] +urllib_error = attempt_import('urllib.error')[0] ssl = attempt_import('ssl')[0] zipfile = attempt_import('zipfile')[0] gzip = attempt_import('gzip')[0] @@ -34,6 +36,7 @@ DownloadFactory = pyomo.common.Factory('library downloaders') + class FileDownloader(object): _os_version = None @@ -45,9 +48,8 @@ def __init__(self, insecure=False, cacert=None): if cacert is not None: if not self.cacert or not os.path.isfile(self.cacert): raise RuntimeError( - "cacert='%s' does not refer to a valid file." - % (self.cacert,)) - + "cacert='%s' does not refer to a valid file." % (self.cacert,) + ) @classmethod def get_sysinfo(cls): @@ -75,7 +77,7 @@ def _get_distver_from_os_release(cls): line = line.strip() if not line: continue - key,val = line.lower().split('=') + key, val = line.lower().split('=') if val[0] == val[-1] and val[0] in '"\'': val = val[1:-1] if key == 'id': @@ -98,10 +100,19 @@ def _get_distver_from_redhat_release(cls): @classmethod def _get_distver_from_lsb_release(cls): - dist = subprocess.run(['lsb_release', '-si'], stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, universal_newlines=True) - ver = subprocess.run(['lsb_release', '-sr'], stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, universal_newlines=True) + lsb_release = shutil.which('lsb_release') + dist = subprocess.run( + [lsb_release, '-si'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + ver = subprocess.run( + [lsb_release, '-sr'], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) return cls._map_linux_dist(dist.stdout), ver.stdout.strip() @classmethod @@ -116,7 +127,7 @@ def _map_linux_dist(cls, dist): _map = [ ('redhat', 'rhel'), 'fedora', - 'ubuntu', # implicitly maps kubuntu / xubuntu + 'ubuntu', # implicitly maps kubuntu / xubuntu 'debian', # Additional RHEL (Fedora) spins 'centos', @@ -143,15 +154,14 @@ def _get_os_version(cls): dist, ver = cls._get_distver_from_distro() elif os.path.exists('/etc/redhat-release'): dist, ver = cls._get_distver_from_redhat_release() - elif subprocess.run(['lsb_release'], stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL).returncode == 0: + elif shutil.which('lsb_release'): dist, ver = cls._get_distver_from_lsb_release() elif os.path.exists('/etc/os-release'): # Note that (at least on centos), os_release is an # imprecise version string dist, ver = cls._get_distver_from_os_release() else: - dist, ver = '','' + dist, ver = '', '' return dist, ver elif _os == 'darwin': return 'macos', platform.mac_ver()[0] @@ -199,17 +209,15 @@ def get_os_version(cls, normalize=True): if _os in _map: _os = _map[_os] - if _os in {'ubuntu','macos','win'}: + if _os in {'ubuntu', 'macos', 'win'}: return _os + ''.join(_ver.split('.')[:2]) else: return _os + _ver.split('.')[0] - @deprecated("get_url() is deprecated. Use get_platform_url()", - version='5.6.9') + @deprecated("get_url() is deprecated. Use get_platform_url()", version='5.6.9') def get_url(self, urlmap): return self.get_platform_url(urlmap) - def get_platform_url(self, urlmap): """Select the url for this platform @@ -228,11 +236,10 @@ def get_platform_url(self, urlmap): url = urlmap.get(system, None) if url is None: raise RuntimeError( - "cannot infer the correct url for platform '%s'" - % (platform,)) + "cannot infer the correct url for platform '%s'" % (platform,) + ) return url - def create_parser(self, parser=None): if parser is None: parser = argparse.ArgumentParser() @@ -252,7 +259,8 @@ def create_parser(self, parser=None): "to verify peers.", ) parser.add_argument( - '-v','--verbose', + '-v', + '--verbose', action='store_true', dest='verbose', default=False, @@ -266,14 +274,14 @@ def parse_args(self, argv): 'target', nargs="?", default=None, - help="Target destination directory or filename" + help="Target destination directory or filename", ) parser.parse_args(argv, self) if self.cacert is not None: if not self.cacert or not os.path.isfile(self.cacert): raise RuntimeError( - "--cacert='%s' does not refer to a valid file." - % (self.cacert,)) + "--cacert='%s' does not refer to a valid file." % (self.cacert,) + ) def set_destination_filename(self, default): if self.target is not None: @@ -293,26 +301,34 @@ def destination(self): def retrieve_url(self, url): """Return the contents of a URL as an io.BytesIO object""" + ctx = ssl.create_default_context() + if self.cacert: + ctx.load_verify_locations(cafile=self.cacert) + if self.insecure: + ctx.check_hostname = False + ctx.verify_mode = ssl.CERT_NONE try: - ctx = ssl.create_default_context() - if self.cacert: - ctx.load_verify_locations(cafile=self.cacert) - if self.insecure: - ctx.check_hostname = False - ctx.verify_mode = ssl.CERT_NONE fetch = request.urlopen(url, context=ctx) - except AttributeError: - # Revert to pre-2.7.9 syntax - fetch = request.urlopen(url) + except urllib_error.HTTPError as e: + if e.code != 403: # Forbidden + raise + fetch = None + if fetch is None: + # This is a fix implemented if we get stuck behind server + # security features (attempting to block "bot" agents). + # We are setting a known user-agent to get around that. + req = request.Request(url=url, headers={'User-Agent': 'Mozilla/5.0'}) + fetch = request.urlopen(req, context=ctx) ans = fetch.read() logger.info(" ...downloaded %s bytes" % (len(ans),)) return ans - def get_file(self, url, binary): if self._fname is None: - raise DeveloperError("target file name has not been initialized " - "with set_destination_filename") + raise DeveloperError( + "target file name has not been initialized " + "with set_destination_filename" + ) with open(self._fname, 'wb' if binary else 'wt') as FILE: raw_file = self.retrieve_url(url) if binary: @@ -321,65 +337,68 @@ def get_file(self, url, binary): FILE.write(raw_file.decode()) logger.info(" ...wrote %s bytes" % (len(raw_file),)) - def get_binary_file(self, url): """Retrieve the specified url and write as a binary file""" return self.get_file(url, binary=True) - def get_text_file(self, url): """Retrieve the specified url and write as a text file""" return self.get_file(url, binary=False) - def get_binary_file_from_zip_archive(self, url, srcname): if self._fname is None: - raise DeveloperError("target file name has not been initialized " - "with set_destination_filename") + raise DeveloperError( + "target file name has not been initialized " + "with set_destination_filename" + ) with open(self._fname, 'wb') as FILE: zipped_file = io.BytesIO(self.retrieve_url(url)) raw_file = zipfile.ZipFile(zipped_file).open(srcname).read() FILE.write(raw_file) logger.info(" ...wrote %s bytes" % (len(raw_file),)) - def get_zip_archive(self, url, dirOffset=0): if self._fname is None: - raise DeveloperError("target file name has not been initialized " - "with set_destination_filename") + raise DeveloperError( + "target file name has not been initialized " + "with set_destination_filename" + ) if os.path.exists(self._fname) and not os.path.isdir(self._fname): raise RuntimeError( - "Target directory (%s) exists, but is not a directory" - % (self._fname,)) + "Target directory (%s) exists, but is not a directory" % (self._fname,) + ) zip_file = zipfile.ZipFile(io.BytesIO(self.retrieve_url(url))) # Simple sanity checks for info in zip_file.infolist(): f = info.filename if f[0] in '\\/' or '..' in f: - logger.error("malformed (potentially insecure) filename (%s) " - "found in zip archive. Skipping file." % (f,)) + logger.error( + "malformed (potentially insecure) filename (%s) " + "found in zip archive. Skipping file." % (f,) + ) continue target = self._splitpath(f) if len(target) <= dirOffset: if f[-1] != '/': - logger.warning("Skipping file (%s) in zip archive due to " - "dirOffset" % (f,)) + logger.warning( + "Skipping file (%s) in zip archive due to dirOffset" % (f,) + ) continue info.filename = target[-1] + '/' if f[-1] == '/' else target[-1] - zip_file.extract( - f, os.path.join(self._fname, *tuple(target[dirOffset:-1]))) + zip_file.extract(f, os.path.join(self._fname, *tuple(target[dirOffset:-1]))) def get_gzipped_binary_file(self, url): if self._fname is None: - raise DeveloperError("target file name has not been initialized " - "with set_destination_filename") + raise DeveloperError( + "target file name has not been initialized " + "with set_destination_filename" + ) with open(self._fname, 'wb') as FILE: gzipped_file = io.BytesIO(self.retrieve_url(url)) raw_file = gzip.GzipFile(fileobj=gzipped_file).read() FILE.write(raw_file) logger.info(" ...wrote %s bytes" % (len(raw_file),)) - def _splitpath(self, path): components = [] head, tail = os.path.split(os.path.normpath(path)) diff --git a/pyomo/common/env.py b/pyomo/common/env.py index 85963057b12..a90efcc2787 100644 --- a/pyomo/common/env.py +++ b/pyomo/common/env.py @@ -10,9 +10,9 @@ # ___________________________________________________________________________ import ctypes -import multiprocessing import os + def _as_bytes(val): """Helper function to coerce a string to a bytes() object""" if isinstance(val, bytes): @@ -64,8 +64,20 @@ def _load_dll(name, timeout=10): """ if not ctypes.util.find_library(name): return False, None + + import multiprocessing + if _load_dll.pool is None: - _load_dll.pool = multiprocessing.Pool(1) + try: + _load_dll.pool = multiprocessing.Pool(1) + except AssertionError: + # multiprocessing will fail with an assertion error if this + # Python process is a daemonic process (e.g., it was + # launched within a dask server). Fall back on a serial + # process (and live with the risk that the import hangs). + import multiprocessing.dummy + + _load_dll.pool = multiprocessing.dummy.Pool(1) job = _load_dll.pool.apply_async(_attempt_ctypes_cdll, (name,)) try: result = job.get(timeout) @@ -81,6 +93,7 @@ def _load_dll(name, timeout=10): else: return result, None + # For efficiency, cache the multiprocessing Pool between calls to _load_dll _load_dll.pool = None @@ -164,7 +177,7 @@ def getenv(self, key): try: return os.environb.get(key, None) except AttributeError: - return _as_bytes(os.environ.get(_as_unicode(key),None)) + return _as_bytes(os.environ.get(_as_unicode(key), None)) def wgetenv(self, key): # PY2 doesn't distinguish, and PY3's environ is nominally @@ -206,7 +219,7 @@ def available(self): if self._loaded is not None: return self._loaded - self._loaded, self.dll = _load_dll(self._libname) + self._loaded, self.dll = _load_dll(self._libname) if not self._loaded: return self._loaded @@ -233,16 +246,14 @@ def get_env_dict(self): return None try: - envp = ctypes.POINTER(ctypes.c_wchar_p).in_dll( - self.dll, '_wenviron') + envp = ctypes.POINTER(ctypes.c_wchar_p).in_dll(self.dll, '_wenviron') if not envp.contents: envp = None except ValueError: envp = None if envp is None: try: - envp = ctypes.POINTER(ctypes.c_char_p).in_dll( - self.dll, '_environ') + envp = ctypes.POINTER(ctypes.c_char_p).in_dll(self.dll, '_environ') if not envp.contents: return None except ValueError: @@ -256,12 +267,13 @@ def get_env_dict(self): size += len(line) if len(line) == 0: raise ValueError( - "Error processing MSVCRT _environ: " - "0-length string encountered") + "Error processing MSVCRT _environ: 0-length string encountered" + ) if size > 32767: raise ValueError( "Error processing MSVCRT _environ: " - "exceeded max environment block size (32767)") + "exceeded max environment block size (32767)" + ) key, val = line.split('=', 1) ans[key] = val return ans @@ -282,7 +294,7 @@ def available(self): if self._loaded is not None: return self._loaded - self._loaded, self.dll = _load_dll(self._libname) + self._loaded, self.dll = _load_dll(self._libname) if not self._loaded: return self._loaded @@ -296,13 +308,15 @@ def available(self): # Note DWORD == c_ulong self._getenv_dll = self.dll.GetEnvironmentVariableA - self._getenv_dll.argtypes = [ - ctypes.c_char_p, ctypes.c_char_p, ctypes.c_ulong] + self._getenv_dll.argtypes = [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_ulong] self._getenv_dll.restype = ctypes.c_ulong self._wgetenv_dll = self.dll.GetEnvironmentVariableW self._wgetenv_dll.argtypes = [ - ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_ulong] + ctypes.c_wchar_p, + ctypes.c_wchar_p, + ctypes.c_ulong, + ] self._wgetenv_dll.restype = ctypes.c_ulong # We (arbitrarily) choose to return the unicode environ @@ -352,14 +366,16 @@ def get_env_dict(self): if len(_str_buf[i]) == 0: raise ValueError( "Error processing Win32 GetEnvironmentStringsW: " - "0-length character encountered") - if i > 32767: # max var length + "0-length character encountered" + ) + if i > 32767: # max var length raise ValueError( "Error processing Win32 GetEnvironmentStringsW: " - "exceeded max environment block size (32767)") + "exceeded max environment block size (32767)" + ) key, val = _str.split('=', 1) ans[key] = val - i += len(_str_buf[i]) # Skip the NULL + i += len(_str_buf[i]) # Skip the NULL self._free_envstr(_str_buf) return ans @@ -422,7 +438,7 @@ class CtypesEnviron(object): # important to deal with it before the msvcrt libraries. DLLs = [ _Win32DLL('kernel32'), - _MsvcrtDLL(getattr(ctypes.util,'find_msvcrt',lambda: None)()), + _MsvcrtDLL(getattr(ctypes.util, 'find_msvcrt', lambda: None)()), _MsvcrtDLL('api-ms-win-crt-environment-l1-1-0'), _MsvcrtDLL('msvcrt'), _MsvcrtDLL('msvcr120'), @@ -435,17 +451,16 @@ class CtypesEnviron(object): ] def __init__(self, **kwds): - self.interfaces = [ - _RestorableEnvironInterface(_OSEnviron()), - ] - self.interfaces.extend(_RestorableEnvironInterface(dll) - for dll in self.DLLs if dll.available()) + self.interfaces = [_RestorableEnvironInterface(_OSEnviron())] + self.interfaces.extend( + _RestorableEnvironInterface(dll) for dll in self.DLLs if dll.available() + ) # If this is the first time a CtypesEnviron was created, the # calls to dll.activate() may have spawned a multiprocessing # pool, which we should clean up. if _load_dll.pool is not None: - _load_dll.pool.terminate() - _load_dll.pool = None + _load_dll.pool.terminate() + _load_dll.pool = None # Set the incoming env strings on all interfaces... for k, v in kwds.items(): self[k] = v diff --git a/pyomo/common/envvar.py b/pyomo/common/envvar.py index a5ee9928185..d74cb764641 100644 --- a/pyomo/common/envvar.py +++ b/pyomo/common/envvar.py @@ -14,12 +14,14 @@ if 'PYOMO_CONFIG_DIR' in os.environ: PYOMO_CONFIG_DIR = os.path.abspath(os.environ['PYOMO_CONFIG_DIR']) -elif platform.system().lower().startswith(('windows','cygwin')): +elif platform.system().lower().startswith(('windows', 'cygwin')): PYOMO_CONFIG_DIR = os.path.abspath( - os.path.join(os.environ.get('LOCALAPPDATA', ''), 'Pyomo')) + os.path.join(os.environ.get('LOCALAPPDATA', ''), 'Pyomo') + ) else: PYOMO_CONFIG_DIR = os.path.abspath( - os.path.join(os.environ.get('HOME', ''), '.pyomo')) + os.path.join(os.environ.get('HOME', ''), '.pyomo') + ) # Note that alternative platform-independent implementation of the above # could be to use: diff --git a/pyomo/common/errors.py b/pyomo/common/errors.py index cc5a0231876..17013ce4dca 100644 --- a/pyomo/common/errors.py +++ b/pyomo/common/errors.py @@ -9,22 +9,134 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +import inspect +import textwrap + + +def format_exception(msg, prolog=None, epilog=None, exception=None, width=76): + """Generate a formatted exception message + + This returns a formatted exception message, line wrapped for display + on the console and with optional prolog and epilog messages. + + Parameters + ---------- + msg: str + The raw exception message + + prolog: str, optional + A message to output before the exception message, ``msg``. If + this message is long enough to line wrap, the ``msg`` will be + indented a level below the ``prolog`` message. + + epilog: str, optional + A message to output after the exception message, ``msg``. If + provided, the ``msg`` will be indented a level below the + ``prolog`` / ``epilog`` messages. + + exception: Exception, optional + The raw exception being raised (used to improve initial line wrapping). + + width: int, optional + The line length to wrap the exception message to. + + Returns + ------- + str + """ + fields = [] + + if epilog: + indent = ' ' * 8 + else: + indent = ' ' * 4 + + if exception is None: + # default to the length of 'NotImplementedError: ', the longest + # built-in name that we commonly raise + initial_indent = ' ' * 21 + else: + if not inspect.isclass(exception): + exception = exception.__class__ + initial_indent = ' ' * (len(exception.__name__) + 2) + if exception.__module__ != 'builtins': + initial_indent += ' ' * (len(exception.__module__) + 1) + + if prolog is not None: + if '\n' not in prolog: + # We want to strip off the leading indent that we added as a + # placeholder for the string representation of the exception + # class name. + prolog = textwrap.fill( + prolog, + width=width, + initial_indent=initial_indent, + subsequent_indent=' ' * 4, + break_long_words=False, + break_on_hyphens=False, + ).lstrip() + # If the prolog line-wrapped, ensure that the message is + # indented an additional level. + if '\n' in prolog: + indent = ' ' * 8 + fields.append(prolog) + initial_indent = indent + + if '\n' not in msg: + msg = textwrap.fill( + msg, + width=width, + initial_indent=initial_indent, + subsequent_indent=indent, + break_long_words=False, + break_on_hyphens=False, + ) + if not fields: + # We want to strip off the leading indent that we just + # added, but only if there is no prolog + msg = msg.lstrip() + fields.append(msg) + + if epilog is not None: + if '\n' not in epilog: + epilog = textwrap.fill( + epilog, + width=width, + initial_indent=' ' * 4, + subsequent_indent=' ' * 4, + break_long_words=False, + break_on_hyphens=False, + ) + fields.append(epilog) + + return '\n'.join(fields) + class ApplicationError(Exception): """ An exception used when an external application generates an error. """ - def __init__(self, *args, **kargs): - Exception.__init__(self, *args, **kargs) #pragma:nocover + pass class PyomoException(Exception): """ - Exception class for other pyomo exceptions to inherit from, - allowing pyomo exceptions to be caught in a general way + Exception class for other Pyomo exceptions to inherit from, + allowing Pyomo exceptions to be caught in a general way (e.g., in other applications that use Pyomo). """ + + pass + + +class DeferredImportError(ImportError): + """This exception is raised when something attempts to access a module + that was imported by :py:func:`.attempt_import`, but the module + import failed. + + """ + pass @@ -35,38 +147,100 @@ class DeveloperError(PyomoException, NotImplementedError): component not declaring a 'ctype'). """ - def __init__(self, val): - self.parameter = val - def __str__(self): - return ( "Internal Pyomo implementation error:\n\t%s\n" - "\tPlease report this to the Pyomo Developers." - % ( repr(self.parameter), ) ) + return format_exception( + repr(super().__str__()), + prolog="Internal Pyomo implementation error:", + epilog="Please report this to the Pyomo Developers.", + exception=self, + ) + + +class InfeasibleConstraintException(PyomoException): + """ + Exception class used by Pyomo transformations to indicate + that an infeasible constraint has been identified (e.g. in + the course of range reduction). + """ + + pass + + +class IterationLimitError(PyomoException, RuntimeError): + """A subclass of :py:class:`RuntimeError`, raised by an iterative method + when the iteration limit is reached. + + TODO: solvers currently do not raise this exception, but probably + should (at least when non-normal termination conditions are mapped + to exceptions) + + """ class IntervalException(PyomoException, ValueError): """ Exception class used for errors in interval arithmetic. """ + pass -class InfeasibleConstraintException(PyomoException): +class InvalidValueError(PyomoException, ValueError): """ - Exception class used by Pyomo transformations to indicate - that an infeasible constraint has been identified (e.g. in - the course of range reduction). + Exception class used for value errors in compiled model representations """ + pass +class MouseTrap(PyomoException, NotImplementedError): + """ + Exception class used to throw errors for not-implemented functionality + that might be rational to support (i.e., we already gave you a cookie) + but risks taking Pyomo's flexibility a step beyond what is sane, + or solvable, or communicable to a solver, etc. (i.e., Really? Now you + want a glass of milk too?) + """ + + def __str__(self): + return format_exception( + repr(super().__str__()), + prolog="Sorry, mouse, no cookies here!", + epilog="This is functionality we think may be rational to " + "support, but is not yet implemented (possibly due to developer " + "availability, complexity of edge cases, or general practicality " + "or tractability). However, please feed the mice: " + "pull requests are always welcome!", + exception=self, + ) + + class NondifferentiableError(PyomoException, ValueError): """A Pyomo-specific ValueError raised for non-differentiable expressions""" + pass + class TempfileContextError(PyomoException, IndexError): """A Pyomo-specific IndexError raised when attempting to use the TempfileManager when it does not have a currently active context. """ + pass + + +class TemplateExpressionError(ValueError): + """Special ValueError raised by getitem for template arguments + + This exception is triggered by the Pyomo expression system when + attempting to get a member of an IndexedComponent using either a + TemplateIndex, or an expression containing a TemplateIndex. + + Users should never see this exception. + + """ + + def __init__(self, template, *args, **kwds): + self.template = template + super(TemplateExpressionError, self).__init__(*args, **kwds) diff --git a/pyomo/common/factory.py b/pyomo/common/factory.py index 6eb024c7252..6a97759c714 100644 --- a/pyomo/common/factory.py +++ b/pyomo/common/factory.py @@ -58,10 +58,11 @@ def unregister(self, name): if name in self._cls: del self._cls[name] del self._doc[name] - + def register(self, name, doc=None): def fn(cls): self._cls[name] = cls self._doc[name] = doc return cls + return fn diff --git a/pyomo/common/fileutils.py b/pyomo/common/fileutils.py index af16070b724..16933df64af 100644 --- a/pyomo/common/fileutils.py +++ b/pyomo/common/fileutils.py @@ -44,8 +44,8 @@ from . import envvar from .deprecation import deprecated, relocated_module_attribute -relocated_module_attribute( - 'StreamIndenter', 'pyomo.common.formatting', version='6.2') +relocated_module_attribute('StreamIndenter', 'pyomo.common.formatting', version='6.2') + def this_file(stack_offset=1): """Returns the file name for the module that calls this function. @@ -69,16 +69,22 @@ def this_file(stack_offset=1): def this_file_dir(stack_offset=1): - """Returns the directory containing the module that calls this function. - """ + """Returns the directory containing the module that calls this function.""" return os.path.dirname(this_file(stack_offset=1 + stack_offset)) PYOMO_ROOT_DIR = os.path.dirname(os.path.dirname(this_file_dir())) -def find_path(name, validate, cwd=True, mode=os.R_OK, ext=None, - pathlist=[], allow_pathlist_deep_references=True): +def find_path( + name, + validate, + cwd=True, + mode=os.R_OK, + ext=None, + pathlist=[], + allow_pathlist_deep_references=True, +): """Locate a path, given a set of search parameters Parameters @@ -137,7 +143,7 @@ def find_path(name, validate, cwd=True, mode=os.R_OK, ext=None, if allow_pathlist_deep_references or os.path.basename(name) == name: if isinstance(pathlist, str): - locations.extend( pathlist.split(os.pathsep) ) + locations.extend(pathlist.split(os.pathsep)) else: locations.extend(pathlist) @@ -152,7 +158,7 @@ def find_path(name, validate, cwd=True, mode=os.R_OK, ext=None, if not path: continue for _ext in extlist: - for test in glob.glob(os.path.join(path, name+_ext)): + for test in glob.glob(os.path.join(path, name + _ext)): if not validate(test): continue if mode is not None and not os.access(test, mode): @@ -161,14 +167,20 @@ def find_path(name, validate, cwd=True, mode=os.R_OK, ext=None, return None -def find_file(filename, cwd=True, mode=os.R_OK, ext=None, pathlist=[], - allow_pathlist_deep_references=True): +def find_file( + filename, + cwd=True, + mode=os.R_OK, + ext=None, + pathlist=[], + allow_pathlist_deep_references=True, +): """Locate a file, given a set of search parameters Parameters ---------- filename : str - + The file name to locate. The file name may contain references to a user's home directory (``~user``), environment variables (``${HOME}/bin``), and shell wildcards (``?`` and ``*``); all of @@ -209,15 +221,19 @@ def find_file(filename, cwd=True, mode=os.R_OK, ext=None, pathlist=[], """ return find_path( - filename, os.path.isfile, cwd=cwd, mode=mode, ext=ext, + filename, + os.path.isfile, + cwd=cwd, + mode=mode, + ext=ext, pathlist=pathlist, - allow_pathlist_deep_references=allow_pathlist_deep_references + allow_pathlist_deep_references=allow_pathlist_deep_references, ) - -def find_dir(dirname, cwd=True, mode=os.R_OK, pathlist=[], - allow_pathlist_deep_references=True): +def find_dir( + dirname, cwd=True, mode=os.R_OK, pathlist=[], allow_pathlist_deep_references=True +): """Locate a directory, given a set of search parameters Parameters @@ -260,25 +276,25 @@ def find_dir(dirname, cwd=True, mode=os.R_OK, pathlist=[], """ return find_path( - dirname, os.path.isdir, cwd=cwd, mode=mode, pathlist=pathlist, - allow_pathlist_deep_references=allow_pathlist_deep_references + dirname, + os.path.isdir, + cwd=cwd, + mode=mode, + pathlist=pathlist, + allow_pathlist_deep_references=allow_pathlist_deep_references, ) -_exeExt = { - 'linux': None, - 'windows': '.exe', - 'cygwin': '.exe', - 'darwin': None, -} +_exeExt = {'linux': None, 'windows': '.exe', 'cygwin': '.exe', 'darwin': None} _libExt = { - 'linux': ('.so', '.so.*'), + 'linux': ('.so', '.so.*'), 'windows': ('.dll', '.pyd'), - 'cygwin': ('.dll', '.so', '.so.*'), - 'darwin': ('.dylib', '.so', '.so.*'), + 'cygwin': ('.dll', '.so', '.so.*'), + 'darwin': ('.dylib', '.so', '.so.*'), } + def _system(): system = platform.system().lower() for c in '.-_': @@ -287,7 +303,7 @@ def _system(): def _path(): - return (os.environ.get('PATH','') or os.defpath).split(os.pathsep) + return (os.environ.get('PATH', '') or os.defpath).split(os.pathsep) def find_library(libname, cwd=True, include_PATH=True, pathlist=None): @@ -335,10 +351,10 @@ def find_library(libname, cwd=True, include_PATH=True, pathlist=None): if pathlist is None: # Note: PYOMO_CONFIG_DIR/lib comes before LD_LIBRARY_PATH, and # PYOMO_CONFIG_DIR/bin comes immediately before PATH - pathlist = [ os.path.join(envvar.PYOMO_CONFIG_DIR, 'lib') ] - pathlist.extend(os.environ.get('LD_LIBRARY_PATH','').split(os.pathsep)) + pathlist = [os.path.join(envvar.PYOMO_CONFIG_DIR, 'lib')] + pathlist.extend(os.environ.get('LD_LIBRARY_PATH', '').split(os.pathsep)) if include_PATH: - pathlist.append( os.path.join(envvar.PYOMO_CONFIG_DIR, 'bin') ) + pathlist.append(os.path.join(envvar.PYOMO_CONFIG_DIR, 'bin')) elif isinstance(pathlist, str): pathlist = pathlist.split(os.pathsep) else: @@ -350,7 +366,7 @@ def find_library(libname, cwd=True, include_PATH=True, pathlist=None): lib = find_file(libname, cwd=cwd, ext=ext, pathlist=pathlist) if lib is None and not libname.startswith('lib'): # Search 2: prepend 'lib' (with extensions) in our paths - lib = find_file('lib'+libname, cwd=cwd, ext=ext, pathlist=pathlist) + lib = find_file('lib' + libname, cwd=cwd, ext=ext, pathlist=pathlist) if lib is not None: return lib # Search 3: use ctypes.util.find_library (which expects 'lib' and @@ -358,7 +374,7 @@ def find_library(libname, cwd=True, include_PATH=True, pathlist=None): libname_base, ext = os.path.splitext(os.path.basename(libname)) if libname_base.startswith('lib') and _system() != 'windows': libname_base = libname_base[3:] - if ext.lower().startswith(('.so','.dll','.dylib')): + if ext.lower().startswith(('.so', '.dll', '.dylib')): return ctypes.util.find_library(libname_base) else: return ctypes.util.find_library(libname) @@ -405,7 +421,7 @@ def find_executable(exename, cwd=True, include_PATH=True, pathlist=None): """ if pathlist is None: - pathlist = [ os.path.join(envvar.PYOMO_CONFIG_DIR, 'bin') ] + pathlist = [os.path.join(envvar.PYOMO_CONFIG_DIR, 'bin')] elif isinstance(pathlist, str): pathlist = pathlist.split(os.pathsep) else: @@ -413,15 +429,21 @@ def find_executable(exename, cwd=True, include_PATH=True, pathlist=None): if include_PATH: pathlist.extend(_path()) ext = _exeExt.get(_system(), None) - return find_file(exename, cwd=cwd, ext=ext, mode=os.R_OK|os.X_OK, - pathlist=pathlist, allow_pathlist_deep_references=False) + return find_file( + exename, + cwd=cwd, + ext=ext, + mode=os.R_OK | os.X_OK, + pathlist=pathlist, + allow_pathlist_deep_references=False, + ) def import_file(path, clear_cache=False, infer_package=True, module_name=None): """ Import a module given the full path/filename of the file. Replaces import_file from pyutilib (Pyomo 6.0.0). - + This function returns the module object that is created. Parameters @@ -431,16 +453,16 @@ def import_file(path, clear_cache=False, infer_package=True, module_name=None): clear_cache: bool Remove module if already loaded. The default is False. """ - path = os.path.normpath(os.path.abspath(os.path.expanduser( - os.path.expandvars(path)))) + path = os.path.normpath( + os.path.abspath(os.path.expanduser(os.path.expandvars(path))) + ) if not os.path.exists(path): raise FileNotFoundError('File does not exist. Check path.') module_dir, module_file = os.path.split(path) if module_name is None: module_name, module_ext = os.path.splitext(module_file) if infer_package: - while module_dir and os.path.exists( - os.path.join(module_dir, '__init__.py')): + while module_dir and os.path.exists(os.path.join(module_dir, '__init__.py')): module_dir, mod = os.path.split(module_dir) module_name = mod + '.' + module_name if clear_cache and module_name in sys.modules: @@ -448,16 +470,18 @@ def import_file(path, clear_cache=False, infer_package=True, module_name=None): sys.path.insert(0, module_dir) try: spec = importlib.util.spec_from_file_location(module_name, path) - module = spec.loader.load_module() + module = importlib.util.module_from_spec(spec) + if module_name not in sys.modules: + sys.modules[module_name] = module + spec.loader.exec_module(module) finally: sys.path.pop(0) return module class PathData(object): - """An object for storing and managing a :py:class:`PathManager` path + """An object for storing and managing a :py:class:`PathManager` path""" - """ def __init__(self, manager, name): self._mngr = manager self._registered_name = name @@ -489,11 +513,13 @@ def set_path(self, value): logging.getLogger('pyomo.common').warning( "explicitly setting the path for '%s' to an " "invalid object or nonexistent location ('%s')" - % (self._registered_name, value)) + % (self._registered_name, value) + ) - @deprecated("get_path() is deprecated; use " - "pyomo.common.Executable(name).path()", - version='5.6.2') + @deprecated( + "get_path() is deprecated; use pyomo.common.Executable(name).path()", + version='5.6.2', + ) def get_path(self): return self.path() @@ -546,9 +572,8 @@ def __str__(self): class ExecutableData(PathData): - """A :py:class:`PathData` class specifically for executables. + """A :py:class:`PathData` class specifically for executables.""" - """ @property def executable(self): """Get (or set) the path to the executable""" @@ -681,6 +706,7 @@ class PathManager(object): ... os.remove(_testfile) """ + def __init__(self, finder, dataClass): self._pathTo = {} self._find = finder @@ -703,6 +729,7 @@ def rehash(self): for _path in self._pathTo.values(): _path.rehash() + # # Define singleton objects for Pyomo / Users to interact with # @@ -710,20 +737,24 @@ def rehash(self): Library = PathManager(find_library, PathData) -@deprecated("pyomo.common.register_executable(name) has been deprecated; " - "explicit registration is no longer necessary", - version='5.6.2') +@deprecated( + "pyomo.common.register_executable(name) has been deprecated; " + "explicit registration is no longer necessary", + version='5.6.2', +) def register_executable(name, validate=None): # Setting to None will cause Executable to re-search the pathlist return Executable(name).rehash() + @deprecated( """pyomo.common.registered_executable(name) has been deprecated; use pyomo.common.Executable(name).path() to get the path or pyomo.common.Executable(name).available() to get a bool indicating file availability. Equivalent results can be obtained by casting Executable(name) to string or bool.""", - version='5.6.2') + version='5.6.2', +) def registered_executable(name): ans = Executable(name) if ans.path() is None: @@ -731,8 +762,11 @@ def registered_executable(name): else: return ans -@deprecated("pyomo.common.unregister_executable(name) has been deprecated; " - "use Executable(name).disable()", - version='5.6.2') + +@deprecated( + "pyomo.common.unregister_executable(name) has been deprecated; " + "use Executable(name).disable()", + version='5.6.2', +) def unregister_executable(name): Executable(name).disable() diff --git a/pyomo/common/formatting.py b/pyomo/common/formatting.py index af9fca24901..f76d16880df 100644 --- a/pyomo/common/formatting.py +++ b/pyomo/common/formatting.py @@ -14,12 +14,15 @@ tostr tabular_writer + wrap_reStructuredText StreamIndenter """ +import re import types from pyomo.common.sorting import sorted_robust + def tostr(value, quote_str=False): """Convert a value to a string @@ -53,7 +56,7 @@ def tostr(value, quote_str=False): """ # Override the generation of str(list), but only if the object is # using the default implementation of list.__str__. Note that the - # default implemention of __str__ (in CPython) is to call __repr__, + # default implementation of __str__ (in CPython) is to call __repr__, # so we will test both. This is particularly important for # collections.namedtuple, which reimplements __repr__ but not # __str__. @@ -63,37 +66,38 @@ def tostr(value, quote_str=False): # in particular instances: tostr.handlers[_type] = tostr.handlers[None] if isinstance(value, list): - if ( _type.__str__ is list.__str__ and - _type.__repr__ is list.__repr__ ): + if _type.__str__ is list.__str__ and _type.__repr__ is list.__repr__: tostr.handlers[_type] = tostr.handlers[list] elif isinstance(value, tuple): - if ( _type.__str__ is tuple.__str__ and - _type.__repr__ is tuple.__repr__ ): + if _type.__str__ is tuple.__str__ and _type.__repr__ is tuple.__repr__: tostr.handlers[_type] = tostr.handlers[tuple] elif isinstance(value, dict): - if ( _type.__str__ is dict.__str__ and - _type.__repr__ is dict.__repr__ ): + if _type.__str__ is dict.__str__ and _type.__repr__ is dict.__repr__: tostr.handlers[_type] = tostr.handlers[dict] elif isinstance(value, str): tostr.handlers[_type] = tostr.handlers[str] return tostr.handlers[_type](value, quote_str) + tostr.handlers = { list: lambda value, quote_str: ( "[%s]" % (', '.join(tostr(v, True) for v in value)) ), dict: lambda value, quote_str: ( - "{%s}" % (', '.join('%s: %s' % (tostr(k, True), tostr(v, True)) - for k, v in value.items())) + "{%s}" + % ( + ', '.join( + '%s: %s' % (tostr(k, True), tostr(v, True)) for k, v in value.items() + ) + ) ), tuple: lambda value, quote_str: ( - "(%s,)" % (tostr(value[0], True),) if len(value) == 1 + "(%s,)" % (tostr(value[0], True),) + if len(value) == 1 else "(%s)" % (', '.join(tostr(v, True) for v in value)) ), - str: lambda value, quote_str: ( - repr(value) if quote_str else value - ), + str: lambda value, quote_str: (repr(value) if quote_str else value), None: lambda value, quote_str: str(value), } @@ -142,19 +146,20 @@ def tabular_writer(ostream, prefix, data, header, row_generator): # A ValueError can be raised when row_generator is called # (if it is a function), or when it is exhausted generating # the list (if it is a generator) - _minWidth = 4 # Ensure columns are wide enough to output "None" + _minWidth = 4 # Ensure columns are wide enough to output "None" _rows[_key] = None continue _rows[_key] = [ ((tostr("" if i else _key),) if header else ()) + tuple(tostr(x) for x in _r) - for i, _r in enumerate(_rowSet) ] + for i, _r in enumerate(_rowSet) + ] if not _rows[_key]: _minWidth = 4 elif not _width: - _width = [0]*len(_rows[_key][0]) + _width = [0] * len(_rows[_key][0]) for _row in _rows[_key]: for col, x in enumerate(_row): _width[col] = max(_width[col], len(x), col and _minWidth) @@ -164,10 +169,11 @@ def tabular_writer(ostream, prefix, data, header, row_generator): # Note: do not right-pad the last header with unnecessary spaces tmp = _width[-1] _width[-1] = 0 - ostream.write(prefix - + " : ".join( "%%-%ds" % _width[i] % x - for i,x in enumerate(header) ) - + "\n") + ostream.write( + prefix + + " : ".join("%%-%ds" % _width[i] % x for i, x in enumerate(header)) + + "\n" + ) _width[-1] = tmp # If there is no data, we are done... @@ -176,21 +182,18 @@ def tabular_writer(ostream, prefix, data, header, row_generator): # right-justify data, except for the last column if there are spaces # in the data (probably an expression or vector) - _width = ["%"+str(i)+"s" for i in _width] + _width = ["%" + str(i) + "s" for i in _width] - if any( ' ' in r[-1] - for x in _rows.values() if x is not None - for r in x ): + if any(' ' in r[-1] for x in _rows.values() if x is not None for r in x): _width[-1] = '%s' for _key in sorted_robust(_rows): _rowSet = _rows[_key] if not _rowSet: - _rowSet = [ [_key] + [None]*(len(_width)-1) ] + _rowSet = [[_key] + [None] * (len(_width) - 1)] for _data in _rowSet: ostream.write( - prefix - + " : ".join( _width[i] % x for i,x in enumerate(_data) ) - + "\n") + prefix + " : ".join(_width[i] % x for i, x in enumerate(_data)) + "\n" + ) class StreamIndenter(object): @@ -201,7 +204,7 @@ class StreamIndenter(object): StreamIndenter objects may be arbitrarily nested. """ - def __init__(self, ostream, indent=' '*4): + def __init__(self, ostream, indent=' ' * 4): self.os = ostream self.indent = indent self.stripped_indent = indent.rstrip() @@ -216,7 +219,7 @@ def write(self, data): lines = data.split('\n') if self.newline: if lines[0]: - self.os.write(self.indent+lines[0]) + self.os.write(self.indent + lines[0]) else: self.os.write(self.stripped_indent) else: @@ -226,11 +229,11 @@ def write(self, data): return for line in lines[1:-1]: if line: - self.os.write("\n"+self.indent+line) + self.os.write("\n" + self.indent + line) else: - self.os.write("\n"+self.stripped_indent) + self.os.write("\n" + self.stripped_indent) if lines[-1]: - self.os.write("\n"+self.indent+lines[-1]) + self.os.write("\n" + self.indent + lines[-1]) self.newline = False else: self.os.write("\n") @@ -239,3 +242,139 @@ def write(self, data): def writelines(self, sequence): for x in sequence: self.write(x) + + +_indentation_re = re.compile(r'\s*') +_bullet_re = re.compile( + r'([-+*] +)' # bulleted lists + r'|(\(?[0-9]+[\)\.] +)' # enumerated lists (arabic numerals) + r'|(\(?[ivxlcdm]+[\)\.] +)' # enumerated lists (roman numerals) + r'|(\(?[IVXLCDM]+[\)\.] +)' # enumerated lists (roman numerals) + r'|(\(?[a-zA-Z][\)\.] +)' # enumerated lists (letters) + r'|(\(?\#[\)\.] +)' # auto enumerated lists + r'|([a-zA-Z0-9_ ]+ +: +)' # definitions + r'|(:[a-zA-Z0-9_ ]+: +)' # field name + r'|(?:\[\s*[A-Za-z0-9\.]+\s*\] +)' # [PASS]|[FAIL]|[ OK ] +) +_verbatim_line_start = re.compile( + r'(\| )' r'|(\+((-{3,})|(={3,}))\+)' # line blocks # grid table +) +_verbatim_line = re.compile( + r'(={3,}[ =]+)' # simple tables, ======== sections + # sections + + ''.join(r'|(\%s{3,})' % c for c in r'!"#$%&\'()*+,-./:;<>?@[\\]^_`{|}~') +) + + +def wrap_reStructuredText(docstr, wrapper): + """A text wrapper that honors paragraphs and basic reStructuredText markup + + This wraps `textwrap.fill()` to first separate the incoming text by + paragraphs before using ``wrapper`` to wrap each one. It includes a + basic (partial) parser for reStructuredText format to attempt to + avoid wrapping structural elements like section headings, bullet / + enumerated lists, and tables. + + Parameters + ---------- + docstr : str + The incoming string to parse and wrap + + wrapper : `textwrap.TextWrap` + The configured `TextWrap` object to use for wrapping paragraphs. + While the object will be reconfigured within this function, it + will be restored to its original state upon exit. + + """ + # As textwrap only works on single paragraphs, we need to break + # up the incoming message into paragraphs before we pass it to + # textwrap. + paragraphs = [(None, None, None)] + literal_block = False + verbatim = False + for line in docstr.rstrip().splitlines(): + leading = _indentation_re.match(line).group() + content = line.strip() + if not content: + if literal_block: + if literal_block[0] == 2: + literal_block = False + elif paragraphs[-1][2] and ''.join(paragraphs[-1][2]).endswith('::'): + literal_block = (0, paragraphs[-1][1]) + paragraphs.append((None, None, None)) + continue + if literal_block: + if literal_block[0] == 0: + if len(literal_block[1]) < len(leading): + # indented literal block + literal_block = 1, leading + paragraphs.append((None, None, line)) + continue + elif ( + len(literal_block[1]) == len(leading) + and content[0] in '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' + ): + # quoted literal block + literal_block = 2, leading + paragraphs.append((None, None, line)) + continue + else: + # invalid literal block + literal_block = False + elif leading.startswith(literal_block[1]): + paragraphs.append((None, None, line)) + continue + else: + # fall back on normal line processing + literal_block = False + if content == '```': + # Not part of ReST, but we have supported this in Pyomo for a long time + verbatim ^= True + elif verbatim: + paragraphs.append((None, None, line)) + elif _verbatim_line_start.match(content): + # This catches lines that start with patterns that indicate + # that the line should not be wrapped (line blocks, grid + # tables) + paragraphs.append((None, None, line)) + elif _verbatim_line.match(content): + # This catches whole line patterns that should not be + # wrapped with previous/subsequent lines (e.g., simple table + # headers, section headers) + paragraphs.append((None, None, line)) + else: + matchBullet = _bullet_re.match(content) + if matchBullet: + # Handle things that look like bullet lists specially + hang = matchBullet.group() + paragraphs.append((leading, leading + ' ' * len(hang), [content])) + elif paragraphs[-1][1] == leading: + # Continuing a text block + paragraphs[-1][2].append(content) + else: + # Beginning a new text block + paragraphs.append((leading, leading, [content])) + + while paragraphs and paragraphs[0][2] is None: + paragraphs.pop(0) + + wrapper_init = wrapper.initial_indent, wrapper.subsequent_indent + try: + for i, (indent, subseq, par) in enumerate(paragraphs): + base_indent = wrapper_init[1] if i else wrapper_init[0] + + if indent is None: + if par is None: + paragraphs[i] = '' + else: + paragraphs[i] = base_indent + par + continue + + wrapper.initial_indent = base_indent + indent + wrapper.subsequent_indent = base_indent + subseq + paragraphs[i] = wrapper.fill(' '.join(par)) + finally: + # Avoid side-effects and restore the initial wrapper state + wrapper.initial_indent, wrapper.subsequent_indent = wrapper_init + + return '\n'.join(paragraphs) diff --git a/pyomo/common/gc_manager.py b/pyomo/common/gc_manager.py index 98fd92d0459..54fbca32736 100644 --- a/pyomo/common/gc_manager.py +++ b/pyomo/common/gc_manager.py @@ -19,12 +19,15 @@ import gc from pyomo.common.multithread import MultiThreadWrapper + class __PauseGCCompanion(object): def __init__(self): self._stack_depth = 0 + PauseGCCompanion: __PauseGCCompanion = MultiThreadWrapper(__PauseGCCompanion) + # PauseGC is a class for clean, scoped management of the Python # garbage collector. To disable the GC for the duration of a # scoped block use PauseGC in combination with the Python 'with' @@ -40,6 +43,7 @@ def __init__(self): # if an outer function/method has its own instance of PauseGC. class PauseGC(object): __slots__ = ("reenable_gc", "stack_pointer") + def __init__(self): self.stack_pointer = None self.reenable_gc = None @@ -47,7 +51,8 @@ def __init__(self): def __enter__(self): if self.stack_pointer: raise RuntimeError( - "Entering PauseGC context manager that was already entered.") + "Entering PauseGC context manager that was already entered." + ) PauseGCCompanion._stack_depth += 1 self.stack_pointer = PauseGCCompanion._stack_depth self.reenable_gc = gc.isenabled() @@ -67,7 +72,8 @@ def close(self): "Exiting PauseGC context manager out of order: there " "are other active PauseGC context managers that were " "entered after this context manager and have not yet " - "been exited.") + "been exited." + ) PauseGCCompanion._stack_depth -= 1 self.stack_pointer = None if self.reenable_gc: diff --git a/pyomo/common/getGSL.py b/pyomo/common/getGSL.py index 8411f6eb35e..e8b2507ab81 100644 --- a/pyomo/common/getGSL.py +++ b/pyomo/common/getGSL.py @@ -9,51 +9,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -import logging -import os -import platform -import sys -from pyomo.common import Library -from pyomo.common.download import FileDownloader +from pyomo.common.deprecation import relocated_module -logger = logging.getLogger('pyomo.common') - -# These URLs were retrieved from -# https://ampl.com/resources/extended-function-library/ -urlmap = { - 'linux': 'https://ampl.com/NEW/amplgsl/amplgsl.linux-intel%s.zip', - 'windows': 'https://ampl.com/NEW/amplgsl/amplgsl.mswin%s.zip', - 'cygwin': 'https://ampl.com/NEW/amplgsl/amplgsl.mswin%s.zip', - 'darwin': 'https://ampl.com/NEW/amplgsl/amplgsl.macosx%s.zip' -} - -def find_GSL(): - # FIXME: the GSL interface is currently broken in PyPy: - if platform.python_implementation().lower().startswith('pypy'): - return None - return Library('amplgsl.dll').path() - -def get_gsl(downloader): - system, bits = downloader.get_sysinfo() - url = downloader.get_platform_url(urlmap) % (bits,) - - downloader.set_destination_filename(os.path.join('lib', 'amplgsl.dll')) - - logger.info("Fetching GSL from %s and installing it to %s" - % (url, downloader.destination())) - - downloader.get_binary_file_from_zip_archive(url, 'amplgsl.dll') - -def main(argv): - downloader = FileDownloader() - downloader.parse_args(argv) - get_gsl(downloader) - -if __name__ == '__main__': - logger.setLevel(logging.INFO) - try: - main(sys.argv[1:]) - except Exception as e: - print(e.message or str(e)) - print("Usage: %s [--insecure] [target]" % os.path.basename(sys.argv[0])) - sys.exit(1) +relocated_module('pyomo.common.gsl', version='6.5.0') diff --git a/pyomo/common/gsl.py b/pyomo/common/gsl.py new file mode 100644 index 00000000000..5243758a0de --- /dev/null +++ b/pyomo/common/gsl.py @@ -0,0 +1,36 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import logging +import platform +from pyomo.common import Library +from pyomo.common.deprecation import deprecated + +logger = logging.getLogger('pyomo.common') + + +@deprecated( + "Use of get_gsl is deprecated and NO LONGER FUNCTIONS as of February 9, 2023. ", + version='6.5.0', +) +def get_gsl(downloader): + logger.info( + "As of February 9, 2023, AMPL GSL can no longer be downloaded\ + through download-extensions. Visit https://portal.ampl.com/\ + to download the AMPL GSL binaries." + ) + + +def find_GSL(): + # FIXME: the GSL interface is currently broken in PyPy: + if platform.python_implementation().lower().startswith('pypy'): + return None + return Library('amplgsl.dll').path() diff --git a/pyomo/common/log.py b/pyomo/common/log.py index bef60b4ed3a..3c4b01caf5a 100644 --- a/pyomo/common/log.py +++ b/pyomo/common/log.py @@ -18,6 +18,7 @@ # # Utility classes for working with the logger # +import inspect import io import logging import re @@ -27,28 +28,30 @@ from pyomo.version.info import releaselevel from pyomo.common.deprecation import deprecated from pyomo.common.fileutils import PYOMO_ROOT_DIR +from pyomo.common.formatting import wrap_reStructuredText _indentation_re = re.compile(r'\s*') -_bullet_re = re.compile(r'(?:[-*] +)|(\[\s*[A-Za-z0-9\.]+\s*\] +)') -_bullet_char = '-*[' _RTD_URL = "https://pyomo.readthedocs.io/en/%s/errors.html" % ( 'stable' - if (releaselevel == 'final' - or 'sphinx' in sys.modules - or 'Sphinx' in sys.modules) - else 'latest') + if (releaselevel == 'final' or 'sphinx' in sys.modules or 'Sphinx' in sys.modules) + else 'latest' +) + def RTD(_id): _id = str(_id).lower() assert _id[0] in 'wex' return f"{_RTD_URL}#{_id}" + _DEBUG = logging.DEBUG _NOTSET = logging.NOTSET if not __debug__: + def is_debug_set(logger): return False + elif hasattr(getattr(logging.getLogger(), 'manager', None), 'disable'): # This works for CPython and PyPy, but relies on a manager attribute # to get the current value of the logging.disabled() flag @@ -72,6 +75,7 @@ def is_debug_set(logger): _level = logger.getEffectiveLevel() # Filter out NOTSET and higher levels return _NOTSET < _level <= _DEBUG + else: # This is inefficient (it indirectly checks effective level twice), # but is included for [as yet unknown] platforms that ONLY implement @@ -81,6 +85,7 @@ def is_debug_set(logger): return False return logger.getEffectiveLevel() > _NOTSET + class WrappingFormatter(logging.Formatter): _flag = "<>" @@ -93,16 +98,18 @@ def __init__(self, **kwds): elif kwds['style'] == '$': kwds['fmt'] = '$levelname: $message' else: - raise ValueError('unrecognized style flag "%s"' - % (kwds['style'],)) + raise ValueError('unrecognized style flag "%s"' % (kwds['style'],)) self._wrapper = textwrap.TextWrapper(width=kwds.pop('wrap', 78)) - self.hang = kwds.pop('hang', ' '*4) + self._wrapper.subsequent_indent = kwds.pop('hang', ' ' * 4) + if not self._wrapper.subsequent_indent: + self._wrapper.subsequent_indent = '' self.basepath = kwds.pop('base', None) super(WrappingFormatter, self).__init__(**kwds) def format(self, record): - _orig = {k: getattr(record, k) - for k in ('msg', 'args', 'pathname', 'levelname')} + _orig = { + k: getattr(record, k) for k in ('msg', 'args', 'pathname', 'levelname') + } _id = getattr(record, 'id', None) msg = record.getMessage() record.msg = self._flag @@ -110,116 +117,71 @@ def format(self, record): if _id: record.levelname += f" ({_id.upper()})" if self.basepath and record.pathname.startswith(self.basepath): - record.pathname = '[base]' + record.pathname[len(self.basepath):] + record.pathname = '[base]' + record.pathname[len(self.basepath) :] try: raw_msg = super(WrappingFormatter, self).format(record) finally: - for k,v in _orig.items(): + for k, v in _orig.items(): setattr(record, k, v) + # We want to normalize the incoming message *before* we start + # formatting (wrapping) paragraphs. + # # Most of the messages are either unformatted long lines or # triple-quote blocks of text. In the latter case, if the text # starts on the same line as the triple-quote, then it is almost # certainly NOT indented with the bulk of the text, which will # cause dedent to get confused and not strip any leading - # whitespace. This attempts to work around that case: + # whitespace. # - #if not (_msg.startswith('\n') or _indentation_re.match(_msg).group()): - # # copy the indention for the second line to the first: - # lines = _msg.splitlines() - # if len(lines) > 1: - # _msg = _indentation_re.match(lines[1]).group() + _msg - # - # The problem with the above logic is that users may want a - # simple introductory line followed by an intented line (our - # tests did this!), and cannot specify it without adding an - # extra blank line to the output. In contrast, it is possible - # for the user to fix the scenario above that motivated this - # code by just indenting their first line correctly. - msg = textwrap.dedent(msg).strip() + # A standard approach is to use inspect.cleandoc, which + # allows for the first line to have 0 indent. + msg = inspect.cleandoc(msg) # Split the formatted log message (that currently has _flag in # lieu of the actual message content) into lines, then # recombine, substituting and wrapping any lines that contain # _flag. return '\n'.join( - self._wrap_msg(l, msg, _id) if self._flag in l else l - for l in raw_msg.splitlines() + self._wrap_msg(line, msg, _id) if self._flag in line else line + for line in raw_msg.splitlines() ) - def _wrap_msg(self, l, msg, _id): - indent = _indentation_re.match(l).group() - wrapped_msg = self._wrap(l.strip().replace(self._flag, msg), indent) + def _wrap_msg(self, format_line, msg, _id): + _init = self._wrapper.initial_indent, self._wrapper.subsequent_indent + # We will honor the "hang" argument (for specifying a hanging + # indent) unless the formatting line was indented (e.g. because + # DEBUG was set), in which case we will use that for both the + # first line and all subsequent lines. + indent = _indentation_re.match(format_line).group() + if indent: + self._wrapper.initial_indent = self._wrapper.subsequent_indent = indent + try: + wrapped_msg = wrap_reStructuredText( + format_line.strip().replace(self._flag, msg), self._wrapper + ) + finally: + # Restore the wrapper state + self._wrapper.initial_indent, self._wrapper.subsequent_indent = _init if _id: - wrapped_msg += f"\n{indent} See also {RTD(_id)}" + wrapped_msg += f"\n{indent}{_init[1]}See also {RTD(_id)}" return wrapped_msg - def _wrap(self, msg, base_indent): - # As textwrap only works on single paragraphs, we need to break - # up the incoming message into paragraphs before we pass it to - # textwrap. - paragraphs = [] - verbatim = False - for line in msg.rstrip().splitlines(): - leading = _indentation_re.match(line).group() - content = line.strip() - if not content: - paragraphs.append((None, None)) - elif content == '```': - verbatim ^= True - elif verbatim: - paragraphs.append((None, line)) - else: - matchBullet = _bullet_re.match(content) - if matchBullet: - paragraphs.append( - (leading + ' '*len(matchBullet.group()), [content])) - elif paragraphs and paragraphs[-1][0] == leading: - paragraphs[-1][1].append( content ) - else: - paragraphs.append((leading, [content])) - - base_indent = (self.hang or '') + base_indent - - for i, (indent, par) in enumerate(paragraphs): - if indent is None: - if par is None: - paragraphs[i] = '' - else: - paragraphs[i] = base_indent + par - continue - - par_indent = base_indent + indent - self._wrapper.subsequent_indent = par_indent - if not i and self.hang: - self._wrapper.initial_indent = par_indent[len(self.hang):] - else: - self._wrapper.initial_indent = par_indent - - # Bulleted lists get indented with a hanging indent - bullet = _bullet_re.match(par[0]) - if bullet: - self._wrapper.initial_indent = par_indent[:-len(bullet.group())] - - paragraphs[i] = self._wrapper.fill(' '.join(par)) - return '\n'.join(paragraphs) - class LegacyPyomoFormatter(logging.Formatter): - """This mocks up the legacy Pyomo log formating. + """This mocks up the legacy Pyomo log formatting. This formatter takes a callback function (`verbosity`) that will be called for each message. Based on the result, one of two formatting templates will be used. """ + def __init__(self, **kwds): if 'fmt' in kwds: - raise ValueError( - "'fmt' is not a valid option for the LegacyFormatter") + raise ValueError("'fmt' is not a valid option for the LegacyFormatter") if 'style' in kwds: - raise ValueError( - "'style' is not a valid option for the LegacyFormatter") + raise ValueError("'style' is not a valid option for the LegacyFormatter") self.verbosity = kwds.pop('verbosity', lambda: True) self.standard_formatter = WrappingFormatter(**kwds) @@ -227,7 +189,7 @@ def __init__(self, **kwds): fmt='%(levelname)s: "%(pathname)s", %(lineno)d, %(funcName)s\n' ' %(message)s', hang=False, - **kwds + **kwds, ) super(LegacyPyomoFormatter, self).__init__() @@ -265,29 +227,26 @@ def filter(self, record): pyomo_logger = logging.getLogger('pyomo') pyomo_handler = StdoutHandler() pyomo_formatter = LegacyPyomoFormatter( - base=PYOMO_ROOT_DIR, - verbosity=lambda: pyomo_logger.isEnabledFor(logging.DEBUG), + base=PYOMO_ROOT_DIR, verbosity=lambda: pyomo_logger.isEnabledFor(logging.DEBUG) ) pyomo_handler.setFormatter(pyomo_formatter) pyomo_handler.addFilter(_GlobalLogFilter()) pyomo_logger.addHandler(pyomo_handler) -@deprecated('The pyomo.common.log.LogHandler class has been deprecated ' - 'in favor of standard Handlers from the Python logging module ' - 'combined with the pyomo.common.log.WrappingFormatter.', - version='5.7.3') +@deprecated( + 'The pyomo.common.log.LogHandler class has been deprecated ' + 'in favor of standard Handlers from the Python logging module ' + 'combined with the pyomo.common.log.WrappingFormatter.', + version='5.7.3', +) class LogHandler(logging.StreamHandler): - def __init__(self, base='', stream=None, - level=logging.NOTSET, verbosity=None): + def __init__(self, base='', stream=None, level=logging.NOTSET, verbosity=None): super(LogHandler, self).__init__(stream) self.setLevel(level), if verbosity is None: verbosity = lambda: True - self.setFormatter(LegacyPyomoFormatter( - base=base, - verbosity=verbosity, - )) + self.setFormatter(LegacyPyomoFormatter(base=base, verbosity=verbosity)) class LoggingIntercept(object): @@ -323,8 +282,7 @@ class LoggingIntercept(object): """ - def __init__(self, output=None, module=None, level=logging.WARNING, - formatter=None): + def __init__(self, output=None, module=None, level=logging.WARNING, formatter=None): self.handler = None self.output = output self.module = module @@ -368,6 +326,7 @@ class LogStream(io.TextIOBase): This is useful for logging solver output (a LogStream instance can be handed to TeeStream from pyomo.common.tee). """ + def __init__(self, level, logger): self._level = level self._logger = logger diff --git a/pyomo/common/modeling.py b/pyomo/common/modeling.py index 63139a81bba..b3a6d59fcf0 100644 --- a/pyomo/common/modeling.py +++ b/pyomo/common/modeling.py @@ -13,7 +13,7 @@ import sys -def randint(a,b): +def randint(a, b): """Our implementation of random.randint. The Python random.randint is not consistent between python versions @@ -21,20 +21,20 @@ def randint(a,b): can support deterministic testing (i.e., setting the random.seed and expecting the same sequence), we will implement a simple, but stable version of randint().""" - return int((b-a+1)*random()) + return int((b - a + 1) * random()) def unique_component_name(instance, name): - # test if this name already exists in model. If not, we're good. + # test if this name already exists in model. If not, we're good. # Else, we add random numbers until it doesn't if instance.component(name) is None and not hasattr(instance, name): return name - name += '_%d' % (randint(0,9),) + name += '_%d' % (randint(0, 9),) while True: if instance.component(name) is None and not hasattr(instance, name): return name else: - name += str(randint(0,9)) + name += str(randint(0, 9)) class FlagType(type): @@ -50,10 +50,14 @@ class FlagType(type): in functions so that the Sphinx-generated documentation is "cleaner" """ + if 'sphinx' in sys.modules or 'Sphinx' in sys.modules: + def __repr__(cls): return cls.__qualname__ + else: + def __repr__(cls): return cls.__module__ + "." + cls.__qualname__ @@ -71,7 +75,9 @@ class NOTSET(object, metaclass=FlagType): >>> pass # no argument was provided to `value` """ + pass + # Backward compatibility with the previous name for this flag NoArgumentGiven = NOTSET diff --git a/pyomo/common/multithread.py b/pyomo/common/multithread.py index 2ec32d6cfab..415d8aaba7e 100644 --- a/pyomo/common/multithread.py +++ b/pyomo/common/multithread.py @@ -2,7 +2,7 @@ from threading import get_ident, main_thread -class MultiThreadWrapper(): +class MultiThreadWrapper: """A python object proxy that wraps different instances for each thread. This is useful for handling thread-safe access to singleton objects without @@ -20,27 +20,33 @@ def __init__(self, base): def __getattr__(self, attr): return getattr(self._mtdict[get_ident()], attr) - + def __setattr__(self, attr, value): setattr(self._mtdict[get_ident()], attr, value) - + def __delattr__(self, attr): delattr(self._mtdict[get_ident()], attr) - + def __enter__(self): return self._mtdict[get_ident()].__enter__() - + def __exit__(self, exc_type, exc_value, traceback): return self._mtdict[get_ident()].__exit__(exc_type, exc_value, traceback) - + def __dir__(self): return list(object.__dir__(self)) + list(self._mtdict[get_ident()].__dir__()) - + def __str__(self): return self._mtdict[get_ident()].__str__() def __new__(cls, wrapped): - return super().__new__(type('MultiThreadMeta' + wrapped.__name__, (cls,), {'__doc__': wrapped.__doc__})) + return super().__new__( + type( + 'MultiThreadMeta' + wrapped.__name__, + (cls,), + {'__doc__': wrapped.__doc__}, + ) + ) class MultiThreadWrapperWithMain(MultiThreadWrapper): @@ -65,6 +71,6 @@ def __setattr__(self, attr, value): raise ValueError('Setting `main_thread` attribute is not allowed') else: super().__setattr__(attr, value) - + def __dir__(self): return super().__dir__() + ['main_thread'] diff --git a/pyomo/common/numeric_types.py b/pyomo/common/numeric_types.py index 1450d30bb6c..dbad3ef0853 100644 --- a/pyomo/common/numeric_types.py +++ b/pyomo/common/numeric_types.py @@ -9,6 +9,13 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +import logging +import sys + +from pyomo.common.deprecation import deprecated, relocated_module_attribute +from pyomo.common.errors import TemplateExpressionError + +logger = logging.getLogger(__name__) #: Python set used to identify numeric constants, boolean values, strings #: and instances of @@ -16,7 +23,7 @@ #: which is commonly used in code that walks Pyomo expression trees. #: #: :data:`nonpyomo_leaf_types` = :data:`native_types ` + { :data:`NonNumericValue ` } -nonpyomo_leaf_types = set([]) +nonpyomo_leaf_types = set() # It is *significantly* faster to build the list of types we want to # test against as a "static" set, and not to regenerate it locally for @@ -34,24 +41,37 @@ #: Python set used to identify numeric constants. This set includes #: native Python types as well as numeric types from Python packages #: like numpy, which may be registered by users. -native_numeric_types = set([ int, float, bool ]) -native_integer_types = set([ int, bool ]) -native_boolean_types = set([ int, bool, str, bytes ]) -native_logical_types = {bool, } +native_numeric_types = {int, float, complex} +native_integer_types = {int} +native_logical_types = {bool} pyomo_constant_types = set() # includes NumericConstant +_native_boolean_types = {int, bool, str, bytes} +relocated_module_attribute( + 'native_boolean_types', + 'pyomo.common.numeric_types._native_boolean_types', + version='6.6.0', + msg="The native_boolean_types set will be removed in the future: the set " + "contains types that were convertible to bool, and not types that should " + "be treated as if they were bool (as was the case for the other " + "native_*_types sets). Users likely should use native_logical_types.", +) + + #: Python set used to identify numeric constants and related native #: types. This set includes #: native Python types as well as numeric types from Python packages #: like numpy. #: #: :data:`native_types` = :data:`native_numeric_types ` + { str } -native_types = set([ bool, str, type(None), slice, bytes]) -native_types.update( native_numeric_types ) -native_types.update( native_integer_types ) -native_types.update( native_boolean_types ) +native_types = set([bool, str, type(None), slice, bytes]) +native_types.update(native_numeric_types) +native_types.update(native_integer_types) +native_types.update(_native_boolean_types) +native_types.update(native_logical_types) + +nonpyomo_leaf_types.update(native_types) -nonpyomo_leaf_types.update( native_types ) def RegisterNumericType(new_type): """ @@ -64,6 +84,7 @@ def RegisterNumericType(new_type): native_types.add(new_type) nonpyomo_leaf_types.add(new_type) + def RegisterIntegerType(new_type): """ A utility function for updating the set of types that are @@ -77,6 +98,12 @@ def RegisterIntegerType(new_type): native_types.add(new_type) nonpyomo_leaf_types.add(new_type) + +@deprecated( + "The native_boolean_types set (and hence RegisterBooleanType) " + "is deprecated. Users likely should use RegisterLogicalType.", + version='6.6.0', +) def RegisterBooleanType(new_type): """ A utility function for updating the set of types that are @@ -85,6 +112,155 @@ def RegisterBooleanType(new_type): The argument should be a class (e.g., numpy.bool_). """ - native_boolean_types.add(new_type) + _native_boolean_types.add(new_type) + native_types.add(new_type) + nonpyomo_leaf_types.add(new_type) + + +def RegisterLogicalType(new_type): + """ + A utility function for updating the set of types that are + recognized as handling boolean values. This function does not + register the type of integer or numeric. + + The argument should be a class (e.g., numpy.bool_). + """ + _native_boolean_types.add(new_type) + native_logical_types.add(new_type) native_types.add(new_type) nonpyomo_leaf_types.add(new_type) + + +def check_if_numeric_type(obj): + """Test if the argument behaves like a numeric type. + + We check for "numeric types" by checking if we can add zero to it + without changing the object's type. If that works, then we register + the type in native_numeric_types. + + """ + obj_class = obj.__class__ + # Do not re-evaluate known native types + if obj_class in native_types: + return obj_class in native_numeric_types + + try: + obj_plus_0 = obj + 0 + obj_p0_class = obj_plus_0.__class__ + # ensure that the object is comparable to 0 in a meaningful way + # (among other things, this prevents numpy.ndarray objects from + # being added to native_numeric_types) + if not ((obj < 0) ^ (obj >= 0)): + return False + # Native types *must* be hashable + hash(obj) + except: + return False + if obj_p0_class is obj_class or obj_p0_class in native_numeric_types: + # + # If we get here, this is a reasonably well-behaving + # numeric type: add it to the native numeric types + # so that future lookups will be faster. + # + RegisterNumericType(obj_class) + # + # Generate a warning, since Pyomo's management of third-party + # numeric types is more robust when registering explicitly. + # + logger.warning( + f"""Dynamically registering the following numeric type: + {obj_class.__module__}.{obj_class.__name__} +Dynamic registration is supported for convenience, but there are known +limitations to this approach. We recommend explicitly registering +numeric types using RegisterNumericType() or RegisterIntegerType().""" + ) + return True + else: + return False + + +def value(obj, exception=True): + """ + A utility function that returns the value of a Pyomo object or + expression. + + Args: + obj: The argument to evaluate. If it is None, a + string, or any other primitive numeric type, + then this function simply returns the argument. + Otherwise, if the argument is a NumericValue + then the __call__ method is executed. + exception (bool): If :const:`True`, then an exception should + be raised when instances of NumericValue fail to + s evaluate due to one or more objects not being + initialized to a numeric value (e.g, one or more + variables in an algebraic expression having the + value None). If :const:`False`, then the function + returns :const:`None` when an exception occurs. + Default is True. + + Returns: A numeric value or None. + """ + if obj.__class__ in native_types: + return obj + if obj.__class__ in pyomo_constant_types: + # + # I'm commenting this out for now, but I think we should never expect + # to see a numeric constant with value None. + # + # if exception and obj.value is None: + # raise ValueError( + # "No value for uninitialized NumericConstant object %s" + # % (obj.name,)) + return obj.value + # + # Test if we have a duck typed Pyomo expression + # + try: + obj.is_numeric_type() + except AttributeError: + # + # TODO: Historically we checked for new *numeric* types and + # raised exceptions for anything else. That is inconsistent + # with allowing native_types like None/str/bool to be returned + # from value(). We should revisit if that is worthwhile to do + # here. + # + if check_if_numeric_type(obj): + return obj + else: + if not exception: + return None + raise TypeError( + "Cannot evaluate object with unknown type: %s" % obj.__class__.__name__ + ) from None + # + # Evaluate the expression object + # + if exception: + # + # Here, we try to catch the exception + # + try: + tmp = obj(exception=True) + if tmp is None: + raise ValueError( + "No value for uninitialized NumericValue object %s" % (obj.name,) + ) + return tmp + except TemplateExpressionError: + # Template expressions work by catching this error type. So + # we should defer this error handling and not log an error + # message. + raise + except: + logger.error( + "evaluating object as numeric value: %s\n (object: %s)\n%s" + % (obj, type(obj), sys.exc_info()[1]) + ) + raise + else: + # + # Here, we do not try to catch the exception + # + return obj(exception=False) diff --git a/pyomo/common/plugin.py b/pyomo/common/plugin.py index aeea7a4fffd..b48fa96a483 100644 --- a/pyomo/common/plugin.py +++ b/pyomo/common/plugin.py @@ -8,334 +8,7 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -# -# This module was originally developed as part of the PyUtilib project -# Copyright (c) 2008 Sandia Corporation. -# This software is distributed under the BSD License. -# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, -# the U.S. Government retains certain rights in this software. -# ___________________________________________________________________________ - -import collections -import inspect -import sys -from weakref import ref as weakref_ref - -from pyomo.common.errors import PyomoException -from pyomo.common.deprecation import deprecated, deprecation_warning - -if sys.version_info[:2] >= (3,7): - _deterministic_dict = dict -else: - from pyomo.common.collections import OrderedDict - _deterministic_dict = OrderedDict - - -class PluginGlobals(object): - @staticmethod - @deprecated("The PluginGlobals environment manager is deprecated: " - "Pyomo only supports a single global environment", - version='6.0') - def add_env(name): - pass - - @staticmethod - @deprecated("The PluginGlobals environment manager is deprecated: " - "Pyomo only supports a single global environment", - version='6.0') - def pop_env(): - pass - - @staticmethod - @deprecated("The PluginGlobals environment manager is deprecated: " - "Pyomo only supports a single global environment", - version='6.0') - def clear(): - pass - - -class PluginError(PyomoException): - pass - - -def alias(name, doc=None, subclass=None): - if subclass is not None: - deprecation_warning( - "The Pyomo plugin infrastructure alias() function does " - "not support the subclass flag.", version='6.0') - calling_frame = inspect.currentframe().f_back - locals_ = calling_frame.f_locals - # - # Some sanity checks - # - assert locals_ is not calling_frame.f_globals and '__module__' in locals_, \ - 'implements() can only be used in a class definition' - # - locals_.setdefault('__plugin_aliases__', []).append((name, doc)) - - -def implements(interface, inherit=None, namespace=None, service=False): - if namespace is not None: - deprecation_warning( - "The Pyomo plugin infrastructure only supports a " - "single global namespace.", version='6.0') - calling_frame = inspect.currentframe().f_back - locals_ = calling_frame.f_locals - # - # Some sanity checks - # - assert locals_ is not calling_frame.f_globals and '__module__' in locals_, \ - 'implements() can only be used in a class definition' - assert issubclass(interface, Interface) - # - locals_.setdefault('__implements__', []).append( - (interface, inherit, service) - ) - - -class InterfaceMeta(type): - def __new__(cls, name, bases, classdict, *args, **kwargs): - # Ensure that all interfaces have their own _plugins & _aliases - # dictionaries - classdict.setdefault('_next_id', 0) - classdict.setdefault('_plugins', {}) - classdict.setdefault('_aliases', {}) - return super().__new__(cls, name, bases, classdict, *args, **kwargs) - - -class Interface(metaclass=InterfaceMeta): - pass - - -class _deprecated_plugin_dict(dict): - def __init__(self, name, classdict): - super().__init__() - msg = classdict.pop('__deprecated_message__', None) - if not msg: - msg = 'The %s interface has been deprecated' % (name,) - version = classdict.pop('__deprecated_version__', None) - remove_in = classdict.pop('__deprecated_remove_in__', None) - self._deprecation_info = { - 'msg': msg, 'version': version, 'remove_in': remove_in - } - - def __setitem__(self, key, val): - deprecation_warning(**self._deprecation_info) - super().__setitem__(key, val) - - def items(self): - deprecation_warning(**self._deprecation_info) - return super().items() - - -class DeprecatedInterfaceMeta(InterfaceMeta): - def __new__(cls, name, bases, classdict, *args, **kwargs): - classdict.setdefault( - '_plugins', _deprecated_plugin_dict(name, classdict) - ) - return super().__new__(cls, name, bases, classdict, *args, **kwargs) - - -class DeprecatedInterface(Interface, metaclass=DeprecatedInterfaceMeta): - pass - - -class PluginMeta(type): - - def __new__(cls, name, bases, classdict, *args, **kwargs): - # This plugin is a singleton plugin based on the __singleton__ - # class attribute, OR if not specified, if any base class is a - # singleton plugin - _singleton = classdict.pop( - '__singleton__', - any(getattr(base, '__singleton__', None) is not None - for base in bases) - ) - # This prevents base class __singleton__, __plugin_aliases__, - # and __implements__ from implicitly bleeding through and being - # accidentally shared across subclasses. - classdict['__singleton__'] = None - aliases = classdict.setdefault('__plugin_aliases__', []) - implements = classdict.setdefault('__implements__', []) - # If multiple classes (classdict, and/or any base) implement() - # the same interface, use standard Python rules to determine - # which implements() should govern (i.e. classdict supersedes - # bases, bases resolved in order) - interfaces = set(impl[0] for impl in implements) - for base in bases: - implements.extend( - ep for ep in getattr(base, '__implements__', []) - if ep[0] not in interfaces - ) - interfaces.update(impl[0] for impl in implements) - for interface, inherit, service in implements: - if not inherit: - continue - if not any(issubclass(base, interface) for base in bases): - bases = bases + (interface,) - # Python requires that a class' metaclass be a - # (nonstrict) subclass of the metaclasses of all its - # base classes. Check, and declare a new metaclass if - # necessary. - if not issubclass(cls, type(interface)): - class tmp_meta(cls, type(interface)): - def __new__(cls, name, bases, classdict, - *args, **kwargs): - # This is a plugin and not an Interface. Do - # not set up dicts for the interface - # definition. - classdict.setdefault('_plugins', None) - classdict.setdefault('_aliases', None) - return super().__new__( - cls, name, bases, classdict, *args, **kwargs) - cls = tmp_meta - - new_class = super().__new__( - cls, name, bases, classdict, *args, **kwargs) - - # Register the new class with the interfaces - for interface, inherit, service in implements: - interface._plugins[new_class] = _deterministic_dict() - interface._aliases.update( - {name: (new_class, doc) for name, doc in aliases} - ) - - if _singleton: - new_class.__singleton__ = new_class() - - return new_class - - -class Plugin(object, metaclass=PluginMeta): - def __new__(cls): - if cls.__singleton__ is not None: - raise RuntimeError( - "Cannot create multiple singleton plugin instances of type %s" - % (cls,)) - obj = super().__new__(cls) - obj._plugin_ids = {} - # Record this instance (service) with all Interfaces - for interface, inherit, service in cls.__implements__: - _id = interface._next_id - interface._next_id += 1 - obj._plugin_ids[interface] = _id - interface._plugins[cls][_id] = (weakref_ref(obj), service) - return obj - - def activate(self): - cls = self.__class__ - for interface, inherit, service in cls.__implements__: - _id = self._plugin_ids[interface] - obj, service = interface._plugins[cls][_id] - if not service: - interface._plugins[cls][_id] = obj, True - enable = activate - - def deactivate(self): - cls = self.__class__ - for interface, inherit, service in cls.__implements__: - _id = self._plugin_ids[interface] - obj, service = interface._plugins[cls][_id] - if service: - interface._plugins[cls][_id] = obj, False - disable = deactivate - - def enabled(self): - cls = self.__class__ - return any(interface._plugins[cls][self._plugin_ids[interface]][1] - for interface, inherit, service in cls.__implements__) - - -class SingletonPlugin(Plugin): - __singleton__ = True - - -class ExtensionPoint(object): - def __init__(self, interface): - assert issubclass(interface, Interface) - self._interface = interface - - def __iter__(self, key=None, all=False): - for cls, plugins in self._interface._plugins.items(): - remove = [] - for i, (obj, service) in plugins.items(): - if not obj(): - remove.append(i) - elif ((all or service) and - (key is None or key is cls or key == cls.__name__)): - yield obj() - for i in remove: - del plugins[i] - - def __len__(self): - return len(list(self.__iter__())) - - def extensions(self, all=False, key=None): - return list(self.__iter__(key=key, all=all)) - - def __call__(self, key=None, all=False): - return self.extensions(all=all, key=key) - - def service(self, key=None, all=False): - """Return the unique service that matches the interface of this - extension point. An exception occurs if no service matches the - specified key, or if multiple services match. - """ - ans = self.extensions(all=all, key=key) - if len(ans) == 1: - # - # There is a single service, so return it. - # - return ans[0] - elif not ans: - return None - else: - raise PluginError("The ExtensionPoint does not have a unique " - "service! %d services are defined for interface" - " '%s' (key=%s)." % - (len(ans), self._interface.__name__, str(key))) - - -class PluginFactory(object): - - def __init__(self, interface): - self.interface = interface - - def __call__(self, name, *args, **kwds): - name = str(name) - if name not in self.interface._aliases: - return None - else: - return self.interface._aliases[name][0](*args, **kwds) - - def services(self): - return list(self.interface._aliases) - - def get_class(self, name): - return self.interface._aliases.get(name, [None])[0] - - def doc(self, name): - name = str(name) - if name not in self.interface._aliases: - return "" - else: - return self.interface._aliases[name][1] - - def deactivate(self, name): - if isinstance(name, str): - cls = self.get_class(name) - if cls is None: - return - for service in ExtensionPoint(self.interface)(key=cls): - service.deactivate() - def activate(self, name): - if isinstance(name, str): - cls = self.get_class(name) - if cls is None: - return - for service in ExtensionPoint(self.interface)(all=True, key=cls): - service.activate() +from pyomo.common.deprecation import relocated_module -# Old name for creating plugin factories -CreatePluginFactory = PluginFactory +relocated_module('pyomo.common.plugin_base', version='6.5.0') diff --git a/pyomo/common/plugin_base.py b/pyomo/common/plugin_base.py new file mode 100644 index 00000000000..aa029ff153a --- /dev/null +++ b/pyomo/common/plugin_base.py @@ -0,0 +1,356 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +# +# This module was originally developed as part of the PyUtilib project +# Copyright (c) 2008 Sandia Corporation. +# This software is distributed under the BSD License. +# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +# the U.S. Government retains certain rights in this software. +# ___________________________________________________________________________ + +import collections +import inspect +import sys +from weakref import ref as weakref_ref + +from pyomo.common.errors import PyomoException +from pyomo.common.deprecation import deprecated, deprecation_warning + +if sys.version_info[:2] >= (3, 7): + _deterministic_dict = dict +else: + from pyomo.common.collections import OrderedDict + + _deterministic_dict = OrderedDict + + +class PluginGlobals(object): + @staticmethod + @deprecated( + "The PluginGlobals environment manager is deprecated: " + "Pyomo only supports a single global environment", + version='6.0', + ) + def add_env(name): + pass + + @staticmethod + @deprecated( + "The PluginGlobals environment manager is deprecated: " + "Pyomo only supports a single global environment", + version='6.0', + ) + def pop_env(): + pass + + @staticmethod + @deprecated( + "The PluginGlobals environment manager is deprecated: " + "Pyomo only supports a single global environment", + version='6.0', + ) + def clear(): + pass + + +class PluginError(PyomoException): + pass + + +def alias(name, doc=None, subclass=None): + if subclass is not None: + deprecation_warning( + "The Pyomo plugin infrastructure alias() function does " + "not support the subclass flag.", + version='6.0', + ) + calling_frame = inspect.currentframe().f_back + locals_ = calling_frame.f_locals + # + # Some sanity checks + # + assert ( + locals_ is not calling_frame.f_globals and '__module__' in locals_ + ), 'implements() can only be used in a class definition' + # + locals_.setdefault('__plugin_aliases__', []).append((name, doc)) + + +def implements(interface, inherit=None, namespace=None, service=False): + if namespace is not None: + deprecation_warning( + "The Pyomo plugin infrastructure only supports a " + "single global namespace.", + version='6.0', + ) + calling_frame = inspect.currentframe().f_back + locals_ = calling_frame.f_locals + # + # Some sanity checks + # + assert ( + locals_ is not calling_frame.f_globals and '__module__' in locals_ + ), 'implements() can only be used in a class definition' + assert issubclass(interface, Interface) + # + locals_.setdefault('__implements__', []).append((interface, inherit, service)) + + +class InterfaceMeta(type): + def __new__(cls, name, bases, classdict, *args, **kwargs): + # Ensure that all interfaces have their own _plugins & _aliases + # dictionaries + classdict.setdefault('_next_id', 0) + classdict.setdefault('_plugins', {}) + classdict.setdefault('_aliases', {}) + return super().__new__(cls, name, bases, classdict, *args, **kwargs) + + +class Interface(metaclass=InterfaceMeta): + pass + + +class _deprecated_plugin_dict(dict): + def __init__(self, name, classdict): + super().__init__() + msg = classdict.pop('__deprecated_message__', None) + if not msg: + msg = 'The %s interface has been deprecated' % (name,) + version = classdict.pop('__deprecated_version__', None) + remove_in = classdict.pop('__deprecated_remove_in__', None) + self._deprecation_info = { + 'msg': msg, + 'version': version, + 'remove_in': remove_in, + } + + def __setitem__(self, key, val): + deprecation_warning(**self._deprecation_info) + super().__setitem__(key, val) + + def items(self): + deprecation_warning(**self._deprecation_info) + return super().items() + + +class DeprecatedInterfaceMeta(InterfaceMeta): + def __new__(cls, name, bases, classdict, *args, **kwargs): + classdict.setdefault('_plugins', _deprecated_plugin_dict(name, classdict)) + return super().__new__(cls, name, bases, classdict, *args, **kwargs) + + +class DeprecatedInterface(Interface, metaclass=DeprecatedInterfaceMeta): + pass + + +class PluginMeta(type): + def __new__(cls, name, bases, classdict, *args, **kwargs): + # This plugin is a singleton plugin based on the __singleton__ + # class attribute, OR if not specified, if any base class is a + # singleton plugin + _singleton = classdict.pop( + '__singleton__', + any(getattr(base, '__singleton__', None) is not None for base in bases), + ) + # This prevents base class __singleton__, __plugin_aliases__, + # and __implements__ from implicitly bleeding through and being + # accidentally shared across subclasses. + classdict['__singleton__'] = None + aliases = classdict.setdefault('__plugin_aliases__', []) + implements = classdict.setdefault('__implements__', []) + # If multiple classes (classdict, and/or any base) implement() + # the same interface, use standard Python rules to determine + # which implements() should govern (i.e. classdict supersedes + # bases, bases resolved in order) + interfaces = set(impl[0] for impl in implements) + for base in bases: + implements.extend( + ep + for ep in getattr(base, '__implements__', []) + if ep[0] not in interfaces + ) + interfaces.update(impl[0] for impl in implements) + for interface, inherit, service in implements: + if not inherit: + continue + if not any(issubclass(base, interface) for base in bases): + bases = bases + (interface,) + # Python requires that a class' metaclass be a + # (nonstrict) subclass of the metaclasses of all its + # base classes. Check, and declare a new metaclass if + # necessary. + if not issubclass(cls, type(interface)): + + class tmp_meta(cls, type(interface)): + def __new__(cls, name, bases, classdict, *args, **kwargs): + # This is a plugin and not an Interface. Do + # not set up dicts for the interface + # definition. + classdict.setdefault('_plugins', None) + classdict.setdefault('_aliases', None) + return super().__new__( + cls, name, bases, classdict, *args, **kwargs + ) + + cls = tmp_meta + + new_class = super().__new__(cls, name, bases, classdict, *args, **kwargs) + + # Register the new class with the interfaces + for interface, inherit, service in implements: + interface._plugins[new_class] = _deterministic_dict() + interface._aliases.update({name: (new_class, doc) for name, doc in aliases}) + + if _singleton: + new_class.__singleton__ = new_class() + + return new_class + + +class Plugin(object, metaclass=PluginMeta): + def __new__(cls): + if cls.__singleton__ is not None: + raise RuntimeError( + "Cannot create multiple singleton plugin instances of type %s" % (cls,) + ) + obj = super().__new__(cls) + obj._plugin_ids = {} + # Record this instance (service) with all Interfaces + for interface, inherit, service in cls.__implements__: + _id = interface._next_id + interface._next_id += 1 + obj._plugin_ids[interface] = _id + interface._plugins[cls][_id] = (weakref_ref(obj), service) + return obj + + def activate(self): + cls = self.__class__ + for interface, inherit, service in cls.__implements__: + _id = self._plugin_ids[interface] + obj, service = interface._plugins[cls][_id] + if not service: + interface._plugins[cls][_id] = obj, True + + enable = activate + + def deactivate(self): + cls = self.__class__ + for interface, inherit, service in cls.__implements__: + _id = self._plugin_ids[interface] + obj, service = interface._plugins[cls][_id] + if service: + interface._plugins[cls][_id] = obj, False + + disable = deactivate + + def enabled(self): + cls = self.__class__ + return any( + interface._plugins[cls][self._plugin_ids[interface]][1] + for interface, inherit, service in cls.__implements__ + ) + + +class SingletonPlugin(Plugin): + __singleton__ = True + + +class ExtensionPoint(object): + def __init__(self, interface): + assert issubclass(interface, Interface) + self._interface = interface + + def __iter__(self, key=None, all=False): + for cls, plugins in self._interface._plugins.items(): + remove = [] + for i, (obj, service) in plugins.items(): + if not obj(): + remove.append(i) + elif (all or service) and ( + key is None or key is cls or key == cls.__name__ + ): + yield obj() + for i in remove: + del plugins[i] + + def __len__(self): + return len(list(self.__iter__())) + + def extensions(self, all=False, key=None): + return list(self.__iter__(key=key, all=all)) + + def __call__(self, key=None, all=False): + return self.extensions(all=all, key=key) + + def service(self, key=None, all=False): + """Return the unique service that matches the interface of this + extension point. An exception occurs if no service matches the + specified key, or if multiple services match. + """ + ans = self.extensions(all=all, key=key) + if len(ans) == 1: + # + # There is a single service, so return it. + # + return ans[0] + elif not ans: + return None + else: + raise PluginError( + "The ExtensionPoint does not have a unique " + "service! %d services are defined for interface" + " '%s' (key=%s)." % (len(ans), self._interface.__name__, str(key)) + ) + + +class PluginFactory(object): + def __init__(self, interface): + self.interface = interface + + def __call__(self, name, *args, **kwds): + name = str(name) + if name not in self.interface._aliases: + return None + else: + return self.interface._aliases[name][0](*args, **kwds) + + def services(self): + return list(self.interface._aliases) + + def get_class(self, name): + return self.interface._aliases.get(name, [None])[0] + + def doc(self, name): + name = str(name) + if name not in self.interface._aliases: + return "" + else: + return self.interface._aliases[name][1] + + def deactivate(self, name): + if isinstance(name, str): + cls = self.get_class(name) + if cls is None: + return + for service in ExtensionPoint(self.interface)(key=cls): + service.deactivate() + + def activate(self, name): + if isinstance(name, str): + cls = self.get_class(name) + if cls is None: + return + for service in ExtensionPoint(self.interface)(all=True, key=cls): + service.activate() + + +# Old name for creating plugin factories +CreatePluginFactory = PluginFactory diff --git a/pyomo/common/plugins.py b/pyomo/common/plugins.py index 5958ebaa736..7db8077855a 100644 --- a/pyomo/common/plugins.py +++ b/pyomo/common/plugins.py @@ -9,8 +9,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from . import download -from . import getGSL def load(): - download.DownloadFactory.register('gsl')(getGSL.get_gsl) + pass diff --git a/pyomo/common/pyomo_typing.py b/pyomo/common/pyomo_typing.py index d5714159f5d..64ab2ddafc9 100644 --- a/pyomo/common/pyomo_typing.py +++ b/pyomo/common/pyomo_typing.py @@ -13,9 +13,11 @@ _overloads = {} + def _get_fullqual_name(func: typing.Callable) -> str: return f"{func.__module__}.{func.__qualname__}" + def overload(func: typing.Callable): """Wrap typing.overload that remembers the overloaded signatures @@ -27,5 +29,6 @@ def overload(func: typing.Callable): _overloads.setdefault(_get_fullqual_name(func), []).append(func) return typing.overload(func) + def get_overloads_for(func: typing.Callable): return _overloads.get(_get_fullqual_name(func), []) diff --git a/pyomo/common/shutdown.py b/pyomo/common/shutdown.py index 3738667e743..5054fd21279 100644 --- a/pyomo/common/shutdown.py +++ b/pyomo/common/shutdown.py @@ -1,5 +1,6 @@ import atexit + def python_is_shutting_down(): """Returns `True` if the interpreter is in the process of shutting down. @@ -10,8 +11,10 @@ def python_is_shutting_down(): """ return not python_is_shutting_down.isalive + python_is_shutting_down.isalive = [True] + @atexit.register def _flag_shutting_down(): python_is_shutting_down.isalive.clear() diff --git a/pyomo/common/sorting.py b/pyomo/common/sorting.py index 0f4753c4289..31e796c6a9e 100644 --- a/pyomo/common/sorting.py +++ b/pyomo/common/sorting.py @@ -9,6 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + class _robust_sort_keyfcn(object): """Class for robustly generating sortable keys for arbitrary data. @@ -23,6 +24,7 @@ class _robust_sort_keyfcn(object): user's original key function, if provided """ + _typemap = { int: (1, float.__name__), float: (1, float.__name__), @@ -60,7 +62,7 @@ def _classify_type(self, val): # it is, sort it as if it were a float. try: # Extra check that the comparison returns a meaningful result - if bool(val < 1.) != bool(1. < val or 1. == val): + if bool(val < 1.0) != bool(1.0 < val or 1.0 == val): _typename = float.__name__ except: pass @@ -92,7 +94,7 @@ def _generate_sort_key(self, val): # value type is convertible to string return _typename, str(val) else: - # everything else (incuding i==3), fall back on id() + # everything else (including i==3), fall back on id() return _typename, id(val) @@ -110,7 +112,7 @@ def sorted_robust(iterable, key=None, reverse=False): the source of items to sort key: function a function of one argument that is used to extract the - comparison ket from each element in `iterable` + comparison key from each element in `iterable` reverse: bool if True, the iterable is sorted as if each comparison was reversed. diff --git a/pyomo/common/tee.py b/pyomo/common/tee.py index ff776f83085..029d66f5767 100644 --- a/pyomo/common/tee.py +++ b/pyomo/common/tee.py @@ -75,6 +75,7 @@ class redirect_fd(object): If True, and `fd` is 1 or 2, then update `sys.stdout` or `sys.stderr` to also point to the new file descriptor """ + def __init__(self, fd=1, output=None, synchronize=True): if output is None: # /dev/null is used just to discard what is being printed @@ -156,6 +157,7 @@ class capture_output(object): Takes in a StringIO, file-like object, or filename and temporarily redirects output to a string buffer. """ + def __init__(self, output=None, capture_fd=False): if output is None: output = StringIO() @@ -179,7 +181,7 @@ def __enter__(self): if self.capture_fd: self.fd_redirect = ( redirect_fd(1, sys.stdout.fileno()), - redirect_fd(2, sys.stderr.fileno()) + redirect_fd(2, sys.stderr.fileno()), ) self.fd_redirect[0].__enter__() self.fd_redirect[1].__enter__() @@ -223,9 +225,14 @@ def __init__(self, mode, buffering, encoding, newline): # While we support "unbuffered" behavior in text mode, # python does not buffering = -1 - self.write_file = os.fdopen(self.write_pipe, mode=mode, - buffering=buffering, encoding=encoding, - newline=newline, closefd=False) + self.write_file = os.fdopen( + self.write_pipe, + mode=mode, + buffering=buffering, + encoding=encoding, + newline=newline, + closefd=False, + ) self.decoder_buffer = b'' try: self.encoding = encoding or self.write_file.encoding @@ -270,12 +277,14 @@ def finalize(self, ostreams): logger.error( "Stream handle closed with a partial line " "in the output buffer that was not emitted to the " - "output stream(s):\n\t'%s'" % (self.output_buffer,)) + "output stream(s):\n\t'%s'" % (self.output_buffer,) + ) if self.decoder_buffer: logger.error( "Stream handle closed with un-decoded characters " "in the decoder buffer that was not emitted to the " - "output stream(s):\n\t%r" % (self.decoder_buffer,)) + "output stream(s):\n\t%r" % (self.decoder_buffer,) + ) def decodeIncomingBuffer(self): if not self.encoding: @@ -326,8 +335,8 @@ def writeOutputBuffer(self, ostreams): logger.error( "Output stream (%s) closed before all output was " "written to it. The following was left in " - "the output buffer:\n\t%r" % ( - stream, ostring[written:],)) + "the output buffer:\n\t%r" % (stream, ostring[written:]) + ) class TeeStream(object): @@ -391,17 +400,19 @@ def close(self, in_exception=False): if not self._threads: break _poll *= 2 - if _poll_timeout <= _poll < 2*_poll_timeout: + if _poll_timeout <= _poll < 2 * _poll_timeout: if in_exception: # We are already processing an exception: no reason # to trigger another, nor to deadlock for an extended time break logger.warning( "Significant delay observed waiting to join reader " - "threads, possible output stream deadlock") + "threads, possible output stream deadlock" + ) elif _poll >= _poll_timeout_deadlock: raise RuntimeError( - "TeeStream: deadlock observed joining reader threads") + "TeeStream: deadlock observed joining reader threads" + ) for h in list(self._handles): h.finalize(self.ostreams) @@ -439,7 +450,7 @@ def _start(self, handle): # The merged reader is already running... nothing additional # needs to be done pass - + def _streamReader(self, handle): while True: new_data = os.read(handle.read_pipe, io.DEFAULT_BUFFER_SIZE) @@ -453,14 +464,14 @@ def _streamReader(self, handle): # Now, output whatever we have decoded to the output streams handle.writeOutputBuffer(self.ostreams) # - #print("STREAM READER: DONE") + # print("STREAM READER: DONE") def _mergedReader(self): noop = [] handles = self._active_handles _poll = _poll_interval _fast_poll_ct = _poll_rampup - new_data = '' # something not None + new_data = '' # something not None while handles: if new_data is None: # For performance reasons, we use very aggressive @@ -503,8 +514,7 @@ def _mergedReader(self): # send select() a *copy* of the handles list, as we see # deadlocks when handles are added while select() is # waiting - ready_handles = select( - list(handles), noop, noop, _poll)[0] + ready_handles = select(list(handles), noop, noop, _poll)[0] if not ready_handles: new_data = None continue @@ -523,4 +533,4 @@ def _mergedReader(self): # Now, output whatever we have decoded to the output streams handle.writeOutputBuffer(self.ostreams) # - #print("MERGED READER: DONE") + # print("MERGED READER: DONE") diff --git a/pyomo/common/tempfiles.py b/pyomo/common/tempfiles.py index 6872dab6145..e981d26d84e 100644 --- a/pyomo/common/tempfiles.py +++ b/pyomo/common/tempfiles.py @@ -25,10 +25,9 @@ from pyomo.common.deprecation import deprecated, deprecation_warning from pyomo.common.errors import TempfileContextError from pyomo.common.multithread import MultiThreadWrapperWithMain + try: - from pyutilib.component.config.tempfiles import ( - TempfileManager as pyutilib_mngr - ) + from pyutilib.component.config.tempfiles import TempfileManager as pyutilib_mngr except ImportError: pyutilib_mngr = None @@ -109,15 +108,19 @@ def shutdown(self, remove=True): "Temporary files created through TempfileManager " "contexts have not been deleted (observed during " "TempfileManager instance shutdown).\n" - "Undeleted entries:\n\t"+ "\n\t".join( + "Undeleted entries:\n\t" + + "\n\t".join( fname if isinstance(fname, str) else fname.decode() for ctx in self._context_stack - for fd, fname in ctx.tempfiles)) + for fd, fname in ctx.tempfiles + ) + ) if self._context_stack: logger.warning( "TempfileManagerClass instance: un-popped tempfile " "contexts still exist during TempfileManager instance " - "shutdown") + "shutdown" + ) self.clear_tempfiles(remove) # Delete the stack so that subsequent operations generate an # exception @@ -133,35 +136,37 @@ def context(self): raise TempfileContextError( "TempfileManager has no currently active context. " "Create a context (with push() or __enter__()) before " - "attempting to create temporary objects.") + "attempting to create temporary objects." + ) return self._context_stack[-1] def create_tempfile(self, suffix=None, prefix=None, text=False, dir=None): "Call :meth:`TempfileContext.create_tempfile` on the active context" return self.context().create_tempfile( - suffix=suffix, prefix=prefix, text=text, dir=dir) + suffix=suffix, prefix=prefix, text=text, dir=dir + ) def create_tempdir(self, suffix=None, prefix=None, dir=None): "Call :meth:`TempfileContext.create_tempdir` on the active context" - return self.context().create_tempdir( - suffix=suffix, prefix=prefix, dir=dir) + return self.context().create_tempdir(suffix=suffix, prefix=prefix, dir=dir) def add_tempfile(self, filename, exists=True): "Call :meth:`TempfileContext.add_tempfile` on the active context" - return self.context().add_tempfile( - filename=filename, exists=exists) + return self.context().add_tempfile(filename=filename, exists=exists) def clear_tempfiles(self, remove=True): """Delete all temporary files and remove all contexts.""" while self._context_stack: self.pop(remove) - @deprecated("The TempfileManager.sequential_files() method has been " - "removed. All temporary files are created with guaranteed " - "unique names. Users wishing sequentially numbered files " - "should create a temporary (empty) directory using mkdtemp " - "/ create_tempdir and place the sequential files within it.", - version='6.2') + @deprecated( + "The TempfileManager.sequential_files() method has been " + "removed. All temporary files are created with guaranteed " + "unique names. Users wishing sequentially numbered files " + "should create a temporary (empty) directory using mkdtemp " + "/ create_tempdir and place the sequential files within it.", + version='6.2', + ) def sequential_files(self, ctr=0): pass @@ -220,7 +225,8 @@ def __exit__(self, exc_type, exc_val, exc_tb): "the TempfileManager stack within a context manager " "(i.e., `with TempfileManager:`) but was not popped " "before the context manager exited. Popping the " - "context to preserve the stack integrity.") + "context to preserve the stack integrity." + ) class TempfileContext: @@ -322,9 +328,7 @@ def gettempdir(self): return dir def gettempdirb(self): - """Same as :meth:`gettempdir()`, but the return value is ``bytes`` - - """ + """Same as :meth:`gettempdir()`, but the return value is ``bytes``""" dir = self._resolve_tempdir() if dir is None: return tempfile.gettempdirb() @@ -341,9 +345,7 @@ def gettempprefix(self): return tempfile.gettempprefix() def gettempprefixb(self): - """Same as :meth:`gettempprefix()`, but the return value is ``bytes`` - - """ + """Same as :meth:`gettempprefix()`, but the return value is ``bytes``""" return tempfile.gettempprefixb() def create_tempfile(self, suffix=None, prefix=None, text=False, dir=None): @@ -360,8 +362,7 @@ def create_tempfile(self, suffix=None, prefix=None, text=False, dir=None): The absolute path of the new file. """ - fd, fname = self.mkstemp(suffix=suffix, prefix=prefix, - dir=dir, text=text) + fd, fname = self.mkstemp(suffix=suffix, prefix=prefix, dir=dir, text=text) os.close(fd) self.tempfiles[-1] = (None, fname) return fname @@ -437,7 +438,9 @@ def _resolve_tempdir(self, dir=None): "to specify the default location for Pyomo " "temporary files has been deprecated. " "Please set TempfileManager.tempdir in " - "pyomo.common.tempfiles", version='5.7.2') + "pyomo.common.tempfiles", + version='5.7.2', + ) return pyutilib_mngr.tempdir return None @@ -462,13 +465,10 @@ def _remove_filesystem_object(self, name): # Failure to delete a tempfile # should NOT be fatal logger = logging.getLogger(__name__) - logger.warning("Unable to delete temporary " - "file %s" % (name,)) + logger.warning("Unable to delete temporary file %s" % (name,)) return assert os.path.isdir(name) - shutil.rmtree( - name, - ignore_errors=not deletion_errors_are_fatal) + shutil.rmtree(name, ignore_errors=not deletion_errors_are_fatal) # The global Pyomo TempfileManager instance diff --git a/pyomo/common/tests/config_plugin.py b/pyomo/common/tests/config_plugin.py index 3f693eec160..ada788fd7d4 100644 --- a/pyomo/common/tests/config_plugin.py +++ b/pyomo/common/tests/config_plugin.py @@ -10,6 +10,7 @@ # ___________________________________________________________________________ from pyomo.common.config import ConfigDict, ConfigValue + def get_configuration(config): ans = ConfigDict() ans.declare('key1', ConfigValue(default=0, domain=int)) diff --git a/pyomo/common/tests/dep_mod.py b/pyomo/common/tests/dep_mod.py index 65b4ec6653f..54530393783 100644 --- a/pyomo/common/tests/dep_mod.py +++ b/pyomo/common/tests/dep_mod.py @@ -15,7 +15,6 @@ numpy, numpy_available = attempt_import('numpy', defer_check=True) -bogus_nonexisting_module, bogus_nonexisting_module_available \ - = attempt_import('bogus_nonexisting_module', - alt_names=['bogus_nem'], - defer_check=True) +bogus_nonexisting_module, bogus_nonexisting_module_available = attempt_import( + 'bogus_nonexisting_module', alt_names=['bogus_nem'], defer_check=True +) diff --git a/pyomo/common/tests/deps.py b/pyomo/common/tests/deps.py index b4886906a7f..e5236d0f7ec 100644 --- a/pyomo/common/tests/deps.py +++ b/pyomo/common/tests/deps.py @@ -13,7 +13,7 @@ # This module supports testing the attempt_import() functionality when # used at the module scope. It cannot be in the actual test module, as # pytest accesses objects in the module scope during test collection -# (which would inadvertantly trigger premature module import) +# (which would inadvertently trigger premature module import) # from pyomo.common.dependencies import attempt_import @@ -23,20 +23,20 @@ bogus_nonexisting_module_available as has_bogus_nem, ) -bogus, bogus_available \ - = attempt_import('nonexisting.module.bogus', defer_check=True) +bogus, bogus_available = attempt_import('nonexisting.module.bogus', defer_check=True) pkl_test, pkl_available = attempt_import( - 'nonexisting.module.pickle_test', - deferred_submodules=['submod'], defer_check=True + 'nonexisting.module.pickle_test', deferred_submodules=['submod'], defer_check=True ) pyo, pyo_available = attempt_import( - 'pyomo', alt_names=['pyo'], - deferred_submodules={'version': None, - 'common.tests.dep_mod': ['dm']}) + 'pyomo', + alt_names=['pyo'], + deferred_submodules={'version': None, 'common.tests.dep_mod': ['dm']}, +) dm = pyo.common.tests.dep_mod + def test_access_bogus_hello(): bogus_nem.hello diff --git a/pyomo/common/tests/import_ex.py b/pyomo/common/tests/import_ex.py index 1bbd003b4b0..e19ad956044 100644 --- a/pyomo/common/tests/import_ex.py +++ b/pyomo/common/tests/import_ex.py @@ -1,4 +1,5 @@ def a(): pass + b = 2 diff --git a/pyomo/common/tests/relo_mod_new.py b/pyomo/common/tests/relo_mod_new.py index 2957c44bcc7..1ef27681b66 100644 --- a/pyomo/common/tests/relo_mod_new.py +++ b/pyomo/common/tests/relo_mod_new.py @@ -11,4 +11,6 @@ RELO_ATTR = 42 -class ReloClass(object): pass + +class ReloClass(object): + pass diff --git a/pyomo/common/tests/relocated.py b/pyomo/common/tests/relocated.py index 0b414e5c540..9de63e0cec9 100644 --- a/pyomo/common/tests/relocated.py +++ b/pyomo/common/tests/relocated.py @@ -12,16 +12,16 @@ from pyomo.common.deprecation import relocated_module_attribute + class Bar(object): data = 42 + def __getattr__(name): if name.startswith('Foo'): return name[3:] - raise AttributeError( - "module '%s' has no attribute '%s'" % (__name__, name)) + raise AttributeError("module '%s' has no attribute '%s'" % (__name__, name)) + -relocated_module_attribute( - 'Foo', 'pyomo.common.tests.test_deprecated.Bar', 'test') -relocated_module_attribute( - 'Foo_2', 'pyomo.common.tests.relocated.Bar', 'test') +relocated_module_attribute('Foo', 'pyomo.common.tests.test_deprecated.Bar', 'test') +relocated_module_attribute('Foo_2', 'pyomo.common.tests.relocated.Bar', 'test') diff --git a/pyomo/common/tests/test_bunch.py b/pyomo/common/tests/test_bunch.py index 5fe033b3e51..a8daf5a0071 100644 --- a/pyomo/common/tests/test_bunch.py +++ b/pyomo/common/tests/test_bunch.py @@ -4,7 +4,7 @@ # Copyright (c) 2008-2022 # National Technology and Engineering Solutions of Sandia, LLC # Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -20,59 +20,209 @@ import unittest from pyomo.common.collections import Bunch + class Test(unittest.TestCase): def test_Bunch1(self): - opt = Bunch('a=None c=d e="1 2 3"', foo=1, bar='x') + opt = Bunch('a=None c=d e="1 2 3" f=" 5 "', foo=1, bar='x') self.assertEqual(opt.ll, None) self.assertEqual(opt.a, None) self.assertEqual(opt.c, 'd') self.assertEqual(opt.e, '1 2 3') + self.assertEqual(opt.f, 5) self.assertEqual(opt.foo, 1) self.assertEqual(opt.bar, 'x') self.assertEqual(opt['bar'], 'x') opt.xx = 1 opt['yy'] = 2 self.assertEqual( - set(opt.keys()), set(['a', 'bar', 'c', 'foo', 'e', 'xx', 'yy'])) + set(opt.keys()), set(['a', 'bar', 'c', 'f', 'foo', 'e', 'xx', 'yy']) + ) opt.x = Bunch(a=1, b=2) self.assertEqual( - set(opt.keys()), set( - ['a', 'bar', 'c', 'foo', 'e', 'xx', 'yy', 'x'])) + set(opt.keys()), set(['a', 'bar', 'c', 'f', 'foo', 'e', 'xx', 'yy', 'x']) + ) self.assertEqual( repr(opt), - "Bunch(a = None, bar = 'x', c = 'd', e = '1 2 3', foo = 1, x = Bunch(a = 1, b = 2), xx = 1, yy = 2)") + "Bunch(a = None, bar = 'x', c = 'd', e = '1 2 3', f = 5, " + "foo = 1, x = Bunch(a = 1, b = 2), xx = 1, yy = 2)", + ) self.assertEqual( - str(opt), """a: None + str(opt), + """a: None bar: 'x' c: 'd' e: '1 2 3' +f: 5 foo: 1 x: a: 1 b: 2 xx: 1 -yy: 2""") +yy: 2""", + ) opt._name_ = 'BUNCH' self.assertEqual( - set(opt.keys()), set( - ['a', 'bar', 'c', 'foo', 'e', 'xx', 'yy', 'x'])) + set(opt.keys()), set(['a', 'bar', 'c', 'f', 'foo', 'e', 'xx', 'yy', 'x']) + ) self.assertEqual( repr(opt), - "Bunch(a = None, bar = 'x', c = 'd', e = '1 2 3', foo = 1, x = Bunch(a = 1, b = 2), xx = 1, yy = 2)") + "Bunch(a = None, bar = 'x', c = 'd', e = '1 2 3', f = 5, " + "foo = 1, x = Bunch(a = 1, b = 2), xx = 1, yy = 2)", + ) self.assertEqual( - str(opt), """a: None + str(opt), + """a: None bar: 'x' c: 'd' e: '1 2 3' +f: 5 foo: 1 x: a: 1 b: 2 xx: 1 -yy: 2""") +yy: 2""", + ) + + with self.assertRaisesRegex( + TypeError, r"Bunch\(\) positional arguments must be strings" + ): + Bunch(5) + + with self.assertRaisesRegex( + ValueError, + r"Bunch\(\) positional arguments must be space " + "separated strings of form 'key=value', got 'foo'", + ): + Bunch('a=5 foo = 6') - def test_Container2(self): + def test_pickle(self): o1 = Bunch('a=None c=d e="1 2 3"', foo=1, bar='x') s = pickle.dumps(o1) o2 = pickle.loads(s) self.assertEqual(o1, o2) + + def test_attr_methods(self): + b = Bunch() + b.foo = 5 + self.assertEqual(list(b.keys()), ['foo']) + self.assertEqual(b.foo, 5) + b._foo = 50 + self.assertEqual(list(b.keys()), ['foo']) + self.assertEqual(b.foo, 5) + self.assertEqual(b._foo, 50) + + del b.foo + self.assertEqual(list(b.keys()), []) + self.assertEqual(b.foo, None) + self.assertEqual(b._foo, 50) + + del b._foo + self.assertEqual(list(b.keys()), []) + self.assertEqual(b.foo, None) + with self.assertRaisesRegex(AttributeError, "Unknown attribute '_foo'"): + b._foo + + def test_item_methods(self): + b = Bunch() + b['foo'] = 5 + self.assertEqual(list(b.keys()), ['foo']) + self.assertEqual(b['foo'], 5) + b['_foo'] = 50 + self.assertEqual(list(b.keys()), ['foo']) + self.assertEqual(b['foo'], 5) + self.assertEqual(b['_foo'], 50) + + del b['foo'] + self.assertEqual(list(b.keys()), []) + self.assertEqual(b['foo'], None) + self.assertEqual(b['_foo'], 50) + + del b['_foo'] + self.assertEqual(list(b.keys()), []) + self.assertEqual(b['foo'], None) + with self.assertRaisesRegex(AttributeError, "Unknown attribute '_foo'"): + b['_foo'] + + with self.assertRaisesRegex(ValueError, r"Bunch keys must be str \(got int\)"): + b[5] + + with self.assertRaisesRegex(ValueError, r"Bunch keys must be str \(got int\)"): + b[5] = 5 + + with self.assertRaisesRegex(ValueError, r"Bunch keys must be str \(got int\)"): + del b[5] + + def test_update(self): + data = { + 'a': 1, + 'b': [2, {'bb': 3}, [4, {'bbb': 5}]], + 'c': {'cc': 6, 'ccc': {'e': 7}}, + 'd': [], + } + + # Test passing a dict + b = Bunch() + b.update(data) + self.assertEqual( + repr(b), + 'Bunch(a = 1, ' + 'b = [2, Bunch(bb = 3), [4, Bunch(bbb = 5)]], ' + 'c = Bunch(cc = 6, ccc = Bunch(e = 7)), ' + 'd = [])', + ) + + # Test passing a generator + b = Bunch() + b.update(data.items()) + self.assertEqual( + repr(b), + 'Bunch(a = 1, ' + 'b = [2, Bunch(bb = 3), [4, Bunch(bbb = 5)]], ' + 'c = Bunch(cc = 6, ccc = Bunch(e = 7)), ' + 'd = [])', + ) + + # Test passing a list + b = Bunch() + b.update(list(data.items())) + self.assertEqual( + repr(b), + 'Bunch(a = 1, ' + 'b = [2, Bunch(bb = 3), [4, Bunch(bbb = 5)]], ' + 'c = Bunch(cc = 6, ccc = Bunch(e = 7)), ' + 'd = [])', + ) + + def test_str(self): + data = { + 'a': 1, + 'b': [2, {'bb': 3}, [4, {'bbb': 5}]], + 'c': {'cc': 6, 'ccc': {'e': 7}}, + 'd': [], + } + + b = Bunch() + b.update(data) + self.assertEqual( + str(b), + ''' +a: 1 +b: +- 2 +- + bb: 3 +- [4, Bunch(bbb = 5)] +c: + cc: 6 + ccc: + e: 7 +d: [] +'''.strip(), + ) + + def test_set_name(self): + b = Bunch() + self.assertEqual(b._name_, 'Bunch') + b.set_name('TEST') + self.assertEqual(b._name_, 'TEST') diff --git a/pyomo/common/tests/test_config.py b/pyomo/common/tests/test_config.py index 20a304aac55..9bafd852eb9 100644 --- a/pyomo/common/tests/test_config.py +++ b/pyomo/common/tests/test_config.py @@ -37,19 +37,47 @@ from io import StringIO from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args + + def yaml_load(arg): return yaml.load(arg, **yaml_load_args) + from pyomo.common.config import ( - ConfigDict, ConfigValue, - ConfigList, MarkImmutable, ImmutableConfigValue, - Bool, Integer, PositiveInt, NegativeInt, NonPositiveInt, NonNegativeInt, - PositiveFloat, NegativeFloat, NonPositiveFloat, NonNegativeFloat, - In, ListOf, Module, Path, PathList, ConfigEnum, DynamicImplicitDomain, - _UnpickleableDomain, _picklable, + ConfigDict, + ConfigValue, + ConfigList, + MarkImmutable, + ImmutableConfigValue, + Bool, + Integer, + PositiveInt, + NegativeInt, + NonPositiveInt, + NonNegativeInt, + PositiveFloat, + NegativeFloat, + NonPositiveFloat, + NonNegativeFloat, + In, + ListOf, + Module, + Path, + PathList, + ConfigEnum, + DynamicImplicitDomain, + ConfigFormatter, + String_ConfigFormatter, + document_kwargs_from_configdict, + add_docstring_list, + USER_OPTION, + DEVELOPER_OPTION, + _UnpickleableDomain, + _picklable, ) from pyomo.common.log import LoggingIntercept + # Utility to redirect display() to a string def _display(obj, *args): test = StringIO() @@ -62,6 +90,69 @@ class GlobalClass(object): pass +def ExampleConfig(): + CONFIG = ConfigDict() + CONFIG.declare( + 'option_1', + ConfigValue(default=5, domain=int, doc='The first configuration option'), + ) + SOLVER = CONFIG.declare('solver_options', ConfigDict()) + SOLVER.declare( + 'solver_option_1', + ConfigValue( + default=1, + domain=float, + doc='The first solver configuration option', + visibility=DEVELOPER_OPTION, + ), + ) + SOLVER.declare( + 'solver_option_2', + ConfigValue( + default=1, + domain=float, + doc="""The second solver configuration option + + With a very long line containing + wrappable text in a long, silly paragraph + with little actual information. + #) but a bulleted list + #) with two bullets + """, + ), + ) + SOLVER.declare( + 'solver_option_3', + ConfigValue( + default=1, + domain=float, + doc=""" + The third solver configuration option + + This has a leading newline and a very long line containing + wrappable text in a long, silly paragraph with + little actual information. + + .. and_a_list:: + #) but a bulleted list + #) with two bullets """, + ), + ) + CONFIG.declare( + 'option_2', + ConfigValue( + default=5, + domain=int, + doc="""The second solver configuration option + with a very long line containing + wrappable text in a long, silly paragraph + with little actual information. + """, + ), + ) + return CONFIG + + class TestConfigDomains(unittest.TestCase): def test_Bool(self): c = ConfigDict() @@ -79,7 +170,7 @@ def test_Bool(self): self.assertEqual(c.a, False) c.a = '1' self.assertEqual(c.a, True) - c.a = 0. + c.a = 0.0 self.assertEqual(c.a, False) c.a = True self.assertEqual(c.a, True) @@ -93,7 +184,7 @@ def test_Bool(self): self.assertEqual(c.a, True) c.a = '0' self.assertEqual(c.a, False) - c.a = 1. + c.a = 1.0 self.assertEqual(c.a, True) with self.assertRaises(ValueError): @@ -110,7 +201,7 @@ def test_Integer(self): c = ConfigDict() c.declare('a', ConfigValue(5, Integer)) self.assertEqual(c.a, 5) - c.a = 4. + c.a = 4.0 self.assertEqual(c.a, 4) c.a = -6 self.assertEqual(c.a, -6) @@ -130,7 +221,7 @@ def test_PositiveInt(self): c = ConfigDict() c.declare('a', ConfigValue(5, PositiveInt)) self.assertEqual(c.a, 5) - c.a = 4. + c.a = 4.0 self.assertEqual(c.a, 4) c.a = 6 self.assertEqual(c.a, 6) @@ -151,7 +242,7 @@ def test_NegativeInt(self): c = ConfigDict() c.declare('a', ConfigValue(-5, NegativeInt)) self.assertEqual(c.a, -5) - c.a = -4. + c.a = -4.0 self.assertEqual(c.a, -4) c.a = -6 self.assertEqual(c.a, -6) @@ -172,7 +263,7 @@ def test_NonPositiveInt(self): c = ConfigDict() c.declare('a', ConfigValue(-5, NonPositiveInt)) self.assertEqual(c.a, -5) - c.a = -4. + c.a = -4.0 self.assertEqual(c.a, -4) c.a = -6 self.assertEqual(c.a, -6) @@ -192,7 +283,7 @@ def test_NonNegativeInt(self): c = ConfigDict() c.declare('a', ConfigValue(5, NonNegativeInt)) self.assertEqual(c.a, 5) - c.a = 4. + c.a = 4.0 self.assertEqual(c.a, 4) c.a = 6 self.assertEqual(c.a, 6) @@ -212,7 +303,7 @@ def test_PositiveFloat(self): c = ConfigDict() c.declare('a', ConfigValue(5, PositiveFloat)) self.assertEqual(c.a, 5) - c.a = 4. + c.a = 4.0 self.assertEqual(c.a, 4) c.a = 6 self.assertEqual(c.a, 6) @@ -232,7 +323,7 @@ def test_NegativeFloat(self): c = ConfigDict() c.declare('a', ConfigValue(-5, NegativeFloat)) self.assertEqual(c.a, -5) - c.a = -4. + c.a = -4.0 self.assertEqual(c.a, -4) c.a = -6 self.assertEqual(c.a, -6) @@ -252,7 +343,7 @@ def test_NonPositiveFloat(self): c = ConfigDict() c.declare('a', ConfigValue(-5, NonPositiveFloat)) self.assertEqual(c.a, -5) - c.a = -4. + c.a = -4.0 self.assertEqual(c.a, -4) c.a = -6 self.assertEqual(c.a, -6) @@ -271,7 +362,7 @@ def test_NonNegativeFloat(self): c = ConfigDict() c.declare('a', ConfigValue(5, NonNegativeFloat)) self.assertEqual(c.a, 5) - c.a = 4. + c.a = 4.0 self.assertEqual(c.a, 4) c.a = 6 self.assertEqual(c.a, 6) @@ -288,7 +379,7 @@ def test_NonNegativeFloat(self): def test_In(self): c = ConfigDict() - c.declare('a', ConfigValue(None, In([1,3,5]))) + c.declare('a', ConfigValue(None, In([1, 3, 5]))) self.assertEqual(c.get('a').domain_name(), 'In[1, 3, 5]') self.assertEqual(c.a, None) c.a = 3 @@ -303,7 +394,7 @@ def test_In(self): c.a = '1' self.assertEqual(c.a, 3) - c.declare('b', ConfigValue(None, In([1,3,5], int))) + c.declare('b', ConfigValue(None, In([1, 3, 5], int))) self.assertEqual(c.b, None) c.b = 3 self.assertEqual(c.b, 3) @@ -319,12 +410,14 @@ def test_In(self): class Container(object): def __init__(self, vals): self._vals = vals + def __str__(self): return f'Container{self._vals}' + def __contains__(self, val): return val in self._vals - c.declare('c', ConfigValue(None, In(Container([1,3,5])))) + c.declare('c', ConfigValue(None, In(Container([1, 3, 5])))) self.assertEqual(c.get('c').domain_name(), 'In(Container[1, 3, 5])') self.assertEqual(c.c, None) c.c = 3 @@ -339,10 +432,7 @@ class TestEnum(enum.Enum): ITEM_TWO = 'two' cfg = ConfigDict() - cfg.declare('enum', ConfigValue( - default=TestEnum.ITEM_TWO, - domain=In(TestEnum) - )) + cfg.declare('enum', ConfigValue(default=TestEnum.ITEM_TWO, domain=In(TestEnum))) self.assertEqual(cfg.get('enum').domain_name(), 'InEnum[TestEnum]') self.assertEqual(cfg.enum, TestEnum.ITEM_TWO) cfg.enum = 'ITEM_ONE' @@ -356,14 +446,14 @@ class TestEnum(enum.Enum): with self.assertRaisesRegex(ValueError, '.*3 is not a valid'): cfg.enum = 3 with self.assertRaisesRegex(ValueError, '.*invalid value'): - cfg.enum ='ITEM_THREE' - + cfg.enum = 'ITEM_THREE' def test_Path(self): def norm(x): if cwd[1] == ':' and x[0] == '/': x = cwd[:2] + x - return x.replace('/',os.path.sep) + return x.replace('/', os.path.sep) + cwd = os.getcwd() + os.path.sep c = ConfigDict() @@ -374,10 +464,10 @@ def norm(x): self.assertEqual(c.a, norm('/a/b/c')) c.a = "a/b/c" self.assertTrue(os.path.sep in c.a) - self.assertEqual(c.a, norm(cwd+'a/b/c')) + self.assertEqual(c.a, norm(cwd + 'a/b/c')) c.a = "${CWD}/a/b/c" self.assertTrue(os.path.sep in c.a) - self.assertEqual(c.a, norm(cwd+'a/b/c')) + self.assertEqual(c.a, norm(cwd + 'a/b/c')) c.a = None self.assertIs(c.a, None) @@ -388,10 +478,10 @@ def norm(x): self.assertEqual(c.b, norm('/a/b/c')) c.b = "a/b/c" self.assertTrue(os.path.sep in c.b) - self.assertEqual(c.b, norm(cwd+'rel/path/a/b/c')) + self.assertEqual(c.b, norm(cwd + 'rel/path/a/b/c')) c.b = "${CWD}/a/b/c" self.assertTrue(os.path.sep in c.b) - self.assertEqual(c.b, norm(cwd+'a/b/c')) + self.assertEqual(c.b, norm(cwd + 'a/b/c')) c.b = None self.assertIs(c.b, None) @@ -405,7 +495,7 @@ def norm(x): self.assertEqual(c.c, norm('/my/dir/a/b/c')) c.c = "${CWD}/a/b/c" self.assertTrue(os.path.sep in c.c) - self.assertEqual(c.c, norm(cwd+'a/b/c')) + self.assertEqual(c.c, norm(cwd + 'a/b/c')) c.c = None self.assertIs(c.c, None) @@ -417,10 +507,10 @@ def norm(x): self.assertEqual(c.d, norm('/a/b/c')) c.d = "a/b/c" self.assertTrue(os.path.sep in c.d) - self.assertEqual(c.d, norm(cwd+'a/b/c')) + self.assertEqual(c.d, norm(cwd + 'a/b/c')) c.d = "${CWD}/a/b/c" self.assertTrue(os.path.sep in c.d) - self.assertEqual(c.d, norm(cwd+'a/b/c')) + self.assertEqual(c.d, norm(cwd + 'a/b/c')) c.d_base = '/my/dir' c.d = "/a/b/c" @@ -431,7 +521,7 @@ def norm(x): self.assertEqual(c.d, norm('/my/dir/a/b/c')) c.d = "${CWD}/a/b/c" self.assertTrue(os.path.sep in c.d) - self.assertEqual(c.d, norm(cwd+'a/b/c')) + self.assertEqual(c.d, norm(cwd + 'a/b/c')) c.d_base = 'rel/path' c.d = "/a/b/c" @@ -439,10 +529,10 @@ def norm(x): self.assertEqual(c.d, norm('/a/b/c')) c.d = "a/b/c" self.assertTrue(os.path.sep in c.d) - self.assertEqual(c.d, norm(cwd+'rel/path/a/b/c')) + self.assertEqual(c.d, norm(cwd + 'rel/path/a/b/c')) c.d = "${CWD}/a/b/c" self.assertTrue(os.path.sep in c.d) - self.assertEqual(c.d, norm(cwd+'a/b/c')) + self.assertEqual(c.d, norm(cwd + 'a/b/c')) try: Path.SuppressPathExpansion = True @@ -465,7 +555,8 @@ def test_PathList(self): def norm(x): if cwd[1] == ':' and x[0] == '/': x = cwd[:2] + x - return x.replace('/',os.path.sep) + return x.replace('/', os.path.sep) + cwd = os.getcwd() + os.path.sep c = ConfigDict() @@ -481,11 +572,11 @@ def norm(x): c.a = ["a/b/c", "/a/b/c", "${CWD}/a/b/c"] self.assertEqual(len(c.a), 3) self.assertTrue(os.path.sep in c.a[0]) - self.assertEqual(c.a[0], norm(cwd+'a/b/c')) + self.assertEqual(c.a[0], norm(cwd + 'a/b/c')) self.assertTrue(os.path.sep in c.a[1]) self.assertEqual(c.a[1], norm('/a/b/c')) self.assertTrue(os.path.sep in c.a[2]) - self.assertEqual(c.a[2], norm(cwd+'a/b/c')) + self.assertEqual(c.a[2], norm(cwd + 'a/b/c')) c.a = () self.assertEqual(len(c.a), 0) @@ -504,8 +595,10 @@ def test_ListOf(self): c.a = '7,8' self.assertEqual(c.a, [7, 8]) - ref=(r"(?m)Failed casting a\s+to ListOf\(int\)\s+" - r"Error: invalid literal for int\(\) with base 10: 'a'") + ref = ( + r"(?m)Failed casting a\s+to ListOf\(int\)\s+" + r"Error: invalid literal for int\(\) with base 10: 'a'" + ) with self.assertRaisesRegex(ValueError, ref): c.a = 'a' @@ -521,8 +614,9 @@ def test_ListOf(self): with self.assertRaises(ValueError): c.b = "'Hello, World" - c.declare('b1', ConfigValue(domain=ListOf( - str, string_lexer=None), default=None)) + c.declare( + 'b1', ConfigValue(domain=ListOf(str, string_lexer=None), default=None) + ) self.assertEqual(c.get('b1').domain_name(), 'ListOf[str]') self.assertEqual(c.b1, None) c.b1 = "'Hello, World'" @@ -540,8 +634,10 @@ def test_ListOf(self): c.c = 6 self.assertEqual(c.c, [6]) - ref=(r"(?m)Failed casting %s\s+to ListOf\(PositiveInt\)\s+" - r"Error: Expected positive int, but received %s") + ref = ( + r"(?m)Failed casting %s\s+to ListOf\(PositiveInt\)\s+" + r"Error: Expected positive int, but received %s" + ) with self.assertRaisesRegex(ValueError, ref % (6.5, 6.5)): c.c = 6.5 with self.assertRaisesRegex(ValueError, ref % (r"\[0\]", "0")): @@ -558,10 +654,12 @@ def test_Module(self): # Set using python module name to be imported c.a = 'os.path' import os.path + self.assertIs(c.a, os.path) # Set to python module object import os + c.a = os self.assertIs(c.a, os) @@ -575,22 +673,20 @@ def test_Module(self): def test_ConfigEnum(self): out = StringIO() with LoggingIntercept(out): + class TestEnum(ConfigEnum): ITEM_ONE = 1 ITEM_TWO = 2 + self.assertIn('The ConfigEnum base class is deprecated', out.getvalue()) - self.assertEqual(TestEnum.from_enum_or_string(1), - TestEnum.ITEM_ONE) - self.assertEqual(TestEnum.from_enum_or_string( - TestEnum.ITEM_TWO), TestEnum.ITEM_TWO) - self.assertEqual(TestEnum.from_enum_or_string('ITEM_ONE'), - TestEnum.ITEM_ONE) + self.assertEqual(TestEnum.from_enum_or_string(1), TestEnum.ITEM_ONE) + self.assertEqual( + TestEnum.from_enum_or_string(TestEnum.ITEM_TWO), TestEnum.ITEM_TWO + ) + self.assertEqual(TestEnum.from_enum_or_string('ITEM_ONE'), TestEnum.ITEM_ONE) cfg = ConfigDict() - cfg.declare('enum', ConfigValue( - default=2, - domain=TestEnum.from_enum_or_string - )) + cfg.declare('enum', ConfigValue(default=2, domain=TestEnum.from_enum_or_string)) self.assertEqual(cfg.enum, TestEnum.ITEM_TWO) cfg.enum = 'ITEM_ONE' self.assertEqual(cfg.enum, TestEnum.ITEM_ONE) @@ -601,7 +697,7 @@ class TestEnum(ConfigEnum): with self.assertRaisesRegex(ValueError, '.*3 is not a valid'): cfg.enum = 3 with self.assertRaisesRegex(ValueError, '.*invalid value'): - cfg.enum ='ITEM_THREE' + cfg.enum = 'ITEM_THREE' def test_DynamicImplicitDomain(self): def _rule(key, val): @@ -615,11 +711,10 @@ def _rule(key, val): if 'l' in key: raise ValueError('invalid key: %s' % key) return ans(val) - cfg = ConfigDict( - implicit=True, implicit_domain=DynamicImplicitDomain(_rule)) + + cfg = ConfigDict(implicit=True, implicit_domain=DynamicImplicitDomain(_rule)) self.assertEqual(len(cfg), 0) - test = cfg({'hi': {'option_i': 10}, - 'fast': {'option_f': 20}}) + test = cfg({'hi': {'option_i': 10}, 'fast': {'option_f': 20}}) self.assertEqual(len(test), 2) self.assertEqual(test.hi.value(), {'option_i': 10}) self.assertEqual(test.fast.value(), {'option_f': 20, 'option_s': '3'}) @@ -635,9 +730,13 @@ def _rule(key, val): self.assertEqual(fit.value(), {'option_f': 2, 'option_i': 1}) with self.assertRaisesRegex(ValueError, "invalid key: fail"): - test = cfg({'hi': {'option_i': 10}, - 'fast': {'option_f': 20}, - 'fail': {'option_f': 20}}) + test = cfg( + { + 'hi': {'option_i': 10}, + 'fast': {'option_f': 20}, + 'fail': {'option_f': 20}, + } + ) class TestImmutableConfigValue(unittest.TestCase): @@ -664,8 +763,8 @@ def test_immutable_config_value(self): self.assertEqual(config.a, 4) self.assertEqual(config.b, 5) with self.assertRaisesRegex( - ValueError, - 'Only ConfigValue instances can be marked immutable'): + ValueError, 'Only ConfigValue instances can be marked immutable' + ): locker = MarkImmutable(config.get('a'), config.b) self.assertEqual(type(config.get('a')), ConfigValue) config.a = 6 @@ -728,7 +827,6 @@ def test_immutable_config_value(self): class TestConfig(unittest.TestCase): - def setUp(self): # Save the original environment, then force a fixed column width # so tests do not fail on some platforms (notably, OSX) @@ -736,100 +834,129 @@ def setUp(self): os.environ["COLUMNS"] = "80" self.config = config = ConfigDict( - "Basic configuration for Flushing models", implicit=True) + "Basic configuration for Flushing models", implicit=True + ) net = config.declare('network', ConfigDict()) - net.declare('epanet file', - ConfigValue('Net3.inp', str, 'EPANET network inp file', - None)).declare_as_argument(dest='epanet') + net.declare( + 'epanet file', ConfigValue('Net3.inp', str, 'EPANET network inp file', None) + ).declare_as_argument(dest='epanet') sc = config.declare( 'scenario', - ConfigDict( - "Single scenario block", implicit=True, implicit_domain=str)) - sc.declare('scenario file', ConfigValue( - 'Net3.tsg', str, - 'Scenario generation file, see the TEVASIM documentation', - """This is the (long) documentation for the 'scenario file' + ConfigDict("Single scenario block", implicit=True, implicit_domain=str), + ) + sc.declare( + 'scenario file', + ConfigValue( + 'Net3.tsg', + str, + 'Scenario generation file, see the TEVASIM documentation', + """This is the (long) documentation for the 'scenario file' parameter. It contains multiple lines, and some internal formatting; like a bulleted list: - item 1 - item 2 - """)).declare_as_argument(group='Scenario definition') - sc.declare('merlion', ConfigValue( - default=False, - domain=bool, - description='Water quality model', - doc=""" + """, + ), + ).declare_as_argument(group='Scenario definition') + sc.declare( + 'merlion', + ConfigValue( + default=False, + domain=bool, + description='Water quality model', + doc=""" This is the (long) documentation for the 'merlion' parameter. It contains multiple lines, but no apparent internal - formatting; so the outputter should re-wrap everything.""" - )).declare_as_argument(group='Scenario definition') - sc.declare('detection', - ConfigValue( - # Note use of lambda for an "integer list domain" - [1, 2, 3], - lambda x: list(int(i) for i in x), - 'Sensor placement list, epanetID', - None)) - - config.declare('scenarios', ConfigList([], sc, - "List of scenario blocks", None)) - - config.declare('nodes', ConfigList( - [], ConfigValue(0, int, 'Node ID', None), "List of node IDs", None)) + formatting; so the outputter should re-wrap everything.""", + ), + ).declare_as_argument(group='Scenario definition') + sc.declare( + 'detection', + ConfigValue( + # Note use of lambda for an "integer list domain" + [1, 2, 3], + lambda x: list(int(i) for i in x), + 'Sensor placement list, epanetID', + None, + ), + ) + + config.declare('scenarios', ConfigList([], sc, "List of scenario blocks", None)) + + config.declare( + 'nodes', + ConfigList( + [], ConfigValue(0, int, 'Node ID', None), "List of node IDs", None + ), + ) im = config.declare('impact', ConfigDict()) - im.declare('metric', ConfigValue( - 'MC', str, 'Population or network based impact metric', None)) + im.declare( + 'metric', + ConfigValue('MC', str, 'Population or network based impact metric', None), + ) fl = config.declare('flushing', ConfigDict()) n = fl.declare('flush nodes', ConfigDict()) - n.declare('feasible nodes', ConfigValue( - 'ALL', str, 'ALL, NZD, NONE, list or filename', None)) - n.declare('infeasible nodes', ConfigValue( - 'NONE', str, 'ALL, NZD, NONE, list or filename', None)) - n.declare('max nodes', - ConfigValue(2, int, 'Maximum number of nodes to flush', None)) - n.declare('rate', ConfigValue(600, float, 'Flushing rate [gallons/min]', - None)) - n.declare('response time', ConfigValue( - 60, float, 'Time [min] between detection and flushing', None)) - n.declare('duration', ConfigValue(600, float, 'Time [min] for flushing', - None)) + n.declare( + 'feasible nodes', + ConfigValue('ALL', str, 'ALL, NZD, NONE, list or filename', None), + ) + n.declare( + 'infeasible nodes', + ConfigValue('NONE', str, 'ALL, NZD, NONE, list or filename', None), + ) + n.declare( + 'max nodes', ConfigValue(2, int, 'Maximum number of nodes to flush', None) + ) + n.declare('rate', ConfigValue(600, float, 'Flushing rate [gallons/min]', None)) + n.declare( + 'response time', + ConfigValue(60, float, 'Time [min] between detection and flushing', None), + ) + n.declare('duration', ConfigValue(600, float, 'Time [min] for flushing', None)) v = fl.declare('close valves', ConfigDict()) - v.declare('feasible pipes', ConfigValue( - 'ALL', str, 'ALL, DIAM min max [inch], NONE, list or filename', - None)) - v.declare('infeasible pipes', ConfigValue( - 'NONE', str, 'ALL, DIAM min max [inch], NONE, list or filename', - None)) - v.declare('max pipes', - ConfigValue(2, int, 'Maximum number of pipes to close', None)) - v.declare('response time', ConfigValue( - 60, float, 'Time [min] between detection and closing valves', None)) + v.declare( + 'feasible pipes', + ConfigValue( + 'ALL', str, 'ALL, DIAM min max [inch], NONE, list or filename', None + ), + ) + v.declare( + 'infeasible pipes', + ConfigValue( + 'NONE', str, 'ALL, DIAM min max [inch], NONE, list or filename', None + ), + ) + v.declare( + 'max pipes', ConfigValue(2, int, 'Maximum number of pipes to close', None) + ) + v.declare( + 'response time', + ConfigValue( + 60, float, 'Time [min] between detection and closing valves', None + ), + ) self._reference = { - 'network': { - 'epanet file': 'Net3.inp' - }, + 'network': {'epanet file': 'Net3.inp'}, 'scenario': { 'detection': [1, 2, 3], 'scenario file': 'Net3.tsg', - 'merlion': False + 'merlion': False, }, 'scenarios': [], 'nodes': [], - 'impact': { - 'metric': 'MC' - }, + 'impact': {'metric': 'MC'}, 'flushing': { 'close valves': { 'infeasible pipes': 'NONE', 'max pipes': 2, 'feasible pipes': 'ALL', - 'response time': 60.0 + 'response time': 60.0, }, 'flush nodes': { 'feasible nodes': 'ALL', @@ -837,10 +964,11 @@ def setUp(self): 'infeasible nodes': 'NONE', 'rate': 600.0, 'duration': 600.0, - 'response time': 60.0 + 'response time': 60.0, }, }, } + def tearDown(self): # Restore the original environment os.environ = self.original_environ @@ -917,8 +1045,7 @@ def test_template_3space(self): response time: 60.0 # Time [min] between detection and closing # valves """ - self._validateTemplate(self.config, reference_template, - indent_spacing=3) + self._validateTemplate(self.config, reference_template, indent_spacing=3) def test_template_4space(self): reference_template = """# Basic configuration for Flushing models @@ -950,8 +1077,7 @@ def test_template_4space(self): response time: 60.0 # Time [min] between detection and closing # valves """ - self._validateTemplate(self.config, reference_template, - indent_spacing=4) + self._validateTemplate(self.config, reference_template, indent_spacing=4) def test_template_3space_narrow(self): reference_template = """# Basic configuration for Flushing models @@ -984,8 +1110,9 @@ def test_template_3space_narrow(self): response time: 60.0 # Time [min] between detection and closing # valves """ - self._validateTemplate(self.config, reference_template, - indent_spacing=3, width=72) + self._validateTemplate( + self.config, reference_template, indent_spacing=3, width=72 + ) def test_display_default(self): reference = """network: @@ -1064,41 +1191,52 @@ def test_display_userdata_list(self): self.config['scenarios'].append() test = _display(self.config, 'userdata') sys.stdout.write(test) - self.assertEqual(test, """scenarios: + self.assertEqual( + test, + """scenarios: - -""") +""", + ) def test_display_userdata_list_nonDefault(self): self.config['scenarios'].append() self.config['scenarios'].append({'merlion': True, 'detection': []}) test = _display(self.config, 'userdata') sys.stdout.write(test) - self.assertEqual(test, """scenarios: + self.assertEqual( + test, + """scenarios: - - merlion: true detection: [] -""") +""", + ) def test_display_userdata_add_block(self): self.config.add("foo", ConfigValue(0, int, None, None)) self.config.add("bar", ConfigDict()) test = _display(self.config, 'userdata') sys.stdout.write(test) - self.assertEqual(test, """foo: 0 + self.assertEqual( + test, + """foo: 0 bar: -""") +""", + ) def test_display_userdata_add_block_nonDefault(self): self.config.add("foo", ConfigValue(0, int, None, None)) - self.config.add("bar", ConfigDict(implicit=True)) \ - .add("baz", ConfigDict()) + self.config.add("bar", ConfigDict(implicit=True)).add("baz", ConfigDict()) test = _display(self.config, 'userdata') sys.stdout.write(test) - self.assertEqual(test, """foo: 0 + self.assertEqual( + test, + """foo: 0 bar: baz: -""") +""", + ) def test_display_userdata_declare_block(self): self.config.declare("foo", ConfigValue(0, int, None, None)) @@ -1109,8 +1247,7 @@ def test_display_userdata_declare_block(self): def test_display_userdata_declare_block_nonDefault(self): self.config.declare("foo", ConfigValue(0, int, None, None)) - self.config.declare("bar", ConfigDict(implicit=True)) \ - .add("baz", ConfigDict()) + self.config.declare("bar", ConfigDict(implicit=True)).add("baz", ConfigDict()) test = _display(self.config, 'userdata') sys.stdout.write(test) self.assertEqual(test, "bar:\n baz:\n") @@ -1137,10 +1274,13 @@ def test_unusedUserValues_list_nonDefault(self): self.config['scenarios'].append({'merlion': True, 'detection': []}) test = '\n'.join(x.name(True) for x in self.config.unused_user_values()) sys.stdout.write(test) - self.assertEqual(test, """scenarios[0] + self.assertEqual( + test, + """scenarios[0] scenarios[1] scenarios[1].merlion -scenarios[1].detection""") +scenarios[1].detection""", + ) def test_unusedUserValues_list_nonDefault_listAccessed(self): self.config['scenarios'].append() @@ -1149,10 +1289,13 @@ def test_unusedUserValues_list_nonDefault_listAccessed(self): pass test = '\n'.join(x.name(True) for x in self.config.unused_user_values()) sys.stdout.write(test) - self.assertEqual(test, """scenarios[0] + self.assertEqual( + test, + """scenarios[0] scenarios[1] scenarios[1].merlion -scenarios[1].detection""") +scenarios[1].detection""", + ) def test_unusedUserValues_list_nonDefault_itemAccessed(self): self.config['scenarios'].append() @@ -1160,16 +1303,18 @@ def test_unusedUserValues_list_nonDefault_itemAccessed(self): self.config['scenarios'][1]['merlion'] test = '\n'.join(x.name(True) for x in self.config.unused_user_values()) sys.stdout.write(test) - self.assertEqual(test, """scenarios[0] -scenarios[1].detection""") + self.assertEqual( + test, + """scenarios[0] +scenarios[1].detection""", + ) def test_unusedUserValues_add_topBlock(self): self.config.add('foo', ConfigDict()) test = '\n'.join(x.name(True) for x in self.config.unused_user_values()) sys.stdout.write(test) self.assertEqual(test, "foo") - test = '\n'.join(x.name(True) for x in - self.config.foo.unused_user_values()) + test = '\n'.join(x.name(True) for x in self.config.foo.unused_user_values()) sys.stdout.write(test) self.assertEqual(test, "foo") @@ -1213,10 +1358,13 @@ def test_UserValues_list_nonDefault(self): self.config['scenarios'].append({'merlion': True, 'detection': []}) test = '\n'.join(x.name(True) for x in self.config.user_values()) sys.stdout.write(test) - self.assertEqual(test, """scenarios[0] + self.assertEqual( + test, + """scenarios[0] scenarios[1] scenarios[1].merlion -scenarios[1].detection""") +scenarios[1].detection""", + ) def test_UserValues_list_nonDefault_listAccessed(self): self.config['scenarios'].append() @@ -1225,10 +1373,13 @@ def test_UserValues_list_nonDefault_listAccessed(self): pass test = '\n'.join(x.name(True) for x in self.config.user_values()) sys.stdout.write(test) - self.assertEqual(test, """scenarios[0] + self.assertEqual( + test, + """scenarios[0] scenarios[1] scenarios[1].merlion -scenarios[1].detection""") +scenarios[1].detection""", + ) def test_UserValues_list_nonDefault_itemAccessed(self): self.config['scenarios'].append() @@ -1236,10 +1387,13 @@ def test_UserValues_list_nonDefault_itemAccessed(self): self.config['scenarios'][1]['merlion'] test = '\n'.join(x.name(True) for x in self.config.user_values()) sys.stdout.write(test) - self.assertEqual(test, """scenarios[0] + self.assertEqual( + test, + """scenarios[0] scenarios[1] scenarios[1].merlion -scenarios[1].detection""") +scenarios[1].detection""", + ) def test_UserValues_add_topBlock(self): self.config.add('foo', ConfigDict()) @@ -1299,12 +1453,11 @@ def test_parseDisplay_userdata_list(self): def test_parseDisplay_userdata_list_nonDefault(self): self.config['scenarios'].append() self.config['scenarios'].append({'merlion': True, 'detection': []}) - test = _display(self.config,'userdata') + test = _display(self.config, 'userdata') sys.stdout.write(test) self.assertEqual( - yaml_load(test), {'scenarios': - [None, {'merlion': True, - 'detection': []}]}) + yaml_load(test), {'scenarios': [None, {'merlion': True, 'detection': []}]} + ) @unittest.skipIf(not yaml_available, "Test requires PyYAML") def test_parseDisplay_userdata_add_block(self): @@ -1317,8 +1470,7 @@ def test_parseDisplay_userdata_add_block(self): @unittest.skipIf(not yaml_available, "Test requires PyYAML") def test_parseDisplay_userdata_add_block_nonDefault(self): self.config.add("foo", ConfigValue(0, int, None, None)) - self.config.add("bar", ConfigDict(implicit=True)) \ - .add("baz", ConfigDict()) + self.config.add("bar", ConfigDict(implicit=True)).add("baz", ConfigDict()) test = _display(self.config, 'userdata') sys.stdout.write(test) self.assertEqual(yaml_load(test), {'bar': {'baz': None}, foo: 0}) @@ -1334,8 +1486,7 @@ def test_parseDisplay_userdata_add_block(self): @unittest.skipIf(not yaml_available, "Test requires PyYAML") def test_parseDisplay_userdata_add_block_nonDefault(self): self.config.declare("foo", ConfigValue(0, int, None, None)) - self.config.declare("bar", ConfigDict(implicit=True)) \ - .add("baz", ConfigDict()) + self.config.declare("bar", ConfigDict(implicit=True)).add("baz", ConfigDict()) test = _display(self.config, 'userdata') sys.stdout.write(test) self.assertEqual(yaml_load(test), {'bar': {'baz': None}}) @@ -1364,25 +1515,27 @@ def test_value_ConfigList_complexPopulated(self): val = self.config['scenarios'].value() self.assertIs(type(val), list) self.assertEqual(len(val), 1) - self.assertEqual(val, [{'detection': [1, 2, 3], - 'merlion': False, - 'scenario file': 'Net3.tsg'}]) + self.assertEqual( + val, + [{'detection': [1, 2, 3], 'merlion': False, 'scenario file': 'Net3.tsg'}], + ) def test_name(self): self.config['scenarios'].append() self.assertEqual(self.config.name(), "") self.assertEqual(self.config['scenarios'].name(), "scenarios") self.assertEqual(self.config['scenarios'][0].name(), "[0]") - self.assertEqual(self.config['scenarios'][0].get('merlion').name(), - "merlion") + self.assertEqual(self.config['scenarios'][0].get('merlion').name(), "merlion") def test_name_fullyQualified(self): self.config['scenarios'].append() self.assertEqual(self.config.name(True), "") self.assertEqual(self.config['scenarios'].name(True), "scenarios") self.assertEqual(self.config['scenarios'][0].name(True), "scenarios[0]") - self.assertEqual(self.config['scenarios'][0].get('merlion').name(True), - "scenarios[0].merlion") + self.assertEqual( + self.config['scenarios'][0].get('merlion').name(True), + "scenarios[0].merlion", + ) def test_setValue_scalar(self): self.config['flushing']['flush nodes']['rate'] = 50 @@ -1391,8 +1544,7 @@ def test_setValue_scalar(self): self.assertEqual(val, 50.0) def test_setValue_scalar_badDomain(self): - with self.assertRaisesRegex( - ValueError, 'invalid value for configuration'): + with self.assertRaisesRegex(ValueError, 'invalid value for configuration'): self.config['flushing']['flush nodes']['rate'] = 'a' val = self.config['flushing']['flush nodes']['rate'] self.assertIs(type(val), float) @@ -1411,16 +1563,14 @@ def test_setValue_scalarList_withvalue(self): self.assertEqual(val, [6]) def test_setValue_scalarList_badDomain(self): - with self.assertRaisesRegex( - ValueError, 'invalid value for configuration'): + with self.assertRaisesRegex(ValueError, 'invalid value for configuration'): self.config['scenario']['detection'] = 50 val = self.config['scenario']['detection'] self.assertIs(type(val), list) self.assertEqual(val, [1, 2, 3]) def test_setValue_scalarList_badSubDomain(self): - with self.assertRaisesRegex( - ValueError, 'invalid value for configuration'): + with self.assertRaisesRegex(ValueError, 'invalid value for configuration'): self.config['scenario']['detection'] = [5.5, 'a'] val = self.config['scenario']['detection'] self.assertIs(type(val), list) @@ -1439,8 +1589,7 @@ def test_setValue_list_scalardomain_scalar(self): self.assertEqual(val, [10]) def test_setValue_list_badSubDomain(self): - with self.assertRaisesRegex( - ValueError, 'invalid value for configuration'): + with self.assertRaisesRegex(ValueError, 'invalid value for configuration'): self.config['nodes'] = [5, 'a'] val = self.config['nodes'].value() self.assertIs(type(val), list) @@ -1494,8 +1643,8 @@ def test_setItem_block_implicit_domain(self): def test_setValue_block_noImplicit(self): _test = {'epanet file': 'no_file.inp', 'foo': 1} with self.assertRaisesRegex( - ValueError, "key 'foo' not defined for ConfigDict " - "'network' and implicit"): + ValueError, "key 'foo' not defined for ConfigDict 'network' and implicit" + ): self.config['network'] = _test self.assertEqual(self._reference, self.config.value()) @@ -1525,14 +1674,13 @@ def test_setValue_block_implicit_domain(self): def test_setValue_block_badDomain(self): _test = {'merlion': True, 'detection': ['a'], 'foo': 1, 'a': 1} - with self.assertRaisesRegex( - ValueError, 'invalid value for configuration'): + with self.assertRaisesRegex(ValueError, 'invalid value for configuration'): self.config['scenario'] = _test self.assertEqual(self._reference, self.config.value()) with self.assertRaisesRegex( - ValueError, 'Expected dict value for scenario.set_value, ' - 'found list'): + ValueError, 'Expected dict value for scenario.set_value, found list' + ): self.config['scenario'] = [] self.assertEqual(self._reference, self.config.value()) @@ -1544,12 +1692,10 @@ def test_default_function(self): c.reset() self.assertEqual(c.value(), 10) - with self.assertRaisesRegex( - TypeError, r"\(\) .* argument"): + with self.assertRaisesRegex(TypeError, r"\(\) .* argument"): c = ConfigValue(default=lambda x: 10 * x, domain=int) - with self.assertRaisesRegex( - ValueError, 'invalid value for configuration'): + with self.assertRaisesRegex(ValueError, 'invalid value for configuration'): c = ConfigValue('a', domain=int) def test_set_default(self): @@ -1568,17 +1714,15 @@ def test_getItem_setItem(self): # a freshly-initialized object should not be accessed self.assertFalse(self.config._userAccessed) self.assertFalse(self.config._data['scenario']._userAccessed) - self.assertFalse(self.config._data['scenario']._data['detection']\ - ._userAccessed) + self.assertFalse(self.config._data['scenario']._data['detection']._userAccessed) # Getting a ConfigValue should not access it self.assertFalse(self.config['scenario'].get('detection')._userAccessed) - #... but should access the parent blocks traversed to get there + # ... but should access the parent blocks traversed to get there self.assertTrue(self.config._userAccessed) self.assertTrue(self.config._data['scenario']._userAccessed) - self.assertFalse(self.config._data['scenario']._data['detection']\ - ._userAccessed) + self.assertFalse(self.config._data['scenario']._data['detection']._userAccessed) # a freshly-initialized object should not be set self.assertFalse(self.config._userSet) @@ -1629,17 +1773,16 @@ def test_delattr(self): self.assertEqual(sorted(config._declared), []) with self.assertRaisesRegex( - AttributeError, - "'ConfigDict' object attribute 'get' is read-only"): + AttributeError, "'ConfigDict' object attribute 'get' is read-only" + ): del config.get with self.assertRaisesRegex( - AttributeError, - "'ConfigDict' object has no attribute 'foo'"): + AttributeError, "'ConfigDict' object has no attribute 'foo'" + ): del config.foo def test_generate_custom_documentation(self): - reference = \ -"""startBlock{} + reference = """startBlock{} startItem{network} endItem{network} startBlock{network} @@ -1652,7 +1795,7 @@ def test_generate_custom_documentation(self): endItem{scenario} startBlock{scenario} startItem{scenario file} -item{This is the (long) documentation for the 'scenario file' + item{This is the (long) documentation for the 'scenario file' parameter. It contains multiple lines, and some internal formatting; like a bulleted list: - item 1 @@ -1673,7 +1816,7 @@ def test_generate_custom_documentation(self): endItem{scenarios} startBlock{scenarios} startItem{scenario file} -item{This is the (long) documentation for the 'scenario file' + item{This is the (long) documentation for the 'scenario file' parameter. It contains multiple lines, and some internal formatting; like a bulleted list: - item 1 @@ -1743,49 +1886,113 @@ def test_generate_custom_documentation(self): endBlock{flushing} endBlock{} """ - test = self.config.generate_documentation( - block_start= "startBlock{%s}\n", - block_end= "endBlock{%s}\n", - item_start= "startItem{%s}\n", - item_body= "item{%s}\n", - item_end= "endItem{%s}\n", - ) - #print(test) + with LoggingIntercept() as LOG: + test = self.config.generate_documentation( + block_start="startBlock{%s}\n", + block_end="endBlock{%s}\n", + item_start="startItem{%s}\n", + item_body="item{%s}\n", + item_end="endItem{%s}\n", + ) + LOG = LOG.getvalue().replace('\n', ' ') + for name in ('block_start', 'block_end', 'item_start', 'item_end', 'item_body'): + self.assertIn( + f"Overriding '{name}' by passing strings to " + "generate_documentation is deprecated.", + LOG, + ) + self.maxDiff = None + # print(test) self.assertEqual(test, reference) - test = self.config.generate_documentation( - block_start= "startBlock\n", - block_end= "endBlock\n", - item_start= "startItem\n", - item_body= "item\n", - item_end= "endItem\n", - ) + with LoggingIntercept() as LOG: + test = self.config.generate_documentation( + format=String_ConfigFormatter( + block_start="startBlock{%s}\n", + block_end="endBlock{%s}\n", + item_start="startItem{%s}\n", + item_body="item{%s}\n", + item_end="endItem{%s}\n", + ) + ) + self.assertEqual(LOG.getvalue(), "") + self.maxDiff = None + # print(test) + self.assertEqual(test, reference) + + with LoggingIntercept() as LOG: + test = self.config.generate_documentation( + block_start="startBlock\n", + block_end="endBlock\n", + item_start="startItem\n", + item_body="item\n", + item_end="endItem\n", + ) - stripped_reference = re.sub(r'\{[^\}]*\}','',reference,flags=re.M) - #print(test) + stripped_reference = re.sub(r'\{[^\}]*\}', '', reference, flags=re.M) + # print(test) self.assertEqual(test, stripped_reference) + reference = """startBlock{} + startBlock{network} + startBlock{scenario} + startBlock{scenarios} + startBlock{impact} + startBlock{flushing} + startBlock{flush nodes} + startBlock{close valves} +""" + with LoggingIntercept() as LOG: + test = self.config.generate_documentation( + block_start="startBlock{%s}\n", + block_end="", + item_start="", + item_body="", + ) + LOG = LOG.getvalue().replace('\n', ' ') + for name in ('block_start', 'block_end', 'item_start', 'item_body'): + self.assertIn( + f"Overriding '{name}' by passing strings to " + "generate_documentation is deprecated.", + LOG, + ) + for name in 'item_end': + self.assertNotIn( + f"Overriding '{name}' by passing strings to " + "generate_documentation is deprecated.", + LOG, + ) + self.maxDiff = None + # print(test) + self.assertEqual(test, reference) def test_generate_latex_documentation(self): cfg = ConfigDict() - cfg.declare('int', ConfigValue( - domain=int, default=10, - doc="This is an integer parameter", - )) - cfg.declare('in', ConfigValue( - domain=In([1,3,5]), default=1, - description="This parameter must be in {1,3,5}", - )) - cfg.declare('lambda', ConfigValue( - domain=lambda x: int(x), default=1, - description="This is a float", - doc="This parameter is actually a float, but for testing " - "purposes we will use a lambda function for validation" - )) - cfg.declare('list', ConfigList( - domain=str, - description="A simple list of strings", - )) + cfg.declare( + 'int', + ConfigValue(domain=int, default=10, doc="This is an integer parameter"), + ) + cfg.declare( + 'in', + ConfigValue( + domain=In([1, 3, 5]), + default=1, + description="This parameter must be in {1,3,5}", + ), + ) + cfg.declare( + 'lambda', + ConfigValue( + domain=lambda x: int(x), + default=1, + description="This is a float", + doc="This parameter is actually a float, but for testing " + "purposes we will use a lambda function for validation", + ), + ) + cfg.declare( + 'list', ConfigList(domain=str, description="A simple list of strings") + ) self.assertEqual( cfg.generate_documentation(format='latex').strip(), """ @@ -1800,73 +2007,137 @@ def test_generate_latex_documentation(self): \\item[{list}]\\hfill \\\\A simple list of strings \\end{description} - """.strip()) + """.strip(), + ) + + def test_empty_ConfigFormatter(self): + cfg = ConfigDict() + cfg.declare('field', ConfigValue()) + with self.assertRaisesRegex( + ValueError, "Unrecognized documentation formatter, 'unknown'" + ): + cfg.generate_documentation(format="unknown") + + self.assertEqual(cfg.generate_documentation(format=ConfigFormatter()), '') + + def test_generate_documentation_StringFormatter(self): + # This test verifies behavior with simple StringFormatters (in + # particular, the handling of newlines and indentation reported + # in #IDAES/idaes-pse#1191) + CONFIG = ExampleConfig() + doc = CONFIG.generate_documentation( + format=String_ConfigFormatter( + block_start="", # %s\n", + block_end="", + item_start="%s\n", + item_body="%s", + item_end="\n", + ), + indent_spacing=4, + width=66, + ) + + # print(doc) + ref = """ option_1 + The first configuration option + + solver_options + + solver_option_1 + The first solver configuration option + + solver_option_2 + The second solver configuration option + + With a very long line containing + wrappable text in a long, silly paragraph + with little actual information. + #) but a bulleted list + #) with two bullets + solver_option_3 + The third solver configuration option + + This has a leading newline and a very long line containing + wrappable text in a long, silly paragraph with + little actual information. + + .. and_a_list:: + #) but a bulleted list + #) with two bullets + + option_2 + The second solver configuration option with a very long + line containing wrappable text in a long, silly paragraph + with little actual information. + +""" + self.assertEqual( + [_.rstrip() for _ in ref.splitlines()], + [_.rstrip() for _ in doc.splitlines()], + ) def test_block_get(self): self.assertTrue('scenario' in self.config) - self.assertNotEqual( - self.config.get('scenario', 'bogus').value(), 'bogus') + self.assertNotEqual(self.config.get('scenario', 'bogus').value(), 'bogus') self.assertFalse('fubar' in self.config) - self.assertEqual( - self.config.get('fubar', 'bogus').value(), 'bogus') + self.assertEqual(self.config.get('fubar', 'bogus').value(), 'bogus') cfg = ConfigDict() cfg.declare('foo', ConfigValue(1, int)) - self.assertEqual( cfg.get('foo', 5).value(), 1 ) - self.assertEqual( len(cfg), 1 ) - self.assertIs( cfg.get('bar'), None ) - self.assertEqual( cfg.get('bar',None).value(), None ) - self.assertEqual( len(cfg), 1 ) + self.assertEqual(cfg.get('foo', 5).value(), 1) + self.assertEqual(len(cfg), 1) + self.assertIs(cfg.get('bar'), None) + self.assertEqual(cfg.get('bar', None).value(), None) + self.assertEqual(len(cfg), 1) cfg = ConfigDict(implicit=True) cfg.declare('foo', ConfigValue(1, int)) - self.assertEqual( cfg.get('foo', 5).value(), 1 ) - self.assertEqual( len(cfg), 1 ) - self.assertEqual( cfg.get('bar', 5).value(), 5 ) - self.assertEqual( len(cfg), 1 ) - self.assertIs( cfg.get('baz'), None ) - self.assertIs( cfg.get('baz', None).value(), None ) - self.assertEqual( len(cfg), 1 ) - - cfg = ConfigDict( implicit=True, - implicit_domain=ConfigList(domain=str) ) + self.assertEqual(cfg.get('foo', 5).value(), 1) + self.assertEqual(len(cfg), 1) + self.assertEqual(cfg.get('bar', 5).value(), 5) + self.assertEqual(len(cfg), 1) + self.assertIs(cfg.get('baz'), None) + self.assertIs(cfg.get('baz', None).value(), None) + self.assertEqual(len(cfg), 1) + + cfg = ConfigDict(implicit=True, implicit_domain=ConfigList(domain=str)) cfg.declare('foo', ConfigValue(1, int)) - self.assertEqual( cfg.get('foo', 5).value(), 1 ) - self.assertEqual( len(cfg), 1 ) - self.assertEqual( cfg.get('bar', [5]).value(), ['5'] ) - self.assertEqual( len(cfg), 1 ) - self.assertIs( cfg.get('baz'), None ) - self.assertEqual( cfg.get('baz', None).value(), [] ) - self.assertEqual( len(cfg), 1 ) + self.assertEqual(cfg.get('foo', 5).value(), 1) + self.assertEqual(len(cfg), 1) + self.assertEqual(cfg.get('bar', [5]).value(), ['5']) + self.assertEqual(len(cfg), 1) + self.assertIs(cfg.get('baz'), None) + self.assertEqual(cfg.get('baz', None).value(), []) + self.assertEqual(len(cfg), 1) def test_setdefault(self): cfg = ConfigDict() cfg.declare('foo', ConfigValue(1, int)) - self.assertEqual( cfg.setdefault('foo', 5).value(), 1 ) - self.assertEqual( len(cfg), 1 ) - self.assertRaisesRegex(ValueError, '.*disallows implicit entries', - cfg.setdefault, 'bar', 0) - self.assertEqual( len(cfg), 1 ) + self.assertEqual(cfg.setdefault('foo', 5).value(), 1) + self.assertEqual(len(cfg), 1) + self.assertRaisesRegex( + ValueError, '.*disallows implicit entries', cfg.setdefault, 'bar', 0 + ) + self.assertEqual(len(cfg), 1) cfg = ConfigDict(implicit=True) cfg.declare('foo', ConfigValue(1, int)) - self.assertEqual( cfg.setdefault('foo', 5).value(), 1 ) - self.assertEqual( len(cfg), 1 ) - self.assertEqual( cfg.setdefault('bar', 5).value(), 5 ) - self.assertEqual( len(cfg), 2 ) - self.assertEqual( cfg.setdefault('baz').value(), None ) - self.assertEqual( len(cfg), 3 ) - - cfg = ConfigDict( implicit=True, - implicit_domain=ConfigList(domain=str) ) + self.assertEqual(cfg.setdefault('foo', 5).value(), 1) + self.assertEqual(len(cfg), 1) + self.assertEqual(cfg.setdefault('bar', 5).value(), 5) + self.assertEqual(len(cfg), 2) + self.assertEqual(cfg.setdefault('baz').value(), None) + self.assertEqual(len(cfg), 3) + + cfg = ConfigDict(implicit=True, implicit_domain=ConfigList(domain=str)) cfg.declare('foo', ConfigValue(1, int)) - self.assertEqual( cfg.setdefault('foo', 5).value(), 1 ) - self.assertEqual( len(cfg), 1 ) - self.assertEqual( cfg.setdefault('bar', [5]).value(), ['5'] ) - self.assertEqual( len(cfg), 2 ) - self.assertEqual( cfg.setdefault('baz').value(), [] ) - self.assertEqual( len(cfg), 3 ) + self.assertEqual(cfg.setdefault('foo', 5).value(), 1) + self.assertEqual(len(cfg), 1) + self.assertEqual(cfg.setdefault('bar', [5]).value(), ['5']) + self.assertEqual(len(cfg), 2) + self.assertEqual(cfg.setdefault('baz').value(), []) + self.assertEqual(len(cfg), 3) def test_block_keys(self): ref = ['scenario file', 'merlion', 'detection'] @@ -1916,8 +2187,11 @@ def test_block_values(self): self.assertEqual(list(valueiter), ref) def test_block_items(self): - ref = [('scenario file', 'Net3.tsg'), ('merlion', False), - ('detection', [1, 2, 3])] + ref = [ + ('scenario file', 'Net3.tsg'), + ('merlion', False), + ('detection', [1, 2, 3]), + ] # items iterator items = self.config['scenario'].items() @@ -1937,7 +2211,7 @@ def test_block_items(self): self.assertEqual(list(itemiter), ref) def test_value(self): - #print(self.config.value()) + # print(self.config.value()) self.assertEqual(self._reference, self.config.value()) def test_list_manipulation(self): @@ -1946,25 +2220,31 @@ def test_list_manipulation(self): os = StringIO() with LoggingIntercept(os): self.config['scenarios'].add() - self.assertIn("ConfigList.add() has been deprecated. Use append()", - os.getvalue()) + self.assertIn( + "ConfigList.add() has been deprecated. Use append()", os.getvalue() + ) self.assertEqual(len(self.config['scenarios']), 2) self.config['scenarios'].append({'merlion': True, 'detection': []}) self.assertEqual(len(self.config['scenarios']), 3) test = _display(self.config, 'userdata') sys.stdout.write(test) - self.assertEqual(test, """scenarios: + self.assertEqual( + test, + """scenarios: - - - merlion: true detection: [] -""") +""", + ) self.config['scenarios'][0] = {'merlion': True, 'detection': []} self.assertEqual(len(self.config['scenarios']), 3) test = _display(self.config, 'userdata') sys.stdout.write(test) - self.assertEqual(test, """scenarios: + self.assertEqual( + test, + """scenarios: - merlion: true detection: [] @@ -1972,10 +2252,13 @@ def test_list_manipulation(self): - merlion: true detection: [] -""") +""", + ) test = _display(self.config['scenarios']) sys.stdout.write(test) - self.assertEqual(test, """- + self.assertEqual( + test, + """- scenario file: Net3.tsg merlion: true detection: [] @@ -1987,7 +2270,8 @@ def test_list_manipulation(self): scenario file: Net3.tsg merlion: true detection: [] -""") +""", + ) def test_list_get(self): X = ConfigDict(implicit=True) @@ -1995,12 +2279,11 @@ def test_list_get(self): self.assertEqual(_display(X, 'userdata'), "") with self.assertRaisesRegex(IndexError, 'list index out of range'): self.assertIs(X.config.get(0), None) - self.assertIs(X.config.get(0,None).value(), None ) + self.assertIs(X.config.get(0, None).value(), None) val = X.config.get(0, 1) self.assertIsInstance(val, ConfigValue) self.assertEqual(val.value(), 1) - self.assertRaisesRegex( - IndexError, '.*out of range', X.config.__getitem__, 0 ) + self.assertRaisesRegex(IndexError, '.*out of range', X.config.__getitem__, 0) # get() shouldn't change the userdata flag... self.assertEqual(_display(X, 'userdata'), "") @@ -2020,15 +2303,13 @@ def test_list_get(self): with self.assertRaisesRegex(IndexError, 'list index out of range'): self.assertIs(X.config.get(1), None) - self.assertRaisesRegex( - IndexError, '.*out of range', X.config.__getitem__, 1) + self.assertRaisesRegex(IndexError, '.*out of range', X.config.__getitem__, 1) # this should ONLY change the userSet flag on the item (and not # the list) X.config.get(0).set_value(20) self.assertEqual(_display(X, 'userdata'), "config:\n - 20\n") - self.assertEqual([_.name(True) for _ in X.user_values()], - ["config[0]"]) + self.assertEqual([_.name(True) for _ in X.user_values()], ["config[0]"]) # this should ONLY change the userSet flag on the item (and not # the list) @@ -2036,8 +2317,7 @@ def test_list_get(self): X.declare('config', ConfigList([42], int)) X.config[0] = 20 self.assertEqual(_display(X, 'userdata'), "config:\n - 20\n") - self.assertEqual([_.name(True) for _ in X.user_values()], - ["config[0]"]) + self.assertEqual([_.name(True) for _ in X.user_values()], ["config[0]"]) # this should ONLY change the userSet flag on the item (and not # the list) @@ -2045,8 +2325,7 @@ def test_list_get(self): X.declare('config', ConfigList([42], int)) X.config.append(20) self.assertEqual(_display(X, 'userdata'), "config:\n - 20\n") - self.assertEqual([_.name(True) for _ in X.user_values()], - ["config[1]"]) + self.assertEqual([_.name(True) for _ in X.user_values()], ["config[1]"]) # This should change both... because the [42] was "declared" as # the default for the List, it will *not* be a user-set value @@ -2054,14 +2333,17 @@ def test_list_get(self): X.add('config', ConfigList([42], int)) X.config.append(20) self.assertEqual(_display(X, 'userdata'), "config:\n - 20\n") - self.assertEqual([_.name(True) for _ in X.user_values()], - ["config", "config[1]"]) + self.assertEqual( + [_.name(True) for _ in X.user_values()], ["config", "config[1]"] + ) def test_implicit_entries(self): config = ConfigDict() with self.assertRaisesRegex( - ValueError, "Key 'test' not defined in ConfigDict '' " - "and Dict disallows implicit entries"): + ValueError, + "Key 'test' not defined in ConfigDict '' " + "and Dict disallows implicit entries", + ): config['test'] = 5 config = ConfigDict(implicit=True) @@ -2069,8 +2351,7 @@ def test_implicit_entries(self): config.declare('formal', ConfigValue(42, int)) config['implicit_2'] = 5 self.assertEqual(3, len(config)) - self.assertEqual(['implicit_1', 'formal', 'implicit_2'], - list(config.keys())) + self.assertEqual(['implicit_1', 'formal', 'implicit_2'], list(config.keys())) config.reset() self.assertEqual(1, len(config)) self.assertEqual(['formal'], list(config.keys())) @@ -2080,19 +2361,22 @@ def test_argparse_help(self): self.config.initialize_argparse(parser) help = parser.format_help() self.assertIn( -""" -h, --help show this help message and exit + """ -h, --help show this help message and exit --epanet-file EPANET EPANET network inp file Scenario definition: --scenario-file STR Scenario generation file, see the TEVASIM documentation --merlion Water quality model -""", help) +""", + help, + ) def test_argparse_help_implicit_disable(self): - self.config['scenario'].declare('epanet', ConfigValue( - True, bool, 'Use EPANET as the Water quality model', - None)).declare_as_argument(group='Scenario definition') + self.config['scenario'].declare( + 'epanet', + ConfigValue(True, bool, 'Use EPANET as the Water quality model', None), + ).declare_as_argument(group='Scenario definition') parser = argparse.ArgumentParser(prog='tester') self.config.initialize_argparse(parser) help = parser.format_help() @@ -2107,7 +2391,9 @@ def test_argparse_help_implicit_disable(self): documentation --merlion Water quality model --disable-epanet [DON'T] Use EPANET as the Water quality model -""", help) +""", + help, + ) def test_argparse_import(self): parser = argparse.ArgumentParser(prog='tester') @@ -2125,8 +2411,9 @@ def test_argparse_import(self): self.assertEqual(1, len(vars(args))) leftovers = self.config.import_argparse(args) self.assertEqual(0, len(vars(args))) - self.assertEqual(['scenario.merlion'], - [x.name(True) for x in self.config.user_values()]) + self.assertEqual( + ['scenario.merlion'], [x.name(True) for x in self.config.user_values()] + ) args = parser.parse_args(['--merlion', '--epanet-file', 'foo']) self.config.reset() @@ -2135,8 +2422,10 @@ def test_argparse_import(self): self.assertEqual(2, len(vars(args))) leftovers = self.config.import_argparse(args) self.assertEqual(1, len(vars(args))) - self.assertEqual(['network.epanet file', 'scenario.merlion'], - [x.name(True) for x in self.config.user_values()]) + self.assertEqual( + ['network.epanet file', 'scenario.merlion'], + [x.name(True) for x in self.config.user_values()], + ) self.assertTrue(self.config['scenario']['merlion']) self.assertEqual('foo', self.config['network']['epanet file']) @@ -2145,18 +2434,21 @@ def test_argparse_subparsers(self): subp = parser.add_subparsers(title="Subcommands").add_parser('flushing') # Declare an argument by passing in the name of the subparser - self.config['flushing']['flush nodes'].get( - 'duration').declare_as_argument(group='flushing') + self.config['flushing']['flush nodes'].get('duration').declare_as_argument( + group='flushing' + ) # Declare an argument by passing in the name of the subparser # and an implicit group - self.config['flushing']['flush nodes'].get('feasible nodes') \ - .declare_as_argument( group=('flushing','Node information') ) + self.config['flushing']['flush nodes'].get( + 'feasible nodes' + ).declare_as_argument(group=('flushing', 'Node information')) # Declare an argument by passing in the subparser and a group name - self.config['flushing']['flush nodes'].get('infeasible nodes') \ - .declare_as_argument( group=(subp,'Node information') ) + self.config['flushing']['flush nodes'].get( + 'infeasible nodes' + ).declare_as_argument(group=(subp, 'Node information')) self.config.initialize_argparse(parser) - # Note that the output for argparse changes in diffeent versions + # Note that the output for argparse changes in different versions # (in particular, "options:" vs "optional arguments:"). We will # only test for a subset of the output that should stay consistent. help = parser.format_help() @@ -2172,7 +2464,9 @@ def test_argparse_subparsers(self): --scenario-file STR Scenario generation file, see the TEVASIM documentation --merlion Water quality model -""", help) +""", + help, + ) help = subp.format_help() self.assertIn( @@ -2184,7 +2478,9 @@ def test_argparse_subparsers(self): --feasible-nodes STR ALL, NZD, NONE, list or filename --infeasible-nodes STR ALL, NZD, NONE, list or filename -""", help) +""", + help, + ) def test_argparse_lists(self): c = ConfigDict() @@ -2195,59 +2491,68 @@ def test_argparse_lists(self): self.assertEqual(c.sub_dict.domain_name(), 'sub-dict') self.assertEqual(c.sub_dict.get('a').domain_name(), 'int') self.assertEqual(c.sub_dict.get('b').domain_name(), '') - c.declare( - 'lst', - ConfigList(domain=int)).declare_as_argument(action='append') - c.declare( - 'sub', - ConfigList(domain=c.sub_dict)).declare_as_argument(action='append') - c.declare('listof', ConfigValue( - domain=ListOf(int))).declare_as_argument() + c.declare('lst', ConfigList(domain=int)).declare_as_argument(action='append') + c.declare('sub', ConfigList(domain=c.sub_dict)).declare_as_argument( + action='append' + ) + c.declare('listof', ConfigValue(domain=ListOf(int))).declare_as_argument() parser = argparse.ArgumentParser(prog='tester') c.initialize_argparse(parser) - # Note that the output for argparse changes in diffeent versions + # Note that the output for argparse changes in different versions # (in particular, "options:" vs "optional arguments:"). We will # only test for a subset of the output that should stay consistent. - self.assertIn(""" + self.assertIn( + """ -h, --help show this help message and exit --lst INT --sub SUB-DICT - --listof LISTOF[INT]""".strip(), parser.format_help()) + --listof LISTOF[INT]""".strip(), + parser.format_help(), + ) - args = parser.parse_args([ - '--lst', '42', '--lst', '1', - '--sub', 'a=4', '--sub', 'b=12,a:0', - '--listof', '3,2 4' - ]) + args = parser.parse_args( + [ + '--lst', + '42', + '--lst', + '1', + '--sub', + 'a=4', + '--sub', + 'b=12,a:0', + '--listof', + '3,2 4', + ] + ) leftovers = c.import_argparse(args) self.assertEqual(c.lst.value(), [42, 1]) - self.assertEqual(c.sub.value(), [{'a':4, 'b':None}, {'a':0, 'b':'12'}]) + self.assertEqual(c.sub.value(), [{'a': 4, 'b': None}, {'a': 0, 'b': '12'}]) self.assertEqual(c.listof, [3, 2, 4]) args = parser.parse_args(['--sub', 'b=12,a 0']) with self.assertRaisesRegex( - ValueError, r"(?s)invalid value for configuration 'sub':.*" - r"Expected ':' or '=' but found '0' at Line 1 Column 8"): + ValueError, + r"(?s)invalid value for configuration 'sub':.*" + r"Expected ':' or '=' but found '0' at Line 1 Column 8", + ): leftovers = c.import_argparse(args) args = parser.parse_args(['--sub', 'b=']) with self.assertRaisesRegex( - ValueError, r"(?s)Expected value following '=' " - "but encountered end of string"): + ValueError, + r"(?s)Expected value following '=' but encountered end of string", + ): leftovers = c.import_argparse(args) args = parser.parse_args(['--sub', 'b']) with self.assertRaisesRegex( - ValueError, r"(?s)Expected ':' or '=' " - "but encountered end of string"): + ValueError, r"(?s)Expected ':' or '=' but encountered end of string" + ): leftovers = c.import_argparse(args) - def test_getattr_setattr(self): config = ConfigDict() - foo = config.declare( - 'foo', ConfigDict( - implicit=True, implicit_domain=int)) + foo = config.declare('foo', ConfigDict(implicit=True, implicit_domain=int)) foo.declare('explicit_bar', ConfigValue(0, int)) self.assertEqual(1, len(foo)) @@ -2264,15 +2569,15 @@ def test_getattr_setattr(self): self.assertEqual(20, foo.implicit_bar) with self.assertRaisesRegex( - ValueError, "Key 'baz' not defined in ConfigDict '' " - "and Dict disallows implicit entries"): + ValueError, + "Key 'baz' not defined in ConfigDict '' " + "and Dict disallows implicit entries", + ): config.baz = 10 - with self.assertRaisesRegex( - AttributeError, "Unknown attribute 'baz'"): + with self.assertRaisesRegex(AttributeError, "Unknown attribute 'baz'"): a = config.baz - def test_nonString_keys(self): config = ConfigDict(implicit=True) config.declare(5, ConfigValue(50, int)) @@ -2301,7 +2606,7 @@ def test_nonString_keys(self): self.assertEqual(_display(config), "5: 500\n1: 10\n") - config.set_value({5:5000}) + config.set_value({5: 5000}) self.assertIn(1, config) self.assertIn('1', config) self.assertEqual(config[1], 10) @@ -2321,7 +2626,7 @@ def test_set_value(self): config.declare('a_c', ConfigValue()) config.declare('a d e', ConfigValue()) - config.set_value({'a_b':10, 'a_c': 20, 'a_d_e': 30}) + config.set_value({'a_b': 10, 'a_c': 20, 'a_d_e': 30}) self.assertEqual(config.a_b, 10) self.assertEqual(config.a_c, 20) self.assertEqual(config.a_d_e, 30) @@ -2366,10 +2671,9 @@ def test_name_mapping(self): self.assertIn('g_h', config) def test_call_options(self): - config = ConfigDict(description="base description", - doc="base doc", - visibility=1, - implicit=True) + config = ConfigDict( + description="base description", doc="base doc", visibility=1, implicit=True + ) config.declare("a", ConfigValue(domain=int, doc="a doc", default=1)) config.declare("b", config.get("a")(2)) config.declare("c", config.get("a")(domain=float, doc="c doc")) @@ -2407,9 +2711,7 @@ def test_call_options(self): self.assertEqual(simple_copy._description, "base description") self.assertEqual(simple_copy._visibility, 1) - mod_copy = config(description="new description", - doc="new doc", - visibility=0) + mod_copy = config(description="new description", doc="new doc", visibility=0) reference_template = """# new description a: 1 b: 2 @@ -2424,10 +2726,12 @@ def test_pickle(self): def anon_domain(domain): def cast(x): return domain(x) + return cast + cfg = ConfigDict() cfg.declare('int', ConfigValue(domain=int, default=10)) - cfg.declare('in', ConfigValue(domain=In([1,3,5]), default=1)) + cfg.declare('in', ConfigValue(domain=In([1, 3, 5]), default=1)) cfg.declare('anon', ConfigValue(domain=anon_domain(int), default=1)) cfg.declare('lambda', ConfigValue(domain=lambda x: int(x), default=1)) cfg.declare('list', ConfigList(domain=str)) @@ -2435,27 +2739,23 @@ def cast(x): out = StringIO() with LoggingIntercept(out, module=None): cfg.set_value( - {'int': 100, 'in': 3, 'anon': 2.5, 'lambda': 1.5, - 'list': [2, 'a']} + {'int': 100, 'in': 3, 'anon': 2.5, 'lambda': 1.5, 'list': [2, 'a']} ) self.assertEqual( cfg.value(), - {'int': 100, 'in': 3, 'anon': 2, 'lambda': 1, - 'list': ['2', 'a']} + {'int': 100, 'in': 3, 'anon': 2, 'lambda': 1, 'list': ['2', 'a']}, ) cfg2 = pickle.loads(pickle.dumps(cfg)) self.assertEqual( cfg2.value(), - {'int': 100, 'in': 3, 'anon': 2, 'lambda': 1, - 'list': ['2', 'a']} + {'int': 100, 'in': 3, 'anon': 2, 'lambda': 1, 'list': ['2', 'a']}, ) cfg2.list.append(10) self.assertEqual( cfg2.value(), - {'int': 100, 'in': 3, 'anon': 2, 'lambda': 1, - 'list': ['2', 'a', '10']} + {'int': 100, 'in': 3, 'anon': 2, 'lambda': 1, 'list': ['2', 'a', '10']}, ) # No warnings due to anything above. self.assertEqual(out.getvalue(), "") @@ -2473,7 +2773,7 @@ def cast(x): if type(cfg2.get('anon')._domain) is _UnpickleableDomain: self.assertIn( "ConfigValue 'anon' was pickled with an unpicklable domain", - out.getvalue() + out.getvalue(), ) self.assertEqual(cfg2['anon'], 5.5) else: @@ -2487,7 +2787,7 @@ def cast(x): if type(cfg2.get('lambda')._domain) is _UnpickleableDomain: self.assertIn( "ConfigValue 'lambda' was pickled with an unpicklable domain", - out.getvalue() + out.getvalue(), ) self.assertEqual(cfg2['lambda'], 6.5) else: @@ -2497,8 +2797,10 @@ def cast(x): def test_unknowable_types(self): obj = ConfigValue() + def local_fcn(): pass + try: pickle.dumps(local_fcn) local_picklable = True @@ -2527,7 +2829,9 @@ def test_known_types(self): def local_fcn(): class LocalClass(object): pass + return LocalClass + local_class = local_fcn() self.assertIsNone(_picklable.known.get(local_class, None)) @@ -2556,7 +2860,6 @@ class LocalClass(object): # "known" dict self.assertNotIn(type, _picklable.known) - def test_self_assignment(self): cfg = ConfigDict() self.assertNotIn('d', dir(cfg)) @@ -2567,22 +2870,21 @@ def test_self_assignment(self): # test that dir is sorted self.assertEqual(dir(cfg), sorted(dir(cfg))) # check that inconsistent name is flagged - with self.assertRaisesRegex( - ValueError, "Key 'b' not defined in ConfigDict ''"): + with self.assertRaisesRegex(ValueError, "Key 'b' not defined in ConfigDict ''"): cfg.b = cfg.declare('bb', ConfigValue(2, int)) - def test_declaration_errors(self): cfg = ConfigDict() cfg.b = cfg.declare('b', ConfigValue(2, int)) with self.assertRaisesRegex( - ValueError, "duplicate config 'b' defined for ConfigDict ''"): + ValueError, "duplicate config 'b' defined for ConfigDict ''" + ): cfg.b = cfg.declare('b', ConfigValue(2, int)) with self.assertRaisesRegex( - ValueError, "config 'dd' is already assigned to ConfigDict ''"): + ValueError, "config 'dd' is already assigned to ConfigDict ''" + ): cfg.declare('dd', cfg.get('b')) - def test_declare_from(self): cfg = ConfigDict() cfg.declare('a', ConfigValue(default=1, domain=int)) @@ -2599,14 +2901,132 @@ def test_declare_from(self): self.assertNotIn('a', cfg2) with self.assertRaisesRegex( - ValueError, "passed a block with a duplicate field, 'b'"): + ValueError, "passed a block with a duplicate field, 'b'" + ): cfg2.declare_from(cfg) - with self.assertRaisesRegex( - ValueError, "only accepts other ConfigDicts"): + with self.assertRaisesRegex(ValueError, "only accepts other ConfigDicts"): cfg2.declare_from({}) + def test_docstring_decorator(self): + self.maxDiff = None + + @document_kwargs_from_configdict('CONFIG') + class ExampleClass(object): + CONFIG = ExampleConfig() + + @document_kwargs_from_configdict(CONFIG) + def __init__(self): + "A simple docstring" + + @document_kwargs_from_configdict( + CONFIG, doc="A simple docstring\n", visibility=USER_OPTION + ) + def fcn(self): + pass + + ref = """ +Keyword Arguments +----------------- +option_1: int, default=5 + The first configuration option + +solver_options: dict, optional + + solver_option_1: float, default=1 + [DEVELOPER option] + + The first solver configuration option + + solver_option_2: float, default=1 + The second solver configuration option + + With a very long line containing wrappable text in a long, silly + paragraph with little actual information. + #) but a bulleted list + #) with two bullets + + solver_option_3: float, default=1 + The third solver configuration option + + This has a leading newline and a very long line containing + wrappable text in a long, silly paragraph with little actual + information. + + .. and_a_list:: + #) but a bulleted list + #) with two bullets + +option_2: int, default=5 + The second solver configuration option with a very long line + containing wrappable text in a long, silly paragraph with little + actual information.""" + self.assertEqual(ExampleClass.__doc__, ref.lstrip()) + self.assertEqual(ExampleClass.__init__.__doc__, "A simple docstring\n" + ref) + + ref = """ +Keyword Arguments +----------------- +option_1: int, default=5 + The first configuration option + +solver_options: dict, optional + + solver_option_2: float, default=1 + The second solver configuration option + + With a very long line containing wrappable text in a long, silly + paragraph with little actual information. + #) but a bulleted list + #) with two bullets + + solver_option_3: float, default=1 + The third solver configuration option + + This has a leading newline and a very long line containing + wrappable text in a long, silly paragraph with little actual + information. + + .. and_a_list:: + #) but a bulleted list + #) with two bullets + +option_2: int, default=5 + The second solver configuration option with a very long line + containing wrappable text in a long, silly paragraph with little + actual information.""" + self.assertEqual(ExampleClass.fcn.__doc__, "A simple docstring\n" + ref) + + ref = """ +Keyword Arguments +----------------- +option_1: int, default=5 + The first configuration option + +solver_options: dict, optional + + solver_option_2: float, default=1 + The second solver configuration option + + With a very long line containing wrappable text in a long, silly paragraph with little actual information. + #) but a bulleted list + #) with two bullets + + solver_option_3: float, default=1 + The third solver configuration option + + This has a leading newline and a very long line containing wrappable text in a long, silly paragraph with little actual information. + + .. and_a_list:: + #) but a bulleted list + #) with two bullets + +option_2: int, default=5 + The second solver configuration option with a very long line containing wrappable text in a long, silly paragraph with little actual information.""" + with LoggingIntercept() as LOG: + self.assertEqual(add_docstring_list("", ExampleClass.CONFIG), ref) + self.assertIn('add_docstring_list is deprecated', LOG.getvalue()) + if __name__ == "__main__": unittest.main() - diff --git a/pyomo/common/tests/test_dependencies.py b/pyomo/common/tests/test_dependencies.py index b271555768f..65058e01812 100644 --- a/pyomo/common/tests/test_dependencies.py +++ b/pyomo/common/tests/test_dependencies.py @@ -16,37 +16,49 @@ from pyomo.common.log import LoggingIntercept from pyomo.common.dependencies import ( - attempt_import, ModuleUnavailable, DeferredImportModule, - DeferredImportIndicator, DeferredImportError, - _DeferredAnd, _DeferredOr, check_min_version, - dill, dill_available + attempt_import, + ModuleUnavailable, + DeferredImportModule, + DeferredImportIndicator, + DeferredImportError, + UnavailableClass, + _DeferredAnd, + _DeferredOr, + check_min_version, + dill, + dill_available, ) import pyomo.common.tests.dep_mod as dep_mod from . import deps + # global objects for the submodule tests def _finalize_pyo(module, available): if available: import pyomo.core + class TestDependencies(unittest.TestCase): def test_import_error(self): module_obj, module_available = attempt_import( '__there_is_no_module_named_this__', 'Testing import of a non-existent module', - defer_check=False) + defer_check=False, + ) self.assertFalse(module_available) with self.assertRaisesRegex( - DeferredImportError, 'Testing import of a non-existent module'): + DeferredImportError, 'Testing import of a non-existent module' + ): module_obj.try_to_call_a_method() # Note that some attribute will intentionally raise # AttributeErrors and NOT DeferredImportError: with self.assertRaisesRegex( - AttributeError, "'ModuleUnavailable' object has no " - "attribute '__sphinx_mock__'"): + AttributeError, + "'ModuleUnavailable' object has no attribute '__sphinx_mock__'", + ): module_obj.__sphinx_mock__ @unittest.skipUnless(dill_available, "Test requires dill module") @@ -60,7 +72,8 @@ def test_pickle(self): self.assertIsNot(deps.pkl_test, deps.new_pkl_test) self.assertIn('submod', deps.new_pkl_test.__dict__) with self.assertRaisesRegex( - DeferredImportError, 'nonexisting.module.pickle_test module'): + DeferredImportError, 'nonexisting.module.pickle_test module' + ): deps.new_pkl_test.try_to_call_a_method() # Pickle the ModuleUnavailable class self.assertIs(deps.new_pkl_test.__class__, ModuleUnavailable) @@ -72,9 +85,11 @@ def test_pickle(self): def test_import_success(self): module_obj, module_available = attempt_import( - 'ply', 'Testing import of ply', defer_check=False) + 'ply', 'Testing import of ply', defer_check=False + ) self.assertTrue(module_available) import ply + self.assertTrue(module_obj is ply) def test_local_deferred_import(self): @@ -86,16 +101,20 @@ def test_local_deferred_import(self): # Note: this also tests the implicit alt_names for dotted imports self.assertIs(type(deps.bogus), ModuleUnavailable) with self.assertRaisesRegex( - DeferredImportError, "The nonexisting.module.bogus module " - r"\(an optional Pyomo dependency\) failed to import"): + DeferredImportError, + "The nonexisting.module.bogus module " + r"\(an optional Pyomo dependency\) failed to import", + ): deps.bogus.hello def test_imported_deferred_import(self): self.assertIs(type(deps.has_bogus_nem), DeferredImportIndicator) self.assertIs(type(deps.bogus_nem), DeferredImportModule) with self.assertRaisesRegex( - DeferredImportError, "The bogus_nonexisting_module module " - r"\(an optional Pyomo dependency\) failed to import"): + DeferredImportError, + "The bogus_nonexisting_module module " + r"\(an optional Pyomo dependency\) failed to import", + ): deps.test_access_bogus_hello() self.assertIs(deps.has_bogus_nem, False) self.assertIs(type(deps.bogus_nem), ModuleUnavailable) @@ -103,65 +122,63 @@ def test_imported_deferred_import(self): self.assertIs(type(dep_mod.bogus_nonexisting_module), ModuleUnavailable) def test_min_version(self): - mod, avail = attempt_import('pyomo.common.tests.dep_mod', - minimum_version='1.0', - defer_check=False) + mod, avail = attempt_import( + 'pyomo.common.tests.dep_mod', minimum_version='1.0', defer_check=False + ) self.assertTrue(avail) self.assertTrue(inspect.ismodule(mod)) self.assertTrue(check_min_version(mod, '1.0')) self.assertFalse(check_min_version(mod, '2.0')) - mod, avail = attempt_import('pyomo.common.tests.dep_mod', - minimum_version='2.0', - defer_check=False) + mod, avail = attempt_import( + 'pyomo.common.tests.dep_mod', minimum_version='2.0', defer_check=False + ) self.assertFalse(avail) self.assertIs(type(mod), ModuleUnavailable) with self.assertRaisesRegex( - DeferredImportError, "The pyomo.common.tests.dep_mod module " - "version 1.5 does not satisfy the minimum version 2.0"): + DeferredImportError, + "The pyomo.common.tests.dep_mod module " + "version 1.5 does not satisfy the minimum version 2.0", + ): mod.hello - mod, avail = attempt_import('pyomo.common.tests.dep_mod', - error_message="Failed import", - minimum_version='2.0', - defer_check=False) + mod, avail = attempt_import( + 'pyomo.common.tests.dep_mod', + error_message="Failed import", + minimum_version='2.0', + defer_check=False, + ) self.assertFalse(avail) self.assertIs(type(mod), ModuleUnavailable) with self.assertRaisesRegex( - DeferredImportError, "Failed import " - r"\(version 1.5 does not satisfy the minimum version 2.0\)"): + DeferredImportError, + "Failed import " + r"\(version 1.5 does not satisfy the minimum version 2.0\)", + ): mod.hello # Verify check_min_version works with deferred imports - mod, avail = attempt_import('pyomo.common.tests.dep_mod', - defer_check=True) + mod, avail = attempt_import('pyomo.common.tests.dep_mod', defer_check=True) self.assertTrue(check_min_version(mod, '1.0')) - mod, avail = attempt_import('pyomo.common.tests.dep_mod', - defer_check=True) + mod, avail = attempt_import('pyomo.common.tests.dep_mod', defer_check=True) self.assertFalse(check_min_version(mod, '2.0')) # Verify check_min_version works when called directly - mod, avail = attempt_import('pyomo.common.tests.dep_mod', - minimum_version='1.0') + mod, avail = attempt_import('pyomo.common.tests.dep_mod', minimum_version='1.0') self.assertTrue(check_min_version(mod, '1.0')) - mod, avail = attempt_import('pyomo.common.tests.bogus', - minimum_version='1.0') + mod, avail = attempt_import('pyomo.common.tests.bogus', minimum_version='1.0') self.assertFalse(check_min_version(mod, '1.0')) - - def test_and_or(self): - mod0, avail0 = attempt_import('ply', - defer_check=True) - mod1, avail1 = attempt_import('pyomo.common.tests.dep_mod', - defer_check=True) - mod2, avail2 = attempt_import('pyomo.common.tests.dep_mod', - minimum_version='2.0', - defer_check=True) + mod0, avail0 = attempt_import('ply', defer_check=True) + mod1, avail1 = attempt_import('pyomo.common.tests.dep_mod', defer_check=True) + mod2, avail2 = attempt_import( + 'pyomo.common.tests.dep_mod', minimum_version='2.0', defer_check=True + ) _and = avail0 & avail1 self.assertIsInstance(_and, _DeferredAnd) @@ -210,59 +227,70 @@ def test_and_or(self): self.assertIsInstance(_ror, _DeferredOr) self.assertTrue(_ror) - def test_callbacks(self): ans = [] + def _record_avail(module, avail): ans.append(avail) - mod0, avail0 = attempt_import('ply', - defer_check=True, - callback=_record_avail) - mod1, avail1 = attempt_import('pyomo.common.tests.dep_mod', - minimum_version='2.0', - defer_check=True, - callback=_record_avail) + mod0, avail0 = attempt_import('ply', defer_check=True, callback=_record_avail) + mod1, avail1 = attempt_import( + 'pyomo.common.tests.dep_mod', + minimum_version='2.0', + defer_check=True, + callback=_record_avail, + ) self.assertEqual(ans, []) self.assertTrue(avail0) self.assertEqual(ans, [True]) self.assertFalse(avail1) - self.assertEqual(ans, [True,False]) + self.assertEqual(ans, [True, False]) def test_import_exceptions(self): - mod, avail = attempt_import('pyomo.common.tests.dep_mod_except', - defer_check=True, - only_catch_importerror=True) + mod, avail = attempt_import( + 'pyomo.common.tests.dep_mod_except', + defer_check=True, + only_catch_importerror=True, + ) with self.assertRaisesRegex(ValueError, "cannot import module"): bool(avail) # second test will not re-trigger the exception self.assertFalse(avail) - mod, avail = attempt_import('pyomo.common.tests.dep_mod_except', - defer_check=True, - only_catch_importerror=False) + mod, avail = attempt_import( + 'pyomo.common.tests.dep_mod_except', + defer_check=True, + only_catch_importerror=False, + ) self.assertFalse(avail) self.assertFalse(avail) - mod, avail = attempt_import('pyomo.common.tests.dep_mod_except', - defer_check=True, - catch_exceptions=(ImportError, ValueError)) + mod, avail = attempt_import( + 'pyomo.common.tests.dep_mod_except', + defer_check=True, + catch_exceptions=(ImportError, ValueError), + ) self.assertFalse(avail) self.assertFalse(avail) with self.assertRaisesRegex( - ValueError, 'Cannot specify both only_catch_importerror ' - 'and catch_exceptions'): - mod, avail = attempt_import('pyomo.common.tests.dep_mod_except', - defer_check=True, - only_catch_importerror=True, - catch_exceptions=(ImportError,)) + ValueError, + 'Cannot specify both only_catch_importerror and catch_exceptions', + ): + mod, avail = attempt_import( + 'pyomo.common.tests.dep_mod_except', + defer_check=True, + only_catch_importerror=True, + catch_exceptions=(ImportError,), + ) def test_generate_warning(self): - mod, avail = attempt_import('pyomo.common.tests.dep_mod_except', - defer_check=True, - only_catch_importerror=False) + mod, avail = attempt_import( + 'pyomo.common.tests.dep_mod_except', + defer_check=True, + only_catch_importerror=False, + ) # Test generate warning log = StringIO() @@ -273,10 +301,11 @@ def test_generate_warning(self): self.assertIn( "The pyomo.common.tests.dep_mod_except module " "(an optional Pyomo dependency) failed to import", - log.getvalue()) + log.getvalue(), + ) self.assertIn( - "DEPRECATED: use :py:class:`log_import_warning()`", - dep.getvalue()) + "DEPRECATED: use :py:class:`log_import_warning()`", dep.getvalue() + ) log = StringIO() dep = StringIO() @@ -286,15 +315,18 @@ def test_generate_warning(self): self.assertIn( "The pyomo.common.tests.dep_mod_except module " "(an optional Pyomo dependency) failed to import", - log.getvalue()) + log.getvalue(), + ) self.assertIn( - "DEPRECATED: use :py:class:`log_import_warning()`", - dep.getvalue()) + "DEPRECATED: use :py:class:`log_import_warning()`", dep.getvalue() + ) def test_log_warning(self): - mod, avail = attempt_import('pyomo.common.tests.dep_mod_except', - defer_check=True, - only_catch_importerror=False) + mod, avail = attempt_import( + 'pyomo.common.tests.dep_mod_except', + defer_check=True, + only_catch_importerror=False, + ) log = StringIO() dep = StringIO() with LoggingIntercept(dep, 'pyomo'): @@ -303,7 +335,8 @@ def test_log_warning(self): self.assertIn( "The pyomo.common.tests.dep_mod_except module " "(an optional Pyomo dependency) failed to import", - dep.getvalue()) + dep.getvalue(), + ) self.assertNotIn("DEPRECATED:", dep.getvalue()) self.assertEqual("", log.getvalue()) @@ -315,7 +348,8 @@ def test_log_warning(self): self.assertIn( "The pyomo.common.tests.dep_mod_except module " "(an optional Pyomo dependency) failed to import", - log.getvalue()) + log.getvalue(), + ) self.assertEqual("", dep.getvalue()) log = StringIO() @@ -323,20 +357,18 @@ def test_log_warning(self): with LoggingIntercept(log, 'pyomo.core.base'): mod.log_import_warning('pyomo.core.base', "Custom") self.assertIn( - "Custom (import raised ValueError: cannot import module)", - log.getvalue()) + "Custom (import raised ValueError: cannot import module)", log.getvalue() + ) self.assertEqual("", dep.getvalue()) def test_importer(self): attempted_import = [] + def _importer(): attempted_import.append(True) - return attempt_import('pyomo.common.tests.dep_mod', - defer_check=False)[0] + return attempt_import('pyomo.common.tests.dep_mod', defer_check=False)[0] - mod, avail = attempt_import('foo', - importer=_importer, - defer_check=True) + mod, avail = attempt_import('foo', importer=_importer, defer_check=True) self.assertEqual(attempted_import, []) self.assertIsInstance(mod, DeferredImportModule) @@ -346,15 +378,15 @@ def _importer(): def test_deferred_submodules(self): import pyomo + pyo_ver = pyomo.version.version self.assertIsInstance(deps.pyo, DeferredImportModule) self.assertIsNone(deps.pyo._submodule_name) - self.assertEqual(deps.pyo_available._deferred_submodules, - ['.version', - '.common', - '.common.tests', - '.common.tests.dep_mod',]) + self.assertEqual( + deps.pyo_available._deferred_submodules, + ['.version', '.common', '.common.tests', '.common.tests.dep_mod'], + ) # This doesn't cause test_mod to be resolved version = deps.pyo.version self.assertIsInstance(deps.pyo, DeferredImportModule) @@ -369,15 +401,19 @@ def test_deferred_submodules(self): self.assertTrue(inspect.ismodule(deps.dm)) with self.assertRaisesRegex( - ValueError, - "deferred_submodules is only valid if defer_check==True"): + ValueError, "deferred_submodules is only valid if defer_check==True" + ): mod, mod_available = attempt_import( - 'nonexisting.module', defer_check=False, - deferred_submodules={'submod': None}) + 'nonexisting.module', + defer_check=False, + deferred_submodules={'submod': None}, + ) mod, mod_available = attempt_import( - 'nonexisting.module', defer_check=True, - deferred_submodules={'submod.subsubmod': None}) + 'nonexisting.module', + defer_check=True, + deferred_submodules={'submod.subsubmod': None}, + ) self.assertIs(type(mod), DeferredImportModule) self.assertFalse(mod_available) _mod = mod_available._module @@ -387,5 +423,33 @@ def test_deferred_submodules(self): self.assertTrue(hasattr(_mod.submod, 'subsubmod')) self.assertIs(type(_mod.submod.subsubmod), ModuleUnavailable) + def test_UnavailableClass(self): + module_obj, module_available = attempt_import( + '__there_is_no_module_named_this__', + 'Testing import of a non-existent module', + defer_check=False, + ) + + class A_Class(UnavailableClass(module_obj)): + pass + + with self.assertRaisesRegex( + DeferredImportError, + "The class 'A_Class' cannot be created because a needed optional " + r"dependency was not found \(import raised ModuleNotFoundError: No " + r"module named '__there_is_no_module_named_this__'\)", + ): + A_Class() + + with self.assertRaisesRegex( + DeferredImportError, + "The class attribute 'A_Class.method' is not available because a " + r"needed optional dependency was not found \(import raised " + "ModuleNotFoundError: No module named " + r"'__there_is_no_module_named_this__'\)", + ): + A_Class.method() + + if __name__ == '__main__': unittest.main() diff --git a/pyomo/common/tests/test_deprecated.py b/pyomo/common/tests/test_deprecated.py index 5ae47ae547c..1e93dc0b816 100644 --- a/pyomo/common/tests/test_deprecated.py +++ b/pyomo/common/tests/test_deprecated.py @@ -18,14 +18,18 @@ from pyomo.common import DeveloperError from pyomo.common.deprecation import ( - deprecated, deprecation_warning, relocated_module_attribute, RenamedClass, - _import_object + deprecated, + deprecation_warning, + relocated_module_attribute, + RenamedClass, + _import_object, ) from pyomo.common.log import LoggingIntercept from io import StringIO import logging + logger = logging.getLogger('local') @@ -37,30 +41,35 @@ def test_deprecation_warning(self): with LoggingIntercept(DEP_OUT, 'pyomo'): deprecation_warning(None, version='1.2', remove_in='3.4') - self.assertIn('DEPRECATED: This has been deprecated', - DEP_OUT.getvalue()) - self.assertIn('(deprecated in 1.2, will be removed in (or after) 3.4)', - DEP_OUT.getvalue().replace('\n',' ')) + self.assertIn('DEPRECATED: This has been deprecated', DEP_OUT.getvalue()) + self.assertIn( + '(deprecated in 1.2, will be removed in (or after) 3.4)', + DEP_OUT.getvalue().replace('\n', ' '), + ) DEP_OUT = StringIO() with LoggingIntercept(DEP_OUT, 'pyomo'): deprecation_warning("custom message here", version='1.2', remove_in='3.4') - self.assertIn('DEPRECATED: custom message here', - DEP_OUT.getvalue()) - self.assertIn('(deprecated in 1.2, will be removed in (or after) 3.4)', - DEP_OUT.getvalue().replace('\n',' ')) - + self.assertIn('DEPRECATED: custom message here', DEP_OUT.getvalue()) + self.assertIn( + '(deprecated in 1.2, will be removed in (or after) 3.4)', + DEP_OUT.getvalue().replace('\n', ' '), + ) def test_no_version_exception(self): with self.assertRaisesRegex( - DeveloperError, "@deprecated missing initial version"): + DeveloperError, r"@deprecated\(\): missing 'version' argument" + ): + @deprecated() def foo(): pass with self.assertRaisesRegex( - DeveloperError, "@deprecated missing initial version"): + DeveloperError, r"@deprecated\(\): missing 'version' argument" + ): + @deprecated() class foo(object): pass @@ -72,19 +81,22 @@ class foo(object): @deprecated(version="1.2") def __init__(self): pass + self.assertIn('.. deprecated:: 1.2', foo.__doc__) def test_no_doc_string(self): # Note: No docstring, else nose replaces the function name with # the docstring in output. - #"""Test for deprecated function decorator.""" + # """Test for deprecated function decorator.""" @deprecated(version='test') def foo(bar='yeah'): logger.warning(bar) - self.assertIn( - '.. deprecated:: test\n This function has been deprecated', - foo.__doc__) + self.assertRegex( + foo.__doc__, + r'^DEPRECATED.\n\n.. deprecated:: test\n' + r' This function \(.*\.foo\) has been deprecated', + ) # Test the default argument DEP_OUT = StringIO() @@ -96,8 +108,10 @@ def foo(bar='yeah'): self.assertIn('yeah', FCN_OUT.getvalue()) self.assertNotIn('DEPRECATED', FCN_OUT.getvalue()) # Test that the deprecation warning was logged - self.assertIn('DEPRECATED: This function has been deprecated', - DEP_OUT.getvalue()) + self.assertRegex( + DEP_OUT.getvalue().replace('\n', ' '), + r'DEPRECATED: This function \(.*\.foo\) has been deprecated', + ) # Test that the function argument gets passed in DEP_OUT = StringIO() @@ -110,9 +124,10 @@ def foo(bar='yeah'): self.assertIn('custom', FCN_OUT.getvalue()) self.assertNotIn('DEPRECATED', FCN_OUT.getvalue()) # Test that the deprecation warning was logged - self.assertIn('DEPRECATED: This function has been deprecated', - DEP_OUT.getvalue()) - + self.assertRegex( + DEP_OUT.getvalue().replace('\n', ' '), + r'DEPRECATED: This function \(.*\.foo\) has been deprecated', + ) def test_with_doc_string(self): @deprecated(version='test') @@ -124,10 +139,12 @@ def foo(bar='yeah'): """ logger.warning(bar) - self.assertIn( - '.. deprecated:: test\n This function has been deprecated', - foo.__doc__) - self.assertIn('I am a good person.', foo.__doc__) + self.assertRegex( + foo.__doc__, + r'I am a good person.\s+Because I document my public functions.\s+' + r'.. deprecated:: test\n' + r' This function \(.*\.foo\) has been deprecated', + ) # Test the default argument DEP_OUT = StringIO() @@ -139,8 +156,10 @@ def foo(bar='yeah'): self.assertIn('yeah', FCN_OUT.getvalue()) self.assertNotIn('DEPRECATED', FCN_OUT.getvalue()) # Test that the deprecation warning was logged - self.assertIn('DEPRECATED: This function has been deprecated', - DEP_OUT.getvalue()) + self.assertRegex( + DEP_OUT.getvalue().replace('\n', ' '), + r'DEPRECATED: This function \(.*\.foo\) has been deprecated', + ) # Test that the function argument gets passed in DEP_OUT = StringIO() @@ -153,9 +172,10 @@ def foo(bar='yeah'): self.assertIn('custom', FCN_OUT.getvalue()) self.assertNotIn('DEPRECATED', FCN_OUT.getvalue()) # Test that the deprecation warning was logged - self.assertIn('DEPRECATED: This function has been deprecated', - DEP_OUT.getvalue()) - + self.assertRegex( + DEP_OUT.getvalue().replace('\n', ' '), + r'DEPRECATED: This function \(.*\.foo\) has been deprecated', + ) def test_with_custom_message(self): @deprecated('This is a custom message, too.', version='test') @@ -167,9 +187,7 @@ def foo(bar='yeah'): """ logger.warning(bar) - self.assertIn( - '.. deprecated:: test\n This is a custom message', - foo.__doc__) + self.assertIn('.. deprecated:: test\n This is a custom message', foo.__doc__) self.assertIn('I am a good person.', foo.__doc__) # Test the default argument @@ -182,8 +200,7 @@ def foo(bar='yeah'): self.assertIn('yeah', FCN_OUT.getvalue()) self.assertNotIn('DEPRECATED', FCN_OUT.getvalue()) # Test that the deprecation warning was logged - self.assertIn('DEPRECATED: This is a custom message', - DEP_OUT.getvalue()) + self.assertIn('DEPRECATED: This is a custom message', DEP_OUT.getvalue()) # Test that the function argument gets passed in DEP_OUT = StringIO() @@ -196,13 +213,10 @@ def foo(bar='yeah'): self.assertIn('custom', FCN_OUT.getvalue()) self.assertNotIn('DEPRECATED', FCN_OUT.getvalue()) # Test that the deprecation warning was logged - self.assertIn('DEPRECATED: This is a custom message', - DEP_OUT.getvalue()) - + self.assertIn('DEPRECATED: This is a custom message', DEP_OUT.getvalue()) def test_with_custom_logger(self): - @deprecated('This is a custom message', logger='local', - version='test') + @deprecated('This is a custom message', logger='local', version='test') def foo(bar='yeah'): """Show that I am a good person. @@ -211,9 +225,7 @@ def foo(bar='yeah'): """ logger.warning(bar) - self.assertIn( - '.. deprecated:: test\n This is a custom message', - foo.__doc__) + self.assertIn('.. deprecated:: test\n This is a custom message', foo.__doc__) self.assertIn('I am a good person.', foo.__doc__) # Test the default argument @@ -224,11 +236,9 @@ def foo(bar='yeah'): foo() # Test that the function produces output self.assertIn('yeah', FCN_OUT.getvalue()) - self.assertIn('DEPRECATED: This is a custom message', - FCN_OUT.getvalue()) + self.assertIn('DEPRECATED: This is a custom message', FCN_OUT.getvalue()) # Test that the deprecation warning was logged - self.assertNotIn('DEPRECATED:', - DEP_OUT.getvalue()) + self.assertNotIn('DEPRECATED:', DEP_OUT.getvalue()) # Test that the function argument gets passed in DEP_OUT = StringIO() @@ -239,12 +249,10 @@ def foo(bar='yeah'): # Test that the function produces output self.assertNotIn('yeah', FCN_OUT.getvalue()) self.assertIn('custom', FCN_OUT.getvalue()) - self.assertIn('DEPRECATED: This is a custom message', - FCN_OUT.getvalue()) + self.assertIn('DEPRECATED: This is a custom message', FCN_OUT.getvalue()) # Test that the deprecation warning was logged self.assertNotIn('DEPRECATED:', DEP_OUT.getvalue()) - def test_with_class(self): @deprecated(version='test') class foo(object): @@ -252,9 +260,10 @@ def __init__(self): logger.warning('yeah') self.assertIs(type(foo), type) - self.assertIn( - '.. deprecated:: test\n This class has been deprecated', - foo.__doc__) + self.assertRegex( + foo.__doc__, + r'.. deprecated:: test\n This class \(.*\.foo\) has been deprecated', + ) # Test the default argument DEP_OUT = StringIO() @@ -266,21 +275,26 @@ def __init__(self): self.assertIn('yeah', FCN_OUT.getvalue()) self.assertNotIn('DEPRECATED', FCN_OUT.getvalue()) # Test that the deprecation warning was logged - self.assertIn('DEPRECATED: This class has been deprecated', - DEP_OUT.getvalue()) - + self.assertRegex( + DEP_OUT.getvalue().replace('\n', ' '), + r'DEPRECATED: This class \(.*\.foo\) has been deprecated.*' + r'\(deprecated in test\)', + ) def test_with_method(self): class foo(object): def __init__(self): pass + @deprecated(version='test') def bar(self): logger.warning('yeah') - self.assertIn( - '.. deprecated:: test\n This function has been deprecated', - foo.bar.__doc__) + self.assertRegex( + foo.bar.__doc__, + r'.. deprecated:: test\n' + r' This function \(.*\.foo\.bar\) has been deprecated', + ) # Test the default argument DEP_OUT = StringIO() @@ -292,22 +306,27 @@ def bar(self): self.assertIn('yeah', FCN_OUT.getvalue()) self.assertNotIn('DEPRECATED', FCN_OUT.getvalue()) # Test that the deprecation warning was logged - self.assertIn('DEPRECATED: This function has been deprecated', - DEP_OUT.getvalue()) + self.assertRegex( + DEP_OUT.getvalue().replace('\n', ' '), + r'DEPRECATED: This function \(.*\.foo\.bar\) has been deprecated.*' + r'\(deprecated in test\)', + ) def test_with_remove_in(self): class foo(object): def __init__(self): pass + @deprecated(version='1.2', remove_in='3.4') def bar(self): logger.warning('yeah') - self.assertIn( - '.. deprecated:: 1.2\n This function has been deprecated', - foo.bar.__doc__) - self.assertIn('(will be removed in (or after) 3.4)', - foo.bar.__doc__.replace('\n',' ')) + self.assertRegex( + foo.bar.__doc__, + r'.. deprecated:: 1.2\n' + r' This function \(.*\.foo\.bar\) has been deprecated.*' + r'\(will be removed in \(or after\) 3.4\)', + ) # Test the default argument DEP_OUT = StringIO() @@ -319,25 +338,28 @@ def bar(self): self.assertIn('yeah', FCN_OUT.getvalue()) self.assertNotIn('DEPRECATED', FCN_OUT.getvalue()) # Test that the deprecation warning was logged - self.assertIn('DEPRECATED: This function has been deprecated', - DEP_OUT.getvalue()) - self.assertIn('(deprecated in 1.2, will be removed in (or after) 3.4)', - DEP_OUT.getvalue().replace('\n', ' ')) + self.assertRegex( + DEP_OUT.getvalue().replace('\n', ' '), + r'DEPRECATED: This function \(.*\.foo\.bar\) has been deprecated.*' + r'\(deprecated in 1.2, will be removed in \(or after\) 3.4\)', + ) class Bar(object): data = 21 -relocated_module_attribute( - 'myFoo', 'pyomo.common.tests.relocated.Bar', 'test') -class TestRelocated(unittest.TestCase): +relocated_module_attribute('myFoo', 'pyomo.common.tests.relocated.Bar', 'test') + +class TestRelocated(unittest.TestCase): def test_relocated_class(self): # Before we test multiple relocated objects, verify that it will # handle the import of a new module - warning = "DEPRECATED: the 'myFoo' class has been moved to " \ - "'pyomo.common.tests.relocated.Bar'" + warning = ( + "DEPRECATED: the 'myFoo' class has been moved to " + "'pyomo.common.tests.relocated.Bar'" + ) OUT = StringIO() with LoggingIntercept(OUT, 'pyomo'): from pyomo.common.tests.test_deprecated import myFoo @@ -346,16 +368,17 @@ def test_relocated_class(self): from pyomo.common.tests import relocated - if sys.version_info < (3,5): + if sys.version_info < (3, 5): # Make sure that the module is only wrapped once - self.assertIs(type(relocated._wrapped_module), - types.ModuleType) + self.assertIs(type(relocated._wrapped_module), types.ModuleType) self.assertNotIn('Foo', dir(relocated)) self.assertNotIn('Foo_2', dir(relocated)) - warning = "DEPRECATED: the 'Foo_2' class has been moved to " \ - "'pyomo.common.tests.relocated.Bar'" + warning = ( + "DEPRECATED: the 'Foo_2' class has been moved to " + "'pyomo.common.tests.relocated.Bar'" + ) OUT = StringIO() with LoggingIntercept(OUT, 'pyomo'): @@ -367,12 +390,15 @@ def test_relocated_class(self): self.assertIn('Foo_2', dir(relocated)) self.assertIs(relocated.Foo_2, relocated.Bar) - warning = "DEPRECATED: the 'Foo' class has been moved to " \ - "'pyomo.common.tests.test_deprecated.Bar'" + warning = ( + "DEPRECATED: the 'Foo' class has been moved to " + "'pyomo.common.tests.test_deprecated.Bar'" + ) OUT = StringIO() with LoggingIntercept(OUT, 'pyomo'): from pyomo.common.tests.relocated import Foo + self.assertEqual(Foo.data, 21) self.assertIn(warning, OUT.getvalue().replace('\n', ' ')) @@ -383,47 +409,72 @@ def test_relocated_class(self): # Note that relocated defines a __getattr__, which changes how # attribute processing is handled in python 3.7+ with self.assertRaisesRegex( - AttributeError, - "(?:module 'pyomo.common.tests.relocated')|" - "(?:'module' object) has no attribute 'Baz'"): + AttributeError, + "(?:module 'pyomo.common.tests.relocated')|" + "(?:'module' object) has no attribute 'Baz'", + ): relocated.Baz.data if sys.version_info[:2] >= (3, 7): self.assertEqual(relocated.Foo_3, '_3') with self.assertRaisesRegex( - AttributeError, - "(?:module 'pyomo.common.tests.test_deprecated')|" - "(?:'module' object) has no attribute 'Baz'"): + AttributeError, + "(?:module 'pyomo.common.tests.test_deprecated')|" + "(?:'module' object) has no attribute 'Baz'", + ): sys.modules[__name__].Baz.data - def test_relocated_message(self): with LoggingIntercept() as LOG: - self.assertIs(_import_object( - 'oldName', 'pyomo.common.tests.test_deprecated.logger', - 'TBD', None), logger) + self.assertIs( + _import_object( + 'oldName', + 'pyomo.common.tests.test_deprecated.logger', + 'TBD', + None, + None, + ), + logger, + ) self.assertRegex( LOG.getvalue().replace('\n', ' '), "DEPRECATED: the 'oldName' attribute has been moved to " - "'pyomo.common.tests.test_deprecated.logger'") + "'pyomo.common.tests.test_deprecated.logger'", + ) with LoggingIntercept() as LOG: - self.assertIs(_import_object( - 'oldName', 'pyomo.common.tests.test_deprecated._import_object', - 'TBD', None), _import_object) + self.assertIs( + _import_object( + 'oldName', + 'pyomo.common.tests.test_deprecated._import_object', + 'TBD', + None, + None, + ), + _import_object, + ) self.assertRegex( LOG.getvalue().replace('\n', ' '), "DEPRECATED: the 'oldName' function has been moved to " - "'pyomo.common.tests.test_deprecated._import_object'") + "'pyomo.common.tests.test_deprecated._import_object'", + ) with LoggingIntercept() as LOG: - self.assertIs(_import_object( - 'oldName', 'pyomo.common.tests.test_deprecated.TestRelocated', - 'TBD', None), TestRelocated) + self.assertIs( + _import_object( + 'oldName', + 'pyomo.common.tests.test_deprecated.TestRelocated', + 'TBD', + None, + None, + ), + TestRelocated, + ) self.assertRegex( LOG.getvalue().replace('\n', ' '), "DEPRECATED: the 'oldName' class has been moved to " - "'pyomo.common.tests.test_deprecated.TestRelocated'") + "'pyomo.common.tests.test_deprecated.TestRelocated'", + ) def test_relocated_module(self): with LoggingIntercept() as LOG: @@ -434,12 +485,14 @@ def test_relocated_module(self): r"DEPRECATED: The 'pyomo\.common\.tests\.relo_mod' module has " r"been moved to 'pyomo\.common\.tests\.relo_mod_new'. Please " r"update your import. \(deprecated in 1\.2\) \(called from " - r".*test_deprecated\.py") + r".*test_deprecated\.py", + ) with LoggingIntercept() as LOG: # Second import: no warning import pyomo.common.tests.relo_mod as relo self.assertEqual(LOG.getvalue(), '') import pyomo.common.tests.relo_mod_new as relo_new + self.assertIs(relo, relo_new) self.assertEqual(relo.RELO_ATTR, 42) self.assertIs(ReloClass, relo_new.ReloClass) @@ -456,16 +509,20 @@ class NewClassSubclass(NewClass): # The deprecated class does not generate a warning out = StringIO() with LoggingIntercept(out): + class DeprecatedClass(metaclass=RenamedClass): __renamed__new_class__ = NewClass __renamed__version__ = 'X.y' + self.assertEqual(out.getvalue(), "") # Inheriting from the deprecated class generates the warning out = StringIO() with LoggingIntercept(out): + class DeprecatedClassSubclass(DeprecatedClass): attr = 'DeprecatedClassSubclass' + self.assertRegex( out.getvalue().replace("\n", " ").strip(), r"^DEPRECATED: Declaring class 'DeprecatedClassSubclass' " @@ -478,8 +535,10 @@ class DeprecatedClassSubclass(DeprecatedClass): # not generate a warning out = StringIO() with LoggingIntercept(out): + class DeprecatedClassSubSubclass(DeprecatedClassSubclass): attr = 'DeprecatedClassSubSubclass' + self.assertEqual(out.getvalue(), "") # @@ -520,8 +579,13 @@ class DeprecatedClassSubSubclass(DeprecatedClassSubclass): self.assertIsInstance(deprecatedsubsubclass, NewClass) self.assertEqual(out.getvalue(), "") - for obj in (newclass, newclasssubclass, deprecatedclass, - deprecatedsubclass, deprecatedsubsubclass): + for obj in ( + newclass, + newclasssubclass, + deprecatedclass, + deprecatedsubclass, + deprecatedsubsubclass, + ): out = StringIO() with LoggingIntercept(out): self.assertIsInstance(obj, DeprecatedClass) @@ -543,8 +607,13 @@ class DeprecatedClassSubSubclass(DeprecatedClassSubclass): self.assertTrue(issubclass(DeprecatedClassSubSubclass, NewClass)) self.assertEqual(out.getvalue(), "") - for cls in (NewClass, NewClassSubclass, DeprecatedClass, - DeprecatedClassSubclass, DeprecatedClassSubSubclass): + for cls in ( + NewClass, + NewClassSubclass, + DeprecatedClass, + DeprecatedClassSubclass, + DeprecatedClassSubSubclass, + ): out = StringIO() with LoggingIntercept(out): self.assertTrue(issubclass(cls, DeprecatedClass)) @@ -561,35 +630,39 @@ class DeprecatedClassSubSubclass(DeprecatedClassSubclass): self.assertEqual(newclass.attr, 'NewClass') self.assertEqual(newclasssubclass.attr, 'NewClass') self.assertEqual(deprecatedclass.attr, 'NewClass') - self.assertEqual(deprecatedsubclass.attr, - 'DeprecatedClassSubclass') - self.assertEqual(deprecatedsubsubclass.attr, - 'DeprecatedClassSubSubclass') + self.assertEqual(deprecatedsubclass.attr, 'DeprecatedClassSubclass') + self.assertEqual(deprecatedsubsubclass.attr, 'DeprecatedClassSubSubclass') self.assertEqual(NewClass.attr, 'NewClass') self.assertEqual(NewClassSubclass.attr, 'NewClass') self.assertEqual(DeprecatedClass.attr, 'NewClass') - self.assertEqual(DeprecatedClassSubclass.attr, - 'DeprecatedClassSubclass') - self.assertEqual(DeprecatedClassSubSubclass.attr, - 'DeprecatedClassSubSubclass') + self.assertEqual(DeprecatedClassSubclass.attr, 'DeprecatedClassSubclass') + self.assertEqual(DeprecatedClassSubSubclass.attr, 'DeprecatedClassSubSubclass') def test_renamed_errors(self): class NewClass(object): pass with self.assertRaisesRegex( - TypeError, "Declaring class 'DeprecatedClass' using the " - "RenamedClass metaclass, but without specifying the " - "__renamed__new_class__ class attribute"): + TypeError, + "Declaring class 'DeprecatedClass' using the " + "RenamedClass metaclass, but without specifying the " + "__renamed__new_class__ class attribute", + ): + class DeprecatedClass(metaclass=RenamedClass): __renamed_new_class__ = NewClass with self.assertRaisesRegex( - TypeError, "Declaring class 'DeprecatedClass' using the " - "RenamedClass metaclass, but without specifying the " - "__renamed__version__ class attribute"): + DeveloperError, + "Declaring class 'DeprecatedClass' using the " + "RenamedClass metaclass, but without specifying the " + "__renamed__version__ class attribute", + normalize_whitespace=True, + ): + class DeprecatedClass(metaclass=RenamedClass): __renamed__new_class__ = NewClass + if __name__ == '__main__': unittest.main() diff --git a/pyomo/common/tests/test_download.py b/pyomo/common/tests/test_download.py index 1e395ea4d6c..8c41edc1512 100644 --- a/pyomo/common/tests/test_download.py +++ b/pyomo/common/tests/test_download.py @@ -24,6 +24,7 @@ from pyomo.common.download import FileDownloader, distro_available from pyomo.common.tee import capture_output + class Test_FileDownloader(unittest.TestCase): def setUp(self): self.tmpdir = None @@ -50,8 +51,9 @@ def test_init(self): self.assertIsNone(f._fname) with self.assertRaisesRegex( - RuntimeError, "cacert='nonexistent_file_name' does not " - "refer to a valid file."): + RuntimeError, + "cacert='nonexistent_file_name' does not refer to a valid file.", + ): FileDownloader(True, 'nonexistent_file_name') def test_parse(self): @@ -83,28 +85,26 @@ def test_parse(self): with capture_output() as io: with self.assertRaises(SystemExit): f.parse_args(['--cacert']) - self.assertIn('argument --cacert: expected one argument', - io.getvalue()) + self.assertIn('argument --cacert: expected one argument', io.getvalue()) f = FileDownloader() with capture_output() as io: with self.assertRaises(SystemExit): f.parse_args(['--cacert', '--insecure']) - self.assertIn('argument --cacert: expected one argument', - io.getvalue()) + self.assertIn('argument --cacert: expected one argument', io.getvalue()) f = FileDownloader() with self.assertRaisesRegex( - RuntimeError, "--cacert='nonexistent_file_name' does " - "not refer to a valid file"): + RuntimeError, + "--cacert='nonexistent_file_name' does not refer to a valid file", + ): f.parse_args(['--cacert', 'nonexistent_file_name']) f = FileDownloader() with capture_output() as io: with self.assertRaises(SystemExit): f.parse_args(['--foo']) - self.assertIn('error: unrecognized arguments: --foo', - io.getvalue()) + self.assertIn('error: unrecognized arguments: --foo', io.getvalue()) def test_set_destination_filename(self): self.tmpdir = os.path.abspath(tempfile.mkdtemp()) @@ -112,8 +112,7 @@ def test_set_destination_filename(self): f = FileDownloader() self.assertIsNone(f._fname) f.set_destination_filename('foo') - self.assertEqual(f._fname, - os.path.join(envvar.PYOMO_CONFIG_DIR, 'foo')) + self.assertEqual(f._fname, os.path.join(envvar.PYOMO_CONFIG_DIR, 'foo')) # By this point, the CONFIG_DIR is guaranteed to have been created self.assertTrue(os.path.isdir(envvar.PYOMO_CONFIG_DIR)) @@ -124,11 +123,11 @@ def test_set_destination_filename(self): self.assertFalse(os.path.exists(target)) f.target = self.tmpdir - f.set_destination_filename(os.path.join('foo','bar')) + f.set_destination_filename(os.path.join('foo', 'bar')) target = os.path.join(self.tmpdir, 'foo', 'bar') self.assertEqual(f._fname, target) self.assertFalse(os.path.exists(target)) - target_dir = os.path.join(self.tmpdir, 'foo',) + target_dir = os.path.join(self.tmpdir, 'foo') self.assertTrue(os.path.isdir(target_dir)) def test_get_sysinfo(self): @@ -139,13 +138,13 @@ def test_get_sysinfo(self): self.assertTrue(len(ans[0]) > 0) self.assertTrue(platform.system().lower().startswith(ans[0])) self.assertFalse(any(c in ans[0] for c in '.-_')) - self.assertIn(ans[1], (32,64)) + self.assertIn(ans[1], (32, 64)) def test_get_os_version(self): f = FileDownloader() _os, _ver = f.get_os_version(normalize=False) _norm = f.get_os_version(normalize=True) - #print(_os,_ver,_norm) + # print(_os,_ver,_norm) _sys = f.get_sysinfo()[0] if _sys == 'linux': dist, dist_ver = re.match('^([^0-9]+)(.*)', _norm).groups() @@ -158,34 +157,40 @@ def test_get_os_version(self): if distro_available: d, v = f._get_distver_from_distro() - #print(d,v) + # print(d,v) self.assertEqual(_os, d) self.assertEqual(_ver, v) - self.assertTrue(v.replace('.','').startswith(dist_ver)) + self.assertTrue(v.replace('.', '').startswith(dist_ver)) if os.path.exists('/etc/redhat-release'): d, v = f._get_distver_from_redhat_release() - #print(d,v) + # print(d,v) self.assertEqual(_os, d) self.assertEqual(_ver, v) - self.assertTrue(v.replace('.','').startswith(dist_ver)) - - if subprocess.run(['lsb_release'], stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL).returncode == 0: + self.assertTrue(v.replace('.', '').startswith(dist_ver)) + + if ( + subprocess.run( + ['lsb_release'], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + ).returncode + == 0 + ): d, v = f._get_distver_from_lsb_release() - #print(d,v) + # print(d,v) self.assertEqual(_os, d) self.assertEqual(_ver, v) - self.assertTrue(v.replace('.','').startswith(dist_ver)) + self.assertTrue(v.replace('.', '').startswith(dist_ver)) if os.path.exists('/etc/os-release'): d, v = f._get_distver_from_os_release() - #print(d,v) + # print(d,v) self.assertEqual(_os, d) # Note that (at least on centos), os_release is an # imprecise version string self.assertTrue(_ver.startswith(v)) - self.assertTrue(v.replace('.','').startswith(dist_ver)) + self.assertTrue(v.replace('.', '').startswith(dist_ver)) elif _sys == 'darwin': dist, dist_ver = re.match('^([^0-9]+)(.*)', _norm).groups() @@ -193,47 +198,48 @@ def test_get_os_version(self): self.assertEqual(dist, 'macos') self.assertNotIn('.', dist_ver) self.assertGreater(int(dist_ver), 0) - self.assertEqual(_norm, _os+''.join(_ver.split('.')[:2])) + self.assertEqual(_norm, _os + ''.join(_ver.split('.')[:2])) elif _sys == 'windows': self.assertEqual(_os, 'win') - self.assertEqual(_norm, _os+''.join(_ver.split('.')[:2])) + self.assertEqual(_norm, _os + ''.join(_ver.split('.')[:2])) else: self.assertEqual(ans, '') self.assertEqual((_os, _ver), FileDownloader._os_version) # Exercise the fetch from CACHE try: - FileDownloader._os_version, tmp \ - = ("test", '2'), FileDownloader._os_version - self.assertEqual(f.get_os_version(False), ("test","2")) + FileDownloader._os_version, tmp = ("test", '2'), FileDownloader._os_version + self.assertEqual(f.get_os_version(False), ("test", "2")) self.assertEqual(f.get_os_version(), "test2") finally: FileDownloader._os_version = tmp - def test_get_platform_url(self): f = FileDownloader() urlmap = {'bogus_sys': 'bogus'} with self.assertRaisesRegex( - RuntimeError, "cannot infer the correct url for platform '.*'"): + RuntimeError, "cannot infer the correct url for platform '.*'" + ): f.get_platform_url(urlmap) urlmap[f.get_sysinfo()[0]] = 'correct' self.assertEqual(f.get_platform_url(urlmap), 'correct') - def test_get_files_requires_set_destination(self): f = FileDownloader() with self.assertRaisesRegex( - DeveloperError, 'target file name has not been initialized'): + DeveloperError, 'target file name has not been initialized' + ): f.get_binary_file('bogus') with self.assertRaisesRegex( - DeveloperError, 'target file name has not been initialized'): + DeveloperError, 'target file name has not been initialized' + ): f.get_binary_file_from_zip_archive('bogus', 'bogus') with self.assertRaisesRegex( - DeveloperError, 'target file name has not been initialized'): + DeveloperError, 'target file name has not been initialized' + ): f.get_gzipped_binary_file('bogus') def test_get_test_binary_file(self): diff --git a/pyomo/common/tests/test_env.py b/pyomo/common/tests/test_env.py index f929c706483..d14326ddc19 100644 --- a/pyomo/common/tests/test_env.py +++ b/pyomo/common/tests/test_env.py @@ -14,8 +14,8 @@ from pyomo.common.env import CtypesEnviron + class TestCtypesEnviron(unittest.TestCase): - def test_temp_env_str(self): orig_env = CtypesEnviron() orig_env_has_1 = 'TEST_ENV_1' in orig_env @@ -31,23 +31,17 @@ def test_temp_env_str(self): for interface in orig_env.interfaces: self.assertIsNone(interface.dll.wgetenv(u'TEST_ENV_1')) self.assertIsNone(interface.dll.getenv(b'TEST_ENV_1')) - self.assertEqual( - interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") - self.assertEqual( - interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") - + self.assertEqual(interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") + self.assertEqual(interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") + with CtypesEnviron(TEST_ENV_1="test value: 1") as env: self.assertEqual(os.environ['TEST_ENV_1'], "test value: 1") self.assertEqual(os.environ['TEST_ENV_2'], "test value: 2") for interface in env.interfaces: - self.assertEqual( - interface.dll.wgetenv(u'TEST_ENV_1'), u"test value: 1") - self.assertEqual( - interface.dll.getenv(b'TEST_ENV_1'), b"test value: 1") - self.assertEqual( - interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") - self.assertEqual( - interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") + self.assertEqual(interface.dll.wgetenv(u'TEST_ENV_1'), u"test value: 1") + self.assertEqual(interface.dll.getenv(b'TEST_ENV_1'), b"test value: 1") + self.assertEqual(interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") + self.assertEqual(interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") del env['TEST_ENV_2'] self.assertIsNone(os.environ.get('TEST_ENV_2', None)) @@ -60,16 +54,13 @@ def test_temp_env_str(self): for interface in orig_env.interfaces: self.assertIsNone(interface.dll.wgetenv(u'TEST_ENV_1')) self.assertIsNone(interface.dll.getenv(b'TEST_ENV_1')) - self.assertEqual( - interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") - self.assertEqual( - interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") + self.assertEqual(interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") + self.assertEqual(interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") orig_env.restore() self.assertEqual(orig_env_has_1, 'TEST_ENV_1' in os.environ) self.assertEqual(orig_env_has_2, 'TEST_ENV_2' in os.environ) - def test_temp_env_unicode(self): orig_env = CtypesEnviron() orig_env_has_1 = u'TEST_ENV_1' in orig_env @@ -85,23 +76,17 @@ def test_temp_env_unicode(self): for interface in orig_env.interfaces: self.assertIsNone(interface.dll.wgetenv(u'TEST_ENV_1')) self.assertIsNone(interface.dll.getenv(b'TEST_ENV_1')) - self.assertEqual( - interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") - self.assertEqual( - interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") - + self.assertEqual(interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") + self.assertEqual(interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") + with CtypesEnviron(TEST_ENV_1=u"test value: 1") as env: self.assertEqual(os.environ[u'TEST_ENV_1'], u"test value: 1") self.assertEqual(os.environ[u'TEST_ENV_2'], u"test value: 2") for interface in env.interfaces: - self.assertEqual( - interface.dll.wgetenv(u'TEST_ENV_1'), u"test value: 1") - self.assertEqual( - interface.dll.getenv(b'TEST_ENV_1'), b"test value: 1") - self.assertEqual( - interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") - self.assertEqual( - interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") + self.assertEqual(interface.dll.wgetenv(u'TEST_ENV_1'), u"test value: 1") + self.assertEqual(interface.dll.getenv(b'TEST_ENV_1'), b"test value: 1") + self.assertEqual(interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") + self.assertEqual(interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") del env[u'TEST_ENV_2'] self.assertIsNone(os.environ.get(u'TEST_ENV_2', None)) @@ -114,10 +99,8 @@ def test_temp_env_unicode(self): for interface in orig_env.interfaces: self.assertIsNone(interface.dll.wgetenv(u'TEST_ENV_1')) self.assertIsNone(interface.dll.getenv(b'TEST_ENV_1')) - self.assertEqual( - interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") - self.assertEqual( - interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") + self.assertEqual(interface.dll.wgetenv(u'TEST_ENV_2'), u"test value: 2") + self.assertEqual(interface.dll.getenv(b'TEST_ENV_2'), b"test value: 2") orig_env.restore() self.assertEqual(orig_env_has_1, u'TEST_ENV_1' in os.environ) diff --git a/pyomo/common/tests/test_errors.py b/pyomo/common/tests/test_errors.py new file mode 100644 index 00000000000..ec77643f722 --- /dev/null +++ b/pyomo/common/tests/test_errors.py @@ -0,0 +1,139 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +from pyomo.common.errors import format_exception + + +class LocalException(Exception): + pass + + +class TestFormatException(unittest.TestCase): + def test_basic_message(self): + self.assertEqual(format_exception("Hello world"), "Hello world") + + def test_formatted_message(self): + self.assertEqual(format_exception("Hello\nworld"), "Hello\nworld") + + def test_long_basic_message(self): + self.assertEqual( + format_exception( + "Hello world, this is a very long message that will " + "inevitably wrap onto another line." + ), + "Hello world, this is a very long message that will\n" + " inevitably wrap onto another line.", + ) + + def test_long_basic_message_exception(self): + self.assertEqual( + format_exception( + "Hello world, this is a very long message that will " + "inevitably wrap onto another line.", + exception=LocalException(), + ), + "Hello world, this is a very\n" + " long message that will inevitably wrap onto another line.", + ) + + def test_long_basic_message_builtin_exception(self): + self.assertEqual( + format_exception( + "Hello world, this is a very long message that will " + "inevitably wrap onto another line.", + exception=RuntimeError, + ), + "Hello world, this is a very long message that will inevitably\n" + " wrap onto another line.", + ) + + def test_basic_message_prolog(self): + self.assertEqual( + format_exception( + "This is a very, very, very long message that will " + "inevitably wrap onto another line.", + prolog="Hello world:", + ), + "Hello world:\n" + " This is a very, very, very long message that will inevitably " + "wrap onto\n" + " another line.", + ) + + def test_basic_message_long_prolog(self): + msg = format_exception( + "This is a very, very, very long message that will " + "inevitably wrap onto another line.", + prolog="Hello, this is a more verbose prolog that will " + "trigger a line wrap:", + ) + self.assertEqual( + msg, + "Hello, this is a more verbose prolog that will trigger\n" + " a line wrap:\n" + " This is a very, very, very long message that will inevitably " + "wrap\n" + " onto another line.", + ) + + def test_basic_message_formatted_prolog(self): + msg = format_exception( + "This is a very, very, very long message that will " + "inevitably wrap onto another line.", + prolog="Hello world:\n This is a prolog:", + ) + self.assertEqual( + msg, + "Hello world:\n This is a prolog:\n" + " This is a very, very, very long message that will inevitably " + "wrap\n" + " onto another line.", + ) + + def test_basic_message_epilog(self): + self.assertEqual( + format_exception( + "This is a very, very, very long message that will " + "inevitably wrap onto another line.", + epilog="Hello world", + ), + "This is a very, very, very long message that will\n" + " inevitably wrap onto another line.\n" + " Hello world", + ) + + def test_basic_message_long_epilog(self): + self.assertEqual( + format_exception( + "This is a very, very, very long message that will " + "inevitably wrap onto another line.", + epilog="Hello, this is a very, very, very verbose epilog that will " + "trigger a line wrap", + ), + "This is a very, very, very long message that will\n" + " inevitably wrap onto another line.\n" + " Hello, this is a very, very, very verbose epilog that will trigger a\n" + " line wrap", + ) + + def test_basic_message_formatted_epilog(self): + msg = format_exception( + "This is a very, very, very long message that will " + "inevitably wrap onto another line.", + epilog="Hello world:\n This is an epilog:", + ) + self.assertEqual( + msg, + "This is a very, very, very long message that will\n" + " inevitably wrap onto another line.\n" + "Hello world:\n This is an epilog:", + ) diff --git a/pyomo/common/tests/test_fileutils.py b/pyomo/common/tests/test_fileutils.py index 8c20f876e59..2aaebded6cd 100644 --- a/pyomo/common/tests/test_fileutils.py +++ b/pyomo/common/tests/test_fileutils.py @@ -26,8 +26,17 @@ import pyomo.common.envvar as envvar from pyomo.common.log import LoggingIntercept from pyomo.common.fileutils import ( - this_file, this_file_dir, find_file, find_library, find_executable, - PathManager, _system, _path, _exeExt, _libExt, ExecutableData, + this_file, + this_file_dir, + find_file, + find_library, + find_executable, + PathManager, + _system, + _path, + _exeExt, + _libExt, + ExecutableData, import_file, ) from pyomo.common.download import FileDownloader @@ -37,12 +46,14 @@ except AttributeError: # os.path.samefile is not available in Python 2.7 under Windows. # Mock up a dummy function for that platform. - def samefile(a,b): + def samefile(a, b): return True + _this_file = this_file() _this_file_dir = this_file_dir() + class TestFileUtils(unittest.TestCase): def setUp(self): self.tmpdir = None @@ -66,9 +77,9 @@ def tearDown(self): os.environ['PATH'] = self.path def _make_exec(self, fname): - open(fname,'w').close() + open(fname, 'w').close() mode = os.stat(fname).st_mode - os.chmod( fname, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH ) + os.chmod(fname, mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) def _check_file(self, found, ref): # @@ -78,31 +89,41 @@ def _check_file(self, found, ref): # first so that we can generate a more informative error in the # case of "gross" failure. # - self.assertTrue( - found.endswith(ref), "%s does not end with %s" % (found, ref)) + self.assertTrue(found.endswith(ref), "%s does not end with %s" % (found, ref)) self.assertTrue(samefile(ref, found)) def test_this_file(self): - self.assertEqual(_this_file, __file__.replace('.pyc','.py')) + self.assertEqual(_this_file, __file__.replace('.pyc', '.py')) # Note that in some versions of PyPy, this can return # instead of the normal - self.assertIn(subprocess.run([ - sys.executable,'-c', - 'from pyomo.common.fileutils import this_file;' - 'print(this_file())' - ], stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True).stdout.strip(), - ['','']) - self.assertEqual(subprocess.run( - [sys.executable], - input='from pyomo.common.fileutils import this_file;' - 'print(this_file())', stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, universal_newlines=True - ).stdout.strip(), '') + self.assertIn( + subprocess.run( + [ + sys.executable, + '-c', + 'from pyomo.common.fileutils import this_file;' + 'print(this_file())', + ], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ).stdout.strip(), + ['', ''], + ) + self.assertEqual( + subprocess.run( + [sys.executable], + input='from pyomo.common.fileutils import this_file;' + 'print(this_file())', + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ).stdout.strip(), + '', + ) def test_this_file_dir(self): - expected_path = os.path.join('pyomo','common','tests') + expected_path = os.path.join('pyomo', 'common', 'tests') self.assertTrue(_this_file_dir.endswith(expected_path)) def test_import_file(self): @@ -138,81 +159,88 @@ def test_findfile(self): os.chdir(self.tmpdir) fname = 'foo.py' - self.assertEqual( - None, - find_file(fname) - ) + self.assertEqual(None, find_file(fname)) - open(os.path.join(self.tmpdir,fname),'w').close() - open(os.path.join(subdir,fname),'w').close() - open(os.path.join(subdir,'aaa'),'w').close() + open(os.path.join(self.tmpdir, fname), 'w').close() + open(os.path.join(subdir, fname), 'w').close() + open(os.path.join(subdir, 'aaa'), 'w').close() # we can find files in the CWD - self._check_file(find_file(fname),os.path.join(self.tmpdir,fname)) + self._check_file(find_file(fname), os.path.join(self.tmpdir, fname)) # unless we don't look in the cwd self.assertIsNone(find_file(fname, cwd=False)) # cwd overrides pathlist - self._check_file(find_file(fname, pathlist=[subdir]), - os.path.join(self.tmpdir,fname)) - - self._check_file(find_file(fname, pathlist=[subdir], cwd=False), - os.path.join(subdir,fname)) + self._check_file( + find_file(fname, pathlist=[subdir]), os.path.join(self.tmpdir, fname) + ) + + self._check_file( + find_file(fname, pathlist=[subdir], cwd=False), os.path.join(subdir, fname) + ) # ...unless the CWD match fails the MODE check # (except on Windows, where all files have X_OK) found = find_file(fname, pathlist=[subdir], mode=os.X_OK) - if _system() in ('windows','cygwin'): - self._check_file(found, os.path.join(self.tmpdir,fname)) + if _system() in ('windows', 'cygwin'): + self._check_file(found, os.path.join(self.tmpdir, fname)) else: self.assertIsNone(found) - self._make_exec(os.path.join(subdir,fname)) + self._make_exec(os.path.join(subdir, fname)) found = find_file(fname, pathlist=[subdir], mode=os.X_OK) - if _system() in ('windows','cygwin'): - ref = os.path.join(self.tmpdir,fname) + if _system() in ('windows', 'cygwin'): + ref = os.path.join(self.tmpdir, fname) else: - ref = os.path.join(subdir,fname) + ref = os.path.join(subdir, fname) self._check_file(found, ref) # pathlist may also be a string self._check_file( - find_file(fname, pathlist=os.pathsep+subdir+os.pathsep, cwd=False), - os.path.join(subdir,fname) + find_file(fname, pathlist=os.pathsep + subdir + os.pathsep, cwd=False), + os.path.join(subdir, fname), ) # implicit extensions work (even if they are not necessary) - self._check_file(find_file(fname, ext='.py'), - os.path.join(self.tmpdir,fname)) - self._check_file(find_file(fname, ext=['.py']), - os.path.join(self.tmpdir,fname)) + self._check_file(find_file(fname, ext='.py'), os.path.join(self.tmpdir, fname)) + self._check_file( + find_file(fname, ext=['.py']), os.path.join(self.tmpdir, fname) + ) # implicit extensions work (when they are necessary) - self._check_file(find_file(fname[:-3], ext='.py'), - os.path.join(self.tmpdir,fname)) + self._check_file( + find_file(fname[:-3], ext='.py'), os.path.join(self.tmpdir, fname) + ) - self._check_file(find_file(fname[:-3], ext=['.py']), - os.path.join(self.tmpdir,fname)) + self._check_file( + find_file(fname[:-3], ext=['.py']), os.path.join(self.tmpdir, fname) + ) # only files are found - self._check_file(find_file( subdir_name, - pathlist=[self.tmpdir, subdir], cwd=False ), - os.path.join(subdir,subdir_name)) + self._check_file( + find_file(subdir_name, pathlist=[self.tmpdir, subdir], cwd=False), + os.path.join(subdir, subdir_name), + ) # empty dirs are skipped self._check_file( - find_file( subdir_name, - pathlist=['', self.tmpdir, subdir], cwd=False ), - os.path.join(subdir,subdir_name) + find_file(subdir_name, pathlist=['', self.tmpdir, subdir], cwd=False), + os.path.join(subdir, subdir_name), ) - @unittest.skipIf(sys.version_info[:2] < (3, 8) - and platform.mac_ver()[0].startswith('10.16'), - "find_library has known bugs in Big Sur for Python<3.8") + # TODO: Remove this when Python 3.7 is no longer supported + @unittest.skipIf( + sys.version_info[:2] < (3, 8) + and ( + platform.mac_ver()[0].startswith('10.16') + or platform.mac_ver()[0].startswith('12.6') + ), + "find_library has known bugs in Big Sur/Monterey for Python<3.8", + ) def test_find_library_system(self): # Find a system library (before we muck with the PATH) - _args = {'cwd':False, 'include_PATH':False, 'pathlist':[]} + _args = {'cwd': False, 'include_PATH': False, 'pathlist': []} if FileDownloader.get_sysinfo()[0] == 'windows': a = find_library('ntdll', **_args) b = find_library('ntdll.dll', **_args) @@ -224,7 +252,7 @@ def test_find_library_system(self): self.assertIsNotNone(a) self.assertIsNotNone(b) self.assertIsNotNone(c) - self.assertEqual(a,b) + self.assertEqual(a, b) # find_library could have found libc.so.6 self.assertTrue(c.startswith(a)) # Verify that the library is loadable (they are all the same @@ -256,79 +284,64 @@ def test_find_library_user(self): libExt = _libExt[_system()][0] f_in_cwd_ldlib_path = 'f_in_cwd_ldlib_path' - open(os.path.join(self.tmpdir,f_in_cwd_ldlib_path),'w').close() - open(os.path.join(ldlibdir,f_in_cwd_ldlib_path),'w').close() - open(os.path.join(pathdir,f_in_cwd_ldlib_path),'w').close() + open(os.path.join(self.tmpdir, f_in_cwd_ldlib_path), 'w').close() + open(os.path.join(ldlibdir, f_in_cwd_ldlib_path), 'w').close() + open(os.path.join(pathdir, f_in_cwd_ldlib_path), 'w').close() f_in_ldlib_extension = 'f_in_ldlib_extension' - open(os.path.join(ldlibdir,f_in_ldlib_extension + libExt),'w').close() + open(os.path.join(ldlibdir, f_in_ldlib_extension + libExt), 'w').close() f_in_path = 'f_in_path' - open(os.path.join(pathdir,f_in_path),'w').close() + open(os.path.join(pathdir, f_in_path), 'w').close() f_in_configlib = 'f_in_configlib' - open(os.path.join(config_libdir, f_in_configlib),'w').close() + open(os.path.join(config_libdir, f_in_configlib), 'w').close() f_in_configbin = 'f_in_configbin' - open(os.path.join(config_bindir, f_in_ldlib_extension),'w').close() - open(os.path.join(config_bindir, f_in_configbin),'w').close() - + open(os.path.join(config_bindir, f_in_ldlib_extension), 'w').close() + open(os.path.join(config_bindir, f_in_configbin), 'w').close() self._check_file( find_library(f_in_cwd_ldlib_path), - os.path.join(self.tmpdir, f_in_cwd_ldlib_path) + os.path.join(self.tmpdir, f_in_cwd_ldlib_path), ) self._check_file( os.path.join(ldlibdir, f_in_cwd_ldlib_path), - find_library(f_in_cwd_ldlib_path, cwd=False) + find_library(f_in_cwd_ldlib_path, cwd=False), ) self._check_file( os.path.join(ldlibdir, f_in_ldlib_extension) + libExt, - find_library(f_in_ldlib_extension) - ) - self._check_file( - os.path.join(pathdir, f_in_path), - find_library(f_in_path) + find_library(f_in_ldlib_extension), ) + self._check_file(os.path.join(pathdir, f_in_path), find_library(f_in_path)) if _system() == 'windows': self._check_file( os.path.join(pathdir, f_in_path), - find_library(f_in_path, include_PATH=False) + find_library(f_in_path, include_PATH=False), ) else: # Note that on Windows, ctypes.util.find_library *always* # searches the PATH - self.assertIsNone( - find_library(f_in_path, include_PATH=False) - ) + self.assertIsNone(find_library(f_in_path, include_PATH=False)) self._check_file( os.path.join(pathdir, f_in_path), - find_library(f_in_path, pathlist=os.pathsep+pathdir+os.pathsep) + find_library(f_in_path, pathlist=os.pathsep + pathdir + os.pathsep), ) # test an explicit pathlist overrides LD_LIBRARY_PATH self._check_file( os.path.join(pathdir, f_in_cwd_ldlib_path), - find_library(f_in_cwd_ldlib_path, cwd=False, pathlist=[pathdir]) + find_library(f_in_cwd_ldlib_path, cwd=False, pathlist=[pathdir]), ) # test that the PYOMO_CONFIG_DIR 'lib' dir is included self._check_file( - os.path.join(config_libdir, f_in_configlib), - find_library(f_in_configlib) + os.path.join(config_libdir, f_in_configlib), find_library(f_in_configlib) ) # and the Bin dir self._check_file( - os.path.join(config_bindir, f_in_configbin), - find_library(f_in_configbin) + os.path.join(config_bindir, f_in_configbin), find_library(f_in_configbin) ) # ... but only if include_PATH is true - self.assertIsNone( - find_library(f_in_configbin, include_PATH=False) - ) + self.assertIsNone(find_library(f_in_configbin, include_PATH=False)) # And none of them if the pathlist is specified - self.assertIsNone( - find_library(f_in_configlib, pathlist=pathdir) - ) - self.assertIsNone( - find_library(f_in_configbin, pathlist=pathdir) - ) - + self.assertIsNone(find_library(f_in_configlib, pathlist=pathdir)) + self.assertIsNone(find_library(f_in_configbin, pathlist=pathdir)) def test_find_executable(self): self.tmpdir = os.path.abspath(tempfile.mkdtemp()) @@ -354,15 +367,15 @@ def test_find_executable(self): exeExt = _exeExt[_system()] or '' f_in_cwd_notexe = 'f_in_cwd_notexe' - open(os.path.join(self.tmpdir,f_in_cwd_notexe), 'w').close() + open(os.path.join(self.tmpdir, f_in_cwd_notexe), 'w').close() f_in_cwd_ldlib_path = 'f_in_cwd_ldlib_path' - self._make_exec(os.path.join(self.tmpdir,f_in_cwd_ldlib_path)) - self._make_exec(os.path.join(ldlibdir,f_in_cwd_ldlib_path)) - self._make_exec(os.path.join(pathdir,f_in_cwd_ldlib_path)) + self._make_exec(os.path.join(self.tmpdir, f_in_cwd_ldlib_path)) + self._make_exec(os.path.join(ldlibdir, f_in_cwd_ldlib_path)) + self._make_exec(os.path.join(pathdir, f_in_cwd_ldlib_path)) f_in_path_extension = 'f_in_path_extension' - self._make_exec(os.path.join(pathdir,f_in_path_extension + exeExt)) + self._make_exec(os.path.join(pathdir, f_in_path_extension + exeExt)) f_in_path = 'f_in_path' - self._make_exec(os.path.join(pathdir,f_in_path)) + self._make_exec(os.path.join(pathdir, f_in_path)) f_in_configlib = 'f_in_configlib' self._make_exec(os.path.join(config_libdir, f_in_configlib)) @@ -370,52 +383,42 @@ def test_find_executable(self): self._make_exec(os.path.join(config_libdir, f_in_path_extension)) self._make_exec(os.path.join(config_bindir, f_in_configbin)) - found = find_executable(f_in_cwd_notexe) - if _system() in ('windows','cygwin'): - self._check_file(found, os.path.join(self.tmpdir,f_in_cwd_notexe)) + if _system() in ('windows', 'cygwin'): + self._check_file(found, os.path.join(self.tmpdir, f_in_cwd_notexe)) else: self.assertIsNone(found) self._check_file( find_executable(f_in_cwd_ldlib_path), - os.path.join(self.tmpdir, f_in_cwd_ldlib_path) + os.path.join(self.tmpdir, f_in_cwd_ldlib_path), ) self._check_file( os.path.join(pathdir, f_in_cwd_ldlib_path), - find_executable(f_in_cwd_ldlib_path, cwd=False) + find_executable(f_in_cwd_ldlib_path, cwd=False), ) self._check_file( find_executable(f_in_path_extension), - os.path.join(pathdir, f_in_path_extension) + exeExt - ) - self._check_file( - find_executable(f_in_path), - os.path.join(pathdir, f_in_path) - ) - self.assertIsNone( - find_executable(f_in_path, include_PATH=False) + os.path.join(pathdir, f_in_path_extension) + exeExt, ) + self._check_file(find_executable(f_in_path), os.path.join(pathdir, f_in_path)) + self.assertIsNone(find_executable(f_in_path, include_PATH=False)) self._check_file( - find_executable(f_in_path, pathlist=os.pathsep+pathdir+os.pathsep), - os.path.join(pathdir, f_in_path) + find_executable(f_in_path, pathlist=os.pathsep + pathdir + os.pathsep), + os.path.join(pathdir, f_in_path), ) - + # test an explicit pathlist overrides PATH self._check_file( os.path.join(ldlibdir, f_in_cwd_ldlib_path), - find_executable(f_in_cwd_ldlib_path, cwd=False, pathlist=[ldlibdir]) + find_executable(f_in_cwd_ldlib_path, cwd=False, pathlist=[ldlibdir]), ) # test that the PYOMO_CONFIG_DIR 'bin' dir is included self._check_file( - os.path.join(config_bindir, f_in_configbin), - find_executable(f_in_configbin) + os.path.join(config_bindir, f_in_configbin), find_executable(f_in_configbin) ) # ... but only if the pathlist is not specified - self.assertIsNone( - find_executable(f_in_configbin, pathlist=pathdir) - ) - + self.assertIsNone(find_executable(f_in_configbin, pathlist=pathdir)) def test_PathManager(self): Executable = PathManager(find_executable, ExecutableData) @@ -431,73 +434,75 @@ def test_PathManager(self): os.environ['PATH'] = os.pathsep + pathdir + os.pathsep f_in_tmp = 'f_in_tmp' - self._make_exec(os.path.join(self.tmpdir,f_in_tmp)) + self._make_exec(os.path.join(self.tmpdir, f_in_tmp)) f_in_path = 'f_in_path' - self._make_exec(os.path.join(pathdir,f_in_path)) + self._make_exec(os.path.join(pathdir, f_in_path)) f_in_cfg = 'f_in_configbin' self._make_exec(os.path.join(config_bindir, f_in_cfg)) # Test availability - self.assertTrue( Executable(f_in_path).available() ) + self.assertTrue(Executable(f_in_path).available()) if not Executable(f_in_path): self.fail("Expected casting Executable(f_in_path) to bool=True") # Test getting the path to the executable - self._check_file( Executable(f_in_path).path(), - os.path.join(pathdir, f_in_path) ) - self._check_file( "%s" % Executable(f_in_path), - os.path.join(pathdir, f_in_path) ) - self._check_file( Executable(f_in_path).executable, - os.path.join(pathdir, f_in_path) ) + self._check_file(Executable(f_in_path).path(), os.path.join(pathdir, f_in_path)) + self._check_file("%s" % Executable(f_in_path), os.path.join(pathdir, f_in_path)) + self._check_file( + Executable(f_in_path).executable, os.path.join(pathdir, f_in_path) + ) # Test the above for a nonexistent file - self.assertFalse( Executable(f_in_tmp).available() ) + self.assertFalse(Executable(f_in_tmp).available()) if Executable(f_in_tmp): self.fail("Expected casting Executable(f_in_tmp) to bool=False") - self.assertIsNone( Executable(f_in_tmp).path() ) - self.assertEqual( "%s" % Executable(f_in_tmp), "" ) - self.assertIsNone( Executable(f_in_tmp).executable ) + self.assertIsNone(Executable(f_in_tmp).path()) + self.assertEqual("%s" % Executable(f_in_tmp), "") + self.assertIsNone(Executable(f_in_tmp).executable) # If we override the pathlist, then we will not find the CONFIGDIR Executable.pathlist = [] - self.assertFalse( Executable(f_in_cfg).available() ) + self.assertFalse(Executable(f_in_cfg).available()) Executable.pathlist.append(config_bindir) # and adding it won't change things (status is cached) - self.assertFalse( Executable(f_in_cfg).available() ) + self.assertFalse(Executable(f_in_cfg).available()) # until we tell the manager to rehash the executables Executable.rehash() - self.assertTrue( Executable(f_in_cfg).available() ) - self.assertEqual( Executable(f_in_cfg).path(), - os.path.join(config_bindir, f_in_cfg) ) + self.assertTrue(Executable(f_in_cfg).available()) + self.assertEqual( + Executable(f_in_cfg).path(), os.path.join(config_bindir, f_in_cfg) + ) # Note that if we clear the pathlist, then the current value of # CONFIGDIR will be honored Executable.pathlist = None Executable.rehash() - self.assertTrue( Executable(f_in_cfg).available() ) - self.assertEqual( Executable(f_in_cfg).path(), - os.path.join(config_bindir, f_in_cfg) ) + self.assertTrue(Executable(f_in_cfg).available()) + self.assertEqual( + Executable(f_in_cfg).path(), os.path.join(config_bindir, f_in_cfg) + ) # Another file that doesn't exist f_in_path2 = 'f_in_path2' f_loc = os.path.join(pathdir, f_in_path2) - self.assertFalse( Executable(f_in_path2).available() ) + self.assertFalse(Executable(f_in_path2).available()) output = StringIO() with LoggingIntercept(output, 'pyomo.common', logging.WARNING): Executable(f_in_path2).executable = f_loc self.assertIn( "explicitly setting the path for '%s' to an " - "invalid object or nonexistent location ('%s')" - % (f_in_path2, f_loc), output.getvalue()) - self.assertFalse( Executable(f_in_path2).available() ) - self._make_exec(os.path.join(pathdir,f_in_path2)) - self.assertFalse( Executable(f_in_path2).available() ) + "invalid object or nonexistent location ('%s')" % (f_in_path2, f_loc), + output.getvalue(), + ) + self.assertFalse(Executable(f_in_path2).available()) + self._make_exec(os.path.join(pathdir, f_in_path2)) + self.assertFalse(Executable(f_in_path2).available()) Executable(f_in_path2).rehash() - self.assertTrue( Executable(f_in_path2).available() ) + self.assertTrue(Executable(f_in_path2).available()) # And disabling it will "remove" it Executable(f_in_path2).disable() - self.assertFalse( Executable(f_in_path2).available() ) - self.assertIsNone( Executable(f_in_path2).path() ) + self.assertFalse(Executable(f_in_path2).available()) + self.assertIsNone(Executable(f_in_path2).path()) Executable(f_in_path2).rehash() - self.assertTrue( Executable(f_in_path2).available() ) - self.assertEqual( Executable(f_in_path2).path(), f_loc ) + self.assertTrue(Executable(f_in_path2).available()) + self.assertEqual(Executable(f_in_path2).path(), f_loc) diff --git a/pyomo/common/tests/test_formatting.py b/pyomo/common/tests/test_formatting.py index fa1f8bfb48b..d502c81da5a 100644 --- a/pyomo/common/tests/test_formatting.py +++ b/pyomo/common/tests/test_formatting.py @@ -16,10 +16,23 @@ from pyomo.common.formatting import tostr, tabular_writer, StreamIndenter -class DerivedList(list): pass -class DerivedTuple(tuple): pass -class DerivedDict(dict): pass -class DerivedStr(str): pass + +class DerivedList(list): + pass + + +class DerivedTuple(tuple): + pass + + +class DerivedDict(dict): + pass + + +class DerivedStr(str): + pass + + NamedTuple = namedtuple('NamedTuple', ['x', 'y']) @@ -37,7 +50,7 @@ def test_new_type_str(self): self.assertIs(tostr.handlers[DerivedStr], tostr.handlers[str]) def test_new_type_list(self): - self.assertEqual(tostr(DerivedList([1,2])), '[1, 2]') + self.assertEqual(tostr(DerivedList([1, 2])), '[1, 2]') self.assertIs(tostr.handlers[DerivedList], tostr.handlers[list]) def test_new_type_dict(self): @@ -45,7 +58,7 @@ def test_new_type_dict(self): self.assertIs(tostr.handlers[DerivedDict], tostr.handlers[dict]) def test_new_type_tuple(self): - self.assertEqual(tostr(DerivedTuple([1,2])), '(1, 2)') + self.assertEqual(tostr(DerivedTuple([1, 2])), '(1, 2)') self.assertIs(tostr.handlers[DerivedTuple], tostr.handlers[tuple]) def test_new_type_namedtuple(self): @@ -100,9 +113,11 @@ def test_no_data(self): def test_multiline_generator(self): os = StringIO() data = {'a': 0, 'b': 1, 'c': 3} + def _data_gen(i, j): for n in range(j): - yield (n, chr(ord('a')+n)*j) + yield (n, chr(ord('a') + n) * j) + tabular_writer(os, "", data.items(), ['i', 'j'], _data_gen) ref = u""" Key : i : j @@ -117,11 +132,13 @@ def _data_gen(i, j): def test_multiline_generator_exception(self): os = StringIO() data = {'a': 0, 'b': 1, 'c': 3} + def _data_gen(i, j): if i == 'b': raise ValueError("invalid") for n in range(j): - yield (n, chr(ord('a')+n)*j) + yield (n, chr(ord('a') + n) * j) + tabular_writer(os, "", data.items(), ['i', 'j'], _data_gen) ref = u""" Key : i : j @@ -136,10 +153,12 @@ def _data_gen(i, j): def test_data_exception(self): os = StringIO() data = {'a': 0, 'b': 1, 'c': 3} + def _data_gen(i, j): if i == 'b': raise ValueError("invalid") - return (j, i*(j+1)) + return (j, i * (j + 1)) + tabular_writer(os, "", data.items(), ['i', 'j'], _data_gen) ref = u""" Key : i : j @@ -152,14 +171,16 @@ def _data_gen(i, j): def test_multiline_alignment(self): os = StringIO() data = {'a': 1, 'b': 2, 'c': 3} + def _data_gen(i, j): for n in range(j): - _str = chr(ord('a')+n)*(j+1) + _str = chr(ord('a') + n) * (j + 1) if n % 2: _str = list(_str) _str[1] = ' ' _str = ''.join(_str) yield (n, _str) + tabular_writer(os, "", data.items(), ['i', 'j'], _data_gen) ref = u""" Key : i : j @@ -178,8 +199,7 @@ def test_noprefix(self): OUT1 = StringIO() OUT2 = StreamIndenter(OUT1) OUT2.write('Hello?\nHello, world!') - self.assertEqual(' Hello?\n Hello, world!', - OUT2.getvalue()) + self.assertEqual(' Hello?\n Hello, world!', OUT2.getvalue()) def test_prefix(self): prefix = 'foo:' @@ -192,12 +212,10 @@ def test_blank_lines(self): OUT1 = StringIO() OUT2 = StreamIndenter(OUT1) OUT2.write('Hello?\n\nText\n\nHello, world!') - self.assertEqual(' Hello?\n\n Text\n\n Hello, world!', - OUT2.getvalue()) + self.assertEqual(' Hello?\n\n Text\n\n Hello, world!', OUT2.getvalue()) def test_writelines(self): OUT1 = StringIO() OUT2 = StreamIndenter(OUT1) OUT2.writelines(['Hello?\n', '\n', 'Text\n', '\n', 'Hello, world!']) - self.assertEqual(' Hello?\n\n Text\n\n Hello, world!', - OUT2.getvalue()) + self.assertEqual(' Hello?\n\n Text\n\n Hello, world!', OUT2.getvalue()) diff --git a/pyomo/common/tests/test_gc.py b/pyomo/common/tests/test_gc.py index 90a2ec612e4..b2f23102a0e 100644 --- a/pyomo/common/tests/test_gc.py +++ b/pyomo/common/tests/test_gc.py @@ -14,6 +14,7 @@ import pyomo.common.unittest as unittest + class TestPauseGC(unittest.TestCase): def test_gc_disable(self): self.assertTrue(gc.isenabled()) @@ -50,8 +51,9 @@ def test_gc_errors(self): with pgc: with self.assertRaisesRegex( - RuntimeError, "Entering PauseGC context manager that " - "was already entered"): + RuntimeError, + "Entering PauseGC context manager that was already entered", + ): with pgc: pass self.assertFalse(gc.isenabled()) @@ -62,11 +64,12 @@ def test_gc_errors(self): with PauseGC(): self.assertFalse(gc.isenabled()) with self.assertRaisesRegex( - RuntimeError, - "Exiting PauseGC context manager out of order: there " - "are other active PauseGC context managers that were " - "entered after this context manager and have not yet " - "been exited."): + RuntimeError, + "Exiting PauseGC context manager out of order: there " + "are other active PauseGC context managers that were " + "entered after this context manager and have not yet " + "been exited.", + ): pgc.close() self.assertFalse(gc.isenabled()) self.assertTrue(gc.isenabled()) diff --git a/pyomo/common/tests/test_log.py b/pyomo/common/tests/test_log.py index c66f713166b..02afdd1dac1 100644 --- a/pyomo/common/tests/test_log.py +++ b/pyomo/common/tests/test_log.py @@ -25,13 +25,18 @@ import pyomo.common.unittest as unittest from pyomo.common.log import ( - LoggingIntercept, WrappingFormatter, LegacyPyomoFormatter, LogHandler, - LogStream, pyomo_formatter + LoggingIntercept, + WrappingFormatter, + LegacyPyomoFormatter, + LogHandler, + LogStream, + pyomo_formatter, ) logger = logging.getLogger('pyomo.common.log.testing') filename = getframeinfo(currentframe()).filename + class TestLegacyLogHandler(unittest.TestCase): def setUp(self): self.stream = StringIO() @@ -45,8 +50,8 @@ def test_simple_log(self): with LoggingIntercept(log): self.handler = LogHandler( os.path.dirname(__file__), - stream = self.stream, - verbosity=lambda: logger.isEnabledFor(logging.DEBUG) + stream=self.stream, + verbosity=lambda: logger.isEnabledFor(logging.DEBUG), ) self.assertIn('LogHandler class has been deprecated', log.getvalue()) logger.addHandler(self.handler) @@ -61,26 +66,27 @@ def test_simple_log(self): logger.setLevel(logging.DEBUG) logger.warning("(warn)") lineno = getframeinfo(currentframe()).lineno - 1 - ans += 'WARNING: "[base]%stest_log.py", %d, test_simple_log\n' \ - ' (warn)\n' % (os.path.sep, lineno,) + ans += ( + 'WARNING: "[base]%stest_log.py", %d, test_simple_log\n' + ' (warn)\n' % (os.path.sep, lineno) + ) self.assertEqual(self.stream.getvalue(), ans) def test_default_verbosity(self): # Testing positional base, configurable verbosity log = StringIO() with LoggingIntercept(log): - self.handler = LogHandler( - os.path.dirname(__file__), - stream = self.stream, - ) + self.handler = LogHandler(os.path.dirname(__file__), stream=self.stream) self.assertIn('LogHandler class has been deprecated', log.getvalue()) logger.addHandler(self.handler) logger.setLevel(logging.WARNING) logger.warning("(warn)") lineno = getframeinfo(currentframe()).lineno - 1 - ans = 'WARNING: "[base]%stest_log.py", %d, test_default_verbosity\n' \ - ' (warn)\n' % (os.path.sep, lineno,) + ans = ( + 'WARNING: "[base]%stest_log.py", %d, test_default_verbosity\n' + ' (warn)\n' % (os.path.sep, lineno) + ) self.assertEqual(self.stream.getvalue(), ans) @@ -114,6 +120,7 @@ def test_style_options(self): with self.assertRaisesRegex(ValueError, 'unrecognized style flag "s"'): WrappingFormatter(style='s') + class TestLegacyPyomoFormatter(unittest.TestCase): def setUp(self): self.stream = StringIO() @@ -124,20 +131,20 @@ def tearDown(self): logger.removeHandler(self.handler) def test_unallowed_options(self): - with self.assertRaisesRegex( - ValueError, "'fmt' is not a valid option"): + with self.assertRaisesRegex(ValueError, "'fmt' is not a valid option"): LegacyPyomoFormatter(fmt='%(message)') - with self.assertRaisesRegex( - ValueError, "'style' is not a valid option"): + with self.assertRaisesRegex(ValueError, "'style' is not a valid option"): LegacyPyomoFormatter(style='%') def test_simple_log(self): # Testing positional base, configurable verbosity - self.handler.setFormatter(LegacyPyomoFormatter( - base=os.path.dirname(__file__), - verbosity=lambda: logger.isEnabledFor(logging.DEBUG) - )) + self.handler.setFormatter( + LegacyPyomoFormatter( + base=os.path.dirname(__file__), + verbosity=lambda: logger.isEnabledFor(logging.DEBUG), + ) + ) logger.setLevel(logging.WARNING) logger.info("(info)") @@ -149,22 +156,24 @@ def test_simple_log(self): logger.setLevel(logging.DEBUG) logger.warning("(warn)") lineno = getframeinfo(currentframe()).lineno - 1 - ans += 'WARNING: "[base]%stest_log.py", %d, test_simple_log\n' \ - ' (warn)\n' % (os.path.sep, lineno,) + ans += ( + 'WARNING: "[base]%stest_log.py", %d, test_simple_log\n' + ' (warn)\n' % (os.path.sep, lineno) + ) self.assertEqual(self.stream.getvalue(), ans) def test_alternate_base(self): - self.handler.setFormatter(LegacyPyomoFormatter( - base = 'log_config', - )) + self.handler.setFormatter(LegacyPyomoFormatter(base='log_config')) logger.setLevel(logging.WARNING) logger.info("(info)") self.assertEqual(self.stream.getvalue(), "") logger.warning("(warn)") lineno = getframeinfo(currentframe()).lineno - 1 - ans = 'WARNING: "%s", %d, test_alternate_base\n' \ - ' (warn)\n' % (filename, lineno,) + ans = 'WARNING: "%s", %d, test_alternate_base\n (warn)\n' % ( + filename, + lineno, + ) self.assertEqual(self.stream.getvalue(), ans) def test_no_base(self): @@ -175,15 +184,16 @@ def test_no_base(self): self.assertEqual(self.stream.getvalue(), "") logger.warning("(warn)") lineno = getframeinfo(currentframe()).lineno - 1 - ans = 'WARNING: "%s", %d, test_no_base\n' \ - ' (warn)\n' % (filename, lineno,) + ans = 'WARNING: "%s", %d, test_no_base\n (warn)\n' % (filename, lineno) self.assertEqual(self.stream.getvalue(), ans) def test_no_message(self): - self.handler.setFormatter(LegacyPyomoFormatter( - base=os.path.dirname(__file__), - verbosity=lambda: logger.isEnabledFor(logging.DEBUG) - )) + self.handler.setFormatter( + LegacyPyomoFormatter( + base=os.path.dirname(__file__), + verbosity=lambda: logger.isEnabledFor(logging.DEBUG), + ) + ) logger.setLevel(logging.WARNING) logger.info("") @@ -196,15 +206,19 @@ def test_no_message(self): logger.setLevel(logging.DEBUG) logger.warning("") lineno = getframeinfo(currentframe()).lineno - 1 - ans += 'WARNING: "[base]%stest_log.py", %d, test_no_message\n\n' \ - % (os.path.sep, lineno,) + ans += 'WARNING: "[base]%stest_log.py", %d, test_no_message\n\n' % ( + os.path.sep, + lineno, + ) self.assertEqual(self.stream.getvalue(), ans) def test_blank_lines(self): - self.handler.setFormatter(LegacyPyomoFormatter( - base=os.path.dirname(__file__), - verbosity=lambda: logger.isEnabledFor(logging.DEBUG), - )) + self.handler.setFormatter( + LegacyPyomoFormatter( + base=os.path.dirname(__file__), + verbosity=lambda: logger.isEnabledFor(logging.DEBUG), + ) + ) logger.setLevel(logging.WARNING) logger.warning("\n\nthis is a message.\n\n\n") @@ -214,16 +228,20 @@ def test_blank_lines(self): logger.setLevel(logging.DEBUG) logger.warning("\n\nthis is a message.\n\n\n") lineno = getframeinfo(currentframe()).lineno - 1 - ans += 'WARNING: "[base]%stest_log.py", %d, test_blank_lines\n' \ - " this is a message.\n" % (os.path.sep, lineno) + ans += ( + 'WARNING: "[base]%stest_log.py", %d, test_blank_lines\n' + " this is a message.\n" % (os.path.sep, lineno) + ) self.assertEqual(self.stream.getvalue(), ans) def test_numbered_level(self): - testname ='test_numbered_level' - self.handler.setFormatter(LegacyPyomoFormatter( - base=os.path.dirname(__file__), - verbosity=lambda: logger.isEnabledFor(logging.DEBUG), - )) + testname = 'test_numbered_level' + self.handler.setFormatter( + LegacyPyomoFormatter( + base=os.path.dirname(__file__), + verbosity=lambda: logger.isEnabledFor(logging.DEBUG), + ) + ) logger.setLevel(logging.WARNING) logger.log(45, "(hi)") @@ -237,96 +255,120 @@ def test_numbered_level(self): logger.setLevel(logging.DEBUG) logger.log(45, "(hi)") lineno = getframeinfo(currentframe()).lineno - 1 - ans += 'Level 45: "[base]%stest_log.py", %d, %s\n' \ - ' (hi)\n' % (os.path.sep, lineno, testname) + ans += 'Level 45: "[base]%stest_log.py", %d, %s\n (hi)\n' % ( + os.path.sep, + lineno, + testname, + ) self.assertEqual(self.stream.getvalue(), ans) logger.log(45, "") lineno = getframeinfo(currentframe()).lineno - 1 - ans += 'Level 45: "[base]%stest_log.py", %d, %s\n\n' \ - % (os.path.sep, lineno, testname) + ans += 'Level 45: "[base]%stest_log.py", %d, %s\n\n' % ( + os.path.sep, + lineno, + testname, + ) self.assertEqual(self.stream.getvalue(), ans) def test_long_messages(self): - self.handler.setFormatter(LegacyPyomoFormatter( - base=os.path.dirname(__file__), - verbosity=lambda: logger.isEnabledFor(logging.DEBUG) - )) - - msg = ("This is a long message\n" - "\n" - "With some kind of internal formatting\n" - " - including a bulleted list\n" - " - list 2 ") + self.handler.setFormatter( + LegacyPyomoFormatter( + base=os.path.dirname(__file__), + verbosity=lambda: logger.isEnabledFor(logging.DEBUG), + ) + ) + + msg = ( + "This is a long message\n" + "\n" + "With some kind of internal formatting\n" + " - including a bulleted list\n" + " - list 2 " + ) logger.setLevel(logging.WARNING) logger.warning(msg) - ans = ( "WARNING: This is a long message\n" - "\n" - " With some kind of internal formatting\n" - " - including a bulleted list\n" - " - list 2\n" ) + ans = ( + "WARNING: This is a long message\n" + "\n" + " With some kind of internal formatting\n" + " - including a bulleted list\n" + " - list 2\n" + ) self.assertEqual(self.stream.getvalue(), ans) logger.setLevel(logging.DEBUG) logger.info(msg) lineno = getframeinfo(currentframe()).lineno - 1 - ans += ( 'INFO: "[base]%stest_log.py", %d, test_long_messages\n' - " This is a long message\n" - "\n" - " With some kind of internal formatting\n" - " - including a bulleted list\n" - " - list 2\n" % (os.path.sep, lineno,)) + ans += ( + 'INFO: "[base]%stest_log.py", %d, test_long_messages\n' + " This is a long message\n" + "\n" + " With some kind of internal formatting\n" + " - including a bulleted list\n" + " - list 2\n" % (os.path.sep, lineno) + ) self.assertEqual(self.stream.getvalue(), ans) # test trailing newline msg += "\n" logger.setLevel(logging.WARNING) logger.warning(msg) - ans += ( "WARNING: This is a long message\n" - "\n" - " With some kind of internal formatting\n" - " - including a bulleted list\n" - " - list 2\n" ) + ans += ( + "WARNING: This is a long message\n" + "\n" + " With some kind of internal formatting\n" + " - including a bulleted list\n" + " - list 2\n" + ) self.assertEqual(self.stream.getvalue(), ans) logger.setLevel(logging.DEBUG) logger.info(msg) lineno = getframeinfo(currentframe()).lineno - 1 - ans += ( 'INFO: "[base]%stest_log.py", %d, test_long_messages\n' - " This is a long message\n" - "\n" - " With some kind of internal formatting\n" - " - including a bulleted list\n" - " - list 2\n" % (os.path.sep, lineno,)) + ans += ( + 'INFO: "[base]%stest_log.py", %d, test_long_messages\n' + " This is a long message\n" + "\n" + " With some kind of internal formatting\n" + " - including a bulleted list\n" + " - list 2\n" % (os.path.sep, lineno) + ) self.assertEqual(self.stream.getvalue(), ans) # test initial and final blank lines msg = "\n" + msg + "\n\n" logger.setLevel(logging.WARNING) logger.warning(msg) - ans += ( "WARNING: This is a long message\n" - "\n" - " With some kind of internal formatting\n" - " - including a bulleted list\n" - " - list 2\n" ) + ans += ( + "WARNING: This is a long message\n" + "\n" + " With some kind of internal formatting\n" + " - including a bulleted list\n" + " - list 2\n" + ) self.assertEqual(self.stream.getvalue(), ans) logger.setLevel(logging.DEBUG) logger.info(msg) lineno = getframeinfo(currentframe()).lineno - 1 - ans += ( 'INFO: "[base]%stest_log.py", %d, test_long_messages\n' - " This is a long message\n" - "\n" - " With some kind of internal formatting\n" - " - including a bulleted list\n" - " - list 2\n" % (os.path.sep, lineno,)) + ans += ( + 'INFO: "[base]%stest_log.py", %d, test_long_messages\n' + " This is a long message\n" + "\n" + " With some kind of internal formatting\n" + " - including a bulleted list\n" + " - list 2\n" % (os.path.sep, lineno) + ) self.assertEqual(self.stream.getvalue(), ans) def test_verbatim(self): - self.handler.setFormatter(LegacyPyomoFormatter( - base=os.path.dirname(__file__), - verbosity=lambda: logger.isEnabledFor(logging.DEBUG) - )) + self.handler.setFormatter( + LegacyPyomoFormatter( + base=os.path.dirname(__file__), + verbosity=lambda: logger.isEnabledFor(logging.DEBUG), + ) + ) msg = ( "This is a long message\n" @@ -346,6 +388,36 @@ def test_verbatim(self): " - including a\n" " long list\n" " - and a short list \n" + "\n" + "And a section\n" + "~~~~~~~~~~~~~\n" + "\n" + " | and\n" + " | a line\n" + " | block\n" + "\n" + "And a\n" + "quoted literal::\n" + "\n" + ">> he said\n" + ">\n" + "> and they replied\n" + "\n" + "this is\n" + "outside the quote\n" + "\n" + "indented literal::\n" + "\n" + " Here is\n" + " an indented\n" + "\n" + " literal\n" + " with a blank line\n" + "\n" + "Finally, an invalid::\n" + "\n" + "quote\n" + "block\n" ) logger.setLevel(logging.WARNING) logger.warning(msg) @@ -362,9 +434,38 @@ def test_verbatim(self): " And some internal non-verbatim\n" " - including a long list\n" " - and a short list\n" + "\n" + " And a section\n" + " ~~~~~~~~~~~~~\n" + "\n" + " | and\n" + " | a line\n" + " | block\n" + "\n" + " And a quoted literal::\n" + "\n" + " >> he said\n" + " >\n" + " > and they replied\n" + "\n" + " this is outside the quote\n" + "\n" + " indented literal::\n" + "\n" + " Here is\n" + " an indented\n" + "\n" + " literal\n" + " with a blank line\n" + "\n" + " Finally, an invalid::\n" + "\n" + " quote block\n" ) + self.maxDiff = None self.assertEqual(self.stream.getvalue(), ans) + class TestLogStream(unittest.TestCase): def test_log_stream(self): ls = LogStream(logging.INFO, logging.getLogger('pyomo')) @@ -375,22 +476,18 @@ def test_log_stream(self): with LI as OUT: ls.write("line 1\nline 2\n") - self.assertEqual( - OUT.getvalue(), "INFO: line 1\nINFO: line 2\n") + self.assertEqual(OUT.getvalue(), "INFO: line 1\nINFO: line 2\n") with LI as OUT: ls.write("line 1\nline 2") - self.assertEqual( - OUT.getvalue(), "INFO: line 1\n") + self.assertEqual(OUT.getvalue(), "INFO: line 1\n") with LI as OUT: ls.flush() - self.assertEqual( - OUT.getvalue(), "INFO: line 2\n") + self.assertEqual(OUT.getvalue(), "INFO: line 2\n") # Second flush should do nothing ls.flush() - self.assertEqual( - OUT.getvalue(), "INFO: line 2\n") + self.assertEqual(OUT.getvalue(), "INFO: line 2\n") with LI as OUT: with LogStream(logging.INFO, logging.getLogger('pyomo')) as ls: diff --git a/pyomo/common/tests/test_modeling.py b/pyomo/common/tests/test_modeling.py index 5063f40f60d..0684d77b2e9 100644 --- a/pyomo/common/tests/test_modeling.py +++ b/pyomo/common/tests/test_modeling.py @@ -16,6 +16,7 @@ from pyomo.environ import ConcreteModel, Var from pyomo.common.modeling import unique_component_name, NOTSET + class TestModeling(unittest.TestCase): def test_unique_component_name(self): m = ConcreteModel() diff --git a/pyomo/common/tests/test_multithread.py b/pyomo/common/tests/test_multithread.py index e19d0d0308d..ae1bc48be44 100644 --- a/pyomo/common/tests/test_multithread.py +++ b/pyomo/common/tests/test_multithread.py @@ -4,8 +4,10 @@ from threading import Thread from pyomo.opt.base.solvers import check_available_solvers -class Dummy(): + +class Dummy: """asdfg""" + def __init__(self): self.number = 1 self.rnd = threading.get_ident() @@ -15,7 +17,6 @@ def __str__(self): class TestMultithreading(unittest.TestCase): - def test_wrapper_docs(self): sut = MultiThreadWrapper(Dummy) self.assertEqual(sut.__class__.__doc__, Dummy.__doc__) @@ -27,16 +28,20 @@ def test_wrapper_field(self): def test_independent_writes(self): sut = MultiThreadWrapper(Dummy) sut.number = 2 + def thread_func(): self.assertEqual(sut.number, 1) + t = Thread(target=thread_func) t.start() t.join() def test_independent_writes2(self): sut = MultiThreadWrapper(Dummy) + def thread_func(): sut.number = 2 + t = Thread(target=thread_func) t.start() t.join() @@ -45,16 +50,20 @@ def thread_func(): def test_independent_del(self): sut = MultiThreadWrapper(Dummy) del sut.number + def thread_func(): self.assertEqual(sut.number, 1) + t = Thread(target=thread_func) t.start() t.join() def test_independent_del2(self): sut = MultiThreadWrapper(Dummy) + def thread_func(): del sut.number + t = Thread(target=thread_func) t.start() t.join() @@ -69,16 +78,20 @@ def test_main(self): sut = MultiThreadWrapperWithMain(Dummy) self.assertIs(sut.main_thread.rnd, sut.rnd) sut.number = 5 + def thread_func(): self.assertEqual(sut.number, 1) self.assertIsNot(sut.main_thread.rnd, sut.rnd) del sut.number + t = Thread(target=thread_func) t.start() t.join() self.assertEqual(sut.number, 5) - - @unittest.skipIf(len(check_available_solvers('glpk')) < 1, "glpk solver not available") + + @unittest.skipIf( + len(check_available_solvers('glpk')) < 1, "glpk solver not available" + ) def test_solve(self): # Based on the minimal example in https://github.com/Pyomo/pyomo/issues/2475 import pyomo.environ as pyo @@ -94,7 +107,7 @@ def test_solve(self): def test(model): opt = SolverFactory('glpk') - opt.solve(model) + opt.solve(model) # Iterate, adding a cut to exclude the previously found solution for _ in range(5): @@ -103,8 +116,8 @@ def test(model): if pyo.value(model.x[j]) < 0.5: expr += model.x[j] else: - expr += (1 - model.x[j]) - model.cuts.add( expr >= 1 ) + expr += 1 - model.x[j] + model.cuts.add(expr >= 1) results = opt.solve(model) return results, [v for v in model.x] @@ -112,7 +125,9 @@ def test(model): results = tp.map(test, [model.clone() for i in range(4)]) tp.close() for result, _ in results: - self.assertEqual(result.solver.termination_condition, pyo.TerminationCondition.optimal) + self.assertEqual( + result.solver.termination_condition, pyo.TerminationCondition.optimal + ) results = list(results) for _, values in results[1:]: self.assertEqual(values, results[0][1]) diff --git a/pyomo/common/tests/test_orderedset.py b/pyomo/common/tests/test_orderedset.py index 7c9443e2834..d87bebc1e4a 100644 --- a/pyomo/common/tests/test_orderedset.py +++ b/pyomo/common/tests/test_orderedset.py @@ -14,6 +14,7 @@ from pyomo.common.collections import OrderedSet + class testOrderedSet(unittest.TestCase): def test_constructor(self): a = OrderedSet() @@ -21,7 +22,7 @@ def test_constructor(self): self.assertEqual(list(a), []) self.assertEqual(str(a), 'OrderedSet()') - ref = [1,9,'a',4,2,None] + ref = [1, 9, 'a', 4, 2, None] a = OrderedSet(ref) self.assertEqual(len(a), 6) self.assertEqual(list(a), ref) @@ -41,29 +42,29 @@ def test_in_add(self): self.assertIn(None, a) a.add(0) - self.assertEqual(list(a), [None,1,0]) + self.assertEqual(list(a), [None, 1, 0]) - # Adding a member alrady in the set does not change the ordering + # Adding a member already in the set does not change the ordering a.add(1) - self.assertEqual(list(a), [None,1,0]) + self.assertEqual(list(a), [None, 1, 0]) def test_discard_remove_clear(self): - a = OrderedSet([1,3,2,4]) + a = OrderedSet([1, 3, 2, 4]) a.discard(3) - self.assertEqual(list(a), [1,2,4]) + self.assertEqual(list(a), [1, 2, 4]) a.discard(3) - self.assertEqual(list(a), [1,2,4]) + self.assertEqual(list(a), [1, 2, 4]) a.remove(2) - self.assertEqual(list(a), [1,4]) - with self.assertRaisesRegex(KeyError,'2'): + self.assertEqual(list(a), [1, 4]) + with self.assertRaisesRegex(KeyError, '2'): a.remove(2) - + a.clear() self.assertEqual(list(a), []) def test_pickle(self): - ref = [1,9,'a',4,2,None] + ref = [1, 9, 'a', 4, 2, None] a = OrderedSet(ref) b = pickle.loads(pickle.dumps(a)) self.assertEqual(a, b) @@ -87,6 +88,6 @@ def test_intersection(self): self.assertEqual(list(b), [3, 4, 'c', 'd']) def test_reversed(self): - a = OrderedSet([1,5,3]) + a = OrderedSet([1, 5, 3]) self.assertEqual(list(a), [1, 5, 3]) self.assertEqual(list(reversed(a)), [3, 5, 1]) diff --git a/pyomo/common/tests/test_plugin.py b/pyomo/common/tests/test_plugin.py index fafccc7a80c..86d136dd9d1 100644 --- a/pyomo/common/tests/test_plugin.py +++ b/pyomo/common/tests/test_plugin.py @@ -14,14 +14,22 @@ from pyomo.common.unittest import TestCase from pyomo.common.log import LoggingIntercept -from pyomo.common.plugin import ( - Interface, Plugin, SingletonPlugin, ExtensionPoint, implements, alias, - PluginFactory, PluginError, PluginGlobals, DeprecatedInterface +from pyomo.common.plugin_base import ( + Interface, + Plugin, + SingletonPlugin, + ExtensionPoint, + implements, + alias, + PluginFactory, + PluginError, + PluginGlobals, + DeprecatedInterface, ) + class TestPlugin(TestCase): def test_plugin_interface(self): - class IFoo(Interface): pass @@ -35,23 +43,17 @@ class myFoo(Plugin): a = myFoo() self.assertEqual(ep.extensions(), []) - self.assertEqual(IFoo._plugins, { - myFoo: {0: (weakref.ref(a), False)}, - }) + self.assertEqual(IFoo._plugins, {myFoo: {0: (weakref.ref(a), False)}}) self.assertEqual(len(ep), 0) a.activate() self.assertEqual(ep.extensions(), [a]) - self.assertEqual(IFoo._plugins, { - myFoo: {0: (weakref.ref(a), True)}, - }) + self.assertEqual(IFoo._plugins, {myFoo: {0: (weakref.ref(a), True)}}) self.assertEqual(len(ep), 1) a.deactivate() self.assertEqual(ep.extensions(), []) - self.assertEqual(IFoo._plugins, { - myFoo: {0: (weakref.ref(a), False)}, - }) + self.assertEqual(IFoo._plugins, {myFoo: {0: (weakref.ref(a), False)}}) self.assertEqual(len(ep), 0) # Free a and make sure the garbage collector collects it (so @@ -66,7 +68,6 @@ class myFoo(Plugin): self.assertEqual(len(ep), 0) def test_singleton_plugin_interface(self): - class IFoo(Interface): pass @@ -75,56 +76,69 @@ class mySingleton(SingletonPlugin): ep = ExtensionPoint(IFoo) self.assertEqual(ep.extensions(), []) - self.assertEqual(IFoo._plugins, { - mySingleton: {0: (weakref.ref(mySingleton.__singleton__), False)}, - }) + self.assertEqual( + IFoo._plugins, + {mySingleton: {0: (weakref.ref(mySingleton.__singleton__), False)}}, + ) self.assertIsNotNone(mySingleton.__singleton__) with self.assertRaisesRegex( - RuntimeError, - 'Cannot create multiple singleton plugin instances'): + RuntimeError, 'Cannot create multiple singleton plugin instances' + ): mySingleton() class myDerivedSingleton(mySingleton): pass + self.assertEqual(ep.extensions(), []) - self.assertEqual(IFoo._plugins, { - mySingleton: {0: (weakref.ref(mySingleton.__singleton__), False)}, - myDerivedSingleton: { - 1: (weakref.ref(myDerivedSingleton.__singleton__), False)}, - }) + self.assertEqual( + IFoo._plugins, + { + mySingleton: {0: (weakref.ref(mySingleton.__singleton__), False)}, + myDerivedSingleton: { + 1: (weakref.ref(myDerivedSingleton.__singleton__), False) + }, + }, + ) self.assertIsNotNone(myDerivedSingleton.__singleton__) - self.assertIsNot(mySingleton.__singleton__, - myDerivedSingleton.__singleton__) + self.assertIsNot(mySingleton.__singleton__, myDerivedSingleton.__singleton__) class myDerivedNonSingleton(mySingleton): __singleton__ = False self.assertEqual(ep.extensions(), []) - self.assertEqual(IFoo._plugins, { - mySingleton: {0: (weakref.ref(mySingleton.__singleton__), False)}, - myDerivedSingleton: { - 1: (weakref.ref(myDerivedSingleton.__singleton__), False)}, - myDerivedNonSingleton: {}, - }) + self.assertEqual( + IFoo._plugins, + { + mySingleton: {0: (weakref.ref(mySingleton.__singleton__), False)}, + myDerivedSingleton: { + 1: (weakref.ref(myDerivedSingleton.__singleton__), False) + }, + myDerivedNonSingleton: {}, + }, + ) self.assertIsNone(myDerivedNonSingleton.__singleton__) class myServiceSingleton(mySingleton): implements(IFoo, service=True) self.assertEqual(ep.extensions(), [myServiceSingleton.__singleton__]) - self.assertEqual(IFoo._plugins, { - mySingleton: {0: (weakref.ref(mySingleton.__singleton__), False)}, - myDerivedSingleton: { - 1: (weakref.ref(myDerivedSingleton.__singleton__), False)}, - myDerivedNonSingleton: {}, - myServiceSingleton: { - 2: (weakref.ref(myServiceSingleton.__singleton__), True)}, - }) + self.assertEqual( + IFoo._plugins, + { + mySingleton: {0: (weakref.ref(mySingleton.__singleton__), False)}, + myDerivedSingleton: { + 1: (weakref.ref(myDerivedSingleton.__singleton__), False) + }, + myDerivedNonSingleton: {}, + myServiceSingleton: { + 2: (weakref.ref(myServiceSingleton.__singleton__), True) + }, + }, + ) self.assertIsNotNone(myServiceSingleton.__singleton__) def test_inherit_interface(self): - class IFoo(Interface): def fcn(self): return 'base' @@ -160,7 +174,6 @@ class myCombined(myFoo): self.assertEqual(a.mock(), 'mock') def test_plugin_factory(self): - class IFoo(Interface): pass @@ -214,9 +227,10 @@ class myFoo(Plugin): self.assertTrue(a.enabled()) self.assertTrue(b.enabled()) with self.assertRaisesRegex( - PluginError, - r"The ExtensionPoint does not have a unique service! " - r"2 services are defined for interface 'IFoo' \(key=None\)."): + PluginError, + r"The ExtensionPoint does not have a unique service! " + r"2 services are defined for interface 'IFoo' \(key=None\).", + ): self.assertIsNone(ep.service()) a.deactivate() @@ -238,66 +252,86 @@ def test_deprecation(self): out = StringIO() with LoggingIntercept(out): PluginGlobals.add_env(None) - self.assertIn("Pyomo only supports a single global environment", - out.getvalue().replace('\n', ' ')) + self.assertIn( + "Pyomo only supports a single global environment", + out.getvalue().replace('\n', ' '), + ) out = StringIO() with LoggingIntercept(out): PluginGlobals.pop_env() - self.assertIn("Pyomo only supports a single global environment", - out.getvalue().replace('\n', ' ')) + self.assertIn( + "Pyomo only supports a single global environment", + out.getvalue().replace('\n', ' '), + ) out = StringIO() with LoggingIntercept(out): PluginGlobals.clear() - self.assertIn("Pyomo only supports a single global environment", - out.getvalue().replace('\n', ' ')) + self.assertIn( + "Pyomo only supports a single global environment", + out.getvalue().replace('\n', ' '), + ) class IFoo(Interface): pass out = StringIO() with LoggingIntercept(out): + class myFoo(Plugin): alias('myFoo', subclass=True) - self.assertIn("alias() function does not support the subclass", - out.getvalue().replace('\n', ' ')) + + self.assertIn( + "alias() function does not support the subclass", + out.getvalue().replace('\n', ' '), + ) out = StringIO() with LoggingIntercept(out): + class myFoo(Plugin): implements(IFoo, namespace='here') - self.assertIn("only supports a single global namespace.", - out.getvalue().replace('\n', ' ')) + + self.assertIn( + "only supports a single global namespace.", + out.getvalue().replace('\n', ' '), + ) class IGone(DeprecatedInterface): + __deprecated_version__ = '1.2.3' pass out = StringIO() with LoggingIntercept(out): + class myFoo(Plugin): implements(IGone) - self.assertIn("The IGone interface has been deprecated", - out.getvalue().replace('\n', ' ')) + + self.assertIn( + "The IGone interface has been deprecated", out.getvalue().replace('\n', ' ') + ) out = StringIO() with LoggingIntercept(out): ExtensionPoint(IGone).extensions() - self.assertIn("The IGone interface has been deprecated", - out.getvalue().replace('\n', ' ')) + self.assertIn( + "The IGone interface has been deprecated", out.getvalue().replace('\n', ' ') + ) class ICustomGone(DeprecatedInterface): __deprecated_message__ = 'This interface is gone!' + __deprecated_version__ = '1.2.3' out = StringIO() with LoggingIntercept(out): + class myFoo(Plugin): implements(ICustomGone) - self.assertIn("This interface is gone!", - out.getvalue().replace('\n', ' ')) + + self.assertIn("This interface is gone!", out.getvalue().replace('\n', ' ')) out = StringIO() with LoggingIntercept(out): ExtensionPoint(ICustomGone).extensions() - self.assertIn("This interface is gone!", - out.getvalue().replace('\n', ' ')) + self.assertIn("This interface is gone!", out.getvalue().replace('\n', ' ')) diff --git a/pyomo/common/tests/test_sorting.py b/pyomo/common/tests/test_sorting.py index b6aa7208412..7a9fe5ac923 100644 --- a/pyomo/common/tests/test_sorting.py +++ b/pyomo/common/tests/test_sorting.py @@ -18,34 +18,44 @@ from pyomo.common.sorting import sorted_robust, _robust_sort_keyfcn + # The following are custom types used for testing sorted_robust. They # are declared at the module scope to ensure consistent generation of # the class __name__. class LikeFloat(object): def __init__(self, n): self.n = n + def __lt__(self, other): return self.n < other + def __gt__(self, other): return self.n > other + class Comparable(object): def __init__(self, n): self.n = str(n) + def __lt__(self, other): return self.n < other + def __gt__(self, other): return self.n > other + class ToStr(object): def __init__(self, n): self.n = str(n) + def __str__(self): return self.n + class NoStr(object): def __init__(self, n): self.n = str(n) + def __str__(self): raise ValueError('') @@ -75,26 +85,20 @@ def test_sorted_robust(self): def test_user_key(self): # ensure it doesn't throw an error # Test for issue https://github.com/Pyomo/pyomo/issues/2019 - sorted_robust( - [ - (("10_1", 2), None), - ((10, 2), None) - ], - key=lambda x: x[0] - ) + sorted_robust([(("10_1", 2), None), ((10, 2), None)], key=lambda x: x[0]) def test_unknown_types(self): orig = [ - LikeFloat(4), # 0 - Comparable('hello'), # 1 - LikeFloat(1), # 2 - 2., # 3 - Comparable('world'), # 4 - ToStr(1), # 5 - NoStr('bogus'), # 6 - ToStr('a'), # 7 - ToStr('A'), # 8 - 3, # 9 + LikeFloat(4), # 0 + Comparable('hello'), # 1 + LikeFloat(1), # 2 + 2.0, # 3 + Comparable('world'), # 4 + ToStr(1), # 5 + NoStr('bogus'), # 6 + ToStr('a'), # 7 + ToStr('A'), # 8 + 3, # 9 ] ref = [orig[i] for i in (1, 4, 6, 5, 8, 7, 2, 3, 9, 0)] @@ -102,15 +106,13 @@ def test_unknown_types(self): self.assertEqual(len(orig), len(ans)) for _r, _a in zip(ref, ans): self.assertIs(_r, _a) - self.assertEqual(_robust_sort_keyfcn._typemap[LikeFloat], - (1, float.__name__)) - self.assertEqual(_robust_sort_keyfcn._typemap[Comparable], - (1, Comparable.__name__)) - self.assertEqual(_robust_sort_keyfcn._typemap[ToStr], - (2, ToStr.__name__)) - self.assertEqual(_robust_sort_keyfcn._typemap[NoStr], - (3, NoStr.__name__)) + self.assertEqual(_robust_sort_keyfcn._typemap[LikeFloat], (1, float.__name__)) + self.assertEqual( + _robust_sort_keyfcn._typemap[Comparable], (1, Comparable.__name__) + ) + self.assertEqual(_robust_sort_keyfcn._typemap[ToStr], (2, ToStr.__name__)) + self.assertEqual(_robust_sort_keyfcn._typemap[NoStr], (3, NoStr.__name__)) + if __name__ == "__main__": unittest.main() - diff --git a/pyomo/common/tests/test_tee.py b/pyomo/common/tests/test_tee.py index 8533cd117c4..666a431631f 100644 --- a/pyomo/common/tests/test_tee.py +++ b/pyomo/common/tests/test_tee.py @@ -21,11 +21,12 @@ from pyomo.common.tempfiles import TempfileManager import pyomo.common.tee as tee + class TestTeeStream(unittest.TestCase): def test_stdout(self): a = StringIO() b = StringIO() - with tee.TeeStream(a,b) as t: + with tee.TeeStream(a, b) as t: t.STDOUT.write("Hello\n") self.assertEqual(a.getvalue(), "Hello\n") self.assertEqual(b.getvalue(), "Hello\n") @@ -38,8 +39,10 @@ def test_err_and_out_are_different(self): self.assertIs(err, t.STDERR) self.assertIsNot(out, err) - @unittest.skipIf(not tee._peek_available, - "Requires the _mergedReader, but _peek_available==False") + @unittest.skipIf( + not tee._peek_available, + "Requires the _mergedReader, but _peek_available==False", + ) def test_merge_out_and_err(self): # Test that the STDERR/STDOUT streams are merged correctly # (i.e., STDOUT is line buffered and STDERR is not). This merge @@ -49,12 +52,12 @@ def test_merge_out_and_err(self): b = StringIO() # make sure this doesn't accidentally become a very long wait assert tee._poll_interval <= 0.1 - with tee.TeeStream(a,b) as t: + with tee.TeeStream(a, b) as t: # This is a slightly nondeterministic (on Windows), so a # flush() and short pause should help t.STDOUT.write("Hello\nWorld") t.STDOUT.flush() - time.sleep(tee._poll_interval*100) + time.sleep(tee._poll_interval * 100) t.STDERR.write("interrupting\ncow") t.STDERR.flush() # For determinism, it is important that the STDERR message @@ -65,9 +68,9 @@ def test_merge_out_and_err(self): while 'cow' not in a.getvalue() and time.time() - start_time < 1: time.sleep(tee._poll_interval) acceptable_results = { - "Hello\ninterrupting\ncowWorld", # expected - "interrupting\ncowHello\nWorld", # Windows occasionally puts - # all error before stdout + "Hello\ninterrupting\ncowWorld", # expected + "interrupting\ncowHello\nWorld", # Windows occasionally puts + # all error before stdout } self.assertIn(a.getvalue(), acceptable_results) self.assertEqual(b.getvalue(), a.getvalue()) @@ -77,7 +80,7 @@ def test_merged_out_and_err_without_peek(self): b = StringIO() try: _tmp, tee._peek_available = tee._peek_available, False - with tee.TeeStream(a,b) as t: + with tee.TeeStream(a, b) as t: # Ensure both threads are running t.STDOUT t.STDERR @@ -85,7 +88,7 @@ def test_merged_out_and_err_without_peek(self): # nondeterministic, so a short pause should help t.STDERR.write("Hello\n") t.STDERR.flush() - time.sleep(tee._poll_interval*2) + time.sleep(tee._poll_interval * 2) t.STDOUT.write("World\n") finally: tee._peek_available = _tmp @@ -95,7 +98,7 @@ def test_merged_out_and_err_without_peek(self): def test_binary_tee(self): a = BytesIO() b = BytesIO() - with tee.TeeStream(a,b) as t: + with tee.TeeStream(a, b) as t: t.open('wb').write(b"Hello\n") self.assertEqual(a.getvalue(), b"Hello\n") self.assertEqual(b.getvalue(), b"Hello\n") @@ -115,7 +118,7 @@ def test_decoder_and_buffer_errors(self): "\t'Hello, '\n" "Stream handle closed with un-decoded characters in the decoder " "buffer that was not emitted to the output stream(s):\n" - "\tb'\\xc2'\n" + "\tb'\\xc2'\n", ) out = StringIO() @@ -127,7 +130,7 @@ def test_decoder_and_buffer_errors(self): self.assertRegex( log.getvalue(), r"^Output stream \(<.*?>\) closed before all output was written " - r"to it. The following was left in the output buffer:\n\t'hi\\n'\n$" + r"to it. The following was left in the output buffer:\n\t'hi\\n'\n$", ) def test_capture_output(self): @@ -140,8 +143,10 @@ def test_duplicate_capture_output(self): out = StringIO() capture = tee.capture_output(out) capture.setup() - try: - with self.assertRaisesRegex(RuntimeError, 'Duplicate call to capture_output.setup'): + try: + with self.assertRaisesRegex( + RuntimeError, 'Duplicate call to capture_output.setup' + ): capture.setup() finally: capture.reset() @@ -166,7 +171,8 @@ def test_capture_output_stack_error(self): b = tee.capture_output(OUT2) b.setup() with self.assertRaisesRegex( - RuntimeError, 'Captured output does not match sys.stdout'): + RuntimeError, 'Captured output does not match sys.stdout' + ): a.reset() b.tee = None finally: @@ -179,22 +185,24 @@ def write(self, data): _save = tee._poll_timeout, tee._poll_timeout_deadlock tee._poll_timeout = tee._poll_interval * 2**5 # 0.0032 - tee._poll_timeout_deadlock = tee._poll_interval * 2**7 # 0.0128 + tee._poll_timeout_deadlock = tee._poll_interval * 2**7 # 0.0128 try: with LoggingIntercept() as LOG, self.assertRaisesRegex( - RuntimeError, 'deadlock'): + RuntimeError, 'deadlock' + ): with tee.TeeStream(MockStream()) as t: err = t.STDERR err.write('*') self.assertEqual( 'Significant delay observed waiting to join reader ' 'threads, possible output stream deadlock\n', - LOG.getvalue() + LOG.getvalue(), ) finally: tee._poll_timeout, tee._poll_timeout_deadlock = _save + class TestFileDescriptor(unittest.TestCase): def setUp(self): self.out = sys.stdout @@ -220,7 +228,7 @@ def _generate_output(self, redirector): F.flush() def test_redirect_synchronize_stdout(self): - r,w = os.pipe() + r, w = os.pipe() os.dup2(w, 1) sys.stdout = os.fdopen(1, 'w', closefd=False) rd = tee.redirect_fd(synchronize=True) @@ -232,7 +240,7 @@ def test_redirect_synchronize_stdout(self): self.assertEqual(FILE.read(), "to_stdout_2\nto_fd1_2\n") def test_redirect_no_synchronize_stdout(self): - r,w = os.pipe() + r, w = os.pipe() os.dup2(w, 1) sys.stdout = os.fdopen(1, 'w', closefd=False) rd = tee.redirect_fd(synchronize=False) @@ -241,8 +249,7 @@ def test_redirect_no_synchronize_stdout(self): with os.fdopen(r, 'r') as FILE: os.close(w) os.close(1) - self.assertEqual(FILE.read(), - "to_stdout_1\nto_stdout_2\nto_fd1_2\n") + self.assertEqual(FILE.read(), "to_stdout_1\nto_stdout_2\nto_fd1_2\n") # Pytest's default capture method causes failures for the following # two tests. This re-implementation of the capfd fixture allows @@ -256,7 +263,7 @@ def capfd(self, capfd): def test_redirect_synchronize_stdout_not_fd1(self): self.capfd.disabled() - r,w = os.pipe() + r, w = os.pipe() os.dup2(w, 1) rd = tee.redirect_fd(synchronize=True) self._generate_output(rd) @@ -268,7 +275,7 @@ def test_redirect_synchronize_stdout_not_fd1(self): def test_redirect_no_synchronize_stdout_not_fd1(self): self.capfd.disabled() - r,w = os.pipe() + r, w = os.pipe() os.dup2(w, 1) rd = tee.redirect_fd(synchronize=False) self._generate_output(rd) @@ -279,7 +286,7 @@ def test_redirect_no_synchronize_stdout_not_fd1(self): self.assertEqual(FILE.read(), "to_fd1_2\n") def test_redirect_synchronize_stringio(self): - r,w = os.pipe() + r, w = os.pipe() os.dup2(w, 1) try: sys.stdout, out = StringIO(), sys.stdout @@ -295,7 +302,7 @@ def test_redirect_synchronize_stringio(self): self.assertEqual(FILE.read(), "to_fd1_2\n") def test_redirect_no_synchronize_stringio(self): - r,w = os.pipe() + r, w = os.pipe() os.dup2(w, 1) try: sys.stdout, out = StringIO(), sys.stdout @@ -310,8 +317,8 @@ def test_redirect_no_synchronize_stringio(self): os.close(1) self.assertEqual(FILE.read(), "to_fd1_2\n") - def test_caputure_output_fd(self): - r,w = os.pipe() + def test_capture_output_fd(self): + r, w = os.pipe() os.dup2(w, 1) sys.stdout = os.fdopen(1, 'w', closefd=False) with tee.capture_output(capture_fd=True) as OUT: @@ -333,5 +340,6 @@ def test_caputure_output_fd(self): os.close(w) self.assertEqual(FILE.read(), "to_stdout_2\nto_fd1_2\n") + if __name__ == '__main__': unittest.main() diff --git a/pyomo/common/tests/test_tempfile.py b/pyomo/common/tests/test_tempfile.py index 0d22b47afb0..b82082ac1af 100644 --- a/pyomo/common/tests/test_tempfile.py +++ b/pyomo/common/tests/test_tempfile.py @@ -32,21 +32,21 @@ from pyomo.common.log import LoggingIntercept from pyomo.common.tempfiles import ( - TempfileManager, TempfileManagerClass, TempfileContextError, + TempfileManager, + TempfileManagerClass, + TempfileContextError, ) try: - from pyutilib.component.config.tempfiles import ( - TempfileManager as pyutilib_mngr - ) + from pyutilib.component.config.tempfiles import TempfileManager as pyutilib_mngr except ImportError: pyutilib_mngr = None old_tempdir = TempfileManager.tempdir tempdir = None -class Test_LegacyTestSuite(unittest.TestCase): +class Test_LegacyTestSuite(unittest.TestCase): def setUp(self): global tempdir tempdir = tempfile.mkdtemp() + os.sep @@ -58,7 +58,7 @@ def tearDown(self): TempfileManager.pop() TempfileManager.tempdir = old_tempdir if os.path.exists(tempdir): - shutil.rmtree(tempdir) + shutil.rmtree(tempdir) tempdir = None def test_add1(self): @@ -248,7 +248,6 @@ def test_create3_dir(self): class Test_TempfileManager(unittest.TestCase): - def setUp(self): self.TM = TempfileManagerClass() @@ -308,8 +307,9 @@ def test_add_tempfile(self): sub_fname = os.path.join(dname, "testfile") self.TM.add_tempfile(fname) with self.assertRaisesRegex( - IOError, "Temporary file does not exist: %s" - % sub_fname.replace('\\', '\\\\')): + IOError, + "Temporary file does not exist: %s" % sub_fname.replace('\\', '\\\\'), + ): self.TM.add_tempfile(sub_fname) self.TM.add_tempfile(sub_fname, exists=False) with open(sub_fname, "w") as FILE: @@ -334,7 +334,8 @@ def test_sequential_files(self): self.assertIsNone(self.TM.sequential_files()) self.assertIn( "The TempfileManager.sequential_files() method has been removed", - LOG.getvalue().replace('\n',' ')) + LOG.getvalue().replace('\n', ' '), + ) self.assertIsNone(self.TM.unique_files()) def test_gettempprefix(self): @@ -414,7 +415,7 @@ def test_shutdown(self): LOG.getvalue().strip(), "TempfileManagerClass instance: un-popped tempfile " "contexts still exist during TempfileManager instance " - "shutdown" + "shutdown", ) self.TM = TempfileManagerClass() @@ -432,7 +433,7 @@ def test_shutdown(self): "Undeleted entries:\n\t%s\n" "TempfileManagerClass instance: un-popped tempfile " "contexts still exist during TempfileManager instance " - "shutdown" % fname + "shutdown" % fname, ) # The TM is already shut down, so this should be a noop @@ -459,7 +460,7 @@ def test_del_clears_contexts(self): "Undeleted entries:\n\t%s\n" "TempfileManagerClass instance: un-popped tempfile " "contexts still exist during TempfileManager instance " - "shutdown" % fname + "shutdown" % fname, ) def test_tempfilemanager_as_context_manager(self): @@ -481,7 +482,7 @@ def test_tempfilemanager_as_context_manager(self): "the TempfileManager stack within a context manager " "(i.e., `with TempfileManager:`) but was not popped " "before the context manager exited. Popping the " - "context to preserve the stack integrity." + "context to preserve the stack integrity.", ) def test_tempfilecontext_as_context_manager(self): @@ -493,8 +494,10 @@ def test_tempfilecontext_as_context_manager(self): self.assertFalse(os.path.exists(fname)) self.assertEqual(LOG.getvalue(), "") - @unittest.skipIf(not sys.platform.lower().startswith('win'), - "test only applies to Windows platforms") + @unittest.skipIf( + not sys.platform.lower().startswith('win'), + "test only applies to Windows platforms", + ) def test_open_tempfile_windows(self): self.TM.push() fname = self.TM.create_tempfile() @@ -503,7 +506,8 @@ def test_open_tempfile_windows(self): _orig = tempfiles.deletion_errors_are_fatal tempfiles.deletion_errors_are_fatal = True with self.assertRaisesRegex( - WindowsError, ".*process cannot access the file"): + WindowsError, ".*process cannot access the file" + ): self.TM.pop() finally: tempfiles.deletion_errors_are_fatal = _orig @@ -524,8 +528,7 @@ def test_open_tempfile_windows(self): f.close() os.remove(fname) - @unittest.skipIf(pyutilib_mngr is None, - "deprecation test requires pyutilib") + @unittest.skipIf(pyutilib_mngr is None, "deprecation test requires pyutilib") def test_deprecated_tempdir(self): self.TM.push() try: @@ -539,25 +542,30 @@ def test_deprecated_tempdir(self): self.assertIn( "The use of the PyUtilib TempfileManager.tempdir " "to specify the default location for Pyomo " - "temporary files", LOG.getvalue().replace("\n", " ")) + "temporary files", + LOG.getvalue().replace("\n", " "), + ) with LoggingIntercept() as LOG: dname = self.TM.create_tempdir() self.assertIn( "The use of the PyUtilib TempfileManager.tempdir " "to specify the default location for Pyomo " - "temporary files", LOG.getvalue().replace("\n", " ")) + "temporary files", + LOG.getvalue().replace("\n", " "), + ) finally: self.TM.pop() pyutilib_mngr.tempdir = _orig def test_context(self): with self.assertRaisesRegex( - TempfileContextError, - "TempfileManager has no currently active context"): + TempfileContextError, "TempfileManager has no currently active context" + ): self.TM.context() ctx = self.TM.push() self.assertIs(ctx, self.TM.context()) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/common/tests/test_timing.py b/pyomo/common/tests/test_timing.py index 4e762b2397e..d2ce6175801 100644 --- a/pyomo/common/tests/test_timing.py +++ b/pyomo/common/tests/test_timing.py @@ -20,16 +20,21 @@ from pyomo.common.log import LoggingIntercept from pyomo.common.timing import ( - ConstructionTimer, TransformationTimer, report_timing, - TicTocTimer, HierarchicalTimer, + ConstructionTimer, + TransformationTimer, + report_timing, + TicTocTimer, + HierarchicalTimer, ) from pyomo.environ import ConcreteModel, RangeSet, Var, Any, TransformationFactory from pyomo.core.base.var import _VarData + class _pseudo_component(Var): def getname(*args, **kwds): raise RuntimeError("fail") + class TestTiming(unittest.TestCase): def setUp(self): self.reenable_gc = gc.isenabled() @@ -45,38 +50,33 @@ def test_raw_construction_timer(self): self.assertRegex( str(a), r"ConstructionTimer object for NoneType \(unknown\); " - r"[0-9\.]+ elapsed seconds") + r"[0-9\.]+ elapsed seconds", + ) v = Var() v.construct() a = ConstructionTimer(_VarData(v)) self.assertRegex( str(a), r"ConstructionTimer object for Var ScalarVar\[NOTSET\]; " - r"[0-9\.]+ elapsed seconds") + r"[0-9\.]+ elapsed seconds", + ) def test_raw_transformation_timer(self): a = TransformationTimer(None) self.assertRegex( - str(a), - r"TransformationTimer object for NoneType; " - r"[0-9\.]+ elapsed seconds") + str(a), r"TransformationTimer object for NoneType; [0-9\.]+ elapsed seconds" + ) v = _pseudo_component() a = ConstructionTimer(v) - self.assertIn( - "ConstructionTimer object for Var (unknown); ", - str(a)) + self.assertIn("ConstructionTimer object for Var (unknown); ", str(a)) def test_raw_transformation_timer(self): a = TransformationTimer(None, 'fwd') - self.assertIn( - "TransformationTimer object for NoneType (fwd); ", - str(a)) + self.assertIn("TransformationTimer object for NoneType (fwd); ", str(a)) a = TransformationTimer(None) - self.assertIn( - "TransformationTimer object for NoneType; ", - str(a)) + self.assertIn("TransformationTimer object for NoneType; ", str(a)) def test_report_timing(self): ref = r""" @@ -134,7 +134,7 @@ def test_report_timing(self): def test_TicTocTimer_tictoc(self): SLEEP = 0.1 - RES = 0.02 # resolution (seconds): 1/5 the sleep + RES = 0.02 # resolution (seconds): 1/5 the sleep # Note: pypy on GHA occasionally has timing # differences of >0.04s @@ -158,8 +158,7 @@ def test_TicTocTimer_tictoc(self): start_time = time.perf_counter() timer.tic() self.assertRegex( - out.getvalue(), - r'\[ [.0-9]+\] Resetting the tic/toc delta timer' + out.getvalue(), r'\[ [.0-9]+\] Resetting the tic/toc delta timer' ) time.sleep(SLEEP) @@ -169,14 +168,14 @@ def test_TicTocTimer_tictoc(self): delta = timer.toc() self.assertAlmostEqual(ref - start_time, delta, delta=RES) self.assertRegex( - out.getvalue(), - r'\[\+ [.0-9]+\] .* in test_TicTocTimer_tictoc' + out.getvalue(), r'\[\+ [.0-9]+\] .* in test_TicTocTimer_tictoc' ) with capture_output() as out: # entering / leaving the context manager can take non-trivial # time on some platforms (up to 0.03 on Windows / Python 3.10) self.assertAlmostEqual( - time.perf_counter() - ref, timer.toc(None), delta=RES) + time.perf_counter() - ref, timer.toc(None), delta=RES + ) self.assertEqual(out.getvalue(), '') with capture_output() as out: @@ -184,8 +183,7 @@ def test_TicTocTimer_tictoc(self): total = timer.toc(delta=False) self.assertAlmostEqual(ref - start_time, total, delta=RES) self.assertRegex( - out.getvalue(), - r'\[ [.0-9]+\] .* in test_TicTocTimer_tictoc' + out.getvalue(), r'\[ [.0-9]+\] .* in test_TicTocTimer_tictoc' ) ref *= -1 @@ -196,8 +194,8 @@ def test_TicTocTimer_tictoc(self): cumul_stop1 = timer.toc(None) self.assertAlmostEqual(ref, cumul_stop1, delta=RES) with self.assertRaisesRegex( - RuntimeError, - 'Stopping a TicTocTimer that was already stopped'): + RuntimeError, 'Stopping a TicTocTimer that was already stopped' + ): timer.stop() time.sleep(SLEEP) cumul_stop2 = timer.toc(None) @@ -213,21 +211,19 @@ def test_TicTocTimer_tictoc(self): delta = timer.toc() self.assertAlmostEqual(ref, delta, delta=RES) self.assertRegex( - out.getvalue(), - r'\[ [.0-9]+\| 1\] .* in test_TicTocTimer_tictoc' + out.getvalue(), r'\[ [.0-9]+\| 1\] .* in test_TicTocTimer_tictoc' ) with capture_output() as out: # Note that delta is ignored if the timer is a cumulative timer total = timer.toc(delta=False) self.assertAlmostEqual(delta, total, delta=RES) self.assertRegex( - out.getvalue(), - r'\[ [.0-9]+\| 1\] .* in test_TicTocTimer_tictoc' + out.getvalue(), r'\[ [.0-9]+\| 1\] .* in test_TicTocTimer_tictoc' ) def test_TicTocTimer_context_manager(self): SLEEP = 0.1 - RES = 0.05 # resolution (seconds): 1/2 the sleep + RES = 0.05 # resolution (seconds): 1/2 the sleep abs_time = time.perf_counter() with TicTocTimer() as timer: @@ -238,7 +234,7 @@ def test_TicTocTimer_context_manager(self): with timer: time.sleep(SLEEP) abs_time = time.perf_counter() - abs_time - self.assertGreater(abs_time, SLEEP*3 - RES/10) + self.assertGreater(abs_time, SLEEP * 3 - RES / 10) self.assertAlmostEqual(timer.toc(None), abs_time - exclude, delta=RES) def test_TicTocTimer_logger(self): @@ -278,7 +274,8 @@ def test_TicTocTimer_deprecated(self): self.assertRegex( LOG.getvalue().replace('\n', ' ').strip(), r"DEPRECATED: tic\(\): 'ostream' and 'logger' should be specified " - r"as keyword arguments( +\([^\)]+\)){2}") + r"as keyword arguments( +\([^\)]+\)){2}", + ) with LoggingIntercept() as LOG, capture_output() as out: timer.toc("msg", True, None, None) @@ -286,7 +283,8 @@ def test_TicTocTimer_deprecated(self): self.assertRegex( LOG.getvalue().replace('\n', ' ').strip(), r"DEPRECATED: toc\(\): 'delta', 'ostream', and 'logger' should be " - r"specified as keyword arguments( +\([^\)]+\)){2}") + r"specified as keyword arguments( +\([^\)]+\)){2}", + ) timer = TicTocTimer() with LoggingIntercept() as LOG, capture_output() as out: @@ -299,9 +297,8 @@ def test_TicTocTimer_deprecated(self): self.assertIn('msg True, None, None', out.getvalue()) self.assertEqual(LOG.getvalue(), "") - def test_HierarchicalTimer(self): - RES = 0.01 # resolution (seconds) + RES = 0.01 # resolution (seconds) timer = HierarchicalTimer() start_time = time.perf_counter() @@ -319,8 +316,7 @@ def test_HierarchicalTimer(self): timer.stop('a') end_time = time.perf_counter() timer.stop('all') - ref = \ -"""Identifier ncalls cumtime percall % + ref = """Identifier ncalls cumtime percall % --------------------------------------------------- all 1 [0-9.]+ +[0-9.]+ +100.0 ---------------------------------------------- @@ -339,32 +335,33 @@ def test_HierarchicalTimer(self): self.assertEqual(1, timer.get_num_calls('all')) self.assertAlmostEqual( - end_time - start_time, timer.get_total_time('all'), delta=RES) - self.assertEqual(100., timer.get_relative_percent_time('all')) - self.assertTrue(100. > timer.get_relative_percent_time('all.a')) - self.assertTrue(50. < timer.get_relative_percent_time('all.a')) + end_time - start_time, timer.get_total_time('all'), delta=RES + ) + self.assertEqual(100.0, timer.get_relative_percent_time('all')) + self.assertTrue(100.0 > timer.get_relative_percent_time('all.a')) + self.assertTrue(50.0 < timer.get_relative_percent_time('all.a')) def test_HierarchicalTimer_longNames(self): - RES = 0.01 # resolution (seconds) + RES = 0.01 # resolution (seconds) timer = HierarchicalTimer() start_time = time.perf_counter() - timer.start('all'*25) + timer.start('all' * 25) time.sleep(0.02) for i in range(10): - timer.start('a'*75) + timer.start('a' * 75) time.sleep(0.01) for j in range(5): - timer.start('aa'*20) + timer.start('aa' * 20) time.sleep(0.001) - timer.stop('aa'*20) - timer.start('ab'*20) - timer.stop('ab'*20) - timer.stop('a'*75) + timer.stop('aa' * 20) + timer.start('ab' * 20) + timer.stop('ab' * 20) + timer.stop('a' * 75) end_time = time.perf_counter() - timer.stop('all'*25) + timer.stop('all' * 25) ref = ( -"""Identifier%s ncalls cumtime percall %% + """Identifier%s ncalls cumtime percall %% %s------------------------------------ %s%s 1 [0-9.]+ +[0-9.]+ +100.0 %s------------------------------------ @@ -377,19 +374,287 @@ def test_HierarchicalTimer_longNames(self): other%s n/a [0-9.]+ +n/a +[0-9.]+ %s==================================== %s==================================== -""" % ( - ' '*69, - '-'*79, - 'all'*25, ' '*4, - '-'*75, - 'a'*75, '', - '-'*71, - 'aa'*20, ' '*31, - 'ab'*20, ' '*31, - ' '*66, - '='*71, - ' '*70, - '='*75, - '='*79)).splitlines() +""" + % ( + ' ' * 69, + '-' * 79, + 'all' * 25, + ' ' * 4, + '-' * 75, + 'a' * 75, + '', + '-' * 71, + 'aa' * 20, + ' ' * 31, + 'ab' * 20, + ' ' * 31, + ' ' * 66, + '=' * 71, + ' ' * 70, + '=' * 75, + '=' * 79, + ) + ).splitlines() for l, r in zip(str(timer).splitlines(), ref): self.assertRegex(l, r) + + def test_clear_except_base_timer(self): + timer = HierarchicalTimer() + timer.start("a") + timer.start("b") + timer.stop("b") + timer.stop("a") + timer.start("c") + timer.stop("c") + timer.start("d") + timer.stop("d") + timer.clear_except("b", "c") + key_set = set(timer.timers.keys()) + self.assertEqual(key_set, {"c"}) + + def test_clear_except_subtimer(self): + # Testing this method on "sub-timers" exercises different code + # as while the base timer is a HierarchicalTimer, the sub-timers + # are _HierarchicalHelpers + timer = HierarchicalTimer() + timer.start("root") + timer.start("a") + timer.start("b") + timer.stop("b") + timer.stop("a") + timer.start("c") + timer.stop("c") + timer.start("d") + timer.stop("d") + timer.stop("root") + root = timer.timers["root"] + root.clear_except("b", "c") + key_set = set(root.timers.keys()) + self.assertEqual(key_set, {"c"}) + + +class TestFlattenHierarchicalTimer(unittest.TestCase): + # + # The following methods create some hierarchical timers, then + # hand-code the total time of each timer in the data structure. + # This is so we can assert that total_time fields of flattened + # timers are computed correctly without relying on the actual + # time spent. + # + def make_singleton_timer(self): + timer = HierarchicalTimer() + timer.start("root") + timer.stop("root") + timer.timers["root"].total_time = 5.0 + return timer + + def make_flat_timer(self): + timer = HierarchicalTimer() + timer.start("root") + timer.start("a") + timer.stop("a") + timer.start("b") + timer.stop("b") + timer.stop("root") + timer.timers["root"].total_time = 5.0 + timer.timers["root"].timers["a"].total_time = 1.0 + timer.timers["root"].timers["b"].total_time = 2.5 + return timer + + def make_timer_depth_2_one_child(self): + timer = HierarchicalTimer() + timer.start("root") + timer.start("a") + timer.start("b") + timer.stop("b") + timer.start("c") + timer.stop("c") + timer.stop("a") + timer.stop("root") + timer.timers["root"].total_time = 5.0 + timer.timers["root"].timers["a"].total_time = 4.0 + timer.timers["root"].timers["a"].timers["b"].total_time = 1.1 + timer.timers["root"].timers["a"].timers["c"].total_time = 2.2 + return timer + + def make_timer_depth_2_with_name_collision(self): + timer = HierarchicalTimer() + timer.start("root") + timer.start("a") + timer.start("b") + timer.stop("b") + timer.start("c") + timer.stop("c") + timer.stop("a") + timer.start("b") + timer.stop("b") + timer.stop("root") + timer.timers["root"].total_time = 5.0 + timer.timers["root"].timers["a"].total_time = 4.0 + timer.timers["root"].timers["a"].timers["b"].total_time = 1.1 + timer.timers["root"].timers["a"].timers["c"].total_time = 2.2 + timer.timers["root"].timers["b"].total_time = 0.11 + return timer + + def make_timer_depth_2_two_children(self): + timer = HierarchicalTimer() + timer.start("root") + timer.start("a") + timer.start("b") + timer.stop("b") + timer.start("c") + timer.stop("c") + timer.stop("a") + timer.start("b") + timer.start("c") + timer.stop("c") + timer.start("d") + timer.stop("d") + timer.stop("b") + timer.stop("root") + timer.timers["root"].total_time = 5.0 + timer.timers["root"].timers["a"].total_time = 4.0 + timer.timers["root"].timers["a"].timers["b"].total_time = 1.1 + timer.timers["root"].timers["a"].timers["c"].total_time = 2.2 + timer.timers["root"].timers["b"].total_time = 0.88 + timer.timers["root"].timers["b"].timers["c"].total_time = 0.07 + timer.timers["root"].timers["b"].timers["d"].total_time = 0.05 + return timer + + def make_timer_depth_4(self): + timer = HierarchicalTimer() + timer.start("root") + timer.start("a") + timer.start("b") + timer.stop("b") + timer.start("c") + timer.start("d") + timer.start("e") + timer.stop("e") + timer.stop("d") + timer.stop("c") + timer.stop("a") + timer.start("b") + timer.start("c") + timer.start("e") + timer.stop("e") + timer.stop("c") + timer.start("d") + timer.stop("d") + timer.stop("b") + timer.stop("root") + timer.timers["root"].total_time = 5.0 + timer.timers["root"].timers["a"].total_time = 4.0 + timer.timers["root"].timers["a"].timers["b"].total_time = 1.1 + timer.timers["root"].timers["a"].timers["c"].total_time = 2.2 + timer.timers["root"].timers["a"].timers["c"].timers["d"].total_time = 0.9 + timer.timers["root"].timers["a"].timers["c"].timers["d"].timers[ + "e" + ].total_time = 0.6 + timer.timers["root"].timers["b"].total_time = 0.88 + timer.timers["root"].timers["b"].timers["c"].total_time = 0.07 + timer.timers["root"].timers["b"].timers["c"].timers["e"].total_time = 0.04 + timer.timers["root"].timers["b"].timers["d"].total_time = 0.05 + return timer + + def make_timer_depth_4_same_name(self): + timer = HierarchicalTimer() + timer.start("root") + timer.start("a") + timer.start("a") + timer.start("a") + timer.start("a") + timer.stop("a") + timer.stop("a") + timer.stop("a") + timer.stop("a") + timer.stop("root") + timer.timers["root"].total_time = 5.0 + timer.timers["root"].timers["a"].total_time = 1.0 + timer.timers["root"].timers["a"].timers["a"].total_time = 0.1 + timer.timers["root"].timers["a"].timers["a"].timers["a"].total_time = 0.01 + timer.timers["root"].timers["a"].timers["a"].timers["a"].timers[ + "a" + ].total_time = 0.001 + return timer + + def test_singleton(self): + timer = self.make_singleton_timer() + root = timer.timers["root"] + root.flatten() + self.assertAlmostEqual(root.total_time, 5.0) + + def test_already_flat(self): + timer = self.make_flat_timer() + root = timer.timers["root"] + root.flatten() + self.assertAlmostEqual(root.total_time, 5.0) + self.assertAlmostEqual(root.timers["a"].total_time, 1.0) + self.assertAlmostEqual(root.timers["b"].total_time, 2.5) + + def test_depth_2_one_child(self): + timer = self.make_timer_depth_2_one_child() + root = timer.timers["root"] + root.flatten() + self.assertAlmostEqual(root.total_time, 5.0) + self.assertAlmostEqual(root.timers["a"].total_time, 0.7) + self.assertAlmostEqual(root.timers["b"].total_time, 1.1) + self.assertAlmostEqual(root.timers["c"].total_time, 2.2) + + def test_timer_depth_2_with_name_collision(self): + timer = self.make_timer_depth_2_with_name_collision() + root = timer.timers["root"] + root.flatten() + self.assertAlmostEqual(root.total_time, 5.0) + self.assertAlmostEqual(root.timers["a"].total_time, 0.700) + self.assertAlmostEqual(root.timers["b"].total_time, 1.210) + self.assertAlmostEqual(root.timers["c"].total_time, 2.200) + + def test_timer_depth_2_two_children(self): + timer = self.make_timer_depth_2_two_children() + root = timer.timers["root"] + root.flatten() + self.assertAlmostEqual(root.total_time, 5.0) + self.assertAlmostEqual(root.timers["a"].total_time, 0.700) + self.assertAlmostEqual(root.timers["b"].total_time, 1.860) + self.assertAlmostEqual(root.timers["c"].total_time, 2.270) + self.assertAlmostEqual(root.timers["d"].total_time, 0.050) + + def test_timer_depth_4(self): + timer = self.make_timer_depth_4() + root = timer.timers["root"] + root.flatten() + self.assertAlmostEqual(root.total_time, 5.0) + self.assertAlmostEqual(root.timers["a"].total_time, 0.700) + self.assertAlmostEqual(root.timers["b"].total_time, 1.860) + self.assertAlmostEqual(root.timers["c"].total_time, 1.330) + self.assertAlmostEqual(root.timers["d"].total_time, 0.350) + self.assertAlmostEqual(root.timers["e"].total_time, 0.640) + + def test_timer_depth_4_same_name(self): + timer = self.make_timer_depth_4_same_name() + root = timer.timers["root"] + root.flatten() + self.assertAlmostEqual(root.total_time, 5.0) + self.assertAlmostEqual(root.timers["a"].total_time, 1.0) + + def test_base_timer_depth_3(self): + # This is depth 2 wrt the root, depth 3 wrt the + # "base timer" + timer = self.make_timer_depth_2_two_children() + timer.flatten() + self.assertAlmostEqual(timer.timers["root"].total_time, 0.120) + self.assertAlmostEqual(timer.timers["a"].total_time, 0.700) + self.assertAlmostEqual(timer.timers["b"].total_time, 1.860) + self.assertAlmostEqual(timer.timers["c"].total_time, 2.270) + self.assertAlmostEqual(timer.timers["d"].total_time, 0.050) + + def test_timer_still_active(self): + timer = HierarchicalTimer() + timer.start("a") + timer.stop("a") + timer.start("b") + msg = "Cannot flatten.*while any timers are active" + with self.assertRaisesRegex(RuntimeError, msg): + timer.flatten() + timer.stop("b") diff --git a/pyomo/common/tests/test_typing.py b/pyomo/common/tests/test_typing.py index 63bfc23f720..982462f8a8d 100644 --- a/pyomo/common/tests/test_typing.py +++ b/pyomo/common/tests/test_typing.py @@ -16,6 +16,7 @@ from pyomo.common.pyomo_typing import get_overloads_for from pyomo.environ import Block + class TestTyping(unittest.TestCase): def test_get_overloads_for(self): func_list = get_overloads_for(Block.__init__) diff --git a/pyomo/common/tests/test_unittest.py b/pyomo/common/tests/test_unittest.py index 8efc4a16873..a87fc57da9e 100644 --- a/pyomo/common/tests/test_unittest.py +++ b/pyomo/common/tests/test_unittest.py @@ -18,19 +18,23 @@ from pyomo.common.log import LoggingIntercept from pyomo.environ import ConcreteModel, Var, Param + @unittest.timeout(10) def short_sleep(): return 42 + @unittest.timeout(0.01) def long_sleep(): time.sleep(1) return 42 + @unittest.timeout(10) def raise_exception(): foo.bar + @unittest.timeout(10) def fail(): raise AssertionError("0 != 1") @@ -73,41 +77,42 @@ def test_assertStructuredAlmostEqual_comparison(self): with self.assertRaisesRegex(self.failureException, '10 !~= 10.01'): self.assertStructuredAlmostEqual(10, 10.01, delta=1e-3) + def test_assertStructuredAlmostEqual_nan(self): + a = float('nan') + b = float('nan') + self.assertStructuredAlmostEqual(a, b) + def test_assertStructuredAlmostEqual_errorChecking(self): with self.assertRaisesRegex( - ValueError, "Cannot specify more than one of " - "{places, delta, abstol}"): + ValueError, "Cannot specify more than one of {places, delta, abstol}" + ): self.assertStructuredAlmostEqual(1, 1, places=7, delta=1) def test_assertStructuredAlmostEqual_str(self): self.assertStructuredAlmostEqual("hi", "hi") - with self.assertRaisesRegex( - self.failureException, "'hi' !~= 'hello'"): + with self.assertRaisesRegex(self.failureException, "'hi' !~= 'hello'"): self.assertStructuredAlmostEqual("hi", "hello") - with self.assertRaisesRegex( - self.failureException, r"'hi' !~= \['h',"): - self.assertStructuredAlmostEqual("hi", ['h','i']) + with self.assertRaisesRegex(self.failureException, r"'hi' !~= \['h',"): + self.assertStructuredAlmostEqual("hi", ['h', 'i']) def test_assertStructuredAlmostEqual_othertype(self): - a = datetime.datetime(1,1,1) - b = datetime.datetime(1,1,1) + a = datetime.datetime(1, 1, 1) + b = datetime.datetime(1, 1, 1) self.assertStructuredAlmostEqual(a, b) - b = datetime.datetime(1,1,2) - with self.assertRaisesRegex( - self.failureException, "datetime.* !~= datetime"): + b = datetime.datetime(1, 1, 2) + with self.assertRaisesRegex(self.failureException, "datetime.* !~= datetime"): self.assertStructuredAlmostEqual(a, b) def test_assertStructuredAlmostEqual_list(self): - a = [1,2] - b = [1,2,3] + a = [1, 2] + b = [1, 2, 3] with self.assertRaisesRegex( - self.failureException, - r'sequences are different sizes \(2 != 3\)'): + self.failureException, r'sequences are different sizes \(2 != 3\)' + ): self.assertStructuredAlmostEqual(a, b) self.assertStructuredAlmostEqual(a, b, allow_second_superset=True) a.append(3) - self.assertStructuredAlmostEqual(a, b) b[1] -= 1.999e-7 self.assertStructuredAlmostEqual(a, b) @@ -116,16 +121,15 @@ def test_assertStructuredAlmostEqual_list(self): self.assertStructuredAlmostEqual(a, b) def test_assertStructuredAlmostEqual_dict(self): - a = {1:2, 3:4} - b = {1:2, 3:4, 5:6} + a = {1: 2, 3: 4} + b = {1: 2, 3: 4, 5: 6} with self.assertRaisesRegex( - self.failureException, - r'mappings are different sizes \(2 != 3\)'): + self.failureException, r'mappings are different sizes \(2 != 3\)' + ): self.assertStructuredAlmostEqual(a, b) self.assertStructuredAlmostEqual(a, b, allow_second_superset=True) a[5] = 6 - self.assertStructuredAlmostEqual(a, b) b[1] -= 1.999e-7 self.assertStructuredAlmostEqual(a, b) @@ -136,44 +140,41 @@ def test_assertStructuredAlmostEqual_dict(self): del b[1] b[6] = 6 with self.assertRaisesRegex( - self.failureException, - r'key \(1\) from first not found in second'): + self.failureException, r'key \(1\) from first not found in second' + ): self.assertStructuredAlmostEqual(a, b) def test_assertStructuredAlmostEqual_nested(self): - a = {1.1: [1,2,3], 'a': 'hi', 3: {1:2, 3:4}} - b = {1.1: [1,2,3], 'a': 'hi', 3: {1:2, 3:4}} + a = {1.1: [1, 2, 3], 'a': 'hi', 3: {1: 2, 3: 4}} + b = {1.1: [1, 2, 3], 'a': 'hi', 3: {1: 2, 3: 4}} self.assertStructuredAlmostEqual(a, b) b[1.1][2] -= 1.999e-7 b[3][1] -= 9.999e-8 self.assertStructuredAlmostEqual(a, b) b[1.1][2] -= 1.999e-7 - with self.assertRaisesRegex(self.failureException, - '3 !~= 2.999'): + with self.assertRaisesRegex(self.failureException, '3 !~= 2.999'): self.assertStructuredAlmostEqual(a, b) def test_assertStructuredAlmostEqual_numericvalue(self): m = ConcreteModel() - m.v = Var(initialize=2.) - m.p = Param(initialize=2.) - a = {1.1: [1,m.p,3], 'a': 'hi', 3: {1:2, 3:4}} - b = {1.1: [1,m.v,3], 'a': 'hi', 3: {1:2, 3:4}} + m.v = Var(initialize=2.0) + m.p = Param(initialize=2.0) + a = {1.1: [1, m.p, 3], 'a': 'hi', 3: {1: 2, 3: 4}} + b = {1.1: [1, m.v, 3], 'a': 'hi', 3: {1: 2, 3: 4}} self.assertStructuredAlmostEqual(a, b) m.v.set_value(m.v.value - 1.999e-7) self.assertStructuredAlmostEqual(a, b) m.v.set_value(m.v.value - 1.999e-7) - with self.assertRaisesRegex(self.failureException, - '2.0 !~= 1.999'): + with self.assertRaisesRegex(self.failureException, '2.0 !~= 1.999'): self.assertStructuredAlmostEqual(a, b) def test_timeout_fcn_call(self): self.assertEqual(short_sleep(), 42) - with self.assertRaisesRegex( - TimeoutError, 'test timed out after 0.01 seconds'): + with self.assertRaisesRegex(TimeoutError, 'test timed out after 0.01 seconds'): long_sleep() with self.assertRaisesRegex( - NameError, - r"name 'foo' is not defined\s+Original traceback:"): + NameError, r"name 'foo' is not defined\s+Original traceback:" + ): raise_exception() with self.assertRaisesRegex(AssertionError, r"^0 != 1$"): fail() @@ -198,8 +199,7 @@ def test_timeout_skip(self): def test_timeout_skip_fails(self): try: - with self.assertRaisesRegex( - unittest.SkipTest, r"Skipping this test"): + with self.assertRaisesRegex(unittest.SkipTest, r"Skipping this test"): self.test_timeout_skip() TestPyomoUnittest.test_timeout_skip.skip = False with self.assertRaisesRegex(AssertionError, r"0 != 1"): @@ -220,8 +220,7 @@ def test_bound_function(self): with self.assertRaises((TypeError, EOFError, AttributeError)): self.bound_function() self.assertIn("platform that does not support 'fork'", LOG.getvalue()) - self.assertIn( - "one of its arguments is not serializable", LOG.getvalue()) + self.assertIn("one of its arguments is not serializable", LOG.getvalue()) @unittest.timeout(10, require_fork=True) def bound_function_require_fork(self): @@ -232,11 +231,10 @@ def test_bound_function_require_fork(self): self.bound_function_require_fork() return with self.assertRaisesRegex( - unittest.SkipTest, - "timeout requires unavailable fork interface"): + unittest.SkipTest, "timeout requires unavailable fork interface" + ): self.bound_function_require_fork() - if __name__ == '__main__': unittest.main() diff --git a/pyomo/common/timing.py b/pyomo/common/timing.py index 042bd1bd9e6..96360c61a1b 100644 --- a/pyomo/common/timing.py +++ b/pyomo/common/timing.py @@ -15,6 +15,19 @@ # Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, # the U.S. Government retains certain rights in this software. # ___________________________________________________________________________ + +"""A module of utilities for collecting timing information + +.. autosummary:: + + report_timing + TicTocTimer + tic + toc + HierarchicalTimer + +""" + import functools import logging import sys @@ -30,6 +43,7 @@ _construction_logger = logging.getLogger('pyomo.common.timing.construction') _transform_logger = logging.getLogger('pyomo.common.timing.transformation') + def report_timing(stream=True, level=logging.INFO): """Set reporting of Pyomo timing information. @@ -87,7 +101,7 @@ def __init__(self, obj): def report(self): # Record the elapsed time, as some log handlers may not - # immediately generate the messge string + # immediately generate the message string self.timer += default_timer() _construction_logger.info(self) @@ -132,11 +146,13 @@ def __str__(self): if total_time < 0: total_time += default_timer() return self.in_progress % (_type, self.name, total_time) - return self.msg % ( 2 if total_time >= 0.005 else 0, - total_time, - _type, - self.name, - idx_label ) + return self.msg % ( + 2 if total_time >= 0.005 else 0, + total_time, + _type, + self.name, + idx_label, + ) class TransformationTimer(object): @@ -167,16 +183,19 @@ def __str__(self): if total_time < 0: total_time += default_timer() return self.in_progress % (self.name, self.mode, total_time) - return self.msg % ( 2 if total_time>=0.005 else 0, - total_time, - self.name, - self.mode ) + return self.msg % ( + 2 if total_time >= 0.005 else 0, + total_time, + self.name, + self.mode, + ) + # # Setup the timer # # TODO: Remove this bit for Pyomo 6.0 - we won't care about older versions -if sys.version_info >= (3,3): +if sys.version_info >= (3, 3): # perf_counter is guaranteed to be monotonic and the most accurate timer default_timer = time.perf_counter elif sys.platform.startswith('win'): @@ -212,6 +231,7 @@ class TicTocTimer(object): logger (Logger): an optional output stream using the python logging package. Note: the timing logged using ``logger.info()`` """ + def __init__(self, ostream=_NotSpecified, logger=None): if ostream is _NotSpecified and logger is not None: ostream = None @@ -222,8 +242,14 @@ def __init__(self, ostream=_NotSpecified, logger=None): self._start_count = 0 self._cumul = 0 - def tic(self, msg=_NotSpecified, *args, - ostream=_NotSpecified, logger=_NotSpecified, level=_NotSpecified): + def tic( + self, + msg=_NotSpecified, + *args, + ostream=_NotSpecified, + logger=_NotSpecified, + level=_NotSpecified, + ): """Reset the tic/toc delta timer. This resets the reference time from which the next delta time is @@ -248,18 +274,31 @@ class was constructed). Note: timing logged using logger.info msg = "Resetting the tic/toc delta timer" if msg is not None: if args and '%' not in msg: + # Note: specify the parent module scope for the logger + # so this does not hit (and get handled by) the local + # pyomo.common.timing logger. deprecation_warning( "tic(): 'ostream' and 'logger' should be " - "specified as keyword arguments", version='6.4.2') + "specified as keyword arguments", + version='6.4.2', + logger=__package__, + ) ostream, *args = args if args: logger, *args = args - self.toc(msg, *args, delta=False, - ostream=ostream, logger=logger, level=level) - + self.toc( + msg, *args, delta=False, ostream=ostream, logger=logger, level=level + ) - def toc(self, msg=_NotSpecified, *args, delta=True, - ostream=_NotSpecified, logger=_NotSpecified, level=_NotSpecified): + def toc( + self, + msg=_NotSpecified, + *args, + delta=True, + ostream=_NotSpecified, + logger=_NotSpecified, + level=_NotSpecified, + ): """Print out the elapsed time. This resets the reference time from which the next delta time is @@ -285,12 +324,17 @@ class was constructed). Note: timing logged using `level` """ if msg is _NotSpecified: - msg = 'File "%s", line %s in %s' % \ - traceback.extract_stack(limit=2)[0][:3] + msg = 'File "%s", line %s in %s' % traceback.extract_stack(limit=2)[0][:3] if args and msg is not None and '%' not in msg: + # Note: specify the parent module scope for the logger + # so this does not hit (and get handled by) the local + # pyomo.common.timing logger. deprecation_warning( "toc(): 'delta', 'ostream', and 'logger' should be " - "specified as keyword arguments", version='6.4.2') + "specified as keyword arguments", + version='6.4.2', + logger=__package__, + ) delta, *args = args if args: ostream, *args = args @@ -346,8 +390,7 @@ class was constructed). Note: timing logged using `level` def stop(self): delta, self._lastTime = self._lastTime, None if delta is None: - raise RuntimeError( - "Stopping a TicTocTimer that was already stopped") + raise RuntimeError("Stopping a TicTocTimer that was already stopped") delta = default_timer() - delta self._cumul += delta return delta @@ -381,6 +424,60 @@ def __exit__(self, et, ev, tb): """ +def _move_grandchildren_to_root(root, child): + """A helper function to assist with flattening of HierarchicalTimer + objects + + Parameters + ---------- + root: HierarchicalTimer or _HierarchicalHelper + The root node. Children of `child` will become children of + this node + + child: _HierarchicalHelper + The child node that will be turned into a leaf by moving + its children to the root + + """ + for gchild_key, gchild_timer in child.timers.items(): + # For each grandchild, if this key corresponds to a child, + # combine the information from these timers. Otherwise, + # add the new timer as a child of the root. + if gchild_key in root.timers: + gchild_total_time = gchild_timer.total_time + gchild_n_calls = gchild_timer.n_calls + root.timers[gchild_key].total_time += gchild_total_time + root.timers[gchild_key].n_calls += gchild_n_calls + else: + root.timers[gchild_key] = gchild_timer + + # Subtract the grandchild's total time from the child (which + # will no longer be a parent of the grandchild) + child.total_time -= gchild_timer.total_time + + # Clear the child timer's dict to make it a leaf node + child.timers.clear() + + +def _clear_timers_except(timer, to_retain): + """A helper function for removing keys, except for those specified, + from the dictionary of timers + + Parameters + ---------- + timer: HierarchicalTimer or _HierarchicalHelper + The timer whose dict of "sub-timers" will be pruned + + to_retain: set + Set of keys of the "sub-timers" to retain + + """ + keys = list(timer.timers.keys()) + for key in keys: + if key not in to_retain: + timer.timers.pop(key) + + class _HierarchicalHelper(object): def __init__(self): self.tic_toc = TicTocTimer() @@ -409,16 +506,20 @@ def to_str(self, indent, stage_identifier_lengths): else: _percent = float('nan') s += indent - s += ( name_formatter + '{ncalls:>9d} {cumtime:>9.3f} ' - '{percall:>9.3f} {percent:>6.1f}\n' ).format( - name=name, - ncalls=timer.n_calls, - cumtime=timer.total_time, - percall=timer.total_time/timer.n_calls, - percent=_percent ) + s += ( + name_formatter + '{ncalls:>9d} {cumtime:>9.3f} ' + '{percall:>9.3f} {percent:>6.1f}\n' + ).format( + name=name, + ncalls=timer.n_calls, + cumtime=timer.total_time, + percall=timer.total_time / timer.n_calls, + percent=_percent, + ) s += timer.to_str( - indent=indent + ' '*stage_identifier_lengths[0], - stage_identifier_lengths=sub_stage_identifier_lengths) + indent=indent + ' ' * stage_identifier_lengths[0], + stage_identifier_lengths=sub_stage_identifier_lengths, + ) other_time -= timer.total_time if self.total_time > 0: @@ -426,13 +527,16 @@ def to_str(self, indent, stage_identifier_lengths): else: _percent = float('nan') s += indent - s += ( name_formatter + '{ncalls:>9} {cumtime:>9.3f} ' - '{percall:>9} {percent:>6.1f}\n' ).format( - name='other', - ncalls='n/a', - cumtime=other_time, - percall='n/a', - percent=_percent ) + s += ( + name_formatter + '{ncalls:>9} {cumtime:>9.3f} ' + '{percall:>9} {percent:>6.1f}\n' + ).format( + name='other', + ncalls='n/a', + cumtime=other_time, + percall='n/a', + percent=_percent, + ) s += underline.replace('-', '=') return s @@ -442,9 +546,32 @@ def get_timers(self, res, prefix): res.append(_name) timer.get_timers(res, _name) + def flatten(self): + # Get keys and values so we don't modify dict while iterating it. + items = list(self.timers.items()) + for child_key, child_timer in items: + # Flatten the child timer. Now all grandchildren are leaf nodes + child_timer.flatten() + # Flatten by removing grandchildren and adding them as children + # of the root. + _move_grandchildren_to_root(self, child_timer) + + def clear_except(self, *args): + to_retain = set(args) + _clear_timers_except(self, to_retain) + class HierarchicalTimer(object): - """A class for hierarchical timing. + """A class for collecting and displaying hierarchical timing + information + + When implementing an iterative algorithm with nested subroutines + (e.g. an optimization solver), we often want to know the cumulative + time spent in each subroutine as well as this time as a proportion + of time spent in the calling routine. This class collects timing + information, for user-specified keys, that accumulates over the life + of the timer object and preserves the hierarchical (nested) structure + of timing categories. Examples -------- @@ -513,11 +640,106 @@ class HierarchicalTimer(object): # doctest: +SKIP aa % total: 35.976058 + When implementing an algorithm, it is often useful to collect detailed + hierarchical timing information. However, when communicating a timing + profile, it is often best to retain only the most relevant information + in a flattened data structure. In the following example, suppose we + want to compare the time spent in the ``"c"`` and ``"f"`` subroutines. + We would like to generate a timing profile that displays only the time + spent in these two subroutines, in a flattened structure so that they + are easy to compare. To do this, we + + #. Ignore subroutines of ``"c"`` and ``"f"`` that are unnecessary for\ + this comparison + + #. Flatten the hierarchical timing information + + #. Eliminate all the information we don't care about + + >>> import time + >>> from pyomo.common.timing import HierarchicalTimer + >>> timer = HierarchicalTimer() + >>> timer.start("root") + >>> timer.start("a") + >>> time.sleep(0.01) + >>> timer.start("b") + >>> timer.start("c") + >>> time.sleep(0.1) + >>> timer.stop("c") + >>> timer.stop("b") + >>> timer.stop("a") + >>> timer.start("d") + >>> timer.start("e") + >>> time.sleep(0.01) + >>> timer.start("f") + >>> time.sleep(0.05) + >>> timer.stop("f") + >>> timer.start("c") + >>> timer.start("g") + >>> timer.start("h") + >>> time.sleep(0.1) + >>> timer.stop("h") + >>> timer.stop("g") + >>> timer.stop("c") + >>> timer.stop("e") + >>> timer.stop("d") + >>> timer.stop("root") + >>> print(timer) # doctest: +SKIP + Identifier ncalls cumtime percall % + ------------------------------------------------------------------ + root 1 0.290 0.290 100.0 + ------------------------------------------------------------- + a 1 0.118 0.118 40.5 + -------------------------------------------------------- + b 1 0.105 0.105 89.4 + --------------------------------------------------- + c 1 0.105 0.105 100.0 + other n/a 0.000 n/a 0.0 + =================================================== + other n/a 0.013 n/a 10.6 + ======================================================== + d 1 0.173 0.173 59.5 + -------------------------------------------------------- + e 1 0.173 0.173 100.0 + --------------------------------------------------- + c 1 0.105 0.105 60.9 + ---------------------------------------------- + g 1 0.105 0.105 100.0 + ----------------------------------------- + h 1 0.105 0.105 100.0 + other n/a 0.000 n/a 0.0 + ========================================= + other n/a 0.000 n/a 0.0 + ============================================== + f 1 0.055 0.055 31.9 + other n/a 0.013 n/a 7.3 + =================================================== + other n/a 0.000 n/a 0.0 + ======================================================== + other n/a 0.000 n/a 0.0 + ============================================================= + ================================================================== + >>> # Clear subroutines under "c" that we don't care about + >>> timer.timers["root"].timers["d"].timers["e"].timers["c"].timers.clear() + >>> # Flatten hierarchy + >>> timer.timers["root"].flatten() + >>> # Clear except for the subroutines we care about + >>> timer.timers["root"].clear_except("c", "f") + >>> print(timer) # doctest: +SKIP + Identifier ncalls cumtime percall % + ---------------------------------------------- + root 1 0.290 0.290 100.0 + ----------------------------------------- + c 2 0.210 0.105 72.4 + f 1 0.055 0.055 19.0 + other n/a 0.025 n/a 8.7 + ========================================= + ============================================== Notes ----- - The :py:class:`HierarchicalTimer` use a stack to track which timers + The :py:class:`HierarchicalTimer` uses a stack to track which timers are active at any point in time. Additionally, each timer has a dictionary of timers for its children timers. Consider @@ -539,6 +761,7 @@ class HierarchicalTimer(object): code is not). """ + def __init__(self): self.stack = list() self.timers = dict() @@ -569,7 +792,9 @@ def _get_timer(self, identifier, should_exist=False): if should_exist: raise RuntimeError( 'Could not find timer {0}'.format( - '.'.join(self.stack + [identifier]))) + '.'.join(self.stack + [identifier]) + ) + ) parent.timers[identifier] = _HierarchicalHelper() return parent.timers[identifier] @@ -597,7 +822,8 @@ def stop(self, identifier): raise ValueError( str(identifier) + ' is not the currently active timer. ' 'The only timer that can currently be stopped is ' - + '.'.join(self.stack)) + + '.'.join(self.stack) + ) self.stack.pop() timer = self._get_timer(identifier, should_exist=True) timer.stop() @@ -627,36 +853,41 @@ def __str__(self): # switch to a constant indentation of const_indent spaces # (to hopefully shorten the line lengths name_field_width = max( - const_indent*i + l - for i, l in enumerate(stage_identifier_lengths) + const_indent * i + l for i, l in enumerate(stage_identifier_lengths) ) for i in range(len(stage_identifier_lengths) - 1): stage_identifier_lengths[i] = const_indent - stage_identifier_lengths[-1] = ( - name_field_width - - const_indent*(len(stage_identifier_lengths) - 1) ) + stage_identifier_lengths[-1] = name_field_width - const_indent * ( + len(stage_identifier_lengths) - 1 + ) name_formatter = '{name:<' + str(name_field_width) + '}' - s = ( name_formatter + '{ncalls:>9} {cumtime:>9} ' - '{percall:>9} {percent:>6}\n').format( - name='Identifier', - ncalls='ncalls', - cumtime='cumtime', - percall='percall', - percent='%') + s = ( + name_formatter + '{ncalls:>9} {cumtime:>9} {percall:>9} {percent:>6}\n' + ).format( + name='Identifier', + ncalls='ncalls', + cumtime='cumtime', + percall='percall', + percent='%', + ) underline = '-' * (name_field_width + 36) + '\n' s += underline sub_stage_identifier_lengths = stage_identifier_lengths[1:] for name, timer in sorted(self.timers.items()): - s += ( name_formatter + '{ncalls:>9d} {cumtime:>9.3f} ' - '{percall:>9.3f} {percent:>6.1f}\n').format( - name=name, - ncalls=timer.n_calls, - cumtime=timer.total_time, - percall=timer.total_time/timer.n_calls, - percent=self.get_total_percent_time(name)) + s += ( + name_formatter + '{ncalls:>9d} {cumtime:>9.3f} ' + '{percall:>9.3f} {percent:>6.1f}\n' + ).format( + name=name, + ncalls=timer.n_calls, + cumtime=timer.total_time, + percall=timer.total_time / timer.n_calls, + percent=self.get_total_percent_time(name), + ) s += timer.to_str( - indent=' '*stage_identifier_lengths[0], - stage_identifier_lengths=sub_stage_identifier_lengths) + indent=' ' * stage_identifier_lengths[0], + stage_identifier_lengths=sub_stage_identifier_lengths, + ) s += underline.replace('-', '=') return s @@ -712,7 +943,7 @@ def get_num_calls(self, identifier): Returns ------- - num_calss: int + n_calls: int The number of times start was called for the specified timer. """ stack = identifier.split('.') @@ -780,3 +1011,43 @@ def get_timers(self): res.append(name) timer.get_timers(res, name) return res + + def flatten(self): + """Flatten the HierarchicalTimer in-place, moving all the timing + categories into a single level + + If any timers moved into the same level have the same identifier, + the ``total_time`` and ``n_calls`` fields are added together. + The ``total_time`` of a "child timer" that is "moved upwards" is + subtracted from the ``total_time`` of that timer's original + parent. + + """ + if self.stack: + raise RuntimeError( + "Cannot flatten a HierarchicalTimer while any timers are" + " active. Current active timer is %s. flatten should only" + " be called as a post-processing step." % self.stack[-1] + ) + items = list(self.timers.items()) + for key, timer in items: + timer.flatten() + _move_grandchildren_to_root(self, timer) + + def clear_except(self, *args): + """Prune all "sub-timers" except those specified + + Parameters + ---------- + args: str + Keys that will be retained + + """ + if self.stack: + raise RuntimeError( + "Cannot clear a HierarchicalTimer while any timers are" + " active. Current active timer is %s. clear_except should" + " only be called as a post-processing step." % self.stack[-1] + ) + to_retain = set(args) + _clear_timers_except(self, to_retain) diff --git a/pyomo/common/unittest.py b/pyomo/common/unittest.py index e90f248eb04..5ab79d668f3 100644 --- a/pyomo/common/unittest.py +++ b/pyomo/common/unittest.py @@ -18,6 +18,8 @@ import enum import logging +import math +import re import sys from io import StringIO @@ -33,9 +35,11 @@ from unittest import mock + def _defaultFormatter(msg, default): return msg or default + def _floatOrCall(val): """Cast the value to float, if that fails call it and then cast. @@ -49,15 +53,31 @@ def _floatOrCall(val): try: return float(val) except TypeError: + pass + try: return float(val()) - -def assertStructuredAlmostEqual(first, second, - places=None, msg=None, delta=None, - reltol=None, abstol=None, - allow_second_superset=False, - item_callback=_floatOrCall, - exception=ValueError, - formatter=_defaultFormatter): + except TypeError: + pass + try: + return val.value + except AttributeError: + # likely a complex + return val + + +def assertStructuredAlmostEqual( + first, + second, + places=None, + msg=None, + delta=None, + reltol=None, + abstol=None, + allow_second_superset=False, + item_callback=_floatOrCall, + exception=ValueError, + formatter=_defaultFormatter, +): """Test that first and second are equal up to a tolerance This compares first and second using both an absolute (`abstol`) and @@ -86,7 +106,7 @@ def assertStructuredAlmostEqual(first, second, `abs(first - second) / max(abs(first), abs(second))`, only when first != second (thereby avoiding divide-by-zero errors). - Items (entries other than Sequence / Mapping containters, matching + Items (entries other than Sequence / Mapping containers, matching strings, and items that satisfy `first is second`) are passed to the `item_callback` before testing equality and relative tolerances. @@ -131,11 +151,10 @@ def assertStructuredAlmostEqual(first, second, """ if sum(1 for _ in (places, delta, abstol) if _ is not None) > 1: - raise ValueError("Cannot specify more than one of " - "{places, delta, abstol}") + raise ValueError("Cannot specify more than one of {places, delta, abstol}") if places is not None: - abstol = 10**(-places) + abstol = 10 ** (-places) if delta is not None: abstol = delta if abstol is None and reltol is None: @@ -144,28 +163,36 @@ def assertStructuredAlmostEqual(first, second, fail = None try: _assertStructuredAlmostEqual( - first, second, abstol, reltol, not allow_second_superset, - item_callback, exception) + first, + second, + abstol, + reltol, + not allow_second_superset, + item_callback, + exception, + ) except exception as e: fail = formatter( msg, "%s\n Found when comparing with tolerance " "(abs=%s, rel=%s):\n" - " first=%s\n second=%s" % ( + " first=%s\n second=%s" + % ( str(e), abstol, reltol, _unittest.case.safe_repr(first), _unittest.case.safe_repr(second), - )) + ), + ) if fail: raise exception(fail) -def _assertStructuredAlmostEqual(first, second, - abstol, reltol, exact, - item_callback, exception): +def _assertStructuredAlmostEqual( + first, second, abstol, reltol, exact, item_callback, exception +): """Recursive implementation of assertStructuredAlmostEqual""" args = (first, second) @@ -173,46 +200,49 @@ def _assertStructuredAlmostEqual(first, second, if all(isinstance(_, Mapping) for _ in args): if exact and len(first) != len(second): raise exception( - "mappings are different sizes (%s != %s)" % ( - len(first), - len(second), - )) + "mappings are different sizes (%s != %s)" % (len(first), len(second)) + ) for key in first: if key not in second: raise exception( - "key (%s) from first not found in second" % ( - _unittest.case.safe_repr(key), - )) + "key (%s) from first not found in second" + % (_unittest.case.safe_repr(key),) + ) try: _assertStructuredAlmostEqual( - first[key], second[key], abstol, reltol, exact, - item_callback, exception) + first[key], + second[key], + abstol, + reltol, + exact, + item_callback, + exception, + ) except exception as e: raise exception( - "%s\n Found when comparing key %s" % ( - str(e), _unittest.case.safe_repr(key))) - return # PASS! + "%s\n Found when comparing key %s" + % (str(e), _unittest.case.safe_repr(key)) + ) + return # PASS! elif any(isinstance(_, str) for _ in args): if first == second: - return # PASS! + return # PASS! elif all(isinstance(_, Sequence) for _ in args): # Note that Sequence includes strings if exact and len(first) != len(second): raise exception( - "sequences are different sizes (%s != %s)" % ( - len(first), - len(second), - )) + "sequences are different sizes (%s != %s)" % (len(first), len(second)) + ) for i, (f, s) in enumerate(zip(first, second)): try: _assertStructuredAlmostEqual( - f, s, abstol, reltol, exact, item_callback, exception) + f, s, abstol, reltol, exact, item_callback, exception + ) except exception as e: - raise exception( - "%s\n Found at position %s" % (str(e), i)) - return # PASS! + raise exception("%s\n Found at position %s" % (str(e), i)) + return # PASS! else: # Catch things like None, which may cause problems for the @@ -223,7 +253,7 @@ def _assertStructuredAlmostEqual(first, second, # the values to be comparable. try: if first is second or first == second: - return # PASS! + return # PASS! except: pass try: @@ -233,9 +263,11 @@ def _assertStructuredAlmostEqual(first, second, return diff = abs(f - s) if abstol is not None and diff <= abstol: - return # PASS! + return # PASS! if reltol is not None and diff / max(abs(f), abs(s)) <= reltol: - return # PASS! + return # PASS! + if math.isnan(f) and math.isnan(s): + return # PASS! (we will treat NaN as equal) except: pass @@ -251,6 +283,7 @@ def _assertStructuredAlmostEqual(first, second, ) raise exception(msg) + def _runner(q, qualname): "Utility wrapper for running functions, used by timeout()" resultType = _RunnerResult.call @@ -259,11 +292,13 @@ def _runner(q, qualname): elif isinstance(qualname, str): # Use unittest to instantiate the TestCase and run it resultType = _RunnerResult.unittest + def fcn(): s = _unittest.TestLoader().loadTestsFromName(qualname) r = _unittest.TestResult() s.run(r) return r.errors + r.failures, r.skipped + args = () kwargs = {} else: @@ -276,18 +311,22 @@ def fcn(): q.put((resultType, result, OUT.getvalue())) except: import traceback + etype, e, tb = sys.exc_info() if not isinstance(e, AssertionError): - e = etype("%s\nOriginal traceback:\n%s" % ( - e, ''.join(traceback.format_tb(tb)))) + e = etype( + "%s\nOriginal traceback:\n%s" % (e, ''.join(traceback.format_tb(tb))) + ) q.put((_RunnerResult.exception, e, OUT.getvalue())) finally: _runner.data.pop(qualname) + # Data structure for passing functions/arguments to the _runner # without forcing them to be pickled / unpickled _runner.data = {} + class _RunnerResult(enum.Enum): exception = 0 call = 1 @@ -344,6 +383,7 @@ def timeout(seconds, require_fork=False, timeout_raises=TimeoutError): import functools import multiprocessing import queue + def timeout_decorator(fcn): @functools.wraps(fcn) def test_timer(*args, **kwargs): @@ -351,8 +391,7 @@ def test_timer(*args, **kwargs): if qualname in _runner.data: return fcn(*args, **kwargs) if require_fork and multiprocessing.get_start_method() != 'fork': - raise _unittest.SkipTest( - "timeout requires unavailable fork interface") + raise _unittest.SkipTest("timeout requires unavailable fork interface") q = multiprocessing.Queue() if multiprocessing.get_start_method() == 'fork': @@ -361,8 +400,11 @@ def test_timer(*args, **kwargs): # wrapped function operates in the same environment. _runner.data[q] = (fcn, args, kwargs) runner_args = (q, qualname) - elif (args and fcn.__name__.startswith('test') - and _unittest.case.TestCase in args[0].__class__.__mro__): + elif ( + args + and fcn.__name__.startswith('test') + and _unittest.case.TestCase in args[0].__class__.__mro__ + ): # Option 2: this is wrapping a unittest. Re-run # unittest in the child process with this function as # the sole target. This ensures that things like setUp @@ -375,8 +417,7 @@ def test_timer(*args, **kwargs): # environment configuration that it does not set up # itself. runner_args = (q, (qualname, test_timer, args, kwargs)) - test_proc = multiprocessing.Process( - target=_runner, args=runner_args) + test_proc = multiprocessing.Process(target=_runner, args=runner_args) test_proc.daemon = True try: test_proc.start() @@ -386,14 +427,16 @@ def test_timer(*args, **kwargs): "Exception raised spawning timeout subprocess " "on a platform that does not support 'fork'. " "It is likely that either the wrapped function or " - "one of its arguments is not serializable") + "one of its arguments is not serializable" + ) raise try: resultType, result, stdout = q.get(True, seconds) except queue.Empty: test_proc.terminate() raise timeout_raises( - "test timed out after %s seconds" % (seconds,)) from None + "test timed out after %s seconds" % (seconds,) + ) from None finally: _runner.data.pop(q, None) sys.stdout.write(stdout) @@ -409,10 +452,30 @@ def test_timer(*args, **kwargs): args[0].skipTest(msg) else: raise result + return test_timer + return timeout_decorator +class _AssertRaisesContext_NormalizeWhitespace(_unittest.case._AssertRaisesContext): + def __exit__(self, exc_type, exc_value, tb): + try: + _save_re = self.expected_regex + self.expected_regex = None + if not super().__exit__(exc_type, exc_value, tb): + return False + finally: + self.expected_regex = _save_re + + exc_value = re.sub(r'(?s)\s+', ' ', str(exc_value)) + if not _save_re.search(exc_value): + self._raiseFailure( + '"{}" does not match "{}"'.format(_save_re.pattern, exc_value) + ) + return True + + class TestCase(_unittest.TestCase): """A Pyomo-specific class whose instances are single test cases. @@ -424,13 +487,21 @@ class TestCase(_unittest.TestCase): unittest.TestCase documentation ------------------------------- """ + __doc__ += _unittest.TestCase.__doc__ - def assertStructuredAlmostEqual(self, first, second, - places=None, msg=None, delta=None, - reltol=None, abstol=None, - allow_second_superset=False, - item_callback=_floatOrCall): + def assertStructuredAlmostEqual( + self, + first, + second, + places=None, + msg=None, + delta=None, + reltol=None, + abstol=None, + allow_second_superset=False, + item_callback=_floatOrCall, + ): assertStructuredAlmostEqual( first=first, second=second, @@ -444,3 +515,34 @@ def assertStructuredAlmostEqual(self, first, second, exception=self.failureException, formatter=self._formatMessage, ) + + def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs): + """Asserts that the message in a raised exception matches a regex. + + This is a light weight wrapper around + :py:meth:`unittest.TestCase.assertRaisesRegex` that adds + handling of a `normalize_whitespace` keyword argument that + normalizes all consecutive whitespace in the exception message + to a single space before checking the regular expression. + + Args: + expected_exception: Exception class expected to be raised. + expected_regex: Regex (re.Pattern object or string) expected + to be found in error message. + args: Function to be called and extra positional args. + kwargs: Extra kwargs. + msg: Optional message used in case of failure. Can only be used + when assertRaisesRegex is used as a context manager. + normalize_whitespace: Optional bool that, if True, collapses + consecutive whitespace (including newlines) into a + single space before checking against the regular + expression + + """ + normalize_whitespace = kwargs.pop('normalize_whitespace', False) + if normalize_whitespace: + contextClass = _AssertRaisesContext_NormalizeWhitespace + else: + contextClass = _unittest.case._AssertRaisesContext + context = contextClass(expected_exception, self, expected_regex) + return context.handle('assertRaisesRegex', args, kwargs) diff --git a/pyomo/contrib/ampl_function_demo/build.py b/pyomo/contrib/ampl_function_demo/build.py index fc82429b4e2..cd35064ea4e 100644 --- a/pyomo/contrib/ampl_function_demo/build.py +++ b/pyomo/contrib/ampl_function_demo/build.py @@ -12,6 +12,7 @@ import sys from pyomo.common.cmake_builder import build_cmake_project + def build_ampl_function_demo(user_args=[], parallel=None): return build_cmake_project( targets=['src'], @@ -21,9 +22,11 @@ def build_ampl_function_demo(user_args=[], parallel=None): parallel=parallel, ) + class AMPLFunctionDemoBuilder(object): def __call__(self, parallel): return build_ampl_function_demo(parallel=parallel) + if __name__ == "__main__": build_ampl_function_demo(sys.argv[1:]) diff --git a/pyomo/contrib/ampl_function_demo/plugins.py b/pyomo/contrib/ampl_function_demo/plugins.py index 47719c9883f..230d9c4b667 100644 --- a/pyomo/contrib/ampl_function_demo/plugins.py +++ b/pyomo/contrib/ampl_function_demo/plugins.py @@ -12,5 +12,6 @@ from pyomo.common.extensions import ExtensionBuilderFactory from pyomo.contrib.ampl_function_demo.build import AMPLFunctionDemoBuilder + def load(): ExtensionBuilderFactory.register('ampl_function_demo')(AMPLFunctionDemoBuilder) diff --git a/pyomo/contrib/ampl_function_demo/src/functions.c b/pyomo/contrib/ampl_function_demo/src/functions.c index 1be8c0cc8dd..f62148c995a 100644 --- a/pyomo/contrib/ampl_function_demo/src/functions.c +++ b/pyomo/contrib/ampl_function_demo/src/functions.c @@ -27,7 +27,7 @@ * * The module defines two functions: * - a demo function demonstrating the use of string arguments and - * vairable-length argument lists + * variable-length argument lists * - a "safe" cube root function that avoids infinite derivatives at 0 * * The functaions are registered with the ASL using the funcadd() diff --git a/pyomo/contrib/ampl_function_demo/tests/test_ampl_function_demo.py b/pyomo/contrib/ampl_function_demo/tests/test_ampl_function_demo.py index 95b60a4c6ee..af52c2def9f 100644 --- a/pyomo/contrib/ampl_function_demo/tests/test_ampl_function_demo.py +++ b/pyomo/contrib/ampl_function_demo/tests/test_ampl_function_demo.py @@ -14,10 +14,12 @@ import pyomo.common.unittest as unittest from pyomo.common.fileutils import find_library from pyomo.opt import check_available_solvers +from pyomo.core.base.external import nan flib = find_library("asl_external_demo") is_pypy = platform.python_implementation().lower().startswith('pypy') + @unittest.skipUnless(flib, 'Could not find the "asl_external_demo.so" library') class TestAMPLExternalFunction(unittest.TestCase): @unittest.skipIf(is_pypy, 'Cannot evaluate external functions under pypy') @@ -29,13 +31,29 @@ def test_eval_function(self): m.cbrt = pyo.ExternalFunction(library=flib, function="safe_cbrt") self.assertAlmostEqual(m.cbrt(6)(), 1.81712059, 4) self.assertStructuredAlmostEqual( - m.cbrt.evaluate_fgh([0]), - (0, [100951], [-1.121679e13]), - reltol=1e-5 + m.cbrt.evaluate_fgh([0]), (0, [100951], [-1.121679e13]), reltol=1e-5 + ) + + @unittest.skipIf(is_pypy, 'Cannot evaluate external functions under pypy') + def test_eval_function_fgh(self): + m = pyo.ConcreteModel() + m.tf = pyo.ExternalFunction(library=flib, function="demo_function") + + f, g, h = m.tf.evaluate_fgh(("sum", 1, 2, 3)) + self.assertEqual(f, 6) + self.assertEqual(g, [nan, 1, 1, 1]) + self.assertEqual(h, [nan, nan, 0, nan, 0, 0, nan, 0, 0, 0]) + + f, g, h = m.tf.evaluate_fgh(("inv", 1, 2, 3)) + self.assertAlmostEqual(f, 1.8333333, 4) + self.assertStructuredAlmostEqual(g, [nan, -1, -1 / 4, -1 / 9]) + self.assertStructuredAlmostEqual( + h, [nan, nan, 2, nan, 0, 1 / 4, nan, 0, 0, 2 / 27] ) - @unittest.skipUnless(check_available_solvers('ipopt'), - "The 'ipopt' solver is not available") + @unittest.skipUnless( + check_available_solvers('ipopt'), "The 'ipopt' solver is not available" + ) def test_solve_function(self): m = pyo.ConcreteModel() m.sum_it = pyo.ExternalFunction(library=flib, function="demo_function") @@ -45,7 +63,7 @@ def test_solve_function(self): m.y.fix() # Note: this also tests passing constant expressions to external # functions in the NL writer - m.c = pyo.Constraint(expr=1.5 == m.sum_it("inv", 3, m.x, 1/(m.y+1))) + m.c = pyo.Constraint(expr=1.5 == m.sum_it("inv", 3, m.x, 1 / (m.y + 1))) m.o = pyo.Objective(expr=m.cbrt(m.x)) solver = pyo.SolverFactory("ipopt") solver.solve(m, tee=True) diff --git a/pyomo/contrib/appsi/base.py b/pyomo/contrib/appsi/base.py index c0cf6b5b1e1..e2a694cfbd6 100644 --- a/pyomo/contrib/appsi/base.py +++ b/pyomo/contrib/appsi/base.py @@ -1,6 +1,15 @@ import abc import enum -from typing import Sequence, Dict, Optional, Mapping, NoReturn, List, Tuple +from typing import ( + Sequence, + Dict, + Optional, + Mapping, + NoReturn, + List, + Tuple, + MutableMapping, +) from pyomo.core.base.constraint import _GeneralConstraintData, Constraint from pyomo.core.base.sos import _SOSConstraintData, SOSConstraint from pyomo.core.base.var import _GeneralVarData, Var @@ -17,8 +26,14 @@ from pyomo.common.factory import Factory import os from pyomo.opt.results.results_ import SolverResults as LegacySolverResults -from pyomo.opt.results.solution import Solution as LegacySolution, SolutionStatus as LegacySolutionStatus -from pyomo.opt.results.solver import TerminationCondition as LegacyTerminationCondition, SolverStatus as LegacySolverStatus +from pyomo.opt.results.solution import ( + Solution as LegacySolution, + SolutionStatus as LegacySolutionStatus, +) +from pyomo.opt.results.solver import ( + TerminationCondition as LegacyTerminationCondition, + SolverStatus as LegacySolverStatus, +) from pyomo.core.kernel.objective import minimize from pyomo.core.base import SymbolMap import weakref @@ -31,6 +46,7 @@ class TerminationCondition(enum.Enum): """ An enumeration for checking the termination condition of solvers """ + unknown = 0 """unknown serves as both a default value, and it is used when no other enum member makes sense""" @@ -87,17 +103,22 @@ class SolverConfig(ConfigDict): If True, then some timing information will be printed at the end of the solve. """ - def __init__(self, - description=None, - doc=None, - implicit=False, - implicit_domain=None, - visibility=0): - super(SolverConfig, self).__init__(description=description, - doc=doc, - implicit=implicit, - implicit_domain=implicit_domain, - visibility=visibility) + + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super(SolverConfig, self).__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) self.declare('time_limit', ConfigValue(domain=NonNegativeFloat)) self.declare('stream_solver', ConfigValue(domain=bool)) @@ -122,17 +143,22 @@ class MIPSolverConfig(SolverConfig): If True, all integer variables will be relaxed to continuous variables before solving """ - def __init__(self, - description=None, - doc=None, - implicit=False, - implicit_domain=None, - visibility=0): - super(MIPSolverConfig, self).__init__(description=description, - doc=doc, - implicit=implicit, - implicit_domain=implicit_domain, - visibility=visibility) + + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super(MIPSolverConfig, self).__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) self.declare('mip_gap', ConfigValue(domain=NonNegativeFloat)) self.declare('relax_integrality', ConfigValue(domain=bool)) @@ -142,7 +168,9 @@ def __init__(self, class SolutionLoaderBase(abc.ABC): - def load_vars(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> NoReturn: + def load_vars( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> NoReturn: """ Load the solution of the primal variables into the value attribute of the variables. @@ -157,15 +185,17 @@ def load_vars(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> StaleFlagManager.mark_all_as_stale(delayed=True) @abc.abstractmethod - def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_primals( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: """ Returns a ComponentMap mapping variable to var value. Parameters ---------- vars_to_load: list - A list of the variables whose solution value should be retreived. If vars_to_load is None, - then the values for all variables will be retreived. + A list of the variables whose solution value should be retrieved. If vars_to_load is None, + then the values for all variables will be retrieved. Returns ------- @@ -174,15 +204,17 @@ def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) """ pass - def get_duals(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None) -> Dict[_GeneralConstraintData, float]: + def get_duals( + self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None + ) -> Dict[_GeneralConstraintData, float]: """ Returns a dictionary mapping constraint to dual value. Parameters ---------- cons_to_load: list - A list of the constraints whose duals should be retreived. If cons_to_load is None, then the duals for all - constraints will be retreived. + A list of the constraints whose duals should be retrieved. If cons_to_load is None, then the duals for all + constraints will be retrieved. Returns ------- @@ -191,7 +223,9 @@ def get_duals(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = N """ raise NotImplementedError(f'{type(self)} does not support the get_duals method') - def get_slacks(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None) -> Dict[_GeneralConstraintData, float]: + def get_slacks( + self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None + ) -> Dict[_GeneralConstraintData, float]: """ Returns a dictionary mapping constraint to slack. @@ -206,16 +240,20 @@ def get_slacks(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = slacks: dict Maps constraints to slacks """ - raise NotImplementedError(f'{type(self)} does not support the get_slacks method') + raise NotImplementedError( + f'{type(self)} does not support the get_slacks method' + ) - def get_reduced_costs(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_reduced_costs( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: """ Returns a ComponentMap mapping variable to reduced cost. Parameters ---------- vars_to_load: list - A list of the variables whose reduced cost should be retreived. If vars_to_load is None, then the + A list of the variables whose reduced cost should be retrieved. If vars_to_load is None, then the reduced costs for all variables will be loaded. Returns @@ -223,11 +261,19 @@ def get_reduced_costs(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = reduced_costs: ComponentMap Maps variables to reduced costs """ - raise NotImplementedError(f'{type(self)} does not support the get_reduced_costs method') + raise NotImplementedError( + f'{type(self)} does not support the get_reduced_costs method' + ) class SolutionLoader(SolutionLoaderBase): - def __init__(self, primals, duals, slacks, reduced_costs): + def __init__( + self, + primals: Optional[MutableMapping], + duals: Optional[MutableMapping], + slacks: Optional[MutableMapping], + reduced_costs: Optional[MutableMapping], + ): """ Parameters ---------- @@ -245,7 +291,14 @@ def __init__(self, primals, duals, slacks, reduced_costs): self._slacks = slacks self._reduced_costs = reduced_costs - def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_primals( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: + if self._primals is None: + raise RuntimeError( + 'Solution loader does not currently have a valid solution. Please ' + 'check the termination condition.' + ) if vars_to_load is None: return ComponentMap(self._primals.values()) else: @@ -254,7 +307,15 @@ def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) primals[v] = self._primals[id(v)][1] return primals - def get_duals(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None) -> Dict[_GeneralConstraintData, float]: + def get_duals( + self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None + ) -> Dict[_GeneralConstraintData, float]: + if self._duals is None: + raise RuntimeError( + 'Solution loader does not currently have valid duals. Please ' + 'check the termination condition and ensure the solver returns duals ' + 'for the given problem type.' + ) if cons_to_load is None: duals = dict(self._duals) else: @@ -263,7 +324,15 @@ def get_duals(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = N duals[c] = self._duals[c] return duals - def get_slacks(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None) -> Dict[_GeneralConstraintData, float]: + def get_slacks( + self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None + ) -> Dict[_GeneralConstraintData, float]: + if self._slacks is None: + raise RuntimeError( + 'Solution loader does not currently have valid slacks. Please ' + 'check the termination condition and ensure the solver returns slacks ' + 'for the given problem type.' + ) if cons_to_load is None: slacks = dict(self._slacks) else: @@ -272,7 +341,15 @@ def get_slacks(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = slacks[c] = self._slacks[c] return slacks - def get_reduced_costs(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_reduced_costs( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: + if self._reduced_costs is None: + raise RuntimeError( + 'Solution loader does not currently have valid reduced costs. Please ' + 'check the termination condition and ensure the solver returns reduced ' + 'costs for the given problem type.' + ) if vars_to_load is None: rc = ComponentMap(self._reduced_costs.values()) else: @@ -322,17 +399,20 @@ class Results(object): ... else: #doctest:+SKIP ... print('The following termination condition was encountered: ', results.termination_condition) #doctest:+SKIP """ + def __init__(self): - self.solution_loader: Optional[SolutionLoaderBase] = None + self.solution_loader: SolutionLoaderBase = SolutionLoader( + None, None, None, None + ) self.termination_condition: TerminationCondition = TerminationCondition.unknown self.best_feasible_objective: Optional[float] = None self.best_objective_bound: Optional[float] = None def __str__(self): s = '' - s += 'termination_condition: ' + str(self.termination_condition) + '\n' + s += 'termination_condition: ' + str(self.termination_condition) + '\n' s += 'best_feasible_objective: ' + str(self.best_feasible_objective) + '\n' - s += 'best_objective_bound: ' + str(self.best_objective_bound) + s += 'best_objective_bound: ' + str(self.best_objective_bound) return s @@ -348,94 +428,149 @@ class UpdateConfig(ConfigDict): update_params: bool update_named_expressions: bool """ - def __init__(self, - description=None, - doc=None, - implicit=False, - implicit_domain=None, - visibility=0): + + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): if doc is None: doc = 'Configuration options to detect changes in model between solves' - super(UpdateConfig, self).__init__(description=description, - doc=doc, - implicit=implicit, - implicit_domain=implicit_domain, - visibility=visibility) - - self.declare('check_for_new_or_removed_constraints', - ConfigValue(domain=bool, - default=True, - doc="""If False, new/old constraints will not be automatically detected on subsequent - solves. Use False only when manually updating the solver with opt.add_constraints() - and opt.remove_constraints() or when you are certain constraints are not being - added to/removed from the model.""")) - self.declare('check_for_new_or_removed_vars', - ConfigValue(domain=bool, - default=True, - doc="""If False, new/old variables will not be automatically detected on subsequent - solves. Use False only when manually updating the solver with opt.add_variables() and - opt.remove_variables() or when you are certain variables are not being added to / - removed from the model.""")) - self.declare('check_for_new_or_removed_params', - ConfigValue(domain=bool, - default=True, - doc="""If False, new/old parameters will not be automatically detected on subsequent - solves. Use False only when manually updating the solver with opt.add_params() and - opt.remove_params() or when you are certain parameters are not being added to / - removed from the model.""")) - self.declare('check_for_new_objective', - ConfigValue(domain=bool, - default=True, - doc="""If False, new/old objectives will not be automatically detected on subsequent - solves. Use False only when manually updating the solver with opt.set_objective() or - when you are certain objectives are not being added to / removed from the model.""")) - self.declare('update_constraints', - ConfigValue(domain=bool, - default=True, - doc="""If False, changes to existing constraints will not be automatically detected on - subsequent solves. This includes changes to the lower, body, and upper attributes of - constraints. Use False only when manually updating the solver with - opt.remove_constraints() and opt.add_constraints() or when you are certain constraints - are not being modified.""")) - self.declare('update_vars', - ConfigValue(domain=bool, - default=True, - doc="""If False, changes to existing variables will not be automatically detected on - subsequent solves. This includes changes to the lb, ub, domain, and fixed - attributes of variables. Use False only when manually updating the solver with - opt.update_variables() or when you are certain variables are not being modified.""")) - self.declare('update_params', - ConfigValue(domain=bool, - default=True, - doc="""If False, changes to parameter values will not be automatically detected on - subsequent solves. Use False only when manually updating the solver with - opt.update_params() or when you are certain parameters are not being modified.""")) - self.declare('update_named_expressions', - ConfigValue(domain=bool, - default=True, - doc="""If False, changes to Expressions will not be automatically detected on - subsequent solves. Use False only when manually updating the solver with - opt.remove_constraints() and opt.add_constraints() or when you are certain - Expressions are not being modified.""")) - self.declare('update_objective', - ConfigValue(domain=bool, - default=True, - doc="""If False, changes to objectives will not be automatically detected on - subsequent solves. This includes the expr and sense attributes of objectives. Use - False only when manually updating the solver with opt.set_objective() or when you are - certain objectives are not being modified.""")) - self.declare('treat_fixed_vars_as_params', - ConfigValue(domain=bool, - default=True, - doc="""This is an advanced option that should only be used in special circumstances. - With the default setting of True, fixed variables will be treated like parameters. - This means that z == x*y will be linear if x or y is fixed and the constraint - can be written to an LP file. If the value of the fixed variable gets changed, we have - to completely reprocess all constraints using that variable. If - treat_fixed_vars_as_params is False, then constraints will be processed as if fixed - variables are not fixed, and the solver will be told the variable is fixed. This means - z == x*y could not be written to an LP file even if x and/or y is fixed. However, - updating the values of fixed variables is much faster this way.""")) + super(UpdateConfig, self).__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) + + self.declare( + 'check_for_new_or_removed_constraints', + ConfigValue( + domain=bool, + default=True, + doc=""" + If False, new/old constraints will not be automatically detected on subsequent + solves. Use False only when manually updating the solver with opt.add_constraints() + and opt.remove_constraints() or when you are certain constraints are not being + added to/removed from the model.""", + ), + ) + self.declare( + 'check_for_new_or_removed_vars', + ConfigValue( + domain=bool, + default=True, + doc=""" + If False, new/old variables will not be automatically detected on subsequent + solves. Use False only when manually updating the solver with opt.add_variables() and + opt.remove_variables() or when you are certain variables are not being added to / + removed from the model.""", + ), + ) + self.declare( + 'check_for_new_or_removed_params', + ConfigValue( + domain=bool, + default=True, + doc=""" + If False, new/old parameters will not be automatically detected on subsequent + solves. Use False only when manually updating the solver with opt.add_params() and + opt.remove_params() or when you are certain parameters are not being added to / + removed from the model.""", + ), + ) + self.declare( + 'check_for_new_objective', + ConfigValue( + domain=bool, + default=True, + doc=""" + If False, new/old objectives will not be automatically detected on subsequent + solves. Use False only when manually updating the solver with opt.set_objective() or + when you are certain objectives are not being added to / removed from the model.""", + ), + ) + self.declare( + 'update_constraints', + ConfigValue( + domain=bool, + default=True, + doc=""" + If False, changes to existing constraints will not be automatically detected on + subsequent solves. This includes changes to the lower, body, and upper attributes of + constraints. Use False only when manually updating the solver with + opt.remove_constraints() and opt.add_constraints() or when you are certain constraints + are not being modified.""", + ), + ) + self.declare( + 'update_vars', + ConfigValue( + domain=bool, + default=True, + doc=""" + If False, changes to existing variables will not be automatically detected on + subsequent solves. This includes changes to the lb, ub, domain, and fixed + attributes of variables. Use False only when manually updating the solver with + opt.update_variables() or when you are certain variables are not being modified.""", + ), + ) + self.declare( + 'update_params', + ConfigValue( + domain=bool, + default=True, + doc=""" + If False, changes to parameter values will not be automatically detected on + subsequent solves. Use False only when manually updating the solver with + opt.update_params() or when you are certain parameters are not being modified.""", + ), + ) + self.declare( + 'update_named_expressions', + ConfigValue( + domain=bool, + default=True, + doc=""" + If False, changes to Expressions will not be automatically detected on + subsequent solves. Use False only when manually updating the solver with + opt.remove_constraints() and opt.add_constraints() or when you are certain + Expressions are not being modified.""", + ), + ) + self.declare( + 'update_objective', + ConfigValue( + domain=bool, + default=True, + doc=""" + If False, changes to objectives will not be automatically detected on + subsequent solves. This includes the expr and sense attributes of objectives. Use + False only when manually updating the solver with opt.set_objective() or when you are + certain objectives are not being modified.""", + ), + ) + self.declare( + 'treat_fixed_vars_as_params', + ConfigValue( + domain=bool, + default=True, + doc=""" + This is an advanced option that should only be used in special circumstances. + With the default setting of True, fixed variables will be treated like parameters. + This means that z == x*y will be linear if x or y is fixed and the constraint + can be written to an LP file. If the value of the fixed variable gets changed, we have + to completely reprocess all constraints using that variable. If + treat_fixed_vars_as_params is False, then constraints will be processed as if fixed + variables are not fixed, and the solver will be told the variable is fixed. This means + z == x*y could not be written to an LP file even if x and/or y is fixed. However, + updating the values of fixed variables is much faster this way.""", + ), + ) self.check_for_new_or_removed_constraints: bool = True self.check_for_new_or_removed_vars: bool = True @@ -465,7 +600,14 @@ def __format__(self, format_spec): # We want general formatting of this Enum to return the # formatted string value and not the int (which is the # default implementation from IntEnum) - return format(str(self).split('.')[-1], format_spec) + return format(self.name, format_spec) + + def __str__(self): + # Note: Python 3.11 changed the core enums so that the + # "mixin" type for standard enums overrides the behavior + # specified in __format__. We will override str() here to + # preserve the previous behavior + return self.name @abc.abstractmethod def solve(self, model: _BlockData, timer: HierarchicalTimer = None) -> Results: @@ -555,9 +697,11 @@ class PersistentSolver(Solver): def is_persistent(self): return True - def load_vars(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> NoReturn: + def load_vars( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> NoReturn: """ - Load the solution of the primal variables into the value attribut of the variables. + Load the solution of the primal variables into the value attribute of the variables. Parameters ---------- @@ -570,10 +714,14 @@ def load_vars(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> StaleFlagManager.mark_all_as_stale(delayed=True) @abc.abstractmethod - def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_primals( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: pass - def get_duals(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None) -> Dict[_GeneralConstraintData, float]: + def get_duals( + self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None + ) -> Dict[_GeneralConstraintData, float]: """ Declare sign convention in docstring here. @@ -588,9 +736,13 @@ def get_duals(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = N duals: dict Maps constraints to dual values """ - raise NotImplementedError('{0} does not support the get_duals method'.format(type(self))) + raise NotImplementedError( + '{0} does not support the get_duals method'.format(type(self)) + ) - def get_slacks(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None) -> Dict[_GeneralConstraintData, float]: + def get_slacks( + self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None + ) -> Dict[_GeneralConstraintData, float]: """ Parameters ---------- @@ -603,9 +755,13 @@ def get_slacks(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = slacks: dict Maps constraints to slack values """ - raise NotImplementedError('{0} does not support the get_slacks method'.format(type(self))) + raise NotImplementedError( + '{0} does not support the get_slacks method'.format(type(self)) + ) - def get_reduced_costs(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_reduced_costs( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: """ Parameters ---------- @@ -618,7 +774,9 @@ def get_reduced_costs(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = reduced_costs: ComponentMap Maps variable to reduced cost """ - raise NotImplementedError('{0} does not support the get_reduced_costs method'.format(type(self))) + raise NotImplementedError( + '{0} does not support the get_reduced_costs method'.format(type(self)) + ) @property @abc.abstractmethod @@ -687,15 +845,21 @@ def get_primals(self, vars_to_load=None): self._assert_solution_still_valid() return self._solver.get_primals(vars_to_load=vars_to_load) - def get_duals(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None) -> Dict[_GeneralConstraintData, float]: + def get_duals( + self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None + ) -> Dict[_GeneralConstraintData, float]: self._assert_solution_still_valid() return self._solver.get_duals(cons_to_load=cons_to_load) - def get_slacks(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None) -> Dict[_GeneralConstraintData, float]: + def get_slacks( + self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None + ) -> Dict[_GeneralConstraintData, float]: self._assert_solution_still_valid() return self._solver.get_slacks(cons_to_load=cons_to_load) - def get_reduced_costs(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_reduced_costs( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: self._assert_solution_still_valid() return self._solver.get_reduced_costs(vars_to_load=vars_to_load) @@ -736,7 +900,7 @@ def invalidate(self): class PersistentBase(abc.ABC): - def __init__(self, only_child_vars=True): + def __init__(self, only_child_vars=False): self._model = None self._active_constraints = dict() # maps constraint to (lower, body, upper) self._vars = dict() # maps var id to (var, lb, ub, fixed, domain, value) @@ -744,11 +908,15 @@ def __init__(self, only_child_vars=True): self._objective = None self._objective_expr = None self._objective_sense = None - self._named_expressions = dict() # maps constraint to list of tuples (named_expr, named_expr.expr) + self._named_expressions = ( + dict() + ) # maps constraint to list of tuples (named_expr, named_expr.expr) self._external_functions = ComponentMap() self._obj_named_expressions = list() self._update_config = UpdateConfig() - self._referenced_variables = dict() # var_id: [dict[constraints, None], dict[sos constraints, None], None or objective] + self._referenced_variables = ( + dict() + ) # var_id: [dict[constraints, None], dict[sos constraints, None], None or objective] self._vars_referenced_by_con = dict() self._vars_referenced_by_obj = list() self._expr_types = None @@ -781,9 +949,18 @@ def _add_variables(self, variables: List[_GeneralVarData]): def add_variables(self, variables: List[_GeneralVarData]): for v in variables: if id(v) in self._referenced_variables: - raise ValueError('variable {name} has already been added'.format(name=v.name)) + raise ValueError( + 'variable {name} has already been added'.format(name=v.name) + ) self._referenced_variables[id(v)] = [dict(), dict(), None] - self._vars[id(v)] = (v, v._lb, v._ub, v.fixed, v.domain.get_interval(), v.value) + self._vars[id(v)] = ( + v, + v._lb, + v._ub, + v.fixed, + v.domain.get_interval(), + v.value, + ) self._add_variables(variables) @abc.abstractmethod @@ -820,7 +997,9 @@ def add_constraints(self, cons: List[_GeneralConstraintData]): all_fixed_vars = dict() for con in cons: if con in self._named_expressions: - raise ValueError('constraint {name} has already been added'.format(name=con.name)) + raise ValueError( + 'constraint {name} has already been added'.format(name=con.name) + ) self._active_constraints[con] = (con.lower, con.body, con.upper) if self.use_extensions and cmodel_available: tmp = cmodel.prep_for_repn(con.body, self._expr_types) @@ -850,7 +1029,9 @@ def _add_sos_constraints(self, cons: List[_SOSConstraintData]): def add_sos_constraints(self, cons: List[_SOSConstraintData]): for con in cons: if con in self._vars_referenced_by_con: - raise ValueError('constraint {name} has already been added'.format(name=con.name)) + raise ValueError( + 'constraint {name} has already been added'.format(name=con.name) + ) self._active_constraints[con] = tuple() variables = con.get_variables() if not self._only_child_vars: @@ -911,11 +1092,30 @@ def add_block(self, block): param_dict[id(_p)] = _p self.add_params(list(param_dict.values())) if self._only_child_vars: - self.add_variables(list(dict((id(var), var) for var in block.component_data_objects(Var, descend_into=True)).values())) - self.add_constraints([con for con in block.component_data_objects(Constraint, descend_into=True, - active=True)]) - self.add_sos_constraints([con for con in block.component_data_objects(SOSConstraint, descend_into=True, - active=True)]) + self.add_variables( + list( + dict( + (id(var), var) + for var in block.component_data_objects(Var, descend_into=True) + ).values() + ) + ) + self.add_constraints( + [ + con + for con in block.component_data_objects( + Constraint, descend_into=True, active=True + ) + ] + ) + self.add_sos_constraints( + [ + con + for con in block.component_data_objects( + SOSConstraint, descend_into=True, active=True + ) + ] + ) obj = get_objective(block) if obj is not None: self.set_objective(obj) @@ -928,7 +1128,11 @@ def remove_constraints(self, cons: List[_GeneralConstraintData]): self._remove_constraints(cons) for con in cons: if con not in self._named_expressions: - raise ValueError('cannot remove constraint {name} - it was not added'.format(name=con.name)) + raise ValueError( + 'cannot remove constraint {name} - it was not added'.format( + name=con.name + ) + ) for v in self._vars_referenced_by_con[con]: self._referenced_variables[id(v)][0].pop(con) if not self._only_child_vars: @@ -946,7 +1150,11 @@ def remove_sos_constraints(self, cons: List[_SOSConstraintData]): self._remove_sos_constraints(cons) for con in cons: if con not in self._vars_referenced_by_con: - raise ValueError('cannot remove constraint {name} - it was not added'.format(name=con.name)) + raise ValueError( + 'cannot remove constraint {name} - it was not added'.format( + name=con.name + ) + ) for v in self._vars_referenced_by_con[con]: self._referenced_variables[id(v)][1].pop(con) self._check_to_remove_vars(self._vars_referenced_by_con[con]) @@ -963,10 +1171,18 @@ def remove_variables(self, variables: List[_GeneralVarData]): for v in variables: v_id = id(v) if v_id not in self._referenced_variables: - raise ValueError('cannot remove variable {name} - it has not been added'.format(name=v.name)) + raise ValueError( + 'cannot remove variable {name} - it has not been added'.format( + name=v.name + ) + ) cons_using, sos_using, obj_using = self._referenced_variables[v_id] if cons_using or sos_using or (obj_using is not None): - raise ValueError('cannot remove variable {name} - it is still being used by constraints or the objective'.format(name=v.name)) + raise ValueError( + 'cannot remove variable {name} - it is still being used by constraints or the objective'.format( + name=v.name + ) + ) del self._referenced_variables[v_id] del self._vars[v_id] @@ -980,13 +1196,43 @@ def remove_params(self, params: List[_ParamData]): del self._params[id(p)] def remove_block(self, block): - self.remove_constraints([con for con in block.component_data_objects(ctype=Constraint, descend_into=True, - active=True)]) - self.remove_sos_constraints([con for con in block.component_data_objects(ctype=SOSConstraint, descend_into=True, - active=True)]) + self.remove_constraints( + [ + con + for con in block.component_data_objects( + ctype=Constraint, descend_into=True, active=True + ) + ] + ) + self.remove_sos_constraints( + [ + con + for con in block.component_data_objects( + ctype=SOSConstraint, descend_into=True, active=True + ) + ] + ) if self._only_child_vars: - self.remove_variables(list(dict((id(var), var) for var in block.component_data_objects(ctype=Var, descend_into=True)).values())) - self.remove_params(list(dict((id(p), p) for p in block.component_data_objects(ctype=Param, descend_into=True)).values())) + self.remove_variables( + list( + dict( + (id(var), var) + for var in block.component_data_objects( + ctype=Var, descend_into=True + ) + ).values() + ) + ) + self.remove_params( + list( + dict( + (id(p), p) + for p in block.component_data_objects( + ctype=Param, descend_into=True + ) + ).values() + ) + ) @abc.abstractmethod def _update_variables(self, variables: List[_GeneralVarData]): @@ -994,7 +1240,14 @@ def _update_variables(self, variables: List[_GeneralVarData]): def update_variables(self, variables: List[_GeneralVarData]): for v in variables: - self._vars[id(v)] = (v, v._lb, v._ub, v.fixed, v.domain.get_interval(), v.value) + self._vars[id(v)] = ( + v, + v._lb, + v._ub, + v.fixed, + v.domain.get_interval(), + v.value, + ) self._update_variables(variables) @abc.abstractmethod @@ -1017,8 +1270,13 @@ def update(self, timer: HierarchicalTimer = None): current_cons_dict = dict() current_sos_dict = dict() timer.start('vars') - if self._only_child_vars and (config.check_for_new_or_removed_vars or config.update_vars): - current_vars_dict = {id(v): v for v in self._model.component_data_objects(Var, descend_into=True)} + if self._only_child_vars and ( + config.check_for_new_or_removed_vars or config.update_vars + ): + current_vars_dict = { + id(v): v + for v in self._model.component_data_objects(Var, descend_into=True) + } for v_id, v in current_vars_dict.items(): if v_id not in self._vars: new_vars.append(v) @@ -1044,8 +1302,18 @@ def update(self, timer: HierarchicalTimer = None): timer.stop('params') timer.start('cons') if config.check_for_new_or_removed_constraints or config.update_constraints: - current_cons_dict = {c: None for c in self._model.component_data_objects(Constraint, descend_into=True, active=True)} - current_sos_dict = {c: None for c in self._model.component_data_objects(SOSConstraint, descend_into=True, active=True)} + current_cons_dict = { + c: None + for c in self._model.component_data_objects( + Constraint, descend_into=True, active=True + ) + } + current_sos_dict = { + c: None + for c in self._model.component_data_objects( + SOSConstraint, descend_into=True, active=True + ) + } for c in current_cons_dict.keys(): if c not in self._vars_referenced_by_con: new_cons.append(c) @@ -1054,17 +1322,18 @@ def update(self, timer: HierarchicalTimer = None): new_sos.append(c) for c in self._vars_referenced_by_con.keys(): if c not in current_cons_dict and c not in current_sos_dict: - if (c.ctype is Constraint) or (c.ctype is None and isinstance(c, _GeneralConstraintData)): + if (c.ctype is Constraint) or ( + c.ctype is None and isinstance(c, _GeneralConstraintData) + ): old_cons.append(c) else: - assert (c.ctype is SOSConstraint) or (c.ctype is None and isinstance(c, _SOSConstraintData)) + assert (c.ctype is SOSConstraint) or ( + c.ctype is None and isinstance(c, _SOSConstraintData) + ) old_sos.append(c) self.remove_constraints(old_cons) self.remove_sos_constraints(old_sos) timer.stop('cons') - timer.start('vars') - self.remove_variables(old_vars) - timer.stop('vars') timer.start('params') self.remove_params(old_params) @@ -1102,13 +1371,21 @@ def update(self, timer: HierarchicalTimer = None): cons_to_remove_and_add[c] = None continue if new_lower is not lower: - if type(new_lower) is NumericConstant and type(lower) is NumericConstant and new_lower.value == lower.value: + if ( + type(new_lower) is NumericConstant + and type(lower) is NumericConstant + and new_lower.value == lower.value + ): pass else: cons_to_remove_and_add[c] = None continue if new_upper is not upper: - if type(new_upper) is NumericConstant and type(upper) is NumericConstant and new_upper.value == upper.value: + if ( + type(new_upper) is NumericConstant + and type(upper) is NumericConstant + and new_upper.value == upper.value + ): pass else: cons_to_remove_and_add[c] = None @@ -1183,6 +1460,12 @@ def update(self, timer: HierarchicalTimer = None): self.set_objective(pyomo_obj) timer.stop('objective') + # this has to be done after the objective and constraints in case the + # old objective/constraints use old variables + timer.start('vars') + self.remove_variables(old_vars) + timer.stop('vars') + legacy_termination_condition_map = { TerminationCondition.unknown: LegacyTerminationCondition.unknown, @@ -1196,7 +1479,7 @@ def update(self, timer: HierarchicalTimer = None): TerminationCondition.infeasibleOrUnbounded: LegacyTerminationCondition.infeasibleOrUnbounded, TerminationCondition.error: LegacyTerminationCondition.error, TerminationCondition.interrupted: LegacyTerminationCondition.resourceInterrupt, - TerminationCondition.licensingProblems: LegacyTerminationCondition.licensingProblems + TerminationCondition.licensingProblems: LegacyTerminationCondition.licensingProblems, } @@ -1212,7 +1495,7 @@ def update(self, timer: HierarchicalTimer = None): TerminationCondition.infeasibleOrUnbounded: LegacySolverStatus.error, TerminationCondition.error: LegacySolverStatus.error, TerminationCondition.interrupted: LegacySolverStatus.aborted, - TerminationCondition.licensingProblems: LegacySolverStatus.error + TerminationCondition.licensingProblems: LegacySolverStatus.error, } @@ -1228,24 +1511,26 @@ def update(self, timer: HierarchicalTimer = None): TerminationCondition.infeasibleOrUnbounded: LegacySolutionStatus.unsure, TerminationCondition.error: LegacySolutionStatus.error, TerminationCondition.interrupted: LegacySolutionStatus.error, - TerminationCondition.licensingProblems: LegacySolutionStatus.error + TerminationCondition.licensingProblems: LegacySolutionStatus.error, } class LegacySolverInterface(object): - def solve(self, - model: _BlockData, - tee: bool = False, - load_solutions: bool = True, - logfile: Optional[str] = None, - solnfile: Optional[str] = None, - timelimit: Optional[float] = None, - report_timing: bool = False, - solver_io: Optional[str] = None, - suffixes: Optional[Sequence] = None, - options: Optional[Dict] = None, - keepfiles: bool = False, - symbolic_solver_labels: bool = False): + def solve( + self, + model: _BlockData, + tee: bool = False, + load_solutions: bool = True, + logfile: Optional[str] = None, + solnfile: Optional[str] = None, + timelimit: Optional[float] = None, + report_timing: bool = False, + solver_io: Optional[str] = None, + suffixes: Optional[Sequence] = None, + options: Optional[Dict] = None, + keepfiles: bool = False, + symbolic_solver_labels: bool = False, + ): original_config = self.config self.config = self.config() self.config.stream_solver = tee @@ -1273,8 +1558,12 @@ def solve(self, legacy_results = LegacySolverResults() legacy_soln = LegacySolution() - legacy_results.solver.status = legacy_solver_status_map[results.termination_condition] - legacy_results.solver.termination_condition = legacy_termination_condition_map[results.termination_condition] + legacy_results.solver.status = legacy_solver_status_map[ + results.termination_condition + ] + legacy_results.solver.termination_condition = legacy_termination_condition_map[ + results.termination_condition + ] legacy_soln.status = legacy_solution_status_map[results.termination_condition] legacy_results.solver.termination_message = str(results.termination_condition) @@ -1287,15 +1576,20 @@ def solve(self, else: legacy_results.problem.upper_bound = results.best_objective_bound legacy_results.problem.lower_bound = results.best_feasible_objective - if results.best_feasible_objective is not None and results.best_objective_bound is not None: - legacy_soln.gap = abs(results.best_feasible_objective - results.best_objective_bound) + if ( + results.best_feasible_objective is not None + and results.best_objective_bound is not None + ): + legacy_soln.gap = abs( + results.best_feasible_objective - results.best_objective_bound + ) else: legacy_soln.gap = None symbol_map = SymbolMap() symbol_map.byObject = dict(self.symbol_map.byObject) - symbol_map.bySymbol = {symb: weakref.ref(obj()) for symb, obj in self.symbol_map.bySymbol.items()} - symbol_map.aliases = {symb: weakref.ref(obj()) for symb, obj in self.symbol_map.aliases.items()} + symbol_map.bySymbol = dict(self.symbol_map.bySymbol) + symbol_map.aliases = dict(self.symbol_map.aliases) symbol_map.default_labeler = self.symbol_map.default_labeler model.solutions.add_symbol_map(symbol_map) legacy_results._smap_id = id(symbol_map) @@ -1360,7 +1654,7 @@ def license_is_valid(self) -> bool: @property def options(self): - for solver_name in ['gurobi', 'ipopt', 'cplex', 'cbc']: + for solver_name in ['gurobi', 'ipopt', 'cplex', 'cbc', 'highs']: if hasattr(self, solver_name + '_options'): return getattr(self, solver_name + '_options') raise NotImplementedError('Could not find the correct options') @@ -1368,7 +1662,7 @@ def options(self): @options.setter def options(self, val): found = False - for solver_name in ['gurobi', 'ipopt', 'cplex', 'cbc']: + for solver_name in ['gurobi', 'ipopt', 'cplex', 'cbc', 'highs']: if hasattr(self, solver_name + '_options'): setattr(self, solver_name + '_options', val) found = True @@ -1390,9 +1684,11 @@ def decorator(cls): class LegacySolver(LegacySolverInterface, cls): pass + LegacySolverFactory.register(name, doc)(LegacySolver) return cls + return decorator diff --git a/pyomo/contrib/appsi/build.py b/pyomo/contrib/appsi/build.py index a924a8e0a03..2a4e7bb785e 100644 --- a/pyomo/contrib/appsi/build.py +++ b/pyomo/contrib/appsi/build.py @@ -15,32 +15,35 @@ import sys import tempfile + def handleReadonly(function, path, excinfo): excvalue = excinfo[1] if excvalue.errno == errno.EACCES: - os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777 + os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) # 0777 function(path) else: raise + def get_appsi_extension(in_setup=False, appsi_root=None): from pybind11.setup_helpers import Pybind11Extension if appsi_root is None: from pyomo.common.fileutils import this_file_dir + appsi_root = this_file_dir() sources = [ os.path.join(appsi_root, 'cmodel', 'src', file_) for file_ in ( - 'interval.cpp', - 'expression.cpp', - 'common.cpp', - 'nl_writer.cpp', - 'lp_writer.cpp', - 'model_base.cpp', - 'fbbt_model.cpp', - 'cmodel_bindings.cpp', + 'interval.cpp', + 'expression.cpp', + 'common.cpp', + 'nl_writer.cpp', + 'lp_writer.cpp', + 'model_base.cpp', + 'fbbt_model.cpp', + 'cmodel_bindings.cpp', ) ] @@ -57,6 +60,7 @@ def get_appsi_extension(in_setup=False, appsi_root=None): extra_args = ['-std=c++11'] return Pybind11Extension(package_name, sources, extra_compile_args=extra_args) + def build_appsi(args=[]): print('\n\n**** Building APPSI ****') import setuptools @@ -80,9 +84,12 @@ def run(self): if not self.inplace: library = glob.glob("build/*/appsi_cmodel.*")[0] target = os.path.join( - PYOMO_CONFIG_DIR, 'lib', + PYOMO_CONFIG_DIR, + 'lib', 'python%s.%s' % sys.version_info[:2], - 'site-packages', '.') + 'site-packages', + '.', + ) if not os.path.exists(target): os.makedirs(target) shutil.copy(library, target) @@ -98,10 +105,8 @@ def run(self): package_config = { 'name': 'appsi_cmodel', 'packages': [], - 'ext_modules': [ get_appsi_extension(False) ], - 'cmdclass': { - "build_ext": appsi_build_ext, - }, + 'ext_modules': [get_appsi_extension(False)], + 'cmdclass': {"build_ext": appsi_build_ext}, } dist = Distribution(package_config) diff --git a/pyomo/contrib/appsi/cmodel/__init__.py b/pyomo/contrib/appsi/cmodel/__init__.py index 5d92d857bfb..9c276b518de 100644 --- a/pyomo/contrib/appsi/cmodel/__init__.py +++ b/pyomo/contrib/appsi/cmodel/__init__.py @@ -11,14 +11,19 @@ from pyomo.common.dependencies import attempt_import as _attempt_import + def _importer(): import os import sys from pyomo.common.envvar import PYOMO_CONFIG_DIR + try: pyomo_config_dir = os.path.join( - PYOMO_CONFIG_DIR, 'lib', 'python%s.%s' % sys.version_info[:2], - 'site-packages') + PYOMO_CONFIG_DIR, + 'lib', + 'python%s.%s' % sys.version_info[:2], + 'site-packages', + ) sys.path.insert(0, pyomo_config_dir) import appsi_cmodel except ImportError: @@ -29,9 +34,12 @@ def _importer(): return appsi_cmodel + cmodel, cmodel_available = _attempt_import( 'appsi_cmodel', - error_message=('Appsi requires building a small c++ extension. ' - 'Please use the "pyomo build-extensions" command'), + error_message=( + 'Appsi requires building a small c++ extension. ' + 'Please use the "pyomo build-extensions" command' + ), importer=_importer, ) diff --git a/pyomo/contrib/appsi/cmodel/tests/test_import.py b/pyomo/contrib/appsi/cmodel/tests/test_import.py index 0d6c4837bc3..f4647c216ba 100644 --- a/pyomo/contrib/appsi/cmodel/tests/test_import.py +++ b/pyomo/contrib/appsi/cmodel/tests/test_import.py @@ -9,8 +9,10 @@ class TestCmodelImport(unittest.TestCase): def test_import(self): pyomo_config_dir = os.path.join( - PYOMO_CONFIG_DIR, "lib", "python%s.%s" % sys.version_info[:2], - "site-packages" + PYOMO_CONFIG_DIR, + "lib", + "python%s.%s" % sys.version_info[:2], + "site-packages", ) cmodel_dir = this_file_dir() cmodel_dir = os.path.join(cmodel_dir, os.pardir) diff --git a/pyomo/contrib/appsi/examples/getting_started.py b/pyomo/contrib/appsi/examples/getting_started.py index bf08c9d2b4f..de22d28e0a4 100644 --- a/pyomo/contrib/appsi/examples/getting_started.py +++ b/pyomo/contrib/appsi/examples/getting_started.py @@ -11,16 +11,17 @@ def main(plot=True, n_points=200): m.x = pe.Var() m.y = pe.Var() m.p = pe.Param(initialize=1, mutable=True) - + m.obj = pe.Objective(expr=m.x**2 + m.y**2) - m.c1 = pe.Constraint(expr=m.y >= (m.x + 1)**2) - m.c2 = pe.Constraint(expr=m.y >= (m.x - m.p)**2) - + m.c1 = pe.Constraint(expr=m.y >= (m.x + 1) ** 2) + m.c2 = pe.Constraint(expr=m.y >= (m.x - m.p) ** 2) + opt = appsi.solvers.Cplex() # create an APPSI solver interface opt.config.load_solution = False # modify the config options - opt.update_config.check_for_new_or_removed_vars = False # change how automatic updates are handled + # change how automatic updates are handled + opt.update_config.check_for_new_or_removed_vars = False opt.update_config.update_vars = False - + # write a for loop to vary the value of parameter p from 1 to 10 p_values = [float(i) for i in np.linspace(1, 10, n_points)] obj_values = list() @@ -39,6 +40,7 @@ def main(plot=True, n_points=200): if plot: import matplotlib.pyplot as plt + # plot the results fig, ax1 = plt.subplots() ax1.set_xlabel('p') diff --git a/pyomo/contrib/appsi/examples/tests/test_examples.py b/pyomo/contrib/appsi/examples/tests/test_examples.py index 2942b29da81..d2c88224a7d 100644 --- a/pyomo/contrib/appsi/examples/tests/test_examples.py +++ b/pyomo/contrib/appsi/examples/tests/test_examples.py @@ -4,6 +4,7 @@ from pyomo.contrib.appsi.cmodel import cmodel_available from pyomo.contrib import appsi + @unittest.skipUnless(cmodel_available, 'appsi extensions are not available') class TestExamples(unittest.TestCase): def test_getting_started(self): diff --git a/pyomo/contrib/appsi/fbbt.py b/pyomo/contrib/appsi/fbbt.py index 1b8766c7a22..92a0e0c8cbc 100644 --- a/pyomo/contrib/appsi/fbbt.py +++ b/pyomo/contrib/appsi/fbbt.py @@ -1,5 +1,10 @@ from pyomo.contrib.appsi.base import PersistentBase -from pyomo.common.config import ConfigDict, ConfigValue, NonNegativeFloat, NonNegativeInt +from pyomo.common.config import ( + ConfigDict, + ConfigValue, + NonNegativeFloat, + NonNegativeInt, +) from .cmodel import cmodel, cmodel_available from typing import List, Optional from pyomo.core.base.var import _GeneralVarData @@ -21,28 +26,38 @@ class IntervalConfig(ConfigDict): improvement_tol: float max_iter: int """ - def __init__(self, - description=None, - doc=None, - implicit=False, - implicit_domain=None, - visibility=0): - super(IntervalConfig, self).__init__(description=description, - doc=doc, - implicit=implicit, - implicit_domain=implicit_domain, - visibility=visibility) - - self.feasibility_tol: float = self.declare('feasibility_tol', - ConfigValue(domain=NonNegativeFloat, default=1e-8)) - self.integer_tol: float = self.declare('integer_tol', - ConfigValue(domain=NonNegativeFloat, default=1e-5)) - self.improvement_tol: float = self.declare('improvement_tol', - ConfigValue(domain=NonNegativeFloat, default=1e-4)) - self.max_iter: int = self.declare('max_iter', - ConfigValue(domain=NonNegativeInt, default=10)) - self.deactivate_satisfied_constraints: bool = self.declare('deactivate_satisfied_constraints', - ConfigValue(domain=bool, default=False)) + + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super(IntervalConfig, self).__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) + + self.feasibility_tol: float = self.declare( + 'feasibility_tol', ConfigValue(domain=NonNegativeFloat, default=1e-8) + ) + self.integer_tol: float = self.declare( + 'integer_tol', ConfigValue(domain=NonNegativeFloat, default=1e-5) + ) + self.improvement_tol: float = self.declare( + 'improvement_tol', ConfigValue(domain=NonNegativeFloat, default=1e-4) + ) + self.max_iter: int = self.declare( + 'max_iter', ConfigValue(domain=NonNegativeInt, default=10) + ) + self.deactivate_satisfied_constraints: bool = self.declare( + 'deactivate_satisfied_constraints', ConfigValue(domain=bool, default=False) + ) class IntervalTightener(PersistentBase): @@ -104,8 +119,18 @@ def _add_variables(self, variables: List[_GeneralVarData]): set_name = False symbol_map = None labeler = None - cmodel.process_pyomo_vars(self._pyomo_expr_types, variables, self._var_map, self._param_map, - self._vars, self._rvar_map, set_name, symbol_map, labeler, False) + cmodel.process_pyomo_vars( + self._pyomo_expr_types, + variables, + self._var_map, + self._param_map, + self._vars, + self._rvar_map, + set_name, + symbol_map, + labeler, + False, + ) def _add_params(self, params: List[_ParamData]): cparams = cmodel.create_params(len(params)) @@ -119,15 +144,25 @@ def _add_params(self, params: List[_ParamData]): cp.name = self._symbol_map.getSymbol(p, self._param_labeler) def _add_constraints(self, cons: List[_GeneralConstraintData]): - cmodel.process_fbbt_constraints(self._cmodel, self._pyomo_expr_types, cons, self._var_map, self._param_map, - self._active_constraints, self._con_map, self._rcon_map) + cmodel.process_fbbt_constraints( + self._cmodel, + self._pyomo_expr_types, + cons, + self._var_map, + self._param_map, + self._active_constraints, + self._con_map, + self._rcon_map, + ) if self._symbolic_solver_labels: for c, cc in self._con_map.items(): cc.name = self._symbol_map.getSymbol(c, self._con_labeler) def _add_sos_constraints(self, cons: List[_SOSConstraintData]): if len(cons) != 0: - raise NotImplementedError('IntervalTightener does not support SOS constraints') + raise NotImplementedError( + 'IntervalTightener does not support SOS constraints' + ) def _remove_constraints(self, cons: List[_GeneralConstraintData]): if self._symbolic_solver_labels: @@ -140,7 +175,9 @@ def _remove_constraints(self, cons: List[_GeneralConstraintData]): def _remove_sos_constraints(self, cons: List[_SOSConstraintData]): if len(cons) != 0: - raise NotImplementedError('IntervalTightener does not support SOS constraints') + raise NotImplementedError( + 'IntervalTightener does not support SOS constraints' + ) def _remove_variables(self, variables: List[_GeneralVarData]): if self._symbolic_solver_labels: @@ -158,8 +195,18 @@ def _remove_params(self, params: List[_ParamData]): del self._param_map[id(p)] def _update_variables(self, variables: List[_GeneralVarData]): - cmodel.process_pyomo_vars(self._pyomo_expr_types, variables, self._var_map, self._param_map, - self._vars, self._rvar_map, False, None, None, True) + cmodel.process_pyomo_vars( + self._pyomo_expr_types, + variables, + self._var_map, + self._param_map, + self._vars, + self._rvar_map, + False, + None, + None, + True, + ) def update_params(self): for p_id, p in self._params.items(): @@ -177,7 +224,9 @@ def _set_objective(self, obj: _GeneralObjectiveData): ce = cmodel.Constant(0) sense = 0 else: - ce = cmodel.appsi_expr_from_pyomo_expr(obj.expr, self._var_map, self._param_map, self._pyomo_expr_types) + ce = cmodel.appsi_expr_from_pyomo_expr( + obj.expr, self._var_map, self._param_map, self._pyomo_expr_types + ) if obj.sense is minimize: sense = 0 else: @@ -214,18 +263,29 @@ def _deactivate_satisfied_cons(self): for c in cons_to_deactivate: c.deactivate() - def perform_fbbt(self, model: _BlockData, symbolic_solver_labels: Optional[bool] = None): + def perform_fbbt( + self, model: _BlockData, symbolic_solver_labels: Optional[bool] = None + ): if model is not self._model: self.set_instance(model, symbolic_solver_labels=symbolic_solver_labels) else: - if symbolic_solver_labels is not None and symbolic_solver_labels != self._symbolic_solver_labels: - raise RuntimeError('symbolic_solver_labels can only be changed through the set_instance method. ' - 'Please either use set_instance or create a new instance of IntervalTightener.') + if ( + symbolic_solver_labels is not None + and symbolic_solver_labels != self._symbolic_solver_labels + ): + raise RuntimeError( + 'symbolic_solver_labels can only be changed through the set_instance method. ' + 'Please either use set_instance or create a new instance of IntervalTightener.' + ) self.update() try: - n_iter = self._cmodel.perform_fbbt(self.config.feasibility_tol, self.config.integer_tol, - self.config.improvement_tol, self.config.max_iter, - self.config.deactivate_satisfied_constraints) + n_iter = self._cmodel.perform_fbbt( + self.config.feasibility_tol, + self.config.integer_tol, + self.config.improvement_tol, + self.config.max_iter, + self.config.deactivate_satisfied_constraints, + ) finally: # we want to make sure the pyomo model and cmodel stay in sync # even if an exception is raised and caught @@ -239,10 +299,14 @@ def perform_fbbt_with_seed(self, model: _BlockData, seed_var: _GeneralVarData): else: self.update() try: - n_iter = self._cmodel.perform_fbbt_with_seed(self._var_map[id(seed_var)], self.config.feasibility_tol, - self.config.integer_tol, self.config.improvement_tol, - self.config.max_iter, - self.config.deactivate_satisfied_constraints) + n_iter = self._cmodel.perform_fbbt_with_seed( + self._var_map[id(seed_var)], + self.config.feasibility_tol, + self.config.integer_tol, + self.config.improvement_tol, + self.config.max_iter, + self.config.deactivate_satisfied_constraints, + ) finally: # we want to make sure the pyomo model and cmodel stay in sync # even if an exception is raised and caught diff --git a/pyomo/contrib/appsi/plugins.py b/pyomo/contrib/appsi/plugins.py index b1fda2f0304..5333158239e 100644 --- a/pyomo/contrib/appsi/plugins.py +++ b/pyomo/contrib/appsi/plugins.py @@ -1,16 +1,23 @@ from pyomo.common.extensions import ExtensionBuilderFactory from .base import SolverFactory -from .solvers import Gurobi, Ipopt, Cbc, Cplex +from .solvers import Gurobi, Ipopt, Cbc, Cplex, Highs from .build import AppsiBuilder def load(): ExtensionBuilderFactory.register('appsi')(AppsiBuilder) - SolverFactory.register(name='appsi_gurobi', - doc='Automated persistent interface to Gurobi')(Gurobi) - SolverFactory.register(name='appsi_cplex', - doc='Automated persistent interface to Cplex')(Cplex) - SolverFactory.register(name='appsi_ipopt', - doc='Automated persistent interface to Ipopt')(Ipopt) - SolverFactory.register(name='appsi_cbc', - doc='Automated persistent interface to Cbc')(Cbc) + SolverFactory.register( + name='appsi_gurobi', doc='Automated persistent interface to Gurobi' + )(Gurobi) + SolverFactory.register( + name='appsi_cplex', doc='Automated persistent interface to Cplex' + )(Cplex) + SolverFactory.register( + name='appsi_ipopt', doc='Automated persistent interface to Ipopt' + )(Ipopt) + SolverFactory.register( + name='appsi_cbc', doc='Automated persistent interface to Cbc' + )(Cbc) + SolverFactory.register( + name='appsi_highs', doc='Automated persistent interface to Highs' + )(Highs) diff --git a/pyomo/contrib/appsi/solvers/__init__.py b/pyomo/contrib/appsi/solvers/__init__.py index 61bcd5475af..df58a0cb245 100644 --- a/pyomo/contrib/appsi/solvers/__init__.py +++ b/pyomo/contrib/appsi/solvers/__init__.py @@ -1,4 +1,5 @@ from .gurobi import Gurobi, GurobiResults from .ipopt import Ipopt from .cbc import Cbc -from.cplex import Cplex +from .cplex import Cplex +from .highs import Highs diff --git a/pyomo/contrib/appsi/solvers/cbc.py b/pyomo/contrib/appsi/solvers/cbc.py index 46c43186c91..a3aae2a9213 100644 --- a/pyomo/contrib/appsi/solvers/cbc.py +++ b/pyomo/contrib/appsi/solvers/cbc.py @@ -1,6 +1,12 @@ from pyomo.common.tempfiles import TempfileManager from pyomo.common.fileutils import Executable -from pyomo.contrib.appsi.base import PersistentSolver, Results, TerminationCondition, SolverConfig, PersistentSolutionLoader +from pyomo.contrib.appsi.base import ( + PersistentSolver, + Results, + TerminationCondition, + SolverConfig, + PersistentSolutionLoader, +) from pyomo.contrib.appsi.writers import LPWriter from pyomo.common.log import LogStream import logging @@ -28,17 +34,21 @@ class CbcConfig(SolverConfig): - def __init__(self, - description=None, - doc=None, - implicit=False, - implicit_domain=None, - visibility=0): - super(CbcConfig, self).__init__(description=description, - doc=doc, - implicit=implicit, - implicit_domain=implicit_domain, - visibility=visibility) + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super(CbcConfig, self).__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) self.declare('executable', ConfigValue()) self.declare('filename', ConfigValue(domain=str)) @@ -54,7 +64,7 @@ def __init__(self, class Cbc(PersistentSolver): - def __init__(self, only_child_vars=True): + def __init__(self, only_child_vars=False): self._config = CbcConfig() self._solver_options = dict() self._writer = LPWriter(only_child_vars=only_child_vars) @@ -72,11 +82,13 @@ def available(self): return self.Availability.FullLicense def version(self): - results = subprocess.run([str(self.config.executable), '-stop'], - timeout=5, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + results = subprocess.run( + [str(self.config.executable), '-stop'], + timeout=5, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) version = results.stdout.splitlines()[1] version = version.split(':')[1] version = version.strip() @@ -193,7 +205,7 @@ def solve(self, model, timer: HierarchicalTimer = None): TempfileManager.add_tempfile(self._filename + '.soln', exists=False) TempfileManager.add_tempfile(self._filename + '.log', exists=False) timer.start('write lp file') - self._writer.write(model, self._filename+'.lp', timer=timer) + self._writer.write(model, self._filename + '.lp', timer=timer) timer.stop('write lp file') res = self._apply_solver(timer) self._last_results_object = res @@ -255,14 +267,14 @@ def _parse_soln(self): symbol_map = self._writer.symbol_map - for line in all_lines[first_con_line:last_con_line+1]: + for line in all_lines[first_con_line : last_con_line + 1]: split_line = line.strip('*') split_line = split_line.split() name = split_line[1] orig_name = name[:-3] if orig_name == 'obj_const_con': continue - con = symbol_map.bySymbol[orig_name]() + con = symbol_map.bySymbol[orig_name] dual_val = float(split_line[-1]) if con in self._dual_sol: if abs(dual_val) > abs(self._dual_sol[con]): @@ -270,7 +282,7 @@ def _parse_soln(self): else: self._dual_sol[con] = dual_val - for line in all_lines[first_var_line:last_var_line+1]: + for line in all_lines[first_var_line : last_var_line + 1]: split_line = line.strip('*') split_line = split_line.split() name = split_line[1] @@ -278,13 +290,15 @@ def _parse_soln(self): continue val = float(split_line[2]) rc = float(split_line[3]) - var = symbol_map.bySymbol[name]() + var = symbol_map.bySymbol[name] self._primal_sol[id(var)] = (var, val) self._reduced_costs[id(var)] = (var, rc) - if (self.version() < (2, 10, 2) and - self._writer.get_active_objective() is not None and - self._writer.get_active_objective().sense == maximize): + if ( + self.version() < (2, 10, 2) + and self._writer.get_active_objective() is not None + and self._writer.get_active_objective().sense == maximize + ): if obj_val is not None: obj_val = -obj_val for con, dual_val in self._dual_sol.items(): @@ -292,7 +306,10 @@ def _parse_soln(self): for v_id, (v, rc_val) in self._reduced_costs.items(): self._reduced_costs[v_id] = (v, -rc_val) - if results.termination_condition == TerminationCondition.optimal and self.config.load_solution: + if ( + results.termination_condition == TerminationCondition.optimal + and self.config.load_solution + ): for v_id, (v, val) in self._primal_sol.items(): v.set_value(val, skip_validation=True) if self._writer.get_active_objective() is None: @@ -305,12 +322,12 @@ def _parse_soln(self): else: results.best_feasible_objective = obj_val elif self.config.load_solution: - raise RuntimeError('A feasible solution was not found, so no solution can be loaded.' - 'Please set opt.config.load_solution=False and check ' - 'results.termination_condition and ' - 'resutls.best_feasible_objective before loading a solution.') - - results.solution_loader = PersistentSolutionLoader(solver=self) + raise RuntimeError( + 'A feasible solution was not found, so no solution can be loaded.' + 'Please set opt.config.load_solution=False and check ' + 'results.termination_condition and ' + 'results.best_feasible_objective before loading a solution.' + ) return results @@ -338,8 +355,10 @@ def _check_and_escape_options(): tmp_v = '"' + tmp_v + '"' if _bad: - raise ValueError("Unable to properly escape solver option:" - "\n\t%s=%s" % (key, val) ) + raise ValueError( + "Unable to properly escape solver option:" + "\n\t%s=%s" % (key, val) + ) yield tmp_k, tmp_v cmd = [str(config.executable)] @@ -349,9 +368,9 @@ def _check_and_escape_options(): cmd.extend(['-timeMode', 'elapsed']) for key, val in _check_and_escape_options(): if val.strip() != '': - cmd.extend(['-'+key, val]) + cmd.extend(['-' + key, val]) else: - action_options.append('-'+key) + action_options.append('-' + key) cmd.extend(['-printingOptions', 'all']) cmd.extend(['-import', self._filename + '.lp']) cmd.extend(action_options) @@ -359,31 +378,36 @@ def _check_and_escape_options(): cmd.extend(['-solve']) cmd.extend(['-solu', self._filename + '.soln']) - ostreams = [LogStream(level=self.config.log_level, logger=self.config.solver_output_logger)] + ostreams = [ + LogStream( + level=self.config.log_level, logger=self.config.solver_output_logger + ) + ] if self.config.stream_solver: ostreams.append(sys.stdout) with TeeStream(*ostreams) as t: timer.start('subprocess') - cp = subprocess.run(cmd, - timeout=timeout, - stdout=t.STDOUT, - stderr=t.STDERR, - universal_newlines=True) + cp = subprocess.run( + cmd, + timeout=timeout, + stdout=t.STDOUT, + stderr=t.STDERR, + universal_newlines=True, + ) timer.stop('subprocess') if cp.returncode != 0: if self.config.load_solution: - raise RuntimeError('A feasible solution was not found, so no solution can be loaded.' - 'Please set opt.config.load_solution=False and check ' - 'results.termination_condition and ' - 'results.best_feasible_objective before loading a solution.') + raise RuntimeError( + 'A feasible solution was not found, so no solution can be loaded.' + 'Please set opt.config.load_solution=False and check ' + 'results.termination_condition and ' + 'results.best_feasible_objective before loading a solution.' + ) results = Results() results.termination_condition = TerminationCondition.error results.best_feasible_objective = None - self._primal_sol = None - self._dual_sol = None - self._reduced_costs = None else: timer.start('parse solution') results = self._parse_soln() @@ -398,9 +422,22 @@ def _check_and_escape_options(): else: results.best_objective_bound = math.inf + results.solution_loader = PersistentSolutionLoader(solver=self) + return results - def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_primals( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: + if ( + self._last_results_object is None + or self._last_results_object.best_feasible_objective is None + ): + raise RuntimeError( + 'Solver does not currently have a valid solution. Please ' + 'check the termination condition.' + ) + res = ComponentMap() if vars_to_load is None: for v_id, (v, val) in self._primal_sol.items(): @@ -410,14 +447,38 @@ def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) res[v] = self._primal_sol[id(v)][1] return res - def get_duals(self, cons_to_load = None): + def get_duals(self, cons_to_load=None): + if ( + self._last_results_object is None + or self._last_results_object.termination_condition + != TerminationCondition.optimal + ): + raise RuntimeError( + 'Solver does not currently have valid duals. Please ' + 'check the termination condition.' + ) + if cons_to_load is None: return {k: v for k, v in self._dual_sol.items()} else: return {c: self._dual_sol[c] for c in cons_to_load} - def get_reduced_costs(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_reduced_costs( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: + if ( + self._last_results_object is None + or self._last_results_object.termination_condition + != TerminationCondition.optimal + ): + raise RuntimeError( + 'Solver does not currently have valid reduced costs. Please ' + 'check the termination condition.' + ) + if vars_to_load is None: return ComponentMap((k, v) for k, v in self._reduced_costs.values()) else: - return ComponentMap((v, self._reduced_costs[id(v)][1]) for v in vars_to_load) + return ComponentMap( + (v, self._reduced_costs[id(v)][1]) for v in vars_to_load + ) diff --git a/pyomo/contrib/appsi/solvers/cplex.py b/pyomo/contrib/appsi/solvers/cplex.py index 383dbd28fb1..f03bee6ecc5 100644 --- a/pyomo/contrib/appsi/solvers/cplex.py +++ b/pyomo/contrib/appsi/solvers/cplex.py @@ -1,5 +1,11 @@ from pyomo.common.tempfiles import TempfileManager -from pyomo.contrib.appsi.base import PersistentSolver, Results, TerminationCondition, MIPSolverConfig, PersistentSolutionLoader +from pyomo.contrib.appsi.base import ( + PersistentSolver, + Results, + TerminationCondition, + MIPSolverConfig, + PersistentSolutionLoader, +) from pyomo.contrib.appsi.writers import LPWriter import logging import math @@ -24,17 +30,21 @@ class CplexConfig(MIPSolverConfig): - def __init__(self, - description=None, - doc=None, - implicit=False, - implicit_domain=None, - visibility=0): - super(CplexConfig, self).__init__(description=description, - doc=doc, - implicit=implicit, - implicit_domain=implicit_domain, - visibility=visibility) + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super(CplexConfig, self).__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) self.declare('filename', ConfigValue(domain=str)) self.declare('keepfiles', ConfigValue(domain=bool)) @@ -57,7 +67,7 @@ def __init__(self, solver): class Cplex(PersistentSolver): _available = None - def __init__(self, only_child_vars=True): + def __init__(self, only_child_vars=False): self._config = CplexConfig() self._solver_options = dict() self._writer = LPWriter(only_child_vars=only_child_vars) @@ -66,6 +76,7 @@ def __init__(self, only_child_vars=True): try: import cplex + self._cplex = cplex self._cplex_model: Optional[cplex.Cplex] = None self._cplex_available = True @@ -95,7 +106,7 @@ def _check_license(self): try: m = self._cplex.Cplex() m.set_results_stream(None) - m.variables.add(lb=[0]*1001) + m.variables.add(lb=[0] * 1001) m.solve() Cplex._available = self.Availability.FullLicense except self._cplex.exceptions.errors.CplexSolverError: @@ -208,7 +219,7 @@ def solve(self, model, timer: HierarchicalTimer = None): TempfileManager.add_tempfile(self._filename + '.lp', exists=False) TempfileManager.add_tempfile(self._filename + '.log', exists=False) timer.start('write lp file') - self._writer.write(model, self._filename+'.lp', timer=timer) + self._writer.write(model, self._filename + '.lp', timer=timer) timer.stop('write lp file') res = self._apply_solver(timer) self._last_results_object = res @@ -231,11 +242,15 @@ def _apply_solver(self, timer: HierarchicalTimer): cplex_model.read(self._filename + '.lp') timer.stop('cplex read lp') - log_stream = LogStream(level=self.config.log_level, logger=self.config.solver_output_logger) + log_stream = LogStream( + level=self.config.log_level, logger=self.config.solver_output_logger + ) if config.stream_solver: + def _process_stream(arg): sys.stdout.write(arg) return arg + cplex_model.set_results_stream(log_stream, _process_stream) else: cplex_model.set_results_stream(log_stream) @@ -258,7 +273,7 @@ def _process_stream(arg): t1 = time.time() timer.stop('cplex solve') - return self._postsolve(timer, t1-t0) + return self._postsolve(timer, t1 - t0) def _postsolve(self, timer: HierarchicalTimer, solve_time): config = self.config @@ -288,12 +303,23 @@ def _postsolve(self, timer: HierarchicalTimer, solve_time): results.best_objective_bound = None else: if cpxprob.solution.get_solution_type() != cpxprob.solution.type.none: - if (cpxprob.variables.get_num_binary() + cpxprob.variables.get_num_integer()) == 0: - results.best_feasible_objective = cpxprob.solution.get_objective_value() - results.best_objective_bound = cpxprob.solution.get_objective_value() + if ( + cpxprob.variables.get_num_binary() + + cpxprob.variables.get_num_integer() + ) == 0: + results.best_feasible_objective = ( + cpxprob.solution.get_objective_value() + ) + results.best_objective_bound = ( + cpxprob.solution.get_objective_value() + ) else: - results.best_feasible_objective = cpxprob.solution.get_objective_value() - results.best_objective_bound = cpxprob.solution.MIP.get_best_objective() + results.best_feasible_objective = ( + cpxprob.solution.get_objective_value() + ) + results.best_objective_bound = ( + cpxprob.solution.MIP.get_best_objective() + ) else: results.best_feasible_objective = None if cpxprob.objective.get_sense() == cpxprob.objective.sense.minimize: @@ -303,24 +329,37 @@ def _postsolve(self, timer: HierarchicalTimer, solve_time): if config.load_solution: if cpxprob.solution.get_solution_type() == cpxprob.solution.type.none: - raise RuntimeError('A feasible solution was not found, so no solution can be loades. ' - 'Please set opt.config.load_solution=False and check ' - 'results.termination_condition and ' - 'results.best_feasible_objective before loading a solution.') + raise RuntimeError( + 'A feasible solution was not found, so no solution can be loades. ' + 'Please set opt.config.load_solution=False and check ' + 'results.termination_condition and ' + 'results.best_feasible_objective before loading a solution.' + ) else: if results.termination_condition != TerminationCondition.optimal: - logger.warning('Loading a feasible but suboptimal solution. ' - 'Please set load_solution=False and check ' - 'results.termination_condition before loading a solution.') + logger.warning( + 'Loading a feasible but suboptimal solution. ' + 'Please set load_solution=False and check ' + 'results.termination_condition before loading a solution.' + ) timer.start('load solution') self.load_vars() timer.stop('load solution') return results - def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: - if self._cplex_model.solution.get_solution_type() == self._cplex_model.solution.type.none: - raise RuntimeError('Cannot load variable values - no feasible solution was found.') + def get_primals( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: + if ( + self._cplex_model.solution.get_solution_type() + == self._cplex_model.solution.type.none + ): + raise RuntimeError( + 'Solver does not currently have a valid solution. Please ' + 'check the termination condition.' + ) + symbol_map = self._writer.symbol_map if vars_to_load is None: var_names = self._cplex_model.variables.get_names() @@ -331,17 +370,28 @@ def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) for name, val in zip(var_names, var_vals): if name == 'obj_const': continue - v = symbol_map.bySymbol[name]() + v = symbol_map.bySymbol[name] if self._writer._referenced_variables[id(v)]: res[v] = val return res - def get_duals(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None) -> Dict[_GeneralConstraintData, float]: - if self._cplex_model.solution.get_solution_type() == self._cplex_model.solution.type.none: - raise RuntimeError('Cannot get duals - no feasible solution was found.') - if self._cplex_model.get_problem_type() in [self._cplex_model.problem_type.MILP, - self._cplex_model.problem_type.MIQP, - self._cplex_model.problem_type.MIQCP]: + def get_duals( + self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None + ) -> Dict[_GeneralConstraintData, float]: + if ( + self._cplex_model.solution.get_solution_type() + == self._cplex_model.solution.type.none + ): + raise RuntimeError( + 'Solver does not currently have valid duals. Please ' + 'check the termination condition.' + ) + + if self._cplex_model.get_problem_type() in [ + self._cplex_model.problem_type.MILP, + self._cplex_model.problem_type.MIQP, + self._cplex_model.problem_type.MIQCP, + ]: raise RuntimeError('Cannot get duals for mixed-integer problems') symbol_map = self._writer.symbol_map @@ -367,7 +417,7 @@ def get_duals(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = N orig_name = name[:-3] if orig_name == 'obj_const_con': continue - _con = symbol_map.bySymbol[orig_name]() + _con = symbol_map.bySymbol[orig_name] if _con in res: if abs(val) > abs(res[_con]): res[_con] = val @@ -376,12 +426,23 @@ def get_duals(self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = N return res - def get_reduced_costs(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: - if self._cplex_model.solution.get_solution_type() == self._cplex_model.solution.type.none: - raise RuntimeError('Cannot get reduced costs - no feasible solution was found.') - if self._cplex_model.get_problem_type() in [self._cplex_model.problem_type.MILP, - self._cplex_model.problem_type.MIQP, - self._cplex_model.problem_type.MIQCP]: + def get_reduced_costs( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: + if ( + self._cplex_model.solution.get_solution_type() + == self._cplex_model.solution.type.none + ): + raise RuntimeError( + 'Solver does not currently have valid reduced costs. Please ' + 'check the termination condition.' + ) + + if self._cplex_model.get_problem_type() in [ + self._cplex_model.problem_type.MILP, + self._cplex_model.problem_type.MIQP, + self._cplex_model.problem_type.MIQCP, + ]: raise RuntimeError('Cannot get reduced costs for mixed-integer problems') symbol_map = self._writer.symbol_map @@ -394,6 +455,6 @@ def get_reduced_costs(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = for name, val in zip(var_names, rc): if name == 'obj_const': continue - v = symbol_map.bySymbol[name]() + v = symbol_map.bySymbol[name] res[v] = val return res diff --git a/pyomo/contrib/appsi/solvers/gurobi.py b/pyomo/contrib/appsi/solvers/gurobi.py index 76702ef7ddb..a173c69abc6 100644 --- a/pyomo/contrib/appsi/solvers/gurobi.py +++ b/pyomo/contrib/appsi/solvers/gurobi.py @@ -3,29 +3,33 @@ import math from typing import List, Dict, Optional from pyomo.common.collections import ComponentSet, ComponentMap, OrderedSet +from pyomo.common.log import LogStream from pyomo.common.dependencies import attempt_import from pyomo.common.errors import PyomoException -from pyomo.common.tee import capture_output +from pyomo.common.tee import capture_output, TeeStream from pyomo.common.timing import HierarchicalTimer from pyomo.common.shutdown import python_is_shutting_down -from pyomo.common.config import ConfigValue +from pyomo.common.config import ConfigValue, NonNegativeInt from pyomo.core.kernel.objective import minimize, maximize from pyomo.core.base import SymbolMap, NumericLabeler, TextLabeler from pyomo.core.base.var import Var, _GeneralVarData from pyomo.core.base.constraint import _GeneralConstraintData from pyomo.core.base.sos import _SOSConstraintData from pyomo.core.base.param import _ParamData -from pyomo.core.expr.numvalue import ( - value, is_constant, is_fixed, native_numeric_types, -) +from pyomo.core.expr.numvalue import value, is_constant, is_fixed, native_numeric_types from pyomo.repn import generate_standard_repn from pyomo.core.expr.numeric_expr import NPV_MaxExpression, NPV_MinExpression from pyomo.contrib.appsi.base import ( - PersistentSolver, Results, TerminationCondition, MIPSolverConfig, - PersistentBase, PersistentSolutionLoader + PersistentSolver, + Results, + TerminationCondition, + MIPSolverConfig, + PersistentBase, + PersistentSolutionLoader, ) from pyomo.contrib.appsi.cmodel import cmodel, cmodel_available from pyomo.core.staleflag import StaleFlagManager +import sys logger = logging.getLogger(__name__) @@ -42,8 +46,7 @@ def _import_gurobipy(): return gurobipy -gurobipy, gurobipy_available = attempt_import('gurobipy', - importer=_import_gurobipy) +gurobipy, gurobipy_available = attempt_import('gurobipy', importer=_import_gurobipy) class DegreeError(PyomoException): @@ -51,30 +54,43 @@ class DegreeError(PyomoException): class GurobiConfig(MIPSolverConfig): - def __init__(self, - description=None, - doc=None, - implicit=False, - implicit_domain=None, - visibility=0): - super(GurobiConfig, self).__init__(description=description, - doc=doc, - implicit=implicit, - implicit_domain=implicit_domain, - visibility=visibility) + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super(GurobiConfig, self).__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) self.declare('logfile', ConfigValue(domain=str)) + self.declare('solver_output_logger', ConfigValue()) + self.declare('log_level', ConfigValue(domain=NonNegativeInt)) + self.logfile = '' + self.solver_output_logger = logger + self.log_level = logging.INFO class GurobiSolutionLoader(PersistentSolutionLoader): def load_vars(self, vars_to_load=None, solution_number=0): self._assert_solution_still_valid() - self._solver.load_vars(vars_to_load=vars_to_load, solution_number=solution_number) + self._solver.load_vars( + vars_to_load=vars_to_load, solution_number=solution_number + ) def get_primals(self, vars_to_load=None, solution_number=0): self._assert_solution_still_valid() - return self._solver.get_primals(vars_to_load=vars_to_load, solution_number=solution_number) + return self._solver.get_primals( + vars_to_load=vars_to_load, solution_number=solution_number + ) class GurobiResults(Results): @@ -139,7 +155,9 @@ def update(self): class _MutableQuadraticConstraint(object): - def __init__(self, gurobi_model, gurobi_con, constant, linear_coefs, quadratic_coefs): + def __init__( + self, gurobi_model, gurobi_con, constant, linear_coefs, quadratic_coefs + ): self.con = gurobi_con self.gurobi_model = gurobi_model self.constant = constant @@ -153,12 +171,16 @@ def get_updated_expression(self): gurobi_expr = self.gurobi_model.getQCRow(self.con) for ndx, coef in enumerate(self.linear_coefs): current_coef_value = value(coef.expr) - incremental_coef_value = current_coef_value - self.last_linear_coef_values[ndx] + incremental_coef_value = ( + current_coef_value - self.last_linear_coef_values[ndx] + ) gurobi_expr += incremental_coef_value * coef.var self.last_linear_coef_values[ndx] = current_coef_value for ndx, coef in enumerate(self.quadratic_coefs): current_coef_value = value(coef.expr) - incremental_coef_value = current_coef_value - self.last_quadratic_coef_values[ndx] + incremental_coef_value = ( + current_coef_value - self.last_quadratic_coef_values[ndx] + ) gurobi_expr += incremental_coef_value * coef.var1 * coef.var2 self.last_quadratic_coef_values[ndx] = current_coef_value return gurobi_expr @@ -187,7 +209,9 @@ def get_updated_expression(self): self.gurobi_model.update() gurobi_expr = self.gurobi_model.getObjective() current_coef_value = value(coef.expr) - incremental_coef_value = current_coef_value - self.last_quadratic_coef_values[ndx] + incremental_coef_value = ( + current_coef_value - self.last_quadratic_coef_values[ndx] + ) gurobi_expr += incremental_coef_value * coef.var1 * coef.var2 self.last_quadratic_coef_values[ndx] = current_coef_value return gurobi_expr @@ -204,10 +228,13 @@ class Gurobi(PersistentBase, PersistentSolver): """ Interface to Gurobi """ + _available = None + _num_instances = 0 - def __init__(self, only_child_vars=True): + def __init__(self, only_child_vars=False): super(Gurobi, self).__init__(only_child_vars=only_child_vars) + self._num_instances += 1 self._config = GurobiConfig() self._solver_options = dict() self._solver_model = None @@ -230,42 +257,46 @@ def __init__(self, only_child_vars=True): self._last_results_object: Optional[GurobiResults] = None def available(self): - if self._available is None: - self._check_license() - return self._available + if not gurobipy_available: # this triggers the deferred import + return self.Availability.NotFound + elif self._available == self.Availability.BadVersion: + return self.Availability.BadVersion + else: + return self._check_license() - @classmethod - def _check_license(cls): + def _check_license(self): + avail = False try: # Gurobipy writes out license file information when creating # the environment with capture_output(capture_fd=True): m = gurobipy.Model() - m.dispose() - except ImportError: - # Triggered if this is the first time the deferred import of - # gurobipy is resolved. _import_gurobipy will have already - # set _available appropriately. - return + if self._solver_model is None: + self._solver_model = m + avail = True except gurobipy.GurobiError: - cls._available = Gurobi.Availability.BadLicense - return + avail = False + + if avail: + if self._available is None: + res = Gurobi._check_full_license() + self._available = res + return res + else: + return self._available + else: + return self.Availability.BadLicense + + @classmethod + def _check_full_license(cls): m = gurobipy.Model() m.setParam('OutputFlag', 0) try: - # As of 3/2021, the limited-size Gurobi license was limited - # to 2000 variables. m.addVars(range(2001)) - m.setParam('OutputFlag', 0) m.optimize() - cls._available = Gurobi.Availability.FullLicense + return cls.Availability.FullLicense except gurobipy.GurobiError: - cls._available = Gurobi.Availability.LimitedLicense - finally: - m.dispose() - del m - with capture_output(capture_fd=True): - gurobipy.disposeDefaultEnv() + return cls.Availability.LimitedLicense def release_license(self): self._reinit() @@ -275,12 +306,16 @@ def release_license(self): def __del__(self): if not python_is_shutting_down(): - self.release_license() + self._num_instances -= 1 + if self._num_instances == 0: + self.release_license() def version(self): - version = (gurobipy.GRB.VERSION_MAJOR, - gurobipy.GRB.VERSION_MINOR, - gurobipy.GRB.VERSION_TECHNICAL) + version = ( + gurobipy.GRB.VERSION_MAJOR, + gurobipy.GRB.VERSION_MINOR, + gurobipy.GRB.VERSION_TECHNICAL, + ) return version @property @@ -313,24 +348,34 @@ def symbol_map(self): return self._symbol_map def _solve(self, timer: HierarchicalTimer): - config = self.config - options = self.gurobi_options - if config.stream_solver: - self._solver_model.setParam('LogToConsole', 1) - else: - self._solver_model.setParam('LogToConsole', 0) - self._solver_model.setParam('LogFile', config.logfile) - - if config.time_limit is not None: - self._solver_model.setParam('TimeLimit', config.time_limit) - if config.mip_gap is not None: - self._solver_model.setParam('MIPGap', config.mip_gap) - - for key, option in options.items(): - self._solver_model.setParam(key, option) - timer.start('optimize') - self._solver_model.optimize(self._callback) - timer.stop('optimize') + ostreams = [ + LogStream( + level=self.config.log_level, logger=self.config.solver_output_logger + ) + ] + if self.config.stream_solver: + ostreams.append(sys.stdout) + + with TeeStream(*ostreams) as t: + with capture_output(output=t.STDOUT, capture_fd=False): + config = self.config + options = self.gurobi_options + + self._solver_model.setParam('LogToConsole', 1) + self._solver_model.setParam('LogFile', config.logfile) + + if config.time_limit is not None: + self._solver_model.setParam('TimeLimit', config.time_limit) + if config.mip_gap is not None: + self._solver_model.setParam('MIPGap', config.mip_gap) + + for key, option in options.items(): + self._solver_model.setParam(key, option) + + timer.start('optimize') + self._solver_model.optimize(self._callback) + timer.stop('optimize') + self._needs_updated = False return self._postsolve(timer) @@ -357,7 +402,9 @@ def solve(self, model, timer: HierarchicalTimer = None) -> Results: logger.info('\n' + str(timer)) return res - def _process_domain_and_bounds(self, var, var_id, mutable_lbs, mutable_ubs, ndx, gurobipy_var): + def _process_domain_and_bounds( + self, var, var_id, mutable_lbs, mutable_ubs, ndx, gurobipy_var + ): _v, _lb, _ub, _fixed, _domain_interval, _value = self._vars[id(var)] lb, ub, step = _domain_interval if lb is None: @@ -372,7 +419,9 @@ def _process_domain_and_bounds(self, var, var_id, mutable_lbs, mutable_ubs, ndx, else: vtype = gurobipy.GRB.INTEGER else: - raise ValueError(f'Unrecognized domain step: {step} (should be either 0 or 1)') + raise ValueError( + f'Unrecognized domain step: {step} (should be either 0 or 1)' + ) if _fixed: lb = _value ub = _value @@ -407,13 +456,17 @@ def _add_variables(self, variables: List[_GeneralVarData]): mutable_ubs = dict() for ndx, var in enumerate(variables): varname = self._symbol_map.getSymbol(var, self._labeler) - lb, ub, vtype = self._process_domain_and_bounds(var, id(var), mutable_lbs, mutable_ubs, ndx, None) + lb, ub, vtype = self._process_domain_and_bounds( + var, id(var), mutable_lbs, mutable_ubs, ndx, None + ) var_names.append(varname) vtypes.append(vtype) lbs.append(lb) ubs.append(ub) - gurobi_vars = self._solver_model.addVars(len(variables), lb=lbs, ub=ubs, vtype=vtypes, name=var_names) + gurobi_vars = self._solver_model.addVars( + len(variables), lb=lbs, ub=ubs, vtype=vtypes, name=var_names + ) for ndx, pyomo_var in enumerate(variables): gurobi_var = gurobi_vars[ndx] @@ -435,7 +488,7 @@ def _reinit(self): self.__init__(only_child_vars=self._only_child_vars) self.config = saved_config self.gurobi_options = saved_options - self.update_config = saved_update_config + self.update_config = saved_update_config def set_instance(self, model): if self._last_results_object is not None: @@ -444,7 +497,8 @@ def set_instance(self, model): c = self.__class__ raise PyomoException( f'Solver {c.__module__}.{c.__qualname__} is not available ' - f'({self.available()}).') + f'({self.available()}).' + ) self._reinit() self._model = model if self.use_extensions and cmodel_available: @@ -471,7 +525,9 @@ def _get_expr_from_pyomo_expr(self, expr): degree = repn.polynomial_degree() if (degree is None) or (degree > 2): - raise DegreeError('GurobiAuto does not support expressions of degree {0}.'.format(degree)) + raise DegreeError( + 'GurobiAuto does not support expressions of degree {0}.'.format(degree) + ) if len(repn.linear_vars) > 0: linear_coef_vals = list() @@ -479,10 +535,15 @@ def _get_expr_from_pyomo_expr(self, expr): if not is_constant(coef): mutable_linear_coefficient = _MutableLinearCoefficient() mutable_linear_coefficient.expr = coef - mutable_linear_coefficient.var = self._pyomo_var_to_solver_var_map[id(repn.linear_vars[ndx])] + mutable_linear_coefficient.var = self._pyomo_var_to_solver_var_map[ + id(repn.linear_vars[ndx]) + ] mutable_linear_coefficients.append(mutable_linear_coefficient) linear_coef_vals.append(value(coef)) - new_expr = gurobipy.LinExpr(linear_coef_vals, [self._pyomo_var_to_solver_var_map[id(i)] for i in repn.linear_vars]) + new_expr = gurobipy.LinExpr( + linear_coef_vals, + [self._pyomo_var_to_solver_var_map[id(i)] for i in repn.linear_vars], + ) else: new_expr = 0.0 @@ -500,25 +561,33 @@ def _get_expr_from_pyomo_expr(self, expr): coef_val = value(coef) new_expr += coef_val * gurobi_x * gurobi_y - return new_expr, repn.constant, mutable_linear_coefficients, mutable_quadratic_coefficients + return ( + new_expr, + repn.constant, + mutable_linear_coefficients, + mutable_quadratic_coefficients, + ) def _add_constraints(self, cons: List[_GeneralConstraintData]): for con in cons: conname = self._symbol_map.getSymbol(con, self._labeler) - (gurobi_expr, - repn_constant, - mutable_linear_coefficients, - mutable_quadratic_coefficients) = self._get_expr_from_pyomo_expr(con.body) - - if (gurobi_expr.__class__ in {gurobipy.LinExpr, gurobipy.Var} or - gurobi_expr.__class__ in native_numeric_types): + ( + gurobi_expr, + repn_constant, + mutable_linear_coefficients, + mutable_quadratic_coefficients, + ) = self._get_expr_from_pyomo_expr(con.body) + + if ( + gurobi_expr.__class__ in {gurobipy.LinExpr, gurobipy.Var} + or gurobi_expr.__class__ in native_numeric_types + ): if con.equality: rhs_expr = con.lower - repn_constant rhs_val = value(rhs_expr) - gurobipy_con = self._solver_model.addLConstr(gurobi_expr, - gurobipy.GRB.EQUAL, - rhs_val, - name=conname) + gurobipy_con = self._solver_model.addLConstr( + gurobi_expr, gurobipy.GRB.EQUAL, rhs_val, name=conname + ) if not is_constant(rhs_expr): mutable_constant = _MutableConstant() mutable_constant.expr = rhs_expr @@ -529,7 +598,9 @@ def _add_constraints(self, cons: List[_GeneralConstraintData]): rhs_expr = con.upper - repn_constant lhs_val = value(lhs_expr) rhs_val = value(rhs_expr) - gurobipy_con = self._solver_model.addRange(gurobi_expr, lhs_val, rhs_val, name=conname) + gurobipy_con = self._solver_model.addRange( + gurobi_expr, lhs_val, rhs_val, name=conname + ) self._range_constraints.add(con) if not is_constant(lhs_expr) or not is_constant(rhs_expr): mutable_range_constant = _MutableRangeConstant() @@ -542,7 +613,9 @@ def _add_constraints(self, cons: List[_GeneralConstraintData]): elif con.has_lb(): rhs_expr = con.lower - repn_constant rhs_val = value(rhs_expr) - gurobipy_con = self._solver_model.addLConstr(gurobi_expr, gurobipy.GRB.GREATER_EQUAL, rhs_val, name=conname) + gurobipy_con = self._solver_model.addLConstr( + gurobi_expr, gurobipy.GRB.GREATER_EQUAL, rhs_val, name=conname + ) if not is_constant(rhs_expr): mutable_constant = _MutableConstant() mutable_constant.expr = rhs_expr @@ -551,15 +624,19 @@ def _add_constraints(self, cons: List[_GeneralConstraintData]): elif con.has_ub(): rhs_expr = con.upper - repn_constant rhs_val = value(rhs_expr) - gurobipy_con = self._solver_model.addLConstr(gurobi_expr, gurobipy.GRB.LESS_EQUAL, rhs_val, name=conname) + gurobipy_con = self._solver_model.addLConstr( + gurobi_expr, gurobipy.GRB.LESS_EQUAL, rhs_val, name=conname + ) if not is_constant(rhs_expr): mutable_constant = _MutableConstant() mutable_constant.expr = rhs_expr mutable_constant.con = gurobipy_con self._mutable_helpers[con] = [mutable_constant] else: - raise ValueError("Constraint does not have a lower " - "or an upper bound: {0} \n".format(con)) + raise ValueError( + "Constraint does not have a lower " + "or an upper bound: {0} \n".format(con) + ) for tmp in mutable_linear_coefficients: tmp.con = gurobipy_con tmp.gurobi_model = self._solver_model @@ -572,30 +649,49 @@ def _add_constraints(self, cons: List[_GeneralConstraintData]): if con.equality: rhs_expr = con.lower - repn_constant rhs_val = value(rhs_expr) - gurobipy_con = self._solver_model.addQConstr(gurobi_expr, gurobipy.GRB.EQUAL, rhs_val, name=conname) + gurobipy_con = self._solver_model.addQConstr( + gurobi_expr, gurobipy.GRB.EQUAL, rhs_val, name=conname + ) elif con.has_lb() and con.has_ub(): - raise NotImplementedError('Quadratic range constraints are not supported') + raise NotImplementedError( + 'Quadratic range constraints are not supported' + ) elif con.has_lb(): rhs_expr = con.lower - repn_constant rhs_val = value(rhs_expr) - gurobipy_con = self._solver_model.addQConstr(gurobi_expr, gurobipy.GRB.GREATER_EQUAL, rhs_val, name=conname) + gurobipy_con = self._solver_model.addQConstr( + gurobi_expr, gurobipy.GRB.GREATER_EQUAL, rhs_val, name=conname + ) elif con.has_ub(): rhs_expr = con.upper - repn_constant rhs_val = value(rhs_expr) - gurobipy_con = self._solver_model.addQConstr(gurobi_expr, gurobipy.GRB.LESS_EQUAL, rhs_val, name=conname) + gurobipy_con = self._solver_model.addQConstr( + gurobi_expr, gurobipy.GRB.LESS_EQUAL, rhs_val, name=conname + ) else: - raise ValueError("Constraint does not have a lower " - "or an upper bound: {0} \n".format(con)) - if len(mutable_linear_coefficients) > 0 or len(mutable_quadratic_coefficients) > 0 or not is_constant(repn_constant): + raise ValueError( + "Constraint does not have a lower " + "or an upper bound: {0} \n".format(con) + ) + if ( + len(mutable_linear_coefficients) > 0 + or len(mutable_quadratic_coefficients) > 0 + or not is_constant(repn_constant) + ): mutable_constant = _MutableConstant() mutable_constant.expr = rhs_expr - mutable_quadratic_constraint = _MutableQuadraticConstraint(self._solver_model, gurobipy_con, - mutable_constant, - mutable_linear_coefficients, - mutable_quadratic_coefficients) + mutable_quadratic_constraint = _MutableQuadraticConstraint( + self._solver_model, + gurobipy_con, + mutable_constant, + mutable_linear_coefficients, + mutable_quadratic_coefficients, + ) self._mutable_quadratic_helpers[con] = mutable_quadratic_constraint else: - raise ValueError('Unrecognized Gurobi expression type: ' + str(gurobi_expr.__class__)) + raise ValueError( + 'Unrecognized Gurobi expression type: ' + str(gurobi_expr.__class__) + ) self._pyomo_con_to_solver_con_map[con] = gurobipy_con self._solver_con_to_pyomo_con_map[id(gurobipy_con)] = con @@ -611,8 +707,9 @@ def _add_sos_constraints(self, cons: List[_SOSConstraintData]): elif level == 2: sos_type = gurobipy.GRB.SOS_TYPE2 else: - raise ValueError("Solver does not support SOS " - "level {0} constraints".format(level)) + raise ValueError( + "Solver does not support SOS level {0} constraints".format(level) + ) gurobi_vars = [] weights = [] @@ -670,11 +767,17 @@ def _update_variables(self, variables: List[_GeneralVarData]): for var in variables: var_id = id(var) if var_id not in self._pyomo_var_to_solver_var_map: - raise ValueError('The Var provided to update_var needs to be added first: {0}'.format(var)) + raise ValueError( + 'The Var provided to update_var needs to be added first: {0}'.format( + var + ) + ) self._mutable_bounds.pop((var_id, 'lb'), None) self._mutable_bounds.pop((var_id, 'ub'), None) gurobipy_var = self._pyomo_var_to_solver_var_map[var_id] - lb, ub, vtype = self._process_domain_and_bounds(var, var_id, None, None, None, gurobipy_var) + lb, ub, vtype = self._process_domain_and_bounds( + var, var_id, None, None, None, gurobipy_var + ) gurobipy_var.setAttr('lb', lb) gurobipy_var.setAttr('ub', ub) gurobipy_var.setAttr('vtype', vtype) @@ -697,7 +800,9 @@ def update_params(self): pyomo_con = self._solver_con_to_pyomo_con_map[id(gurobi_con)] name = self._symbol_map.getSymbol(pyomo_con, self._labeler) self._solver_model.remove(gurobi_con) - new_con = self._solver_model.addQConstr(new_gurobi_expr, new_sense, new_rhs, name=name) + new_con = self._solver_model.addQConstr( + new_gurobi_expr, new_sense, new_rhs, name=name + ) self._pyomo_con_to_solver_con_map[id(pyomo_con)] = new_con del self._solver_con_to_pyomo_con_map[id(gurobi_con)] self._solver_con_to_pyomo_con_map[id(new_con)] = pyomo_con @@ -727,19 +832,25 @@ def _set_objective(self, obj): elif obj.sense == maximize: sense = gurobipy.GRB.MAXIMIZE else: - raise ValueError('Objective sense is not recognized: {0}'.format(obj.sense)) + raise ValueError( + 'Objective sense is not recognized: {0}'.format(obj.sense) + ) - (gurobi_expr, - repn_constant, - mutable_linear_coefficients, - mutable_quadratic_coefficients) = self._get_expr_from_pyomo_expr(obj.expr) + ( + gurobi_expr, + repn_constant, + mutable_linear_coefficients, + mutable_quadratic_coefficients, + ) = self._get_expr_from_pyomo_expr(obj.expr) mutable_constant = _MutableConstant() mutable_constant.expr = repn_constant - mutable_objective = _MutableObjective(self._solver_model, - mutable_constant, - mutable_linear_coefficients, - mutable_quadratic_coefficients) + mutable_objective = _MutableObjective( + self._solver_model, + mutable_constant, + mutable_linear_coefficients, + mutable_quadratic_coefficients, + ) self._mutable_objective = mutable_objective # These two lines are needed as a workaround @@ -794,45 +905,53 @@ def _postsolve(self, timer: HierarchicalTimer): results.best_feasible_objective = None results.best_objective_bound = None if self._objective is not None: - if gprob.SolCount > 0: - try: - results.best_feasible_objective = gprob.ObjVal - except (gurobipy.GurobiError, AttributeError): - results.best_feasible_objective = None try: - if gprob.NumBinVars + gprob.NumIntVars == 0: - results.best_objective_bound = gprob.ObjVal - else: - results.best_objective_bound = gprob.ObjBound + results.best_feasible_objective = gprob.ObjVal + except (gurobipy.GurobiError, AttributeError): + results.best_feasible_objective = None + try: + results.best_objective_bound = gprob.ObjBound except (gurobipy.GurobiError, AttributeError): if self._objective.sense == minimize: results.best_objective_bound = -math.inf else: results.best_objective_bound = math.inf - if results.best_feasible_objective is not None and not math.isfinite(results.best_feasible_objective): + + if results.best_feasible_objective is not None and not math.isfinite( + results.best_feasible_objective + ): results.best_feasible_objective = None timer.start('load solution') if config.load_solution: if gprob.SolCount > 0: if results.termination_condition != TerminationCondition.optimal: - logger.warning('Loading a feasible but suboptimal solution. ' - 'Please set load_solution=False and check ' - 'results.termination_condition and ' - 'resutls.found_feasible_solution() before loading a solution.') + logger.warning( + 'Loading a feasible but suboptimal solution. ' + 'Please set load_solution=False and check ' + 'results.termination_condition and ' + 'results.found_feasible_solution() before loading a solution.' + ) self.load_vars() else: - raise RuntimeError('A feasible solution was not found, so no solution can be loaded.' - 'Please set opt.config.load_solution=False and check ' - 'results.termination_condition and ' - 'resutls.best_feasible_objective before loading a solution.') + raise RuntimeError( + 'A feasible solution was not found, so no solution can be loaded.' + 'Please set opt.config.load_solution=False and check ' + 'results.termination_condition and ' + 'results.best_feasible_objective before loading a solution.' + ) timer.stop('load solution') return results def _load_suboptimal_mip_solution(self, vars_to_load, solution_number): - if self.get_model_attr('NumIntVars') == 0 and self.get_model_attr('NumBinVars') == 0: - raise ValueError('Cannot obtain suboptimal solutions for a continuous model') + if ( + self.get_model_attr('NumIntVars') == 0 + and self.get_model_attr('NumBinVars') == 0 + ): + raise ValueError( + 'Cannot obtain suboptimal solutions for a continuous model' + ) var_map = self._pyomo_var_to_solver_var_map ref_vars = self._referenced_variables original_solution_number = self.get_gurobi_param_info('SolutionNumber')[2] @@ -848,7 +967,9 @@ def _load_suboptimal_mip_solution(self, vars_to_load, solution_number): return res def load_vars(self, vars_to_load=None, solution_number=0): - for v, val in self.get_primals(vars_to_load=vars_to_load, solution_number=solution_number).items(): + for v, val in self.get_primals( + vars_to_load=vars_to_load, solution_number=solution_number + ).items(): v.set_value(val, skip_validation=True) StaleFlagManager.mark_all_as_stale(delayed=True) @@ -856,6 +977,12 @@ def get_primals(self, vars_to_load=None, solution_number=0): if self._needs_updated: self._update_gurobi_model() # this is needed to ensure that solutions cannot be loaded after the model has been changed + if self._solver_model.SolCount == 0: + raise RuntimeError( + 'Solver does not currently have a valid solution. Please ' + 'check the termination condition.' + ) + var_map = self._pyomo_var_to_solver_var_map ref_vars = self._referenced_variables if vars_to_load is None: @@ -864,9 +991,13 @@ def get_primals(self, vars_to_load=None, solution_number=0): vars_to_load = [id(v) for v in vars_to_load] if solution_number != 0: - return self._load_suboptimal_mip_solution(vars_to_load=vars_to_load, solution_number=solution_number) + return self._load_suboptimal_mip_solution( + vars_to_load=vars_to_load, solution_number=solution_number + ) else: - gurobi_vars_to_load = [var_map[pyomo_var_id] for pyomo_var_id in vars_to_load] + gurobi_vars_to_load = [ + var_map[pyomo_var_id] for pyomo_var_id in vars_to_load + ] vals = self._solver_model.getAttr("X", gurobi_vars_to_load) res = ComponentMap() @@ -880,6 +1011,12 @@ def get_reduced_costs(self, vars_to_load=None): if self._needs_updated: self._update_gurobi_model() + if self._solver_model.Status != gurobipy.GRB.OPTIMAL: + raise RuntimeError( + 'Solver does not currently have valid reduced costs. Please ' + 'check the termination condition.' + ) + var_map = self._pyomo_var_to_solver_var_map ref_vars = self._referenced_variables res = ComponentMap() @@ -902,6 +1039,12 @@ def get_duals(self, cons_to_load=None): if self._needs_updated: self._update_gurobi_model() + if self._solver_model.Status != gurobipy.GRB.OPTIMAL: + raise RuntimeError( + 'Solver does not currently have valid duals. Please ' + 'check the termination condition.' + ) + con_map = self._pyomo_con_to_solver_con_map reverse_con_map = self._solver_con_to_pyomo_con_map dual = dict() @@ -910,9 +1053,19 @@ def get_duals(self, cons_to_load=None): linear_cons_to_load = self._solver_model.getConstrs() quadratic_cons_to_load = self._solver_model.getQConstrs() else: - gurobi_cons_to_load = OrderedSet([con_map[pyomo_con] for pyomo_con in cons_to_load]) - linear_cons_to_load = list(gurobi_cons_to_load.intersection(OrderedSet(self._solver_model.getConstrs()))) - quadratic_cons_to_load = list(gurobi_cons_to_load.intersection(OrderedSet(self._solver_model.getQConstrs()))) + gurobi_cons_to_load = OrderedSet( + [con_map[pyomo_con] for pyomo_con in cons_to_load] + ) + linear_cons_to_load = list( + gurobi_cons_to_load.intersection( + OrderedSet(self._solver_model.getConstrs()) + ) + ) + quadratic_cons_to_load = list( + gurobi_cons_to_load.intersection( + OrderedSet(self._solver_model.getQConstrs()) + ) + ) linear_vals = self._solver_model.getAttr("Pi", linear_cons_to_load) quadratic_vals = self._solver_model.getAttr("QCPi", quadratic_cons_to_load) @@ -929,19 +1082,37 @@ def get_slacks(self, cons_to_load=None): if self._needs_updated: self._update_gurobi_model() + if self._solver_model.SolCount == 0: + raise RuntimeError( + 'Solver does not currently have valid slacks. Please ' + 'check the termination condition.' + ) + con_map = self._pyomo_con_to_solver_con_map reverse_con_map = self._solver_con_to_pyomo_con_map slack = dict() - gurobi_range_con_vars = OrderedSet(self._solver_model.getVars()) - OrderedSet(self._pyomo_var_to_solver_var_map.values()) + gurobi_range_con_vars = OrderedSet(self._solver_model.getVars()) - OrderedSet( + self._pyomo_var_to_solver_var_map.values() + ) if cons_to_load is None: linear_cons_to_load = self._solver_model.getConstrs() quadratic_cons_to_load = self._solver_model.getQConstrs() else: - gurobi_cons_to_load = OrderedSet([con_map[pyomo_con] for pyomo_con in cons_to_load]) - linear_cons_to_load = list(gurobi_cons_to_load.intersection(OrderedSet(self._solver_model.getConstrs()))) - quadratic_cons_to_load = list(gurobi_cons_to_load.intersection(OrderedSet(self._solver_model.getQConstrs()))) + gurobi_cons_to_load = OrderedSet( + [con_map[pyomo_con] for pyomo_con in cons_to_load] + ) + linear_cons_to_load = list( + gurobi_cons_to_load.intersection( + OrderedSet(self._solver_model.getConstrs()) + ) + ) + quadratic_cons_to_load = list( + gurobi_cons_to_load.intersection( + OrderedSet(self._solver_model.getQConstrs()) + ) + ) linear_vals = self._solver_model.getAttr("Slack", linear_cons_to_load) quadratic_vals = self._solver_model.getAttr("QCSlack", quadratic_cons_to_load) @@ -1023,9 +1194,11 @@ def set_linear_constraint_attr(self, con, attr, val): See gurobi documentation for acceptable values. """ if attr in {'Sense', 'RHS', 'ConstrName'}: - raise ValueError('Linear constraint attr {0} cannot be set with' + - ' the set_linear_constraint_attr method. Please use' + - ' the remove_constraint and add_constraint methods.'.format(attr)) + raise ValueError( + 'Linear constraint attr {0} cannot be set with' + + ' the set_linear_constraint_attr method. Please use' + + ' the remove_constraint and add_constraint methods.'.format(attr) + ) self._pyomo_con_to_solver_con_map[con].setAttr(attr, val) self._needs_updated = True @@ -1050,13 +1223,17 @@ def set_var_attr(self, var, attr, val): See gurobi documentation for acceptable values. """ if attr in {'LB', 'UB', 'VType', 'VarName'}: - raise ValueError('Var attr {0} cannot be set with' + - ' the set_var_attr method. Please use' + - ' the update_var method.'.format(attr)) + raise ValueError( + 'Var attr {0} cannot be set with' + + ' the set_var_attr method. Please use' + + ' the update_var method.'.format(attr) + ) if attr == 'Obj': - raise ValueError('Var attr Obj cannot be set with' + - ' the set_var_attr method. Please use' + - ' the set_objective method.') + raise ValueError( + 'Var attr Obj cannot be set with' + + ' the set_var_attr method. Please use' + + ' the set_objective method.' + ) self._pyomo_var_to_solver_var_map[id(var)].setAttr(attr, val) self._needs_updated = True @@ -1145,7 +1322,7 @@ def get_gurobi_param_info(self, param): Parameters ---------- param: str - The gurobi parameter to get info for. See Gurobi documenation for possible options. + The gurobi parameter to get info for. See Gurobi documentation for possible options. Returns ------- @@ -1156,6 +1333,7 @@ def get_gurobi_param_info(self, param): def _intermediate_callback(self): def f(gurobi_model, where): self._callback_func(self._model, self, where) + return f def set_callback(self, func=None): @@ -1173,15 +1351,15 @@ def set_callback(self, func=None): .. math:: min 2*x + y - + s.t. - + y >= (x-2)**2 - + 0 <= x <= 4 - + y >= 0 - + y integer as an MILP using extended cutting planes in callbacks. @@ -1241,33 +1419,50 @@ def cbCut(self, con): raise ValueError('cbCut expected an active constraint.') if is_fixed(con.body): - raise ValueError('cbCut expected a non-trival constraint') + raise ValueError('cbCut expected a non-trivial constraint') - (gurobi_expr, - repn_constant, - mutable_linear_coefficients, - mutable_quadratic_coefficients) = self._get_expr_from_pyomo_expr(con.body) + ( + gurobi_expr, + repn_constant, + mutable_linear_coefficients, + mutable_quadratic_coefficients, + ) = self._get_expr_from_pyomo_expr(con.body) if con.has_lb(): if con.has_ub(): raise ValueError('Range constraints are not supported in cbCut.') if not is_fixed(con.lower): - raise ValueError('Lower bound of constraint {0} is not constant.'.format(con)) + raise ValueError( + 'Lower bound of constraint {0} is not constant.'.format(con) + ) if con.has_ub(): if not is_fixed(con.upper): - raise ValueError('Upper bound of constraint {0} is not constant.'.format(con)) + raise ValueError( + 'Upper bound of constraint {0} is not constant.'.format(con) + ) if con.equality: - self._solver_model.cbCut(lhs=gurobi_expr, sense=gurobipy.GRB.EQUAL, - rhs=value(con.lower - repn_constant)) + self._solver_model.cbCut( + lhs=gurobi_expr, + sense=gurobipy.GRB.EQUAL, + rhs=value(con.lower - repn_constant), + ) elif con.has_lb() and (value(con.lower) > -float('inf')): - self._solver_model.cbCut(lhs=gurobi_expr, sense=gurobipy.GRB.GREATER_EQUAL, - rhs=value(con.lower - repn_constant)) + self._solver_model.cbCut( + lhs=gurobi_expr, + sense=gurobipy.GRB.GREATER_EQUAL, + rhs=value(con.lower - repn_constant), + ) elif con.has_ub() and (value(con.upper) < float('inf')): - self._solver_model.cbCut(lhs=gurobi_expr, sense=gurobipy.GRB.LESS_EQUAL, - rhs=value(con.upper - repn_constant)) + self._solver_model.cbCut( + lhs=gurobi_expr, + sense=gurobipy.GRB.LESS_EQUAL, + rhs=value(con.upper - repn_constant), + ) else: - raise ValueError('Constraint does not have a lower or an upper bound {0} \n'.format(con)) + raise ValueError( + 'Constraint does not have a lower or an upper bound {0} \n'.format(con) + ) def cbGet(self, what): return self._solver_model.cbGet(what) @@ -1309,33 +1504,50 @@ def cbLazy(self, con): raise ValueError('cbLazy expected an active constraint.') if is_fixed(con.body): - raise ValueError('cbLazy expected a non-trival constraint') + raise ValueError('cbLazy expected a non-trivial constraint') - (gurobi_expr, - repn_constant, - mutable_linear_coefficients, - mutable_quadratic_coefficients) = self._get_expr_from_pyomo_expr(con.body) + ( + gurobi_expr, + repn_constant, + mutable_linear_coefficients, + mutable_quadratic_coefficients, + ) = self._get_expr_from_pyomo_expr(con.body) if con.has_lb(): if con.has_ub(): raise ValueError('Range constraints are not supported in cbLazy.') if not is_fixed(con.lower): - raise ValueError('Lower bound of constraint {0} is not constant.'.format(con)) + raise ValueError( + 'Lower bound of constraint {0} is not constant.'.format(con) + ) if con.has_ub(): if not is_fixed(con.upper): - raise ValueError('Upper bound of constraint {0} is not constant.'.format(con)) + raise ValueError( + 'Upper bound of constraint {0} is not constant.'.format(con) + ) if con.equality: - self._solver_model.cbLazy(lhs=gurobi_expr, sense=gurobipy.GRB.EQUAL, - rhs=value(con.lower - repn_constant)) + self._solver_model.cbLazy( + lhs=gurobi_expr, + sense=gurobipy.GRB.EQUAL, + rhs=value(con.lower - repn_constant), + ) elif con.has_lb() and (value(con.lower) > -float('inf')): - self._solver_model.cbLazy(lhs=gurobi_expr, sense=gurobipy.GRB.GREATER_EQUAL, - rhs=value(con.lower - repn_constant)) + self._solver_model.cbLazy( + lhs=gurobi_expr, + sense=gurobipy.GRB.GREATER_EQUAL, + rhs=value(con.lower - repn_constant), + ) elif con.has_ub() and (value(con.upper) < float('inf')): - self._solver_model.cbLazy(lhs=gurobi_expr, sense=gurobipy.GRB.LESS_EQUAL, - rhs=value(con.upper - repn_constant)) + self._solver_model.cbLazy( + lhs=gurobi_expr, + sense=gurobipy.GRB.LESS_EQUAL, + rhs=value(con.upper - repn_constant), + ) else: - raise ValueError('Constraint does not have a lower or an upper bound {0} \n'.format(con)) + raise ValueError( + 'Constraint does not have a lower or an upper bound {0} \n'.format(con) + ) def cbSetSolution(self, vars, solution): if not isinstance(vars, Iterable): @@ -1348,4 +1560,3 @@ def cbUseSolution(self): def reset(self): self._solver_model.reset() - diff --git a/pyomo/contrib/appsi/solvers/highs.py b/pyomo/contrib/appsi/solvers/highs.py new file mode 100644 index 00000000000..1cf60d36e10 --- /dev/null +++ b/pyomo/contrib/appsi/solvers/highs.py @@ -0,0 +1,769 @@ +import logging +from typing import List, Dict, Optional +from pyomo.common.collections import ComponentMap +from pyomo.common.dependencies import attempt_import +from pyomo.common.errors import PyomoException +from pyomo.common.timing import HierarchicalTimer +from pyomo.common.config import ConfigValue, NonNegativeInt +from pyomo.common.tee import TeeStream, capture_output +from pyomo.common.log import LogStream +from pyomo.core.kernel.objective import minimize, maximize +from pyomo.core.base import SymbolMap +from pyomo.core.base.var import _GeneralVarData +from pyomo.core.base.constraint import _GeneralConstraintData +from pyomo.core.base.sos import _SOSConstraintData +from pyomo.core.base.param import _ParamData +from pyomo.core.expr.numvalue import value, is_constant +from pyomo.repn import generate_standard_repn +from pyomo.core.expr.numeric_expr import NPV_MaxExpression, NPV_MinExpression +from pyomo.contrib.appsi.base import ( + PersistentSolver, + Results, + TerminationCondition, + MIPSolverConfig, + PersistentBase, + PersistentSolutionLoader, +) +from pyomo.contrib.appsi.cmodel import cmodel, cmodel_available +from pyomo.common.dependencies import numpy as np +from pyomo.core.staleflag import StaleFlagManager +import sys + +logger = logging.getLogger(__name__) + +highspy, highspy_available = attempt_import('highspy') + + +class DegreeError(PyomoException): + pass + + +class HighsConfig(MIPSolverConfig): + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super(HighsConfig, self).__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) + + self.declare('logfile', ConfigValue(domain=str)) + self.declare('solver_output_logger', ConfigValue()) + self.declare('log_level', ConfigValue(domain=NonNegativeInt)) + + self.logfile = '' + self.solver_output_logger = logger + self.log_level = logging.INFO + + +class HighsResults(Results): + def __init__(self, solver): + super().__init__() + self.wallclock_time = None + self.solution_loader = PersistentSolutionLoader(solver=solver) + + +class _MutableVarBounds(object): + def __init__(self, lower_expr, upper_expr, pyomo_var_id, var_map, highs): + self.pyomo_var_id = pyomo_var_id + self.lower_expr = lower_expr + self.upper_expr = upper_expr + self.var_map = var_map + self.highs = highs + + def update(self): + col_ndx = self.var_map[self.pyomo_var_id] + lb = value(self.lower_expr) + ub = value(self.upper_expr) + self.highs.changeColBounds(col_ndx, lb, ub) + + +class _MutableLinearCoefficient(object): + def __init__(self, pyomo_con, pyomo_var_id, con_map, var_map, expr, highs): + self.expr = expr + self.highs = highs + self.pyomo_var_id = pyomo_var_id + self.pyomo_con = pyomo_con + self.con_map = con_map + self.var_map = var_map + + def update(self): + row_ndx = self.con_map[self.pyomo_con] + col_ndx = self.var_map[self.pyomo_var_id] + self.highs.changeCoeff(row_ndx, col_ndx, value(self.expr)) + + +class _MutableObjectiveCoefficient(object): + def __init__(self, pyomo_var_id, var_map, expr, highs): + self.expr = expr + self.highs = highs + self.pyomo_var_id = pyomo_var_id + self.var_map = var_map + + def update(self): + col_ndx = self.var_map[self.pyomo_var_id] + self.highs.changeColCost(col_ndx, value(self.expr)) + + +class _MutableObjectiveOffset(object): + def __init__(self, expr, highs): + self.expr = expr + self.highs = highs + + def update(self): + self.highs.changeObjectiveOffset(value(self.expr)) + + +class _MutableConstraintBounds(object): + def __init__(self, lower_expr, upper_expr, pyomo_con, con_map, highs): + self.lower_expr = lower_expr + self.upper_expr = upper_expr + self.con = pyomo_con + self.con_map = con_map + self.highs = highs + + def update(self): + row_ndx = self.con_map[self.con] + lb = value(self.lower_expr) + ub = value(self.upper_expr) + self.highs.changeRowBounds(row_ndx, lb, ub) + + +class Highs(PersistentBase, PersistentSolver): + """ + Interface to HiGHS + """ + + _available = None + + def __init__(self, only_child_vars=False): + super().__init__(only_child_vars=only_child_vars) + self._config = HighsConfig() + self._solver_options = dict() + self._solver_model = None + self._pyomo_var_to_solver_var_map = dict() + self._pyomo_con_to_solver_con_map = dict() + self._solver_con_to_pyomo_con_map = dict() + self._mutable_helpers = dict() + self._mutable_bounds = dict() + self._objective_helpers = list() + self._last_results_object: Optional[HighsResults] = None + self._sol = None + + def available(self): + if highspy_available: + return self.Availability.FullLicense + else: + return self.Availability.NotFound + + def version(self): + version = ( + highspy.HIGHS_VERSION_MAJOR, + highspy.HIGHS_VERSION_MINOR, + highspy.HIGHS_VERSION_PATCH, + ) + return version + + @property + def config(self) -> HighsConfig: + return self._config + + @config.setter + def config(self, val: HighsConfig): + self._config = val + + @property + def highs_options(self): + """ + A dictionary mapping solver options to values for those options. These + are solver specific. + + Returns + ------- + dict + A dictionary mapping solver options to values for those options + """ + return self._solver_options + + @highs_options.setter + def highs_options(self, val: Dict): + self._solver_options = val + + @property + def symbol_map(self): + return SymbolMap() + # raise RuntimeError('Highs interface does not have a symbol map') + + def _solve(self, timer: HierarchicalTimer): + config = self.config + options = self.highs_options + + ostreams = [ + LogStream( + level=self.config.log_level, logger=self.config.solver_output_logger + ) + ] + if self.config.stream_solver: + ostreams.append(sys.stdout) + + with TeeStream(*ostreams) as t: + with capture_output(output=t.STDOUT, capture_fd=True): + self._solver_model.setOptionValue('log_to_console', True) + if config.logfile != '': + self._solver_model.setOptionValue('log_file', config.logfile) + + if config.time_limit is not None: + self._solver_model.setOptionValue('time_limit', config.time_limit) + if config.mip_gap is not None: + self._solver_model.setOptionValue('mip_rel_gap', config.mip_gap) + + for key, option in options.items(): + self._solver_model.setOptionValue(key, option) + timer.start('optimize') + self._solver_model.run() + timer.stop('optimize') + + return self._postsolve(timer) + + def solve(self, model, timer: HierarchicalTimer = None) -> Results: + StaleFlagManager.mark_all_as_stale() + if self._last_results_object is not None: + self._last_results_object.solution_loader.invalidate() + if timer is None: + timer = HierarchicalTimer() + if model is not self._model: + timer.start('set_instance') + self.set_instance(model) + timer.stop('set_instance') + else: + timer.start('update') + self.update(timer=timer) + timer.stop('update') + res = self._solve(timer) + self._last_results_object = res + if self.config.report_timing: + logger.info('\n' + str(timer)) + return res + + def _process_domain_and_bounds(self, var_id): + _v, _lb, _ub, _fixed, _domain_interval, _value = self._vars[var_id] + lb, ub, step = _domain_interval + if lb is None: + lb = -highspy.kHighsInf + if ub is None: + ub = highspy.kHighsInf + if step == 0: + vtype = highspy.HighsVarType.kContinuous + elif step == 1: + vtype = highspy.HighsVarType.kInteger + else: + raise ValueError( + f'Unrecognized domain step: {step} (should be either 0 or 1)' + ) + if _fixed: + lb = _value + ub = _value + else: + if _lb is not None or _ub is not None: + if not is_constant(_lb) or not is_constant(_ub): + if _lb is None: + tmp_lb = -highspy.kHighsInf + else: + tmp_lb = _lb + if _ub is None: + tmp_ub = highspy.kHighsInf + else: + tmp_ub = _ub + mutable_bound = _MutableVarBounds( + lower_expr=NPV_MaxExpression((tmp_lb, lb)), + upper_expr=NPV_MinExpression((tmp_ub, ub)), + pyomo_var_id=var_id, + var_map=self._pyomo_var_to_solver_var_map, + highs=self._solver_model, + ) + self._mutable_bounds[var_id] = (_v, mutable_bound) + if _lb is not None: + lb = max(value(_lb), lb) + if _ub is not None: + ub = min(value(_ub), ub) + + return lb, ub, vtype + + def _add_variables(self, variables: List[_GeneralVarData]): + self._sol = None + if self._last_results_object is not None: + self._last_results_object.solution_loader.invalidate() + lbs = list() + ubs = list() + indices = list() + vtypes = list() + + current_num_vars = len(self._pyomo_var_to_solver_var_map) + for v in variables: + v_id = id(v) + lb, ub, vtype = self._process_domain_and_bounds(v_id) + lbs.append(lb) + ubs.append(ub) + vtypes.append(vtype) + indices.append(current_num_vars) + self._pyomo_var_to_solver_var_map[v_id] = current_num_vars + current_num_vars += 1 + + self._solver_model.addVars( + len(lbs), np.array(lbs, dtype=np.double), np.array(ubs, dtype=np.double) + ) + self._solver_model.changeColsIntegrality( + len(vtypes), np.array(indices), np.array(vtypes) + ) + + def _add_params(self, params: List[_ParamData]): + pass + + def _reinit(self): + saved_config = self.config + saved_options = self.highs_options + saved_update_config = self.update_config + self.__init__(only_child_vars=self._only_child_vars) + self.config = saved_config + self.highs_options = saved_options + self.update_config = saved_update_config + + def set_instance(self, model): + if self._last_results_object is not None: + self._last_results_object.solution_loader.invalidate() + if not self.available(): + c = self.__class__ + raise PyomoException( + f'Solver {c.__module__}.{c.__qualname__} is not available ' + f'({self.available()}).' + ) + self._reinit() + self._model = model + if self.use_extensions and cmodel_available: + self._expr_types = cmodel.PyomoExprTypes() + + self._solver_model = highspy.Highs() + self.add_block(model) + if self._objective is None: + self.set_objective(None) + + def _add_constraints(self, cons: List[_GeneralConstraintData]): + self._sol = None + if self._last_results_object is not None: + self._last_results_object.solution_loader.invalidate() + current_num_cons = len(self._pyomo_con_to_solver_con_map) + lbs = list() + ubs = list() + starts = list() + var_indices = list() + coef_values = list() + + for con in cons: + repn = generate_standard_repn( + con.body, quadratic=False, compute_values=False + ) + if repn.nonlinear_expr is not None: + raise DegreeError( + f'Highs interface does not support expressions of degree {repn.polynomial_degree()}' + ) + + starts.append(len(coef_values)) + for ndx, coef in enumerate(repn.linear_coefs): + v = repn.linear_vars[ndx] + v_id = id(v) + coef_val = value(coef) + if not is_constant(coef): + mutable_linear_coefficient = _MutableLinearCoefficient( + pyomo_con=con, + pyomo_var_id=v_id, + con_map=self._pyomo_con_to_solver_con_map, + var_map=self._pyomo_var_to_solver_var_map, + expr=coef, + highs=self._solver_model, + ) + if con not in self._mutable_helpers: + self._mutable_helpers[con] = list() + self._mutable_helpers[con].append(mutable_linear_coefficient) + if coef_val == 0: + continue + var_indices.append(self._pyomo_var_to_solver_var_map[v_id]) + coef_values.append(coef_val) + + if con.has_lb(): + lb = con.lower - repn.constant + else: + lb = -highspy.kHighsInf + if con.has_ub(): + ub = con.upper - repn.constant + else: + ub = highspy.kHighsInf + + if not is_constant(lb) or not is_constant(ub): + mutable_con_bounds = _MutableConstraintBounds( + lower_expr=lb, + upper_expr=ub, + pyomo_con=con, + con_map=self._pyomo_con_to_solver_con_map, + highs=self._solver_model, + ) + if con not in self._mutable_helpers: + self._mutable_helpers[con] = [mutable_con_bounds] + else: + self._mutable_helpers[con].append(mutable_con_bounds) + + lbs.append(value(lb)) + ubs.append(value(ub)) + self._pyomo_con_to_solver_con_map[con] = current_num_cons + self._solver_con_to_pyomo_con_map[current_num_cons] = con + current_num_cons += 1 + + self._solver_model.addRows( + len(lbs), + np.array(lbs, dtype=np.double), + np.array(ubs, dtype=np.double), + len(coef_values), + np.array(starts), + np.array(var_indices), + np.array(coef_values, dtype=np.double), + ) + + def _add_sos_constraints(self, cons: List[_SOSConstraintData]): + if cons: + raise NotImplementedError( + 'Highs interface does not support SOS constraints' + ) + + def _remove_constraints(self, cons: List[_GeneralConstraintData]): + self._sol = None + if self._last_results_object is not None: + self._last_results_object.solution_loader.invalidate() + indices_to_remove = list() + for con in cons: + con_ndx = self._pyomo_con_to_solver_con_map.pop(con) + del self._solver_con_to_pyomo_con_map[con_ndx] + indices_to_remove.append(con_ndx) + self._mutable_helpers.pop(con, None) + self._solver_model.deleteRows( + len(indices_to_remove), np.array(indices_to_remove) + ) + con_ndx = 0 + new_con_map = dict() + for c in self._pyomo_con_to_solver_con_map.keys(): + new_con_map[c] = con_ndx + con_ndx += 1 + self._pyomo_con_to_solver_con_map = new_con_map + self._solver_con_to_pyomo_con_map = { + v: k for k, v in self._pyomo_con_to_solver_con_map.items() + } + + def _remove_sos_constraints(self, cons: List[_SOSConstraintData]): + if cons: + raise NotImplementedError( + 'Highs interface does not support SOS constraints' + ) + + def _remove_variables(self, variables: List[_GeneralVarData]): + self._sol = None + if self._last_results_object is not None: + self._last_results_object.solution_loader.invalidate() + indices_to_remove = list() + for v in variables: + v_id = id(v) + v_ndx = self._pyomo_var_to_solver_var_map.pop(v_id) + indices_to_remove.append(v_ndx) + self._mutable_bounds.pop(v_id, None) + self._solver_model.deleteVars( + len(indices_to_remove), np.array(indices_to_remove) + ) + v_ndx = 0 + new_var_map = dict() + for v_id in self._pyomo_var_to_solver_var_map.keys(): + new_var_map[v_id] = v_ndx + v_ndx += 1 + self._pyomo_var_to_solver_var_map = new_var_map + + def _remove_params(self, params: List[_ParamData]): + pass + + def _update_variables(self, variables: List[_GeneralVarData]): + self._sol = None + if self._last_results_object is not None: + self._last_results_object.solution_loader.invalidate() + indices = list() + lbs = list() + ubs = list() + vtypes = list() + + for v in variables: + v_id = id(v) + self._mutable_bounds.pop(v_id, None) + v_ndx = self._pyomo_var_to_solver_var_map[v_id] + lb, ub, vtype = self._process_domain_and_bounds(v_id) + lbs.append(lb) + ubs.append(ub) + vtypes.append(vtype) + indices.append(v_ndx) + + self._solver_model.changeColsBounds( + len(indices), + np.array(indices), + np.array(lbs, dtype=np.double), + np.array(ubs, dtype=np.double), + ) + self._solver_model.changeColsIntegrality( + len(indices), np.array(indices), np.array(vtypes) + ) + + def update_params(self): + self._sol = None + if self._last_results_object is not None: + self._last_results_object.solution_loader.invalidate() + for con, helpers in self._mutable_helpers.items(): + for helper in helpers: + helper.update() + for k, (v, helper) in self._mutable_bounds.items(): + helper.update() + for helper in self._objective_helpers: + helper.update() + + def _set_objective(self, obj): + self._sol = None + if self._last_results_object is not None: + self._last_results_object.solution_loader.invalidate() + n = len(self._pyomo_var_to_solver_var_map) + indices = np.arange(n) + costs = np.zeros(n, dtype=np.double) + self._objective_helpers = list() + if obj is None: + sense = highspy.ObjSense.kMinimize + self._solver_model.changeObjectiveOffset(0) + else: + if obj.sense == minimize: + sense = highspy.ObjSense.kMinimize + elif obj.sense == maximize: + sense = highspy.ObjSense.kMaximize + else: + raise ValueError( + 'Objective sense is not recognized: {0}'.format(obj.sense) + ) + + repn = generate_standard_repn( + obj.expr, quadratic=False, compute_values=False + ) + if repn.nonlinear_expr is not None: + raise DegreeError( + f'Highs interface does not support expressions of degree {repn.polynomial_degree()}' + ) + + for coef, v in zip(repn.linear_coefs, repn.linear_vars): + v_id = id(v) + v_ndx = self._pyomo_var_to_solver_var_map[v_id] + costs[v_ndx] = value(coef) + if not is_constant(coef): + mutable_objective_coef = _MutableObjectiveCoefficient( + pyomo_var_id=v_id, + var_map=self._pyomo_var_to_solver_var_map, + expr=coef, + highs=self._solver_model, + ) + self._objective_helpers.append(mutable_objective_coef) + + self._solver_model.changeObjectiveOffset(value(repn.constant)) + if not is_constant(repn.constant): + mutable_objective_offset = _MutableObjectiveOffset( + expr=repn.constant, highs=self._solver_model + ) + self._objective_helpers.append(mutable_objective_offset) + + self._solver_model.changeObjectiveSense(sense) + self._solver_model.changeColsCost(n, indices, costs) + + def _postsolve(self, timer: HierarchicalTimer): + config = self.config + + highs = self._solver_model + status = highs.getModelStatus() + + results = HighsResults(self) + results.wallclock_time = highs.getRunTime() + + if status == highspy.HighsModelStatus.kNotset: + results.termination_condition = TerminationCondition.unknown + elif status == highspy.HighsModelStatus.kLoadError: + results.termination_condition = TerminationCondition.error + elif status == highspy.HighsModelStatus.kModelError: + results.termination_condition = TerminationCondition.error + elif status == highspy.HighsModelStatus.kPresolveError: + results.termination_condition = TerminationCondition.error + elif status == highspy.HighsModelStatus.kSolveError: + results.termination_condition = TerminationCondition.error + elif status == highspy.HighsModelStatus.kPostsolveError: + results.termination_condition = TerminationCondition.error + elif status == highspy.HighsModelStatus.kModelEmpty: + results.termination_condition = TerminationCondition.unknown + elif status == highspy.HighsModelStatus.kOptimal: + results.termination_condition = TerminationCondition.optimal + elif status == highspy.HighsModelStatus.kInfeasible: + results.termination_condition = TerminationCondition.infeasible + elif status == highspy.HighsModelStatus.kUnboundedOrInfeasible: + results.termination_condition = TerminationCondition.infeasibleOrUnbounded + elif status == highspy.HighsModelStatus.kUnbounded: + results.termination_condition = TerminationCondition.unbounded + elif status == highspy.HighsModelStatus.kObjectiveBound: + results.termination_condition = TerminationCondition.objectiveLimit + elif status == highspy.HighsModelStatus.kObjectiveTarget: + results.termination_condition = TerminationCondition.objectiveLimit + elif status == highspy.HighsModelStatus.kTimeLimit: + results.termination_condition = TerminationCondition.maxTimeLimit + elif status == highspy.HighsModelStatus.kIterationLimit: + results.termination_condition = TerminationCondition.maxIterations + elif status == highspy.HighsModelStatus.kUnknown: + results.termination_condition = TerminationCondition.unknown + else: + results.termination_condition = TerminationCondition.unknown + + timer.start('load solution') + self._sol = highs.getSolution() + has_feasible_solution = False + if results.termination_condition == TerminationCondition.optimal: + has_feasible_solution = True + elif results.termination_condition in { + TerminationCondition.objectiveLimit, + TerminationCondition.maxIterations, + TerminationCondition.maxTimeLimit, + }: + if self._sol.value_valid: + has_feasible_solution = True + + if config.load_solution: + if has_feasible_solution: + if results.termination_condition != TerminationCondition.optimal: + logger.warning( + 'Loading a feasible but suboptimal solution. ' + 'Please set load_solution=False and check ' + 'results.termination_condition and ' + 'results.found_feasible_solution() before loading a solution.' + ) + self.load_vars() + else: + raise RuntimeError( + 'A feasible solution was not found, so no solution can be loaded.' + 'Please set opt.config.load_solution=False and check ' + 'results.termination_condition and ' + 'results.best_feasible_objective before loading a solution.' + ) + timer.stop('load solution') + + info = highs.getInfo() + results.best_objective_bound = None + results.best_feasible_objective = None + if self._objective is not None: + if has_feasible_solution: + results.best_feasible_objective = info.objective_function_value + if info.mip_node_count == -1: + if has_feasible_solution: + results.best_objective_bound = info.objective_function_value + else: + results.best_objective_bound = None + else: + results.best_objective_bound = info.mip_dual_bound + + return results + + def load_vars(self, vars_to_load=None): + for v, val in self.get_primals(vars_to_load=vars_to_load).items(): + v.set_value(val, skip_validation=True) + StaleFlagManager.mark_all_as_stale(delayed=True) + + def get_primals(self, vars_to_load=None, solution_number=0): + if self._sol is None or not self._sol.value_valid: + raise RuntimeError( + 'Solver does not currently have a valid solution. Please ' + 'check the termination condition.' + ) + + res = ComponentMap() + if vars_to_load is None: + var_ids_to_load = list() + for v, ref_info in self._referenced_variables.items(): + using_cons, using_sos, using_obj = ref_info + if using_cons or using_sos or (using_obj is not None): + var_ids_to_load.append(v) + else: + var_ids_to_load = [id(v) for v in vars_to_load] + + var_vals = self._sol.col_value + + for v_id in var_ids_to_load: + v = self._vars[v_id][0] + v_ndx = self._pyomo_var_to_solver_var_map[v_id] + res[v] = var_vals[v_ndx] + + return res + + def get_reduced_costs(self, vars_to_load=None): + if self._sol is None or not self._sol.dual_valid: + raise RuntimeError( + 'Solver does not currently have valid reduced costs. Please ' + 'check the termination condition.' + ) + res = ComponentMap() + if vars_to_load is None: + var_ids_to_load = list(self._vars.keys()) + else: + var_ids_to_load = [id(v) for v in vars_to_load] + + var_vals = self._sol.col_dual + + for v_id in var_ids_to_load: + v = self._vars[v_id][0] + v_ndx = self._pyomo_var_to_solver_var_map[v_id] + res[v] = var_vals[v_ndx] + + return res + + def get_duals(self, cons_to_load=None): + if self._sol is None or not self._sol.dual_valid: + raise RuntimeError( + 'Solver does not currently have valid duals. Please ' + 'check the termination condition.' + ) + + res = dict() + if cons_to_load is None: + cons_to_load = list(self._pyomo_con_to_solver_con_map.keys()) + + duals = self._sol.row_dual + + for c in cons_to_load: + c_ndx = self._pyomo_con_to_solver_con_map[c] + res[c] = duals[c_ndx] + + return res + + def get_slacks(self, cons_to_load=None): + if self._sol is None or not self._sol.value_valid: + raise RuntimeError( + 'Solver does not currently have valid slacks. Please ' + 'check the termination condition.' + ) + + res = dict() + if cons_to_load is None: + cons_to_load = list(self._pyomo_con_to_solver_con_map.keys()) + + slacks = self._sol.row_value + + for c in cons_to_load: + c_ndx = self._pyomo_con_to_solver_con_map[c] + res[c] = slacks[c_ndx] + + return res diff --git a/pyomo/contrib/appsi/solvers/ipopt.py b/pyomo/contrib/appsi/solvers/ipopt.py index 1e36eaf1a69..d38a836a2ac 100644 --- a/pyomo/contrib/appsi/solvers/ipopt.py +++ b/pyomo/contrib/appsi/solvers/ipopt.py @@ -1,6 +1,12 @@ from pyomo.common.tempfiles import TempfileManager from pyomo.common.fileutils import Executable -from pyomo.contrib.appsi.base import PersistentSolver, Results, TerminationCondition, SolverConfig, PersistentSolutionLoader +from pyomo.contrib.appsi.base import ( + PersistentSolver, + Results, + TerminationCondition, + SolverConfig, + PersistentSolutionLoader, +) from pyomo.contrib.appsi.writers import NLWriter from pyomo.common.log import LogStream import logging @@ -31,17 +37,21 @@ class IpoptConfig(SolverConfig): - def __init__(self, - description=None, - doc=None, - implicit=False, - implicit_domain=None, - visibility=0): - super(IpoptConfig, self).__init__(description=description, - doc=doc, - implicit=implicit, - implicit_domain=implicit_domain, - visibility=visibility) + def __init__( + self, + description=None, + doc=None, + implicit=False, + implicit_domain=None, + visibility=0, + ): + super(IpoptConfig, self).__init__( + description=description, + doc=doc, + implicit=implicit, + implicit_domain=implicit_domain, + visibility=visibility, + ) self.declare('executable', ConfigValue()) self.declare('filename', ConfigValue(domain=str)) @@ -56,66 +66,68 @@ def __init__(self, self.log_level = logging.INFO -ipopt_command_line_options = {'acceptable_compl_inf_tol', - 'acceptable_constr_viol_tol', - 'acceptable_dual_inf_tol', - 'acceptable_tol', - 'alpha_for_y', - 'bound_frac', - 'bound_mult_init_val', - 'bound_push', - 'bound_relax_factor', - 'compl_inf_tol', - 'constr_mult_init_max', - 'constr_viol_tol', - 'diverging_iterates_tol', - 'dual_inf_tol', - 'expect_infeasible_problem', - 'file_print_level', - 'halt_on_ampl_error', - 'hessian_approximation', - 'honor_original_bounds', - 'linear_scaling_on_demand', - 'linear_solver', - 'linear_system_scaling', - 'ma27_pivtol', - 'ma27_pivtolmax', - 'ma57_pivot_order', - 'ma57_pivtol', - 'ma57_pivtolmax', - 'max_cpu_time', - 'max_iter', - 'max_refinement_steps', - 'max_soc', - 'maxit', - 'min_refinement_steps', - 'mu_init', - 'mu_max', - 'mu_oracle', - 'mu_strategy', - 'nlp_scaling_max_gradient', - 'nlp_scaling_method', - 'obj_scaling_factor', - 'option_file_name', - 'outlev', - 'output_file', - 'pardiso_matching_strategy', - 'print_level', - 'print_options_documentation', - 'print_user_options', - 'required_infeasibility_reduction', - 'slack_bound_frac', - 'slack_bound_push', - 'tol', - 'wantsol', - 'warm_start_bound_push', - 'warm_start_init_point', - 'warm_start_mult_bound_push', - 'watchdog_shortened_iter_trigger'} +ipopt_command_line_options = { + 'acceptable_compl_inf_tol', + 'acceptable_constr_viol_tol', + 'acceptable_dual_inf_tol', + 'acceptable_tol', + 'alpha_for_y', + 'bound_frac', + 'bound_mult_init_val', + 'bound_push', + 'bound_relax_factor', + 'compl_inf_tol', + 'constr_mult_init_max', + 'constr_viol_tol', + 'diverging_iterates_tol', + 'dual_inf_tol', + 'expect_infeasible_problem', + 'file_print_level', + 'halt_on_ampl_error', + 'hessian_approximation', + 'honor_original_bounds', + 'linear_scaling_on_demand', + 'linear_solver', + 'linear_system_scaling', + 'ma27_pivtol', + 'ma27_pivtolmax', + 'ma57_pivot_order', + 'ma57_pivtol', + 'ma57_pivtolmax', + 'max_cpu_time', + 'max_iter', + 'max_refinement_steps', + 'max_soc', + 'maxit', + 'min_refinement_steps', + 'mu_init', + 'mu_max', + 'mu_oracle', + 'mu_strategy', + 'nlp_scaling_max_gradient', + 'nlp_scaling_method', + 'obj_scaling_factor', + 'option_file_name', + 'outlev', + 'output_file', + 'pardiso_matching_strategy', + 'print_level', + 'print_options_documentation', + 'print_user_options', + 'required_infeasibility_reduction', + 'slack_bound_frac', + 'slack_bound_push', + 'tol', + 'wantsol', + 'warm_start_bound_push', + 'warm_start_init_point', + 'warm_start_mult_bound_push', + 'watchdog_shortened_iter_trigger', +} class Ipopt(PersistentSolver): - def __init__(self, only_child_vars=True): + def __init__(self, only_child_vars=False): self._config = IpoptConfig() self._solver_options = dict() self._writer = NLWriter(only_child_vars=only_child_vars) @@ -133,11 +145,13 @@ def available(self): return self.Availability.FullLicense def version(self): - results = subprocess.run([str(self.config.executable), '--version'], - timeout=1, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + results = subprocess.run( + [str(self.config.executable), '--version'], + timeout=1, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) version = results.stdout.splitlines()[0] version = version.split(' ')[1] version = version.strip() @@ -199,6 +213,7 @@ def symbol_map(self): return self._writer.symbol_map def set_instance(self, model): + self._writer.config.symbolic_solver_labels = self.config.symbolic_solver_labels self._writer.set_instance(model) def add_variables(self, variables: List[_GeneralVarData]): @@ -262,7 +277,7 @@ def solve(self, model, timer: HierarchicalTimer = None): TempfileManager.add_tempfile(self._filename + '.opt', exists=False) self._write_options_file() timer.start('write nl file') - self._writer.write(model, self._filename+'.nl', timer=timer) + self._writer.write(model, self._filename + '.nl', timer=timer) timer.stop('write nl file') res = self._apply_solver(timer) self._last_results_object = res @@ -302,20 +317,35 @@ def _parse_sol(self): n_cons = len(solve_cons) n_vars = len(solve_vars) - dual_lines = all_lines[12:12+n_cons] - primal_lines = all_lines[12+n_cons:12+n_cons+n_vars] + dual_lines = all_lines[12 : 12 + n_cons] + primal_lines = all_lines[12 + n_cons : 12 + n_cons + n_vars] - rc_upper_info_line = all_lines[12+n_cons+n_vars+1] + rc_upper_info_line = all_lines[12 + n_cons + n_vars + 1] assert rc_upper_info_line.startswith('suffix') n_rc_upper = int(rc_upper_info_line.split()[2]) - assert 'ipopt_zU_out' in all_lines[12+n_cons+n_vars+2] - upper_rc_lines = all_lines[12+n_cons+n_vars+3:12+n_cons+n_vars+3+n_rc_upper] + assert 'ipopt_zU_out' in all_lines[12 + n_cons + n_vars + 2] + upper_rc_lines = all_lines[ + 12 + n_cons + n_vars + 3 : 12 + n_cons + n_vars + 3 + n_rc_upper + ] - rc_lower_info_line = all_lines[12+n_cons+n_vars+3+n_rc_upper] + rc_lower_info_line = all_lines[12 + n_cons + n_vars + 3 + n_rc_upper] assert rc_lower_info_line.startswith('suffix') n_rc_lower = int(rc_lower_info_line.split()[2]) - assert 'ipopt_zL_out' in all_lines[12+n_cons+n_vars+3+n_rc_upper+1] - lower_rc_lines = all_lines[12+n_cons+n_vars+3+n_rc_upper+2:12+n_cons+n_vars+3+n_rc_upper+2+n_rc_lower] + assert 'ipopt_zL_out' in all_lines[12 + n_cons + n_vars + 3 + n_rc_upper + 1] + lower_rc_lines = all_lines[ + 12 + + n_cons + + n_vars + + 3 + + n_rc_upper + + 2 : 12 + + n_cons + + n_vars + + 3 + + n_rc_upper + + 2 + + n_rc_lower + ] self._dual_sol = dict() self._primal_sol = ComponentMap() @@ -353,30 +383,38 @@ def _parse_sol(self): if var not in self._reduced_costs: self._reduced_costs[var] = 0 - if results.termination_condition == TerminationCondition.optimal and self.config.load_solution: + if ( + results.termination_condition == TerminationCondition.optimal + and self.config.load_solution + ): for v, val in self._primal_sol.items(): v.set_value(val, skip_validation=True) - if self._writer.get_active_objective() is None: results.best_feasible_objective = None else: - results.best_feasible_objective = value(self._writer.get_active_objective().expr) + results.best_feasible_objective = value( + self._writer.get_active_objective().expr + ) elif results.termination_condition == TerminationCondition.optimal: if self._writer.get_active_objective() is None: results.best_feasible_objective = None else: - obj_expr_evaluated = replace_expressions(self._writer.get_active_objective().expr, - substitution_map={id(v): val for v, val in self._primal_sol.items()}, - descend_into_named_expressions=True, - remove_named_expressions=True) + obj_expr_evaluated = replace_expressions( + self._writer.get_active_objective().expr, + substitution_map={ + id(v): val for v, val in self._primal_sol.items() + }, + descend_into_named_expressions=True, + remove_named_expressions=True, + ) results.best_feasible_objective = value(obj_expr_evaluated) elif self.config.load_solution: - raise RuntimeError('A feasible solution was not found, so no solution can be loaded.' - 'Please set opt.config.load_solution=False and check ' - 'results.termination_condition and ' - 'resutls.best_feasible_objective before loading a solution.') - - results.solution_loader = PersistentSolutionLoader(solver=self) + raise RuntimeError( + 'A feasible solution was not found, so no solution can be loaded.' + 'Please set opt.config.load_solution=False and check ' + 'results.termination_condition and ' + 'results.best_feasible_objective before loading a solution.' + ) return results @@ -384,21 +422,29 @@ def _apply_solver(self, timer: HierarchicalTimer): config = self.config if config.time_limit is not None: - timeout = config.time_limit + min(max(1, 0.01 * config.time_limit), 100) + timeout = config.time_limit + min(max(1.0, 0.01 * config.time_limit), 100) else: timeout = None - ostreams = [LogStream(level=self.config.log_level, logger=self.config.solver_output_logger)] + ostreams = [ + LogStream( + level=self.config.log_level, logger=self.config.solver_output_logger + ) + ] if self.config.stream_solver: ostreams.append(sys.stdout) - cmd = [str(config.executable), - self._filename + '.nl', - '-AMPL', - 'option_file_name=' + self._filename + '.opt'] + cmd = [ + str(config.executable), + self._filename + '.nl', + '-AMPL', + 'option_file_name=' + self._filename + '.opt', + ] if 'option_file_name' in self.ipopt_options: - raise ValueError('Use Ipopt.config.filename to specify the name of the options file. ' - 'Do not use Ipopt.ipopt_options["option_file_name"].') + raise ValueError( + 'Use Ipopt.config.filename to specify the name of the options file. ' + 'Do not use Ipopt.ipopt_options["option_file_name"].' + ) ipopt_options = dict(self.ipopt_options) if config.time_limit is not None: ipopt_options['max_cpu_time'] = config.time_limit @@ -407,29 +453,35 @@ def _apply_solver(self, timer: HierarchicalTimer): env = os.environ.copy() if 'PYOMO_AMPLFUNC' in env: - env['AMPLFUNC'] = "\n".join(filter(None, (env.get('AMPLFUNC', None), env.get('PYOMO_AMPLFUNC', None)))) + env['AMPLFUNC'] = "\n".join( + filter( + None, (env.get('AMPLFUNC', None), env.get('PYOMO_AMPLFUNC', None)) + ) + ) with TeeStream(*ostreams) as t: timer.start('subprocess') - cp = subprocess.run(cmd, - timeout=timeout, - stdout=t.STDOUT, - stderr=t.STDERR, - env=env, - universal_newlines=True) + cp = subprocess.run( + cmd, + timeout=timeout, + stdout=t.STDOUT, + stderr=t.STDERR, + env=env, + universal_newlines=True, + ) timer.stop('subprocess') if cp.returncode != 0: if self.config.load_solution: - raise RuntimeError('A feasible solution was not found, so no solution can be loaded.' - 'Please set opt.config.load_solution=False and check ' - 'results.termination_condition and ' - 'results.best_feasible_objective before loading a solution.') + raise RuntimeError( + 'A feasible solution was not found, so no solution can be loaded.' + 'Please set opt.config.load_solution=False and check ' + 'results.termination_condition and ' + 'results.best_feasible_objective before loading a solution.' + ) results = Results() results.termination_condition = TerminationCondition.error results.best_feasible_objective = None - self._primal_sol = None - self._dual_sol = None else: timer.start('parse solution') results = self._parse_sol() @@ -443,9 +495,22 @@ def _apply_solver(self, timer: HierarchicalTimer): else: results.best_objective_bound = math.inf + results.solution_loader = PersistentSolutionLoader(solver=self) + return results - def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_primals( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: + if ( + self._last_results_object is None + or self._last_results_object.best_feasible_objective is None + ): + raise RuntimeError( + 'Solver does not currently have a valid solution. Please ' + 'check the termination condition.' + ) + res = ComponentMap() if vars_to_load is None: for v, val in self._primal_sol.items(): @@ -455,13 +520,37 @@ def get_primals(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) res[v] = self._primal_sol[v] return res - def get_duals(self, cons_to_load = None): + def get_duals( + self, cons_to_load: Optional[Sequence[_GeneralConstraintData]] = None + ): + if ( + self._last_results_object is None + or self._last_results_object.termination_condition + != TerminationCondition.optimal + ): + raise RuntimeError( + 'Solver does not currently have valid duals. Please ' + 'check the termination condition.' + ) + if cons_to_load is None: return {k: v for k, v in self._dual_sol.items()} else: return {c: self._dual_sol[c] for c in cons_to_load} - def get_reduced_costs(self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None) -> Mapping[_GeneralVarData, float]: + def get_reduced_costs( + self, vars_to_load: Optional[Sequence[_GeneralVarData]] = None + ) -> Mapping[_GeneralVarData, float]: + if ( + self._last_results_object is None + or self._last_results_object.termination_condition + != TerminationCondition.optimal + ): + raise RuntimeError( + 'Solver does not currently have valid reduced costs. Please ' + 'check the termination condition.' + ) + if vars_to_load is None: return ComponentMap((k, v) for k, v in self._reduced_costs.items()) else: diff --git a/pyomo/contrib/appsi/solvers/tests/test_gurobi_persistent.py b/pyomo/contrib/appsi/solvers/tests/test_gurobi_persistent.py index 94cfb018e85..b032f5c827e 100644 --- a/pyomo/contrib/appsi/solvers/tests/test_gurobi_persistent.py +++ b/pyomo/contrib/appsi/solvers/tests/test_gurobi_persistent.py @@ -15,91 +15,103 @@ def create_pmedian_model(): - d_dict = {(1, 1): 1.777356642700564, - (1, 2): 1.6698255595592497, - (1, 3): 1.099139603924817, - (1, 4): 1.3529705111901453, - (1, 5): 1.467907742900842, - (1, 6): 1.5346837414708774, - (2, 1): 1.9783090609123972, - (2, 2): 1.130315350158659, - (2, 3): 1.6712434682302661, - (2, 4): 1.3642294159473756, - (2, 5): 1.4888357071619858, - (2, 6): 1.2030122107340537, - (3, 1): 1.6661983755713592, - (3, 2): 1.227663031206932, - (3, 3): 1.4580640582967632, - (3, 4): 1.0407223975549575, - (3, 5): 1.9742897953778287, - (3, 6): 1.4874760742689066, - (4, 1): 1.4616138636373597, - (4, 2): 1.7141471558082002, - (4, 3): 1.4157281494999725, - (4, 4): 1.888011688001529, - (4, 5): 1.0232934487237717, - (4, 6): 1.8335062677845464, - (5, 1): 1.468494740997508, - (5, 2): 1.8114798126442795, - (5, 3): 1.9455914886158723, - (5, 4): 1.983088378194899, - (5, 5): 1.1761820755785306, - (5, 6): 1.698655759576308, - (6, 1): 1.108855711312383, - (6, 2): 1.1602637342062019, - (6, 3): 1.0928602740245892, - (6, 4): 1.3140620798928404, - (6, 5): 1.0165386843386672, - (6, 6): 1.854049125736362, - (7, 1): 1.2910160386456968, - (7, 2): 1.7800475863350327, - (7, 3): 1.5480965161255695, - (7, 4): 1.1943306766997612, - (7, 5): 1.2920382721805297, - (7, 6): 1.3194527773994338, - (8, 1): 1.6585982235379078, - (8, 2): 1.2315210354122292, - (8, 3): 1.6194303369953538, - (8, 4): 1.8953386098022103, - (8, 5): 1.8694342085696831, - (8, 6): 1.2938069356684523, - (9, 1): 1.4582048085805495, - (9, 2): 1.484979797871119, - (9, 3): 1.2803882693587225, - (9, 4): 1.3289569463506004, - (9, 5): 1.9842424240265042, - (9, 6): 1.0119441379208745, - (10, 1): 1.1429007682932852, - (10, 2): 1.6519772165446711, - (10, 3): 1.0749931799469326, - (10, 4): 1.2920787022811089, - (10, 5): 1.7934429721917704, - (10, 6): 1.9115931008709737} + d_dict = { + (1, 1): 1.777356642700564, + (1, 2): 1.6698255595592497, + (1, 3): 1.099139603924817, + (1, 4): 1.3529705111901453, + (1, 5): 1.467907742900842, + (1, 6): 1.5346837414708774, + (2, 1): 1.9783090609123972, + (2, 2): 1.130315350158659, + (2, 3): 1.6712434682302661, + (2, 4): 1.3642294159473756, + (2, 5): 1.4888357071619858, + (2, 6): 1.2030122107340537, + (3, 1): 1.6661983755713592, + (3, 2): 1.227663031206932, + (3, 3): 1.4580640582967632, + (3, 4): 1.0407223975549575, + (3, 5): 1.9742897953778287, + (3, 6): 1.4874760742689066, + (4, 1): 1.4616138636373597, + (4, 2): 1.7141471558082002, + (4, 3): 1.4157281494999725, + (4, 4): 1.888011688001529, + (4, 5): 1.0232934487237717, + (4, 6): 1.8335062677845464, + (5, 1): 1.468494740997508, + (5, 2): 1.8114798126442795, + (5, 3): 1.9455914886158723, + (5, 4): 1.983088378194899, + (5, 5): 1.1761820755785306, + (5, 6): 1.698655759576308, + (6, 1): 1.108855711312383, + (6, 2): 1.1602637342062019, + (6, 3): 1.0928602740245892, + (6, 4): 1.3140620798928404, + (6, 5): 1.0165386843386672, + (6, 6): 1.854049125736362, + (7, 1): 1.2910160386456968, + (7, 2): 1.7800475863350327, + (7, 3): 1.5480965161255695, + (7, 4): 1.1943306766997612, + (7, 5): 1.2920382721805297, + (7, 6): 1.3194527773994338, + (8, 1): 1.6585982235379078, + (8, 2): 1.2315210354122292, + (8, 3): 1.6194303369953538, + (8, 4): 1.8953386098022103, + (8, 5): 1.8694342085696831, + (8, 6): 1.2938069356684523, + (9, 1): 1.4582048085805495, + (9, 2): 1.484979797871119, + (9, 3): 1.2803882693587225, + (9, 4): 1.3289569463506004, + (9, 5): 1.9842424240265042, + (9, 6): 1.0119441379208745, + (10, 1): 1.1429007682932852, + (10, 2): 1.6519772165446711, + (10, 3): 1.0749931799469326, + (10, 4): 1.2920787022811089, + (10, 5): 1.7934429721917704, + (10, 6): 1.9115931008709737, + } model = pe.ConcreteModel() model.N = pe.Param(initialize=10) - model.Locations = pe.RangeSet(1,model.N) + model.Locations = pe.RangeSet(1, model.N) model.P = pe.Param(initialize=3) model.M = pe.Param(initialize=6) - model.Customers = pe.RangeSet(1,model.M) - model.d = pe.Param(model.Locations, model.Customers, initialize=d_dict, within=pe.Reals) - model.x = pe.Var(model.Locations, model.Customers, bounds=(0.0,1.0)) + model.Customers = pe.RangeSet(1, model.M) + model.d = pe.Param( + model.Locations, model.Customers, initialize=d_dict, within=pe.Reals + ) + model.x = pe.Var(model.Locations, model.Customers, bounds=(0.0, 1.0)) model.y = pe.Var(model.Locations, within=pe.Binary) def rule(model): - return sum( model.d[n,m]*model.x[n,m] for n in model.Locations for m in model.Customers ) + return sum( + model.d[n, m] * model.x[n, m] + for n in model.Locations + for m in model.Customers + ) + model.obj = pe.Objective(rule=rule) def rule(model, m): - return (sum( model.x[n,m] for n in model.Locations ), 1.0) + return (sum(model.x[n, m] for n in model.Locations), 1.0) + model.single_x = pe.Constraint(model.Customers, rule=rule) - def rule(model, n,m): - return (None, model.x[n,m] - model.y[n], 0.0) + def rule(model, n, m): + return (None, model.x[n, m] - model.y[n], 0.0) + model.bound_y = pe.Constraint(model.Locations, model.Customers, rule=rule) def rule(model): - return (sum( model.y[n] for n in model.Locations ) - model.P, 0.0) + return (sum(model.y[n] for n in model.Locations) - model.P, 0.0) + model.num_facilities = pe.Constraint(rule=rule) return model @@ -128,10 +140,8 @@ def get_solution(self): p2 = self.m.p2.value p3 = self.m.p3.value p4 = self.m.p4.value - A = np.array([[1, -p1], - [1, -p3]]) - rhs = np.array([p2, - p4]) + A = np.array([[1, -p1], [1, -p3]]) + rhs = np.array([p2, p4]) sol = np.linalg.solve(A, rhs) x = float(sol[1]) y = float(sol[0]) @@ -167,20 +177,47 @@ def test_lp(self): self.assertAlmostEqual(x, self.m.x.value) self.assertAlmostEqual(y, self.m.y.value) - def test_set_instance_not_available(self): - _avail = Gurobi._available - try: - Gurobi._available = Gurobi.Availability.NeedsCompiledExtension - with self.assertRaisesRegex( - PyomoException, - r'Solver pyomo.contrib.appsi.solvers.gurobi.Gurobi ' - r'is not available \(NeedsCompiledExtension\).'): - opt.set_instance(pe.ConcreteModel()) - finally: - Gurobi._available = _avail - class TestGurobiPersistent(unittest.TestCase): + def test_nonconvex_qcp_objective_bound_1(self): + # the goal of this test is to ensure we can get an objective bound + # for nonconvex but continuous problems even if a feasible solution + # is not found + # + # This is a fragile test because it could fail if Gurobi's algorithms improve + # (e.g., a heuristic solution is found before an objective bound of -8 is reached + m = pe.ConcreteModel() + m.x = pe.Var(bounds=(-5, 5)) + m.y = pe.Var(bounds=(-5, 5)) + m.obj = pe.Objective(expr=-m.x**2 - m.y) + m.c1 = pe.Constraint(expr=m.y <= -2 * m.x + 1) + m.c2 = pe.Constraint(expr=m.y <= m.x - 2) + opt = Gurobi() + opt.gurobi_options['nonconvex'] = 2 + opt.gurobi_options['BestBdStop'] = -8 + opt.config.load_solution = False + res = opt.solve(m) + self.assertEqual(res.best_feasible_objective, None) + self.assertAlmostEqual(res.best_objective_bound, -8) + + def test_nonconvex_qcp_objective_bound_2(self): + # the goal of this test is to ensure we can best_objective_bound properly + # for nonconvex but continuous problems when the solver terminates with a nonzero gap + # + # This is a fragile test because it could fail if Gurobi's algorithms change + m = pe.ConcreteModel() + m.x = pe.Var(bounds=(-5, 5)) + m.y = pe.Var(bounds=(-5, 5)) + m.obj = pe.Objective(expr=-m.x**2 - m.y) + m.c1 = pe.Constraint(expr=m.y <= -2 * m.x + 1) + m.c2 = pe.Constraint(expr=m.y <= m.x - 2) + opt = Gurobi() + opt.gurobi_options['nonconvex'] = 2 + opt.gurobi_options['MIPGap'] = 0.5 + res = opt.solve(m) + self.assertAlmostEqual(res.best_feasible_objective, -4) + self.assertAlmostEqual(res.best_objective_bound, -6) + def test_range_constraints(self): m = pe.ConcreteModel() m.x = pe.Var() @@ -216,19 +253,23 @@ def test_quadratic_constraint_with_params(self): m.x = pe.Var() m.y = pe.Var() m.obj = pe.Objective(expr=m.y) - m.con = pe.Constraint(expr=m.y >= m.a*m.x**2 + m.b*m.x + m.c) + m.con = pe.Constraint(expr=m.y >= m.a * m.x**2 + m.b * m.x + m.c) opt = Gurobi() res = opt.solve(m) self.assertAlmostEqual(m.x.value, -m.b.value / (2 * m.a.value)) - self.assertAlmostEqual(m.y.value, m.a.value * m.x.value ** 2 + m.b.value * m.x.value + m.c.value) + self.assertAlmostEqual( + m.y.value, m.a.value * m.x.value**2 + m.b.value * m.x.value + m.c.value + ) m.a.value = 2 m.b.value = 4 m.c.value = -1 res = opt.solve(m) self.assertAlmostEqual(m.x.value, -m.b.value / (2 * m.a.value)) - self.assertAlmostEqual(m.y.value, m.a.value * m.x.value ** 2 + m.b.value * m.x.value + m.c.value) + self.assertAlmostEqual( + m.y.value, m.a.value * m.x.value**2 + m.b.value * m.x.value + m.c.value + ) def test_quadratic_objective(self): m = pe.ConcreteModel() @@ -236,21 +277,25 @@ def test_quadratic_objective(self): m.b = pe.Param(initialize=1, mutable=True) m.c = pe.Param(initialize=1, mutable=True) m.x = pe.Var() - m.obj = pe.Objective(expr=m.a*m.x**2 + m.b*m.x + m.c) + m.obj = pe.Objective(expr=m.a * m.x**2 + m.b * m.x + m.c) opt = Gurobi() res = opt.solve(m) self.assertAlmostEqual(m.x.value, -m.b.value / (2 * m.a.value)) - self.assertAlmostEqual(res.best_feasible_objective, - m.a.value * m.x.value ** 2 + m.b.value * m.x.value + m.c.value) + self.assertAlmostEqual( + res.best_feasible_objective, + m.a.value * m.x.value**2 + m.b.value * m.x.value + m.c.value, + ) m.a.value = 2 m.b.value = 4 m.c.value = -1 res = opt.solve(m) self.assertAlmostEqual(m.x.value, -m.b.value / (2 * m.a.value)) - self.assertAlmostEqual(res.best_feasible_objective, - m.a.value * m.x.value ** 2 + m.b.value * m.x.value + m.c.value) + self.assertAlmostEqual( + res.best_feasible_objective, + m.a.value * m.x.value**2 + m.b.value * m.x.value + m.c.value, + ) def test_var_bounds(self): m = pe.ConcreteModel() @@ -284,7 +329,7 @@ def test_fixed_var(self): m.x = pe.Var() m.y = pe.Var() m.obj = pe.Objective(expr=m.y) - m.con = pe.Constraint(expr=m.y >= m.a*m.x**2 + m.b*m.x + m.c) + m.con = pe.Constraint(expr=m.y >= m.a * m.x**2 + m.b * m.x + m.c) m.x.fix(1) opt = Gurobi() @@ -300,7 +345,9 @@ def test_fixed_var(self): m.x.unfix() res = opt.solve(m) self.assertAlmostEqual(m.x.value, -m.b.value / (2 * m.a.value)) - self.assertAlmostEqual(m.y.value, m.a.value * m.x.value ** 2 + m.b.value * m.x.value + m.c.value) + self.assertAlmostEqual( + m.y.value, m.a.value * m.x.value**2 + m.b.value * m.x.value + m.c.value + ) def test_linear_constraint_attr(self): m = pe.ConcreteModel() @@ -326,6 +373,7 @@ def test_quadratic_constraint_attr(self): def test_var_attr(self): m = pe.ConcreteModel() m.x = pe.Var(within=pe.Binary) + m.obj = pe.Objective(expr=m.x) opt = Gurobi() opt.set_instance(m) @@ -336,12 +384,12 @@ def test_callback(self): m = pe.ConcreteModel() m.x = pe.Var(bounds=(0, 4)) m.y = pe.Var(within=pe.Integers, bounds=(0, None)) - m.obj = pe.Objective(expr=2*m.x + m.y) + m.obj = pe.Objective(expr=2 * m.x + m.y) m.cons = pe.ConstraintList() def _add_cut(xval): m.x.value = xval - return m.cons.add(m.y >= taylor_series_expansion((m.x - 2)**2)) + return m.cons.add(m.y >= taylor_series_expansion((m.x - 2) ** 2)) _add_cut(0) _add_cut(4) @@ -354,7 +402,7 @@ def _add_cut(xval): def _my_callback(cb_m, cb_opt, cb_where): if cb_where == gurobipy.GRB.Callback.MIPSOL: cb_opt.cbGetSolution(vars=[m.x, m.y]) - if m.y.value < (m.x.value - 2)**2 - 1e-6: + if m.y.value < (m.x.value - 2) ** 2 - 1e-6: cb_opt.cbLazy(_add_cut(m.x.value)) opt.set_callback(_my_callback) @@ -369,7 +417,7 @@ def test_nonconvex(self): m.x = pe.Var() m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y**2) - m.c = pe.Constraint(expr=m.y == (m.x-1)**2 - 2) + m.c = pe.Constraint(expr=m.y == (m.x - 1) ** 2 - 2) opt = Gurobi() opt.gurobi_options['nonconvex'] = 2 opt.solve(m) @@ -383,8 +431,8 @@ def test_nonconvex2(self): m.x = pe.Var() m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y**2) - m.c1 = pe.Constraint(expr=0 <= -m.y + (m.x-1)**2 - 2) - m.c2 = pe.Constraint(expr=0 >= -m.y + (m.x-1)**2 - 2) + m.c1 = pe.Constraint(expr=0 <= -m.y + (m.x - 1) ** 2 - 2) + m.c2 = pe.Constraint(expr=0 >= -m.y + (m.x - 1) ** 2 - 2) opt = Gurobi() opt.gurobi_options['nonconvex'] = 2 opt.solve(m) @@ -440,7 +488,7 @@ def test_basics(self): m.x = pe.Var(bounds=(-10, 10)) m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y**2) - m.c1 = pe.Constraint(expr=m.y >= 2*m.x + 1) + m.c1 = pe.Constraint(expr=m.y >= 2 * m.x + 1) opt = self.opt opt.set_instance(m) @@ -603,7 +651,7 @@ def test_update4(self): def test_update5(self): m = pe.ConcreteModel() - m.a = pe.Set(initialize=[1,2,3], ordered=True) + m.a = pe.Set(initialize=[1, 2, 3], ordered=True) m.x = pe.Var(m.a, within=pe.Binary) m.y = pe.Var(within=pe.Binary) m.obj = pe.Objective(expr=m.y) @@ -624,7 +672,7 @@ def test_update5(self): def test_update6(self): m = pe.ConcreteModel() - m.a = pe.Set(initialize=[1,2,3], ordered=True) + m.a = pe.Set(initialize=[1, 2, 3], ordered=True) m.x = pe.Var(m.a, within=pe.Binary) m.y = pe.Var(within=pe.Binary) m.obj = pe.Objective(expr=m.y) @@ -647,6 +695,8 @@ def test_update7(self): m.y = pe.Var() opt = self.opt + orig_only_child_vars = opt._only_child_vars + opt._only_child_vars = True opt.set_instance(m) self.assertEqual(opt._solver_model.getAttr('NumVars'), 2) @@ -665,3 +715,4 @@ def test_update7(self): opt.remove_variables([m.x]) opt.update() self.assertEqual(opt._solver_model.getAttr('NumVars'), 1) + opt._only_child_vars = orig_only_child_vars diff --git a/pyomo/contrib/appsi/solvers/tests/test_ipopt_persistent.py b/pyomo/contrib/appsi/solvers/tests/test_ipopt_persistent.py index 2751c0a4746..6b86deaa535 100644 --- a/pyomo/contrib/appsi/solvers/tests/test_ipopt_persistent.py +++ b/pyomo/contrib/appsi/solvers/tests/test_ipopt_persistent.py @@ -1,7 +1,7 @@ import pyomo.environ as pe import pyomo.common.unittest as unittest from pyomo.contrib.appsi.cmodel import cmodel_available -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL @unittest.skipUnless(cmodel_available, 'appsi extensions are not available') diff --git a/pyomo/contrib/appsi/solvers/tests/test_persistent_solvers.py b/pyomo/contrib/appsi/solvers/tests/test_persistent_solvers.py index 5036b274b61..1520407e294 100644 --- a/pyomo/contrib/appsi/solvers/tests/test_persistent_solvers.py +++ b/pyomo/contrib/appsi/solvers/tests/test_persistent_solvers.py @@ -1,14 +1,16 @@ import pyomo.environ as pe from pyomo.common.dependencies import attempt_import import pyomo.common.unittest as unittest + parameterized, param_available = attempt_import('parameterized') parameterized = parameterized.parameterized from pyomo.contrib.appsi.base import TerminationCondition, Results, PersistentSolver from pyomo.contrib.appsi.cmodel import cmodel_available -from pyomo.contrib.appsi.solvers import Gurobi, Ipopt, Cplex, Cbc +from pyomo.contrib.appsi.solvers import Gurobi, Ipopt, Cplex, Cbc, Highs from typing import Type from pyomo.core.expr.numeric_expr import LinearExpression import os + numpy, numpy_available = attempt_import('numpy') import random from pyomo import gdp @@ -17,11 +19,18 @@ if not param_available: raise unittest.SkipTest('Parameterized is not available.') -all_solvers = [('gurobi', Gurobi), ('ipopt', Ipopt), ('cplex', Cplex), ('cbc', Cbc)] -mip_solvers = [('gurobi', Gurobi), ('cplex', Cplex), ('cbc', Cbc)] +all_solvers = [ + ('gurobi', Gurobi), + ('ipopt', Ipopt), + ('cplex', Cplex), + ('cbc', Cbc), + ('highs', Highs), +] +mip_solvers = [('gurobi', Gurobi), ('cplex', Cplex), ('cbc', Cbc), ('highs', Highs)] nlp_solvers = [('ipopt', Ipopt)] qcp_solvers = [('gurobi', Gurobi), ('ipopt', Ipopt), ('cplex', Cplex)] miqcqp_solvers = [('gurobi', Gurobi), ('cplex', Cplex)] +only_child_vars_options = [True, False] """ @@ -40,7 +49,7 @@ get_reduced_costs x range constraints x MILP -Model updates - added constriants x +Model updates - added constraints x Model updates - removed constraints x Model updates - added vars Model updates - removed vars @@ -57,12 +66,47 @@ fixed variables """ + +def _load_tests(solver_list, only_child_vars_list): + res = list() + for solver_name, solver in solver_list: + for child_var_option in only_child_vars_list: + test_name = f"{solver_name}_only_child_vars_{child_var_option}" + res.append((test_name, solver, child_var_option)) + return res + + @unittest.skipUnless(cmodel_available, 'appsi extensions are not available') @unittest.skipUnless(numpy_available, 'numpy is not available') class TestSolvers(unittest.TestCase): - @parameterized.expand(input=all_solvers) - def test_stale_vars(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_remove_variable_and_objective( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + # this test is for issue #2888 + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) + if not opt.available(): + raise unittest.SkipTest + m = pe.ConcreteModel() + m.x = pe.Var(bounds=(2, None)) + m.obj = pe.Objective(expr=m.x) + res = opt.solve(m) + self.assertEqual(res.termination_condition, TerminationCondition.optimal) + self.assertAlmostEqual(m.x.value, 2) + + del m.x + del m.obj + m.x = pe.Var(bounds=(2, None)) + m.obj = pe.Objective(expr=m.x) + res = opt.solve(m) + self.assertEqual(res.termination_condition, TerminationCondition.optimal) + self.assertAlmostEqual(m.x.value, 2) + + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_stale_vars( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -92,7 +136,7 @@ def test_stale_vars(self, name: str, opt_class: Type[PersistentSolver]): res.solution_loader.load_vars() self.assertFalse(m.x.stale) self.assertFalse(m.y.stale) - self.assertTrue(m.z.stale) + self.assertTrue(m.z.stale) res = opt.solve(m) self.assertTrue(m.x.stale) @@ -101,11 +145,13 @@ def test_stale_vars(self, name: str, opt_class: Type[PersistentSolver]): res.solution_loader.load_vars([m.y]) self.assertTrue(m.x.stale) self.assertFalse(m.y.stale) - self.assertTrue(m.z.stale) - - @parameterized.expand(input=all_solvers) - def test_range_constraint(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + self.assertTrue(m.z.stale) + + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_range_constraint( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -124,15 +170,17 @@ def test_range_constraint(self, name: str, opt_class: Type[PersistentSolver]): duals = opt.get_duals() self.assertAlmostEqual(duals[m.c], 1) - @parameterized.expand(input=all_solvers) - def test_reduced_costs(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_reduced_costs( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() m.x = pe.Var(bounds=(-1, 1)) m.y = pe.Var(bounds=(-2, 2)) - m.obj = pe.Objective(expr=3*m.x + 4*m.y) + m.obj = pe.Objective(expr=3 * m.x + 4 * m.y) res = opt.solve(m) self.assertEqual(res.termination_condition, TerminationCondition.optimal) self.assertAlmostEqual(m.x.value, -1) @@ -141,9 +189,11 @@ def test_reduced_costs(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(rc[m.x], 3) self.assertAlmostEqual(rc[m.y], 4) - @parameterized.expand(input=all_solvers) - def test_reduced_costs2(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_reduced_costs2( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -161,9 +211,11 @@ def test_reduced_costs2(self, name: str, opt_class: Type[PersistentSolver]): rc = opt.get_reduced_costs() self.assertAlmostEqual(rc[m.x], 1) - @parameterized.expand(input=all_solvers) - def test_param_changes(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_param_changes( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -174,11 +226,11 @@ def test_param_changes(self, name: str, opt_class: Type[PersistentSolver]): m.b1 = pe.Param(mutable=True) m.b2 = pe.Param(mutable=True) m.obj = pe.Objective(expr=m.y) - m.c1 = pe.Constraint(expr=(0, m.y - m.a1*m.x - m.b1, None)) - m.c2 = pe.Constraint(expr=(None, -m.y + m.a2*m.x + m.b2, 0)) + m.c1 = pe.Constraint(expr=(0, m.y - m.a1 * m.x - m.b1, None)) + m.c2 = pe.Constraint(expr=(None, -m.y + m.a2 * m.x + m.b2, 0)) params_to_test = [(1, -1, 2, 1), (1, -2, 2, 1), (1, -1, 3, 1)] - for (a1, a2, b1, b2) in params_to_test: + for a1, a2, b1, b2 in params_to_test: m.a1.value = a1 m.a2.value = a2 m.b1.value = b1 @@ -193,13 +245,15 @@ def test_param_changes(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(duals[m.c1], (1 + a1 / (a2 - a1))) self.assertAlmostEqual(duals[m.c2], a1 / (a2 - a1)) - @parameterized.expand(input=all_solvers) - def test_immutable_param(self, name: str, opt_class: Type[PersistentSolver]): + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_immutable_param( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): """ This test is important because component_data_objects returns immutable params as floats. We want to make sure we process these correctly. """ - opt: PersistentSolver = opt_class() + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -210,11 +264,11 @@ def test_immutable_param(self, name: str, opt_class: Type[PersistentSolver]): m.b1 = pe.Param(mutable=True) m.b2 = pe.Param(mutable=True) m.obj = pe.Objective(expr=m.y) - m.c1 = pe.Constraint(expr=(0, m.y - m.a1*m.x - m.b1, None)) - m.c2 = pe.Constraint(expr=(None, -m.y + m.a2*m.x + m.b2, 0)) + m.c1 = pe.Constraint(expr=(0, m.y - m.a1 * m.x - m.b1, None)) + m.c2 = pe.Constraint(expr=(None, -m.y + m.a2 * m.x + m.b2, 0)) params_to_test = [(1, 2, 1), (1, 2, 1), (1, 3, 1)] - for (a1, b1, b2) in params_to_test: + for a1, b1, b2 in params_to_test: a2 = m.a2.value m.a1.value = a1 m.b1.value = b1 @@ -229,9 +283,11 @@ def test_immutable_param(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(duals[m.c1], (1 + a1 / (a2 - a1))) self.assertAlmostEqual(duals[m.c2], a1 / (a2 - a1)) - @parameterized.expand(input=all_solvers) - def test_equality(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_equality( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -246,7 +302,7 @@ def test_equality(self, name: str, opt_class: Type[PersistentSolver]): m.c2 = pe.Constraint(expr=m.y == m.a2 * m.x + m.b2) params_to_test = [(1, -1, 2, 1), (1, -2, 2, 1), (1, -1, 3, 1)] - for (a1, a2, b1, b2) in params_to_test: + for a1, a2, b1, b2 in params_to_test: m.a1.value = a1 m.a2.value = a2 m.b1.value = b1 @@ -261,9 +317,11 @@ def test_equality(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(duals[m.c1], (1 + a1 / (a2 - a1))) self.assertAlmostEqual(duals[m.c2], -a1 / (a2 - a1)) - @parameterized.expand(input=all_solvers) - def test_linear_expression(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_linear_expression( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -274,13 +332,17 @@ def test_linear_expression(self, name: str, opt_class: Type[PersistentSolver]): m.b1 = pe.Param(mutable=True) m.b2 = pe.Param(mutable=True) m.obj = pe.Objective(expr=m.y) - e = LinearExpression(constant=m.b1, linear_coefs=[-1, m.a1], linear_vars=[m.y, m.x]) + e = LinearExpression( + constant=m.b1, linear_coefs=[-1, m.a1], linear_vars=[m.y, m.x] + ) m.c1 = pe.Constraint(expr=e == 0) - e = LinearExpression(constant=m.b2, linear_coefs=[-1, m.a2], linear_vars=[m.y, m.x]) + e = LinearExpression( + constant=m.b2, linear_coefs=[-1, m.a2], linear_vars=[m.y, m.x] + ) m.c2 = pe.Constraint(expr=e == 0) params_to_test = [(1, -1, 2, 1), (1, -2, 2, 1), (1, -1, 3, 1)] - for (a1, a2, b1, b2) in params_to_test: + for a1, a2, b1, b2 in params_to_test: m.a1.value = a1 m.a2.value = a2 m.b1.value = b1 @@ -291,9 +353,11 @@ def test_linear_expression(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(res.best_feasible_objective, m.y.value) self.assertTrue(res.best_objective_bound <= m.y.value) - @parameterized.expand(input=all_solvers) - def test_no_objective(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_no_objective( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -308,7 +372,7 @@ def test_no_objective(self, name: str, opt_class: Type[PersistentSolver]): opt.config.stream_solver = True params_to_test = [(1, -1, 2, 1), (1, -2, 2, 1), (1, -1, 3, 1)] - for (a1, a2, b1, b2) in params_to_test: + for a1, a2, b1, b2 in params_to_test: m.a1.value = a1 m.a2.value = a2 m.b1.value = b1 @@ -323,9 +387,11 @@ def test_no_objective(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(duals[m.c1], 0) self.assertAlmostEqual(duals[m.c2], 0) - @parameterized.expand(input=all_solvers) - def test_add_remove_cons(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_add_remove_cons( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -373,9 +439,11 @@ def test_add_remove_cons(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(duals[m.c1], -(1 + a1 / (a2 - a1))) self.assertAlmostEqual(duals[m.c2], a1 / (a2 - a1)) - @parameterized.expand(input=all_solvers) - def test_results_infeasible(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_results_infeasible( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -390,19 +458,36 @@ def test_results_infeasible(self, name: str, opt_class: Type[PersistentSolver]): res = opt.solve(m) self.assertNotEqual(res.termination_condition, TerminationCondition.optimal) if opt_class is Ipopt: - acceptable_termination_conditions = {TerminationCondition.infeasible, - TerminationCondition.unbounded} + acceptable_termination_conditions = { + TerminationCondition.infeasible, + TerminationCondition.unbounded, + } else: - acceptable_termination_conditions = {TerminationCondition.infeasible, - TerminationCondition.infeasibleOrUnbounded} + acceptable_termination_conditions = { + TerminationCondition.infeasible, + TerminationCondition.infeasibleOrUnbounded, + } self.assertIn(res.termination_condition, acceptable_termination_conditions) self.assertAlmostEqual(m.x.value, None) self.assertAlmostEqual(m.y.value, None) self.assertTrue(res.best_feasible_objective is None) - @parameterized.expand(input=all_solvers) - def test_duals(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + with self.assertRaisesRegex( + RuntimeError, '.*does not currently have a valid solution.*' + ): + res.solution_loader.load_vars() + with self.assertRaisesRegex( + RuntimeError, '.*does not currently have valid duals.*' + ): + res.solution_loader.get_duals() + with self.assertRaisesRegex( + RuntimeError, '.*does not currently have valid reduced costs.*' + ): + res.solution_loader.get_reduced_costs() + + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_duals(self, name: str, opt_class: Type[PersistentSolver], only_child_vars): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -423,9 +508,11 @@ def test_duals(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(duals[m.c1], 0.5) self.assertNotIn(m.c2, duals) - @parameterized.expand(input=qcp_solvers) - def test_mutable_quadratic_coefficient(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(qcp_solvers, only_child_vars_options)) + def test_mutable_quadratic_coefficient( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -434,7 +521,7 @@ def test_mutable_quadratic_coefficient(self, name: str, opt_class: Type[Persiste m.a = pe.Param(initialize=1, mutable=True) m.b = pe.Param(initialize=-1, mutable=True) m.obj = pe.Objective(expr=m.x**2 + m.y**2) - m.c = pe.Constraint(expr=m.y >= (m.a*m.x + m.b)**2) + m.c = pe.Constraint(expr=m.y >= (m.a * m.x + m.b) ** 2) res = opt.solve(m) self.assertAlmostEqual(m.x.value, 0.41024548525899274, 4) @@ -445,9 +532,11 @@ def test_mutable_quadratic_coefficient(self, name: str, opt_class: Type[Persiste self.assertAlmostEqual(m.x.value, 0.10256137418973625, 4) self.assertAlmostEqual(m.y.value, 0.0869525991355825, 4) - @parameterized.expand(input=qcp_solvers) - def test_mutable_quadratic_objective(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(qcp_solvers, only_child_vars_options)) + def test_mutable_quadratic_objective( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -457,8 +546,8 @@ def test_mutable_quadratic_objective(self, name: str, opt_class: Type[Persistent m.b = pe.Param(initialize=-1, mutable=True) m.c = pe.Param(initialize=1, mutable=True) m.d = pe.Param(initialize=1, mutable=True) - m.obj = pe.Objective(expr=m.x**2 + m.c*m.y**2 + m.d*m.x) - m.ccon = pe.Constraint(expr=m.y >= (m.a*m.x + m.b)**2) + m.obj = pe.Objective(expr=m.x**2 + m.c * m.y**2 + m.d * m.x) + m.ccon = pe.Constraint(expr=m.y >= (m.a * m.x + m.b) ** 2) res = opt.solve(m) self.assertAlmostEqual(m.x.value, 0.2719178742733325, 4) @@ -470,9 +559,11 @@ def test_mutable_quadratic_objective(self, name: str, opt_class: Type[Persistent self.assertAlmostEqual(m.x.value, 0.6962249634573562, 4) self.assertAlmostEqual(m.y.value, 0.09227926676152151, 4) - @parameterized.expand(input=all_solvers) - def test_fixed_vars(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_fixed_vars( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) for treat_fixed_vars_as_params in [True, False]: opt.update_config.treat_fixed_vars_as_params = treat_fixed_vars_as_params if not opt.available(): @@ -508,9 +599,11 @@ def test_fixed_vars(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(m.x.value, 0) self.assertAlmostEqual(m.y.value, 2) - @parameterized.expand(input=all_solvers) - def test_fixed_vars_2(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_fixed_vars_2( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) opt.update_config.treat_fixed_vars_as_params = True if not opt.available(): raise unittest.SkipTest @@ -545,9 +638,11 @@ def test_fixed_vars_2(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(m.x.value, 0) self.assertAlmostEqual(m.y.value, 2) - @parameterized.expand(input=all_solvers) - def test_fixed_vars_3(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_fixed_vars_3( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) opt.update_config.treat_fixed_vars_as_params = True if not opt.available(): raise unittest.SkipTest @@ -560,9 +655,11 @@ def test_fixed_vars_3(self, name: str, opt_class: Type[PersistentSolver]): res = opt.solve(m) self.assertAlmostEqual(m.x.value, 2) - @parameterized.expand(input=nlp_solvers) - def test_fixed_vars_4(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(nlp_solvers, only_child_vars_options)) + def test_fixed_vars_4( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) opt.update_config.treat_fixed_vars_as_params = True if not opt.available(): raise unittest.SkipTest @@ -579,9 +676,11 @@ def test_fixed_vars_4(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(m.x.value, 2**0.5) self.assertAlmostEqual(m.y.value, 2**0.5) - @parameterized.expand(input=all_solvers) - def test_mutable_param_with_range(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_mutable_param_with_range( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest try: @@ -602,19 +701,45 @@ def test_mutable_param_with_range(self, name: str, opt_class: Type[PersistentSol m.con2 = pe.Constraint(expr=(m.b2, m.y - m.a2 * m.x, m.c2)) np.random.seed(0) - params_to_test = [(np.random.uniform(0, 10), np.random.uniform(-10, 0), - np.random.uniform(-5, 2.5), np.random.uniform(-5, 2.5), - np.random.uniform(2.5, 10), np.random.uniform(2.5, 10), pe.minimize), - (np.random.uniform(0, 10), np.random.uniform(-10, 0), - np.random.uniform(-5, 2.5), np.random.uniform(-5, 2.5), - np.random.uniform(2.5, 10), np.random.uniform(2.5, 10), pe.maximize), - (np.random.uniform(0, 10), np.random.uniform(-10, 0), - np.random.uniform(-5, 2.5), np.random.uniform(-5, 2.5), - np.random.uniform(2.5, 10), np.random.uniform(2.5, 10), pe.minimize), - (np.random.uniform(0, 10), np.random.uniform(-10, 0), - np.random.uniform(-5, 2.5), np.random.uniform(-5, 2.5), - np.random.uniform(2.5, 10), np.random.uniform(2.5, 10), pe.maximize)] - for (a1, a2, b1, b2, c1, c2, sense) in params_to_test: + params_to_test = [ + ( + np.random.uniform(0, 10), + np.random.uniform(-10, 0), + np.random.uniform(-5, 2.5), + np.random.uniform(-5, 2.5), + np.random.uniform(2.5, 10), + np.random.uniform(2.5, 10), + pe.minimize, + ), + ( + np.random.uniform(0, 10), + np.random.uniform(-10, 0), + np.random.uniform(-5, 2.5), + np.random.uniform(-5, 2.5), + np.random.uniform(2.5, 10), + np.random.uniform(2.5, 10), + pe.maximize, + ), + ( + np.random.uniform(0, 10), + np.random.uniform(-10, 0), + np.random.uniform(-5, 2.5), + np.random.uniform(-5, 2.5), + np.random.uniform(2.5, 10), + np.random.uniform(2.5, 10), + pe.minimize, + ), + ( + np.random.uniform(0, 10), + np.random.uniform(-10, 0), + np.random.uniform(-5, 2.5), + np.random.uniform(-5, 2.5), + np.random.uniform(2.5, 10), + np.random.uniform(2.5, 10), + pe.maximize, + ), + ] + for a1, a2, b1, b2, c1, c2, sense in params_to_test: m.a1.value = float(a1) m.a2.value = float(a2) m.b1.value = float(b1) @@ -641,9 +766,11 @@ def test_mutable_param_with_range(self, name: str, opt_class: Type[PersistentSol self.assertAlmostEqual(duals[m.con1], (1 + a1 / (a2 - a1)), 6) self.assertAlmostEqual(duals[m.con2], -a1 / (a2 - a1), 6) - @parameterized.expand(input=all_solvers) - def test_add_and_remove_vars(self, name: str, opt_class: Type[PersistentSolver]): - opt = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_add_and_remove_vars( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -666,9 +793,10 @@ def test_add_and_remove_vars(self, name: str, opt_class: Type[PersistentSolver]) a2 = -1 b1 = 2 b2 = 1 - m.c1 = pe.Constraint(expr=(0, m.y - a1*m.x-b1, None)) - m.c2 = pe.Constraint(expr=(None, -m.y + a2*m.x+b2, 0)) - opt.add_variables([m.x]) + m.c1 = pe.Constraint(expr=(0, m.y - a1 * m.x - b1, None)) + m.c2 = pe.Constraint(expr=(None, -m.y + a2 * m.x + b2, 0)) + if only_child_vars: + opt.add_variables([m.x]) opt.add_constraints([m.c1, m.c2]) res = opt.solve(m) self.assertEqual(res.termination_condition, TerminationCondition.optimal) @@ -676,7 +804,8 @@ def test_add_and_remove_vars(self, name: str, opt_class: Type[PersistentSolver]) self.assertAlmostEqual(m.x.value, (b2 - b1) / (a1 - a2)) self.assertAlmostEqual(m.y.value, a1 * (b2 - b1) / (a1 - a2) + b1) opt.remove_constraints([m.c1, m.c2]) - opt.remove_variables([m.x]) + if only_child_vars: + opt.remove_variables([m.x]) m.x.value = None res = opt.solve(m) self.assertEqual(res.termination_condition, TerminationCondition.optimal) @@ -686,9 +815,9 @@ def test_add_and_remove_vars(self, name: str, opt_class: Type[PersistentSolver]) with self.assertRaises(Exception): opt.load_vars([m.x]) - @parameterized.expand(input=nlp_solvers) - def test_exp(self, name: str, opt_class: Type[PersistentSolver]): - opt = opt_class() + @parameterized.expand(input=_load_tests(nlp_solvers, only_child_vars_options)) + def test_exp(self, name: str, opt_class: Type[PersistentSolver], only_child_vars): + opt = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -700,9 +829,9 @@ def test_exp(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(m.x.value, -0.42630274815985264) self.assertAlmostEqual(m.y.value, 0.6529186341994245) - @parameterized.expand(input=nlp_solvers) - def test_log(self, name: str, opt_class: Type[PersistentSolver]): - opt = opt_class() + @parameterized.expand(input=_load_tests(nlp_solvers, only_child_vars_options)) + def test_log(self, name: str, opt_class: Type[PersistentSolver], only_child_vars): + opt = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -714,9 +843,11 @@ def test_log(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(m.x.value, 0.6529186341994245) self.assertAlmostEqual(m.y.value, -0.42630274815985264) - @parameterized.expand(input=all_solvers) - def test_with_numpy(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_with_numpy( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -727,16 +858,26 @@ def test_with_numpy(self, name: str, opt_class: Type[PersistentSolver]): b1 = 3 a2 = -2 b2 = 1 - m.c1 = pe.Constraint(expr=(numpy.float64(0), m.y - numpy.int64(1) * m.x - numpy.float32(3), None)) - m.c2 = pe.Constraint(expr=(None, -m.y + numpy.int32(-2) * m.x + numpy.float64(1), numpy.float16(0))) + m.c1 = pe.Constraint( + expr=(numpy.float64(0), m.y - numpy.int64(1) * m.x - numpy.float32(3), None) + ) + m.c2 = pe.Constraint( + expr=( + None, + -m.y + numpy.int32(-2) * m.x + numpy.float64(1), + numpy.float16(0), + ) + ) res = opt.solve(m) self.assertEqual(res.termination_condition, TerminationCondition.optimal) self.assertAlmostEqual(m.x.value, (b2 - b1) / (a1 - a2)) self.assertAlmostEqual(m.y.value, a1 * (b2 - b1) / (a1 - a2) + b1) - @parameterized.expand(input=all_solvers) - def test_bounds_with_params(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_bounds_with_params( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -766,9 +907,11 @@ def test_bounds_with_params(self, name: str, opt_class: Type[PersistentSolver]): res = opt.solve(m) self.assertAlmostEqual(m.y.value, 3) - @parameterized.expand(input=all_solvers) - def test_solution_loader(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_solution_loader( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -817,12 +960,15 @@ def test_solution_loader(self, name: str, opt_class: Type[PersistentSolver]): self.assertIn(m.c1, duals) self.assertAlmostEqual(duals[m.c1], 1) - @parameterized.expand(input=all_solvers) - def test_time_limit(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_time_limit( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest from sys import platform + if platform == 'win32': raise unittest.SkipTest @@ -839,16 +985,26 @@ def test_time_limit(self, name: str, opt_class: Type[PersistentSolver]): for t in m.tasks: coefs.append(random.uniform(0, 10)) lin_vars.append(m.x[j, t]) - obj_expr = LinearExpression(linear_coefs=coefs, linear_vars=lin_vars, constant=0) + obj_expr = LinearExpression( + linear_coefs=coefs, linear_vars=lin_vars, constant=0 + ) m.obj = pe.Objective(expr=obj_expr, sense=pe.maximize) m.c1 = pe.Constraint(m.jobs) m.c2 = pe.Constraint(m.tasks) for j in m.jobs: - expr = LinearExpression(linear_coefs=[1]*N, linear_vars=[m.x[j, t] for t in m.tasks], constant=0) + expr = LinearExpression( + linear_coefs=[1] * N, + linear_vars=[m.x[j, t] for t in m.tasks], + constant=0, + ) m.c1[j] = expr == 1 for t in m.tasks: - expr = LinearExpression(linear_coefs=[1]*N, linear_vars=[m.x[j, t] for j in m.jobs], constant=0) + expr = LinearExpression( + linear_coefs=[1] * N, + linear_vars=[m.x[j, t] for j in m.jobs], + constant=0, + ) m.c2[t] = expr == 1 if type(opt) is Ipopt: opt.config.time_limit = 1e-6 @@ -857,13 +1013,20 @@ def test_time_limit(self, name: str, opt_class: Type[PersistentSolver]): opt.config.load_solution = False res = opt.solve(m) if type(opt) is Cbc: # I can't figure out why CBC is reporting max iter... - self.assertIn(res.termination_condition, {TerminationCondition.maxIterations, TerminationCondition.maxTimeLimit}) + self.assertIn( + res.termination_condition, + {TerminationCondition.maxIterations, TerminationCondition.maxTimeLimit}, + ) else: - self.assertEqual(res.termination_condition, TerminationCondition.maxTimeLimit) - - @parameterized.expand(input=all_solvers) - def test_objective_changes(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + self.assertEqual( + res.termination_condition, TerminationCondition.maxTimeLimit + ) + + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_objective_changes( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -874,20 +1037,25 @@ def test_objective_changes(self, name: str, opt_class: Type[PersistentSolver]): m.obj = pe.Objective(expr=m.y) res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, 1) - m.obj = pe.Objective(expr=2*m.y) + m.obj = pe.Objective(expr=2 * m.y) res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, 2) - m.obj.expr = 3*m.y + m.obj.expr = 3 * m.y res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, 3) m.obj.sense = pe.maximize opt.config.load_solution = False res = opt.solve(m) - self.assertIn(res.termination_condition, {TerminationCondition.unbounded, - TerminationCondition.infeasibleOrUnbounded}) + self.assertIn( + res.termination_condition, + { + TerminationCondition.unbounded, + TerminationCondition.infeasibleOrUnbounded, + }, + ) m.obj.sense = pe.minimize opt.config.load_solution = True - m.obj = pe.Objective(expr=m.x*m.y) + m.obj = pe.Objective(expr=m.x * m.y) m.x.fix(2) res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, 6, 6) @@ -915,9 +1083,11 @@ def test_objective_changes(self, name: str, opt_class: Type[PersistentSolver]): res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, 4) - @parameterized.expand(input=all_solvers) - def test_domain(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_domain( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -939,9 +1109,11 @@ def test_domain(self, name: str, opt_class: Type[PersistentSolver]): res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, 0) - @parameterized.expand(input=mip_solvers) - def test_domain_with_integers(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(mip_solvers, only_child_vars_options)) + def test_domain_with_integers( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -963,9 +1135,11 @@ def test_domain_with_integers(self, name: str, opt_class: Type[PersistentSolver] res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, 1) - @parameterized.expand(input=all_solvers) - def test_fixed_binaries(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_fixed_binaries( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest m = pe.ConcreteModel() @@ -980,7 +1154,7 @@ def test_fixed_binaries(self, name: str, opt_class: Type[PersistentSolver]): res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, 1) - opt: PersistentSolver = opt_class() + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) opt.update_config.treat_fixed_vars_as_params = False m.x.fix(0) res = opt.solve(m) @@ -989,9 +1163,11 @@ def test_fixed_binaries(self, name: str, opt_class: Type[PersistentSolver]): res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, 1) - @parameterized.expand(input=mip_solvers) - def test_with_gdp(self, name: str, opt_class: Type[PersistentSolver]): - opt: PersistentSolver = opt_class() + @parameterized.expand(input=_load_tests(mip_solvers, only_child_vars_options)) + def test_with_gdp( + self, name: str, opt_class: Type[PersistentSolver], only_child_vars + ): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) if not opt.available(): raise unittest.SkipTest @@ -1013,7 +1189,7 @@ def test_with_gdp(self, name: str, opt_class: Type[PersistentSolver]): self.assertAlmostEqual(m.x.value, 0) self.assertAlmostEqual(m.y.value, 1) - opt: PersistentSolver = opt_class() + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) opt.use_extensions = True res = opt.solve(m) self.assertAlmostEqual(res.best_feasible_objective, 1) @@ -1082,6 +1258,29 @@ def test_variables_elsewhere2(self, name: str, opt_class: Type[PersistentSolver] self.assertIn(m.y, sol) self.assertNotIn(m.z, sol) + @parameterized.expand(input=_load_tests(all_solvers, only_child_vars_options)) + def test_bug_1(self, name: str, opt_class: Type[PersistentSolver], only_child_vars): + opt: PersistentSolver = opt_class(only_child_vars=only_child_vars) + if not opt.available(): + raise unittest.SkipTest + + m = pe.ConcreteModel() + m.x = pe.Var(bounds=(3, 7)) + m.y = pe.Var(bounds=(-10, 10)) + m.p = pe.Param(mutable=True, initialize=0) + + m.obj = pe.Objective(expr=m.y) + m.c = pe.Constraint(expr=m.y >= m.p * m.x) + + res = opt.solve(m) + self.assertEqual(res.termination_condition, TerminationCondition.optimal) + self.assertAlmostEqual(res.best_feasible_objective, 0) + + m.p.value = 1 + res = opt.solve(m) + self.assertEqual(res.termination_condition, TerminationCondition.optimal) + self.assertAlmostEqual(res.best_feasible_objective, 3) + @unittest.skipUnless(cmodel_available, 'appsi extensions are not available') class TestLegacySolverInterface(unittest.TestCase): @@ -1098,12 +1297,12 @@ def test_param_updates(self, name: str, opt_class: Type[PersistentSolver]): m.b1 = pe.Param(mutable=True) m.b2 = pe.Param(mutable=True) m.obj = pe.Objective(expr=m.y) - m.c1 = pe.Constraint(expr=(0, m.y - m.a1*m.x - m.b1, None)) - m.c2 = pe.Constraint(expr=(None, -m.y + m.a2*m.x + m.b2, 0)) + m.c1 = pe.Constraint(expr=(0, m.y - m.a1 * m.x - m.b1, None)) + m.c2 = pe.Constraint(expr=(None, -m.y + m.a2 * m.x + m.b2, 0)) m.dual = pe.Suffix(direction=pe.Suffix.IMPORT) params_to_test = [(1, -1, 2, 1), (1, -2, 2, 1), (1, -1, 3, 1)] - for (a1, a2, b1, b2) in params_to_test: + for a1, a2, b1, b2 in params_to_test: m.a1.value = a1 m.a2.value = a2 m.b1.value = b1 diff --git a/pyomo/contrib/appsi/tests/test_base.py b/pyomo/contrib/appsi/tests/test_base.py new file mode 100644 index 00000000000..0d67ca4d01a --- /dev/null +++ b/pyomo/contrib/appsi/tests/test_base.py @@ -0,0 +1,91 @@ +from pyomo.common import unittest +from pyomo.contrib import appsi +import pyomo.environ as pe +from pyomo.core.base.var import ScalarVar + + +class TestResults(unittest.TestCase): + def test_uninitialized(self): + res = appsi.base.Results() + self.assertIsNone(res.best_feasible_objective) + self.assertIsNone(res.best_objective_bound) + self.assertEqual( + res.termination_condition, appsi.base.TerminationCondition.unknown + ) + + with self.assertRaisesRegex( + RuntimeError, '.*does not currently have a valid solution.*' + ): + res.solution_loader.load_vars() + with self.assertRaisesRegex( + RuntimeError, '.*does not currently have valid duals.*' + ): + res.solution_loader.get_duals() + with self.assertRaisesRegex( + RuntimeError, '.*does not currently have valid reduced costs.*' + ): + res.solution_loader.get_reduced_costs() + with self.assertRaisesRegex( + RuntimeError, '.*does not currently have valid slacks.*' + ): + res.solution_loader.get_slacks() + + def test_results(self): + m = pe.ConcreteModel() + m.x = ScalarVar() + m.y = ScalarVar() + m.c1 = pe.Constraint(expr=m.x == 1) + m.c2 = pe.Constraint(expr=m.y == 2) + + primals = dict() + primals[id(m.x)] = (m.x, 1) + primals[id(m.y)] = (m.y, 2) + duals = dict() + duals[m.c1] = 3 + duals[m.c2] = 4 + rc = dict() + rc[id(m.x)] = (m.x, 5) + rc[id(m.y)] = (m.y, 6) + slacks = dict() + slacks[m.c1] = 7 + slacks[m.c2] = 8 + + res = appsi.base.Results() + res.solution_loader = appsi.base.SolutionLoader( + primals=primals, duals=duals, slacks=slacks, reduced_costs=rc + ) + + res.solution_loader.load_vars() + self.assertAlmostEqual(m.x.value, 1) + self.assertAlmostEqual(m.y.value, 2) + + m.x.value = None + m.y.value = None + + res.solution_loader.load_vars([m.y]) + self.assertIsNone(m.x.value) + self.assertAlmostEqual(m.y.value, 2) + + duals2 = res.solution_loader.get_duals() + self.assertAlmostEqual(duals[m.c1], duals2[m.c1]) + self.assertAlmostEqual(duals[m.c2], duals2[m.c2]) + + duals2 = res.solution_loader.get_duals([m.c2]) + self.assertNotIn(m.c1, duals2) + self.assertAlmostEqual(duals[m.c2], duals2[m.c2]) + + rc2 = res.solution_loader.get_reduced_costs() + self.assertAlmostEqual(rc[id(m.x)][1], rc2[m.x]) + self.assertAlmostEqual(rc[id(m.y)][1], rc2[m.y]) + + rc2 = res.solution_loader.get_reduced_costs([m.y]) + self.assertNotIn(m.x, rc2) + self.assertAlmostEqual(rc[id(m.y)][1], rc2[m.y]) + + slacks2 = res.solution_loader.get_slacks() + self.assertAlmostEqual(slacks[m.c1], slacks2[m.c1]) + self.assertAlmostEqual(slacks[m.c2], slacks2[m.c2]) + + slacks2 = res.solution_loader.get_slacks([m.c2]) + self.assertNotIn(m.c1, slacks2) + self.assertAlmostEqual(slacks[m.c2], slacks2[m.c2]) diff --git a/pyomo/contrib/appsi/tests/test_fbbt.py b/pyomo/contrib/appsi/tests/test_fbbt.py index 4f06846dce0..f92960769cf 100644 --- a/pyomo/contrib/appsi/tests/test_fbbt.py +++ b/pyomo/contrib/appsi/tests/test_fbbt.py @@ -5,6 +5,7 @@ from pyomo.contrib.fbbt.tests.test_fbbt import FbbtTestBase from pyomo.common.errors import InfeasibleConstraintException import math + pe = pyo @@ -72,7 +73,7 @@ def test_persistent(self): def test_sync_after_infeasible(self): m = pe.ConcreteModel() - m.x = pe.Var(bounds=(1,1)) + m.x = pe.Var(bounds=(1, 1)) m.y = pe.Var() m.c1 = pe.Constraint(expr=m.x == m.y) m.c2 = pe.Constraint(expr=m.y == 2) @@ -89,7 +90,7 @@ def test_sync_after_infeasible(self): self.assertAlmostEqual(m.y.ub, 1) m = pe.ConcreteModel() - m.x = pe.Var(bounds=(1,1)) + m.x = pe.Var(bounds=(1, 1)) m.y = pe.Var() m.c1 = pe.Constraint(expr=m.x == m.y) m.c2 = pe.Constraint(expr=m.y == 2) @@ -128,11 +129,11 @@ def test_deactivated_constraints(self): def test_named_exprs(self): m = pe.ConcreteModel() - m.a = pe.Set(initialize=[1,2,3]) + m.a = pe.Set(initialize=[1, 2, 3]) m.x = pe.Var(m.a, bounds=(0, None)) m.e = pe.Expression(m.a) for i in m.a: - m.e[i].expr = i*m.x[i] + m.e[i].expr = i * m.x[i] m.c = pe.Constraint(expr=sum(m.e.values()) == 0) it = appsi.fbbt.IntervalTightener() it.perform_fbbt(m) diff --git a/pyomo/contrib/appsi/tests/test_interval.py b/pyomo/contrib/appsi/tests/test_interval.py index 3f37b808f31..7963cc31665 100644 --- a/pyomo/contrib/appsi/tests/test_interval.py +++ b/pyomo/contrib/appsi/tests/test_interval.py @@ -30,7 +30,23 @@ def setUp(self): @unittest.skipUnless(cmodel_available, 'appsi extensions are not available') class TestCInterval(unittest.TestCase): def test_pow_with_inf(self): - x_list = [0, -math.inf, math.inf, -3, 3, -2, 2, -1, 1, -2.5, 2.5, -0.5, 0.5, -1.5, 1.5] + x_list = [ + 0, + -math.inf, + math.inf, + -3, + 3, + -2, + 2, + -1, + 1, + -2.5, + 2.5, + -0.5, + 0.5, + -1.5, + 1.5, + ] y_list = list(x_list) for x in x_list: for y in y_list: diff --git a/pyomo/contrib/appsi/utils/__init__.py b/pyomo/contrib/appsi/utils/__init__.py index 1758d74faed..f665736fd4a 100644 --- a/pyomo/contrib/appsi/utils/__init__.py +++ b/pyomo/contrib/appsi/utils/__init__.py @@ -1,2 +1,2 @@ from .get_objective import get_objective -from .collect_vars_and_named_exprs import collect_vars_and_named_exprs \ No newline at end of file +from .collect_vars_and_named_exprs import collect_vars_and_named_exprs diff --git a/pyomo/contrib/appsi/utils/collect_vars_and_named_exprs.py b/pyomo/contrib/appsi/utils/collect_vars_and_named_exprs.py index 6814d036e32..9027080f08c 100644 --- a/pyomo/contrib/appsi/utils/collect_vars_and_named_exprs.py +++ b/pyomo/contrib/appsi/utils/collect_vars_and_named_exprs.py @@ -1,5 +1,5 @@ from pyomo.core.expr.visitor import ExpressionValueVisitor, nonpyomo_leaf_types -from pyomo.core.expr import current as _expr +import pyomo.core.expr as EXPR class _VarAndNamedExprCollector(ExpressionValueVisitor): @@ -26,7 +26,7 @@ def visiting_potential_leaf(self, node): self.named_expressions[id(node)] = node return False, None - if type(node) is _expr.ExternalFunctionExpression: + if type(node) is EXPR.ExternalFunctionExpression: self._external_functions[id(node)] = node return False, None @@ -42,7 +42,9 @@ def visiting_potential_leaf(self, node): def collect_vars_and_named_exprs(expr): _visitor.__init__() _visitor.dfs_postorder_stack(expr) - return (list(_visitor.named_expressions.values()), - list(_visitor.variables.values()), - list(_visitor.fixed_vars.values()), - list(_visitor._external_functions.values())) + return ( + list(_visitor.named_expressions.values()), + list(_visitor.variables.values()), + list(_visitor.fixed_vars.values()), + list(_visitor._external_functions.values()), + ) diff --git a/pyomo/contrib/appsi/utils/get_objective.py b/pyomo/contrib/appsi/utils/get_objective.py index 7d44721e262..30dd911f9c8 100644 --- a/pyomo/contrib/appsi/utils/get_objective.py +++ b/pyomo/contrib/appsi/utils/get_objective.py @@ -3,7 +3,9 @@ def get_objective(block): obj = None - for o in block.component_data_objects(Objective, descend_into=True, active=True, sort=True): + for o in block.component_data_objects( + Objective, descend_into=True, active=True, sort=True + ): if obj is not None: raise ValueError('Multiple active objectives found') obj = o diff --git a/pyomo/contrib/appsi/utils/tests/test_collect_vars_and_named_exprs.py b/pyomo/contrib/appsi/utils/tests/test_collect_vars_and_named_exprs.py index 85ab7ad6096..4c2a167a017 100644 --- a/pyomo/contrib/appsi/utils/tests/test_collect_vars_and_named_exprs.py +++ b/pyomo/contrib/appsi/utils/tests/test_collect_vars_and_named_exprs.py @@ -3,7 +3,7 @@ from pyomo.contrib.appsi.utils import collect_vars_and_named_exprs from pyomo.contrib.appsi.cmodel import cmodel, cmodel_available from typing import Callable -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL class TestCollectVarsAndNamedExpressions(unittest.TestCase): @@ -12,9 +12,9 @@ def basics_helper(self, collector: Callable, *args): m.x = pe.Var() m.y = pe.Var() m.z = pe.Var() - m.E = pe.Expression(expr=2*m.z + 1) + m.E = pe.Expression(expr=2 * m.z + 1) m.y.fix(3) - e = m.x*m.y + m.x*m.E + e = m.x * m.y + m.x * m.E named_exprs, var_list, fixed_vars, external_funcs = collector(e, *args) self.assertEqual([m.E], named_exprs) self.assertEqual([m.x, m.y, m.z], var_list) @@ -38,10 +38,10 @@ def external_func_helper(self, collector: Callable, *args): m.y = pe.Var() m.z = pe.Var() m.hypot = pe.ExternalFunction(library=DLL, function='gsl_hypot') - func = m.hypot(m.x, m.x*m.y) - m.E = pe.Expression(expr=2*func) + func = m.hypot(m.x, m.x * m.y) + m.E = pe.Expression(expr=2 * func) m.y.fix(3) - e = m.z + m.x*m.E + e = m.z + m.x * m.E named_exprs, var_list, fixed_vars, external_funcs = collector(e, *args) self.assertEqual([m.E], named_exprs) self.assertEqual([m.z, m.x, m.y], var_list) diff --git a/pyomo/contrib/appsi/writers/lp_writer.py b/pyomo/contrib/appsi/writers/lp_writer.py index f5f73007bc3..8a76fa5f9eb 100644 --- a/pyomo/contrib/appsi/writers/lp_writer.py +++ b/pyomo/contrib/appsi/writers/lp_writer.py @@ -16,7 +16,7 @@ class LPWriter(PersistentBase): - def __init__(self, only_child_vars=True): + def __init__(self, only_child_vars=False): super(LPWriter, self).__init__(only_child_vars=only_child_vars) self._config = WriterConfig() self._writer = None @@ -67,10 +67,18 @@ def set_instance(self, model): self.set_objective(None) def _add_variables(self, variables: List[_GeneralVarData]): - cmodel.process_pyomo_vars(self._expr_types, variables, self._pyomo_var_to_solver_var_map, - self._pyomo_param_to_solver_param_map, self._vars, - self._solver_var_to_pyomo_var_map, True, self._symbol_map, - self._var_labeler, False) + cmodel.process_pyomo_vars( + self._expr_types, + variables, + self._pyomo_var_to_solver_var_map, + self._pyomo_param_to_solver_param_map, + self._vars, + self._solver_var_to_pyomo_var_map, + True, + self._symbol_map, + self._var_labeler, + False, + ) def _add_params(self, params: List[_ParamData]): cparams = cmodel.create_params(len(params)) @@ -110,10 +118,18 @@ def _remove_params(self, params: List[_ParamData]): self._symbol_map.removeSymbol(p) def _update_variables(self, variables: List[_GeneralVarData]): - cmodel.process_pyomo_vars(self._expr_types, variables, self._pyomo_var_to_solver_var_map, - self._pyomo_param_to_solver_param_map, self._vars, - self._solver_var_to_pyomo_var_map, False, None, - None, True) + cmodel.process_pyomo_vars( + self._expr_types, + variables, + self._pyomo_var_to_solver_var_map, + self._pyomo_param_to_solver_param_map, + self._vars, + self._solver_var_to_pyomo_var_map, + False, + None, + None, + True, + ) def update_params(self): for p_id, p in self._params.items(): @@ -121,8 +137,12 @@ def update_params(self): cp.value = p.value def _set_objective(self, obj: _GeneralObjectiveData): - cobj = cmodel.process_lp_objective(self._expr_types, obj, self._pyomo_var_to_solver_var_map, - self._pyomo_param_to_solver_param_map) + cobj = cmodel.process_lp_objective( + self._expr_types, + obj, + self._pyomo_var_to_solver_var_map, + self._pyomo_param_to_solver_param_map, + ) if obj is None: sense = 0 cname = 'objective' @@ -152,10 +172,14 @@ def write(self, model: _BlockData, filename: str, timer: HierarchicalTimer = Non timer.stop('write file') def get_vars(self): - return [self._solver_var_to_pyomo_var_map[i] for i in self._writer.get_solve_vars()] + return [ + self._solver_var_to_pyomo_var_map[i] for i in self._writer.get_solve_vars() + ] def get_ordered_cons(self): - return [self._solver_con_to_pyomo_con_map[i] for i in self._writer.get_solve_cons()] + return [ + self._solver_con_to_pyomo_con_map[i] for i in self._writer.get_solve_cons() + ] def get_active_objective(self): return self._objective diff --git a/pyomo/contrib/appsi/writers/nl_writer.py b/pyomo/contrib/appsi/writers/nl_writer.py index b082742a86c..9c739fd6ebb 100644 --- a/pyomo/contrib/appsi/writers/nl_writer.py +++ b/pyomo/contrib/appsi/writers/nl_writer.py @@ -19,7 +19,7 @@ class NLWriter(PersistentBase): - def __init__(self, only_child_vars=True): + def __init__(self, only_child_vars=False): super(NLWriter, self).__init__(only_child_vars=only_child_vars) self._config = WriterConfig() self._writer = None @@ -59,10 +59,6 @@ def set_instance(self, model): self._var_labeler = TextLabeler() self._con_labeler = TextLabeler() self._param_labeler = TextLabeler() - else: - self._var_labeler = NumericLabeler('x') - self._con_labeler = NumericLabeler('c') - self._param_labeler = NumericLabeler('p') self._writer = cmodel.NLWriter() @@ -72,33 +68,62 @@ def set_instance(self, model): self._set_pyomo_amplfunc_env() def _add_variables(self, variables: List[_GeneralVarData]): - cmodel.process_pyomo_vars(self._expr_types, variables, self._pyomo_var_to_solver_var_map, - self._pyomo_param_to_solver_param_map, self._vars, - self._solver_var_to_pyomo_var_map, False, None, None, False) + if self.config.symbolic_solver_labels: + set_name = True + symbol_map = self._symbol_map + labeler = self._var_labeler + else: + set_name = False + symbol_map = None + labeler = None + cmodel.process_pyomo_vars( + self._expr_types, + variables, + self._pyomo_var_to_solver_var_map, + self._pyomo_param_to_solver_param_map, + self._vars, + self._solver_var_to_pyomo_var_map, + set_name, + symbol_map, + labeler, + False, + ) def _add_params(self, params: List[_ParamData]): cparams = cmodel.create_params(len(params)) for ndx, p in enumerate(params): cp = cparams[ndx] - cp.name = self._symbol_map.getSymbol(p, self._param_labeler) cp.value = p.value self._pyomo_param_to_solver_param_map[id(p)] = cp + if self.config.symbolic_solver_labels: + for ndx, p in enumerate(params): + cp = cparams[ndx] + cp.name = self._symbol_map.getSymbol(p, self._param_labeler) def _add_constraints(self, cons: List[_GeneralConstraintData]): - cmodel.process_nl_constraints(self._writer, - self._expr_types, - cons, - self._pyomo_var_to_solver_var_map, - self._pyomo_param_to_solver_param_map, - self._active_constraints, - self._pyomo_con_to_solver_con_map, - self._solver_con_to_pyomo_con_map) + cmodel.process_nl_constraints( + self._writer, + self._expr_types, + cons, + self._pyomo_var_to_solver_var_map, + self._pyomo_param_to_solver_param_map, + self._active_constraints, + self._pyomo_con_to_solver_con_map, + self._solver_con_to_pyomo_con_map, + ) + if self.config.symbolic_solver_labels: + for c, cc in self._pyomo_con_to_solver_con_map.items(): + cc.name = self._symbol_map.getSymbol(c, self._con_labeler) def _add_sos_constraints(self, cons: List[_SOSConstraintData]): if len(cons) != 0: raise NotImplementedError('NL writer does not support SOS constraints') def _remove_constraints(self, cons: List[_GeneralConstraintData]): + if self.config.symbolic_solver_labels: + for c in cons: + self._symbol_map.removeSymbol(c) + self._con_labeler.remove_obj(c) for c in cons: cc = self._pyomo_con_to_solver_con_map.pop(c) self._writer.remove_constraint(cc) @@ -109,20 +134,35 @@ def _remove_sos_constraints(self, cons: List[_SOSConstraintData]): raise NotImplementedError('NL writer does not support SOS constraints') def _remove_variables(self, variables: List[_GeneralVarData]): + if self.config.symbolic_solver_labels: + for v in variables: + self._symbol_map.removeSymbol(v) + self._var_labeler.remove_obj(v) for v in variables: cvar = self._pyomo_var_to_solver_var_map.pop(id(v)) del self._solver_var_to_pyomo_var_map[cvar] - # self._symbol_map.removeSymbol(v) def _remove_params(self, params: List[_ParamData]): + if self.config.symbolic_solver_labels: + for p in params: + self._symbol_map.removeSymbol(p) + self._param_labeler.remove_obj(p) for p in params: del self._pyomo_param_to_solver_param_map[id(p)] - self._symbol_map.removeSymbol(p) def _update_variables(self, variables: List[_GeneralVarData]): - cmodel.process_pyomo_vars(self._expr_types, variables, self._pyomo_var_to_solver_var_map, - self._pyomo_param_to_solver_param_map, self._vars, - self._solver_var_to_pyomo_var_map, False, None, None, True) + cmodel.process_pyomo_vars( + self._expr_types, + variables, + self._pyomo_var_to_solver_var_map, + self._pyomo_param_to_solver_param_map, + self._vars, + self._solver_var_to_pyomo_var_map, + False, + None, + None, + True, + ) def update_params(self): for p_id, p in self._params.items(): @@ -138,26 +178,41 @@ def _set_objective(self, obj: _GeneralObjectiveData): sense = 0 else: pyomo_expr_types = cmodel.PyomoExprTypes() - repn = generate_standard_repn(obj.expr, compute_values=False, quadratic=False) - const = cmodel.appsi_expr_from_pyomo_expr(repn.constant, - self._pyomo_var_to_solver_var_map, - self._pyomo_param_to_solver_param_map, - pyomo_expr_types) - lin_vars = [self._pyomo_var_to_solver_var_map[id(i)] for i in repn.linear_vars] - lin_coef = [cmodel.appsi_expr_from_pyomo_expr(i, - self._pyomo_var_to_solver_var_map, - self._pyomo_param_to_solver_param_map, - pyomo_expr_types) for i in repn.linear_coefs] + repn = generate_standard_repn( + obj.expr, compute_values=False, quadratic=False + ) + const = cmodel.appsi_expr_from_pyomo_expr( + repn.constant, + self._pyomo_var_to_solver_var_map, + self._pyomo_param_to_solver_param_map, + pyomo_expr_types, + ) + lin_vars = [ + self._pyomo_var_to_solver_var_map[id(i)] for i in repn.linear_vars + ] + lin_coef = [ + cmodel.appsi_expr_from_pyomo_expr( + i, + self._pyomo_var_to_solver_var_map, + self._pyomo_param_to_solver_param_map, + pyomo_expr_types, + ) + for i in repn.linear_coefs + ] if repn.nonlinear_expr is None: - nonlin = cmodel.appsi_expr_from_pyomo_expr(0, - self._pyomo_var_to_solver_var_map, - self._pyomo_param_to_solver_param_map, - pyomo_expr_types) + nonlin = cmodel.appsi_expr_from_pyomo_expr( + 0, + self._pyomo_var_to_solver_var_map, + self._pyomo_param_to_solver_param_map, + pyomo_expr_types, + ) else: - nonlin = cmodel.appsi_expr_from_pyomo_expr(repn.nonlinear_expr, - self._pyomo_var_to_solver_var_map, - self._pyomo_param_to_solver_param_map, - pyomo_expr_types) + nonlin = cmodel.appsi_expr_from_pyomo_expr( + repn.nonlinear_expr, + self._pyomo_var_to_solver_var_map, + self._pyomo_param_to_solver_param_map, + pyomo_expr_types, + ) if obj.sense is minimize: sense = 0 else: @@ -189,10 +244,14 @@ def update(self, timer: HierarchicalTimer = None): self._set_pyomo_amplfunc_env() def get_ordered_vars(self): - return [self._solver_var_to_pyomo_var_map[i] for i in self._writer.get_solve_vars()] + return [ + self._solver_var_to_pyomo_var_map[i] for i in self._writer.get_solve_vars() + ] def get_ordered_cons(self): - return [self._solver_con_to_pyomo_con_map[i] for i in self._writer.get_solve_cons()] + return [ + self._solver_con_to_pyomo_con_map[i] for i in self._writer.get_solve_cons() + ] def get_active_objective(self): return self._objective diff --git a/pyomo/contrib/appsi/writers/tests/test_nl_writer.py b/pyomo/contrib/appsi/writers/tests/test_nl_writer.py index 983d09b2e8e..3b61a5901c3 100644 --- a/pyomo/contrib/appsi/writers/tests/test_nl_writer.py +++ b/pyomo/contrib/appsi/writers/tests/test_nl_writer.py @@ -14,15 +14,17 @@ def test_all_vars_fixed(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y**2) m.c1 = pe.Constraint(expr=m.y >= pe.exp(m.x)) - m.c2 = pe.Constraint(expr=m.y >= (m.x - 1)**2) + m.c2 = pe.Constraint(expr=m.y >= (m.x - 1) ** 2) m.x.fix(1) m.y.fix(2) writer = appsi.writers.NLWriter() with TempfileManager: fname = TempfileManager.create_tempfile(suffix='.appsi.nl') - with self.assertRaisesRegex(ValueError, 'there are not any unfixed variables in the problem'): + with self.assertRaisesRegex( + ValueError, 'there are not any unfixed variables in the problem' + ): writer.write(m, fname) - + def _write_and_check_header(self, m, correct_lines): writer = appsi.writers.NLWriter() with TempfileManager: @@ -38,16 +40,18 @@ def test_header_1(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x + m.y) m.c = pe.Constraint(expr=m.x + m.y == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '0 0', - '0 0', - '0 0 0', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '0 0', + '0 0', + '0 0 0', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_2(self): @@ -56,16 +60,18 @@ def test_header_2(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y) m.c = pe.Constraint(expr=m.x + m.y == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '0 1', - '0 0', - '0 1 0', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '0 1', + '0 0', + '0 1 0', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_3(self): @@ -74,16 +80,18 @@ def test_header_3(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x + m.y) m.c = pe.Constraint(expr=m.x**2 + m.y == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 0', - '0 0', - '1 0 0', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 0', + '0 0', + '1 0 0', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_4(self): @@ -92,16 +100,18 @@ def test_header_4(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y) m.c = pe.Constraint(expr=m.x**2 + m.y == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 1', - '0 0', - '1 1 1', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 1', + '0 0', + '1 1 1', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_5(self): @@ -110,16 +120,18 @@ def test_header_5(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y**2) m.c = pe.Constraint(expr=m.x**2 + m.y == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 1', - '0 0', - '1 2 1', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 1', + '0 0', + '1 2 1', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_6(self): @@ -128,16 +140,18 @@ def test_header_6(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y) m.c = pe.Constraint(expr=m.x**2 + m.y**2 == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 1', - '0 0', - '2 1 1', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 1', + '0 0', + '2 1 1', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_7(self): @@ -146,16 +160,18 @@ def test_header_7(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x + m.y) m.c = pe.Constraint(expr=m.x + m.y**2 == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 0', - '0 0', - '1 0 0', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 0', + '0 0', + '1 0 0', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_8(self): @@ -164,16 +180,18 @@ def test_header_8(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x + m.y) m.c = pe.Constraint(expr=m.x**2 + m.y**2 == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 0', - '0 0', - '2 0 0', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 0', + '0 0', + '2 0 0', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_9(self): @@ -182,16 +200,18 @@ def test_header_9(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x + m.y**2) m.c = pe.Constraint(expr=m.x + m.y == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '0 1', - '0 0', - '0 1 0', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '0 1', + '0 0', + '0 1 0', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_10(self): @@ -200,16 +220,18 @@ def test_header_10(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x + m.y**2) m.c = pe.Constraint(expr=m.x + m.y**2 == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 1', - '0 0', - '1 1 1', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 1', + '0 0', + '1 1 1', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_11(self): @@ -218,16 +240,18 @@ def test_header_11(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x + m.y**2) m.c = pe.Constraint(expr=m.x**2 + m.y == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 1', - '0 0', - '1 2 0', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 1', + '0 0', + '1 2 0', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_12(self): @@ -236,16 +260,18 @@ def test_header_12(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x + m.y**2) m.c = pe.Constraint(expr=m.x**2 + m.y**2 == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 1', - '0 0', - '2 1 1', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 1', + '0 0', + '2 1 1', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_13(self): @@ -254,16 +280,18 @@ def test_header_13(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y) m.c = pe.Constraint(expr=m.x + m.y**2 == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 1', - '0 0', - '1 2 0', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 1', + '0 0', + '1 2 0', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_14(self): @@ -272,16 +300,18 @@ def test_header_14(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y**2) m.c = pe.Constraint(expr=m.x + m.y == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '0 1', - '0 0', - '0 2 0', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '0 1', + '0 0', + '0 2 0', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_15(self): @@ -290,16 +320,18 @@ def test_header_15(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y**2) m.c = pe.Constraint(expr=m.x + m.y**2 == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 1', - '0 0', - '1 2 1', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 1', + '0 0', + '1 2 1', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) def test_header_16(self): @@ -308,14 +340,16 @@ def test_header_16(self): m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y**2) m.c = pe.Constraint(expr=m.x**2 + m.y**2 == 1) - correct_lines = ['g3 1 1 0', - '2 1 1 0 1', - '1 1', - '0 0', - '2 2 2', - '0 0 0 1', - '0 0 0 0 0', - '2 2', - '0 0', - '0 0 0 0 0'] + correct_lines = [ + 'g3 1 1 0', + '2 1 1 0 1', + '1 1', + '0 0', + '2 2 2', + '0 0 0 1', + '0 0 0 0 0', + '2 2', + '0 0', + '0 0 0 0 0', + ] self._write_and_check_header(m, correct_lines) diff --git a/pyomo/contrib/benders/benders_cuts.py b/pyomo/contrib/benders/benders_cuts.py index d0e00ebdfcf..5eb2e91cc82 100644 --- a/pyomo/contrib/benders/benders_cuts.py +++ b/pyomo/contrib/benders/benders_cuts.py @@ -14,13 +14,16 @@ from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver from pyomo.core.expr.visitor import identify_variables from pyomo.common.collections import ComponentSet + try: from mpi4py import MPI + mpi4py_available = True except: mpi4py_available = False try: import numpy as np + numpy_available = True except: numpy_available = False @@ -114,7 +117,9 @@ def _setup_subproblem(b, root_vars, relax_subproblem_cons): # first get the objective and turn it into a constraint root_vars = ComponentSet(root_vars) - objs = list(b.component_data_objects(pyo.Objective, descend_into=False, active=True)) + objs = list( + b.component_data_objects(pyo.Objective, descend_into=False, active=True) + ) if len(objs) != 1: raise ValueError('Subproblem must have exactly one objective') orig_obj = objs[0] @@ -127,7 +132,11 @@ def _setup_subproblem(b, root_vars, relax_subproblem_cons): b._eta = pyo.Var() b.aux_cons = pyo.ConstraintList() - for c in list(b.component_data_objects(pyo.Constraint, descend_into=True, active=True, sort=True)): + for c in list( + b.component_data_objects( + pyo.Constraint, descend_into=True, active=True, sort=True + ) + ): if not relax_subproblem_cons: c_vars = ComponentSet(identify_variables(c.body, include_fixed=False)) if not _any_common_elements(root_vars, c_vars): @@ -164,8 +173,8 @@ def __init__(self, component): if not numpy_available: raise ImportError('BendersCutGenerator requires numpy.') _BlockData.__init__(self, component) - - self.num_subproblems_by_rank = 0 #np.zeros(self.comm.Get_size()) + + self.num_subproblems_by_rank = 0 # np.zeros(self.comm.Get_size()) self.subproblems = list() self.complicating_vars_maps = list() self.root_vars = list() @@ -175,8 +184,8 @@ def __init__(self, component): self.subproblem_solvers = list() self.tol = None self.all_root_etas = list() - self._subproblem_ndx_map = dict() # map from ndx in self.subproblems (local) to the global subproblem ndx - + # map from ndx in self.subproblems (local) to the global subproblem ndx + self._subproblem_ndx_map = dict() def global_num_subproblems(self): return int(self.num_subproblems_by_rank.sum()) @@ -184,7 +193,7 @@ def global_num_subproblems(self): def local_num_subproblems(self): return len(self.subproblems) - def set_input(self, root_vars, tol=1e-6, comm = None): + def set_input(self, root_vars, tol=1e-6, comm=None): """ It is very important for root_vars to be in the same order for every process. @@ -214,7 +223,14 @@ def set_input(self, root_vars, tol=1e-6, comm = None): self.all_root_etas = list() self._subproblem_ndx_map = dict() - def add_subproblem(self, subproblem_fn, subproblem_fn_kwargs, root_eta, subproblem_solver='gurobi_persistent', relax_subproblem_cons=False): + def add_subproblem( + self, + subproblem_fn, + subproblem_fn_kwargs, + root_eta, + subproblem_solver='gurobi_persistent', + relax_subproblem_cons=False, + ): _rank = np.argmin(self.num_subproblems_by_rank) self.num_subproblems_by_rank[_rank] += 1 self.all_root_etas.append(root_eta) @@ -223,8 +239,18 @@ def add_subproblem(self, subproblem_fn, subproblem_fn_kwargs, root_eta, subprobl subproblem, complicating_vars_map = subproblem_fn(**subproblem_fn_kwargs) self.subproblems.append(subproblem) self.complicating_vars_maps.append(complicating_vars_map) - _setup_subproblem(subproblem, root_vars=[complicating_vars_map[i] for i in self.root_vars if i in complicating_vars_map], relax_subproblem_cons=relax_subproblem_cons) - self._subproblem_ndx_map[len(self.subproblems) - 1] = self.global_num_subproblems() - 1 + _setup_subproblem( + subproblem, + root_vars=[ + complicating_vars_map[i] + for i in self.root_vars + if i in complicating_vars_map + ], + relax_subproblem_cons=relax_subproblem_cons, + ) + self._subproblem_ndx_map[len(self.subproblems) - 1] = ( + self.global_num_subproblems() - 1 + ) if isinstance(subproblem_solver, str): subproblem_solver = pyo.SolverFactory(subproblem_solver) @@ -233,7 +259,9 @@ def add_subproblem(self, subproblem_fn, subproblem_fn_kwargs, root_eta, subprobl subproblem_solver.set_instance(subproblem) def generate_cut(self): - coefficients = np.zeros(self.global_num_subproblems() * len(self.root_vars), dtype='d') + coefficients = np.zeros( + self.global_num_subproblems() * len(self.root_vars), dtype='d' + ) constants = np.zeros(self.global_num_subproblems(), dtype='d') eta_coeffs = np.zeros(self.global_num_subproblems(), dtype='d') @@ -250,37 +278,56 @@ def generate_cut(self): if root_var in complicating_vars_map: sub_var = complicating_vars_map[root_var] sub_var.set_value(root_var.value, skip_validation=True) - new_con = subproblem.fix_complicating_vars.add(sub_var - root_var.value == 0) + new_con = subproblem.fix_complicating_vars.add( + sub_var - root_var.value == 0 + ) var_to_con_map[root_var] = new_con - subproblem.fix_eta = pyo.Constraint(expr=subproblem._eta - root_eta.value == 0) + subproblem.fix_eta = pyo.Constraint( + expr=subproblem._eta - root_eta.value == 0 + ) subproblem._eta.set_value(root_eta.value, skip_validation=True) subproblem_solver = self.subproblem_solvers[local_subproblem_ndx] if subproblem_solver.name not in solver_dual_sign_convention: - raise NotImplementedError('BendersCutGenerator is unaware of the dual sign convention of subproblem solver ' + subproblem_solver.name) + raise NotImplementedError( + 'BendersCutGenerator is unaware of the dual sign convention of subproblem solver ' + + subproblem_solver.name + ) sign_convention = solver_dual_sign_convention[subproblem_solver.name] if isinstance(subproblem_solver, PersistentSolver): for c in subproblem.fix_complicating_vars.values(): subproblem_solver.add_constraint(c) subproblem_solver.add_constraint(subproblem.fix_eta) - res = subproblem_solver.solve(tee=False, load_solutions=False, save_results=False) + res = subproblem_solver.solve( + tee=False, load_solutions=False, save_results=False + ) if res.solver.termination_condition != pyo.TerminationCondition.optimal: - raise RuntimeError('Unable to generate cut because subproblem failed to converge.') + raise RuntimeError( + 'Unable to generate cut because subproblem failed to converge.' + ) subproblem_solver.load_vars() subproblem_solver.load_duals() else: - res = subproblem_solver.solve(subproblem, tee=False, load_solutions=False) + res = subproblem_solver.solve( + subproblem, tee=False, load_solutions=False + ) if res.solver.termination_condition != pyo.TerminationCondition.optimal: - raise RuntimeError('Unable to generate cut because subproblem failed to converge.') + raise RuntimeError( + 'Unable to generate cut because subproblem failed to converge.' + ) subproblem.solutions.load_from(res) constants[global_subproblem_ndx] = pyo.value(subproblem._z) - eta_coeffs[global_subproblem_ndx] = sign_convention * pyo.value(subproblem.dual[subproblem.obj_con]) + eta_coeffs[global_subproblem_ndx] = sign_convention * pyo.value( + subproblem.dual[subproblem.obj_con] + ) for root_var in self.root_vars: if root_var in complicating_vars_map: c = var_to_con_map[root_var] - coefficients[coeff_ndx] = sign_convention * pyo.value(subproblem.dual[c]) + coefficients[coeff_ndx] = sign_convention * pyo.value( + subproblem.dual[c] + ) coeff_ndx += 1 if isinstance(subproblem_solver, PersistentSolver): @@ -293,7 +340,7 @@ def generate_cut(self): total_num_subproblems = self.global_num_subproblems() global_constants = np.zeros(total_num_subproblems, dtype='d') - global_coeffs = np.zeros(total_num_subproblems*len(self.root_vars), dtype='d') + global_coeffs = np.zeros(total_num_subproblems * len(self.root_vars), dtype='d') global_eta_coeffs = np.zeros(total_num_subproblems, dtype='d') comm = self.comm @@ -311,7 +358,9 @@ def generate_cut(self): cut_expr = global_constants[global_subproblem_ndx] if cut_expr > self.tol: root_eta = self.all_root_etas[global_subproblem_ndx] - cut_expr -= global_eta_coeffs[global_subproblem_ndx] * (root_eta - root_eta.value) + cut_expr -= global_eta_coeffs[global_subproblem_ndx] * ( + root_eta - root_eta.value + ) for root_var in self.root_vars: coeff = global_coeffs[coeff_ndx] cut_expr -= coeff * (root_var - root_var.value) diff --git a/pyomo/contrib/benders/examples/farmer.py b/pyomo/contrib/benders/examples/farmer.py index 8e624ce382d..bf5d40e112c 100644 --- a/pyomo/contrib/benders/examples/farmer.py +++ b/pyomo/contrib/benders/examples/farmer.py @@ -9,12 +9,13 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +import os +import sys +import time + +from pyomo.common.dependencies import mpi4py from pyomo.contrib.benders.benders_cuts import BendersCutGenerator import pyomo.environ as pyo -import time -from mpi4py import MPI -import sys -import os """ @@ -34,11 +35,27 @@ def __init__(self): self.CattleFeedRequirement = {'WHEAT': 200.0, 'CORN': 240.0, 'SUGAR_BEETS': 0.0} self.PurchasePrice = {'WHEAT': 238.0, 'CORN': 210.0, 'SUGAR_BEETS': 100000.0} self.PlantingCostPerAcre = {'WHEAT': 150.0, 'CORN': 230.0, 'SUGAR_BEETS': 260.0} - self.scenarios = ['BelowAverageScenario', 'AverageScenario', 'AboveAverageScenario'] + self.scenarios = [ + 'BelowAverageScenario', + 'AverageScenario', + 'AboveAverageScenario', + ] self.crop_yield = dict() - self.crop_yield['BelowAverageScenario'] = {'WHEAT': 2.0, 'CORN': 2.4, 'SUGAR_BEETS': 16.0} - self.crop_yield['AverageScenario'] = {'WHEAT': 2.5, 'CORN': 3.0, 'SUGAR_BEETS': 20.0} - self.crop_yield['AboveAverageScenario'] = {'WHEAT': 3.0, 'CORN': 3.6, 'SUGAR_BEETS': 24.0} + self.crop_yield['BelowAverageScenario'] = { + 'WHEAT': 2.0, + 'CORN': 2.4, + 'SUGAR_BEETS': 16.0, + } + self.crop_yield['AverageScenario'] = { + 'WHEAT': 2.5, + 'CORN': 3.0, + 'SUGAR_BEETS': 20.0, + } + self.crop_yield['AboveAverageScenario'] = { + 'WHEAT': 3.0, + 'CORN': 3.6, + 'SUGAR_BEETS': 24.0, + } self.scenario_probabilities = dict() self.scenario_probabilities['BelowAverageScenario'] = 0.3333 self.scenario_probabilities['AverageScenario'] = 0.3334 @@ -56,9 +73,17 @@ def create_root(farmer): for s in m.scenarios: m.eta[s].setlb(-432000 * farmer.scenario_probabilities[s]) - m.total_acreage_con = pyo.Constraint(expr=sum(m.devoted_acreage.values()) <= farmer.total_acreage) - - m.obj = pyo.Objective(expr=sum(farmer.PlantingCostPerAcre[crop] * m.devoted_acreage[crop] for crop in m.crops) + sum(m.eta.values())) + m.total_acreage_con = pyo.Constraint( + expr=sum(m.devoted_acreage.values()) <= farmer.total_acreage + ) + + m.obj = pyo.Objective( + expr=sum( + farmer.PlantingCostPerAcre[crop] * m.devoted_acreage[crop] + for crop in m.crops + ) + + sum(m.eta.values()) + ) return m @@ -73,21 +98,44 @@ def create_subproblem(root, farmer, scenario): m.QuantityPurchased = pyo.Var(m.crops, bounds=(0.0, None)) def EnforceCattleFeedRequirement_rule(m, i): - return (farmer.CattleFeedRequirement[i] <= (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) + - m.QuantityPurchased[i] - m.QuantitySubQuotaSold[i] - m.QuantitySuperQuotaSold[i]) - m.EnforceCattleFeedRequirement = pyo.Constraint(m.crops, rule=EnforceCattleFeedRequirement_rule) + return ( + farmer.CattleFeedRequirement[i] + <= (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) + + m.QuantityPurchased[i] + - m.QuantitySubQuotaSold[i] + - m.QuantitySuperQuotaSold[i] + ) + + m.EnforceCattleFeedRequirement = pyo.Constraint( + m.crops, rule=EnforceCattleFeedRequirement_rule + ) def LimitAmountSold_rule(m, i): - return m.QuantitySubQuotaSold[i] + m.QuantitySuperQuotaSold[i] - (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) <= 0.0 + return ( + m.QuantitySubQuotaSold[i] + + m.QuantitySuperQuotaSold[i] + - (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) + <= 0.0 + ) + m.LimitAmountSold = pyo.Constraint(m.crops, rule=LimitAmountSold_rule) def EnforceQuotas_rule(m, i): return (0.0, m.QuantitySubQuotaSold[i], farmer.PriceQuota[i]) + m.EnforceQuotas = pyo.Constraint(m.crops, rule=EnforceQuotas_rule) - obj_expr = sum(farmer.PurchasePrice[crop] * m.QuantityPurchased[crop] for crop in m.crops) - obj_expr -= sum(farmer.SubQuotaSellingPrice[crop] * m.QuantitySubQuotaSold[crop] for crop in m.crops) - obj_expr -= sum(farmer.SuperQuotaSellingPrice[crop] * m.QuantitySuperQuotaSold[crop] for crop in m.crops) + obj_expr = sum( + farmer.PurchasePrice[crop] * m.QuantityPurchased[crop] for crop in m.crops + ) + obj_expr -= sum( + farmer.SubQuotaSellingPrice[crop] * m.QuantitySubQuotaSold[crop] + for crop in m.crops + ) + obj_expr -= sum( + farmer.SuperQuotaSellingPrice[crop] * m.QuantitySuperQuotaSold[crop] + for crop in m.crops + ) m.obj = pyo.Objective(expr=farmer.scenario_probabilities[scenario] * obj_expr) complicating_vars_map = pyo.ComponentMap() @@ -98,7 +146,7 @@ def EnforceQuotas_rule(m, i): def main(): - rank = MPI.COMM_WORLD.Get_rank() + rank = mpi4py.MPI.COMM_WORLD.Get_rank() if rank != 0: sys.stdout = open(os.devnull, 'w') @@ -113,28 +161,34 @@ def main(): subproblem_fn_kwargs['root'] = m subproblem_fn_kwargs['farmer'] = farmer subproblem_fn_kwargs['scenario'] = s - m.benders.add_subproblem(subproblem_fn=create_subproblem, - subproblem_fn_kwargs=subproblem_fn_kwargs, - root_eta=m.eta[s], - subproblem_solver='gurobi_persistent') + m.benders.add_subproblem( + subproblem_fn=create_subproblem, + subproblem_fn_kwargs=subproblem_fn_kwargs, + root_eta=m.eta[s], + subproblem_solver='gurobi_persistent', + ) opt = pyo.SolverFactory('gurobi_persistent') opt.set_instance(m) - print('{0:<15}{1:<15}{2:<15}{3:<15}{4:<15}'.format('# Cuts', - 'Corn', - 'Sugar Beets', - 'Wheat', - 'Time')) + print( + '{0:<15}{1:<15}{2:<15}{3:<15}{4:<15}'.format( + '# Cuts', 'Corn', 'Sugar Beets', 'Wheat', 'Time' + ) + ) for i in range(30): res = opt.solve(tee=False, save_results=False) cuts_added = m.benders.generate_cut() for c in cuts_added: opt.add_constraint(c) - print('{0:<15}{1:<15.2f}{2:<15.2f}{3:<15.2f}{4:<15.2f}'.format(len(cuts_added), - m.devoted_acreage['CORN'].value, - m.devoted_acreage['SUGAR_BEETS'].value, - m.devoted_acreage['WHEAT'].value, - time.time()-t0)) + print( + '{0:<15}{1:<15.2f}{2:<15.2f}{3:<15.2f}{4:<15.2f}'.format( + len(cuts_added), + m.devoted_acreage['CORN'].value, + m.devoted_acreage['SUGAR_BEETS'].value, + m.devoted_acreage['WHEAT'].value, + time.time() - t0, + ) + ) if len(cuts_added) == 0: break diff --git a/pyomo/contrib/benders/examples/grothey_ex.py b/pyomo/contrib/benders/examples/grothey_ex.py index c90752938cc..66457fa7293 100644 --- a/pyomo/contrib/benders/examples/grothey_ex.py +++ b/pyomo/contrib/benders/examples/grothey_ex.py @@ -27,8 +27,8 @@ def create_subproblem(root): m.x2 = pyo.Var() m.y = pyo.Var() m.obj = pyo.Objective(expr=-m.x2) - m.c1 = pyo.Constraint(expr=(m.x1 - 1)**2 + m.x2**2 <= pyo.log(m.y)) - m.c2 = pyo.Constraint(expr=(m.x1 + 1)**2 + m.x2**2 <= pyo.log(m.y)) + m.c1 = pyo.Constraint(expr=(m.x1 - 1) ** 2 + m.x2**2 <= pyo.log(m.y)) + m.c2 = pyo.Constraint(expr=(m.x1 + 1) ** 2 + m.x2**2 <= pyo.log(m.y)) complicating_vars_map = pyo.ComponentMap() complicating_vars_map[root.y] = m.y @@ -41,10 +41,12 @@ def main(): root_vars = [m.y] m.benders = BendersCutGenerator() m.benders.set_input(root_vars=root_vars, tol=1e-8) - m.benders.add_subproblem(subproblem_fn=create_subproblem, - subproblem_fn_kwargs={'root': m}, - root_eta=m.eta, - subproblem_solver='ipopt', ) + m.benders.add_subproblem( + subproblem_fn=create_subproblem, + subproblem_fn_kwargs={'root': m}, + root_eta=m.eta, + subproblem_solver='ipopt', + ) opt = pyo.SolverFactory('gurobi_direct') for i in range(30): diff --git a/pyomo/contrib/benders/tests/test_benders.py b/pyomo/contrib/benders/tests/test_benders.py index 6fc0a4fa95e..26a2a0b7910 100644 --- a/pyomo/contrib/benders/tests/test_benders.py +++ b/pyomo/contrib/benders/tests/test_benders.py @@ -12,13 +12,16 @@ import pyomo.common.unittest as unittest from pyomo.contrib.benders.benders_cuts import BendersCutGenerator import pyomo.environ as pyo + try: import mpi4py + mpi4py_available = True except: mpi4py_available = False try: import numpy as np + numpy_available = True except: numpy_available = False @@ -41,17 +44,57 @@ class Farmer(object): def __init__(self): self.crops = ['WHEAT', 'CORN', 'SUGAR_BEETS'] self.total_acreage = 500 - self.PriceQuota = {'WHEAT': 100000.0, 'CORN': 100000.0, 'SUGAR_BEETS': 6000.0} - self.SubQuotaSellingPrice = {'WHEAT': 170.0, 'CORN': 150.0, 'SUGAR_BEETS': 36.0} - self.SuperQuotaSellingPrice = {'WHEAT': 0.0, 'CORN': 0.0, 'SUGAR_BEETS': 10.0} - self.CattleFeedRequirement = {'WHEAT': 200.0, 'CORN': 240.0, 'SUGAR_BEETS': 0.0} - self.PurchasePrice = {'WHEAT': 238.0, 'CORN': 210.0, 'SUGAR_BEETS': 100000.0} - self.PlantingCostPerAcre = {'WHEAT': 150.0, 'CORN': 230.0, 'SUGAR_BEETS': 260.0} - self.scenarios = ['BelowAverageScenario', 'AverageScenario', 'AboveAverageScenario'] + self.PriceQuota = { + 'WHEAT': 100000.0, + 'CORN': 100000.0, + 'SUGAR_BEETS': 6000.0, + } + self.SubQuotaSellingPrice = { + 'WHEAT': 170.0, + 'CORN': 150.0, + 'SUGAR_BEETS': 36.0, + } + self.SuperQuotaSellingPrice = { + 'WHEAT': 0.0, + 'CORN': 0.0, + 'SUGAR_BEETS': 10.0, + } + self.CattleFeedRequirement = { + 'WHEAT': 200.0, + 'CORN': 240.0, + 'SUGAR_BEETS': 0.0, + } + self.PurchasePrice = { + 'WHEAT': 238.0, + 'CORN': 210.0, + 'SUGAR_BEETS': 100000.0, + } + self.PlantingCostPerAcre = { + 'WHEAT': 150.0, + 'CORN': 230.0, + 'SUGAR_BEETS': 260.0, + } + self.scenarios = [ + 'BelowAverageScenario', + 'AverageScenario', + 'AboveAverageScenario', + ] self.crop_yield = dict() - self.crop_yield['BelowAverageScenario'] = {'WHEAT': 2.0, 'CORN': 2.4, 'SUGAR_BEETS': 16.0} - self.crop_yield['AverageScenario'] = {'WHEAT': 2.5, 'CORN': 3.0, 'SUGAR_BEETS': 20.0} - self.crop_yield['AboveAverageScenario'] = {'WHEAT': 3.0, 'CORN': 3.6, 'SUGAR_BEETS': 24.0} + self.crop_yield['BelowAverageScenario'] = { + 'WHEAT': 2.0, + 'CORN': 2.4, + 'SUGAR_BEETS': 16.0, + } + self.crop_yield['AverageScenario'] = { + 'WHEAT': 2.5, + 'CORN': 3.0, + 'SUGAR_BEETS': 20.0, + } + self.crop_yield['AboveAverageScenario'] = { + 'WHEAT': 3.0, + 'CORN': 3.6, + 'SUGAR_BEETS': 24.0, + } self.scenario_probabilities = dict() self.scenario_probabilities['BelowAverageScenario'] = 0.3333 self.scenario_probabilities['AverageScenario'] = 0.3334 @@ -68,11 +111,17 @@ def create_root(farmer): for s in m.scenarios: m.eta[s].setlb(-432000 * farmer.scenario_probabilities[s]) - m.total_acreage_con = pyo.Constraint(expr=sum(m.devoted_acreage.values()) <= farmer.total_acreage) + m.total_acreage_con = pyo.Constraint( + expr=sum(m.devoted_acreage.values()) <= farmer.total_acreage + ) m.obj = pyo.Objective( - expr=sum(farmer.PlantingCostPerAcre[crop] * m.devoted_acreage[crop] for crop in m.crops) + sum( - m.eta.values())) + expr=sum( + farmer.PlantingCostPerAcre[crop] * m.devoted_acreage[crop] + for crop in m.crops + ) + + sum(m.eta.values()) + ) return m def create_subproblem(root, farmer, scenario): @@ -86,14 +135,25 @@ def create_subproblem(root, farmer, scenario): m.QuantityPurchased = pyo.Var(m.crops, bounds=(0.0, None)) def EnforceCattleFeedRequirement_rule(m, i): - return (farmer.CattleFeedRequirement[i] <= (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) + - m.QuantityPurchased[i] - m.QuantitySubQuotaSold[i] - m.QuantitySuperQuotaSold[i]) - - m.EnforceCattleFeedRequirement = pyo.Constraint(m.crops, rule=EnforceCattleFeedRequirement_rule) + return ( + farmer.CattleFeedRequirement[i] + <= (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) + + m.QuantityPurchased[i] + - m.QuantitySubQuotaSold[i] + - m.QuantitySuperQuotaSold[i] + ) + + m.EnforceCattleFeedRequirement = pyo.Constraint( + m.crops, rule=EnforceCattleFeedRequirement_rule + ) def LimitAmountSold_rule(m, i): - return m.QuantitySubQuotaSold[i] + m.QuantitySuperQuotaSold[i] - ( - farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) <= 0.0 + return ( + m.QuantitySubQuotaSold[i] + + m.QuantitySuperQuotaSold[i] + - (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) + <= 0.0 + ) m.LimitAmountSold = pyo.Constraint(m.crops, rule=LimitAmountSold_rule) @@ -102,14 +162,27 @@ def EnforceQuotas_rule(m, i): m.EnforceQuotas = pyo.Constraint(m.crops, rule=EnforceQuotas_rule) - obj_expr = sum(farmer.PurchasePrice[crop] * m.QuantityPurchased[crop] for crop in m.crops) - obj_expr -= sum(farmer.SubQuotaSellingPrice[crop] * m.QuantitySubQuotaSold[crop] for crop in m.crops) - obj_expr -= sum(farmer.SuperQuotaSellingPrice[crop] * m.QuantitySuperQuotaSold[crop] for crop in m.crops) - m.obj = pyo.Objective(expr=farmer.scenario_probabilities[scenario] * obj_expr) + obj_expr = sum( + farmer.PurchasePrice[crop] * m.QuantityPurchased[crop] + for crop in m.crops + ) + obj_expr -= sum( + farmer.SubQuotaSellingPrice[crop] * m.QuantitySubQuotaSold[crop] + for crop in m.crops + ) + obj_expr -= sum( + farmer.SuperQuotaSellingPrice[crop] * m.QuantitySuperQuotaSold[crop] + for crop in m.crops + ) + m.obj = pyo.Objective( + expr=farmer.scenario_probabilities[scenario] * obj_expr + ) complicating_vars_map = pyo.ComponentMap() for crop in m.crops: - complicating_vars_map[root.devoted_acreage[crop]] = m.devoted_acreage[crop] + complicating_vars_map[root.devoted_acreage[crop]] = m.devoted_acreage[ + crop + ] return m, complicating_vars_map @@ -123,10 +196,12 @@ def EnforceQuotas_rule(m, i): subproblem_fn_kwargs['root'] = m subproblem_fn_kwargs['farmer'] = farmer subproblem_fn_kwargs['scenario'] = s - m.benders.add_subproblem(subproblem_fn=create_subproblem, - subproblem_fn_kwargs=subproblem_fn_kwargs, - root_eta=m.eta[s], - subproblem_solver='cplex_direct') + m.benders.add_subproblem( + subproblem_fn=create_subproblem, + subproblem_fn_kwargs=subproblem_fn_kwargs, + root_eta=m.eta[s], + subproblem_solver='cplex_direct', + ) opt = pyo.SolverFactory('cplex_direct') for i in range(30): @@ -147,7 +222,7 @@ def create_root(): m = pyo.ConcreteModel() m.y = pyo.Var(bounds=(1, None)) m.eta = pyo.Var(bounds=(-10, None)) - m.obj = pyo.Objective(expr=m.y ** 2 + m.eta) + m.obj = pyo.Objective(expr=m.y**2 + m.eta) return m def create_subproblem(root): @@ -156,8 +231,8 @@ def create_subproblem(root): m.x2 = pyo.Var() m.y = pyo.Var() m.obj = pyo.Objective(expr=-m.x2) - m.c1 = pyo.Constraint(expr=(m.x1 - 1) ** 2 + m.x2 ** 2 <= pyo.log(m.y)) - m.c2 = pyo.Constraint(expr=(m.x1 + 1) ** 2 + m.x2 ** 2 <= pyo.log(m.y)) + m.c1 = pyo.Constraint(expr=(m.x1 - 1) ** 2 + m.x2**2 <= pyo.log(m.y)) + m.c2 = pyo.Constraint(expr=(m.x1 + 1) ** 2 + m.x2**2 <= pyo.log(m.y)) complicating_vars_map = pyo.ComponentMap() complicating_vars_map[root.y] = m.y @@ -168,10 +243,12 @@ def create_subproblem(root): root_vars = [m.y] m.benders = BendersCutGenerator() m.benders.set_input(root_vars=root_vars, tol=1e-8) - m.benders.add_subproblem(subproblem_fn=create_subproblem, - subproblem_fn_kwargs={'root': m}, - root_eta=m.eta, - subproblem_solver='ipopt', ) + m.benders.add_subproblem( + subproblem_fn=create_subproblem, + subproblem_fn_kwargs={'root': m}, + root_eta=m.eta, + subproblem_solver='ipopt', + ) opt = pyo.SolverFactory('ipopt') for i in range(30): @@ -190,18 +267,63 @@ class FourScenFarmer(object): def __init__(self): self.crops = ['WHEAT', 'CORN', 'SUGAR_BEETS'] self.total_acreage = 500 - self.PriceQuota = {'WHEAT': 100000.0, 'CORN': 100000.0, 'SUGAR_BEETS': 6000.0} - self.SubQuotaSellingPrice = {'WHEAT': 170.0, 'CORN': 150.0, 'SUGAR_BEETS': 36.0} - self.SuperQuotaSellingPrice = {'WHEAT': 0.0, 'CORN': 0.0, 'SUGAR_BEETS': 10.0} - self.CattleFeedRequirement = {'WHEAT': 200.0, 'CORN': 240.0, 'SUGAR_BEETS': 0.0} - self.PurchasePrice = {'WHEAT': 238.0, 'CORN': 210.0, 'SUGAR_BEETS': 100000.0} - self.PlantingCostPerAcre = {'WHEAT': 150.0, 'CORN': 230.0, 'SUGAR_BEETS': 260.0} - self.scenarios = ['BelowAverageScenario', 'AverageScenario', 'AboveAverageScenario', 'Scenario4'] + self.PriceQuota = { + 'WHEAT': 100000.0, + 'CORN': 100000.0, + 'SUGAR_BEETS': 6000.0, + } + self.SubQuotaSellingPrice = { + 'WHEAT': 170.0, + 'CORN': 150.0, + 'SUGAR_BEETS': 36.0, + } + self.SuperQuotaSellingPrice = { + 'WHEAT': 0.0, + 'CORN': 0.0, + 'SUGAR_BEETS': 10.0, + } + self.CattleFeedRequirement = { + 'WHEAT': 200.0, + 'CORN': 240.0, + 'SUGAR_BEETS': 0.0, + } + self.PurchasePrice = { + 'WHEAT': 238.0, + 'CORN': 210.0, + 'SUGAR_BEETS': 100000.0, + } + self.PlantingCostPerAcre = { + 'WHEAT': 150.0, + 'CORN': 230.0, + 'SUGAR_BEETS': 260.0, + } + self.scenarios = [ + 'BelowAverageScenario', + 'AverageScenario', + 'AboveAverageScenario', + 'Scenario4', + ] self.crop_yield = dict() - self.crop_yield['BelowAverageScenario'] = {'WHEAT': 2.0, 'CORN': 2.4, 'SUGAR_BEETS': 16.0} - self.crop_yield['AverageScenario'] = {'WHEAT': 2.5, 'CORN': 3.0, 'SUGAR_BEETS': 20.0} - self.crop_yield['AboveAverageScenario'] = {'WHEAT': 3.0, 'CORN': 3.6, 'SUGAR_BEETS': 24.0} - self.crop_yield['Scenario4'] = {'WHEAT':2.0, 'CORN':3.0, 'SUGAR_BEETS':24.0} + self.crop_yield['BelowAverageScenario'] = { + 'WHEAT': 2.0, + 'CORN': 2.4, + 'SUGAR_BEETS': 16.0, + } + self.crop_yield['AverageScenario'] = { + 'WHEAT': 2.5, + 'CORN': 3.0, + 'SUGAR_BEETS': 20.0, + } + self.crop_yield['AboveAverageScenario'] = { + 'WHEAT': 3.0, + 'CORN': 3.6, + 'SUGAR_BEETS': 24.0, + } + self.crop_yield['Scenario4'] = { + 'WHEAT': 2.0, + 'CORN': 3.0, + 'SUGAR_BEETS': 24.0, + } self.scenario_probabilities = dict() self.scenario_probabilities['BelowAverageScenario'] = 0.25 self.scenario_probabilities['AverageScenario'] = 0.25 @@ -219,11 +341,17 @@ def create_root(farmer): for s in m.scenarios: m.eta[s].setlb(-432000 * farmer.scenario_probabilities[s]) - m.total_acreage_con = pyo.Constraint(expr=sum(m.devoted_acreage.values()) <= farmer.total_acreage) + m.total_acreage_con = pyo.Constraint( + expr=sum(m.devoted_acreage.values()) <= farmer.total_acreage + ) m.obj = pyo.Objective( - expr=sum(farmer.PlantingCostPerAcre[crop] * m.devoted_acreage[crop] for crop in m.crops) + sum( - m.eta.values())) + expr=sum( + farmer.PlantingCostPerAcre[crop] * m.devoted_acreage[crop] + for crop in m.crops + ) + + sum(m.eta.values()) + ) return m def create_subproblem(root, farmer, scenario): @@ -237,14 +365,25 @@ def create_subproblem(root, farmer, scenario): m.QuantityPurchased = pyo.Var(m.crops, bounds=(0.0, None)) def EnforceCattleFeedRequirement_rule(m, i): - return (farmer.CattleFeedRequirement[i] <= (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) + - m.QuantityPurchased[i] - m.QuantitySubQuotaSold[i] - m.QuantitySuperQuotaSold[i]) - - m.EnforceCattleFeedRequirement = pyo.Constraint(m.crops, rule=EnforceCattleFeedRequirement_rule) + return ( + farmer.CattleFeedRequirement[i] + <= (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) + + m.QuantityPurchased[i] + - m.QuantitySubQuotaSold[i] + - m.QuantitySuperQuotaSold[i] + ) + + m.EnforceCattleFeedRequirement = pyo.Constraint( + m.crops, rule=EnforceCattleFeedRequirement_rule + ) def LimitAmountSold_rule(m, i): - return m.QuantitySubQuotaSold[i] + m.QuantitySuperQuotaSold[i] - ( - farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) <= 0.0 + return ( + m.QuantitySubQuotaSold[i] + + m.QuantitySuperQuotaSold[i] + - (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) + <= 0.0 + ) m.LimitAmountSold = pyo.Constraint(m.crops, rule=LimitAmountSold_rule) @@ -253,14 +392,27 @@ def EnforceQuotas_rule(m, i): m.EnforceQuotas = pyo.Constraint(m.crops, rule=EnforceQuotas_rule) - obj_expr = sum(farmer.PurchasePrice[crop] * m.QuantityPurchased[crop] for crop in m.crops) - obj_expr -= sum(farmer.SubQuotaSellingPrice[crop] * m.QuantitySubQuotaSold[crop] for crop in m.crops) - obj_expr -= sum(farmer.SuperQuotaSellingPrice[crop] * m.QuantitySuperQuotaSold[crop] for crop in m.crops) - m.obj = pyo.Objective(expr=farmer.scenario_probabilities[scenario] * obj_expr) + obj_expr = sum( + farmer.PurchasePrice[crop] * m.QuantityPurchased[crop] + for crop in m.crops + ) + obj_expr -= sum( + farmer.SubQuotaSellingPrice[crop] * m.QuantitySubQuotaSold[crop] + for crop in m.crops + ) + obj_expr -= sum( + farmer.SuperQuotaSellingPrice[crop] * m.QuantitySuperQuotaSold[crop] + for crop in m.crops + ) + m.obj = pyo.Objective( + expr=farmer.scenario_probabilities[scenario] * obj_expr + ) complicating_vars_map = pyo.ComponentMap() for crop in m.crops: - complicating_vars_map[root.devoted_acreage[crop]] = m.devoted_acreage[crop] + complicating_vars_map[root.devoted_acreage[crop]] = m.devoted_acreage[ + crop + ] return m, complicating_vars_map @@ -274,10 +426,12 @@ def EnforceQuotas_rule(m, i): subproblem_fn_kwargs['root'] = m subproblem_fn_kwargs['farmer'] = farmer subproblem_fn_kwargs['scenario'] = s - m.benders.add_subproblem(subproblem_fn=create_subproblem, - subproblem_fn_kwargs=subproblem_fn_kwargs, - root_eta=m.eta[s], - subproblem_solver='cplex_direct') + m.benders.add_subproblem( + subproblem_fn=create_subproblem, + subproblem_fn_kwargs=subproblem_fn_kwargs, + root_eta=m.eta[s], + subproblem_solver='cplex_direct', + ) opt = pyo.SolverFactory('cplex_direct') for i in range(30): @@ -286,8 +440,6 @@ def EnforceQuotas_rule(m, i): if len(cuts_added) == 0: break - self.assertAlmostEqual(m.devoted_acreage['CORN'].value ,100, 7) + self.assertAlmostEqual(m.devoted_acreage['CORN'].value, 100, 7) self.assertAlmostEqual(m.devoted_acreage['SUGAR_BEETS'].value, 250, 7) self.assertAlmostEqual(m.devoted_acreage['WHEAT'].value, 150, 7) - - diff --git a/pyomo/contrib/community_detection/community_graph.py b/pyomo/contrib/community_detection/community_graph.py index 9ce73079b8f..d4aa7e0b973 100644 --- a/pyomo/contrib/community_detection/community_graph.py +++ b/pyomo/contrib/community_detection/community_graph.py @@ -2,12 +2,17 @@ from pyomo.common.dependencies import networkx as nx from pyomo.core import Constraint, Objective, Var, ComponentMap, SortComponents -from pyomo.core.expr.current import identify_variables +from pyomo.core.expr import identify_variables from pyomo.contrib.community_detection.event_log import _event_log -def generate_model_graph(model, type_of_graph, with_objective=True, weighted_graph=True, - use_only_active_components=True): +def generate_model_graph( + model, + type_of_graph, + with_objective=True, + weighted_graph=True, + use_only_active_components=True, +): """ Creates a networkX graph of nodes and edges based on a Pyomo optimization model @@ -53,44 +58,73 @@ def generate_model_graph(model, type_of_graph, with_objective=True, weighted_gra # without edge weights, because edge weights are not useful for this bipartite graph) edge_set = set() - bipartite_model_graph = nx.Graph() # Initialize NetworkX graph for the bipartite graph - constraint_variable_map = {} # Initialize map of the variables in constraint equations + bipartite_model_graph = ( + nx.Graph() + ) # Initialize NetworkX graph for the bipartite graph + constraint_variable_map = ( + {} + ) # Initialize map of the variables in constraint equations # Make a dict of all the components we need for the NetworkX graph (since we cannot use the components directly # in the NetworkX graph) if with_objective: - component_number_map = ComponentMap((component, number) for number, component in enumerate( - model.component_data_objects(ctype=(Constraint, Var, Objective), active=use_only_active_components, - descend_into=True, - sort=SortComponents.deterministic))) + component_number_map = ComponentMap( + (component, number) + for number, component in enumerate( + model.component_data_objects( + ctype=(Constraint, Var, Objective), + active=use_only_active_components, + descend_into=True, + sort=SortComponents.deterministic, + ) + ) + ) else: - component_number_map = ComponentMap((component, number) for number, component in enumerate( - model.component_data_objects(ctype=(Constraint, Var), active=use_only_active_components, descend_into=True, - sort=SortComponents.deterministic))) + component_number_map = ComponentMap( + (component, number) + for number, component in enumerate( + model.component_data_objects( + ctype=(Constraint, Var), + active=use_only_active_components, + descend_into=True, + sort=SortComponents.deterministic, + ) + ) + ) # Create the reverse of component_number_map, which will be used in detect_communities to convert the node numbers # to their corresponding Pyomo modeling components - number_component_map = dict((number, comp) for comp, number in component_number_map.items()) + number_component_map = dict( + (number, comp) for comp, number in component_number_map.items() + ) # Add the components as nodes to the bipartite graph - bipartite_model_graph.add_nodes_from([node_number for node_number in range(len(component_number_map))]) + bipartite_model_graph.add_nodes_from( + [node_number for node_number in range(len(component_number_map))] + ) # Loop through all constraints in the Pyomo model to determine what edges need to be created - for model_constraint in model.component_data_objects(ctype=Constraint, active=use_only_active_components, - descend_into=True): + for model_constraint in model.component_data_objects( + ctype=Constraint, active=use_only_active_components, descend_into=True + ): numbered_constraint = component_number_map[model_constraint] # Create a list of the variable numbers that occur in the given constraint equation - numbered_variables_in_constraint_equation = [component_number_map[constraint_variable] - for constraint_variable in - identify_variables(model_constraint.body)] + numbered_variables_in_constraint_equation = [ + component_number_map[constraint_variable] + for constraint_variable in identify_variables(model_constraint.body) + ] # Update constraint_variable_map - constraint_variable_map[numbered_constraint] = numbered_variables_in_constraint_equation + constraint_variable_map[ + numbered_constraint + ] = numbered_variables_in_constraint_equation # Create a list of all the edges that need to be created based on the variables in this constraint equation - edges_between_nodes = [(numbered_constraint, numbered_variable_in_constraint) - for numbered_variable_in_constraint in numbered_variables_in_constraint_equation] + edges_between_nodes = [ + (numbered_constraint, numbered_variable_in_constraint) + for numbered_variable_in_constraint in numbered_variables_in_constraint_equation + ] # Update edge_set based on the determined edges between nodes edge_set.update(edges_between_nodes) @@ -98,22 +132,28 @@ def generate_model_graph(model, type_of_graph, with_objective=True, weighted_gra # This if statement will be executed if the user chooses to include the objective function as a node in # the model graph if with_objective: - # Use a loop to account for the possibility of multiple objective functions - for objective_function in model.component_data_objects(ctype=Objective, active=use_only_active_components, - descend_into=True): + for objective_function in model.component_data_objects( + ctype=Objective, active=use_only_active_components, descend_into=True + ): numbered_objective = component_number_map[objective_function] # Create a list of the variable numbers that occur in the given objective function - numbered_variables_in_objective = [component_number_map[objective_variable] - for objective_variable in identify_variables(objective_function)] + numbered_variables_in_objective = [ + component_number_map[objective_variable] + for objective_variable in identify_variables(objective_function) + ] # Update constraint_variable_map - constraint_variable_map[numbered_objective] = numbered_variables_in_objective + constraint_variable_map[ + numbered_objective + ] = numbered_variables_in_objective # Create a list of all the edges that need to be created based on the variables in the objective function - edges_between_nodes = [(numbered_objective, numbered_variable_in_objective) - for numbered_variable_in_objective in numbered_variables_in_objective] + edges_between_nodes = [ + (numbered_objective, numbered_variable_in_objective) + for numbered_variable_in_objective in numbered_variables_in_objective + ] # Update edge_set based on the determined edges between nodes edge_set.update(edges_between_nodes) @@ -122,9 +162,17 @@ def generate_model_graph(model, type_of_graph, with_objective=True, weighted_gra # sorting prevents any unpredictable changes) bipartite_model_graph.add_edges_from(sorted(edge_set)) - if type_of_graph == 'bipartite': # This is the case where the user wants a bipartite graph, which we made above + if ( + type_of_graph == 'bipartite' + ): # This is the case where the user wants a bipartite graph, which we made above # Log important information with the following logger function - _event_log(model, bipartite_model_graph, set(constraint_variable_map), type_of_graph, with_objective) + _event_log( + model, + bipartite_model_graph, + set(constraint_variable_map), + type_of_graph, + with_objective, + ) # Return the bipartite NetworkX graph, the dictionary of node numbers mapped to their respective Pyomo # components, and the map of constraints to the variables they contain @@ -142,9 +190,13 @@ def generate_model_graph(model, type_of_graph, with_objective=True, weighted_gra try: if weighted_graph: - projected_model_graph = nx.bipartite.weighted_projected_graph(bipartite_model_graph, graph_nodes) + projected_model_graph = nx.bipartite.weighted_projected_graph( + bipartite_model_graph, graph_nodes + ) else: - projected_model_graph = nx.bipartite.projected_graph(bipartite_model_graph, graph_nodes) + projected_model_graph = nx.bipartite.projected_graph( + bipartite_model_graph, graph_nodes + ) except nx.exception.NetworkXAlgorithmError: # See Pyomo #2413: networkx now raises exceptions for invalid # projections. This restores the (probably invalid) previous @@ -152,7 +204,13 @@ def generate_model_graph(model, type_of_graph, with_objective=True, weighted_gra projected_model_graph = nx.Graph() # Log important information with the following logger function - _event_log(model, projected_model_graph, set(constraint_variable_map), type_of_graph, with_objective) + _event_log( + model, + projected_model_graph, + set(constraint_variable_map), + type_of_graph, + with_objective, + ) # Return the projected NetworkX graph, the dictionary of node numbers mapped to their respective Pyomo # components, and the map of constraints to the variables they contain diff --git a/pyomo/contrib/community_detection/detection.py b/pyomo/contrib/community_detection/detection.py index 5976a2f3c89..5751f54e9c1 100644 --- a/pyomo/contrib/community_detection/detection.py +++ b/pyomo/contrib/community_detection/detection.py @@ -10,10 +10,17 @@ from logging import getLogger from pyomo.common.dependencies import attempt_import -from pyomo.core import ConcreteModel, ComponentMap, Block, Var, Constraint, Objective, ConstraintList +from pyomo.core import ( + ConcreteModel, + ComponentMap, + Block, + Var, + Constraint, + Objective, + ConstraintList, +) from pyomo.core.base.objective import _GeneralObjectiveData -from pyomo.core.expr.current import identify_variables -from pyomo.core.expr.visitor import replace_expressions +from pyomo.core.expr.visitor import replace_expressions, identify_variables from pyomo.contrib.community_detection.community_graph import generate_model_graph from pyomo.common.dependencies import networkx as nx from pyomo.common.dependencies.matplotlib import pyplot as plt @@ -25,11 +32,19 @@ # Attempt import of louvain community detection package community_louvain, community_louvain_available = attempt_import( - 'community', error_message="Could not import the 'community' library, available via 'python-louvain' on PyPI.") - - -def detect_communities(model, type_of_community_map='constraint', with_objective=True, weighted_graph=True, - random_seed=None, use_only_active_components=True): + 'community', + error_message="Could not import the 'community' library, available via 'python-louvain' on PyPI.", +) + + +def detect_communities( + model, + type_of_community_map='constraint', + with_objective=True, + weighted_graph=True, + random_seed=None, + use_only_active_components=True, +): """ Detects communities in a Pyomo optimization model @@ -70,43 +85,63 @@ def detect_communities(model, type_of_community_map='constraint', with_objective # Check that all arguments are of the correct type if not isinstance(model, ConcreteModel): - raise TypeError("Invalid model: 'model=%s' - model must be an instance of ConcreteModel" % model) + raise TypeError( + "Invalid model: 'model=%s' - model must be an instance of ConcreteModel" + % model + ) if type_of_community_map not in ('bipartite', 'constraint', 'variable'): raise TypeError( "Invalid value for type_of_community_map: 'type_of_community_map=%s' - " - "Valid values: 'bipartite', 'constraint', 'variable'" % type_of_community_map) + "Valid values: 'bipartite', 'constraint', 'variable'" + % type_of_community_map + ) if type(with_objective) != bool: raise TypeError( - "Invalid value for with_objective: 'with_objective=%s' - with_objective must be a Boolean" % with_objective) + "Invalid value for with_objective: 'with_objective=%s' - with_objective must be a Boolean" + % with_objective + ) if type(weighted_graph) != bool: raise TypeError( - "Invalid value for weighted_graph: 'weighted_graph=%s' - weighted_graph must be a Boolean" % weighted_graph) + "Invalid value for weighted_graph: 'weighted_graph=%s' - weighted_graph must be a Boolean" + % weighted_graph + ) if random_seed is not None: if type(random_seed) != int: raise TypeError( "Invalid value for random_seed: 'random_seed=%s' - " - "random_seed must be a non-negative integer" % random_seed) + "random_seed must be a non-negative integer" % random_seed + ) if random_seed < 0: raise ValueError( "Invalid value for random_seed: 'random_seed=%s' - " - "random_seed must be a non-negative integer" % random_seed) + "random_seed must be a non-negative integer" % random_seed + ) - if use_only_active_components is not True and use_only_active_components is not None: + if ( + use_only_active_components is not True + and use_only_active_components is not None + ): raise TypeError( "Invalid value for use_only_active_components: 'use_only_active_components=%s' - " - "use_only_active_components must be True or None" % use_only_active_components) + "use_only_active_components must be True or None" + % use_only_active_components + ) # Generate model_graph (a NetworkX graph based on the given Pyomo optimization model), # number_component_map (a dictionary to convert the communities into lists of Pyomo components # instead of number values), and constraint_variable_map (a dictionary that maps a constraint to the variables # it contains) model_graph, number_component_map, constraint_variable_map = generate_model_graph( - model, type_of_graph=type_of_community_map, with_objective=with_objective, weighted_graph=weighted_graph, - use_only_active_components=use_only_active_components) + model, + type_of_graph=type_of_community_map, + with_objective=with_objective, + weighted_graph=weighted_graph, + use_only_active_components=use_only_active_components, + ) # # TODO - Add option for other community detection package # # Maybe something like this: @@ -115,12 +150,16 @@ def detect_communities(model, type_of_community_map='constraint', with_objective # Use Louvain community detection to find the communities - this returns a dictionary mapping # individual nodes to their communities - partition_of_graph = community_louvain.best_partition(model_graph, random_state=random_seed) + partition_of_graph = community_louvain.best_partition( + model_graph, random_state=random_seed + ) # Now, use partition_of_graph to create a dictionary (community_map) that maps community keys to the nodes # in each community number_of_communities = len(set(partition_of_graph.values())) - community_map = {nth_community: [] for nth_community in range(number_of_communities)} + community_map = { + nth_community: [] for nth_community in range(number_of_communities) + } for node in partition_of_graph: nth_community = partition_of_graph[node] community_map[nth_community].append(node) @@ -156,10 +195,25 @@ def detect_communities(model, type_of_community_map='constraint', with_objective for community_key in community_map: constraint_list = sorted(community_map[community_key]) - variable_list = [constraint_variable_map[numbered_constraint] for numbered_constraint in constraint_list] - variable_list = sorted(set([node for variable_sublist in variable_list for node in variable_sublist])) - variable_list = [number_component_map[variable] for variable in variable_list] - constraint_list = [number_component_map[constraint] for constraint in constraint_list] + variable_list = [ + constraint_variable_map[numbered_constraint] + for numbered_constraint in constraint_list + ] + variable_list = sorted( + set( + [ + node + for variable_sublist in variable_list + for node in variable_sublist + ] + ) + ) + variable_list = [ + number_component_map[variable] for variable in variable_list + ] + constraint_list = [ + number_component_map[constraint] for constraint in constraint_list + ] community_map[community_key] = (constraint_list, variable_list) elif type_of_community_map == 'variable': @@ -170,11 +224,20 @@ def detect_communities(model, type_of_community_map='constraint', with_objective variable_list = sorted(community_map[community_key]) constraint_list = [] for numbered_variable in variable_list: - constraint_list.extend([constraint_key for constraint_key in constraint_variable_map if - numbered_variable in constraint_variable_map[constraint_key]]) + constraint_list.extend( + [ + constraint_key + for constraint_key in constraint_variable_map + if numbered_variable in constraint_variable_map[constraint_key] + ] + ) constraint_list = sorted(set(constraint_list)) - constraint_list = [number_component_map[constraint] for constraint in constraint_list] - variable_list = [number_component_map[variable] for variable in variable_list] + constraint_list = [ + number_component_map[constraint] for constraint in constraint_list + ] + variable_list = [ + number_component_map[variable] for variable in variable_list + ] community_map[community_key] = (constraint_list, variable_list) # Thus, each key in community_map now maps to a tuple of two lists, a constraint list and a variable list (in that @@ -185,14 +248,26 @@ def detect_communities(model, type_of_community_map='constraint', with_objective if number_of_communities == 0: logger.error("in detect_communities: Empty community map was returned") if number_of_communities == 1: - logger.warning("Community detection found that with the given parameters, the model could not be decomposed - " - "only one community was found") + logger.warning( + "Community detection found that with the given parameters, the model could not be decomposed - " + "only one community was found" + ) # Return an instance of CommunityMap class which contains the community_map along with other relevant information # for the community_map - return CommunityMap(community_map, type_of_community_map, with_objective, weighted_graph, random_seed, - use_only_active_components, model, model_graph, number_component_map, constraint_variable_map, - partition_of_graph) + return CommunityMap( + community_map, + type_of_community_map, + with_objective, + weighted_graph, + random_seed, + use_only_active_components, + model, + model_graph, + number_component_map, + constraint_variable_map, + partition_of_graph, + ) class CommunityMap(object): @@ -209,9 +284,20 @@ class CommunityMap(object): visualize_model_graph """ - def __init__(self, community_map, type_of_community_map, with_objective, weighted_graph, random_seed, - use_only_active_components, model, graph, graph_node_mapping, constraint_variable_map, - graph_partition): + def __init__( + self, + community_map, + type_of_community_map, + with_objective, + weighted_graph, + random_seed, + use_only_active_components, + model, + graph, + graph_node_mapping, + constraint_variable_map, + graph_partition, + ): """ Constructor method for the CommunityMap class @@ -280,8 +366,10 @@ def __str__(self): # Create str_community_map and give it values that are the strings of the components in community_map str_community_map = dict() for key in self.community_map: - str_community_map[key] = ([str(component) for component in self.community_map[key][0]], - [str(component) for component in self.community_map[key][1]]) + str_community_map[key] = ( + [str(component) for component in self.community_map[key][0]], + [str(component) for component in self.community_map[key][1]], + ) # Return str_community_map, which is identical to community_map except it has the strings of all of the Pyomo # components instead of the actual components @@ -314,7 +402,9 @@ def values(self): def items(self): return self.community_map.items() - def visualize_model_graph(self, type_of_graph='constraint', filename=None, pos=None): + def visualize_model_graph( + self, type_of_graph='constraint', filename=None, pos=None + ): """ This function draws a graph of the communities for a Pyomo model. @@ -346,12 +436,15 @@ def visualize_model_graph(self, type_of_graph='constraint', filename=None, pos=N # Check that all arguments are of the correct type - assert type_of_graph in ('bipartite', 'constraint', 'variable'), \ - "Invalid graph type specified: 'type_of_graph=%s' - Valid values: " \ + assert type_of_graph in ('bipartite', 'constraint', 'variable'), ( + "Invalid graph type specified: 'type_of_graph=%s' - Valid values: " "'bipartite', 'constraint', 'variable'" % type_of_graph + ) - assert isinstance(filename, (type(None), str)), "Invalid value for filename: 'filename=%s' - filename " \ - "must be a string" % filename + assert isinstance(filename, (type(None), str)), ( + "Invalid value for filename: 'filename=%s' - filename " + "must be a string" % filename + ) # No assert statement for pos; the NetworkX function can handle issues with the pos argument @@ -360,18 +453,31 @@ def visualize_model_graph(self, type_of_graph='constraint', filename=None, pos=N if type_of_graph != self.type_of_community_map: # Use the generate_model_graph function to create a NetworkX graph of the given model (along with # number_component_map and constraint_variable_map, which will be used to help with drawing the graph) - model_graph, number_component_map, constraint_variable_map = generate_model_graph( - self.model, type_of_graph=type_of_graph, with_objective=self.with_objective, - weighted_graph=self.weighted_graph, use_only_active_components=self.use_only_active_components) + ( + model_graph, + number_component_map, + constraint_variable_map, + ) = generate_model_graph( + self.model, + type_of_graph=type_of_graph, + with_objective=self.with_objective, + weighted_graph=self.weighted_graph, + use_only_active_components=self.use_only_active_components, + ) else: # This is the case where, as mentioned above, we can use the networkX graph that was made to create # the CommunityMap object - model_graph, number_component_map, constraint_variable_map = self.graph, self.graph_node_mapping, \ - self.constraint_variable_map + model_graph, number_component_map, constraint_variable_map = ( + self.graph, + self.graph_node_mapping, + self.constraint_variable_map, + ) # This line creates the "reverse" of the number_component_map above, since mapping the Pyomo # components to their nodes in the networkX graph is more convenient in this function - component_number_map = ComponentMap((comp, number) for number, comp in number_component_map.items()) + component_number_map = ComponentMap( + (comp, number) for number, comp in number_component_map.items() + ) # Create a deep copy of the community_map attribute to avoid destructively modifying it numbered_community_map = copy.deepcopy(self.community_map) @@ -380,15 +486,25 @@ def visualize_model_graph(self, type_of_graph='constraint', filename=None, pos=N # numbers that correspond to their nodes/edges in the NetworkX graph, model_graph for key in self.community_map: numbered_community_map[key] = ( - [component_number_map[component] for component in self.community_map[key][0]], - [component_number_map[component] for component in self.community_map[key][1]]) + [ + component_number_map[component] + for component in self.community_map[key][0] + ], + [ + component_number_map[component] + for component in self.community_map[key][1] + ], + ) # Based on type_of_graph, which specifies what Pyomo modeling components are to be drawn as nodes in the graph # illustration, we will now get the node list and the color list, which describes how to color nodes # according to their communities (which is based on community_map) if type_of_graph == 'bipartite': - list_of_node_lists = [list_of_nodes for list_tuple in numbered_community_map.values() for list_of_nodes in - list_tuple] + list_of_node_lists = [ + list_of_nodes + for list_tuple in numbered_community_map.values() + for list_of_nodes in list_tuple + ] # list_of_node_lists is (as it implies) a list of lists, so we will use the list comprehension # below to flatten the list and get our one-dimensional node list @@ -402,7 +518,9 @@ def visualize_model_graph(self, type_of_graph='constraint', filename=None, pos=N not_found = True for community_key in numbered_community_map: if not_found and node in ( - numbered_community_map[community_key][0] + numbered_community_map[community_key][1]): + numbered_community_map[community_key][0] + + numbered_community_map[community_key][1] + ): color_list.append(community_key) not_found = False @@ -413,16 +531,21 @@ def visualize_model_graph(self, type_of_graph='constraint', filename=None, pos=N # consistent with the else case top_nodes = nx.bipartite.sets(model_graph)[1] else: - top_nodes = {node for node in model_graph.nodes() if node in constraint_variable_map} + top_nodes = { + node + for node in model_graph.nodes() + if node in constraint_variable_map + } if pos is None: # The case where the user has not provided their own layout pos = nx.bipartite_layout(model_graph, top_nodes) else: # This covers the case that type_of_community_map is 'constraint' or 'variable' - # Constraints are in the first list of the tuples in community map and variables are in the second list position = 0 if type_of_graph == 'constraint' else 1 - list_of_node_lists = list(i[position] for i in numbered_community_map.values()) + list_of_node_lists = list( + i[position] for i in numbered_community_map.values() + ) # list_of_node_lists is (as it implies) a list of lists, so we will use the list comprehension # below to flatten the list and get our one-dimensional node list @@ -435,7 +558,10 @@ def visualize_model_graph(self, type_of_graph='constraint', filename=None, pos=N for node in node_list: not_found = True for community_key in numbered_community_map: - if not_found and node in numbered_community_map[community_key][position]: + if ( + not_found + and node in numbered_community_map[community_key][position] + ): color_list.append(community_key) not_found = False @@ -448,14 +574,23 @@ def visualize_model_graph(self, type_of_graph='constraint', filename=None, pos=N # Create the figure and draw the graph fig = plt.figure() - nx.draw_networkx_nodes(model_graph, pos, nodelist=node_list, node_size=40, cmap=color_map, - node_color=color_list) + nx.draw_networkx_nodes( + model_graph, + pos, + nodelist=node_list, + node_size=40, + cmap=color_map, + node_color=color_list, + ) nx.draw_networkx_edges(model_graph, pos, alpha=0.5) # Make the main title graph_type = type_of_graph.capitalize() community_map_type = self.type_of_community_map.capitalize() - main_graph_title = "%s graph - colored using %s community map" % (graph_type, community_map_type) + main_graph_title = "%s graph - colored using %s community map" % ( + graph_type, + community_map_type, + ) main_font_size = 14 plt.suptitle(main_graph_title, fontsize=main_font_size) @@ -464,7 +599,8 @@ def visualize_model_graph(self, type_of_graph='constraint', filename=None, pos=N subtitle_naming_dict = { 'bipartite': 'Nodes are variables and constraints & Edges are variables in a constraint', 'constraint': 'Nodes are constraints & Edges are common variables', - 'variable': 'Nodes are variables & Edges are shared constraints'} + 'variable': 'Nodes are variables & Edges are shared constraints', + } # Make the subtitle subtitle_font_size = 11 @@ -496,7 +632,9 @@ def generate_structured_model(self): structured_model = ConcreteModel() # Create N blocks (where N is the number of communities found within the model) - structured_model.b = Block([0, len(self.community_map) - 1, 1]) # values given for (start, stop, step) + structured_model.b = Block( + [0, len(self.community_map) - 1, 1] + ) # values given for (start, stop, step) # Initialize a ComponentMap that will map a variable from the model (for example, old_model.x1) used to # create the CommunityMap to a list of variables in various blocks that were created based on this @@ -517,11 +655,15 @@ def generate_structured_model(self): for stored_variable in variables_in_community: # Construct a new_variable whose attributes are determined by querying the variable from the # original model - new_variable = Var(domain=stored_variable.domain, bounds=stored_variable.bounds) + new_variable = Var( + domain=stored_variable.domain, bounds=stored_variable.bounds + ) # Add this new_variable to its block/community and name it using the string of the variable from the # original model - structured_model.b[community_key].add_component(str(stored_variable), new_variable) + structured_model.b[community_key].add_component( + str(stored_variable), new_variable + ) # Since there could be multiple variables 'x1' (such as # structured_model.b[0].x1, structured_model.b[3].x1, etc), we need to create equality constraints @@ -529,8 +671,9 @@ def generate_structured_model(self): # Here we update blocked_variable_map to keep track of what equality constraints need to be made variable_in_new_model = structured_model.find_component(new_variable) - blocked_variable_map[stored_variable] = blocked_variable_map.get(stored_variable, - []) + [variable_in_new_model] + blocked_variable_map[stored_variable] = blocked_variable_map.get( + stored_variable, [] + ) + [variable_in_new_model] # Now that we have all of our variables within the model, we will initialize a dictionary that used to # replace variables within constraints to other variables (in our case, this will convert variables from the @@ -543,10 +686,10 @@ def generate_structured_model(self): # Loop through all of the constraints (from the original model) in the given community for stored_constraint in constraints_in_community: - # Now, loop through all of the variables within the given constraint expression - for variable_in_stored_constraint in identify_variables(stored_constraint.expr): - + for variable_in_stored_constraint in identify_variables( + stored_constraint.expr + ): # Loop through each of the "blocked" variables that a variable is mapped to and update # replace_variables_in_expression_map if a variable has a "blocked" form in the given community @@ -555,48 +698,77 @@ def generate_structured_model(self): # blocked versions of the variable x1 exist (which depends on the community map)) variable_in_current_block = False - for blocked_variable in blocked_variable_map[variable_in_stored_constraint]: + for blocked_variable in blocked_variable_map[ + variable_in_stored_constraint + ]: if 'b[%d]' % community_key in str(blocked_variable): # Update replace_variables_in_expression_map accordingly - replace_variables_in_expression_map[id(variable_in_stored_constraint)] = blocked_variable + replace_variables_in_expression_map[ + id(variable_in_stored_constraint) + ] = blocked_variable variable_in_current_block = True if not variable_in_current_block: # Create a version of the given variable outside of blocks then add it to # replace_variables_in_expression_map - new_variable = Var(domain=variable_in_stored_constraint.domain, - bounds=variable_in_stored_constraint.bounds) + new_variable = Var( + domain=variable_in_stored_constraint.domain, + bounds=variable_in_stored_constraint.bounds, + ) # Add the new variable just as we did above (but now it is not in any blocks) - structured_model.add_component(str(variable_in_stored_constraint), new_variable) + structured_model.add_component( + str(variable_in_stored_constraint), new_variable + ) # Update blocked_variable_map to keep track of what equality constraints need to be made - variable_in_new_model = structured_model.find_component(new_variable) - blocked_variable_map[variable_in_stored_constraint] = blocked_variable_map.get( - variable_in_stored_constraint, []) + [variable_in_new_model] + variable_in_new_model = structured_model.find_component( + new_variable + ) + blocked_variable_map[ + variable_in_stored_constraint + ] = blocked_variable_map.get( + variable_in_stored_constraint, [] + ) + [ + variable_in_new_model + ] # Update replace_variables_in_expression_map accordingly - replace_variables_in_expression_map[id(variable_in_stored_constraint)] = variable_in_new_model + replace_variables_in_expression_map[ + id(variable_in_stored_constraint) + ] = variable_in_new_model # TODO - Is there a better way to check whether something is actually an objective? (as done below) # Check to see whether 'stored_constraint' is actually an objective (since constraints and objectives # grouped together) - if self.with_objective and isinstance(stored_constraint, (_GeneralObjectiveData, Objective)): + if self.with_objective and isinstance( + stored_constraint, (_GeneralObjectiveData, Objective) + ): # If the constraint is actually an objective, we add it to the block as an objective new_objective = Objective( - expr=replace_expressions(stored_constraint.expr, replace_variables_in_expression_map)) - structured_model.b[community_key].add_component(str(stored_constraint), new_objective) + expr=replace_expressions( + stored_constraint.expr, replace_variables_in_expression_map + ) + ) + structured_model.b[community_key].add_component( + str(stored_constraint), new_objective + ) else: # Construct a constraint based on the expression within stored_constraint and the dict we have # created for the purpose of replacing the variables within the constraint expression new_constraint = Constraint( - expr=replace_expressions(stored_constraint.expr, replace_variables_in_expression_map)) + expr=replace_expressions( + stored_constraint.expr, replace_variables_in_expression_map + ) + ) # Add this new constraint to the corresponding community/block with its name as the string of the # constraint from the original model - structured_model.b[community_key].add_component(str(stored_constraint), new_constraint) + structured_model.b[community_key].add_component( + str(stored_constraint), new_constraint + ) # If with_objective was set to False, that means we might have missed an objective function within the # original model @@ -604,45 +776,68 @@ def generate_structured_model(self): # Construct a new dictionary for replacing the variables (replace_variables_in_objective_map) which will # be specific to the variables in the objective function, since there is the possibility that the # objective contains variables we have not yet seen (and thus not yet added to our new model) - for objective_function in self.model.component_data_objects(ctype=Objective, - active=self.use_only_active_components, - descend_into=True): - + for objective_function in self.model.component_data_objects( + ctype=Objective, + active=self.use_only_active_components, + descend_into=True, + ): for variable_in_objective in identify_variables(objective_function): # Add all of the variables in the objective function (not within any blocks) # Check to make sure a form of the variable has not already been made outside of the blocks - if structured_model.find_component(str(variable_in_objective)) is None: - - new_variable = Var(domain=variable_in_objective.domain, bounds=variable_in_objective.bounds) - structured_model.add_component(str(variable_in_objective), new_variable) + if ( + structured_model.find_component(str(variable_in_objective)) + is None + ): + new_variable = Var( + domain=variable_in_objective.domain, + bounds=variable_in_objective.bounds, + ) + structured_model.add_component( + str(variable_in_objective), new_variable + ) # Again we update blocked_variable_map to keep track of what # equality constraints need to be made - variable_in_new_model = structured_model.find_component(new_variable) - blocked_variable_map[variable_in_objective] = blocked_variable_map.get( - variable_in_objective, []) + [variable_in_new_model] + variable_in_new_model = structured_model.find_component( + new_variable + ) + blocked_variable_map[ + variable_in_objective + ] = blocked_variable_map.get(variable_in_objective, []) + [ + variable_in_new_model + ] # Update the dictionary that we will use to replace the variables - replace_variables_in_expression_map[id(variable_in_objective)] = variable_in_new_model + replace_variables_in_expression_map[ + id(variable_in_objective) + ] = variable_in_new_model else: - for version_of_variable in blocked_variable_map[variable_in_objective]: + for version_of_variable in blocked_variable_map[ + variable_in_objective + ]: if 'b[' not in str(version_of_variable): - replace_variables_in_expression_map[id(variable_in_objective)] = version_of_variable + replace_variables_in_expression_map[ + id(variable_in_objective) + ] = version_of_variable # Now we will construct a new objective function based on the one from the original model and then # add it to the new model just as we have done before new_objective = Objective( - expr=replace_expressions(objective_function.expr, replace_variables_in_expression_map)) + expr=replace_expressions( + objective_function.expr, replace_variables_in_expression_map + ) + ) structured_model.add_component(str(objective_function), new_objective) # Now, we need to create equality constraints for all of the different "versions" of a variable (such # as x1, b[0].x1, b[2].x2, etc.) # Create a constraint list for the equality constraints - structured_model.equality_constraint_list = ConstraintList(doc="Equality Constraints for the different " - "forms of a given variable") + structured_model.equality_constraint_list = ConstraintList( + doc="Equality Constraints for the different forms of a given variable" + ) # Loop through blocked_variable_map and create constraints accordingly for variable, duplicate_variables in blocked_variable_map.items(): @@ -654,7 +849,9 @@ def generate_structured_model(self): # Loop through the list of two-variable tuples and create an equality constraint for those two variables for variable_1, variable_2 in equalities_to_make: - structured_model.equality_constraint_list.add(expr=variable_1 == variable_2) + structured_model.equality_constraint_list.add( + expr=variable_1 == variable_2 + ) # Return 'structured_model', which is essentially identical to the original model but now has all of the # variables, constraints, and objectives placed into blocks based on the nature of the CommunityMap diff --git a/pyomo/contrib/community_detection/event_log.py b/pyomo/contrib/community_detection/event_log.py index 5a0512b58f3..30e28257de8 100644 --- a/pyomo/contrib/community_detection/event_log.py +++ b/pyomo/contrib/community_detection/event_log.py @@ -39,15 +39,36 @@ def _event_log(model, model_graph, constraint_set, type_of_graph, with_objective """ # Collect some information that will be useful for the logger - all_variables_count = len(list(model.component_data_objects(ctype=Var, descend_into=True))) - - active_constraints_count = len(list(model.component_data_objects(ctype=Constraint, active=True, descend_into=True))) - all_constraints_count = len(list(model.component_data_objects(ctype=Constraint, descend_into=True))) - - active_objectives_count = len(list(model.component_data_objects(ctype=Objective, active=True, descend_into=True))) - all_objectives_count = len(list(model.component_data_objects(ctype=Objective, descend_into=True))) - - number_of_nodes, number_of_edges = model_graph.number_of_nodes(), model_graph.number_of_edges() + all_variables_count = len( + list(model.component_data_objects(ctype=Var, descend_into=True)) + ) + + active_constraints_count = len( + list( + model.component_data_objects( + ctype=Constraint, active=True, descend_into=True + ) + ) + ) + all_constraints_count = len( + list(model.component_data_objects(ctype=Constraint, descend_into=True)) + ) + + active_objectives_count = len( + list( + model.component_data_objects( + ctype=Objective, active=True, descend_into=True + ) + ) + ) + all_objectives_count = len( + list(model.component_data_objects(ctype=Objective, descend_into=True)) + ) + + number_of_nodes, number_of_edges = ( + model_graph.number_of_nodes(), + model_graph.number_of_edges(), + ) # Log this information as info logger.info("%s variables found in the model" % all_variables_count) @@ -86,29 +107,46 @@ def _event_log(model, model_graph, constraint_set, type_of_graph, with_objective else: # If the graph is not connected then we must construct the constraint node set # and variable node set manually - constraint_nodes = {node for node in model_graph.nodes() if node in constraint_set} + constraint_nodes = { + node for node in model_graph.nodes() if node in constraint_set + } variable_nodes = set(model_graph) - constraint_nodes - constraint_density = round(nx.bipartite.density(model_graph, constraint_nodes), 2) - variable_density = round(nx.bipartite.density(model_graph, variable_nodes), 2) + constraint_density = round( + nx.bipartite.density(model_graph, constraint_nodes), 2 + ) + variable_density = round( + nx.bipartite.density(model_graph, variable_nodes), 2 + ) - if constraint_density == variable_density == 1.0: # If the graph is complete, both will equal 1 - logger.info("The bipartite graph constructed from the model is complete (graph density equals 1)") + if ( + constraint_density == variable_density == 1.0 + ): # If the graph is complete, both will equal 1 + logger.info( + "The bipartite graph constructed from the model is complete (graph density equals 1)" + ) else: logger.info( - "For the bipartite graph constructed from the model, the density for constraint nodes is %s" % - constraint_density) + "For the bipartite graph constructed from the model, the density for constraint nodes is %s" + % constraint_density + ) logger.info( - "For the bipartite graph constructed from the model, the density for variable nodes is %s" % - variable_density) + "For the bipartite graph constructed from the model, the density for variable nodes is %s" + % variable_density + ) else: graph_density = round(nx.density(model_graph), 2) if graph_density == 1.0: - logger.info("The graph constructed from the model is complete (graph density equals 1)") + logger.info( + "The graph constructed from the model is complete (graph density equals 1)" + ) else: - logger.info("The graph constructed from the model has a density of %s" % graph_density) + logger.info( + "The graph constructed from the model has a density of %s" + % graph_density + ) # Given one of the conditionals below, we will log this information as a warning if all_variables_count == 0: @@ -121,16 +159,24 @@ def _event_log(model, model_graph, constraint_set, type_of_graph, with_objective if all_objectives_count == 0: if with_objective: - logger.info("Parameter 'with_objective' is True but no objective(s) found in the model") + logger.info( + "Parameter 'with_objective' is True but no objective(s) found in the model" + ) else: logger.info("No objective(s) found in the model") elif active_objectives_count == 0: if with_objective: - logger.info("Parameter 'with_objective' is True but no active objective(s) found in the model") + logger.info( + "Parameter 'with_objective' is True but no active objective(s) found in the model" + ) else: logger.info("No active objective(s) found in the model") if number_of_nodes == 0: - logger.warning("No nodes were created for the graph (based on the model and the given parameters)") + logger.warning( + "No nodes were created for the graph (based on the model and the given parameters)" + ) if number_of_edges == 0: - logger.warning("No edges were created for the graph (based on the model and the given parameters)") + logger.warning( + "No edges were created for the graph (based on the model and the given parameters)" + ) diff --git a/pyomo/contrib/community_detection/tests/test_detection.py b/pyomo/contrib/community_detection/tests/test_detection.py index 6c41ae92013..724388f9ab6 100644 --- a/pyomo/contrib/community_detection/tests/test_detection.py +++ b/pyomo/contrib/community_detection/tests/test_detection.py @@ -23,16 +23,23 @@ from pyomo.common.log import LoggingIntercept from pyomo.common.tempfiles import TempfileManager from pyomo.environ import ( - ConcreteModel, Constraint, Objective, Var, Integers, minimize, - RangeSet, Block, ConstraintList, + ConcreteModel, + Constraint, + Objective, + Var, + Integers, + minimize, + RangeSet, + Block, + ConstraintList, ) from pyomo.contrib.community_detection.detection import ( - detect_communities, CommunityMap, - community_louvain_available, community_louvain -) -from pyomo.contrib.community_detection.community_graph import ( - generate_model_graph, + detect_communities, + CommunityMap, + community_louvain_available, + community_louvain, ) +from pyomo.contrib.community_detection.community_graph import generate_model_graph from pyomo.solvers.tests.models.LP_unbounded import LP_unbounded from pyomo.solvers.tests.models.QP_simple import QP_simple @@ -40,10 +47,12 @@ from pyomo.solvers.tests.models.SOS1_simple import SOS1_simple -@unittest.skipUnless(community_louvain_available, "'community' package from 'python-louvain' is not available.") +@unittest.skipUnless( + community_louvain_available, + "'community' package from 'python-louvain' is not available.", +) @unittest.skipUnless(networkx_available, "networkx is not available.") class TestDecomposition(unittest.TestCase): - def test_communities_1(self): m_class = LP_inactive_index() m_class._generate_model() @@ -51,28 +60,96 @@ def test_communities_1(self): test_community_maps, test_partitions = _collect_community_maps(model) - correct_partitions = [{3: 0, 4: 0, 5: 1, 6: 1, 7: 1, 8: 0, 9: 2, 10: 2, 11: 2}, - {3: 0, 4: 1, 5: 0, 6: 2, 7: 2, 8: 0, 9: 0, 10: 0, 11: 2, 12: 1, 13: 1, 14: 1}, - {3: 0, 4: 0, 5: 1, 6: 1, 7: 1, 8: 0, 9: 2, 10: 2, 11: 2}, - {3: 0, 4: 1, 5: 0, 6: 0, 7: 0, 8: 2, 9: 2, 10: 2, 11: 0, 12: 1, 13: 1, 14: 1}, - {3: 0, 4: 1, 5: 1, 6: 0, 7: 2}, {3: 0, 4: 1, 5: 0, 6: 0, 7: 1, 8: 0}, - {3: 0, 4: 1, 5: 1, 6: 0, 7: 2}, {3: 0, 4: 1, 5: 0, 6: 0, 7: 1, 8: 0}, - {0: 0, 1: 1, 2: 2}, {0: 0, 1: 0, 2: 0}, {0: 0, 1: 1, 2: 2}, {0: 0, 1: 0, 2: 0}, - {0: 0, 1: 1, 2: 2}, {0: 0, 1: 0, 2: 0}, {0: 0, 1: 1, 2: 2}, {0: 0, 1: 0, 2: 0}, - {0: 0, 1: 1, 2: 2, 3: 0, 4: 0, 5: 1, 6: 1, 7: 1, 8: 0, 9: 2, 10: 2, 11: 2}, - {0: 0, 1: 1, 2: 2, 3: 1, 4: 2, 5: 0, 6: 0, 7: 0, 8: 1, 9: 1, 10: 1, 11: 0, - 12: 2, 13: 2, 14: 2}, - {0: 0, 1: 1, 2: 2, 3: 0, 4: 0, 5: 1, 6: 1, 7: 1, 8: 0, 9: 2, 10: 2, 11: 2}, - {0: 0, 1: 1, 2: 2, 3: 1, 4: 2, 5: 0, 6: 0, 7: 0, 8: 1, 9: 1, 10: 1, 11: 0, - 12: 2, 13: 2, 14: 2}, - {0: 0, 1: 1, 2: 2, 3: 0, 4: 1, 5: 1, 6: 0, 7: 2}, - {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1, 6: 1, 7: 0, 8: 2}, - {0: 0, 1: 1, 2: 2, 3: 0, 4: 1, 5: 1, 6: 0, 7: 2}, - {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1, 6: 1, 7: 0, 8: 2}] + correct_partitions = [ + {3: 0, 4: 0, 5: 1, 6: 1, 7: 1, 8: 0, 9: 2, 10: 2, 11: 2}, + { + 3: 0, + 4: 1, + 5: 0, + 6: 2, + 7: 2, + 8: 0, + 9: 0, + 10: 0, + 11: 2, + 12: 1, + 13: 1, + 14: 1, + }, + {3: 0, 4: 0, 5: 1, 6: 1, 7: 1, 8: 0, 9: 2, 10: 2, 11: 2}, + { + 3: 0, + 4: 1, + 5: 0, + 6: 0, + 7: 0, + 8: 2, + 9: 2, + 10: 2, + 11: 0, + 12: 1, + 13: 1, + 14: 1, + }, + {3: 0, 4: 1, 5: 1, 6: 0, 7: 2}, + {3: 0, 4: 1, 5: 0, 6: 0, 7: 1, 8: 0}, + {3: 0, 4: 1, 5: 1, 6: 0, 7: 2}, + {3: 0, 4: 1, 5: 0, 6: 0, 7: 1, 8: 0}, + {0: 0, 1: 1, 2: 2}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1, 2: 2}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1, 2: 2}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1, 2: 2}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 0, 5: 1, 6: 1, 7: 1, 8: 0, 9: 2, 10: 2, 11: 2}, + { + 0: 0, + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 0, + 6: 0, + 7: 0, + 8: 1, + 9: 1, + 10: 1, + 11: 0, + 12: 2, + 13: 2, + 14: 2, + }, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 0, 5: 1, 6: 1, 7: 1, 8: 0, 9: 2, 10: 2, 11: 2}, + { + 0: 0, + 1: 1, + 2: 2, + 3: 1, + 4: 2, + 5: 0, + 6: 0, + 7: 0, + 8: 1, + 9: 1, + 10: 1, + 11: 0, + 12: 2, + 13: 2, + 14: 2, + }, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 1, 5: 1, 6: 0, 7: 2}, + {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1, 6: 1, 7: 0, 8: 2}, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 1, 5: 1, 6: 0, 7: 2}, + {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1, 6: 1, 7: 0, 8: 2}, + ] if correct_partitions == test_partitions: # Convert test_community_maps to a string because memory locations may vary by OS and PC - str_test_community_maps = [str(community_map) for community_map in test_community_maps] + str_test_community_maps = [ + str(community_map) for community_map in test_community_maps + ] correct_community_maps = [ "{0: (['c1[1]', 'c1[2]', 'c2[2]'], ['x']), 1: (['c1[3]', 'c1[4]', 'c2[1]'], ['y']), " @@ -110,13 +187,18 @@ def test_communities_1(self): "{0: (['c1[2]', 'c2[2]'], ['x']), 1: (['c1[3]', 'c2[1]'], ['y']), 2: (['B[2].c'], ['z'])}", "{0: (['c1[2]', 'c2[2]'], ['x']), 1: (['c1[3]', 'c2[1]'], ['y']), 2: (['obj[2]', 'B[2].c'], ['z'])}", "{0: (['c1[2]', 'c2[2]'], ['x']), 1: (['c1[3]', 'c2[1]'], ['y']), 2: (['B[2].c'], ['z'])}", - "{0: (['c1[2]', 'c2[2]'], ['x']), 1: (['c1[3]', 'c2[1]'], ['y']), 2: (['obj[2]', 'B[2].c'], ['z'])}"] + "{0: (['c1[2]', 'c2[2]'], ['x']), 1: (['c1[3]', 'c2[1]'], ['y']), 2: (['obj[2]', 'B[2].c'], ['z'])}", + ] self.assertEqual(correct_community_maps, str_test_community_maps) # Partition-based diagnostic test - correct_num_communities, correct_num_nodes, test_num_communities, test_num_nodes = \ - _collect_partition_dependent_tests(test_community_maps, test_partitions) + ( + correct_num_communities, + correct_num_nodes, + test_num_communities, + test_num_nodes, + ) = _collect_partition_dependent_tests(test_community_maps, test_partitions) self.assertEqual(correct_num_communities, test_num_communities) self.assertEqual(correct_num_nodes, test_num_nodes) @@ -128,43 +210,75 @@ def test_communities_2(self): test_community_maps, test_partitions = _collect_community_maps(model) - correct_partitions = [{2: 0, 3: 0}, {2: 0, 3: 0, 4: 0, 5: 0}, {2: 0, 3: 0}, {2: 0, 3: 0, 4: 0, 5: 0}, - {2: 0, 3: 0}, {2: 0, 3: 0, 4: 0}, {2: 0, 3: 0}, {2: 0, 3: 0, 4: 0}, {0: 0, 1: 0}, - {0: 0, 1: 0}, {0: 0, 1: 0}, {0: 0, 1: 0}, {0: 0, 1: 0}, {0: 0, 1: 0}, {0: 0, 1: 0}, - {0: 0, 1: 0}, {0: 0, 1: 1, 2: 1, 3: 0}, {0: 0, 1: 1, 2: 1, 3: 0, 4: 1, 5: 0}, - {0: 0, 1: 1, 2: 1, 3: 0}, {0: 0, 1: 1, 2: 1, 3: 0, 4: 1, 5: 0}, {0: 0, 1: 1, 2: 1, 3: 0}, - {0: 0, 1: 1, 2: 1, 3: 1, 4: 0}, {0: 0, 1: 1, 2: 1, 3: 0}, {0: 0, 1: 1, 2: 1, 3: 1, 4: 0}] + correct_partitions = [ + {2: 0, 3: 0}, + {2: 0, 3: 0, 4: 0, 5: 0}, + {2: 0, 3: 0}, + {2: 0, 3: 0, 4: 0, 5: 0}, + {2: 0, 3: 0}, + {2: 0, 3: 0, 4: 0}, + {2: 0, 3: 0}, + {2: 0, 3: 0, 4: 0}, + {0: 0, 1: 0}, + {0: 0, 1: 0}, + {0: 0, 1: 0}, + {0: 0, 1: 0}, + {0: 0, 1: 0}, + {0: 0, 1: 0}, + {0: 0, 1: 0}, + {0: 0, 1: 0}, + {0: 0, 1: 1, 2: 1, 3: 0}, + {0: 0, 1: 1, 2: 1, 3: 0, 4: 1, 5: 0}, + {0: 0, 1: 1, 2: 1, 3: 0}, + {0: 0, 1: 1, 2: 1, 3: 0, 4: 1, 5: 0}, + {0: 0, 1: 1, 2: 1, 3: 0}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 0}, + {0: 0, 1: 1, 2: 1, 3: 0}, + {0: 0, 1: 1, 2: 1, 3: 1, 4: 0}, + ] if correct_partitions == test_partitions: # Convert test_community_maps to a string because memory locations may vary by OS and PC - str_test_community_maps = [str(community_map) for community_map in test_community_maps] - - correct_community_maps = ["{0: (['c1', 'c2'], ['x', 'y'])}", - "{0: (['inactive_obj', 'obj', 'c1', 'c2'], ['x', 'y'])}", - "{0: (['c1', 'c2'], ['x', 'y'])}", - "{0: (['inactive_obj', 'obj', 'c1', 'c2'], ['x', 'y'])}", - "{0: (['c1', 'c2'], ['x', 'y'])}", "{0: (['obj', 'c1', 'c2'], ['x', 'y'])}", - "{0: (['c1', 'c2'], ['x', 'y'])}", "{0: (['obj', 'c1', 'c2'], ['x', 'y'])}", - "{0: (['c1', 'c2'], ['x', 'y'])}", - "{0: (['inactive_obj', 'obj', 'c1', 'c2'], ['x', 'y'])}", - "{0: (['c1', 'c2'], ['x', 'y'])}", - "{0: (['inactive_obj', 'obj', 'c1', 'c2'], ['x', 'y'])}", - "{0: (['c1', 'c2'], ['x', 'y'])}", "{0: (['obj', 'c1', 'c2'], ['x', 'y'])}", - "{0: (['c1', 'c2'], ['x', 'y'])}", "{0: (['obj', 'c1', 'c2'], ['x', 'y'])}", - "{0: (['c2'], ['x']), 1: (['c1'], ['y'])}", - "{0: (['obj', 'c2'], ['x']), 1: (['inactive_obj', 'c1'], ['y'])}", - "{0: (['c2'], ['x']), 1: (['c1'], ['y'])}", - "{0: (['obj', 'c2'], ['x']), 1: (['inactive_obj', 'c1'], ['y'])}", - "{0: (['c2'], ['x']), 1: (['c1'], ['y'])}", - "{0: (['c2'], ['x']), 1: (['obj', 'c1'], ['y'])}", - "{0: (['c2'], ['x']), 1: (['c1'], ['y'])}", - "{0: (['c2'], ['x']), 1: (['obj', 'c1'], ['y'])}"] + str_test_community_maps = [ + str(community_map) for community_map in test_community_maps + ] + + correct_community_maps = [ + "{0: (['c1', 'c2'], ['x', 'y'])}", + "{0: (['inactive_obj', 'obj', 'c1', 'c2'], ['x', 'y'])}", + "{0: (['c1', 'c2'], ['x', 'y'])}", + "{0: (['inactive_obj', 'obj', 'c1', 'c2'], ['x', 'y'])}", + "{0: (['c1', 'c2'], ['x', 'y'])}", + "{0: (['obj', 'c1', 'c2'], ['x', 'y'])}", + "{0: (['c1', 'c2'], ['x', 'y'])}", + "{0: (['obj', 'c1', 'c2'], ['x', 'y'])}", + "{0: (['c1', 'c2'], ['x', 'y'])}", + "{0: (['inactive_obj', 'obj', 'c1', 'c2'], ['x', 'y'])}", + "{0: (['c1', 'c2'], ['x', 'y'])}", + "{0: (['inactive_obj', 'obj', 'c1', 'c2'], ['x', 'y'])}", + "{0: (['c1', 'c2'], ['x', 'y'])}", + "{0: (['obj', 'c1', 'c2'], ['x', 'y'])}", + "{0: (['c1', 'c2'], ['x', 'y'])}", + "{0: (['obj', 'c1', 'c2'], ['x', 'y'])}", + "{0: (['c2'], ['x']), 1: (['c1'], ['y'])}", + "{0: (['obj', 'c2'], ['x']), 1: (['inactive_obj', 'c1'], ['y'])}", + "{0: (['c2'], ['x']), 1: (['c1'], ['y'])}", + "{0: (['obj', 'c2'], ['x']), 1: (['inactive_obj', 'c1'], ['y'])}", + "{0: (['c2'], ['x']), 1: (['c1'], ['y'])}", + "{0: (['c2'], ['x']), 1: (['obj', 'c1'], ['y'])}", + "{0: (['c2'], ['x']), 1: (['c1'], ['y'])}", + "{0: (['c2'], ['x']), 1: (['obj', 'c1'], ['y'])}", + ] self.assertEqual(correct_community_maps, str_test_community_maps) # Partition-based diagnostic test - correct_num_communities, correct_num_nodes, test_num_communities, test_num_nodes = \ - _collect_partition_dependent_tests(test_community_maps, test_partitions) + ( + correct_num_communities, + correct_num_nodes, + test_num_communities, + test_num_nodes, + ) = _collect_partition_dependent_tests(test_community_maps, test_partitions) self.assertEqual(correct_num_communities, test_num_communities) self.assertEqual(correct_num_nodes, test_num_nodes) @@ -176,31 +290,75 @@ def test_communities_3(self): test_community_maps, test_partitions = _collect_community_maps(model) - correct_partitions = [{}, {2: 0}, {}, {2: 0}, {}, {2: 0}, {}, {2: 0}, {0: 0, 1: 1}, {0: 0, 1: 0}, {0: 0, 1: 1}, - {0: 0, 1: 0}, {0: 0, 1: 1}, {0: 0, 1: 0}, {0: 0, 1: 1}, {0: 0, 1: 0}, {0: 0, 1: 1}, - {0: 0, 1: 0, 2: 0}, {0: 0, 1: 1}, {0: 0, 1: 0, 2: 0}, {0: 0, 1: 1}, {0: 0, 1: 0, 2: 0}, - {0: 0, 1: 1}, {0: 0, 1: 0, 2: 0}] + correct_partitions = [ + {}, + {2: 0}, + {}, + {2: 0}, + {}, + {2: 0}, + {}, + {2: 0}, + {0: 0, 1: 1}, + {0: 0, 1: 0}, + {0: 0, 1: 1}, + {0: 0, 1: 0}, + {0: 0, 1: 1}, + {0: 0, 1: 0}, + {0: 0, 1: 1}, + {0: 0, 1: 0}, + {0: 0, 1: 1}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1}, + {0: 0, 1: 0, 2: 0}, + ] if correct_partitions == test_partitions: # Convert test_community_maps to a string because memory locations may vary by OS and PC - str_test_community_maps = [str(community_map) for community_map in test_community_maps] - - correct_community_maps = ['{}', "{0: (['o'], ['x', 'y'])}", '{}', "{0: (['o'], ['x', 'y'])}", '{}', - "{0: (['o'], ['x', 'y'])}", '{}', "{0: (['o'], ['x', 'y'])}", - "{0: ([], ['x']), 1: ([], ['y'])}", "{0: (['o'], ['x', 'y'])}", - "{0: ([], ['x']), 1: ([], ['y'])}", "{0: (['o'], ['x', 'y'])}", - "{0: ([], ['x']), 1: ([], ['y'])}", "{0: (['o'], ['x', 'y'])}", - "{0: ([], ['x']), 1: ([], ['y'])}", "{0: (['o'], ['x', 'y'])}", - "{0: ([], ['x']), 1: ([], ['y'])}", "{0: (['o'], ['x', 'y'])}", - "{0: ([], ['x']), 1: ([], ['y'])}", "{0: (['o'], ['x', 'y'])}", - "{0: ([], ['x']), 1: ([], ['y'])}", "{0: (['o'], ['x', 'y'])}", - "{0: ([], ['x']), 1: ([], ['y'])}", "{0: (['o'], ['x', 'y'])}"] + str_test_community_maps = [ + str(community_map) for community_map in test_community_maps + ] + + correct_community_maps = [ + '{}', + "{0: (['o'], ['x', 'y'])}", + '{}', + "{0: (['o'], ['x', 'y'])}", + '{}', + "{0: (['o'], ['x', 'y'])}", + '{}', + "{0: (['o'], ['x', 'y'])}", + "{0: ([], ['x']), 1: ([], ['y'])}", + "{0: (['o'], ['x', 'y'])}", + "{0: ([], ['x']), 1: ([], ['y'])}", + "{0: (['o'], ['x', 'y'])}", + "{0: ([], ['x']), 1: ([], ['y'])}", + "{0: (['o'], ['x', 'y'])}", + "{0: ([], ['x']), 1: ([], ['y'])}", + "{0: (['o'], ['x', 'y'])}", + "{0: ([], ['x']), 1: ([], ['y'])}", + "{0: (['o'], ['x', 'y'])}", + "{0: ([], ['x']), 1: ([], ['y'])}", + "{0: (['o'], ['x', 'y'])}", + "{0: ([], ['x']), 1: ([], ['y'])}", + "{0: (['o'], ['x', 'y'])}", + "{0: ([], ['x']), 1: ([], ['y'])}", + "{0: (['o'], ['x', 'y'])}", + ] self.assertEqual(correct_community_maps, str_test_community_maps) # Partition-based diagnostic test - correct_num_communities, correct_num_nodes, test_num_communities, test_num_nodes = \ - _collect_partition_dependent_tests(test_community_maps, test_partitions) + ( + correct_num_communities, + correct_num_nodes, + test_num_communities, + test_num_nodes, + ) = _collect_partition_dependent_tests(test_community_maps, test_partitions) self.assertEqual(correct_num_communities, test_num_communities) self.assertEqual(correct_num_nodes, test_num_nodes) @@ -212,52 +370,75 @@ def test_communities_4(self): test_community_maps, test_partitions = _collect_community_maps(model) - correct_partitions = [{3: 0, 4: 1, 5: 0}, {3: 0, 4: 1, 5: 0, 6: 1}, {3: 0, 4: 1, 5: 0}, - {3: 0, 4: 0, 5: 0, 6: 0}, {3: 0, 4: 1, 5: 0}, - {3: 0, 4: 1, 5: 0, 6: 1}, {3: 0, 4: 1, 5: 0}, {3: 0, 4: 0, 5: 0, 6: 0}, - {0: 0, 1: 1, 2: 1}, {0: 0, 1: 0, 2: 0}, - {0: 0, 1: 1, 2: 1}, {0: 0, 1: 0, 2: 0}, {0: 0, 1: 1, 2: 1}, {0: 0, 1: 0, 2: 0}, - {0: 0, 1: 1, 2: 1}, - {0: 0, 1: 0, 2: 0}, {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1}, - {0: 0, 1: 1, 2: 2, 3: 0, 4: 2, 5: 0, 6: 1}, - {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1}, {0: 0, 1: 1, 2: 2, 3: 0, 4: 2, 5: 0, 6: 1}, - {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1}, {0: 0, 1: 1, 2: 2, 3: 0, 4: 2, 5: 0, 6: 1}, - {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1}, {0: 0, 1: 1, 2: 2, 3: 0, 4: 2, 5: 0, 6: 1}] + correct_partitions = [ + {3: 0, 4: 1, 5: 0}, + {3: 0, 4: 1, 5: 0, 6: 1}, + {3: 0, 4: 1, 5: 0}, + {3: 0, 4: 0, 5: 0, 6: 0}, + {3: 0, 4: 1, 5: 0}, + {3: 0, 4: 1, 5: 0, 6: 1}, + {3: 0, 4: 1, 5: 0}, + {3: 0, 4: 0, 5: 0, 6: 0}, + {0: 0, 1: 1, 2: 1}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1, 2: 1}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1, 2: 1}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1, 2: 1}, + {0: 0, 1: 0, 2: 0}, + {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1}, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 2, 5: 0, 6: 1}, + {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1}, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 2, 5: 0, 6: 1}, + {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1}, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 2, 5: 0, 6: 1}, + {0: 0, 1: 1, 2: 2, 3: 2, 4: 0, 5: 1}, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 2, 5: 0, 6: 1}, + ] if correct_partitions == test_partitions: # Convert test_community_maps to a string because memory locations may vary by OS and PC - str_test_community_maps = [str(community_map) for community_map in test_community_maps] - - correct_community_maps = ["{0: (['c1', 'c4'], ['y[1]', 'y[2]']), 1: (['c2'], ['x'])}", - "{0: (['obj', 'c2'], ['x', 'y[1]', 'y[2]']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", - "{0: (['c1', 'c4'], ['y[1]', 'y[2]']), 1: (['c2'], ['x'])}", - "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", - "{0: (['c1', 'c4'], ['y[1]', 'y[2]']), 1: (['c2'], ['x'])}", - "{0: (['obj', 'c2'], ['x', 'y[1]', 'y[2]']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", - "{0: (['c1', 'c4'], ['y[1]', 'y[2]']), 1: (['c2'], ['x'])}", - "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", - "{0: (['c2'], ['x']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", - "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", - "{0: (['c2'], ['x']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", - "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", - "{0: (['c2'], ['x']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", - "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", - "{0: (['c2'], ['x']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", - "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", - "{0: (['c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", - "{0: (['obj', 'c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", - "{0: (['c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", - "{0: (['obj', 'c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", - "{0: (['c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", - "{0: (['obj', 'c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", - "{0: (['c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", - "{0: (['obj', 'c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}"] + str_test_community_maps = [ + str(community_map) for community_map in test_community_maps + ] + + correct_community_maps = [ + "{0: (['c1', 'c4'], ['y[1]', 'y[2]']), 1: (['c2'], ['x'])}", + "{0: (['obj', 'c2'], ['x', 'y[1]', 'y[2]']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", + "{0: (['c1', 'c4'], ['y[1]', 'y[2]']), 1: (['c2'], ['x'])}", + "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", + "{0: (['c1', 'c4'], ['y[1]', 'y[2]']), 1: (['c2'], ['x'])}", + "{0: (['obj', 'c2'], ['x', 'y[1]', 'y[2]']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", + "{0: (['c1', 'c4'], ['y[1]', 'y[2]']), 1: (['c2'], ['x'])}", + "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", + "{0: (['c2'], ['x']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", + "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", + "{0: (['c2'], ['x']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", + "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", + "{0: (['c2'], ['x']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", + "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", + "{0: (['c2'], ['x']), 1: (['c1', 'c4'], ['y[1]', 'y[2]'])}", + "{0: (['obj', 'c1', 'c2', 'c4'], ['x', 'y[1]', 'y[2]'])}", + "{0: (['c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", + "{0: (['obj', 'c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", + "{0: (['c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", + "{0: (['obj', 'c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", + "{0: (['c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", + "{0: (['obj', 'c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", + "{0: (['c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", + "{0: (['obj', 'c2'], ['x']), 1: (['c4'], ['y[1]']), 2: (['c1'], ['y[2]'])}", + ] self.assertEqual(correct_community_maps, str_test_community_maps) # Partition-based diagnostic test - correct_num_communities, correct_num_nodes, test_num_communities, test_num_nodes = \ - _collect_partition_dependent_tests(test_community_maps, test_partitions) + ( + correct_num_communities, + correct_num_nodes, + test_num_communities, + test_num_nodes, + ) = _collect_partition_dependent_tests(test_community_maps, test_partitions) self.assertEqual(correct_num_communities, test_num_communities) self.assertEqual(correct_num_nodes, test_num_nodes) @@ -267,26 +448,38 @@ def test_communities_5(self): test_community_maps, test_partitions = _collect_community_maps(model) - correct_partitions = [{6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, {6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0}, - {6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, {6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0}, - {6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, {6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0}, - {6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, {6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0}, - {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, - {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, - {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, - {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, - {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, - {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 4, 9: 0, 10: 3, 11: 5}, - {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, - {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 4, 9: 0, 10: 3, 11: 5}, - {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, - {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 4, 9: 0, 10: 3, 11: 5}, - {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, - {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 4, 9: 0, 10: 3, 11: 5}] + correct_partitions = [ + {6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, + {6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0}, + {6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, + {6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0}, + {6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, + {6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0}, + {6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, + {6: 0, 7: 0, 8: 0, 9: 0, 10: 0, 11: 0}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, + {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 4, 9: 0, 10: 3, 11: 5}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, + {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 4, 9: 0, 10: 3, 11: 5}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, + {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 4, 9: 0, 10: 3, 11: 5}, + {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0, 10: 0}, + {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 4, 9: 0, 10: 3, 11: 5}, + ] if correct_partitions == test_partitions: # Convert test_community_maps to a string because memory locations may vary by OS and PC - str_test_community_maps = [str(community_map) for community_map in test_community_maps] + str_test_community_maps = [ + str(community_map) for community_map in test_community_maps + ] correct_community_maps = [ "{0: (['c1', 'c2', 'c3', 'c4', 'c5'], ['i1', 'i2', 'i3', 'i4', 'i5', 'i6'])}", @@ -316,13 +509,18 @@ def test_communities_5(self): "3: (['c4'], ['i4']), 4: (['c2'], ['i5']), 5: (['c5'], ['i6'])}", "{0: (['c1', 'c2', 'c3', 'c4', 'c5'], ['i1', 'i2', 'i3', 'i4', 'i5', 'i6'])}", "{0: (['c3'], ['i1']), 1: (['obj'], ['i2']), 2: (['c1'], ['i3']), " - "3: (['c4'], ['i4']), 4: (['c2'], ['i5']), 5: (['c5'], ['i6'])}"] + "3: (['c4'], ['i4']), 4: (['c2'], ['i5']), 5: (['c5'], ['i6'])}", + ] self.assertEqual(correct_community_maps, str_test_community_maps) # Partition-based diagnostic test - correct_num_communities, correct_num_nodes, test_num_communities, test_num_nodes = \ - _collect_partition_dependent_tests(test_community_maps, test_partitions) + ( + correct_num_communities, + correct_num_nodes, + test_num_communities, + test_num_nodes, + ) = _collect_partition_dependent_tests(test_community_maps, test_partitions) self.assertEqual(correct_num_communities, test_num_communities) self.assertEqual(correct_num_nodes, test_num_nodes) @@ -332,50 +530,75 @@ def test_communities_6(self): test_community_maps, test_partitions = _collect_community_maps(model) - correct_partitions = [{4: 0, 5: 1}, {4: 0, 5: 0, 6: 1}, {4: 0, 5: 1}, {4: 0, 5: 0, 6: 1}, {4: 0, 5: 1}, - {4: 0, 5: 0, 6: 1}, {4: 0, 5: 1}, {4: 0, 5: 0, 6: 1}, {0: 0, 1: 0, 2: 1, 3: 1}, - {0: 0, 1: 0, 2: 1, 3: 1}, {0: 0, 1: 0, 2: 1, 3: 1}, {0: 0, 1: 0, 2: 1, 3: 1}, - {0: 0, 1: 0, 2: 1, 3: 1}, {0: 0, 1: 0, 2: 1, 3: 1}, {0: 0, 1: 0, 2: 1, 3: 1}, - {0: 0, 1: 0, 2: 1, 3: 1}, {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1}, {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1}, {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1}, {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1}] + correct_partitions = [ + {4: 0, 5: 1}, + {4: 0, 5: 0, 6: 1}, + {4: 0, 5: 1}, + {4: 0, 5: 0, 6: 1}, + {4: 0, 5: 1}, + {4: 0, 5: 0, 6: 1}, + {4: 0, 5: 1}, + {4: 0, 5: 0, 6: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1}, + ] if correct_partitions == test_partitions: # Convert test_community_maps to a string because memory locations may vary by OS and PC - str_test_community_maps = [str(community_map) for community_map in test_community_maps] - - correct_community_maps = ["{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", - "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}"] + str_test_community_maps = [ + str(community_map) for community_map in test_community_maps + ] + + correct_community_maps = [ + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + "{0: (['obj', 'c1'], ['x1', 'x2']), 1: (['c2'], ['x3', 'x4'])}", + ] self.assertEqual(correct_community_maps, str_test_community_maps) # Partition-based diagnostic test - correct_num_communities, correct_num_nodes, test_num_communities, test_num_nodes = \ - _collect_partition_dependent_tests(test_community_maps, test_partitions) + ( + correct_num_communities, + correct_num_nodes, + test_num_communities, + test_num_nodes, + ) = _collect_partition_dependent_tests(test_community_maps, test_partitions) self.assertEqual(correct_num_communities, test_num_communities) self.assertEqual(correct_num_nodes, test_num_nodes) @@ -385,43 +608,75 @@ def test_communities_7(self): test_community_maps, test_partitions = _collect_community_maps(model) - correct_partitions = [{2: 0}, {2: 0, 3: 1, 4: 1}, {2: 0}, {2: 0, 3: 1, 4: 1}, {2: 0}, {2: 0, 3: 1, 4: 1}, - {2: 0}, {2: 0, 3: 1, 4: 1}, {0: 0, 1: 1}, {0: 0, 1: 1}, {0: 0, 1: 1}, {0: 0, 1: 1}, - {0: 0, 1: 1}, {0: 0, 1: 1}, {0: 0, 1: 1}, {0: 0, 1: 1}, {0: 0, 1: 1, 2: 0}, - {0: 0, 1: 1, 2: 2, 3: 0, 4: 0}, {0: 0, 1: 1, 2: 0}, {0: 0, 1: 1, 2: 2, 3: 0, 4: 0}, - {0: 0, 1: 1, 2: 0}, {0: 0, 1: 1, 2: 2, 3: 0, 4: 0}, {0: 0, 1: 1, 2: 0}, - {0: 0, 1: 1, 2: 2, 3: 0, 4: 0}] + correct_partitions = [ + {2: 0}, + {2: 0, 3: 1, 4: 1}, + {2: 0}, + {2: 0, 3: 1, 4: 1}, + {2: 0}, + {2: 0, 3: 1, 4: 1}, + {2: 0}, + {2: 0, 3: 1, 4: 1}, + {0: 0, 1: 1}, + {0: 0, 1: 1}, + {0: 0, 1: 1}, + {0: 0, 1: 1}, + {0: 0, 1: 1}, + {0: 0, 1: 1}, + {0: 0, 1: 1}, + {0: 0, 1: 1}, + {0: 0, 1: 1, 2: 0}, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 0}, + {0: 0, 1: 1, 2: 0}, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 0}, + {0: 0, 1: 1, 2: 0}, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 0}, + {0: 0, 1: 1, 2: 0}, + {0: 0, 1: 1, 2: 2, 3: 0, 4: 0}, + ] if correct_partitions == test_partitions: # Convert test_community_maps to a string because memory locations may vary by OS and PC - str_test_community_maps = [str(community_map) for community_map in test_community_maps] - - correct_community_maps = ["{0: (['c1'], ['x1'])}", "{0: (['OBJ'], []), 1: (['obj', 'c1'], ['x1'])}", - "{0: (['c1'], ['x1'])}", "{0: (['OBJ'], []), 1: (['obj', 'c1'], ['x1'])}", - "{0: (['c1'], ['x1'])}", "{0: (['OBJ'], []), 1: (['obj', 'c1'], ['x1'])}", - "{0: (['c1'], ['x1'])}", "{0: (['OBJ'], []), 1: (['obj', 'c1'], ['x1'])}", - "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2']), 2: (['OBJ'], [])}", - "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2']), 2: (['OBJ'], [])}", - "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2']), 2: (['OBJ'], [])}", - "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", - "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2']), 2: (['OBJ'], [])}"] + str_test_community_maps = [ + str(community_map) for community_map in test_community_maps + ] + + correct_community_maps = [ + "{0: (['c1'], ['x1'])}", + "{0: (['OBJ'], []), 1: (['obj', 'c1'], ['x1'])}", + "{0: (['c1'], ['x1'])}", + "{0: (['OBJ'], []), 1: (['obj', 'c1'], ['x1'])}", + "{0: (['c1'], ['x1'])}", + "{0: (['OBJ'], []), 1: (['obj', 'c1'], ['x1'])}", + "{0: (['c1'], ['x1'])}", + "{0: (['OBJ'], []), 1: (['obj', 'c1'], ['x1'])}", + "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2']), 2: (['OBJ'], [])}", + "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2']), 2: (['OBJ'], [])}", + "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2']), 2: (['OBJ'], [])}", + "{0: (['c1'], ['x1']), 1: ([], ['x2'])}", + "{0: (['obj', 'c1'], ['x1']), 1: ([], ['x2']), 2: (['OBJ'], [])}", + ] self.assertEqual(correct_community_maps, str_test_community_maps) # Partition-based diagnostic test - correct_num_communities, correct_num_nodes, test_num_communities, test_num_nodes = \ - _collect_partition_dependent_tests(test_community_maps, test_partitions) + ( + correct_num_communities, + correct_num_nodes, + test_num_communities, + test_num_nodes, + ) = _collect_partition_dependent_tests(test_community_maps, test_partitions) self.assertEqual(correct_num_communities, test_num_communities) self.assertEqual(correct_num_nodes, test_num_nodes) @@ -431,55 +686,75 @@ def test_decode_1(self): test_community_maps, test_partitions = _collect_community_maps(model) - correct_partitions = [{4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, - {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, - {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, - {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, {0: 0, 1: 0, 2: 1, 3: 1}, - {0: 0, 1: 0, 2: 1, 3: 1}, {0: 0, 1: 0, 2: 1, 3: 1}, {0: 0, 1: 0, 2: 1, 3: 1}, - {0: 0, 1: 0, 2: 1, 3: 1}, {0: 0, 1: 0, 2: 1, 3: 1}, {0: 0, 1: 0, 2: 1, 3: 1}, - {0: 0, 1: 0, 2: 1, 3: 1}, {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, - {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}] + correct_partitions = [ + {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + {0: 0, 1: 0, 2: 1, 3: 1, 4: 0, 5: 0, 6: 1, 7: 1, 8: 1}, + ] if correct_partitions == test_partitions: # Convert test_community_maps to a string because memory locations may vary by OS and PC - str_test_community_maps = [str(community_map) for community_map in test_community_maps] - - correct_community_maps = ["{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", - "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", - "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}"] + str_test_community_maps = [ + str(community_map) for community_map in test_community_maps + ] + + correct_community_maps = [ + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x2', 'x3', 'x4'])}", + "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2', 'c3'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + "{0: (['c1', 'c2'], ['x1', 'x2']), 1: (['c3', 'c4', 'c5'], ['x3', 'x4'])}", + ] self.assertEqual(correct_community_maps, str_test_community_maps) # Partition-based diagnostic test - correct_num_communities, correct_num_nodes, test_num_communities, test_num_nodes = \ - _collect_partition_dependent_tests(test_community_maps, test_partitions) + ( + correct_num_communities, + correct_num_nodes, + test_num_communities, + test_num_nodes, + ) = _collect_partition_dependent_tests(test_community_maps, test_partitions) self.assertEqual(correct_num_communities, test_num_communities) self.assertEqual(correct_num_nodes, test_num_nodes) @@ -489,26 +764,150 @@ def test_decode_2(self): test_community_maps, test_partitions = _collect_community_maps(model) - correct_partitions = [{7: 0, 8: 0, 9: 0, 10: 1, 11: 1, 12: 1}, {7: 0, 8: 0, 9: 0, 10: 1, 11: 1, 12: 1}, - {7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, {7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, - {7: 0, 8: 0, 9: 0, 10: 1, 11: 1, 12: 1}, {7: 0, 8: 0, 9: 0, 10: 1, 11: 1, 12: 1}, - {7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, {7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1, 7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1, 7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1, 7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1, 7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1, 7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1, 7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1, 7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, - {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1, 7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}] + correct_partitions = [ + {7: 0, 8: 0, 9: 0, 10: 1, 11: 1, 12: 1}, + {7: 0, 8: 0, 9: 0, 10: 1, 11: 1, 12: 1}, + {7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, + {7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, + {7: 0, 8: 0, 9: 0, 10: 1, 11: 1, 12: 1}, + {7: 0, 8: 0, 9: 0, 10: 1, 11: 1, 12: 1}, + {7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, + {7: 0, 8: 0, 9: 1, 10: 1, 11: 1, 12: 1}, + {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, + {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, + {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, + {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, + {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, + {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, + {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, + {0: 0, 1: 0, 2: 0, 3: 1, 4: 1, 5: 1, 6: 1}, + { + 0: 0, + 1: 0, + 2: 0, + 3: 1, + 4: 1, + 5: 1, + 6: 1, + 7: 0, + 8: 0, + 9: 1, + 10: 1, + 11: 1, + 12: 1, + }, + { + 0: 0, + 1: 0, + 2: 0, + 3: 1, + 4: 1, + 5: 1, + 6: 1, + 7: 0, + 8: 0, + 9: 1, + 10: 1, + 11: 1, + 12: 1, + }, + { + 0: 0, + 1: 0, + 2: 0, + 3: 1, + 4: 1, + 5: 1, + 6: 1, + 7: 0, + 8: 0, + 9: 1, + 10: 1, + 11: 1, + 12: 1, + }, + { + 0: 0, + 1: 0, + 2: 0, + 3: 1, + 4: 1, + 5: 1, + 6: 1, + 7: 0, + 8: 0, + 9: 1, + 10: 1, + 11: 1, + 12: 1, + }, + { + 0: 0, + 1: 0, + 2: 0, + 3: 1, + 4: 1, + 5: 1, + 6: 1, + 7: 0, + 8: 0, + 9: 1, + 10: 1, + 11: 1, + 12: 1, + }, + { + 0: 0, + 1: 0, + 2: 0, + 3: 1, + 4: 1, + 5: 1, + 6: 1, + 7: 0, + 8: 0, + 9: 1, + 10: 1, + 11: 1, + 12: 1, + }, + { + 0: 0, + 1: 0, + 2: 0, + 3: 1, + 4: 1, + 5: 1, + 6: 1, + 7: 0, + 8: 0, + 9: 1, + 10: 1, + 11: 1, + 12: 1, + }, + { + 0: 0, + 1: 0, + 2: 0, + 3: 1, + 4: 1, + 5: 1, + 6: 1, + 7: 0, + 8: 0, + 9: 1, + 10: 1, + 11: 1, + 12: 1, + }, + ] if correct_partitions == test_partitions: # Convert test_community_maps to a string because memory locations may vary by OS and PC - str_test_community_maps = [str(community_map) for community_map in test_community_maps] + str_test_community_maps = [ + str(community_map) for community_map in test_community_maps + ] correct_community_maps = [ "{0: (['c1', 'c2', 'c3'], ['x[1]', 'x[2]', 'x[3]', 'x[4]', 'x[5]']), " @@ -558,13 +957,18 @@ def test_decode_2(self): "{0: (['c1', 'c2'], ['x[1]', 'x[2]', 'x[3]']), " "1: (['c3', 'c4', 'c5', 'c6'], ['x[4]', 'x[5]', 'x[6]', 'x[7]'])}", "{0: (['c1', 'c2'], ['x[1]', 'x[2]', 'x[3]']), " - "1: (['c3', 'c4', 'c5', 'c6'], ['x[4]', 'x[5]', 'x[6]', 'x[7]'])}"] + "1: (['c3', 'c4', 'c5', 'c6'], ['x[4]', 'x[5]', 'x[6]', 'x[7]'])}", + ] self.assertEqual(correct_community_maps, str_test_community_maps) # Partition-based diagnostic test - correct_num_communities, correct_num_nodes, test_num_communities, test_num_nodes = \ - _collect_partition_dependent_tests(test_community_maps, test_partitions) + ( + correct_num_communities, + correct_num_nodes, + test_num_communities, + test_num_nodes, + ) = _collect_partition_dependent_tests(test_community_maps, test_partitions) self.assertEqual(correct_num_communities, test_num_communities) self.assertEqual(correct_num_nodes, test_num_nodes) @@ -572,54 +976,85 @@ def test_decode_2(self): def test_communities_8(self): output = StringIO() - with LoggingIntercept(output, 'pyomo.contrib.community_detection', logging.ERROR): + with LoggingIntercept( + output, 'pyomo.contrib.community_detection', logging.ERROR + ): detect_communities(ConcreteModel()) - self.assertIn('in detect_communities: Empty community map was returned', output.getvalue()) + self.assertIn( + 'in detect_communities: Empty community map was returned', output.getvalue() + ) - with LoggingIntercept(output, 'pyomo.contrib.community_detection', logging.WARNING): + with LoggingIntercept( + output, 'pyomo.contrib.community_detection', logging.WARNING + ): detect_communities(one_community_model()) - self.assertIn("Community detection found that with the given parameters, the model could not be decomposed - " - "only one community was found", output.getvalue()) + self.assertIn( + "Community detection found that with the given parameters, the model could not be decomposed - " + "only one community was found", + output.getvalue(), + ) model = 'foo' - with self.assertRaisesRegex(TypeError, "Invalid model: 'model=%s' - model must be an instance of " - "ConcreteModel" % model): + with self.assertRaisesRegex( + TypeError, + "Invalid model: 'model=%s' - model must be an instance of " + "ConcreteModel" % model, + ): detect_communities(model) model = create_model_6() type_of_community_map = 'foo' - with self.assertRaisesRegex(TypeError, "Invalid value for type_of_community_map: " - "'type_of_community_map=%s' - Valid values: 'bipartite', " - "'constraint', 'variable'" - % type_of_community_map): + with self.assertRaisesRegex( + TypeError, + "Invalid value for type_of_community_map: " + "'type_of_community_map=%s' - Valid values: 'bipartite', " + "'constraint', 'variable'" % type_of_community_map, + ): detect_communities(model, type_of_community_map=type_of_community_map) with_objective = 'foo' - with self.assertRaisesRegex(TypeError, "Invalid value for with_objective: 'with_objective=%s' - " - "with_objective must be a Boolean" % with_objective): + with self.assertRaisesRegex( + TypeError, + "Invalid value for with_objective: 'with_objective=%s' - " + "with_objective must be a Boolean" % with_objective, + ): detect_communities(model, with_objective=with_objective) weighted_graph = 'foo' - with self.assertRaisesRegex(TypeError, "Invalid value for weighted_graph: 'weighted_graph=%s' - " - "weighted_graph must be a Boolean" % weighted_graph): + with self.assertRaisesRegex( + TypeError, + "Invalid value for weighted_graph: 'weighted_graph=%s' - " + "weighted_graph must be a Boolean" % weighted_graph, + ): detect_communities(model, weighted_graph=weighted_graph) random_seed = 'foo' - with self.assertRaisesRegex(TypeError, "Invalid value for random_seed: 'random_seed=%s' - random_seed " - "must be a non-negative integer" % random_seed): + with self.assertRaisesRegex( + TypeError, + "Invalid value for random_seed: 'random_seed=%s' - random_seed " + "must be a non-negative integer" % random_seed, + ): detect_communities(model, random_seed=random_seed) random_seed = -1 - with self.assertRaisesRegex(ValueError, "Invalid value for random_seed: 'random_seed=%s' - random_seed " - "must be a non-negative integer" % random_seed): + with self.assertRaisesRegex( + ValueError, + "Invalid value for random_seed: 'random_seed=%s' - random_seed " + "must be a non-negative integer" % random_seed, + ): detect_communities(model, random_seed=random_seed) use_only_active_components = 'foo' - with self.assertRaisesRegex(TypeError, - "Invalid value for use_only_active_components: 'use_only_active_components=%s' " - "- use_only_active_components must be True or None" % use_only_active_components): - detect_communities(model, use_only_active_components=use_only_active_components) + with self.assertRaisesRegex( + TypeError, + "Invalid value for use_only_active_components: 'use_only_active_components=%s' " + "- use_only_active_components must be True or None" + % use_only_active_components, + ): + detect_communities( + model, use_only_active_components=use_only_active_components + ) @unittest.skipUnless(matplotlib_available, "matplotlib is not available.") def test_visualize_model_graph_1(self): @@ -629,7 +1064,9 @@ def test_visualize_model_graph_1(self): with TempfileManager: fig, pos = community_map_object.visualize_model_graph( filename=TempfileManager.create_tempfile( - 'test_visualize_model_graph_1.png')) + 'test_visualize_model_graph_1.png' + ) + ) correct_pos_dict_length = 5 self.assertTrue(isinstance(pos, dict)) @@ -644,7 +1081,9 @@ def test_visualize_model_graph_2(self): fig, pos = community_map_object.visualize_model_graph( type_of_graph='bipartite', filename=TempfileManager.create_tempfile( - 'test_visualize_model_graph_2.png')) + 'test_visualize_model_graph_2.png' + ), + ) correct_pos_dict_length = 13 self.assertTrue(isinstance(pos, dict)) @@ -657,27 +1096,58 @@ def test_generate_structured_model_1(self): community_map_object = cmo = detect_communities(model, random_seed=5) correct_partition = {3: 0, 4: 1, 5: 0, 6: 0, 7: 1, 8: 0} - correct_components = {"b[0].'B[2].c'", "b[0].'c2[1]'", "b[0].'c1[3]'", 'equality_constraint_list[1]', - "b[1].'c2[2]'", 'b[1].x', 'b[0].x', 'b[0].y', 'b[0].z', "b[0].'obj[2]'", "b[1].'c1[2]'"} + correct_components = { + "b[0].'B[2].c'", + "b[0].'c2[1]'", + "b[0].'c1[3]'", + 'equality_constraint_list[1]', + "b[1].'c2[2]'", + 'b[1].x', + 'b[0].x', + 'b[0].y', + 'b[0].z', + "b[0].'obj[2]'", + "b[1].'c1[2]'", + } structured_model = cmo.generate_structured_model() self.assertIsInstance(structured_model, Block) - all_components = set([str(component) for component in structured_model.component_data_objects( - ctype=(Var, Constraint, Objective, ConstraintList), active=cmo.use_only_active_components, - descend_into=True)]) + all_components = set( + [ + str(component) + for component in structured_model.component_data_objects( + ctype=(Var, Constraint, Objective, ConstraintList), + active=cmo.use_only_active_components, + descend_into=True, + ) + ] + ) if cmo.graph_partition == correct_partition: # Test the number of blocks - self.assertEqual(2, len(cmo.community_map), - len(list(structured_model.component_data_objects(ctype=Block, descend_into=True)))) + self.assertEqual( + 2, + len(cmo.community_map), + len( + list( + structured_model.component_data_objects( + ctype=Block, descend_into=True + ) + ) + ), + ) # Test what components have been created self.assertEqual(all_components, correct_components) # Basic test for the replacement of variables - for objective in structured_model.component_data_objects(ctype=Objective, descend_into=True): - objective_expr = str(objective.expr) # This for loop should execute once (only one active objective) + for objective in structured_model.component_data_objects( + ctype=Objective, descend_into=True + ): + objective_expr = str( + objective.expr + ) # This for loop should execute once (only one active objective) correct_objective_expr = '- b[0].x + b[0].y + b[0].z' self.assertEqual(correct_objective_expr, objective_expr) @@ -689,31 +1159,66 @@ def test_generate_structured_model_2(self): m_class._generate_model() model = m = m_class.model - community_map_object = cmo = detect_communities(model, with_objective=False, random_seed=5) + community_map_object = cmo = detect_communities( + model, with_objective=False, random_seed=5 + ) correct_partition = {3: 0, 4: 1, 5: 1, 6: 0, 7: 2} - correct_components = {'b[2].B[2].c', 'b[1].y', 'z', 'b[0].c1[2]', 'b[1].c1[3]', 'obj[2]', - 'equality_constraint_list[3]', 'b[0].x', 'b[1].c2[1]', 'b[2].z', 'x', - 'equality_constraint_list[1]', 'b[0].c2[2]', 'y', 'equality_constraint_list[2]'} + correct_components = { + 'b[2].B[2].c', + 'b[1].y', + 'z', + 'b[0].c1[2]', + 'b[1].c1[3]', + 'obj[2]', + 'equality_constraint_list[3]', + 'b[0].x', + 'b[1].c2[1]', + 'b[2].z', + 'x', + 'equality_constraint_list[1]', + 'b[0].c2[2]', + 'y', + 'equality_constraint_list[2]', + } structured_model = cmo.generate_structured_model() self.assertIsInstance(structured_model, Block) - all_components = set([str(component) for component in structured_model.component_data_objects( - ctype=(Var, Constraint, Objective, ConstraintList), active=cmo.use_only_active_components, - descend_into=True)]) + all_components = set( + [ + str(component) + for component in structured_model.component_data_objects( + ctype=(Var, Constraint, Objective, ConstraintList), + active=cmo.use_only_active_components, + descend_into=True, + ) + ] + ) if cmo.graph_partition == correct_partition: # Test the number of blocks - self.assertEqual(3, len(cmo.community_map), - len(list(structured_model.component_data_objects(ctype=Block, descend_into=True)))) + self.assertEqual( + 3, + len(cmo.community_map), + len( + list( + structured_model.component_data_objects( + ctype=Block, descend_into=True + ) + ) + ), + ) # Test what components have been created self.assertEqual(correct_components, all_components) # Basic test for the replacement of variables - for objective in structured_model.component_data_objects(ctype=Objective, descend_into=True): + for objective in structured_model.component_data_objects( + ctype=Objective, descend_into=True + ): objective_expr = str( - objective.expr) # This for loop should only execute once (only one active objective) + objective.expr + ) # This for loop should only execute once (only one active objective) correct_objective_expr = '- x + y + z' self.assertEqual(correct_objective_expr, objective_expr) @@ -794,10 +1299,14 @@ def _collect_community_maps(model): types_of_type_of_community_map = ['constraint', 'variable', 'bipartite'] list_of_community_maps = [] # this will ultimately contain 24 community map objects - list_of_partitions = [] # this will ultimately contain 24 dictionaries (partitions of networkX graphs) + list_of_partitions = ( + [] + ) # this will ultimately contain 24 dictionaries (partitions of networkX graphs) for community_map_type in types_of_type_of_community_map: - for test_number in range(2 ** 3): # raised to the third power because there are three boolean arguments + for test_number in range( + 2**3 + ): # raised to the third power because there are three boolean arguments argument_value_list = [0, 0, 0] index = 0 @@ -809,18 +1318,26 @@ def _collect_community_maps(model): index += 1 # Given a permutation, we will now assign values for arguments based on the 0 and 1 values - with_objective, weighted_graph, use_only_active_components = [bool(val) for val in argument_value_list] + with_objective, weighted_graph, use_only_active_components = [ + bool(val) for val in argument_value_list + ] # The 'active' parameter treats 'False' as logically ambiguous so we must change it to None if we do # not wish to include active components in this particular function call - if not use_only_active_components: # if we have given use_only_active_components a value of False + if ( + not use_only_active_components + ): # if we have given use_only_active_components a value of False use_only_active_components = None # Make the function call - latest_community_map = detect_communities(model, type_of_community_map=community_map_type, - with_objective=with_objective, weighted_graph=weighted_graph, - random_seed=random_seed_test, - use_only_active_components=use_only_active_components) + latest_community_map = detect_communities( + model, + type_of_community_map=community_map_type, + with_objective=with_objective, + weighted_graph=weighted_graph, + random_seed=random_seed_test, + use_only_active_components=use_only_active_components, + ) # Add this latest community map object and its partition to their respective lists list_of_community_maps.append(latest_community_map) @@ -843,19 +1360,20 @@ def _collect_partition_dependent_tests(test_community_maps, test_partitions): # Now we will extract the lists within the community map that correspond to the # nodes of the networkX graph if community_map.type_of_community_map == 'constraint': - list_of_node_lists = [community[0] for community in - community_map.values()] + list_of_node_lists = [community[0] for community in community_map.values()] elif community_map.type_of_community_map == 'variable': - list_of_node_lists = [community[1] for community in - community_map.values()] + list_of_node_lists = [community[1] for community in community_map.values()] else: - list_of_node_lists = [community[0] + community[1] for community in - community_map.values()] + list_of_node_lists = [ + community[0] + community[1] for community in community_map.values() + ] # We have to flatten list_of_node_lists - actual_number_of_members = len([node for node_list in list_of_node_lists for node in node_list]) + actual_number_of_members = len( + [node for node_list in list_of_node_lists for node in node_list] + ) actual_number_of_communities = len(community_map) expected_num_comm_list.append(expected_number_of_communities) @@ -863,7 +1381,12 @@ def _collect_partition_dependent_tests(test_community_maps, test_partitions): actual_num_comm_list.append(actual_number_of_communities) actual_num_members_list.append(actual_number_of_members) - return expected_num_comm_list, expected_num_members_list, actual_num_comm_list, actual_num_members_list + return ( + expected_num_comm_list, + expected_num_members_list, + actual_num_comm_list, + actual_num_members_list, + ) def create_model_5(): # This model comes from a GAMS convert of instance st_test4.gms at minlplib.com @@ -875,13 +1398,31 @@ def create_model_5(): # This model comes from a GAMS convert of instance st_tes m.i5 = Var(within=Integers, bounds=(0, 1), initialize=0) m.i6 = Var(within=Integers, bounds=(0, 2), initialize=0) m.obj = Objective( - expr=0.5 * m.i1 * m.i1 + 6.5 * m.i1 + 7 * m.i6 * m.i6 - m.i6 - m.i2 - 2 * m.i3 + 3 * m.i4 - 2 * m.i5, - sense=minimize) - m.c1 = Constraint(expr=m.i1 + 2 * m.i2 + 8 * m.i3 + m.i4 + 3 * m.i5 + 5 * m.i6 <= 16) - m.c2 = Constraint(expr=- 8 * m.i1 - 4 * m.i2 - 2 * m.i3 + 2 * m.i4 + 4 * m.i5 - m.i6 <= -1) - m.c3 = Constraint(expr=2 * m.i1 + 0.5 * m.i2 + 0.2 * m.i3 - 3 * m.i4 - m.i5 - 4 * m.i6 <= 24) - m.c4 = Constraint(expr=0.2 * m.i1 + 2 * m.i2 + 0.1 * m.i3 - 4 * m.i4 + 2 * m.i5 + 2 * m.i6 <= 12) - m.c5 = Constraint(expr=- 0.1 * m.i1 - 0.5 * m.i2 + 2 * m.i3 + 5 * m.i4 - 5 * m.i5 + 3 * m.i6 <= 3) + expr=0.5 * m.i1 * m.i1 + + 6.5 * m.i1 + + 7 * m.i6 * m.i6 + - m.i6 + - m.i2 + - 2 * m.i3 + + 3 * m.i4 + - 2 * m.i5, + sense=minimize, + ) + m.c1 = Constraint( + expr=m.i1 + 2 * m.i2 + 8 * m.i3 + m.i4 + 3 * m.i5 + 5 * m.i6 <= 16 + ) + m.c2 = Constraint( + expr=-8 * m.i1 - 4 * m.i2 - 2 * m.i3 + 2 * m.i4 + 4 * m.i5 - m.i6 <= -1 + ) + m.c3 = Constraint( + expr=2 * m.i1 + 0.5 * m.i2 + 0.2 * m.i3 - 3 * m.i4 - m.i5 - 4 * m.i6 <= 24 + ) + m.c4 = Constraint( + expr=0.2 * m.i1 + 2 * m.i2 + 0.1 * m.i3 - 4 * m.i4 + 2 * m.i5 + 2 * m.i6 <= 12 + ) + m.c5 = Constraint( + expr=-0.1 * m.i1 - 0.5 * m.i2 + 2 * m.i3 + 5 * m.i4 - 5 * m.i5 + 3 * m.i6 <= 3 + ) return model @@ -915,9 +1456,9 @@ def decode_model_1(): m.x4 = Var(initialize=-1) m.c1 = Constraint(expr=m.x1 + m.x2 <= 0) m.c2 = Constraint(expr=m.x1 - 3 * m.x2 <= 0) - m.c3 = Constraint(expr=m.x2 + m.x3 + 4 * m.x4 ** 2 == 0) + m.c3 = Constraint(expr=m.x2 + m.x3 + 4 * m.x4**2 == 0) m.c4 = Constraint(expr=m.x3 + m.x4 <= 0) - m.c5 = Constraint(expr=m.x3 ** 2 + m.x4 ** 2 - 10 == 0) + m.c5 = Constraint(expr=m.x3**2 + m.x4**2 - 10 == 0) return model diff --git a/pyomo/contrib/cp/__init__.py b/pyomo/contrib/cp/__init__.py new file mode 100644 index 00000000000..c51160bf931 --- /dev/null +++ b/pyomo/contrib/cp/__init__.py @@ -0,0 +1,16 @@ +from pyomo.contrib.cp.interval_var import ( + IntervalVar, + IntervalVarStartTime, + IntervalVarEndTime, + IntervalVarLength, + IntervalVarPresence, +) +from pyomo.contrib.cp.repn.docplex_writer import DocplexWriter, CPOptimizerSolver +from pyomo.contrib.cp.scheduling_expr.step_function_expressions import ( + AlwaysIn, + Step, + Pulse, +) + +# register logical_to_disjunctive transformation +import pyomo.contrib.cp.transform.logical_to_disjunctive_program diff --git a/pyomo/contrib/cp/interval_var.py b/pyomo/contrib/cp/interval_var.py new file mode 100644 index 00000000000..911d9ba50ba --- /dev/null +++ b/pyomo/contrib/cp/interval_var.py @@ -0,0 +1,220 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.collections import ComponentSet +from pyomo.common.pyomo_typing import overload +from pyomo.contrib.cp.scheduling_expr.precedence_expressions import ( + BeforeExpression, + AtExpression, +) + +from pyomo.core import Integers, value +from pyomo.core.base import Any, ScalarVar, ScalarBooleanVar +from pyomo.core.base.block import _BlockData, Block +from pyomo.core.base.component import ModelComponentFactory +from pyomo.core.base.global_set import UnindexedComponent_index +from pyomo.core.base.indexed_component import IndexedComponent, UnindexedComponent_set +from pyomo.core.base.initializer import BoundInitializer, Initializer +from pyomo.core.expr import GetItemExpression + + +class IntervalVarTimePoint(ScalarVar): + """This class defines the abstract interface for a single variable + denoting a start or end time point of an IntervalVar""" + + __slots__ = () + + def get_associated_interval_var(self): + return self.parent_block() + + def before(self, time, delay=0): + return BeforeExpression((self, time, delay)) + + def after(self, time, delay=0): + return BeforeExpression((time, self, delay)) + + def at(self, time, delay=0): + return AtExpression((self, time, delay)) + + +class IntervalVarStartTime(IntervalVarTimePoint): + """This class defines a single variable denoting a start time point + of an IntervalVar""" + + def __init__(self): + super().__init__(domain=Integers, ctype=IntervalVarStartTime) + + +class IntervalVarEndTime(IntervalVarTimePoint): + """This class defines a single variable denoting an end time point + of an IntervalVar""" + + def __init__(self): + super().__init__(domain=Integers, ctype=IntervalVarEndTime) + + +class IntervalVarLength(ScalarVar): + """This class defines the abstract interface for a single variable + denoting the length of an IntervalVar""" + + __slots__ = () + + def __init__(self): + super().__init__(domain=Integers, ctype=IntervalVarLength) + + def get_associated_interval_var(self): + return self.parent_block() + + +class IntervalVarPresence(ScalarBooleanVar): + """This class defines the abstract interface for a single Boolean variable + denoting whether or not an IntervalVar is scheduled""" + + __slots__ = () + + def __init__(self): + super().__init__(ctype=IntervalVarPresence) + + def get_associated_interval_var(self): + return self.parent_block() + + +class IntervalVarData(_BlockData): + """This class defines the abstract interface for a single interval variable.""" + + # We will put our four variables on this, and everything else is off limits. + _Block_reserved_words = Any + + def __init__(self, component=None): + _BlockData.__init__(self, component) + + with self._declare_reserved_components(): + self.is_present = IntervalVarPresence() + self.start_time = IntervalVarStartTime() + self.end_time = IntervalVarEndTime() + self.length = IntervalVarLength() + + @property + def optional(self): + # We only store this information in one place, but it's kind of annoying + # to have to check if the BooleanVar is fixed, so this way you can ask + # the IntervalVar directly. + return not self.is_present.fixed or ( + self.is_present.fixed and not value(self.is_present) + ) + + @optional.setter + def optional(self, val): + if type(val) is not bool: + raise ValueError( + "Cannot set 'optional' to %s: Must be True or False." % val + ) + if val: + self.is_present.unfix() + else: + self.is_present.fix(True) + + +@ModelComponentFactory.register("Interval variables for scheduling.") +class IntervalVar(Block): + """An interval variable, which may be defined over an index. + + Args: + start (tuple of two integers): Feasible range for the + interval variable's start time + end (tuple of two integers, optional): Feasible range for the + interval variables end time + length (integer or tuple of two integers, optional): Feasible + range for the length of the interval variable + optional (boolean, optional) : If False, the interval variable must + be scheduled. Otherwise the interval variable is optionally + present. Default behavior is 'False' + name (str, optional): Name for this component. + doc (str, optional): Text describing this component. + """ + + _ComponentDataClass = IntervalVarData + + def __new__(cls, *args, **kwds): + if cls != IntervalVar: + return super(IntervalVar, cls).__new__(cls) + if not args or (args[0] is UnindexedComponent_set and len(args) == 1): + return ScalarIntervalVar.__new__(ScalarIntervalVar) + else: + return IndexedIntervalVar.__new__(IndexedIntervalVar) + + @overload + def __init__( + self, + *indices, + start=None, + end=None, + length=None, + optional=False, + name=None, + doc=None + ): + ... + + def __init__(self, *args, **kwargs): + _start_arg = kwargs.pop('start', None) + _end_arg = kwargs.pop('end', None) + _length_arg = kwargs.pop('length', None) + _optional_arg = kwargs.pop('optional', False) + + kwargs.setdefault('ctype', IntervalVar) + Block.__init__(self, *args, **kwargs) + + self._start_bounds = BoundInitializer(_start_arg, self) + self._end_bounds = BoundInitializer(_end_arg, self) + self._length_bounds = BoundInitializer(_length_arg, self) + self._optional = Initializer(_optional_arg) + + def _getitem_when_not_present(self, index): + if index is None and not self.is_indexed(): + obj = self._data[index] = self + else: + obj = self._data[index] = self._ComponentDataClass(component=self) + parent = obj.parent_block() + obj._index = index + + if self._start_bounds is not None: + obj.start_time.bounds = self._start_bounds(parent, index) + if self._end_bounds is not None: + obj.end_time.bounds = self._end_bounds(parent, index) + if self._length_bounds is not None: + obj.length.bounds = self._length_bounds(parent, index) + # hit the setter so I get error checking + obj.optional = self._optional(parent, index) + + return obj + + +class ScalarIntervalVar(IntervalVarData, IntervalVar): + def __init__(self, *args, **kwds): + self._suppress_ctypes = set() + + IntervalVarData.__init__(self, self) + IntervalVar.__init__(self, *args, **kwds) + self._data[None] = self + self._index = UnindexedComponent_index + + +class IndexedIntervalVar(IntervalVar): + # We allow indexing IntervalVars by expressions (including Vars). + def __getitem__(self, args): + tmp = args if args.__class__ is tuple else (args,) + if any( + hasattr(arg, 'is_potentially_variable') and arg.is_potentially_variable() + for arg in tmp + ): + return GetItemExpression((self,) + tmp) + return super().__getitem__(args) diff --git a/pyomo/checker/tests/examples/model/ModelArgument_norule.py b/pyomo/contrib/cp/plugins.py similarity index 79% rename from pyomo/checker/tests/examples/model/ModelArgument_norule.py rename to pyomo/contrib/cp/plugins.py index 6322f014d5e..445599daab0 100644 --- a/pyomo/checker/tests/examples/model/ModelArgument_norule.py +++ b/pyomo/contrib/cp/plugins.py @@ -10,5 +10,7 @@ # ___________________________________________________________________________ -def myAdd(a, b): - return a.val + b.val +def load(): + from . import interval_var + from .repn import docplex_writer + from .transform import logical_to_disjunctive_program diff --git a/pyomo/checker/tests/__init__.py b/pyomo/contrib/cp/repn/__init__.py similarity index 100% rename from pyomo/checker/tests/__init__.py rename to pyomo/contrib/cp/repn/__init__.py diff --git a/pyomo/contrib/cp/repn/docplex_writer.py b/pyomo/contrib/cp/repn/docplex_writer.py new file mode 100644 index 00000000000..51c3f66140e --- /dev/null +++ b/pyomo/contrib/cp/repn/docplex_writer.py @@ -0,0 +1,1305 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.dependencies import attempt_import + +import itertools +import logging +from operator import attrgetter + +from pyomo.common import DeveloperError +from pyomo.common.config import ConfigDict, ConfigValue +from pyomo.common.collections import ComponentMap +from pyomo.common.fileutils import Executable + +from pyomo.contrib.cp import IntervalVar +from pyomo.contrib.cp.interval_var import ( + IntervalVarStartTime, + IntervalVarEndTime, + IntervalVarPresence, + IntervalVarLength, + ScalarIntervalVar, + IntervalVarData, + IndexedIntervalVar, +) +from pyomo.contrib.cp.scheduling_expr.precedence_expressions import ( + BeforeExpression, + AtExpression, +) +from pyomo.contrib.cp.scheduling_expr.step_function_expressions import ( + AlwaysIn, + StepAt, + StepAtStart, + StepAtEnd, + Pulse, + CumulativeFunction, + NegatedStepFunction, +) + +from pyomo.core.base import ( + minimize, + maximize, + SortComponents, + Block, + Objective, + Constraint, + Var, + Param, + BooleanVar, + LogicalConstraint, + Suffix, + value, +) +from pyomo.core.base.boolean_var import ( + ScalarBooleanVar, + _GeneralBooleanVarData, + IndexedBooleanVar, +) +from pyomo.core.base.expression import ScalarExpression, _GeneralExpressionData +from pyomo.core.base.param import IndexedParam, ScalarParam +from pyomo.core.base.var import ScalarVar, _GeneralVarData, IndexedVar +import pyomo.core.expr as EXPR +from pyomo.core.expr.visitor import StreamBasedExpressionVisitor, identify_variables +from pyomo.core.base import Set, RangeSet +from pyomo.core.base.set import SetProduct +from pyomo.opt import WriterFactory, SolverFactory, TerminationCondition, SolverResults + +### FIXME: Remove the following as soon as non-active components no +### longer report active==True +from pyomo.network import Port + +### + + +def _finalize_docplex(module, available): + if not available: + return + _deferred_element_getattr_dispatcher['start_time'] = module.start_of + _deferred_element_getattr_dispatcher['end_time'] = module.end_of + _deferred_element_getattr_dispatcher['length'] = module.length_of + _deferred_element_getattr_dispatcher['is_present'] = module.presence_of + + # Scheduling dispatchers + _before_dispatchers[_START_TIME, _START_TIME] = module.start_before_start + _before_dispatchers[_START_TIME, _END_TIME] = module.start_before_end + _before_dispatchers[_END_TIME, _START_TIME] = module.end_before_start + _before_dispatchers[_END_TIME, _END_TIME] = module.end_before_end + + _at_dispatchers[_START_TIME, _START_TIME] = module.start_at_start + _at_dispatchers[_START_TIME, _END_TIME] = module.start_at_end + _at_dispatchers[_END_TIME, _START_TIME] = module.end_at_start + _at_dispatchers[_END_TIME, _END_TIME] = module.end_at_end + + _time_point_dispatchers[_START_TIME] = module.start_of + _time_point_dispatchers[_END_TIME] = module.end_of + + +cp, docplex_available = attempt_import('docplex.cp.model', callback=_finalize_docplex) +cp_solver, docplex_available = attempt_import('docplex.cp.solver') + +logger = logging.getLogger('pyomo.contrib.cp') + + +# These are things that don't need special handling: +class _GENERAL(object): + pass + + +# These are operations that need to be deferred sometimes, usually because of +# indirection: +class _START_TIME(object): + pass + + +class _END_TIME(object): + pass + + +class _DEFERRED_ELEMENT_CONSTRAINT(object): + pass + + +class _ELEMENT_CONSTRAINT(object): + pass + + +class _DEFERRED_BEFORE(object): + pass + + +class _DEFERRED_AFTER(object): + pass + + +class _DEFERRED_AT(object): + pass + + +class _BEFORE(object): + pass + + +class _AT(object): + pass + + +class _IMPLIES(object): + pass + + +class _LAND(object): + pass + + +class _LOR(object): + pass + + +class _XOR(object): + pass + + +class _EQUIVALENT_TO(object): + pass + + +def _check_var_domain(visitor, node, var): + if not var.domain.isdiscrete(): + # Note: in the context of the current writer, this should be unreachable + # because we can't handle non-discrete variables at all, so there will + # already be errors handling the children of this expression. + raise ValueError( + "Variable indirection '%s' contains argument '%s', " + "which is not a discrete variable" % (node, var) + ) + bnds = var.bounds + if None in bnds: + raise ValueError( + "Variable indirection '%s' contains argument '%s', " + "which is not restricted to a finite discrete domain" % (node, var) + ) + return var.domain & RangeSet(*bnds) + + +def _handle_getitem(visitor, node, *data): + # First we need to determine the range for each of the the + # arguments. They can be: + # + # - simple values + # - docplex integer variables + # - docplex integer expressions + arg_domain = [] + arg_scale = [] + expr = 0 + mult = 1 + # Note: skipping the first argument: that should be the IndexedComponent + for i, arg in enumerate(data[1:]): + if arg[1].__class__ in EXPR.native_types: + arg_set = Set(initialize=[arg[1]]) + arg_set.construct() + arg_domain.append(arg_set) + arg_scale.append(None) + elif node.arg(i + 1).is_expression_type(): + # This argument is an expression. It could be any + # combination of any number of integer variables, as long as + # the resulting expression is still an IntExpression. We + # can't really rely on FBBT here, because we need to know + # that the expression returns values in a regular domain + # (i.e., the set of possible values has to have a start, + # end, and finite, regular step). + # + # We will brute force it: go through every combination of + # every variable and record the resulting expression value. + arg_expr = node.arg(i + 1) + var_list = list(identify_variables(arg_expr, include_fixed=False)) + var_domain = [list(_check_var_domain(visitor, node, v)) for v in var_list] + arg_vals = set() + for var_vals in itertools.product(*var_domain): + for v, val in zip(var_list, var_vals): + v.set_value(val) + arg_vals.add(arg_expr()) + # Now that we have all the values that define the domain of + # the result of the expression, stick them into a set and + # rely on the Set infrastructure to calculate (and verify) + # the interval. + arg_set = Set(initialize=sorted(arg_vals)) + arg_set.construct() + interval = arg_set.get_interval() + if not interval[2]: + raise ValueError( + "Variable indirection '%s' contains argument expression " + "'%s' that does not evaluate to a simple discrete set" + % (node, arg_expr) + ) + arg_domain.append(arg_set) + arg_scale.append(interval) + else: + # This had better be a simple variable over a regular + # discrete domain. When we add support for categorical + # variables, we will need to ensure that the categoricals + # have already been converted to simple integer domains by + # this point. + var = node.arg(i + 1) + arg_domain.append(_check_var_domain(visitor, node, var)) + arg_scale.append(arg_domain[-1].get_interval()) + # Build the expression that maps arguments to GetItem() to a + # position in the elements list + if arg_scale[-1] is not None: + _min, _max, _step = arg_scale[-1] + # ESJ: Have to use integer division here because otherwise, later, + # when we construct the element constraint, docplex won't believe + # the index is an integer expression. + if _step is None: + raise ValueError( + "Variable indirection '%s' is over a discrete domain " + "without a constant step size. This is not supported." % node + ) + expr += mult * (arg[1] - _min) // _step + # This could be (_max - _min) // _step + 1, but that assumes + # that the set correctly collapsed the bounds and that the + # lower and upper bounds were part of the step. That + # *should* be the case for Set, but I am suffering from a + # crisis of confidence at the moment. + mult *= len(arg_domain[-1]) + # Get the list of all elements selectable by the argument + # expression(s); fill in new variables for any indices allowable by + # the argument expression(s) but not present in the IndexedComponent + # indexing set. + elements = [] + for idx in SetProduct(*arg_domain): + try: + idx = idx if len(idx) > 1 else idx[0] + elements.append(data[0][1][idx]) + except KeyError: + raise ValueError( + "Variable indirection '%s' permits an index '%s' " + "that is not a valid key. In CP Optimizer, this is a " + "structural infeasibility." % (node, idx) + ) + # NOTE: If we thought it was the right thing to do in the future, we + # could fill in with a bogus variable and add a constraint + # disallowing it from being selected + try: + return (_ELEMENT_CONSTRAINT, cp.element(elements, expr)) + except AssertionError: + return (_DEFERRED_ELEMENT_CONSTRAINT, (elements, expr)) + + +_element_constraint_attr_dispatcher = { + 'before': _DEFERRED_BEFORE, + 'after': _DEFERRED_AFTER, + 'at': _DEFERRED_AT, + 'implies': _IMPLIES, + 'land': _LAND, + 'lor': _LOR, + 'xor': _XOR, + 'equivalent_to': _EQUIVALENT_TO, +} +# This will get populated when cp is finally imported +_deferred_element_getattr_dispatcher = {} + + +def _handle_getattr(visitor, node, obj, attr): + # We either end up here because we do not yet know the list of variables to + # make an element constraint (the first case) or because we are asking for + # an attribute on something with indirection, so at this point we *have* a + # constructed element constraint (the second case). + if obj[0] is _DEFERRED_ELEMENT_CONSTRAINT: + # then obj[1] is a list of cp thingies that we need to get the attr on, + # and then at the end we need to make the element constraint we couldn't + # make before. + try: + ans = list(map(_deferred_element_getattr_dispatcher[attr[1]], obj[1][0])) + except KeyError: + logger.error("Unrecognized attribute in GetAttrExpression: %s." % attr[1]) + raise + return (_ELEMENT_CONSTRAINT, cp.element(array=ans, index=obj[1][1])) + elif obj[0] is _ELEMENT_CONSTRAINT: + try: + return (_element_constraint_attr_dispatcher[attr[1]], obj) + except KeyError: + logger.error( + "Unrecognized attribute in GetAttrExpression:" + "%s. Found for object: %s" % (attr[1], obj[1]) + ) + raise + else: + raise DeveloperError( + "Unrecognized argument type '%s' to getattr dispatcher." % obj[0] + ) + + +def _before_boolean_var(visitor, child): + _id = id(child) + if _id not in visitor.var_map: + if child.fixed: + return False, (_GENERAL, child.value) + nm = child.name if visitor.symbolic_solver_labels else None + # Sorry, universe, but docplex doesn't know the difference between + # Boolean and Binary... + cpx_var = cp.binary_var(name=nm) + # Because I want to pretend the world is sane from here on out, we will + # return a Boolean expression (in docplex land) so this can be used as + # an argument to logical expressions later + visitor.var_map[_id] = cpx_var == 1 + visitor.pyomo_to_docplex[child] = cpx_var + return False, (_GENERAL, visitor.var_map[_id]) + + +def _before_indexed_boolean_var(visitor, child): + cpx_vars = {} + for i, v in child.items(): + if v.fixed: + cpx_vars[i] = v.value + continue + cpx_var = cp.binary_var(name=v.name if visitor.symbolic_solver_labels else None) + visitor.cpx.add(cpx_var) + visitor.var_map[id(v)] = cpx_var == 1 + visitor.pyomo_to_docplex[v] = cpx_var + cpx_vars[i] = cpx_var == 1 + return False, (_GENERAL, cpx_vars) + + +def _before_param(visitor, child): + return False, (_GENERAL, value(child)) + + +def _before_indexed_param(visitor, child): + return False, (_GENERAL, {idx: value(p) for idx, p in child.items()}) + + +def _create_docplex_var(pyomo_var, name=None): + if pyomo_var.is_binary(): + return cp.binary_var(name=name) + elif pyomo_var.is_integer(): + return cp.integer_var( + min=pyomo_var.bounds[0], max=pyomo_var.bounds[1], name=name + ) + elif pyomo_var.domain.isdiscrete(): + if pyomo_var.domain.isfinite(): + return cp.integer_var(domain=[d for d in pyomo_var.domain], name=name) + else: + # If we ever want to handle this case, I think we might be able to + # make a normal integer var and then constrain it into the + # domain. But no reason to go to the effort for now because I don't + # know if the solver can even work with such a var. + raise ValueError( + "The LogicalToDoCplex writer does not support " + "infinite discrete domains. Cannot write " + "Var '%s' with domain '%s'" % (pyomo_var.name, pyomo_var.domain) + ) + else: + raise ValueError( + "The LogicalToDoCplex writer can only support " + "integer- or Boolean-valued variables. Cannot " + "write Var '%s' with domain '%s'" % (pyomo_var.name, pyomo_var.domain) + ) + + +def _before_var(visitor, child): + _id = id(child) + if _id not in visitor.var_map: + if child.fixed: + return False, (_GENERAL, child.value) + cpx_var = _create_docplex_var( + child, name=child.name if visitor.symbolic_solver_labels else None + ) + visitor.cpx.add(cpx_var) + visitor.var_map[_id] = cpx_var + visitor.pyomo_to_docplex[child] = cpx_var + return False, (_GENERAL, visitor.var_map[_id]) + + +def _before_indexed_var(visitor, child): + cpx_vars = {} + for i, v in child.items(): + cpx_var = _create_docplex_var( + v, name=v.name if visitor.symbolic_solver_labels else None + ) + visitor.cpx.add(cpx_var) + visitor.var_map[id(v)] = cpx_var + visitor.pyomo_to_docplex[v] = cpx_var + cpx_vars[i] = cpx_var + return False, (_GENERAL, cpx_vars) + + +def _handle_named_expression_node(visitor, node, expr): + visitor._named_expressions[id(node)] = expr[1] + return expr + + +def _before_named_expression(visitor, child): + _id = id(child) + if _id not in visitor._named_expressions: + return True, None + return False, (_GENERAL, visitor._named_expressions[_id]) + + +def _create_docplex_interval_var(visitor, interval_var): + # Create a new docplex interval var and then figure out all the info that + # gets stored on it + nm = interval_var.name if visitor.symbolic_solver_labels else None + cpx_interval_var = cp.interval_var(name=nm) + visitor.var_map[id(interval_var)] = cpx_interval_var + + # Figure out if it exists + if interval_var.is_present.fixed and not interval_var.is_present.value: + # Someone has fixed that this will not get scheduled. + cpx_interval_var.set_absent() + elif interval_var.optional: + cpx_interval_var.set_optional() + else: + cpx_interval_var.set_present() + + # Figure out constraints on its length + length = interval_var.length + if length.fixed: + cpx_interval_var.set_length(length.value) + if length.lb is not None: + cpx_interval_var.set_length_min(length.lb) + if length.ub is not None: + cpx_interval_var.set_length_max(length.ub) + + # Figure out constraints on start time + start_time = interval_var.start_time + if start_time.fixed: + cpx_interval_var.set_start(start_time.value) + else: + if start_time.lb is not None: + cpx_interval_var.set_start_min(start_time.lb) + if start_time.ub is not None: + cpx_interval_var.set_start_max(start_time.ub) + + # Figure out constraints on end time + end_time = interval_var.end_time + if end_time.fixed: + cpx_interval_var.set_end(end_time.value) + else: + if end_time.lb is not None: + cpx_interval_var.set_end_min(end_time.lb) + if end_time.ub is not None: + cpx_interval_var.set_end_max(end_time.ub) + + return cpx_interval_var + + +def _get_docplex_interval_var(visitor, interval_var): + # We might already have the interval_var and just need to retrieve it + if id(interval_var) in visitor.var_map: + cpx_interval_var = visitor.var_map[id(interval_var)] + else: + cpx_interval_var = _create_docplex_interval_var(visitor, interval_var) + visitor.cpx.add(cpx_interval_var) + return cpx_interval_var + + +def _before_interval_var(visitor, child): + _id = id(child) + if _id not in visitor.var_map: + cpx_interval_var = _get_docplex_interval_var(visitor, child) + visitor.var_map[_id] = cpx_interval_var + visitor.pyomo_to_docplex[child] = cpx_interval_var + + return False, (_GENERAL, visitor.var_map[_id]) + + +def _before_indexed_interval_var(visitor, child): + cpx_vars = {} + for i, v in child.items(): + cpx_interval_var = _get_docplex_interval_var(visitor, v) + visitor.var_map[id(v)] = cpx_interval_var + visitor.pyomo_to_docplex[v] = cpx_interval_var + cpx_vars[i] = cpx_interval_var + return False, (_GENERAL, cpx_vars) + + +def _before_interval_var_start_time(visitor, child): + _id = id(child) + interval_var = child.get_associated_interval_var() + if _id not in visitor.var_map: + cpx_interval_var = _get_docplex_interval_var(visitor, interval_var) + + return False, (_START_TIME, visitor.var_map[id(interval_var)]) + + +def _before_interval_var_end_time(visitor, child): + _id = id(child) + interval_var = child.get_associated_interval_var() + if _id not in visitor.var_map: + cpx_interval_var = _get_docplex_interval_var(visitor, interval_var) + + return False, (_END_TIME, visitor.var_map[id(interval_var)]) + + +def _before_interval_var_length(visitor, child): + _id = id(child) + if _id not in visitor.var_map: + interval_var = child.get_associated_interval_var() + cpx_interval_var = _get_docplex_interval_var(visitor, interval_var) + + visitor.var_map[_id] = cp.length_of(cpx_interval_var) + # There aren't any special types of constraints involving the length, so we + # just treat this expression as if it's a normal variable. + return False, (_GENERAL, visitor.var_map[_id]) + + +def _before_interval_var_presence(visitor, child): + _id = id(child) + if _id not in visitor.var_map: + interval_var = child.get_associated_interval_var() + cpx_interval_var = _get_docplex_interval_var(visitor, interval_var) + + visitor.var_map[_id] = cp.presence_of(cpx_interval_var) + # There aren't any special types of constraints involving the presence, so + # we just treat this expression as if it's a normal variable. + return False, (_GENERAL, visitor.var_map[_id]) + + +def _handle_step_at_node(visitor, node): + return cp.step_at(node._time, node._height) + + +def _handle_step_at_start_node(visitor, node): + cpx_var = _get_docplex_interval_var(visitor, node._time) + return cp.step_at_start(cpx_var, node._height) + + +def _handle_step_at_end_node(visitor, node): + cpx_var = _get_docplex_interval_var(visitor, node._time) + return cp.step_at_end(cpx_var, node._height) + + +def _handle_pulse_node(visitor, node): + cpx_var = _get_docplex_interval_var(visitor, node._interval_var) + return cp.pulse(cpx_var, node._height) + + +def _handle_negated_step_function_node(visitor, node): + return _step_function_handles[node.args[0].__class__](visitor, node.args[0]) + + +def _handle_cumulative_function(visitor, node): + expr = 0 + for arg in node.args: + if arg.__class__ is NegatedStepFunction: + expr -= _handle_negated_step_function_node(visitor, arg) + else: + expr += _step_function_handles[arg.__class__](visitor, arg) + + return False, (_GENERAL, expr) + + +_step_function_handles = { + StepAt: _handle_step_at_node, + StepAtStart: _handle_step_at_start_node, + StepAtEnd: _handle_step_at_end_node, + Pulse: _handle_pulse_node, + CumulativeFunction: _handle_cumulative_function, + NegatedStepFunction: _handle_negated_step_function_node, +} +step_func_expression_types = _step_function_handles.keys() + +## +# Algebraic expressions +## + + +def _get_int_valued_expr(arg): + if arg[0] in {_GENERAL, _ELEMENT_CONSTRAINT}: + return arg[1] + elif arg[0] is _START_TIME: + return cp.start_of(arg[1]) + elif arg[0] is _END_TIME: + return cp.end_of(arg[1]) + else: + raise DeveloperError( + "Attempting to get a docplex integer-valued " + "expression from object in class %s" % str(arg[0]) + ) + + +def _get_bool_valued_expr(arg): + if arg[0] is _GENERAL: + return arg[1] + elif arg[0] is _ELEMENT_CONSTRAINT: + # docplex doesn't bother to check if 'element' expressions are integer- + # or boolean-valued: they just complain if you use them in a boolean + # context. So if we are about to use one that way, we set it equivalent + # to True so that it will be boolean-valued according to docplex's + # idiosyncrasies. + return arg[1] == True + elif arg[0] is _BEFORE: + # We're using a start-before-start or its ilk in a boolean-valued + # context. docplex doesn't believe these things are boolean-valued, so + # we have to convert to the inequality version: + (lhs, rhs) = arg[2] + return _handle_inequality_node(None, None, lhs, rhs)[1] + elif arg[0] is _AT: + # Same as above, but now we need an equality node + (lhs, rhs) = arg[2] + return _handle_equality_node(None, None, lhs, rhs)[1] + else: + raise DeveloperError( + "Attempting to get a docplex Boolean-valued " + "expression from object in class %s" % str(arg[0]) + ) + + +def _handle_monomial_expr(visitor, node, arg1, arg2): + # Monomial terms show up a lot. This handles some common + # simplifications (necessary in part for the unit tests) + if arg2[1].__class__ in EXPR.native_types: + return _GENERAL, arg1[1] * arg2[1] + elif arg1[1] == 1: + return arg2 + return (_GENERAL, cp.times(_get_int_valued_expr(arg1), _get_int_valued_expr(arg2))) + + +def _handle_sum_node(visitor, node, *args): + return ( + _GENERAL, + sum( + (_get_int_valued_expr(arg) for arg in args[1:]), + _get_int_valued_expr(args[0]), + ), + ) + + +def _handle_negation_node(visitor, node, arg1): + return (_GENERAL, cp.times(-1, _get_int_valued_expr(arg1))) + + +def _handle_product_node(visitor, node, arg1, arg2): + return (_GENERAL, cp.times(_get_int_valued_expr(arg1), _get_int_valued_expr(arg2))) + + +def _handle_division_node(visitor, node, arg1, arg2): + return ( + _GENERAL, + cp.float_div(_get_int_valued_expr(arg1), _get_int_valued_expr(arg2)), + ) + + +def _handle_pow_node(visitor, node, arg1, arg2): + return (_GENERAL, cp.power(_get_int_valued_expr(arg1), _get_int_valued_expr(arg2))) + + +def _handle_abs_node(visitor, node, arg1): + return (_GENERAL, cp.abs(_get_int_valued_expr(arg1))) + + +def _handle_min_node(visitor, node, *args): + return (_GENERAL, cp.min((_get_int_valued_expr(arg) for arg in args))) + + +def _handle_max_node(visitor, node, *args): + return (_GENERAL, cp.max((_get_int_valued_expr(arg) for arg in args))) + + +## +# Relational expressions +## + + +def _handle_equality_node(visitor, node, arg1, arg2): + return (_GENERAL, cp.equal(_get_int_valued_expr(arg1), _get_int_valued_expr(arg2))) + + +def _handle_inequality_node(visitor, node, arg1, arg2): + return ( + _GENERAL, + cp.less_or_equal(_get_int_valued_expr(arg1), _get_int_valued_expr(arg2)), + ) + + +def _handle_ranged_inequality_node(visitor, node, arg1, arg2, arg3): + return ( + _GENERAL, + cp.range( + _get_int_valued_expr(arg2), + lb=_get_int_valued_expr(arg1), + ub=_get_int_valued_expr(arg3), + ), + ) + + +def _handle_not_equal_node(visitor, node, arg1, arg2): + return (_GENERAL, cp.diff(_get_int_valued_expr(arg1), _get_int_valued_expr(arg2))) + + +## +# Logical expressions +## + + +def _handle_and_node(visitor, node, *args): + return (_GENERAL, cp.logical_and((_get_bool_valued_expr(arg) for arg in args))) + + +def _handle_or_node(visitor, node, *args): + return (_GENERAL, cp.logical_or((_get_bool_valued_expr(arg) for arg in args))) + + +def _handle_xor_node(visitor, node, arg1, arg2): + return ( + _GENERAL, + cp.equal( + cp.count([_get_bool_valued_expr(arg1), _get_bool_valued_expr(arg2)], 1), 1 + ), + ) + + +def _handle_not_node(visitor, node, arg): + return (_GENERAL, cp.logical_not(_get_bool_valued_expr(arg))) + + +def _handle_equivalence_node(visitor, node, arg1, arg2): + return ( + _GENERAL, + cp.equal(_get_bool_valued_expr(arg1), _get_bool_valued_expr(arg2)), + ) + + +def _handle_implication_node(visitor, node, arg1, arg2): + return ( + _GENERAL, + cp.if_then(_get_bool_valued_expr(arg1), _get_bool_valued_expr(arg2)), + ) + + +def _handle_exactly_node(visitor, node, *args): + return ( + _GENERAL, + cp.equal( + cp.count((_get_bool_valued_expr(arg) for arg in args[1:]), 1), + _get_int_valued_expr(args[0]), + ), + ) + + +def _handle_at_most_node(visitor, node, *args): + return ( + _GENERAL, + cp.less_or_equal( + cp.count((_get_bool_valued_expr(arg) for arg in args[1:]), 1), + _get_int_valued_expr(args[0]), + ), + ) + + +def _handle_at_least_node(visitor, node, *args): + return ( + _GENERAL, + cp.greater_or_equal( + cp.count((_get_bool_valued_expr(arg) for arg in args[1:]), 1), + _get_int_valued_expr(args[0]), + ), + ) + + +## CallExpression handllers + + +def _before_call_dispatcher(visitor, node, *args): + if len(args) == 2: + return _handle_inequality_node(visitor, node, args[0], args[1]) + else: # a delay is also specified + lhs = _handle_sum_node(visitor, node, args[0], args[2]) + return _handle_inequality_node(visitor, node, lhs, args[1]) + + +def _after_call_dispatcher(visitor, node, *args): + if len(args) == 2: + return _handle_inequality_node(visitor, node, args[1], args[0]) + else: # delay is also specified + lhs = _handle_sum_node(visitor, node, args[1], args[2]) + return _handle_inequality_node(visitor, node, lhs, args[0]) + + +def _at_call_dispatcher(visitor, node, *args): + if len(args) == 2: + return _handle_equality_node(visitor, node, args[0], args[1]) + else: # a delay is also specified + rhs = _handle_sum_node(visitor, node, args[1], args[2]) + return _handle_equality_node(visitor, node, args[0], rhs) + + +_call_dispatchers = { + _DEFERRED_BEFORE: _before_call_dispatcher, + _DEFERRED_AFTER: _after_call_dispatcher, + _DEFERRED_AT: _at_call_dispatcher, + _IMPLIES: _handle_implication_node, + _LAND: _handle_and_node, + _LOR: _handle_or_node, + _XOR: _handle_xor_node, + _EQUIVALENT_TO: _handle_equivalence_node, +} + + +def _handle_call(visitor, node, *args): + return _call_dispatchers[args[0][0]](visitor, node, args[0][1], *args[1:]) + + +## +# Scheduling +## + +# This will get populated when cp is finally imported +_before_dispatchers = {} +_at_dispatchers = {} +_time_point_dispatchers = {_GENERAL: lambda x: x, _ELEMENT_CONSTRAINT: lambda x: x} + +_non_precedence_types = {_GENERAL, _ELEMENT_CONSTRAINT} + + +def _handle_before_expression_node(visitor, node, time1, time2, delay): + t1 = (_GENERAL, _time_point_dispatchers[time1[0]](time1[1])) + t2 = (_GENERAL, _time_point_dispatchers[time2[0]](time2[1])) + lhs = _handle_sum_node(visitor, None, t1, delay) + if time1[0] in _non_precedence_types or time2[0] in _non_precedence_types: + # we already know we can't use a start_before_start function or its ilk: + # Just build the correct inequality. + return _handle_inequality_node(visitor, None, lhs, t2) + + # If this turns out to be the root, we can use the second return, but we + # also pass the args for the inequality expression in case we use this in a + # boolean-valued context. + return ( + _BEFORE, + _before_dispatchers[time1[0], time2[0]](time1[1], time2[1], delay[1]), + (lhs, t2), + ) + + +def _handle_at_expression_node(visitor, node, time1, time2, delay): + t1 = (_GENERAL, _time_point_dispatchers[time1[0]](time1[1])) + t2 = (_GENERAL, _time_point_dispatchers[time2[0]](time2[1])) + lhs = _handle_sum_node(visitor, None, t1, delay) + if time1[0] in _non_precedence_types or time2[0] in _non_precedence_types: + # we can't use a start_before_start function or its ilk: Just build the + # correct inequality. + return _handle_equality_node(visitor, None, lhs, t2) + + return ( + _AT, + _at_dispatchers[time1[0], time2[0]](time1[1], time2[1], delay[1]), + (lhs, t2), + ) + + +def _handle_always_in_node(visitor, node, cumul_func, lb, ub, start, end): + return ( + _GENERAL, + cp.always_in(cumul_func[1], interval=(start[1], end[1]), min=lb[1], max=ub[1]), + ) + + +class LogicalToDoCplex(StreamBasedExpressionVisitor): + _operator_handles = { + EXPR.GetItemExpression: _handle_getitem, + EXPR.Structural_GetItemExpression: _handle_getitem, + EXPR.Numeric_GetItemExpression: _handle_getitem, + EXPR.Boolean_GetItemExpression: _handle_getitem, + EXPR.GetAttrExpression: _handle_getattr, + EXPR.Structural_GetAttrExpression: _handle_getattr, + EXPR.Numeric_GetAttrExpression: _handle_getattr, + EXPR.Boolean_GetAttrExpression: _handle_getattr, + EXPR.CallExpression: _handle_call, + EXPR.NegationExpression: _handle_negation_node, + EXPR.ProductExpression: _handle_product_node, + EXPR.DivisionExpression: _handle_division_node, + EXPR.PowExpression: _handle_pow_node, + EXPR.AbsExpression: _handle_abs_node, + EXPR.MonomialTermExpression: _handle_monomial_expr, + EXPR.SumExpression: _handle_sum_node, + EXPR.LinearExpression: _handle_sum_node, + EXPR.MinExpression: _handle_min_node, + EXPR.MaxExpression: _handle_max_node, + EXPR.NotExpression: _handle_not_node, + EXPR.EquivalenceExpression: _handle_equivalence_node, + EXPR.ImplicationExpression: _handle_implication_node, + EXPR.AndExpression: _handle_and_node, + EXPR.OrExpression: _handle_or_node, + EXPR.XorExpression: _handle_xor_node, + EXPR.ExactlyExpression: _handle_exactly_node, + EXPR.AtMostExpression: _handle_at_most_node, + EXPR.AtLeastExpression: _handle_at_least_node, + EXPR.EqualityExpression: _handle_equality_node, + EXPR.NotEqualExpression: _handle_not_equal_node, + EXPR.InequalityExpression: _handle_inequality_node, + EXPR.RangedExpression: _handle_ranged_inequality_node, + BeforeExpression: _handle_before_expression_node, + AtExpression: _handle_at_expression_node, + AlwaysIn: _handle_always_in_node, + _GeneralExpressionData: _handle_named_expression_node, + ScalarExpression: _handle_named_expression_node, + } + _var_handles = { + IntervalVarStartTime: _before_interval_var_start_time, + IntervalVarEndTime: _before_interval_var_end_time, + IntervalVarLength: _before_interval_var_length, + IntervalVarPresence: _before_interval_var_presence, + ScalarIntervalVar: _before_interval_var, + IntervalVarData: _before_interval_var, + IndexedIntervalVar: _before_indexed_interval_var, + ScalarVar: _before_var, + _GeneralVarData: _before_var, + IndexedVar: _before_indexed_var, + ScalarBooleanVar: _before_boolean_var, + _GeneralBooleanVarData: _before_boolean_var, + IndexedBooleanVar: _before_indexed_boolean_var, + _GeneralExpressionData: _before_named_expression, + ScalarExpression: _before_named_expression, + IndexedParam: _before_indexed_param, # Because of indirection + ScalarParam: _before_param, + } + + def __init__(self, cpx_model, symbolic_solver_labels=False): + self.cpx = cpx_model + self.symbolic_solver_labels = symbolic_solver_labels + self._process_node = self._process_node_bx + + self.var_map = {} + self._named_expressions = {} + self.pyomo_to_docplex = ComponentMap() + + def initializeWalker(self, expr): + expr, src, src_idx = expr + walk, result = self.beforeChild(None, expr, 0) + if not walk: + return False, result + return True, expr + + def beforeChild(self, node, child, child_idx): + # Return native types + if child.__class__ in EXPR.native_types: + return False, (_GENERAL, child) + + if child.__class__ in step_func_expression_types: + return _step_function_handles[child.__class__](self, child) + + # Convert Vars Logical vars to docplex equivalents + if not child.is_expression_type() or child.is_named_expression_type(): + return self._var_handles[child.__class__](self, child) + + return True, None + + def exitNode(self, node, data): + return self._operator_handles[node.__class__](self, node, *data) + + finalizeResult = None + + +# [ESJ 11/7/22]: TODO: We should revisit this method in the future, as it is not +# very efficient. +def collect_valid_components(model, active=True, sort=None, valid=set(), targets=set()): + assert active in (True, None) + unrecognized = {} + components = {k: [] for k in targets} + for obj in model.component_data_objects(active=True, descend_into=True, sort=sort): + ctype = obj.ctype + if ctype in components: + components[ctype].append(obj) + elif ctype not in valid: + if ctype not in unrecognized: + unrecognized[ctype] = [obj] + else: + unrecognized[ctype].append(obj) + + return components, unrecognized + + +@WriterFactory.register( + 'docplex_model', 'Generate the corresponding docplex model object' +) +class DocplexWriter(object): + CONFIG = ConfigDict('docplex_model_writer') + CONFIG.declare( + 'symbolic_solver_labels', + ConfigValue( + default=False, + domain=bool, + description='Write Pyomo Var and Constraint names to docplex model', + ), + ) + + def __init__(self): + self.config = self.CONFIG() + + def write(self, model, **options): + config = options.pop('config', self.config)(options) + + components, unknown = collect_valid_components( + model, + active=True, + sort=SortComponents.deterministic, + valid={ + Block, + Objective, + Constraint, + Var, + Param, + BooleanVar, + LogicalConstraint, + Suffix, + # FIXME: Non-active components should not report as Active + Set, + RangeSet, + Port, + }, + targets={Objective, Constraint, LogicalConstraint, IntervalVar}, + ) + if unknown: + raise ValueError( + "The model ('%s') contains the following active components " + "that the docplex writer does not know how to process:\n\t%s" + % ( + model.name, + "\n\t".join( + "%s:\n\t\t%s" % (k, "\n\t\t".join(map(attrgetter('name'), v))) + for k, v in unknown.items() + ), + ) + ) + + cpx_model = cp.CpoModel() + visitor = LogicalToDoCplex( + cpx_model, symbolic_solver_labels=config.symbolic_solver_labels + ) + + active_objs = components[Objective] + # [ESJ 09/29/22]: TODO: I think that CP Optimizer can support + # multiple objectives. We should generalize this later, but for + # now I don't much care. + if len(active_objs) > 1: + raise ValueError( + "More than one active objective defined for " + "input model '%s': Cannot write to docplex." % model.name + ) + elif len(active_objs) == 1: + obj = active_objs[0] + obj_expr = visitor.walk_expression((obj.expr, obj, 0)) + if obj.sense is minimize: + cpx_model.add(cp.minimize(obj_expr[1])) + else: + cpx_model.add(cp.maximize(obj_expr[1])) + + # No objective is fine too, this is CP afterall... + + # Write algebraic constraints + for cons in components[Constraint]: + expr = visitor.walk_expression((cons.body, cons, 0)) + if cons.lower is not None and cons.upper is not None: + cpx_model.add(cp.range(expr[1], lb=cons.lb, ub=cons.ub)) + elif cons.lower is not None: + cpx_model.add(cons.lb <= expr[1]) + elif cons.upper is not None: + cpx_model.add(cons.ub >= expr[1]) + + # Write interval vars (these are secretly constraints if they have to be + # scheduled) + for var in components[IntervalVar]: + # we just walk it so it gets added to the model. Note that + # adding it again here would add it for a second time, so that's + # why we don't. + visitor.walk_expression((var, var, 0)) + + # Write logical constraints + for cons in components[LogicalConstraint]: + expr = visitor.walk_expression((cons.expr, cons, 0)) + if expr[0] is _ELEMENT_CONSTRAINT: + # Make the expression into a docplex-approved boolean-valued + # expression, if it turned out that the root of the + # expression was just an element constraint. (This can + # happen for something like a constraint that requires that + # an interval var specified by indirection has to be + # present.) + cpx_model.add(expr[1] == True) + else: + cpx_model.add(expr[1]) + + # That's all, folks. + return cpx_model, visitor.pyomo_to_docplex + + +@SolverFactory.register('cp_optimizer', doc='Direct interface to CPLEX CP Optimizer') +class CPOptimizerSolver(object): + CONFIG = ConfigDict("cp_optimizer_solver") + CONFIG.declare( + 'symbolic_solver_labels', + ConfigValue( + default=False, + domain=bool, + description='Write Pyomo Var and Constraint names to docplex model', + ), + ) + CONFIG.declare( + 'tee', + ConfigValue( + default=False, domain=bool, description="Stream solver output to terminal." + ), + ) + CONFIG.declare( + 'options', ConfigValue(default={}, description="Dictionary of solver options.") + ) + + _unrestricted_license = None + + def __init__(self, **kwds): + self.config = self.CONFIG() + self.config.set_value(kwds) + if docplex_available: + self._solve_status_map = { + cp.SOLVE_STATUS_UNKNOWN: TerminationCondition.unknown, + cp.SOLVE_STATUS_INFEASIBLE: TerminationCondition.infeasible, + cp.SOLVE_STATUS_FEASIBLE: TerminationCondition.feasible, + cp.SOLVE_STATUS_OPTIMAL: TerminationCondition.optimal, + cp.SOLVE_STATUS_JOB_ABORTED: None, # we need the fail status + cp.SOLVE_STATUS_JOB_FAILED: TerminationCondition.solverFailure, + } + self._stop_cause_map = { + # We only need to check this if we get an 'aborted' status, so + # if this says it hasn't been stopped, we're just confused at + # this point. + cp.STOP_CAUSE_NOT_STOPPED: TerminationCondition.unknown, + cp.STOP_CAUSE_LIMIT: TerminationCondition.maxTimeLimit, + # User called exit, maybe in a callback. + cp.STOP_CAUSE_EXIT: TerminationCondition.userInterrupt, + # docplex says "Search aborted externally" + cp.STOP_CAUSE_ABORT: TerminationCondition.userInterrupt, + # This is in their documentation, but not here, for some reason + # cp.STOP_CAUSE_UNKNOWN: TerminationCondition.unknown + } + + @property + def options(self): + return self.config.options + + # Support use as a context manager under current solver API + def __enter__(self): + return self + + def __exit__(self, t, v, traceback): + pass + + def available(self, exception_flag=True): + return Executable('cpoptimizer').available() and docplex_available + + def license_is_valid(self): + if CPOptimizerSolver._unrestricted_license is None: + # Note: 140*log_2(140) == 998.1 fits in CE, + # 141*log_2(141) == 1006.7 does not + x = cp.integer_var_list(141, 1, 141, "X") + m = cp.CpoModel() + m.add(cp.all_diff(x)) + try: + m.solve() + CPOptimizerSolver._unrestricted_license = True + except cp_solver.solver.CpoSolverException: + CPOptimizerSolver._unrestricted_license = False + return CPOptimizerSolver._unrestricted_license + + def solve(self, model, **kwds): + """Solve the model. + + Args: + model (Block): a Pyomo model or block to be solved + + """ + config = self.config() + config.set_value(kwds) + + writer = DocplexWriter() + cpx_model, var_map = writer.write( + model, symbolic_solver_labels=config.symbolic_solver_labels + ) + if not config.tee: + # If the user has also set LogVerbosity, we'll assume they know what + # they're doing. + verbosity = config.options.get('LogVerbosity') + if verbosity is None: + config.options['LogVerbosity'] = 'Quiet' + + msol = cpx_model.solve(**self.options) + + # Transfer the solver status to the pyomo results object + results = SolverResults() + results.solver.name = "CP Optimizer" + results.problem.name = model.name + + info = msol.get_solver_infos() + results.problem.number_of_constraints = info.get_number_of_constraints() + int_vars = info.get_number_of_integer_vars() + interval_vars = info.get_number_of_interval_vars() + results.problem.number_of_integer_vars = int_vars + results.problem.number_of_interval_vars = interval_vars + # This is a useless number, but so is 0, so... + results.problem.number_of_variables = int_vars + interval_vars + + val = msol.get_objective_value() + bound = msol.get_objective_bound() + if cpx_model.is_maximization(): + results.problem.number_of_objectives = 1 + results.problem.sense = maximize + results.problem.lower_bound = val + results.problem.upper_bound = bound + elif cpx_model.is_minimization(): + results.problem.number_of_objectives = 1 + results.problem.sense = minimize + results.problem.lower_bound = bound + results.problem.upper_bound = val + else: + # it's a satisfaction problem + results.problem.number_of_objectives = 0 + results.problem.sense = None + results.problem.lower_bound = None + results.problem.upper_bound = None + + results.solver.solve_time = msol.get_solve_time() + solve_status = msol.get_solve_status() + results.solver.termination_condition = ( + self._solve_status_map[solve_status] + if solve_status is not None + else self._stop_cause_map[msol.get_stop_cause()] + ) + + # Copy the variable values onto the Pyomo model, using the map we stored + # on the writer. + cp_sol = msol.get_solution() + if cp_sol is not None: + for py_var, cp_var in var_map.items(): + sol = cp_sol.get_var_solution(cp_var) + if sol is None: + logger.warning( + "CP optimizer did not return a value " + "for variable '%s'" % py_var.name + ) + else: + sol = sol.get_value() + if py_var.ctype is IntervalVar: + if len(sol) == 0: + # The interval_var is absent + py_var.is_present.set_value(False) + else: + (start, end, size) = sol + py_var.is_present.set_value(True) + py_var.start_time.set_value(start, skip_validation=True) + py_var.end_time.set_value(end, skip_validation=True) + py_var.length.set_value(end - start, skip_validation=True) + elif py_var.ctype in {Var, BooleanVar}: + py_var.set_value(sol, skip_validation=True) + else: + raise DeveloperError( + "Unrecognized Pyomo type in pyomo-to-docplex " + "variable map: %s" % type(py_var) + ) + + return results diff --git a/pyomo/contrib/cp/scheduling_expr/__init__.py b/pyomo/contrib/cp/scheduling_expr/__init__.py new file mode 100644 index 00000000000..8b137891791 --- /dev/null +++ b/pyomo/contrib/cp/scheduling_expr/__init__.py @@ -0,0 +1 @@ + diff --git a/pyomo/contrib/cp/scheduling_expr/precedence_expressions.py b/pyomo/contrib/cp/scheduling_expr/precedence_expressions.py new file mode 100644 index 00000000000..5340583a216 --- /dev/null +++ b/pyomo/contrib/cp/scheduling_expr/precedence_expressions.py @@ -0,0 +1,61 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.core.expr.logical_expr import BooleanExpression + + +class PrecedenceExpression(BooleanExpression): + def nargs(self): + return 3 + + @property + def delay(self): + return self._args_[2] + + def _to_string_impl(self, values, relation): + delay = int(values[2]) + if delay == 0: + first = values[0] + elif delay > 0: + first = "%s + %s" % (values[0], delay) + else: + first = "%s - %s" % (values[0], abs(delay)) + return "%s %s %s" % (first, relation, values[1]) + + +class BeforeExpression(PrecedenceExpression): + """ + Base class for all precedence expressions. + + args: + args (tuple): child nodes of type IntervalVar. We expect them to be + (time_that_comes_before, time_that_comes_after, delay). + delay: A (possibly negative) integer value representing the number of + time periods delay in the precedence relationship + """ + + def _to_string(self, values, verbose, smap): + return self._to_string_impl(values, "<=") + + +class AtExpression(PrecedenceExpression): + """ + Base class for all precedence expressions. + + args: + args (tuple): child nodes of type IntervalVar. We expect them to be + (first_time, second_time, delay). + delay: A (possibly negative) integer value representing the number of + time periods delay in the precedence relationship + """ + + def _to_string(self, values, verbose, smap): + return self._to_string_impl(values, "==") diff --git a/pyomo/contrib/cp/scheduling_expr/step_function_expressions.py b/pyomo/contrib/cp/scheduling_expr/step_function_expressions.py new file mode 100644 index 00000000000..b4f8fbb4977 --- /dev/null +++ b/pyomo/contrib/cp/scheduling_expr/step_function_expressions.py @@ -0,0 +1,373 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.contrib.cp.interval_var import ( + IntervalVar, + IntervalVarData, + IntervalVarStartTime, + IntervalVarEndTime, +) +from pyomo.core.base.component import Component +from pyomo.core.expr.base import ExpressionBase +from pyomo.core.expr.logical_expr import BooleanExpression + + +def _sum_two_units(_self, _other): + return CumulativeFunction([_self, _other]) + + +def _sum_cumul_and_unit(_cumul, _unit): + if _cumul.nargs() == len(_cumul._args_): + # we can just append to the cumul list + _cumul._args_.append(_unit) + return CumulativeFunction(_cumul._args_, nargs=len(_cumul._args_)) + else: + return CumulativeFunction(_cumul.args + [_unit]) + + +def _sum_unit_and_cumul(_unit, _cumul): + # Nothing to be done: we need to make a new one because we can't prepend to + # the list of args. + return CumulativeFunction([_unit] + _cumul.args) + + +def _sum_cumuls(_self, _other): + if _self.nargs() == len(_self._args_): + _self._args_.extend(_other.args) + return CumulativeFunction(_self._args_, nargs=len(_self._args_)) + else: + # we have to clone the list of _args_ + return CumulativeFunction(_self.args + _other.args) + + +def _subtract_two_units(_self, _other): + return CumulativeFunction([_self, NegatedStepFunction((_other,))]) + + +def _subtract_cumul_and_unit(_cumul, _unit): + if _cumul.nargs() == len(_cumul._args_): + # we can just append to the cumul list + _cumul._args_.append(NegatedStepFunction((_unit,))) + return CumulativeFunction(_cumul._args_, nargs=len(_cumul._args_)) + else: + return CumulativeFunction(_cumul.args + [NegatedStepFunction((_unit,))]) + + +def _subtract_unit_and_cumul(_unit, _cumul): + # Nothing to be done: we need to make a new one because we can't prepend to + # the list of args. + return CumulativeFunction( + [_unit] + [NegatedStepFunction((a,)) for a in _cumul.args] + ) + + +def _subtract_cumuls(_self, _other): + if _self.nargs() == len(_self._args_): + _self._args_.extend([NegatedStepFunction((a,)) for a in _other.args]) + return CumulativeFunction(_self._args_, nargs=len(_self._args_)) + else: + # we have to clone the list of _args_ + return CumulativeFunction( + _self.args + [NegatedStepFunction((a,)) for a in _other.args] + ) + + +def _generate_sum_expression(_self, _other): + if isinstance(_self, CumulativeFunction): + if isinstance(_other, CumulativeFunction): + return _sum_cumuls(_self, _other) + elif isinstance(_other, StepFunction): + return _sum_cumul_and_unit(_self, _other) + elif isinstance(_self, StepFunction): + if isinstance(_other, CumulativeFunction): + return _sum_unit_and_cumul(_self, _other) + elif isinstance(_other, StepFunction): + return _sum_two_units(_self, _other) + raise TypeError( + "Cannot add object of class %s to object of " + "class %s" % (_other.__class__, _self.__class__) + ) + + +def _generate_difference_expression(_self, _other): + if isinstance(_self, CumulativeFunction): + if isinstance(_other, CumulativeFunction): + return _subtract_cumuls(_self, _other) + elif isinstance(_other, StepFunction): + return _subtract_cumul_and_unit(_self, _other) + elif isinstance(_self, StepFunction): + if isinstance(_other, CumulativeFunction): + return _subtract_unit_and_cumul(_self, _other) + elif isinstance(_other, StepFunction): + return _subtract_two_units(_self, _other) + raise TypeError( + "Cannot subtract object of class %s from object of " + "class %s" % (_other.__class__, _self.__class__) + ) + + +class StepFunction(ExpressionBase): + """ + The base class for the step function expression system. + """ + + __slots__ = () + + def __add__(self, other): + return _generate_sum_expression(self, other) + + def __radd__(self, other): + # Mathematically this doesn't make a whole lot of sense, but we'll call + # 0 a function and be happy so that sum() works as expected. + if other == 0: + return self + return _generate_sum_expression(other, self) + + def __iadd__(self, other): + return _generate_sum_expression(self, other) + + def __sub__(self, other): + return _generate_difference_expression(self, other) + + def __rsub__(self, other): + return _generate_difference_expression(other, self) + + def __isub__(self, other): + return _generate_difference_expression(self, other) + + def within(self, bounds, times): + return AlwaysIn(cumul_func=self, bounds=bounds, times=times) + + @property + def args(self): + return self._args_[: self.nargs()] + + +class Pulse(StepFunction): + """ + A step function specified by an IntervalVar and an integer height that + has value 0 before the IntervalVar's start_time and after the + IntervalVar's end time and that takes the value specified by the 'height' + during the IntervalVar. (These are often used to model resource + constraints.) + + Args: + interval_var (IntervalVar): the interval variable over which the + pulse function is non-zero + height (int): The value of the pulse function during the time + interval_var is scheduled + """ + + __slots__ = '_args_' + + def __init__(self, args=None, interval_var=None, height=None): + if args: + if any(arg is not None for arg in (interval_var, height)): + raise ValueError( + "Cannot specify both args and any of {interval_var, height}" + ) + # Make sure this is a list because we may add to it if this is + # summed with other StepFunctions + self._args_ = [arg for arg in args] + else: + self._args_ = [interval_var, height] + + interval_var = self._args_[0] + if ( + not isinstance(interval_var, IntervalVarData) + or interval_var.ctype is not IntervalVar + ): + raise TypeError( + "The 'interval_var' argument for a 'Pulse' must " + "be an 'IntervalVar'.\n" + "Received: %s" % type(interval_var) + ) + + @property + def _interval_var(self): + return self._args_[0] + + @property + def _height(self): + return self._args_[1] + + def nargs(self): + return 2 + + def _to_string(self, values, verbose, smap): + return "Pulse(%s, height=%s)" % (values[0], values[1]) + + +class Step(StepFunction): + """ + A step function specified by a time point and an integer height that + has value 0 before the time point and takes the value specified by the + 'height' after the time point. + + Args: + time (IntervalVarTimePoint or int): the time point at which the step + function becomes non-zero + height (int): The value of the step function after the time point + """ + + __slots__ = '_args_' + + def __new__(cls, time, height): + if isinstance(time, int): + return StepAt((time, height)) + elif time.ctype is IntervalVarStartTime: + return StepAtStart((time.get_associated_interval_var(), height)) + elif time.ctype is IntervalVarEndTime: + return StepAtEnd((time.get_associated_interval_var(), height)) + else: + raise TypeError( + "The 'time' argument for a 'Step' must be either " + "an 'IntervalVarTimePoint' (for example, the " + "'start_time' or 'end_time' of an IntervalVar) or " + "an integer time point in the time horizon.\n" + "Received: %s" % type(time) + ) + + +class StepBase(StepFunction): + __slots__ = '_args_' + + def __init__(self, args): + # Make sure this is a list because we may add to it if this is summed + # with otther StepFunctions + self._args_ = [arg for arg in args] + + @property + def _time(self): + return self._args_[0] + + @property + def _height(self): + return self._args_[1] + + def nargs(self): + return 2 + + def _to_string(self, values, verbose, smap): + return "Step(%s, height=%s)" % (values[0], values[1]) + + +class StepAt(StepBase): + __slots__ = () + + +class StepAtStart(StepBase): + __slots__ = () + + def _to_string(self, values, verbose, smap): + return "Step(%s, height=%s)" % (self._time.start_time, values[1]) + + +class StepAtEnd(StepBase): + __slots__ = () + + def _to_string(self, values, verbose, smap): + return "Step(%s, height=%s)" % (self._time.end_time, values[1]) + + +class CumulativeFunction(StepFunction): + """ + A sum of elementary step functions (Pulse and Step), defining a step + function over time. (Often used to model resource constraints.) + + Args: + args (list or tuple): Child elementary step functions of this node + """ + + __slots__ = ('_args_', '_nargs') + + def __init__(self, args, nargs=None): + # We make sure args are a list because we might add to them later, if + # this is summed with another cumulative function. + self._args_ = [arg for arg in args] + if nargs is None: + self._nargs = len(args) + else: + self._nargs = nargs + + def nargs(self): + return self._nargs + + def _to_string(self, values, verbose, smap): + s = "" + for i, arg in enumerate(self.args): + if isinstance(arg, NegatedStepFunction): + s += str(arg) + ' ' + else: + s += "+ %s "[2 * (i == 0) :] % str(arg) + return s[:-1] + + +class NegatedStepFunction(StepFunction): + """ + The negated form of an elementary step function: That is, it represents + subtracting the elementary function's (nonnegative) height rather than + adding it. + + Args: + arg (Step or Pulse): Child elementary step function of this node + """ + + __slots__ = '_args_' + + def __init__(self, args): + self._args_ = args + + def nargs(self): + return 1 + + def _to_string(self, values, verbose, smap): + return "- %s" % values[0] + + +class AlwaysIn(BooleanExpression): + """ + An expression representing the constraint that a cumulative function is + required to take values within a tuple of bounds over a specified time + interval. (Often used to enforce limits on resource availability.) + + Args: + cumul_func (CumulativeFunction): Step function being constrained + bounds (tuple of two integers): Lower and upper bounds to enforce on + the cumulative function + times (tuple of two integers): The time interval (start, end) over + which to enforce the bounds on the values of the cumulative + function. + """ + + __slots__ = () + + def __init__(self, args=None, cumul_func=None, bounds=None, times=None): + if args: + if any(arg is not None for arg in {cumul_func, bounds, times}): + raise ValueError( + "Cannot specify both args and any of {cumul_func, bounds, times}" + ) + self._args_ = args + else: + self._args_ = (cumul_func, bounds[0], bounds[1], times[0], times[1]) + + def nargs(self): + return 5 + + def _to_string(self, values, verbose, smap): + return "(%s).within(bounds=(%s, %s), times=(%s, %s))" % ( + values[0], + values[1], + values[2], + values[3], + values[4], + ) diff --git a/examples/pyomobook/abstract-ch/pyomo.bad1.txt b/pyomo/contrib/cp/tests/__init__.py similarity index 100% rename from examples/pyomobook/abstract-ch/pyomo.bad1.txt rename to pyomo/contrib/cp/tests/__init__.py diff --git a/pyomo/contrib/cp/tests/test_docplex_walker.py b/pyomo/contrib/cp/tests/test_docplex_walker.py new file mode 100644 index 00000000000..97bc538c827 --- /dev/null +++ b/pyomo/contrib/cp/tests/test_docplex_walker.py @@ -0,0 +1,1493 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest + +from pyomo.contrib.cp import IntervalVar +from pyomo.contrib.cp.scheduling_expr.step_function_expressions import ( + AlwaysIn, + Step, + Pulse, +) +from pyomo.contrib.cp.repn.docplex_writer import docplex_available, LogicalToDoCplex + +from pyomo.core.base.range import NumericRange +from pyomo.core.expr.numeric_expr import MinExpression, MaxExpression +from pyomo.core.expr.logical_expr import equivalent, exactly, atleast, atmost +from pyomo.core.expr.relational_expr import NotEqualExpression + +from pyomo.environ import ( + ConcreteModel, + RangeSet, + Var, + BooleanVar, + Constraint, + LogicalConstraint, + PositiveIntegers, + Binary, + NonNegativeIntegers, + NegativeIntegers, + NonPositiveIntegers, + Integers, + inequality, + Expression, + Reals, + Set, + Param, +) + +try: + import docplex.cp.model as cp + + docplex_available = True +except: + docplex_available = False + + +class CommonTest(unittest.TestCase): + def get_visitor(self): + docplex_model = cp.CpoModel() + return LogicalToDoCplex(docplex_model, symbolic_solver_labels=True) + + def get_model(self): + m = ConcreteModel() + m.I = RangeSet(10) + m.a = Var(m.I) + m.x = Var(within=PositiveIntegers, bounds=(6, 8)) + m.i = IntervalVar(optional=True) + m.i2 = IntervalVar([1, 2], optional=False) + + m.b = BooleanVar() + m.b2 = BooleanVar(['a', 'b', 'c']) + + return m + + +@unittest.skipIf(not docplex_available, "docplex is not available") +class TestCPExpressionWalker_AlgebraicExpressions(CommonTest): + def test_write_addition(self): + m = self.get_model() + m.c = Constraint(expr=m.x + m.i.start_time + m.i2[2].length <= 3) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.x), visitor.var_map) + self.assertIn(id(m.i), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.i2[2].length), visitor.var_map) + + cpx_x = visitor.var_map[id(m.x)] + cpx_i = visitor.var_map[id(m.i)] + cpx_i2 = visitor.var_map[id(m.i2[2])] + self.assertTrue( + expr[1].equals(cpx_x + cp.start_of(cpx_i) + cp.length_of(cpx_i2)) + ) + + def test_write_subtraction(self): + m = self.get_model() + m.a.domain = Binary + m.c = Constraint(expr=m.x - m.a[1] <= 3) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.x), visitor.var_map) + self.assertIn(id(m.a[1]), visitor.var_map) + + x = visitor.var_map[id(m.x)] + a1 = visitor.var_map[id(m.a[1])] + + self.assertTrue(expr[1].equals(x + (-1 * a1))) + + def test_write_product(self): + m = self.get_model() + m.a.domain = PositiveIntegers + m.c = Constraint(expr=m.x * (m.a[1] + 1) <= 3) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.x), visitor.var_map) + self.assertIn(id(m.a[1]), visitor.var_map) + + x = visitor.var_map[id(m.x)] + a1 = visitor.var_map[id(m.a[1])] + + self.assertTrue(expr[1].equals(x * (a1 + 1))) + + def test_write_floating_point_division(self): + m = self.get_model() + m.a.domain = NonNegativeIntegers + m.c = Constraint(expr=m.x / (m.a[1] + 1) <= 3) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.x), visitor.var_map) + self.assertIn(id(m.a[1]), visitor.var_map) + + x = visitor.var_map[id(m.x)] + a1 = visitor.var_map[id(m.a[1])] + + self.assertTrue(expr[1].equals(x / (a1 + 1))) + + def test_write_power_expression(self): + m = self.get_model() + m.c = Constraint(expr=m.x**2 <= 3) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.x), visitor.var_map) + cpx_x = visitor.var_map[id(m.x)] + # .equals checks the equality of two expressions in docplex. + self.assertTrue(expr[1].equals(cpx_x**2)) + + def test_write_absolute_value_expression(self): + m = self.get_model() + m.a.domain = NegativeIntegers + m.c = Constraint(expr=abs(m.a[1]) + 1 <= 3) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.a[1]), visitor.var_map) + + a1 = visitor.var_map[id(m.a[1])] + + self.assertTrue(expr[1].equals(cp.abs(a1) + 1)) + + def test_write_min_expression(self): + m = self.get_model() + m.a.domain = NonPositiveIntegers + m.c = Constraint(expr=MinExpression([m.a[i] for i in m.I]) >= 3) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + a = {} + for i in m.I: + self.assertIn(id(m.a[i]), visitor.var_map) + a[i] = visitor.var_map[id(m.a[i])] + + self.assertTrue(expr[1].equals(cp.min(a[i] for i in m.I))) + + def test_write_max_expression(self): + m = self.get_model() + m.a.domain = NonPositiveIntegers + m.c = Constraint(expr=MaxExpression([m.a[i] for i in m.I]) >= 3) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + a = {} + for i in m.I: + self.assertIn(id(m.a[i]), visitor.var_map) + a[i] = visitor.var_map[id(m.a[i])] + + self.assertTrue(expr[1].equals(cp.max(a[i] for i in m.I))) + + def test_expression_with_mutable_param(self): + m = ConcreteModel() + m.x = Var(domain=Integers, bounds=(2, 3)) + m.p = Param(initialize=4, mutable=True) + e = m.p * m.x + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + self.assertIn(id(m.x), visitor.var_map) + x = visitor.var_map[id(m.x)] + + self.assertTrue(expr[1].equals(4 * x)) + + +@unittest.skipIf(not docplex_available, "docplex is not available") +class TestCPExpressionWalker_LogicalExpressions(CommonTest): + def test_write_logical_and(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.b.land(m.b2['b'])) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.b2['b']), visitor.var_map) + + b = visitor.var_map[id(m.b)] + b2b = visitor.var_map[id(m.b2['b'])] + + self.assertTrue(expr[1].equals(cp.logical_and(b, b2b))) + + def test_write_logical_or(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.b.lor(m.i.is_present)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.i), visitor.var_map) + b = visitor.var_map[id(m.b)] + i = visitor.var_map[id(m.i)] + + self.assertTrue(expr[1].equals(cp.logical_or(b, cp.presence_of(i)))) + + def test_write_xor(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.b.xor(m.i2[2].start_time >= 5)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + b = visitor.var_map[id(m.b)] + i22 = visitor.var_map[id(m.i2[2])] + + # [ESJ 9/22/22]: This isn't the greatest test because there's no direct + # translation so how we choose to represent this could change. + self.assertTrue( + expr[1].equals(cp.count([b, cp.less_or_equal(5, cp.start_of(i22))], 1) == 1) + ) + + def test_write_logical_not(self): + m = self.get_model() + m.c = LogicalConstraint(expr=~m.b2['a']) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.b2['a']), visitor.var_map) + b2a = visitor.var_map[id(m.b2['a'])] + + self.assertTrue(expr[1].equals(cp.logical_not(b2a))) + + def test_equivalence(self): + m = self.get_model() + m.c = LogicalConstraint(expr=equivalent(~m.b2['a'], m.b)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.b2['a']), visitor.var_map) + b = visitor.var_map[id(m.b)] + b2a = visitor.var_map[id(m.b2['a'])] + + self.assertTrue(expr[1].equals(cp.equal(cp.logical_not(b2a), b))) + + def test_implication(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.b2['a'].implies(~m.b)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.b2['a']), visitor.var_map) + b = visitor.var_map[id(m.b)] + b2a = visitor.var_map[id(m.b2['a'])] + + self.assertTrue(expr[1].equals(cp.if_then(b2a, cp.logical_not(b)))) + + def test_equality(self): + m = self.get_model() + m.a.domain = Integers + m.c = LogicalConstraint(expr=m.b.implies(m.a[3] == 4)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.a[3]), visitor.var_map) + b = visitor.var_map[id(m.b)] + a3 = visitor.var_map[id(m.a[3])] + + self.assertTrue(expr[1].equals(cp.if_then(b, cp.equal(a3, 4)))) + + def test_inequality(self): + m = self.get_model() + m.a.domain = Integers + m.c = LogicalConstraint(expr=m.b.implies(m.a[3] >= m.a[4])) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.a[3]), visitor.var_map) + self.assertIn(id(m.a[4]), visitor.var_map) + b = visitor.var_map[id(m.b)] + a3 = visitor.var_map[id(m.a[3])] + a4 = visitor.var_map[id(m.a[4])] + + self.assertTrue(expr[1].equals(cp.if_then(b, cp.less_or_equal(a4, a3)))) + + def test_ranged_inequality(self): + m = self.get_model() + m.a.domain = Integers + m.c = Constraint(expr=inequality(3, m.a[2], 5)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.a[2]), visitor.var_map) + a2 = visitor.var_map[id(m.a[2])] + + self.assertTrue(expr[1].equals(cp.range(a2, 3, 5))) + + def test_not_equal(self): + m = self.get_model() + m.a.domain = Integers + m.c = LogicalConstraint(expr=m.b.implies(NotEqualExpression([m.a[3], m.a[4]]))) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.a[3]), visitor.var_map) + self.assertIn(id(m.a[4]), visitor.var_map) + b = visitor.var_map[id(m.b)] + a3 = visitor.var_map[id(m.a[3])] + a4 = visitor.var_map[id(m.a[4])] + + self.assertTrue(expr[1].equals(cp.if_then(b, a3 != a4))) + + def test_exactly_expression(self): + m = self.get_model() + m.a.domain = Integers + m.c = LogicalConstraint(expr=exactly(3, [m.a[i] == 4 for i in m.I])) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + a = {} + for i in m.I: + self.assertIn(id(m.a[i]), visitor.var_map) + a[i] = visitor.var_map[id(m.a[i])] + + self.assertTrue( + expr[1].equals(cp.equal(cp.count([a[i] == 4 for i in m.I], 1), 3)) + ) + + def test_atleast_expression(self): + m = self.get_model() + m.a.domain = Integers + m.c = LogicalConstraint(expr=atleast(3, [m.a[i] == 4 for i in m.I])) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + a = {} + for i in m.I: + self.assertIn(id(m.a[i]), visitor.var_map) + a[i] = visitor.var_map[id(m.a[i])] + + self.assertTrue( + expr[1].equals( + cp.greater_or_equal(cp.count([a[i] == 4 for i in m.I], 1), 3) + ) + ) + + def test_atmost_expression(self): + m = self.get_model() + m.a.domain = Integers + m.c = LogicalConstraint(expr=atmost(3, [m.a[i] == 4 for i in m.I])) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + a = {} + for i in m.I: + self.assertIn(id(m.a[i]), visitor.var_map) + a[i] = visitor.var_map[id(m.a[i])] + + self.assertTrue( + expr[1].equals(cp.less_or_equal(cp.count([a[i] == 4 for i in m.I], 1), 3)) + ) + + def test_interval_var_is_present(self): + m = self.get_model() + m.a.domain = Integers + m.c = LogicalConstraint(expr=m.i.is_present.implies(m.a[1] == 5)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.a[1]), visitor.var_map) + self.assertIn(id(m.i), visitor.var_map) + a1 = visitor.var_map[id(m.a[1])] + i = visitor.var_map[id(m.i)] + + self.assertTrue(expr[1].equals(cp.if_then(cp.presence_of(i), a1 == 5))) + + def test_interval_var_is_present_indirection(self): + m = self.get_model() + m.a.domain = Integers + m.y = Var(domain=Integers, bounds=[1, 2]) + + m.c = LogicalConstraint(expr=m.i2[m.y].is_present.implies(m.a[1] >= 7)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.a[1]), visitor.var_map) + a1 = visitor.var_map[id(m.a[1])] + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + + self.assertTrue( + expr[1].equals( + cp.if_then( + cp.element( + [cp.presence_of(i21), cp.presence_of(i22)], 0 + 1 * (y - 1) // 1 + ) + == True, + cp.less_or_equal(7, a1), + ) + ) + ) + + def test_is_present_indirection_and_length(self): + m = self.get_model() + m.y = Var(domain=Integers, bounds=[1, 2]) + + m.c = LogicalConstraint(expr=m.i2[m.y].is_present.land(m.i2[m.y].length >= 7)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + + self.assertTrue( + expr[1].equals( + cp.logical_and( + cp.element( + [cp.presence_of(i21), cp.presence_of(i22)], 0 + 1 * (y - 1) // 1 + ) + == True, + cp.less_or_equal( + 7, + cp.element( + [cp.length_of(i21), cp.length_of(i22)], 0 + 1 * (y - 1) // 1 + ), + ), + ) + ) + ) + + def test_handle_getattr_lor(self): + m = self.get_model() + m.y = Var(domain=Integers, bounds=(1, 2)) + + e = m.i2[m.y].is_present.lor(~m.b) + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.b), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + b = visitor.var_map[id(m.b)] + + self.assertTrue( + expr[1].equals( + cp.logical_or( + cp.element( + [cp.presence_of(i21), cp.presence_of(i22)], 0 + 1 * (y - 1) // 1 + ) + == True, + cp.logical_not(b), + ) + ) + ) + + def test_handle_getattr_xor(self): + m = self.get_model() + m.y = Var(domain=Integers, bounds=(1, 2)) + + e = m.i2[m.y].is_present.xor(m.b) + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.b), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + b = visitor.var_map[id(m.b)] + + self.assertTrue( + expr[1].equals( + cp.equal( + cp.count( + [ + cp.element( + [cp.presence_of(i21), cp.presence_of(i22)], + 0 + 1 * (y - 1) // 1, + ) + == True, + b, + ], + 1, + ), + 1, + ) + ) + ) + + def test_handle_getattr_equivalent_to(self): + m = self.get_model() + m.y = Var(domain=Integers, bounds=(1, 2)) + + e = m.i2[m.y].is_present.equivalent_to(~m.b) + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.b), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + b = visitor.var_map[id(m.b)] + + self.assertTrue( + expr[1].equals( + cp.equal( + cp.element( + [cp.presence_of(i21), cp.presence_of(i22)], 0 + 1 * (y - 1) // 1 + ) + == True, + cp.logical_not(b), + ) + ) + ) + + def test_logical_or_on_indirection(self): + m = ConcreteModel() + m.b = BooleanVar([2, 3, 4, 5]) + m.x = Var(domain=Integers, bounds=(3, 5)) + + e = m.b[m.x].lor(m.x == 5) + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + self.assertIn(id(m.x), visitor.var_map) + self.assertIn(id(m.b[3]), visitor.var_map) + self.assertIn(id(m.b[4]), visitor.var_map) + self.assertIn(id(m.b[5]), visitor.var_map) + + x = visitor.var_map[id(m.x)] + b3 = visitor.var_map[id(m.b[3])] + b4 = visitor.var_map[id(m.b[4])] + b5 = visitor.var_map[id(m.b[5])] + + self.assertTrue( + expr[1].equals( + cp.logical_or( + cp.element([b3, b4, b5], 0 + 1 * (x - 3) // 1) == True, + cp.equal(x, 5), + ) + ) + ) + + def test_logical_xor_on_indirection(self): + m = ConcreteModel() + m.b = BooleanVar([2, 3, 4, 5]) + m.b[4].fix(False) + m.x = Var(domain=Integers, bounds=(3, 5)) + + e = m.b[m.x].xor(m.x == 5) + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + self.assertIn(id(m.x), visitor.var_map) + self.assertIn(id(m.b[3]), visitor.var_map) + self.assertIn(id(m.b[5]), visitor.var_map) + + x = visitor.var_map[id(m.x)] + b3 = visitor.var_map[id(m.b[3])] + b5 = visitor.var_map[id(m.b[5])] + + self.assertTrue( + expr[1].equals( + cp.equal( + cp.count( + [ + cp.element([b3, False, b5], 0 + 1 * (x - 3) // 1) == True, + cp.equal(x, 5), + ], + 1, + ), + 1, + ) + ) + ) + + def test_using_precedence_expr_as_boolean_expr(self): + m = self.get_model() + e = m.b.implies(m.i2[2].start_time.before(m.i2[1].start_time)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + + b = visitor.var_map[id(m.b)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + + self.assertTrue( + expr[1].equals(cp.if_then(b, cp.start_of(i22) + 0 <= cp.start_of(i21))) + ) + + def test_using_precedence_expr_as_boolean_expr_positive_delay(self): + m = self.get_model() + e = m.b.implies(m.i2[2].start_time.before(m.i2[1].start_time, delay=4)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + + b = visitor.var_map[id(m.b)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + + self.assertTrue( + expr[1].equals(cp.if_then(b, cp.start_of(i22) + 4 <= cp.start_of(i21))) + ) + + def test_using_precedence_expr_as_boolean_expr_negative_delay(self): + m = self.get_model() + e = m.b.implies(m.i2[2].start_time.at(m.i2[1].start_time, delay=-3)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + self.assertIn(id(m.b), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + + b = visitor.var_map[id(m.b)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + + self.assertTrue( + expr[1].equals(cp.if_then(b, cp.start_of(i22) + (-3) == cp.start_of(i21))) + ) + + +@unittest.skipIf(not docplex_available, "docplex is not available") +class TestCPExpressionWalker_IntervalVars(CommonTest): + def test_interval_var_fixed_presences_correct(self): + m = self.get_model() + + m.silly = LogicalConstraint(expr=m.i.is_present) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.silly.expr, m.silly, 0)) + self.assertIn(id(m.i), visitor.var_map) + i = visitor.var_map[id(m.i)] + # Check that docplex knows it's optional + self.assertTrue(i.is_optional()) + + # Now fix it to absent + m.i.is_present.fix(False) + m.c = LogicalConstraint(expr=m.i.is_present.lor(m.i2[1].start_time == 2)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.i2[1]), visitor.var_map) + i21 = visitor.var_map[id(m.i2[1])] + self.assertIn(id(m.i), visitor.var_map) + i = visitor.var_map[id(m.i)] + + # Check that we passed on the presence info to docplex + self.assertTrue(i.is_absent()) + self.assertTrue(i21.is_present()) + # Not testing the expression here because sometime we might optimize out + # the presence_of call for fixed absent vars, but for now I haven't. + + def test_interval_var_fixed_length(self): + m = ConcreteModel() + m.i = IntervalVar(start=(2, 7), end=(6, 11), optional=True) + m.i.length.fix(4) + m.silly = LogicalConstraint(expr=m.i.is_present) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.silly.expr, m.silly, 0)) + + self.assertIn(id(m.i), visitor.var_map) + i = visitor.var_map[id(m.i)] + + self.assertTrue(i.is_optional()) + self.assertEqual(i.get_length(), (4, 4)) + self.assertEqual(i.get_start(), (2, 7)) + self.assertEqual(i.get_end(), (6, 11)) + + def test_interval_var_fixed_start_and_end(self): + m = ConcreteModel() + m.i = IntervalVar(start=(3, 7), end=(6, 10)) + m.i.start_time.fix(3) + m.i.end_time.fix(6) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.i, m.i, 0)) + + self.assertIn(id(m.i), visitor.var_map) + i = visitor.var_map[id(m.i)] + + self.assertFalse(i.is_optional()) + self.assertEqual(i.get_start(), (3, 3)) + self.assertEqual(i.get_end(), (6, 6)) + + +@unittest.skipIf(not docplex_available, "docplex is not available") +class TestCPExpressionWalker_PrecedenceExpressions(CommonTest): + def test_start_before_start(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.i.start_time.before(m.i2[1].start_time)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.i), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + + i = visitor.var_map[id(m.i)] + i21 = visitor.var_map[id(m.i2[1])] + + self.assertTrue(expr[1].equals(cp.start_before_start(i, i21, 0))) + + def test_start_before_end(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.i.start_time.before(m.i2[1].end_time, delay=3)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.i), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + + i = visitor.var_map[id(m.i)] + i21 = visitor.var_map[id(m.i2[1])] + + self.assertTrue(expr[1].equals(cp.start_before_end(i, i21, 3))) + + def test_end_before_start(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.i.end_time.before(m.i2[1].start_time, delay=-2)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.i), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + + i = visitor.var_map[id(m.i)] + i21 = visitor.var_map[id(m.i2[1])] + + self.assertTrue(expr[1].equals(cp.end_before_start(i, i21, -2))) + + def test_end_before_end(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.i.end_time.before(m.i2[1].end_time, delay=6)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.i), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + + i = visitor.var_map[id(m.i)] + i21 = visitor.var_map[id(m.i2[1])] + + self.assertTrue(expr[1].equals(cp.end_before_end(i, i21, 6))) + + def test_start_at_start(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.i.start_time.at(m.i2[1].start_time)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.i), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + + i = visitor.var_map[id(m.i)] + i21 = visitor.var_map[id(m.i2[1])] + + self.assertTrue(expr[1].equals(cp.start_at_start(i, i21, 0))) + + def test_start_at_end(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.i.start_time.at(m.i2[1].end_time, delay=3)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.i), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + + i = visitor.var_map[id(m.i)] + i21 = visitor.var_map[id(m.i2[1])] + + self.assertTrue(expr[1].equals(cp.start_at_end(i, i21, 3))) + + def test_end_at_start(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.i.end_time.at(m.i2[1].start_time, delay=-2)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.i), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + + i = visitor.var_map[id(m.i)] + i21 = visitor.var_map[id(m.i2[1])] + + self.assertTrue(expr[1].equals(cp.end_at_start(i, i21, -2))) + + def test_end_at_end(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.i.end_time.at(m.i2[1].end_time, delay=6)) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.i), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + + i = visitor.var_map[id(m.i)] + i21 = visitor.var_map[id(m.i2[1])] + + self.assertTrue(expr[1].equals(cp.end_at_end(i, i21, 6))) + + ## + # Tests for precedence constraints with indirection + ## + + def test_indirection_before_constraint(self): + m = self.get_model() + m.y = Var(domain=Integers, bounds=[1, 2]) + m.c = LogicalConstraint(expr=m.i2[m.y].start_time.before(m.i.end_time, delay=3)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.i), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + i = visitor.var_map[id(m.i)] + + self.assertTrue( + expr[1].equals( + cp.element([cp.start_of(i21), cp.start_of(i22)], 0 + 1 * (y - 1) // 1) + + 3 + <= cp.end_of(i) + ) + ) + + def test_indirection_after_constraint(self): + m = self.get_model() + m.y = Var(domain=Integers, bounds=[1, 2]) + m.c = LogicalConstraint(expr=m.i2[m.y].start_time.after(m.i.end_time, delay=-2)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.i), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + i = visitor.var_map[id(m.i)] + + self.assertTrue( + expr[1].equals( + cp.end_of(i) + (-2) + <= cp.element( + [cp.start_of(i21), cp.start_of(i22)], 0 + 1 * (y - 1) // 1 + ) + ) + ) + + def test_indirection_at_constraint(self): + m = self.get_model() + m.y = Var(domain=Integers, bounds=[1, 2]) + m.c = LogicalConstraint(expr=m.i2[m.y].start_time.at(m.i.end_time, delay=4)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.i), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + i = visitor.var_map[id(m.i)] + + self.assertTrue( + expr[1].equals( + cp.element([cp.start_of(i21), cp.start_of(i22)], 0 + 1 * (y - 1) // 1) + == cp.end_of(i) + 4 + ) + ) + + def test_before_indirection_constraint(self): + m = self.get_model() + m.y = Var(domain=Integers, bounds=[1, 2]) + m.c = LogicalConstraint( + expr=m.i.start_time.before(m.i2[m.y].end_time, delay=-4) + ) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.i), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + i = visitor.var_map[id(m.i)] + + self.assertTrue( + expr[1].equals( + cp.start_of(i) + (-4) + <= cp.element([cp.end_of(i21), cp.end_of(i22)], 0 + 1 * (y - 1) // 1) + ) + ) + + def test_after_indirection_constraint(self): + m = self.get_model() + m.y = Var(domain=Integers, bounds=[1, 2]) + m.c = LogicalConstraint(expr=m.i.start_time.after(m.i2[m.y].end_time)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.i), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + i = visitor.var_map[id(m.i)] + + self.assertTrue( + expr[1].equals( + cp.element([cp.end_of(i21), cp.end_of(i22)], 0 + 1 * (y - 1) // 1) + 0 + <= cp.start_of(i) + ) + ) + + def test_at_indirection_constraint(self): + m = self.get_model() + m.y = Var(domain=Integers, bounds=[1, 2]) + m.c = LogicalConstraint(expr=m.i.start_time.at(m.i2[m.y].end_time, delay=-6)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.i), visitor.var_map) + + y = visitor.var_map[id(m.y)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + i = visitor.var_map[id(m.i)] + + self.assertTrue( + expr[1].equals( + cp.start_of(i) + (-6) + == cp.element([cp.end_of(i21), cp.end_of(i22)], 0 + 1 * (y - 1) // 1) + ) + ) + + def test_double_indirection_before_constraint(self): + m = self.get_model() + # add interval var x can index + m.i3 = IntervalVar([(1, 3), (1, 4), (1, 5)], length=4, optional=True) + m.y = Var(domain=Integers, bounds=[1, 2]) + m.c = LogicalConstraint( + expr=m.i3[1, m.x - 3].start_time.before(m.i2[m.y].end_time) + ) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.i3[1, 3]), visitor.var_map) + self.assertIn(id(m.i3[1, 4]), visitor.var_map) + self.assertIn(id(m.i3[1, 5]), visitor.var_map) + + y = visitor.var_map[id(m.y)] + x = visitor.var_map[id(m.x)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + i33 = visitor.var_map[id(m.i3[1, 3])] + i34 = visitor.var_map[id(m.i3[1, 4])] + i35 = visitor.var_map[id(m.i3[1, 5])] + + self.assertTrue( + expr[1].equals( + cp.element( + [cp.start_of(i33), cp.start_of(i34), cp.start_of(i35)], + 0 + 1 * (x + (-3) - 3) // 1, + ) + <= cp.element([cp.end_of(i21), cp.end_of(i22)], 0 + 1 * (y - 1) // 1) + ) + ) + + def test_double_indirection_after_constraint(self): + m = self.get_model() + # add interval var x can index + m.i3 = IntervalVar([(1, 3), (1, 4), (1, 5)], length=4, optional=True) + m.y = Var(domain=Integers, bounds=[1, 2]) + m.c = LogicalConstraint( + expr=m.i3[1, m.x - 3].start_time.after(m.i2[m.y].end_time) + ) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.i3[1, 3]), visitor.var_map) + self.assertIn(id(m.i3[1, 4]), visitor.var_map) + self.assertIn(id(m.i3[1, 5]), visitor.var_map) + + y = visitor.var_map[id(m.y)] + x = visitor.var_map[id(m.x)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + i33 = visitor.var_map[id(m.i3[1, 3])] + i34 = visitor.var_map[id(m.i3[1, 4])] + i35 = visitor.var_map[id(m.i3[1, 5])] + + self.assertTrue( + expr[1].equals( + cp.element([cp.end_of(i21), cp.end_of(i22)], 0 + 1 * (y - 1) // 1) + <= cp.element( + [cp.start_of(i33), cp.start_of(i34), cp.start_of(i35)], + 0 + 1 * (x + (-3) - 3) // 1, + ) + ) + ) + + def test_double_indirection_at_constraint(self): + m = self.get_model() + # add interval var x can index + m.i3 = IntervalVar([(1, 3), (1, 4), (1, 5)], length=4, optional=True) + m.y = Var(domain=Integers, bounds=[1, 2]) + m.c = LogicalConstraint(expr=m.i3[1, m.x - 3].start_time.at(m.i2[m.y].end_time)) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.y), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + self.assertIn(id(m.i3[1, 3]), visitor.var_map) + self.assertIn(id(m.i3[1, 4]), visitor.var_map) + self.assertIn(id(m.i3[1, 5]), visitor.var_map) + + y = visitor.var_map[id(m.y)] + x = visitor.var_map[id(m.x)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + i33 = visitor.var_map[id(m.i3[1, 3])] + i34 = visitor.var_map[id(m.i3[1, 4])] + i35 = visitor.var_map[id(m.i3[1, 5])] + + self.assertTrue( + expr[1].equals( + cp.element( + [cp.start_of(i33), cp.start_of(i34), cp.start_of(i35)], + 0 + 1 * (x + (-3) - 3) // 1, + ) + == cp.element([cp.end_of(i21), cp.end_of(i22)], 0 + 1 * (y - 1) // 1) + ) + ) + + def test_indirection_nonconstant_step_size(self): + m = ConcreteModel() + + def param_rule(m, i): + return i + 1 + + m.p = Param([1, 3, 4], initialize=param_rule) + m.x = Var(within={1, 3, 4}) + e = m.p[m.x] + + visitor = self.get_visitor() + with self.assertRaisesRegex( + ValueError, + r"Variable indirection 'p\[x\]' is over a discrete domain " + "without a constant step size. This is not supported.", + ): + expr = visitor.walk_expression((e, e, 0)) + + def test_indirection_with_param(self): + m = ConcreteModel() + + def param_rule(m, i): + return i + 1 + + m.p = Param([1, 3, 5], initialize=param_rule) + m.x = Var(within={1, 3, 5}) + m.a = Var(domain=Integers, bounds=(0, 100)) + + e = m.p[m.x] / m.a + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + self.assertIn(id(m.x), visitor.var_map) + self.assertIn(id(m.a), visitor.var_map) + x = visitor.var_map[id(m.x)] + a = visitor.var_map[id(m.a)] + + self.assertTrue(expr[1].equals(cp.element([2, 4, 6], 0 + 1 * (x - 1) // 2) / a)) + + +@unittest.skipIf(not docplex_available, "docplex is not available") +class TestCPExpressionWalker_CumulFuncExpressions(CommonTest): + def test_always_in(self): + m = self.get_model() + f = ( + Pulse((m.i, 3)) + + Step(m.i2[1].start_time, height=2) + - Step(m.i2[2].end_time, height=-1) + + Step(3, height=4) + ) + m.c = LogicalConstraint(expr=f.within((0, 3), (0, 10))) + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.i), visitor.var_map) + self.assertIn(id(m.i2[1]), visitor.var_map) + self.assertIn(id(m.i2[2]), visitor.var_map) + + i = visitor.var_map[id(m.i)] + i21 = visitor.var_map[id(m.i2[1])] + i22 = visitor.var_map[id(m.i2[2])] + + self.assertTrue( + expr[1].equals( + cp.always_in( + cp.pulse(i, 3) + + cp.step_at_start(i21, 2) + - cp.step_at_end(i22, -1) + + cp.step_at(3, 4), + interval=(0, 10), + min=0, + max=3, + ) + ) + ) + + +@unittest.skipIf(not docplex_available, "docplex is not available") +class TestCPExpressionWalker_NamedExpressions(CommonTest): + def test_named_expression(self): + m = self.get_model() + m.e = Expression(expr=m.x**2 + 7) + m.c = Constraint(expr=m.e <= 32) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.x), visitor.var_map) + x = visitor.var_map[id(m.x)] + + self.assertTrue(expr[1].equals(x**2 + 7)) + + def test_repeated_named_expression(self): + m = self.get_model() + m.e = Expression(expr=m.x**2 + 7) + m.c = Constraint(expr=m.e - 8 * m.e <= 32) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.x), visitor.var_map) + x = visitor.var_map[id(m.x)] + + self.assertTrue(expr[1].equals(x**2 + 7 + (-1) * (8 * (x**2 + 7)))) + + +@unittest.skipIf(not docplex_available, "docplex is not available") +class TestCPExpressionWalker_Vars(CommonTest): + def test_complain_about_non_integer_vars(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.i.is_present.implies(m.a[1] == 5)) + + visitor = self.get_visitor() + with self.assertRaisesRegex( + ValueError, + "The LogicalToDoCplex writer can only support integer- or " + r"Boolean-valued variables. Cannot write Var 'a\[1\]' with " + "domain 'Reals'", + ): + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + def test_fixed_integer_var(self): + m = self.get_model() + m.a.domain = Integers + m.a[1].fix(3) + m.c = Constraint(expr=m.a[1] + m.a[2] >= 4) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.a[2]), visitor.var_map) + a2 = visitor.var_map[id(m.a[2])] + + self.assertTrue(expr[1].equals(3 + a2)) + + def test_fixed_boolean_var(self): + m = self.get_model() + m.b.fix(False) + m.b2['a'].fix(True) + m.c = LogicalConstraint(expr=m.b.lor(m.b2['a'].land(m.b2['b']))) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.expr, m.c, 0)) + + self.assertIn(id(m.b2['b']), visitor.var_map) + b2b = visitor.var_map[id(m.b2['b'])] + + self.assertTrue(expr[1].equals(cp.logical_or(False, cp.logical_and(True, b2b)))) + + def test_indirection_single_index(self): + m = self.get_model() + m.a.domain = Integers + m.c = Constraint(expr=m.a[m.x] >= 3.5) + + visitor = self.get_visitor() + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + self.assertIn(id(m.x), visitor.var_map) + x = visitor.var_map[id(m.x)] + a = [] + # only need indices 6, 7, and 8 from a, since that's what x is capable + # of selecting. + for idx in [6, 7, 8]: + v = m.a[idx] + self.assertIn(id(v), visitor.var_map) + a.append(visitor.var_map[id(v)]) + # since x is between 6 and 8, we subtract 6 from it for it to be the + # right index + self.assertTrue(expr[1].equals(cp.element(a, 0 + 1 * (x - 6) // 1))) + + def test_indirection_multi_index_second_constant(self): + m = self.get_model() + m.z = Var(m.I, m.I, domain=Integers) + + e = m.z[m.x, 3] + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + z = {} + for i in [6, 7, 8]: + self.assertIn(id(m.z[i, 3]), visitor.var_map) + z[i, 3] = visitor.var_map[id(m.z[i, 3])] + self.assertIn(id(m.x), visitor.var_map) + x = visitor.var_map[id(m.x)] + + self.assertTrue( + expr[1].equals( + cp.element([z[i, 3] for i in [6, 7, 8]], 0 + 1 * (x - 6) // 1) + ) + ) + + def test_indirection_multi_index_first_constant(self): + m = self.get_model() + m.z = Var(m.I, m.I, domain=Integers) + + e = m.z[3, m.x] + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + z = {} + for i in [6, 7, 8]: + self.assertIn(id(m.z[3, i]), visitor.var_map) + z[3, i] = visitor.var_map[id(m.z[3, i])] + self.assertIn(id(m.x), visitor.var_map) + x = visitor.var_map[id(m.x)] + + self.assertTrue( + expr[1].equals( + cp.element([z[3, i] for i in [6, 7, 8]], 0 + 1 * (x - 6) // 1) + ) + ) + + def test_indirection_multi_index_neither_constant_same_var(self): + m = self.get_model() + m.z = Var(m.I, m.I, domain=Integers) + + e = m.z[m.x, m.x] + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + z = {} + for i in [6, 7, 8]: + for j in [6, 7, 8]: + self.assertIn(id(m.z[i, j]), visitor.var_map) + z[i, j] = visitor.var_map[id(m.z[i, j])] + self.assertIn(id(m.x), visitor.var_map) + x = visitor.var_map[id(m.x)] + + self.assertTrue( + expr[1].equals( + cp.element( + [z[i, j] for i in [6, 7, 8] for j in [6, 7, 8]], + 0 + 1 * (x - 6) // 1 + 3 * (x - 6) // 1, + ) + ) + ) + + def test_indirection_multi_index_neither_constant_diff_vars(self): + m = self.get_model() + m.z = Var(m.I, m.I, domain=Integers) + m.y = Var(within=[1, 3, 5]) + + e = m.z[m.x, m.y] + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + z = {} + for i in [6, 7, 8]: + for j in [1, 3, 5]: + self.assertIn(id(m.z[i, 3]), visitor.var_map) + z[i, j] = visitor.var_map[id(m.z[i, j])] + self.assertIn(id(m.x), visitor.var_map) + x = visitor.var_map[id(m.x)] + self.assertIn(id(m.y), visitor.var_map) + y = visitor.var_map[id(m.y)] + + self.assertTrue( + expr[1].equals( + cp.element( + [z[i, j] for i in [6, 7, 8] for j in [1, 3, 5]], + 0 + 1 * (x - 6) // 1 + 3 * (y - 1) // 2, + ) + ) + ) + + def test_indirection_expression_index(self): + m = self.get_model() + m.a.domain = Integers + m.y = Var(within=[1, 3, 5]) + + e = m.a[m.x - m.y] + + visitor = self.get_visitor() + expr = visitor.walk_expression((e, e, 0)) + + a = {} + for i in range(1, 8): + self.assertIn(id(m.a[i]), visitor.var_map) + a[i] = visitor.var_map[id(m.a[i])] + self.assertIn(id(m.x), visitor.var_map) + x = visitor.var_map[id(m.x)] + self.assertIn(id(m.y), visitor.var_map) + y = visitor.var_map[id(m.y)] + + self.assertTrue( + expr[1].equals( + cp.element([a[i] for i in range(1, 8)], 0 + 1 * (x + -1 * y - 1) // 1) + ) + ) + + def test_indirection_fails_with_non_finite_index_domain(self): + m = self.get_model() + m.a.domain = Integers + # release the bounds + m.x.setlb(None) + m.x.setub(None) + m.c = Constraint(expr=m.a[m.x] >= 0) + + visitor = self.get_visitor() + with self.assertRaisesRegex( + ValueError, + r"Variable indirection 'a\[x\]' contains argument 'x', " + "which is not restricted to a finite discrete domain", + ): + expr = visitor.walk_expression((m.c.body, m.c, 0)) + + def test_indirection_invalid_index_domain(self): + m = self.get_model() + m.a.domain = Integers + m.a.bounds = (6, 8) + m.y = Var(within=Integers, bounds=(0, 10)) + + e = m.a[m.y] + + visitor = self.get_visitor() + with self.assertRaisesRegex( + ValueError, + r"Variable indirection 'a\[y\]' permits an index '0' " + "that is not a valid key.", + ): + expr = visitor.walk_expression((e, e, 0)) + + def test_infinite_domain_var(self): + m = ConcreteModel() + m.Evens = RangeSet(ranges=(NumericRange(0, None, 2), NumericRange(0, None, -2))) + m.x = Var(domain=m.Evens) + e = m.x**2 + + visitor = self.get_visitor() + with self.assertRaisesRegex( + ValueError, + "The LogicalToDoCplex writer does not support " + "infinite discrete domains. Cannot " + "write Var 'x' with domain 'Evens'", + ): + expr = visitor.walk_expression((e, e, 0)) diff --git a/pyomo/contrib/cp/tests/test_docplex_writer.py b/pyomo/contrib/cp/tests/test_docplex_writer.py new file mode 100644 index 00000000000..d569ef2e696 --- /dev/null +++ b/pyomo/contrib/cp/tests/test_docplex_writer.py @@ -0,0 +1,256 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +from pyomo.common.fileutils import Executable + +from pyomo.contrib.cp import IntervalVar, Pulse, Step, AlwaysIn +from pyomo.contrib.cp.repn.docplex_writer import LogicalToDoCplex +from pyomo.environ import ( + ConcreteModel, + Set, + Var, + Integers, + LogicalConstraint, + implies, + value, + TerminationCondition, + Constraint, + PositiveIntegers, + maximize, + minimize, + Objective, +) +from pyomo.opt import WriterFactory, SolverFactory + +try: + import docplex.cp.model as cp + + docplex_available = True +except: + docplex_available = False + +cpoptimizer_available = Executable('cpoptimizer').available() + + +@unittest.skipIf(not docplex_available, "docplex is not available") +class TestWriteModel(unittest.TestCase): + def test_write_scheduling_model_only_interval_vars(self): + m = ConcreteModel() + m.i = IntervalVar(start=(2, 4), end=(5, 19), length=7, optional=False) + m.tasks = Set(initialize=range(2)) + m.h = IntervalVar(m.tasks, optional=True, length=(4, 5), start=(1, 2)) + + cpx_mod, var_map = WriterFactory('docplex_model').write(m) + + # We have nothing on this model other than interval vars + exprs = cpx_mod.get_all_expressions() + self.assertEqual(len(exprs), 3) + + # We should have the three interval vars above + variables = cpx_mod.get_all_variables() + self.assertEqual(len(variables), 3) + # I'm assuming that the lists of exprs and vars are in a deterministic + # order. If they're not, this will fail periodically, so I guess we'll + # find out. + self.assertIs(variables[0], var_map[m.h[1]]) + self.assertIs(exprs[2][0], var_map[m.h[1]]) + self.assertIs(variables[1], var_map[m.h[0]]) + self.assertIs(exprs[1][0], var_map[m.h[0]]) + + for i in [0, 1]: + self.assertTrue(variables[i].is_optional()) + self.assertEqual(variables[i].get_start(), (1, 2)) + self.assertEqual(variables[i].get_length(), (4, 5)) + + self.assertIs(variables[2], var_map[m.i]) + self.assertIs(exprs[0][0], var_map[m.i]) + self.assertTrue(variables[2].is_present()) + self.assertEqual(variables[2].get_start(), (2, 4)) + self.assertEqual(variables[2].get_end(), (5, 19)) + self.assertEqual(variables[2].get_length(), (7, 7)) + + def test_write_model_with_bool_expr_as_constraint(self): + # This tests our handling of a quirk with docplex that even some things + # that are boolean-valued can't be added to the model as constraints. We + # need to explicitly recognize them and add an "== True" right-hand + # side. + m = ConcreteModel() + m.i = IntervalVar([1, 2], optional=True) + m.x = Var(within={1, 2}) + # This is a perfectly reasonable constraint in a context where x is some + # variable that decides what needs to be scheduled. + m.cons = LogicalConstraint(expr=m.i[m.x].is_present) + + cpx_mod, var_map = WriterFactory('docplex_model').write(m) + + variables = cpx_mod.get_all_variables() + self.assertEqual(len(variables), 3) + # The three variables plus the one constraint: + exprs = cpx_mod.get_all_expressions() + self.assertEqual(len(exprs), 4) + + x = var_map[m.x] + i1 = var_map[m.i[1]] + i2 = var_map[m.i[2]] + + self.assertIs(variables[0], x) + self.assertIs(variables[1], i2) + self.assertIs(variables[2], i1) + + self.assertTrue( + exprs[3][0].equals( + cp.element( + [cp.presence_of(i1), cp.presence_of(i2)], 0 + 1 * (x - 1) // 1 + ) + == True + ) + ) + + +@unittest.skipIf(not docplex_available, "docplex is not available") +@unittest.skipIf(not cpoptimizer_available, "CP optimizer is not available") +class TestSolveModel(unittest.TestCase): + def test_solve_scheduling_problem(self): + m = ConcreteModel() + m.eat_cookie = IntervalVar([0, 1], length=8, end=(0, 24), optional=False) + m.eat_cookie[0].start_time.bounds = (0, 4) + m.eat_cookie[1].start_time.bounds = (5, 20) + + m.read_story = IntervalVar(start=(15, 24), end=(0, 24), length=(2, 3)) + m.sweep_crumbs = IntervalVar(optional=True, length=1, end=(0, 24)) + m.do_dishes = IntervalVar(optional=True, length=5, end=(0, 24)) + + m.num_crumbs = Var(domain=Integers, bounds=(0, 100)) + + ## Precedence + m.cookies = LogicalConstraint( + expr=m.eat_cookie[1].start_time.after(m.eat_cookie[0].end_time) + ) + m.cookies_imply_crumbs = LogicalConstraint( + expr=m.eat_cookie[0].is_present.implies(m.num_crumbs == 5) + ) + m.good_mouse = LogicalConstraint( + expr=implies(m.num_crumbs >= 3, m.sweep_crumbs.is_present) + ) + m.sweep_after = LogicalConstraint( + expr=m.sweep_crumbs.start_time.after(m.eat_cookie[1].end_time) + ) + + m.mice_occupied = ( + sum(Pulse((m.eat_cookie[i], 1)) for i in range(2)) + + Step(m.read_story.start_time, 1) + + Pulse((m.sweep_crumbs, 1)) + - Pulse((m.do_dishes, 1)) + ) + + # Must keep exactly one mouse occupied for a 25-hour day + m.treat_your_mouse_well = LogicalConstraint( + expr=AlwaysIn(cumul_func=m.mice_occupied, bounds=(1, 1), times=(0, 24)) + ) + + results = SolverFactory('cp_optimizer').solve( + m, symbolic_solver_labels=True, tee=True + ) + + self.assertEqual( + results.solver.termination_condition, TerminationCondition.feasible + ) + + # check solution + self.assertTrue(value(m.eat_cookie[0].is_present)) + self.assertTrue(value(m.eat_cookie[1].is_present)) + # That means there were crumbs: + self.assertEqual(value(m.num_crumbs), 5) + # So there was sweeping: + self.assertTrue(value(m.sweep_crumbs.is_present)) + + # start with the first cookie: + self.assertEqual(value(m.eat_cookie[0].start_time), 0) + self.assertEqual(value(m.eat_cookie[0].end_time), 8) + self.assertEqual(value(m.eat_cookie[0].length), 8) + # Proceed to second cookie: + self.assertEqual(value(m.eat_cookie[1].start_time), 8) + self.assertEqual(value(m.eat_cookie[1].end_time), 16) + self.assertEqual(value(m.eat_cookie[1].length), 8) + # Sweep + self.assertEqual(value(m.sweep_crumbs.start_time), 16) + self.assertEqual(value(m.sweep_crumbs.end_time), 17) + self.assertEqual(value(m.sweep_crumbs.length), 1) + # End with read story, as it keeps exactly one mouse occupied + # indefinitely (in this particular retelling) + self.assertEqual(value(m.read_story.start_time), 17) + + # Since doing the dishes actually *bores* a mouse, we leave the dishes + # in the sink + self.assertFalse(value(m.do_dishes.is_present)) + + self.assertEqual(results.problem.number_of_objectives, 0) + self.assertEqual(results.problem.number_of_constraints, 5) + self.assertEqual(results.problem.number_of_integer_vars, 1) + self.assertEqual(results.problem.number_of_interval_vars, 5) + + def test_solve_infeasible_problem(self): + m = ConcreteModel() + m.x = Var(within=[1, 2, 3, 5]) + m.c = Constraint(expr=m.x == 0) + + result = SolverFactory('cp_optimizer').solve(m) + self.assertEqual( + result.solver.termination_condition, TerminationCondition.infeasible + ) + + self.assertIsNone(m.x.value) + + def test_solve_max_problem(self): + m = ConcreteModel() + m.cookies = Var(domain=PositiveIntegers, bounds=(7, 10)) + m.chocolate_chip_equity = Constraint(expr=m.cookies <= 9) + + m.obj = Objective(expr=m.cookies, sense=maximize) + + results = SolverFactory('cp_optimizer').solve(m) + + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual(value(m.cookies), 9) + + self.assertEqual(results.problem.number_of_objectives, 1) + self.assertEqual(results.problem.sense, maximize) + self.assertEqual(results.problem.lower_bound, 9) + self.assertEqual(results.problem.upper_bound, 9) + + def test_solve_min_problem(self): + m = ConcreteModel() + m.x = Var([1, 2, 3], bounds=(4, 6), domain=Integers) + m.y = Var(within=[1, 2, 3]) + + m.c1 = Constraint(expr=m.y >= 2.5) + + @m.Constraint([1, 2, 3]) + def x_bounds(m, i): + return m.x[i] >= 3 * (i - 1) + + m.obj = Objective(expr=m.x[m.y]) + + results = SolverFactory('cp_optimizer').solve(m) + + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual(value(m.x[3]), 6) + self.assertEqual(value(m.y), 3) + + self.assertEqual(results.problem.number_of_objectives, 1) + self.assertEqual(results.problem.sense, minimize) + self.assertEqual(results.problem.lower_bound, 6) + self.assertEqual(results.problem.upper_bound, 6) diff --git a/pyomo/contrib/cp/tests/test_interval_var.py b/pyomo/contrib/cp/tests/test_interval_var.py new file mode 100644 index 00000000000..edbf889fcda --- /dev/null +++ b/pyomo/contrib/cp/tests/test_interval_var.py @@ -0,0 +1,221 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +from pyomo.contrib.cp.interval_var import ( + IntervalVar, + IntervalVarTimePoint, + IntervalVarLength, + IntervalVarPresence, +) +from pyomo.core.expr import GetItemExpression, GetAttrExpression +from pyomo.environ import ConcreteModel, Integers, Set, value, Var + + +class TestScalarIntervalVar(unittest.TestCase): + def test_initialize_with_no_data(self): + m = ConcreteModel() + m.i = IntervalVar() + + self.assertIsInstance(m.i.start_time, IntervalVarTimePoint) + self.assertEqual(m.i.start_time.domain, Integers) + self.assertIsNone(m.i.start_time.lower) + self.assertIsNone(m.i.start_time.upper) + + self.assertIsInstance(m.i.end_time, IntervalVarTimePoint) + self.assertEqual(m.i.end_time.domain, Integers) + self.assertIsNone(m.i.end_time.lower) + self.assertIsNone(m.i.end_time.upper) + + self.assertIsInstance(m.i.length, IntervalVarLength) + self.assertEqual(m.i.length.domain, Integers) + self.assertIsNone(m.i.length.lower) + self.assertIsNone(m.i.length.upper) + + self.assertIsInstance(m.i.is_present, IntervalVarPresence) + + def test_add_components_that_do_not_belong(self): + m = ConcreteModel() + m.i = IntervalVar() + + with self.assertRaisesRegex( + ValueError, + "Attempting to declare a block component using the name of a " + "reserved attribute:\n\tnew_thing", + ): + m.i.new_thing = IntervalVar() + + def test_start_and_end_bounds(self): + m = ConcreteModel() + m.i = IntervalVar(start=(0, 5)) + self.assertEqual(m.i.start_time.lower, 0) + self.assertEqual(m.i.start_time.upper, 5) + + m.i.end_time.bounds = (12, 14) + + self.assertEqual(m.i.end_time.lower, 12) + self.assertEqual(m.i.end_time.upper, 14) + + def test_constant_length_and_start(self): + m = ConcreteModel() + m.i = IntervalVar(length=7, start=3) + + self.assertEqual(m.i.length.lower, 7) + self.assertEqual(m.i.length.upper, 7) + + self.assertEqual(m.i.start_time.lower, 3) + self.assertEqual(m.i.start_time.upper, 3) + + def test_non_optional(self): + m = ConcreteModel() + m.i = IntervalVar(length=2, end=(4, 9), optional=False) + + self.assertEqual(value(m.i.is_present), True) + self.assertTrue(m.i.is_present.fixed) + self.assertFalse(m.i.optional) + + # Should also be true by default + m.i2 = IntervalVar() + + self.assertEqual(value(m.i2.is_present), True) + self.assertTrue(m.i.is_present.fixed) + self.assertFalse(m.i2.optional) + + def test_optional(self): + m = ConcreteModel() + m.i = IntervalVar(optional=True) + + self.assertFalse(m.i.is_present.fixed) + self.assertTrue(m.i.optional) + + # Now set to False + m.i.optional = False + self.assertEqual(value(m.i.is_present), True) + self.assertTrue(m.i.is_present.fixed) + self.assertFalse(m.i.optional) + + def test_is_present_fixed_False(self): + m = ConcreteModel() + m.i = IntervalVar(optional=True) + + m.i.is_present.fix(False) + self.assertTrue(m.i.optional) + + +class TestIndexedIntervalVar(unittest.TestCase): + def test_initialize_with_no_data(self): + m = ConcreteModel() + + m.i = IntervalVar([1, 2]) + + for j in [1, 2]: + self.assertIsInstance(m.i[j].start_time, IntervalVarTimePoint) + self.assertEqual(m.i[j].start_time.domain, Integers) + self.assertIsNone(m.i[j].start_time.lower) + self.assertIsNone(m.i[j].start_time.upper) + + self.assertIsInstance(m.i[j].end_time, IntervalVarTimePoint) + self.assertEqual(m.i[j].end_time.domain, Integers) + self.assertIsNone(m.i[j].end_time.lower) + self.assertIsNone(m.i[j].end_time.upper) + + self.assertIsInstance(m.i[j].length, IntervalVarLength) + self.assertEqual(m.i[j].length.domain, Integers) + self.assertIsNone(m.i[j].length.lower) + self.assertIsNone(m.i[j].length.upper) + + self.assertIsInstance(m.i[j].is_present, IntervalVarPresence) + + def test_constant_length(self): + m = ConcreteModel() + m.i = IntervalVar(['a', 'b'], length=45) + + for j in ['a', 'b']: + self.assertEqual(m.i[j].length.lower, 45) + self.assertEqual(m.i[j].length.upper, 45) + + def test_rule_based_start(self): + m = ConcreteModel() + + def start_rule(m, i): + return (1 - i, 13 + i) + + m.act = IntervalVar([1, 2, 3], start=start_rule, length=4) + + for i in [1, 2, 3]: + self.assertEqual(m.act[i].start_time.lower, 1 - i) + self.assertEqual(m.act[i].start_time.upper, 13 + i) + + self.assertEqual(m.act[i].length.lower, 4) + self.assertEqual(m.act[i].length.upper, 4) + + self.assertFalse(m.act[i].optional) + self.assertTrue(m.act[i].is_present.fixed) + self.assertEqual(value(m.act[i].is_present), True) + + def test_optional(self): + m = ConcreteModel() + m.act = IntervalVar([1, 2], end=[0, 10], optional=True) + + for i in [1, 2]: + self.assertTrue(m.act[i].optional) + self.assertFalse(m.act[i].is_present.fixed) + + self.assertEqual(m.act[i].end_time.lower, 0) + self.assertEqual(m.act[i].end_time.upper, 10) + + # None doesn't make sense for this: + with self.assertRaisesRegex( + ValueError, "Cannot set 'optional' to None: Must be True or False." + ): + m.act[1].optional = None + + # We can change it, and that has the correct effect on is_present + m.act[1].optional = False + self.assertFalse(m.act[1].optional) + self.assertTrue(m.act[1].is_present.fixed) + + m.act[1].optional = True + self.assertTrue(m.act[1].optional) + self.assertFalse(m.act[1].is_present.fixed) + + def test_optional_rule(self): + m = ConcreteModel() + m.idx = Set(initialize=[(4, 2), (5, 2)], dimen=2) + + def optional_rule(m, i, j): + return i % j == 0 + + m.act = IntervalVar(m.idx, optional=optional_rule) + self.assertTrue(m.act[4, 2].optional) + self.assertFalse(m.act[5, 2].optional) + + def test_index_by_expr(self): + m = ConcreteModel() + m.act = IntervalVar([(1, 2), (2, 1), (2, 2)]) + m.i = Var(domain=Integers) + m.i2 = Var([1, 2], domain=Integers) + + thing1 = m.act[m.i, 2] + self.assertIsInstance(thing1, GetItemExpression) + self.assertEqual(len(thing1.args), 3) + self.assertIs(thing1.args[0], m.act) + self.assertIs(thing1.args[1], m.i) + self.assertEqual(thing1.args[2], 2) + + thing2 = thing1.start_time + self.assertIsInstance(thing2, GetAttrExpression) + self.assertEqual(len(thing2.args), 2) + self.assertIs(thing2.args[0], thing1) + self.assertEqual(thing2.args[1], 'start_time') + + # TODO: But this is where it dies. + expr1 = m.act[m.i, 2].start_time.before(m.act[m.i**2, 1].end_time) diff --git a/pyomo/contrib/cp/tests/test_logical_to_disjunctive.py b/pyomo/contrib/cp/tests/test_logical_to_disjunctive.py new file mode 100755 index 00000000000..9cd5b2556ca --- /dev/null +++ b/pyomo/contrib/cp/tests/test_logical_to_disjunctive.py @@ -0,0 +1,852 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.errors import MouseTrap +import pyomo.common.unittest as unittest +from pyomo.contrib.cp.transform.logical_to_disjunctive_program import ( + LogicalToDisjunctive, +) +from pyomo.contrib.cp.transform.logical_to_disjunctive_walker import ( + LogicalToDisjunctiveVisitor, +) +from pyomo.core.expr.compare import assertExpressionsEqual +from pyomo.core.plugins.transform.logical_to_linear import ( + update_boolean_vars_from_binary, +) +from pyomo.gdp import Disjunct +from pyomo.environ import ( + atmost, + atleast, + exactly, + Block, + BooleanVar, + Binary, + ConcreteModel, + Constraint, + Expression, + Integers, + land, + lnot, + lor, + LogicalConstraint, + Objective, + Param, + SolverFactory, + value, + Var, + TransformationFactory, +) + +gurobi_available = SolverFactory('gurobi').available(exception_flag=False) + + +class TestLogicalToDisjunctiveVisitor(unittest.TestCase): + def make_model(self): + m = ConcreteModel() + + m.a = BooleanVar() + m.b = BooleanVar() + m.c = BooleanVar() + + return m + + def test_logical_or(self): + m = self.make_model() + e = lor(m.a, m.b, m.c) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + + visitor.walk_expression(e) + + self.assertIs(m.a.get_associated_binary(), m.z[1]) + self.assertIs(m.b.get_associated_binary(), m.z[2]) + self.assertIs(m.c.get_associated_binary(), m.z[3]) + + self.assertEqual(len(m.cons), 5) + self.assertEqual(len(m.z), 4) + # !z4 v a v b v c + assertExpressionsEqual( + self, m.cons[1].expr, 1 - m.z[4] + m.z[1] + m.z[2] + m.z[3] >= 1 + ) + # z4 v !a + assertExpressionsEqual(self, m.cons[2].expr, m.z[4] + (1 - m.z[1]) >= 1) + # z4 v !b + assertExpressionsEqual(self, m.cons[3].expr, m.z[4] + (1 - m.z[2]) >= 1) + # z4 v !c + assertExpressionsEqual(self, m.cons[4].expr, m.z[4] + (1 - m.z[3]) >= 1) + + # z4 is constrained to be 'True' + assertExpressionsEqual(self, m.cons[5].expr, m.z[4] >= 1) + + def test_logical_and(self): + m = self.make_model() + e = land(m.a, m.b, m.c) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + + visitor.walk_expression(e) + + self.assertIs(m.a.get_associated_binary(), m.z[1]) + self.assertIs(m.b.get_associated_binary(), m.z[2]) + self.assertIs(m.c.get_associated_binary(), m.z[3]) + + self.assertEqual(len(m.cons), 5) + self.assertEqual(len(m.z), 4) + assertExpressionsEqual(self, m.cons[1].expr, m.z[4] <= m.z[1]) + assertExpressionsEqual(self, m.cons[2].expr, m.z[4] <= m.z[2]) + assertExpressionsEqual(self, m.cons[3].expr, m.z[4] <= m.z[3]) + assertExpressionsEqual( + self, m.cons[4].expr, 1 - m.z[4] <= 3 - (m.z[1] + m.z[2] + m.z[3]) + ) + assertExpressionsEqual(self, m.cons[5].expr, m.z[4] >= 1) + + def test_logical_not(self): + m = self.make_model() + e = lnot(m.a) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + + visitor.walk_expression(e) + self.assertIs(m.a.get_associated_binary(), m.z[1]) + self.assertEqual(len(m.cons), 2) + self.assertEqual(len(m.z), 2) + assertExpressionsEqual(self, m.cons[1].expr, m.z[2] == 1 - m.z[1]) + assertExpressionsEqual(self, m.cons[2].expr, m.z[2] >= 1) + + def test_implication(self): + m = self.make_model() + e = m.a.implies(m.b.land(m.c)) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + + visitor.walk_expression(e) + self.assertIs(m.a.get_associated_binary(), m.z[1]) + self.assertIs(m.b.get_associated_binary(), m.z[2]) + self.assertIs(m.c.get_associated_binary(), m.z[3]) + + self.assertEqual(len(m.cons), 7) + # z4 = b ^ c + assertExpressionsEqual(self, m.cons[1].expr, m.z[4] <= m.z[2]) + assertExpressionsEqual(self, m.cons[2].expr, m.z[4] <= m.z[3]) + assertExpressionsEqual( + self, m.cons[3].expr, 1 - m.z[4] <= 2 - (m.z[2] + m.z[3]) + ) + # z5 = a -> z4 + # which means z5 = !a v z4 + assertExpressionsEqual( + self, m.cons[4].expr, (1 - m.z[5]) + (1 - m.z[1]) + m.z[4] >= 1 + ) + # z5 >= 1 - z1 + assertExpressionsEqual(self, m.cons[5].expr, m.z[5] + (1 - (1 - m.z[1])) >= 1) + # z5 >= z4 + assertExpressionsEqual(self, m.cons[6].expr, m.z[5] + (1 - m.z[4]) >= 1) + + # z5 is constrained to be 'True' + assertExpressionsEqual(self, m.cons[7].expr, m.z[5] >= 1) + + def test_equivalence(self): + m = self.make_model() + e = m.a.equivalent_to(m.c) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + + visitor.walk_expression(e) + self.assertIs(m.a.get_associated_binary(), m.z[1]) + self.assertIs(m.c.get_associated_binary(), m.z[2]) + self.assertEqual(len(m.z), 5) + self.assertEqual(len(m.cons), 10) + + # z[3] == !a v c + assertExpressionsEqual( + self, m.cons[1].expr, (1 - m.z[3]) + (1 - m.z[1]) + m.z[2] >= 1 + ) + assertExpressionsEqual(self, m.cons[2].expr, 1 - (1 - m.z[1]) + m.z[3] >= 1) + assertExpressionsEqual(self, m.cons[3].expr, m.z[3] + (1 - m.z[2]) >= 1) + + # z[4] == a v ! c + assertExpressionsEqual( + self, m.cons[4].expr, (1 - m.z[4]) + (1 - m.z[2]) + m.z[1] >= 1 + ) + assertExpressionsEqual(self, m.cons[5].expr, m.z[4] + (1 - m.z[1]) >= 1) + assertExpressionsEqual(self, m.cons[6].expr, 1 - (1 - m.z[2]) + m.z[4] >= 1) + + # z[5] == z[3] ^ z[4] + assertExpressionsEqual(self, m.cons[7].expr, m.z[5] <= m.z[3]) + assertExpressionsEqual(self, m.cons[8].expr, m.z[5] <= m.z[4]) + assertExpressionsEqual( + self, m.cons[9].expr, 1 - m.z[5] <= 2 - (m.z[3] + m.z[4]) + ) + + assertExpressionsEqual(self, m.cons[10].expr, m.z[5] >= 1) + + def test_xor(self): + m = self.make_model() + e = m.a.xor(m.b) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + m.disjuncts = visitor.disjuncts + m.disjunctions = visitor.disjunctions + + visitor.walk_expression(e) + self.assertIs(m.a.get_associated_binary(), m.z[1]) + self.assertIs(m.b.get_associated_binary(), m.z[2]) + + self.assertEqual(len(m.z), 2) + self.assertEqual(len(m.cons), 1) + self.assertEqual(len(m.disjuncts), 2) + self.assertEqual(len(m.disjunctions), 1) + + assertExpressionsEqual( + self, m.disjuncts[0].constraint.expr, m.z[1] + m.z[2] == 1 + ) + assertExpressionsEqual( + self, + m.disjuncts[1].disjunction.disjuncts[0].constraint[1].expr, + m.z[1] + m.z[2] <= 0, + ) + assertExpressionsEqual( + self, + m.disjuncts[1].disjunction.disjuncts[1].constraint[1].expr, + m.z[1] + m.z[2] >= 2, + ) + + assertExpressionsEqual( + self, m.cons[1].expr, m.disjuncts[0].binary_indicator_var >= 1 + ) + + def test_at_most(self): + m = self.make_model() + e = atmost(2, m.a, (m.a.land(m.b)), m.c) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + m.disjuncts = visitor.disjuncts + m.disjunctions = visitor.disjunctions + + visitor.walk_expression(e) + self.assertIs(m.a.get_associated_binary(), m.z[1]) + a = m.z[1] + self.assertIs(m.b.get_associated_binary(), m.z[2]) + b = m.z[2] + self.assertIs(m.c.get_associated_binary(), m.z[4]) + c = m.z[4] + + self.assertEqual(len(m.z), 4) + self.assertEqual(len(m.cons), 4) + self.assertEqual(len(m.disjuncts), 2) + self.assertEqual(len(m.disjunctions), 1) + + # z3 = a ^ b + assertExpressionsEqual(self, m.cons[1].expr, m.z[3] <= a) + assertExpressionsEqual(self, m.cons[2].expr, m.z[3] <= b) + m.cons.pprint() + print(m.cons[3].expr) + assertExpressionsEqual(self, m.cons[3].expr, 1 - m.z[3] <= 2 - sum([a, b])) + + # atmost in disjunctive form + assertExpressionsEqual( + self, m.disjuncts[0].constraint.expr, m.z[1] + m.z[3] + m.z[4] <= 2 + ) + assertExpressionsEqual( + self, m.disjuncts[1].constraint.expr, m.z[1] + m.z[3] + m.z[4] >= 3 + ) + assertExpressionsEqual( + self, m.cons[4].expr, m.disjuncts[0].binary_indicator_var >= 1 + ) + + def test_at_least(self): + m = self.make_model() + e = atleast(2, m.a, m.b, m.c) + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + m.disjuncts = visitor.disjuncts + m.disjunctions = visitor.disjunctions + + visitor.walk_expression(e) + self.assertIs(m.a.get_associated_binary(), m.z[1]) + a = m.z[1] + self.assertIs(m.b.get_associated_binary(), m.z[2]) + b = m.z[2] + self.assertIs(m.c.get_associated_binary(), m.z[3]) + c = m.z[3] + + self.assertEqual(len(m.z), 3) + self.assertEqual(len(m.cons), 1) + + # atleast in disjunctive form + assertExpressionsEqual( + self, m.disjuncts[0].constraint.expr, m.z[1] + m.z[2] + m.z[3] >= 2 + ) + assertExpressionsEqual( + self, m.disjuncts[1].constraint.expr, m.z[1] + m.z[2] + m.z[3] <= 1 + ) + + assertExpressionsEqual( + self, m.cons[1].expr, m.disjuncts[0].binary_indicator_var >= 1 + ) + + @unittest.skipUnless(gurobi_available, "Gurobi is not available") + def test_logical_integration(self): + """ + This is kind of a ridiculous test, but I bothered type it and it has + a lot of logical things together, so adding it. + """ + m = self.make_model() + m.d = BooleanVar() + m.t = BooleanVar() + e = m.t.equivalent_to(lnot(lor(m.a, m.b)).land(exactly(1, [m.c, m.d]))) + + # We're forcing t to be True. + m.c.fix(True) + m.d.fix(False) + m.a.fix(False) + m.b.fix(False) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + m.disjuncts = visitor.disjuncts + m.disjunctions = visitor.disjunctions + + visitor.walk_expression(e) + + self.assertEqual(len(m.z), 11) + self.assertIs(m.a.get_associated_binary(), m.z[2]) + a = m.z[2] + self.assertIs(m.b.get_associated_binary(), m.z[3]) + b = m.z[3] + # apologies to the universe for this one, but because my own notation + # is awful: + z3 = m.z[4] + z4 = m.z[5] + z5 = m.z[8] + self.assertIs(m.t.get_associated_binary(), m.z[1]) + t = m.z[1] + z6 = m.z[9] + z7 = m.z[10] + self.assertIs(m.c.get_associated_binary(), m.z[6]) + c = m.z[6] + self.assertIs(m.d.get_associated_binary(), m.z[7]) + d = m.z[7] + z8 = m.z[11] + + self.assertEqual(len(m.disjuncts), 2) + self.assertEqual( + len( + list( + m.disjuncts[0].component_data_objects( + Constraint, descend_into=False + ) + ) + ), + 1, + ) + assertExpressionsEqual(self, m.disjuncts[0].constraint.expr, c + d == 1) + # not nested + self.assertEqual( + len( + list( + m.disjuncts[0].component_data_objects(Disjunct, descend_into=False) + ) + ), + 0, + ) + + # nested + self.assertEqual( + len( + list( + m.disjuncts[1].component_data_objects( + Constraint, descend_into=False + ) + ) + ), + 0, + ) + self.assertEqual( + len( + list( + m.disjuncts[1].component_data_objects(Disjunct, descend_into=False) + ) + ), + 2, + ) + self.assertEqual(len(m.disjuncts[1].disjunction.disjuncts), 2) + assertExpressionsEqual( + self, m.disjuncts[1].disjunction.disjuncts[0].constraint[1].expr, c + d <= 0 + ) + assertExpressionsEqual( + self, m.disjuncts[1].disjunction.disjuncts[1].constraint[1].expr, c + d >= 2 + ) + + self.assertEqual(len(m.disjunctions), 1) + self.assertIs(m.disjunctions[0].disjuncts[0], m.disjuncts[0]) + self.assertIs(m.disjunctions[0].disjuncts[1], m.disjuncts[1]) + + self.assertEqual(len(m.cons), 17) + assertExpressionsEqual(self, m.cons[1].expr, (1 - z3) + a + b >= 1) + assertExpressionsEqual(self, m.cons[2].expr, z3 + (1 - a) >= 1) + assertExpressionsEqual(self, m.cons[3].expr, z3 + (1 - b) >= 1) + assertExpressionsEqual(self, m.cons[4].expr, z4 == 1 - z3) + assertExpressionsEqual(self, m.cons[5].expr, z5 <= z4) + assertExpressionsEqual( + self, m.cons[6].expr, z5 <= m.disjuncts[0].binary_indicator_var + ) + assertExpressionsEqual( + self, + m.cons[7].expr, + 1 - z5 <= 2 - (z4 + m.disjuncts[0].binary_indicator_var), + ) + assertExpressionsEqual(self, m.cons[8].expr, (1 - z6) + (1 - t) + z5 >= 1) + assertExpressionsEqual(self, m.cons[9].expr, 1 - (1 - t) + z6 >= 1) + assertExpressionsEqual(self, m.cons[10].expr, z6 + (1 - z5) >= 1) + assertExpressionsEqual(self, m.cons[11].expr, (1 - z7) + (1 - z5) + t >= 1) + assertExpressionsEqual(self, m.cons[12].expr, z7 + (1 - t) >= 1) + assertExpressionsEqual(self, m.cons[13].expr, 1 - (1 - z5) + z7 >= 1) + assertExpressionsEqual(self, m.cons[14].expr, z8 <= z6) + assertExpressionsEqual(self, m.cons[15].expr, z8 <= z7) + assertExpressionsEqual(self, m.cons[16].expr, 1 - z8 <= 2 - (z6 + z7)) + assertExpressionsEqual(self, m.cons[17].expr, z8 >= 1) + + TransformationFactory('gdp.bigm').apply_to(m) + m.obj = Objective(expr=m.t.get_associated_binary()) + SolverFactory('gurobi').solve(m, tee=True) + update_boolean_vars_from_binary(m) + + self.assertTrue(value(e)) + self.assertEqual(value(m.obj), 1) + self.assertTrue(value(m.t)) + + def test_boolean_fixed_true(self): + m = self.make_model() + e = m.a.implies(m.b) + m.a.fix(True) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + m.disjuncts = visitor.disjuncts + m.disjunctions = visitor.disjunctions + + visitor.walk_expression(e) + # we'll get !a v b + self.assertEqual(len(m.z), 3) + self.assertEqual(len(m.cons), 4) + + self.assertIs(m.a.get_associated_binary(), m.z[1]) + self.assertTrue(m.z[1].fixed) + self.assertEqual(value(m.z[1]), 1) + self.assertIs(m.b.get_associated_binary(), m.z[2]) + + assertExpressionsEqual( + self, m.cons[1].expr, (1 - m.z[3]) + (1 - m.z[1]) + m.z[2] >= 1 + ) + assertExpressionsEqual(self, m.cons[2].expr, 1 - (1 - m.z[1]) + m.z[3] >= 1) + assertExpressionsEqual(self, m.cons[3].expr, m.z[3] + (1 - m.z[2]) >= 1) + assertExpressionsEqual(self, m.cons[4].expr, m.z[3] >= 1) + + def test_boolean_fixed_false(self): + m = self.make_model() + e = m.a & m.b + m.a.fix(False) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + m.disjuncts = visitor.disjuncts + m.disjunctions = visitor.disjunctions + + visitor.walk_expression(e) + # we'll get !a v b + self.assertEqual(len(m.z), 3) + self.assertEqual(len(m.cons), 4) + + self.assertIs(m.a.get_associated_binary(), m.z[1]) + self.assertTrue(m.z[1].fixed) + self.assertEqual(value(m.z[1]), 0) + self.assertIs(m.b.get_associated_binary(), m.z[2]) + + assertExpressionsEqual(self, m.cons[1].expr, m.z[1] >= m.z[3]) + assertExpressionsEqual(self, m.cons[2].expr, m.z[2] >= m.z[3]) + assertExpressionsEqual( + self, m.cons[3].expr, 1 - m.z[3] <= 2 - (m.z[1] + m.z[2]) + ) + assertExpressionsEqual(self, m.cons[4].expr, m.z[3] >= 1) + + def test_boolean_fixed_none(self): + m = self.make_model() + e = m.a & m.b + # I don't get what this means, but you can do it, so... I guess we need + # to handle it. + m.a.fix(None) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + m.disjuncts = visitor.disjuncts + m.disjunctions = visitor.disjunctions + + visitor.walk_expression(e) + # we'll get !a v b + self.assertEqual(len(m.z), 3) + self.assertEqual(len(m.cons), 4) + + self.assertIs(m.a.get_associated_binary(), m.z[1]) + self.assertTrue(m.z[1].fixed) + self.assertIsNone(m.z[1].value) + self.assertIs(m.b.get_associated_binary(), m.z[2]) + + assertExpressionsEqual(self, m.cons[1].expr, m.z[1] >= m.z[3]) + assertExpressionsEqual(self, m.cons[2].expr, m.z[2] >= m.z[3]) + assertExpressionsEqual( + self, m.cons[3].expr, 1 - m.z[3] <= 2 - (m.z[1] + m.z[2]) + ) + assertExpressionsEqual(self, m.cons[4].expr, m.z[3] >= 1) + + def test_no_need_to_walk(self): + m = self.make_model() + e = m.a + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + + visitor.walk_expression(e) + self.assertEqual(len(m.z), 1) + self.assertIs(m.a.get_associated_binary(), m.z[1]) + self.assertEqual(len(m.cons), 1) + assertExpressionsEqual(self, m.cons[1].expr, m.z[1] >= 1) + + def test_binary_already_associated(self): + m = self.make_model() + m.mine = Var(domain=Binary) + m.a.associate_binary_var(m.mine) + + e = m.a.land(m.b) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + + visitor.walk_expression(e) + + self.assertEqual(len(m.z), 2) + self.assertIs(m.b.get_associated_binary(), m.z[1]) + self.assertEqual(len(m.cons), 4) + assertExpressionsEqual(self, m.cons[1].expr, m.z[2] <= m.mine) + assertExpressionsEqual(self, m.cons[2].expr, m.z[2] <= m.z[1]) + assertExpressionsEqual( + self, m.cons[3].expr, 1 - m.z[2] <= 2 - (m.mine + m.z[1]) + ) + assertExpressionsEqual(self, m.cons[4].expr, m.z[2] >= 1) + + # [ESJ 11/22]: We'll probably eventually support all of these examples, but + # for now test that we handle them gracefully: + def test_integer_var_in_at_least(self): + m = self.make_model() + m.x = Var(bounds=(0, 10), domain=Integers) + e = atleast(m.x, m.a, m.b, m.c) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + + with self.assertRaisesRegex( + MouseTrap, + r"The first argument 'x' to " + r"'atleast\(x: \[a, b, c\]\)' is potentially variable. " + r"This may be a mathematically coherent expression; However " + r"it is not yet supported to convert it to a disjunctive " + r"program.", + normalize_whitespace=True, + ): + visitor.walk_expression(e) + + def test_numeric_expression_in_at_most(self): + m = self.make_model() + m.x = Var([1, 2], bounds=(0, 10), domain=Integers) + m.y = Var(domain=Integers) + m.e = Expression(expr=m.x[1] * m.x[2]) + e = atmost(m.e + m.y, m.a, m.b, m.c) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + + with self.assertRaisesRegex( + MouseTrap, + r"The first argument '\(x\[1\]\*x\[2\]\) \+ y' to " + r"'atmost\(\(x\[1\]\*x\[2\]\) \+ y: \[a, b, c\]\)' is " + r"potentially variable. " + r"This may be a mathematically coherent expression; However " + r"it is not yet supported to convert it to a disjunctive " + r"program", + normalize_whitespace=True, + ): + visitor.walk_expression(e) + + def test_named_expression_in_at_most(self): + m = self.make_model() + m.x = Var([1, 2], bounds=(0, 10), domain=Integers) + m.y = Var(domain=Integers) + m.e = Expression(expr=m.x[1] * m.x[2]) + e = atmost(m.e, m.a, m.b, m.c) + + visitor = LogicalToDisjunctiveVisitor() + m.cons = visitor.constraints + m.z = visitor.z_vars + + with self.assertRaisesRegex( + MouseTrap, + r"The first argument 'e' to " + r"'atmost\(\(x\[1\]\*x\[2\]\): \[a, b, c\]\)' is " + r"potentially variable. " + r"This may be a mathematically coherent expression; However " + r"it is not yet supported to convert it to a disjunctive " + r"program", + normalize_whitespace=True, + ): + visitor.walk_expression(e) + + def test_relational_expr_as_boolean_atom(self): + m = self.make_model() + m.x = Var() + e = m.a.land(m.x >= 3) + visitor = LogicalToDisjunctiveVisitor() + + with self.assertRaisesRegex( + MouseTrap, + "The RelationalExpression '3 <= x' was used as a Boolean " + "term in a logical proposition. This is not yet supported " + "when transforming to disjunctive form.", + normalize_whitespace=True, + ): + visitor.walk_expression(e) + + +class TestLogicalToDisjunctiveTransformation(unittest.TestCase): + def make_model(self): + m = ConcreteModel() + + m.a = BooleanVar() + m.b = BooleanVar([1, 2]) + m.p = Param(initialize=1) + m.p2 = Param([1, 2], mutable=True) + m.p2[1] = 1 + m.p2[2] = 2 + + m.block = Block() + m.block.c1 = LogicalConstraint(expr=m.a.land(m.b[1])) + m.block.c2 = LogicalConstraint( + expr=exactly(m.p2[2], m.a, m.b[1], m.b[2].lor(m.b[1])) + ) + + m.c1 = LogicalConstraint(expr=atmost(m.p + m.p2[1], m.a, m.b[1], m.b[2])) + + return m + + def check_and_constraints(self, a, b1, z, transBlock): + assertExpressionsEqual(self, transBlock.transformed_constraints[1].expr, z <= a) + assertExpressionsEqual( + self, transBlock.transformed_constraints[2].expr, z <= b1 + ) + assertExpressionsEqual( + self, transBlock.transformed_constraints[3].expr, 1 - z <= 2 - (a + b1) + ) + assertExpressionsEqual(self, transBlock.transformed_constraints[4].expr, z >= 1) + + def check_block_c1_transformed(self, m, transBlock): + self.assertFalse(m.block.c1.active) + self.assertIs(m.a.get_associated_binary(), transBlock.auxiliary_vars[1]) + self.assertIs(m.b[1].get_associated_binary(), transBlock.auxiliary_vars[2]) + self.check_and_constraints( + transBlock.auxiliary_vars[1], + transBlock.auxiliary_vars[2], + transBlock.auxiliary_vars[3], + transBlock, + ) + + def check_block_exactly(self, a, b1, b2, z4, transBlock): + m = transBlock.model() + + # z[4] = b[2] v b[1] + assertExpressionsEqual( + self, transBlock.transformed_constraints[5].expr, (1 - z4) + b2 + b1 >= 1 + ) + assertExpressionsEqual( + self, transBlock.transformed_constraints[6].expr, z4 + (1 - b2) >= 1 + ) + assertExpressionsEqual( + self, transBlock.transformed_constraints[7].expr, z4 + (1 - b1) >= 1 + ) + + # exactly in disjunctive form + assertExpressionsEqual( + self, + transBlock.auxiliary_disjuncts[0].constraint.expr, + a + b1 + z4 == m.p2[2], + ) + assertExpressionsEqual( + self, + transBlock.auxiliary_disjuncts[1] + .disjunction.disjuncts[0] + .constraint[1] + .expr, + a + b1 + z4 <= m.p2[2] - 1, + ) + assertExpressionsEqual( + self, + transBlock.auxiliary_disjuncts[1] + .disjunction.disjuncts[1] + .constraint[1] + .expr, + a + b1 + z4 >= m.p2[2] + 1, + ) + + assertExpressionsEqual( + self, + transBlock.transformed_constraints[8].expr, + transBlock.auxiliary_disjuncts[0].binary_indicator_var >= 1, + ) + + def check_block_transformed(self, m): + self.assertFalse(m.block.c2.active) + transBlock = m.block._logical_to_disjunctive + self.assertEqual(len(transBlock.auxiliary_vars), 5) + self.assertEqual(len(transBlock.transformed_constraints), 8) + self.assertEqual(len(transBlock.auxiliary_disjuncts), 2) + self.assertEqual(len(transBlock.auxiliary_disjunctions), 1) + + self.check_block_c1_transformed(m, transBlock) + + self.assertIs(m.b[2].get_associated_binary(), transBlock.auxiliary_vars[4]) + + z4 = transBlock.auxiliary_vars[5] + a = transBlock.auxiliary_vars[1] + b1 = transBlock.auxiliary_vars[2] + b2 = transBlock.auxiliary_vars[4] + self.check_block_exactly(a, b1, b2, z4, transBlock) + + def test_constraint_target(self): + m = self.make_model() + TransformationFactory('contrib.logical_to_disjunctive').apply_to( + m, targets=[m.block.c1] + ) + + transBlock = m.block._logical_to_disjunctive + self.assertEqual(len(transBlock.auxiliary_vars), 3) + self.assertEqual(len(transBlock.transformed_constraints), 4) + self.assertEqual(len(transBlock.auxiliary_disjuncts), 0) + self.assertEqual(len(transBlock.auxiliary_disjunctions), 0) + self.check_block_c1_transformed(m, transBlock) + self.assertTrue(m.block.c2.active) + self.assertTrue(m.c1.active) + + def test_block_target(self): + m = self.make_model() + TransformationFactory('contrib.logical_to_disjunctive').apply_to( + m, targets=[m.block] + ) + + self.check_block_transformed(m) + self.assertTrue(m.c1.active) + + def test_transform_block(self): + m = self.make_model() + TransformationFactory('contrib.logical_to_disjunctive').apply_to(m.block) + + self.check_block_transformed(m) + self.assertTrue(m.c1.active) + + def test_transform_model(self): + m = self.make_model() + TransformationFactory('contrib.logical_to_disjunctive').apply_to(m) + + # c1 got transformed first + self.assertFalse(m.c1.active) + transBlock = m._logical_to_disjunctive + self.assertEqual(len(transBlock.auxiliary_vars), 3) + self.assertEqual(len(transBlock.transformed_constraints), 1) + self.assertEqual(len(transBlock.auxiliary_disjuncts), 2) + self.assertEqual(len(transBlock.auxiliary_disjunctions), 1) + + a = m._logical_to_disjunctive.auxiliary_vars[1] + b1 = m._logical_to_disjunctive.auxiliary_vars[2] + b2 = m._logical_to_disjunctive.auxiliary_vars[3] + + # atmost in disjunctive form + assertExpressionsEqual( + self, + transBlock.auxiliary_disjuncts[0].constraint.expr, + a + b1 + b2 <= 1 + m.p2[1], + ) + assertExpressionsEqual( + self, + transBlock.auxiliary_disjuncts[1].constraint.expr, + a + b1 + b2 >= 1 + m.p2[1] + 1, + ) + + assertExpressionsEqual( + self, + transBlock.transformed_constraints[1].expr, + transBlock.auxiliary_disjuncts[0].binary_indicator_var >= 1, + ) + + # and everything on the block is transformed too + transBlock = m.block._logical_to_disjunctive + self.assertEqual(len(transBlock.auxiliary_vars), 2) + self.assertEqual(len(transBlock.transformed_constraints), 8) + self.assertEqual(len(transBlock.auxiliary_disjuncts), 2) + self.assertEqual(len(transBlock.auxiliary_disjunctions), 1) + self.check_and_constraints(a, b1, transBlock.auxiliary_vars[1], transBlock) + self.check_block_exactly(a, b1, b2, transBlock.auxiliary_vars[2], transBlock) + + @unittest.skipUnless(gurobi_available, "Gurobi is not available") + def test_reverse_implication_for_land(self): + m = ConcreteModel() + + m.t = BooleanVar() + m.a = BooleanVar() + m.d = BooleanVar() + + m.c = LogicalConstraint(expr=m.t.equivalent_to(m.a.land(m.d))) + + m.a.fix(True) + m.d.fix(True) + + m.binary = Var(domain=Binary) + m.t.associate_binary_var(m.binary) + + m.obj = Objective(expr=m.binary) + + TransformationFactory('contrib.logical_to_disjunctive').apply_to(m) + TransformationFactory('gdp.bigm').apply_to(m) + + SolverFactory('gurobi').solve(m) + + update_boolean_vars_from_binary(m) + # Should be 1 (we forced it) + self.assertEqual(value(m.obj), 1) + self.assertTrue(value(m.t)) diff --git a/pyomo/contrib/cp/tests/test_precedence_constraints.py b/pyomo/contrib/cp/tests/test_precedence_constraints.py new file mode 100644 index 00000000000..461dabf564c --- /dev/null +++ b/pyomo/contrib/cp/tests/test_precedence_constraints.py @@ -0,0 +1,175 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +from pyomo.contrib.cp import IntervalVar +from pyomo.contrib.cp.scheduling_expr.precedence_expressions import ( + BeforeExpression, + AtExpression, +) +from pyomo.environ import ConcreteModel, LogicalConstraint + + +class TestPrecedenceRelationships(unittest.TestCase): + def get_model(self): + m = ConcreteModel() + m.a = IntervalVar() + m.b = IntervalVar() + + return m + + def test_start_before_start(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.start_time.before(m.b.start_time)) + + self.assertIsInstance(m.c.expr, BeforeExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertEqual(m.c.expr.nargs(), 3) + self.assertIs(m.c.expr.args[0], m.a.start_time) + self.assertIs(m.c.expr.args[1], m.b.start_time) + self.assertEqual(m.c.expr.delay, 0) + + self.assertEqual(str(m.c.expr), "a.start_time <= b.start_time") + + def test_start_before_end(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.start_time.before(m.b.end_time)) + + self.assertIsInstance(m.c.expr, BeforeExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertEqual(m.c.expr.nargs(), 3) + self.assertIs(m.c.expr.args[0], m.a.start_time) + self.assertIs(m.c.expr.args[1], m.b.end_time) + self.assertEqual(m.c.expr.delay, 0) + + self.assertEqual(str(m.c.expr), "a.start_time <= b.end_time") + + def test_start_after_start(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.start_time.after(m.b.start_time)) + + self.assertIsInstance(m.c.expr, BeforeExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertIs(m.c.expr.args[0], m.b.start_time) + self.assertIs(m.c.expr.args[1], m.a.start_time) + self.assertEqual(m.c.expr.delay, 0) + + self.assertEqual(str(m.c.expr), "b.start_time <= a.start_time") + + def test_start_after_end(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.start_time.after(m.b.end_time, delay=2)) + + self.assertIsInstance(m.c.expr, BeforeExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertIs(m.c.expr.args[0], m.b.end_time) + self.assertIs(m.c.expr.args[1], m.a.start_time) + self.assertEqual(m.c.expr.delay, 2) + + self.assertEqual(str(m.c.expr), "b.end_time + 2 <= a.start_time") + + def test_start_at_start(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.start_time.at(m.b.start_time)) + + self.assertIsInstance(m.c.expr, AtExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertEqual(m.c.expr.nargs(), 3) + self.assertIs(m.c.expr.args[0], m.a.start_time) + self.assertIs(m.c.expr.args[1], m.b.start_time) + self.assertEqual(m.c.expr.delay, 0) + + self.assertEqual(str(m.c.expr), "a.start_time == b.start_time") + + def test_start_at_end(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.start_time.at(m.b.end_time, delay=-1)) + + self.assertIsInstance(m.c.expr, AtExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertEqual(m.c.expr.nargs(), 3) + self.assertIs(m.c.expr.args[0], m.a.start_time) + self.assertIs(m.c.expr.args[1], m.b.end_time) + self.assertEqual(m.c.expr.delay, -1) + + self.assertEqual(str(m.c.expr), "a.start_time - 1 == b.end_time") + + def test_end_before_start(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.end_time.before(m.b.start_time, delay=3)) + + self.assertIsInstance(m.c.expr, BeforeExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertIs(m.c.expr.args[0], m.a.end_time) + self.assertIs(m.c.expr.args[1], m.b.start_time) + self.assertEqual(m.c.expr.delay, 3) + + self.assertEqual(str(m.c.expr), "a.end_time + 3 <= b.start_time") + + def test_end_at_start(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.end_time.at(m.b.start_time, delay=4)) + + self.assertIsInstance(m.c.expr, AtExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertIs(m.c.expr.args[0], m.a.end_time) + self.assertIs(m.c.expr.args[1], m.b.start_time) + self.assertEqual(m.c.expr.delay, 4) + + self.assertEqual(str(m.c.expr), "a.end_time + 4 == b.start_time") + + def test_end_after_start(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.end_time.after(m.b.start_time, delay=-2)) + + self.assertIsInstance(m.c.expr, BeforeExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertIs(m.c.expr.args[0], m.b.start_time) + self.assertIs(m.c.expr.args[1], m.a.end_time) + self.assertEqual(m.c.expr.delay, -2) + + self.assertEqual(str(m.c.expr), "b.start_time - 2 <= a.end_time") + + def test_end_before_end(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.end_time.before(m.b.end_time, delay=-5)) + + self.assertIsInstance(m.c.expr, BeforeExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertIs(m.c.expr.args[0], m.a.end_time) + self.assertIs(m.c.expr.args[1], m.b.end_time) + self.assertEqual(m.c.expr.delay, -5) + + self.assertEqual(str(m.c.expr), "a.end_time - 5 <= b.end_time") + + def test_end_at_end(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.end_time.at(m.b.end_time, delay=-3)) + + self.assertIsInstance(m.c.expr, AtExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertIs(m.c.expr.args[0], m.a.end_time) + self.assertIs(m.c.expr.args[1], m.b.end_time) + self.assertEqual(m.c.expr.delay, -3) + + self.assertEqual(str(m.c.expr), "a.end_time - 3 == b.end_time") + + def test_end_after_end(self): + m = self.get_model() + m.c = LogicalConstraint(expr=m.a.end_time.after(m.b.end_time)) + + self.assertIsInstance(m.c.expr, BeforeExpression) + self.assertEqual(len(m.c.expr.args), 3) + self.assertIs(m.c.expr.args[0], m.b.end_time) + self.assertIs(m.c.expr.args[1], m.a.end_time) + self.assertEqual(m.c.expr.delay, 0) + + self.assertEqual(str(m.c.expr), "b.end_time <= a.end_time") diff --git a/pyomo/contrib/cp/tests/test_step_function_expressions.py b/pyomo/contrib/cp/tests/test_step_function_expressions.py new file mode 100644 index 00000000000..7212cc870d5 --- /dev/null +++ b/pyomo/contrib/cp/tests/test_step_function_expressions.py @@ -0,0 +1,544 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +from pyomo.contrib.cp import IntervalVar, Step, Pulse +from pyomo.contrib.cp.scheduling_expr.step_function_expressions import ( + AlwaysIn, + CumulativeFunction, + NegatedStepFunction, + StepAtStart, + StepAtEnd, + StepAt, +) + +from pyomo.environ import ConcreteModel, LogicalConstraint + + +class CommonTests(unittest.TestCase): + def get_model(self): + m = ConcreteModel() + m.a = IntervalVar() + m.b = IntervalVar() + m.c = IntervalVar([1, 2]) + + return m + + +class TestPulse(CommonTests): + def test_bad_interval_var(self): + with self.assertRaisesRegex( + TypeError, + "The 'interval_var' argument for a 'Pulse' must " + "be an 'IntervalVar'.\n" + "Received: ", + ): + thing = Pulse(interval_var=1.2, height=4) + + def test_create_pulse_with_scalar_interval_var(self): + m = self.get_model() + p = Pulse(interval_var=m.a, height=1) + + self.assertIsInstance(p, Pulse) + self.assertEqual(str(p), "Pulse(a, height=1)") + + def test_create_pulse_with_interval_var_data(self): + m = self.get_model() + p = Pulse(interval_var=m.c[2], height=2) + self.assertIsInstance(p, Pulse) + self.assertEqual(str(p), "Pulse(c[2], height=2)") + + +class TestStep(CommonTests): + def test_bad_time_point(self): + m = self.get_model() + with self.assertRaisesRegex( + TypeError, + "The 'time' argument for a 'Step' must be either " + r"an 'IntervalVarTimePoint' \(for example, the " + r"'start_time' or 'end_time' of an IntervalVar\) or " + "an integer time point in the time horizon.\n" + "Received: " + "", + ): + thing = Step(m.a, height=2) + + +class TestSumStepFunctions(CommonTests): + def test_sum_step_and_pulse(self): + m = self.get_model() + expr = Step(m.a.start_time, height=4) + Pulse((m.b, -1)) + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(expr.nargs(), 2) + self.assertEqual(len(expr.args), 2) + self.assertIsInstance(expr.args[0], StepAtStart) + self.assertIsInstance(expr.args[1], Pulse) + + self.assertEqual( + str(expr), "Step(a.start_time, height=4) + Pulse(b, height=-1)" + ) + + def test_args_clone_correctly(self): + m = self.get_model() + expr = Step(m.a.start_time, height=4) + Pulse((m.b, -1)) + expr2 = expr + Step(m.b.end_time, height=4) + + self.assertIsInstance(expr2, CumulativeFunction) + self.assertEqual(len(expr2.args), 3) + self.assertEqual(expr2.nargs(), 3) + self.assertIsInstance(expr2.args[0], StepAtStart) + self.assertIsInstance(expr2.args[1], Pulse) + self.assertIsInstance(expr2.args[2], StepAtEnd) + + # This will force expr to clone its arguments because it did the + # appending trick to make expr2. + expr3 = expr + Pulse(interval_var=m.b, height=-5) + + self.assertIsInstance(expr3, CumulativeFunction) + self.assertEqual(len(expr3.args), 3) + self.assertEqual(expr3.nargs(), 3) + self.assertIsInstance(expr3.args[0], StepAtStart) + self.assertIsInstance(expr3.args[1], Pulse) + self.assertIsInstance(expr3.args[2], Pulse) + + def test_args_clone_correctly_in_place(self): + m = self.get_model() + s1 = Step(m.a.start_time, height=1) + s2 = Step(m.b.end_time, height=1) + s3 = Step(m.b.start_time, height=2) + p = Pulse(interval_var=m.b, height=3) + + e1 = s1 + s2 + e2 = e1 + s3 + e3 = e1 + e3 += p + + self.assertIsInstance(e1, CumulativeFunction) + self.assertEqual(e1.nargs(), 2) + self.assertIs(e1.args[0], s1) + self.assertIs(e1.args[1], s2) + + self.assertIsInstance(e2, CumulativeFunction) + self.assertEqual(e2.nargs(), 3) + self.assertIs(e2.args[0], s1) + self.assertIs(e2.args[1], s2) + self.assertIs(e2.args[2], s3) + + self.assertIsInstance(e3, CumulativeFunction) + self.assertEqual(e3.nargs(), 3) + self.assertIs(e3.args[0], s1) + self.assertIs(e3.args[1], s2) + self.assertIs(e3.args[2], p) + + def test_sum_two_pulses(self): + m = self.get_model() + m.p1 = Pulse(interval_var=m.a, height=3) + m.p2 = Pulse(interval_var=m.b, height=-2) + + expr = m.p1 + m.p2 + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(len(expr.args), 2) + self.assertEqual(expr.nargs(), 2) + self.assertIs(expr.args[0], m.p1) + self.assertIs(expr.args[1], m.p2) + + def test_sum_in_place(self): + m = self.get_model() + expr = Step(m.a.start_time, height=4) + Pulse(interval_var=m.b, height=-1) + expr += Step(0, 1) + + self.assertEqual(len(expr.args), 3) + self.assertEqual(expr.nargs(), 3) + self.assertIsInstance(expr.args[0], StepAtStart) + self.assertIsInstance(expr.args[1], Pulse) + self.assertIsInstance(expr.args[2], StepAt) + + self.assertEqual( + str(expr), + "Step(a.start_time, height=4) + Pulse(b, height=-1) + Step(0, height=1)", + ) + + def test_sum_steps_in_place(self): + m = self.get_model() + s1 = Step(m.a.end_time, height=2) + expr = s1 + + # Just a step function + self.assertIsInstance(expr, StepAtEnd) + self.assertEqual(len(expr.args), 2) + self.assertEqual(expr.nargs(), 2) + + s2 = Step(m.b.end_time, height=3) + expr += s2 + + # becomes a cumulative function + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(len(expr.args), 2) + self.assertEqual(expr.nargs(), 2) + self.assertIs(expr.args[0], s1) + self.assertIs(expr.args[1], s2) + + def test_sum_pulses_in_place(self): + m = self.get_model() + p1 = Pulse(interval_var=m.a, height=2) + expr = p1 + + self.assertIsInstance(expr, Pulse) + self.assertEqual(len(expr.args), 2) + self.assertEqual(expr.nargs(), 2) + + p2 = Pulse(interval_var=m.b, height=3) + expr += p2 + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(len(expr.args), 2) + self.assertEqual(expr.nargs(), 2) + self.assertIs(expr.args[0], p1) + self.assertIs(expr.args[1], p2) + + def test_sum_step_and_cumul_func(self): + m = self.get_model() + s1 = Step(m.a.start_time, height=4) + p1 = Step(m.a.start_time, height=4) + cumul = s1 + p1 + s = Step(m.a.end_time, height=3) + expr = s + cumul + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(expr.nargs(), 3) + self.assertIs(expr.args[0], s) + self.assertIs(expr.args[1], s1) + self.assertIs(expr.args[2], p1) + + def test_subtract_cumul_from_pulse(self): + m = self.get_model() + p1 = Pulse(interval_var=m.a, height=2) + s1 = Step(m.a.start_time, height=4) + p2 = Pulse(interval_var=m.b, height=3) + cumul = s1 - p2 + expr = p1 - cumul + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(expr.nargs(), 3) + self.assertIs(expr.args[0], p1) + self.assertIsInstance(expr.args[1], NegatedStepFunction) + self.assertIs(expr.args[1].args[0], s1) + self.assertIsInstance(expr.args[2], NegatedStepFunction) + self.assertIsInstance(expr.args[2].args[0], NegatedStepFunction) + self.assertIs(expr.args[2].args[0].args[0], p2) + + def test_subtract_two_cumul_functions(self): + m = self.get_model() + p1 = Pulse(interval_var=m.a, height=2) + s1 = Step(m.a.start_time, height=4) + p2 = Pulse(interval_var=m.b, height=3) + p3 = Pulse(interval_var=m.a, height=-4) + cumul1 = s1 - p2 + cumul2 = p2 + p3 + expr = cumul1 - cumul2 + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(expr.nargs(), 4) + self.assertIs(expr.args[0], s1) + self.assertIsInstance(expr.args[1], NegatedStepFunction) + self.assertIs(expr.args[1].args[0], p2) + self.assertIsInstance(expr.args[2], NegatedStepFunction) + self.assertIs(expr.args[2].args[0], p2) + self.assertIsInstance(expr.args[3], NegatedStepFunction) + self.assertIs(expr.args[3].args[0], p3) + + def test_subtract_two_cumul_functions_requiring_cloning(self): + m = self.get_model() + p1 = Pulse(interval_var=m.a, height=2) + s1 = Step(m.a.start_time, height=4) + p2 = Pulse(interval_var=m.b, height=3) + p3 = Pulse(interval_var=m.a, height=-4) + cumul1 = s1 - p2 + # This will append to the args of cumul1, and then we'll have to clone + # them when we make expr + aux = cumul1 + Step(0, 4) + cumul2 = p2 + p3 + expr = cumul1 - cumul2 + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(expr.nargs(), 4) + self.assertIs(expr.args[0], s1) + self.assertIsInstance(expr.args[1], NegatedStepFunction) + self.assertIs(expr.args[1].args[0], p2) + self.assertIsInstance(expr.args[2], NegatedStepFunction) + self.assertIs(expr.args[2].args[0], p2) + self.assertIsInstance(expr.args[3], NegatedStepFunction) + self.assertIs(expr.args[3].args[0], p3) + + def test_sum_two_cumul_funcs(self): + m = self.get_model() + s1 = Step(m.a.start_time, height=4) + p1 = Step(m.a.start_time, height=4) + cumul1 = s1 + p1 + s2 = Step(m.a.end_time, height=3) + s3 = Step(0, height=34) + cumul2 = s2 + s3 + expr = cumul1 + cumul2 + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(expr.nargs(), 4) + self.assertIs(expr.args[0], s1) + self.assertIs(expr.args[1], p1) + self.assertIs(expr.args[2], s2) + self.assertIs(expr.args[3], s3) + + def test_sum_two_cumul_funcs_requiring_cloning_args(self): + m = self.get_model() + s1 = Step(m.a.start_time, height=4) + p1 = Step(m.a.start_time, height=4) + cumul1 = s1 + p1 + # This one will extend cumul1, so we'll have to clone it when we build + # expr + aux = cumul1 + Step(5, 4) + + s2 = Step(m.a.end_time, height=3) + s3 = Step(0, height=34) + cumul2 = s2 + s3 + expr = cumul1 + cumul2 + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(expr.nargs(), 4) + self.assertIs(expr.args[0], s1) + self.assertIs(expr.args[1], p1) + self.assertIs(expr.args[2], s2) + self.assertIs(expr.args[3], s3) + + def test_cannot_add_constant(self): + m = self.get_model() + with self.assertRaisesRegex( + TypeError, + "Cannot add object of class to object of class " + "", + ): + expr = Step(m.a.start_time, height=6) + 3 + + def test_cannot_add_to_constant(self): + m = self.get_model() + with self.assertRaisesRegex( + TypeError, + "Cannot add object of class to " + "object of class ", + ): + expr = 4 + Step(m.a.start_time, height=6) + + def test_python_sum_funct(self): + # We allow adding to 0 so that sum() works as expected + m = self.get_model() + expr = sum(Pulse(interval_var=m.c[i], height=1) for i in [1, 2]) + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(len(expr.args), 2) + self.assertEqual(expr.nargs(), 2) + self.assertIsInstance(expr.args[0], Pulse) + self.assertIsInstance(expr.args[1], Pulse) + + +class TestSubtractStepFunctions(CommonTests): + def test_subtract_two_steps(self): + m = self.get_model() + + s = Step(m.a.start_time, height=2) - Step(m.b.start_time, height=5) + + self.assertIsInstance(s, CumulativeFunction) + self.assertEqual(len(s.args), 2) + self.assertEqual(s.nargs(), 2) + self.assertIsInstance(s.args[0], StepAtStart) + self.assertIsInstance(s.args[1], NegatedStepFunction) + self.assertEqual(len(s.args[1].args), 1) + self.assertEqual(s.args[1].nargs(), 1) + self.assertIsInstance(s.args[1].args[0], StepAtStart) + + def test_subtract_step_and_pulse(self): + m = self.get_model() + s1 = Step(m.a.end_time, height=2) + s2 = Step(m.b.start_time, height=5) + p = Pulse(interval_var=m.a, height=3) + + expr = s1 - s2 - p + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(len(expr.args), 3) + self.assertEqual(expr.nargs(), 3) + self.assertIs(expr.args[0], s1) + self.assertIsInstance(expr.args[1], NegatedStepFunction) + self.assertIs(expr.args[1].args[0], s2) + self.assertIsInstance(expr.args[2], NegatedStepFunction) + self.assertIs(expr.args[2].args[0], p) + + def test_subtract_pulse_from_two_steps(self): + m = self.get_model() + s1 = Step(m.a.end_time, height=2) + s2 = Step(m.b.start_time, height=5) + p = Pulse(interval_var=m.a, height=3) + + expr = s1 + s2 - p + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(len(expr.args), 3) + self.assertEqual(expr.nargs(), 3) + self.assertIs(expr.args[0], s1) + self.assertIs(expr.args[1], s2) + self.assertIsInstance(expr.args[2], NegatedStepFunction) + self.assertIs(expr.args[2].args[0], p) + + def test_args_clone_correctly(self): + m = self.get_model() + m.p1 = Pulse(interval_var=m.a, height=3) + m.p2 = Pulse(interval_var=m.b, height=4) + m.s = Step(m.a.start_time, height=-1) + + expr1 = m.p1 - m.p2 + self.assertIsInstance(expr1, CumulativeFunction) + self.assertEqual(expr1.nargs(), 2) + self.assertIs(expr1.args[0], m.p1) + self.assertIsInstance(expr1.args[1], NegatedStepFunction) + self.assertIs(expr1.args[1].args[0], m.p2) + + expr2 = m.p1 - m.s + self.assertIsInstance(expr2, CumulativeFunction) + self.assertEqual(expr2.nargs(), 2) + self.assertIs(expr2.args[0], m.p1) + self.assertIsInstance(expr2.args[1], NegatedStepFunction) + self.assertIs(expr2.args[1].args[0], m.s) + + def test_args_clone_correctly_in_place(self): + m = self.get_model() + m.p1 = Pulse(interval_var=m.a, height=3) + m.p2 = Pulse(interval_var=m.b, height=4) + m.s = Step(m.a.start_time, height=-1) + + expr1 = m.p1 - m.p2 + # This will append p1 to expr1's args + expr = expr1 + m.p1 + # Now we have to clone in place + expr1 -= m.s + + self.assertIsInstance(expr1, CumulativeFunction) + self.assertEqual(expr1.nargs(), 3) + self.assertIs(expr1.args[0], m.p1) + self.assertIsInstance(expr1.args[1], NegatedStepFunction) + self.assertIs(expr1.args[1].args[0], m.p2) + self.assertIsInstance(expr1.args[2], NegatedStepFunction) + self.assertIs(expr1.args[2].args[0], m.s) + + # and expr is what we expect too + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(expr.nargs(), 3) + self.assertIs(expr.args[0], m.p1) + self.assertIsInstance(expr.args[1], NegatedStepFunction) + self.assertIs(expr.args[1].args[0], m.p2) + self.assertIs(expr.args[2], m.p1) + + def test_subtract_pulses_in_place(self): + m = self.get_model() + p1 = Pulse(interval_var=m.a, height=1) + p2 = Pulse(interval_var=m.b, height=3) + + expr = p1 + expr -= p2 + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(len(expr.args), 2) + self.assertEqual(expr.nargs(), 2) + self.assertIs(expr.args[0], p1) + self.assertIsInstance(expr.args[1], NegatedStepFunction) + self.assertIs(expr.args[1].args[0], p2) + + def test_subtract_steps_in_place(self): + m = self.get_model() + s1 = Step(m.a.start_time, height=1) + s2 = Step(m.b.end_time, height=3) + + expr = s1 + expr -= s2 + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(len(expr.args), 2) + self.assertEqual(expr.nargs(), 2) + self.assertIs(expr.args[0], s1) + self.assertIsInstance(expr.args[1], NegatedStepFunction) + self.assertIs(expr.args[1].args[0], s2) + + def test_subtract_from_cumul_func_in_place(self): + m = self.get_model() + m.p1 = Pulse(interval_var=m.a, height=5) + m.p2 = Pulse(interval_var=m.b, height=-3) + m.s = Step(m.b.end_time, height=5) + + expr = m.p1 + m.s + expr -= m.p2 + + self.assertIsInstance(expr, CumulativeFunction) + self.assertEqual(expr.nargs(), 3) + self.assertIs(expr.args[0], m.p1) + self.assertIs(expr.args[1], m.s) + self.assertIsInstance(expr.args[2], NegatedStepFunction) + self.assertIs(expr.args[2].args[0], m.p2) + + self.assertEqual( + str(expr), + "Pulse(a, height=5) + Step(b.end_time, height=5) - Pulse(b, height=-3)", + ) + + def test_cannot_subtract_constant(self): + m = self.get_model() + with self.assertRaisesRegex( + TypeError, + "Cannot subtract object of class from object of " + "class ", + ): + expr = Step(m.a.start_time, height=6) - 3 + + def test_cannot_subtract_from_constant(self): + m = self.get_model() + with self.assertRaisesRegex( + TypeError, + "Cannot subtract object of class from " + "object of class ", + ): + expr = 3 - Step(m.a.start_time, height=6) + + +class TestAlwaysIn(CommonTests): + def test_always_in(self): + m = self.get_model() + f = ( + Pulse(interval_var=m.a, height=3) + + Step(m.b.start_time, height=2) + - Step(m.a.end_time, height=-1) + ) + + m.cons = LogicalConstraint(expr=f.within((0, 3), (0, 10))) + self.assertIsInstance(m.cons.expr, AlwaysIn) + + self.assertEqual(m.cons.expr.nargs(), 5) + self.assertEqual(len(m.cons.expr.args), 5) + self.assertIs(m.cons.expr.args[0], f) + self.assertEqual(m.cons.expr.args[1], 0) + self.assertEqual(m.cons.expr.args[2], 3) + self.assertEqual(m.cons.expr.args[3], 0) + self.assertEqual(m.cons.expr.args[4], 10) + self.assertEqual( + str(m.cons.expr), + "(Pulse(a, height=3) + Step(b.start_time, height=2) - " + "Step(a.end_time, height=-1)).within(bounds=(0, 3), " + "times=(0, 10))", + ) diff --git a/examples/pyomobook/abstract-ch/pyomo.bad2.txt b/pyomo/contrib/cp/transform/__init__.py similarity index 100% rename from examples/pyomobook/abstract-ch/pyomo.bad2.txt rename to pyomo/contrib/cp/transform/__init__.py diff --git a/pyomo/contrib/cp/transform/logical_to_disjunctive_program.py b/pyomo/contrib/cp/transform/logical_to_disjunctive_program.py new file mode 100644 index 00000000000..cd7681d4d87 --- /dev/null +++ b/pyomo/contrib/cp/transform/logical_to_disjunctive_program.py @@ -0,0 +1,141 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.contrib.cp.transform.logical_to_disjunctive_walker import ( + LogicalToDisjunctiveVisitor, +) +from pyomo.common.collections import ComponentMap +from pyomo.common.modeling import unique_component_name +from pyomo.common.config import ConfigDict, ConfigValue + +from pyomo.core import ( + TransformationFactory, + VarList, + Binary, + LogicalConstraint, + Block, + ConstraintList, + Transformation, + NonNegativeIntegers, +) +from pyomo.core.base.block import _BlockData +from pyomo.core.base import SortComponents +from pyomo.core.util import target_list +from pyomo.gdp import Disjunct, Disjunction + + +@TransformationFactory.register( + "contrib.logical_to_disjunctive", + doc="Convert logical propositions with only Boolean arguments to MIP " + "representation and convert logical expressions with mixed " + "integer-Boolean arguments (such as atleast, atmost, and exactly) to " + "disjunctive representation", +) +class LogicalToDisjunctive(Transformation): + """ + Re-encode logical constraints as linear constraints, + converting Boolean variables to binary. + """ + + CONFIG = ConfigDict('core.logical_to_disjunctive') + CONFIG.declare( + 'targets', + ConfigValue( + default=None, + domain=target_list, + description="target or list of targets that will be relaxed", + doc=""" + This specifies the list of LogicalConstraints to transform, or the + list of Blocks or Disjuncts on which to transform all of the + LogicalConstraints. Note that if the transformation is done out + of place, the list of targets should be attached to the model before it + is cloned, and the list will specify the targets on the cloned + instance. + """, + ), + ) + + def _apply_to(self, model, **kwds): + config = self.CONFIG(kwds.pop('options', {})) + config.set_value(kwds) + targets = config.targets + if targets is None: + targets = (model,) + + transBlocks = {} + visitor = LogicalToDisjunctiveVisitor() + for t in targets: + if t.ctype is Block or isinstance(t, _BlockData): + self._transform_block(t, model, visitor, transBlocks) + elif t.ctype is LogicalConstraint: + if t.is_indexed(): + self._transform_constraint(t, visitor, transBlocks) + else: + self._transform_constraintData(t, visitor, transBlocks) + else: + raise RuntimeError( + "Target '%s' was not a Block, Disjunct, or" + " LogicalConstraint. It was of type %s " + "and can't be transformed." % (t.name, type(t)) + ) + + def _transform_constraint(self, constraint, visitor, transBlocks): + for i in constraint.keys(sort=SortComponents.ORDERED_INDICES): + self._transform_constraintData(constraint[i], visitor, transBlocks) + constraint.deactivate() + + def _transform_block(self, target_block, model, new_varlists, transBlocks): + _blocks = ( + target_block.values() if target_block.is_indexed() else (target_block,) + ) + for block in _blocks: + # Note that this changes the current (though not the original) + # behavior of logical-to-linear because we descend into Disjuncts in + # order to find logical constraints. In the context of creating a + # traditional disjunctive program, this makes sense--we cannot have + # logical constraints *anywhere* in the active tree after this + # transformation. + for logical_constraint in block.component_objects( + ctype=LogicalConstraint, active=True, descend_into=(Block, Disjunct) + ): + self._transform_constraint( + logical_constraint, new_varlists, transBlocks + ) + + def _transform_constraintData(self, logical_constraint, visitor, transBlocks): + # now create a transformation block on the constraint's parent block (if + # we don't have one already) + parent_block = logical_constraint.parent_block() + xfrm_block = transBlocks.get(parent_block) + if xfrm_block is None: + xfrm_block = self._create_transformation_block(parent_block) + transBlocks[parent_block] = xfrm_block + + # This is may be too cute, but just deceive the walker so it puts stuff + # in the right place. + visitor.constraints = xfrm_block.transformed_constraints + visitor.z_vars = xfrm_block.auxiliary_vars + visitor.disjuncts = xfrm_block.auxiliary_disjuncts + visitor.disjunctions = xfrm_block.auxiliary_disjunctions + visitor.walk_expression(logical_constraint.expr) + logical_constraint.deactivate() + + def _create_transformation_block(self, context): + new_xfrm_block_name = unique_component_name(context, '_logical_to_disjunctive') + new_xfrm_block = Block(doc="Transformation objects for logical_to_disjunctive") + context.add_component(new_xfrm_block_name, new_xfrm_block) + + new_xfrm_block.transformed_constraints = ConstraintList() + new_xfrm_block.auxiliary_vars = VarList(domain=Binary) + new_xfrm_block.auxiliary_disjuncts = Disjunct(NonNegativeIntegers) + new_xfrm_block.auxiliary_disjunctions = Disjunction(NonNegativeIntegers) + + return new_xfrm_block diff --git a/pyomo/contrib/cp/transform/logical_to_disjunctive_walker.py b/pyomo/contrib/cp/transform/logical_to_disjunctive_walker.py new file mode 100644 index 00000000000..624629d326d --- /dev/null +++ b/pyomo/contrib/cp/transform/logical_to_disjunctive_walker.py @@ -0,0 +1,273 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import collections + +from pyomo.common.collections import ComponentMap +from pyomo.common.errors import MouseTrap +from pyomo.core.expr.expr_common import ExpressionType +from pyomo.core.expr.visitor import StreamBasedExpressionVisitor +from pyomo.core.expr.numeric_expr import NumericExpression +from pyomo.core.expr.relational_expr import RelationalExpression +import pyomo.core.expr as EXPR +from pyomo.core.base import ( + Binary, + Constraint, + ConstraintList, + NonNegativeIntegers, + VarList, + value, +) +import pyomo.core.base.boolean_var as BV +from pyomo.core.base.expression import ScalarExpression, _GeneralExpressionData +from pyomo.core.base.param import ScalarParam, _ParamData +from pyomo.core.base.var import ScalarVar, _GeneralVarData +from pyomo.gdp.disjunct import AutoLinkedBooleanVar, Disjunct, Disjunction + + +def _dispatch_boolean_var(visitor, node): + if node not in visitor.boolean_to_binary_map: + binary = node.get_associated_binary() + if binary is not None: + visitor.boolean_to_binary_map[node] = binary + else: + z = visitor.z_vars.add() + visitor.boolean_to_binary_map[node] = z + node.associate_binary_var(z) + if node.fixed: + visitor.boolean_to_binary_map[node].fixed = True + visitor.boolean_to_binary_map[node].set_value( + int(node.value) if node.value is not None else None, skip_validation=True + ) + return False, visitor.boolean_to_binary_map[node] + + +def _dispatch_var(visitor, node): + return False, node + + +def _dispatch_param(visitor, node): + if int(value(node)) == value(node): + return False, node + else: + raise ValueError( + "Found non-integer valued Param '%s' in a logical " + "expression. This cannot be written to a disjunctive " + "form." % node.name + ) + + +def _dispatch_expression(visitor, node): + return False, node.expr + + +def _before_relational_expr(visitor, node): + raise MouseTrap( + "The RelationalExpression '%s' was used as a Boolean term " + "in a logical proposition. This is not yet supported " + "when transforming to disjunctive form." % node + ) + + +def _dispatch_not(visitor, node, a): + # z == !a + if a not in visitor.expansions: + z = visitor.z_vars.add() + visitor.constraints.add(z == 1 - a) + visitor.expansions[a] = z + return visitor.expansions[a] + + +def _dispatch_implication(visitor, node, a, b): + # z == !a v b + return _dispatch_or(visitor, node, 1 - a, b) + + +def _dispatch_equivalence(visitor, node, a, b): + # z == (!a v b) ^ (a v !b) + return _dispatch_and( + visitor, + node, + _dispatch_or(visitor, node, 1 - a, b), + _dispatch_or(visitor, node, a, 1 - b), + ) + + +def _dispatch_and(visitor, node, *args): + # z == a ^ b ^ ... + z = visitor.z_vars.add() + for arg in args: + visitor.constraints.add(arg >= z) + visitor.constraints.add(len(args) - sum(args) >= 1 - z) + return z + + +def _dispatch_or(visitor, node, *args): + # z == a v b v ... + # (!z v a v b v ...) ^ (z v !a) ^ (z v !b) ^ ... + z = visitor.z_vars.add() + visitor.constraints.add((1 - z) + sum(args) >= 1) + for arg in args: + visitor.constraints.add(z + (1 - arg) >= 1) + return z + + +def _dispatch_xor(visitor, node, a, b): + # z == a XOR b + # This is a special case of exactly + return _dispatch_exactly(visitor, node, 1, a, b) + + +def _get_integer_value(n, node): + if n.__class__ in EXPR.native_numeric_types and int(n) == n: + return n + if n.__class__ not in EXPR.native_types: + if n.is_potentially_variable(): + # [ESJ 11/22]: This is probably worth supporting sometime, but right + # now we are abiding by what docplex allows in their 'count' + # function. Part of supporting this will be making sure we catch + # strict inequalities in the GDP transformations. Because if we + # don't know that n is integer-valued we will be forced to write + # strict inequalities instead of incrememting or decrementing by 1 + # in the disjunctions. + raise MouseTrap( + "The first argument '%s' to '%s' is potentially variable. " + "This may be a mathematically coherent expression; However " + "it is not yet supported to convert it to a disjunctive " + "program." % (n, node) + ) + else: + return n + raise ValueError( + "The first argument to '%s' must be an integer.\n\tRecieved: %s" % (node, n) + ) + + +def _dispatch_exactly(visitor, node, *args): + # z = sum(args[1:]) == args[0] + # This is currently implemented as: + # [sum(args[1:] = n] v [[sum(args[1:]) < n] v [sum(args[1:]) > n]] + M = len(args) - 1 + n = _get_integer_value(args[0], node) + sum_expr = sum(args[1:]) + equality_disj = visitor.disjuncts[len(visitor.disjuncts)] + equality_disj.constraint = Constraint(expr=sum_expr == n) + inequality_disj = visitor.disjuncts[len(visitor.disjuncts)] + inequality_disj.disjunction = Disjunction( + expr=[[sum_expr <= n - 1], [sum_expr >= n + 1]] + ) + visitor.disjunctions[len(visitor.disjunctions)] = [equality_disj, inequality_disj] + return equality_disj.indicator_var.get_associated_binary() + + +def _dispatch_atleast(visitor, node, *args): + # z = sum[args[1:] >= n + # This is implemented as: + # [sum(args[1:] >= n] v [sum(args[1:] < n] + n = _get_integer_value(args[0], node) + sum_expr = sum(args[1:]) + atleast_disj = visitor.disjuncts[len(visitor.disjuncts)] + less_disj = visitor.disjuncts[len(visitor.disjuncts)] + atleast_disj.constraint = Constraint(expr=sum_expr >= n) + less_disj.constraint = Constraint(expr=sum_expr <= n - 1) + visitor.disjunctions[len(visitor.disjunctions)] = [atleast_disj, less_disj] + return atleast_disj.indicator_var.get_associated_binary() + + +def _dispatch_atmost(visitor, node, *args): + # z = sum[args[1:] <= n + # This is implemented as: + # [sum(args[1:] <= n] v [sum(args[1:] > n] + n = _get_integer_value(args[0], node) + sum_expr = sum(args[1:]) + atmost_disj = visitor.disjuncts[len(visitor.disjuncts)] + more_disj = visitor.disjuncts[len(visitor.disjuncts)] + atmost_disj.constraint = Constraint(expr=sum_expr <= n) + more_disj.constraint = Constraint(expr=sum_expr >= n + 1) + visitor.disjunctions[len(visitor.disjunctions)] = [atmost_disj, more_disj] + return atmost_disj.indicator_var.get_associated_binary() + + +_operator_dispatcher = {} +_operator_dispatcher[EXPR.ImplicationExpression] = _dispatch_implication +_operator_dispatcher[EXPR.EquivalenceExpression] = _dispatch_equivalence +_operator_dispatcher[EXPR.NotExpression] = _dispatch_not +_operator_dispatcher[EXPR.AndExpression] = _dispatch_and +_operator_dispatcher[EXPR.OrExpression] = _dispatch_or +_operator_dispatcher[EXPR.XorExpression] = _dispatch_xor +_operator_dispatcher[EXPR.ExactlyExpression] = _dispatch_exactly +_operator_dispatcher[EXPR.AtLeastExpression] = _dispatch_atleast +_operator_dispatcher[EXPR.AtMostExpression] = _dispatch_atmost + +_before_child_dispatcher = {} +_before_child_dispatcher[BV.ScalarBooleanVar] = _dispatch_boolean_var +_before_child_dispatcher[BV._GeneralBooleanVarData] = _dispatch_boolean_var +_before_child_dispatcher[AutoLinkedBooleanVar] = _dispatch_boolean_var +_before_child_dispatcher[_ParamData] = _dispatch_param +_before_child_dispatcher[ScalarParam] = _dispatch_param +# for the moment, these are all just so we can get good error messages when we +# don't handle them: +_before_child_dispatcher[ScalarVar] = _dispatch_var +_before_child_dispatcher[_GeneralVarData] = _dispatch_var +_before_child_dispatcher[_GeneralExpressionData] = _dispatch_expression +_before_child_dispatcher[ScalarExpression] = _dispatch_expression + + +class LogicalToDisjunctiveVisitor(StreamBasedExpressionVisitor): + """Converts BooleanExpressions to Linear (MIP) representation + + This converter eschews conjunctive normal form, and instead follows + the well-trodden MINLP path of factorable programming. + + """ + + def __init__(self): + super().__init__() + self.z_vars = VarList(domain=Binary) + self.z_vars.construct() + self.constraints = ConstraintList() + self.disjuncts = Disjunct(NonNegativeIntegers, concrete=True) + self.disjunctions = Disjunction(NonNegativeIntegers) + self.disjunctions.construct() + self.expansions = ComponentMap() + self.boolean_to_binary_map = ComponentMap() + + def initializeWalker(self, expr): + walk, result = self.beforeChild(None, expr, 0) + if not walk: + return False, self.finalizeResult(result) + return True, expr + + def beforeChild(self, node, child, child_idx): + if child.__class__ in EXPR.native_types: + return False, child + + if child.is_numeric_type(): + # Just pass it through, we'll figure it out later + return False, child + if child.is_expression_type(ExpressionType.RELATIONAL): + # Eventually we'll handle these. Right now we set a MouseTrap + return _before_relational_expr(self, child) + + if not child.is_expression_type() or child.is_named_expression_type(): + return _before_child_dispatcher[child.__class__](self, child) + + return True, None + + def exitNode(self, node, data): + return _operator_dispatcher[node.__class__](self, node, *data) + + def finalizeResult(self, result): + # This LogicalExpression must evaluate to True (but note that we cannot + # fix this variable to 1 since this logical expression could be living + # on a Disjunct and later need to be relaxed.) + self.constraints.add(result >= 1) + return result diff --git a/pyomo/checker/__init__.py b/pyomo/contrib/doe/__init__.py similarity index 65% rename from pyomo/checker/__init__.py rename to pyomo/contrib/doe/__init__.py index 4d81f020fc5..e38b5dce1d9 100644 --- a/pyomo/checker/__init__.py +++ b/pyomo/contrib/doe/__init__.py @@ -8,8 +8,7 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - -from pyomo.checker.checker import IModelChecker -from pyomo.checker.runner import CheckingNodeVisitor, ModelCheckRunner -from pyomo.checker.script import ModelScript -from pyomo.checker.hooks import IPreCheckHook, IPostCheckHook +from .measurements import MeasurementVariables, DesignVariables, VariablesWithIndices +from .doe import DesignOfExperiments, CalculationMode, ObjectiveLib, ModelOptionLib +from .scenario import ScenarioGenerator, FiniteDifferenceStep +from .result import FisherResults, GridSearchResult diff --git a/pyomo/contrib/doe/doe.py b/pyomo/contrib/doe/doe.py new file mode 100644 index 00000000000..a668ebb524e --- /dev/null +++ b/pyomo/contrib/doe/doe.py @@ -0,0 +1,1183 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + + +from pyomo.common.dependencies import numpy as np, numpy_available + +import pyomo.environ as pyo +from pyomo.opt import SolverFactory +import pickle +from itertools import permutations, product +import logging +from enum import Enum +from pyomo.common.timing import TicTocTimer +from pyomo.contrib.sensitivity_toolbox.sens import get_dsdp +from pyomo.contrib.doe.scenario import ScenarioGenerator, FiniteDifferenceStep +from pyomo.contrib.doe.result import FisherResults, GridSearchResult + + +class CalculationMode(Enum): + sequential_finite = "sequential_finite" + direct_kaug = "direct_kaug" + + +class ObjectiveLib(Enum): + det = "det" + trace = "trace" + zero = "zero" + + +class ModelOptionLib(Enum): + parmest = "parmest" + stage1 = "stage1" + stage2 = "stage2" + + +class DesignOfExperiments: + def __init__( + self, + param_init, + design_vars, + measurement_vars, + create_model, + solver=None, + prior_FIM=None, + discretize_model=None, + args=None, + ): + """ + This package enables model-based design of experiments analysis with Pyomo. + Both direct optimization and enumeration modes are supported. + NLP sensitivity tools, e.g., sipopt and k_aug, are supported to accelerate analysis via enumeration. + It can be applied to dynamic models, where design variables are controlled throughout the experiment. + + Parameters + ---------- + param_init: + A ``dictionary`` of parameter names and values. + If they defined as indexed Pyomo variable, put the variable name and index, such as 'theta["A1"]'. + design_vars: + A ``DesignVariables`` which contains the Pyomo variable names and their corresponding indices + and bounds for experiment degrees of freedom + measurement_vars: + A ``MeasurementVariables`` which contains the Pyomo variable names and their corresponding indices and + bounds for experimental measurements + create_model: + A Python ``function`` that returns a Concrete Pyomo model, similar to the interface for ``parmest`` + solver: + A ``solver`` object that User specified, default=None. + If not specified, default solver is IPOPT MA57. + prior_FIM: + A 2D numpy array containing Fisher information matrix (FIM) for prior experiments. + The default None means there is no prior information. + discretize_model: + A user-specified ``function`` that discretizes the model. Only use with Pyomo.DAE, default=None + args: + Additional arguments for the create_model function. + """ + + # parameters + self.param = param_init + # design variable name + self.design_name = design_vars.variable_names + self.design_vars = design_vars + self.create_model = create_model + self.args = args + + # create the measurement information object + self.measurement_vars = measurement_vars + self.measure_name = self.measurement_vars.variable_names + + # check if user-defined solver is given + if solver: + self.solver = solver + # if not given, use default solver + else: + self.solver = self._get_default_ipopt_solver() + + # check if discretization is needed + self.discretize_model = discretize_model + + # check if there is prior info + if prior_FIM is None: + self.prior_FIM = np.zeros((len(self.param), len(self.param))) + else: + self.prior_FIM = prior_FIM + self._check_inputs() + + # if print statements + self.logger = logging.getLogger(__name__) + self.logger.setLevel(level=logging.INFO) + + def _check_inputs(self): + """ + Check if the prior FIM is N*N matrix, where N is the number of parameter + """ + if type(self.prior_FIM) != type(None): + if np.shape(self.prior_FIM)[0] != np.shape(self.prior_FIM)[1]: + raise ValueError('Found wrong prior information matrix shape.') + elif np.shape(self.prior_FIM)[0] != len(self.param): + raise ValueError('Found wrong prior information matrix shape.') + + def stochastic_program( + self, + if_optimize=True, + objective_option="det", + scale_nominal_param_value=False, + scale_constant_value=1, + optimize_opt=None, + if_Cholesky=False, + L_LB=1e-7, + L_initial=None, + jac_initial=None, + fim_initial=None, + formula="central", + step=0.001, + tee_opt=True, + ): + """ + Optimize DOE problem with design variables being the decisions. + The DOE model is formed invasively and all scenarios are computed simultaneously. + The function will first run a square problem with design variable being fixed at + the given initial points (Objective function being 0), then a square problem with + design variables being fixed at the given initial points (Objective function being Design optimality), + and then unfix the design variable and do the optimization. + + Parameters + ---------- + if_optimize: + if true, continue to do optimization. else, just run square problem with given design variable values + objective_option: + choose from the ObjectiveLib enum, + "det": maximizing the determinant with ObjectiveLib.det, + "trace": or the trace of the FIM with ObjectiveLib.trace + scale_nominal_param_value: + if True, the parameters are scaled by its own nominal value in param_init + scale_constant_value: + scale all elements in Jacobian matrix, default is 1. + optimize_opt: + A dictionary, keys are design variables, values are True or False deciding if this design variable will be optimized as DOF or not + if_Cholesky: + if True, Cholesky decomposition is used for Objective function for D-optimality. + L_LB: + L is the Cholesky decomposition matrix for FIM, i.e. FIM = L*L.T. + L_LB is the lower bound for every element in L. + if FIM is positive definite, the diagonal element should be positive, so we can set a LB like 1E-10 + L_initial: + initialize the L + jac_initial: + a matrix used to initialize jacobian matrix + fim_initial: + a matrix used to initialize FIM matrix + formula: + choose from "central", "forward", "backward", + which refers to the Enum FiniteDifferenceStep.central, .forward, or .backward + step: + Sensitivity perturbation step size, a fraction between [0,1]. default is 0.001 + tee_opt: + if True, IPOPT console output is printed + + Returns + ------- + analysis_square: result summary of the square problem solved at the initial point + analysis_optimize: result summary of the optimization problem solved + + """ + # store inputs in object + self.design_values = self.design_vars.variable_names_value + self.optimize = if_optimize + self.objective_option = ObjectiveLib(objective_option) + self.scale_nominal_param_value = scale_nominal_param_value + self.scale_constant_value = scale_constant_value + self.Cholesky_option = if_Cholesky + self.L_LB = L_LB + self.L_initial = L_initial + self.jac_initial = jac_initial + self.fim_initial = fim_initial + self.formula = FiniteDifferenceStep(formula) + self.step = step + self.tee_opt = tee_opt + + # calculate how much the FIM element is scaled by a constant number + # FIM = Jacobian.T@Jacobian, the FIM is scaled by squared value the Jacobian is scaled + self.fim_scale_constant_value = self.scale_constant_value**2 + + sp_timer = TicTocTimer() + sp_timer.tic(msg=None) + + # build the large DOE pyomo model + m = self._create_doe_model(no_obj=True) + + # solve model, achieve results for square problem, and results for optimization problem + m, analysis_square = self._compute_stochastic_program(m, optimize_opt) + + if self.optimize: + analysis_optimize = self._optimize_stochastic_program(m) + dT = sp_timer.toc(msg=None) + self.logger.info("elapsed time: %0.1f" % dT) + return analysis_square, analysis_optimize + + else: + dT = sp_timer.toc(msg=None) + self.logger.info("elapsed time: %0.1f" % dT) + return analysis_square + + def _compute_stochastic_program(self, m, optimize_option): + """ + Solve the stochastic program problem as a square problem. + """ + + # Solve square problem first + # result_square: solver result + result_square = self._solve_doe(m, fix=True, opt_option=optimize_option) + + # extract Jac + jac_square = self._extract_jac(m) + + # create result object + analysis_square = FisherResults( + list(self.param.keys()), + self.measurement_vars, + jacobian_info=None, + all_jacobian_info=jac_square, + prior_FIM=self.prior_FIM, + scale_constant_value=self.scale_constant_value, + ) + # for simultaneous mode, FIM and Jacobian are extracted with extract_FIM() + analysis_square.result_analysis(result=result_square) + + analysis_square.model = m + + self.analysis_square = analysis_square + return m, analysis_square + + def _optimize_stochastic_program(self, m): + """ + Solve the stochastic program problem as an optimization problem. + """ + + m = self._add_objective(m) + + result_doe = self._solve_doe(m, fix=False) + + # extract Jac + jac_optimize = self._extract_jac(m) + + # create result object + analysis_optimize = FisherResults( + list(self.param.keys()), + self.measurement_vars, + jacobian_info=None, + all_jacobian_info=jac_optimize, + prior_FIM=self.prior_FIM, + ) + # for simultaneous mode, FIM and Jacobian are extracted with extract_FIM() + analysis_optimize.result_analysis(result=result_doe) + analysis_optimize.model = m + + return analysis_optimize + + def compute_FIM( + self, + mode="direct_kaug", + FIM_store_name=None, + specified_prior=None, + tee_opt=True, + scale_nominal_param_value=False, + scale_constant_value=1, + store_output=None, + read_output=None, + extract_single_model=None, + formula="central", + step=0.001, + ): + """ + This function calculates the Fisher information matrix (FIM) using sensitivity information obtained + from two possible modes (defined by the CalculationMode Enum): + + 1. sequential_finite: sequentially solve square problems and use finite difference approximation + 2. direct_kaug: solve a single square problem then extract derivatives using NLP sensitivity theory + + Parameters + ---------- + mode: + supports CalculationMode.sequential_finite or CalculationMode.direct_kaug + FIM_store_name: + if storing the FIM in a .csv or .txt, give the file name here as a string. + specified_prior: + a 2D numpy array providing alternate prior matrix, default is no prior. + tee_opt: + if True, IPOPT console output is printed + scale_nominal_param_value: + if True, the parameters are scaled by its own nominal value in param_init + scale_constant_value: + scale all elements in Jacobian matrix, default is 1. + store_output: + if storing the output (value stored in Var 'output_record') as a pickle file, give the file name here as a string. + read_output: + if reading the output (value for Var 'output_record') as a pickle file, give the file name here as a string. + extract_single_model: + if True, the solved model outputs for each scenario are all recorded as a .csv file. + The output file uses the name AB.csv, where string A is store_output input, B is the index of scenario. + scenario index is the number of the scenario outputs which is stored. + formula: + choose from the Enum FiniteDifferenceStep.central, .forward, or .backward. + This option is only used for CalculationMode.sequential_finite mode. + step: + Sensitivity perturbation step size, a fraction between [0,1]. default is 0.001 + + Returns + ------- + FIM_analysis: result summary object of this solve + """ + + # save inputs in object + self.design_values = self.design_vars.variable_names_value + self.scale_nominal_param_value = scale_nominal_param_value + self.scale_constant_value = scale_constant_value + self.formula = FiniteDifferenceStep(formula) + self.mode = CalculationMode(mode) + self.step = step + + # This method only solves square problem + self.optimize = False + # Set the Objective Function to 0 helps solve square problem quickly + self.objective_option = ObjectiveLib.zero + self.tee_opt = tee_opt + + self.FIM_store_name = FIM_store_name + self.specified_prior = specified_prior + + # calculate how much the FIM element is scaled by a constant number + # As FIM~Jacobian.T@Jacobian, FIM is scaled twice the number the Q is scaled + self.fim_scale_constant_value = self.scale_constant_value**2 + + square_timer = TicTocTimer() + square_timer.tic(msg=None) + if self.mode == CalculationMode.sequential_finite: + FIM_analysis = self._sequential_finite( + read_output, extract_single_model, store_output + ) + + elif self.mode == CalculationMode.direct_kaug: + FIM_analysis = self._direct_kaug() + + dT = square_timer.toc(msg=None) + self.logger.info("elapsed time: %0.1f" % dT) + + return FIM_analysis + + def _sequential_finite(self, read_output, extract_single_model, store_output): + """Sequential_finite mode uses Pyomo Block to evaluate the sensitivity information.""" + + # if measurements are provided + if read_output: + with open(read_output, 'rb') as f: + output_record = pickle.load(f) + f.close() + jac = self._finite_calculation(output_record) + + # if measurements are not provided + else: + mod = self._create_block() + + # dict for storing model outputs + output_record = {} + + # solve model + square_result = self._solve_doe(mod, fix=True) + + if extract_single_model: + mod_name = store_output + '.csv' + dataframe = extract_single_model(mod, square_result) + dataframe.to_csv(mod_name) + + # loop over blocks for results + for s in range(len(self.scenario_list)): + # loop over measurement item and time to store model measurements + output_iter = [] + + # extract variable values + for r in self.measure_name: + cuid = pyo.ComponentUID(r) + try: + var_up = cuid.find_component_on(mod.block[s]) + except: + raise ValueError( + f"measurement {r} cannot be found in the model." + ) + output_iter.append(pyo.value(var_up)) + + output_record[s] = output_iter + + output_record['design'] = self.design_values + + if store_output: + f = open(store_output, 'wb') + pickle.dump(output_record, f) + f.close() + + # calculate jacobian + jac = self._finite_calculation(output_record) + + # return all models formed + self.model = mod + + # Store the Jacobian information for access by users, not necessarily call result object to achieve jacobian information + # It is the overall set of Jacobian information, + # while in the result object the jacobian can be cut to achieve part of the FIM information + self.jac = jac + + # Assemble and analyze results + if self.specified_prior is None: + prior_in_use = self.prior_FIM + else: + prior_in_use = self.specified_prior + + FIM_analysis = FisherResults( + list(self.param.keys()), + self.measurement_vars, + jacobian_info=None, + all_jacobian_info=jac, + prior_FIM=prior_in_use, + store_FIM=self.FIM_store_name, + scale_constant_value=self.scale_constant_value, + ) + + return FIM_analysis + + def _direct_kaug(self): + # create model + mod = self.create_model(model_option=ModelOptionLib.parmest) + + # discretize if needed + if self.discretize_model: + mod = self.discretize_model(mod, block=False) + + # add objective function + mod.Obj = pyo.Objective(expr=0, sense=pyo.minimize) + + # set ub and lb to parameters + for par in self.param.keys(): + cuid = pyo.ComponentUID(par) + var = cuid.find_component_on(mod) + var.setlb(self.param[par]) + var.setub(self.param[par]) + + # generate parameter name list and value dictionary with index + var_name = list(self.param.keys()) + + # call k_aug get_dsdp function + square_result = self._solve_doe(mod, fix=True) + dsdp_re, col = get_dsdp( + mod, list(self.param.keys()), self.param, tee=self.tee_opt + ) + + # analyze result + dsdp_array = dsdp_re.toarray().T + self.dsdp = dsdp_array + self.dsdp = col + # store dsdp returned + dsdp_extract = [] + # get right lines from results + measurement_index = [] + + # loop over measurement variables and their time points + for mname in self.measure_name: + try: + kaug_no = col.index(mname) + measurement_index.append(kaug_no) + # get right line of dsdp + dsdp_extract.append(dsdp_array[kaug_no]) + except: + # k_aug does not provide value for fixed variables + self.logger.debug('The variable is fixed: %s', mname) + # produce the sensitivity for fixed variables + zero_sens = np.zeros(len(self.param)) + # for fixed variables, the sensitivity are a zero vector + dsdp_extract.append(zero_sens) + + # Extract and calculate sensitivity if scaled by constants or parameters. + # Convert sensitivity to a dictionary + jac = {} + for par in self.param.keys(): + jac[par] = [] + + for d in range(len(dsdp_extract)): + for p, par in enumerate(self.param.keys()): + # if scaled by parameter value or constant value + sensi = dsdp_extract[d][p] * self.scale_constant_value + if self.scale_nominal_param_value: + sensi *= self.param[par] + jac[par].append(sensi) + + # check if another prior experiment FIM is provided other than the user-specified one + if self.specified_prior is None: + prior_in_use = self.prior_FIM + else: + prior_in_use = self.specified_prior + + # Assemble and analyze results + FIM_analysis = FisherResults( + list(self.param.keys()), + self.measurement_vars, + jacobian_info=None, + all_jacobian_info=jac, + prior_FIM=prior_in_use, + store_FIM=self.FIM_store_name, + scale_constant_value=self.scale_constant_value, + ) + + self.jac = jac + self.mod = mod + + return FIM_analysis + + def _create_block(self): + """ + Create a pyomo Concrete model and add blocks with different parameter perturbation scenarios. + + Returns + ------- + mod: Concrete Pyomo model + """ + + # create scenario information for block scenarios + scena_gen = ScenarioGenerator( + parameter_dict=self.param, formula=self.formula, step=self.step + ) + + self.scenario_data = scena_gen.ScenarioData + + # a list of dictionary, each one is a parameter dictionary with perturbed parameter values + self.scenario_list = self.scenario_data.scenario + # dictionary, keys are parameter name, values are a list of scenario index where this parameter is perturbed. + self.scenario_num = self.scenario_data.scena_num + # dictionary, keys are parameter name, values are the perturbation step + self.eps_abs = self.scenario_data.eps_abs + self.scena_gen = scena_gen + + # Create a global model + mod = pyo.ConcreteModel() + + # Set for block/scenarios + mod.scenario = pyo.Set(initialize=self.scenario_data.scenario_indices) + + # Allow user to self-define complex design variables + self.create_model(mod=mod, model_option=ModelOptionLib.stage1) + + def block_build(b, s): + # create block scenarios + self.create_model(mod=b, model_option=ModelOptionLib.stage2) + + # fix parameter values to perturbed values + for par in self.param: + cuid = pyo.ComponentUID(par) + var = cuid.find_component_on(b) + var.fix(self.scenario_data.scenario[s][par]) + + mod.block = pyo.Block(mod.scenario, rule=block_build) + + # discretize the model + if self.discretize_model: + mod = self.discretize_model(mod) + + # force design variables in blocks to be equal to global design values + for name in self.design_name: + + def fix1(mod, s): + cuid = pyo.ComponentUID(name) + design_var_global = cuid.find_component_on(mod) + design_var = cuid.find_component_on(mod.block[s]) + return design_var == design_var_global + + con_name = "con" + name + mod.add_component(con_name, pyo.Constraint(mod.scenario, expr=fix1)) + + return mod + + def _finite_calculation(self, output_record): + """ + Calculate Jacobian for sequential_finite mode + + Parameters + ---------- + output_record: a dict of outputs, keys are scenario names, values are a list of measurements values + scena_gen: an object generated by Scenario_creator class + + Returns + ------- + jac: Jacobian matrix, a dictionary, keys are parameter names, values are a list of jacobian values with respect to this parameter + """ + # dictionary form of jacobian + jac = {} + + # After collecting outputs from all scenarios, calculate sensitivity + for para in self.param.keys(): + # extract involved scenario No. for each parameter from scenario class + involved_s = self.scenario_data.scena_num[para] + + # each parameter has two involved scenarios + s1 = involved_s[0] # positive perturbation + s2 = involved_s[1] # negative perturbation + list_jac = [] + for i in range(len(output_record[s1])): + sensi = ( + (output_record[s1][i] - output_record[s2][i]) + / self.scenario_data.eps_abs[para] + * self.scale_constant_value + ) + if self.scale_nominal_param_value: + sensi *= self.param[para] + list_jac.append(sensi) + # get Jacobian dict, keys are parameter name, values are sensitivity info + jac[para] = list_jac + + return jac + + def _extract_jac(self, m): + """ + Extract jacobian from the stochastic program + + Parameters + ---------- + m: solved stochastic program model + + Returns + ------- + JAC: the overall jacobian as a dictionary + """ + # dictionary form of jacobian + jac = {} + # loop over parameters + for p in self.param.keys(): + jac_para = [] + for res in m.measured_variables: + jac_para.append(pyo.value(m.sensitivity_jacobian[p, res])) + jac[p] = jac_para + return jac + + def run_grid_search( + self, + design_ranges, + mode="sequential_finite", + tee_option=False, + scale_nominal_param_value=False, + scale_constant_value=1, + store_name=None, + read_name=None, + store_optimality_as_csv=None, + formula="central", + step=0.001, + ): + """ + Enumerate through full grid search for any number of design variables; + solve square problems sequentially to compute FIMs. + It calculates FIM with sensitivity information from two modes: + + 1. sequential_finite: Calculates a one scenario model multiple times for multiple scenarios. + Sensitivity info estimated by finite difference + 2. direct_kaug: calculate sensitivity by k_aug with direct sensitivity + + Parameters + ---------- + design_ranges: + a ``dict``, keys are design variable names, + values are a list of design variable values to go over + mode: + choose from CalculationMode.sequential_finite, .direct_kaug. + tee_option: + if solver console output is made + scale_nominal_param_value: + if True, the parameters are scaled by its own nominal value in param_init + scale_constant_value: + scale all elements in Jacobian matrix, default is 1. + store_name: + a string of file name. If not None, store results with this name. + It is a pickle file containing all measurement information after solving the + model with perturbations. + Since there are multiple experiments, results are numbered with a scalar number, + and the result for one grid is 'store_name(count).csv' (count is the number of count). + read_name: + a string of file name. If not None, read result files. + It should be a pickle file previously generated by store_name option. + Since there are multiple experiments, this string should be the common part of all files; + Real name of the file is "read_name(count)", where count is the number of the experiment. + store_optimality_as_csv: + if True, the design criterion values of grid search results stored with this file name as a csv + formula: + choose from FiniteDifferenceStep.central, .forward, or .backward. + This option is only used for CalculationMode.sequential_finite. + step: + Sensitivity perturbation step size, a fraction between [0,1]. default is 0.001 + + Returns + ------- + figure_draw_object: a combined result object of class Grid_search_result + """ + # Set the Objective Function to 0 helps solve square problem quickly + self.objective_option = ObjectiveLib.zero + self.store_optimality_as_csv = store_optimality_as_csv + + # calculate how much the FIM element is scaled + self.fim_scale_constant_value = scale_constant_value**2 + + # to store all FIM results + result_combine = {} + + # all lists of values of each design variable to go over + design_ranges_list = list(design_ranges.values()) + # design variable names to go over + design_dimension_names = list(design_ranges.keys()) + + # iteration 0 + count = 0 + failed_count = 0 + # how many sets of design variables will be run + total_count = 1 + for rng in design_ranges_list: + total_count *= len(rng) + + time_set = [] # record time for every iteration + + # generate combinations of design variable values to go over + search_design_set = product(*design_ranges_list) + + # loop over design value combinations + for design_set_iter in search_design_set: + # generate the design variable dictionary needed for running compute_FIM + # first copy value from design_values + design_iter = self.design_vars.variable_names_value.copy() + # update the controlled value of certain time points for certain design variables + for i, names in enumerate(design_dimension_names): + # if the element is a list, all design variables in this list share the same values + if type(names) is list or type(names) is tuple: + for n in names: + design_iter[n] = list(design_set_iter)[i] + else: + design_iter[names] = list(design_set_iter)[i] + + self.design_vars.variable_names_value = design_iter + iter_timer = TicTocTimer() + self.logger.info('=======Iteration Number: %s =====', count + 1) + self.logger.debug( + 'Design variable values of this iteration: %s', design_iter + ) + iter_timer.tic(msg=None) + # generate store name + if store_name is None: + store_output_name = None + else: + store_output_name = store_name + str(count) + + if read_name: + read_input_name = read_name + str(count) + else: + read_input_name = None + + # call compute_FIM to get FIM + try: + result_iter = self.compute_FIM( + mode=mode, + tee_opt=tee_option, + scale_nominal_param_value=scale_nominal_param_value, + scale_constant_value=scale_constant_value, + store_output=store_output_name, + read_output=read_input_name, + formula=formula, + step=step, + ) + + count += 1 + + result_iter.result_analysis() + + # iteration time + iter_t = iter_timer.toc(msg=None) + time_set.append(iter_t) + + # give run information at each iteration + self.logger.info( + 'This is the %s run out of %s run.', (count + 1), total_count + ) + self.logger.info('The code has run %s seconds.', sum(time_set)) + self.logger.info( + 'Estimated remaining time: %s seconds', + (sum(time_set) / (count + 1) * (total_count - count - 1)), + ) + + # the combined result object are organized as a dictionary, keys are a tuple of the design variable values, values are a result object + result_combine[tuple(design_set_iter)] = result_iter + + except: + self.logger.warning( + ':::::::::::Warning: Cannot converge this run.::::::::::::' + ) + count += 1 + failed_count += 1 + self.logger.warning('failed count:', failed_count) + result_combine[tuple(design_set_iter)] = None + + # For user's access + self.all_fim = result_combine + + # Create figure drawing object + figure_draw_object = GridSearchResult( + design_ranges_list, + design_dimension_names, + result_combine, + store_optimality_name=store_optimality_as_csv, + ) + + self.logger.info('Overall wall clock time [s]: %s', sum(time_set)) + + return figure_draw_object + + def _create_doe_model(self, no_obj=True): + """ + Add equations to compute sensitivities, FIM, and objective. + + Parameters + ----------- + no_obj: if True, objective function is 0. + + Return + ------- + model: the DOE model + """ + model = self._create_block() + + # variables for jacobian and FIM + model.regression_parameters = pyo.Set(initialize=list(self.param.keys())) + model.measured_variables = pyo.Set(initialize=self.measure_name) + + def identity_matrix(m, i, j): + if i == j: + return 1 + else: + return 0 + + model.sensitivity_jacobian = pyo.Var( + model.regression_parameters, model.measured_variables, initialize=0.1 + ) + + if self.fim_initial: + dict_fim_initialize = {} + for i, bu in enumerate(model.regression_parameters): + for j, un in enumerate(model.regression_parameters): + dict_fim_initialize[(bu, un)] = self.fim_initial[i][j] + + def initialize_fim(m, j, d): + return dict_fim_initialize[(j, d)] + + if self.fim_initial: + model.fim = pyo.Var( + model.regression_parameters, + model.regression_parameters, + initialize=initialize_fim, + ) + else: + model.fim = pyo.Var( + model.regression_parameters, + model.regression_parameters, + initialize=identity_matrix, + ) + + # move the L matrix initial point to a dictionary + if type(self.L_initial) != type(None): + dict_cho = {} + for i, bu in enumerate(model.regression_parameters): + for j, un in enumerate(model.regression_parameters): + dict_cho[(bu, un)] = self.L_initial[i][j] + + # use the L dictionary to initialize L matrix + def init_cho(m, i, j): + return dict_cho[(i, j)] + + # if cholesky, define L elements as variables + if self.Cholesky_option: + # Define elements of Cholesky decomposition matrix as Pyomo variables and either + # Initialize with L in L_initial + if type(self.L_initial) != type(None): + model.L_ele = pyo.Var( + model.regression_parameters, + model.regression_parameters, + initialize=init_cho, + ) + # or initialize with the identity matrix + else: + model.L_ele = pyo.Var( + model.regression_parameters, + model.regression_parameters, + initialize=identity_matrix, + ) + + # loop over parameter name + for i, c in enumerate(model.regression_parameters): + for j, d in enumerate(model.regression_parameters): + # fix the 0 half of L matrix to be 0.0 + if i < j: + model.L_ele[c, d].fix(0.0) + # Give LB to the diagonal entries + if self.L_LB: + if c == d: + model.L_ele[c, d].setlb(self.L_LB) + + # jacobian rule + def jacobian_rule(m, p, n): + """ + m: Pyomo model + p: parameter + n: response + """ + cuid = pyo.ComponentUID(n) + var_up = cuid.find_component_on(m.block[self.scenario_num[p][0]]) + var_lo = cuid.find_component_on(m.block[self.scenario_num[p][1]]) + if self.scale_nominal_param_value: + return ( + m.sensitivity_jacobian[p, n] + == (var_up - var_lo) + / self.eps_abs[p] + * self.param[p] + * self.scale_constant_value + ) + else: + return ( + m.sensitivity_jacobian[p, n] + == (var_up - var_lo) / self.eps_abs[p] * self.scale_constant_value + ) + + # A constraint to calculate elements in Hessian matrix + # transfer prior FIM to be Expressions + fim_initial_dict = {} + for i, bu in enumerate(model.regression_parameters): + for j, un in enumerate(model.regression_parameters): + fim_initial_dict[(bu, un)] = self.prior_FIM[i][j] + + def read_prior(m, i, j): + return fim_initial_dict[(i, j)] + + model.priorFIM = pyo.Expression( + model.regression_parameters, model.regression_parameters, rule=read_prior + ) + + def fim_rule(m, p, q): + """ + m: Pyomo model + p: parameter + q: parameter + """ + return ( + m.fim[p, q] + == sum( + 1 + / self.measurement_vars.variance[n] + * m.sensitivity_jacobian[p, n] + * m.sensitivity_jacobian[q, n] + for n in model.measured_variables + ) + + m.priorFIM[p, q] * self.fim_scale_constant_value + ) + + model.jacobian_constraint = pyo.Constraint( + model.regression_parameters, model.measured_variables, rule=jacobian_rule + ) + model.fim_constraint = pyo.Constraint( + model.regression_parameters, model.regression_parameters, rule=fim_rule + ) + + return model + + def _add_objective(self, m): + def cholesky_imp(m, c, d): + """ + Calculate Cholesky L matrix using algebraic constraints + """ + # If it is the left bottom half of L + if list(self.param.keys()).index(c) >= list(self.param.keys()).index(d): + return m.fim[c, d] == sum( + m.L_ele[c, list(self.param.keys())[k]] + * m.L_ele[d, list(self.param.keys())[k]] + for k in range(list(self.param.keys()).index(d) + 1) + ) + else: + # This is the empty half of L above the diagonal + return pyo.Constraint.Skip + + def trace_calc(m): + """ + Calculate FIM elements. Can scale each element with 1000 for performance + """ + return m.trace == sum(m.fim[j, j] for j in m.regression_parameters) + + def det_general(m): + r"""Calculate determinant. Can be applied to FIM of any size. + det(A) = sum_{\sigma \in \S_n} (sgn(\sigma) * \Prod_{i=1}^n a_{i,\sigma_i}) + Use permutation() to get permutations, sgn() to get signature + """ + r_list = list(range(len(m.regression_parameters))) + # get all permutations + object_p = permutations(r_list) + list_p = list(object_p) + + # generate a name_order to iterate \sigma_i + det_perm = 0 + for i in range(len(list_p)): + name_order = [] + x_order = list_p[i] + # sigma_i is the value in the i-th position after the reordering \sigma + for x in range(len(x_order)): + for y, element in enumerate(m.regression_parameters): + if x_order[x] == y: + name_order.append(element) + + # det(A) = sum_{\sigma \in \S_n} (sgn(\sigma) * \Prod_{i=1}^n a_{i,\sigma_i}) + det_perm = sum( + self._sgn(list_p[d]) + * sum( + m.fim[each, name_order[b]] + for b, each in enumerate(m.regression_parameters) + ) + for d in range(len(list_p)) + ) + return m.det == det_perm + + if self.Cholesky_option: + m.cholesky_cons = pyo.Constraint( + m.regression_parameters, m.regression_parameters, rule=cholesky_imp + ) + m.Obj = pyo.Objective( + expr=2 * sum(pyo.log(m.L_ele[j, j]) for j in m.regression_parameters), + sense=pyo.maximize, + ) + # if not cholesky but determinant, calculating det and evaluate the OBJ with det + elif self.objective_option == ObjectiveLib.det: + m.det_rule = pyo.Constraint(rule=det_general) + m.Obj = pyo.Objective(expr=pyo.log(m.det), sense=pyo.maximize) + # if not determinant or cholesky, calculating the OBJ with trace + elif self.objective_option == ObjectiveLib.trace: + m.trace_rule = pyo.Constraint(rule=trace_calc) + m.Obj = pyo.Objective(expr=pyo.log(m.trace), sense=pyo.maximize) + elif self.objective_option == ObjectiveLib.zero: + m.Obj = pyo.Objective(expr=0) + + return m + + def _fix_design(self, m, design_val, fix_opt=True, optimize_option=None): + """ + Fix design variable + + Parameters + ---------- + m: model + design_val: design variable values dict + fix_opt: if True, fix. Else, unfix + optimize: a dictionary, keys are design variable name, values are True or False, deciding if this design variable is optimized as DOF this time + + Returns + ------- + m: model + """ + for name in self.design_name: + cuid = pyo.ComponentUID(name) + var = cuid.find_component_on(m) + if fix_opt: + var.fix(design_val[name]) + else: + if optimize_option is None: + var.unfix() + else: + if optimize_option[name]: + var.unfix() + return m + + def _get_default_ipopt_solver(self): + """Default solver""" + solver = SolverFactory('ipopt') + solver.options['linear_solver'] = 'ma57' + solver.options['halt_on_ampl_error'] = 'yes' + solver.options['max_iter'] = 3000 + return solver + + def _solve_doe(self, m, fix=False, opt_option=None): + """Solve DOE model. + If it's a square problem, fix design variable and solve. + Else, fix design variable and solve square problem firstly, then unfix them and solve the optimization problem + + Parameters + ---------- + m:model + fix: if true, solve two times (square first). Else, just solve the square problem + opt_option: a dictionary, keys are design variable name, values are True or False, + deciding if this design variable is optimized as DOF this time. + If None, all design variables are optimized as DOF this time. + + Returns + ------- + solver_results: solver results + """ + ### Solve square problem + mod = self._fix_design( + m, self.design_values, fix_opt=fix, optimize_option=opt_option + ) + + # if user gives solver, use this solver. if not, use default IPOPT solver + solver_result = self.solver.solve(mod, tee=self.tee_opt) + + return solver_result + + def _sgn(self, p): + """ + This is a helper function for stochastic_program function to compute the determinant formula. + Give the signature of a permutation + + Parameters + ----------- + p: the permutation (a list) + + Returns + ------- + 1 if the number of exchange is an even number + -1 if the number is an odd number + """ + + if len(p) == 1: + return 1 + + trans = 0 + + for i in range(0, len(p)): + j = i + 1 + + for j in range(j, len(p)): + if p[i] > p[j]: + trans = trans + 1 + + if (trans % 2) == 0: + return 1 + else: + return -1 diff --git a/pyomo/checker/tests/examples/model/Imports_missing.py b/pyomo/contrib/doe/examples/__init__.py similarity index 100% rename from pyomo/checker/tests/examples/model/Imports_missing.py rename to pyomo/contrib/doe/examples/__init__.py diff --git a/pyomo/contrib/doe/examples/fim_5_300_500_scale.csv b/pyomo/contrib/doe/examples/fim_5_300_500_scale.csv new file mode 100644 index 00000000000..77c0424aa13 --- /dev/null +++ b/pyomo/contrib/doe/examples/fim_5_300_500_scale.csv @@ -0,0 +1,5 @@ +A1,A2,E1,E2 +28.678928056936364,5.412497388906993,-81.73674601413501,-24.023773235011475 +5.412497388906993,26.409350356572013,-12.418164773953235,-139.2399253159117 +-81.73674601413501,-12.418164773953235,240.46276003997696,58.764228064029076 +-24.023773235011475,-139.2399253159117,58.764228064029076,767.255845082616 diff --git a/pyomo/contrib/doe/examples/fim_5_300_scale.csv b/pyomo/contrib/doe/examples/fim_5_300_scale.csv new file mode 100644 index 00000000000..381e916b9d4 --- /dev/null +++ b/pyomo/contrib/doe/examples/fim_5_300_scale.csv @@ -0,0 +1,5 @@ +A1,A2,E1,E2 +22.529430237938822,1.8403431417002734,-70.23273336318343,-11.094329617631416 +1.8403431417002734,18.098481155262718,-5.7356503398877745,-109.15866135211135 +-70.23273336318343,-5.7356503398877745,218.9419284259853,34.576808479575064 +-11.094329617631416,-109.15866135211135,34.576808479575064,658.3764463408718 diff --git a/pyomo/contrib/doe/examples/fim_doe_tutorial.ipynb b/pyomo/contrib/doe/examples/fim_doe_tutorial.ipynb new file mode 100644 index 00000000000..2535032a9a4 --- /dev/null +++ b/pyomo/contrib/doe/examples/fim_doe_tutorial.ipynb @@ -0,0 +1,1169 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Reactor Kinetics Example \n", + "\n", + "Jialu Wang (jwang44@nd.edu) and Alex Dowling (adowling@nd.edu)\n", + "\n", + "University of Notre Dame\n", + "\n", + "This notebook conducts design of experiments for a reactor kinetics experiment with the Pyomo.DoE.\n", + " " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 0: Import Pyomo and Pyomo.DoE module" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pyomo.environ as pyo\n", + "from pyomo.contrib.doe import DesignOfExperiments, MeasurementVariables, DesignVariables" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "## check if ipopt available \n", + "ipopt_available = pyo.SolverFactory('ipopt').available()\n", + "if not (ipopt_available):\n", + " raise RuntimeError('This Pyomo.DoE example requires Ipopt.')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 1: Import Reaction Example Mathematical Model" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Consider two chemical reactions that convert molecule $A$ to desired product $B$ and a less valuable side-product $C$.\n", + "\n", + "$A \\overset{k_1}{\\rightarrow} B \\overset{k_2}{\\rightarrow} C$\n", + "\n", + "Our ultimate goal is to design a large-scale continuous reactor that maximizes the production of $B$. This general sequential reactions problem is widely applicable to CO$_2$ capture and industry more broadly (petrochemicals, pharmaceuticals, etc.).\n", + "\n", + "The rate laws for these two chemical reactions are:\n", + "\n", + "$r_A = -k_1 C_A$\n", + "\n", + "$r_B = k_1 C_A - k_2 C_B$\n", + "\n", + "$r_C = k_2 C_B$\n", + "\n", + "Here, $C_A$, $C_B$, and $C_C$ are the concentrations of each species. The rate constants $k_1$ and $k_2$ depend on temperature as follows:\n", + "\n", + "$k_1 = A_1 \\exp{\\frac{-E_1}{R T}}$\n", + "\n", + "$k_2 = A_2 \\exp{\\frac{-E_2}{R T}}$\n", + "\n", + "$A_1, A_2, E_1$, and $E_2$ are fitted model parameters. $R$ is the ideal-gas constant and $T$ is absolute temperature.\n", + "\n", + "Using the **CCSI$^2$ toolset**, we would like to perform **uncertainty quantification** and **design of experiments** on a small-scale **batch reactor** to infer parameters $A_1$, $A_2$, $E_1$, and $E_2$." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Batch reactor\n", + "\n", + "The concentrations in a batch reactor evolve with time per the following differential equations:\n", + "\n", + "$$ \\frac{d C_A}{dt} = r_A = -k_1 C_A $$\n", + "\n", + "$$ \\frac{d C_B}{dt} = r_B = k_1 C_A - k_2 C_B $$\n", + "\n", + "$$ \\frac{d C_C}{dt} = r_C = k_2 C_B $$\n", + "\n", + "This is a linear system of differential equations. Assuming the feed is only species $A$, i.e., \n", + "\n", + "$$C_A(t=0) = C_{A0} \\quad C_B(t=0) = 0 \\quad C_C(t=0) = 0$$\n", + "\n", + "When the temperature is constant, it leads to the following analytic solution:\n", + "\n", + "$$C_A(t) = C_{A,0} \\exp(-k_1 t)$$\n", + "\n", + "$$C_B(t) = \\frac{k_1}{k_2 - k_1} C_{A,0} \\left[\\exp(-k_1 t) - \\exp(-k_2 t) \\right]$$\n", + "\n", + "$$C_C(t) = C_{A,0} - \\frac{k_2}{k_2 - k_1} C_{A,0} \\exp(-k_1 t) + \\frac{k_1}{k_2 - k_1} \\exp(-k_2 t) C_{A,0} = C_{A,0} - C_{A}(t) - C_{B}(t)$$" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from pyomo.contrib.doe.example.reactor_kinetics import create_model, disc_for_measure" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Step 2: Define inputs" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# Control time set [h]\n", + "t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] \n", + "# Define parameter nominal value \n", + "parameter_dict = {'A1': 84.79, 'A2': 371.72, 'E1': 7.78, 'E2': 15.05}" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "measurement names: ['C[CA,0]', 'C[CA,0.125]', 'C[CA,0.25]', 'C[CA,0.375]', 'C[CA,0.5]', 'C[CA,0.625]', 'C[CA,0.75]', 'C[CA,0.875]', 'C[CA,1]', 'C[CB,0]', 'C[CB,0.125]', 'C[CB,0.25]', 'C[CB,0.375]', 'C[CB,0.5]', 'C[CB,0.625]', 'C[CB,0.75]', 'C[CB,0.875]', 'C[CB,1]', 'C[CC,0]', 'C[CC,0.125]', 'C[CC,0.25]', 'C[CC,0.375]', 'C[CC,0.5]', 'C[CC,0.625]', 'C[CC,0.75]', 'C[CC,0.875]', 'C[CC,1]']\n" + ] + } + ], + "source": [ + "### Pyomo.DoE define measurements. Measurements have at most 1 index besides the time index\n", + "variable_name = \"C\"\n", + "indices = {0:['CA', 'CB', 'CC'], 1: t_control}\n", + "\n", + "measure_class = MeasurementVariables()\n", + "measure_class.add_variables(variable_name, indices=indices, time_index_position = 1)\n", + "print(\"measurement names:\", measure_class.variable_names)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Design variable names: ['CA0[0]', 'T[0]', 'T[0.125]', 'T[0.25]', 'T[0.375]', 'T[0.5]', 'T[0.625]', 'T[0.75]', 'T[0.875]', 'T[1]']\n" + ] + } + ], + "source": [ + "design_gen = DesignVariables()\n", + "\n", + "var_C = 'CA0'\n", + "indices_C = {0:[0]}\n", + "exp1_C = [5]\n", + "\n", + "# add design variable\n", + "design_gen.add_variables(var_C, indices = indices_C, time_index_position=0,\n", + " values=exp1_C,lower_bounds=1, upper_bounds=5)\n", + " \n", + "\n", + "var_T = 'T'\n", + "indices_T = {0:t_control}\n", + "exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300]\n", + "\n", + "design_gen.add_variables(var_T, indices = indices_T, time_index_position=0,\n", + " values=exp1_T,lower_bounds=300, upper_bounds=700)\n", + "print(\"Design variable names:\", design_gen.variable_names)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "param_dict = {'A1': 84.79, 'A2': 371.72, 'E1': 7.78, 'E2': 15.05}\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Method: Compute FIM \n", + "\n", + "This method computes an MBDoE optimization problem with no degrees of freedom." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "# choose from 'sequential_finite', 'direct_kaug'\n", + "#sensi_opt = \"direct_kaug\"\n", + "sensi_opt = \"sequential_finite\"\n", + "\n", + "# Define experiments\n", + "design_names = design_gen.variable_names\n", + "exp1 = [5, 470, 300, 300, 300, 300, 300, 300, 300, 300]\n", + "exp1_design_dict = dict(zip(design_names, exp1))\n", + "\n", + "design_gen.update_values(exp1_design_dict)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ipopt 3.13.2: linear_solver=ma57\n", + "halt_on_ampl_error=yes\n", + "max_iter=3000\n", + "\n", + "\n", + "******************************************************************************\n", + "This program contains Ipopt, a library for large-scale nonlinear optimization.\n", + " Ipopt is released as open source code under the Eclipse Public License (EPL).\n", + " For more information visit http://projects.coin-or.org/Ipopt\n", + "\n", + "This version of Ipopt was compiled from source code available at\n", + " https://github.com/IDAES/Ipopt as part of the Institute for the Design of\n", + " Advanced Energy Systems Process Systems Engineering Framework (IDAES PSE\n", + " Framework) Copyright (c) 2018-2019. See https://github.com/IDAES/idaes-pse.\n", + "\n", + "This version of Ipopt was compiled using HSL, a collection of Fortran codes\n", + " for large-scale scientific computation. All technical papers, sales and\n", + " publicity material resulting from use of the HSL codes within IPOPT must\n", + " contain the following acknowledgement:\n", + " HSL, a collection of Fortran codes for large-scale scientific\n", + " computation. See http://www.hsl.rl.ac.uk.\n", + "******************************************************************************\n", + "\n", + "This is Ipopt version 3.13.2, running with linear solver ma57.\n", + "\n", + "Number of nonzeros in equality constraint Jacobian...: 25344\n", + "Number of nonzeros in inequality constraint Jacobian.: 0\n", + "Number of nonzeros in Lagrangian Hessian.............: 2320\n", + "\n", + "Total number of variables............................: 6968\n", + " variables with only lower bounds: 2312\n", + " variables with lower and upper bounds: 784\n", + " variables with only upper bounds: 0\n", + "Total number of equality constraints.................: 6968\n", + "Total number of inequality constraints...............: 0\n", + " inequality constraints with only lower bounds: 0\n", + " inequality constraints with lower and upper bounds: 0\n", + " inequality constraints with only upper bounds: 0\n", + "\n", + "iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls\n", + " 0 0.0000000e+00 1.67e+02 1.00e+00 -1.0 0.00e+00 - 0.00e+00 0.00e+00 0\n", + "Reallocating memory for MA57: lfact (239448)\n", + " 1 0.0000000e+00 3.23e+01 3.85e+02 -1.0 1.67e+02 - 2.54e-03 9.90e-01f 1\n", + " 2 0.0000000e+00 6.16e+00 8.61e+01 -1.0 3.87e+01 - 1.11e-01 9.90e-01h 1\n", + " 3 0.0000000e+00 5.12e-02 9.97e+00 -1.0 4.16e+00 - 9.60e-01 9.90e-01h 1\n", + " 4 0.0000000e+00 1.20e-06 7.52e+01 -1.0 3.62e-02 - 9.97e-01 1.00e+00h 1\n", + "Reallocating memory for MA57: lfact (287125)\n", + " 5 0.0000000e+00 2.82e-13 1.00e-06 -1.0 7.68e-07 - 1.00e+00 1.00e+00h 1\n", + "\n", + "Number of Iterations....: 5\n", + "\n", + " (scaled) (unscaled)\n", + "Objective...............: 0.0000000000000000e+00 0.0000000000000000e+00\n", + "Dual infeasibility......: 0.0000000000000000e+00 0.0000000000000000e+00\n", + "Constraint violation....: 1.1690770500968800e-13 2.8177460364986478e-13\n", + "Complementarity.........: 0.0000000000000000e+00 0.0000000000000000e+00\n", + "Overall NLP error.......: 1.1690770500968800e-13 2.8177460364986478e-13\n", + "\n", + "\n", + "Number of objective function evaluations = 6\n", + "Number of objective gradient evaluations = 6\n", + "Number of equality constraint evaluations = 6\n", + "Number of inequality constraint evaluations = 0\n", + "Number of equality constraint Jacobian evaluations = 6\n", + "Number of inequality constraint Jacobian evaluations = 0\n", + "Number of Lagrangian Hessian evaluations = 5\n", + "Total CPU secs in IPOPT (w/o function evaluations) = 0.126\n", + "Total CPU secs in NLP function evaluations = 0.006\n", + "\n", + "EXIT: Optimal Solution Found.\n" + ] + } + ], + "source": [ + "doe_object = DesignOfExperiments(parameter_dict, design_gen,\n", + " measure_class, create_model,\n", + " discretize_model=disc_for_measure)\n", + "\n", + "\n", + "result = doe_object.compute_FIM(mode=sensi_opt, FIM_store_name = 'dynamic.csv', \n", + " read_output=None,\n", + " scale_nominal_param_value=True,\n", + " formula = \"central\")\n", + "\n", + "\n", + "result.result_analysis()\n" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "======Result summary======\n", + "Four design criteria log10() value:\n", + "A-optimality: 2.9897244624254227\n", + "D-optimality: 3.3010989022799095\n", + "E-optimality: -0.9193349136173019\n", + "Modified E-optimality: 3.8768075549543988\n", + "[[ 17.22096879 13.67125453 -37.1471375 -68.68858407]\n", + " [ 13.67125453 34.5737961 -26.37449298 -170.10871631]\n", + " [ -37.1471375 -26.37449298 81.32448107 133.30724227]\n", + " [ -68.68858407 -170.10871631 133.30724227 843.49816474]]\n" + ] + } + ], + "source": [ + "print('======Result summary======')\n", + "print('Four design criteria log10() value:')\n", + "print('A-optimality:', np.log10(result.trace))\n", + "print('D-optimality:', np.log10(result.det))\n", + "print('E-optimality:', np.log10(result.min_eig))\n", + "print('Modified E-optimality:', np.log10(result.cond))\n", + "print(result.FIM)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "['C[CB,0.125]', 'C[CB,0.25]', 'C[CB,0.5]', 'C[CB,0.75]', 'C[CB,0.875]', 'C[CC,0.125]', 'C[CC,0.25]', 'C[CC,0.5]', 'C[CC,0.75]', 'C[CC,0.875]']\n" + ] + } + ], + "source": [ + "### choose a subset of measurements, get results without resolving the model\n", + "sub_name = \"C\"\n", + "sub_indices = {0: [\"CB\", \"CC\"], 1:[0.125, 0.25, 0.5, 0.75, 0.875] }\n", + "\n", + "measure_subset = MeasurementVariables()\n", + "measure_subset.add_variables(sub_name, indices = sub_indices, time_index_position=1)\n", + "print(measure_subset.variable_names)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "======Subset Result summary======\n", + "Four design criteria log10() value:\n", + "A-optimality: 2.7312606650205957\n", + "D-optimality: 1.82134503385468\n", + "E-optimality: -1.430816119608334\n", + "Modified E-optimality: 4.147090377572721\n" + ] + } + ], + "source": [ + "sub_result = result.subset(measure_subset)\n", + "sub_result.result_analysis()\n", + "print('======Subset Result summary======')\n", + "print('Four design criteria log10() value:')\n", + "print('A-optimality:', np.log10(sub_result.trace))\n", + "print('D-optimality:', np.log10(sub_result.det))\n", + "print('E-optimality:', np.log10(sub_result.min_eig))\n", + "print('Modified E-optimality:', np.log10(sub_result.cond))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Method: Optimization\n", + "Gradient-based optimization with IPOPT with stochastic_program()\n", + "\n", + "This function solves twice: It solves the square version of the MBDoE problem first, and then unfixes the design variables as degrees of freedom and solves again. In this way the optimization problem can be well initialized. " + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "exp1 = [5, 500, 300, 300, 300, 300, 300, 300, 300, 300]\n", + "exp1_design_dict = dict(zip(design_names, exp1))\n", + "design_gen.update_values(exp1_design_dict)\n", + "\n", + "# add a prior information (scaled FIM with T=500 and T=300 experiments)\n", + "prior = np.asarray([[ 28.67892806 , 5.41249739 , -81.73674601 , -24.02377324],\n", + " [ 5.41249739 , 26.40935036 , -12.41816477 , -139.23992532],\n", + " [ -81.73674601 , -12.41816477 , 240.46276004 , 58.76422806],\n", + " [ -24.02377324 , -139.23992532 , 58.76422806 , 767.25584508]])\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Ipopt 3.13.2: linear_solver=ma57\n", + "halt_on_ampl_error=yes\n", + "max_iter=3000\n", + "\n", + "\n", + "******************************************************************************\n", + "This program contains Ipopt, a library for large-scale nonlinear optimization.\n", + " Ipopt is released as open source code under the Eclipse Public License (EPL).\n", + " For more information visit http://projects.coin-or.org/Ipopt\n", + "\n", + "This version of Ipopt was compiled from source code available at\n", + " https://github.com/IDAES/Ipopt as part of the Institute for the Design of\n", + " Advanced Energy Systems Process Systems Engineering Framework (IDAES PSE\n", + " Framework) Copyright (c) 2018-2019. See https://github.com/IDAES/idaes-pse.\n", + "\n", + "This version of Ipopt was compiled using HSL, a collection of Fortran codes\n", + " for large-scale scientific computation. All technical papers, sales and\n", + " publicity material resulting from use of the HSL codes within IPOPT must\n", + " contain the following acknowledgement:\n", + " HSL, a collection of Fortran codes for large-scale scientific\n", + " computation. See http://www.hsl.rl.ac.uk.\n", + "******************************************************************************\n", + "\n", + "This is Ipopt version 3.13.2, running with linear solver ma57.\n", + "\n", + "Number of nonzeros in equality constraint Jacobian...: 26424\n", + "Number of nonzeros in inequality constraint Jacobian.: 0\n", + "Number of nonzeros in Lagrangian Hessian.............: 2590\n", + "\n", + "Total number of variables............................: 7092\n", + " variables with only lower bounds: 2312\n", + " variables with lower and upper bounds: 784\n", + " variables with only upper bounds: 0\n", + "Total number of equality constraints.................: 7092\n", + "Total number of inequality constraints...............: 0\n", + " inequality constraints with only lower bounds: 0\n", + " inequality constraints with lower and upper bounds: 0\n", + " inequality constraints with only upper bounds: 0\n", + "\n", + "iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls\n", + " 0 0.0000000e+00 7.67e+02 1.00e+00 -1.0 0.00e+00 - 0.00e+00 0.00e+00 0\n", + "Reallocating memory for MA57: lfact (295796)\n", + " 1 0.0000000e+00 6.67e+02 3.85e+02 -1.0 7.66e+02 - 2.54e-03 9.90e-01f 1\n", + " 2 0.0000000e+00 1.44e+02 1.31e+02 -1.0 9.19e+02 - 9.17e-02 9.90e-01h 1\n", + " 3 0.0000000e+00 2.07e+01 1.75e+01 -1.0 1.51e+02 - 9.37e-01 9.90e-01h 1\n", + " 4 0.0000000e+00 1.53e-03 1.41e+02 -1.0 1.86e+01 - 9.94e-01 1.00e+00h 1\n", + "Reallocating memory for MA57: lfact (394529)\n", + " 5 0.0000000e+00 4.55e-13 1.00e-06 -1.0 1.56e-03 - 1.00e+00 1.00e+00h 1\n", + "\n", + "Number of Iterations....: 5\n", + "\n", + " (scaled) (unscaled)\n", + "Objective...............: 0.0000000000000000e+00 0.0000000000000000e+00\n", + "Dual infeasibility......: 0.0000000000000000e+00 0.0000000000000000e+00\n", + "Constraint violation....: 1.3069354547123093e-13 4.5474735088646412e-13\n", + "Complementarity.........: 0.0000000000000000e+00 0.0000000000000000e+00\n", + "Overall NLP error.......: 1.3069354547123093e-13 4.5474735088646412e-13\n", + "\n", + "\n", + "Number of objective function evaluations = 6\n", + "Number of objective gradient evaluations = 6\n", + "Number of equality constraint evaluations = 6\n", + "Number of inequality constraint evaluations = 0\n", + "Number of equality constraint Jacobian evaluations = 6\n", + "Number of inequality constraint Jacobian evaluations = 0\n", + "Number of Lagrangian Hessian evaluations = 5\n", + "Total CPU secs in IPOPT (w/o function evaluations) = 0.170\n", + "Total CPU secs in NLP function evaluations = 0.007\n", + "\n", + "EXIT: Optimal Solution Found.\n", + "Ipopt 3.13.2: linear_solver=ma57\n", + "halt_on_ampl_error=yes\n", + "max_iter=3000\n", + "\n", + "\n", + "******************************************************************************\n", + "This program contains Ipopt, a library for large-scale nonlinear optimization.\n", + " Ipopt is released as open source code under the Eclipse Public License (EPL).\n", + " For more information visit http://projects.coin-or.org/Ipopt\n", + "\n", + "This version of Ipopt was compiled from source code available at\n", + " https://github.com/IDAES/Ipopt as part of the Institute for the Design of\n", + " Advanced Energy Systems Process Systems Engineering Framework (IDAES PSE\n", + " Framework) Copyright (c) 2018-2019. See https://github.com/IDAES/idaes-pse.\n", + "\n", + "This version of Ipopt was compiled using HSL, a collection of Fortran codes\n", + " for large-scale scientific computation. All technical papers, sales and\n", + " publicity material resulting from use of the HSL codes within IPOPT must\n", + " contain the following acknowledgement:\n", + " HSL, a collection of Fortran codes for large-scale scientific\n", + " computation. See http://www.hsl.rl.ac.uk.\n", + "******************************************************************************\n", + "\n", + "This is Ipopt version 3.13.2, running with linear solver ma57.\n", + "\n", + "Number of nonzeros in equality constraint Jacobian...: 26544\n", + "Number of nonzeros in inequality constraint Jacobian.: 0\n", + "Number of nonzeros in Lagrangian Hessian.............: 2610\n", + "\n", + "Reallocating memory for MA57: lfact (273960)\n", + "Total number of variables............................: 7112\n", + " variables with only lower bounds: 2316\n", + " variables with lower and upper bounds: 794\n", + " variables with only upper bounds: 0\n", + "Total number of equality constraints.................: 7102\n", + "Total number of inequality constraints...............: 0\n", + " inequality constraints with only lower bounds: 0\n", + " inequality constraints with lower and upper bounds: 0\n", + " inequality constraints with only upper bounds: 0\n", + "\n", + "iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls\n", + " 0 -1.1850752e+01 7.70e+02 1.75e+00 -1.0 0.00e+00 - 0.00e+00 0.00e+00 0\n", + "Reallocating memory for MA57: lfact (303350)\n", + " 1 -1.3675099e+01 2.82e+02 1.02e+00 -1.0 1.74e+01 - 5.78e-01 5.20e-01h 1\n", + " 2 -1.4089220e+01 2.17e+01 1.63e+00 -1.0 1.13e+01 - 9.61e-01 1.00e+00f 1\n", + " 3 -1.3921907e+01 1.69e+00 3.69e+00 -1.0 6.63e+01 - 9.32e-01 1.00e+00f 1\n", + " 4 -1.3336947e+01 2.31e+01 1.65e+01 -1.0 1.77e+02 - 1.00e+00 1.00e+00f 1\n", + " 5 -1.3222146e+01 1.86e+01 1.00e+01 -1.0 1.99e+02 - 1.00e+00 1.00e+00h 1\n", + " 6 -1.3243949e+01 2.13e-01 6.39e-01 -1.0 1.01e+01 - 1.00e+00 1.00e+00h 1\n", + " 7 -1.3252911e+01 1.32e-03 1.78e-02 -1.7 5.37e-01 - 1.00e+00 1.00e+00h 1\n", + " 8 -1.3275341e+01 5.50e-02 1.07e+00 -3.8 4.82e+00 - 9.24e-01 1.00e+00h 1\n", + " 9 -1.3682468e+01 1.73e+01 2.63e+01 -3.8 8.88e+01 - 5.41e-01 1.00e+00h 1\n", + "iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls\n", + " 10 -1.4419541e+01 8.07e+01 9.14e+01 -3.8 5.70e+02 - 2.57e-01 5.52e-01h 1\n", + "Reallocating memory for MA57: lfact (318874)\n", + " 11 -1.4227603e+01 2.35e+01 2.67e+00 -3.8 7.85e+01 - 7.14e-01 1.00e+00h 1\n", + " 12 -1.4224985e+01 3.88e-01 5.16e-01 -3.8 2.90e+01 - 1.00e+00 1.00e+00h 1\n", + " 13 -1.4226481e+01 2.67e-02 5.57e-03 -3.8 6.57e+00 - 1.00e+00 1.00e+00h 1\n", + " 14 -1.4226282e+01 1.91e-05 5.93e-06 -3.8 1.11e-01 - 1.00e+00 1.00e+00h 1\n", + " 15 -1.4292847e+01 1.06e+00 7.22e-01 -5.7 4.62e+01 - 7.74e-01 1.00e+00f 1\n", + " 16 -1.4306021e+01 5.87e-02 4.67e-02 -5.7 2.09e+01 - 1.00e+00 1.00e+00h 1\n", + " 17 -1.4307820e+01 1.18e-02 2.10e-03 -5.7 9.64e+00 - 1.00e+00 1.00e+00h 1\n", + "Reallocating memory for MA57: lfact (336029)\n", + " 18 -1.4307833e+01 3.56e-04 4.99e-06 -5.7 1.70e+00 - 1.00e+00 1.00e+00h 1\n", + " 19 -1.4307833e+01 2.54e-07 3.05e-09 -5.7 4.55e-02 - 1.00e+00 1.00e+00h 1\n", + "iter objective inf_pr inf_du lg(mu) ||d|| lg(rg) alpha_du alpha_pr ls\n", + " 20 -1.4309105e+01 8.75e-04 2.56e-04 -8.6 2.68e+00 - 9.92e-01 1.00e+00h 1\n", + "Reallocating memory for MA57: lfact (366389)\n", + " 21 -1.4309111e+01 2.81e-06 7.65e-08 -8.6 1.52e-01 - 1.00e+00 1.00e+00h 1\n", + "Reallocating memory for MA57: lfact (404617)\n", + " 22 -1.4309111e+01 4.89e-10 5.90e-13 -8.6 4.63e-04 - 1.00e+00 1.00e+00h 1\n", + "\n", + "Number of Iterations....: 22\n", + "\n", + " (scaled) (unscaled)\n", + "Objective...............: -1.4309111333872041e+01 -1.4309111333872041e+01\n", + "Dual infeasibility......: 5.9023626941970669e-13 5.9023626941970669e-13\n", + "Constraint violation....: 9.7725205705501142e-11 4.8862602852750570e-10\n", + "Complementarity.........: 2.5059056106232526e-09 2.5059056106232526e-09\n", + "Overall NLP error.......: 2.5059056106232526e-09 2.5059056106232526e-09\n", + "\n", + "\n", + "Number of objective function evaluations = 23\n", + "Number of objective gradient evaluations = 23\n", + "Number of equality constraint evaluations = 23\n", + "Number of inequality constraint evaluations = 0\n", + "Number of equality constraint Jacobian evaluations = 23\n", + "Number of inequality constraint Jacobian evaluations = 0\n", + "Number of Lagrangian Hessian evaluations = 22\n", + "Total CPU secs in IPOPT (w/o function evaluations) = 0.732\n", + "Total CPU secs in NLP function evaluations = 0.032\n", + "\n", + "EXIT: Optimal Solution Found.\n" + ] + } + ], + "source": [ + "doe_object = DesignOfExperiments(parameter_dict, design_gen,\n", + " measure_class, create_model,\n", + " prior_FIM=prior, discretize_model=disc_for_measure)\n", + "\n", + "square_result, optimize_result= doe_object.stochastic_program(if_optimize=True, if_Cholesky=True, \n", + " scale_nominal_param_value=True, objective_option=\"det\", \n", + " L_initial=np.linalg.cholesky(prior))" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "======Result summary======\n", + "This optimization is solved with status: converged\n", + "C solution: 5.0\n", + "T solution:\n", + "579.3896783399584\n", + "300.0000882599865\n", + "300.00014490662824\n", + "300.00020111344645\n", + "300.00026910716207\n", + "300.000375033031\n", + "300.0005830403824\n", + "300.0011944366598\n", + "300.00407697618726\n", + "The result FIM is: \n", + " [[ 46.26165476 24.02303687 -111.13766256 -98.84248626]\n", + " [ 24.02303687 56.00005104 -41.78107761 -257.31551924]\n", + " [-111.13766256 -41.78107761 290.39184704 177.30569626]\n", + " [ -98.84248626 -257.31551924 177.30569626 1245.59268666]]\n", + "Four design criteria log10() value:\n", + "A-optimality: 3.2143791797303467\n", + "D-optimality: 6.214368093239911\n", + "E-optimality: 0.007877626244739868\n", + "Modified E-optimality: 3.1198074131079294\n" + ] + } + ], + "source": [ + "print('======Result summary======')\n", + "print('This optimization is solved with status:', optimize_result.status)\n", + "print('C solution:', pyo.value(optimize_result.model.CA0[0]))\n", + "print(\"T solution:\")\n", + "for t in t_control:\n", + " print(pyo.value(optimize_result.model.T[t]))\n", + "\n", + "print('The result FIM is: \\n', optimize_result.FIM)\n", + "print('Four design criteria log10() value:')\n", + "print('A-optimality:', np.log10(optimize_result.trace))\n", + "print('D-optimality:', np.log10(optimize_result.det))\n", + "print('E-optimality:', np.log10(optimize_result.min_eig))\n", + "print('Modified E-optimality:', np.log10(optimize_result.cond))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Method: Exploratory analysis (Enumeration)\n", + "\n", + "This method conducts exploratory analysis by enumeration. \n", + "It allows a user to define any number (dimensions) of design variables.\n", + "Heatmaps can be drawn by two design variables, fixing other design variables; \n", + "1D curve can be drawn by one design variable, fixing other design variables." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Specify user inputs" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "# Design variable ranges as lists \n", + "design_ranges = {'CA0[0]': [1,3,5], \n", + " ('T[0]', 'T[0.125]','T[0.25]','T[0.375]',\n", + " 'T[0.5]','T[0.625]','T[0.75]','T[0.875]','T[1]'): [300,500,700]}\n", + "\n", + "## choose from 'sequential_finite', 'direct_kaug'\n", + "#sensi_opt = \"sequential_finite\"\n", + "sensi_opt = \"direct_kaug\"" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The prior information FIM:\n", + " [[22.52943024, 1.84034314, -70.23273336, -11.09432962], [1.84034314, 18.09848116, -5.73565034, -109.15866135], [-70.23273336, -5.73565034, 218.94192843, 34.57680848], [-11.09432962, -109.15866135, 34.57680848, 658.37644634]]\n", + "Prior Det: 1.9558434466145787e-08\n" + ] + } + ], + "source": [ + "# add prior information\n", + "prior_pass = [[ 22.52943024 , 1.84034314, -70.23273336, -11.09432962],\n", + " [ 1.84034314 , 18.09848116 , -5.73565034 , -109.15866135],\n", + " [ -70.23273336 , -5.73565034 , 218.94192843 , 34.57680848],\n", + " [ -11.09432962 , -109.15866135 , 34.57680848 , 658.37644634]]\n", + "\n", + "print('The prior information FIM:\\n', prior_pass)\n", + "print('Prior Det:', np.linalg.det(prior_pass))" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "doe_object = DesignOfExperiments(parameter_dict, design_gen,\n", + " measure_class, create_model,\n", + " prior_FIM=prior_pass, discretize_model=disc_for_measure)\n", + "\n", + "all_fim = doe_object.run_grid_search(design_ranges, \n", + " mode=sensi_opt)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### 1D sensitivity curve" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + " CA0[0] \\\n", + "0 1.0 \n", + "1 1.0 \n", + "2 1.0 \n", + "3 3.0 \n", + "4 3.0 \n", + "5 3.0 \n", + "6 5.0 \n", + "7 5.0 \n", + "8 5.0 \n", + "\n", + " (T[0], T[0.125], T[0.25], T[0.375], T[0.5], T[0.625], T[0.75], T[0.875], T[1]) \\\n", + "0 300.0 \n", + "1 500.0 \n", + "2 700.0 \n", + "3 300.0 \n", + "4 500.0 \n", + "5 700.0 \n", + "6 300.0 \n", + "7 500.0 \n", + "8 700.0 \n", + "\n", + " A D E ME \n", + "0 918.207526 5.129865 0.002829 2.402240e+05 \n", + "1 917.979819 0.052028 0.000288 2.358410e+06 \n", + "2 917.951300 0.000610 0.000020 3.457451e+07 \n", + "3 920.297448 415.446336 0.025426 2.676791e+04 \n", + "4 918.248082 4.208215 0.002590 2.624381e+05 \n", + "5 917.991412 0.048511 0.000174 3.907348e+06 \n", + "6 924.477291 3205.559576 0.070438 9.688727e+03 \n", + "7 918.784607 32.467061 0.007192 9.455956e+04 \n", + "8 918.071634 0.373747 0.000482 1.408681e+06 \n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAawAAAEhCAYAAAAj5pSqAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAA8YklEQVR4nO3deZxUxbn/8c932IdBEIZ9X9ziEtTRuBC3uCf3inGL/iKiRmNwidEYReMNMRpzr0YvxpgoScDE4BI1MepVjCSaiKiAElwRZVFB2UFhhtXn90dVw5mmu2d6GKanZ57369Wvps+pU119Zuhnqk6demRmOOecc41dSaEb4JxzztWGByznnHNFwQOWc865ouAByznnXFHwgOWcc64oeMByzjlXFDxguXohaaQkSzw2SHpf0k8ltW0EbTuvkG1oKiT1lbQ5/nzLC9iOMZKOyrB9gqT5BWhPtfeVNCD+PxiZ2Oa/h9vJA5arb6cBBwNfBSYBo4FbCtoiGAn4F0X9GEH43mgFnFnAdvwI2CZgAT8BTm7gtmTyMeH/wZOJbSPx38Pt0rLQDXBNzkwzey/++2+SdgHOl/RdM/u8kA2rT5LamNn6QrejAEYAbwA7AecAvyhsc6ozs/cL3QaA+LvxUqHb0dR4D8vtaK8C7YAtw0eSSiX9t6R5cWhpnqTrJJUkyrSVdLukNyStkfSJpMcl7Z7+BpIGSvpDLLNe0lxJY+O+54DDgUMTw5XPJY49UNKz8T3WSpos6cC0+idI+kjSwZJelFQF/E+uDy3pZElTYr2fSnpF0n8m9l8iaaqkFZJWSXpJ0lfT6mgp6SdxaHWdpGWSXpA0LK3cBZL+nSjzW0mdc7WvLiQdDOwK/B74A7C/pD1reWwrSTdKmh9/5vPj61aJMqlhtFGSbpO0RFKlpCckDUiUSy3Pc13iZzom7ss2NHeRpJvj78hnku6Lv4dDJE2KP6f3JJ2T1u4h8XdrnqSq+Lv1K0k71/B5qw0JZvs9lLR//PdJGepI/d61qM05bg68h+V2tAHAamA5hC9hwlDhFwjDN68DBwHXA52BK+NxbYAOwI2E4ZXOwCjgJUm7m9knsb6BwCtAJWGYaA7QFzg21jMKuA9oAXw7bvs0HrsP8DzwFmG4xoBrgOclHWRm/058jo7AA8CtwLVAVbYPLOlS4A7gL4ReyBpgv3gukuflN8B8wv/D/wCekHSimT0Vy1wNfA+4DphJ6NVUxHOReq+fxXN2B3AV0Dues70kHWJmm7O1sw7OAT4H/giUxXaNiO2syb3A6cBPgRcIw2U/BAYBZ6WVHU34vOcC3eIxz0ja08w2xmOnAhOAu+MxH9Xw/qOB5+Jn+ALhD47PgX2BcYSf63eA8ZKmm9mb8bhese7LgZWxvdcC/xfbUVsZfw/N7C1J0+K2x1KFJXUinK//qeefYXEzM3/4Y7sfbP3C343wBbwzYbx+E3BJotzZsdxhacdfB2wAumWpvwVQCnwGfC+x/feEgNArR9ueA17IsP1hYBXQKbFtJ2AF8Ghi24TY5pNqcR52im18tKayiWNK4jl7Bngssf2JXPUQgt5m4L/Sth8a2zu8Hn++bQhf2JMS26YCC4GSGo7dK7ZnTNr2H8bt+yQ+jxH+gChJlEt9nvMT2wy4McN7TQDmp50jA/6eVu7RuP2biW07x9/XH+X4LC2BYfHYfWvxviNr8Xs4Mv4c+ye2XRbb0qe+foZN4eFDgq6+vQNsJHzp/xa428zuTOw/HlgAvBiHvFrGXtczhAv5B6UKSjpd0suSVhH+864l/GW/W6K+Y4EnzGxRHdp6WDx2VWqDmX0K/JUwfJO0iRBAanJIbOM9uQrFoaAnJC2OdW8EjqH6Z5sGnCjpJknDJLVOq+YYQrD7Y9q5fJnQizwsx/uXJI9RYjg2i5OAToQ/EFLuJfRAjq7h2FQ77kvbnnqdfq4ftsT1TjObQujl5NOjSfdU2ut34vOkxPusBJYQeugASGot6VpJ78Sh4I3Av+Lu5M9qezxA+MPpgsS2bwNPmllNPcdmxQOWq28nAwcAJwLPAqMkjUjs7wb0J/zHTz5eifu7AEj6D+BB4G3CkNGXYr1LgeQ0+S7UPByUTWfCcGO6Twh/bSctsdoNzXSJz1nbJKkvMDm+/6WEIHcA8DTVP9tPCcOc/0n4klwuaby2TifvFp/fY9vzuVOiLZn8Lq3872r4XOcQhl3/IalTHLKaFI89J9eBbB3CTD/Xn6TtT1mcoY7FhOHOulqZ9npDju3Jn8HNwBhCcP0qcCDw9bivXm7XMLN1wHjC5KSWkr5MGLb8dX3U35T4NSxX396wOEtQ0t+BWcAtkh4xs7WEa1nzCOPzmcyPz98A3jOzkakd8QJ9+pfbMur+RbYC6JFhe4+4L6m2eXiWxefehNl0mRxPuCZ2evIvaEml1d4wXK/5b+C/JfUAvgbcRhgaPYN4XZDQy0z/4iWxP5MxQLLnuyxLOSR1j+/RkjAEmO5kSR3M7LMsVaTOZQ8gOYsvde7T29k9Qx3dCde1Gto3gN+b2Y2pDZLKdsD7/Aq4gtCTPZnw/2BSrgOaIw9Ybocxs/WSriJcTB5FuB/raeAUYI2ZvZPj8FLCUFnS2YRrWUnPAF+X1NPMMvWWANYTJnCkex74avLLVlIHwgSI53K0LZcXCdfULiT7F04qMG1MbZC0K+FaTcaemYVJJr+RdCLhmhDA3wgTB/qZ2d/yaaSZzWfrHwc1+Sbhu+I7bB1KS/ki8L+E+++y9dKej8/fAG5KbP9/8fmfaeVPlTQmNSwo6VCgD+GaWcoGwuzTHa2UxM8pOreOdWX7PcTM3pf0DGHizFDgBmtCt4HUFw9Ybocys7/GWVDfl3QnYYbZucBkST8H/g20BgYThr6Gm1klIbANl3Q74drR/oQL0avS3uJHhKGaFyX9lDA81hs43sy+Gcu8RRiaPIPwF/5nZjabMEvxa7Et/03oRV1N+JK6oY6f9zNJo4FfSHokft7PCF9C68zsF4Sh0k3A7+M56An8GPiAxDC9pMfi+XmV0IPal9A7uzu+1/ux3XdK2o0QGNYRrsEcA/zGzP5Rl8+RZgShV3y3xRkBiTb+C/gBYVgwY8Ayszcl3Q+MidfYXiRcj7oeuN/MZqUd0gH4i6S7ga6EYbk5VL9+9hbhj42nCedmUR2vY9bkaeAcSa8Tfre+ThjCrYtsv4cpdxH+uKvNEG3zVOhZH/5oGg+2zhIckmHfsXHf9+LrtoQhqXcIf3WuIEwwGAO0jGVKCNOzFxGunTxP+MKeD0xIq38wcD9hWGs9MBe4PbG/B2Ea8mexHc8l9n2JEEDWECZ1TAYOTKt/AvBRnufjVMLkhyrCBIiXga8l9p8eP/864E1C72MC1WeaXUm4+XR5rGd2PEet0t7r7FhubfwcbxOG+7Z7hlk85wZcn6PMTYSe3sAcZVrFn+cCwhfygvi6VaLMgPheowhDn0vjz/7J9LoJvdEZ8fxtmYGY4Rym6vxW2vFj4vaWadvnA/clXpcTJkWsjI8/Eq43ps8AzPa+yTJZfw/j/hbxZ/inQv9/bqwPxRPlnHMFFW8OngdcYGa/KXBzGpykYwhD3Eeb2eRCt6cx8iFB55wrIEmDCTck3w686sEqO5/W7pxzhXU94T6x9YTrhS4LHxJ0zjlXFLyH5ZxzrigU9BpWnP77dcISJ6nl+EebWbYbLlPHnU5YgHJXwkyiO83slrQyrQlrlZ1NWD5mMXCrmd2RKLMTYabSqYRVAT4ErjWzhxJlRhHujehJmM11uZmllmbJqry83AYMGFBTMeeccwkzZsxYZmZdM+0r9KSLIwj3HkwDRLj35VlJXzCz9JUGAJB0AjCRcE/O08AewDhJVVZ9zbr7CfejXEi4h6M7iRsN46oJzxCmqp5OuGGzDyFwpsqcAYwlTLN9IT4/Fdv3Qa4PNmDAAKZPn167s+Cccw4ASQuy7mtM17DikierCTePPp6lzESgnZmdnNh2KeHmxX5mZpKOBf4EDDazjEvOSLqQkEpidzPbkKXMy8AsM7sgsW0OYXHO0bk+S0VFhXnAcs65/EiaYWYVmfY1tmtYHQhtyrQuWkobws2CSVWE3lH/+Ho4odd2RUyANkfSHWlrgA0HphBWJPhE0luSxsSeV2pIcX9CLyzpGbLc6S7pQknTJU1funRpDR/VOedcPhpbwBpLWOByao4ykwhL9hwbUyTsytakfz3j8yBCzpovEtatu4SwpM2ERD2DCOuftSIs7XM9cBFhGRgId7i3YNuVoxeTecFUzOweM6sws4quXTMOwTrnnKujQl/D2kLSbYQgM8xyp3EYR1iK5zFCsPmUEOjGEJKgQQjEBpxlZqtj/ZcAkyR1N7PFscwSwl31m4EZkroAt8cFW1PSx0yVYZtzzrkdrFH0sOICp2cCR5nZ3FxlLbiakCSvP6G3k8qlND8+fwwsTAWr6O343C9R5t204Pg2YeHTcsK6dJvZtjfVjcz5epxzzu1ABQ9YksYSEvQdZbnTTVRjZpvNbGGcMHEmMNXMlsTdU4Beadesdo3PCxJlhqRlWt2VsNjmsljvDMKq10nHEFabrnd/eW0hh/7s7wy85kkO/dnf+ctrmVIPOedc81TQgCXpl4RUE2cCKyX1iI+yRJmbJU1OvC6X9B1Je0gaGgPeacDliaonEla4Hi9pz5hPZyxhdl8qqP2KkAxwrKTdJB1HSPFwl22dOnkbMFLSt+L7jSXc01XvmUD/8tpCRj/6OgtXVWHAwlVVjH70dQ9azjkXFbqHNYowM3AyYYgu9fh+okxPwjWrpBGEWYBTgD2BI8wsNSyIma0BjiZkdZ0GPERIT3FeosyHhLQX+xMmevyakIPmukSZBwmB8IexzDDgRDPLep9AXd0yaTZVG6tfuqvauJlbJs3OcoRzzjUvBZ10YWaqRZmRaa+XEZK/1XTcbEJAylXmJWpIxmZmdxFubt6hFq2qymu7c841N4XuYbmoV6fM2b6zbXfOuebGA1YjcdVxu9GuVYtttp80tFcBWuOcc42PB6xGYvi+vbn563vTu1M7BPTs2JaeHdvyh5cW8O7izwrdPOecK7hGtZZgU1IfawkuXFXFyb+cQqsWJfz54kPo1qFtPbXOOecap2JaS9Al9O7Ujt+NPICVlRs4f8J0KjdsKnSTnHOuYDxgNXJ79e7IL87clzcXreay+19j8+feI3bONU8esIrAV/bozo//c0+efXsJNzz+Jj6M65xrjhrN4rcut7MPHsAHKyoZ96959OvSnvOHDSx0k5xzrkF5wCoio0/Yg49WVnHjk2/Ru1M7jt8rY5YT55xrknxIsIiUlIjbzxjK0L6duPzB15j54apCN8k55xqMB6wi07ZVC8aNqKBbh7Z8695pfLiistBNcs65BuEBqwiVl7Vh/LkHsHGzMXL8K6yu3FjoJjnn3A7nAatIDe5axj1n78+HK6r49n3TWb8pV5Jm55wrfh6witiXBnXhltP24aW5K7jmkdd9urtzrknzWYJF7qShvflwRSW3PvMufTuXcsUxu9Z8kHPOFSEPWE3AxUcO4YMVldwxeQ59d27HaRV9C90k55yrdx6wmgBJ3HTy3ixatY7Rj75Or07tOHRIeaGb5Zxz9cqvYTURrVqUcNc392Nw1zIuum+GpyRxzjU5HrCakJ3atuJ35x5A21YtOHf8NJZ8tq7QTXLOuXrjAauJ6d2pHeM9JYlzrgnygNUEeUoS51xT5AGrifKUJM65pqagAUvSaEnTJH0qaamkxyXtVYvjTpc0U1KlpAWSrspQprWkGyTNk7Re0geSLkvsHynJMjzaJsqMybD/k/o7AzvW2QcP4IIvD+TeqQv43ZT5hW6Oc85tl0JPaz8CuAuYBgi4AXhW0hfMbEWmAySdAEwELgOeBvYAxkmqMrM7E0XvB/oCFwJzgO5Au7TqKoHByQ1mlj5TYXZsZ0pRrYGUTEnSZ+d2HLenpyRxzhWnggYsMzsu+VrS2cBq4FDg8SyHnQ08bmZ3xddzJd0MXC3pl2Zmko4FjgYGm9myWG5+5iZYTT2mTbUo02ilUpJ8Mu4lvvvAazxw4cEM7dup0M1yzrm8NbZrWB0IbVqZo0wbIL0XVAX0AfrH18MJvbYrJH0kaY6kOySVpR3XLg4pfiTpCUn7Zni/QZIWxqHFByQNytYwSRdKmi5p+tKlS3N8hIblKUmcc01BYwtYY4GZwNQcZSYBwyUdK6lE0q7AlXFfz/g8CBgGfBE4BbgEOB6YkKhnNnAecBJwJiEITpG0S6LMy8BI4ATgAqAH8KKkLpkaZmb3mFmFmVV07dq1Np+3wXhKEudcsWs0AUvSbYQgc4qZ5bpONA64A3gM2AC8BDwQ96WOKwEMOMvMXjazSYSgdYqk7gBmNtXM7jWzmWb2L+AM4H3g0tQbmdlTZvaQmc0ys2eBr8W6z6mfT92wPCWJc66YNYqAJel2Qi/nKDObm6usBVcDZYQhwB7AK3H3/Pj8MbDQzFYnDn07PvfLUu9mYDqwS6b9scwa4M1cZRo7T0ninCtWBQ9YksYCZxGC1Tu1Pc7MNpvZQjPbQAh2U81sSdw9BeiVds0qlXdjQZZ2CNiHEOyytbUtsHuuMsXgpKG9+f6xu/Ln1xZy+7NzCt0c55yrlYLOEpT0S8Ksv+HASkmpOddrYm+GOAPwQDP7SnxdDpwGPEeYgHFufH14ouqJwPXAeEljgE6E62MPp4KapB8RhhPnADsRpsnvA3wn0b5bCbMVPwC6xTrbA/fW20kokGRKkn6dSzl1/z6FbpJzzuVU6B7WKMLMwMmEXkvq8f1EmZ6k3SsFjCDMApwC7AkcYWapYcHU0N3RQMdY7iHgecIki5ROwD2EocJngN7AYcl6CDMP7ydM0HgUWA8cZGYZe2nFJJWSZNiQcq55ZBZT3ltW80HOOVdA8msYO0ZFRYVNnz690M2o0afrNnLar6ayaHUVj3znEHbt3qHQTXLONWOSZphZRaZ9he5huQLzlCTOuWLhAct5ShLnXFHwgOUAT0ninGv8PGC5LZIpSX7yxFuFbo5zzlVT6NXaXSNz9sED+GBFJeP+NY9+nUs5b9jAQjfJOecAD1gug1RKkp88+Ra9PSWJc66R8CFBt41USpKhfTvx3QdeY+aHqwrdJOec84DlMkulJOnaoY2nJHHONQoesFxW5WVtGD/yQE9J4pxrFDxguZyGdPOUJM65xsEDlquRpyRxzjUGPkvQ1cpJQ3vz4YpKbn3mXfp2LuWKY3at+SDnnKtHHrBcrXlKEudcIXnAcrWWSkmyaNU6rnlkFj07tuXQIeWFbpZzrpnwa1guL61alHDXN/djcNcyLrpvBu8u/qzQTXLONRMesFzePCWJc64QPGC5OvGUJM65huYBy9WZpyRxzjUkD1huu3hKEudcQ/FZgm67eUoS51xD8IDl6oWnJHHO7WgFHRKUNFrSNEmfSloq6XFJe9XiuNMlzZRUKWmBpKsylGkt6QZJ8yStl/SBpMsS+0dKsgyPtmn1jIp1rJM0Q9KX6+fTNy2eksQ5t6MV+hrWEcBdwCHAUcAm4FlJnbMdIOkEYCJwD7AXMAr4nqRL0oreDxwPXAjsBpwGzEorUwn0TD7MbMscbUlnAGOBnwL7Ai8CT0nqV4fP2uR5ShLn3I6kfBcylbQvcD1wGNAJONDMXpX0U+CfZvZ0nRsjlQGrgeFm9niWMhOBdmZ2cmLbpcAPgH5mZpKOBf4EDDazZVnqGQncaWZlOdrzMjDLzC5IbJsDPGxmo3N9loqKCps+fXquIk3We0vWcMqvXqS8rDWPfudQOpa2KnSTnHNFQtIMM6vItC+vHpakYcBUYHdCLyd5/OfARXVtZNQh1rkyR5k2QPqdqlVAH6B/fD0cmAZcIekjSXMk3REDYlK7OKT4kaQnYjAGwpAisD/wTNoxzxB6hC4LT0ninNsR8h0S/BkwCdgTuCJt36vAftvZnrHATEJQzGYSMFzSsZJKJO0KXBn39YzPg4BhwBeBU4BLCMODExL1zAbOA04CziQEwSmSdon7y4EWwOK0918MZJxRIOlCSdMlTV+6dGnuT9rEJVOSjPaUJM65epDvLMH9gK/HYbf0b6BlQNe6NkTSbYQgM8zMcv1JPg4YDDwGtAI+JQS6MUDquBLAgLPMbHWs/xJgkqTuZrbYzKaSCIySXiQEy0uBLZMzYj3VmpphWyhodg/h2hoVFRXN/hs6PSXJ9zwliXNuO+Tbw1oHlGbZ15Nw/Slvkm4n9HKOMrO5ucpacDVQRhgC7AG8EnfPj88fAwtTwSp6Oz5nnDARg+R0INXDWkYIgOm9qW5s2+tyWVx85BBOr+jD2MlzeHjGR4VujnOuiOUbsF4ALpfUIrEt1ZM4H/h7vg2QNBY4ixCs3qntcWa22cwWmtkGQrCbamZL4u4pQK+0a1apP+8XZGmHgH0IwY5Y7wzgmLSixxBmC7paSKUkGTaknGsemcWU9zLOgXHOuRrlG7CuJwwL/jv+24BzJP0DOAj4cT6VSfolcC4h4KyU1CM+yhJlbpY0OfG6XNJ3JO0haWgMeKcBlyeqnggsB8ZL2lPSoYRhw4dTQU3SjyQdJ2mQpKHAbwkB69eJem4DRkr6Vny/sUCvtDKuBp6SxDlXH/IKWGb2b8J09sXAdYTrOan7nw43s9l5vv8owszAyYSeTerx/USZnoRrVkkjCLMApxAmgBxhZqlhQcxsDXA00DGWewh4njDJIqUT4XrT24SZf72Bw9LqeZAQCH9IuL41DDjRzDL20lx2npLEObe98r4Pa8uBYUWIzsAqM/M7RNM05/uwcnlj4WpOv3sqg7uW8eC3D6K0ta8O5pzbqt7uw0oys3VmtsiDlcuHpyRxztVVvjcOXy3pF1n23ZFpTT/n0nlKEudcXeTbwzqXbdfjS5kZ9ztXo7MPHsAFXx7IhBfn87sX5hW6Oc65IpDvBYR+wJws++aydWkk52rkKUmcc/nIt4dVSZhNl0kfYP32Ncc1J6mUJF/s4ylJnHM1yzdg/Qu4SlKb5Mb4+sq437laa9uqBb85x1OSOOdqlm/AGkNYuuhdSTfF5IY3Ae/G7f9Vz+1zzUB5WRvGjzyQjZuNkeNfYXXlxkI3yTnXCNXlxuEjCcsbXQ3cGZ/nEW7e/Xe9t9A1C+kpSTZs+rzQTXLONTJ534dlZq+Y2WGEFSr6AB3M7Agz87tk3XZJpiS55pFZnpLEOVdNnZcZMLMqQuJE5+qNpyRxzmVTp4Al6YvAbkDb9H1m9vvtbZRr3i4+cggfrKhk7OQ59O1cyqn79yl0k5xzjUBeAUtSJ+BJwsrsEBa/heoJDT1gue2SSkmyaNU6rnlkFj07tuXQIeWFbpZzrsDyvYb1U6ALYcV2AScDRwF/JNw4fGC9ts41W6mUJIO6tveUJM45IP+AdRwhaL0UX39kZs+Z2QjgWeC79dk417zt1LYV48890FOSOOeA/ANWT2BuTCe/jjBTMOVR4Kv11TDnAHp3asf4kQewsnID50+YTuWGTYVuknOuQPINWJ8QEh9CuBfr4MS+IfXRIOfSeUoS5xzkH7BeYGuQ+gPwI0l3x1T3twCT6rNxzqV4ShLnXL7T2n8M9Ir/voUwAeMMoBT4K3Bp/TXNuerOPngAH6yoZNy/5tGvcynnDRtY6CY55xpQXgHLzN4H3o//3khY8PbKHdAu5zLylCTONV+1HhKU1FrSnyUdtiMb5FwunpLEuear1gHLzDYAR+dzjHM7gqckca55yjf4TGHrKhfOFYynJHGu+ck3YF0JnC/pEkl9JLWQVJJ85FOZpNGSpkn6VNJSSY9L2qsWx50uaaakSkkLJF2VoUxrSTdImidpvaQPJF2Wpb4zJZmkJ9K2j4nbk49P8vmMbsfxlCTONS/5BqzXgcHAWMJ9WBuAjYnHhjzrOwK4CziEsMTTJuBZSZ2zHSDpBGAicA+wFzAK+J6kS9KK3g8cD1xIWKj3NGBWhvoGEWY8ZsuWPJtww3TqsXftPpprCJ6SxLnmI99p7TdQfaHb7WJmxyVfSzobWA0cCjye5bCzgcfN7K74eq6km4GrJf3SzEzSsYTrbYPNbFksNz+9IkmtCIHtOkJiykwrrG4yM+9VNWKeksS55qHGgCVpLnCymf3bzMbs4PZ0IPT6VuYo04awLFRSFSGZZH9CYBoOTAOukDQi7n8KuNbM1iSOuwmYb2b3Sjoyy/sNkrSQ0Ht8OdYxN58P5XY8T0niXNNXmyHBAYQg0RDGAjOBqTnKTAKGSzo2Xjfbla33gvWMz4OAYcAXgVOASwjDgxNSlcRe2BnARTne62VgJHACcAHQA3hRUpdMhSVdKGm6pOlLly7NUa2rb6mUJMOGlHPNI7OY8t6ymg9yzhWVRjNFXdJthCBzSlxcN5txwB3AY4Rez0vAA3Ff6rgSwtDlWWb2splNIgStUyR1l1ROCF7nmFnW3pyZPWVmD5nZLDN7FvharPucLOXvMbMKM6vo2rVr7T64qzeeksS5pq22AWuHXsmWdDtwJnBUTcNtFlwNlBGGAHsAr8Td8+Pzx8BCM1udOPTt+NyPMFmjJ2GCxyZJm4ARwInx9W5Z3nsN8CawS54f0TUQT0niXNNV20kXP5ZUmzEWM7OMvY9sJI0FvgEcYWbv1Pa42AtbGOs4E5hqZkvi7inAaZLKEtesUlfiFwBr2Xa2343AzsDFwLwsbW0L7A78o7btdA0vlZLk9Luncv6E6Tz47YMobZ3v/CLnXGNT2//FQ4H1tSiXV08srvJ+NmGSxEpJqYXh1qQCTZwBeKCZfSW+LidMUX+OcG3t3Pj68ETVE4HrgfGSxhBSoowFHk4EtTfS2rIKaGlmbyS23UqYrfgB0C3W2R64N5/P6RpeKiXJBb+fzmX3z+Tus/enRYkK3Szn3Hao7ZDgcDMbWIvHoDzffxRhZuBkwjBe6vH9RJmehHu/kkYQZgFOAfYk9M5Sw4KpobujgY6x3EPA88B5ebavD2Ha+2xCgsr1wEFmtiDPelwBbE1JsthTkjjXBBR0nMTMavyT18xGpr1eRvXEkdmOmw0cm0dbRmbY9o3aHu8aJ09J4lzT4QP7rskbfcIefLjCU5I4V+wazbR253YUT0niXNNQY8Ays5Lk9SHnilG71p6SxLli5z0s12x4ShLnipsHLNeseEoS54qXByzX7HhKEueKk88SdM2SpyRxrvh4wHLNlqckca64bHfAkrQfYUFZgA/M7NXtrdO5hpBKSbJo1TqueWQWPTu25dAhmXJ4Oucagzpfw5K0v6R3gD8C3yEss3S/pNmS9q+vBjq3I3lKEueKx/ZMurgHuMTM9jCz48zsWDPbjZB3alz9NM+5Hc9TkjhXHLYnYJXFpIbVmNnfCCuaO1c0UilJVlZu4Fv3Tqdyw6ZCN8k5l2Z7AtYSSedKapHaIKmFpG8Bnp/cFZ1USpI3Fq7msvtnsvlzn+7uXGOyPQHrHEKW4BWS3pb0NrACOIMsKeSda+w8JYlzjVedZwnGVPbHxoSKyVmC3rtyRc1TkjjXOG33tPYYoKoFKUndEpl9nSs6npLEucZnRy3NNH0H1etcg/CUJM41PnXuYUn6zxy729a1Xucai1RKkpPvmsK37p3Gn0cdSt/OpYVulnPNluq68KekzcDzQKY09weZWbvtaVixq6iosOnTvaPZFLy3ZA2n/OpFunZowyMXHULH0laFbpJzTZakGWZWkWnf9gwJzgHON7Mj0x/4tHbXhKRSknywvNJTkjhXQNsTsO4Fsi289uvtqNe5RsdTkjhXeNszrf3mHPtuqmu9zjVWnpLEucLKq4cl6XJJh0nqUB9vLmm0pGmSPpW0VNLjkvaqxXGnS5opqVLSAklXZSjTWtINkuZJWi/pA0mXZanvTEkm6YkM+0bFOtZJmiHpy3X7tK4puPjIIZxe0Yexk+fw8IyPCt0c55qVfHtYPwNaASbpfWAG8Grq2cxW51nfEcBdwDTC5I0bgGclfcHMVmQ6QNIJwETgMuBpYA9gnKQqM7szUfR+oC9wIeF6W3dgm4kgkgYBtwD/yrDvDGAsYSX6F+LzU7F9H+T5WV0T4ClJnCucvGYJxi/3vxGWYHqPEBC+CJQCBvybsIr7ODPL+8q0pDJgNTDczB7PUmYi0M7MTk5suxT4AdDPzEzSscCfgMG5Vt6Q1IoQiO4CjgTKzexrif0vA7PM7ILEtjnAw2Y2Otdn8VmCTdun6zZy6q9e5OPV63jkO4ewa/d6GXRwrtmrz1mCvwb+bGYHmNmZZjYM6A/8AlgJvAjcBEyKwSBfHWKbVuYo0wZIz/9QBfSJbQEYTui1XSHpI0lzJN0RA2LSTcB8M7s3/U0ktQb2B55J2/UMcEimhkm6UNJ0SdOXLl2a4yO4YucpSZxrePkGrGHA/yU3mNkKM7sceJTQ86oA9gS+X4f2jAVmAlNzlJkEDJd0rKQSSbsCV8Z9PePzoNjWLwKnEHJ0HQ9MSFUSe2FnABdleZ9yoAWwOG37YiDjOj1mdo+ZVZhZRdeuXXN8BNcUeEoS5xpWvgFrCbBvln0PAiPMbD5wO/D/8qlY0m2EIHOKmW3OUXQccAfwGLABeAl4IO5LHVdCGKI8y8xeNrNJhKB1iqTuccHeCcA5ZparN0esp1pTM2xzzZSnJHGu4eQbsH4P/FBSpqDVm633Zc0ABte2Ukm3E1KVHBVXgc/KgquBMsIQYA/glbh7fnz+GFiYNgnk7fjcD9iL0Bt7VtImSZuAEcCJ8fVuhJufN7Ntb6ob2/a6XDPmKUmcaxj5zhL8CbAP8JKkBwnDg58AuwE/Yuuit22AjbWpUNJY4BvAEWb2Tm0bEnthC2MdZwJTEyvETwFOk1RmZmvittRNMwuAtcDeaVXeCOwMXAzMM7MNkmYAxxAmcKQcAzxS23a65uHsgwewYHklv3nBU5I4t6PkFbDMbCPh+tGFwFXANwnDYwLeBL4dix4EvF9TfZJ+CZxNmCSxUlKqN7MmFWgk3QwcaGZfia/LgdOA5wiB8dz4+vBE1ROB64HxksYAnQjXxx5OBLU30tqyCmhpZsnttwF/kPQKIQheBPTCV/JwGVx74h58tNJTkji3o9RpaaY4uWAXYAhwGLA7sI+ZvRuLPMXWiRC5jCLMDJxMGMZLPZITNnqy7fDiCMIswCmECR5HmFlqWJAY7I4GOsZyDxEW6j2v9p8SzOxB4HLgh4TJIMOAE81sQT71uObBU5I4t2Ntz2rtrc1sQz23p8nw+7Car2Vr1nPyXVOo2rDZU5I4l6d6uw9LUktJN0laClRJWibpAUkZ70tyrjkqL2vD+JEHsnGzce6EaayurNXlXOdcDfIdErwOuIIw6eBq4I+EIbl/SbqmntvmXNHylCTO1b98A9YI4Fozu8jMbjWz75rZ3sAFwBhJJ9V/E50rTp6SxLn6le+09p6ExW6rMbPfxRUnvk+4odc5R0hJ8sHySn7+N09J4tz2yreHNQcYmmXf04SlkJxzCZccNYTT9veUJM5tr3wD1m+B/5I0NMO+PsCaDNuda9Yk8dOv782wIeVc88gspryXNYGAcy6HfAPWLwg37L4s6fcx8eGRki4g5Mp6ur4b6FxT0KpFCXd9cz8GdW3PRffNYM7izwrdJOeKTl4By8JV49OAawnJF/9IuOn3bsLKEbW5Wdi5ZimZkmSkpyRxLm81BixJR0vqnHptZp+b2c/NrB8h2++XCYkTj6/FyufONWueksS5uqtND+sZYKmkuZIeljQ65qIqN7PZZjbFzPxKsnO15ClJnKub2gSsPQn3X/0F6ApcQ7hWtVjSAkmPSvqhpBMkdd9xTXWu6fCUJM7lr8b7sMzsbUIuqT+mtsV7rvYH9ovPVxIWmjVCll7nXA08JYlz+cn3xmEA4qrs7wL3p7ZJGkIIYM65WvKUJM7VXp3Si2RiZu+Z2UP1VZ9zzYGnJHGu9uotYDnn6qZd6xb85pwKunZow7funcaHKyoL3STnGiUPWM41Ap6SxLmaecByrpHwlCTO5eYBy7lGxFOSOJddnWYJOud2HE9J4lxmHrCca4QuOWoIH6yoZOzkOfTtXMqp+/cpdJOcKzgPWM41QqmUJB+vXsc1j8yiV8e2HDKkvNDNcq6gCnoNK65LOE3Sp5KWSnpc0l61OO50STMlVcbloa7KUKa1pBskzZO0XtIHki5L7D9N0nRJqyStjfWdk1bHGEmW9vikfj69c7klU5J821OSOFfwSRdHAHcBhwBHAZuAZ5Orw6eTdAIwEbgH2AsYBXxP0iVpRe8HjgcuBHYjpEWZldi/HLgROAjYBxgP/FbSiWn1zAZ6Jh575/shnasrT0ni3FZqTLOQJJUBq4HhZvZ4ljITgXZmdnJi26XADwhpTkzSscCfgMFmVuv0rpJeBSaZ2ej4egxwqpnV2OtLV1FRYdOnT8/3MOcyemPhak6/eypDupXxwIUHUdraR/Nd0yRphplVZNpX6B5Wug6ENuXKq9UGSP8zswroA/SPr4cD04ArJH0kaY6kO2JA3IaCrxB6Yv9M2z1I0sI4tPiApEH5fSTntp+nJHGu8QWsscBMYGqOMpOA4TEnV0lcOT6V6bhnfB4EDAO+CJwCXEIYHpyQrEhSR0lrgA3Ak8BlZvZUosjLwEjgBOACoAfwoqQumRom6cJ4XWz60qVLa/WBnautr+zRnTGeksQ1Y41mXEHSbYQgM8zMNucoOg4YDDwGtAI+JQS6MUDquBJCqpOzzGx1rP8SYJKk7ma2OJb7DBgKlAFfAW6TNN/MJgOkBS8kvQTMBc4BbktvmJndQ7i2RkVFhf8J7OrdiIMH8IGnJHHNVKPoYUm6HTgTOMrM5uYqa8HVhCDTn9DreSXunh+fPwYWpoJV9HZ87peo6/O4yvxMM/s54brXtTneew3wJrBLbT+bc/Xt2hP34Pg9e/CTJ99i0ps+adU1HwUPWJLGAmcRgtU7tT3OzDab2UIz20AIdlPNbEncPQXolXbNKrVcwIIc1ZYQrpFla2tbYHdCQHSuIDwliWuuCn0f1i+BcwkBZ6WkHvFRlihzs6TJidflkr4jaQ9JQ2PAOw24PFH1RMK09fGS9pR0KGHY8OFUUJN0naSjJQ2KdV0JnA3cl3ivWyUdLmmgpC8BDwPtgXt3zBlxrnY8JYlrjgrdwxpFmBk4mdBrST2+nyjTk3DNKmkEYRbgFGBP4AgzSw0LpobujgY6xnIPAc8D5yXqKAN+RRjim0KYnDHCzH6dKNOHcD/XbOBRYD1wkJnl6qU51yA8JYlrbhrVfVhNid+H5RrKy3OXc/ZvX2G//p34/XlfonXLQv8d6lzdFdN9WM65PH1pUBf+51RPSeKavkYzrd05V3fD9+3Nhys8JYlr2jxgOddEeEoS19R5wHKuifCUJK6p82tYzjUhnpLENWUesJxrYjwliWuqPGA51wT17tSO8SMPYGXlBr5173QqN2wqdJOc224esJxrojwliWtqPGA514R5ShLXlPgsQeeauGRKkj+/tpBPqzbSq1M7rjpuN4bv27vQzXOu1jxgOdcMfKHXTpQIVleF9QYXrqpi9KOvA3jQckXDA5ZzzcDPn3mX9EtYVRs381+PvUGrFiUMKC9lYHl7Slv7V4JrvPy307lmYNGqqozbP123iYsnvrrldfed2jCgS3sGlrdnQHn7Lf/u36WUtq1aNFRzncvIA5ZzzUCvTu1YmCFo9ezYlt+cU8G8ZWuZv2wt85ZVMn/5Wv721mKWr92wpZwEvTq2Y0B56daA1iUEtX6dS32FeNcgPGA51wxcddxujH70dao2bt6yrV2rFlx9/O7s2asje/bquM0xq6s2smD5WuYtW7s1oC2v5IlZH2+5FgZQIui9czsGlpcxsEtp6JmVt2dgl/b02bkdLVt4MHP1wwOWc81AamLFLZNms2hVVa1mCXZs14p9+nRinz6dttm3cu0G5i0PQWz+srXMXbaW+cvX8uqClaxZv/Um5ZYlom/nUgbEQDYoMdTYq1M7WpSo3j+ra7o8geMO4gkcXXNkZixbs4H5y7f2yuYvX8vcpWtZsLyyWg+vdYsS+nVJDTGWMrC8bMvkj+4d2lLiwaxZypXA0XtYzrl6I4muHdrQtUMbDhjQudo+M2Pxp+tDIFueumYW/v3POUvZsOnzLWXbtioJ18jidbJkQOta1gbJg1lz5AHLOdcgJNGjY1t6dGzLwYO7VNv3+efGotVVzF9WWW2o8d0lnzH5ncVs3Lx1JKh96xbVrpOlAtqALu3p3L61B7MmzAOWc67gSkpEn51L6bNzKcN2qZ7Da9Pmz1m0ah1zl62JQ4yVzFu2ljcWrubpNz6ptkbiTm1bbjMlPxXYOpa2auiP5eqZByznXKPWMl7r6telFHarvm/Dps/5aGXllutkYaixkunzV/LXfy8ieYm+c/vWWyZ/bO2ZheeyNv5VWAz8p+ScK1qtW5YwqGsZg7qWcdTu1fet27iZD1dUbrlOlpqe/+J7y3n01YXVypaXtYkzGKsHtAFd2tOutd8w3VgUNGBJGg18nfB303rgJWC0mb1Rw3GnA9cCuwJLgTvN7Ja0Mq2BHwJnA72AxcCtZnZH3H8acDUwBGgFzAFuN7N70+oZBVwF9ATeBC43s39tx8d2zjWAtq1asEv3DuzSvcM2+yo3bGLB8sp4b1nqmlklf39nKcvWfFStbI+d2m6ZvTgwMdTYt7Ov/tHQCt3DOgK4C5gGCLgBeFbSF8xsRaYDJJ0ATAQuA54G9gDGSaoyszsTRe8H+gIXEoJRd6BdYv9y4EbgHWAj8DXgt5KWmtn/xfc6AxgLjAJeiM9PxfZ9sP0f3zlXCKWtW7JHz53Yo+dO2+z7bN1GFsTrZMmANunNxazIsPrHwNgzG1hetmXyR9/OpbTyG6brXaO6D0tSGbAaGG5mj2cpMxFoZ2YnJ7ZdCvwA6GdmJulY4E/AYDNblsf7vwpMMrPR8fXLwCwzuyBRZg7wcKpMNn4flnNNz+rKjVsCWPr0/E/Xbb1hukWJ6LNzu8QyVqkbp8vo1amtr/6RQzHdh9WBkFRyZY4ybYB1aduqgD5Af2A+MJzQa7tC0oi4/yngWjNbk16hwjzYowhDk9fFba2B/YFb04o/AxySx2dyzjURHUtbMbS0E0P7dqq23cxYsTZ1w3T1ocbp81ewdsPWG6ZbtQirf2y5TrblmlkpvTq28xumc2hsAWssMBOYmqPMJGBs7EU9S7gGdWXc15MQsAYBwwjXxU4BOgG/IFzLOjVVkaSOwEJCENwMXGxmT8Xd5UALwrWvpMXA0ZkaJulCwhAk/fr1y/1JnXNNhiS6lLWhS1kb9u+/7Q3TS9esD/eYLVuzJaDNX76WKe8vY93GrTdMt2lZQv8upRlXzO++k98w3WgClqTbCEFmmJltzlF0HDAYeIwwWeJTQqAbQwg6EHppBpxlZqtj/ZcAkyR1N7NUEPoMGAqUAV8BbpM038wmJ94vfcxUGbaFgmb3APdAGBKs4SM755oBSXTr0JZuHdpy4MDqwezzz43Fn62L18u2BrS5y9by3OylbNi8NZiVtm5B/y5bb5IekJgEUl7WPG6YbhQBS9LtwDeAI81sbq6yFi66XS3pWqAHYZbgV+Lu+fH5Y2BhKlhFb8fnfsRek5l9DrwXt8+UtAdh9uFkYBkhAPZIa0I3tu11Oedc3kpKRM+O7ejZsR2HDK6+b/PnxqJVVYnrZCGgvf3xZzzz5mI2JW6Y7tCmZWJ4sfqK+Tu3b93An2rHKXjAkjSWEKyOMLN3antc7IUtjHWcCUw1syVx9xTgNElliWtWu8bnBTmqLSEMD2JmGyTNAI4hTOBIOQZ4pLbtdM65umgRV7rv27mUL+/Stdq+TZs/56OVVdWWsZq3vJKZH67kyVmLqmWX7tiu1ZZAllxgeEB5e3ZqW1yrfxT6PqxfEu6TGg6slJTqzaxJBRpJNwMHmtlX4uty4DTgOUJwOTe+PjxR9UTgemC8pDGEa1hjCbP7lsR6rgNeBubGek6Mbbk0Uc9twB8kvUIIghcRroP9up5OgXPO5a1li5Itvaj01T/Wb9rMhyuqtlwnS81mnDZ/JY+lrf7RpX3rLdfJBnVNLTYchhzbN8LVPwrdolHxeXLa9h8TrklBmEiR1llmBHAL4XrSVELv7JXUTjNbI+lowkSLaYRZh38BrknUUQb8ijC7sIpwP9YIM7s/Uc+DkroQbkDuCbwBnGhmuXppzjlXMG1atmBItzKGdCvbZt+6jZu33mOWmJL/wntLeeTV6jdMd+vQZpsFhgeWl9G/S/Ybpv/y2sK8cq7lq1Hdh9WU+H1Yzrlisnb9pi1rMabnM1u2ZkO1sr06tt1mxfz3l67hf//2LusSaWLatWrBzV/fO6+gVUz3YTnnnCuA9m1asmevjuzZq+M2+z5dt5EFyyrjivlbA9r/vf4xqyo3Zq2zauNmbpk0u956WR6wnHPO5bRT21bs3acje/fZNpitqtzAvGVrOfmuFzMeu2hVVb21w9cHcc45V2edSluzb7+d6d2pXcb9vbJsrwsPWM4557bbVcftRru0yRjtWrXgquN2y3JE/nxI0Dnn3HZLXafakbMEPWA555yrF8P37V2vASqdDwk655wrCh6wnHPOFQUPWM4554qCByznnHNFwQOWc865ouBrCe4gkpaSO5VJLuWEfFyudvx85cfPV/78nOVne85XfzPrmmmHB6xGSNL0bIs/um35+cqPn6/8+TnLz446Xz4k6Jxzrih4wHLOOVcUPGA1TvcUugFFxs9Xfvx85c/PWX52yPnya1jOOeeKgvewnHPOFQUPWM4554qCByznnHNFwQNWA5B0saRZkj6Nj6mSvprYL0ljJC2SVCXpOUl7ptXRRtIvJC2TtFbSXyX1afhP0/AkXSvJJN2Z2ObnLIrnwdIenyT2+7lKI6mnpHslLZW0TtJbkg5P7PdzFkman+H3yyQ9Gfc32LnygNUwPgKuBvYDKoC/A3+RtE/c/wPgSuBS4ABgCfA3SR0SdfwvcApwJvBlYCfgCUnVU3w2MZIOAi4AZqXt8nNW3WygZ+Kxd2Kfn6sESZ2AKYCArwJ7EM7NkkQxP2dbHUD13639AAMeivsb7lyZmT8K8ABWAN8m/Kf5GLgusa8d8Bnw7fi6I7AB+H+JMn2Bz4HjCv1ZduA56gi8DxwFPAfcGbf7Oat+nsYAb2TZ5+dq23PyU2BKjv1+znKfv+uAVUBpQ58r72E1MEktJH0DKANeBAYCPYBnUmXMrAr4J3BI3LQ/0CqtzIfA24kyTdE9wMNm9ve07X7OtjVI0kJJ8yQ9IGlQ3O7nalvDgZclPShpiaSZki6RpLjfz1kW8RydD9xnZpU08LnygNVAJO0taQ2wHvg1cLKZvU74YQMsTjtkcWJfD2Az2y4mmSzTpEi6ABgCXJ9ht5+z6l4GRgInEIZPewAvSuqCn6tMBgGjgLnAccBY4GfAxXG/n7PsjiEEqd/E1w16rlrmU9htl9nAUKATYSz3XklHJPan38GtDNvS1aZM0ZG0G2HY5stmtiFHUT9ngJk9lXwt6SXCl/E5wEupYmmHNctzFZUA081sdHz9mqRdCAHrzkQ5P2fbugCYZmYz07Y3yLnyHlYDMbMNZvaemaX+o8wEvgekZnOl/6XRja1/tXwCtCAs2Z+tTFNyMOGzviFpk6RNwOHAqPjv5bGcn7MMzGwN8CawC/77lcnHwFtp294G+sV/+znLQFI34CRgXGJzg54rD1iFUwK0AeYRfqDHpHZIakuYSfNi3DQD2JhWpg9hdlOqTFPyF8Ist6GJx3Tggfjvd/FzllU8F7sTvpj992tbU4Dd0rbtytb8dX7OMhtJuKTxQGJbw56rQs84aQ4Pwvj4l4EBhC/imwkzZE6I+68GPgW+DuwVfyEWAR0SdfwKWAgcDewL/IPQS2tR6M/XQOfwOeIsQT9n25ybWwk90IHAl4An4rnp7+cq4/k6IH6BXke4TnoasBq42H+/sp4zEf5QHJdhX4Odq4KfiObwACYQ/npbT7hH4VkS0znjL8MYwl/E64Dngb3S6mgL/IIwHFYJPA70LfRna8BzmB6w/Jxt/ZypL4gN8UvhEeALfq5ynrOvAv+O5+Nd4DLiYuB+zjKeryMJ15sOzLCvwc6Vr9bunHOuKPg1LOecc0XBA5Zzzrmi4AHLOedcUfCA5Zxzrih4wHLOOVcUPGA555wrCh6wnCsCWRLopT/m5zh+QqLcc4ntA+K2b6WVL5f0mqQVkg6I255N1HHfjvqszmXji986VxwOTnv9Z8KNr2MS29bXUMcnwMmEVQmyktQdmAx0B46yrQudXkrIbfTnWrXYuXrmAcu5ImBmLyVfS1oPLEvfXoP1NZWX1JsQrDoCh5vZlkVizeztxHs71+A8YDnnAJDUD/g7YVHmw83s3QI3yblqPGA55yAkNfwnYb24w8xsXoHb49w2PGA55wBGE7LC7unByjVWPkvQOQfwNGHV7bGS2hS6Mc5l4gHLOQchJcl5wLHAnyS1KnB7nNuGByznHABmdi9wEfAfwERJLQrcJOeq8YDlnNvCzO4BvgucCvxekn9HuEbDJ10456oxszvidaz/AdZLOt8806trBDxgOee2YWa3xKD1E0La81EFbpJzHrCcK0ZmNqAux0lqGQ63zbGe+YTZgZne40bgxsSxJfhlBFdA/svnXPPRH9hIWHqpLp6Jx/evtxY5lwf50LRzTZ+kAUB5fPmZmc2uQx27AjvFl8v9BmPX0DxgOeecKwo+JOicc64oeMByzjlXFDxgOeecKwoesJxzzhUFD1jOOeeKwv8H5xRgwQPGJqgAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYQAAAEhCAYAAAB/bNeOAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAA1hElEQVR4nO3dd3wU1frH8c+TEEjo0gXpVZo0lY6FZgfserl2vSiK4M/esFy7cFEu9l4uFlAQlabSBaV3ECmCSO8dwvn9MRNdlyRkk81Oyvf9eu0r2ZnZ2WfPJvvsKXOOOecQERGJCzoAERHJGZQQREQEUEIQERGfEoKIiABKCCIi4lNCEBERQAkhVzCza83MhdwOmdmvZvaUmSXmgNiuDzKG3M7MVoe8t0fMbKuZzTCzZ8ysWkAxdTOzfqlsP8OP84wYx3PM85rZBDObEHK/iZn1N7NSsYwtL1FCyF0uBVoB5wFjgPuB5wONCK4FlBCybgzee9sO+AcwErgCWGhm3QOIpxtwTEIAZuPFOTum0aTuVv+WognwKKCEkEkFgg5AIjLXObfC/32cmdUGbjCzPs65o0EGFk1mVsg5dzDoOGJsi3Nuesj9b81sEF6i+MjM6jjn1gUU25+cc7uA6cc9MAacc4uDjiGvUQ0hd5sNJAFlUjaYWWEze9bMVvlNS6vM7EEziws5JtHMBprZQjPbY2YbzOwrM6sX/gRmVt3MPvCPOWhmK/0PKvzqegegTUiTx4SQx55mZuP959hrZt+Z2Wlh53/XzNaZWSszm2Zm+4Hn0nvRZtbdzKb6591lZj+Z2YUh+3ub2Y9mts3MdpjZdDM7L+wcBczsCb/p7YCZbTGzKWbWNuy4m8xsXsgxb8WqScI5twfvG3AScMvxjjezrv7r3m9mO83sSzOrG3bMBP91XuS//wfNbKmZXRZyzLvANUClkPd1tb8vraabKf7zz/Wff46Zne6X81Nm9of/frxrZkXCYnrMzGb7MW8xs+/NrGUGXu+fTUZmdi3wjr/rl5C4q5nZAjP7IpXHp7yWLsd7rvxCNYTcrRqwE9gK3occ3jfK+sATwAKgJfAwXjX6Lv9xhYBiwJPAH/6+W4HpZlbPObfBP1914CdgH15V/BegMtDZP8+twIdAPH99YO3yH9sYmAgsxmtWcsB9wEQza+mcmxfyOkoAQ4EXgAeA/Wm9YDO7HXgJ+BLvQ2sP0Mwvi9ByeRNYjfc3fgEwyszOdc596x9zL9AXeBCYCxQHWhDS3GBmz/hl9hJwN1DJL7OGZtbaOZecVpzR4pybZ2brgTbpHWdmXYGvge+By4GiwOPAFDNr4pz7PeTwWnivqT+wCegFDDWzzc65H/D+dsoCpwIpifZ4NbZaeM2X/8Z7T57Da/YaifceXAuc7B+zCbgn5LGVgIHAOqAIXpPZJDNr4Zybf5znTfE13nvzEF7Takpt6g/gFWCQmVV0zq0PecwtwCpgbAafI+9zzumWw2/89YFaF++f6wS8dvsjQO+Q43r6x7UPe/yDwCGgXBrnjwcKA7uBviHb38f7566YTmwTgCmpbP8c2AGUDNlWHNgGDA/Z9q4f80UZKIfifozDj3dsyGPi/DIbC4wI2T4qvfPgJZVk4JGw7W38eLtF8f1dDXyYzv4fgSXHOcdMvIRdIGRbdeAwMCDs/XJAy7D3fykwOex9WZfK85zhP/6MsHMeBmqEbLvQP2582OOHA6vSeR3x/vu1DBiUgeedkMr/Sa2wcxbD+6LycMi2MnhJ7r5ovY954aYmo9xlKd4/3jbgLeA159zgkP1dgTXANL+qXsCvNYwFEvBqCwCY2WXmjWTZgZdY9uJ9qwxtYugMjHJ//1aVUe39x+5I2eC89ueReM1MoY7gfUAfT2s/xtfTO8jMmpvZKDPb6J/7MNCJv7+2n4FzzezfZtbWzAqGnaYTXjL5KKwsZ+B9uLRP5/njQh9jIc11mWR4H3RpPV8RvFrSJ865IynbnXOrgKkcW95rXUh/hfNqOp8Bp2Uh1uXOuZUh95f6P8eEHbcUOMnMLCT+jmb2g5lt5a/3qw5/f78yzTm3G68me2PI67sOr1zfSfOB+ZASQu7SHa8afy4wHrjVzP4Zsr8cUBXvHyr09pO/vzSAmV0AfAIsAa4CTvfPuxkIHcZamr+q3pEqhVddD7cBr4YTapPLWPNLaf9nmjGZWWXgO//5b8dLIqcCo/n7a3sKrxnsQmAysNXM3jGzlP6Ycv7PFRxbnsVDYknN22HHv52B15aeyqRelilOwPtwS6u8w/s8NqZy3EagIF5TUWZsD7t/KJ3tBfBqAphZM+AbvJroDXhfWk4F5vH39yurhgBV8L4EGHAz8IVzLrWyyLfUh5C7LHT+KCMz+x6YDzxvZsOcc3vx+hJWAZel8fjV/s8rgBXOuWtTdphZAsd+cGzBa9/NjG1AhVS2V/D3hcroHOxb/J+VgIVpHNMVr0/iMhcyKsfMCv/tCZ07DDwLPGtmFYDzgQF4TWeX4/fL4NWSwj/UCNmfmv5AaM1tSxrHHZeZNQEq4vWJpGU7XhmmVd7hsZZP5bjyeB/WmyOPMksuxqsV9PDfEwDM7AS8JseocM4tNLPJeP0GB/D6PI7bUZ/fKCHkUs65g2Z2NzACr3P3ebxvwRcDe5xzS9N5eGG8f8JQPfG/tYUYC/QwsxOdc2l9Qz2I10YbbiJwnpkV86vsmFkxvA7eCenElp5peN8kb+bYpogUKR/8oR8udfDa/lOtWTivE/1NMzsXaOhvHgccBao458ZFEqRzbjV/Jd9MM7OiwH/xOvVfS+f59prZLOBSM+ufUtsys6p4NaSXwx5S2e/Yn+4fF4/XEfuT+2v48kG80U3ZrTBeX82fXwrM7Cy8b/OrIjxXSsd3WnEPwWs6OgGviev7CM+f5ykh5GLOuZFm9jPwf2Y2GPgIr230OzN7Ea/aXRCoidc00s05tw8vcXQzs4F4bffNgTs49hvZo3gXwU0zs6fwmk8qAV2dc//wj1mM13R1OfArsNs5twxvpMr5fizP4v3D34v3AfB4Jl/vbjO7H3jZzIb5r3c33gVJB5xzL+M1pR0B3vfL4ETgMeA3QppIzWyEXz6z8b5hN8WrXbzmP9evftyDzRu6ORHvm2VlvP6FN503IidayvhDLQ2vhtMMuAmvCefKDPTjPIw30maUmQ3B62t5DG8U2othx24EPjGzR/FqBL3w2ux7hRyzGChlZr3wOqwPOOcWZOH1pWU0cCfwrpm948fxMPB7eg9KQ8p1CbeZ2Xt4XwrmO+dSmq+GAf/B+3Jw17EPl8B7tXU7/o00Rk/4+zr7+/r69xPxmiyW4n1j2obXgdoffwQK3gfjk8B6vG+fE/E+EFcD74advybwP7xmj4PASmBgyP4KeG3Au/04JoTsOx3vA3oPXqf1d8BpYed/l1RGsxynPC7B69zdj9fBOwM4P2T/Zf7rPwAswmsiexdYHXLMXXgXWG31z7PML6OEsOfq6R+3138dS/Cag06K4vu72i87h/dtebv/nj0DVI3gPF3xRiTtx0sEI4C6YcdMAKbgfUFY6L+ny4DLw44r4r/vKc1Rq/3tZ5D6aJ8pYY+v5h93Y9j2/v720NFQt+PVBvb7r7sjx44gSut5J4Sd/1G8ZJJS66gWtv81/++idND/1znxZn4hiUg+4F/IVcA51/Z4x+Y1/iixFXjDa3sGHU9OpCYjEcnTzKw4Xt/QVXhNfuFNaOJTQhCRvK4Z8APeFdJ9nHNzgw0n51KTkYiIALowTUREfLm2yahMmTKuWrVqQYchIpKrzJo1a4tzLtUr0nNtQqhWrRozZ84MOgwRkVzFzNaktU9NRiIiAighiIiITwlBREQAJQQREfEpIYiICJCLRxllxpdzfuf5MctYv2M/FUsmcXeXunRrmtnp/kVE8pZ8kxC+nPM79w9fwP7D3sJcv+/Yz/3Dvdl8lRRERPJRk9HzY5b9mQxS7D+czPNjlgUUkYhIzpJvEsL6Hfsj2i4ikt/km4RQsWTqq+oVKVSAA2E1BxGR/CjfJIS7u9QlKeHvSwbHxxl7Dh7hnEGT+Xl1+LrvIiL5S75JCN2aVuLpHo2oVDIJAyqVTOLFS0/hoxtP53DyUS577Uf6j1zEvkPha8+LiOQPuXY9hBYtWrhoTW639+ARnh+zjHenraZyqSSe7dGY1rXKROXcIiI5iZnNcs61SG1fvqkhpKdIoQL0v7ABn97SingzrnpzBg98sYDdBw4HHZqISMwoIYQ4rXopvu3TnpvaVWfoT7/RZeAkJi7fHHRYIiIxoYQQJqlgPA+eV5/Pe7UmqWA817z9E3d/No+d+1RbEJG8TQkhDc2qnMDXd7Tj1jNqMnzO73QaOJHxizcGHZaISLZRQkhHYkI893Stx5e3tqFUkYLc+P5M7hw6h+17DwUdmohI1CkhZECjk0owsndb+pxdm1Hz/6DTwIl8u+CPoMMSEYkqJYQMKlggjr6d6jCyd1vKF0+k10ezufWjWWzZczDo0EREokIJIUL1Kxbny9vacHeXuoxfvIlOAyYyYu7v5NbrOUREUighZEJCfBy3nVmLr+9oS9XSRegzdC43fzCLTbsOBB2aiEimKSFkQe3yxRjWqzUPnFuPScs303HARD6ftU61BRHJlZQQsig+zri5fU2+7dOOuhWK8X+fzeO6d3/WtNoikusoIURJjbJF+eTmVvS/oD4zVm6j88BJ/O+n31RbEJFcQwkhiuLijGvbVGfMne1pVKkE9w9fwD/emsHabfuCDk1E5LiUELJBldKF+ejG0/l394bMW7uTLv+ZxPs/ruboUdUWRCTnUkLIJnFxxtWnV2VM3/Y0r3oCj4xYxBVvTGf1lr1BhyYikiolhGxWqWQS719/Gs9d0pglf+yi66BJvDl5JcmqLYhIDqOEEANmxmUtKjOubwfa1CzDk18v4dJXp7Fi056gQxMR+VOgCcHMbjOz+Wa2y7/9aGbnBRlTdqpQIpE3r2nBfy5vwsotezn3pckMmbCCI8lHgw5NRCTwGsI64F6gGdAC+B740swaBxpVNjIzujWtxNi+7TmrbjmeG72M7kOmsXTDrqBDE5F8LtCE4Jwb4Zz71jm3wjm33Dn3ILAbaBVkXLFQrlgir/Zszn+vasb6Hfu54OUpDBr/C4dVWxCRgARdQ/iTmcWb2RVAUWBa0PHEynmNT2Rcvw6c0/BEBo5fzoWDp7Lw951BhyUi+VDgCcHMGpnZHuAg8CrQ3Tm3II1jbzazmWY2c/PmvLPWcakiBXnpyqa83rM5W/Yc5KL/TuWFMcs4eCQ56NBEJB+xoKdWMLOCQBWgJHAxcBNwhnNuYXqPa9GihZs5c2b2BxhjO/cd5vFRixk2ex21yxXl+UtPoUnlkkGHJSJ5hJnNcs61SG1fRDUEM1tpZqeksa+hma2MNDjn3CG/D2Gmc+5+YC7QN9Lz5BUlCifw4mWn8M51p7Ln4BF6DJnK098s4cBh1RZEJHtF2mRUDSiUxr5EoGqWovHEpfMc+caZdcsxpm97Lj+1Mq9NWsm5gyYzc/W2oMMSkTwsM30IabUxtQB2RHIiM3vGzNqZWTW/L+Fp4Azgo0zElecUT0zg6R6N+fCG0zmUfJRLX/uRx75axL5DR4IOTUTyoALHO8DM+vJXE44DvjKzQ2GHJQGlgKERPn8F4EP/505gPnCOc25MhOfJ09rWLsOYO9vz3OilvDN1Nd8t2cQzFzeidc0yQYcmInnIcTuVzewioJt/9xrgGyB8iM9BYDHwpnMuJnM959VO5eOZsXIr9wybz5qt+7j69Crcf+7JFC103LwuIgKk36kc0SgjM3sHeNw5typawWVWfk0IAPsPJfPi2GW8NXUVFUsk8XSPRrSvUzbosEQkF4jaKCPn3HU5IRnkd0kF43no/Pp8/q/WJCbE8c+3f+Kez+exc//hoEMTkVws4k5lM7vGzEab2WJ/GGro7dfsCFJS17zqCXx9Rzt6nVGTz2eto/PAiXy3ZGPQYYlILhXpdQgPA+8AFfGuF5gYdpsU5fjkOBIT4rm3az2+vK0NJZMKcsN7M+n7yVx27Avv9xcRSV+kfQirgS+cc4FfOJaf+xDScujIUQb/sIIhP6ygZOGCPNmtAV0bnhh0WCKSg0StDwEoDXyV9ZAkOxQsEEe/TnUY0bsN5YsX4l8fzua2j2ezZc/BoEMTkVwg0oQwEUh16grJORpULMGXt7Xh/zrXYdyijXQeOImR89YT9LxVIpKzRZoQ7gSuM7N/mlkZM4sLv2VDjJIJCfFx9D6rNqPuaEvlUoW5439zuOWDWWzadSDo0EQkh4q0DyFl9Za0HuScczG5Skp9CBl3JPkob01ZxYvjlpOUEM8j59enR7NKmFnQoYlIjKXXhxDph/fjpJ0MJIcqEB/HLR1q0rF+ee79fD53fTaPUfPX81SPRpxYIino8EQkhwh8PYTMUg0hc5KPOt7/cTXPjV5GgTjjwfNO5vJTK6u2IJJPRHOUkeRy8XHGdW2qM/rOdjSoVJz7hi+g51s/sXZbTKagEpEcLFM1BH+RnLp4ayD8jXPu/SjEdVyqIWTd0aOOj3/6jae/WYID7junHv84vSpxcaotiORV0ZzcriTwNdAyZZP/88+TOOfiMxdmZJQQomfd9n3cP3wBk3/ZwunVS/HsxY2pVqZI0GGJSDaIZpPRU3gXp7XHSwbdgbPwFrRZCZyWhTglICedUJj3rz+N5y5uzOI/dtF10CTenLyS5KO5s39JRDIn0oTQBS8pTPfvr3POTXDO/RMYD/SJZnASO2bGZadWZlzfDrSuWYYnv17Cpa9OY8WmPUGHJiIxEmlCOBFY6ZxLBg4AxUL2DQfOi1ZgEowKJRJ565oWDLz8FH7dvJdzX5rMKxN+5Ujy0eM/WERytUgTwgagpP/7GqBVyL5a0QhIgmdmdG96EuP6teesuuV4dvRSerwyjWUbdgcdmohko0gTwhT+SgIfAI+a2Wtm9l/geUBrIech5Yol8so/mjH4qqas276f81+ezEvf/cJh1RZE8qRIr1R+DG8tBPASQGngcqAwMBK4PXqhSU5gZpzfuCKtapSm/1eLGTBuOaMXbuC5SxrTsFKJoMMTkSjSlcoSkTGLNvDQlwvZvvcQvc6oSe+zalGoQExGGotIFOhKZYmaLg0qMK5vey5sUpGXv1/BBS9PYd7aHUGHJSJRoDWVJWIlCxdkwGVNeOfaU9m1/wjdh0zl6W+XcOBwctChiUgWRNSH4K+p/BiwEG9NZS3FlY+dWa8cY/u15+lvlvDaxJWMW7yR5y9pTPOqpYIOTUQyQWsqS1RM+WUL9w6bz/qd+7mudXX+r0sdCheMydIYIhIBraks2a5t7TKM6dueni2r8vbUVZwzaDI//ro16LBEJAJaU1mipmihAjx+UUOG3uzNfXjlG9N5+MuF7Dl4JODIRCQjtKayRF3LGqUZ3ac9N7Stzocz1tBl4CQm/7I56LBE5Dgi/QBfDjQE3gE2AofDboeiGp3kWkkF43n4/Pp8/q9WFEqIo+dbP3HfsPnsOnA46NBEJA1aU1myVfOqpfjmjnb8Z/wvvD7pVyYs28xTPRpyVr3yQYcmImF0pbLEzLy1O7j783ks37iHHk0r8cgF9SlZuGDQYYnkK7pSWXKEUyqX5Kvb23LHWbUYOW89nQZOYsyiDUGHJSI+JQSJqUIF4unXuS4jerehbNFC3PLBLHp/PJute3SNo0jQjpsQzCzZzE7zfz/q30/rpvGFkiENKpZgRO823NWpDmMWbaDTwEl8NW89ubUJUyQvyEin8uPAupDf9R8rUZEQH8ftZ9emc4MK3PP5PG7/3xxGzV/PE90aUq5YYtDhieQ76lSWHOFI8lHenLKKAeOWk5QQz6MX1Kd700qYWdChieQp6XUqZyohmFlloDJwzNc459z3EZ8wE5QQ8qZfN+/hns/nM2vNds6qV46nujeiQgnVFkSiJWoJwcxqAB8Bp6Vs8n86/3fnnIvJailKCHlX8lHHu9NW8/yYpSTExfHQ+SdzWYvKqi2IREF6CSHSC9PeBKrgTWGxFF2ZLNkgPs64oW11Op5cjns+n8+9wxYwav4fPN2jESedUDjo8ETyrEhrCLuBa51zw6Ly5Gb3Az2AunhrK0wH7nfOLTzeY1VDyB+OHnV8NGMNT3+7FAPuO/dkrj6tCnFxqi2IZEY0L0xbR3RrBWcAQ4DWwFnAEWC8mWmFFQEgLs7o2aoaY+5sT7OqJ/Dwlwu56s3prNm6N+jQRPKcSGsIPYFbgC7Ouaj/R5pZUWAn0M05l+66C6oh5D/OOT6duZYnRy3hyFHH3V3qck3rasSrtiCSYVHrQ3DOfWBm9YDVZjYd2H7sIe6aTMYJUAyv1hJ+XhHMjMtPrUL7OmV5YPgCHh+1mK8X/MFzlzSmZtmiQYcnkutFWkO4FngbSAY2cWzzkXPO1ch0MGafArWBFs65Y1ZsN7ObgZsBqlSp0nzNmjWZfSrJ5ZxzfDHndx77ajH7DyfTr1MdbmxbnQLxmo1FJD3RHHa6BpgJ3OCc2xGd8P489wDgCqCtc27l8Y5Xk5EAbNp1gIe+XMjYxRs55aQSPH/pKdQpXyzosERyrGivqTwkG5LBQOBK4KyMJAORFOWKJ/Jaz+a8fGVT1m7fz3kvTebl737hcPLRoEMTyXUiTQhTgJOjGYCZDQKuwksGS6N5bskfzIwLTqnIuL7t6dKgAi+OW85Fg6eyaP3OoEMTyVUiTQh9gJvM7GozK53VNZXN7L/AdXi1g+1mVsG/qYdQIla6aCEGX9WMV//RnE27D3LR4KkMGLuMQ0dUWxDJiEj7EFL+s9J6kHPOZXjkkpmldZ7HnHP903us+hAkPTv2HeLxrxYzfM7v1C1fjOcuacwplUsGHZZI4KI5dUVUp792zmkAuWSLkoULMuDyJpx/yoncP3wB3YdM5eb2NbmzY20SE2Iy3ZZIrqPpryXP27n/ME99vYRPZq6lRtkiPH9JY5pX1cXwkj9FZZSRmRU0sy/MrH30QhPJfiWSEnj2ksZ8cMNpHDx8lEte/ZEnRi1m/6FjLnURydcynBCcc4eAjpE8RiQnaVe7LGP6tufq06vw1pRVdB00iekrtwYdlkiOEemH+1SgZXYEIhILRQsV4MlujfjfTS1xDq54fTqPjFjI3oNaDlwk0oRwF3CDmfU2s5PMLD4rw05FgtKqZmlG39mO69tU54Ppa+g8cBJTftkSdFgigQp02GlWqFNZomXm6m3c8/l8Vm7ZyxWnVuaB806meGJC0GGJZIscO+xUJCdoUa0U3/Rpx8Dxy3lj0komLt/MUz0acWbdckGHJhJTGnYqEmLu2h3c8/k8lm/cQ49mlXjk/PqULFww6LBEoiaak9uJ5GlNKpfkq9vbcvtZtRgxdz2dBk5i7KINQYclEhMRJwQza2pmw81si5kdMbNm/vanzKxr9EMUia1CBeK5q3NdRtzWhjJFC3HzB7O4/X9z2LY3mqvHiuQ8kU5G1xb4EagHfBz2+KPAv6IXmkiwGlYqwcjebejXqQ6jF/5BpwETGTV/Pbm1mVXkeCKtITwDjAEaAP3C9s0GmkUjKJGcIiE+jjvOrs1Xt7el0glJ9P54Dr0+nM3m3QeDDk0k6iJNCM2AV5z3FSn8a9IWoGxUohLJYepVKM7wXq25t2s9vl+2iU4DJ/LFnHWqLUieEmlCOAAUTmPfiYBWJJE8q0B8HL3OqMk3d7SjRpki9P1kHje+N5MNOw8EHZpIVER6YdpIoCRwpr/pMNDcOTfHzMYCW5xzV0U9ylRo2KkEKfmo452pq3hh7DIS4uN4+Lz6JMQbL4xdzvod+6lYMom7u9SlW9NKQYcq8jfpDTuNNCGcgjef0Wrgc+Bh4GXgFKA5cKpzbllWA84IJQTJCVZv2cs9w+bz06ptxBkcDfl3SkqI5+kejZQUJEeJ2nUIzrl5QHtgI/AgYEBvf3eHWCUDkZyiWpkiDL2pJSWSEv6WDAD2H07m+TH6l5DcI+J5h5xzs4GzzSwRKAXscM7ti3pkIrlEXJyxa//hVPet37E/xtGIZF6k1yGs9JuNcM4dcM6tT0kGZtbQzFZmR5AiOV3Fkkmpbi+WWIDDyUdT3SeS00Q6yqgaUCiNfYlA1SxFI5JL3d2lLklhazXHm7HrwBEueHkKc9fuCCYwkQhkZi6jtHqhWwA7Mh+KSO7VrWklnu7RiEolkzCgUskkXrzsFF7v2Zwd+w7TY8hUHv9qsRbikRztuKOMzKwv0Ne/WwnYDIRP6pKE158w1Dl3dbSDTI1GGUlusfvAYZ4bvYwPpq+hUskk/t29IWdoam0JSFbXQ1gJfOf/fg0wEy8phDoILAbezGyQInlVscQEnujWkAubVOS+YfO59p2f6d60Eg+fX59SRTS1tuQckV6H8A7wuHNuVfaFlDGqIUhudOBwMkN+WMErE3+lWGICj5xfn4uaVMTMgg5N8oloXodwXU5IBiK5VWJCPP0612XU7e2oUqowd34yl2vf+Zl12zVyW4Kn9RBEAlC3QjGG9WpN/wvq8/PqbXQeOIm3p6wiOfzqNpEY0noIIgGJjzOubVOdcf06cHr1Ujw+ajE9XpnG0g27gg5N8imthyASsEolk3j72lMZdEUT1m7bx/kvTeGFMcs4cDg56NAkn9F6CCI5gJlxUZNKjO/XgQubVGTwDys4d9BkZqzcGnRoko9oPQSRHKRUkYIMuKwJ719/GoeSj3L569N54IsF7DqQ+lxJItEUaUKYAtxpZqHX6KfUFG4Avo9KVCL5XPs6ZRnbtz03tavO0J9+o9OAiYxZtCHosCSPizQhPIzXbDTP/90B15jZD0BL4LHohieSfxUuWIAHz6vPl7e1oVSRQtzywSx6fTiLTbu0QptkD62HIJLDNT6pJCN7t+GernX5bukmzh4wkaE//ab1nCXqIrpS+W8PDHg9BF2pLPnRqi17uW/YfGas2kbLGqV4ukdjqpcpEnRYkotE7UplM0s0s7ZmdilwPlAd7/oDEYmB6mWK8L+bWvJMj0YsWr+LLv+ZxJAJK7TmgkRFhhKCmRUys0HANmAi8AnwKTAJ2GpmL5iZZukSiYG4OOOK06rwXb8OnF2vHM+NXsaFg6cyf92OoEOTXC4j018bMBY4CxgBfAP8htd/UBmvpnABMMY5d262RhtCTUYinjGLNvDIiIVs3n2Q69tUp1/nOhQuGPHquJJPZHX660uAM4FLnHNfpLL/TTPrAXxqZj2cc8OzEKuIRKhLgwq0qlmaZ79dyptTVjF60Qae6t6I9nV0nahEJiNNRlcCn6aRDADwk8BnQEwWxxGRvyuemMC/uzfi01taUTA+jn++/RP9Pp3L9r3ha1mJpC0jCaEp8HUGjhuF5jISCdRp1UvxTZ923H5WLUbOXU/HARMZMfd3DVGVDMlIQiiL12dwPL8BEa8LaGbtzWykmf1uZs7Mro30HCLyl8SEeO7qXJdRd7TlpFKF6TN0Lte/+zO/79gfdGiSw2UkIRTGWyLzeA4BiZmIoSiwEOgD6C9WJErqVSjO8F6teeT8+sxYtY1OAyby7lStuSBpy+hQhEpmVuM4x5yUmQCcc9/gjVzCzN7NzDlEJHXxccb1bavTqX55HvpyIf2/WsyXc9fz7MWNqVuhWNDhSQ6TkWGnRzl2qutUDwWccy7+uEem/Vx7gN7OuXfT2H8zcDNAlSpVmq9ZsyazTyWS7zjnGDlvPY99tZjdBw7Tq0NNbjurFoUKZPpfVnKhrA47vS7K8WSac+514HXwrkMIOByRXCVlzYV2tcvyxKjFvPT9Cr5e8AfPXNyYU6uVCjo8yQGOmxCcc+/FIhARiY1SRQoy8PImdGtaiQeGL+DSV3/kHy2rcG/XehRLTAg6PAlQpNNfi0ge0cFfc+GGttX5eMZvdBowiXGLNwYdlgRICUEkHytSqAAPn1+f4be2oWThBG56fya3fTSbTbu15kJ+FHhCMLOiZtbEzJr48VTx71cJODSRfKNJ5ZJ8dXtb7u5Sl3FLNtLxxYl8+vNaXdCWzwSeEIAWwBz/loS36toc4PEggxLJbxLi47jtzFp826cd9U4szj3D5nPVGzNYvWVv0KFJjGR6gZygabZTkexz9Khj6M9refqbJRxKPsqdHetwY7vqJMTnhO+QkhVRWyBHRPKHuDjjqtOrMP6uDpxRtyzPjl7KRYOnsmDdzqBDk2yU5YRgZs3MrJt/0+R2InlI+eKJvNazBa/+oxlb9hzkov9O4alvlrD/UHLQoUk2yPQqGmbWHPgI7yrmlAVzqnrr6XCVc25WVCIUkcB1bXgirWqW4Zlvl/L6pJWMXuitudC2dpmgQ5MoykoN4XW8aSZOds51cc51ds7VBXoDb0QnPBHJKUokJfB0j0YMvbkl8XHGP96awf99Nk9rLuQhWUkIRZ1z48M3OufGAUWycF4RycFa1ijNt33acduZNflyzu90GjiRkfPWa4hqHpCVhLDJzK4zsz9nxjKzeDO7EdiS9dBEJKdKTIjn7i71GNm7LRVLJnHH/+Zw43szWa81F3K1rCSEa/CW19xmZkvMbAmwDbjc3ycieVz9isX54tY2PHTeyUz7dSudBkzk/R9Xc1RrLuRKWb4OwczKAClXFf/mnItJ7UDXIYjkLGu37eOBLxYw+ZctNKtSkmcubkyd8lpzIafJ1usQnHNbnHOz/dsW/wkjXkpTRHK3yqUK8/71pzHw8lNYtWUv5700mYHjlnPwiIao5hbZdWGavrqL5ENmRvemJzG+XwfObXQig777hfNemsKsNduCDk0yICvXIVyYzu7MrK0sInlE6aKFGHRFU7o1rcRDXyzkkld/pGfLqtzdpa7WXMjBMt2HYGbJwES8C9LCtXTOJWUlsONRH4JI7rD34BFeGLuMd6etpkLxRJ7s1pCzTy4fdFj5Vnp9CFlJCEuBc5xzq1LZt9Y5VzlTJ84gJQSR3GXOb9u5b9gClm3czfmNT+TRCxpQtlihoMPKd7KrU/k9IK3r1l/NwnlFJA9qWuUEvrq9LXd1qsPYRRvpOGAin87Umgs5iaa/FpGYW7FpD/cPn8/Pq7fTplZpnureiKqlNcFBLESthmBmd5pZezPT4GIRybRa5Yryyc2teLJbQ+at3UmX/0zitYm/ciT5aNCh5WsR1RDM7ACQgDfD6a/ALGB2yk/nXMwmS1cNQSRv2LDzAA+PWMi4xRtpULE4z17cmIaVSgQdVp4VzT6E+sBqvCUuZ+NdofwoMB7YamazzOwWM9PCOyKSIRVKJPJ6z+a8cnUzNu0+yEX/ncrT32rNhSBE+sH9KvCFc+5U59yVzrm2QFXgZWA7MA34NzDGzDTYWEQyxMw4p9GJjO/bgUubn8RrE1fSddAkpq3QPJmxFGlCaAt8E7rBObfNOXcnMBxvcrsWQAPg/6IRoIjkHyUKJ/DMxY35+KbTMeCqN2dwz+fz2LFPay7EQqQJYRPQNI19nwD/dM6tBgYCV2chLhHJx1rXLMPoO9vT64yaDJv9Ox0HTGTUfK25kN0iTQjvAw+ZWWpJoRJ/XZcwC6iZlcBEJH9LTIjn3q71GNm7DSeWSKL3x3O46f2Z/LFTay5kl0gTwhN401VMN7P3zewKMzvDzG4BnuWvSe0KAYejGKeI5FMNKpbgi1tb8+C5JzNlxRY6DZjEB1pzIVtElBCcc4edc92A24FWwMfAd8ArwFbgFv/QlnjDUkVEsqxAfBw3ta/B2Ds70LRKSR4esYhLX/uRFZt2Bx1anpKlK5XNrAZwIrAZ+MX5JzOzlkBh59z3UYkyFboOQSR/cs4xfPbvPPH1YvYdTOa2M2vR64yaFCyg0e4ZkV2T2xV0zgXW9a+EIJK/bdlzkMe/WszIeeupU74oT/doTPOqJwQdVo4XzakrCpjZv81sM7DfzLaY2VAzax2VSEVEMqhM0UK8dGVT3r62BXsOHOGSV6fRf+Qi9hw8EnRouVakdawHgX7AMOBe4CO8aw4mm9l9UY5NROS4zqpXnrH9OnBNq2q89+NqOg+YyPdLNwYdVq4U6VxGvwKDnXMDw7ZfDwwBLnfOjYhuiKlTk5GIhJu1Zjv3DZvPL5v2cMEpFXn0gvqUKao1F0JFcy6jE/HmMPob59zbwH/Q1ckiEqDmVU/g6zva0bdjHcYs3EDHARP5fNY6XdCWQZEmhF+AJmnsGw2ckqVoRESyqGCBOPp0rM03fdpSq2xR/u+zefR86yd+27ov6NByvEgTwlvAI2bWJJV9JwF7shyRiEgU1CpXjE9vacUT3Royd+0OOv9nIm9MWqk1F9IRaUJ4GZgAzPCvVL7SzM40s5uAZ/BqCSIiOUJcnNGzZVXG9m1P21pl+Pc3S+g+ZBqL1sds6ZZcJeLrEPy1DvoCffBqBSnGAlc657ZHL7y0qVNZRCLhnOObBRt4dORCtu87zE3tanBnx9okJsQHHVpMZenCNDPriLca2rZU9tXFm9BujXNuXTSCzSglBBHJjB37DvHUN0v4dOY6qpUuzFM9GtG6ZpnjPzCPyGpCOIq3ZOYa/louM2XJzMBWr1BCEJGsmLpiCw98sYA1W/dxxamVuf+ckylROO+v65XVhHAy0Axo7t+aAMXwksQ6jl1XOSZXhCghiEhW7T+UzH++W86bk1dxQuGCPH5RA85pWAEzCzq0bBP1uYzMrA5eckhJFE2BEoBzzsWkQU4JQUSiZeHvO7l32HwWrd9Fp/rleeKihlQokRh0WNkiWya3S+VJagHNnHOfRuWEx6GEICLRdCT5KG9NWcWAccspGB/HvefU46rTqhAXl7dqC9G8UjlNzrkVmU0GZnarma0yswNmNsvM2kUrLhGRjCgQH8ctHWoytm97GlcuwUNfLuTy139kxab8c3lV4BOIm9nlwCDgKbymp2nAt2ZWJdDARCRfqlq6CB/ecDrPX9KY5Rv3cO6gybz83S8cOpL3L2gLPCHgzZ76rnPuDefcEufc7cAfQK+A4xKRfMrMuLRFZcb360DnBuV5cdxyLnh5CnN+i8llVoEJNCGYWUG8TumxYbvGAsessWBmN5vZTDObuXnz5liEKCL5WNlihRh8VTPe/GcLdh04TI9XvDUX9ubRNReCriGUAeKB8KGqG4EK4Qc75153zrVwzrUoW7ZsLOITEaFj/fKM7dueni2remsuDJzED8s2BR1W1AWdEFKED3WyVLaJiASmWGICj1/UkM9uaUVSwXiue+dn+gydw9Y9B4MOLWqCTghbgGSOrQ2U49hag4hI4FpUK8XXd7Slz9m1+WbBH3QcMJHhs/PGmguBJgTn3CG8K5w7he3qhDfaSEQkxylUIJ6+nerw9R3tqF6mCP0+ncc/3/6Jtdty95oLQdcQAAYA15rZjWZ2spkNAioCrwYcl4hIuuqUL8bn/2rN4xc1YPaa7XQeOIk3J68k+WjurC0EnhCcc58AdwIPAXOBtsC5zrk1AYYlIpIhcXHGP1tVY1y/DrSqWZonv15CjyFTWbx+V9ChRSxqU1fEmqauEJGcxjnHqPl/0H/kInbuP8zN7Wtwx9k5a82FmExdISKS35kZF5xSkfH9OtCtaSWGTPiVcwZNZvrKrUGHliFKCCIiUXZCkYK8cOkpfHjD6Rw5epQrXp/O/cPns3P/4aBDS5cSgohINmlbuwxj7+zAze1r8MnPa+k4YCKjF/4RdFhpUkIQEclGSQXjeeDckxlxW1vKFi3Evz6czS0fzGTjrgNBh3YMJQQRkRhodFIJRvRuw71d6zFh2WY6vjiRj2f8xtEcNERVCUFEJEYS4uPodUZNxtzZnoaVSvDAFwu44o3p/Lo5Z6y5oIQgIhJj1coU4eObTue5ixuz9I9dnDNoMoO/D37NBSUEEZEAmBmXnVqZ8Xd1oNPJ5Xlh7HIuHDyFuWt3BBaTEoKISIDKFUvkv1c34/Wezdm+7xA9hkzl8a8WB7LmghKCiEgO0LlBBcb168BVp1fh7amr6DxwEhNivOaCEoKISA5RPDGBJ7s14rN/tSIxIY5r3/mZvp/MZdveQzF5fs1lJCKSAx04nMyQH1bwysRfKZaYwCPn18c5xwtjl7N+x34qlkzi7i516da0UkTnTW8uIyUEEZEcbNmG3dw7bD5z1+4gziD0soWkhHie7tEooqSgye1ERHKpuhWKMaxXa0okJRB+Ddv+w8k8P2ZZ1J5LCUFEJIeLjzN2pTEx3vod+6P2PEoIIiK5QMWSSRFtzwwlBBGRXODuLnVJCltoJykhnru71I3acxSI2plERCTbpHQcPz9mWZZGGaVHCUFEJJfo1rRSVBNAODUZiYgIoIQgIiI+JQQREQGUEERExKeEICIiQC6ey8jMNgNrMvnwMsCWKIaT16m8IqPyipzKLDJZKa+qzrmyqe3ItQkhK8xsZlqTO8mxVF6RUXlFTmUWmewqLzUZiYgIoIQgIiK+/JoQXg86gFxG5RUZlVfkVGaRyZbyypd9CCIicqz8WkMQEZEwSggiIgIoIYiIiC9PJAQzu83M5pvZLv/2o5mdF7LfzKy/ma03s/1mNsHMGoSdo5CZvWxmW8xsr5mNNLOTYv9qYs/MHjAzZ2aDQ7apzHx+Obiw24aQ/SqrMGZ2opm9Z2abzeyAmS02sw4h+1VmPjNbncrflzOzr/39sSsr51yuvwEXAecAtYA6wL+Bw0Bjf/+9wG7gYqAh8CmwHigWco5X/G2dgGbABGAuEB/068vmsmsJrALmAYNDtqvM/nqd/YGlQIWQW1mVVZrlVRJYCbwPnAZUB84GTlaZpVpeZcP+tpoCR4FrYl1WgRdGNhbyNuAWwIA/gAdD9iX5BXyLf78EcAi4OuSYyv6b0iXo15KNZVQC+BU4y/8DGuxvV5n9vZz6AwvT2KeyOrZMngKmprNfZZZ++T0I7AAKx7qs8kSTUSgzizezK4CiwDS8bycVgLEpxzjn9gOTgNb+puZAQtgxa4ElIcfkRa8Dnzvnvg/brjI7Vg0z+93MVpnZUDOr4W9XWR2rGzDDzD4xs01mNtfMepuZ+ftVZmnwy+gG4EPn3D5iXFZ5JiGYWSMz2wMcBF4FujvnFuAVJsDGsIdsDNlXAUjm2MmiQo/JU8zsJrwmtodT2a0y+7sZwLV4zZI34b2+aWZWGpVVamoAt+I1G3UBBgHPALf5+1VmaeuElwTe9O/HtKzy0prKy4AmeO2XFwPvmdkZIfvDr8CzVLaFy8gxuY6Z1cWr1rdzzh1K51CVGeCc+zb0vplNx/uwuwaYnnJY2MPyZVn54oCZzrn7/ftzzKw2XkIYHHKcyuxYNwE/O+fmhm2PSVnlmRqCc+6Qc26Fcy7lD3Eu0BdIGQ0SninL8VfW3QDE400pm9YxeUkrvNe60MyOmNkRoANwq//7Vv84lVkqnHN7gEVAbfT3lZo/gMVh25YAVfzfVWapMLNyeANk3gjZHNOyyjMJIRVxQCG8ETQb8KpiAJhZItAOr48BYBbeqKTQY04CTg45Ji/5EmiEV6NKuc0Ehvq/L0dllia/LOrhffDp7+tYU4G6Ydvq8Nf6JSqz1F2L1+Q9NGRbbMsq6B71KPXKP+MXUDW8D7qn8XrYz/H33wvsAnrgDdsaSurDtn4HOuIN+/qBPDjELZ0ynMCxw05VZt7rfAGvBlUdOB0Y5ZdNVZVVquV1qv8B9SBeP9WlwE7gNv19pVlmhvdF7I1U9sWsrAIviCgV5rt43z4OApuA8YQMt/ILuz/eN7oDwESgYdg5EoGX8ZpL9gFfAZWDfm0xLMPwhKAy++t1pvwDHvL/6YYB9VVW6ZbZeXjXthzwP+juwJ9MU2WWanmdidfef1oq+2JWVprtVEREgLzdhyAiIhFQQhAREUAJQUREfEoIIiICKCGIiIhPCUFERAAlBBEA0ligJPy2Op3Hvxty3ISQ7dX8bTeGHV/GzOaY2TYzO9XfNj7kHB9m12sVSUtemtxOJCtahd3/Au/Cqv4h2w4e5xwbgO54V5WmyczKA98B5YGz3F8Tmd2ON7f9FxmKWCTKlBBEAOfc9ND7ZnYQ2BK+/TgOHu94M6uElwxKAB2cc39OAuecWxLy3CIxp4QgEiNmVgX4Hm/SxQ7OueUBhyTyN0oIIrFRA2+VKwe0d86tCjgekWMoIYjExv14q1o1UDKQnEqjjERiYzTerJWDzKxQ0MGIpEYJQSQ2hgHXA52Bz8wsIeB4RI6hhCASI86594B/ARcAH5tZfMAhifyNEoJIDDnnXgf6AJcA75uZ/gclx1CnskiMOede8vsRngMOmtkNTitVSQ6ghCASAOfc835SeAJvWcRbAw5JRAlBJDXOuWqZeZyZFfAe7pL986zGG12U2nM8CTwZ8tg41IwrAdIfn0j0VAUO401NkRlj/cdXjVpEIhEwNV2KZJ2ZVQPK+Hd3O+eWZeIcdYDi/t2tuoBNYk0JQUREADUZiYiITwlBREQAJQQREfEpIYiICKCEICIivv8HUnFm4x6R3GEAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZ0AAAEhCAYAAACk132sAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAA72UlEQVR4nO3dd3gUVffA8e9JgdB77x0p0pEOYgFBEERfUF8Ru6ggKOrPCnZ9saDYu6goFkREmvTepPeO9F6lJ+f3x0x0WdI22exkk/N5nnmyO23P3mz2ZO69c6+oKsYYY0woRHgdgDHGmKzDko4xxpiQsaRjjDEmZCzpGGOMCRlLOsYYY0LGko4xxpiQsaRjLiAivUREfZazIrJJRF4WkZgMENsdXsYQ7kRkq9/v13fpF+JY+onI9QmsHyQiIb+XI6HXdctlkM/zLiLycKhjy0yivA7AZFg3AjuAPEBX4An3cR8PY+qF85n93MMYMoMJwKAE1m8NbRj0A2YBI/3WfwqMD3EsiWmK83cQrwtwJfCmJ9FkApZ0TGKWqupG9/EfIlIFuFNEHlLVOC8DCyYRya6qZ7yOI8QOqOo8r4NIjKru4MIves9k5HIKV1a9ZlJqMZADKBy/QkRyishrIrLFrYbbIiJPiUiEzz4xIvKWiKwUkRMiskdEfhOR6v4vICIVRORrd58zIrJZRN52t00DWgPNfaqDpvkc21hEJrmv8beITBaRxn7n/1JEdohIUxGZIyKngP8l9aZFpKuIzHbPe0xEFohIZ5/tD4rIXBE5JCJHRGSeiHT0O0eUiLzgVlOeFpEDIjJLRFr47Xe3iCzz2eczESmYVHyhJI7+IrLO/X3vFpF3RSSv334qIi+5n4UdInJKRGaISF2ffbYC5YBbfH6fX7rbEqvmelFEHhGRbe7v+HcRKeouP4jIURHZLiKP+x1bREQ+EpH1InLS3We4iJRKwXv+p3rNje82oJRPzFtFpLhbHg8lcPwg9zULpKiQswC70jEpVR44ChwE54sUp5qmBvACsAJoAjwDFAQecY/LjlMt9yKw2912PzBPRKqr6h73fBWABcBJYCCwASgDXO2e537gGyASuNddd8w99lJgOrAapwpOgf8DpotIE1Vd5vM+8gHfA68DTwKnEnvDItIHeAcYhfNlcwKo75aFb7l8ilM1FQV0AsaISAdVHefu8zjQH3gKWArkBRq6ZRH/Wq+6ZfYO8ChQyi2zWiLSTFVjE4szFcT9/V1AVc8nc9xLONWs7wG/8e/vvo6ItPa7Au4J/AU8iPMZeB6YLCJVVPUQTpXtWGAZ/1b17U/m9W8FVuJ8FooBQ4BhOJ+vccDHONXCr4rIClUd6x5XEDjtxr4fKIlT1rPdz+DpZF433gtAEaAREP+PxxlV3SMio3A+l2/H7ywikcCdwA+qejiFr5H5qaottvyz8O+XdjWcL9ECwB3AeeBBn/1udfdr5Xf8U8BZoGgi548EcgLHgf4+64fhfKmXTCK2acCsBNb/BBwB8vusywscAkb6rPvSjfm6FJRDXjfGkcnt63NMhFtmE4FffdaPSeo8OIkrFnjWb31zN94uQfz9bnXPmdDSMInj4r+4v/Rb/1/32M4+6xQ4AOTye4/ngBf8Yvkmgdca5Hw1XbBOgfVAlM+6N931T/usiwL2AV8k8V4icf6hUaBrCl53kN9naEcC52zj7tvSZ11nd12TYP3+MsNi1WsmMWtxviQOAZ8BH6nquz7b2wPbgDlu9VGU+9/zRCAa56oHABH5j4jMF5EjOMnrbyA3TmKLdzUwRlV3pSLWVu6xR+JXqOoxYDROlZyv8zhJIDnN3Bg/TmonEWkgImNEZK977nPAVVz43hYCHdwqpxYiks3vNFfhJKxv/cpyPs7VXKskXj/C9xjxqdpMwjic/9b9l9VJHNME54rlG7/13+O8b/9yHquqf8c/UdWtwDychvnU+kMvvBpb6/6c4PM654GNOEnlHyLS2626POHG+5e7yff3lGqqOg2n/O71WX0vsFytXegClnRMYrrifBF1ACYB94tIT5/tRXHq5M/5LQvc7YUARKQTMAJYA9wMXOaedz/g2wW7EKlvPC6IU3Xnbw/OlZqvfZqyqqpC7s9EYxKRMsBk9/X74CSqRjg9r3zf28s4VYadgZnAQRH5QkTi28eKuj83cnF55vWJJSGf++2fkp59h1R1UQLLySSOia8KvKCc3S/5gz7b4+1N4Bx7caoNU8u/iupsEuv/KX+3mvR9nM/x9UBj/v2nKJi3AXwA3CAihUSkHM4/Zh8G8fyZgrXpmMSsVLf3mohMAZYDg0XkZ/c/2IPAFuA/iRy/1f3ZA9ioqr3iN4hINBd/SR0g9V9Ih4DiCawv7m7zldL7Pw64P0vhtCMkpD1OG9F/1OlxBTgdLC54QdVzwGvAayJSHLgWp2ooJ9Adt50M52ovobr/gwmsizcI8L0CPZDIfmkVX47FgVXxK90rskJcHGOxBM5RDNiZLtElrQcwWVXj2xnj2xCDbRjwCk4VdQGc9sJv0+F1wpolHZMsVT0jIo8Cv+I04g7G+W++G3BCVdcmcXhOnOoMX7fi1Kv7mghcLyIlVDWhqxaAMziNxv6mAx1FJI+qHgcQkTw4jfrTkogtKXNw2pjuwaf6xk98cjkXv0JEquK0xSR4haROx4lPRaQDUMtd/QcQB5RV1T8CCdKtttoayDGpNA+n/HvgXN3F647zPTLdb/8OIpIrvopNRMrjXF286rPPGZwekektJ26nEx+3p/JcicasqsdE5FucarXcwHC3mtf4sKRjUkRVR4vIQmCAiLyL8x/c7Tg9kt7A6YWUDaiEU43Uxa2uGQ90EZG3cNpSGgB9cRr+fQ0EOuK0Eb2MU9VUCmivqv9191mNU83XHdgEHFfVdTi9iq51Y3kN52rmcZwvm+dT+X6Pi8gTwFAR+dl9v8eBusBpVR2KU11zHhjmlkEJ4Dmc9gLfbuO/uuWzGOdKph7OVdJH7mttcuN+V0Sq4XyBn8Zpl7gK+FRVp6bmfSSisIg0SWD9HjeJXURVD4nIm8ATIvI3Ts+zS3B62M0Cfvc75BQwUUQG47QFPYfzxf+Wzz6rgZYici1OVeiBxF4/jcYDj4vIkzjVv22BG1J5rtVAQRHpDSzC+Sys8Nn+Pv+261jVWkK87slgS8Za+Lf3WuUEtl3tbuvvPo/Bqd5Zi/Mf4CGcRvNBuL2McL58XwR24XSHno7zpbuVi3tCVQK+w6kiOgNsBt7y2V4c58vuuBvHNJ9tl+EkgRM4HRUmA439zv8lCfQ8SqY8bsBp0D+F86U5H7jWZ/t/3Pd/GqfaqYf7Olt99nkE50rhoHuedW4ZRfu91q3ufn+772MNTtVZ6SD+freSeO+1d5M5VnC6fq/DaTfZjdN9Oq/fforTvfpJnCu+0zhtWXX99qvurj/pHvOlu34QCfciezEln1X8ejniXJl8gNOOeBznn58KXNwzLbHX9d0nl/sZPexu25pAOa0DFnr9t5xRF3ELyRhjgsK9sfMlVX3a61hCza1eXQvcraqfeR1PRmTVa8YYk0YiUhqojFONuBsY7m1EGZd1mTbGmLS7C5iC00PvZlVNdKSLrM6q14wxxoSMXekYY4wJGWvTSULhwoW1fPnyXodhjDFh5c8//zygqkUS2mZJJwnly5dn0aJFXodhjDFhRUS2JbbNqteMMcaEjCUdY4wxIWNJxxhjTMhY0jHGGBMylnSMMcaEjPVeC7JRS3YyeMI6dh05Rcn8OXi0XTW61EvLvFXGGJN5WNIJolFLdvLEyBWcOudMTLnzyCmeGOmMem6JxxhjrHotqAZPWPdPwol36lwsgyes8ygiY4zJWCzpBNGuIwmP8bczkfXGGJPVWNIJopL5E555NzpSWLr9SGiDMcaYDCjgpCMi9URkpIgcEJHzIlLfXf+yiLQPfojh49F21cgRHXnBuuhIIUd0JF3em83jPy3n4IkzHkVnjDHeCyjpiEgLYC7ONLPD/Y6PA+4LXmjhp0u9UrxyfW1K5c+BAKXy52DwDXWY/X9tuadVRX5evIPLX5/GsLlbOR8b53W4xhgTcgHNpyMis3Dmee8CROLMk95QVReLyPXAEFUtmx6BeqFhw4YazAE/N+47zsDRq5i98SCXlMjL89fVpFH5gkE7vzHGZAQi8qeqNkxoW6DVa/WBD9TJVP7Z6gCQ4FDWxlG5aB6+ufMy3r+lPkdPnuXGD+fSf8RS9h077XVoxhgTEoEmndNAzkS2lQCOpi2czE9E6FC7BJMeac2Dl1fm9+W7afvGdD6duZlzVuVmjMnkAk06s4B+IuLbWh5/xXMnzhzhJgVyZotiQLtqTOzfikblC/Di72u45u2ZzNl4wOvQjDEm3QSadJ7BqWJb5j5W4DYRmQo0AZ4LbniZX/nCufi8VyM+7dmQM+djufnT+TwwfHGi9/wYY0w4CyjpqOoyoBWwF3gKEOBBd3NrVbVb71NBRLiyRjH+6N+ah6+qyqTVe7nijem8N3UjZ87HJn8CY4wJEwH1XrvgQJEYoCBwRFVPBjWqDCLYvddSavuhk7z4+2omrNpLhcK5GNipBm2qFQ15HMYYkxrB7L32D1U9raq7MmvC8VKZgjn56NaGfHVHYwTo9cVC7h62iO2HrKiNMeEt0Pt0kusooKp6RdpCyji8utLxdfZ8HJ/N2sLQKRuIjVPua12J3m0qEeM38oExxmQUwbzSicBpx/FdCgPNgarucxNE2aIi6N2mEpMfac3VNYvz9uQNXPnmdCau2kNqq0aNMcYrqW7TueAkIpWAUUB/VZ2U5hNmEBnhSsff3E0HGTh6Jev3nqB11SIM6lyTCoVzeR2WMcb8I13adHyp6ibgVWBwMM5nEte0UiF+79uSZ66tweJth2n31gz+N34tJ8+e9zo0Y4xJVjCnNtiPU8Vm0ll0ZAR3tqjA5AGt6VSnJO9P28QVb0xnzPJdVuVmjMnQgpJ0RKQg8DCwKRjnMylTNE8Mb/ynDj/d15QCObPx4PAl3PLpfDbsPe51aMYYk6BAe69t4eKBPrMBxdzH3VR1dJBi81xGbNNJTGycMnzBX7w+YR1/nzlPr2bleejKKuSJifY6NGNMFpNUm05UgOeazsVJ5zSwDfjRbdsxHoiMEG5tUo6OtUsweMI6Ppu9hV+X7eLJDtXpUrcUItax0BjjvaD0XsuswulKx9+y7Ud4dvQqlm0/QqPyBRjUuSY1S+bzOixjTBaQ7r3XTMZTp0x+fundjNe61WbT/r/pNHQWz/66kqMnz3kdmjEmC7Okk4lFRAjdG5Vl6iNtuLVJOb6Zt43L35jGiIV/ERdnV7jGmNBLNumISJyIxKZwsZtFMqB8OaN57rpajOnTkkpFcvH4zyvo+v5slm0/4nVoxpgsJiUdCZ7n4s4DJgzVKJmXH+5tyqilO3l57Fq6vD+bHo3K8Gi76hTMlc3r8IwxWYB1JEhCOHckSM7x0+d4Z/IGvpi9lVzZoxhwdVVuvqwckRHWy80YkzbWkcBcJE9MNE91rMG4h1pSs2Renvl1FZ2GzuLPbYe8Ds0Yk4ml6kpHROoA1YAY/22qOiwIcWUImflKx5eqMnbFHl78fTW7j57m+vql+L9rqlM0z0W/XmOMSVZSVzqBjkiQH/gdaBK/yv35z0lUNd0mehGRe4CbgHpAPqCCqm5N5phewBcJbMqhqqeTOjarJJ14J8+e572pG/lkxhayRUXQ78oq3NasPNGRdkFsjEm5YFavvQwUAlrhJJyuQFvgW2Az0DgNcaZETmAiMCjA404CJXyX5BJOVpQzWxSPtqvOhP6taFi+AC/+voaO78xkzqYDXodmjMkkAk067XASzzz3+Q5VnaaqPYFJwEPBDM6fqg5R1VeAWYEfqnt8l/SIL7OoUDgXX/RqxCc9G3LybCw3fzKfB4cvZvfRU16HZowJc4EmnRLAZlWNxRlzLY/PtpFAx2AFFmQ5RGSbiOwQkTEiUs/rgDI6EeGqGsWY9HBr+l1ZhT9W7+WKN6bzwbRNnDkf63V4xpgwFWjS2QPkdx9vA5r6bKscjIDSwTrgDuA6nPag08BsEamS0M4ico+ILBKRRfv37w9hmBlTTHQk/a6syqSHW9OicmFeG7+Wa4bMZPp6KxtjTOACTTqz+DfRfA0MFJGPROQ9nFlDJwQagIi8KCKazNIm0PPGU9W5qvqVqi5V1ZlAd5x5f/oksv/HqtpQVRsWKVIktS+b6ZQpmJOPezbky9sbocBtny/gnmGL2H7opNehGWPCSKBTGzwHlHQfD8bpVNAdp4F/NIl8kSdjCPBNMvv8lYrzJkhVY0VkEZDglY5JWptqRWlaqRCfzdrC0MkbufLN6dzfpjL3tq5ITHS6dVw0xmQSYTkigYg0BBaSgi7TCRwrwCJgmarekdS+Wa3LdKB2HTnFy2PXMGb5bsoUzMGz19bkykuK2tw9xmRxQesyLSKLRaSfiBRLfu/gE5HiIlIXqOquqiEidd3psuP3mSwir/g8Hygi7USkonvsZ8ClwIchDD1TKpk/B+/eXJ/hd11GTFQkdw9bxB1fLmTLgb+9Ds0Yk0EF2qazF/gfsF1ExopIDxEJ5W3r9wFLcO4LAudG1SVAZ599KuH0souXH/gYWINzj08poJWqLkjvYLOKZpULM/ahljzd8RIWbj1Mu7dmMHjCWk6etUHHjTEXCrh6TUSKAjcD/wXqA8eBn4BvVHVq0CP0kFWvBW7f8dO8OnYtI5fspGS+GJ6+tgbX1CpuVW7GZCFBGwYngRNfAtyKk4TK4NwsWi7VJ8xgLOmk3sKth3j211Ws2X2M5pUL8VznmlQumif5A40xYS/dko578higG/AqUDI9x14LNUs6aRMbpwyfv43BE9Zx8mwstzcvT98rqpAnJtrr0Iwx6ShdpjYQkbYi8gVOO88wYAep6zJtMqnICOHWpuWZOqANNzQozaeztnDFG9MZtWQn4dhr0hiTdoGOMl0Lpy3nZpwG+W0499h8raob0iVCD9mVTnAt3X6Egb+uZNmOozQuX5BBnWtSo2Rer8MyxgRZMKc2iAOOAj/iJJqZwQkxY7KkE3xxccoPi7bz2vi1HD11jlublOPhq6uRL4dVuRmTWSSVdAIdkaA7MFpVz6Q9LJMVRUQIPRqXpX2t4rz5x3q+nreNMct383j76tzQoDQRNl22MZlaQG06qvqjJRwTDPlzZuP562rxW58WVCici8d+Xs71H8xh+Y4jXodmjElHqblP5zac0ZrLcvF01aqqlYIUm+esei00VJVfluzk5bFrOfj3GXo0Ksuj7apRMFc2r0MzxqRC0KrXROQZnEE/VwJLAbvqMWkmIlxfvzRX1SjG25M28MWcrYxbuZsBV1fjpsZlibQqN2MyjUA7EmwFflHV/ukWUQZiVzreWL/3OAN/XcXczQepWTIvz19XiwblCngdljEmhYJ5n04h4Le0h2RM4qoWy8Pwuy/j3ZvrcfDEWbp9MIdHfljG/uN2YW1MuAs06UwH6qRHIMb4EhGuvbQkkx9pTe82lRi9bCdtX5/G57O2cD42zuvwjDGpFGjS6QfcLiI9RaSwiET4L+kQo8nCcmWP4vH21ZnQrxX1yhXg+TGr6fjOLOZuOuh1aMaYVEjNzaEAiR2kqhrovT8ZlrXpZCyqyh+r9/L8mNXsOHyKTnVK8lSHSyieL5SzaxhjkhPMm0OfJ/GEY0y6EhGurlmcVlWL8MG0TXwwfROT1+yl7xVVuKN5BbJF2YW2MRldWE5XHSp2pZOx/XXwJM+PWc2kNXupWCQXgzrVpFXVIl6HZUyWly6jTBvjtbKFcvLpbQ35olcj4uKUnp8v4L6v/2TH4ZNeh2aMSUTASUdE6onISBE5ICLnRaS+u/5lEWkf/BCNSdrl1YsyoX8rHm1Xjenr93Plm9N5Z/IGTp+L9To0Y4yfgJKOiLQA5gLVgeF+x8cB9wUvNGNSLntUJA9cXpnJj7TmikuK8eYf67n6rRlMXrPX69CMMT4CvdJ5FZgA1AQe9tu2GKgfjKCMSa2S+XPw3s31+fauy8gWFcGdXy3iji8XsvXA316HZowh8KRTH/hAnd4H/j0QDgDWimsyhOaVCzPuoZY81eES5m8+yNVvzeD1Ces4ddaq3IzxUqBJ5zSQM5FtJXAmeDMmQ4iOjODuVhWZOqANHS8twbtTN3Llm9MZt2K3TZdtjEcCTTqzgH4iEumzLv6v905gSlCiMiaIiuaN4a3udfnh3qbkiYmi97eLufWzBWzcd9zr0IzJcgJNOs/gVLEtcx8rcJuITAWa4Ex7YEyG1LhCQcb0acFznWuyfMcR2g+ZyStj13DizHmvQzMmywh05tBlQCtgL/AUIMCD7ubWqrouuOEZE1xRkRHc1qw8Uwa0oVv90nw0YzNtX5/Gr0t3WpWbMSGQ6hEJRCQGKAgcUdVMeTeejUiQ+S356zADR69i+Y6jNK5QkOevq0n14nm9DsuYsJYuIxKo6mlV3ZVZE47JGuqVLcAv9zfnletrs2HvcTq+M4tBo1dx9NQ5r0MzJlMKdLrqpDoKxOH0XvsT+ExV7a48ExYiI4SbGpflmlrFeWPieobN3cpvy3bx+DXVuaF+aSJsumxjgibQKx0BqgFtgHJAjPuzDXAJUAGng8FKEakRtCiNCYH8ObPxQpdajH6wBeUK5eSxn5bT7cM5rNhhdwIYEyyBJp03ce7VaaCqlVS1mapWAhq5658DqgD7gZeCGqkxIVKrVD5+uq8Zb9xYh+2HTtH5vVk8+csKDv991uvQjAl7gSadF4FBqrrEd6Wq/omTcF5U1R3AYJxebsaEpYgIoVuD0kwZ0Jrbm1VgxMLtXP7GNL6dv43YOOvlZkxqBZp0quIMd5OQ/UBl9/EmIFdqgzImo8gbE82znWowtm9LqhXLw1O/rOS692bx57bDXodmTFgKNOlsBe5KZNs97naAwoBNYm8yjWrF8/D9PU0YelM9Dhw/S7cP5vDoj8s4cOKM16EZE1ZSM131NyKyHPgZ2AcUBboBtYCb3f2uBOYHK0hjMgIRoVOdkrStXpShUzby2azNjF+1h4evqsqtTcoRFWlzIhqTnIBvDhWRq3DabxoA0cA5YBEwUFUnufvEALGqGtY3O9jNoSYpm/afYNDoVczccIDqxfPwXOeaXFaxkNdhGeO5oN4cqqp/qGozIAdQHMihqs3jE467z+lgJxwRKSgiQ0VkrYicEpHtIvKBiCT7Vy4i3URktYiccX92DWZsJmuqVCQ3w+5ozIf/bcDx0+fp/vE8Hvp+CXuPnfY6NGMyrLSMSBCnqvtUNS6YASWhJFAKeAyoDfwXp4fcd0kdJCJNgRHAt0Bd9+ePInJZegZrsgYRoX2t4kx6uDV9r6jCuJV7aPv6ND6avomz50P1p2FM+Ej12GsZgYh0AMYA+VX1WCL7jAAKqupVPusmAftV9aakzm/VayZQfx08yfNjVjFpzT4qFcnFoM41aVnF5jY0WUuaqtdEJFZEGruP49zniS2hHiM+L3AGSGr8t6bARL91E4Bm6RWUybrKFsrJp7c14vNeDTkfp9z62QJ6f/MnO4+c8jo0YzKElPReex7Y4fM4Q1waiUh+4AXgE1VNKtkVx5mKwdded31C570Hp/s3ZcuWTXugJktqW70YzSoV5rNZWxg6ZQNT1+3jwcsrc1fLisRERyZ/AmMyKc+r10TkRZy5eZJyuapO8zkmFzAeiAXaq2qiLbcicha4U1W/9ll3G/CRqsYk9aJWvWaCYeeRU7z0+2rGrthDuUI5GdipBm2rF/M6LGPSTVLVa4Hep5MehgDfJLPPX/EPRCQ3MNZ9em1SCce1h4uvaopy8dWPMemiVP4cvH9LA2ZtOMDA0Su548tFXFG9KM92qkG5QjZwh8laAu69JiL1RGSkiBwQkfMiUt9d/7KItA/0fKp6QFXXJrOcdF8jD84VTiTQQVVPpOAl5gJX+a27CpgTaKzGpEWLKoUZ91ArnuxQnXmbD3LVWzN4c+I6Tp2N9To0Y0ImoKQjIi1wvsSrA8P9jo8D7gteaBe9dh6cDgEFgF5ALhEp7i7ZfPabLCKv+Bz6NtBWRJ4Qkeoi8gRwOc4VljEhlS0qgntaVWLKgDZ0qFWcd6Zs5Mo3pzN+5W6bLttkCYFe6byK0/OrJvCw37bFQP1gBJWIBkAToAawHtjts/j2RKsElIh/oqpzgB7AbcByoCfQXVVtmB7jmWJ5YxjSox4j7mlCnpgo7vtmMT0/X8Cm/Sm5eDcmfAXUkUBETgLXq+p4EYnEGQKnoaouFpFWwARVzZFOsYacdSQwoXA+No5v5m3jjT/Wc/pcLHe0qECftlXInT0jNLkaE7hgDoNzGsiZyLYSONNVG2MCEBUZQa/mFZg6oA1d65Xio+mbueKNaYxetsuq3EymE2jSmQX0c69y4sX/VdwJTAlKVMZkQYVzZ+d/N9Rh5P3NKJonhr7fLaHHx/NYt+e416EZEzSBJp1ncNptlrmPFbhNRKbitLc8F9zwjMl66pctwKgHmvNy19qs23ucDu/M5LnfVnHsdFgP2m4MkLqpDerz73TUkTi91mYCD/tPYx3urE3HeO3w32d54491fDv/Lwrlysb/XXMJEQJvTFzPriOnKJk/B4+2q0aXeqW8DtWYfyTVppPqEQncOXMKAkfi76PJbCzpmIxi5c6jPPPrSpb8dYQIgTifP9sc0ZG8cn1tSzwmwwjqfDrx3DlzdmXWhGNMRlKrVD5+vq8Z+XNEX5BwAE6di2XwhHXeBGZMgGx+XWPCRESEcPRUwu06u2wUaxMmLOkYE0ZK5k/4Nri8OaKte7UJC5Z0jAkjj7arRg6/qREiBI6eOkevLxay77hNlW0yNks6xoSRLvVK8cr1tSmVPweCM4L1GzfW4YXrajJv80HaD5nJxFV7vA7TmER5Pp9ORma910w42bjvOP1GLGXlzmP0aFSGZ66tQS4bSsd4IK3TVSc3RbWX01UbY1yVi+ZhZO/m9G5TiRGLttPxnZks+euw12EZc4GUTldtl0PGhIFsURE83r46baoW4eEflnHDh3Pp27YKD1xeiahIq0033rPqtSRY9ZoJZ8dOn2Pgr6v4ZclO6pfNz1vd69pMpSYk0uXmUGNMxpY3Jpq3utflnZvqsXHfCTq8PZMfFm23rtXGU6lqZRSROkA1IMZ/m6oOS2tQxpjg6VynJA3LFeDhH5by2E/LmbJmH69cX5sCubIlf7AxQRboJG75gd9xRpQGEPfnPydR1UgyCateM5lJXJzy6azNDJ6wjgI5s/H6jXVoVbWI12GZTCiY1WsvA4VwRpgWoCvQFvgW2Aw0TkOcxph0FBEh3NOqEqMeaE6+HNH0/HwBz/22itPnYr0OzWQhgSaddjiJZ577fIeqTlPVnsAk4KFgBmeMCb6aJfPxW58W9GpWni9mb6Xzu7NYveuY12GZLCLQpFMC2KyqsThTV+fx2TYS6BiswIwx6ScmOpJBnWvy1R2NOXzyHF3em80nMzYT5z+EtTFBFmjS2QPkdx9vA5r6bKscjICMMaHTumoRJvRrRZtqRXhp7Bpu+XS+jVht0lWgSWcW/yaar4GBIvKRiLyHM5vohGAGZ4xJfwVzZeOjWxvwWrfaLNtxhPZDZjBm+S6vwzKZVKBdpp8DSrqPB+N0KugO5ARGA32CF5oxJlREhO6NynJZhUL0G7GUB4cvYcqafQy6riZ5Y6K9Ds9kIjYiQRKsy7TJis7HxvHu1I0MnbKR4nljeKt7XRpXKOh1WCaMJNVlOlVJR0TKAGVI+ObQKQGfMIOypGOysj+3HebhH5ay/dBJerepxENXVCVblA1iYpKXVNIJqHpNRCri3JMTfz+O782h4v7MNDeHGpOVNShXgN/7tuSF31bz3tRNzFh/gLe616Vy0dxeh2bCWKAjEkzBGf7mVWAtcNZ/H1WdHrToPGZXOsY4xq/cwxMjl3PqXCxPd6zBLZeVRUSSP9BkSUG70gEaAb1U9ee0h2WMCRftaxWnftn8DPhpOU+PWsmUtft4rdulFMmT3evQTJgJtIJ2Bwlc3RhjMr+ieWP4slcjBnWqwayNB2g/ZAaT1+z1OiwTZlIz9trjImKTchiTBUVECL2aV2BMnxYUzRvDnV8t4qlfVnDyrE0abFImoOo1Vf1aRKoDW0VkHuA/F66q6m1Bi84YkyFVLZaHUQ80482J6/l45mbmbjrIkB51ubR0fq9DMxlcoB0JegGfA7HAPi6ualNVrRi06DxmHQmMSd6cTQd45Idl7D9+hn5XVqF3m8pERlgng6wsaPfpiMg2YBFwp6oeCU54GZclHWNS5ujJczz960p+W7aLhuUK8Fb3upQpmNPrsIxHgjmfTiHg/ayQcIwxKZcvZzTv9KjLkO51WbfnONe8PZOf/9xhU2Obi6RmwM9L0iMQY0x4ExG61CvFuH4tqVEiL4/8uIwHv1vCkZPW4dX8K9Ck8xBwt4jcIiKFRCTCf0mPIAFEpKCIDBWRtSJySkS2i8gHIlIomeN6iYgmsFw0hI8xJu1KF8jJd/c04bH21Ziwcg/th8xk9sYDXodlMohAk8QaoDYwDKcjwTm/JT3/pSkJlAIec2P4L8602d+l4NiTOBPQ/bOo6ul0itOYLC8yQri/TWVGPdCcnNkjueXT+bz0+2rOnLepsbO6QEckeB5nfLWQU9WVwPU+qzaKyKPAGBHJq6pJzberqronfSM0xvirVSofv/dpyctj1/DJzC3M3HCAt3vUo1rxPMkfbDKlsJ7aQER6AF8AeVQ1wbvT3G7en+GMphAJLAWeUdUliex/D3APQNmyZRts27Yt+IEbkwVNXbuPR39axrHT53m8fXVub1aeCOtanSkFfWqDjEBE8gMLgXGq2jeJ/ZoCVYFlQB6cdqkOQB1V3ZDUa1iXaWOC68CJM/zfz8uZtGYfLasU5vUb61AsrzWvZjbB7DIddCLyYiIN/b5LG79jcgG/ATtx2ngSpapzVfUrVV2qqjNxZjrdhM1yakzIFc6dnU96NuTlrrVZtPUw7YbMYNyK3V6HZUIo2TYdEYkFmqrqAhGJI+k2HVXVQNuJhgDfJLPPXz7x5AbGuk+vDbRDgKrGisgioEogxxljgkNEuPmysjSpWJD+I5bS+9vF3NCgNAM71SCPTY2d6aUkQTyP0x4S/zio9XGqegBIUX9KEckDjMOZMK69qp4I9PXEmQTkUpzqNmOMRyoWyc1PvZsxdPIG3p26kflbDjKke10alLOpsTOzsGnTcRPORCAv0AU47rP5kKqedfebDCxQ1Sfc5wOBecAG99i+wK1Ac1VdkNRrWpuOMaGxaOsh+v+wlJ2HT/Hg5ZXpc0UVoiM9r/03qZSh23QC0ABoAtQA1gO7fZZmPvtVwrkXJ15+4GOce4wm4tzr0yq5hGOMCZ2G5Qsytm9Lrq9fmnembOSGD+ey5cDfXodl0kHAVzoichtwE1AW8O92oqpaKUixec6udIwJvbErdvPEyBWcPR/Hs51q0KNRGZsaO8wEbbpqEXkGeA5YiXO/y5k0R2eMMT461C5B/bIFGPDjMp4YuYLJa/bxWrfaFMptU2NnBoFObbAV+EVV+6dbRBmIXekY4524OOXz2Vv43/h15M0RzeAbL+XyakW9DsukQLCnNvgt7SEZY0zSIiKEu1pWZHSf5hTOnY3bv1jIs7+u5NRZG78tnAWadKYDddIjEGOMSUj14nkZ9UBz7mxRgWFzt9Hp3Vms3HnU67BMKgWadPoBt4tITxEpHMqpDYwxWVdMdCTPXFuDb+68jOOnz9H1/dl8MG0TsXHhccuH+VegSWI9UAtnkM29hHZqA2NMFteiSmEm9GvFVTWK8dr4tdz0yTx2HD7pdVgmAGEztYExxgDkz5mN926uz8jFOxk4ehXXDJnJC11q0aVeKa9DMykQNiMSeMF6rxmTsW0/dJL+I5ayaNthOtcpyQtdapEvh43f5rXMMiKBMcZcoEzBnHx/TxMGXF2VsSt2c82QGczddNDrsEwSLOkYY8JaVGQED7atws+9mxETHcnNn87jlbFrbGrsDCrZpCMisSLS2H0c5z5PbElw9k5jjElvdcrkZ0zfFtzUuCwfzdhM1/fmsGHv8eQPNCHl+dQGxhgTLDmzRfFy19q0rVaUx39ezrVDZ/HENdW5rVl5G78tg7COBEmwjgTGhK/9x8/w2E/LmLpuP62rFmHwDZdS1KbGDomkOhIkm3REpG0gL6aqUwLZPyOzpGNMeFNVvpn/Fy/9vpoc0ZG82u1S2tUs7nVYmV5ak47vFNWJXZ+qu01VNTK1gWY0lnSMyRw27jtBvxFLWLnzGD0aleGZa2uQK3ugtymalArG1AbHgZ/dxWZWMsaElcpFczOyd3OGTFrPB9M3MXezMzV2vbIFvA4ty0nJlU5roCfQDae32y/AV5mpGi0xdqVjTOazYMsh+o9Yyp5jp+nbtgoPXF6JKJsaO6jSdHOoqk5X1TuB4sB9QFFggoj8JSKviMglwQ3XGGPST+MKBRnXryWd65TkrUnrufGjuWw7aBU4oZLi9K6qp1V1uKpegzNV9dtAB2CliLybXgEaY0yw5Y2J5q3udXnnpnps2neCDm/P5IeF27HevOkvtdeUB4Gt7qKAVYwaY8JO5zolGd+vFZeWzs9jPy+n9zeLOfS3DZafngJKOiLSXEQ+BHYDXwEngI7ArekQmzHGpLuS+XPw7V2X8WSH6kxeu5f2Q2YwY/1+r8PKtFIyDE5lEXlORDYBM4BqwACguKreoqoTVDUuvQM1xpj0EhEh3NOqEqMeaE6+HNH0/HwBg0av4vQ5G78t2FJ6n84xYCTwNbAtqf1VdXPQovOY9V4zJus5fS6W18av5YvZW6laLDdDutejRsm8XocVVoJxc2i8ZFvZ7OZQY0xmMH39fgb8uIyjJ88xoF1V7mpRkYgIG78tJdJ6c+jtQY7HGGMyvNZVizChXyueGLmcl8euZera/bzxnzqUzJ/D69DCmg34mQS70jHGqCo/LtrBoN9WERUhvNS1Np3qlPQ6rAzNZg41xphUEhH+06gM4x5qSaWiuenz3RL6j1jKsdPnvA4tLFnSMcaYFChXKBc/3tuUfldWYfSyXVwzZCYLthzyOqywY0nHGGNSKCoygn5XVuXH+5oSFSl0/3gu/xu/lrPn7a6RlLKkY4wxAapftgBj+7bkPw3K8P60TXT7YA4b953wOqywYEnHGGNSIVf2KF674VI+/G8Ddhw+ybVDZ/L1vG02flsyLOkYY0watK9VnAn9WtG4QiGeGbWSO75cyP7jZ7wOK8NKc9IRkfoi0sVd6gcjKGOMCSdF88bw1e2NGNSpBnM2HaT9kBlMWr3X67AypFQnHRFpICJrgW+B3sD9wHcisk5EGgQrQGOMCQciQq/mFfitTwuK5o3hrmGLePKXFZw8e97r0DKUtFzpfAw8qKqXqGo7Vb1aVasBDwKfBCe8C4nIJyKySUROich+Efk1JZPIiUg3EVktImfcn13TIz5jjKlaLA+jHmjGva0r8t2Cv+j4ziyWbT/idVgZRlqSTm5VneS/UlX/AHKl4bxJWQT0Ai4B2gECTBKR6MQOEJGmwAicK7K67s8fReSydIrRGJPFZY+K5IlrLmH4XU04cy6Wbh/M4d0pG4iNs04GqR4GR0RmAp8Dw1Q11l0XiTNW2+2q2jxoUSYew6XAMqC6qq5LZJ8RQEFVvcpn3SRgv6relNT5bRgcY0xaHT15jqd/Xclvy3bRsFwB3upelzIFc3odVrpKr2FwbgNuAg6JyBoRWQMcArq729KViOTCSXB/4cxgmpimwES/dROAZukTmTHG/CtfzmiG3lSPt3vUZd2e41zz9kx++nNHlu1anZJRphPkzptztYgUBsq6q/9S1QNBiSwRInI/8D+cKrx1wBWqmlT/xOKAfzeSve76hM5/D3APQNmyZRPaxRhjAnZd3VI0KFeAh39YxoAflzF17T5e6lqL/DmzeR1aSKW5y7SqHlDVxe5yAEBEiqb0eBF5UUQ0maWNzyHfAvWA1sB6nPaZ5K5V/f+lkATWxb+fj1W1oao2LFKkSErfhjHGJKt0gZx8d3cTHm9fnYmr99BuyAxmbUjX/9MznPS6OTSQhpAhOB0DkloWxO+sqkdVdYOqzgBuAKoC3ZI4/x4uvqopysVXP8YYk+4iI4TebSrxy/3NyZ09iv9+Np8Xx6zOMlNjp7p6TUQ6J7E5JqXnca+OUpvqxV2yJ7HPXOAqYLDPuquAOal8TWOMSbNapfIxpk9LXhm3hk9nbWHWxgMM6VGX6sUz99TYqU46wC/AdJwvfX950nDeBIlIZZwrmknAfqA08H/AGWCMz36TgQWq+oS76m1ghog84cbcFbgcaBHsGI0xJhA5skXy/HW1uLxaUR79aTmdh87msfbVuKN5hUw7NXZaqtc2AHeq6uX+C6m/cknKGaANMA7YiHPvzXGgqaru8dmvElAi/omqzgF64PSoWw70BLqr6vx0iNEYYwJ2efWijO/XklZVi/Di72vo+fkC9hw97XVY6SIt9+k8AUxS1YUJbHtKVV9Ka3Bes/t0jDGhpKp8v3A7z/+2mmxREbxyfW061C6R/IEZTFL36aQ66WQFlnSMMV7YvP8E/UcsZdmOo9zQoDQDO9UgT0yiA69kOEG7OVRE+olIKxEJepuNMcYYR8UiufmpdzP6tq3MyMU76PDOTBZtzRxTYwfapvMqMBU47I4mPVxEBojI5SKSLx3iM8aYLCk6MoKHr67Gj/c1BeA/H83ljYnrOBcb3lNjB5p0auAMObMEWIwzEsFAnB5lB0XkTxG5V0RscjhjjAmCBuUKMrZvS7rVL83QKRu54YM5bN4fvlNjB5ocPgR+UdVGqnqTqrYAygFDgcM49768BExIauRnY4wxKZcnJprBN9bh/Vvqs/XgSTq+M4vh8/8Ky/HbAk06LYCxvitU9ZCq9gNG4gz42RCoCQwIRoDGGGMcHWqXYEK/VjQoV4Anf1nB3cP+5OCJ8JoaO9Cksw9n3LOEjAB6qupW4C3gljTEZYwxJgHF88Uw7I7GPHNtDWZs2E+7ITOZunaf12GlWKBJZxjwtIgklHhKAYXdx3/i3KRpjDEmyCIihDtbVGD0g80pnDsbt3+5kGdGreTU2Yw/flugSecFnKFv5onIMBHpISJtRORe4DX+HegzO3AuiHEaY4zxU714XkY90Jy7WlTg63nbuHboTFbuPOp1WEkKKOmo6jlV7QL0wZkcbTgwGfgAOAjc6+7aBNgUvDCNMcYkJCY6kqevrcG3d13G32di6fLebN6ftjHDTo2dphEJRKQizjhn+4EN6p5MRJoAOVV1SlCi9IiNSGCMCSdHTp7lqV9W8vuK3TQuX5A3u9ehdIHQT42dLsPgiEg2VT2bpsgyOEs6xphwo6qMXLyTgaNXIcALXWpxXd2SiIRu1OpgDoMTJSIvich+4JSIHBCR70WkWVAiNcYYkyYiQrcGpRn3UEuqFc9DvxFL6fv9Uo6ezBjN7IF2JHgKeBj4GXgcZ+romsBMEfm/IMdmjDEmlcoUzMn39zRhwNVVGbdiN9e8PYM5m7yfGjvQpNMTeFJV71PV11X1IVWtDdwNDBKR64IfojHGmNSIiozgwbZV+Ll3M2KiI7nl0/m8MnYNZ85717U60KRTAmfMtQuo6ufAEGwUAmOMyXDqlMnPmL4tuLlxWT6asZku781h/d7jnsQSaNLZANRNZNt4oE6aojHGGJMucmaL4qWutfm0Z0P2HTtNp6Gz+HL2lpCP3xZo0vkMeFZE6iawrTQQvkOfGmNMFnBljWKM79eK5pULM+i31dz2xUL2HQvd1NiBJp2hwDRgvjsiwU3uXDp348y1Mz7YARpjjAmuInmy89ltDXmxSy0WbDlIuyEzGL9yT0heO+D7dNy5cvoDD+Fc3cSbCNykqoeDF5637D4dY0xmt3HfCfqNWMLKncfo3rAM9cvm550pG9l15BQl8+fg0XbV6FKvVEDnTNPNoSJyJbBYVS+aK1VEquEM8rlNVXcEFFUYsKRjjMkKzp6P4+3J63l/qjN6mW9WyBEdySvX1w4o8aT15tCJwH4R2SwiP4nIEyJytYgUVtV1qjo7MyYcY4zJKrJFRfBou+oUyp0N/8uQU+diGTxhXdBeKyoF+9QE6gMN3OX/gDyAisgOnGkMFsf/VNW9QYvOGGNMyBw8kfDIZruOnAraaySbdFR1DbAGZ/QBAESkKk4Cik9GjwD5cK7KIoMWnTHGmJApmT8HOxNIMCXz5wjaawTaew0AVV2vqt+p6qOq2lZVCwBVgZuCFpkxxpiQerRdNXJEX3jdkCM6kkfbVQvaa6Skei1FVHUjsDFY5zPGGBNa8Z0FBk9Yl6bea0kJWtIxxhgT/rrUKxXUJOMvVdVrxhhjTGpY0jHGGBMylnSMMcaEjCUdY4wxIWNJxxhjTMgEPOBnViIi+4FtqTy8MOD93LDhxcosMFZegbHyCkxayqucqhZJaIMlnXQiIosSG/DOJMzKLDBWXoGx8gpMepWXVa8ZY4wJGUs6xhhjQsaSTvr52OsAwpCVWWCsvAJj5RWYdCkva9MxxhgTMnalY4wxJmQs6RhjjAkZSzrGGGNCxpJOConIAyKyXESOuctcEenos11EZJCI7BKRUyIyTURq+p0ju4gMFZEDIvK3iIwWkdKhfzehJyJPioiKyLs+66zMXG45qN+yx2e7lZUfESkhIl+JyH4ROS0iq0Wktc92KzMfIrI1gc+Yisjv7vaQlJclnZTbATyOM0V3Q2AKMEpELnW3P4YzbXcfoBGwD/hDRPL4nGMI0A1nhtWWQF5gjIhk6im+RaQJcDew3G+TldmF1gElfJbaPtusrHyISH5gNiBAR+ASnLLZ57ObldmFGnHh56s+oMAP7vbQlJeq2pLKBTgE3Ivzwd8NPOWzLQdwHLjXfZ4POAvc4rNPGSAOaOf1e0nHMsoHbALaAtOAd931VmYXltMgYGUi26ysLi6Tl4HZSWy3Mku+DJ8CjgA5Q1ledqWTCiISKSI9gNzAHKACUByYGL+Pqp4CZgDN3FUNgGi/fbYDa3z2yYw+Bn5S1Sl+663MLlZRRHaKyBYR+V5EKrrrrawu1gWYLyIjRGSfiCwVkQdFRNztVmZJcMvpTuAbVT1JCMvLkk4ARKS2iJwAzgAfAl1VdQXOLwtgr98he322FQdiuXgAPd99MhURuRuoDDyTwGYrswvNB3oB1+BURRYH5ohIIaysElIRuB/YDLQD3gZeBR5wt1uZJe0qnETzqfs8ZOUVFVCYZh1QF8iPU6/5lYi08dnuf6etJLDOX0r2CTsiUg2nCqSlqp5NYlcrM0BVx/k+F5F5OF+otwHz4nfzOyxLlpUrAlikqk+4z5eISBWcpPOuz35WZgm7G1ioqkv91qd7edmVTgBU9ayqblTV+A/7UqA/EN/LyD/bF+Xf/xz2AJE4w4Untk9m0hTnva4UkfMich5oDdzvPj7o7mdllgBVPQGsAqpgn6+E7AZW+61bA5R1H1uZJUJEigLXAZ/4rA5ZeVnSSZsIIDuwBecXclX8BhGJwendMcdd9Sdwzm+f0ji9buL3yUxG4fS+quuzLAK+dx+vx8osUW5ZVMf5crXP18VmA9X81lXl3/mvrMwS1wunieB7n3WhKy+ve1CEy4JTX9wSKI/zZfoKTq+Na9ztjwPHgOuBWu4vdBeQx+ccHwA7gSuBesBUnKulSK/fX4jKcBpu7zUrs4vK5nWcK8EKwGXAGLdsyllZJVhejdwvwKdw2g1vBI4CD9jnK8lyE5x/+D5JYFtIysvzQgiXBfgS57+oMzj91yfh003Q/WUOwvnP9DQwHajld44YYChO1dJJ4DegjNfvLYRl6J90rMz+fZ/xf+Bn3T/qn4EaVlZJlllHYJlbHuuBvriDGFuZJVpml+O0vzROYFtIystGmTbGGBMy1qZjjDEmZCzpGGOMCRlLOsYYY0LGko4xxpiQsaRjjDEmZCzpGGOMCRlLOsaEUCKTaPkvW5M4/kuf/ab5rC/vrrvLb//CIrJERA6JSCN33SSfc3yTXu/VmITYgJ/GhFZTv+e/4NzgOMhn3ZlkzrEH6Ipz93iiRKQYMBkoBrTVfwd37IMzN8ovKYrYmCCypGNMCKnqPN/nInIGOOC/PhlnkttfRErhJJx8QGtV/WdwTFVd4/PaxoSUJR1jMhkRKYsznXp2nISz3uOQjPmHJR1jMpeKOLM9KtBKVbd4HI8xF7CkY0zm8gTO7I41LeGYjMh6rxmTuYzHGS34bRHJ7nUwxvizpGNM5vIzcAdwNfCjiER7HI8xF7CkY0wmo6pfAfcBnYDhIhLpcUjG/MOSjjGZkKp+DDwE3AAMExH7WzcZgnUkMCaTUtV33Had/wFnROROtVkbjccs6RiTianqYDfxvIAzBfH9HodksjhLOsZ4SFXLp+Y4EYlyDtdY9zxbcXqtJfQaLwIv+hwbgVWtG4/YB8+Y8FMOOIczzE1qTHSPLxe0iIxJIbEqXmPCh4iUBwq7T4+r6rpUnKMqkNd9etBuIjWhZEnHGGNMyFj1mjHGmJCxpGOMMSZkLOkYY4wJGUs6xhhjQsaSjjHGmJD5f5c6JI4WtXBlAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZEAAAEhCAYAAAC+650iAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAA8j0lEQVR4nO3dd3gU1frA8e9L773XSO9NQLGAHRUL9oIKdn+Wq1iuhauAYlfEctWrXkWvBSwIiAqICKIIClITCDX0HkJNz/v740x02WzKbjbZlPfzPPskO3Nm5t2Tzb47Z86cI6qKMcYYE4oykQ7AGGNM8WVJxBhjTMgsiRhjjAmZJRFjjDEhsyRijDEmZJZEjDHGhMySSBiJyDARUZ9HioisF5FnRKRSEYjtpkjGUNyJSJz3d/00m/VzvPW/hPGY40Ukzud5lHeMYX7lHhORzSKSJiJLvWUqIqPCGEuciIzPpUyU3/+A/6NHuOLJjYjUEpFRItIrwLo5IjKnsGLJ7rgicppXL6f5LLtPRC4t7NhCVS7SAZRQVwBbgerAJcCj3u/3RDCmYbi/9/sRjKEkOAQMFpHqqnooc6GItAT6e+sL0g6gH7De59h9gaeBF4HJPjH0w70PI+FZYGqA5WsKMYZawEhcHfzpt+7OQowjJ3/i/k4xPsvuA34BJkUioGBZEikYS1V1nff7DyLSFrhZRO5V1YxIBhZOIlJRVZMjHUch+wE4C7gMGO+z/HogDtgClC2og3v1vcBvcUfv59uqusGnrH+5wrQhwsfPkarG5F6q4KnqQbL+PYsVa84qHH8ClYF6mQtEpIqIPC8iG71mr40iMkJEyviUqSQir4jIShE5LCI7ReQbEengfwAROU5E/ueVSRaRDSLyqrduDjAAONmnWWGOz7Z9RWSWd4wjIvKj9+3Wd//jRWSriPQTkfkikgi8kNOLFpFLRORXb78HReR3EbnIZ/3dIvKbiMSLSIKILBCRQX77KCciT3nNgkkisldEfhGRU/zK3Soiy3zK/FdE6uQUX4gSga9wScPX9cD/gCxDQIhIYxH5yIsrWUSWi8h1AcqdKSJ/eq9hvYjcHqDMMc1Z3t9xvLd6vW8TVqDmLBHpLiJTRWS/iCR6f59TAxznXq/5KklEFgUqUxBEpIaIvCEi2726ihWR4SIiPmUym4Au896X+7331yciUtcrEwVs9DZ51+d9P8xbn12z0mAR+Y/3ntzv/f+VFZE+3vvuiIhEi8hAv7j7iMiX3v9Iohf3MyJSOZfXe0xzlrimy5bAEJ+Yx4vI5d7v3QPsY46I/BZsXYeLnYkUjijgALAP3AcjMAPoBDwFrABOBB4H6gAPeNtVxDWDjcE1Y9TBnYYvEJEOqrrT299xwO/AUdzp+1qgOXCOt587gY9x35AzP5gOett2A+biTqeH4T4EHwHmisiJqrrM53XUBCYALwGP4T5QAxKRe4DXcM0rQ4HDQC+vLnzr5T3cN/hywIXANBE5X1W/98o8DAwHRgBLgRpAb68uMo/1nFdnrwEPAU29OusiIiepanp2cYboI+BHEWmmqltF5ESgHS6JDPAtKCJVcfVbG1dnW4DrgP+JSBVVfccr1xH4DlgEXI37248CqgE5xX+nt79HgUtx75OATVjirg3MA5YAt+LeL3cAs7x6WuyVuxkYh0tOE4E2wGe492JelfHe5740p7+FuC9Q3+LeJ0/g/i8GAWOB+rj68zUOmAVcA7QFngGaAKfj6uFSXJOQb9PaenI2ztvmKlzz5L9w782zcM2F27xlk0Skparu9bZrgXt/jsc1J3b2XkMr3N8zry7BvQ+W4f7+AHuATcB23P/vX01xItIe9567MYhjhJeq2iNMD/7+EG6Pe+PVBm4C0oC7fcpd75Xr77f9CCAFaJDN/ssCVXBv0uE+yz/CfUg3ySG2OcAvAZZ/CSQAtXyW1QDigUk+y8Z7MV+ch3qo4cU4KbeyPtuU8epsJjDFZ/m0nPaDS0TpwBN+y0/24h0cxr9vHC4Zi/f7I97yN4FfA9UzcLcXx2l++5oF7AbKes8/AfYCVX3KNPfeD3F+r1eBYT7LbvGWRfkdQ4FRPs9/BFYBFfzeU6uAyT5/hy3AdL99XeXtb3wudZQZX6DH4Vy2vcD/tXnL3wOSgXre89O8cv4xDvGWn+kXyy3Z/D/M8Xmeuc/3/cr96S0/xWdZN2/Z0Gxeh3jv5euADKBuHo57ms+yOODjAPsdhfsy6vseGQvsByqH630e7MOaswrGaiAV90H8X+A/qvqGz/pzcd8s5nvNNeW8b20zgfK4sxIARORKEVkoIgm4ZHQE9+20vc/+zgGmqer2EGLt722bkLlAXTvtVPy+VXvHn5aHfZ7kxfhOToVE5HgRmSYiu7x9pwJnc+xr+wM4X0SeFpFTRKSC327Oxn3wfeJXlwtxZ1v9czh+Gd9txKcpMSfq/ns/Bq734rkKl8gD6Q9sU9U5fss/xn277uQ97wd8p6pHfI6zBfg1LzHlxmtWGQB8AWT41JPgElpmPTXzHp/77eIr3N8or8YAffweuTWJ9cd96H7mt/xjoAKujnz5x/iFt71/uWB87/d8NXBEVX/xWwYuyQN/NcM9LyLrcQkvFXdmKrizpHB4B/cl8hrvmJVwZ/kfqWq2rQIFzZJIwbgE909zPu4f9E4RucFnfQNcu2eq3+N3b31mu+6FuOaEVcC1wAnefvcAvl2G6xJ6L5w6uFN/fztxZ1K+dmvemobqej+zjUlEmuO+GdfB9Vo7CffapnPsa3sG10R3Ea4pZp+IfCAimdeXGng/15G1Pmv4xBLI+37lg+m59hEuAYwEquL+ToHkVL+Z6wEaA7sClAu0LBR1cGcdj5O1nu4GantJtHGg46pqGl5zbB5tUtVFfo8leYgxXrN21vCvq0z+MabgvpU3DSJOf/v9nqfgztT9jwPHvk8/wDUNvob7YtMHuCtAuZB5XxKneMcB1wu0DvCfcOw/VHZNpGCsVK93lojMBpYDL4rIV943zX24i35XZrN9nPfzamCdqg7LXCEi5cn6z7SX0P9x4oFGAZY38tb5yuu8AZntxE2BldmUORd3jeVKVf0r2YhIlWMOqJoKPA88LyKNcE0eY3HfyK7i7w+2c8j6AQA5f/CNAnzPEPdmUy4LVV0jIgtx148m+Z7J+Ynn2DOrTJl1nhnfDqBhgHKBloUiAfct/d9kc9akqhkikpnwjjmud9aSU0IOh3igjohU8Pmghqx1lck/xgq4Lz7bCi7ErLwzgotxTYev+izvWgCHexN3Pe543PWReRrhnmZ2JlLAvG9VD+G+MWdeEJuOOxU+HODb2iL9+2JdFbI2IVxP1i6kM4ELRKQx2UvG9RDzNxcYJCJ/XTT1fr/QWxeK+bhrNLflUCYzWaT6HLcd7lpGQKq6U1Xfw53ddfEW/4D7cGyRTV1uzGF/cX5l4/L06v72AvANxyYif3OBZiLi/7quxV0TWeU9/w3XbFc1s4B3tpZtfQTD+/IyD+gO/BmorryiW3HXRPy/4FxGwX/pnIv7TLrCb/kQ3BmBf1dY/xiv8LbP7KmUeUaTYw+pMKiI+59M9Vs+LMT9Zfe/iqrOxr1nxuLeG2+HeIywsTORQqCqU0XkD+BBEXkDdxH1Rtw3ipdxPTEqAK1xzTaDVfUoLtkMFpFXcNcijgf+gd/pNa5JZRDuGsszuKadpsC5qprZlTQG16x2Fa6HyiFVjcX1DrvAi+V53NnGw7gP+SdDfL2HRORR4HUR+cp7vYeAHkCSqr6OSwRpwEdeHTQGRgOb8flyIyJTvPr5E3em0RN3FvMf71jrvbjf8HqqzAWScEn6bOA9Vf0plNeRh9c5idxvCBsP3IvrzTMC9yE9xIvtdp/mwTG4D8GZIvIi7v0wmvA1ZwHcD/wMzBCR/+LOfurhekOVVdVHvLOR0cB7IvIBrjdeG1zvr4NBHKuV12vN3xpV9T/DzfQ97ia7t0WkPhCNaxK+BXjW58tVps4+MbbD3XA5V1V/9Nbvwp29XC0iy3HXEzeqajDNcrlS1QMisgB4wDuT24vrUBNq60AMcKqIXIBrytvr9wXnbeBV7zhfhRx4uETqin5JfPB376w2Adad460b7j2vhGtOWY375hGPu4g8CijnlSmD+3DZjuuOORf3IRqHXy8ZXAL6DPfGSgY2AK/4rG+E6zp4yItjjs+6E3Af6odx/2g/An399j8e2BpkfVyOu8CdiPsAWghc4LP+Su/1J+E+MK72jhPnU+YB3DfQfd5+Yr06Ku93rOu9cke817EKd4bQLIx/3zgC9JrxKzMHv15wuAT5P5+/zXLgugDbnoXrfpv597s9QH1EEWLvLG9ZR9yH7m7vOFtxnSjO9yt3L67zRxKu2/Epgd53AV5DZnzZPS7PZfsa3t9tB+7sYw2ui7f4lDnN29elXv0keO/rT/F6cPmUHYz7UE71rTey7yV1Vl7e917ZMX6v+3svjt3eaxhE1p5X2R3Xt0wH3FnjUQL0iPPeTwq8GK73dn4e4gVljDHFgndj3k/A2ao6K7LRFD4RuRV3Jt5O/x4ZI2KsOcsYY4oBEemEa3EYjbuvJ+IJBCyJGGNMcfEmriv8fFy37CLBmrOMMcaEzLr4GmOMCVmpas6qV6+eRkVFRToMY4wpVhYvXrxXVesHWleqkkhUVBSLFi3KvaAxxpi/iMim7NZZc5YxxpiQWRIxxhgTMksixhhjQmZJxBhjTMgsiRhjjAlZqeqdZYwxpc3kJdt4cUYs2xMSaVKrMg8NbM/gnvmZt+tYlkSMMaaEmrxkG49OWkFiqptxYFtCIo9OWgEQtkRizVnGGFNCvTgj9q8EkikxNZ0XZ8SG7RiWRIwxpoTalpAYcPn2bJaHwpKIMcaUQMu2JFC2jARc16RW+GYMtmsixhhTgqRnKG/NWce4WWupXqkcR5PTSUnP+Gt95fJleWhg+7Adz5KIMcaUEFvijzJ84lIWbdrPhd2bMObiLvwUu9t6ZxljjMmeqvLVn9sYNTUaAcZd1eOvRDG4Z9OwJg1/lkSMMaYYSziawoivV/Ltih30Pa4OY6/sTrPaVQrt+JZEjDGmmPp13V4e+HwZew8n889z23N7/9bZXkwvKHlOIiJSAZgIvKKqPxdcSMYYY3KSlJrOSzNiee+XjbSqX5XJQ0+mS9OaEYklz0lEVVNE5Czg1QKMxxhjTA5idx7i3glLWL3zENef2JLHzu9I5QplIxZPsM1ZvwInAnPCH4oxxpjsZGQoH8yP4/npq6lRqRwfDOvD6R0aRDqsoG82fAC4WUTuFpFmIlJWRMr4PoINQEQai8iHIrJHRJJEJEZEBuSyTVcRmSsiiSKyTUSeEJHCbQg0xphCsutgEkM/+J2npsXQv209pt/Xv0gkEAj+TGSF9/NVAjdraTD7FJFauLObX4BBwB6gFbA7h21qAD8APwN9gPbAeOAI8HJej22MMcXB9yt28OjXK0hOzeCZS7pyTd/mFKXvzMEmkSdxiSJc/gnsUNUbfJZtzGWbIUAVYKiqJgIrRaQjcL+IjFXVcMZnjDERcTg5jdFTo/li8Va6NavJuKt60Kp+tUiHlUVQSURVR4X5+IOB6SIyETgd2A68B/w7h2TQD5jnJZBMM4CngChyT0LGGFOkLd4Uz/CJy9i6/yh3n96Ge89qS/myRXOow0jfJ9IKuBN4BXgO6AG87q17I5ttGgFb/Zbt8ll3TBIRkduA2wBatGiR74CNMaagpKZn8Prsdbwxey1NalVm4u396BNVJ9Jh5SiUC+E9RWSSiOwVkTQR6eUtf0ZEzg3h+H+q6qOqukRVPwBeA+7KZTv/sxTJZjmq+o6q9lbV3vXr1w8yPGOMKRwb9x7hird/47Uf1zK4Z1O+v/fUIp9AIMgkIiKnAL8BHYBP/bbPAO4I8vg7gBi/ZauAnE4ZduLOOHxldlPYhTHGFCOqyoTfNzPotXls3HuEN67tydgre1C9UvlIh5YnwTZnPYe7/jAYKAvc7bPuT+CGANvk5Fdc7ypf7YBNOWzzG/C8iFRS1SRv2dm46ylxQR7fGGMiJv5ICo98tZyZMbs4uU1dXrqiO41rhm+uj8IQbHNWL+At76K3f9PRXiDY9qJXgBNFZISItBGRK4B/AP/OLCAiz4rIjz7bfAocBcaLSBcRuRR4BLCeWcaYYmNO7G4GjvuZObF7+NegjvzvphOKXQKB4M9EknDdawNpDBwIZmeq+oeIDAaeAR4HNns/3/Tbb2ufbQ6IyNm4RLMI2I+7P2RsMMc2xphISEpN59nvVvHhb5to17AaH97Yl05NakQ6rJAFm0R+Ae4TkSk+yzK//d8MzA42AFX9Fvg2h/XDAixbAfQP9ljGGBNJ0dsPcO+EpazbfZgbT47i4XM7UKl85Ma9Codgk8jjuOsYy4AvcQlkqIiMBY7H3UFujDHGR3qG8t68Dbw0M5baVSrw0U196d+uZPQWDfZmw2Ui0h94ERiB61p7NzAPGKCqseEP0Rhjiq9tCYk88PlSFmyI59zOjXj20q7Urloh0mGFTdA3G6rqn8CZIlIJqAMkqOrRsEdmjDHF3NRl2xnx9QoyMpQXLu/GFcc3K1LjXoVDSHese4MgdgGaAltFJFpVD4Y1MmOMKaYOJKYycspKJi/dTq8WtXjlqh60rFs10mEViKCTiIg8gRsSvhp/3yl+SEReVNUx4QzOGGOKm4Ub9nH/58vYeTCJ4We1467TW1OuiI57FQ5BJRERGY27uP4eMAF3h3hD4BpgtIiUK4BBGo0xpshLScvglVlreHvuelrUqcIXd/SjV4vakQ6rwAV7JnIr8LKqPuSzLBqYLSIHcAMdjgpTbMYYUyys232Y+yYuYeW2g1zdpzmPX9CJqhUjPb5t4Qj2VdbEDXsSyHTg//IXjjHGFB+qyscLNvH0d6uoXL4s/7n+eAZ29h/ar2QLNoksxN0LMivAuj7eemOMKfH2HErmn18u46fYPfRvV5+XLu9GgxqVIh1Wocs1ifjNm/4P4GsRSQO+4O9rIlcCNwEXF0SQxhhTlMyK2cXDXy3ncHIaoy7sxNCTokpc1928ysuZSBrHDrYouNF8n/MrJ8DyPO7TGGOKnaMpaYz5dhWfLtxMx8Y1+OzqHrRrWD3SYUVUXj7wwz2vujHGFDvLtyZw34SlbNx3hNv7t+L+c9pRsVzxHvcqHHJNItZl1xhTmqVnKG/NWce4WWupX70in9xyAie1rhfpsIoMa3oyxphsbIk/yvCJS1m0aT8Xdm/CmIu7ULNK8ZhxsLCEOuxJc6A5kKUrgqoGPRy8McYUJarKpD+3MXJqNAKMu6oHF/doUmovnuck2DvWWwGfAH0zF3k/1ftdcdPmGmNMsZRwNIURk1fy7fId9I2qw8tXdqd5nezm4jPBnom8B7QA7gNWAynhDsgYYyLl13V7eeDzZew9nMw/z23P7f1bU7aMnX3kJNgk0gcYpqpfFUQwxhgTCclp6bw0I5Z3522kVf2qfH3DyXRtVjPSYRULwSaRrdjZhzGmBIndeYh7Jyxh9c5DXH9iSx47vyOVK1irfF4Fm0SeAR4WkdmqeqQgAjLGmMKQkaGMnx/Hc9NXU6NSOd4f1pszOjSMdFjFTrDT4/5PRDoAcSKyANiftYgODVt0xhhTAHYdTOLBL5Yxb+1ezuzQgOcv70a9ahUjHVaxFGzvrGHAo0A60IusTVt2Z7sxpkibvnIHj0xaQVJqOk9f0oVr+7awrrv5EGxz1mjga+BmVU0IfzjGGFMwDien8eQ30Xy+aCtdm9Zk3NU9aF2/WqTDKvaCTSJ1gTctgRhjipPFm/YzfOJStu4/yt2nt+Hes9pSvgRPWVuYgk0ivwAdgR8LIBZjjAmrtPQMXp+9jjd+WkejGpWYcFs/+h5XJ9JhlSjBJpF7gc9FZD9uJkP/C+uoakY4AjPGmPyI23uE+yYuZemWBC7t1ZRRF3WmRiUb9yrcgk0iq7yfH2WzXkPYpzHGhI2qMvGPLTw5LYZyZYQ3ru3JBd2aRDqsEivYD3ybW8QYU2TFH0nhka+WMzNmFye1rsvLV3ancc3KkQ6rRAv2PpFRBRSHMcbky9w1e3jwi2UcOJrKiPM7cvMpx1HGxr0qcNb0ZIwp1pJS03nu+9WMnx9Hu4bV+PDGvnRqUiPSYZUawd5smNtcIaqqZ+YjHmOMybPo7Qe4b8JS1u4+zI0nR/HwuR2oVN7GvSpMwZ6JlCHrNZG6QHtgD7AmHEEZY0xOMjKUd+dt4KWZsdSuUoGPbupL/3b1Ix1WqRTsNZHTAi0XkdbAZNwAjcYYU2C2JyRy/+dLWbAhnoGdG/Lspd2oU7VCpMMqtcJyTURV14vIc8CLQM9w7NMYY/xNXbadf329grQM5YXLunFF72Y27lWEhfPC+h6gXRj3Z4wxABxMSmXklGi+XrKNni1qMe6qHrSsWzXSYRncNY58E5E6wP3A+iC3GyUi6vfYmUP5qADlVUTOze9rMMYUTQs37OO8cfOYumw7953Vli9u72cJpAgJtnfWRrJeWK8AZM7kclkIMcQCp/k8T8/DNucCy3yex4dwXGNMEZaSlsG4WWt4a+56WtSpwhd39KNXi9qRDsv4CbY5ay5Zk0gSsAn4QlWDOhPxpKlqtmcf2dgXwjbGmGJi3e7DDJ+4lBXbDnBV7+Y8fmEnqlW029qKomB7Zw0rgBhaicg23ARXC4HHVHVDLttMEpFKwFrgFVX9sgDiMsYUMlXl44WbefrbGCqXL8vb1x3PuV0aRTosk4NIp/aFwDBgNdAA+BcwX0Q6q+q+AOUPAw8CvwJpwEXARBEZqqofBzqAiNwG3AbQokWLsL8AY0x47DmUzMNfLWf26t2c2rYeL13RnYY1KkU6LJMLUQ1uPEURGQpcA7QA/P/CqqqtQw5GpBqwAXhOVcfmcZs3gVNUtVtuZXv37q2LFi0KNTxjTAGZFbOLh79azqHkNB47rwM39Iuyca+KEBFZrKq9A60L9sL647gpclcCS4HkfEfnQ1UPi0g00DaIzRYCN4YzDmNM4TiaksaYb1fx6cLNdGxcg8+u7kG7htUjHZYJQrDNWTcDr6rq8IIIxrvO0QH4KYjNegA7CiIeY0zBWb41gfsmLGXjviPc1r8VD5zTjorlbNyr4iaUOda/CdfBReQlb3+bcddEHgeqAh96658F+mYO6ug1paUCS4AM4ELgLuDhcMVkjClY6RnK23PX88oPa6hfvSKf3HwCJ7WpF+mwTIhC6eLbHchtNN+8agZ8BtTD3fG+ADhRVTd56xsD/tdY/gW0xN1Psga4KbuL6saYomVL/FGGT1zKok37GdStMc8M7krNKjZlbXEWbBK5D9e9dh/wHQFu8gtmjnVVvTqX9cP8nn+Id5ZijCk+VJWvl2zjiSnRCPDKVd0Z3KOpjXtVAgSbRDKHev8gm/U2x7ox5hgJR1MYMXkl3y7fQZ+o2oy9sgfN61SJdFgmTGyOdWNMgZm/bi/3f76MvYeTeWhge+4Y0Jqy1nW3RLE51o0xYZecls5LM2J5d95GWtWryqQ7T6Jbs1qRDssUAGt6MsaEVezOQ9w7YQmrdx5iyAktGDGoI1Uq2EdNSWV/WWNMWGRkKOPnx/Hc9NVUr1iO/w7tzZkdG+a+oSnWLIkYY/Jt18EkHvxiGfPW7uWMDg14/rJu1K9eMdJhmUJgScQYky/TV+7g0UkrSExNZ8zgLgw5oYV13S1FLIkYY0JyODmNJ7+J5vNFW+natCbjru5B6/rVIh2WKWSWRIwxQftz836GT1zK5vij3HV6a+49sx0VyoVltm1TzASdRApyKHhjTNGWlp7B67PX8cZP62hUoxITb+tH3+PqRDosE0FFaih4Y0zRFbf3CPdNXMrSLQlc2rMpoy7uTI1KNu5VaVekhoI3xhQ9qsrni7Yw+psYypURXr+mJxd2bxLpsEwREdGh4I0xRVv8kRQenbScGdG76NeqLi9f2Z0mtSpHOixThER6KHhjTBE1d80eHvpiGfuPpvDY+R245ZRWNmWtySKiQ8EbY4qepNR0nvt+NePnx9G2QTXG39iXTk1qRDosU0TZUPDGmL/EbD/IfROXsGbXYYadFMUj53WgUnmbstZkz4aCN8aQkaG898sGXpqxhppVyvPhTX0Z0K5+pMMyxYANBW9MKbc9IZEHPl/Gbxv2MbBzQ569tBt1qlaIdFimmLCmJ2NKsW+WbWfE1ytIy1Cev6wrV/ZubuNemaCEcsd6Y+ABYABQB9gHzAHGqurOsEZnjCkQB5NSGTUlmklLttGjeS3GXdWDqHpVIx2WKYaCvWO9HTAPqA38CqwDGgH3AjeIyKmqujbsURpjwub3jfEMn7iUnQeTuPfMttxzRhvKlbVxr0xogj0TeR44CJygqnGZC0WkJTDTW39p2KIzxoRNSloGr/64hrfmrKdZ7Sp8fns/jm9ZO9JhmWIu2CRyOnCHbwIBUNVNIjIKeDNMcRljwmjd7sMMn7iUFdsOcGXvZjxxYWeqVbRLoib/gn0XVQAOZbPukLfeGFNEqCqfLNzMmG9jqFS+LG9f14tzuzSOdFimBAk2iSwF7hGR733vTBfXneNOb70xpgjYeziZh79czo+rd3Nq23q8dEV3Gtbwn73BmPwJ5WbDacAqEZkI7MBdWL8CaAsMCm94xphQ/LhqFw9/tZyDSWmMvLATQ/tF2bhXpkAEe7PhdBG5ABgDjAAEdwf7YuACVZ0Z/hCNMXmVmJLOmG9j+GThZjo0qs4nt5xI+0bVIx2WKcGCvrKmqtOB6SJSBdfVd7+qHg17ZMaYoKzYeoB7Jy5hw54j3Na/FQ+c046K5WzcK1OwQu6e4SUOSx7GRFh6hvL23PW88sMa6lWryKe3nMBJbepFOixTSuSaREQkHeinqr+LSAY5D8Coqmr9Bo0pJFvij3L/50v5I24/g7o15unBXahVxTpJmsKTlw/8J4GtPr/bKL7GRJiqMnnpNp6YHI0CY6/sziU9m9q4V6bQ5ZpEVHW0z++jCjQaY0yuDhxNZcTkFUxbvoM+UbUZe2UPmtepEumwTCllTU/GFCPz1+3lgS+WsedQMg8NbM8dA1pT1rrumggKatQ1EdkgIt2zWddFRDaEJyxjjK/ktHSe+W4VQ/67kMrlyzLpzpO46/Q2lkBMxAV7JhIFVMxmXSWgZb6iMcZksWbXIe6dsJRVOw4y5IQWjBjUkSoVrBHBFA2hjP+c3YX13kBCMDsSkVEion6PHOckEZGuIjJXRBJFZJuIPCF2NdGUQBkZyge/buSC139h98Ek3ruhN09f0tUSiClS8tLFdzgw3HuqwDcikuJXrDJugqoJIcQQC5zm8zw9h1hqAD8APwN9gPbAeOAI8HIIxzamSNp9MIkHv1zOz2v2cEaHBjx/WTfqV8+uEcCYyMnLV5oNwI/e70OBRcAevzLJQAzwXggxpAUxI+IQoAowVFUTgZUi0hG4X0TGqqp1PzbFzuQl23hxRizbExJpUqsyAzs35Osl20hMTeepwV247oQW1nXXFFl56eI7BZgCZL6Rn1LVcF5AbyUi24AUYCHwWA777wfM8xJIphnAU7jrNRv9NxCR24DbAFq0aBHGsI3Jv8lLtvHopBUkproT8G0Jibz/axzNalfmiztOok2DahGO0JicBXVNRFVvDHMCWQgMA84DbsWNCDxfROpmU74RsMtv2S6fdVmo6juq2ltVe9evXz//ERsTRi/OiP0rgfjKyFBLIKZYiOiwJ6r6vd+xFuCaz4YCY7PbzD/EbJYbU+RtS0gMuHzHgaRCjsSY0BSpYU9U9bCIROPmJglkJ1nPOBp4P/3PUIwpstIzlE9/34wIBLqS16RW5cIPypgQFKlhT0SkEtAB+CmbIr8Bz4tIJVXN/Kp2NrAdiCvI2IwJl8Wb4nl8cjQxOw7StkE1NscfJTntr4lCqVy+LA8NbB/BCI3Ju1DuEwkbEXlJRAaIyHEicgLwJVAV+NBb/6yI/Oizyae44efHe3fIXwo8AljPLFPk7T6UxP2fL+Wyt34j/kgKb1zbk5nD+/P8Zd1oWqsyAjStVZlnL+3K4J5NIx2uMXmSl2sis4PZoaqeEUTxZsBnQD1ct+EFwImquslb3xho7bPvAyJyNvBvXFfj/bj7Q7K7fmJMxKWmZ/Dh/DjGzVpLclo6d57WmrtOb0PViu7fb3DPppY0TLGVl2siZTj2Okh73HWJONx1iIa47rU7cDcO5pmqXp3L+mEBlq0A+gdzHGMiZf76vYyaGs2aXYcZ0K4+Iy/sRKv61uvKlBx5uSZyWubvIjIYeBV3tvC7z/ITgIneOmNKvR0HEnn621VMW76DZrUr8871x3N2p4Z206ApcYIdhOcp4HHfBAKgqgtFZBQwBu/GRGNKo+S0dP77y0Ze/3EdGarcd1Zb7hjQmkrlba5zUzIFm0TaknXIk0y7gTb5C8eY4mtO7G5GfxPDxr1HOKdTQx6/oJNNFmVKvGCTyEbgduD7AOtux7rZmlJoS/xRnpwWww8xuziuXlXG39iH09o3yH1DY0qAYJPIaOATEVmJ646beWH9ctz9HUPCG54xRVdSajpvz13PW3PWU0aEf57bnptPOY6K5azpypQeQSURVZ0gIntxyeRRoDyQCvwBDFTVH3Pa3piSQFX5IWYXT06LYev+RC7o1pgRgzrSuKbdZW5Kn6Bnt1HVWcAsESmDu79jr6pm5LKZMSXCxr1HGDU1mrlr9tC2QTU+vfUETmpdL9JhGRMxIU+R5iWO3WGMxZgi62hKGm/MXsd78zZSsVwZHr+gEzf0a0n5shEd9MGYiAs6iYjIUOAaoAVuXnVfqqqts25lTPGkqny7YgdPf7uKHQeSuKxXMx4+rz0Nqvu/9Y0pnYJKIiLyOO56yEpgKW5GQ2NKpDW7DjFySjS/bdhHp8Y1eP2anvSOqhPpsIwpUoI9E7kZeFVVh+da0phi6lBSKuNmrWX8/DiqVSzHU4O7cG3fFpQtY3ebG+Mv2CRSF/imIAIxJtJUla+XbOOZ71az70gyV/dpwUMD21OnaoVIh2ZMkRVsEpkLdAeCGtnXmKIuevsBRk6JZtGm/fRoXov3h/WmW7NakQ7LmCIv2CRyHzBJRPYB3wHx/gWsu68pThKOpvDyzDV8snATtapU4IXLunH58c0oY01XxuRJsElkjffzg2zWawj7NKbQZWQoExdt4YXpqzmQmMoN/aIYflY7alYpH+nQjClWgv3AL9A51o0pDEu3JDByykqWbT1A36g6jLqoM52a1Ih0WMYUS8EOezKqgOIwpsDtO5zMC9NjmbhoCw2qV+TVq3twUfcmNseHMflgTU+mxEtLz+CThZt5eWYsR1PSua1/K/5xZluqVbS3vzH5Fcod642BB4ABQB1gHzAHGKuqO8ManTH59PvGeJ6YspLVOw9xcpu6jL6oM20aVI90WMaUGMHesd4OmAfUBn4F1uHmW78XuEFETlXVtWGP0pgg7T6YxLPfr+brJdtoUrMSbw3pxbldGlnTlTFhFuyZyPPAQeAEVY3LXCgiLYGZ3vpLwxadMUFKTc9g/K9xjJu1htR05Z4z2vB/p7WmSgVrujKmIAT7n3U6cIdvAgFQ1U3eHOtvhikuY4L2y9q9jPommnW7D3NGhwY8cUEnoupVjXRYxpRowSaRCsChbNYd8tYbU6i2JSTy9LcxfLdiJy3qVOG/Q3tzZseGkQ7LmFIh2CSyFLhHRL73vTNdXEPznd56YwpFclo6783byBuz16EoD5zdjlv7t6JSeZue1pjCEsrNhtOAVSIyEdiBu7B+BdAWGBTe8IwJ7KfVuxn9TTRx+45yXpdGjBjUkWa1q0Q6LGNKnWBvNpwuIhcAY4ARgODuYF8MXKCqM8MfojF/27TvCE9Ni2HWqt20ql+V/93cl1Pb1o90WMaUWqHMsT4dmC4iVXBdffer6tGwR2aMj8SUdN6as463f95AuTLCo+d14MaTj6NCOZue1phIyjWJiEgZXDPVRlVdmbncSxxHvTJdgShVtblGTFipKjOid/HUtBi2JSRycY8mPHpeRxrVtOlpjSkK8nImch2u627XHMocAj4TkVtV9bOwRGZKvfV7DjNqajTz1u6lQ6PqTLjtRE5sVTfSYRljfOQ1iXygqhuzK6CqcSLyX2AoYEnE5Mvh5DRen72W93/ZSKXyZRl1YSeuO7El5cpa05UxRU1ekkgv4PU8lJsFDMlfOKY0U1WmLtvOM9+tYtfBZK44vhn/PLcD9atXjHRoxphs5CWJVAf256Hcfq+sMUFbvfMgI6dEs3BjPF2a1uCt646nV4vakQ7LGJOLvCSRvUBL4JdcyrXwyhqTZwcSUxk3aw0f/baJ6pXK8cwlXbmqT3PK2vS0xhQLeUkiv+CudXySS7lh5J5ojAHc9LRf/bmV56evZt+RFIac0IIHzm5P7ao2co4xxUlerlSOA84UkVdEJMt/uIiUF5FXgTOAV/ITjIg8JiIqIm/kUCbKK+P/ODc/xzaFZ8XWA1z29nwe+nI5zetU4Zu7T2HM4K6WQIwphnI9E1HV30TkAeBlYIiIzAQ2eatbAmcDdYEHVHVBqIGIyInArcDyPG5yLrDM53l8qMc2hWP/kRRemhnLp79vpm7VCrx0RXcu7dmUMtZ0ZUyxlac71lV1nIj8CTwCXAJU9lYl4mY1fE5V54UahIjUxDWX3Qw8kcfN9tlMisVDeoYy4Y/NvDgjlkNJadx40nHcd3ZbalQqH+nQjDH5lOdhT1T1Z+Bn7w72et7ifaqaHoY43gG+VNXZIpLXJDJJRCoBa4FXVPXLMMRhwmzxpv2MnLqSldsOcsJxdXjy4i60b2Sd+IwpKUIZOysD2B2uAETkVqANcH0eNzkMPIibnjcNuAiYKCJDVfXjAPu/DbgNoEWLFmGJ2eRuz6Fknp++mi8Xb6VhjYq8dk1PLuzW2KanNaaEieicoSLSHngGOFVVU/KyjaruxV2fybRIROoB/wSyJBFVfQd3pkPv3r0130GbHKWlZ/C/BZsY+8MaklLTuWNAa+45ow1VK9r0tMaURJH+z+6Haxpb6fMNtSzQX0TuAKqqanIe9rMQuLFgQjR5tWDDPkZOiSZ21yFObVuPURd1pnX9apEOyxhTgCKdRCYDi/yWfYC7zvEMkKezE6AHboIsEwE7DyTxzHermLpsO01rVeY/1x/POZ0aWtOVMaVARJOIqiYACb7LROQIEJ857LyIPAv0VdUzvedDgVRgCZABXAjcBTxcaIEbAFLSMnj/14289uNa0jKUf5zZlv8b0JrKFWx6WmNKi0ifieRFY6C137J/4e5RSQfWADcFuqhuCs7Pa/Yw6ptoNuw5wlkdG/LEBZ1oUdempzWmtMl3EhGRXrhxswA2q+qf+dmfqp7m93yY3/MPgQ/zcwwTuq37jzJm2iqmR+8kqm4VPhjWh9M7NIh0WMaYCAk5iYjI8bgbBBXYjJtvvaXXDn6tqi4OS4SmSEhKTeednzfw75/WUUaEhwa255ZTj6NiOWu6MqY0y8+ZyDvA3ao6y3ehiJwNvIubh8SUALNidvHktBg2xx9lUNfGPDaoI01rVc59Q2NMiZefJFLNP4EAqOoPOQ2gaIqPuL1HeHJaDLNX76ZNg2p8cssJnNymXu4bGmNKjfwkkd0iciPwUebQJyJSFne/hs0rUowdTUnjzZ/W887PG6hQrgz/GtSRoSdFUd6mpzXG+MlPEhkKvA2ME5Ht3rImwO/eOlPMqCrfr9zJmGkxbD+QxKU9m/LIeR1oUKNSpEMzxhRRIScRVd0AnOMNOeLbO8vOQoqhdbsPMXJqNL+u20fHxjV49Zqe9ImqE+mwjDFFXL67+HpJ45jEISINVDVsgzSagnMoKZXXflzLB7/GUaVCWZ68uDPX9m1BOWu6MsbkQUHdbLiIv89OTBGkqkxZup1nvlvFnsPJXNW7OQ8NbE/dahUjHZoxphjJz30iF+Ww2hrRi7CY7QcZOXUlf8Ttp3uzmrxzQ296NK8V6bCMMcVQfs5Evgbm4m4y9GezDhVBB46mMvaHWP63YBO1qlTg+cu6csXxzW16WmNMyPKTRNYCN6vqRv8VIrIlH/s1YZaRoXyxeAvPT48l4WgK153YkvvPbketKhUiHZoxppjLTxL5EDcXSJYkguv6a4qAZVsSeGJqNMu2JNC7ZW1GX9yXzk1qRjosY0wJkZ8uvs/msO7pUPdrwiP+SAovzljNhD+2UK9aRV65qjuDezS1OT6MMWEVVBIRkfuAP4ElqnqoQCIy+ZKeoXy6cBMvzVzDkeQ0bjnlOP5xZluqVyof6dCMMSVQsGcizwHlARWR9cBiXFJZDPypqgfCHJ8JwqK4eJ6YEk3MjoOc1Louoy/qTNuG1sfBGFNwgk0inYAfgHhgHe5ekAuBKrjEsgw3uu+7qpoRzkBN9nYfSuK571cz6c9tNK5ZiX9f24vzuzaypitjTIELNom8DXytqg9mLhCROsATwBBgPvA0cLmInK+qqWGL1GSRmp7Bh/PjGDdrLSlpGdx1emvuOr0NVSoUhwkrjTElQbCfNqfgmrT+oqrxwH0iUhl3htIbl0weBLK9+G7yZ/76vYycEs3a3Yc5rX19Rl7YmePqVY10WMaYUibYAZJ2Az2zWTcRuEFV44BXcGcmJsy2JyRy16d/cu27C0lKS+fdG3rzwbA+lkCMMRER7JnIR8C/RGS2qi7xW9cUd98IuAvtT+Y3OPO35LR03pu3kTdmryNDleFnteP2Aa2oVN6mpzXGRE6wSeQpoBuwQEQmAt8BO4H2wEjcwIsAFQG7HhImc2J3M/qbGDbuPcLAzg3516BONK9TJdJhGWNMcEnEu1A+WERuAx4CrgMUN35WNHC7V/REYH0Y4yyVtsQf5clpMfwQs4tW9ary4U19GdCufqTDMsaYv4TUjUdV3wHeEZFWQGNgD7BWVdUr8j1ucEYTgqTUdN6as563566nbBnh4XM7cPMpx1GhnM3xYYwpWvIzFHwFb3bDDf7rVHVBvqIqpVSVmTG7eGpaDFv3J3Jh9yY8dn4HGtesHOnQjDEmoGCHPSkHjAZuA+qIyH5gFvCaqs4vgPhKjQ17DjP6mxjmrtlDu4bV+OzWE+nXum6kwzLGmBwFeyYyArgfN4LvOqA5cAYwT0RGqOpzOW1ssjqSnMYbP63jvXkbqFSuLE9c0Inr+7WkvE1Pa4wpBoJNIjcAj6nqK74LReQm4E0RWaWqU8IWXQmmqkxbvoOnv13FzoNJXNarGY+c14H61W16WmNM8RFsEmmMG3DxGKr6voi0w92lbkkkF7E7DzFy6koWbIinc5Ma/HtIT45vWSfSYRljTNCCTSJrgR4E7nk1HbgzvwGVZAeTUnl11lrGz4+jWsVyjBnchWv6tqCsTU9rjCmmgk0i/wVGishcVV3qt64ZcDgsUZUwGRnK10u28ez3q9l3JJlr+rbgwXPaU6eqTU9rjCnegk0irwMDgIXeHevf4+5Yb4O7Y316eMMr/lZuO8DIqdEs3rSfHs1r8f6w3nRrVivSYRljTFgEe8e6isgVwHDgXtwd65lmAg+EMbZiLeFoCi/NjOXThZupXaUCL1zejct7NaOMNV0ZY0qQXJOIiJyFm7UwHsCbbOpl4GURaY8bdHGTqm4t0EiLifQM5fNFW3hh+moOJKZyQ78ohp/djpqVbXpaY0zJk5czkZm4WQs38fdUuJnT4cYCsQUYX7GyZPN+Rk6NZvnWA/Q9rg6jL+pMx8Y1Ih2WMcYUmLwkkc5AL+B47/EIUB2XWLaSdZ71XQUUa5G193AyL0xfzeeLttKwRkVevboHF3VvYtPTGmNKvFyTiKquAlYBn2Qu8+4JOZ6/k8sDQE3ciL4hT3AhIo/hptf9t6renUO5rsAbQF/cbIr/AZ7yGQCyUKSlZ/Dxgk28/MMaElPSub1/K+45sy3VKtr0tMaY0iHUUXzXAGuAzzKXiUgbXFIJiYicCNwKLM+lXA3gB+BnoA9uLpPxwBHctZpC8fvGeJ6YspLVOw9xSpt6jLqoM20aVCuswxtjTJEQtq/MqroON55W0ESkJu5M52bgiVyKDwGqAENVNRFYKSIdgftFZGxBnI1MXrKNF2fEsj0hkYY1KtG0dmUWb9pP01qVefu6Xgzs3MiarowxpVJRGeXvHeBLVZ2dh7L9gHleAsk0A2gCRIU7sMlLtvHopBVsS0hEgZ0Hk1i8aT/ndGrIrPsHcG6XxpZAjDGlVsSTiIjcirtZ8fE8btII8L94v8tnnf/+bxORRSKyaM+ePUHH9+KMWBJT07Msj95+kMoVbH5zY0zpFtEk4t1n8gwwRFVTgtjUv8lKslmOqr6jqr1VtXf9+sFPLbs9ITGo5cYYU5pE+kykH+5mxZUikiYiabhhVe70ngcaF30nWc84Gng/w969uEmtwLMKZrfcGGNKk0gnkclAV9zIwJmPRcAE7/dAZye/AaeKSCWfZWcD24G4cAf40MD2VC5/bLNV5fJleWhg+3Afyhhjip2I3tCgqglAgu8yETkCxKvqSu/5s0BfVT3TK/IpbrDH8SIyBmiHuwFydEH0zBrcsynAX72zmtSqzEMD2/+13BhjSrPicFdcY6B15hNVPSAiZwP/xp217MfdHzK2oAIY3LOpJQ1jjAmgyCURVT3N7/mwAGVWAP0LKSRjjDHZiPQ1EWOMMcWYJRFjjDEhsyRijDEmZJZEjDHGhEwKefT0iBKRPcCmfOyiHrA3TOGUBlZfwbH6Co7VV3DyU18tVTXgkB+lKonkl4gsUtXekY6juLD6Co7VV3CsvoJTUPVlzVnGGGNCZknEGGNMyCyJBOedSAdQzFh9BcfqKzhWX8EpkPqyayLGGGNCZmcixhhjQmZJxBhjTMgsiRhjjAlZqU0iInKXiCwXkYPe4zcRGeSzXkRklIhsF5FEEZkjIp399lFRRF4Xkb0ickREpopIs8J/NYVPRB4TERWRN3yWWZ358OpC/R47fdZbffkRkcYi8qGI7BGRJBGJEZEBPuutzjwiEhfg/aUi8q23vlDqqtQmEWAr8DDQC+gNzAYmi0g3b/0/gQeAe4A+wG7gBxGp7rOPccBlwDXAqUANYJqIHDsVYgkjIicCtwLL/VZZnWUVi5sTJ/PR1Wed1ZcPEakF/AoIMAjoiKub3T7FrM7+1odj31u9AAU+99YXTl2pqj28BxAP3I57E+8ARvisqwwcAm73ntfETd87xKdMcyADGBjp11KAdVQTWA+cAcwB3vCWW51lratRwMps1ll9Za2TZ4Bfc1hvdZZz/Y3AzRRbpTDrqjSfifxFRMqKyNVANWA+cBzQCJiZWUZVE4GfgZO8RccD5f3KbAFW+ZQpid4BvlTV2X7Lrc4CayUi20Rko4hMEJFW3nKrr6wGAwtFZKKI7BaRpSJyt4iIt97qLBteHd0MfKyqRynEuirVSUREuorIYSAZeBu4RN2siY28Irv8Ntnls64RkE7WAc18y5QoInIr0AZ4PMBqq7OsFgLDgPNwzX+NgPkiUherr0BaAXcCG4CBwKvAc8Bd3nqrs+ydjUsc73nPC62uitz0uIUsFugB1MK1C34oIqf5rPe/E1MCLPOXlzLFjoi0xzU3nKqqKTkUtTrzqOr3vs9FZAHuA3IosCCzmN9mpba+cF9qF6nqo97zJSLSFpdE3vApZ3WW1a3AH6q61G95gddVqT4TUdUUVV2nqplv3KXAcCCzB41/Nm7A35l9J1AWN7xydmVKkn6417pSRNJEJA0YANzp/b7PK2d1lg1VPQxEA22x91ggO4AYv2WrgBbe71ZnAYhIA+Bi4F2fxYVWV6U6iQRQBqgIbMRV8NmZK0SkEq73wnxv0WIg1a9MM1yPkswyJclkXM+iHj6PRcAE7/c1WJ3lyKuPDrgPS3uPZfUr0N5vWTv+ngPI6iywYbgm+Qk+ywqvriLdoyCCPRme8yo0Cvfh+CyuV8J53vqHgYPApUAX7w+0Hajus4+3gG3AWUBP4Cfc2UzZSL++QqrDOXi9s6zOAtbPS7izteOAE4BpXv20tPoKWF99vA+1Ebhrb1cAB4C77D2WbZ0J7gvcuwHWFUpdRbwSIlj543HfcJJx/adn4dOtzfvjjMJ9a0wC5gJd/PZRCXgd15RzFPgGaB7p11aIdeifRKzOjn2tmf+0Kd4/6ldAJ6uvHOtsELDMq481wD/wBoq1OgtYX6fjrl/0DbCuUOrKRvE1xhgTMrsmYowxJmSWRIwxxoTMkogxxpiQWRIxxhgTMksixhhjQmZJxBhjTMgsiRiTD9lMCuT/iMth+/E+5eb4LI/ylt3iV76eiCwRkXgR6eMtm+Wzj48L6rUaE0hpH4DRmPzq5/f8a9zNcqN8liXnso+dwCW4u4uzJSINgR+BhsAZ+vdge/fg5ob4Ok8RGxNGlkSMyQdVXeD7XESSgb3+y3ORnFt5EWmKSyA1gQGq+tdAhaq6yufYxhQqSyLGFHEi0gI3fXNFXAJZE+GQjPmLJRFjirZWuNnoFOivqhsjHI8xx7AkYkzR9ihu9rnOlkBMUWS9s4wp2qbjRmN9VUQqRjoYY/xZEjGmaPsKuAk4B/hCRMpHOB5jjmFJxJgiTlU/BO4ALgQ+FZGyEQ7JmL9YEjGmGFDVd4B7gcuBj0TE/ndNkWAX1o0pJlT1Ne+6yAtAsojcrDarnIkwSyLGFCOq+qKXSJ7CTXl6Z4RDMqWcJRFjwkhVo0LZTkTKuc013dtPHK5XVqBjjAHG+GxbBmuaNhFibzxjIq8lkIob1iQUM73tW4YtImPySKxJ1ZjIEZEooJ739JCqxoawj3ZADe/pPrsp0RQmSyLGGGNCZs1ZxhhjQmZJxBhjTMgsiRhjjAmZJRFjjDEhsyRijDEmZP8P/HVhXDeQ8w8AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "all_fim.extract_criteria()\n", + "print(all_fim.store_all_results_dataframe)\n", + "## draw 1D sensitivity curve \n", + "# this problem has two degrees of freedom, to draw 1D curve it needs to fix one dimension\n", + "fixed = {\"'CA0[0]'\": 5.0}\n", + "\n", + "all_fim.figure_drawing(fixed, [('T[0]', 'T[0.125]','T[0.25]','T[0.375]',\n", + " 'T[0.5]','T[0.625]','T[0.75]','T[0.875]','T[1]')], 'Reactor case','T [K]','$C_{A0}$ [M]' )\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Heatmap" + ] + }, + { + "cell_type": "code", + "execution_count": 22, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAElCAYAAADp4+XfAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAA3lklEQVR4nO3defxmc/3/8cdz7EVZBkMazISKSpGImLJkaeEbQs0YLdKEQiWkJi0qGsZWSJayVPLD2MYWisFQEiGaRUaWsY1hxjJevz/e78ucuVzL+cznmutzZj7P++123T5znfM+7+t9nc9nzuu8l/N+KyIwM7P+a0BfF8DMzPqWA4GZWT/nQGBm1s85EJiZ9XMOBGZm/ZwDgZlZP9cvA4GkkZKi8HpZ0n8k/UTS0hUo2xf6sgyLCklvlzQn/34H9mE5Rkv6WIPtZ0ma0gflmedzJa2V/x+MLGzz32E/0i8DQcFuwGbATsB44DDgmD4tEYwE/B+wM0aQ/saXAPbsw3J8H3hDIAB+COzS5bI08j/S/4PLC9tG4r/DfmPxvi5AH7srIh7K/75G0jrAFyV9PSJe68uCdZKkpSLipb4uRx8YAdwDvAXYGzixb4szr4j4T1+XASD/bdza1+WwvtPfawT1/gYsA7zejCDpTZJ+JmlybmKYLOkISQMKaZaWdJykeyTNlPSYpHGS3ln/AZLWlvTbnOYlSZMkjc37bgC2AjYvNFvdUDh2E0nX5s94QdJ1kjapy/8sSY9I2kzSLZJmAT9v9aUl7SLp5pzvDEm3S/pUYf/+kiZIelrSs5JulbRTXR6LS/phbmKbLWm6pL9K2qIu3Zcl/aOQ5gxJK7Yq3/yQtBmwLnAO8FtgI0nrlzx2CUk/kjQl/86n5PdLFNLUmlNGSRoj6QlJL0q6TNJahXS1R/ePKPxOR+d9zZpo9pN0dP4beV7S7/Lf4Tskjc+/p4ck7V1X7nfkv63Jkmblv61fSlqhzfedp2mo2d+hpI3yvz/dII/a391iZc6xVUt/rxHUWwt4DngK0sWN1GT0blI1/p/ApsCRwIrAIfm4pYDlgB+RqtkrAqOAWyW9MyIey/mtDdwOvEhqLngQeDuwXc5nFPA7YDHgK3nbjHzse4EbgX+Rqu0BfAe4UdKmEfGPwvd4K3ABcCxwODCr2ReWdABwAnAx6a55JvCBfC6K5+XXwBTS38wngcsk7RgRV+Y0hwIHAUcAd5HuwjfO56L2WT/N5+wE4FvA2/I520DShyNiTrNyzoe9gdeAc4Flc7lG5HK2czawO/AT4K+kZpPvAkOAverSHkb6vvsAq+Rjrpa0fkS8ko+dAJwFnJqPeaTN5x8G3JC/w7tJgfw14P3A6aTf61eBMyXdERH35uNWz3l/A3gml/dw4IpcjrIa/h1GxL8kTczbLqkllrQ86Xz9vMO/Q+uWiOh3L+ZeSNcjXdhWILWHvgrsX0g3PKfbsu74I4CXgVWa5L8Y8CbgeeCgwvZzSBfa1VuU7Qbgrw22Xwg8Cyxf2PYW4GngosK2s3KZP13iPLwll/GidmkLxwzI5+xq4JLC9sta5UMKJnOA79Vt3zyXd+cO/n6XIl0Ixxe2TQCmAQPaHLtBLs/ouu3fzdvfW/g+QQrMAwrpat/ni4VtAfyowWedBUypO0cBXF+X7qK8/fOFbSvkv9fvt/guiwNb5GPfX+JzR5b4OxyZf49rFrYdmMuyRqd+h35199Xfm4buB14hXUzPAE6NiJMK+7cHpgK35KaPxXMt4WpSB+SmtYSSdpd0m6RnSf8pXiDdia5XyG874LKIeHQ+yrplPvbZ2oaImAFcSqrGF71KujC38+FcxtNaJcpNApdJejzn/QqwLfN+t4nAjpJ+LGkLSUvWZbMtKYicW3cubyPVerZs8fkDiseo0CzXxKeB5UmBt+Zs0h3zNm2OrZXjd3Xba+/rz/WFUehPioibSXflPbkDr3dl3fv788/xhc95BniCVKMEQNKSkg6XdH9uEnwF+EveXfxd9cYFpBuSLxe2fQW4PCLa1XSsovp7INgF+CCwI3AtMErSiML+VYA1Sf+hiq/b8/6VACR9Evg9cB+p6eBDOd8ngeJw1JVo3yzQzIqkZqd6j5HuDoueiHJV9JXyz6ZlkvR24Lr8+QeQgscHgauY97v9hNTc9SnSxecpSWdq7rDNVfLPh3jj+XxLoSyN/KYu/W/afK+9Sc1vf5a0fG66GJ+P3bvVgcxtyqo/14/V7a95vEEej5OavebXM3XvX26xvfg7OBoYTQpaOwGbAP+X93VkWHREzAbOJA2qWFzSR0jNV7/qRP7WN/p7H8E9kUcNSboeuBs4RtKfIuIFUl/BZFL7ZyNT8s89gIciYmRtR+5YrL9oTGf+LxBPA4MabB+U9xWVnVt8ev75NtLomka2J/U57F6845P0pnk+MLWH/wz4maRBwCeAMaQmss+S+11ItaL6CxqF/Y2MBoo1telN0iFp1fwZi5OagurtImm5iHi+SRa1czkIKI7qqZ37+nKu2iCPVUn9Bt22B3BORPyotkHSsgvgc34JHEyqee1C+n8wvtUBVm39PRC8LiJekvQtUifYKNLzBFcBnwFmRsT9LQ5/E6nJpGg4qa+g6Grg/yStFhGN7u4BXiJ1PNe7EdipeBGTtByp4/aGFmVr5RZSn8W+NP+PXLvgv1LbIGldUlt4w5pEpM7xX0vakdTmDnANqcNzcERc05NCRsQU5gbddj5P+rv+KnObVGreBxxPen6kWa3ixvxzD+DHhe2fyz9vqku/q6TRteYhSZsDa5D6JGpeJo1GW9DeROH3lO0zn3k1+zskIv4j6WpSh/+GwFGxCA237o8cCAoi4tI8KuKbkk4ijTjZB7hO0i+AfwBLAkNJTSA7R8SLpICxs6TjSG3zG5E60J6t+4jvk6rst0j6CamZ5G3A9hHx+ZzmX6Qmqs+S7kifj4gHSKOWPpHL8jPSXf+hpP/8R83n931e0mHAiZL+lL/v86T/3LMj4kRSk9mrwDn5HKwG/AB4mELToqRL8vn5G+mO//2k2sSp+bP+k8t9kqT1SBfc2aQ27m2BX0fEn+fne9QZQarFnRoR89SMJP0F+DapeahhIIiIeyWdD4zOfRi3kNr7jwTOj4i76w5ZDrhY0qnAyqTmmQeZt3/iX6QgfhXp3Dw6n/1E7VwF7C3pn6S/rf8jNeXNj2Z/hzWnkG6ayjTVWdX1dW91X7yYO2roHQ32bZf3HZTfL01qmrifdJf0NKljdDSweE4zgDQM8lFS2/SNpAvhFOCsuvyHAueTmjdeAiYBxxX2DyIN93s+l+OGwr4PkS7MM0md0dcBm9TlfxbwSA/Px66kTttZpI7b24BPFPbvnr//bOBe0t3yWcw78uQQ0kNJT+V8HsjnaIm6zxqe072Qv8d9pGafXo84yec8gCNbpPkxqWaydos0S+Tf51TShW5qfr9EIc1a+bNGkZrAnsy/+8vr8ybVnu7M5+/1EUkNzmEtzy/VHT86b1+8bvsU4HeF9wNJnbnP5Ne5pP6c+hFBzT63mKbp32Hev1j+Hf6xr/8/+9X7l/Iv1cx6ID80Nhn4ckT8uo+L03WStiU1dW4TEdf1dXmsd9w0ZGalSRpKelDtOOBvDgKLhv4+fNTMeuZI0nMOL5H6Y2wR4KYhM7N+zjUCM7N+zoHAzKyf63edxQMHrhBrrdWbp/8XcTPubZ+mn/v7g31dgmp7DYgI9SaPxaXSjdavpckFt+/N5/V3/S4QrLXW27jjjgv7uhjVddW7+roElffmHfq6BNU2uwN5BPDmkmmfL6wfYvOn3wUCM6s+8cb5WWzBcSAws8oR6fFu6w4HAjOrJNcIuseBwMwqR3hIYzc5EJhZJblG0D0OBGZWOe4s7i4HAjOrHHcWd5cDgZlVkvsIuseBwMwqx01D3eVAYGaV40DQXQ4EZlZJbhrqHgcCM6sc1wi6y4HAzCrHo4a6y4HAzCrJNYLucSAws8rxFBPd5UBgZpXkGkH3OBCYWeW4s7i7HAjMrHLcWdxdDgRmVkmuEXSPA4GZVY47i7vLgcDMKsk1gu5xIDCzynGNoLt8rs2scmqjhsq82uYlHSZpoqQZkp6UNE7SBiWO213SXZJelDRV0rcapFlS0lGSJkt6SdLDkg4s7B8pKRq8li6kGd1g/2MlvlrHuEZgZpXT4VFDw4BTgIk566OAayW9OyKebvj50g7AecCBwFXAu4DTJc2KiJMKSc8H3g7sCzwIrAosU5fdi8DQ4oaImF2X5oFczpo5Jb9bRzgQmFkldaqPICI+XnwvaTjwHLA5MK7JYcOBcRFxSn4/SdLRwKGSTo6IkLQdsA0wNCKm53RTGhch2t3hv1oizQLjpiEzq5xONg01sBzp2vdMizRLAfV37bOANYA18/udSbWMgyU9IulBSSdIWrbuuGVy09Ijki6T9P4GnzdE0rTcxHSBpCE9/la94EBgZpU0oOQLGCjpjsJr3zZZjwXuAia0SDMe2FnSdpIGSFoXOCTvWy3/HAJsAbwP+AywP7A9cFYhnweALwCfBvYkBZebJa1TSHMbMBLYAfgyMAi4RdJKbb5Hx7hpyMwqp4dTTEyPiI1L5SuNIV28t4iIVu3wp5Pa9S8hdVfMIAWQ0cxtvx8ABLBXRDyX898fGC9p1Yh4PCImUAg4km4hBaEDSP0PRMSVdWW8FZgE7A2MKfO9ess1AjOrpB7UCEqRdBzprvxjETGpVdpIDgWWJTUFDQJuz7un5J//A6bVgkB2X/45uEm+c4A7gHUa7c9pZgL3tkrTaQ4EZlY5ApYs+SqVnzQW2IsUBO4vW46ImBMR0yLiZVIQmRART+TdNwOr1/UJrJt/Tm1SDgHvJQWRZmVdGnhnqzSd1tVAIOlrku7O43lnSJogaafCfuUxtY9KmiXpBknr1+WxlKQTJU2X9IKkSyWt0c3vYWYLVu2Bsk7UCCSdDOxDupA/I2lQfi1bSHO0pOsK7wdK+qqkd0naMAeS3YBvFLI+D3gKOFPS+pI2JzUfXVgLFpK+L+njkoZI2hA4gxQIflX4rGMlbSVpbUkfAi4E3gycXfqE9VK3awSPAIcCHwA2Bq4HLpb03rz/26QOmQOADwJPANdIWq6Qx/Gkjpk9gY8AbwEuk+Qn0s0WIR0cNTSKNFLoOtJddu31zUKa1agb6w+MII0KuhlYHxgWEbXmoVoTzjbAW3O6PwA3kjqHa5YHTiM1GV0NvA3YspgPaSTS+aSO5YuAl4BNI6JhrWJBUER067MaF0B6GjiMdLIeBU6KiB/nfcuQgsE3I+JUSW8FngT2iYhzc5q3k6phO0TE+Haft/HGG8Qdd1y4YL7MouCqd/V1CSrvzTv0dQmqbTYwJ0K9yWOQFMNLpj0W7izbWWyN9VkfgaTFJO1B6oy5BVib1CFzdS1NRMwCbgI+nDdtROrBL6b5Lyna1tKY2SKg053F1lzXh49Keg9pONXSwExgl4j4p6TahfzxukMeJ1WnIAWKOcD0BmkGtfjMfUmPgDN48Oq9Kr+ZLXhemKa7+iKgPgBsCGwK/BI4u24CqPq2KjXYVq9lmog4LSI2joiNV155hZ6X2My6agE/WWx1uh4IIuLliHgoIu6IiMNID1ccBNTm2ai/s1+FubWEx0i/+4Et0pjZQs6BoLuq0MQ2gDSvx2TShX7b2o48nvYjpD4EgDuBV+rSrEGaGbCWxswWAe4j6J6u9hFI+ilwOfBf0nCuvUhTr+6UZ/M7HjhC0v3Av4HvkvoRzgOIiOcknQEcI+kJ0hjeMcDdwLXd/C5mtuD0cIoJ66VudxYPAn6Xfz5HuoAXh33+nDSX98nACqTJmLaLiOcLeRwEvAr8Pqe9DhjRZt4QM1vIOBB0T1cDQUSMbLM/SJM6jW6RZjbpgbMDOlg0M6sQjxrqLs8+amaV4zWLy5H0NlKf6abA6qRWkumk0Zk3AjdGxGvt8vG5NrNK8qih5iQNk3QZaSbU3wA7kp63Woa0PsIhpGbz/+b5297SKj8HAjOrHA8fbS4HgCuAF4DdgZUjYnBEbBQRW0TEu0nzH21IWqt5N+A/kj7eLE83DZlZJfkutamHgC+1WuM4NwfdnV8/lvQpUnBoyIHAzCrHw0ebi4hvzMcxl7ba70BgZpXjUUPd5UBgZpXjGkF5ktai8aihW/Nw+7YcCMysktxH0Jyk5YEv5dc6pNhZ72VJlwKnRMQNrfLzuTazyvGooeYkHQJMAg4GxpNGDr2D1Bm8JGnmhs1Iq0EuT1rl8VpJ6zXL0zUCM6uk/niRL+lzpOUwL23ysNgT+XUbMFbS6qRlOT9JajJ6AwcCM6scdxY3FxEf6GH6R0m1h6YcCMysctxZ3F0OBGZWSe7AbExSj05NmbmGHAjMrHJcI2jplR6kDUpc5x0IzKySXCNoSsAM4CJgaicydCAws8pxjaClo0gjh/YGbgLOAf4YETPnN0MHXTOrnNqooTKvtnlJh0maKGmGpCcljZO0QYnjdpd0l6QXJU2V9K0GaZaUdJSkyZJekvSwpAML+0dKigavpevyGZXzmC3pTkkfaVauiBgdEesAW5GW9P0F8JikcyVtJ6nRw2UtORCYWSV18IGyYaTpmD8MfIy01O21klZsdoCkHUhrpZ8GbACMAg6StH9d0vOB7YF9gfVIUz7fXZfmRWC14qs49YOkzwJjgZ8A7wduAa6UNLjVl4qIv0bEV0gPkH0BeAtwGfCIpP1aHVvPTUNmVjmdbBqKiHnm4Zc0nLRm+ubAuCaHDQfGRcQp+f0kSUcDh0o6OSJC0nbANsDQiJie001pXITmU0aTxvifFRGn5/cHSNoe+CpwWJuvR0S8DPxB0g05r28C2wG/andsjWsEZlZJA0q+5sNy+dBnWqRZCqifsG0WsAawZn6/MzAROFjSI5IelHSCpGXrjlsmNy09IukySe+v7ZC0JLARcHXdMVeTajAtSVpa0h6SrgAeAfYEjiFNL1GaawRmVjk9rBEMlHRH4f1pEXFai/RjgbuACS3SjCdNz7AdcC1pLp9D8r7VSHf+Q4AtgJeAz5Dm9TmRNAvorjntA6Rmm3+QAtDXgZslvS8iHgQGkr7q43Wf/zipttGQpI+Sai27kk7XRcAOwPURES2+V0MOBGZWST24258eERuXSShpDOnivUVEzGmR9HRgKHAJqU96BimAjAZqxw0gjdPfKyKey/nvD4yXtGpEPB4REygEHEm3kILQAcDrnco5n3mK2mBbLY+ppGDzZ1LfxZ8iYlbLL96GA4GZVc4A0jSanSTpOGAP4KMRMalV2nxXfaikw0mdsU8CW+fdU/LP/wHTakEguy//HMwb7/KJiDm59rJO3jSdFFgG1SVdpdHx2dtJHdDvJHUw/6TFQKGIiDWb7axxIDCzSupkB6aksaQgMCwi7i97XK41TMt57AlMiIgn8u6bgd0kLVsYw79u/tnwQa88tPO9pKYiIuJlSXcC2wJ/LCTdFvhTk2KdXbb8ZTkQmFnldHLUkKSTSe3pOwPPSKrdfc+sXcDziKBNImLr/H4gaSjoDaSO433y+60KWZ8HHAmcKWk0qY9gLHBhLVhI+j5wK/AgaXjngaRA8NVCPmOA30q6nRRc9iM1/TQc9RMR+8zXiWjBgcDMKqmDTxaPyj+vq9v+A1KbP6QO4KF1+0eQRuCI1M4/LCJur+2MiJmStiF1EE8kjUK6GPhOIY/lSc8iDCINWf07sGVdPr+XtBLw3VyOe4AdI6Ij00eU4UBgZpUjOtc0FBFtn7SNiJF176eTVvlqd9wDpDH7zfYfBBxUIp9TSA+9tSVpSJl0hbxb9oeAA4GZVZAXpmnpIZqMKGqibeXKgcDMKsmTzjXlPoLeW4r0bIg1tP0/+7oElfdCNBvMYQAbb3xqr/Pw7KPNRYRHDZlZ/+D5bxqTNKDMqmM94XNtZpVTqxF0aPbRRc0rkjapvVFyVGFYbI85EJhZ5TgQtFQ/CmoAcATp2YP54qYhM6scjxrqsR4vRlPkQGBmleTmiu5xIDCzyvGoobYaPUfQ4+mnaxwIzKySHAhaGifp5bptV0h6pW6bZx81s4VY2bahjg6kXCj4OQIz6wd60jbUzwKBZx81s/6hJ8OG6htDrMfcMW9m1eQHCRqS9IH5OGZpSe9stt+BwMyqx0+UtXKTpEslbS+p5TVc0uC83OZk4BPN0rlpyMyqybepzawH/BC4BJghaQJp6csngZeAFYAhwCbABqQgcEhEnNcsQwcCM6seP0jQVERMA74g6TukKak/DhwMLFNINhm4ibRa2viIaPmMgQOBmVWP55hoK6+L/LP8QtLywNLAUxHRoy50BwIzqybXCHokIp6d32PdCmdm1VNbtLjMq5/L01B/StKxks6UtGbevpWkUjOSukZgZtXkGkFbklYArgA+BMwAlgNOBKYCXwaeBg5sl4/jqZlVj4ePlnUM8HZgc2Ag805HfS2wdZlMmtYIJJ0znwU7MiKmzuexZmaJb1PL+DTwzYiYIKk+LD5MChJttWoa+jzwGGlcalmDgeNJ1RIzs/njUUNlLQtMa7JvaUouWNMu5u4cEWuXeQHrlP1QM7OWOtg0JOkwSRMlzZD0pKRxkjYocdzuku6S9KKkqZK+1SDNknm94MmSXpL0sKSGbfKS9pQUki6r2z46by++Hmv/zQB4ANiuyb6tgH+WyaRVjeA64LmShQGYk4+Z0YNjzMzeqLMPlA0DTgEm5pyPAq6V9O6IeLrhx0s7AOeROlqvAt4FnC5pVkScVEh6Pqn5ZV/gQWBV5n2wq5bfEFJ7/l+alPGBXM6aOSW/28nAyZKey+UFWF7SPsD+uVxtNQ0EEbFtyYLU0gfQo2PMzJrqUB9BRHy8+F7ScNJN7ubAuCaHDQfGRcQp+f0kSUcDh0o6OSJC0nbANsDQiJie002pz0jSEqSAcQTwUVKnbr1XI6JsLeB1EXG6pKHAD0gBDuAa0uTcP4+Ic8vk0/RUS9qobGEkndQ+lZlZSQt21NBypGvfMy3SLAXMrts2C1gDqK34tTOplnGwpEckPSjpBEnL1h33Y2BKRLRaUGaIpGm5iemCXIMoJSK+AwwFvgJ8FxgFrBcRR5TNo1XT0HhJH42Ilm1Mks4ARpKqIWZmvbdgO4vHAncBE1qkGQ+MzXf91wLvAA7J+1Yj3fkPAbYgDaj5DLA8aQz/6sCuAPn4zwIbtvis20jX0PuBVUgX81skrR8RT5X5Qnmk5q/LpG2kVSD4L6kdbcuIeKB+pyQB5wCfA46c3wKYmTVU/m5/oKQ7Cu9Pi4jTGiWUNIZ08d4iIlq1w59Ousu+hBSSZpACyGjmtt8PIC0Yv1dEPJfz3590E71qTndW3t+09hERV9aV8VZgErA3MKZFGcl9AWtGxOgG+0YDk9vURIDWgWAb4EbgOklbRcR/Ch+wGKljYjfg2xFxbLsPMjMrrTbFRDnTI2LjtllKxwF7AB+NiEmt0uY+z0PzXP6DSFM81x7OmpJ//g+YVgsC2X3552DgzaTaw7XpvhnI30rSq8D6jW6yI2KmpHtJIzHb+TpwRpN9TwDfoMQax01Pda6SfAyYSQoGg+H1jo8/kYLA1x0EzGyB6GAfgaSxwF7AxyLi/rJFiIg5ETEtIl4G9gQm5Fk/AW4GVq/rE1g3/5xK6j94D6lZqPa6lDRyaEPSVNGNyro08E5SoGnnHcC9TfbdR6rVtNVyrqGIeELS1qSawfV5SNUJpHGrX4mI08t8iJlZj3Rw+Kikk0mjgHYGnpE0KO+aGREzc5qjgU0iYuv8fiDpZvcGUsfxPvn9VoWszyM1i5+Zm2GWJzUfXVgIFvfUleVZYPGIuKew7VjS6KWHSX0ER5JqE23v5IFXaTwKCWDlEscDJSpfeRGEj5J+LfeSmoxGOgiY2QLVudlHR5FGCl1Husuuvb5ZSLMab7x7HkG6q78ZWB8YFhG313bmILIN8Nac7g+km+YvlP6OyRqk4aUPABeROp83LTlVz+3Afk327ZfL1VaruYbqv8x5wGHAlcAS9fsj4jftPixHze/XbX48Igbl/cr79yUtt3Yb8LWIuLeQx1LAsaRq2jKkX+6oiHik3eeb2UJCwJKdySoi2s54EBEj695PBzYrcVyrJ3vbfk7etkfZ4xv4MakP4jbSqKFpwNuALwEfoOSzXa2ahpoNRdohv4oCaBsIslZP0H2bNERrZE73PeAaSetFxPM5zfGkiZb2BJ4i9apfJmmjNqMAzGxh0bPO4n4rIm6UtCvpunhqYdcU4DMRcUOZfFoFgrXnt3BtNHyCLtcGvgH8NCL+lLftTer53gs4VdJbgS8C+0TENTnNcFLHzDaksb9mtrDzmsWlRcQlwCWS1gNWIo2i+ndP8mg1xcSCmkF0iKRpwMukpp/D81CutUnDtK4ulGGWpJuAD5Oi3UakMb3FNP+VdF9O40BgtqhwjaBHGg1FLavbK5Q1fYKOFAQAHq875nFSmxc5zRxgeoM0gzCzRYNrBD0i6X3AeqSpp+cREW3XlmnVWXw9qRO21JhbSQNIj2J/JSIebJSmzRN0t9aS1WfdYNsbPr5VGkn7kmfhGzx4cJuszKwSHAjakrQ8cDmwaW1T/lm8HrYNBK0qX8NIQ65Kl6mnx+ThV7Un6Gr9BvV39qswt5bwGOnPo37cbDFNo885LSI2joiNV1652ZBbM6uM2lxDZV79209I/QJbks7aLqQHgc8l3WRvUiaTdk1DF0vqyQpl7e7c51F4gu7PpKfsHiMNd5pY2P8RoLYgxJ3AKznNeTnNGqS5wm/pyWebWYW5aaisj5OmoK61qDwSEXcCN0j6JWkKihHtMmkVCMo81dZIffv961o9QZfn9z4eOELS/cC/SX0IM8kX/Yh4Ls92eoykJ5g7fPRuUrOUmS0q3FlcxmrApIiYI2k287bIXARcUCaTVqOG9uld+RqqPUE3kDSJ063M+wTdz0kPiZ3M3AfKtis8QwBwEOmx6t8z94GyEX6GwGwR4hpBWY+RpraANIx+M9K0GJDmISqlq6OG2j1Bl2f8G51fzdLMBg7ILzNbVLlGUMZfSRf/y4DfAt+XtBbpZnlv0iR3bXV7+KiZWXsdnGJiEfcD0kI4kNZEXom0EM6bSEGg1A2zA4GZVY+nmCglrxPzn/zvV0hT9BzS8qAGfKrNrJoW3JrFiwRJS0p6WtKnepuXawRmVj3uLG4rIl7OK53N7m1eTWsEkiblx5bNzLqvc+sRLMouBnbtbSatagRrkVbmMTPrLtcIyroSOEHShaSg8D/qHuyNiOvbZeKmITOrntoUE9bOn/LP/8uvmmDuHGxtQ2q7QNCjKSPMzDrCNYKyPkYHrtPtAsEPJDWdMqIgImLv3hbGzOx1bv9vq+wKZO20CwQbkhZSbsc1BzPrHNcISpE0CdglIv7RYN8GwKURMaRdPu0Cwc4Rcft8ltHMbP45EJSxFs0H9SwNrFkmE3cWm1n1+MninmjWIrMx8GyZDBwIzKx6PGqoKUkHkWZhhhQExkl6uS7ZMsCK9HYaajOzPuWmoWYmkabfhzTD6B2kaf2LXgL+Bfy6TIat1iNwxczM+oY7i5uKiEuASwAkARwVEZN7k6cv9mZWTR2aYkLSYZImSpoh6UlJ4/KImnbH7S7pLkkvSpoq6VsN0iwp6ShJkyW9JOlhSQc2yW9PSSHpsgb7RuU8Zku6U9JH2n+ztIBYb4MAOBCYWRXVagSdmX10GHAK8GHSA1ivAtdKWrHpx0s7kJbIPQ3YABgFHCRp/7qk5wPbA/sC6wG7kZbOrc9vCGm9gL802PdZYCxpIfr3k9Zfv1LS4CZlOyiv516apA9I2r7ZfgcCM6ueWmdxmVcbEfHxiDgzIu6JiH8Cw4GVgc1bHDYcGBcRp0TEpIi4HDgaOFS5PUbSdsA2wI4RcU1ETImI2+of8pK0BClgHEFq3693MHBWRJweEfdFxAGkOYO+2qRsI4Apkn7aamJQSStIGi7patJKZm9pltaBwMyqp7M1gnrLka59z7RIsxRvnN55Fmnd9drY/J2BicDBkh6R9KCkEyQtW3fcj4EpEXF2/YdIWhLYCLi6btfVpBpMIx8Avg3sAPxd0rOS/iLpIknnS7pK0r+B6cCpwDTg3RHxh2Zf1qOGzKyayt+mDpR0R+H9aRFxWov0Y4G7gAkt0owHxua7/mtJC8HXVv5aDZgCDAG2II3Q+QxpEfkTSUtH7gqv1xo+S5qloWHZSeHs8brtj5NqG2+Q13Y/BzhH0odITVMfyuVZGniK1AT1Y+CSiHi2xfcEHAjMrIp6NmpoekRsXCpbaQzp4r1FRMxpkfR0YChpdM4SwAxSABkN1I4bQBrHv1dEPJfz3x8YL2nVnO6svL9V7QPe+FCYGmx740ERtwG3tUvXjgOBmVVTh4ePSjoO2AP4aEQ0aqt/Xb7rPlTS4cAg0jj9rfPuKfnn/4BptSCQ3Zd/DgbeTKo9XJu7FSDXc/LKYusDk0kBY1BdEVbhjbWEBcaBwMyqp8NTTEgaSwoCwyLi/rLH5VrDtJzHnsCEiHgi774Z2E3SshExM29bN/+cCrwAvKcuyx8BKwBfAybn5SbvBLYF/lhIty1z1xpo9p1WIfUTvJv0FDHA06QHya4slLMtBwIzq54OTjEh6WTSKKCdgWck1e6+Z9Yu4JKOBjaJiK3z+4GkoaA3kDqO98nvtypkfR5wJHCmpNGkPoKxwIWFi/A9dWV5Flg8IorbxwC/lXQ7KbjsR+pn+FWT7zOA1P5/ELAk8CKp41u5DG8CXpZ0PHBYrt205EBgZtXUuaahUfnndXXbf0Bq84fUhDO0bv8I0th/kTqWhxVnY46ImZK2IXUQTyRdjC8GvtOTwkXE7yWtBHw3l+Me0pDUqU0OOQz4BikY/DYiphR3SlqTFPiOAJ7P6VpyIDCz6ungFBMRoRJpRta9nw5sVuK4B4DtelCWkU22n0J66K2ML5Pu9I9vktdU4EeSXgC+jgOBmS20/JRTM6sCfy+R7m85bVs+1WZWPQv2gbKF3X2kju929gRKdYy7RmBm1dQ/L/JlHAVcKGk94HfAvaT+iSCNHlof+BypY3vXMhn2w0Ag+uXXLu2dfV2AhcDX+7oAFXdx77PwwjRNRcTFknYizX30axo/jPYP4JMRcWWZPH1FNLPq8VKVLUXEeNITzGuQagArks7a08C9EfHfnuTnQGBm1eSmobYi4hHgkd7m45hrZtXjzuJekzRQ0pZl0joQmFk1dWiFsn5sK+DPZRK6acjMqmcAafIE6woHAjOrJt/tNyTpNyWTrtk+SeJAYGbV08EpJhZBI4HngJlt0i1TNkMHAjOrJtcImnkYuDoi9m2VSNKuwO/LZOhAYGbV4xpBK3cAZVZkazv9dI1jrplVk4ePNnMpaWH6dv5Fmo6iLdcIzKx6PMVEUxFxDmnx+nbp7iOtudCWA4GZVY+bhrrKgcDMqskN113jU21m1eMpJkqR9JqkOU1er0p6StI1klquouYagZlVk29Ty/ghsDewNHA58DgwCNgRmE2aE3wYcKWkT0fEZY0ycSAws+rxFBNlzQYmAztExOzaRknLAFcCTwIfIAWJw4GGgcAx18yqyZPOlbEfcFwxCABExCzgOGC/iHiNtIDNe5tl4hqBmVWTynYAzFmgxai4VWg+0HZJYKX87+mknpeGHE/NrIJqS8qWefVrdwCjJa1W3ChpdeD7eT+kCegebZZJvz+LZlZFPVlb/KUFWZCq+zpwHTBZ0gTgCVItYTPgReDzOd07gPOaZeIagZlVUOdqBJIOkzRR0gxJT0oaJ2mDEsftLukuSS9KmirpWw3SLCnpKEmTJb0k6WFJBxb27ybpDknPSnoh57d3XR6jJUXd67G2XwyIiL+RLvJjgNeA9+SfvwDWiYi7crrvRcT3m+XjGoGZVZBIIyLLeK5dgmHAKcDEnPFRwLWS3h0RTzf8dGkH0h30gcBVwLuA0yXNioiTCknPB94O7As8CKzKvNM/PwX8CLgfeAX4BHCGpCcj4opCugdyOWtKd3xExFOkEUHzzYHAzCqoJ01DrUXEx+fJWRpOih6bA+OaHDYcGBcRp+T3kyQdDRwq6eSIiPyQ1jbA0IioTQI3pe6zr6/Ld2yuEXwEKAaCVyOiVC2gEUkrkpqDViR1DN/WLMg14qYhM6ugBdpZvBzp2vdMizRLkcboF80C1mDuyl87k2oZB0t6RNKDkk6QtGyjDJVsDawH3FS3e4ikabmJ6QJJQ8p+GUk/AqaRgtrZpGcGpkn6Ydk8XCMws4oqPX/EQEl3FN6fFhGntUg/FrgLmNAizXjS3ft2wLWkdvhD8r7VSHf+Q4AtSL3VnwGWB04EVgd2rWUk6a2kC/VSpCafr0XElYXPuo206tj9pI7e7wK3SFo/N/s0JekbpGahM4DfAY+Rniz+PHB4boI6oVUe4EBgZpXUo6ah6RFRZqEWJI0hXby3iIhW7fCnA0OBS0jj9GeQAsho5rbfDyAt/rJXRDyX898fGC9p1Yh4PKd7HtgQWBbYGhgjaUpEXAdQFxSQdCswiTR1xJg2X2k/YGxEHFTY9gBwo6SZwCigbSBw05CZVVDnm4YkHQfsCXwsIia1ShvJoaSL95qku+zb8+4p+ef/gGm1IJDdl38OLuT1WkQ8FBF3RcQvgD/SonM3ImYC9wLrlPhaa5Gaghq5PO9vy4HAzCpoAGnUUJlXe5LGAnuRgsD9ZUsREXMiYlpEvEwKIhMi4om8+2Zg9bo+gXXzz6ktsh1AaiZqVtalgXeSAk07TwHNhsKun/e35aYhM6uozlyeJJ1MGgW0M/CMpEF518x8900eEbRJRGyd3w8EdgNuIF2098nvtypkfR5wJHCmpNGkPoKxwIW1YCHpCFIfwKScz465LAcUyncsqaP3YVIfwZHAm0kdv+38P+CHkp4CLoiIVyQtnst6VMk8HAjMrIo6N3yU1E4O6Qncoh+Q2vwhdQAPrds/AjgmF2YCMCwias1DRMRMSduQOognkkYhXQx8p5DHssAvSaONZpE6hEdExPmFNGuQnkcYSJot9FZg04hoVauoOQx4H+mC/xtJT5OGkC4G/JWSzxc4EJhZBXX0OYKmk60V0oysez+dNC6/3XEPAE0XfYmIw0gX61Z57NHuc1oc+7ykLYGdgC2BFYCngRuBKyMiyuTjQGBmFdTRGsEiLV/sL6PJWgNldLWzWNKUBnNqhKTL837leTcelTRL0g2S1q/LYylJJ0qanufuuFTSGt38Hma2oNWmmOhMZ/GipM3ylG9YrrJMnt0OuR9k3qdEVgPuBP6Q33+b9NDGSNJY2O8B10haLyKez2mOBz5N6sF/ijTO9jJJG7UZF2xmCw3XCFo4ivT8Qsd09UxHxJPF95K+SHpQ44+SBHwD+GlE/Cnv35s0repewKn5Cb0vAvtExDU5zXDSUK1tSE8DmtlCz4GgmYgY3ek8++w5gnzh/yLwu4h4EVib9NDG1bU0ebm1m4AP500bkZ7yK6b5L+khjloaM1voeWGaburLs7gt6eL/6/y+Nrb38bp0jwNvK6SZQ5pdrz7NIJqQtC9pmlgGDx7cLJmZVYZrBN3Ul08WfxmYWFs4oaC+7UsNttVrmSYiTouIjSNi45VXXrnHBTWzbnONoJv6JBBIWoXU4Xt6YXNtLu76O/tVmFtLeIzU2TywRRozW+iJ9CBumZf1Vl/VCEaSpm69oLBtMulCv21tQ55z4yPALXnTnaRVfopp1iCtHnQLZraIcI2gm7p+FnMn8ZdI82LUhoSSV/w5HjhC0v3Av0nzcs8kL7ocEc9JOgM4RtITzB0+ejdpznAzWyS4j6Cb+uJMDyNNr/r5Bvt+Tlrv82TSo9K3AdsVAwZwEPAq8Puc9jrS3B1+hsBskeJA0C1dP9MR8WdSuG+0L0iTQI1ucfxs0sx9BzRLY2YLO9cIusln2swqyIGgm3ymzayCagvTWDc4EJhZRfny1C0+02ZWQW4a6iafaTOrIAeCbvKZNrMKciDoJp9pM6ug2sI01g0OBGZWQa4RdJPPtJlVkJh3MUNbkBwIzKyCXCPoJp9pM6sgB4Ju8pk2s4ry5alb+nKFMjOzJmpTTJR5tSbpMEkTJc2Q9KSkcZI2KHHc7pLukvSipKmSvtUgzZKSjpI0WdJLkh6WdGBh/26S7pD0rKQXcn57N8hnVM5jtqQ7JX2k7RfrIIdcM6ugjjYNDQNOASbmjI8CrpX07oh4uuGnSzuQ1kE5ELiKtPjV6ZJmRcRJhaTnA28nrYn+ILAqaXr8mqeAHwH3kxbV+gRwhqQnI+KK/FmfBcYCo4C/5p9X5vI93Puv354DgZlVUOcCQUR8fJ6cpeHAc8DmwLgmhw0HxkXEKfn9JElHA4dKOjkvpLUdsA0wNCKm53RT6j77+rp8x+YawUeAK/K2g4GzIqK2dO8BkrYHvgoc1oOvOt/cNGRmFbXAlqpcjnTte6ZFmqWA2XXbZgFrAGvm9zuTahkHS3pE0oOSTpC0bKMMlWwNrAfclLctCWwEXF2X/Grgw6W/US+5RmBmFbRARw2NBe4CJrRIM550974daRncdwCH5H2rke78hwBbkNZf/wywPHAisDqway0jSW8FppGCyxzgaxFxZd49kPTAxON1n/84qbbRFQ4EZlZBPQoEAyXdUXh/WkSc1jBXaQzp4r1Fm+VtTweGApcASwAzSAFkNOliDqlWEcBeEfFczn9/YLykVSOidnF/HtgQWBbYGhgjaUpEXFf4vKgvaoNtC4wDgZlVUI8WppkeERu3SyTpOGAP4KMRMalV2rxs7qGSDgcGAU+SLuIwtx/gf8C0WhDI7ss/B5Pv8iPiNeChvP0uSe8CDiettz6dFFgG1RVhFd5YS1hg3EdgZhXVuT4CSWOBvYCPRcT9ZUsQEXMiYlpEvAzsCUyIiCfy7puB1ev6BNbNP6e2yHYAqZmInO+dwLZ1abYFbilbzt5yjcDMKqhzfQSSTiaNAtoZeEZS7e57ZkTMzGmOBjaJiK3z+4HAbsANpIv2Pvn9VoWszwOOBM6UNJrURzAWuLAWLCQdAdwGTMr57JjLckAhnzHAbyXdTgou+5H6GX7VkRNQggOBmVVQRzuLR+Wf19Vt/wGpzR9SB/DQuv0jgGNyYSYAwyLi9trOiJgpaRtSB/FE0iiki4HvFPJYFvglabTRLNLzBCMi4vxCPr+XtBLw3VyOe4AdI6JVraKjHAjMrII6N/toRKhEmpF176cDm5U47gFguxb7D6PEswD5eYVT2qVbUBwIzKyCvDBNNzkQmFkFefbRbvKZNrMKciDoJp9pM6sgB4Ju8pk2swpyIOgmn2kzqyhfnrpF6Unq/kPSk7R+6q/bBpIeM7fmfI5aq9r5WTMiVu5NBpKuIn2vMqZHxPa9+bz+rt8FgqqRdEeZeVL6M5+j1nx+rLc815CZWT/nQGBm1s85EPS9hvOm2zx8jlrz+bFecR+BmVk/5xqBmVk/50BgZtbPORCYmfVzDgR9QNKWki6VNE1SSBrZ12WqEklfk3S3pBn5NUHSTn1driqRNDr/7RRfj/V1uWzh5EDQN5YlrUL0ddKqRTavR4BDgQ8AGwPXAxdLem+flqp6HiCtaFV7vadvi2MLK0/m0Qci4grgCgBJZ/VtaaonIi6p23SEpK+SVoy6uw+KVFWvRoRrAdZrrhFYpUlaTNIepFrULX1dnooZkpsXJ0u6QNKQvi6QLZxcI7BKkvQe0oLhSwMzgV0i4p99W6pKuQ0YSVoMfRXSwue3SFo/Ip7qy4LZwseBwKrqAWBDYHngM8DZkoZFxD19WaiqiIgri+8l3QpMAvYGxvRJoWyh5UBglRQRLwMP5bd3SPogcBDwxb4rVXVFxExJ9wLr9HVZbOHjPgJbWAwAlurrQlSVpKWBdwL/6+uy2MLHNYI+IGlZ4B357QBgsKQNgacj4uE+K1hFSPopcDnwX2A5YC9gGOBnCTJJxwLjgIdJfQRHAm8Gzu7LctnCyZPO9QFJw4A/N9h1dkSM7GphKigPqf0oMAh4jjRk9JiIGN+X5aoSSRcAW5JW8XoSuBU4MiL+1acFs4WSA4GZWT/nPgIzs37OgcDMrJ9zIDAz6+ccCMzM+jkHAjOzfs6BwMysn3MgMDPr5xwIbL5J2kzSHyQ9KullSU9JukbS3pIWW0CfeVZhRa4bCttHFrav2+C4YYX92xS2f7ew/ZEFUWazqnMgsPki6RvAzcCKpNXEtgG+APwb+CXwiQX48Y+RFqkZ1WDf88DwBttH5H31zsx5XdGx0pktZDzXkPWYpC1JUx2fFBEH1u2+RNIY0rw3C8pLEXFrk30XAZ+X9L3Ij81LWoY0lfWfSHP4vy4ipgHTJD25AMtrVmmuEdj8+A7wNPDtRjsj4j8R0VdLSv4WWBPYorBtF2AxUiAwszoOBNYjue1/GHB1RMzu4+I0MhW4iXmbh0YA/4+00pmZ1XEgsJ4aCCxDuuBW1TnAbpKWlrQaqf/inD4uk1llORDYAifpzZJmSDq3bvtQSX+V9G9Jf5e0cYc+8o+kRWw+CXyO1Ll8XYfyNlvkOBBYTz0FzCK1w5f1WeAfwE6SVihs/xVwVkSsS+pvOFeSelvAiHgeuJjUPDQCODciXuttvmaLKgcC65GIeBW4AdhWUtmlI78I/AIYT7pDR9LKwKbkFbUi4pqcdqMOFfUc0opm78HNQmYtORDY/PgpsBJwTKOdktaW9N7873cBQ4HLgDOYu/j8YODRiHilcOjUvL0TrgH+APwqIu7tUJ5miyQ/R2A9FhE3SToYGJMv9GeR1s5dAdga+BJpneG7SRf+30bEq5KuBVaU1Oyuv9fNQoUyzgH27FR+Zosy1whsvkTE8aSx+s8CxwLXkwLCu4CvAOMkLUFqp99L0hRgEvBWUnB4GFg9p6lZM283sy5yjcDmW0TcAtzSbL+kXYCHI+KDhW1rA38DDgFuJz3pe7qkbUk1gjvLfLakxVMRYk4uy1mkQNSqvDdQV+vIndOL1W83609cI7AF6YukJ31fFxGTgXuA3YD9gH0k/ZvU3/C52rQQbawJvEJnhoQekfMa0YG8zBZKKvf/zqwaJK1FeqgN4PmIeKCX+a0GvC2/fbkPp8Yw6zMOBGZm/ZybhszM+jkHAjOzfs6BwMysn3MgMDPr5xwIzMz6OQcCM7N+zoHAzKyfcyAwM+vn/j+uH+0b2uiC/gAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAWYAAAElCAYAAADX6kjUAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAApnUlEQVR4nO3dd9wcVb3H8c+X0JuUGANEmghiu1wIAhYMCohiAbk2pERRVLyoyBULoKHoRVAEQb2AaEBQsAJShFACIs2AgpSASCI95AkCCSaBwO/+cWbJsHl2d/Z55tmdzX7fr9e8kp05e+bMbvLbM2dOUURgZmbVsUy3C2BmZi/mwGxmVjEOzGZmFePAbGZWMQ7MZmYV48BsZlYxfRmYJU2UFLntGUn/kPQtSStWoGwf72YZep2kmbnvdpGkOZJulHSMpA27VKbdJH1xkP0TsnJO6HB5ljivpKmSpuZebyFpkqS1Olk269PAnPMBYDtgV+BS4KvAcV0tEUwEHJiH71LSd/sWYC/gAuDDwO2Sdu9CeXYDlgjMwC2kct7S0dIM7oBsq9kC+AbgwNxhy3a7AF3214i4N/v7FEmvBPaT9PmIeL6bBSuTpBUiYmG3y9FhAxFxQ+71JZJOJAXssyVtGhEPdqlsL4iIp4AbWibsgIi4s9tlsKTfa8z1bgFWAkbXdkhaWdK3Jc3ImjxmSDpU0jK5NCtK+p6k2yXNk/SopN9LelX9CSRtJOlnWZqFku7LAgbZbeRbgTflbsWn5t77BkmXZ+d4WtIVkt5Ql/9kSQ9K2k7SdZLmA8c2u2hJu0v6U5bvU5JukvTe3PH/lnS9pMclPSHpBkm71uWxrKSjsiahBZIGJF0r6c116T4p6dZcmtM7dascEfNINcKVgE+1Si9pl+y650t6UtJ5kjarSzM1u873Zd//QknTJX0wl2YysC+wXu57nZkda9SkcG12/r9m5/+LpG2yz/lbkh7Jvo/JklapK9MRkm7Jyjwg6UpJ2xa43heaMiRNBH6aHfp7rtwbSvqbpN8N8v7atbyj1bmsuX6vMdfbEHgSmAMp2JBqWK8GjgL+BmwLHE66vTs4e98KwGrA0cAj2bEDgBskvSoiHs3y2wi4Cfg36Rbx78DLgZ2zfA4AzgJGsThwPJW99/XA1cCdpOaOAL4CXC1p24i4NXcdLwHOAb4DfA2Y3+iCJR0IfB84jxQ85gFbZp9F/nP5MTCT9G/mPcCFkt4VEZdkab4MHAQcCvwVWB0YT+42WNIx2Wf2feBLwHrZZ/ZaSW+MiOcalbMsEXGrpIeBNzVLJ2kX4CLgSuBDwKrAkcC1kraIiIdyyTchXdMk4DHgM8A5kmZHxFWkfzsvBbYGaj94re5gNiE1q32T9J0cS2qOuYD0HUwENs/SPAYcknvvesD3gAeBVUhNOddIGh8Rt7U4b81FpO/mMFKTX+3u4hHgR8CJktaNiIdz7/kUMAO4rOA5rJGI6LuNxYFtM9I/8jVJ7bqLgP/Opds7S7d93fsPBZ4BxjTIfxSwMjAXOCi3/0zSf7J1m5RtKnDtIPt/DTwBrJHbtzrwOPDb3L7JWZnfV+BzWD0r429bpc29Z5nsM7sMOD+3/8Jm+ZCC+3PA1+v2vykr724lfr8zgbOaHL8euKtFHtNIP5zL5vZtBDwLHF/3fQWwbd33Px34Y9338uAg55mQvX9CXZ7PAhvn9r03S3d53ft/C8xoch2jsu/rbuDEAuedOsj/k03q8lyNVGE4PLdvNOnH5itlfY/9vPV7U8Z00n+Ax4HTgVMi4uTc8V2AfwLXZbeQy2a16MuA5Ui1ZwAkfVDpyf8TpAD/NKmWlb/13Rm4MF5cyyhq++y9T9R2RGqfvIDU/JG3iBQoW3ljVsZTmyWStJWkCyXNyvJ+FtiJF1/bn4F3SfqmpDdLWr4um51IQf3sus/yRtJ/8u2bnH+Z/HuUa0YaIpECTqPzrUK6azg3IhbV9kfEDOBPLPl5PxC59uxINf9fAW8YRlnviYj7cq+nZ39eWpduOjBOknLl31HSVZLmsPj72pQXf19DFhFzSXd2n8hd38dIn+tPG77RCuv3wLw76fbyXcDlwAGS9skdHwNsQPqHnd9uyo6vDSDpPcC5wF3AnsA2Wb6zgXz3u7VZfEvYrrVIt5H1HiXV+PMei2LNAmtnfzYsk6SXA1dk5z+QFMy3Bv7Ai6/tW6TmmfcCfwTmSPqppFp7/Zjsz3tZ8vNcPVeWwfykLv1PClxbMy9n8M+yZk1SkGn0ede3ic8aJN0sYHlSE8ZQ/Kvu9TNN9i9LqhkjaUvgYtKd2X6kysPWwK28+Psarh8C65N+jAXsD/wuIgb7LKxN/d7GfHtkvTIkXQncBhwn6TcR8TSprXkG8MEG75+Z/flh4N6ImFg7IGk5lvwPPEBq/xuKx4Gxg+wfmx3LKzqX60D253rA7Q3S7EJqs/5g5HoxSFr5RSeMeBb4NvBtSWOBdwPHk5p0PkTWbk+6a6gPLuSOD2YSkL+TGWiQriVJWwDrktrMG/kX6TNs9HnXl/Vlg6R7GSlozm6/lMOyB6mW/P7sOwFA0pqkprBSRMTtkv5IaldeQGoTb/lA1Yrp98D8gohYKOlLwPmkh3DHkWqFewDzImJ6k7evTPrPkLc3WS0m5zLg/ZLWiYhGNbaFpDa8elcDu0paLbuVRNJqpAdxU5uUrZnrSDWr/VnyFrmmFoDz/8k3JbUND1rTjvSw88eS3gW8Nts9BXgeWD8iprRTyIiYyeIfwSGTtCrwA9LD11OanO9pSTcDH5A0qXb3IWkD0h3DSXVveXn2APaGLN0o0gOzm2Jxt8uFpN4gI21lUlv+Cz/Okt5Gqt3OaDOv2gPKRuX+IalJY01S08uVbeZvDTgw50TEBZL+DPyPpJOBs0ltZ1dI+i7pdnB54BWkW/bdIuLfpAC+m6Tvkdp2twI+x5I1lG+QBrNcJ+lbpNv69YBdImKvLM2dpCaVDwH/AOZGxN2kJ/vvzsrybdJ/vC+T/iMeOcTrnSvpq8BJkn6TXe9c0sCCBRFxEqmJZxFwZvYZrAMcAdxPrilM0vnZ53MLqcb5n6Ta9inZuf6RlftkpS5nV5NqWi8ntT//OFIPhrKMzrqIiVTj3xL4JKlp4SMF2vkPJ/VMuFDSD0lt8UeQeu18ty7tLOBcSd8g1ZA/Q2rT/UwuzZ3AWpI+Q3qwuCAi/jaM62vkD8AXgMmSfpqV43DgoWZvaqDWr/mzks4g/TjfFhG1ZpXfACeQfqQPXvLtNmTdfvrYjY0GT5uzYztnxw7KXq9IupWeTqpBPE560DWJ7Ik9KUAdDTxMqo1dTQpMM4HJdfm/AvgF6XZ8IXAf8L3c8bGkNsK5WTmm5o5tQwqU80gPF68A3lCX/2QGefrf4vP4L9JDuPmkB3E3Au/OHf9gdv0LgDtITTeTgZm5NAeTBkrMyfK5O/uMlqs7195Zuqez67iL1EwxrsTvd2b22QWp9viv7Ds7BtigjXx2IfXgmE8KyOcDm9WlmQpcS/qhvj37Tu8GPlSXbpXse681k8zM9k9g8N4R19a9f8Ms3Sfq9k/K9ud7jxxIqh3Pz657R5bscdHovFPr8v8GKajXauEb1h0/Jft3sXa3/18vTZuyD9fMhiAbkLFsRLy5VdqlTdar5l5St8C9u12epYmbMsysLZJWJz072JPUFFXftGPD5MBsZu3aEriKNOLw8xHx1+4WZ+njpgwzs4rp9wEmZmaV48BsZlYxfdfGPHqUYsO+u+riZj7TOk2/G2zYoi32PBARapmwiWWlwo2sz8OlEbHLcM5XNX0XojZcFqYNdVB0H/h4u2PD+tC53S5AxS0oIY8gdfwuYm5u/vSlRd8FZjOrPrHkfAb9xIHZzCpHpHl1+5UDs5lVkmvMZmYVIvq7y5gDs5lVkmvMZmYV4od/ZmYV44d/ZmYV5DZmM7MKcVOGmVnF9Htg7ue7BTOrsGUKbq1I+qyk2yQ9lW3XS9p1pMpdBteYzaxySq4xP0hauPjvpFi+L3CepK0i4rbyTlMeB2Yzq5wye2VExPl1uw7NVivfDnBgNjMraiTamCWNAj4ArApcNwKnKIUDs5lVTptDskdLmpZ7fWpEnPqi/KTXAdcDKwLzgN0j4m/DL+nIcGA2s0pqo8Y8EBHjW6S5G9gCWAPYAzhD0oSIuH2IxRtRDsxmVjlld5eLiGeAe7OX0yRtDRwE7FfiaUrjwGxmldOBIdnLACuM7CmGzoHZzCqprBqzpGOAi4AHgNWAPYEJQGX7Mjswm1nllDwf81jgrOzPJ0ld5N4ZEZeWd4pyOTCbWSWVVWOOiIklZdUxDsxmVjlewcTMrGL6fRIjB2YzqxxPlG9mVkGuMZuZVYibMszMKsgP/8zMKsQ1ZjOzCnKN2cysQgQs3+1CdFFHf5Rarb2lZJKkhyXNlzRV0mvq8lhB0kmSBiQ9LekCSeM6eR1mNrJqA0zKWPOvF3X6umprb20JjAeuJK299frs+CHAwcCBwNbAY8AUSavl8jiBNJ/qR4C3AKsDF2YrE5jZUmJUwW1p1NHAHBHnR8QlEXFvRNwTEYcCc4HtJAn4AnBMRPwmm8B6XxbPBoWkl5DmT/1SREyJiFuAvYHXAzt28lrMbOTUHv45MHeYpFGSPszitbc2Is3+dFktTUTMB64B3pjt2oo0ICif5gHgrlwaM1sK9HNTRscf/jVae0tSLbDOqnvLLGC97O9jgeeAgUHSjG1yzv2B/QHWX1p/Ys2WIh6S3XmDrr2VOx516TXIvnpN02QLM54KMH4FtcrLzLqs3/sxd/xOICKeydqYp0XEV4G/ktbeejRLUl/zHcPiWvSjpO9rdJM0Ztbj3MbcfbW1t2aQAu9OtQOSViT1vLgu23Uz8GxdmnHA5rk0ZrYUcBtzhzRbeysiQtIJwKGSpgP3AIeR2qF/DhART0o6HThO0mPAHOB40lIxl3fyWsxs5PR7U0an25hbrb11LLAS8ANgTeBGYOeImJvL4yBgEXBulvYKYJ+IeK4jV2BmHeHA3CGt1t6KiAAmZVujNAtIA1AOLLFoZlYh7pVhZlYx/b7mXz9fu5lVWFm9MiR9VdKfs/l5Zkv6vaTXjlS5y+DAbGaVU3J3uQnAD0mjg99GekZ1uaS1yi11edyUYWaVVFatMSLekX8taW9S54M3Ab8v6TSlcmA2s8oZ4e5yq5Hi/r9G7hTD48BsZpUzwr0yTiSNOL5+5E4xPA7MZlY5bdaYR0ualnt9ajY/zpL5SscDbwbeXOWxDw7MZlZJbbQxD0TE+FaJJH0P+DCwQ0TcN/SSjTwHZjOrnLLbmCWdSArKEyJieolZ159nBWA7YFtgXdLo5AHSrJrXFP1BcGA2s0oqKzBL+gFppaPdgH9Jqs1gOS8i5pV0jk1IKzB9FHgJ8Dyp58d8YC3S/PMh6WZS170zI+L5Rvm5H7OZVU7t4V+RrYADSD0xrgAeyW3/U0pZpZOBO0jrlB6Z/bliRKwdEeMiYmVgHeD9pIeOxwN3SNqmUZ6uMZtZ5ZTZlBERKimrRsYB20TEX5uUYRZwPnC+pAOBTwH/QZqobQkOzGZWSb1yOx8Ru7WZfiHw/WZpHJjNrHI8H7OZWQX1So25nqTlgS2p65URETOL5uHAbGaV02s1ZkmjgN2BTwBvBZYnXUZNSHoI+AVwWkTc2yy/Xv1RMrOlWMm9MkaUpD2A6aTVmRaSlsTbifRwb1NSn+Y9gV+TuuzdJek0SS9rlKdrzGZWST1UYz6JtCze5Ih4okGam0jL4X0x6yb3ZeCTwNGDJXZgNrPK6bGmjI2zJe8KiYgbgfdnowQH5cBsZpXUK+2s7QTluvctbHTMgdnMKqfHasylc2A2s0rqlRqzpPvbSB4RsUGrRA7MZlY5y5D6m/WIccBTwKXAv8vI0IHZzCqpV2rMwJmkPsy7AL8lzRx31XAy7KFrN7N+UfIq2SMqIiYCY4HPkkb7TZH0T0lHS9p0KHk6MJtZJfVKYAaIiPkRcVa2Ivf6wA+A95IGk9yQDUIpzIHZzCpHpOBUZKuaiHg4Io4FxgP/m/350XbycBuzmVXOCK+SPaIkbQvsA3yQNInRr4CT28nDgdnMKqkqzRRFSNoY2CvbXgFcSxp2/cuImNtufv0XmDcHLut2IarrJxO6XYLq+9xd3S5Bte1ZQh69NMBE0rWkBVjvJfXQOKudKT4H03+B2cx6QhXbjxt4I6kf8yxgZ2BnqeFqVhERb22VoQOzmVVOL9WYgWuAKDNDB2Yzq5xeCswRMaHsPB2YzaxyerlXRhl6qBnHzPpJWf2YJW0v6QJJD0kKSRPLLKekZdrZiuTpGrOZVU7JTRmrAreTekycWV62L1hE8TbmoEDcdWA2s0oqKzBHxMXAxQCSJpeUbd6R+OGfmfWFog2tz49oKVqKiEll5+nAbGbV005bxvOMljQtt+fUiDh1BErVMQ7MZlY97XTLeJaBiBg/ksVpJlvB5D0RcWtu38eB8yLi8aHk6V4ZZlZNvTPv5zjghRWvJY0CTgM2HGqGrjGbWfX00giTwTUck12EA7OZVVNJ9/OSVgU2yeW6vqQtgMcjop2FVDvGTRlmVj3lri01HvhLtq0EHJH9/cgSSzxYd7khd6FzjdnMqqfEMdkRMZVhNi0UcKqk+nmXT5c0b8nieHY5M+tVvdPGPNjsclcPJ0MHZjOrntqifz3As8uZWf/onRpz6XrkN8nM+kq5D/9GlKR1hvi+sY2ONawxSxrqLEyHR8Q/h/heM7Okd6qN90r6MfCjiJjeLKGklYDdgUOAXwNHD5auWVPGXsCjwMI2Crg+cALgwGxmQ9dbM+VvDxwL3CHpNuCPwK3AbFL8XBPYGHgD8DbStEvHAsc3yrBVG/NuEXFTkZJJWhZ4pkhaM7OmemjkX0TcDLxd0pbAJ4F3A/9dl2wBcCOppnx2RNR3rXuRZoH5CuDJNsr3XPaep9p4j5nZknooMNdExC3AZwAkjQHWBVYE5gAzI+LZonk1DMwRsVObhQqgrfeYmTXUO23MS4iIx4DHhvr+hpcuaauimUg6eagFMDNbQg/1ysiT9JykNzQ4tpWk54rk0+w36VJJrytQkNPJqu9mZqWoPfwrslVLs6Hfoyg4f0azwPwAcLmkzQY9e/Iz4GPA14uczMyssB6qMWcrYNdKM9jq2KsA7wQGiuTXLDDvSOrucYWkV9QVYhRwDvBR4JCI+GbbV2Jm1khtSHaRrcskfQN4ltQrLYA/Za/z21OkCuyviuTZ7OHfHElvI03QcYWk7SPifknLZZm/F/h8RJw09EsyM2ugIrXhAqZmf4oUfE8HHqxLsxC4E7iwSIZN+zFHxGOS3k6aKelKSe8Evg/sDHwqIk4rXHQzs6J6qLtcRFxNNpucpABOi4iHh5Nny0mMIuIhSTuQas53kD6yiRHxs+Gc2MysqQo0U7QrIo6A1OYMvBpYG5gWEU+3k0+z7nIfr22k/sk/JwXyy4Dl8sezNC1JmiQp6rZHc8eVpXlY0nxJUyW9pi6PFSSdJGlA0tOSLpA0rp2LNrOKE7B8wa1iJH2WNJ3FbcCVwGbZ/vMkfa5IHs1qzD9usP+d2ZYXwE+KnBC4G5iQe53v13cIcDAwMUv3dWCKpM1yQxhPAN4HfIQ0ouZ44EJJW0VEoT6CZlZxPTQfc56kTwInkuLhZcAvc4f/COxBag5uqllg3mg4BWxiUUQ8Wr9TkoAvAMdExG+yffuSRs/sCZwi6SXAfsDHImJKlmZv0qRJOwKXjlCZzayTeqiNuc4Xge9GxJdz3edqpgNfKpJJs14ZIzVD3MaSHiJ1LbkR+FpE3Ef6IRhL+pWplWG+pGuANwKnAFuRupTn0zwg6a4sjQOz2dKiB2vMpDjWKA49DaxRJJNOr2ByI6mZYjowBjgMuC5rR65NGj2r7j2zgPWyv48lNX3Ud9KelXu/mfW63q0xDwAbNji2GfBQkUyaPfy7UtKripYmG91ypaRXNkoTEZdExC8j4raIuJw0Pd4ywL75ZPVZD7JvidM3SyNpf0nTJE2bPadFTmZWDSWP/JN0gKQZkhZIulnSW0ovM/we+LqkjXP7QtJo4CDgvCKZNLtZmACs1kaB1O57ImIeqQveK0lPMWHJmu8YFteiHyV9FaObpBnsPKdGxPiIGP/StYuWzsy6puS5MiR9iPRQ7lvAfwLXAZdIWr/kkh9GGkxyO3A5qcL4feAu0t3+kUUyadWKc56k+4pswN8pOEFHjaQVgVcBjwAzSIF3p7rjbyF9iAA3k4Y35tOMAzbPpTGzXlf+7HJfBCZHxGkRcVdEHEiKO6VOwBYRc4DxwP+Sfjb+QWoyPhnYLiIKzXHfrI35jCGWreEkHZK+Q6rq30+q5R4OrAKcEREh6QTgUEnTgXtIvz7zSH2oiYgns9nsjpP0GIu7y91G+nUys6VFSQ//JC1P6jjwnbpDl5E6DZQq69p7VLYNSbNeGR8baqZNjAN+QWqKmA3cAGyb6wFyLLAS8APSOlk3AjvXLcNyELAIODdLewWwj/swmy1F2nv4N1rStNzrUyPi1PzxLLfBOhbsONQijqSO9sqIiA+3OB7ApGxrlGYBcGC2mdnSqniNeSAixhdIN5SOBS1JurKN5BERb2+VqNPd5czMWqsNyS7HAOnBW7OOBcOxDC8O8Jtl55qZ5f8yUhe6R0gjmgtlaGZWLSXOxxwRz5A6DtSvSboTJXQaiIgJEbFDROxA6vnxLKmJduOI2C4iNga2y/afWCRPB2Yzq6Zye2UcD0yU9AlJm0s6kbSK9f+VXOqjgMMj4qb8zoi4kdREe3SRTNyUYWbVU/LIv4g4V9LapJ5e65D6Gb9rBKaeeCWpY8NgHgM2KZJJs5F/90n6jyEUzMxs+EpeWioifhgRG0bEChGxVURcU3qZ03iMTzU49ilSu3NLzWrMGwIrtFUkM7My9O5cGUcAZ0u6Hfg1ix/+/RdpMN1Hi2Tipgwzq57akOweExHnSBogBeivkq7iWeDPwDsi4ooi+bQKzMPu42dm1rberTGTTdB2eba81GhSP+vn28mjVWA+Iov+BcoS+7ZOZmZWUI/3GcuC8WNDeW+rwLwFaaaklmUYysnNzAbVQzVmSQcBP8pGJRd9z5bAmIj4w2DHWwXm3er745mZdUSPBGZgH+DLkiYDv4iIWwdLJGlN0hz0ewNvJi0aMig//DOz6umtxVi3JAXbg4FDJD0F/I3Un3khaUK2jYFXZK/PBV4dETMbZejAbGbV00O9MrLJ184EzpS0DbALsA0pGK9Imp74j8A3gfMj4olWeTowm1k19U5Txguyodc3DjefZvMx986NhJktXXro4d9IcI3ZzKqpx6qGksYA7wReDayV7X4cuBO4JCIKd51zYDaz6umhGnM2kOSbpNWVlgf+DfyLdBVrACsDz2RL5301a5NuyoHZzKqnhx7+kYZef4EUnH9W39tC0gakXhuHAnOzdE05MJtZ9fRQjRn4JKkmfMJgB7OpRY+W9DTweRyYzaxn9U4b88uAvxRId0uWtqXeuXQz6x+1GnN5K5iMpLuApgtNZz4CTC+SoWvMZlZN1Qi6RRwJ/FrSZsBZwB2kh39B6p3xGtI8zG8lzcvckgOzmVVPDw3JjojzJO0K/C/wY5ac1E3ArcB7IuKSInk6MJtZ9fRWrwwi4lLgUknjSDXktUhX8ThwR0Q80E5+DsxmVk2905Txgoh4EHhwuPn0yM2CmfWV3nr4V4ik0ZK2L5LWgdnMqqnkVbKLkLS/pKskPSEpJG1YYvZvBa4qktCB2cyqp3s15pWBy4BJpefcBrcxm1k1daGZojZ6T9L4ou+R9JOCSTcommf/BeZlt4Ix07pdiuq6Rt0uQeVt8Ytul6DaVj6uhEx6q1fGROBJYF6LdCsVzbD/ArOZVV8P9WMG7gcui4j9myWS9F+kZaVa6p1LN7P+UryNebSkabntRQFS0tHZg7xm24RhlHQaUKTpo+V0nzWuMZtZ9bQ3u9xARDQLjCeQhko3c3/hsy3pAmCvAunuJA3fbsmB2cyqqaT7+YgYAAbKyW3Q/M8kLcbaKt1dwBFF8nRgNrPqWYa0FkiHSRoLjAU2zXa9WtIawP0R8XinyuE2ZjOrpi4MMAE+TZpb+ezs9UXZ6/eWfqYmHJjNrHq6NMAkIiZFhAbZJhcqtvS8pOcabIskzZE0RdLOzfJxU4aZVVNvVhuPAvYFViTVtmeRmkbeBSwAzgMmAJdIel9EXDhYJg7MZlY9vbXmX94CYAbwzohYUNspaSXgEmA2sCUpaH8NGDQw9+Zvkpkt/XpzdrlPA9/LB2WAiJgPfA/4dEQ8T5pQ//WNMnGN2cyqp7eGZOeNoXHJlwfWzv4+QLrKQbnGbGbV07vzMU8DJklaJ79T0rrAN7LjkCY0erhRJq4xm1k19Wa18fPAFcAMSdcDj5Fq0dsB/2bxCMFNgJ83ysSB2cyqp0cf/kXELZI2AQ4GtgFeBzwCfBc4PiLmZOm+3iwfB2Yzq6berDGTBd+vDScPB2Yzq54uDckui6S1SM0Xa5Ee9N3YzpBuB2Yzq6YerTFLOprUlLFCbvdCSd+JiMOL5OHAbGbVpKKNzM+NaDHaIekLpGaM00lTjT5KGvm3F/A1SbMj4vut8nFgNrMKEsXDU3UCM2mAyYkRcVBu393A1ZLmAQcALQNzj94smNnSrRaYi2yVsiFpuPVgLsqOt+TAbGYV1LOBeQ7w2gbHXpMdb6lyV2VmlgLzigXTPjmSBWnX74CjJM0BzomIZyUtC3yAtKzUGUUycWA2swpqp425Ur4K/AcpAP9E0uOkLnOjgGsp2L+5J6/czJZ2vRmYI2KupO2BXYHtgTWBx4GrgUsiotBK2b135WbWJ3pwTDaQBd8LaTDXchEOzGZWQb1TY5b0PFCoJkyK2y0vrDeu3Mz6TO8EZtJDvaKBuZCeuXIz6yfLULxXRndFxKSy83RgNrOK6t/w5AEmZlZBnR9gImktSSdJmi5pvqQHJP1I0tqt310uB2Yzq6CujPxbF1gPOIQ0wf1epC5vvyjzJEX0772CmVVY5x/+RcTtwPtzu+6V9CXgQkmrR8RTnSpLR2vMkmZKikG2i7LjkjRJ0sPZrcRUSa+py2OF7HZjQNLTki6QNK6T12FmI602JLvINqJWBxaS1uvrmE43ZWwNrJPbtiR1M/lldvwQ0gTTB2ZpHwOmSFotl8cJwB7AR4C3kD64C6XCk7eaWeW11ZQxWtK03LZ/KSWQ1gCOAk6LiEVl5FlUR+8VImJ2/rWk/YCngF9JEvAF4JiI+E12fF9ScN4TOEXSS4D9gI9FxJQszd7AP4EdgUs7dClmNqLaasoYiIjxDXNKK4oc2iKPHSJiau49qwC/Bx4iVRg7qmttzFkg3g84KyL+LWlj0kz/l9XSRMR8SdcAbwROAbYClqtL84Cku7I0DsxmS4VS25hPIK0m0sz9L5xZWhW4OHv57ohYUFZBiurmw7+dgI2AH2evx2Z/zqpLN4v0pLSW5jnS4ob1acbSQHZrsz/A+uuvP/QSm1mHlBeYI2KAJWPG4GdNzaaXZAXYJSLmlVKINnWzu9wngT9HxF/r9tcPbdQg++o1TRMRp0bE+IgY/9KXvrTtgppZp3WlH/NqpLvxNYGJwCqSxmZbR9fs7kqNWdIY4H3AZ3O7H83+HAs8kNs/hsW16EdJU06NBmbXpblmRAprZl0gXrzIdEdsBWyb/f2eumM7AFM7VZBu1ZgnkrqgnJPbN4MUeHeq7ZC0IqnnxXXZrpuBZ+vSjAM2z6Uxs57X+RpzREyNCDXYppZ2ogI6XmPOHvp9grTsytza/ogISScAh0qaTvrFOgyYB/w8S/OkpNOB4yQ9Rlo/63jgNuDyjl6ImY2gnppdrnTduPIJwCtJwx3rHQusBPyA1M5zI7BzPoADBwGLgHOztFcA+0REpdYwN7PhcmDumIi4ivRzONixACZlW6P3LyANQDlwBIpnZpXgGrOZWcU4MJuZVUzvTJQ/EhyYzayi+jc89e+Vm1mFuSnDzKxiHJjNzCrGgdnMrGJqE+X3JwdmM6sg15jNzCpGpPnK+pMDs5lVkGvMZmYV48BsZlZB/Rue+vfKzazCPCTbzKxi3JRhZlYxDsxmZhXUv+Gpf6/czCrMNWYzs4rp78DcrVWyzcyaqPXKKLKVR9Jpkv4hab6k2ZLOl7R5qScpwIHZzCpq2YJbqaYBE4HNgXeQqu6XS1qu7BM107/3CmZWYd1pyoiIU3IvZ0o6DLgV2Bi4u1PlcGA2swrqfhuzpFWAjwH3AzM7eW43ZZhZBdVmlyuyMVrStNy2/7DOLB0gaR4wD3gn8PaIWDicPNvlGrOZVVBbE+UPRMT4hjlJRwOHtshjh4iYmv39bGAKsA7wP8CvJL0pIv5dtEDD5cBsZhVUalPGCcBZLdLcX/tLRDwJPAn8XdINwL+APYCflVWgVhyYzayCygvMETEADAyjIAJWKKUwBTkwm1kFdf7hn6RNSDXjy4HZwDjgK8BC4MJOlsWB2cwqqCu9MhYCE4CDgTWAWcA1wHYR8WgnC+LAbGYV1dnwFBEPkHphdJ0iottl6ChJs4F/drscOaMZevtXv/Bn1FzVPp8NIuKlw8lA0h9I11XEQETsMpzzVU3fBeaqkTStWVcf82fUij+fpY8HmJiZVYwDs5lZxTgwd9+p3S5AD/Bn1Jw/n6WM25jNzCrGNWYzs4pxYDYzqxgHZjOzinFg7gJJ20u6QNJDkkLSxG6XqUokfVbSbZKeyrbrJe3a7XJViaRJ2b+d/NbRYcM2chyYu2NV4Hbg88D8Lpelih4EvgxsCYwHrgTOk/T6rpaqeu4mzRlc217X3eJYWTxXRhdExMXAxQCSJne3NNUTEefX7TpU0meA7YDbulCkqlrU6cl1rDNcY7ZKkzRK0odJdxnXdbs8FbNx1hw2Q9I5kjbudoGsHK4xWyVJeh1wPWl9oXnA7hHxt+6WqlJuBCYC04ExwGHAdZJeExFzulkwGz4HZququ4EtSPPi7gGcIWlCRNzezUJVRURckn+dLYF0H7AvcHxXCmWlcWC2SoqIZ4B7s5fTJG0NHATs171SVVdEzJN0B/DKbpfFhs9tzNYrlqHD6671EkkrAq8CHul2WWz4XGPuAkmrAptkL5cB1pe0BfB4RNzf8I19QtIxwEXAA8BqwJ6kJX/clzkj6TvA70mrO48BDgdWAc7oZrmsHJ7EqAskTQCuGuTQGRExsaOFqaCsC+EOwFjSMvK3AcdFxKXdLFeVSDoH2J60ysds4Abg8Ii4s6sFs1I4MJuZVYzbmM3MKsaB2cysYhyYzcwqxoHZzKxiHJjNzCrGgdnMrGIcmM3MKsaB2YZM0naSfinpYUnPSJojaYqkfSWNGqFzTs6t2DE1t39ibv+mg7xvQu74jrn9h+X2PzgSZTZrlwOzDYmkLwB/AtYirTayI/Bx4B7gR8C7R/D0j5ImzT9gkGNzgb0H2b9PdqzeT7O8Li6tdGbD5LkyrG2StidNLXlyRHyu7vD5ko4nzdswUhZGxA0Njv0W2EvS1yMb1ippJdLUob8hzWH8goh4CHhI0uwRLK9ZW1xjtqH4CvA4cMhgByPiHxHRrSWgfgZsALw5t293YBQpMJtVngOztSVrO54AXBYRC7pcnMH8E7iGFzdn7AP8jrQSilnlOTBbu0YDK5ECYFWdCXxA0oqS1iG1f5/Z5TKZFebAbCNO0iqSnpJ0dt3+V0i6VtI9kv4iaXxJp/wVaVL99wAfJT0svKKkvM1GnAOztWsOMJ/UjlvUh4BbgV0lrZnb/3/A5IjYlNRefbYkDbeAETEXOI/UnLEPcHZEPD/cfM06xYHZ2hIRi4CpwE6Sii71tB/wXeBSUg0WSS8FtiVbcSMipmRptyqpqGeSVjx5HW7GsB7jwGxDcQywNnDcYAclbSTp9dnfNwdeAVwInM7ixVTXBx6OiGdzb/1ntr8MU4BfAv8XEXeUlKdZR7gfs7UtIq6R9EXg+CzwTiatPbcm8HbgE6R1+m4jBeKfRcQiSZcDa0lqVCsedjNGrozPAR8pKz+zTnKN2YYkIk4g9RV+AvgOcCUpQG8OfAr4vaTlSO28e0qaCdwHvIQUrO8H1s3S1GyQ7Tfra64x25BFxHXAdY2OS9oduD8its7t2wi4BTgYuIk0Eu80STuRasw3Fzm3pGVTEeK5rCyTST8Mzco7lbpaefawcVT9frNuco3ZRtJ+pJF4L4iIGcDtwAeATwMfk3QPqb36o1FsdeANgGcppwvcoVle+5SQl1kpvEq29RRJG5IGuQDMjYi7h5nfOsB62ctnujiU3OwFDsxmZhXjpgwzs4pxYDYzqxgHZjOzinFgNjOrGAdmM7OKcWA2M6sYB2Yzs4pxYDYzq5j/B2AIT1n236aEAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAWYAAAElCAYAAADX6kjUAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAApz0lEQVR4nO3deZgcVbnH8e8vIIRdwmJYZFdAr8qFIOCCoIIIqOCKsgW5ooIIXNxYjWyicBEVXBA0hEVRUVAEIYDAZTdyDWtAhAgBAlkEEiAhCe/941STSqeX6pme6arM7/M89UxP1alTp3pm3jn91qlTigjMzKw8hvW6AWZmtigHZjOzknFgNjMrGQdmM7OScWA2MysZB2Yzs5IZsoFZ0mhJkVtelvRPSadIGl6Ctn22l22oOkmT636++eXwQW7L4ZI+2mD9GEmDPl610XGz92VM7vs9JP33YLfNkqV73YAS+AQwBVgJ2BM4Knt9aA/bNJr0s/l5D9uwJLgaGNNg/eTBbQaHAzcDv6tbfy7w50FuSzPbkf4OavYA3g+c0ZPWDHEOzPD3iHg4ez1e0huAAyUdFhGv9LJh3SRp2YiY2+t2DLLpEXF7rxvRTERMYdFg2DNlfp+GoiGbymjhLmA5YPXaCknLS/qOpEezlMejko6RNCxXZrik70m6V9JsSVMl/VHSZvUHkLShpAuyMnMlPSLp+9m2G4D3AO/MffS+Ibfv2yVdmx3jBUnXSXp7Xf1jJU2RtJ2kWyW9BHy31UlL2lPSLVm9z0u6U9KHc9u/JOk2STMlPSvpdkm71dWxtKQTs5TQHEnTJd0s6V115T4naWKuzHmSRrRq32BScoSkB7Of91OSzpK0cl25kHRy9rswRdJLkm6StEWuzGRgfWDv3M9zbLatWUrhJElHSvpX9jP+k6Q1s+XXkp6T9Likr9ftu4akn0p6SNKLWZmLJa1T4JxfTWVk7dsfWCfX5smSRmbvx2EN9h+THXPVQm+yteQe8+I2AJ4DZkAKNqSPxG8CTgTuAbYFjgNGAEdm+y1LSoGcBDyVbTsYuF3SZhExNatvQ+BO4EXgm8A/gNcDO2f1HAxcCCwFfD5b93y271uBG4H7SemOAL4B3Chp24iYmDuPVYBfAacDRwMvNTthSYcCPwAuI/1Bzga2zN6L/PtyLikNsDTwIeAKSbtGxFVZma8DRwDHAH8HVgZGZe9F7VinZu/ZD4CvAutk79l/SHpHRCxo1s4+UPbzW0REzG+z38mklNbZwB9Z+LN/m6T31H2S2g94DPgS6XfgBOA6SW+IiJmk9NiVwEQWplWmtTn+vsC9pN+F1wFnAuNIv19XAeeQUnCnSronIq7M9hsBzMnaPg1Ym/Re35L9Ds5pc9yaE4E1gK2B2j/nuRExVdJlpN/L79cKS1oKOBD4dUT8u+AxrJWIGJILCwPbpqRAsyrwWWA+8KVcuX2zctvX7X8M8DKwZpP6lwKWB2YBR+TWjyMFvrVbtO0G4OYG638LPAu8NrduZWAm8LvcurFZmz9S4H1YOWvj79qVze0zLHvPrgEuz62/olU9pOC+ADi+bv07s/bu0cWf7+SszkbLqBb71YLb2Lr1+2T7fji3LoDpwAp15zgPOLGuLRc2ONaY9Ce4yLoAHgKWzq07I1t/bG7d0sAzwC9anMtSpH/6AexZ4Lhj6n6HpjSoc4es7Ltz6z6crdu2Wz+/ob44lQGTSH9IM4HzgJ9GxFm57bsA/wJuzT6qL531wq4BXkPqPQMg6ZOS7pD0LCnAvwCsSAr+NTsDV0TEk31o6/bZvs/WVkTE88AfSOmPvPmkQNnOO7I2ntOqkKStJF0h6ems7nnATix6bn8Fds0+3r9L0jJ11exECuoX1b2Xd5A+FWzf4vjD8vsol0Zq4SpSr69+ub/FPtuSer4X1q3/Fem869/nKyPihdo3ETEZuJ10Ma2vxseivfpJ2derc8eZDzxMCryvkvTFLE00O2vvY9mm/M+pzyLiBtL79/nc6s8Dd4fz1F3jwJw+am4N7ApcCxwsab/c9jVJOcJ5dcud2fbVACR9CLgEeAD4DLBNVu80ID/8bjX6fsFnBClNUm8qqcef90wUSwusln1t2iZJrweuy45/KCmYb00aUZA/t1NI6ZkPA/8LzJD0C0m1fP2a2deHWfz9XDnXlkZ+Xle+yIiVmRExocHyYot9ammXRd7nLBDOyG2vebpBHU+TUjR9VZ8OeLnF+lff/ywl9SPS7/FHgbezsOPQzSGgPwY+Lmk1SeuTOi8/6WL9Q55zzHBvZKMyJF0P3A2cJunSrCc0A3gU+GST/SdnX/cCHo6I0bUNkl7D4n/I0+n7H+1MYGSD9SOzbXlFx8dOz76uQ8prNrILKWf9yUgjCYB0UXSRA0bMA74DfEfSSGB30sfw5YFPkeXtSZ8aGuUiZzRYVzMGyH+Smd6kXH/V3seRwH21lVnPfjUWb+PrGtTxOuCJAWlda3sB10VE7bpH7ZpGt40Dvk1KB65Kun5x0QAcZ8hyYM6JiLmSvgpcTrrwchqpV/gxYHZETGqx+/Kkj455+5LyfHnXAB+VtFZENOr9AswlXeipdyOwm6SVImIWgKSVSBfibmjRtlZuJeW8DyL3UblOLQDPq62Q9EZSbrhhTzvSxc5zJe0K/Ee2ejzwCrBeRIzvpJFZimByJ/v00e2k938v0qeEmk+R/l5urCu/q6QVaukMSRuQeqmn5srMJY30GWjLk10ozjmgj3U1bXNEPC/pIlIKY0Xg4iylZl3iwFwnIv4g6a/AVySdReoJHEC60v4/pKvrywAbkz6y75F9NP4zsIek75Fyu1sBXyZdrMv7JrAbKWd9Culj/TrALhGxT1bmflJK5VPAP4FZEfEg6Wr57llbvkPqFX+d9Ad5Qh/Pd5ako4AfSro0O99ZwBbAnIj4Iemj8XxgXPYerAV8i5S/zA8ZvDx7f+4i9Yj/k9Tb/ml2rH9m7T5L0qakIDeHlCfdCTg3Iv7Sl/NoYnVJ2zZYPzUL9IuJiJmSzgCOkvQCaUTF5qSRIzcDf6rb5SXgGkmnkXLT3yIFx+/lytwPvFvS7qS00/Rmx++nPwNfl3Q0KdX2XuDjfazrfmCEpC8CE0i/C/fktv+IhXlmpzG6rddXH3u1sHBUxiYNtu2cbTsi+3446aP0JFJPYibpQtcYsqvnpAB1EvAkaSjcjaTANJnFr/BvDPyS9HF8LvAI8L3c9pGkgDAra8cNuW3bkALlbNLFxeuAt9fVP5YGV9TbvB8fJ12Ee4kUWO4Ads9t/2R2/nNIH/H3yo4zOVfmSFKPc0ZWz4PZe/SaumPtm5V7ITuPB0hpinW7+POdTPNRGWe12VekYX8PkvK4T5GGzq1cVy5IQ+uOJn1ymEPKrW9RV26zbP2L2T5js/VjaDw64qQiv6vUjd4h9XB/TLquMYvUQdiQxUdcNDtuvswK2e/ov7Ntkxu8Tw8Cf+313/KSuCh7g82sQ9nNISdHxLG9bstgy1JZk4DPRcR5vW7PksapDDMrTNK6wCaklM1TwMW9bdGSycPlzKwT/wVcTxp58pmIaHpHqfWdUxlmZiXjHrOZWck4MJuZlcyQu/i3+jDFBv531NSj3ZzbbQn1bK8bUHKvABGh/tSxtFQ4yfoKXB0Ru/TneGUz5ALzBsNgwiq9bkV57VN/Y7ct5ve9bkDJFZ1btJUgDaQuYlZu7vQlxZALzGZWfmLxuQyGEgdmMysdkebUHaocmM2slNxjNjMrETG0h4w5MJtZKbnHbGZWIr74Z2ZWMr74Z2ZWQs4xm5mViFMZZmYl48BsZlZCTmWYmZWIe8xmZiXjURlmZiVUtR6zpG2BXYBtgbVJTy2fTnqa+I3AZRHx7yJ1DeU0jpmVVO2W7CJLr0naX9I9wK3A4cDywD+AO4B/A9sA5wJPSBoracN2dbrHbGalVIUes6SJwJrAOGA/4O/R4EGqklYBdgf2Bu6TdEBEXNKsXgdmMyudCl38+wXwk4ho+XyAiHgOuAi4SNLbgJGtyjswm1npVOXiX0Sc2Yd9JgITW5VxYDazUqpIj3lAODCbWelUcT5mScsC29F4VMZNEfFI0bocmM2slKrSY5a0CWk0xt7AKqQHhT8HvASMAIYDIelvwI+AcRHxSqs6q/ZPycyGgKoMl5N0FnAfsDVwQvZ1eESsFhHrRsTywFrAR4G/A2eQRmVs06pe95jNrHQqNCpjXWCbiPh7swIR8TRwOXC5pEOBzwNvI41zbsiB2cxKp0KjMvbosPxc4Aftyjkwm1kpVaTHPCAcmM2sdKqSypC0USfli47McGA2s1Lq9YW9gh4GFrsFu4VC/28cmM2sdKrSY87MAi4F/pfOgnRTDsxmVkoV6TF/FtgX2B/YAbiQNE75n/2ptCLnbmZDiYBlCi69FBFjI+J9wAbAecAngIck3SLpIEmv7Uu9gxqYJR0i6W5Jz2fLbZJ2y22XpDGSnpT0kqQbJL25ro5lJf1Q0nRJL0j6g6R1B/M8zGxgVeUGk5qImBIRp0TEm0i3Zd8FnAw8Jen4Tusb7POaAnwd2BIYBVwPXCbprdn2rwFHAoeS7qB5BhgvaaVcHWcCHwM+DbwbWBm4QlKFUlJm1s5SBZeyiYg7gbHAb0nDsd/WaR2DGpgj4vKIuCoiHo6IhyLiGFLifDtJIt1vfmpEXBoR95LyNisBn4FXJ5s+EPhqRIyPiLtI+Z23Au8fzHMxs4FTu/hXpcAs6fWSviHpPuBO4C3AIaSY1ZGefRKQtJSkvYAVSY9k2ZA0efQ1tTIR8RJwE/CObNVWpP9A+TKPAw/kypjZEqAKqQxJK0k6QNL1wGTgc6Se8hsj4l0R8dOIeLbTegd9VIaktwC3kWZcmg3sGRH3SKoF1qfrdnkaWCd7PRJYQJpKr75M0ycCSDoIOAhgvV7/JM2srarckg1MBeYBvyNNYvTqkDlJi0WbdrPK1fRiuNyDwBbAa0m54vMl7ZDbXj8OUA3W1WtZJiLOAc4BGLW0ujLO0MwGToXGMS+XLftnSytBwZg76IE5Il4m3S0DMEHS1sARpCuYkHq+j+d2WZOFveippJ/X6sC0ujI3DVSbzWxwVSgwf2sgKi3DDSbDgGWBR0mBdyfgrwCShpNGXnw1K/s30seGnYCLszLrApuT8tRmtoSoQtYxIqofmCWdCvyJ1COujbbYAdgtIkLSmcAxkiYBDwHHkvLQF0N60qyk84DTJD0DzCBNPH03cO1gnouZDZwK9ZgHxGD3mEeSblkcSXr0yt3AByPi6mz7d0n5mrOBVUkTSe8cEbNydRwBzAcuycpeB+wXEQsG5QzMbFBUITBLem8n5SPi+iLlBjUwR8ToNtsDGJMtzcrMId2AcmgXm2ZmJVKhURnXki7qqUWZ2vbAs8uZWVVV6CnZOw5EpQ7MZlZKVUhlRMSNA1FvRf4pmdlQUpVbsiVtJKnrWRcHZjMrpSrckg38A/jP2jfZDJnjJK3fn0pLcF5mZouqSo+ZxS/6DQP2AVbrT6UOzGZWOrVRGUWWrh1TGpHN9T4pmw/+cUk/ltSvINsXDsxmVjo96jGvTZow7WukKTv3AbYHftndw7TnURlmVkqD3WvM5oD/aG7Vw5K+SnoQx8oR8XyTXYflZpJbqsG6/DFKO7ucmVlLJbole2VgLvBiizK3NFh3R4N15Z1dzsysiA4C8+qSJuS+Pyeb6rdfsgepngj8LCLmNylW/UmMzMyK6PCW7OkRMappXdJJwDFt6tgxIm7I7bMC8EfgCVLOuaElYnY5M7MiupzKOJM0eVorj716bGlF4Mrs292z+XkGlQOzmZVSty7+RcR0Fn8cXUOSVgKuIv1v2CUiZrcpv2dE/L6T9khaC1g/Im5vVsbD5cysdHoxXC4LyteQphweDawgaWS2LNNkt7MlTZT0BUkj2tT/bknnkJ7g9NZWZd1jNrNS6kGvcStg2+z1Q3XbdgRuaLDPJsBXSA9i/aGkB4CJpEffzSUF+Y2AUcAqpEfg7RQRLZ+45MBsZqXTi+Fy2cW/VvMqN9rnReAESd8mjYH+ACm4rw0MJz1laRLwfeCSiJhUpF4HZjMrnQpNlA9ARMwjPVXpkm7U58BsZqVUkhtMesKB2cxKp0R3/hUmab0Wm18Bnqt7fmlTDsxmVkoVHDI2mXTbdVOSHgG+GxE/a1XOgdnMSqeKPWbgC8DRwLPApcDTwEjgY6QRGT8izVb3E0nzImJss4ocmM2slCrYY34jMCEiPl63/gRJlwIjI2J3SRcAhwFjm1VUwXM3syXdMGCZgkuJ7AOc22TbucDe2evfAJu2qsg9ZjMrpQr2GlcC1miybQ1gxez188CCVhVV8NzNbElXoWf+5d0InCJpq/xKSaOAk4G/ZKveQG7SpEbcYzazUipZ0C3iEOBa4E5JjwHPAGsC6wGPAodm5VYEzm5VkQOzmZWOqN7H+Yh4VNJmwAHANsBawL3A7cDY7O5AIuJ77epyYDaz0qnaLdk1WfA9J1v6zIHZzEqpgqmMrhl6gfmNwMW9bkR5Xbhbr1tQfrc82esWlNuBXaijKjeYZHfy7RkREyU9Sus7/yIiNi5S79ALzGZWCRXJMd9IGv5We93yluyiHJjNrHSq0mOOiANyr0d3q14HZjMrnaoE5oFSkU8LZjaU1EZlFFnKRNJ/SvqdpOmS5kvaMlt/iqRditbjwGxmpTSs4FIWkt4F3AZsRhpikG/eK6TZ5wop03mZmQGVvSX7VOBq4M3Af9dtuwvYsmhFzjGbWSmVLOgWsSXw0YgISfWjM6bTfIKjxTgwm1k5Ff08/8qAtqITc4Dlm2xbC3iuaEVOZZhZ+VQzl3EzcLikfKtqPecDgeuLVuQes5mVTyeTZcwbyIZ05DjgFmAi8FtSUN5f0hnAVsDWRStyj9nMyqliPeaImEh6pt/TwDGkfy9fyja/JyIeLFqXe8xmVj4VvcMkIu4C3idpODACeDYiXuy0HgdmMyunCn+ej4g5QJ+nu3JgNrPyqWiPuVscmM2sfKo6U36XODCbWTm5x2xmViJVfOhfFzkwm1k5ucdsZlYiFbn4J2m/TspHxLgi5ZoGZkmFKmjguIj4Vx/3NTNLqpHKGNtB2QD6F5iBfYCpwNwODrwecCbgwGxmfVedURkbDkSl7VIZe0TEnUUqkrQ08HL/m2RmQ15FUhkDlR1oFZivo4Np6oAF2T7PtytoZtZSRQLzQGkamCNip04qiogAOtrHzKypauSYFyHpA6RHSG0KDK/fHhEbFamn6alL2qqDxpxVtKyZWVsVnI9Z0q7AlaTJ8jcDJgGPAa8nTed/Y9G6Wv1PulrSWwo05jzgi0UPaGbWVjUfk30ccDawa/b9sRGxA+kZgEsBVxWtqFVgfhy4VtKmjTYquQA4ADi+6AHNzAqpWI+Z1Ev+I6l3HGSp4oh4CBhDCtyFtArM7wemAddJ2ji/IXt0yq+AvYGvRcTJHTTezKy12i3ZRZbyeAWYn11vm0YaPlzzJLBxw70aaHpaETEDeC8wmxSc1wOQ9BrgUuATwGERcXrHzTcza6d6PeYHgQ2y1xNIz/9bS9IawJHA5KIVtRzHHBHPSHofKWl9vaQPAj8AdgY+HxE/67ztZmZtVHO43EXA5tnrbwLXAlOy7xcAnylaUdu5MiLiCUk7AjcB95HestERcUEnLTYz60i50hRtRcTZudd/ywZPfBBYDrg2Iu4vWleruTI+W7fqYuAo0pXF19Rvj4iftzuYpDGk/yR5T0fEyGy7su0HAasCdwCHRMR9uTqWBU4HPk064euAgyNiCma2ZBCwTK8b0T9ZTOpTVqFVj/ncJus/mC2LtAFoG5gzDwI75L5fkHv9NVIuZnRW7nhgvKRNI2JWVuZM4COkwDwDOAO4QtJWEZGvy8yqqsLzMWcdzLVofIPJI0XqaBWYB2RyDtJVy6n1K7OTORw4NSIuzdbtDzxDys38VNIqwIHAARExPiuzL2nSpPcDVw9Qm81sMFUwxyxpNdI45j1pHlsLnVWrW7IHaoa4jSQ9QZrw6A7g6Oy/yIbASOCaXBteknQT8A7gp8BWpCHl+TKPS3ogK+PAbLakqF6P+TxgR+As0l1/fZ7UbbAnyr+DlKaYBKwJHAvcKunNpKAM8HTdPk8D62SvR5JSH9MblBmJmS0ZKthjJgXlwyJibH8rajVXxvWSNitakaRh2T5vaFYmIq6KiF9HxN0RcS2we9aG/fPF6qtusG6xw7cqI+kgSRMkTZj2bJuazKwcejCOWdLPJP1T0kuSpkm6XNLm7fcEYCaLdyz7pNWHhR2AlTqoS53uExGzSUPw3kCalB8W7/muycKTnUr6Uazeokyj45wTEaMiYtQary3aOjPrmd7NlTGB9Kl+c+ADWUuuzW6sa+eHwBey62X90i6VcZmkTp5g0q5nuwhJw0n3l/8FeJQUeHcC/prb/m7gq9kufwPmZWUuzsqsS3oTb+3k2GZWYj1KZUTET3PfTpZ0LDAR2Ig0UqzVvmdIWhu4X9K1wL8XLxL1w4UbahWYzy9SQQP1+d9XSTqdNMnHY6Re7nHACsD5ERGSzgSOkTQJeIiUg55NFoQj4rlsNrvTJD3DwuFyd5PusjGzJUWPL/5JWoE0SdtjFLidOpv28xBgWdJ8zPWCxe/jaKjVqIwDilTQoXWBX5JSEdOA24FtcyNAvku6aeRsFt5gsnNuDDPAEcB84BIW3mCyn8cwmy1BOusxry5pQu77cyLinD4fWjqYFItWIPWS3xcRRTIHZ5A+7R8CTIqIeX1tw6COyoiIvdpsD9L0eGNalJkDHJotZrakKt5jnh4Ro5ptlHQScEybOnaMiBuy1xcB40k3iXwF+I2kd0bEi23qWA/4ckTcU6zZzQ32cDkzs/a6e0v2mcCFbco8VnsREc+Rnnf6D0m3k3LFHwPazQ/0f8DafW/mQg7MZlY+XbwlOyKm0+LaV4GWiJQ3bufLwPmS/hERt/TxeIADs5mV1SCPypC0CalnfC3pGti6wDeAucAVBaq4DFgZuEnSC8CzddsjItYv0hYHZjMrn94Ml5tLuhfjSOC1pHsjbgK2azS/TwPX0eGQ4WZaTfv5CLBnREzsxoHMzDoyyMPlIuJxFp85s5P9R3erLa16zBtQLK9iZtZd1Zwro2ucyjCz8qndkl1ykvYD/hQRM7LXLUXEuCL1tgvMXcmXmJl1pDo95rHAtqS7kMe2KRtAVwLztyQVGWYSEbF/+2JmZgVVYz7mDYGncq+7ol1g3oJ0pbId96zNrHsq0mPOP1Ckmw8XaReY94iIO7t1MDOzwioQmAdKNT4smNnQUrvzr8hSEpKWkfRNSZMkvShpQd0yv2hdHpVhZuVTkVEZdU4jzSx3FfA7iqWBG3JgNrNyql4q4+PANyPi5P5W1Go+5hJ9SDCzIaUiF//qrAjc1o2KHHzNrJwqlmMmPZ1p+25U5FSGmZVPNXvMPwTGSXoFuJL01OxFRMQjRSpyYDaz8qnmxb9aGmMMzZ/tV+jfjQOzmZVPNXvMn2Wgp/00M+upcuWP24qIsd2qy4HZzMqnmj3mrnFgNrNyqkBglvRz4MSIeDR73UpExIFF6nVgNrPy6eLDWAfYjsD3s9fvpXWOuXD+2YHZzMqnIqMyImLD3OsNulWvA7OZlVMFUhkDxYHZzMqnwhf/JL0eeD0wvH5bRFxfpA4HZjMrp2rkmF8laSPgIuDttVXZ18heB77BxMwqq5o95nOB9YDDgUnAy32tyIHZzMqpeoF5a2B0RFza34qGXmBefivYYkKvW1FeD6t9mSHunb/tdQvKbcXju1BJRUZl1JlCP3rJeRXL4pjZkFDBR0sBpwBfl7RCfysaej1mM6uGiqUyIuICSZsBkyXdDvx78SKxf5G6HJjNrHwqePFP0mjgKGABsCWLpzV855+ZVVy50hRFfAv4PXBgRDzbn4ocmM2sfIYBy/S6ER1bDfhRf4MyVPF/kpkNDdW7+HczsHk3KnKP2czKp4I5ZuAw4NeS/g38mcUv/hERrxSpyIHZzMqpXL3hIh7Ivo5rsj0oGHMdmM2sfKrZYz4BP/PPzJZoFQvMETGmW3U5MJtZ+VTzluyuqV4Wx8yWfLVURpGlhyQdIWmxeZfb7LOlpF1alXFgNrNyqsZwuf1It2CfKultzQpJWlXSvpKuIQ2rW7lVpU5lmFn5VOfi35bAvsCRwNckPQ/cA0wD5gKrAhsBG2ffXwK8KSImt6rUgdnMyqn3veG2IiJIw+PGSdoG2AXYhhSMhwMzgP8FTgYuL3pXoAOzmZVPBW/Jjog7gDu6UZcDs5mVUwV6zAPFgdnMyklFk8wLBrQZRUhaE/gg8CZgRLZ6JnA/cFVEPNNJfQ7MZlZConh46l1gljSMlD8+gpR8eZE0R4aA1wLLAy9LOhM4KstJt+XAbGYl1ElgnjuQDWnnKNJTsU8GLqgfbSFpfdKojWOAWVm5thyYzayEOgnMPfU5Uk/4zEYbI+JfwEmSXiDNPufAbGZVJdJosyKeG8iGtPM64P8KlLsrK1vIEL7uaWblVesxF1l66gFgrwLlPg1MKlppz8/KzGxxlUllnAD8VtKmwIXAfaSLf0EanfFmYG/gPcDHi1ZaiTM3s6Go/PdkR8RlknYDvg2cy+LzMQuYCHwoIq4qWq8Ds5mVUG97zJIEXAV8APhERPy2WdmIuBq4WtK6pB7yCNIJzATui4jHOz2+A7OZlVDPUxlH0uEA6YiYAkzpxsEdmM2shIZRfFRGd0kaRRrathXwdJfqXJ00q9xNRco7MJtZSQ1+eJK0EvBL4PMR8UzKaHTFe4BfUzBx7sBsZiXUUSpjdUkTct+fExHn9PHAPwH+HBFX9nH/rnBgNrMS6igwT4+IUU1rkk4i3RLdyo7A64G3AU3ralD3zwsWXb9oneDAbGal1NWLf2eSxhi38hgwmjQ73Oy6FMYlkm6LiHc12G806dbD2W3qX65IQ2sGNTBLmkzj/xxXRsRu2RCVbwIHkR7JcgdwSETcl6tjWeB00p00ywHXAQdnV0TNbInQyS3ZrUXEdGB62yNKx5BiS949wFeAy5vs9hhwTUQc1Kbuj5MeK1XIYPeYt2bR5PdawN9ISXGAr5GGqYwGHgSOB8ZL2jQiZmVlzgQ+QgrMM4AzgCskbRURvZ+Y1cy6YPCHy0XEE8ATi7Qi9Zwfj4hHmuw2gWKpj0LTfdYM6lwZETEtIqbWFmBX4HngN1lv+XDg1Ii4NCLuBfYHVgI+AyBpFeBA4KsRMT4i7iJNqfdW4P2DeS5mNpAqM1fGHyjQGydNmH9C0Up7NolRFogPBC6MiBeBDYGRwDW1MhHxEnAT8I5s1VbAa+rKPE6aSKRWxswqrxyBOSLU5q6/cRGxc4F6HoiIbxU9bi//3exECsbnZt+PzL7WD+h+GlgnV2YBi/+Hejq3/2IkHUTKW7Peeuv1vcVmNkh6fudfT/Vy2s/PAX+NiL/XrW80CUi7/EzLMhFxTkSMiohRa6yxRscNNbPBVo4ec6/0JDBnDy78CPCz3Oqp2df6nu+aLOxFTyVdPFy9RRkzqzwByxZcykHSK5IWNFnmS5ohabyktqmPXvWYR5Me1PWr3LpHSYF3p9oKScOBdwO3Zqv+BsyrK7MusHmujJlVXiV7zCcCjwPTgLHAd4Dzs++nABcAawBXSdq9VUWDflbZRb//An6VGwJHRET2JNljJE0CHgKOJQ3cvjgr85yk84DTJD3DwuFydwPXDuqJmNkAqmSOeQ6pg/nBiJhTWylpOdIUotOALYE/AUcDVzSrqBdnvgPwBmCfBtu+S7pp5GwW3mCycz6Akx4TPp80WLt2g8l+HsNstqSpXGD+AnBoPihDGl0m6XvAWRFxsqRzST3ppgb9zCPiL6R/h422BTAmW5rtPwc4NFvMbIlUyR7zmqThvI0sA6yWvZ5OkxhY44exmlkJVTLHPAEYI2mt/EpJa5OmmqjNgLc+8GSrikp1VmZmSe8myu+Hw0ip1Ucl3QY8Q+pFbwe8yML07SZk182acWA2s5KqVniKiLskbUKa72cb4C3AU8D/AGdExIys3PHt6qrWmZvZEFHJHDNZ8D26v/VU78zNbAioZmAGkDSClL4YQbrQd0dEzOykjmqeuZkt4aoZmLOnpRzJorckzpV0ekQcV7Se6p25mQ0B3Zsof7BIOpyUxjiP9MSUqaQpJvYBjpY0LSJ+UKQuB2YzK6FK9pi/AHw/Io7IrXsQuFHSbOBgoFBg9jhmMyshkeYrK7KUxgak260b+VO2vRAHZjMroUreYDID+I8m296cbS+kVGdlZpZUMpXxe+BESTNIk7TNk7Q08AnSY6Vazo+RV7kzN7OhonLh6SjgbaQA/HNJM0lD5pYCbqaD8c2VO3MzGwqqd0t2RMyStD2wG2ke+RHATOBG4KpskrZCHJjNrIQqmcqozZB5BS3mWi6iemduZkNANQNztwzdMzezkit/eJL0Cu0fFl0TEVHopMp/5mY2BFWmx3wCxQNzYZU4czMbaqoRmCNizEDUW/4zN7MhqHqjMrrJgdnMSmrohqehe+ZmVmLVSGUMlKF75mZWYg7MZmYlU5tdbmhyYDazEqreRPnd5MBsZiXkVIaZWck4MJuZlYwDs5lZyTgwm5mV0NANT+pg7uYlgqRpwL963Y6c1YHpvW5Eyfk9aq1s78/6EbFGfyqQ9GfSeRUxPSJ26c/xymbIBeaykTQhIkb1uh1l5veoNb8/Sx4/JdvMrGQcmM3MSsaBuffO6XUDKsDvUWt+f5YwzjGbmZWMe8xmZiXjwGxmVjIOzGZmJePA3AOStpf0B0lPSApJo3vdpjKRdIikuyU9ny23Sdqt1+0qE0ljst+d/DK11+2y7nBg7o0VgXuBw4CXetyWMpoCfB3YEhgFXA9cJumtPW1V+TwIrJVb3tLb5li3DN2b0XsoIq4ErgSQNLa3rSmfiLi8btUxkr4IbAfc3YMmldX8iHAveQnkHrOVmqSlJO1F+pRxa6/bUzIbZemwRyX9StJGvW6QdYd7zFZKkt4C3EZ6vtBsYM+IuKe3rSqVO4DRwCRgTeBY4FZJb46IGb1smPWfA7OV1YPAFsBrgY8B50vaISLu7WWjyiIirsp/L+l24BFgf+CMnjTKusaB2UopIl4GHs6+nSBpa+AI4MDetaq8ImK2pPuAN/S6LdZ/zjFbVQwDlu11I8pK0nBgM+CpXrfF+s895h6QtCKwSfbtMGA9SVsAMyPisZ41rCQknQr8CXgcWAn4DLAD4LHMGUmnA38EHiPlmI8DVgDO72W7rDs8iVEPSNoB+EuDTedHxOhBbUwJZUMIdwRGAs+RhsidFhFX97JdZSLpV8D2pKd8TANuB46LiPt72jDrCgdmM7OScY7ZzKxkHJjNzErGgdnMrGQcmM3MSsaB2cysZByYzcxKxoHZzKxkHJitzyRtJ+nXkp6U9LKkGZLGS9pf0lIDdMyxuSd23JBbPzq3/o0N9tsht/39ufXH5tZPGYg2m3XKgdn6RNLhwC3ACNLTRt4PfBZ4CPgxsPsAHn4qadL8gxtsmwXs22D9ftm2er/I6rqya60z6yfPlWEdk7Q9aWrJsyLiy3WbL5d0BmnehoEyNyJub7Ltd8A+ko6P7LZWScuRpg69lDSH8asi4gngCUnTBrC9Zh1xj9n64hvATOBrjTZGxD8jolePgLoAWB94V27dnsBSpMBsVnoOzNaRLHe8A3BNRMzpcXMa+RdwE4umM/YDfk96EopZ6TkwW6dWB5YjBcCyGgd8QtJwSWuR8t/jetwms8IcmG3ASVpB0vOSLqpbv7GkmyU9JOn/JI3q0iF/Q5pU/0PA3qSLhdd1qW6zAefAbJ2aAbxEyuMW9SlgIrCbpFVz638CjI2IN5Ly1RdJUn8bGBGzgMtI6Yz9gIsi4pX+1ms2WByYrSMRMR+4AdhJUtFHPR0I/A9wNakHi6Q1gG3JnrgREeOzslt1qanjSE88eQtOY1jFODBbX5wKrAac1mijpA0lvTV7vTmwMXAFcB4LH6a6HvBkRMzL7fqvbH03jAd+DfwkIu7rUp1mg8LjmK1jEXGTpP8GzsgC71jSs+dWBd4H/BfpOX13kwLxBRExX9K1wAhJzXrF/U5j5Nq4APh0t+ozG0zuMVufRMSZpLHCzwKnA9eTAvTmwOeBP0p6DSnP+xlJk4FHgFVIwfoxYO2sTM362XqzIc09ZuuziLgVuLXZdkl7Ao9FxNa5dRsCdwFHAneS7sT7maSdSD3mvxU5tqSlUxNiQdaWsaR/DK3aewN1vfLsYuNS9evNesk9ZhtIB5LuxHtVRDwK3At8AvgCcICkh0j56r2j2NOB1wfm0Z0hcMdkde3XhbrMusJPybZKkbQB6SYXgFkR8WA/61sLWCf79uUe3kpu9ioHZjOzknEqw8ysZByYzcxKxoHZzKxkHJjNzErGgdnMrGQcmM3MSsaB2cysZByYzcxK5v8BOPVcYRmsgLMAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAVoAAAElCAYAAAClJSpiAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAqF0lEQVR4nO3debxd0/3/8dc7tKKqraEaQ41V1VFRQ4dIWzwUHXRQbYWooUoV9etADOn41VJ8W/qtlDaCFi1flCpBUbNQNfMtCaKCJEWCIPH5/bHWkZ2TM917z7l75+b9fDzOIzlrr7P22vuc+znrrL3W2ooIzMysd4aVXQEzs6HOgdbMrMccaM3MesyB1sysxxxozcx6zIHWzKzHeh5oJY2RFIXHS5IelPQTScN7vf8O6vbVMuuwuJM0Nb+vv2+y/aq8/dou7nOCpKmF52vnfYypy3eYpEckzZN0e04LSeO6WJepkia0ybN23d9A/WOjbtWnE7k+4ySt22Bb2+PpUZ0W2m8hbqxdSBsn6WODXbduWHoQ9/UFYBqwPLATcGj+/wGDWId6Y0jn4Lcl1mEomA18RtLyETG7lihpLWBk3t5LjwNbAg8W9r0Z8GPgGOD8Qh22JH0Oy/BfwIUN0h8Y5HqsDRwFXAs8VLdtJ+DZQa5PIxeT3qvHC2lHkd7TK0up0QAMZqC9PSL+lf8/SdL6wJ6SDoyIVwaxHj0laZmIeLHsegyyScDWwOeACYX00cBU4FFgqV7tPJ/vG+uSN8z//joiHirkrc83mB4qef9tRcQ/yq4DQEQ8BTxVdj26pcw+2tuAZYGVawmSXifpp5Km5C6GKZLGShpWyDNc0vGS7pI0R9J0SX+W9I76HUhaR9LpOc+Lkh6S9N9521XAVsCHCj/hriq8djNJl+d9PCfpitxKKpY/QdI0SVtKul7SC8DPWh20pJ0kXZfLfVbSzZI+Vdj+DUk3SJol6WlJN0raoa6MpSX9MHfBzJU0Q9K1kj5cl29vSf8s5DlV0oqt6tdPLwDnkgJr0WjgdGCR6YeSVpU0MdfrRUl3SNq1Qb6PS7otH8ODkr7WIM9CXQf5fZyQNz9Y7C5o1HUg6X2SLpT0H0kv5PfnIw32c2D+iTtX0uRGeXqlk/NV+Lk9UtL5+TM2U9JJkpbNeUYBf8svmVT47I/K25v9hP+gpHMkzZb0hKRD8/btJP0j/43cImmTujptK+kvkh6X9Hz+uz1EUssvXtV1HUiqfYbGFuo8TtL/y+fjzXWvV/57/0MfTnPPDGaLtt7awDPATEjBA7gUeCfwQ+BOYAvgCGBF4JD8umVIXQ4/Iv2sWBHYD7hR0jsiYnoubx3gZuB50k+O/wPeCmyby9kPOIPU0qr98T6bX/te4GrgHlL3QgDfA66WtEVE/LNwHG8EzgKOBQ4jBZ2GJB0A/IL0U3Z3YA6wcT4XxfNyCqkluDTwSeAiSdtHxCU5z3eBg4GxwO3AG4BN87mo7evofM5+AXwbWD2fs3dL+mBEzG9Wz36aCFwhaY2ImCZpC+DtpEC7VTGjpOVI53cF0jl7FNgVOF3S6yJifM63IfAXYDKwC+m9Hwe8HmhV//1yeYcCnyV9Thp2F0jaGPg78A9gb9LnZV/g8nyebs359gROIAXws4G3AX8gfRY7NSx/zoui3XvR6fkqOAM4B/gVsBlwJLAc6bN8G7A/cBLwTeCW/Jp72tT9NNJ7PJ7UDfgTSW8Ctif9nJ9DamScL2m9iHgpv25d4Argl8Bc0ud0HPBm0t9Up7YEbiCd/5Nz2jTS+/VDYA8WbuRsC6wDVOMaTET09MGCQLUBKXCsQDr4ecA3CvlG53wj614/FngJWKVJ+UsBryP1wR1cSJ9IevNXa1G3q4BrG6T/CXgaeFMh7Q3ALOC8QtqEXOdPd3Ae3pDreF67vIXXDMvn7DLggkL6Ra3KIQXr+cCRdekfyvX9TBff36mkP2zl/38vp/8KuK7ReQa+kesxqq6sy4EngaXy8zOBGcByhTxvzZ+HqXXHG8CYQtpeOW3tun0EMK7w/ArgXuC1dZ+pe4HzC+/Do8Bf68r6Yi5vQptzVKtfo8ecDs5xp+drTM736wZ/Q/OBt+fno3K+rZu8nxMKz2tlHllIWzrv92VgnUL6p3LerZoch/JrxwL/AYZ1sN+1C2kB/KhBuROAfwEqpJ0H3Netz/lAH4PZdXAf6Y2ZBZwKnBwRJxa2bwc8DFyffxovnb/9LwNeQ2rdAiBpZ0k3SXqaFLCfI7VyNiiUty1wUUT8ux91HZlf+3QtISKeJV3I2Kou7zxS4Gvng7mO9a2PhUjaRNJFkp7IZb8MbMPCx3YLsL2kH0v6sKTX1hWzDSk4nFl3Lm8itdpHttj/sOJrVOi2aSXSp/sMYHSuzxdJX3aNjAQei4ir6tLPILV03pmfbwn8JSKeK+znUeC6TurUTv45vRXwR+CVwnkSKYjVztMa+XFOXRHnkt6jTv0I+EDd49XuB0lL1Z175U2dnq+a+nqeRfo8bEb/1X5NERHzSIHtgYiYUshzX/73rbWE3OVxsqSHSV+QL5POw5uAVQZQn6JfAesBH6/tk/RL8ORWLxpMgxlodyJ9sLYnfYj3k7RbYfsqwFqkN6L4uDlvXwlA0idJP93uBb4MbJ7LfQooDhdbif5fXV6Rha921kwntciLnozOfoavlP9tWidJbyW1sFYkjcb4IOnY/srCx/YTUnfIp0g/e2dK+p2kWn937QP8LxY9n28o1KWR39bl78uIjImkP/qjSD9Vz26Sr9X5rW0HWBV4okG+Rmn9sSKp9XoEi56nbwAr5C+aVRvtNwecmX3Y38MRMbnuUbz49GBdHXYv1LOT81VTf35qz1fvQ13r/afu+UtN0iB/VvO5uxDYkRRcP0b6PP+4mG+gIuJmUvfSvjlpL9IX4GndKL8bBrOP9q7Iow4kXQncARwj6dzcYpkJTAF2bvL6qfnfXYB/RcSY2gZJr2HRD9sM+v/BmgWMaJA+Im8r6nSdyRn539WBu5rk2Y7U57tzRLwakCW9bqEdRrwM/BT4qaQRpA/ycaQulC+y4I9/Wxb9Y4DWwWEcUPylMaNJvkVExAOSbiL1vZ1X/EVQZxYLt9Braue8Vr/Hgbc0yNcorT+eBl4h9Vc2bH1HxCuSakFuof3m1m+rL62++iSpH7qm1lrs9HzVvAW4u+45wGMDrWAfrUfqkx0dEWfUEnNjqdv+BzhZ0uqkQPvHiKj/Wy1NKRfDIuJFSd8GLiBduDiG1Gr7HKnP6r4WL38di/5cG82iw4cuAz4radWIaNQaAHiRxhczrgZ2UGFcqKTlSX8IV7WoWyvXk/qM9yFd9GukFlBfriVIejupb7VhSzjSxb9TJG0PvDsnTyIFkDUjYlJfKhkRU1nwpdYfPwN2Y+FgXe9q4AuSPhQRxW6AL5P6/u7Nz28gdZEsV+s+yK3+DwH96RJaSEQ8J+nvwPuA26L5MMNppD7anVm4hf85uvg3FBF3NtnU6fmq2ZmFx5ruQvo81H4d1oYfLjuwGrfV6PP8GuAr/SzvJZrX+Q+kC9K/B9YEft3PffREaaMOIuJCSbcA/0/SiaQLH3uQrlz/HPgn8FrSt+KnSBdwnicF5M9IOp7UN7oJ6erp03W7OArYgdTn+xPSz+jVge0iojYs5h5SF8YXST/bZkfE/aSrmDvmuvyU1Gr9LumD84N+Hu/sPCTml5LOzcc7G9gImBsRvyR1qcwDJuZzsCrwfeARCt08ki7I5+c2Uov1/aTW8Ml5Xw/mep8oaQPSH+pcUt/ZNsApEVEb4tNVEXEe6UJEKxOAA4HzJI0lBbKv5Lp9rdAV8yPSFe7LJB1D+jx8n+51HQB8C7gGuFTSqaRW9Mqk0SBLRcT3cqv2+6QvtN+R+jzfRhrV0JfB/evm0Rj1HmjT+ppAZ+erZvt8vi4j9cseBUyMiNrEiAdIn7OvSppFCrz3R2GySZfcS7ru8mNJ80kB9+ABlHcPqQH0V9Ln/t+1azAR8YLSsLSDgTsj4voB1bzben21jQVXD9/WYNu2edvB+flw0k/X+0hv/izShZ9xwNKx4Arwj0gtmudJQeT91F21zHnXI33TzcjlPQQcX9g+gjR8aHaux1WFbZuTAt8c0sW2K4DN6sqfAEzr4/n4POmi1AukP9KbgB0L23fOxz+X9PNvl7yfqYU8h5AG6M/M5dyfz9Fr6vY1Oud7Lh/HvaSW5hpdfH+nAme0yXMVdaM7SF8ipxfemzuAXRu8dmvS0Kva+/e1Budjbfo56iCnbUgKnk/m/Uwj9S1uX5fvQFLgmEvqE/xwo89dg2Oo1a/Z4/MdnOe254sFf2sjSb8W55D+hk4Clq3L+7V8PudRGNFQfzw0+ftt8p7WjnOvQtpGpBloz+fz+oNG702L/RbzfAi4NZ//Ru/jljl9/259vrv1UK6gmS3mlCZs/A5YPxbMwlxiSPox6ctwtUijhCqjzAkLZmYDJun9pIuFBwLjqxZkwYHWzBZ//0saWXEpqT+6ctx1YGbWY17428ysxxxozcx6bInro115ecXaK7fPt8RaabWya7AYWLV9liXY1KlTmTFjhtrnbG5pqeNOzVfg0ojYbiD767UlLtCuvTJM7teUgyXE6P3KrsFiYGzZFai0TTfddMBlBGmxjE7MLqxpXVVLXKA1s+oTPbwlRwkcaM2sckRaG3WocKA1s0pyi9bMrIfE0BoS5UBrZpXkFq2ZWQ/5YpiZWY/5YpiZ2SBwH62ZWQ+568DMrMccaM3MBoG7DszMesgtWjOzHvOoAzOzQeAWrZlZD3kKrpnZIHCL1sysh3wxzMysx3wxzMxsELhFa2bWQ74YZmY2CNyiNTPrIbdozcx6zKMOzMx6zKMOzMwGgVu0ZmY95K4DM7NBMJQuhg2lYzGzIaLWou3k0bYsaaqkaPC4uEfVX4RbtGZWSV1sBX6AhWPyqsCtwDnd20VrDrRmVjkCXtulsiLiqYXKlvYEngX+2KVdtDWoXQeS9pd0h6Rn8+MGSTsUtkvSOEn/lvSCpKskvauujGUk/VLSDEnPSbpQ0hqDeRxm1lu1CQudPPpUriRgT+CMiHi+axVuY7D7aKcB3wU2BjYFrgTOl/TevP07wCHAAaTm/pPAJEnLF8o4Afgc8CXgI8AbgIskDaWLlGZLvD700a4saXLhsU+LYrcB1gFO6V3NFzWoXQcRcUFd0lhJXwe2lHQncBBwdEScCyBpd1Kw/TJwsqQ3kr6N9oiISTnPaOBhYGvg0kE5EDPrqT4O75oREZt2mHdv4JaIuL3vteq/0kYdSFpK0i7A64HrSd8yI4DLanki4gXgGuCDOWkT0oSRYp5HgXsLecxsCOh214GkVYBPA7/pbk3bG/SLYZLeA9wADAfmADtFxJ2SaoHyibqXPAGsnv8/ApgPzGiQZ0SLfe4D7AOw5koDqr6ZDYIeTcEdA7wInNX9olsrY9TB/cBGwJtIfa2nSRpV2B51+dUgrV7LPBExHhgPsOk6aleWmZWs2zPD8kWwvYCzImJ2F4vuyKB3HUTESxHxr4iYHBGHArcDBwPTc5b6lukqLGjlTied/5Vb5DGzxVw3Jyxko4D1KaHbAKoxM2wYsAwwhRRIt6ltkDScNLLg+px0K/ByXZ41gA0LecxsCOhmH21E/C0iFBE396Ku7Qxq14Gko4GLgUeB5UmjCUYBO0RESDqBNBLhPuAB4HBSP+7vASLiGUmnAsdIehKYCRwH3AFcPpjHYma940VlBmYEcEb+9xlSgPxERNSGZf0MWBY4CVgBuAnYtq5P5WBgHnB2znsFsFtEzB+UIzCzQeFA208RMabN9gDG5UezPHNJExoO6GLVzKxCvPC3mVmP+Z5hZmaDoEpdB5JGAKuRuitnAFMi4qVOX+9Aa2aVU4WLYZI2JY293Q54a93mlyTdAvwBODMinm1VlgOtmVVSWV0HOcAeC4wE7gT+DPwDeAp4AViRtGTA5sDRwNGSfgb8PF9DWoQDrZlVTskt2qtJExu+HhH3tsqYx/p/mrTy4DDgh43yOdCaWeWUPOpgvYiY3j7bq6OgzgbOlvSWZvkcaM2scsps0XYaZBu8rukyAA60ZlZJHt5lZtZDFRl18Frgs6RRB1uQhncNJ039v5/Ul3t2RNzTriwHWjOrpLICraTXAd8GvkFaCuBe4GYWHXWwP3C4pGuBwyLiumZlOtCaWeWUfDHsQdJKgkcC50TEzGYZJX0I2BW4VNIhEXFyo3wOtGZWOSV3HXw9Is7vJGNuxV4naRywdrN8DrRmVkllXQzrNMjWveYJWtx8wIHWzCqnChfDusmB1swqqcQpuD/oQ/aIiKPaZXKgNbPKKblFezjpZq/qIG8ADrRmtvgpedTBc8ArwJ+AiRFx9UALHEqTL8xsCOnyXXD74i2kMbRrApdLmiLpB5Le1t8CHWjNrHJ6cLvxjkXE8xFxekRsQ5qYcDJphtgDkm6Q9HVJK/SlTAdaM6ukbt5uvL8iYlpEHB0R7wY+ANwO/AI4pS/luI/WzCqnasO7JG0GjAY+D7xMWgi8Yw60ZlZJZf/clrQWaXrtaODtwHXAYaRpuc/0pSwHWjOrnGHAa0vat6S9SMH1Q8BDwJmk0QdT+lumA62ZVVKJLdrxwLPARODanPZRSR9tlDkiftuuQAdaM6ucCvTRvgEYkx+tBOBAa2aLpxID7TrdLtCB1swqR5S6etfD3S7TgdbMKqfkKbhd50BrZpVU4q1sJvYhe0TE7u0yLXGB9uWpMH23smtRXSPmHl52Farv8z5HLc0feBElXwwbSbrI1YmO8i1xgdbMFg8l9tGu3e0yHWjNrHIqMLyrq8qe5WZmtogyV+/KSyKuVpc2oFjpQGtmlVMbddDJowfGAmu8WhdpKeBlSRv3t0B3HZhZJZXYCmx0C5tObmvTlAOtmVXOUOujdaA1s0pyoDUz67VO+w5e6cnePynp3YWaBPApSRvVZ/TqXWa2eBKdL0g7tyc1GNsg7cgGaV69y8wWU2WuKuPVu8xsiVFSJ61X7zKzJcMQG3bgCQtmVk0l3W9c0gWS3t+H/MMlfUvSvs3yuEVrZtVTbov2EeBGSbeTbsx4LXBHRMyrZchTdDcDPgl8FngM+GqzAh1ozax6Slz5OyIOkHQCcBAwDngjEJKeBV4EVsi1E3Bzznd6RDQdaOZAa2bVVGIfbUQ8CBwg6RBgS2BzYDVgODATuA+4ptMLZw60ZlY95Q7velVEvARcnR/9VoFDMTNroIvrJEpaVdJpkp6SNFfSPZK26km9G3CL1syqp4sXwyS9CbiOdFFrB+ApYF3gySb5G80AayYi4oftMjUNtH28QVnREb0Y8GtmS5ju/d7+DvB4RBTvFjilRf5xdc+Dxssk1u4X1v9AC+wKTCddZevUmsAJgAOtmfVfd0cdfAb4q6SzgY8C/wZOAU6KiEVurhgRr4Z4Se8ELgTGA2cBTwBvAb4E7A3s2EkF2nUdfCYibu6kIElLAy91ktfMrKW+dR2sLGly4fn4iBhfeL4usB9wPHA0sBHwy7ztxDZlnwicEhE/K6Q9Avw0397mJODj7SrYKtBeATzTroCC+fk1z/bhNWZmi+pboJ0REZu22D4MmBwRh+bn/5C0PrA/7QPt5sBPmmy7Bejo3vNNe0EiYpuIuL+TQnL+yK/5V6evMTNrqntTcB8H7qlLu5fU1dnOM8A2TbZtS4eN0VYXwzaJiFs7KUTSiRHxjU7ympm11d0puNcBG9SlvZ3OriX9FjhU0uuBP7Kgj3ZnYB+at3YX0qrr4FJJH42IO1sVIOlUYAzgQGtm3dHdi2HHA9dLGgucDbwf+CZwWAevPZI0uuAgoLZojIDnSEF2XCcVaBVoHwUulzSyUReCJAETga8AR3SyMzOzjnWpRRsRt0j6DCkwHkG6mHUE8KsOXvsKcISknwPvAVYldUXcEREdX8NqFWi3Jk07u0LSVnnuL/Dqfc5/D3wB+E5EHNvpDs3M2uryFNyIuBi4eACvfxr4e39f3zTQRsRMSR8DriEF25ER8Yik15D6Kj4FHBgRv2xWhplZv1Vk4e88jGsz0sWz4fXbI6Lt5K6W42gj4klJHye1bK+U9AngF6SrbV+LiN/0p+JmZi1V5A4LecLC+cB6NJ8dNrBACxARj0n6KKlle3fe2ZiIOL0vFTYz65NqLHn1K1Kc3Bm4k77NlH1Vq+Fd9auF/x44FLgEeE399k7ubS5pHHBUXfITETEib1fevg9pcd2bgP0j4u5CGcsAx5KmwC1LmiSxX0RMa7d/M1tM9OV24721Malhed5ACmnVoj2lSfon8qOoo3ubZ/cDowrP5xf+/x3gENJwsftJQysmSdogImbnPCcAnyYF2pnAccBFedxvsSwzW1xVZD1aYAZdWFqgVaDt+r3Ns3kRMb0+MbdmDwKOjohzc9rupKXMvgycLOmNwJ7AHhExKecZTRp4vDVwaY/qbGaDqSJ9tKQxuPtLumQgDblWow56tQLXupIeI31L3AQcFhEPkQL7COCyQh1ekHQN8EHgZGAT0jDmYp5HJd2b8zjQmg0V1WjRvpk0q+weSZOAWXXbIyLqu0MXMdgLf99E6ha4D1iFtCDD9ZLeRQqykKa4FT0BrJ7/P4LU1TCjQZ4RmNnQUJ0WbXHRmPUbbA8Wve60iKbfGZKulPSOTmsjaVh+TaPKpBpFXBIR50TEHRFxOWktx2HA7nUVX6joBmmL7L5VHkn7SJosafLMNgWZWUV08VY2/RURw9o8OqpBq8b5KGD5PtRJfX1NRMwhDRlbn7TIOCzaMl2FBa3c6aRTu3KLPI32Mz4iNo2ITVfqtHJmVp7aWgedPBYD7boOzpfUl3Fj7VqeC5E0HHgH8DfSrSWmk5Yku6Ww/SPAt/NLbgVeznl+n/OsAWwIXN+XfZtZhVWn6wAASTsCWwErkkY7XZ2n9XakVaA9rZ91qu8/fZWkY4E/kxZ1WIW0sMNywGkREZJOAMZKug94gNQ/MoccVCPimbxa2DGSnmTB8K47gMv7WV8zq6IKXAyTtDxwEanBN48Uc1YCDpH0d2DH/Mu8pVajDvboUl2L1gD+QPrp/xRwI7BFYYTDz0iTEE5iwYSFbQtjaAEOJh3w2SyYsLCbx9CaDSHVadH+hDRpYTRwVkTMz4tq7QL8T97+zXaFDOqog4jYpc32IK3vOK5FnrnAAflhZkNVBVq0wOeAwyPizFpCbtSdKWll0iSragVaM7OOVGcK7kosehucmnvy9raq8Z1hZlZUm4LbnXuGDcQUmt9SfPu8vS23aM2smqrRR3sy8PN8z7AzSXdXGEHqo90L+FYnhTjQmln1VORiWEQcL+nNpIvwY3KySMslHh0R/91JOa2WSXwI2Cki/jnAupqZ9V1FOjYj4jBJxwBbkMbRzgJujIj/dFpGqxbt2sAyA6qhmVl/VKRFW5OD6iX9fX1FvjPMzAoqMgVX0nclNbwvoqRfSPp2o2312gXaPk2pNTPrilqLtuRFZYA9SDNPG7k9b2+r3cWw70tqOqW2ICJi9/bZzMw6VI3f22sC/9dk20PAWp0U0i7QbkRnNyNzy9fMuqc6fbTPs2A97Hpr0OHNGtsF2s9ExM19qZWZWVdUI9D+Hfi2pD9FxKtBNd8k9pC8vS2PozWz6qnOzRnHkZZgfUDSGcBjpBburqTpt2M6KcSB1syqpzbqoGQR8U9JHwWOBb5LCv+vANcCn+t0noEDrZlVUzW6DsjdpyMlLUtavvU/EfFCX8potR5tNRruZrbkqc7FsFfl4NqnAFvjYGpm1VTS6l2SDs630erLazaWtF2z7Q60ZlY95U5Y2A2YKuloSe9rWkVpBUmjJV1G6rN9Q7O87qM1s+op92JY7dY1hwDfkfQscCfp9lsvkvpp1wXWy8/PBt4ZEVObFehAa2bVU2Ifbb6l1kRgoqTNge2AzUnBdTjpBo1/B34MXBART7cr04HWzKqpAh2bEXET6SaxA+JAa2bVU8FRBwPhQGtm1VRyoJW0CvAJ4J2kBb8hLfp9D3BJRDzZaVkOtGZWPSVOwZU0jNT/ejDpXrzPA//JtXoT8DrgJUknAIfmPt2WHGjNrHrKHXVwKHAQKdieXj+aQNJapFEJY4HZOV9LDrRmVk3ldR3sTWqpntBoY0Q8DPxI0nPAgTjQmtliqdyLYW8B/tFBvtty3rYqMIDCzKyBkqbgAvcCu3SQ70vAfZ0U6BatmVVPuS3aHwB/krQBcAZwN+liWJBGH7wL+AqwFfD5Tgp0oDWzaipvZtj5knYA/gs4hUVv1SXgn8AnI6KjW5AvcYH2LtIEZWvsz/uUXYPq+9j3yq5BxT3ThTJKXvg7Ii4FLpW0BqkFu2Ku1Szg7oh4tC/lLXGB1swWAxW5lU1ETAOmDbScChyKmVkD5S2T2BFJK0sa2Ulet2jNrHoWj7UOtgLOoYOaOtCaWTUNod/bDrRmVj3DSKsMlEDSbzvMulanZTrQmlk1ldeiHUMaOzGnTb5lOy1wCDXOzWzI6OI9wySNkxR1j+ktXvII8MeIeGurB7Bvp4fjFq2ZVVN3m4H3A6MKz+e3yDsZ2LSDMtsuj1jjQGtm1dP9UQfzIqJVK7boQmDXDvLdQ5qu25a7Dsysmro7jnZdSY9JmiLpLEnrNssYERMjYtt2BUbEvRHx/U527hatmVVP36bgrixpcuH5+IgYX3h+E+kC133AKsDhwPWS3hURMwde2fYcaM2sevrWdTAjIpr2qdYv/CLpRuAhYHfguH7WsE/cdWBm1dSj9WgjYg5p6cP12+WV9Iqk+U0e8yTNlDRJUsuuBgdaM6ueLg7vWqRoaTjwDuDxDrL/EHgUeAqYAPwUOC0/nwacDrwZuETSjs0KcdeBmVVTl5qBko4F/kwaH7sKcASwHClgtjMXmAJ8IiLmFspcFriEFHA3Bi4GDgMualSIW7RmVj21KbidPNpbA/gDaSztecCLwBb5Jovt7AscXwyyABHxAnA8sG9EvEJaIPy9zQpxi9bMqqlLzcCI6OT+X82sQvPxD68FVsr/n0Hq8GjILVozqyYt1dmjtyYD4yStulDVpNWAo/J2SAvM/LtZIW7RmlkFic7DU6vZtAN2IHAFMEXSDcCTpFbulsDzLJhB9jbg980KcaA1swrqS6B9sWe1iIjbJL0NOATYHHgPabTCz4HjahMeIuLIVuU40JpZBfUl0PZWDqaHDaSMahyJmdlCBAzvMG83brvbmqQVSd0FK5IufN0UEbM6fb0DrZlVUHVatJJ+ROo6WKaQ/KKkYyPiiE7KqMaRmJktpBqBVtJBpG6DU4EzgOnACNJFsMMkPRURv2hXTvlHYmbWUCVug7sv8N8RcXAh7X7gaklzgP2AtoHW42jNrIJqLdpOHj21Nml6bSMX5+1tOdCaWQVVJtDOBN7dZNu78va23HVgZhU0jM5HHfTU/wI/lDQTOCsiXpa0NPAF0m1sOlmYxoHWzKqqEuHpUOB9pID6W0mzSEO8lgKupcPxtZU4EjOzhVVj1EFEzJY0EtgBGAmsAMwCrgYuiYiO7oRb/pGYmS2iGoEWIAfTi2iy1mwnqnEkZmYLKS/QSnoF6KilSorDbSs6qEciaSppObF6f4mIHSSJtPTYPqQm+k3A/hFxd6GMZYBjgS8By5JW1tkvIqb1uPpmNmj6MgW3635A54G2I4P9lfEBFh6FvCpwK3BOfv4d0lS3MaRBwUcCkyRtEBGzc54TgE+TAu1M0l0sL5K0SUT0dL00Mxss5bVoI2Jct8sc1COJiKeKzyXtCTwL/DG3Zg8Cjo6Ic/P23UnrP34ZOFnSG4E9gT0iYlLOMxp4GNgauHSQDsXMeqo6fbTdUNqEhRxY9wTOiIjngXVIc4gvq+XJ9+W5BvhgTtqEdFuJYp5HgXsLecxssVeZCQtdUWYttyEF11Py8xH53yfq8j0BrF7IM5+0TFl9nhE0IWkfUr9v85v6mFmFDK0WbZlHsjdwS0TcXpde3wmtBmn1WuaJiPHAeIClpK52cptZLwytQFtK14GkVUgXtH5TSJ6e/61vma7CglbudNLFtJVb5DGzxZ5Iy7928qi+svpox5Bu9HNWIW0KKZBuU0uQNBz4CHB9TroVeLkuzxrAhoU8ZrbYcx/tgOSLYHuRFmioDdkiIkLSCcBYSfcBDwCHA3PId5eMiGcknQocI+lJFgzvugO4fFAPxMx6aGh1HZRxJKOA9Vlwm96in5EmIZzEggkL2xYDMnAwMA84mwUTFnbzGFqzocaBtt8i4m80ufif5xSPy49mr58LHJAfZjYkuUVrZtZjDrRmZj1WmYW/u8KB1swqauiEp6FzJGY2hLjrwMysxxxozcx6zIHWzKzHSl34u+scaM2sgtyiNTPrMbHwzVgWbw60ZlZBbtGamfWYA62Z2SAYOuFp6ByJmQ0hnoJrZtZj7jowM+sxB1ozs0EwdMLT0DkSMxtC3KI1M+sxB1ozsx7zqAMzs0EwdMLT0DkSMxtC3HVgZtZjDrRmZj3m1bvMzHpsaC38PazsCpiZLarWddDJo48lS4dJCkkndq26bbhFa2YV1Js+WklbAHsDd3S98BbcojWzCup+i1bSG4EzgT2B/3S3vq050JpZBfWk62A88KeIuLKrVe2Auw7MrKK6F54k7Q28DRjdtUL7sv+IKGO/pZH0FPBw2fUoWBmYUXYlKs7nqLWqnZ+1IuLNAylA0l9Jx9WJ4cDcwvPxETG+UNYGwLXARyLivpx2FXBXRHxjIPXs1BIXaKtG0uSI2LTselSZz1FrPj+tSRoD/A6YX0heCgjgFWC5iHixl3Vw14GZDXXnA5Pr0n4H/B/wE+ClXlfAgdbMhrSIeBp4upgm6TlgVkTcNRh18KiD8o1vn2WJ53PUms9PxbmP1sysx9yiNTPrMQdaM7Mec6A1M+sxB9oSSBop6UJJj+VVhMaUXacqkbS/pDskPZsfN0jaoex6VYmkcfmzU3xML7te1pgDbTleD9wFHAi8UHJdqmga8F1gY2BT4ErgfEnvLbVW1XM/sGrh8Z5yq2PNeBxtCSLiL8BfACRNKLc21RMRF9QljZX0dWBLBnl5u4qbFxFuxS4G3KK1SpO0lKRdSL8Cri+7PhWzbu5+miLpLEnrll0ha8wtWqskSe8BbiAtGDIH2Cki7iy3VpVyEzAGuA9YBTgcuF7SuyJiZpkVs0U50FpV3Q9sBLwJ+BxwmqRRgzVlsuoi4pLic0k3Ag8BuwPHlVIpa8qB1iopIl4C/pWfTpb0AeBg0ur4Vici5ki6G1i/7LrYotxHa4uLYcAyZVeiqiQNB94BPF52XWxRbtGWQNLrSau9Qwoga0raiLSa0COlVawiJB0NXAw8CiwPfBkYBXgsbSbpWODPwCOkPtojgOWA08qslzXmRWVKIGkU8LcGm06LiDGDWpkKykPePgqMAJ4hDek6JiIuLbNeVSLpLGAk6S4ETwE3AkdExD2lVswacqA1M+sx99GamfWYA62ZWY850JqZ9ZgDrZlZjznQmpn1mAOtmVmPOdCamfWYA631m6QtJZ0j6d+SXpI0U9IkSbtLWqpH+5xQuKPAVYX0MYX0tzd43ajC9q0L6YcX0qf1os5mDrTWL5IOAq4DViTdDWFr4KvAA8D/ADv2cPfTSYuA79dg22xgdIP03fK2er/LZf2la7Uzq+O1DqzPJI0kLcV3YkR8s27zBZKOI82775UXI+LGJtvOA3aVdGTkaY+SliUttXguaQ3XV0XEY8Bjkp7qYX1tCecWrfXH94BZwHcabYyIByOirFvOnA6sBXy4kLYTsBQp0JoNOgda65Pc9zoKuCwi5pZcnUYeBq5h4e6D3YD/Jd2pwWzQOdBaX60MLEsKaFU1EfiCpOGSViX1H08suU62BHOgtZ6TtJykZyWdWZe+nqRrJT0g6R+SNu3SLv9IWiT8k8BXSBfPruhS2WZ95kBrfTUTeIHUD9qpLwL/BHaQtEIh/dfAhIh4O6m/90xJGmgFI2I2cD6p+2A34MyIeGWg5Zr1lwOt9UlEzAOuAraR1OmtZfYEfg5cSmphIunNwBbkOwJExKScd5MuVXUi6Y4M78HdBlYyB1rrj6OBlYBjGm2UtI6k9+b/bwisB1wEnMqCmyuuCfw7Il4uvPThnN4Nk4BzgF9HxN1dKtOsXzyO1vosIq6R9C3guBxIJ5DuXbUC8HFgL9J9vu4gBdbTI2KepMuBFSU1a7UOuNugUMf5wJe6VZ7ZQLhFa/0SESeQxqo+DRwLXEkKuBsCXwP+LOk1pH7SL0uaCjwEvJEUfB8BVst5atbK6WZDilu01m8RcT1wfbPtknYCHomIDxTS1gFuAw4BbibN1PqNpG1ILdpbO9m3pKVTFWJ+rssEUqBvVd+rqGs154tvS9Wnm3WTW7TWS3uSZmq9KiKmAHcBXwD2BfaQ9ACpv/cr0dndQtcCXqY7Q7bG5rJ260JZZg35Lri2WJG0NmnSBMDsiLh/gOWtCqyen75U4tRhG8IcaM3MesxdB2ZmPeZAa2bWYw60ZmY95kBrZtZjDrRmZj3mQGtm1mMOtGZmPeZAa2bWY/8f8QeLPcYoQy4AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# this problem has two degrees of freedom, to draw heatmap it does not need to fix any dimension\n", + "\n", + "fixed = {}\n", + "all_fim.figure_drawing(fixed, ['CA0[0]','T[0]'], 'Reactor case','$C_{A0}$ [M]', 'T [K]' )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Read Heatmaps\n", + "\n", + "A heatmap shows the change of the objective function, a.k.a. the experimental information content, in the design region. Horizontal and vertical axes are two design variables, while the color of each grid shows the experimental information content. Taking the Fig. Reactor case - A optimality as example, A-optimality shows that the most informative region is around $C_{A0}=5.0$ M, $T=300.0$ K, while the least informative region is around $C_{A0}=1.0$ M, $T=700.0$ K." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Grid search for 3 design variables" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "# Define design ranges\n", + "design_ranges ={'CA0[0]': list(np.linspace(1,5,2)), \n", + " 'T[0]': list(np.linspace(300,700,2)), \n", + " ('T[0.125]','T[0.25]','T[0.375]','T[0.5]','T[0.625]','T[0.75]','T[0.875]','T[1]'): [300,500]}\n", + "\n", + "## choose from 'sequential_finite', 'direct_kaug'\n", + "sensi_opt = \"direct_kaug\"" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "doe_object = DesignOfExperiments(parameter_dict, design_gen,\n", + " measure_class, create_model,\n", + " prior_FIM=prior_pass, discretize_model=disc_for_measure)\n", + "\n", + "all_fim = doe_object.run_grid_search(design_ranges, \n", + " mode=sensi_opt)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Draw 1D sensitivity curve" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "test = all_fim.extract_criteria()" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAbQAAAEhCAYAAAAXn1W2AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAABHlklEQVR4nO3dd5gUVdbH8e9vAkmSCAgIEkQlKwIqaVAXRDGD2VVQMaGisOsqa8LVVVGXYMT0ilnWvAYkGMiigwEQRSWpJAEVUTKc949bo0UzEQZ6mDmf5+lnpqtOVd2+09On761bt2RmOOecc7u7lGQXwDnnnCsMntCcc84VC57QnHPOFQue0JxzzhULntCcc84VC57QnHPOFQue0NwuI6mXJIs9NkiaK+l2SWWKQNkuSGYZigtJdSRtjv6+VZNYjoGSjspm+QhJC5JQnq2OK6le9H/QK7bM34c7wBOaS4bTgLbAccBoYABwd1JLBL0A/yApHOcRPlvSgbOSWI6bgW0SGnArcMouLkt2lhD+D96KLeuFvw+3W1qyC+BKpM/M7Nvo97GS9gculHSVmW1JZsEKk6TSZrY+2eVIgvOAWUBFoCdwX3KLszUzm5vsMgBE740Pk12O4sRbaK4o+AQoC/zRPSWpnKRBkuZHXVfzJV0vKSUWU0bSEEmzJP0maamkNyQ1SjyApPqSno5i1kuaJ2lYtO4DoBPQPtYd+kFs20MljYuO8bukdyUdmrD/EZJ+kNRW0hRJa4G7cnvRkk6RNDna76+SPpJ0Ymz9FZKmSvpJ0i+SPpR0XMI+0iTdGnXdrpO0QtIkSR0S4i6S9Hks5nFJVXIr3/aQ1BY4AHgKeBpoJalpPrdNl3SbpAXR33xB9Dw9FpPVTddH0mBJP0paI+lNSfVicVlTIF0f+5sOjNbl1PV3qaQ7ovfIaknPRO/DhpJGR3+nbyX1TCh3w+i9NV/S2ui99ZCkPfN4vVt1Oeb0PpTUKvr9pGz2kfW+S81PHRd33kJzRUE9YBWwEsKHNKErsgmhe2gmcDhwI1AF+Fu0XWmgAnAbofumCtAH+FBSIzNbGu2vPvARsIbQDfUNUAc4OtpPH+AZIBW4JFr2a7RtC2A8MJvQHWTAdcB4SYeb2eex11EJeAG4B/gnsDanFyzpSuBe4DVCK+Y34JCoLuL18hiwgPC/egLwpqRuZjYqirkW6AdcD3xGaBW1juoi61h3RnV2L3ANsE9UZ80ktTOzzTmVczv0BLYAzwLlo3KdF5UzL08CpwO3A5MI3XE3AA2AsxNiBxBe7/lA9WibMZKamtnGaNupwAjg4WibH/I4/gDgg+g1NCF8IdkCtAQeJfxdLwOekJRpZl9E29WK9n018HNU3n8Cb0flyK9s34dmNlvSx9Gy17OCJVUm1Nddhfw33H2ZmT92gwfhA+o+4CvCB+X3wEPAXnlslw7cBMwF1gGfA8dkE1eT8IGyPIqbDXSK7WMQMAP4nZA8ngP2TdhH6aiMK6K4/wG1Y+t7ERLC64QEtgqYAmwCrojFXR3FrY32dS9QivDhuIHwAdackGjWAoui15gKlANWEz7kOwHTgc2ED6Zrs3ndPaLXuoWQ8E5JWP9StHxhVC/Tga7AT8ArsbgRUZlPysffsmJUxlfyio1tk0JIamOA12PL38xtP4SkuBm4KWF5+6i8Jxfie7Q04QN9dGzZ1Ojvk5LHts2i8gxMWH5DtLxF7PVY9DdLicVlvZ4LY8sMuC2bY40AFiTUkQHvJcS9Ei3/a2zZntH79eZcXksa0CHatmU+jtsrtuwDYFI2++wV/R3rxpb1jcpSO6eylLSHdzkWMVEXwsBsVtUifLP+B+HD/K9ABvB8Hru8DbiU8OZvAgwHXpXUMnbMysBkQISBGo2BK4Efo5ByhNbDv6OfJxFaOO9EraksQwkJ4iygI+GD+81sukNOjNZVJHyD/d7M7o/KkgoMBNZHr+8c4FRgMOHDPB04EhgLLCN84P0O3EL45/6d0DJoTfiGPIWQfDKB2yT1iL3utsBIQmsiM3q9L0o6LFbWLkCZqB5bRvt7GXiXkDDjNhESTF7aRWV8JLegqKvpTUnLon1vjMpzYCzsY6CbpH9L6iCpVMJuuhCS4bNR92Ra9DebRmiFZuRy/JT4Nop19+bgJKAyobsxy5OE927nPLbNKsczCcuznifW9UsWO99qZpMJraSCtIgSjUp4/lX0c3TsOD8T3id1spZJKiXpn5K+irqaNwITo9Xxv9WOeAH4BbgotuwS4C0zy6vlWXIkO6P6Y+sH4VvcwHzGdiO0LCrmErMYuCph2cvAM7HntwOTC1jOJoRvl82j55UIradzYjF1ovJ1jZ4P4M8uu9bAsYQPZAP+EcUcGz3P7fEU4cO4R/R8RLTsx2i/PxK6Kb+J9rmR0F30GDA1Vr6RwNjo9w8I3VzjgOdjMQZ8nvDavyG0PLYk/N0W5bPuzon22yyXmDqED7AphG6lw6PXNoqtv+WnE7rzZkb7XA08AVSN1l+fR10+mcd7MR47Io/X9RbhC0UtQmKrDNSP3hfP5rFtVktsj4TlZaLlN0fP60XPL89mH5nAqIS/XUFaaL0T4gZGy9MSli9g6/+f/0Sv8QbCqMo2hFGUia2vnI4bj/mAbFpo0bp7CL0jaYQvjAYcW5D/2+L+8HNou7eKhJbMmlxiShO6yuLWErpEspxMaG2NJLR+FhM+/B+w6D8ph2ND6GICaEX4cB2TFWBm30v6ktAiGQ00jFa9ZNEoR0nvReW5XtIDhG/YvxLOp50exVcmtMguJXT5XUP4Btwd+NbMeklqA5xL6MasQkikL0fbryC0bl8BekpKtz/PsySOwBsNXBGVLau180tCzBhCMv0pYXlOdZVoRfRzH8JowOwcQ/iScLrFvoFLKrfVAcPrGAQMklQDOJ7Qmi0HnEF0XpJwvvBntrUym2VZBgL3Z1PubUjaOzpGGqGLMdEpkiqY2eocdpFVlzUI3ePEnmdXzr2z2cfehPNqu9qZwFNmdlvWAknld8JxHgL6E1rCpxAS6+jcNihpvMtxNxV1E94KPGpmm3IJHQ1cLenAqAupCyER1IzFNCCckJ5HOD80DLgTuDyHY5cifCt9I/ZhW4PQx5/4obeMPz+UKiXuy8LQ5R8JCbJPFLuI0EL5zcwyCd17mwknyDMJ5zGWET60s177suhnL8K5tLKxZWMIH/QbCR+4WaMpa8Ri1se2ySpvVlxLSRVixf4ZqEY4h7c9phAGgVycS0xW4tqYtUDSAYRzRdkys6Vm9hihldksWjyWkNz3NbPMbB7zc9nfgoTYBbmU96+Eur2M8KUo/riaULen5bJ9Vl2embD8nOjnhITlp2rrEa/tgdqElnOWDdFxd7ZyxP5OkfO3c19Z78NtWLjcYAzhC92phP/9YnOZS2HwFlqSSfonYURUltKASfp7bNmxZjYxts0ewBuED/5/5HGIqwgjtGYTWhBzCV1S8X+4FCDTzAZEzz9VuDbscrb+hp41AvEZQqvpRPIm8m65rCWc//g74RzUUsLglHcl/YcwkEVAF0nnR+U14B3gZElDCKMYIZxj+CUWA2Fk43GEb7gAbaOWTmosZjYhoR4Wvc4DCd13EP4m70oaFMX3isrzr3y8/m2Y2WpJA4D7JL1MOIe3GjgYWGdm9xGS0ibgqagOahLOE35H7IuopNej+vmEkGhbElp3D0fHmhuV+/7oNY0ntNjrEM6vPWZm72/P60hwHjAfeDixVS9pIuF92hP4v+w2NrMvJD0PDIzeY1MILegbCV3AMxI2qQC8JulhwpeLOwhdwfHzd7OB4yS9Q6ibxWa2eMdeZrbeIbT8ZwLfEr4wttvOfc0G+kg6g/C/utrM5sTWP0gYVLWRHOqyREt2n2dJfxC6xxrGHi8TRvXFl5WNxZcnfFudCJQvwHHKELq4ROii+iK2biHhgy0efy7we8KyNOBFwsnyGgnrjiJ82FdLWP4FcEv0++NRTMPYehFaK/dE60ZH25QhdHl9RfjWatHvAwnXN71F+GC/jdBFui6KOY7QFbOU0GWadZz9COfILNrfPELX5jXR+hqEQSRZ+/mAMLJyE+Hc37ionL8TRph+kvA6RwA/FPBvfyphcMbaqCzTgONj60+PXvO6qE7OZNvzMH8jXJy7MtrPnKiO0rP5e34Ylf834EvCl5UdHiFHSKIG3JhLzL8JLcX6ucSkR3/PhYQP7IXR8/RYTL3oWH0IXavLCV3ubyXum9CanR77mw6M/a0WZLPP7T2HVpUwaOPn6PEs4Tza9pxDy3ofrs56HyYcOzX6G764sz6TdudH0gvgj4Q/SC6DQgjfSicRRiRW2M79pxO+Rd4eW/YcMDEh7lZgdsJ2L0cfmDWz2W8lQhfP2bFltdl6UEjj6J+0XSymXbTswOj5sdE28eH+Z0cfShWj55cREkCZWMw/CS1WRc8HAXMSyvgI2w4KGZMQM4atB4VMAx5JiPkauCPZ75WS+Mgp+ZSUB6FVbcBfkl2WovhIegH8kfAHySGhRclsKuGb+v7RN7msR6lY3LvxD1tCF1p3wnmyjtH6eUDlWEwbwjfi6wktwtMIgysuj9anES4AXkQYth8/drz1+FAU05nwrf19wkn61FjMKP68ULpt9PsbsfWp0bL3on10jvZ5XyymEqEF9gLhXFH3KMH9LRZTn/BNdighkfYmJNwesZh2/NkCaxT93AgcFos5I9qud7SfYYQWTt1kv1dK4qOkJjRCD0MXwiCi6ckuT1F9JL0A/kj4g+Sc0I4g56HXR8TiFhAbXk24fmc2oYWzgnCOoVY2+z+OcC5mHaEF0pc/Wzv1cjl2r9g+yhBGDa4kdAO9AdRJOE4Vwjm4X6PHM8SSaxSzL+Fc2ppoX/cBpRNimhO6XtcRhjLfnFXehNf+CaGLcT5waTav+1RCt94GQjdc92xi+kT1up7QhZWR7PdJSX2U4IQ2IvryNR1omuzyFNVH1geWc845t1vzYfvOOeeKBR+2n0RVq1a1evXqJbsYzjm3W5k+ffoKM6uWuNwTWhLVq1ePzMzMZBfDOed2K5IWZrfcuxydc84VC57QnHPOFQue0JxzzhULntCcc84VC57QnHPOFQs+ynE389qni7h79BwW/7KWWpXLck3XAzm55T7JLpZzziVdkWihSeojab6kdZKmS+qYR3xzSeMlrZW0SNJNkpQQ0yna1zpJ8yRdms1+ekiaLWl99POUhPUVJA2VtDA61pToRpI5lesRSYm3fik0r326iAGvzGTRL2sxYNEvaxnwykxe+zS7+yk651zJkvSEFt33ZxhwO2Ey2inAKEn75hBfkXDTwmWESXX7Em541z8WU59wC4Yp0T7vINx7qkcspi1htvVnCfehehZ4UdJhscM9RrjhZU/C3IFjgHGStmkSSTo1Ks/OuN8SAHePnsPajZu3WrZ242buHj0nhy2cc67kSHpCIySiEWb2qJl9aWZXEiabvSyH+HMId4jtaWazzOxlwq1C+sdaaZcSbuZ3ZbTPR4EnCTeQzHI18L6Z/TuK+TfhHlhXA0gqC/QArjOzD8zsWzMbSLj1ylZlk1SXkJTPZts71xaaxb+sLdBy55wrSZKa0CSVAloRWj5xY8j5jq9tCffuin+KjwZqEWbizopJ3OdooLWk9Dxiso6bRriVybqEmLVAh9hrSAOeB24zsy9zKHOhqFU5+7vJly+TxoZNfid251zJluwWWlVC0liWsHwZ4V5b2amRQ3zWutxi0qJj5hZTA8DMVhPuP3aDpH0kpUr6KyER1oxtcwuw0sweyqG8W5F0saRMSZnLly/PzyZ/uKbrgZRNT91qWarE6nWbOPH+Scz44ZcC7c8554qTZCe0LIn3sFE2y/KKT1y+vTHxZecS7p78A+FeWH0JrbHNEAaeAL2AC3Ip69YFN3vEzFqbWetq1baZWzNXJ7fchzu6N2efymURsE/lsvzn9IN49LzW/LxmAyc/MJk73v6StRs257kv55wrbpI9bH8FITkktsaqs23rKcvSHOKJbZNTzCbCDSNzi/njuGY2F+gkaQ+gopktkTSScLNIgCMJrbUlsUGWqcAgSVebWe0cXsN2O7nlPtkO0z+0fhXuHPUlD0+Yx+gvlnJnjxYc3mCvwj68c84VWUltoZnZBsIdWLskrOpCGKGYnalAR0llEuIXE+4qnBXTOZt9ZprZxlhMvo5rZr9HyWxPwqjH16NVDwItCKMksx6LgSHAX3Io/05RqWw6d3RvwXO9D2OLwZmPfMj1r85k9bqdNkbFOeeKlKLQ5TgY6CWpt6TGkoYRBngMB5B0h6R3Y/HPAWuAEZKaSeoOXAcMtj9vvz0cqB1dQ9ZYUm9C1+A9sf0MA46SNEBSI0kDCC2uoVkBkrpKOlZSfUldgPeBOcATAGb2YzTS8o8HYZTjUjNLylj6dg2r8s7VHendoT7Pf/QdRw+ZwHtf5dTYdc654iPpCc3MRhKGyt8AfEYYQdjNzLLud1MT2C8Wv4rQkqoFZAIPAP8hJMasmPlANyAj2uf1QN9oiH9WzBTgTMI1ZjOA84AzzGxarHiVgPuBr4CngEnA0bFWXpFUrlQaNxzfhJcva0eFMmlcMCKTq1/4lJ9+35Dsojnn3E6jPxs1bldr3bq17ewbfG7YtIUH3v+WBz/4lgpl0hl4YlNOaFGThIlVnHNutyFpupm1Tlye9Baa27lKpaXQr8sBvHFlB+rsWZa+z3/KRU9lsnRV4uV1zjm3e/OEVkI0qlGRV/q05/pujZn07Qq6DB7P8x99h7fQnXPFhSe0EiQ1RVyU0YB3rsqg6T4VGfDKTM5+dBoLV/6e7KI559wO84RWAtWrugfP9T6cO7o3Z9aiVXQdOoHHJs5j8xZvrTnndl+e0EqolBRx1qH7MrZ/Jzo0rMptb31J94emMGfp6mQXzTnntosntBKuRqUyPHpea+49qyXf/7SG4++byJCxX/tkx8653Y4nNIckTjyoFuP6d6Jb85oMe/cbjr9vIp99/0uyi+acc/nmCc39ocoepRh2Zkse79maX9duovuDk7ntzdk+2bFzbrfgCc1t4y+N92ZM/wzOPHRfHps0n65DJzBl7opkF8s553LlCc1lq2KZdG4/pTnPX3Q4KYKzH53GgFdm8KtPduycK6I8oblctd1vL0ZdlcElGQ0Y+fH3dBk8nrGzfbJj51zR4wnN5alsqVQGdGvMa5e3Z89ypbjoqUyueO4TVvy2PtlFc865P3hCc/nWonZl/ndFB/p3OYDRXyyly+DxvPbpIp8+yzlXJHhCcwVSKi2Fvn/Zn7f6dqTuXntw9cjPuPDJTBb/sjbZRXPOlXCe0Nx2OWDvCrx8WTtuPL4JU+eu5OghE3jmw4Vs8emznHNJ4gnNbbfUFHFhh/qMvjqDg+pU4obXZnHWox8yf4VPduyc2/U8obkdtu9e5XjmwsO4q0cLZi/5lWOGTuDh8XPZtNmnz3LO7Tqe0FyhkMTpbeowrn8nMg6oxh2jvuKUB6cwe/GvyS6ac66E8ITmCtXeFcvwyLmteODsQ1iyai0n3j+J/4yZw/pNPn2Wc27n8oTmCp0kjmtRk7H9OnHiQbW4771vOe7eSUxf+HOyi+acK8Y8obmdZs89SjH4jIN54vw2rFm/iVOHT+GWN75gzYZNyS6ac64Y8oTmdrojD6zOmP6dOPfwujwxeQFHD5nApG98smPnXOHyhOZ2ifKl0/jXSc347yVtSU9N4a+PT+MfL33OqjU+2bFzrnB4QnO71KH1qzDqqo5cdsR+vPzJIjoPGc87s5Ymu1jOuWLAE5rb5cqkp3LtMY14rU97qpYvzaXPTOfyZz9h+Wqf7Ng5t/08obmkaV67Ev+7oj3XdD2QsbOX0XnweF6e/oNPduyc2y6e0FxSpaemcPmRDXn7qo40rF6ev734Ob2e+JhFPtmxc66AikRCk9RH0nxJ6yRNl9Qxj/jmksZLWitpkaSbJCkhplO0r3WS5km6NJv99JA0W9L66OcpCesrSBoqaWF0rCmS2sTWp0saJGmGpN8lLZH0nKR9d7ROSpqG1cvz4iVtGXhCEz5e8BNHDx7PU1MX+GTHzrl8S3pCk3QGMAy4HWgJTAFG5ZQUJFUExgLLgDZAX+AaoH8spj7wdrSvlsAdwH2SesRi2gIjgWeBg6OfL0o6LHa4x4CuQE+gOTAGGCdpn2h9OeAQ4N/Rz5OAOsA7ktK2q0JKsJQU0at9mOz4kLp7ctPrX3DGI1OZu/y3ZBfNObcbULLPV0iaBswws4tiy74BXjKzAdnEXwYMAvY2s7XRshuAy4DaZmaSBgHdzWz/2HaPAU3NrG30fCRQxcy6xGLGAcvN7CxJZYHVQA8zez0WMx0YZWY35PB6mgBfAC3MbGZur71169aWmZmZa/2UVGbGS9N/4NY3Z7Nu0xau7rw/F3VsQHpq0r+DOeeSTNJ0M2uduDypnw6SSgGtCC2fuDFAuxw2awtMzEpmkdFALaBeLCZxn6OB1pLS84jJOm4akAqsS4hZC3TIoWwAFaOfPs/TDpDEaa3rMO5vnTjqwOrc9c4cTn5gMrMWrUp20ZxzRVSyv+5WJSSNZQnLlwE1ctimRg7xWetyi0mLjplbTA0AM1sNTAVukLSPpFRJfyUkwprZFSxK0P8B3jCzH3KIuVhSpqTM5cuX5/ASXZbqFcow/NxWPHTOISz7dT0nPTCZu0d/xbqNPtmxc25ryU5oWRL7PZXNsrziE5dvb0x82bnAFuAHYD3hfN3zwDafptE5s2eAysD5ORbc7BEza21mratVq5ZTmEtwbPOajOufwSkt9+GB9+fS7d6JZC74KdnFcs4VIclOaCsIySGxNVadbVtPWZbmEE9sm5xiNgEr84j547hmNtfMOgHlgTpmdiiQDsyPbxQls+eBFsBfzGwlrtBVLleKe047iKcuOJT1G7dw2sNTGfi/L/h9vU927JxLckIzsw3AdKBLwqouhBGK2ZkKdJRUJiF+MbAgFtM5m31mmtnGWEy+jmtmv5vZEkl7EkY9xgeJpBNGS7YAjjQzn8dpJ8s4oBpj+mXQs209npwaJjse/7V33zpX0iW7hQYwGOglqbekxpKGEQZ4DAeQdIekd2PxzwFrgBGSmknqDlwHDLY/h2wOB2pH15A1ltQb6AXcE9vPMOAoSQMkNZI0ADgSGJoVIKmrpGMl1ZfUBXgfmAM8Ea1PA14EDgfOAkxSjehRthDryCXYo3QaA09syouXtKV0ego9/+8j/vbfz/llzYZkF805lyRJT2hmNhK4GrgB+IwwgrCbmS2MQmoC+8XiVxFaUrWATOABwkCMwbGY+UA3ICPa5/VAXzN7ORYzBTiTcI3ZDOA84AwzmxYrXiXgfuAr4ClgEnB0rJVXm3DtWS1CS3NJ7HHG9taJy7/W9arwdt+OXHFkQ177bBGdB09g1MwlyS6Wcy4Jkn4dWknm16EVri8Wr+IfL83gi8W/ckzTGvzrpKZUr1gm7w2dc7uVInkdmnOFqWmtSrx+eXuuPaYR7835kc6Dx/Ni5vc+2bFzJYQnNFespKWmcNkR+zHqqo4cWKMC17w0g/P+7yO+/2lNsovmnNvJPKG5Ymm/auUZeXFbbj2pKZ8s/JmuQyfwxOT5bPbJjp0rtjyhuWIrJUWc27Yeo/tl0KZeFW55YzanPzyVb39cneyiOed2Ak9ortirvWc5RpzfhsGnH8Tc5b/Rbdgk7n/vGzZu3pLsojnnCpEnNFciSKL7IbUZ268TXZruzT1jvubE+32yY+eKE09orkSpVqE0D5x9CA+f24oVv4XJju8c5ZMdO1cceEJzJVLXpjUY168Tpx5Sm+Hj59Jt2EQ+mu+THTu3O/OE5kqsSuXSGXRqC5658DA2bN7C6Q9P5cbXZrF63ca8N3bOFTme0FyJ12H/qozpl8EF7evzzLSFdB0ygffn/JjsYjnnCqjACU1SS0mvSFohaZOkQ6Llt0s6pvCL6NzOV65UGjed0ISXLm3HHqXTOP+Jj+k/8jN+/t0nO3Zud1GghCapA+G2K40Is97Ht98CXFp4RXNu12tVd0/e7NuBvkc15H+fL6bz4PG8OWOxT5/l3G6goC20O4HRQFOgf8K6T4BDCqNQziVT6bRU+h99IG9c2YFalctyxXOfcvHT01n267pkF805l4uCJrRDgIei+44lfmVdAVQrlFI5VwQ0rlmRV/u0Y8CxjZjw9XI6Dx7PyI+/89aac0VUQRPaOqBcDutqAn6VqitW0lJTuKTTfrxzdQaNa1bk2pdncs5j0/hupU927FxRU9CENgm4WlJqbFnW19ULgfcKpVTOFTH1q+7BCxcdzr9PacaMH1bRdegEHp/kkx07V5QUNKHdSOh2/Dz63YCekt4HDgduKdziOVd0pKSIcw6ry9j+GbTdby9ufXM2PR6awtfLfLJj54qCAiU0M/scyACWAdcDAq6IVncyszmFWzznip6alcryeM/WDDvzYBau/J3j7p3Ive9+w4ZNPtmxc8mk7T3BLakMUAX4xcz8hMJ2aN26tWVmZia7GG4HrPxtPQPfmM0bny+mUY0KDOrRgoPqVE52sZwr1iRNN7PWicu3e6YQM1tnZos9mbmSbK/ypbnvrJY8el5rfl6zgVMenMztb3/J2g0+2bFzu1pBL6y+VtJ9Oay7V9I1hVMs53YvXZrszdj+nTijTR0emTCPY4dNYOrclckulnMlSkFbaOcDM3JY91m03rkSqWKZdO7o3oLneh/GFoOzHv2Qf746k199smPndomCJrR9gW9yWDcPqLtjxXFu99euYVVGX53BRR3r88JH33H04Am899WyZBfLuWKvoAltDbBPDutqA+t3rDjOFQ9lS6Vy/XFNeKVPeyqVTeeCEZlc9cKnrPzN/0Wc21kKmtAmAtdIKh1fGD3/W7TeORc5uE5l3riyA1d33p+3Zy6hy5AJvP7ZIp8+y7mdoEDD9iUdBEwhzNv4DLCI0GL7K7AX0D66Vs3lgw/bL1nmLF3NP16eweff/8JfGlXntlOaUbNS2WQXy7ndTk7D9gt8HZqkQ4F7gHaEFt4WwpRYfzcz/3QuAE9oJc/mLcYTk+dzz5g5pKekMKBbY85sU4eUFCW7aM7tNgrtOjQz+8jMMoAKhPNmFczsiB1JZpL6SJovaZ2k6ZI65hHfXNJ4SWslLZJ0kyQlxHSK9rVO0jxJ29yrTVIPSbMlrY9+npKwvoKkoZIWRseaIqlNQowkDZS0OIr5QFLT7a0LV7ylpojeHRsw+uoMmu1TiX++OpOzH/uQBSt+T3bRnNvt7ciF1WujC6vX7kgBJJ0BDANuB1oSujRHSdo3h/iKwFjC9FttgL7ANcTuzyapPvB2tK+WwB3AfZJ6xGLaAiOBZ4GDo58vSjosdrjHgK5AT6A5MAYYJyk+MOYfhPOHV0bl+REYK6lCwWvDlRR199qD5y46jDu7N+eLRb9yzLAJPDphnk927NwO2K6pr6JzaQcCZRLXmdlTBdzXNGCGmV0UW/YN8JKZDcgm/jJgELB3VjKVdANwGVDbzEzSIKC7me0f2+4xoKmZtY2ejwSqmFmXWMw4YLmZnSWpLLAa6GFmr8dipgOjzOyGqFW4GLjfzP4drS9LSGp/N7OHc3vt3uXoAJauWscNr81k3Jc/clDtStx16kEcWMO/DzmXk0LpcpRUWdJkwt2pnwdGRI8nYo+C7K8U0IrQ8okbQzhHl522wMSEluFooBZQLxaTuM/RQGtJ6XnEZB03DUgl3AMubi3QIfq9PlAjvp+oXBNyKb9zW6lRqQyPntea+85qyQ8/r+X4+yYyZOzXrN/k02c5VxAF7XK8nTCaMYMw0/4pwFGE7rp5wKEF3F9VQtJIvOp0GSFRZKdGDvFZ63KLSYuOmVtMDQAzWw1MBW6QtI+kVEl/JSTCmgnHy3f5JV0sKVNS5vLly7N/ha7EkcQJB9VibP9OHNe8JsPe/YYT7pvEp9/9nOyiObfbKGhC60pIah9Gz38wsw/M7DxgHHDVdpYjsd9T2SzLKz5x+fbGxJedSxjF+QPhovG+hJZp4lfnfJffzB4xs9Zm1rpatWrZhbgSrMoepRh6Zkv+r1drVq/bRPeHpnDrm7NZs2FTsovmXJFX0IRWE5hnZpsJXXHxjv5XgOMKuL8VhOSQ2JqpzratnixLc4gntk1OMZuAlXnE/HFcM5trZp2A8kAdMzsUSAfmx/ZBAcvvXJ6OarQ3Y/plcM5h+/L4pPkcM3QiU75dkexiOVekFTShLQUqR78vJHS/ZWlY0IOb2QZgOtAlYVUXwgjF7EwFOkb3Y4vHLwYWxGI6Z7PPTDPbGIvJ13HN7HczWyJpT0IrNWuQyHxCncQHlpQBOuZSfufypUKZdG47uTkvXHw4KYKzH5vGdS/PYNVan+zYuewUNKFN4s8k9jRws6SHJT0A3E0YVFFQg4FeknpLaixpGGGAx3AASXdIejcW/xxhTskRkppJ6g5cBwy2P4dsDgdqR9eQNZbUG+hFuCA8yzDgKEkDJDWSNAA4EhiaFSCpq6RjJdWX1AV4H5hDNPglOt5Q4DpJ3SU1IwyS+S0qp3M77PAGe/HO1Rlc0qkB/838nqOHjGfsbO8AcG4bZpbvB7Af0DH6PR34D+H80k+ED/C9CrK/2H77EFpX6wkttozYuhHAgoT45oSRhOuAJcDNRJcgxGI6EUZjrie0pC7N5rinAl8BG4AvCUP94+tPB+ZG+1gC3A9USogRMDBavw4YDzTLz+tu1aqVOVcQn3//s3UdMt7qXvumXf7sdFu+el2yi+TcLkfobdvmMzXf16FFQ+xHAkPMbEK+NnK58uvQ3PbYsGkLD4+fy33vfUu50qncfEITTj54HxImy3Gu2Nrh69AsnO/qXJBtnHOFr1RaClf+ZX/e6tuB+lX3oN/Iz7lgxMcs/mWHJu1xbrdX0OQ0GTh8ZxTEOVcw++9dgZcubcdNxzfhw3k/cfSQCTz94UK2+PRZroQqaEL7G3ChpCsk1Y4uNk6JP3ZGIZ1z2UtNERd0qM+YfhkcXKcyN742izMf/ZD5PtmxK4EKej+0LdGvOW1kZpa2w6UqIfwcmitMZsaLmT9w61uz2bBpC/26HEDvDvVJS/Xvma54yekcWkGTz7/IfQYP51ySSOL0NnXodGA1bnxtFneO+oo3Zyzmrh4H0aRWxWQXz7mdbrtm23eFw1tobmcxM0bNWspNr8/ilzUbueyI/bjiqIaUTktNdtGc22HbPcoxujnmQTunWM65nUES3ZrXZGy/Tpx4cC3ue+9bjrt3EtMX+mTHrvjKT+d6PaD0Ti6Hc24n2HOPUgw+/WBGnN+GtRs2c+rwKdzyxhf8vt4nO3bFj58tdq4EOOLA6ozul8G5h9flickL6Dp0AhO/8dsXueIlvwnNT7Q5t5srXzqNf53UjP9e0pZSqSmc+/hH/OOlz1m1xic7dsVDnoNCoqH67xBu9ZIXM7OehVGwksAHhbhkWbdxM8Pe/YZHJsyjyh6luPWkZhzTLKd76jpXtOQ0KCS/CW0pYYLevJiZNdi+IpY8ntBcss1atIp/vDSD2Ut+pVvzGgw8sSnVK5TJe0PnkmhHE9rhZvbRzipcSeUJzRUFGzdv4ZEJ8xj27jeUTU/lpuOb0P0Qn+zYFV07PDmxc654Sk9N4fIjG/J23440rF6ev734OT2f+Jgffl6T7KI5VyCe0JxzADSsXp4XL2nLLSc2JXPBT3QdMoGnpi7wyY7dbsMTmnPuDykpome7eoy+OoND6u7JTa9/wekPT2Xu8t+SXTTn8pRnQjOzFD9/5lzJUqdKOZ664FDuOe0gvvnxN44dNpEH3v+WjZu35L2xc0niLTTnXLYkcWqr2oztn0HnxtW5e/QcTn5gMrMWrUp20ZzLlic051yuqlcow4PntGL4Xw9h2a/rOemBydz1zles27g52UVzbiue0Jxz+XJMs5q8278T3Vvuw4MfzKXbvRPJXPBTsovl3B88oTnn8q1SuXTuPu0gnrrgUNZv3MJpD0/l5tdn8ZtPduyKAE9ozrkCyzigGmP6ZdCzbT2e+nAhXYdMYPzXPtmxS64dTmiSDpF0cvQ4pDAK5Zwr+vYoncbAE5vy0qVtKZOeQs//+4j+//2MX9ZsSHbRXAmVtr0bSmoFPEuYif87QEDdaLqcs81seqGU0DlXpLWqW4W3+nbk/ve+Zfj4uUz4ejn/OqkZ3ZrXTHbRXAmzIy20R4ArzKyxmXU1s6PN7EDgCuDRwimec253UCY9lb93PZDXr2hPjUpl6PPsJ1z69HR+/HVdsovmSpAdSWjlzWxc4kIzGwvssQP7dc7tpprWqsRrfdpz7TGNeG/Oj3QePJ7/Zn5PXpOgO1cYdiSh/SjpfEmpWQskpUrqTf7uneacK4bSUlO47Ij9eOeqjjSqUZF/vDSD8/7vI77/ySc7djvXjiS0nsBZwE+SvpT0JfATcEa0Lt8k9ZE0X9I6SdMldcwjvrmk8ZLWSlok6SYl3OtCUqdoX+skzZN0aTb76SFptqT10c9TEtanSro1Vrb5km6TlBaLKS/pPkk/ROWZI6lfQV6/c8VRg2rleeHiw7n15GZ8svBnjh4ygScmz2ezT3bsdpLtHhRiZvOAoyVVBfaNFn9nZgVqnUk6AxgG9AEmRT9HSWpiZt9lE18RGAtMANoABwIjgN+B/0Qx9YG3gf8D/gp0AB6UtNzMXo5i2gIjgZuBV4DuwIuS2pvZtOhw1wKXExL0TKAF8CThZqe3RjGDgc7AucB8IAN4VNIKM3u6IHXhXHGTkiLOPbwuRzWqzvWvzuSWN2bzxueLuevUFjSsXiHZxXPFTJ43+NyunUrVzezHfMZOA2aY2UWxZd8AL5nZgGziLwMGAXub2dpo2Q3AZUBtMzNJg4DuZrZ/bLvHgKZm1jZ6PhKoYmZdYjHjgOVmdlb0/E1gpZn1jMU8CexlZsdHz2cBL5vZzbGY8cBMM7sit9fuN/h0JYmZ8dpni7jljdmsWb+Zvn9pyCWd9iM91S+HdQWzq2/wma9PaUmlgFbAmIRVY4B2OWzWFpiYlcwio4FaQL1YTOI+RwOtJaXnERM/7iTgSEmNovI2AY4itP7iMSdIqhPFtAMOBt7JofzOlUiSOKVlbcb170SXpntzz5ivOeG+Scz8wSc7doVjR65DOzGX1WXyuZuqQCqwLGH5MkI3XnZqAD9kE5+1bn70M3EE5jLC660KLIlisjtujdjzQUAFYLakzdH2/zazB2MxfYHhwHeSsub/udLM3syu8JIuBi4G2HfffbMLca5Yq1q+NA+cfQgnHrSUG1+bxckPTuaijg24uvP+lElPzXsHzuVguxMa8CownnBBdaKCdo4n9nsqm2V5xScu396Y+LIzgPOAs4EvCC2vYZLmm9njUcyVQHvgRGAh4RzaPZIWmNk2rTQze4RwDR+tW7f2s+OuxOratAaHN9iL29/6kuHj5zL6i6Xc2b05hzXYK9lFc7upHUlo3wAXmtn8xBWSvs/nPlYAm9m6VQRQnW1bT1mW5hBPbJucYjYBK/OIiR/3buAeM3shej5TUl1gAPC4pLLAHcBpZvZGFDND0sHA3/FuR+dyValsOoNObcGJB9fiuldmcMYjH/LXw/fl2mMaUaFMet47cC5mR86hPUnovsvO8PzswMw2ANOBLgmrugBTcthsKtBRUpmE+MXAglhMYpdlFyDTzDbGYvI6bjlCwo3bzJ/1lh49cotxzuWhfcOqjL46gws71OfZad/RdcgE3v8qX+PKnPvDdn/omtkdZvZxDuv+XYBdDQZ6SeotqbGkYYQBHsMBJN0h6d1Y/HPAGmCEpGaSugPXAYPtzyGbw4HakoZG++wN9ALuie1nGHCUpAGSGkkaABwJDI3FvAFcJ+k4SfWi69T6E7pbMbNfCd2ud0o6QlJ9Sb0I3ZSvFqAOnCvxypVK48bjm/DyZe3Yo3Qa54/4mH4jP+On332yY5c/O2XYfoELIfUB/gHUBGYB/cxsQrRuBHCEmdWLxTcHHgAOBX4mJLB/xRIakjoBQ4CmhNbbIDPbquUo6VTgNqABMBe43sxeia2vQLje7BRCd+QS4IXoWOuimBqEbsejgSqE82iPAf+xPCrXh+07l731mzbzwPtzefD9b6lUNp1bTmrKcc1rkjB/giuhchq2X6CEJum9XFZvAVYRuhAfN7OczoG5iCc053L35ZJfufblGcz4YRVdmuzNbSc3Y++K+R1E7YqrwroOTYSZOY4A6hKG59eNnjcG6gM3ArOia7acc267Na5ZkVcua8c/uzViwtfL6Tx4PC989J1PduyyVdCENhhYB7Qys/3MrJ2Z7UeYgmodcAuwP7AcKMh5NOecy1ZaagoXZ+zH6KszaFKzIte9MpNzHpvGdyt9smO3tYImtNuAgWb2aXxhdDPPW4DbzOwHwnD3jMIponPOQb2qe/D8RYdz+ynNmfHDKo4eOp7HJs7zyY7dHwqa0A4g51vDLAcaRr/Pxe+J5pwrZCkp4uzD9mVs/wza7VeV2976kh4PTeHrZauTXTRXBBQ0oS0Aeuew7mL+vA6sKn9ewOycc4WqZqWyPN6zNcPOPJjvflrDcfdOZNi4b9iwaUuyi+aSqKAzhfwLeEbSDOBl4EfCcPYeQDPCFFEQLmqelu0enHOuEEjipIP3oUPDqtzyxmyGjPuat2cu4a5TW3BQncrJLp5LggJfhyapC+F8WSvCLBkbCbPr32xm46KYMsDm2KwcLhs+bN+5wjNu9jJueG0WP65ex4Ud6tO/y4GULeWTHRdHhXIdWsIOUwhdiyvMzNv528ETmnOF69d1G7lz1Fc8N+076u5Vjju7t6Dtfj7ZcXFT6PdDM7MtZvajJzPnXFFRsUw6t5/SnOcuOgyAsx79kAGvzOTXdd5ZVBIUOKFJqinpHkkfS5or6SNJd0VTQDnnXNK1268q71yVwcUZDRj58XccPXgC737pkxcVdwVKaJIOAD4j3NTyN+Aj4HfgKuAzSfsXdgGdc257lC2Vyj+7NeaVPu2pVDadC5/MpO/zn7Lyt/XJLprbSQraQhsE/AocYGZHmtlZZnYk4fq0VdF655wrMg6uU5k3ruxAv84HMGrWEroMmcDrny3y6bOKoYImtCOBG81sQXyhmS0EBkbrnXOuSCmVlsJVnffnrb4d2bdKOa564TN6P5nJklVrk100V4gKmtBKATldkr86Wu+cc0XSAXtX4OXL2nHDcY2ZPHcFXQZP4NlpC9ni02cVCwVNaJ8BV0ZD9v+gcJOiPtF655wrslJTRO+ODRhzdSda1K7E9a/O4uzHPmTBit+TXTS3gwp6P7RjgDcJczWOJNzwsgZwGmGW/ePMbMxOKGex5NehOZdcZsbIj7/n3299yYbNW/jb0QdwQfv6pKVu9xVNbhcotAuro6R2G9CScH80I9zU80YzG10IZS0xPKE5VzQsXbWOG16bxbgvl3FQ7UoMOrUFjWpUTHaxXA52xkwh5YA9gZ/NzG9MtB08oTlXdJgZb81cws2vf8GqtRvpc2RDLj9yP0qn+fRZRc3OmClkjZkt8mTmnCsOJHF8i1qM69+JEw6qxb3vfsPx907ik+9+TnbRXD7lmdAkbZG0OZ+PTbui0M45t7PsuUcphpxxME/0asNv6zfR46Ep3PrmbNZs8I+3oi4/t4/5F+E8mXPOlRhHNqrOmH4ZDHrnKx6fNJ8xs5dyZ/cWtG9YNdlFcznY7nNobsf5OTTndg/T5q3kuldmMn/F75zZpg4DujWmUtn0ZBerxCr0c2jOOVdSHNZgL0Zd1ZFLOjXgv5nf02XweMZ8sTTZxXIJPKE551w+lElPZcCxjXnt8vZU2aMUFz89ncuf+4Tlq32y46LCE5pzzhVAi9phsuO/H30AY79YRpch43n10x98suMiwBOac84VUHpqClcctT9vX9WBBlX3oN/Izzl/xMcs+sUnO04mT2jOObedGlavwIuXtuPmE5owbd5PHD14PE9/6JMdJ0uRSGiS+kiaL2mdpOmSOuYR31zSeElrJS2SdFM0QXI8plO0r3WS5km6NJv99JA0W9L66OcpCetTJd0aK9t8SbdJSkuIO0DSK5J+kbRG0ieSGu9InTjndg+pKeL89vUZ0y+DlvvuyY2vzeLMRz5k3vLfkl20EifpCU3SGcAw4HbC/JBTgFGS9s0hviIwFlgGtCHcPfsaoH8spj7wdrSvlsAdwH2SesRi2hImWH4WODj6+aKkw2KHuxa4PDpGI8KduS8HBiQcazIwHzgKaAbcQLijt3OuhKhTpRxPX3god53agq+W/sqxwyYyfPxcNm3ekuyilRhJvw5N0jRghpldFFv2DfCSmQ3IJv4ywp2x9zaztdGyG4DLgNpmZpIGAd3NbP/Ydo8BTc2sbfR8JFDFzLrEYsYBy83srOj5m8BKM+sZi3kS2MvMjo+ePweYmZ1T0Nfu16E5Vzz9+Os6bnx9FqO/WEazfSpyV4+DaFLLJzsuLEXyOjRJpYBWQOItZ8YA7XLYrC0wMSuZRUYDtYB6sZjEfY4GWktKzyMmftxJwJGSGkXlbUJohb0dPU8BTgBmS3pH0nJJH0etTudcCVW9YhkePrc1D51zCEtXrefE+ydxz+g5rNu4OdlFK9aS3eVYFUgldB/GLSPcZy07NXKIz1qXW0xadMzcYuLHHQQ8TUhYG4EvgCfN7MFofXWgPPBPQnLsAjwPPCvp+OwKL+liSZmSMpcvX57DS3TOFQfHNq/JuP4ZnHTwPtz//rccd+9Epi/8KdnFKraSndCyJPZ7KptlecUnLt/emPiyM4DzgLOBQ6Lf+0i6MFqfVX+vm9lgM/vMzAYD/yWca9u24GaPmFlrM2tdrVq17EKcc8VI5XKl+M/pB/HkBYeybuMWTh0+lYH/+4Lf1/tkx4Ut2QltBbCZbVtj1dm29ZRlaQ7xxLbJKWYTsDKPmPhx7wbuMbMXzGymmT0NDObPQSEron3OTtjPl0C2g1qccyVTpwOqMbpfBucdXpcRUxbQdegEJn7jvTSFKakJzcw2EO523SVhVRfCCMXsTAU6SiqTEL8YWBCL6ZzNPjPNbGMsJq/jliMk3LjNRPUWlf9j4MCEmAOAhTmU3zlXQpUvncYtJzXjxUvbUiothXMf/4hrXvycVWs25r2xy1OyW2gQWjy9JPWW1FjSMMIAj+EAku6Q9G4s/jlgDTBCUjNJ3YHrgMH255DN4UBtSUOjffYGegH3xPYzDDhK0gBJjSQNAI4EhsZi3gCuk3ScpHrRdWr9gVdjMXcBZ0TnxhpKugg4E3hgx6vGOVcctalXhbf7dqTPEfvxyqeL6DxkPO/MWpLsYu3+zCzpD6APoXW1ntBiy4itGwEsSIhvDkwA1gFLgJuJLkGIxXQCPon2OR+4NJvjngp8BWwgdBN2T1hfgZDgFgJrgXmE6+XKJMT1Ar6OYmYAZ+Xndbdq1cqccyXbzB9+sWOHTrC6175plz6dact+XZvsIhV5hN62bT5Tk34dWknm16E55wA2bt7CIxPmMezdbyibnsqNxzehxyH7kDABkosUyevQnHPOhcmOLz+yIW/37cj+1cvz9xc/p+cTH/PDz2uSXbTdiic055wrIhpWL89/L2nLv05qyvQFP3H0kAk8OWWBT3acT57QnHOuCElJEee1rcfofhm0rleFm//3Bac/PJVvf/TpYfPiCc0554qg2nuW48nz2/Cf0w7imx9/o9uwiTzw/rds9MmOc+QJzTnniihJ9GhVm3H9O9G5SXXuHj2Hk+6fzKxFq5JdtCLJE5pzzhVx1SqU5sFzWjH8r4ew/Lf1nPTAZAa985VPdpzAE5pzzu0mjmlWk3H9OtG95T489MFcug2byMcLfLLjLJ7QnHNuN1KpXDp3n3YQT194KBs2b+G04VO56fVZ/OaTHXtCc8653VHH/asx+uoMzm9fj6c/XEjXIRP4YM6PyS5WUnlCc8653dQepdO4+YSmvHRpO8qWSqXXEx/T/7+f8fPvG5JdtKTwhOacc7u5VnX35K2+HbjyqIb877PFdBkynrdnLqGkTW3oCc0554qB0mmp/O3oA/nfFR2oWaksfZ79hEufmc6Pv65LdtF2GU9ozjlXjDSpVZFX+7TjumMb8cGc5XQePJ7/Zn5fIlprntCcc66YSUtN4dJO+zHqqo40qlmRf7w0g3Mf/4jvfyrekx17QnPOuWKqQbXyvHDR4dx2cjM++/4Xjh4ygf+bNJ/NxXSyY09ozjlXjKWkiL8eXpcx/TI4rEEV/vXmbE4bPoVvlq1OdtEKnSc055wrAWpVLssTvdow9IyDmb/id467dxL3vftNsZrs2BOac86VEJI4ueU+jO3fiaOb7s1/xn7NCfdNYuYPxWOyY09ozjlXwlQtX5r7zz6ER85txc9rNnDSA5O4Y9SXu/1kx57QnHOuhDq6aQ3G9OvEGW3q8PD4eRw7bCIfzluZ7GJtN09ozjlXglUqm84d3VvwXO/D2LzFOPORD7n+1ZmsXrcx2UUrME9ozjnnaNewKu9c3ZHeHerz/EffcfSQCbz/1e412bEnNOeccwCUK5XGDcc34eXL2lG+dBrnj/iYq1/4lJ92k8mOPaE555zbSst99+TNvh246i/78+aMJXQZPJ43Pl9c5KfP8oTmnHNuG6XTUunX5QDe7NuB2nuW5crnP+Wip6azdFXRnezYE5pzzrkcNapRkVf6tOf6bo2Z9O1yugwez/MffVckW2ue0JxzzuUqNUVclNGAd67KoOk+FRnwykzOfnQaC1f+nuyibaVIJDRJfSTNl7RO0nRJHfOIby5pvKS1khZJukmSEmI6RftaJ2mepEuz2U8PSbMlrY9+npKwPlXSrbGyzZd0m6S0HMr1iCST9PftqQfnnCvK6lXdg+d6H87tpzRn1qJVdB06gccmzisykx0nPaFJOgMYBtwOtASmAKMk7ZtDfEVgLLAMaAP0Ba4B+sdi6gNvR/tqCdwB3CepRyymLTASeBY4OPr5oqTDYoe7Frg8OkYj4Kro+YBsynVqVJ7FBawC55zbbaSkiLMP25cx/TNov19VbnvrS7o/NIU5S5M/2bGS3Q8qaRoww8wuii37BnjJzLJLHJcBg4C9zWxttOwG4DKgtpmZpEFAdzPbP7bdY0BTM2sbPR8JVDGzLrGYccByMzsrev4msNLMesZingT2MrPjY8vqEpJnZ2AUcL+Z3ZPXa2/durVlZmbmXUnOOVcEmRlvzFjCwP99wep1G7n8yIb0OaIhpdJ2bltJ0nQza524PKktNEmlgFbAmIRVY4B2OWzWFpiYlcwio4FaQL1YTOI+RwOtJaXnERM/7iTgSEmNovI2AY4itP6yXkMa8Dxwm5l9mUOZnXOu2JHEiQfVYlz/TnRrXpOh477hhPsm8dn3vySlPMnucqwKpBK6D+OWATVy2KZGDvFZ63KLSYuOmVtM/LiDgKeB2ZI2Al8AT5rZg7GYWwituIdyKO9WJF0sKVNS5vLly/OziXPOFWlV9ijFsDNb8njP1qxau5HuD07m32/NZu2GXTvZcbITWpbEfk9lsyyv+MTl2xsTX3YGcB5wNnBI9HsfSRdCGHgC9AIuyKWsWxfc7BEza21mratVq5bfzZxzrsj7S+O9GdM/gzMP3ZdHJ86n69AJTJm7YpcdP9kJbQWwmW1bY9XZtvWUZWkO8cS2ySlmE7Ayj5j4ce8G7jGzF8xsppk9DQzmz0EhRwI1gSWSNknaBNQFBkn6IYfyO+dcsVWxTDq3n9Kc5y86HAnOfnQaA16Zya+7YLLjpCY0M9sATAe6JKzqQhhkkZ2pQEdJZRLiFwMLYjGds9lnppltjMXkddxyhIQbt5k/6+1BoAVhlGTWYzEwBPhLDuV3zrlir+1+e/HOVRlcnNGAkR9/R5fB4xk3exmvfbqI9ne+R/3r3qL9ne/x2qeLCu2Y2V5PtYsNBp6W9BEwGbiUMMBjOICkO4BDzSwrQTwH3AyMkHQbcABwHXCL/TlkczhwhaShwMNAe0LX4Fmx4w4DJkgaALwKnEJocXWIxbwBXCdpPuH8WUvC5QFPAZjZj8BW01FH59qWmtmc7a8S55zb/ZUtlco/uzXmuOY1ufblGfR+KpNUic3RR/WiX9Yy4JWZAJzccp8dPl6yuxwxs5HA1cANwGeEhNLNzBZGITWB/WLxqwgtqVpAJvAA8B9CYsyKmQ90AzKifV4P9DWzl2MxU4AzgZ7ADML5sTPMbFqseFcCLxFaYl9Gx3k02p9zzrl8OKhOZf53RQcqlEn7I5llWbtxM3ePLpzv/0m/Dq0k8+vQnHMlSf3r3sp2tJ+A+Xcel+/9FMnr0JxzzpUctSqXLdDygvKE5pxzbpe4puuBlE1P3WpZ2fRUrul6YKHsvygMCnHOOVcCZA38uHv0HBb/spZalctyTdcDC2VACHhCc845twud3HKfQktgibzL0TnnXLHgCc0551yx4AnNOedcseAJzTnnXLHgCc0551yx4DOFJJGk5cDCPAOzV5VwtwKXP15fBeP1VTBeXwW3I3VW18y2uf+WJ7TdlKTM7KZ+cdnz+ioYr6+C8foquJ1RZ97l6JxzrljwhOacc65Y8IS2+3ok2QXYzXh9FYzXV8F4fRVcodeZn0NzzjlXLHgLzTnnXLHgCc0551yx4AnNOedcseAJrYiQdLmkGZJ+jR5TJR0XWy9JAyUtlrRW0geSmibso7Sk+yStkPS7pP9Jqr3rX82uJ+mfkkzS/bFlXmeRqB4s4bE0tt7rKoGkmpKelLRc0jpJsyV1iq33OotIWpDN+8skvRWt3yV15Qmt6PgBuBY4BGgNvAe8JqlFtP4fwN+AK4E2wI/AWEkVYvsYCvQAzgI6AhWBNyVtfYvYYkbS4cBFwIyEVV5nW5sD1Iw9msfWeV3FSKoMTAYEHAc0JtTNj7Ewr7M/tWHr99YhgAH/jdbvmroyM38U0QfwE3AJ4Z9qCXB9bF1ZYDVwSfS8ErABOCcWUwfYAnRN9mvZiXVUCZgLHAV8ANwfLfc627qeBgKzcljndbVtndwOTM5lvddZ7vV3PfALUG5X1pW30IogSamSzgTKA1OA+kANYExWjJmtBSYA7aJFrYD0hJjvgS9jMcXRI8BLZvZewnKvs201kLRI0nxJL0hqEC33utrWycA0SSMl/SjpM0lXSFK03ussB1EdXQg8Y2Zr2IV15QmtCJHUXNJvwHpgOHCKmc0kvBkAliVssiy2rgawmW0n+4zHFCuSLgIaAjdms9rrbGvTgF7AsYTu2RrAFEl74XWVnQZAH2Ae0BUYBtwJXB6t9zrLWRdCEnsser7L6iqtQMV0O9sc4GCgMqEv+UlJR8TWJ14Fr2yWJcpPzG5H0oGEbqGOZrYhl1CvM8DMRsWfS/qQ8GHdE/gwKyxhsxJZV5EUINPMBkTPP5W0PyGh3R+L8zrb1kXAx2b2WcLynV5X3kIrQsxsg5l9a2ZZ/0ifAf2ArNFoid9UqvPnt56lQCrhlgw5xRQnbQmvdZakTZI2AZ2APtHvK6M4r7NsmNlvwBfA/vj7KztLgNkJy74E9o1+9zrLhqTqwEnAo7HFu6yuPKEVbSlAaWA+4Q/eJWuFpDKEkUBTokXTgY0JMbUJo7OyYoqT1wij9A6OPTKBF6Lfv8brLEdRXTQifHD7+2tbk4EDE5YdwJ/3L/Q6y14vwimTF2LLdl1dJXs0jD/+GNFzZ/QHrkf4oL6DMMLn2Gj9tcCvQHegWfSGWQxUiO3jIWAR0BloCbxPaOWlJvv17aI6/IBolKPX2TZ1cw+hBVsfOAx4M6qbul5X2dZXm+gD9nrCedrTgFXA5f7+yrHORPgi+Wg263ZJXSW9Evzxxx9zBOHb33rCNRrjiA1Xjd4sAwnfqNcB44FmCfsoA9xH6G5bA7wB1En2a9uFdZiY0LzO/nydWR8gG6IPjZeBJl5XudbZccDnUX18DfQlmtDd6yzb+jqScL7r0GzW7ZK68tn2nXPOFQt+Ds0551yx4AnNOedcseAJzTnnXLHgCc0551yx4AnNOedcseAJzTnnXLHgCc25YiKHGywmPhbksv2IWNwHseX1omW9E+KrSvpU0k+S2kTLxsX28czOeq3OZccnJ3au+Gib8PxVwoXBA2PL1uexj6XAKYRZHXIkaW/gXWBv4Cj7cyLaKwn3tno1XyV2rhB5QnOumDCzD+PPJa0HViQuz8P6vOIl7UNIZpWATmb2xyS+ZvZl7NjO7VKe0Jxz+SZpX+A9wqTZnczs6yQXybk/eEJzzuVXA8Jdhg3IMLP5SS6Pc1vxhOacy68BhLsKN/Vk5ooiH+XonMuvdwizpg+TVDrZhXEukSc051x+vQxcABwNvCgpPcnlcW4rntCcc/lmZk8ClwInAM9JSk1ykZz7gyc051yBmNkjwFXAqcBTkvxzxBUJPijEOVdgZnZvdB7tLmC9pAvN7xbskswTmnNuu5jZ3VFSuxVYB/RJcpFcCecJzbliyszqbc92ktLC5rY52s8CwujG7I5xG3BbbNsU/FSGSxJ/4znn4uoCGwlTW22PMdH2dQutRM7lk7zb2zkHYVZ9oGr0dLWZzdmOfRwAVIyervQLsN2u5AnNOedcseBdjs4554oFT2jOOeeKBU9ozjnnigVPaM4554oFT2jOOeeKhf8HANG7L3WF8oIAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZ0AAAEhCAYAAACk132sAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAA5iUlEQVR4nO3dd5gUVdbH8e+ZIeecc84ZFYRBXRAUUQTzvipmxUDYVddVVgxrXgTDqpgwrgklSlYYSSIgDEhSomQQyZm57x9Vo00zqWd6uif8Ps9Tz0xX3ao+fXumT9etW/eacw4REZFIiIl2ACIiknco6YiISMQo6YiISMQo6YiISMQo6YiISMQo6YiISMQo6cgfzKyfmbmA5biZrTWzp8ysUDaI7eZoxpDTmdmGgPf2pJn9Zmbfm9kzZlYrSjH1NrPByaw/z4/zvAjHc8bzmtlMM5sZ8LiVmQ01szKRjC23UNKR5FwJdAB6AlOAh4DnoxoR9AOUdDJvCt572xn4P2AccA2w3Mwuj0I8vYEzkg6wGC/OxRGNJnn9/SVJK+BRQEknA/JFOwDJlpY4537xf59mZvWBW8xsgHMuMZqBhZOZFXTOHYt2HBG22zk3P+DxJDMbgZeMPjKzBs65zVGK7Q/Ouf3A/DQLRoBzbkW0Y8hNdKYj6bEYKAyUS1phZkXM7FkzW+83w603s4fNLCagTCEze9HMlpvZQTPbbmbjzaxR8BOYWW0z+8Avc8zM1vkfhvhNG12AcwOah2YG7HuWmU33n+OQmc0ws7OCjj/KzDabWQczm2tmR4DnUnvRZna5mc3xj7vfzBaY2aUB2+8xs3lmtsfM9prZfDPrGXSMfGb2hN9MedTMdpvZbDPrFFTuNjNbGlDm7Ug13zjnDuJ9ky8M3JFWeTPr4b/uI2a2z8zGmFnDoDIz/dd5mf/+HzOzVWZ2VUCZUcCNQNWA93WDvy2lZq7Z/vMv8Z//RzM726/np8xsm/9+jDKzokExPWZmi/2Yd5vZN2Z2Tjpe7x/Na2bWD3jX3/RzQNy1zGyZmX2VzP5Jr6V7Ws+VF+hMR9KjFrAP+A28D1K8b8ZNgCeAZcA5wBC8Joe/+fsVBIoDTwLb/G39gflm1sg5t90/Xm1gAXAYr9niZ6A6cKF/nP7Ah0Asf34o7vf3bQHMAlbgNcE54B/ALDM7xzm3NOB1lAQ+AV4A/gkcSekFm9m9wEvAGLwPxoNAG78uAuvlLWAD3v9SL2CCmV3snJvkl3kQGAQ8DCwBSgDtCGiaMbNn/Dp7CbgfqOrXWTMz6+icO5VSnOHinFtqZluBc1MrZ2Y9gInAN8DVQDHgcWC2mbVyzm0JKF4P7zUNBXYCdwGfmNku59y3eH875YH2QFIyT+vMsx5eU++/8d6T5/CaCMfhvQf9gMZ+mZ3AAwH7VgVeBDYDRfGaF+PNrJ1zLiGN500yEe+9eQSvGTrprHAb8BowwsyqOOe2BuxzB7AemJrO58jdnHNatOCcgz8/tBvi/QOXxruOchK4J6Dc9X65uKD9HwaOAxVSOH4sUAQ4AAwKWP8+3gdIlVRimwnMTmb9F8BeoFTAuhLAHuDLgHWj/JgvS0c9lPBj/DKtsgH7xPh1NhUYG7B+QmrHwUtcp4B/Ba0/14+3dxjf3w3Ah6lsnwesTOMYC/G+FOQLWFcbOAEMC3q/HHBO0Pu/Cvgu6H3ZnMzznOfvf17QMU8AdQLWXeqXmx60/5fA+lReR6z/fq0GRqTjeWcm839SL+iYxfG+DA0JWFcOL5H+I1zvY05f1LwmyVmF98+9B3gbeMM590rA9h7ARmCu36yRzz/7mQrkxzvrAcDMrjKvh9RevOR1CO/bcWBzzIXABHf6t8P0ivP33Zu0wnnXA8bhNckFOomXBNLS0Y9xZGqFzKytmU0wsx3+sU8A3Tj9tf0AXGxm/zazTmZWIOgw3fAS1kdBdfk93gdYXCrPHxO4jwU0bWaQ4X2YpvR8RfHO9j51zp1MWu+cWw/M4cz6/tUFXD9y3hnb58BZmYh1jXNuXcDjVf7PKUHlVgHVzMwC4u9qZt+a2W/8+X414PT3K8OccwfwzshvDXh9N+HV67sp7pjHKOlIci7Ha/K4GJgO9DezGwK2VwBq4v3TBi4L/O1lAcysF/ApsBK4DjjbP+4uILALdln+bKYIVRm8po1g2/HO1ALtdOlrqirr/0wxJjOrDszwn/9evETVHpjM6a/tKbwmw0uB74DfzOxdM0u6PlbB//kLZ9ZniYBYkvNOUPl30vHaUlOd5OsySWm8D9CU6jv4GtSOZMrtAArgNatlxO9Bj4+nsj4f3hkNZtYG+BrvjPoWvC9G7YGlnP5+ZdZ/gRp4XzQMuB34yjmXXF3kSbqmI8lZ7vzea2b2DZAAPG9mo51zh/Cu7awHrkph/w3+z2uAX5xz/ZI2mFl+zvxw2o3X3p4Re4BKyayv5G8LlN55PHb7P6sCy1Mo0wPvGtFVLqC3l5kVOe0JnTsBPAs8a2aVgEuAYXjNjFfjXyfDO9sL/uAkYHtyhgKBZ6C7UyiXJjNrBVTBu0aVkt/x6jCl+g6OtWIy5SriJYRdoUeZKX3xzm76+O8JAGZWGq95Niycc8vN7Du86zhH8a5Bpdk5Iy9R0pFUOeeOmdn9wFi8C/rP432b7wscdM6tSmX3Inj/6IGux//2GWAq0MfMKjvnUvqmfQyvzTzYLKCnmRX3mzcws+J4F/VnphJbaubifSO+nTObbZIkJZfAD7AGeNdikj1Dcl7HibfM7GKgmb96GpAI1HDOTQslSOfcBv5M8BlmZsWAV/E6cryRyvMdMrNFwJVmNjTprNHMauKd6b0ctEt1vzPHfL9cLN7F9wXuz673x/B6zWW1InjXzv744mFmF+CdlawP8VhJnR1Sivu/eM1spfGaA78J8fi5mpKOpMk5N87MfgD+bmavAB/htVXPMLP/4DVRFADq4jUj9XbOHcZLTr3N7EW8ayltgfs485vlo3g3os41s6fwmpqqAj2cc//nl1mB18x3NbAWOOCcW43XA+oSP5Zn8T5UHsT7kHk8g6/3gJk9BLxsZqP913sA76bAo865l/GaHU8C7/t1UBl4DNhEQLO1mY3162cx3plCa7yzpDf851rrx/2Ked2OZ+F9Q66Od73nLef19AqXcn43YcM7U2sD3IbX3HVtOq6rDcHrwTXBzP6Ld+3rMbzejf8JKrsD+NTMHsU7s7kL7xrKXQFlVgBlzOwuvE4KR51zyzLx+lIyGRgIjDKzd/04hgBbUtspBUn37dxtZu/hffFIcM4lNfWNBobjfQH525m753HR7smgJfsspNArx992ob9tkP+4EF7zziq8b3578C6aD8Xv2YT34fsksBXvW/QsvA/dDcCooOPXBf6H10R0DFgHvBiwvRJem/wBP46ZAdvOxksCB/E6KswAzgo6/iiS6SWVRn1cgXdB/wjeRf3vgUsCtl/lv/6jwE94zYmjgA0BZf6Gd5Pjb/5xVvt1lD/oua73yx3yX8dKvKazamF8fzf4defwvvX/7r9nzwA1QzhOD7yebkfwks1YoGFQmZnAbLwvIcv993Q1cHVQuaL++57UdLfBX38eyfcimx20fy2/3K1B64f66wN72d2Ld1ZzxH/dXTmzZ1pKzzsz6PiP4iWspLOnWkHb3/D/LspG+/86uy3mV5CISNj4N1Pmc851SqtsbuP3PvwFr2v49dGOJ7tR85qISBiYWQm8a3XX4TWPBjc3Cko6IiLh0gb4Fm8khAHOuSXRDSd7UvOaiIhEjG4OFRGRiFHzWirKlSvnatWqFe0wRERylEWLFu12ziU76oSSTipq1arFwoULox2GiEiOYmYbU9qm5jUREYkYJR0REYkYJR0REYkYJR0REYkYJR0REYkY9V4LszE/buH5KavZuvcIVUoV5v7uDendOqNTxYiI5C5KOmE05sctPPTlMo6c8Can3LL3CA996Y3SrsQjIqLmtbB6fsrqPxJOkiMnTvH8lNVRikhEJHtR0gmjrXuPhLReRCSvUdIJoyqlkp+9tlD+WH47eCzZbSIieYmSThjd370hhfPHnrYuX4xx/FQiXYfNYuySLWhUbxHJy5R0wqh366o83ac5VUsVxoCqpQrzwpUtmTSgMzXLFmXAJ0u45b2Fam4TkTxL8+mkol27di5cA36eSnS8O2c9L0xdTb6YGB66uBHXtq9BTIyF5fgiItmFmS1yzrVLbpvOdCIkNsa4tXMdpg7sQotqJXn4q+Vc++Z81u8+FO3QREQiRkknwmqULcJHt57Ns32bs2LbfnoMj2dk/FpOnkqMdmgiIllOSScKzIyr29dg+uAuxDUoz1Nfr6LPa3NZuW1/tEMTEclSSjpRVLFEIUZe35ZXrmvNlt+P0Ovl2QybuppjJ0+lvbOISA6kpBNlZsYlLaowfXAXerWswkvf/MIlL81m8abfox2aiEjYKelkE6WLFuDFq1vxbr/2HDx2kr6vzeXx8Ss4fPxktEMTEQkbJZ1s5vxGFZg6KI6/nl2Dd+asp/vweOb8sjvaYYmIhIWSTjZUvFB+nuzdnE9vP4d8MTH89a3vefCLBPYdORHt0EREMkVJJxs7u05ZJg3ozJ1d6vLF4s10GzaLKT9tj3ZYIiIZpqSTzRXKH8s/LmrEmP7nUrZYQe74YBF3f7SYXQc0gKiI5Dw5MumYWX8zW29mR81skZl1TqVsLTNzySw9IhlzZjWvVpJx95zL3y9swLQVO+j24iy+XLxZA4iKSI6S45KOmV0NjACeAloDc4FJZlYjjV17AJUDlm+yMs6skD82hnsuqM/XAzpRp1xRBn+2lJtG/cAWDSAqIjlEjks6wGBglHPuTefcSufcvcA24K409vvNObc9YDme9aFmjXoVivP5nR15tFcTvl+3hwuHzeKDeRtITNRZj4hkbzkq6ZhZAaAtMDVo01SgYxq7f2lmO81sjpldkcpz3G5mC81s4a5duzIZcdaJjTFuOrc2UwfF0aZmaYaM/YlrRs5n3a6D0Q5NRCRFOSrpAOWAWGBH0PodQKUU9jkI/B24CrgYmAF8amb/l1xh59xI51w751y78uXLhyfqLFS9TBHev/ksnr+iBau276fHiO94baYGEBWR7ClftAPIoOB2JEtmnVfQud3AfwJWLTSzcsADwIdZE15kmRlXtqtOlwblGTJ2Oc9OXsXEZVt5tm8LmlYpGe3wRET+kNPOdHYDpzjzrKYCZ579pOZ7oH64gsouKpQoxBvXt+O1v7Zh+75jXPrKHJ6fsoqjJzSAqIhkDzkq6fgX/xcB3YI2dcPrxZZerfA6H+RKFzWvzPTBcfRuVZVXv11Lz5e+Y9HGPdEOS0QkZyUd3zCgn5ndamaNzWwEUAV4HcDMnjazGUmFzexGM7vOL9vQzP4O3A28HJXoI6RUkQL856qWvHfzWRw9kcgVr89j6LifOHRMA4iKSPTkuGs6zrlPzaws8Aje/TbLgYudcxv9IpWBukG7PQLUxGuaWwPc7JzLFddz0tKlQXmmDIrj+cmreG/eBqat2MHTfZoT1yD7d5IQkdzHdEd7ytq1a+cWLlwY7TDC5ocNe3hwdALrdh3iirbVeKRnY0oVKRDtsEQklzGzRc65dslty4nNa5JB7WuV4ev7OtP/vLp89eMWug6LZ9KyXHtpS0SyISWdPKZQ/lge6NGIsXefS4XiBbnro8Xc9eEidh44Gu3QRCQPUNLJo5pVLcnYe87l/u4NmbFqJ92GxfP5wl81gKiIZKmQko6ZrTOzlilsa2Zm68ITlkRC/tgY7j6/Hl/f15n6FYpx/xcJ3PDOAn7dczjaoYlILhXqmU4toGAK2wrh9RCTHKZehWJ8dkcHHr+sKYs3/k734fGMmrNeA4iKSNhlpHktpU+idsDejIci0RQTY9zQoRZTBsXRrlYZho5fwVVvzOOXnRpAVETCJ82kY2aDzGyTmW3CSzjjkx4HLLuAV4HJWR2wZK1qpYvw3k3t+c+VLfl550EuHvEdr377Cyc0gKiIhEF6bg5dhzcyM8CNwEIgeMz/Y8AK4K3whSbRYmb0bVuNuAbleXTccp6fspqJCdt47ooWNKuqAURFJONCujnUzN4FHnfOrc+6kLKP3HZzaEZNXr6dIWOXs+fQcW6Pq8OAv9SnUP7YaIclItlU2G4Odc7dlFcSjvypR7NKTB/Uhb5tqvLazLVcPOI7ftigAURFJHQhdyTwB9CcbGYr/C7UgcvarAhSoq9kkfw8d0VLPrzlbI6fSuTK1+fxr7HLOagBREUkBCEN+GlmQ4DH8AbZXIJ3LUfykE71yzFlYBwvTF3NqLkbmL5iB//u05zzG1aIdmgikgOEek1nA/CVc25QlkWUjeiaTuoWbfydB0cn8MvOg/RpXZUhlzShdFENICqS14VzwM+ywPjMhyS5QduapZl4XyfuvaAe45ZupduLs5iYsE1D6YhIikJNOrOAZIfBkbypYL5Y/nZhQ8bd04nKJQtz98eLueODRezcrwFEReRMoSadgcBNZnaDmZUzs5jgJQtilBygSZUSfNW/Iw9d1IhZa3bxl2Gz+OwHDSAqIqcL9ZpO0m3pKe3knHM5bjbSlOiaTsas23WQf3y5jAXr99CpXjmeurw5NcoWiXZYIhIhqV3TCTVBPE7KCUcEgDrli/HJbefw8YJNPDNpFd2Hx/P37g3p17EWsTEW7fBEJIo0XXUqdKaTeVv3HuGfXy1j5updtK5Riuf6tqB+xeLRDktEspCmq5aoqVKqMO/2a8/wq1uxYfcher40m5dm/MzxkxpAVCQvytD1F38it4Z4c+icxjn3fmaDktzFzOjduiqd6pfjsfErGDZtDV8v8wYQbVGtVLTDE5EICrUjQSlgInBO0ir/5x8Hcc7lmpEg1byWNaat2MEjY5ax68Axbutch0HdGmgAUZFcJJzNa0/h3SAah5dwLgcuAD7CmwLhrEzEKXlEtyYVmTqoC1e3r84b8evoMTye+et+i3ZYIhIBoSad7niJZ77/eLNzbqZz7gZgOjAgnMFJ7lWycH6e7tOCj289m0QH14ycz8NfLePA0RPRDk1EslCoSacysM45dwo4CgR2Q/oS6BmuwCRv6FivHJMHdubWTrX534JNXPhiPN+s2hHtsEQki4SadLYDpfzfNwIdArbVC0dAkvcUKZCPRy5pwui7OlK8UD5uHrWQgZ/8yJ5Dx6MdmoiEWahJZzZ/JpoPgEfN7A0zexV4HpgSzuAkb2ldozQT7u3MgL/UZ+KybXQdNotxS7dqKB2RXCTU3mt1gSrOue/MLD/wDHA1UASYDNzrnMs1V4TVey16Vm3fz4NfJLB08z66Nq7Ik72bUankGT30RSQbSq33mkYkSIWSTnSdSnS8M3s9/5m2mvwxMfyzZ2OuaV8dMw2lI5KdaUQCyZFiY4zb4uoweUAcTauW4KEvl3Hdm9+z8bdD0Q5NRDIo5KRjZjea2WQzW2Fm64KWtVkRpORttcoV5eNbz+Gpy5uzfMs+ug+P563v1nEqUWfpIjlNSEnHzIYA7wJVgCV4k7oFLvFhji+lOPqb2XozO2pmi8yscxrlm5vZLDM7YmZbzOxfpjaaHCUmxrju7BpMHRzHuXXL8eTElfR5bS6rtx+IdmgiEoJQOxJsAL5yzg3KsojSjuFq4EOgP15vuv7ATUAT59ymZMqXANbgJcTH8caMGwUMdc79J7Xn0jWd7Mk5x/iEbQwd9xMHjp7g7vPr0f+8ehTIp9ZikewgnNd0ygLjMx9SpgwGRjnn3nTOrXTO3QtsA+5Kofxf8XrX3eicW+6cGw08CwzW2U7OZGZc2rIK0wd34eLmlRk+/Wd6vTybJb/ujXZoIpKGUJPOLKBlVgSSHmZWAGgLTA3aNBXomMJuHYDvnHNHAtZNwWsirBXuGCVyyhQtwIhrWvP2je3Yd+QEff47hycnrODI8VPRDk1EUhBq0hkI3GRmN5hZOTOLCV6yIMZA5YBYIHiclB1ApRT2qZRC+aRtpzGz281soZkt3LVrV2ZilQj5S+OKTB0cxzVn1eCt2evpPjyeuWt3RzssEUlGqEliDdAMrzPBDuBE0BKpcUuCL0RZMuvSKp/cepxzI51z7Zxz7cqXL5+JECWSShTKz1OXN+d/t51DjMF1b37PQ18msF8DiIpkK6FO4vY4qX+4Z7XdwCnOPEOpwJlnM0m2p1CeVPaRHKpD3bJMGhDH8OlrePO7dXyzaif/7t2crk0qRjs0ESHEpOOcG5pFcaT3+Y+b2SKgG/B5wKZuwOgUdpsHPGtmhZxzRwPKbwU2ZFWsEj2FC8Ty0MWN6dmiMg98kcCt7y+kV8sqDO3VhLLFCkY7PJE8LSf2MR0G9DOzW82ssZmNwOsU8DqAmT1tZjMCyn8MHAZGmVkzM+sD/AMY5jQGUK7Wolopxt3TicHdGjB5uTeA6Jgft2gAUZEoynFJxzn3KV6HhkfwblDtBFzsnNvoF6kM1A0ovw/vzKYKsBB4FfgPXvKSXK5Avhju+0t9Jt7XmZplizLw0yXc8t5Ctu49kvbOIhJ2ad4camangA7OuQVmlkgaF+ydc6FeJ8q2dHNo7nIq0TFq7gZemLKa2BjjHxc14rqzahATo9u1RMIptZtD05MgHgc2B/yutgnJkWJjjFs61aZb44o89FUCj4xZzvilW3mmbwtqlysa7fBE8gRNbZAKnenkXs45Pl+4mScmruD4yUQGd2vALZ1qky82x7U4i2Q7mT3TSe6A1YHqwBmzajnnvsnIMUUiycy4qn11ujQszyNjlvP0pFVMXLaNZ/u2oHHlEtEOTyTXCnXAzzrAR8BZSav8n87/3TnnYsMaYRTpTCdvcM7x9bLtPDpuOXsPn6D/eXW5+4J6FMyXa/6URSIqnGc6bwE18HqPrSJyIxCIZBkzo2eLynSsW5YnJqzgpW9+4evl23m2bwva1iwd7fBEcpVQz3QOAP38kZpzPZ3p5E3frt7Jw18uY9v+o/TrWIv7uzekSIFc0ylTJMuFc2qDzejsRnK58xtWYOrgLlx/Tk3enbOBC1+MZ/bPGkBUJBxCTTpPAQ+amfqXSq5WrGA+Hr+sGZ/d0YH8sTH839vf88AXS9l3RAOIimRGqGOvfWBmjYANZjYf+P3MIu7GsEUnEmVn1S7DpAGdGTHjZ0bGr2Pm6l080bsZ3ZumNJOGiKQm1Gs6/YB38EZ63smZTW3OOVcnbNFFma7pSKBlm/fxwOgEVm7bT8/mlRl6aVPKF9cAoiLBUrumE2rS2Yg3ftktzrm94Qkv+1LSkWAnTiUyMn4dI6b/TOECsfzrkib0aVMVzXwu8qdwdiQoC/w3LyQckeTkj43h7vPr8fWATtSrUIy/fb6Ufu/+wBYNICqSLqEmndlA46wIRCQnqVehOJ/f0YGhvZrww4Y9XDhsFu/P20BiooaVEklNqElnAHCbmf3VzMqaWUzwkhVBimRHMTFGv3NrM2VgHG1qluZfY3/i6pHzWLvrYLRDE8m2Qr2mk+j/mtJOmtpA8iTnHF8s2swTE1Zw9GQiA7vW57bOdcivAUQlDwrnMDia2kAkGWbGle28AUT/NeYnnpu8mokJ3gCizaqWjHZ4ItmGpjZIhc50JKMmLdvGkLE/8fvh49zZpQ73XlCfQvk1gKjkDWHpvWZmBczsKzOLC19oIrnTRc0rM31wHJe3rsqr367l4pe+Y+GGPdEOSyTq0p10nHPHga6h7COSl5UqUoAXrmzJ+zefxbETiVz5xjyGjvuJQ8dORjs0kagJNYHMAc7JikBEcqu4BuWZOiiOGzvU4r153gCi8Wt2RTsskagINen8DbjFzO4xs2pmFqsu0yJpK1owH0Mvbcrnd3SgYP4YbnhnAX//fCl7D2vQdslb1GU6FepIIFnh6IlTvPzNz7w+ax2lixTgicuaclHzytEOSyRs1GVaJBsplD+W+7s34uLmlXngiwTu+mgxPZpW4vHLmlKhRKFohyeSpdRlOhU605GsdvJUIm9+t54Xp6+hUL4YhlzShCvaVtMAopKjhXPATxEJo3yxMdx1Xl0mDehMw0rFuf+LBG54ZwG/7jkc7dBEskTIScfMWpvZl2a228xOmlkbf/1TZtYj/CGK5H51yxfj09s78MRlTVm88Xe6D49n1Jz1GkBUcp2Qko6ZdQLmAY2Aj4P2TwTuDF9oInlLTIxxfYdaTBkUR/taZRg6fgVXvjGPX3YeiHZoImET6pnOM8AUoCkwOGjbYqBNOIISycuqlS7CqJvaM+yqlqzddZCLR8zmlW9+5sSpxLR3FsnmQk06bYDXnNf7IPi8fzdQPixRieRxZkafNtWYNqgL3ZpW5IWpa7j0lTks37Iv2qGJZEqoSecoUCSFbZUB/UeIhFH54gV59bo2vHF9W3YfPMZlr87hmUmrOHriVLRDE8mQjMwcOtDMAofLTTrjuQX4JixRichpujetxPRBXbiiTTVen7WWi0d8x4L1GkBUcp5Qk84QvCa2pf7vDrjRzL7FG5PtsfCGJyJJShbJz7NXtODDW87m+KlErnpjHkPGLOfA0RPRDk0k3UJKOs65pUAcsAN4GDDgHn9zF+fc6vCGdzozK2hmL/vdtQ+Z2Tgzq5bGPv3MzCWz6NZvyZE61S/H1EFx3HxubT78fiPdX4zn29U7ox2WSLqEfJ+Oc26xc+4vQHGgGlDCOXe+c+7HsEd3puFAX+BaoDNQApgQ1NyXnMN415z+WJxzR7MwTpEsVaRAPv7Vqwlf3NmRogXzcdO7PzD40yX8fkgDiEr2Fup9OuvMrCWAc+6oc26rc+6wv62Zma3LiiD945fEu250v3NumnNuMXA90AJvnp/UOOfc9sAlq+IUiaS2NUsz4b5O3HdBPcYt3UrXYbOYkLAVDW8l2VWoZzq1gIIpbCsE1MxUNKlrC+QHpiatcM79CqwEOqaxb2Ez22hmm81sgpm1zsI4RSKqYL5YBl/YkPH3dqJKqcLc8/GP3PHBInbs18m8ZD8ZGXstpa9Q7YC9GQ8lTZWAU3j3AwXa4W9LyWrgZuAyvGa5o8AcM6ufXGEzu93MFprZwl27NNGW5ByNK5fgq/4deeiiRsxas4uuw2bx6Q+bdNYj2Uqao0yb2SBgkP+wKrALCG44LgyUAT5xzv01pADMnsTrlJCa84EqwPtAfhcQtN9zbrVzLl1D8PjXf5YA3zrn7kutrEaZlpxq/e5DPDg6gQXr93BuvbI8fXkLapRN6RY7kfDK7Hw664AZ/u83AgvxEk+gY8AK4K0MxDcc+DCNMpvwumTHAuWCnr8CEJ/eJ3POnTKzhUCyZzoiuUHtckX55LZz+HjBJp6ZtIruw+P5e/eG9OtYi9gYTZsg0ZNm0nHOjQXGAklzfDzunFsfrgCcc7s5s8nsDGa2CDgBdMMbbBS/u3RjYG56n8+8F9EC714jkVwrJsb4v3NqckGjCjwyZjlPTFjB+KVbee6KFjSoWDza4UkeFep9OjeFM+GE+Nz7gLeB582sq98Z4AMgAZieVM7MZpjZ0wGPHzWz7mZWx8xa+cdoAbwe0RcgEiVVShXm7RvbMeKaVmz87RA9X/qOl2b8zPGTGkBUIi+nzaczCPgS+BSYAxwEejnnAgeiqot3L06SUsBIvF5uU/GuS8U55xZkcawi2YaZcVmrqkwf3IUezSozbNoaLn1lNkt/3Rvt0CSPCWm6an8+nel413mm441G0M45t9jvENDMOdc7KwKNBnUkkNxq2oodPDJmGbsOHOO2znUY2LUBhQukdY+1SPqEc7pqzacjkgt0a1KRaYO7cHX76rwRv46LRsQzb+1v0Q5L8gDNpyOSR5UolJ+n+7Tg41vPJtHBtW/O559fLWO/BhCVLKT5dETyuI71yjFlYBy3da7NJws2ceGweL5ZtSPaYUkupfl0RITCBWJ5uGcTvux/LiUL5+fmUQsZ8MmP/HbwWLRDk1xG8+mIyB9aVS/F+Hs7MbBrfb5eto1uL8YzbqkGEJXwyVHz6YhI1iuQL4aBXRsw4d7OVC9ThPv+9yO3vb+Q7fs0gKhkXkhdpk/b0ZsErQywN2l6g9xGXaYlrzuV6Hh3znpemLqa/DExPHRxY65pX50YDaUjqQhbl2kzK2RmnczsSuASoDag25pFcqnYGOPWznWYMjCOZlVL8s+vlnHdW/PZsPtQtEOTHCpdScefJnoEsAeYhTciwGd4A23+ZmYvmFmBrAtTRKKpZtmifHzb2TzTpzk/bdlPjxHxvBm/jlOJutYjoUnP1AaGN3zMBXgDf36NN+qzAdXxznh6AVOccxdnabQRpuY1kTNt33eUR8YsY/rKnbSsVpLnrmhJw0oaQFT+lFrzWnqSzpXA/4ArnXNfpVCmD96Zz1XOuS8zGW+2oaQjkjznHBMStjF03E/sP3qC/ufV4+7z61EgX0bmhZTcJrPXdK4FPksp4QD4ieZzIKQJ3EQkZzIzerWswrTBXejZvDIjZvzMJS9/x4+bfo92aJLNpSfptAYmpqPcBDT2mkieUqZoAYZf05p3+rXjwNGT9HltLk9MWMHh4yejHZpkU+lJOuXxruGkZRPeLJ4iksdc0KgiUwfF8deza/D27PX0GP4dc39Jc25GyYPSk3SK4E1HnZbjQKHMhSMiOVXxQvl5sndzPrn9HGIMrnvre/4xOoF9RzSAqPwpzemqfVXNrE4aZaplNhgRyfnOqVOWyQPjeHH6Gt6MX8e3q3fyZO/mdGtSMdqhSTaQnt5riZw5jUGyRQHnnMs1M0Gp95pI5iRs3ssDXySwavsBLmlRmaGXNqVcsYLRDkuyWGq919JzpnNTmOMRkTyiRbVSjLunE2/MWsvL3/zCnF9282ivplzWqgreLYCS12R47LW8QGc6IuHz844DPDA6gR837eX8huX59+XNqVKqcLTDkiwQzumqRUQypH7F4nxxZ0f+dUkT5q/bw4UvxvPB/I0kaiidPEVJR0QiJjbGuLlTbaYOiqNV9VIMGbOca96cz3oNIJpnKOmISMRVL1OED245i+f6tmDltv30GB7P67PWcvKUBq3P7ZR0RCQqzIyr2ldn+uAudGlQnmcmreLy/85lxdb90Q5NspCSjohEVcUShXjj+ra8el0btu07wqWvzOY/U1dz7OSpaIcmWUBJR0Sizszo2aIy0wZ14dJWVXj5m1/o+dJsFm3UAKK5TaaTjpm1MbPe/qIBP0Ukw0oXLcCwq1ox6qb2HDl+iiten8tj43/i0DENIJpbZPg+HTNrC3yEN1pB0qRuNf3N1znnFoUlwijSfToi0XPw2Emem7yK9+dtpFrpwjzdpzmd65ePdliSDll1n85I4B7nXGPnXHfn3IXOuYbAPcCbmTiuiAjFCubj8cua8dkdHSgQG8P1by/ggS+Wsu+wBhDNyTKTdIo556YHr3TOTQOKZuK4IiJ/OKt2Gb4e0Jm7zqvL6MVb6PriLCYv3x7tsCSDMpN0dprZTWb2xwCfZhZrZrcCmkhDRMKmUP5YHuzRiLF3n0v5YgW588NF3P3RYnYdSM+sK5KdZCbp3Ig3lfUeM1tpZiuBPcDV/jYRkbBqVrUkY+85l/u7N2Tayh10HTaL0Ys2ozEkc45MD/hpZuWAGv7DTc65XHOWo44EItnXLzsP8uDoBBZt/J24BuV56vJmVCtdJNphCVk84KdzbrdzbrG/7PafMEumrTaz283sWzPba2bOzGqlc7++ZrbCzI75Py/PivhEJHLqVSjG53d04LFLm7Jwwx66vxjP+/M2aADRbC6rbg7NqtODIsBUYGh6dzCzDsCneN27W/k/Pzezs7MgPhGJoJgY48aOtZgyMI42NUvzr7E/cfXIeazddTDaoUkKMnOfzqWpbH7LOZclZzv+c7cDfgBqO+c2pFH2U6CMc65bwLrpwC7n3LWp7avmNZGcwznH6MVbeGLCCo6cOMXArvW5rXMd8sdq4JVIy+zMoSn5CpiFd1NosOKZOG64dQBeDlo3Be9+ojOY2e3A7QA1atRIroiIZENmxhVtqxHXoBxDx/3Ec5NXMzFhG8/2bUGzqiWjHZ74MvMV4GfgFufc+cEL2avLdCVgR9C6Hf76MzjnRjrn2jnn2pUvr7ufRXKaCsUL8d+/tuX1/2vDjv3HuOzVOTw3eRVHT2gA0ewgM0nnPaBcCtteT+9BzOxJv1NAast5mYgTvKF6TnvaZNaJSC7So1llZgzuQp/WVfnvzLVc/NJ3LNywJ9ph5XkZbl5zzj2dyrZ/h3Co4cCHaZTZFMLxgm3nzLOaCpx59iMiuUzJIvl5/sqW9GpZhYe+XMaVb8zjhnNqcn+PRhQrmJmrC5JRUa91v5t1VjbHzQO6Ac8HrOsGzM3C5xSRbCSuQXmmDorj+SmreW/eBqav3MlTfZrTpYGa0CMtpKRjZt+ksjkR2AcsAt52zoX9TMLMKuGdtTTwVzUxs1J4N6Xu8cvMABY45x7yy4wA4s3sIbzOD5cD5wOdwh2fiGRfRQvmY+ilTenVsjIPfJHAje8soG+bagy5pDGlihSIdnh5RqjXdAxoCJyHN41BIf/neUBjoDYwBFhuZk3CFuWf7gR+xLvXBmCi/ziw+3ZdoHLSA+fcXOAavKF5EoAbgKudc99nQXwiks21rVmGifd15p7z6zF2yRa6DpvF18u2RTusPCOk+3TMrBfeNZgrnHM/BqxvC3wGDMY705kKrHbO5eg7/3Wfjkju9tPWfTw4OoHlW/bTo2klHr+sKRVKFIp2WDleOIfBeRIYGphwAPwJ2x4DnnTObca7fhKXkWBFRCKlaZWSjOl/Lg/2aMQ3q3fSddgsPlv4qwYQzUKhJp0GpHzRfxdQz/99LZpTR0RygHyxMdx1Xl0mD+hMo0oleOCLBG54ZwG/7jkc7dBypVCTzgbg1hS23e5vB+/+nd8yFpKISOTVKV+MT24/hycua8rijb/TfXg8785ZzykNIBpWoXaZfhz40MwSgNHATrx7XvoCzYDr/HJdAV2oF5EcJSbGuL5DLS5oXJGHv1rGY+NXMH7pVp67ogX1KmSn0b1yrpAH/DSzbnjXb9oC+YETeKNKP5o0fbWZFQJOOedy9GTm6kggknc55xizZAuPjV/B4WOnuO8v9bijS10NIJoOqXUkyMwo0zF4zWi7nXOJmYgv21LSEZHdB4/x6LifmJiwjUaVivP8FS1pXk0DiKYmSyZxc84lOud25taEIyICUK5YQV69rg1vXN+WPYeO0/u/c3hmkgYQzaiQk46ZVTazF8zsBzNba2YLzOw5f7QAEZFcqXvTSkwb3IUr2lTj9VlruWjEd3y/Tv2lQhVS0jGzBsAS4D7gILAAOAQMAJaYWf1wBygikl2ULJyfZ69owUe3ns3JxESuHjmfIWOWc+Bojr58HVGhnuk8C+wHGvhz51zrz5/TAG/ctWfDHaCISHZzbr1yTBkYxy2davPh9xvp/mI8367aGe2wcoRQk875wJDgKaKdcxuBof52EZFcr0iBfAy5pAmj7+pI0YL5uGnUDwz6dAl7Dh2PdmjZWqhJpwBwIIVtB/ztIiJ5RpsapZlwXyfu+0t9xi/dSrdhs5iQsFVD6aQg1KSzBLjX7y79BzMzoL+/XUQkTymYL5bB3Row/t5OVC1dmHs+/pHbP1jEjv1Hox1athPqKNM9gAl4Y6t9CmzDm9/mSqA+0NM5NzUL4owK3acjIqE6eSqRd+as5z9T11AgXwyP9GzMVe2q4303zxvCenOon3ieBFrjza/j8KYzGOKcm5LJWLMVJR0RyagNuw/x4OgEvl+/h451y/JMnxbUKFsk2mFFRFaNSFAEKA387pzLlcOxKumISGYkJjo++eFXnvp6JScTE/n7hQ256dzaxMbk7rOerBqR4LBzbktuTTgiIpkVE2Ncd3YNpg2Oo2Pdcjw5cSV9X5vLmh0p9cfK/dJMOmaWaGan0rmcjETQIiI5SeWShXn7xnaMuKYVm/YcpudL3zFi+s8cP5n3RhFLz9QGj+NdtxERkQwyMy5rVZVO9crx2PgVvDh9DZOWb+PZvi1oWb1UtMOLmAxf08kLdE1HRLLK9BU7eGTMcnYeOMqtneswqGsDCheIjXZYYZEl13RERCTjujapyNTBcVxzVg1Gxq+jx4h45q3N/QOIKumIiERJiUL5eery5nx829kAXPvmfB76chn7c/EAoko6IiJR1rFuOSYPiOP2uDp8+sMmLhwWz4yVO6IdVpZQ0hERyQYKF4jlnxc35sv+51KycH5ueW8h9/3vR347eCzaoYWVko6ISDbSqnopxt/biUFdGzBp+Ta6vRjP2CVbcs0Aoko6IiLZTIF8MQzoWp+J93WmRpkiDPhkCbe+t5Bt+45EO7RMU9IREcmmGlQszui7OvJIz8bMWbubC4fF8/H3m0hMzLlnPUo6IiLZWGyMcWvnOkwd2IXm1Uryz6+Wcd1b89mw+1C0Q8sQJR0RkRygRtkifHTr2TzTpzk/bdlP9+HxjIxfy8lTOWsoHSUdEZEcwsy45qwaTBvchc71y/PU16vo+9pcVm3fH+3Q0k1JR0Qkh6lUshBv3tCWl69tzebfj3DJS7MZNm0Nx06einZoacpRScfMbjezb81sr5k5M6uVjn36+WWDl0IRCFlEJEuYGb1aVmHa4C70almFl2b8TK+XZ/Pjpt+jHVqqclTSAYoAU4GhIe53GKgcuDjnNHm5iOR4ZYoW4MWrW/Fuv/YcOHqSPq/N5YkJKzh8PHvONJOeqQ2yDefccAAzS3b00tR3ddvDH5GISPZwfqMKTB0Ux7OTV/H27PVMXbGdZ/q04Nx65aId2mly2plORhU2s41mttnMJphZ62gHJCISbsUL5efJ3s359PZzyBcTw1/f+p5/jE5g35HsM4BoXkg6q4GbgcuAa4GjwBwzq59cYf+60UIzW7hr164IhikiEh5n1ynLpAGduaNLHT5b+Cvdhs1i6k/Zo7En6pO4mdmTwMNpFDvfOTczYJ92wA9AbefchhCfLxZYAnzrnLsvtbKaxE1EcrqEzXt54IsEVm0/wCUtKjP00qaUK1YwS58ztUncssM1neHAh2mU2RSuJ3POnTKzhUCyZzoiIrlJi2reAKJvzFrLSzN+YfYvu3m0VxN6t6qKmUU8nqgnHefcbmB3pJ7PvFpuASyN1HOKiERT/tgY7rmgPj2aVeKBLxIY9OlSxi7Zyr8vb07VUoUjGkuOuqZjZpXMrBXQwF/VxMxamVmZgDIzzOzpgMePmll3M6vj7/s2XtJ5PYKhi4hEXb0Kxfn8zo482qsJ36/bw4XDZvHB/I0RHUA0RyUd4E7gR+Aj//FE//GlAWXq4t2Lk6QUMBJYiXePT1Ugzjm3IKuDFRHJbmJjjJvOrc3UQXG0rlGaIWOWc83I+azbdTAizx/1jgTZmToSiEhu5pzj80WbeXLCCo6dTGRQtwaUL1aQYdPWsHXvEaqUKsz93RvSu3XVkI6b3TsSiIhIFJgZV7WrznkNyjNk7HKembQKA5JORbbsPcJDXy4DCDnxpCSnNa+JiEiYVShRiDeub0eZIgUIbvs6cuIUz09ZHbbnUtIREREAfj98PNn1W/eGb5psJR0REQGgSgrdp1NanxFKOiIiAsD93RtSOH/saesK54/l/u4Nw/Yc6kggIiLAn50Fnp+yOlO911KjpCMiIn/o3bpqWJNMMDWviYhIxCjpiIhIxCjpiIhIxCjpiIhIxCjpiIhIxGjAz1SY2S5gYwZ3L0cE5wnKJVRnoVF9hUb1FZrM1FdN51z55DYo6WQRM1uY0iirkjzVWWhUX6FRfYUmq+pLzWsiIhIxSjoiIhIxSjpZZ2S0A8iBVGehUX2FRvUVmiypL13TERGRiNGZjoiIRIySjoiIRIySjoiIRIySTjqZ2d1mlmBm+/1lnpn1DNhuZjbUzLaa2REzm2lmTYOOUdDMXjaz3WZ2yMzGmVm1yL+ayDOzf5qZM7NXAtapznx+PbigZXvAdtVVEDOrbGbvmdkuMztqZivMrEvAdtVZADPbkMzfmDOzif72iNSXkk76bQYeBNoA7YBvgDFm1sLf/gDwN+BeoD2wE5hmZsUDjjEc6AtcC3QGSgATzOz0qfpyGTM7B7gNSAjapDo73WqgcsDSPGCb6iqAmZUC5gAG9AQa49XNzoBiqrPTtef0v682gAM+87dHpr6cc1oyuAB7gDvw/vC3AQ8HbCsMHADu8B+XBI4Dfw0oUx1IBLpH+7VkYR2VBNYCFwAzgVf89aqz0+tpKLA8hW2qqzPr5ClgTirbVWdp1+HDwF6gSCTrS2c6GWBmsWZ2DVAMmAvUBioBU5PKOOeOAPFAR39VWyB/UJlfgZUBZXKjkcAXzrlvgtarzs5Ux8y2mNl6M/vEzOr461VXZ+oNfG9mn5rZTjNbYmb3mJn521VnqfDr6RbgQ+fcYSJYX0o6ITCz5mZ2EDgGvA5c7pxbhvdmAewI2mVHwLZKwCnOHEAvsEyuYma3AfWAIclsVp2d7nugH3ARXlNkJWCumZVFdZWcOkB/YB3QHRgBPAPc7W9XnaWuG16iect/HLH6yhdSmLIaaAWUwmvXfM/MzgvYHnynrSWzLlh6yuQ4ZtYQrwmks3PueCpFVWeAc25S4GMzm4/3gXojMD+pWNBuebKufDHAQufcQ/7jH82sPl7SeSWgnOosebcBPzjnlgStz/L60plOCJxzx51zvzjnkv7YlwCDgKReRsHZvgJ/fnPYDsTiDReeUpncpAPea11uZifN7CTQBejv//6bX051lgzn3EHgJ6A++vtKzjZgRdC6lUAN/3fVWQrMrAJwGfBmwOqI1ZeSTubEAAWB9XhvSLekDWZWCK93x1x/1SLgRFCZani9bpLK5CZj8HpftQpYFgKf+L+vQXWWIr8uGuF9uOrv60xzgIZB6xrw5/xXqrOU9cO7RPBJwLrI1Ve0e1DklAWvvbgzUAvvw/RpvF4bF/nbHwT2A32AZv4buhUoHnCM14AtQFegNfAt3tlSbLRfX4TqcCZ+7zXV2Rl18wLemWBt4Gxggl83NVVXydZXe/8D8GG864ZXAvuAu/X3lWq9Gd4XvjeT2RaR+op6JeSUBRiF9y3qGF7/9ekEdBP038yheN9MjwKzgGZBxygEvIzXtHQYGA9Uj/Zri2AdBicd1dmfrzPpH/y4/089Gmiiukq1znoCS/36WAPchz+IseosxTo7H+/6y1nJbItIfWmUaRERiRhd0xERkYhR0hERkYhR0hERkYhR0hERkYhR0hERkYhR0hERkYhR0hGJoBQm0QpeNqSy/6iAcjMD1tfy190aVL6cmf1oZnvMrL2/bnrAMT7MqtcqkhwN+CkSWR2CHn+Fd4Pj0IB1x9I4xnbgcry7x1NkZhWBGUBF4AL35+CO9+LNjfJVuiIWCSMlHZEIcs7ND3xsZseA3cHr03AsrfJmVhUv4ZQEujjn/hgc0zm3MuC5RSJKSUcklzGzGnjTqRfESzhrohySyB+UdERylzp4sz06IM45tz7K8YicRklHJHd5CG92x6ZKOJIdqfeaSO4yGW+04BFmVjDawYgEU9IRyV1GAzcDFwKfm1n+KMcjcholHZFcxjn3HnAn0Av42MxioxySyB+UdERyIefcSGAAcAXwvpnpf12yBXUkEMmlnHMv+dd1ngOOmdktTrM2SpQp6YjkYs655/3E8wTeFMT9oxyS5HFKOiJR5JyrlZH9zCyft7s75R9nA16vteSe40ngyYB9Y1DTukSJ/vBEcp6awAm8YW4yYqq/f82wRSSSTqYmXpGcw8xqAeX8hwecc6szcIwGQAn/4W+6iVQiSUlHREQiRs1rIiISMUo6IiISMUo6IiISMUo6IiISMUo6IiISMf8PaidyzNNTy64AAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAaYAAAEhCAYAAAA0xARjAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAABCyklEQVR4nO3dd5gUVdbH8e9vhpwlSRKQIEFAEVSQZAARTAi6pjWtiooJdF1l1VdUVJRdzGtes6uLYEKR5AoCCoKSREDJOYggSIbz/lE12jaTeqZnenrmfJ6nHmaqblWfrhn6TN26da7MDOecc66gSEl0AM4551wkT0zOOecKFE9MzjnnChRPTM455woUT0zOOecKFE9MzjnnChRPTC5mki6XZBHLHkmLJT0oqVQBiO0viYwh2UlaFvXzjVz653Ms/SX1Tmf9IEn5/qxLeq8bnpdBEd/3knRLfsdWmBRLdAAuqZ0HrALKA+cAA8Ovb0xgTJcT/F7/O4ExFAZjgEHprF+Wv2HQH5gMjIxa/yLwaT7HkpH2BP8P0vQCugLDEhJNIeCJyeXGLDP7Mfx6nKTGwJWSbjazA4kMLJ4klTSz3YmOI59tMrOvEh1ERsxsFX9MBglTkM9TsvKuPBdP3wClgappKySVkfSwpKVhl99SSXdKSoloU0rSo5LmSdouaZ2kjyQ1jX4BSYdLej1ss1vSEkmPh9s+B7oAHSK6nj6P2Pc4SePD1/hV0gRJx0Ud/xVJqyS1lzRV0k7gkczetKRzJE0Jj/uLpOmSzorYfoOkLyVtlrRF0leSTo86RjFJ94ddorskbZI0WVLHqHZXS5od0eYlSZUziy8/KTBA0sLw571W0lOSKkS1M0kPhL8LqyTtlDRJ0tERbZYB9YCLI36er4TbMupSGyzpVknLw5/xx5Kqh8t/JW2VtFLS7VH7VpP0nKRFknaEbd6SVDsb7/m3rrwwvsuA2hExL5NUIzwfN6ez/6DwNQ/J1kkuAvyKycVTfWAr8BMEH7YEXULNgfuBuUA74G6gMnBruF9Jgi7AwcDacFs/4CtJTc1sXXi8w4HpwA7gHuAH4DDg1PA4/YA3gFTgmnDdL+G+rYCJwHyC7j4D7gAmSmpnZrMj3kdF4G3gH8DfgZ0ZvWFJNwJPAO8TfCBtB44Jz0XkeXmRoBusGHAmMEpSTzMbHba5HRgA3AnMAioAbcNzkfZaQ8Jz9gRwG1A7PGctJJ1gZvszijMHFP78/sDM9mWx3wMEXbpPAx/x+8/+KEldoq6kLwVWADcQ/A7cB0yQ1NjMNhN0D38CzOb3bsWNWbz+JcA8gt+FQ4HHgNcIfr9GA88TdEEPkTTXzD4J96sM7Apj3wjUIjjXU8LfwV1ZvG6a+4FqwLFA2h8nu81snaT3CX4vH09rLCkVuBL4r5n9nM3XKPzMzBdfYlr4/YO9CcEH7SHAX4B9wA0R7S4J23WO2v9OYA9QPYPjpwJlgG3AgIj1rxF88NfKJLbPgcnprH8X2AJUilhXAdgMjIxY90oY89nZOA8VwhhHZtU2Yp+U8JyNBT6IWD8qs+MQJLf9wP9Fre8Qxtsrjj/fZeEx01vaZrJf2of7K1Hr/xzue1bEOgM2AWWj3uNe4P6oWN5I57UGBR9ff1hnwCKgWMS6YeH6uyLWFQM2AC9n8l5SCf7oMeCcbLzuoKjfoVXpHPPEsG2niHVnhevaxevnVxgW78pzubGA4INkM/AS8JyZPRWx/TRgOTA17KoqFv4VPhYoTnD1BICkP0maJmkLQYL7FShHkPzSnAqMMrM1OYi1c7jvlrQVZvYL8CFB91+kfQSJIisnhDE+n1kjSW0kjZK0Pjz2XqAbf3xvXwM9w+6tjpJKRB2mG0FSezPqXE4juCrsnMnrp0Tuo4hu1EyMJvirP3qZn8k+7QiufN6IWv82wfuOPs+fmNmvad+Y2TLgK4LBBDk1zv54Vbcg/HdMxOvsA34kSDy/kXRd2E26PYx3Rbgp8ueUY2b2OcH5uyZi9TXAHPP7VH/gicnlxjkEH1Y9gfFAP0mXRmyvTnCPYG/UMj3cXgVA0pnAO8D3wEXA8eFxNwKRw8+rkPMb3pUJugmjrSO44ou0wbLXLVYl/DfDmCQdBkwIX/9GgmR2LMGIssj39iBB9+RZwBfAT5JelpR2v656+O+PHHw+K0TEkp5/R7XPzojFzWY2I51lRyb7pHU7/uE8h4ngp4jtadanc4z1BF2UORXdHbYnk/W/nf+wS/ZfBL/HvYHj+P0Pp3g+AvEMcK6kKpLqEfzx9mwcj18o+D0mlxvzLByVJ+kzYA4wVNKI8C/hn4ClwJ8y2H9Z+O8FwI9mdnnaBknFOfiDbBM5/9DaDNRIZ32NcFuk7D4fsyn8tzbBfY30nEZwz+pPFowkA4JBIX94QbO9wMPAw5JqAGcQdEOVAc4nvG9HcNWY3r2In9JZl2YQEHkluymDdrmVdh5rAN+lrQyv7KpwcIyHpnOMQ4HVeRJd5i4AJphZ2n3PtHua8fYa8BBBd/ghBPcv38yD10lqnphcXJjZbkm3AR8Q3HgeSnBV0AfYbmYLMtm9DEHXSaRLCPr5I40FekuqaWbpXf0A7Ca40R1tInC6pPJmtg1AUnmCgQifZxJbZqYS3PPqS0RXUZS0BLQ3bYWkIwjuDaV7pWXBYI8XJfUEWoSrxwEHgLpmNi6WIMMusmWx7JNDXxGc/wsIrhLTnE/wWTMxqn1PSWXTuvMk1Se4ShkS0WY3wUjPvFaGcKBMhCtyeKwMYzazXyS9SdCFVw54K+xSdhE8Mbm4MbMPJX0N/FXSUwR/CV5BMNLqnwSjq0oADQm6rHqFXUOfAr0kPUpwb6cNcBPBYIVI9wCnE9yzepCgW6s2cJqZ/TlsM5+gS/F8YDGwzcwWEoyWOiOM5WGCq6LbCT6Q7svh+90maSDwpKQR4fvdBhwN7DKzJwm6hvYBr4XnoCZwL8H9i8gh8x+E5+cbgiui1gRXW8+Fr7U4jPspSU0IPuR3Edwn6Qa8aGb/y8n7yEBVSe3SWb8uTHQHMbPNkoYBAyX9SjCirhnByMHJwMdRu+wExkoaSnBv6l6C5PBoRJv5QCdJZxB0u27K6PVz6VPgdkl/J+hqPhk4N4fHmg9UlnQdMIPgd2FuxPZ/8ft9Ju/GS0+iR1/4knwLv4/Ka5TOtlPDbQPC70sRdCUtIPhLcjPBjf5BhKOnCD6gBwNrCIaCTyT4YF7GwSO8GgL/IeiO2g0sAR6N2F6D4ANxWxjH5xHbjidIFNsJBldMAI6LOv4rpDOiKovzcS7BIISdBB+s04AzIrb/KXz/uwi6uC4IX2dZRJtbCa44fgqPszA8R8WjXuuSsN2v4fv4nqCbrk4cf77LyHhU3lNZ7CuCYe8LCe7jrCUYOl4hqp0RDC3/O8GV4y6Ce2tHR7VrGq7fEe7zSrh+EOmPjhucnd9VokZvElzhPENwX3MbwR9Ih3PwiLuMXjeyTdnwd/TncNuydM7TQuDrRP9fLqiLwpPknHP5Jnw49gEzuyvRseS3sCt3AXC1mb2U6HgKIu/Kc865fCCpDtCIoMtyLfBWYiMquHy4uHPO5Y+rgM8IRh5eZGYZVhQp6rwrzznnXIHiV0zOOecKlKS5x6SggvK9BENj6xGMyhpFUAMrw4cLJV0OvJzOptIWUZhRUj+Cwpg1CUZO9TezL7KKq2rVqla/fv3svxHnnHPMnDlzk5lVS29b0iQmgmq/tYG/ETwnUJvgeYD/8Ht16YzsIBhm/JuopHQ+QcXffgTPW/QDRktqbmYryET9+vWZMWNGbO/EOeeKOEnLM9qWNInJzOYR1LBK82NYaWCUpAqW+dPTZuHUCRm4heD5iBfC72+UdBpwHUEZfOecc/kk2e8xVSB4yDKzwpIApcOJw1aFVZ5bp20Iqzi3ISh3E2ksQcHNg0jqK2mGpBkbN2Y1PYxzzrlYJG1iklSJoMzMC5b55GULCeYKOhu4kOAJ8ykKpgGHYLbVVA6udLye9It+YmbPm1lbM2tbrVq6XaTOOedyKOGJScFUyJbFcmLUPmUJZsdcTXDPKUNm9qWZvWpms8LBDGk11G6MbhodWjrrnHPO5bGCcI/pMQ6eWCzabwMQJJUjqIUGQT2y7E55DICZ7Zc0A0i7YtpEMDNo9NVRddKfL8Y551weSnhiMrNNZHN+mHCagtEEVzOnmdn2WF9PkoBWBJWcMbM9kmYSDEMfHtG0GzAi1uNnx/vfrmbomIWs2bKTWpVKc1v3JvRqnZu50ZxzrvBIeGLKrjApjSUY8NALKBt26UEw2+aesN0EYLqZDQy/v4egGvMP4b43ESSm6yIOPwx4XdJ0YApwLcHw9LiXpH//29UMHDmXnXuDCVJXb9nJwJFBRXxPTs45l0SJiWDkXNr8MIuitp3E75O9NQRWRmyrBDxP0FW3FfgW6GxmadN7Y2bvSKoC3EXwgO08oKeZZTjOPqeGjln4W1JKs3PvfoaOWeiJyTnnSKLEZGafE3ThZdWuftT3Awjmh8lqv38RPLCbp9ZsSb9uY0brnXOuqEn4qLyiplal9GeJLlMilV93Zzbq3TnnioaYE5Ok1pJGStokaZ+kY8L1D4bVElwmbuvehNLFU/+wLjVF7Nizn1MfncSkRf7ArnOuaIspMUnqCHxJMN3xW1H7HyAYNOAy0at1bR7q3ZLalUojoHal0vzzvKP477XtKVk8hUv/PZ2/Dp/Nlh17Eh2qc84lREzzMUmaDPxEMCouFdgDtDWzbyT1Bh4zs7p5EWhB1bZtW4tXEddde/fzxIQfeG7SEg4pU4L7zz6SHi1rxuXYzjlXkEiaaWZt09sWa1feMcAzFmSz6Iy2CfD6PLlQqngqfzutKR9c34Hq5Uty3ZvfcN0bM9mwLaZniJ1zLqnFmph2AWUy2FaTYDi2y6UWtSvywQ0d+NtpTZiwYAPdhk1i+IyV+GzDzrmiINbENBnoLyny7n3ap+WVBPPZuzgonppCvxMbMfrmThxxaDlue3cOl/57Ois3Z1VI3Tnnklusielugu682eHXBlwm6X8ED7/eG9/wXMNq5Xinb3vuO/tIvln+M90fm8QrU5Zy4IBfPTnnCqeYEpOZzQY6ExQ3vZPggdcbws1dzGxhfMNzACkp4tL29RkzoDNt61dm0EfzOe+5L/lxw7ZEh+acc3EX06i8P+wolQIqA1vMrMj2L8VzVF52mBkjv1nNfaPms3PPfm7u2pi+nRtQPNWflXbOJY94jsr7jZntMrM1RTkpJYIk+rSpw/hbutC1eXWGjlnI2U9NYd5qH3finCscYn2OKavBDWZmp+QupOSS31dM0T6dt467P5jH5l/30LdzA24+pTGloipLOOdcQRPPK6YUgvtKkUtVoANwBNkosuri67QWNRg/oAt9jqnNM58vpufjXzB96eZEh+WcczmW43tMfziI1BB4HxhgZuNzfcAkkugrpkiTf9jEHSPnsOrnnVzSrh6392hKuZJJU0DeOVeE5Mk9pkhmthgYAgyNx/FcznRsXJUx/TtzRYf6vDFtOacOm8j/Fm5IdFjOOReTeA7l2kjQnecSqGzJYtxz5pG8e+0JlClZjCte/ppb3pnFz796UVjnXHKIS2KSVBm4BVgcj+O53GtT7xA+vqkjN57ciA9nr6HboxP5eM5aL2vknCvwYroBIWkpBxdvLQEcGn7dJx5BufgoWSyVW09tQo8WNbl9xByuf+sbTm1+KIN7taB6hVKJDs8559IV63DxVzg4Me0ClgPDw3tNRUpBGvyQmX37D/DS5KUMG7eIEsVSuPv05pzXtg6SD6R0zuW/zAY/xGVUXlGWLIkpzZKN27lj5FymL91Mh0ZVeOicVtStklHBeOecyxt5PirPJY8G1crx9tXtGNyrBbNXbqX7Y5N4afJS9ntRWOdcAeGJqQhKSRF/blePsQM6c3yDytw/aj7nPjuVH9Z7UVjnXOJlmZgkHZC0P5vLvvwI2sVHrUqlefnyY3ns/KNZtulXTn9iMk9M+IE9+w4kOjTnXBGWnVF593HwgAdXSEiiV+vadGxclXs/ms+wcYv4ZO5aHjm3Fa3qVEp0eM65IihpBj+Ez0rdC3QD6gGbgFHAXWb2Uyb7fQ50SWfTfDM7MmwzCLgnavt6M6uRVVzJNvghK+Pmr+eu9+eycdturu7UgP5dj6B0CS8K65yLr8wGPyRTIbVaQG3gb8D88Ot/Af8BTs1kv94Ez1qlKQnMBf4b1W4hcGLE9/tzF25y6tb8UI47vDJDRn/Pc5OWMOa7dQzp04p2DaokOjTnXBGRoysmSUcBTYCDntI0s9fiEFd24+hJcNVUycx+yeY+FwOvAfXNbGW4bhBwrpm1iDWGwnbFFGnqj5u4Y+RcVmzewcXH1+WOHk0pX6p4osNyzhUCcbtiklQJ+Bhol7Yq/Dcyu+VbYgIqALuBWCYrvBoYnZaUIjSQtBrYA0wD/m5mS9I7gKS+QF+AunXrxhx0sjihUVU+7d+JYWMX8e8pS/lswQYeOKcFJzc9NOudnXMuh2IdLv4gUAXoTJCUzgFOBt4ElgDHxTW6TIRJ8n7gBTPL1mhASUcQ3G96IWrTNOByoAdB4qoBTJWUbv+VmT1vZm3NrG21atVy9gaSRJkSxbjrjOaMuO4Eypcqxl9emUH/t79lsxeFdc7lkVgTU3eC5PRV+P0qM/vczC4FxgM3xxqApMGSLIvlxKh9ygIfAasJ7jll19XAWoKrvt+Y2Wgz+6+ZzQnnkzqD4NxcFuv7Kaxa1z2EUTd24uZTGvPx3LV0HTaRD2ev8aKwzrm4izUx1QSWmNl+ghp55SO2jQROz0EMjwHNslimpzWWVA4YHX57hpntys6LSCpBkGhezuoKy8y2A98BjWN5I4VdiWIpDOh2BB/d2JHDDinNTf/5lqtfm8m6rdn6ETjnXLbEmpjWAZXCr5cD7SO2NcpJAGa2ycwWZLHsAJBUHvgUSAV6hgkku3oRTAP/UlYNJZUCmhJcXbkoTWtUYGS/DtzZsxmTf9xIt2ET+c/0FX715JyLi1gT02R+T0avA/dIek7S0wSz146JZ3CRwqQ0FjiE4H5QWUk1wqVERLsJkh5K5xB9gQnpDWiQ9A9JXSQdLul44F2gLPBqXryXwiA1RVzduQGf3tyZI2tXYODIuVz0wjSW//RrokNzziW5WBPTvQRXLBAkoqcJuu8uBD4EboxfaAdpQzAasDmwiOBqJm05IaJdQ4Iux99IakAwSCN60EOaOgTPQy0k6JLcDbQzs+VxjL9Qql+1LG9d1Y6Herdk3uqgKOyLXyzxorDOuRxLmsoPBVVhfo4pVuu27uKu9+cy/vsNHHVYJR7p04omNcpnvaNzrsiJ27QXkr6R1F+SP8jiDlKjYileuLQtT1zYmpWbd3DGk1/w6LhFXhTWOReTWLvy1gOPACslfSLpgnCggHNAUBT2rKNqMf6WLvRsWZPHJ/zAGU9+wayVWxIdmnMuScSUmMysB8H9mL8B1YG3gPWSXpJ0Uh7E55JU5bIlePyC1rx0WVt+2bmP3v+awuBR89m5p0iWIHTOxSBX95gkNQMuAS4CDiN44LZenGJLCn6PKWu/7NrLkNELeGvaCupWLsOQPi05oWHVRIflnEugPJta3cy+J5iv6U5gDcHVlHN/UKFUcR48pyX/ubodKYKLXpjGwJFz+GXX3kSH5pwrgHKcmCSdLOllgvtOrwGryNvh4i7JtW9YhdE3d+aazg145+uVdBs2kfHz1yc6LOdcARPrqLwWkoZIWgGMIyiI+jjQ1Mzam9m/8iJIV3iULpHKwJ7NeP/6DhxSpgRXvTaDG//zLZu27050aM65AiKme0ySDgBbgeHA62b2RV4Fliz8HlPO7dl3gGcnLubJz36gXMli3HPmkZx9dC0kZb2zcy6pxfMe0/lADTPr60nJ5VaJYincdEpjPr6pE/WqlKX/O7O48tUZrNmyM9GhOecSKNbh4sPNzPtcXFwdcWh5Rlx3Anef0ZwvF//EqY9O4o2vlnPAyxo5VyTFPPhB0mWSPpU0X9KSqGVxXgTpCr/UFHFlx8MZ078zRx1Wkbven8eFL3zF0k1eFNa5oibWwQ93Ay8DtYBZwMSoZVKc43NFTN0qZXjjyuN5pE8r5q/9hdMem8RzExezb7+XNXKuqIh18MMy4D0zG5BnESUZH/yQd9b/sou73p/HuPnraVm7Ig/3aUXzWhUSHZZzLg7iOfihCsGU5s7luUMrlOL5S9rw9EXHsHbrTs56ajL/HLuQ3fu8rJFzhVmsiWkicFReBOJceiRxequajBvQhbOOqsWTn/3I6U9MZubynxMdmnMuj8SamPoDV0i6VFJVSSnRSx7E6ByHlC3BsPOP5uUrjmXH7n2c++xU7v3oO3bs2Zfo0JxzcZaTB2wBMtrJzKxYrqNKIn6PKf9t372PRz5dwGtfLqfOIaUZ0rsVHRt7UVjnkklm95hiTSL3kXFSci5flCtZjPvObsEZrWpx+4g5/PmlafypbR3uPL05FUsXT3R4zrlc8qnVc8mvmBJr1979PD7hB56ftIQqZUtwf68WdD+yRqLDcs5lIc+mvXAu0UoVT+X205ryfr8OVClXkmten8n1b37Dxm1eoMS5ZJWTyg+tJY2UtEnSPknHhOsflHRa/EN0Lmst61Tkwxs6cFv3Joybv56uwyYyYuYqvEfAueQTa+WHjsCXQFOCadUj9z8AXBu/0JyLTfHUFK4/qRGf3NyRRtXLcevw2Vz+8tes9qKwziWVWK+YhgBjgCOBW6K2fQMcE4+gnMuNRtXLM/ya9gw6szlfL9vMqcMm8tqXy7worHNJItbEdAzwjAX9I9H/yzcB1eISlXO5lJIiLu8QFIU9pt4h/N8H33H+81+yeOP2RIfmnMtCrIlpF1Amg201CSYRzDOSXpC0WNJOSRslfSCpWTb26xNWQ98d/ntOOm36SVoqaZekmZI65c27cPnpsMpleO0vxzH03FYsXLeNHo9/wb8+/5G9XhTWuQIr1sQ0GegvKTViXdqV05XAZ3GJKmMzgMuBZkB3QMB4SRk+vCKpPfAO8CZwdPjvcEnHR7Q5n2CK+AeB1sBUYLSkunnyLly+ksR5bQ9j/K1dOLlJdR75dCG9np7CvNV5+neUcy6HYq38cBQwBVgGvAvcDTxJUD+vDXCsmS2Mf5gZxtMKmA00zeh1Jb0DVDazbhHrxgMbzezC8PtpwBwzuzqizQ/Au2Y2MLMY/Dmm5DN67lru/uA7ft6xh2u7NODGkxtTqnhq1js65+Imbs8xmdlsoDOwHriT4IrlhnBzl3xOSmWBK4AVBIkyI+2BsVHrxgAnhMcpQZBUo9uMTWuTzmv3lTRD0oyNGzfGHrxLqB4tazL+ls6c07o2T/9vMT2f+IIZyzYnOiznXCjm55jM7BszOwUoD9QBKpjZSWb2bdyjS0d4L2g7sB3oAZySxXTvNQgSaaT14XqAqkBqFm3+wMyeN7O2Zta2WjUf75GMKpUpwT/OO4rX/nIcu/ce4LznvmTQh9/x624vCutcouW48oOZ7TKzNWa2IzcBSBosybJYTozY5U2C+0BdgEUE94syGpDxW7jRL5vOuuy0cYVM5yOqMXZAZy5rX59Xv1zGqY9OYuIivwp2LpFiKuIqKbPBDQcIRuXNBF4ys+grkIw8BryRRZsVaV+Y2dbwdX6Q9BXwM9AHeD2Dfddx8JVPdX6/QtoE7M+ijSvEypYsxqCzjuSMVjX524g5XPbv6fQ5pg53n9GMSmVKJDo854qcWK+YBDQBTgTqAaXCf08kGCl3OMGAiHmSmmfngGa2ycwWZLFkdFWmcCmZyUt8CXSLWteNYOQdZraHIJlm2MYVDW3rV+aTmzpx/UkNeX/WaroOm8TouWsTHZZzRU6siWkYwbNMbcysoZmdYGYNgWPD9fcCjYGNwAPxDFRSI0m3S2ojqa6kE4DhwG5gVES7CZIeitj1ceBkSQMlNZU0EDiJ4Eot8n1dLukqSc0kPQ7UAp6N53twBV+p4qnc1r0pH97QgUMrlOS6N7/h2tdnsuGXXYkOzbkiI9bENBgYFD3QwcxmEiSlwWa2ChhKMHovnnYTXJmNBn4keDZpG9DezNZFtGtI8LBvWmxTgQuAy4A5wKXA+WY2LaLNOwSz894FzAI6Aj3NbHmc34NLEkfWqsgH13fg9tOa8tnCDXQdNpHhM1Z6UVjn8kGszzHtBHqb2eh0tvUARppZaUmdgbFmVip+oRZM/hxT4bd443buGDGHr5f9TKfGVXnwnJYcVjmr8TbOuczEcz6mZcBVGWzry+/PE1UFforx2M4VSA2rleOdvu25/+wj+Wb5z3R/bBKvTFnKfi8K61yeyMnU6m9ImgOMADYQjF7rA7QALgrbdQWmpXsE55JQSoq4pH19TmpanTvfm8egj+bz0Zy1PNynJY2ql090eM4VKjFPrS6pG8H9pDZAcWAvQQ27e8xsfNimFLDfzPbGN9yCx7vyih4z471vV3PfqPns2L2fm05pxDVdGlI81SeEdi67MuvKizkxRRw0haDLbpOZFdlSzZ6Yiq6N23Yz6KPv+HjOWprVrMDQc1vRonbFRIflXFKI5z2m35jZATPbUJSTkivaqpUvydMXHcNzl7Rh0/bdnP30FIaMXsCuvfsTHZpzSc37HpzLpe5H1mD8gC6ce0wdnp24mJ6Pf8H0pV4U1rmcyjIxSdov6bjw6wPh9xktXgHTFUkVyxTn4XNb8caVx7Nn/wH+9NyX3P3+PLbtKvS3WZ2Lu+yMyrsPWBXxtY+RdS4DHRtXZeyAzvxjzCJenrqUCd+v54HeLTmpSfVEh+Zc0sjx4AcX8MEPLiMzl//MHSPm8MOG7fRuXZu7z2jOIWW9KKxzEOfBD5JaSxopaZOkfZKOCdc/KOm03AbrXGHRpt4hjLqpIzed3IgPZ6+h67CJjJqzxssaOZeFmBKTpI4E1bqbAm9F7X8AuDZ+oTmX/EoWS+WWU5vw0Y0dqVWpNDe89S3XvD6T9V4U1rkMxXrFNIRgWvIjgVuitn0DHBOPoJwrbJrVrMB7/U5gYI+mTFy0ka7DJvLO1yv86sm5dMSamI4BnrHgf1P0/6hNgM8z7lwGiqWmcE2XhnzavzPNalbg9hFzufjFaaz4KVeTQDtX6MSamHYBGZVVrkkws6xzLhOHVy3L21e344FzWjBn1Va6PzaJlyZ7UVjn0sSamCYD/SWlRqxL+990JZDZ1OvOuVBKirj4+HqMu6Uz7RtW4f5R8+nzzFQWrd+W6NCcS7hYE9PdBN15s8OvDbhM0v+AdgTFXZ1z2VSzYmleuqwtj19wNMt/+pXTn/iCJyb8wJ59XunLFV0xJSYzm00wM+164E5AwA3h5i5mtjC+4TlX+Eni7KNrM/6WLpzWoibDxi3irKcmM3vllkSH5lxC5Ka6eCmgMrDFzIrs3Vt/wNbF27j567nr/bls3Labqzo1YEDXIyhdIjXrHZ1LInlVXXyXma0pyknJubzQrfmhjLulC+cfexjPT1pCj8cn8eVinxDaFR1eXdy5AqhCqeI81LsVb111PAcMLnzhK/7+3lx+8aKwrgjwxORcAXZCo6qM6d+ZqzsdztvTV3DqsEl8tmB9osNyLk95YnKugCtdIpU7T2/OyH4dqFi6OH95ZQY3v/0tP23fnejQnMsTnpicSxJHH1aJj27sSP+ujflk7lq6PTqJD2d7UVhX+Hhici6JlCiWQv+uRzDqxk4cVrkMN/3nW65+bQZrt+5MdGjOxU1SJSZJL0haLGmnpI2SPpDULIt9rpb0haTNkrZI+l9YJT2yzSBJFrWsy9t341zONalRnpHXncBdpzdj8o+bOHXYJN6atoIDXtbIFQLZmVo9q+nU83Nq9RnA5UAzoDvBA77jJRXPZJ8TgXeAU4DjgYXAGEmNo9otJKj3l7a0jGfgzsVbaoq4qlMDxvTvTIvaFfn7e3O56MWvWLbp10SH5lyuZPmAraRBxDCdupnlW1kiSa0IyiM1zW7VCUkC1gIPmNmT4bpBwLlm1iLWGPwBW1cQmBnvfL2SBz7+nr0HDnBrtyb8pePhpKYo0aE5l67MHrAtltXOZjYo7hHFgaSywBXACmBZDLuWAEoBP0etbyBpNbAHmAb83cyWZPDafYG+AHXr1o0tcOfygCQuOK4uJzapzl3vz+WBT75n1Jw1PHLuUTSpUT7R4TkXk6S6xwQgqZ+k7cB2oAdwipnFMm52cLjvhxHrphF0EfYArgZqAFMlVUnvAGb2vJm1NbO21ar5FFSu4KhRsRQvXNqWJy9szaqfd3LGk1/w6LhF7N63P9GhOZdtOaqVJ+kooAnBlccfmNlrMR5rMEFB2MycZGafh+0rAtUJ7gP9FTgM6JCd0kiSbgbuB7qa2fRM2pUDlgBDzGxYZsf0rjxXUG3+dQ/3ffQd789awxGHluPhPq1oXfeQRIflHJB5V15MiUlSJeBjgikuIBh8ABH3oMwspmqTkqoCVbNotiK9xCOpBEGX3LVm9noWr3MzwdVSDzObnI24/gcsMLPrMmvnickVdJ8tWM+d781j3S+7+EuHw7n11CMoUyLLXnzn8lSu7jFFeRCoQjD1xRfAOQSz1v4FaA9cEGtwZraJYFr2nFC4lMy0kXQLcB/QM5tJqRTQFPhfDuNyrsA4uemhjB1QmYc/XcBLk5cybv56hvRuyQmNsvp70LnEiPUeU3eC5PRV+P0qM/vczC4FxgM3xzO4SJIaSbpdUhtJdSWdAAwHdgOjItpNkPRQxPe3AUMIkuciSTXCpWJEm39I6iLpcEnHA+8CZYFX8+r9OJefypcqzuBeLXm7bztSBBe9OI07Rsxh604vCusKnlgTU01giZntB3YBkcN9RgKnxyuwdOwmeCZpNPAjwbNJ24D2Zhb5MGzDMM401wPFw/ZrI5bHI9rUAf5D8CzTyPC12pnZ8rx4I84lSrsGVfi0f2eu6dKA/85YyamPTmTcfC8K6wqWWLvy1gGVwq+XE3TffR5+3yg+IaXPzFYSjJrLql39zL7PYJ+YuyCdS1aliqcysEczTm9Zk7+9O4erX5vBGa1qMuisI6laLtNecefyRaxXTJMJkhHA68A9kp6T9DQwFBgTz+Ccc3mnVZ1KfHhDR27tdgRjv1tP12ETee/bVV4U1iVcrKPyGgK1zOyLsAzQEOB8oAzwKXCjmRWpqTZ9VJ4rDH5Yv42/jZjDtyu2cFKTajxwTktqVSqd6LBcIRa34eLuYJ6YXGGx/4Dx6tRlDB2zkNQUcXuPplx8XF1SvKyRywNxT0ySDiN4sDW9B2w/i/mAScwTkytsVm7ewcCRc5n84yaOO7wyD/dpxeFVyyY6LFfIxPMB2wbAm8BxaavCfy382mJ9wDbZeWJyhZGZMXzGKu7/eD579h1gQLcjuKrj4RRLTboqZq6AiucDti8CdYH+wAKCgqfOuUJGEn869jC6NKnG3e/PY8joBXw8Zy0P92lF81oVEh2eK+RivWLaBlxuZiPyLqTk4ldMrrAzM0bPW8f/fTCPLTv2ct2JDbnh5EaULFakOkdcnGV2xRTrdfkq/CrJuSJFEj1b1mTcgC6cdXQtnvzsR05/YjIzl0fPHONcfMSamB4Ebg/nQnLOFSGHlC3BsD8dzStXHMvOPfs599mp3PvRd/y6O68nrnZFTUz3mMzsdUlNgWWSvuLgyfbMzC6LW3TOuQLnxCbVGTOgM498uoCXpyxj3Pz1PNS7JZ0a+9xkLj5ivcd0OfBvYD+wgYO79czMGsQtuiTg95hcUTZ96WbuGDGHJZt+5U9t63Bnz+ZULFM80WG5JBDP4eLLgRnAlWa2JT7hJTdPTK6o27V3P49P+IHnJy2hctkS3H92C05rUSPRYbkCLp6DH6oA//Kk5JxLU6p4Kref1pQPru9AtXIlufaNmfR7cyYbtu1KdGguSeWkiGuzvAjEOZfcWtSuyAc3dOC27k0Y//0Gug2bxIiZXhTWxS7WxHQzcLWkiyVVkZQSveRFkM655FA8NYXrT2rEJzd1olH1ctw6fDaXvfw1q37ekejQXBKJ9R7TgfDLjHYyM4u1mkRS83tMzqXvwAHj9a+W8/CnCxBwe4+m/Pn4el4U1gHxLUl0HxknJeec+01KirjshPqc3LQ6f39vLv/3wXd8NHsNQ/q0omG1cokOzxVgPu1FLvkVk3NZMzNGfLOa+0fNZ+fe/dx8SmP6dm5AcS8KW2TFc1Sec87FTBLntqnDuFs607VZdYaOWUivp6cwb/XWRIfmCiBPTM65fFO9fCn+dXEbnv3zMaz/ZTdnPz2FRz5dwK69+xMdmitAskxMkvZLOi78+kD4fUaLF81yzmXptBY1mXBLF3q3rs2/Pl9Mzye+YMayzYkOyxUQ2Rn8cB9BVfG0r/2mlHMu1yqWKc7Q847izKNqMXDkXM577ksubVeP205rSrmSRWpwr4vigx9yyQc/OJd7v+7ex9AxC3n1y2XUqliaB3u3pMsRXhS2MPPBD865Aq1syWIMOutI3r22PaWKp3DZv6dzy39nsWWHT/9WFMWcmCRdJulTSfMlLYlaFudFkBGv/YKkxZJ2Stoo6QNJmZZIknS5JEtnKRXVrp+kpZJ2SZopqVNevhfn3MHa1KvMxzd14oaTGvHhrDV0HTaRT+auTXRYLp/FlJgk3Q28DNQCZgETo5ZJcY4v2gzgcoJ6fd0BAeMlZVVnfwdQM3Ixs98qTEo6H3icYCLE1sBUYLSkuvF+A865zJUqnspfuzfhgxs6UKNiKfq9+Q3Xvj6TDb94UdiiItaSRMuA98xsQJ5FFANJrYDZQFMzW5hBm8uBp8wsw0fNJU0D5pjZ1RHrfgDeNbOBmcXg95icyzv79h/ghS+W8uj4RZQqlsJdZzTnvDZ1kLysUbKL97QXH+U+pNwLp3e/AlgBLMuieWlJyyWtkjRKUuuI45QA2gBjo/YZC5yQwWv3lTRD0oyNGzfm+D045zJXLDWF605syKc3d6JpjQr87d05XPrv6azc7EVhC7NYE9NE4Ki8CCS7wntB24HtQA/gFDPbnckuC4G/AGcDFwK7gCmSGofbqwKpwPqo/dYD6c52ZmbPm1lbM2tbrZqPHHIurzWoVo63+7bj/rOP5JvlP9P9sUm8PGUp+w/4qOLCKNbE1B+4QtKlkqrGY9oLSYMzGJwQuZwYscubBPeBugCLgOGSymR0fDP70sxeNbNZZvYFcD6wGLgxuml0aOmsc84lSEqKuKR9fcbe0oXjDq/MvR/N57xnp/Ljhm2JDs3FWcKnvZBUleCqJTMrzOyga/ewG+5n4Fozez2G13wZqGFmPcJj7AAuNLPhEW2eBlqYWZfMjuX3mJzLf2bG+7NWc+9H89mxez83ndKIa7o09KKwSaRAT3thZpuATTncXeFSMts7BHdN0wZNYGZ7JM0EugHDI5p2A0bkMC7nXB6SxDmt69CpcTXu+fA7/jF2EaPmrGXouUfRsk7FRIfncilpKj9IagT0AcYDG4E6wB1AJ6CZma0L200ApqeNppN0D/AV8ANQAbgJuAToYGbTwzbnA68D/YApwLXAlcCRZrY8s7j8ism5xBvz3Trufn8eP/26h6s7NaB/18aUKp6a6LBcJuJ5xZRIu4ETgVuBSgSDEyYB7dOSUqghsDLi+0rA8wQDGbYC3wKd05ISgJm9I6kKcBfBc07zgJ5ZJSXnXMHQ/cgatGtQhQc//p5nJy5mzHfrGNK7Jcc3qJLo0FwOZHnFJGk/wYf/9PAeU2Y7+NTqzrmEmvLjJu4YOYeVm3fy53Z1uf20ppQvldUz+C6/5faKyauLO+eSRodGVRnTvzP/HLuIf09Zymffb+CBc1pyUtPqiQ7NZVPS3GMqqPyKybmC65sVP3P7u3P4YcN2zmldm7vPaE7lsiUSHZbDq4s754qoY+oewqibOnLTKY35aPYaug2byKg5a/A/yAu27NxjOjmWA5rZZ7mKKMn4FZNzyeH7tb9w+4g5zFm1lW7ND2VwrxYcWqFU1ju6PJHZFVN2ElPkgIeMKidauM3MrEiN0fTE5Fzy2Lf/AP+espR/jl1EiWIp3NmzGecfe5gXhU2AeAwX30bwsOkI4Nd4Beacc/mpWGoKfTs35NTmNbh9xBzuGDmXD2evYUjvVtStkmFlM5fPsnPF1AW4lODh1hTgPeDVotZllxG/YnIuOR04YLz99Uoe/OR79h04wF9PbcIVHQ4nNcWvnvJDrgY/mNlEM7uS4AHVa4HqwBhJKyQ9lNUMss45VxClpIiLjq/LuFs6c0LDqgz++Hv6PDOVReu9KGyiZXtUnpntMrO3zKwHUJdgxteewDxJT+VVgM45l5dqVizNS5e15fELjmbF5h2c/sQXPD7+B/bsO5D1zi5P5HS4+E8Ek/MtIxj4cEic4nHOuXwnibOPrs24AZ3p0aImj45fxFlPTWb2yi2JDq1IiikxSeog6VlgLfAqwWR9pxMURXXOuaRWpVxJnriwNS9e2pYtO/Zyzr+m8OAn37Nzz/5Eh1akZDkqL6zqfQnwZ6A+QeHUvwLDzWx7nkbnnHMJ0LX5oRzXoDJDRi/g+UlLwqKwrWjf0IvC5ofsPsf0CzCSYGqITCtum9mSuEWXBHxUnnOF29TFmxg4ci7Lf9rBhcfVZWDPplTworC5Fo8HbNNkWcfDH7B1zhU2O/fs59Hxi3jxiyVUL1+KB85pwSnNDk10WEkttw/YXhHneJxzLqmULpHK33s2o2fLmtz+7hyufHUGZx1Vi3vObE6VctmeQNtlk1cXzyW/YnKuaNmz7wDPfL6Yp/73A+VLFeeeM5tz1lG1vKxRjLy6uHPOxUmJYinc3LUxH9/UibqVy3Dz27O46tUZrN26M9GhFRqemJxzLgeOOLQ8I647gbtOb8aUxZvoNmwSb05bzoED3guVW56YnHMuh1JTxFWdGjC2fxda1anIne/N46IXv2LZJq91nRuemJxzLpfqVinDm1cdz5DeLflu9S90f2wSz09azL79XtYoJzwxOedcHEjiguPqMu6WLnRqXI0HP1lAn2emsmDdL4kOLel4YnLOuTiqUbEUL1zahqcuas2qn3dyxhOTGTZuEbv3eVmj7Mp1YpJ0jKRe4XJMPIJyzrlkJokzWtVi/C1dOPOoWjwx4QfOfHIy3674OdGhJYUcJyZJbSQtAN4ErgP6Af+RtFBSm3gFGPWaL0haLGmnpI2SPshqPihJn0uydJbvItoMSmf7urx4D865ouOQsiV49PyjefnyY9m2ax+9n5nK/aPms2PPvkSHVqDl5orpeeAGM2tmZt3N7FQzawLcALwQn/AOMgO4HGgGdAcEjJeUWeGq3kDNiKU+wVTx/41qtzCqXcs4xu2cK8JOalqdsQM6c/HxdXlp8lK6PzaJKT9uSnRYBVZuElM5MxsfvdLMxgFlc3HcDJnZc2b2hZktM7NvgLuAWkCDTPbZbGbr0hagYxjfv6Oa7otsZ2Yb8+I9OOeKpvKlijO4V0ve6duOYikpXPziNO4YMYetO/cmOrQCJzeJaYOkKyT9VrRVUqqkq4A8/1NAUlmCOn4rCCYszK6rgdFmtjJqfQNJqyUtlfS2pAyTnXPO5dTxDaow+uZOXNOlAf+dsZJuwyYy9ju/cxApN4npMuBCYLOk7yV9D2wGzg+35QlJ/SRtJ5iksAdwipntzua+RwBdOLircRpBF2EPgsRVA5gqKd3JVyT1lTRD0oyNG/3CyjkXm1LFUxnYoxnvX9+BymVL0Pf1mdzw1jds2p6tj7JCL9dFXCVVBeqG364ws5iuliQNBu7MotlJZvZ52L4iUJ3gPtBfgcOADma2IxuvNRS4GKhrZhnefZRUDlgCDDGzYZkd04u4OudyY+/+Azw3cTFPTPiRMiVTuefM5vQ6unahLwqbq/mYcviC1c1sQzbbVgWqZtFsRXqJR1IJ4GfgWjN7PYvXKQGsAl4ws6wSIZL+Bywws+sya+eJyTkXDz9u2Mbf3p3DNyu2cGKTajxwTktqVyqd6LDyTCKqi2f7k9rMNpnZgiyWjK6GFC7ZmRClF0ECfCmrhpJKAU2Btdl8G845lyuNqpdn+LUncM+ZzZm2ZDOnDpvI618VzaKw2ZkoMF2Szspkc6mcHjeT12sE9AHGAxuBOsAdwG5gVES7CcB0MxsYdYi+wIT0pn6X9A/gI4KBFNWBuwlG7r0a7/fhnHMZSU0RV3Q4nK7NDmXgyLnc/f48Ppq1hiF9WtKgWrlEh5dvcpyYgPeAiQRXLNHK5+K4GdkNnAjcClQC1gOTgPbhMPA0DYE/jLgLR9idDFyQwbHrAP8huKLaCHwFtDOz5fEL3znnsuewymV4/crjGD5zFYNHzafH418woNsRXNXxcIqlFv5Kcjm+xxRWfehhZkvT2bbSzA7LbXDJwO8xOefy0oZfdnH3B/MY8916WtSuwCN9jqJ5rQqJDivX8uoe06tkPGjh2Vwc1znnXKh6hVI8d0lbnrn4GNZt3c1ZT03mH2MWsmtv4S0Kmyej8ooSv2JyzuWXLTv2cP+o7xnxzSoaVivLI+e2ok29yokOK0cSMSrPOedcnFUqU4J//ukoXv3Lcezae4Bzn/2SQR9+x6+7C1dR2JiumCR9lsnmA8BWYCbwkpmtz2VsScGvmJxzibB99z6GfrqAV79cTp1DSvNQ75Z0alwt0WFlWzyvmAQ0IRgdV49gWHi98PtmwOEEQ63nSWqew3idc85loVzJYtx7dguGX9ueEsVSuOSl6dw2fDZbdyR/UdhYE9MwYBfQxswamtkJZtYQODZcfy/QmGDI9QNxjdQ559xBjq1fmU9u6kS/Exsy8tvVdH10Ip/OS+7aALEmpsHAIDP7NnKlmc0kSEqDzWwVMBToHJ8QnXPOZaZU8VT+dlpTPri+A9XKleTaN77hujdmsmHbrkSHliOxJqYjyHhKi41Ao/DrxeTRnEzOOefS16J2RT64oQO3dW/ChAUb6DZsEu/OXEWyjb6ONTEtA67KYFtffp8XqSrwU85Ccs45l1PFU1O4/qRGfHJTJxpXL8dfh8/mspe/ZtXPWU7AUGDEmpjuA3pJmiPpHknXhf/OAc4m6M4D6Eowx5FzzrkEaFS9HP+9pj33nnUkM5Zt5tRHJ/Hq1GVJURQ25gdsJXUjSEBtgOLAXoJq4vekTbUeVufeb2bJPzwkCz5c3DlX0K36eQd/f28ekxZtpG29QxjSpxWNqie2KGyezMckKYWgy26TmR3IRXxJzROTcy4ZmBkjv1nNfaPms3PPfm7u2pi+nRtQPEFFYfOk8oOZHTCzDUU5KTnnXLKQRJ82dRh/Sxe6Nq/O0DELOfupKcxbvTXRoR0k5sQkqaakf0j6WtJiSdMlPSKpRl4E6JxzLn6qlS/Jvy5uw7N/PoaN23dz9tNTePjTBQWqKGxMiUnSEcAs4CZgOzAd+BW4GZglqXG8A3TOORd/p7WoyfgBXejdujbPfL6Yno9/wdfLNic6LCD2K6aHgV+AI8zsJDO70MxOIni+aWu43TnnXBKoWKY4Q887itevPI49+w9w3rNf8n8fzGN7govCxpqYTgLuNrNlkSvDmV4Hhdudc84lkU6NqzGmf2eu6FCf179aTvdHJ/H5wg0JiyfWxFQC2JbBtm3hduecc0mmbMli3HPmkbx77QmULpHK5S9/zS3/ncXPv+7J91hiTUyzgBvDoeK/kSSgX7jdOedckmpT7xA+vqkjN57ciA9nraHboxP5ZO7afC1rFOt8TKcBowhq4b0DrAVqAOcRVBU/3czG5kGcBZY/x+ScK6zmr/mF20fMYe7qrXQ/8lDuP7sF1SuUisux4/qAbZicBgOtCeZnMoLJAe82szG5jDXpeGJyzhVm+/Yf4MXJS3l03CJKFkvhrjOaUzxF/GPsItZs2UmtSqW5rXsTerWuHdNx86ryQxngEOBnM0ue6oBx5onJOVcULNm4nTtGzmX60s2kCCJL7pUunspDvVvGlJzyqvLDDjNbXZSTknPOFRUNqpXj7avbUal0caLrwO7cu5+hYxbG7bWKZdVA0gGC7rrsMDPL8pjOOeeST0qK2Loz/drca7bsjNvrZCeJ3Ef2E1O+CEcBjga6A+eZ2btZtO8D3A80JBi4caeZvRfVph9wG1AT+A7ob2Zf5EH4zjmXtGpVKs3qdJJQrUql4/YaWSYmMxsUt1eLn1uBbBV2ktSeYAThPcBIoDcwXFIHM5sWtjkfeJxgyPvk8N/Rkpqb2Yo8iN8555LSbd2bMHDkXHZG1NYrXTyV27o3idtrJKbeeS5IaktQm++KbO7SH/ifmT1gZt+b2QPA5+H6NLcAr5jZC2GbGwmGwl8Xt8Cdc64Q6NW6Ng/1bkntSqURULtS6ZgHPmQlqe4HSSoP/Ae4xsw2BD16WWoPPBm1bgxwQ3jMEgSTHv4jqs1Y4IRcBeycc4VQr9a145qIoiXbFdOzwKdm9kkM+9QA1ketWx+uh2Cyw9Qs2vyBpL6SZkiasXHjxhhCcc45l5WEJyZJgyVZFsuJki4BjiIYoBCr6MEbSmdddtoEDc2eN7O2Zta2WrVqOQjHOedcRgpCV95jwBtZtFkBXA40B7ZHdeG9I+lLM+uYwb7rOPjKpzq/XyFtIhhIkVkb55xz+SThicnMNhEkh0xJupOD7wPNBf4KfJDJrl8C3YChEeu6AVPD198jaWa4bnhUmxFZxeWccy6+Ep6YssvMVgOrI9eFV04rzWxJxLoJwHQzGxiuehyYJGkg8B5wDsG8UZFXWMOA1yVNB6YA1wK1CO5pOeecy0dJk5hi0BBYmfaNmU2VdAFB4dl7CR6wPT/tGaawzTuSqgB3ETxgOw/oGU6AmKmZM2dukpRluwxUJRtXi+43fr5i4+crdn7OYpOb81Uvow05LuLqck/SjIyKGLqD+fmKjZ+v2Pk5i01ena+Ej8pzzjnnInlics45V6B4Ykqs5xMdQJLx8xUbP1+x83MWmzw5X36PyTnnXIHiV0zOOecKFE9MzjnnChRPTM455woUT0xxJOl6SXMk/RIuX0o6PWK7JA2StEbSTkmfSzoy6hglJT0paZOkXyV9KKlO/r+b/Cfp72HR3qci1vk5C4XnIbrA8bqI7X6uokiqKelVSRsl7ZI0X1KXiO1+zkKSlmVQRPvjcHu+nStPTPG1CrgdOAZoC3wGvC+pVbj9bwSz794IHAtsAMYpmGcqzWNAH+BCoBNQARglKTU/3kCiSGoHXA3Midrk5+yPFhJUJ0lbWkZs83MVQVIlghJjAk4HmhGcmw0Rzfyc/e5Y/vi7dQzBDAv/Dbfn37kyM1/ycAE2A9cQ/OdYC9wZsa00sI1g4kOAisAe4OKINocBB4DuiX4veXiOKhKUijqZYHbhp8L1fs7+eJ4GAfMy2Obn6uBz8iAwJZPtfs4yP393AluAMvl9rvyKKY9ISg1r9JUjqGR+OMHUGmPT2pjZTmASv8+U2wYoHtVmJfA9hXs23eeBd83ss6j1fs4O1kDSaklLJb0tqUG43s/VwXoB0yS9I2mDpFmSbpB+mzfHz1kGwnN0JfCGme0gn8+VJ6Y4k9RS0nZgN0F18nPMbC6/z/eU2Uy5NQjmhoouipjhbLrJTtLVQCPg7nQ2+zn7o2kE85L1IOj2rAFMVVCA2M/VwRoA/YAlQHeCmQaGANeH2/2cZawbQTJ6Mfw+X89VYawunmgLgaOBSgR9ra9KOjFie7Znyo2xTdKR1ISgu6WTme3JpKmfM8DMRkd+L+krgg/dy4Cv0ppF7VYkz1UoBZhhv0+B862kxgSJ6amIdn7ODnY18LWZzYpany/nyq+Y4szM9pjZj2aW9h9iFjCAYCZdyHym3HVAKkEp+YzaFCbtCd7rPEn7JO0DugD9wq9/Ctv5OUuHmW0HvgMa479f6VkLzI9a9z1QN/zaz1k6JFUHzgZeiFidr+fKE1PeSwFKAksJfnDd0jZIKkUwcmVquGomsDeqTR2C0URpbQqT9wlGlR0dscwA3g6/XoSfswyF56IpwQew/34dbArQJGrdEUDa/Gl+ztJ3OcGtiLcj1uXvuUr0yI/CtBD0X3cC6hN84D5EMCKlR7j9duAXoDfQIvzBrwHKRxzjGYKZersCrYH/EVx1pSb6/eXTOfyccFSen7ODzs0/CK4oDweOB0aF56aen6t0z9ex4QflnQT3Mc8DtgLX++9XhudMBH8QvpDOtnw7Vwk/EYVpAV4h+GtsN8EY//FEDJMMf+iDCP7C3QVMBFpEHaMU8CRBN9YO4CPgsES/t3w8h9GJyc/Z7+8z7YNgT/iffwTQ3M9VpufsdGB2eD4WATcRFq/2c5bu+TqJ4H7Qcelsy7dz5dXFnXPOFSh+j8k551yB4onJOedcgeKJyTnnXIHiick551yB4onJOedcgeKJyTnnXIHiicm5AiSDidqil2WZ7P9KRLvPI9bXD9ddFdW+qqRvJW2WdGy4bnzEMd7Iq/fqXEa8iKtzBUv7qO/fI3hAdFDEut1ZHGMdcA7BU/oZknQoMAE4FDjZfi/YeSPB3DrvZSti5+LME5NzBYiZfRX5vaTdwKbo9VnYnVV7SbUJklJFoIuZ/Vbs1My+j3ht5/KdJybnihhJdYHPCIoLdzGzRQkOybk/8MTkXNHSgGDWUQM6m9nSBMfj3EE8MTlXtAwkmGX0SE9KrqDyUXnOFS2fElSJflxSyUQH41x6PDE5V7SMAP4CnAoMl1Q8wfE4dxBPTM4VMWb2KnAtcCbwlqTUBIfk3B94YnKuCDKz54GbgXOB1yT5Z4ErMHzwg3NFlJk9Ed5negTYLelK85lDXQHgicm5IszMhobJ6X6C6bL7JTgk5zwxOVeQmVn9nOwnqViwu+0Pj7OMYDReeq8xGBgcsW8K3s3vEsh/+ZwrfOoBewlKDuXE2HD/enGLyLkYyLuUnSs8JNUHqobfbjOzhTk4xhFAhfDbn/xBXJffPDE555wrULwrzznnXIHiick551yB4onJOedcgeKJyTnnXIHiick551yB8v8fLbf0TgMzDQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZoAAAEhCAYAAABGC2bVAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAABAE0lEQVR4nO3dd3hUVfrA8e9LL1Kk996bqAHEBvYCKkV3dS1g720tgMiKygp2XZWfi9217gI2VEBQVEDRIBJ6D72E3hISkvf3x7nRyzApN5nJpLyf55knmXvOvfedk8m8c88991xRVYwxxphoKRXrAIwxxhRvlmiMMcZElSUaY4wxUWWJxhhjTFRZojHGGBNVlmiMMcZElSWaAiYig0VEfY9UEVklIk+ISIVCENt1sYyhqBORRO/v+kEW5TO88pkR3OfbIpLoe97M28fgkHoPicg6ETksIr97y1RERkYwlkQReTuHOs1C/gdCH10jFU9ORKS6iIwUkRPClM0QkRkFFUtW+xWR3l679PYtu0dEBhR0bHlVJtYBlGCXARuAKkB/YJj3+50xjGkw7j3xZgxjKA72Af1EpIqq7stcKCJNgdO98mjaDPQEVvn23R34J/A08Kkvhp6492EsjAY+D7N8eQHGUB14BNcGv4WU3VaAcWTnN9zfabFv2T3ATGBiLAIKyhJN7Pyuqiu9378RkdbA9SJyt6pmxDKwSBKR8qp6KNZxFLBvgLOBgcDbvuVXA4nAeqB0tHbutffPIYvbez9fVdXVvrqh9QrS6hjvP1uqujjnWtGnqns5+u9ZpFjXWeHxG1ARqJW5QEQqiciTIrLG62JbIyLDRaSUr04FEXleRBaKyH4R2SIiX4hIu9AdiEhzEfmPV+eQiKwWkRe9shlAL+AUXxfGDN+63UVkmrePAyIy3fuW7N/+2yKyQUR6ishsEUkGnsruRYtIfxGZ5W13r4j8IiIX+8rvEJGfRGSniOwWkZ9FpE/INsqIyONeF2SKiGwXkZkicmpIvRtFZL6vzhsiUiO7+PIoGZiASyx+VwP/AY6ajkNE6ovIu15ch0QkQUSuClPvLBH5zXsNq0Tk5jB1jug68/6Ob3vFq/zdZeG6zkTkOBH5XER2iUiy9/c5Lcx+7va6ylJEJD5cnWgQkaoi8rKIbPLaapmI3Csi4quT2d000Htf7vLeX++LSE2vTjNgjbfKa773/WCvPKsurH4i8m/vPbnL+/8rLSLdvPfdARFZJCLnhcTdTUTGe/8jyV7cT4hIxRxe7xFdZ+K6SZsCV/pifltELvV+Py7MNmaIyE9B2zpS7Iim8GgG7AF2gPvwBKYAHYDHgQXAScAIoAZwn7deeVyX2yhcl0kN3CH/zyLSTlW3eNtrDvwCHMR1FawAGgPnetu5DXgP900788Nrr7duF+B73KH7YNwH5VDgexE5SVXn+15HNeAj4BngIdyHblgicifwL1xXziBgP3CC1xb+dnkddyRQBrgImCQiF6rq116dIcC9wHDgd6AqEOe1Rea+xnht9i/gAaCh12adRORkVU3PKs48eheYLiKNVHWDiJwEtMElml7+iiJSGde+x+LabD1wFfAfEamkquO8eu2Br4B44HLc334kcAyQXfy3edsbBgzAvU/CdpeJO1fxIzAPuBH3frkFmOa101yv3vXAC7gE9jHQCvgQ917MrVLe+9xPs/tbiPuS9SXuffIP3P9FH+A5oDau/fxeAKYBVwCtgSeABsAZuHYYgOt+8nfjrSJ7L3jr/BXXFfow7r15Nq5rcqO3bKKINFXV7d56TXDvz7dxXZcdvdfQAvf3zK3+uPfBfNzfHyAJWAtswv3//tHtJyJtce+5awPsI7JU1R4F+ODPD+q2uDfnscB1wGHgDl+9q716p4esPxxIBepksf3SQCXcG/le3/J3cR/kDbKJbQYwM8zy8cBuoLpvWVVgJzDRt+xtL+ZLctEOVb0YJ+ZU17dOKa/NpgKf+ZZPym47uGSVDvwjZPkpXrz9Ivj3TcQlbPF+H+otHwvMCtfOwB1eHL1DtjUN2AaU9p6/D2wHKvvqNPbeD4khr1eBwb5lN3jLmoXsQ4GRvufTgSVAuZD31BLgU9/fYT0wOWRbf/W293YObZQZX7jH/hzW7Rv62rzlrwOHgFre895evdAYr/SWnxUSyw1Z/D/M8D3P3OabIfV+85af6lvWxVs2KIvXId57+SogA6iZi/329i1LBN4Ls92RuC+s/vfIc8AuoGKk3udBH9Z1FjtLgTTch/UbwL9V9WVf+fm4byizva6hMt63v6lAWdzRDQAi8hcRmSMiu3EJ6wDuW25b3/bOBSap6qY8xHq6t+7uzAXq+o0/J+Tbubf/SbnY5slejOOyqyQiJ4rIJBHZ6m07DTiHI1/br8CFIvJPETlVRMqFbOYc3Ifj+yFtOQd31HZ6Nvsv5V9HfN2W2VH3H/4ecLUXz19xyT6c04GNqjojZPl7uG/pHbznPYGvVPWAbz/rgVm5iSknXhdOL+B/QIavnQSX9DLbqZH3+G/IJibg/ka5NQroFvLIqfvtdNwH84chy98DyuHayC80xv9564fWC+LrkOdLgQOqOjNkGbgvAsAfXX5PisgqXFJMwx3hCu5oKxLG4b5oXuHtswKut+BdVc2ydyHaLNHETn/cP9aFuH/i20TkGl95HVw/bFrI4xevPLOf+SJc18US4G9AD2+7SYB/uHRN8j66qAaumyHUFtwRmd82zV03VE3vZ5YxiUhj3DfsGrjReCfjXttkjnxtT+C6Ay/GdfvsEJG3RCTzfFcd7+dKjm7Pqr5YwnkzpH6QEXnv4pLEI0Bl3N8pnOzaN7McoD6wNUy9cMvyogbu6GUER7fTHcCxXqKtH26/qnoYr+s3l9aqanzIY14uYtypRw8wCW2rTKExpuK+3TcMEGeoXSHPU3FH/KH7gSPfp2/huiH/hfvy0w24PUy9PPO+SH7m7Qfc6NYawL8jsf28snM0sbNQvVFnIvItkAA8LSITvG+sO3AnKv+SxfqJ3s/LgZWqOjizQETKcvQ/3Hby/s+1E6gXZnk9r8wvt/edyOy3bggszKLO+bhzPn9R1T8SkohUOmKHqmnAk8CTIlIP173yHO6b3V/588PvXI7+kIDsPxxHAv4jze1Z1DuKqi4XkTm481kT/UeEIXZy5BFapsw2z4xvM1A3TL1wy/JiN+7b/itkcfSlqhkikpkUj9ivd/STXdKOhJ1ADREp5/swh6PbKlNojOVwX442Ri/Eo3lHFpfguilf9C3vHIXdjcWdHzwRd77mR43xCDo7oikEvG9nD+C+eWeexJuMO+zeH+ZbX7z+eYKxEkd3V1zN0cNnpwJ9RaQ+WTuEG/kW6nugj4j8caLX+/0irywvZuPOGd2UTZ3MhJLm228b3LmVsFR1i6q+jjtK7OQt/gb3Adoki7Zck832EkPqJubq1f3pKeALjkxWob4HGolI6Ov6G+4czRLv+U+4LsLKmRW8o74s2yMI7wvOj8BxwG/h2sqrugF3jib0S9BAov/l9Xvc59ZlIcuvxB1ZhA4DDo3xMm/9zBFYmUdG2Y78ioDyuP/JtJDlg/O4vaz+V1HVb3Hvmedw741X87iPiLEjmkJCVT8XkV+B+0XkZdyJ32tx30yexY0wKQe0xHUR9VPVg7iE1E9EnsedGzkRuIuQQ3lc900f3DmfJ3DdSA2B81U1cxjtYlwX3l9xI2/2qeoy3Ki3vl4sT+KOWobgEsFjeXy9+0RkGPCSiEzwXu8+oCuQoqov4ZLFYeBdrw3qA48C6/B9SRKRz7z2+Q13xHI87mjo396+Vnlxv+yNwPkeSMEl8nOA11X1u7y8jly8zonkfFHd28DduFFKw3Ef5Fd6sd3s64ochfugnCoiT+PeD48Sua4zgL8DPwBTROQN3FFULdwor9KqOtQ7qnkUeF1E3sKNMmyFG9W2N8C+Wnij8UItV9XQI+VMX+MuVHxVRGoDi3DdzzcAo31fwDJ19MXYBnfR6veqOt0r34o7CrpcRBJw5zfXqGqQLsAcqeoeEfkZuM87ItyOGwSU116GxcBpItIX1224PeRL0KvAi95+JuQ58EiJ1SiEkvrgz1FnrcKUneuV3es9r4DrulmK+wazE3fieyRQxqtTCvcBtAk3FPV73AdtIiGjf3BJ6kPcm+8QsBp43ldeDzdscp8XxwxfWQ/cB/9+3D/jdKB7yPbfBjYEbI9LcSflk3EfUnOAvr7yv3ivPwX3oXK5t59EX537cN9kd3jbWea1UdmQfV3t1TvgvY4luCONRhH8+yYSZjRQSJ0ZhIzuwyXR//j+NgnAVWHWPRs39Djz73dzmPZoRh5HnXnL2uM+mLd5+9mAG/hxYUi9u3EDVlJwQ65PDfe+C/MaMuPL6nFpDutX9f5um3FHMctxw9vFV6e3t60BXvvs9t7XH+CNTPPV7Yf74E7ztxtZj/46Ozfve6/uqJDX/bUXxzbvNfTh6BFlWe3XX6cd7ujzIGFG+nnvJwWejtR7Oz8P8YIyxphiw7u48TvgHFWdFttoCp6I3Ig7om+jf85AEjPWdWaMMcWEiHTA9Vw8irvuKeZJBizRGGNMcTIWdxnAbNyQ9ELBus6MMcZElQ1vNsYYE1XWdRaiVq1a2qxZs1iHYYwxRcrcuXO3q2rtcGWWaEI0a9aM+Pj4nCsaY4z5g4iszarMus6MMcZElSUaY4wxUWWJxhhjTFRZojHGGBNVlmiMMcZElY06M8aYEu7TeRt5esoyNu1OpkH1ijxwXlv6HZ+fe8MdyRKNMcaUYJ/O28iwiQtITnN3o9i4O5lhExcARCzZWNeZMcaUYE9PWfZHksmUnJbO01OWRWwflmiMMaYE27g7OezyTVkszwvrOjPGmBIoPUN5e3YigrtDWqgG1SN3d2tLNMYYU8Is37qPB8cn8Pv63XSoX5XVSftJOZzxR3nFsqV54Ly2EdtfzLvORKS+iLwjIkkikiIii0WkVzb1R4qIZvGo49XpnUV5u4J7ZcYYU7ikHs7gxWkr6POvH1m38yAvXt6VL+86lTEDu9CwekUEaFi9IqMHdC4+o85EpDowC5iJu3d2EtACdz/trDwDvBqy7CNAVTV0vY7ATt/zpPzEa4wxRdX89bsZMiGBpVv2cfFxDXjkog7UPKY84EaXRTKxhIp119mDwGZVvca3bE12K6jqfmB/5nMRaQycBlwdpvo2Vd0eiUCNMaYoSk5N5/lpy3n9x9XUqVKB16+J4+wOdQs0hlgnmn7AZBH5GDgD2AS8Dryiub/15/XAbmBCmLJ4ESkPLAZGqep3+Y7YGGOKiJ9W7WDYxAQSdxzkiu5NGHZhO6pWKFvgceT6HI2IlBORT0Tk9AjuvwVwG7AaOA94ERgD3J7LmEoB1wHvquohX9Fm4FZgIDAAWAZMzyp2EblJROJFJD4pyXrXjDFF296UNB76ZAFXvPYzCnxwYw9GD+gckyQDILk/cAAR2QdcpKozIrJzkVQgXlVP9i17Auivqu1zsX4fYBLQSVUX5VD3K+Cwql6cXb24uDi1G58ZY4qq6Uu2MvyThWzbl8INp7Xg3rPbULFc6ajvV0TmqmpcuLKgXWezgJOAGfkNyrMZ163ltwS4O5fr3wTMzinJeOYAlweIzRhjiowd+w/x6BeL+Xz+JtrWrcKrV59I18bVYx0WEDzR3Ad8KiL7gU9xieKIQyJVzQizXlZmAaGDtdsAWd4SNJOINMCNVLshl/vqiovXGGOKDVXl8/mbePSLxexLSePes9twa++WlCsT86tX/hA00Szwfr7oPUJpwG0+D8wWkeHAx8DxwF3AQ5kVRGQ00F1VzwpZ9zrgAPDf0I2KyD1AIrAIKAdchRt4MDBAbMYYU6ht3pPMw58sZPrSbRzXuDpPDexC23pVYh3WUYImmscIP1tBnqjqryLSD3gCGAGs836O9VWrD7T0rycightt9r6qHgyz6XK4620aAsm4hNNHVb+KVOzGGBMrGRnKR7+uZ/RXS0jLyODhPu259pTmlC4lsQ4trECDAUoCGwxgjCnMErcfYOjEBH5evZOTW9ZkzIAuNKlZKdZhRXQwgDHGmBg4nJ7Bm7PW8OzU5ZQrXYoxAzrz126NcR08hVvgRCMix+O6t04HquPOn/zmDUv+QVUnRzZEY4wp2ZZu2cuQ8QnM37CHs9vXZVS/TtSrViHWYeVaoEQjIqcC03AXWH4A3OErzgBuASzRGGNMBBw6nM4r361i7HcrqVaxLC9dcTx9u9QvEkcxfkGPaMYAU3AjuEpzZKL5DbgmzDrGGGMCmrduF0MmJLB86376H9+QEX07UKNyuViHlSdBE80JwABVVREJHUWwHagdmbCMMaZkOph6mGenLufNWWuoV7UCbw3uxhnt6sQ6rHwJmmhSgKyGN9QH9uQvHGOMKblmr9zO0IkLWLfzIFed1IQh57ejSozmJ4ukoIlmJnCPiHzmW5Z5ZHM98G1EojLGmBJkT3Iao79awke/rqd5rcp8fNNJ9GhRM9ZhRUzQRDMCN23MfGA8LskMEpHngBOBbpENzxhjirepi7bw8KcL2b7/EDf3cpNgVigb/UkwC1KgRKOq872p9p8GhgOCGxDwI9BLVZdFPkRjjCl+tu8/xMjPFzEpYTPt6lXh9UFxdGlUPdZhRUXg62hU9TfgLBGpANQAdmcxDYwxxpgQqsqnv2/k0S8Wc/BQOved04ZberekbOnCMwlmpOVpZgARqQp0ws0ltkFEFqnq3ohGZowxxcym3ckM/2QB3y1L4oQm1XlyYBda1y18k2BGWl5mBvgH7nYBx+C6zgD2icjTqjoqksEZY0xxkJGhvP/LOsZ8tYQMhUcu6sA1PZsV2kkwIy3ozACP4gYEvA58BGwF6gJXAI+KSBlVHRnpII0xpqhanbSfoRMW8EviTk5tVYvRAzrTuEbsJ8EsSEGPaG4EnlXVB3zLFgHfisge3B0vR0YoNmOMKbIOp2fw+sw1PP/NcsqXKcVTl3bhshMbFbnpYyIhaKKphpuCJpzJwK35C8cYY4q+xZv28uCE+SzcuJfzOtbl8Us6Uadq0ZkEM9KCJpo5uGtlpoUp6+aVG2NMiXTocDovf7uS/5uxiuqVyjL2yhO4oFO9EnkU45djohER/5i7u4BPROQw8D/+PEfzF9ytlS+JRpDGGFPYzV27kyETFrBy234GntCIEX3bU71S0ZwEM9Jyc0RzmCNv3yy4WZzHhNQTICGX2zTGmGLhwKHDPD1lGe/8lEiDahV557ru9Gpj8wv75SYpPMaRicYYYwzw44okhk1cwIZdyQzq2ZQHzm/HMeXtu3aoHFvEhisbY8yR9hxMY9SXi/nf3A20qF2Z/93Sk27NasQ6rEIrrzMDNAYaA0cNo1BVm8HZGFNsTV64hRGfLWTngVRu692Su85qXewmwYy0oBdstgDeB7pnLvJ+qve74u68aYwxxcq2fSmM/HwRXy3YQof6VXlrcDc6NawW67CKhKBHNK8DTYB7gKVAaqQDMsaYwkRVmfDbRh6ftJjktHQeOK8tN53eolhPghlpQRNNN2Cwqk6IRjDGGFOYbNh1kIc+WcgPy5OIa3osYwZ2oVWdY2IdVpETNCVvIMJHMSJSX0TeEZEkEUkRkcUi0iub+s1ERMM8zg+p10tE5nrbXC0it0QybmNM8ZWRobwzO5Fzn/+B+MSdPHpxR/57c09LMnkU9IjmCWCIiHyrqgfyu3MRqY67Y+dMoA+QBLQAtuVi9fNxd/rMtNO33ebAV8CbwFXAqcBYEUmyozFjTHZWJe1nyPgE4tfu4vQ2tXmifycaHVuyJsGMtKB32PyPiLQDEkXkZ2DX0VV0UIBNPghsVtVrfMvW5HLdHaq6JYuyW4BNqnqn93yJiPQA7gcs0RhjjpKWnsG4H1bz4vQVVCxbmmcuO46BJzQs8dPHRELQUWeDgWFAOnACR3ejBb2wsx8wWUQ+Bs4ANuEGHLyiqjlta6J3l88VwPOqOt5X1hOYGlJ/CjBIRMqqalrAOI0xxdjCjXt4cHwCizfv5cLO9Rh5cUfqVCm5k2BGWtCus0eBT4DrVXV3BPbfArgNeB43pU1X4CWv7OUs1tmPOzKZhZse52LgYxEZpKrveXXqcfTEn1txr7cWsNlfICI34W5xQJMmTfL+aowxRUpKWjovTl/BuB9WU6NyOV696gTO71Q/1mEVO0ETTU1gbISSDLjBCPGqOsx7Pk9EWgO3k0WiUdXtwLO+RfEiUgvXDfeev2rIqpLFclR1HDAOIC4uzqbbMaYE+DVxJ0PGJ7B6+wEuO7ERD/fpQLVKZWMdVrEUdNTZTKB9BPe/GVgcsmwJ7lqdIOYArX3Pt+COavzq4I6AdgTctjGmGNl/6DD/+Gwhl736E6npGfzn+u48fdlxlmSiKOgRzd3Af0VkF+5GZ6GDAVDVjADbmwW0DVnWBlgbMK6uHNkd9hPu/I/fObijJzs/Y0wJ9f3yJB6auIBNe5IZfHIzHjivLZVtEsyoC9rCS7yf72ZRrgG3+TwwW0SGAx8Dx+PuefNQZgURGQ10V9WzvOeDgDRgHpABXITrahvi2+6rwB0i8gLwb+AUYDBwRYDYjDHFxK4DqTz+5WIm/raRlrUrM/6WnpzY1CbBLChBE01Ebxmgqr+KSD/c9TkjgHXez7G+avWBliGrPgw0xY1+Ww5c5xsIgKquEZELcYnsVtxotrvsGhpjShZV5euFW/jHZwvZfTCNO89sxe1ntLJJMAuY5DyKuGSJi4vT+Pj4WIdhjMmnbXtTGPHZQqYs2krnhtV4cmAXOjSoGuuwii0RmauqceHKrHPSGFOsqCr/m7uBUZMWc+hwBkMvaMcNpzanjE2CGTOWaIwxxcb6nQcZNnEBM1dup3uzGowZ2JkWtW1+slgLOjNATjc108yT9sYYU1DSM5R3f0rkqcnLKCXweL9OXNm9CaVK2fQxhUHQI5pSHD0YoCZuiHIS7sS8McYUmBVb9zFkQgK/rdtN77a1+Wf/zjSsXjHWYRmfoJNq9g63XERaAp/iRo8ZY0zUpaVn8OqMVbz07Uoqly/NC3/tyiVdG9gkmIVQRM7RqOoqERkDPI27FsYYY6JmwYY9PDB+Pku37KNvl/qMvLgjtY4pH+uwTBYiORggCXdVvzHGREVKWjrPT1vOaz+sptYx5Rl39Ymc2zF0tilT2EQk0YhIDeDvwKpIbM8YY0LNWb2DoRMXsGb7AS7v1phhF7anWkWbn6woCDrqbA1HDwYoB9T1fh8YiaCMMSbTvpQ0npy8lPd+XkfjGhV5/4YenNKqVqzDMgEEPaL5nqMTTQpuEsz/qaod0RhjIua7pdt46JMFbN2bwg2nNufv57ahUjm7/K+oCTrqbHCU4jDGmD/sPJDKY18s4tPfN9G6zjGMvfVkjm9ybKzDMnlkXw2MMYWGqjIpYTMjP1/EnuQ07j6rNbed0ZLyZWwSzKIscKLxpum/AndzstCbaquqhs60bIwxOdq6N4Xhnyxk2pKtdGlUjfdv7EG7ejYJZnEQdDDACOBRYCHwO3AoCjEZY0oQVeXjX9fzz6+WkHo4g+EXtufaU5rZJJjFSNAjmuuBF1X13mgEY4wpWdbuOMCwiQuYvWoHPZrX4MmBXWhWq3KswzIRFjTR1AS+iEYgxpiSIz1DeWvWGp6ZuoyypUrxRP/OXN6tsU2CWUzlZXjzcUBOszgbY0xYy7bs48EJCcxfv5uz2tVhVP9O1K9mk2AWZ0ETzT3ARBHZAXwF7AytoKoZEYjLGFPMpB7OYOyMlbzy3UqqVCjLi5d35eLjbBLMkiBoosm8DcBbWZRrHrZpjCnm5q/fzYPjE1i2dR+XdG3AP/p2oKZNglliBE0Kj3H0zADGGBNWcmo6z32zjDdmrqFOlQq8fk0cZ3eom/OKplgJOjPAyCjFYYwpZmav2s6wiQtYu+Mgf+vRhKEXtKNqBZsEsySybi5jTETtTUlj9FdL+fCXdTStWYkPbzyJni1rxjosE0OWaIwxETNt8VaGf7qApH2HuOn0Ftx7dhsqlrPpY0o6SzTGmHzbsf8Qj36xmM/nb6JdvSqMuzqO4xpXj3VYppCI+RwPIlJfRN4RkSQRSRGRxSLSK5v6vUXkMxHZLCIHRSRBRK4LU0fDPNpF/xUZU3KoKp/9vpGzn/uerxdu5t6z2/D5HadakjFHiOkRjYhUB2YBM4E+uNtBtwC2ZbPaycAC4ClgM3AeME5EUlT1g5C6HTnyWp+kyERujNm8J5mHP1nI9KXb6Nq4Ok9d2oU2davEOixTCMW66+xBYLOqXuNbtia7FVT1iZBF/yciZ+Du7hmaaLap6vb8h2mMyZSRoXz46zpGf7WU9AxlRN8ODD65GaVt+hiThVjfJqAfMFlEPgbOADYBrwOvqGqQ63WqAhvCLI8XkfLAYmCUqn4XbmURuQm4CaBJkyYBdmtMybJm+wGGTkhgzpqdnNKqJqP7d6FJzUqxDssUcrG+TUAL4DbgeWAM0BV4ySt7OZcx9QXOAk7xLd4M3Ar8CpQDrgami0hvVf0hdBuqOg4YBxAXF2cXpBoT4nB6Bm/OWsOzU5dTrkwpnhzYmb/ENbbpY0yuxPo2AaWAeFUd5j2fJyKtgdvJRaIRkVNw3WV3qeovmctVdRmwzFf1JxFpBtwPHJVojDFZW7J5L0MmJJCwYQ/ndKjLqH6dqFs1tDPDmKzF+jYBm3HdWn5LgLtzWlFETsVN7PkPVf2/XOxrDnB54AiNKaEOHU7nle9WMfa7lVSrWJaX/3Y8fTrXt6MYE1isbxMwC2gbsqwNsDa7lUTkdOBLYKSqvpDLfXXFJTZjTA5+W7eLIeMTWLFtP/2Pb8g/+nbg2MrlYh2WKaJifZuA54HZIjIc+Bg4HrgLeCizgoiMBrqr6lne8964JDMWeF9E6nlV01U1yatzD5AILMKdo7kKN/BgYIDYjClxDqYe5pkpy3lr9hrqV63AW4O7cUa7OrEOyxRxMb1NgKr+KiL9gCeAEcA67+dYX7X6gH8k22CgEu58y/2+5WuBZt7v5YBngIZAMi7h9FHVr3IbmzElzayV2xk6MYH1O5O5+qSmPHh+W6rYJJgmAiTIKGIRGUkOtwlQ1UfzGVNMxcXFaXx8fKzDMKbA7ElO44kvl/Bx/Hqa16rMmAGd6dHCJsE0wYjIXFWNC1dmtwkwpgSbumgLD3+6kB0HUrmlV0vuObs1FcraJJgmsmI9M4AxJgaS9h1i5BeL+DJhM+3rV+WNQd3o3KharMMyxVReZgaoD9wH9AJqADuAGcBzqrolotEZYyJKVflk3kYem7SYg4fSuf/cNtzcqyVlS8d8fl1TjAWdGaAN8CNwLG5o8kqgHu66l2tE5DRVXRHxKI0x+bZxdzLDP1nAjGVJnNDETYLZqo5NgmmiL+gRzZPAXqCHqiZmLhSRpsBUr3xAxKIzxuRbRoby/py1jPl6KQqMvKgDV/e0STBNwQmaaM4AbvEnGQBVXeuNSBsbbiVjTGysTtrP0AkL+CVxJ6e1rsUT/TvTuIZNgmkKVtBEUw7Yl0XZPq/cGBNjh9MzeO3HNTw/bTkVypTi6Uu7cOmJjWz6GBMTQRPN78CdIvK1fwYAce/e27xyY0wMLdq0hyETEli4cS/ndazL45d0oo5NgmliKGiieQyYBCzx7iGzGTcY4DKgNe4umcaYGEhJS+elb1fw6verObZSOf7vyhO4oHP9WIdlTOALNid7938ZBQwHBDdTwFygr6pOjXyIxpiczF27kwfHJ7Aq6QADT2jEiL7tqV7JerJN4RD4OhpVnYy7K2Yl3DDnXap6MOKRGWNydODQYZ6esox3fkqkQbWKvHNdd3q1qR3rsIw5Qp5nBvCSiyUYY2Lkh+VJDJu4gE17krnmpKY8cH47jilvk32YwifHd6WIpAM9VfUXEckg+0k1VVXtnW5MFO05mMbjXy5m/NwNtKhdmf/e3JNuzWrEOixjspSbpPAYsMH3e+6nezbGRNTkhZsZ8dkidh5I5bbeLbnrLJsE0xR+OSYa/7T/NnuzMbGxbV8Kj3y2iK8XbqFjg6q8NbgbnRraJJimaAg619lqoL+qzg9T1gn4XFVbRCo4Y0o6VWX83A2M+nIJyWnpPHh+W248rYVNgmmKlKDnU5oB5bMoqwA0zVc0xpg/rN95kIc+WcCPK7bTrdmxjBnYhZa1j4l1WMYElpcT91mdo4kDduc9FGMMuEkw3/0pkaemLEOAxy7pyFU9mlLKJsE0RVRuRp3dC9zrPVXgCxFJDalWEXdvmo8iG54xJcvKbfsZOiGB+LW7OL1NbZ7o34lGx9okmKZoy80RzWpguvf7ICAeSAqpcwhYDLweudCMKTnS0jMY98NqXpy2gorlSvPsZccx4ISGNgmmKRZyM+rsM+AzIPNN/5iqrolyXMaUGAs37uHB8Qks3ryXPp3rM/LijtSuktWpUGOKnqBznV0brUCMKWlS0tJ5cfoKxv2wmhqVy/HqVSdyfqd6sQ7LmIizq/iNiYFfE3cyZHwCq7cf4C9xjRh+YQeqVSob67CMiQqbgsaYArT/0GGemryUd39aS6NjK/Le9T04tXWtWIdlTFTFfAoaEakPjAEuBKrgBh/cqqrfZ7NOZ+BloDuwE/g38Liqqq9OL+A5oCOwCXhKVV+NZOzGBPHdsm0Mn7iAzXtTuPaUZtx/blsq2ySYpgSI6RQ0IlIdmAXMxN00LQloAWzLZp2qwDfAD0A3oC3wNnAAeNar0xz4CngTuAo4FRgrIkmqOiGSr8GYnOw6kMrjkxYzcd5GWtU5hvG3nMyJTY+NdVjGFJhYf516ENisqtf4luU0ou1KoBIwSFWTgYUi0h74u4g85x3V3AJsUtU7vXWWiEgP4H7AEo0pEKrKVwu28MjnC9l9MI27zmzF7We2onwZmwTTlCy5OUfzbZANquqZAar3w91E7WPgDFwX1+vAK/5usBA9gR+9JJNpCvA4boqcNV6d0Lt9TgEGiUhZVU3zF4jITcBNAE2aNAkQvjHhbdubwsOfLmTq4q10bliNd6/rQYcGVWMdljExkZsjmlIceV6mLVAPSAS2AnVxH/CbgWUB998CuA14Hneepivwklf2chbr1OPPc0aZtvrK1ng/p4WpUwao5cX6B1UdB4wDiIuLs9sgmDxTVf4Xv4HHv1xM6uEMhl3QjutPbU4ZmwTTlGC5OUfTO/N3EekHvAicpKq/+Jb3AD72yoIoBcSr6jDv+TwRaQ3cTtaJBo4ekCBhluemjjERs26HmwRz5srtdG9egzEDOtPCJsE0JvA5mseBEf4kA6Cqc0RkJDAKbxaBXNqMm7rGbwlwdzbrbMEdsfjV8X5uzaHOYWBHgPiMyVF6hvL27ESembKM0qWEUf068bfuTWwSTGM8QRNNa46e5yzTNqBVwO3NwnXF+bUB1mazzk/AkyJSQVVTvGXn4M7vJPrq9AtZ7xzc0VMaxkTIiq37eHBCAvPW7eaMtrX5Z//ONKheMdZhGVOoBO04XgPcnEXZzfz5QZ9bzwMnichwEWklIpcBdwGvZFYQkdEiMt23zgfAQeBtEekkIgOAocBzvgEErwKNROQFEWkvIjcAg4FnAsZnTFiphzP41/QV9PnXTBK3H+CFv3blzcHdLMkYE0bQI5pHgfdFZCEwnj8HA1wKtMMNPc41Vf3VO+/zBDACWOf9HOurVh9o6Vtnj4icg0tG8cAu3PUzz/nqrBGRC3GJ7Fbc0c5ddg2NiYSEDbt5cHwCS7fs46LjGvDIRR2odYxNgmlMViTrUcRZrCByNi7hxAFlgTTgV+ARVZ2e3bpFQVxcnMbHx8c6DFMIpaSl8/w3y3ntx9XUrlKeUf06c06HurEOy5hCQUTmqmpcuLLAF2yq6jRgmoiUwg0V3q6qGfmM0ZhC7efVOxg6IYHEHQe5ontjhl7QnmoVbRJMY3IjzzMDeMkly6lijCkO9qWkMebrpbw/Zx1NalTigxt6cHIrmwTTmCACJxoRGQRcATQBKoQUq6q2PHotY4qeb5duZfgnC9m6N4UbTm3O389tQ6VysZ61yZiiJ9B/jYiMwJ2fWQj8jruFszHFys4DqTz2xSI+/X0Tbeoew9grT+b4JjYJpjF5FfTr2fXAi6p6bzSCMSaWVJUvEjYz8vNF7EtJ4+6zWnP7Ga0oV8amjzEmP4ImmprAF9EIxJhY2rLHTYI5bclWjmtUjScv7UG7ejYJpjGREDTRfA8cBwSa0dmYwkpV+ejX9Tzx5RLSMjIYfmF7rju1OaVt+hhjIiZoorkHmCgiO3A3FtsZWsGGOpuiYu2OAwydsICfVu/gpBY1GDOgC81qVY51WMYUO0ETzXLv51tZlGsetmlMgUrPUN6atYZnpi6jbKlSjB7Qmb/GNbZJMI2JkqBJ4TFsmn1ThC3b4ibBnL9+N2e3r8Oofp2pVy10lL4xJpICJRpVHRmlOIyJqtTDGYydsZJXvltJlQpl+dcVx3NRl/qI2FGMMdFm3Vym2Pt9/W6GjE9g2dZ9XNK1AY9c1JEalcvFOixjSoy8zAxQH7gP6AXUwN1IbAZumv4tEY3OmHxITk3n2anLeHPWGupUqcAbg+I4q71NgmlMQQs6M0Ab4EfgWNxNy1bi7mR5N3CNiJymqisiHqUxAc1etZ2hExawbudB/tajCUMvaEfVCjYJpjGxEPSI5klgL9BDVRMzF4pIU2CqVz4gYtEZE9DelDRGf7WED39ZT7OalfjwxpPo2bJmrMMypkQLmmjOAG7xJxkAVV0rIiM58oZlxhSoaYu3MvzTBSTtO8TNp7fgnrPbULFc6ViHZUyJFzTRlAP2ZVG2zys3pkDt2H+IkV8s5ov5m2hXrwqvXRNHl0bVYx2WMcYTNNH8DtwpIl/7ZwAQN0b0Nq/cmAKhqnz2+yYe/WIR+w8d5u/ntOGWXi1tEkxjCpm8XLA5CVgiIh8Dm3GDAS4DWgN9IhueMeFt2p3Mw58u5Nul2+jauDpPXdqFNnWrxDosY0wYQS/YnCwifYFRwHBAcDMFzAX6qurUyIdozJ8yMpQPflnHmK+Xkp6hjOjbgcEnN7NJMI0pxAJfR6Oqk4HJIlIJN8x5l6oejHhkxoRYs/0AQyckMGfNTk5pVZPR/bvQpGalWIdljMlBjolGRErhusTWqOrCzOVecjno1ekMNFNVu1eNibjD6Rm8MXMNz32znHJlSvHUwC5cFtfIpo8xpojIzRHNVbhhy52zqbMP+FBEblTVDyMSmTHA4k17GTIhgQUb93BOh7qM6teJulVtEkxjipLcJpq3VHVNVhVUNVFE3gAGAZZoTL4dOpzOy9+u5P9mrKJ6pbK88rcTuLBzPTuKMaYIys040BNwV/3nZBoQF2TnIjJSRDTkkeV8aVnUz3zU8er0zqK8XZDYTOzMXbuLPv+ayUvfruTi4xrwzb296GMzLRtTZOXmiKYKsCsX9XZ5dYNaBvT2PU/Ppu4zwKshyz4CVFW3hSzvyJF3AE3KQ2ymAB1MPczTU5bx9uxE6letwFvXduOMtnViHZYxJp9yk2i2A02BmTnUa+LVDepwbmd9VtX9wP7M5yLSGDgNuDpM9W2qmpd4TAzMXLGdoRMT2LArmWt6NuXB89txTHm7i4UxxUFuus5m4s695GQwOSejcFqIyEYRWSMiH4lIiwDrXg/sBiaEKYsXkc0iMl1EzshuIyJyk4jEi0h8UpId+BSkPclpPDh+Ple9MYeypUvx35t78tglnSzJGFOM5Oa/+QVgpog8DwxR1VR/oYiUxXVpnQmcGnD/c3AJailQB3gYmC0iHVV1R3YresOurwPeVdVDvqLNwK3Ar7i5164GpotIb1X9Idy2VHUcMA4gLi7OblVdQKYs2sKITxey40Aqt/Zuyd1ntaZCWZsE05jiJsdEo6o/ich9wLPAlSIyFVjrFTcFzgFqAvep6s9Bdq6qX/ufi8jPwGrcEdRzOax+AdAYeD1km8tw530y/SQizYD7gbCJxhSspH2HGPn5Ir5csJn29avyxqBudG5ULdZhGWOiJFf9E6r6goj8BgwF+gMVvaJk3N01x6jqj/kNRlX3i8gi3LxpObkJmK2qi3JRdw5web6CM/mmqkz8bSOPTVpMcmo6D5zXlptOb0HZ0jYJpjHFWa47wr1upx+8Lqta3uIdqprdKLFARKQC0A74Lod6DXCzFdyQy013xXWpmRjZuDuZhyYu4PvlSZzY9FieHNiZVnVsEkxjSoK8zHWWAYQOJc4TEXkG+AJYhztHMwKoDLzjlY8GuqvqWSGrXgccAP4bZpv3AInAItw5mquAfsDASMRsgsnIUN6bs5Ynv16KAiMv6sA1PZtRyibBNKbEiPXQnka4mQRq4a5z+Rk4SVUzzwHVB1r6V/DufXM98H4Wk3mWww1OaIjr2lsE9FHVr6LyCkyWViXtZ+iEBH5N3MVprWvxRP/ONK5hk2AaU9KIqg2y8ouLi9P4+PhYh1GkpaVn8NqPq3lh2goqlCnFiL4duPREmwTTmOJMROaqatjZYWJ9RGOKmYUb9zBkQgKLNu3l/I71eKxfR+pUsUkwjSnJLNGYiEhJS+elb1fw6verObZSOf7vyhO4oHP9WIdljCkELNGYfItP3MmDExJYnXSAS09sxMN92lO9UrlYh2WMKSTynWhE5ATcPGcA61T1t/xu0xQNBw65STDf+SmRBtUq8u513Tm9Te1Yh2WMKWTynGhE5ETgfUBxw5MFaOqd8P2bqs6NSISmUPp+eRIPTVzApj3JDOrZjAfOa0tlm5/MGBNGfj4ZxgF3qOo0/0IROQd4DXcfG1PM7D6YyuOTljDhtw20qF2Z/93ck7hmNWIdljGmEMtPojkmNMkAqOo3IvJyPrZrCqmvF2xmxGeL2HUwldvPaMmdZ9okmMaYnOUn0WwTkWtxsyenA4hIaeBa8nZfGlNIbdubwj8+W8TkRVvo2KAq71zXjY4NbBJMY0zu5CfRDMLd7fIFEdnkLWsA/ELu7l9jCjlVZfzcDTw+aTEphzMYcn47bjytOWVsEkxjTAB5TjSquho4V0RqceSoMzuaKQbW7zzIQ58s4McV2+nW7FjGDOxCy9rHxDosY0wRlO9hQl5iOSK5iEgdVY3IxJumYKVnKO/+lMjTU5YhwOOXdOTKHk1tEkxjTJ5FazxqPH8e5ZgiYuW2fQyZsIC5a3fRq01t/tm/E42OtUkwjTH5k5/raC7OptgmtypC0tIz+Pf3q/jX9JVUKl+a5/5yHP2Pb2iTYBpjIiI/RzSfAN/jLtQMZXe0KiIWbtzDA+MTWLJ5L3261GfkRR2pXaV8rMMyxhQj+Uk0K4DrVXVNaIGIrM/Hdk0BSElL54VpK3jtx9XUqFyOf199Iud1rBfrsIwxxVB+Es07uBuWHZVocMOeTSH1y5qdDJ2QwOrtB/hrXGMeurA91SqVjXVYxphiKj/Dm0dnU/bPvG7XRM++lDSemryM//y8lkbHVuS963twautasQ7LGFPM2SyIJcR3y7YxfOICNu9N4bpTmnP/eW2oVM7+/MaY6Av0SSMi32ZTnAHsAeYCb6jq1vwEZiJj14FUHp+0mInzNtKqzjGMv+VkTmx6bKzDMsaUIEG/0grQBqiPOzezFagLNAc2e88vBO4VkV6qujiCsZoAVJUvF2zmkc8WsSc5jbvObMXtZ7aifBmbBNMYU7CCTlr1HJACnKiqLVX1ZFVtCXTzlj8KtAaSADtPEyNb96Zw83/mcscH82hQvSJf3Hkqfz+3rSUZY0xMBD2iGQWMVNV5/oWqOldEHgVGqWpnEXkaeCZSQZrcUVX+G7+eUV8uIfVwBsMuaMf1p9okmMaY2AqaaNqQ9S0AkoBW3u+rgMp5DcoEt27HQYZOTGD2qh10b16DJwd2oXkt+xMYY2IvaKJJBG4Avg5TdpNXDu76mh15jsrkWnqG8vbsRJ6ZsozSpYRR/Trxt+5NbBJMY0yhETTRPAa8JyIJwARgG1AHGAh0Av7m1TsbmJPTxkRkJPBIyOKtqhr2EnURaUb4C0QvUNXJvnq9cOeTOgKbgKdUtdhdRLp86z4eHJ/A7+t3c0bb2vyzf2caVK8Y67CMMeYIgRKNqn4oIttxJ/0fAsoCabjZms/13dr570B6Lje7DOjte56b9c4H5vue78z8RUSaA18BbwJXAacCY0UkSVUn5DKmQi31cAavfr+Kl75dwTHly/Di5V25+LgGNgmmMaZQCnzFnqp+A3wjIqVwXWTbVTUjpE5KgE0eVtUtAcPYkc06twCbVPVO7/kSEekB3I87CivS5q/fzZAJCSzdso+LjmvAyIs6UPMYmwTTGFN45WcKmgxc11l+tRCRjUAqrrvtIe/undmZKCIVcBN7Pq+q431lPYGpIfWnAINEpKyqpoVuTERuwp1jokmTwnkbneTUdJ6ftpzXf1xN7Srlee2aOM7pUDfWYRljTI4CJxoRqQ/cB/QCauBO+s8AnsvDkckcYDCwFHeu52Fgtoh0VNVwgwn2445MZgGHgYuBj0VkkKq+59WpB0wLWW8r7rXWwl1YegRVHQeMA4iLi9OAryHqflq1g2ETE0jccZArujdm2IXtqVrBJsE0xhQNQaegaQP8CByL+7Bfiftgvxu4RkROU9UVud2eqh4xek1EfgZWA4NwJ/ND628HnvUtiheRWsCDwHv+qqGhZ7G8UNubksaYr5fywZx1NKlRiQ9u6MHJrWwSTGNM0RL0iOZJYC/QQ1UTMxeKSFNcd9WTwIC8BqOq+0VkEW52gdyaA1zre74Fl/z86uCOgIrMkOtvl27loYkL2bYvhRtPa87fz2lLxXJ2Zb8xpugJmmjOAG7xJxkAVV3rDVUem59gvPMu7YDvAqzWlSO7w34C+oXUOQeID3d+prDZsf8Qj01azGe/b6Jt3Sq8evWJdG1cPdZhGWNMngVNNOWAfVmU7fPKc01EngG+ANbhjjpG4GYUeMcrHw10V9WzvOeDcMOp5+Fmi74IuB0Y4tvsq8AdIvIC8G/gFNx5oCuCxFbQVJXP52/i0S8Wsy8ljXvObs1tvVtRroxNH2OMKdqCJprfgTtF5Gv/kGZxF3Dc5pUH0Qj4EHeSPgn4GThJVdd65fWBliHrPAw0xV1vsxy4zjcQAFVdIyIXAs8Dt+Iu2LyrMF9Ds3lPMg9/spDpS7dxXOPqPDWwC23rVYl1WMYYExGimvvz4yJyPjAJN5fZx7guq3rAZbjzKn1UNXRocZESFxen8fHxBbKvjAzlo1/XM/qrJaRlZHD/uW259pTmlLbpY4wxRYyIzFXVuHBlQWcGmCwifXGzOA/HjeZS3M3O+hb1JFOQErcfYOjEBH5evZOeLWoyZmBnmta0STCNMcVPXmYGmAxMFpFKuGHOu1T1YMQjK6bSM5Q3Z67h2W+WUbZUKUYP6Mzl3Rrb9DHGmGIrPzMDHAQswQSwdMtehoxPYP6GPZzdvg6j+nWmXrUKsQ7LGGOiKsdEIyIZ5P5CR1XVPCev4urQ4XRe+W4VY79bSbWKZXnpiuPp26W+HcUYY0qE3CSFxyhiV9QXJvPW7WLIhASWb91Pv64N+MdFHalROdAocGOMKdJyTDSqOrIA4ih2DqYe5tmpy3lz1hrqVa3Am4PjOLOdTYJpjCl5rJsrQj6dt5Gnpyxj0+5kalYuhwI7DqRyZY8mDL2gHVVsEkxjTAlliSYCPp23kWETF5Cc5u7Ztv1AKgLccUYr7j+vbWyDM8aYGLP5TSLg6SnL/kgymRT4ZN7G2ARkjDGFiCWaCNi0OznQcmOMKUks0URAg+oVAy03xpiSxBJNBDxwXlsqlj3yXjEVy5bmATs/Y4wxNhggEvod3xDgj1FnDapX5IHz2v6x3BhjSjJLNBHS7/iGlliMMSYM6zozxhgTVZZojDHGRJUlGmOMMVFlicYYY0xUWaIxxhgTVaJqdwDwE5EkYG0+NlEL2B6hcEoCa69grL2CsfYKJj/t1VRVa4crsEQTYSISr6pxsY6jqLD2CsbaKxhrr2Ci1V7WdWaMMSaqLNEYY4yJKks0kTcu1gEUMdZewVh7BWPtFUxU2svO0RhjjIkqO6IxxhgTVZZojDHGRJUlGmOMMVFliSYbInK7iCSIyF7v8ZOI9PGVi4iMFJFNIpIsIjNEpGPINsqLyEsisl1EDojI5yLSqOBfTcETkYdEREXkZd8yazOP1w4a8tjiK7e2CkNE6ovIOyKSJCIpIrJYRHr5yq3dPCKSGOY9piLypVdeIG1liSZ7G4AhwAlAHPAt8KmIdPHKHwTuA+4EugHbgG9EpIpvGy8AA4ErgNOAqsAkETnylpzFjIicBNwIJIQUWZsdaRlQ3/fo7CuztgohItWBWYAAfYD2uPbZ5qtm7fanbhz5/joBUOC/XnnBtJWq2iPAA9gJ3Ix7o28GhvvKKgL7gJu959WAVOBKX53GQAZwXqxfSxTbqBqwCjgTmAG87C23NjuynUYCC7Mos7YK3y5PALOyKbd2y779hgO7gUoF2VZ2RJNLIlJaRC4HjgFmA82BesDUzDqqmgz8AJzsLToRKBtSZz2wxFenOBoHjFfVb0OWW5sdrYWIbBSRNSLykYi08JZbW4XXD5gjIh+LyDYR+V1E7hAR8cqt3bLgtdH1wHuqepACbCtLNDkQkc4ish84BLwK9FfVBbg/EMDWkFW2+srqAekcPUmdv06xIiI3Aq2AEWGKrc2ONAcYDFyA62asB8wWkZpYW2WlBXAbsBo4D3gRGAPc7pVbu2XtHFxyed17XmBtVSZQmCXTMqArUB3XT/mOiPT2lYde8SphloXKTZ0iR0Ta4ro2TlPV1GyqWpsBqvq1/7mI/Iz7AB0E/JxZLWS1EtlWPqWAeFUd5j2fJyKtcYnmZV89a7ej3Qj8qqq/hyyPelvZEU0OVDVVVVeqauab+3fgXiBzdFBoVq/Dn98QtgClcVNvZ1WnOOmJe60LReSwiBwGegG3eb/v8OpZm4WhqvuBRUBr7P2Vlc3A4pBlS4Am3u/WbmGISB3gEuA13+ICaytLNMGVAsoDa3B/hHMyC0SkAm5Uxmxv0VwgLaROI9xImcw6xcmnuFFTXX2PeOAj7/flWJtlyWuLdrgPU3t/hTcLaBuyrA1/3kPK2i28wbju/498ywqurWI9CqIwP3B9v6cBzXAfoKNxoy0u8MqHAHuBAUAn74+4Caji28b/ARuBs4Hjge9wR0WlY/36CqgNZ+CNOrM2O6ptnsEd8TUHegCTvLZpam2VZZt18z74huPOBV4G7AFut/dYlm0muC95r4UpK5C2inkjFOYH8Dbum9Ih3PjyafiG9Hl/wJG4b6ApwPdAp5BtVABewnUbHQS+ABrH+rUVYBuGJhprsz9fZ+Y/dar3jzwB6GBtlWO79QHme22yHLgLb4Jga7ew7XUG7nxK9zBlBdJWNnuzMcaYqLJzNMYYY6LKEo0xxpioskRjjDEmqizRGGOMiSpLNMYYY6LKEo0xxpioskRjTJRlceOp0EdiNuu/7as3w7e8mbfshpD6tURknojsFJFu3rJpvm28F63Xakw4NqmmMdHXM+T5J7gLDkf6lh3KYRtbgP64q7izJCJ1gelAXeBM/XMCxTtx9xb5JFcRGxNBlmiMiTJV/dn/XEQOAdtDl+fgUE71RaQhLslUA3qp6h+TT6rqEt++jSlQlmiMKQZEpAnuVuPlcUlmeYxDMuYPlmiMKfpa4O6KqMDpqromxvEYcwRLNMYUfcNwd0HsaEnGFEY26syYom8ybhbeF0WkfKyDMSaUJRpjir4JwHXAucD/RKRsjOMx5giWaIwpBlT1HeAW4CLgAxEpHeOQjPmDJRpjiglVHQfcDVwKvCsi9v9tCgUbDGBMMaKq//LO0zwFHBKR69XubmhizBKNMcWMqj7tJZvHcbfnvS3GIZkSzhKNMQVMVZvlZT0RKeNW13RvO4m40Wbh9jEKGOVbtxTWVW5ixN54xhQNTYE03BQzeTHVW79pxCIyJpfEum+NKdxEpBlQy3u6T1WX5WEbbYCq3tMddmGnKUiWaIwxxkSVdZ0ZY4yJKks0xhhjosoSjTHGmKiyRGOMMSaqLNEYY4yJqv8HBVNCxaYvzDsAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "## draw 1D sensitivity curve \n", + "# this problem has three degrees of freedom, to draw 1D curve it needs to fix two dimensions\n", + "fixed = {\"'CA0[0]'\": 1.0, \"('T[0.125]','T[0.25]','T[0.375]','T[0.5]','T[0.625]','T[0.75]','T[0.875]','T[1]')\": 300}\n", + "\n", + "all_fim.figure_drawing(fixed, ['T[0]'], 'Reactor case','T [K]','$C_{A0}$ [M]' )" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Draw 2D sensitivity curve" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZAAAAElCAYAAADKuLQKAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAA3CElEQVR4nO3de/zec/3H8cdzThPKYTFyPpYklZSIFVsOHcgpykwH1X7IIWmkRklF03KoSBahgwoThhXCsCkVIbWDTNicxwzz+v3xfl98drkOn+931/f6bh/P++123a5dn8/78/68r8/13fW63sePIgIzM7OeGtDfBTAzs8WTA4iZmfWKA4iZmfWKA4iZmfWKA4iZmfWKA4iZmfWKA0gPSBohKQqP5yX9R9K3JQ1cBMr26f4sQ1VIWkvS/Pz5DurHcoyW9MEG28dJmt4P5VngvJLWzf8PRhS2+e/wNcQBpHf2ArYGdgUmAKOAk/u1RDAC8H/czhhO+r+xFLBvP5bjG8CrAgjwTWD3Lpelkf+R/h/8obBtBP47fM1Ysr8LsJi6IyL+nf99jaSNgM9I+lJEvNSfBeskSctExLz+Lkc/GA7cCbweOAA4rX+Ls6CI+E9/lwEg/23c0t/lsP7jGkhn/AVYFni5uUPS6yR9V9K03BQyTdKxkgYU0gyUdKqkOyXNkfSQpPGS3lx/AknrSTo/p5knaaqksXnfdcD2wDaF5rXrCsduJenafI5nJE2UtFVd/uMkPSBpa0k3S5oLfK/Vm5a0u6Sbcr5PSbpN0kcL+w+WNEnSY5KekHSLpF3r8lhS0jdzU+BzkmZLulHStnXpPifpb4U050hauVX5ekPS1sDGwHnA+cC7JL215LFLSfqWpOn5M5+eXy9VSFNr9hkpaYykRyQ9K+lySesW0tWWiDi28JmOzvuaNSV9QdJJ+W/kaUm/yH+HG0qakD+nf0s6oK7cG+a/rWmS5ua/rR9JWqnN+12gCavZ36Gkd+V/f6xBHrW/uyXKXGNbtLgG0hnrAk8Cj0L6UiQ1bW1Kam74B/Be4DhgZeDIfNwywArAt0jNASsDI4FbJL05Ih7K+a0H3AY8S2rWuA9YCxiW8xkJ/AJYAvh83vZUPnZz4Hrgn6TmhQC+Clwv6b0R8bfC+3gD8EvgFOAYYG6zNyzpEOCHwCWkX+lzgHfma1G8Lj8FppP+1j4CXC5pl4i4Mqc5GjgcOBa4g/Srf8t8LWrn+k6+Zj8EjgLelK/ZZpLeFxHzm5WzFw4AXgIuAJbP5Rqey9nOz4G9gW8DN5Kad74GrA/sV5d2FOn9Hgismo+5WtJbI+KFfOwkYBzwk3zMA23OPwq4Lr+HTUk/AF4C3gGcTfpcvwicK2lKRNyVj1sj530Y8Hgu7zHAFbkcZTX8O4yIf0qanLddWkssaUXS9fpehz9D65aI8KPkg1e+gDchfSGuRGrvfRE4uJBu/5xuu7rjjwWeB1Ztkv8SwOuAp4HDC9vPI31Br9GibNcBNzbYfjHwBLBiYdvrgceA3xW2jctl/liJ6/D6XMbftUtbOGZAvmZXA5cWtl/eKh9SEJoPfL1u+za5vLt18PNdhvQFOqGwbRIwExjQ5tjNcnlG123/Wt6+eeH9BCmgDyikq72fzxS2BfCtBucaB0yvu0YB/LEu3e/y9k8Vtq2U/16/0eK9LAlsm499R4nzjijxdzgif47rFLYdmsuyZqc+Qz+6+3ATVu/cA7xA+hI+B/hJRJxe2L8TMAO4OTfRLJlrJVeTOmbfW0soaW9Jt0p6gvSf6RnSL99NCvkNAy6PiAd7Udbt8rFP1DZExFPAZaTmhqIXSV/o7bwvl/GsVoly08Xlkh7Oeb8ADGXB9zYZ2EXSiZK2lbR0XTZDScHngrpreSuplrVdi/MPKB6jQvNhEx8DViQF7Jqfk36h79jm2Fo5flG3vfa6/lpfHIX+soi4iVQL6Mkv/npX1r2+Jz9PKJznceARUg0WAElLSzpG0j256fIF4M95d/GzWhi/JP2Q+Vxh2+eBP0REu5qVLaIcQHpnd+DdwC7AtcBIScML+1cF1iH9Ryw+bsv7VwGQ9BHgV8DdpCaO9+R8ZwHFYcGr0L75opmVSc1j9R4i/RoteiTKNSWskp+blknSWsDEfP5DSEHn3cBVLPjevk1qlvso6UvrUUnn6pXhs6vm53/z6uv5+kJZGvlZXfqftXlfB5CaCf8kacXcxDIhH3tAqwN5pcmt/lo/VLe/5uEGeTxMap7rrcfrXj/fYnvxMzgJGE0KdrsCWwEfz/s6Mjw9Ip4DziUNNllS0vtJzWw/7kT+1j/cB9I7d0YehSXpj8DfgZMl/TYiniH1hUwjte82Mj0/fwL4d0SMqO3IHa71Xzaz6f0Xy2PA4AbbB+d9RWXX9p+dn99EGq3UyE6kPpW9i78wJb1ugROm9v7vAt+VNBj4MDCG1JS3D7lfiVQLq/8ipLC/kdFAsWY4u0k6JK2Wz7Ekqcmq3u6SVoiIp5tkUbuWg4HiKKnata8v52oN8liN1C/SbZ8AzouIb9U2SFq+D87zI+AIUk1vd9L/gwmtDrBFmwPIQoqIeZKOInUOjiTNB7kK2AOYExH3tDj8daSmnaL9SX0hRVcDH5e0ekQ0qk0AzCN1yNe7Hti1+OUnaQVSh/Z1LcrWys2kPpmDaP4FUAsUL9Q2SNqY1NbfsOYSadDATyXtQupTALiG1BG8dkRc05NCRsR0XgnW7XyK9P/hi7zS9FPzduAHpPk/zWox1+fnTwAnFrZ/Mj/fUJd+T0mja81YkrYB1iT1udQ8Txrd19deR+Fzyg7sZV7N/g6JiP9Iupo0EGIL4ISo0LD31yIHkA6IiMvyKJMvSzqdNILnQGCipO8DfwOWBjYgNdXsFhHPkgLNbpJOJfU9vIvUsfhE3Sm+QWpauFnSt0nNOW8CdoqIT+U0/yQ1pe1D+gX8dETcSxoF9uFclu+SahlHk740Tujl+31a0ijgNEm/ze/3adKXwnMRcRqpae9F4Lx8DVYHjgfup9B0KunSfH3+QqphvINUe/lJPtd/crlPl7QJ6Yv6OVIb/lDgpxHxp968jzrDSbXGn0TEAjUxSX8GvkJqxmoYQCLiLkkXAaNzH83NpP6M44CLIuLvdYesAFwi6SfAG0nNSPexYP/LP0nB/yrStXmwl/1g7VwFHCDpH6S/rY+Tmhx7o9nfYc2ZpB9bZZoUbVHX3734i9ODV0Zhbdhg37C87/D8eiCpCeUe0q+yx0gdxqOBJXOaAaThqA+S2t6vJ32BTgfG1eW/AXARqRlmHjAVOLWwfzBp2OXTuRzXFfa9h/SFPofUST8R2Kou/3HAAz28HnuSOrPnkjq0bwU+XNi/d37/zwF3kX6dj2PBkTxHkiajPZrzuTdfo6XqzrV/TvdMfh93k5qnFnoET77mARzXIs2JpJrQei3SLJU/zxmkL8gZ+fVShTTr5nONJDXVzcqf/R/q8ybV1m7P1+/lEV4NrmEtz8/WHT86b1+ybvt04BeF14NIndyP58cFpP6q+hFWzc5bTNP07zDvXyJ/hr/p7//Pfiz8Q/lDNbMuyJMFpwGfi4if9nNxuk7SUFKT7I4RMbG/y2MLx01YZtbnJG1AmqB4KvAXB49q8DBeM+uG40jzVOaR+pusAtyEZWZmveIaiJmZ9YoDiJmZ9Yo70UsYNGjlWHfdNfu7GNYDf739H/1dBOuhl2B2RLyxt8cvKZVukH8pLZi5U2/PZYkDSAnrrrsmU6Zc1t/FsB5YTuv1dxGsh55N82Z6LYDlSqZ9unDvHus9BxAzqwTx6jWArG85gJhZJYi0FIB1jwOImVWGayDd5QBiZpUgPKy02xxAzKwyXAPpLgcQM6sEd6J3nwOImVWCO9G7zwHEzCrDfSDd5QBiZpXgJqzucwAxs0pwAOk+BxAzqww3YXWXA4iZVYJrIN3nAGJmleBRWN3nAGJmleEaSHc5gJhZJXgpk+5zADGzynANpLscQMysEtyJ3n0OIGZWCe5E7z4HEDOrDNdAussBxMwqwZ3o3ecAYmaV4RpIdzmAmFkluAbSfb7eZlYJtVFYZR5t85JGSZos6SlJsySNl7RZieP2lnSHpGclzZB0VIM0S0s6QdI0SfMk3S/p0ML+EZKiwWNgIc3oBvsfKvHWOso1EDOrhA6PwhoCnAlMzlmfAFwradOIeKzh+aWdgQuBQ4GrgLcAZ0uaGxGnF5JeBKwFHATcB6wGLFuX3bPABsUNEfFcXZp7czlr5pd8bx3jAGJmldGpPpCI+FDxtaT9gSeBbYDxTQ7bHxgfEWfm11MlnQQcLemMiAhJw4AdgQ0iYnZON71xEaJdjeLFEmn6lJuwzKwSOtmE1cAKpO/Lx1ukWQaoryXMBdYE1smvdyPVao6Q9ICk+yT9UNLydcctm5vAHpB0uaR3NDjf+pJm5qawX0pav8fvaiE5gJhZZQwo+QAGSZpSeBzUJuuxwB3ApBZpJgC7SRomaYCkjYEj877V8/P6wLbA24E9gIOBnYBxhXzuBT4NfAzYlxSUbpK0USHNrcAIYGfgc8Bg4GZJq7R5Hx3lJiwzq4QeLmUyOyK2LJWvNIb0pb9tRLTqZzib1G9xKak75ilS4BnNK/0TA4AA9ouIJ3P+BwMTJK0WEQ9HxCQKgUrSzaTgdQipf4WIuLKujLcAU4EDgDFl3lcnuAZiZpXRgxpIKZJOJdUCPhgRU1uljeRoYHlSk9Vg4La8e3p+/h8wsxY8srvz89pN8p0PTAE2arQ/p5kD3NUqTV9wADGzShCwdMlHqfykscB+pOBxT9lyRMT8iJgZEc+Tgs+kiHgk774JWKOuz2Pj/DyjSTkEbE4KPs3KOhB4c6s0fcEBxMwqoTaRsBM1EElnAAeSAsDjkgbnx/KFNCdJmlh4PUjSFyW9RdIWOQDtBRxWyPpC4FHgXElvlbQNqZnr4lqQkfQNSR+StL6kLYBzSAHkx4VznSJpe0nrSXoPcDGwHPDz0hesAxxAzKwyOjgKayRp5NVE0q/62uPLhTSrUzdXAxhOGmV1E/BWYEhE1Jqxak1NOwJvyOl+DVxP6jSvWRE4i9S0dTXwJmC7Yj6kkV0XkTrcfwfMA94bEQ1rMX3FnehmVgmdvB9IRKhEmhF1r2cDW5c47l5gWIv9hwOHt8njE+3O0w0OIGZWGW5S6S4HEDOrBN9QqvscQMysEnxL2+5zADGzSnAA6T4HEDOrDPeBdJcDiJlVgmsg3ecAYmaV4QDSXQ4gZlYJHoXVfQ4gZlYJvid6eZLeBAwF3gusQboj4mzSzPbrgesj4qV2+fh6m1ll9OENpSpB0hBJl5NWB/4ZsAtpqZRlSfcoOZK0fMt/833XX98qPwcQM6uEPr4j4WIvB44rgGeAvYE3RsTaEfGuiNg2IjYlrdG1Bel+8HsB/5H0oWZ5ugnLzCrDv4hb+jfw2Vb3Uc/NVn/PjxMlfZQUVBpyADGzSvAw3tYi4rBeHHNZq/0OIGZWCR6F1X0OIGZWCa6B9IykdWk8CuuWiHiuTB5dbTKU9H+S/i7pqfyYJGnXwn7lnv8HJc2VdJ2kt9blsYyk0yTNlvSMpMskrVni3HtI+qekefl59754j2bWfzp9T/SqkbSipC9Lugf4D+kOiacA3wR+BPwReELSryUNaZdft6/lA8DRwDuBLUmFvUTS5nn/V0jDyA4B3g08AlwjaYVCHj8A9iDdavL9wOuByyU1/fEhaWvgV8AFpBEGFwC/ybeCNLMK8Cis1iQdCUwFjgAmkEZibUjqJF8aGEy6IdbRpLsiXiPpWkmbNM0zIvq42K1JegwYRbqF44PA6RFxYt63LCmIfDkifiLpDcAs4MCIuCCnWYt0M/qdI2JCk3P8Clg5IoYWtl0LzIqIfduVccstN48pU1r2JdkiZjmt199FsB56Fm6PiC17e/wGUny3ZNq9FvJciyNJfwFOAC4rM0lQ0hqkW/g+GBGnNErTb7U5SUtI+gSwPHAzsB4pAl5dSxMRc4EbgPflTe8i9ZMV0/yXdO/gWppGti4ek01oc4yZLUZqnehlHq9FEfHOiLikTPDI6R+MiCOaBQ/oh050SW8DJgEDgTnA7hHxD0m1L/OH6w55mDRTElKAmU/q7KlPM7jFaQc3ybfpMZIOAg4CWHvtNVpkbWaLAneid19/jMK6l9QPsSKpL+PndZ019W1qarCtXpk0Pco3Is4iNaux5Zab9287n5mV8lruIG9HUo8uT5maStcDSEQ8T5oRCTBF0ruBw4ET87bBwH8Lh6zKK7WHh0g/MgaR+kKKaW5ocdqHeHVto5ivmS3mXANp64UepA1KxIdFYR7IAGAZYBrpi34oMBlA0kDSSKujctrbSRdhKGn4GXkI71tI/SjNTMrHnFzYNrTNMWa2mHENpCUBTwG/Iw08WmhdDSCSvgP8gVTDWAHYDxgC7BoRIekHwLF5jPK/gK+R+kkuBIiIJyWdA5ws6RHgUWAMad2WawvnmQjcFhGj8qaxwA2SRgG/B3YHPgBs26dv2My6xjWQtk4APgkcQGqxOQ/4TUTM6W2G3Q7Yg4FfkPpBJpLmeuwcEVfm/d8jBYQzgCnA6sCwiHi6kMfhpAj6K+AmUoD5SETML6TZIB8LQETcDHyCdOH+DgwH9omIWzv9Bs2sf3RyFJakUZIm5wnPsySNl7RZieP2lnSHpGclzZB0VIM0S0s6QdK0PLH5fkmHFvaPkBQNHgPr8hmZ83hO0u2S3t+qbBExOiI2ArYn/UD/PvCQpAskDZOkEpdmAV2tgUTEiDb7AxidH83SPEeaaHhIizTrNth2MXBxqYKa2WKpgzWQIaQlzSeTYtMJwLWSNo2IxxodIGlnUmvJocBVpKb1syXNjYjTC0kvAtYijfK8D1iNtJRI0bOkH8IvKy4vImkfUsvKSODG/HxlLt/9rd5YRNwI3CjpEGA3YH/gcmCWpG9GxI9bHV+0KPSBmJkttE42YUXEAvfAkLQ/8CSwDTC+yWH7A+Mj4sz8eqqkk4CjJZ2Rm+mHATsCG0REbTrC9MZFaL7sOmk2+biIODu/PkTSTsAXSROz28oDmn4t6bqc35eBYUDpAOI+JzOrjD5cC2uFfOjjLdIsA9QvQjgXWBNYJ7/ejVSrOULSA5Luk/RDScvXHbdsbgJ7QNLlkt5R2yFpadKk6vrJ0VdTcnK0pIGSPiHpCtISU/uSBhkdXeb4GtdAzKwS+rgTfSxwB2lEZzMTgLG5lnEtaZ2pI/O+1Uk1jfVJg3fmkebBrQicRloRd8+c9l7g08DfSIHrS8BNkt4eEfeRpjEsQePJ0Tu2ehOSPkCqKe1JumS/A3YG/hi9WNfKAcTMKqMHtYtBkqYUXp+VJw+/iqQxpC/9besG69Q7m9RvcSmpr/4pUuAZTVpBo1bEAPaLiCdz/gcDEyStFhEPR8QkCoFK0s2k4HUIqX+lpkeToyXNIAWqP5H6TH6bl4vqNQcQM6uEAaQlZUuaXWYxRUmnkkZwfiAiprZKm3/BHy3pGNKI01nADnn39Pz8P2BmLXhkd+fntWkwuTki5udgt1Gt7KSA1NPJ0WuROuffDHwb+HaLgVcREes021njAGJmldHJTl1JY0nBY0hE3FP2uFxLmZnz2BeYFBGP5N03AXtJWr4w/2Lj/Nxwcl8eXrs5qUmLiHhe0u2kydC/KSQdCvy2RdF+XvY9lOUAYmaV0Mk+EElnkPoKdgMel1T7tT+n9sWfR1htFRE75NeDgL2A60gd6gfm19sXsr4QOA44V9JoUh/IWODiWpCR9A3gFtIQ39eTmq02J42wqhkDnC/pNlJQ+gKpearpCKqIOLDHF6INBxAzq4wOdqKPzM8T67Yfzyvz1Fanbq4GaZLyyaR4NolUe7mttjMi5kjakdRxPpk0qusS4KuFPFYkLeQ6mDR0+K/AdnX5/ErSKqTVOlYH7gR2iYiOLFFSlgOImVWC6FwTVkS0nZVdPzE6z+vYusRx95LmWzTbfzhpxY12+ZxJmuxYiqT1y6bN+bfs8wEHEDOriNpSJtbUv2l/24uithU6BxAzqwwvptiS+0DMzBrxarytRYRHYZmZNeO1mZqTNKDs/dDL8vU2s0qo1UDKPF6jXpC0Ve2FkhMKQ5R7zAHEzCrBAaSt+pFlA4BjSfNHesVNWGZWCR6F1Ss9volUkQOImVWGm1S6ywHEzCrBo7BKaTQPpMfLuNc4gJhZZTiAtDVe0vN1266Q9ELdNq/Ga2avMWXbsDo6mHWx4XkgZmYNifI3BKm/8exrgFfjNTNrppOrKVopvtxmVh2eCNKUpHf24piBkt7cbL8DiJlVg2cStnODpMsk7SSp5Xe/pLXzrXmnAR9uls5NWGZWHf5J3MomwDeBS4GnJE0i3SZ3FjAPWAlYH9gK2IwUPI6MiAubZegAYmbV4IkgLUXETODTkr5KWtr9Q8ARwLKFZNOAG0h3SJwQES3niDiAmFk1eC2TUvK917+bH0haERgIPBoR9fNBWnIAMbPqcA2kxyLiid4e6xZDM6uG2jDeMg+rLef+UUmnSDpX0jp5+/aSSq3Q6xqImVWHayClSFoJuAJ4D/AUsAJwGjAD+BzwGHBou3wci82sGjyMtydOBtYCtgEGseCy7tcCO5TJpGkNRNJ5vSzYcRExo5fHmpn1nn8Sl/Ux4MsRMUlSfUi9nxRc2mrVhPUp4CHS+OCy1gZ+QKoGmZl1j0dh9cTywMwm+wZS8kZT7fpAdouI28pkJGlJoH6ZYDOz7vA8kJ64FxhGaq6qtz3wjzKZtKrwTQSe7EGB5udjnurBMWZmndHBPhBJoyRNlvSUpFmSxkvarMRxe0u6Q9KzkmZIOqpBmqUlnSBpmqR5ku6X1LDDWtK+kkLS5XXbR+ftxcdD7d/Zy84ADpN0LKnlCGBFSQcCB+f9bTWtgUTE0B4UhjxjsUfHmJl1VOf6QIYAZwKTSaHpBOBaSZtGxGONDpC0M3AhafTSVcBbgLMlzY2I0wtJLyL1MRwE3AesxoKzwWv5rU/q7P5zkzLem8tZM7/keyMizpa0AXB8fm8A15DulPK9iLigTD6tOtHfFRG3l8lE0ukRcXCZtGZmfaKDTVgR8aEFspb2J7XIbAOMb3LY/sD4iDgzv54q6STgaElnRERIGgbsCGwQEbNzuun1GUlaihRojgU+QBopVe/FiOhJrWMBEfFVST8i/fBfFXgUuCYippbNo1W8niDpbe0ykHQO8MWyJzQz6xO1TvQyj55bgfR9+XiLNMvw6ltVzQXWBGq3h92NVKs5QtIDku6T9ENJy9cddyIwPSJa3UVwfUkzc1PYL3ONpUciYkZE/DQivh0RP+lJ8IDWAeS/pCrbJo125lmM55MW5fp6T05qZtYnyveBDJI0pfA4qE3OY4E7gEkt0kwAdpM0TNIASRsDR+Z9q+fn9YFtgbcDe5D6G3YCxtUyybWUfYAvtDjXrcAIYGfSxL/BwM2SVmnzPmrnOFDS6Cb7Rks6oEw+rUZh7QhcD0yUtH1E/KdwgiVIbX17AV+JiFPKnMzMrM/07I6EsyNiy1LZSmNIX/rbRkSrfoazgQ1Iy6UvRRpQNBYYzSv9EwOAAPaLiCdz/geTWnxWy+nG5f1NazsRcWVdGW8BpgIHAGNKvK0vAec02fcIcBgl7qHe9HJHxKPAB4E5pCCydi7oUsBvScHjSw4eZrbI6PBMdEmnAvsCH2zXvBPJ0aQ5FuuQagW1aRDT8/P/gJm14JHdnZ/XJt2HY3VS68+Lkl4EhgO75NcNW4QiYg5wF7BRybe2YU7fyN2kQNhWy3idl/3dAXgR+KOkjYDLgI8An4+I00oW1sysb3V4KRNJY4H9SMHjnrLFiIj5ETEzIp4nBZ9J+bsU4CZgjbo+j43z8wxS/8jbgC0Kj8tII7G2IN2vo1FZBwJvJgWoMl6kccc8wBtL5tF+McWImCnpA6SbjNxF+phGRMT5ZU9iZtYVHRrGK+kM0qiq3YDHJQ3Ou+bkX/vkEVZbRcQO+fUgUsvMdaQO9QPz6+0LWV8IHAecm/sgViQ1c11cCDJ31pXlCWDJiLizsO0U0miw+0kjqI4DlqNEs1N2G6mP5dcN9n2BFMjaajWM99N1my4ERgFXAkvV74+In5U5oZlZnxCwdMdyG5mfJ9ZtP57UpwGpqam+qWc4ae6GSB3uQ4qreUTEHEk7kla+nUwa1XUJ6Q6APbEmaZjvINItaW8B3tuDdQhPJDWT3Qr8lLSsyZuAzwLvpOScPjW7Y6Gkl0oWBFLzX2UXEdhyy81jypTL+rsY1gPLab3+LoL10LNwe9mO7Ua2XEkx5QPl0ur3C3euKpD0MdLahesUNk8HDouIUl94rZqw/D/QzBYfXgurRyLiUuDS3DG/Cmlk2r96kkerpUy8oq6ZLV68nHuPRcS9vT3WdyQsZWlg3f4uhPXAM/G5/i6C9ZB09kJmgGsgPSTp7cAmpCXcFxARbe8J1aoT/Y/AyLLD1yQNIC0N/PmIuK/MMWZmHeUAUoqkFYE/AO+tbcrPxU7xtgGkVYVvCGn9l9Jl6sUxZmad0bdrYVXNt0n9HtuRrtzupInjF5BmtG9VJpN2TViXSOrJHQkbD+kyM+trbsLqiQ+RhiTfkl8/kFdfvy6v0Psl0pDklloFkLITUurNbp/EzKwPuBO9rNWBqRExX9JzLNhy9Dvgl2UyaTUK68CFK5+ZWRe5BtITD5FmwUNaQmVr0gx6SOtkleJRWGZWHa6BlHUjKWhcDpwPfEPSuqQ1sg4grb/VlgOImVVDZ5cyqbrjgTXyv08mdajvA7yOFDwOKZOJA4iZVUPP7gfympbv7/Sf/O8XSDe+OrLlQQ34cptZdXT4fiBVJGlpSY9J+ujC5uUaiJlVgzvRS4mI5/ONqurv395jTWsgkqbmae5mZouHASUfdgmw58Jm0qoGsi7ppihmZos+10B64krgh5IuJgWT/1E3ETwi/tguEzdhmVk11JYysTJ+m58/nh81QbqSQYlw3C6AeGkSM1s8uAbSEx+kA9/v7QLI8ZLKLE0SEXHAwhbGzGyhuH+jlIi4rhP5tAsgWwBlFlN0TcXM+pdrIKVJmgrsHhF/a7BvM+CyiFi/XT7tAshuxRvCm5kt0hxAylqX5oOkBrLgfdKbcie6mVWDZ6L3VLOWoy2BJ8pk4ABiZtXgUVgtSTocODy/DGC8pOfrki0LrMzCLuduZrbYcRNWK1OBifnfBwBTgFl1aeYB/wR+WibDVvcDcWXQzBYf7kRvKSIuBS4FkARwQkRMW5g8HSTMrDo6tJSJpFGSJkt6StIsSePz6KR2x+0t6Q5Jz0qaIemoBmmWlnSCpGmS5km6X9KhTfLbV1JIurzBvpE5j+ck3S7p/e3fWRIRBy5s8AAHEDOriloNpDOr8Q4BzgTeR5p09yJwraSVm55e2hm4EDgL2AwYCRwu6eC6pBcBOwEHAZsAewF/b5Df+qR7dfy5wb59gLHAt4F3ADcDV0pau0X5Dpc0sNn+Jse8U9JOzfY7gJhZNdQ60cs82oiID0XEuRFxZ0T8A9gfeCOwTYvD9gfGR8SZETE1Iv4AnAQcrdxmJGkYsCOwS0RcExHTI+LW+ol9kpYiBZpjSX0X9Y4AxkXE2RFxd0QcQlrP6ostyjccmC7pO60WypW0kqT9JV1NunPh65uldQAxs2robA2k3gqk78vHW6RZhlcvkT4XWJNX5lXsBkwGjpD0gKT7JP1Q0vJ1x50ITI+In9efRNLSwLuAq+t2XU2qMTXzTuArwM7AXyU9IenPkn4n6SJJV0n6FzAb+AkwE9g0In7dLEOPwjKz6ui7n8RjgTuASS3STADG5lrGtcCGvHKXv9WB6cD6wLak0U57ACsCp5FuL7snvFxL2Ye0Ekgjg0hh8OG67Q+TajcNRUQA5wHnSXoPqRntPblMA4FHSc1lJwKXRsQTLd4r4ABiZlXRs1FYgyRNKbw+KyLOapitNIb0pb9tRMxvkefZwAakkU5LAU+RAs9ooHbcANIcjP0i4smc/8HABEmr5XTj8v5WtR149URANdjW+MCIW4Fby6RtxQHEzKqjfACZHRFbtksk6VTgE8AHIqJRX8TL8i/8oyUdAwwmzbHYIe+enp//B8ysBY/s7vy8NrAcqbZybe42gVyvyncRfCswjRRoBtcVYVVeXSvpUw4gZlYNHV7KRNJYUvAYEhH3lD0u11Jm5jz2BSZFxCN5903AXpKWj4g5edvG+XkG8AzwtrosvwWsBPwfMC3fkvZ2YCjwm0K6obxyn49W72tVUj/IpqRZ5wCPkSYQXlkoa1sOIGZWDR1cykTSGaRRVbsBj0uq/dqfU/vil3QSsFVE7JBfDyINyb2O1KF+YH69fSHrC4HjgHMljSb1gYwFLi58cd9ZV5YngCUjorh9DHC+pNtIQekLpH6UH7d4TwNI/RuHA0sDz5IGBSiX43XA85J+AIzKNaqWHEDMrDo6NxN9ZH6eWLf9eFKfBqSmpg3q9g8nzd0QqcN9SHFF84iYI2lHUsf5ZNIX+CXAV3tSuIj4laRVgK/lctxJGho8o8Vho4DDSEHk/IiYXtwpaR1S0DwWeDqna8kBxMyqoYNLmUSESqQZUfd6NrB1iePuBYb1oCwjmmw/kzTZsazPkWoWP2iS3wzgW5KeAb6EA4iZvaZ4ZlsrqwF/LZHuLzltW77cZlYNfTuRsAruJg0KaGdfoNSgAddAzKw6XrvBoYwTgIslbQL8AriL1AcTpNFYbwU+Ser037NMhg4gZlYNvqFUSxFxiaRdSetz/ZTGExH/BnwkIq4sk6cDiJlVg29p21ZETCDNel+TVONYmXTlHgPuioj/9iQ/BxAzqw43YZUSEQ8ADyxsPo7XZlYN7kTvCEmDJG1XJq0DiJlVR4fuSPgatz3wpzIJ3YRlZtUwgLRAh3WNA4iZVYdrF01J+lnJpOu0T5I4gJhZNXRwKZOKGgE8Ccxpk27Zshk6gJhZdbgG0sr9wNURcVCrRJL2BH5VJkMHEDOrBtdA2pkCtL2JFiXvagiO12ZWJR7G28plwOwS6f5JWvakLddAzKwavJRJSxFxHnBeiXR3k+570pYDiJlVg5uwus4BxMyqw43yXeXLbWbV4KVMSpP0kqT5TR4vSnpU0jWSWt45sasBRNJ0SdHg8Ye8X5JGS3pQ0lxJ10l6a10ey0g6TdJsSc9IuiyvLNnu3HtI+qekefl59756n2bWT7yUSVnfBP4LzALGAd8Ffp5fPwCcD7wRuFLSh5tl0u1L+W7SDeBrj3eShoz9Ou//CnAkcEhO+whwjaQVCnn8ANiDdNes9wOvBy6X1PR3haStSeOaLwC2yM+/kfSeDr0vM+tvtaVMyjzsOWAasG5EfCYijomITwPrAdNJgeSdwNXAMc0y6WoAiYhZEfFQ7QHsAjxF+jIXcBjwnYj4bUTcCRwArADsByDpDcBngKMi4pqI+AuwP7A5sGOLUx8G/CkiToyIuyPiROC6vN3MqsI1kLK+AJwaEc8VN0bEXOBU4AsR8RLpxlObN8uk3y5lDhifAX4REc+SIt9gUsQDXn4zNwDvy5veRRqoV0zzX9K9fmtpGtm6eEw2oc0xZra40RLlHrYqzQc9Lw2skv89m9S71FB/xuKhpKDx0/x6cH5+uC7dw4V9g4H5vHoyTDFNI4Pb5Psqkg6SNEXSlFmzZrXI2swWDSINLC3zeM2bAoyWtHpxo6Q1gG/k/ZAWVnywWSb9eSU/B0yOiDvqtje6T2+7qfVl0vQo34g4CzgLYMsttyw9td/M+kstgJQxry8Lsjj4EjARmCZpEqm/eVVSa82zwKdyug2BC5tl0i81EEmrAh8Dzi5sfig/19cKVuWV2sNDpEF4g1qkaeShNvma2WLPNZCycv/xhsAY4CXgbfn5+8BGtR/2EfH1iPhGs3z6qwlrBOknwC8L26aRvuiH1jZIGkgaaXVz3nQ78EJdmjWBtxTSNDKpeEw2tM0xZrZYETCw5KNNTtIoSZMlPSVplqTxkjYrcdzeku6Q9KykGZKOapBmaUknSJqWpxXcL+nQwv69cvP5E3mqwh2SDqjLY3SD6RAP1Z+rlYh4NI++2iEiNs3Px0bEo2Xz6Hoozp3nnwV+GRFP17ZHREj6AXCspHuAfwFfI61df2FO86Skc4CTJT0CPEqKoH8Hri2cYyJwW0SMypvGAjdIGgX8Htgd+ACwbV++VzPrpp40YbU1BDgTmJwzPgG4VtKmEfFYw7NLO5O+qw4FriL9sD1b0tyIOL2Q9CJgLeAg4D5gNRa8B8ejwLeAe0g/mD8MnCNpVkRcUUh3by5nzfyevklJK5OarVYm9S3f2uz9NdIfdbkhwEa80sZW9D3ShTwDWAm4FRhWDDTA4cCLpHkdy5La8YZHRPHibUCaJANARNws6ROkD+V44D/APhFxa4fek5n1u84FkIj40AI5S/uTbsa0DTC+yWH7A+Mj4sz8eqqkk4CjJZ2RfyQPI0052CAiaoOBpted+491+Y7NNZD3A8UA8mKeDtErkr5Fmne3TGHzPEmnRMRxZfLoegCJiD/RZFhYRAQwOj+aHf8caaLhIS3SrNtg28XAxT0qrJktZvpsiO4KpCb/x1ukWYY0Qa9oLrAmaTTTdGA3Uq3mCEnD8/4rgWMi4lV3CswtNh8ENgGOrdu9vqSZwPOkH9vHRMTUMm9G0mGkCYLnAL/glX7iTwHH5NrOD9vl494kM6uIHtVABkmaUnh9Vh552cxY4A5Sf2ozE0i1hWGkJvUNSb/wIa28MR1Yn9R0Po+0osaKwGnAGsCeL7+TNGl6JikozQf+LyKuLJzrVlJf8j2kAUFfA26W9NaSfRhfAMZGxOGFbfcC10uaA4wEHEDM7LWiRwFkdkSUuTsfksaQvvS3rWsqr3c2qfn8UtIkvadIgWc0r/RPDCBNH9gvIp7M+R8MTJC0WkTURoY+TVp2aXlgB2CMpOkRMRGgLpgg6RZgKmn1jjEl3ta6wB+a7PsD8MUSeXhSv5lVxQA6NQqrRtKppHX3PtiueSiSo0lf+uuQmoRuy7un5+f/ATNrwSO7Oz+vXcjrpYj4d0TcERHfB35DizWpcvPXXaT+5TIeBZqNKntr3t+WA4iZVUjn5oFIGktah++DEXFP2RJExPyImBkRz5OCz6SIeCTvvglYQ9LyhUM2zs8zWmQ7gAU7u+vLOhB4MylAlfF74JuS9pe0VM5jSUn7kkac/bZMJm7CMrOK6NwoLElnkEZV7QY8Lqk2EXlOrbM7j7DaKiJ2yK8HAXuRFmpdBjgwv96+kPWFwHHAuZJGk/pAxgIX14KMpGNJfRxTcz675LK8PHBI0imk0WD3k/pAjgOWIy3JXsYo4O05/c8kPUYayrsEcCMtajtFDiBmVhEdnQcyMj9PrNt+PK+MEl2d1OdRNBw4ORdmEjAkImrNWETEHEk7kjrOJ5NGdV0CfLWQx/LAj0ijt+aSOsqHR8RFhTRrkuaTDCItvX4L8N6IaFWLeVlEPC1pO2BXYDvStInHgOuBK/OI2LYcQMysIjo6D6TpCrSFNCPqXs8mTcprd9y9QNM7/eUJ0KOa7c9pPtHuPCXKEcDl+dErDiBmVhG1pUysEUkv0X7R2ZqIiLbxwQHEzCqio01YVXQC5QNIKb7aZlYRDiCtRMToTufpq21mFeEA0m2+2mZWEQ4g3earbWYV4QDSbb7aZlYRosVkbesDDiBmVhGugXSbr7aZVYQDSLf5aptZhfgrrZt8tc2sIlwD6TZfbTOrCAeQbvPVNrOKqN1QyrrFAcTMKsRfad3kq21mFeEmrG7z1TazinAA6TZfbTOrCAeQbvPVNrOK8A2lus0BxMwqwjWQbvPVNrOKELBEfxfiNcUBxMwqwjWQbvPVNrOKcADpNl9tM6sQf6V1k6+2mVWElzLpNgcQM6sIN2F124D+LoCZWWfUAkiZR5ucpFGSJkt6StIsSeMlbVbiuL0l3SHpWUkzJB3VIM3Skk6QNE3SPEn3Szq0sH8vSVMkPSHpmZzfAQ3yGZnzeE7S7ZLe3/aNdZjDtZlVSMe+0oYAZwKTSZHpBOBaSZtGxGONDpC0M3AhcChwFfAW4GxJcyPi9ELSi4C1gIOA+4DVgGUL+x8FvgXcA7wAfBg4R9KsiLgin2sfYCwwErgxP1+Zy3f/wr/9chxAzKwiOteEFREfWiBnaX/gSWAbYHyTw/YHxkfEmfn1VEknAUdLOiMiQtIwYEdgg4iYndNNrzv3H+vyHZtrIO8HrsjbjgDGRcTZ+fUhknYCvgiM6sFbXShuwjKziuhcE1YDK5C+Lx9vkWYZ4Lm6bXOBNYF18uvdSLWaIyQ9IOk+ST+UtHyjDJXsAGwC3JC3LQ28C7i6LvnVwPtKv6MOcA3EzCqiR6OwBkmaUnh9VkSc1SL9WOAOYFKLNBNItYVhwLXAhsCRed/qpJrG+sC2wDxgD2BF4DRgDWDPWkaS3gDMJAWl+cD/RcSVtbKTptw/XHf+h0m1m65xADGzCin9lTY7IrYsk1DSGNKX/rYRMb9F0rOBDYBLgaWAp0iBZzQpCECKcgHsFxFP5vwPBiZIWi0iakHhaWALYHlgB2CMpOkRMbFwvqgvaoNtfcpNWGZWEZ1vwpJ0KrAv8MGImNoqbSRHk7701wEGA7fl3dPz8/+AmbXgkd2dn9cu5PVSRPw7Iu6IiO8DvwGOybtnkwLS4LoirMqrayV9ygHEzCqiswFE0lhgP1LwuKdsKSJifkTMjIjnScFnUkQ8knffBKxR1+excX6e0SLbAaTmLHK+twND69IMBW4uW85OcBOWmVVE51bjlXQGaVTVbsDjkmq/9udExJyc5iRgq4jYIb8eBOwFXEf6sj8wv96+kPWFwHHAuZJGk/pAxgIX14KMpGOBW4GpOZ9dclkOKeQzBjhf0m2koPQFUj/KjztyAUpyADGziujoDaVG5ueJdduPJ/VpQOoY36Bu/3Dg5FyYScCQiKg1YxERcyTtSOo4n0wa1XUJ8NVCHssDPyKN3ppLmg8yPCIuKuTzK0mrAF/L5bgT2CUiWtViOs4BxMwqoqPzQFQizYi617OBrUscdy8wrMX+UZSYy5Hnm5zZLl1fcgAxs4rwWljd5qttZhXhANJtvtpmVhEOIN3mq21mFeKvtG5SRFcnLi6WJM2i9Rjtxdkg0sQkWzxU+fNaJyLe2NuDJV1Fuj5lzI6InXp7LkscQF7jJE0pu6SD9T9/XrYo8Ux0MzPrFQcQMzPrFQcQa7WEtS16/HnZIsN9IGZm1iuugZiZWa84gJiZWa84gJiZWa84gFSUpO0kXSZppqSQNKLEMW+TdL2kufm4r0tquyqpLTxJo/PnVHw81OYYf17Wrzzvv7qWJ90j4Lz8aEnS64FrgBuAdwObAOOAZ4Dv91kpreheYEjhddP7b/vzskWBA0hFRcQVwBUAksaVOOSTwOuAAyJiLnCnpLcAR0gaEx6u1w0vRkTLWkeBPy/rd27CspqtgT/nL6OaCaTbZK7bLyV67Vk/N0VNk/RLSeu3SOvPy/qdA4jVDAYertv2cGGf9a1bgRHAzsDnSNf85nzb0kb8eVm/cxOWFdU3e6jJduuwiLiy+FrSLcBU4ABgTLPD6l7787Kucg3Eah7i1b9cV83P9b90rY9FxBzgLmCjJkn8eVm/cwCxmknA+yUNLGwbCjwITO+XEr2G5c/hzcD/miTx52X9zgGkoiQtL2kLSVuQPue18+u18/6TJE0sHHIh8CwwTtJmkj4OfBXwiJ4ukHSKpO0lrSfpPcDFwHLAz/N+f162yHEAqa4tgb/mx7LA8fnfJ+T9qwMb1BJHxJOkX7BrAFOAM0jzCZq1v1tnrQlcRJoL8jtgHvDeiKjdCdOfly1yvBqvmZn1imsgZmbWKw4gZmbWKw4gZmbWKw4gZmbWKw4gZmbWKw4gZmbWKw4gZmbWKw4g1nWStpb0a0kPSnpe0qOSrpF0gKQl+uic4wp3+ruusH1EYfvGDY4bUti/Y2H71wrbH+iLMpst6hxArKskHQbcBKwMHA3sCHwa+BfwI+DDfXj6h0j30RjZYN/TwP4Ntg/P++qdm/O6omOlM1vMeDl36xpJ25GW2jg9Ig6t232ppDGk9Z/6yryIuKXJvt8Bn5L09dpaUpKWBfYAfku6V8fLImImMFPSrD4sr9kizTUQ66avAo8BX2m0MyL+ExF/726RXnY+sA6wbWHb7sASpABiZnUcQKwrct/GEODqiHiun4vTyAzgBhZsxhoO/B6Y0y8lMlvEOYBYtwwirQo8o13CfnQesJekgZJWJ/XPnNfPZTJbZDmA2CJL0nKSnpJ0Qd32DSTdKOlfkv4qacsOnfI3wDLAR4BPkjrdJ7Y8wuw1zAHEuuVRYC6pn6GsfYC/AbtKWqmw/cfAuIjYmNSfcoEkNcqgJyLiaeASUjPWcOCCiHhpYfM1qyoHEOuKiHgRuA4YKmmZkod9hnSTpAmkGgGS3gi8l3ynvoi4Jqd9V4eKeh6wK/A23Hxl1pIDiHXTd4BVgJMb7cy3c908//stpDvwXQ6cQwomAGsDD0bEC4VDZ+TtnXAN8GvgxxFxV4fyNKskzwOxromIGyQdAYzJAWIccD+wErAD8FlgP+DvpIBxfkS8KOlaYGVJzWoZC918VSjjfGDfTuVnVmWugVhXRcQPSHMtngBOAf5ICiRvAT4PjJe0FKkfYj9J04GpwBtIQeV+YI2cpmadvN3Musg1EOu6iLgZuLnZfkm7A/dHxLsL29YD/gIcCdxGmhl+tqShpBrI7WXOLWnJVISYn8syjhTAWpX3OupqObnTfon67WavJa6B2KLoM6SZ4S+LiGnAncBewBeAAyX9i9Sf8sna8iNtrAO8QGeG5h6b8xregbzMFksq9//ObPEmaV3SZEaApyPi3oXMb3XgTfnl8/24BItZv3EAMTOzXnETlpmZ9YoDiJmZ9YoDiJmZ9YoDiJmZ9YoDiJmZ9YoDiJmZ9YoDiJmZ9YoDiJmZ9cr/A8tISzTw81vEAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAElCAYAAAD0sRkBAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAnZUlEQVR4nO3debgcVZ3G8e+bsK8CAcMeFsF1ZCAIKEJUQBBUllEEBIIICA4jyAyIbAHBQVQWRR1ZNGwKAgoIYoBAWGQz7GFTIGFPIEEgQBIg/OaPU5cUndvd1ffWvX277vt5nnouXXXq1Dld5NenTp06pYjAzMw635B2F8DMzMrhgG5mVhEO6GZmFeGAbmZWEQ7oZmYV4YBuZlYRDugFSRotKXLLm5Iel/RDSYsMgLJ9o51l6HSSpuTO7duSZki6Q9KJkka0qUzbS/puN+tHZeUc1c/lme+4kiZImpD7vJ6kMZKW7c+yWeKA3rqvAJsA2wLjgMOBH7e1RDAacEDvvXGkc/tp4OvAFcDXgEmSdmhDebYH5gvowN2kct7dr6Xp3gHZ0mU94BjAAb0NFmh3ATrQvRHxWPbf10r6ALC3pO9ExDvtLFiZJC0cEXPaXY5+Nj0ibs99vlrSaaRAf4GkdSLimTaV7V0R8Spwe9OE/SAiHmp3GWwet9B7725gUWBY1wpJi0n6kaTJWdfMZElHSBqSS7OIpFMkTZL0mqSpkv4s6YO1B5C0hqTzsjRzJD2RBRqyy93NgU/lugwm5Pb9hKTrsmO8Lmm8pE/U5D9W0jOSNpF0q6RZwEmNKi1pB0l/y/J9VdKdkr6U2/6fkm6T9JKklyXdLmnbmjwWkPSDrOtqtqTpkm6RtGlNun0k3ZdLc3Z/XdJHxGukFuiiwH7N0kvaOqv3LEmvSLpM0ro1aSZk9fxydv7nSHpE0ldzacYCewIr587rlGxbva6PW7Lj35sd/x5JG2Xf8w8lPZ+dj7GSFq8p07GS7s7KPF3S9ZI2LlDfd7tcJI0Gfptt+meu3CMkPSDpT93s31WXzzc7ljXnFnrvjQBeAWZAClKkFt2HgR8ADwAbA0eRLkMPyfZbGFgSOB54Ptt2AHC7pA9GxNQsvzWAO4E3SJey/wRWBbbK8jkAOB8YyryA82q2778BNwIPkbplAvgecKOkjSPivlw9lgYuBH4CfB+YVa/Ckg4EfgZcRgo6rwHrZ99F/ns5C5hC+v/si8CVkr4QEVdnaQ4DDgaOAO4FlgJGkrtcl3Ri9p39DPgfYOXsO/uopE9GxNx65SxLRNwn6TngU43SSdoauAq4HtgZWAI4DrhF0noR8Wwu+dqkOo0BXgD2By6U9GJE3ED6f2d5YEOg64ey2RXT2qTuvxNI5+QkUrfRFaRzMBr4UJbmBeDQ3L4rA6cAzwCLk7qcbpI0MiLub3LcLleRzs2RpK7JrquZ54FfAadJWikinsvtsx8wGbim4DGskYjwUmBhXkBcl/SPYxlSv/XbwH/m0u2epdusZv8jgDeBFerkPxRYDJgJHJxbfy7pH+dKDco2Abilm/WXAC8D78utWwp4Cfhjbt3YrMxfLvA9LJWV8Y/N0ub2GZJ9Z9cAl+fWX9koH9KPwlzg6Jr1n8rKu32J53cKcH6D7bcBDzfJYyLpB3eB3Lo1gLeAk2vOVwAb15z/R4Cba87LM90cZ1S2/6iaPN8C1syt+1KW7rqa/f8ITG5Qj6HZ+XoUOK3AcSd08+9k7Zo8lyQ1NI7KrRtG+pH6XlnncbAv7nJp3SOkfzgvAWcDv46I03PbtwaeBG7NLnUXyFrt1wALklrrAEj6qtJIipdJPwyvk1p1+Uv0rYAr472tmqI2y/Z9uWtFpP7XK0jdNHlvkwJsM5/MynhGo0SSNpB0paRpWd5vAVvy3rr9HfiCpBMkbSppoZpstiT9GFxQ813eQQoOmzU4/pD8Psp1d/WQSIGq3vEWJ12lXBQRb3etj4jJwN+Y//t+OnL99ZGuNC4GPtGLsv4jIp7IfX4k+zuuJt0jwCqSlCv/FpJukDSDeedrHd57vnosImaSriS/mavfXqTv9bd1d7SWOKC3bgfSZfAXgOuAAyTtkdu+ArA66R9Efrkz274cgKQvAhcBDwO7Ahtl+b4I5IdBLse8S9dWLUu63K01lXSFkfdCFOu+WC77W7dMklYFxmfHP5D0I7Ah8FfeW7cfkrqRvgTcDMyQ9FtJXfcjVsj+Psb83+dSubJ05zc16X9ToG6NrEr332WXZUjBqd73XdvnP62bdNOAhUhdLT3xr5rPbzZYvwCpJY6k9YG/kK4E9yY1OjYE7uO956u3fgmsRvoRF7Av8KeI6O67sB5wH3rrJkU2ykXS9cD9wI8lXRoRr5P60icDX62z/5Ts79eAxyJidNcGSQsy/z/86aT+zZ54CRjezfrh2ba8ovMoT8/+rgxMqpNma1Kf/FcjNypE0mLvOWDEW8CPgB9JGg5sB5xM6nramey+BOkqpTYokdvenTFA/sppep10TUlaD1iJdE+gnn+RvsN633dtWd/fTbr3k4Lti62Xsld2IrXKd8zOCQCSliF12ZUiIiZJupnUbz6b1Off9EazFeeA3gsRMUfS/wCXk25O/pjUCt0JeC0iHmmw+2Kkf0R5u5O1mnKuAXaUtGJE1GshziH1Uda6EdhW0pLZJS+SliTdoJzQoGyN3Epqye3L/JfyXboCdz44rEPq++62ZR/pJvBZkr4AfDRbfS3wDrBaRFzbSiEjYgrzfjx7TNISwC9IN6V/3eB4r0u6C/iKpDFdVzuSViddofy8ZpdVsxvTt2fphpJuJN4Z84a/ziGNrulri5HuVbz7oy7ps6TW9OQW8+q6cVuv3L8kdb0sQ+oiur7F/K0BB/ReiogrJP0d+G9JpwMXkPoGx0v6KemydSFgLVLXwvYR8QYp8G8v6RRS3/UGwH8xf4voGNJDTLdK+iGp+2FlYOuI+HqW5iFS18/OwOPAzIh4lDRSYrusLD8i/YM9jPQP+Lge1nempMOBn0u6NKvvTNIDJbMj4uekrqi3gXOz72BF4FjgKXLdfJIuz76fu0kt3H8nte5/nR3r8azcpysN/buR1LJbldS/flakESFlGZYN1RPpCmN9YB9SF8guBe5jHEUa6XGlpF+S7jUcSxoF9dOatNOAiyQdQ2qR70/qs94/l+YhYFlJ+5NuuM6OiAd6Ub96/gocBIyV9NusHEcBzzbaqY6ucenflnQO6Uf9/ojo6v65FDiV9ON+yPy7W6+0+65spyzUuXufbdsq23Zw9nkR0iX/I6QWy0ukG4BjyEZAkALb8cBzpNbfjaSANgUYW5P/WsDvSd0Gc4AngFNy24eT+kBnZuWYkNu2ESnAvka66Toe+ERN/mPpZjRFk+/jP0g3J2eRblDeAWyX2/7VrP6zgQdJXUxjgSm5NIeQHpCZkeXzaPYdLVhzrN2zdK9n9XiY1J2ySonnd0r23QWptfqv7JydCKzeQj5bk0bEzCIF8suBdWvSTABuIf3AT8rO6aPAzjXpFs/Oe1d3zpRs/Si6H21yS83+I7J036xZPyZbnx+NcyCpNT4rq/cWzD+Cpd5xJ9Tkfwzpx6Cr1T+iZvuvs/8vlmv3v+uqLcq+YDPrJ9mDOAtExKbN0lZNNkrpMdLwzN3bXZ6qcZeLmfU5SUuR7o3sSuoyq+2CshI4oJtZf1gfuIH0hOp3IuLe9hanmtzlYmZWEX6wyMysIhzQzcwqwn3oBQxbTDFi6XaXwlpxz9R2l8Ba9U6aj76n0x6wgFS4A/kdGBcRW/f0WAOVA3oBI5aGiXu3uxTWisVPaHcJrFVvpEnteixIA/eLmJl7f0GVOKCbWSWI+efNGGwc0M2sEkSan3owc0A3s8pwC93MrAKEh+05oJtZZbiFbmZWAb4p6oBuZhXhm6IO6GZWIe5DNzOrAHe5OKCbWUU4oDugm1mFuMvFzKwC3EJ3QDezivAoFwd0M6sQt9DNzCrAj/67/mZWIUMLLs1I+rak+yW9mi23Sdq2r8pdFrfQzawSSr4p+gxwGPBPUsN3T+AySRtExP3lHaZcDuhmVgll3hSNiMtrVh0haX9gE8AB3cysr/XFTVFJQ4GvAEsAt/bBIUrjgG5mldDiTdFhkibmPp8REWe8Jz/pY8BtwCLAa8AOEfFA70vadxzQzawyWmihT4+IkU3SPAqsB7wP2Ak4R9KoiJjUw+L1OQd0M6uEsoctRsSbwGPZx4mSNgQOBvYu8TClckA3s0roh0f/hwAL9+0hescB3cwqocxRLpJOBK4CngaWBHYFRgEDeiy6A7qZVUaJLfThwPnZ31dIQxW3iYhx5R2ifA7oZlYJZXa5RMTokrLqVw7oZlYZg30uEwd0M6sEz4fugG5mFeIWuplZBQhYqN2FaDMHdDOrBM+H7oBuZhXiPnQzswrwTVEHdDOrEHe5mJlVQJmP/ncqB3QzqwR3uTigm1lFOKA7oJtZhbgP3cysAtxCd0A3swpxQDczqwCPcnFAN7OK8KP/DuhmViHucjEzqwDfFHVAN7MKcZeLmVkFuIXugG5mFeFRLg7oZlYRbqH3c5eTpG9Lul/Sq9lym6Rtc9slaYyk5yTNkjRB0kdq8lhY0s8lTZf0uqQrJK1S4Ng7SXpI0pzs7w59UUcza58hBZeq6u8W+jPAYcA/Sd/rnsBlkjaIiPuBQ4FDgNHAo8DRwLWS1o2ImVkepwJfBnYBZgAnA1dmeczt7qCSNgEuAo4B/gjsCFws6VMRcUdfVNTM+lenttAlLQxsAmwMrAQsCkwnxcCbIuKJonn1a0CPiMtrVh0haX9gE0kPAAcBJ0bEpQCS9gReAHYFfi1paWBvYK+IuDZLszvwJLAFMK7OoQ8CboiIE7LPJ0j6TLZ+l3JqZ2bt1kkBXdLapBi0G7A08A7wCjALWBZYBAhJdwG/BM6NiHca5dm2qw9JQyV9DVgCuBVYAxgOXNOVJiJmATcBn8xWbUC675FP8zTwcC5NdzbJ75MZ12QfM+sgXTdFiyztJul04EFgQ+C47O8iEbFcRKwSEYsBK5J6E+4l9UQ8KGmjRvn2+01RSR8DbiP9+rwG7BARD0jqCq7TanaZBqyc/fdwYC7pcqQ2zfAGhx1eJ9+6+0jaF9gXYLWlGuRsZgNCh3W5rAJsFBH31ksQEdOAy4HLJR0I7Ad8HKjbTdyOUS6PAusB7wN2As6RNCq3PWrSq5t1tYqkaSnfiDgDOANg5IpqlreZDQCdcsMzIrZvMf0c4GfN0vV7/SPizYh4LCImRsThpMuJg4GpWZLaVvMKzGtdTyX9CA9rkKY7U5vka2YdrquFXmSpqoHwgzYEWBiYTAq8W3ZtkLQI8GlSHzvAXcBbNWlWAT6US9Od2/L7ZLZsso+ZdZhOHbYoaSFJG0vaUdJukj4vaUSr+fRrl4ukE4GrgKeBJUmjV0YB20ZESDqVNPLlEeAfwJGkfvbfAUTEK5LOBn4s6QXmDVu8H7gud5zxwJ3ZFQDAacBNkg4H/gTsAHwG2LRPK2xm/abD+tCRNJQUi74JbA4sRKpGl5D0LPB74MyIeKxZnv3dhz4cOD/7+wopEG8TEV3DDU8ijcH8BbAMqfN/q9wYdEjdM2+TxpUvCowH9qgZg74W6UcDgIi4NRtRczxwLPA4sLPHoJtVRyc9+i9pJ+BEYFXSiLsjgXuAF5k3bHENYCNge+C7ksYCR2Y3S7vPN8L3+5oZuaJi4t7tLoW1YvETmqexgeUNuCsiRvZ0/w9K8ZuCaT/V5FjZ1fyOwLrAHOB24PCImNTT8tXk/xypATs2Il4ukH4j0kOZd0fE8fXSeS4XM6uEkrtcRpEe5vl7lvVxwHWSPhwRL5WQ/5oRMbto4qw3YcfsqdK6HNDNrDLKuuEZEZ/Pf86eSH8F+BTw5xLyLxzMa/ab02i7A7qZVUIf3xRdkvR78a++O0TvOaCbWWW00EIfJmli7vMZ2cOE9ZxGembmth4VrIakp1pIHhGxepGEDuhmVglDSOP+Cppe9AaspJNJQ5w3rTejaw+sArxKGuHyRkl5OqCbWXWU/dCQpFOArwGfaWUa2wLOJY1B35o0pfe5EXFDbzMdiA9NmZm1rOxH/yWdRnr48bMR8UiZZY2I0aTncb5NmgP9WklPSjpe0jo9zdcB3cwqo6yALukXwF6k9yX8S9LwbFmirLJGxKyIOD8bUbMa6YHKLwEPS7o9e/ioJQ7oZlYJotS5XA4gjWwZDzyfW/673FInEfFcRJwEjAT+N/u7W6v5uA/dzCqhzEf/I0LNU5VH0sbAHsBXSVOaXAyc3mo+DuhmVhkdNjnXmsDXs2Ut4BbS4/1/qJm/qjAHdDOrhE6abVHSLaRXYz5GGvFyfkRM6W2+DuhmVhkddFPwk6Rx6NOArYCtpLq9PBERmxfJ1AHdzCqhk1rowE00f21myxzQzawSOimgR8SovsjXAd3MKqGTXnDRVxzQzawyOqUPXVJLRY2Id4qkc0A3s0ropC4X0ms0i/ahBwVjtQO6mVVGBwX04/BNUTOzBop2ZBTqwOg7ETGmL/J1QDezahDFJ0Tv0QvgBr5OuYdgZtZYybNz9SVJT0n6eM26b0hatjf5DoCqmZmVpMwJ0fvWKsDCXR8kDQXOBEb0JlN3uZhZNXTYMJdu9HqGRwd0M6uOQd7n4IBuZtXQeS307oYt9mooowO6mVVD5z37f4ak2nnPz5b0Ws06z7ZoZoNQ57TQu5tt8cbeZuqAbmbV0DVssQN4tkUzs2Y6p4XeJzrk98zMrImum6IdMA5d0oo93G94o+11W+iSzu3JAYGjIuLJHu5rZtZzndNEfUzSWcCvIuKRRgklLQrsABwKXAIcXy9toy6XrwNTgTktFHI14FTAAd3M+ldnjXLZDDgJeFDS/cDNwH3Ai6SYuwywJvAJ4LOk6cROAk5ulGmzPvTtI+LOIqWTtADwZpG0Zmal66Bx6BFxF/A5SesD+wDbAf9Zk2w2cAepZX5BRNQOcZxPo4A+HnilhTLOzfZ5tYV9zMzK0UEBvUtE3A3sDyBpBWAlYBFgBjAlIt5qJb+6AT0itmyxYAG0tI+ZWak6pw99PhHxAvBCb/KoW31JGxTNRNLpvSmEmVmvddAolzxJcyV9os62DSTNLZpXo9+zcZI+VqAwZ5NdMpiZtU3XTdEiy8DSaJbFobQwv0ujgP40cJ2kdbstQXIesBdwdNEDmpn1mQ5qoUsaks2DDjAk+5xfFge2AaYXzbNRQN+CNIRmvKS1agoyFLgQ2A04NCJOaKkmZmZl66w3Fh0DvEUaGRjA37LP+eVVUmP54qL5NropOkPSZ0mTyIyXtFlEPCVpwewAXwK+ExE/71mVzMxKNkBa3wVMyP6KFLTPBp6pSTMHeAi4smimDcehR8QLkj5HmgXseknbAD8DtgL2i4gzix7IzKxPlTxsUdJmwH8DG5CGE+4VEWPLyDsibiSbXVFSAGdGxHO9zbfpxUdEPAt8hvRVPUjqihntYG5mA065XS5LAJOA7wCzSi7puyLi2Ih4Lus3/6ikzbP+85Y1msvlGzWrfgccDlwNLFi7PSJ+05MCmJmVQsBC5WUXEX8B/gIgaWx5Oc9P0reBY4BhpD71DYG7JV0GXB8RPyuST6Mul7PqrN8mW/ICcEA3s/bpoPnQ8yTtA5xGiqHXAH/Ibb4Z2InU1d1Uo4C+Rk8LaGbW7zrw0f/Md4GfRsRhuWGMXR4B/qdoRo1GuXjGRDPrLMVb6MMkTcx9PiMizii/QIWsAYyrs+114H1FM/Ibi4pYeQM4fmLzdDZgvH59o4fvbCDSbb3NgFZa6NMjYmQvj1iW6cCIOtvWBZ4tmlGjuVyul/TBohlld2ivl/SBovuYmZWqg54UzfkzcLSkNXPrQtIw4GDgsqIZNWqhjwKWbKFQ6sE+ZmblKPkFF5KWANbOPg4BVpO0HvBSRDxV3pE4kvQSi0mk+c+DdBP0g6TZF48rmlGzLpfLJLXyxqLCk8iYmZWq/JuiI4Ebcp+PzZZzgNFlHSR7Kn8kcBDweeBxUmw+HTglIgq/Y6JRQD+nh+UrPJGMmVmpShy2GBETaDwTYpnHmgn8IFt6rNEol716k7GZWb/q3GGLpfEoFzOrjg55sEjS9S0kj4j4XJGEDuhmVg0lP/rfx4bw3nuO6wLDgSnANOD9pKGMzwOPFs3UAd3MqqGDHv2PiFFd/y1pe9Kj/xtHxJ259RsBF2XbCumQ6puZFdCZ49B/AByVD+YAEXEHMAY4vmhGbqGbWTV07k3RD5DeDtedF5g3Fr6pRk+KPiHp4y0WzMysfTrkFXQ1JgP71dm2H6lfvZBGLfQRwMKFi2Rm1k6d20I/FrhA0iTgEubdFP0P0tOiuxXNyF0uZlYNJT/6318i4kJJ00mB/XBSLd4C/g58PiLGF82rWUD3o/xm1hk6t4VORFwHXCdpCOmtRdMj4p1W82kW0I/NfjkKlCf2bPXgZmalGnj94y3JgvgLPd2/WUBfDygyOZdb8mbWXh3UQpd0MPCriJjdwj7rAytExF/rpWkW0LevHRtpZjZgdUhAB/YADstePv37iLivu0SSlgG2A3YHNqXJLI++KWpm1dBBT4oC65OC9CHAoZJeBR4gjUefAywDrAmslX2+CPhwRExplKkDuplVQweNcomIAM4Fzs0e8d8a2IgUxBcBZgA3AycAl0fEy0XydUA3s+ronC6Xd2WP+N9RRl6N5kPvnIsXM7MOuinaV9xCN7Pq6LBmqKQVgG2ADwPLZqtfAh4Cro6IloYwOqCbWTV0UAs9e4DoBOBg0izubwD/ItXifcBiwJuSTgUOz/rcm3JAN7Nq6KCboqRH/A8iBfXzakevSFqdNArmCGBmlq4pB3Qzq4YOaqED+5Ba3qd2tzEingSOl/Q68B0c0M1s0OmcPvT3A/cUSHd3lraQzqm+mVkjXS30znhj0cPA1wqk2wV4pGimbqGbWXUMjGBdxHHAJZLWBc4HHiTdFA3SaJePkOZB35w0L3ohDuhmVg0d9Oh/RFwmaVvgf4GzmH+CQwH3AV+MiKuL5uuAbmbV0FmjXIiIccA4SauQWuTLkmrxEvBgRDzdap4O6GZWHZ3T5fKuiHgGeKaMvDrkAsXMrInOuilaiKRhkjYrmt4B3cyqY0jBpXNsDtxQNLG7XMysGjrrwaI+4YBuZtXRIQFd0m8KJl29lXwd0M2sGjprlMto4BXgtSbpFm0l087qTTIzq6drHHqJfeiSDpA0WdJsSXdJ+nRJpX0KuDgiVm20AN9qJVMHdDOrjhJHuUjaGTgN+CHw78CtwNWSViuhpBOBkQXSFZo2t4sDuplVQ/nDFr8LjI2IMyPi4Yg4EHge2L+E0l4BTC+Q7iHSNAGFOKCbWXWU1OUiaSFgA+Camk3XAJ/sbTEj4tyI2KpAuocj4tii+fqmqJlVwxDSu3+KGSZpYu7zGRFxRn47qS0/rWa/acAWPS1iX3NAN7PqKN7nMD0ietKHrW7WDRjucjGzaii3D306MBcYXrN+BeZvtfeKpHckza2zvC1phqRrJTXtonFAN7PqKKkPPSLeBO4CtqzZtCVptEuZfgA8DbwIjAV+BJyTfX4GOA9YnjTCZrtGGbnLxcyqofxH/08GzpN0J/A30pjwlYD/K/UoMBuYDGwTEbO7VkpaFLiaFNjXB64Cvg9cWS8jt9DNrDpKHLYYERcBBwFHAvcCmwJfyF7gXKZvAafkg3l2/FnAKcC3IuId0osw/q1RRm6hm1k19MGj/xHxS+CX5eY6nxWoX/KFgOWy/55OqmVdbqGbWTV07nzoE4ExklbMr5S0EnBMth3SRF3PNcrILXQzq47ObKJ+BxgPTJZ0G/ACqdW+CfAG8PUs3drA7xpl5IBuZtXQofOhR8TdktYGDgE2Aj5GmmLgp8DJETEjS3d0s7z69fdM0hRJ0c1yVbZdksZIek7SLEkTJH2kJo+FJf1c0nRJr0u6InvJarNj7yTpIUlzsr879FU9zaxNOvSNRRExIyK+HxGfi4gPZ3+P6ArmRfV31TYEVswt65OeuvpDtv1Q0q/UgVnaF4BrJS2Zy+NUYCdgF+DTwFLAlZLq/jZL2gS4CLgAWC/7e7GkjUqql5m1W9ej/0WWAUjSspK2lbS7pG0kLdtqHv3a5RIRL+Y/S9obeJUUXEUaInRiRFyabd+TFNR3BX4taWlgb2CviLg2S7M78CRpfoVxdQ59EHBDRJyQfT5B0mey9buUVT8za7MB2PouQtLxpMbswrnVcyT9JCKOKppP26qfBfC9gfMj4g1gDdJjtu/ObpaNw7yJebObbUAa3pNP8zTwMI1nQNuE+WdNG9dkHzPrNBpabBlAJB1EemDofOAzwIeyv+cD35f0X0XzaudN0S1JQfys7HPXnAndzW62ci7NXOafR3ga88+5kDe8Tr5195G0L7AvwGqrlTGfvZn1LVE8pM3ty4K06lvAaRFxcG7do8CNkl4DDgB+ViSjdl6g7AP8PSLurVnfk9nNiqRpKd+IOCMiRkbEyOWXX75J1mbWfl0BvcgyoIwgPdbfnauy7YW0JaBLWgH4MnBmbvXU7G+j2c2mkgYmDWuQpjtTm+RrZh2vYwP6DOCjdbZ9JNteSLta6KOBOcCFuXWTSYH33dnNJC1CGsnSNbvZXcBbNWlWIfU5NZoB7Tb6Z9Y0M2sbAYsUXAaUPwE/yEa3LAggaQFJu5BeP3dp0Yz6/acquxn6TeDCiJjZtT4iQtKpwBGSHgH+QZoU5zWyp6Mi4hVJZwM/lvQC6ZfrZOB+4LrcMcYDd0bE4dmq04CbJB1O+vJ2IN102LQv62pm/amVPvQB5XDg46Qpc38j6SVgWVJvxC2kG6aFtKP2o4APMO9x1ryTgEWBXwDLAHcAW+UDP3Aw8DZpXPmipEdm94iI/F2OtUjzCwMQEbdK+hpwPHAs8Diwc0TcUVKdzKztOjOgR8RMSZsB2wKbkWLfS8CNwNURUfgNSf1e+4i4gTozhmUFH5Mt9fafTXrw6MAGaUZ0s+4S4JKWCmtmHWZgDUksKot9V9JgrvMiOu/nzMysW53TQpf0DsXfTRoRUahinVF7M7OmOiegk252lv6y6Y6pvZlZY0MYgCNYuhURY/oiXwd0M6uQwR3SBnftzaxCOqrLpU8M7tqbWYU4oA/u2ptZhTigD+7am1mFdD36P3g5oJtZRbiFPrhrb2YV4oA+uGtvZhXigD64a29mFeKAPrhrb2YV4oA+uGtvZhUiYOF2F6KtHNDNrCLcQh/ctTezCnFAH9y1N7OKGdwhrV0viTYzK1lXC73IUvKRpX0l3SDpZUkhaUTpBynAAd3MKqJ9AR1YDLiGBq/P7A+D+/rEzCqkfS+4iIhTASSNbEsBMg7oZlYhgzukDe7am1mFeJSL+9DNrCJa6kMfJmlibtl3vtyk47MbnI2WUf1Tt2IG98+ZmVVISy306RHRrL/7VOD8JmmeKnrA/uCAbmYVUe4LLiJiOjC9tAz7gQO6mVVE+/rQJQ0HhgPrZKs+LOl9wFMR8VJ/lcN96GZWEQKGFlxK9y3gHuCC7PNV2ecv9cXB6nEL3cwqon0t9IgYQ5sfKgIHdDOrDA9bHNy1N7OKGdwhbXDX3swqpH2P/g8UDuhmVhHuchnctTezCnFAH9y1N7OKGdwhbXDX3swqxC30wV17M6sQB/TBXXszqxCPcnFAN7MKGdwhbXDX3swqxF0ug7v2ZlYhDuiDu/ZmViFdsy0OXg7oZlYR5b7gohM5oJtZRbjLZXDX3swqxAF9cNfezCrEAX1w197MKsQBfXDX3swqZnCHNEVEu8sw4El6EXiy3eXoI8OA6e0uhBVW5fO1ekQs39OdJf2V9P0UMT0itu7psQYqB/RBTtLEiBjZ7nJYMT5f1siQdhfAzMzK4YBuZlYRDuh2RrsLYC3x+bK63IduZlYRbqGbmVWEA7qZWUU4oJuZVYQDekVJ2kzSFZKelRSSRhfY52OSbpQ0K9vvaEnqh+IOepLGZOcpv0xtso/Pl73H4H5OttqWACYB52ZLQ5KWAq4FbgI2BNYFxgKvAz/ts1Ja3qPAqNznufUS+nxZdxzQKyoi/gL8BUDS2AK77AYsBuwZEbOASZI+BHxX0snh4VD94e2IaNgqz/H5svm4y8W6bALcnAWHLuOAlYARbSnR4LNm1nUyWdKFktZskNbny+bjgG5dhgPTatZNy22zvnUHMBrYBtiH9J3fKmm5Oul9vmw+7nKxvNrLdNVZbyWLiKvznyXdDjwB7AmcXG+3ms8+X4OcW+jWZSrzt+xWyP7WtgStj0XEa8CDwAfqJPH5svk4oFuX24BPS8q/Nn1L4DlgSltKNIhl5+GDwPN1kvh82Xwc0CtK0hKS1pO0Huk8r5Z9Xi3b/r+Sxud2+R3wBjBW0kcl7Qh8D/CIiX4g6SeSNpe0hqSNgEuAxYFzsu0+X9aUA3p1jQTuyZZFgWOz/z4u274isFZX4oh4hdTCWwmYCPyCNJ65Xv+tlWsV4Pekseh/BOYAG0dE15uyfL6sKc+2aGZWEW6hm5lVhAO6mVlFOKCbmVWEA7qZWUU4oJuZVYQDuplZRTigm5lVhAO69TtJm0j6g6TnJL0paYakayXtKWloHx1zbO5NQBNy60fn1q/TzX6jctu3yK0/Mrf+mb4os1mrHNCtX0k6CPgbsCxwGLAF8A3gH8CvgO368PBTSfOIH9DNtpnA7t2s3yPbVuu3WV5/Ka10Zr3k6XOt30jajPRo+ukR8V81my+XdDJp/pK+Micibq+z7Y/A1yUd3TUXiqRFgZ2AS0lzlb8rIp4FnpX0Yh+W16wlbqFbf/oe8BJwaHcbI+LxiLi/f4v0rvOA1YFNc+t2AIaSArrZgOeAbv0i6xsfBVwTEbPbXJzuPEl64XK+22UP4E/Aa20pkVmLHNCtvwwjzfr4ZLOEbXQu8BVJi0hakdS/f26by2RWmAO6DViSFpf0qqQLatavJekWSf+QdI+kkSUd8mJgYeCLwG6km6jjG+5hNoA4oFt/mQHMIvVTF7UzcB+wraRlcuv/DxgbEeuQ+uMvkKTuMmhFRMwELiN1u+wBXBAR7/Q2X7P+4oBu/SIi3gYmAFtKWrjgbnuTXtowjtRiRtLywMZkb/KJiGuztBuUVNRzgW2Bj+HuFuswDujWn04ElgN+3N3G7PVr/5b994dIb+i5EjibFNwBVgOei4i3crs+ma0vw7XAH4D/i4gHS8rTrF94HLr1m4i4SdJ3gZOzgD0WeApYBvgc8E1gV+B+UgA/LyLelnQdsKykeq3wXne35Mo4F9ilrPzM+pNb6NavIuJU0ljvl4GfANeTAvuHgP2AP0takNSPvaukKcATwNKkIP8UsFKWpsvq2XqzQc0tdOt3EXErcGu97ZJ2AJ6KiA1z69YA7gYOAe4kPbl5pqQtSS30u4ocW9ICqQgxNyvLWNIPSqPyTqDmKiC7CTu0dr1ZO7mFbgPR3qQnN98VEZOBScBXgG8Be0n6B6k/frco9rbz1YG3KGco4hFZXnuUkJdZKVTs34FZZ5M0gvRwE8DMiHi0l/mtCKycfXyzjVMWmL3LAd3MrCLc5WJmVhEO6GZmFeGAbmZWEQ7oZmYV4YBuZlYRDuhmZhXhgG5mVhEO6GZmFfH/VFy6f5EeYqkAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYEAAAElCAYAAAAC1F7cAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAs1ElEQVR4nO3dd9gcVd3G8e8NSAchFEMRAoiABZAiYKGoKE2l2Onwglho8loAldAUxRdQQCWCxlCsIChFShCQbkCQllBDD5BEIEAIIfzeP85ZGDbbs8+zeWbvz3XN9ezOnDlzdjaZ3845Z85RRGBmZv1pnl4XwMzMesdBwMysjzkImJn1MQcBM7M+5iBgZtbHHATMzPqYg0AbJO0hKQrLK5IekPQDSQvOBWXbq5dlGOokTaz6fovLQYNcloMk7Vhj/UhJg96vu9Zx83kZWXi/vaRvDHbZbM7M1+sCDFGfBR4DFgN2AA7Nr/fvYZn2IH2fv+5hGcrgUmBkjfUTB7cYHARcC5xXtf504O+DXJZ6NiH9P6jYHvgYcEJPSmMdcRDozG0RcX9+fbmk1YG9JR0YEa/1smDdJGmBiJjR63IMsskRcWOvC1FPRDzGmy+8PTM3nydrnauDuuNWYCFg6coKSQtL+pGkh3K10UOSDpc0TyHNgpJOlHSnpBckTZL0N0lrVh9A0iqSzsxpZkh6UNJP87argM2ADxaqL64q7Pt+SVfkY7woaayk91flP1rSY5I2kXS9pOnAjxt9aEk7SLou5/u8pJslfaqw/euSbpA0VdKzkm6UtG1VHvNJOjpXq70sabKkayV9qCrdPpJuL6Q5Q9KwRuUbTEoOljQhf99PSjpF0uJV6ULSsfnfwmOSpku6RtK6hTQTgZWBnQvf5+i8rV61zDGSDpH0cP6OL5K0bF7+KOk5SY9K+nbVvstIOk3SvZJeymnOkbRCC5/59eqgXL7dgRUKZZ4oaXg+HwfW2H9kPuaSLZ1kGxC+E+iOEcBzwBRIFzZStcK7gKOBO4CNge8Bw4BD8n4LkKqRjgGezNu+Ctwoac2ImJTzWwW4GXgJOAK4D3g78PGcz1eBs4B5gS/ndc/nfdcGrgbuJlUZBfAd4GpJG0fE7YXP8Vbg98BPgMOA6fU+sKT9gZ8B55P+878ArJfPRfG8nE6qSpkP+CRwoaRtIuKSnObbwMHA4cBtwOLABvlcVI51XD5nPwO+CayQz9l7JH0gImbVK2cHlL+/N4mIV5vsdyypWvBU4G+88d2vI2mzqjvE3YBHgK+T/g0cBYyVtHpETCVVMV4M3M4bVVPPNDn+rsCdpH8LbwNOAsaQ/n1dAowiVWMeJ+mOiLg47zcMeDmX/RlgedK5vi7/G3y5yXErjgaWATYEKj8EZkTEJEnnk/5d/rSSWNK8wN7AHyPivy0ewwZCRHhpceGNi+gapIvaksBewKvA1wvpds3pNq3a/3DgFWDZOvnPCywMTAMOLqwfQ7rILt+gbFcB19ZY/2fgWWCJwrrFganAeYV1o3OZP93CeVg8l/G8ZmkL+8yTz9llwAWF9Rc2yocUSGYB369a/8Fc3u27+P1OzHnWWjZosF/lQjq6av0ued9PFdYFMBlYpOozzgSOrirLWTWONTL9t33TugDuBeYrrDshr/9uYd18wNPAbxp8lnlJPzAC2KGF446s+jf0WI08N89pP1xY96m8buNufX9eOltcHdSZ8aT/tFOBM4DTIuKUwvatgIeB63N1x3z51+VlwFtIdwUASPqcpJskPUsKJi8Ci5ICTcXHgQsj4okOyrpp3vfZyoqIeB74K6kKqehV0kW5mQ/kMo5qlEjS+pIulPRUznsmsCVv/mz/ArbJVSQfkjR/VTZbkgLI2VXn8ibS3c6mDY4/T3EfFariGriE9Gu2erm7wT4bk37Rn1W1/vekz119ni+OiBcrbyJiInAjqaG1U5fHm+9Wxue/lxaO8ypwP+ki/zpJX8lVbS/k8j6SNxW/p45FxFWk8/flwuovA/8Jtyv0nINAZ3YgXRi2Aa4Avippt8L2ZUl1ujOrlpvz9qUAJH0S+ANwD/AlYKOc7zNAscvpUnTeGDiMVNVUbRLpTqbo6WitamWp/LdumSS9HRibj78/KXBsSOrZUvxsPyBVcX0K+CcwRdJvJFXaV5bNf+9n9vO5eKEstfy6Kn0rPaemRsS4GstLDfapVF296Tzni+6UwvaKp2rk8RSpmqtT1VUqrzRY//r5z9V6Pyf9O94ReD9v/EjpZrfnXwCfkbSUpJVJP5R+2cX8rUNuE+jMnZF7B0m6EvgPcLykc/MvvCnAQ8Dn6uw/Mf/9AnB/ROxR2SDpLcx+0ZhM5xeIqcDwGuuH521FrfY/n5z/rkCqh65lK1Ibw+ci9WgBUoP5mw4YMRP4EfAjScOB7UhVGQsDnye3s5DuhmrVHU+psa5iJFC8Q5tcJ92cqpzH4cBdlZX5jmUpZi/j22rk8Tbg8QEpXWNfAMZGRKWdqtIG1W1jgB+SqlSXJLU3nT0Ax7E2OQjMoYiYIembwAWkRrnjSb92dwJeiIjxDXZfmHT7XbQrqV626DJgR0nLRUStX/UAM0iNgNWuBraVtFhETAOQtBipkfaqBmVr5HpSG8W+FKobqlQu9jMrKyS9k1SXX/MOIlJD+OmStgHek1dfDrwGrBQRl7dTyFzNMrGdfTp0I+n8f4F091PxedL/saur0m8jaZFKlZCkEaRf38cV0swg9TgbaAuTOxEU7NlhXnXLHBHPSzqbVA20KHBOrpa0HnMQ6IKI+KukfwH/K+kU0i+cPUk9Pv6P1MtjfmA1UrXH9rl64e/A9pJOJNXFrw8cQGrILToC2JbUxvADUtXICsBWEbFLTnM3qVrq88ADwLSImEDqtbFdLsuPSL/2v036z39Uh593mqRDgZMlnZs/7zRgXeDliDiZVL3wKjAmn4PlgCNJ9c3FbrIX5PNzK+mX/vtIdxGn5WM9kMt9iqQ1SBfUl0n12lsCp0fEPzr5HHUsLWnjGusn5aAym4iYKukE4FBJL5J69qxF6sF0LXBR1S7TgcskHU9qSziSdCE+sZDmbuDDkrYjVd1Nrnf8OfR34NuSDiNVV34E+EyHed0NDJP0FWAc6d/CHYXtP+eNdgFXBc0tet0yPZQW3ugd9I4a2z6etx2c3y9Iqo4YT/qFNJXUCDqS3IuDdDE8BniC1P3zatJFcCKz9zRZDfgdqUpjBvAgcGJh+3DSxWdaLsdVhW0bkS7KL5AanscC76/KfzQ1enY0OR+fITXQTiddxG4Ctits/1z+/C+Tqkm+kI8zsZDmENIv6Sk5nwn5HL2l6li75nQv5s9xD6mqZ8Uufr8Tqd876JQm+4rU1XUCqd79SVJ30cWr0gWpO+lhpDuil0ltIetWpVszr38p7zM6rx9J7V46x7Tyb5WqXmSkX+6/ILVDTSP9GFmF2Xv+1DtuMc0i+d/of/O2iTXO0wTgX73+v+zljUX5izGzQZAf9Do2Ir7b67IMtlwdOB7YJyLO6HV5LHF1kJkNKEkrAu8gVXs9CZzT2xJZkbuImtlA+x/gSlIPqC9FRN0n0W3wuTrIzKyP+U7AzKyPOQiYmfUxNwy3YOkFFCMW7XUprB3/rn4W2uZ6r6VnIZbpdP/5pJYrt1+DSyNiq06PVSYOAi0YsSiM+0SvS2HtWOR3vS6BteulNOhix4L0oEIrphXm/uh3DgJmVgpi9vFWrDkHATMrBZHGabf2OAiYWWn4TqB9DgJmVgrC3R074SBgZqXhO4H2OQiYWSm4YbgzDgJmVgpuGO6Mg4CZlYbbBNrnIGBmpeDqoM44CJhZKTgIdMZBwMxKw9VB7XMQMLNS8J1AZxwEzKwU3DuoMw4CZlYa/XAnIGljYCtgY2B5YCFgMjABuBo4PyL+22p+rkIzs1KoDBvRyjIUSdpd0h3A9cBBwMLAfcBNwH+BjYDTgccljZa0Siv5+k7AzEqjrHcCkm4HlgXGALsBt0WNCeIlvRXYDtgZuEvSnhHxh0Z5OwiYWSmUvGH4N8AvI+LlRoki4jngbOBsSesAw5tl7CBgZqVQ5obhiDipg31uB25vls5BwMxKo8R3AgPGQcDMSqFf5hOQtACwCbV7B10TEQ+2k5+DgJmVRpnvBCS9g9QraGfgrcBrwHPAdGAYsCAQkm4Bfg6MiYjXmuXbD4HTzPpAL7uIStpX0j8kPSspJI1oYZ89ctrqZcEaaU8B7gI2BI7KfxeMiKUiYsWIWBhYDtgRuA04gdQ7aKNm5fCdgJmVQo97By0MXAZcAJzYxn4vAasVV9TpAbQisFFE3FYvo4h4Kh//Akn7A18G1iE9R1CXg4CZlUIvewdVeu9I2qD9XWNSC4m2bzPTGcDPWknr6iAzK415W1zmIgtJeljSY5IulPS+wS6A7wTMrBTarA5aWtK4wvtRETGq22VqYgKwF6kv/2LAgcB1ktaJiPuKCSWt2k7G7fQQchAws9Joo2pjckQ0rLqRdAxweJN8toiIq1o/7Bsi4gbghsLxric16u4PHFCV/H5gtmEiGmg5HjoImFkpDEDD8EnAWU3SPNKtg0XErHx3snqdJNOAc4F/0l5AaMhBwMxKo5uNnBExmfQQ1qCQJGBtag/1sBewK7A7sDkpOI2JiAfm9LhuGDazUhAwf4tL148tDZe0LvDOvOpdktaVNKyQZqykHxbeHyHpE5JWzfueQQoCv6zOPyJGR8RHgRE53WeBeyVdl59RWKLTsjsImFkp9Hg+gf2Af5NG8AS4KL//VCHNaqQHuiqWAEYB95CeMVgB2DQibq53kIh4LCJ+EBHvIg0dcStwLPCkpO93UnBXB5lZafSq+2dEjARGNkkzour9wcDBc3DMmyXNIl3H9yE9GNY2BwEzK4WSzyfwOklvJ40ftCuwJqmH0deAhpPH1OMgYGalUdb6bUmLAZ8hXfg3AybSpcZhBwEzK4UyTyoDTAJmAueRBpB7vZuopNliXyujh1Y4CJhZKZS8OmihvOyel0aCNq7tDgJmVgolDwJHDlTGDgJmVhplbROICAcBM7NGSn4nMGAcBMysNMoaBCR9pJ30EXFlq2kdBMysFEreO+gKUoOvGqSpbA88iqiZ9ZvKsBEltcVAZewgYGalUdbqoIi4eqDyLnHgNLN+UmkYHmLTS7YkjzQ6ILVdDgJmVho9HEV0oN0HvD7/sJIxklae04yH6PkwM3uzMt8JMHuD8DzALsBSc5qx2wTMrBRK3jtowDgImFkp+GGxzgxqdZCkr0n6j6Tn83KDpG0L2yVppKQnJE2XdJWkd1flsYCkkyVNlvSipL9KWrGFY+8k6W5JM/LfHQbiM5pZ75S4TQBgHknz5FFD561eV1zayrT75WzoMeDbwHrABsCVwPmS1s7bvwUcAuwPbAg8DVyex9KuOAnYCfgi8GFgceBCSXV/BEjahDThwtnAuvnvnyRt1K0PZma9VfI2AYDrSMNJzwSm53U3FdZVllfayXRQq4Mi4oKqVYdL+gqwiaQ7gIOA4yLiXABJu5MCwZeA0yS9Fdgb2DMiLs9pdgUeBj4GXFrn0AcB/4iIY/P7YyVtkdd/sTufzsx6bQhf4Jsp3wBy+Zf7Z4FFgeuBVYDhpAmXAYiI6ZKuAT4AnAasT2r7KaZ5VNI9OU29ILAJcHLVukuBr3flw5hZz5W5YbhUo4hKei9pTswFgReAHSLiDkkfyEmeqtrlKWCF/Ho4MAuYXCPN8AaHHV4n37r7SNoX2BdgpYUb5GxmcwU3DHemF20kE0j18hsDvwB+K+k9he1RlV411lVrJU1b+UbEqIjYICI2WGbBJjmb2VyhrA3DnXRkkbScpI2bpRv08xERr0TE/RExLiIOBW4DDibNoQmz/zpfljd+xU8iBfulG6SpZVKTfM1siCt5w/Cpkm6XtJ+kYY0SSvqwpFHA/cDajdLC3PGcwDzAAsBDpIv1lsC/ACQtSOoB9M2c9hZS6/eWwDk5zYrAWqR2hXpuyPscX1i3ZZN9zGyIGYq/8lv0DuB/SZPMn5zbQW8HngFmAEsCq5J6Xb4VuAbYMiKaXuMGNQhIOg64CHgUWIzU62dzYNuICEknkXoMjQfuBb5Lajc4ByAinpN0BnC8pKeBKcAJwH9I421XjjMWuDnfaQD8FLhG0qHAX4AdSEOzfmhAP7CZDZoytwlExEvAUZJ+COwIfIJUpb48qX11CjCedK37Q0SMbzXvwb4TGA6clf8+R7p4bx0RlV49PwYWAk4lRbabgI9HxLRCHgcDr5L6/S8EjAV2i4hZhTSrkQINABFxvaQvAMeQulo9AHw+Im7q+ic0s54oc++gioiYSbr2/aFbeSqiWXuqbbCUYtwnel0Ka8civ+t1CaxdL8EtEbFBp/uvKcWvW0z7wTk8VpmUuArNzPpJrxqGJQ3LQ9mMz8PdPCrpF5KajvDZyXA2klZqsKxYNcJCUw4CZlYaPeoiujzpWaZvAe8lDfG8KdDwfnQOhrOZSOpIU2t5GHhW0n2S9mml8HND7yAzsznWq4bhiLiT1Fhbcb+kb5LGNFs8Ip6vs+tBdDaczX7AYcCzwLm88eDrTqSeQT8nBaFfSpoZEaMbld9BwMxKYy6q2lic1HXzpQZpOh3O5p3AuIj4TNX6oySdCwyPiO0knQkcCIxulNlcdM7MzDo3DzB/i8tAkrQEcDTwq4h4tUHStoezyXYBTq+z7XRg5/z6T8AaTfJyEDCz8mijTWBpSeMKy77VeUk6RlI0WTav2mcR4G/A46Q2gmY6GSZnMWCZOtuWIQ3KCfA8aay1hlwdZGal0GabwOQWuoieRHquqZFHXj++tChwcX67XUS83GTfToezuRr4gaS7I+KWwvE3AI4F/pFXrV4sXz0OAmZWGt1sGI6Iycw+YnFNuVvmJaRYtFVEvNDCbp0OZ/M10ggJN0t6hDTnyrLASqQeQvvndIuSHrxtyEHAzEpB9KZ+OweAy0iNwdsDi+RqIYCpEfFKTteV4Wwi4iFJawJ7AhsBywF3AjcCo/NTxUTEia2U30HAzEqhh8NGrE8axwfSmGdFWwBX5dddG84mX+hH5WWOOAiYWWn06DmBq0gxqFm6ETXW/Rn4c/dL1ToHATMrhTKPIirpQdIsjLdLeojGPYgiIlZrNW8HATMrjRL3eb+a1OWz8rprI386CJhZKZT5TiAi9iy83qObeTsImFkplDkIDKQS3z2ZWT+p9A5qZRnqJL1P0nmSJkt6VdJ6ef0PJG3VTl4OAmZWGj0aSnpQSfoQ6UGzNUlT7xY/0mukUUZbNtTPh5kZ0LtJZXrgONJoo+8GvlG17VZgvXYyc5uAmZVGCS7wrVgP2DEiQlJ1L6HJ1B9criYHATMrj1brNl4b0FIMtJeBhetsWw54rp3MXB1kZuUg5o4JBQbetcBBkoo3PpU7gr2BK9vJzHcCZlYOvRpBbvB9D7gOuJ005EQAu0s6gTSO0YbtZNYfp8zM+kMftAxHxO2kOYSfAg4nhb/KlJSbRcSEdvLznYCZlUMfPS0WEbcCH5W0IDAMeDYiGs1nXJeDgJmVR5/VbeTZy56YkzwcBMysHProTqCbHATMrBx6OKvMUOYgYGbl4TuBtjkImFk59E8X0a5yEDCz8vCdQNscBMysHErcMCxpt3bSR8SYVtPWDQKSWs6kyvci4uEO9zUz61x5q4NGt5E2gDkPAsAuwCRgRhsHXwk4CXAQMLPBVe7eQasMVMbNqoO2j4ibW8lI0nzAK3NeJDOzDpS4Omgga1caBYGxtDck6ay8z/NzVCIzs06UOAgMpLpBICK2bCejiAigrX3MzLqqvG0CbyLpE6RpJNcAFqzeHhGrtppX3VMmaf02CnRKq2nNzAZEn8wvKWkb4GLSxDJrAuOBR4C3k6bLubqd/BrFzUslvbeFAp0BfKWdg5qZdV2lYbiVZWj7HnAqsE1+/92I2Jw05/C8wCXtZNYoCDwKXCFpjVoblZwJ7Al8v52DmpkNiD64EyD9+v8b6Vd/kKv1I+JeYCQpSLSsURD4GPAMMFbSasUNeVqz3wM7A9+KiGPbOaiZWddVho1oZenmYaVhkk6WNF7SdEmPSvqFpKWa7LeHpKixzFbHX+U14NXcDvsMqWt+xRPAajX3qqPu6YiIKcBHgBdIgWClXPC3AOcCnwUOjIiftHNAM7MB05s7geWBFYBvAe8lPWO1KfC7FvZ9iTQ5/OtLniOgkQnAiPx6HGm+4eUkLQMcAkxsp/ANnxOIiKclfZTU0HClpK2BnwEfB74cEb9q52BmZgOmR11EI+JOYMfCqvslfRO4UNLiEdGo23xExKQ2D3k2sFZ+fQRwBfBYfj8L+FI7mTUdOygiHpe0BXANcBfpVO8REWe2cyAzswE393QRXZw02kKzKR8XkvQwKXzdRhp259+NdoiIUwuvb8kdeLYGFgKuiIi72yloo7GD9qpadQ5wKKnl+S3V2yPi1+0c2MysqwTM33LqpSWNK7wfFRGjulIMaQngaOBXEfFqg6QTgL2A24HFgAOB6yStExH3tXq8iHgM6LhWptGdwOl11m+dlzeVA3AQMLPeaW8+gckRsUHD7KRjgMOb5LNFRFxV2GcRUs+dx0ltBHVFxA3ADYV9ryfdDewPHNDkuEgSqR2h1sNiDzbbv6JREBiwAYvMzLqu+20CJwFnNUnzyOuHlxYlPcQFsF0LDbxvEhGz8t3J6o3S5V5HpwI7UP8a3vKZaDRshEcCNbOhpYttAhExGZjcSlpJi5GqygVsFREvtHu8/Mt+bVL1UCNnAFsAp5CeFp6jgTs9qUwrVlkfzhnXPJ3NNV6cR70ugrVJZ89pBvSkd1AOAJeRGoO3BxbJ1UIAUyPilZxuLHBzRBya3x8B3Ajcl/c9gBQEmo3AsAWpe/7obpS/0dhBV0pas9WMJM2T92l4K2NmNmB685zA+sDGwLuAe4EnC8sHCulWI9XhVywBjALuIQWRFYBNWxi+fyrwVDcKDo3vBDYntVi3Sh3sY2bWHT2aVCY3DDe99YyIEVXvDwYO7uCQJwP7Sfp7fmp4jjSrDjpfUjszi81xgczMOtIn8wlExAmSlgfulnQF8N/Zk8QRrebXKAj8tpMC0mJDiplZ1809D4sNmDyU9NeABUjzCVQL0pPELWnUO2jPtktnZtYrfXInAJwA/IsUCMZHxMw5ycy9g8ysPPrgToA0augBEXFHNzJzEDCzcmhv2Iih7N+kkUu7oj/ippmVX4/mE+iBA4D/lfTBbmTmOwEzK4/+aBM4n/Rw2TWSXgSerdoeEbFyq5k5CJhZOfRPw/BYutgdv9FQ0g8CO0REs3EszMzmDkO/qqepiNijm/k1uhMYQeqHamY29+ufO4GucnWQmZVDj4aNGAySdgMuiogp+XVDETGm1bybBQEPA2FmQ0O57wRGkwapm5JfNxJA14LAkZJaGQYiImL3Vg9qZjYgytsmsAppVNLK665pFgTWJU2W3IzvGMyst0p8J1Cc5KvbE341CwLbtzC2tZnZ3KGkQWAglffmycz6S588MSxpfklHSBov6SVJs6qWV9vJz72DzKwcStw7qMrxpBFELwHOo7Uq+7ocBMysPPqjOugzwBERcWw3Mms0n8AQv2kys75S4obhKosCN3QrM1/ozaw8+qBNAPgbsGm3MnN1kJmVQ//cCZwMjJH0GnAxMLU6QUQ82GpmDgJmVg790zBcqQoaSf25hFsOhw4CZlYO/XMnsBeDMZS0mdmQM/Tr+5uKiNHdzM9BwMzKoX/uBLrKQcDMyqOkQUDSr4GjI+Kh/LqRiIi9W83bQcDMyqEybEQ5bQH8NL/+CI3bBNpqL3AQMLNyKHHvoIhYpfB6RDfzLm/cNLP+M2+LS5dJ+pWkByRNl/SMpAskrdXCfjtJulvSjPx3h+6XrjHfCZhZOfS2YXgcaTavR4FhpD78V0gaEREza+0gaRPgD6S+/ucBOwJ/kvTBiLip2QElvR14O7Bg9baIuLLVgjsImFl59KhuIyJOK7ydKOm7wO3AqsCEOrsdBPyjMBDcsZK2yOu/WO9YklYFzgbeX1lVKUZ+HfhhMTPrO3NJF1FJiwB7Ao8AExsk3YQ0BETRpcDXmxzidGAlUrAYD7zSSTkrHATMrDxaDwJLSxpXeD8qIkbNyaElfRX4MbAI6df/RyOi0Vj/w4GnqtY9ldc3siGwR0Sc22lZi9wwbGblUOkd1MoCkyNig8IyWwCQdIykaLJsXtjlbOB9wGbAvaT6/YWblLq6O6dqrKv2GHP467/IdwJmVg7df07gJOCsJmkeqbyIiOeA54D7JN0I/BfYCTizzr6TmP1X/7LMfndQ7QfAtyVdGREvNknblIOAmZVHF9sEImIyMLnD3ZWXBRqkuQHYkjRdZMWWwPVNynWmpDVJDdCVYFOVJHZvtaAOAmZWDj1qGJb0DtIv/iuAZ4AVge+Q5v69sJBuLHBzRByaV/0UuEbSocBfgB1ITwZ/qMnx9gAOBWYB6zF71ZCfGDazPtWbVs4ZwObAIcASpOqca4BNImJSId1qpOcIAIiI6yV9ATgGOBJ4APh8C88IHEkKGntHxLNzWngHATMrh3mA+Qf/sBHxKLB1C+lG1Fj3Z+DPbR5yKeDn3QgA4N5BZlYm/THH8LVA0yEpWuU7ATMrh7nkYbFBcCDwR0n/Bf7O7A3DRMRrrWbmIGBm5TH0f+W34p78d0yd7UEb13YHATMrh/65EzgKzzFsZlZDHwSBiBjZzfwcBMysHEo8qcxA6o8aNDMrv0p1UA8mlRlokg6WNNu8AU32WU/SVs3SOQiYWXmUt4vobqRhIo6TtE69RJKWlLSrpMtIXUkXb5axq4PMrBzK3TC8HrAr6ankb0l6HriDNEzFDGBJ0gQ2q+X3fwDeFRETm2U8qDFR0sQ6w7FelLdL0khJT+S5Oq+S9O6qPBaQdLKkyZJelPRXSSu2cOyez+VpZgOspHcCkYyJiHVIk9GcCEwjXfjfBywG/BPYC1g+IvZsJQDA4N8JbMibY/VywC3AH/P7b5Ei3R6kSRm+D1wuaY2ImJbTnAR8mjT92hTgBOBCSetHxKxaB53TuTzNbAjo0bARgy1fs7p23RrUmBgRz0TEpMoCbAM8T7ogizRd2nERcW5E3AnsTopwXwKQ9FZgb+CbEXF5RNxKukVaG/hYg0MfRJ7LMyLuyXN6XpXXm1lZlPROYCD1rE0gX/T3Bs6KiJfy5MnDgcsqaSJiuqRrgA8ApwHrkzqBFdM8KumenObSOofrdC5PMxtK1GqjQM1Kg7mepGVJg9W9CxiWV08F7gYuiYin282zlw3DWwKrkCZNhjdm2Kk15+YKhTSzmH2ih2bzcrY9l6ekfYF9AVZaaaUGWZvZ3EG0fkkbWkFA0jzAscDBpEqvl0hjBok0fPXCwCuSTgIOjYiWnyju5Y3RPsC/IuK2qvWdzLnZSpq28o2IUZX5R5dZZpkmWZtZ71WCQCvLkHMoqfr6WGDViFg0It4eEStGxKKkH9THkAaXO6ydjHsSBPItzaeBXxVWVyZfaDTn5iRSw/LSDdLU0ulcnmY2ZJQ6COxD+oV/dK1ePxHxcEQcQwoA+7STca/uBPYg9WX9fWHdQ6SL9ZaVFfkJuQ/zxpybtwAzq9KsSBpbu9G8nJW5PIuazuVpZkOJgAVbXIactwH/biHdrTltywY9JOYG4f8Bfl/o9klERK7POlzSeOBe4LvAC8A5Oc1zks4Ajpf0NG90Ef0PaX7PyjG6MpenmQ0l7bQJDDn3AF8Arm6S7ovA+HYy7sUZ2xxYHdilxrYfAwsBp5KegLsJ+HgxWJAaRl4l9ftfCBgL7Fb1jEC35vI0syGj1EHgKODPktYAzgLuIjUMB6mX0LuBnYHNgM+0k/Ggn7GI+Afp26q1LYCReam3/8vA/nmpl2ZEjXWdzOVpZkNKOceNiIjzJW0L/JDUo7JWR5fbgU9GxCXt5F3asGlm/abUdwJExKXApbkd9N2kOwCRnhO4K09437bynjEz6zPlDgIVEfEY8Fi38iv/GTOzPjEPQ7TnT9dIWpo0eug1re7jIGBmJdL3l7TNSANyttw40vdnzMzKoj+qg7rNZ8zMSqK8QUDSr1tMunK7eZfzjJlZHypvECCNsvAc6eHZRhZqN2OPrG1mJdG7YSMk/UrSA3lGxGckXSBprSb77FFnpsVaBXwE+FMeNK7uAuzXbtlLGzbNrN/09E5gHDCGNFLBMNIDr1dIGhERMxvs9xJphIPX5Qdia+W/QQvlaHkI6QoHATMrid4FgYg4rfB2oqTvkp7gXZU0VW6DXWNSg+0Vf6X2UDvV7iYNMdEyVweZWUnMHUNJS1oE2JNUhTOxSfKFJD0s6TFJF0p6X61EeZL5jzc7dp4+98h2yusgYGYl0dsgIOmrkl4gNd5uDXw0ImY02GUCsBdpbpUvAi8D10lafUAKWIeDgJmVRFtBYGlJ4wrLvrPlJh1Tp+G2uGxe2OVs4H2kB7buBf4kaeF6pY2IGyLitxFxW0T8E/g8aYTjuoNjDgS3CZhZSQhYoNXEkyOiWUPrSaRhmxt5pPIiIp4jdeO8T9KNpKGedwLObKVAETFL0jjSUPt1SXqN+g3AkctwK3B8RFzW7LgOAmZWEt1tGI6IycDkOShMW1EpT7i1NqlBuZGjgd1JfV0vIk2TOxzYhlSldD5p3pZLJH06Ii5slJmDgJmVRG96B0l6B+kX/xXAM8CKwHdIU+heWEj3phkPJR0B3AjcBywOHEAKAl9pcsiXSdPxbl3sTippIeCSXIb1SAHisGIZanGbgJmVSE8ahmeQf3kD95NmPZwGbFLV/XM1YLnC+yWAUaSpIy8DVgA2jYibmxxvP+DE6ucJImI6cCKwX0S8Rpp8Zu1mhfedgJmVRG/uBPJkLlu3kG5E1fuDSdPltmtZ4C11ts0PLJVfT6bOLI5FvhMws5KYO54TGATjgJGSincVSFoeOCJvhzSY3BPNMhvyZ8PMLOmbSWUOBMYCD0m6AXiadHewCWkYisqTxe8AzmmWmYOAmZVI+S9pEXFrbow+BNgIeC/wJPB/wAkRMSWn+34r+ZX/jJlZnyj1UNJvki/0h3Ujr/44Y2bWB/onCABIGkaqAhpGagS+KSKmtptP/5wxMyu5/gkCko4hVQcVH0abIeknEfG9dvLqjzNmZn2gMqlMuUk6iFQVdAZpWItJpCeGdwEOk/RMRPys1fwcBMysJPrmTmA/4Kf5OYOKCcDVeRTTrwItBwE/J2BmJSFg3haXIW0EaUiIWi7K21vmIGBmJdE3D4tNAd5TZ9u78/aWDfmzYWaW9E110F+AoyVNAX4fETMlzQd8ljS15G/byawvzpiZ9Yu+uKQdCqxDutj/WtJUUjfReYFrafP5gb44Y2bWD/pj2IiImCZpU2Bb4MOkADAVuBq4JCLqTThTk4OAmZVE31QHkS/0F9JkroBW9McZM7M+0D9BoJt8xsysRMp5SWsyr3C1iIiWT0Q5z5iZ9aFS3wkcRetBoC2lPWNm1m/KGwQiYuRA5V3OM2Zmfag/egd1m4OAmZWIL2nt8hkzs5Iob3XQQPIZM7OScBDohM+YmZVEZRRRa4eDgJmVRH9MKtNtDgJmVhKuDuqEz5iZlYSDQCd8xsysJBwEOuEzZmYl4SDQCZ8xMysRX9LapTbnH+hLkp4BHu51OQbI0sDkXhfCWlbm72vliFim050l/Z10floxOSK26vRYZeIg0OckjYuIDXpdDmuNvy/rtnl6XQAzM+sdBwEzsz7mIGCjel0Aa4u/L+sqtwmYmfUx3wmYmfUxBwEzsz7mIGBm1sccBEpK0qaS/irpcUkhaY8W9nmvpKslTc/7fV+SBqG4fU/SyPw9FZdJTfbx92VzzM9Yl9eiwJ3AmLw0JGlx4HLgGmBDYA1gNPAi8H8DVkormgBsXng/q15Cf1/WLQ4CJRURFwMXA0ga3cIuOwMLA7tHxHTgTklrAd+QdEK4G9lgeDUiGv76L/D3ZV3h6iCr2AT4Z76gVFwKLA+M6EmJ+s+quVrnIUm/l7Rqg7T+vqwrHASsYjjwVNW6pwrbbGDdBOwBbA3sQzrn10taqk56f1/WFa4OsqLqKgTVWW9dFhGXFN9LuhF4ENgdOKHeblXv/X1Z23wnYBWTmP0X5LL5b/UvThtgEfECcBewep0k/r6sKxwErOIG4MOSFiys2xJ4ApjYkxL1sfw9rAk8WSeJvy/rCgeBkpK0qKR1Ja1L+p5Xyu9Xytt/KGlsYZdzgJeA0ZLeI2lH4DuAe5oMAkk/kbSZpFUkbQT8GVgE+G3e7u/LBoSDQHltAPw7LwsBR+bXR+XtywGrVRJHxHOkX5LLA+OAU0n9zevVR1t3rQj8jvSswHnADGDjiKjMaOfvywaERxE1M+tjvhMwM+tjDgJmZn3MQcDMrI85CJiZ9TEHATOzPuYgYGbWxxwEzMz6mIOADTpJm0j6o6QnJL0iaYqkyyXtLmneATrm6MKMXVcV1u9RWP/OGvttXtj+scL67xbWPzYQZTYbDA4CNqgkHQRcBwwDvg18DNgLuBf4BbDdAB5+Emkc/q/W2DYN2LXG+t3ytmq/yXld3LXSmfWAh5K2QSNpU9KwBqdExAFVmy+QdAJpvJyBMiMibqyz7TxgF0nfr4y9I2khYCfgXNJY/6+LiMeBxyU9M4DlNRtwvhOwwfQdYCrwrVobI+KBiPjP4BbpdWcCKwMfKqzbAZiXFATMSslBwAZFruvfHLgsIl7ucXFqeZg0aXuxSmg34C/ACz0pkdkgcBCwwbI0aTTTh5sl7KExwGclLShpOVJ7xZgel8lsQDkI2FxL0iKSnpd0dtX61SRdK+leSf+WtEGXDvknYAHgk8DOpIbksQ33MBviHARssEwBppPq3Vv1eeB2YFtJSxbW/xIYHRHvJLUvnC1JtTJoR0RMA84nVQntBpwdEa/Nab5mczMHARsUEfEqcBWwpaQFWtxtb9JEKZeSfpkjaRlgY/KMWxFxeU67fpeKOgbYFngvrgqyPuAgYIPpOGAp4PhaG/PUimvn12uRZtK6EDiDFBAAVgKeiIiZhV0fzuu74XLgj8AvI+KuLuVpNtfycwI2aCLiGknfAE7IF/nRwCPAksBHgf8BvgT8h3TRPzMiXpV0BTBMUr1f+3NcFVQo4yzgi93Kz2xu5zsBG1QRcRKpL/6zwE+AK0nBYC3gy8DfJL2FVC//JUkTgQeBt5ICwyPA8jlNxcp5vZm1yXcCNugi4nrg+nrbJe0APBIRGxbWrQLcChwC3Ex6gvdXkrYk3Qnc0sqxJc2XihCzcllGk4JQo/JeRdXdRm6Inrd6vdlQ4zsBmxvtTXqC93UR8RBwJ/BZYD9gT0n3ktoXdq4M9dDEysBMutPt8/Cc125dyMusZ9Ta/x2zoU3SCNIDawDTImLCHOa3HLBCfvtKD4e7MJsjDgJmZn3M1UFmZn3MQcDMrI85CJiZ9TEHATOzPuYgYGbWxxwEzMz6mIOAmVkfcxAwM+tj/w/eS6OxVwOUtgAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXUAAAElCAYAAAAbc3I/AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8/fFQqAAAACXBIWXMAAAsTAAALEwEAmpwYAAAud0lEQVR4nO3dd9gcVd3/8fcnIFVQIGICCAFExPagIEUl4KPhoapIERVILCA/GyIKUoQoRZSqgo9E1NCLioggAqKAUgKhI+0REnqAJHRCS76/P87ZMNlsmb2zd9nJ53Vdc933njlz5uzs7nfPnjlzRhGBmZlVw7DBroCZmXWPg7qZWYU4qJuZVYiDuplZhTiom5lViIO6mVmFDNmgLmmcpCgsr0i6T9IRkpYYAnX74mDWoddJmppf1zObrL8ir/9XF/c5UdLUwuNReR/j6vIdIOlBSa9JuiWnhaTxXazLVEkT2+QZVfcZqF/W7VZ9ysj1GS9pjQbr2j6ffqrTPPstxI1RhbTxkv57oOs2WBYd7AqUsCPwMLAMsB2wf/7/G4NYp3GkY/ebQaxDFTwHfErSMhHxXC1R0mrA6Ly+Pz0GbAzcV9j3BsDhwFHA+YU6bEx6Hw6GHwEXNEi/d4DrMQo4BPgXcH/duu2AZwe4Po1cRHqtHiukHUJ6Tf8+KDUaYL0Q1G+JiP/k/y+TtBbwJUl7RcScwaxYN0laPCJeHux6DLDLgI8D2wMTC+m7AlOBh4BF+mvn+XhfV5e8Tv77y4i4v5C3Pt9Aun+Q999WRNw82HUAiIgngScHux6Dach2v7RwE7AkMLyWIGkpST+WNCV300yRdKCkYYU8S0g6TtIdkp6XNE3SnyW9s34HklaXdFrO87Kk+yX9NK+7AtgU+HDhZ/AVhW03kPS3vI8XJF2eW3/F8idKeljSxpKukTQL+EmrJy1pO0lX53KflXS9pE8U1n9d0rWSZkp6WtJ1krauK2NRSYfmbqyXJE2X9C9JH6nLt7ukWwt5fi1p+Vb166NZwB9IQbxoV+A0YL7LnSWNlHRqrtfLkm6TtEuDfB+TdFN+DvdJ+kqDPPN0v+TXcWJefV+xy6VR94uk/5J0gaSnJM3Kr88mDfazV+4meEnS5EZ5+kuZ41Xoshgt6fz8Hpsh6URJS+Y8mwH/yJtcVnjvb5bXN+sG+ZCkcyU9J+lxSfvn9VtIujl/Rm6QtF5dnTaX9BdJj0l6MX9u95HU8ktedd0vkmrvoQMLdR4v6Tv5eLylbnvlz/tZHRzmIaUXWur1RgHPADMgBSrgEuBdwKHA7cBGwPeB5YF98naLk7ptDiP9NFse+CpwnaR3RsS0XN7qwPXAi6Sfbf8HvA3YPJfzVeB0UguyFiiezdu+D7gSuJPURRPA94ArJW0UEbcWnsebgLOBo4EDSAGuIUnfAH5G6g4YCzwPfCAfi+JxOZnUwl0U2Ba4UNJWEXFxzrMfsDdwIHALsCywfj4WtX0dmY/Zz4DvAivnY/YeSR+KiNnN6tlHpwKXS1olIh6WtBHwDlJQ37SYUdLSpOO7HOmYPQTsApwmaamImJDzrQP8BZgM7Ex67ccDbwRa1f+rubz9gU+T3icNu1wkfQD4J3AzsDvp/bIn8Ld8nG7M+b4EHE/6sjgHeDtwFum9WNaw/D4vinavRdnjVXA6cC7wC2AD4GBgadJ7+Sbga8CJwDeBG/I2d7ap+ymk13gCqSv1CElvBrYidYk8T2rQnC9pzYh4JW+3BnA58HPgJdL7dDzwFtJnqqyNgWtJx/+knPYw6fU6FPgC8zaoNgdWB3r3nFlEDMmF14Pi2qQgtRzpQL8GfL2Qb9ecb3Td9gcCrwArNil/EWApUp/p3oX0U0lvtJVa1O0K4F8N0n8PPA28uZC2LDATOK+QNjHX+ZMljsOyuY7ntctb2GZYPmaXAn8qpF/YqhzSF8Ns4OC69A/n+n6qi6/vVFIQUf7/ezn9F8DVjY4z8PVcj83qyvob8ASwSH58BjAdWLqQ5235/TC17vkGMK6Q9uWcNqpuHwGMLzy+HLgLWKzuPXUXcH7hdXgI+GtdWZ/J5U1sc4xq9Wu0PF/iGJc9XuNyvl82+AzNBt6RH2+W8328yes5sfC4VubBhbRF835fBVYvpH8i5920yfNQ3vZA4ClgWIn9jiqkBXBYg3InAv8BVEg7D7i7W+/zwVh6ofvlbtKbYCbwa+CkiDihsH4L4AHgmty9sGhu1VwKvIHUagdA0k6SJkl6mvTl8AKp9bZ2obzNgQsj4tE+1HV03vbpWkJEPEs6ybVpXd7XSEG2nQ/lOta3quYhaT1JF0p6PJf9KjCGeZ/bDcBWkg6X9BFJi9UVM4YUiM6oO5aTSL9GRrfY/7DiNip0fbUS6ZN0OrBrrs9nSF+sjYwGHomIK+rSTye14N6VH28M/CUiXijs5yHg6jJ1aid3SWwK/A6YUzhOIgXM2nFaJS/n1hXxB9JrVNZhwAfrlrldOJIWqTv2yqvKHq+a+nqeTXo/bEDf1X4lEhGvkYLovRExpZDn7vz3bbWE3G10kqQHSF/Gr5KOw5uBFRegPkW/ANYEPlbbJ+kX7kmtNhrqeiGob0d6E29F+sB8VdJuhfUrAquRXvTicn1evwKApG1JP3/vAj4HbJjLfRIoDpFcgb6Pclieec+610wj/dIoeiLKdWWskP82rZOkt5FajsuTRgV9iPTc/sq8z+0IUpfSJ0hdBzMk/VZS7fxE7cPyH+Y/nssW6tLIb+rydzIy6FRSgDmE9HP/nCb5Wh3f2nqAkcDjDfI1SuuL5Umt8u8z/3H6OrBc/lIb2Wi/ObjN6GB/D0TE5LqleGLyvro6jC3Us8zxqqk/PrXHK3dQ13pP1T1+pUka5PdqPnYXANuQAvl/k97PhxfzLaiIuJ7URbdnTvoy6cv2lG6UP1h6oU/9jsijXyT9HbgNOErSH3JLbAYwBdipyfZT89+dgf9ExLjaCklvYP439nT6/iaeCYxokD4irysqO+fx9Px3ZeCOJnm2IPXR7xQRc4O/pKXm2WHEq8CPgR9LGkH60BxL6ob6DK8Hms2Z/4MHrQPReKD4C2p6k3zziYh7JU0i9ZWeV/ylU2cm8/7yqKkd81r9HgPe2iBfo7S+eBqYQ+pfbvirIiLmSKoF1Hn2m1v1rb4gO7Ut6bxBTa0VXPZ41bwV+HfdY4BHFrSCHVqT1Ie+a0ScXkvMDbNu+1/gJEkrk4L67yKi/rPaU3ohqM8VES9L+i7wJ9JJraNIrdHtSX2Md7fYfCnm/8m7K/MPmbsU+LSkkRHRqJUD8DKNT3RdCWytwrhrScuQPnRXtKhbK9eQ+vj3IJ0QbqQWvF+tJUh6B6kvvGELP9KJ4ZMlbQW8JydfRgpWq0bEZZ1UMiKm8voXaF/8BNiNeb8Y6l0J7CjpwxFR7Er5HKmv9q78+FpSN9PStS6Y/Gvmw0BfutXmEREvSPon8F/ATdF8aO3DpD71nZj3l8v2dPGzFxG3N1lV9njV7MS8Y7l3Jr0far96a0Nul1ywGrfV6P38BuDzfSzvFZrX+SzSYIUzgVWBX/ZxH0NGTwV1gIi4QNINwHcknUA6KfYF0giKY4BbgcVI3/afIJ3ce5EU/D8l6ThSX/Z6pLP4T9ft4hBga1If/RGkroiVgS0iojYU7E5SN9BnSD99n4uIe0hn07fJdfkxqTW+H+lN+sM+Pt/n8jCwn0v6Q36+zwHrAi9FxM9J3VKvAafmYzAS+AHwIIUuNkl/ysfnJlJL/P2kVv5JeV/35XqfIGltUlB4idTXOQY4OSJqw9q6KiLOI52kamUisBdwnqQDSUHz87luXyl0Zx1GGmlxqaSjSO+HH9C97heAbwNXAZdI+jXp18Fw0qikRSLie7m1/gPSl+dvSX3UbyeNrunkQp018qigeve2aVVOpNzxqtkqH69LSf3ohwCnRkTtIqd7Se+zL0qaSQry90ThwrEuuYt0nuxwSbNJwX3vBSjvTlJj66+k9/2jtXNmETFLaSjm3sDtEXHNAtV8KBjsM7XNFl4/i/32Bus2z+v2zo+XIP38v5v0RptJOik4Hlg05xlG+rA/ShrOdCUpqE2lbhQC6QvhLFIXwsukq+eOK6wfQRoy91yuxxWFdRuSguzzpBOxlwMb1JU/EXi4w+OxA+mE5SxSQJgEbFNYv1N+/i+RfkLvnPcztZBnH9LFNjNyOffkY/SGun3tmvO9kJ/HXaQW9CpdfH2nAqe3yXMFdaOMSF9YpxVem9uAXRps+3HScMPa6/eVBsdjFH0c/ZLT1iEF6ifyfh4m9QVvVZdvL1KQeonUh/uRRu+7Bs+hVr9myw4ljnPb48Xrn7XRpF/Bz5M+QycCS9bl/Uo+nq9RGFlT/3xo8vlt8prWnueXC2nrkq5cfTEf1x82em1a7LeY58PAjfn4N3odN87pX+vW+3swF+UnZWYLKaWLr34LrBWvX7290JB0OOmLd6VIo9V6Ws91v5iZdYOk95NOJO8FTKhCQAcHdTNbeP2RNMLnEtL5g0pw94uZWYX0wsVHZmZWkoO6mVmFuE+9hOFLKEYtPdi1sI6svl77PDak3HjjjdMj4i3tcza2qFS6M3kOXBIRW/R1X0OZg3oJo5aGyVsOdi2sI6dPHuwaWIfy5F19FqSJg8p4rnA/hqpxUDezShD9eJusHuKgbmaVINJc2ws7B3Uzqwy31B3UzawihIfzgYO6mVWIW+oO6mZWET5Rmjiom1kl+ERp4qBuZpXhPnUHdTOrCHe/JA7qZlYJDuqJg7qZVYa7XxzUzawi3FJPHNTNrBI8+iVxUDezynBL3UHdzCrC0wQkDupmVhluqTuom1lF+ERp4l8rZlYJtROlZZZS5UkjJZ0i6UlJL0m6U9KmLfKPkhQNlgG9bZ5b6mZWGd1qqUt6M3A18C9ga+BJYA3giRKbbwHcWng8s0vVKsVB3cwqocsnSvcFHouI3QppU0puOyMipnWvKp1x94uZVcYiJZcSPgVMknSOpCck3SLp65JUYtvz8jZXS9qh82exYBzUzawSai31MgswXNLkwrJHXXFrAF8F7gf+B/gpcCTwtRZVeB74DrATsBVwOXCOpF268PRKc/eLmVVCh6NfpkfE+i3WDwMmR8T++fHNktYiBfUTGm0QEdOBYwpJkyUNJ3XlnF6+agvGLXUzq4Quj355DLizLu0uYNUOqzUJWKvDbRaIW+pmVhldHKd+NbB2Xdo7gAc6LGdd0hfEgHFQN7NK6PLFR8cB10g6EDgHeD/wTeCAufuTfgRsEBEfy4/HAq8CNwNzgG1J3TX7da9a7Tmom1lldKs/OSJukPQp4Ajg+8CD+e8vCtlGAmvWbXoQsBowG7gX+GJEDFh/Ojiom1lFdHuagIi4CLioxfpxdY9PAU7pYhX6xEHdzCrDIz8c1M2sIgQsNtiVGAIc1M2sEjyfeuKgbmaV4al3HdTNrCI8n3rioG5mleHuFwd1M6uI2jQBCzsHdTOrBHe/JA7qZlYJDuqJg7qZVYb71B3Uzawi3FJPHNTNrDIc1B3UzawiPPolcVA3s0rwNAGJg7qZVUYVul8kjQBWApYEpgNTIuKVsts7qJtZJfTyiVJJ6wNfBrYA3la3+hVJNwBnAWdExLOtynJQN7PK6LXulxzMjwZGA7cDfybdDu9JYBawPLA6sCFwJHCkpJ8Ax0TES43KdFA3s0ro0Zb6lcCvgP8XEXe1yihpCeCTwL6k769DG+VzUDezSujR0S9rRsS0Mhlzy/wc4BxJb22Wz0HdzCqhF1vqZQN6g+0eb7ZuQLugJH1N0m2Sns3LtZK2LqyXpPGSHpU0S9IVkt5dV8bikn4uabqkFyRdIGmVEvveXtKdkl7Of7frj+doZoNnWMmlygb6+T0M7Ad8AFgf+DtwvqT35fX7AvsA3wA+CDwBXCZpmUIZxwPbA58FNgGWBS6U1PRLWtLGpJ8tZwDr5r+/k7Rht56YmQ2uWku9zDLUSFpM0s6SJkq6Ozd6X5H0WG7c/kDSu0qVFRH9Xd/WFZBmAvsDE4BHgRMi4vC8bklSYP9ORJwk6U2ks8JfiIgzcp63AQ8AW0bEJU32cQ6wfESMKaT9DXgyIj7bro7rr6CYvOWCPEsbcKcP7vvaOifpxohYv6/brynFj0vm3REWaF/dImkp4LvA14HlgLtoPPrlg3n9v4ADIuLqZmUOWp96blnvCLwRuIZU8RHApbU8ETFL0lXAh4CTgPVI50KKeR6SdFfO0zCoAxsDP69Lu4R0IM2sAnr0ROl9wDTgYODciJjRLKOkDwO7AJdI2iciTmqUb8CDuqT3AtcCSwDPA9tFxO2SPpSz1J8AeBxYOf8/AphNusqqPs+IFrsd0aTcpttI2gPYA2DVpVqUbGZDQi+eKCUNZTy/TMbcOr9a0nhgVLN8g9FSv4fUr/1mUt/4KZI2K6yv/92sBmn1yuTpqNyImEDqEmL9FeTf8mY9oNdOgpYN6HXbPM78jdS5BvwYRMQrEfGfiJgcEfsDtwB7k36CwPyt5xV5/QlMI30ZD2+Rp5Fpbco1sx7XyydKu2kojFMfBiwOTCEF3zHADTD3CqpNSCcSAG4EXs15zsx5VgHWIfXLN3Nt3uaoQtqYNtuYWY/pZitV0kjSpflbAcsA95O6S65ssc17gROADYCZpHOBh0aTESmSfthBlSIiDmmXaUCDuqQjgYuAh0gH6XPAZsDWERGSjgcOlHQ3cC9wEKnf/UyAiHhG0q+BoyQ9AcwAjgVuA/5W2M/lwPX5lwDAT4GrJO0P/BHYDvgo8JF+fcJmNmC62acu6c3A1aTRJluTRqOsQRqN12ybZYHLgKtIo1XWBiYCLwDHNNnsIFI3sEpUK4ChFdRJXSCn57/PkIJxcSjiT0jTTZ5IGr4zCdg8Ip4rlLE38Bpp3PmSwOXAbhExu5BnTdIXBwARcY2knYHDgB+Qzjh/JiImdf0Zmtmg6PLol32BxyJit0LalDbbfB5YChgbEbOAOyStA3xb0rFNWusvAHOA3wOntvoVUNagj1PvBR6n3oM8Tr3nLOg49XdK8ZuSeT/cZpy6pDuBv5JG3n2UdA3NycCJLbpSTgVWiIjiVfIfBK4H1oiI+b4U8jj17YHdSL0WDwOnkQL8f0o+nXn02sliM7OGunyidA3gq6R+9P8hdeEeCXytxTbNhk7X1s0nIl6MiNPyhZGrk/rgPw3cm6dR+X+SlitX5cRB3cwqo4O5X4ZLmlxY9mhQ1E0RsX9E3BwRvwV+RuugDo2HTjdKn3/DiIcj4siIeA+pT/6WvM+T221bNBRGv5iZLbAOT5ROb9PV8xhwZ13aXcBeLbZpNnQaOhg+LWkDYFdgB9Jov5vLbgtuqZtZhXRxlsarSaNXit5BmmeqmWuBTfJQ7JoxpP74qa12Jmk1SbWRf9eRLtA8ABgZEYeVq3LioG5mlTAMWKzkUsJxwEY50L5d0o7AN0kj8wCQ9KM8fLrmTOBFYKKk90j6NPA9oNnIFyR9WdKVpBF5Y0n3IV0zIjaJiF9FxDNln3+Nu1/MrDK61UqNiBskfQo4Avg+8GD++4tCtpGk4dO1bZ6RNIYU+CcDT5HGpx/bYlcTgGeBU0lj4gE+KumjTerVdoCPg7qZVUK3J/SKiItIF0s2Wz+uQdrtpJtId2JZYFxeWlYJcFA3s4VHD87rsnq3C3RQN7NKEL13kjAiWp147RMHdTOrhB69SUbXOaibWWX0WvdLnlqgrIiIse0yOaibWSX06J2PRlPiatOsVD4HdTOrjB7sUx/V7TId1M2sEnq0pd51vfbFZmbWUC/ezk7SDyWtVJe2QHHZQd3MKqE2+qXMMoQcCKxSeyBpEeBVSR/oa4HufjGzyujBVmqj29iVubVdUw7qZlYJ7lNPHNTNrDIc1B3UzaxKyva/zOnXWnRqW0nvyf8PI41H/4SkdeszepZGM1t4iNKTpfNSf1akYwc2SDu4QZpnaTSzhUgvzujlWRrNzFrosU51z9JoZtaMh78AvfhjxcysmS7eeXogSPqTpPd3kH8JSd+WtGezPG6pm1k19GZL/UHgOkm3AGeQ7lN6W0S8VsuQpxHYANgW+DTwCPDFZgU6qJtZNfTgXTIi4huSjge+BYwH3gSEpGeBl4HlSM9KwPU532kR0XRQpoO6mVVH77XUiYj7gG9I2gfYGNgQWAlYApgB3A1cVfakqoO6mVVDbw5pnCsiXgGuzEufOaibWXX0YEu92xzUzawaevBEqaRGV442ExFxaLtMTYN6hzdELfp+fwyoNzNrq/e6X8bXPQ4aT71buz9p34M6sAswjXQGtqxVgeMBB3UzG1i9Ofpl7teQpHcBFwATgLOBx4G3Ap8Fdge2KVNmu+6XT0XE9WUKkrQo8EqZvGZmXdeD3S91TgBOjoifFNIeBH6cb3F3IvCxdoW0+rFyOfBMBxWanbd5toNtzMy6oxdvUjqvDYHJTdbdAGxUppCmQT0ixkTEPWVrE8mYiPhP2W3MzLqqS9MESBovKeqWaS3yj2qQPyRt0UHtnwHGNFm3OSUb2a1OlK4XETeWKUTSCRHx9TJ5zcz6Rfe7X+4BNis8nl1imy2AWwuPZ3awv98A+0t6I/A7Xu9T3wnYAziiTCGt+tQvkfTRiLi9VQGSfg2MAxzUzWzwdP9E6WsR0bR13sSMPmxTczBplMu3gNqEXQJeIAX08WUKafVD5CHgb5LWbrRSyWnAF2h8lw4zs4HV3T71NSQ9ImmKpLMlrVFim/MkPSHpakk7dFL1iJgTEd8H3kb6hfBZYFNglYg4uNV8L0WtWuofJ12uermkTfP8BABIWgQ4E9gR2Dciju6k8mZmXdfZNAHDJRVPSk6IiAmFx5NIPRB3AysCBwHXSHp3RMxoUN7zwHeAq4HXgE8A50gaGxGnd/I0IuJp4J+dbFPUNKhHxAxJ/w1cRQrsoyPiQUlvIPX3fALYKyJ+3tedm5l1VflW+PSIWL/Zyoi4uPhY0nXA/cBY4NgG+acDxxSSJksaDuwLlA7qeejiBqRrfpZosJ+2F4W2HKceEU9I+hipxf53SVsCPyOdif1KRPyqbGXNzPpVP45Tj4jnJf0bWKuDzSaRuqdLyRcfnQ+sSfOrShcsqANExCOSPkpqsf8772xcRJxWtrJmZgOin6YJkLQE8E7gHx1sti7wWAf5f0GKyTsBt9PZ1fxztRrSWH9njTOB/YGLgTfUr4+I3/SlAmZmXSFgsS4VJR0N/Jl0ReeKwPeBpYFT8vofARtExMfy47HAq8DNwBzSXYq+BuzXwW4/QGown7cgdW/VUj+5SfqWeSkK0hhLM7PB0d351FcBzgKGA08C1wEbFSYrHEnqJik6CFiNNJ79XuCLHZ4knU4XplppFdRXX9DCzcwGTBf71CNi5zbrx9U9PoXcil8AxwFfk3RxRJS50KmhVqNfPNOimfWW3pt6t+gtwNrAnZIuY/6rUSMiDmlXiG+SUcLNM2HpMwa7FtaJF+Y0Gjxgldb7szQeVPi/0SibANoG9abfa5L+LumdZWsjaVjeppMhP2Zm3dPDszRGxLA2S6mat2qpbwYs00Gd1IdtzMy6owdvktEf2nW/nC+pk7GS0T6LmVk/6P3uFwAkbUOa82V5YAZwZURcVHb7VkG9r2dyp/dxOzOzBdPDJ0olLQNcCGxCmj9mBrACsI+kfwLbRMTz7cppNfql9OWtZmaDrvdb6keQLkDaFTg7ImbnyRN3Bv43r/9mu0J6+HvNzKxOl+58NEi2Bw6KiDNq49QjYnZEnEG6onX7MoV4SKOZVUMXpwkYJCsAdzZZd2de39bQ/c4yM+tEbZqA3m2pTwG2abJuq7y+LbfUzaw6ertP/STgmHyP0jNIMzyOIPWpfxn4dplCHNTNrBp6/ERpRBwn6S3A3qS7LkF6Vi8DR0bET8uU02rq3fuB7SLi1mZ5zMyGlKHbtVJKRBwg6ShgI9I49ZnAdRHxVNkyWrXURwGLL1ANzcwGSo+31GtyAL+4bcYmevx7zcwsq00TUGYZgiTtJ6nhPZ8l/UzSd8uU0y6o+7J/M+sNtZZ6j07oRbqf6W1N1t1CyfudtjtR+gNJZS77j4gYW2aHZmb9prf7HlYF/q/JuvtJd1Vqq11QX5dyNz91i97MBlfv96m/CKzcZN0qlLwRdbug/qmIuL6TWpmZDZreDur/BL4r6fcRMTeAS1oc2Cevb8vj1M2sGrp74+nBMB64BrhX0unAI6SW+y6kKQLGlSnEQd3MqqHHb5IREbdK+ihwNLAf6StqDvAvYPuy1ww5qJtZdfR29wu5u3u0pCWB5YCnImJWJ2W0mk+9t3/ImNnCpfdPlM6VA3lHwbzGgdvMqqPHZmmUtLekJTrc5gOStmi2fgg9PTOzBdCbFx/tBkyVdKSk/2qWSdJyknaVdCmpj33ZZnndp25m1dCbJ0prt6/bB9hX0rPA7cCTpHHpywFrAGvmx+cA74qIqc0KdFA3s2rowT71iAjgVOBUSRsCWwAbkgL5EqSbT/8TOBz4U0Q83a5MB3Uzq44e7lCOiEnApAUtp4cPgZlZQRf71CWNlxR1y7Q227xX0pWSZkl6RNLBkrRgT6pzbqmbWXV0t/vlHmCzwuPZzTJKWha4DLgK+CCwNjAReAE4pt2OJK0IbAm8i3RzDEg3yLgTuDginihbaQd1M6uG7k8T8FpEtGydF3weWAoYm8eY3yFpHeDbko7NfefzkTSM1F++N7AYaVKvp0jP5s25zFckHQ/s36ycIne/mFk1dP8mGWvkbpQpks6WtEaLvBsD/6y7+vMSYCXSXeSa2R/4FimwrxERb4yIt0XEKhHxRmB14DBgL+CAMpV2UDez6ijfpz5c0uTCskddSZNIE2htCewOjACukbRCkz2PAB6vS3u8sK6Z3Ukt8EMbDVOMiAci4jBSQN+9RTlzufvFzKqhsyGN0yNi/WYrI2Kee4RKuo50o4qxwLHNNmtQo0bpRW8Fbm5dVQBuynnbckvdzKqjn6YJiIjngX8DazXJMo35W+Qr5r/1Lfiiu4CdS1Ths8DdJfK5pW5mFdGPFx/l+VneCfyjSZZrgR9LWiIiXsppY4BHgaktiv4h8HtJawOnk744niK17pcH3k06CbspsEOZurqlbmbV0b1x6kdL2lTS6vlKz98DSwOn5PU/knR5YZMzSSNXJkp6j6RPA98Dmo58AYiI84GtSSNdTibdJOMuUqv8mpy2PLBtztuWW+pmVg3dnftlFeAsYDhpHpbrgI0i4oG8fiRpPhYAIuIZSWOAE4HJpNb2MTTvf58rIi4BLpG0Cqllvnx+NjOBf0fEQ51U3EHdzKqhi+PUI6JlP3dEjGuQdjswegH2+TDwcF+3r3H3i5lVR+9NvVuKpOGSSn1huKVuZtXQg7M0dmBT4FxKPEMHdTOrDvc9OKibWUUMI82e0kMk/aZk1tXKlumgbmbV0Xst9XHAM8DzbfItWbZAB3Uzq4be7FN/ELg0IurnnpmHpB1It7Jry0HdzKqj91rqk4Gmc9AUtJ1yt6b3DoGZWSNdvPPRALoAmF4i352kKQXackvdzKpjaAXstiLiVNKNp9vluwv4QZkyHdTNrBq6O01Az3JQN7Nq6M0TpV3nPnUzq45+mk99IEiaI2l2k+U1STMkXSZp81bluKVuZtXQ+y31Q0l3VloCuIh0c40RwFbAS8D5wGbAxZI+GREXNipkQL+zJE2VFA2Wi/J6SRov6VFJsyRdIenddWUsLunnkqZLekHSBXnKynb73l7SnZJezn+366/naWaDpIdb6qTAPQUYFRFfiogDIuKLpJtPTyVNAfwB4FJa3IR6oJ/eB0nzENeWD5DGX56b1+8L7AN8I+d9ArhM0jKFMo4Htifd3mkTYFngQklNv6MlbUwauH8GsG7++7s8+b2ZVUFtmoAyy9C0J3Bc4c5JAETELOA4YM+ImEO6ccb7mhUyoEE9Ip6MiGm1hfSz4llSgBXwLeDIiPhDRNxB+imyDPA5AElvAr4EfDciLouIm4BdSU/w4y12/S3gHxFxeETcFRGHA1fkdDOrit5uqa9I8/E7iwEr5P+n8/pNreczaE8vB/EvAadHxIuknxgjSD8tgLnfUFcBH8pJ65GedDHPQ6TbP9XyNLJxcZvskjbbmFmv0SLllqFpMjBe0shioqSVgEPyekiTez3arJDBPFE6hhTIT86Pa3firr/z9uPAyoU8s5n/CqzaCYVmRjQpt+k2kvYA9oAWX4lmNoSI8iFtdn9WpK/2Ai4Hpki6ltT9vCKpUfoisEvO93bSPVEbGsygvjtwQ0TcUpdeP8eBGqTVK5Ono3IjYgIwAWARqfS8C2Y2WDoJ6i/3Z0X6JCJukvR20nnFDYH3Ao+R73UaETNyvoNblTMoQV3SisAnga8VkqflvyOA4o1WV+T1VvY00qCl2s1gi3muarHLaczfKi+Wa2Y9r5OgPjTlwN10ZEsZg9WnPo70VXl2IW0KKfiOqSVIWoI0wuWanHQj8GpdnlWAdQp5Grm2uE02ps02ZtZTRBriXWYZuiQtL2lrSbtK2lLS8p1sP+Bfa/kE6ZeBsyPiuVp6RISk44EDJd0N3AscRJo8/syc5xlJvwaOkvQEMAM4FrgN+FthH5cD10fE/jnpp8BVkvYH/ghsB3wU+Eh/PlczG0i931KXdBip+2XxQvLLko6OiO+XKWMwjsBmwFq83ulf9BPSHT5OBJYDJgGbF4M/sDfwGmnc+ZKkEwu7RUTxzMeaFLpwIuIaSTsDh5FmOrsP+ExETOrSczKzQdfbQV3St0hdL78GTuf1buNdgAMkPRkRP2tbToTPAbaziBRD+web1Xvhs4NdA+uUzuLGiChzw4iG1l9/8Zg8ue3F5Wlfun+B9tUfcg/FxRGxd4N1xwFbRsQ725UzdIfhm5l1pNZSL7MMSaNIc740clFe35aDuplVRM8H9RnAe5qse3de39aQfXZmZp0ZxlAf2dLGH4FDJc0gDSR5VdKiwI6kW9mdUqYQt9TNrEL6p6Uu6YA8o+wJLfKMajIL7RYld7M/cAspeL8o6XFgFmkCwlspOX7dLXUzq4j+Gf0iaSPSFfC3ldxkC1IQrplZZqOIeE7SaGBrYDRpBOBM4ErSCdRSo1oc1M2sIrof1PPMsGeQJh9seXl+wYw8C23HcuC+MC994u4XM6uIfjlROgH4fUT8vYNtzpP0hKSrJe3Qssatb2E33y3tyuzcLXUzq4jaNAFdKk3anTQj4q4lN3ke+A5wNekCyU8A50gaGxGnN9nmh7SfjLAjDupmVhEddb8MlzS58HhCnpk1lSStDRwBbBIRr5QpMCKmk2ZUrJksaTjpjm4Ng3pEjC9b4bIc1M2sIjoK6tPbXFG6MWk22DvSdFVAmiF2tKQ9gaUjosz8vZOAL5StVDc4qJtZRXT1ROn5vH6noZrfAv9HasGXar2T7on8WLcqVYaDuplVRPeCekQ8DTw9T+nSC8DMfP9kJP0I2CAiPpYfjyVNDX4zMAfYlnTPiP26UqmSHNTNrCIGfJbGkaQZYYsOIt1DdDZp+vAvtjhJ2i8c1M2sIsS805B3V0RsVvd4XN3jUyh5KX9/clA3s4ro7fnUu8VHwMwqwkEdfATMrFIc0nwEzKwi3FIHHwEzqwwHdfARMLPK6PmbZHSFg7qZVYhDmo+AmVWEu1/AR8DMKsNBHXwEzKwyHNTBR8DMKqO7N8noVQ7qZlYRbqmDj4CZVYZI97FYuDmom1lFuKUOPgJmVhkO6uAjYGaV4pDmI2BmFeFpAsBB3cwqw90v4CNgZpXhoA4+AmZWKQ5pPgJmVhFuqYOPgJlVhoM6+AiYWWV49As4qJtZpTik+QiYWUW4+wV8BMysMhzUwUfAzCrDszSCg7qZVYZvkgHpdLGZWQXUul/KLB2WLB0gKSSd0CbfeyVdKWmWpEckHSxJHe9wAbilbmYV0T996pI2AnYHbmuTb1ngMuAq4IPA2sBE4AXgmK5XrAm31M2sIrrfUpf0JuAM4EvAU22yfx5YChgbEXdExB+AHwPfHsjWuoO6mVVEv3S/TAB+HxF/L5F3Y+CfETGrkHYJsBIwqpOdLgh3v5hZhZQOacMlTS48nhARE4oZJO0OvB3YtWSZI4CH69IeL6ybUrZyC8JBvYQ5MP1FeGCw69FPhgPTB7sS3aazBrsG/aaSr1e22oJsfOONN10ivWF4yezTI2KLZislrQ0cAWwSEa90UI2oL6pJer9xUC8hIt4y2HXoL5ImR8T6g10PK8evV3OtgnQfbEz6Ar2j0B2+CDBa0p7A0hHxct0200gt8qIV89/HGSDuUzczm9/5wHuBdQvLZODs/H+j1vu1wCaSioPlxwCPAlP7qZ7zcVA3M6sTEU/nESxzF9LQxJn5cUj6kaTLC5udCbwITJT0HkmfBr4HHBsR7n6xATOhfRYbQvx6DR0jgTVrDyLiGUljgBNJrfqnSOPTjx3ISmkAv0DMzKyfufvFzKxCHNTNzCrEQd3MrEIc1CtK0mhJF+SZ4kLSuBLbDPoMcwsrSePz61RcprXZxq+XzcejX6rrjcAdwKl5aWmozDC3kLsH2KzweHazjH69rBkH9YqKiL8AfwGQNLHEJsUZ5maRrqRbhzTD3ICOs12IvRYRLVvnBX69rCF3v1jNkJhhbiG3Ru5GmSLpbElrtMjr18saclC3mhHMPz9FcYY561+TgHHAlqQbMowArpG0QpP8fr2sIXe/WNGgzzC3sIqIi4uPJV0H3A+MpfkViX69bD5uqVvNkJhhzpKIeB74N7BWkyx+vawhB3WrGRIzzFmSX4d3Ao81yeLXyxpyUK8oSW+UtK6kdUmv86r58ap5/ZCcYW5hJeloSZtKWl3ShsDvgaWBU/J6v15WioN6da0P3JyXJYEf5P9/mNfPN8McqaW3EmmGuRMZhBnmFmKrAGeRxqqfB7wMbBQRtTtu+fWyUjxLo5lZhbilbmZWIQ7qZmYV4qBuZlYhDupmZhXioG5mViEO6mZmFeKgbmZWIQ7qNuAkbSzpXEmPSnpF0gxJl0kaK2mRftrnxMIdha4opI8rpL+jwXabFdZ/vJB+UCH94f6os1lfOKjbgJL0LeBqYHlgP+DjwBeBe4H/Bbbpx91PI81D/tUG654Ddm2QvlteV++3uay/dK12Zl3gqXdtwEgaTbqM/YSI+Gbd6j9JOpY030l/eTkirmuy7jxgF0kH1+ZOkbQksD3wB9Jc53NFxCPAI5Ke7Mf6mnXMLXUbSN8DZgL7NloZEfdFxG0DW6W5TgNWAz5SSNsOWIQU1M16goO6DYjcV74ZcGlEvDTI1WnkAdJNnItdMLsBfwSeH5QamfWBg7oNlOGk2SIfaJdxEJ0K7ChpCUkjSf39pw5yncw64qBuQ5akpSU9K+mMuvQ1Jf1L0r2Sbpa0fpd2+TtgcWBb4POkE6uXt9zCbIhxULeBMgOYReq3LuszwK3A1pKWK6T/EpgYEe8g9c+fIUmNCuhERDwHnE/qgtkNOCMi5ixouWYDyUHdBkREvAZcAYyRtHjJzb5EuvHDJaSWM5LeAmxEviNQRFyW867XpaqeCmwNvBd3vVgPclC3gXQksAJwVKOV+VZu78v/r0O608+FwK9JAR5gVeDRiHi1sOkDOb0bLgPOBX4ZEf/uUplmA8bj1G3ARMRVkr4NHJuD9kTgQWA54GPAl4HPAbeRgvhpEfGapL8By0tq1hpf4K6XQh1nA5/tVnlmA80tdRtQEXE8aSz408DRwN9JwX0d4CvAnyW9gdSv/TlJU4H7gTeRAv2DwEo5T81qOd1soeeWug24iLgGuKbZeknbAQ9GxAcLaasDNwH7ANeTrvD8laQxpJb6jWX2LWnRVIWYnesykfSl0qq+V1D3ayCfmF2kPt1ssLmlbkPRl0hXeM4VEVOAO4AdgT2BL0i6l9Q///kodwf11YBX6c4wxQNzWbt1oSyzrlG5z4JZb5M0inQBFMBzEXHPApY3Elg5P3xlEKc3MJuHg7qZWYW4+8XMrEIc1M3MKsRB3cysQhzUzcwqxEHdzKxCHNTNzCrEQd3MrEIc1M3MKuT/A90g75+kSl9gAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "# this problem has three degrees of freedom, to draw heatmap it needs to fix one dimension\n", + "fixed = {\"('T[0.125]','T[0.25]','T[0.375]','T[0.5]','T[0.625]','T[0.75]','T[0.875]','T[1]')\": 300}\n", + "\n", + "all_fim.figure_drawing(fixed, ['CA0[0]','T[0]'], 'Reactor case','$C_{A0}$ [M]', 'T [K]' )" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/pyomo/contrib/doe/examples/reactor_compute_FIM.py b/pyomo/contrib/doe/examples/reactor_compute_FIM.py new file mode 100644 index 00000000000..c004ad36f00 --- /dev/null +++ b/pyomo/contrib/doe/examples/reactor_compute_FIM.py @@ -0,0 +1,111 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + + +from pyomo.common.dependencies import numpy as np +from pyomo.contrib.doe.examples.reactor_kinetics import create_model, disc_for_measure +from pyomo.contrib.doe import DesignOfExperiments, MeasurementVariables, DesignVariables + + +def main(): + ### Define inputs + # Control time set [h] + t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + # Define parameter nominal value + parameter_dict = {"A1": 85, "A2": 370, "E1": 8, "E2": 15} + + # Define measurement object + measurements = MeasurementVariables() + measurements.add_variables( + "C", # measurement variable name + indices={ + 0: ["CA", "CB", "CC"], + 1: t_control, + }, # 0,1 are indices of the index sets + time_index_position=1, + ) + + # design object + exp_design = DesignVariables() + + # add CAO as design variable + exp_design.add_variables( + "CA0", # design variable name + indices={0: [0]}, # index dictionary + time_index_position=0, # time index position + values=[5], # design variable values + lower_bounds=1, # design variable lower bounds + upper_bounds=5, # design variable upper bounds + ) + + # add T as design variable + exp_design.add_variables( + "T", # design variable name + indices={0: t_control}, # index dictionary + time_index_position=0, # time index position + values=[ + 570, + 300, + 300, + 300, + 300, + 300, + 300, + 300, + 300, + ], # same length with t_control + lower_bounds=300, # design variable lower bounds + upper_bounds=700, # design variable upper bounds + ) + + ### Compute the FIM of a square model-based Design of Experiments problem + doe_object = DesignOfExperiments( + parameter_dict, # parameter dictionary + exp_design, # DesignVariables object + measurements, # MeasurementVariables object + create_model, # create model function + discretize_model=disc_for_measure, # discretize model function + ) + + result = doe_object.compute_FIM( + mode="sequential_finite", # calculation mode + scale_nominal_param_value=True, # scale nominal parameter value + formula="central", # formula for finite difference + ) + + result.result_analysis() + + # test result + relative_error = abs(np.log10(result.trace) - 2.78) + assert relative_error < 0.01 + + relative_error = abs(np.log10(result.det) - 2.99) + assert relative_error < 0.01 + + +if __name__ == "__main__": + main() diff --git a/pyomo/contrib/doe/examples/reactor_grid_search.py b/pyomo/contrib/doe/examples/reactor_grid_search.py new file mode 100644 index 00000000000..a4516c36451 --- /dev/null +++ b/pyomo/contrib/doe/examples/reactor_grid_search.py @@ -0,0 +1,140 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + + +from pyomo.common.dependencies import numpy as np +from pyomo.contrib.doe.examples.reactor_kinetics import create_model, disc_for_measure +from pyomo.contrib.doe import DesignOfExperiments, MeasurementVariables, DesignVariables + + +def main(): + ### Define inputs + # Control time set [h] + t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + # Define parameter nominal value + parameter_dict = {"A1": 85, "A2": 372, "E1": 8, "E2": 15} + + # measurement object + measurements = MeasurementVariables() + measurements.add_variables( + "C", # variable name + indices={0: ["CA", "CB", "CC"], 1: t_control}, # indices + time_index_position=1, + ) # position of time index + + # design object + exp_design = DesignVariables() + + # add CAO as design variable + exp_design.add_variables( + "CA0", # variable name + indices={0: [0]}, # indices + time_index_position=0, # position of time index + values=[5], # nominal value + lower_bounds=1, # lower bound + upper_bounds=5, # upper bound + ) + + # add T as design variable + exp_design.add_variables( + "T", # variable name + indices={0: t_control}, # indices + time_index_position=0, # position of time index + values=[470, 300, 300, 300, 300, 300, 300, 300, 300], # nominal value + lower_bounds=300, # lower bound + upper_bounds=700, # upper bound + ) + + # For each variable, we define a list of possible values that are used + # in the sensitivity analysis + + design_ranges = { + "CA0[0]": [1, 3, 5], + ( + "T[0]", + "T[0.125]", + "T[0.25]", + "T[0.375]", + "T[0.5]", + "T[0.625]", + "T[0.75]", + "T[0.875]", + "T[1]", + ): [300, 500, 700], + } + ## choose from "sequential_finite", "direct_kaug" + sensi_opt = "direct_kaug" + + doe_object = DesignOfExperiments( + parameter_dict, # parameter dictionary + exp_design, # design variables + measurements, # measurement variables + create_model, # model function + discretize_model=disc_for_measure, # discretization function + ) + # run full factorial grid search + all_fim = doe_object.run_grid_search(design_ranges, mode=sensi_opt) + + all_fim.extract_criteria() + + ### 3 design variable example + # Define design ranges + design_ranges = { + "CA0[0]": list(np.linspace(1, 5, 2)), + "T[0]": list(np.linspace(300, 700, 2)), + ( + "T[0.125]", + "T[0.25]", + "T[0.375]", + "T[0.5]", + "T[0.625]", + "T[0.75]", + "T[0.875]", + "T[1]", + ): [300, 500], + } + + sensi_opt = "direct_kaug" + + doe_object = DesignOfExperiments( + parameter_dict, # parameter dictionary + exp_design, # design variables + measurements, # measurement variables + create_model, # model function + discretize_model=disc_for_measure, # discretization function + ) + # run the grid search for 3 dimensional case + all_fim = doe_object.run_grid_search(design_ranges, mode=sensi_opt) + + all_fim.extract_criteria() + + # see the criteria values + all_fim.store_all_results_dataframe + + +if __name__ == "__main__": + main() diff --git a/pyomo/contrib/doe/examples/reactor_kinetics.py b/pyomo/contrib/doe/examples/reactor_kinetics.py new file mode 100644 index 00000000000..57d06e146c5 --- /dev/null +++ b/pyomo/contrib/doe/examples/reactor_kinetics.py @@ -0,0 +1,247 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + + +import pyomo.environ as pyo +from pyomo.dae import ContinuousSet, DerivativeVar +from pyomo.contrib.doe import ModelOptionLib + + +def disc_for_measure(m, nfe=32, block=True): + """Pyomo.DAE discretization + + Arguments + --------- + m: Pyomo model + nfe: number of finite elements b + block: if True, the input model has blocks + """ + discretizer = pyo.TransformationFactory("dae.collocation") + if block: + for s in range(len(m.block)): + discretizer.apply_to(m.block[s], nfe=nfe, ncp=3, wrt=m.block[s].t) + else: + discretizer.apply_to(m, nfe=nfe, ncp=3, wrt=m.t) + return m + + +def create_model( + mod=None, + model_option="stage2", + control_time=[0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1], + control_val=None, + t_range=[0.0, 1], + CA_init=1, + C_init=0.1, +): + """ + This is an example user model provided to DoE library. + It is a dynamic problem solved by Pyomo.DAE. + + Arguments + --------- + mod: Pyomo model. If None, a Pyomo concrete model is created + model_option: choose from the 3 options in model_option + if ModelOptionLib.parmest, create a process model. + if ModelOptionLib.stage1, create the global model. + if ModelOptionLib.stage2, add model variables and constraints for block. + control_time: a list of control timepoints + control_val: control design variable values T at corresponding timepoints + t_range: time range, h + CA_init: time-independent design (control) variable, an initial value for CA + C_init: An initial value for C + + Return + ------ + m: a Pyomo.DAE model + """ + + theta = {"A1": 84.79, "A2": 371.72, "E1": 7.78, "E2": 15.05} + + model_option = ModelOptionLib(model_option) + + if model_option == ModelOptionLib.parmest: + mod = pyo.ConcreteModel() + return_m = True + elif model_option == ModelOptionLib.stage1 or model_option == ModelOptionLib.stage2: + if not mod: + raise ValueError( + "If model option is stage1 or stage2, a created model needs to be provided." + ) + return_m = False + else: + raise ValueError( + "model_option needs to be defined as parmest,stage1, or stage2." + ) + + if not control_val: + control_val = [300] * 9 + + controls = {} + for i, t in enumerate(control_time): + controls[t] = control_val[i] + + mod.t0 = pyo.Set(initialize=[0]) + mod.t_con = pyo.Set(initialize=control_time) + mod.CA0 = pyo.Var( + mod.t0, initialize=CA_init, bounds=(1.0, 5.0), within=pyo.NonNegativeReals + ) # mol/L + + # check if control_time is in time range + assert ( + control_time[0] >= t_range[0] and control_time[-1] <= t_range[1] + ), "control time is outside time range." + + if model_option == ModelOptionLib.stage1: + mod.T = pyo.Var( + mod.t_con, + initialize=controls, + bounds=(300, 700), + within=pyo.NonNegativeReals, + ) + return + + else: + para_list = ["A1", "A2", "E1", "E2"] + + ### Add variables + mod.CA_init = CA_init + mod.para_list = para_list + + # timepoints + mod.t = ContinuousSet(bounds=t_range, initialize=control_time) + + # time-dependent design variable, initialized with the first control value + def T_initial(m, t): + if t in m.t_con: + return controls[t] + else: + # count how many control points are before the current t; + # locate the nearest neighbouring control point before this t + neighbour_t = max(tc for tc in control_time if tc < t) + return controls[neighbour_t] + + mod.T = pyo.Var( + mod.t, initialize=T_initial, bounds=(300, 700), within=pyo.NonNegativeReals + ) + + mod.R = 8.31446261815324 # J / K / mole + + # Define parameters as Param + mod.A1 = pyo.Var(initialize=theta["A1"]) + mod.A2 = pyo.Var(initialize=theta["A2"]) + mod.E1 = pyo.Var(initialize=theta["E1"]) + mod.E2 = pyo.Var(initialize=theta["E2"]) + + # Concentration variables under perturbation + mod.C_set = pyo.Set(initialize=["CA", "CB", "CC"]) + mod.C = pyo.Var( + mod.C_set, mod.t, initialize=C_init, within=pyo.NonNegativeReals + ) + + # time derivative of C + mod.dCdt = DerivativeVar(mod.C, wrt=mod.t) + + # kinetic parameters + def kp1_init(m, t): + return m.A1 * pyo.exp(-m.E1 * 1000 / (m.R * m.T[t])) + + def kp2_init(m, t): + return m.A2 * pyo.exp(-m.E2 * 1000 / (m.R * m.T[t])) + + mod.kp1 = pyo.Var(mod.t, initialize=kp1_init) + mod.kp2 = pyo.Var(mod.t, initialize=kp2_init) + + def T_control(m, t): + """ + T at interval timepoint equal to the T of the control time point at the beginning of this interval + Count how many control points are before the current t; + locate the nearest neighbouring control point before this t + """ + if t in m.t_con: + return pyo.Constraint.Skip + else: + neighbour_t = max(tc for tc in control_time if tc < t) + return m.T[t] == m.T[neighbour_t] + + def cal_kp1(m, t): + """ + Create the perturbation parameter sets + m: model + t: time + """ + # LHS: 1/h + # RHS: 1/h*(kJ/mol *1000J/kJ / (J/mol/K) / K) + return m.kp1[t] == m.A1 * pyo.exp(-m.E1 * 1000 / (m.R * m.T[t])) + + def cal_kp2(m, t): + """ + Create the perturbation parameter sets + m: model + t: time + """ + # LHS: 1/h + # RHS: 1/h*(kJ/mol *1000J/kJ / (J/mol/K) / K) + return m.kp2[t] == m.A2 * pyo.exp(-m.E2 * 1000 / (m.R * m.T[t])) + + def dCdt_control(m, y, t): + """ + Calculate CA in Jacobian matrix analytically + y: CA, CB, CC + t: timepoints + """ + if y == "CA": + return m.dCdt[y, t] == -m.kp1[t] * m.C["CA", t] + elif y == "CB": + return m.dCdt[y, t] == m.kp1[t] * m.C["CA", t] - m.kp2[t] * m.C["CB", t] + elif y == "CC": + return pyo.Constraint.Skip + + def alge(m, t): + """ + The algebraic equation for mole balance + z: m.pert + t: time + """ + return m.C["CA", t] + m.C["CB", t] + m.C["CC", t] == m.CA0[0] + + # Control time + mod.T_rule = pyo.Constraint(mod.t, rule=T_control) + + # calculating C, Jacobian, FIM + mod.k1_pert_rule = pyo.Constraint(mod.t, rule=cal_kp1) + mod.k2_pert_rule = pyo.Constraint(mod.t, rule=cal_kp2) + mod.dCdt_rule = pyo.Constraint(mod.C_set, mod.t, rule=dCdt_control) + + mod.alge_rule = pyo.Constraint(mod.t, rule=alge) + + # B.C. + mod.C["CB", 0.0].fix(0.0) + mod.C["CC", 0.0].fix(0.0) + + if return_m: + return mod diff --git a/pyomo/contrib/doe/examples/reactor_optimize_doe.py b/pyomo/contrib/doe/examples/reactor_optimize_doe.py new file mode 100644 index 00000000000..56ea1ffeac3 --- /dev/null +++ b/pyomo/contrib/doe/examples/reactor_optimize_doe.py @@ -0,0 +1,123 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + +from pyomo.common.dependencies import numpy as np +from pyomo.contrib.doe.examples.reactor_kinetics import create_model, disc_for_measure +from pyomo.contrib.doe import DesignOfExperiments, MeasurementVariables, DesignVariables + + +def main(): + ### Define inputs + # Control time set [h] + t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + # Define parameter nominal value + parameter_dict = {"A1": 85, "A2": 372, "E1": 8, "E2": 15} + + # measurement object + measurements = MeasurementVariables() + measurements.add_variables( + "C", # name of measurement + indices={0: ["CA", "CB", "CC"], 1: t_control}, # indices of measurement + time_index_position=1, + ) # position of time index + + # design object + exp_design = DesignVariables() + + # add CAO as design variable + exp_design.add_variables( + "CA0", # name of design variable + indices={0: [0]}, # indices of design variable + time_index_position=0, # position of time index + values=[5], # nominal value of design variable + lower_bounds=1, # lower bound of design variable + upper_bounds=5, # upper bound of design variable + ) + + # add T as design variable + exp_design.add_variables( + "T", # name of design variable + indices={0: t_control}, # indices of design variable + time_index_position=0, # position of time index + values=[ + 470, + 300, + 300, + 300, + 300, + 300, + 300, + 300, + 300, + ], # nominal value of design variable + lower_bounds=300, # lower bound of design variable + upper_bounds=700, # upper bound of design variable + ) + + design_names = exp_design.variable_names + exp1 = [5, 570, 300, 300, 300, 300, 300, 300, 300, 300] + exp1_design_dict = dict(zip(design_names, exp1)) + exp_design.update_values(exp1_design_dict) + + # add a prior information (scaled FIM with T=500 and T=300 experiments) + prior = np.asarray( + [ + [28.67892806, 5.41249739, -81.73674601, -24.02377324], + [5.41249739, 26.40935036, -12.41816477, -139.23992532], + [-81.73674601, -12.41816477, 240.46276004, 58.76422806], + [-24.02377324, -139.23992532, 58.76422806, 767.25584508], + ] + ) + + doe_object2 = DesignOfExperiments( + parameter_dict, # dictionary of parameters + exp_design, # design variables + measurements, # measurement variables + create_model, # function to create model + prior_FIM=prior, # prior information + discretize_model=disc_for_measure, # function to discretize model + ) + + square_result, optimize_result = doe_object2.stochastic_program( + if_optimize=True, # if optimize + if_Cholesky=True, # if use Cholesky decomposition + scale_nominal_param_value=True, # if scale nominal parameter value + objective_option="det", # objective option + L_initial=np.linalg.cholesky(prior), # initial Cholesky decomposition + ) + + square_result, optimize_result = doe_object2.stochastic_program( + if_optimize=True, # if optimize + if_Cholesky=True, # if use Cholesky decomposition + scale_nominal_param_value=True, # if scale nominal parameter value + objective_option="trace", # objective option + L_initial=np.linalg.cholesky(prior), # initial Cholesky decomposition + ) + + +if __name__ == "__main__": + main() diff --git a/pyomo/contrib/doe/measurements.py b/pyomo/contrib/doe/measurements.py new file mode 100644 index 00000000000..75fd4f7c485 --- /dev/null +++ b/pyomo/contrib/doe/measurements.py @@ -0,0 +1,328 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + +import itertools + + +class VariablesWithIndices: + def __init__(self): + """This class provides utility methods for DesignVariables and MeasurementVariables to create + lists of Pyomo variable names with an arbitrary number of indices. + """ + self.variable_names = [] + self.variable_names_value = {} + self.lower_bounds = {} + self.upper_bounds = {} + + def set_variable_name_list(self, variable_name_list): + """ + Specify variable names with its full name. + + Parameters + ---------- + variable_name_list: a ``list`` of ``string``, containing the variable names with indices, + for e.g. "C['CA', 23, 0]". + """ + self.variable_names.extend(variable_name_list) + + def add_variables( + self, + var_name, + indices=None, + time_index_position=None, + values=None, + lower_bounds=None, + upper_bounds=None, + ): + """ + Used for generating string names with indices. + + Parameters + ---------- + var_name: variable name in ``string`` + indices: a ``dict`` containing indices + if default (None), no extra indices needed for all var in var_name + for e.g., {0:["CA", "CB", "CC"], 1: [1,2,3]}. + time_index_position: an integer indicates which index is the time index + for e.g., 1 is the time index position in the indices example. + values: a ``list`` containing values which has the same shape of flattened variables + default choice is None, means there is no give nvalues + lower_bounds: a ``list `` of lower bounds. If given a scalar number, it is set as the lower bounds for all variables. + upper_bounds: a ``list`` of upper bounds. If given a scalar number, it is set as the upper bounds for all variables. + + Returns + ------- + if not defining values, return a set of variable names + if defining values, return a dictionary of variable names and its value + """ + added_names = self._generate_variable_names_with_indices( + var_name, indices=indices, time_index_position=time_index_position + ) + + self._check_valid_input( + len(added_names), + var_name, + indices, + time_index_position, + values, + lower_bounds, + upper_bounds, + ) + + if values: + # this dictionary keys are special set, values are its value + self.variable_names_value.update(zip(added_names, values)) + + # if a scalar (int or float) is given, set it as the lower bound for all variables + if lower_bounds: + if type(lower_bounds) in [int, float]: + lower_bounds = [lower_bounds] * len(added_names) + self.lower_bounds.update(zip(added_names, lower_bounds)) + + if upper_bounds: + if type(upper_bounds) in [int, float]: + upper_bounds = [upper_bounds] * len(added_names) + self.upper_bounds.update(zip(added_names, upper_bounds)) + + return added_names + + def _generate_variable_names_with_indices( + self, var_name, indices=None, time_index_position=None + ): + """ + Used for generating string names with indices. + + Parameters + ---------- + var_name: a ``list`` of var names + indices: a ``dict`` containing indices + if default (None), no extra indices needed for all var in var_name + for e.g., {0:["CA", "CB", "CC"], 1: [1,2,3]}. + time_index_position: an integer indicates which index is the time index + for e.g., 1 is the time index position in the indices example. + """ + # first combine all indices into a list + all_index_list = [] # contains all index lists + if indices: + for index_pointer in indices: + all_index_list.append(indices[index_pointer]) + + # all index list for one variable, such as ["CA", 10, 1] + # exhaustively enumerate over the full product of indices. For e.g., + # {0:["CA", "CB", "CC"], 1: [1,2,3]} + # becomes ["CA", 1], ["CA", 2], ..., ["CC", 2], ["CC", 3] + all_variable_indices = list(itertools.product(*all_index_list)) + + # list store all names added this time + added_names = [] + # iterate over index combinations ["CA", 1], ["CA", 2], ..., ["CC", 2], ["CC", 3] + for index_instance in all_variable_indices: + var_name_index_string = var_name + "[" + for i, idx in enumerate(index_instance): + # use repr() is different from using str() + # with repr(), "CA" is "CA", with str(), "CA" is CA. The first is not valid in our interface. + var_name_index_string += str(idx) + + # if i is the last index, close the []. if not, add a "," for the next index. + if i == len(index_instance) - 1: + var_name_index_string += "]" + else: + var_name_index_string += "," + + self.variable_names.append(var_name_index_string) + added_names.append(var_name_index_string) + + return added_names + + def _check_valid_input( + self, + len_indices, + var_name, + indices, + time_index_position, + values, + lower_bounds, + upper_bounds, + ): + """ + Check if the measurement information provided are valid to use. + """ + assert type(var_name) is str, "var_name should be a string." + + if time_index_position not in indices: + raise ValueError("time index cannot be found in indices.") + + # if given a list, check if bounds have the same length with flattened variable + if values and len(values) != len_indices: + raise ValueError("Values is of different length with indices.") + + if ( + lower_bounds + and type(lower_bounds) == list + and len(lower_bounds) != len_indices + ): + raise ValueError("Lowerbounds is of different length with indices.") + + if ( + upper_bounds + and type(upper_bounds) == list + and len(upper_bounds) != len_indices + ): + raise ValueError("Upperbounds is of different length with indices.") + + +class MeasurementVariables(VariablesWithIndices): + def __init__(self): + """ + This class stores information on which algebraic and differential variables in the Pyomo model are considered measurements. + """ + super().__init__() + self.variance = {} + + def set_variable_name_list(self, variable_name_list, variance=1): + """ + Specify variable names if given strings containing names and indices. + + Parameters + ---------- + variable_name_list: a ``list`` of ``string``, containing the variable names with indices, + for e.g. "C['CA', 23, 0]". + variance: a ``list`` of scalar numbers , which is the variance for this measurement. + """ + super().set_variable_name_list(variable_name_list) + + # add variance + if variance is not list: + variance = [variance] * len(variable_name_list) + + self.variance.update(zip(variable_name_list, variance)) + + def add_variables( + self, var_name, indices=None, time_index_position=None, variance=1 + ): + """ + Parameters + ----------- + var_name: a ``list`` of var names + indices: a ``dict`` containing indices + if default (None), no extra indices needed for all var in var_name + for e.g., {0:["CA", "CB", "CC"], 1: [1,2,3]}. + time_index_position: an integer indicates which index is the time index + for e.g., 1 is the time index position in the indices example. + variance: a scalar number, which is the variance for this measurement. + """ + added_names = super().add_variables( + var_name=var_name, indices=indices, time_index_position=time_index_position + ) + + # store variance + # if variance is a scalar number, repeat it for all added names + if variance is not list: + variance = [variance] * len(added_names) + self.variance.update(zip(added_names, variance)) + + def check_subset(self, subset_object): + """ + Check if subset_object is a subset of the current measurement object + + Parameters + ---------- + subset_object: a measurement object + """ + for name in subset_object.variable_names: + if name not in self.variable_names: + raise ValueError("Measurement not in the set: ", name) + + return True + + +class DesignVariables(VariablesWithIndices): + """ + Define design variables + """ + + def __init__(self): + super().__init__() + + def set_variable_name_list(self, variable_name_list): + """ + Specify variable names with its full name. + + Parameters + ---------- + variable_name_list: a ``list`` of ``string``, containing the variable names with indices, + for e.g. "C['CA', 23, 0]". + """ + super().set_variable_name_list(variable_name_list) + + def add_variables( + self, + var_name, + indices=None, + time_index_position=None, + values=None, + lower_bounds=None, + upper_bounds=None, + ): + """ + + Parameters + ---------- + var_name: a ``list`` of var names + indices: a ``dict`` containing indices + if default (None), no extra indices needed for all var in var_name + for e.g., {0:["CA", "CB", "CC"], 1: [1,2,3]}. + time_index_position: an integer indicates which index is the time index + for e.g., 1 is the time index position in the indices example. + values: a ``list`` containing values which has the same shape of flattened variables + default choice is None, means there is no give nvalues + lower_bounds: a ``list`` of lower bounds. If given a scalar number, it is set as the lower bounds for all variables. + upper_bounds: a ``list`` of upper bounds. If given a scalar number, it is set as the upper bounds for all variables. + """ + super().add_variables( + var_name=var_name, + indices=indices, + time_index_position=time_index_position, + values=values, + lower_bounds=lower_bounds, + upper_bounds=upper_bounds, + ) + + def update_values(self, new_value_dict): + """ + Update values of variables. Used for defining values for design variables of different experiments. + + Parameters + ---------- + new_value_dict: a ``dict`` containing the new values for the variables. + for e.g., {"C['CA', 23, 0]": 0.5, "C['CA', 24, 0]": 0.6} + """ + for key in new_value_dict: + if key not in self.variable_names: + raise ValueError("Variable not in the set: ", key) + + self.variable_names_value[key] = new_value_dict[key] diff --git a/pyomo/contrib/doe/result.py b/pyomo/contrib/doe/result.py new file mode 100644 index 00000000000..65ded38a63b --- /dev/null +++ b/pyomo/contrib/doe/result.py @@ -0,0 +1,758 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + + +from pyomo.common.dependencies import numpy as np, pandas as pd, matplotlib as plt +from pyomo.core.expr.numvalue import value + +from itertools import product +import logging +from pyomo.opt import SolverStatus, TerminationCondition + + +class FisherResults: + def __init__( + self, + parameter_names, + measurements, + jacobian_info=None, + all_jacobian_info=None, + prior_FIM=None, + store_FIM=None, + scale_constant_value=1, + max_condition_number=1.0e12, + ): + """Analyze the FIM result for a single run + + Parameters + ---------- + parameter_names: + A ``list`` of parameter names + measurements: + A ``MeasurementVariables`` which contains the Pyomo variable names and their corresponding indices and + bounds for experimental measurements + jacobian_info: + the jacobian for this measurement object + all_jacobian_info: + the overall jacobian + prior_FIM: + if there's prior FIM to be added + store_FIM: + if storing the FIM in a .csv or .txt, give the file name here as a string + scale_constant_value: + scale all elements in Jacobian matrix, default is 1. + max_condition_number: + max condition number + """ + self.parameter_names = parameter_names + self.measurements = measurements + self.measurement_variables = measurements.variable_names + + if jacobian_info is None: + self.jaco_information = all_jacobian_info + else: + self.jaco_information = jacobian_info + self.all_jacobian_info = all_jacobian_info + + self.prior_FIM = prior_FIM + self.store_FIM = store_FIM + self.scale_constant_value = scale_constant_value + self.fim_scale_constant_value = scale_constant_value**2 + self.max_condition_number = max_condition_number + self.logger = logging.getLogger(__name__) + self.logger.setLevel(level=logging.WARN) + + def result_analysis(self, result=None): + """Calculate FIM from Jacobian information. This is for grid search (combined models) results + + Parameters + ---------- + result: + solver status returned by IPOPT + """ + self.result = result + self.doe_result = None + + # get number of parameters + no_param = len(self.parameter_names) + + fim = np.zeros((no_param, no_param)) + + # convert dictionary to a numpy array + Q_all = [] + for par in self.parameter_names: + Q_all.append(self.jaco_information[par]) + n = len(self.parameter_names) + + Q_all = np.array(list(self.jaco_information[p] for p in self.parameter_names)).T + # add the FIM for each measurement variables together + for i, mea_name in enumerate(self.measurement_variables): + fim += ( + 1 + / self.measurements.variance[str(mea_name)] # variance of measurement + * ( + Q_all[i, :].reshape(n, 1) @ Q_all[i, :].reshape(n, 1).T + ) # Q.T @ Q for each measurement variable + ) + + # add prior information + if self.prior_FIM is not None: + try: + fim = fim + self.prior_FIM + self.logger.info('Existed information has been added.') + except: + raise ValueError('Check the shape of prior FIM.') + + if np.linalg.cond(fim) > self.max_condition_number: + self.logger.info( + "Warning: FIM is near singular. The condition number is: %s ;", + np.linalg.cond(fim), + ) + self.logger.info( + 'A condition number bigger than %s is considered near singular.', + self.max_condition_number, + ) + + # call private methods + self._print_FIM_info(fim) + if self.result is not None: + self._get_solver_info() + + # if given store file name, store the FIM + if self.store_FIM is not None: + self._store_FIM() + + def subset(self, measurement_subset): + """Create new FisherResults object corresponding to provided measurement_subset. + This requires that measurement_subset is a true subset of the original measurement object. + + Parameters + ---------- + measurement_subset: Instance of Measurements class + + Returns + ------- + new_result: New instance of FisherResults + """ + + # Check that measurement_subset is a valid subset of self.measurement + self.measurements.check_subset(measurement_subset) + + # Split Jacobian (should already be 3D) + small_jac = self._split_jacobian(measurement_subset) + + # create a new subject + FIM_subset = FisherResults( + self.parameter_names, + measurement_subset, + jacobian_info=small_jac, + prior_FIM=self.prior_FIM, + store_FIM=self.store_FIM, + scale_constant_value=self.scale_constant_value, + max_condition_number=self.max_condition_number, + ) + + return FIM_subset + + def _split_jacobian(self, measurement_subset): + """ + Split jacobian + + Parameters + ---------- + measurement_subset: the object of the measurement subsets + + Returns + ------- + jaco_info: split Jacobian + """ + # create a dict for FIM. It has the same keys as the Jacobian dict. + jaco_info = {} + + # reorganize the jacobian subset with the same form of the jacobian + # loop over parameters + for par in self.parameter_names: + jaco_info[par] = [] + # loop over measurements + for name in measurement_subset.variable_names: + try: + n_all_measure = self.measurement_variables.index(name) + jaco_info[par].append(self.all_jacobian_info[par][n_all_measure]) + except: + raise ValueError( + "Measurement ", name, " is not in original measurement set." + ) + + return jaco_info + + def _print_FIM_info(self, FIM): + """ + using a dictionary to store all FIM information + + Parameters + ---------- + FIM: the Fisher Information Matrix, needs to be P.D. and symmetric + + Returns + ------- + fim_info: a FIM dictionary containing the following key:value pairs + ~['FIM']: a list of FIM itself + ~[design variable name]: a list of design variable values at each time point + ~['Trace']: a scalar number of Trace + ~['Determinant']: a scalar number of determinant + ~['Condition number:']: a scalar number of condition number + ~['Minimal eigen value:']: a scalar number of minimal eigen value + ~['Eigen values:']: a list of all eigen values + ~['Eigen vectors:']: a list of all eigen vectors + """ + eig = np.linalg.eigvals(FIM) + self.FIM = FIM + self.trace = np.trace(FIM) + self.det = np.linalg.det(FIM) + self.min_eig = min(eig) + self.cond = max(eig) / min(eig) + self.eig_vals = eig + self.eig_vecs = np.linalg.eig(FIM)[1] + + self.logger.info( + 'FIM: %s; \n Trace: %s; \n Determinant: %s;', self.FIM, self.trace, self.det + ) + self.logger.info( + 'Condition number: %s; \n Min eigenvalue: %s.', self.cond, self.min_eig + ) + + def _solution_info(self, m, dv_set): + """ + Solution information. Only for optimization problem + + Parameters + ---------- + m: model + dv_set: design variable dictionary + + Returns + ------- + model_info: model solutions dictionary containing the following key:value pairs + -['obj']: a scalar number of objective function value + -['det']: a scalar number of determinant calculated by the model (different from FIM_info['det'] which + is calculated by numpy) + -['trace']: a scalar number of trace calculated by the model + -[design variable name]: a list of design variable solution + """ + self.obj_value = value(m.obj) + + # When scaled with constant values, the effect of the scaling factors are removed here + # For determinant, the scaling factor to determinant is scaling factor ** (Dim of FIM) + # For trace, the scaling factor to trace is the scaling factor. + if self.obj == 'det': + self.obj_det = np.exp(value(m.obj)) / (self.fim_scale_constant_value) ** ( + len(self.parameter_names) + ) + elif self.obj == 'trace': + self.obj_trace = np.exp(value(m.obj)) / (self.fim_scale_constant_value) + + design_variable_names = list(dv_set.keys()) + dv_times = list(dv_set.values()) + + solution = {} + for d, dname in enumerate(design_variable_names): + sol = [] + if dv_times[d] is not None: + for t, time in enumerate(dv_times[d]): + newvar = getattr(m, dname)[time] + sol.append(value(newvar)) + else: + newvar = getattr(m, dname) + sol.append(value(newvar)) + + solution[dname] = sol + self.solution = solution + + def _store_FIM(self): + # if given store file name, store the FIM + store_dict = {} + for i, name in enumerate(self.parameter_names): + store_dict[name] = self.FIM[i] + FIM_store = pd.DataFrame(store_dict) + FIM_store.to_csv(self.store_FIM, index=False) + + def _get_solver_info(self): + """ + Solver information dictionary + + Return: + ------ + solver_status: a solver information dictionary containing the following key:value pairs + -['square']: a string of square result solver status + -['doe']: a string of doe result solver status + """ + + if (self.result.solver.status == SolverStatus.ok) and ( + self.result.solver.termination_condition == TerminationCondition.optimal + ): + self.status = 'converged' + elif ( + self.result.solver.termination_condition == TerminationCondition.infeasible + ): + self.status = 'infeasible' + else: + self.status = self.result.solver.status + + +class GridSearchResult: + def __init__( + self, + design_ranges, + design_dimension_names, + FIM_result_list, + store_optimality_name=None, + ): + """ + This class deals with the FIM results from grid search, providing A, D, E, ME-criteria results for each design variable. + Can choose to draw 1D sensitivity curves and 2D heatmaps. + + Parameters + ---------- + design_ranges: + a ``dict`` whose keys are design variable names, values are a list of design variable values to go over + design_dimension_names: + a ``list`` of design variables names + FIM_result_list: + a ``dict`` containing FIM results, keys are a tuple of design variable values, values are FIM result objects + store_optimality_name: + a .csv file name containing all four optimalities value + """ + # design variables + self.design_names = design_dimension_names + self.design_ranges = design_ranges + self.FIM_result_list = FIM_result_list + + self.store_optimality_name = store_optimality_name + + def extract_criteria(self): + """ + Extract design criteria values for every 'grid' (design variable combination) searched. + + Returns + ------- + self.store_all_results_dataframe: a pandas dataframe with columns as design variable names and A, D, E, ME-criteria names. + Each row contains the design variable value for this 'grid', and the 4 design criteria value for this 'grid'. + """ + + # a list store all results + store_all_results = [] + + # generate combinations of design variable values to go over + search_design_set = product(*self.design_ranges) + + # loop over deign value combinations + for design_set_iter in search_design_set: + # locate this grid in the dictionary of combined results + result_object_asdict = { + k: v for k, v in self.FIM_result_list.items() if k == design_set_iter + } + # an result object is identified by a tuple of the design variable value it uses + result_object_iter = result_object_asdict[design_set_iter] + + # store results as a row in the dataframe + store_iteration_result = list(design_set_iter) + store_iteration_result.append(result_object_iter.trace) + store_iteration_result.append(result_object_iter.det) + store_iteration_result.append(result_object_iter.min_eig) + store_iteration_result.append(result_object_iter.cond) + + # add this row to the dataframe + store_all_results.append(store_iteration_result) + + # generate column names for the dataframe + column_names = [] + # this count is for repeated design variable names which can happen in dynamic problems + for i in self.design_names: + # if design variables share the same value, use the first name as the column name + if type(i) is list: + column_names.append(i[0]) + else: + column_names.append(i) + + # Each design criteria has a column to store values + column_names.append('A') + column_names.append('D') + column_names.append('E') + column_names.append('ME') + # generate the dataframe + store_all_results = np.asarray(store_all_results) + self.store_all_results_dataframe = pd.DataFrame( + store_all_results, columns=column_names + ) + # if needs to store the values + if self.store_optimality_name is not None: + self.store_all_results_dataframe.to_csv( + self.store_optimality_name, index=False + ) + + def figure_drawing( + self, + fixed_design_dimensions, + sensitivity_dimension, + title_text, + xlabel_text, + ylabel_text, + font_axes=16, + font_tick=14, + log_scale=True, + ): + """ + Extract results needed for drawing figures from the overall result dataframe. + Draw 1D sensitivity curve or 2D heatmap. + It can be applied to results of any dimensions, but requires design variable values in other dimensions be fixed. + + Parameters + ---------- + fixed_design_dimensions: a dictionary, keys are the design variable names to be fixed, values are the value of it to be fixed. + sensitivity_dimension: a list of design variable names to draw figures. + If only one name is given, a 1D sensitivity curve is drawn + if two names are given, a 2D heatmap is drawn. + title_text: name of the figure, a string + xlabel_text: x label title, a string. + In a 1D sensitivity curve, it is the design variable by which the curve is drawn. + In a 2D heatmap, it should be the second design variable in the design_ranges + ylabel_text: y label title, a string. + A 1D sensitivity curve does not need it. In a 2D heatmap, it should be the first design variable in the dv_ranges + font_axes: axes label font size + font_tick: tick label font size + log_scale: if True, the result matrix will be scaled by log10 + + Returns + -------- + None + """ + self.fixed_design_names = list(fixed_design_dimensions.keys()) + self.fixed_design_values = list(fixed_design_dimensions.values()) + self.sensitivity_dimension = sensitivity_dimension + + if len(self.fixed_design_names) + len(self.sensitivity_dimension) != len( + self.design_names + ): + raise ValueError( + 'Error: All dimensions except for those the figures are drawn by should be fixed.' + ) + + if len(self.sensitivity_dimension) not in [1, 2]: + raise ValueError("Error: Either 1D or 2D figures can be drawn.") + + # generate a combination of logic sentences to filter the results of the DOF needed. + # an example filter: (self.store_all_results_dataframe["CA0"]==5). + if len(self.fixed_design_names) != 0: + filter = '' + for i in range(len(self.fixed_design_names)): + filter += '(self.store_all_results_dataframe[' + filter += str(self.fixed_design_names[i]) + filter += ']==' + filter += str(self.fixed_design_values[i]) + filter += ')' + if i != (len(self.fixed_design_names) - 1): + filter += '&' + # extract results with other dimensions fixed + figure_result_data = self.store_all_results_dataframe.loc[eval(filter)] + # if there is no other fixed dimensions + else: + figure_result_data = self.store_all_results_dataframe + + # add results for figures + self.figure_result_data = figure_result_data + + # if one design variable name is given as DOF, draw 1D sensitivity curve + if len(sensitivity_dimension) == 1: + self._curve1D( + title_text, xlabel_text, font_axes=16, font_tick=14, log_scale=True + ) + # if two design variable names are given as DOF, draw 2D heatmaps + elif len(sensitivity_dimension) == 2: + self._heatmap( + title_text, + xlabel_text, + ylabel_text, + font_axes=16, + font_tick=14, + log_scale=True, + ) + + def _curve1D( + self, title_text, xlabel_text, font_axes=16, font_tick=14, log_scale=True + ): + """ + Draw 1D sensitivity curves for all design criteria + + Parameters + ---------- + title_text: name of the figure, a string + xlabel_text: x label title, a string. + In a 1D sensitivity curve, it is the design variable by which the curve is drawn. + font_axes: axes label font size + font_tick: tick label font size + log_scale: if True, the result matrix will be scaled by log10 + + Returns + -------- + 4 Figures of 1D sensitivity curves for each criteria + """ + + # extract the range of the DOF design variable + x_range = self.figure_result_data[self.sensitivity_dimension[0]].values.tolist() + + # decide if the results are log scaled + if log_scale: + y_range_A = np.log10(self.figure_result_data['A'].values.tolist()) + y_range_D = np.log10(self.figure_result_data['D'].values.tolist()) + y_range_E = np.log10(self.figure_result_data['E'].values.tolist()) + y_range_ME = np.log10(self.figure_result_data['ME'].values.tolist()) + else: + y_range_A = self.figure_result_data['A'].values.tolist() + y_range_D = self.figure_result_data['D'].values.tolist() + y_range_E = self.figure_result_data['E'].values.tolist() + y_range_ME = self.figure_result_data['ME'].values.tolist() + + # Draw A-optimality + fig = plt.pyplot.figure() + plt.pyplot.rc('axes', titlesize=font_axes) + plt.pyplot.rc('axes', labelsize=font_axes) + plt.pyplot.rc('xtick', labelsize=font_tick) + plt.pyplot.rc('ytick', labelsize=font_tick) + ax = fig.add_subplot(111) + params = {'mathtext.default': 'regular'} + # plt.rcParams.update(params) + ax.plot(x_range, y_range_A) + ax.scatter(x_range, y_range_A) + ax.set_ylabel('$log_{10}$ Trace') + ax.set_xlabel(xlabel_text) + plt.pyplot.title(title_text + ' - A optimality') + plt.pyplot.show() + + # Draw D-optimality + fig = plt.pyplot.figure() + plt.pyplot.rc('axes', titlesize=font_axes) + plt.pyplot.rc('axes', labelsize=font_axes) + plt.pyplot.rc('xtick', labelsize=font_tick) + plt.pyplot.rc('ytick', labelsize=font_tick) + ax = fig.add_subplot(111) + params = {'mathtext.default': 'regular'} + # plt.rcParams.update(params) + ax.plot(x_range, y_range_D) + ax.scatter(x_range, y_range_D) + ax.set_ylabel('$log_{10}$ Determinant') + ax.set_xlabel(xlabel_text) + plt.pyplot.title(title_text + ' - D optimality') + plt.pyplot.show() + + # Draw E-optimality + fig = plt.pyplot.figure() + plt.pyplot.rc('axes', titlesize=font_axes) + plt.pyplot.rc('axes', labelsize=font_axes) + plt.pyplot.rc('xtick', labelsize=font_tick) + plt.pyplot.rc('ytick', labelsize=font_tick) + ax = fig.add_subplot(111) + params = {'mathtext.default': 'regular'} + # plt.rcParams.update(params) + ax.plot(x_range, y_range_E) + ax.scatter(x_range, y_range_E) + ax.set_ylabel('$log_{10}$ Minimal eigenvalue') + ax.set_xlabel(xlabel_text) + plt.pyplot.title(title_text + ' - E optimality') + plt.pyplot.show() + + # Draw Modified E-optimality + fig = plt.pyplot.figure() + plt.pyplot.rc('axes', titlesize=font_axes) + plt.pyplot.rc('axes', labelsize=font_axes) + plt.pyplot.rc('xtick', labelsize=font_tick) + plt.pyplot.rc('ytick', labelsize=font_tick) + ax = fig.add_subplot(111) + params = {'mathtext.default': 'regular'} + # plt.rcParams.update(params) + ax.plot(x_range, y_range_ME) + ax.scatter(x_range, y_range_ME) + ax.set_ylabel('$log_{10}$ Condition number') + ax.set_xlabel(xlabel_text) + plt.pyplot.title(title_text + ' - Modified E optimality') + plt.pyplot.show() + + def _heatmap( + self, + title_text, + xlabel_text, + ylabel_text, + font_axes=16, + font_tick=14, + log_scale=True, + ): + """ + Draw 2D heatmaps for all design criteria + + Parameters + ---------- + title_text: name of the figure, a string + xlabel_text: x label title, a string. + In a 2D heatmap, it should be the second design variable in the design_ranges + ylabel_text: y label title, a string. + In a 2D heatmap, it should be the first design variable in the dv_ranges + font_axes: axes label font size + font_tick: tick label font size + log_scale: if True, the result matrix will be scaled by log10 + + Returns + -------- + 4 Figures of 2D heatmap for each criteria + """ + + # achieve the design variable ranges this figure needs + # create a dictionary for sensitivity dimensions + sensitivity_dict = {} + for i, name in enumerate(self.design_names): + if name in self.sensitivity_dimension: + sensitivity_dict[name] = self.design_ranges[i] + elif name[0] in self.sensitivity_dimension: + sensitivity_dict[name[0]] = self.design_ranges[i] + + x_range = sensitivity_dict[self.sensitivity_dimension[0]] + y_range = sensitivity_dict[self.sensitivity_dimension[1]] + + # extract the design criteria values + A_range = self.figure_result_data['A'].values.tolist() + D_range = self.figure_result_data['D'].values.tolist() + E_range = self.figure_result_data['E'].values.tolist() + ME_range = self.figure_result_data['ME'].values.tolist() + + # reshape the design criteria values for heatmaps + cri_a = np.asarray(A_range).reshape(len(x_range), len(y_range)) + cri_d = np.asarray(D_range).reshape(len(x_range), len(y_range)) + cri_e = np.asarray(E_range).reshape(len(x_range), len(y_range)) + cri_e_cond = np.asarray(ME_range).reshape(len(x_range), len(y_range)) + + self.cri_a = cri_a + self.cri_d = cri_d + self.cri_e = cri_e + self.cri_e_cond = cri_e_cond + + # decide if log scaled + if log_scale: + hes_a = np.log10(self.cri_a) + hes_e = np.log10(self.cri_e) + hes_d = np.log10(self.cri_d) + hes_e2 = np.log10(self.cri_e_cond) + else: + hes_a = self.cri_a + hes_e = self.cri_e + hes_d = self.cri_d + hes_e2 = self.cri_e_cond + + # set heatmap x,y ranges + xLabel = x_range + yLabel = y_range + + # A-optimality + fig = plt.pyplot.figure() + plt.pyplot.rc('axes', titlesize=font_axes) + plt.pyplot.rc('axes', labelsize=font_axes) + plt.pyplot.rc('xtick', labelsize=font_tick) + plt.pyplot.rc('ytick', labelsize=font_tick) + ax = fig.add_subplot(111) + params = {'mathtext.default': 'regular'} + plt.pyplot.rcParams.update(params) + ax.set_yticks(range(len(yLabel))) + ax.set_yticklabels(yLabel) + ax.set_ylabel(ylabel_text) + ax.set_xticks(range(len(xLabel))) + ax.set_xticklabels(xLabel) + ax.set_xlabel(xlabel_text) + im = ax.imshow(hes_a.T, cmap=plt.pyplot.cm.hot_r) + ba = plt.pyplot.colorbar(im) + ba.set_label('log10(trace(FIM))') + plt.pyplot.title(title_text + ' - A optimality') + plt.pyplot.show() + + # D-optimality + fig = plt.pyplot.figure() + plt.pyplot.rc('axes', titlesize=font_axes) + plt.pyplot.rc('axes', labelsize=font_axes) + plt.pyplot.rc('xtick', labelsize=font_tick) + plt.pyplot.rc('ytick', labelsize=font_tick) + ax = fig.add_subplot(111) + params = {'mathtext.default': 'regular'} + plt.pyplot.rcParams.update(params) + ax.set_yticks(range(len(yLabel))) + ax.set_yticklabels(yLabel) + ax.set_ylabel(ylabel_text) + ax.set_xticks(range(len(xLabel))) + ax.set_xticklabels(xLabel) + ax.set_xlabel(xlabel_text) + im = ax.imshow(hes_d.T, cmap=plt.pyplot.cm.hot_r) + ba = plt.pyplot.colorbar(im) + ba.set_label('log10(det(FIM))') + plt.pyplot.title(title_text + ' - D optimality') + plt.pyplot.show() + + # E-optimality + fig = plt.pyplot.figure() + plt.pyplot.rc('axes', titlesize=font_axes) + plt.pyplot.rc('axes', labelsize=font_axes) + plt.pyplot.rc('xtick', labelsize=font_tick) + plt.pyplot.rc('ytick', labelsize=font_tick) + ax = fig.add_subplot(111) + params = {'mathtext.default': 'regular'} + plt.pyplot.rcParams.update(params) + ax.set_yticks(range(len(yLabel))) + ax.set_yticklabels(yLabel) + ax.set_ylabel(ylabel_text) + ax.set_xticks(range(len(xLabel))) + ax.set_xticklabels(xLabel) + ax.set_xlabel(xlabel_text) + im = ax.imshow(hes_e.T, cmap=plt.pyplot.cm.hot_r) + ba = plt.pyplot.colorbar(im) + ba.set_label('log10(minimal eig(FIM))') + plt.pyplot.title(title_text + ' - E optimality') + plt.pyplot.show() + + # modified E-optimality + fig = plt.pyplot.figure() + plt.pyplot.rc('axes', titlesize=font_axes) + plt.pyplot.rc('axes', labelsize=font_axes) + plt.pyplot.rc('xtick', labelsize=font_tick) + plt.pyplot.rc('ytick', labelsize=font_tick) + ax = fig.add_subplot(111) + params = {'mathtext.default': 'regular'} + plt.pyplot.rcParams.update(params) + ax.set_yticks(range(len(yLabel))) + ax.set_yticklabels(yLabel) + ax.set_ylabel(ylabel_text) + ax.set_xticks(range(len(xLabel))) + ax.set_xticklabels(xLabel) + ax.set_xlabel(xlabel_text) + im = ax.imshow(hes_e2.T, cmap=plt.pyplot.cm.hot_r) + ba = plt.pyplot.colorbar(im) + ba.set_label('log10(cond(FIM))') + plt.pyplot.title(title_text + ' - Modified E-optimality') + plt.pyplot.show() diff --git a/pyomo/contrib/doe/scenario.py b/pyomo/contrib/doe/scenario.py new file mode 100644 index 00000000000..eff9c883e0b --- /dev/null +++ b/pyomo/contrib/doe/scenario.py @@ -0,0 +1,154 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + +import pickle +from enum import Enum +from collections import namedtuple + + +class FiniteDifferenceStep(Enum): + forward = "forward" + central = "central" + backward = "backward" + + +# namedtuple for scenario data +ScenarioData = namedtuple( + "ScenarioData", ["scenario", "scena_num", "eps_abs", "scenario_indices"] +) + + +class ScenarioGenerator: + def __init__(self, parameter_dict=None, formula="central", step=0.001, store=False): + """Generate scenarios. + DoE library first calls this function to generate scenarios. + + Parameters + ----------- + parameter_dict: + a ``dict`` of parameter, keys are names of ''string'', values are their nominal value of ''float''. + for e.g., {'A1': 84.79, 'A2': 371.72, 'E1': 7.78, 'E2': 15.05} + formula: + choose from 'central', 'forward', 'backward', None. + step: + Sensitivity perturbation step size, a fraction between [0,1]. default is 0.001 + store: + if True, store results. + """ + # get info from parameter dictionary + self.parameter_dict = parameter_dict + self.para_names = list(parameter_dict.keys()) + self.no_para = len(self.para_names) + self.formula = FiniteDifferenceStep(formula) + self.step = step + self.store = store + self.scenario_nominal = [parameter_dict[d] for d in self.para_names] + + # generate scenarios + self.generate_scenario() + + def generate_scenario(self): + """ + Generate scenario data for the given parameter dictionary. + + Returns: + ------- + ScenarioData: a namedtuple containing scenarios information. + ScenarioData.scenario: a list of dictionaries, each dictionary contains a perturbed scenario + ScenarioData.scena_num: a dict of scenario number related to one parameter + ScenarioData.eps_abs: keys are parameter name, values are the step it is perturbed + ScenarioData.scenario_indices: a list of scenario indices + + + For e.g., if a dict {'P':100, 'D':20} is given, step=0.1, formula='central', it will return: + self.ScenarioData.scenario: [{'P':101, 'D':20}, {'P':99, 'D':20}, {'P':100, 'D':20.2}, {'P':100, 'D':19.8}], + self.ScenarioData.scena_num: {'P':[0,1], 'D':[2,3]}} + self.ScenarioData.eps_abs: {'P': 2.0, 'D': 0.4} + self.ScenarioData.scenario_indices: [0,1,2,3] + if formula ='forward', it will return: + self.ScenarioData.scenario:[{'P':101, 'D':20}, {'P':100, 'D':20.2}, {'P':100, 'D':20}], + self.ScenarioData.scena_num: {'P':[0,2], 'D':[1,2]}} + self.ScenarioData.eps_abs: {'P': 2.0, 'D': 0.4} + self.ScenarioData.scenario_indices: [0,1,2] + """ + # dict for parameter perturbation step size + eps_abs = {} + # scenario dict for block + scenario = [] + # number of scenario + scena_num = {} + + # loop over parameter name + for p, para in enumerate(self.para_names): + ## get scenario dictionary + if self.formula == FiniteDifferenceStep.central: + scena_num[para] = [2 * p, 2 * p + 1] + scena_dict_up, scena_dict_lo = ( + self.parameter_dict.copy(), + self.parameter_dict.copy(), + ) + # corresponding parameter dictionary for the scenario + scena_dict_up[para] *= 1 + self.step + scena_dict_lo[para] *= 1 - self.step + + scenario.append(scena_dict_up) + scenario.append(scena_dict_lo) + + elif self.formula in [ + FiniteDifferenceStep.forward, + FiniteDifferenceStep.backward, + ]: + # the base case is added as the last one + scena_num[para] = [p, len(self.param_names)] + scena_dict_up, scena_dict_lo = ( + self.parameter_dict.copy(), + self.parameter_dict.copy(), + ) + if self.formula == FiniteDifferenceStep.forward: + scena_dict_up[para] *= 1 + self.step + + elif self.formula == FiniteDifferenceStep.backward: + scena_dict_lo[para] *= 1 - self.step + + scenario.append(scena_dict_up) + scenario.append(scena_dict_lo) + + ## get perturbation sizes + # for central difference scheme, perturbation size is two times the step size + if self.formula == FiniteDifferenceStep.central: + eps_abs[para] = 2 * self.step * self.parameter_dict[para] + else: + eps_abs[para] = self.step * self.parameter_dict[para] + + self.ScenarioData = ScenarioData( + scenario, scena_num, eps_abs, list(range(len(scenario))) + ) + + # store scenario + if self.store: + with open('scenario_simultaneous.pickle', 'wb') as f: + pickle.dump(self.scenario_data, f) diff --git a/pyomo/checker/tests/examples/model/ModelName_missing.py b/pyomo/contrib/doe/tests/__init__.py similarity index 100% rename from pyomo/checker/tests/examples/model/ModelName_missing.py rename to pyomo/contrib/doe/tests/__init__.py diff --git a/pyomo/contrib/doe/tests/test_example.py b/pyomo/contrib/doe/tests/test_example.py new file mode 100644 index 00000000000..0f143e03677 --- /dev/null +++ b/pyomo/contrib/doe/tests/test_example.py @@ -0,0 +1,70 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + + +from pyomo.common.dependencies import ( + numpy as np, + numpy_available, + pandas as pd, + pandas_available, + scipy_available, +) + +import pyomo.common.unittest as unittest + +from pyomo.opt import SolverFactory + +ipopt_available = SolverFactory('ipopt').available() + + +class TestReactorExample(unittest.TestCase): + @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") + @unittest.skipIf(not scipy_available, "scipy is not available") + @unittest.skipIf(not numpy_available, "Numpy is not available") + def test_reactor_compute_FIM(self): + from pyomo.contrib.doe.examples import reactor_compute_FIM + + reactor_compute_FIM.main() + + @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") + @unittest.skipIf(not numpy_available, "Numpy is not available") + def test_reactor_optimize_doe(self): + from pyomo.contrib.doe.examples import reactor_optimize_doe + + reactor_optimize_doe.main() + + @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") + @unittest.skipIf(not pandas_available, "pandas is not available") + @unittest.skipIf(not numpy_available, "Numpy is not available") + def test_reactor_grid_search(self): + from pyomo.contrib.doe.examples import reactor_grid_search + + reactor_grid_search.main() + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/doe/tests/test_fim_doe.py b/pyomo/contrib/doe/tests/test_fim_doe.py new file mode 100644 index 00000000000..42b463162b2 --- /dev/null +++ b/pyomo/contrib/doe/tests/test_fim_doe.py @@ -0,0 +1,360 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + +from pyomo.common.dependencies import numpy as np, numpy_available +import pyomo.common.unittest as unittest +from pyomo.contrib.doe import ( + MeasurementVariables, + DesignVariables, + ScenarioGenerator, + DesignOfExperiments, + VariablesWithIndices, +) +from pyomo.contrib.doe.examples.reactor_kinetics import create_model, disc_for_measure + + +class TestMeasurementError(unittest.TestCase): + def test(self): + t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + variable_name = "C" + indices = {0: ['CA', 'CB', 'CC'], 1: t_control} + # measurement object + measurements = MeasurementVariables() + # if time index is not in indices, an value error is thrown. + with self.assertRaises(ValueError): + measurements.add_variables( + variable_name, indices=indices, time_index_position=2 + ) + + +class TestDesignError(unittest.TestCase): + def test(self): + t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + # design object + exp_design = DesignVariables() + + # add T as design variable + var_T = 'T' + indices_T = {0: t_control} + exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300] + + upper_bound = [ + 700, + 700, + 700, + 700, + 700, + 700, + 700, + 700, + 700, + 800, + ] # wrong upper bound since it has more elements than the length of variable names + lower_bound = [300, 300, 300, 300, 300, 300, 300, 300, 300] + + with self.assertRaises(ValueError): + exp_design.add_variables( + var_T, + indices=indices_T, + time_index_position=0, + values=exp1_T, + lower_bounds=lower_bound, + upper_bounds=upper_bound, + ) + + +@unittest.skipIf(not numpy_available, "Numpy is not available") +class TestPriorFIMError(unittest.TestCase): + def test(self): + # Control time set [h] + t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + # measurement object + variable_name = "C" + indices = {0: ['CA', 'CB', 'CC'], 1: t_control} + + measurements = MeasurementVariables() + measurements.add_variables( + variable_name, indices=indices, time_index_position=1 + ) + + # design object + exp_design = DesignVariables() + + # add CAO as design variable + var_C = 'CA0' + indices_C = {0: [0]} + exp1_C = [5] + exp_design.add_variables( + var_C, + indices=indices_C, + time_index_position=0, + values=exp1_C, + lower_bounds=1, + upper_bounds=5, + ) + + # add T as design variable + var_T = 'T' + indices_T = {0: t_control} + exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300] + + exp_design.add_variables( + var_T, + indices=indices_T, + time_index_position=0, + values=exp1_T, + lower_bounds=300, + upper_bounds=700, + ) + + parameter_dict = {"A1": 1, "A2": 1, "E1": 1} + + # empty prior + prior_right = [[0] * 3 for i in range(3)] + prior_pass = [[0] * 5 for i in range(10)] + + # check if the error can be thrown when given a wrong shape of FIM prior + with self.assertRaises(ValueError): + doe_object = DesignOfExperiments( + parameter_dict, + exp_design, + measurements, + create_model, + prior_FIM=prior_pass, + discretize_model=disc_for_measure, + ) + + +class TestMeasurement(unittest.TestCase): + """Test the MeasurementVariables class, specify, add_element, update_variance, check_subset functions.""" + + def test_setup(self): + ### add_element function + + # control time for C [h] + t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + # control time for T [h] + t_control2 = [0.2, 0.4, 0.6, 0.8] + + # measurement object + measurements = MeasurementVariables() + + # add variable C + variable_name = "C" + indices = {0: ['CA', 'CB', 'CC'], 1: t_control} + measurements.add_variables( + variable_name, indices=indices, time_index_position=1 + ) + + # add variable T + variable_name2 = "T" + indices2 = {0: [1, 3, 5], 1: t_control2} + measurements.add_variables( + variable_name2, indices=indices2, time_index_position=1, variance=10 + ) + + # check variable names + self.assertEqual(measurements.variable_names[0], 'C[CA,0]') + self.assertEqual(measurements.variable_names[1], 'C[CA,0.125]') + self.assertEqual(measurements.variable_names[-1], 'T[5,0.8]') + self.assertEqual(measurements.variable_names[-2], 'T[5,0.6]') + self.assertEqual(measurements.variance['T[5,0.4]'], 10) + self.assertEqual(measurements.variance['T[5,0.6]'], 10) + self.assertEqual(measurements.variance['T[5,0.4]'], 10) + self.assertEqual(measurements.variance['T[5,0.6]'], 10) + + ### specify function + var_names = [ + 'C[CA,0]', + 'C[CA,0.125]', + 'C[CA,0.875]', + 'C[CA,1]', + 'C[CB,0]', + 'C[CB,0.125]', + 'C[CB,0.25]', + 'C[CB,0.375]', + 'C[CC,0]', + 'C[CC,0.125]', + 'C[CC,0.25]', + 'C[CC,0.375]', + ] + + measurements2 = MeasurementVariables() + measurements2.set_variable_name_list(var_names) + + self.assertEqual(measurements2.variable_names[1], 'C[CA,0.125]') + self.assertEqual(measurements2.variable_names[-1], 'C[CC,0.375]') + + ### check_subset function + self.assertTrue(measurements.check_subset(measurements2)) + + +class TestDesignVariable(unittest.TestCase): + """Test the DesignVariable class, specify, add_element, add_bounds, update_values.""" + + def test_setup(self): + t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + + # design object + exp_design = DesignVariables() + + # add CAO as design variable + var_C = 'CA0' + indices_C = {0: [0]} + exp1_C = [5] + exp_design.add_variables( + var_C, + indices=indices_C, + time_index_position=0, + values=exp1_C, + lower_bounds=1, + upper_bounds=5, + ) + + # add T as design variable + var_T = 'T' + indices_T = {0: t_control} + exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300] + + exp_design.add_variables( + var_T, + indices=indices_T, + time_index_position=0, + values=exp1_T, + lower_bounds=300, + upper_bounds=700, + ) + + self.assertEqual( + exp_design.variable_names, + [ + 'CA0[0]', + 'T[0]', + 'T[0.125]', + 'T[0.25]', + 'T[0.375]', + 'T[0.5]', + 'T[0.625]', + 'T[0.75]', + 'T[0.875]', + 'T[1]', + ], + ) + self.assertEqual(exp_design.variable_names_value['CA0[0]'], 5) + self.assertEqual(exp_design.variable_names_value['T[0]'], 470) + self.assertEqual(exp_design.upper_bounds['CA0[0]'], 5) + self.assertEqual(exp_design.upper_bounds['T[0]'], 700) + self.assertEqual(exp_design.lower_bounds['CA0[0]'], 1) + self.assertEqual(exp_design.lower_bounds['T[0]'], 300) + + design_names = exp_design.variable_names + exp1 = [4, 600, 300, 300, 300, 300, 300, 300, 300, 300] + exp1_design_dict = dict(zip(design_names, exp1)) + exp_design.update_values(exp1_design_dict) + self.assertEqual(exp_design.variable_names_value['CA0[0]'], 4) + self.assertEqual(exp_design.variable_names_value['T[0]'], 600) + + +class TestParameter(unittest.TestCase): + """Test the ScenarioGenerator class, generate_scenario function.""" + + def test_setup(self): + # set up parameter class + param_dict = {'A1': 84.79, 'A2': 371.72, 'E1': 7.78, 'E2': 15.05} + + scenario_gene = ScenarioGenerator(param_dict, formula="central", step=0.1) + parameter_set = scenario_gene.ScenarioData + + self.assertAlmostEqual(parameter_set.eps_abs['A1'], 16.9582, places=1) + self.assertAlmostEqual(parameter_set.eps_abs['E1'], 1.5554, places=1) + self.assertEqual(parameter_set.scena_num['A2'], [2, 3]) + self.assertEqual(parameter_set.scena_num['E1'], [4, 5]) + self.assertAlmostEqual(parameter_set.scenario[0]['A1'], 93.2699, places=1) + self.assertAlmostEqual(parameter_set.scenario[2]['A2'], 408.8895, places=1) + self.assertAlmostEqual(parameter_set.scenario[-1]['E2'], 13.54, places=1) + self.assertAlmostEqual(parameter_set.scenario[-2]['E2'], 16.55, places=1) + + +class TestVariablesWithIndices(unittest.TestCase): + """Test the DesignVariable class, specify, add_element, add_bounds, update_values.""" + + def test_setup(self): + special = VariablesWithIndices() + t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + ### add_element function + # add CAO as design variable + var_C = 'CA0' + indices_C = {0: [0]} + exp1_C = [5] + special.add_variables( + var_C, + indices=indices_C, + time_index_position=0, + values=exp1_C, + lower_bounds=1, + upper_bounds=5, + ) + + # add T as design variable + var_T = 'T' + indices_T = {0: t_control} + exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300] + + special.add_variables( + var_T, + indices=indices_T, + time_index_position=0, + values=exp1_T, + lower_bounds=300, + upper_bounds=700, + ) + + self.assertEqual( + special.variable_names, + [ + 'CA0[0]', + 'T[0]', + 'T[0.125]', + 'T[0.25]', + 'T[0.375]', + 'T[0.5]', + 'T[0.625]', + 'T[0.75]', + 'T[0.875]', + 'T[1]', + ], + ) + self.assertEqual(special.variable_names_value['CA0[0]'], 5) + self.assertEqual(special.variable_names_value['T[0]'], 470) + self.assertEqual(special.upper_bounds['CA0[0]'], 5) + self.assertEqual(special.upper_bounds['T[0]'], 700) + self.assertEqual(special.lower_bounds['CA0[0]'], 1) + self.assertEqual(special.lower_bounds['T[0]'], 300) + + +if __name__ == '__main__': + unittest.main() diff --git a/pyomo/contrib/doe/tests/test_reactor_example.py b/pyomo/contrib/doe/tests/test_reactor_example.py new file mode 100644 index 00000000000..86c914ec4e0 --- /dev/null +++ b/pyomo/contrib/doe/tests/test_reactor_example.py @@ -0,0 +1,220 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# +# Pyomo.DoE was produced under the Department of Energy Carbon Capture Simulation +# Initiative (CCSI), and is copyright (c) 2022 by the software owners: +# TRIAD National Security, LLC., Lawrence Livermore National Security, LLC., +# Lawrence Berkeley National Laboratory, Pacific Northwest National Laboratory, +# Battelle Memorial Institute, University of Notre Dame, +# The University of Pittsburgh, The University of Texas at Austin, +# University of Toledo, West Virginia University, et al. All rights reserved. +# +# NOTICE. This Software was developed under funding from the +# U.S. Department of Energy and the U.S. Government consequently retains +# certain rights. As such, the U.S. Government has been granted for itself +# and others acting on its behalf a paid-up, nonexclusive, irrevocable, +# worldwide license in the Software to reproduce, distribute copies to the +# public, prepare derivative works, and perform publicly and display +# publicly, and to permit other to do so. +# ___________________________________________________________________________ + + +# import libraries +from pyomo.common.dependencies import numpy as np, numpy_available, pandas_available +import pyomo.common.unittest as unittest +from pyomo.contrib.doe import DesignOfExperiments, MeasurementVariables, DesignVariables +from pyomo.environ import value, ConcreteModel +from pyomo.contrib.doe.examples.reactor_kinetics import create_model, disc_for_measure +from pyomo.opt import SolverFactory + +ipopt_available = SolverFactory('ipopt').available() + + +class Test_example_options(unittest.TestCase): + """Test the three options in the kinetics example.""" + + def test_setUP(self): + # parmest option + mod = create_model(model_option="parmest") + + # global and block option + mod = ConcreteModel() + create_model(mod, model_option="stage1") + create_model(mod, model_option="stage2") + # both options need a given model, or raise errors + with self.assertRaises(ValueError): + create_model(model_option="stage1") + + with self.assertRaises(ValueError): + create_model(model_option="stage2") + + with self.assertRaises(ValueError): + create_model(model_option="NotDefine") + + +class Test_doe_object(unittest.TestCase): + """Test the kinetics example with both the sequential_finite mode and the direct_kaug mode""" + + @unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") + @unittest.skipIf(not numpy_available, "Numpy is not available") + @unittest.skipIf(not pandas_available, "Pandas is not available") + def test_setUP(self): + ### Define inputs + # Control time set [h] + t_control = [0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1] + # Define parameter nominal value + parameter_dict = {'A1': 84.79, 'A2': 371.72, 'E1': 7.78, 'E2': 15.05} + + # measurement object + variable_name = "C" + indices = {0: ['CA', 'CB', 'CC'], 1: t_control} + + measurements = MeasurementVariables() + measurements.add_variables( + variable_name, indices=indices, time_index_position=1 + ) + + # design object + exp_design = DesignVariables() + + # add CAO as design variable + var_C = 'CA0' + indices_C = {0: [0]} + exp1_C = [5] + exp_design.add_variables( + var_C, + indices=indices_C, + time_index_position=0, + values=exp1_C, + lower_bounds=1, + upper_bounds=5, + ) + + # add T as design variable + var_T = 'T' + indices_T = {0: t_control} + exp1_T = [470, 300, 300, 300, 300, 300, 300, 300, 300] + + exp_design.add_variables( + var_T, + indices=indices_T, + time_index_position=0, + values=exp1_T, + lower_bounds=300, + upper_bounds=700, + ) + + ### Test sequential_finite mode + sensi_opt = "sequential_finite" + + design_names = exp_design.variable_names + exp1 = [5, 570, 300, 300, 300, 300, 300, 300, 300, 300] + exp1_design_dict = dict(zip(design_names, exp1)) + + exp_design.update_values(exp1_design_dict) + + doe_object = DesignOfExperiments( + parameter_dict, + exp_design, + measurements, + create_model, + discretize_model=disc_for_measure, + ) + + result = doe_object.compute_FIM( + mode=sensi_opt, scale_nominal_param_value=True, formula="central" + ) + + result.result_analysis() + + self.assertAlmostEqual(np.log10(result.trace), 2.7885, places=2) + self.assertAlmostEqual(np.log10(result.det), 2.8218, places=2) + self.assertAlmostEqual(np.log10(result.min_eig), -1.0123, places=2) + + ### check subset feature + sub_name = "C" + sub_indices = {0: ["CB", "CC"], 1: [0.125, 0.25, 0.5, 0.75, 0.875]} + + measure_subset = MeasurementVariables() + measure_subset.add_variables( + sub_name, indices=sub_indices, time_index_position=1 + ) + sub_result = result.subset(measure_subset) + sub_result.result_analysis() + + self.assertAlmostEqual(np.log10(sub_result.trace), 2.5535, places=2) + self.assertAlmostEqual(np.log10(sub_result.det), 1.3464, places=2) + self.assertAlmostEqual(np.log10(sub_result.min_eig), -1.5386, places=2) + + ### Test direct_kaug mode + sensi_opt = "direct_kaug" + # Define a new experiment + + exp1 = [5, 570, 400, 300, 300, 300, 300, 300, 300, 300] + exp1_design_dict = dict(zip(design_names, exp1)) + exp_design.update_values(exp1_design_dict) + + doe_object = DesignOfExperiments( + parameter_dict, + exp_design, + measurements, + create_model, + discretize_model=disc_for_measure, + ) + + result = doe_object.compute_FIM( + mode=sensi_opt, scale_nominal_param_value=True, formula="central" + ) + + result.result_analysis() + + self.assertAlmostEqual(np.log10(result.trace), 2.7211, places=2) + self.assertAlmostEqual(np.log10(result.det), 2.0845, places=2) + self.assertAlmostEqual(np.log10(result.min_eig), -1.3510, places=2) + + ### Test stochastic_program mode + + exp1 = [5, 570, 300, 300, 300, 300, 300, 300, 300, 300] + exp1_design_dict = dict(zip(design_names, exp1)) + exp_design.update_values(exp1_design_dict) + + # add a prior information (scaled FIM with T=500 and T=300 experiments) + prior = np.asarray( + [ + [28.67892806, 5.41249739, -81.73674601, -24.02377324], + [5.41249739, 26.40935036, -12.41816477, -139.23992532], + [-81.73674601, -12.41816477, 240.46276004, 58.76422806], + [-24.02377324, -139.23992532, 58.76422806, 767.25584508], + ] + ) + + doe_object2 = DesignOfExperiments( + parameter_dict, + exp_design, + measurements, + create_model, + prior_FIM=prior, + discretize_model=disc_for_measure, + ) + + square_result, optimize_result = doe_object2.stochastic_program( + if_optimize=True, + if_Cholesky=True, + scale_nominal_param_value=True, + objective_option="det", + L_initial=np.linalg.cholesky(prior), + ) + + self.assertAlmostEqual(value(optimize_result.model.CA0[0]), 5.0, places=2) + self.assertAlmostEqual(value(optimize_result.model.T[0.5]), 300, places=2) + + +if __name__ == '__main__': + unittest.main() diff --git a/pyomo/contrib/example/__init__.py b/pyomo/contrib/example/__init__.py index bd9d25dc401..7f2d08a0292 100644 --- a/pyomo/contrib/example/__init__.py +++ b/pyomo/contrib/example/__init__.py @@ -3,13 +3,15 @@ # from pyomo.contrib.example.foo import * import pyomo.contrib.example.bar + # # import the plugins directory # -# The pyomo.environ package normally calls the load() function in +# The pyomo.environ package normally calls the load() function in # the pyomo.*.plugins subdirectories. However, pyomo.contrib packages # are not loaded by pyomo.environ, so we need to call this function # when we import the rest of this package. # from pyomo.contrib.example.plugins import load + load() diff --git a/pyomo/contrib/example/plugins/__init__.py b/pyomo/contrib/example/plugins/__init__.py index 4fc09a87477..dc71adec9dc 100644 --- a/pyomo/contrib/example/plugins/__init__.py +++ b/pyomo/contrib/example/plugins/__init__.py @@ -1,6 +1,6 @@ -# Define a 'load()' function, which simply imports +# Define a 'load()' function, which simply imports # sub-packages that define plugin classes. + def load(): import pyomo.contrib.example.plugins.ex_plugin - diff --git a/pyomo/contrib/example/plugins/ex_plugin.py b/pyomo/contrib/example/plugins/ex_plugin.py index 83b0f6ac518..504605205f4 100644 --- a/pyomo/contrib/example/plugins/ex_plugin.py +++ b/pyomo/contrib/example/plugins/ex_plugin.py @@ -1,14 +1,14 @@ - from pyomo.core.base import Transformation, TransformationFactory -@TransformationFactory.register('contrib.example.xfrm', doc="An example of a transformation in a pyomo.contrib package") +@TransformationFactory.register( + 'contrib.example.xfrm', + doc="An example of a transformation in a pyomo.contrib package", +) class Xfrm_PyomoTransformation(Transformation): - def __init__(self): super(Xfrm_PyomoTransformation, self).__init__() def create_using(self, instance, **kwds): # This transformation doesn't do anything... return instance - diff --git a/pyomo/contrib/example/tests/test_example.py b/pyomo/contrib/example/tests/test_example.py index f3c6b13cec0..c38de1b914f 100644 --- a/pyomo/contrib/example/tests/test_example.py +++ b/pyomo/contrib/example/tests/test_example.py @@ -19,7 +19,6 @@ class Tests(unittest.TestCase): - def test1(self): pass diff --git a/pyomo/contrib/fbbt/fbbt.py b/pyomo/contrib/fbbt/fbbt.py index ae0fbd3e480..5c486488540 100644 --- a/pyomo/contrib/fbbt/fbbt.py +++ b/pyomo/contrib/fbbt/fbbt.py @@ -23,7 +23,14 @@ from pyomo.core.base.expression import _GeneralExpressionData, ScalarExpression import logging from pyomo.common.errors import InfeasibleConstraintException, PyomoException -from pyomo.common.config import ConfigBlock, ConfigValue, In, NonNegativeFloat, NonNegativeInt +from pyomo.common.config import ( + ConfigBlock, + ConfigValue, + In, + NonNegativeFloat, + NonNegativeInt, +) +from pyomo.common.numeric_types import native_types logger = logging.getLogger(__name__) @@ -104,26 +111,10 @@ def _prop_bnds_leaf_to_root_SumExpression(node, bnds_dict, feasibility_tol): region is removed due to floating point arithmetic and to prevent math domain errors (a larger value is more conservative). """ - arg0 = node.arg(0) - lb, ub = bnds_dict[arg0] - for i in range(1, node.nargs()): - arg = node.arg(i) - lb2, ub2 = bnds_dict[arg] - lb, ub = interval.add(lb, ub, lb2, ub2) - bnds_dict[node] = (lb, ub) - - -def _prop_bnds_leaf_to_root_LinearExpression(node: numeric_expr.LinearExpression, bnds_dict, feasibility_tol): - """ - This is very similar to sum expression - """ - lb, ub = bnds_dict[node.constant] - for coef, v in zip(node.linear_coefs, node.linear_vars): - coef_bnds = bnds_dict[coef] - v_bnds = bnds_dict[v] - term_bounds = interval.mul(*coef_bnds, *v_bnds) - lb, ub = interval.add(lb, ub, *term_bounds) - bnds_dict[node] = (lb, ub) + bnds = (0, 0) + for arg in node.args: + bnds = interval.add(*bnds, *bnds_dict[arg]) + bnds_dict[node] = bnds def _prop_bnds_leaf_to_root_DivisionExpression(node, bnds_dict, feasibility_tol): @@ -165,7 +156,9 @@ def _prop_bnds_leaf_to_root_PowExpression(node, bnds_dict, feasibility_tol): arg1, arg2 = node.args lb1, ub1 = bnds_dict[arg1] lb2, ub2 = bnds_dict[arg2] - bnds_dict[node] = interval.power(lb1, ub1, lb2, ub2, feasibility_tol=feasibility_tol) + bnds_dict[node] = interval.power( + lb1, ub1, lb2, ub2, feasibility_tol=feasibility_tol + ) def _prop_bnds_leaf_to_root_NegationExpression(node, bnds_dict, feasibility_tol): @@ -325,7 +318,9 @@ def _prop_bnds_leaf_to_root_asin(node, bnds_dict, feasibility_tol): assert len(node.args) == 1 arg = node.args[0] lb1, ub1 = bnds_dict[arg] - bnds_dict[node] = interval.asin(lb1, ub1, -interval.inf, interval.inf, feasibility_tol) + bnds_dict[node] = interval.asin( + lb1, ub1, -interval.inf, interval.inf, feasibility_tol + ) def _prop_bnds_leaf_to_root_acos(node, bnds_dict, feasibility_tol): @@ -345,7 +340,9 @@ def _prop_bnds_leaf_to_root_acos(node, bnds_dict, feasibility_tol): assert len(node.args) == 1 arg = node.args[0] lb1, ub1 = bnds_dict[arg] - bnds_dict[node] = interval.acos(lb1, ub1, -interval.inf, interval.inf, feasibility_tol) + bnds_dict[node] = interval.acos( + lb1, ub1, -interval.inf, interval.inf, feasibility_tol + ) def _prop_bnds_leaf_to_root_atan(node, bnds_dict, feasibility_tol): @@ -385,7 +382,9 @@ def _prop_bnds_leaf_to_root_sqrt(node, bnds_dict, feasibility_tol): assert len(node.args) == 1 arg = node.args[0] lb1, ub1 = bnds_dict[arg] - bnds_dict[node] = interval.power(lb1, ub1, 0.5, 0.5, feasibility_tol=feasibility_tol) + bnds_dict[node] = interval.power( + lb1, ub1, 0.5, 0.5, feasibility_tol=feasibility_tol + ) def _prop_bnds_leaf_to_root_abs(node, bnds_dict, feasibility_tol): @@ -444,31 +443,47 @@ def _prop_bnds_leaf_to_root_GeneralExpression(node, bnds_dict, feasibility_tol): region is removed due to floating point arithmetic and to prevent math domain errors (a larger value is more conservative). """ - expr_lb, expr_ub = bnds_dict[node.expr] + (expr,) = node.args + if expr.__class__ in native_types: + expr_lb = expr_ub = expr + else: + expr_lb, expr_ub = bnds_dict[expr] bnds_dict[node] = (expr_lb, expr_ub) _prop_bnds_leaf_to_root_map = dict() -_prop_bnds_leaf_to_root_map[numeric_expr.ProductExpression] = _prop_bnds_leaf_to_root_ProductExpression -_prop_bnds_leaf_to_root_map[numeric_expr.DivisionExpression] = _prop_bnds_leaf_to_root_DivisionExpression -_prop_bnds_leaf_to_root_map[numeric_expr.PowExpression] = _prop_bnds_leaf_to_root_PowExpression -_prop_bnds_leaf_to_root_map[numeric_expr.SumExpression] = _prop_bnds_leaf_to_root_SumExpression -_prop_bnds_leaf_to_root_map[numeric_expr.MonomialTermExpression] = _prop_bnds_leaf_to_root_ProductExpression -_prop_bnds_leaf_to_root_map[numeric_expr.NegationExpression] = _prop_bnds_leaf_to_root_NegationExpression -_prop_bnds_leaf_to_root_map[numeric_expr.UnaryFunctionExpression] = _prop_bnds_leaf_to_root_UnaryFunctionExpression -_prop_bnds_leaf_to_root_map[numeric_expr.LinearExpression] = _prop_bnds_leaf_to_root_LinearExpression +_prop_bnds_leaf_to_root_map[ + numeric_expr.ProductExpression +] = _prop_bnds_leaf_to_root_ProductExpression +_prop_bnds_leaf_to_root_map[ + numeric_expr.DivisionExpression +] = _prop_bnds_leaf_to_root_DivisionExpression +_prop_bnds_leaf_to_root_map[ + numeric_expr.PowExpression +] = _prop_bnds_leaf_to_root_PowExpression +_prop_bnds_leaf_to_root_map[ + numeric_expr.SumExpression +] = _prop_bnds_leaf_to_root_SumExpression +_prop_bnds_leaf_to_root_map[ + numeric_expr.MonomialTermExpression +] = _prop_bnds_leaf_to_root_ProductExpression +_prop_bnds_leaf_to_root_map[ + numeric_expr.NegationExpression +] = _prop_bnds_leaf_to_root_NegationExpression +_prop_bnds_leaf_to_root_map[ + numeric_expr.UnaryFunctionExpression +] = _prop_bnds_leaf_to_root_UnaryFunctionExpression +_prop_bnds_leaf_to_root_map[ + numeric_expr.LinearExpression +] = _prop_bnds_leaf_to_root_SumExpression _prop_bnds_leaf_to_root_map[numeric_expr.AbsExpression] = _prop_bnds_leaf_to_root_abs -_prop_bnds_leaf_to_root_map[numeric_expr.NPV_ProductExpression] = _prop_bnds_leaf_to_root_ProductExpression -_prop_bnds_leaf_to_root_map[numeric_expr.NPV_DivisionExpression] = _prop_bnds_leaf_to_root_DivisionExpression -_prop_bnds_leaf_to_root_map[numeric_expr.NPV_PowExpression] = _prop_bnds_leaf_to_root_PowExpression -_prop_bnds_leaf_to_root_map[numeric_expr.NPV_SumExpression] = _prop_bnds_leaf_to_root_SumExpression -_prop_bnds_leaf_to_root_map[numeric_expr.NPV_NegationExpression] = _prop_bnds_leaf_to_root_NegationExpression -_prop_bnds_leaf_to_root_map[numeric_expr.NPV_UnaryFunctionExpression] = _prop_bnds_leaf_to_root_UnaryFunctionExpression -_prop_bnds_leaf_to_root_map[numeric_expr.NPV_AbsExpression] = _prop_bnds_leaf_to_root_abs - -_prop_bnds_leaf_to_root_map[_GeneralExpressionData] = _prop_bnds_leaf_to_root_GeneralExpression -_prop_bnds_leaf_to_root_map[ScalarExpression] = _prop_bnds_leaf_to_root_GeneralExpression +_prop_bnds_leaf_to_root_map[ + _GeneralExpressionData +] = _prop_bnds_leaf_to_root_GeneralExpression +_prop_bnds_leaf_to_root_map[ + ScalarExpression +] = _prop_bnds_leaf_to_root_GeneralExpression def _prop_bnds_root_to_leaf_ProductExpression(node, bnds_dict, feasibility_tol): @@ -491,7 +506,9 @@ def _prop_bnds_root_to_leaf_ProductExpression(node, bnds_dict, feasibility_tol): lb1, ub1 = bnds_dict[arg1] lb2, ub2 = bnds_dict[arg2] if arg1 is arg2: - _lb1, _ub1 = interval._inverse_power1(lb0, ub0, 2, 2, orig_xl=lb1, orig_xu=ub1, feasibility_tol=feasibility_tol) + _lb1, _ub1 = interval._inverse_power1( + lb0, ub0, 2, 2, orig_xl=lb1, orig_xu=ub1, feasibility_tol=feasibility_tol + ) _lb2, _ub2 = _lb1, _ub1 else: _lb1, _ub1 = interval.div(lb0, ub0, lb2, ub2, feasibility_tol) @@ -546,72 +563,25 @@ def _prop_bnds_root_to_leaf_SumExpression(node, bnds_dict, feasibility_tol): is more conservative). """ # first accumulate bounds - accumulated_bounds = list() - accumulated_bounds.append(bnds_dict[node.arg(0)]) - lb0, ub0 = bnds_dict[node] - for i in range(1, node.nargs()): - _lb0, _ub0 = accumulated_bounds[i-1] - _lb1, _ub1 = bnds_dict[node.arg(i)] - accumulated_bounds.append(interval.add(_lb0, _ub0, _lb1, _ub1)) - if lb0 > accumulated_bounds[node.nargs() - 1][0]: - accumulated_bounds[node.nargs() - 1] = (lb0, accumulated_bounds[node.nargs()-1][1]) - if ub0 < accumulated_bounds[node.nargs() - 1][1]: - accumulated_bounds[node.nargs() - 1] = (accumulated_bounds[node.nargs()-1][0], ub0) - - for i in reversed(range(1, node.nargs())): - lb0, ub0 = accumulated_bounds[i] - lb1, ub1 = accumulated_bounds[i-1] - lb2, ub2 = bnds_dict[node.arg(i)] - _lb1, _ub1 = interval.sub(lb0, ub0, lb2, ub2) - _lb2, _ub2 = interval.sub(lb0, ub0, lb1, ub1) - if _lb1 > lb1: - lb1 = _lb1 - if _ub1 < ub1: - ub1 = _ub1 - if _lb2 > lb2: - lb2 = _lb2 - if _ub2 < ub2: - ub2 = _ub2 - accumulated_bounds[i-1] = (lb1, ub1) - bnds_dict[node.arg(i)] = (lb2, ub2) - lb, ub = bnds_dict[node.arg(0)] - _lb, _ub = accumulated_bounds[0] - if _lb > lb: - lb = _lb - if _ub < ub: - ub = _ub - bnds_dict[node.arg(0)] = (lb, ub) - + bnds = (0, 0) + accumulated_bounds = [bnds] + for arg in node.args: + bnds = interval.add(*bnds, *bnds_dict[arg]) + accumulated_bounds.append(bnds) -def _prop_bnds_root_to_leaf_LinearExpression(node: numeric_expr.LinearExpression, - bnds_dict: ComponentMap, - feasibility_tol: float): - """ - This is very similar to SumExpression. - """ - # first accumulate bounds - accumulated_bounds = list() - accumulated_bounds.append(bnds_dict[node.constant]) + # Tighten based on parent (this) node lb0, ub0 = bnds_dict[node] - for coef, v in zip(node.linear_coefs, node.linear_vars): - _lb0, _ub0 = accumulated_bounds[-1] - _lb_coef, _ub_coef = bnds_dict[coef] - _lb_v, _ub_v = bnds_dict[v] - _lb_term, _ub_term = interval.mul(_lb_coef, _ub_coef, _lb_v, _ub_v) - accumulated_bounds.append(interval.add(_lb0, _ub0, _lb_term, _ub_term)) - if lb0 > accumulated_bounds[-1][0]: - accumulated_bounds[-1] = (lb0, accumulated_bounds[-1][1]) - if ub0 < accumulated_bounds[-1][1]: - accumulated_bounds[-1] = (accumulated_bounds[-1][0], ub0) - - for i in reversed(range(len(node.linear_coefs))): - lb0, ub0 = accumulated_bounds[i + 1] - lb1, ub1 = accumulated_bounds[i] - coef = node.linear_coefs[i] - v = node.linear_vars[i] - coef_bnds = bnds_dict[coef] - v_bnds = bnds_dict[v] - lb2, ub2 = interval.mul(*coef_bnds, *v_bnds) + if lb0 > bnds[0]: + bnds = (lb0, bnds[1]) + if ub0 < bnds[1]: + bnds = (bnds[0], ub0) + accumulated_bounds[-1] = bnds + + # propagate to the children + lb0, ub0 = accumulated_bounds[-1] + for i, arg in enumerate(reversed(node.args)): + lb1, ub1 = accumulated_bounds[-2 - i] + lb2, ub2 = bnds_dict[arg] _lb1, _ub1 = interval.sub(lb0, ub0, lb2, ub2) _lb2, _ub2 = interval.sub(lb0, ub0, lb1, ub1) if _lb1 > lb1: @@ -622,8 +592,8 @@ def _prop_bnds_root_to_leaf_LinearExpression(node: numeric_expr.LinearExpression lb2 = _lb2 if _ub2 < ub2: ub2 = _ub2 - accumulated_bounds[i] = (lb1, ub1) - bnds_dict[v] = interval.div(lb2, ub2, *coef_bnds, feasibility_tol=feasibility_tol) + lb0, ub0 = lb1, ub1 + bnds_dict[arg] = (lb2, ub2) def _prop_bnds_root_to_leaf_DivisionExpression(node, bnds_dict, feasibility_tol): @@ -678,17 +648,23 @@ def _prop_bnds_root_to_leaf_PowExpression(node, bnds_dict, feasibility_tol): lb0, ub0 = bnds_dict[node] lb1, ub1 = bnds_dict[arg1] lb2, ub2 = bnds_dict[arg2] - _lb1, _ub1 = interval._inverse_power1(lb0, ub0, lb2, ub2, orig_xl=lb1, orig_xu=ub1, feasibility_tol=feasibility_tol) + _lb1, _ub1 = interval._inverse_power1( + lb0, ub0, lb2, ub2, orig_xl=lb1, orig_xu=ub1, feasibility_tol=feasibility_tol + ) if _lb1 > lb1: lb1 = _lb1 if _ub1 < ub1: ub1 = _ub1 bnds_dict[arg1] = (lb1, ub1) - if is_fixed(arg2) and lb2 == ub2: # No need to tighten the bounds on arg2 if arg2 is fixed + if ( + is_fixed(arg2) and lb2 == ub2 + ): # No need to tighten the bounds on arg2 if arg2 is fixed pass else: - _lb2, _ub2 = interval._inverse_power2(lb0, ub0, lb1, ub1, feasiblity_tol=feasibility_tol) + _lb2, _ub2 = interval._inverse_power2( + lb0, ub0, lb1, ub1, feasiblity_tol=feasibility_tol + ) if _lb2 > lb2: lb2 = _lb2 if _ub2 < ub2: @@ -715,7 +691,9 @@ def _prop_bnds_root_to_leaf_sqrt(node, bnds_dict, feasibility_tol): lb0, ub0 = bnds_dict[node] lb1, ub1 = bnds_dict[arg1] lb2, ub2 = (0.5, 0.5) - _lb1, _ub1 = interval._inverse_power1(lb0, ub0, lb2, ub2, orig_xl=lb1, orig_xu=ub1, feasibility_tol=feasibility_tol) + _lb1, _ub1 = interval._inverse_power1( + lb0, ub0, lb2, ub2, orig_xl=lb1, orig_xu=ub1, feasibility_tol=feasibility_tol + ) if _lb1 > lb1: lb1 = _lb1 if _ub1 < ub1: @@ -1027,9 +1005,11 @@ def _prop_bnds_root_to_leaf_UnaryFunctionExpression(node, bnds_dict, feasibility if node.getname() in _unary_root_to_leaf_map: _unary_root_to_leaf_map[node.getname()](node, bnds_dict, feasibility_tol) else: - logger.warning('Unsupported expression type for FBBT: {0}. Bounds will not be improved in this part of ' - 'the tree.' - ''.format(node.getname())) + logger.warning( + 'Unsupported expression type for FBBT: {0}. Bounds will not be improved in this part of ' + 'the tree.' + ''.format(node.getname()) + ) def _prop_bnds_root_to_leaf_GeneralExpression(node, bnds_dict, feasibility_tol): @@ -1047,31 +1027,44 @@ def _prop_bnds_root_to_leaf_GeneralExpression(node, bnds_dict, feasibility_tol): region is removed due to floating point arithmetic and to prevent math domain errors (a larger value is more conservative). """ - expr_lb, expr_ub = bnds_dict[node] - bnds_dict[node.expr] = (expr_lb, expr_ub) + if node.expr.__class__ not in native_types: + expr_lb, expr_ub = bnds_dict[node] + bnds_dict[node.expr] = (expr_lb, expr_ub) _prop_bnds_root_to_leaf_map = dict() -_prop_bnds_root_to_leaf_map[numeric_expr.ProductExpression] = _prop_bnds_root_to_leaf_ProductExpression -_prop_bnds_root_to_leaf_map[numeric_expr.DivisionExpression] = _prop_bnds_root_to_leaf_DivisionExpression -_prop_bnds_root_to_leaf_map[numeric_expr.PowExpression] = _prop_bnds_root_to_leaf_PowExpression -_prop_bnds_root_to_leaf_map[numeric_expr.SumExpression] = _prop_bnds_root_to_leaf_SumExpression -_prop_bnds_root_to_leaf_map[numeric_expr.MonomialTermExpression] = _prop_bnds_root_to_leaf_ProductExpression -_prop_bnds_root_to_leaf_map[numeric_expr.NegationExpression] = _prop_bnds_root_to_leaf_NegationExpression -_prop_bnds_root_to_leaf_map[numeric_expr.UnaryFunctionExpression] = _prop_bnds_root_to_leaf_UnaryFunctionExpression -_prop_bnds_root_to_leaf_map[numeric_expr.LinearExpression] = _prop_bnds_root_to_leaf_LinearExpression +_prop_bnds_root_to_leaf_map[ + numeric_expr.ProductExpression +] = _prop_bnds_root_to_leaf_ProductExpression +_prop_bnds_root_to_leaf_map[ + numeric_expr.DivisionExpression +] = _prop_bnds_root_to_leaf_DivisionExpression +_prop_bnds_root_to_leaf_map[ + numeric_expr.PowExpression +] = _prop_bnds_root_to_leaf_PowExpression +_prop_bnds_root_to_leaf_map[ + numeric_expr.SumExpression +] = _prop_bnds_root_to_leaf_SumExpression +_prop_bnds_root_to_leaf_map[ + numeric_expr.MonomialTermExpression +] = _prop_bnds_root_to_leaf_ProductExpression +_prop_bnds_root_to_leaf_map[ + numeric_expr.NegationExpression +] = _prop_bnds_root_to_leaf_NegationExpression +_prop_bnds_root_to_leaf_map[ + numeric_expr.UnaryFunctionExpression +] = _prop_bnds_root_to_leaf_UnaryFunctionExpression +_prop_bnds_root_to_leaf_map[ + numeric_expr.LinearExpression +] = _prop_bnds_root_to_leaf_SumExpression _prop_bnds_root_to_leaf_map[numeric_expr.AbsExpression] = _prop_bnds_root_to_leaf_abs -_prop_bnds_root_to_leaf_map[numeric_expr.NPV_ProductExpression] = _prop_bnds_root_to_leaf_ProductExpression -_prop_bnds_root_to_leaf_map[numeric_expr.NPV_DivisionExpression] = _prop_bnds_root_to_leaf_DivisionExpression -_prop_bnds_root_to_leaf_map[numeric_expr.NPV_PowExpression] = _prop_bnds_root_to_leaf_PowExpression -_prop_bnds_root_to_leaf_map[numeric_expr.NPV_SumExpression] = _prop_bnds_root_to_leaf_SumExpression -_prop_bnds_root_to_leaf_map[numeric_expr.NPV_NegationExpression] = _prop_bnds_root_to_leaf_NegationExpression -_prop_bnds_root_to_leaf_map[numeric_expr.NPV_UnaryFunctionExpression] = _prop_bnds_root_to_leaf_UnaryFunctionExpression -_prop_bnds_root_to_leaf_map[numeric_expr.NPV_AbsExpression] = _prop_bnds_root_to_leaf_abs - -_prop_bnds_root_to_leaf_map[_GeneralExpressionData] = _prop_bnds_root_to_leaf_GeneralExpression -_prop_bnds_root_to_leaf_map[ScalarExpression] = _prop_bnds_root_to_leaf_GeneralExpression +_prop_bnds_root_to_leaf_map[ + _GeneralExpressionData +] = _prop_bnds_root_to_leaf_GeneralExpression +_prop_bnds_root_to_leaf_map[ + ScalarExpression +] = _prop_bnds_root_to_leaf_GeneralExpression def _check_and_reset_bounds(var, lb, ub): @@ -1096,7 +1089,10 @@ class _FBBTVisitorLeafToRoot(ExpressionValueVisitor): This walker propagates bounds from the variables to each node in the expression tree (all the way to the root node). """ - def __init__(self, bnds_dict, integer_tol=1e-4, feasibility_tol=1e-8): + + def __init__( + self, bnds_dict, integer_tol=1e-4, feasibility_tol=1e-8, ignore_fixed=False + ): """ Parameters ---------- @@ -1112,10 +1108,13 @@ def __init__(self, bnds_dict, integer_tol=1e-4, feasibility_tol=1e-8): self.bnds_dict = bnds_dict self.integer_tol = integer_tol self.feasibility_tol = feasibility_tol + self.ignore_fixed = ignore_fixed def visit(self, node, values): if node.__class__ in _prop_bnds_leaf_to_root_map: - _prop_bnds_leaf_to_root_map[node.__class__](node, self.bnds_dict, self.feasibility_tol) + _prop_bnds_leaf_to_root_map[node.__class__]( + node, self.bnds_dict, self.feasibility_tol + ) else: self.bnds_dict[node] = (-interval.inf, interval.inf) return None @@ -1128,7 +1127,7 @@ def visiting_potential_leaf(self, node): if node.is_variable_type(): if node in self.bnds_dict: return True, None - if node.is_fixed(): + if node.is_fixed() and not self.ignore_fixed: lb = value(node.value) ub = lb else: @@ -1139,27 +1138,28 @@ def visiting_potential_leaf(self, node): if ub is None: ub = interval.inf if lb - self.feasibility_tol > ub: - raise InfeasibleConstraintException('Variable has a lower bound which is larger than its upper bound: {0}'.format(str(node))) + raise InfeasibleConstraintException( + 'Variable has a lower bound that is larger than its upper bound: {0}'.format( + str(node) + ) + ) self.bnds_dict[node] = (lb, ub) return True, None - if node.__class__ is numeric_expr.LinearExpression: - const_val = value(node.constant) - self.bnds_dict[node.constant] = (const_val, const_val) - for coef in node.linear_coefs: - coef_val = value(coef) - self.bnds_dict[coef] = (coef_val, coef_val) - for v in node.linear_vars: - self.visiting_potential_leaf(v) - _prop_bnds_leaf_to_root_LinearExpression(node, self.bnds_dict, self.feasibility_tol) - return True, None - - if not node.is_expression_type(): - assert is_fixed(node) + if not node.is_potentially_variable(): + # NPV nodes are effectively constant leaves. Evaluate it + # and return the value. val = value(node) self.bnds_dict[node] = (val, val) return True, None + if node.__class__ is numeric_expr.ExternalFunctionExpression: + # TODO: provide some mechanism for users to provide interval + # arithmetic callback functions for general external + # functions + self.bnds_dict[node] = (-interval.inf, interval.inf) + return True, None + return False, None @@ -1169,6 +1169,7 @@ class _FBBTVisitorRootToLeaf(ExpressionValueVisitor): variables. Note that the bounds on every node in the tree must first be computed with _FBBTVisitorLeafToRoot. """ + def __init__(self, bnds_dict, integer_tol=1e-4, feasibility_tol=1e-8): """ Parameters @@ -1193,9 +1194,13 @@ def visiting_potential_leaf(self, node): if node.__class__ in nonpyomo_leaf_types: lb, ub = self.bnds_dict[node] if abs(lb - value(node)) > self.feasibility_tol: - raise InfeasibleConstraintException('Detected an infeasible constraint.') + raise InfeasibleConstraintException( + 'Detected an infeasible constraint.' + ) if abs(ub - value(node)) > self.feasibility_tol: - raise InfeasibleConstraintException('Detected an infeasible constraint.') + raise InfeasibleConstraintException( + 'Detected an infeasible constraint.' + ) return True, None if node.is_variable_type(): @@ -1204,13 +1209,17 @@ def visiting_potential_leaf(self, node): lb, ub = self.bnds_dict[node] if lb > ub: if lb - self.feasibility_tol > ub: - raise InfeasibleConstraintException('Lower bound ({1}) computed for variable {0} is larger than the computed upper bound ({2}).'.format(node, lb, ub)) + raise InfeasibleConstraintException( + 'Lower bound ({1}) computed for variable {0} is larger than the computed upper bound ({2}).'.format( + node, lb, ub + ) + ) else: """ - If we reach this code, then lb > ub, but not by more than feasibility_tol. + If we reach this code, then lb > ub, but not by more than feasibility_tol. Now we want to decrease lb slightly and increase ub slightly so that lb <= ub. However, we also have to make sure we do not make lb lower than the original lower bound - and make sure we do not make ub larger than the original upper bound. This is what + and make sure we do not make ub larger than the original upper bound. This is what _check_and_reset_bounds is for. """ lb -= self.feasibility_tol @@ -1218,18 +1227,22 @@ def visiting_potential_leaf(self, node): lb, ub = _check_and_reset_bounds(node, lb, ub) self.bnds_dict[node] = (lb, ub) if lb == interval.inf: - raise InfeasibleConstraintException('Computed a lower bound of +inf for variable {0}'.format(node)) + raise InfeasibleConstraintException( + 'Computed a lower bound of +inf for variable {0}'.format(node) + ) if ub == -interval.inf: - raise InfeasibleConstraintException('Computed an upper bound of -inf for variable {0}'.format(node)) + raise InfeasibleConstraintException( + 'Computed an upper bound of -inf for variable {0}'.format(node) + ) if node.is_binary() or node.is_integer(): """ This bit of code has two purposes: 1) Improve the bounds on binary and integer variables with the fact that they are integer. - 2) Account for roundoff error. If the lower bound of a binary variable comes back as - 1e-16, the lower bound may actually be 0. This could potentially cause problems when + 2) Account for roundoff error. If the lower bound of a binary variable comes back as + 1e-16, the lower bound may actually be 0. This could potentially cause problems when handing the problem to a MIP solver. Some solvers are robust to this, but some may not be - and may give the wrong solution. Even if the correct solution is found, this could + and may give the wrong solution. Even if the correct solution is found, this could introduce numerical problems. """ if lb > -interval.inf: @@ -1250,26 +1263,31 @@ def visiting_potential_leaf(self, node): node.setub(ub) return True, None - if node.__class__ is numeric_expr.LinearExpression: - _prop_bnds_root_to_leaf_LinearExpression(node, self.bnds_dict, self.feasibility_tol) - for v in node.linear_vars: - self.visiting_potential_leaf(v) - return True, None - - if not node.is_expression_type(): + if not node.is_potentially_variable(): lb, ub = self.bnds_dict[node] if abs(lb - value(node)) > self.feasibility_tol: - raise InfeasibleConstraintException('Detected an infeasible constraint.') + raise InfeasibleConstraintException( + 'Detected an infeasible constraint.' + ) if abs(ub - value(node)) > self.feasibility_tol: - raise InfeasibleConstraintException('Detected an infeasible constraint.') + raise InfeasibleConstraintException( + 'Detected an infeasible constraint.' + ) + return True, None + + if node.__class__ is numeric_expr.ExternalFunctionExpression: return True, None if node.__class__ in _prop_bnds_root_to_leaf_map: - _prop_bnds_root_to_leaf_map[node.__class__](node, self.bnds_dict, self.feasibility_tol) + _prop_bnds_root_to_leaf_map[node.__class__]( + node, self.bnds_dict, self.feasibility_tol + ) else: - logger.warning('Unsupported expression type for FBBT: {0}. Bounds will not be improved in this part of ' - 'the tree.' - ''.format(str(type(node)))) + logger.warning( + 'Unsupported expression type for FBBT: {0}. Bounds will not be improved in this part of ' + 'the tree.' + ''.format(str(type(node))) + ) return False, None @@ -1307,7 +1325,9 @@ def _fbbt_con(con, config): if not con.active: return ComponentMap() - bnds_dict = ComponentMap() # a dictionary to store the bounds of every node in the tree + bnds_dict = ( + ComponentMap() + ) # a dictionary to store the bounds of every node in the tree # a walker to propagate bounds from the variables to the root visitorA = _FBBTVisitorLeafToRoot(bnds_dict, feasibility_tol=config.feasibility_tol) @@ -1327,7 +1347,9 @@ def _fbbt_con(con, config): # check if the constraint is infeasible if lb > _ub + config.feasibility_tol or ub < _lb - config.feasibility_tol: - raise InfeasibleConstraintException('Detected an infeasible constraint during FBBT: {0}'.format(str(con))) + raise InfeasibleConstraintException( + 'Detected an infeasible constraint during FBBT: {0}'.format(str(con)) + ) # check if the constraint is always satisfied if config.deactivate_satisfied_constraints: @@ -1341,7 +1363,11 @@ def _fbbt_con(con, config): bnds_dict[con.body] = (lb, ub) # Now, propagate bounds back from the root to the variables - visitorB = _FBBTVisitorRootToLeaf(bnds_dict, integer_tol=config.integer_tol, feasibility_tol=config.feasibility_tol) + visitorB = _FBBTVisitorRootToLeaf( + bnds_dict, + integer_tol=config.integer_tol, + feasibility_tol=config.feasibility_tol, + ) visitorB.dfs_postorder_stack(con.body) new_var_bounds = ComponentMap() @@ -1386,8 +1412,9 @@ def _fbbt_block(m, config): var_lbs = ComponentMap() var_ubs = ComponentMap() n_cons = 0 - for c in m.component_data_objects(ctype=Constraint, active=True, - descend_into=config.descend_into, sort=True): + for c in m.component_data_objects( + ctype=Constraint, active=True, descend_into=config.descend_into, sort=True + ): for v in identify_variables(c.body): if v not in var_to_con_map: var_to_con_map[v] = list() @@ -1402,7 +1429,9 @@ def _fbbt_block(m, config): var_to_con_map[v].append(c) n_cons += 1 - for _v in m.component_data_objects(ctype=Var, active=True, descend_into=True, sort=True): + for _v in m.component_data_objects( + ctype=Var, active=True, descend_into=True, sort=True + ): if _v.is_fixed(): _v.setlb(_v.value) _v.setub(_v.value) @@ -1411,8 +1440,9 @@ def _fbbt_block(m, config): n_fbbt = 0 improved_vars = ComponentSet() - for c in m.component_data_objects(ctype=Constraint, active=True, - descend_into=config.descend_into, sort=True): + for c in m.component_data_objects( + ctype=Constraint, active=True, descend_into=config.descend_into, sort=True + ): _new_var_bounds = _fbbt_con(c, config) n_fbbt += 1 new_var_bounds.update(_new_var_bounds) @@ -1449,8 +1479,15 @@ def _fbbt_block(m, config): return new_var_bounds -def fbbt(comp, deactivate_satisfied_constraints=False, integer_tol=1e-5, feasibility_tol=1e-8, max_iter=10, - improvement_tol=1e-4, descend_into=True): +def fbbt( + comp, + deactivate_satisfied_constraints=False, + integer_tol=1e-5, + feasibility_tol=1e-8, + max_iter=10, + improvement_tol=1e-4, + descend_into=True, +): """ Perform FBBT on a constraint, block, or model. For more control, use _fbbt_con and _fbbt_block. For detailed documentation, see @@ -1493,11 +1530,15 @@ def fbbt(comp, deactivate_satisfied_constraints=False, integer_tol=1e-5, feasibi from FBBT. """ config = ConfigBlock() - dsc_config = ConfigValue(default=deactivate_satisfied_constraints, domain=In({True, False})) + dsc_config = ConfigValue( + default=deactivate_satisfied_constraints, domain=In({True, False}) + ) integer_tol_config = ConfigValue(default=integer_tol, domain=NonNegativeFloat) ft_config = ConfigValue(default=feasibility_tol, domain=NonNegativeFloat) mi_config = ConfigValue(default=max_iter, domain=NonNegativeInt) - improvement_tol_config = ConfigValue(default=improvement_tol, domain=NonNegativeFloat) + improvement_tol_config = ConfigValue( + default=improvement_tol, domain=NonNegativeFloat + ) descend_into_config = ConfigValue(default=descend_into) config.declare('deactivate_satisfied_constraints', dsc_config) config.declare('integer_tol', integer_tol_config) @@ -1519,18 +1560,20 @@ def fbbt(comp, deactivate_satisfied_constraints=False, integer_tol=1e-5, feasibi _new_var_bounds = _fbbt_block(comp, config) new_var_bounds.update(_new_var_bounds) else: - raise FBBTException('Cannot perform FBBT on objects of type {0}'.format(type(comp))) + raise FBBTException( + 'Cannot perform FBBT on objects of type {0}'.format(type(comp)) + ) return new_var_bounds -def compute_bounds_on_expr(expr): +def compute_bounds_on_expr(expr, ignore_fixed=False): """ Compute bounds on an expression based on the bounds on the variables in the expression. Parameters ---------- - expr: pyomo.core.expr.numeric_expr.ExpressionBase + expr: pyomo.core.expr.numeric_expr.NumericExpression Returns ------- @@ -1538,7 +1581,7 @@ def compute_bounds_on_expr(expr): ub: float """ bnds_dict = ComponentMap() - visitor = _FBBTVisitorLeafToRoot(bnds_dict) + visitor = _FBBTVisitorLeafToRoot(bnds_dict, ignore_fixed=ignore_fixed) visitor.dfs_postorder_stack(expr) lb, ub = bnds_dict[expr] if lb == -interval.inf: @@ -1561,7 +1604,9 @@ def __init__(self, comp): else: self._vars.update(identify_variables(comp.body)) else: - for c in comp.component_data_objects(Constraint, descend_into=True, active=True, sort=True): + for c in comp.component_data_objects( + Constraint, descend_into=True, active=True, sort=True + ): self._vars.update(identify_variables(c.body)) def save_bounds(self): diff --git a/pyomo/contrib/fbbt/interval.py b/pyomo/contrib/fbbt/interval.py index 4470a57b57d..aca6531c8df 100644 --- a/pyomo/contrib/fbbt/interval.py +++ b/pyomo/contrib/fbbt/interval.py @@ -26,7 +26,7 @@ def sub(xl, xu, yl, yu): def mul(xl, xu, yl, yu): - options = [xl*yl, xl*yu, xu*yl, xu*yu] + options = [xl * yl, xl * yu, xu * yl, xu * yu] if any(math.isnan(i) for i in options): lb = -inf ub = inf @@ -45,7 +45,9 @@ def inv(xl, xu, feasibility_tol): should be acceptable. Additionally, it very important to return a non-negative interval when xl is non-negative. """ if xu - xl <= -feasibility_tol: - raise InfeasibleConstraintException(f'lower bound is greater than upper bound in inv; xl: {xl}; xu: {xu}') + raise InfeasibleConstraintException( + f'lower bound is greater than upper bound in inv; xl: {xl}; xu: {xu}' + ) elif xu <= 0 <= xl: # This has to return -inf to inf because it could later be multiplied by 0 lb = -inf @@ -90,8 +92,8 @@ def power(xl, xu, yl, yu, feasibility_tol): If x is always positive, things are simple. We only need to worry about the sign of y. """ if yl < 0 < yu: - lb = min(xu ** yl, xl ** yu) - ub = max(xl ** yl, xu ** yu) + lb = min(xu**yl, xl**yu) + ub = max(xl**yl, xu**yu) elif yl >= 0: lb = min(xl**yl, xl**yu) ub = max(xu**yl, xu**yu) @@ -100,10 +102,12 @@ def power(xl, xu, yl, yu, feasibility_tol): ub = max(xl**yl, xl**yu) elif xl == 0: if yl >= 0: - lb = min(xl ** yl, xl ** yu) - ub = max(xu ** yl, xu ** yu) + lb = min(xl**yl, xl**yu) + ub = max(xu**yl, xu**yu) elif yu <= 0: - lb, ub = inv(*power(xl, xu, *sub(0, 0, yl, yu), feasibility_tol), feasibility_tol) + lb, ub = inv( + *power(xl, xu, *sub(0, 0, yl, yu), feasibility_tol), feasibility_tol + ) else: lb1, ub1 = power(xl, xu, 0, yu, feasibility_tol) lb2, ub2 = power(xl, xu, yl, 0, feasibility_tol) @@ -116,52 +120,54 @@ def power(xl, xu, yl, yu, feasibility_tol): 1) The sign of x 2) The sign of y 3) Whether y is even or odd. - + There are also special cases to avoid math domain errors. """ y = yl if xu <= 0: if y < 0: if y % 2 == 0: - lb = xl ** y + lb = xl**y if xu == 0: ub = inf else: - ub = xu ** y + ub = xu**y else: if xu == 0: lb = -inf ub = inf else: - lb = xu ** y - ub = xl ** y + lb = xu**y + ub = xl**y else: if y % 2 == 0: - lb = xu ** y - ub = xl ** y + lb = xu**y + ub = xl**y else: - lb = xl ** y - ub = xu ** y + lb = xl**y + ub = xu**y else: if y < 0: if y % 2 == 0: - lb = min(xl ** y, xu ** y) + lb = min(xl**y, xu**y) ub = inf else: - lb = - inf + lb = -inf ub = inf else: if y % 2 == 0: lb = 0 - ub = max(xl ** y, xu ** y) + ub = max(xl**y, xu**y) else: - lb = xl ** y - ub = xu ** y + lb = xl**y + ub = xu**y elif yl == yu: # the exponent has to be fractional, so x must be positive if xu < 0: msg = 'Cannot raise a negative number to the power of {0}.\n'.format(yl) - msg += 'The upper bound of a variable raised to the power of {0} is {1}'.format(yl, xu) + msg += 'The upper bound of a variable raised to the power of {0} is {1}'.format( + yl, xu + ) raise InfeasibleConstraintException(msg) xl = 0 lb, ub = power(xl, xu, yl, yu, feasibility_tol) @@ -199,17 +205,21 @@ def _inverse_power1(zl, zu, yl, yu, orig_xl, orig_xu, feasibility_tol): if y is even, then there are two primary cases (note that it is much easier to walk through these while looking at plots): case 1: y is positive - x**y is convex, positive, and symmetric. The bounds on x depend on the lower bound of z. If zl <= 0, - then xl should simply be -xu. However, if zl > 0, then we may be able to say something better. For - example, if the original lower bound on x is positive, then we can keep xl computed from - x = exp(ln(z) / y). Furthermore, if the original lower bound on x is larger than -xl computed from + x**y is convex, positive, and symmetric. The bounds on x depend on the lower bound of z. If zl <= 0, + then xl should simply be -xu. However, if zl > 0, then we may be able to say something better. For + example, if the original lower bound on x is positive, then we can keep xl computed from + x = exp(ln(z) / y). Furthermore, if the original lower bound on x is larger than -xl computed from x = exp(ln(z) / y), then we can still keep the xl computed from x = exp(ln(z) / y). Similar logic applies to the upper bound of x. case 2: y is negative The ideas are similar to case 1. """ if zu + feasibility_tol < 0: - raise InfeasibleConstraintException('Infeasible. Anything to the power of {0} must be positive.'.format(y)) + raise InfeasibleConstraintException( + 'Infeasible. Anything to the power of {0} must be positive.'.format( + y + ) + ) if y > 0: if zu <= 0: _xl = 0 @@ -230,7 +240,11 @@ def _inverse_power1(zl, zu, yl, yu, orig_xl, orig_xu, feasibility_tol): xu = _xu else: if zu == 0: - raise InfeasibleConstraintException('Infeasible. Anything to the power of {0} must be positive.'.format(y)) + raise InfeasibleConstraintException( + 'Infeasible. Anything to the power of {0} must be positive.'.format( + y + ) + ) elif zl <= 0: _xl = -inf _xu = inf @@ -247,20 +261,20 @@ def _inverse_power1(zl, zu, yl, yu, orig_xl, orig_xu, feasibility_tol): xu = _xu else: # y % 2 == 1 """ - y is odd. + y is odd. Case 1: y is positive x**y is monotonically increasing. If y is positive, then we can can compute the bounds on x using x = z**(1/y) and the signs on xl and xu depend on the signs of zl and zu. Case 2: y is negative - Again, this is easier to visualize with a plot. x**y approaches zero when x approaches -inf or inf. + Again, this is easier to visualize with a plot. x**y approaches zero when x approaches -inf or inf. Thus, if zl < 0 < zu, then no bounds can be inferred for x. If z is positive (zl >=0 ) then we can use the bounds computed from x = exp(ln(z) / y). If z is negative (zu <= 0), then we live in the bottom left quadrant, xl depends on zu, and xu depends on zl. """ if y > 0: - xl = abs(zl)**(1.0/y) + xl = abs(zl) ** (1.0 / y) xl = math.copysign(xl, zl) - xu = abs(zu)**(1.0/y) + xu = abs(zu) ** (1.0 / y) xu = math.copysign(xu, zu) else: if zl >= 0: @@ -269,11 +283,11 @@ def _inverse_power1(zl, zu, yl, yu, orig_xl, orig_xu, feasibility_tol): if zu == 0: xl = -inf else: - xl = -abs(zu)**(1.0/y) + xl = -abs(zu) ** (1.0 / y) if zl == 0: xu = -inf else: - xu = -abs(zl)**(1.0/y) + xu = -abs(zl) ** (1.0 / y) else: xl = -inf xu = inf @@ -290,9 +304,13 @@ def _inverse_power2(zl, zu, xl, xu, feasiblity_tol): if the exponent is an integer. """ if xu <= 0: - raise IntervalException('Cannot raise a negative variable to a fractional power.') + raise IntervalException( + 'Cannot raise a negative variable to a fractional power.' + ) if (xl > 0 and zu <= 0) or (xl >= 0 and zu < 0): - raise InfeasibleConstraintException('A positive variable raised to the power of anything must be positive.') + raise InfeasibleConstraintException( + 'A positive variable raised to the power of anything must be positive.' + ) lba, uba = log(zl, zu) lbb, ubb = log(xl, xu) yl, yu = div(lba, uba, lbb, ubb, feasiblity_tol) @@ -512,7 +530,7 @@ def asin(xl, xu, yl, yu, feasibility_tol): xl = sin(y) y >= yl - globally. + globally. """ # first find the next minimum of x = sin(y). Minimums occur at y = 2*pi*n - pi/2 for integer n. i = (yl + pi / 2) / (2 * pi) @@ -522,7 +540,9 @@ def asin(xl, xu, yl, yu, feasibility_tol): i2 = 2 * pi * i2 - pi / 2 # now find the next value of y such that xl = sin(y). This can be computed by a distance from the minimum (i). y_tmp = math.asin(xl) # this will give me a value between -pi/2 and pi/2 - dist = y_tmp - (-pi / 2) # this is the distance between the minimum of the sin function and a value that + dist = y_tmp - ( + -pi / 2 + ) # this is the distance between the minimum of the sin function and a value that # satisfies xl = sin(y) lb1 = i1 + dist lb2 = i2 + dist @@ -623,7 +643,7 @@ def acos(xl, xu, yl, yu, feasibility_tol): xl = cos(y) y >= yl - globally. + globally. """ # first find the next minimum of x = cos(y). Minimums occur at y = 2*pi*n - pi for integer n. i = (yl + pi) / (2 * pi) @@ -633,7 +653,9 @@ def acos(xl, xu, yl, yu, feasibility_tol): i2 = 2 * pi * i2 - pi # now find the next value of y such that xl = cos(y). This can be computed by a distance from the minimum (i). y_tmp = math.acos(xl) # this will give me a value between 0 and pi - dist = pi - y_tmp # this is the distance between the minimum of the sin function and a value that + dist = ( + pi - y_tmp + ) # this is the distance between the minimum of the sin function and a value that # satisfies xl = sin(y) lb1 = i1 + dist lb2 = i2 + dist @@ -724,7 +746,7 @@ def atan(xl, xu, yl, yu): i = math.floor(i) i = pi * i + pi / 2 y_tmp = math.atan(xl) - dist = y_tmp - (-pi/2) + dist = y_tmp - (-pi / 2) lb = i + dist if xu >= inf or yu >= inf: diff --git a/pyomo/contrib/fbbt/tests/test_fbbt.py b/pyomo/contrib/fbbt/tests/test_fbbt.py index cb71737dc9f..5e8d656eeab 100644 --- a/pyomo/contrib/fbbt/tests/test_fbbt.py +++ b/pyomo/contrib/fbbt/tests/test_fbbt.py @@ -13,14 +13,21 @@ import pyomo.environ as pyo from pyomo.contrib.fbbt.fbbt import fbbt, compute_bounds_on_expr from pyomo.common.dependencies import numpy as np, numpy_available +from pyomo.common.fileutils import find_library from pyomo.common.log import LoggingIntercept from pyomo.common.errors import InfeasibleConstraintException -from pyomo.core.expr.numeric_expr import (ProductExpression, - UnaryFunctionExpression, - LinearExpression) +from pyomo.core.expr.numeric_expr import ( + ProductExpression, + UnaryFunctionExpression, + LinearExpression, +) import math +import platform from io import StringIO +flib = find_library("asl_external_demo") +is_pypy = platform.python_implementation().lower().startswith('pypy') + class DummyExpr(ProductExpression): pass @@ -41,8 +48,28 @@ class FbbtTestBase(object): def test_add(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available') - x_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] - c_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] + x_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] + c_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] for xl, xu in x_bounds: for cl, cu in c_bounds: m = pyo.Block(concrete=True) @@ -50,7 +77,9 @@ def test_add(self): m.y = pyo.Var() m.p = pyo.Param(mutable=True) m.p.value = 1 - m.c = pyo.Constraint(expr=pyo.inequality(body=m.x+m.y+(m.p+1), lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=m.x + m.y + (m.p + 1), lower=cl, upper=cu) + ) self.tightener(m) x = np.linspace(pyo.value(m.x.lb), pyo.value(m.x.ub), 100) z = np.linspace(pyo.value(m.c.lower), pyo.value(m.c.upper), 100) @@ -70,14 +99,36 @@ def test_add(self): def test_sub1(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available') - x_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] - c_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] + x_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] + c_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] for xl, xu in x_bounds: for cl, cu in c_bounds: m = pyo.Block(concrete=True) m.x = pyo.Var(bounds=(xl, xu)) m.y = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=m.x-m.y, lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=m.x - m.y, lower=cl, upper=cu) + ) self.tightener(m) x = np.linspace(pyo.value(m.x.lb), pyo.value(m.x.ub), 100) z = np.linspace(pyo.value(m.c.lower), pyo.value(m.c.upper), 100) @@ -97,14 +148,36 @@ def test_sub1(self): def test_sub2(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available') - x_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] - c_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] + x_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] + c_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] for xl, xu in x_bounds: for cl, cu in c_bounds: m = pyo.Block(concrete=True) m.x = pyo.Var(bounds=(xl, xu)) m.y = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=m.y-m.x, lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=m.y - m.x, lower=cl, upper=cu) + ) self.tightener(m) x = np.linspace(pyo.value(m.x.lb), pyo.value(m.x.ub), 100) z = np.linspace(pyo.value(m.c.lower), pyo.value(m.c.upper), 100) @@ -124,16 +197,40 @@ def test_sub2(self): def test_mul(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available') - x_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] - c_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] + x_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] + c_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] for xl, xu in x_bounds: for cl, cu in c_bounds: m = pyo.Block(concrete=True) m.x = pyo.Var(bounds=(xl, xu)) m.y = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=m.x*m.y, lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=m.x * m.y, lower=cl, upper=cu) + ) self.tightener(m) - x = np.linspace(pyo.value(m.x.lb) + 1e-6, pyo.value(m.x.ub), 100, endpoint=False) + x = np.linspace( + pyo.value(m.x.lb) + 1e-6, pyo.value(m.x.ub), 100, endpoint=False + ) z = np.linspace(pyo.value(m.c.lower), pyo.value(m.c.upper), 100) if m.y.lb is None: yl = -np.inf @@ -151,17 +248,44 @@ def test_mul(self): def test_div1(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available') - x_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] - c_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] + x_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] + c_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] for xl, xu in x_bounds: for cl, cu in c_bounds: m = pyo.Block(concrete=True) m.x = pyo.Var(bounds=(xl, xu)) m.y = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=m.x/m.y, lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=m.x / m.y, lower=cl, upper=cu) + ) self.tightener(m) x = np.linspace(pyo.value(m.x.lb), pyo.value(m.x.ub), 100) - z = np.linspace(pyo.value(m.c.lower) + 1e-6, pyo.value(m.c.upper), 100, endpoint=False) + z = np.linspace( + pyo.value(m.c.lower) + 1e-6, + pyo.value(m.c.upper), + 100, + endpoint=False, + ) if m.y.lb is None: yl = -np.inf else: @@ -178,14 +302,36 @@ def test_div1(self): def test_div2(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available') - x_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] - c_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] + x_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] + c_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] for xl, xu in x_bounds: for cl, cu in c_bounds: m = pyo.Block(concrete=True) m.x = pyo.Var(bounds=(xl, xu)) m.y = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=m.y/m.x, lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=m.y / m.x, lower=cl, upper=cu) + ) self.tightener(m) x = np.linspace(pyo.value(m.x.lb), pyo.value(m.x.ub), 100) z = np.linspace(pyo.value(m.c.lower), pyo.value(m.c.upper), 100) @@ -212,14 +358,23 @@ def test_pow1(self): m = pyo.Block(concrete=True) m.x = pyo.Var(bounds=(xl, xu)) m.y = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=m.x**m.y, lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=m.x**m.y, lower=cl, upper=cu) + ) if xl > 0 and cu <= 0: with self.assertRaises(InfeasibleConstraintException): self.tightener(m) else: self.tightener(m) - x = np.linspace(pyo.value(m.x.lb) + 1e-6, pyo.value(m.x.ub), 100, endpoint=False) - z = np.linspace(pyo.value(m.c.lower) + 1e-6, pyo.value(m.c.upper), 100, endpoint=False) + x = np.linspace( + pyo.value(m.x.lb) + 1e-6, pyo.value(m.x.ub), 100, endpoint=False + ) + z = np.linspace( + pyo.value(m.c.lower) + 1e-6, + pyo.value(m.c.upper), + 100, + endpoint=False, + ) if m.y.lb is None: yl = -np.inf else: @@ -236,17 +391,36 @@ def test_pow1(self): def test_pow2(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available') - x_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] + x_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] c_bounds = [(-2.5, 2.8), (0.5, 2.8), (0, 2.8), (1, 2.8), (0.5, 1)] for xl, xu in x_bounds: for cl, cu in c_bounds: m = pyo.Block(concrete=True) m.x = pyo.Var(bounds=(xl, xu)) m.y = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=m.y**m.x, lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=m.y**m.x, lower=cl, upper=cu) + ) self.tightener(m) - x = np.linspace(pyo.value(m.x.lb) + 1e-6, pyo.value(m.x.ub), 100, endpoint=False) - z = np.linspace(pyo.value(m.c.lower) + 1e-6, pyo.value(m.c.upper), 100, endpoint=False) + x = np.linspace( + pyo.value(m.x.lb) + 1e-6, pyo.value(m.x.ub), 100, endpoint=False + ) + z = np.linspace( + pyo.value(m.c.lower) + 1e-6, + pyo.value(m.c.upper), + 100, + endpoint=False, + ) if m.y.lb is None: yl = -np.inf else: @@ -326,7 +500,7 @@ def test_x_pow_minus_2(self): m = pyo.ConcreteModel() m.x = pyo.Var() m.y = pyo.Var() - m.c = pyo.Constraint(expr=m.x**(-2) == m.y) + m.c = pyo.Constraint(expr=m.x ** (-2) == m.y) self.tightener(m) self.assertEqual(m.x.lb, None) @@ -397,7 +571,7 @@ def test_x_cubed(self): m.y.setlb(-5) m.y.setub(8) self.tightener(m) - self.assertAlmostEqual(m.x.lb, -5.0**(1.0/3.0)) + self.assertAlmostEqual(m.x.lb, -(5.0 ** (1.0 / 3.0))) self.assertAlmostEqual(m.x.ub, 2) m.x.setlb(None) @@ -412,7 +586,7 @@ def test_x_pow_minus_3(self): m = pyo.ConcreteModel() m.x = pyo.Var() m.y = pyo.Var() - m.c = pyo.Constraint(expr=m.x**(-3) == m.y) + m.c = pyo.Constraint(expr=m.x ** (-3) == m.y) self.tightener(m) self.assertEqual(m.x.lb, None) @@ -459,7 +633,9 @@ def test_pow4(self): m.y = pyo.Var(bounds=(yl, yu)) m.c = pyo.Constraint(expr=m.x**_exp_val == m.y) self.tightener(m) - y = np.linspace(pyo.value(m.y.lb) + 1e-6, pyo.value(m.y.ub), 100, endpoint=True) + y = np.linspace( + pyo.value(m.y.lb) + 1e-6, pyo.value(m.y.ub), 100, endpoint=True + ) if m.x.lb is None: xl = -np.inf else: @@ -522,7 +698,9 @@ def test_exp(self): for cl, cu in c_bounds: m = pyo.Block(concrete=True) m.x = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.exp(m.x), lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.exp(m.x), lower=cl, upper=cu) + ) self.tightener(m) if pyo.value(m.c.lower) <= 0: _cl = 1e-6 @@ -620,15 +798,26 @@ def test_abs(self): self.assertAlmostEqual(m.y.lb, 2) self.assertAlmostEqual(m.y.ub, 5) - def test_log(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available') - c_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] + c_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] for cl, cu in c_bounds: m = pyo.Block(concrete=True) m.x = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.log(m.x), lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.log(m.x), lower=cl, upper=cu) + ) self.tightener(m) z = np.linspace(pyo.value(m.c.lower), pyo.value(m.c.upper), 100) if m.x.lb is None: @@ -646,11 +835,23 @@ def test_log(self): def test_log10(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available') - c_bounds = [(-2.5, 2.8), (-2.5, -0.5), (0.5, 2.8), (-2.5, 0), (0, 2.8), (-2.5, -1), (1, 2.8), (-1, -0.5), (0.5, 1)] + c_bounds = [ + (-2.5, 2.8), + (-2.5, -0.5), + (0.5, 2.8), + (-2.5, 0), + (0, 2.8), + (-2.5, -1), + (1, 2.8), + (-1, -0.5), + (0.5, 1), + ] for cl, cu in c_bounds: m = pyo.Block(concrete=True) m.x = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.log10(m.x), lower=cl, upper=cu)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.log10(m.x), lower=cl, upper=cu) + ) self.tightener(m) z = np.linspace(pyo.value(m.c.lower), pyo.value(m.c.upper), 100) if m.x.lb is None: @@ -664,20 +865,24 @@ def test_log10(self): x = 10**z print(xl, xu, cl, cu) print(x) - self.assertTrue(np.all(xl <= x)) - self.assertTrue(np.all(xu >= x)) + self.assertTrue(np.all(xl - 1e-14 <= x)) + self.assertTrue(np.all(xu + 1e-14 >= x)) def test_sin(self): m = pyo.Block(concrete=True) - m.x = pyo.Var(bounds=(-math.pi/2, math.pi/2)) - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.sin(m.x), lower=-0.5, upper=0.5)) + m.x = pyo.Var(bounds=(-math.pi / 2, math.pi / 2)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.sin(m.x), lower=-0.5, upper=0.5) + ) self.tightener(m) self.assertAlmostEqual(pyo.value(m.x.lb), math.asin(-0.5)) self.assertAlmostEqual(pyo.value(m.x.ub), math.asin(0.5)) m = pyo.Block(concrete=True) m.x = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.sin(m.x), lower=-0.5, upper=0.5)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.sin(m.x), lower=-0.5, upper=0.5) + ) self.tightener(m) self.assertEqual(m.x.lb, None) self.assertEqual(m.x.ub, None) @@ -685,29 +890,37 @@ def test_sin(self): def test_cos(self): m = pyo.Block(concrete=True) m.x = pyo.Var(bounds=(0, math.pi)) - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.cos(m.x), lower=-0.5, upper=0.5)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.cos(m.x), lower=-0.5, upper=0.5) + ) self.tightener(m) self.assertAlmostEqual(pyo.value(m.x.lb), math.acos(0.5)) self.assertAlmostEqual(pyo.value(m.x.ub), math.acos(-0.5)) m = pyo.Block(concrete=True) m.x = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.cos(m.x), lower=-0.5, upper=0.5)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.cos(m.x), lower=-0.5, upper=0.5) + ) self.tightener(m) self.assertEqual(m.x.lb, None) self.assertEqual(m.x.ub, None) def test_tan(self): m = pyo.Block(concrete=True) - m.x = pyo.Var(bounds=(-math.pi/2, math.pi/2)) - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.tan(m.x), lower=-0.5, upper=0.5)) + m.x = pyo.Var(bounds=(-math.pi / 2, math.pi / 2)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.tan(m.x), lower=-0.5, upper=0.5) + ) self.tightener(m) self.assertAlmostEqual(pyo.value(m.x.lb), math.atan(-0.5)) self.assertAlmostEqual(pyo.value(m.x.ub), math.atan(0.5)) m = pyo.Block(concrete=True) m.x = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.tan(m.x), lower=-0.5, upper=0.5)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.tan(m.x), lower=-0.5, upper=0.5) + ) self.tightener(m) self.assertEqual(m.x.lb, None) self.assertEqual(m.x.ub, None) @@ -715,7 +928,9 @@ def test_tan(self): def test_asin(self): m = pyo.Block(concrete=True) m.x = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.asin(m.x), lower=-0.5, upper=0.5)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.asin(m.x), lower=-0.5, upper=0.5) + ) self.tightener(m) self.assertAlmostEqual(pyo.value(m.x.lb), math.sin(-0.5)) self.assertAlmostEqual(pyo.value(m.x.ub), math.sin(0.5)) @@ -731,7 +946,9 @@ def test_acos(self): def test_atan(self): m = pyo.Block(concrete=True) m.x = pyo.Var() - m.c = pyo.Constraint(expr=pyo.inequality(body=pyo.atan(m.x), lower=-0.5, upper=0.5)) + m.c = pyo.Constraint( + expr=pyo.inequality(body=pyo.atan(m.x), lower=-0.5, upper=0.5) + ) self.tightener(m) self.assertAlmostEqual(pyo.value(m.x.lb), math.tan(-0.5)) self.assertAlmostEqual(pyo.value(m.x.ub), math.tan(0.5)) @@ -744,8 +961,8 @@ def test_multiple_constraints(self): m.c = pyo.ConstraintList() m.c.add(m.x + m.y >= -1) m.c.add(m.x + m.y <= -1) - m.c.add(m.y - m.x*m.z <= 2) - m.c.add(m.y - m.x*m.z >= -2) + m.c.add(m.y - m.x * m.z <= 2) + m.c.add(m.y - m.x * m.z >= -2) m.c.add(m.x + m.z == 1) self.tightener(m) self.assertAlmostEqual(pyo.value(m.x.lb), -1, 8) @@ -763,8 +980,8 @@ def test_multiple_constraints2(self): m.c = pyo.ConstraintList() m.c.add(-m.x - m.y >= -1) m.c.add(-m.x - m.y <= -1) - m.c.add(-m.y - m.x*m.z >= -2) - m.c.add(-m.y - m.x*m.z <= 2) + m.c.add(-m.y - m.x * m.z >= -2) + m.c.add(-m.y - m.x * m.z <= 2) m.c.add(-m.x - m.z == 1) self.tightener(m) self.assertAlmostEqual(pyo.value(m.x.lb), 1, 8) @@ -797,8 +1014,8 @@ def test_binary(self): def test_always_feasible(self): m = pyo.ConcreteModel() - m.x = pyo.Var(bounds=(1,2)) - m.y = pyo.Var(bounds=(1,2)) + m.x = pyo.Var(bounds=(1, 2)) + m.y = pyo.Var(bounds=(1, 2)) m.c = pyo.Constraint(expr=m.x + m.y >= 0) self.tightener(m) self.assertTrue(m.c.active) @@ -843,10 +1060,12 @@ def test_inf_bounds_on_expr(self): def test_skip_unknown_expression1(self): if self.tightener is not fbbt: - raise unittest.SkipTest('Appsi FBBT does not support unkown expressions yet') + raise unittest.SkipTest( + 'Appsi FBBT does not support unknown expressions yet' + ) m = pyo.ConcreteModel() - m.x = pyo.Var(bounds=(1,1)) + m.x = pyo.Var(bounds=(1, 1)) m.y = pyo.Var() expr = DummyExpr([m.x, m.y]) m.c = pyo.Constraint(expr=expr == 1) @@ -863,14 +1082,18 @@ def test_skip_unknown_expression1(self): def test_skip_unknown_expression2(self): if self.tightener is not fbbt: - raise unittest.SkipTest('Appsi FBBT does not support unkown expressions yet') + raise unittest.SkipTest( + 'Appsi FBBT does not support unknown expressions yet' + ) def dummy_unary_expr(x): - return 0.5*x + return 0.5 * x m = pyo.ConcreteModel() - m.x = pyo.Var(bounds=(0,4)) - expr = UnaryFunctionExpression((m.x,), name='dummy_unary_expr', fcn=dummy_unary_expr) + m.x = pyo.Var(bounds=(0, 4)) + expr = UnaryFunctionExpression( + (m.x,), name='dummy_unary_expr', fcn=dummy_unary_expr + ) m.c = pyo.Constraint(expr=expr == 1) OUT = StringIO() @@ -883,8 +1106,8 @@ def dummy_unary_expr(x): def test_compute_expr_bounds(self): m = pyo.ConcreteModel() - m.x = pyo.Var(bounds=(-1,1)) - m.y = pyo.Var(bounds=(-1,1)) + m.x = pyo.Var(bounds=(-1, 1)) + m.y = pyo.Var(bounds=(-1, 1)) e = m.x + m.y lb, ub = compute_bounds_on_expr(e) self.assertAlmostEqual(lb, -2, 14) @@ -936,7 +1159,7 @@ def test_negative_power(self): m.x = pyo.Var() m.y = pyo.Var() m.z = pyo.Var() - m.c = pyo.Constraint(expr=(m.x**2 + m.y**2)**(-0.5) == m.z) + m.c = pyo.Constraint(expr=(m.x**2 + m.y**2) ** (-0.5) == m.z) self.tightener(m) self.assertAlmostEqual(m.z.lb, 0) self.assertIsNone(m.z.ub) @@ -946,7 +1169,9 @@ def test_linear_expression(self): m.x = pyo.Var(bounds=(1, 2)) m.y = pyo.Var() m.p = pyo.Param(initialize=3, mutable=True) - e = LinearExpression(constant=1, linear_coefs=[1, m.p - 1], linear_vars=[m.x, m.y]) + e = LinearExpression( + constant=1, linear_coefs=[1, m.p - 1], linear_vars=[m.x, m.y] + ) m.c = pyo.Constraint(expr=e == 0) self.tightener(m) self.assertAlmostEqual(m.y.lb, -1.5) @@ -956,12 +1181,12 @@ def test_quadratic_as_product(self): m1 = pyo.ConcreteModel() m1.x = pyo.Var([1, 2], bounds=(-2, 6)) m1.y = pyo.Var() - m1.c = pyo.Constraint(expr=m1.x[1]*m1.x[1] + m1.x[2]*m1.x[2] == m1.y) + m1.c = pyo.Constraint(expr=m1.x[1] * m1.x[1] + m1.x[2] * m1.x[2] == m1.y) m2 = pyo.ConcreteModel() m2.x = pyo.Var([1, 2], bounds=(-2, 6)) m2.y = pyo.Var() - m2.c = pyo.Constraint(expr=m2.x[1]**2 + m2.x[2]**2 == m2.y) + m2.c = pyo.Constraint(expr=m2.x[1] ** 2 + m2.x[2] ** 2 == m2.y) self.tightener(m1) self.tightener(m2) @@ -972,7 +1197,7 @@ def test_quadratic_as_product(self): m = pyo.ConcreteModel() m.x = pyo.Var([1, 2], bounds=(-2, 6)) m.y = pyo.Var() - m.c = pyo.Constraint(expr=m.x[1]*m.x[1] + m.x[2]*m.x[2] == 0) + m.c = pyo.Constraint(expr=m.x[1] * m.x[1] + m.x[2] * m.x[2] == 0) self.tightener(m) self.assertAlmostEqual(m.x[1].lb, 0) self.assertAlmostEqual(m.x[1].ub, 0) @@ -1005,7 +1230,12 @@ def test_long_linear_expression(self): m.a = pyo.Set(initialize=list(range(N))) m.x = pyo.Var(m.a, bounds=(0, 1)) m.x[n].setub(None) - m.c = pyo.Constraint(expr=LinearExpression(constant=0, linear_coefs=[1]*N, linear_vars=list(m.x.values())) == 1) + m.c = pyo.Constraint( + expr=LinearExpression( + constant=0, linear_coefs=[1] * N, linear_vars=list(m.x.values()) + ) + == 1 + ) self.tightener(m) self.assertAlmostEqual(m.x[n].ub, 1) @@ -1013,7 +1243,12 @@ def test_long_linear_expression(self): m.a = pyo.Set(initialize=list(range(N))) m.x = pyo.Var(m.a, bounds=(0, 1)) m.x[n].setlb(None) - m.c = pyo.Constraint(expr=LinearExpression(constant=0, linear_coefs=[1]*N, linear_vars=list(m.x.values())) == 1) + m.c = pyo.Constraint( + expr=LinearExpression( + constant=0, linear_coefs=[1] * N, linear_vars=list(m.x.values()) + ) + == 1 + ) self.tightener(m) self.assertAlmostEqual(m.x[n].lb, -28) @@ -1025,7 +1260,12 @@ def test_long_linear_expression2(self): m.x = pyo.Var(m.a, bounds=(0, 1)) m.x[n].setlb(None) m.x[n].setub(None) - m.c = pyo.Constraint(expr=LinearExpression(constant=1, linear_coefs=[1]*N, linear_vars=list(m.x.values())) == 1) + m.c = pyo.Constraint( + expr=LinearExpression( + constant=1, linear_coefs=[1] * N, linear_vars=list(m.x.values()) + ) + == 1 + ) self.tightener(m) self.assertAlmostEqual(m.x[n].lb, -29) self.assertAlmostEqual(m.x[n].ub, 0) @@ -1045,6 +1285,52 @@ def test_fixed_var(self): self.assertAlmostEqual(m.y.lb, 1) self.assertAlmostEqual(m.y.ub, 2) + @unittest.skipUnless(flib, 'Could not find the "asl_external_demo.so" library') + @unittest.skipIf(is_pypy, 'Cannot evaluate external functions under pypy') + def test_external_function(self): + if self.tightener is not fbbt: + raise unittest.SkipTest( + 'Appsi FBBT does not support unknown expressions yet' + ) + + m = pyo.ConcreteModel() + m.x = pyo.Var(bounds=(0, 1)) + m.y = pyo.Var(bounds=(0, 5)) + m.p = pyo.Param(initialize=1) + m.q = pyo.Param(initialize=3) + m.ef = pyo.ExternalFunction(library=flib, function="demo_function") + + m.con1 = pyo.Constraint(expr=m.ef("sum", m.x, m.y) >= 1) + + # No change due to variable EF + self.tightener(m) + self.assertAlmostEqual(m.x.lb, 0) + self.assertAlmostEqual(m.x.ub, 1) + self.assertAlmostEqual(m.y.lb, 0) + self.assertAlmostEqual(m.y.ub, 5) + + m.con2 = pyo.Constraint(expr=m.ef("sum", m.p, m.q) - m.y >= 1) + + self.tightener(m) + self.assertAlmostEqual(m.x.lb, 0) + self.assertAlmostEqual(m.x.ub, 1) + self.assertAlmostEqual(m.y.lb, 0) + self.assertAlmostEqual(m.y.ub, 3) + + def test_named_expr(self): + m = pyo.ConcreteModel() + m.x = pyo.Var(bounds=(0, None)) + m.y = pyo.Var(bounds=(1, 6)) + m.e_const = pyo.Expression(expr=3) + m.e_var = pyo.Expression(expr=m.y + m.e_const) + + m.c = pyo.Constraint(expr=m.x**2 == m.e_var) + + self.tightener(m) + self.tightener(m) + self.assertAlmostEqual(m.x.lb, 2) + self.assertAlmostEqual(m.x.ub, 3) + class TestFBBT(FbbtTestBase, unittest.TestCase): def setUp(self) -> None: diff --git a/pyomo/contrib/fbbt/tests/test_interval.py b/pyomo/contrib/fbbt/tests/test_interval.py index 747f83ac19b..59c62be4e84 100644 --- a/pyomo/contrib/fbbt/tests/test_interval.py +++ b/pyomo/contrib/fbbt/tests/test_interval.py @@ -22,7 +22,7 @@ class IntervalTestBase(object): in the other module. Therefore, we use this base class for testing both modules. The only difference in the derived classes is in the self.add, self.sub, - self.mul, etc. atrributes. + self.mul, etc. attributes. """ def setUp(self): @@ -111,11 +111,13 @@ def test_div(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available.') x_bounds = [(np.random.uniform(-5, -2), np.random.uniform(2, 5))] - y_bounds = [(np.random.uniform(-5, -2), np.random.uniform(2, 5)), - (0, np.random.uniform(2, 5)), - (np.random.uniform(0, 2), np.random.uniform(2, 5)), - (np.random.uniform(-5, -2), 0), - (np.random.uniform(-5, -2), np.random.uniform(-2, 0))] + y_bounds = [ + (np.random.uniform(-5, -2), np.random.uniform(2, 5)), + (0, np.random.uniform(2, 5)), + (np.random.uniform(0, 2), np.random.uniform(2, 5)), + (np.random.uniform(-5, -2), 0), + (np.random.uniform(-5, -2), np.random.uniform(-2, 0)), + ] for xl, xu in x_bounds: for yl, yu in y_bounds: zl, zu = self.div(xl, xu, yl, yu, 1e-8) @@ -146,14 +148,18 @@ def test_div_edge_cases(self): def test_pow(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available.') - x_bounds = [(np.random.uniform(0, 2), np.random.uniform(2, 5)), - (0, np.random.uniform(2, 5)), - (0, 0)] - y_bounds = [(np.random.uniform(-5, -2), np.random.uniform(2, 5)), - (0, np.random.uniform(2, 5)), - (np.random.uniform(0, 2), np.random.uniform(2, 5)), - (np.random.uniform(-5, -2), 0), - (np.random.uniform(-5, -2), np.random.uniform(-2, 0))] + x_bounds = [ + (np.random.uniform(0, 2), np.random.uniform(2, 5)), + (0, np.random.uniform(2, 5)), + (0, 0), + ] + y_bounds = [ + (np.random.uniform(-5, -2), np.random.uniform(2, 5)), + (0, np.random.uniform(2, 5)), + (np.random.uniform(0, 2), np.random.uniform(2, 5)), + (np.random.uniform(-5, -2), 0), + (np.random.uniform(-5, -2), np.random.uniform(-2, 0)), + ] for xl, xu in x_bounds: for yl, yu in y_bounds: zl, zu = self.power(xl, xu, yl, yu, 1e-8) @@ -163,13 +169,15 @@ def test_pow(self): x = np.linspace(xl, xu, 100) y = np.linspace(yl, yu, 100) for _x in x: - _z = _x ** y - self.assertTrue(np.all(zl <= _z)) - self.assertTrue(np.all(zu >= _z)) - - x_bounds = [(np.random.uniform(-5, -2), np.random.uniform(2, 5)), - (np.random.uniform(-5, -2), np.random.uniform(-2, 0)), - (np.random.uniform(-5, -2), 0)] + _z = _x**y + self.assertTrue(np.all(zl - 1e-14 <= _z)) + self.assertTrue(np.all(zu + 1e-14 >= _z)) + + x_bounds = [ + (np.random.uniform(-5, -2), np.random.uniform(2, 5)), + (np.random.uniform(-5, -2), np.random.uniform(-2, 0)), + (np.random.uniform(-5, -2), 0), + ] y_bounds = list(range(-4, 4)) for xl, xu in x_bounds: for yl in y_bounds: @@ -177,7 +185,7 @@ def test_pow(self): zl, zu = self.power(xl, xu, yl, yu, 1e-8) x = np.linspace(xl, xu, 100, endpoint=False) y = yl - _z = x ** y + _z = x**y self.assertTrue(np.all(zl <= _z)) self.assertTrue(np.all(zu >= _z)) @@ -200,13 +208,22 @@ def test_pow2(self): lb, ub = self.power(_xl, _xu, _yl, _yu, 1e-8) self.assertEqual(lb, -interval.inf) self.assertEqual(ub, interval.inf) - elif _yl == _yu and _yl != round(_yl) and (_xu < 0 or (_xu < 0 and _yu < 0)): - with self.assertRaises((InfeasibleConstraintException, interval.IntervalException)): + elif ( + _yl == _yu + and _yl != round(_yl) + and (_xu < 0 or (_xu < 0 and _yu < 0)) + ): + with self.assertRaises( + ( + InfeasibleConstraintException, + interval.IntervalException, + ) + ): lb, ub = self.power(_xl, _xu, _yl, _yu, 1e-8) else: lb, ub = self.power(_xl, _xu, _yl, _yu, 1e-8) if isfinite(lb) and isfinite(ub): - nan_fill = 0.5*(lb + ub) + nan_fill = 0.5 * (lb + ub) elif isfinite(lb): nan_fill = lb + 1 elif isfinite(ub): @@ -215,7 +232,7 @@ def test_pow2(self): nan_fill = 0 x = np.linspace(_xl, _xu, 30) y = np.linspace(_yl, _yu, 30) - z = x**np.split(y, len(y)) + z = x ** np.split(y, len(y)) z[np.isnan(z)] = nan_fill all_values = z estimated_lb = all_values.min() @@ -237,9 +254,11 @@ def test_exp(self): def test_log(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available.') - x_bounds = [(np.random.uniform(0, 2), np.random.uniform(2, 5)), - (0, np.random.uniform(2, 5)), - (0, 0)] + x_bounds = [ + (np.random.uniform(0, 2), np.random.uniform(2, 5)), + (0, np.random.uniform(2, 5)), + (0, 0), + ] for xl, xu in x_bounds: zl, zu = self.log(xl, xu) x = np.linspace(xl, xu, 100) @@ -250,8 +269,8 @@ def test_log(self): def test_cos(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available.') - lbs = np.linspace(-2*math.pi, 2*math.pi, 10) - ubs = np.linspace(-2*math.pi, 2*math.pi, 10) + lbs = np.linspace(-2 * math.pi, 2 * math.pi, 10) + ubs = np.linspace(-2 * math.pi, 2 * math.pi, 10) for xl in lbs: for xu in ubs: if xu >= xl: @@ -264,8 +283,8 @@ def test_cos(self): def test_sin(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available.') - lbs = np.linspace(-2*math.pi, 2*math.pi, 10) - ubs = np.linspace(-2*math.pi, 2*math.pi, 10) + lbs = np.linspace(-2 * math.pi, 2 * math.pi, 10) + ubs = np.linspace(-2 * math.pi, 2 * math.pi, 10) for xl in lbs: for xu in ubs: if xu >= xl: @@ -278,8 +297,8 @@ def test_sin(self): def test_tan(self): if not numpy_available: raise unittest.SkipTest('Numpy is not available.') - lbs = np.linspace(-2*math.pi, 2*math.pi, 10) - ubs = np.linspace(-2*math.pi, 2*math.pi, 10) + lbs = np.linspace(-2 * math.pi, 2 * math.pi, 10) + ubs = np.linspace(-2 * math.pi, 2 * math.pi, 10) for xl in lbs: for xu in ubs: if xu >= xl: @@ -298,22 +317,22 @@ def test_asin(self): yl, yu = self.asin(-0.5, 0.5, -math.pi, math.pi, 1e-8) self.assertAlmostEqual(yl, -math.pi, 12) self.assertAlmostEqual(yu, math.pi, 12) - yl, yu = self.asin(-0.5, 0.5, -math.pi/2, math.pi/2, 1e-8) + yl, yu = self.asin(-0.5, 0.5, -math.pi / 2, math.pi / 2, 1e-8) self.assertAlmostEqual(yl, math.asin(-0.5)) self.assertAlmostEqual(yu, math.asin(0.5)) - yl, yu = self.asin(-0.5, 0.5, -math.pi/2-0.1, math.pi/2+0.1, 1e-8) + yl, yu = self.asin(-0.5, 0.5, -math.pi / 2 - 0.1, math.pi / 2 + 0.1, 1e-8) self.assertAlmostEqual(yl, math.asin(-0.5)) self.assertAlmostEqual(yu, math.asin(0.5)) - yl, yu = self.asin(-0.5, 0.5, -math.pi/2+0.1, math.pi/2-0.1, 1e-8) + yl, yu = self.asin(-0.5, 0.5, -math.pi / 2 + 0.1, math.pi / 2 - 0.1, 1e-8) self.assertAlmostEqual(yl, math.asin(-0.5)) self.assertAlmostEqual(yu, math.asin(0.5)) - yl, yu = self.asin(-0.5, 0.5, -1.5*math.pi, 1.5*math.pi, 1e-8) + yl, yu = self.asin(-0.5, 0.5, -1.5 * math.pi, 1.5 * math.pi, 1e-8) self.assertAlmostEqual(yl, -3.6651914291880920, 12) self.assertAlmostEqual(yu, 3.6651914291880920, 12) - yl, yu = self.asin(-0.5, 0.5, -1.5*math.pi-0.1, 1.5*math.pi+0.1, 1e-8) + yl, yu = self.asin(-0.5, 0.5, -1.5 * math.pi - 0.1, 1.5 * math.pi + 0.1, 1e-8) self.assertAlmostEqual(yl, -3.6651914291880920, 12) self.assertAlmostEqual(yu, 3.6651914291880920, 12) - yl, yu = self.asin(-0.5, 0.5, -1.5*math.pi+0.1, 1.5*math.pi-0.1, 1e-8) + yl, yu = self.asin(-0.5, 0.5, -1.5 * math.pi + 0.1, 1.5 * math.pi - 0.1, 1e-8) self.assertAlmostEqual(yl, -3.6651914291880920, 12) self.assertAlmostEqual(yu, 3.6651914291880920, 12) @@ -323,25 +342,25 @@ def test_acos(self): yl, yu = self.acos(-0.5, 0.5, -interval.inf, interval.inf, 1e-8) self.assertEqual(yl, -interval.inf) self.assertEqual(yu, interval.inf) - yl, yu = self.acos(-0.5, 0.5, -0.5*math.pi, 0.5*math.pi, 1e-8) - self.assertAlmostEqual(yl, -0.5*math.pi, 12) - self.assertAlmostEqual(yu, 0.5*math.pi, 12) + yl, yu = self.acos(-0.5, 0.5, -0.5 * math.pi, 0.5 * math.pi, 1e-8) + self.assertAlmostEqual(yl, -0.5 * math.pi, 12) + self.assertAlmostEqual(yu, 0.5 * math.pi, 12) yl, yu = self.acos(-0.5, 0.5, 0, math.pi, 1e-8) self.assertAlmostEqual(yl, math.acos(0.5)) self.assertAlmostEqual(yu, math.acos(-0.5)) - yl, yu = self.acos(-0.5, 0.5, 0-0.1, math.pi+0.1, 1e-8) + yl, yu = self.acos(-0.5, 0.5, 0 - 0.1, math.pi + 0.1, 1e-8) self.assertAlmostEqual(yl, math.acos(0.5)) self.assertAlmostEqual(yu, math.acos(-0.5)) - yl, yu = self.acos(-0.5, 0.5, 0+0.1, math.pi-0.1, 1e-8) + yl, yu = self.acos(-0.5, 0.5, 0 + 0.1, math.pi - 0.1, 1e-8) self.assertAlmostEqual(yl, math.acos(0.5)) self.assertAlmostEqual(yu, math.acos(-0.5)) yl, yu = self.acos(-0.5, 0.5, -math.pi, 0, 1e-8) self.assertAlmostEqual(yl, -math.acos(-0.5), 12) self.assertAlmostEqual(yu, -math.acos(0.5), 12) - yl, yu = self.acos(-0.5, 0.5, -math.pi-0.1, 0+0.1, 1e-8) + yl, yu = self.acos(-0.5, 0.5, -math.pi - 0.1, 0 + 0.1, 1e-8) self.assertAlmostEqual(yl, -math.acos(-0.5), 12) self.assertAlmostEqual(yu, -math.acos(0.5), 12) - yl, yu = self.acos(-0.5, 0.5, -math.pi+0.1, 0-0.1, 1e-8) + yl, yu = self.acos(-0.5, 0.5, -math.pi + 0.1, 0 - 0.1, 1e-8) self.assertAlmostEqual(yl, -math.acos(-0.5), 12) self.assertAlmostEqual(yu, -math.acos(0.5), 12) @@ -354,15 +373,17 @@ def test_atan(self): yl, yu = self.atan(-0.5, 0.5, -0.1, 0.1) self.assertAlmostEqual(yl, -0.1, 12) self.assertAlmostEqual(yu, 0.1, 12) - yl, yu = self.atan(-0.5, 0.5, -0.5*math.pi+0.1, math.pi/2-0.1) + yl, yu = self.atan(-0.5, 0.5, -0.5 * math.pi + 0.1, math.pi / 2 - 0.1) self.assertAlmostEqual(yl, math.atan(-0.5), 12) self.assertAlmostEqual(yu, math.atan(0.5), 12) - yl, yu = self.atan(-0.5, 0.5, -1.5*math.pi+0.1, 1.5*math.pi-0.1) - self.assertAlmostEqual(yl, math.atan(-0.5)-math.pi, 12) - self.assertAlmostEqual(yu, math.atan(0.5)+math.pi, 12) + yl, yu = self.atan(-0.5, 0.5, -1.5 * math.pi + 0.1, 1.5 * math.pi - 0.1) + self.assertAlmostEqual(yl, math.atan(-0.5) - math.pi, 12) + self.assertAlmostEqual(yu, math.atan(0.5) + math.pi, 12) def test_encountered_bugs(self): - lb, ub = self._inverse_power1(88893.4225, 88893.4225, 2, 2, 298.15, 298.15, 1e-8) + lb, ub = self._inverse_power1( + 88893.4225, 88893.4225, 2, 2, 298.15, 298.15, 1e-8 + ) self.assertAlmostEqual(lb, 298.15) self.assertAlmostEqual(ub, 298.15) @@ -370,7 +391,9 @@ def test_encountered_bugs(self): self.assertAlmostEqual(lb, -0.0016) self.assertAlmostEqual(ub, -0.0016) - lb, ub = self._inverse_power1(-1, -1e-12, 2, 2, -interval.inf, interval.inf, 1e-8) + lb, ub = self._inverse_power1( + -1, -1e-12, 2, 2, -interval.inf, interval.inf, 1e-8 + ) self.assertAlmostEqual(lb, 0) self.assertAlmostEqual(ub, 0) diff --git a/pyomo/contrib/fme/fourier_motzkin_elimination.py b/pyomo/contrib/fme/fourier_motzkin_elimination.py index 93edfe7bf10..18aa157545e 100644 --- a/pyomo/contrib/fme/fourier_motzkin_elimination.py +++ b/pyomo/contrib/fme/fourier_motzkin_elimination.py @@ -9,9 +9,20 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core import (Var, Block, Constraint, Param, Set, SetOf, Suffix, - Expression, Objective, SortComponents, value, - ConstraintList) +from pyomo.core import ( + Var, + Block, + Constraint, + Param, + Set, + SetOf, + Suffix, + Expression, + Objective, + SortComponents, + value, + ConstraintList, +) from pyomo.core.base import TransformationFactory, _VarData from pyomo.core.plugins.transform.hierarchy import Transformation from pyomo.common.config import ConfigBlock, ConfigValue, NonNegativeFloat @@ -24,6 +35,7 @@ logger = logging.getLogger('pyomo.contrib.fme') + def _check_var_bounds_filter(constraint): """Check if the constraint is already implied by the variable bounds""" # this is one of our constraints, so we know that it is >=. @@ -31,19 +43,20 @@ def _check_var_bounds_filter(constraint): for v, coef in constraint['map'].items(): if coef > 0: if v.lb is None: - return True # we don't have var bounds with which to imply the - # constraint... - min_lhs += coef*v.lb + return True # we don't have var bounds with which to imply the + # constraint... + min_lhs += coef * v.lb elif coef < 0: if v.ub is None: - return True # we don't have var bounds with which to imply the - # constraint... - min_lhs += coef*v.ub + return True # we don't have var bounds with which to imply the + # constraint... + min_lhs += coef * v.ub # we do need value here since we didn't control v.lb and v.ub above. if value(min_lhs) >= constraint['lower']: - return False # constraint implied by var bounds + return False # constraint implied by var bounds return True + def vars_to_eliminate_list(x): if isinstance(x, (Var, _VarData)): if not x.is_indexed(): @@ -58,24 +71,26 @@ def vars_to_eliminate_list(x): ans.update(vars_to_eliminate_list(i)) return ans else: - raise ValueError( - "Expected Var or list of Vars." - "\n\tReceived %s" % type(x)) + raise ValueError("Expected Var or list of Vars.\n\tReceived %s" % type(x)) -def gcd(a,b): + +def gcd(a, b): while b != 0: a, b = b, a % b return abs(a) + def lcm(ints): a = ints[0] for b in ints[1:]: - a = abs(a*b) // gcd(a,b) + a = abs(a * b) // gcd(a, b) return a -@TransformationFactory.register('contrib.fourier_motzkin_elimination', - doc="Project out specified (continuous) " - "variables from a linear model.") + +@TransformationFactory.register( + 'contrib.fourier_motzkin_elimination', + doc="Project out specified (continuous) variables from a linear model.", +) class Fourier_Motzkin_Elimination_Transformation(Transformation): """Project out specified variables from a linear model. @@ -92,95 +107,116 @@ class Fourier_Motzkin_Elimination_Transformation(Transformation): """ CONFIG = ConfigBlock("contrib.fourier_motzkin_elimination") - CONFIG.declare('vars_to_eliminate', ConfigValue( - default=None, - domain=vars_to_eliminate_list, - description="Continuous variable or list of continuous variables to " - "project out of the model", - doc=""" - This specifies the list of variables to project out of the model. - Note that these variables must all be continuous and the model must be - linear.""" - )) - CONFIG.declare('constraint_filtering_callback', ConfigValue( - default=_check_var_bounds_filter, - description="A callback that determines whether or not new " - "constraints generated by Fourier-Motzkin elimination are added " - "to the model", - doc=""" - Specify None in order for no constraint filtering to occur during the - transformation. - - Specify a function that accepts a constraint (represented in the >= - dictionary form used in this transformation) and returns a Boolean - indicating whether or not to add it to the model. - """ - )) - CONFIG.declare('do_integer_arithmetic', ConfigValue( - default=False, - domain=bool, - description="A Boolean flag to decide whether Fourier-Motzkin " - "elimination will be performed with only integer arithmetic.", - doc=""" - If True, only integer arithmetic will be performed during Fourier- - Motzkin elimination. This should result in no numerical error. - If True and there is non-integer data in the constraints being - projected, an error will be raised. - - If False, the algorithm will not check whether data is integer, and will - perform division operations. Use this setting when not all data is - integer, or when you are willing to sacrifice some numeric accuracy. - """ - )) - CONFIG.declare('verbose', ConfigValue( - default=False, - domain=bool, - description="A Boolean flag to enable verbose output.", - doc=""" - If True, logs the steps of the projection. - """ - )) - CONFIG.declare('zero_tolerance', ConfigValue( - default=0, - domain=NonNegativeFloat, - description="Absolute tolerance at which a float will be considered 0.", - doc=""" - Whenever fourier-motzkin elimination is used with non-integer data, - there is a chance of numeric trouble, the most obvious of which is - that 'eliminated' variables will remain in the constraints with very - small coefficients. Set this tolerance so that floating points smaller - than this will be treated as 0 (and reported that way in the final - constraints). - """ - )) - CONFIG.declare('integer_tolerance', ConfigValue( - default=0, - domain=NonNegativeFloat, - description="Absolute tolerance at which a float will be considered " - "(and cast to) an integer, when do_integer_arithmetic is True", - doc=""" - Tolerance at which a number x will be considered an integer, when we - are performing fourier-motzkin elimination with only integer_arithmetic. - That is, x will be cast to an integer if - abs(int(x) - x) <= integer_tolerance. - """ - )) - CONFIG.declare('projected_constraints_name', ConfigValue( - default=None, - domain=str, - description="Optional name for the ConstraintList containing the " - "projected constraints. Must be a unique name with respect to the " - "instance.", - doc=""" - Optional name for the ConstraintList containing the projected - constraints. If not specified, the constraints will be stored on a - private block created by the transformation, so if you want access - to them after the transformation, use this argument. - - Must be a string which is a unique component name with respect to the - Block on which the transformation is called. - """ - )) + CONFIG.declare( + 'vars_to_eliminate', + ConfigValue( + default=None, + domain=vars_to_eliminate_list, + description="Continuous variable or list of continuous variables to " + "project out of the model", + doc=""" + This specifies the list of variables to project out of the model. + Note that these variables must all be continuous and the model must be + linear.""", + ), + ) + CONFIG.declare( + 'constraint_filtering_callback', + ConfigValue( + default=_check_var_bounds_filter, + description="A callback that determines whether or not new " + "constraints generated by Fourier-Motzkin elimination are added " + "to the model", + doc=""" + Specify None in order for no constraint filtering to occur during the + transformation. + + Specify a function that accepts a constraint (represented in the >= + dictionary form used in this transformation) and returns a Boolean + indicating whether or not to add it to the model. + """, + ), + ) + CONFIG.declare( + 'do_integer_arithmetic', + ConfigValue( + default=False, + domain=bool, + description="A Boolean flag to decide whether Fourier-Motzkin " + "elimination will be performed with only integer arithmetic.", + doc=""" + If True, only integer arithmetic will be performed during Fourier- + Motzkin elimination. This should result in no numerical error. + If True and there is non-integer data in the constraints being + projected, an error will be raised. + + If False, the algorithm will not check whether data is integer, and will + perform division operations. Use this setting when not all data is + integer, or when you are willing to sacrifice some numeric accuracy. + """, + ), + ) + CONFIG.declare( + 'verbose', + ConfigValue( + default=False, + domain=bool, + description="A Boolean flag to enable verbose output.", + doc=""" + If True, logs the steps of the projection. + """, + ), + ) + CONFIG.declare( + 'zero_tolerance', + ConfigValue( + default=0, + domain=NonNegativeFloat, + description="Absolute tolerance at which a float will be considered 0.", + doc=""" + Whenever fourier-motzkin elimination is used with non-integer data, + there is a chance of numeric trouble, the most obvious of which is + that 'eliminated' variables will remain in the constraints with very + small coefficients. Set this tolerance so that floating points smaller + than this will be treated as 0 (and reported that way in the final + constraints). + """, + ), + ) + CONFIG.declare( + 'integer_tolerance', + ConfigValue( + default=0, + domain=NonNegativeFloat, + description="Absolute tolerance at which a float will be considered " + "(and cast to) an integer, when do_integer_arithmetic is True", + doc=""" + Tolerance at which a number x will be considered an integer, when we + are performing fourier-motzkin elimination with only integer_arithmetic. + That is, x will be cast to an integer if + abs(int(x) - x) <= integer_tolerance. + """, + ), + ) + CONFIG.declare( + 'projected_constraints_name', + ConfigValue( + default=None, + domain=str, + description="Optional name for the ConstraintList containing the " + "projected constraints. Must be a unique name with respect to the " + "instance.", + doc=""" + Optional name for the ConstraintList containing the projected + constraints. If not specified, the constraints will be stored on a + private block created by the transformation, so if you want access + to them after the transformation, use this argument. + + Must be a string which is a unique component name with respect to the + Block on which the transformation is called. + """, + ), + ) def __init__(self): """Initialize transformation object""" @@ -212,66 +248,78 @@ def _apply_to_impl(self, instance, config): self.integer_tolerance = config.integer_tolerance self.zero_tolerance = config.zero_tolerance if vars_to_eliminate is None: - raise RuntimeError("The Fourier-Motzkin Elimination transformation " - "requires the argument vars_to_eliminate, a " - "list of Vars to be projected out of the model.") + raise RuntimeError( + "The Fourier-Motzkin Elimination transformation " + "requires the argument vars_to_eliminate, a " + "list of Vars to be projected out of the model." + ) # make transformation block transBlockName = unique_component_name( - instance, - '_pyomo_contrib_fme_transformation') + instance, '_pyomo_contrib_fme_transformation' + ) transBlock = Block() instance.add_component(transBlockName, transBlock) nm = config.projected_constraints_name if nm is None: - projected_constraints = transBlock.projected_constraints = \ - ConstraintList() + projected_constraints = transBlock.projected_constraints = ConstraintList() else: # check that this component doesn't already exist if instance.component(nm) is not None: - raise RuntimeError("projected_constraints_name was specified " - "as '%s', but this is already a component " - "on the instance! Please specify a unique " - "name." % nm) + raise RuntimeError( + "projected_constraints_name was specified " + "as '%s', but this is already a component " + "on the instance! Please specify a unique " + "name." % nm + ) projected_constraints = ConstraintList() instance.add_component(nm, projected_constraints) # collect all of the constraints # NOTE that we are ignoring deactivated constraints constraints = [] - ctypes_not_to_transform = set((Block, Param, Objective, Set, SetOf, - Expression, Suffix, Var)) + ctypes_not_to_transform = set( + (Block, Param, Objective, Set, SetOf, Expression, Suffix, Var) + ) for obj in instance.component_data_objects( - descend_into=Block, - sort=SortComponents.deterministic, - active=True): + descend_into=Block, sort=SortComponents.deterministic, active=True + ): if obj.ctype in ctypes_not_to_transform: continue elif obj.ctype is Constraint: cons_list = self._process_constraint(obj) constraints.extend(cons_list) - obj.deactivate() # the truth will be on our transformation block + obj.deactivate() # the truth will be on our transformation block else: raise RuntimeError( "Found active component %s of type %s. The " "Fourier-Motzkin Elimination transformation can only " "handle purely algebraic models. That is, only " "Sets, Params, Vars, Constraints, Expressions, Blocks, " - "and Objectives may be active on the model." % (obj.name, - obj.ctype)) + "and Objectives may be active on the model." % (obj.name, obj.ctype) + ) for obj in vars_to_eliminate: if obj.lb is not None: - constraints.append({'body': generate_standard_repn(obj), - 'lower': value(obj.lb), - 'map': ComponentMap([(obj, 1)])}) + constraints.append( + { + 'body': generate_standard_repn(obj), + 'lower': value(obj.lb), + 'map': ComponentMap([(obj, 1)]), + } + ) if obj.ub is not None: - constraints.append({'body': generate_standard_repn(-obj), - 'lower': -value(obj.ub), - 'map': ComponentMap([(obj, -1)])}) - - new_constraints = self._fourier_motzkin_elimination( constraints, - vars_to_eliminate) + constraints.append( + { + 'body': generate_standard_repn(-obj), + 'lower': -value(obj.ub), + 'map': ComponentMap([(obj, -1)]), + } + ) + + new_constraints = self._fourier_motzkin_elimination( + constraints, vars_to_eliminate + ) # put the new constraints on the transformation block for cons in new_constraints: @@ -279,10 +327,11 @@ def _apply_to_impl(self, instance, config): try: keep = self.constraint_filter(cons) except: - logger.error("Problem calling constraint filter callback " - "on constraint with right-hand side %s and " - "body:\n%s" % (cons['lower'], - cons['body'].to_expression())) + logger.error( + "Problem calling constraint filter callback " + "on constraint with right-hand side %s and " + "body:\n%s" % (cons['lower'], cons['body'].to_expression()) + ) raise if not keep: continue @@ -294,9 +343,8 @@ def _apply_to_impl(self, instance, config): continue else: # This would actually make a lot of sense in this case... - #projected_constraints.add(Constraint.Infeasible) - raise RuntimeError("Fourier-Motzkin found the model is " - "infeasible!") + # projected_constraints.add(Constraint.Infeasible) + raise RuntimeError("Fourier-Motzkin found the model is infeasible!") else: projected_constraints.add(lhs >= lower) @@ -313,17 +361,17 @@ def _process_constraint(self, constraint): std_repn = generate_standard_repn(body) # make sure that we store the lower bound's value so that we need not # worry again during the transformation - cons_dict = {'lower': value(constraint.lower), - 'body': std_repn - } + cons_dict = {'lower': value(constraint.lower), 'body': std_repn} upper = value(constraint.upper) constraints_to_add = [cons_dict] if upper is not None: # if it has both bounds if cons_dict['lower'] is not None: # copy the constraint and flip - leq_side = {'lower': -upper, - 'body': generate_standard_repn(-1.0*body)} + leq_side = { + 'lower': -upper, + 'body': generate_standard_repn(-1.0 * body), + } self._move_constant_and_add_map(leq_side) constraints_to_add.append(leq_side) @@ -331,13 +379,13 @@ def _process_constraint(self, constraint): else: # just flip the constraint cons_dict['lower'] = -upper - cons_dict['body'] = generate_standard_repn(-1.0*body) + cons_dict['body'] = generate_standard_repn(-1.0 * body) self._move_constant_and_add_map(cons_dict) return constraints_to_add def _move_constant_and_add_map(self, cons_dict): - """Takes constraint in dicionary form already in >= form, + """Takes constraint in dictionary form already in >= form, and moves the constant to the RHS """ body = cons_dict['body'] @@ -348,11 +396,11 @@ def _move_constant_and_add_map(self, cons_dict): # store a map of vars to coefficients. We can't use this in place of # standard repn because determinism, but this will save a lot of linear # time searches later. Note also that we will take the value of the - # coeficient here so that we never have to worry about it again during + # coefficient here so that we never have to worry about it again during # the transformation. - cons_dict['map'] = ComponentMap(zip(body.linear_vars, - [value(coef) for coef in - body.linear_coefs])) + cons_dict['map'] = ComponentMap( + zip(body.linear_vars, [value(coef) for coef in body.linear_coefs]) + ) def _fourier_motzkin_elimination(self, constraints, vars_to_eliminate): """Performs FME on the constraint list in the argument @@ -369,18 +417,19 @@ def _fourier_motzkin_elimination(self, constraints, vars_to_eliminate): if not std_repn.is_linear(): # as long as none of vars_that_appear are in the nonlinear part, # we are actually okay. - nonlinear_vars = ComponentSet(v for two_tuple in - std_repn.quadratic_vars for - v in two_tuple) + nonlinear_vars = ComponentSet( + v for two_tuple in std_repn.quadratic_vars for v in two_tuple + ) nonlinear_vars.update(v for v in std_repn.nonlinear_vars) for var in nonlinear_vars: if var in vars_to_eliminate: - raise RuntimeError("Variable %s appears in a nonlinear " - "constraint. The Fourier-Motzkin " - "Elimination transformation can only " - "be used to eliminate variables " - "which only appear linearly." % - var.name) + raise RuntimeError( + "Variable %s appears in a nonlinear " + "constraint. The Fourier-Motzkin " + "Elimination transformation can only " + "be used to eliminate variables " + "which only appear linearly." % var.name + ) for var in std_repn.linear_vars: if var in vars_to_eliminate: if not var in vars_that_appear_set: @@ -395,8 +444,7 @@ def _fourier_motzkin_elimination(self, constraints, vars_to_eliminate): the_var = vars_that_appear.pop() logger.warning("Projecting out var %s of %s" % (iteration, total)) if self.verbose: - logger.info("Projecting out %s" % - the_var.getname(fully_qualified=True)) + logger.info("Projecting out %s" % the_var.getname(fully_qualified=True)) logger.info("New constraints are:") # we are 'reorganizing' the constraints, we sort based on the sign @@ -412,9 +460,9 @@ def _fourier_motzkin_elimination(self, constraints, vars_to_eliminate): if leaving_var_coef is None or leaving_var_coef == 0: waiting_list.append(cons) if self.verbose: - logger.info("\t%s <= %s" - % (cons['lower'], - cons['body'].to_expression())) + logger.info( + "\t%s <= %s" % (cons['lower'], cons['body'].to_expression()) + ) continue # we know the constraint is a >= constraint, using that @@ -426,17 +474,23 @@ def _fourier_motzkin_elimination(self, constraints, vars_to_eliminate): if leaving_var_coef < 0: leq_list.append( self._nonneg_scalar_multiply_linear_constraint( - cons, -1.0/leaving_var_coef)) + cons, -1.0 / leaving_var_coef + ) + ) else: geq_list.append( self._nonneg_scalar_multiply_linear_constraint( - cons, 1.0/leaving_var_coef)) + cons, 1.0 / leaving_var_coef + ) + ) else: - coefs.append(self._as_integer( - leaving_var_coef, - self._get_noninteger_coef_error_message, - (the_var.name, leaving_var_coef) - )) + coefs.append( + self._as_integer( + leaving_var_coef, + self._get_noninteger_coef_error_message, + (the_var.name, leaving_var_coef), + ) + ) if self.do_integer_arithmetic and len(coefs) > 0: least_common_mult = lcm(coefs) for cons in constraints: @@ -446,55 +500,59 @@ def _fourier_motzkin_elimination(self, constraints, vars_to_eliminate): to_lcm = least_common_mult // abs(int(leaving_var_coef)) if leaving_var_coef < 0: leq_list.append( - self._nonneg_scalar_multiply_linear_constraint( - cons, to_lcm)) + self._nonneg_scalar_multiply_linear_constraint(cons, to_lcm) + ) else: geq_list.append( - self._nonneg_scalar_multiply_linear_constraint( - cons, to_lcm)) + self._nonneg_scalar_multiply_linear_constraint(cons, to_lcm) + ) constraints = waiting_list for leq in leq_list: for geq in geq_list: - constraints.append( self._add_linear_constraints( leq, geq)) + constraints.append(self._add_linear_constraints(leq, geq)) if self.verbose: - cons = constraints[len(constraints)-1] - logger.info("\t%s <= %s" % - (cons['lower'], - cons['body'].to_expression())) + cons = constraints[len(constraints) - 1] + logger.info( + "\t%s <= %s" % (cons['lower'], cons['body'].to_expression()) + ) iteration += 1 return constraints def _get_noninteger_coef_error_message(self, varname, coef): - return ("The do_integer_arithmetic flag was " - "set to True, but the coefficient of " - "%s is non-integer within the specified " - "tolerance, with value %s. \n" - "Please set do_integer_arithmetic=" - "False, increase integer_tolerance, " - "or make your data integer." % (varname, coef)) + return ( + "The do_integer_arithmetic flag was " + "set to True, but the coefficient of " + "%s is non-integer within the specified " + "tolerance, with value %s. \n" + "Please set do_integer_arithmetic=" + "False, increase integer_tolerance, " + "or make your data integer." % (varname, coef) + ) def _as_integer(self, x, error_message, error_args): if abs(int(x) - x) <= self.integer_tolerance: return int(round(x)) - raise ValueError(error_message if error_args is None - else error_message(*error_args)) + raise ValueError( + error_message if error_args is None else error_message(*error_args) + ) def _multiply(self, scalar, coef, error_message, error_args): if self.do_integer_arithmetic: assert type(scalar) is int return scalar * self._as_integer(coef, error_message, error_args) - elif abs(scalar*coef) > self.zero_tolerance: - return scalar*coef + elif abs(scalar * coef) > self.zero_tolerance: + return scalar * coef else: return 0 def _add(self, a, b, error_message, error_args): if self.do_integer_arithmetic: - return self._as_integer(a, error_message, error_args) \ - + self._as_integer(b, error_message, error_args) + return self._as_integer(a, error_message, error_args) + self._as_integer( + b, error_message, error_args + ) elif abs(a + b) > self.zero_tolerance: return a + b else: @@ -506,8 +564,8 @@ def _nonneg_scalar_multiply_linear_constraint_error_msg(self, cons, coef): "lower bound of %s is non-integer within the specified " "tolerance, with value %s. \n" "Please set do_integer_arithmetic=False, increase " - "integer_tolerance, or make your data integer." % - (cons['body'].to_expression() >= cons['lower'], coef) + "integer_tolerance, or make your data integer." + % (cons['body'].to_expression() >= cons['lower'], coef) ) def _nonneg_scalar_multiply_linear_constraint(self, cons, scalar): @@ -523,25 +581,31 @@ def _nonneg_scalar_multiply_linear_constraint(self, cons, scalar): new_coefs = [] for i, coef in enumerate(body.linear_coefs): v = body.linear_vars[i] - new_coefs.append(self._multiply( - scalar, coef, self._get_noninteger_coef_error_message, - (v.name, coef) - )) + new_coefs.append( + self._multiply( + scalar, + coef, + self._get_noninteger_coef_error_message, + (v.name, coef), + ) + ) # update the map cons['map'][v] = new_coefs[i] body.linear_coefs = new_coefs - body.quadratic_coefs = [scalar*coef for coef in body.quadratic_coefs] - body.nonlinear_expr = scalar*body.nonlinear_expr if \ - body.nonlinear_expr is not None else None + body.quadratic_coefs = [scalar * coef for coef in body.quadratic_coefs] + body.nonlinear_expr = ( + scalar * body.nonlinear_expr if body.nonlinear_expr is not None else None + ) # assume scalar >= 0 and constraint only has lower bound lb = cons['lower'] if lb is not None: cons['lower'] = self._multiply( - scalar, lb, + scalar, + lb, self._nonneg_scalar_multiply_linear_constraint_error_msg, - (cons, coef) + (cons, coef), ) return cons @@ -551,9 +615,11 @@ def _add_linear_constraints_error_msg(self, cons1, cons2): "adding %s and %s, encountered a coefficient that is " "non-integer within the specified tolerance\n" "Please set do_integer_arithmetic=False, increase " - "integer_tolerance, or make your data integer." % - (cons1['body'].to_expression() >= cons1['lower'], - cons2['body'].to_expression() >= cons2['lower']) + "integer_tolerance, or make your data integer." + % ( + cons1['body'].to_expression() >= cons1['lower'], + cons2['body'].to_expression() >= cons2['lower'], + ) ) def _add_linear_constraints(self, cons1, cons2): @@ -578,29 +644,38 @@ def _add_linear_constraints(self, cons1, cons2): expr = 0 for var in all_vars: coef = self._add( - cons1['map'].get(var, 0), cons2['map'].get(var, 0), - self._add_linear_constraints_error_msg, (cons1, cons2)) + cons1['map'].get(var, 0), + cons2['map'].get(var, 0), + self._add_linear_constraints_error_msg, + (cons1, cons2), + ) ans['map'][var] = coef - expr += coef*var + expr += coef * var # deal with nonlinear stuff if there is any for cons in [cons1_body, cons2_body]: if cons.nonlinear_expr is not None: expr += cons.nonlinear_expr - expr += sum(coef*v1*v2 for (coef, (v1, v2)) in - zip(cons.quadratic_coefs, cons.quadratic_vars)) + expr += sum( + coef * v1 * v2 + for (coef, (v1, v2)) in zip(cons.quadratic_coefs, cons.quadratic_vars) + ) ans['body'] = generate_standard_repn(expr) # upper is None and lower exists, so this gets the constant ans['lower'] = self._add( - cons1['lower'], cons2['lower'], - self._add_linear_constraints_error_msg, (cons1, cons2)) + cons1['lower'], + cons2['lower'], + self._add_linear_constraints_error_msg, + (cons1, cons2), + ) return ans - def post_process_fme_constraints(self, m, solver_factory, - projected_constraints=None, tolerance=0): + def post_process_fme_constraints( + self, m, solver_factory, projected_constraints=None, tolerance=0 + ): """Function that solves a sequence of LPs problems to check if constraints are implied by each other. Deletes any that are. @@ -617,10 +692,10 @@ def post_process_fme_constraints(self, m, solver_factory, had nonlinear constraints unrelated to the variables being projected, you need to either deactivate them or provide a solver which will do the right thing.) - projected_constraints: The ConstraintList of projected constraints. - Default is None, in which case we assume that - the FME transformation was called without - specifying their name, so will look for them on + projected_constraints: The ConstraintList of projected constraints. + Default is None, in which case we assume that + the FME transformation was called without + specifying their name, so will look for them on the private transformation block. tolerance: Tolerance at which we decide a constraint is implied by the others. Default is 0, meaning we remove the constraint if @@ -632,23 +707,27 @@ def post_process_fme_constraints(self, m, solver_factory, if projected_constraints is None: # make sure m looks like what we expect if not hasattr(m, "_pyomo_contrib_fme_transformation"): - raise RuntimeError("It looks like model %s has not been " - "transformed with the " - "fourier_motzkin_elimination transformation!" - % m.name) + raise RuntimeError( + "It looks like model %s has not been " + "transformed with the " + "fourier_motzkin_elimination transformation!" % m.name + ) transBlock = m._pyomo_contrib_fme_transformation if not hasattr(transBlock, 'projected_constraints'): - raise RuntimeError("It looks the projected constraints " - "were manually named when the FME " - "transformation was called on %s. " - "If this is so, specify the ConstraintList " - "of projected constraints with the " - "'projected_constraints' argument." % m.name) + raise RuntimeError( + "It looks the projected constraints " + "were manually named when the FME " + "transformation was called on %s. " + "If this is so, specify the ConstraintList " + "of projected constraints with the " + "'projected_constraints' argument." % m.name + ) projected_constraints = transBlock.projected_constraints # relax integrality so that we can do this with LP solves. TransformationFactory('core.relax_integer_vars').apply_to( - m, transform_deactivated_blocks=True) + m, transform_deactivated_blocks=True + ) # deactivate any active objectives on the model, and save what we did so # we can undo it after. active_objs = [] @@ -669,20 +748,23 @@ def post_process_fme_constraints(self, m, solver_factory, projected_constraints[i].deactivate() m.del_component(obj) # make objective to maximize its infeasibility - obj = Objective(expr=projected_constraints[i].body - \ - projected_constraints[i].lower) + obj = Objective( + expr=projected_constraints[i].body - projected_constraints[i].lower + ) m.add_component(obj_name, obj) results = solver_factory.solve(m) - if results.solver.termination_condition == \ - TerminationCondition.unbounded: + if results.solver.termination_condition == TerminationCondition.unbounded: obj_val = -float('inf') - elif results.solver.termination_condition != \ - TerminationCondition.optimal: - raise RuntimeError("Unsuccessful subproblem solve when checking" - "constraint %s.\n\t" - "Termination Condition: %s" % - (projected_constraints[i].name, - results.solver.termination_condition)) + elif results.solver.termination_condition != TerminationCondition.optimal: + raise RuntimeError( + "Unsuccessful subproblem solve when checking" + "constraint %s.\n\t" + "Termination Condition: %s" + % ( + projected_constraints[i].name, + results.solver.termination_condition, + ) + ) else: obj_val = value(obj) # if we couldn't make it infeasible, it's useless diff --git a/pyomo/contrib/fme/plugins.py b/pyomo/contrib/fme/plugins.py index 8817969c186..324dd583d0f 100644 --- a/pyomo/contrib/fme/plugins.py +++ b/pyomo/contrib/fme/plugins.py @@ -9,5 +9,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.contrib.fme.fourier_motzkin_elimination diff --git a/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py b/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py index 904b67afc22..55b359d5990 100644 --- a/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py +++ b/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py @@ -11,7 +11,8 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep # Need solvers/writers registered. import pyomo.environ as pyo @@ -19,10 +20,20 @@ import pyomo.common.unittest as unittest from pyomo.common.log import LoggingIntercept from pyomo.common.collections import ComponentSet -from pyomo.core import (Var, Constraint, Param, ConcreteModel, NonNegativeReals, - Binary, value, Block, Objective) +from pyomo.core import ( + Var, + Constraint, + Param, + ConcreteModel, + NonNegativeReals, + Binary, + value, + Block, + Objective, +) from pyomo.core.base import TransformationFactory -from pyomo.core.expr.current import log +from pyomo.core.expr import log +from pyomo.core.expr.compare import assertExpressionsEqual from pyomo.gdp import Disjunction, Disjunct from pyomo.repn.standard_repn import generate_standard_repn from pyomo.opt import SolverFactory, check_available_solvers @@ -34,6 +45,7 @@ solvers = check_available_solvers('glpk') + class TestFourierMotzkinElimination(unittest.TestCase): def setUp(self): # will need this so we know transformation block names in the test that @@ -44,25 +56,25 @@ def setUp(self): def makeModel(): """ This is a single-level reformulation of a bilevel model. - We project out the dual variables to recover the reformulation in + We project out the dual variables to recover the reformulation in the original space. """ m = ConcreteModel() - m.x = Var(bounds=(0,2)) + m.x = Var(bounds=(0, 2)) m.y = Var(domain=NonNegativeReals) m.lamb = Var([1, 2], domain=NonNegativeReals) m.M = Param([1, 2], mutable=True, default=100) m.u = Var([1, 2], domain=Binary) - m.primal1 = Constraint(expr=m.x - 0.01*m.y <= 1) - m.dual1 = Constraint(expr=1 - m.lamb[1] - 0.01*m.lamb[2] == 0) + m.primal1 = Constraint(expr=m.x - 0.01 * m.y <= 1) + m.dual1 = Constraint(expr=1 - m.lamb[1] - 0.01 * m.lamb[2] == 0) @m.Constraint([1, 2]) def bound_lambdas(m, i): - return m.lamb[i] <= m.u[i]*m.M[i] + return m.lamb[i] <= m.u[i] * m.M[i] - m.bound_y = Constraint(expr=m.y <= 1000*(1 - m.u[1])) - m.dual2 = Constraint(expr=-m.x + 0.01*m.y + 1 <= (1 - m.u[2])*1000) + m.bound_y = Constraint(expr=m.y <= 1000 * (1 - m.u[1])) + m.dual2 = Constraint(expr=-m.x + 0.01 * m.y + 1 <= (1 - m.u[2]) * 1000) return m @@ -73,9 +85,9 @@ def test_no_vars_specified(self): "The Fourier-Motzkin Elimination transformation " "requires the argument vars_to_eliminate, a " "list of Vars to be projected out of the model.", - TransformationFactory('contrib.fourier_motzkin_elimination').\ - apply_to, - m) + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to, + m, + ) unfiltered_indices = [1, 2, 3, 6] filtered_indices = [1, 2, 3, 4] @@ -140,29 +152,28 @@ def check_projected_constraints(self, m, indices): def test_transformed_constraints_indexed_var_arg(self): m = self.makeModel() - TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( - m, - vars_to_eliminate = m.lamb, - constraint_filtering_callback=None) + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( + m, vars_to_eliminate=m.lamb, constraint_filtering_callback=None + ) # we get some trivial constraints too, but let's check that the ones # that should be there really are self.check_projected_constraints(m, self.unfiltered_indices) def test_transformed_constraints_varData_list_arg(self): m = self.makeModel() - TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( m, - vars_to_eliminate = [m.lamb[1], m.lamb[2]], - constraint_filtering_callback=None) + vars_to_eliminate=[m.lamb[1], m.lamb[2]], + constraint_filtering_callback=None, + ) self.check_projected_constraints(m, self.unfiltered_indices) def test_transformed_constraints_indexedVar_list(self): m = self.makeModel() - TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( - m, - vars_to_eliminate = [m.lamb], - constraint_filtering_callback=None) + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( + m, vars_to_eliminate=[m.lamb], constraint_filtering_callback=None + ) self.check_projected_constraints(m, self.unfiltered_indices) @@ -171,9 +182,9 @@ def test_default_constraint_filtering(self): # during the transformation. This checks that we removed the constraints # we expect. m = self.makeModel() - TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( - m, - vars_to_eliminate = m.lamb) + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( + m, vars_to_eliminate=m.lamb + ) # we still have all the right constraints self.check_projected_constraints(m, self.filtered_indices) @@ -183,10 +194,10 @@ def test_default_constraint_filtering(self): def test_original_constraints_deactivated(self): m = self.makeModel() - TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( - m, - vars_to_eliminate = m.lamb) - + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( + m, vars_to_eliminate=m.lamb + ) + self.assertFalse(m.primal1.active) self.assertFalse(m.dual1.active) self.assertFalse(m.dual2.active) @@ -203,10 +214,10 @@ def test_infeasible_model(self): self.assertRaisesRegex( RuntimeError, "Fourier-Motzkin found the model is infeasible!", - TransformationFactory('contrib.fourier_motzkin_elimination').\ - apply_to, - m, - vars_to_eliminate=m.x) + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to, + m, + vars_to_eliminate=m.x, + ) def test_infeasible_model_no_var_bounds(self): m = ConcreteModel() @@ -217,16 +228,16 @@ def test_infeasible_model_no_var_bounds(self): self.assertRaisesRegex( RuntimeError, "Fourier-Motzkin found the model is infeasible!", - TransformationFactory('contrib.fourier_motzkin_elimination').\ - apply_to, - m, - vars_to_eliminate=m.x) - + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to, + m, + vars_to_eliminate=m.x, + ) + def test_nonlinear_error(self): m = ConcreteModel() m.x = Var() m.cons = Constraint(expr=m.x**2 >= 2) - m.cons2 = Constraint(expr=m.x<= 10) + m.cons2 = Constraint(expr=m.x <= 10) self.assertRaisesRegex( RuntimeError, @@ -235,10 +246,10 @@ def test_nonlinear_error(self): "Elimination transformation can only " "be used to eliminate variables " "which only appear linearly.", - TransformationFactory('contrib.fourier_motzkin_elimination').\ - apply_to, - m, - vars_to_eliminate=m.x) + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to, + m, + vars_to_eliminate=m.x, + ) def test_components_we_do_not_understand_error(self): m = self.makeModel() @@ -250,17 +261,18 @@ def test_components_we_do_not_understand_error(self): "Fourier-Motzkin Elimination transformation can only " "handle purely algebraic models. That is, only " "Sets, Params, Vars, Constraints, Expressions, Blocks, " - "and Objectives may be active on the model." % (m.disj.name, - m.disj.type()), - TransformationFactory('contrib.fourier_motzkin_elimination').\ - apply_to, - m, - vars_to_eliminate=m.x) + "and Objectives may be active on the model." % (m.disj.name, m.disj.type()), + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to, + m, + vars_to_eliminate=m.x, + ) def test_bad_constraint_filtering_callback_error(self): m = self.makeModel() + def not_a_callback(cons): raise RuntimeError("I don't know how to do my job.") + fme = TransformationFactory('contrib.fourier_motzkin_elimination') log = StringIO() with LoggingIntercept(log, 'pyomo.contrib.fme', logging.ERROR): @@ -270,11 +282,13 @@ def not_a_callback(cons): fme.apply_to, m, vars_to_eliminate=m.x, - constraint_filtering_callback=not_a_callback) + constraint_filtering_callback=not_a_callback, + ) self.assertRegex( log.getvalue(), "Problem calling constraint filter callback " - "on constraint with right-hand side -1.0 and body:*") + "on constraint with right-hand side -1.0 and body:*", + ) def test_constraint_filtering_callback_not_callable_error(self): m = self.makeModel() @@ -287,11 +301,13 @@ def test_constraint_filtering_callback_not_callable_error(self): fme.apply_to, m, vars_to_eliminate=m.x, - constraint_filtering_callback=5) + constraint_filtering_callback=5, + ) self.assertRegex( log.getvalue(), "Problem calling constraint filter callback " - "on constraint with right-hand side -1.0 and body:*") + "on constraint with right-hand side -1.0 and body:*", + ) def test_combine_three_inequalities_and_flatten_blocks(self): m = ConcreteModel() @@ -303,7 +319,8 @@ def test_combine_three_inequalities_and_flatten_blocks(self): m.b.b2 = Block() m.b.b2.c = Constraint(expr=m.y >= 4) TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( - m, vars_to_eliminate=m.y, do_integer_arithmetic=True) + m, vars_to_eliminate=m.y, do_integer_arithmetic=True + ) constraints = m._pyomo_contrib_fme_transformation.projected_constraints self.assertEqual(len(constraints), 2) @@ -311,7 +328,7 @@ def test_combine_three_inequalities_and_flatten_blocks(self): self.assertEqual(value(cons.lower), 2) self.assertIsNone(cons.upper) self.assertIs(cons.body, m.x) - + cons = constraints[2] self.assertEqual(value(cons.lower), 4) self.assertIsNone(cons.upper) @@ -353,8 +370,7 @@ def check_hull_projected_constraints(self, m, constraints, indices): self.assertEqual(body.constant, 0) self.assertEqual(len(body.linear_vars), 2) self.assertTrue(body.is_linear()) - self.assertIs( - body.linear_vars[1], m.time1_disjuncts[0].binary_indicator_var) + self.assertIs(body.linear_vars[1], m.time1_disjuncts[0].binary_indicator_var) self.assertEqual(body.linear_coefs[1], -1) self.assertIs(body.linear_vars[0], m.p[1]) self.assertEqual(body.linear_coefs[0], 1) @@ -369,8 +385,7 @@ def check_hull_projected_constraints(self, m, constraints, indices): self.assertTrue(body.is_linear()) self.assertIs(body.linear_vars[0], m.p[1]) self.assertEqual(body.linear_coefs[0], -1) - self.assertIs( - body.linear_vars[1], m.time1_disjuncts[0].binary_indicator_var) + self.assertIs(body.linear_vars[1], m.time1_disjuncts[0].binary_indicator_var) self.assertEqual(body.linear_coefs[1], 10) # p[2] - p[1] <= 3*on.ind_var + 2*startup.ind_var @@ -428,11 +443,9 @@ def check_hull_projected_constraints(self, m, constraints, indices): self.assertEqual(body.constant, 0) self.assertEqual(len(body.linear_vars), 2) self.assertTrue(body.is_linear()) - self.assertIs( - body.linear_vars[0], m.time1_disjuncts[0].binary_indicator_var) + self.assertIs(body.linear_vars[0], m.time1_disjuncts[0].binary_indicator_var) self.assertEqual(body.linear_coefs[0], 1) - self.assertIs( - body.linear_vars[1], m.time1_disjuncts[1].binary_indicator_var) + self.assertIs(body.linear_vars[1], m.time1_disjuncts[1].binary_indicator_var) self.assertEqual(body.linear_coefs[1], 1) # 1 >= time1_disjuncts[0].ind_var + time_1.disjuncts[1].ind_var @@ -443,11 +456,9 @@ def check_hull_projected_constraints(self, m, constraints, indices): self.assertEqual(body.constant, 0) self.assertEqual(len(body.linear_vars), 2) self.assertTrue(body.is_linear()) - self.assertIs( - body.linear_vars[0], m.time1_disjuncts[0].binary_indicator_var) + self.assertIs(body.linear_vars[0], m.time1_disjuncts[0].binary_indicator_var) self.assertEqual(body.linear_coefs[0], -1) - self.assertIs( - body.linear_vars[1], m.time1_disjuncts[1].binary_indicator_var) + self.assertIs(body.linear_vars[1], m.time1_disjuncts[1].binary_indicator_var) self.assertEqual(body.linear_coefs[1], -1) # 1 <= on.ind_var + startup.ind_var + off.ind_var @@ -464,7 +475,7 @@ def check_hull_projected_constraints(self, m, constraints, indices): self.assertEqual(body.linear_coefs[1], 1) self.assertIs(body.linear_vars[2], m.startup.binary_indicator_var) self.assertEqual(body.linear_coefs[2], 1) - + # 1 >= on.ind_var + startup.ind_var + off.ind_var cons = constraints[indices[10]] self.assertEqual(cons.lower, -1) @@ -503,50 +514,53 @@ def create_hull_model(self): hull = TransformationFactory('gdp.hull') hull.apply_to(m) disaggregatedVars = ComponentSet( - [hull.get_disaggregated_var(m.p[1], m.time1.disjuncts[0]), - hull.get_disaggregated_var(m.p[1], m.time1.disjuncts[1]), - hull.get_disaggregated_var(m.p[1], m.on), - hull.get_disaggregated_var(m.p[2], m.on), - hull.get_disaggregated_var(m.p[1], m.startup), - hull.get_disaggregated_var(m.p[2], m.startup), - hull.get_disaggregated_var(m.p[1], m.off), - hull.get_disaggregated_var(m.p[2], m.off) - ]) - + [ + hull.get_disaggregated_var(m.p[1], m.time1.disjuncts[0]), + hull.get_disaggregated_var(m.p[1], m.time1.disjuncts[1]), + hull.get_disaggregated_var(m.p[1], m.on), + hull.get_disaggregated_var(m.p[2], m.on), + hull.get_disaggregated_var(m.p[1], m.startup), + hull.get_disaggregated_var(m.p[2], m.startup), + hull.get_disaggregated_var(m.p[1], m.off), + hull.get_disaggregated_var(m.p[2], m.off), + ] + ) + return m, disaggregatedVars def test_project_disaggregated_vars(self): - """This is a little bit more of an integration test with GDP, - but also an example of why FME is 'useful.' We will give a GDP, - take hull relaxation, and then project out the disaggregated + """This is a little bit more of an integration test with GDP, + but also an example of why FME is 'useful.' We will give a GDP, + take hull relaxation, and then project out the disaggregated variables.""" m, disaggregatedVars = self.create_hull_model() - filtered = TransformationFactory('contrib.fourier_motzkin_elimination').\ - create_using(m, vars_to_eliminate=disaggregatedVars) + filtered = TransformationFactory( + 'contrib.fourier_motzkin_elimination' + ).create_using(m, vars_to_eliminate=disaggregatedVars) TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( - m, vars_to_eliminate=disaggregatedVars, - constraint_filtering_callback=None, do_integer_arithmetic=True) + m, + vars_to_eliminate=disaggregatedVars, + constraint_filtering_callback=None, + do_integer_arithmetic=True, + ) constraints = m._pyomo_contrib_fme_transformation.projected_constraints # we of course get tremendous amounts of garbage, but we make sure that # what should be here is: - self.check_hull_projected_constraints(m, constraints, [16, 12, 69, 71, - 48, 60, 29, 1, 2, - 4, 5]) + self.check_hull_projected_constraints( + m, constraints, [23, 19, 8, 10, 54, 67, 35, 3, 4, 1, 2] + ) # and when we filter, it's still there. - constraints = filtered._pyomo_contrib_fme_transformation.\ - projected_constraints - self.check_hull_projected_constraints(filtered, constraints, [8, 6, 20, - 21, 14, - 17, 10, 1, - 2, 3, 4]) - + constraints = filtered._pyomo_contrib_fme_transformation.projected_constraints + self.check_hull_projected_constraints( + filtered, constraints, [10, 8, 5, 6, 15, 19, 11, 3, 4, 1, 2] + ) + @unittest.skipIf(not 'glpk' in solvers, 'glpk not available') def test_post_processing(self): m, disaggregatedVars = self.create_hull_model() fme = TransformationFactory('contrib.fourier_motzkin_elimination') - fme.apply_to(m, vars_to_eliminate=disaggregatedVars, - do_integer_arithmetic=True) + fme.apply_to(m, vars_to_eliminate=disaggregatedVars, do_integer_arithmetic=True) # post-process fme.post_process_fme_constraints(m, SolverFactory('glpk')) @@ -555,26 +569,25 @@ def test_post_processing(self): # They should be the same as the above, but now these are *all* the # constraints - self.check_hull_projected_constraints(m, constraints, [8, 6, 20, 21, 14, - 17, 10, 1, 2, 3, - 4]) + self.check_hull_projected_constraints( + m, constraints, [10, 8, 5, 6, 15, 19, 11, 3, 4, 1, 2] + ) # and check that we didn't change the model for disj in m.component_data_objects(Disjunct): self.assertIs(disj.binary_indicator_var.domain, Binary) - self.assertEqual(len([o for o in m.component_data_objects(Objective)]), - 1) + self.assertEqual(len([o for o in m.component_data_objects(Objective)]), 1) self.assertIsInstance(m.component("obj"), Objective) self.assertTrue(m.obj.active) - + @unittest.skipIf(not 'glpk' in solvers, 'glpk not available') def test_model_with_unrelated_nonlinear_expressions(self): m = ConcreteModel() - m.x = Var([1, 2, 3], bounds=(0,3)) + m.x = Var([1, 2, 3], bounds=(0, 3)) m.y = Var() m.z = Var() - @m.Constraint([1,2]) + @m.Constraint([1, 2]) def cons(m, i): return m.x[i] <= m.y**i @@ -584,80 +597,61 @@ def cons(m, i): m.cons4 = Constraint(expr=m.x[3] <= log(m.y + 1)) fme = TransformationFactory('contrib.fourier_motzkin_elimination') - fme.apply_to(m, vars_to_eliminate=m.x, - projected_constraints_name='projected_constraints', - constraint_filtering_callback=None) + fme.apply_to( + m, + vars_to_eliminate=m.x, + projected_constraints_name='projected_constraints', + constraint_filtering_callback=None, + ) constraints = m.projected_constraints # 0 <= y <= 3 cons = constraints[5] self.assertEqual(value(cons.lower), 0) - self.assertIs(cons.body, m.y) + assertExpressionsEqual(self, cons.body, m.y) cons = constraints[6] self.assertEqual(value(cons.lower), -3) - body = generate_standard_repn(cons.body) - self.assertTrue(body.is_linear()) - self.assertEqual(len(body.linear_vars), 1) - self.assertIs(body.linear_vars[0], m.y) - self.assertEqual(body.linear_coefs[0], -1) + assertExpressionsEqual(self, cons.body, -m.y) # z <= y**2 + 3 cons = constraints[2] self.assertEqual(value(cons.lower), -3) - body = generate_standard_repn(cons.body) - self.assertTrue(body.is_quadratic()) - self.assertEqual(len(body.linear_vars), 1) - self.assertIs(body.linear_vars[0], m.z) - self.assertEqual(body.linear_coefs[0], -1) - self.assertEqual(len(body.quadratic_vars), 1) - self.assertEqual(body.quadratic_coefs[0], 1) - self.assertIs(body.quadratic_vars[0][0], m.y) - self.assertIs(body.quadratic_vars[0][1], m.y) + assertExpressionsEqual(self, cons.body, -m.z + m.y**2) # z <= 6 cons = constraints[4] self.assertEqual(cons.lower, -6) - body = generate_standard_repn(cons.body) - self.assertTrue(body.is_linear()) - self.assertEqual(len(body.linear_vars), 1) - self.assertEqual(body.linear_coefs[0], -1) - self.assertIs(body.linear_vars[0], m.z) + assertExpressionsEqual(self, cons.body, -m.z) # 0 <= ln(y+ 1) cons = constraints[1] self.assertEqual(value(cons.lower), 0) - body = generate_standard_repn(cons.body) - self.assertTrue(body.is_nonlinear()) - self.assertFalse(body.is_quadratic()) - self.assertEqual(len(body.linear_vars), 0) - self.assertEqual(body.nonlinear_expr.name, 'log') - self.assertEqual(len(body.nonlinear_expr.args[0].args), 2) - self.assertIs(body.nonlinear_expr.args[0].args[0], m.y) - self.assertEqual(body.nonlinear_expr.args[0].args[1], 1) + assertExpressionsEqual(self, cons.body, log(m.y + 1)) # 0 <= y**2 cons = constraints[3] self.assertEqual(value(cons.lower), 0) - body = generate_standard_repn(cons.body) - self.assertTrue(body.is_quadratic()) - self.assertEqual(len(body.quadratic_vars), 1) - self.assertEqual(body.quadratic_coefs[0], 1) - self.assertIs(body.quadratic_vars[0][0], m.y) - self.assertIs(body.quadratic_vars[0][1], m.y) + assertExpressionsEqual(self, cons.body, m.y**2) # check constraints valid for a selection of points (this is nonconvex, # but anyway...) - pts = [#(sqrt(3), 6), Not numerically stable enough for this test - (1, 4), (3, 6), (3, 0), (0, 0), (2,6)] + pts = [ # (sqrt(3), 6), Not numerically stable enough for this test + (1, 4), + (3, 6), + (3, 0), + (0, 0), + (2, 6), + ] for pt in pts: m.y.fix(pt[0]) m.z.fix(pt[1]) for i in constraints: - self.assertLessEqual(value(constraints[i].lower), - value(constraints[i].body)) + self.assertLessEqual( + value(constraints[i].lower), value(constraints[i].body) + ) m.y.fixed = False m.z.fixed = False - + # check post process these are non-convex, so I don't want to deal with # it... (and this is a good test that I *don't* deal with it.) constraints[2].deactivate() @@ -666,19 +660,19 @@ def cons(m, i): # NOTE also that some of the suproblems in this test are unbounded: We # need to keep those constraints. fme.post_process_fme_constraints( - m, SolverFactory('glpk'), - projected_constraints=m.projected_constraints) + m, SolverFactory('glpk'), projected_constraints=m.projected_constraints + ) # we needed all the constraints, so we kept them all self.assertEqual(len(constraints), 6) # last check that if someone activates something on the model in # between, we just use it. (I struggle to imagine why you would do this - # because why withold the information *during* FME, but if there's some + # because why withhold the information *during* FME, but if there's some # reason, we may as well use all the information we've got.) m.some_new_cons = Constraint(expr=m.y <= 2) fme.post_process_fme_constraints( - m, SolverFactory('glpk'), - projected_constraints=m.projected_constraints) + m, SolverFactory('glpk'), projected_constraints=m.projected_constraints + ) # now we should have lost one constraint self.assertEqual(len(constraints), 5) # and it should be the y <= 3 one... @@ -687,10 +681,10 @@ def cons(m, i): @unittest.skipIf(not 'glpk' in solvers, 'glpk not available') def test_noninteger_coefficients_of_vars_being_projected_error(self): m = ConcreteModel() - m.x = Var(bounds=(0,9)) + m.x = Var(bounds=(0, 9)) m.y = Var(bounds=(-5, 5)) - m.c1 = Constraint(expr=2*m.x + 0.5*m.y >= 2) - m.c2 = Constraint(expr=0.25*m.y >= 0.5*m.x) + m.c1 = Constraint(expr=2 * m.x + 0.5 * m.y >= 2) + m.c2 = Constraint(expr=0.25 * m.y >= 0.5 * m.x) fme = TransformationFactory('contrib.fourier_motzkin_elimination') self.assertRaisesRegex( @@ -702,17 +696,18 @@ def test_noninteger_coefficients_of_vars_being_projected_error(self): "Please set do_integer_arithmetic=" "False, increase integer_tolerance, or make your data integer.", fme.apply_to, - m, - vars_to_eliminate=m.x, - do_integer_arithmetic=True) + m, + vars_to_eliminate=m.x, + do_integer_arithmetic=True, + ) @unittest.skipIf(not 'glpk' in solvers, 'glpk not available') def test_noninteger_coefficients_of_vars_not_being_projected_error(self): m = ConcreteModel() - m.x = Var(bounds=(0,9)) + m.x = Var(bounds=(0, 9)) m.y = Var(bounds=(-5, 5)) - m.c1 = Constraint(expr=2*m.x + 0.5*m.y >= 2) - m.c2 = Constraint(expr=0.25*m.y >= 5*m.x) + m.c1 = Constraint(expr=2 * m.x + 0.5 * m.y >= 2) + m.c2 = Constraint(expr=0.25 * m.y >= 5 * m.x) fme = TransformationFactory('contrib.fourier_motzkin_elimination') self.assertRaisesRegex( @@ -724,22 +719,27 @@ def test_noninteger_coefficients_of_vars_not_being_projected_error(self): "Please set do_integer_arithmetic=" "False, increase integer_tolerance, or make your data integer.", fme.apply_to, - m, - vars_to_eliminate=m.x, - do_integer_arithmetic=True) + m, + vars_to_eliminate=m.x, + do_integer_arithmetic=True, + ) def test_integer_arithmetic_non1_coefficients(self): m = ConcreteModel() - m.x = Var(bounds=(0,9)) + m.x = Var(bounds=(0, 9)) m.y = Var(bounds=(-5, 5)) - m.c1 = Constraint(expr=4*m.x + m.y >= 4) - m.c2 = Constraint(expr=m.y >= 2*m.x) + m.c1 = Constraint(expr=4 * m.x + m.y >= 4) + m.c2 = Constraint(expr=m.y >= 2 * m.x) fme = TransformationFactory('contrib.fourier_motzkin_elimination') - - fme.apply_to( m, vars_to_eliminate=m.x, - constraint_filtering_callback=None, - do_integer_arithmetic=True, verbose=True) + + fme.apply_to( + m, + vars_to_eliminate=m.x, + constraint_filtering_callback=None, + do_integer_arithmetic=True, + verbose=True, + ) constraints = m._pyomo_contrib_fme_transformation.projected_constraints @@ -778,13 +778,12 @@ def test_numerical_instability_almost_canceling(self): m.x0 = Var() m.y = Var() - m.cons1 = Constraint(expr=(1.342 + 2.371e-8)*m.x0 <= m.x + 17*m.y) - m.cons2 = Constraint(expr=(17.56 + 3.2e-7)*m.x0 >= m.y) - + m.cons1 = Constraint(expr=(1.342 + 2.371e-8) * m.x0 <= m.x + 17 * m.y) + m.cons2 = Constraint(expr=(17.56 + 3.2e-7) * m.x0 >= m.y) + fme = TransformationFactory('contrib.fourier_motzkin_elimination') - - fme.apply_to(m, vars_to_eliminate=[m.x0], verbose=True, - zero_tolerance=1e-9) + + fme.apply_to(m, vars_to_eliminate=[m.x0], verbose=True, zero_tolerance=1e-9) constraints = m._pyomo_contrib_fme_transformation.projected_constraints @@ -794,7 +793,7 @@ def test_numerical_instability_almost_canceling(self): useful = constraints[1] repn = generate_standard_repn(useful.body) self.assertTrue(repn.is_linear()) - self.assertEqual(len(repn.linear_coefs), 2) # this is the real test + self.assertEqual(len(repn.linear_coefs), 2) # this is the real test self.assertEqual(useful.lower, 0) self.assertIs(repn.linear_vars[0], m.x) self.assertAlmostEqual(repn.linear_coefs[0], 0.7451564696962295) @@ -812,27 +811,26 @@ def test_numerical_instability_early_elimination(self): m.x = Var() m.x0 = Var() m.y = Var() - + # we'll pretend that the 1.123e-9 is noise from previous calculations - m.cons1 = Constraint(expr=0 <= (4.27 + 1.123e-9)*m.x + 13*m.y - m.x0) - m.cons2 = Constraint(expr=m.x0 >= 12*m.y + 4.27*m.x) + m.cons1 = Constraint(expr=0 <= (4.27 + 1.123e-9) * m.x + 13 * m.y - m.x0) + m.cons2 = Constraint(expr=m.x0 >= 12 * m.y + 4.27 * m.x) fme = TransformationFactory('contrib.fourier_motzkin_elimination') - + # doing my own clones because I want assertIs tests first = m.clone() second = m.clone() third = m.clone() fme.apply_to(first, vars_to_eliminate=[first.x0], zero_tolerance=1e-10) - constraints = first._pyomo_contrib_fme_transformation.\ - projected_constraints + constraints = first._pyomo_contrib_fme_transformation.projected_constraints cons = constraints[1] self.assertEqual(cons.lower, 0) repn = generate_standard_repn(cons.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_coefs), 2) # x is still around + self.assertEqual(len(repn.linear_coefs), 2) # x is still around self.assertIs(repn.linear_vars[0], first.x) self.assertAlmostEqual(repn.linear_coefs[0], 1.123e-9) self.assertIs(repn.linear_vars[1], first.y) @@ -842,24 +840,27 @@ def test_numerical_instability_early_elimination(self): # so just to drive home the point, this results in no constraints: # (Though also note that that only happens if x0 is the first to be # projected out) - fme.apply_to(second, vars_to_eliminate=[second.x0, second.x], - zero_tolerance=1e-10) - self.assertEqual(len(second._pyomo_contrib_fme_transformation.\ - projected_constraints), 0) - + fme.apply_to( + second, vars_to_eliminate=[second.x0, second.x], zero_tolerance=1e-10 + ) + self.assertEqual( + len(second._pyomo_contrib_fme_transformation.projected_constraints), 0 + ) + # but in this version, we assume that x is already gone... - fme.apply_to(third, vars_to_eliminate=[third.x0], verbose=True, - zero_tolerance=1e-8) - constraints = third._pyomo_contrib_fme_transformation.\ - projected_constraints + fme.apply_to( + third, vars_to_eliminate=[third.x0], verbose=True, zero_tolerance=1e-8 + ) + constraints = third._pyomo_contrib_fme_transformation.projected_constraints cons = constraints[1] self.assertEqual(cons.lower, 0) self.assertIs(cons.body, third.y) self.assertIsNone(cons.upper) # and this is exactly the same as the above: - fme.apply_to(m, vars_to_eliminate=[m.x0, m.x], verbose=True, - zero_tolerance=1e-8) + fme.apply_to( + m, vars_to_eliminate=[m.x0, m.x], verbose=True, zero_tolerance=1e-8 + ) constraints = m._pyomo_contrib_fme_transformation.projected_constraints cons = constraints[1] self.assertEqual(cons.lower, 0) @@ -892,8 +893,7 @@ def test_use_all_var_bounds(self): fme = TransformationFactory('contrib.fourier_motzkin_elimination') fme.apply_to(m.b, vars_to_eliminate=[m.y]) - constraints = m.b.\ - _pyomo_contrib_fme_transformation.projected_constraints + constraints = m.b._pyomo_contrib_fme_transformation.projected_constraints # if we hadn't included y's bounds, then we wouldn't get any constraints # and y wouldn't be eliminated. If we do include y's bounds, we get new @@ -903,13 +903,15 @@ def test_use_all_var_bounds(self): def test_projected_constraints_named_correctly(self): m = self.make_tiny_model_where_bounds_matter() fme = TransformationFactory('contrib.fourier_motzkin_elimination') - fme.apply_to(m.b, vars_to_eliminate=[m.y], - projected_constraints_name='fme_constraints') + fme.apply_to( + m.b, vars_to_eliminate=[m.y], projected_constraints_name='fme_constraints' + ) self.assertIsInstance(m.b.component("fme_constraints"), Constraint) self.check_tiny_model_constraints(m.b.fme_constraints) - self.assertIsNone(m.b._pyomo_contrib_fme_transformation.component( - "projected_constraints")) + self.assertIsNone( + m.b._pyomo_contrib_fme_transformation.component("projected_constraints") + ) def test_non_unique_constraint_name_error(self): m = self.make_tiny_model_where_bounds_matter() @@ -918,21 +920,22 @@ def test_non_unique_constraint_name_error(self): RuntimeError, "projected_constraints_name was specified " "as 'c', but this is already a component on " - "the instance! Please specify a unique " + "the instance! Please specify a unique " "name.", fme.apply_to, - m.b, + m.b, vars_to_eliminate=[m.y], - projected_constraints_name='c') + projected_constraints_name='c', + ) def test_simple_hull_example(self): m = ConcreteModel() - m.x0 = Var(bounds=(0,3)) - m.x1 = Var(bounds=(0,3)) - m.x = Var(bounds=(0,3)) + m.x0 = Var(bounds=(0, 3)) + m.x1 = Var(bounds=(0, 3)) + m.x = Var(bounds=(0, 3)) m.disaggregation = Constraint(expr=m.x == m.x0 + m.x1) m.y = Var(domain=Binary) - m.cons = Constraint(expr=2*m.y <= m.x1) + m.cons = Constraint(expr=2 * m.y <= m.x1) fme = TransformationFactory('contrib.fourier_motzkin_elimination') fme.apply_to(m, vars_to_eliminate=[m.x0, m.x1]) diff --git a/pyomo/contrib/gdp_bounds/compute_bounds.py b/pyomo/contrib/gdp_bounds/compute_bounds.py index a60dcd7f736..f4f046e79df 100644 --- a/pyomo/contrib/gdp_bounds/compute_bounds.py +++ b/pyomo/contrib/gdp_bounds/compute_bounds.py @@ -24,9 +24,8 @@ from pyomo.common.errors import InfeasibleConstraintException from pyomo.contrib.fbbt.fbbt import fbbt, BoundsManager from pyomo.core.base.block import Block, TraversalStrategy -from pyomo.core.expr.current import identify_variables -from pyomo.core import (Constraint, Objective, - TransformationFactory, minimize, value) +from pyomo.core.expr import identify_variables +from pyomo.core import Constraint, Objective, TransformationFactory, minimize, value from pyomo.opt import SolverFactory from pyomo.gdp.disjunct import Disjunct from pyomo.core.plugins.transform.hierarchy import Transformation @@ -35,17 +34,24 @@ linear_degrees = {0, 1} inf = float('inf') + def disjunctive_obbt(model, solver): """Provides Optimality-based bounds tightening to a model using a solver.""" - model._disjuncts_to_process = list(model.component_data_objects( - ctype=Disjunct, active=True, descend_into=(Block, Disjunct), - descent_order=TraversalStrategy.BreadthFirstSearch)) + model._disjuncts_to_process = list( + model.component_data_objects( + ctype=Disjunct, + active=True, + descend_into=(Block, Disjunct), + descent_order=TraversalStrategy.BreadthFirstSearch, + ) + ) if model.ctype == Disjunct: model._disjuncts_to_process.insert(0, model) linear_var_set = ComponentSet() for constr in model.component_data_objects( - Constraint, active=True, descend_into=(Block, Disjunct)): + Constraint, active=True, descend_into=(Block, Disjunct) + ): if constr.body.polynomial_degree() in linear_degrees: linear_var_set.update(identify_variables(constr.body, include_fixed=False)) model._disj_bnds_linear_vars = list(linear_var_set) @@ -62,7 +68,10 @@ def disjunctive_obbt(model, solver): for var, new_bnds in var_bnds.items(): old_lb, old_ub = disjunct._disj_var_bounds.get(var, (-inf, inf)) new_lb, new_ub = new_bnds - disjunct._disj_var_bounds[var] = (max(old_lb, new_lb), min(old_ub, new_ub)) + disjunct._disj_var_bounds[var] = ( + max(old_lb, new_lb), + min(old_ub, new_ub), + ) else: disjunct.deactivate() # prune disjunct @@ -79,7 +88,8 @@ def obbt_disjunct(orig_model, idx, solver): # Deactivate nonlinear constraints for constr in model.component_data_objects( - Constraint, active=True, descend_into=(Block, Disjunct)): + Constraint, active=True, descend_into=(Block, Disjunct) + ): if constr.body.polynomial_degree() not in linear_degrees: constr.deactivate() @@ -109,12 +119,19 @@ def obbt_disjunct(orig_model, idx, solver): # Maps original variable --> (new computed LB, new computed UB) var_bnds = ComponentMap( - ((orig_var, ( - clone_var.lb if clone_var.has_lb() else -inf, - clone_var.ub if clone_var.has_ub() else inf)) - for orig_var, clone_var in zip( - orig_model._disj_bnds_linear_vars, model._disj_bnds_linear_vars) - if clone_var in relevant_var_set) + ( + ( + orig_var, + ( + clone_var.lb if clone_var.has_lb() else -inf, + clone_var.ub if clone_var.has_ub() else inf, + ), + ) + for orig_var, clone_var in zip( + orig_model._disj_bnds_linear_vars, model._disj_bnds_linear_vars + ) + if clone_var in relevant_var_set + ) ) return var_bnds @@ -129,8 +146,8 @@ def solve_bounding_problem(model, solver): return -inf else: raise NotImplementedError( - "Unhandled termination condition: %s" - % results.solver.termination_condition) + "Unhandled termination condition: %s" % results.solver.termination_condition + ) def disjunctive_fbbt(model): @@ -165,8 +182,10 @@ def fbbt_disjunct(disj, parent_bounds): fbbt_disjunct(disj, new_bnds) -@TransformationFactory.register('contrib.compute_disj_var_bounds', - doc="Compute disjunctive bounds in a given model.") +@TransformationFactory.register( + 'contrib.compute_disj_var_bounds', + doc="Compute disjunctive bounds in a given model.", +) class ComputeDisjunctiveVarBounds(Transformation): """Compute disjunctive bounds in a given model. diff --git a/pyomo/contrib/gdp_bounds/info.py b/pyomo/contrib/gdp_bounds/info.py index b0d78d7992a..bad76e0f2f7 100644 --- a/pyomo/contrib/gdp_bounds/info.py +++ b/pyomo/contrib/gdp_bounds/info.py @@ -36,14 +36,13 @@ def disjunctive_bound(var, scope): # Initialize to the global variable bound var_bnd = ( value(var.lb) if var.has_lb() else -inf, - value(var.ub) if var.has_ub() else inf) + value(var.ub) if var.has_ub() else inf, + ) possible_disjunct = scope while possible_disjunct is not None: try: disj_bnd = possible_disjunct._disj_var_bounds.get(var, (-inf, inf)) - disj_bnd = ( - max(var_bnd[0], disj_bnd[0]), - min(var_bnd[1], disj_bnd[1])) + disj_bnd = (max(var_bnd[0], disj_bnd[0]), min(var_bnd[1], disj_bnd[1])) return disj_bnd except AttributeError: # possible disjunct does not have attribute '_disj_var_bounds'. diff --git a/pyomo/contrib/gdp_bounds/plugins.py b/pyomo/contrib/gdp_bounds/plugins.py index ce5853a48d4..1ebe44378f0 100644 --- a/pyomo/contrib/gdp_bounds/plugins.py +++ b/pyomo/contrib/gdp_bounds/plugins.py @@ -9,5 +9,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.contrib.gdp_bounds.compute_bounds diff --git a/pyomo/contrib/gdp_bounds/tests/test_gdp_bounds.py b/pyomo/contrib/gdp_bounds/tests/test_gdp_bounds.py index 74f7faa78c1..a5f7780f043 100644 --- a/pyomo/contrib/gdp_bounds/tests/test_gdp_bounds.py +++ b/pyomo/contrib/gdp_bounds/tests/test_gdp_bounds.py @@ -1,9 +1,13 @@ """Tests explicit bound to variable bound transformation module.""" import pyomo.common.unittest as unittest -from pyomo.contrib.gdp_bounds.info import ( - disjunctive_lb, disjunctive_ub) -from pyomo.environ import (ConcreteModel, Constraint, Objective, - TransformationFactory, Var) +from pyomo.contrib.gdp_bounds.info import disjunctive_lb, disjunctive_ub +from pyomo.environ import ( + ConcreteModel, + Constraint, + Objective, + TransformationFactory, + Var, +) from pyomo.gdp import Disjunct, Disjunction from pyomo.opt import check_available_solvers @@ -24,7 +28,9 @@ def test_compute_bounds_obbt(self): m.d2.c = Constraint(expr=m.x <= 4) m.disj = Disjunction(expr=[m.d1, m.d2]) m.obj = Objective(expr=m.x) - TransformationFactory('contrib.compute_disj_var_bounds').apply_to(m, solver='cbc') + TransformationFactory('contrib.compute_disj_var_bounds').apply_to( + m, solver='cbc' + ) self.assertEqual(m.d1._disj_var_bounds[m.x], (2, 8)) self.assertEqual(m.d2._disj_var_bounds[m.x], (0, 4)) self.assertEqual(disjunctive_lb(m.x, m.d1), 2) @@ -43,7 +49,9 @@ def test_compute_bounds_obbt_prune_disjunct(self): m.d2.c = Constraint(expr=m.x + 3 == 0) m.disj = Disjunction(expr=[m.d1, m.d2]) m.obj = Objective(expr=m.x) - TransformationFactory('contrib.compute_disj_var_bounds').apply_to(m, solver='cbc') + TransformationFactory('contrib.compute_disj_var_bounds').apply_to( + m, solver='cbc' + ) self.assertFalse(m.d1.active) self.assertEqual(m.d1.binary_indicator_var.value, 0) self.assertTrue(m.d1.indicator_var.fixed) diff --git a/pyomo/contrib/gdpopt/GDPopt.py b/pyomo/contrib/gdpopt/GDPopt.py index dff709557f5..3d45fa504cb 100644 --- a/pyomo/contrib/gdpopt/GDPopt.py +++ b/pyomo/contrib/gdpopt/GDPopt.py @@ -44,13 +44,16 @@ - start keeping basic changelog """ -from pyomo.common.config import ( - add_docstring_list, ConfigDict) +from pyomo.common.config import document_kwargs_from_configdict, ConfigDict from pyomo.contrib.gdpopt import __version__ from pyomo.contrib.gdpopt.config_options import ( - _add_common_configs, _supported_algorithms, _get_algorithm_config) + _add_common_configs, + _supported_algorithms, + _get_algorithm_config, +) from pyomo.opt.base import SolverFactory + def _handle_strategy_deprecation(config): # This method won't be needed when the strategy arg is removed, but for now, # we need to copy it over as algorithm. The config system already gave the @@ -58,10 +61,12 @@ def _handle_strategy_deprecation(config): if config.algorithm is None and config.strategy is not None: config.algorithm = config.strategy + @SolverFactory.register( 'gdpopt', doc='The GDPopt decomposition-based ' - 'Generalized Disjunctive Programming (GDP) solver') + 'Generalized Disjunctive Programming (GDP) solver', +) class GDPoptSolver(object): """Decomposition solver for Generalized Disjunctive Programming (GDP) problems. @@ -94,9 +99,11 @@ class GDPoptSolver(object): - Logic-to-linear transformation: Romeo Valentin """ + CONFIG = ConfigDict("GDPopt") _add_common_configs(CONFIG) + @document_kwargs_from_configdict(CONFIG) def solve(self, model, **kwds): """Solve the model. @@ -104,7 +111,7 @@ def solve(self, model, **kwds): model (Block): a Pyomo model or block to be solved """ - # The algorithm should have been specifed as an argument to the solve + # The algorithm should have been specified as an argument to the solve # method. We will instantiate an ephemeral instance of the correct # solver and call its solve method. options = kwds.pop('options', {}) @@ -122,7 +129,8 @@ def solve(self, model, **kwds): raise ValueError( "No algorithm was specified to the solve method. " "Please specify an algorithm or use an " - "algorithm-specific solver.") + "algorithm-specific solver." + ) # get rid of 'algorithm' and 'strategy' if they exist so that the solver # can validate. @@ -130,8 +138,7 @@ def solve(self, model, **kwds): kwds.pop('strategy', None) # The algorithm has already been validated, so this will work. - return SolverFactory( - _supported_algorithms[algorithm][0]).solve(model, **kwds) + return SolverFactory(_supported_algorithms[algorithm][0]).solve(model, **kwds) # Support use as a context manager under current solver API def __enter__(self): @@ -154,6 +161,3 @@ def version(self): return __version__ _metasolver = False - -GDPoptSolver.solve.__doc__ = add_docstring_list( - GDPoptSolver.solve.__doc__, GDPoptSolver.CONFIG, indent_by=8) diff --git a/pyomo/contrib/gdpopt/algorithm_base_class.py b/pyomo/contrib/gdpopt/algorithm_base_class.py index ac468e3432a..5bf41148700 100644 --- a/pyomo/contrib/gdpopt/algorithm_base_class.py +++ b/pyomo/contrib/gdpopt/algorithm_base_class.py @@ -17,19 +17,26 @@ from pyomo.common.modeling import unique_component_name from pyomo.contrib.gdpopt.config_options import _add_common_configs from pyomo.contrib.gdpopt.create_oa_subproblems import ( - add_util_block, add_disjunct_list, add_boolean_variable_lists, - add_algebraic_variable_list) + add_util_block, + add_disjunct_list, + add_boolean_variable_lists, + add_algebraic_variable_list, +) from pyomo.contrib.gdpopt import __version__ from pyomo.contrib.gdpopt.util import ( - get_main_elapsed_time, lower_logger_level_to, - solve_continuous_problem, time_code) + get_main_elapsed_time, + lower_logger_level_to, + solve_continuous_problem, + time_code, +) from pyomo.core.base import Objective, value, minimize, maximize from pyomo.core.staleflag import StaleFlagManager from pyomo.opt import SolverResults from pyomo.opt import TerminationCondition as tc from pyomo.util.model_size import build_model_size_report -class _GDPoptAlgorithm(): + +class _GDPoptAlgorithm: CONFIG = ConfigBlock("GDPopt") _add_common_configs(CONFIG) @@ -39,15 +46,13 @@ def __init__(self, **kwds): correctly set up the config arguments and initialize the generic parts of the algorithm state. """ - self.config = self.CONFIG(kwds.pop('options', {}), - preserve_implicit=True) + self.config = self.CONFIG(kwds.pop('options', {}), preserve_implicit=True) self.config.set_value(kwds) # We store bounds, timing info, iteration count, incumbent, and the # expression of the original (possibly nonlinear) objective function. self.LB = float('-inf') self.UB = float('inf') - self.unbounded = False self.timing = Bunch() self.initialization_iteration = 0 self.iteration = 0 @@ -56,10 +61,12 @@ def __init__(self, **kwds): self.incumbent_continuous_soln = None self.original_obj = None + self._dummy_obj = None self.original_util_block = None - self.log_formatter = ('{:>9} {:>15} {:>11.5f} {:>11.5f} ' - '{:>8.2%} {:>7.2f} {}') + self.log_formatter = ( + '{:>9} {:>15} {:>11.5f} {:>11.5f} {:>8.2%} {:>7.2f} {}' + ) # Support use as a context manager under current solver API def __enter__(self): @@ -86,8 +93,10 @@ def version(self): def solve(self, model, **kwds): """Solve the model. - Args: - model (Block): a Pyomo model or block to be solved + Parameters + ---------- + model : Block + the Pyomo model or block to be solved """ # I'm going to be nice for now and intercept with a more informative @@ -96,14 +105,15 @@ def solve(self, model, **kwds): if alg is None: alg = kwds.pop('strategy', None) if alg is not None: - raise ValueError("Changing the algorithm in the solve method " - "is not supported for algorithm-specific " - "GDPopt solvers. Either use " - "SolverFactory('gdpopt') or instantiate a " - "solver with the algorithm you want to use.") - - config = self.config(kwds.pop('options', {}), - preserve_implicit=True) + raise ValueError( + "Changing the algorithm in the solve method " + "is not supported for algorithm-specific " + "GDPopt solvers. Either use " + "SolverFactory('gdpopt') or instantiate a " + "solver with the algorithm you want to use." + ) + + config = self.config(kwds.pop('options', {}), preserve_implicit=True) config.set_value(kwds) with lower_logger_level_to(config.logger, tee=config.tee): @@ -112,7 +122,8 @@ def solve(self, model, **kwds): try: with time_code(self.timing, 'total', is_main_timer=True): results = self._gather_problem_info_and_solve_non_gdps( - model, config) + model, config + ) # If it wasn't disjunctive, we solved it if not results: # main loop implemented by each algorithm @@ -125,17 +136,24 @@ def solve(self, model, **kwds): config.logger.warning( "09/06/22: The GDPopt LBB algorithm currently has " "known issues. Please use the results with caution " - "and report any bugs!") - if (self.pyomo_results.solver.termination_condition not in - {tc.infeasible, tc.unbounded}): + "and report any bugs!" + ) + if self.pyomo_results.solver.termination_condition not in { + tc.infeasible, + tc.unbounded, + }: self._transfer_incumbent_to_original_model(config.logger) self._delete_original_model_util_block() return self.pyomo_results def _solve_gdp(self, original_model, config): # To be implemented by the algorithms - raise NotImplementedError("Derived _GDPoptAlgorithms need to " - "implement the _solve_gdp method.") + raise NotImplementedError( + "Derived _GDPoptAlgorithms need to implement the _solve_gdp method." + ) + + def _log_citation(self, config): + pass def _log_solver_intro_message(self, config): config.logger.info( @@ -146,24 +164,34 @@ def _log_solver_intro_message(self, config): config.display(ostream=os) config.logger.info(os.getvalue()) - config.logger.info(""" - If you use this software, you may cite the following: - - Implementation: - Chen, Q; Johnson, ES; Bernal, DE; Valentin, R; Kale, S; - Bates, J; Siirola, JD; Grossmann, IE. - Pyomo.GDP: an ecosystem for logic based modeling and optimization - development. - Optimization and Engineering, 2021. - """.strip()) + config.logger.info( + """ + If you use this software, you may cite the following: + - Implementation: + Chen, Q; Johnson, ES; Bernal, DE; Valentin, R; Kale, S; + Bates, J; Siirola, JD; Grossmann, IE. + Pyomo.GDP: an ecosystem for logic based modeling and optimization + development. + Optimization and Engineering, 2021. + """.strip() + ) + self._log_citation(config) def _log_header(self, logger): logger.info( '=================================================================' - '============================') + '============================' + ) logger.info( '{:^9} | {:^15} | {:^11} | {:^11} | {:^8} | {:^7}\n'.format( - 'Iteration', 'Subproblem Type', 'Lower Bound', 'Upper Bound', - ' Gap ', 'Time(s)')) + 'Iteration', + 'Subproblem Type', + 'Lower Bound', + 'Upper Bound', + ' Gap ', + 'Time(s)', + ) + ) @property def objective_sense(self): @@ -186,9 +214,11 @@ def _gather_problem_info_and_solve_non_gdps(self, model, config): # Check if this problem actually has any discrete decisions. If not, # just solve it. problem = self.pyomo_results.problem - if (problem.number_of_binary_variables == 0 and - problem.number_of_integer_variables == 0 and - problem.number_of_disjunctions == 0): + if ( + problem.number_of_binary_variables == 0 + and problem.number_of_integer_variables == 0 + and problem.number_of_disjunctions == 0 + ): cont_results = solve_continuous_problem(model, config) self.LB = cont_results.problem.lower_bound self.UB = cont_results.problem.upper_bound @@ -209,8 +239,9 @@ def _gather_problem_info_and_solve_non_gdps(self, model, config): # To transfer solutions between cloned models add_algebraic_variable_list(util_block) - def _update_bounds_after_solve(self, subprob_nm, primal=None, dual=None, - logger=None): + def _update_bounds_after_solve( + self, subprob_nm, primal=None, dual=None, logger=None + ): primal_improved = self._update_bounds(primal, dual) if logger is not None: self._log_current_state(logger, subprob_nm, primal_improved) @@ -254,23 +285,33 @@ def relative_gap(self): some point the primal bound changes signs. """ absolute_gap = abs(self.UB - self.LB) - return absolute_gap/(abs(self.primal_bound() + 1e-10)) + return absolute_gap / (abs(self.primal_bound() + 1e-10)) - def _log_current_state(self, logger, subproblem_type, - primal_improved=False): + def _log_current_state(self, logger, subproblem_type, primal_improved=False): star = "*" if primal_improved else "" - logger.info(self.log_formatter.format( - self.iteration, subproblem_type, self.LB, - self.UB, self.relative_gap(), - get_main_elapsed_time(self.timing), star)) + logger.info( + self.log_formatter.format( + self.iteration, + subproblem_type, + self.LB, + self.UB, + self.relative_gap(), + get_main_elapsed_time(self.timing), + star, + ) + ) def _log_termination_message(self, logger): logger.info( '\nSolved in {} iterations and {:.5f} seconds\n' 'Optimal objective value {:.10f}\n' 'Relative optimality gap {:.5%}'.format( - self.iteration, get_main_elapsed_time(self.timing), - self.primal_bound(), self.relative_gap())) + self.iteration, + get_main_elapsed_time(self.timing), + self.primal_bound(), + self.relative_gap(), + ) + ) def primal_bound(self): if self.objective_sense is minimize: @@ -279,16 +320,20 @@ def primal_bound(self): return self.LB def update_incumbent(self, util_block): - self.incumbent_continuous_soln = [v.value for v in - util_block.algebraic_variable_list] + self.incumbent_continuous_soln = [ + v.value for v in util_block.algebraic_variable_list + ] self.incumbent_boolean_soln = [ - v.value for v in util_block.transformed_boolean_variable_list] + v.value for v in util_block.transformed_boolean_variable_list + ] - def _update_bounds_after_discrete_problem_solve(self, mip_termination, - obj_expr, logger): + def _update_bounds_after_discrete_problem_solve( + self, mip_termination, obj_expr, logger + ): if mip_termination is tc.optimal: - self._update_bounds_after_solve('discrete', dual=value(obj_expr), - logger=logger) + self._update_bounds_after_solve( + 'discrete', dual=value(obj_expr), logger=logger + ) elif mip_termination is tc.infeasible: # Discrete problem was infeasible. self._update_dual_bound_to_infeasible() @@ -299,8 +344,10 @@ def _update_bounds_after_discrete_problem_solve(self, mip_termination, # have any info in terms of a dual bound.) pass else: - raise DeveloperError("Unrecognized termination condition %s when " - "updating the dual bound." % mip_termination) + raise DeveloperError( + "Unrecognized termination condition %s when " + "updating the dual bound." % mip_termination + ) def _update_dual_bound_to_infeasible(self): # set optimistic bound to infinity @@ -309,35 +356,34 @@ def _update_dual_bound_to_infeasible(self): else: self._update_bounds(dual=float('-inf')) - def _update_primal_bound_to_unbounded(self): + def _update_primal_bound_to_unbounded(self, config): if self.objective_sense == minimize: self._update_bounds(primal=float('-inf')) else: self._update_bounds(primal=float('inf')) - self.unbounded = True + config.logger.info('GDPopt exiting--GDP is unbounded.') + self.pyomo_results.solver.termination_condition = tc.unbounded + + def _load_infeasible_termination_status(self, config): + config.logger.info('GDPopt exiting--problem is infeasible.') + self.pyomo_results.solver.termination_condition = tc.infeasible def bounds_converged(self, config): - if self.unbounded: - config.logger.info('GDPopt exiting--GDP is unbounded.') - self.pyomo_results.solver.termination_condition = tc.unbounded + if self.pyomo_results.solver.termination_condition == tc.unbounded: return True elif self.LB + config.bound_tolerance >= self.UB: if self.LB == float('inf') and self.UB == float('inf'): - config.logger.info('GDPopt exiting--problem is infeasible.') - self.pyomo_results.solver.termination_condition = tc.infeasible + self._load_infeasible_termination_status(config) elif self.LB == float('-inf') and self.UB == float('-inf'): - config.logger.info('GDPopt exiting--problem is infeasible.') - self.pyomo_results.solver.termination_condition = tc.infeasible + self._load_infeasible_termination_status(config) else: # if they've crossed, then the gap is actually 0: Update the # dual (discrete problem) bound to be equal to the primal # (subproblem) bound if self.LB + config.bound_tolerance > self.UB: - self._update_bounds(dual=self.primal_bound(), - force_update=True) + self._update_bounds(dual=self.primal_bound(), force_update=True) self._log_current_state(config.logger, '') - config.logger.info( - 'GDPopt exiting--bounds have converged or crossed.') + config.logger.info('GDPopt exiting--bounds have converged or crossed.') self.pyomo_results.solver.termination_condition = tc.optimal return True @@ -347,7 +393,8 @@ def reached_iteration_limit(self, config): if config.iterlim is not None and self.iteration >= config.iterlim: config.logger.info( 'GDPopt unable to converge bounds within iteration limit of ' - '{} iterations.'.format(config.iterlim)) + '{} iterations.'.format(config.iterlim) + ) self.pyomo_results.solver.termination_condition = tc.maxIterations return True return False @@ -357,25 +404,26 @@ def reached_time_limit(self, config): if config.time_limit is not None and elapsed >= config.time_limit: config.logger.info( 'GDPopt exiting--Did not converge bounds ' - 'before time limit of {} seconds. '.format(config.time_limit)) + 'before time limit of {} seconds. '.format(config.time_limit) + ) self.pyomo_results.solver.termination_condition = tc.maxTimeLimit return True return False def any_termination_criterion_met(self, config): - return (self.bounds_converged(config) or - self.reached_iteration_limit(config) or - self.reached_time_limit(config)) + return ( + self.bounds_converged(config) + or self.reached_iteration_limit(config) + or self.reached_time_limit(config) + ) - def _create_pyomo_results_object_with_problem_info(self, original_model, - config): + def _create_pyomo_results_object_with_problem_info(self, original_model, config): """ Initialize a results object with results.problem information """ results = self.pyomo_results = SolverResults() - results.solver.name = 'GDPopt %s - %s' % (self.version(), - self.algorithm) + results.solver.name = 'GDPopt %s - %s' % (self.version(), self.algorithm) prob = results.problem prob.name = original_model.name @@ -388,34 +436,40 @@ def _create_pyomo_results_object_with_problem_info(self, original_model, prob.number_of_disjunctions = num_of.activated.disjunctions prob.number_of_variables = num_of.activated.variables prob.number_of_binary_variables = num_of.activated.binary_variables - prob.number_of_continuous_variables = num_of.activated.\ - continuous_variables + prob.number_of_continuous_variables = num_of.activated.continuous_variables prob.number_of_integer_variables = num_of.activated.integer_variables config.logger.info( "Original model has %s constraints (%s nonlinear) " "and %s disjunctions, " "with %s variables, of which %s are binary, %s are integer, " - "and %s are continuous." % - (num_of.activated.constraints, - num_of.activated.nonlinear_constraints, - num_of.activated.disjunctions, - num_of.activated.variables, - num_of.activated.binary_variables, - num_of.activated.integer_variables, - num_of.activated.continuous_variables)) + "and %s are continuous." + % ( + num_of.activated.constraints, + num_of.activated.nonlinear_constraints, + num_of.activated.disjunctions, + num_of.activated.variables, + num_of.activated.binary_variables, + num_of.activated.integer_variables, + num_of.activated.continuous_variables, + ) + ) # Handle missing or multiple objectives, and get sense - active_objectives = list(original_model.component_data_objects( - ctype=Objective, active=True, descend_into=True)) + active_objectives = list( + original_model.component_data_objects( + ctype=Objective, active=True, descend_into=True + ) + ) number_of_objectives = len(active_objectives) if number_of_objectives == 0: config.logger.warning( - 'Model has no active objectives. Adding dummy objective.') - discrete_obj = Objective(expr=1) - original_model.add_component(unique_component_name(original_model, - 'dummy_obj'), - discrete_obj) + 'Model has no active objectives. Adding dummy objective.' + ) + self._dummy_obj = discrete_obj = Objective(expr=1) + original_model.add_component( + unique_component_name(original_model, 'dummy_obj'), discrete_obj + ) elif number_of_objectives > 1: raise ValueError('Model has multiple active objectives.') else: @@ -431,12 +485,14 @@ def _transfer_incumbent_to_original_model(self, logger): # we don't have a solution to transfer logger.info("No feasible solutions found.") return - for var, soln in zip(self.original_util_block.algebraic_variable_list, - self.incumbent_continuous_soln): + for var, soln in zip( + self.original_util_block.algebraic_variable_list, + self.incumbent_continuous_soln, + ): var.set_value(soln, skip_validation=True) for var, soln in zip( - self.original_util_block.boolean_variable_list, - self.incumbent_boolean_soln): + self.original_util_block.boolean_variable_list, self.incumbent_boolean_soln + ): if soln is None: var.set_value(soln, skip_validation=True) elif soln > 0.5: @@ -456,6 +512,9 @@ def _delete_original_model_util_block(self): # prior one. if self.original_obj is not None: self.original_obj.activate() + if self._dummy_obj is not None: + self._dummy_obj.parent_block().del_component(self._dummy_obj) + self._dummy_obj = None def _get_final_pyomo_results_object(self): """ diff --git a/pyomo/contrib/gdpopt/branch_and_bound.py b/pyomo/contrib/gdpopt/branch_and_bound.py index b4690206840..f69a92efe16 100644 --- a/pyomo/contrib/gdpopt/branch_and_bound.py +++ b/pyomo/contrib/gdpopt/branch_and_bound.py @@ -14,19 +14,31 @@ import traceback from pyomo.common.collections import ComponentMap -from pyomo.common.config import add_docstring_list +from pyomo.common.config import document_kwargs_from_configdict from pyomo.common.errors import InfeasibleConstraintException from pyomo.contrib.fbbt.fbbt import fbbt from pyomo.contrib.gdpopt.algorithm_base_class import _GDPoptAlgorithm from pyomo.contrib.gdpopt.create_oa_subproblems import ( - add_util_block, add_disjunction_list, add_disjunct_list, - add_algebraic_variable_list, add_boolean_variable_lists, - add_transformed_boolean_variable_list) + add_util_block, + add_disjunction_list, + add_disjunct_list, + add_algebraic_variable_list, + add_boolean_variable_lists, + add_transformed_boolean_variable_list, +) from pyomo.contrib.gdpopt.config_options import ( - _add_nlp_solver_configs, _add_BB_configs, _add_mip_solver_configs, - _add_tolerance_configs) + _add_nlp_solver_configs, + _add_BB_configs, + _add_mip_solver_configs, + _add_tolerance_configs, + _add_nlp_solve_configs, +) +from pyomo.contrib.gdpopt.nlp_initialization import restore_vars_to_original_values from pyomo.contrib.gdpopt.util import ( - copy_var_list_values, SuppressInfeasibleWarning, get_main_elapsed_time) + copy_var_list_values, + SuppressInfeasibleWarning, + get_main_elapsed_time, +) from pyomo.contrib.satsolver.satsolver import satisfiable from pyomo.core import minimize, Suffix, Constraint, TransformationFactory from pyomo.opt import SolverFactory, SolverStatus @@ -36,20 +48,25 @@ # Data tuple for each node that also functions as the sort key. # Therefore, ordering of the arguments below matters. -BBNodeData = namedtuple('BBNodeData', [ - 'obj_lb', # lower bound on objective value, sign corrected to minimize - 'obj_ub', # upper bound on objective value, sign corrected to minimize - 'is_screened', # True if the node has been screened; False if not. - 'is_evaluated', # True if node has been evaluated; False if not. - 'num_unbranched_disjunctions', # number of unbranched disjunctions - 'node_count', # cumulative node counter - 'unbranched_disjunction_indices', # list of unbranched disjunction indices -]) +BBNodeData = namedtuple( + 'BBNodeData', + [ + 'obj_lb', # lower bound on objective value, sign corrected to minimize + 'obj_ub', # upper bound on objective value, sign corrected to minimize + 'is_screened', # True if the node has been screened; False if not. + 'is_evaluated', # True if node has been evaluated; False if not. + 'num_unbranched_disjunctions', # number of unbranched disjunctions + 'node_count', # cumulative node counter + 'unbranched_disjunction_indices', # list of unbranched disjunction indices + ], +) + @SolverFactory.register( 'gdpopt.lbb', doc="The LBB (logic-based branch and bound) Generalized Disjunctive " - "Programming (GDP) solver") + "Programming (GDP) solver", +) class GDP_LBB_Solver(_GDPoptAlgorithm): """The GDPopt (Generalized Disjunctive Programming optimizer) logic-based branch and bound (LBB) solver. @@ -57,21 +74,33 @@ class GDP_LBB_Solver(_GDPoptAlgorithm): Accepts models that can include nonlinear, continuous variables and constraints, as well as logical conditions. """ + CONFIG = _GDPoptAlgorithm.CONFIG() _add_mip_solver_configs(CONFIG) _add_nlp_solver_configs(CONFIG, default_solver='ipopt') + _add_nlp_solve_configs( + CONFIG, default_nlp_init_method=restore_vars_to_original_values + ) _add_tolerance_configs(CONFIG) _add_BB_configs(CONFIG) algorithm = 'LBB' + # Override solve() to customize the docstring for this solver + @document_kwargs_from_configdict(CONFIG, doc=_GDPoptAlgorithm.solve.__doc__) + def solve(self, model, **kwds): + return super().solve(model, **kwds) + def _log_citation(self, config): - config.logger.info("\n" + """- LBB algorithm: + config.logger.info( + "\n" + + """- LBB algorithm: Lee, S; Grossmann, IE. New algorithms for nonlinear generalized disjunctive programming. Comp. and Chem. Eng. 2000, 24, 2125-2141. DOI: 10.1016/S0098-1354(00)00581-0. - """.strip()) + """.strip() + ) def _solve_gdp(self, model, config): self.explored_nodes = 0 @@ -84,12 +113,12 @@ def _solve_gdp(self, model, config): add_boolean_variable_lists(util_block) root_node = TransformationFactory( - 'core.logical_to_linear').create_using(model) - root_util_blk = root_node.component( - self.original_util_block.name) + 'contrib.logical_to_disjunctive' + ).create_using(model) + root_util_blk = root_node.component(self.original_util_block.name) # Add to root utility block what we will need during the algorithm add_disjunction_list(root_util_blk) - # Now that logical_to_linear has been called. + # Now that logical_to_disjunctive has been called. add_transformed_boolean_variable_list(root_util_blk) # Map unfixed disjunct -> list of deactivated constraints @@ -125,9 +154,9 @@ def _solve_gdp(self, model, config): unfixed_disjuncts = [] disjuncts_fixed_True.indicator_var.fix(True) elif disjuncts_fixed_True and disjunction.xor: - assert len(disjuncts_fixed_True) == 1, ("XOR (only one True) " - "violated: %s" % - disjunction.name) + assert len(disjuncts_fixed_True) == 1, ( + "XOR (only one True) violated: %s" % disjunction.name + ) disjuncts_fixed_False.extend(unfixed_disjuncts) unfixed_disjuncts = [] @@ -138,9 +167,12 @@ def _solve_gdp(self, model, config): # Deactivate nonlinear constraints in unfixed disjuncts for disjunct in unfixed_disjuncts: nonlinear_constraints_in_disjunct = [ - constr for constr in disjunct.component_data_objects( - Constraint, active=True) - if constr.body.polynomial_degree() not in _linear_degrees] + constr + for constr in disjunct.component_data_objects( + Constraint, active=True + ) + if constr.body.polynomial_degree() not in _linear_degrees + ] for constraint in nonlinear_constraints_in_disjunct: constraint.deactivate() if nonlinear_constraints_in_disjunct: @@ -148,10 +180,12 @@ def _solve_gdp(self, model, config): # constraints in each disjunction for later branching # purposes root_util_blk.disjunct_to_nonlinear_constraints[ - disjunct] = nonlinear_constraints_in_disjunct + disjunct + ] = nonlinear_constraints_in_disjunct root_util_blk.disjunction_to_unfixed_disjuncts[ - disjunction] = unfixed_disjuncts + disjunction + ] = unfixed_disjuncts pass # Add the BigM suffix if it does not already exist. Used later during @@ -163,8 +197,10 @@ def _solve_gdp(self, model, config): queue = self.bb_queue = [] self.created_nodes = 0 unbranched_disjunction_indices = [ - i for i, disjunction in enumerate(root_util_blk.disjunction_list) - if disjunction in root_util_blk.disjunction_to_unfixed_disjuncts] + i + for i, disjunction in enumerate(root_util_blk.disjunction_list) + if disjunction in root_util_blk.disjunction_to_unfixed_disjuncts + ] sort_tuple = BBNodeData( obj_lb=float('-inf'), obj_ub=float('inf'), @@ -180,67 +216,80 @@ def _solve_gdp(self, model, config): while len(queue) > 0: # visit the top node on the heap node_data, node_model = heappop(queue) - config.logger.info("Nodes: %s LB %.10g Unbranched %s" % ( - self.explored_nodes, node_data.obj_lb, - node_data.num_unbranched_disjunctions)) + config.logger.info( + "Nodes: %s LB %.10g Unbranched %s" + % ( + self.explored_nodes, + node_data.obj_lb, + node_data.num_unbranched_disjunctions, + ) + ) # Check time limit if self.reached_time_limit(config): no_feasible_soln = float('inf') - self.LB = node_data.obj_lb if \ - solve_data.objective_sense == minimize else \ - -no_feasible_soln - self.UB = no_feasible_soln if \ - solve_data.objective_sense == minimize else \ - -node_data.obj_lb + self.LB = ( + node_data.obj_lb + if solve_data.objective_sense == minimize + else -no_feasible_soln + ) + self.UB = ( + no_feasible_soln + if solve_data.objective_sense == minimize + else -node_data.obj_lb + ) config.logger.info( - 'Final bound values: LB: {} UB: {}'. - format(self.LB, self.UB)) + 'Final bound values: LB: {} UB: {}'.format(self.LB, self.UB) + ) return self._get_final_results_object() # Handle current node if not node_data.is_screened: # Node has not been evaluated. self.explored_nodes += 1 - new_node_data = self._prescreen_node(node_data, node_model, - config) + new_node_data = self._prescreen_node(node_data, node_model, config) # replace with updated node data heappush(queue, (new_node_data, node_model)) - elif node_data.obj_lb < node_data.obj_ub - \ - config.bound_tolerance and not node_data.is_evaluated: + elif ( + node_data.obj_lb < node_data.obj_ub - config.bound_tolerance + and not node_data.is_evaluated + ): # Node has not been fully evaluated. # Note: infeasible and unbounded nodes will skip this condition, # because of strict inequality - new_node_data = self._evaluate_node(node_data, node_model, - config) + new_node_data = self._evaluate_node(node_data, node_model, config) # replace with updated node data heappush(queue, (new_node_data, node_model)) - elif node_data.num_unbranched_disjunctions == 0 or \ - node_data.obj_lb == float('inf'): + elif ( + node_data.num_unbranched_disjunctions == 0 + or node_data.obj_lb == float('inf') + ): # We have reached a leaf node, or the best available node is # infeasible. # Update the incumbent and put it in the original model self.update_incumbent( - node_model.component(self.original_util_block.name)) + node_model.component(self.original_util_block.name) + ) self._transfer_incumbent_to_original_model(config.logger) - self.LB = node_data.obj_lb if \ - self.objective_sense == minimize else \ - -node_data.obj_ub - self.UB = node_data.obj_ub if \ - self.objective_sense == minimize else \ - -node_data.obj_lb + self.LB = ( + node_data.obj_lb + if self.objective_sense == minimize + else -node_data.obj_ub + ) + self.UB = ( + node_data.obj_ub + if self.objective_sense == minimize + else -node_data.obj_lb + ) self.iteration = self.explored_nodes if node_data.obj_lb == float('inf'): - self.pyomo_results.solver.\ - termination_condition = tc.infeasible + self.pyomo_results.solver.termination_condition = tc.infeasible elif node_data.obj_ub == float('-inf'): - self.pyomo_results.solver.\ - termination_condition = tc.unbounded + self.pyomo_results.solver.termination_condition = tc.unbounded else: - self.pyomo_results.solver.\ - termination_condition = tc.optimal + self.pyomo_results.solver.termination_condition = tc.optimal return self._get_final_pyomo_results_object() else: self._branch_on_node(node_data, node_model, config) @@ -250,12 +299,11 @@ def _branch_on_node(self, node_data, node_model, config): # Keeping the naive branch selection disjunction_to_branch_idx = node_data.unbranched_disjunction_indices[0] - disjunction_to_branch = node_utils.disjunction_list[ - disjunction_to_branch_idx] + disjunction_to_branch = node_utils.disjunction_list[disjunction_to_branch_idx] num_unfixed_disjuncts = len( - node_utils.disjunction_to_unfixed_disjuncts[disjunction_to_branch]) - config.logger.info("Branching on disjunction %s" % - disjunction_to_branch.name) + node_utils.disjunction_to_unfixed_disjuncts[disjunction_to_branch] + ) + config.logger.info("Branching on disjunction %s" % disjunction_to_branch.name) node_count = self.created_nodes newly_created_nodes = 0 @@ -263,103 +311,118 @@ def _branch_on_node(self, node_data, node_model, config): # Create a new branch for each unfixed disjunct child_model = node_model.clone() child_utils = child_model.component(node_utils.name) - child_disjunction_to_branch = child_utils.\ - disjunction_list[ - disjunction_to_branch_idx] - child_unfixed_disjuncts = child_utils.\ - disjunction_to_unfixed_disjuncts[ - child_disjunction_to_branch] + child_disjunction_to_branch = child_utils.disjunction_list[ + disjunction_to_branch_idx + ] + child_unfixed_disjuncts = child_utils.disjunction_to_unfixed_disjuncts[ + child_disjunction_to_branch + ] for idx, child_disjunct in enumerate(child_unfixed_disjuncts): if idx == disjunct_index_to_fix_True: child_disjunct.indicator_var.fix(True) else: child_disjunct.deactivate() if not child_disjunction_to_branch.xor: - raise NotImplementedError("We still need to add support for " - "non-XOR disjunctions.") + raise NotImplementedError( + "We still need to add support for non-XOR disjunctions." + ) # This requires adding all combinations of activation status among # unfixed_disjuncts Reactivate nonlinear constraints in the # newly-fixed child disjunct - fixed_True_disjunct = child_unfixed_disjuncts[ - disjunct_index_to_fix_True] - for constr in child_utils.\ - disjunct_to_nonlinear_constraints.get(fixed_True_disjunct, ()): + fixed_True_disjunct = child_unfixed_disjuncts[disjunct_index_to_fix_True] + for constr in child_utils.disjunct_to_nonlinear_constraints.get( + fixed_True_disjunct, () + ): constr.activate() child_model.BigM[constr] = 1 # set arbitrary BigM (ok, because - # we fix corresponding Y=True) + # we fix corresponding Y=True) del child_utils.disjunction_to_unfixed_disjuncts[ - child_disjunction_to_branch] + child_disjunction_to_branch + ] for child_disjunct in child_unfixed_disjuncts: - child_utils.disjunct_to_nonlinear_constraints.pop( - child_disjunct, None) + child_utils.disjunct_to_nonlinear_constraints.pop(child_disjunct, None) newly_created_nodes += 1 child_node_data = node_data._replace( is_screened=False, is_evaluated=False, - num_unbranched_disjunctions=node_data.\ - num_unbranched_disjunctions - 1, + num_unbranched_disjunctions=node_data.num_unbranched_disjunctions - 1, node_count=node_count + newly_created_nodes, - unbranched_disjunction_indices=node_data.\ - unbranched_disjunction_indices[1:], + unbranched_disjunction_indices=node_data.unbranched_disjunction_indices[ + 1: + ], obj_ub=float('inf'), ) heappush(self.bb_queue, (child_node_data, child_model)) self.created_nodes += newly_created_nodes - config.logger.info("Added %s new nodes with %s relaxed disjunctions to " - "the heap. Size now %s." % ( - num_unfixed_disjuncts, - node_data.num_unbranched_disjunctions - 1, - len(self.bb_queue))) + config.logger.info( + "Added %s new nodes with %s relaxed disjunctions to " + "the heap. Size now %s." + % ( + num_unfixed_disjuncts, + node_data.num_unbranched_disjunctions - 1, + len(self.bb_queue), + ) + ) def _prescreen_node(self, node_data, node_model, config): # Check node for satisfiability if sat-solver is enabled if config.check_sat and satisfiable(node_model, config.logger) is False: if node_data.node_count == 0: - config.logger.info("Root node is not satisfiable. Problem is " - "infeasible.") + config.logger.info( + "Root node is not satisfiable. Problem is infeasible." + ) else: - config.logger.info("SAT solver pruned node %s" % - node_data.node_count) + config.logger.info("SAT solver pruned node %s" % node_data.node_count) new_lb = new_ub = float('inf') else: # Solve model subproblem if config.solve_local_rnGDP: config.logger.debug( "Screening node %s with LB %.10g and %s inactive " - "disjunctions." % (node_data.node_count, node_data.obj_lb, - node_data.num_unbranched_disjunctions)) - new_lb, new_ub = self._solve_local_rnGDP_subproblem(node_model, - config) + "disjunctions." + % ( + node_data.node_count, + node_data.obj_lb, + node_data.num_unbranched_disjunctions, + ) + ) + new_lb, new_ub = self._solve_local_rnGDP_subproblem(node_model, config) else: new_lb, new_ub = float('-inf'), float('inf') new_lb = max(node_data.obj_lb, new_lb) - new_node_data = node_data._replace(obj_lb=new_lb, obj_ub=new_ub, - is_screened=True) + new_node_data = node_data._replace( + obj_lb=new_lb, obj_ub=new_ub, is_screened=True + ) return new_node_data def _evaluate_node(self, node_data, node_model, config): # Solve model subproblem config.logger.info( "Exploring node %s with LB %.10g UB %.10g and %s inactive " - "disjunctions." % (node_data.node_count, node_data.obj_lb, - node_data.obj_ub, - node_data.num_unbranched_disjunctions)) + "disjunctions." + % ( + node_data.node_count, + node_data.obj_lb, + node_data.obj_ub, + node_data.num_unbranched_disjunctions, + ) + ) new_lb, new_ub = self._solve_rnGDP_subproblem(node_model, config) - new_node_data = node_data._replace(obj_lb=new_lb, obj_ub=new_ub, - is_evaluated=True) + new_node_data = node_data._replace( + obj_lb=new_lb, obj_ub=new_ub, is_evaluated=True + ) return new_node_data def _solve_rnGDP_subproblem(self, model, config): subproblem = TransformationFactory('gdp.bigm').create_using(model) obj_sense_correction = self.objective_sense != minimize model_utils = model.component(self.original_util_block.name) - subprob_utils = subproblem.component( - self.original_util_block.name) + subprob_utils = subproblem.component(self.original_util_block.name) try: with SuppressInfeasibleWarning(): @@ -370,38 +433,45 @@ def _solve_rnGDP_subproblem(self, model, config): copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, - config=config, ignore_integrality=True + config=config, + ignore_integrality=True, ) return float('inf'), float('inf') minlp_args = dict(config.minlp_solver_args) - if (config.time_limit is not None and - config.minlp_solver == 'gams'): + if config.time_limit is not None and config.minlp_solver == 'gams': elapsed = get_main_elapsed_time(self.timing) remaining = max(config.time_limit - elapsed, 1) - minlp_args['add_options'] = minlp_args.get('add_options', - []) - minlp_args['add_options'].append('option reslim=%s;' % - remaining) - result = SolverFactory(config.minlp_solver).solve(subproblem, - **minlp_args) + minlp_args['add_options'] = minlp_args.get('add_options', []) + minlp_args['add_options'].append('option reslim=%s;' % remaining) + result = SolverFactory(config.minlp_solver).solve( + subproblem, **minlp_args + ) except RuntimeError as e: config.logger.warning( "Solver encountered RuntimeError. Treating as infeasible. " - "Msg: %s\n%s" % (str(e), traceback.format_exc())) + "Msg: %s\n%s" % (str(e), traceback.format_exc()) + ) copy_var_list_values( # copy variable values, even if errored from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, - config=config, ignore_integrality=True + config=config, + ignore_integrality=True, ) return float('inf'), float('inf') term_cond = result.solver.termination_condition if term_cond == tc.optimal: assert result.solver.status is SolverStatus.ok - lb = result.problem.lower_bound if not obj_sense_correction else \ - -result.problem.upper_bound - ub = result.problem.upper_bound if not obj_sense_correction else \ - -result.problem.lower_bound + lb = ( + result.problem.lower_bound + if not obj_sense_correction + else -result.problem.upper_bound + ) + ub = ( + result.problem.upper_bound + if not obj_sense_correction + else -result.problem.lower_bound + ) copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, @@ -410,10 +480,16 @@ def _solve_rnGDP_subproblem(self, model, config): return lb, ub elif term_cond == tc.locallyOptimal or term_cond == tc.feasible: assert result.solver.status is SolverStatus.ok - lb = result.problem.lower_bound if not obj_sense_correction else \ - -result.problem.upper_bound - ub = result.problem.upper_bound if not obj_sense_correction else \ - -result.problem.lower_bound + lb = ( + result.problem.lower_bound + if not obj_sense_correction + else -result.problem.upper_bound + ) + ub = ( + result.problem.upper_bound + if not obj_sense_correction + else -result.problem.lower_bound + ) # TODO handle LB absent copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, @@ -425,27 +501,31 @@ def _solve_rnGDP_subproblem(self, model, config): copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, - config=config, ignore_integrality=True + config=config, + ignore_integrality=True, ) return float('-inf'), float('-inf') elif term_cond == tc.infeasible: copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, - config=config, ignore_integrality=True + config=config, + ignore_integrality=True, ) return float('inf'), float('inf') else: - config.logger.warning("Unknown termination condition of %s. " - "Treating as infeasible." % term_cond) + config.logger.warning( + "Unknown termination condition of %s. " + "Treating as infeasible." % term_cond + ) copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, - config=config, ignore_integrality=True + config=config, + ignore_integrality=True, ) return float('inf'), float('inf') - def _solve_local_rnGDP_subproblem(self, model, config): # TODO: The returns of this method should be improved. Currently, it # returns trivial bounds (LB, UB) = (-inf, inf) if there is an error in @@ -453,30 +533,35 @@ def _solve_local_rnGDP_subproblem(self, model, config): # unbounded. subproblem = TransformationFactory('gdp.bigm').create_using(model) obj_sense_correction = self.objective_sense != minimize - subprob_utils = subproblem.component( - self.original_util_block.name) + subprob_utils = subproblem.component(self.original_util_block.name) model_utils = model.component(self.original_util_block.name) try: with SuppressInfeasibleWarning(): result = SolverFactory(config.local_minlp_solver).solve( - subproblem, **config.local_minlp_solver_args) + subproblem, **config.local_minlp_solver_args + ) except RuntimeError as e: config.logger.warning( "Solver encountered RuntimeError. Treating as infeasible. " - "Msg: %s\n%s" % (str(e), traceback.format_exc())) + "Msg: %s\n%s" % (str(e), traceback.format_exc()) + ) copy_var_list_values( # copy variable values, even if errored from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, - config=config, ignore_integrality=True + config=config, + ignore_integrality=True, ) return float('-inf'), float('inf') term_cond = result.solver.termination_condition if term_cond == tc.optimal: assert result.solver.status is SolverStatus.ok - ub = result.problem.upper_bound if not obj_sense_correction else \ - -result.problem.lower_bound + ub = ( + result.problem.upper_bound + if not obj_sense_correction + else -result.problem.lower_bound + ) copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, @@ -485,8 +570,11 @@ def _solve_local_rnGDP_subproblem(self, model, config): return float('-inf'), ub elif term_cond == tc.locallyOptimal or term_cond == tc.feasible: assert result.solver.status is SolverStatus.ok - ub = result.problem.upper_bound if not obj_sense_correction else \ - -result.problem.lower_bound + ub = ( + result.problem.upper_bound + if not obj_sense_correction + else -result.problem.lower_bound + ) copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, @@ -497,28 +585,30 @@ def _solve_local_rnGDP_subproblem(self, model, config): copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, - config=config, ignore_integrality=True + config=config, + ignore_integrality=True, ) return float('-inf'), float('-inf') elif term_cond == tc.infeasible: copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, - config=config, ignore_integrality=True + config=config, + ignore_integrality=True, ) # [ESJ 7/11/22]: Making this float('inf'), float('inf') doesn't # break tests, and makes more sense. But I'm not sure why it's not # that way already? return float('-inf'), float('inf') else: - config.logger.warning("Unknown termination condition of %s. " - "Treating as infeasible." % term_cond) + config.logger.warning( + "Unknown termination condition of %s. " + "Treating as infeasible." % term_cond + ) copy_var_list_values( from_list=subprob_utils.algebraic_variable_list, to_list=model_utils.algebraic_variable_list, - config=config, ignore_integrality=True + config=config, + ignore_integrality=True, ) return float('-inf'), float('inf') - -GDP_LBB_Solver.solve.__doc__ = add_docstring_list( - GDP_LBB_Solver.solve.__doc__, GDP_LBB_Solver.CONFIG, indent_by=8) diff --git a/pyomo/contrib/gdpopt/config_options.py b/pyomo/contrib/gdpopt/config_options.py index 78840372c2e..386826b844c 100644 --- a/pyomo/contrib/gdpopt/config_options.py +++ b/pyomo/contrib/gdpopt/config_options.py @@ -9,358 +9,522 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.common.config import (ConfigBlock, ConfigList, ConfigValue, - In, NonNegativeFloat, NonNegativeInt, - PositiveInt) +from pyomo.common.config import ( + ConfigBlock, + ConfigList, + ConfigValue, + In, + NonNegativeFloat, + NonNegativeInt, + PositiveInt, +) from pyomo.common.deprecation import deprecation_warning -from pyomo.contrib.gdpopt.discrete_problem_initialize import ( - valid_init_strategies) -from pyomo.contrib.gdpopt.nlp_initialization import ( - restore_vars_to_original_values) +from pyomo.contrib.gdpopt.discrete_problem_initialize import valid_init_strategies +from pyomo.contrib.gdpopt.nlp_initialization import restore_vars_to_original_values from pyomo.contrib.gdpopt.util import a_logger, _DoNothing _supported_algorithms = { 'LOA': ('gdpopt.loa', 'Logic-based Outer Approximation'), 'GLOA': ('gdpopt.gloa', 'Global Logic-based Outer Approximation'), 'LBB': ('gdpopt.lbb', 'Logic-based Branch and Bound'), - 'RIC': ('gdpopt.ric', 'Relaxation with Integer Cuts') + 'RIC': ('gdpopt.ric', 'Relaxation with Integer Cuts'), + 'enumerate': ('gdpopt.enumerate', 'Enumeration of discrete solutions'), } + def _strategy_deprecation(strategy): - deprecation_warning("The argument 'strategy' has been deprecated " - "in favor of 'algorithm.'", version="6.4.2") + deprecation_warning( + "The argument 'strategy' has been deprecated in favor of 'algorithm.'", + version="6.4.2", + ) return In(_supported_algorithms)(strategy) + def _init_strategy_deprecation(strategy): - deprecation_warning("The argument 'init_strategy' has been deprecated " - "in favor of 'init_algorithm.'", version="6.4.2") + deprecation_warning( + "The argument 'init_strategy' has been deprecated " + "in favor of 'init_algorithm.'", + version="6.4.2", + ) return In(valid_init_strategies)(strategy) + def _get_algorithm_config(): CONFIG = ConfigBlock("GDPoptAlgorithm") - CONFIG.declare("strategy", ConfigValue( - default=None, domain=_strategy_deprecation, - description="DEPRECATED: Please use 'algorithm' instead." - )) - CONFIG.declare("algorithm", ConfigValue( - default=None, domain=In(_supported_algorithms), - description="Algorithm to use." - )) + CONFIG.declare( + "strategy", + ConfigValue( + default=None, + domain=_strategy_deprecation, + description="DEPRECATED: Please use 'algorithm' instead.", + ), + ) + CONFIG.declare( + "algorithm", + ConfigValue( + default=None, + domain=In(_supported_algorithms), + description="Algorithm to use.", + ), + ) return CONFIG -def _add_common_configs(CONFIG): - CONFIG.declare("iterlim", ConfigValue( - default=None, domain=NonNegativeInt, - description="Iteration limit." - )) - CONFIG.declare("time_limit", ConfigValue( - default=None, - domain=PositiveInt, - description="Time limit (seconds, default=600)", - doc=""" - Seconds allowed until terminated. Note that the time limit can - currently only be enforced between subsolver invocations. You may - need to set subsolver time limits as well.""" - )) - CONFIG.declare("tee", ConfigValue( - default=False, - description="Stream output to terminal.", - domain=bool - )) - CONFIG.declare("logger", ConfigValue( - default='pyomo.contrib.gdpopt', - description="The logger object or name to use for reporting.", - domain=a_logger - )) - -def _add_oa_configs(CONFIG): - CONFIG.declare("init_strategy", ConfigValue( - default=None, - domain=_init_strategy_deprecation, - description="DEPRECATED: Please use 'init_algorithm' instead." - )) - CONFIG.declare("init_algorithm", ConfigValue( - default="set_covering", domain=In(valid_init_strategies), - description="Initialization algorithm to use.", - doc=""" - Selects the initialization algorithm to use when generating - the initial cuts to construct the discrete problem.""" - )) - CONFIG.declare("custom_init_disjuncts", ConfigList( - # domain=ComponentSets of Disjuncts, - default=None, - description="List of disjunct sets to use for initialization." - )) - CONFIG.declare("max_slack", ConfigValue( - default=1000, domain=NonNegativeFloat, - description="Upper bound on slack variables for OA" - )) - CONFIG.declare("OA_penalty_factor", ConfigValue( - default=1000, domain=NonNegativeFloat, - description=""" - Penalty multiplication term for slack variables on the - objective value.""" - )) - CONFIG.declare("set_cover_iterlim", ConfigValue( - default=8, domain=NonNegativeInt, - description="Limit on the number of set covering iterations." - )) - CONFIG.declare("discrete_problem_transformation", ConfigValue( - default='gdp.bigm', - description=""" - Name of the transformation to use to transform the - discrete problem from a GDP to an algebraic model.""" - )) - CONFIG.declare("call_before_discrete_problem_solve", ConfigValue( - default=_DoNothing, - description="callback hook before calling the discrete problem solver", - doc=""" - Callback called right before the MILP discrete problem is solved. - Takes three arguments: The solver object, the discrete problem, and the - GDPopt utility block on the discrete problem. - - Note that unless you are *very* confident in what you are doing, the - problem should not be modified in this callback: it should be used - to interrogate the problem only. - """ - )) - CONFIG.declare("call_after_discrete_problem_solve", ConfigValue( - default=_DoNothing, - description="callback hook after a solution of the discrete problem", - doc=""" - Callback called right after the MILP discrete problem is solved. - Takes three arguments: The solver object, the discrete problem, and the - GDPopt utility block on the discrete problem. - - Note that unless you are *very* confident in what you are doing, the - problem should not be modified in this callback: it should be used - to interrogate the problem only. - """ - )) - CONFIG.declare("call_before_master_solve", ConfigValue( - default=_DoNothing, - description="DEPRECATED: Please use " - "'call_before_discrete_problem_solve'", - )) - CONFIG.declare("call_after_master_solve", ConfigValue( - default=_DoNothing, - description="DEPRECATED: Please use " - "'call_after_discrete_problem_solve'", - )) - CONFIG.declare("subproblem_initialization_method", ConfigValue( - default=restore_vars_to_original_values, # Historical default - description="""" - callback to specify custom routines to initialize the - (MI)NLP subproblems.""", - doc=""" - Callback to specify custom routines for initializing the (MI)NLP - subproblems. This method is called after the discrete problem solution - is fixed in the subproblem and before the subproblem is solved (or - pre-solved). - Accepts three arguments: the solver object, the subproblem GDPopt - utility block and the discrete problem GDPopt utility block. The - discrete problem contains the most recent discrete problem solution. +def _add_common_configs(CONFIG): + CONFIG.declare( + "iterlim", + ConfigValue( + default=None, domain=NonNegativeInt, description="Iteration limit." + ), + ) + CONFIG.declare( + "time_limit", + ConfigValue( + default=None, + domain=PositiveInt, + description="Time limit (seconds, default=600)", + doc=""" + Seconds allowed until terminated. Note that the time limit can + currently only be enforced between subsolver invocations. You may + need to set subsolver time limits as well.""", + ), + ) + CONFIG.declare( + "tee", + ConfigValue( + default=False, description="Stream output to terminal.", domain=bool + ), + ) + CONFIG.declare( + "logger", + ConfigValue( + default='pyomo.contrib.gdpopt', + description="The logger object or name to use for reporting.", + domain=a_logger, + ), + ) - The return of this method will be unused: The method should directly - set the value of the variables on the subproblem - """ - )) - CONFIG.declare("call_before_subproblem_solve", ConfigValue( - default=_DoNothing, - description="callback hook before calling the subproblem solver", - doc=""" - Callback called right before the (MI)NLP subproblem is solved. - Takes three arguments: The solver object, the subproblem and the - GDPopt utility block on the subproblem. - Note that unless you are *very* confident in what you are doing, the - subproblem should not be modified in this callback: it should be used - to interrogate the problem only. +def _add_nlp_solve_configs(CONFIG, default_nlp_init_method): + # All of these config options are expected if the algorithm solves NLP + # subproblems. + CONFIG.declare( + "integer_tolerance", + ConfigValue(default=1e-5, description="Tolerance on integral values."), + ) + CONFIG.declare( + "constraint_tolerance", + ConfigValue( + default=1e-6, + description=""" + Tolerance on constraint satisfaction. + + Increasing this tolerance corresponds to being more conservative in + declaring the model or an NLP subproblem to be infeasible. + """, + ), + ) + CONFIG.declare( + "variable_tolerance", + ConfigValue(default=1e-8, description="Tolerance on variable bounds."), + ) + CONFIG.declare( + "subproblem_initialization_method", + ConfigValue( + default=default_nlp_init_method, + description="""" + callback to specify custom routines to initialize the + (MI)NLP subproblems.""", + doc=""" + Callback to specify custom routines for initializing the (MI)NLP + subproblems. This method is called after the discrete problem solution + is fixed in the subproblem and before the subproblem is solved (or + pre-solved). + + For algorithms with a discrete problem relaxation: + This method accepts three arguments: the solver object, the subproblem + GDPopt utility block and the discrete problem GDPopt utility block. The + discrete problem contains the most recent discrete problem solution. + + For algorithms without a discrete problem relaxation: + This method accepts four arguments: the list of Disjuncts that are + currently fixed as being active, a list of values for the non-indicator + BooleanVars (empty if force_nlp_subproblem=False), and a list of + values for the integer vars (also empty if force_nlp_subproblem=False), + and last the subproblem GDPopt utility block. + + The return of this method will be unused: The method should directly + set the value of the variables on the subproblem + """, + ), + ) + CONFIG.declare( + "call_before_subproblem_solve", + ConfigValue( + default=_DoNothing, + description="callback hook before calling the subproblem solver", + doc=""" + Callback called right before the (MI)NLP subproblem is solved. + Takes three arguments: The solver object, the subproblem and the + GDPopt utility block on the subproblem. + + Note that unless you are *very* confident in what you are doing, the + subproblem should not be modified in this callback: it should be used + to interrogate the problem only. + + To initialize the problem before it is solved, please specify a method + in the 'subproblem_initialization_method' argument. + """, + ), + ) + CONFIG.declare( + "call_after_subproblem_solve", + ConfigValue( + default=_DoNothing, + description=""" + callback hook after a solution of the + "nonlinear subproblem""", + doc=""" + Callback called right after the (MI)NLP subproblem is solved. + Takes three arguments: The solver object, the subproblem, and the + GDPopt utility block on the subproblem. + + Note that unless you are *very* confident in what you are doing, the + subproblem should not be modified in this callback: it should be used + to interrogate the problem only. + """, + ), + ) + CONFIG.declare( + "call_after_subproblem_feasible", + ConfigValue( + default=_DoNothing, + description=""" + callback hook after feasible solution of + the nonlinear subproblem""", + doc=""" + Callback called right after the (MI)NLP subproblem is solved, + if it was feasible. Takes three arguments: The solver object, the + subproblem and the GDPopt utility block on the subproblem. + + Note that unless you are *very* confident in what you are doing, the + subproblem should not be modified in this callback: it should be used + to interrogate the problem only. + """, + ), + ) + CONFIG.declare( + "force_subproblem_nlp", + ConfigValue( + default=False, + description="""Force subproblems to be NLP, even if discrete variables + exist.""", + ), + ) + CONFIG.declare( + "subproblem_presolve", + ConfigValue( + default=True, + description=""" + Flag to enable or disable subproblem presolve. + Default=True.""", + domain=bool, + ), + ) + CONFIG.declare( + "tighten_nlp_var_bounds", + ConfigValue( + default=False, + description=""" + Whether or not to do feasibility-based bounds tightening + on the variables in the NLP subproblem before solving it.""", + domain=bool, + ), + ) + CONFIG.declare( + "round_discrete_vars", + ConfigValue( + default=True, + description="""Flag to round subproblem discrete variable values to the + nearest integer. Rounding is done before fixing disjuncts.""", + ), + ) + CONFIG.declare( + "max_fbbt_iterations", + ConfigValue( + default=3, + description=""" + Maximum number of feasibility-based bounds tightening + iterations to do during NLP subproblem preprocessing.""", + domain=PositiveInt, + ), + ) - To initialize the problem before it is solved, please specify a method - in the 'subproblem_initialization_method' argument. - """ - )) - CONFIG.declare("call_after_subproblem_solve", ConfigValue( - default=_DoNothing, - description=""" - callback hook after a solution of the - "nonlinear subproblem""", - doc=""" - Callback called right after the (MI)NLP subproblem is solved. - Takes three arguments: The solver object, the subproblem, and the - GDPopt utility block on the subproblem. - Note that unless you are *very* confident in what you are doing, the - subproblem should not be modified in this callback: it should be used - to interrogate the problem only. - """ - )) - CONFIG.declare("call_after_subproblem_feasible", ConfigValue( - default=_DoNothing, - description=""" - callback hook after feasible solution of - the nonlinear subproblem""", - doc=""" - Callback called right after the (MI)NLP subproblem is solved, - if it was feasible. Takes three arguments: The solver object, the - subproblem and the GDPopt utility block on the subproblem. +def _add_oa_configs(CONFIG): + _add_nlp_solve_configs( + CONFIG, default_nlp_init_method=restore_vars_to_original_values + ) - Note that unless you are *very* confident in what you are doing, the - subproblem should not be modified in this callback: it should be used - to interrogate the problem only. - """ - )) - CONFIG.declare("round_discrete_vars", ConfigValue( - default=True, - description="""Flag to round subproblem discrete variable values to the - nearest integer. Rounding is done before fixing disjuncts.""" - )) - CONFIG.declare("force_subproblem_nlp", ConfigValue( - default=False, - description="""Force subproblems to be NLP, even if discrete variables - exist.""" - )) - CONFIG.declare("mip_presolve", ConfigValue( - default=True, - description=""" + CONFIG.declare( + "init_strategy", + ConfigValue( + default=None, + domain=_init_strategy_deprecation, + description="DEPRECATED: Please use 'init_algorithm' instead.", + ), + ) + CONFIG.declare( + "init_algorithm", + ConfigValue( + default="set_covering", + domain=In(valid_init_strategies), + description="Initialization algorithm to use.", + doc=""" + Selects the initialization algorithm to use when generating + the initial cuts to construct the discrete problem.""", + ), + ) + CONFIG.declare( + "custom_init_disjuncts", + ConfigList( + # domain=ComponentSets of Disjuncts, + default=None, + description="List of disjunct sets to use for initialization.", + ), + ) + CONFIG.declare( + "max_slack", + ConfigValue( + default=1000, + domain=NonNegativeFloat, + description="Upper bound on slack variables for OA", + ), + ) + CONFIG.declare( + "OA_penalty_factor", + ConfigValue( + default=1000, + domain=NonNegativeFloat, + description=""" + Penalty multiplication term for slack variables on the + objective value.""", + ), + ) + CONFIG.declare( + "set_cover_iterlim", + ConfigValue( + default=8, + domain=NonNegativeInt, + description="Limit on the number of set covering iterations.", + ), + ) + CONFIG.declare( + "discrete_problem_transformation", + ConfigValue( + default='gdp.bigm', + description=""" + Name of the transformation to use to transform the + discrete problem from a GDP to an algebraic model.""", + ), + ) + CONFIG.declare( + "call_before_discrete_problem_solve", + ConfigValue( + default=_DoNothing, + description="callback hook before calling the discrete problem solver", + doc=""" + Callback called right before the MILP discrete problem is solved. + Takes three arguments: The solver object, the discrete problem, and the + GDPopt utility block on the discrete problem. + + Note that unless you are *very* confident in what you are doing, the + problem should not be modified in this callback: it should be used + to interrogate the problem only. + """, + ), + ) + CONFIG.declare( + "call_after_discrete_problem_solve", + ConfigValue( + default=_DoNothing, + description="callback hook after a solution of the discrete problem", + doc=""" + Callback called right after the MILP discrete problem is solved. + Takes three arguments: The solver object, the discrete problem, and the + GDPopt utility block on the discrete problem. + + Note that unless you are *very* confident in what you are doing, the + problem should not be modified in this callback: it should be used + to interrogate the problem only. + """, + ), + ) + CONFIG.declare( + "call_before_master_solve", + ConfigValue( + default=_DoNothing, + description="DEPRECATED: Please use " + "'call_before_discrete_problem_solve'", + ), + ) + CONFIG.declare( + "call_after_master_solve", + ConfigValue( + default=_DoNothing, + description="DEPRECATED: Please use 'call_after_discrete_problem_solve'", + ), + ) + CONFIG.declare( + "mip_presolve", + ConfigValue( + default=True, + description=""" Flag to enable or disable GDPopt MIP presolve. Default=True.""", - domain=bool - )) - CONFIG.declare("subproblem_presolve", ConfigValue( - default=True, - description=""" - Flag to enable or disable subproblem presolve. - Default=True.""", - domain=bool - )) - CONFIG.declare("max_fbbt_iterations", ConfigValue( - default=3, - description=""" - Maximum number of feasibility-based bounds tightening - iterations to do during NLP subproblem preprocessing.""", - domain=PositiveInt - )) - CONFIG.declare("tighten_nlp_var_bounds", ConfigValue( - default=False, - description=""" - Whether or not to do feasibility-based bounds tightening - on the variables in the NLP subproblem before solving it.""", - domain=bool - )) - CONFIG.declare("calc_disjunctive_bounds", ConfigValue( - default=False, - description=""" + domain=bool, + ), + ) + CONFIG.declare( + "calc_disjunctive_bounds", + ConfigValue( + default=False, + description=""" Calculate special disjunctive variable bounds for GLOA. False by default.""", - domain=bool - )) - CONFIG.declare("obbt_disjunctive_bounds", ConfigValue( - default=False, - description=""" - Use optimality-based bounds tightening rather than feasibility-based - bounds tightening to compute disjunctive variable bounds. False by - default.""", - domain=bool - )) + domain=bool, + ), + ) + CONFIG.declare( + "obbt_disjunctive_bounds", + ConfigValue( + default=False, + description=""" + Use optimality-based bounds tightening rather than feasibility-based + bounds tightening to compute disjunctive variable bounds. False by + default.""", + domain=bool, + ), + ) + def _add_BB_configs(CONFIG): - CONFIG.declare("check_sat", ConfigValue( - default=False, - domain=bool, - description=""" - When True, GDPopt-LBB will check satisfiability - at each node via the pyomo.contrib.satsolver interface""" - )) - CONFIG.declare("solve_local_rnGDP", ConfigValue( - default=False, - domain=bool, - description=""" - When True, GDPopt-LBB will solve a local MINLP at each node.""" - )) + CONFIG.declare( + "check_sat", + ConfigValue( + default=False, + domain=bool, + description=""" + When True, GDPopt-LBB will check satisfiability + at each node via the pyomo.contrib.satsolver interface""", + ), + ) + CONFIG.declare( + "solve_local_rnGDP", + ConfigValue( + default=False, + domain=bool, + description=""" + When True, GDPopt-LBB will solve a local MINLP at each node.""", + ), + ) def _add_mip_solver_configs(CONFIG): - CONFIG.declare("mip_solver", ConfigValue( - default="gurobi", - description=""" - Mixed-integer linear solver to use. Note that no persisent solvers - other than the auto-persistent solvers in the APPSI package are - supported.""" - )) - CONFIG.declare("mip_solver_args", ConfigBlock( - description=""" - Keyword arguments to send to the MILP subsolver solve() invocation""", - implicit=True)) + CONFIG.declare( + "mip_solver", + ConfigValue( + default="gurobi", + description=""" + Mixed-integer linear solver to use. Note that no persisent solvers + other than the auto-persistent solvers in the APPSI package are + supported.""", + ), + ) + CONFIG.declare( + "mip_solver_args", + ConfigBlock( + description=""" + Keyword arguments to send to the MILP subsolver solve() invocation""", + implicit=True, + ), + ) def _add_nlp_solver_configs(CONFIG, default_solver): - CONFIG.declare("nlp_solver", ConfigValue( - default=default_solver, - description=""" - Nonlinear solver to use. Note that no persisent solvers - other than the auto-persistent solvers in the APPSI package are - supported.""")) - CONFIG.declare("nlp_solver_args", ConfigBlock( - description=""" - Keyword arguments to send to the NLP subsolver solve() invocation""", - implicit=True)) - CONFIG.declare("minlp_solver", ConfigValue( - default="baron", - description=""" - Mixed-integer nonlinear solver to use. Note that no persisent solvers - other than the auto-persistent solvers in the APPSI package are - supported.""" - )) - CONFIG.declare("minlp_solver_args", ConfigBlock( - description=""" - Keyword arguments to send to the MINLP subsolver solve() invocation""", - implicit=True)) - CONFIG.declare("local_minlp_solver", ConfigValue( - default="bonmin", - description=""" - Mixed-integer nonlinear solver to use. Note that no persisent solvers - other than the auto-persistent solvers in the APPSI package are - supported.""" - )) - CONFIG.declare("local_minlp_solver_args", ConfigBlock( - description=""" - Keyword arguments to send to the local MINLP subsolver solve() - invocation""", - implicit=True)) + CONFIG.declare( + "nlp_solver", + ConfigValue( + default=default_solver, + description=""" + Nonlinear solver to use. Note that no persisent solvers + other than the auto-persistent solvers in the APPSI package are + supported.""", + ), + ) + CONFIG.declare( + "nlp_solver_args", + ConfigBlock( + description=""" + Keyword arguments to send to the NLP subsolver solve() invocation""", + implicit=True, + ), + ) + CONFIG.declare( + "minlp_solver", + ConfigValue( + default="baron", + description=""" + Mixed-integer nonlinear solver to use. Note that no persisent solvers + other than the auto-persistent solvers in the APPSI package are + supported.""", + ), + ) + CONFIG.declare( + "minlp_solver_args", + ConfigBlock( + description=""" + Keyword arguments to send to the MINLP subsolver solve() invocation""", + implicit=True, + ), + ) + CONFIG.declare( + "local_minlp_solver", + ConfigValue( + default="bonmin", + description=""" + Mixed-integer nonlinear solver to use. Note that no persisent solvers + other than the auto-persistent solvers in the APPSI package are + supported.""", + ), + ) + CONFIG.declare( + "local_minlp_solver_args", + ConfigBlock( + description=""" + Keyword arguments to send to the local MINLP subsolver solve() + invocation""", + implicit=True, + ), + ) + CONFIG.declare( + "small_dual_tolerance", + ConfigValue( + default=1e-8, + description=""" + When generating cuts, small duals multiplied by expressions can + cause problems. Exclude all duals smaller in absolute value than the + following.""", + ), + ) def _add_tolerance_configs(CONFIG): - CONFIG.declare("bound_tolerance", ConfigValue( - default=1E-6, domain=NonNegativeFloat, - description="Tolerance for bound convergence." - )) - CONFIG.declare("small_dual_tolerance", ConfigValue( - default=1E-8, - description=""" - When generating cuts, small duals multiplied by expressions can - cause problems. Exclude all duals smaller in absolue value than the - following.""" - )) - CONFIG.declare("integer_tolerance", ConfigValue( - default=1E-5, - description="Tolerance on integral values." - )) - CONFIG.declare("constraint_tolerance", ConfigValue( - default=1E-6, - description=""" - Tolerance on constraint satisfaction. - - Increasing this tolerance corresponds to being more conservative in - declaring the model or an NLP subproblem to be infeasible. - """ - )) - CONFIG.declare("variable_tolerance", ConfigValue( - default=1E-8, - description="Tolerance on variable bounds." - )) - CONFIG.declare("zero_tolerance", ConfigValue( - default=1E-15, - description="Tolerance on variable equal to zero.")) + CONFIG.declare( + "bound_tolerance", + ConfigValue( + default=1e-6, + domain=NonNegativeFloat, + description="Tolerance for bound convergence.", + ), + ) diff --git a/pyomo/contrib/gdpopt/create_oa_subproblems.py b/pyomo/contrib/gdpopt/create_oa_subproblems.py index 3e8e1e217c8..12266866dbc 100644 --- a/pyomo/contrib/gdpopt/create_oa_subproblems.py +++ b/pyomo/contrib/gdpopt/create_oa_subproblems.py @@ -10,53 +10,56 @@ # ___________________________________________________________________________ from pyomo.core import ( - SortComponents, Constraint, Objective, LogicalConstraint, Expression) -from pyomo.core.base import ( - TransformationFactory, Suffix, ConstraintList, Integers) + SortComponents, + Constraint, + Objective, + LogicalConstraint, + Expression, +) +from pyomo.core.base import TransformationFactory, Suffix, ConstraintList, Integers from pyomo.core.base.block import Block, TraversalStrategy from pyomo.common.collections import ComponentMap, ComponentSet from pyomo.common.modeling import unique_component_name -from pyomo.contrib.gdpopt.discrete_problem_initialize import ( - valid_init_strategies) +from pyomo.contrib.gdpopt.discrete_problem_initialize import valid_init_strategies from pyomo.contrib.gdpopt.util import ( - get_main_elapsed_time, move_nonlinear_objective_to_constraints) + get_main_elapsed_time, + move_nonlinear_objective_to_constraints, +) from pyomo.gdp.disjunct import Disjunct, Disjunction from pyomo.util.vars_from_expressions import get_vars_from_components + def _get_discrete_problem_and_subproblem(solver, config): util_block = solver.original_util_block original_model = util_block.parent_block() if config.force_subproblem_nlp: # We'll need to fix these too add_discrete_variable_list(util_block) - original_obj = move_nonlinear_objective_to_constraints(util_block, - config.logger) + original_obj = move_nonlinear_objective_to_constraints(util_block, config.logger) solver.original_obj = original_obj # create model to hold the subproblems: We create this first because # certain initialization strategies for the discrete problem need it. - subproblem = get_subproblem(original_model) - subproblem_util_block = subproblem.component(util_block.local_name) - save_initial_values(subproblem_util_block) - add_transformed_boolean_variable_list(subproblem_util_block) - subproblem_obj = next(subproblem.component_data_objects( - Objective, active=True, descend_into=True)) - subproblem_util_block.obj = Expression(expr=subproblem_obj.expr) + subproblem, subproblem_util_block = get_subproblem(original_model, util_block) # create discrete problem--the MILP relaxation start = get_main_elapsed_time(solver.timing) discrete_problem_util_block = initialize_discrete_problem( - util_block, subproblem_util_block, config, solver) + util_block, subproblem_util_block, config, solver + ) - config.logger.info('Finished discrete problem initialization in {:.2f}s ' - 'and {} iterations \n'.format( - get_main_elapsed_time(solver.timing) - start, - solver.initialization_iteration)) + config.logger.info( + 'Finished discrete problem initialization in {:.2f}s ' + 'and {} iterations \n'.format( + get_main_elapsed_time(solver.timing) - start, + solver.initialization_iteration, + ) + ) return (discrete_problem_util_block, subproblem_util_block) -def initialize_discrete_problem(util_block, subprob_util_block, config, - solver): + +def initialize_discrete_problem(util_block, subprob_util_block, config, solver): """ Calls the specified transformation (by default bigm) on the original model and removes nonlinear constraints to create a MILP discrete problem. @@ -71,26 +74,27 @@ def initialize_discrete_problem(util_block, subprob_util_block, config, discrete_problem_util_block.no_good_disjunctions = Disjunction(Integers) # deactivate nonlinear constraints - for c in discrete.component_data_objects(Constraint, active=True, - descend_into=(Block, Disjunct)): + for c in discrete.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ): if c.body.polynomial_degree() not in (1, 0): c.deactivate() # Transform to a MILP - TransformationFactory(config.discrete_problem_transformation).apply_to( - discrete) + TransformationFactory(config.discrete_problem_transformation).apply_to(discrete) add_transformed_boolean_variable_list(discrete_problem_util_block) - add_algebraic_variable_list(discrete_problem_util_block, - name='all_mip_variables') + add_algebraic_variable_list(discrete_problem_util_block, name='all_mip_variables') # Call the specified initialization strategy. (We've already validated the # input in the config logic, so we know this is okay.) init_algorithm = valid_init_strategies.get(config.init_algorithm) - init_algorithm(util_block, discrete_problem_util_block, subprob_util_block, - config, solver) + init_algorithm( + util_block, discrete_problem_util_block, subprob_util_block, config, solver + ) return discrete_problem_util_block + def add_util_block(discrete): # create a block to store the cuts name = unique_component_name(discrete, '_gdpopt_cuts') @@ -99,39 +103,68 @@ def add_util_block(discrete): return block + def add_disjunct_list(util_block): model = util_block.parent_block() - util_block.disjunct_list = list(model.component_data_objects( - ctype=Disjunct, active=True, descend_into=(Block, Disjunct), - sort=SortComponents.deterministic)) + util_block.disjunct_list = list( + model.component_data_objects( + ctype=Disjunct, + active=True, + descend_into=(Block, Disjunct), + sort=SortComponents.deterministic, + ) + ) + def add_disjunction_list(util_block): model = util_block.parent_block() - util_block.disjunction_list = list(model.component_data_objects( - ctype=Disjunction, active=True, descend_into=(Block, Disjunct), - sort=SortComponents.deterministic)) + util_block.disjunction_list = list( + model.component_data_objects( + ctype=Disjunction, + active=True, + descend_into=(Block, Disjunct), + sort=SortComponents.deterministic, + ) + ) + def add_constraint_list(util_block): model = util_block.parent_block() - util_block.constraint_list = list(model.component_data_objects( - ctype=Constraint, active=True, descend_into=(Block, Disjunct), - sort=SortComponents.deterministic)) + util_block.constraint_list = list( + model.component_data_objects( + ctype=Constraint, + active=True, + descend_into=(Block, Disjunct), + sort=SortComponents.deterministic, + ) + ) + def add_global_constraint_list(util_block): model = util_block.parent_block() - util_block.global_constraint_list = list(model.component_data_objects( - ctype=Constraint, active=True, descend_into=Block, - sort=SortComponents.deterministic)) + util_block.global_constraint_list = list( + model.component_data_objects( + ctype=Constraint, + active=True, + descend_into=Block, + sort=SortComponents.deterministic, + ) + ) + def add_constraints_by_disjunct(util_block): constraints_by_disjunct = util_block.constraints_by_disjunct = {} for disj in util_block.disjunct_list: cons_list = constraints_by_disjunct[disj] = [] for cons in disj.component_data_objects( - Constraint, active=True, descend_into=Block, - sort=SortComponents.deterministic): + Constraint, + active=True, + descend_into=Block, + sort=SortComponents.deterministic, + ): cons_list.append(cons) + def add_algebraic_variable_list(util_block, name=None): """ This collects variables from active Constraints and Objectives. It descends @@ -141,9 +174,20 @@ def add_algebraic_variable_list(util_block, name=None): model = util_block.parent_block() if name is None: name = "algebraic_variable_list" - setattr(util_block, name, list(get_vars_from_components( - model, ctype=(Constraint, Objective), descend_into=(Block, Disjunct), - active=True, sort=SortComponents.deterministic))) + setattr( + util_block, + name, + list( + get_vars_from_components( + model, + ctype=(Constraint, Objective), + descend_into=(Block, Disjunct), + active=True, + sort=SortComponents.deterministic, + ) + ), + ) + def add_discrete_variable_list(util_block): lst = util_block.discrete_variable_list = [] @@ -151,6 +195,7 @@ def add_discrete_variable_list(util_block): if v.is_integer(): lst.append(v) + # Must be collected after list of Disjuncts def add_boolean_variable_lists(util_block): util_block.boolean_variable_list = [] @@ -160,26 +205,32 @@ def add_boolean_variable_lists(util_block): ind_var_set = ComponentSet(util_block.boolean_variable_list) # This will not necessarily include the indicator_vars if it is called # before the GDP is transformed to a MIP. - for v in get_vars_from_components(util_block.parent_block(), - ctype=LogicalConstraint, - descend_into=(Block, Disjunct), - active=True, - sort=SortComponents.deterministic): + for v in get_vars_from_components( + util_block.parent_block(), + ctype=LogicalConstraint, + descend_into=(Block, Disjunct), + active=True, + sort=SortComponents.deterministic, + ): if v not in ind_var_set: util_block.boolean_variable_list.append(v) util_block.non_indicator_boolean_variable_list.append(v) + # For the discrete problem, we want the corresponding binaries for all of the -# BooleanVars. This must be called after logical_to_linear has been called. +# BooleanVars. This must be called after logical_to_disjunctive has been called. def add_transformed_boolean_variable_list(util_block): util_block.transformed_boolean_variable_list = [ - v.get_associated_binary() for v in util_block.boolean_variable_list] + v.get_associated_binary() for v in util_block.boolean_variable_list + ] + -def get_subproblem(original_model): +def get_subproblem(original_model, util_block): """Clone the original, and reclassify all the Disjuncts to Blocks. - We'll also call logical_to_linear in case any of the indicator_vars are - used in logical constraints and to make sure that the rest of the model is - algebraic (assuming it was a proper GDP to begin with). + We'll also call logical_to_disjunctive and bigm the disjunctive parts in + case any of the indicator_vars are used in logical constraints and to make + sure that the rest of the model is algebraic (assuming it was a proper + GDP to begin with). """ subproblem = original_model.clone() subproblem.name = subproblem.name + ": subproblem" @@ -188,18 +239,22 @@ def get_subproblem(original_model): if not hasattr(subproblem, 'dual'): subproblem.dual = Suffix(direction=Suffix.IMPORT) elif not isinstance(subproblem.dual, Suffix): - raise ValueError("The model contains a component called 'dual' that " - "is not a Suffix. It is of type %s. Please rename " - "this component, as GDPopt needs dual information to " - "create cuts." % type(subproblem.dual)) + raise ValueError( + "The model contains a component called 'dual' that " + "is not a Suffix. It is of type %s. Please rename " + "this component, as GDPopt needs dual information to " + "create cuts." % type(subproblem.dual) + ) subproblem.dual.activate() # reclassify all the Disjuncts as Blocks and deactivate the Disjunctions. We # don't need to add the xor constraints because we're not going to pass # infeasible integer solutions to this model. for disjunction in subproblem.component_data_objects( - Disjunction, descend_into=(Block, Disjunct), - descent_order=TraversalStrategy.PostfixDFS): + Disjunction, + descend_into=(Block, Disjunct), + descent_order=TraversalStrategy.PostfixDFS, + ): for disjunct in disjunction.disjuncts: if disjunct.indicator_var.fixed: if not disjunct.indicator_var.value: @@ -208,9 +263,20 @@ def get_subproblem(original_model): disjunction.deactivate() - TransformationFactory('core.logical_to_linear').apply_to(subproblem) + TransformationFactory('contrib.logical_to_disjunctive').apply_to(subproblem) + # transform any of the Disjuncts we created above with bigm. + TransformationFactory('gdp.bigm').apply_to(subproblem) + + subproblem_util_block = subproblem.component(util_block.local_name) + save_initial_values(subproblem_util_block) + add_transformed_boolean_variable_list(subproblem_util_block) + subproblem_obj = next( + subproblem.component_data_objects(Objective, active=True, descend_into=True) + ) + subproblem_util_block.obj = Expression(expr=subproblem_obj.expr) + + return subproblem, subproblem_util_block - return subproblem def save_initial_values(subproblem_util_block): initial_values = subproblem_util_block.initial_var_values = ComponentMap() diff --git a/pyomo/contrib/gdpopt/cut_generation.py b/pyomo/contrib/gdpopt/cut_generation.py index d9aa72ea955..36a826a4f83 100644 --- a/pyomo/contrib/gdpopt/cut_generation.py +++ b/pyomo/contrib/gdpopt/cut_generation.py @@ -15,6 +15,7 @@ from pyomo.common.collections import ComponentSet from pyomo.core import TransformationFactory, value, Constraint, Block + def _record_binary_value(var, var_value_is_one, var_value_is_zero, int_tol): val = value(var) if fabs(val - 1) <= int_tol: @@ -23,16 +24,19 @@ def _record_binary_value(var, var_value_is_one, var_value_is_zero, int_tol): var_value_is_zero.add(var) else: raise ValueError( - 'Binary %s = %s is not 0 or 1 within integer tolerance %s' % - (var.name, val, int_tol)) + 'Binary %s = %s is not 0 or 1 within integer tolerance %s' + % (var.name, val, int_tol) + ) + def add_no_good_cut(target_model_util_block, config): """Cut the current integer solution from the target model.""" var_value_is_one = ComponentSet() var_value_is_zero = ComponentSet() for var in target_model_util_block.transformed_boolean_variable_list: - _record_binary_value(var, var_value_is_one, var_value_is_zero, - config.integer_tolerance) + _record_binary_value( + var, var_value_is_one, var_value_is_zero, config.integer_tolerance + ) disjuncts = [] if config.force_subproblem_nlp: @@ -41,8 +45,9 @@ def add_no_good_cut(target_model_util_block, config): # This has possible duplicates with the above, but few enough that # we'll leave it for now. if var.is_binary(): - _record_binary_value(var, var_value_is_one, var_value_is_zero, - config.integer_tolerance) + _record_binary_value( + var, var_value_is_one, var_value_is_zero, config.integer_tolerance + ) else: # It's integer. It still has to be in the no-good cut because # else the algorithm is wrong (We're cutting more than just this @@ -58,32 +63,37 @@ def add_no_good_cut(target_model_util_block, config): # It shouldn't be possible to get here unless there's a solution to be cut. assert (var_value_is_one or var_value_is_zero) or len(disjuncts) == 0 - int_cut = (sum(1 - v for v in var_value_is_one) + - sum(v for v in var_value_is_zero)) >= 1 + int_cut = ( + sum(1 - v for v in var_value_is_one) + sum(v for v in var_value_is_zero) + ) >= 1 if len(disjuncts) > 0: idx = len(target_model_util_block.no_good_disjunctions) target_model_util_block.no_good_disjunctions[idx] = [ - [disj] for disj in disjuncts] + [[int_cut]] - config.logger.debug('Adding no-good disjunction: %s' % - _disjunction_to_str( - target_model_util_block.no_good_disjunctions[ - idx])) + [disj] for disj in disjuncts + ] + [[int_cut]] + config.logger.debug( + 'Adding no-good disjunction: %s' + % _disjunction_to_str(target_model_util_block.no_good_disjunctions[idx]) + ) # transform it TransformationFactory(config.discrete_problem_transformation).apply_to( target_model_util_block, - targets=[target_model_util_block.no_good_disjunctions[idx]]) + targets=[target_model_util_block.no_good_disjunctions[idx]], + ) else: config.logger.debug('Adding no-good cut: %s' % int_cut) # Exclude the current binary combination target_model_util_block.no_good_cuts.add(expr=int_cut) + def _disjunction_to_str(disjunction): pretty = [] for disjunct in disjunction.disjuncts: exprs = [] - for cons in disjunct.component_data_objects(Constraint, active=True, - descend_into=Block): + for cons in disjunct.component_data_objects( + Constraint, active=True, descend_into=Block + ): exprs.append(str(cons.expr)) pretty.append("[%s]" % ", ".join(exprs)) return " v ".join(pretty) diff --git a/pyomo/contrib/gdpopt/discrete_problem_initialize.py b/pyomo/contrib/gdpopt/discrete_problem_initialize.py index 438db4cad9c..3dc18132c5b 100644 --- a/pyomo/contrib/gdpopt/discrete_problem_initialize.py +++ b/pyomo/contrib/gdpopt/discrete_problem_initialize.py @@ -18,30 +18,33 @@ from pyomo.common.collections import ComponentMap from pyomo.contrib.gdpopt.cut_generation import add_no_good_cut -from pyomo.contrib.gdpopt.solve_discrete_problem import ( - solve_MILP_discrete_problem) +from pyomo.contrib.gdpopt.solve_discrete_problem import solve_MILP_discrete_problem from pyomo.contrib.gdpopt.util import _DoNothing from pyomo.core import Block, Constraint, Objective, Var, maximize, value from pyomo.gdp import Disjunct from pyomo.opt import TerminationCondition as tc + def _collect_original_bounds(discrete_prob_util_block): original_bounds = ComponentMap() for v in discrete_prob_util_block.all_mip_variables: original_bounds[v] = (v.lb, v.ub) return original_bounds + def _restore_bounds(original_bounds): for v, (l, u) in original_bounds.items(): v.setlb(l) v.setub(u) + # This contextmanager is for use when we solve the discrete problem with some # variables fixed. In that case, the bounds tightening that might be done during # preprocessing is not valid later, and we need to restore the variable bounds. @contextmanager -def preserve_discrete_problem_feasible_region(discrete_problem_util_block, - config, original_bounds=None): +def preserve_discrete_problem_feasible_region( + discrete_problem_util_block, config, original_bounds=None +): if config.mip_presolve and original_bounds is None: original_bounds = _collect_original_bounds(discrete_problem_util_block) @@ -50,8 +53,10 @@ def preserve_discrete_problem_feasible_region(discrete_problem_util_block, if config.mip_presolve: _restore_bounds(original_bounds) -def init_custom_disjuncts(util_block, discrete_problem_util_block, - subprob_util_block, config, solver): + +def init_custom_disjuncts( + util_block, discrete_problem_util_block, subprob_util_block, config, solver +): """Initialize by using user-specified custom disjuncts.""" solver._log_header(config.logger) @@ -73,10 +78,11 @@ def init_custom_disjuncts(util_block, discrete_problem_util_block, # fix the disjuncts in the linear GDP and solve config.logger.info( "Generating initial linear GDP approximation by " - "solving subproblems with user-specified active disjuncts.") + "solving subproblems with user-specified active disjuncts." + ) for orig_disj, discrete_problem_disj in zip( - util_block.disjunct_list, - discrete_problem_util_block.disjunct_list): + util_block.disjunct_list, discrete_problem_util_block.disjunct_list + ): if orig_disj in active_disjunct_set: used_disjuncts.add(orig_disj) discrete_problem_disj.indicator_var.fix(True) @@ -89,14 +95,18 @@ def init_custom_disjuncts(util_block, discrete_problem_util_block, 'initialization set number %s were unused: ' '%s\nThey may not be Disjunct objects or ' 'they may not be on the active subtree being ' - 'solved.' % (count, ", ".join([disj.name for disj in unused]))) + 'solved.' % (count, ", ".join([disj.name for disj in unused])) + ) with preserve_discrete_problem_feasible_region( - discrete_problem_util_block, config, original_bounds): + discrete_problem_util_block, config, original_bounds + ): mip_termination = solve_MILP_discrete_problem( - discrete_problem_util_block, solver, config) + discrete_problem_util_block, solver, config + ) if mip_termination is not tc.infeasible: solver._fix_discrete_soln_solve_subproblem_and_add_cuts( - discrete_problem_util_block, subprob_util_block, config) + discrete_problem_util_block, subprob_util_block, config + ) # remove the integer solution add_no_good_cut(discrete_problem_util_block, config) else: @@ -104,22 +114,25 @@ def init_custom_disjuncts(util_block, discrete_problem_util_block, 'MILP relaxation infeasible for user-specified ' 'custom initialization disjunct set %s. ' 'Skipping that set and continuing on.' - % list(disj.name for disj in active_disjunct_set)) + % list(disj.name for disj in active_disjunct_set) + ) solver.initialization_iteration += 1 -def init_fixed_disjuncts(util_block, discrete_problem_util_block, - subprob_util_block, config, solver): + +def init_fixed_disjuncts( + util_block, discrete_problem_util_block, subprob_util_block, config, solver +): """Initialize by solving the problem with the current disjunct values.""" config.logger.info( "Generating initial linear GDP approximation by " - "solving subproblem with original user-specified disjunct values.") + "solving subproblem with original user-specified disjunct values." + ) solver._log_header(config.logger) # Again, if we presolve, we are going to tighten the bounds after fixing the # indicator_vars, so it won't be valid afterwards and we need to restore it. - with preserve_discrete_problem_feasible_region(discrete_problem_util_block, - config): + with preserve_discrete_problem_feasible_region(discrete_problem_util_block, config): # fix the disjuncts in the discrete problem and send for solution. already_fixed = set() for disj in discrete_problem_util_block.disjunct_list: @@ -134,7 +147,8 @@ def init_fixed_disjuncts(util_block, discrete_problem_util_block, # anything. So first we solve the discrete problem in case we need # values for other discrete variables, and to make sure it's feasible. mip_termination = solve_MILP_discrete_problem( - discrete_problem_util_block, solver, config) + discrete_problem_util_block, solver, config + ) # restore the fixed status of the indicator_variables for disj in discrete_problem_util_block.disjunct_list: @@ -143,30 +157,36 @@ def init_fixed_disjuncts(util_block, discrete_problem_util_block, if mip_termination is not tc.infeasible: solver._fix_discrete_soln_solve_subproblem_and_add_cuts( - discrete_problem_util_block, subprob_util_block, config) + discrete_problem_util_block, subprob_util_block, config + ) add_no_good_cut(discrete_problem_util_block, config) else: config.logger.error( 'MILP relaxation infeasible for initial user-specified ' 'disjunct values. ' - 'Skipping initialization.') + 'Skipping initialization.' + ) solver.initialization_iteration += 1 + @contextmanager -def use_discrete_problem_for_max_binary_initialization( - discrete_problem_util_block): +def use_discrete_problem_for_max_binary_initialization(discrete_problem_util_block): m = discrete_problem_util_block.parent_block() # Set up binary maximization objective - original_objective = next(m.component_data_objects(Objective, active=True, - descend_into=True)) + original_objective = next( + m.component_data_objects(Objective, active=True, descend_into=True) + ) original_objective.deactivate() - binary_vars = (v for v in m.component_data_objects( - ctype=Var, descend_into=(Block, Disjunct)) - if v.is_binary() and not v.fixed) + binary_vars = ( + v + for v in m.component_data_objects(ctype=Var, descend_into=(Block, Disjunct)) + if v.is_binary() and not v.fixed + ) discrete_problem_util_block.max_binary_obj = Objective( - expr=sum(binary_vars), sense=maximize) + expr=sum(binary_vars), sense=maximize + ) yield @@ -175,8 +195,10 @@ def use_discrete_problem_for_max_binary_initialization( del discrete_problem_util_block.max_binary_obj original_objective.activate() -def init_max_binaries(util_block, discrete_problem_util_block, - subprob_util_block, config, solver): + +def init_max_binaries( + util_block, discrete_problem_util_block, subprob_util_block, config, solver +): """Initialize by maximizing binary variables and disjuncts. This function activates as many binary variables and disjuncts as @@ -186,38 +208,44 @@ def init_max_binaries(util_block, discrete_problem_util_block, config.logger.info( "Generating initial linear GDP approximation by " "solving a subproblem that maximizes " - "the sum of all binary and logical variables.") + "the sum of all binary and logical variables." + ) solver._log_header(config.logger) # As with set covering, this is only a change of objective. The formulation # may be tightened, but that is valid for the duration. with use_discrete_problem_for_max_binary_initialization( - discrete_problem_util_block): + discrete_problem_util_block + ): mip_termination = solve_MILP_discrete_problem( - discrete_problem_util_block, solver, config) + discrete_problem_util_block, solver, config + ) if mip_termination is not tc.infeasible: solver._fix_discrete_soln_solve_subproblem_and_add_cuts( - discrete_problem_util_block, subprob_util_block, config) + discrete_problem_util_block, subprob_util_block, config + ) else: config.logger.debug( "MILP relaxation for initialization was infeasible. " - "Problem is infeasible.") + "Problem is infeasible." + ) solver._update_dual_bound_to_infeasible() return False add_no_good_cut(discrete_problem_util_block, config) solver.initialization_iteration += 1 + @contextmanager def use_discrete_problem_for_set_covering(discrete_problem_util_block): m = discrete_problem_util_block.parent_block() - original_objective = next(m.component_data_objects(Objective, active=True, - descend_into=True)) + original_objective = next( + m.component_data_objects(Objective, active=True, descend_into=True) + ) original_objective.deactivate() # placeholder for the objective - discrete_problem_util_block.set_cover_obj = Objective(expr=0, - sense=maximize) + discrete_problem_util_block.set_cover_obj = Objective(expr=0, sense=maximize) yield @@ -226,23 +254,26 @@ def use_discrete_problem_for_set_covering(discrete_problem_util_block): del discrete_problem_util_block.set_cover_obj original_objective.activate() -def update_set_covering_objective(discrete_problem_util_block, - disj_needs_cover): + +def update_set_covering_objective(discrete_problem_util_block, disj_needs_cover): # number of disjuncts that still need to be covered num_needs_cover = sum(1 for disj_bool in disj_needs_cover if disj_bool) # number of disjuncts that have been covered num_covered = len(disj_needs_cover) - num_needs_cover # weights for the set covering problem - weights = list((num_covered + 1 if disj_bool else 1) - for disj_bool in disj_needs_cover) + weights = list( + (num_covered + 1 if disj_bool else 1) for disj_bool in disj_needs_cover + ) # Update set covering objective discrete_problem_util_block.set_cover_obj.expr = sum( weight * disj.binary_indicator_var - for (weight, disj) in zip(weights, - discrete_problem_util_block.disjunct_list)) + for (weight, disj) in zip(weights, discrete_problem_util_block.disjunct_list) + ) + -def init_set_covering(util_block, discrete_problem_util_block, - subprob_util_block, config, solver): +def init_set_covering( + util_block, discrete_problem_util_block, subprob_util_block, config, solver +): """Initialize by solving problems to cover the set of all disjuncts. The purpose of this initialization is to generate linearizations @@ -258,10 +289,14 @@ def init_set_covering(util_block, discrete_problem_util_block, # List of True/False if the corresponding disjunct in # disjunct_list still needs to be covered by the initialization disjunct_needs_cover = list( - any(constr.body.polynomial_degree() not in (0, 1) + any( + constr.body.polynomial_degree() not in (0, 1) for constr in disj.component_data_objects( - ctype=Constraint, active=True, descend_into=True)) - for disj in util_block.disjunct_list) + ctype=Constraint, active=True, descend_into=True + ) + ) + for disj in util_block.disjunct_list + ) subprob = subprob_util_block.parent_block() # We borrow the discrete problem to be the set covering MIP. This is only a @@ -271,28 +306,31 @@ def init_set_covering(util_block, discrete_problem_util_block, # feasibility-based tightening will remain valid for the duration. with use_discrete_problem_for_set_covering(discrete_problem_util_block): iter_count = 1 - while (any(disjunct_needs_cover) and - iter_count <= config.set_cover_iterlim): + while any(disjunct_needs_cover) and iter_count <= config.set_cover_iterlim: config.logger.debug( - "%s disjuncts need to be covered." % - disjunct_needs_cover.count(True) + "%s disjuncts need to be covered." % disjunct_needs_cover.count(True) ) ## Solve set covering MIP - update_set_covering_objective(discrete_problem_util_block, - disjunct_needs_cover) + update_set_covering_objective( + discrete_problem_util_block, disjunct_needs_cover + ) mip_termination = solve_MILP_discrete_problem( - discrete_problem_util_block, solver, config) + discrete_problem_util_block, solver, config + ) if mip_termination is tc.infeasible: - config.logger.debug('Set covering problem is infeasible. ' - 'Problem may have no more feasible ' - 'disjunctive realizations.') + config.logger.debug( + 'Set covering problem is infeasible. ' + 'Problem may have no more feasible ' + 'disjunctive realizations.' + ) if iter_count <= 1: config.logger.warning( 'Set covering problem is infeasible. ' 'Check your linear and logical constraints ' - 'for contradictions.') + 'for contradictions.' + ) solver._update_dual_bound_to_infeasible() # problem is infeasible. break return False @@ -300,21 +338,23 @@ def init_set_covering(util_block, discrete_problem_util_block, config.logger.debug('Solved set covering MIP') ## solve local NLP - nlp_feasible = solver.\ - _fix_discrete_soln_solve_subproblem_and_add_cuts( - discrete_problem_util_block, subprob_util_block, - config) + nlp_feasible = solver._fix_discrete_soln_solve_subproblem_and_add_cuts( + discrete_problem_util_block, subprob_util_block, config + ) if nlp_feasible: # if successful, update sets active_disjuncts = list( - fabs(value(disj.binary_indicator_var) - 1) <= - config.integer_tolerance for disj in - discrete_problem_util_block.disjunct_list) + fabs(value(disj.binary_indicator_var) - 1) + <= config.integer_tolerance + for disj in discrete_problem_util_block.disjunct_list + ) # Update the disjunct needs cover list disjunct_needs_cover = list( - (needed_cover and not was_active) for - (needed_cover, was_active) in - zip(disjunct_needs_cover, active_disjuncts)) + (needed_cover and not was_active) + for (needed_cover, was_active) in zip( + disjunct_needs_cover, active_disjuncts + ) + ) add_no_good_cut(discrete_problem_util_block, config) iter_count += 1 solver.initialization_iteration += 1 @@ -324,17 +364,19 @@ def init_set_covering(util_block, discrete_problem_util_block, # disjuncts config.logger.warning( 'Iteration limit reached for set covering initialization ' - 'without covering all disjuncts.') + 'without covering all disjuncts.' + ) return False config.logger.info("Initialization complete.") return True + # Valid initialization strategies valid_init_strategies = { 'no_init': _DoNothing, 'set_covering': init_set_covering, 'max_binary': init_max_binaries, 'fix_disjuncts': init_fixed_disjuncts, - 'custom_disjuncts': init_custom_disjuncts + 'custom_disjuncts': init_custom_disjuncts, } diff --git a/pyomo/contrib/gdpopt/enumerate.py b/pyomo/contrib/gdpopt/enumerate.py new file mode 100644 index 00000000000..45ecc8864f9 --- /dev/null +++ b/pyomo/contrib/gdpopt/enumerate.py @@ -0,0 +1,186 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from itertools import product + +from pyomo.common.collections import ComponentSet +from pyomo.common.config import document_kwargs_from_configdict + +from pyomo.contrib.gdpopt.algorithm_base_class import _GDPoptAlgorithm +from pyomo.contrib.gdpopt.config_options import ( + _add_mip_solver_configs, + _add_nlp_solve_configs, + _add_nlp_solver_configs, +) +from pyomo.contrib.gdpopt.nlp_initialization import ( + restore_vars_to_original_values_enumerate, +) +from pyomo.contrib.gdpopt.create_oa_subproblems import ( + add_discrete_variable_list, + add_disjunction_list, + get_subproblem, +) +from pyomo.contrib.gdpopt.solve_subproblem import solve_subproblem +from pyomo.contrib.gdpopt.util import ( + fix_discrete_solution_in_subproblem, + time_code, + get_main_elapsed_time, +) + +from pyomo.core import value +from pyomo.opt import TerminationCondition as tc +from pyomo.opt.base import SolverFactory + + +@SolverFactory.register( + 'gdpopt.enumerate', + doc="Generalized Disjunctive Programming (GDP) solver that enumerates " + "all discrete solutions", +) +class GDP_Enumeration_Solver(_GDPoptAlgorithm): + """ + Solves Generalized Disjunctive Programming (GDP) by enumerating all + discrete solutions and solving the resulting NLP subproblems, then + returning the best solution found. + + Accepts models that can include nonlinear, continuous variables and + constraints, as well as logical conditions. For non-convex problems, + the algorithm will not be exact unless the NLP subproblems are solved + globally. + """ + + CONFIG = _GDPoptAlgorithm.CONFIG() + _add_nlp_solver_configs(CONFIG, default_solver='ipopt') + _add_nlp_solve_configs( + CONFIG, default_nlp_init_method=restore_vars_to_original_values_enumerate + ) + # If we don't enumerate over integer values, we might have MILP subproblems + _add_mip_solver_configs(CONFIG) + + algorithm = 'enumerate' + + # Override solve() to customize the docstring for this solver + @document_kwargs_from_configdict(CONFIG, doc=_GDPoptAlgorithm.solve.__doc__) + def solve(self, model, **kwds): + return super().solve(model, **kwds) + + def _discrete_solution_iterator( + self, disjunctions, non_indicator_boolean_vars, discrete_var_list, config + ): + discrete_var_values = [range(v.lb, v.ub + 1) for v in discrete_var_list] + # we will calculate all the possible indicator_var realizations, and + # then multiply those out by all the boolean var realizations and all + # the integer var realizations. + for true_indicators in product( + *[disjunction.disjuncts for disjunction in disjunctions] + ): + if not config.force_subproblem_nlp: + yield (ComponentSet(true_indicators), (), ()) + else: + for boolean_realization in product( + [True, False], repeat=len(non_indicator_boolean_vars) + ): + for integer_realization in product(*discrete_var_values): + yield ( + ComponentSet(true_indicators), + boolean_realization, + integer_realization, + ) + + # Override logging so that we print progress in terms of the number of + # iterations needed to fully enumerate the discrete space. + def _log_current_state(self, logger, subproblem_type, primal_improved=False): + star = "*" if primal_improved else "" + logger.info( + self.log_formatter.format( + "{}/{}".format(self.iteration, self.num_discrete_solns), + subproblem_type, + self.LB, + self.UB, + self.relative_gap(), + get_main_elapsed_time(self.timing), + star, + ) + ) + + def _solve_gdp(self, original_model, config): + logger = config.logger + + util_block = self.original_util_block + # From preprocessing to make sure this *is* a GDP, we already have + # lists of: + # * Disjuncts + # * BooleanVars + # * Algebraic vars + # But we need to gather the Disjunctions and integer vars as well: + add_disjunction_list(util_block) + add_discrete_variable_list(util_block) + + subproblem, subproblem_util_block = get_subproblem(original_model, util_block) + + discrete_solns = list( + self._discrete_solution_iterator( + subproblem_util_block.disjunction_list, + subproblem_util_block.non_indicator_boolean_variable_list, + subproblem_util_block.discrete_variable_list, + config, + ) + ) + self.num_discrete_solns = len(discrete_solns) + for soln in discrete_solns: + # We will interrupt based on time limit or iteration limit: + if self.reached_time_limit(config) or self.reached_iteration_limit(config): + break + self.iteration += 1 + + with time_code(self.timing, 'nlp'): + with fix_discrete_solution_in_subproblem( + *soln, subproblem_util_block, config, self + ): + nlp_termination = solve_subproblem( + subproblem_util_block, self, config + ) + if nlp_termination in {tc.optimal, tc.feasible}: + primal_improved = self._update_bounds_after_solve( + 'subproblem', + primal=value(subproblem_util_block.obj.expr), + logger=config.logger, + ) + if primal_improved: + self.update_incumbent(subproblem_util_block) + + elif nlp_termination == tc.unbounded: + # the whole problem is unbounded, we can stop + self._update_primal_bound_to_unbounded(config) + self._log_current_state(config.logger, 'subproblem', True) + break + + else: + # Just log where we are + self._log_current_state(config.logger, 'subproblem') + + if self.iteration == self.num_discrete_solns: + # We can terminate optimally or declare infeasibility: We have + # enumerated all solutions, so our incumbent is optimal (or + # locally optimal, depending on how we solved the subproblems) + # if it exists, and if not then there is no solution. + if self.incumbent_boolean_soln is None: + self._update_dual_bound_to_infeasible() + self._load_infeasible_termination_status(config) + else: # the incumbent is optimal + self._update_bounds(dual=self.primal_bound(), force_update=True) + self._log_current_state(config.logger, '') + config.logger.info( + 'GDPopt exiting--all discrete solutions have been ' + 'enumerated.' + ) + self.pyomo_results.solver.termination_condition = tc.optimal + break diff --git a/pyomo/contrib/gdpopt/gloa.py b/pyomo/contrib/gdpopt/gloa.py index 32fe792fac2..ba8ed2fe234 100644 --- a/pyomo/contrib/gdpopt/gloa.py +++ b/pyomo/contrib/gdpopt/gloa.py @@ -9,35 +9,42 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.common.config import add_docstring_list +from pyomo.common.config import document_kwargs_from_configdict from pyomo.common.errors import DeveloperError from pyomo.common.modeling import unique_component_name from pyomo.contrib.gdp_bounds.info import disjunctive_bounds from pyomo.contrib.gdpopt.algorithm_base_class import _GDPoptAlgorithm from pyomo.contrib.gdpopt.config_options import ( - _add_oa_configs, _add_mip_solver_configs, _add_nlp_solver_configs, - _add_tolerance_configs) + _add_oa_configs, + _add_mip_solver_configs, + _add_nlp_solver_configs, + _add_tolerance_configs, +) from pyomo.contrib.gdpopt.create_oa_subproblems import ( - _get_discrete_problem_and_subproblem, add_constraints_by_disjunct, - add_global_constraint_list) + _get_discrete_problem_and_subproblem, + add_constraints_by_disjunct, + add_global_constraint_list, +) from pyomo.contrib.gdpopt.cut_generation import add_no_good_cut from pyomo.contrib.gdpopt.oa_algorithm_utils import _OAAlgorithmMixIn -from pyomo.contrib.gdpopt.solve_discrete_problem import ( - solve_MILP_discrete_problem) +from pyomo.contrib.gdpopt.solve_discrete_problem import solve_MILP_discrete_problem from pyomo.contrib.gdpopt.util import ( - _add_bigm_constraint_to_transformed_model, time_code) + _add_bigm_constraint_to_transformed_model, + time_code, +) from pyomo.contrib.mcpp.pyomo_mcpp import McCormick as mc, MCPP_Error -from pyomo.core import ( - Constraint, Block, NonNegativeIntegers, Objective, value) +from pyomo.core import Constraint, Block, NonNegativeIntegers, Objective, value from pyomo.core.expr.numvalue import is_potentially_variable from pyomo.core.expr.visitor import identify_variables from pyomo.opt.base import SolverFactory + @SolverFactory.register( 'gdpopt.gloa', doc="The GLOA (global logic-based outer approximation) Generalized " - "Disjunctive Programming (GDP) solver") + "Disjunctive Programming (GDP) solver", +) class GDP_GLOA_Solver(_GDPoptAlgorithm, _OAAlgorithmMixIn): """The GDPopt (Generalized Disjunctive Programming optimizer) global logic-based outer approximation (GLOA) solver. @@ -45,6 +52,7 @@ class GDP_GLOA_Solver(_GDPoptAlgorithm, _OAAlgorithmMixIn): Accepts models that can include nonlinear, continuous variables and constraints, as well as logical conditions. """ + CONFIG = _GDPoptAlgorithm.CONFIG() _add_oa_configs(CONFIG) _add_mip_solver_configs(CONFIG) @@ -53,14 +61,22 @@ class GDP_GLOA_Solver(_GDPoptAlgorithm, _OAAlgorithmMixIn): algorithm = 'GLOA' + # Override solve() to customize the docstring for this solver + @document_kwargs_from_configdict(CONFIG, doc=_GDPoptAlgorithm.solve.__doc__) + def solve(self, model, **kwds): + return super().solve(model, **kwds) + def _log_citation(self, config): - config.logger.info("\n" + """- GLOA algorithm: + config.logger.info( + "\n" + + """- GLOA algorithm: Lee, S; Grossmann, IE. A Global Optimization Algorithm for Nonconvex Generalized Disjunctive Programming and Applications to Process Systems. Comp. and Chem. Eng. 2001, 25, 1675-1697. DOI: 10.1016/S0098-1354(01)00732-3. - """.strip()) + """.strip() + ) def _solve_gdp(self, original_model, config): logger = config.logger @@ -73,13 +89,15 @@ def _solve_gdp(self, original_model, config): # constraints will be added by the transformation to a MIP, so these are # all we'll ever need. add_global_constraint_list(self.original_util_block) - (discrete_problem_util_block, - subproblem_util_block) = _get_discrete_problem_and_subproblem(self, - config) + ( + discrete_problem_util_block, + subproblem_util_block, + ) = _get_discrete_problem_and_subproblem(self, config) discrete = discrete_problem_util_block.parent_block() subproblem = subproblem_util_block.parent_block() - discrete_obj = next(discrete.component_data_objects( - Objective, active=True, descend_into=True)) + discrete_obj = next( + discrete.component_data_objects(Objective, active=True, descend_into=True) + ) self._log_header(logger) @@ -90,16 +108,19 @@ def _solve_gdp(self, original_model, config): # solve linear discrete problem with time_code(self.timing, 'mip'): mip_feasible = solve_MILP_discrete_problem( - discrete_problem_util_block, self, config) + discrete_problem_util_block, self, config + ) self._update_bounds_after_discrete_problem_solve( - mip_feasible, discrete_obj, logger) + mip_feasible, discrete_obj, logger + ) # Check termination conditions if self.any_termination_criterion_met(config): break with time_code(self.timing, 'nlp'): self._fix_discrete_soln_solve_subproblem_and_add_cuts( - discrete_problem_util_block, subproblem_util_block, config) + discrete_problem_util_block, subproblem_util_block, config + ) # Add integer cut with time_code(self.timing, "integer cut generation"): @@ -109,28 +130,34 @@ def _solve_gdp(self, original_model, config): if self.any_termination_criterion_met(config): break - def _add_cuts_to_discrete_problem(self, subproblem_util_block, - discrete_problem_util_block, - objective_sense, config, timing): + def _add_cuts_to_discrete_problem( + self, + subproblem_util_block, + discrete_problem_util_block, + objective_sense, + config, + timing, + ): """Add affine cuts""" m = discrete_problem_util_block.parent_block() if hasattr(discrete_problem_util_block, "aff_utils_blocks"): aff_utils_blocks = discrete_problem_util_block.aff_utils_blocks else: - aff_utils_blocks = discrete_problem_util_block.aff_utils_blocks = \ - dict() + aff_utils_blocks = discrete_problem_util_block.aff_utils_blocks = dict() config.logger.debug("Adding affine cuts.") counter = 0 for discrete_var, subprob_var in zip( - discrete_problem_util_block.algebraic_variable_list, - subproblem_util_block.algebraic_variable_list): + discrete_problem_util_block.algebraic_variable_list, + subproblem_util_block.algebraic_variable_list, + ): val = subprob_var.value if val is not None and not discrete_var.fixed: discrete_var.set_value(val, skip_validation=True) for constr in self._get_active_untransformed_constraints( - discrete_problem_util_block, config): + discrete_problem_util_block, config + ): disjunctive_var_bounds = disjunctive_bounds(constr.parent_block()) if constr.body.polynomial_degree() in (1, 0): @@ -144,17 +171,25 @@ def _add_cuts_to_discrete_problem(self, subproblem_util_block, try: mc_eqn = mc(constr.body, disjunctive_var_bounds) except MCPP_Error as e: - config.logger.debug("Skipping constraint %s due to MCPP " - "error %s" % (constr.name, str(e))) + config.logger.debug( + "Skipping constraint %s due to MCPP " + "error %s" % (constr.name, str(e)) + ) continue # skip to the next constraint ccSlope = mc_eqn.subcc() cvSlope = mc_eqn.subcv() ccStart = mc_eqn.concave() cvStart = mc_eqn.convex() - ub_int = min(value(constr.upper), - mc_eqn.upper()) if constr.has_ub() else mc_eqn.upper() - lb_int = max(value(constr.lower), - mc_eqn.lower()) if constr.has_lb() else mc_eqn.lower() + ub_int = ( + min(value(constr.upper), mc_eqn.upper()) + if constr.has_ub() + else mc_eqn.upper() + ) + lb_int = ( + max(value(constr.lower), mc_eqn.lower()) + if constr.has_lb() + else mc_eqn.lower() + ) parent_block = constr.parent_block() # Create a block on which to put outer approximation cuts. @@ -166,31 +201,31 @@ def _add_cuts_to_discrete_problem(self, subproblem_util_block, aff_utils_blocks[parent_block] = aff_utils aff_utils.GDPopt_aff_cons = Constraint(NonNegativeIntegers) aff_cuts = aff_utils.GDPopt_aff_cons - cut_body = sum(ccSlope[var] * (var - var.value) for var in - vars_in_constr if not var.fixed) + cut_body = sum( + ccSlope[var] * (var - var.value) + for var in vars_in_constr + if not var.fixed + ) if not is_potentially_variable(cut_body): - if (cut_body + ccStart >= lb_int - config.constraint_tolerance - and cut_body + cvStart <= ub_int + - config.constraint_tolerance): + if ( + cut_body + ccStart >= lb_int - config.constraint_tolerance + and cut_body + cvStart <= ub_int + config.constraint_tolerance + ): # We won't add them, but nothing is wrong--they hold config.logger.debug("Affine cut is trivially True.") else: # something went wrong. - raise DeveloperError("One of the affine cuts is trivially " - "False.") + raise DeveloperError("One of the affine cuts is trivially False.") else: concave_cut = cut_body + ccStart >= lb_int convex_cut = cut_body + cvStart <= ub_int idx = len(aff_cuts) aff_cuts[idx] = concave_cut - aff_cuts[idx+1] = convex_cut - _add_bigm_constraint_to_transformed_model(m, aff_cuts[idx], - aff_cuts) - _add_bigm_constraint_to_transformed_model(m, aff_cuts[idx+1], - aff_cuts) + aff_cuts[idx + 1] = convex_cut + _add_bigm_constraint_to_transformed_model(m, aff_cuts[idx], aff_cuts) + _add_bigm_constraint_to_transformed_model( + m, aff_cuts[idx + 1], aff_cuts + ) counter += 2 config.logger.debug("Added %s affine cuts" % counter) - -GDP_GLOA_Solver.solve.__doc__ = add_docstring_list( - GDP_GLOA_Solver.solve.__doc__, GDP_GLOA_Solver.CONFIG, indent_by=8) diff --git a/pyomo/contrib/gdpopt/loa.py b/pyomo/contrib/gdpopt/loa.py index 1d118dc645a..6a9889065bf 100644 --- a/pyomo/contrib/gdpopt/loa.py +++ b/pyomo/contrib/gdpopt/loa.py @@ -13,24 +13,38 @@ from math import copysign from pyomo.common.collections import ComponentMap -from pyomo.common.config import add_docstring_list +from pyomo.common.config import document_kwargs_from_configdict from pyomo.common.modeling import unique_component_name from pyomo.contrib.gdpopt.algorithm_base_class import _GDPoptAlgorithm from pyomo.contrib.gdpopt.config_options import ( - _add_oa_configs, _add_mip_solver_configs, _add_nlp_solver_configs, - _add_tolerance_configs) + _add_oa_configs, + _add_mip_solver_configs, + _add_nlp_solver_configs, + _add_tolerance_configs, +) from pyomo.contrib.gdpopt.create_oa_subproblems import ( - _get_discrete_problem_and_subproblem, add_constraint_list) + _get_discrete_problem_and_subproblem, + add_constraint_list, +) from pyomo.contrib.gdpopt.cut_generation import add_no_good_cut from pyomo.contrib.gdpopt.oa_algorithm_utils import _OAAlgorithmMixIn -from pyomo.contrib.gdpopt.solve_discrete_problem import ( - solve_MILP_discrete_problem) +from pyomo.contrib.gdpopt.solve_discrete_problem import solve_MILP_discrete_problem from pyomo.contrib.gdpopt.util import ( - time_code, _add_bigm_constraint_to_transformed_model) + time_code, + _add_bigm_constraint_to_transformed_model, +) from pyomo.core import ( - Block, Constraint, minimize, NonNegativeIntegers, NonNegativeReals, - Objective, value, Var, VarList) + Block, + Constraint, + minimize, + NonNegativeIntegers, + NonNegativeReals, + Objective, + value, + Var, + VarList, +) from pyomo.core.expr import differentiate from pyomo.core.expr.visitor import identify_variables from pyomo.gdp import Disjunct @@ -40,10 +54,12 @@ MAX_SYMBOLIC_DERIV_SIZE = 1000 JacInfo = namedtuple('JacInfo', ['mode', 'vars', 'jac']) + @SolverFactory.register( 'gdpopt.loa', doc="The LOA (logic-based outer approximation) Generalized Disjunctive " - "Programming (GDP) solver") + "Programming (GDP) solver", +) class GDP_LOA_Solver(_GDPoptAlgorithm, _OAAlgorithmMixIn): """The GDPopt (Generalized Disjunctive Programming optimizer) logic-based outer approximation (LOA) solver. @@ -52,6 +68,7 @@ class GDP_LOA_Solver(_GDPoptAlgorithm, _OAAlgorithmMixIn): constraints, as well as logical conditions. For nonconvex problems, LOA may not report rigorous dual bounds. """ + CONFIG = _GDPoptAlgorithm.CONFIG() _add_oa_configs(CONFIG) _add_mip_solver_configs(CONFIG) @@ -60,13 +77,21 @@ class GDP_LOA_Solver(_GDPoptAlgorithm, _OAAlgorithmMixIn): algorithm = 'LOA' + # Override solve() to customize the docstring for this solver + @document_kwargs_from_configdict(CONFIG, doc=_GDPoptAlgorithm.solve.__doc__) + def solve(self, model, **kwds): + return super().solve(model, **kwds) + def _log_citation(self, config): - config.logger.info("\n" + """- LOA algorithm: + config.logger.info( + "\n" + + """- LOA algorithm: Türkay, M; Grossmann, IE. Logic-based MINLP algorithms for the optimal synthesis of process networks. Comp. and Chem. Eng. 1996, 20(8), 959–978. DOI: 10.1016/0098-1354(95)00219-7. - """.strip()) + """.strip() + ) def _solve_gdp(self, original_model, config): logger = config.logger @@ -74,15 +99,17 @@ def _solve_gdp(self, original_model, config): # We'll need these to get dual info after solving subproblems add_constraint_list(self.original_util_block) - (discrete_problem_util_block, - subproblem_util_block) = _get_discrete_problem_and_subproblem(self, - config) + ( + discrete_problem_util_block, + subproblem_util_block, + ) = _get_discrete_problem_and_subproblem(self, config) discrete = discrete_problem_util_block.parent_block() subproblem = subproblem_util_block.parent_block() original_obj = self._setup_augmented_penalty_objective( - discrete_problem_util_block) + discrete_problem_util_block + ) self._log_header(logger) @@ -93,12 +120,14 @@ def _solve_gdp(self, original_model, config): # solve linear discrete problem with time_code(self.timing, 'mip'): oa_obj = self._update_augmented_penalty_objective( - discrete_problem_util_block, original_obj, - config.OA_penalty_factor) + discrete_problem_util_block, original_obj, config.OA_penalty_factor + ) mip_feasible = solve_MILP_discrete_problem( - discrete_problem_util_block, self, config) + discrete_problem_util_block, self, config + ) self._update_bounds_after_discrete_problem_solve( - mip_feasible, oa_obj, logger) + mip_feasible, oa_obj, logger + ) # Check termination conditions if self.any_termination_criterion_met(config): @@ -106,7 +135,8 @@ def _solve_gdp(self, original_model, config): with time_code(self.timing, 'nlp'): self._fix_discrete_soln_solve_subproblem_and_add_cuts( - discrete_problem_util_block, subproblem_util_block, config) + discrete_problem_util_block, subproblem_util_block, config + ) # Add integer cut with time_code(self.timing, "integer cut generation"): @@ -118,8 +148,7 @@ def _solve_gdp(self, original_model, config): def _setup_augmented_penalty_objective(self, discrete_problem_util_block): m = discrete_problem_util_block.parent_block() - discrete_objective = next(m.component_data_objects(Objective, - active=True)) + discrete_objective = next(m.component_data_objects(Objective, active=True)) # Set up augmented penalty objective discrete_objective.deactivate() @@ -128,24 +157,36 @@ def _setup_augmented_penalty_objective(self, discrete_problem_util_block): return discrete_objective - def _update_augmented_penalty_objective(self, discrete_problem_util_block, - discrete_objective, - OA_penalty_factor): + def _update_augmented_penalty_objective( + self, discrete_problem_util_block, discrete_objective, OA_penalty_factor + ): m = discrete_problem_util_block.parent_block() sign_adjust = 1 if discrete_objective.sense == minimize else -1 - OA_penalty_expr = sign_adjust * OA_penalty_factor * \ - sum(v for v in m.component_data_objects( - ctype=Var, descend_into=(Block, Disjunct)) - if v.parent_component().local_name == - 'GDPopt_OA_slacks') - discrete_problem_util_block.oa_obj.expr = discrete_objective.expr + \ - OA_penalty_expr + OA_penalty_expr = ( + sign_adjust + * OA_penalty_factor + * sum( + v + for v in m.component_data_objects( + ctype=Var, descend_into=(Block, Disjunct) + ) + if v.parent_component().local_name == 'GDPopt_OA_slacks' + ) + ) + discrete_problem_util_block.oa_obj.expr = ( + discrete_objective.expr + OA_penalty_expr + ) return discrete_problem_util_block.oa_obj.expr - def _add_cuts_to_discrete_problem(self, subproblem_util_block, - discrete_problem_util_block, - objective_sense, config, timing): + def _add_cuts_to_discrete_problem( + self, + subproblem_util_block, + discrete_problem_util_block, + objective_sense, + config, + timing, + ): """Add outer approximation cuts to the linear GDP model.""" m = discrete_problem_util_block.parent_block() nlp = subproblem_util_block.parent_block() @@ -161,8 +202,9 @@ def _add_cuts_to_discrete_problem(self, subproblem_util_block, oa_cut_blocks = discrete_problem_util_block.oa_cut_blocks = dict() for discrete_var, subprob_var in zip( - discrete_problem_util_block.algebraic_variable_list, - subproblem_util_block.algebraic_variable_list): + discrete_problem_util_block.algebraic_variable_list, + subproblem_util_block.algebraic_variable_list, + ): val = subprob_var.value if val is not None and not discrete_var.fixed: discrete_var.set_value(val, skip_validation=True) @@ -173,11 +215,11 @@ def _add_cuts_to_discrete_problem(self, subproblem_util_block, if not hasattr(discrete_problem_util_block, 'jacobians'): discrete_problem_util_block.jacobians = ComponentMap() for constr, subprob_constr in zip( - discrete_problem_util_block.constraint_list, - subproblem_util_block.constraint_list): + discrete_problem_util_block.constraint_list, + subproblem_util_block.constraint_list, + ): dual_value = nlp.dual.get(subprob_constr, None) - if (dual_value is None or - generate_standard_repn(constr.body).is_linear()): + if dual_value is None or generate_standard_repn(constr.body).is_linear(): continue # Determine if the user pre-specified that OA cuts should not be @@ -185,29 +227,32 @@ def _add_cuts_to_discrete_problem(self, subproblem_util_block, parent_block = constr.parent_block() ignore_set = getattr(parent_block, 'GDPopt_ignore_OA', None) config.logger.debug('Ignore_set %s' % ignore_set) - if (ignore_set and (constr in ignore_set or - constr.parent_component() in ignore_set)): + if ignore_set and ( + constr in ignore_set or constr.parent_component() in ignore_set + ): config.logger.debug( 'OA cut addition for %s skipped because it is in ' - 'the ignore set.' % constr.name) + 'the ignore set.' % constr.name + ) continue - config.logger.debug("Adding OA cut for %s with dual value %s" % - (constr.name, dual_value)) + config.logger.debug( + "Adding OA cut for %s with dual value %s" % (constr.name, dual_value) + ) # Cache jacobian jacobian = discrete_problem_util_block.jacobians.get(constr, None) if jacobian is None: - constr_vars = list(identify_variables(constr.body, - include_fixed=False)) + constr_vars = list(identify_variables(constr.body, include_fixed=False)) if len(constr_vars) >= MAX_SYMBOLIC_DERIV_SIZE: mode = differentiate.Modes.reverse_numeric else: mode = differentiate.Modes.sympy try: - jac_list = differentiate(constr.body, wrt_list=constr_vars, - mode=mode) + jac_list = differentiate( + constr.body, wrt_list=constr_vars, mode=mode + ) jac_map = ComponentMap(zip(constr_vars, jac_list)) except: if mode is differentiate.Modes.reverse_numeric: @@ -218,8 +263,9 @@ def _add_cuts_to_discrete_problem(self, subproblem_util_block, discrete_problem_util_block.jacobians[constr] = jacobian # Recompute numeric derivatives if not jacobian.jac: - jac_list = differentiate(constr.body, wrt_list=jacobian.vars, - mode=jacobian.mode) + jac_list = differentiate( + constr.body, wrt_list=jacobian.vars, mode=jacobian.mode + ) jacobian.jac.update(zip(jacobian.vars, jac_list)) # Create a block on which to put outer approximation cuts, if we @@ -227,47 +273,55 @@ def _add_cuts_to_discrete_problem(self, subproblem_util_block, oa_utils = oa_cut_blocks.get(parent_block) if oa_utils is None: nm = unique_component_name(parent_block, 'GDPopt_OA_cuts') - oa_utils = Block(doc="Block holding outer approximation cuts " - "and associated data.") + oa_utils = Block( + doc="Block holding outer approximation cuts and associated data." + ) parent_block.add_component(nm, oa_utils) oa_cut_blocks[parent_block] = oa_utils oa_utils.cuts = Constraint(NonNegativeIntegers) discrete_prob_oa_utils = discrete_problem_util_block.component( - 'GDPopt_OA_slacks') + 'GDPopt_OA_slacks' + ) if discrete_prob_oa_utils is None: - discrete_prob_oa_utils = discrete_problem_util_block.\ - GDPopt_OA_slacks = Block( - doc="Block holding outer approximation " - "slacks for the whole model (so that the " - "writers can find them).") + discrete_prob_oa_utils = ( + discrete_problem_util_block.GDPopt_OA_slacks + ) = Block( + doc="Block holding outer approximation " + "slacks for the whole model (so that the " + "writers can find them)." + ) discrete_prob_oa_utils.slacks = VarList( - bounds=(0, config.max_slack), domain=NonNegativeReals, - initialize=0) + bounds=(0, config.max_slack), domain=NonNegativeReals, initialize=0 + ) oa_cuts = oa_utils.cuts slack_var = discrete_prob_oa_utils.slacks.add() - rhs = value(constr.lower) if constr.has_lb() else value( - constr.upper) + rhs = value(constr.lower) if constr.has_lb() else value(constr.upper) try: new_oa_cut = ( - copysign(1, sign_adjust * dual_value) * ( - value(constr.body) - rhs + sum( + copysign(1, sign_adjust * dual_value) + * ( + value(constr.body) + - rhs + + sum( value(jac) * (var - value(var)) - for var, jac in jacobian.jac.items()) - ) - slack_var <= 0) + for var, jac in jacobian.jac.items() + ) + ) + - slack_var + <= 0 + ) assert new_oa_cut.polynomial_degree() in (1, 0) idx = len(oa_cuts) oa_cuts[idx] = new_oa_cut - _add_bigm_constraint_to_transformed_model(m, oa_cuts[idx], - oa_cuts) + _add_bigm_constraint_to_transformed_model(m, oa_cuts[idx], oa_cuts) config.logger.debug("Cut expression: %s" % new_oa_cut) counter += 1 except ZeroDivisionError: config.logger.warning( - "Zero division occured attempting to generate OA cut for " + "Zero division occurred attempting to generate OA cut for " "constraint %s.\n" - "Skipping OA cut generation for this constraint." - % (constr.name,) + "Skipping OA cut generation for this constraint." % (constr.name,) ) # Simply continue on to the next constraint. # Clear out the numeric Jacobian values @@ -275,6 +329,3 @@ def _add_cuts_to_discrete_problem(self, subproblem_util_block, jacobian.jac.clear() config.logger.debug('Added %s OA cuts' % counter) - -GDP_LOA_Solver.solve.__doc__ = add_docstring_list( - GDP_LOA_Solver.solve.__doc__, GDP_LOA_Solver.CONFIG, indent_by=8) diff --git a/pyomo/contrib/gdpopt/nlp_initialization.py b/pyomo/contrib/gdpopt/nlp_initialization.py index e64f940c657..fc083c095da 100644 --- a/pyomo/contrib/gdpopt/nlp_initialization.py +++ b/pyomo/contrib/gdpopt/nlp_initialization.py @@ -13,15 +13,8 @@ which case you can write your own, and specify it in the 'subproblem_initialization_method' argument.""" -# This is the original GDPopt behavior: -def restore_vars_to_original_values(solver, nlp_util_block, mip_util_block): - """Perform initialization of the subproblem. - This just restores the continuous variables to the original - model values, which were saved on the subproblem's utility block when it - was created. - """ - # restore original continuous variable values +def _restore_vars_from_nlp_block_saved_values(nlp_util_block): for var, old_value in nlp_util_block.initial_var_values.items(): if not var.fixed and var.is_continuous(): if old_value is not None: @@ -32,3 +25,29 @@ def restore_vars_to_original_values(solver, nlp_util_block, mip_util_block): old_value = var.ub # Set the value var.set_value(old_value) + + +# This is the original GDPopt behavior: +def restore_vars_to_original_values(solver, nlp_util_block, mip_util_block): + """Perform initialization of the subproblem. + + This just restores the continuous variables to the original + model values, which were saved on the subproblem's utility block when it + was created. + """ + # restore original continuous variable values + _restore_vars_from_nlp_block_saved_values(nlp_util_block) + + +# This is the default for the enumerate algorithm +def restore_vars_to_original_values_enumerate( + true_disjuncts, boolean_var_values, discrete_var_values, nlp_util_block +): + """Perform initialization of the subproblem. + + This just restores the continuous variables to the original + model values, which were saved on the subproblem's utility block when it + was created. + """ + # restore original continuous variable values + _restore_vars_from_nlp_block_saved_values(nlp_util_block) diff --git a/pyomo/contrib/gdpopt/oa_algorithm_utils.py b/pyomo/contrib/gdpopt/oa_algorithm_utils.py index bd94caaab61..9aba59e4527 100644 --- a/pyomo/contrib/gdpopt/oa_algorithm_utils.py +++ b/pyomo/contrib/gdpopt/oa_algorithm_utils.py @@ -11,32 +11,37 @@ from math import fabs from pyomo.contrib.gdpopt.solve_subproblem import solve_subproblem -from pyomo.contrib.gdpopt.util import ( - fix_discrete_problem_solution_in_subproblem) +from pyomo.contrib.gdpopt.util import fix_discrete_problem_solution_in_subproblem from pyomo.core import value from pyomo.opt import TerminationCondition as tc + class _OAAlgorithmMixIn(object): def _fix_discrete_soln_solve_subproblem_and_add_cuts( - self, discrete_prob_util_block, subprob_util_block, config): + self, discrete_prob_util_block, subprob_util_block, config + ): with fix_discrete_problem_solution_in_subproblem( - discrete_prob_util_block, subprob_util_block, self, config, - config.force_subproblem_nlp): - nlp_termination = solve_subproblem(subprob_util_block, self, - config) + discrete_prob_util_block, subprob_util_block, self, config + ): + nlp_termination = solve_subproblem(subprob_util_block, self, config) if nlp_termination in {tc.optimal, tc.feasible}: primal_improved = self._update_bounds_after_solve( - 'subproblem', primal=value(subprob_util_block.obj.expr), - logger=config.logger) + 'subproblem', + primal=value(subprob_util_block.obj.expr), + logger=config.logger, + ) if primal_improved: self.update_incumbent(subprob_util_block) - self._add_cuts_to_discrete_problem(subprob_util_block, - discrete_prob_util_block, - self.objective_sense, - config, self.timing) + self._add_cuts_to_discrete_problem( + subprob_util_block, + discrete_prob_util_block, + self.objective_sense, + config, + self.timing, + ) elif nlp_termination == tc.unbounded: # the whole problem is unbounded, we can stop - self._update_primal_bound_to_unbounded() + self._update_primal_bound_to_unbounded(config) return nlp_termination not in {tc.infeasible, tc.unbounded} @@ -56,7 +61,6 @@ def _get_active_untransformed_constraints(self, util_block, config): # get all the disjuncts in the original model. Check which ones are # True. for disj, constr_list in util_block.constraints_by_disjunct.items(): - if fabs(disj.binary_indicator_var.value - 1) \ - <= config.integer_tolerance: + if fabs(disj.binary_indicator_var.value - 1) <= config.integer_tolerance: for constr in constr_list: yield constr diff --git a/pyomo/contrib/gdpopt/plugins.py b/pyomo/contrib/gdpopt/plugins.py index c20a8794039..3ebad88a626 100644 --- a/pyomo/contrib/gdpopt/plugins.py +++ b/pyomo/contrib/gdpopt/plugins.py @@ -9,6 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.contrib.gdpopt.GDPopt import pyomo.contrib.gdpopt.gloa diff --git a/pyomo/contrib/gdpopt/ric.py b/pyomo/contrib/gdpopt/ric.py index 787981d07f6..f3eb83b79a9 100644 --- a/pyomo/contrib/gdpopt/ric.py +++ b/pyomo/contrib/gdpopt/ric.py @@ -9,17 +9,20 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.common.config import add_docstring_list +from pyomo.common.config import document_kwargs_from_configdict from pyomo.contrib.gdpopt.algorithm_base_class import _GDPoptAlgorithm from pyomo.contrib.gdpopt.config_options import ( - _add_mip_solver_configs, _add_nlp_solver_configs, _add_tolerance_configs, - _add_oa_configs) + _add_mip_solver_configs, + _add_nlp_solver_configs, + _add_tolerance_configs, + _add_oa_configs, +) from pyomo.contrib.gdpopt.create_oa_subproblems import ( - _get_discrete_problem_and_subproblem) + _get_discrete_problem_and_subproblem, +) from pyomo.contrib.gdpopt.oa_algorithm_utils import _OAAlgorithmMixIn from pyomo.contrib.gdpopt.cut_generation import add_no_good_cut -from pyomo.contrib.gdpopt.solve_discrete_problem import ( - solve_MILP_discrete_problem) +from pyomo.contrib.gdpopt.solve_discrete_problem import solve_MILP_discrete_problem from pyomo.contrib.gdpopt.util import time_code from pyomo.core import Objective from pyomo.opt.base import SolverFactory @@ -28,10 +31,12 @@ # should get the integer solutions several-at-a-time with a solution pool or # something of the like... + @SolverFactory.register( 'gdpopt.ric', doc="The RIC (relaxation with integer cuts) Generalized Disjunctive " - "Programming (GDP) solver") + "Programming (GDP) solver", +) class GDP_RIC_Solver(_GDPoptAlgorithm, _OAAlgorithmMixIn): """The GDPopt (Generalized Disjunctive Programming optimizer) relaxation with integer cuts (RIC) solver. @@ -40,8 +45,6 @@ class GDP_RIC_Solver(_GDPoptAlgorithm, _OAAlgorithmMixIn): constraints, as well as logical conditions. For non-convex problems, RIC will not be exact unless the NLP subproblems are solved globally. """ - def _log_citation(self, config): - pass CONFIG = _GDPoptAlgorithm.CONFIG() _add_mip_solver_configs(CONFIG) @@ -51,16 +54,25 @@ def _log_citation(self, config): algorithm = 'RIC' + # Override solve() to customize the docstring for this solver + @document_kwargs_from_configdict(CONFIG, doc=_GDPoptAlgorithm.solve.__doc__) + def solve(self, model, **kwds): + return super().solve(model, **kwds) + def _solve_gdp(self, original_model, config): logger = config.logger - (discrete_problem_util_block, - subproblem_util_block) = _get_discrete_problem_and_subproblem(self, - config) + ( + discrete_problem_util_block, + subproblem_util_block, + ) = _get_discrete_problem_and_subproblem(self, config) discrete_problem = discrete_problem_util_block.parent_block() subproblem = subproblem_util_block.parent_block() - discrete_problem_obj = next(discrete_problem.component_data_objects( - Objective, active=True, descend_into=True)) + discrete_problem_obj = next( + discrete_problem.component_data_objects( + Objective, active=True, descend_into=True + ) + ) self._log_header(logger) @@ -71,9 +83,11 @@ def _solve_gdp(self, original_model, config): # solve linear discrete problem with time_code(self.timing, 'mip'): mip_feasible = solve_MILP_discrete_problem( - discrete_problem_util_block, self, config) + discrete_problem_util_block, self, config + ) self._update_bounds_after_discrete_problem_solve( - mip_feasible, discrete_problem_obj, logger) + mip_feasible, discrete_problem_obj, logger + ) # Check termination conditions if self.any_termination_criterion_met(config): @@ -81,7 +95,8 @@ def _solve_gdp(self, original_model, config): with time_code(self.timing, 'nlp'): self._fix_discrete_soln_solve_subproblem_and_add_cuts( - discrete_problem_util_block, subproblem_util_block, config) + discrete_problem_util_block, subproblem_util_block, config + ) # Add integer cut with time_code(self.timing, "integer cut generation"): @@ -91,11 +106,13 @@ def _solve_gdp(self, original_model, config): if self.any_termination_criterion_met(config): break - def _add_cuts_to_discrete_problem(self, subproblem_util_block, - discrete_problem_util_block, - objective_sense, config, timing): + def _add_cuts_to_discrete_problem( + self, + subproblem_util_block, + discrete_problem_util_block, + objective_sense, + config, + timing, + ): # Nothing to do here pass - -GDP_RIC_Solver.solve.__doc__ = add_docstring_list( - GDP_RIC_Solver.solve.__doc__, GDP_RIC_Solver.CONFIG, indent_by=8) diff --git a/pyomo/contrib/gdpopt/solve_discrete_problem.py b/pyomo/contrib/gdpopt/solve_discrete_problem.py index 697379518be..3de66fbaca0 100644 --- a/pyomo/contrib/gdpopt/solve_discrete_problem.py +++ b/pyomo/contrib/gdpopt/solve_discrete_problem.py @@ -14,13 +14,17 @@ from pyomo.common.deprecation import deprecation_warning from pyomo.common.errors import InfeasibleConstraintException from pyomo.contrib.fbbt.fbbt import fbbt -from pyomo.contrib.gdpopt.util import (SuppressInfeasibleWarning, _DoNothing, - get_main_elapsed_time) +from pyomo.contrib.gdpopt.util import ( + SuppressInfeasibleWarning, + _DoNothing, + get_main_elapsed_time, +) from pyomo.core import Objective, Constraint from pyomo.opt import SolutionStatus, SolverFactory from pyomo.opt import TerminationCondition as tc from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver + def solve_MILP_discrete_problem(util_block, solver, config): """Solves the linear GDP model and attempts to resolve solution issues. Returns one of TerminationCondition.optimal, TerminationCondition.feasible, @@ -37,14 +41,19 @@ def solve_MILP_discrete_problem(util_block, solver, config): # implicitly fixed by these bounds. We can fix this by calling some # contrib.preprocessing transformations, but for now I'm just # leaving the constraints in. - fbbt(m, integer_tol=config.integer_tolerance, - deactivate_satisfied_constraints=False) + fbbt( + m, + integer_tol=config.integer_tolerance, + deactivate_satisfied_constraints=False, + ) # [ESJ 1/28/22]: Despite being a little scary, the tightened bounds # are okay to leave in because if you tighten the bounds now, they # could only get tighter in later iterations, since you are # tightening this relaxation - except InfeasibleConstraintException: - config.logger.debug("MIP preprocessing detected infeasibility.") + except InfeasibleConstraintException as e: + config.logger.debug( + "MIP preprocessing detected infeasibility:\n\t%s" % str(e) + ) return tc.infeasible # Deactivate extraneous IMPORT/EXPORT suffixes @@ -53,8 +62,7 @@ def solve_MILP_discrete_problem(util_block, solver, config): # Create solver, check availability if not SolverFactory(config.mip_solver).available(): - raise RuntimeError( - "MIP solver %s is not available." % config.mip_solver) + raise RuntimeError("MIP solver %s is not available." % config.mip_solver) # Callback immediately before solving MIP discrete problem config.call_before_discrete_problem_solve(solver, m, util_block) @@ -62,7 +70,9 @@ def solve_MILP_discrete_problem(util_block, solver, config): deprecation_warning( "The 'call_before_master_solve' argument is deprecated. " "Please use the 'call_before_discrete_problem_solve' option " - "to specify the callback.", version="6.4.2") + "to specify the callback.", + version="6.4.2", + ) with SuppressInfeasibleWarning(): mip_args = dict(config.mip_solver_args) @@ -73,9 +83,9 @@ def solve_MILP_discrete_problem(util_block, solver, config): mip_args['add_options'] = mip_args.get('add_options', []) mip_args['add_options'].append('option reslim=%s;' % remaining) elif config.mip_solver == 'multisolve': - mip_args['time_limit'] = min(mip_args.get('time_limit', - float('inf')), - remaining) + mip_args['time_limit'] = min( + mip_args.get('time_limit', float('inf')), remaining + ) results = SolverFactory(config.mip_solver).solve(m, **mip_args) config.call_after_discrete_problem_solve(solver, m, util_block) @@ -83,15 +93,16 @@ def solve_MILP_discrete_problem(util_block, solver, config): deprecation_warning( "The 'call_after_master_solve' argument is deprecated. " "Please use the 'call_after_discrete_problem_solve' option to " - "specify the callback.", version="6.4.2") + "specify the callback.", + version="6.4.2", + ) terminate_cond = results.solver.termination_condition if terminate_cond is tc.infeasibleOrUnbounded: # Linear solvers will sometimes tell me that it's infeasible or # unbounded during presolve, but fails to distinguish. We need to # resolve with a solver option flag on. - results, terminate_cond = distinguish_mip_infeasible_or_unbounded( - m, config) + results, terminate_cond = distinguish_mip_infeasible_or_unbounded(m, config) if terminate_cond is tc.unbounded: # Solution is unbounded. This occurs when the objective is # nonlinear. The nonlinear objective is moved to the constraints, and @@ -99,33 +110,40 @@ def solve_MILP_discrete_problem(util_block, solver, config): # arbitrary discrete solution by bounding the objective and re-solving, # in hopes that the cuts we generate later bound this problem. - obj_bound = 1E15 + obj_bound = 1e15 config.logger.warning( 'Discrete problem was unbounded. ' 'Re-solving with arbitrary bound values of (-{0:.10g}, {0:.10g}) ' 'on the objective, in order to get a discrete solution. ' - 'Check your initialization routine.'.format(obj_bound)) - discrete_objective = next(m.component_data_objects(Objective, - active=True)) + 'Check your initialization routine.'.format(obj_bound) + ) + discrete_objective = next(m.component_data_objects(Objective, active=True)) util_block.objective_bound = Constraint( - expr=(-obj_bound, discrete_objective.expr, obj_bound)) + expr=(-obj_bound, discrete_objective.expr, obj_bound) + ) with SuppressInfeasibleWarning(): results = SolverFactory(config.mip_solver).solve( - m, **config.mip_solver_args) + m, **config.mip_solver_args + ) # get rid of the made-up constraint del util_block.objective_bound - if results.solver.termination_condition in {tc.optimal, tc.feasible, - tc.locallyOptimal, - tc.globallyOptimal}: + if results.solver.termination_condition in { + tc.optimal, + tc.feasible, + tc.locallyOptimal, + tc.globallyOptimal, + }: # we found a solution, that's all we need to keep going. return tc.unbounded else: - raise RuntimeError("Unable to find a feasible solution for the " - "unbounded MILP discrete problem by bounding " - "the objective. Either check your " - "discrete problem initialization, or add a " - "bound on the discrete problem objective value " - "that admits a feasible solution.") + raise RuntimeError( + "Unable to find a feasible solution for the " + "unbounded MILP discrete problem by bounding " + "the objective. Either check your " + "discrete problem initialization, or add a " + "bound on the discrete problem objective value " + "that admits a feasible solution." + ) if terminate_cond is tc.optimal: return tc.optimal @@ -134,34 +152,41 @@ def solve_MILP_discrete_problem(util_block, solver, config): elif terminate_cond is tc.infeasible: config.logger.info( 'MILP discrete problem is now infeasible. GDPopt has explored or ' - 'cut off all feasible discrete configurations.') + 'cut off all feasible discrete configurations.' + ) return tc.infeasible elif terminate_cond is tc.maxTimeLimit: if len(results.solution) > 0: config.logger.info( 'Unable to optimize MILP discrete problem within time limit. ' - 'Using current solver feasible solution.') + 'Using current solver feasible solution.' + ) return tc.feasible else: config.logger.info( 'Unable to optimize MILP discrete problem within time limit. ' 'No solution found. Treating as infeasible, but there are no ' - 'guarantees.') + 'guarantees.' + ) return tc.infeasible - elif (terminate_cond is tc.other and - results.solution.status is SolutionStatus.feasible): + elif ( + terminate_cond is tc.other + and results.solution.status is SolutionStatus.feasible + ): # load the solution and suppress the warning message by setting # solver status to ok. config.logger.info( 'MIP solver reported feasible solution to MILP discrete problem, ' - 'but it is not guaranteed to be optimal.') + 'but it is not guaranteed to be optimal.' + ) return tc.feasible else: raise ValueError( 'GDPopt unable to handle MILP discrete problem ' 'termination condition ' - 'of %s. Solver message: %s' % - (terminate_cond, results.solver.message)) + 'of %s. Solver message: %s' % (terminate_cond, results.solver.message) + ) + def distinguish_mip_infeasible_or_unbounded(m, config): """Distinguish between an infeasible or unbounded solution. diff --git a/pyomo/contrib/gdpopt/solve_subproblem.py b/pyomo/contrib/gdpopt/solve_subproblem.py index cbfce90fe69..bd9b85c0cef 100644 --- a/pyomo/contrib/gdpopt/solve_subproblem.py +++ b/pyomo/contrib/gdpopt/solve_subproblem.py @@ -15,19 +15,24 @@ from pyomo.contrib import appsi from pyomo.contrib.appsi.cmodel import cmodel_available from pyomo.contrib.fbbt.fbbt import fbbt -from pyomo.contrib.gdpopt.util import (SuppressInfeasibleWarning, - is_feasible, get_main_elapsed_time) +from pyomo.contrib.gdpopt.solve_discrete_problem import ( + distinguish_mip_infeasible_or_unbounded, +) +from pyomo.contrib.gdpopt.util import ( + SuppressInfeasibleWarning, + is_feasible, + get_main_elapsed_time, +) from pyomo.core import Constraint, TransformationFactory, Objective, Block -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.opt import SolverFactory, SolverResults from pyomo.opt import TerminationCondition as tc -def configure_and_call_solver(model, solver, args, problem_type, timing, - time_limit): + +def configure_and_call_solver(model, solver, args, problem_type, timing, time_limit): opt = SolverFactory(solver) if not opt.available(): - raise RuntimeError("%s solver %s is not available." % (problem_type, - solver)) + raise RuntimeError("%s solver %s is not available." % (problem_type, solver)) with SuppressInfeasibleWarning(): solver_args = dict(args) if time_limit is not None: @@ -35,17 +40,15 @@ def configure_and_call_solver(model, solver, args, problem_type, timing, remaining = max(time_limit - elapsed, 1) if solver == 'gams': solver_args['add_options'] = solver_args.get('add_options', []) - solver_args['add_options'].append('option reslim=%s;' % - remaining) + solver_args['add_options'].append('option reslim=%s;' % remaining) elif solver == 'multisolve': - solver_args['time_limit'] = min(solver_args.get('time_limit', - float('inf')), - remaining) + solver_args['time_limit'] = min( + solver_args.get('time_limit', float('inf')), remaining + ) try: results = opt.solve(model, **solver_args) except ValueError as err: - if 'Cannot load a SolverResults object with bad status: error' in \ - str(err): + if 'Cannot load a SolverResults object with bad status: error' in str(err): results = SolverResults() results.solver.termination_condition = tc.error results.solver.message = str(err) @@ -53,6 +56,7 @@ def configure_and_call_solver(model, solver, args, problem_type, timing, raise return results + def process_nonlinear_problem_results(results, model, problem_type, config): """Processes the results object returned from the nonlinear solver. Returns one of TerminationCondition.optimal (for locally optimal or @@ -63,8 +67,10 @@ def process_nonlinear_problem_results(results, model, problem_type, config): """ logger = config.logger term_cond = results.solver.termination_condition - if any(term_cond == cond for cond in (tc.optimal, tc.locallyOptimal, - tc.globallyOptimal)): + if any( + term_cond == cond + for cond in (tc.optimal, tc.locallyOptimal, tc.globallyOptimal) + ): # Since we let people use local solvers and settle for the heuristic, we # just let all these by. return tc.optimal @@ -74,69 +80,92 @@ def process_nonlinear_problem_results(results, model, problem_type, config): logger.debug('%s subproblem was infeasible.' % problem_type) return tc.infeasible elif term_cond == tc.maxIterations: - logger.debug('%s subproblem failed to converge within iteration limit.' - % problem_type) + logger.debug( + '%s subproblem failed to converge within iteration limit.' % problem_type + ) if is_feasible(model, config): logger.debug( 'NLP solution is still feasible. ' - 'Using potentially suboptimal feasible solution.') + 'Using potentially suboptimal feasible solution.' + ) return tc.feasible return False elif term_cond == tc.internalSolverError: # Possible that IPOPT had a restoration failure - logger.debug("%s solver had an internal failure: %s" % - (problem_type, results.solver.message)) + logger.debug( + "%s solver had an internal failure: %s" + % (problem_type, results.solver.message) + ) return tc.noSolution - elif (term_cond == tc.other and - "Too few degrees of freedom" in str(results.solver.message)): + elif term_cond == tc.other and "Too few degrees of freedom" in str( + results.solver.message + ): # Possible IPOPT degrees of freedom error logger.debug( - "Perhaps the subproblem solver has too few degrees of freedom: %s" % - results.solver.message) + "Perhaps the subproblem solver has too few degrees of freedom: %s" + % results.solver.message + ) return tc.infeasible elif term_cond == tc.other: logger.debug( - "%s solver had a termination condition of 'other': %s" % - (problem_type, results.solver.message)) + "%s solver had a termination condition of 'other': %s" + % (problem_type, results.solver.message) + ) return tc.noSolution elif term_cond == tc.error: - logger.debug("%s solver had a termination condition of 'error': " - "%s" % (problem_type, results.solver.message)) + logger.debug( + "%s solver had a termination condition of 'error': " + "%s" % (problem_type, results.solver.message) + ) return tc.noSolution elif term_cond == tc.maxTimeLimit: - logger.debug("%s subproblem failed to converge within time " - "limit." % problem_type) + logger.debug( + "%s subproblem failed to converge within time limit." % problem_type + ) if is_feasible(model, config): config.logger.debug( '%s solution is still feasible. ' - 'Using potentially suboptimal feasible solution.' % - problem_type) + 'Using potentially suboptimal feasible solution.' % problem_type + ) return tc.feasible return tc.noSolution elif term_cond == tc.intermediateNonInteger: - config.logger.debug("%s solver could not find feasible integer" - " solution: %s" % (problem_type, - results.solver.message)) + config.logger.debug( + "%s solver could not find feasible integer" + " solution: %s" % (problem_type, results.solver.message) + ) return tc.noSolution elif term_cond == tc.unbounded: - config.logger.debug("The NLP subproblem is unbounded, meaning that " - "the GDP is unbounded.") + config.logger.debug( + "The NLP subproblem is unbounded, meaning that the GDP is unbounded." + ) return tc.unbounded else: # This isn't the user's fault, but we give up--we don't know what's # going on. raise DeveloperError( 'GDPopt unable to handle %s subproblem termination ' - 'condition of %s. Results: %s' % (problem_type, term_cond, results)) + 'condition of %s. Results: %s' % (problem_type, term_cond, results) + ) + def solve_linear_subproblem(subproblem, config, timing): - results = configure_and_call_solver(subproblem, config.mip_solver, - config.mip_solver_args, 'MIP', timing, - config.time_limit) + results = configure_and_call_solver( + subproblem, + config.mip_solver, + config.mip_solver_args, + 'MIP', + timing, + config.time_limit, + ) subprob_terminate_cond = results.solver.termination_condition if subprob_terminate_cond is tc.optimal: return tc.optimal - elif subprob_terminate_cond is tc.infeasible: + elif subprob_terminate_cond is tc.infeasibleOrUnbounded: + (results, subprob_terminate_cond) = distinguish_mip_infeasible_or_unbounded( + subproblem, config + ) + if subprob_terminate_cond is tc.infeasible: config.logger.debug('MILP subproblem was infeasible.') return tc.infeasible elif subprob_terminate_cond is tc.unbounded: @@ -145,56 +174,72 @@ def solve_linear_subproblem(subproblem, config, timing): else: raise ValueError( 'GDPopt unable to handle MIP subproblem termination ' - 'condition of %s. Results: %s' - % (subprob_terminate_cond, results)) + 'condition of %s. Results: %s' % (subprob_terminate_cond, results) + ) + def solve_NLP(nlp_model, config, timing): """Solve the NLP subproblem.""" config.logger.debug( - 'Solving nonlinear subproblem for ' - 'fixed binaries and logical realizations.') + 'Solving nonlinear subproblem for fixed binaries and logical realizations.' + ) - results = configure_and_call_solver(nlp_model, config.nlp_solver, - config.nlp_solver_args, 'NLP', timing, - config.time_limit) + results = configure_and_call_solver( + nlp_model, + config.nlp_solver, + config.nlp_solver_args, + 'NLP', + timing, + config.time_limit, + ) return process_nonlinear_problem_results(results, nlp_model, 'NLP', config) + def solve_MINLP(util_block, config, timing): """Solve the MINLP subproblem.""" - config.logger.debug( - "Solving MINLP subproblem for fixed logical realizations." - ) + config.logger.debug("Solving MINLP subproblem for fixed logical realizations.") model = util_block.parent_block() minlp_solver = SolverFactory(config.minlp_solver) if not minlp_solver.available(): - raise RuntimeError("MINLP solver %s is not available." % - config.minlp_solver) - - results = configure_and_call_solver(model, config.minlp_solver, - config.minlp_solver_args, 'MINLP', - timing, config.time_limit) - subprob_termination = process_nonlinear_problem_results(results, model, - 'MINLP', config) + raise RuntimeError("MINLP solver %s is not available." % config.minlp_solver) + + results = configure_and_call_solver( + model, + config.minlp_solver, + config.minlp_solver_args, + 'MINLP', + timing, + config.time_limit, + ) + subprob_termination = process_nonlinear_problem_results( + results, model, 'MINLP', config + ) return subprob_termination + def detect_unfixed_discrete_vars(model): """Detect unfixed discrete variables in use on the model.""" var_set = ComponentSet() for constr in model.component_data_objects( - Constraint, active=True, descend_into=True): + Constraint, active=True, descend_into=True + ): var_set.update( - v for v in EXPR.identify_variables( - constr.body, include_fixed=False) - if not v.is_continuous()) + v + for v in EXPR.identify_variables(constr.body, include_fixed=False) + if not v.is_continuous() + ) for obj in model.component_data_objects(Objective, active=True): - var_set.update(v for v in EXPR.identify_variables(obj.expr, - include_fixed=False) - if not v.is_continuous()) + var_set.update( + v + for v in EXPR.identify_variables(obj.expr, include_fixed=False) + if not v.is_continuous() + ) return var_set + class preprocess_subproblem(object): def __init__(self, util_block, config): self.util_block = util_block @@ -211,8 +256,9 @@ def __enter__(self): m = self.util_block.parent_block() # Save bounds so we can restore them - for cons in m.component_data_objects(Constraint, active=True, - descend_into=Block): + for cons in m.component_data_objects( + Constraint, active=True, descend_into=Block + ): for v in EXPR.identify_variables(cons.expr): if v not in self.original_bounds.keys(): self.original_bounds[v] = (v.lb, v.ub) @@ -224,25 +270,27 @@ def __enter__(self): try: # First do FBBT - if cmodel_available: - # [ESJ 8/16/22] We can do this when #2491 is resolved. For now, - # we'll just use contrib.fbbt. - pass - # # use the appsi fbbt implementation since we can - # it = appsi.fbbt.IntervalTightener() - # it.config.integer_tol = self.config.integer_tolerance - # it.config.feasibility_tol = self.config.constraint_tolerance - # it.config.max_iter = self.config.max_fbbt_iterations - # it.perform_fbbt(m) - else: - fbbt(m, integer_tol=self.config.integer_tolerance, - feasibility_tol=self.config.constraint_tolerance, - max_iter=self.config.max_fbbt_iterations) + # When #2574 is resolved, we can do the below. For now + # we'll use contrib.fbbt + # if cmodel_available: + # # use the appsi fbbt implementation since we can + # it = appsi.fbbt.IntervalTightener() + # it.config.integer_tol = self.config.integer_tolerance + # it.config.feasibility_tol = self.config.constraint_tolerance + # it.config.max_iter = self.config.max_fbbt_iterations + # it.perform_fbbt(m) + fbbt( + m, + integer_tol=self.config.integer_tolerance, + feasibility_tol=self.config.constraint_tolerance, + max_iter=self.config.max_fbbt_iterations, + ) xfrm = TransformationFactory # Now that we've tightened bounds, see if any variables are fixed # because their lb is equal to the ub (within tolerance) xfrm('contrib.detect_fixed_vars').apply_to( - m, tolerance=self.config.variable_tolerance) + m, tolerance=self.config.variable_tolerance + ) # Restore the original bounds because the subproblem solver might # like that better and because, if deactivate_trivial_constraints @@ -255,16 +303,20 @@ def __enter__(self): # Now, if something got fixed to 0, we might have 0*var terms to # remove xfrm('contrib.remove_zero_terms').apply_to( - m, constraints_modified=self.constraints_modified) + m, constraints_modified=self.constraints_modified + ) # Last, check if any constraints are now trivial and deactivate them xfrm('contrib.deactivate_trivial_constraints').apply_to( - m, tolerance=self.config.constraint_tolerance, - return_trivial=self.constraints_deactivated) + m, + tolerance=self.config.constraint_tolerance, + return_trivial=self.constraints_deactivated, + ) except InfeasibleConstraintException as e: self.config.logger.debug( "NLP subproblem determined to be infeasible " - "during preprocessing. Message: %s" % e) + "during preprocessing. Message: %s" % e + ) self.not_infeas = False return self.not_infeas @@ -298,14 +350,17 @@ def __exit__(self, type, value, traceback): for v in self.unfixed_vars: v.unfix() + def call_appropriate_subproblem_solver(subprob_util_block, solver, config): timing = solver.timing subprob = subprob_util_block.parent_block() config.call_before_subproblem_solve(solver, subprob, subprob_util_block) # Is the subproblem linear? - if not any(constr.body.polynomial_degree() not in (1, 0) for constr in - subprob.component_data_objects(Constraint, active=True)): + if not any( + constr.body.polynomial_degree() not in (1, 0) + for constr in subprob.component_data_objects(Constraint, active=True) + ): subprob_termination = solve_linear_subproblem(subprob, config, timing) else: # Does it have any discrete variables, and is that allowed? @@ -314,37 +369,38 @@ def call_appropriate_subproblem_solver(subprob_util_block, solver, config): # this is actually our fault at this point--we should have # enumerated the discrete solutions if it was possible and the user # requested. - raise DeveloperError("Unfixed discrete variables found on the NLP " - "subproblem.") + raise DeveloperError( + "Unfixed discrete variables found on the NLP subproblem." + ) elif len(unfixed_discrete_vars) == 0: subprob_termination = solve_NLP(subprob, config, timing) else: config.logger.debug( "The following discrete variables are unfixed: %s" "\nProceeding by solving the subproblem as a MINLP." - % ", ".join([v.name for v in unfixed_discrete_vars])) - subprob_termination = solve_MINLP(subprob_util_block, config, - timing) + % ", ".join([v.name for v in unfixed_discrete_vars]) + ) + subprob_termination = solve_MINLP(subprob_util_block, config, timing) # Call the NLP post-solve callback config.call_after_subproblem_solve(solver, subprob, subprob_util_block) # if feasible, call the NLP post-feasible callback if subprob_termination in {tc.optimal, tc.feasible}: - config.call_after_subproblem_feasible(solver, subprob, - subprob_util_block) + config.call_after_subproblem_feasible(solver, subprob, subprob_util_block) return subprob_termination + def solve_subproblem(subprob_util_block, solver, config): """Set up and solve the local MINLP or NLP subproblem.""" if config.subproblem_presolve: with preprocess_subproblem(subprob_util_block, config) as call_solver: if call_solver: - return call_appropriate_subproblem_solver(subprob_util_block, - solver, config) + return call_appropriate_subproblem_solver( + subprob_util_block, solver, config + ) else: return tc.infeasible - return call_appropriate_subproblem_solver(subprob_util_block, solver, - config) + return call_appropriate_subproblem_solver(subprob_util_block, solver, config) diff --git a/pyomo/contrib/gdpopt/tests/common_tests.py b/pyomo/contrib/gdpopt/tests/common_tests.py index 17a330777b2..5a363430381 100644 --- a/pyomo/contrib/gdpopt/tests/common_tests.py +++ b/pyomo/contrib/gdpopt/tests/common_tests.py @@ -12,39 +12,31 @@ from math import fabs from pyomo.environ import value + def check_8PP_solution(self, eight_process, results): - self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) - self.assertTrue(fabs(value(results.problem.upper_bound) - 68) <= 1E-2) + self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1e-2) + self.assertTrue(fabs(value(results.problem.upper_bound) - 68) <= 1e-2) # Check discrete solution # use 2 - self.assertTrue( - value(eight_process.use_unit_1or2.disjuncts[1].indicator_var)) - self.assertFalse( - value(eight_process.use_unit_1or2.disjuncts[0].indicator_var)) + self.assertTrue(value(eight_process.use_unit_1or2.disjuncts[1].indicator_var)) + self.assertFalse(value(eight_process.use_unit_1or2.disjuncts[0].indicator_var)) # use 4 - self.assertTrue( - value(eight_process.use_unit_4or5ornot.disjuncts[0].indicator_var)) - self.assertFalse( - value(eight_process.use_unit_4or5ornot.disjuncts[1].indicator_var)) - self.assertFalse( - value(eight_process.use_unit_4or5ornot.disjuncts[2].indicator_var)) + self.assertTrue(value(eight_process.use_unit_4or5ornot.disjuncts[0].indicator_var)) + self.assertFalse(value(eight_process.use_unit_4or5ornot.disjuncts[1].indicator_var)) + self.assertFalse(value(eight_process.use_unit_4or5ornot.disjuncts[2].indicator_var)) # use 6 - self.assertTrue( - value(eight_process.use_unit_6or7ornot.disjuncts[0].indicator_var)) - self.assertFalse( - value(eight_process.use_unit_6or7ornot.disjuncts[1].indicator_var)) - self.assertFalse( - value(eight_process.use_unit_6or7ornot.disjuncts[2].indicator_var)) + self.assertTrue(value(eight_process.use_unit_6or7ornot.disjuncts[0].indicator_var)) + self.assertFalse(value(eight_process.use_unit_6or7ornot.disjuncts[1].indicator_var)) + self.assertFalse(value(eight_process.use_unit_6or7ornot.disjuncts[2].indicator_var)) # use 8 - self.assertTrue( - value(eight_process.use_unit_8ornot.disjuncts[0].indicator_var)) - self.assertFalse( - value(eight_process.use_unit_8ornot.disjuncts[1].indicator_var)) + self.assertTrue(value(eight_process.use_unit_8ornot.disjuncts[0].indicator_var)) + self.assertFalse(value(eight_process.use_unit_8ornot.disjuncts[1].indicator_var)) + def check_8PP_logical_solution(self, eight_process, results): - self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) - self.assertTrue(fabs(value(results.problem.upper_bound) - 68) <= 1E-2) + self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1e-2) + self.assertTrue(fabs(value(results.problem.upper_bound) - 68) <= 1e-2) # Check discrete solution # use 2 diff --git a/pyomo/contrib/gdpopt/tests/test_LBB.py b/pyomo/contrib/gdpopt/tests/test_LBB.py index 820fde1bed1..7d25767020e 100644 --- a/pyomo/contrib/gdpopt/tests/test_LBB.py +++ b/pyomo/contrib/gdpopt/tests/test_LBB.py @@ -22,8 +22,7 @@ from pyomo.common.log import LoggingIntercept import pyomo.contrib.gdpopt.tests.common_tests as ct from pyomo.contrib.satsolver.satsolver import z3_available -from pyomo.environ import (SolverFactory, value, ConcreteModel, Var, Objective, - maximize) +from pyomo.environ import SolverFactory, value, ConcreteModel, Var, Objective, maximize from pyomo.gdp import Disjunction from pyomo.opt import TerminationCondition @@ -33,12 +32,14 @@ minlp_solver = 'baron' minlp_args = dict() solver_available = SolverFactory(minlp_solver).available() -license_available = SolverFactory(minlp_solver).license_is_valid() if \ - solver_available else False +license_available = ( + SolverFactory(minlp_solver).license_is_valid() if solver_available else False +) -@unittest.skipUnless(solver_available, - "Required subsolver %s is not available" % (minlp_solver,)) +@unittest.skipUnless( + solver_available, "Required subsolver %s is not available" % (minlp_solver,) +) class TestGDPopt_LBB(unittest.TestCase): """Tests for logic-based branch and bound.""" @@ -46,17 +47,14 @@ def test_infeasible_GDP(self): """Test for infeasible GDP.""" m = ConcreteModel() m.x = Var(bounds=(0, 2)) - m.d = Disjunction(expr=[ - [m.x ** 2 >= 3, m.x >= 3], - [m.x ** 2 <= -1, m.x <= -1]]) + m.d = Disjunction(expr=[[m.x**2 >= 3, m.x >= 3], [m.x**2 <= -1, m.x <= -1]]) m.o = Objective(expr=m.x) result = SolverFactory('gdpopt.lbb').solve( - m, tee=False, - minlp_solver=minlp_solver, - minlp_solver_args=minlp_args, + m, tee=False, minlp_solver=minlp_solver, minlp_solver_args=minlp_args + ) + self.assertEqual( + result.solver.termination_condition, TerminationCondition.infeasible ) - self.assertEqual(result.solver.termination_condition, - TerminationCondition.infeasible) self.assertIsNone(m.x.value) self.assertIsNone(m.d.disjuncts[0].indicator_var.value) self.assertIsNone(m.d.disjuncts[1].indicator_var.value) @@ -65,26 +63,29 @@ def test_infeasible_GDP_check_sat(self): """Test for infeasible GDP with check_sat option True.""" m = ConcreteModel() m.x = Var(bounds=(0, 2)) - m.d = Disjunction(expr=[ - [m.x ** 2 >= 3, m.x >= 3], - [m.x ** 2 <= -1, m.x <= -1]]) + m.d = Disjunction(expr=[[m.x**2 >= 3, m.x >= 3], [m.x**2 <= -1, m.x <= -1]]) m.o = Objective(expr=m.x) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): result = SolverFactory('gdpopt.lbb').solve( - m, tee=False, check_sat=True, + m, + tee=False, + check_sat=True, minlp_solver=minlp_solver, - minlp_solver_args=minlp_args) - self.assertIn("Root node is not satisfiable. Problem is infeasible.", - output.getvalue().strip()) + minlp_solver_args=minlp_args, + ) + self.assertIn( + "Root node is not satisfiable. Problem is infeasible.", + output.getvalue().strip(), + ) - self.assertEqual(result.solver.termination_condition, - TerminationCondition.infeasible) + self.assertEqual( + result.solver.termination_condition, TerminationCondition.infeasible + ) self.assertIsNone(m.x.value) self.assertIsNone(m.d.disjuncts[0].indicator_var.value) self.assertIsNone(m.d.disjuncts[1].indicator_var.value) - # This should work--see issue #2483 # def test_fix_all_but_one_disjunct(self): # m = ConcreteModel() @@ -105,93 +106,88 @@ def test_infeasible_GDP_check_sat(self): # self.assertTrue(value(m.d.disjuncts[0].indicator_var)) # self.assertFalse(value(m.d.disjuncts[1].indicator_var)) - @unittest.skipUnless(license_available, - "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") def test_LBB_8PP(self): """Test the logic-based branch and bound algorithm.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() results = SolverFactory('gdpopt.lbb').solve( - eight_process, tee=False, + eight_process, + tee=False, minlp_solver=minlp_solver, minlp_solver_args=minlp_args, ) ct.check_8PP_solution(self, eight_process, results) - @unittest.skipUnless(license_available, - "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") def test_LBB_8PP_max(self): """Test the logic-based branch and bound algorithm.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() obj = next(eight_process.component_data_objects(Objective, active=True)) obj.sense = maximize obj.set_value(-1 * obj.expr) SolverFactory('gdpopt.lbb').solve( - eight_process, tee=False, + eight_process, + tee=False, minlp_solver=minlp_solver, minlp_solver_args=minlp_args, ) self.assertAlmostEqual(value(eight_process.profit.expr), -68, places=1) - @unittest.skipUnless(license_available, - "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") def test_LBB_strip_pack(self): """Test logic-based branch and bound with strip packing.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() SolverFactory('gdpopt.lbb').solve( - strip_pack, tee=False, + strip_pack, + tee=False, minlp_solver=minlp_solver, minlp_solver_args=minlp_args, ) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 11) <= 1E-2) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 11) <= 1e-2) - @unittest.skipUnless(license_available, - "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") @unittest.pytest.mark.expensive def test_LBB_constrained_layout(self): """Test LBB with constrained layout.""" - exfile = import_file( - join(exdir, 'constrained_layout', 'cons_layout_model.py')) + exfile = import_file(join(exdir, 'constrained_layout', 'cons_layout_model.py')) cons_layout = exfile.build_constrained_layout_model() SolverFactory('gdpopt.lbb').solve( - cons_layout, tee=False, + cons_layout, + tee=False, minlp_solver=minlp_solver, minlp_solver_args=minlp_args, ) objective_value = value(cons_layout.min_dist_cost.expr) self.assertTrue( fabs(objective_value - 41573) <= 200, - "Objective value of %s instead of 41573" % objective_value) + "Objective value of %s instead of 41573" % objective_value, + ) def test_LBB_ex_633_trespalacios(self): """Test LBB with Francisco thesis example.""" exfile = import_file(join(exdir, 'small_lit', 'ex_633_trespalacios.py')) model = exfile.build_simple_nonconvex_gdp() SolverFactory('gdpopt.lbb').solve( - model, tee=False, - minlp_solver=minlp_solver, - minlp_solver_args=minlp_args, + model, tee=False, minlp_solver=minlp_solver, minlp_solver_args=minlp_args ) objective_value = value(model.obj.expr) self.assertAlmostEqual(objective_value, 4.46, 2) - @unittest.skipUnless(license_available, - "Problem is too big for unlicensed BARON.") - @unittest.skipUnless(SolverFactory('bonmin').available( - exception_flag=False), "Bonmin is not avaialable") + @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") + @unittest.skipUnless( + SolverFactory('bonmin').available(exception_flag=False), + "Bonmin is not available", + ) def test_LBB_8PP_with_screening(self): """Test the logic-based branch and bound algorithm.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() results = SolverFactory('gdpopt.lbb').solve( - eight_process, tee=False, + eight_process, + tee=False, minlp_solver=minlp_solver, minlp_solver_args=minlp_args, solve_local_rnGDP=True, @@ -200,8 +196,10 @@ def test_LBB_8PP_with_screening(self): ) ct.check_8PP_solution(self, eight_process, results) -@unittest.skipUnless(solver_available, - "Required subsolver %s is not available" % (minlp_solver,)) + +@unittest.skipUnless( + solver_available, "Required subsolver %s is not available" % (minlp_solver,) +) @unittest.skipUnless(z3_available, "Z3 SAT solver is not available.") class TestGDPopt_LBB_Z3(unittest.TestCase): """Tests for logic-based branch and bound with Z3 SAT solver integration.""" @@ -210,77 +208,80 @@ def test_infeasible_GDP(self): """Test for infeasible GDP.""" m = ConcreteModel() m.x = Var(bounds=(0, 2)) - m.d = Disjunction(expr=[ - [m.x ** 2 >= 3, m.x >= 3], - [m.x ** 2 <= -1, m.x <= -1]]) + m.d = Disjunction(expr=[[m.x**2 >= 3, m.x >= 3], [m.x**2 <= -1, m.x <= -1]]) m.o = Objective(expr=m.x) result = SolverFactory('gdpopt.lbb').solve( - m, tee=False, - minlp_solver=minlp_solver, - minlp_solver_args=minlp_args, + m, tee=False, minlp_solver=minlp_solver, minlp_solver_args=minlp_args + ) + self.assertEqual( + result.solver.termination_condition, TerminationCondition.infeasible + ) + self.assertEqual( + result.solver.termination_condition, TerminationCondition.infeasible ) - self.assertEqual(result.solver.termination_condition, - TerminationCondition.infeasible) - self.assertEqual(result.solver.termination_condition, - TerminationCondition.infeasible) self.assertIsNone(m.x.value) self.assertIsNone(m.d.disjuncts[0].indicator_var.value) self.assertIsNone(m.d.disjuncts[1].indicator_var.value) - @unittest.skipUnless(license_available, - "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") def test_LBB_8PP(self): """Test the logic-based branch and bound algorithm.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() results = SolverFactory('gdpopt.lbb').solve( - eight_process, tee=False, check_sat=True, + eight_process, + tee=False, + check_sat=True, minlp_solver=minlp_solver, minlp_solver_args=minlp_args, ) ct.check_8PP_solution(self, eight_process, results) - @unittest.skipUnless(license_available, - "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") def test_LBB_strip_pack(self): """Test logic-based branch and bound with strip packing.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() SolverFactory('gdpopt.lbb').solve( - strip_pack, tee=False, check_sat=True, + strip_pack, + tee=False, + check_sat=True, minlp_solver=minlp_solver, minlp_solver_args=minlp_args, ) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 11) <= 1E-2) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 11) <= 1e-2) - @unittest.skipUnless(license_available, - "Problem is too big for unlicensed BARON.") + @unittest.skipUnless(license_available, "Problem is too big for unlicensed BARON.") @unittest.pytest.mark.expensive def test_LBB_constrained_layout(self): """Test LBB with constrained layout.""" - exfile = import_file( - join(exdir, 'constrained_layout', 'cons_layout_model.py')) + exfile = import_file(join(exdir, 'constrained_layout', 'cons_layout_model.py')) cons_layout = exfile.build_constrained_layout_model() SolverFactory('gdpopt.lbb').solve( - cons_layout, tee=False, check_sat=True, + cons_layout, + tee=False, + check_sat=True, minlp_solver=minlp_solver, minlp_solver_args=minlp_args, ) objective_value = value(cons_layout.min_dist_cost.expr) self.assertTrue( fabs(objective_value - 41573) <= 200, - "Objective value of %s instead of 41573" % objective_value) + "Objective value of %s instead of 41573" % objective_value, + ) def test_LBB_ex_633_trespalacios(self): """Test LBB with Francisco thesis example.""" exfile = import_file(join(exdir, 'small_lit', 'ex_633_trespalacios.py')) model = exfile.build_simple_nonconvex_gdp() - SolverFactory('gdpopt').solve(model, algorithm='LBB', tee=False, - check_sat=True, minlp_solver=minlp_solver, - minlp_solver_args=minlp_args, ) + SolverFactory('gdpopt').solve( + model, + algorithm='LBB', + tee=False, + check_sat=True, + minlp_solver=minlp_solver, + minlp_solver_args=minlp_args, + ) objective_value = value(model.obj.expr) self.assertAlmostEqual(objective_value, 4.46, 2) diff --git a/pyomo/contrib/gdpopt/tests/test_enumerate.py b/pyomo/contrib/gdpopt/tests/test_enumerate.py new file mode 100644 index 00000000000..7f52f2f5c09 --- /dev/null +++ b/pyomo/contrib/gdpopt/tests/test_enumerate.py @@ -0,0 +1,192 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +from pyomo.contrib.gdpopt.enumerate import GDP_Enumeration_Solver + +from pyomo.environ import ( + SolverFactory, + Objective, + maximize, + TerminationCondition, + value, + Var, + Integers, + Constraint, + ConcreteModel, +) +from pyomo.gdp import Disjunction +import pyomo.gdp.tests.models as models + + +@unittest.skipUnless(SolverFactory('gurobi').available(), 'Gurobi not available') +class TestGDPoptEnumerate(unittest.TestCase): + def test_solve_two_term_disjunction(self): + m = models.makeTwoTermDisj() + m.obj = Objective(expr=m.x, sense=maximize) + + results = SolverFactory('gdpopt.enumerate').solve(m) + + self.assertEqual(results.solver.iterations, 2) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual(results.problem.lower_bound, 9) + self.assertEqual(results.problem.upper_bound, 9) + + self.assertEqual(value(m.x), 9) + self.assertTrue(value(m.d[0].indicator_var)) + self.assertFalse(value(m.d[1].indicator_var)) + + def modify_two_term_disjunction(self, m): + # Make first disjunct feasible + m.a.setlb(0) + # Discrete variable + m.y = Var(domain=Integers, bounds=(2, 4)) + m.d[1].c3 = Constraint(expr=m.x <= 6) + m.d[0].c2 = Constraint(expr=m.y + m.a - 5 <= 2) + + m.obj = Objective(expr=-m.x - m.y) + + def test_solve_GDP_iterate_over_discrete_variables(self): + m = models.makeTwoTermDisj() + self.modify_two_term_disjunction(m) + + results = SolverFactory('gdpopt.enumerate').solve(m, force_subproblem_nlp=True) + + self.assertEqual(results.solver.iterations, 6) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual(results.problem.lower_bound, -11) + self.assertEqual(results.problem.upper_bound, -11) + + self.assertEqual(value(m.x), 9) + self.assertEqual(value(m.y), 2) + self.assertTrue(value(m.d[0].indicator_var)) + self.assertFalse(value(m.d[1].indicator_var)) + + def test_solve_GDP_do_not_iterate_over_discrete_variables(self): + m = models.makeTwoTermDisj() + self.modify_two_term_disjunction(m) + + results = SolverFactory('gdpopt.enumerate').solve(m) + + self.assertEqual(results.solver.iterations, 2) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual(results.problem.lower_bound, -11) + self.assertEqual(results.problem.upper_bound, -11) + + self.assertEqual(value(m.x), 9) + self.assertEqual(value(m.y), 2) + self.assertTrue(value(m.d[0].indicator_var)) + self.assertFalse(value(m.d[1].indicator_var)) + + def test_solve_GDP_iterate_over_Boolean_variables(self): + m = models.makeLogicalConstraintsOnDisjuncts() + + results = SolverFactory('gdpopt.enumerate').solve(m, force_subproblem_nlp=True) + + self.assertEqual(results.solver.iterations, 16) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual(results.problem.lower_bound, 8) + self.assertEqual(results.problem.upper_bound, 8) + + self.assertTrue(value(m.d[2].indicator_var)) + self.assertTrue(value(m.d[3].indicator_var)) + self.assertFalse(value(m.d[1].indicator_var)) + self.assertFalse(value(m.d[4].indicator_var)) + self.assertEqual(value(m.x), 8) + # We don't know what values they take, but they have to be different + self.assertNotEqual(value(m.Y[1]), value(m.Y[2])) + + def test_solve_GDP_do_not_iterate_over_Boolean_variables(self): + m = models.makeLogicalConstraintsOnDisjuncts() + + results = SolverFactory('gdpopt.enumerate').solve(m) + + self.assertEqual(results.solver.iterations, 4) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual(results.problem.lower_bound, 8) + self.assertEqual(results.problem.upper_bound, 8) + + self.assertTrue(value(m.d[2].indicator_var)) + self.assertTrue(value(m.d[3].indicator_var)) + self.assertFalse(value(m.d[1].indicator_var)) + self.assertFalse(value(m.d[4].indicator_var)) + self.assertEqual(value(m.x), 8) + # We don't know what values they take, but they have to be different + self.assertNotEqual(value(m.Y[1]), value(m.Y[2])) + + def test_stop_at_iteration_limit(self): + m = models.makeLogicalConstraintsOnDisjuncts() + + results = SolverFactory('gdpopt.enumerate').solve( + m, iterlim=4, force_subproblem_nlp=True + ) + + self.assertEqual(results.solver.iterations, 4) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxIterations + ) + + @unittest.skipUnless(SolverFactory('ipopt').available(), 'Ipopt not available') + def test_infeasible_GDP(self): + m = models.make_infeasible_gdp_model() + + results = SolverFactory('gdpopt.enumerate').solve(m) + + self.assertEqual(results.solver.iterations, 2) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) + self.assertEqual(results.problem.lower_bound, float('inf')) + + def test_unbounded_GDP(self): + m = ConcreteModel() + m.x = Var(bounds=(-1, 10)) + m.y = Var(bounds=(2, 3)) + m.z = Var() + m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]]) + m.o = Objective(expr=m.z) + + results = SolverFactory('gdpopt.enumerate').solve(m) + + self.assertEqual(results.solver.iterations, 1) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.unbounded + ) + self.assertEqual(results.problem.lower_bound, -float('inf')) + self.assertEqual(results.problem.upper_bound, -float('inf')) + + @unittest.skipUnless(SolverFactory('ipopt').available(), 'Ipopt not available') + def test_algorithm_specified_to_solve(self): + m = models.twoDisj_twoCircles_easy() + + results = SolverFactory('gdpopt').solve(m, algorithm='enumerate', tee=True) + + self.assertEqual(results.solver.iterations, 2) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertAlmostEqual(results.problem.lower_bound, 9) + self.assertAlmostEqual(results.problem.upper_bound, 9) + + self.assertAlmostEqual(value(m.x), 2) + self.assertAlmostEqual(value(m.y), 7) + self.assertTrue(value(m.upper_circle.indicator_var)) + self.assertFalse(value(m.lower_circle.indicator_var)) diff --git a/pyomo/contrib/gdpopt/tests/test_gdpopt.py b/pyomo/contrib/gdpopt/tests/test_gdpopt.py index 6034d093d4a..9fab71307a3 100644 --- a/pyomo/contrib/gdpopt/tests/test_gdpopt.py +++ b/pyomo/contrib/gdpopt/tests/test_gdpopt.py @@ -24,17 +24,33 @@ from pyomo.common.fileutils import import_file, PYOMO_ROOT_DIR from pyomo.contrib.appsi.solvers.gurobi import Gurobi from pyomo.contrib.gdpopt.create_oa_subproblems import ( - add_util_block, add_disjunct_list, add_constraints_by_disjunct, - add_global_constraint_list) + add_util_block, + add_disjunct_list, + add_constraints_by_disjunct, + add_global_constraint_list, +) import pyomo.contrib.gdpopt.tests.common_tests as ct from pyomo.contrib.gdpopt.util import is_feasible, time_code from pyomo.contrib.mcpp.pyomo_mcpp import mcpp_available from pyomo.contrib.gdpopt.solve_discrete_problem import ( - solve_MILP_discrete_problem, distinguish_mip_infeasible_or_unbounded) -from pyomo.core.expr.sympy_tools import sympy_available + solve_MILP_discrete_problem, + distinguish_mip_infeasible_or_unbounded, +) from pyomo.environ import ( - Block, ConcreteModel, Constraint, Integers, LogicalConstraint, maximize, - Objective, RangeSet, TransformationFactory, SolverFactory, sqrt, value, Var) + Block, + ConcreteModel, + Constraint, + Integers, + LogicalConstraint, + maximize, + Objective, + RangeSet, + TransformationFactory, + SolverFactory, + sqrt, + value, + Var, +) from pyomo.gdp import Disjunct, Disjunction from pyomo.gdp.tests import models from pyomo.opt import TerminationCondition @@ -50,14 +66,19 @@ GLOA_solvers = (mip_solver, global_nlp_solver, minlp_solver) LOA_solvers_available = all(SolverFactory(s).available() for s in LOA_solvers) GLOA_solvers_available = all(SolverFactory(s).available() for s in GLOA_solvers) -license_available = SolverFactory(global_nlp_solver).license_is_valid() if \ - GLOA_solvers_available else False +license_available = ( + SolverFactory(global_nlp_solver).license_is_valid() + if GLOA_solvers_available + else False +) + class TestGDPoptUnit(unittest.TestCase): """Real unit tests for GDPopt""" - @unittest.skipUnless(SolverFactory(mip_solver).available(), - "MIP solver not available") + @unittest.skipUnless( + SolverFactory(mip_solver).available(), "MIP solver not available" + ) def test_solve_discrete_problem_unbounded(self): m = ConcreteModel() m.GDPopt_utils = Block() @@ -65,13 +86,11 @@ def test_solve_discrete_problem_unbounded(self): m.y = Var(bounds=(2, 3)) m.z = Var() # Include a disjunction so that we don't default to just a MIP solver - m.d = Disjunction(expr=[ - [m.x + m.y >= 5], [m.x - m.y <= 3] - ]) + m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]]) m.o = Objective(expr=m.z) m.GDPopt_utils.variable_list = [m.x, m.y, m.z] - m.GDPopt_utils.disjunct_list = [m.d._autodisjuncts[0], - m.d._autodisjuncts[1]] + m.GDPopt_utils.disjunct_list = [m.d._autodisjuncts[0], m.d._autodisjuncts[1]] + TransformationFactory('gdp.bigm').apply_to(m) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING): solver = SolverFactory('gdpopt.loa') @@ -79,15 +98,18 @@ def test_solve_discrete_problem_unbounded(self): dummy.timing = Bunch() with time_code(dummy.timing, 'main', is_main_timer=True): tc = solve_MILP_discrete_problem( - m.GDPopt_utils, - dummy, - solver.CONFIG(dict(mip_solver=mip_solver))) - self.assertIn("Discrete problem was unbounded. Re-solving with " - "arbitrary bound values", output.getvalue().strip()) + m.GDPopt_utils, dummy, solver.CONFIG(dict(mip_solver=mip_solver)) + ) + self.assertIn( + "Discrete problem was unbounded. Re-solving with " + "arbitrary bound values", + output.getvalue().strip(), + ) self.assertIs(tc, TerminationCondition.unbounded) - @unittest.skipUnless(SolverFactory(mip_solver).available(), - "MIP solver not available") + @unittest.skipUnless( + SolverFactory(mip_solver).available(), "MIP solver not available" + ) def test_solve_lp(self): m = ConcreteModel() m.x = Var(bounds=(-5, 5)) @@ -95,10 +117,10 @@ def test_solve_lp(self): m.o = Objective(expr=m.x) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): - results = SolverFactory('gdpopt.loa').solve( - m, mip_solver=mip_solver) - self.assertIn("Your model is an LP (linear program).", - output.getvalue().strip()) + results = SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver) + self.assertIn( + "Your model is an LP (linear program).", output.getvalue().strip() + ) self.assertAlmostEqual(value(m.o.expr), 1) self.assertEqual(results.problem.number_of_binary_variables, 0) @@ -107,19 +129,18 @@ def test_solve_lp(self): self.assertAlmostEqual(results.problem.lower_bound, 1) self.assertAlmostEqual(results.problem.upper_bound, 1) - @unittest.skipUnless(SolverFactory('gurobi').available(), - 'Gurobi not available') + @unittest.skipUnless(SolverFactory('gurobi').available(), 'Gurobi not available') def test_solve_nlp(self): m = ConcreteModel() m.x = Var(bounds=(-5, 5)) m.c = Constraint(expr=m.x >= 1) - m.o = Objective(expr=m.x ** 2) + m.o = Objective(expr=m.x**2) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): - results = SolverFactory('gdpopt.loa').solve( - m, nlp_solver='gurobi') - self.assertIn("Your model is an NLP (nonlinear program).", - output.getvalue().strip()) + results = SolverFactory('gdpopt.loa').solve(m, nlp_solver='gurobi') + self.assertIn( + "Your model is an NLP (nonlinear program).", output.getvalue().strip() + ) self.assertAlmostEqual(value(m.o.expr), 1) self.assertEqual(results.problem.number_of_binary_variables, 0) @@ -128,8 +149,9 @@ def test_solve_nlp(self): self.assertAlmostEqual(results.problem.lower_bound, 1) self.assertAlmostEqual(results.problem.upper_bound, 1) - @unittest.skipUnless(SolverFactory(mip_solver).available(), - "MIP solver not available") + @unittest.skipUnless( + SolverFactory(mip_solver).available(), "MIP solver not available" + ) def test_solve_constant_obj(self): m = ConcreteModel() m.x = Var(bounds=(-5, 5)) @@ -137,32 +159,38 @@ def test_solve_constant_obj(self): m.o = Objective(expr=1) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): - SolverFactory('gdpopt.loa').solve( - m, mip_solver=mip_solver) - self.assertIn("Your model is an LP (linear program).", - output.getvalue().strip()) + SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver) + self.assertIn( + "Your model is an LP (linear program).", output.getvalue().strip() + ) self.assertAlmostEqual(value(m.o.expr), 1) - @unittest.skipUnless(SolverFactory(nlp_solver).available(), - 'NLP solver not available') + @unittest.skipUnless( + SolverFactory(nlp_solver).available(), 'NLP solver not available' + ) def test_no_objective(self): m = ConcreteModel() m.x = Var(bounds=(-5, 5)) - m.c = Constraint(expr=m.x ** 2 >= 1) + m.c = Constraint(expr=m.x**2 >= 1) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING): - SolverFactory('gdpopt.loa').solve( - m, nlp_solver=nlp_solver) - self.assertIn("Model has no active objectives. Adding dummy " - "objective.", output.getvalue().strip()) + SolverFactory('gdpopt.loa').solve(m, nlp_solver=nlp_solver) + self.assertIn( + "Model has no active objectives. Adding dummy objective.", + output.getvalue().strip(), + ) + + # check that the dummy objective is removed after the solve (else + # repeated solves result in the error about multiple active objectives + # on the model) + self.assertIsNone(m.component("dummy_obj")) def test_multiple_objectives(self): m = ConcreteModel() m.x = Var() m.o = Objective(expr=m.x) m.o2 = Objective(expr=m.x + 1) - with self.assertRaisesRegex(ValueError, "Model has multiple active " - "objectives"): + with self.assertRaisesRegex(ValueError, "Model has multiple active objectives"): SolverFactory('gdpopt.loa').solve(m) def test_is_feasible_function(self): @@ -170,38 +198,31 @@ def test_is_feasible_function(self): m.x = Var(bounds=(0, 3), initialize=2) m.c = Constraint(expr=m.x == 2) GDP_LOA_Solver = SolverFactory('gdpopt.loa') - self.assertTrue( - is_feasible(m, GDP_LOA_Solver.CONFIG())) + self.assertTrue(is_feasible(m, GDP_LOA_Solver.CONFIG())) m.c2 = Constraint(expr=m.x <= 1) - self.assertFalse( - is_feasible(m, GDP_LOA_Solver.CONFIG())) + self.assertFalse(is_feasible(m, GDP_LOA_Solver.CONFIG())) m = ConcreteModel() m.x = Var(bounds=(0, 3), initialize=2) m.c = Constraint(expr=m.x >= 5) - self.assertFalse( - is_feasible(m, GDP_LOA_Solver.CONFIG())) + self.assertFalse(is_feasible(m, GDP_LOA_Solver.CONFIG())) m = ConcreteModel() m.x = Var(bounds=(3, 3), initialize=2) - self.assertFalse( - is_feasible(m, GDP_LOA_Solver.CONFIG())) + self.assertFalse(is_feasible(m, GDP_LOA_Solver.CONFIG())) m = ConcreteModel() m.x = Var(bounds=(0, 1), initialize=2) - self.assertFalse( - is_feasible(m, GDP_LOA_Solver.CONFIG())) + self.assertFalse(is_feasible(m, GDP_LOA_Solver.CONFIG())) m = ConcreteModel() m.x = Var(bounds=(0, 1), initialize=2) m.d = Disjunct() - with self.assertRaisesRegex(NotImplementedError, - "Found active disjunct"): + with self.assertRaisesRegex(NotImplementedError, "Found active disjunct"): is_feasible(m, GDP_LOA_Solver.CONFIG()) - @unittest.skipUnless(SolverFactory('gurobi').available(), - 'Gurobi not available') + @unittest.skipUnless(SolverFactory('gurobi').available(), 'Gurobi not available') def test_infeasible_or_unbounded_mip_termination(self): m = ConcreteModel() m.x = Var() @@ -211,34 +232,45 @@ def test_infeasible_or_unbounded_mip_termination(self): results = SolverFactory('gurobi').solve(m) # Gurobi shrugs: - self.assertEqual(results.solver.termination_condition, - TerminationCondition.infeasibleOrUnbounded) + self.assertEqual( + results.solver.termination_condition, + TerminationCondition.infeasibleOrUnbounded, + ) # just pretend on the config block--we only need these two: config = ConfigDict() config.declare('mip_solver', ConfigValue('gurobi')) config.declare('mip_solver_args', ConfigValue({})) # We tell Gurobi to figure it out - (results, - termination_condition) = distinguish_mip_infeasible_or_unbounded( - m, config) + (results, termination_condition) = distinguish_mip_infeasible_or_unbounded( + m, config + ) # It's infeasible: self.assertEqual(termination_condition, TerminationCondition.infeasible) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.infeasible) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) def get_GDP_on_block(self): m = ConcreteModel() m.x = Var(bounds=(-5, 5)) m.y = Var(bounds=(-2, 6)) m.b = Block() - m.b.disjunction = Disjunction(expr=[[m.x + m.y <= 1, m.y >= 0.5], - [m.x == 2, m.y == 4], - [m.x**2 - m.y <= 3]]) - m.disjunction = Disjunction(expr=[[m.x - m.y <= -2, m.y >= -1], - [m.x == 0, m.y >= 0], - [m.y**2 + m.x <= 3]]) + m.b.disjunction = Disjunction( + expr=[ + [m.x + m.y <= 1, m.y >= 0.5], + [m.x == 2, m.y == 4], + [m.x**2 - m.y <= 3], + ] + ) + m.disjunction = Disjunction( + expr=[ + [m.x - m.y <= -2, m.y >= -1], + [m.x == 0, m.y >= 0], + [m.y**2 + m.x <= 3], + ] + ) return m def test_gloa_cut_generation_ignores_deactivated_constraints(self): @@ -265,8 +297,9 @@ def test_gloa_cut_generation_ignores_deactivated_constraints(self): gloa = SolverFactory('gdpopt.gloa') - constraints = list(gloa._get_active_untransformed_constraints( - util_block, config)) + constraints = list( + gloa._get_active_untransformed_constraints(util_block, config) + ) self.assertEqual(len(constraints), 2) c1 = constraints[0] c2 = constraints[1] @@ -280,64 +313,70 @@ def test_gloa_cut_generation_ignores_deactivated_constraints(self): def test_complain_when_no_algorithm_specified(self): m = self.get_GDP_on_block() with self.assertRaisesRegex( - ValueError, - "No algorithm was specified to the solve method. " - "Please specify an algorithm or use an " - "algorithm-specific solver."): + ValueError, + "No algorithm was specified to the solve method. " + "Please specify an algorithm or use an " + "algorithm-specific solver.", + ): SolverFactory('gdpopt').solve(m) - @unittest.skipIf(not LOA_solvers_available, - "Required subsolvers %s are not available" - % (LOA_solvers,)) + @unittest.skipIf( + not LOA_solvers_available, + "Required subsolvers %s are not available" % (LOA_solvers,), + ) def test_solve_block(self): m = ConcreteModel() m.b = Block() m.b.x = Var(bounds=(-5, 5)) m.b.y = Var(bounds=(-2, 6)) - m.b.disjunction = Disjunction(expr=[[m.b.x + m.b.y <= 1, m.b.y >= 0.5], - [m.b.x == 2, m.b.y == 4], - [m.b.x**2 - m.b.y <= 3]]) - m.disjunction = Disjunction(expr=[[m.b.x - m.b.y <= -2, m.b.y >= -1], - [m.b.x == 0, m.b.y >= 0], - [m.b.y**2 + m.b.x <= 3]]) + m.b.disjunction = Disjunction( + expr=[ + [m.b.x + m.b.y <= 1, m.b.y >= 0.5], + [m.b.x == 2, m.b.y == 4], + [m.b.x**2 - m.b.y <= 3], + ] + ) + m.disjunction = Disjunction( + expr=[ + [m.b.x - m.b.y <= -2, m.b.y >= -1], + [m.b.x == 0, m.b.y >= 0], + [m.b.y**2 + m.b.x <= 3], + ] + ) m.b.obj = Objective(expr=m.b.x) - SolverFactory('gdpopt.ric').solve(m.b, mip_solver=mip_solver, - nlp_solver=nlp_solver) + SolverFactory('gdpopt.ric').solve( + m.b, mip_solver=mip_solver, nlp_solver=nlp_solver + ) # There are multiple optimal solutions, so just leave it at this: self.assertAlmostEqual(value(m.b.x), -5) # We didn't declare any Block on m--it's still just b. self.assertEqual(len(m.component_map(Block)), 1) -@unittest.skipIf(not LOA_solvers_available, - "Required subsolvers %s are not available" - % (LOA_solvers,)) + +@unittest.skipIf( + not LOA_solvers_available, + "Required subsolvers %s are not available" % (LOA_solvers,), +) class TestGDPopt(unittest.TestCase): """Tests for the GDPopt solver plugin.""" - def make_infeasible_gdp_model(self): - m = ConcreteModel() - m.x = Var(bounds=(0, 2)) - m.d = Disjunction(expr=[ - [m.x ** 2 >= 3, m.x >= 3], - [m.x ** 2 <= -1, m.x <= -1]]) - m.o = Objective(expr=m.x) - - return m def test_infeasible_GDP(self): """Test for infeasible GDP.""" - m = self.make_infeasible_gdp_model() + m = models.make_infeasible_gdp_model() output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING): - results = SolverFactory('gdpopt.loa').solve(m, - mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertIn("Set covering problem is infeasible.", - output.getvalue().strip()) - - self.assertEqual(results.solver.termination_condition, - TerminationCondition.infeasible) + results = SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) + self.assertIn( + "Set covering problem is infeasible.", output.getvalue().strip() + ) + + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) self.assertIsNone(m.x.value) self.assertIsNone(m.d.disjuncts[0].indicator_var.value) self.assertIsNone(m.d.disjuncts[1].indicator_var.value) @@ -347,13 +386,19 @@ def test_infeasible_GDP(self): output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): results = SolverFactory('gdpopt').solve( - m, mip_solver=mip_solver, nlp_solver=nlp_solver, - init_algorithm='no_init', algorithm='LOA') - self.assertIn("GDPopt exiting--problem is infeasible.", - output.getvalue().strip()) - - self.assertEqual(results.solver.termination_condition, - TerminationCondition.infeasible) + m, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + init_algorithm='no_init', + algorithm='LOA', + ) + self.assertIn( + "GDPopt exiting--problem is infeasible.", output.getvalue().strip() + ) + + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) self.assertIsNotNone(results.solver.user_time) self.assertIsNone(m.x.value) self.assertIsNone(m.d.disjuncts[0].indicator_var.value) @@ -361,16 +406,23 @@ def test_infeasible_GDP(self): def test_infeasible_gdp_max_binary(self): """Test that max binary initialization catches infeasible GDP too""" - m = self.make_infeasible_gdp_model() + m = models.make_infeasible_gdp_model() output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.DEBUG): results = SolverFactory('gdpopt.loa').solve( - m, mip_solver=mip_solver, nlp_solver=nlp_solver, - init_algorithm='max_binary') - self.assertIn("MILP relaxation for initialization was infeasible. " - "Problem is infeasible.", output.getvalue().strip()) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.infeasible) + m, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + init_algorithm='max_binary', + ) + self.assertIn( + "MILP relaxation for initialization was infeasible. " + "Problem is infeasible.", + output.getvalue().strip(), + ) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) def test_unbounded_gdp_minimization(self): m = ConcreteModel() @@ -378,17 +430,16 @@ def test_unbounded_gdp_minimization(self): m.x = Var(bounds=(-1, 10)) m.y = Var(bounds=(2, 3)) m.z = Var() - m.d = Disjunction(expr=[ - [m.x + m.y >= 5], [m.x - m.y <= 3] - ]) + m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]]) m.o = Objective(expr=m.z) m.GDPopt_utils.variable_list = [m.x, m.y, m.z] - m.GDPopt_utils.disjunct_list = [m.d._autodisjuncts[0], - m.d._autodisjuncts[1]] + m.GDPopt_utils.disjunct_list = [m.d._autodisjuncts[0], m.d._autodisjuncts[1]] results = SolverFactory('gdpopt.loa').solve( - m, mip_solver=mip_solver, nlp_solver=nlp_solver) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.unbounded) + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.unbounded + ) def test_unbounded_gdp_maximization(self): m = ConcreteModel() @@ -396,45 +447,39 @@ def test_unbounded_gdp_maximization(self): m.x = Var(bounds=(-1, 10)) m.y = Var(bounds=(2, 3)) m.z = Var() - m.d = Disjunction(expr=[ - [m.x + m.y <= 5], [m.x - m.y >= 3] - ]) + m.d = Disjunction(expr=[[m.x + m.y <= 5], [m.x - m.y >= 3]]) m.o = Objective(expr=m.z, sense=maximize) m.GDPopt_utils.variable_list = [m.x, m.y, m.z] - m.GDPopt_utils.disjunct_list = [m.d._autodisjuncts[0], - m.d._autodisjuncts[1]] + m.GDPopt_utils.disjunct_list = [m.d._autodisjuncts[0], m.d._autodisjuncts[1]] results = SolverFactory('gdpopt.loa').solve( - m, mip_solver=mip_solver, nlp_solver=nlp_solver) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.unbounded) + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.unbounded + ) # [ESJ 5/16/22]: Using Gurobi for this test because glpk seems to get angry # on Windows when the MIP is arbitrarily bounded with the large bounds. And # I think I blame glpk... - @unittest.skipUnless(SolverFactory('gurobi').available(), - "Gurobi solver not available") + @unittest.skipUnless( + SolverFactory('gurobi').available(), "Gurobi solver not available" + ) def test_GDP_nonlinear_objective(self): m = ConcreteModel() m.x = Var(bounds=(-1, 10)) m.y = Var(bounds=(2, 3)) - m.d = Disjunction(expr=[ - [m.x + m.y >= 5], [m.x - m.y <= 3] - ]) - m.o = Objective(expr=m.x ** 2) - SolverFactory('gdpopt.loa').solve(m, mip_solver='gurobi', - nlp_solver=nlp_solver) + m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]]) + m.o = Objective(expr=m.x**2) + SolverFactory('gdpopt.loa').solve(m, mip_solver='gurobi', nlp_solver=nlp_solver) self.assertAlmostEqual(value(m.o), 0) m = ConcreteModel() m.x = Var(bounds=(-1, 10)) m.y = Var(bounds=(2, 3)) - m.d = Disjunction(expr=[ - [m.x + m.y >= 5], [m.x - m.y <= 3] - ]) - m.o = Objective(expr=-m.x ** 2, sense=maximize) + m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]]) + m.o = Objective(expr=-m.x**2, sense=maximize) print("second") - SolverFactory('gdpopt.loa').solve(m, mip_solver='gurobi', - nlp_solver=nlp_solver) + SolverFactory('gdpopt.loa').solve(m, mip_solver='gurobi', nlp_solver=nlp_solver) self.assertAlmostEqual(value(m.o), 0) def test_nested_disjunctions_set_covering(self): @@ -443,11 +488,14 @@ def test_nested_disjunctions_set_covering(self): # initialization. This makes sure we get the correct answer anyway, as # there is a feasible solution. m = models.makeNestedNonlinearModel() - SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver, - init_algorithm='set_covering') - self.assertAlmostEqual(value(m.x), sqrt(2)/2) - self.assertAlmostEqual(value(m.y), sqrt(2)/2) + SolverFactory('gdpopt.loa').solve( + m, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + init_algorithm='set_covering', + ) + self.assertAlmostEqual(value(m.x), sqrt(2) / 2) + self.assertAlmostEqual(value(m.y), sqrt(2) / 2) self.assertTrue(value(m.disj.disjuncts[1].indicator_var)) self.assertFalse(value(m.disj.disjuncts[0].indicator_var)) self.assertTrue(value(m.d1.indicator_var)) @@ -457,15 +505,18 @@ def test_equality_propagation_infeasibility_in_subproblems(self): m = ConcreteModel() m.x = Var(bounds=(-10, 10)) m.y = Var(bounds=(-10, 10)) - m.disj = Disjunction(expr=[[m.x == m.y, m.y == 2], - [m.y == 8], - [m.x + m.y >= 4, m.y == m.x + 1]]) + m.disj = Disjunction( + expr=[[m.x == m.y, m.y == 2], [m.y == 8], [m.x + m.y >= 4, m.y == m.x + 1]] + ) m.cons = Constraint(expr=m.x == 3) m.obj = Objective(expr=m.x + m.y) - SolverFactory('gdpopt').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver, - init_algorithm='set_covering', - algorithm='RIC') + SolverFactory('gdpopt').solve( + m, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + init_algorithm='set_covering', + algorithm='RIC', + ) self.assertAlmostEqual(value(m.x), 3) self.assertAlmostEqual(value(m.y), 4) self.assertFalse(value(m.disj.disjuncts[0].indicator_var)) @@ -476,14 +527,15 @@ def test_bound_infeasibility_in_subproblems(self): m = ConcreteModel() m.x = Var(bounds=(2, 4)) m.y = Var(bounds=(5, 10)) - m.disj = Disjunction(expr=[[m.x == m.y, m.x + m.y >= 8], - [m.x == 4]]) + m.disj = Disjunction(expr=[[m.x == m.y, m.x + m.y >= 8], [m.x == 4]]) m.obj = Objective(expr=m.x + m.y) SolverFactory('gdpopt.ric').solve( - m, mip_solver=mip_solver, + m, + mip_solver=mip_solver, nlp_solver=nlp_solver, init_algorithm='set_covering', - tee=True) + tee=True, + ) self.assertAlmostEqual(value(m.x), 4) self.assertAlmostEqual(value(m.y), 5) self.assertFalse(value(m.disj.disjuncts[0].indicator_var)) @@ -493,15 +545,17 @@ def test_subproblem_preprocessing_encounters_trivial_constraints(self): m = ConcreteModel() m.x = Var(bounds=(0, 10)) m.z = Var(bounds=(-10, 10)) - m.disjunction = Disjunction(expr=[[m.x == 0, m.z >= 4], - [m.x + m.z <= 0]]) - m.cons = Constraint(expr=m.x*m.z <= 0) + m.disjunction = Disjunction(expr=[[m.x == 0, m.z >= 4], [m.x + m.z <= 0]]) + m.cons = Constraint(expr=m.x * m.z <= 0) m.obj = Objective(expr=-m.z) m.disjunction.disjuncts[0].indicator_var.fix(True) m.disjunction.disjuncts[1].indicator_var.fix(False) - SolverFactory('gdpopt.ric').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver, - init_algorithm='fix_disjuncts') + SolverFactory('gdpopt.ric').solve( + m, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + init_algorithm='fix_disjuncts', + ) # The real test is that this doesn't throw an error when we preprocess # to solve the first subproblem (in the initialization). The nonlinear # constraint becomes trivial, which we need to make sure is handled @@ -517,8 +571,12 @@ def make_convex_circle_and_circle_slice_disjunction(self): m.x = Var(bounds=(-10, 18)) m.y = Var(bounds=(0, 7)) m.obj = Objective(expr=m.x**2 + m.y) - m.disjunction = Disjunction(expr=[[m.x**2 + m.y**2 <= 3, m.y >= 1], - (m.x - 3)**2 + (m.y - 2)**2 <= 1]) + m.disjunction = Disjunction( + expr=[ + [m.x**2 + m.y**2 <= 3, m.y >= 1], + (m.x - 3) ** 2 + (m.y - 2) ** 2 <= 1, + ] + ) return m @@ -528,10 +586,12 @@ def test_some_vars_only_in_subproblem(self): # that works out. m = self.make_convex_circle_and_circle_slice_disjunction() - results = SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + results = SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) self.assertAlmostEqual(results.problem.upper_bound, 1) self.assertAlmostEqual(value(m.x), 0) self.assertAlmostEqual(value(m.y), 1) @@ -542,8 +602,9 @@ def test_fixed_vars_honored(self): # first, force ourselves into the suboptimal disjunct m.disjunction.disjuncts[0].indicator_var.fix(False) - SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) + SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) self.assertTrue(value(m.disjunction.disjuncts[1].indicator_var)) self.assertFalse(value(m.disjunction.disjuncts[0].indicator_var)) self.assertTrue(m.disjunction.disjuncts[0].indicator_var.fixed) @@ -554,8 +615,9 @@ def test_fixed_vars_honored(self): # Now, do it by fixing a continuous variable m.disjunction.disjuncts[0].indicator_var.fixed = False m.x.fix(3) - SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) + SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) self.assertTrue(value(m.disjunction.disjuncts[1].indicator_var)) self.assertFalse(value(m.disjunction.disjuncts[0].indicator_var)) self.assertEqual(value(m.x), 3) @@ -567,15 +629,19 @@ def test_ignore_set_for_oa_cuts(self): m = self.make_convex_circle_and_circle_slice_disjunction() m.disjunction.disjuncts[1].GDPopt_ignore_OA = [ - m.disjunction.disjuncts[1].constraint[1]] + m.disjunction.disjuncts[1].constraint[1] + ] output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.DEBUG): - SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertIn('OA cut addition for ' - 'disjunction_disjuncts[1].constraint[1] skipped ' - 'because it is in the ignore set.', - output.getvalue().strip()) + SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) + self.assertIn( + 'OA cut addition for ' + 'disjunction_disjuncts[1].constraint[1] skipped ' + 'because it is in the ignore set.', + output.getvalue().strip(), + ) # and the solution is optimal self.assertAlmostEqual(value(m.x), 0) self.assertAlmostEqual(value(m.y), 1) @@ -589,178 +655,192 @@ def test_reverse_numeric_differentiation_in_LOA(self): m.s = RangeSet(1300) m.x = Var(m.s, bounds=(-10, 10)) m.d1 = Disjunct() - m.d1.hypersphere = Constraint(expr=sum(m.x[i]**2 for i in m.s) <= 1) + m.d1.hypersphere = Constraint(expr=sum(m.x[i] ** 2 for i in m.s) <= 1) m.d2 = Disjunct() - m.d2.translated_hyper_sphere = Constraint(expr=sum((m.x[i] - i)**2 for i - in m.s) <= 1) + m.d2.translated_hyper_sphere = Constraint( + expr=sum((m.x[i] - i) ** 2 for i in m.s) <= 1 + ) m.disjunction = Disjunction(expr=[m.d1, m.d2]) # we'll go in the sphere centered at (0,0,...,0) m.obj = Objective(expr=sum(m.x[i] for i in m.s)) - results = SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) + results = SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) self.assertTrue(value(m.d1.indicator_var)) self.assertFalse(value(m.d2.indicator_var)) - x_val = -sqrt(1300)/1300 + x_val = -sqrt(1300) / 1300 for x in m.x.values(): self.assertAlmostEqual(value(x), x_val) - self.assertAlmostEqual(results.problem.upper_bound, 1300*x_val, - places=6) + self.assertAlmostEqual(results.problem.upper_bound, 1300 * x_val, places=6) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_logical_constraints_on_disjuncts(self): m = models.makeLogicalConstraintsOnDisjuncts() - SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) + SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) self.assertAlmostEqual(value(m.x), 8) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_logical_constraints_on_disjuncts_nonlinear_convex(self): m = models.makeLogicalConstraintsOnDisjuncts_NonlinearConvex() - SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver, tee=True) + results = SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) self.assertAlmostEqual(value(m.x), 4) def test_nested_disjunctions_no_init(self): m = models.makeNestedNonlinearModel() - SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver, - init_algorithm='no_init') - self.assertAlmostEqual(value(m.x), sqrt(2)/2) - self.assertAlmostEqual(value(m.y), sqrt(2)/2) + SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver, init_algorithm='no_init' + ) + self.assertAlmostEqual(value(m.x), sqrt(2) / 2) + self.assertAlmostEqual(value(m.y), sqrt(2) / 2) def test_nested_disjunctions_max_binary(self): m = models.makeNestedNonlinearModel() - SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver, - init_algorithm='max_binary') - self.assertAlmostEqual(value(m.x), sqrt(2)/2) - self.assertAlmostEqual(value(m.y), sqrt(2)/2) + SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver, init_algorithm='max_binary' + ) + self.assertAlmostEqual(value(m.x), sqrt(2) / 2) + self.assertAlmostEqual(value(m.y), sqrt(2) / 2) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_boolean_vars_on_disjuncts(self): m = models.makeBooleanVarsOnDisjuncts() - SolverFactory('gdpopt.loa').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) + SolverFactory('gdpopt.loa').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) self.assertAlmostEqual(value(m.x), 8) def test_LOA_8PP_default_init(self): """Test logic-based outer approximation with 8PP.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.loa').solve(eight_process, - mip_solver=mip_solver, - nlp_solver=nlp_solver) + results = SolverFactory('gdpopt.loa').solve( + eight_process, mip_solver=mip_solver, nlp_solver=nlp_solver + ) ct.check_8PP_solution(self, eight_process, results) def test_iteration_limit(self): - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): - results = SolverFactory('gdpopt.loa').solve(eight_process, - mip_solver=mip_solver, - nlp_solver=nlp_solver, - iterlim=2) - self.assertIn("GDPopt unable to converge bounds within iteration " - "limit of 2 iterations.", output.getvalue().strip()) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.maxIterations) + results = SolverFactory('gdpopt.loa').solve( + eight_process, mip_solver=mip_solver, nlp_solver=nlp_solver, iterlim=2 + ) + self.assertIn( + "GDPopt unable to converge bounds within iteration " + "limit of 2 iterations.", + output.getvalue().strip(), + ) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxIterations + ) def test_time_limit(self): - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): - results = SolverFactory('gdpopt.loa').solve(eight_process, - mip_solver=mip_solver, - nlp_solver=nlp_solver, - time_limit=1) - self.assertIn("GDPopt exiting--Did not converge bounds before " - "time limit of 1 seconds.", output.getvalue().strip()) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.maxTimeLimit) - - @unittest.skipUnless(sympy_available, "Sympy not available") + results = SolverFactory('gdpopt.loa').solve( + eight_process, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + time_limit=1, + ) + self.assertIn( + "GDPopt exiting--Did not converge bounds before " + "time limit of 1 seconds.", + output.getvalue().strip(), + ) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxTimeLimit + ) + def test_LOA_8PP_logical_default_init(self): """Test logic-based outer approximation with 8PP.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_logical.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_logical.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.loa').solve(eight_process, - mip_solver=mip_solver, - nlp_solver=nlp_solver, - tee=False) + results = SolverFactory('gdpopt.loa').solve( + eight_process, mip_solver=mip_solver, nlp_solver=nlp_solver, tee=False + ) ct.check_8PP_logical_solution(self, eight_process, results) - @unittest.skipUnless(SolverFactory('gams').available(exception_flag=False), - 'GAMS solver not available') + @unittest.skipUnless( + SolverFactory('gams').available(exception_flag=False), + 'GAMS solver not available', + ) def test_LOA_8PP_gams_solver(self): # Make sure that the duals are still correct - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.loa').solve(eight_process, - mip_solver=mip_solver, - nlp_solver='gams', - max_slack=0, tee=False) + results = SolverFactory('gdpopt.loa').solve( + eight_process, + mip_solver=mip_solver, + nlp_solver='gams', + max_slack=0, + tee=False, + ) ct.check_8PP_solution(self, eight_process, results) def test_LOA_8PP_force_NLP(self): - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.loa').solve(eight_process, - mip_solver=mip_solver, - nlp_solver=nlp_solver, - force_subproblem_nlp=True, - tee=False) + results = SolverFactory('gdpopt.loa').solve( + eight_process, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + force_subproblem_nlp=True, + tee=False, + ) ct.check_8PP_solution(self, eight_process, results) def test_LOA_strip_pack_default_init(self): """Test logic-based outer approximation with strip packing.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() - SolverFactory('gdpopt.loa').solve(strip_pack, mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 11) <= 1E-2) + SolverFactory('gdpopt.loa').solve( + strip_pack, mip_solver=mip_solver, nlp_solver=nlp_solver + ) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 11) <= 1e-2) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_LOA_strip_pack_logical_constraints(self): """Test logic-based outer approximation with variation of strip packing with some logical constraints.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() # add logical constraints strip_pack.Rec3AboveOrBelowRec1 = LogicalConstraint( - expr=strip_pack.no_overlap[1, 3].disjuncts[2].indicator_var.lor( - strip_pack.no_overlap[1, 3].disjuncts[3].indicator_var)) + expr=strip_pack.no_overlap[1, 3] + .disjuncts[2] + .indicator_var.lor(strip_pack.no_overlap[1, 3].disjuncts[3].indicator_var) + ) strip_pack.Rec3RightOrLeftOfRec2 = LogicalConstraint( - expr=strip_pack.no_overlap[2, 3].disjuncts[0].indicator_var.lor( - strip_pack.no_overlap[2, 3].disjuncts[1].indicator_var)) + expr=strip_pack.no_overlap[2, 3] + .disjuncts[0] + .indicator_var.lor(strip_pack.no_overlap[2, 3].disjuncts[1].indicator_var) + ) SolverFactory('gdpopt.loa').solve( - strip_pack, mip_solver=mip_solver, + strip_pack, + mip_solver=mip_solver, nlp_solver=nlp_solver, - subproblem_presolve=False)# skip preprocessing for linear problem - # and so that we test everything is still - # fine - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 13) <= 1E-2) + subproblem_presolve=False, + ) # skip preprocessing for linear problem + # and so that we test everything is still + # fine + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 13) <= 1e-2) @unittest.pytest.mark.expensive def test_LOA_constrained_layout_default_init(self): """Test LOA with constrained layout.""" - exfile = import_file( - join(exdir, 'constrained_layout', 'cons_layout_model.py')) + exfile = import_file(join(exdir, 'constrained_layout', 'cons_layout_model.py')) cons_layout = exfile.build_constrained_layout_model() SolverFactory('gdpopt.loa').solve( cons_layout, @@ -772,68 +852,72 @@ def test_LOA_constrained_layout_default_init(self): objective_value = value(cons_layout.min_dist_cost.expr) self.assertTrue( fabs(objective_value - 41573) <= 200, - "Objective value of %s instead of 41573" % objective_value) + "Objective value of %s instead of 41573" % objective_value, + ) def test_LOA_8PP_maxBinary(self): """Test logic-based OA with max_binary initialization.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.loa').solve(eight_process, - init_algorithm='max_binary', - mip_solver=mip_solver, - nlp_solver=nlp_solver) + results = SolverFactory('gdpopt.loa').solve( + eight_process, + init_algorithm='max_binary', + mip_solver=mip_solver, + nlp_solver=nlp_solver, + ) ct.check_8PP_solution(self, eight_process, results) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_LOA_8PP_logical_maxBinary(self): """Test logic-based OA with max_binary initialization.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_logical.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_logical.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.loa').solve(eight_process, - init_algorithm='max_binary', - mip_solver=mip_solver, - nlp_solver=nlp_solver) + results = SolverFactory('gdpopt.loa').solve( + eight_process, + init_algorithm='max_binary', + mip_solver=mip_solver, + nlp_solver=nlp_solver, + ) ct.check_8PP_logical_solution(self, eight_process, results) def test_LOA_strip_pack_maxBinary(self): """Test LOA with strip packing using max_binary initialization.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() - SolverFactory('gdpopt.loa').solve(strip_pack, - init_algorithm='max_binary', - mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 11) <= 1E-2) + SolverFactory('gdpopt.loa').solve( + strip_pack, + init_algorithm='max_binary', + mip_solver=mip_solver, + nlp_solver=nlp_solver, + ) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 11) <= 1e-2) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_LOA_strip_pack_maxBinary_logical_constraints(self): """Test LOA with strip packing using max_binary initialization and logical constraints.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() # add logical constraints strip_pack.Rec3AboveOrBelowRec1 = LogicalConstraint( - expr=strip_pack.no_overlap[1, 3].disjuncts[2].indicator_var.lor( - strip_pack.no_overlap[1, 3].disjuncts[3].indicator_var)) + expr=strip_pack.no_overlap[1, 3] + .disjuncts[2] + .indicator_var.lor(strip_pack.no_overlap[1, 3].disjuncts[3].indicator_var) + ) strip_pack.Rec3RightOrLeftOfRec2 = LogicalConstraint( - expr=strip_pack.no_overlap[2, 3].disjuncts[0].indicator_var.lor( - strip_pack.no_overlap[2, 3].disjuncts[1].indicator_var)) - SolverFactory('gdpopt.loa').solve(strip_pack, - init_algorithm='max_binary', - mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 13) <= 1E-2) + expr=strip_pack.no_overlap[2, 3] + .disjuncts[0] + .indicator_var.lor(strip_pack.no_overlap[2, 3].disjuncts[1].indicator_var) + ) + SolverFactory('gdpopt.loa').solve( + strip_pack, + init_algorithm='max_binary', + mip_solver=mip_solver, + nlp_solver=nlp_solver, + ) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 13) <= 1e-2) def test_LOA_8PP_fixed_disjuncts(self): """Test LOA with 8PP using fixed disjuncts initialization.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() initialize = [ # Use units 1, 4, 7, 8 @@ -841,7 +925,7 @@ def test_LOA_8PP_fixed_disjuncts(self): eight_process.use_unit_3ornot.disjuncts[1], eight_process.use_unit_4or5ornot.disjuncts[0], eight_process.use_unit_6or7ornot.disjuncts[1], - eight_process.use_unit_8ornot.disjuncts[0] + eight_process.use_unit_8ornot.disjuncts[0], ] for disj in eight_process.component_data_objects(Disjunct): if disj in initialize: @@ -852,12 +936,12 @@ def test_LOA_8PP_fixed_disjuncts(self): eight_process, init_algorithm='fix_disjuncts', mip_solver=mip_solver, - nlp_solver=nlp_solver) + nlp_solver=nlp_solver, + ) ct.check_8PP_solution(self, eight_process, results) def test_LOA_custom_disjuncts_with_silly_components_in_list(self): - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() eight_process.goofy = Disjunct() @@ -865,54 +949,67 @@ def test_LOA_custom_disjuncts_with_silly_components_in_list(self): initialize = [ # Use units 1, 4, 7, 8 - [eight_process.use_unit_1or2.disjuncts[0], - eight_process.use_unit_3ornot.disjuncts[1], - eight_process.use_unit_4or5ornot.disjuncts[0], - eight_process.use_unit_6or7ornot.disjuncts[1], - eight_process.use_unit_8ornot.disjuncts[0], - eight_process.goofy], + [ + eight_process.use_unit_1or2.disjuncts[0], + eight_process.use_unit_3ornot.disjuncts[1], + eight_process.use_unit_4or5ornot.disjuncts[0], + eight_process.use_unit_6or7ornot.disjuncts[1], + eight_process.use_unit_8ornot.disjuncts[0], + eight_process.goofy, + ], # Use units 2, 4, 6, 8 - [eight_process.use_unit_1or2.disjuncts[1], - eight_process.use_unit_3ornot.disjuncts[1], - eight_process.use_unit_4or5ornot.disjuncts[0], - eight_process.use_unit_6or7ornot.disjuncts[0], - eight_process.use_unit_8ornot.disjuncts[0]] + [ + eight_process.use_unit_1or2.disjuncts[1], + eight_process.use_unit_3ornot.disjuncts[1], + eight_process.use_unit_4or5ornot.disjuncts[0], + eight_process.use_unit_6or7ornot.disjuncts[0], + eight_process.use_unit_8ornot.disjuncts[0], + ], ] output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING): results = SolverFactory('gdpopt.loa').solve( - eight_process, init_algorithm='custom_disjuncts', - custom_init_disjuncts=initialize, mip_solver=mip_solver, - nlp_solver=nlp_solver) - - self.assertIn("The following disjuncts from the custom disjunct " - "initialization set number 0 were unused: goofy", - output.getvalue().strip()) + eight_process, + init_algorithm='custom_disjuncts', + custom_init_disjuncts=initialize, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + ) + + self.assertIn( + "The following disjuncts from the custom disjunct " + "initialization set number 0 were unused: goofy", + output.getvalue().strip(), + ) # and the solution is optimal to boot - self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) + self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1e-2) def test_LOA_custom_disjuncts(self): """Test logic-based OA with custom disjuncts initialization.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() initialize = [ # Use units 1, 4, 7, 8 - [eight_process.use_unit_1or2.disjuncts[0], - eight_process.use_unit_3ornot.disjuncts[1], - eight_process.use_unit_4or5ornot.disjuncts[0], - eight_process.use_unit_6or7ornot.disjuncts[1], - eight_process.use_unit_8ornot.disjuncts[0]], + [ + eight_process.use_unit_1or2.disjuncts[0], + eight_process.use_unit_3ornot.disjuncts[1], + eight_process.use_unit_4or5ornot.disjuncts[0], + eight_process.use_unit_6or7ornot.disjuncts[1], + eight_process.use_unit_8ornot.disjuncts[0], + ], # Use units 2, 4, 6, 8 - [eight_process.use_unit_1or2.disjuncts[1], - eight_process.use_unit_3ornot.disjuncts[1], - eight_process.use_unit_4or5ornot.disjuncts[0], - eight_process.use_unit_6or7ornot.disjuncts[0], - eight_process.use_unit_8ornot.disjuncts[0]] + [ + eight_process.use_unit_1or2.disjuncts[1], + eight_process.use_unit_3ornot.disjuncts[1], + eight_process.use_unit_4or5ornot.disjuncts[0], + eight_process.use_unit_6or7ornot.disjuncts[0], + eight_process.use_unit_8ornot.disjuncts[0], + ], ] - def assert_correct_disjuncts_active(solver, subprob_util_block, - discrete_problem_util_block): + def assert_correct_disjuncts_active( + solver, subprob_util_block, discrete_problem_util_block + ): iteration = solver.initialization_iteration discrete_problem = discrete_problem_util_block.model() subprob = subprob_util_block.model() @@ -925,8 +1022,7 @@ def assert_correct_disjuncts_active(solver, subprob_util_block, idx = orig_disj.index() # Find the corresponding components on the discrete problem and # subproblem - discrete_problem_parent = discrete_problem.component( - parent_nm) + discrete_problem_parent = discrete_problem.component(parent_nm) subprob_parent = subprob.component(parent_nm) self.assertIsInstance(discrete_problem_parent, Disjunct) self.assertIsInstance(subprob_parent, Block) @@ -941,28 +1037,30 @@ def assert_correct_disjuncts_active(solver, subprob_util_block, self.assertFalse(disj.active) SolverFactory('gdpopt.loa').solve( - eight_process, init_algorithm='custom_disjuncts', - custom_init_disjuncts=initialize, mip_solver=mip_solver, + eight_process, + init_algorithm='custom_disjuncts', + custom_init_disjuncts=initialize, + mip_solver=mip_solver, nlp_solver=nlp_solver, - subproblem_initialization_method=assert_correct_disjuncts_active) + subproblem_initialization_method=assert_correct_disjuncts_active, + ) - self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) + self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1e-2) - @unittest.skipUnless(Gurobi().available(), - "APPSI Gurobi solver is not available") + @unittest.skipUnless(Gurobi().available(), "APPSI Gurobi solver is not available") def test_auto_persistent_solver(self): - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) m = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.loa').solve(m, - mip_solver='appsi_gurobi') + results = SolverFactory('gdpopt.loa').solve(m, mip_solver='appsi_gurobi') - self.assertTrue(fabs(value(m.profit.expr) - 68) <= 1E-2) + self.assertTrue(fabs(value(m.profit.expr) - 68) <= 1e-2) ct.check_8PP_solution(self, m, results) -@unittest.skipIf(not LOA_solvers_available, - "Required subsolvers %s are not available" - % (LOA_solvers,)) + +@unittest.skipIf( + not LOA_solvers_available, + "Required subsolvers %s are not available" % (LOA_solvers,), +) class TestGDPoptRIC(unittest.TestCase): """Tests for the GDPopt solver plugin.""" @@ -970,135 +1068,133 @@ def test_infeasible_GDP(self): """Test for infeasible GDP.""" m = ConcreteModel() m.x = Var(bounds=(0, 2)) - m.d = Disjunction(expr=[ - [m.x ** 2 >= 3, m.x >= 3], - [m.x ** 2 <= -1, m.x <= -1]]) + m.d = Disjunction(expr=[[m.x**2 >= 3, m.x >= 3], [m.x**2 <= -1, m.x <= -1]]) m.o = Objective(expr=m.x) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING): - SolverFactory('gdpopt.ric').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertIn("Set covering problem is infeasible.", - output.getvalue().strip()) + SolverFactory('gdpopt.ric').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) + self.assertIn( + "Set covering problem is infeasible.", output.getvalue().strip() + ) def test_GDP_nonlinear_objective(self): m = ConcreteModel() m.x = Var(bounds=(-1, 10)) m.y = Var(bounds=(2, 3)) - m.d = Disjunction(expr=[ - [m.x + m.y >= 5], [m.x - m.y <= 3] - ]) - m.o = Objective(expr=m.x ** 2) - SolverFactory('gdpopt.ric').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) + m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]]) + m.o = Objective(expr=m.x**2) + SolverFactory('gdpopt.ric').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) self.assertAlmostEqual(value(m.o), 0) m = ConcreteModel() m.x = Var(bounds=(-1, 10)) m.y = Var(bounds=(2, 3)) - m.d = Disjunction(expr=[ - [m.x + m.y >= 5], [m.x - m.y <= 3] - ]) - m.o = Objective(expr=-m.x ** 2, sense=maximize) - SolverFactory('gdpopt.ric').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) + m.d = Disjunction(expr=[[m.x + m.y >= 5], [m.x - m.y <= 3]]) + m.o = Objective(expr=-m.x**2, sense=maximize) + SolverFactory('gdpopt.ric').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) self.assertAlmostEqual(value(m.o), 0) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_logical_constraints_on_disjuncts(self): m = models.makeLogicalConstraintsOnDisjuncts() - SolverFactory('gdpopt.ric').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) + SolverFactory('gdpopt.ric').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) self.assertAlmostEqual(value(m.x), 8) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_boolean_vars_on_disjuncts(self): m = models.makeBooleanVarsOnDisjuncts() - SolverFactory('gdpopt.ric').solve(m, mip_solver=mip_solver, - nlp_solver=nlp_solver) + SolverFactory('gdpopt.ric').solve( + m, mip_solver=mip_solver, nlp_solver=nlp_solver + ) self.assertAlmostEqual(value(m.x), 8) def test_RIC_8PP_default_init(self): """Test logic-based outer approximation with 8PP.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.ric').solve(eight_process, - mip_solver=mip_solver, - nlp_solver=nlp_solver, - tee=False) + results = SolverFactory('gdpopt.ric').solve( + eight_process, mip_solver=mip_solver, nlp_solver=nlp_solver, tee=False + ) ct.check_8PP_solution(self, eight_process, results) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_RIC_8PP_logical_default_init(self): """Test logic-based outer approximation with 8PP.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_logical.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_logical.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.ric').solve(eight_process, - mip_solver=mip_solver, - nlp_solver=nlp_solver, - tee=False) + results = SolverFactory('gdpopt.ric').solve( + eight_process, mip_solver=mip_solver, nlp_solver=nlp_solver, tee=False + ) ct.check_8PP_logical_solution(self, eight_process, results) - @unittest.skipUnless(SolverFactory('gams').available(exception_flag=False), - 'GAMS solver not available') + @unittest.skipUnless( + SolverFactory('gams').available(exception_flag=False), + 'GAMS solver not available', + ) def test_RIC_8PP_gams_solver(self): # Make sure that the duals are still correct - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.ric').solve(eight_process, - mip_solver=mip_solver, - nlp_solver='gams', - max_slack=0, tee=False) + results = SolverFactory('gdpopt.ric').solve( + eight_process, + mip_solver=mip_solver, + nlp_solver='gams', + max_slack=0, + tee=False, + ) ct.check_8PP_solution(self, eight_process, results) def test_RIC_8PP_force_NLP(self): - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.ric').solve(eight_process, - mip_solver=mip_solver, - nlp_solver=nlp_solver, - force_subproblem_nlp=True, - tee=False) + results = SolverFactory('gdpopt.ric').solve( + eight_process, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + force_subproblem_nlp=True, + tee=False, + ) ct.check_8PP_solution(self, eight_process, results) def test_RIC_strip_pack_default_init(self): """Test logic-based outer approximation with strip packing.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() - SolverFactory('gdpopt.ric').solve(strip_pack, mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 11) <= 1E-2) + SolverFactory('gdpopt.ric').solve( + strip_pack, mip_solver=mip_solver, nlp_solver=nlp_solver + ) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 11) <= 1e-2) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_RIC_strip_pack_default_init_logical_constraints(self): """Test logic-based outer approximation with strip packing with logical constraints.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() # add logical constraints strip_pack.Rec3AboveOrBelowRec1 = LogicalConstraint( - expr=strip_pack.no_overlap[1, 3].disjuncts[2].indicator_var.lor( - strip_pack.no_overlap[1, 3].disjuncts[3].indicator_var)) + expr=strip_pack.no_overlap[1, 3] + .disjuncts[2] + .indicator_var.lor(strip_pack.no_overlap[1, 3].disjuncts[3].indicator_var) + ) strip_pack.Rec3RightOrLeftOfRec2 = LogicalConstraint( - expr=strip_pack.no_overlap[2, 3].disjuncts[0].indicator_var.lor( - strip_pack.no_overlap[2, 3].disjuncts[1].indicator_var)) - SolverFactory('gdpopt.ric').solve(strip_pack, mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 13) <= 1E-2) + expr=strip_pack.no_overlap[2, 3] + .disjuncts[0] + .indicator_var.lor(strip_pack.no_overlap[2, 3].disjuncts[1].indicator_var) + ) + SolverFactory('gdpopt.ric').solve( + strip_pack, mip_solver=mip_solver, nlp_solver=nlp_solver + ) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 13) <= 1e-2) @unittest.pytest.mark.expensive def test_RIC_constrained_layout_default_init(self): """Test RIC with constrained layout.""" - exfile = import_file( - join(exdir, 'constrained_layout', 'cons_layout_model.py')) + exfile = import_file(join(exdir, 'constrained_layout', 'cons_layout_model.py')) cons_layout = exfile.build_constrained_layout_model() SolverFactory('gdpopt.ric').solve( cons_layout, @@ -1110,56 +1206,60 @@ def test_RIC_constrained_layout_default_init(self): objective_value = value(cons_layout.min_dist_cost.expr) self.assertTrue( fabs(objective_value - 41573) <= 200, - "Objective value of %s instead of 41573" % objective_value) + "Objective value of %s instead of 41573" % objective_value, + ) def test_RIC_8PP_maxBinary(self): """Test logic-based OA with max_binary initialization.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() - results = SolverFactory('gdpopt.ric').solve(eight_process, - init_algorithm='max_binary', - mip_solver=mip_solver, - nlp_solver=nlp_solver) + results = SolverFactory('gdpopt.ric').solve( + eight_process, + init_algorithm='max_binary', + mip_solver=mip_solver, + nlp_solver=nlp_solver, + ) ct.check_8PP_solution(self, eight_process, results) def test_RIC_strip_pack_maxBinary(self): """Test RIC with strip packing using max_binary initialization.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() - SolverFactory('gdpopt.ric').solve(strip_pack, - init_algorithm='max_binary', - mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 11) <= 1E-2) + SolverFactory('gdpopt.ric').solve( + strip_pack, + init_algorithm='max_binary', + mip_solver=mip_solver, + nlp_solver=nlp_solver, + ) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 11) <= 1e-2) - @unittest.skipUnless(sympy_available, "Sympy not available") def test_RIC_strip_pack_maxBinary_logical_constraints(self): """Test RIC with strip packing using max_binary initialization and including logical constraints.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() # add logical constraints strip_pack.Rec3AboveOrBelowRec1 = LogicalConstraint( - expr=strip_pack.no_overlap[1, 3].disjuncts[2].indicator_var.lor( - strip_pack.no_overlap[1, 3].disjuncts[3].indicator_var)) + expr=strip_pack.no_overlap[1, 3] + .disjuncts[2] + .indicator_var.lor(strip_pack.no_overlap[1, 3].disjuncts[3].indicator_var) + ) strip_pack.Rec3RightOrLeftOfRec2 = LogicalConstraint( - expr=strip_pack.no_overlap[2, 3].disjuncts[0].indicator_var.lor( - strip_pack.no_overlap[2, 3].disjuncts[1].indicator_var)) - SolverFactory('gdpopt.ric').solve(strip_pack, - init_algorithm='max_binary', - mip_solver=mip_solver, - nlp_solver=nlp_solver) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 13) <= 1E-2) + expr=strip_pack.no_overlap[2, 3] + .disjuncts[0] + .indicator_var.lor(strip_pack.no_overlap[2, 3].disjuncts[1].indicator_var) + ) + SolverFactory('gdpopt.ric').solve( + strip_pack, + init_algorithm='max_binary', + mip_solver=mip_solver, + nlp_solver=nlp_solver, + ) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 13) <= 1e-2) def test_RIC_8PP_fixed_disjuncts(self): """Test RIC with 8PP using fixed disjuncts initialization.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() initialize = [ # Use units 1, 4, 7, 8 @@ -1167,7 +1267,7 @@ def test_RIC_8PP_fixed_disjuncts(self): eight_process.use_unit_3ornot.disjuncts[1], eight_process.use_unit_4or5ornot.disjuncts[0], eight_process.use_unit_6or7ornot.disjuncts[1], - eight_process.use_unit_8ornot.disjuncts[0] + eight_process.use_unit_8ornot.disjuncts[0], ] for disj in eight_process.component_data_objects(Disjunct): if disj in initialize: @@ -1178,31 +1278,36 @@ def test_RIC_8PP_fixed_disjuncts(self): eight_process, init_algorithm='fix_disjuncts', mip_solver=mip_solver, - nlp_solver=nlp_solver) + nlp_solver=nlp_solver, + ) ct.check_8PP_solution(self, eight_process, results) def test_RIC_custom_disjuncts(self): """Test logic-based OA with custom disjuncts initialization.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() initialize = [ # Use units 1, 4, 7, 8 - [eight_process.use_unit_1or2.disjuncts[0], - eight_process.use_unit_3ornot.disjuncts[1], - eight_process.use_unit_4or5ornot.disjuncts[0], - eight_process.use_unit_6or7ornot.disjuncts[1], - eight_process.use_unit_8ornot.disjuncts[0]], + [ + eight_process.use_unit_1or2.disjuncts[0], + eight_process.use_unit_3ornot.disjuncts[1], + eight_process.use_unit_4or5ornot.disjuncts[0], + eight_process.use_unit_6or7ornot.disjuncts[1], + eight_process.use_unit_8ornot.disjuncts[0], + ], # Use units 2, 4, 6, 8 - [eight_process.use_unit_1or2.disjuncts[1], - eight_process.use_unit_3ornot.disjuncts[1], - eight_process.use_unit_4or5ornot.disjuncts[0], - eight_process.use_unit_6or7ornot.disjuncts[0], - eight_process.use_unit_8ornot.disjuncts[0]] + [ + eight_process.use_unit_1or2.disjuncts[1], + eight_process.use_unit_3ornot.disjuncts[1], + eight_process.use_unit_4or5ornot.disjuncts[0], + eight_process.use_unit_6or7ornot.disjuncts[0], + eight_process.use_unit_8ornot.disjuncts[0], + ], ] - def assert_correct_disjuncts_active(solver, subprob_util_block, - discrete_problem_util_block): + def assert_correct_disjuncts_active( + solver, subprob_util_block, discrete_problem_util_block + ): # I can get the iteration based on the number of no-good # cuts in this case... iteration = solver.initialization_iteration @@ -1217,8 +1322,7 @@ def assert_correct_disjuncts_active(solver, subprob_util_block, idx = orig_disj.index() # Find the corresponding components on the discrete problem and # subproblem - discrete_problem_parent = discrete_problem.component( - parent_nm) + discrete_problem_parent = discrete_problem.component(parent_nm) subprob_parent = subprob.component(parent_nm) self.assertIsInstance(discrete_problem_parent, Disjunct) self.assertIsInstance(subprob_parent, Block) @@ -1233,28 +1337,37 @@ def assert_correct_disjuncts_active(solver, subprob_util_block, self.assertFalse(disj.active) SolverFactory('gdpopt.ric').solve( - eight_process, init_algorithm='custom_disjuncts', + eight_process, + init_algorithm='custom_disjuncts', custom_init_disjuncts=initialize, mip_solver=mip_solver, nlp_solver=nlp_solver, - subproblem_initialization_method=assert_correct_disjuncts_active) + subproblem_initialization_method=assert_correct_disjuncts_active, + ) - self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1E-2) + self.assertTrue(fabs(value(eight_process.profit.expr) - 68) <= 1e-2) def test_force_nlp_subproblem_with_general_integer_variables(self): m = ConcreteModel() m.x = Var(domain=Integers, bounds=(0, 10)) m.y = Var(bounds=(0, 10)) - m.disjunction = Disjunction(expr=[[m.x**2 <= 4, m.y**2 <= 1], - [(m.x - 1)**2 + (m.y - 1)**2 <= 4, - m.y <= 4]]) + m.disjunction = Disjunction( + expr=[ + [m.x**2 <= 4, m.y**2 <= 1], + [(m.x - 1) ** 2 + (m.y - 1) ** 2 <= 4, m.y <= 4], + ] + ) m.obj = Objective(expr=-m.y - m.x) - results = SolverFactory('gdpopt.ric').solve(m, init_algorithm='no_init', - mip_solver=mip_solver, - nlp_solver=nlp_solver, - force_subproblem_nlp=True) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + results = SolverFactory('gdpopt.ric').solve( + m, + init_algorithm='no_init', + mip_solver=mip_solver, + nlp_solver=nlp_solver, + force_subproblem_nlp=True, + ) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) self.assertAlmostEqual(value(m.x), 2) self.assertAlmostEqual(value(m.y), 1 + sqrt(3)) @@ -1268,9 +1381,12 @@ def test_force_nlp_subproblem_with_unbounded_integer_variables(self): m.x = Var(domain=Integers, bounds=(0, 10)) m.y = Var(bounds=(0, 10)) m.w = Var(domain=Integers) - m.disjunction = Disjunction(expr=[[m.x**2 <= 4, m.y**2 <= 1], - [(m.x - 1)**2 + (m.y - 1)**2 <= 4, - m.y <= 4]]) + m.disjunction = Disjunction( + expr=[ + [m.x**2 <= 4, m.y**2 <= 1], + [(m.x - 1) ** 2 + (m.y - 1) ** 2 <= 4, m.y <= 4], + ] + ) m.c = Constraint(expr=m.x + m.y == m.w) m.obj = Objective(expr=-m.w) # We don't find a feasible solution this way, but we want to make sure @@ -1279,11 +1395,17 @@ def test_force_nlp_subproblem_with_unbounded_integer_variables(self): output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.INFO): results = SolverFactory('gdpopt.ric').solve( - m, init_algorithm='no_init', mip_solver=mip_solver, - nlp_solver=nlp_solver, force_subproblem_nlp=True, iterlim=5) + m, + init_algorithm='no_init', + mip_solver=mip_solver, + nlp_solver=nlp_solver, + force_subproblem_nlp=True, + iterlim=5, + ) self.assertIn("No feasible solutions found.", output.getvalue().strip()) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.maxIterations) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxIterations + ) # There's no solution in the model self.assertIsNone(m.x.value) self.assertIsNone(m.y.value) @@ -1292,9 +1414,11 @@ def test_force_nlp_subproblem_with_unbounded_integer_variables(self): self.assertIsNone(m.disjunction.disjuncts[1].indicator_var.value) self.assertIsNotNone(results.solver.user_time) -@unittest.skipIf(not GLOA_solvers_available, - "Required subsolvers %s are not available" - % (GLOA_solvers,)) + +@unittest.skipIf( + not GLOA_solvers_available, + "Required subsolvers %s are not available" % (GLOA_solvers,), +) @unittest.skipIf(not mcpp_available(), "MC++ is not available") class TestGLOA(unittest.TestCase): """Tests for global logic-based outer approximation.""" @@ -1303,14 +1427,14 @@ def test_GDP_integer_vars(self): m = ConcreteModel() m.x = Var(bounds=(0, 10)) m.y = Var(domain=Integers, bounds=(0, 5)) - m.d = Disjunction(expr=[[m.x >= m.y, m.y >= 3.5], - [m.x >= m.y, m.y >= 2.5]]) + m.d = Disjunction(expr=[[m.x >= m.y, m.y >= 3.5], [m.x >= m.y, m.y >= 2.5]]) m.o = Objective(expr=m.x) SolverFactory('gdpopt').solve( - m, algorithm='GLOA', + m, + algorithm='GLOA', mip_solver=mip_solver, nlp_solver=global_nlp_solver, - minlp_solver=minlp_solver + minlp_solver=minlp_solver, ) self.assertAlmostEqual(value(m.o.expr), 3) @@ -1318,25 +1442,32 @@ def make_nonlinear_gdp_with_int_vars(self): m = ConcreteModel() m.x = Var(bounds=(0, 10)) m.y = Var(domain=Integers, bounds=(0, 5)) - m.d = Disjunction(expr=[[m.x**2 >= m.y, m.y >= 3.5], - [m.x**2 >= m.y, m.y >= 2.5]]) + m.d = Disjunction( + expr=[[m.x**2 >= m.y, m.y >= 3.5], [m.x**2 >= m.y, m.y >= 2.5]] + ) m.o = Objective(expr=m.x) return m def test_nonlinear_GDP_integer_vars(self): m = self.make_nonlinear_gdp_with_int_vars() - SolverFactory('gdpopt.gloa').solve(m, mip_solver=mip_solver, - nlp_solver=global_nlp_solver, - minlp_solver=minlp_solver) + SolverFactory('gdpopt.gloa').solve( + m, + mip_solver=mip_solver, + nlp_solver=global_nlp_solver, + minlp_solver=minlp_solver, + ) self.assertAlmostEqual(value(m.o.expr), sqrt(3)) self.assertAlmostEqual(value(m.y), 3) def test_nonlinear_GDP_integer_vars_force_nlp_subproblem(self): m = self.make_nonlinear_gdp_with_int_vars() - SolverFactory('gdpopt.gloa').solve(m, mip_solver=mip_solver, - nlp_solver=global_nlp_solver, - minlp_solver=minlp_solver, - force_subproblem_nlp=True) + SolverFactory('gdpopt.gloa').solve( + m, + mip_solver=mip_solver, + nlp_solver=global_nlp_solver, + minlp_solver=minlp_solver, + force_subproblem_nlp=True, + ) self.assertAlmostEqual(value(m.o.expr), sqrt(3)) self.assertAlmostEqual(value(m.y), 3) @@ -1344,137 +1475,135 @@ def test_GDP_integer_vars_infeasible(self): m = ConcreteModel() m.x = Var(bounds=(0, 1)) m.y = Var(domain=Integers, bounds=(0, 5)) - m.d = Disjunction(expr=[[m.x >= m.y, m.y >= 3.5], - [m.x >= m.y, m.y >= 2.5]]) + m.d = Disjunction(expr=[[m.x >= m.y, m.y >= 3.5], [m.x >= m.y, m.y >= 2.5]]) m.o = Objective(expr=m.x) - res = SolverFactory('gdpopt.gloa').solve(m, mip_solver=mip_solver, - nlp_solver=global_nlp_solver, - minlp_solver=minlp_solver) - self.assertEqual(res.solver.termination_condition, - TerminationCondition.infeasible) - - @unittest.skipUnless(license_available and sympy_available, - "Global NLP solver license not available or sympy " - "not available.") + res = SolverFactory('gdpopt.gloa').solve( + m, + mip_solver=mip_solver, + nlp_solver=global_nlp_solver, + minlp_solver=minlp_solver, + ) + self.assertEqual( + res.solver.termination_condition, TerminationCondition.infeasible + ) + + @unittest.skipUnless(license_available, "Global NLP solver license not available") def test_logical_constraints_on_disjuncts(self): m = models.makeLogicalConstraintsOnDisjuncts() - SolverFactory('gdpopt.gloa').solve(m, mip_solver=mip_solver, - nlp_solver=global_nlp_solver) + SolverFactory('gdpopt.gloa').solve( + m, mip_solver=mip_solver, nlp_solver=global_nlp_solver + ) self.assertAlmostEqual(value(m.x), 8) - @unittest.skipUnless(license_available and sympy_available, - "Global NLP solver license not available or sympy " - "not available.") + @unittest.skipUnless(license_available, "Global NLP solver license not available") def test_boolean_vars_on_disjuncts(self): m = models.makeBooleanVarsOnDisjuncts() - SolverFactory('gdpopt.gloa').solve(m, mip_solver=mip_solver, - nlp_solver=global_nlp_solver) + SolverFactory('gdpopt.gloa').solve( + m, mip_solver=mip_solver, nlp_solver=global_nlp_solver + ) self.assertAlmostEqual(value(m.x), 8) - @unittest.skipUnless(license_available, - "Global NLP solver license not available.") + @unittest.skipUnless(license_available, "Global NLP solver license not available.") def test_GLOA_8PP(self): """Test the global logic-based outer approximation algorithm.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() results = SolverFactory('gdpopt.gloa').solve( - eight_process, tee=False, + eight_process, + tee=False, mip_solver=mip_solver, nlp_solver=global_nlp_solver, - nlp_solver_args=global_nlp_solver_args + nlp_solver_args=global_nlp_solver_args, ) ct.check_8PP_solution(self, eight_process, results) - @unittest.skipUnless(license_available and sympy_available, - "Global NLP solver license not available or sympy " - "not available.") + @unittest.skipUnless(license_available, "Global NLP solver license not available") def test_GLOA_8PP_logical(self): """Test the global logic-based outer approximation algorithm.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_logical.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_logical.py')) eight_process = exfile.build_eight_process_flowsheet() results = SolverFactory('gdpopt.gloa').solve( - eight_process, tee=False, + eight_process, + tee=False, mip_solver=mip_solver, nlp_solver=global_nlp_solver, - nlp_solver_args=global_nlp_solver_args + nlp_solver_args=global_nlp_solver_args, ) ct.check_8PP_logical_solution(self, eight_process, results) - @unittest.skipUnless(license_available, - "Global NLP solver license not available.") + @unittest.skipUnless(license_available, "Global NLP solver license not available.") def test_GLOA_8PP_force_NLP(self): """Test the global logic-based outer approximation algorithm.""" - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) eight_process = exfile.build_eight_process_flowsheet() results = SolverFactory('gdpopt.gloa').solve( - eight_process, tee=False, + eight_process, + tee=False, mip_solver=mip_solver, nlp_solver=global_nlp_solver, nlp_solver_args=global_nlp_solver_args, - force_subproblem_nlp=True + force_subproblem_nlp=True, ) ct.check_8PP_solution(self, eight_process, results) - @unittest.skipUnless(license_available, - "Global NLP solver license not available.") + @unittest.skipUnless(license_available, "Global NLP solver license not available.") def test_GLOA_strip_pack_default_init(self): """Test logic-based outer approximation with strip packing.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() SolverFactory('gdpopt.gloa').solve( strip_pack, mip_solver=mip_solver, nlp_solver=global_nlp_solver, - nlp_solver_args=global_nlp_solver_args) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 11) <= 1E-2) + nlp_solver_args=global_nlp_solver_args, + ) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 11) <= 1e-2) - @unittest.skipUnless(license_available and sympy_available, - "Global NLP solver license not available or sympy " - "not available.") + @unittest.skipUnless(license_available, "Global NLP solver license not available") def test_GLOA_strip_pack_default_init_logical_constraints(self): """Test logic-based outer approximation with strip packing.""" - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) strip_pack = exfile.build_rect_strip_packing_model() # add logical constraints strip_pack.Rec3AboveOrBelowRec1 = LogicalConstraint( - expr=strip_pack.no_overlap[1, 3].disjuncts[2].indicator_var.lor( - strip_pack.no_overlap[1, 3].disjuncts[3].indicator_var)) + expr=strip_pack.no_overlap[1, 3] + .disjuncts[2] + .indicator_var.lor(strip_pack.no_overlap[1, 3].disjuncts[3].indicator_var) + ) strip_pack.Rec3RightOrLeftOfRec2 = LogicalConstraint( - expr=strip_pack.no_overlap[2, 3].disjuncts[0].indicator_var.lor( - strip_pack.no_overlap[2, 3].disjuncts[1].indicator_var)) + expr=strip_pack.no_overlap[2, 3] + .disjuncts[0] + .indicator_var.lor(strip_pack.no_overlap[2, 3].disjuncts[1].indicator_var) + ) SolverFactory('gdpopt.gloa').solve( - strip_pack, mip_solver=mip_solver, + strip_pack, + mip_solver=mip_solver, nlp_solver=global_nlp_solver, - nlp_solver_args=global_nlp_solver_args) - self.assertTrue( - fabs(value(strip_pack.total_length.expr) - 13) <= 1E-2) + nlp_solver_args=global_nlp_solver_args, + ) + self.assertTrue(fabs(value(strip_pack.total_length.expr) - 13) <= 1e-2) - @unittest.skipUnless(license_available, - "Global NLP solver license not available.") + @unittest.skipUnless(license_available, "Global NLP solver license not available.") @unittest.pytest.mark.expensive def test_GLOA_constrained_layout_default_init(self): """Test LOA with constrained layout.""" - exfile = import_file( - join(exdir, 'constrained_layout', 'cons_layout_model.py')) + exfile = import_file(join(exdir, 'constrained_layout', 'cons_layout_model.py')) cons_layout = exfile.build_constrained_layout_model() results = SolverFactory('gdpopt.gloa').solve( cons_layout, mip_solver=mip_solver, nlp_solver=global_nlp_solver, nlp_solver_args=global_nlp_solver_args, - tee=False) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + tee=False, + ) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) objective_value = value(cons_layout.min_dist_cost.expr) self.assertTrue( fabs(objective_value - 41573) <= 200, - "Objective value of %s instead of 41573" % objective_value) + "Objective value of %s instead of 41573" % objective_value, + ) def test_GLOA_ex_633_trespalacios(self): """Test LOA with Francisco thesis example.""" @@ -1485,12 +1614,12 @@ def test_GLOA_ex_633_trespalacios(self): mip_solver=mip_solver, nlp_solver=global_nlp_solver, nlp_solver_args=global_nlp_solver_args, - tee=False) + tee=False, + ) objective_value = value(model.obj.expr) self.assertAlmostEqual(objective_value, 4.46, 2) - @unittest.skipUnless(license_available, - "Global NLP solver license not available.") + @unittest.skipUnless(license_available, "Global NLP solver license not available.") def test_GLOA_nonconvex_HENS(self): exfile = import_file(join(exdir, 'small_lit', 'nonconvex_HEN.py')) model = exfile.build_gdp_model() @@ -1499,12 +1628,12 @@ def test_GLOA_nonconvex_HENS(self): mip_solver=mip_solver, nlp_solver=global_nlp_solver, nlp_solver_args=global_nlp_solver_args, - tee=False) + tee=False, + ) objective_value = value(model.objective.expr) - self.assertAlmostEqual(objective_value * 1E-5, 1.14385, 2) + self.assertAlmostEqual(objective_value * 1e-5, 1.14385, 2) - @unittest.skipUnless(license_available, - "Global NLP solver license not available.") + @unittest.skipUnless(license_available, "Global NLP solver license not available.") def test_GLOA_disjunctive_bounds(self): exfile = import_file(join(exdir, 'small_lit', 'nonconvex_HEN.py')) model = exfile.build_gdp_model() @@ -1514,43 +1643,48 @@ def test_GLOA_disjunctive_bounds(self): nlp_solver=global_nlp_solver, nlp_solver_args=global_nlp_solver_args, calc_disjunctive_bounds=True, - tee=False) + tee=False, + ) objective_value = value(model.objective.expr) - self.assertAlmostEqual(objective_value * 1E-5, 1.14385, 2) - -@unittest.skipIf(not GLOA_solvers_available, - "Required subsolvers %s are not available" - % (GLOA_solvers,)) -@unittest.skipIf(not LOA_solvers_available, - "Required subsolvers %s are not available" - % (LOA_solvers,)) + self.assertAlmostEqual(objective_value * 1e-5, 1.14385, 2) + + +@unittest.skipIf( + not GLOA_solvers_available, + "Required subsolvers %s are not available" % (GLOA_solvers,), +) +@unittest.skipIf( + not LOA_solvers_available, + "Required subsolvers %s are not available" % (LOA_solvers,), +) class TestConfigOptions(unittest.TestCase): def make_model(self): m = ConcreteModel() m.x = Var(bounds=(-10, 10)) m.y = Var(bounds=(-10, 10)) - m.disjunction = Disjunction(expr=[[m.y >= m.x**2 + 1], - [m.y >= m.x**2 -3*m.x + 2]]) + m.disjunction = Disjunction( + expr=[[m.y >= m.x**2 + 1], [m.y >= m.x**2 - 3 * m.x + 2]] + ) m.obj = Objective(expr=m.y) return m @unittest.skipIf(not mcpp_available(), "MC++ is not available") - @unittest.skipUnless(SolverFactory('gurobi').available(), - 'Gurobi not available') + @unittest.skipUnless(SolverFactory('gurobi').available(), 'Gurobi not available') def test_set_options_on_config_block(self): m = self.make_model() opt = SolverFactory('gdpopt.loa') with self.assertRaisesRegex( - ValueError, - r"Changing the algorithm in the solve method " - r"is not supported for algorithm-specific " - r"GDPopt solvers. Either use " - r"SolverFactory[(]'gdpopt'[)] or instantiate a " - r"solver with the algorithm you want to use."): + ValueError, + r"Changing the algorithm in the solve method " + r"is not supported for algorithm-specific " + r"GDPopt solvers. Either use " + r"SolverFactory[(]'gdpopt'[)] or instantiate a " + r"solver with the algorithm you want to use.", + ): opt.solve(m, algorithm='RIC') opt.CONFIG.mip_solver = mip_solver @@ -1577,13 +1711,16 @@ def test_set_options_on_config_block(self): self.assertAlmostEqual(value(m.obj), -0.25) @unittest.skipIf(not mcpp_available(), "MC++ is not available") - @unittest.skipUnless(SolverFactory('gurobi').available(), - 'Gurobi not available') + @unittest.skipUnless(SolverFactory('gurobi').available(), 'Gurobi not available') def test_set_options_in_init(self): m = self.make_model() - opt = SolverFactory('gdpopt.loa', mip_solver='gurobi', - nlp_solver=nlp_solver, init_algorithm='no_init') + opt = SolverFactory( + 'gdpopt.loa', + mip_solver='gurobi', + nlp_solver=nlp_solver, + init_algorithm='no_init', + ) buf = StringIO() with redirect_stdout(buf): @@ -1598,23 +1735,37 @@ def test_set_options_in_init(self): self.assertAlmostEqual(value(m.obj), -0.25) self.assertEqual(opt.config.mip_solver, 'gurobi') + @unittest.skipUnless(SolverFactory('gurobi').available(), 'Gurobi not available') def test_no_default_algorithm(self): m = self.make_model() opt = SolverFactory('gdpopt') + buf = StringIO() with redirect_stdout(buf): - opt.solve(m, algorithm='RIC', tee=True, mip_solver=mip_solver, - nlp_solver=nlp_solver) + opt.solve( + m, + algorithm='RIC', + tee=True, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + ) self.assertIn('using RIC algorithm', buf.getvalue()) self.assertAlmostEqual(value(m.obj), -0.25) buf = StringIO() with redirect_stdout(buf): - opt.solve(m, algorithm='LBB', tee=True, mip_solver=mip_solver, - nlp_solver=nlp_solver) + opt.solve( + m, + algorithm='LBB', + tee=True, + mip_solver=mip_solver, + nlp_solver=nlp_solver, + minlp_solver='gurobi', + ) self.assertIn('using LBB algorithm', buf.getvalue()) self.assertAlmostEqual(value(m.obj), -0.25) + if __name__ == '__main__': unittest.main() diff --git a/pyomo/contrib/gdpopt/util.py b/pyomo/contrib/gdpopt/util.py index 6a9d572e47a..f288f9e2647 100644 --- a/pyomo/contrib/gdpopt/util.py +++ b/pyomo/contrib/gdpopt/util.py @@ -21,12 +21,22 @@ from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr from pyomo.contrib.mcpp.pyomo_mcpp import mcpp_available, McCormick from pyomo.core import ( - Block, Constraint, minimize, Objective, Reals, Reference, - TransformationFactory, value, Var) + Block, + Constraint, + minimize, + Objective, + Reals, + Reference, + TransformationFactory, + value, + Var, +) +from pyomo.core.expr.numvalue import native_types from pyomo.gdp import Disjunct, Disjunction from pyomo.gdp.util import _parent_disjunct from pyomo.opt import SolverFactory + class _DoNothing(object): """Do nothing, literally. @@ -45,6 +55,7 @@ def _do_nothing(*args, **kwargs): return _do_nothing + class SuppressInfeasibleWarning(object): """Suppress the infeasible model warning message from solve(). @@ -60,83 +71,91 @@ class SuppressInfeasibleWarning(object): class InfeasibleWarningFilter(logging.Filter): def filter(self, record): return not record.getMessage().startswith( - "Loading a SolverResults object with a warning status into " - "model") + "Loading a SolverResults object with a warning status into model" + ) warning_filter = InfeasibleWarningFilter() def __enter__(self): logger = logging.getLogger('pyomo.core') logger.addFilter(self.warning_filter) + return self def __exit__(self, exception_type, exception_value, traceback): logger = logging.getLogger('pyomo.core') logger.removeFilter(self.warning_filter) + def solve_continuous_problem(m, config): logger = config.logger logger.info('Problem has no discrete decisions.') obj = next(m.component_data_objects(Objective, active=True)) - if (any(c.body.polynomial_degree() not in (1, 0) for c in - m.component_data_objects(Constraint, active=True, - descend_into=Block)) - or obj.expr.polynomial_degree() not in (1, 0)): - logger.info("Your model is an NLP (nonlinear program). " - "Using NLP solver %s to solve." % config.nlp_solver) - results = SolverFactory(config.nlp_solver).solve( - m, **config.nlp_solver_args) + if any( + c.body.polynomial_degree() not in (1, 0) + for c in m.component_data_objects(Constraint, active=True, descend_into=Block) + ) or obj.polynomial_degree() not in (1, 0): + logger.info( + "Your model is an NLP (nonlinear program). " + "Using NLP solver %s to solve." % config.nlp_solver + ) + results = SolverFactory(config.nlp_solver).solve(m, **config.nlp_solver_args) return results else: - logger.info("Your model is an LP (linear program). " - "Using LP solver %s to solve." % config.mip_solver) - results = SolverFactory(config.mip_solver).solve( - m, **config.mip_solver_args) + logger.info( + "Your model is an LP (linear program). " + "Using LP solver %s to solve." % config.mip_solver + ) + results = SolverFactory(config.mip_solver).solve(m, **config.mip_solver_args) return results + def move_nonlinear_objective_to_constraints(util_block, logger): m = util_block.parent_block() - discrete_obj = next(m.component_data_objects(Objective, descend_into=True, - active=True)) + discrete_obj = next( + m.component_data_objects(Objective, descend_into=True, active=True) + ) + if discrete_obj.polynomial_degree() in (1, 0): + # Nothing to move + return None # Move the objective to the constraints if it is nonlinear - if discrete_obj.expr.polynomial_degree() not in (1, 0): - logger.info("Objective is nonlinear. Moving it to constraint set.") - - util_block.objective_value = Var(domain=Reals, initialize=0) - if mcpp_available(): - mc_obj = McCormick(discrete_obj.expr) - util_block.objective_value.setub(mc_obj.upper()) - util_block.objective_value.setlb(mc_obj.lower()) - else: - # Use Pyomo's contrib.fbbt package - lb, ub = compute_bounds_on_expr(discrete_obj.expr) - if discrete_obj.sense == minimize: - util_block.objective_value.setlb(lb) - else: - util_block.objective_value.setub(ub) + logger.info("Objective is nonlinear. Moving it to constraint set.") + util_block.objective_value = Var(domain=Reals, initialize=0) + if mcpp_available(): + mc_obj = McCormick(discrete_obj.expr) + util_block.objective_value.setub(mc_obj.upper()) + util_block.objective_value.setlb(mc_obj.lower()) + else: + # Use Pyomo's contrib.fbbt package + lb, ub = compute_bounds_on_expr(discrete_obj.expr) if discrete_obj.sense == minimize: - util_block.objective_constr = Constraint( - expr=util_block.objective_value >= discrete_obj.expr) + util_block.objective_value.setlb(lb) else: - util_block.objective_constr = Constraint( - expr=util_block.objective_value <= discrete_obj.expr) - # Deactivate the original objective and add this new one. - discrete_obj.deactivate() - util_block.objective = Objective( - expr=util_block.objective_value, sense=discrete_obj.sense) - - # Add the new variable and constraint to the working lists - if discrete_obj.expr.polynomial_degree() not in (1, 0): - util_block.algebraic_variable_list.append( - util_block.objective_value) - if hasattr(util_block, 'constraint_list'): - util_block.constraint_list.append(util_block.objective_constr) - # If we moved the objective, return the original in case we want to - # restore it later - return discrete_obj - # Nothing was moved - return None + util_block.objective_value.setub(ub) + + if discrete_obj.sense == minimize: + util_block.objective_constr = Constraint( + expr=util_block.objective_value >= discrete_obj.expr + ) + else: + util_block.objective_constr = Constraint( + expr=util_block.objective_value <= discrete_obj.expr + ) + # Deactivate the original objective and add this new one. + discrete_obj.deactivate() + util_block.objective = Objective( + expr=util_block.objective_value, sense=discrete_obj.sense + ) + + # Add the new variable and constraint to the working lists + util_block.algebraic_variable_list.append(util_block.objective_value) + if hasattr(util_block, 'constraint_list'): + util_block.constraint_list.append(util_block.objective_constr) + # If we moved the objective, return the original in case we want to + # restore it later + return discrete_obj + def a_logger(str_or_logger): """Returns a logger when passed either a logger name or logger object.""" @@ -145,17 +164,25 @@ def a_logger(str_or_logger): else: return logging.getLogger(str_or_logger) -def copy_var_list_values(from_list, to_list, config, - skip_stale=False, skip_fixed=True, - ignore_integrality=False): + +def copy_var_list_values( + from_list, + to_list, + config, + skip_stale=False, + skip_fixed=True, + ignore_integrality=False, +): """Copy variable values from one list to another. Rounds to Binary/Integer if necessary Sets to zero for NonNegativeReals if necessary """ if ignore_integrality: - deprecation_warning("The 'ignore_integrality' argument no longer " - "has any functionality.", version="6.4.2") + deprecation_warning( + "The 'ignore_integrality' argument no longer has any functionality.", + version="6.4.2", + ) if len(from_list) != len(to_list): raise ValueError('The lengths of from_list and to_list do not match.') @@ -167,19 +194,21 @@ def copy_var_list_values(from_list, to_list, config, continue # Skip fixed variables. v_to.set_value(value(v_from, exception=False), skip_validation=True) + def fix_discrete_var(var, val, config): """Fixes the discrete variable var to val, rounding to the nearest integer - or not, depending on if rounding is specifed in config and what the integer + or not, depending on if rounding is specified in config and what the integer tolerance is.""" if val is None: return if var.is_continuous(): var.set_value(val, skip_validation=True) - elif (fabs(val - round(val)) > config.integer_tolerance): + elif fabs(val - round(val)) > config.integer_tolerance: raise ValueError( "Integer variable '%s' cannot be fixed to value %s because it " - "is not within the specified integer tolerance of %s." % - (var.name, val, config.integer_tolerance)) + "is not within the specified integer tolerance of %s." + % (var.name, val, config.integer_tolerance) + ) else: # variable is integer and within tolerance if config.round_discrete_vars: @@ -187,36 +216,126 @@ def fix_discrete_var(var, val, config): else: var.fix(val, skip_validation=True) -class fix_discrete_problem_solution_in_subproblem(object): - def __init__(self, discrete_prob_util_block, subproblem_util_block, - solver, config, make_subproblem_continuous=True): + +class fix_discrete_solution_in_subproblem(object): + def __init__( + self, + true_disjuncts, + boolean_var_values, + integer_var_values, + subprob_util_block, + config, + solver, + ): + self.True_disjuncts = true_disjuncts + self.boolean_var_values = boolean_var_values + self.discrete_var_values = integer_var_values + self.subprob_util_block = subprob_util_block + self.config = config + + def __enter__(self): + # fix subproblem Blocks according to the discrete problem solution + fixed = [] + for block in self.subprob_util_block.disjunct_list: + if block in self.True_disjuncts: + block.binary_indicator_var.fix(1) + fixed.append(block.name) + else: + block.deactivate() + block.binary_indicator_var.fix(0) + self.config.logger.debug( + "Fixed the following Disjuncts to 'True': %s" % ", ".join(fixed) + ) + + fixed_bools = [] + for subprob_bool, val in zip( + self.subprob_util_block.non_indicator_boolean_variable_list, + self.boolean_var_values, + ): + subprob_binary = subprob_bool.get_associated_binary() + if val: + subprob_binary.fix(1) + else: + subprob_binary.fix(0) + fixed_bools.append("%s = %s" % (subprob_bool.name, val)) + self.config.logger.debug( + "Fixed the following Boolean variables: %s" % ", ".join(fixed_bools) + ) + + # Fix subproblem discrete variables according to the discrete problem + # solution + if self.config.force_subproblem_nlp: + fixed_discrete = [] + for subprob_var, val in zip( + self.subprob_util_block.discrete_variable_list, self.discrete_var_values + ): + fix_discrete_var(subprob_var, val, self.config) + fixed_discrete.append("%s = %s" % (subprob_var.name, val)) + self.config.logger.debug( + "Fixed the following integer variables: " + "%s" % ", ".join(fixed_discrete) + ) + + # Call the subproblem initialization callback + self.config.subproblem_initialization_method( + self.True_disjuncts, + self.boolean_var_values, + self.discrete_var_values, + self.subprob_util_block, + ) + + return self + + def __exit__(self, type, value, traceback): + # unfix all subproblem blocks + for block in self.subprob_util_block.disjunct_list: + block.activate() + block.binary_indicator_var.unfix() + + # unfix all the formerly-Boolean variables + for bool_var in self.subprob_util_block.non_indicator_boolean_variable_list: + bool_var.get_associated_binary().unfix() + + # unfix all discrete variables and restore them to their original values + if self.config.force_subproblem_nlp: + for subprob_var in self.subprob_util_block.discrete_variable_list: + subprob_var.fixed = False + + # [ESJ 2/25/22] We don't need to reset the values of the continuous + # variables because we will initialize them based on the discrete + # problem solution before we solve again. + + +class fix_discrete_problem_solution_in_subproblem(fix_discrete_solution_in_subproblem): + def __init__(self, discrete_prob_util_block, subproblem_util_block, solver, config): self.discrete_prob_util_block = discrete_prob_util_block self.subprob_util_block = subproblem_util_block self.solver = solver self.config = config - self.make_subprob_continuous = make_subproblem_continuous def __enter__(self): # fix subproblem Blocks according to the discrete problem solution fixed = [] - for disjunct, block in zip(self.discrete_prob_util_block.disjunct_list, - self.subprob_util_block.disjunct_list): + for disjunct, block in zip( + self.discrete_prob_util_block.disjunct_list, + self.subprob_util_block.disjunct_list, + ): if not disjunct.indicator_var.value: block.deactivate() block.binary_indicator_var.fix(0) else: block.binary_indicator_var.fix(1) fixed.append(block.name) - self.config.logger.debug("Fixed the following Disjuncts to 'True': %s" - % ", ".join(fixed)) + self.config.logger.debug( + "Fixed the following Disjuncts to 'True': %s" % ", ".join(fixed) + ) fixed_bools = [] for discrete_problem_bool, subprob_bool in zip( - self.discrete_prob_util_block.\ - non_indicator_boolean_variable_list, - self.subprob_util_block.non_indicator_boolean_variable_list): - discrete_problem_binary = discrete_problem_bool.\ - get_associated_binary() + self.discrete_prob_util_block.non_indicator_boolean_variable_list, + self.subprob_util_block.non_indicator_boolean_variable_list, + ): + discrete_problem_binary = discrete_problem_bool.get_associated_binary() subprob_binary = subprob_bool.get_associated_binary() val = discrete_problem_binary.value if val is None: @@ -233,16 +352,18 @@ def __enter__(self): subprob_binary.fix(0) bool_val = False fixed_bools.append("%s = %s" % (subprob_bool.name, bool_val)) - self.config.logger.debug("Fixed the following Boolean variables: %s" - % ", ".join(fixed_bools)) + self.config.logger.debug( + "Fixed the following Boolean variables: %s" % ", ".join(fixed_bools) + ) # Fix subproblem discrete variables according to the discrete problem # solution - if self.make_subprob_continuous: + if self.config.force_subproblem_nlp: fixed_discrete = [] for discrete_problem_var, subprob_var in zip( - self.discrete_prob_util_block.discrete_variable_list, - self.subprob_util_block.discrete_variable_list): + self.discrete_prob_util_block.discrete_variable_list, + self.subprob_util_block.discrete_variable_list, + ): # [ESJ 1/24/21]: We don't check if discrete problem_var # actually has a value here because we are going to have to do # that error checking later. This is because the subproblem @@ -250,36 +371,22 @@ def __enter__(self): # problem and vice versa since discrete problem is linearized, # but subproblem is a specific realization of the disjuncts. All # this means we don't have enough info to do it here. - fix_discrete_var(subprob_var, discrete_problem_var.value, - self.config) - fixed_discrete.append("%s = %s" % (subprob_var.name, - discrete_problem_var.value)) - self.config.logger.debug("Fixed the following integer variables: " - "%s" % ", ".join(fixed_discrete)) + fix_discrete_var(subprob_var, discrete_problem_var.value, self.config) + fixed_discrete.append( + "%s = %s" % (subprob_var.name, discrete_problem_var.value) + ) + self.config.logger.debug( + "Fixed the following integer variables: " + "%s" % ", ".join(fixed_discrete) + ) # Call the subproblem initialization callback self.config.subproblem_initialization_method( - self.solver, self.subprob_util_block, self.discrete_prob_util_block) + self.solver, self.subprob_util_block, self.discrete_prob_util_block + ) - def __exit__(self, type, value, traceback): - # unfix all subproblem blocks - for block in self.subprob_util_block.disjunct_list: - block.activate() - block.binary_indicator_var.unfix() - - # unfix all the formerly-Boolean variables - for bool_var in \ - self.subprob_util_block.non_indicator_boolean_variable_list: - bool_var.get_associated_binary().unfix() + return self - # unfix all discrete variables and restore them to their original values - if self.make_subprob_continuous: - for subprob_var in self.subprob_util_block.discrete_variable_list: - subprob_var.fixed = False - - # [ESJ 2/25/22] We don't need to reset the values of the continuous - # variables because we will initialize them based on the discrete - # problem solution before we solve again. def is_feasible(model, config): """Checks to see if the algebraic model is feasible in its current state. @@ -288,50 +395,54 @@ def is_feasible(model, config): untransformed GDP models. """ - disj = next(model.component_data_objects( - ctype=Disjunct, active=True), None) + disj = next(model.component_data_objects(ctype=Disjunct, active=True), None) if disj is not None: raise NotImplementedError( "Found active disjunct %s. " "This function is not intended to check " "feasibility of disjunctive models, " - "only transformed subproblems." % disj.name) + "only transformed subproblems." % disj.name + ) config.logger.debug('Checking if model is feasible.') for constr in model.component_data_objects( - ctype=Constraint, active=True, descend_into=True): + ctype=Constraint, active=True, descend_into=True + ): # Check constraint lower bound - if (constr.lower is not None and ( - value(constr.lower) - value(constr.body) - >= config.constraint_tolerance - )): - config.logger.info('%s: body %s < LB %s' % ( - constr.name, value(constr.body), value(constr.lower))) + if constr.lower is not None and ( + value(constr.lower) - value(constr.body) >= config.constraint_tolerance + ): + config.logger.info( + '%s: body %s < LB %s' + % (constr.name, value(constr.body), value(constr.lower)) + ) return False # check constraint upper bound - if (constr.upper is not None and ( - value(constr.body) - value(constr.upper) - >= config.constraint_tolerance - )): - config.logger.info('%s: body %s > UB %s' % ( - constr.name, value(constr.body), value(constr.upper))) + if constr.upper is not None and ( + value(constr.body) - value(constr.upper) >= config.constraint_tolerance + ): + config.logger.info( + '%s: body %s > UB %s' + % (constr.name, value(constr.body), value(constr.upper)) + ) return False for var in model.component_data_objects(ctype=Var, descend_into=True): # Check variable lower bound - if (var.has_lb() and - value(var.lb) - value(var) >= config.variable_tolerance): - config.logger.info('%s: value %s < LB %s' % ( - var.name, value(var), value(var.lb))) + if var.has_lb() and value(var.lb) - value(var) >= config.variable_tolerance: + config.logger.info( + '%s: value %s < LB %s' % (var.name, value(var), value(var.lb)) + ) return False # Check variable upper bound - if (var.has_ub() and - value(var) - value(var.ub) >= config.variable_tolerance): - config.logger.info('%s: value %s > UB %s' % ( - var.name, value(var), value(var.ub))) + if var.has_ub() and value(var) - value(var.ub) >= config.variable_tolerance: + config.logger.info( + '%s: value %s > UB %s' % (var.name, value(var), value(var.ub)) + ) return False config.logger.info('Model is feasible.') return True + @contextmanager def time_code(timing_data_obj, code_block_name, is_main_timer=False): """Starts timer at entry, stores elapsed time at exit @@ -348,6 +459,7 @@ def time_code(timing_data_obj, code_block_name, is_main_timer=False): prev_time = timing_data_obj.get(code_block_name, 0) timing_data_obj[code_block_name] = prev_time + elapsed_time + def get_main_elapsed_time(timing_data_obj): """Returns the time since entering the main `time_code` context""" current_time = timing.default_timer() @@ -360,12 +472,13 @@ def get_main_elapsed_time(timing_data_obj): "`get_main_elapsed_time()`." ) + @contextmanager def lower_logger_level_to(logger, level=None, tee=False): """Increases logger verbosity by lowering reporting level.""" - if tee: # we want pretty stuff - level = logging.INFO # we need to be at least this verbose for tee to - # work + if tee: # we want pretty stuff + level = logging.INFO # we need to be at least this verbose for tee to + # work handlers = [h for h in logger.handlers] logger.handlers.clear() logger.propagate = False @@ -396,6 +509,7 @@ def lower_logger_level_to(logger, level=None, tee=False): if level_changed: logger.setLevel(old_logger_level) + def _add_bigm_constraint_to_transformed_model(m, constraint, block): """Adds the given constraint to the discrete problem model as if it had been on the model originally, before the bigm transformation was called. @@ -431,11 +545,14 @@ def _add_bigm_constraint_to_transformed_model(m, constraint, block): return bigm = TransformationFactory('gdp.bigm') - bigm.assume_fixed_vars_permanent = False + # We're fine with default state, but because we're not using apply_to, we + # need to set it. + bigm._config = bigm.CONFIG() # ESJ: This function doesn't handle ConstraintDatas, and bigm is not # sufficiently modular to have a function that does at the moment, so I'm # making a Reference to the ComponentData so that it will look like an # indexed component for now. If I redesign bigm at some point, then this # could be prettier. - bigm._transform_constraint(Reference(constraint), parent_disjunct, None, - [], []) + bigm._transform_constraint(Reference(constraint), parent_disjunct, None, [], []) + # Now get rid of it because this is a class attribute! + del bigm._config diff --git a/pyomo/contrib/gjh/GJH.py b/pyomo/contrib/gjh/GJH.py index d8b13e5f21f..df9dfebf477 100644 --- a/pyomo/contrib/gjh/GJH.py +++ b/pyomo/contrib/gjh/GJH.py @@ -17,6 +17,7 @@ logger = logging.getLogger('pyomo.contrib.gjh') + def readgjh(fname=None): """ Build objective gradient and constraint Jacobian @@ -50,8 +51,7 @@ def readgjh(fname=None): fname = files.pop(0) if len(files) > 1: print("WARNING: More than one gjh file in current directory") - print(" Processing: %s\nIgnoring: %s" % ( - fname, '\n\t\t'.join(files))) + print(" Processing: %s\nIgnoring: %s" % (fname, '\n\t\t'.join(files))) with open(fname, "r") as f: line = "dummy_str" @@ -67,24 +67,24 @@ def readgjh(fname=None): When printed via ampl interface: ampl: display g; g [*] := - 1 0.204082 - 2 0.367347 - 3 0.44898 - 4 0.44898 - 5 0.244898 - 6 -0.173133 - 7 -0.173133 - 8 -0.0692532 - 9 0.0692532 - 10 0.346266 - ; + 1 0.204082 + 2 0.367347 + 3 0.44898 + 4 0.44898 + 5 0.244898 + 6 -0.173133 + 7 -0.173133 + 8 -0.0692532 + 9 0.0692532 + 10 0.346266 + ; """ index = int(line.split()[0]) - 1 value = float(line.split()[1]) g.append([index, value]) line = f.readline() - # Skip lines until J value is reached + # Skip lines until J value is reached while line != "param J :=\n": line = f.readline() @@ -95,44 +95,44 @@ def readgjh(fname=None): When printed via ampl interface: ampl: display J; J [*,*] - : 1 2 3 4 5 6 - := - 1 -0.434327 0.784302 . . . -0.399833 - 2 2.22045e-16 . 1.46939 . . -0.831038 - 3 0.979592 . . 1.95918 . -0.9596 - 4 1.79592 . . . 2.12245 -0.692532 - 5 0.979592 . . . . 0 - 6 . -0.0640498 0.545265 . . . - 7 . 0.653061 . 1.14286 . . - 8 . 1.63265 . . 1.63265 . - 9 . 1.63265 . . . . - 10 . . 0.262481 0.262481 . . - 11 . . 1.14286 . 0.653061 . - 12 . . 1.95918 . . . - 13 . . . 0.545265 -0.0640498 . - 14 . . . 1.95918 . . - 15 . . . . 1.63265 . - 16 . . . . . -1 - - : 7 8 9 10 := - 1 0.399833 . . . - 2 . 0.831038 . . - 3 . . 0.9596 . - 4 . . . 0.692532 - 6 -0.799667 0.799667 . . - 7 -1.38506 . 1.38506 . - 8 -1.33278 . . 1.33278 - 9 0 . . . - 10 . -0.9596 0.9596 . - 11 . -1.38506 . 1.38506 - 12 . 0 . . - 13 . . -0.799667 0.799667 - 14 . . 0 . - 15 . . . 0 - 16 1 . . . - 17 -1 1 . . - 18 . -1 1 . - 19 . . -1 1 + : 1 2 3 4 5 6 + := + 1 -0.434327 0.784302 . . . -0.399833 + 2 2.22045e-16 . 1.46939 . . -0.831038 + 3 0.979592 . . 1.95918 . -0.9596 + 4 1.79592 . . . 2.12245 -0.692532 + 5 0.979592 . . . . 0 + 6 . -0.0640498 0.545265 . . . + 7 . 0.653061 . 1.14286 . . + 8 . 1.63265 . . 1.63265 . + 9 . 1.63265 . . . . + 10 . . 0.262481 0.262481 . . + 11 . . 1.14286 . 0.653061 . + 12 . . 1.95918 . . . + 13 . . . 0.545265 -0.0640498 . + 14 . . . 1.95918 . . + 15 . . . . 1.63265 . + 16 . . . . . -1 + + : 7 8 9 10 := + 1 0.399833 . . . + 2 . 0.831038 . . + 3 . . 0.9596 . + 4 . . . 0.692532 + 6 -0.799667 0.799667 . . + 7 -1.38506 . 1.38506 . + 8 -1.33278 . . 1.33278 + 9 0 . . . + 10 . -0.9596 0.9596 . + 11 . -1.38506 . 1.38506 + 12 . 0 . . + 13 . . -0.799667 0.799667 + 14 . . 0 . + 15 . . . 0 + 16 1 . . . + 17 -1 1 . . + 18 . -1 1 . + 19 . . -1 1 ; """ if line[0] == '[': @@ -154,31 +154,31 @@ def readgjh(fname=None): """ When printed via ampl interface: ampl: display H; - H [*,*] - : 1 2 3 4 5 6 := - 1 . 0.25 . . . -0.35348 - 2 0.25 . 0.25 . . -0.212088 - 3 . 0.25 . 0.25 . . - 4 . . 0.25 . 0.25 . - 5 . . . 0.25 . . - 6 -0.35348 -0.212088 . . . -0.0999584 - 7 0.35348 -0.212088 -0.35348 . . 0.0999584 - 8 . 0.424176 -0.070696 -0.424176 . . - 9 . . 0.424176 0.070696 -0.424176 . - 10 . . . 0.35348 0.424176 . - - : 7 8 9 10 := - 1 0.35348 . . . - 2 -0.212088 0.424176 . . - 3 -0.35348 -0.070696 0.424176 . - 4 . -0.424176 0.070696 0.35348 - 5 . . -0.424176 0.424176 - 6 0.0999584 . . . - 7 -0.299875 0.199917 . . - 8 0.199917 -0.439817 0.2399 . - 9 . 0.2399 -0.439817 0.199917 - 10 . . 0.199917 -0.199917 - ; + H [*,*] + : 1 2 3 4 5 6 := + 1 . 0.25 . . . -0.35348 + 2 0.25 . 0.25 . . -0.212088 + 3 . 0.25 . 0.25 . . + 4 . . 0.25 . 0.25 . + 5 . . . 0.25 . . + 6 -0.35348 -0.212088 . . . -0.0999584 + 7 0.35348 -0.212088 -0.35348 . . 0.0999584 + 8 . 0.424176 -0.070696 -0.424176 . . + 9 . . 0.424176 0.070696 -0.424176 . + 10 . . . 0.35348 0.424176 . + + : 7 8 9 10 := + 1 0.35348 . . . + 2 -0.212088 0.424176 . . + 3 -0.35348 -0.070696 0.424176 . + 4 . -0.424176 0.070696 0.35348 + 5 . . -0.424176 0.424176 + 6 0.0999584 . . . + 7 -0.299875 0.199917 . . + 8 0.199917 -0.439817 0.2399 . + 9 . 0.2399 -0.439817 0.199917 + 10 . . 0.199917 -0.199917 + ; """ if line[0] == '[': # Hessian row index @@ -189,12 +189,12 @@ def readgjh(fname=None): value = float(line.split()[1]) H.append([row, column, value]) line = f.readline() - - with open(fname[:-3]+'col', 'r') as f: + + with open(fname[:-3] + 'col', 'r') as f: data = f.read() variableList = data.split() - with open(fname[:-3]+'row', 'r') as f: + with open(fname[:-3] + 'row', 'r') as f: data = f.read() constraintList = data.split() @@ -206,6 +206,7 @@ class GJHSolver(ASL): An interface to the AMPL GJH "solver" for evaluating a model at a point. """ + def __init__(self, **kwds): kwds['type'] = 'gjh' kwds['symbolic_solver_labels'] = True @@ -222,7 +223,7 @@ def _initialize_callbacks(self, model): def _presolve(self, *args, **kwds): super()._presolve(*args, **kwds) - self._gjh_file = self._soln_file[:-3]+'gjh' + self._gjh_file = self._soln_file[:-3] + 'gjh' TempfileManager.add_tempfile(self._gjh_file, exists=False) def _postsolve(self): diff --git a/pyomo/contrib/gjh/__init__.py b/pyomo/contrib/gjh/__init__.py index 9320e403e95..d93cfd77b3c 100644 --- a/pyomo/contrib/gjh/__init__.py +++ b/pyomo/contrib/gjh/__init__.py @@ -8,4 +8,3 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - diff --git a/pyomo/contrib/gjh/getGJH.py b/pyomo/contrib/gjh/getGJH.py index 5485184d4ee..112de054745 100644 --- a/pyomo/contrib/gjh/getGJH.py +++ b/pyomo/contrib/gjh/getGJH.py @@ -21,33 +21,31 @@ # https://ampl.com/resources/hooking-your-solver-to-ampl/ # All 32-bit downloads are used - 64-bit is available only for Linux urlmap = { - 'linux': 'https://ampl.com/netlib/ampl/student/linux/gjh.gz', - 'windows': 'https://ampl.com/netlib/ampl/student/mswin/gjh.exe.gz', - 'cygwin': 'https://ampl.com/netlib/ampl/student/mswin/gjh.exe.gz', - 'darwin': 'https://ampl.com/netlib/ampl/student/macosx/x86_32/gjh.gz', -} -exemap = { - 'linux': '', - 'windows': '.exe', - 'cygwin': '.exe', - 'darwin': '', + 'linux': 'https://netlib.org/ampl/student/linux/gjh.gz', + 'windows': 'https://netlib.org/ampl/student/mswin/gjh.exe.gz', + 'cygwin': 'https://netlib.org/ampl/student/mswin/gjh.exe.gz', + 'darwin': 'https://netlib.org/ampl/student/macosx/x86_32/gjh.gz', } +exemap = {'linux': '', 'windows': '.exe', 'cygwin': '.exe', 'darwin': ''} + def get_gjh(downloader): system, bits = downloader.get_sysinfo() url = downloader.get_platform_url(urlmap) - downloader.set_destination_filename( - os.path.join('bin', 'gjh'+exemap[system])) + downloader.set_destination_filename(os.path.join('bin', 'gjh' + exemap[system])) - logger.info("Fetching GJH from %s and installing it to %s" - % (url, downloader.destination())) + logger.info( + "Fetching GJH from %s and installing it to %s" % (url, downloader.destination()) + ) downloader.get_gzipped_binary_file(url) mode = os.stat(downloader.destination()).st_mode - os.chmod( downloader.destination(), - mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH ) + os.chmod( + downloader.destination(), mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH + ) + def main(argv): downloader = FileDownloader() diff --git a/pyomo/contrib/gjh/plugins.py b/pyomo/contrib/gjh/plugins.py index b53158faad9..4af2f38becd 100644 --- a/pyomo/contrib/gjh/plugins.py +++ b/pyomo/contrib/gjh/plugins.py @@ -14,7 +14,9 @@ from pyomo.contrib.gjh.GJH import GJHSolver from pyomo.opt.base import SolverFactory + def load(): DownloadFactory.register('gjh')(get_gjh) - SolverFactory.register('contrib.gjh', - doc='Interface to the AMPL GJH "solver"')(GJHSolver) + SolverFactory.register('contrib.gjh', doc='Interface to the AMPL GJH "solver"')( + GJHSolver + ) diff --git a/pyomo/contrib/iis/iis.py b/pyomo/contrib/iis/iis.py index c32108b7b7a..bd192d04eb3 100644 --- a/pyomo/contrib/iis/iis.py +++ b/pyomo/contrib/iis/iis.py @@ -1,6 +1,6 @@ """ This module contains functions for computing an irreducible infeasible set -for a Pyomo MILP or LP using a specified commerical solver, one of CPLEX, +for a Pyomo MILP or LP using a specified commercial solver, one of CPLEX, Gurobi, or Xpress. """ @@ -15,7 +15,7 @@ def write_iis(pyomo_model, iis_file_name, solver=None, logger=logger): """ Write an irreducible infeasible set for a Pyomo MILP or LP - using the specified commerical solver. + using the specified commercial solver. Arguments --------- diff --git a/pyomo/contrib/iis/tests/test_iis.py b/pyomo/contrib/iis/tests/test_iis.py index 31ea48d9650..b1b675d5081 100644 --- a/pyomo/contrib/iis/tests/test_iis.py +++ b/pyomo/contrib/iis/tests/test_iis.py @@ -109,10 +109,7 @@ def _test_iis(solver_name): def _validate_ilp(file_name): - lines_found = { - "c2: 100 x + y <= 0": False, - "c3: x >= 0.5": False, - } + lines_found = {"c2: 100 x + y <= 0": False, "c3: x >= 0.5": False} with open(file_name, "r") as f: for line in f.readlines(): for k, v in lines_found.items(): diff --git a/pyomo/contrib/incidence_analysis/README.md b/pyomo/contrib/incidence_analysis/README.md index 9dd7880a690..645568c9036 100644 --- a/pyomo/contrib/incidence_analysis/README.md +++ b/pyomo/contrib/incidence_analysis/README.md @@ -1,6 +1,119 @@ -### Pyomo variable-constraint matching tools -This contrib package includes functions for analyzing the -structure of a variable-constraint graph (or incidence matrix) -from a Pyomo model. This is useful for verifying the non-singularity -of a subset of variables and constraints, as one might want to do -for differential-algebraic equation models. +# Incidence Analysis + +Tools for constructing and analyzing the incidence graph of variables and +constraints. + +These tools can be used to detect whether and (approximately) why the Jacobian +of equality constraints is structurally or numerically singular, which +commonly happens as the result of a modeling error. +See the +[documentation](https://pyomo.readthedocs.io/en/stable/contributed_packages/incidence/index.html) +for more information and examples. + +## Dependencies + +Incidence Analysis uses +[NetworkX](https://github.com/networkx/networkx) +to represent incidence graphs. Additionally, +[SciPy](https://github.com/scipy/scipy) +and +[Plotly](https://github.com/plotly/plotly.py) +may be required for some functionality. + +## Example + +Identifying over-constrained and under-constrained subsystems of a singular +square system: +```python +import pyomo.environ as pyo +from pyomo.contrib.incidence_analysis import IncidenceGraphInterface + +m = pyo.ConcreteModel() +m.components = pyo.Set(initialize=[1, 2, 3]) +m.x = pyo.Var(m.components, initialize=1.0/3.0) +m.flow_comp = pyo.Var(m.components, initialize=10.0) +m.flow = pyo.Var(initialize=30.0) +m.density = pyo.Var(initialize=1.0) + +m.sum_eqn = pyo.Constraint( + expr=sum(m.x[j] for j in m.components) - 1 == 0 +) +m.holdup_eqn = pyo.Constraint(m.components, expr={ + j: m.x[j]*m.density - 1 == 0 for j in m.components +}) +m.density_eqn = pyo.Constraint( + expr=1/m.density - sum(1/m.x[j] for j in m.components) == 0 +) +m.flow_eqn = pyo.Constraint(m.components, expr={ + j: m.x[j]*m.flow - m.flow_comp[j] == 0 for j in m.components +}) + +igraph = IncidenceGraphInterface(m, include_inequality=False) +var_dmp, con_dmp = igraph.dulmage_mendelsohn() + +uc_var = var_dmp.unmatched + var_dmp.underconstrained +uc_con = con_dmp.underconstrained +oc_var = var_dmp.overconstrained +oc_con = con_dmp.overconstrained + con_dmp.unmatched + +print("Overconstrained subsystem") +print("-------------------------") +print("Variables") +for var in oc_var: + print(f" {var.name}") +print("Constraints") +for con in oc_con: + print(f" {con.name}") +print() + +print("Underconstrained subsystem") +print("--------------------------") +print("Variables") +for var in uc_var: + print(f" {var.name}") +print("Constraints") +for con in uc_con: + print(f" {con.name}") +``` +This displays: +```console +Overconstrained subsystem +------------------------- +Variables + x[1] + density + x[2] + x[3] +Constraints + sum_eqn + holdup_eqn[1] + holdup_eqn[2] + holdup_eqn[3] + density_eqn + +Underconstrained subsystem +-------------------------- +Variables + flow_comp[1] + flow + flow_comp[2] + flow_comp[3] +Constraints + flow_eqn[1] + flow_eqn[2] + flow_eqn[3] +``` + +## Citation + +We are working on a journal article about Incidence Analysis and the underlying +methods. In the meantime, if you use Incidence Analysis in your research, you +may cite the following conference paper: +```bibtex +@inproceedings{Parker2023Dulmage, + title={{An application of the Dulmage-Mendelsohn partition to the analysis of a discretized dynamic chemical looping combustion reactor model}}, + author={Robert Parker and Chinedu Okoli and Bethany Nicholson and John Siirola and Lorenz Biegler}, + booktitle={Proceedings of FOCAPO/CPC 2023}, + year={2023} +} +``` diff --git a/pyomo/contrib/incidence_analysis/__init__.py b/pyomo/contrib/incidence_analysis/__init__.py index 178bbc96db8..ee078690f2f 100644 --- a/pyomo/contrib/incidence_analysis/__init__.py +++ b/pyomo/contrib/incidence_analysis/__init__.py @@ -1,7 +1,9 @@ from .triangularize import block_triangularize from .matching import maximum_matching -from .interface import IncidenceGraphInterface -from .util import ( - generate_strongly_connected_components, - solve_strongly_connected_components, - ) +from .interface import IncidenceGraphInterface, get_bipartite_incidence_graph +from .scc_solver import ( + generate_strongly_connected_components, + solve_strongly_connected_components, +) +from .incidence import get_incident_variables +from .config import IncidenceMethod diff --git a/pyomo/contrib/incidence_analysis/common/dulmage_mendelsohn.py b/pyomo/contrib/incidence_analysis/common/dulmage_mendelsohn.py index 347538659d4..95b6cd7134f 100644 --- a/pyomo/contrib/incidence_analysis/common/dulmage_mendelsohn.py +++ b/pyomo/contrib/incidence_analysis/common/dulmage_mendelsohn.py @@ -27,8 +27,6 @@ def _get_projected_digraph(bg, matching, top_nodes): - """ - """ digraph = DiGraph() digraph.add_nodes_from(top_nodes) for n in top_nodes: @@ -45,8 +43,6 @@ def _get_projected_digraph(bg, matching, top_nodes): def _get_reachable_from(digraph, sources): - """ - """ _filter = set() reachable = [] for node in sources: @@ -96,14 +92,7 @@ def dulmage_mendelsohn(bg, top_nodes=None, matching=None): t_other = [t for t in top_nodes if t not in _filter] b_other = [b for b in bot_nodes if b not in _filter] - return (( - t_unmatched, - t_reachable, - t_matched_with_reachable, - t_other, - ), ( - b_unmatched, - b_reachable, - b_matched_with_reachable, - b_other, - )) + return ( + (t_unmatched, t_reachable, t_matched_with_reachable, t_other), + (b_unmatched, b_reachable, b_matched_with_reachable, b_other), + ) diff --git a/pyomo/contrib/incidence_analysis/common/tests/test_dulmage_mendelsohn.py b/pyomo/contrib/incidence_analysis/common/tests/test_dulmage_mendelsohn.py index 5192a6aa56e..1675fc7420a 100644 --- a/pyomo/contrib/incidence_analysis/common/tests/test_dulmage_mendelsohn.py +++ b/pyomo/contrib/incidence_analysis/common/tests/test_dulmage_mendelsohn.py @@ -10,13 +10,10 @@ # ___________________________________________________________________________ import pyomo.common.unittest as unittest -from pyomo.common.dependencies import ( - networkx as nx, - networkx_available, - ) +from pyomo.common.dependencies import networkx as nx, networkx_available from pyomo.contrib.incidence_analysis.common.dulmage_mendelsohn import ( dulmage_mendelsohn, - ) +) @unittest.skipUnless(networkx_available, "networkx is not available") @@ -36,39 +33,39 @@ def _construct_graph(self): bg.add_nodes_from([n_l + i for i in right_nodes], bipartite=1) paper_edges = [ - (1, 1), - (1, 2), - (1, 3), - (1, 4), - (1, 5), - (1, 6), - (2, 4), - (2, 5), - (2, 7), - (2, 8), - (2, 10), - (3, 1), - (3, 3), - (3, 5), - (4, 6), - (4, 7), - (4, 11), - (5, 6), - (5, 7), - (5, 9), - (6, 8), - (6, 9), - (7, 8), - (7, 9), - (7, 10), - (8, 10), - (8, 11), - (9, 11), - (10, 10), - (11, 10), - (11, 11), - (12, 11), - ] + (1, 1), + (1, 2), + (1, 3), + (1, 4), + (1, 5), + (1, 6), + (2, 4), + (2, 5), + (2, 7), + (2, 8), + (2, 10), + (3, 1), + (3, 3), + (3, 5), + (4, 6), + (4, 7), + (4, 11), + (5, 6), + (5, 7), + (5, 9), + (6, 8), + (6, 9), + (7, 8), + (7, 9), + (7, 10), + (8, 10), + (8, 11), + (9, 11), + (10, 10), + (11, 10), + (11, 11), + (12, 11), + ] edges = [(i - 1, j - 1 + n_l) for i, j in paper_edges] bg.add_edges_from(edges) @@ -131,27 +128,27 @@ def _construct_graph(self): """ N = 7 top_nodes = list(range(N)) - bot_nodes = list(range(N, 2*N)) + bot_nodes = list(range(N, 2 * N)) graph = nx.Graph() graph.add_nodes_from(top_nodes, bipartite=0) graph.add_nodes_from(bot_nodes, bipartite=1) edges = [ - (0, 0), - (0, 1), - (0, 6), - (1, 0), - (1, 2), - (2, 3), - (2, 5), - (3, 4), - (3, 5), - (4, 3), - (4, 4), - (5, 6), - (6, 6), - ] + (0, 0), + (0, 1), + (0, 6), + (1, 0), + (1, 2), + (2, 3), + (2, 5), + (3, 4), + (3, 5), + (4, 3), + (4, 4), + (5, 6), + (6, 6), + ] edges = [(i, j + N) for i, j in edges] graph.add_edges_from(edges) return graph, top_nodes @@ -165,7 +162,7 @@ def test_graph_dm_partition(self): top_dmp, bot_dmp = dulmage_mendelsohn(graph, top_nodes=top_nodes) self.assertFalse(nx.is_connected(graph)) - + underconstrained_top = {0, 1} underconstrained_bot = {7, 8, 9} self.assertEqual(underconstrained_top, set(top_dmp[2])) diff --git a/pyomo/contrib/incidence_analysis/config.py b/pyomo/contrib/incidence_analysis/config.py new file mode 100644 index 00000000000..56841617cac --- /dev/null +++ b/pyomo/contrib/incidence_analysis/config.py @@ -0,0 +1,78 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +"""Configuration options for incidence graph generation +""" + +import enum +from pyomo.common.config import ConfigDict, ConfigValue, InEnum + + +class IncidenceMethod(enum.Enum): + """Methods for identifying variables that participate in expressions""" + + identify_variables = 0 + """Use ``pyomo.core.expr.visitor.identify_variables``""" + + standard_repn = 1 + """Use ``pyomo.repn.standard_repn.generate_standard_repn``""" + + +_include_fixed = ConfigValue( + default=False, + domain=bool, + description="Include fixed variables", + doc=( + "Flag indicating whether fixed variables should be included in the" + " incidence graph" + ), +) + + +_linear_only = ConfigValue( + default=False, + domain=bool, + description="Identify variables that participate linearly", + doc=( + "Flag indicating whether only variables that participate linearly should" + " be included. Note that these are included even if they participate" + " nonlinearly as well." + ), +) + + +_method = ConfigValue( + default=IncidenceMethod.standard_repn, + domain=InEnum(IncidenceMethod), + description="Method used to identify incident variables", +) + + +IncidenceConfig = ConfigDict() +"""Options for incidence graph generation + +- ``include_fixed`` -- Flag indicating whether fixed variables should be included + in the incidence graph +- ``linear_only`` -- Flag indicating whether only variables that participate linearly + should be included. Note that these are included even if they participate + nonlinearly as well +- ``method`` -- Method used to identify incident variables. Must be a value of the + ``IncidenceMethod`` enum. + +""" + + +IncidenceConfig.declare("include_fixed", _include_fixed) + + +IncidenceConfig.declare("linear_only", _linear_only) + + +IncidenceConfig.declare("method", _method) diff --git a/pyomo/contrib/incidence_analysis/connected.py b/pyomo/contrib/incidence_analysis/connected.py index f268e292991..2dcf31c0fe0 100644 --- a/pyomo/contrib/incidence_analysis/connected.py +++ b/pyomo/contrib/incidence_analysis/connected.py @@ -13,7 +13,23 @@ def get_independent_submatrices(matrix): - """ + """Partition a matrix into irreducible block diagonal form + + This is equivalent to identifying the connected components of the bipartite + incidence graph of rows and columns. + + Parameters + ---------- + matrix: ``scipy.sparse.coo_matrix`` + Matrix to partition into block diagonal form + + Returns + ------- + row_blocks: list of lists + Partition of row coordinates into diagonal blocks + col_blocks: list of lists + Partition of column coordinates into diagonal blocks + """ nxc = nx.algorithms.components nxb = nx.algorithms.bipartite @@ -27,11 +43,10 @@ def get_independent_submatrices(matrix): # nodes have values in [N, N+M-1]. # We could also check the "bipartite" attribute of each node... row_blocks = [ - sorted([node for node in comp if node < N]) - for comp in connected_components + sorted([node for node in comp if node < N]) for comp in connected_components ] col_blocks = [ - sorted([node-N for node in comp if node >= N]) + sorted([node - N for node in comp if node >= N]) for comp in connected_components ] return row_blocks, col_blocks diff --git a/pyomo/contrib/incidence_analysis/dulmage_mendelsohn.py b/pyomo/contrib/incidence_analysis/dulmage_mendelsohn.py index 761b6ee69c2..a3af0d1e6c9 100644 --- a/pyomo/contrib/incidence_analysis/dulmage_mendelsohn.py +++ b/pyomo/contrib/incidence_analysis/dulmage_mendelsohn.py @@ -13,41 +13,95 @@ from pyomo.common.dependencies import networkx as nx from pyomo.contrib.incidence_analysis.common.dulmage_mendelsohn import ( dulmage_mendelsohn as dm_nx, - ) +) + """ This module imports the general Dulmage-Mendelsohn-on-a-graph function from "common" and implements an interface for coo_matrix-like objects. """ RowPartition = namedtuple( - "RowPartition", - ["unmatched", "overconstrained", "underconstrained", "square"], - ) + "RowPartition", ["unmatched", "overconstrained", "underconstrained", "square"] +) +"""Named tuple containing the subsets of the Dulmage-Mendelsohn partition +when applied to matrix rows (constraints). + +""" + ColPartition = namedtuple( - "ColPartition", - ["unmatched", "underconstrained", "overconstrained", "square"], - ) + "ColPartition", ["unmatched", "underconstrained", "overconstrained", "square"] +) +"""Named tuple containing the subsets of the Dulmage-Mendelsohn partition +when applied to matrix columns (variables). + +""" + def dulmage_mendelsohn(matrix_or_graph, top_nodes=None, matching=None): - """ - COO matrix or NetworkX graph interface to the coarse Dulmage Mendelsohn - partition. The matrix or graph should correspond to a Pyomo model. - top_nodes must be provided if a NetworkX graph is used, and should - correspond to Pyomo constraints. + """Partition a bipartite graph or incidence matrix according to the + Dulmage-Mendelsohn characterization + + The Dulmage-Mendelsohn partition tells which nodes of the two bipartite + sets *can possibly be* unmatched after a maximum cardinality matching. + Applied to an incidence matrix, it can be interpreted as partitioning + rows and columns into under-constrained, over-constrained, and + well-constrained subsystems. + + As it is often useful to explicitly check the unmatched rows and columns, + ``dulmage_mendelsohn`` partitions rows into the subsets: + + - **underconstrained** - The rows matched with *possibly* unmatched + columns (unmatched and underconstrained columns) + - **square** - The well-constrained rows, which are matched with + well-constrained columns + - **overconstrained** - The matched rows that *can possibly be* unmatched + in some maximum cardinality matching + - **unmatched** - The unmatched rows in a particular maximum cardinality + matching + + and partitions columns into the subsets: + + - **unmatched** - The unmatched columns in a particular maximum cardinality + matching + - **underconstrained** - The columns that *can possibly be* unmatched in + some maximum cardinality matching + - **square** - The well-constrained columns, which are matched with + well-constrained rows + - **overconstrained** - The columns matched with *possibly* unmatched + rows (unmatched and overconstrained rows) + + Parameters + ---------- + matrix_or_graph: ``scipy.sparse.coo_matrix`` or ``networkx.Graph`` + The incidence matrix or bipartite graph to be partitioned + top_nodes: list + List of nodes in one bipartite set of the graph. Must be provided + if a graph is provided. + matching: dict + A maximum cardinality matching in the form of a dict mapping + from "top nodes" to their matched nodes *and* from the matched + nodes back to the "top nodes". + + Returns + ------- + row_dmp: RowPartition + The Dulmage-Mendelsohn partition of rows + col_dmp: ColPartition + The Dulmage-Mendelsohn partition of columns """ if isinstance(matrix_or_graph, nx.Graph): # The purpose of handling graphs here is that if we construct NX graphs # directly from Pyomo expressions, we can eliminate the overhead of - # convering expressions to a matrix, then the matrix to a graph. + # converting expressions to a matrix, then the matrix to a graph. # # In this case, top_nodes should correspond to constraints. graph = matrix_or_graph if top_nodes is None: raise ValueError( - "top_nodes must be specified if a graph is provided," - "\notherwise the result is ambiguous." - ) + "top_nodes must be specified if a graph is provided," + "\notherwise the result is ambiguous." + ) partition = dm_nx(graph, top_nodes=top_nodes, matching=matching) # RowPartition and ColPartition do not make sense for a general graph. # However, here we assume that this graph comes from a Pyomo model, @@ -74,17 +128,15 @@ def dulmage_mendelsohn(matrix_or_graph, top_nodes=None, matching=None): # Matrix rows have bipartite=0, columns have bipartite=1 bg = from_biadjacency_matrix(matrix) row_partition, col_partition = dm_nx( - bg, - top_nodes=list(range(M)), - matching=matching, - ) + bg, top_nodes=list(range(M)), matching=matching + ) partition = ( - row_partition, - tuple([n - M for n in subset] for subset in col_partition) - # Column nodes have values in [M, M+N-1]. Apply the offset - # to get values corresponding to indices in user's matrix. - ) + row_partition, + tuple([n - M for n in subset] for subset in col_partition) + # Column nodes have values in [M, M+N-1]. Apply the offset + # to get values corresponding to indices in user's matrix. + ) partition = (RowPartition(*partition[0]), ColPartition(*partition[1])) return partition diff --git a/pyomo/contrib/incidence_analysis/incidence.py b/pyomo/contrib/incidence_analysis/incidence.py new file mode 100644 index 00000000000..e38bfc75f58 --- /dev/null +++ b/pyomo/contrib/incidence_analysis/incidence.py @@ -0,0 +1,131 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +"""Functionality for identifying variables that participate in expressions +""" +import enum +from pyomo.core.expr.visitor import identify_variables +from pyomo.core.expr.numvalue import value as pyo_value +from pyomo.repn import generate_standard_repn +from pyomo.common.backports import nullcontext +from pyomo.util.subsystems import TemporarySubsystemManager +from pyomo.contrib.incidence_analysis.config import IncidenceMethod, IncidenceConfig + + +# +# Handlers for different methods of generating the incidence graph +# +def _get_incident_via_identify_variables(expr, include_fixed): + # Note that identify_variables will not identify the same variable + # more than once. + return list(identify_variables(expr, include_fixed=include_fixed)) + + +def _get_incident_via_standard_repn(expr, include_fixed, linear_only): + if include_fixed: + to_unfix = [ + var for var in identify_variables(expr, include_fixed=True) if var.fixed + ] + context = TemporarySubsystemManager(to_unfix=to_unfix) + else: + context = nullcontext() + + with context: + repn = generate_standard_repn(expr, compute_values=False, quadratic=False) + + linear_vars = [] + # Check coefficients to make sure we don't include linear variables with + # fixed coefficients of zero. + # Note that linear variables with constant coefficients of zero are already + # filtered in generate_standard_repn + for var, coef in zip(repn.linear_vars, repn.linear_coefs): + try: + value = pyo_value(coef) + except ValueError as err: + # Catch error evaluating expression with uninitialized variables + # TODO: Suppress logged error? + if "No value for uninitialized NumericValue" not in str(err): + raise err + value = None + if value != 0: + linear_vars.append(var) + if linear_only: + nl_var_id_set = set(id(var) for var in repn.nonlinear_vars) + return [var for var in repn.linear_vars if id(var) not in nl_var_id_set] + else: + # Combine linear and nonlinear variables and filter out duplicates. Note + # that quadratic=False, so we don't need to include repn.quadratic_vars. + variables = linear_vars + list(repn.nonlinear_vars) + unique_variables = [] + id_set = set() + for var in variables: + v_id = id(var) + if v_id not in id_set: + id_set.add(v_id) + unique_variables.append(var) + return unique_variables + + +def get_incident_variables(expr, **kwds): + """Get variables that participate in an expression + + The exact variables returned depends on the method used to determine incidence. + For example, ``method=IncidenceMethod.identify_variables`` will return all + variables participating in the expression, while + ``method=IncidenceMethod.standard_repn`` will return only the variables + identified by ``generate_standard_repn`` which ignores variables that only + appear multiplied by a constant factor of zero. + + Keyword arguments must be valid options for ``IncidenceConfig``. + + Parameters + ---------- + expr: ``NumericExpression`` + Expression to search for variables + + Returns + ------- + list of VarData + List containing the variables that participate in the expression + + Example + ------- + + .. doctest:: + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.incidence_analysis import get_incident_variables + >>> m = pyo.ConcreteModel() + >>> m.x = pyo.Var([1, 2, 3]) + >>> expr = m.x[1] + 2*m.x[2] + 3*m.x[3]**2 + >>> print([v.name for v in get_incident_variables(expr)]) + ['x[1]', 'x[2]', 'x[3]'] + >>> print([v.name for v in get_incident_variables(expr, linear_only=True)]) + ['x[1]', 'x[2]'] + + """ + config = IncidenceConfig(kwds) + method = config.method + include_fixed = config.include_fixed + linear_only = config.linear_only + if linear_only and method is IncidenceMethod.identify_variables: + raise RuntimeError( + "linear_only=True is not supported when using identify_variables" + ) + if method is IncidenceMethod.identify_variables: + return _get_incident_via_identify_variables(expr, include_fixed) + elif method is IncidenceMethod.standard_repn: + return _get_incident_via_standard_repn(expr, include_fixed, linear_only) + else: + raise ValueError( + f"Unrecognized value {method} for the method used to identify incident" + f" variables. Valid options are {IncidenceMethod.identify_variables}" + f" and {IncidenceMethod.standard_repn}." + ) diff --git a/pyomo/contrib/incidence_analysis/interface.py b/pyomo/contrib/incidence_analysis/interface.py index 21296883fd6..f74a68b4422 100644 --- a/pyomo/contrib/incidence_analysis/interface.py +++ b/pyomo/contrib/incidence_analysis/interface.py @@ -8,60 +8,77 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +"""Utility functions and a utility class for interfacing Pyomo components with +useful graph algorithms. + +""" import enum -from pyomo.core.base.block import Block +import textwrap +from pyomo.core.base.block import _BlockData from pyomo.core.base.var import Var from pyomo.core.base.constraint import Constraint from pyomo.core.base.objective import Objective -from pyomo.core.base.reference import Reference -from pyomo.core.expr.visitor import identify_variables -from pyomo.core.expr.logical_expr import EqualityExpression +from pyomo.core.expr import EqualityExpression from pyomo.util.subsystems import create_subsystem_block from pyomo.common.collections import ComponentSet, ComponentMap -from pyomo.common.dependencies import scipy_available -from pyomo.common.dependencies import networkx as nx -from pyomo.contrib.incidence_analysis.matching import maximum_matching -from pyomo.contrib.incidence_analysis.connected import ( - get_independent_submatrices, +from pyomo.common.dependencies import ( + attempt_import, + networkx as nx, + scipy as sp, + plotly, ) +from pyomo.common.deprecation import deprecated +from pyomo.contrib.incidence_analysis.config import IncidenceConfig +from pyomo.contrib.incidence_analysis.matching import maximum_matching +from pyomo.contrib.incidence_analysis.connected import get_independent_submatrices from pyomo.contrib.incidence_analysis.triangularize import ( + get_scc_of_projection, block_triangularize, get_diagonal_blocks, get_blocks_from_maps, - ) +) from pyomo.contrib.incidence_analysis.dulmage_mendelsohn import ( dulmage_mendelsohn, RowPartition, ColPartition, - ) -if scipy_available: - from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP - import scipy as sp - +) +from pyomo.contrib.incidence_analysis.incidence import get_incident_variables +from pyomo.contrib.pynumero.asl import AmplInterface -class IncidenceMatrixType(enum.Enum): - NONE = 0 - STRUCTURAL = 1 - NUMERIC = 2 +pyomo_nlp, pyomo_nlp_available = attempt_import( + 'pyomo.contrib.pynumero.interfaces.pyomo_nlp' +) +asl_available = pyomo_nlp_available & AmplInterface.available() def _check_unindexed(complist): for comp in complist: if comp.is_indexed(): - raise ValueError( - "Variables and constraints must be unindexed " - "ComponentData objects. Got %s, which is indexed." - % comp.name - ) + raise RuntimeError( + "Variables and constraints must be unindexed " + "ComponentData objects. Got %s, which is indexed." % comp.name + ) -def get_incidence_graph(variables, constraints, include_fixed=True): - """ - This function gets the incidence graph of Pyomo variables and constraints. +def get_incidence_graph(variables, constraints, **kwds): + config = IncidenceConfig(kwds) + return get_bipartite_incidence_graph(variables, constraints, **config) - Arguments: - ---------- + +def get_bipartite_incidence_graph(variables, constraints, **kwds): + """Return the bipartite incidence graph of Pyomo variables and constraints. + + Each node in the returned graph is an integer. The convention is that, + for a graph with N variables and M constraints, nodes 0 through M-1 + correspond to constraints and nodes M through M+N-1 correspond to variables. + Nodes correspond to variables and constraints in the provided orders. + For consistency with NetworkX's "convention", constraint nodes are tagged + with ``bipartite=0`` while variable nodes are tagged with ``bipartite=1``, + although these attributes are not used. + + Parameters + --------- variables: List of Pyomo VarData objects Variables that will appear in incidence graph constraints: List of Pyomo ConstraintData objects @@ -69,38 +86,96 @@ def get_incidence_graph(variables, constraints, include_fixed=True): include_fixed: Bool Flag for whether fixed variable should be included in the incidence - Returns: - -------- - NetworkX Graph - + Returns + ------- + ``networkx.Graph`` + """ - _check_unindexed(variables+constraints) - N, M = len(variables), len(constraints) + config = IncidenceConfig(kwds) + _check_unindexed(variables + constraints) + N = len(variables) + M = len(constraints) graph = nx.Graph() graph.add_nodes_from(range(M), bipartite=0) - graph.add_nodes_from(range(M, M+N), bipartite=1) - var_node_map = ComponentMap((v, M+i) for i, v in enumerate(variables)) + graph.add_nodes_from(range(M, M + N), bipartite=1) + var_node_map = ComponentMap((v, M + i) for i, v in enumerate(variables)) for i, con in enumerate(constraints): - for var in identify_variables(con.expr, include_fixed=include_fixed): + for var in get_incident_variables(con.body, **config): if var in var_node_map: graph.add_edge(i, var_node_map[var]) return graph -def _generate_variables_in_constraints(constraints, include_fixed=False): +def extract_bipartite_subgraph(graph, nodes0, nodes1): + """Return the bipartite subgraph of a graph. + + Two lists of nodes to project onto must be provided. These will correspond + to the "bipartite sets" in the subgraph. If the two sets provided have + M and N nodes, the subgraph will have nodes 0 through M+N-1, with the first + M corresponding to the first set provided and the last N corresponding + to the second set. + + Parameters + ---------- + graph: NetworkX Graph + The graph from which a subgraph is extracted + nodes0: list + A list of nodes in the original graph that will form the first + bipartite set of the projected graph (and have ``bipartite=0``) + nodes1: list + A list of nodes in the original graph that will form the second + bipartite set of the projected graph (and have ``bipartite=1``) + + Returns + ------- + subgraph: ``networkx.Graph`` + Graph containing integer nodes corresponding to positions in the + provided lists, with edges where corresponding nodes are adjacent + in the original graph. + + """ + subgraph = nx.Graph() + sub_M = len(nodes0) + sub_N = len(nodes1) + subgraph.add_nodes_from(range(sub_M), bipartite=0) + subgraph.add_nodes_from(range(sub_M, sub_M + sub_N), bipartite=1) + + old_new_map = {} + for i, node in enumerate(nodes0 + nodes1): + if node in old_new_map: + raise RuntimeError("Node %s provided more than once.") + old_new_map[node] = i + + for node1, node2 in graph.edges(): + if node1 in old_new_map and node2 in old_new_map: + new_node_1 = old_new_map[node1] + new_node_2 = old_new_map[node2] + if ( + subgraph.nodes[new_node_1]["bipartite"] + == subgraph.nodes[new_node_2]["bipartite"] + ): + raise RuntimeError( + "Subgraph is not bipartite. Found an edge between nodes" + " %s and %s (in the original graph)." % (node1, node2) + ) + subgraph.add_edge(new_node_1, new_node_2) + return subgraph + + +def _generate_variables_in_constraints(constraints, **kwds): + config = IncidenceConfig(kwds) known_vars = ComponentSet() for con in constraints: - for var in identify_variables(con.expr, include_fixed=include_fixed): + for var in get_incident_variables(con.body, **config): if var not in known_vars: known_vars.add(var) yield var -def get_structural_incidence_matrix(variables, constraints, include_fixed=True): - """ - This function gets the incidence matrix of Pyomo constraints and variables. +def get_structural_incidence_matrix(variables, constraints, **kwds): + """Return the incidence matrix of Pyomo constraints and variables - Arguments + Parameters --------- variables: List of Pyomo VarData objects constraints: List of Pyomo ConstraintData objects @@ -110,31 +185,53 @@ def get_structural_incidence_matrix(variables, constraints, include_fixed=True): Returns ------- - A scipy.sparse coo matrix. Rows are indices into the user-provided list of - constraints, columns are indices into the user-provided list of variables. - Entries are 1.0. + ``scipy.sparse.coo_matrix`` + COO matrix. Rows are indices into the user-provided list of constraints, + columns are indices into the user-provided list of variables. + Entries are 1.0. """ - _check_unindexed(variables+constraints) + config = IncidenceConfig(kwds) + _check_unindexed(variables + constraints) N, M = len(variables), len(constraints) var_idx_map = ComponentMap((v, i) for i, v in enumerate(variables)) rows = [] cols = [] for i, con in enumerate(constraints): - cols.extend(var_idx_map[v] for v in - identify_variables(con.expr, include_fixed=include_fixed) - if v in var_idx_map) - rows.extend([i]*(len(cols) - len(rows))) + cols.extend( + var_idx_map[v] + for v in get_incident_variables(con.body, **config) + if v in var_idx_map + ) + rows.extend([i] * (len(cols) - len(rows))) assert len(rows) == len(cols) - data = [1.0]*len(rows) - matrix = sp.sparse.coo_matrix( (data, (rows, cols)), shape=(M, N) ) + data = [1.0] * len(rows) + matrix = sp.sparse.coo_matrix((data, (rows, cols)), shape=(M, N)) return matrix def get_numeric_incidence_matrix(variables, constraints): - """ - This function gets the numeric incidence matrix (Jacobian) of Pyomo - constraints with respect to variables. + """Return the "numeric incidence matrix" (Jacobian) of Pyomo variables + and constraints. + + Each matrix value is the derivative of a constraint body with respect + to a variable. Rows correspond to constraints and columns correspond to + variables. Entries are included even if the value of the derivative is + zero. + Only active constraints and unfixed variables that participate in these + constraints are included. + + Parameters + --------- + variables: List of Pyomo VarData objects + constraints: List of Pyomo ConstraintData objects + + Returns + ------- + ``scipy.sparse.coo_matrix`` + COO matrix. Rows are indices into the user-provided list of constraints, + columns are indices into the user-provided list of variables. + """ # NOTE: There are several ways to get a numeric incidence matrix # from a Pyomo model. Here we get the numeric incidence matrix by @@ -143,266 +240,579 @@ def get_numeric_incidence_matrix(variables, constraints): _check_unindexed(comps) block = create_subsystem_block(constraints, variables) block._obj = Objective(expr=0) - nlp = PyomoNLP(block) + nlp = pyomo_nlp.PyomoNLP(block) return nlp.extract_submatrix_jacobian(variables, constraints) class IncidenceGraphInterface(object): - """ - The purpose of this class is to allow the user to easily - analyze graphs of variables and contraints in a Pyomo - model without constructing multiple PyomoNLPs. + """An interface for applying graph algorithms to Pyomo variables and + constraints + + Parameters + ---------- + model: Pyomo BlockData or PyNumero PyomoNLP, default ``None`` + An object from which an incidence graph will be constructed. + active: Bool, default ``True`` + Whether only active constraints should be included in the incidence + graph. Cannot be set to ``False`` if the ``model`` is provided as + a PyomoNLP. + include_fixed: Bool, default ``False`` + Whether to include fixed variables in the incidence graph. Cannot + be set to ``False`` if ``model`` is a PyomoNLP. + include_inequality: Bool, default ``True`` + Whether to include inequality constraints (those whose expressions + are not instances of ``EqualityExpression``) in the incidence graph. + If a PyomoNLP is provided, setting to ``False`` uses the + ``evaluate_jacobian_eq`` method instead of ``evaluate_jacobian`` + rather than checking constraint expression types. + + """ - def __init__( - self, - model=None, - active=True, - include_fixed=False, - include_inequality=True, - ): - """ - """ + def __init__(self, model=None, active=True, include_inequality=True, **kwds): + """Construct an IncidenceGraphInterface object""" # If the user gives us a model or an NLP, we assume they want us - # to cache the incidence matrix for fast analysis of submatrices - # later on. + # to cache the incidence graph for fast analysis later on. # WARNING: This cache will become invalid if the user alters their - # model. + # model. + self._config = IncidenceConfig(kwds) if model is None: - self.cached = IncidenceMatrixType.NONE - elif isinstance(model, PyomoNLP): + self._incidence_graph = None + self._variables = None + self._constraints = None + elif isinstance(model, _BlockData): + self._constraints = [ + con + for con in model.component_data_objects(Constraint, active=active) + if include_inequality or isinstance(con.expr, EqualityExpression) + ] + self._variables = list( + _generate_variables_in_constraints(self._constraints, **self._config) + ) + self._var_index_map = ComponentMap( + (var, i) for i, var in enumerate(self._variables) + ) + self._con_index_map = ComponentMap( + (con, i) for i, con in enumerate(self._constraints) + ) + self._incidence_graph = get_bipartite_incidence_graph( + self._variables, self._constraints, **self._config + ) + elif pyomo_nlp_available and isinstance(model, pyomo_nlp.PyomoNLP): if not active: raise ValueError( "Cannot get the Jacobian of inactive constraints from the " "nl interface (PyomoNLP).\nPlease set the `active` flag " "to True." ) - if include_fixed: + if kwds: raise ValueError( - "Cannot get the Jacobian with respect to fixed variables " - "from the nl interface (PyomoNLP).\nPlease set the " - "`include_fixed` flag to False." + "Incidence graph generation options, e.g. include_fixed, method," + " and linear_only, are not supported when generating a graph" + " from a PyomoNLP." ) nlp = model - self.cached = IncidenceMatrixType.NUMERIC - self.variables = nlp.get_pyomo_variables() - self.constraints = nlp.get_pyomo_constraints() - self.var_index_map = ComponentMap( - (var, idx) for idx, var in enumerate(self.variables)) - self.con_index_map = ComponentMap( - (con, idx) for idx, con in enumerate(self.constraints)) - if include_inequality: - self.incidence_matrix = nlp.evaluate_jacobian() - else: - self.incidence_matrix = nlp.evaluate_jacobian_eq() - elif isinstance(model, Block): - self.cached = IncidenceMatrixType.STRUCTURAL - self.constraints = [ - con for con in - model.component_data_objects(Constraint, active=active) + self._variables = nlp.get_pyomo_variables() + self._constraints = [ + con + for con in nlp.get_pyomo_constraints() if include_inequality or isinstance(con.expr, EqualityExpression) ] - self.variables = list( - _generate_variables_in_constraints( - self.constraints, include_fixed=include_fixed - ) + self._var_index_map = ComponentMap( + (var, idx) for idx, var in enumerate(self._variables) + ) + self._con_index_map = ComponentMap( + (con, idx) for idx, con in enumerate(self._constraints) ) - self.var_index_map = ComponentMap( - (var, i) for i, var in enumerate(self.variables)) - self.con_index_map = ComponentMap( - (con, i) for i, con in enumerate(self.constraints)) - self.incidence_matrix = get_structural_incidence_matrix( - self.variables, - self.constraints, - ) + if include_inequality: + incidence_matrix = nlp.evaluate_jacobian() + else: + incidence_matrix = nlp.evaluate_jacobian_eq() + nxb = nx.algorithms.bipartite + self._incidence_graph = nxb.from_biadjacency_matrix(incidence_matrix) else: raise TypeError( - "Unsupported type for incidence matrix. Expected " - "%s or %s but got %s." - % (PyomoNLP, Block, type(model)) - ) + "Unsupported type for incidence graph. Expected PyomoNLP" + " or _BlockData but got %s." % type(model) + ) - self.row_block_map = None - self.col_block_map = None + @property + def variables(self): + """The variables participating in the incidence graph""" + if self._incidence_graph is None: + raise RuntimeError("Cannot get variables when nothing is cached") + return self._variables + + @property + def constraints(self): + """The constraints participating in the incidence graph""" + if self._incidence_graph is None: + raise RuntimeError("Cannot get constraints when nothing is cached") + return self._constraints + + @property + def n_edges(self): + """The number of edges in the incidence graph, or the number of + structural nonzeros in the incidence matrix + """ + # The number of structural nonzeros in the incidence matrix + if self._incidence_graph is None: + raise RuntimeError( + "Cannot get number of edges (nonzeros) when nothing is cached" + ) + return len(self._incidence_graph.edges) + + @property + @deprecated( + msg="``var_index_map`` is deprecated. Please use ``get_matrix_coord`` instead.", + version="6.5.0", + ) + def var_index_map(self): + return self._var_index_map + + @property + @deprecated( + msg="``con_index_map`` is deprecated. Please use ``get_matrix_coord`` instead.", + version="6.5.0", + ) + def con_index_map(self): + return self._con_index_map + + @property + @deprecated( + msg="The ``row_block_map`` attribute is deprecated and will be removed.", + version="6.5.0", + ) + def row_block_map(self): + return None + + @property + @deprecated( + msg="The ``col_block_map`` attribute is deprecated and will be removed.", + version="6.5.0", + ) + def col_block_map(self): + return None + + def get_matrix_coord(self, component): + """Return the row or column coordinate of the component in the incidence + *matrix* of variables and constraints + + Variables will return a column coordinate and constraints will return + a row coordinate. + + Parameters + ---------- + component: ``ComponentData`` + Component whose coordinate to locate + + Returns + ------- + ``int`` + Column or row coordinate of the provided variable or constraint + + """ + if self._incidence_graph is None: + raise RuntimeError( + "Cannot get the coordinate of %s if an incidence graph" + " is not cached." % component.name + ) + _check_unindexed([component]) + if component in self._var_index_map and component in self._con_index_map: + raise RuntimeError( + "%s is in both variable and constraint maps." + " This should not happen." % component.name + ) + elif component in self._var_index_map: + return self._var_index_map[component] + elif component in self._con_index_map: + return self._con_index_map[component] + else: + raise RuntimeError( + "%s is not included in the incidence graph" % component.name + ) def _validate_input(self, variables, constraints): if variables is None: - if self.cached is IncidenceMatrixType.NONE: - raise ValueError( - "Neither variables nor a model have been provided." - ) + if self._incidence_graph is None: + raise ValueError("Neither variables nor a model have been provided.") else: variables = self.variables if constraints is None: - if self.cached is IncidenceMatrixType.NONE: - raise ValueError( - "Neither constraints nor a model have been provided." - ) + if self._incidence_graph is None: + raise ValueError("Neither constraints nor a model have been provided.") else: constraints = self.constraints - _check_unindexed(variables+constraints) + _check_unindexed(variables + constraints) return variables, constraints - def _extract_submatrix(self, variables, constraints): - # Assumes variables and constraints are valid - if self.cached is IncidenceMatrixType.NONE: - return get_structural_incidence_matrix( - variables, - constraints, - include_fixed=False, - ) + def _extract_subgraph(self, variables, constraints): + if self._incidence_graph is None: + # Note that we pass along self._config here, so any kwds used + # in construction will apply to these incidence graphs. + return get_bipartite_incidence_graph(variables, constraints, **self._config) else: - N, M = len(variables), len(constraints) - old_new_var_indices = dict((self.var_index_map[v], i) - for i, v in enumerate(variables)) - old_new_con_indices = dict((self.con_index_map[c], i) - for i, c in enumerate(constraints)) - coo = self.incidence_matrix - new_row = [] - new_col = [] - new_data = [] - for r, c, e in zip(coo.row, coo.col, coo.data): - if r in old_new_con_indices and c in old_new_var_indices: - new_row.append(old_new_con_indices[r]) - new_col.append(old_new_var_indices[c]) - new_data.append(e) - return sp.sparse.coo_matrix( - (new_data, (new_row, new_col)), - shape=(M, N), - ) + constraint_nodes = [self._con_index_map[con] for con in constraints] + + # Note that this is the number of constraints in the original graph, + # not the subgraph. + M = len(self.constraints) + variable_nodes = [M + self._var_index_map[var] for var in variables] + subgraph = extract_bipartite_subgraph( + self._incidence_graph, constraint_nodes, variable_nodes + ) + return subgraph + + @property + def incidence_matrix(self): + """The structural incidence matrix of variables and constraints. + + Variables correspond to columns and constraints correspond to rows. + All matrix entries have value 1.0. - def maximum_matching(self, variables=None, constraints=None): """ - Returns a maximal matching between the constraints and variables, - in terms of a map from constraints to variables. + if self._incidence_graph is None: + return None + else: + M = len(self.constraints) + N = len(self.variables) + row = [] + col = [] + data = [] + # Here we assume that the incidence graph is bipartite with nodes + # 0 through M-1 forming one of the bipartite sets. + for i in range(M): + assert self._incidence_graph.nodes[i]["bipartite"] == 0 + for j in self._incidence_graph[i]: + assert self._incidence_graph.nodes[j]["bipartite"] == 1 + row.append(i) + col.append(j - M) + data.append(1.0) + return sp.sparse.coo_matrix((data, (row, col)), shape=(M, N)) + + def get_adjacent_to(self, component): + """Return a list of components adjacent to the provided component + in the cached bipartite incidence graph of variables and constraints + + Parameters + ---------- + component: ``ComponentData`` + The variable or constraint data object whose adjacent components + are returned + + Returns + ------- + list of ComponentData + List of constraint or variable data objects adjacent to the + provided component + + Example + ------- + + .. doctest:: + :skipif: not networkx_available + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.incidence_analysis import IncidenceGraphInterface + >>> m = pyo.ConcreteModel() + >>> m.x = pyo.Var([1, 2]) + >>> m.eq1 = pyo.Constraint(expr=m.x[1]**2 == 7) + >>> m.eq2 = pyo.Constraint(expr=m.x[1]*m.x[2] == 3) + >>> m.eq3 = pyo.Constraint(expr=m.x[1] + 2*m.x[2] == 5) + >>> igraph = IncidenceGraphInterface(m) + >>> adj_to_x2 = igraph.get_adjacent_to(m.x[2]) + >>> print([c.name for c in adj_to_x2]) + ['eq2', 'eq3'] + """ - variables, constraints = self._validate_input(variables, constraints) - matrix = self._extract_submatrix(variables, constraints) + if self._incidence_graph is None: + raise RuntimeError( + "Cannot get components adjacent to %s if an incidence graph" + " is not cached." % component + ) + _check_unindexed([component]) + M = len(self.constraints) + N = len(self.variables) + if component in self._var_index_map: + vnode = M + self._var_index_map[component] + adj = self._incidence_graph[vnode] + adj_comps = [self.constraints[i] for i in adj] + elif component in self._con_index_map: + cnode = self._con_index_map[component] + adj = self._incidence_graph[cnode] + adj_comps = [self.variables[j - M] for j in adj] + else: + raise RuntimeError( + "Cannot find component %s in the cached incidence graph." % component + ) + return adj_comps - matching = maximum_matching(matrix.tocoo()) - # Matching maps row (constraint) indices to column (variable) indices + def maximum_matching(self, variables=None, constraints=None): + """Return a maximum cardinality matching of variables and constraints. - return ComponentMap((constraints[i], variables[j]) - for i, j in matching.items()) + The matching maps constraints to their matched variables. + + Returns + ------- + ``ComponentMap`` + A map from constraints to their matched variables. - def get_connected_components(self, variables=None, constraints=None): """ - Return lists of lists of variables and constraints that appear in - different connected components of the bipartite graph of variables - and constraints. + variables, constraints = self._validate_input(variables, constraints) + graph = self._extract_subgraph(variables, constraints) + con_nodes = list(range(len(constraints))) + matching = maximum_matching(graph, top_nodes=con_nodes) + # Matching maps constraint nodes to variable nodes. Here we need to + # know the convention according to which the graph was constructed. + M = len(constraints) + return ComponentMap( + (constraints[i], variables[j - M]) for i, j in matching.items() + ) + + def get_connected_components(self, variables=None, constraints=None): + """Partition variables and constraints into weakly connected components + of the incidence graph + + These correspond to diagonal blocks in a block diagonalization of the + incidence matrix. + + Returns + ------- + var_blocks: list of lists of variables + Partition of variables into connected components + con_blocks: list of lists of constraints + Partition of constraints into corresponding connected components + """ variables, constraints = self._validate_input(variables, constraints) - matrix = self._extract_submatrix(variables, constraints) + graph = self._extract_subgraph(variables, constraints) + nxc = nx.algorithms.components + M = len(constraints) + N = len(variables) + connected_components = list(nxc.connected_components(graph)) + + con_blocks = [ + sorted([i for i in comp if i < M]) for comp in connected_components + ] + con_blocks = [[constraints[i] for i in block] for block in con_blocks] + var_blocks = [ + sorted([j for j in comp if j >= M]) for comp in connected_components + ] + var_blocks = [[variables[i - M] for i in block] for block in var_blocks] - row_blocks, col_blocks = get_independent_submatrices(matrix.tocoo()) - con_blocks = [[constraints[i] for i in block] for block in row_blocks] - var_blocks = [[variables[j] for j in block] for block in col_blocks] - # Switch the order of the partitions here to match the method call. - # Hopefully this does not get too confusing... return var_blocks, con_blocks - def block_triangularize(self, variables=None, constraints=None): - """ - Returns two ComponentMaps. A map from variables to their blocks - in a block triangularization of the incidence matrix, and a - map from constraints to their blocks in a block triangularization - of the incidence matrix. + # NOTE: That this replaces the <=6.4.4 block_triangularize function + def map_nodes_to_block_triangular_indices(self, variables=None, constraints=None): + """Map variables and constraints to indices of their diagonal blocks in + a block lower triangular permutation + + Returns + ------- + var_block_map: ``ComponentMap`` + Map from variables to their diagonal blocks in a block + triangularization + con_block_map: ``ComponentMap`` + Map from constraints to their diagonal blocks in a block + triangularization + """ variables, constraints = self._validate_input(variables, constraints) - matrix = self._extract_submatrix(variables, constraints) - - row_block_map, col_block_map = block_triangularize(matrix.tocoo()) - # Cache maps in case we want to get diagonal blocks quickly in the - # future. - self.row_block_map = row_block_map - self.col_block_map = col_block_map - con_block_map = ComponentMap((constraints[i], idx) - for i, idx in row_block_map.items()) - var_block_map = ComponentMap((variables[j], idx) - for j, idx in col_block_map.items()) + graph = self._extract_subgraph(variables, constraints) + + M = len(constraints) + con_nodes = list(range(M)) + sccs = get_scc_of_projection(graph, con_nodes) + row_idx_map = {r: idx for idx, scc in enumerate(sccs) for r, _ in scc} + col_idx_map = {c - M: idx for idx, scc in enumerate(sccs) for _, c in scc} + con_block_map = ComponentMap( + (constraints[i], idx) for i, idx in row_idx_map.items() + ) + var_block_map = ComponentMap( + (variables[j], idx) for j, idx in col_idx_map.items() + ) # Switch the order of the maps here to match the method call. # Hopefully this does not get too confusing... return var_block_map, con_block_map - def get_diagonal_blocks(self, variables=None, constraints=None): - """ - Returns the diagonal blocks in a block triangularization of the - incidence matrix of the provided constraints with respect to the - provided variables. + def block_triangularize(self, variables=None, constraints=None): + """Compute an ordered partition of the provided variables and + constraints such that their incidence matrix is block lower triangular + + Subsets in the partition correspond to the strongly connected components + of the bipartite incidence graph, projected with respect to a perfect + matching. Returns ------- - tuple of lists - The first list contains lists that partition the variables, - the second lists contains lists that partition the constraints. + var_partition: list of lists + Partition of variables. The inner lists hold unindexed variables. + con_partition: list of lists + Partition of constraints. The inner lists hold unindexed constraints. + + Example + ------- + + .. doctest:: + :skipif: not networkx_available + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.incidence_analysis import IncidenceGraphInterface + >>> m = pyo.ConcreteModel() + >>> m.x = pyo.Var([1, 2]) + >>> m.eq1 = pyo.Constraint(expr=m.x[1]**2 == 7) + >>> m.eq2 = pyo.Constraint(expr=m.x[1]*m.x[2] == 3) + >>> igraph = IncidenceGraphInterface(m) + >>> vblocks, cblocks = igraph.block_triangularize() + >>> print([[v.name for v in vb] for vb in vblocks]) + [['x[1]'], ['x[2]']] + >>> print([[c.name for c in cb] for cb in cblocks]) + [['eq1'], ['eq2']] + + .. note:: + + **Breaking change in Pyomo 6.5.0** + + The pre-6.5.0 ``block_triangularize`` method returned maps from + each variable or constraint to the index of its block in a block + lower triangularization as the original intent of this function + was to identify when variables do or don't share a diagonal block + in this partition. Since then, the dominant use case of + ``block_triangularize`` has been to partition variables and + constraints into these blocks and inspect or solve each block + individually. A natural return type for this functionality is the + ordered partition of variables and constraints, as lists of lists. + This functionality was previously available via the + ``get_diagonal_blocks`` method, which was confusing as it did not + capture that the partition was the diagonal of a block + *triangularization* (as opposed to diagonalization). The pre-6.5.0 + functionality of ``block_triangularize`` is still available via the + ``map_nodes_to_block_triangular_indices`` method. """ variables, constraints = self._validate_input(variables, constraints) - matrix = self._extract_submatrix(variables, constraints) + graph = self._extract_subgraph(variables, constraints) - if self.row_block_map is None or self.col_block_map is None: - block_rows, block_cols = get_diagonal_blocks(matrix) - else: - block_rows, block_cols = get_blocks_from_maps( - self.row_block_map, self.col_block_map - ) - block_cons = [[constraints[i] for i in block] for block in block_rows] - block_vars = [[variables[i] for i in block] for block in block_cols] + M = len(constraints) + con_nodes = list(range(M)) + sccs = get_scc_of_projection(graph, con_nodes) + var_partition = [[variables[j - M] for _, j in scc] for scc in sccs] + con_partition = [[constraints[i] for i, _ in scc] for scc in sccs] + return var_partition, con_partition + + @deprecated( + msg=( + "``IncidenceGraphInterface.get_diagonal_blocks`` is deprecated." + " Please use ``IncidenceGraphInterface.block_triangularize``" + " instead." + ), + version="6.5.0", + ) + def get_diagonal_blocks(self, variables=None, constraints=None): + variables, constraints = self._validate_input(variables, constraints) + graph = self._extract_subgraph(variables, constraints) + M = len(constraints) + con_nodes = list(range(M)) + sccs = get_scc_of_projection(graph, con_nodes) + block_cons = [[constraints[i] for i, _ in scc] for scc in sccs] + block_vars = [[variables[j - M] for _, j in scc] for scc in sccs] return block_vars, block_cons def dulmage_mendelsohn(self, variables=None, constraints=None): - """ - Returns the Dulmage-Mendelsohn partition of the incidence graph - of the provided variables and constraints. + """Partition variables and constraints according to the Dulmage- + Mendelsohn characterization of the incidence graph + + Variables are partitioned into the following subsets: + + - **unmatched** - Variables not matched in a particular maximum + cardinality matching + - **underconstrained** - Variables that *could possibly be* unmatched + in a maximum cardinality matching + - **square** - Variables in the well-constrained subsystem + - **overconstrained** - Variables matched with constraints that can + possibly be unmatched + + Constraints are partitioned into the following subsets: + + - **underconstrained** - Constraints matched with variables that can + possibly be unmatched + - **square** - Constraints in the well-constrained subsystem + - **overconstrained** - Constraints that *can possibly be* unmatched + with a maximum cardinality matching + - **unmatched** - Constraints that were not matched in a particular + maximum cardinality matching - Returns: - -------- - ColPartition namedtuple and RowPartition namedtuple. - The ColPartition is returned first to match the order of variables - and constraints in the method arguments. - These partition variables (columns) and constraints (rows) - into overconstrained, underconstrained, unmatched, and square. + Returns + ------- + var_partition: ``ColPartition`` named tuple + Partitions variables into square, underconstrained, overconstrained, + and unmatched. + con_partition: ``RowPartition`` named tuple + Partitions constraints into square, underconstrained, + overconstrained, and unmatched. + + Example + ------- + + .. doctest:: + :skipif: not networkx_available + + >>> import pyomo.environ as pyo + >>> from pyomo.contrib.incidence_analysis import IncidenceGraphInterface + >>> m = pyo.ConcreteModel() + >>> m.x = pyo.Var([1, 2]) + >>> m.eq1 = pyo.Constraint(expr=m.x[1]**2 == 7) + >>> m.eq2 = pyo.Constraint(expr=m.x[1]*m.x[2] == 3) + >>> m.eq3 = pyo.Constraint(expr=m.x[1] + 2*m.x[2] == 5) + >>> igraph = IncidenceGraphInterface(m) + >>> var_dmp, con_dmp = igraph.dulmage_mendelsohn() + >>> print([v.name for v in var_dmp.overconstrained]) + ['x[1]', 'x[2]'] + >>> print([c.name for c in con_dmp.overconstrained]) + ['eq1', 'eq2'] + >>> print([c.name for c in con_dmp.unmatched]) + ['eq3'] """ variables, constraints = self._validate_input(variables, constraints) - matrix = self._extract_submatrix(variables, constraints) - - row_partition, col_partition = dulmage_mendelsohn(matrix.tocoo()) + graph = self._extract_subgraph(variables, constraints) + M = len(constraints) + top_nodes = list(range(M)) + row_partition, col_partition = dulmage_mendelsohn(graph, top_nodes=top_nodes) con_partition = RowPartition( - *[[constraints[i] for i in subset] for subset in row_partition] - ) + *[[constraints[i] for i in subset] for subset in row_partition] + ) var_partition = ColPartition( - *[[variables[i] for i in subset] for subset in col_partition] - ) + *[[variables[i - M] for i in subset] for subset in col_partition] + ) # Switch the order of the maps here to match the method call. # Hopefully this does not get too confusing... return var_partition, con_partition def remove_nodes(self, nodes, constraints=None): - """ - Removes the specified variables and constraints (columns and - rows) from the cached incidence matrix. This is a "projection" - of the variable and constraint vectors, rather than something - like a vertex elimination. - For the puropse of this method, there is no need to distinguish - between variables and constraints. However, we provide the - "constraints" argument so a call signature similar to other methods - in this class is still valid. - - Arguments: + """Removes the specified variables and constraints (columns and + rows) from the cached incidence matrix. + + This is a "projection" of the variable and constraint vectors, rather + than something like a vertex elimination. For the puropse of this + method, there is no need to distinguish between variables and + constraints. However, we provide the "constraints" argument so a call + signature similar to other methods in this class is still valid. + + Parameters ---------- - nodes: List + nodes: list VarData or ConData objects whose columns or rows will be removed from the incidence matrix. - constraints: List + constraints: list VarData or ConData objects whose columns or rows will be removed from the incidence matrix. """ if constraints is None: constraints = [] - if self.cached is IncidenceMatrixType.NONE: + if self._incidence_graph is None: raise RuntimeError( "Attempting to remove variables and constraints from cached " "incidence matrix,\nbut no incidence matrix has been cached." @@ -411,18 +821,84 @@ def remove_nodes(self, nodes, constraints=None): to_exclude.update(constraints) vars_to_include = [v for v in self.variables if v not in to_exclude] cons_to_include = [c for c in self.constraints if c not in to_exclude] - incidence_matrix = self._extract_submatrix( - vars_to_include, cons_to_include - ) + incidence_graph = self._extract_subgraph(vars_to_include, cons_to_include) # update attributes - self.variables = vars_to_include - self.constraints = cons_to_include - self.incidence_matrix = incidence_matrix - self.var_index_map = ComponentMap( + self._variables = vars_to_include + self._constraints = cons_to_include + self._incidence_graph = incidence_graph + self._var_index_map = ComponentMap( (var, i) for i, var in enumerate(self.variables) ) - self.con_index_map = ComponentMap( - (con, i) for i, con in enumerate(self.constraints) + self._con_index_map = ComponentMap( + (con, i) for i, con in enumerate(self._constraints) + ) + + def plot(self, variables=None, constraints=None, title=None, show=True): + """Plot the bipartite incidence graph of variables and constraints""" + variables, constraints = self._validate_input(variables, constraints) + graph = self._extract_subgraph(variables, constraints) + M = len(constraints) + + left_nodes = list(range(M)) + pos_dict = nx.drawing.bipartite_layout(graph, nodes=left_nodes) + + edge_x = [] + edge_y = [] + for start_node, end_node in graph.edges(): + x0, y0 = pos_dict[start_node] + x1, y1 = pos_dict[end_node] + edge_x.append(x0) + edge_x.append(x1) + edge_x.append(None) + edge_y.append(y0) + edge_y.append(y1) + edge_y.append(None) + edge_trace = plotly.graph_objects.Scatter( + x=edge_x, + y=edge_y, + line=dict(width=0.5, color='#888'), + hoverinfo='none', + mode='lines', + ) + + node_x = [] + node_y = [] + node_text = [] + node_color = [] + for node in graph.nodes(): + x, y = pos_dict[node] + node_x.append(x) + node_y.append(y) + if node < M: + # According to convention, we are a constraint node + c = constraints[node] + node_color.append('red') + body_text = '
'.join( + textwrap.wrap(str(c.body), width=120, subsequent_indent=" ") + ) + node_text.append( + f'{str(c)}
lb: {str(c.lower)}
body: {body_text}
' + f'ub: {str(c.upper)}
active: {str(c.active)}' + ) + else: + # According to convention, we are a variable node + v = variables[node - M] + node_color.append('blue') + node_text.append( + f'{str(v)}
lb: {str(v.lb)}
ub: {str(v.ub)}
' + f'value: {str(v.value)}
domain: {str(v.domain)}
' + f'fixed: {str(v.is_fixed())}' + ) + node_trace = plotly.graph_objects.Scatter( + x=node_x, + y=node_y, + mode='markers', + hoverinfo='text', + text=node_text, + marker=dict(color=node_color, size=10), ) - self.row_block_map = None - self.col_block_map = None + fig = plotly.graph_objects.Figure(data=[edge_trace, node_trace]) + if title is not None: + fig.update_layout(title=dict(text=title)) + if show: + fig.show() diff --git a/pyomo/contrib/incidence_analysis/matching.py b/pyomo/contrib/incidence_analysis/matching.py index 25906958103..14b3cd5b18d 100644 --- a/pyomo/contrib/incidence_analysis/matching.py +++ b/pyomo/contrib/incidence_analysis/matching.py @@ -11,34 +11,74 @@ from pyomo.common.dependencies import networkx as nx -def maximum_matching(matrix): - """ - Returns a maximum matching of the rows and columns of the - matrix as a dict from row indices to column indices. + +def maximum_matching(matrix_or_graph, top_nodes=None): + """Return a maximum cardinality matching of the provided matrix or + bipartite graph + + If a matrix is provided, the matching is returned as a map from row + indices to column indices. If a bipartite graph is provided, a list of + "top nodes" must be provided as well. These correspond to one of the + "bipartite sets". The matching is then returned as a map from "top nodes" + to the other set of nodes. + + Parameters + ---------- + matrix_or_graph: SciPy sparse matrix or NetworkX Graph + The matrix or graph whose maximum matching will be computed + top_nodes: list + Integer nodes representing a bipartite set in a graph. Must be provided + if and only if a NetworkX Graph is provided. + + Returns + ------- + max_matching: dict + Dict mapping from integer nodes in the first bipartite set (row + indices) to nodes in the second (column indices). + """ nxb = nx.algorithms.bipartite nxc = nx.algorithms.components from_biadjacency_matrix = nxb.matrix.from_biadjacency_matrix - M, N = matrix.shape - bg = from_biadjacency_matrix(matrix) - - # Check assumptions regarding from_biadjacency_matrix function: - for i in range(M): - # First M nodes in graph correspond to rows - assert bg.nodes[i]['bipartite'] == 0 - - for j in range(M, M+N): - # Last N nodes in graph correspond to columns - assert bg.nodes[j]['bipartite'] == 1 - - # If the matrix is block diagonal, the graph will be disconnected. - # This is fine, but we need to separate into connected components - # for NetworkX to not complain. - conn_comp = [bg.subgraph(c) for c in nxc.connected_components(bg)] - matchings = [nxb.maximum_matching(c) for c in conn_comp] - # If n0 < M, then n1 >= M. n0 is the row index, n1-M is the column index - max_matching = { - n0: n1-M for m in matchings for n0, n1 in m.items() if n0 < M - } + if isinstance(matrix_or_graph, nx.Graph): + graph_provided = True + if top_nodes is None: + raise RuntimeError("top_nodes argument must be set if a graph is provided.") + M = len(top_nodes) + N = len(matrix_or_graph.nodes) - M + bg = matrix_or_graph + if not nxb.is_bipartite(bg): + raise RuntimeError("Provided graph is not bipartite.") + else: + graph_provided = False + # Assume something SciPy-sparse compatible was provided. + if top_nodes is not None: + raise RuntimeError( + "top_nodes argument cannot be used if a matrix is provided" + ) + M, N = matrix_or_graph.shape + top_nodes = list(range(M)) + bg = from_biadjacency_matrix(matrix_or_graph) + + # Check assumptions regarding from_biadjacency_matrix function: + for i in range(M): + # First M nodes in graph correspond to rows + assert bg.nodes[i]["bipartite"] == 0 + + for j in range(M, M + N): + # Last N nodes in graph correspond to columns + assert bg.nodes[j]["bipartite"] == 1 + + matching = nxb.maximum_matching(bg, top_nodes=top_nodes) + if graph_provided: + top_node_set = set(top_nodes) + # If a graph was provided, we return a mapping from "top nodes" + # to their matched "bottom nodes" + max_matching = {n0: n1 for n0, n1 in matching.items() if n0 in top_node_set} + else: + # If a matrix was provided, we return a mapping from row indices + # to column indices. + max_matching = {n0: n1 - M for n0, n1 in matching.items() if n0 < M} + return max_matching diff --git a/pyomo/contrib/incidence_analysis/scc_solver.py b/pyomo/contrib/incidence_analysis/scc_solver.py new file mode 100644 index 00000000000..d7620278fd3 --- /dev/null +++ b/pyomo/contrib/incidence_analysis/scc_solver.py @@ -0,0 +1,146 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import logging + +from pyomo.core.base.constraint import Constraint +from pyomo.util.calc_var_value import calculate_variable_from_constraint +from pyomo.util.subsystems import TemporarySubsystemManager, generate_subsystem_blocks +from pyomo.contrib.incidence_analysis.interface import ( + IncidenceGraphInterface, + _generate_variables_in_constraints, +) + + +_log = logging.getLogger(__name__) + + +def generate_strongly_connected_components( + constraints, variables=None, include_fixed=False +): + """Yield in order ``_BlockData`` that each contain the variables and + constraints of a single diagonal block in a block lower triangularization + of the incidence matrix of constraints and variables + + These diagonal blocks correspond to strongly connected components of the + bipartite incidence graph, projected with respect to a perfect matching + into a directed graph. + + Parameters + ---------- + constraints: List of Pyomo constraint data objects + Constraints used to generate strongly connected components. + variables: List of Pyomo variable data objects + Variables that may participate in strongly connected components. + If not provided, all variables in the constraints will be used. + include_fixed: Bool + Indicates whether fixed variables will be included when + identifying variables in constraints. + + Yields + ------ + Tuple of ``_BlockData``, list-of-variables + Blocks containing the variables and constraints of every strongly + connected component, in a topological order. The variables are the + "input variables" for that block. + + """ + if variables is None: + variables = list( + _generate_variables_in_constraints(constraints, include_fixed=include_fixed) + ) + + assert len(variables) == len(constraints) + igraph = IncidenceGraphInterface() + var_blocks, con_blocks = igraph.block_triangularize( + variables=variables, constraints=constraints + ) + subsets = [(cblock, vblock) for vblock, cblock in zip(var_blocks, con_blocks)] + for block, inputs in generate_subsystem_blocks( + subsets, include_fixed=include_fixed + ): + # TODO: How does len scale for reference-to-list? + assert len(block.vars) == len(block.cons) + yield (block, inputs) + + +def solve_strongly_connected_components( + block, solver=None, solve_kwds=None, calc_var_kwds=None +): + """Solve a square system of variables and equality constraints by + solving strongly connected components individually. + + Strongly connected components (of the directed graph of constraints + obtained from a perfect matching of variables and constraints) are + the diagonal blocks in a block triangularization of the incidence + matrix, so solving the strongly connected components in topological + order is sufficient to solve the entire block. + + One-by-one blocks are solved using Pyomo's + calculate_variable_from_constraint function, while higher-dimension + blocks are solved using the user-provided solver object. + + Parameters + ---------- + block: Pyomo Block + The Pyomo block whose variables and constraints will be solved + solver: Pyomo solver object + The solver object that will be used to solve strongly connected + components of size greater than one constraint. Must implement + a solve method. + solve_kwds: Dictionary + Keyword arguments for the solver's solve method + calc_var_kwds: Dictionary + Keyword arguments for calculate_variable_from_constraint + + Returns + ------- + List of results objects returned by each call to solve + + """ + if solve_kwds is None: + solve_kwds = {} + if calc_var_kwds is None: + calc_var_kwds = {} + + igraph = IncidenceGraphInterface( + block, active=True, include_fixed=False, include_inequality=False + ) + constraints = igraph.constraints + variables = igraph.variables + + res_list = [] + log_blocks = _log.isEnabledFor(logging.DEBUG) + for scc, inputs in generate_strongly_connected_components(constraints, variables): + with TemporarySubsystemManager(to_fix=inputs): + N = len(scc.vars) + if N == 1: + if log_blocks: + _log.debug(f"Solving 1x1 block: {scc.cons[0].name}.") + results = calculate_variable_from_constraint( + scc.vars[0], scc.cons[0], **calc_var_kwds + ) + res_list.append(results) + else: + if solver is None: + var_names = [var.name for var in scc.vars.values()][:10] + con_names = [con.name for con in scc.cons.values()][:10] + raise RuntimeError( + "An external solver is required if block has strongly\n" + "connected components of size greater than one (is not" + " a DAG).\nGot an SCC of size %sx%s including" + " components:\n%s\n%s" % (N, N, var_names, con_names) + ) + if log_blocks: + _log.debug(f"Solving {N}x{N} block.") + results = solver.solve(scc, **solve_kwds) + res_list.append(results) + return res_list diff --git a/pyomo/contrib/incidence_analysis/tests/models_for_testing.py b/pyomo/contrib/incidence_analysis/tests/models_for_testing.py index 7b969d1a1e8..98d61201619 100644 --- a/pyomo/contrib/incidence_analysis/tests/models_for_testing.py +++ b/pyomo/contrib/incidence_analysis/tests/models_for_testing.py @@ -21,7 +21,7 @@ def make_gas_expansion_model(N=2): of isentropic expansions. """ m = pyo.ConcreteModel() - m.streams = pyo.Set(initialize=range(N+1)) + m.streams = pyo.Set(initialize=range(N + 1)) m.rho = pyo.Var(m.streams, initialize=1) m.P = pyo.Var(m.streams, initialize=1) m.F = pyo.Var(m.streams, initialize=1) @@ -29,13 +29,14 @@ def make_gas_expansion_model(N=2): m.R = pyo.Param(initialize=8.31) m.Q = pyo.Param(m.streams, initialize=1) - m.gamma = pyo.Param(initialize=1.4*m.R.value) + m.gamma = pyo.Param(initialize=1.4 * m.R.value) def mbal(m, i): if i == 0: return pyo.Constraint.Skip else: - return m.rho[i-1]*m.F[i-1] - m.rho[i]*m.F[i] == 0 + return m.rho[i - 1] * m.F[i - 1] - m.rho[i] * m.F[i] == 0 + m.mbal = pyo.Constraint(m.streams, rule=mbal) def ebal(m, i): @@ -43,21 +44,25 @@ def ebal(m, i): return pyo.Constraint.Skip else: return ( - m.rho[i-1]*m.F[i-1]*m.T[i-1] + - m.Q[i] - - m.rho[i]*m.F[i]*m.T[i] == 0 - ) + m.rho[i - 1] * m.F[i - 1] * m.T[i - 1] + + m.Q[i] + - m.rho[i] * m.F[i] * m.T[i] + == 0 + ) + m.ebal = pyo.Constraint(m.streams, rule=ebal) def expansion(m, i): if i == 0: return pyo.Constraint.Skip else: - return m.P[i]/m.P[i-1] - (m.rho[i]/m.rho[i-1])**m.gamma == 0 + return m.P[i] / m.P[i - 1] - (m.rho[i] / m.rho[i - 1]) ** m.gamma == 0 + m.expansion = pyo.Constraint(m.streams, rule=expansion) def ideal_gas(m, i): - return m.P[i] - m.rho[i]*m.R*m.T[i] == 0 + return m.P[i] - m.rho[i] * m.R * m.T[i] == 0 + m.ideal_gas = pyo.Constraint(m.streams, rule=ideal_gas) return m @@ -76,18 +81,16 @@ def make_dynamic_model(**disc_args): m.flow_const = pyo.Param(initialize=0.5) def diff_eqn_rule(m, t): - return m.area*m.dhdt[t] - (m.flow_in[t] - m.flow_out[t]) == 0 + return m.area * m.dhdt[t] - (m.flow_in[t] - m.flow_out[t]) == 0 + m.diff_eqn = pyo.Constraint(m.time, rule=diff_eqn_rule) def flow_out_rule(m, t): - return m.flow_out[t] - (m.flow_const*pyo.sqrt(m.height[t])) == 0 + return m.flow_out[t] - (m.flow_const * pyo.sqrt(m.height[t])) == 0 + m.flow_out_eqn = pyo.Constraint(m.time, rule=flow_out_rule) - default_disc_args = { - "wrt": m.time, - "nfe": 5, - "scheme": "BACKWARD", - } + default_disc_args = {"wrt": m.time, "nfe": 5, "scheme": "BACKWARD"} default_disc_args.update(disc_args) discretizer = pyo.TransformationFactory("dae.finite_difference") @@ -100,14 +103,14 @@ def make_degenerate_solid_phase_model(): """ From the solid phase thermo package of a moving bed chemical looping combustion reactor. This example was first presented in [1] - + [1] Parker, R. Nonlinear programming strategies for dynamic models of chemical looping combustion reactors. Pres. AIChE Annual Meeting, 2020. """ m = pyo.ConcreteModel() - m.components = pyo.Set(initialize=[1,2,3]) - m.x = pyo.Var(m.components, initialize=1/3) + m.components = pyo.Set(initialize=[1, 2, 3]) + m.x = pyo.Var(m.components, initialize=1 / 3) m.flow_comp = pyo.Var(m.components, initialize=10) m.flow = pyo.Var(initialize=30) m.rho = pyo.Var(initialize=1) @@ -115,14 +118,15 @@ def make_degenerate_solid_phase_model(): # These are rough approximations of the relevant equations, with the same # incidence. m.sum_eqn = pyo.Constraint(expr=sum(m.x[j] for j in m.components) - 1 == 0) - m.holdup_eqn = pyo.Constraint(m.components, expr={ - j: m.x[j]*m.rho - 1 == 0 for j in m.components - }) - m.density_eqn = pyo.Constraint(expr= - 1/m.rho - sum(1/m.x[j] for j in m.components) == 0 - ) - m.flow_eqn = pyo.Constraint(m.components, expr={ - j: m.x[j]*m.flow - m.flow_comp[j] == 0 for j in m.components - }) + m.holdup_eqn = pyo.Constraint( + m.components, expr={j: m.x[j] * m.rho - 1 == 0 for j in m.components} + ) + m.density_eqn = pyo.Constraint( + expr=1 / m.rho - sum(1 / m.x[j] for j in m.components) == 0 + ) + m.flow_eqn = pyo.Constraint( + m.components, + expr={j: m.x[j] * m.flow - m.flow_comp[j] == 0 for j in m.components}, + ) return m diff --git a/pyomo/contrib/incidence_analysis/tests/test_connected.py b/pyomo/contrib/incidence_analysis/tests/test_connected.py index 607dbbb2917..a937a5029a1 100644 --- a/pyomo/contrib/incidence_analysis/tests/test_connected.py +++ b/pyomo/contrib/incidence_analysis/tests/test_connected.py @@ -19,9 +19,7 @@ get_structural_incidence_matrix, _generate_variables_in_constraints, ) -from pyomo.contrib.incidence_analysis.connected import ( - get_independent_submatrices, -) +from pyomo.contrib.incidence_analysis.connected import get_independent_submatrices from pyomo.contrib.incidence_analysis.tests.models_for_testing import ( make_gas_expansion_model, make_dynamic_model, @@ -36,7 +34,6 @@ @unittest.skipIf(not networkx_available, "NetworkX is not available") @unittest.skipIf(not scipy_available, "SciPy is not available") class TestIndependentSubmatrices(unittest.TestCase): - def test_decomposable_matrix(self): """ The following matrix decomposes into two independent diagonal @@ -51,21 +48,14 @@ def test_decomposable_matrix(self): col = [0, 0, 1, 2, 3, 3, 4, 4] data = [1, 1, 1, 1, 1, 1, 1, 1] N = 5 - coo = sp.sparse.coo_matrix( - (data, (row, col)), - shape=(N, N), - ) + coo = sp.sparse.coo_matrix((data, (row, col)), shape=(N, N)) row_blocks, col_blocks = get_independent_submatrices(coo) self.assertEqual(len(row_blocks), 2) self.assertEqual(len(col_blocks), 2) # One of the independent submatrices must be the first two rows/cols - self.assertTrue( - set(row_blocks[0]) == {0, 1} or set(row_blocks[1]) == {0, 1} - ) - self.assertTrue( - set(col_blocks[0]) == {0, 1} or set(col_blocks[1]) == {0, 1} - ) + self.assertTrue(set(row_blocks[0]) == {0, 1} or set(row_blocks[1]) == {0, 1}) + self.assertTrue(set(col_blocks[0]) == {0, 1} or set(col_blocks[1]) == {0, 1}) # The other independent submatrix must be last three rows/columns self.assertTrue( set(row_blocks[0]) == {2, 3, 4} or set(row_blocks[1]) == {2, 3, 4} @@ -90,10 +80,7 @@ def test_decomposable_matrix_permuted(self): row = [row_perm[i] for i in row] col = [col_perm[i] for i in col] - coo = sp.sparse.coo_matrix( - (data, (row, col)), - shape=(N, N), - ) + coo = sp.sparse.coo_matrix((data, (row, col)), shape=(N, N)) row_blocks, col_blocks = get_independent_submatrices(coo) self.assertEqual(len(row_blocks), 2) self.assertEqual(len(col_blocks), 2) @@ -119,21 +106,14 @@ def test_decomposable_matrix_permuted(self): def test_dynamic_model_backward(self): m = make_dynamic_model(nfe=5, scheme="BACKWARD") m.height[0].fix() - constraints = list( - m.component_data_objects(pyo.Constraint, active=True) - ) + constraints = list(m.component_data_objects(pyo.Constraint, active=True)) variables = list(_generate_variables_in_constraints(constraints)) - con_coord_map = ComponentMap( - (con, i) for i, con in enumerate(constraints) - ) - var_coord_map = ComponentMap( - (var, i) for i, var in enumerate(variables) - ) + con_coord_map = ComponentMap((con, i) for i, con in enumerate(constraints)) + var_coord_map = ComponentMap((var, i) for i, var in enumerate(variables)) coo = get_structural_incidence_matrix(variables, constraints) row_blocks, col_blocks = get_independent_submatrices(coo) rc_blocks = [ - (tuple(rows), tuple(cols)) - for rows, cols in zip(row_blocks, col_blocks) + (tuple(rows), tuple(cols)) for rows, cols in zip(row_blocks, col_blocks) ] self.assertEqual(len(rc_blocks), 2) # Want to check that one block contains flow_out_rule and flow_out @@ -148,10 +128,7 @@ def test_dynamic_model_backward(self): var_coord_map[m.dhdt[0]], var_coord_map[m.flow_in[0]], } - t0_con_coords = { - con_coord_map[m.flow_out_eqn[0]], - con_coord_map[m.diff_eqn[0]], - } + t0_con_coords = {con_coord_map[m.flow_out_eqn[0]], con_coord_map[m.diff_eqn[0]]} var_blocks = [ tuple(sorted(t0_var_coords)), @@ -162,8 +139,7 @@ def test_dynamic_model_backward(self): tuple(i for i in range(len(constraints)) if i not in t0_con_coords), ] target_blocks = [ - (tuple(rows), tuple(cols)) - for rows, cols in zip(con_blocks, var_blocks) + (tuple(rows), tuple(cols)) for rows, cols in zip(con_blocks, var_blocks) ] target_blocks = list(sorted(target_blocks)) rc_blocks = list(sorted(rc_blocks)) @@ -172,21 +148,14 @@ def test_dynamic_model_backward(self): def test_dynamic_model_forward(self): m = make_dynamic_model(nfe=5, scheme="FORWARD") m.height[0].fix() - constraints = list( - m.component_data_objects(pyo.Constraint, active=True) - ) + constraints = list(m.component_data_objects(pyo.Constraint, active=True)) variables = list(_generate_variables_in_constraints(constraints)) - con_coord_map = ComponentMap( - (con, i) for i, con in enumerate(constraints) - ) - var_coord_map = ComponentMap( - (var, i) for i, var in enumerate(variables) - ) + con_coord_map = ComponentMap((con, i) for i, con in enumerate(constraints)) + var_coord_map = ComponentMap((var, i) for i, var in enumerate(variables)) coo = get_structural_incidence_matrix(variables, constraints) row_blocks, col_blocks = get_independent_submatrices(coo) rc_blocks = [ - (tuple(rows), tuple(cols)) - for rows, cols in zip(row_blocks, col_blocks) + (tuple(rows), tuple(cols)) for rows, cols in zip(row_blocks, col_blocks) ] # With a forward discretization, all variables and constraints # are in the same independent block. diff --git a/pyomo/contrib/incidence_analysis/tests/test_dulmage_mendelsohn.py b/pyomo/contrib/incidence_analysis/tests/test_dulmage_mendelsohn.py index 6251c71f1e9..4aae9abc2c6 100644 --- a/pyomo/contrib/incidence_analysis/tests/test_dulmage_mendelsohn.py +++ b/pyomo/contrib/incidence_analysis/tests/test_dulmage_mendelsohn.py @@ -14,16 +14,12 @@ from pyomo.common.dependencies import networkx_available from pyomo.common.dependencies import scipy_available from pyomo.common.collections import ComponentSet, ComponentMap -from pyomo.contrib.incidence_analysis.interface import ( - get_structural_incidence_matrix, - ) -from pyomo.contrib.incidence_analysis.dulmage_mendelsohn import ( - dulmage_mendelsohn, - ) +from pyomo.contrib.incidence_analysis.interface import get_structural_incidence_matrix +from pyomo.contrib.incidence_analysis.dulmage_mendelsohn import dulmage_mendelsohn from pyomo.contrib.incidence_analysis.tests.models_for_testing import ( make_gas_expansion_model, make_dynamic_model, - ) +) import pyomo.common.unittest as unittest @@ -31,7 +27,6 @@ @unittest.skipUnless(networkx_available, "networkx is not available.") @unittest.skipUnless(scipy_available, "scipy is not available.") class TestGasExpansionDMMatrixInterface(unittest.TestCase): - def test_square_well_posed_model(self): N = 4 m = make_gas_expansion_model(N) @@ -39,8 +34,7 @@ def test_square_well_posed_model(self): m.rho[0].fix() m.T[0].fix() - variables = [v for v in m.component_data_objects(pyo.Var) - if not v.fixed] + variables = [v for v in m.component_data_objects(pyo.Var) if not v.fixed] constraints = list(m.component_data_objects(pyo.Constraint)) imat = get_structural_incidence_matrix(variables, constraints) @@ -69,8 +63,7 @@ def test_square_ill_posed_model(self): m.rho[0].fix() m.T[0].fix() - variables = [v for v in m.component_data_objects(pyo.Var) - if not v.fixed] + variables = [v for v in m.component_data_objects(pyo.Var) if not v.fixed] constraints = list(m.component_data_objects(pyo.Constraint)) imat = get_structural_incidence_matrix(variables, constraints) @@ -89,8 +82,9 @@ def test_square_ill_posed_model(self): self.assertEqual(row_partition[1], []) # The potentially unmatched variables have four constraints # between them - matched_con_set = set(con_idx_map[con] for con in constraints - if con is not m.ideal_gas[0]) + matched_con_set = set( + con_idx_map[con] for con in constraints if con is not m.ideal_gas[0] + ) self.assertEqual(set(row_partition[2]), matched_con_set) # All variables are potentially unmatched @@ -106,8 +100,8 @@ def test_rectangular_system(self): imat = get_structural_incidence_matrix(variables, constraints) M, N = imat.shape - self.assertEqual(M, 4*N_model + 1) - self.assertEqual(N, 4*(N_model + 1)) + self.assertEqual(M, 4 * N_model + 1) + self.assertEqual(N, 4 * (N_model + 1)) row_partition, col_partition = dulmage_mendelsohn(imat) @@ -130,14 +124,12 @@ def test_rectangular_system(self): @unittest.skipUnless(networkx_available, "networkx is not available.") @unittest.skipUnless(scipy_available, "scipy is not available.") class TestDynamicModel(unittest.TestCase): - def test_rectangular_model(self): m = make_dynamic_model() m.height[0].fix() - variables = [v for v in m.component_data_objects(pyo.Var) - if not v.fixed] + variables = [v for v in m.component_data_objects(pyo.Var) if not v.fixed] constraints = list(m.component_data_objects(pyo.Constraint)) imat = get_structural_incidence_matrix(variables, constraints) @@ -162,17 +154,15 @@ def test_rectangular_model(self): # we expect # Rows matched with potentially unmatched columns - self.assertEqual(len(row_partition[2]), M-1) - row_indices = set([i for i in range(M) - if i != con_idx_map[m.flow_out_eqn[0]]]) + self.assertEqual(len(row_partition[2]), M - 1) + row_indices = set([i for i in range(M) if i != con_idx_map[m.flow_out_eqn[0]]]) self.assertEqual(set(row_partition[2]), row_indices) # Potentially unmatched columns self.assertEqual(len(col_partition[0]), N - M) self.assertEqual(len(col_partition[1]), M - 1) potentially_unmatched = col_partition[0] + col_partition[1] - col_indices = set([i for i in range(N) - if i != var_idx_map[m.flow_out[0]]]) + col_indices = set([i for i in range(N) if i != var_idx_map[m.flow_out[0]]]) self.assertEqual(set(potentially_unmatched), col_indices) diff --git a/pyomo/contrib/incidence_analysis/tests/test_incidence.py b/pyomo/contrib/incidence_analysis/tests/test_incidence.py new file mode 100644 index 00000000000..3b0b6a997aa --- /dev/null +++ b/pyomo/contrib/incidence_analysis/tests/test_incidence.py @@ -0,0 +1,186 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.environ as pyo +from pyomo.repn import generate_standard_repn +import pyomo.common.unittest as unittest +from pyomo.common.collections import ComponentSet +from pyomo.contrib.incidence_analysis.incidence import ( + IncidenceMethod, + get_incident_variables, +) + + +class TestAssumedBehavior(unittest.TestCase): + """Tests for non-obvious behavior we rely on + + If this behavior changes, these tests will fail and hopefully we won't + waste time debugging the "real" tests. This behavior includes: + - The error message when we try to evaluate an expression with + uninitialized variables + + """ + + def test_uninitialized_value_error_message(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2]) + m.x[1].set_value(5) + msg = "No value for uninitialized NumericValue" + with self.assertRaisesRegex(ValueError, msg): + pyo.value(1 + m.x[1] * m.x[2]) + + +class _TestIncidence(object): + """Base class with tests for get_incident_variables that should be + independent of the method used + + """ + + def _get_incident_variables(self, expr): + raise NotImplementedError("_TestIncidence should not be used directly") + + def test_basic_incidence(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + expr = m.x[1] + m.x[1] * m.x[2] + m.x[1] * pyo.exp(m.x[3]) + variables = self._get_incident_variables(expr) + self.assertEqual(ComponentSet(variables), ComponentSet(m.x[:])) + + def test_incidence_with_fixed_variable(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + expr = m.x[1] + m.x[1] * m.x[2] + m.x[1] * pyo.exp(m.x[3]) + m.x[2].fix() + variables = self._get_incident_variables(expr) + var_set = ComponentSet(variables) + self.assertEqual(var_set, ComponentSet([m.x[1], m.x[3]])) + + def test_incidence_with_mutable_parameter(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + m.p = pyo.Param(mutable=True, initialize=None) + expr = m.x[1] + m.p * m.x[1] * m.x[2] + m.x[1] * pyo.exp(m.x[3]) + variables = self._get_incident_variables(expr) + self.assertEqual(ComponentSet(variables), ComponentSet(m.x[:])) + + +class TestIncidenceStandardRepn(unittest.TestCase, _TestIncidence): + def _get_incident_variables(self, expr, **kwds): + method = IncidenceMethod.standard_repn + return get_incident_variables(expr, method=method, **kwds) + + def test_assumed_standard_repn_behavior(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2]) + m.p = pyo.Param(initialize=0.0) + + # We rely on variables with constant coefficients of zero not appearing + # in the standard repn (as opposed to appearing with explicit + # coefficients of zero). + expr = m.x[1] + 0 * m.x[2] + repn = generate_standard_repn(expr) + self.assertEqual(len(repn.linear_vars), 1) + self.assertIs(repn.linear_vars[0], m.x[1]) + + expr = m.p * m.x[1] + m.x[2] + repn = generate_standard_repn(expr) + self.assertEqual(len(repn.linear_vars), 1) + self.assertIs(repn.linear_vars[0], m.x[2]) + + def test_zero_coef(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + + # generate_standard_repn filters subexpressions with zero coefficients + expr = 0 * m.x[1] + 0 * m.x[1] * m.x[2] + 0 * pyo.exp(m.x[3]) + variables = self._get_incident_variables(expr) + self.assertEqual(len(variables), 0) + + def test_variable_minus_itself(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + # standard repn will recognize the zero coefficient and filter x[1] + expr = m.x[1] + m.x[2] * m.x[3] - m.x[1] + variables = self._get_incident_variables(expr) + var_set = ComponentSet(variables) + self.assertEqual(var_set, ComponentSet([m.x[2], m.x[3]])) + + def test_linear_only(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + + expr = 2 * m.x[1] + 4 * m.x[2] * m.x[1] - m.x[1] * pyo.exp(m.x[3]) + variables = self._get_incident_variables(expr, linear_only=True) + self.assertEqual(len(variables), 0) + + expr = 2 * m.x[1] + 2 * m.x[2] * m.x[3] + 3 * m.x[2] + variables = self._get_incident_variables(expr, linear_only=True) + self.assertEqual(ComponentSet(variables), ComponentSet([m.x[1]])) + + m.x[3].fix(2.5) + expr = 2 * m.x[1] + 2 * m.x[2] * m.x[3] + 3 * m.x[2] + variables = self._get_incident_variables(expr, linear_only=True) + self.assertEqual(ComponentSet(variables), ComponentSet([m.x[1], m.x[2]])) + + def test_fixed_zero_linear_coefficient(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + m.p = pyo.Param([1, 2], mutable=True, initialize=1.0) + m.p[1].set_value(0) + expr = 2 * m.x[1] + m.p[1] * m.p[2] * m.x[2] + m.p[2] * m.x[3] ** 2 + variables = self._get_incident_variables(expr) + self.assertEqual(ComponentSet(variables), ComponentSet([m.x[1], m.x[3]])) + + m.x[3].fix(0.0) + expr = 2 * m.x[1] + 3 * m.x[3] * m.p[2] * m.x[2] + m.x[1] ** 2 + variables = self._get_incident_variables(expr) + self.assertEqual(ComponentSet(variables), ComponentSet([m.x[1]])) + + m.x[3].fix(1.0) + variables = self._get_incident_variables(expr) + self.assertEqual(ComponentSet(variables), ComponentSet([m.x[1], m.x[2]])) + + def test_fixed_none_linear_coefficient(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + m.p = pyo.Param([1, 2], mutable=True, initialize=1.0) + m.x[3].fix(None) + expr = 2 * m.x[1] + 3 * m.x[3] * m.p[2] * m.x[2] + m.x[1] ** 2 + variables = self._get_incident_variables(expr) + self.assertEqual(ComponentSet(variables), ComponentSet([m.x[1], m.x[2]])) + + +class TestIncidenceIdentifyVariables(unittest.TestCase, _TestIncidence): + def _get_incident_variables(self, expr, **kwds): + method = IncidenceMethod.identify_variables + return get_incident_variables(expr, method=method, **kwds) + + def test_zero_coef(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + + # identify_variables does not eliminate expressions times zero + expr = 0 * m.x[1] + 0 * m.x[1] * m.x[2] + 0 * pyo.exp(m.x[3]) + variables = self._get_incident_variables(expr) + self.assertEqual(ComponentSet(variables), ComponentSet(m.x[:])) + + def test_variable_minus_itself(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + # identify_variables will not filter x[1] + expr = m.x[1] + m.x[2] * m.x[3] - m.x[1] + variables = self._get_incident_variables(expr) + var_set = ComponentSet(variables) + self.assertEqual(var_set, ComponentSet(m.x[:])) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/incidence_analysis/tests/test_interface.py b/pyomo/contrib/incidence_analysis/tests/test_interface.py index c4f4a1e0e8a..75bac643790 100644 --- a/pyomo/contrib/incidence_analysis/tests/test_interface.py +++ b/pyomo/contrib/incidence_analysis/tests/test_interface.py @@ -10,50 +10,59 @@ # ___________________________________________________________________________ import pyomo.environ as pyo -from pyomo.common.dependencies import networkx_available -from pyomo.common.dependencies import scipy_available +from pyomo.core.expr.visitor import identify_variables +from pyomo.common.dependencies import ( + networkx_available, + plotly_available, + scipy_available, +) from pyomo.common.collections import ComponentSet, ComponentMap from pyomo.contrib.incidence_analysis.interface import ( + asl_available, IncidenceGraphInterface, get_structural_incidence_matrix, get_numeric_incidence_matrix, get_incidence_graph, + get_bipartite_incidence_graph, + extract_bipartite_subgraph, ) from pyomo.contrib.incidence_analysis.matching import maximum_matching -from pyomo.contrib.incidence_analysis.triangularize import block_triangularize -from pyomo.contrib.incidence_analysis.dulmage_mendelsohn import ( - dulmage_mendelsohn, +from pyomo.contrib.incidence_analysis.triangularize import ( + map_coords_to_block_triangular_indices, ) +from pyomo.contrib.incidence_analysis.dulmage_mendelsohn import dulmage_mendelsohn from pyomo.contrib.incidence_analysis.tests.models_for_testing import ( make_gas_expansion_model, make_degenerate_solid_phase_model, make_dynamic_model, ) + if scipy_available: from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP if networkx_available: + import networkx as nx from networkx.algorithms.bipartite.matrix import from_biadjacency_matrix -from pyomo.contrib.pynumero.asl import AmplInterface import pyomo.common.unittest as unittest @unittest.skipUnless(networkx_available, "networkx is not available.") @unittest.skipUnless(scipy_available, "scipy is not available.") -@unittest.skipUnless(AmplInterface.available(), "pynumero_ASL is not available") +@unittest.skipUnless(asl_available, "pynumero PyomoNLP is not available") class TestGasExpansionNumericIncidenceMatrix(unittest.TestCase): """ This class tests the get_numeric_incidence_matrix function on the gas expansion model. """ + def test_incidence_matrix(self): N = 5 model = make_gas_expansion_model(N) all_vars = list(model.component_data_objects(pyo.Var)) all_cons = list(model.component_data_objects(pyo.Constraint)) imat = get_numeric_incidence_matrix(all_vars, all_cons) - n_var = 4*(N+1) - n_con = 4*N+1 + n_var = 4 * (N + 1) + n_con = 4 * N + 1 self.assertEqual(imat.shape, (n_con, n_var)) var_idx_map = ComponentMap((v, i) for i, v in enumerate(all_vars)) @@ -61,103 +70,119 @@ def test_incidence_matrix(self): # Map constraints to the variables they contain. csr_map = ComponentMap() - csr_map.update((model.mbal[i], ComponentSet([ - model.F[i], - model.F[i-1], - model.rho[i], - model.rho[i-1], - ])) for i in model.streams if i != model.streams.first()) - csr_map.update((model.ebal[i], ComponentSet([ - model.F[i], - model.F[i-1], - model.rho[i], - model.rho[i-1], - model.T[i], - model.T[i-1], - ])) for i in model.streams if i != model.streams.first()) - csr_map.update((model.expansion[i], ComponentSet([ - model.rho[i], - model.rho[i-1], - model.P[i], - model.P[i-1], - ])) for i in model.streams if i != model.streams.first()) - csr_map.update((model.ideal_gas[i], ComponentSet([ - model.P[i], - model.rho[i], - model.T[i], - ])) for i in model.streams) + csr_map.update( + ( + model.mbal[i], + ComponentSet( + [model.F[i], model.F[i - 1], model.rho[i], model.rho[i - 1]] + ), + ) + for i in model.streams + if i != model.streams.first() + ) + csr_map.update( + ( + model.ebal[i], + ComponentSet( + [ + model.F[i], + model.F[i - 1], + model.rho[i], + model.rho[i - 1], + model.T[i], + model.T[i - 1], + ] + ), + ) + for i in model.streams + if i != model.streams.first() + ) + csr_map.update( + ( + model.expansion[i], + ComponentSet( + [model.rho[i], model.rho[i - 1], model.P[i], model.P[i - 1]] + ), + ) + for i in model.streams + if i != model.streams.first() + ) + csr_map.update( + (model.ideal_gas[i], ComponentSet([model.P[i], model.rho[i], model.T[i]])) + for i in model.streams + ) # Map constraint and variable indices to the values of the derivatives # Note that the derivative values calculated here depend on the model's # canonical form. deriv_lookup = {} - m = model # for convenience + m = model # for convenience for s in model.streams: # Ideal gas: i = con_idx_map[model.ideal_gas[s]] j = var_idx_map[model.P[s]] - deriv_lookup[i,j] = 1.0 + deriv_lookup[i, j] = 1.0 j = var_idx_map[model.rho[s]] - deriv_lookup[i,j] = - model.R.value*model.T[s].value + deriv_lookup[i, j] = -model.R.value * model.T[s].value j = var_idx_map[model.T[s]] - deriv_lookup[i,j] = - model.R.value*model.rho[s].value + deriv_lookup[i, j] = -model.R.value * model.rho[s].value if s != model.streams.first(): # Expansion: i = con_idx_map[model.expansion[s]] j = var_idx_map[model.P[s]] - deriv_lookup[i,j] = 1/model.P[s-1].value + deriv_lookup[i, j] = 1 / model.P[s - 1].value - j = var_idx_map[model.P[s-1]] - deriv_lookup[i,j] = -model.P[s].value/model.P[s-1]**2 + j = var_idx_map[model.P[s - 1]] + deriv_lookup[i, j] = -model.P[s].value / model.P[s - 1] ** 2 j = var_idx_map[model.rho[s]] - deriv_lookup[i,j] = pyo.value( - -m.gamma*(m.rho[s]/m.rho[s-1])**(m.gamma-1)/m.rho[s-1] + deriv_lookup[i, j] = pyo.value( + -m.gamma * (m.rho[s] / m.rho[s - 1]) ** (m.gamma - 1) / m.rho[s - 1] ) - j = var_idx_map[model.rho[s-1]] - deriv_lookup[i,j] = pyo.value( - -m.gamma*(m.rho[s]/m.rho[s-1])**(m.gamma-1) * - (-m.rho[s]/m.rho[s-1]**2) + j = var_idx_map[model.rho[s - 1]] + deriv_lookup[i, j] = pyo.value( + -m.gamma + * (m.rho[s] / m.rho[s - 1]) ** (m.gamma - 1) + * (-m.rho[s] / m.rho[s - 1] ** 2) ) # Energy balance: i = con_idx_map[m.ebal[s]] - j = var_idx_map[m.rho[s-1]] - deriv_lookup[i,j] = pyo.value(m.F[s-1]*m.T[s-1]) + j = var_idx_map[m.rho[s - 1]] + deriv_lookup[i, j] = pyo.value(m.F[s - 1] * m.T[s - 1]) - j = var_idx_map[m.F[s-1]] - deriv_lookup[i,j] = pyo.value(m.rho[s-1]*m.T[s-1]) + j = var_idx_map[m.F[s - 1]] + deriv_lookup[i, j] = pyo.value(m.rho[s - 1] * m.T[s - 1]) - j = var_idx_map[m.T[s-1]] - deriv_lookup[i,j] = pyo.value(m.F[s-1]*m.rho[s-1]) + j = var_idx_map[m.T[s - 1]] + deriv_lookup[i, j] = pyo.value(m.F[s - 1] * m.rho[s - 1]) j = var_idx_map[m.rho[s]] - deriv_lookup[i,j] = pyo.value(-m.F[s]*m.T[s]) + deriv_lookup[i, j] = pyo.value(-m.F[s] * m.T[s]) j = var_idx_map[m.F[s]] - deriv_lookup[i,j] = pyo.value(-m.rho[s]*m.T[s]) + deriv_lookup[i, j] = pyo.value(-m.rho[s] * m.T[s]) j = var_idx_map[m.T[s]] - deriv_lookup[i,j] = pyo.value(-m.F[s]*m.rho[s]) + deriv_lookup[i, j] = pyo.value(-m.F[s] * m.rho[s]) # Mass balance: i = con_idx_map[m.mbal[s]] - j = var_idx_map[m.rho[s-1]] - deriv_lookup[i,j] = pyo.value(m.F[s-1]) + j = var_idx_map[m.rho[s - 1]] + deriv_lookup[i, j] = pyo.value(m.F[s - 1]) - j = var_idx_map[m.F[s-1]] - deriv_lookup[i,j] = pyo.value(m.rho[s-1]) + j = var_idx_map[m.F[s - 1]] + deriv_lookup[i, j] = pyo.value(m.rho[s - 1]) j = var_idx_map[m.rho[s]] - deriv_lookup[i,j] = pyo.value(-m.F[s]) + deriv_lookup[i, j] = pyo.value(-m.F[s]) j = var_idx_map[m.F[s]] - deriv_lookup[i,j] = pyo.value(-m.rho[s]) - + deriv_lookup[i, j] = pyo.value(-m.rho[s]) # Want to test that the columns have the rows we expect. i = model.streams.first() @@ -166,7 +191,7 @@ def test_incidence_matrix(self): var = all_vars[j] self.assertIn(var, csr_map[con]) csr_map[con].remove(var) - self.assertAlmostEqual(pyo.value(deriv_lookup[i,j]), pyo.value(e), 8) + self.assertAlmostEqual(pyo.value(deriv_lookup[i, j]), pyo.value(e), 8) # And no additional rows for con in csr_map: self.assertEqual(len(csr_map[con]), 0) @@ -195,12 +220,15 @@ def test_perfect_matching(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) @@ -209,8 +237,9 @@ def test_perfect_matching(self): n_var = len(variables) matching = maximum_matching(imat) - matching = ComponentMap((c, variables[matching[con_idx_map[c]]]) - for c in constraints) + matching = ComponentMap( + (c, variables[matching[con_idx_map[c]]]) for c in constraints + ) values = ComponentSet(matching.values()) self.assertEqual(len(matching), n_var) self.assertEqual(len(values), n_var) @@ -228,12 +257,15 @@ def test_triangularize(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) @@ -241,16 +273,18 @@ def test_triangularize(self): con_idx_map = ComponentMap((c, i) for i, c in enumerate(constraints)) var_idx_map = ComponentMap((v, i) for i, v in enumerate(variables)) - row_block_map, col_block_map = block_triangularize(imat) - var_block_map = ComponentMap((v, col_block_map[var_idx_map[v]]) - for v in variables) - con_block_map = ComponentMap((c, row_block_map[con_idx_map[c]]) - for c in constraints) + row_block_map, col_block_map = map_coords_to_block_triangular_indices(imat) + var_block_map = ComponentMap( + (v, col_block_map[var_idx_map[v]]) for v in variables + ) + con_block_map = ComponentMap( + (c, row_block_map[con_idx_map[c]]) for c in constraints + ) var_values = set(var_block_map.values()) con_values = set(con_block_map.values()) - self.assertEqual(len(var_values), N+1) - self.assertEqual(len(con_values), N+1) + self.assertEqual(len(var_values), N + 1) + self.assertEqual(len(con_values), N + 1) self.assertEqual(var_block_map[model.P[0]], 0) @@ -274,14 +308,15 @@ class TestGasExpansionStructuralIncidenceMatrix(unittest.TestCase): This class tests the get_structural_incidence_matrix function on the gas expansion model. """ + def test_incidence_matrix(self): N = 5 model = make_gas_expansion_model(N) all_vars = list(model.component_data_objects(pyo.Var)) all_cons = list(model.component_data_objects(pyo.Constraint)) imat = get_structural_incidence_matrix(all_vars, all_cons) - n_var = 4*(N+1) - n_con = 4*N+1 + n_var = 4 * (N + 1) + n_con = 4 * N + 1 self.assertEqual(imat.shape, (n_con, n_var)) var_idx_map = ComponentMap((v, i) for i, v in enumerate(all_vars)) @@ -289,31 +324,47 @@ def test_incidence_matrix(self): # Map constraints to the variables they contain. csr_map = ComponentMap() - csr_map.update((model.mbal[i], ComponentSet([ - model.F[i], - model.F[i-1], - model.rho[i], - model.rho[i-1], - ])) for i in model.streams if i != model.streams.first()) - csr_map.update((model.ebal[i], ComponentSet([ - model.F[i], - model.F[i-1], - model.rho[i], - model.rho[i-1], - model.T[i], - model.T[i-1], - ])) for i in model.streams if i != model.streams.first()) - csr_map.update((model.expansion[i], ComponentSet([ - model.rho[i], - model.rho[i-1], - model.P[i], - model.P[i-1], - ])) for i in model.streams if i != model.streams.first()) - csr_map.update((model.ideal_gas[i], ComponentSet([ - model.P[i], - model.rho[i], - model.T[i], - ])) for i in model.streams) + csr_map.update( + ( + model.mbal[i], + ComponentSet( + [model.F[i], model.F[i - 1], model.rho[i], model.rho[i - 1]] + ), + ) + for i in model.streams + if i != model.streams.first() + ) + csr_map.update( + ( + model.ebal[i], + ComponentSet( + [ + model.F[i], + model.F[i - 1], + model.rho[i], + model.rho[i - 1], + model.T[i], + model.T[i - 1], + ] + ), + ) + for i in model.streams + if i != model.streams.first() + ) + csr_map.update( + ( + model.expansion[i], + ComponentSet( + [model.rho[i], model.rho[i - 1], model.P[i], model.P[i - 1]] + ), + ) + for i in model.streams + if i != model.streams.first() + ) + csr_map.update( + (model.ideal_gas[i], ComponentSet([model.P[i], model.rho[i], model.T[i]])) + for i in model.streams + ) # Want to test that the columns have the rows we expect. i = model.streams.first() @@ -351,12 +402,15 @@ def test_perfect_matching(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) @@ -365,8 +419,9 @@ def test_perfect_matching(self): n_var = len(variables) matching = maximum_matching(imat) - matching = ComponentMap((c, variables[matching[con_idx_map[c]]]) - for c in constraints) + matching = ComponentMap( + (c, variables[matching[con_idx_map[c]]]) for c in constraints + ) values = ComponentSet(matching.values()) self.assertEqual(len(matching), n_var) self.assertEqual(len(values), n_var) @@ -384,12 +439,15 @@ def test_triangularize(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) @@ -397,16 +455,18 @@ def test_triangularize(self): con_idx_map = ComponentMap((c, i) for i, c in enumerate(constraints)) var_idx_map = ComponentMap((v, i) for i, v in enumerate(variables)) - row_block_map, col_block_map = block_triangularize(imat) - var_block_map = ComponentMap((v, col_block_map[var_idx_map[v]]) - for v in variables) - con_block_map = ComponentMap((c, row_block_map[con_idx_map[c]]) - for c in constraints) + row_block_map, col_block_map = map_coords_to_block_triangular_indices(imat) + var_block_map = ComponentMap( + (v, col_block_map[var_idx_map[v]]) for v in variables + ) + con_block_map = ComponentMap( + (c, row_block_map[con_idx_map[c]]) for c in constraints + ) var_values = set(var_block_map.values()) con_values = set(con_block_map.values()) - self.assertEqual(len(var_values), N+1) - self.assertEqual(len(con_values), N+1) + self.assertEqual(len(var_values), N + 1) + self.assertEqual(len(con_values), N + 1) self.assertEqual(var_block_map[model.P[0]], 0) @@ -425,7 +485,7 @@ def test_triangularize(self): @unittest.skipUnless(networkx_available, "networkx is not available.") @unittest.skipUnless(scipy_available, "scipy is not available.") -@unittest.skipUnless(AmplInterface.available(), "pynumero_ASL is not available") +@unittest.skipUnless(asl_available, "pynumero PyomoNLP is not available") class TestGasExpansionModelInterfaceClassNumeric(unittest.TestCase): # In these tests, we pass the interface a PyomoNLP and cache # its Jacobian. @@ -451,12 +511,15 @@ def test_perfect_matching(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) @@ -482,21 +545,74 @@ def test_triangularize(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) - var_block_map, con_block_map = igraph.block_triangularize( - variables, constraints) + var_blocks, con_blocks = igraph.block_triangularize(variables, constraints) + partition = [ + list(zip(vblock, cblock)) for vblock, cblock in zip(var_blocks, con_blocks) + ] + self.assertEqual(len(partition), N + 1) + + for i in model.streams: + variables = ComponentSet([var for var, _ in partition[i]]) + constraints = ComponentSet([con for _, con in partition[i]]) + if i == model.streams.first(): + self.assertEqual(variables, ComponentSet([model.P[0]])) + else: + pred_vars = ComponentSet( + [model.rho[i], model.T[i], model.P[i], model.F[i]] + ) + pred_cons = ComponentSet( + [ + model.ideal_gas[i], + model.expansion[i], + model.mbal[i], + model.ebal[i], + ] + ) + self.assertEqual(pred_vars, variables) + self.assertEqual(pred_cons, constraints) + + def test_maps_from_triangularization(self): + N = 5 + model = make_gas_expansion_model(N) + model.obj = pyo.Objective(expr=0) + nlp = PyomoNLP(model) + igraph = IncidenceGraphInterface(nlp) + + # These are the variables and constraints of the square, + # nonsingular subsystem + variables = [] + variables.extend(model.P.values()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) + + constraints = list(model.component_data_objects(pyo.Constraint)) + + var_block_map, con_block_map = igraph.map_nodes_to_block_triangular_indices( + variables, constraints + ) var_values = set(var_block_map.values()) con_values = set(con_block_map.values()) - self.assertEqual(len(var_values), N+1) - self.assertEqual(len(con_values), N+1) + self.assertEqual(len(var_values), N + 1) + self.assertEqual(len(con_values), N + 1) self.assertEqual(var_block_map[model.P[0]], 0) @@ -518,13 +634,13 @@ def test_exception(self): nlp = PyomoNLP(model) igraph = IncidenceGraphInterface(nlp) - with self.assertRaises(ValueError) as exc: + with self.assertRaises(RuntimeError) as exc: variables = [model.P] constraints = [model.ideal_gas] igraph.maximum_matching(variables, constraints) self.assertIn('must be unindexed', str(exc.exception)) - with self.assertRaises(ValueError) as exc: + with self.assertRaises(RuntimeError) as exc: variables = [model.P] constraints = [model.ideal_gas] igraph.block_triangularize(variables, constraints) @@ -532,7 +648,6 @@ def test_exception(self): @unittest.skipUnless(networkx_available, "networkx is not available.") -@unittest.skipUnless(scipy_available, "scipy is not available.") class TestGasExpansionModelInterfaceClassStructural(unittest.TestCase): # In these tests we pass a model to the interface and are caching a # structural incidence matrix. @@ -554,12 +669,15 @@ def test_perfect_matching(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) @@ -583,21 +701,76 @@ def test_triangularize(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) - var_block_map, con_block_map = igraph.block_triangularize( - variables, constraints) + var_blocks, con_blocks = igraph.block_triangularize(variables, constraints) + partition = [ + list(zip(vblock, cblock)) for vblock, cblock in zip(var_blocks, con_blocks) + ] + self.assertEqual(len(partition), N + 1) + + for i in model.streams: + variables = ComponentSet([var for var, _ in partition[i]]) + constraints = ComponentSet([con for _, con in partition[i]]) + if i == model.streams.first(): + self.assertEqual(variables, ComponentSet([model.P[0]])) + else: + pred_vars = ComponentSet( + [model.rho[i], model.T[i], model.P[i], model.F[i]] + ) + pred_cons = ComponentSet( + [ + model.ideal_gas[i], + model.expansion[i], + model.mbal[i], + model.ebal[i], + ] + ) + self.assertEqual(pred_vars, variables) + self.assertEqual(pred_cons, constraints) + + def test_maps_from_triangularization(self): + """ + This tests the maps from variables and constraints to their diagonal + blocks returned by map_nodes_to_block_triangular_indices + """ + N = 5 + model = make_gas_expansion_model(N) + igraph = IncidenceGraphInterface(model) + + # These are the variables and constraints of the square, + # nonsingular subsystem + variables = [] + variables.extend(model.P.values()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) + + constraints = list(model.component_data_objects(pyo.Constraint)) + + var_block_map, con_block_map = igraph.map_nodes_to_block_triangular_indices( + variables, constraints + ) var_values = set(var_block_map.values()) con_values = set(con_block_map.values()) - self.assertEqual(len(var_values), N+1) - self.assertEqual(len(con_values), N+1) + self.assertEqual(len(var_values), N + 1) + self.assertEqual(len(con_values), N + 1) self.assertEqual(var_block_map[model.P[0]], 0) @@ -614,7 +787,7 @@ def test_triangularize(self): self.assertEqual(con_block_map[model.ebal[i]], i) def test_triangularize_submatrix(self): - # This test exercises the extraction of a somewhat nontrivial + # This test exercises triangularization of a somewhat nontrivial # submatrix from a cached incidence matrix. N = 5 model = make_gas_expansion_model(N) @@ -623,28 +796,75 @@ def test_triangularize_submatrix(self): # These are the variables and constraints of a square, # nonsingular subsystem variables = [] - half = N//2 + half = N // 2 variables.extend(model.P[i] for i in model.streams if i >= half) variables.extend(model.T[i] for i in model.streams if i > half) variables.extend(model.rho[i] for i in model.streams if i > half) variables.extend(model.F[i] for i in model.streams if i > half) constraints = [] - constraints.extend(model.ideal_gas[i] for i in model.streams - if i >= half) - constraints.extend(model.expansion[i] for i in model.streams - if i > half) - constraints.extend(model.mbal[i] for i in model.streams - if i > half) - constraints.extend(model.ebal[i] for i in model.streams - if i > half) - - var_block_map, con_block_map = igraph.block_triangularize( - variables, constraints) + constraints.extend(model.ideal_gas[i] for i in model.streams if i >= half) + constraints.extend(model.expansion[i] for i in model.streams if i > half) + constraints.extend(model.mbal[i] for i in model.streams if i > half) + constraints.extend(model.ebal[i] for i in model.streams if i > half) + + var_blocks, con_blocks = igraph.block_triangularize(variables, constraints) + partition = [ + list(zip(vblock, cblock)) for vblock, cblock in zip(var_blocks, con_blocks) + ] + self.assertEqual(len(partition), (N - half) + 1) + + for i in model.streams: + idx = i - half + variables = ComponentSet([var for var, _ in partition[idx]]) + constraints = ComponentSet([con for _, con in partition[idx]]) + if i == half: + self.assertEqual(variables, ComponentSet([model.P[half]])) + elif i > half: + pred_var = ComponentSet( + [model.rho[i], model.T[i], model.P[i], model.F[i]] + ) + pred_con = ComponentSet( + [ + model.ideal_gas[i], + model.expansion[i], + model.mbal[i], + model.ebal[i], + ] + ) + self.assertEqual(variables, pred_var) + self.assertEqual(constraints, pred_con) + + def test_maps_from_triangularization_submatrix(self): + # This test exercises the var/con-block-maps obtained from + # triangularization of a somewhat nontrivial submatrix from a cached + # incidence matrix. + N = 5 + model = make_gas_expansion_model(N) + igraph = IncidenceGraphInterface(model) + + # These are the variables and constraints of a square, + # nonsingular subsystem + variables = [] + half = N // 2 + variables.extend(model.P[i] for i in model.streams if i >= half) + variables.extend(model.T[i] for i in model.streams if i > half) + variables.extend(model.rho[i] for i in model.streams if i > half) + variables.extend(model.F[i] for i in model.streams if i > half) + + constraints = [] + constraints.extend(model.ideal_gas[i] for i in model.streams if i >= half) + constraints.extend(model.expansion[i] for i in model.streams if i > half) + constraints.extend(model.mbal[i] for i in model.streams if i > half) + constraints.extend(model.ebal[i] for i in model.streams if i > half) + + var_block_map, con_block_map = igraph.map_nodes_to_block_triangular_indices( + variables, constraints + ) var_values = set(var_block_map.values()) con_values = set(con_block_map.values()) - self.assertEqual(len(var_values), (N-half)+1) - self.assertEqual(len(con_values), (N-half)+1) + self.assertEqual(len(var_values), (N - half) + 1) + self.assertEqual(len(con_values), (N - half) + 1) self.assertEqual(var_block_map[model.P[half]], 0) @@ -665,18 +885,19 @@ def test_exception(self): model = make_gas_expansion_model() igraph = IncidenceGraphInterface(model) - with self.assertRaises(ValueError) as exc: + with self.assertRaises(RuntimeError) as exc: variables = [model.P] constraints = [model.ideal_gas] igraph.maximum_matching(variables, constraints) self.assertIn('must be unindexed', str(exc.exception)) - with self.assertRaises(ValueError) as exc: + with self.assertRaises(RuntimeError) as exc: variables = [model.P] constraints = [model.ideal_gas] igraph.block_triangularize(variables, constraints) self.assertIn('must be unindexed', str(exc.exception)) + @unittest.skipUnless(scipy_available, "scipy is not available.") def test_remove(self): model = make_gas_expansion_model() igraph = IncidenceGraphInterface(model) @@ -720,7 +941,6 @@ def test_remove(self): @unittest.skipUnless(networkx_available, "networkx is not available.") -@unittest.skipUnless(scipy_available, "scipy is not available.") class TestGasExpansionModelInterfaceClassNoCache(unittest.TestCase): # In these tests we do not cache anything and use the interface # simply as a convenient wrapper around the analysis functions, @@ -745,12 +965,15 @@ def test_perfect_matching(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) @@ -774,21 +997,72 @@ def test_triangularize(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) - var_block_map, con_block_map = igraph.block_triangularize( - variables, constraints) + var_blocks, con_blocks = igraph.block_triangularize(variables, constraints) + partition = [ + list(zip(vblock, cblock)) for vblock, cblock in zip(var_blocks, con_blocks) + ] + self.assertEqual(len(partition), N + 1) + + for i in model.streams: + variables = ComponentSet([var for var, _ in partition[i]]) + constraints = ComponentSet([con for _, con in partition[i]]) + if i == model.streams.first(): + self.assertEqual(variables, ComponentSet([model.P[0]])) + else: + pred_vars = ComponentSet( + [model.rho[i], model.T[i], model.P[i], model.F[i]] + ) + pred_cons = ComponentSet( + [ + model.ideal_gas[i], + model.expansion[i], + model.mbal[i], + model.ebal[i], + ] + ) + self.assertEqual(pred_vars, variables) + self.assertEqual(pred_cons, constraints) + + def test_maps_from_triangularization(self): + N = 5 + model = make_gas_expansion_model(N) + igraph = IncidenceGraphInterface() + + # These are the variables and constraints of the square, + # nonsingular subsystem + variables = [] + variables.extend(model.P.values()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) + + constraints = list(model.component_data_objects(pyo.Constraint)) + + var_block_map, con_block_map = igraph.map_nodes_to_block_triangular_indices( + variables, constraints + ) var_values = set(var_block_map.values()) con_values = set(con_block_map.values()) - self.assertEqual(len(var_values), N+1) - self.assertEqual(len(con_values), N+1) + self.assertEqual(len(var_values), N + 1) + self.assertEqual(len(con_values), N + 1) self.assertEqual(var_block_map[model.P[0]], 0) @@ -813,22 +1087,23 @@ def test_diagonal_blocks(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) - var_blocks, con_blocks = igraph.get_diagonal_blocks( - variables, constraints - ) - self.assertIs(igraph.row_block_map, None) - self.assertIs(igraph.col_block_map, None) - self.assertEqual(len(var_blocks), N+1) - self.assertEqual(len(con_blocks), N+1) + var_blocks, con_blocks = igraph.get_diagonal_blocks(variables, constraints) + # self.assertIs(igraph.row_block_map, None) + # self.assertIs(igraph.col_block_map, None) + self.assertEqual(len(var_blocks), N + 1) + self.assertEqual(len(con_blocks), N + 1) for i, (vars, cons) in enumerate(zip(var_blocks, con_blocks)): var_set = ComponentSet(vars) @@ -841,19 +1116,22 @@ def test_diagonal_blocks(self): self.assertEqual(pred_con_set, con_set) else: - pred_var_set = ComponentSet([ - model.rho[i], model.T[i], model.P[i], model.F[i] - ]) - pred_con_set = ComponentSet([ - model.ideal_gas[i], - model.expansion[i], - model.mbal[i], - model.ebal[i], - ]) + pred_var_set = ComponentSet( + [model.rho[i], model.T[i], model.P[i], model.F[i]] + ) + pred_con_set = ComponentSet( + [ + model.ideal_gas[i], + model.expansion[i], + model.mbal[i], + model.ebal[i], + ] + ) self.assertEqual(pred_var_set, var_set) self.assertEqual(pred_con_set, con_set) def test_diagonal_blocks_with_cached_maps(self): + # NOTE: This functionality has been deprecated. N = 5 model = make_gas_expansion_model(N) igraph = IncidenceGraphInterface() @@ -862,52 +1140,28 @@ def test_diagonal_blocks_with_cached_maps(self): # nonsingular subsystem variables = [] variables.extend(model.P.values()) - variables.extend(model.T[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.rho[i] for i in model.streams - if i != model.streams.first()) - variables.extend(model.F[i] for i in model.streams - if i != model.streams.first()) + variables.extend( + model.T[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.rho[i] for i in model.streams if i != model.streams.first() + ) + variables.extend( + model.F[i] for i in model.streams if i != model.streams.first() + ) constraints = list(model.component_data_objects(pyo.Constraint)) igraph.block_triangularize(variables, constraints) - var_blocks, con_blocks = igraph.get_diagonal_blocks( - variables, constraints - ) - self.assertIsNot(igraph.row_block_map, None) - self.assertIsNot(igraph.col_block_map, None) - self.assertEqual(len(var_blocks), N+1) - self.assertEqual(len(con_blocks), N+1) - - for i, (vars, cons) in enumerate(zip(var_blocks, con_blocks)): - var_set = ComponentSet(vars) - con_set = ComponentSet(cons) - - if i == 0: - pred_var_set = ComponentSet([model.P[0]]) - self.assertEqual(pred_var_set, var_set) - pred_con_set = ComponentSet([model.ideal_gas[0]]) - self.assertEqual(pred_con_set, con_set) - - else: - pred_var_set = ComponentSet([ - model.rho[i], model.T[i], model.P[i], model.F[i] - ]) - pred_con_set = ComponentSet([ - model.ideal_gas[i], - model.expansion[i], - model.mbal[i], - model.ebal[i], - ]) - self.assertEqual(pred_var_set, var_set) - self.assertEqual(pred_con_set, con_set) + var_blocks, con_blocks = igraph.get_diagonal_blocks(variables, constraints) + # NOTE: row/col_block_map have been deprecated. + # However, they still return None for now. + self.assertIs(igraph.row_block_map, None) + self.assertIs(igraph.col_block_map, None) @unittest.skipUnless(networkx_available, "networkx is not available.") -@unittest.skipUnless(scipy_available, "scipy is not available.") class TestDulmageMendelsohnInterface(unittest.TestCase): - def test_degenerate_solid_phase_model(self): m = make_degenerate_solid_phase_model() variables = list(m.component_data_objects(pyo.Var)) @@ -920,8 +1174,8 @@ def test_degenerate_solid_phase_model(self): underconstrained_vars.add(m.flow) underconstrained_cons = ComponentSet(m.flow_eqn.values()) - self.assertEqual(len(var_dmp[0]+var_dmp[1]), len(underconstrained_vars)) - for var in var_dmp[0]+var_dmp[1]: + self.assertEqual(len(var_dmp[0] + var_dmp[1]), len(underconstrained_vars)) + for var in var_dmp[0] + var_dmp[1]: self.assertIn(var, underconstrained_vars) self.assertEqual(len(con_dmp[2]), len(underconstrained_cons)) @@ -938,8 +1192,8 @@ def test_degenerate_solid_phase_model(self): for var in var_dmp[2]: self.assertIn(var, overconstrained_vars) - self.assertEqual(len(con_dmp[0]+con_dmp[1]), len(overconstrained_cons)) - for con in con_dmp[0]+con_dmp[1]: + self.assertEqual(len(con_dmp[0] + con_dmp[1]), len(overconstrained_cons)) + for con in con_dmp[0] + con_dmp[1]: self.assertIn(con, overconstrained_cons) def test_named_tuple(self): @@ -981,6 +1235,7 @@ def test_named_tuple(self): for con in dmp_cons_over: self.assertIn(con, overconstrained_cons) + @unittest.skipUnless(scipy_available, "scipy is not available.") def test_incidence_graph(self): m = make_degenerate_solid_phase_model() variables = list(m.component_data_objects(pyo.Var)) @@ -1003,14 +1258,14 @@ def test_dm_graph_interface(self): top_nodes = list(range(M)) con_dmp, var_dmp = dulmage_mendelsohn(graph, top_nodes=top_nodes) con_dmp = tuple([constraints[i] for i in subset] for subset in con_dmp) - var_dmp = tuple([variables[i-M] for i in subset] for subset in var_dmp) + var_dmp = tuple([variables[i - M] for i in subset] for subset in var_dmp) underconstrained_vars = ComponentSet(m.flow_comp.values()) underconstrained_vars.add(m.flow) underconstrained_cons = ComponentSet(m.flow_eqn.values()) - self.assertEqual(len(var_dmp[0]+var_dmp[1]), len(underconstrained_vars)) - for var in var_dmp[0]+var_dmp[1]: + self.assertEqual(len(var_dmp[0] + var_dmp[1]), len(underconstrained_vars)) + for var in var_dmp[0] + var_dmp[1]: self.assertIn(var, underconstrained_vars) self.assertEqual(len(con_dmp[2]), len(underconstrained_cons)) @@ -1027,10 +1282,11 @@ def test_dm_graph_interface(self): for var in var_dmp[2]: self.assertIn(var, overconstrained_vars) - self.assertEqual(len(con_dmp[0]+con_dmp[1]), len(overconstrained_cons)) - for con in con_dmp[0]+con_dmp[1]: + self.assertEqual(len(con_dmp[0] + con_dmp[1]), len(overconstrained_cons)) + for con in con_dmp[0] + con_dmp[1]: self.assertIn(con, overconstrained_cons) + @unittest.skipUnless(scipy_available, "scipy is not available.") def test_remove(self): m = make_degenerate_solid_phase_model() variables = list(m.component_data_objects(pyo.Var)) @@ -1069,9 +1325,7 @@ def test_remove(self): @unittest.skipUnless(networkx_available, "networkx is not available.") -@unittest.skipUnless(scipy_available, "scipy is not available.") class TestConnectedComponents(unittest.TestCase): - def test_dynamic_model_backward(self): """ This is the same test as performed in the test_connected.py @@ -1082,12 +1336,10 @@ def test_dynamic_model_backward(self): igraph = IncidenceGraphInterface(m) var_blocks, con_blocks = igraph.get_connected_components() vc_blocks = [ - (tuple(vars), tuple(cons)) - for vars, cons in zip(var_blocks, con_blocks) + (tuple(vars), tuple(cons)) for vars, cons in zip(var_blocks, con_blocks) ] key_fcn = lambda vc_comps: tuple( - tuple(comp.name for comp in comps) - for comps in vc_comps + tuple(comp.name for comp in comps) for comps in vc_comps ) vc_blocks = list(sorted(vc_blocks, key=key_fcn)) @@ -1096,31 +1348,27 @@ def test_dynamic_model_backward(self): # The variables in these blocks need to be sorted by their coordinates # in the underlying incidence matrix - var_idx_map = ComponentMap( - (var, i) for i, var in enumerate(igraph.variables) - ) - con_idx_map = ComponentMap( - (con, i) for i, con in enumerate(igraph.constraints) - ) - var_key = lambda var: var_idx_map[var] - con_key = lambda con: con_idx_map[con] + var_key = lambda var: igraph.get_matrix_coord(var) + con_key = lambda con: igraph.get_matrix_coord(con) var_blocks = [ tuple(sorted(t0_vars, key=var_key)), - tuple(sorted( - (var for var in igraph.variables if var not in t0_vars), - key=var_key, - )), + tuple( + sorted( + (var for var in igraph.variables if var not in t0_vars), key=var_key + ) + ), ] con_blocks = [ tuple(sorted(t0_cons, key=con_key)), - tuple(sorted( - (con for con in igraph.constraints if con not in t0_cons), - key=con_key, - )), + tuple( + sorted( + (con for con in igraph.constraints if con not in t0_cons), + key=con_key, + ) + ), ] target_blocks = [ - (tuple(vars), tuple(cons)) - for vars, cons in zip(var_blocks, con_blocks) + (tuple(vars), tuple(cons)) for vars, cons in zip(var_blocks, con_blocks) ] target_blocks = list(sorted(target_blocks, key=key_fcn)) @@ -1129,7 +1377,7 @@ def test_dynamic_model_backward(self): # So if this test fails, we'll get a somewhat confusing PyomoException # about not being able to convert non-constant expressions to bool # rather than a message saying that our variables are not the same. - #self.assertEqual(target_blocks, vc_blocks) + # self.assertEqual(target_blocks, vc_blocks) for block, target_block in zip(vc_blocks, target_blocks): vars, cons = block pred_vars, pred_cons = target_block @@ -1144,7 +1392,6 @@ def test_dynamic_model_backward(self): @unittest.skipUnless(networkx_available, "networkx is not available.") @unittest.skipUnless(scipy_available, "scipy is not available.") class TestExtraVars(unittest.TestCase): - def test_unused_var(self): m = pyo.ConcreteModel() m.v1 = pyo.Var() @@ -1163,10 +1410,9 @@ def test_reference(self): @unittest.skipUnless(networkx_available, "networkx is not available.") -@unittest.skipUnless(scipy_available, "scipy is not available.") -@unittest.skipUnless(AmplInterface.available(), "pynumero_ASL is not available") class TestExceptions(unittest.TestCase): - + @unittest.skipUnless(scipy_available, "scipy is not available.") + @unittest.skipUnless(asl_available, "pynumero_ASL is not available") def test_nlp_fixed_error(self): m = pyo.ConcreteModel() m.v1 = pyo.Var() @@ -1175,9 +1421,12 @@ def test_nlp_fixed_error(self): m.v2.fix(2.0) m._obj = pyo.Objective(expr=0.0) nlp = PyomoNLP(m) - with self.assertRaisesRegex(ValueError, "fixed variables"): + msg = "generation options.*are not supported" + with self.assertRaisesRegex(ValueError, msg): igraph = IncidenceGraphInterface(nlp, include_fixed=True) + @unittest.skipUnless(scipy_available, "scipy is not available.") + @unittest.skipUnless(asl_available, "pynumero_ASL is not available") def test_nlp_active_error(self): m = pyo.ConcreteModel() m.v1 = pyo.Var() @@ -1198,7 +1447,6 @@ def test_remove_no_matrix(self): @unittest.skipUnless(networkx_available, "networkx is not available.") @unittest.skipUnless(scipy_available, "scipy is not available.") -@unittest.skipUnless(AmplInterface.available(), "pynumero_ASL is not available") class TestIncludeInequality(unittest.TestCase): def make_model_with_inequalities(self): m = make_degenerate_solid_phase_model() @@ -1223,6 +1471,7 @@ def test_include_inequality_model(self): igraph = IncidenceGraphInterface(m, include_inequality=True) self.assertEqual(igraph.incidence_matrix.shape, (12, 8)) + @unittest.skipUnless(asl_available, "pynumero_ASL is not available") def test_dont_include_inequality_nlp(self): m = self.make_model_with_inequalities() m._obj = pyo.Objective(expr=0) @@ -1230,6 +1479,7 @@ def test_dont_include_inequality_nlp(self): igraph = IncidenceGraphInterface(nlp, include_inequality=False) self.assertEqual(igraph.incidence_matrix.shape, (8, 8)) + @unittest.skipUnless(asl_available, "pynumero_ASL is not available") def test_include_inequality_nlp(self): m = self.make_model_with_inequalities() m._obj = pyo.Objective(expr=0) @@ -1238,5 +1488,309 @@ def test_include_inequality_nlp(self): self.assertEqual(igraph.incidence_matrix.shape, (12, 8)) +@unittest.skipUnless(networkx_available, "networkx is not available.") +class TestGetIncidenceGraph(unittest.TestCase): + def make_test_model(self): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=[1, 2, 3, 4]) + m.v = pyo.Var(m.I, bounds=(0, None)) + m.eq1 = pyo.Constraint(expr=m.v[1] ** 2 + m.v[2] ** 2 == 1.0) + m.eq2 = pyo.Constraint(expr=m.v[1] + 2.0 == m.v[3]) + m.ineq1 = pyo.Constraint(expr=m.v[2] - m.v[3] ** 0.5 + m.v[4] ** 2 <= 1.0) + m.ineq2 = pyo.Constraint(expr=m.v[2] * m.v[4] >= 1.0) + m.ineq3 = pyo.Constraint(expr=m.v[1] >= m.v[4] ** 4) + m.obj = pyo.Objective(expr=-m.v[1] - m.v[2] + m.v[3] ** 2 + m.v[4] ** 2) + return m + + def test_bipartite_incidence_graph(self): + m = self.make_test_model() + constraints = [m.eq1, m.eq2, m.ineq1, m.ineq2, m.ineq3] + variables = list(m.v.values()) + graph = get_bipartite_incidence_graph(variables, constraints) + + # Nodes: + # 0: m.eq1 + # 1: m.eq2 + # 2: m.ineq1 + # 3: m.ineq2 + # 4: m.ineq3 + # 5: m.v[1] + # 6: m.v[2] + # 7: m.v[3] + # 8: m.v[4] + + # Assert some basic structure + self.assertEqual(len(graph.nodes), 9) + self.assertEqual(len(graph.edges), 11) + self.assertTrue(nx.algorithms.bipartite.is_bipartite(graph)) + + # Assert that the "adjacency list" is what we expect + self.assertEqual(set(graph[0]), {5, 6}) + self.assertEqual(set(graph[1]), {5, 7}) + self.assertEqual(set(graph[2]), {6, 7, 8}) + self.assertEqual(set(graph[3]), {6, 8}) + self.assertEqual(set(graph[4]), {5, 8}) + self.assertEqual(set(graph[5]), {0, 1, 4}) + self.assertEqual(set(graph[6]), {0, 2, 3}) + self.assertEqual(set(graph[7]), {1, 2}) + self.assertEqual(set(graph[8]), {2, 3, 4}) + + def test_unused_var(self): + m = self.make_test_model() + constraints = [m.eq1, m.eq2] + variables = list(m.v.values()) + graph = get_bipartite_incidence_graph(variables, constraints) + + # Nodes: + # 0: m.eq1 + # 1: m.eq2 + # 2: m.v[1] + # 3: m.v[2] + # 4: m.v[3] + # 5: m.v[4] + + self.assertEqual(len(graph.nodes), 6) + self.assertEqual(len(graph.edges), 4) + self.assertTrue(nx.algorithms.bipartite.is_bipartite(graph)) + + # Assert that the "adjacency list" is what we expect + self.assertEqual(set(graph[0]), {2, 3}) + self.assertEqual(set(graph[1]), {2, 4}) + self.assertEqual(set(graph[2]), {0, 1}) + self.assertEqual(set(graph[3]), {0}) + self.assertEqual(set(graph[4]), {1}) + self.assertEqual(set(graph[5]), set()) + + def test_fixed_vars(self): + m = self.make_test_model() + constraints = [m.eq1, m.eq2, m.ineq1, m.ineq2, m.ineq3] + variables = list(m.v.values()) + m.v[1].fix() + m.v[4].fix() + + # Slightly odd situation where we provide fixed variables, but + # then tell the graph to not include them. Nodes will be created + # for these vars, but they will not have any edges. + graph = get_bipartite_incidence_graph( + variables, constraints, include_fixed=False + ) + + # Nodes: + # 0: m.eq1 + # 1: m.eq2 + # 2: m.ineq1 + # 3: m.ineq2 + # 4: m.ineq3 + # 5: m.v[1] + # 6: m.v[2] + # 7: m.v[3] + # 8: m.v[4] + + self.assertEqual(len(graph.nodes), 9) + self.assertEqual(len(graph.edges), 5) + self.assertTrue(nx.algorithms.bipartite.is_bipartite(graph)) + + # Assert that the "adjacency list" is what we expect + self.assertEqual(set(graph[0]), {6}) + self.assertEqual(set(graph[1]), {7}) + self.assertEqual(set(graph[2]), {6, 7}) + self.assertEqual(set(graph[3]), {6}) + self.assertEqual(set(graph[4]), set()) + self.assertEqual(set(graph[5]), set()) + self.assertEqual(set(graph[6]), {0, 2, 3}) + self.assertEqual(set(graph[7]), {1, 2}) + self.assertEqual(set(graph[8]), set()) + + def test_extract_subgraph(self): + m = self.make_test_model() + constraints = [m.eq1, m.eq2, m.ineq1, m.ineq2, m.ineq3] + variables = list(m.v.values()) + graph = get_bipartite_incidence_graph(variables, constraints) + + sg_cons = [0, 2] + sg_vars = [i + len(constraints) for i in [2, 0, 3]] + + subgraph = extract_bipartite_subgraph(graph, sg_cons, sg_vars) + + # Subgraph nodes: + # 0: m.eq1 + # 1: m.ineq1 + # 2: m.v[3] + # 3: m.v[1] + # 4: m.v[4] + + self.assertEqual(len(subgraph.nodes), 5) + self.assertEqual(len(subgraph.edges), 3) + self.assertTrue(nx.algorithms.bipartite.is_bipartite(subgraph)) + + self.assertEqual(set(subgraph[0]), {3}) + self.assertEqual(set(subgraph[1]), {2, 4}) + self.assertEqual(set(subgraph[2]), {1}) + self.assertEqual(set(subgraph[3]), {0}) + self.assertEqual(set(subgraph[4]), {1}) + + def test_extract_exceptions(self): + m = self.make_test_model() + constraints = [m.eq1, m.eq2, m.ineq1, m.ineq2, m.ineq3] + variables = list(m.v.values()) + graph = get_bipartite_incidence_graph(variables, constraints) + + sg_cons = [0, 2, 5] + sg_vars = [i + len(constraints) for i in [2, 3]] + msg = "Subgraph is not bipartite" + with self.assertRaisesRegex(RuntimeError, msg): + subgraph = extract_bipartite_subgraph(graph, sg_cons, sg_vars) + + sg_cons = [0, 2, 5] + sg_vars = [i + len(constraints) for i in [2, 0, 3]] + msg = "provided more than once" + with self.assertRaisesRegex(RuntimeError, msg): + subgraph = extract_bipartite_subgraph(graph, sg_cons, sg_vars) + + +@unittest.skipUnless(networkx_available, "networkx is not available.") +class TestGetAdjacent(unittest.TestCase): + def test_get_adjacent_to_var(self): + m = make_degenerate_solid_phase_model() + igraph = IncidenceGraphInterface(m) + adj_cons = igraph.get_adjacent_to(m.rho) + self.assertEqual( + ComponentSet(adj_cons), + ComponentSet( + [m.holdup_eqn[1], m.holdup_eqn[2], m.holdup_eqn[3], m.density_eqn] + ), + ) + + def test_get_adjacent_to_con(self): + m = make_degenerate_solid_phase_model() + igraph = IncidenceGraphInterface(m) + adj_vars = igraph.get_adjacent_to(m.density_eqn) + self.assertEqual( + ComponentSet(adj_vars), ComponentSet([m.x[1], m.x[2], m.x[3], m.rho]) + ) + + def test_get_adjacent_exceptions(self): + m = make_degenerate_solid_phase_model() + igraph = IncidenceGraphInterface() + msg = "Cannot get components adjacent to" + with self.assertRaisesRegex(RuntimeError, msg): + adj_vars = igraph.get_adjacent_to(m.density_eqn) + + m.x[1].fix() + igraph = IncidenceGraphInterface(m, include_fixed=False) + msg = "Cannot find component" + with self.assertRaisesRegex(RuntimeError, msg): + adj_cons = igraph.get_adjacent_to(m.x[1]) + + +@unittest.skipUnless(networkx_available, "networkx is not available.") +class TestInterface(unittest.TestCase): + def test_assumed_constraint_behavior(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + m.con = pyo.Constraint(expr=m.x[1] == m.x[2] - pyo.exp(m.x[3])) + var_set = ComponentSet(identify_variables(m.con.body)) + self.assertEqual(var_set, ComponentSet(m.x[:])) + + def test_subgraph_with_fewer_var_or_con(self): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=[1, 2]) + m.v = pyo.Var(m.I) + m.eq1 = pyo.Constraint(expr=m.v[1] + m.v[2] == 1) + m.ineq1 = pyo.Constraint(expr=m.v[1] - m.v[2] <= 2) + + # Defensively set include_inequality=True, which is the current + # default, in case this default changes. + igraph = IncidenceGraphInterface(m, include_inequality=True) + + variables = list(m.v.values()) + constraints = [m.ineq1] + matching = igraph.maximum_matching(variables, constraints) + self.assertEqual(len(matching), 1) + + variables = [m.v[2]] + constraints = [m.eq1, m.ineq1] + matching = igraph.maximum_matching(variables, constraints) + self.assertEqual(len(matching), 1) + + @unittest.skipUnless(plotly_available, "Plotly is not available") + def test_plot(self): + """ + Unfortunately, this test only ensures the code runs without errors. + It does not test for correctness. + """ + m = pyo.ConcreteModel() + m.x = pyo.Var(bounds=(-1, 1)) + m.y = pyo.Var() + m.z = pyo.Var() + # NOTE: Objective will not be displayed + m.obj = pyo.Objective(expr=m.y**2 + m.z**2) + m.c1 = pyo.Constraint(expr=m.y == 2 * m.x + 1) + m.c2 = pyo.Constraint(expr=m.z >= m.x) + m.y.fix() + igraph = IncidenceGraphInterface(m, include_inequality=True, include_fixed=True) + igraph.plot(title='test plot', show=False) + + def test_zero_coeff(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + m.eq1 = pyo.Constraint(expr=m.x[1] + 0 * m.x[2] == 2) + m.eq2 = pyo.Constraint(expr=m.x[1] ** 2 == 1) + m.eq3 = pyo.Constraint(expr=m.x[2] * m.x[3] - m.x[1] == 1) + + igraph = IncidenceGraphInterface(m) + var_dmp, con_dmp = igraph.dulmage_mendelsohn() + + # Because 0*m.x[2] does not appear in the incidence graph, we correctly + # identify that the system is structurally singular + self.assertGreater(len(var_dmp.unmatched), 0) + + def test_var_minus_itself(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + m.eq1 = pyo.Constraint(expr=m.x[1] + m.x[2] - m.x[2] == 2) + m.eq2 = pyo.Constraint(expr=m.x[1] ** 2 == 1) + m.eq3 = pyo.Constraint(expr=m.x[2] * m.x[3] - m.x[1] == 1) + + igraph = IncidenceGraphInterface(m) + var_dmp, con_dmp = igraph.dulmage_mendelsohn() + + # m.x[2] - m.x[2] is correctly ignored by generate_standard_repn, + # so we correctly identify that the system is structurally singular + self.assertGreater(len(var_dmp.unmatched), 0) + + def test_linear_only(self): + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3]) + m.eq1 = pyo.Constraint(expr=m.x[1] ** 2 + m.x[2] ** 2 + m.x[3] ** 2 == 1) + m.eq2 = pyo.Constraint(expr=m.x[2] + pyo.sqrt(m.x[1]) + pyo.exp(m.x[3]) == 1) + m.eq3 = pyo.Constraint(expr=m.x[3] + m.x[1] ** 3 + m.x[2] == 1) + + igraph = IncidenceGraphInterface(m, linear_only=True) + self.assertEqual(igraph.n_edges, 3) + self.assertEqual(ComponentSet(igraph.variables), ComponentSet([m.x[2], m.x[3]])) + + matching = igraph.maximum_matching() + self.assertEqual(len(matching), 2) + self.assertIs(matching[m.eq2], m.x[2]) + self.assertIs(matching[m.eq3], m.x[3]) + + +@unittest.skipUnless(networkx_available, "networkx is not available.") +class TestIndexedBlock(unittest.TestCase): + def test_block_data_obj(self): + m = pyo.ConcreteModel() + m.block = pyo.Block([1, 2, 3]) + m.block[1].subblock = make_degenerate_solid_phase_model() + igraph = IncidenceGraphInterface(m.block[1]) + var_dmp, con_dmp = igraph.dulmage_mendelsohn() + self.assertEqual(len(var_dmp.unmatched), 1) + self.assertEqual(len(con_dmp.unmatched), 1) + + msg = "Unsupported type.*_BlockData" + with self.assertRaisesRegex(TypeError, msg): + igraph = IncidenceGraphInterface(m.block) + + if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/incidence_analysis/tests/test_matching.py b/pyomo/contrib/incidence_analysis/tests/test_matching.py index 33ec74e0064..b5550b3b84c 100644 --- a/pyomo/contrib/incidence_analysis/tests/test_matching.py +++ b/pyomo/contrib/incidence_analysis/tests/test_matching.py @@ -10,11 +10,8 @@ # ___________________________________________________________________________ from pyomo.contrib.incidence_analysis.matching import maximum_matching -from pyomo.common.dependencies import ( - scipy, - scipy_available, - networkx_available, - ) +from pyomo.common.dependencies import scipy, scipy_available, networkx_available + if scipy_available: sps = scipy.sparse @@ -49,14 +46,14 @@ def test_identity(self): def test_low_rank_diagonal(self): N = 5 - omit = N//2 + omit = N // 2 row = [i for i in range(N) if i != omit] col = [j for j in range(N) if j != omit] - data = [1 for _ in range(N-1)] + data = [1 for _ in range(N - 1)] matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) matching = maximum_matching(matrix) - self.assertEqual(len(matching), N-1) + self.assertEqual(len(matching), N - 1) for i in range(N): if i != omit: self.assertIn(i, matching) @@ -67,15 +64,15 @@ def test_bordered(self): row = [] col = [] data = [] - for i in range(N-1): + for i in range(N - 1): # Bottom row - row.append(N-1) + row.append(N - 1) col.append(i) data.append(1) # Right column row.append(i) - col.append(N-1) + col.append(N - 1) data.append(1) # Diagonal @@ -106,7 +103,7 @@ def test_hessenberg(self): data = [] for i in range(N): # Bottom row - row.append(N-1) + row.append(N - 1) col.append(i) data.append(1) @@ -117,7 +114,7 @@ def test_hessenberg(self): data.append(1) else: # One-off diagonal - row.append(i-1) + row.append(i - 1) col.append(i) data.append(1) @@ -141,13 +138,13 @@ def test_low_rank_hessenberg(self): the imperfect matching. """ N = 5 - omit = N//2 + omit = N // 2 row = [] col = [] data = [] for i in range(N): # Bottom row - row.append(N-1) + row.append(N - 1) col.append(i) data.append(1) @@ -159,7 +156,7 @@ def test_low_rank_hessenberg(self): else: # One-off diagonal if i != omit: - row.append(i-1) + row.append(i - 1) col.append(i) data.append(1) @@ -167,11 +164,11 @@ def test_low_rank_hessenberg(self): matching = maximum_matching(matrix) values = set(matching.values()) - self.assertEqual(len(matching), N-1) + self.assertEqual(len(matching), N - 1) self.assertIn(0, matching) - self.assertIn(N-1, matching) + self.assertIn(N - 1, matching) self.assertIn(0, values) - self.assertIn(N-1, values) + self.assertIn(N - 1, values) def test_nondecomposable_hessenberg(self): """ @@ -187,7 +184,7 @@ def test_nondecomposable_hessenberg(self): data = [] for i in range(N): # Bottom row - row.append(N-1) + row.append(N - 1) col.append(i) data.append(1) @@ -200,7 +197,7 @@ def test_nondecomposable_hessenberg(self): if i != 0: # One-off diagonal - row.append(i-1) + row.append(i - 1) col.append(i) data.append(1) @@ -227,23 +224,23 @@ def test_low_rank_nondecomposable_hessenberg(self): row = [] col = [] data = [] - for i in range(N-1): + for i in range(N - 1): # Below diagonal - row.append(i+1) + row.append(i + 1) col.append(i) data.append(1) # Above diagonal row.append(i) - col.append(i+1) + col.append(i + 1) data.append(1) matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) matching = maximum_matching(matrix) values = set(matching.values()) - self.assertEqual(len(matching), N-1) - self.assertEqual(len(values), N-1) + self.assertEqual(len(matching), N - 1) + self.assertEqual(len(values), N - 1) if __name__ == "__main__": diff --git a/pyomo/contrib/incidence_analysis/tests/test_util.py b/pyomo/contrib/incidence_analysis/tests/test_scc_solver.py similarity index 67% rename from pyomo/contrib/incidence_analysis/tests/test_util.py rename to pyomo/contrib/incidence_analysis/tests/test_scc_solver.py index 1949eaeb3eb..6efe52a7d80 100644 --- a/pyomo/contrib/incidence_analysis/tests/test_util.py +++ b/pyomo/contrib/incidence_analysis/tests/test_scc_solver.py @@ -14,22 +14,21 @@ from pyomo.common.dependencies import networkx_available from pyomo.common.dependencies import scipy_available from pyomo.common.collections import ComponentSet, ComponentMap -from pyomo.contrib.incidence_analysis.util import ( +from pyomo.contrib.incidence_analysis.scc_solver import ( TemporarySubsystemManager, generate_strongly_connected_components, solve_strongly_connected_components, - ) +) from pyomo.contrib.incidence_analysis.tests.models_for_testing import ( make_gas_expansion_model, make_dynamic_model, - ) +) import pyomo.common.unittest as unittest @unittest.skipUnless(scipy_available, "SciPy is not available") @unittest.skipUnless(networkx_available, "NetworkX is not available") class TestGenerateSCC(unittest.TestCase): - def test_gas_expansion(self): N = 5 m = make_gas_expansion_model(N) @@ -39,11 +38,11 @@ def test_gas_expansion(self): constraints = list(m.component_data_objects(pyo.Constraint)) self.assertEqual( - len(list(generate_strongly_connected_components(constraints))), - N+1, - ) + len(list(generate_strongly_connected_components(constraints))), N + 1 + ) for i, (block, inputs) in enumerate( - generate_strongly_connected_components(constraints)): + generate_strongly_connected_components(constraints) + ): with TemporarySubsystemManager(to_fix=inputs): if i == 0: # P[0], ideal_gas[0] @@ -65,15 +64,15 @@ def test_gas_expansion(self): self.assertEqual(len(block.cons), 4) var_set = ComponentSet([m.P[i], m.rho[i], m.F[i], m.T[i]]) - con_set = ComponentSet([ - m.ideal_gas[i], m.mbal[i], m.ebal[i], m.expansion[i] - ]) + con_set = ComponentSet( + [m.ideal_gas[i], m.mbal[i], m.ebal[i], m.expansion[i]] + ) for var, con in zip(block.vars[:], block.cons[:]): self.assertIn(var, var_set) self.assertIn(con, con_set) # P[0] is in expansion[1] - other_var_set = ComponentSet([m.P[i-1]]) + other_var_set = ComponentSet([m.P[i - 1]]) self.assertEqual(len(block.input_vars), 1) for var in block.input_vars[:]: self.assertIn(var, other_var_set) @@ -84,17 +83,17 @@ def test_gas_expansion(self): self.assertEqual(len(block.cons), 4) var_set = ComponentSet([m.P[i], m.rho[i], m.F[i], m.T[i]]) - con_set = ComponentSet([ - m.ideal_gas[i], m.mbal[i], m.ebal[i], m.expansion[i] - ]) + con_set = ComponentSet( + [m.ideal_gas[i], m.mbal[i], m.ebal[i], m.expansion[i]] + ) for var, con in zip(block.vars[:], block.cons[:]): self.assertIn(var, var_set) self.assertIn(con, con_set) # P[i-1], rho[i-1], F[i-1], T[i-1], etc. - other_var_set = ComponentSet([ - m.P[i-1], m.rho[i-1], m.F[i-1], m.T[i-1] - ]) + other_var_set = ComponentSet( + [m.P[i - 1], m.rho[i - 1], m.F[i - 1], m.T[i - 1]] + ) self.assertEqual(len(block.input_vars), 4) for var in block.input_vars[:]: self.assertIn(var, other_var_set) @@ -111,22 +110,23 @@ def test_dynamic_backward_disc_with_initial_conditions(self): constraints = list(m.component_data_objects(pyo.Constraint)) self.assertEqual( - len(list(generate_strongly_connected_components(constraints))), - nfe+2, - # The "initial constraints" have two SCCs because they - # decompose into the algebraic equation and differential - # equation. This decomposition is because the discretization - # equation is not present. - # - # This is actually quite troublesome for testing because - # it means that the topological order of strongly connected - # components is not unique (alternatively, the initial - # conditions and rest of the model are independent, or the - # bipartite graph of variables and equations is disconnected). - ) + len(list(generate_strongly_connected_components(constraints))), + nfe + 2, + # The "initial constraints" have two SCCs because they + # decompose into the algebraic equation and differential + # equation. This decomposition is because the discretization + # equation is not present. + # + # This is actually quite troublesome for testing because + # it means that the topological order of strongly connected + # components is not unique (alternatively, the initial + # conditions and rest of the model are independent, or the + # bipartite graph of variables and equations is disconnected). + ) t_scc_map = {} for i, (block, inputs) in enumerate( - generate_strongly_connected_components(constraints)): + generate_strongly_connected_components(constraints) + ): with TemporarySubsystemManager(to_fix=inputs): t = block.vars[0].index() t_scc_map[t] = i @@ -135,12 +135,10 @@ def test_dynamic_backward_disc_with_initial_conditions(self): else: t_prev = m.time.prev(t) - con_set = ComponentSet([ - m.diff_eqn[t], m.flow_out_eqn[t], m.dhdt_disc_eq[t] - ]) - var_set = ComponentSet([ - m.height[t], m.dhdt[t], m.flow_out[t] - ]) + con_set = ComponentSet( + [m.diff_eqn[t], m.flow_out_eqn[t], m.dhdt_disc_eq[t]] + ) + var_set = ComponentSet([m.height[t], m.dhdt[t], m.flow_out[t]]) self.assertEqual(len(con_set), len(block.cons)) self.assertEqual(len(var_set), len(block.vars)) for var, con in zip(block.vars[:], block.cons[:]): @@ -148,10 +146,11 @@ def test_dynamic_backward_disc_with_initial_conditions(self): self.assertIn(con, con_set) self.assertFalse(var.fixed) - other_var_set = ComponentSet([m.height[t_prev]])\ - if t != t1 else ComponentSet() - # At t1, "input var" height[t0] is fixed, so - # it is not included here. + other_var_set = ( + ComponentSet([m.height[t_prev]]) if t != t1 else ComponentSet() + ) + # At t1, "input var" height[t0] is fixed, so + # it is not included here. self.assertEqual(len(inputs), len(other_var_set)) for var in block.input_vars[:]: self.assertIn(var, other_var_set) @@ -187,27 +186,23 @@ def test_dynamic_backward_disc_without_initial_conditions(self): m.diff_eqn[t0].deactivate() m.flow_out_eqn[t0].deactivate() - constraints = list( - m.component_data_objects(pyo.Constraint, active=True) - ) + constraints = list(m.component_data_objects(pyo.Constraint, active=True)) self.assertEqual( - len(list(generate_strongly_connected_components(constraints))), - nfe, - ) + len(list(generate_strongly_connected_components(constraints))), nfe + ) for i, (block, inputs) in enumerate( - generate_strongly_connected_components(constraints)): + generate_strongly_connected_components(constraints) + ): with TemporarySubsystemManager(to_fix=inputs): # We have a much easier time testing the SCCs generated # in this test. - t = m.time[i+2] + t = m.time[i + 2] t_prev = m.time.prev(t) - con_set = ComponentSet([ - m.diff_eqn[t], m.flow_out_eqn[t], m.dhdt_disc_eq[t] - ]) - var_set = ComponentSet([ - m.height[t], m.dhdt[t], m.flow_out[t] - ]) + con_set = ComponentSet( + [m.diff_eqn[t], m.flow_out_eqn[t], m.dhdt_disc_eq[t]] + ) + var_set = ComponentSet([m.height[t], m.dhdt[t], m.flow_out[t]]) self.assertEqual(len(con_set), len(block.cons)) self.assertEqual(len(var_set), len(block.vars)) for var, con in zip(block.vars[:], block.cons[:]): @@ -215,10 +210,11 @@ def test_dynamic_backward_disc_without_initial_conditions(self): self.assertIn(con, con_set) self.assertFalse(var.fixed) - other_var_set = ComponentSet([m.height[t_prev]])\ - if t != t1 else ComponentSet() - # At t1, "input var" height[t0] is fixed, so - # it is not included here. + other_var_set = ( + ComponentSet([m.height[t_prev]]) if t != t1 else ComponentSet() + ) + # At t1, "input var" height[t0] is fixed, so + # it is not included here. self.assertEqual(len(inputs), len(other_var_set)) for var in block.input_vars[:]: self.assertIn(var, other_var_set) @@ -251,37 +247,29 @@ def test_dynamic_backward_with_inputs(self): # Variables that we want in our SCCs: # Here we exclude "dynamic inputs" (flow_in) instead of fixing them variables = [ - var for var in m.component_data_objects(pyo.Var) - if not var.fixed and var.parent_component() is not m.flow_in - ] - constraints = list( - m.component_data_objects(pyo.Constraint, active=True) - ) + var + for var in m.component_data_objects(pyo.Var) + if not var.fixed and var.parent_component() is not m.flow_in + ] + constraints = list(m.component_data_objects(pyo.Constraint, active=True)) self.assertEqual( - len(list(generate_strongly_connected_components( - constraints, - variables, - ))), - nfe, - ) + len(list(generate_strongly_connected_components(constraints, variables))), + nfe, + ) # The result of the generator is the same as in the previous # test, but we are using the more general API for i, (block, inputs) in enumerate( - generate_strongly_connected_components( - constraints, - variables, - )): + generate_strongly_connected_components(constraints, variables) + ): with TemporarySubsystemManager(to_fix=inputs): - t = m.time[i+2] + t = m.time[i + 2] t_prev = m.time.prev(t) - con_set = ComponentSet([ - m.diff_eqn[t], m.flow_out_eqn[t], m.dhdt_disc_eq[t] - ]) - var_set = ComponentSet([ - m.height[t], m.dhdt[t], m.flow_out[t] - ]) + con_set = ComponentSet( + [m.diff_eqn[t], m.flow_out_eqn[t], m.dhdt_disc_eq[t]] + ) + var_set = ComponentSet([m.height[t], m.dhdt[t], m.flow_out[t]]) self.assertEqual(len(con_set), len(block.cons)) self.assertEqual(len(var_set), len(block.vars)) for var, con in zip(block.vars[:], block.cons[:]): @@ -322,22 +310,23 @@ def test_dynamic_forward_disc(self): constraints = list(m.component_data_objects(pyo.Constraint)) # For a forward discretization, the entire model decomposes self.assertEqual( - len(list(generate_strongly_connected_components(constraints))), - len(list(m.component_data_objects(pyo.Constraint))), - ) + len(list(generate_strongly_connected_components(constraints))), + len(list(m.component_data_objects(pyo.Constraint))), + ) self.assertEqual( - len(list(generate_strongly_connected_components(constraints))), - 3*nfe+2, - # "Initial constraints" only add two variables/equations - ) + len(list(generate_strongly_connected_components(constraints))), + 3 * nfe + 2, + # "Initial constraints" only add two variables/equations + ) for i, (block, inputs) in enumerate( - generate_strongly_connected_components(constraints)): + generate_strongly_connected_components(constraints) + ): with TemporarySubsystemManager(to_fix=inputs): # The order is: # algebraic -> derivative -> differential -> algebraic -> ... - idx = i//3 + idx = i // 3 mod = i % 3 - t = m.time[idx+1] + t = m.time[idx + 1] if t != time.last(): t_next = m.time.next(t) @@ -355,11 +344,29 @@ def test_dynamic_forward_disc(self): self.assertIs(block.vars[0], m.height[t_next]) self.assertIs(block.cons[0], m.dhdt_disc_eq[t]) + def test_with_zero_coefficients(self): + """Test where the blocks we identify are incorrect if we don't filter + out variables with coefficients of zero + """ + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3], initialize=1.0) + m.eq1 = pyo.Constraint(expr=m.x[1] + 2 * m.x[2] + 0 * m.x[3] == 7) + m.eq2 = pyo.Constraint(expr=m.x[1] + pyo.log(m.x[1]) == 0) + blocks = generate_strongly_connected_components([m.eq1, m.eq2]) + blocks = [bl for (bl, _) in blocks] + + self.assertEqual(len(blocks[0].vars), 1) + self.assertIs(blocks[0].vars[0], m.x[1]) + self.assertIs(blocks[0].cons[0], m.eq2) + + self.assertEqual(len(blocks[1].vars), 1) + self.assertIs(blocks[1].vars[0], m.x[2]) + self.assertIs(blocks[1].cons[0], m.eq1) + @unittest.skipUnless(scipy_available, "SciPy is not available") @unittest.skipUnless(networkx_available, "NetworkX is not available") class TestSolveSCC(unittest.TestCase): - def test_dynamic_backward_no_solver(self): nfe = 5 m = make_dynamic_model(nfe=nfe, scheme="BACKWARD") @@ -368,8 +375,7 @@ def test_dynamic_backward_no_solver(self): m.flow_in.fix() m.height[t0].fix() - with self.assertRaisesRegex(RuntimeError, - "An external solver is required*"): + with self.assertRaisesRegex(RuntimeError, "An external solver is required*"): solve_strongly_connected_components(m) for t in time: @@ -381,8 +387,9 @@ def test_dynamic_backward_no_solver(self): self.assertFalse(m.dhdt[t].fixed) self.assertTrue(m.flow_in[t].fixed) - @unittest.skipUnless(pyo.SolverFactory("ipopt").available(), - "IPOPT is not available") + @unittest.skipUnless( + pyo.SolverFactory("ipopt").available(), "IPOPT is not available" + ) def test_dynamic_backward(self): nfe = 5 m = make_dynamic_model(nfe=nfe, scheme="BACKWARD") @@ -393,16 +400,16 @@ def test_dynamic_backward(self): solver = pyo.SolverFactory("ipopt") solve_kwds = {"tee": False} - solve_strongly_connected_components(m, solver=solver, - solve_kwds=solve_kwds) + solve_strongly_connected_components(m, solver=solver, solve_kwds=solve_kwds) for con in m.component_data_objects(pyo.Constraint): # Sanity check that this is an equality constraint... self.assertEqual(pyo.value(con.upper), pyo.value(con.lower)) # Assert that the constraint is satisfied within tolerance - self.assertAlmostEqual(pyo.value(con.body), pyo.value(con.upper), - delta=1e-7) + self.assertAlmostEqual( + pyo.value(con.body), pyo.value(con.upper), delta=1e-7 + ) for t in time: if t == t0: @@ -428,8 +435,9 @@ def test_dynamic_forward(self): self.assertEqual(pyo.value(con.upper), pyo.value(con.lower)) # Assert that the constraint is satisfied within tolerance - self.assertAlmostEqual(pyo.value(con.body), pyo.value(con.upper), - delta=1e-7) + self.assertAlmostEqual( + pyo.value(con.body), pyo.value(con.upper), delta=1e-7 + ) for t in time: if t == t0: @@ -449,20 +457,49 @@ def test_with_calc_var_kwds(self): m.p0 = pyo.Param(initialize=3e5) m.p1 = pyo.Param(initialize=1.296e12) m.con0 = pyo.Constraint(expr=m.v0 == m.p0) - m.con1 = pyo.Constraint(expr=0.0 == m.p1*m.v1/m.v0 + m.v2) + m.con1 = pyo.Constraint(expr=0.0 == m.p1 * m.v1 / m.v0 + m.v2) calc_var_kwds = {"eps": 1e-7} # This solve fails to converge without raising the tolerance. # The secant method used to converge a linear variable-constraint # pair (such as con0, v0 and con1, v1) leaves us with a residual of # 1.48e-8 due to roundoff error. - results = solve_strongly_connected_components( - m, calc_var_kwds=calc_var_kwds - ) + results = solve_strongly_connected_components(m, calc_var_kwds=calc_var_kwds) self.assertEqual(len(results), 2) self.assertAlmostEqual(m.v0.value, m.p0.value) self.assertAlmostEqual(m.v1.value, -18.4499152895) # -18.4499 ~= -v2*v0/p1 + def test_with_zero_coefficients(self): + """Test where the blocks we identify are incorrect if we don't filter + out variables with coefficients of zero + """ + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3], initialize=1.0) + m.eq1 = pyo.Constraint(expr=m.x[1] + 2 * m.x[2] + 0 * m.x[3] == 7) + m.eq2 = pyo.Constraint(expr=m.x[1] + pyo.log(m.x[1]) == 0) + results = solve_strongly_connected_components(m) + + self.assertAlmostEqual(m.x[1].value, 0.56714329) + self.assertAlmostEqual(m.x[2].value, 3.21642835) + self.assertEqual(m.x[3].value, 1.0) + + def test_with_inequalities(self): + """Test that we correctly ignore inequalities""" + m = pyo.ConcreteModel() + m.x = pyo.Var([1, 2, 3], initialize=1.0) + m.eq1 = pyo.Constraint(expr=m.x[1] + 2 * m.x[2] + 0 * m.x[3] == 7) + m.eq2 = pyo.Constraint(expr=m.x[1] + pyo.log(m.x[1]) == 0) + + # Solving the system violates this inequality. That is fine. We happily + # ignore it for the purpose of this equation solve. + m.ineq1 = pyo.Constraint(expr=m.x[1] + 2 * m.x[2] + m.x[3] <= 3) + + results = solve_strongly_connected_components(m) + + self.assertAlmostEqual(m.x[1].value, 0.56714329) + self.assertAlmostEqual(m.x[2].value, 3.21642835) + self.assertEqual(m.x[3].value, 1.0) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/incidence_analysis/tests/test_triangularize.py b/pyomo/contrib/incidence_analysis/tests/test_triangularize.py index f7542fcb6dc..76ba4403310 100644 --- a/pyomo/contrib/incidence_analysis/tests/test_triangularize.py +++ b/pyomo/contrib/incidence_analysis/tests/test_triangularize.py @@ -12,42 +12,141 @@ import random from pyomo.contrib.incidence_analysis.matching import maximum_matching from pyomo.contrib.incidence_analysis.triangularize import ( - block_triangularize, - get_diagonal_blocks, - ) + get_scc_of_projection, + block_triangularize, + map_coords_to_block_triangular_indices, + get_diagonal_blocks, +) from pyomo.common.dependencies import ( - scipy, - scipy_available, - networkx_available, - ) + scipy, + scipy_available, + networkx as nx, + networkx_available, +) + if scipy_available: sps = scipy.sparse +if networkx_available: + nxb = nx.algorithms.bipartite import pyomo.common.unittest as unittest +@unittest.skipUnless(networkx_available, "networkx is not available") +@unittest.skipUnless(scipy_available, "scipy is not available") +class TestGetSCCOfProjection(unittest.TestCase): + def test_graph_decomposable_tridiagonal_shuffled(self): + """ + This is the same graph as in test_decomposable_tridiagonal_shuffled + below, but now we convert the matrix into a bipartite graph and + use get_scc_of_projection. + + The matrix decomposes into 2x2 blocks: + |x x | + |x x | + | x x x | + | x x | + | x x| + """ + N = 11 + row = [] + col = [] + data = [] + + # Diagonal + row.extend(range(N)) + col.extend(range(N)) + data.extend(1 for _ in range(N)) + + # Below diagonal + row.extend(range(1, N)) + col.extend(range(N - 1)) + data.extend(1 for _ in range(N - 1)) + + # Above diagonal + row.extend(i for i in range(N - 1) if not i % 2) + col.extend(i + 1 for i in range(N - 1) if not i % 2) + data.extend(1 for i in range(N - 1) if not i % 2) + + # Same results hold after applying a random permutation. + row_perm = list(range(N)) + col_perm = list(range(N)) + random.shuffle(row_perm) + random.shuffle(col_perm) + + row = [row_perm[i] for i in row] + col = [col_perm[j] for j in col] + + matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) + graph = nxb.matrix.from_biadjacency_matrix(matrix) + row_nodes = list(range(N)) + sccs = get_scc_of_projection(graph, row_nodes) + + self.assertEqual(len(sccs), (N + 1) // 2) + + for i in range((N + 1) // 2): + # Note that these rows and cols are in the permuted space + rows = set(r for r, _ in sccs[i]) + cols = set(c - N for _, c in sccs[i]) + + pred_rows = {row_perm[2 * i]} + pred_cols = {col_perm[2 * i]} + + if 2 * i + 1 < N: + pred_rows.add(row_perm[2 * i + 1]) + pred_cols.add(col_perm[2 * i + 1]) + + self.assertEqual(pred_rows, rows) + self.assertEqual(pred_cols, cols) + + def test_scc_exceptions(self): + graph = nx.Graph() + graph.add_nodes_from(range(3)) + graph.add_edges_from([(0, 1), (0, 2), (1, 2)]) + top_nodes = [0] + msg = "graph is not bipartite" + with self.assertRaisesRegex(RuntimeError, msg): + sccs = get_scc_of_projection(graph, top_nodes=top_nodes) + + graph = nx.Graph() + graph.add_nodes_from(range(3)) + graph.add_edges_from([(0, 1), (0, 2)]) + top_nodes[0] + msg = "bipartite sets of different cardinalities" + with self.assertRaisesRegex(RuntimeError, msg): + sccs = get_scc_of_projection(graph, top_nodes=top_nodes) + + graph = nx.Graph() + graph.add_nodes_from(range(4)) + graph.add_edges_from([(0, 1), (0, 2)]) + top_nodes = [0, 3] + msg = "without a perfect matching" + with self.assertRaisesRegex(RuntimeError, msg): + sccs = get_scc_of_projection(graph, top_nodes=top_nodes) + + @unittest.skipUnless(networkx_available, "networkx is not available") @unittest.skipUnless(scipy_available, "scipy is not available") class TestTriangularize(unittest.TestCase): def test_low_rank_exception(self): N = 5 - row = list(range(N-1)) - col = list(range(N-1)) - data = [1 for _ in range(N-1)] + row = list(range(N - 1)) + col = list(range(N - 1)) + data = [1 for _ in range(N - 1)] matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) - with self.assertRaises(ValueError) as exc: + with self.assertRaises(RuntimeError) as exc: row_block_map, col_block_map = block_triangularize(matrix) self.assertIn('perfect matching', str(exc.exception)) def test_non_square_exception(self): N = 5 - row = list(range(N-1)) - col = list(range(N-1)) - data = [1 for _ in range(N-1)] + row = list(range(N - 1)) + col = list(range(N - 1)) + data = [1 for _ in range(N - 1)] - matrix = sps.coo_matrix((data, (row, col)), shape=(N, N-1)) + matrix = sps.coo_matrix((data, (row, col)), shape=(N, N - 1)) with self.assertRaises(ValueError) as exc: row_block_map, col_block_map = block_triangularize(matrix) @@ -56,12 +155,12 @@ def test_non_square_exception(self): def test_identity(self): N = 5 matrix = sps.identity(N).tocoo() - row_block_map, col_block_map = block_triangularize(matrix) + row_block_map, col_block_map = map_coords_to_block_triangular_indices(matrix) row_values = set(row_block_map.values()) col_values = set(row_block_map.values()) # For a (block) diagonal matrix, the order of diagonal - # blocks is arbitary, so we can't perform any strong + # blocks is arbitrary, so we can't perform any strong # checks here. # # Perfect matching is unique, but order of strongly @@ -100,15 +199,15 @@ def test_lower_tri(self): # Below diagonal row.extend(range(1, N)) - col.extend(range(N-1)) - data.extend(1 for _ in range(N-1)) + col.extend(range(N - 1)) + data.extend(1 for _ in range(N - 1)) matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) - row_block_map, col_block_map = block_triangularize(matrix) + row_block_map, col_block_map = map_coords_to_block_triangular_indices(matrix) row_values = set(row_block_map.values()) col_values = set(row_block_map.values()) - + self.assertEqual(len(row_values), N) self.assertEqual(len(col_values), N) @@ -137,16 +236,16 @@ def test_upper_tri(self): data.extend(1 for _ in range(N)) # Below diagonal - row.extend(range(N-1)) + row.extend(range(N - 1)) col.extend(range(1, N)) - data.extend(1 for _ in range(N-1)) + data.extend(1 for _ in range(N - 1)) matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) - row_block_map, col_block_map = block_triangularize(matrix) + row_block_map, col_block_map = map_coords_to_block_triangular_indices(matrix) row_values = set(row_block_map.values()) col_values = set(row_block_map.values()) - + self.assertEqual(len(row_values), N) self.assertEqual(len(col_values), N) @@ -154,8 +253,8 @@ def test_upper_tri(self): # The block_triangularize function permutes # to lower triangular form, so rows and # columns are transposed to assemble the blocks. - self.assertEqual(row_block_map[i], N-1-i) - self.assertEqual(col_block_map[i], N-1-i) + self.assertEqual(row_block_map[i], N - 1 - i) + self.assertEqual(col_block_map[i], N - 1 - i) def test_bordered(self): """ @@ -171,23 +270,23 @@ def test_bordered(self): col = [] data = [] # Diagonal - row.extend(range(N-1)) - col.extend(range(N-1)) - data.extend(1 for _ in range(N-1)) + row.extend(range(N - 1)) + col.extend(range(N - 1)) + data.extend(1 for _ in range(N - 1)) # Bottom row - row.extend(N-1 for _ in range(N-1)) - col.extend(range(N-1)) - data.extend(1 for _ in range(N-1)) + row.extend(N - 1 for _ in range(N - 1)) + col.extend(range(N - 1)) + data.extend(1 for _ in range(N - 1)) # Right column - row.extend(range(N-1)) - col.extend(N-1 for _ in range(N-1)) - data.extend(1 for _ in range(N-1)) + row.extend(range(N - 1)) + col.extend(N - 1 for _ in range(N - 1)) + data.extend(1 for _ in range(N - 1)) matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) - row_block_map, col_block_map = block_triangularize(matrix) + row_block_map, col_block_map = map_coords_to_block_triangular_indices(matrix) row_values = set(row_block_map.values()) col_values = set(row_block_map.values()) @@ -208,34 +307,34 @@ def test_decomposable_bordered(self): |x x x x | """ N = 5 - half = N//2 + half = N // 2 row = [] col = [] data = [] # Diagonal - row.extend(range(N-1)) - col.extend(range(N-1)) - data.extend(1 for _ in range(N-1)) + row.extend(range(N - 1)) + col.extend(range(N - 1)) + data.extend(1 for _ in range(N - 1)) # Bottom row - row.extend(N-1 for _ in range(N-1)) - col.extend(range(N-1)) - data.extend(1 for _ in range(N-1)) + row.extend(N - 1 for _ in range(N - 1)) + col.extend(range(N - 1)) + data.extend(1 for _ in range(N - 1)) # Right column - row.extend(range(half, N-1)) - col.extend(N-1 for _ in range(half, N-1)) - data.extend(1 for _ in range(half, N-1)) + row.extend(range(half, N - 1)) + col.extend(N - 1 for _ in range(half, N - 1)) + data.extend(1 for _ in range(half, N - 1)) matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) - row_block_map, col_block_map = block_triangularize(matrix) + row_block_map, col_block_map = map_coords_to_block_triangular_indices(matrix) row_values = set(row_block_map.values()) col_values = set(row_block_map.values()) - self.assertEqual(len(row_values), half+1) - self.assertEqual(len(col_values), half+1) + self.assertEqual(len(row_values), half + 1) + self.assertEqual(len(col_values), half + 1) first_half_set = set(range(half)) for i in range(N): @@ -268,30 +367,30 @@ def test_decomposable_tridiagonal(self): # Below diagonal row.extend(range(1, N)) - col.extend(range(N-1)) - data.extend(1 for _ in range(N-1)) + col.extend(range(N - 1)) + data.extend(1 for _ in range(N - 1)) # Above diagonal - row.extend(i for i in range(N-1) if not i%2) - col.extend(i+1 for i in range(N-1) if not i%2) - data.extend(1 for i in range(N-1) if not i%2) + row.extend(i for i in range(N - 1) if not i % 2) + col.extend(i + 1 for i in range(N - 1) if not i % 2) + data.extend(1 for i in range(N - 1) if not i % 2) matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) - row_block_map, col_block_map = block_triangularize(matrix) + row_block_map, col_block_map = map_coords_to_block_triangular_indices(matrix) row_values = set(row_block_map.values()) col_values = set(row_block_map.values()) - self.assertEqual(len(row_values), (N+1)//2) - self.assertEqual(len(col_values), (N+1)//2) + self.assertEqual(len(row_values), (N + 1) // 2) + self.assertEqual(len(col_values), (N + 1) // 2) - for i in range((N+1)//2): - self.assertEqual(row_block_map[2*i], i) - self.assertEqual(col_block_map[2*i], i) + for i in range((N + 1) // 2): + self.assertEqual(row_block_map[2 * i], i) + self.assertEqual(col_block_map[2 * i], i) - if 2*i+1 < N: - self.assertEqual(row_block_map[2*i+1], i) - self.assertEqual(col_block_map[2*i+1], i) + if 2 * i + 1 < N: + self.assertEqual(row_block_map[2 * i + 1], i) + self.assertEqual(col_block_map[2 * i + 1], i) def test_decomposable_tridiagonal_shuffled(self): """ @@ -314,13 +413,13 @@ def test_decomposable_tridiagonal_shuffled(self): # Below diagonal row.extend(range(1, N)) - col.extend(range(N-1)) - data.extend(1 for _ in range(N-1)) + col.extend(range(N - 1)) + data.extend(1 for _ in range(N - 1)) # Above diagonal - row.extend(i for i in range(N-1) if not i%2) - col.extend(i+1 for i in range(N-1) if not i%2) - data.extend(1 for i in range(N-1) if not i%2) + row.extend(i for i in range(N - 1) if not i % 2) + col.extend(i + 1 for i in range(N - 1) if not i % 2) + data.extend(1 for i in range(N - 1) if not i % 2) # Same results hold after applying a random permutation. row_perm = list(range(N)) @@ -333,22 +432,22 @@ def test_decomposable_tridiagonal_shuffled(self): matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) - row_block_map, col_block_map = block_triangularize(matrix) + row_block_map, col_block_map = map_coords_to_block_triangular_indices(matrix) row_values = set(row_block_map.values()) col_values = set(row_block_map.values()) - self.assertEqual(len(row_values), (N+1)//2) - self.assertEqual(len(col_values), (N+1)//2) + self.assertEqual(len(row_values), (N + 1) // 2) + self.assertEqual(len(col_values), (N + 1) // 2) - for i in range((N+1)//2): - row_idx = row_perm[2*i] - col_idx = col_perm[2*i] + for i in range((N + 1) // 2): + row_idx = row_perm[2 * i] + col_idx = col_perm[2 * i] self.assertEqual(row_block_map[row_idx], i) self.assertEqual(col_block_map[col_idx], i) - if 2*i+1 < N: - row_idx = row_perm[2*i+1] - col_idx = col_perm[2*i+1] + if 2 * i + 1 < N: + row_idx = row_perm[2 * i + 1] + col_idx = col_perm[2 * i + 1] self.assertEqual(row_block_map[row_idx], i) self.assertEqual(col_block_map[col_idx], i) @@ -373,31 +472,31 @@ def test_decomposable_tridiagonal_diagonal_blocks(self): # Below diagonal row.extend(range(1, N)) - col.extend(range(N-1)) - data.extend(1 for _ in range(N-1)) + col.extend(range(N - 1)) + data.extend(1 for _ in range(N - 1)) # Above diagonal - row.extend(i for i in range(N-1) if not i%2) - col.extend(i+1 for i in range(N-1) if not i%2) - data.extend(1 for i in range(N-1) if not i%2) + row.extend(i for i in range(N - 1) if not i % 2) + col.extend(i + 1 for i in range(N - 1) if not i % 2) + data.extend(1 for i in range(N - 1) if not i % 2) matrix = sps.coo_matrix((data, (row, col)), shape=(N, N)) row_blocks, col_blocks = get_diagonal_blocks(matrix) - self.assertEqual(len(row_blocks), (N+1)//2) - self.assertEqual(len(col_blocks), (N+1)//2) + self.assertEqual(len(row_blocks), (N + 1) // 2) + self.assertEqual(len(col_blocks), (N + 1) // 2) - for i in range((N+1)//2): + for i in range((N + 1) // 2): rows = row_blocks[i] cols = col_blocks[i] - - if 2*i+1 < N: - self.assertEqual(set(rows), {2*i, 2*i+1}) - self.assertEqual(set(cols), {2*i, 2*i+1}) + + if 2 * i + 1 < N: + self.assertEqual(set(rows), {2 * i, 2 * i + 1}) + self.assertEqual(set(cols), {2 * i, 2 * i + 1}) else: - self.assertEqual(set(rows), {2*i}) - self.assertEqual(set(cols), {2*i}) + self.assertEqual(set(rows), {2 * i}) + self.assertEqual(set(cols), {2 * i}) if __name__ == "__main__": diff --git a/pyomo/contrib/incidence_analysis/triangularize.py b/pyomo/contrib/incidence_analysis/triangularize.py index 7c5d8865c52..ac6680a367e 100644 --- a/pyomo/contrib/incidence_analysis/triangularize.py +++ b/pyomo/contrib/incidence_analysis/triangularize.py @@ -9,122 +9,195 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +from pyomo.common.deprecation import deprecated from pyomo.contrib.incidence_analysis.matching import maximum_matching +from pyomo.contrib.incidence_analysis.common.dulmage_mendelsohn import ( + # TODO: The fact that we import this function here suggests it should be + # promoted. + _get_projected_digraph, +) from pyomo.common.dependencies import networkx as nx -def block_triangularize(matrix, matching=None): - """ - Computes the necessary information to permute a matrix to block-lower - triangular form, i.e. a partition of rows and columns into an ordered - set of diagonal blocks in such a permutation. +def _get_scc_dag_of_projection(graph, top_nodes, matching): + """Return the DAG of strongly connected components of a bipartite graph, + projected with respect to a perfect matching - Arguments - --------- - matrix: A SciPy sparse matrix - matching: A perfect matching of rows and columns, in the form of a dict - mapping row indices to column indices + This data structure can be used, for instance, to identify the minimal + subsystem of constraints and variables necessary to solve a given variable + or constraint. - Returns - ------- - Two dicts. The first maps each row index to the index of its block in a - block-lower triangular permutation of the matrix. The second maps each - column index to the index of its block in a block-lower triangular - permutation of the matrix. """ - nxb = nx.algorithms.bipartite nxc = nx.algorithms.components - nxd = nx.algorithms.dag - from_biadjacency_matrix = nxb.matrix.from_biadjacency_matrix + # _get_projected_digraph treats matched edges as "in-edges", so we + # reverse the direction of edges here. + dg = _get_projected_digraph(graph, matching, top_nodes).reverse() - M, N = matrix.shape - if M != N: - raise ValueError("block_triangularize does not currently " - "support non-square matrices. Got matrix with shape %s." - % (matrix.shape,) - ) - bg = from_biadjacency_matrix(matrix) - - if matching is None: - matching = maximum_matching(matrix) - - len_matching = len(matching) - if len_matching != M: - raise ValueError("block_triangularize only supports matrices " - "that have a perfect matching of rows and columns. " - "Cardinality of maximal matching is %s" % len_matching - ) - - # Construct directed graph of rows - dg = nx.DiGraph() - dg.add_nodes_from(range(M)) - for n in dg.nodes: - col_idx = matching[n] - col_node = col_idx + M - # For all rows that share this column - for neighbor in bg[col_node]: - if neighbor != n: - # Add an edge towards this column's matched row - dg.add_edge(neighbor, n) - - # Partition the rows into strongly connected components (diagonal blocks) scc_list = list(nxc.strongly_connected_components(dg)) + n_scc = len(scc_list) node_scc_map = {n: idx for idx, scc in enumerate(scc_list) for n in scc} # Now we need to put the SCCs in the right order. We do this by performing # a topological sort on the DAG of SCCs. dag = nx.DiGraph() - for i, c in enumerate(scc_list): - dag.add_node(i) + dag.add_nodes_from(range(n_scc)) for n in dg.nodes: source_scc = node_scc_map[n] for neighbor in dg[n]: target_scc = node_scc_map[neighbor] if target_scc != source_scc: - dag.add_edge(target_scc, source_scc) - # Reverse direction of edge. This corresponds to creating - # a block lower triangular matrix. - - scc_order = list(nxd.lexicographical_topological_sort(dag)) + dag.add_edge(source_scc, target_scc) + + # Note that the matching is required to fully interpret scc_list (as it + # only contains the "top nodes") + return scc_list, dag + + +def get_scc_of_projection(graph, top_nodes, matching=None): + """Return the topologically ordered strongly connected components of a + bipartite graph, projected with respect to a perfect matching + + The provided undirected bipartite graph is projected into a directed graph + on the set of "top nodes" by treating "matched edges" as out-edges and + "unmatched edges" as in-edges. Then the strongly connected components of + the directed graph are computed. These strongly connected components are + unique, regardless of the choice of perfect matching. The strongly connected + components form a directed acyclic graph, and are returned in a topological + order. The order is unique, as ambiguities are resolved "lexicographically". + + The "direction" of the projection (where matched edges are out-edges) + leads to a block *lower* triangular permutation when the top nodes + correspond to *rows* in the bipartite graph of a matrix. + + Parameters + ---------- + graph: NetworkX Graph + A bipartite graph + top_nodes: list + One of the bipartite sets in the graph + matching: dict + Maps each node in ``top_nodes`` to its matched node - scc_block_map = {c: i for i, c in enumerate(scc_order)} - row_block_map = {n: scc_block_map[c] for n, c in node_scc_map.items()} - # ^ This maps row indices to the blocks they belong to. - - # Invert the matching to map row indices to column indices - col_row_map = {c: r for r, c in matching.items()} - assert len(col_row_map) == M + Returns + ------- + list of lists + The outer list is a list of strongly connected components. Each + strongly connected component is a list of tuples of matched nodes. + The first node is a "top node", and the second is an "other node". - col_block_map = {c: row_block_map[col_row_map[c]] for c in range(N)} + """ + nxb = nx.algorithms.bipartite + nxd = nx.algorithms.dag + if not nxb.is_bipartite(graph): + raise RuntimeError("Provided graph is not bipartite.") + M = len(top_nodes) + N = len(graph.nodes) - M + if M != N: + raise RuntimeError( + "get_scc_of_projection does not support bipartite graphs with" + " bipartite sets of different cardinalities. Got sizes %s and" + " %s." % (M, N) + ) + if matching is None: + # This matching maps top nodes to "other nodes" *and* other nodes + # back to top nodes. + matching = nxb.maximum_matching(graph, top_nodes=top_nodes) + if len(matching) != 2 * M: + raise RuntimeError( + "get_scc_of_projection does not support bipartite graphs without" + " a perfect matching. Got a graph with %s nodes per bipartite set" + " and a matching of cardinality %s." % (M, (len(matching) / 2)) + ) + + scc_list, dag = _get_scc_dag_of_projection(graph, top_nodes, matching) + scc_order = list(nxd.lexicographical_topological_sort(dag)) - return row_block_map, col_block_map + # The "natural" return type, here, is a list of lists. Each inner list + # is an SCC, and contains tuples of nodes. The "top node", and its matched + # "bottom node". + ordered_node_subsets = [ + sorted([(i, matching[i]) for i in scc_list[scc_idx]]) for scc_idx in scc_order + ] + return ordered_node_subsets -def get_blocks_from_maps(row_block_map, col_block_map): - """ - Gets the row and column coordinates of each diagonal block in a - block triangularization from maps of row/column coordinates to - block indices. +def block_triangularize(matrix, matching=None): + """Compute ordered partitions of the matrix's rows and columns that + permute the matrix to block lower triangular form - Arguments - --------- - row_block_map: dict - Dict mapping each row coordinate to the coordinate of the - block it belongs to + Subsets in the partition correspond to diagonal blocks in the block + triangularization. The order is topological, with ties broken + "lexicographically". - col_block_map: dict - Dict mapping each column coordinate to the coordinate of the - block it belongs to + Parameters + ---------- + matrix: ``scipy.sparse.coo_matrix`` + Matrix whose rows and columns will be permuted + matching: ``dict`` + A perfect matching. Maps rows to columns *and* columns back to rows. Returns ------- - tuple of lists - The first list is a list-of-lists of row indices that partitions - the indices into diagonal blocks. The second list is a - list-of-lists of column indices that partitions the indices into - diagonal blocks. + row_partition: list of lists + A partition of rows. The inner lists hold integer row coordinates. + col_partition: list of lists + A partition of columns. The inner lists hold integer column coordinates. + + + .. note:: + + **Breaking change in Pyomo 6.5.0** + + The pre-6.5.0 ``block_triangularize`` function returned maps from + each row or column to the index of its block in a block + lower triangularization as the original intent of this function + was to identify when coordinates do or don't share a diagonal block + in this partition. Since then, the dominant use case of + ``block_triangularize`` has been to partition variables and + constraints into these blocks and inspect or solve each block + individually. A natural return type for this functionality is the + ordered partition of rows and columns, as lists of lists. + This functionality was previously available via the + ``get_diagonal_blocks`` method, which was confusing as it did not + capture that the partition was the diagonal of a block + *triangularization* (as opposed to diagonalization). The pre-6.5.0 + functionality of ``block_triangularize`` is still available via the + ``map_coords_to_block_triangular_indices`` function. """ + nxb = nx.algorithms.bipartite + nxc = nx.algorithms.components + nxd = nx.algorithms.dag + from_biadjacency_matrix = nxb.matrix.from_biadjacency_matrix + M, N = matrix.shape + if M != N: + raise ValueError( + "block_triangularize does not currently support non-square" + " matrices. Got matrix with shape %s." % ((M, N),) + ) + graph = from_biadjacency_matrix(matrix) + row_nodes = list(range(M)) + sccs = get_scc_of_projection(graph, row_nodes, matching=matching) + row_partition = [[i for i, j in scc] for scc in sccs] + col_partition = [[j - M for i, j in scc] for scc in sccs] + return row_partition, col_partition + + +def map_coords_to_block_triangular_indices(matrix, matching=None): + row_blocks, col_blocks = block_triangularize(matrix, matching=matching) + row_idx_map = {r: idx for idx, rblock in enumerate(row_blocks) for r in rblock} + col_idx_map = {c: idx for idx, cblock in enumerate(col_blocks) for c in cblock} + return row_idx_map, col_idx_map + + +@deprecated( + msg=( + "``get_blocks_from_maps`` is deprecated. This functionality has been" + " incorporated into ``block_triangularize``." + ), + version="6.5.0", +) +def get_blocks_from_maps(row_block_map, col_block_map): blocks = set(row_block_map.values()) assert blocks == set(col_block_map.values()) n_blocks = len(blocks) @@ -137,31 +210,12 @@ def get_blocks_from_maps(row_block_map, col_block_map): return block_rows, block_cols +@deprecated( + msg=( + "``get_diagonal_blocks`` has been deprecated. Please use" + " ``block_triangularize`` instead." + ), + version="6.5.0", +) def get_diagonal_blocks(matrix, matching=None): - """ - Gets the diagonal blocks of a block triangularization of the provided - matrix. - - Arguments - --------- - coo_matrix - Matrix to get the diagonal blocks of - - matching - Dict mapping row indices to column indices in the perfect matching - to be used by the block triangularization. - - Returns - ------- - tuple of lists - The first list is a list-of-lists of row indices that partitions - the indices into diagonal blocks. The second list is a - list-of-lists of column indices that partitions the indices into - diagonal blocks. - - """ - row_block_map, col_block_map = block_triangularize( - matrix, matching=matching - ) - block_rows, block_cols = get_blocks_from_maps(row_block_map, col_block_map) - return block_rows, block_cols + return block_triangularize(matrix, matching=matching) diff --git a/pyomo/contrib/incidence_analysis/util.py b/pyomo/contrib/incidence_analysis/util.py index 9e592dad922..a127161d33d 100644 --- a/pyomo/contrib/incidence_analysis/util.py +++ b/pyomo/contrib/incidence_analysis/util.py @@ -9,158 +9,14 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.base.var import Var -from pyomo.core.base.constraint import Constraint -from pyomo.common.collections import ComponentSet -from pyomo.core.expr.visitor import identify_variables -from pyomo.util.calc_var_value import calculate_variable_from_constraint -from pyomo.util.subsystems import ( - create_subsystem_block, - TemporarySubsystemManager, - generate_subsystem_blocks, - ) -from pyomo.contrib.incidence_analysis.interface import IncidenceGraphInterface - - -def generate_strongly_connected_components( - constraints, - variables=None, - include_fixed=False, - ): - """ Performs a block triangularization of the incidence matrix - of the provided constraints and variables, and yields a block that - contains the constraints and variables of each diagonal block - (strongly connected component). - - Arguments - --------- - constraints: List of Pyomo constraint data objects - Constraints used to generate strongly connected components. - variables: List of Pyomo variable data objects - Variables that may participate in strongly connected components. - If not provided, all variables in the constraints will be used. - include_fixed: Bool - Indicates whether fixed variables will be included when - identifying variables in constraints. - - Yields - ------ - Blocks containing the variables and constraints of every strongly - connected component, in a topological order, as well as the - "input variables" for that block - - """ - if variables is None: - var_set = ComponentSet() - variables = [] - for con in constraints: - for var in identify_variables( - con.expr, - include_fixed=include_fixed, - ): - if var not in var_set: - variables.append(var) - var_set.add(var) - - assert len(variables) == len(constraints) - igraph = IncidenceGraphInterface() - var_block_map, con_block_map = igraph.block_triangularize( - variables=variables, - constraints=constraints, - ) - blocks = set(var_block_map.values()) - n_blocks = len(blocks) - var_blocks = [[] for b in range(n_blocks)] - con_blocks = [[] for b in range(n_blocks)] - for var, b in var_block_map.items(): - var_blocks[b].append(var) - for con, b in con_block_map.items(): - con_blocks[b].append(con) - subsets = list(zip(con_blocks, var_blocks)) - for block, inputs in generate_subsystem_blocks( - subsets, - include_fixed=include_fixed, - ): - # TODO: How does len scale for reference-to-list? - assert len(block.vars) == len(block.cons) - yield (block, inputs) - - -def solve_strongly_connected_components( - block, - solver=None, - solve_kwds=None, - calc_var_kwds=None, - ): - """ This function solves a square block of variables and equality - constraints by solving strongly connected components individually. - Strongly connected components (of the directed graph of constraints - obtained from a perfect matching of variables and constraints) are - the diagonal blocks in a block triangularization of the incidence - matrix, so solving the strongly connected components in topological - order is sufficient to solve the entire block. - - One-by-one blocks are solved using Pyomo's - calculate_variable_from_constraint function, while higher-dimension - blocks are solved using the user-provided solver object. - - Arguments - --------- - block: Pyomo Block - The Pyomo block whose variables and constraints will be solved - solver: Pyomo solver object - The solver object that will be used to solve strongly connected - components of size greater than one constraint. Must implement - a solve method. - solve_kwds: Dictionary - Keyword arguments for the solver's solve method - calc_var_kwds: Dictionary - Keyword arguments for calculate_variable_from_constraint - - Returns - ------- - List of results objects returned by each call to solve - - """ - if solve_kwds is None: - solve_kwds = {} - if calc_var_kwds is None: - calc_var_kwds = {} - - constraints = list(block.component_data_objects(Constraint, active=True)) - var_set = ComponentSet() - variables = [] - for con in constraints: - for var in identify_variables(con.expr, include_fixed=False): - # Because we are solving, we do not want to include fixed variables - if var not in var_set: - variables.append(var) - var_set.add(var) - - res_list = [] - for scc, inputs in generate_strongly_connected_components( - constraints, - variables, - ): - with TemporarySubsystemManager(to_fix=inputs): - if len(scc.vars) == 1: - results = calculate_variable_from_constraint( - scc.vars[0], scc.cons[0], **calc_var_kwds - ) - res_list.append(results) - else: - if solver is None: - # NOTE: Use local name to avoid slow generation of this - # error message if a user provides a large, non-decomposable - # block with no solver. - vars = [var.local_name for var in scc.vars.values()] - cons = [con.local_name for con in scc.cons.values()] - raise RuntimeError( - "An external solver is required if block has strongly\n" - "connected components of size greater than one (is not " - "a DAG).\nGot an SCC with components: \n%s\n%s" - % (vars, cons) - ) - results = solver.solve(scc, **solve_kwds) - res_list.append(results) - return res_list +from pyomo.common.deprecation import relocated_module + +msg = ( + "The 'pyomo.contrib.incidence_analysis.util' module has been moved to" + " 'pyomo.contrib.incidence_analysis.scc_solver'. However, we recommend" + " importing this functionality (e.g. solve_strongly_connected_components)" + " directly from 'pyomo.contrib.incidence_analysis'." +) +relocated_module( + "pyomo.contrib.incidence_analysis.scc_solver", version='6.5.0', msg=msg +) diff --git a/pyomo/contrib/interior_point/examples/ex1.py b/pyomo/contrib/interior_point/examples/ex1.py index 0456e27f0eb..d9931e1daa8 100644 --- a/pyomo/contrib/interior_point/examples/ex1.py +++ b/pyomo/contrib/interior_point/examples/ex1.py @@ -27,12 +27,12 @@ m.y = pyo.Var() m.obj = pyo.Objective(expr=m.x**2 + m.y**2) m.c1 = pyo.Constraint(expr=m.y == pyo.exp(m.x)) -m.c2 = pyo.Constraint(expr=m.y >= (m.x - 1)**2) +m.c2 = pyo.Constraint(expr=m.y >= (m.x - 1) ** 2) interface = InteriorPointInterface(m) linear_solver = MumpsInterface( - # log_filename='lin_sol.log', - icntl_options={11: 1}, # Set error level to 1 (most detailed) - ) + # log_filename='lin_sol.log', + icntl_options={11: 1} # Set error level to 1 (most detailed) +) ip_solver = InteriorPointSolver(linear_solver) x, duals_eq, duals_ineq = ip_solver.solve(interface) diff --git a/pyomo/contrib/interior_point/interface.py b/pyomo/contrib/interior_point/interface.py index cf449633683..38d91be5566 100644 --- a/pyomo/contrib/interior_point/interface.py +++ b/pyomo/contrib/interior_point/interface.py @@ -237,12 +237,14 @@ def get_delta_duals_slacks_ub(self): def regularize_equality_gradient(self, kkt, coef, copy_kkt=True): raise RuntimeError( 'Equality gradient regularization is necessary but no ' - 'function has been implemented for doing so.') + 'function has been implemented for doing so.' + ) def regularize_hessian(self, kkt, coef, copy_kkt=True): raise RuntimeError( 'Hessian of Lagrangian regularization is necessary but no ' - 'function has been implemented for doing so.') + 'function has been implemented for doing so.' + ) class InteriorPointInterface(BaseInteriorPointInterface): @@ -255,9 +257,11 @@ def __init__(self, pyomo_model): self._slacks = self.init_slacks() # set the init_duals_primals_lb/ub from ipopt_zL_out, ipopt_zU_out if available - # need to compress them as well and initialize the duals_primals_lb/ub - self._init_duals_primals_lb, self._init_duals_primals_ub =\ - self._get_full_duals_primals_bounds() + # need to compress them as well and initialize the duals_primals_lb/ub + ( + self._init_duals_primals_lb, + self._init_duals_primals_ub, + ) = self._get_full_duals_primals_bounds() self._init_duals_primals_lb[np.isneginf(self._nlp.primals_lb())] = 0 self._init_duals_primals_ub[np.isinf(self._nlp.primals_ub())] = 0 self._duals_primals_lb = self._init_duals_primals_lb.copy() @@ -416,8 +420,9 @@ def evaluate_primal_dual_kkt_matrix(self, timer=None): primals = self._nlp.get_primals() timer.start('hess block') - data = (duals_primals_lb/(primals - self._nlp.primals_lb()) + - duals_primals_ub/(self._nlp.primals_ub() - primals)) + data = duals_primals_lb / ( + primals - self._nlp.primals_lb() + ) + duals_primals_ub / (self._nlp.primals_ub() - primals) n = self._nlp.n_primals() indices = np.arange(n) hess_block = scipy.sparse.coo_matrix((data, (indices, indices)), shape=(n, n)) @@ -425,8 +430,9 @@ def evaluate_primal_dual_kkt_matrix(self, timer=None): timer.stop('hess block') timer.start('slack block') - data = (duals_slacks_lb/(self._slacks - self._nlp.ineq_lb()) + - duals_slacks_ub/(self._nlp.ineq_ub() - self._slacks)) + data = duals_slacks_lb / ( + self._slacks - self._nlp.ineq_lb() + ) + duals_slacks_ub / (self._nlp.ineq_ub() - self._slacks) n = self._nlp.n_ineq_constraints() indices = np.arange(n) slack_block = scipy.sparse.coo_matrix((data, (indices, indices)), shape=(n, n)) @@ -440,12 +446,12 @@ def evaluate_primal_dual_kkt_matrix(self, timer=None): kkt.set_block(0, 2, jac_eq.transpose()) kkt.set_block(3, 0, jac_ineq) kkt.set_block(0, 3, jac_ineq.transpose()) - kkt.set_block(3, 1, -scipy.sparse.identity( - self._nlp.n_ineq_constraints(), - format='coo')) - kkt.set_block(1, 3, -scipy.sparse.identity( - self._nlp.n_ineq_constraints(), - format='coo')) + kkt.set_block( + 3, 1, -scipy.sparse.identity(self._nlp.n_ineq_constraints(), format='coo') + ) + kkt.set_block( + 1, 3, -scipy.sparse.identity(self._nlp.n_ineq_constraints(), format='coo') + ) timer.stop('set block') return kkt @@ -465,17 +471,21 @@ def evaluate_primal_dual_kkt_rhs(self, timer=None): timer.stop('eval cons') timer.start('grad_lag_primals') - grad_lag_primals = (grad_obj + - jac_eq.transpose() * self._nlp.get_duals_eq() + - jac_ineq.transpose() * self._nlp.get_duals_ineq() - - self._barrier / (self._nlp.get_primals() - self._nlp.primals_lb()) + - self._barrier / (self._nlp.primals_ub() - self._nlp.get_primals())) + grad_lag_primals = ( + grad_obj + + jac_eq.transpose() * self._nlp.get_duals_eq() + + jac_ineq.transpose() * self._nlp.get_duals_ineq() + - self._barrier / (self._nlp.get_primals() - self._nlp.primals_lb()) + + self._barrier / (self._nlp.primals_ub() - self._nlp.get_primals()) + ) timer.stop('grad_lag_primals') timer.start('grad_lag_slacks') - grad_lag_slacks = (-self._nlp.get_duals_ineq() - - self._barrier / (self._slacks - self._nlp.ineq_lb()) + - self._barrier / (self._nlp.ineq_ub() - self._slacks)) + grad_lag_slacks = ( + -self._nlp.get_duals_ineq() + - self._barrier / (self._slacks - self._nlp.ineq_lb()) + + self._barrier / (self._nlp.ineq_ub() - self._slacks) + ) timer.stop('grad_lag_slacks') rhs = BlockVector(4) @@ -505,27 +515,31 @@ def get_delta_duals_ineq(self): return self._delta_duals_ineq def get_delta_duals_primals_lb(self): - res = (((self._barrier - self._duals_primals_lb * self._delta_primals) / - (self._nlp.get_primals() - self._nlp.primals_lb())) - - self._duals_primals_lb) + res = ( + (self._barrier - self._duals_primals_lb * self._delta_primals) + / (self._nlp.get_primals() - self._nlp.primals_lb()) + ) - self._duals_primals_lb return res def get_delta_duals_primals_ub(self): - res = (((self._barrier + self._duals_primals_ub * self._delta_primals) / - (self._nlp.primals_ub() - self._nlp.get_primals())) - - self._duals_primals_ub) + res = ( + (self._barrier + self._duals_primals_ub * self._delta_primals) + / (self._nlp.primals_ub() - self._nlp.get_primals()) + ) - self._duals_primals_ub return res def get_delta_duals_slacks_lb(self): - res = (((self._barrier - self._duals_slacks_lb * self._delta_slacks) / - (self._slacks - self._nlp.ineq_lb())) - - self._duals_slacks_lb) + res = ( + (self._barrier - self._duals_slacks_lb * self._delta_slacks) + / (self._slacks - self._nlp.ineq_lb()) + ) - self._duals_slacks_lb return res def get_delta_duals_slacks_ub(self): - res = (((self._barrier + self._duals_slacks_ub * self._delta_slacks) / - (self._nlp.ineq_ub() - self._slacks)) - - self._duals_slacks_ub) + res = ( + (self._barrier + self._duals_slacks_ub * self._delta_slacks) + / (self._nlp.ineq_ub() - self._slacks) + ) - self._duals_slacks_ub return res def evaluate_objective(self): @@ -556,9 +570,9 @@ def regularize_equality_gradient(self, kkt, coef, copy_kkt=True): if copy_kkt: kkt = kkt.copy() reg_coef = coef - ptb = (reg_coef * - scipy.sparse.identity(self._nlp.n_eq_constraints(), - format='coo')) + ptb = reg_coef * scipy.sparse.identity( + self._nlp.n_eq_constraints(), format='coo' + ) kkt.set_block(2, 2, ptb) return kkt @@ -577,21 +591,22 @@ def _get_full_duals_primals_bounds(self): full_duals_primals_lb = None full_duals_primals_ub = None # Check in case _nlp was constructed as an AmplNLP (from an nl file) - if (hasattr(self._nlp, 'pyomo_model') and - hasattr(self._nlp, 'get_pyomo_variables')): + if hasattr(self._nlp, 'pyomo_model') and hasattr( + self._nlp, 'get_pyomo_variables' + ): pyomo_model = self._nlp.pyomo_model() pyomo_variables = self._nlp.get_pyomo_variables() - if hasattr(pyomo_model,'ipopt_zL_out'): - zL_suffix = pyomo_model.ipopt_zL_out + if hasattr(pyomo_model, 'ipopt_zL_out'): + zL_suffix = pyomo_model.ipopt_zL_out full_duals_primals_lb = np.empty(self._nlp.n_primals()) - for i,v in enumerate(pyomo_variables): + for i, v in enumerate(pyomo_variables): if v in zL_suffix: full_duals_primals_lb[i] = zL_suffix[v] - if hasattr(pyomo_model,'ipopt_zU_out'): - zU_suffix = pyomo_model.ipopt_zU_out + if hasattr(pyomo_model, 'ipopt_zU_out'): + zU_suffix = pyomo_model.ipopt_zU_out full_duals_primals_ub = np.empty(self._nlp.n_primals()) - for i,v in enumerate(pyomo_variables): + for i, v in enumerate(pyomo_variables): if v in zU_suffix: full_duals_primals_ub[i] = zU_suffix[v] @@ -605,7 +620,9 @@ def _get_full_duals_primals_bounds(self): def load_primals_into_pyomo_model(self): if not isinstance(self._nlp, pyomo_nlp.PyomoNLP): - raise RuntimeError('Can only load primals into a pyomo model if a pyomo model was used in the constructor.') + raise RuntimeError( + 'Can only load primals into a pyomo model if a pyomo model was used in the constructor.' + ) pyomo_variables = self._nlp.get_pyomo_variables() primals = self._nlp.get_primals() diff --git a/pyomo/contrib/interior_point/interior_point.py b/pyomo/contrib/interior_point/interior_point.py index b421fc128c9..00d26ddef03 100644 --- a/pyomo/contrib/interior_point/interior_point.py +++ b/pyomo/contrib/interior_point/interior_point.py @@ -9,7 +9,10 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.contrib.pynumero.interfaces.utils import build_bounds_mask, build_compression_matrix +from pyomo.contrib.pynumero.interfaces.utils import ( + build_bounds_mask, + build_compression_matrix, +) import numpy as np import logging import time @@ -37,12 +40,13 @@ class InteriorPointStatus(enum.Enum): class LinearSolveContext(object): - def __init__(self, - interior_point_logger, - linear_solver_logger, - filename=None, - level=logging.INFO): - + def __init__( + self, + interior_point_logger, + linear_solver_logger, + filename=None, + level=logging.INFO, + ): self.interior_point_logger = interior_point_logger self.linear_solver_logger = linear_solver_logger self.filename = filename @@ -58,7 +62,6 @@ def __enter__(self): self.linear_solver_logger.addHandler(self.handler) self.interior_point_logger.addHandler(self.handler) - def __exit__(self, et, ev, tb): self.linear_solver_logger.propagate = True self.interior_point_logger.propagate = True @@ -70,7 +73,7 @@ def __exit__(self, et, ev, tb): # How should the RegContext work? # TODO: in this class, use the linear_solver_context to ... # Use linear_solver_logger to write iter_no and reg_coef -# +# # Define a method for logging IP_reg_info to the linear solver log # Method can be called within linear_solve_context class FactorizationContext(object): @@ -89,45 +92,54 @@ def __exit__(self, et, ev, tb): # Will this swallow exceptions in this context? def log_header(self): - self.logger.debug('{_iter:<10}' - '{reg_iter:<10}' - '{num_realloc:<10}' - '{reg_coef:<10}' - '{neg_eig:<10}' - '{status:<10}'.format( - _iter='Iter', - reg_iter='reg_iter', - num_realloc='# realloc', - reg_coef='reg_coef', - neg_eig='neg_eig', - status='status')) + self.logger.debug( + '{_iter:<10}' + '{reg_iter:<10}' + '{num_realloc:<10}' + '{reg_coef:<10}' + '{neg_eig:<10}' + '{status:<10}'.format( + _iter='Iter', + reg_iter='reg_iter', + num_realloc='# realloc', + reg_coef='reg_coef', + neg_eig='neg_eig', + status='status', + ) + ) def log_info(self, _iter, reg_iter, num_realloc, coef, neg_eig, status): - self.logger.debug('{_iter:<10}' - '{reg_iter:<10}' - '{num_realloc:<10}' - '{reg_coef:<10.2e}' - '{neg_eig:<10}' - '{status:<10}'.format( - _iter=_iter, - reg_iter=reg_iter, - num_realloc=num_realloc, - reg_coef=coef, - neg_eig=str(neg_eig), - status=status.name)) + self.logger.debug( + '{_iter:<10}' + '{reg_iter:<10}' + '{num_realloc:<10}' + '{reg_coef:<10.2e}' + '{neg_eig:<10}' + '{status:<10}'.format( + _iter=_iter, + reg_iter=reg_iter, + num_realloc=num_realloc, + reg_coef=coef, + neg_eig=str(neg_eig), + status=status.name, + ) + ) class InteriorPointSolver(object): """ Class for creating interior point solvers with different options """ - def __init__(self, - linear_solver, - max_iter=100, - tol=1e-8, - linear_solver_log_filename=None, - max_reallocation_iterations=5, - reallocation_factor=2): + + def __init__( + self, + linear_solver, + max_iter=100, + tol=1e-8, + linear_solver_log_filename=None, + max_reallocation_iterations=5, + reallocation_factor=2, + ): self.linear_solver = linear_solver self.max_iter = max_iter self.tol = tol @@ -150,14 +162,15 @@ def __init__(self, pass self.linear_solver_logger = self.linear_solver.getLogger() - self.linear_solve_context = LinearSolveContext(self.logger, - self.linear_solver_logger, - self.linear_solver_log_filename) + self.linear_solve_context = LinearSolveContext( + self.logger, self.linear_solver_logger, self.linear_solver_log_filename + ) def update_barrier_parameter(self): - self._barrier_parameter = max(self._minimum_barrier_parameter, - min(0.5 * self._barrier_parameter, - self._barrier_parameter ** 1.5)) + self._barrier_parameter = max( + self._minimum_barrier_parameter, + min(0.5 * self._barrier_parameter, self._barrier_parameter**1.5), + ) def set_linear_solver(self, linear_solver): """This method exists to hopefully make it easy to try the same IP @@ -178,7 +191,7 @@ def solve(self, interface, timer=None, report_timing=False): Parameters ---------- interface: pyomo.contrib.interior_point.interface.BaseInteriorPointInterface - The interior point interface. This object handles the function evaluation, + The interior point interface. This object handles the function evaluation, building the KKT matrix, and building the KKT right hand side. timer: HierarchicalTimer report_timing: bool @@ -212,31 +225,35 @@ def solve(self, interface, timer=None, report_timing=False): self.process_init_duals_ub(duals_primals_ub, self.interface.primals_ub()) self.process_init_duals_lb(duals_slacks_lb, self.interface.ineq_lb()) self.process_init_duals_ub(duals_slacks_ub, self.interface.ineq_ub()) - + interface.set_barrier_parameter(self._barrier_parameter) alpha_primal_max = 1 alpha_dual_max = 1 - self.logger.info('{_iter:<6}' - '{objective:<11}' - '{primal_inf:<11}' - '{dual_inf:<11}' - '{compl_inf:<11}' - '{barrier:<11}' - '{alpha_p:<11}' - '{alpha_d:<11}' - '{reg:<11}' - '{time:<7}'.format(_iter='Iter', - objective='Objective', - primal_inf='Prim Inf', - dual_inf='Dual Inf', - compl_inf='Comp Inf', - barrier='Barrier', - alpha_p='Prim Step', - alpha_d='Dual Step', - reg='Reg', - time='Time')) + self.logger.info( + '{_iter:<6}' + '{objective:<11}' + '{primal_inf:<11}' + '{dual_inf:<11}' + '{compl_inf:<11}' + '{barrier:<11}' + '{alpha_p:<11}' + '{alpha_d:<11}' + '{reg:<11}' + '{time:<7}'.format( + _iter='Iter', + objective='Objective', + primal_inf='Prim Inf', + dual_inf='Dual Inf', + compl_inf='Comp Inf', + barrier='Barrier', + alpha_p='Prim Step', + alpha_d='Dual Step', + reg='Reg', + time='Time', + ) + ) reg_coef = 0 @@ -256,41 +273,47 @@ def solve(self, interface, timer=None, report_timing=False): interface.set_duals_slacks_ub(duals_slacks_ub) timer.start('convergence check') - primal_inf, dual_inf, complimentarity_inf = \ - self.check_convergence(barrier=0, timer=timer) + primal_inf, dual_inf, complimentarity_inf = self.check_convergence( + barrier=0, timer=timer + ) timer.stop('convergence check') objective = interface.evaluate_objective() - self.logger.info('{_iter:<6}' - '{objective:<11.2e}' - '{primal_inf:<11.2e}' - '{dual_inf:<11.2e}' - '{compl_inf:<11.2e}' - '{barrier:<11.2e}' - '{alpha_p:<11.2e}' - '{alpha_d:<11.2e}' - '{reg:<11.2e}' - '{time:<7.3f}'.format(_iter=_iter, - objective=objective, - primal_inf=primal_inf, - dual_inf=dual_inf, - compl_inf=complimentarity_inf, - barrier=self._barrier_parameter, - alpha_p=alpha_primal_max, - alpha_d=alpha_dual_max, - reg=reg_coef, - time=time.time() - t0)) + self.logger.info( + '{_iter:<6}' + '{objective:<11.2e}' + '{primal_inf:<11.2e}' + '{dual_inf:<11.2e}' + '{compl_inf:<11.2e}' + '{barrier:<11.2e}' + '{alpha_p:<11.2e}' + '{alpha_d:<11.2e}' + '{reg:<11.2e}' + '{time:<7.3f}'.format( + _iter=_iter, + objective=objective, + primal_inf=primal_inf, + dual_inf=dual_inf, + compl_inf=complimentarity_inf, + barrier=self._barrier_parameter, + alpha_p=alpha_primal_max, + alpha_d=alpha_dual_max, + reg=reg_coef, + time=time.time() - t0, + ) + ) if max(primal_inf, dual_inf, complimentarity_inf) <= tol: status = InteriorPointStatus.optimal break timer.start('convergence check') - primal_inf, dual_inf, complimentarity_inf = \ - self.check_convergence( - barrier=self._barrier_parameter, - timer=timer) + primal_inf, dual_inf, complimentarity_inf = self.check_convergence( + barrier=self._barrier_parameter, timer=timer + ) timer.stop('convergence check') - if max(primal_inf, dual_inf, complimentarity_inf) \ - <= 0.1 * self._barrier_parameter: + if ( + max(primal_inf, dual_inf, complimentarity_inf) + <= 0.1 * self._barrier_parameter + ): # This comparison is made with barrier problem infeasibility. # Sometimes have trouble getting dual infeasibility low enough self.update_barrier_parameter() @@ -320,8 +343,7 @@ def solve(self, interface, timer=None, report_timing=False): interface.set_primal_dual_kkt_solution(delta) timer.start('frac boundary') - alpha_primal_max, alpha_dual_max = \ - self.fraction_to_the_boundary() + alpha_primal_max, alpha_dual_max = self.fraction_to_the_boundary() timer.stop('frac boundary') delta_primals = interface.get_delta_primals() delta_slacks = interface.get_delta_slacks() @@ -347,74 +369,86 @@ def solve(self, interface, timer=None, report_timing=False): return status def factorize(self, kkt, timer=None): - desired_n_neg_evals = (self.interface.n_eq_constraints() + - self.interface.n_ineq_constraints()) + desired_n_neg_evals = ( + self.interface.n_eq_constraints() + self.interface.n_ineq_constraints() + ) reg_iter = 0 with self.factorization_context as fact_con: status, num_realloc = try_factorization_and_reallocation( - kkt=kkt, - linear_solver=self.linear_solver, - reallocation_factor=self.reallocation_factor, - max_iter=self.max_reallocation_iterations, - timer=timer) - if status not in {LinearSolverStatus.successful, - LinearSolverStatus.singular}: + kkt=kkt, + linear_solver=self.linear_solver, + reallocation_factor=self.reallocation_factor, + max_iter=self.max_reallocation_iterations, + timer=timer, + ) + if status not in { + LinearSolverStatus.successful, + LinearSolverStatus.singular, + }: raise RuntimeError( - 'Could not factorize KKT system; linear solver status: ' - + str(status)) + 'Could not factorize KKT system; linear solver status: ' + + str(status) + ) if status == LinearSolverStatus.successful: neg_eig = self.linear_solver.get_inertia()[1] else: neg_eig = None fact_con.log_info( - _iter=self._iter, - reg_iter=reg_iter, - num_realloc=num_realloc, - coef=0, - neg_eig=neg_eig, - status=status) + _iter=self._iter, + reg_iter=reg_iter, + num_realloc=num_realloc, + coef=0, + neg_eig=neg_eig, + status=status, + ) reg_iter += 1 if status == LinearSolverStatus.singular: - constraint_reg_coef = (self.base_eq_reg_coef * - self._barrier_parameter**0.25) + constraint_reg_coef = ( + self.base_eq_reg_coef * self._barrier_parameter**0.25 + ) kkt = self.interface.regularize_equality_gradient( - kkt=kkt, - coef=constraint_reg_coef, - copy_kkt=False) + kkt=kkt, coef=constraint_reg_coef, copy_kkt=False + ) total_hess_reg_coef = self.hess_reg_coef last_hess_reg_coef = 0 - while (neg_eig != desired_n_neg_evals or - status == LinearSolverStatus.singular): + while ( + neg_eig != desired_n_neg_evals or status == LinearSolverStatus.singular + ): kkt = self.interface.regularize_hessian( - kkt=kkt, - coef=total_hess_reg_coef - last_hess_reg_coef, - copy_kkt=False) + kkt=kkt, + coef=total_hess_reg_coef - last_hess_reg_coef, + copy_kkt=False, + ) status, num_realloc = try_factorization_and_reallocation( - kkt=kkt, - linear_solver=self.linear_solver, - reallocation_factor=self.reallocation_factor, - max_iter=self.max_reallocation_iterations, - timer=timer) + kkt=kkt, + linear_solver=self.linear_solver, + reallocation_factor=self.reallocation_factor, + max_iter=self.max_reallocation_iterations, + timer=timer, + ) if status != LinearSolverStatus.successful: raise RuntimeError( - 'Could not factorize KKT system; linear solver status: ' - + str(status)) + 'Could not factorize KKT system; linear solver status: ' + + str(status) + ) neg_eig = self.linear_solver.get_inertia()[1] fact_con.log_info( - _iter=self._iter, - reg_iter=reg_iter, - num_realloc=num_realloc, - coef=total_hess_reg_coef, - neg_eig=neg_eig, - status=status) + _iter=self._iter, + reg_iter=reg_iter, + num_realloc=num_realloc, + coef=total_hess_reg_coef, + neg_eig=neg_eig, + status=status, + ) reg_iter += 1 if reg_iter > self.max_reg_iter: raise RuntimeError( - 'Exceeded maximum number of regularization iterations.') + 'Exceeded maximum number of regularization iterations.' + ) last_hess_reg_coef = total_hess_reg_coef total_hess_reg_coef *= self.reg_factor_increase @@ -435,7 +469,7 @@ def check_convergence(self, barrier, timer=None): ---------- barrier: float timer: HierarchicalTimer - + Returns ------- primal_inf: float @@ -448,8 +482,7 @@ def check_convergence(self, barrier, timer=None): interface = self.interface slacks = interface.get_slacks() timer.start('grad obj') - grad_obj = interface.get_obj_factor() * \ - interface.evaluate_grad_objective() + grad_obj = interface.get_obj_factor() * interface.evaluate_grad_objective() timer.stop('grad obj') timer.start('jac eq') jac_eq = interface.evaluate_jacobian_eq() @@ -492,9 +525,7 @@ def check_convergence(self, barrier, timer=None): grad_lag_primals += duals_primals_ub timer.stop('grad_lag_primals') timer.start('grad_lag_slacks') - grad_lag_slacks = (-duals_ineq - - duals_slacks_lb + - duals_slacks_ub) + grad_lag_slacks = -duals_ineq - duals_slacks_lb + duals_slacks_ub timer.stop('grad_lag_slacks') timer.start('bound resids') primals_lb_resid = (primals - primals_lb_mod) * duals_primals_lb - barrier @@ -516,14 +547,14 @@ def check_convergence(self, barrier, timer=None): else: max_ineq_resid = np.max(np.abs(ineq_resid)) primal_inf = max(max_eq_resid, max_ineq_resid) - + max_grad_lag_primals = np.max(np.abs(grad_lag_primals)) if grad_lag_slacks.size == 0: max_grad_lag_slacks = 0 else: max_grad_lag_slacks = np.max(np.abs(grad_lag_slacks)) dual_inf = max(max_grad_lag_primals, max_grad_lag_slacks) - + if primals_lb_resid.size == 0: max_primals_lb_resid = 0 else: @@ -540,18 +571,22 @@ def check_convergence(self, barrier, timer=None): max_slacks_ub_resid = 0 else: max_slacks_ub_resid = np.max(np.abs(slacks_ub_resid)) - complimentarity_inf = max(max_primals_lb_resid, max_primals_ub_resid, - max_slacks_lb_resid, max_slacks_ub_resid) - + complimentarity_inf = max( + max_primals_lb_resid, + max_primals_ub_resid, + max_slacks_lb_resid, + max_slacks_ub_resid, + ) + return primal_inf, dual_inf, complimentarity_inf def fraction_to_the_boundary(self): - return fraction_to_the_boundary(self.interface, - 1 - self._barrier_parameter) + return fraction_to_the_boundary(self.interface, 1 - self._barrier_parameter) -def try_factorization_and_reallocation(kkt, linear_solver, reallocation_factor, - max_iter, timer=None): +def try_factorization_and_reallocation( + kkt, linear_solver, reallocation_factor, max_iter, timer=None +): if timer is None: timer = HierarchicalTimer() @@ -566,15 +601,13 @@ def try_factorization_and_reallocation(kkt, linear_solver, reallocation_factor, (and ordering of row and column arrays) of the KKT matrix never changes. We have not had time to test this thoroughly, yet. """ - res = linear_solver.do_symbolic_factorization( - matrix=kkt, - raise_on_error=False) + res = linear_solver.do_symbolic_factorization(matrix=kkt, raise_on_error=False) timer.stop('symbolic') if res.status == LinearSolverStatus.successful: timer.start('numeric') res = linear_solver.do_numeric_factorization( - matrix=kkt, - raise_on_error=False) + matrix=kkt, raise_on_error=False + ) timer.stop('numeric') status = res.status if status == LinearSolverStatus.not_enough_memory: @@ -638,50 +671,48 @@ def fraction_to_the_boundary(interface, tau): ineq_ub = interface.ineq_ub() alpha_primal_max_a = _fraction_to_the_boundary_helper_lb( - tau=tau, - x=primals, - delta_x=delta_primals, - xl=primals_lb) + tau=tau, x=primals, delta_x=delta_primals, xl=primals_lb + ) alpha_primal_max_b = _fraction_to_the_boundary_helper_ub( - tau=tau, - x=primals, - delta_x=delta_primals, - xu=primals_ub) + tau=tau, x=primals, delta_x=delta_primals, xu=primals_ub + ) alpha_primal_max_c = _fraction_to_the_boundary_helper_lb( - tau=tau, - x=slacks, - delta_x=delta_slacks, - xl=ineq_lb) + tau=tau, x=slacks, delta_x=delta_slacks, xl=ineq_lb + ) alpha_primal_max_d = _fraction_to_the_boundary_helper_ub( - tau=tau, - x=slacks, - delta_x=delta_slacks, - xu=ineq_ub) - alpha_primal_max = min(alpha_primal_max_a, alpha_primal_max_b, - alpha_primal_max_c, alpha_primal_max_d) + tau=tau, x=slacks, delta_x=delta_slacks, xu=ineq_ub + ) + alpha_primal_max = min( + alpha_primal_max_a, alpha_primal_max_b, alpha_primal_max_c, alpha_primal_max_d + ) alpha_dual_max_a = _fraction_to_the_boundary_helper_lb( tau=tau, x=duals_primals_lb, delta_x=delta_duals_primals_lb, - xl=np.zeros(duals_primals_lb.size)) + xl=np.zeros(duals_primals_lb.size), + ) alpha_dual_max_b = _fraction_to_the_boundary_helper_lb( tau=tau, x=duals_primals_ub, delta_x=delta_duals_primals_ub, - xl=np.zeros(duals_primals_ub.size)) + xl=np.zeros(duals_primals_ub.size), + ) alpha_dual_max_c = _fraction_to_the_boundary_helper_lb( tau=tau, x=duals_slacks_lb, delta_x=delta_duals_slacks_lb, - xl=np.zeros(duals_slacks_lb.size)) + xl=np.zeros(duals_slacks_lb.size), + ) alpha_dual_max_d = _fraction_to_the_boundary_helper_lb( tau=tau, x=duals_slacks_ub, delta_x=delta_duals_slacks_ub, - xl=np.zeros(duals_slacks_ub.size)) - alpha_dual_max = min(alpha_dual_max_a, alpha_dual_max_b, - alpha_dual_max_c, alpha_dual_max_d) + xl=np.zeros(duals_slacks_ub.size), + ) + alpha_dual_max = min( + alpha_dual_max_a, alpha_dual_max_b, alpha_dual_max_c, alpha_dual_max_d + ) return alpha_primal_max, alpha_dual_max @@ -690,11 +721,13 @@ def process_init(x, lb, ub): if np.any((ub - lb) < 0): raise ValueError( 'Lower bounds for variables/inequalities should not be larger ' - 'than upper bounds.') + 'than upper bounds.' + ) if np.any((ub - lb) == 0): raise ValueError( 'Variables and inequalities should not have equal lower and upper ' - 'bounds.') + 'bounds.' + ) lb_mask = build_bounds_mask(lb) ub_mask = build_bounds_mask(ub) @@ -702,7 +735,7 @@ def process_init(x, lb, ub): lb_only = np.logical_and(lb_mask, np.logical_not(ub_mask)) ub_only = np.logical_and(ub_mask, np.logical_not(lb_mask)) lb_and_ub = np.logical_and(lb_mask, ub_mask) - out_of_bounds = ((x >= ub) + (x <= lb)) + out_of_bounds = (x >= ub) + (x <= lb) out_of_bounds_lb_only = np.logical_and(out_of_bounds, lb_only) out_of_bounds_ub_only = np.logical_and(out_of_bounds, ub_only) out_of_bounds_lb_and_ub = np.logical_and(out_of_bounds, lb_and_ub) diff --git a/pyomo/contrib/interior_point/inverse_reduced_hessian.py b/pyomo/contrib/interior_point/inverse_reduced_hessian.py index 787ab95090d..6144a4afeb8 100644 --- a/pyomo/contrib/interior_point/inverse_reduced_hessian.py +++ b/pyomo/contrib/interior_point/inverse_reduced_hessian.py @@ -15,13 +15,15 @@ from .interface import InteriorPointInterface from .linalg.scipy_interface import ScipyInterface -np, numpy_available = attempt_import('numpy', - 'Interior point requires numpy', - minimum_version='1.13.0') +np, numpy_available = attempt_import( + 'numpy', 'Interior point requires numpy', minimum_version='1.13.0' +) # Todo: This function currently used IPOPT for the initial solve - should accept solver -def inv_reduced_hessian_barrier(model, independent_variables, bound_tolerance=1e-6, solver_options=None, tee=False): +def inv_reduced_hessian_barrier( + model, independent_variables, bound_tolerance=1e-6, solver_options=None, tee=False +): """ This function computes the inverse of the reduced Hessian of a problem at the solution. This function first solves the problem with Ipopt and then generates @@ -29,8 +31,8 @@ def inv_reduced_hessian_barrier(model, independent_variables, bound_tolerance=1e For more information on the reduced Hessian, see "Numerical Optimization", 2nd Edition Nocedal and Wright, 2006. - - The approach used in this method can be found in, "Computational Strategies for + + The approach used in this method can be found in, "Computational Strategies for the Optimal Operation of Large-Scale Chemical Processes", Dissertation, V. Zavala 2008. See section 3.2.1. @@ -40,7 +42,7 @@ def inv_reduced_hessian_barrier(model, independent_variables, bound_tolerance=1e The Pyomo model that we want to solve and analyze independent_variables : list of Pyomo variables This is the list of independent variables for computing the reduced hessian. - These variables must not be at their bounds at the solution of the + These variables must not be at their bounds at the solution of the optimization problem. bound_tolerance : float The tolerance to use when checking if the variables are too close to their bound. @@ -76,8 +78,8 @@ def inv_reduced_hessian_barrier(model, independent_variables, bound_tolerance=1e solver.options[key] = solver_options[key] # set options to prevent bounds relaxation (and 0 slacks) - solver.options['bound_relax_factor']=0 - solver.options['honor_original_bounds']='no' + solver.options['bound_relax_factor'] = 0 + solver.options['honor_original_bounds'] = 'no' # solve the problem status = solver.solve(m, tee=tee) @@ -89,17 +91,19 @@ def inv_reduced_hessian_barrier(model, independent_variables, bound_tolerance=1e estimated_mu = list() for v in m.ipopt_zL_out: if v.has_lb(): - estimated_mu.append((pyo.value(v) - v.lb)*m.ipopt_zL_out[v]) + estimated_mu.append((pyo.value(v) - v.lb) * m.ipopt_zL_out[v]) for v in m.ipopt_zU_out: if v.has_ub(): - estimated_mu.append((v.ub - pyo.value(v))*m.ipopt_zU_out[v]) + estimated_mu.append((v.ub - pyo.value(v)) * m.ipopt_zU_out[v]) if len(estimated_mu) == 0: mu = 10**-8.6 else: - mu = sum(estimated_mu)/len(estimated_mu) + mu = sum(estimated_mu) / len(estimated_mu) # check to make sure these estimates were all reasonable - if any([abs(mu-estmu) > 1e-7 for estmu in estimated_mu]): - print('Warning: estimated values of mu do not seem consistent - using mu=10^(-8.6)') + if any([abs(mu - estmu) > 1e-7 for estmu in estimated_mu]): + print( + 'Warning: estimated values of mu do not seem consistent - using mu=10^(-8.6)' + ) mu = 10**-8.6 # collect the list of var data objects for the independent variables @@ -113,12 +117,15 @@ def inv_reduced_hessian_barrier(model, independent_variables, bound_tolerance=1e # check that none of the independent variables are at their bounds for v in ind_vardatas: - if (v.has_lb() and pyo.value(v) - v.lb <= bound_tolerance) or \ - (v.has_ub() and v.ub - pyo.value(v) <= bound_tolerance): - raise ValueError("Independent variable: {} has a solution value that is near" - " its bound (according to tolerance). The reduced hessian" - " computation does not support this at this time. All" - " independent variables should be in their interior.".format(v)) + if (v.has_lb() and pyo.value(v) - v.lb <= bound_tolerance) or ( + v.has_ub() and v.ub - pyo.value(v) <= bound_tolerance + ): + raise ValueError( + "Independent variable: {} has a solution value that is near" + " its bound (according to tolerance). The reduced hessian" + " computation does not support this at this time. All" + " independent variables should be in their interior.".format(v) + ) # find the list of indices that we need to make up the reduced hessian kkt_builder = InteriorPointInterface(m) @@ -135,12 +142,12 @@ def inv_reduced_hessian_barrier(model, independent_variables, bound_tolerance=1e n_rh = len(ind_var_indices) rhs = np.zeros(kkt.shape[0]) inv_red_hess = np.zeros((n_rh, n_rh)) - + for rhi, vari in enumerate(ind_var_indices): rhs[vari] = 1 v, res = linear_solver.do_back_solve(rhs) rhs[vari] = 0 for rhj, varj in enumerate(ind_var_indices): - inv_red_hess[rhi,rhj] = v[varj] + inv_red_hess[rhi, rhj] = v[varj] return status, inv_red_hess diff --git a/pyomo/contrib/interior_point/linalg/ma27_interface.py b/pyomo/contrib/interior_point/linalg/ma27_interface.py index b6423a241fb..7bb98b0b6fd 100644 --- a/pyomo/contrib/interior_point/linalg/ma27_interface.py +++ b/pyomo/contrib/interior_point/linalg/ma27_interface.py @@ -11,33 +11,43 @@ class InteriorPointMA27Interface(MA27, IPLinearSolverInterface): def getLoggerName(cls): return 'ma27' - def __init__(self, cntl_options=None, icntl_options=None, iw_factor=1.2, a_factor=2): - super(InteriorPointMA27Interface, self).__init__(cntl_options=cntl_options, - icntl_options=icntl_options, - iw_factor=iw_factor, - a_factor=a_factor) + def __init__( + self, cntl_options=None, icntl_options=None, iw_factor=1.2, a_factor=2 + ): + super(InteriorPointMA27Interface, self).__init__( + cntl_options=cntl_options, + icntl_options=icntl_options, + iw_factor=iw_factor, + a_factor=a_factor, + ) self._num_status = None def do_symbolic_factorization( self, matrix: Union[spmatrix, BlockMatrix], raise_on_error: bool = True ) -> LinearSolverResults: self._num_status = None - return super(InteriorPointMA27Interface, self).do_symbolic_factorization(matrix=matrix, - raise_on_error=raise_on_error) + return super(InteriorPointMA27Interface, self).do_symbolic_factorization( + matrix=matrix, raise_on_error=raise_on_error + ) def do_numeric_factorization( self, matrix: Union[spmatrix, BlockMatrix], raise_on_error: bool = True ) -> LinearSolverResults: - res = super(InteriorPointMA27Interface, self).do_numeric_factorization(matrix=matrix, - raise_on_error=raise_on_error) + res = super(InteriorPointMA27Interface, self).do_numeric_factorization( + matrix=matrix, raise_on_error=raise_on_error + ) self._num_status = res.status return res def get_inertia(self): if self._num_status is None: - raise RuntimeError('Must call do_numeric_factorization before inertia can be computed') + raise RuntimeError( + 'Must call do_numeric_factorization before inertia can be computed' + ) if self._num_status != LinearSolverStatus.successful: - raise RuntimeError('Can only compute inertia if the numeric factorization was successful.') + raise RuntimeError( + 'Can only compute inertia if the numeric factorization was successful.' + ) num_negative_eigenvalues = self.get_info(15) num_positive_eigenvalues = self._dim - num_negative_eigenvalues return (num_positive_eigenvalues, num_negative_eigenvalues, 0) diff --git a/pyomo/contrib/interior_point/linalg/mumps_interface.py b/pyomo/contrib/interior_point/linalg/mumps_interface.py index 487db383398..98f0ef03210 100644 --- a/pyomo/contrib/interior_point/linalg/mumps_interface.py +++ b/pyomo/contrib/interior_point/linalg/mumps_interface.py @@ -17,14 +17,17 @@ from pyomo.contrib.pynumero.sparse import BlockVector import numpy as np -mumps, mumps_available = attempt_import(name='pyomo.contrib.pynumero.linalg.mumps_interface', - error_message='pymumps is required to use the MumpsInterface') +mumps, mumps_available = attempt_import( + name='pyomo.contrib.pynumero.linalg.mumps_interface', + error_message='pymumps is required to use the MumpsInterface', +) -from pyomo.contrib.pynumero.linalg.mumps_interface import MumpsCentralizedAssembledLinearSolver +from pyomo.contrib.pynumero.linalg.mumps_interface import ( + MumpsCentralizedAssembledLinearSolver, +) class MumpsInterface(MumpsCentralizedAssembledLinearSolver, IPLinearSolverInterface): - @classmethod def getLoggerName(cls): return 'mumps' @@ -39,8 +42,13 @@ def __init__(self, par=1, comm=None, cntl_options=None, icntl_options=None): if 24 not in icntl_options: icntl_options[24] = 0 - super(MumpsInterface, self).__init__(sym=2, par=par, comm=comm, cntl_options=cntl_options, - icntl_options=icntl_options) + super(MumpsInterface, self).__init__( + sym=2, + par=par, + comm=comm, + cntl_options=cntl_options, + icntl_options=icntl_options, + ) self.error_level = self.get_icntl(11) self.log_error = bool(self.error_level) @@ -48,17 +56,20 @@ def __init__(self, par=1, comm=None, cntl_options=None, icntl_options=None): self.log_header(include_error=self.log_error) def do_back_solve( - self, rhs: Union[np.ndarray, BlockVector], - raise_on_error: bool = True + self, rhs: Union[np.ndarray, BlockVector], raise_on_error: bool = True ) -> Tuple[Optional[Union[np.ndarray, BlockVector]], LinearSolverResults]: - res, status = super(MumpsInterface, self).do_back_solve(rhs, raise_on_error=raise_on_error) + res, status = super(MumpsInterface, self).do_back_solve( + rhs, raise_on_error=raise_on_error + ) self.log_info() return res, status def get_inertia(self): num_negative_eigenvalues = self.get_infog(12) num_zero_eigenvalues = self.get_infog(28) - num_positive_eigenvalues = self._dim - num_negative_eigenvalues - num_zero_eigenvalues + num_positive_eigenvalues = ( + self._dim - num_negative_eigenvalues - num_zero_eigenvalues + ) return num_positive_eigenvalues, num_negative_eigenvalues, num_zero_eigenvalues def get_error_info(self): @@ -108,8 +119,8 @@ def log_info(self): # Which fields to log should be specified at the instance level # Any logging that should be done on an iteration-specific case # should be handled by the IP solver - fields=[] - fields.append(self.get_infog(1)) # Status, 0 for success + fields = [] + fields.append(self.get_infog(1)) # Status, 0 for success fields.append(self.get_infog(28)) # Number of null pivots fields.append(self.get_infog(12)) # Number of negative pivots @@ -125,9 +136,8 @@ def log_info(self): log_string += '{1:<10}' log_string += '{2:<10}' - # Allocate 15 spsaces for the rest, which I assume are floats + # Allocate 15 spaces for the rest, which I assume are floats for i in range(4, len(fields)): log_string += '{' + str(i) + ':<15.3e}' self.logger.info(log_string.format(*fields)) - diff --git a/pyomo/contrib/interior_point/linalg/scipy_interface.py b/pyomo/contrib/interior_point/linalg/scipy_interface.py index 2f9de7803ff..b7b7923bad4 100644 --- a/pyomo/contrib/interior_point/linalg/scipy_interface.py +++ b/pyomo/contrib/interior_point/linalg/scipy_interface.py @@ -25,8 +25,9 @@ def __init__(self, compute_inertia=False): def do_numeric_factorization( self, matrix: Union[spmatrix, BlockMatrix], raise_on_error: bool = True ) -> LinearSolverResults: - res = super(ScipyInterface, self).do_numeric_factorization(matrix=matrix, - raise_on_error=raise_on_error) + res = super(ScipyInterface, self).do_numeric_factorization( + matrix=matrix, raise_on_error=raise_on_error + ) if self.compute_inertia: eig = eigvals(matrix.toarray()) @@ -39,5 +40,7 @@ def do_numeric_factorization( def get_inertia(self): if self._inertia is None: - raise RuntimeError('The intertia was not computed during do_numeric_factorization. Set compute_inertia to True.') + raise RuntimeError( + 'The inertia was not computed during do_numeric_factorization. Set compute_inertia to True.' + ) return self._inertia diff --git a/pyomo/contrib/interior_point/linalg/tests/test_linear_solvers.py b/pyomo/contrib/interior_point/linalg/tests/test_linear_solvers.py index 573aa45e11c..35863aa7cf7 100644 --- a/pyomo/contrib/interior_point/linalg/tests/test_linear_solvers.py +++ b/pyomo/contrib/interior_point/linalg/tests/test_linear_solvers.py @@ -1,5 +1,6 @@ import pyomo.common.unittest as unittest from pyomo.common.dependencies import attempt_import + np, np_available = attempt_import('numpy', minimum_version='1.13.0') scipy, scipy_available = attempt_import('scipy.sparse') mumps, mumps_available = attempt_import('mumps') @@ -9,14 +10,19 @@ from scipy.sparse import coo_matrix, tril from pyomo.contrib import interior_point as ip from pyomo.contrib.pynumero.linalg.base import LinearSolverStatus + if scipy_available: from pyomo.contrib.interior_point.linalg.scipy_interface import ScipyInterface if mumps_available: from pyomo.contrib.interior_point.linalg.mumps_interface import MumpsInterface from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface + ma27_available = MA27Interface.available() if ma27_available: - from pyomo.contrib.interior_point.linalg.ma27_interface import InteriorPointMA27Interface + from pyomo.contrib.interior_point.linalg.ma27_interface import ( + InteriorPointMA27Interface, + ) + def get_base_matrix(use_tril): if use_tril: @@ -27,7 +33,7 @@ def get_base_matrix(use_tril): row = [0, 0, 0, 1, 1, 2, 2] col = [0, 1, 2, 0, 1, 0, 2] data = [1, 7, 3, 7, 4, 3, 6] - mat = coo_matrix((data, (row, col)), shape=(3,3), dtype=np.double) + mat = coo_matrix((data, (row, col)), shape=(3, 3), dtype=np.double) return mat @@ -40,7 +46,7 @@ def get_base_matrix_wrong_order(use_tril): row = [1, 0, 0, 0, 1, 2, 2] col = [0, 1, 2, 0, 1, 0, 2] data = [7, 7, 3, 1, 4, 3, 6] - mat = coo_matrix((data, (row, col)), shape=(3,3), dtype=np.double) + mat = coo_matrix((data, (row, col)), shape=(3, 3), dtype=np.double) return mat @@ -50,6 +56,7 @@ class TestTrilBehavior(unittest.TestCase): the behavior of tril that is tested in this test, namely the tests in TestWrongNonzeroOrdering. """ + def test_tril_behavior(self): mat = get_base_matrix(use_tril=True) mat2 = tril(mat) @@ -84,12 +91,16 @@ def _test_linear_solvers(self, solver): x, res = solver.do_back_solve(rhs) self.assertTrue(np.allclose(x, x_true)) - @unittest.skipIf(not scipy_available, 'scipy is needed for interior point scipy tests') + @unittest.skipIf( + not scipy_available, 'scipy is needed for interior point scipy tests' + ) def test_scipy(self): solver = ScipyInterface() self._test_linear_solvers(solver) - @unittest.skipIf(not mumps_available, 'mumps is needed for interior point mumps tests') + @unittest.skipIf( + not mumps_available, 'mumps is needed for interior point mumps tests' + ) def test_mumps(self): solver = MumpsInterface() self._test_linear_solvers(solver) @@ -112,12 +123,16 @@ def _test_solvers(self, solver, use_tril): x, res = solver.do_back_solve(rhs) self.assertTrue(np.allclose(x, x_true)) - @unittest.skipIf(not scipy_available, 'scipy is needed for interior point scipy tests') + @unittest.skipIf( + not scipy_available, 'scipy is needed for interior point scipy tests' + ) def test_scipy(self): solver = ScipyInterface() self._test_solvers(solver, use_tril=False) - @unittest.skipIf(not mumps_available, 'mumps is needed for interior point mumps tests') + @unittest.skipIf( + not mumps_available, 'mumps is needed for interior point mumps tests' + ) def test_mumps(self): solver = MumpsInterface() self._test_solvers(solver, use_tril=True) diff --git a/pyomo/contrib/interior_point/linalg/tests/test_realloc.py b/pyomo/contrib/interior_point/linalg/tests/test_realloc.py index 26a12e43a47..0b2e449e349 100644 --- a/pyomo/contrib/interior_point/linalg/tests/test_realloc.py +++ b/pyomo/contrib/interior_point/linalg/tests/test_realloc.py @@ -1,21 +1,24 @@ import pyomo.common.unittest as unittest from pyomo.common.dependencies import attempt_import -np, numpy_available = attempt_import('numpy', 'Interior point requires numpy', - minimum_version='1.13.0') + +np, numpy_available = attempt_import( + 'numpy', 'Interior point requires numpy', minimum_version='1.13.0' +) scipy, scipy_available = attempt_import('scipy', 'Interior point requires scipy') mumps, mumps_available = attempt_import('mumps') if not (numpy_available and scipy_available): raise unittest.SkipTest('Interior point tests require numpy and scipy') from scipy.sparse import coo_matrix import pyomo.contrib.interior_point as ip + if mumps_available: from pyomo.contrib.interior_point.linalg.mumps_interface import MumpsInterface from pyomo.contrib.pynumero.linalg.base import LinearSolverStatus + @unittest.skipIf(not mumps_available, 'mumps is not available') class TestReallocation(unittest.TestCase): def test_reallocate_memory_mumps(self): - # Create a tri-diagonal matrix with small entries on the diagonal n = 10000 small_val = 1e-7 @@ -23,18 +26,18 @@ def test_reallocate_memory_mumps(self): irn = [] jcn = [] ent = [] - for i in range(n-1): - irn.extend([i+1, i, i]) - jcn.extend([i, i, i+1]) - ent.extend([big_val,small_val,big_val]) - irn.append(n-1) - jcn.append(n-1) + for i in range(n - 1): + irn.extend([i + 1, i, i]) + jcn.extend([i, i, i + 1]) + ent.extend([big_val, small_val, big_val]) + irn.append(n - 1) + jcn.append(n - 1) ent.append(small_val) irn = np.array(irn) jcn = np.array(jcn) ent = np.array(ent) - matrix = coo_matrix((ent, (irn, jcn)), shape=(n,n)) + matrix = coo_matrix((ent, (irn, jcn)), shape=(n, n)) linear_solver = MumpsInterface() linear_solver.do_symbolic_factorization(matrix) @@ -53,7 +56,7 @@ def test_reallocate_memory_mumps(self): self.assertEqual(res.status, LinearSolverStatus.successful) # Expected memory allocation (MB) - self.assertEqual(linear_solver._prev_allocation, 2*predicted) + self.assertEqual(linear_solver._prev_allocation, 2 * predicted) actual = linear_solver.get_infog(18) diff --git a/pyomo/contrib/interior_point/tests/test_interior_point.py b/pyomo/contrib/interior_point/tests/test_interior_point.py index 01e006a60e4..bff80934d20 100644 --- a/pyomo/contrib/interior_point/tests/test_interior_point.py +++ b/pyomo/contrib/interior_point/tests/test_interior_point.py @@ -13,7 +13,9 @@ import pyomo.environ as pyo from pyomo.common.dependencies import attempt_import -np, numpy_available = attempt_import('numpy', 'Interior point requires numpy', minimum_version='1.13.0') +np, numpy_available = attempt_import( + 'numpy', 'Interior point requires numpy', minimum_version='1.13.0' +) scipy, scipy_available = attempt_import('scipy', 'Interior point requires scipy') mumps, mumps_available = attempt_import('mumps', 'Interior point requires mumps') if not (numpy_available and scipy_available): @@ -28,21 +30,28 @@ import numpy as np from pyomo.contrib.pynumero.asl import AmplInterface + asl_available = AmplInterface.available() -from pyomo.contrib.interior_point.interior_point import (process_init, - process_init_duals_lb, - process_init_duals_ub, - _fraction_to_the_boundary_helper_lb, - _fraction_to_the_boundary_helper_ub, - InteriorPointStatus, - InteriorPointSolver) +from pyomo.contrib.interior_point.interior_point import ( + process_init, + process_init_duals_lb, + process_init_duals_ub, + _fraction_to_the_boundary_helper_lb, + _fraction_to_the_boundary_helper_ub, + InteriorPointStatus, + InteriorPointSolver, +) from pyomo.contrib.interior_point.interface import InteriorPointInterface from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface + ma27_available = MA27Interface.available() if ma27_available: - from pyomo.contrib.interior_point.linalg.ma27_interface import InteriorPointMA27Interface + from pyomo.contrib.interior_point.linalg.ma27_interface import ( + InteriorPointMA27Interface, + ) + @unittest.skipIf(not asl_available, 'asl is not available') class TestSolveInteriorPoint(unittest.TestCase): @@ -52,7 +61,7 @@ def _test_solve_interior_point_1(self, linear_solver): m.y = pyo.Var() m.obj = pyo.Objective(expr=m.x**2 + m.y**2) m.c1 = pyo.Constraint(expr=m.y == pyo.exp(m.x)) - m.c2 = pyo.Constraint(expr=m.y >= (m.x - 1)**2) + m.c2 = pyo.Constraint(expr=m.y >= (m.x - 1) ** 2) interface = InteriorPointInterface(m) ip_solver = InteriorPointSolver(linear_solver) status = ip_solver.solve(interface) @@ -62,8 +71,8 @@ def _test_solve_interior_point_1(self, linear_solver): duals_ineq = interface.get_duals_ineq() self.assertAlmostEqual(x[0], 0) self.assertAlmostEqual(x[1], 1) - self.assertAlmostEqual(duals_eq[0], -1-1.0/3.0) - self.assertAlmostEqual(duals_ineq[0], 2.0/3.0) + self.assertAlmostEqual(duals_eq[0], -1 - 1.0 / 3.0) + self.assertAlmostEqual(duals_ineq[0], 2.0 / 3.0) interface.load_primals_into_pyomo_model() self.assertAlmostEqual(m.x.value, 0) self.assertAlmostEqual(m.y.value, 1) @@ -114,26 +123,26 @@ def test_ip2_ma27(self): class TestProcessInit(unittest.TestCase): def testprocess_init(self): - lb = np.array([-np.inf, -np.inf, -2, -2], dtype=np.double) - ub = np.array([ np.inf, 2, np.inf, 2], dtype=np.double) + lb = np.array([-np.inf, -np.inf, -2, -2], dtype=np.double) + ub = np.array([np.inf, 2, np.inf, 2], dtype=np.double) - x = np.array([ 0, 0, 0, 0], dtype=np.double) + x = np.array([0, 0, 0, 0], dtype=np.double) process_init(x, lb, ub) self.assertTrue(np.allclose(x, np.array([0, 0, 0, 0], dtype=np.double))) - x = np.array([ -2, -2, -2, -2], dtype=np.double) + x = np.array([-2, -2, -2, -2], dtype=np.double) process_init(x, lb, ub) self.assertTrue(np.allclose(x, np.array([-2, -2, -1, 0], dtype=np.double))) - x = np.array([ -3, -3, -3, -3], dtype=np.double) + x = np.array([-3, -3, -3, -3], dtype=np.double) process_init(x, lb, ub) self.assertTrue(np.allclose(x, np.array([-3, -3, -1, 0], dtype=np.double))) - x = np.array([ 2, 2, 2, 2], dtype=np.double) + x = np.array([2, 2, 2, 2], dtype=np.double) process_init(x, lb, ub) self.assertTrue(np.allclose(x, np.array([2, 1, 2, 0], dtype=np.double))) - x = np.array([ 3, 3, 3, 3], dtype=np.double) + x = np.array([3, 3, 3, 3], dtype=np.double) process_init(x, lb, ub) self.assertTrue(np.allclose(x, np.array([3, 1, 3, 0], dtype=np.double))) diff --git a/pyomo/contrib/interior_point/tests/test_inverse_reduced_hessian.py b/pyomo/contrib/interior_point/tests/test_inverse_reduced_hessian.py index 6a81eb7373b..67657dfce47 100644 --- a/pyomo/contrib/interior_point/tests/test_inverse_reduced_hessian.py +++ b/pyomo/contrib/interior_point/tests/test_inverse_reduced_hessian.py @@ -14,19 +14,25 @@ from pyomo.opt import check_optimal_termination from pyomo.common.dependencies import attempt_import -np, numpy_available = attempt_import('numpy', 'inverse_reduced_hessian numpy', - minimum_version='1.13.0') -scipy, scipy_available = attempt_import('scipy', 'inverse_reduced_hessian requires scipy') +np, numpy_available = attempt_import( + 'numpy', 'inverse_reduced_hessian numpy', minimum_version='1.13.0' +) +scipy, scipy_available = attempt_import( + 'scipy', 'inverse_reduced_hessian requires scipy' +) if numpy_available: from pyomo.contrib.pynumero.asl import AmplInterface + asl_available = AmplInterface.available() else: - asl_available=False + asl_available = False if not (numpy_available and scipy_available and asl_available): - raise unittest.SkipTest('inverse_reduced_hessian tests require numpy, scipy, and asl') -from pyomo.common.dependencies import (pandas as pd, pandas_available) + raise unittest.SkipTest( + 'inverse_reduced_hessian tests require numpy, scipy, and asl' + ) +from pyomo.common.dependencies import pandas as pd, pandas_available ipopt_solver = pyo.SolverFactory('ipopt') if not ipopt_solver.available(exception_flag=False): @@ -38,79 +44,98 @@ except: numdiff_available = False -from pyomo.contrib.interior_point.inverse_reduced_hessian import inv_reduced_hessian_barrier - +from pyomo.contrib.interior_point.inverse_reduced_hessian import ( + inv_reduced_hessian_barrier, +) + + class TestInverseReducedHessian(unittest.TestCase): # the original test def test_invrh_zavala_thesis(self): m = pyo.ConcreteModel() - m.x = pyo.Var([1,2,3]) - m.obj = pyo.Objective(expr=(m.x[1]-1)**2 + (m.x[2]-2)**2 + (m.x[3]-3)**2) - m.c1 = pyo.Constraint(expr=m.x[1] + 2*m.x[2] + 3*m.x[3]==0) + m.x = pyo.Var([1, 2, 3]) + m.obj = pyo.Objective( + expr=(m.x[1] - 1) ** 2 + (m.x[2] - 2) ** 2 + (m.x[3] - 3) ** 2 + ) + m.c1 = pyo.Constraint(expr=m.x[1] + 2 * m.x[2] + 3 * m.x[3] == 0) status, invrh = inv_reduced_hessian_barrier(m, [m.x[2], m.x[3]]) - expected_invrh = np.asarray([[ 0.35714286, -0.21428571], - [-0.21428571, 0.17857143]]) + expected_invrh = np.asarray( + [[0.35714286, -0.21428571], [-0.21428571, 0.17857143]] + ) np.testing.assert_array_almost_equal(invrh, expected_invrh) # test by DLW, April 2020 def _simple_model(self, add_constraint=False): # Hardwired to have two x columns and one y # if add_constraint is true, there is a binding constraint on b0 - data = pd.DataFrame([[1, 1.1, 0.365759306], - [2, 1.2, 4], - [3, 1.3, 4.8876684], - [4, 1.4, 5.173455561], - [5, 1.5, 2.093799081], - [6, 1.6, 9], - [7, 1.7, 6.475045106], - [8, 1.8, 8.127111268], - [9, 1.9, 6], - [10, 1.21, 10.20642714], - [11, 1.22, 13.08211636], - [12, 1.23, 10], - [13, 1.24, 15.38766047], - [14, 1.25, 14.6587746], - [15, 1.26, 13.68608604], - [16, 1.27, 14.70707893], - [17, 1.28, 18.46192779], - [18, 1.29, 15.60649164]], - columns=['tofu','chard', 'y']) + data = pd.DataFrame( + [ + [1, 1.1, 0.365759306], + [2, 1.2, 4], + [3, 1.3, 4.8876684], + [4, 1.4, 5.173455561], + [5, 1.5, 2.093799081], + [6, 1.6, 9], + [7, 1.7, 6.475045106], + [8, 1.8, 8.127111268], + [9, 1.9, 6], + [10, 1.21, 10.20642714], + [11, 1.22, 13.08211636], + [12, 1.23, 10], + [13, 1.24, 15.38766047], + [14, 1.25, 14.6587746], + [15, 1.26, 13.68608604], + [16, 1.27, 14.70707893], + [17, 1.28, 18.46192779], + [18, 1.29, 15.60649164], + ], + columns=['tofu', 'chard', 'y'], + ) model = pyo.ConcreteModel() - model.b0 = pyo.Var(initialize = 0) + model.b0 = pyo.Var(initialize=0) model.bindexes = pyo.Set(initialize=['tofu', 'chard']) - model.b = pyo.Var(model.bindexes, initialize = 1) + model.b = pyo.Var(model.bindexes, initialize=1) # try to make trouble if add_constraint: - model.binding_constraint = pyo.Constraint(expr=model.b0>=10) + model.binding_constraint = pyo.Constraint(expr=model.b0 >= 10) # The columns need to have unique values (or you get warnings) def response_rule(m, t, c): - expr = m.b0 + m.b['tofu']*t + m.b['chard']*c + expr = m.b0 + m.b['tofu'] * t + m.b['chard'] * c return expr - model.response_function = pyo.Expression(data.tofu, data.chard, rule = response_rule) + + model.response_function = pyo.Expression( + data.tofu, data.chard, rule=response_rule + ) def SSE_rule(m): - return sum((data.y[i] - m.response_function[data.tofu[i], data.chard[i]])**2\ - for i in data.index) - model.SSE = pyo.Objective(rule = SSE_rule, sense=pyo.minimize) + return sum( + (data.y[i] - m.response_function[data.tofu[i], data.chard[i]]) ** 2 + for i in data.index + ) + + model.SSE = pyo.Objective(rule=SSE_rule, sense=pyo.minimize) return model @unittest.skipIf(not numdiff_available, "numdifftools missing") @unittest.skipIf(not pandas_available, "pandas missing") def test_3x3_using_linear_regression(self): - """ simple linear regression with two x columns, so 3x3 Hessian""" + """simple linear regression with two x columns, so 3x3 Hessian""" model = self._simple_model() solver = pyo.SolverFactory("ipopt") status = solver.solve(model) self.assertTrue(check_optimal_termination(status)) - tstar = [pyo.value(model.b0), - pyo.value(model.b['tofu']), pyo.value(model.b['chard'])] + tstar = [ + pyo.value(model.b0), + pyo.value(model.b['tofu']), + pyo.value(model.b['chard']), + ] def _ndwrap(x): # wrapper for numdiff call @@ -126,25 +151,22 @@ def _ndwrap(x): model.b0.fixed = False model.b["tofu"].fixed = False model.b["chard"].fixed = False - status, H_inv_red_hess = inv_reduced_hessian_barrier(model, - [model.b0, - model.b["tofu"], - model.b["chard"]]) + status, H_inv_red_hess = inv_reduced_hessian_barrier( + model, [model.b0, model.b["tofu"], model.b["chard"]] + ) # this passes at decimal=6, BTW np.testing.assert_array_almost_equal(HInv, H_inv_red_hess, decimal=3) - @unittest.skipIf(not numdiff_available, "numdifftools missing") @unittest.skipIf(not pandas_available, "pandas missing") def test_with_binding_constraint(self): - """ there is a binding constraint""" + """there is a binding constraint""" model = self._simple_model(add_constraint=True) - status, H_inv_red_hess = inv_reduced_hessian_barrier(model, - [model.b0, - model.b["tofu"], - model.b["chard"]]) + status, H_inv_red_hess = inv_reduced_hessian_barrier( + model, [model.b0, model.b["tofu"], model.b["chard"]] + ) print("test_with_binding_constraint should see an error raised.") diff --git a/pyomo/contrib/interior_point/tests/test_realloc.py b/pyomo/contrib/interior_point/tests/test_realloc.py index 5a1b388e9b1..9789c7d3ac0 100644 --- a/pyomo/contrib/interior_point/tests/test_realloc.py +++ b/pyomo/contrib/interior_point/tests/test_realloc.py @@ -3,8 +3,9 @@ from pyomo.core.base import ConcreteModel, Var, Constraint, Objective from pyomo.common.dependencies import attempt_import -np, numpy_available = attempt_import('numpy', 'Interior point requires numpy', - minimum_version='1.13.0') +np, numpy_available = attempt_import( + 'numpy', 'Interior point requires numpy', minimum_version='1.13.0' +) scipy, scipy_available = attempt_import('scipy', 'Interior point requires scipy') mumps, mumps_available = attempt_import('mumps', 'Interior point requires mumps') @@ -12,6 +13,7 @@ raise unittest.SkipTest('Interior point tests require numpy and scipy') from pyomo.contrib.pynumero.asl import AmplInterface + asl_available = AmplInterface.available() if not asl_available: raise unittest.SkipTest('Regularization tests require ASL') @@ -25,24 +27,27 @@ def make_model_tri(n, small_val=1e-7, big_val=1e2): m.x = Var(range(n), initialize=0.5) def c_rule(m, i): - return big_val*m.x[i-1] + small_val*m.x[i] + big_val*m.x[i+1] == 1 - - m.c = Constraint(range(1,n-1), rule=c_rule) + return big_val * m.x[i - 1] + small_val * m.x[i] + big_val * m.x[i + 1] == 1 + + m.c = Constraint(range(1, n - 1), rule=c_rule) - m.obj = Objective(expr=small_val*sum((m.x[i]-1)**2 for i in range(n))) + m.obj = Objective(expr=small_val * sum((m.x[i] - 1) ** 2 for i in range(n))) return m + class TestReallocation(unittest.TestCase): def _test_ip_with_reallocation(self, linear_solver, interface): - ip_solver = InteriorPointSolver(linear_solver, - max_reallocation_iterations=3, - reallocation_factor=1.1, - # The small factor is to ensure that multiple iterations of - # reallocation are performed. The bug in the previous - # implementation only occurred if 2+ reallocation iterations - # were needed (max_reallocation_iterations >= 3). - max_iter=1) + ip_solver = InteriorPointSolver( + linear_solver, + max_reallocation_iterations=3, + reallocation_factor=1.1, + # The small factor is to ensure that multiple iterations of + # reallocation are performed. The bug in the previous + # implementation only occurred if 2+ reallocation iterations + # were needed (max_reallocation_iterations >= 3). + max_iter=1, + ) ip_solver.set_interface(interface) ip_solver.solve(interface) @@ -67,7 +72,7 @@ def test_mumps(self): self.assertTrue(predicted == 12 or predicted == 11) self.assertTrue(actual > predicted) - #self.assertEqual(actual, 14) + # self.assertEqual(actual, 14) # NOTE: This test will break if Mumps (or your Mumps version) # gets more conservative at estimating memory requirement, # or if the numeric factorization gets more efficient. diff --git a/pyomo/contrib/interior_point/tests/test_reg.py b/pyomo/contrib/interior_point/tests/test_reg.py index 202916a3bfa..b37d9532428 100644 --- a/pyomo/contrib/interior_point/tests/test_reg.py +++ b/pyomo/contrib/interior_point/tests/test_reg.py @@ -14,8 +14,9 @@ from pyomo.core.base import ConcreteModel, Var, Constraint, Objective from pyomo.common.dependencies import attempt_import -np, numpy_available = attempt_import('numpy', 'Interior point requires numpy', - minimum_version='1.13.0') +np, numpy_available = attempt_import( + 'numpy', 'Interior point requires numpy', minimum_version='1.13.0' +) scipy, scipy_available = attempt_import('scipy', 'Interior point requires scipy') mumps, mumps_available = attempt_import('mumps', 'Interior point requires mumps') if not (numpy_available and scipy_available): @@ -26,31 +27,40 @@ from pyomo.contrib.interior_point.linalg.mumps_interface import MumpsInterface from pyomo.contrib.pynumero.asl import AmplInterface + asl_available = AmplInterface.available() if not asl_available: raise unittest.SkipTest('Regularization tests require ASL') -from pyomo.contrib.interior_point.interior_point import InteriorPointSolver, InteriorPointStatus +from pyomo.contrib.interior_point.interior_point import ( + InteriorPointSolver, + InteriorPointStatus, +) from pyomo.contrib.interior_point.interface import InteriorPointInterface from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface + ma27_available = MA27Interface.available() if ma27_available: - from pyomo.contrib.interior_point.linalg.ma27_interface import InteriorPointMA27Interface + from pyomo.contrib.interior_point.linalg.ma27_interface import ( + InteriorPointMA27Interface, + ) + def make_model(): m = ConcreteModel() - m.x = Var([1,2,3], initialize=0) - m.f = Var([1,2,3], initialize=0) + m.x = Var([1, 2, 3], initialize=0) + m.f = Var([1, 2, 3], initialize=0) m.F = Var(initialize=0) m.f[1].fix(1) m.f[2].fix(2) - m.sum_con = Constraint(expr= - (1 == m.x[1] + m.x[2] + m.x[3])) + m.sum_con = Constraint(expr=(1 == m.x[1] + m.x[2] + m.x[3])) + def bilin_rule(m, i): - return m.F*m.x[i] == m.f[i] - m.bilin_con = Constraint([1,2,3], rule=bilin_rule) + return m.F * m.x[i] == m.f[i] + + m.bilin_con = Constraint([1, 2, 3], rule=bilin_rule) m.obj = Objective(expr=m.F**2) @@ -82,8 +92,10 @@ def _test_regularization(self, linear_solver): # Expected regularization coefficient: self.assertAlmostEqual(reg_coef, 1e-4) - desired_n_neg_evals = (ip_solver.interface.n_eq_constraints() + - ip_solver.interface.n_ineq_constraints()) + desired_n_neg_evals = ( + ip_solver.interface.n_eq_constraints() + + ip_solver.interface.n_ineq_constraints() + ) # Expected inertia: n_pos_evals, n_neg_evals, n_null_evals = linear_solver.get_inertia() @@ -138,4 +150,3 @@ def test_ma27_2(self): # test_reg = TestRegularization() # test_reg.test_regularize_mumps() # test_reg.test_regularize_scipy() - diff --git a/pyomo/contrib/mcpp/build.py b/pyomo/contrib/mcpp/build.py index c90fb1a2040..95246e5278e 100644 --- a/pyomo/contrib/mcpp/build.py +++ b/pyomo/contrib/mcpp/build.py @@ -18,6 +18,7 @@ from pyomo.common.fileutils import this_file_dir, find_dir from pyomo.common.download import FileDownloader + def _generate_configuration(): # defer the import until use (this eventually imports pkg_resources, # which is slow to import) @@ -25,27 +26,23 @@ def _generate_configuration(): # Try and find MC++. Defer to the MCPP_ROOT if it is set; # otherwise, look in common locations for a mcpp directory. - pathlist=[ - os.path.join(envvar.PYOMO_CONFIG_DIR, 'src'), - this_file_dir(), - ] + pathlist = [os.path.join(envvar.PYOMO_CONFIG_DIR, 'src'), this_file_dir()] if 'MCPP_ROOT' in os.environ: mcpp = os.environ['MCPP_ROOT'] else: mcpp = find_dir('mcpp', cwd=True, pathlist=pathlist) if mcpp: - print("Found MC++ at %s" % ( mcpp, )) + print("Found MC++ at %s" % (mcpp,)) else: raise RuntimeError( - "Cannot identify the location of the MCPP source distribution") + "Cannot identify the location of the MCPP source distribution" + ) # # Configuration for this extension # project_dir = this_file_dir() - sources = [ - os.path.join(project_dir, 'mcppInterface.cpp'), - ] + sources = [os.path.join(project_dir, 'mcppInterface.cpp')] include_dirs = [ os.path.join(mcpp, 'src', 'mc'), os.path.join(mcpp, 'src', '3rdparty', 'fadbad++'), @@ -60,12 +57,8 @@ def _generate_configuration(): library_dirs=[], libraries=[], ) - - package_config = { - 'name': 'mcpp', - 'packages': [], - 'ext_modules': [mcpp_ext], - } + + package_config = {'name': 'mcpp', 'packages': [], 'ext_modules': [mcpp_ext]} return package_config @@ -83,9 +76,12 @@ class _BuildWithoutPlatformInfo(build_ext, object): # Python 2.7, so we will add an explicit inheritance from object so # that super() works. def get_ext_filename(self, ext_name): - filename = super(_BuildWithoutPlatformInfo, self).get_ext_filename( - ext_name).split('.') - filename = '.'.join([filename[0],filename[-1]]) + filename = ( + super(_BuildWithoutPlatformInfo, self) + .get_ext_filename(ext_name) + .split('.') + ) + filename = '.'.join([filename[0], filename[-1]]) return filename print("\n**** Building MCPP library ****") @@ -97,14 +93,15 @@ def get_ext_filename(self, ext_name): try: basedir = os.path.abspath(os.path.curdir) tmpdir = os.path.abspath(tempfile.mkdtemp()) - print(" tmpdir = %s" % ( tmpdir, )) + print(" tmpdir = %s" % (tmpdir,)) os.chdir(tmpdir) dist.run_command('install_lib') - print("Installed mcppInterface to %s" % ( install_dir, )) + print("Installed mcppInterface to %s" % (install_dir,)) finally: os.chdir(basedir) shutil.rmtree(tmpdir) + class MCPPBuilder(object): def __call__(self, parallel): return build_mcpp() @@ -115,4 +112,3 @@ def skip(self): if __name__ == "__main__": build_mcpp() - diff --git a/pyomo/contrib/mcpp/getMCPP.py b/pyomo/contrib/mcpp/getMCPP.py index b2292de55a3..caf9566df64 100644 --- a/pyomo/contrib/mcpp/getMCPP.py +++ b/pyomo/contrib/mcpp/getMCPP.py @@ -22,16 +22,20 @@ def get_mcpp(downloader): downloader.set_destination_filename(os.path.join('src', 'mcpp')) - logger.info("Fetching MC++ from %s and installing it to %s" - % (url, downloader.destination())) + logger.info( + "Fetching MC++ from %s and installing it to %s" + % (url, downloader.destination()) + ) downloader.get_zip_archive(url, dirOffset=1) + def main(argv): downloader = FileDownloader() downloader.parse_args(argv) get_mcpp(downloader) + if __name__ == '__main__': logger.setLevel(logging.INFO) try: @@ -41,4 +45,3 @@ def main(argv): print("Usage: %s [--insecure] [target]" % os.path.basename(sys.argv[0])) raise sys.exit(1) - diff --git a/pyomo/contrib/mcpp/plugins.py b/pyomo/contrib/mcpp/plugins.py index 3ad933d3f62..eed8874b1e7 100644 --- a/pyomo/contrib/mcpp/plugins.py +++ b/pyomo/contrib/mcpp/plugins.py @@ -14,7 +14,7 @@ from .getMCPP import get_mcpp from .build import MCPPBuilder + def load(): DownloadFactory.register('mcpp')(get_mcpp) ExtensionBuilderFactory.register('mcpp')(MCPPBuilder) - diff --git a/pyomo/contrib/mcpp/pyomo_mcpp.py b/pyomo/contrib/mcpp/pyomo_mcpp.py index 60771ca9393..bfd4b80edc3 100644 --- a/pyomo/contrib/mcpp/pyomo_mcpp.py +++ b/pyomo/contrib/mcpp/pyomo_mcpp.py @@ -23,15 +23,24 @@ from pyomo.core.base.expression import _ExpressionData from pyomo.core.expr.numvalue import nonpyomo_leaf_types from pyomo.core.expr.numeric_expr import ( - AbsExpression, LinearExpression, NegationExpression, NPV_AbsExpression, - NPV_ExternalFunctionExpression, NPV_NegationExpression, NPV_PowExpression, - NPV_ProductExpression, NPV_SumExpression, NPV_UnaryFunctionExpression, - PowExpression, ProductExpression, SumExpression, - UnaryFunctionExpression, NPV_DivisionExpression, DivisionExpression, -) -from pyomo.core.expr.visitor import ( - StreamBasedExpressionVisitor, identify_variables, + AbsExpression, + LinearExpression, + NegationExpression, + NPV_AbsExpression, + NPV_ExternalFunctionExpression, + NPV_NegationExpression, + NPV_PowExpression, + NPV_ProductExpression, + NPV_SumExpression, + NPV_UnaryFunctionExpression, + PowExpression, + ProductExpression, + SumExpression, + UnaryFunctionExpression, + NPV_DivisionExpression, + DivisionExpression, ) +from pyomo.core.expr.visitor import StreamBasedExpressionVisitor, identify_variables from pyomo.common.collections import ComponentMap logger = logging.getLogger('pyomo.contrib.mcpp') @@ -47,10 +56,14 @@ def mcpp_available(): NPV_expressions = ( - NPV_AbsExpression, NPV_ExternalFunctionExpression, - NPV_NegationExpression, NPV_PowExpression, - NPV_ProductExpression, NPV_SumExpression, - NPV_UnaryFunctionExpression, NPV_DivisionExpression, + NPV_AbsExpression, + NPV_ExternalFunctionExpression, + NPV_NegationExpression, + NPV_PowExpression, + NPV_ProductExpression, + NPV_SumExpression, + NPV_UnaryFunctionExpression, + NPV_DivisionExpression, ) @@ -63,7 +76,7 @@ def _MCPP_lib(): # Version number mcpp.get_version.restype = ctypes.c_char_p - + mcpp.toString.argtypes = [ctypes.c_void_p] mcpp.toString.restype = ctypes.c_char_p @@ -86,8 +99,13 @@ def _MCPP_lib(): mcpp.subcv.restype = ctypes.c_double # Create MC type variable - mcpp.newVar.argtypes = [ctypes.c_double, ctypes.c_double, - ctypes.c_double, ctypes.c_int, ctypes.c_int] + mcpp.newVar.argtypes = [ + ctypes.c_double, + ctypes.c_double, + ctypes.c_double, + ctypes.c_int, + ctypes.c_int, + ] mcpp.newVar.restype = ctypes.c_void_p # Create MC type constant @@ -165,8 +183,7 @@ def _MCPP_lib(): mcpp.try_unary_fcn.restype = ctypes.c_void_p # Binary function exception wrapper - mcpp.try_binary_fcn.argtypes = [ctypes.c_void_p, ctypes.c_void_p, - ctypes.c_void_p] + mcpp.try_binary_fcn.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] mcpp.try_binary_fcn.restype = ctypes.c_void_p # Error message retrieval @@ -174,6 +191,7 @@ def _MCPP_lib(): return mcpp + # Initialize the singleton to None _MCPP_lib._mcpp = None @@ -253,7 +271,8 @@ def exitNode(self, node, data): ans = self.mcpp.try_unary_fcn(self.mcpp.mc_abs, data[0]) elif isinstance(node, LinearExpression): raise NotImplementedError( - 'Quicksum has bugs that prevent proper usage of MC++.') + 'Quicksum has bugs that prevent proper usage of MC++.' + ) # ans = self.mcpp.newConstant(node.constant) # for coef, var in zip(node.linear_coefs, node.linear_vars): # ans = self.mcpp.add( @@ -339,20 +358,20 @@ def register_var(self, var, lb, ub): if lb == -inf: lb = -500000 logger.warning( - 'Var %s missing lower bound. Assuming LB of %s' - % (var.name, lb)) + 'Var %s missing lower bound. Assuming LB of %s' % (var.name, lb) + ) if ub == inf: ub = 500000 logger.warning( - 'Var %s missing upper bound. Assuming UB of %s' - % (var.name, ub)) + 'Var %s missing upper bound. Assuming UB of %s' % (var.name, ub) + ) if var_val is None: var_val = (lb + ub) / 2 self.missing_value_warnings.append( 'Var %s missing value. Assuming midpoint value of %s' - % (var.name, var_val)) - return self.mcpp.newVar( - lb, var_val, ub, self.num_vars, var_idx) + % (var.name, var_val) + ) + return self.mcpp.newVar(lb, var_val, ub, self.num_vars, var_idx) def finalizeResult(self, node_result): # Note, the node_result should NOT be in self.refs @@ -399,7 +418,7 @@ class McCormick(object): def changePoint(self, var, point): updates the current value() on the pyomo side and the current point on the MC++ side. - """ + """ def __init__(self, expression, improved_var_bounds=None): # Guarantee that McCormick objects have mc_expr defined @@ -466,4 +485,3 @@ def warn_if_var_missing_value(self): for message in self.visitor.missing_value_warnings: logger.warning(message) self.visitor.missing_value_warnings = [] - diff --git a/pyomo/contrib/mcpp/setup.py b/pyomo/contrib/mcpp/setup.py deleted file mode 100644 index 8f309660848..00000000000 --- a/pyomo/contrib/mcpp/setup.py +++ /dev/null @@ -1,15 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -from setuptools import setup -from pyomo.contrib.mcpp import build - -setup(**build._generate_configuration()) diff --git a/pyomo/contrib/mcpp/test_mcpp.py b/pyomo/contrib/mcpp/test_mcpp.py index 64ce2290f85..9d8c670d470 100644 --- a/pyomo/contrib/mcpp/test_mcpp.py +++ b/pyomo/contrib/mcpp/test_mcpp.py @@ -20,15 +20,26 @@ from pyomo.common.dependencies.matplotlib import pyplot as plt from pyomo.contrib.mcpp.pyomo_mcpp import McCormick as mc, mcpp_available, MCPP_Error from pyomo.core import ( - ConcreteModel, Expression, Var, acos, asin, atan, cos, exp, quicksum, sin, - tan, value, - ComponentMap, log) -from pyomo.core.expr.current import identify_variables + ConcreteModel, + Expression, + Var, + acos, + asin, + atan, + cos, + exp, + quicksum, + sin, + tan, + value, + ComponentMap, + log, +) +from pyomo.core.expr import identify_variables @unittest.skipIf(not mcpp_available(), "MC++ is not available") class TestMcCormick(unittest.TestCase): - def test_outofbounds(self): m = ConcreteModel() m.x = Var(bounds=(-1, 5), initialize=2) @@ -66,16 +77,14 @@ def test_var(self): output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.mcpp', logging.WARNING): mc_var = mc(m.no_ub) - self.assertIn("Var no_ub missing upper bound.", - output.getvalue().strip()) + self.assertIn("Var no_ub missing upper bound.", output.getvalue().strip()) self.assertEqual(mc_var.lower(), 0) self.assertEqual(mc_var.upper(), 500000) m.no_lb = Var(bounds=(None, -3), initialize=-1) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.mcpp', logging.WARNING): mc_var = mc(m.no_lb) - self.assertIn("Var no_lb missing lower bound.", - output.getvalue().strip()) + self.assertIn("Var no_lb missing lower bound.", output.getvalue().strip()) self.assertEqual(mc_var.lower(), -500000) self.assertEqual(mc_var.upper(), -3) m.no_val = Var(bounds=(0, 1)) @@ -83,8 +92,7 @@ def test_var(self): with LoggingIntercept(output, 'pyomo.contrib.mcpp', logging.WARNING): mc_var = mc(m.no_val) mc_var.subcv() - self.assertIn("Var no_val missing value.", - output.getvalue().strip()) + self.assertIn("Var no_val missing value.", output.getvalue().strip()) self.assertEqual(mc_var.lower(), 0) self.assertEqual(mc_var.upper(), 1) @@ -98,7 +106,8 @@ def test_fixed_var(self): self.assertEqual(mc_expr.upper(), 160) self.assertEqual( str(mc_expr), - "[ -1.00000e+02 : 1.60000e+02 ] [ 6.00000e+00 : 6.00000e+00 ] [ ( 2.00000e+00) : ( 2.00000e+00) ]") + "[ -1.00000e+02 : 1.60000e+02 ] [ 6.00000e+00 : 6.00000e+00 ] [ ( 2.00000e+00) : ( 2.00000e+00) ]", + ) def test_reciprocal(self): m = ConcreteModel() @@ -116,10 +125,9 @@ def test_nonpyomo_numeric(self): def test_linear_expression(self): m = ConcreteModel() m.x = Var(bounds=(1, 2), initialize=1) - with self.assertRaises(NotImplementedError): - mc_expr = mc(quicksum([m.x, m.x], linear=True)) - self.assertEqual(mc_expr.lower(), 2) - self.assertEqual(mc_expr.upper(), 4) + mc_expr = mc(quicksum([m.x, m.x], linear=True)) + self.assertEqual(mc_expr.lower(), 2) + self.assertEqual(mc_expr.upper(), 4) def test_trig(self): m = ConcreteModel() @@ -148,7 +156,7 @@ def test_lmtd(self): m.x = Var(bounds=(0.1, 500), initialize=33.327) m.y = Var(bounds=(0.1, 500), initialize=14.436) m.z = Var(bounds=(0, 90), initialize=22.5653) - e = m.z - (m.x * m.y * (m.x + m.y) / 2) ** (1/3) + e = m.z - (m.x * m.y * (m.x + m.y) / 2) ** (1 / 3) mc_expr = mc(e) for _x in [m.x.lb, m.x.ub]: @@ -203,17 +211,18 @@ def test_powers(self): # This was corrected in 2.1 to # "Square-root with nonpositive values in range" with self.assertRaisesRegex( - MCPP_Error, - r"(Square-root with nonpositive values in range)" - r"|(Log with negative values in range)"): - mc(m.z ** 1.5) - mc_expr = mc(m.y ** 1.5) + MCPP_Error, + r"(Square-root with nonpositive values in range)" + r"|(Log with negative values in range)", + ): + mc(m.z**1.5) + mc_expr = mc(m.y**1.5) self.assertAlmostEqual(mc_expr.lower(), 1e-4**1.5) self.assertAlmostEqual(mc_expr.upper(), 2**1.5) - mc_expr = mc(m.y ** m.x) + mc_expr = mc(m.y**m.x) self.assertAlmostEqual(mc_expr.lower(), 1e-4**2) self.assertAlmostEqual(mc_expr.upper(), 4) - mc_expr = mc(m.z ** 2) + mc_expr = mc(m.z**2) self.assertAlmostEqual(mc_expr.lower(), 0) self.assertAlmostEqual(mc_expr.upper(), 1) @@ -242,20 +251,35 @@ def make2dPlot(expr, numticks=10, show_plot=False): mc_cvVals[i] = mc_expr.convex() fvals[i] = value(expr) if show_plot: - plt.plot(xaxis, fvals, 'r', xaxis, mc_ccVals, 'b--', xaxis, - mc_cvVals, 'b--', xaxis, aff_cc, 'k|', xaxis, aff_cv, 'k|') + plt.plot( + xaxis, + fvals, + 'r', + xaxis, + mc_ccVals, + 'b--', + xaxis, + mc_cvVals, + 'b--', + xaxis, + aff_cc, + 'k|', + xaxis, + aff_cv, + 'k|', + ) plt.show() return mc_ccVals, mc_cvVals, aff_cc, aff_cv def make3dPlot(expr, numticks=30, show_plot=False): - ccSurf = [None] * ((numticks + 1)**2) - cvSurf = [None] * ((numticks + 1)**2) - fvals = [None] * ((numticks + 1)**2) - xaxis2d = [None] * ((numticks + 1)**2) - yaxis2d = [None] * ((numticks + 1)**2) - ccAffine = [None] * ((numticks + 1)**2) - cvAffine = [None] * ((numticks + 1)**2) + ccSurf = [None] * ((numticks + 1) ** 2) + cvSurf = [None] * ((numticks + 1) ** 2) + fvals = [None] * ((numticks + 1) ** 2) + xaxis2d = [None] * ((numticks + 1) ** 2) + yaxis2d = [None] * ((numticks + 1) ** 2) + ccAffine = [None] * ((numticks + 1) ** 2) + cvAffine = [None] * ((numticks + 1) ** 2) eqn = mc(expr) vars = identify_variables(expr) @@ -279,11 +303,11 @@ def make3dPlot(expr, numticks=30, show_plot=False): eqn.changePoint(x, x_tick) for j, y_tick in enumerate(yaxis): ccAffine[i + (numticks + 1) * j] = ( - ccSlope[x] * (x_tick - x_val) + - ccSlope[y] * (y_tick - y_val) + f_cc) + ccSlope[x] * (x_tick - x_val) + ccSlope[y] * (y_tick - y_val) + f_cc + ) cvAffine[i + (numticks + 1) * j] = ( - cvSlope[x] * (x_tick - x_val) + - cvSlope[y] * (y_tick - y_val) + f_cv) + cvSlope[x] * (x_tick - x_val) + cvSlope[y] * (y_tick - y_val) + f_cv + ) xaxis2d[i + (numticks + 1) * j] = x_tick yaxis2d[i + (numticks + 1) * j] = y_tick eqn.changePoint(y, y_tick) @@ -293,6 +317,7 @@ def make3dPlot(expr, numticks=30, show_plot=False): if show_plot: from mpl_toolkits.mplot3d import Axes3D + assert Axes3D # silence pyflakes # Plotting Solutions in 3D diff --git a/pyomo/contrib/mindtpy/MindtPy.py b/pyomo/contrib/mindtpy/MindtPy.py index f0fb26df35e..6eb27c4c649 100644 --- a/pyomo/contrib/mindtpy/MindtPy.py +++ b/pyomo/contrib/mindtpy/MindtPy.py @@ -50,35 +50,25 @@ - Add single-tree implementation. - Add support for cplex_persistent solver. - Fix bug in OA cut expression in cut_generation.py. - """ -from __future__ import division -import logging -from pyomo.contrib.gdpopt.util import (copy_var_list_values, - time_code, lower_logger_level_to) -from pyomo.contrib.mindtpy.initialization import MindtPy_initialize_main -from pyomo.contrib.mindtpy.iterate import MindtPy_iteration_loop -from pyomo.contrib.mindtpy.util import model_is_valid, set_up_solve_data, set_up_logger, get_primal_integral, get_dual_integral, setup_results_object, process_objective, create_utility_block -from pyomo.core import (Block, ConstraintList, NonNegativeReals, - Var, VarList, TransformationFactory, RangeSet, minimize, Constraint, Objective) -from pyomo.opt import SolverFactory -from pyomo.contrib.mindtpy.config_options import _get_MindtPy_config, check_config -from pyomo.common.config import add_docstring_list -from pyomo.util.vars_from_expressions import get_vars_from_components -__version__ = (0, 1, 0) +from pyomo.contrib.mindtpy import __version__ +from pyomo.opt import SolverFactory +from pyomo.contrib.mindtpy.config_options import _get_MindtPy_config +from pyomo.common.config import document_kwargs_from_configdict +from pyomo.contrib.mindtpy.config_options import _supported_algorithms @SolverFactory.register( - 'mindtpy', - doc='MindtPy: Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo') + 'mindtpy', doc='MindtPy: Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo' +) class MindtPySolver(object): """ Decomposition solver for Mixed-Integer Nonlinear Programming (MINLP) problems. - The MindtPy (Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo) solver - applies a variety of decomposition-based approaches to solve Mixed-Integer - Nonlinear Programming (MINLP) problems. + The MindtPy (Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo) solver + applies a variety of decomposition-based approaches to solve Mixed-Integer + Nonlinear Programming (MINLP) problems. These approaches include: - Outer approximation (OA) @@ -88,17 +78,12 @@ class MindtPySolver(object): - Global LP/NLP based branch-and-bound (GLP/NLP) - Regularized LP/NLP based branch-and-bound (RLP/NLP) - Feasibility pump (FP) - - This solver implementation has been developed by David Bernal - and Zedong Peng as part of research efforts at the Grossmann - Research Group (http://egon.cheme.cmu.edu/) at the Department of Chemical Engineering at - Carnegie Mellon University. """ + CONFIG = _get_MindtPy_config() def available(self, exception_flag=True): - """Check if solver is available. - """ + """Check if solver is available.""" return True def license_is_valid(self): @@ -108,184 +93,26 @@ def version(self): """Return a 3-tuple describing the solver version.""" return __version__ + @document_kwargs_from_configdict(CONFIG) def solve(self, model, **kwds): """Solve the model. - Parameters - ---------- - model : Pyomo model - The MINLP model to be solved. + Args: + model (Block): a Pyomo model or block to be solved - Returns - ------- - results : SolverResults - Results from solving the MINLP problem by MindtPy. """ - config = self.CONFIG(kwds.pop('options', { - }), preserve_implicit=True) # TODO: do we need to set preserve_implicit=True? - config.set_value(kwds) - set_up_logger(config) - new_logging_level = logging.INFO if config.tee else None - with lower_logger_level_to(config.logger, new_logging_level): - check_config(config) - - solve_data = set_up_solve_data(model, config) - - if config.integer_to_binary: - TransformationFactory('contrib.integer_to_binary'). \ - apply_to(solve_data.working_model) - - with time_code(solve_data.timing, 'total', is_main_timer=True), \ - lower_logger_level_to(config.logger, new_logging_level), \ - create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data): - config.logger.info( - '---------------------------------------------------------------------------------------------\n' - ' Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo (MindtPy) \n' - '---------------------------------------------------------------------------------------------\n' - 'For more information, please visit https://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html') - - MindtPy = solve_data.working_model.MindtPy_utils - setup_results_object(solve_data, config) - # In the process_objective function, as long as the objective function is nonlinear, it will be reformulated and the variable/constraint/objective lists will be updated. - # For OA/GOA/LP-NLP algorithm, if the objective funtion is linear, it will not be reformulated as epigraph constraint. - # If the objective function is linear, it will be reformulated as epigraph constraint only if the Feasibility Pump or ROA/RLP-NLP algorithm is activated. (move_objective = True) - # In some cases, the variable/constraint/objective lists will not be updated even if the objective is epigraph-reformulated. - # In Feasibility Pump, since the distance calculation only includes discrete variables and the epigraph slack variables are continuous variables, the Feasibility Pump algorithm will not affected even if the variable list are updated. - # In ROA and RLP/NLP, since the distance calculation does not include these epigraph slack variables, they should not be added to the variable list. (update_var_con_list = False) - # In the process_objective function, once the objective function has been reformulated as epigraph constraint, the variable/constraint/objective lists will not be updated only if the MINLP has a linear objective function and regularization is activated at the same time. - # This is because the epigraph constraint is very "flat" for branching rules. The original objective function will be used for the main problem and epigraph reformulation will be used for the projection problem. - # TODO: The logic here is too complicated, can we simplify it? - process_objective(solve_data, config, - move_objective=(config.init_strategy == 'FP' - or config.add_regularization is not None - or config.move_objective), - use_mcpp=config.use_mcpp, - update_var_con_list=config.add_regularization is None, - partition_nonlinear_terms=config.partition_obj_nonlinear_terms, - obj_handleable_polynomial_degree=solve_data.mip_objective_polynomial_degree, - constr_handleable_polynomial_degree=solve_data.mip_constraint_polynomial_degree - ) - # The epigraph constraint is very "flat" for branching rules. - # If ROA/RLP-NLP is activated and the original objective function is linear, we will use the original objective for the main mip. - if MindtPy.objective_list[0].expr.polynomial_degree() in solve_data.mip_objective_polynomial_degree and config.add_regularization is not None: - MindtPy.objective_list[0].activate() - MindtPy.objective_constr.deactivate() - MindtPy.objective.deactivate() - - # Save model initial values. - solve_data.initial_var_values = list( - v.value for v in MindtPy.variable_list) - - # Store the initial model state as the best solution found. If we - # find no better solution, then we will restore from this copy. - solve_data.best_solution_found = None - solve_data.best_solution_found_time = None - - # Record solver name - solve_data.results.solver.name = 'MindtPy' + str(config.strategy) - - # Validate the model to ensure that MindtPy is able to solve it. - if not model_is_valid(solve_data, config): - return - - # Create a model block in which to store the generated feasibility - # slack constraints. Do not leave the constraints on by default. - feas = MindtPy.feas_opt = Block() - feas.deactivate() - feas.feas_constraints = ConstraintList( - doc='Feasibility Problem Constraints') - - # Create a model block in which to store the generated linear - # constraints. Do not leave the constraints on by default. - lin = MindtPy.cuts = Block() - lin.deactivate() - - # no-good cuts exclude particular discrete decisions - lin.no_good_cuts = ConstraintList(doc='no-good cuts') - # Feasible no-good cuts exclude discrete realizations that have - # been explored via an NLP subproblem. Depending on model - # characteristics, the user may wish to revisit NLP subproblems - # (with a different initialization, for example). Therefore, these - # cuts are not enabled by default. - # - # Note: these cuts will only exclude integer realizations that are - # not already in the primary no_good_cuts ConstraintList. - lin.feasible_no_good_cuts = ConstraintList( - doc='explored no-good cuts') - lin.feasible_no_good_cuts.deactivate() - - if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2': - feas.nl_constraint_set = RangeSet(len(MindtPy.nonlinear_constraint_list), - doc='Integer index set over the nonlinear constraints.') - # Create slack variables for feasibility problem - feas.slack_var = Var(feas.nl_constraint_set, - domain=NonNegativeReals, initialize=1) - else: - feas.slack_var = Var(domain=NonNegativeReals, initialize=1) - - # Create slack variables for OA cuts - if config.add_slack: - lin.slack_vars = VarList( - bounds=(0, config.max_slack), initialize=0, domain=NonNegativeReals) - - # Initialize the main problem - with time_code(solve_data.timing, 'initialization'): - MindtPy_initialize_main(solve_data, config) - - # Algorithm main loop - with time_code(solve_data.timing, 'main loop'): - MindtPy_iteration_loop(solve_data, config) - if solve_data.best_solution_found is not None: - # Update values in original model - copy_var_list_values( - from_list=solve_data.best_solution_found.MindtPy_utils.variable_list, - to_list=MindtPy.variable_list, - config=config) - # The original does not have variable list. Use get_vars_from_components() should be used for both working_model and original_model to exclude the unused variables. - solve_data.working_model.MindtPy_utils.deactivate() - if solve_data.working_model.find_component("_int_to_binary_reform") is not None: - solve_data.working_model._int_to_binary_reform.deactivate() - copy_var_list_values(list(get_vars_from_components(block=solve_data.working_model, - ctype=(Constraint, Objective), - include_fixed=False, - active=True, - sort=True, - descend_into=True, - descent_order=None)), - list(get_vars_from_components(block=solve_data.original_model, - ctype=(Constraint, Objective), - include_fixed=False, - active=True, - sort=True, - descend_into=True, - descent_order=None)), - config=config) - # exclude fixed variables here. This is consistent with the definition of variable_list in GDPopt.util - if solve_data.objective_sense == minimize: - solve_data.results.problem.lower_bound = solve_data.dual_bound - solve_data.results.problem.upper_bound = solve_data.primal_bound - else: - solve_data.results.problem.lower_bound = solve_data.primal_bound - solve_data.results.problem.upper_bound = solve_data.dual_bound - - solve_data.results.solver.timing = solve_data.timing - solve_data.results.solver.user_time = solve_data.timing.total - solve_data.results.solver.wallclock_time = solve_data.timing.total - solve_data.results.solver.iterations = solve_data.mip_iter - solve_data.results.solver.num_infeasible_nlp_subproblem = solve_data.nlp_infeasible_counter - solve_data.results.solver.best_solution_found_time = solve_data.best_solution_found_time - solve_data.results.solver.primal_integral = get_primal_integral(solve_data, config) - solve_data.results.solver.dual_integral = get_dual_integral(solve_data, config) - solve_data.results.solver.primal_dual_gap_integral = solve_data.results.solver.primal_integral + \ - solve_data.results.solver.dual_integral - config.logger.info(' {:<25}: {:>7.4f} '.format( - 'Primal-dual gap integral', solve_data.results.solver.primal_dual_gap_integral)) - - if config.single_tree: - solve_data.results.solver.num_nodes = solve_data.nlp_iter - \ - (1 if config.init_strategy == 'rNLP' else 0) - - return solve_data.results + # The algorithm should have been specified as an argument to the solve + # method. We will instantiate an ephemeral instance of the correct + # solver and call its solve method. + options = kwds.pop('options', {}) + config = self.CONFIG(options, preserve_implicit=True) + # Don't complain about extra things, they aren't for us. We just need to + # get the algorithm and then our job is done. + config.set_value(kwds, skip_implicit=True) + + return SolverFactory(_supported_algorithms[config.strategy][0]).solve( + model, **kwds + ) # # Support 'with' statements. @@ -295,8 +122,3 @@ def __enter__(self): def __exit__(self, t, v, traceback): pass - - -# Add the CONFIG arguments to the solve method docstring -MindtPySolver.solve.__doc__ = add_docstring_list( - MindtPySolver.solve.__doc__, MindtPySolver.CONFIG, indent_by=8) diff --git a/pyomo/contrib/mindtpy/__init__.py b/pyomo/contrib/mindtpy/__init__.py index e69de29bb2d..8e2c2d9eaa4 100644 --- a/pyomo/contrib/mindtpy/__init__.py +++ b/pyomo/contrib/mindtpy/__init__.py @@ -0,0 +1 @@ +__version__ = (0, 1, 0) diff --git a/pyomo/contrib/mindtpy/algorithm_base_class.py b/pyomo/contrib/mindtpy/algorithm_base_class.py new file mode 100644 index 00000000000..7def1dcaab3 --- /dev/null +++ b/pyomo/contrib/mindtpy/algorithm_base_class.py @@ -0,0 +1,3051 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +"""Iteration loop for MindtPy.""" +import math +from io import StringIO +import pyomo.core.expr as EXPR +from pyomo.repn import generate_standard_repn +import logging +from pyomo.contrib.fbbt.fbbt import fbbt +from pyomo.opt import TerminationCondition as tc +from pyomo.contrib.mindtpy import __version__ +from pyomo.common.dependencies import attempt_import +from pyomo.util.vars_from_expressions import get_vars_from_components +from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver +from pyomo.common.collections import ComponentMap, Bunch, ComponentSet +from pyomo.common.errors import InfeasibleConstraintException +from pyomo.contrib.mindtpy.cut_generation import add_no_good_cuts +from operator import itemgetter +from pyomo.common.errors import DeveloperError +from pyomo.solvers.plugins.solvers.gurobi_direct import gurobipy +from pyomo.opt import ( + SolverFactory, + SolverResults, + ProblemSense, + SolutionStatus, + SolverStatus, +) +from pyomo.core import ( + minimize, + maximize, + Objective, + VarList, + Reals, + ConstraintList, + Constraint, + Block, + TransformationFactory, + NonNegativeReals, + Suffix, + Var, + RangeSet, + value, + Expression, +) +from pyomo.contrib.gdpopt.util import ( + SuppressInfeasibleWarning, + _DoNothing, + lower_logger_level_to, + copy_var_list_values, + get_main_elapsed_time, + time_code, +) +from pyomo.contrib.gdpopt.solve_discrete_problem import ( + distinguish_mip_infeasible_or_unbounded, +) +from pyomo.contrib.mindtpy.util import ( + generate_norm1_objective_function, + generate_norm2sq_objective_function, + generate_norm_inf_objective_function, + generate_lag_objective_function, + GurobiPersistent4MindtPy, + setup_results_object, + get_integer_solution, + initialize_feas_subproblem, + epigraph_reformulation, + add_var_bound, + copy_var_list_values_from_solution_pool, + generate_norm_constraint, + fp_converged, + add_orthogonality_cuts, + set_solver_mipgap, + set_solver_constraint_violation_tolerance, + update_solver_timelimit, +) + +single_tree, single_tree_available = attempt_import('pyomo.contrib.mindtpy.single_tree') +tabu_list, tabu_list_available = attempt_import('pyomo.contrib.mindtpy.tabu_list') + + +class _MindtPyAlgorithm(object): + def __init__(self, **kwds): + """ + This is a common init method for all the MindtPy algorithms, so that we + correctly set up the config arguments and initialize the generic parts + of the algorithm state. + + """ + self.working_model = None + self.mip = None + self.fixed_nlp = None + + # We store bounds, timing info, iteration count, incumbent, and the + # expression of the original (possibly nonlinear) objective function. + self.results = SolverResults() + self.timing = Bunch() + self.curr_int_sol = [] + self.should_terminate = False + self.integer_list = [] + + # Set up iteration counters + self.nlp_iter = 0 + self.mip_iter = 0 + self.mip_subiter = 0 + self.nlp_infeasible_counter = 0 + self.fp_iter = 1 + + self.primal_bound_progress_time = [0] + self.dual_bound_progress_time = [0] + self.abs_gap = float('inf') + self.rel_gap = float('inf') + self.log_formatter = ( + ' {:>9} {:>15} {:>15g} {:>12g} {:>12g} {:>7.2%} {:>7.2f}' + ) + self.fixed_nlp_log_formatter = ( + '{:1}{:>9} {:>15} {:>15g} {:>12g} {:>12g} {:>7.2%} {:>7.2f}' + ) + self.log_note_formatter = ' {:>9} {:>15} {:>15}' + + # Flag indicating whether the solution improved in the past + # iteration or not + self.primal_bound_improved = False + self.dual_bound_improved = False + + # Store the initial model state as the best solution found. If we + # find no better solution, then we will restore from this copy. + self.best_solution_found = None + self.best_solution_found_time = None + + self.stored_bound = {} + self.num_no_good_cuts_added = {} + self.last_iter_cuts = False + # Store the OA cuts generated in the mip_start_process. + self.mip_start_lazy_oa_cuts = [] + + # Support use as a context manager under current solver API + def __enter__(self): + return self + + def __exit__(self, t, v, traceback): + pass + + def available(self, exception_flag=True): + """Solver is always available. Though subsolvers may not be, they will + raise an error when the time comes. + """ + return True + + def license_is_valid(self): + return True + + def version(self): + """Return a 3-tuple describing the solver version.""" + return __version__ + + _metasolver = False + + def _log_solver_intro_message(self): + self.config.logger.info( + "Starting MindtPy version %s using %s algorithm" + % (".".join(map(str, self.version())), self.config.strategy) + ) + os = StringIO() + self.config.display(ostream=os) + self.config.logger.info(os.getvalue()) + self.config.logger.info( + '-----------------------------------------------------------------------------------------------\n' + ' Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo (MindtPy) \n' + '-----------------------------------------------------------------------------------------------\n' + 'For more information, please visit \n' + 'https://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html' + ) + self.config.logger.info( + 'If you use this software, please cite the following:\n' + 'Bernal, David E., et al. Mixed-integer nonlinear decomposition toolbox for Pyomo (MindtPy).\n' + 'Computer Aided Chemical Engineering. Vol. 44. Elsevier, 2018. 895-900.\n' + ) + + def set_up_logger(self): + """Set up the formatter and handler for logger.""" + self.config.logger.handlers.clear() + self.config.logger.propagate = False + ch = logging.StreamHandler() + ch.setLevel(self.config.logging_level) + # create formatter and add it to the handlers + formatter = logging.Formatter('%(message)s') + ch.setFormatter(formatter) + # add the handlers to logger + self.config.logger.addHandler(ch) + + def _log_header(self, logger): + # TODO: rewrite + logger.info( + '=================================================================' + '============================' + ) + logger.info( + '{:^9} | {:^15} | {:^11} | {:^11} | {:^8} | {:^7}\n'.format( + 'Iteration', + 'Subproblem Type', + 'Lower Bound', + 'Upper Bound', + ' Gap ', + 'Time(s)', + ) + ) + + def create_utility_block(self, model, name): + created_util_block = False + # Create a model block on which to store MindtPy-specific utility + # modeling objects. + if hasattr(model, name): + raise RuntimeError( + "MindtPy needs to create a Block named %s " + "on the model object, but an attribute with that name " + "already exists." % name + ) + else: + created_util_block = True + setattr( + model, + name, + Block(doc="Container for MindtPy solver utility modeling objects"), + ) + self.util_block_name = name + + # Save ordered lists of main modeling components, so that data can + # be easily transferred between future model clones. + self.build_ordered_component_lists(model) + self.add_cuts_components(model) + + def model_is_valid(self): + """Determines whether the model is solvable by MindtPy. + + Returns + ------- + bool + True if model is solvable in MindtPy, False otherwise. + """ + m = self.working_model + MindtPy = m.MindtPy_utils + config = self.config + + # Handle LP/NLP being passed to the solver + prob = self.results.problem + if len(MindtPy.discrete_variable_list) == 0: + config.logger.info('Problem has no discrete decisions.') + obj = next(m.component_data_objects(ctype=Objective, active=True)) + if ( + any( + c.body.polynomial_degree() + not in self.mip_constraint_polynomial_degree + for c in MindtPy.constraint_list + ) + or obj.expr.polynomial_degree() + not in self.mip_objective_polynomial_degree + ): + config.logger.info( + 'Your model is a NLP (nonlinear program). ' + 'Using NLP solver %s to solve.' % config.nlp_solver + ) + update_solver_timelimit( + self.nlp_opt, config.nlp_solver, self.timing, config + ) + self.nlp_opt.solve( + self.original_model, + tee=config.nlp_solver_tee, + **config.nlp_solver_args, + ) + return False + else: + config.logger.info( + 'Your model is an LP (linear program). ' + 'Using LP solver %s to solve.' % config.mip_solver + ) + if isinstance(self.mip_opt, PersistentSolver): + self.mip_opt.set_instance(self.original_model) + update_solver_timelimit( + self.mip_opt, config.mip_solver, self.timing, config + ) + results = self.mip_opt.solve( + self.original_model, + tee=config.mip_solver_tee, + load_solutions=False, + **config.mip_solver_args, + ) + if len(results.solution) > 0: + self.original_model.solutions.load_from(results) + return False + + # Set up dual value reporting + if config.calculate_dual_at_solution: + if not hasattr(m, 'dual'): + m.dual = Suffix(direction=Suffix.IMPORT) + elif not isinstance(m.dual, Suffix): + raise ValueError( + "dual is not defined as a Suffix in the original model." + ) + + # TODO if any continuous variables are multiplied with binary ones, + # need to do some kind of transformation (Glover?) or throw an error message + return True + + def build_ordered_component_lists(self, model): + """Define lists used for future data transfer. + + Also attaches ordered lists of the variables, constraints to the model so that they can be used for mapping back and + forth. + + """ + util_block = getattr(model, self.util_block_name) + var_set = ComponentSet() + util_block.constraint_list = list( + model.component_data_objects( + ctype=Constraint, active=True, descend_into=(Block) + ) + ) + util_block.linear_constraint_list = list( + c + for c in util_block.constraint_list + if c.body.polynomial_degree() in self.mip_constraint_polynomial_degree + ) + util_block.nonlinear_constraint_list = list( + c + for c in util_block.constraint_list + if c.body.polynomial_degree() not in self.mip_constraint_polynomial_degree + ) + util_block.objective_list = list( + model.component_data_objects( + ctype=Objective, active=True, descend_into=(Block) + ) + ) + + # Identify the non-fixed variables in (potentially) active constraints and + # objective functions + for constr in getattr(util_block, 'constraint_list'): + for v in EXPR.identify_variables(constr.body, include_fixed=False): + var_set.add(v) + for obj in model.component_data_objects(ctype=Objective, active=True): + for v in EXPR.identify_variables(obj.expr, include_fixed=False): + var_set.add(v) + + # We use component_data_objects rather than list(var_set) in order to + # preserve a deterministic ordering. + util_block.variable_list = list( + v + for v in model.component_data_objects(ctype=Var, descend_into=(Block)) + if v in var_set + ) + util_block.discrete_variable_list = list( + v for v in util_block.variable_list if v in var_set and v.is_integer() + ) + util_block.continuous_variable_list = list( + v for v in util_block.variable_list if v in var_set and v.is_continuous() + ) + + def add_cuts_components(self, model): + config = self.config + MindtPy = model.MindtPy_utils + + # Create a model block in which to store the generated feasibility + # slack constraints. Do not leave the constraints on by default. + feas = MindtPy.feas_opt = Block() + feas.deactivate() + feas.feas_constraints = ConstraintList(doc='Feasibility Problem Constraints') + + # Create a model block in which to store the generated linear + # constraints. Do not leave the constraints on by default. + lin = MindtPy.cuts = Block() + lin.deactivate() + + # no-good cuts exclude particular discrete decisions + lin.no_good_cuts = ConstraintList(doc='no-good cuts') + # Feasible no-good cuts exclude discrete realizations that have + # been explored via an NLP subproblem. Depending on model + # characteristics, the user may wish to revisit NLP subproblems + # (with a different initialization, for example). Therefore, these + # cuts are not enabled by default. + + if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2': + feas.nl_constraint_set = RangeSet( + len(MindtPy.nonlinear_constraint_list), + doc='Integer index set over the nonlinear constraints.', + ) + # Create slack variables for feasibility problem + feas.slack_var = Var( + feas.nl_constraint_set, domain=NonNegativeReals, initialize=1 + ) + else: + feas.slack_var = Var(domain=NonNegativeReals, initialize=1) + + # Create slack variables for OA cuts + if config.add_slack: + lin.slack_vars = VarList( + bounds=(0, config.max_slack), initialize=0, domain=NonNegativeReals + ) + + def get_dual_integral(self): + """Calculate the dual integral. + Ref: The confined primal integral. [http://www.optimization-online.org/DB_FILE/2020/07/7910.pdf] + + Returns + ------- + float + The dual integral. + """ + dual_integral = 0 + dual_bound_progress = self.dual_bound_progress.copy() + # Initial dual bound is set to inf or -inf. To calculate dual integral, we set + # initial_dual_bound to 10% greater or smaller than the first_found_dual_bound. + # TODO: check if the calculation of initial_dual_bound needs to be modified. + for dual_bound in dual_bound_progress: + if dual_bound != dual_bound_progress[0]: + break + for i in range(len(dual_bound_progress)): + if dual_bound_progress[i] == self.dual_bound_progress[0]: + dual_bound_progress[i] = dual_bound * ( + 1 + - self.config.initial_bound_coef + * self.objective_sense + * math.copysign(1, dual_bound) + ) + else: + break + for i in range(len(dual_bound_progress)): + if i == 0: + dual_integral += abs(dual_bound_progress[i] - self.dual_bound) * ( + self.dual_bound_progress_time[i] + ) + else: + dual_integral += abs(dual_bound_progress[i] - self.dual_bound) * ( + self.dual_bound_progress_time[i] + - self.dual_bound_progress_time[i - 1] + ) + self.config.logger.info( + ' {:<25}: {:>7.4f} '.format('Dual integral', dual_integral) + ) + return dual_integral + + def get_primal_integral(self): + """Calculate the primal integral. + Ref: The confined primal integral. [http://www.optimization-online.org/DB_FILE/2020/07/7910.pdf] + + Returns + ------- + float + The primal integral. + """ + primal_integral = 0 + primal_bound_progress = self.primal_bound_progress.copy() + # Initial primal bound is set to inf or -inf. To calculate primal integral, we set + # initial_primal_bound to 10% greater or smaller than the first_found_primal_bound. + # TODO: check if the calculation of initial_primal_bound needs to be modified. + for primal_bound in primal_bound_progress: + if primal_bound != primal_bound_progress[0]: + break + for i in range(len(primal_bound_progress)): + if primal_bound_progress[i] == self.primal_bound_progress[0]: + primal_bound_progress[i] = primal_bound * ( + 1 + + self.config.initial_bound_coef + * self.objective_sense + * math.copysign(1, primal_bound) + ) + else: + break + for i in range(len(primal_bound_progress)): + if i == 0: + primal_integral += abs(primal_bound_progress[i] - self.primal_bound) * ( + self.primal_bound_progress_time[i] + ) + else: + primal_integral += abs(primal_bound_progress[i] - self.primal_bound) * ( + self.primal_bound_progress_time[i] + - self.primal_bound_progress_time[i - 1] + ) + + self.config.logger.info( + ' {:<25}: {:>7.4f} '.format('Primal integral', primal_integral) + ) + return primal_integral + + def get_integral_info(self): + ''' + Obtain primal integral, dual integral and primal dual gap integral. + ''' + self.primal_integral = self.get_primal_integral() + self.dual_integral = self.get_dual_integral() + self.primal_dual_gap_integral = self.primal_integral + self.dual_integral + + def update_gap(self): + """Update the relative gap and the absolute gap.""" + if self.objective_sense == minimize: + self.abs_gap = self.primal_bound - self.dual_bound + else: + self.abs_gap = self.dual_bound - self.primal_bound + self.rel_gap = self.abs_gap / (abs(self.primal_bound) + 1e-10) + + def update_dual_bound(self, bound_value): + """Update the dual bound. + + Call after solving relaxed problem, including relaxed NLP and MIP main problem. + Use the optimal primal bound of the relaxed problem to update the dual bound. + + Parameters + ---------- + bound_value : float + The input value used to update the dual bound. + """ + if math.isnan(bound_value): + return + if self.objective_sense == minimize: + self.dual_bound = max(bound_value, self.dual_bound) + self.dual_bound_improved = self.dual_bound > self.dual_bound_progress[-1] + else: + self.dual_bound = min(bound_value, self.dual_bound) + self.dual_bound_improved = self.dual_bound < self.dual_bound_progress[-1] + self.dual_bound_progress.append(self.dual_bound) + self.dual_bound_progress_time.append(get_main_elapsed_time(self.timing)) + if self.dual_bound_improved: + self.update_gap() + + def update_suboptimal_dual_bound(self, results): + """If the relaxed problem is not solved to optimality, the dual bound is updated + according to the dual bound of relaxed problem. + + Parameters + ---------- + results : SolverResults + Results from solving the relaxed problem. + The dual bound of the relaxed problem can only be obtained from the result object. + """ + if self.objective_sense == minimize: + bound_value = results.problem.lower_bound + else: + bound_value = results.problem.upper_bound + self.update_dual_bound(bound_value) + + def update_primal_bound(self, bound_value): + """Update the primal bound. + + Call after solve fixed NLP subproblem. + Use the optimal primal bound of the relaxed problem to update the dual bound. + + Parameters + ---------- + bound_value : float + The input value used to update the primal bound. + """ + if math.isnan(bound_value): + return + if self.objective_sense == minimize: + self.primal_bound = min(bound_value, self.primal_bound) + self.primal_bound_improved = ( + self.primal_bound < self.primal_bound_progress[-1] + ) + else: + self.primal_bound = max(bound_value, self.primal_bound) + self.primal_bound_improved = ( + self.primal_bound > self.primal_bound_progress[-1] + ) + self.primal_bound_progress.append(self.primal_bound) + self.primal_bound_progress_time.append(get_main_elapsed_time(self.timing)) + if self.primal_bound_improved: + self.update_gap() + + def process_objective(self, update_var_con_list=True): + """Process model objective function. + + Check that the model has only 1 valid objective. + If the objective is nonlinear, move it into the constraints. + If no objective function exists, emit a warning and create a dummy objective. + + Parameters + ---------- + update_var_con_list : bool, optional + Whether to update the variable/constraint/objective lists, by default True. + Currently, update_var_con_list will be set to False only when add_regularization is not None in MindtPy. + """ + config = self.config + m = self.working_model + util_block = getattr(m, self.util_block_name) + # Handle missing or multiple objectives + active_objectives = list( + m.component_data_objects(ctype=Objective, active=True, descend_into=True) + ) + self.results.problem.number_of_objectives = len(active_objectives) + if len(active_objectives) == 0: + config.logger.warning( + 'Model has no active objectives. Adding dummy objective.' + ) + util_block.dummy_objective = Objective(expr=1) + main_obj = util_block.dummy_objective + elif len(active_objectives) > 1: + raise ValueError('Model has multiple active objectives.') + else: + main_obj = active_objectives[0] + self.results.problem.sense = ( + ProblemSense.minimize if main_obj.sense == 1 else ProblemSense.maximize + ) + self.objective_sense = main_obj.sense + + # Move the objective to the constraints if it is nonlinear or move_objective is True. + if ( + main_obj.expr.polynomial_degree() + not in self.mip_objective_polynomial_degree + or config.move_objective + ): + if config.move_objective: + config.logger.info("Moving objective to constraint set.") + else: + config.logger.info( + "Objective is nonlinear. Moving it to constraint set." + ) + util_block.objective_value = VarList(domain=Reals, initialize=0) + util_block.objective_constr = ConstraintList() + if ( + main_obj.expr.polynomial_degree() + not in self.mip_objective_polynomial_degree + and config.partition_obj_nonlinear_terms + and main_obj.expr.__class__ is EXPR.SumExpression + ): + repn = generate_standard_repn( + main_obj.expr, quadratic=2 in self.mip_objective_polynomial_degree + ) + # the following code will also work if linear_subexpr is a constant. + linear_subexpr = ( + repn.constant + + sum( + coef * var + for coef, var in zip(repn.linear_coefs, repn.linear_vars) + ) + + sum( + coef * var1 * var2 + for coef, (var1, var2) in zip( + repn.quadratic_coefs, repn.quadratic_vars + ) + ) + ) + # only need to generate one epigraph constraint for the sum of all linear terms and constant + epigraph_reformulation( + linear_subexpr, + util_block.objective_value, + util_block.objective_constr, + config.use_mcpp, + main_obj.sense, + ) + nonlinear_subexpr = repn.nonlinear_expr + if nonlinear_subexpr.__class__ is EXPR.SumExpression: + for subsubexpr in nonlinear_subexpr.args: + epigraph_reformulation( + subsubexpr, + util_block.objective_value, + util_block.objective_constr, + config.use_mcpp, + main_obj.sense, + ) + else: + epigraph_reformulation( + nonlinear_subexpr, + util_block.objective_value, + util_block.objective_constr, + config.use_mcpp, + main_obj.sense, + ) + else: + epigraph_reformulation( + main_obj.expr, + util_block.objective_value, + util_block.objective_constr, + config.use_mcpp, + main_obj.sense, + ) + + main_obj.deactivate() + util_block.objective = Objective( + expr=sum(util_block.objective_value[:]), sense=main_obj.sense + ) + + if ( + main_obj.expr.polynomial_degree() + not in self.mip_objective_polynomial_degree + or (config.move_objective and update_var_con_list) + ): + util_block.variable_list.extend(util_block.objective_value[:]) + util_block.continuous_variable_list.extend( + util_block.objective_value[:] + ) + util_block.constraint_list.extend(util_block.objective_constr[:]) + util_block.objective_list.append(util_block.objective) + for constr in util_block.objective_constr[:]: + if ( + constr.body.polynomial_degree() + in self.mip_constraint_polynomial_degree + ): + util_block.linear_constraint_list.append(constr) + else: + util_block.nonlinear_constraint_list.append(constr) + + def set_up_solve_data(self, model): + """Set up the solve data. + + Parameters + ---------- + model : Pyomo model + The original model to be solved in MindtPy. + """ + config = self.config + # if the objective function is a constant, dual bound constraint is not added. + obj = next(model.component_data_objects(ctype=Objective, active=True)) + if obj.expr.polynomial_degree() == 0: + config.logger.info( + 'The model has a constant objecitive function. use_dual_bound is set to False.' + ) + config.use_dual_bound = False + + if config.use_fbbt: + fbbt(model) + # TODO: logging_level is not logging.INFO here + config.logger.info('Use the fbbt to tighten the bounds of variables') + + self.original_model = model + self.working_model = model.clone() + + # set up bounds + if obj.sense == minimize: + self.primal_bound = float('inf') + self.dual_bound = float('-inf') + else: + self.primal_bound = float('-inf') + self.dual_bound = float('inf') + self.primal_bound_progress = [self.primal_bound] + self.dual_bound_progress = [self.dual_bound] + + if config.nlp_solver in {'ipopt', 'cyipopt'}: + if not hasattr(self.working_model, 'ipopt_zL_out'): + self.working_model.ipopt_zL_out = Suffix(direction=Suffix.IMPORT) + if not hasattr(self.working_model, 'ipopt_zU_out'): + self.working_model.ipopt_zU_out = Suffix(direction=Suffix.IMPORT) + + if config.quadratic_strategy == 0: + self.mip_objective_polynomial_degree = {0, 1} + self.mip_constraint_polynomial_degree = {0, 1} + elif config.quadratic_strategy == 1: + self.mip_objective_polynomial_degree = {0, 1, 2} + self.mip_constraint_polynomial_degree = {0, 1} + elif config.quadratic_strategy == 2: + self.mip_objective_polynomial_degree = {0, 1, 2} + self.mip_constraint_polynomial_degree = {0, 1, 2} + + # ----------------------------------------------------------------------------------------- + # initialization + + def MindtPy_initialization(self): + """Initializes the decomposition algorithm. + + This function initializes the decomposition algorithm, which includes generating the + initial cuts required to build the main MIP. + """ + # Do the initialization + config = self.config + if config.init_strategy == 'rNLP': + self.init_rNLP() + elif config.init_strategy == 'max_binary': + self.init_max_binaries() + elif config.init_strategy == 'initial_binary': + try: + self.curr_int_sol = get_integer_solution(self.working_model) + except TypeError as e: + config.logger.error(e) + raise ValueError( + 'The initial integer combination is not provided or not complete. ' + 'Please provide the complete integer combination or use other initialization strategy.' + ) + self.integer_list.append(self.curr_int_sol) + fixed_nlp, fixed_nlp_result = self.solve_subproblem() + self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result) + elif config.init_strategy == 'FP': + self.init_rNLP() + self.fp_loop() + + def init_rNLP(self, add_oa_cuts=True): + """Initialize the problem by solving the relaxed NLP and then store the optimal variable + values obtained from solving the rNLP. + + Parameters + ---------- + add_oa_cuts : Bool + Whether add OA cuts after solving the relaxed NLP problem. + + Raises + ------ + ValueError + MindtPy unable to handle the termination condition of the relaxed NLP. + """ + config = self.config + m = self.working_model.clone() + config.logger.debug('Relaxed NLP: Solve relaxed integrality') + MindtPy = m.MindtPy_utils + TransformationFactory('core.relax_integer_vars').apply_to(m) + nlp_args = dict(config.nlp_solver_args) + update_solver_timelimit(self.nlp_opt, config.nlp_solver, self.timing, config) + with SuppressInfeasibleWarning(): + results = self.nlp_opt.solve( + m, tee=config.nlp_solver_tee, load_solutions=False, **nlp_args + ) + if len(results.solution) > 0: + m.solutions.load_from(results) + subprob_terminate_cond = results.solver.termination_condition + if subprob_terminate_cond in {tc.optimal, tc.feasible, tc.locallyOptimal}: + main_objective = MindtPy.objective_list[-1] + if subprob_terminate_cond == tc.optimal: + self.update_dual_bound(value(main_objective.expr)) + else: + config.logger.info('relaxed NLP is not solved to optimality.') + self.update_suboptimal_dual_bound(results) + config.logger.info( + self.log_formatter.format( + '-', + 'Relaxed NLP', + value(main_objective.expr), + self.primal_bound, + self.dual_bound, + self.rel_gap, + get_main_elapsed_time(self.timing), + ) + ) + # Add OA cut + if add_oa_cuts: + if ( + self.config.nlp_solver == 'cyipopt' + and self.objective_sense == minimize + ): + # TODO: recover the opposite dual when cyipopt issue #2831 is solved. + dual_values = ( + list(-1 * m.dual[c] for c in MindtPy.constraint_list) + if config.calculate_dual_at_solution + else None + ) + else: + dual_values = ( + list(m.dual[c] for c in MindtPy.constraint_list) + if config.calculate_dual_at_solution + else None + ) + copy_var_list_values( + m.MindtPy_utils.variable_list, + self.mip.MindtPy_utils.variable_list, + config, + ) + if config.init_strategy == 'FP': + copy_var_list_values( + m.MindtPy_utils.variable_list, + self.working_model.MindtPy_utils.variable_list, + config, + ) + self.add_cuts( + dual_values=dual_values, + linearize_active=True, + linearize_violated=True, + cb_opt=None, + ) + for var in self.mip.MindtPy_utils.discrete_variable_list: + # We don't want to trigger the reset of the global stale + # indicator, so we will set this variable to be "stale", + # knowing that set_value will switch it back to "not + # stale" + var.stale = True + var.set_value(int(round(var.value)), skip_validation=True) + elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}: + # TODO fail? try something else? + config.logger.info( + 'Initial relaxed NLP problem is infeasible. ' + 'Problem may be infeasible.' + ) + elif subprob_terminate_cond is tc.maxTimeLimit: + config.logger.info('NLP subproblem failed to converge within time limit.') + self.results.solver.termination_condition = tc.maxTimeLimit + elif subprob_terminate_cond is tc.maxIterations: + config.logger.info( + 'NLP subproblem failed to converge within iteration limit.' + ) + else: + raise ValueError( + 'MindtPy unable to handle relaxed NLP termination condition ' + 'of %s. Solver message: %s' + % (subprob_terminate_cond, results.solver.message) + ) + + def init_max_binaries(self): + """Modifies model by maximizing the number of activated binary variables. + + Note - The user would usually want to call solve_subproblem after an invocation + of this function. + + Raises + ------ + ValueError + MILP main problem is infeasible. + ValueError + MindtPy unable to handle the termination condition of the MILP main problem. + """ + config = self.config + m = self.working_model.clone() + if hasattr(m, 'dual') and isinstance(m.dual, Suffix): + m.del_component('dual') + MindtPy = m.MindtPy_utils + self.mip_subiter += 1 + config.logger.debug('Initialization: maximize value of binaries') + for c in MindtPy.nonlinear_constraint_list: + c.deactivate() + objective = next(m.component_data_objects(Objective, active=True)) + objective.deactivate() + binary_vars = ( + v + for v in m.MindtPy_utils.discrete_variable_list + if v.is_binary() and not v.fixed + ) + MindtPy.max_binary_obj = Objective( + expr=sum(v for v in binary_vars), sense=maximize + ) + + getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate() + getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate() + + if isinstance(self.mip_opt, PersistentSolver): + self.mip_opt.set_instance(m) + mip_args = dict(config.mip_solver_args) + update_solver_timelimit(self.mip_opt, config.mip_solver, self.timing, config) + results = self.mip_opt.solve( + m, tee=config.mip_solver_tee, load_solutions=False, **mip_args + ) + if len(results.solution) > 0: + m.solutions.load_from(results) + + solve_terminate_cond = results.solver.termination_condition + if solve_terminate_cond is tc.optimal: + copy_var_list_values( + MindtPy.variable_list, + self.working_model.MindtPy_utils.variable_list, + config, + ) + config.logger.info( + self.log_formatter.format( + '-', + 'Max binary MILP', + value(MindtPy.max_binary_obj.expr), + self.primal_bound, + self.dual_bound, + self.rel_gap, + get_main_elapsed_time(self.timing), + ) + ) + elif solve_terminate_cond is tc.infeasible: + raise ValueError( + 'MIP main problem is infeasible. ' + 'Problem may have no more feasible ' + 'binary configurations.' + ) + elif solve_terminate_cond is tc.maxTimeLimit: + config.logger.info('NLP subproblem failed to converge within time limit.') + self.results.solver.termination_condition = tc.maxTimeLimit + elif solve_terminate_cond is tc.maxIterations: + config.logger.info( + 'NLP subproblem failed to converge within iteration limit.' + ) + else: + raise ValueError( + 'MindtPy unable to handle MILP main termination condition ' + 'of %s. Solver message: %s' + % (solve_terminate_cond, results.solver.message) + ) + + ################################################################################################################################################################################################################## + # nlp_solve.py + + def solve_subproblem(self): + """Solves the Fixed-NLP (with fixed integers). + + This function sets up the 'fixed_nlp' by fixing binaries, sets continuous variables to their initial var values, + precomputes dual values, deactivates trivial constraints, and then solves NLP model. + + Returns + ------- + fixed_nlp : Pyomo model + Integer-variable-fixed NLP model. + results : SolverResults + Results from solving the Fixed-NLP. + """ + config = self.config + MindtPy = self.fixed_nlp.MindtPy_utils + self.nlp_iter += 1 + + MindtPy.cuts.deactivate() + if config.calculate_dual_at_solution: + self.fixed_nlp.tmp_duals = ComponentMap() + # tmp_duals are the value of the dual variables stored before using deactivate trivial constraints + # The values of the duals are computed as follows: (Complementary Slackness) + # + # | constraint | c_geq | status at x1 | tmp_dual (violation) | + # |------------|-------|--------------|----------------------| + # | g(x) <= b | -1 | g(x1) <= b | 0 | + # | g(x) <= b | -1 | g(x1) > b | g(x1) - b | + # | g(x) >= b | +1 | g(x1) >= b | 0 | + # | g(x) >= b | +1 | g(x1) < b | b - g(x1) | + evaluation_error = False + for c in self.fixed_nlp.MindtPy_utils.constraint_list: + # We prefer to include the upper bound as the right hand side since we are + # considering c by default a (hopefully) convex function, which would make + # c >= lb a nonconvex inequality which we wouldn't like to add linearizations + # if we don't have to + rhs = value(c.upper) if c.has_ub() else value(c.lower) + c_geq = -1 if c.has_ub() else 1 + try: + self.fixed_nlp.tmp_duals[c] = c_geq * max( + 0, c_geq * (rhs - value(c.body)) + ) + except (ValueError, OverflowError) as e: + config.logger.error(e) + self.fixed_nlp.tmp_duals[c] = None + evaluation_error = True + if evaluation_error: + for nlp_var, orig_val in zip( + MindtPy.variable_list, self.initial_var_values + ): + if not nlp_var.fixed and not nlp_var.is_binary(): + nlp_var.set_value(orig_val, skip_validation=True) + try: + TransformationFactory('contrib.deactivate_trivial_constraints').apply_to( + self.fixed_nlp, + tmp=True, + ignore_infeasible=False, + tolerance=config.constraint_tolerance, + ) + except InfeasibleConstraintException as e: + config.logger.error( + str(e) + '\nInfeasibility detected in deactivate_trivial_constraints.' + ) + results = SolverResults() + results.solver.termination_condition = tc.infeasible + return self.fixed_nlp, results + # Solve the NLP + nlp_args = dict(config.nlp_solver_args) + update_solver_timelimit(self.nlp_opt, config.nlp_solver, self.timing, config) + with SuppressInfeasibleWarning(): + with time_code(self.timing, 'fixed subproblem'): + results = self.nlp_opt.solve( + self.fixed_nlp, + tee=config.nlp_solver_tee, + load_solutions=False, + **nlp_args, + ) + if len(results.solution) > 0: + self.fixed_nlp.solutions.load_from(results) + TransformationFactory('contrib.deactivate_trivial_constraints').revert( + self.fixed_nlp + ) + return self.fixed_nlp, results + + def handle_nlp_subproblem_tc(self, fixed_nlp, result, cb_opt=None): + """This function handles different terminaton conditions of the fixed-NLP subproblem. + + Parameters + ---------- + fixed_nlp : Pyomo model + Integer-variable-fixed NLP model. + result : SolverResults + Results from solving the NLP subproblem. + cb_opt : SolverFactory, optional + The gurobi_persistent solver, by default None. + """ + if result.solver.termination_condition in { + tc.optimal, + tc.locallyOptimal, + tc.feasible, + }: + self.handle_subproblem_optimal(fixed_nlp, cb_opt) + elif result.solver.termination_condition in {tc.infeasible, tc.noSolution}: + self.handle_subproblem_infeasible(fixed_nlp, cb_opt) + elif result.solver.termination_condition is tc.maxTimeLimit: + self.config.logger.info( + 'NLP subproblem failed to converge within the time limit.' + ) + self.results.solver.termination_condition = tc.maxTimeLimit + self.should_terminate = True + elif result.solver.termination_condition is tc.maxEvaluations: + self.config.logger.info('NLP subproblem failed due to maxEvaluations.') + self.results.solver.termination_condition = tc.maxEvaluations + self.should_terminate = True + else: + self.handle_subproblem_other_termination( + fixed_nlp, result.solver.termination_condition, cb_opt + ) + + def handle_subproblem_optimal(self, fixed_nlp, cb_opt=None, fp=False): + """This function copies the result of the NLP solver function ('solve_subproblem') to the working model, updates + the bounds, adds OA and no-good cuts, and then stores the new solution if it is the new best solution. This + function handles the result of the latest iteration of solving the NLP subproblem given an optimal solution. + + Parameters + ---------- + fixed_nlp : Pyomo model + Integer-variable-fixed NLP model. + cb_opt : SolverFactory, optional + The gurobi_persistent solver, by default None. + fp : bool, optional + Whether it is in the loop of feasibility pump, by default False. + """ + # TODO: check what is this copy_value function used for? + # Warmstart? + config = self.config + copy_var_list_values( + fixed_nlp.MindtPy_utils.variable_list, + self.working_model.MindtPy_utils.variable_list, + config, + ) + if config.calculate_dual_at_solution: + for c in fixed_nlp.tmp_duals: + if fixed_nlp.dual.get(c, None) is None: + fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c] + elif ( + self.config.nlp_solver == 'cyipopt' + and self.objective_sense == minimize + ): + # TODO: recover the opposite dual when cyipopt issue #2831 is solved. + fixed_nlp.dual[c] = -fixed_nlp.dual[c] + dual_values = list( + fixed_nlp.dual[c] for c in fixed_nlp.MindtPy_utils.constraint_list + ) + else: + dual_values = None + main_objective = fixed_nlp.MindtPy_utils.objective_list[-1] + self.update_primal_bound(value(main_objective.expr)) + if self.primal_bound_improved: + self.best_solution_found = fixed_nlp.clone() + self.best_solution_found_time = get_main_elapsed_time(self.timing) + # Add the linear cut + copy_var_list_values( + fixed_nlp.MindtPy_utils.variable_list, + self.mip.MindtPy_utils.variable_list, + config, + ) + self.add_cuts( + dual_values=dual_values, + linearize_active=True, + linearize_violated=True, + cb_opt=cb_opt, + ) + + var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) + if config.add_no_good_cuts: + add_no_good_cuts( + self.mip, var_values, config, self.timing, self.mip_iter, cb_opt + ) + + config.call_after_subproblem_feasible(fixed_nlp) + + config.logger.info( + self.fixed_nlp_log_formatter.format( + '*' if self.primal_bound_improved else ' ', + self.nlp_iter if not fp else self.fp_iter, + 'Fixed NLP', + value(main_objective.expr), + self.primal_bound, + self.dual_bound, + self.rel_gap, + get_main_elapsed_time(self.timing), + ) + ) + + def handle_subproblem_infeasible(self, fixed_nlp, cb_opt=None): + """Solves feasibility problem and adds cut according to the specified strategy. + + This function handles the result of the latest iteration of solving the NLP subproblem given an infeasible + solution and copies the solution of the feasibility problem to the working model. + + Parameters + ---------- + fixed_nlp : Pyomo model + Integer-variable-fixed NLP model. + cb_opt : SolverFactory, optional + The gurobi_persistent solver, by default None. + """ + # TODO try something else? Reinitialize with different initial + # value? + config = self.config + config.logger.info('NLP subproblem was locally infeasible.') + self.nlp_infeasible_counter += 1 + if config.calculate_dual_at_solution: + for c in fixed_nlp.MindtPy_utils.constraint_list: + rhs = value(c.upper) if c.has_ub() else value(c.lower) + c_geq = -1 if c.has_ub() else 1 + fixed_nlp.dual[c] = c_geq * max(0, c_geq * (rhs - value(c.body))) + dual_values = list( + fixed_nlp.dual[c] for c in fixed_nlp.MindtPy_utils.constraint_list + ) + else: + dual_values = None + + # if config.strategy == 'PSC' or config.strategy == 'GBD': + # for var in fixed_nlp.component_data_objects(ctype=Var, descend_into=True): + # fixed_nlp.ipopt_zL_out[var] = 0 + # fixed_nlp.ipopt_zU_out[var] = 0 + # if var.has_ub() and abs(var.ub - value(var)) < config.absolute_bound_tolerance: + # fixed_nlp.ipopt_zL_out[var] = 1 + # elif var.has_lb() and abs(value(var) - var.lb) < config.absolute_bound_tolerance: + # fixed_nlp.ipopt_zU_out[var] = -1 + + config.logger.info('Solving feasibility problem') + feas_subproblem, feas_subproblem_results = self.solve_feasibility_subproblem() + # TODO: do we really need this? + if self.should_terminate: + return + copy_var_list_values( + feas_subproblem.MindtPy_utils.variable_list, + self.mip.MindtPy_utils.variable_list, + config, + ) + self.add_cuts( + dual_values=dual_values, + linearize_active=True, + linearize_violated=True, + cb_opt=cb_opt, + ) + # Add a no-good cut to exclude this discrete option + var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) + if config.add_no_good_cuts: + # excludes current discrete option + add_no_good_cuts( + self.mip, var_values, config, self.timing, self.mip_iter, cb_opt + ) + + def handle_subproblem_other_termination( + self, fixed_nlp, termination_condition, cb_opt=None + ): + """Handles the result of the latest iteration of solving the fixed NLP subproblem given + a solution that is neither optimal nor infeasible. + + Parameters + ---------- + fixed_nlp : Pyomo model + Integer-variable-fixed NLP model. + termination_condition : Pyomo TerminationCondition + The termination condition of the fixed NLP subproblem. + cb_opt : SolverFactory, optional + The gurobi_persistent solver, by default None. + + Raises + ------ + ValueError + MindtPy unable to handle the NLP subproblem termination condition. + """ + if termination_condition is tc.maxIterations: + # TODO try something else? Reinitialize with different initial value? + self.config.logger.info( + 'NLP subproblem failed to converge within iteration limit.' + ) + var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) + if self.config.add_no_good_cuts: + # excludes current discrete option + add_no_good_cuts( + self.mip, + var_values, + self.config, + self.timing, + self.mip_iter, + cb_opt, + ) + + else: + raise ValueError( + 'MindtPy unable to handle NLP subproblem termination ' + 'condition of {}'.format(termination_condition) + ) + + def solve_feasibility_subproblem(self): + """Solves a feasibility NLP if the fixed_nlp problem is infeasible. + + Returns + ------- + feas_subproblem : Pyomo model + Feasibility NLP from the model. + feas_soln : SolverResults + Results from solving the feasibility NLP. + """ + config = self.config + feas_subproblem = self.fixed_nlp + MindtPy = feas_subproblem.MindtPy_utils + MindtPy.feas_opt.activate() + if MindtPy.component('objective_value') is not None: + MindtPy.objective_value[:].set_value(0, skip_validation=True) + + active_obj = next( + feas_subproblem.component_data_objects(Objective, active=True) + ) + active_obj.deactivate() + for constr in MindtPy.nonlinear_constraint_list: + constr.deactivate() + + MindtPy.feas_opt.activate() + MindtPy.feas_obj.activate() + nlp_args = dict(config.nlp_solver_args) + update_solver_timelimit( + self.feasibility_nlp_opt, config.nlp_solver, self.timing, config + ) + with SuppressInfeasibleWarning(): + try: + with time_code(self.timing, 'feasibility subproblem'): + feas_soln = self.feasibility_nlp_opt.solve( + feas_subproblem, + tee=config.nlp_solver_tee, + load_solutions=config.nlp_solver != 'appsi_ipopt', + **nlp_args, + ) + if len(feas_soln.solution) > 0: + feas_subproblem.solutions.load_from(feas_soln) + except (ValueError, OverflowError) as e: + config.logger.error(e) + for nlp_var, orig_val in zip( + MindtPy.variable_list, self.initial_var_values + ): + if not nlp_var.fixed and not nlp_var.is_binary(): + nlp_var.set_value(orig_val, skip_validation=True) + with time_code(self.timing, 'feasibility subproblem'): + feas_soln = self.feasibility_nlp_opt.solve( + feas_subproblem, + tee=config.nlp_solver_tee, + load_solutions=config.nlp_solver != 'appsi_ipopt', + **nlp_args, + ) + if len(feas_soln.solution) > 0: + feas_soln.solutions.load_from(feas_soln) + self.handle_feasibility_subproblem_tc( + feas_soln.solver.termination_condition, MindtPy + ) + MindtPy.feas_opt.deactivate() + for constr in MindtPy.nonlinear_constraint_list: + constr.activate() + active_obj.activate() + MindtPy.feas_obj.deactivate() + return feas_subproblem, feas_soln + + def handle_feasibility_subproblem_tc(self, subprob_terminate_cond, MindtPy): + """Handles the result of the latest iteration of solving the feasibility NLP subproblem. + + Parameters + ---------- + subprob_terminate_cond : Pyomo TerminationCondition + The termination condition of the feasibility NLP subproblem. + MindtPy : Pyomo Block + The MindtPy_utils block. + """ + config = self.config + if subprob_terminate_cond in {tc.optimal, tc.locallyOptimal, tc.feasible}: + # TODO: check what is this copy_value used for? + copy_var_list_values( + MindtPy.variable_list, + self.working_model.MindtPy_utils.variable_list, + config, + ) + if value(MindtPy.feas_obj.expr) <= config.zero_tolerance: + config.logger.warning( + 'The objective value %.4E of feasibility problem is less than zero_tolerance. ' + 'This indicates that the nlp subproblem is feasible, although it is found infeasible in the previous step. ' + 'Check the nlp solver output' % value(MindtPy.feas_obj.expr) + ) + elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}: + config.logger.error( + 'Feasibility subproblem infeasible. This should never happen.' + ) + self.should_terminate = True + self.results.solver.status = SolverStatus.error + elif subprob_terminate_cond is tc.maxIterations: + config.logger.error( + 'Subsolver reached its maximum number of iterations without converging, ' + 'consider increasing the iterations limit of the subsolver or reviewing your formulation.' + ) + self.should_terminate = True + self.results.solver.status = SolverStatus.error + else: + config.logger.error( + 'MindtPy unable to handle feasibility subproblem termination condition ' + 'of {}'.format(subprob_terminate_cond) + ) + self.should_terminate = True + self.results.solver.status = SolverStatus.error + + ###################################################################################################################################################### + # iterate.py + + def algorithm_should_terminate(self, check_cycling): + """Checks if the algorithm should terminate at the given point. + + This function determines whether the algorithm should terminate based on the solver options and progress. + (Sets the self.results.solver.termination_condition to the appropriate condition, i.e. optimal, + maxIterations, maxTimeLimit). + + Parameters + ---------- + check_cycling : bool + Whether to check for a special case that causes the discrete variables to loop through the same values. + + Returns + ------- + bool + True if the algorithm should terminate, False otherwise. + """ + if self.should_terminate: + # self.primal_bound_progress[0] can only be inf or -inf. + # If the current primal bound equals inf or -inf, we can infer there is no solution. + if self.primal_bound == self.primal_bound_progress[0]: + self.results.solver.termination_condition = tc.noSolution + else: + self.results.solver.termination_condition = tc.feasible + return True + return ( + self.bounds_converged() + or self.reached_iteration_limit() + or self.reached_time_limit() + or self.reached_stalling_limit() + or (check_cycling and self.iteration_cycling()) + ) + + def fix_dual_bound(self, last_iter_cuts): + """Fix the dual bound when no-good cuts or tabu list is activated. + + Parameters + ---------- + last_iter_cuts : bool + Whether the cuts in the last iteration have been added. + """ + # If no-good cuts or tabu list is activated, the dual bound is not valid for the final optimal solution. + # Therefore, we need to correct it at the end. + # In singletree implementation, the dual bound at one iteration before the optimal solution, is valid for the optimal solution. + # So we will set the dual bound to it. + config = self.config + if config.single_tree: + config.logger.info( + 'Fix the bound to the value of one iteration before optimal solution is found.' + ) + try: + self.dual_bound = self.stored_bound[self.primal_bound] + except KeyError as e: + config.logger.error( + str(e) + '\nNo stored bound found. Bound fix failed.' + ) + else: + config.logger.info( + 'Solve the main problem without the last no_good cut to fix the bound.' + 'zero_tolerance is set to 1E-4' + ) + config.zero_tolerance = 1e-4 + # Solve NLP subproblem + # The constraint linearization happens in the handlers + if not last_iter_cuts: + fixed_nlp, fixed_nlp_result = self.solve_subproblem() + self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result) + + MindtPy = self.mip.MindtPy_utils + # deactivate the integer cuts generated after the best solution was found. + self.deactivate_no_good_cuts_when_fixing_bound(MindtPy.cuts.no_good_cuts) + if ( + config.add_regularization is not None + and MindtPy.component('mip_obj') is None + ): + MindtPy.objective_list[-1].activate() + # determine if persistent solver is called. + if isinstance(self.mip_opt, PersistentSolver): + self.mip_opt.set_instance(self.mip, symbolic_solver_labels=True) + mip_args = dict(config.mip_solver_args) + update_solver_timelimit( + self.mip_opt, config.mip_solver, self.timing, config + ) + main_mip_results = self.mip_opt.solve( + self.mip, tee=config.mip_solver_tee, load_solutions=False, **mip_args + ) + if len(main_mip_results.solution) > 0: + self.mip.solutions.load_from(main_mip_results) + + if main_mip_results.solver.termination_condition is tc.infeasible: + config.logger.info( + 'Bound fix failed. The bound fix problem is infeasible' + ) + else: + self.update_suboptimal_dual_bound(main_mip_results) + config.logger.info( + 'Fixed bound values: Primal Bound: {} Dual Bound: {}'.format( + self.primal_bound, self.dual_bound + ) + ) + # Check bound convergence + if ( + abs(self.primal_bound - self.dual_bound) + <= config.absolute_bound_tolerance + ): + self.results.solver.termination_condition = tc.optimal + + def set_up_tabulist_callback(self): + """Sets up the tabulist using IncumbentCallback. + Currently only support CPLEX. + """ + tabulist = self.mip_opt._solver_model.register_callback( + tabu_list.IncumbentCallback_cplex + ) + tabulist.opt = self.mip_opt + tabulist.config = self.config + tabulist.mindtpy_solver = self + self.mip_opt.options['preprocessing_reduce'] = 1 + # If the callback is used to reject incumbents, the user must set the + # parameter c.parameters.preprocessing.reduce either to the value 1 (one) + # to restrict presolve to primal reductions only or to 0 (zero) to disable all presolve reductions + self.mip_opt._solver_model.set_warning_stream(None) + self.mip_opt._solver_model.set_log_stream(None) + self.mip_opt._solver_model.set_error_stream(None) + + def set_up_lazy_OA_callback(self): + """Sets up the lazy OA using LazyConstraintCallback. + Currently only support CPLEX and Gurobi. + """ + if self.config.mip_solver == 'cplex_persistent': + lazyoa = self.mip_opt._solver_model.register_callback( + single_tree.LazyOACallback_cplex + ) + # pass necessary data and parameters to lazyoa + lazyoa.main_mip = self.mip + lazyoa.config = self.config + lazyoa.opt = self.mip_opt + lazyoa.mindtpy_solver = self + self.mip_opt._solver_model.set_warning_stream(None) + self.mip_opt._solver_model.set_log_stream(None) + self.mip_opt._solver_model.set_error_stream(None) + if self.config.mip_solver == 'gurobi_persistent': + self.mip_opt.set_callback(single_tree.LazyOACallback_gurobi) + self.mip_opt.mindtpy_solver = self + self.mip_opt.config = self.config + + ########################################################################################################################################## + # mip_solve.py + + def solve_main(self): + """This function solves the MIP main problem. + + Returns + ------- + self.mip : Pyomo model + The MIP stored in self. + main_mip_results : SolverResults + Results from solving the main MIP. + """ + config = self.config + self.mip_iter += 1 + + # setup main problem + self.setup_main() + mip_args = self.set_up_mip_solver() + + try: + main_mip_results = self.mip_opt.solve( + self.mip, tee=config.mip_solver_tee, load_solutions=False, **mip_args + ) + # update_attributes should be before load_from(main_mip_results), since load_from(main_mip_results) may fail. + if len(main_mip_results.solution) > 0: + self.mip.solutions.load_from(main_mip_results) + except (ValueError, AttributeError, RuntimeError) as e: + config.logger.error(e) + if config.single_tree: + config.logger.warning('Single tree terminate.') + if get_main_elapsed_time(self.timing) >= config.time_limit: + config.logger.warning('due to the timelimit.') + self.results.solver.termination_condition = tc.maxTimeLimit + if config.strategy == 'GOA' or config.add_no_good_cuts: + config.logger.warning( + 'Error: Cannot load a SolverResults object with bad status: error. ' + 'MIP solver failed. This usually happens in the single-tree GOA algorithm. ' + "No-good cuts are added and GOA algorithm doesn't converge within the time limit. " + 'No integer solution is found, so the CPLEX solver will report an error status. ' + ) + return None, None + if config.solution_pool: + main_mip_results._solver_model = self.mip_opt._solver_model + main_mip_results._pyomo_var_to_solver_var_map = ( + self.mip_opt._pyomo_var_to_solver_var_map + ) + if main_mip_results.solver.termination_condition is tc.optimal: + if config.single_tree and not config.add_no_good_cuts: + self.update_suboptimal_dual_bound(main_mip_results) + elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: + # Linear solvers will sometimes tell me that it's infeasible or + # unbounded during presolve, but fails to distinguish. We need to + # resolve with a solver option flag on. + main_mip_results, _ = distinguish_mip_infeasible_or_unbounded( + self.mip, config + ) + return self.mip, main_mip_results + + def solve_fp_main(self): + """This function solves the MIP main problem. + + Returns + ------- + self.mip : Pyomo model + The MIP stored in self. + main_mip_results : SolverResults + Results from solving the main MIP. + """ + # setup main problem + config = self.config + self.setup_fp_main() + mip_args = self.set_up_mip_solver() + + main_mip_results = self.mip_opt.solve( + self.mip, tee=config.mip_solver_tee, load_solutions=False, **mip_args + ) + # update_attributes should be before load_from(main_mip_results), since load_from(main_mip_results) may fail. + # if config.single_tree or config.use_tabu_list: + # self.update_attributes() + if len(main_mip_results.solution) > 0: + self.mip.solutions.load_from(main_mip_results) + if main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: + # Linear solvers will sometimes tell me that it's infeasible or + # unbounded during presolve, but fails to distinguish. We need to + # resolve with a solver option flag on. + main_mip_results, _ = distinguish_mip_infeasible_or_unbounded( + self.mip, config + ) + + return self.mip, main_mip_results + + def solve_regularization_main(self): + """This function solves the MIP main problem. + + Returns + ------- + self.mip : Pyomo model + The MIP stored in self. + main_mip_results : SolverResults + Results from solving the main MIP. + """ + config = self.config + # setup main problem + self.setup_regularization_main() + + if isinstance(self.regularization_mip_opt, PersistentSolver): + self.regularization_mip_opt.set_instance(self.mip) + update_solver_timelimit( + self.regularization_mip_opt, + config.mip_regularization_solver, + self.timing, + config, + ) + main_mip_results = self.regularization_mip_opt.solve( + self.mip, + tee=config.mip_solver_tee, + load_solutions=False, + **dict(config.mip_solver_args), + ) + if len(main_mip_results.solution) > 0: + self.mip.solutions.load_from(main_mip_results) + if main_mip_results.solver.termination_condition is tc.optimal: + config.logger.info( + self.log_formatter.format( + self.mip_iter, + 'Reg ' + self.regularization_mip_type, + value(self.mip.MindtPy_utils.roa_proj_mip_obj), + self.primal_bound, + self.dual_bound, + self.rel_gap, + get_main_elapsed_time(self.timing), + ) + ) + + elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: + # Linear solvers will sometimes tell me that it's infeasible or + # unbounded during presolve, but fails to distinguish. We need to + # resolve with a solver option flag on. + main_mip_results, _ = distinguish_mip_infeasible_or_unbounded( + self.mip, config + ) + + self.mip.MindtPy_utils.objective_constr.deactivate() + self.mip.MindtPy_utils.del_component('roa_proj_mip_obj') + self.mip.MindtPy_utils.cuts.del_component('obj_reg_estimate') + if config.add_regularization == 'level_L1': + self.mip.MindtPy_utils.del_component('L1_obj') + elif config.add_regularization == 'level_L_infinity': + self.mip.MindtPy_utils.del_component('L_infinity_obj') + + return self.mip, main_mip_results + + def set_up_mip_solver(self): + """Set up the MIP solver. + + Returns + ------- + mainopt : SolverFactory + The customized MIP solver. + """ + # determine if persistent solver is called. + config = self.config + if isinstance(self.mip_opt, PersistentSolver): + self.mip_opt.set_instance(self.mip, symbolic_solver_labels=True) + if config.single_tree: + self.set_up_lazy_OA_callback() + if config.use_tabu_list: + self.set_up_tabulist_callback() + mip_args = dict(config.mip_solver_args) + if config.mip_solver in { + 'cplex', + 'cplex_persistent', + 'gurobi', + 'gurobi_persistent', + }: + mip_args['warmstart'] = True + return mip_args + + # The following functions deal with handling the solution we get from the above MIP solver function + + def handle_main_optimal(self, main_mip, update_bound=True): + """This function copies the results from 'solve_main' to the working model and updates + the upper/lower bound. This function is called after an optimal solution is found for + the main problem. + + Parameters + ---------- + main_mip : Pyomo model + The MIP main problem. + update_bound : bool, optional + Whether to update the bound, by default True. + Bound will not be updated when handling regularization problem. + """ + # proceed. Just need integer values + MindtPy = main_mip.MindtPy_utils + # check if the value of binary variable is valid + for var in MindtPy.discrete_variable_list: + if var.value is None: + self.config.logger.warning( + f"Integer variable {var.name} not initialized. " + "Setting it to its lower bound" + ) + # nlp_var.bounds[0] + var.set_value(var.lb, skip_validation=True) + # warm start for the nlp subproblem + copy_var_list_values( + main_mip.MindtPy_utils.variable_list, + self.fixed_nlp.MindtPy_utils.variable_list, + self.config, + skip_fixed=False, + ) + + if update_bound: + self.update_dual_bound(value(MindtPy.mip_obj.expr)) + self.config.logger.info( + self.log_formatter.format( + self.mip_iter, + 'MILP', + value(MindtPy.mip_obj.expr), + self.primal_bound, + self.dual_bound, + self.rel_gap, + get_main_elapsed_time(self.timing), + ) + ) + + def handle_main_infeasible(self): + """This function handles the result of the latest iteration of solving + the MIP problem given an infeasible solution. + """ + self.config.logger.info( + 'MIP main problem is infeasible. ' + 'Problem may have no more feasible ' + 'binary configurations.' + ) + if self.mip_iter == 1: + self.config.logger.warning( + 'MindtPy initialization may have generated poor quality cuts.' + ) + # TODO no-good cuts for single tree case + # set optimistic bound to infinity + self.config.logger.info( + 'MindtPy exiting due to MILP main problem infeasibility.' + ) + if self.results.solver.termination_condition is None: + if ( + self.primal_bound == float('inf') and self.objective_sense == minimize + ) or ( + self.primal_bound == float('-inf') and self.objective_sense == maximize + ): + # if self.mip_iter == 0: + self.results.solver.termination_condition = tc.infeasible + else: + self.results.solver.termination_condition = tc.feasible + + def handle_main_max_timelimit(self, main_mip, main_mip_results): + """This function handles the result of the latest iteration of solving the MIP problem + given that solving the MIP takes too long. + + Parameters + ---------- + main_mip : Pyomo model + The MIP main problem. + main_mip_results : [type] + Results from solving the MIP main subproblem. + """ + # If we have found a valid feasible solution, we take that. If not, we can at least use the dual bound. + MindtPy = main_mip.MindtPy_utils + self.config.logger.info( + 'Unable to optimize MILP main problem ' + 'within time limit. ' + 'Using current solver feasible solution.' + ) + copy_var_list_values( + main_mip.MindtPy_utils.variable_list, + self.fixed_nlp.MindtPy_utils.variable_list, + self.config, + skip_fixed=False, + ) + self.update_suboptimal_dual_bound(main_mip_results) + self.config.logger.info( + self.log_formatter.format( + self.mip_iter, + 'MILP', + value(MindtPy.mip_obj.expr), + self.primal_bound, + self.dual_bound, + self.rel_gap, + get_main_elapsed_time(self.timing), + ) + ) + + def handle_main_unbounded(self, main_mip): + """This function handles the result of the latest iteration of solving the MIP + problem given an unbounded solution due to the relaxation. + + Parameters + ---------- + main_mip : Pyomo model + The MIP main problem. + + Returns + ------- + main_mip_results : SolverResults + The results of the bounded main problem. + """ + # Solution is unbounded. Add an arbitrary bound to the objective and resolve. + # This occurs when the objective is nonlinear. The nonlinear objective is moved + # to the constraints, and deactivated for the linear main problem. + config = self.config + MindtPy = main_mip.MindtPy_utils + config.logger.warning( + 'main MILP was unbounded. ' + 'Resolving with arbitrary bound values of (-{0:.10g}, {0:.10g}) on the objective. ' + 'You can change this bound with the option obj_bound.'.format( + config.obj_bound + ) + ) + MindtPy.objective_bound = Constraint( + expr=(-config.obj_bound, MindtPy.mip_obj.expr, config.obj_bound) + ) + if isinstance(self.mip_opt, PersistentSolver): + self.mip_opt.set_instance(main_mip) + update_solver_timelimit(self.mip_opt, config.mip_solver, self.timing, config) + with SuppressInfeasibleWarning(): + main_mip_results = self.mip_opt.solve( + main_mip, + tee=config.mip_solver_tee, + load_solutions=False, + **config.mip_solver_args, + ) + if len(main_mip_results.solution) > 0: + self.mip.solutions.load_from(main_mip_results) + return main_mip_results + + def handle_regularization_main_tc(self, main_mip, main_mip_results): + """Handles the result of the regularization main problem. + + Parameters + ---------- + main_mip : Pyomo model + The MIP main problem. + main_mip_results : SolverResults + Results from solving the regularization main subproblem. + + Raises + ------ + ValueError + MindtPy unable to handle the regularization problem termination condition. + """ + if main_mip_results is None: + self.config.logger.info( + 'Failed to solve the regularization problem.' + 'The solution of the OA main problem will be adopted.' + ) + elif main_mip_results.solver.termination_condition in {tc.optimal, tc.feasible}: + self.handle_main_optimal(main_mip, update_bound=False) + elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: + self.config.logger.info( + 'Regularization problem failed to converge within the time limit.' + ) + self.results.solver.termination_condition = tc.maxTimeLimit + # break + elif main_mip_results.solver.termination_condition is tc.infeasible: + self.config.logger.info('Regularization problem infeasible.') + elif main_mip_results.solver.termination_condition is tc.unbounded: + self.config.logger.info( + 'Regularization problem unbounded.' + 'Sometimes solving MIQCP in CPLEX, unbounded means infeasible.' + ) + elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: + self.config.logger.info( + 'Regularization problem is infeasible or unbounded.' + 'It might happen when using CPLEX to solve MIQP.' + ) + elif main_mip_results.solver.termination_condition is tc.unknown: + self.config.logger.info( + 'Termination condition of the regularization problem is unknown.' + ) + if main_mip_results.problem.lower_bound != float('-inf'): + self.config.logger.info('Solution limit has been reached.') + self.handle_main_optimal(main_mip, update_bound=False) + else: + self.config.logger.info( + 'No solution obtained from the regularization subproblem.' + 'Please set mip_solver_tee to True for more information.' + 'The solution of the OA main problem will be adopted.' + ) + else: + raise ValueError( + 'MindtPy unable to handle regularization problem termination condition ' + 'of %s. Solver message: %s' + % ( + main_mip_results.solver.termination_condition, + main_mip_results.solver.message, + ) + ) + + def setup_main(self): + """Set up main problem/main regularization problem for OA, ECP, Feasibility Pump and ROA methods.""" + config = self.config + MindtPy = self.mip.MindtPy_utils + + for c in MindtPy.constraint_list: + if c.body.polynomial_degree() not in self.mip_constraint_polynomial_degree: + c.deactivate() + + MindtPy.cuts.activate() + + sign_adjust = 1 if self.objective_sense == minimize else -1 + MindtPy.del_component('mip_obj') + if config.add_regularization is not None and config.add_no_good_cuts: + MindtPy.cuts.no_good_cuts.deactivate() + + if config.add_slack: + MindtPy.del_component('aug_penalty_expr') + + MindtPy.aug_penalty_expr = Expression( + expr=sign_adjust + * config.OA_penalty_factor + * sum(v for v in MindtPy.cuts.slack_vars.values()) + ) + main_objective = MindtPy.objective_list[-1] + MindtPy.mip_obj = Objective( + expr=main_objective.expr + + (MindtPy.aug_penalty_expr if config.add_slack else 0), + sense=self.objective_sense, + ) + + if config.use_dual_bound: + # Delete previously added dual bound constraint + MindtPy.cuts.del_component('dual_bound') + if self.dual_bound not in {float('inf'), float('-inf')}: + if self.objective_sense == minimize: + MindtPy.cuts.dual_bound = Constraint( + expr=main_objective.expr + + (MindtPy.aug_penalty_expr if config.add_slack else 0) + >= self.dual_bound, + doc='Objective function expression should improve on the best found dual bound', + ) + else: + MindtPy.cuts.dual_bound = Constraint( + expr=main_objective.expr + + (MindtPy.aug_penalty_expr if config.add_slack else 0) + <= self.dual_bound, + doc='Objective function expression should improve on the best found dual bound', + ) + + def setup_fp_main(self): + """Set up main problem for Feasibility Pump method.""" + MindtPy = self.mip.MindtPy_utils + + for c in MindtPy.constraint_list: + if c.body.polynomial_degree() not in self.mip_constraint_polynomial_degree: + c.deactivate() + + MindtPy.cuts.activate() + MindtPy.del_component('mip_obj') + MindtPy.del_component('fp_mip_obj') + if self.config.fp_main_norm == 'L1': + MindtPy.fp_mip_obj = generate_norm1_objective_function( + self.mip, self.working_model, discrete_only=self.config.fp_discrete_only + ) + elif self.config.fp_main_norm == 'L2': + MindtPy.fp_mip_obj = generate_norm2sq_objective_function( + self.mip, self.working_model, discrete_only=self.config.fp_discrete_only + ) + elif self.config.fp_main_norm == 'L_infinity': + MindtPy.fp_mip_obj = generate_norm_inf_objective_function( + self.mip, self.working_model, discrete_only=self.config.fp_discrete_only + ) + + def setup_regularization_main(self): + """Set up main regularization problem for ROA method.""" + config = self.config + MindtPy = self.mip.MindtPy_utils + + for c in MindtPy.constraint_list: + if c.body.polynomial_degree() not in self.mip_constraint_polynomial_degree: + c.deactivate() + + MindtPy.cuts.activate() + + sign_adjust = 1 if self.objective_sense == minimize else -1 + MindtPy.del_component('mip_obj') + if config.single_tree: + MindtPy.del_component('roa_proj_mip_obj') + MindtPy.cuts.del_component('obj_reg_estimate') + if config.add_regularization is not None and config.add_no_good_cuts: + MindtPy.cuts.no_good_cuts.activate() + + # The epigraph constraint is very "flat" for branching rules. + # In ROA, if the objective function is linear(or quadratic when quadratic_strategy = 1 or 2), the original objective function is used in the MIP problem. + # In the MIP projection problem, we need to reactivate the epigraph constraint(objective_constr). + if ( + MindtPy.objective_list[0].expr.polynomial_degree() + in self.mip_objective_polynomial_degree + ): + MindtPy.objective_constr.activate() + if config.add_regularization == 'level_L1': + MindtPy.roa_proj_mip_obj = generate_norm1_objective_function( + self.mip, self.best_solution_found, discrete_only=False + ) + elif config.add_regularization == 'level_L2': + MindtPy.roa_proj_mip_obj = generate_norm2sq_objective_function( + self.mip, self.best_solution_found, discrete_only=False + ) + elif config.add_regularization == 'level_L_infinity': + MindtPy.roa_proj_mip_obj = generate_norm_inf_objective_function( + self.mip, self.best_solution_found, discrete_only=False + ) + elif config.add_regularization in { + 'grad_lag', + 'hess_lag', + 'hess_only_lag', + 'sqp_lag', + }: + MindtPy.roa_proj_mip_obj = generate_lag_objective_function( + self.mip, + self.best_solution_found, + config, + self.timing, + discrete_only=False, + ) + if self.objective_sense == minimize: + MindtPy.cuts.obj_reg_estimate = Constraint( + expr=sum(MindtPy.objective_value[:]) + <= (1 - config.level_coef) * self.primal_bound + + config.level_coef * self.dual_bound + ) + else: + MindtPy.cuts.obj_reg_estimate = Constraint( + expr=sum(MindtPy.objective_value[:]) + >= (1 - config.level_coef) * self.primal_bound + + config.level_coef * self.dual_bound + ) + + def update_result(self): + if self.objective_sense == minimize: + self.results.problem.lower_bound = self.dual_bound + self.results.problem.upper_bound = self.primal_bound + else: + self.results.problem.lower_bound = self.primal_bound + self.results.problem.upper_bound = self.dual_bound + + self.results.solver.timing = self.timing + self.results.solver.user_time = self.timing.total + self.results.solver.wallclock_time = self.timing.total + self.results.solver.iterations = self.mip_iter + self.results.solver.num_infeasible_nlp_subproblem = self.nlp_infeasible_counter + self.results.solver.best_solution_found_time = self.best_solution_found_time + self.results.solver.primal_integral = self.primal_integral + self.results.solver.dual_integral = self.dual_integral + self.results.solver.primal_dual_gap_integral = self.primal_dual_gap_integral + + def load_solution(self): + # Update values in original model + config = self.config + MindtPy = self.working_model.MindtPy_utils + copy_var_list_values( + from_list=self.best_solution_found.MindtPy_utils.variable_list, + to_list=MindtPy.variable_list, + config=config, + ) + # The original does not have variable list. + # Use get_vars_from_components() should be used for both working_model and original_model to exclude the unused variables. + self.working_model.MindtPy_utils.deactivate() + # The original objective should be activated to make sure the variable list is in the same order (get_vars_from_components). + self.working_model.MindtPy_utils.objective_list[0].activate() + if self.working_model.component("_int_to_binary_reform") is not None: + self.working_model._int_to_binary_reform.deactivate() + # exclude fixed variables here. This is consistent with the definition of variable_list. + working_model_variable_list = list( + get_vars_from_components( + block=self.working_model, + ctype=(Constraint, Objective), + include_fixed=False, + active=True, + sort=True, + descend_into=True, + descent_order=None, + ) + ) + original_model_variable_list = list( + get_vars_from_components( + block=self.original_model, + ctype=(Constraint, Objective), + include_fixed=False, + active=True, + sort=True, + descend_into=True, + descent_order=None, + ) + ) + for v_from, v_to in zip( + working_model_variable_list, original_model_variable_list + ): + if v_from.name != v_to.name: + raise DeveloperError( + 'The name of the two variables is not the same. Loading final solution' + ) + copy_var_list_values( + working_model_variable_list, original_model_variable_list, config=config + ) + + def check_subsolver_validity(self): + """Check if the subsolvers are available and licensed.""" + if not self.mip_opt.available(): + raise ValueError(self.config.mip_solver + ' is not available.') + if not self.mip_opt.license_is_valid(): + raise ValueError(self.config.mip_solver + ' is not licensed.') + if not self.nlp_opt.available(): + raise ValueError(self.config.nlp_solver + ' is not available.') + if not self.nlp_opt.license_is_valid(): + raise ValueError(self.config.nlp_solver + ' is not licensed.') + if self.config.add_regularization is not None: + if not self.regularization_mip_opt.available(): + raise ValueError( + self.config.mip_regularization_solver + ' is not available.' + ) + if not self.regularization_mip_opt.license_is_valid(): + raise ValueError( + self.config.mip_regularization_solver + ' is not licensed.' + ) + + def check_config(self): + """Checks if the configuration options make sense.""" + config = self.config + # configuration confirmation + if config.init_strategy == 'FP': + config.add_no_good_cuts = True + config.use_tabu_list = False + + if config.nlp_solver == 'baron': + config.equality_relaxation = False + if config.nlp_solver == 'gams' and config.nlp_solver.__contains__('solver'): + if config.nlp_solver_args['solver'] == 'baron': + config.equality_relaxation = False + + if config.solver_tee: + config.mip_solver_tee = True + config.nlp_solver_tee = True + if config.add_no_good_cuts: + config.integer_to_binary = True + if config.use_tabu_list: + config.mip_solver = 'cplex_persistent' + if config.threads > 1: + config.threads = 1 + config.logger.info( + 'The threads parameter is corrected to 1 since incumbent callback conflicts with multi-threads mode.' + ) + if config.solution_pool: + if config.mip_solver not in {'cplex_persistent', 'gurobi_persistent'}: + if config.mip_solver in {'appsi_cplex', 'appsi_gurobi'}: + config.logger.info("Solution pool does not support APPSI solver.") + config.mip_solver = 'cplex_persistent' + + ################################################################################################################################ + # Feasibility Pump + + def solve_fp_subproblem(self): + """Solves the feasibility pump NLP subproblem. + + This function sets up the 'fp_nlp' by relax integer variables. + precomputes dual values, deactivates trivial constraints, and then solves NLP model. + + Returns + ------- + fp_nlp : Pyomo model + Fixed-NLP from the model. + results : SolverResults + Results from solving the fixed-NLP subproblem. + """ + fp_nlp = self.working_model.clone() + MindtPy = fp_nlp.MindtPy_utils + config = self.config + + # Set up NLP + fp_nlp.MindtPy_utils.objective_list[-1].deactivate() + if self.objective_sense == minimize: + fp_nlp.improving_objective_cut = Constraint( + expr=sum(fp_nlp.MindtPy_utils.objective_value[:]) <= self.primal_bound + ) + else: + fp_nlp.improving_objective_cut = Constraint( + expr=sum(fp_nlp.MindtPy_utils.objective_value[:]) >= self.primal_bound + ) + + # Add norm_constraint, which guarantees the monotonicity of the norm objective value sequence of all iterations + # Ref: Paper 'A storm of feasibility pumps for nonconvex MINLP' https://doi.org/10.1007/s10107-012-0608-x + # the norm type is consistent with the norm obj of the FP-main problem. + if config.fp_norm_constraint: + generate_norm_constraint(fp_nlp, self.mip, config) + + MindtPy.fp_nlp_obj = generate_norm2sq_objective_function( + fp_nlp, self.mip, discrete_only=config.fp_discrete_only + ) + + MindtPy.cuts.deactivate() + TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp) + try: + TransformationFactory('contrib.deactivate_trivial_constraints').apply_to( + fp_nlp, + tmp=True, + ignore_infeasible=False, + tolerance=config.constraint_tolerance, + ) + except InfeasibleConstraintException as e: + config.logger.error( + str(e) + '\nInfeasibility detected in deactivate_trivial_constraints.' + ) + results = SolverResults() + results.solver.termination_condition = tc.infeasible + return fp_nlp, results + # Solve the NLP + nlp_args = dict(config.nlp_solver_args) + update_solver_timelimit(self.nlp_opt, config.nlp_solver, self.timing, config) + with SuppressInfeasibleWarning(): + with time_code(self.timing, 'fp subproblem'): + results = self.nlp_opt.solve( + fp_nlp, tee=config.nlp_solver_tee, load_solutions=False, **nlp_args + ) + if len(results.solution) > 0: + fp_nlp.solutions.load_from(results) + return fp_nlp, results + + def handle_fp_subproblem_optimal(self, fp_nlp): + """Copies the solution to the working model, updates bound, adds OA cuts / no-good cuts / + increasing objective cut, calculates the duals and stores incumbent solution if it has been improved. + + Parameters + ---------- + fp_nlp : Pyomo model + The feasibility pump NLP subproblem. + """ + copy_var_list_values( + fp_nlp.MindtPy_utils.variable_list, + self.working_model.MindtPy_utils.variable_list, + self.config, + ) + add_orthogonality_cuts(self.working_model, self.mip, self.config) + + # if OA-like or fp converged, update Upper bound, + # add no_good cuts and increasing objective cuts (fp) + if fp_converged( + self.working_model, + self.mip, + proj_zero_tolerance=self.config.fp_projzerotol, + discrete_only=self.config.fp_discrete_only, + ): + copy_var_list_values( + self.mip.MindtPy_utils.variable_list, + self.fixed_nlp.MindtPy_utils.variable_list, + self.config, + skip_fixed=False, + ) + fixed_nlp, fixed_nlp_results = self.solve_subproblem() + if fixed_nlp_results.solver.termination_condition in { + tc.optimal, + tc.locallyOptimal, + tc.feasible, + }: + self.handle_subproblem_optimal(fixed_nlp) + if self.primal_bound_improved: + self.mip.MindtPy_utils.cuts.del_component('improving_objective_cut') + if self.objective_sense == minimize: + self.mip.MindtPy_utils.cuts.improving_objective_cut = ( + Constraint( + expr=sum(self.mip.MindtPy_utils.objective_value[:]) + <= self.primal_bound + - self.config.fp_cutoffdecr + * max(1, abs(self.primal_bound)) + ) + ) + else: + self.mip.MindtPy_utils.cuts.improving_objective_cut = ( + Constraint( + expr=sum(self.mip.MindtPy_utils.objective_value[:]) + >= self.primal_bound + + self.config.fp_cutoffdecr + * max(1, abs(self.primal_bound)) + ) + ) + else: + self.config.logger.error( + 'Feasibility pump Fixed-NLP is infeasible, something might be wrong. ' + 'There might be a problem with the precisions - the feasibility pump seems to have converged' + ) + + def handle_fp_main_tc(self, fp_main_results): + """Handle the termination condition of the feasibility pump main problem. + + Parameters + ---------- + fp_main_results : SolverResults + The results from solving the FP main problem. + + Returns + ------- + bool + True if FP loop should terminate, False otherwise. + """ + if fp_main_results.solver.termination_condition is tc.optimal: + self.config.logger.info( + self.log_formatter.format( + self.fp_iter, + 'FP-MIP', + value(self.mip.MindtPy_utils.fp_mip_obj), + self.primal_bound, + self.dual_bound, + self.rel_gap, + get_main_elapsed_time(self.timing), + ) + ) + return False + elif fp_main_results.solver.termination_condition is tc.maxTimeLimit: + self.config.logger.warning('FP-MIP reaches max TimeLimit') + self.results.solver.termination_condition = tc.maxTimeLimit + return True + elif fp_main_results.solver.termination_condition is tc.infeasible: + self.config.logger.warning('FP-MIP infeasible') + no_good_cuts = self.mip.MindtPy_utils.cuts.no_good_cuts + if no_good_cuts.__len__() > 0: + no_good_cuts[no_good_cuts.__len__()].deactivate() + return True + elif fp_main_results.solver.termination_condition is tc.unbounded: + self.config.logger.warning('FP-MIP unbounded') + return True + elif ( + fp_main_results.solver.termination_condition is tc.other + and fp_main_results.solution.status is SolutionStatus.feasible + ): + self.config.logger.warning( + 'MILP solver reported feasible solution of FP-MIP, ' + 'but not guaranteed to be optimal.' + ) + return False + else: + self.config.logger.warning('Unexpected result of FP-MIP') + return True + + def fp_loop(self): + """Feasibility pump loop. + + This is the outermost function for the Feasibility Pump algorithm in this package; this function + controls the progress of solving the model. + + Raises + ------ + ValueError + MindtPy unable to handle the termination condition of the FP-NLP subproblem. + """ + config = self.config + while self.fp_iter < config.fp_iteration_limit: + # solve MIP main problem + with time_code(self.timing, 'fp main'): + fp_main, fp_main_results = self.solve_fp_main() + fp_should_terminate = self.handle_fp_main_tc(fp_main_results) + if fp_should_terminate: + break + + # Solve NLP subproblem + # The constraint linearization happens in the handlers + fp_nlp, fp_nlp_result = self.solve_fp_subproblem() + + if fp_nlp_result.solver.termination_condition in { + tc.optimal, + tc.locallyOptimal, + tc.feasible, + }: + config.logger.info( + self.log_formatter.format( + self.fp_iter, + 'FP-NLP', + value(fp_nlp.MindtPy_utils.fp_nlp_obj), + self.primal_bound, + self.dual_bound, + self.rel_gap, + get_main_elapsed_time(self.timing), + ) + ) + self.handle_fp_subproblem_optimal(fp_nlp) + elif fp_nlp_result.solver.termination_condition in { + tc.infeasible, + tc.noSolution, + }: + config.logger.error('Feasibility pump NLP subproblem infeasible') + self.should_terminate = True + self.results.solver.status = SolverStatus.error + return + elif fp_nlp_result.solver.termination_condition is tc.maxIterations: + config.logger.error( + 'Feasibility pump NLP subproblem failed to converge within iteration limit.' + ) + self.should_terminate = True + self.results.solver.status = SolverStatus.error + return + else: + raise ValueError( + 'MindtPy unable to handle NLP subproblem termination ' + 'condition of {}'.format(fp_nlp_result.solver.termination_condition) + ) + # Call the NLP post-solve callback + config.call_after_subproblem_solve(fp_nlp) + self.fp_iter += 1 + self.mip.MindtPy_utils.del_component('fp_mip_obj') + + if config.fp_main_norm == 'L1': + self.mip.MindtPy_utils.del_component('L1_obj') + elif config.fp_main_norm == 'L_infinity': + self.mip.MindtPy_utils.del_component('L_infinity_obj') + + # deactivate the improving_objective_cut + self.mip.MindtPy_utils.cuts.del_component('improving_objective_cut') + if not config.fp_transfercuts: + for c in self.mip.MindtPy_utils.cuts.oa_cuts: + c.deactivate() + for c in self.mip.MindtPy_utils.cuts.no_good_cuts: + c.deactivate() + if config.fp_projcuts: + self.working_model.MindtPy_utils.cuts.del_component('fp_orthogonality_cuts') + + def initialize_mip_problem(self): + '''Deactivate the nonlinear constraints to create the MIP problem.''' + # if single tree is activated, we need to add bounds for unbounded variables in nonlinear constraints to avoid unbounded main problem. + config = self.config + if config.single_tree: + add_var_bound(self.working_model, config) + + self.mip = self.working_model.clone() + next(self.mip.component_data_objects(Objective, active=True)).deactivate() + if hasattr(self.mip, 'dual') and isinstance(self.mip.dual, Suffix): + self.mip.del_component('dual') + # Deactivate extraneous IMPORT/EXPORT suffixes + if config.nlp_solver in {'ipopt', 'cyipopt'}: + getattr(self.mip, 'ipopt_zL_out', _DoNothing()).deactivate() + getattr(self.mip, 'ipopt_zU_out', _DoNothing()).deactivate() + + MindtPy = self.mip.MindtPy_utils + + if config.init_strategy == 'FP': + MindtPy.cuts.fp_orthogonality_cuts = ConstraintList( + doc='Orthogonality cuts in feasibility pump' + ) + if config.fp_projcuts: + self.working_model.MindtPy_utils.cuts.fp_orthogonality_cuts = ( + ConstraintList(doc='Orthogonality cuts in feasibility pump') + ) + + self.fixed_nlp = self.working_model.clone() + TransformationFactory('core.fix_integer_vars').apply_to(self.fixed_nlp) + initialize_feas_subproblem(self.fixed_nlp, config) + + def initialize_subsolvers(self): + """Initialize and set options for MIP and NLP subsolvers.""" + config = self.config + if config.mip_solver == 'gurobi_persistent' and config.single_tree: + self.mip_opt = GurobiPersistent4MindtPy() + else: + self.mip_opt = SolverFactory(config.mip_solver) + self.nlp_opt = SolverFactory(config.nlp_solver) + self.feasibility_nlp_opt = SolverFactory(config.nlp_solver) + if config.mip_regularization_solver is not None: + self.regularization_mip_opt = SolverFactory( + config.mip_regularization_solver + ) + + self.check_subsolver_validity() + if config.mip_solver == 'gams': + self.mip_opt.options['add_options'] = [] + if config.nlp_solver == 'gams': + self.nlp_opt.options['add_options'] = [] + self.feasibility_nlp_opt.options['add_options'] = [] + set_solver_mipgap(self.mip_opt, config.mip_solver, config) + + set_solver_constraint_violation_tolerance( + self.nlp_opt, config.nlp_solver, config + ) + set_solver_constraint_violation_tolerance( + self.feasibility_nlp_opt, config.nlp_solver, config + ) + + self.set_appsi_solver_update_config() + + if config.mip_solver == 'gurobi_persistent' and config.single_tree: + # PreCrush: Controls presolve reductions that affect user cuts + # You should consider setting this parameter to 1 if you are using callbacks to add your own cuts. + self.mip_opt.options['PreCrush'] = 1 + self.mip_opt.options['LazyConstraints'] = 1 + + # set threads + if config.threads > 0: + self.mip_opt.options['threads'] = config.threads + # regularization solver + if config.mip_regularization_solver is not None: + set_solver_mipgap( + self.regularization_mip_opt, config.mip_regularization_solver, config + ) + if config.mip_regularization_solver == 'gams': + self.regularization_mip_opt.options['add_options'] = [] + if config.regularization_mip_threads > 0: + self.regularization_mip_opt.options[ + 'threads' + ] = config.regularization_mip_threads + else: + self.regularization_mip_opt.options['threads'] = config.threads + + if config.mip_regularization_solver in { + 'cplex', + 'appsi_cplex', + 'cplex_persistent', + }: + if config.solution_limit is not None: + self.regularization_mip_opt.options[ + 'mip_limits_solutions' + ] = config.solution_limit + # We don't need to solve the regularization problem to optimality. + # We will choose to perform aggressive node probing during presolve. + self.regularization_mip_opt.options['mip_strategy_presolvenode'] = 3 + # When using ROA method to solve convex MINLPs, the Hessian of the Lagrangean is always positive semidefinite, + # and the regularization subproblems are always convex. + # However, due to numerical accuracy, the regularization problem ended up nonconvex for a few cases, + # e.g., the smallest eigenvalue of the Hessian was slightly negative. + # Therefore, we set the optimalitytarget parameter to 3 to enable CPLEX to solve nonconvex MIQPs in the ROA-L2 and ROA-∇2L methods. + if config.add_regularization in {'hess_lag', 'hess_only_lag'}: + self.regularization_mip_opt.options['optimalitytarget'] = 3 + elif config.mip_regularization_solver == 'gurobi': + if config.solution_limit is not None: + self.regularization_mip_opt.options[ + 'SolutionLimit' + ] = config.solution_limit + # Same reason as mip_strategy_presolvenode. + self.regularization_mip_opt.options['Presolve'] = 2 + + def set_appsi_solver_update_config(self): + """Set update config for APPSI solvers.""" + config = self.config + if config.mip_solver in {'appsi_cplex', 'appsi_gurobi', 'appsi_highs'}: + # mip main problem + self.mip_opt.update_config.check_for_new_or_removed_constraints = True + self.mip_opt.update_config.check_for_new_or_removed_vars = True + self.mip_opt.update_config.check_for_new_or_removed_params = False + self.mip_opt.update_config.check_for_new_objective = True + self.mip_opt.update_config.update_constraints = True + self.mip_opt.update_config.update_vars = True + self.mip_opt.update_config.update_params = False + self.mip_opt.update_config.update_named_expressions = False + self.mip_opt.update_config.update_objective = False + self.mip_opt.update_config.treat_fixed_vars_as_params = True + + if config.nlp_solver == 'appsi_ipopt': + # fixed-nlp + self.nlp_opt.update_config.check_for_new_or_removed_constraints = False + self.nlp_opt.update_config.check_for_new_or_removed_vars = False + self.nlp_opt.update_config.check_for_new_or_removed_params = False + self.nlp_opt.update_config.check_for_new_objective = False + self.nlp_opt.update_config.update_constraints = True + self.nlp_opt.update_config.update_vars = True + self.nlp_opt.update_config.update_params = False + self.nlp_opt.update_config.update_named_expressions = False + self.nlp_opt.update_config.update_objective = False + self.nlp_opt.update_config.treat_fixed_vars_as_params = False + + self.feasibility_nlp_opt.update_config.check_for_new_or_removed_constraints = ( + False + ) + self.feasibility_nlp_opt.update_config.check_for_new_or_removed_vars = False + self.feasibility_nlp_opt.update_config.check_for_new_or_removed_params = ( + False + ) + self.feasibility_nlp_opt.update_config.check_for_new_objective = False + self.feasibility_nlp_opt.update_config.update_constraints = False + self.feasibility_nlp_opt.update_config.update_vars = True + self.feasibility_nlp_opt.update_config.update_params = False + self.feasibility_nlp_opt.update_config.update_named_expressions = False + self.feasibility_nlp_opt.update_config.update_objective = False + self.feasibility_nlp_opt.update_config.treat_fixed_vars_as_params = False + + def solve(self, model, **kwds): + """Solve the model. + + Parameters + ---------- + model : Pyomo model + The MINLP model to be solved. + + Returns + ------- + results : SolverResults + Results from solving the MINLP problem by MindtPy. + """ + config = self.config = self.CONFIG( + kwds.pop('options', {}), preserve_implicit=True + ) + config.set_value(kwds) + self.set_up_logger() + new_logging_level = logging.INFO if config.tee else None + with lower_logger_level_to(config.logger, new_logging_level): + self.check_config() + + self.set_up_solve_data(model) + + if config.integer_to_binary: + TransformationFactory('contrib.integer_to_binary').apply_to( + self.working_model + ) + + self.create_utility_block(self.working_model, 'MindtPy_utils') + with time_code(self.timing, 'total', is_main_timer=True), lower_logger_level_to( + config.logger, new_logging_level + ): + self._log_solver_intro_message() + self.initialize_subsolvers() + + # Validate the model to ensure that MindtPy is able to solve it. + if not self.model_is_valid(): + return + + MindtPy = self.working_model.MindtPy_utils + + setup_results_object(self.results, self.original_model, config) + + # Reformulate the objective function. + self.objective_reformulation() + + # Save model initial values. + self.initial_var_values = list(v.value for v in MindtPy.variable_list) + + # TODO: if the MindtPy solver is defined once and called several times to solve models. The following two lines are necessary. It seems that the solver class will not be init every time call. + # For example, if we remove the following two lines. test_RLPNLP_L1 will fail. + self.best_solution_found = None + self.best_solution_found_time = None + self.initialize_mip_problem() + + # Initialization + with time_code(self.timing, 'initialization'): + self.MindtPy_initialization() + + # Algorithm main loop + with time_code(self.timing, 'main loop'): + self.MindtPy_iteration_loop() + + # Load solution + if self.best_solution_found is not None: + self.load_solution() + + # Get integral info + self.get_integral_info() + + config.logger.info( + ' {:<25}: {:>7.4f} '.format( + 'Primal-dual gap integral', self.primal_dual_gap_integral + ) + ) + + # Update result + self.update_result() + if config.single_tree: + self.results.solver.num_nodes = self.nlp_iter - ( + 1 if config.init_strategy == 'rNLP' else 0 + ) + + return self.results + + def objective_reformulation(self): + # In the process_objective function, as long as the objective function is nonlinear, it will be reformulated and the variable/constraint/objective lists will be updated. + # For OA/GOA/LP-NLP algorithm, if the objective function is linear, it will not be reformulated as epigraph constraint. + # If the objective function is linear, it will be reformulated as epigraph constraint only if the Feasibility Pump or ROA/RLP-NLP algorithm is activated. (move_objective = True) + # In some cases, the variable/constraint/objective lists will not be updated even if the objective is epigraph-reformulated. + # In Feasibility Pump, since the distance calculation only includes discrete variables and the epigraph slack variables are continuous variables, the Feasibility Pump algorithm will not affected even if the variable list are updated. + # In ROA and RLP/NLP, since the distance calculation does not include these epigraph slack variables, they should not be added to the variable list. (update_var_con_list = False) + # In the process_objective function, once the objective function has been reformulated as epigraph constraint, the variable/constraint/objective lists will not be updated only if the MINLP has a linear objective function and regularization is activated at the same time. + # This is because the epigraph constraint is very "flat" for branching rules. The original objective function will be used for the main problem and epigraph reformulation will be used for the projection problem. + self.process_objective(update_var_con_list=True) + + def handle_main_mip_termination(self, main_mip, main_mip_results): + should_terminate = False + if main_mip_results is not None: + if not self.config.single_tree: + if main_mip_results.solver.termination_condition is tc.optimal: + self.handle_main_optimal(main_mip) + elif main_mip_results.solver.termination_condition is tc.infeasible: + self.handle_main_infeasible() + self.last_iter_cuts = True + should_terminate = True + elif main_mip_results.solver.termination_condition is tc.unbounded: + temp_results = self.handle_main_unbounded(main_mip) + elif ( + main_mip_results.solver.termination_condition + is tc.infeasibleOrUnbounded + ): + temp_results = self.handle_main_unbounded(main_mip) + if temp_results.solver.termination_condition is tc.infeasible: + self.handle_main_infeasible() + elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: + self.handle_main_max_timelimit(main_mip, main_mip_results) + self.results.solver.termination_condition = tc.maxTimeLimit + elif main_mip_results.solver.termination_condition is tc.feasible or ( + main_mip_results.solver.termination_condition is tc.other + and main_mip_results.solution.status is SolutionStatus.feasible + ): + # load the solution and suppress the warning message by setting + # solver status to ok. + MindtPy = main_mip.MindtPy_utils + self.config.logger.info( + 'MILP solver reported feasible solution, ' + 'but not guaranteed to be optimal.' + ) + copy_var_list_values( + main_mip.MindtPy_utils.variable_list, + self.fixed_nlp.MindtPy_utils.variable_list, + self.config, + skip_fixed=False, + ) + self.update_suboptimal_dual_bound(main_mip_results) + self.config.logger.info( + self.log_formatter.format( + self.mip_iter, + 'MILP', + value(MindtPy.mip_obj.expr), + self.primal_bound, + self.dual_bound, + self.rel_gap, + get_main_elapsed_time(self.timing), + ) + ) + else: + raise ValueError( + 'MindtPy unable to handle MILP main termination condition ' + 'of %s. Solver message: %s' + % ( + main_mip_results.solver.termination_condition, + main_mip_results.solver.message, + ) + ) + else: + self.config.logger.info('Algorithm should terminate here.') + should_terminate = True + # break + return should_terminate + + # iterate.py + def MindtPy_iteration_loop(self): + """Main loop for MindtPy Algorithms. + + This is the outermost function for the Outer Approximation algorithm in this package; this function controls the progress of + solving the model. + + Raises + ------ + ValueError + The strategy value is not correct or not included. + """ + config = self.config + while self.mip_iter < config.iteration_limit: + # solve MIP main problem + with time_code(self.timing, 'main'): + main_mip, main_mip_results = self.solve_main() + if self.handle_main_mip_termination(main_mip, main_mip_results): + break + # Call the MIP post-solve callback + with time_code(self.timing, 'Call after main solve'): + config.call_after_main_solve(main_mip) + + # Regularization is activated after the first feasible solution is found. + if config.add_regularization is not None: + if not config.single_tree: + self.add_regularization() + + # In R-LP/NLP, we might end up with an integer combination that hasn't been explored. + # Therefore, we need to solve fixed NLP subproblem one more time. + if config.single_tree: + self.curr_int_sol = get_integer_solution(self.mip, string_zero=True) + copy_var_list_values( + main_mip.MindtPy_utils.variable_list, + self.fixed_nlp.MindtPy_utils.variable_list, + config, + skip_fixed=False, + ) + if self.curr_int_sol not in set(self.integer_list): + fixed_nlp, fixed_nlp_result = self.solve_subproblem() + self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result) + + if self.algorithm_should_terminate(check_cycling=True): + self.last_iter_cuts = False + break + + if not config.single_tree: # if we don't use lazy callback, i.e. LP_NLP + # Solve NLP subproblem + # The constraint linearization happens in the handlers + if not config.solution_pool: + fixed_nlp, fixed_nlp_result = self.solve_subproblem() + self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result) + + # Call the NLP post-solve callback + with time_code(self.timing, 'Call after subproblem solve'): + config.call_after_subproblem_solve(fixed_nlp) + + if self.algorithm_should_terminate(check_cycling=False): + self.last_iter_cuts = True + break + else: + solution_name_obj = self.get_solution_name_obj(main_mip_results) + for index, (name, _) in enumerate(solution_name_obj): + # the optimal solution of the main problem has been added to integer_list above + # so we should skip checking cycling for the first solution in the solution pool + if index > 0: + copy_var_list_values_from_solution_pool( + self.mip.MindtPy_utils.variable_list, + self.fixed_nlp.MindtPy_utils.variable_list, + config, + solver_model=main_mip_results._solver_model, + var_map=main_mip_results._pyomo_var_to_solver_var_map, + solution_name=name, + ) + self.curr_int_sol = get_integer_solution(self.fixed_nlp) + if self.curr_int_sol in set(self.integer_list): + config.logger.info( + 'The same combination has been explored and will be skipped here.' + ) + continue + else: + self.integer_list.append(self.curr_int_sol) + fixed_nlp, fixed_nlp_result = self.solve_subproblem() + self.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result) + + # Call the NLP post-solve callback + with time_code(self.timing, 'Call after subproblem solve'): + config.call_after_subproblem_solve(fixed_nlp) + + if self.algorithm_should_terminate(check_cycling=False): + self.last_iter_cuts = True + break # TODO: break two loops. + + # if add_no_good_cuts is True, the bound obtained in the last iteration is no reliable. + # we correct it after the iteration. + if ( + (config.add_no_good_cuts or config.use_tabu_list) + and not self.should_terminate + and config.add_regularization is None + ): + self.fix_dual_bound(self.last_iter_cuts) + config.logger.info( + ' ===============================================================================================' + ) + + def get_solution_name_obj(self, main_mip_results): + if self.config.mip_solver == 'cplex_persistent': + solution_pool_names = ( + main_mip_results._solver_model.solution.pool.get_names() + ) + elif self.config.mip_solver == 'gurobi_persistent': + solution_pool_names = list(range(main_mip_results._solver_model.SolCount)) + # list to store the name and objective value of the solutions in the solution pool + solution_name_obj = [] + for name in solution_pool_names: + if self.config.mip_solver == 'cplex_persistent': + obj = main_mip_results._solver_model.solution.pool.get_objective_value( + name + ) + elif self.config.mip_solver == 'gurobi_persistent': + main_mip_results._solver_model.setParam( + gurobipy.GRB.Param.SolutionNumber, name + ) + obj = main_mip_results._solver_model.PoolObjVal + solution_name_obj.append([name, obj]) + solution_name_obj.sort( + key=itemgetter(1), reverse=self.objective_sense == maximize + ) + solution_name_obj = solution_name_obj[: self.config.num_solution_iteration] + return solution_name_obj + + def add_regularization(self): + if self.best_solution_found is not None: + # The main problem might be unbounded, regularization is activated only when a valid bound is provided. + if self.dual_bound != self.dual_bound_progress[0]: + with time_code(self.timing, 'regularization main'): + ( + regularization_main_mip, + regularization_main_mip_results, + ) = self.solve_regularization_main() + self.handle_regularization_main_tc( + regularization_main_mip, regularization_main_mip_results + ) + + def bounds_converged(self): + # Check bound convergence + if self.abs_gap <= self.config.absolute_bound_tolerance: + self.config.logger.info( + 'MindtPy exiting on bound convergence. ' + 'Absolute gap: {} <= absolute tolerance: {} \n'.format( + self.abs_gap, self.config.absolute_bound_tolerance + ) + ) + self.results.solver.termination_condition = tc.optimal + return True + # Check relative bound convergence + if self.best_solution_found is not None: + if self.rel_gap <= self.config.relative_bound_tolerance: + self.config.logger.info( + 'MindtPy exiting on bound convergence. ' + 'Relative gap : {} <= relative tolerance: {} \n'.format( + self.rel_gap, self.config.relative_bound_tolerance + ) + ) + self.results.solver.termination_condition = tc.optimal + return True + return False + + def reached_iteration_limit(self): + # Check iteration limit + if self.mip_iter >= self.config.iteration_limit: + self.config.logger.info( + 'MindtPy unable to converge bounds ' + 'after {} main iterations.'.format(self.mip_iter) + ) + self.config.logger.info( + 'Final bound values: Primal Bound: {} Dual Bound: {}'.format( + self.primal_bound, self.dual_bound + ) + ) + if self.config.single_tree: + self.results.solver.termination_condition = tc.feasible + else: + self.results.solver.termination_condition = tc.maxIterations + return True + else: + return False + + def reached_time_limit(self): + if get_main_elapsed_time(self.timing) >= self.config.time_limit: + self.config.logger.info( + 'MindtPy unable to converge bounds ' + 'before time limit of {} seconds. ' + 'Elapsed: {} seconds'.format( + self.config.time_limit, get_main_elapsed_time(self.timing) + ) + ) + self.config.logger.info( + 'Final bound values: Primal Bound: {} Dual Bound: {}'.format( + self.primal_bound, self.dual_bound + ) + ) + self.results.solver.termination_condition = tc.maxTimeLimit + return True + else: + return False + + def reached_stalling_limit(self): + config = self.config + if len(self.primal_bound_progress) >= config.stalling_limit: + if ( + abs( + self.primal_bound_progress[-1] + - self.primal_bound_progress[-config.stalling_limit] + ) + <= config.zero_tolerance + ): + config.logger.info( + 'Algorithm is not making enough progress. ' + 'Exiting iteration loop.' + ) + config.logger.info( + 'Final bound values: Primal Bound: {} Dual Bound: {}'.format( + self.primal_bound, self.dual_bound + ) + ) + if self.best_solution_found is not None: + self.results.solver.termination_condition = tc.feasible + else: + # TODO: Is it correct to set self.working_model as the best_solution_found? + # In function copy_var_list_values, skip_fixed is set to True in default. + self.best_solution_found = self.working_model.clone() + config.logger.warning( + 'Algorithm did not find a feasible solution. ' + 'Returning best bound solution. Consider increasing stalling_limit or absolute_bound_tolerance.' + ) + self.results.solver.termination_condition = tc.noSolution + return True + return False + + def iteration_cycling(self): + config = self.config + if config.cycling_check or config.use_tabu_list: + self.curr_int_sol = get_integer_solution(self.mip) + if config.cycling_check and self.mip_iter >= 1: + if self.curr_int_sol in set(self.integer_list): + config.logger.info( + 'Cycling happens after {} main iterations. ' + 'The same combination is obtained in iteration {} ' + 'This issue happens when the NLP subproblem violates constraint qualification. ' + 'Convergence to optimal solution is not guaranteed.'.format( + self.mip_iter, + self.integer_list.index(self.curr_int_sol) + 1, + ) + ) + config.logger.info( + 'Final bound values: Primal Bound: {} Dual Bound: {}'.format( + self.primal_bound, self.dual_bound + ) + ) + # TODO determine self.primal_bound, self.dual_bound is inf or -inf. + self.results.solver.termination_condition = tc.feasible + return True + self.integer_list.append(self.curr_int_sol) + return False diff --git a/pyomo/contrib/mindtpy/config_options.py b/pyomo/contrib/mindtpy/config_options.py index c081dbde20e..ed0c86baae9 100644 --- a/pyomo/contrib/mindtpy/config_options.py +++ b/pyomo/contrib/mindtpy/config_options.py @@ -1,8 +1,22 @@ # -*- coding: utf-8 -*- import logging from pyomo.common.config import ( - ConfigBlock, ConfigValue, In, PositiveFloat, PositiveInt, NonNegativeInt) + ConfigBlock, + ConfigValue, + In, + PositiveFloat, + PositiveInt, + NonNegativeInt, +) from pyomo.contrib.gdpopt.util import _DoNothing, a_logger +from pyomo.common.deprecation import deprecation_warning + +_supported_algorithms = { + 'OA': ('mindtpy.oa', 'Outer Approximation'), + 'ECP': ('mindtpy.ecp', 'Extended Cutting Plane'), + 'GOA': ('mindtpy.goa', 'Global Outer Approximation'), + 'FP': ('mindtpy.fp', 'Feasibility Pump'), +} def _get_MindtPy_config(): @@ -15,222 +29,473 @@ def _get_MindtPy_config(): """ CONFIG = ConfigBlock('MindtPy') - CONFIG.declare('iteration_limit', ConfigValue( - default=50, - domain=NonNegativeInt, - description='Iteration limit', - doc='Number of maximum iterations in the decomposition methods.' - )) - CONFIG.declare('stalling_limit', ConfigValue( - default=15, - domain=PositiveInt, - description='Stalling limit', - doc='Stalling limit for primal bound progress in the decomposition methods.' - )) - CONFIG.declare('time_limit', ConfigValue( - default=600, - domain=PositiveInt, - description='Time limit (seconds, default=600)', - doc='Seconds allowed until terminated. Note that the time limit can' - 'currently only be enforced between subsolver invocations. You may' - 'need to set subsolver time limits as well.' - )) - CONFIG.declare('strategy', ConfigValue( - default='OA', - domain=In(['OA', 'ECP', 'GOA', 'FP']), - description='Decomposition strategy', - doc='MINLP Decomposition strategy to be applied to the method. ' - 'Currently available Outer Approximation (OA), Extended Cutting ' - 'Plane (ECP), Global Outer Approximation (GOA) and Feasibility Pump (FP).' - )) - CONFIG.declare('add_regularization', ConfigValue( - default=None, - domain=In(['level_L1', 'level_L2', 'level_L_infinity', - 'grad_lag', 'hess_lag', 'hess_only_lag', 'sqp_lag']), - description='add regularization', - doc='Solving a regularization problem before solve the fixed subproblem' - 'the objective function of the regularization problem.' - )) - CONFIG.declare('init_strategy', ConfigValue( - default=None, - domain=In(['rNLP', 'initial_binary', 'max_binary', 'FP']), - description='Initialization strategy', - doc='Initialization strategy used by any method. Currently the ' + _add_common_configs(CONFIG) + _add_subsolver_configs(CONFIG) + _add_tolerance_configs(CONFIG) + _add_fp_configs(CONFIG) + _add_bound_configs(CONFIG) + _add_roa_configs(CONFIG) + return CONFIG + + +def _get_MindtPy_OA_config(): + """Set up the configurations for MindtPy-OA. + + Returns + ------- + CONFIG : ConfigBlock + The specific configurations for MindtPy + """ + CONFIG = ConfigBlock('MindtPy-OA') + + _add_common_configs(CONFIG) + _add_oa_configs(CONFIG) + _add_roa_configs(CONFIG) + _add_fp_configs(CONFIG) + _add_oa_cuts_configs(CONFIG) + _add_subsolver_configs(CONFIG) + _add_tolerance_configs(CONFIG) + _add_bound_configs(CONFIG) + return CONFIG + + +def _get_MindtPy_GOA_config(): + """Set up the configurations for MindtPy-GOA. + + Returns + ------- + CONFIG : ConfigBlock + The specific configurations for MindtPy + """ + CONFIG = ConfigBlock('MindtPy-GOA') + + _add_common_configs(CONFIG) + _add_goa_configs(CONFIG) + _add_oa_cuts_configs(CONFIG) + _add_subsolver_configs(CONFIG) + _add_tolerance_configs(CONFIG) + _add_bound_configs(CONFIG) + return CONFIG + + +def _get_MindtPy_ECP_config(): + """Set up the configurations for MindtPy-ECP. + + Returns + ------- + CONFIG : ConfigBlock + The specific configurations for MindtPy + """ + CONFIG = ConfigBlock('MindtPy-GOA') + + _add_common_configs(CONFIG) + _add_ecp_configs(CONFIG) + _add_oa_cuts_configs(CONFIG) + _add_subsolver_configs(CONFIG) + _add_tolerance_configs(CONFIG) + _add_bound_configs(CONFIG) + return CONFIG + + +def _get_MindtPy_FP_config(): + """Set up the configurations for MindtPy-FP. + + Returns + ------- + CONFIG : ConfigBlock + The specific configurations for MindtPy + """ + CONFIG = ConfigBlock('MindtPy-GOA') + CONFIG.declare( + 'init_strategy', + ConfigValue( + default='FP', + domain=In(['FP']), + description='Initialization strategy', + doc='Initialization strategy used by any method. Currently the ' 'continuous relaxation of the MINLP (rNLP), solve a maximal ' 'covering problem (max_binary), and fix the initial value for ' - 'the integer variables (initial_binary).' - )) - CONFIG.declare('max_slack', ConfigValue( - default=1000.0, - domain=PositiveFloat, - description='Maximum slack variable', - doc='Maximum slack variable value allowed for the Outer Approximation ' - 'cuts.' - )) - CONFIG.declare('OA_penalty_factor', ConfigValue( - default=1000.0, - domain=PositiveFloat, - description='Outer Approximation slack penalty factor', - doc='In the objective function of the Outer Approximation method, the ' - 'slack variables corresponding to all the constraints get ' - 'multiplied by this number and added to the objective.' - )) - CONFIG.declare('call_after_main_solve', ConfigValue( - default=_DoNothing(), - domain=None, - description='Function to be executed after every main problem', - doc='Callback hook after a solution of the main problem.' - )) - CONFIG.declare('call_after_subproblem_solve', ConfigValue( - default=_DoNothing(), - domain=None, - description='Function to be executed after every subproblem', - doc='Callback hook after a solution of the nonlinear subproblem.' - )) - CONFIG.declare('call_after_subproblem_feasible', ConfigValue( - default=_DoNothing(), - domain=None, - description='Function to be executed after every feasible subproblem', - doc='Callback hook after a feasible solution' - ' of the nonlinear subproblem.' - )) - CONFIG.declare('tee', ConfigValue( - default=False, - description='Stream output to terminal.', - domain=bool - )) - CONFIG.declare('logger', ConfigValue( - default='pyomo.contrib.mindtpy', - description='The logger object or name to use for reporting.', - domain=a_logger - )) - CONFIG.declare('logging_level', ConfigValue( - default=logging.INFO, - domain=NonNegativeInt, - description='The logging level for MindtPy.' - 'CRITICAL = 50, ERROR = 40, WARNING = 30, INFO = 20, DEBUG = 10, NOTSET = 0', - )) - CONFIG.declare('integer_to_binary', ConfigValue( - default=False, - description='Convert integer variables to binaries (for no-good cuts).', - domain=bool - )) - CONFIG.declare('add_no_good_cuts', ConfigValue( - default=False, - description='Add no-good cuts (no-good cuts) to binary variables to disallow same integer solution again.' - 'Note that integer_to_binary flag needs to be used to apply it to actual integers and not just binaries.', - domain=bool - )) - CONFIG.declare('use_tabu_list', ConfigValue( - default=False, - description='Use tabu list and incumbent callback to disallow same integer solution again.', - domain=bool - )) - CONFIG.declare('add_affine_cuts', ConfigValue( - default=False, - description='Add affine cuts drive from MC++.', - domain=bool - )) - CONFIG.declare('single_tree', ConfigValue( - default=False, - description='Use single tree implementation in solving the MIP main problem.', - domain=bool - )) - CONFIG.declare('solution_pool', ConfigValue( - default=False, - description='Use solution pool in solving the MIP main problem.', - domain=bool - )) - CONFIG.declare('num_solution_iteration', ConfigValue( - default=5, - description='The number of MIP solutions (from the solution pool) used to generate the fixed NLP subproblem in each iteration.', - domain=PositiveInt - )) - CONFIG.declare('add_slack', ConfigValue( - default=False, - description='Whether add slack variable here.' - 'slack variables here are used to deal with nonconvex MINLP.', - domain=bool - )) - CONFIG.declare('cycling_check', ConfigValue( - default=True, - description='Check if OA algorithm is stalled in a cycle and terminate.', - domain=bool - )) - CONFIG.declare('feasibility_norm', ConfigValue( - default='L_infinity', - domain=In(['L1', 'L2', 'L_infinity']), - description='Different forms of objective function in feasibility subproblem.' - )) - CONFIG.declare('differentiate_mode', ConfigValue( - default='reverse_symbolic', - domain=In(['reverse_symbolic', 'sympy']), - description='Differentiate mode to calculate jacobian.' - )) - CONFIG.declare('linearize_inactive', ConfigValue( - default=False, - description='Add OA cuts for inactive constraints.', - domain=bool - )) - CONFIG.declare('use_mcpp', ConfigValue( - default=False, - description="Use package MC++ to set a bound for variable 'objective_value', which is introduced when the original problem's objective function is nonlinear.", - domain=bool - )) - CONFIG.declare('equality_relaxation', ConfigValue( - default=False, - description='Use dual solution from the NLP solver to add OA cuts for equality constraints.', - domain=bool - )) - CONFIG.declare('calculate_dual_at_solution', ConfigValue( - default=False, - description='Calculate duals of the NLP subproblem.', - domain=bool - )) - CONFIG.declare('use_fbbt', ConfigValue( - default=False, - description='Use fbbt to tighten the feasible region of the problem.', - domain=bool - )) - CONFIG.declare('use_dual_bound', ConfigValue( - default=True, - description='Add dual bound constraint to enforce the objective satisfies best-found dual bound.', - domain=bool - )) - CONFIG.declare('heuristic_nonconvex', ConfigValue( - default=False, - description='Use dual solution from the NLP solver and slack variables to add OA cuts for equality constraints (Equality relaxation)' - 'and minimize the sum of the slack variables (Augmented Penalty).', - domain=bool - )) - CONFIG.declare('partition_obj_nonlinear_terms', ConfigValue( - default=True, - description='Partition objective with the sum of nonlinear terms using epigraph reformulation.', - domain=bool - )) - CONFIG.declare('quadratic_strategy', ConfigValue( - default=0, - domain=In([0, 1, 2]), - description='How to treat the quadratic terms in MINLP.' - '0 : treat as nonlinear terms' - '1 : only use quadratic terms in objective function directly in main problem' - '2 : use quadratic terms in objective function and constraints in main problem', - )) - CONFIG.declare('move_objective', ConfigValue( - default=False, - domain=bool, - description='Whether to replace the objective function to constraint using epigraph constraint.', - )) - + 'the integer variables (initial_binary).', + ), + ) + _add_common_configs(CONFIG) + _add_fp_configs(CONFIG) + _add_oa_cuts_configs(CONFIG) _add_subsolver_configs(CONFIG) _add_tolerance_configs(CONFIG) - _add_fp_configs(CONFIG) _add_bound_configs(CONFIG) - _add_loa_configs(CONFIG) return CONFIG +def _add_oa_configs(CONFIG): + CONFIG.declare( + 'heuristic_nonconvex', + ConfigValue( + default=False, + description='Use dual solution from the NLP solver and slack variables to add OA cuts for equality constraints (Equality relaxation)' + 'and minimize the sum of the slack variables (Augmented Penalty).', + domain=bool, + ), + ) + CONFIG.declare( + 'init_strategy', + ConfigValue( + default='rNLP', + domain=In(['rNLP', 'initial_binary', 'max_binary', 'FP']), + description='Initialization strategy', + doc='Initialization strategy used by any method. Currently the ' + 'continuous relaxation of the MINLP (rNLP), solve a maximal ' + 'covering problem (max_binary), and fix the initial value for ' + 'the integer variables (initial_binary).', + ), + ) + + +def _add_oa_cuts_configs(CONFIG): + CONFIG.declare( + 'add_slack', + ConfigValue( + default=False, + description='Whether add slack variable here.' + 'slack variables here are used to deal with nonconvex MINLP.', + domain=bool, + ), + ) + CONFIG.declare( + 'max_slack', + ConfigValue( + default=1000.0, + domain=PositiveFloat, + description='Maximum slack variable', + doc='Maximum slack variable value allowed for the Outer Approximation ' + 'cuts.', + ), + ) + CONFIG.declare( + 'OA_penalty_factor', + ConfigValue( + default=1000.0, + domain=PositiveFloat, + description='Outer Approximation slack penalty factor', + doc='In the objective function of the Outer Approximation method, the ' + 'slack variables corresponding to all the constraints get ' + 'multiplied by this number and added to the objective.', + ), + ) + CONFIG.declare( + 'equality_relaxation', + ConfigValue( + default=False, + description='Use dual solution from the NLP solver to add OA cuts for equality constraints.', + domain=bool, + ), + ) + CONFIG.declare( + 'linearize_inactive', + ConfigValue( + default=False, + description='Add OA cuts for inactive constraints.', + domain=bool, + ), + ) + + +def _add_goa_configs(CONFIG): + CONFIG.declare( + 'init_strategy', + ConfigValue( + default='rNLP', + domain=In(['rNLP', 'initial_binary', 'max_binary']), + description='Initialization strategy', + doc='Initialization strategy used by any method. Currently the ' + 'continuous relaxation of the MINLP (rNLP), solve a maximal ' + 'covering problem (max_binary), and fix the initial value for ' + 'the integer variables (initial_binary).', + ), + ) + + +def _add_ecp_configs(CONFIG): + CONFIG.declare( + 'ecp_tolerance', + ConfigValue( + default=None, + domain=PositiveFloat, + description='ECP tolerance', + doc='Feasibility tolerance used to determine the stopping criterion in' + 'the ECP method. As long as nonlinear constraint are violated for ' + 'more than this tolerance, the method will keep iterating.', + ), + ) + CONFIG.declare( + 'init_strategy', + ConfigValue( + default='max_binary', + domain=In(['rNLP', 'max_binary', 'FP']), + description='Initialization strategy', + doc='Initialization strategy used by any method. Currently the ' + 'continuous relaxation of the MINLP (rNLP), solve a maximal ' + 'covering problem (max_binary), and fix the initial value for ' + 'the integer variables (initial_binary).', + ), + ) + + +def _add_common_configs(CONFIG): + CONFIG.declare( + 'iteration_limit', + ConfigValue( + default=50, + domain=NonNegativeInt, + description='Iteration limit', + doc='Number of maximum iterations in the decomposition methods.', + ), + ) + CONFIG.declare( + 'stalling_limit', + ConfigValue( + default=15, + domain=PositiveInt, + description='Stalling limit', + doc='Stalling limit for primal bound progress in the decomposition methods.', + ), + ) + CONFIG.declare( + 'time_limit', + ConfigValue( + default=600, + domain=PositiveInt, + description='Time limit (seconds, default=600)', + doc='Seconds allowed until terminated. Note that the time limit can' + 'currently only be enforced between subsolver invocations. You may' + 'need to set subsolver time limits as well.', + ), + ) + CONFIG.declare( + 'strategy', + ConfigValue( + default='OA', + domain=In(['OA', 'ECP', 'GOA', 'FP']), + description='Decomposition strategy', + doc='MINLP Decomposition strategy to be applied to the method. ' + 'Currently available Outer Approximation (OA), Extended Cutting ' + 'Plane (ECP), Global Outer Approximation (GOA) and Feasibility Pump (FP).', + ), + ) + CONFIG.declare( + 'add_regularization', + ConfigValue( + default=None, + domain=In( + [ + 'level_L1', + 'level_L2', + 'level_L_infinity', + 'grad_lag', + 'hess_lag', + 'hess_only_lag', + 'sqp_lag', + ] + ), + description='add regularization', + doc='Solving a regularization problem before solve the fixed subproblem' + 'the objective function of the regularization problem.', + ), + ) + CONFIG.declare( + 'call_after_main_solve', + ConfigValue( + default=_DoNothing(), + domain=None, + description='Function to be executed after every main problem', + doc='Callback hook after a solution of the main problem.', + ), + ) + CONFIG.declare( + 'call_after_subproblem_solve', + ConfigValue( + default=_DoNothing(), + domain=None, + description='Function to be executed after every subproblem', + doc='Callback hook after a solution of the nonlinear subproblem.', + ), + ) + CONFIG.declare( + 'call_after_subproblem_feasible', + ConfigValue( + default=_DoNothing(), + domain=None, + description='Function to be executed after every feasible subproblem', + doc='Callback hook after a feasible solution' + ' of the nonlinear subproblem.', + ), + ) + CONFIG.declare( + 'tee', + ConfigValue( + default=False, description='Stream output to terminal.', domain=bool + ), + ) + CONFIG.declare( + 'logger', + ConfigValue( + default='pyomo.contrib.mindtpy', + description='The logger object or name to use for reporting.', + domain=a_logger, + ), + ) + CONFIG.declare( + 'logging_level', + ConfigValue( + default=logging.INFO, + domain=NonNegativeInt, + description='The logging level for MindtPy.' + 'CRITICAL = 50, ERROR = 40, WARNING = 30, INFO = 20, DEBUG = 10, NOTSET = 0', + ), + ) + CONFIG.declare( + 'integer_to_binary', + ConfigValue( + default=False, + description='Convert integer variables to binaries (for no-good cuts).', + domain=bool, + ), + ) + CONFIG.declare( + 'add_no_good_cuts', + ConfigValue( + default=False, + description='Add no-good cuts (no-good cuts) to binary variables to disallow same integer solution again.' + 'Note that integer_to_binary flag needs to be used to apply it to actual integers and not just binaries.', + domain=bool, + ), + ) + CONFIG.declare( + 'use_tabu_list', + ConfigValue( + default=False, + description='Use tabu list and incumbent callback to disallow same integer solution again.', + domain=bool, + ), + ) + CONFIG.declare( + 'single_tree', + ConfigValue( + default=False, + description='Use single tree implementation in solving the MIP main problem.', + domain=bool, + ), + ) + CONFIG.declare( + 'solution_pool', + ConfigValue( + default=False, + description='Use solution pool in solving the MIP main problem.', + domain=bool, + ), + ) + CONFIG.declare( + 'num_solution_iteration', + ConfigValue( + default=5, + description='The number of MIP solutions (from the solution pool) used to generate the fixed NLP subproblem in each iteration.', + domain=PositiveInt, + ), + ) + CONFIG.declare( + 'cycling_check', + ConfigValue( + default=True, + description='Check if OA algorithm is stalled in a cycle and terminate.', + domain=bool, + ), + ) + CONFIG.declare( + 'feasibility_norm', + ConfigValue( + default='L_infinity', + domain=In(['L1', 'L2', 'L_infinity']), + description='Different forms of objective function in feasibility subproblem.', + ), + ) + CONFIG.declare( + 'differentiate_mode', + ConfigValue( + default='reverse_symbolic', + domain=In(['reverse_symbolic', 'sympy']), + description='Differentiate mode to calculate jacobian.', + ), + ) + CONFIG.declare( + 'use_mcpp', + ConfigValue( + default=False, + description="Use package MC++ to set a bound for variable 'objective_value', which is introduced when the original problem's objective function is nonlinear.", + domain=bool, + ), + ) + CONFIG.declare( + 'calculate_dual_at_solution', + ConfigValue( + default=False, + description='Calculate duals of the NLP subproblem.', + domain=bool, + ), + ) + CONFIG.declare( + 'use_fbbt', + ConfigValue( + default=False, + description='Use fbbt to tighten the feasible region of the problem.', + domain=bool, + ), + ) + CONFIG.declare( + 'use_dual_bound', + ConfigValue( + default=True, + description='Add dual bound constraint to enforce the objective satisfies best-found dual bound.', + domain=bool, + ), + ) + CONFIG.declare( + 'partition_obj_nonlinear_terms', + ConfigValue( + default=True, + description='Partition objective with the sum of nonlinear terms using epigraph reformulation.', + domain=bool, + ), + ) + CONFIG.declare( + 'quadratic_strategy', + ConfigValue( + default=0, + domain=In([0, 1, 2]), + description='How to treat the quadratic terms in MINLP.' + '0 : treat as nonlinear terms' + '1 : only use quadratic terms in objective function directly in main problem' + '2 : use quadratic terms in objective function and constraints in main problem', + ), + ) + CONFIG.declare( + 'move_objective', + ConfigValue( + default=False, + domain=bool, + description='Whether to replace the objective function to constraint using epigraph constraint.', + ), + ) + CONFIG.declare( + 'add_cuts_at_incumbent', + ConfigValue( + default=False, + description='Whether to add lazy cuts to the main problem at the incumbent solution found in the branch & bound tree', + domain=bool, + ), + ) + + def _add_subsolver_configs(CONFIG): """Adds the subsolver-related configurations. @@ -239,72 +504,129 @@ def _add_subsolver_configs(CONFIG): CONFIG : ConfigBlock The specific configurations for MindtPy. """ - CONFIG.declare('nlp_solver', ConfigValue( - default='ipopt', - domain=In(['ipopt', 'appsi_ipopt', 'gams', 'baron']), - description='NLP subsolver name', - doc='Which NLP subsolver is going to be used for solving the nonlinear' - 'subproblems.' - )) - CONFIG.declare('nlp_solver_args', ConfigBlock( - implicit=True, - description='NLP subsolver options', - doc='Which NLP subsolver options to be passed to the solver while ' - 'solving the nonlinear subproblems.' - )) - CONFIG.declare('mip_solver', ConfigValue( - default='glpk', - domain=In(['gurobi', 'cplex', 'cbc', 'glpk', 'gams', - 'gurobi_persistent', 'cplex_persistent', 'appsi_cplex', 'appsi_gurobi']), - description='MIP subsolver name', - doc='Which MIP subsolver is going to be used for solving the mixed-' - 'integer main problems.' - )) - CONFIG.declare('mip_solver_args', ConfigBlock( - implicit=True, - description='MIP subsolver options', - doc='Which MIP subsolver options to be passed to the solver while ' - 'solving the mixed-integer main problems.' - )) - CONFIG.declare('mip_solver_mipgap', ConfigValue( - default=1E-4, - domain=PositiveFloat, - description='Mipgap passed to MIP solver.' - )) - CONFIG.declare('threads', ConfigValue( - default=0, - domain=NonNegativeInt, - description='Threads', - doc='Threads used by MIP solver and NLP solver.' - )) - CONFIG.declare('regularization_mip_threads', ConfigValue( - default=0, - domain=NonNegativeInt, - description='regularization MIP threads', - doc='Threads used by MIP solver to solve regularization main problem.' - )) - CONFIG.declare('solver_tee', ConfigValue( - default=False, - description='Stream the output of MIP solver and NLP solver to terminal.', - domain=bool - )) - CONFIG.declare('mip_solver_tee', ConfigValue( - default=False, - description='Stream the output of MIP solver to terminal.', - domain=bool - )) - CONFIG.declare('nlp_solver_tee', ConfigValue( - default=False, - description='Stream the output of nlp solver to terminal.', - domain=bool - )) - CONFIG.declare('mip_regularization_solver', ConfigValue( - default=None, - domain=In(['gurobi', 'cplex', 'cbc', 'glpk', 'gams', - 'gurobi_persistent', 'cplex_persistent', 'appsi_cplex', 'appsi_gurobi']), - description='MIP subsolver for regularization problem', - doc='Which MIP subsolver is going to be used for solving the regularization problem.' - )) + CONFIG.declare( + 'nlp_solver', + ConfigValue( + default='ipopt', + domain=In(['ipopt', 'appsi_ipopt', 'gams', 'baron', 'cyipopt']), + description='NLP subsolver name', + doc='Which NLP subsolver is going to be used for solving the nonlinear' + 'subproblems.', + ), + ) + CONFIG.declare( + 'nlp_solver_args', + ConfigBlock( + implicit=True, + description='NLP subsolver options', + doc='Which NLP subsolver options to be passed to the solver while ' + 'solving the nonlinear subproblems.', + ), + ) + CONFIG.declare( + 'mip_solver', + ConfigValue( + default='glpk', + domain=In( + [ + 'gurobi', + 'cplex', + 'cbc', + 'glpk', + 'gams', + 'gurobi_persistent', + 'cplex_persistent', + 'appsi_cplex', + 'appsi_gurobi', + # 'appsi_highs', TODO: feasibility pump now fails with appsi_highs #2951 + ] + ), + description='MIP subsolver name', + doc='Which MIP subsolver is going to be used for solving the mixed-' + 'integer main problems.', + ), + ) + CONFIG.declare( + 'mip_solver_args', + ConfigBlock( + implicit=True, + description='MIP subsolver options', + doc='Which MIP subsolver options to be passed to the solver while ' + 'solving the mixed-integer main problems.', + ), + ) + CONFIG.declare( + 'mip_solver_mipgap', + ConfigValue( + default=1e-4, + domain=PositiveFloat, + description='Mipgap passed to MIP solver.', + ), + ) + CONFIG.declare( + 'threads', + ConfigValue( + default=0, + domain=NonNegativeInt, + description='Threads', + doc='Threads used by MIP solver and NLP solver.', + ), + ) + CONFIG.declare( + 'regularization_mip_threads', + ConfigValue( + default=0, + domain=NonNegativeInt, + description='regularization MIP threads', + doc='Threads used by MIP solver to solve regularization main problem.', + ), + ) + CONFIG.declare( + 'solver_tee', + ConfigValue( + default=False, + description='Stream the output of MIP solver and NLP solver to terminal.', + domain=bool, + ), + ) + CONFIG.declare( + 'mip_solver_tee', + ConfigValue( + default=False, + description='Stream the output of MIP solver to terminal.', + domain=bool, + ), + ) + CONFIG.declare( + 'nlp_solver_tee', + ConfigValue( + default=False, + description='Stream the output of nlp solver to terminal.', + domain=bool, + ), + ) + CONFIG.declare( + 'mip_regularization_solver', + ConfigValue( + default=None, + domain=In( + [ + 'gurobi', + 'cplex', + 'cbc', + 'glpk', + 'gams', + 'gurobi_persistent', + 'cplex_persistent', + 'appsi_cplex', + 'appsi_gurobi', + # 'appsi_highs', + ] + ), + description='MIP subsolver for regularization problem', + doc='Which MIP subsolver is going to be used for solving the regularization problem.', + ), + ) def _add_tolerance_configs(CONFIG): @@ -315,49 +637,50 @@ def _add_tolerance_configs(CONFIG): CONFIG : ConfigBlock The specific configurations for MindtPy. """ - CONFIG.declare('absolute_bound_tolerance', ConfigValue( - default=1E-4, - domain=PositiveFloat, - description='Bound tolerance', - doc='Absolute tolerance for bound feasibility checks.' - )) - CONFIG.declare('relative_bound_tolerance', ConfigValue( - default=1E-3, - domain=PositiveFloat, - description='Relative bound tolerance', - doc='Relative tolerance for bound feasibility checks. ' - ':math:`|Primal Bound - Dual Bound| / (1e-10 + |Primal Bound|) <= relative tolerance`' - )) - CONFIG.declare('small_dual_tolerance', ConfigValue( - default=1E-8, - description='When generating cuts, small duals multiplied ' - 'by expressions can cause problems. Exclude all duals ' - 'smaller in absolute value than the following.' - )) - CONFIG.declare('integer_tolerance', ConfigValue( - default=1E-5, - description='Tolerance on integral values.' - )) - CONFIG.declare('constraint_tolerance', ConfigValue( - default=1E-6, - description='Tolerance on constraint satisfaction.' - )) - CONFIG.declare('variable_tolerance', ConfigValue( - default=1E-8, - description='Tolerance on variable bounds.' - )) - CONFIG.declare('zero_tolerance', ConfigValue( - default=1E-8, - description='Tolerance on variable equal to zero.' - )) - CONFIG.declare('ecp_tolerance', ConfigValue( - default=None, - domain=PositiveFloat, - description='ECP tolerance', - doc='Feasibility tolerance used to determine the stopping criterion in' - 'the ECP method. As long as nonlinear constraint are violated for ' - 'more than this tolerance, the method will keep iterating.' - )) + CONFIG.declare( + 'absolute_bound_tolerance', + ConfigValue( + default=1e-4, + domain=PositiveFloat, + description='Bound tolerance', + doc='Absolute tolerance for bound feasibility checks.', + ), + ) + CONFIG.declare( + 'relative_bound_tolerance', + ConfigValue( + default=1e-3, + domain=PositiveFloat, + description='Relative bound tolerance', + doc='Relative tolerance for bound feasibility checks. ' + ':math:`|Primal Bound - Dual Bound| / (1e-10 + |Primal Bound|) <= relative tolerance`', + ), + ) + CONFIG.declare( + 'small_dual_tolerance', + ConfigValue( + default=1e-8, + description='When generating cuts, small duals multiplied ' + 'by expressions can cause problems. Exclude all duals ' + 'smaller in absolute value than the following.', + ), + ) + CONFIG.declare( + 'integer_tolerance', + ConfigValue(default=1e-5, description='Tolerance on integral values.'), + ) + CONFIG.declare( + 'constraint_tolerance', + ConfigValue(default=1e-6, description='Tolerance on constraint satisfaction.'), + ) + CONFIG.declare( + 'variable_tolerance', + ConfigValue(default=1e-8, description='Tolerance on variable bounds.'), + ) + CONFIG.declare( + 'zero_tolerance', + ConfigValue(default=1e-8, description='Tolerance on variable equal to zero.'), + ) def _add_bound_configs(CONFIG): @@ -368,26 +691,38 @@ def _add_bound_configs(CONFIG): CONFIG : ConfigBlock The specific configurations for MindtPy. """ - CONFIG.declare('obj_bound', ConfigValue( - default=1E15, - domain=PositiveFloat, - description='Bound applied to the linearization of the objective function if main MIP is unbounded.' - )) - CONFIG.declare('continuous_var_bound', ConfigValue( - default=1e10, - description='Default bound added to unbounded continuous variables in nonlinear constraint if single tree is activated.', - domain=PositiveFloat - )) - CONFIG.declare('integer_var_bound', ConfigValue( - default=1e9, - description='Default bound added to unbounded integral variables in nonlinear constraint if single tree is activated.', - domain=PositiveFloat - )) - CONFIG.declare('initial_bound_coef', ConfigValue( - default=1E-1, - domain=PositiveFloat, - description='The coefficient used to approximate the initial primal/dual bound.' - )) + CONFIG.declare( + 'obj_bound', + ConfigValue( + default=1e15, + domain=PositiveFloat, + description='Bound applied to the linearization of the objective function if main MIP is unbounded.', + ), + ) + CONFIG.declare( + 'continuous_var_bound', + ConfigValue( + default=1e10, + description='Default bound added to unbounded continuous variables in nonlinear constraint if single tree is activated.', + domain=PositiveFloat, + ), + ) + CONFIG.declare( + 'integer_var_bound', + ConfigValue( + default=1e9, + description='Default bound added to unbounded integral variables in nonlinear constraint if single tree is activated.', + domain=PositiveFloat, + ), + ) + CONFIG.declare( + 'initial_bound_coef', + ConfigValue( + default=1e-1, + domain=PositiveFloat, + description='The coefficient used to approximate the initial primal/dual bound.', + ), + ) def _add_fp_configs(CONFIG): @@ -398,190 +733,120 @@ def _add_fp_configs(CONFIG): CONFIG : ConfigBlock The specific configurations for MindtPy. """ - CONFIG.declare('fp_cutoffdecr', ConfigValue( - default=1E-1, - domain=PositiveFloat, - description='Additional relative decrement of cutoff value for the original objective function.' - )) - CONFIG.declare('fp_iteration_limit', ConfigValue( - default=20, - domain=PositiveInt, - description='Feasibility pump iteration limit', - doc='Number of maximum iterations in the feasibility pump methods.' - )) + CONFIG.declare( + 'fp_cutoffdecr', + ConfigValue( + default=1e-1, + domain=PositiveFloat, + description='Additional relative decrement of cutoff value for the original objective function.', + ), + ) + CONFIG.declare( + 'fp_iteration_limit', + ConfigValue( + default=20, + domain=PositiveInt, + description='Feasibility pump iteration limit', + doc='Number of maximum iterations in the feasibility pump methods.', + ), + ) # TODO: integrate this option - CONFIG.declare('fp_projcuts', ConfigValue( - default=True, - description='Whether to add cut derived from regularization of MIP solution onto NLP feasible set.', - domain=bool - )) - CONFIG.declare('fp_transfercuts', ConfigValue( - default=True, - description='Whether to transfer cuts from the Feasibility Pump MIP to main MIP in selected strategy (all except from the round in which the FP MIP became infeasible).', - domain=bool - )) - CONFIG.declare('fp_projzerotol', ConfigValue( - default=1E-4, - domain=PositiveFloat, - description='Tolerance on when to consider optimal value of regularization problem as zero, which may trigger the solution of a Sub-NLP.' - )) - CONFIG.declare('fp_mipgap', ConfigValue( - default=1E-2, - domain=PositiveFloat, - description='Optimality tolerance (relative gap) to use for solving MIP regularization problem.' - )) - CONFIG.declare('fp_discrete_only', ConfigValue( - default=True, - description='Only calculate the distance among discrete variables in regularization problems.', - domain=bool - )) - CONFIG.declare('fp_main_norm', ConfigValue( - default='L1', - domain=In(['L1', 'L2', 'L_infinity']), - description='Different forms of objective function MIP regularization problem.' - )) - CONFIG.declare('fp_norm_constraint', ConfigValue( - default=True, - description='Whether to add the norm constraint to FP-NLP', - domain=bool - )) - CONFIG.declare('fp_norm_constraint_coef', ConfigValue( - default=1, - domain=PositiveFloat, - description='The coefficient in the norm constraint, correspond to the Beta in the paper.' - )) - - -def _add_loa_configs(CONFIG): - """Adds the LOA-related configurations. + CONFIG.declare( + 'fp_projcuts', + ConfigValue( + default=True, + description='Whether to add cut derived from regularization of MIP solution onto NLP feasible set.', + domain=bool, + ), + ) + CONFIG.declare( + 'fp_transfercuts', + ConfigValue( + default=True, + description='Whether to transfer cuts from the Feasibility Pump MIP to main MIP in selected strategy (all except from the round in which the FP MIP became infeasible).', + domain=bool, + ), + ) + CONFIG.declare( + 'fp_projzerotol', + ConfigValue( + default=1e-4, + domain=PositiveFloat, + description='Tolerance on when to consider optimal value of regularization problem as zero, which may trigger the solution of a Sub-NLP.', + ), + ) + CONFIG.declare( + 'fp_mipgap', + ConfigValue( + default=1e-2, + domain=PositiveFloat, + description='Optimality tolerance (relative gap) to use for solving MIP regularization problem.', + ), + ) + CONFIG.declare( + 'fp_discrete_only', + ConfigValue( + default=True, + description='Only calculate the distance among discrete variables in regularization problems.', + domain=bool, + ), + ) + CONFIG.declare( + 'fp_main_norm', + ConfigValue( + default='L1', + domain=In(['L1', 'L2', 'L_infinity']), + description='Different forms of objective function MIP regularization problem.', + ), + ) + CONFIG.declare( + 'fp_norm_constraint', + ConfigValue( + default=True, + description='Whether to add the norm constraint to FP-NLP', + domain=bool, + ), + ) + CONFIG.declare( + 'fp_norm_constraint_coef', + ConfigValue( + default=1, + domain=PositiveFloat, + description='The coefficient in the norm constraint, correspond to the Beta in the paper.', + ), + ) - Parameters - ---------- - CONFIG : ConfigBlock - The specific configurations for MindtPy. - """ - CONFIG.declare('level_coef', ConfigValue( - default=0.5, - domain=PositiveFloat, - description='The coefficient in the regularization main problem' - 'represents how much the linear approximation of the MINLP problem is trusted.' - )) - CONFIG.declare('solution_limit', ConfigValue( - default=10, - domain=PositiveInt, - description='The solution limit for the regularization problem since it does not need to be solved to optimality.' - )) - CONFIG.declare('add_cuts_at_incumbent', ConfigValue( - default=False, - description='Whether to add lazy cuts to the main problem at the incumbent solution found in the branch & bound tree', - domain=bool - )) - CONFIG.declare('reduce_level_coef', ConfigValue( - default=False, - description='Whether to reduce level coefficient in ROA single tree when regularization problem is infeasible.', - domain=bool - )) - CONFIG.declare('use_bb_tree_incumbent', ConfigValue( - default=False, - description='Whether to use the incumbent solution of branch & bound tree in ROA single tree when regularization problem is infeasible.', - domain=bool - )) - CONFIG.declare('sqp_lag_scaling_coef', ConfigValue( - default='fixed', - domain=In(['fixed', 'variable_dependent']), - description='The coefficient used to scale the L2 norm in sqp_lag.' - )) - - -def check_config(config): - """Checks if the configuration options make sense. + +def _add_roa_configs(CONFIG): + """Adds the ROA-related configurations. Parameters ---------- - config : ConfigBlock + CONFIG : ConfigBlock The specific configurations for MindtPy. """ - # configuration confirmation - if config.add_regularization is not None: - if config.add_regularization in {'grad_lag', 'hess_lag', 'hess_only_lag', 'sqp_lag'}: - config.calculate_dual_at_solution = True - if config.regularization_mip_threads == 0 and config.threads > 0: - config.regularization_mip_threads = config.threads - config.logger.info( - 'Set regularization_mip_threads equal to threads') - if config.single_tree: - config.add_cuts_at_incumbent = True - # if no method is activated by users, we will use use_bb_tree_incumbent by default - if not (config.reduce_level_coef or config.use_bb_tree_incumbent): - config.use_bb_tree_incumbent = True - if config.mip_regularization_solver is None: - config.mip_regularization_solver = config.mip_solver - if config.single_tree: - config.logger.info('Single-tree implementation is activated.') - config.iteration_limit = 1 - config.add_slack = False - if config.mip_solver not in {'cplex_persistent', 'gurobi_persistent'}: - raise ValueError("Only cplex_persistent and gurobi_persistent are supported for LP/NLP based Branch and Bound method." - "Please refer to https://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html#lp-nlp-based-branch-and-bound.") - if config.threads > 1: - config.threads = 1 - config.logger.info( - 'The threads parameter is corrected to 1 since lazy constraint callback conflicts with multi-threads mode.') - if config.strategy == 'GOA': - config.add_slack = False - config.use_mcpp = True - config.equality_relaxation = False - config.use_fbbt = True - # add_no_good_cuts is Ture by default in GOA - if not config.add_no_good_cuts and not config.use_tabu_list: - config.add_no_good_cuts = True - config.use_tabu_list = False - elif config.strategy == 'FP': # feasibility pump alone - config.init_strategy = 'FP' - config.iteration_limit = 0 - if config.init_strategy == 'FP': - config.add_no_good_cuts = True - config.use_tabu_list = False - - if config.nlp_solver == 'baron': - config.equality_relaxation = False - if config.nlp_solver == 'gams' and config.nlp_solver.__contains__('solver'): - if config.nlp_solver_args['solver'] == 'baron': - config.equality_relaxation = False - # if ecp tolerance is not provided use bound tolerance - if config.ecp_tolerance is None: - config.ecp_tolerance = config.absolute_bound_tolerance - - if config.solver_tee: - config.mip_solver_tee = True - config.nlp_solver_tee = True - if config.heuristic_nonconvex: - config.equality_relaxation = True - config.add_slack = True - if config.equality_relaxation: - config.calculate_dual_at_solution = True - if config.add_no_good_cuts: - config.integer_to_binary = True - if config.use_tabu_list: - config.mip_solver = 'cplex_persistent' - if config.threads > 1: - config.threads = 1 - config.logger.info( - 'The threads parameter is corrected to 1 since incumbent callback conflicts with multi-threads mode.') - if config.solution_pool: - if config.mip_solver not in {'cplex_persistent', 'gurobi_persistent'}: - if config.mip_solver in {'appsi_cplex', 'appsi_gurobi'}: - config.logger.info("Solution pool does not support APPSI solver.") - config.mip_solver = 'cplex_persistent' - if config.calculate_dual_at_solution: - if config.mip_solver == 'appsi_cplex': - config.logger.info("APPSI-Cplex cannot get duals for mixed-integer problems" - "mip_solver will be changed to Cplex.") - config.mip_solver = 'cplex' - if config.mip_regularization_solver == 'appsi_cplex': - config.logger.info("APPSI-Cplex cannot get duals for mixed-integer problems" - "mip_solver will be changed to Cplex.") - config.mip_regularization_solver = 'cplex' - if config.mip_solver in {'gurobi', 'appsi_gurobi'} or \ - config.mip_regularization_solver in {'gurobi', 'appsi_gurobi'}: - raise ValueError("GUROBI can not provide duals for mixed-integer problems.") + CONFIG.declare( + 'level_coef', + ConfigValue( + default=0.5, + domain=PositiveFloat, + description='The coefficient in the regularization main problem' + 'represents how much the linear approximation of the MINLP problem is trusted.', + ), + ) + CONFIG.declare( + 'solution_limit', + ConfigValue( + default=10, + domain=PositiveInt, + description='The solution limit for the regularization problem since it does not need to be solved to optimality.', + ), + ) + CONFIG.declare( + 'sqp_lag_scaling_coef', + ConfigValue( + default='fixed', + domain=In(['fixed', 'variable_dependent']), + description='The coefficient used to scale the L2 norm in sqp_lag.', + ), + ) diff --git a/pyomo/contrib/mindtpy/cut_generation.py b/pyomo/contrib/mindtpy/cut_generation.py index 90980df2ae9..c0449054baa 100644 --- a/pyomo/contrib/mindtpy/cut_generation.py +++ b/pyomo/contrib/mindtpy/cut_generation.py @@ -10,22 +10,30 @@ # ___________________________________________________________________________ """Cut generation.""" -from __future__ import division from math import copysign from pyomo.core import minimize, value -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.contrib.gdpopt.util import time_code from pyomo.contrib.mcpp.pyomo_mcpp import McCormick as mc, MCPP_Error -def add_oa_cuts(target_model, dual_values, solve_data, config, - cb_opt=None, - linearize_active=True, - linearize_violated=True): +def add_oa_cuts( + target_model, + dual_values, + jacobians, + objective_sense, + mip_constraint_polynomial_degree, + mip_iter, + config, + timing, + cb_opt=None, + linearize_active=True, + linearize_violated=True, +): """Adds OA cuts. Generates and adds OA cuts (linearizes nonlinear constraints). - For nonconvex problems, turn on 'config.add_slack'. + For nonconvex problems, turn on 'config.add_slack'. Slack variables will always be used for nonlinear equality constraints. Parameters @@ -34,8 +42,14 @@ def add_oa_cuts(target_model, dual_values, solve_data, config, The relaxed linear model. dual_values : list The value of the duals for each constraint. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + jacobians : ComponentMap + Map nonlinear_constraint --> Map(variable --> jacobian of constraint w.r.t. variable). + objective_sense : Int + Objective sense of model. + mip_constraint_polynomial_degree : Set + The polynomial degrees of constraints that are regarded as linear. + mip_iter : Int + MIP iteration counter. config : ConfigBlock The specific configurations for MindtPy. cb_opt : SolverFactory, optional @@ -45,146 +59,225 @@ def add_oa_cuts(target_model, dual_values, solve_data, config, linearize_violated : bool, optional Whether to linearize the violated nonlinear constraints, by default True. """ - with time_code(solve_data.timing, 'OA cut generation'): + with time_code(timing, 'OA cut generation'): for index, constr in enumerate(target_model.MindtPy_utils.constraint_list): # TODO: here the index is correlated to the duals, try if this can be fixed when temp duals are removed. - if constr.body.polynomial_degree() in solve_data.mip_constraint_polynomial_degree: + if constr.body.polynomial_degree() in mip_constraint_polynomial_degree: continue constr_vars = list(EXPR.identify_variables(constr.body)) - jacs = solve_data.jacobians + jacs = jacobians # Equality constraint (makes the problem nonconvex) - if constr.has_ub() and constr.has_lb() and value(constr.lower) == value(constr.upper) and config.equality_relaxation: - sign_adjust = -1 if solve_data.objective_sense == minimize else 1 + if ( + constr.has_ub() + and constr.has_lb() + and value(constr.lower) == value(constr.upper) + and config.equality_relaxation + ): + sign_adjust = -1 if objective_sense == minimize else 1 rhs = constr.lower if config.add_slack: slack_var = target_model.MindtPy_utils.cuts.slack_vars.add() target_model.MindtPy_utils.cuts.oa_cuts.add( expr=copysign(1, sign_adjust * dual_values[index]) - * (sum(value(jacs[constr][var]) * (var - value(var)) - for var in EXPR.identify_variables(constr.body)) - + value(constr.body) - rhs) - - (slack_var if config.add_slack else 0) <= 0) - if config.single_tree and config.mip_solver == 'gurobi_persistent' and solve_data.mip_iter > 0 and cb_opt is not None: + * ( + sum( + value(jacs[constr][var]) * (var - value(var)) + for var in EXPR.identify_variables(constr.body) + ) + + value(constr.body) + - rhs + ) + - (slack_var if config.add_slack else 0) + <= 0 + ) + if ( + config.single_tree + and config.mip_solver == 'gurobi_persistent' + and mip_iter > 0 + and cb_opt is not None + ): cb_opt.cbLazy( - target_model.MindtPy_utils.cuts.oa_cuts[len(target_model.MindtPy_utils.cuts.oa_cuts)]) + target_model.MindtPy_utils.cuts.oa_cuts[ + len(target_model.MindtPy_utils.cuts.oa_cuts) + ] + ) else: # Inequality constraint (possibly two-sided) - if (constr.has_ub() - and (linearize_active and abs(constr.uslack()) < config.zero_tolerance) - or (linearize_violated and constr.uslack() < 0) - or (config.linearize_inactive and constr.uslack() > 0)) or ('MindtPy_utils.objective_constr' in constr.name and constr.has_ub()): + if ( + constr.has_ub() + and ( + linearize_active + and abs(constr.uslack()) < config.zero_tolerance + ) + or (linearize_violated and constr.uslack() < 0) + or (config.linearize_inactive and constr.uslack() > 0) + ) or ( + 'MindtPy_utils.objective_constr' in constr.name and constr.has_ub() + ): # always add the linearization for the epigraph of the objective if config.add_slack: slack_var = target_model.MindtPy_utils.cuts.slack_vars.add() target_model.MindtPy_utils.cuts.oa_cuts.add( - expr=(sum(value(jacs[constr][var])*(var - var.value) - for var in constr_vars) + value(constr.body) - - (slack_var if config.add_slack else 0) - <= value(constr.upper)) + expr=( + sum( + value(jacs[constr][var]) * (var - var.value) + for var in constr_vars + ) + + value(constr.body) + - (slack_var if config.add_slack else 0) + <= value(constr.upper) + ) ) - if config.single_tree and config.mip_solver == 'gurobi_persistent' and solve_data.mip_iter > 0 and cb_opt is not None: + if ( + config.single_tree + and config.mip_solver == 'gurobi_persistent' + and mip_iter > 0 + and cb_opt is not None + ): cb_opt.cbLazy( - target_model.MindtPy_utils.cuts.oa_cuts[len(target_model.MindtPy_utils.cuts.oa_cuts)]) - - if (constr.has_lb() - and (linearize_active and abs(constr.lslack()) < config.zero_tolerance) - or (linearize_violated and constr.lslack() < 0) - or (config.linearize_inactive and constr.lslack() > 0)) or ('MindtPy_utils.objective_constr' in constr.name and constr.has_lb()): + target_model.MindtPy_utils.cuts.oa_cuts[ + len(target_model.MindtPy_utils.cuts.oa_cuts) + ] + ) + + if ( + constr.has_lb() + and ( + linearize_active + and abs(constr.lslack()) < config.zero_tolerance + ) + or (linearize_violated and constr.lslack() < 0) + or (config.linearize_inactive and constr.lslack() > 0) + ) or ( + 'MindtPy_utils.objective_constr' in constr.name and constr.has_lb() + ): if config.add_slack: slack_var = target_model.MindtPy_utils.cuts.slack_vars.add() target_model.MindtPy_utils.cuts.oa_cuts.add( - expr=(sum(value(jacs[constr][var])*(var - var.value) - for var in constr_vars) + value(constr.body) - + (slack_var if config.add_slack else 0) - >= value(constr.lower)) + expr=( + sum( + value(jacs[constr][var]) * (var - var.value) + for var in constr_vars + ) + + value(constr.body) + + (slack_var if config.add_slack else 0) + >= value(constr.lower) + ) ) - if config.single_tree and config.mip_solver == 'gurobi_persistent' and solve_data.mip_iter > 0 and cb_opt is not None: + if ( + config.single_tree + and config.mip_solver == 'gurobi_persistent' + and mip_iter > 0 + and cb_opt is not None + ): cb_opt.cbLazy( - target_model.MindtPy_utils.cuts.oa_cuts[len(target_model.MindtPy_utils.cuts.oa_cuts)]) - - -def add_ecp_cuts(target_model, solve_data, config, - linearize_active=True, - linearize_violated=True): + target_model.MindtPy_utils.cuts.oa_cuts[ + len(target_model.MindtPy_utils.cuts.oa_cuts) + ] + ) + + +def add_ecp_cuts( + target_model, + jacobians, + config, + timing, + linearize_active=True, + linearize_violated=True, +): """Linearizes nonlinear constraints. Adds the cuts for the ECP method. Parameters ---------- target_model : Pyomo model The relaxed linear model. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + jacobians : ComponentMap + Map nonlinear_constraint --> Map(variable --> jacobian of constraint w.r.t. variable) config : ConfigBlock The specific configurations for MindtPy. + timing : Timing + Timing. linearize_active : bool, optional Whether to linearize the active nonlinear constraints, by default True. linearize_violated : bool, optional Whether to linearize the violated nonlinear constraints, by default True. """ - with time_code(solve_data.timing, 'ECP cut generation'): + with time_code(timing, 'ECP cut generation'): for constr in target_model.MindtPy_utils.nonlinear_constraint_list: constr_vars = list(EXPR.identify_variables(constr.body)) - jacs = solve_data.jacobians + jacs = jacobians if constr.has_lb() and constr.has_ub(): config.logger.warning( 'constraint {} has both a lower ' 'and upper bound.' - '\n'.format( - constr)) + '\n'.format(constr) + ) continue if constr.has_ub(): try: upper_slack = constr.uslack() - except (ValueError, OverflowError): - config.logger.warning( - 'constraint {} has caused either a ' + except (ValueError, OverflowError) as e: + config.logger.error( + str(e) + '\nConstraint {} has caused either a ' 'ValueError or OverflowError.' - '\n'.format( - constr)) + '\n'.format(constr) + ) continue - if (linearize_active and abs(upper_slack) < config.ecp_tolerance) \ - or (linearize_violated and upper_slack < 0) \ - or (config.linearize_inactive and upper_slack > 0): + if ( + (linearize_active and abs(upper_slack) < config.ecp_tolerance) + or (linearize_violated and upper_slack < 0) + or (config.linearize_inactive and upper_slack > 0) + ): if config.add_slack: slack_var = target_model.MindtPy_utils.cuts.slack_vars.add() target_model.MindtPy_utils.cuts.ecp_cuts.add( - expr=(sum(value(jacs[constr][var])*(var - var.value) - for var in constr_vars) - - (slack_var if config.add_slack else 0) - <= upper_slack) + expr=( + sum( + value(jacs[constr][var]) * (var - var.value) + for var in constr_vars + ) + - (slack_var if config.add_slack else 0) + <= upper_slack + ) ) if constr.has_lb(): try: lower_slack = constr.lslack() - except (ValueError, OverflowError): - config.logger.warning( - 'constraint {} has caused either a ' + except (ValueError, OverflowError) as e: + config.logger.error( + str(e) + '\nConstraint {} has caused either a ' 'ValueError or OverflowError.' - '\n'.format( - constr)) + '\n'.format(constr) + ) continue - if (linearize_active and abs(lower_slack) < config.ecp_tolerance) \ - or (linearize_violated and lower_slack < 0) \ - or (config.linearize_inactive and lower_slack > 0): + if ( + (linearize_active and abs(lower_slack) < config.ecp_tolerance) + or (linearize_violated and lower_slack < 0) + or (config.linearize_inactive and lower_slack > 0) + ): if config.add_slack: slack_var = target_model.MindtPy_utils.cuts.slack_vars.add() target_model.MindtPy_utils.cuts.ecp_cuts.add( - expr=(sum(value(jacs[constr][var])*(var - var.value) - for var in constr_vars) - + (slack_var if config.add_slack else 0) - >= -lower_slack) + expr=( + sum( + value(jacs[constr][var]) * (var - var.value) + for var in constr_vars + ) + + (slack_var if config.add_slack else 0) + >= -lower_slack + ) ) -def add_no_good_cuts(var_values, solve_data, config): +def add_no_good_cuts(target_model, var_values, config, timing, mip_iter=0, cb_opt=None): """Adds no-good cuts. This adds an no-good cuts to the no_good_cuts ConstraintList, which is not activated by default. @@ -193,12 +286,18 @@ def add_no_good_cuts(var_values, solve_data, config): Parameters ---------- + target_model : Block + The model to add no-good cuts to. var_values : list Variable values of the current solution, used to generate the cut. - solve_data : MindtPySolveData - Data container that holds solve-instance data. config : ConfigBlock The specific configurations for MindtPy. + timing : Timing + Timing. + mip_iter : Int, optional + MIP iteration counter. + cb_opt : SolverFactory, optional + Gurobi_persistent solver, by default None. Raises ------ @@ -207,11 +306,10 @@ def add_no_good_cuts(var_values, solve_data, config): """ if not config.add_no_good_cuts: return - with time_code(solve_data.timing, 'no_good cut generation'): - + with time_code(timing, 'no_good cut generation'): config.logger.debug('Adding no-good cuts') - m = solve_data.mip + m = target_model MindtPy = m.MindtPy_utils int_tol = config.integer_tolerance @@ -227,37 +325,49 @@ def add_no_good_cuts(var_values, solve_data, config): for v in binary_vars: if value(abs(v - 1)) > int_tol and value(abs(v)) > int_tol: raise ValueError( - 'Binary {} = {} is not 0 or 1'.format(v.name, value(v))) + 'Binary {} = {} is not 0 or 1'.format(v.name, value(v)) + ) if not binary_vars: # if no binary variables, skip return - int_cut = (sum(1 - v for v in binary_vars - if value(abs(v - 1)) <= int_tol) + - sum(v for v in binary_vars - if value(abs(v)) <= int_tol) >= 1) + int_cut = ( + sum(1 - v for v in binary_vars if value(abs(v - 1)) <= int_tol) + + sum(v for v in binary_vars if value(abs(v)) <= int_tol) + >= 1 + ) MindtPy.cuts.no_good_cuts.add(expr=int_cut) - - -def add_affine_cuts(solve_data, config): + if ( + config.single_tree + and config.mip_solver == 'gurobi_persistent' + and mip_iter > 0 + and cb_opt is not None + ): + cb_opt.cbLazy( + target_model.MindtPy_utils.cuts.no_good_cuts[ + len(target_model.MindtPy_utils.cuts.no_good_cuts) + ] + ) + + +def add_affine_cuts(target_model, config, timing): """Adds affine cuts using MCPP. Parameters ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. config : ConfigBlock The specific configurations for MindtPy. + timing : Timing + Timing. """ - with time_code(solve_data.timing, 'Affine cut generation'): - m = solve_data.mip + with time_code(timing, 'Affine cut generation'): + m = target_model config.logger.debug('Adding affine cuts') counter = 0 for constr in m.MindtPy_utils.nonlinear_constraint_list: - vars_in_constr = list( - EXPR.identify_variables(constr.body)) + vars_in_constr = list(EXPR.identify_variables(constr.body)) if any(var.value is None for var in vars_in_constr): continue # a variable has no values @@ -265,8 +375,10 @@ def add_affine_cuts(solve_data, config): try: mc_eqn = mc(constr.body) except MCPP_Error as e: - config.logger.debug( - 'Skipping constraint %s due to MCPP error %s' % (constr.name, str(e))) + config.logger.error( + '\nSkipping constraint %s due to MCPP error %s' + % (constr.name, str(e)) + ) continue # skip to the next constraint ccSlope = mc_eqn.subcc() @@ -295,22 +407,40 @@ def add_affine_cuts(solve_data, config): if not (concave_cut_valid or convex_cut_valid): continue - ub_int = min(value(constr.upper), mc_eqn.upper() - ) if constr.has_ub() else mc_eqn.upper() - lb_int = max(value(constr.lower), mc_eqn.lower() - ) if constr.has_lb() else mc_eqn.lower() + ub_int = ( + min(value(constr.upper), mc_eqn.upper()) + if constr.has_ub() + else mc_eqn.upper() + ) + lb_int = ( + max(value(constr.lower), mc_eqn.lower()) + if constr.has_lb() + else mc_eqn.lower() + ) aff_cuts = m.MindtPy_utils.cuts.aff_cuts if concave_cut_valid: - concave_cut = sum(ccSlope[var] * (var - var.value) - for var in vars_in_constr - if not var.fixed) + ccStart >= lb_int + concave_cut = ( + sum( + ccSlope[var] * (var - var.value) + for var in vars_in_constr + if not var.fixed + ) + + ccStart + >= lb_int + ) aff_cuts.add(expr=concave_cut) counter += 1 if convex_cut_valid: - convex_cut = sum(cvSlope[var] * (var - var.value) - for var in vars_in_constr - if not var.fixed) + cvStart <= ub_int + convex_cut = ( + sum( + cvSlope[var] * (var - var.value) + for var in vars_in_constr + if not var.fixed + ) + + cvStart + <= ub_int + ) aff_cuts.add(expr=convex_cut) counter += 1 diff --git a/pyomo/contrib/mindtpy/extended_cutting_plane.py b/pyomo/contrib/mindtpy/extended_cutting_plane.py new file mode 100644 index 00000000000..446304b1361 --- /dev/null +++ b/pyomo/contrib/mindtpy/extended_cutting_plane.py @@ -0,0 +1,176 @@ +# -*- coding: utf-8 -*- + +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.contrib.gdpopt.util import time_code, get_main_elapsed_time +from pyomo.contrib.mindtpy.util import calc_jacobians +from pyomo.core import ConstraintList +from pyomo.opt import SolverFactory +from pyomo.contrib.mindtpy.config_options import _get_MindtPy_ECP_config +from pyomo.contrib.mindtpy.algorithm_base_class import _MindtPyAlgorithm +from pyomo.contrib.mindtpy.cut_generation import add_ecp_cuts +from pyomo.opt import TerminationCondition as tc + + +@SolverFactory.register( + 'mindtpy.ecp', doc='MindtPy: Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo' +) +class MindtPy_ECP_Solver(_MindtPyAlgorithm): + """ + Decomposition solver for Mixed-Integer Nonlinear Programming (MINLP) problems. + + The MindtPy (Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo) solver + applies a variety of decomposition-based approaches to solve Mixed-Integer + Nonlinear Programming (MINLP) problems. + This class includes: + + - Extended Cutting Plane (ECP) + """ + + CONFIG = _get_MindtPy_ECP_config() + + def MindtPy_iteration_loop(self): + """Main loop for MindtPy Algorithms. + + This is the outermost function for the Extended Cutting Plane algorithm in this package; this function controls the progress of + solving the model. + + Raises + ------ + ValueError + The strategy value is not correct or not included. + """ + while self.mip_iter < self.config.iteration_limit: + # solve MIP main problem + main_mip, main_mip_results = self.solve_main() + + if self.handle_main_mip_termination(main_mip, main_mip_results): + break + + # Call the MIP post-solve callback + with time_code(self.timing, 'Call after main solve'): + self.config.call_after_main_solve(main_mip) + + if self.algorithm_should_terminate(): + self.last_iter_cuts = False + break + + add_ecp_cuts(self.mip, self.jacobians, self.config, self.timing) + + # if add_no_good_cuts is True, the bound obtained in the last iteration is no reliable. + # we correct it after the iteration. + if ( + self.config.add_no_good_cuts or self.config.use_tabu_list + ) and not self.should_terminate: + self.fix_dual_bound(self.last_iter_cuts) + self.config.logger.info( + ' ===============================================================================================' + ) + + def check_config(self): + config = self.config + # if ecp tolerance is not provided use bound tolerance + if config.ecp_tolerance is None: + config.ecp_tolerance = config.absolute_bound_tolerance + super().check_config() + + def initialize_mip_problem(self): + '''Deactivate the nonlinear constraints to create the MIP problem.''' + super().initialize_mip_problem() + self.jacobians = calc_jacobians(self.mip, self.config) # preload jacobians + self.mip.MindtPy_utils.cuts.ecp_cuts = ConstraintList( + doc='Extended Cutting Planes' + ) + + def init_rNLP(self): + """Initialize the problem by solving the relaxed NLP and then store the optimal variable + values obtained from solving the rNLP. + + Raises + ------ + ValueError + MindtPy unable to handle the termination condition of the relaxed NLP. + """ + super().init_rNLP(add_oa_cuts=False) + + def algorithm_should_terminate(self): + """Checks if the algorithm should terminate at the given point. + + This function determines whether the algorithm should terminate based on the solver options and progress. + (Sets the self.results.solver.termination_condition to the appropriate condition, i.e. optimal, + maxIterations, maxTimeLimit). + + Returns + ------- + bool + True if the algorithm should terminate, False otherwise. + """ + if self.should_terminate: + if self.primal_bound == self.primal_bound_progress[0]: + self.results.solver.termination_condition = tc.noSolution + else: + self.results.solver.termination_condition = tc.feasible + return True + + return ( + self.bounds_converged() + or self.reached_iteration_limit() + or self.reached_time_limit() + or self.reached_stalling_limit() + or self.all_nonlinear_constraint_satisfied() + ) + + def all_nonlinear_constraint_satisfied(self): + # check to see if the nonlinear constraints are satisfied + config = self.config + MindtPy = self.mip.MindtPy_utils + nonlinear_constraints = [c for c in MindtPy.nonlinear_constraint_list] + for nlc in nonlinear_constraints: + if nlc.has_lb(): + try: + lower_slack = nlc.lslack() + except (ValueError, OverflowError) as e: + # Set lower_slack (upper_slack below) less than -config.ecp_tolerance in this case. + config.logger.error(e) + lower_slack = -10 * config.ecp_tolerance + if lower_slack < -config.ecp_tolerance: + config.logger.debug( + 'MindtPy-ECP continuing as {} has not met the ' + 'nonlinear constraints satisfaction.' + '\n'.format(nlc) + ) + return False + if nlc.has_ub(): + try: + upper_slack = nlc.uslack() + except (ValueError, OverflowError) as e: + config.logger.error(e) + upper_slack = -10 * config.ecp_tolerance + if upper_slack < -config.ecp_tolerance: + config.logger.debug( + 'MindtPy-ECP continuing as {} has not met the ' + 'nonlinear constraints satisfaction.' + '\n'.format(nlc) + ) + return False + # For ECP to know whether to know which bound to copy over (primal or dual) + self.primal_bound = self.dual_bound + config.logger.info( + 'MindtPy-ECP exiting on nonlinear constraints satisfaction. ' + 'Primal Bound: {} Dual Bound: {}\n'.format( + self.primal_bound, self.dual_bound + ) + ) + + self.best_solution_found = self.mip.clone() + self.results.solver.termination_condition = tc.optimal + return True diff --git a/pyomo/contrib/mindtpy/feasibility_pump.py b/pyomo/contrib/mindtpy/feasibility_pump.py index 1dcca6a4877..5716400598a 100644 --- a/pyomo/contrib/mindtpy/feasibility_pump.py +++ b/pyomo/contrib/mindtpy/feasibility_pump.py @@ -1,3 +1,5 @@ +# -*- coding: utf-8 -*- + # ___________________________________________________________________________ # # Pyomo: Python Optimization Modeling Objects @@ -9,318 +11,62 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core import (minimize, Constraint, TransformationFactory, value) -from pyomo.core.base.constraint import ConstraintList -from pyomo.opt import SolverFactory, SolutionStatus, SolverResults, SolverStatus -from pyomo.contrib.gdpopt.util import SuppressInfeasibleWarning, copy_var_list_values, time_code, get_main_elapsed_time -from pyomo.contrib.mindtpy.nlp_solve import solve_subproblem, handle_subproblem_optimal -from pyomo.opt import TerminationCondition as tc -from pyomo.contrib.mindtpy.util import generate_norm2sq_objective_function, set_solver_options -from pyomo.contrib.mindtpy.mip_solve import solve_main -from pyomo.contrib.mindtpy.util import generate_norm1_norm_constraint - - -def fp_converged(solve_data, config, discrete_only=True): - """Calculates the euclidean norm between the discrete variables in the MIP and NLP models. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - discrete_only : bool, optional - Whether to only optimize on distance between the discrete variables, by default True. - - Returns - ------- - distance : float - The euclidean norm between the discrete variables in the MIP and NLP models. - """ - distance = (max((nlp_var.value - milp_var.value)**2 - for (nlp_var, milp_var) in - zip(solve_data.working_model.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list) - if (not discrete_only) or milp_var.is_integer())) - return distance <= config.fp_projzerotol - - -def solve_fp_subproblem(solve_data, config): - """Solves the feasibility pump NLP subproblem. +import logging +from pyomo.contrib.mindtpy.config_options import _get_MindtPy_FP_config +from pyomo.contrib.mindtpy.algorithm_base_class import _MindtPyAlgorithm +from pyomo.core import ConstraintList +from pyomo.contrib.mindtpy.util import calc_jacobians +from pyomo.opt import SolverFactory +from pyomo.contrib.mindtpy.cut_generation import add_oa_cuts - This function sets up the 'fp_nlp' by relax integer variables. - precomputes dual values, deactivates trivial constraints, and then solves NLP model. - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Returns - ------- - fp_nlp : Pyomo model - Fixed-NLP from the model. - results : SolverResults - Results from solving the fixed-NLP subproblem. +@SolverFactory.register( + 'mindtpy.fp', doc='MindtPy: Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo' +) +class MindtPy_FP_Solver(_MindtPyAlgorithm): """ - fp_nlp = solve_data.working_model.clone() - MindtPy = fp_nlp.MindtPy_utils - - # Set up NLP - fp_nlp.MindtPy_utils.objective_list[-1].deactivate() - if solve_data.objective_sense == minimize: - fp_nlp.improving_objective_cut = Constraint( - expr=sum(fp_nlp.MindtPy_utils.objective_value[:]) <= solve_data.primal_bound) - else: - fp_nlp.improving_objective_cut = Constraint( - expr=sum(fp_nlp.MindtPy_utils.objective_value[:]) >= solve_data.primal_bound) + Decomposition solver for Mixed-Integer Nonlinear Programming (MINLP) problems. - # Add norm_constraint, which guarantees the monotonicity of the norm objective value sequence of all iterations - # Ref: Paper 'A storm of feasibility pumps for nonconvex MINLP' https://doi.org/10.1007/s10107-012-0608-x - # the norm type is consistant with the norm obj of the FP-main problem. - if config.fp_norm_constraint: - generate_norm_constraint(fp_nlp, solve_data, config) + The MindtPy (Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo) solver + applies a variety of decomposition-based approaches to solve Mixed-Integer + Nonlinear Programming (MINLP) problems. + This class includes: - MindtPy.fp_nlp_obj = generate_norm2sq_objective_function( - fp_nlp, solve_data.mip, discrete_only=config.fp_discrete_only) - - MindtPy.cuts.deactivate() - TransformationFactory('core.relax_integer_vars').apply_to(fp_nlp) - try: - TransformationFactory('contrib.deactivate_trivial_constraints').apply_to( - fp_nlp, tmp=True, ignore_infeasible=False, tolerance=config.constraint_tolerance) - except ValueError: - config.logger.warning( - 'infeasibility detected in deactivate_trivial_constraints') - results = SolverResults() - results.solver.termination_condition = tc.infeasible - return fp_nlp, results - # Solve the NLP - nlpopt = SolverFactory(config.nlp_solver) - nlp_args = dict(config.nlp_solver_args) - set_solver_options(nlpopt, solve_data, config, solver_type='nlp') - with SuppressInfeasibleWarning(): - with time_code(solve_data.timing, 'fp subproblem'): - results = nlpopt.solve(fp_nlp, - tee=config.nlp_solver_tee, - load_solutions=False, - **nlp_args) - if len(results.solution) > 0: - fp_nlp.solutions.load_from(results) - return fp_nlp, results - - -def handle_fp_subproblem_optimal(fp_nlp, solve_data, config): - """Copies the solution to the working model, updates bound, adds OA cuts / no-good cuts / - increasing objective cut, calculates the duals and stores incumbent solution if it has been improved. - - Parameters - ---------- - fp_nlp : Pyomo model - The feasibility pump NLP subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. + - Feasibility pump (FP) """ - copy_var_list_values( - fp_nlp.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config, - ignore_integrality=True) - add_orthogonality_cuts(solve_data, config) - - # if OA-like or fp converged, update Upper bound, - # add no_good cuts and increasing objective cuts (fp) - if fp_converged(solve_data, config, discrete_only=config.fp_discrete_only): - copy_var_list_values(solve_data.mip.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config) - fixed_nlp, fixed_nlp_results = solve_subproblem( - solve_data, config) - if fixed_nlp_results.solver.termination_condition in {tc.optimal, tc.locallyOptimal, tc.feasible}: - handle_subproblem_optimal( - fixed_nlp, solve_data, config, fp=True) - else: - config.logger.error('Feasibility pump Fixed-NLP is infeasible, something might be wrong. ' - 'There might be a problem with the precisions - the feasibility pump seems to have converged') - -def fp_loop(solve_data, config): - """Feasibility pump loop. - - This is the outermost function for the algorithms in this package; this function - controls the progression of solving the model. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - MindtPy unable to handle the termination condition of the FP-NLP subproblem. - """ - while solve_data.fp_iter < config.fp_iteration_limit: - - solve_data.mip_subiter = 0 - # solve MILP main problem - feas_main, feas_main_results = solve_main( - solve_data, config, fp=True) - fp_should_terminate = handle_fp_main_tc( - feas_main_results, solve_data, config) - if fp_should_terminate: - break - - # Solve NLP subproblem - # The constraint linearization happens in the handlers - fp_nlp, fp_nlp_result = solve_fp_subproblem( - solve_data, config) - - if fp_nlp_result.solver.termination_condition in {tc.optimal, tc.locallyOptimal, tc.feasible}: - config.logger.info(solve_data.log_formatter.format( - solve_data.fp_iter, 'FP-NLP', value( - fp_nlp.MindtPy_utils.fp_nlp_obj), - solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing))) - handle_fp_subproblem_optimal(fp_nlp, solve_data, config) - elif fp_nlp_result.solver.termination_condition in {tc.infeasible, tc.noSolution}: - config.logger.error('Feasibility pump NLP subproblem infeasible') - solve_data.should_terminate = True - solve_data.results.solver.status = SolverStatus.error - return - elif fp_nlp_result.solver.termination_condition is tc.maxIterations: - config.logger.error( - 'Feasibility pump NLP subproblem failed to converge within iteration limit.') - solve_data.should_terminate = True - solve_data.results.solver.status = SolverStatus.error - return - else: - raise ValueError( - 'MindtPy unable to handle NLP subproblem termination ' - 'condition of {}'.format(fp_nlp_result.solver.termination_condition)) - # Call the NLP post-solve callback - config.call_after_subproblem_solve(fp_nlp, solve_data) - solve_data.fp_iter += 1 - solve_data.mip.MindtPy_utils.del_component('fp_mip_obj') - - if config.fp_main_norm == 'L1': - solve_data.mip.MindtPy_utils.del_component('L1_obj') - elif config.fp_main_norm == 'L_infinity': - solve_data.mip.MindtPy_utils.del_component( - 'L_infinity_obj') - - # deactivate the improving_objective_cut - solve_data.mip.MindtPy_utils.cuts.del_component( - 'improving_objective_cut') - if not config.fp_transfercuts: - for c in solve_data.mip.MindtPy_utils.cuts.oa_cuts: - c.deactivate() - for c in solve_data.mip.MindtPy_utils.cuts.no_good_cuts: - c.deactivate() - if config.fp_projcuts: - solve_data.working_model.MindtPy_utils.cuts.del_component( - 'fp_orthogonality_cuts') - - -def add_orthogonality_cuts(solve_data, config): - """Add orthogonality cuts. - - This function adds orthogonality cuts to avoid cycling when the independence constraint qualification is not satisfied. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - """ - mip_integer_vars = solve_data.mip.MindtPy_utils.discrete_variable_list - nlp_integer_vars = solve_data.working_model.MindtPy_utils.discrete_variable_list - orthogonality_cut = sum((nlp_v.value-mip_v.value)*(mip_v-nlp_v.value) - for mip_v, nlp_v in zip(mip_integer_vars, nlp_integer_vars)) >= 0 - solve_data.mip.MindtPy_utils.cuts.fp_orthogonality_cuts.add( - orthogonality_cut) - if config.fp_projcuts: - orthogonality_cut = sum((nlp_v.value-mip_v.value)*(nlp_v-nlp_v.value) - for mip_v, nlp_v in zip(mip_integer_vars, nlp_integer_vars)) >= 0 - solve_data.working_model.MindtPy_utils.cuts.fp_orthogonality_cuts.add( - orthogonality_cut) - - -def generate_norm_constraint(fp_nlp, solve_data, config): - """Generate the norm constraint for the FP-NLP subproblem. - - Parameters - ---------- - fp_nlp : Pyomo model - The feasibility pump NLP subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - """ - if config.fp_main_norm == 'L1': - # TODO: check if we can access the block defined in FP-main problem - generate_norm1_norm_constraint( - fp_nlp, solve_data.mip, config, discrete_only=True) - elif config.fp_main_norm == 'L2': - fp_nlp.norm_constraint = Constraint(expr=sum((nlp_var - mip_var.value)**2 - config.fp_norm_constraint_coef*(nlp_var.value - mip_var.value)**2 - for nlp_var, mip_var in zip(fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list)) <= 0) - elif config.fp_main_norm == 'L_infinity': - fp_nlp.norm_constraint = ConstraintList() - rhs = config.fp_norm_constraint_coef * max(nlp_var.value - mip_var.value for nlp_var, mip_var in zip( - fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list)) - for nlp_var, mip_var in zip(fp_nlp.MindtPy_utils.discrete_variable_list, solve_data.mip.MindtPy_utils.discrete_variable_list): - fp_nlp.norm_constraint.add(nlp_var - mip_var.value <= rhs) - - -def handle_fp_main_tc(feas_main_results, solve_data, config): - """Handle the termination condition of the feasibility pump main problem. - - Parameters - ---------- - feas_main_results : SolverResults - The results from solving the FP main problem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Returns - ------- - bool - True if FP loop should terminate, False otherwise. - """ - if feas_main_results.solver.termination_condition is tc.optimal: - config.logger.info(solve_data.log_formatter.format( - solve_data.fp_iter, 'FP-MIP', value( - solve_data.mip.MindtPy_utils.fp_mip_obj), - solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap, get_main_elapsed_time(solve_data.timing))) - return False - elif feas_main_results.solver.termination_condition is tc.maxTimeLimit: - config.logger.warning('FP-MIP reaches max TimeLimit') - solve_data.results.solver.termination_condition = tc.maxTimeLimit - return True - elif feas_main_results.solver.termination_condition is tc.infeasible: - config.logger.warning('FP-MIP infeasible') - no_good_cuts = solve_data.mip.MindtPy_utils.cuts.no_good_cuts - if no_good_cuts.__len__() > 0: - no_good_cuts[no_good_cuts.__len__()].deactivate() - return True - elif feas_main_results.solver.termination_condition is tc.unbounded: - config.logger.warning('FP-MIP unbounded') - return True - elif (feas_main_results.solver.termination_condition is tc.other and - feas_main_results.solution.status is SolutionStatus.feasible): - config.logger.warning('MILP solver reported feasible solution of FP-MIP, ' - 'but not guaranteed to be optimal.') - return False - else: - config.logger.warning('Unexpected result of FP-MIP') - return True + CONFIG = _get_MindtPy_FP_config() + + def check_config(self): + # feasibility pump alone will lead to iteration_limit = 0, important! + self.config.iteration_limit = 0 + self.config.move_objective = True + super().check_config() + + def initialize_mip_problem(self): + '''Deactivate the nonlinear constraints to create the MIP problem.''' + super().initialize_mip_problem() + self.jacobians = calc_jacobians(self.mip, self.config) # preload jacobians + self.mip.MindtPy_utils.cuts.oa_cuts = ConstraintList( + doc='Outer approximation cuts' + ) + + def add_cuts( + self, dual_values, linearize_active=True, linearize_violated=True, cb_opt=None + ): + add_oa_cuts( + self.mip, + dual_values, + self.jacobians, + self.objective_sense, + self.mip_constraint_polynomial_degree, + self.mip_iter, + self.config, + self.timing, + cb_opt, + linearize_active, + linearize_violated, + ) + + def MindtPy_iteration_loop(self): + pass diff --git a/pyomo/contrib/mindtpy/global_outer_approximation.py b/pyomo/contrib/mindtpy/global_outer_approximation.py new file mode 100644 index 00000000000..ee3ffb62f55 --- /dev/null +++ b/pyomo/contrib/mindtpy/global_outer_approximation.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- + +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + + +from pyomo.contrib.gdpopt.util import get_main_elapsed_time +from pyomo.core import ConstraintList +from pyomo.opt import SolverFactory +from pyomo.contrib.mindtpy.config_options import _get_MindtPy_GOA_config +from pyomo.contrib.mindtpy.algorithm_base_class import _MindtPyAlgorithm +from pyomo.contrib.mindtpy.cut_generation import add_affine_cuts + + +@SolverFactory.register( + 'mindtpy.goa', doc='MindtPy: Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo' +) +class MindtPy_GOA_Solver(_MindtPyAlgorithm): + """ + Decomposition solver for Mixed-Integer Nonlinear Programming (MINLP) problems. + + The MindtPy (Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo) solver + applies a variety of decomposition-based approaches to solve Mixed-Integer + Nonlinear Programming (MINLP) problems. + This class includes: + + - Global outer approximation (GOA) + - Global LP/NLP based branch-and-bound (GLP/NLP) + """ + + CONFIG = _get_MindtPy_GOA_config() + + def check_config(self): + config = self.config + config.add_slack = False + config.use_mcpp = True + config.equality_relaxation = False + config.use_fbbt = True + # add_no_good_cuts is True by default in GOA + if not config.add_no_good_cuts and not config.use_tabu_list: + config.add_no_good_cuts = True + config.use_tabu_list = False + # Set default initialization_strategy + if config.single_tree: + config.logger.info('Single-tree implementation is activated.') + config.iteration_limit = 1 + config.add_slack = False + if config.mip_solver not in {'cplex_persistent', 'gurobi_persistent'}: + raise ValueError( + "Only cplex_persistent and gurobi_persistent are supported for LP/NLP based Branch and Bound method." + "Please refer to https://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html#lp-nlp-based-branch-and-bound." + ) + if config.threads > 1: + config.threads = 1 + config.logger.info( + 'The threads parameter is corrected to 1 since lazy constraint callback conflicts with multi-threads mode.' + ) + + super().check_config() + + def initialize_mip_problem(self): + '''Deactivate the nonlinear constraints to create the MIP problem.''' + super().initialize_mip_problem() + self.mip.MindtPy_utils.cuts.aff_cuts = ConstraintList(doc='Affine cuts') + + def update_primal_bound(self, bound_value): + """Update the primal bound. + + Call after solve fixed NLP subproblem. + Use the optimal primal bound of the relaxed problem to update the dual bound. + + Parameters + ---------- + bound_value : float + The input value used to update the primal bound. + """ + super().update_primal_bound(bound_value) + self.primal_bound_progress_time.append(get_main_elapsed_time(self.timing)) + if self.primal_bound_improved: + self.num_no_good_cuts_added.update( + {self.primal_bound: len(self.mip.MindtPy_utils.cuts.no_good_cuts)} + ) + + def add_cuts( + self, + dual_values=None, + linearize_active=True, + linearize_violated=True, + cb_opt=None, + ): + add_affine_cuts(self.mip, self.config, self.timing) + + def deactivate_no_good_cuts_when_fixing_bound(self, no_good_cuts): + try: + valid_no_good_cuts_num = self.num_no_good_cuts_added[self.primal_bound] + if self.config.add_no_good_cuts: + for i in range(valid_no_good_cuts_num + 1, len(no_good_cuts) + 1): + no_good_cuts[i].deactivate() + if self.config.use_tabu_list: + self.integer_list = self.integer_list[:valid_no_good_cuts_num] + except KeyError as e: + self.config.logger.error(str(e) + '\nDeactivating no-good cuts failed.') diff --git a/pyomo/contrib/mindtpy/initialization.py b/pyomo/contrib/mindtpy/initialization.py deleted file mode 100644 index 18a5ac01e0c..00000000000 --- a/pyomo/contrib/mindtpy/initialization.py +++ /dev/null @@ -1,272 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -"""Initialization functions.""" -from __future__ import division -from pyomo.contrib.gdpopt.util import (SuppressInfeasibleWarning, _DoNothing, - copy_var_list_values, get_main_elapsed_time) -from pyomo.contrib.mindtpy.cut_generation import add_oa_cuts, add_affine_cuts -from pyomo.contrib.mindtpy.nlp_solve import solve_subproblem -from pyomo.contrib.mindtpy.util import calc_jacobians, set_solver_options, update_dual_bound, add_var_bound, get_integer_solution, update_suboptimal_dual_bound -from pyomo.core import (ConstraintList, Objective, - TransformationFactory, maximize, minimize, - value, Var) -from pyomo.opt import SolverFactory, TerminationCondition as tc -from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver -from pyomo.contrib.mindtpy.nlp_solve import solve_subproblem, handle_nlp_subproblem_tc -from pyomo.contrib.mindtpy.feasibility_pump import fp_loop - - -def MindtPy_initialize_main(solve_data, config): - """Initializes the decomposition algorithm and creates the main MIP/MILP problem. - - This function initializes the decomposition problem, which includes generating the - initial cuts required to build the main MIP. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - """ - # if single tree is activated, we need to add bounds for unbounded variables in nonlinear constraints to avoid unbounded main problem. - if config.single_tree: - add_var_bound(solve_data, config) - - m = solve_data.mip = solve_data.working_model.clone() - next(solve_data.mip.component_data_objects( - Objective, active=True)).deactivate() - - MindtPy = m.MindtPy_utils - if config.calculate_dual_at_solution: - m.dual.deactivate() - - if config.init_strategy == 'FP': - MindtPy.cuts.fp_orthogonality_cuts = ConstraintList( - doc='Orthogonality cuts in feasibility pump') - if config.fp_projcuts: - solve_data.working_model.MindtPy_utils.cuts.fp_orthogonality_cuts = ConstraintList( - doc='Orthogonality cuts in feasibility pump') - if config.strategy == 'OA' or config.init_strategy == 'FP': - calc_jacobians(solve_data, config) # preload jacobians - MindtPy.cuts.oa_cuts = ConstraintList(doc='Outer approximation cuts') - elif config.strategy == 'ECP': - calc_jacobians(solve_data, config) # preload jacobians - MindtPy.cuts.ecp_cuts = ConstraintList(doc='Extended Cutting Planes') - elif config.strategy == 'GOA': - MindtPy.cuts.aff_cuts = ConstraintList(doc='Affine cuts') - # elif config.strategy == 'PSC': - # detect_nonlinear_vars(solve_data, config) - # MindtPy.cuts.psc_cuts = ConstraintList( - # doc='Partial surrogate cuts') - # elif config.strategy == 'GBD': - # MindtPy.cuts.gbd_cuts = ConstraintList( - # doc='Generalized Benders cuts') - - # Set default initialization_strategy - if config.init_strategy is None: - if config.strategy in {'OA', 'GOA'}: - config.init_strategy = 'rNLP' - else: - config.init_strategy = 'max_binary' - - config.logger.info( - '{} is the initial strategy being used.' - '\n'.format(config.init_strategy)) - config.logger.info( - ' ===============================================================================================') - config.logger.info( - ' {:>9} | {:>15} | {:>15} | {:>12} | {:>12} | {:^7} | {:>7}\n'.format('Iteration', 'Subproblem Type', 'Objective Value', 'Primal Bound', - 'Dual Bound', ' Gap ', 'Time(s)')) - # Do the initialization - if config.init_strategy == 'rNLP': - init_rNLP(solve_data, config) - elif config.init_strategy == 'max_binary': - init_max_binaries(solve_data, config) - elif config.init_strategy == 'initial_binary': - solve_data.curr_int_sol = get_integer_solution( - solve_data.working_model) - solve_data.integer_list.append(solve_data.curr_int_sol) - if config.strategy != 'ECP': - fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config) - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_result, solve_data, config) - elif config.init_strategy == 'FP': - init_rNLP(solve_data, config) - fp_loop(solve_data, config) - - -def init_rNLP(solve_data, config): - """Initialize the problem by solving the relaxed NLP and then store the optimal variable - values obtained from solving the rNLP. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - MindtPy unable to handle the termination condition of the relaxed NLP. - """ - m = solve_data.working_model.clone() - config.logger.debug( - 'Relaxed NLP: Solve relaxed integrality') - MindtPy = m.MindtPy_utils - TransformationFactory('core.relax_integer_vars').apply_to(m) - nlp_args = dict(config.nlp_solver_args) - nlpopt = SolverFactory(config.nlp_solver) - set_solver_options(nlpopt, solve_data, config, solver_type='nlp') - with SuppressInfeasibleWarning(): - results = nlpopt.solve(m, - tee=config.nlp_solver_tee, - load_solutions=False, - **nlp_args) - if len(results.solution) > 0: - m.solutions.load_from(results) - subprob_terminate_cond = results.solver.termination_condition - if subprob_terminate_cond in {tc.optimal, tc.feasible, tc.locallyOptimal}: - main_objective = MindtPy.objective_list[-1] - if subprob_terminate_cond == tc.optimal: - update_dual_bound(solve_data, value(main_objective.expr)) - else: - config.logger.info( - 'relaxed NLP is not solved to optimality.') - update_suboptimal_dual_bound(solve_data, results) - dual_values = list( - m.dual[c] for c in MindtPy.constraint_list) if config.calculate_dual_at_solution else None - config.logger.info(solve_data.log_formatter.format('-', 'Relaxed NLP', value(main_objective.expr), - solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing))) - # Add OA cut - if config.strategy in {'OA', 'GOA', 'FP'}: - copy_var_list_values(m.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, - config, ignore_integrality=True) - if config.init_strategy == 'FP': - copy_var_list_values(m.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config, ignore_integrality=True) - if config.strategy in {'OA', 'FP'}: - add_oa_cuts(solve_data.mip, dual_values, solve_data, config) - elif config.strategy == 'GOA': - add_affine_cuts(solve_data, config) - for var in solve_data.mip.MindtPy_utils.discrete_variable_list: - # We don't want to trigger the reset of the global stale - # indicator, so we will set this variable to be "stale", - # knowing that set_value will switch it back to "not - # stale" - var.stale = True - var.set_value(int(round(var.value)), skip_validation=True) - elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}: - # TODO fail? try something else? - config.logger.info( - 'Initial relaxed NLP problem is infeasible. ' - 'Problem may be infeasible.') - elif subprob_terminate_cond is tc.maxTimeLimit: - config.logger.info( - 'NLP subproblem failed to converge within time limit.') - solve_data.results.solver.termination_condition = tc.maxTimeLimit - elif subprob_terminate_cond is tc.maxIterations: - config.logger.info( - 'NLP subproblem failed to converge within iteration limit.') - else: - raise ValueError( - 'MindtPy unable to handle relaxed NLP termination condition ' - 'of %s. Solver message: %s' % - (subprob_terminate_cond, results.solver.message)) - - -def init_max_binaries(solve_data, config): - """Modifies model by maximizing the number of activated binary variables. - - Note - The user would usually want to call solve_subproblem after an invocation - of this function. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - MILP main problem is infeasible. - ValueError - MindtPy unable to handle the termination condition of the MILP main problem. - """ - m = solve_data.working_model.clone() - if config.calculate_dual_at_solution: - m.dual.deactivate() - MindtPy = m.MindtPy_utils - solve_data.mip_subiter += 1 - config.logger.debug( - 'Initialization: maximize value of binaries') - for c in MindtPy.nonlinear_constraint_list: - c.deactivate() - objective = next(m.component_data_objects(Objective, active=True)) - objective.deactivate() - binary_vars = (v for v in m.MindtPy_utils.discrete_variable_list - if v.is_binary() and not v.fixed) - MindtPy.max_binary_obj = Objective( - expr=sum(v for v in binary_vars), sense=maximize) - - getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate() - getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate() - - mipopt = SolverFactory(config.mip_solver) - if isinstance(mipopt, PersistentSolver): - mipopt.set_instance(m) - mip_args = dict(config.mip_solver_args) - set_solver_options(mipopt, solve_data, config, solver_type='mip') - results = mipopt.solve(m, - tee=config.mip_solver_tee, - load_solutions=False, - **mip_args) - if len(results.solution) > 0: - m.solutions.load_from(results) - - solve_terminate_cond = results.solver.termination_condition - if solve_terminate_cond is tc.optimal: - copy_var_list_values( - MindtPy.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config) - config.logger.info(solve_data.log_formatter.format('-', - 'Max binary MILP', - value(MindtPy.max_binary_obj.expr), - solve_data.primal_bound, - solve_data.dual_bound, - solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing))) - elif solve_terminate_cond is tc.infeasible: - raise ValueError( - 'MILP main problem is infeasible. ' - 'Problem may have no more feasible ' - 'binary configurations.') - elif solve_terminate_cond is tc.maxTimeLimit: - config.logger.info( - 'NLP subproblem failed to converge within time limit.') - solve_data.results.solver.termination_condition = tc.maxTimeLimit - elif solve_terminate_cond is tc.maxIterations: - config.logger.info( - 'NLP subproblem failed to converge within iteration limit.') - else: - raise ValueError( - 'MindtPy unable to handle MILP main termination condition ' - 'of %s. Solver message: %s' % - (solve_terminate_cond, results.solver.message)) diff --git a/pyomo/contrib/mindtpy/iterate.py b/pyomo/contrib/mindtpy/iterate.py deleted file mode 100644 index 08461389787..00000000000 --- a/pyomo/contrib/mindtpy/iterate.py +++ /dev/null @@ -1,470 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -"""Iteration loop for MindtPy.""" -from __future__ import division -from pyomo.contrib.mindtpy.util import set_solver_options, get_integer_solution, update_suboptimal_dual_bound, copy_var_list_values_from_solution_pool -from pyomo.contrib.mindtpy.cut_generation import add_ecp_cuts - -from pyomo.contrib.mindtpy.mip_solve import solve_main, handle_main_optimal, handle_main_infeasible, handle_main_other_conditions, handle_regularization_main_tc -from pyomo.contrib.mindtpy.nlp_solve import solve_subproblem, handle_nlp_subproblem_tc -from pyomo.core import minimize, maximize -from pyomo.opt import TerminationCondition as tc -from pyomo.contrib.gdpopt.util import get_main_elapsed_time, time_code -from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver -from pyomo.opt import SolverFactory -from pyomo.common.dependencies import attempt_import -from pyomo.contrib.gdpopt.util import copy_var_list_values -from pyomo.solvers.plugins.solvers.gurobi_direct import gurobipy -from operator import itemgetter - -tabu_list, tabu_list_available = attempt_import( - 'pyomo.contrib.mindtpy.tabu_list') - - -def MindtPy_iteration_loop(solve_data, config): - """Main loop for MindtPy Algorithms. - - This is the outermost function for the algorithms in this package; this function controls the progression of - solving the model. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - The strategy value is not correct or not included. - """ - last_iter_cuts = False - while solve_data.mip_iter < config.iteration_limit: - - solve_data.mip_subiter = 0 - # solve MILP main problem - if config.strategy in {'OA', 'GOA', 'ECP'}: - main_mip, main_mip_results = solve_main(solve_data, config) - if main_mip_results is not None: - if not config.single_tree: - if main_mip_results.solver.termination_condition is tc.optimal: - handle_main_optimal(main_mip, solve_data, config) - elif main_mip_results.solver.termination_condition is tc.infeasible: - handle_main_infeasible(main_mip, solve_data, config) - last_iter_cuts = True - break - else: - handle_main_other_conditions( - main_mip, main_mip_results, solve_data, config) - # Call the MILP post-solve callback - with time_code(solve_data.timing, 'Call after main solve'): - config.call_after_main_solve(main_mip, solve_data) - else: - config.logger.info('Algorithm should terminate here.') - break - else: - raise ValueError() - - # Regularization is activated after the first feasible solution is found. - if config.add_regularization is not None and solve_data.best_solution_found is not None and not config.single_tree: - # The main problem might be unbounded, regularization is activated only when a valid bound is provided. - if solve_data.dual_bound != solve_data.dual_bound_progress[0]: - main_mip, main_mip_results = solve_main( - solve_data, config, regularization_problem=True) - handle_regularization_main_tc( - main_mip, main_mip_results, solve_data, config) - - # TODO: add descriptions for the following code - if config.add_regularization is not None and config.single_tree: - solve_data.curr_int_sol = get_integer_solution( - solve_data.mip, string_zero=True) - copy_var_list_values( - main_mip.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config) - if solve_data.curr_int_sol not in set(solve_data.integer_list): - fixed_nlp, fixed_nlp_result = solve_subproblem( - solve_data, config) - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_result, solve_data, config) - if algorithm_should_terminate(solve_data, config, check_cycling=True): - last_iter_cuts = False - break - - if not config.single_tree and config.strategy != 'ECP': # if we don't use lazy callback, i.e. LP_NLP - # Solve NLP subproblem - # The constraint linearization happens in the handlers - if not config.solution_pool: - fixed_nlp, fixed_nlp_result = solve_subproblem( - solve_data, config) - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_result, solve_data, config) - - # Call the NLP post-solve callback - with time_code(solve_data.timing, 'Call after subproblem solve'): - config.call_after_subproblem_solve(fixed_nlp, solve_data) - - if algorithm_should_terminate(solve_data, config, check_cycling=False): - last_iter_cuts = True - break - else: - if config.mip_solver == 'cplex_persistent': - solution_pool_names = main_mip_results._solver_model.solution.pool.get_names() - elif config.mip_solver == 'gurobi_persistent': - solution_pool_names = list( - range(main_mip_results._solver_model.SolCount)) - # list to store the name and objective value of the solutions in the solution pool - solution_name_obj = [] - for name in solution_pool_names: - if config.mip_solver == 'cplex_persistent': - obj = main_mip_results._solver_model.solution.pool.get_objective_value( - name) - elif config.mip_solver == 'gurobi_persistent': - main_mip_results._solver_model.setParam( - gurobipy.GRB.Param.SolutionNumber, name) - obj = main_mip_results._solver_model.PoolObjVal - solution_name_obj.append([name, obj]) - solution_name_obj.sort( - key=itemgetter(1), reverse=solve_data.objective_sense == maximize) - counter = 0 - for name, _ in solution_name_obj: - # the optimal solution of the main problem has been added to integer_list above - # so we should skip checking cycling for the first solution in the solution pool - if counter >= 1: - copy_var_list_values_from_solution_pool(solve_data.mip.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config, solver_model=main_mip_results._solver_model, - var_map=main_mip_results._pyomo_var_to_solver_var_map, - solution_name=name) - solve_data.curr_int_sol = get_integer_solution( - solve_data.working_model) - if solve_data.curr_int_sol in set(solve_data.integer_list): - config.logger.info( - 'The same combination has been explored and will be skipped here.') - continue - else: - solve_data.integer_list.append( - solve_data.curr_int_sol) - counter += 1 - fixed_nlp, fixed_nlp_result = solve_subproblem( - solve_data, config) - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_result, solve_data, config) - - # Call the NLP post-solve callback - with time_code(solve_data.timing, 'Call after subproblem solve'): - config.call_after_subproblem_solve( - fixed_nlp, solve_data) - - if algorithm_should_terminate(solve_data, config, check_cycling=False): - last_iter_cuts = True - break - - if counter >= config.num_solution_iteration: - break - - if config.strategy == 'ECP': - add_ecp_cuts(solve_data.mip, solve_data, config) - - # if config.strategy == 'PSC': - # # If the hybrid algorithm is not making progress, switch to OA. - # progress_required = 1E-6 - # if solve_data.objective_sense == minimize: - # log = solve_data.LB_progress - # sign_adjust = 1 - # else: - # log = solve_data.UB_progress - # sign_adjust = -1 - # # Maximum number of iterations in which the lower (optimistic) - # # bound does not improve before switching to OA - # max_nonimprove_iter = 5 - # making_progress = True - # # TODO-romeo Unnecessary for OA and ROA, right? - # for i in range(1, max_nonimprove_iter + 1): - # try: - # if (sign_adjust * log[-i] - # <= (log[-i - 1] + progress_required) - # * sign_adjust): - # making_progress = False - # else: - # making_progress = True - # break - # except IndexError: - # # Not enough history yet, keep going. - # making_progress = True - # break - # if not making_progress and ( - # config.strategy == 'hPSC' or - # config.strategy == 'PSC'): - # config.logger.info( - # 'Not making enough progress for {} iterations. ' - # 'Switching to OA.'.format(max_nonimprove_iter)) - # config.strategy = 'OA' - - # if add_no_good_cuts is True, the bound obtained in the last iteration is no reliable. - # we correct it after the iteration. - if (config.add_no_good_cuts or config.use_tabu_list) and config.strategy != 'FP' and not solve_data.should_terminate and config.add_regularization is None: - fix_dual_bound(solve_data, config, last_iter_cuts) - config.logger.info( - ' ===============================================================================================') - - -def algorithm_should_terminate(solve_data, config, check_cycling): - """Checks if the algorithm should terminate at the given point. - - This function determines whether the algorithm should terminate based on the solver options and progress. - (Sets the solve_data.results.solver.termination_condition to the appropriate condition, i.e. optimal, - maxIterations, maxTimeLimit). - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - check_cycling : bool - Whether to check for a special case that causes the discrete variables to loop through the same values. - - Returns - ------- - bool - True if the algorithm should terminate, False otherwise. - """ - if solve_data.should_terminate: - if solve_data.primal_bound == solve_data.primal_bound_progress[0]: - solve_data.results.solver.termination_condition = tc.noSolution - else: - solve_data.results.solver.termination_condition = tc.feasible - return True - - # Check bound convergence - if solve_data.abs_gap <= config.absolute_bound_tolerance: - config.logger.info( - 'MindtPy exiting on bound convergence. ' - '|Primal Bound: {} - Dual Bound: {}| <= (absolute tolerance {}) \n'.format( - solve_data.primal_bound, solve_data.dual_bound, config.absolute_bound_tolerance)) - solve_data.results.solver.termination_condition = tc.optimal - return True - # Check relative bound convergence - if solve_data.best_solution_found is not None: - if solve_data.rel_gap <= config.relative_bound_tolerance: - config.logger.info( - 'MindtPy exiting on bound convergence. ' - '|Primal Bound: {} - Dual Bound: {}| / (1e-10 + |Primal Bound|:{}) <= relative tolerance: {}'.format(solve_data.primal_bound, solve_data.dual_bound, abs(solve_data.primal_bound), config.relative_bound_tolerance)) - solve_data.results.solver.termination_condition = tc.optimal - return True - - # Check iteration limit - if solve_data.mip_iter >= config.iteration_limit: - config.logger.info( - 'MindtPy unable to converge bounds ' - 'after {} main iterations.'.format(solve_data.mip_iter)) - config.logger.info( - 'Final bound values: Primal Bound: {} Dual Bound: {}'. - format(solve_data.primal_bound, solve_data.dual_bound)) - if config.single_tree: - solve_data.results.solver.termination_condition = tc.feasible - else: - solve_data.results.solver.termination_condition = tc.maxIterations - return True - - # Check time limit - if get_main_elapsed_time(solve_data.timing) >= config.time_limit: - config.logger.info( - 'MindtPy unable to converge bounds ' - 'before time limit of {} seconds. ' - 'Elapsed: {} seconds' - .format(config.time_limit, get_main_elapsed_time(solve_data.timing))) - config.logger.info( - 'Final bound values: Primal Bound: {} Dual Bound: {}'. - format(solve_data.primal_bound, solve_data.dual_bound)) - solve_data.results.solver.termination_condition = tc.maxTimeLimit - return True - - # Check if algorithm is stalling - if len(solve_data.primal_bound_progress) >= config.stalling_limit: - if abs(solve_data.primal_bound_progress[-1] - solve_data.primal_bound_progress[-config.stalling_limit]) <= config.zero_tolerance: - config.logger.info( - 'Algorithm is not making enough progress. ' - 'Exiting iteration loop.') - config.logger.info( - 'Final bound values: Primal Bound: {} Dual Bound: {}'. - format(solve_data.primal_bound, solve_data.dual_bound)) - if solve_data.best_solution_found is not None: - solve_data.results.solver.termination_condition = tc.feasible - else: - # TODO: Is it correct to set solve_data.working_model as the best_solution_found? - # In function copy_var_list_values, skip_fixed is set to True in default. - solve_data.best_solution_found = solve_data.working_model.clone() - config.logger.warning( - 'Algorithm did not find a feasible solution. ' - 'Returning best bound solution. Consider increasing stalling_limit or absolute_bound_tolerance.') - solve_data.results.solver.termination_condition = tc.noSolution - return True - - if config.strategy == 'ECP': - # check to see if the nonlinear constraints are satisfied - MindtPy = solve_data.working_model.MindtPy_utils - nonlinear_constraints = [c for c in MindtPy.nonlinear_constraint_list] - for nlc in nonlinear_constraints: - if nlc.has_lb(): - try: - lower_slack = nlc.lslack() - except (ValueError, OverflowError): - # Set lower_slack (upper_slack below) less than -config.ecp_tolerance in this case. - lower_slack = -10*config.ecp_tolerance - if lower_slack < -config.ecp_tolerance: - config.logger.debug( - 'MindtPy-ECP continuing as {} has not met the ' - 'nonlinear constraints satisfaction.' - '\n'.format(nlc)) - return False - if nlc.has_ub(): - try: - upper_slack = nlc.uslack() - except (ValueError, OverflowError): - upper_slack = -10*config.ecp_tolerance - if upper_slack < -config.ecp_tolerance: - config.logger.debug( - 'MindtPy-ECP continuing as {} has not met the ' - 'nonlinear constraints satisfaction.' - '\n'.format(nlc)) - return False - # For ECP to know whether to know which bound to copy over (primal or dual) - solve_data.primal_bound = solve_data.dual_bound - config.logger.info( - 'MindtPy-ECP exiting on nonlinear constraints satisfaction. ' - 'Primal Bound: {} Dual Bound: {}\n'.format(solve_data.primal_bound, solve_data.dual_bound)) - - solve_data.best_solution_found = solve_data.working_model.clone() - solve_data.results.solver.termination_condition = tc.optimal - return True - - # Cycling check - if check_cycling: - if config.cycling_check or config.use_tabu_list: - solve_data.curr_int_sol = get_integer_solution(solve_data.mip) - if config.cycling_check and solve_data.mip_iter >= 1: - if solve_data.curr_int_sol in set(solve_data.integer_list): - config.logger.info( - 'Cycling happens after {} main iterations. ' - 'The same combination is obtained in iteration {} ' - 'This issue happens when the NLP subproblem violates constraint qualification. ' - 'Convergence to optimal solution is not guaranteed.' - .format(solve_data.mip_iter, solve_data.integer_list.index(solve_data.curr_int_sol)+1)) - config.logger.info( - 'Final bound values: Primal Bound: {} Dual Bound: {}'. - format(solve_data.primal_bound, solve_data.dual_bound)) - # TODO determine solve_data.primal_bound, solve_data.dual_bound is inf or -inf. - solve_data.results.solver.termination_condition = tc.feasible - return True - solve_data.integer_list.append(solve_data.curr_int_sol) - - # if not algorithm_is_making_progress(solve_data, config): - # config.logger.debug( - # 'Algorithm is not making enough progress. ' - # 'Exiting iteration loop.') - # return True - return False - - -def fix_dual_bound(solve_data, config, last_iter_cuts): - """Fix the dual bound when no-good cuts or tabu list is activated. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - last_iter_cuts : bool - Whether the cuts in the last iteration have been added. - """ - if config.single_tree: - config.logger.info( - 'Fix the bound to the value of one iteration before optimal solution is found.') - try: - solve_data.dual_bound = solve_data.stored_bound[solve_data.primal_bound] - except KeyError: - config.logger.info('No stored bound found. Bound fix failed.') - else: - config.logger.info( - 'Solve the main problem without the last no_good cut to fix the bound.' - 'zero_tolerance is set to 1E-4') - config.zero_tolerance = 1E-4 - # Solve NLP subproblem - # The constraint linearization happens in the handlers - if not last_iter_cuts: - fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config) - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_result, solve_data, config) - - MindtPy = solve_data.mip.MindtPy_utils - # deactivate the integer cuts generated after the best solution was found. - if config.strategy == 'GOA': - try: - valid_no_good_cuts_num = solve_data.num_no_good_cuts_added[solve_data.primal_bound] - if config.add_no_good_cuts: - for i in range(valid_no_good_cuts_num+1, len(MindtPy.cuts.no_good_cuts)+1): - MindtPy.cuts.no_good_cuts[i].deactivate() - if config.use_tabu_list: - solve_data.integer_list = solve_data.integer_list[:valid_no_good_cuts_num] - except KeyError: - config.logger.info('No-good cut deactivate failed.') - elif config.strategy == 'OA': - # Only deactive the last OA cuts may not be correct. - # Since integer solution may also be cut off by OA cuts due to calculation approximation. - if config.add_no_good_cuts: - MindtPy.cuts.no_good_cuts[len( - MindtPy.cuts.no_good_cuts)].deactivate() - if config.use_tabu_list: - solve_data.integer_list = solve_data.integer_list[:-1] - if config.add_regularization is not None and MindtPy.find_component('mip_obj') is None: - MindtPy.objective_list[-1].activate() - mainopt = SolverFactory(config.mip_solver) - # determine if persistent solver is called. - if isinstance(mainopt, PersistentSolver): - mainopt.set_instance(solve_data.mip, symbolic_solver_labels=True) - if config.use_tabu_list: - tabulist = mainopt._solver_model.register_callback( - tabu_list.IncumbentCallback_cplex) - tabulist.solve_data = solve_data - tabulist.opt = mainopt - tabulist.config = config - mainopt._solver_model.parameters.preprocessing.reduce.set(1) - # If the callback is used to reject incumbents, the user must set the - # parameter c.parameters.preprocessing.reduce either to the value 1 (one) - # to restrict presolve to primal reductions only or to 0 (zero) to disable all presolve reductions - mainopt._solver_model.set_warning_stream(None) - mainopt._solver_model.set_log_stream(None) - mainopt._solver_model.set_error_stream(None) - mip_args = dict(config.mip_solver_args) - set_solver_options(mainopt, solve_data, config, solver_type='mip') - main_mip_results = mainopt.solve(solve_data.mip, - tee=config.mip_solver_tee, - load_solutions=False, - **mip_args) - if len(main_mip_results.solution) > 0: - solve_data.mip.solutions.load_from(main_mip_results) - - if main_mip_results.solver.termination_condition is tc.infeasible: - config.logger.info( - 'Bound fix failed. The bound fix problem is infeasible') - else: - update_suboptimal_dual_bound(solve_data, main_mip_results) - config.logger.info( - 'Fixed bound values: Primal Bound: {} Dual Bound: {}'. - format(solve_data.primal_bound, solve_data.dual_bound)) - # Check bound convergence - if abs(solve_data.primal_bound - solve_data.dual_bound) <= config.absolute_bound_tolerance: - solve_data.results.solver.termination_condition = tc.optimal diff --git a/pyomo/contrib/mindtpy/mip_solve.py b/pyomo/contrib/mindtpy/mip_solve.py deleted file mode 100644 index 7a05a64f7bd..00000000000 --- a/pyomo/contrib/mindtpy/mip_solve.py +++ /dev/null @@ -1,550 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -"""main problem functions.""" -from __future__ import division -import logging -from pyomo.core import Constraint, Expression, Objective, minimize, value -from pyomo.opt import TerminationCondition as tc -from pyomo.opt import SolutionStatus, SolverFactory -from pyomo.contrib.gdpopt.util import copy_var_list_values, SuppressInfeasibleWarning, _DoNothing, get_main_elapsed_time, time_code -from pyomo.contrib.gdpopt.solve_discrete_problem import ( - distinguish_mip_infeasible_or_unbounded) -from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver -from pyomo.common.dependencies import attempt_import -from pyomo.contrib.mindtpy.util import generate_norm1_objective_function, generate_norm2sq_objective_function, generate_norm_inf_objective_function, generate_lag_objective_function, set_solver_options, GurobiPersistent4MindtPy, update_dual_bound, update_suboptimal_dual_bound - - -single_tree, single_tree_available = attempt_import( - 'pyomo.contrib.mindtpy.single_tree') -tabu_list, tabu_list_available = attempt_import( - 'pyomo.contrib.mindtpy.tabu_list') - - -def solve_main(solve_data, config, fp=False, regularization_problem=False): - """This function solves the MIP main problem. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - fp : bool, optional - Whether it is in the loop of feasibility pump, by default False. - regularization_problem : bool, optional - Whether it is solving a regularization problem, by default False. - - Returns - ------- - solve_data.mip : Pyomo model - The MIP stored in solve_data. - main_mip_results : SolverResults - Results from solving the main MIP. - """ - if not fp and not regularization_problem: - solve_data.mip_iter += 1 - - # setup main problem - setup_main(solve_data, config, fp, regularization_problem) - mainopt = set_up_mip_solver(solve_data, config, regularization_problem) - - mip_args = dict(config.mip_solver_args) - if config.mip_solver in {'cplex', 'cplex_persistent', 'gurobi', 'gurobi_persistent'}: - mip_args['warmstart'] = True - set_solver_options(mainopt, solve_data, config, - solver_type='mip', regularization=regularization_problem) - try: - with time_code(solve_data.timing, 'regularization main' if regularization_problem else ('fp main' if fp else 'main')): - main_mip_results = mainopt.solve(solve_data.mip, - tee=config.mip_solver_tee, - load_solutions=False, - **mip_args) - if len(main_mip_results.solution) > 0: - solve_data.mip.solutions.load_from(main_mip_results) - except (ValueError, AttributeError): - if config.single_tree: - config.logger.warning('Single tree terminate.') - if get_main_elapsed_time(solve_data.timing) >= config.time_limit - 2: - config.logger.warning('due to the timelimit.') - solve_data.results.solver.termination_condition = tc.maxTimeLimit - if config.strategy == 'GOA' or config.add_no_good_cuts: - config.logger.warning('ValueError: Cannot load a SolverResults object with bad status: error. ' - 'MIP solver failed. This usually happens in the single-tree GOA algorithm. ' - "No-good cuts are added and GOA algorithm doesn't converge within the time limit. " - 'No integer solution is found, so the cplex solver will report an error status. ') - return None, None - if config.solution_pool: - main_mip_results._solver_model = mainopt._solver_model - main_mip_results._pyomo_var_to_solver_var_map = mainopt._pyomo_var_to_solver_var_map - if main_mip_results.solver.termination_condition is tc.optimal: - if config.single_tree and not config.add_no_good_cuts and not regularization_problem: - update_suboptimal_dual_bound(solve_data, main_mip_results) - if regularization_problem: - config.logger.info(solve_data.log_formatter.format(solve_data.mip_iter, 'Reg '+solve_data.regularization_mip_type, - value(solve_data.mip.MindtPy_utils.loa_proj_mip_obj), - solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing))) - - elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: - # Linear solvers will sometimes tell me that it's infeasible or - # unbounded during presolve, but fails to distinguish. We need to - # resolve with a solver option flag on. - main_mip_results, _ = distinguish_mip_infeasible_or_unbounded( - solve_data.mip, config) - return solve_data.mip, main_mip_results - - if regularization_problem: - solve_data.mip.MindtPy_utils.objective_constr.deactivate() - solve_data.mip.MindtPy_utils.del_component('loa_proj_mip_obj') - solve_data.mip.MindtPy_utils.cuts.del_component('obj_reg_estimate') - if config.add_regularization == 'level_L1': - solve_data.mip.MindtPy_utils.del_component('L1_obj') - elif config.add_regularization == 'level_L_infinity': - solve_data.mip.MindtPy_utils.del_component( - 'L_infinity_obj') - - return solve_data.mip, main_mip_results - - -def set_up_mip_solver(solve_data, config, regularization_problem): - """Set up the MIP solver. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - regularization_problem : bool - Whether it is solving a regularization problem. - - Returns - ------- - mainopt : SolverFactory - The customized MIP solver. - """ - # Deactivate extraneous IMPORT/EXPORT suffixes - if config.nlp_solver == 'ipopt': - getattr(solve_data.mip, 'ipopt_zL_out', _DoNothing()).deactivate() - getattr(solve_data.mip, 'ipopt_zU_out', _DoNothing()).deactivate() - if regularization_problem: - mainopt = SolverFactory(config.mip_regularization_solver) - else: - if config.mip_solver == 'gurobi_persistent' and config.single_tree: - mainopt = GurobiPersistent4MindtPy() - mainopt.solve_data = solve_data - mainopt.config = config - else: - mainopt = SolverFactory(config.mip_solver) - - # determine if persistent solver is called. - if isinstance(mainopt, PersistentSolver): - mainopt.set_instance(solve_data.mip, symbolic_solver_labels=True) - if config.single_tree and not regularization_problem: - # Configuration of cplex lazy callback - if config.mip_solver == 'cplex_persistent': - lazyoa = mainopt._solver_model.register_callback( - single_tree.LazyOACallback_cplex) - # pass necessary data and parameters to lazyoa - lazyoa.main_mip = solve_data.mip - lazyoa.solve_data = solve_data - lazyoa.config = config - lazyoa.opt = mainopt - mainopt._solver_model.set_warning_stream(None) - mainopt._solver_model.set_log_stream(None) - mainopt._solver_model.set_error_stream(None) - if config.mip_solver == 'gurobi_persistent': - mainopt.set_callback(single_tree.LazyOACallback_gurobi) - if config.use_tabu_list: - tabulist = mainopt._solver_model.register_callback( - tabu_list.IncumbentCallback_cplex) - tabulist.solve_data = solve_data - tabulist.opt = mainopt - tabulist.config = config - mainopt._solver_model.parameters.preprocessing.reduce.set(1) - # If the callback is used to reject incumbents, the user must set the - # parameter c.parameters.preprocessing.reduce either to the value 1 (one) - # to restrict presolve to primal reductions only or to 0 (zero) to disable all presolve reductions - mainopt._solver_model.set_warning_stream(None) - mainopt._solver_model.set_log_stream(None) - mainopt._solver_model.set_error_stream(None) - return mainopt - - -# The following functions deal with handling the solution we get from the above MIP solver function - - -def handle_main_optimal(main_mip, solve_data, config, update_bound=True): - """This function copies the results from 'solve_main' to the working model and updates - the upper/lower bound. This function is called after an optimal solution is found for - the main problem. - - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - update_bound : bool, optional - Whether to update the bound, by default True. - Bound will not be updated when handling regularization problem. - """ - # proceed. Just need integer values - MindtPy = main_mip.MindtPy_utils - # check if the value of binary variable is valid - for var in MindtPy.discrete_variable_list: - if var.value is None: - config.logger.warning( - f"Integer variable {var.name} not initialized. " - "Setting it to its lower bound") - var.set_value(var.lb, skip_validation=True) # nlp_var.bounds[0] - # warm start for the nlp subproblem - copy_var_list_values( - main_mip.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config) - - if update_bound: - update_dual_bound(solve_data, value(MindtPy.mip_obj.expr)) - config.logger.info(solve_data.log_formatter.format(solve_data.mip_iter, 'MILP', value(MindtPy.mip_obj.expr), - solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing))) - - -def handle_main_other_conditions(main_mip, main_mip_results, solve_data, config): - """This function handles the result of the latest iteration of solving the MIP problem (given any of a few - edge conditions, such as if the solution is neither infeasible nor optimal). - - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - main_mip_results : SolverResults - Results from solving the MIP problem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - MindtPy unable to handle MILP main termination condition. - """ - if main_mip_results.solver.termination_condition is tc.infeasible: - handle_main_infeasible(main_mip, solve_data, config) - elif main_mip_results.solver.termination_condition is tc.unbounded: - temp_results = handle_main_unbounded(main_mip, solve_data, config) - elif main_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded: - temp_results = handle_main_unbounded(main_mip, solve_data, config) - if temp_results.solver.termination_condition is tc.infeasible: - handle_main_infeasible(main_mip, solve_data, config) - elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: - handle_main_max_timelimit( - main_mip, main_mip_results, solve_data, config) - solve_data.results.solver.termination_condition = tc.maxTimeLimit - elif main_mip_results.solver.termination_condition is tc.feasible or \ - (main_mip_results.solver.termination_condition is tc.other and - main_mip_results.solution.status is SolutionStatus.feasible): - # load the solution and suppress the warning message by setting - # solver status to ok. - MindtPy = main_mip.MindtPy_utils - config.logger.info( - 'MILP solver reported feasible solution, ' - 'but not guaranteed to be optimal.') - copy_var_list_values( - main_mip.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config) - update_suboptimal_dual_bound(solve_data, main_mip_results) - config.logger.info(solve_data.log_formatter.format(solve_data.mip_iter, 'MILP', value(MindtPy.mip_obj.expr), - solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing))) - else: - raise ValueError( - 'MindtPy unable to handle MILP main termination condition ' - 'of %s. Solver message: %s' % - (main_mip_results.solver.termination_condition, main_mip_results.solver.message)) - - -def handle_main_infeasible(main_mip, solve_data, config): - """This function handles the result of the latest iteration of solving - the MIP problem given an infeasible solution. - - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - """ - config.logger.info( - 'MILP main problem is infeasible. ' - 'Problem may have no more feasible ' - 'binary configurations.') - if solve_data.mip_iter == 1: - config.logger.warning( - 'MindtPy initialization may have generated poor ' - 'quality cuts.') - # TODO no-good cuts for single tree case - # set optimistic bound to infinity - # TODO: can we remove the following line? - # solve_data.dual_bound_progress.append(solve_data.dual_bound) - config.logger.info( - 'MindtPy exiting due to MILP main problem infeasibility.') - if solve_data.results.solver.termination_condition is None: - if solve_data.mip_iter == 0: - solve_data.results.solver.termination_condition = tc.infeasible - else: - solve_data.results.solver.termination_condition = tc.feasible - - -def handle_main_max_timelimit(main_mip, main_mip_results, solve_data, config): - """This function handles the result of the latest iteration of solving the MIP problem - given that solving the MIP takes too long. - - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - main_mip_results : [type] - Results from solving the MIP main subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - """ - # TODO if we have found a valid feasible solution, we take that, if not, we can at least use the dual bound - MindtPy = main_mip.MindtPy_utils - config.logger.info( - 'Unable to optimize MILP main problem ' - 'within time limit. ' - 'Using current solver feasible solution.') - copy_var_list_values( - main_mip.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config) - update_suboptimal_dual_bound(solve_data, main_mip_results) - config.logger.info(solve_data.log_formatter.format(solve_data.mip_iter, 'MILP', value(MindtPy.mip_obj.expr), - solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing))) - - -def handle_main_unbounded(main_mip, solve_data, config): - """This function handles the result of the latest iteration of solving the MIP - problem given an unbounded solution due to the relaxation. - - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Returns - ------- - main_mip_results : SolverResults - The results of the bounded main problem. - """ - # Solution is unbounded. Add an arbitrary bound to the objective and resolve. - # This occurs when the objective is nonlinear. The nonlinear objective is moved - # to the constraints, and deactivated for the linear main problem. - MindtPy = main_mip.MindtPy_utils - config.logger.warning( - 'main MILP was unbounded. ' - 'Resolving with arbitrary bound values of (-{0:.10g}, {0:.10g}) on the objective. ' - 'You can change this bound with the option obj_bound.'.format(config.obj_bound)) - MindtPy.objective_bound = Constraint( - expr=(-config.obj_bound, MindtPy.mip_obj.expr, config.obj_bound)) - mainopt = SolverFactory(config.mip_solver) - if isinstance(mainopt, PersistentSolver): - mainopt.set_instance(main_mip) - set_solver_options(mainopt, solve_data, config, solver_type='mip') - with SuppressInfeasibleWarning(): - main_mip_results = mainopt.solve(main_mip, - tee=config.mip_solver_tee, - load_solutions=False, - **config.mip_solver_args) - if len(main_mip_results.solution) > 0: - solve_data.mip.solutions.load_from(main_mip_results) - return main_mip_results - - -def handle_regularization_main_tc(main_mip, main_mip_results, solve_data, config): - """Handles the result of the latest FP iteration of solving the regularization main problem. - - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - main_mip_results : SolverResults - Results from solving the regularization main subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - MindtPy unable to handle the regularization problem termination condition. - """ - if main_mip_results is None: - config.logger.info( - 'Failed to solve the regularization problem.' - 'The solution of the OA main problem will be adopted.') - elif main_mip_results.solver.termination_condition in {tc.optimal, tc.feasible}: - handle_main_optimal( - main_mip, solve_data, config, update_bound=False) - elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: - config.logger.info( - 'Regularization problem failed to converge within the time limit.') - solve_data.results.solver.termination_condition = tc.maxTimeLimit - # break - elif main_mip_results.solver.termination_condition is tc.infeasible: - config.logger.info( - 'Regularization problem infeasible.') - elif main_mip_results.solver.termination_condition is tc.unbounded: - config.logger.info( - 'Regularization problem ubounded.' - 'Sometimes solving MIQP in cplex, unbounded means infeasible.') - elif main_mip_results.solver.termination_condition is tc.unknown: - config.logger.info( - 'Termination condition of the regularization problem is unknown.') - if main_mip_results.problem.lower_bound != float('-inf'): - config.logger.info('Solution limit has been reached.') - handle_main_optimal( - main_mip, solve_data, config, update_bound=False) - else: - config.logger.info('No solution obtained from the regularization subproblem.' - 'Please set mip_solver_tee to True for more informations.' - 'The solution of the OA main problem will be adopted.') - else: - raise ValueError( - 'MindtPy unable to handle regularization problem termination condition ' - 'of %s. Solver message: %s' % - (main_mip_results.solver.termination_condition, main_mip_results.solver.message)) - - -def setup_main(solve_data, config, fp, regularization_problem): - """Set up main problem/main regularization problem for OA, ECP, Feasibility Pump and ROA methods. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - fp : bool - Whether it is in the loop of feasibility pump. - regularization_problem : bool - Whether it is solving a regularization problem. - """ - MindtPy = solve_data.mip.MindtPy_utils - - for c in MindtPy.constraint_list: - if c.body.polynomial_degree() not in solve_data.mip_constraint_polynomial_degree: - c.deactivate() - - MindtPy.cuts.activate() - - sign_adjust = 1 if solve_data.objective_sense == minimize else - 1 - MindtPy.del_component('mip_obj') - if regularization_problem and config.single_tree: - MindtPy.del_component('loa_proj_mip_obj') - MindtPy.cuts.del_component('obj_reg_estimate') - if config.add_regularization is not None and config.add_no_good_cuts: - if regularization_problem: - MindtPy.cuts.no_good_cuts.activate() - else: - MindtPy.cuts.no_good_cuts.deactivate() - - if fp: - MindtPy.del_component('fp_mip_obj') - if config.fp_main_norm == 'L1': - MindtPy.fp_mip_obj = generate_norm1_objective_function( - solve_data.mip, - solve_data.working_model, - discrete_only=config.fp_discrete_only) - elif config.fp_main_norm == 'L2': - MindtPy.fp_mip_obj = generate_norm2sq_objective_function( - solve_data.mip, - solve_data.working_model, - discrete_only=config.fp_discrete_only) - elif config.fp_main_norm == 'L_infinity': - MindtPy.fp_mip_obj = generate_norm_inf_objective_function( - solve_data.mip, - solve_data.working_model, - discrete_only=config.fp_discrete_only) - elif regularization_problem: - # The epigraph constraint is very "flat" for branching rules. - # In ROA, if the objective function is linear(or quadratic when quadratic_strategy = 1 or 2), the original objective function is used in the MIP problem. - # In the MIP projection problem, we need to reactivate the epigraph constraint(objective_constr). - if MindtPy.objective_list[0].expr.polynomial_degree() in solve_data.mip_objective_polynomial_degree: - MindtPy.objective_constr.activate() - if config.add_regularization == 'level_L1': - MindtPy.loa_proj_mip_obj = generate_norm1_objective_function(solve_data.mip, - solve_data.best_solution_found, - discrete_only=False) - elif config.add_regularization == 'level_L2': - MindtPy.loa_proj_mip_obj = generate_norm2sq_objective_function(solve_data.mip, - solve_data.best_solution_found, - discrete_only=False) - elif config.add_regularization == 'level_L_infinity': - MindtPy.loa_proj_mip_obj = generate_norm_inf_objective_function(solve_data.mip, - solve_data.best_solution_found, - discrete_only=False) - elif config.add_regularization in {'grad_lag', 'hess_lag', 'hess_only_lag', 'sqp_lag'}: - MindtPy.loa_proj_mip_obj = generate_lag_objective_function(solve_data.mip, - solve_data.best_solution_found, - config, - solve_data, - discrete_only=False) - if solve_data.objective_sense == minimize: - MindtPy.cuts.obj_reg_estimate = Constraint( - expr=sum(MindtPy.objective_value[:]) <= (1 - config.level_coef) * solve_data.primal_bound + config.level_coef * solve_data.dual_bound) - else: - MindtPy.cuts.obj_reg_estimate = Constraint( - expr=sum(MindtPy.objective_value[:]) >= (1 - config.level_coef) * solve_data.primal_bound + config.level_coef * solve_data.dual_bound) - else: - if config.add_slack: - MindtPy.del_component('aug_penalty_expr') - - MindtPy.aug_penalty_expr = Expression( - expr=sign_adjust * config.OA_penalty_factor * sum( - v for v in MindtPy.cuts.slack_vars[...])) - main_objective = MindtPy.objective_list[-1] - MindtPy.mip_obj = Objective( - expr=main_objective.expr + - (MindtPy.aug_penalty_expr if config.add_slack else 0), - sense=solve_data.objective_sense) - - if config.use_dual_bound: - # Delete previously added dual bound constraint - MindtPy.cuts.del_component('dual_bound') - if solve_data.dual_bound not in {float('inf'), float('-inf')}: - if solve_data.objective_sense == minimize: - MindtPy.cuts.dual_bound = Constraint( - expr=main_objective.expr + - (MindtPy.aug_penalty_expr if config.add_slack else 0) >= solve_data.dual_bound, - doc='Objective function expression should improve on the best found dual bound') - else: - MindtPy.cuts.dual_bound = Constraint( - expr=main_objective.expr + - (MindtPy.aug_penalty_expr if config.add_slack else 0) <= solve_data.dual_bound, - doc='Objective function expression should improve on the best found dual bound') diff --git a/pyomo/contrib/mindtpy/nlp_solve.py b/pyomo/contrib/mindtpy/nlp_solve.py deleted file mode 100644 index 6e2658e509f..00000000000 --- a/pyomo/contrib/mindtpy/nlp_solve.py +++ /dev/null @@ -1,443 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -"""Solution of NLP subproblems.""" -from __future__ import division - -from pyomo.common.collections import ComponentMap -from pyomo.common.errors import InfeasibleConstraintException -from pyomo.contrib.mindtpy.cut_generation import (add_oa_cuts, - add_no_good_cuts, add_affine_cuts) -from pyomo.contrib.mindtpy.util import add_feas_slacks, set_solver_options, update_primal_bound -from pyomo.contrib.gdpopt.util import copy_var_list_values, get_main_elapsed_time, time_code, SuppressInfeasibleWarning -from pyomo.core import (Constraint, Objective, - TransformationFactory, minimize, value) -from pyomo.opt import TerminationCondition as tc -from pyomo.opt import SolverFactory, SolverResults, SolverStatus - - -def solve_subproblem(solve_data, config): - """Solves the Fixed-NLP (with fixed integers). - - This function sets up the 'fixed_nlp' by fixing binaries, sets continuous variables to their intial var values, - precomputes dual values, deactivates trivial constraints, and then solves NLP model. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Returns - ------- - fixed_nlp : Pyomo model - Integer-variable-fixed NLP model. - results : SolverResults - Results from solving the Fixed-NLP. - """ - fixed_nlp = solve_data.working_model.clone() - MindtPy = fixed_nlp.MindtPy_utils - solve_data.nlp_iter += 1 - - # Set up NLP - TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp) - - MindtPy.cuts.deactivate() - if config.calculate_dual_at_solution: - fixed_nlp.tmp_duals = ComponentMap() - # tmp_duals are the value of the dual variables stored before using deactivate trivial contraints - # The values of the duals are computed as follows: (Complementary Slackness) - # - # | constraint | c_geq | status at x1 | tmp_dual (violation) | - # |------------|-------|--------------|----------------------| - # | g(x) <= b | -1 | g(x1) <= b | 0 | - # | g(x) <= b | -1 | g(x1) > b | g(x1) - b | - # | g(x) >= b | +1 | g(x1) >= b | 0 | - # | g(x) >= b | +1 | g(x1) < b | b - g(x1) | - evaluation_error = False - for c in fixed_nlp.MindtPy_utils.constraint_list: - # We prefer to include the upper bound as the right hand side since we are - # considering c by default a (hopefully) convex function, which would make - # c >= lb a nonconvex inequality which we wouldn't like to add linearizations - # if we don't have to - rhs = value(c.upper) if c.has_ub() else value(c.lower) - c_geq = -1 if c.has_ub() else 1 - try: - fixed_nlp.tmp_duals[c] = c_geq * max( - 0, c_geq*(rhs - value(c.body))) - except (ValueError, OverflowError) as error: - fixed_nlp.tmp_duals[c] = None - evaluation_error = True - if evaluation_error: - for nlp_var, orig_val in zip( - MindtPy.variable_list, - solve_data.initial_var_values): - if not nlp_var.fixed and not nlp_var.is_binary(): - nlp_var.set_value(orig_val, skip_validation=True) - try: - TransformationFactory('contrib.deactivate_trivial_constraints').apply_to( - fixed_nlp, tmp=True, ignore_infeasible=False, tolerance=config.constraint_tolerance) - except InfeasibleConstraintException: - config.logger.warning( - 'infeasibility detected in deactivate_trivial_constraints') - results = SolverResults() - results.solver.termination_condition = tc.infeasible - return fixed_nlp, results - # Solve the NLP - nlpopt = SolverFactory(config.nlp_solver) - nlp_args = dict(config.nlp_solver_args) - set_solver_options(nlpopt, solve_data, config, solver_type='nlp') - with SuppressInfeasibleWarning(): - with time_code(solve_data.timing, 'fixed subproblem'): - results = nlpopt.solve(fixed_nlp, - tee=config.nlp_solver_tee, - load_solutions=False, - **nlp_args) - if len(results.solution) > 0: - fixed_nlp.solutions.load_from(results) - return fixed_nlp, results - - -def handle_nlp_subproblem_tc(fixed_nlp, result, solve_data, config, cb_opt=None): - """This function handles different terminaton conditions of the fixed-NLP subproblem. - - Parameters - ---------- - fixed_nlp : Pyomo model - Integer-variable-fixed NLP model. - result : SolverResults - Results from solving the NLP subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - cb_opt : SolverFactory, optional - The gurobi_persistent solver, by default None. - """ - if result.solver.termination_condition in {tc.optimal, tc.locallyOptimal, tc.feasible}: - handle_subproblem_optimal(fixed_nlp, solve_data, config, cb_opt) - elif result.solver.termination_condition in {tc.infeasible, tc.noSolution}: - handle_subproblem_infeasible(fixed_nlp, solve_data, config, cb_opt) - elif result.solver.termination_condition is tc.maxTimeLimit: - config.logger.info( - 'NLP subproblem failed to converge within the time limit.') - solve_data.results.solver.termination_condition = tc.maxTimeLimit - solve_data.should_terminate = True - elif result.solver.termination_condition is tc.maxEvaluations: - config.logger.info( - 'NLP subproblem failed due to maxEvaluations.') - solve_data.results.solver.termination_condition = tc.maxEvaluations - solve_data.should_terminate = True - else: - handle_subproblem_other_termination(fixed_nlp, result.solver.termination_condition, - solve_data, config) - - -# The next few functions deal with handling the solution we get from the above NLP solver function - - -def handle_subproblem_optimal(fixed_nlp, solve_data, config, cb_opt=None, fp=False): - """This function copies the result of the NLP solver function ('solve_subproblem') to the working model, updates - the bounds, adds OA and no-good cuts, and then stores the new solution if it is the new best solution. This - function handles the result of the latest iteration of solving the NLP subproblem given an optimal solution. - - Parameters - ---------- - fixed_nlp : Pyomo model - Integer-variable-fixed NLP model. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - cb_opt : SolverFactory, optional - The gurobi_persistent solver, by default None. - fp : bool, optional - Whether it is in the loop of feasibility pump, by default False. - """ - copy_var_list_values( - fixed_nlp.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config) - if config.calculate_dual_at_solution: - for c in fixed_nlp.tmp_duals: - if fixed_nlp.dual.get(c, None) is None: - fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c] - dual_values = list(fixed_nlp.dual[c] - for c in fixed_nlp.MindtPy_utils.constraint_list) - else: - dual_values = None - main_objective = fixed_nlp.MindtPy_utils.objective_list[-1] - update_primal_bound(solve_data, value(main_objective.expr)) - if solve_data.primal_bound_improved: - solve_data.best_solution_found = fixed_nlp.clone() - solve_data.best_solution_found_time = get_main_elapsed_time( - solve_data.timing) - if config.strategy == 'GOA': - solve_data.num_no_good_cuts_added.update( - {solve_data.primal_bound: len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts)}) - - # add obj increasing constraint for fp - if fp: - solve_data.mip.MindtPy_utils.cuts.del_component( - 'improving_objective_cut') - if solve_data.objective_sense == minimize: - solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(expr=sum(solve_data.mip.MindtPy_utils.objective_value[:]) - <= solve_data.primal_bound - config.fp_cutoffdecr*max(1, abs(solve_data.primal_bound))) - else: - solve_data.mip.MindtPy_utils.cuts.improving_objective_cut = Constraint(expr=sum(solve_data.mip.MindtPy_utils.objective_value[:]) - >= solve_data.primal_bound + config.fp_cutoffdecr*max(1, abs(solve_data.primal_bound))) - # Add the linear cut - if config.strategy == 'OA' or fp: - copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, - config) - add_oa_cuts(solve_data.mip, dual_values, solve_data, config, cb_opt) - elif config.strategy == 'GOA': - copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, - config) - add_affine_cuts(solve_data, config) - # elif config.strategy == 'PSC': - # # !!THIS SEEMS LIKE A BUG!! - mrmundt # - # add_psc_cut(solve_data, config) - # elif config.strategy == 'GBD': - # # !!THIS SEEMS LIKE A BUG!! - mrmundt # - # add_gbd_cut(solve_data, config) - - var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) - if config.add_no_good_cuts: - add_no_good_cuts(var_values, solve_data, config) - - config.call_after_subproblem_feasible(fixed_nlp, solve_data) - - config.logger.info(solve_data.fixed_nlp_log_formatter.format('*' if solve_data.primal_bound_improved else ' ', - solve_data.nlp_iter if not fp else solve_data.fp_iter, - 'Fixed NLP', - value(main_objective.expr), - solve_data.primal_bound, - solve_data.dual_bound, - solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing))) - - -def handle_subproblem_infeasible(fixed_nlp, solve_data, config, cb_opt=None): - """Solves feasibility problem and adds cut according to the specified strategy. - - This function handles the result of the latest iteration of solving the NLP subproblem given an infeasible - solution and copies the solution of the feasibility problem to the working model. - - Parameters - ---------- - fixed_nlp : Pyomo model - Integer-variable-fixed NLP model. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - cb_opt : SolverFactory, optional - The gurobi_persistent solver, by default None. - """ - # TODO try something else? Reinitialize with different initial - # value? - config.logger.info('NLP subproblem was locally infeasible.') - solve_data.nlp_infeasible_counter += 1 - if config.calculate_dual_at_solution: - for c in fixed_nlp.MindtPy_utils.constraint_list: - rhs = value(c.upper) if c. has_ub() else value(c.lower) - c_geq = -1 if c.has_ub() else 1 - fixed_nlp.dual[c] = (c_geq - * max(0, c_geq * (rhs - value(c.body)))) - dual_values = list(fixed_nlp.dual[c] - for c in fixed_nlp.MindtPy_utils.constraint_list) - else: - dual_values = None - - # if config.strategy == 'PSC' or config.strategy == 'GBD': - # for var in fixed_nlp.component_data_objects(ctype=Var, descend_into=True): - # fixed_nlp.ipopt_zL_out[var] = 0 - # fixed_nlp.ipopt_zU_out[var] = 0 - # if var.has_ub() and abs(var.ub - value(var)) < config.absolute_bound_tolerance: - # fixed_nlp.ipopt_zL_out[var] = 1 - # elif var.has_lb() and abs(value(var) - var.lb) < config.absolute_bound_tolerance: - # fixed_nlp.ipopt_zU_out[var] = -1 - - if config.strategy in {'OA', 'GOA'}: - config.logger.info('Solving feasibility problem') - feas_subproblem, feas_subproblem_results = solve_feasibility_subproblem( - solve_data, config) - # TODO: do we really need this? - if solve_data.should_terminate: - return - copy_var_list_values(feas_subproblem.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, - config) - if config.strategy == 'OA': - add_oa_cuts(solve_data.mip, dual_values, - solve_data, config, cb_opt) - elif config.strategy == 'GOA': - add_affine_cuts(solve_data, config) - # Add a no-good cut to exclude this discrete option - var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) - if config.add_no_good_cuts: - # excludes current discrete option - add_no_good_cuts(var_values, solve_data, config) - - -def handle_subproblem_other_termination(fixed_nlp, termination_condition, - solve_data, config): - """Handles the result of the latest iteration of solving the fixed NLP subproblem given - a solution that is neither optimal nor infeasible. - - Parameters - ---------- - fixed_nlp : Pyomo model - Integer-variable-fixed NLP model. - termination_condition : Pyomo TerminationCondition - The termination condition of the fixed NLP subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - MindtPy unable to handle the NLP subproblem termination condition. - """ - if termination_condition is tc.maxIterations: - # TODO try something else? Reinitialize with different initial value? - config.logger.info( - 'NLP subproblem failed to converge within iteration limit.') - var_values = list( - v.value for v in fixed_nlp.MindtPy_utils.variable_list) - if config.add_no_good_cuts: - # excludes current discrete option - add_no_good_cuts(var_values, solve_data, config) - - else: - raise ValueError( - 'MindtPy unable to handle NLP subproblem termination ' - 'condition of {}'.format(termination_condition)) - - -def solve_feasibility_subproblem(solve_data, config): - """Solves a feasibility NLP if the fixed_nlp problem is infeasible. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Returns - ------- - feas_subproblem : Pyomo model - Feasibility NLP from the model. - feas_soln : SolverResults - Results from solving the feasibility NLP. - """ - feas_subproblem = solve_data.working_model.clone() - add_feas_slacks(feas_subproblem, config) - - MindtPy = feas_subproblem.MindtPy_utils - if MindtPy.find_component('objective_value') is not None: - MindtPy.objective_value[:].set_value(0, skip_validation=True) - - next(feas_subproblem.component_data_objects( - Objective, active=True)).deactivate() - for constr in feas_subproblem.MindtPy_utils.nonlinear_constraint_list: - constr.deactivate() - - MindtPy.feas_opt.activate() - if config.feasibility_norm == 'L1': - MindtPy.feas_obj = Objective( - expr=sum(s for s in MindtPy.feas_opt.slack_var[...]), - sense=minimize) - elif config.feasibility_norm == 'L2': - MindtPy.feas_obj = Objective( - expr=sum(s*s for s in MindtPy.feas_opt.slack_var[...]), - sense=minimize) - else: - MindtPy.feas_obj = Objective( - expr=MindtPy.feas_opt.slack_var, - sense=minimize) - TransformationFactory('core.fix_integer_vars').apply_to(feas_subproblem) - nlpopt = SolverFactory(config.nlp_solver) - nlp_args = dict(config.nlp_solver_args) - set_solver_options(nlpopt, solve_data, config, solver_type='nlp') - with SuppressInfeasibleWarning(): - try: - with time_code(solve_data.timing, 'feasibility subproblem'): - feas_soln = nlpopt.solve(feas_subproblem, - tee=config.nlp_solver_tee, - load_solutions=config.nlp_solver!='appsi_ipopt', - **nlp_args) - if len(feas_soln.solution) > 0: - feas_subproblem.solutions.load_from(feas_soln) - except (ValueError, OverflowError) as error: - for nlp_var, orig_val in zip( - MindtPy.variable_list, - solve_data.initial_var_values): - if not nlp_var.fixed and not nlp_var.is_binary(): - nlp_var.set_value(orig_val, skip_validation=True) - with time_code(solve_data.timing, 'feasibility subproblem'): - feas_soln = nlpopt.solve(feas_subproblem, - tee=config.nlp_solver_tee, - load_solutions=config.nlp_solver!='appsi_ipopt', - **nlp_args) - if len(feas_soln.solution) > 0: - feas_soln.solutions.load_from(feas_soln) - handle_feasibility_subproblem_tc( - feas_soln.solver.termination_condition, MindtPy, solve_data, config) - return feas_subproblem, feas_soln - - -def handle_feasibility_subproblem_tc(subprob_terminate_cond, MindtPy, solve_data, config): - """Handles the result of the latest iteration of solving the feasibility NLP subproblem given - a solution that is neither optimal nor infeasible. - - Parameters - ---------- - subprob_terminate_cond : Pyomo TerminationCondition - The termination condition of the feasibility NLP subproblem. - MindtPy : Pyomo Block - The MindtPy_utils block. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - """ - if subprob_terminate_cond in {tc.optimal, tc.locallyOptimal, tc.feasible}: - copy_var_list_values( - MindtPy.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config) - if value(MindtPy.feas_obj.expr) <= config.zero_tolerance: - config.logger.warning('The objective value %.4E of feasibility problem is less than zero_tolerance. ' - 'This indicates that the nlp subproblem is feasible, although it is found infeasible in the previous step. ' - 'Check the nlp solver output' % value(MindtPy.feas_obj.expr)) - elif subprob_terminate_cond in {tc.infeasible, tc.noSolution}: - config.logger.error('Feasibility subproblem infeasible. ' - 'This should never happen.') - solve_data.should_terminate = True - solve_data.results.solver.status = SolverStatus.error - elif subprob_terminate_cond is tc.maxIterations: - config.logger.error('Subsolver reached its maximum number of iterations without converging, ' - 'consider increasing the iterations limit of the subsolver or reviewing your formulation.') - solve_data.should_terminate = True - solve_data.results.solver.status = SolverStatus.error - else: - config.logger.error('MindtPy unable to handle feasibility subproblem termination condition ' - 'of {}'.format(subprob_terminate_cond)) - solve_data.should_terminate = True - solve_data.results.solver.status = SolverStatus.error diff --git a/pyomo/contrib/mindtpy/outer_approximation.py b/pyomo/contrib/mindtpy/outer_approximation.py new file mode 100644 index 00000000000..99d9cea1bd4 --- /dev/null +++ b/pyomo/contrib/mindtpy/outer_approximation.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- + +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.contrib.mindtpy.util import calc_jacobians +from pyomo.core import ConstraintList +from pyomo.opt import SolverFactory +from pyomo.contrib.mindtpy.config_options import _get_MindtPy_OA_config +from pyomo.contrib.mindtpy.algorithm_base_class import _MindtPyAlgorithm +from pyomo.contrib.mindtpy.cut_generation import add_oa_cuts + + +@SolverFactory.register( + 'mindtpy.oa', doc='MindtPy: Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo' +) +class MindtPy_OA_Solver(_MindtPyAlgorithm): + """ + Decomposition solver for Mixed-Integer Nonlinear Programming (MINLP) problems. + + The MindtPy (Mixed-Integer Nonlinear Decomposition Toolbox in Pyomo) solver + applies a variety of decomposition-based approaches to solve Mixed-Integer + Nonlinear Programming (MINLP) problems. + This class includes: + + - Outer approximation (OA) + - Regularized outer approximation (ROA) + - LP/NLP based branch-and-bound (LP/NLP) + - Regularized LP/NLP based branch-and-bound (RLP/NLP) + """ + + CONFIG = _get_MindtPy_OA_config() + + def check_config(self): + config = self.config + if config.add_regularization is not None: + if config.add_regularization in { + 'grad_lag', + 'hess_lag', + 'hess_only_lag', + 'sqp_lag', + }: + config.calculate_dual_at_solution = True + if config.regularization_mip_threads == 0 and config.threads > 0: + config.regularization_mip_threads = config.threads + config.logger.info('Set regularization_mip_threads equal to threads') + if config.single_tree: + config.add_cuts_at_incumbent = True + if config.mip_regularization_solver is None: + config.mip_regularization_solver = config.mip_solver + if config.single_tree: + config.logger.info('Single-tree implementation is activated.') + config.iteration_limit = 1 + config.add_slack = False + if config.mip_solver not in {'cplex_persistent', 'gurobi_persistent'}: + raise ValueError( + "Only cplex_persistent and gurobi_persistent are supported for LP/NLP based Branch and Bound method." + "Please refer to https://pyomo.readthedocs.io/en/stable/contributed_packages/mindtpy.html#lp-nlp-based-branch-and-bound." + ) + if config.threads > 1: + config.threads = 1 + config.logger.info( + 'The threads parameter is corrected to 1 since lazy constraint callback conflicts with multi-threads mode.' + ) + if config.heuristic_nonconvex: + config.equality_relaxation = True + config.add_slack = True + if config.equality_relaxation: + config.calculate_dual_at_solution = True + if config.init_strategy == 'FP' or config.add_regularization is not None: + config.move_objective = True + if config.add_regularization is not None: + if config.add_regularization in { + 'level_L1', + 'level_L_infinity', + 'grad_lag', + }: + self.regularization_mip_type = 'MILP' + elif config.add_regularization in { + 'level_L2', + 'hess_lag', + 'hess_only_lag', + 'sqp_lag', + }: + self.regularization_mip_type = 'MIQP' + _MindtPyAlgorithm.check_config(self) + + def initialize_mip_problem(self): + '''Deactivate the nonlinear constraints to create the MIP problem.''' + super().initialize_mip_problem() + self.jacobians = calc_jacobians(self.mip, self.config) # preload jacobians + self.mip.MindtPy_utils.cuts.oa_cuts = ConstraintList( + doc='Outer approximation cuts' + ) + + def add_cuts( + self, dual_values, linearize_active=True, linearize_violated=True, cb_opt=None + ): + add_oa_cuts( + self.mip, + dual_values, + self.jacobians, + self.objective_sense, + self.mip_constraint_polynomial_degree, + self.mip_iter, + self.config, + self.timing, + cb_opt, + linearize_active, + linearize_violated, + ) + + def deactivate_no_good_cuts_when_fixing_bound(self, no_good_cuts): + # Only deactivate the last OA cuts may not be correct. + # Since integer solution may also be cut off by OA cuts due to calculation approximation. + if self.config.add_no_good_cuts: + no_good_cuts[len(no_good_cuts)].deactivate() + if self.config.use_tabu_list: + self.integer_list = self.integer_list[:-1] + + def objective_reformulation(self): + # In the process_objective function, as long as the objective function is nonlinear, it will be reformulated and the variable/constraint/objective lists will be updated. + # For OA/GOA/LP-NLP algorithm, if the objective function is linear, it will not be reformulated as epigraph constraint. + # If the objective function is linear, it will be reformulated as epigraph constraint only if the Feasibility Pump or ROA/RLP-NLP algorithm is activated. (move_objective = True) + # In some cases, the variable/constraint/objective lists will not be updated even if the objective is epigraph-reformulated. + # In Feasibility Pump, since the distance calculation only includes discrete variables and the epigraph slack variables are continuous variables, the Feasibility Pump algorithm will not affected even if the variable list are updated. + # In ROA and RLP/NLP, since the distance calculation does not include these epigraph slack variables, they should not be added to the variable list. (update_var_con_list = False) + # In the process_objective function, once the objective function has been reformulated as epigraph constraint, the variable/constraint/objective lists will not be updated only if the MINLP has a linear objective function and regularization is activated at the same time. + # This is because the epigraph constraint is very "flat" for branching rules. The original objective function will be used for the main problem and epigraph reformulation will be used for the projection problem. + MindtPy = self.working_model.MindtPy_utils + config = self.config + self.process_objective(update_var_con_list=config.add_regularization is None) + # The epigraph constraint is very "flat" for branching rules. + # If ROA/RLP-NLP is activated and the original objective function is linear, we will use the original objective for the main mip. + if ( + MindtPy.objective_list[0].expr.polynomial_degree() + in self.mip_objective_polynomial_degree + and config.add_regularization is not None + ): + MindtPy.objective_list[0].activate() + MindtPy.objective_constr.deactivate() + MindtPy.objective.deactivate() diff --git a/pyomo/contrib/mindtpy/plugins.py b/pyomo/contrib/mindtpy/plugins.py index 6a24a96cd49..f25706d086a 100644 --- a/pyomo/contrib/mindtpy/plugins.py +++ b/pyomo/contrib/mindtpy/plugins.py @@ -9,5 +9,10 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.contrib.mindtpy.MindtPy + import pyomo.contrib.mindtpy.outer_approximation + import pyomo.contrib.mindtpy.extended_cutting_plane + import pyomo.contrib.mindtpy.global_outer_approximation + import pyomo.contrib.mindtpy.feasibility_pump diff --git a/pyomo/contrib/mindtpy/single_tree.py b/pyomo/contrib/mindtpy/single_tree.py index a62671440ac..9776920f434 100644 --- a/pyomo/contrib/mindtpy/single_tree.py +++ b/pyomo/contrib/mindtpy/single_tree.py @@ -9,35 +9,43 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from __future__ import division from pyomo.common.dependencies import attempt_import from pyomo.solvers.plugins.solvers.gurobi_direct import gurobipy from pyomo.contrib.mindtpy.cut_generation import add_oa_cuts, add_no_good_cuts -from pyomo.contrib.mindtpy.mip_solve import handle_main_optimal, solve_main, handle_regularization_main_tc -from pyomo.opt.results import ProblemSense from pyomo.contrib.mcpp.pyomo_mcpp import McCormick as mc, MCPP_Error -import logging from pyomo.repn import generate_standard_repn -from pyomo.core.expr import current as EXPR -import pyomo.environ as pyo +import pyomo.core.expr as EXPR from math import copysign -from pyomo.contrib.mindtpy.util import get_integer_solution, update_dual_bound, update_primal_bound -from pyomo.contrib.gdpopt.util import copy_var_list_values, get_main_elapsed_time, time_code -from pyomo.contrib.mindtpy.nlp_solve import solve_subproblem, solve_feasibility_subproblem, handle_nlp_subproblem_tc +from pyomo.contrib.mindtpy.util import get_integer_solution +from pyomo.contrib.gdpopt.util import ( + copy_var_list_values, + get_main_elapsed_time, + time_code, +) from pyomo.opt import TerminationCondition as tc -from pyomo.core import Constraint, minimize, value, maximize -from pyomo.core.expr.current import identify_variables -cplex, cplex_available = attempt_import('cplex') +from pyomo.core import minimize, value +from pyomo.core.expr import identify_variables +cplex, cplex_available = attempt_import('cplex') -class LazyOACallback_cplex(cplex.callbacks.LazyConstraintCallback if cplex_available else object): - """Inherent class in Cplex to call Lazy callback.""" - def copy_lazy_var_list_values(self, opt, from_list, to_list, config, - skip_stale=False, skip_fixed=True, - ignore_integrality=False): +class LazyOACallback_cplex( + cplex.callbacks.LazyConstraintCallback if cplex_available else object +): + """Inherent class in CPLEX to call Lazy callback.""" + + def copy_lazy_var_list_values( + self, + opt, + from_list, + to_list, + config, + skip_stale=False, + skip_fixed=True, + ignore_integrality=False, + ): """This function copies variable values from one list to another. - + Rounds to Binary/Integer if necessary. Sets to zero for NonNegativeReals if necessary. @@ -63,8 +71,7 @@ def copy_lazy_var_list_values(self, opt, from_list, to_list, config, continue # Skip stale variable values. if skip_fixed and v_to.is_fixed(): continue # Skip fixed variables. - v_val = self.get_values( - opt._pyomo_var_to_solver_var_map[v_from]) + v_val = self.get_values(opt._pyomo_var_to_solver_var_map[v_from]) try: # We don't want to trigger the reset of the global stale # indicator, so we will set this variable to be "stale", @@ -77,26 +84,44 @@ def copy_lazy_var_list_values(self, opt, from_list, to_list, config, # will always succeed and the ValueError should never be # raised. v_to.set_value(v_val, skip_validation=True) - except ValueError: + except ValueError as e: # Snap the value to the bounds - if v_to.has_lb() and v_val < v_to.lb and v_to.lb - v_val <= config.variable_tolerance: + config.logger.error(e) + if ( + v_to.has_lb() + and v_val < v_to.lb + and v_to.lb - v_val <= config.variable_tolerance + ): v_to.set_value(v_to.lb, skip_validation=True) - elif v_to.has_ub() and v_val > v_to.ub and v_val - v_to.ub <= config.variable_tolerance: + elif ( + v_to.has_ub() + and v_val > v_to.ub + and v_val - v_to.ub <= config.variable_tolerance + ): v_to.set_value(v_to.ub, skip_validation=True) # ... or the nearest integer elif v_to.is_integer(): rounded_val = int(round(v_val)) - if (ignore_integrality or abs(v_val - rounded_val) <= config.integer_tolerance) \ - and rounded_val in v_to.domain: + if ( + ignore_integrality + or abs(v_val - rounded_val) <= config.integer_tolerance + ) and rounded_val in v_to.domain: v_to.set_value(rounded_val, skip_validation=True) else: raise - def add_lazy_oa_cuts(self, target_model, dual_values, solve_data, config, opt, - linearize_active=True, - linearize_violated=True): - """Linearizes nonlinear constraints; add the OA cuts through Cplex inherent function self.add() - For nonconvex problems, turn on 'config.add_slack'. Slack variables will always be used for + def add_lazy_oa_cuts( + self, + target_model, + dual_values, + mindtpy_solver, + config, + opt, + linearize_active=True, + linearize_violated=True, + ): + """Linearizes nonlinear constraints; add the OA cuts through CPLEX inherent function self.add() + For nonconvex problems, turn on 'config.add_slack'. Slack variables will always be used for nonlinear equality constraints. Parameters @@ -105,8 +130,8 @@ def add_lazy_oa_cuts(self, target_model, dual_values, solve_data, config, opt, The MIP main problem. dual_values : list The value of the duals for each constraint. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory @@ -117,79 +142,166 @@ def add_lazy_oa_cuts(self, target_model, dual_values, solve_data, config, opt, Whether to linearize the violated nonlinear constraints, by default True. """ config.logger.debug('Adding OA cuts') - with time_code(solve_data.timing, 'OA cut generation'): + with time_code(mindtpy_solver.timing, 'OA cut generation'): for index, constr in enumerate(target_model.MindtPy_utils.constraint_list): - if constr.body.polynomial_degree() in solve_data.mip_constraint_polynomial_degree: + if ( + constr.body.polynomial_degree() + in mindtpy_solver.mip_constraint_polynomial_degree + ): continue constr_vars = list(identify_variables(constr.body)) - jacs = solve_data.jacobians + jacs = mindtpy_solver.jacobians # Equality constraint (makes the problem nonconvex) - if constr.has_ub() and constr.has_lb() and value(constr.lower) == value(constr.upper): - sign_adjust = -1 if solve_data.objective_sense == minimize else 1 + if ( + constr.has_ub() + and constr.has_lb() + and value(constr.lower) == value(constr.upper) + ): + sign_adjust = ( + -1 if mindtpy_solver.objective_sense == minimize else 1 + ) rhs = constr.lower - # since the cplex requires the lazy cuts in cplex type, we need to transform the pyomo expression into cplex expression - pyomo_expr = copysign(1, sign_adjust * dual_values[index]) * (sum(value(jacs[constr][var]) * ( - var - value(var)) for var in EXPR.identify_variables(constr.body)) + value(constr.body) - rhs) + # Since CPLEX requires the lazy cuts in CPLEX type, + # we need to transform the pyomo expression into CPLEX expression. + pyomo_expr = copysign(1, sign_adjust * dual_values[index]) * ( + sum( + value(jacs[constr][var]) * (var - value(var)) + for var in EXPR.identify_variables(constr.body) + ) + + value(constr.body) + - rhs + ) cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr) cplex_rhs = -generate_standard_repn(pyomo_expr).constant - self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients), - sense='L', - rhs=cplex_rhs) + self.add( + constraint=cplex.SparsePair( + ind=cplex_expr.variables, val=cplex_expr.coefficients + ), + sense='L', + rhs=cplex_rhs, + ) + if ( + self.get_solution_source() + == cplex.callbacks.SolutionSource.mipstart_solution + ): + mindtpy_solver.mip_start_lazy_oa_cuts.append( + [ + cplex.SparsePair( + ind=cplex_expr.variables, + val=cplex_expr.coefficients, + ), + 'L', + cplex_rhs, + ] + ) else: # Inequality constraint (possibly two-sided) - if (constr.has_ub() - and (linearize_active and abs(constr.uslack()) < config.zero_tolerance) - or (linearize_violated and constr.uslack() < 0) - or (config.linearize_inactive and constr.uslack() > 0)) or ('MindtPy_utils.objective_constr' in constr.name and constr.has_ub()): - + if ( + constr.has_ub() + and ( + linearize_active + and abs(constr.uslack()) < config.zero_tolerance + ) + or (linearize_violated and constr.uslack() < 0) + or (config.linearize_inactive and constr.uslack() > 0) + ) or ( + 'MindtPy_utils.objective_constr' in constr.name + and constr.has_ub() + ): + pyomo_expr = sum( + value(jacs[constr][var]) * (var - var.value) + for var in constr_vars + ) + value(constr.body) + cplex_rhs = -generate_standard_repn(pyomo_expr).constant + cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr) + self.add( + constraint=cplex.SparsePair( + ind=cplex_expr.variables, val=cplex_expr.coefficients + ), + sense='L', + rhs=value(constr.upper) + cplex_rhs, + ) + if ( + self.get_solution_source() + == cplex.callbacks.SolutionSource.mipstart_solution + ): + mindtpy_solver.mip_start_lazy_oa_cuts.append( + [ + cplex.SparsePair( + ind=cplex_expr.variables, + val=cplex_expr.coefficients, + ), + 'L', + value(constr.upper) + cplex_rhs, + ] + ) + if ( + constr.has_lb() + and ( + linearize_active + and abs(constr.lslack()) < config.zero_tolerance + ) + or (linearize_violated and constr.lslack() < 0) + or (config.linearize_inactive and constr.lslack() > 0) + ) or ( + 'MindtPy_utils.objective_constr' in constr.name + and constr.has_lb() + ): pyomo_expr = sum( - value(jacs[constr][var])*(var - var.value) for var in constr_vars) + value(constr.body) - cplex_rhs = - \ - generate_standard_repn(pyomo_expr).constant - cplex_expr, _ = opt._get_expr_from_pyomo_expr( - pyomo_expr) - self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients), - sense='L', - rhs=value(constr.upper) + cplex_rhs) - if (constr.has_lb() - and (linearize_active and abs(constr.lslack()) < config.zero_tolerance) - or (linearize_violated and constr.lslack() < 0) - or (config.linearize_inactive and constr.lslack() > 0)) or ('MindtPy_utils.objective_constr' in constr.name and constr.has_lb()): - pyomo_expr = sum(value(jacs[constr][var]) * (var - self.get_values( - opt._pyomo_var_to_solver_var_map[var])) for var in constr_vars) + value(constr.body) - cplex_rhs = - \ - generate_standard_repn(pyomo_expr).constant - cplex_expr, _ = opt._get_expr_from_pyomo_expr( - pyomo_expr) - self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients), - sense='G', - rhs=value(constr.lower) + cplex_rhs) - - def add_lazy_affine_cuts(self, solve_data, config, opt): + value(jacs[constr][var]) + * ( + var + - self.get_values(opt._pyomo_var_to_solver_var_map[var]) + ) + for var in constr_vars + ) + value(constr.body) + cplex_rhs = -generate_standard_repn(pyomo_expr).constant + cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr) + self.add( + constraint=cplex.SparsePair( + ind=cplex_expr.variables, val=cplex_expr.coefficients + ), + sense='G', + rhs=value(constr.lower) + cplex_rhs, + ) + if ( + self.get_solution_source() + == cplex.callbacks.SolutionSource.mipstart_solution + ): + mindtpy_solver.mip_start_lazy_oa_cuts.append( + [ + cplex.SparsePair( + ind=cplex_expr.variables, + val=cplex_expr.coefficients, + ), + 'G', + value(constr.lower) + cplex_rhs, + ] + ) + + def add_lazy_affine_cuts(self, mindtpy_solver, config, opt): """Adds affine cuts using MCPP. - Add affine cuts through Cplex inherent function self.add(). + Add affine cuts through CPLEX inherent function self.add(). Parameters ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory The cplex_persistent solver. """ - with time_code(solve_data.timing, 'Affine cut generation'): - m = solve_data.mip + with time_code(mindtpy_solver.timing, 'Affine cut generation'): + m = mindtpy_solver.mip config.logger.debug('Adding affine cuts') counter = 0 for constr in m.MindtPy_utils.nonlinear_constraint_list: - - vars_in_constr = list( - identify_variables(constr.body)) + vars_in_constr = list(identify_variables(constr.body)) if any(var.value is None for var in vars_in_constr): continue # a variable has no values @@ -198,7 +310,9 @@ def add_lazy_affine_cuts(self, solve_data, config, opt): mc_eqn = mc(constr.body) except MCPP_Error as e: config.logger.debug( - 'Skipping constraint %s due to MCPP error %s' % (constr.name, str(e))) + 'Skipping constraint %s due to MCPP error %s' + % (constr.name, str(e)) + ) continue # skip to the next constraint # TODO: check if the value of ccSlope and cvSlope is not Nan or inf. If so, we skip this. ccSlope = mc_eqn.subcc() @@ -226,39 +340,69 @@ def add_lazy_affine_cuts(self, solve_data, config, opt): if not (concave_cut_valid or convex_cut_valid): continue - ub_int = min(value(constr.upper), mc_eqn.upper() - ) if constr.has_ub() else mc_eqn.upper() - lb_int = max(value(constr.lower), mc_eqn.lower() - ) if constr.has_lb() else mc_eqn.lower() + ub_int = ( + min(value(constr.upper), mc_eqn.upper()) + if constr.has_ub() + else mc_eqn.upper() + ) + lb_int = ( + max(value(constr.lower), mc_eqn.lower()) + if constr.has_lb() + else mc_eqn.lower() + ) if concave_cut_valid: - pyomo_concave_cut = sum(ccSlope[var] * (var - var.value) - for var in vars_in_constr - if not var.fixed) + ccStart + pyomo_concave_cut = ( + sum( + ccSlope[var] * (var - var.value) + for var in vars_in_constr + if not var.fixed + ) + + ccStart + ) cplex_concave_rhs = generate_standard_repn( - pyomo_concave_cut).constant + pyomo_concave_cut + ).constant cplex_concave_cut, _ = opt._get_expr_from_pyomo_expr( - pyomo_concave_cut) - self.add(constraint=cplex.SparsePair(ind=cplex_concave_cut.variables, val=cplex_concave_cut.coefficients), - sense='G', - rhs=lb_int - cplex_concave_rhs) + pyomo_concave_cut + ) + self.add( + constraint=cplex.SparsePair( + ind=cplex_concave_cut.variables, + val=cplex_concave_cut.coefficients, + ), + sense='G', + rhs=lb_int - cplex_concave_rhs, + ) counter += 1 if convex_cut_valid: - pyomo_convex_cut = sum(cvSlope[var] * (var - var.value) - for var in vars_in_constr - if not var.fixed) + cvStart - cplex_convex_rhs = generate_standard_repn( - pyomo_convex_cut).constant + pyomo_convex_cut = ( + sum( + cvSlope[var] * (var - var.value) + for var in vars_in_constr + if not var.fixed + ) + + cvStart + ) + cplex_convex_rhs = generate_standard_repn(pyomo_convex_cut).constant cplex_convex_cut, _ = opt._get_expr_from_pyomo_expr( - pyomo_convex_cut) - self.add(constraint=cplex.SparsePair(ind=cplex_convex_cut.variables, val=cplex_convex_cut.coefficients), - sense='L', - rhs=ub_int - cplex_convex_rhs) + pyomo_convex_cut + ) + self.add( + constraint=cplex.SparsePair( + ind=cplex_convex_cut.variables, + val=cplex_convex_cut.coefficients, + ), + sense='L', + rhs=ub_int - cplex_convex_rhs, + ) counter += 1 - config.logger.info('Added %s affine cuts' % counter) + config.logger.debug('Added %s affine cuts' % counter) - def add_lazy_no_good_cuts(self, var_values, solve_data, config, opt, feasible=False): + def add_lazy_no_good_cuts( + self, var_values, mindtpy_solver, config, opt, feasible=False + ): """Adds no-good cuts. Add the no-good cuts through Cplex inherent function self.add(). @@ -267,8 +411,8 @@ def add_lazy_no_good_cuts(self, var_values, solve_data, config, opt, feasible=Fa ---------- var_values : list The variable values of the incumbent solution, used to generate the cut. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory @@ -284,9 +428,9 @@ def add_lazy_no_good_cuts(self, var_values, solve_data, config, opt, feasible=Fa if not config.add_no_good_cuts: return - config.logger.info('Adding no-good cuts') - with time_code(solve_data.timing, 'No-good cut generation'): - m = solve_data.mip + config.logger.debug('Adding no-good cuts') + with time_code(mindtpy_solver.timing, 'No-good cut generation'): + m = mindtpy_solver.mip MindtPy = m.MindtPy_utils int_tol = config.integer_tolerance @@ -306,25 +450,29 @@ def add_lazy_no_good_cuts(self, var_values, solve_data, config, opt, feasible=Fa # check to make sure that binary variables are all 0 or 1 for v in binary_vars: if value(abs(v - 1)) > int_tol and value(abs(v)) > int_tol: - raise ValueError('Binary {} = {} is not 0 or 1'.format( - v.name, value(v))) + raise ValueError( + 'Binary {} = {} is not 0 or 1'.format(v.name, value(v)) + ) if not binary_vars: # if no binary variables, skip return - pyomo_no_good_cut = sum(1 - v for v in binary_vars if value(abs(v - 1)) - <= int_tol) + sum(v for v in binary_vars if value(abs(v)) <= int_tol) - cplex_no_good_rhs = generate_standard_repn( - pyomo_no_good_cut).constant - cplex_no_good_cut, _ = opt._get_expr_from_pyomo_expr( - pyomo_no_good_cut) - - self.add(constraint=cplex.SparsePair(ind=cplex_no_good_cut.variables, val=cplex_no_good_cut.coefficients), - sense='G', - rhs=1 - cplex_no_good_rhs) - - def handle_lazy_main_feasible_solution(self, main_mip, solve_data, config, opt): - """This function is called during the branch and bound of main mip, more + pyomo_no_good_cut = sum( + 1 - v for v in binary_vars if value(abs(v - 1)) <= int_tol + ) + sum(v for v in binary_vars if value(abs(v)) <= int_tol) + cplex_no_good_rhs = generate_standard_repn(pyomo_no_good_cut).constant + cplex_no_good_cut, _ = opt._get_expr_from_pyomo_expr(pyomo_no_good_cut) + + self.add( + constraint=cplex.SparsePair( + ind=cplex_no_good_cut.variables, val=cplex_no_good_cut.coefficients + ), + sense='G', + rhs=1 - cplex_no_good_rhs, + ) + + def handle_lazy_main_feasible_solution(self, main_mip, mindtpy_solver, config, opt): + """This function is called during the branch and bound of main mip, more exactly when a feasible solution is found and LazyCallback is activated. Copy the result to working model and update upper or lower bound. In LP-NLP, upper or lower bound are updated during solving the main problem. @@ -333,8 +481,8 @@ def handle_lazy_main_feasible_solution(self, main_mip, solve_data, config, opt): ---------- main_mip : Pyomo model The MIP main problem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory @@ -343,25 +491,37 @@ def handle_lazy_main_feasible_solution(self, main_mip, solve_data, config, opt): # proceed. Just need integer values # this value copy is useful since we need to fix subproblem based on the solution of the main problem - self.copy_lazy_var_list_values(opt, - main_mip.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config) - update_dual_bound(solve_data, self.get_best_objective_value()) - config.logger.info(solve_data.log_formatter.format(solve_data.mip_iter, 'restrLP', self.get_objective_value(), - solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap, get_main_elapsed_time(solve_data.timing))) - - def handle_lazy_subproblem_optimal(self, fixed_nlp, solve_data, config, opt): + self.copy_lazy_var_list_values( + opt, + main_mip.MindtPy_utils.variable_list, + mindtpy_solver.fixed_nlp.MindtPy_utils.variable_list, + config, + skip_fixed=False, + ) + mindtpy_solver.update_dual_bound(self.get_best_objective_value()) + config.logger.info( + mindtpy_solver.log_formatter.format( + mindtpy_solver.mip_iter, + 'restrLP', + self.get_objective_value(), + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, + mindtpy_solver.rel_gap, + get_main_elapsed_time(mindtpy_solver.timing), + ) + ) + + def handle_lazy_subproblem_optimal(self, fixed_nlp, mindtpy_solver, config, opt): """This function copies the optimal solution of the fixed NLP subproblem to the MIP - main problem(explanation see below), updates bound, adds OA and no-good cuts, + main problem(explanation see below), updates bound, adds OA and no-good cuts, stores incumbent solution if it has been improved. Parameters ---------- fixed_nlp : Pyomo model Integer-variable-fixed NLP model. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory @@ -371,53 +531,79 @@ def handle_lazy_subproblem_optimal(self, fixed_nlp, solve_data, config, opt): for c in fixed_nlp.tmp_duals: if fixed_nlp.dual.get(c, None) is None: fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c] - dual_values = list(fixed_nlp.dual[c] - for c in fixed_nlp.MindtPy_utils.constraint_list) + elif ( + config.nlp_solver == 'cyipopt' + and mindtpy_solver.objective_sense == minimize + ): + # TODO: recover the opposite dual when cyipopt issue #2831 is solved. + fixed_nlp.dual[c] = -fixed_nlp.dual[c] + dual_values = list( + fixed_nlp.dual[c] for c in fixed_nlp.MindtPy_utils.constraint_list + ) else: dual_values = None main_objective = fixed_nlp.MindtPy_utils.objective_list[-1] - update_primal_bound(solve_data, value(main_objective.expr)) - if solve_data.primal_bound_improved: - solve_data.best_solution_found = fixed_nlp.clone() - solve_data.best_solution_found_time = get_main_elapsed_time( - solve_data.timing) + mindtpy_solver.update_primal_bound(value(main_objective.expr)) + if mindtpy_solver.primal_bound_improved: + mindtpy_solver.best_solution_found = fixed_nlp.clone() + mindtpy_solver.best_solution_found_time = get_main_elapsed_time( + mindtpy_solver.timing + ) if config.add_no_good_cuts or config.use_tabu_list: - solve_data.stored_bound.update( - {solve_data.primal_bound: solve_data.dual_bound}) + mindtpy_solver.stored_bound.update( + {mindtpy_solver.primal_bound: mindtpy_solver.dual_bound} + ) config.logger.info( - solve_data.fixed_nlp_log_formatter.format('*' if solve_data.primal_bound_improved else ' ', - solve_data.nlp_iter, 'Fixed NLP', value( - main_objective.expr), - solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing))) + mindtpy_solver.fixed_nlp_log_formatter.format( + '*' if mindtpy_solver.primal_bound_improved else ' ', + mindtpy_solver.nlp_iter, + 'Fixed NLP', + value(main_objective.expr), + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, + mindtpy_solver.rel_gap, + get_main_elapsed_time(mindtpy_solver.timing), + ) + ) # In OA algorithm, OA cuts are generated based on the solution of the subproblem # We need to first copy the value of variables from the subproblem and then add cuts # since value(constr.body), value(jacs[constr][var]), value(var) are used in self.add_lazy_oa_cuts() - copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, - config) + copy_var_list_values( + fixed_nlp.MindtPy_utils.variable_list, + mindtpy_solver.mip.MindtPy_utils.variable_list, + config, + ) if config.strategy == 'OA': self.add_lazy_oa_cuts( - solve_data.mip, dual_values, solve_data, config, opt) + mindtpy_solver.mip, dual_values, mindtpy_solver, config, opt + ) if config.add_regularization is not None: - add_oa_cuts(solve_data.mip, dual_values, solve_data, config) + add_oa_cuts( + mindtpy_solver.mip, + dual_values, + mindtpy_solver.jacobians, + mindtpy_solver.objective_sense, + mindtpy_solver.mip_constraint_polynomial_degree, + mindtpy_solver.mip_iter, + config, + mindtpy_solver.timing, + ) elif config.strategy == 'GOA': - self.add_lazy_affine_cuts(solve_data, config, opt) + self.add_lazy_affine_cuts(mindtpy_solver, config, opt) if config.add_no_good_cuts: - var_values = list( - v.value for v in fixed_nlp.MindtPy_utils.variable_list) - self.add_lazy_no_good_cuts(var_values, solve_data, config, opt) + var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) + self.add_lazy_no_good_cuts(var_values, mindtpy_solver, config, opt) - def handle_lazy_subproblem_infeasible(self, fixed_nlp, solve_data, config, opt): + def handle_lazy_subproblem_infeasible(self, fixed_nlp, mindtpy_solver, config, opt): """Solves feasibility NLP subproblem and adds cuts according to the specified strategy. Parameters ---------- fixed_nlp : Pyomo model Integer-variable-fixed NLP model. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. opt : SolverFactory @@ -426,41 +612,58 @@ def handle_lazy_subproblem_infeasible(self, fixed_nlp, solve_data, config, opt): # TODO try something else? Reinitialize with different initial # value? config.logger.info('NLP subproblem was locally infeasible.') - solve_data.nlp_infeasible_counter += 1 + mindtpy_solver.nlp_infeasible_counter += 1 if config.calculate_dual_at_solution: for c in fixed_nlp.MindtPy_utils.constraint_list: - rhs = ((0 if c.upper is None else c.upper) - + (0 if c.lower is None else c.lower)) + rhs = (0 if c.upper is None else c.upper) + ( + 0 if c.lower is None else c.lower + ) sign_adjust = 1 if c.upper is None else -1 - fixed_nlp.dual[c] = (sign_adjust - * max(0, sign_adjust * (rhs - value(c.body)))) - dual_values = list(fixed_nlp.dual[c] - for c in fixed_nlp.MindtPy_utils.constraint_list) + fixed_nlp.dual[c] = sign_adjust * max( + 0, sign_adjust * (rhs - value(c.body)) + ) + dual_values = list( + fixed_nlp.dual[c] for c in fixed_nlp.MindtPy_utils.constraint_list + ) else: dual_values = None config.logger.info('Solving feasibility problem') - feas_subproblem, feas_subproblem_results = solve_feasibility_subproblem( - solve_data, config) + ( + feas_subproblem, + feas_subproblem_results, + ) = mindtpy_solver.solve_feasibility_subproblem() # In OA algorithm, OA cuts are generated based on the solution of the subproblem # We need to first copy the value of variables from the subproblem and then add cuts - copy_var_list_values(feas_subproblem.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, - config) + copy_var_list_values( + feas_subproblem.MindtPy_utils.variable_list, + mindtpy_solver.mip.MindtPy_utils.variable_list, + config, + ) if config.strategy == 'OA': self.add_lazy_oa_cuts( - solve_data.mip, dual_values, solve_data, config, opt) + mindtpy_solver.mip, dual_values, mindtpy_solver, config, opt + ) if config.add_regularization is not None: - add_oa_cuts(solve_data.mip, dual_values, solve_data, config) + add_oa_cuts( + mindtpy_solver.mip, + dual_values, + mindtpy_solver.jacobians, + mindtpy_solver.objective_sense, + mindtpy_solver.mip_constraint_polynomial_degree, + mindtpy_solver.mip_iter, + config, + mindtpy_solver.timing, + ) elif config.strategy == 'GOA': - self.add_lazy_affine_cuts(solve_data, config, opt) + self.add_lazy_affine_cuts(mindtpy_solver, config, opt) if config.add_no_good_cuts: - var_values = list( - v.value for v in fixed_nlp.MindtPy_utils.variable_list) - self.add_lazy_no_good_cuts(var_values, solve_data, config, opt) + var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) + self.add_lazy_no_good_cuts(var_values, mindtpy_solver, config, opt) - def handle_lazy_subproblem_other_termination(self, fixed_nlp, termination_condition, - solve_data, config): + def handle_lazy_subproblem_other_termination( + self, fixed_nlp, termination_condition, mindtpy_solver, config + ): """Handles the result of the latest iteration of solving the NLP subproblem given a solution that is neither optimal nor infeasible. @@ -470,8 +673,8 @@ def handle_lazy_subproblem_other_termination(self, fixed_nlp, termination_condit Integer-variable-fixed NLP model. termination_condition : Pyomo TerminationCondition The termination condition of the fixed NLP subproblem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. @@ -483,195 +686,175 @@ def handle_lazy_subproblem_other_termination(self, fixed_nlp, termination_condit if termination_condition is tc.maxIterations: # TODO try something else? Reinitialize with different initial value? config.logger.info( - 'NLP subproblem failed to converge within iteration limit.') - var_values = list( - v.value for v in fixed_nlp.MindtPy_utils.variable_list) + 'NLP subproblem failed to converge within iteration limit.' + ) + var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list) else: raise ValueError( 'MindtPy unable to handle NLP subproblem termination ' - 'condition of {}'.format(termination_condition)) - - def handle_lazy_regularization_problem(self, main_mip, main_mip_results, solve_data, config): - """Handles the termination condition of the regularization main problem in RLP/NLP. - - Parameters - ---------- - main_mip : Pyomo model - The MIP main problem. - main_mip_results : SolverResults - Results from solving the regularization MIP problem. - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Raises - ------ - ValueError - MindtPy unable to handle the termination condition of the regularization problem. - ValueError - MindtPy unable to handle the termination condition of the regularization problem. - """ - if main_mip_results.solver.termination_condition in {tc.optimal, tc.feasible}: - handle_main_optimal( - main_mip, solve_data, config, update_bound=False) - elif main_mip_results.solver.termination_condition in {tc.infeasible, tc.infeasibleOrUnbounded}: - config.logger.info(solve_data.log_note_formatter.format( - solve_data.mip_iter, 'Reg '+solve_data.regularization_mip_type, 'infeasible')) - if config.reduce_level_coef: - config.level_coef = config.level_coef / 2 - main_mip, main_mip_results = solve_main( - solve_data, config, regularization_problem=True) - if main_mip_results.solver.termination_condition in {tc.optimal, tc.feasible}: - handle_main_optimal( - main_mip, solve_data, config, update_bound=False) - elif main_mip_results.solver.termination_condition is tc.infeasible: - config.logger.info('regularization problem still infeasible with reduced level_coef. ' - 'NLP subproblem is generated based on the incumbent solution of the main problem.') - elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: - config.logger.info( - 'Regularization problem failed to converge within the time limit.') - solve_data.results.solver.termination_condition = tc.maxTimeLimit - elif main_mip_results.solver.termination_condition is tc.unbounded: - config.logger.info( - 'Regularization problem ubounded.' - 'Sometimes solving MIQP using cplex, unbounded means infeasible.') - elif main_mip_results.solver.termination_condition is tc.unknown: - config.logger.info( - 'Termination condition of the regularization problem is unknown.') - if main_mip_results.problem.lower_bound != float('-inf'): - config.logger.info('Solution limit has been reached.') - handle_main_optimal( - main_mip, solve_data, config, update_bound=False) - else: - config.logger.info('No solution obtained from the regularization subproblem.' - 'Please set mip_solver_tee to True for more informations.' - 'The solution of the OA main problem will be adopted.') - else: - raise ValueError( - 'MindtPy unable to handle regularization problem termination condition ' - 'of %s. Solver message: %s' % - (main_mip_results.solver.termination_condition, main_mip_results.solver.message)) - elif config.use_bb_tree_incumbent: - config.logger.debug( - 'Fixed subproblem will be generated based on the incumbent solution of the main problem.') - elif main_mip_results.solver.termination_condition is tc.maxTimeLimit: - config.logger.info( - 'Regularization problem failed to converge within the time limit.') - solve_data.results.solver.termination_condition = tc.maxTimeLimit - elif main_mip_results.solver.termination_condition is tc.unbounded: - config.logger.info( - 'Regularization problem ubounded.' - 'Sometimes solving MIQP using cplex, unbounded means infeasible.') - elif main_mip_results.solver.termination_condition is tc.unknown: - config.logger.info( - 'Termination condition of the regularization problem is unknown.') - if main_mip_results.problem.lower_bound != float('-inf'): - config.logger.info('Solution limit has been reached.') - handle_main_optimal(main_mip, solve_data, - config, update_bound=False) - else: - raise ValueError( - 'MindtPy unable to handle regularization problem termination condition ' - 'of %s. Solver message: %s' % - (main_mip_results.solver.termination_condition, main_mip_results.solver.message)) + 'condition of {}'.format(termination_condition) + ) def __call__(self): - """This is an inherent function in LazyConstraintCallback in cplex. + """This is an inherent function in LazyConstraintCallback in CPLEX. This function is called whenever an integer solution is found during the branch and bound process. """ - solve_data = self.solve_data + mindtpy_solver = self.mindtpy_solver config = self.config opt = self.opt main_mip = self.main_mip - - if solve_data.should_terminate: + mindtpy_solver = self.mindtpy_solver + + # Reference: https://www.ibm.com/docs/en/icos/22.1.1?topic=SSSA5P_22.1.1/ilog.odms.cplex.help/refpythoncplex/html/cplex.callbacks.SolutionSource-class.htm + # Another solution source is user_solution = 118, but it will not be encountered in LazyConstraintCallback. + config.logger.debug( + "Solution source: %s (111 node_solution, 117 heuristic_solution, 119 mipstart_solution)".format( + self.get_solution_source() + ) + ) + + # The solution found in MIP start process might be revisited in branch and bound. + # Lazy constraints separated when processing a MIP start will be discarded after that MIP start has been processed. + # This means that the callback may have to separate the same constraint again for the next MIP start or for a solution that is found later in the solution process. + # https://www.ibm.com/docs/en/icos/22.1.1?topic=SSSA5P_22.1.1/ilog.odms.cplex.help/refpythoncplex/html/cplex.callbacks.LazyConstraintCallback-class.htm + if ( + self.get_solution_source() + != cplex.callbacks.SolutionSource.mipstart_solution + and len(mindtpy_solver.mip_start_lazy_oa_cuts) > 0 + ): + for constraint, sense, rhs in mindtpy_solver.mip_start_lazy_oa_cuts: + self.add(constraint, sense, rhs) + mindtpy_solver.mip_start_lazy_oa_cuts = [] + + if mindtpy_solver.should_terminate: self.abort() return - - self.handle_lazy_main_feasible_solution( - main_mip, solve_data, config, opt) - + self.handle_lazy_main_feasible_solution(main_mip, mindtpy_solver, config, opt) if config.add_cuts_at_incumbent: - self.copy_lazy_var_list_values(opt, - main_mip.MindtPy_utils.variable_list, - solve_data.mip.MindtPy_utils.variable_list, - config) + self.copy_lazy_var_list_values( + opt, + main_mip.MindtPy_utils.variable_list, + mindtpy_solver.mip.MindtPy_utils.variable_list, + config, + ) if config.strategy == 'OA': - self.add_lazy_oa_cuts( - solve_data.mip, None, solve_data, config, opt) + # The solution obtained from mip start might be infeasible and even introduce a math domain error, like log(-1). + try: + self.add_lazy_oa_cuts( + mindtpy_solver.mip, None, mindtpy_solver, config, opt + ) + except ValueError as e: + config.logger.error( + str(e) + + "\nUsually this error is caused by the MIP start solution causing a math domain error. " + "We will skip it." + ) + return # regularization is activated after the first feasible solution is found. - if config.add_regularization is not None and solve_data.best_solution_found is not None: + if ( + config.add_regularization is not None + and mindtpy_solver.best_solution_found is not None + ): # The main problem might be unbounded, regularization is activated only when a valid bound is provided. - if not solve_data.dual_bound_improved and not solve_data.primal_bound_improved: - config.logger.debug('The bound and the best found solution have neither been improved.' - 'We will skip solving the regularization problem and the Fixed-NLP subproblem') - solve_data.primal_bound_improved = False + if ( + not mindtpy_solver.dual_bound_improved + and not mindtpy_solver.primal_bound_improved + ): + config.logger.debug( + 'The bound and the best found solution have neither been improved.' + 'We will skip solving the regularization problem and the Fixed-NLP subproblem' + ) + mindtpy_solver.primal_bound_improved = False return - if solve_data.dual_bound != solve_data.dual_bound_progress[0]: - main_mip, main_mip_results = solve_main( - solve_data, config, regularization_problem=True) - self.handle_lazy_regularization_problem( - main_mip, main_mip_results, solve_data, config) - if abs(solve_data.primal_bound - solve_data.dual_bound) <= config.absolute_bound_tolerance: + if mindtpy_solver.dual_bound != mindtpy_solver.dual_bound_progress[0]: + mindtpy_solver.add_regularization() + if ( + abs(mindtpy_solver.primal_bound - mindtpy_solver.dual_bound) + <= config.absolute_bound_tolerance + ): config.logger.info( 'MindtPy exiting on bound convergence. ' '|Primal Bound: {} - Dual Bound: {}| <= (absolute tolerance {}) \n'.format( - solve_data.primal_bound, solve_data.dual_bound, config.absolute_bound_tolerance)) - solve_data.results.solver.termination_condition = tc.optimal + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, + config.absolute_bound_tolerance, + ) + ) + mindtpy_solver.results.solver.termination_condition = tc.optimal self.abort() return # check if the same integer combination is obtained. - solve_data.curr_int_sol = get_integer_solution( - solve_data.working_model, string_zero=True) - - if solve_data.curr_int_sol in set(solve_data.integer_list): - config.logger.debug('This integer combination has been explored. ' - 'We will skip solving the Fixed-NLP subproblem.') - solve_data.primal_bound_improved = False + mindtpy_solver.curr_int_sol = get_integer_solution( + mindtpy_solver.fixed_nlp, string_zero=True + ) + + if mindtpy_solver.curr_int_sol in set(mindtpy_solver.integer_list): + config.logger.debug( + 'This integer combination has been explored. ' + 'We will skip solving the Fixed-NLP subproblem.' + ) + mindtpy_solver.primal_bound_improved = False if config.strategy == 'GOA': if config.add_no_good_cuts: var_values = list( - v.value for v in solve_data.working_model.MindtPy_utils.variable_list) - self.add_lazy_no_good_cuts( - var_values, solve_data, config, opt) + v.value + for v in mindtpy_solver.working_model.MindtPy_utils.variable_list + ) + self.add_lazy_no_good_cuts(var_values, mindtpy_solver, config, opt) return elif config.strategy == 'OA': return else: - solve_data.integer_list.append(solve_data.curr_int_sol) + mindtpy_solver.integer_list.append(mindtpy_solver.curr_int_sol) # solve subproblem # The constraint linearization happens in the handlers - fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config) - + fixed_nlp, fixed_nlp_result = mindtpy_solver.solve_subproblem() # add oa cuts - if fixed_nlp_result.solver.termination_condition in {tc.optimal, tc.locallyOptimal, tc.feasible}: - self.handle_lazy_subproblem_optimal( - fixed_nlp, solve_data, config, opt) - if abs(solve_data.primal_bound - solve_data.dual_bound) <= config.absolute_bound_tolerance: + if fixed_nlp_result.solver.termination_condition in { + tc.optimal, + tc.locallyOptimal, + tc.feasible, + }: + self.handle_lazy_subproblem_optimal(fixed_nlp, mindtpy_solver, config, opt) + if ( + abs(mindtpy_solver.primal_bound - mindtpy_solver.dual_bound) + <= config.absolute_bound_tolerance + ): config.logger.info( 'MindtPy exiting on bound convergence. ' '|Primal Bound: {} - Dual Bound: {}| <= (absolute tolerance {}) \n'.format( - solve_data.primal_bound, solve_data.dual_bound, config.absolute_bound_tolerance)) - solve_data.results.solver.termination_condition = tc.optimal + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, + config.absolute_bound_tolerance, + ) + ) + mindtpy_solver.results.solver.termination_condition = tc.optimal return - elif fixed_nlp_result.solver.termination_condition in {tc.infeasible, tc.noSolution}: + elif fixed_nlp_result.solver.termination_condition in { + tc.infeasible, + tc.noSolution, + }: self.handle_lazy_subproblem_infeasible( - fixed_nlp, solve_data, config, opt) + fixed_nlp, mindtpy_solver, config, opt + ) else: - self.handle_lazy_subproblem_other_termination(fixed_nlp, fixed_nlp_result.solver.termination_condition, - solve_data, config) + self.handle_lazy_subproblem_other_termination( + fixed_nlp, + fixed_nlp_result.solver.termination_condition, + mindtpy_solver, + config, + ) # Gurobi -def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, solve_data, config): - """This is a GUROBI callback function defined for LP/NLP based B&B algorithm. +def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, mindtpy_solver, config): + """This is a Gurobi callback function defined for LP/NLP based B&B algorithm. Parameters ---------- @@ -681,76 +864,108 @@ def LazyOACallback_gurobi(cb_m, cb_opt, cb_where, solve_data, config): The gurobi_persistent solver. cb_where : int An enum member of gurobipy.GRB.Callback. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. """ if cb_where == gurobipy.GRB.Callback.MIPSOL: # gurobipy.GRB.Callback.MIPSOL means that an integer solution is found during the branch and bound process - if solve_data.should_terminate: + if mindtpy_solver.should_terminate: cb_opt._solver_model.terminate() return cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list) - handle_lazy_main_feasible_solution_gurobi( - cb_m, cb_opt, solve_data, config) + handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, mindtpy_solver, config) if config.add_cuts_at_incumbent: if config.strategy == 'OA': - add_oa_cuts(solve_data.mip, None, solve_data, config, cb_opt) + add_oa_cuts( + mindtpy_solver.mip, + None, + mindtpy_solver.jacobians, + mindtpy_solver.objective_sense, + mindtpy_solver.mip_constraint_polynomial_degree, + mindtpy_solver.mip_iter, + config, + mindtpy_solver.timing, + cb_opt=cb_opt, + ) # Regularization is activated after the first feasible solution is found. - if config.add_regularization is not None and solve_data.best_solution_found is not None: + if ( + config.add_regularization is not None + and mindtpy_solver.best_solution_found is not None + ): # The main problem might be unbounded, regularization is activated only when a valid bound is provided. - if not solve_data.dual_bound_improved and not solve_data.primal_bound_improved: - config.logger.debug('The bound and the best found solution have neither been improved.' - 'We will skip solving the regularization problem and the Fixed-NLP subproblem') - solve_data.primal_bound_improved = False + if ( + not mindtpy_solver.dual_bound_improved + and not mindtpy_solver.primal_bound_improved + ): + config.logger.debug( + 'The bound and the best found solution have neither been improved.' + 'We will skip solving the regularization problem and the Fixed-NLP subproblem' + ) + mindtpy_solver.primal_bound_improved = False return - if solve_data.dual_bound != solve_data.dual_bound_progress[0]: - main_mip, main_mip_results = solve_main( - solve_data, config, regularization_problem=True) - handle_regularization_main_tc( - main_mip, main_mip_results, solve_data, config) + if mindtpy_solver.dual_bound != mindtpy_solver.dual_bound_progress[0]: + mindtpy_solver.add_regularization() - if abs(solve_data.primal_bound - solve_data.dual_bound) <= config.absolute_bound_tolerance: + if ( + abs(mindtpy_solver.primal_bound - mindtpy_solver.dual_bound) + <= config.absolute_bound_tolerance + ): config.logger.info( 'MindtPy exiting on bound convergence. ' '|Primal Bound: {} - Dual Bound: {}| <= (absolute tolerance {}) \n'.format( - solve_data.primal_bound, solve_data.dual_bound, config.absolute_bound_tolerance)) - solve_data.results.solver.termination_condition = tc.optimal + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, + config.absolute_bound_tolerance, + ) + ) + mindtpy_solver.results.solver.termination_condition = tc.optimal cb_opt._solver_model.terminate() return - # # check if the same integer combination is obtained. - solve_data.curr_int_sol = get_integer_solution( - solve_data.working_model, string_zero=True) - - if solve_data.curr_int_sol in set(solve_data.integer_list): - config.logger.debug('This integer combination has been explored. ' - 'We will skip solving the Fixed-NLP subproblem.') - solve_data.primal_bound_improved = False + # check if the same integer combination is obtained. + mindtpy_solver.curr_int_sol = get_integer_solution( + mindtpy_solver.fixed_nlp, string_zero=True + ) + + if mindtpy_solver.curr_int_sol in set(mindtpy_solver.integer_list): + config.logger.debug( + 'This integer combination has been explored. ' + 'We will skip solving the Fixed-NLP subproblem.' + ) + mindtpy_solver.primal_bound_improved = False if config.strategy == 'GOA': if config.add_no_good_cuts: var_values = list( - v.value for v in solve_data.working_model.MindtPy_utils.variable_list) - add_no_good_cuts(var_values, solve_data, config) + v.value + for v in mindtpy_solver.fixed_nlp.MindtPy_utils.variable_list + ) + add_no_good_cuts( + mindtpy_solver.mip, + var_values, + config, + mindtpy_solver.timing, + mip_iter=mindtpy_solver.mip_iter, + cb_opt=cb_opt, + ) return elif config.strategy == 'OA': return else: - solve_data.integer_list.append(solve_data.curr_int_sol) + mindtpy_solver.integer_list.append(mindtpy_solver.curr_int_sol) # solve subproblem # The constraint linearization happens in the handlers - fixed_nlp, fixed_nlp_result = solve_subproblem(solve_data, config) + fixed_nlp, fixed_nlp_result = mindtpy_solver.solve_subproblem() - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_result, solve_data, config, cb_opt) + mindtpy_solver.handle_nlp_subproblem_tc(fixed_nlp, fixed_nlp_result, cb_opt) -def handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data, config): - """This function is called during the branch and bound of main MIP problem, +def handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, mindtpy_solver, config): + """This function is called during the branch and bound of main MIP problem, more exactly when a feasible solution is found and LazyCallback is activated. Copy the solution to working model and update upper or lower bound. @@ -762,19 +977,34 @@ def handle_lazy_main_feasible_solution_gurobi(cb_m, cb_opt, solve_data, config): The MIP main problem. cb_opt : SolverFactory The gurobi_persistent solver. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + mindtpy_solver : object + The mindtpy solver class. config : ConfigBlock The specific configurations for MindtPy. """ # proceed. Just need integer values cb_opt.cbGetSolution(vars=cb_m.MindtPy_utils.variable_list) # this value copy is useful since we need to fix subproblem based on the solution of the main problem - copy_var_list_values(cb_m.MindtPy_utils.variable_list, - solve_data.working_model.MindtPy_utils.variable_list, - config) - update_dual_bound(solve_data, cb_opt.cbGet( - gurobipy.GRB.Callback.MIPSOL_OBJBND)) - config.logger.info(solve_data.log_formatter.format(solve_data.mip_iter, 'restrLP', cb_opt.cbGet(gurobipy.GRB.Callback.MIPSOL_OBJ), - solve_data.primal_bound, solve_data.dual_bound, solve_data.rel_gap, - get_main_elapsed_time(solve_data.timing))) + copy_var_list_values( + cb_m.MindtPy_utils.variable_list, + mindtpy_solver.fixed_nlp.MindtPy_utils.variable_list, + config, + skip_fixed=False, + ) + copy_var_list_values( + cb_m.MindtPy_utils.variable_list, + mindtpy_solver.mip.MindtPy_utils.variable_list, + config, + ) + mindtpy_solver.update_dual_bound(cb_opt.cbGet(gurobipy.GRB.Callback.MIPSOL_OBJBND)) + config.logger.info( + mindtpy_solver.log_formatter.format( + mindtpy_solver.mip_iter, + 'restrLP', + cb_opt.cbGet(gurobipy.GRB.Callback.MIPSOL_OBJ), + mindtpy_solver.primal_bound, + mindtpy_solver.dual_bound, + mindtpy_solver.rel_gap, + get_main_elapsed_time(mindtpy_solver.timing), + ) + ) diff --git a/pyomo/contrib/mindtpy/tabu_list.py b/pyomo/contrib/mindtpy/tabu_list.py index 54e9d4d14db..313bd6f6271 100644 --- a/pyomo/contrib/mindtpy/tabu_list.py +++ b/pyomo/contrib/mindtpy/tabu_list.py @@ -1,31 +1,44 @@ -from cplex.callbacks import IncumbentCallback -from pyomo.core import Var +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +# +from pyomo.common.dependencies import attempt_import, UnavailableClass -class IncumbentCallback_cplex(IncumbentCallback): +cplex, cplex_available = attempt_import('cplex') + + +class IncumbentCallback_cplex( + cplex.callbacks.IncumbentCallback if cplex_available else UnavailableClass(cplex) +): """Inherent class in Cplex to call Incumbent callback.""" def __call__(self): """ - This is an inherent function in LazyConstraintCallback in cplex. + This is an inherent function in LazyConstraintCallback in CPLEX. This callback will be used after each new potential incumbent is found. https://www.ibm.com/support/knowledgecenter/SSSA5P_12.10.0/ilog.odms.cplex.help/refpythoncplex/html/cplex.callbacks.IncumbentCallback-class.html IncumbentCallback will be activated after Lazyconstraint callback, when the potential incumbent solution is satisfies the lazyconstraints. TODO: need to handle GOA same integer combination check in lazyconstraint callback in single_tree.py - TODO: integer_var_value_tuple can be replaced by solve_data.curr_int_sol """ - solve_data = self.solve_data + mindtpy_solver = self.mindtpy_solver opt = self.opt config = self.config if config.single_tree: self.reject() else: temp = [] - for var in solve_data.mip.MindtPy_utils.discrete_variable_list: - value = self.get_values( - opt._pyomo_var_to_solver_var_map[var]) + for var in mindtpy_solver.mip.MindtPy_utils.discrete_variable_list: + value = self.get_values(opt._pyomo_var_to_solver_var_map[var]) temp.append(int(round(value))) - integer_var_value = tuple(temp) + mindtpy_solver.curr_int_sol = tuple(temp) - if integer_var_value in set(solve_data.integer_list): + if mindtpy_solver.curr_int_sol in set(mindtpy_solver.integer_list): self.reject() diff --git a/pyomo/contrib/mindtpy/tests/MINLP2_simple.py b/pyomo/contrib/mindtpy/tests/MINLP2_simple.py index 0b1ce2cfe76..10da243d332 100644 --- a/pyomo/contrib/mindtpy/tests/MINLP2_simple.py +++ b/pyomo/contrib/mindtpy/tests/MINLP2_simple.py @@ -34,10 +34,18 @@ """ -from __future__ import division -from pyomo.environ import (Binary, ConcreteModel, Constraint, NonNegativeReals, - Objective, RangeSet, Var, minimize, log) +from pyomo.environ import ( + Binary, + ConcreteModel, + Constraint, + NonNegativeReals, + Objective, + RangeSet, + Var, + minimize, + log, +) from pyomo.common.collections import ComponentMap @@ -46,7 +54,7 @@ class SimpleMINLP(ConcreteModel): def __init__(self, *args, **kwargs): """Create the problem.""" - kwargs.setdefault('name', 'DuranEx1') + kwargs.setdefault('name', 'SimpleMINLP2') super(SimpleMINLP, self).__init__(*args, **kwargs) m = self @@ -63,25 +71,34 @@ def __init__(self, *args, **kwargs): # DISCRETE VARIABLES Y = m.Y = Var(J, domain=Binary, initialize=initY) # CONTINUOUS VARIABLES - X = m.X = Var(I, domain=NonNegativeReals, - initialize=initX, bounds=(0, 2)) + X = m.X = Var(I, domain=NonNegativeReals, initialize=initX, bounds=(0, 2)) """Constraint definitions""" # CONSTRAINTS - m.const1 = Constraint(expr=0.8*log(X[2] + 1) + 0.96*log(X[1] - X[2] + 1) - - 0.8*X[3] >= 0) - m.const2 = Constraint(expr=log(X[2] + 1) + 1.2*log(X[1] - X[2] + 1) - - X[3] - 2*Y[3] >= -2) - m.const3 = Constraint(expr=10*X[1] - 7*X[3] - - 18*log(X[2] + 1) - 19.2*log(X[1] - X[2] + 1) + 10 - X[4] <= 0) + m.const1 = Constraint( + expr=0.8 * log(X[2] + 1) + 0.96 * log(X[1] - X[2] + 1) - 0.8 * X[3] >= 0 + ) + m.const2 = Constraint( + expr=log(X[2] + 1) + 1.2 * log(X[1] - X[2] + 1) - X[3] - 2 * Y[3] >= -2 + ) + m.const3 = Constraint( + expr=10 * X[1] + - 7 * X[3] + - 18 * log(X[2] + 1) + - 19.2 * log(X[1] - X[2] + 1) + + 10 + - X[4] + <= 0 + ) m.const4 = Constraint(expr=X[2] - X[1] <= 0) - m.const5 = Constraint(expr=X[2] - 2*Y[1] <= 0) - m.const6 = Constraint(expr=X[1] - X[2] - 2*Y[2] <= 0) + m.const5 = Constraint(expr=X[2] - 2 * Y[1] <= 0) + m.const6 = Constraint(expr=X[1] - X[2] - 2 * Y[2] <= 0) m.const7 = Constraint(expr=Y[1] + Y[2] <= 1) """Cost (objective) function definition""" - m.objective = Objective(expr=+5*Y[1] + 6*Y[2] + - 8*Y[3] + X[4], sense=minimize) + m.objective = Objective( + expr=+5 * Y[1] + 6 * Y[2] + 8 * Y[3] + X[4], sense=minimize + ) """Bound definitions""" # x (continuous) upper bounds diff --git a/pyomo/contrib/mindtpy/tests/MINLP3_simple.py b/pyomo/contrib/mindtpy/tests/MINLP3_simple.py index b4eda8338cc..f387b0e26a1 100644 --- a/pyomo/contrib/mindtpy/tests/MINLP3_simple.py +++ b/pyomo/contrib/mindtpy/tests/MINLP3_simple.py @@ -29,18 +29,25 @@ """ -from __future__ import division -from pyomo.environ import (Binary, ConcreteModel, Constraint, Reals, - Objective, RangeSet, Var, minimize, log) +from pyomo.environ import ( + Binary, + ConcreteModel, + Constraint, + Reals, + Objective, + RangeSet, + Var, + minimize, + log, +) from pyomo.common.collections import ComponentMap class SimpleMINLP(ConcreteModel): - def __init__(self, *args, **kwargs): """Create the problem.""" - kwargs.setdefault('name', 'DuranEx1') + kwargs.setdefault('name', 'SimpleMINLP3') super(SimpleMINLP, self).__init__(*args, **kwargs) m = self @@ -57,18 +64,19 @@ def __init__(self, *args, **kwargs): # DISCRETE VARIABLES Y = m.Y = Var(J, domain=Binary, initialize=initY) # CONTINUOUS VARIABLES - X = m.X = Var(I, domain=Reals, initialize=initX, bounds=(-1, 50)) + X = m.X = Var(I, domain=Reals, initialize=initX, bounds=(-0.9, 50)) """Constraint definitions""" # CONSTRAINTS - m.const1 = Constraint(expr=-X[2] + 5*log(X[1] + 1) + 3*Y[1] >= 0) - m.const2 = Constraint(expr=-X[2] + X[1]**2 - Y[1] <= 1) - m.const3 = Constraint(expr=X[1] + X[2] + 20*Y[1] <= 24) - m.const4 = Constraint(expr=2*X[2] + 3*X[1] <= 10) + m.const1 = Constraint(expr=-X[2] + 5 * log(X[1] + 1) + 3 * Y[1] >= 0) + m.const2 = Constraint(expr=-X[2] + X[1] ** 2 - Y[1] <= 1) + m.const3 = Constraint(expr=X[1] + X[2] + 20 * Y[1] <= 24) + m.const4 = Constraint(expr=2 * X[2] + 3 * X[1] <= 10) """Cost (objective) function definition""" - m.objective = Objective(expr=10*X[1]**2 - X[2] + 5*(Y[1] - 1), - sense=minimize) + m.objective = Objective( + expr=10 * X[1] ** 2 - X[2] + 5 * (Y[1] - 1), sense=minimize + ) m.optimal_value = -5.512 m.optimal_solution = ComponentMap() m.optimal_solution[m.X[1]] = 0.20710677582302733 diff --git a/pyomo/contrib/mindtpy/tests/MINLP4_simple.py b/pyomo/contrib/mindtpy/tests/MINLP4_simple.py index f3fba325f5e..7b57c6b8f0d 100644 --- a/pyomo/contrib/mindtpy/tests/MINLP4_simple.py +++ b/pyomo/contrib/mindtpy/tests/MINLP4_simple.py @@ -14,9 +14,17 @@ """ -from __future__ import division -from pyomo.environ import (ConcreteModel, Constraint, Reals, Integers, - Objective, Var, sqrt, minimize, exp) +from pyomo.environ import ( + ConcreteModel, + Constraint, + Reals, + Integers, + Objective, + Var, + sqrt, + minimize, + exp, +) from pyomo.common.collections import ComponentMap @@ -30,10 +38,14 @@ def __init__(self, *args, **kwargs): m.x = Var(domain=Reals, bounds=(1, 20), initialize=5.29) m.y = Var(domain=Integers, bounds=(1, 20), initialize=3) - m.c1 = Constraint(expr=0.3*(m.x-8)**2 + 0.04 * (m.y - 6) - ** 4 + 0.1 * exp(2*m.x)*((m.y)**(-4)) <= 56) - m.c2 = Constraint(expr=1/m.x + 1/m.y - sqrt(m.x) * sqrt(m.y) <= -1) - m.c3 = Constraint(expr=2*m.x - 5*m.y <= -1) + m.c1 = Constraint( + expr=0.3 * (m.x - 8) ** 2 + + 0.04 * (m.y - 6) ** 4 + + 0.1 * exp(2 * m.x) * ((m.y) ** (-4)) + <= 56 + ) + m.c2 = Constraint(expr=1 / m.x + 1 / m.y - sqrt(m.x) * sqrt(m.y) <= -1) + m.c3 = Constraint(expr=2 * m.x - 5 * m.y <= -1) m.objective = Objective(expr=-6 * m.x - m.y, sense=minimize) m.optimal_value = -56.981 diff --git a/pyomo/contrib/mindtpy/tests/MINLP5_simple.py b/pyomo/contrib/mindtpy/tests/MINLP5_simple.py index 754eba16d6e..5ab5f98b894 100644 --- a/pyomo/contrib/mindtpy/tests/MINLP5_simple.py +++ b/pyomo/contrib/mindtpy/tests/MINLP5_simple.py @@ -10,8 +10,16 @@ 3 constraints """ -from pyomo.environ import (ConcreteModel, Constraint, Reals, Integers, - Objective, Var, minimize, exp) +from pyomo.environ import ( + ConcreteModel, + Constraint, + Reals, + Integers, + Objective, + Var, + minimize, + exp, +) from pyomo.common.collections import ComponentMap @@ -25,11 +33,15 @@ def __init__(self, *args, **kwargs): m.x = Var(within=Reals, bounds=(1, 20), initialize=5.29) m.y = Var(within=Integers, bounds=(1, 20), initialize=3) - m.objective = Objective(expr=0.3 * (m.x - 8)**2 + 0.04 * - (m.y - 6)**4 + 0.1*exp(2*m.x)*(m.y**(-4)), sense=minimize) + m.objective = Objective( + expr=0.3 * (m.x - 8) ** 2 + + 0.04 * (m.y - 6) ** 4 + + 0.1 * exp(2 * m.x) * (m.y ** (-4)), + sense=minimize, + ) m.c1 = Constraint(expr=6 * m.x + m.y <= 60) - m.c2 = Constraint(expr=1/m.x + 1/m.x - m.x**0.5*m.y**0.5 <= -1) + m.c2 = Constraint(expr=1 / m.x + 1 / m.x - m.x**0.5 * m.y**0.5 <= -1) m.c3 = Constraint(expr=2 * m.x - 5 * m.y <= -1) m.optimal_value = 3.6572 m.optimal_solution = ComponentMap() diff --git a/pyomo/contrib/mindtpy/tests/MINLP_simple.py b/pyomo/contrib/mindtpy/tests/MINLP_simple.py index 137dc45bf30..5663c93af8b 100644 --- a/pyomo/contrib/mindtpy/tests/MINLP_simple.py +++ b/pyomo/contrib/mindtpy/tests/MINLP_simple.py @@ -25,13 +25,20 @@ 7 constraints """ -from __future__ import division -from pyomo.environ import (Binary, ConcreteModel, Constraint, - NonNegativeReals, Objective, - RangeSet, Var, minimize) +from pyomo.environ import ( + Binary, + ConcreteModel, + Constraint, + NonNegativeReals, + Objective, + RangeSet, + Var, + minimize, +) from pyomo.common.collections import ComponentMap + class SimpleMINLP(ConcreteModel): """Convex MINLP problem Assignment 6 APSE.""" @@ -51,7 +58,7 @@ def __init__(self, *args, **kwargs): 'sub2': {1: 0, 2: 1, 3: 1}, 'sub3': {1: 1, 2: 0, 3: 1}, 'sub4': {1: 1, 2: 1, 3: 0}, - 'sub5': {1: 0, 2: 0, 3: 0} + 'sub5': {1: 0, 2: 0, 3: 0}, } # initial point information for continuous variables initX = {1: 0, 2: 0} @@ -73,14 +80,15 @@ def __init__(self, *args, **kwargs): m.const7 = Constraint(expr=m.Y[1] + m.Y[2] + m.Y[3] >= 1) """Cost (objective) function definition""" - m.objective = Objective(expr=Y[1] + 1.5 * Y[2] + 0.5 * Y[3] + X[1] ** 2 + X[2] ** 2, - sense=minimize) + m.objective = Objective( + expr=Y[1] + 1.5 * Y[2] + 0.5 * Y[3] + X[1] ** 2 + X[2] ** 2, sense=minimize + ) """Bound definitions""" # x (continuous) upper bounds x_ubs = {1: 4, 2: 4} for i, x_ub in x_ubs.items(): X[i].setub(x_ub) - + m.optimal_value = 3.5 m.optimal_solution = ComponentMap() m.optimal_solution[m.X[1]] = 1.0 diff --git a/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py b/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py index 25dde9ed828..6038f9a74eb 100644 --- a/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py +++ b/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py @@ -9,24 +9,29 @@ 2 constraints """ -from __future__ import division -from pyomo.environ import (Binary, ConcreteModel, Constraint, Reals, - Objective, Param, RangeSet, Var, exp, minimize, log) +from pyomo.environ import ( + Binary, + ConcreteModel, + Constraint, + Objective, + Var, + minimize, + log, +) from pyomo.common.collections import ComponentMap class ConstraintQualificationExample(ConcreteModel): - def __init__(self, *args, **kwargs): """Create the problem.""" kwargs.setdefault('name', 'ConstraintQualificationExample') super(ConstraintQualificationExample, self).__init__(*args, **kwargs) m = self m.x = Var(bounds=(1.0, 10.0), initialize=5.0) - m.y = Var(within=Binary) - m.c1 = Constraint(expr=(m.x-3.0)**2 <= 50.0*(1-m.y)) - m.c2 = Constraint(expr=m.x*log(m.x)+5.0 <= 50.0*(m.y)) + m.y = Var(within=Binary, initialize=1.0) + m.c1 = Constraint(expr=(m.x - 3.0) ** 2 <= 50.0 * (1 - m.y)) + m.c2 = Constraint(expr=m.x * log(m.x) + 5.0 <= 50.0 * (m.y)) m.objective = Objective(expr=m.x, sense=minimize) m.optimal_value = 3 m.optimal_solution = ComponentMap() diff --git a/pyomo/contrib/mindtpy/tests/eight_process_problem.py b/pyomo/contrib/mindtpy/tests/eight_process_problem.py index 2b0330b358d..d3876a9dc44 100644 --- a/pyomo/contrib/mindtpy/tests/eight_process_problem.py +++ b/pyomo/contrib/mindtpy/tests/eight_process_problem.py @@ -22,10 +22,19 @@ http://dx.doi.org/10.1016/0098-1354(95)00219-7 """ -from __future__ import division -from pyomo.environ import (Binary, ConcreteModel, Constraint, NonNegativeReals, - Objective, Param, RangeSet, Var, exp, minimize) +from pyomo.environ import ( + Binary, + ConcreteModel, + Constraint, + NonNegativeReals, + Objective, + Param, + RangeSet, + Var, + exp, + minimize, +) from pyomo.common.collections import ComponentMap @@ -68,9 +77,22 @@ def __init__(self, convex=True, *args, **kwargs): # VARIABLE COST COEFF FOR PROCESS UNITS - STREAMS # Format: stream #: cost - variable_cost = {3: -10, 5: -15, 9: -40, 19: 25, 21: 35, 25: -35, - 17: 80, 14: 15, 10: 15, 2: 1, 4: 1, 18: -65, 20: -60, - 22: -80} + variable_cost = { + 3: -10, + 5: -15, + 9: -40, + 19: 25, + 21: 35, + 25: -35, + 17: 80, + 14: 15, + 10: 15, + 2: 1, + 4: 1, + 18: -65, + 20: -60, + 22: -80, + } CV = m.CV = Param(I, initialize=variable_cost, default=0) # initial point information for equipment selection (for each NLP @@ -78,13 +100,35 @@ def __init__(self, convex=True, *args, **kwargs): initY = { 'sub1': {1: 1, 2: 0, 3: 1, 4: 1, 5: 0, 6: 0, 7: 1, 8: 1}, 'sub2': {1: 0, 2: 1, 3: 1, 4: 1, 5: 0, 6: 1, 7: 0, 8: 1}, - 'sub3': {1: 1, 2: 0, 3: 1, 4: 0, 5: 1, 6: 0, 7: 0, 8: 1} + 'sub3': {1: 1, 2: 0, 3: 1, 4: 0, 5: 1, 6: 0, 7: 0, 8: 1}, } # initial point information for stream flows - initX = {2: 2, 3: 1.5, 4: 0, 5: 0, 6: 0.75, 7: 0.5, 8: 0.5, - 9: 0.75, 10: 0, 11: 1.5, 12: 1.34, 13: 2, 14: 2.5, 15: 0, - 16: 0, 17: 2, 18: 0.75, 19: 2, 20: 1.5, 21: 0, 22: 0, - 23: 1.7, 24: 1.5, 25: 0.5} + initX = { + 2: 2, + 3: 1.5, + 4: 0, + 5: 0, + 6: 0.75, + 7: 0.5, + 8: 0.5, + 9: 0.75, + 10: 0, + 11: 1.5, + 12: 1.34, + 13: 2, + 14: 2.5, + 15: 0, + 16: 0, + 17: 2, + 18: 0.75, + 19: 2, + 20: 1.5, + 21: 0, + 22: 0, + 23: 1.7, + 24: 1.5, + 25: 0.5, + } """Variable declarations""" # BINARY VARIABLE DENOTING EXISTENCE-NONEXISTENCE @@ -145,15 +189,30 @@ def __init__(self, convex=True, *args, **kwargs): m.pureint4 = Constraint(expr=m.Y[3] - m.Y[8] <= 0) """Cost (objective) function definition""" - m.objective = Objective(expr=sum(Y[j] * CF[j] for j in J) + - sum(X[i] * CV[i] for i in I) + CONSTANT, - sense=minimize) + m.objective = Objective( + expr=sum(Y[j] * CF[j] for j in J) + sum(X[i] * CV[i] for i in I) + CONSTANT, + sense=minimize, + ) """Bound definitions""" # x (flow) upper bounds # x_ubs = {3: 2, 5: 2, 9: 2, 10: 1, 14: 1, 17: 2, 19: 2, 21: 2, 25: 3} - x_ubs = {2: 10, 3: 2, 4: 10, 5: 2, 9: 2, 10: 1, 14: 1, 17: 2, 18: 10, 19: 2, - 20: 10, 21: 2, 22: 10, 25: 3} # add bounds for variables in nonlinear constraints + x_ubs = { + 2: 10, + 3: 2, + 4: 10, + 5: 2, + 9: 2, + 10: 1, + 14: 1, + 17: 2, + 18: 10, + 19: 2, + 20: 10, + 21: 2, + 22: 10, + 25: 3, + } # add bounds for variables in nonlinear constraints for i, x_ub in x_ubs.items(): X[i].setub(x_ub) m.optimal_value = 68.0097 @@ -170,7 +229,7 @@ def __init__(self, convex=True, *args, **kwargs): m.optimal_solution[m.X[11]] = 1.3333333333333333 m.optimal_solution[m.X[12]] = 1.3333333333333333 m.optimal_solution[m.X[13]] = 2.0 - m.optimal_solution[m.X[14]] = .26666666666666666 + m.optimal_solution[m.X[14]] = 0.26666666666666666 m.optimal_solution[m.X[15]] = 0.0 m.optimal_solution[m.X[16]] = 0.0 m.optimal_solution[m.X[17]] = 0.5858354365580745 @@ -189,4 +248,4 @@ def __init__(self, convex=True, *args, **kwargs): m.optimal_solution[m.Y[5]] = 0.0 m.optimal_solution[m.Y[6]] = 1.0 m.optimal_solution[m.Y[7]] = 0.0 - m.optimal_solution[m.Y[8]] = 1.0 \ No newline at end of file + m.optimal_solution[m.Y[8]] = 1.0 diff --git a/pyomo/contrib/mindtpy/tests/feasibility_pump1.py b/pyomo/contrib/mindtpy/tests/feasibility_pump1.py index 764bf73d992..e0a611c1ed2 100644 --- a/pyomo/contrib/mindtpy/tests/feasibility_pump1.py +++ b/pyomo/contrib/mindtpy/tests/feasibility_pump1.py @@ -10,20 +10,26 @@ 3 constraints """ -from __future__ import division -from pyomo.environ import (Binary, ConcreteModel, Constraint, Objective, - Var, minimize, Reals) +from pyomo.environ import ( + Binary, + ConcreteModel, + Constraint, + Objective, + Var, + minimize, + Reals, +) from pyomo.common.collections import ComponentMap -class Feasibility_Pump1(ConcreteModel): - """Feasibility_Pump1 example""" +class FeasPump1(ConcreteModel): + """Feasibility Pump example 1""" def __init__(self, *args, **kwargs): """Create the problem.""" - kwargs.setdefault('name', 'Feasibility_Pump1') - super(Feasibility_Pump1, self).__init__(*args, **kwargs) + kwargs.setdefault('name', 'Feasibility Pump 1') + super(FeasPump1, self).__init__(*args, **kwargs) m = self m.x = Var(within=Binary) @@ -32,8 +38,9 @@ def __init__(self, *args, **kwargs): m.objective = Objective(expr=m.x, sense=minimize) - m.c1 = Constraint(expr=(m.y1-0.5) * (m.y1-0.5) + - (m.y2-0.5) * (m.y2-0.5) <= 0.25) + m.c1 = Constraint( + expr=(m.y1 - 0.5) * (m.y1 - 0.5) + (m.y2 - 0.5) * (m.y2 - 0.5) <= 0.25 + ) m.c2 = Constraint(expr=m.x - m.y1 <= 3) m.c3 = Constraint(expr=m.y2 <= 0) m.optimal_value = 0 diff --git a/pyomo/contrib/mindtpy/tests/feasibility_pump2.py b/pyomo/contrib/mindtpy/tests/feasibility_pump2.py index 2b470dfd3cc..48b98dc5800 100644 --- a/pyomo/contrib/mindtpy/tests/feasibility_pump2.py +++ b/pyomo/contrib/mindtpy/tests/feasibility_pump2.py @@ -10,30 +10,37 @@ 3 constraints """ -from __future__ import division from math import pi -from pyomo.environ import (Binary, ConcreteModel, Constraint, Objective, - Var, minimize, Reals, sin) +from pyomo.environ import ( + Binary, + ConcreteModel, + Constraint, + Objective, + Var, + minimize, + Reals, + sin, +) from pyomo.common.collections import ComponentMap -class Feasibility_Pump2(ConcreteModel): - """Feasibility_Pump2 example""" +class FeasPump2(ConcreteModel): + """Feasibility Pump example 2""" def __init__(self, *args, **kwargs): """Create the problem.""" - kwargs.setdefault('name', 'Feasibility_Pump2') - super(Feasibility_Pump2, self).__init__(*args, **kwargs) + kwargs.setdefault('name', 'Feasibility Pump 2') + super(FeasPump2, self).__init__(*args, **kwargs) m = self m.x = Var(within=Binary) m.y = Var(within=Reals) - m.objective = Objective(expr=- m.y, sense=minimize) + m.objective = Objective(expr=-m.y, sense=minimize) m.c1 = Constraint(expr=m.y - sin(m.x * pi * (5 / 3)) <= 0) - m.c2 = Constraint(expr=- m.y - sin(m.x * pi * (5 / 3)) <= 0) + m.c2 = Constraint(expr=-m.y - sin(m.x * pi * (5 / 3)) <= 0) m.optimal_value = 0 m.optimal_solution = ComponentMap() m.optimal_solution[m.x] = 0.0 diff --git a/pyomo/contrib/mindtpy/tests/from_proposal.py b/pyomo/contrib/mindtpy/tests/from_proposal.py index a06a76c1bb6..6ddab15ee53 100644 --- a/pyomo/contrib/mindtpy/tests/from_proposal.py +++ b/pyomo/contrib/mindtpy/tests/from_proposal.py @@ -4,9 +4,16 @@ Link: https://www.researchgate.net/project/Convex-MINLP/update/5c7eb2ee3843b034242e9e4a """ -from __future__ import division -from pyomo.environ import (ConcreteModel, Constraint, Reals, Integers, - Objective, Var, sqrt, minimize) +from pyomo.environ import ( + ConcreteModel, + Constraint, + Reals, + Integers, + Objective, + Var, + sqrt, + minimize, +) from pyomo.common.collections import ComponentMap @@ -20,10 +27,10 @@ def __init__(self, *args, **kwargs): m.x = Var(domain=Reals, bounds=(0, 20), initialize=1) m.y = Var(domain=Integers, bounds=(0, 20), initialize=4) - m.c1 = Constraint(expr=m.x**2/20.0 + m.y <= 20) - m.c2 = Constraint(expr=(m.x-1)**2/40.0 - m.y <= -4) - m.c3 = Constraint(expr=m.y - 10*sqrt(m.x+0.1) <= 0) - m.c4 = Constraint(expr=-m.x-m.y <= -5) + m.c1 = Constraint(expr=m.x**2 / 20.0 + m.y <= 20) + m.c2 = Constraint(expr=(m.x - 1) ** 2 / 40.0 - m.y <= -4) + m.c3 = Constraint(expr=m.y - 10 * sqrt(m.x + 0.1) <= 0) + m.c4 = Constraint(expr=-m.x - m.y <= -5) m.objective = Objective(expr=m.x - m.y / 4.5 + 2, sense=minimize) m.optimal_value = 0.66555 diff --git a/pyomo/contrib/mindtpy/tests/nonconvex1.py b/pyomo/contrib/mindtpy/tests/nonconvex1.py index 247fa70c507..94a4de29405 100644 --- a/pyomo/contrib/mindtpy/tests/nonconvex1.py +++ b/pyomo/contrib/mindtpy/tests/nonconvex1.py @@ -10,8 +10,15 @@ 6 constraints """ -from pyomo.environ import (ConcreteModel, Constraint, Reals, Binary, - Objective, Var, minimize) +from pyomo.environ import ( + ConcreteModel, + Constraint, + Reals, + Binary, + Objective, + Var, + minimize, +) from pyomo.common.collections import ComponentMap @@ -28,11 +35,13 @@ def __init__(self, *args, **kwargs): m.y2 = Var(within=Binary, bounds=(0, 1), initialize=0) m.y3 = Var(within=Binary, bounds=(0, 1), initialize=0) - m.objective = Objective(expr=2 * m.x1 + 3 * m.x2 + 1.5 * m.y1 + - 2 * m.y2 - 0.5 * m.y3, sense=minimize) + m.objective = Objective( + expr=2 * m.x1 + 3 * m.x2 + 1.5 * m.y1 + 2 * m.y2 - 0.5 * m.y3, + sense=minimize, + ) m.c1 = Constraint(expr=m.x1 * m.x1 + m.y1 == 1.25) - m.c2 = Constraint(expr=m.x2 ** 1.5 + 1.5 * m.y2 == 3) + m.c2 = Constraint(expr=m.x2**1.5 + 1.5 * m.y2 == 3) m.c4 = Constraint(expr=m.x1 + m.y1 <= 1.6) m.c5 = Constraint(expr=1.333 * m.x2 + m.y2 <= 3) m.c6 = Constraint(expr=-m.y1 - m.y2 + m.y3 <= 0) diff --git a/pyomo/contrib/mindtpy/tests/nonconvex2.py b/pyomo/contrib/mindtpy/tests/nonconvex2.py index 8c80b8f9407..525db1292c1 100644 --- a/pyomo/contrib/mindtpy/tests/nonconvex2.py +++ b/pyomo/contrib/mindtpy/tests/nonconvex2.py @@ -10,8 +10,16 @@ 7 constraints """ -from pyomo.environ import (ConcreteModel, Constraint, Reals, Binary, - Objective, Var, minimize, log) +from pyomo.environ import ( + ConcreteModel, + Constraint, + Reals, + Binary, + Objective, + Var, + minimize, + log, +) from pyomo.common.collections import ComponentMap @@ -34,20 +42,34 @@ def __init__(self, *args, **kwargs): m.y7 = Var(within=Binary, bounds=(0, 1), initialize=0) m.y8 = Var(within=Binary, bounds=(0, 1), initialize=0) - m.objective = Objective(expr=- m.x1 * m.x2 * m.x3, sense=minimize) + m.objective = Objective(expr=-m.x1 * m.x2 * m.x3, sense=minimize) - m.c1 = Constraint(expr=-log(1 - m.x1) + log(0.1) * m.y1 + - log(0.2) * m.y2 + log(0.15) * m.y3 == 0) - m.c2 = Constraint(expr=-log(1 - m.x2) + log(0.05) * m.y4 + - log(0.2) * m.y5 + log(0.15) * m.y6 == 0) - m.c3 = Constraint(expr=-log(1 - m.x3) + log(0.02) * m.y7 + - log(0.06) * m.y8 == 0) + m.c1 = Constraint( + expr=-log(1 - m.x1) + log(0.1) * m.y1 + log(0.2) * m.y2 + log(0.15) * m.y3 + == 0 + ) + m.c2 = Constraint( + expr=-log(1 - m.x2) + log(0.05) * m.y4 + log(0.2) * m.y5 + log(0.15) * m.y6 + == 0 + ) + m.c3 = Constraint( + expr=-log(1 - m.x3) + log(0.02) * m.y7 + log(0.06) * m.y8 == 0 + ) m.c4 = Constraint(expr=-m.y1 - m.y2 - m.y3 <= -1) m.c5 = Constraint(expr=-m.y4 - m.y5 - m.y6 <= -1) m.c6 = Constraint(expr=-m.y7 - m.y8 <= -1) - m.c7 = Constraint(expr=3 * m.y1 + m.y2 + 2 * m.y3 + 3 * - m.y4 + 2 * m.y5 + m.y6 + 3 * m.y7 + 2 * m.y8 <= 10) + m.c7 = Constraint( + expr=3 * m.y1 + + m.y2 + + 2 * m.y3 + + 3 * m.y4 + + 2 * m.y5 + + m.y6 + + 3 * m.y7 + + 2 * m.y8 + <= 10 + ) m.optimal_value = -0.94347 m.optimal_solution = ComponentMap() m.optimal_solution[m.x1] = 0.97 diff --git a/pyomo/contrib/mindtpy/tests/nonconvex3.py b/pyomo/contrib/mindtpy/tests/nonconvex3.py index efd074accfb..dbb88bb1fad 100644 --- a/pyomo/contrib/mindtpy/tests/nonconvex3.py +++ b/pyomo/contrib/mindtpy/tests/nonconvex3.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -"""Problem C in paper 'Outer approximation algorithms for separable nonconvex mixed-integer nonlinear programs' +"""Problem C in paper 'Outer approximation algorithms for separable nonconvex mixed-integer nonlinear programs'. +The problem in the paper has two optimal solution. Variable y4 and y6 are symmetric. Therefore, we remove variable y6 for simplification. Ref: Kesavan P, Allgor R J, Gatzke E P, et al. Outer approximation algorithms for separable nonconvex mixed-integer nonlinear programs[J]. Mathematical Programming, 2004, 100(3): 517-535. @@ -10,8 +11,15 @@ 6 constraints """ -from pyomo.environ import (ConcreteModel, Constraint, Reals, Binary, - Objective, Var, minimize) +from pyomo.environ import ( + ConcreteModel, + Constraint, + Reals, + Binary, + Objective, + Var, + minimize, +) from pyomo.common.collections import ComponentMap @@ -29,17 +37,17 @@ def __init__(self, *args, **kwargs): m.y3 = Var(within=Binary, bounds=(0, 1), initialize=0) m.y4 = Var(within=Binary, bounds=(0, 1), initialize=0) m.y5 = Var(within=Binary, bounds=(0, 1), initialize=0) - m.y6 = Var(within=Binary, bounds=(0, 1), initialize=0) m.objective = Objective(expr=7 * m.x1 + 10 * m.x2, sense=minimize) - m.c1 = Constraint(expr=(m.x1 ** 1.2) * (m.x2 ** 1.7) - - 7 * m.x1 - 9 * m.x2 <= -24) + m.c1 = Constraint( + expr=(m.x1**1.2) * (m.x2**1.7) - 7 * m.x1 - 9 * m.x2 <= -24 + ) m.c2 = Constraint(expr=-m.x1 - 2 * m.x2 <= 5) m.c3 = Constraint(expr=-3 * m.x1 + m.x2 <= 1) m.c4 = Constraint(expr=4 * m.x1 - 3 * m.x2 <= 11) m.c5 = Constraint(expr=-m.x1 + m.y1 + 2 * m.y2 + 4 * m.y3 == 0) - m.c6 = Constraint(expr=-m.x2 + m.y4 + 2 * m.y5 + m.y6 == 0) + m.c6 = Constraint(expr=-m.x2 + m.y4 + 2 * m.y5 == 0) m.optimal_value = 31 m.optimal_solution = ComponentMap() m.optimal_solution[m.x1] = 3.0 @@ -49,4 +57,3 @@ def __init__(self, *args, **kwargs): m.optimal_solution[m.y3] = 0.0 m.optimal_solution[m.y4] = 1.0 m.optimal_solution[m.y5] = 0.0 - m.optimal_solution[m.y6] = 0.0 diff --git a/pyomo/contrib/mindtpy/tests/nonconvex4.py b/pyomo/contrib/mindtpy/tests/nonconvex4.py index c3105a2c244..c30fb9922a0 100644 --- a/pyomo/contrib/mindtpy/tests/nonconvex4.py +++ b/pyomo/contrib/mindtpy/tests/nonconvex4.py @@ -11,10 +11,18 @@ """ from pyomo.environ import * -from pyomo.environ import (ConcreteModel, Constraint, Reals, Binary, - Objective, Var, minimize) +from pyomo.environ import ( + ConcreteModel, + Constraint, + Reals, + Binary, + Objective, + Var, + minimize, +) from pyomo.common.collections import ComponentMap + class Nonconvex4(ConcreteModel): def __init__(self, *args, **kwargs): """Create the problem.""" @@ -30,8 +38,14 @@ def __init__(self, *args, **kwargs): m.objective = Objective(expr=-5 * m.x1 + 3 * m.x2, sense=minimize) - m.c1 = Constraint(expr=2 * (m.x2 ** 2) - 2 * (m.x2 ** 0.5) - - 2 * (m.x1 ** 0.5) * (m.x2 ** 2) + 11 * m.x2 + 8 * m.x1 <= 39) + m.c1 = Constraint( + expr=2 * (m.x2**2) + - 2 * (m.x2**0.5) + - 2 * (m.x1**0.5) * (m.x2**2) + + 11 * m.x2 + + 8 * m.x1 + <= 39 + ) m.c2 = Constraint(expr=m.x1 - m.x2 <= 3) m.c3 = Constraint(expr=3 * m.x1 + 2 * m.x2 <= 24) m.c4 = Constraint(expr=-m.x1 + m.y1 + 2 * m.y2 + 4 * m.y3 == 0) diff --git a/pyomo/contrib/mindtpy/tests/online_doc_example.py b/pyomo/contrib/mindtpy/tests/online_doc_example.py index 4ee5002a4e2..d741455e7f7 100644 --- a/pyomo/contrib/mindtpy/tests/online_doc_example.py +++ b/pyomo/contrib/mindtpy/tests/online_doc_example.py @@ -19,15 +19,20 @@ 2 constraints """ -from __future__ import division -from pyomo.environ import (Binary, ConcreteModel, Constraint, - Objective, Var, minimize, log) +from pyomo.environ import ( + Binary, + ConcreteModel, + Constraint, + Objective, + Var, + minimize, + log, +) from pyomo.common.collections import ComponentMap class OnlineDocExample(ConcreteModel): - def __init__(self, *args, **kwargs): """Create the problem.""" kwargs.setdefault('name', 'OnlineDocExample') @@ -35,9 +40,8 @@ def __init__(self, *args, **kwargs): m = self m.x = Var(bounds=(1.0, 10.0), initialize=5.0) m.y = Var(within=Binary) - m.c1 = Constraint(expr=(m.x-4.0)**2 - - m.x <= 50.0*(1-m.y)) - m.c2 = Constraint(expr=m.x*log(m.x) + 5 <= 50.0*(m.y)) + m.c1 = Constraint(expr=(m.x - 4.0) ** 2 - m.x <= 50.0 * (1 - m.y)) + m.c2 = Constraint(expr=m.x * log(m.x) + 5 <= 50.0 * (m.y)) m.objective = Objective(expr=m.x, sense=minimize) m.optimal_value = 2.438447 m.optimal_solution = ComponentMap() diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy.py b/pyomo/contrib/mindtpy/tests/test_mindtpy.py index 71e624ba372..e872eccc670 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy.py @@ -12,15 +12,16 @@ """Tests for the MindtPy solver.""" from pyomo.core.expr.calculus.diff_with_sympy import differentiate_available import pyomo.common.unittest as unittest -from pyomo.contrib.mindtpy.tests.eight_process_problem import \ - EightProcessFlowsheet +from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 from pyomo.contrib.mindtpy.tests.MINLP4_simple import SimpleMINLP4 from pyomo.contrib.mindtpy.tests.MINLP5_simple import SimpleMINLP5 from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel -from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample +from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ( + ConstraintQualificationExample, +) from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample from pyomo.environ import SolverFactory, value, maximize from pyomo.solvers.tests.models.LP_unbounded import LP_unbounded @@ -28,20 +29,22 @@ from pyomo.opt import TerminationCondition -full_model_list = [EightProcessFlowsheet(convex=True), - ConstraintQualificationExample(), - SimpleMINLP(), - SimpleMINLP2(), - SimpleMINLP3(), - SimpleMINLP4(), - SimpleMINLP5(), - ProposalModel(), - OnlineDocExample() - ] -model_list = [EightProcessFlowsheet(convex=True), - ConstraintQualificationExample(), - SimpleMINLP2(), - ] +full_model_list = [ + EightProcessFlowsheet(convex=True), + ConstraintQualificationExample(), + SimpleMINLP(), + SimpleMINLP2(), + SimpleMINLP3(), + SimpleMINLP4(), + SimpleMINLP5(), + ProposalModel(), + OnlineDocExample(), +] +model_list = [ + EightProcessFlowsheet(convex=True), + ConstraintQualificationExample(), + SimpleMINLP2(), +] nonconvex_model_list = [EightProcessFlowsheet(convex=False)] obj_nonlinear_sum_model_list = [SimpleMINLP(), SimpleMINLP5()] @@ -54,312 +57,459 @@ extreme_model_list = [LP_model.model, QCP_model.model] required_solvers = ('ipopt', 'glpk') -if all(SolverFactory(s).available() for s in required_solvers): +if all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = True else: subsolvers_available = False -@unittest.skipIf(not subsolvers_available, - 'Required subsolvers %s are not available' - % (required_solvers,)) -@unittest.skipIf(not differentiate_available, - 'Symbolic differentiation is not available') +@unittest.skipIf( + not subsolvers_available, + 'Required subsolvers %s are not available' % (required_solvers,), +) +@unittest.skipIf( + not differentiate_available, 'Symbolic differentiation is not available' +) class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" def check_optimal_solution(self, model, places=1): for var in model.optimal_solution: - self.assertAlmostEqual(var.value, model.optimal_solution[var], places=places) + self.assertAlmostEqual( + var.value, model.optimal_solution[var], places=places + ) def test_OA_rNLP(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - init_strategy='rNLP', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='rNLP', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_OA_extreme_model(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in extreme_model_list: - results = opt.solve(model, strategy='OA', - init_strategy='rNLP', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - ) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='rNLP', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) def test_OA_L2_norm(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - init_strategy='rNLP', - feasibility_norm='L2', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='rNLP', + feasibility_norm='L2', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_OA_L_infinity_norm(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - init_strategy='rNLP', - feasibility_norm='L_infinity', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='rNLP', + feasibility_norm='L_infinity', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_OA_max_binary(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - init_strategy='max_binary', - feasibility_norm='L1', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='max_binary', + feasibility_norm='L1', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_OA_sympy(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - differentiate_mode='sympy', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + differentiate_mode='sympy', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_OA_initial_binary(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - init_strategy='initial_binary', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='initial_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_OA_no_good_cuts(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_no_good_cuts=True - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_no_good_cuts=True, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) - @unittest.skipUnless(SolverFactory('cplex').available() or SolverFactory('gurobi').available(), - "CPLEX or GUROBI not available.") + @unittest.skipUnless( + SolverFactory('cplex').available() or SolverFactory('gurobi').available(), + "CPLEX or Gurobi not available.", + ) def test_OA_quadratic_strategy(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: - model = ProposalModel() + model = ProposalModel().clone() if SolverFactory('cplex').available(): mip_solver = 'cplex' elif SolverFactory('gurobi').available(): mip_solver = 'gurobi' for quadratic_strategy in (0, 1, 2): - results = opt.solve(model, strategy='OA', - mip_solver=mip_solver, - nlp_solver=required_solvers[0], - quadratic_strategy=quadratic_strategy, - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + results = opt.solve( + model, + strategy='OA', + mip_solver=mip_solver, + nlp_solver=required_solvers[0], + quadratic_strategy=quadratic_strategy, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) - - @unittest.skipUnless(SolverFactory('appsi_cplex').available(exception_flag=False), - "APPSI_CPLEX not available.") + + @unittest.skipUnless( + SolverFactory('appsi_cplex').available(exception_flag=False), + "APPSI_CPLEX not available.", + ) def test_OA_APPSI_solver(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver='appsi_cplex', - nlp_solver=required_solvers[0] - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver='appsi_cplex', + nlp_solver=required_solvers[0], + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) - @unittest.skipUnless(SolverFactory('appsi_ipopt').available(exception_flag=False), - "APPSI_IPOPT not available.") + @unittest.skipUnless( + SolverFactory('appsi_ipopt').available(exception_flag=False), + "APPSI_IPOPT not available.", + ) def test_OA_APPSI_ipopt(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver='appsi_ipopt' - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver='appsi_ipopt', + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) + self.assertAlmostEqual( + value(model.objective.expr), model.optimal_value, places=1 + ) + + @unittest.skipUnless( + SolverFactory('cyipopt').available(exception_flag=False), + "APPSI_IPOPT not available.", + ) + def test_OA_cyipopt(self): + """Test the outer approximation decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + for model in nonconvex_model_list: + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver='cyipopt', + heuristic_nonconvex=True, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) def test_OA_integer_to_binary(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - integer_to_binary=True - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + integer_to_binary=True, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_OA_partition_obj_nonlinear_terms(self): """Test the outer approximation decomposition algorithm (partition_obj_nonlinear_terms).""" with SolverFactory('mindtpy') as opt: for model in obj_nonlinear_sum_model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - partition_obj_nonlinear_terms=True - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + partition_obj_nonlinear_terms=True, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_OA_add_slack(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - init_strategy='initial_binary', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_slack=True - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='initial_binary', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_slack=True, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) - results = opt.solve(model, strategy='OA', - init_strategy='rNLP', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_slack=True - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + results = opt.solve( + model, + strategy='OA', + init_strategy='rNLP', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_slack=True, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_OA_nonconvex(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in nonconvex_model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - heuristic_nonconvex=True - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + heuristic_nonconvex=True, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_iteration_limit(self): with SolverFactory('mindtpy') as opt: model = ConstraintQualificationExample() - opt.solve(model, strategy='OA', - iteration_limit=1, - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0] - ) + opt.solve( + model, + strategy='OA', + iteration_limit=1, + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) # self.assertAlmostEqual(value(model.objective.expr), 3, places=2) def test_time_limit(self): with SolverFactory('mindtpy') as opt: model = ConstraintQualificationExample() - opt.solve(model, strategy='OA', - time_limit=1, - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0] - ) + opt.solve( + model, + strategy='OA', + time_limit=1, + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) def test_maximize_obj(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: - model = ProposalModel() + model = ProposalModel().clone() model.objective.sense = maximize - opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - ) - self.assertAlmostEqual( - value(model.objective.expr), 14.83, places=1) + opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + self.assertAlmostEqual(value(model.objective.expr), 14.83, places=1) + + def test_infeasible_model(self): + """Test the outer approximation decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + model = SimpleMINLP().clone() + model.X[1].fix(0) + model.Y[1].fix(0) + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + self.assertIs( + results.solver.termination_condition, TerminationCondition.infeasible + ) if __name__ == '__main__': diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py index 2d535e31e10..b5bfbe62553 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_ECP.py @@ -1,73 +1,89 @@ # -*- coding: utf-8 -*- """Tests for the MindtPy solver.""" import pyomo.common.unittest as unittest -from pyomo.contrib.mindtpy.tests.eight_process_problem import \ - EightProcessFlowsheet +from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel -from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample +from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ( + ConstraintQualificationExample, +) from pyomo.environ import SolverFactory, value from pyomo.opt import TerminationCondition required_solvers = ('ipopt', 'glpk') -# required_solvers = ('gams', 'gams') -if all(SolverFactory(s).available() for s in required_solvers): +if all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = True else: subsolvers_available = False -model_list = [EightProcessFlowsheet(convex=True), - ConstraintQualificationExample(), - SimpleMINLP(), - SimpleMINLP2(), - SimpleMINLP3(), - ProposalModel(), - ] +model_list = [ + EightProcessFlowsheet(convex=True), + ConstraintQualificationExample(), + SimpleMINLP(), + SimpleMINLP2(), + SimpleMINLP3(), + ProposalModel(), +] -@unittest.skipIf(not subsolvers_available, - 'Required subsolvers %s are not available' - % (required_solvers,)) +@unittest.skipIf( + not subsolvers_available, + 'Required subsolvers %s are not available' % (required_solvers,), +) class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" def check_optimal_solution(self, model, places=1): for var in model.optimal_solution: - self.assertAlmostEqual(var.value, model.optimal_solution[var], places=places) + self.assertAlmostEqual( + var.value, model.optimal_solution[var], places=places + ) def test_ECP(self): """Test the extended cutting plane decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='ECP', - init_strategy='rNLP', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - absolute_bound_tolerance=1E-5) + model = model.clone() + results = opt.solve( + model, + strategy='ECP', + init_strategy='rNLP', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + absolute_bound_tolerance=1e-5, + ) - self.assertIs(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertIs( + results.solver.termination_condition, TerminationCondition.optimal + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_ECP_add_slack(self): """Test the extended cutting plane decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='ECP', - init_strategy='rNLP', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - absolute_bound_tolerance=1E-5, - add_slack=True) + model = model.clone() + results = opt.solve( + model, + strategy='ECP', + init_strategy='rNLP', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + absolute_bound_tolerance=1e-5, + add_slack=True, + ) - self.assertIs(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertIs( + results.solver.termination_condition, TerminationCondition.optimal + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py index 1a241c074d2..697a63d17c8 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_feas_pump.py @@ -1,48 +1,54 @@ # -*- coding: utf-8 -*- """Tests for the MindtPy solver.""" import pyomo.common.unittest as unittest -from pyomo.contrib.mindtpy.tests.eight_process_problem import \ - EightProcessFlowsheet +from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel -from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample +from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ( + ConstraintQualificationExample, +) from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample from pyomo.environ import SolverFactory, value from pyomo.opt import TerminationCondition from pyomo.contrib.gdpopt.util import is_feasible from pyomo.util.infeasible import log_infeasible_constraints -from pyomo.contrib.mindtpy.tests.feasibility_pump1 import Feasibility_Pump1 -from pyomo.contrib.mindtpy.tests.feasibility_pump2 import Feasibility_Pump2 +from pyomo.contrib.mindtpy.tests.feasibility_pump1 import FeasPump1 +from pyomo.contrib.mindtpy.tests.feasibility_pump2 import FeasPump2 -required_solvers = ('ipopt', 'glpk', 'cplex') -if all(SolverFactory(s).available() for s in required_solvers): +required_solvers = ('ipopt', 'cplex') +# TODO: 'appsi_highs' will fail here. +if all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = True else: subsolvers_available = False -model_list = [EightProcessFlowsheet(convex=True), - ConstraintQualificationExample(), - Feasibility_Pump1(), - Feasibility_Pump2(), - SimpleMINLP(), - SimpleMINLP2(), - SimpleMINLP3(), - ProposalModel(), - OnlineDocExample() - ] +model_list = [ + EightProcessFlowsheet(convex=True), + ConstraintQualificationExample(), + FeasPump1(), + FeasPump2(), + SimpleMINLP(), + SimpleMINLP2(), + SimpleMINLP3(), + ProposalModel(), + OnlineDocExample(), +] -@unittest.skipIf(not subsolvers_available, - 'Required subsolvers %s are not available' - % (required_solvers,)) +@unittest.skipIf( + not subsolvers_available, + 'Required subsolvers %s are not available' % (required_solvers,), +) class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver.""" def check_optimal_solution(self, model, places=1): for var in model.optimal_solution: - self.assertAlmostEqual(var.value, model.optimal_solution[var], places=places) + self.assertAlmostEqual( + var.value, model.optimal_solution[var], places=places + ) def get_config(self, solver): config = solver.CONFIG @@ -52,10 +58,14 @@ def test_FP(self): """Test the feasibility pump algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='FP', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - absolute_bound_tolerance=1E-5) + model = model.clone() + results = opt.solve( + model, + strategy='FP', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + absolute_bound_tolerance=1e-5, + ) log_infeasible_constraints(model) self.assertTrue(is_feasible(model, self.get_config(opt))) @@ -63,16 +73,22 @@ def test_FP_OA_8PP(self): """Test the FP-OA algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - init_strategy='FP', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - # absolute_bound_tolerance=1E-5 - ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='FP', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + # absolute_bound_tolerance=1E-5 + ) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py index 7c5f460278b..0fa19b30d9c 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_global.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- """Tests for the MindtPy solver.""" import pyomo.common.unittest as unittest -from pyomo.contrib.mindtpy.tests.eight_process_problem import \ - EightProcessFlowsheet +from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet from pyomo.contrib.mindtpy.tests.nonconvex1 import Nonconvex1 from pyomo.contrib.mindtpy.tests.nonconvex2 import Nonconvex2 from pyomo.contrib.mindtpy.tests.nonconvex3 import Nonconvex3 @@ -12,60 +11,79 @@ from pyomo.opt import TerminationCondition required_solvers = ('baron', 'cplex_persistent') -if not all(SolverFactory(s).available(False) for s in required_solvers): +if not all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = False elif not SolverFactory('baron').license_is_valid(): subsolvers_available = False else: subsolvers_available = True -model_list = [EightProcessFlowsheet(convex=False), - Nonconvex1(), - Nonconvex2(), - Nonconvex3(), - Nonconvex4(), - ] +model_list = [ + EightProcessFlowsheet(convex=False), + Nonconvex1(), + Nonconvex2(), + Nonconvex3(), + Nonconvex4(), +] -@unittest.skipIf(not subsolvers_available, - 'Required subsolvers %s are not available' - % (required_solvers,)) -@unittest.skipIf(not pyomo.contrib.mcpp.pyomo_mcpp.mcpp_available(), - 'MC++ is not available') +@unittest.skipIf( + not subsolvers_available, + 'Required subsolvers %s are not available' % (required_solvers,), +) +@unittest.skipIf( + not pyomo.contrib.mcpp.pyomo_mcpp.mcpp_available(), 'MC++ is not available' +) class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" def check_optimal_solution(self, model, places=1): for var in model.optimal_solution: - self.assertAlmostEqual(var.value, model.optimal_solution[var], places=places) + self.assertAlmostEqual( + var.value, model.optimal_solution[var], places=places + ) def test_GOA(self): """Test the global outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='GOA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0]) + model = model.clone() + results = opt.solve( + model, + strategy='GOA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) - self.assertIn(results.solver.termination_condition, [ - TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=2) + value(model.objective.expr), model.optimal_value, places=2 + ) self.check_optimal_solution(model) def test_GOA_tabu_list(self): """Test the global outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='GOA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - use_tabu_list=True) + model = model.clone() + results = opt.solve( + model, + strategy='GOA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + use_tabu_list=True, + ) - self.assertIn(results.solver.termination_condition, [ - TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=2) + value(model.objective.expr), model.optimal_value, places=2 + ) self.check_optimal_solution(model) diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py index 644cd3de12a..259cfe9dd7c 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_global_lp_nlp.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- """Tests for global LP/NLP in the MindtPy solver.""" import pyomo.common.unittest as unittest -from pyomo.contrib.mindtpy.tests.eight_process_problem import \ - EightProcessFlowsheet +from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet from pyomo.contrib.mindtpy.tests.nonconvex1 import Nonconvex1 from pyomo.contrib.mindtpy.tests.nonconvex2 import Nonconvex2 from pyomo.contrib.mindtpy.tests.nonconvex3 import Nonconvex3 @@ -12,7 +11,7 @@ from pyomo.contrib.mcpp import pyomo_mcpp required_solvers = ('baron', 'cplex_persistent') -if not all(SolverFactory(s).available(False) for s in required_solvers): +if not all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = False elif not SolverFactory('baron').license_is_valid(): subsolvers_available = False @@ -20,55 +19,99 @@ subsolvers_available = True -model_list = [EightProcessFlowsheet(convex=False), - Nonconvex1(), - Nonconvex2(), - Nonconvex3(), - Nonconvex4(), - ] +model_list = [ + EightProcessFlowsheet(convex=False), + Nonconvex1(), + Nonconvex2(), + Nonconvex3(), + Nonconvex4(), +] -@unittest.skipIf(not subsolvers_available, - 'Required subsolvers %s are not available' - % (required_solvers,)) -@unittest.skipIf(not pyomo_mcpp.mcpp_available(), - 'MC++ is not available') +@unittest.skipIf( + not subsolvers_available, + 'Required subsolvers %s are not available' % (required_solvers,), +) +@unittest.skipIf(not pyomo_mcpp.mcpp_available(), 'MC++ is not available') class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" def check_optimal_solution(self, model, places=1): for var in model.optimal_solution: - self.assertAlmostEqual(var.value, model.optimal_solution[var], places=places) + self.assertAlmostEqual( + var.value, model.optimal_solution[var], places=places + ) def test_GOA(self): """Test the global outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='GOA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - single_tree=True) + model = model.clone() + results = opt.solve( + model, + strategy='GOA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + single_tree=True, + ) - self.assertIn(results.solver.termination_condition, [ - TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=2) + value(model.objective.expr), model.optimal_value, places=2 + ) self.check_optimal_solution(model) def test_GOA_tabu_list(self): """Test the global outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='GOA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - single_tree=True, - use_tabu_list=True) + model = model.clone() + results = opt.solve( + model, + strategy='GOA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + single_tree=True, + use_tabu_list=True, + ) - self.assertIn(results.solver.termination_condition, [ - TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=2) + value(model.objective.expr), model.optimal_value, places=2 + ) + self.check_optimal_solution(model) + + @unittest.skipUnless( + SolverFactory('gurobi_persistent').available(exception_flag=False) + and SolverFactory('gurobi_direct').available(), + 'gurobi_persistent and gurobi_direct solver is not available', + ) + def test_GOA_Gurobi(self): + """Test the global outer approximation decomposition algorithm.""" + with SolverFactory('mindtpy') as opt: + for model in model_list: + model = model.clone() + results = opt.solve( + model, + strategy='GOA', + mip_solver='gurobi_persistent', + nlp_solver=required_solvers[0], + single_tree=True, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) + self.assertAlmostEqual( + value(model.objective.expr), model.optimal_value, places=2 + ) self.check_optimal_solution(model) diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py index 9bfed7a9d46..2662a0e6f56 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py @@ -12,10 +12,9 @@ """Tests for the MindtPy solver.""" import sys import pyomo.common.unittest as unittest -from pyomo.contrib.mindtpy.tests.eight_process_problem import ( - EightProcessFlowsheet, -) +from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP +from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3 from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ( ConstraintQualificationExample, ) @@ -24,25 +23,34 @@ required_nlp_solvers = 'ipopt' required_mip_solvers = ['cplex_persistent', 'gurobi_persistent'] -available_mip_solvers = [s for s in required_mip_solvers - if SolverFactory(s).available(False)] +available_mip_solvers = [ + s for s in required_mip_solvers if SolverFactory(s).available(False) +] -if SolverFactory(required_nlp_solvers).available(False) and available_mip_solvers: +if ( + SolverFactory(required_nlp_solvers).available(exception_flag=False) + and available_mip_solvers +): subsolvers_available = True else: subsolvers_available = False -model_list = [EightProcessFlowsheet(convex=True), - ConstraintQualificationExample(), - SimpleMINLP() - ] +model_list = [ + EightProcessFlowsheet(convex=True), + ConstraintQualificationExample(), + SimpleMINLP(), + SimpleMINLP3(), +] + def known_solver_failure(mip_solver, model): - if ( mip_solver == 'gurobi_persistent' - and model.name in {'DuranEx3', 'SimpleMINLP'} - and sys.platform.startswith('win') - and SolverFactory(mip_solver).version()[:3] == (9,5,0) ): + if ( + mip_solver == 'gurobi_persistent' + and model.name in {'DuranEx3', 'SimpleMINLP'} + and sys.platform.startswith('win') + and SolverFactory(mip_solver).version()[:3] == (9, 5, 0) + ): sys.stderr.write( f"Skipping sub-test {model.name} with {mip_solver} due to known " f"failure when running Gurobi 9.5.0 on Windows\n" @@ -50,175 +58,247 @@ def known_solver_failure(mip_solver, model): return True return False -@unittest.skipIf(not subsolvers_available, - 'Required subsolvers %s are not available' - % ([required_nlp_solvers] + required_mip_solvers)) + +@unittest.skipIf( + not subsolvers_available, + 'Required subsolvers %s are not available' + % ([required_nlp_solvers] + required_mip_solvers), +) class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" def check_optimal_solution(self, model, places=1): for var in model.optimal_solution: - self.assertAlmostEqual(var.value, model.optimal_solution[var], places=places) + self.assertAlmostEqual( + var.value, model.optimal_solution[var], places=places + ) - @unittest.skipUnless('cplex_persistent' in available_mip_solvers, - 'cplex_persistent solver is not available') + @unittest.skipUnless( + 'cplex_persistent' in available_mip_solvers, + 'cplex_persistent solver is not available', + ) def test_LPNLP_CPLEX(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver='cplex_persistent', - nlp_solver=required_nlp_solvers, - single_tree=True) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver='cplex_persistent', + nlp_solver=required_nlp_solvers, + single_tree=True, + ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) - @unittest.skipUnless('gurobi_persistent' in available_mip_solvers, - 'gurobi_persistent solver is not available') - def test_LPNLP_GUROBI(self): + @unittest.skipUnless( + 'gurobi_persistent' in available_mip_solvers, + 'gurobi_persistent solver is not available', + ) + def test_LPNLP_Gurobi(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver='gurobi_persistent', - nlp_solver=required_nlp_solvers, - single_tree=True) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver='gurobi_persistent', + nlp_solver=required_nlp_solvers, + single_tree=True, + ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_RLPNLP_L1(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: - results = opt.solve(model, strategy='OA', - mip_solver=mip_solver, - nlp_solver=required_nlp_solvers, - single_tree=True, - add_regularization='level_L1') - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + results = opt.solve( + model, + strategy='OA', + mip_solver=mip_solver, + nlp_solver=required_nlp_solvers, + single_tree=True, + add_regularization='level_L1', + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_RLPNLP_L2(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: if known_solver_failure(mip_solver, model): continue - results = opt.solve(model, strategy='OA', - mip_solver=mip_solver, - nlp_solver=required_nlp_solvers, - single_tree=True, - add_regularization='level_L2') - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + results = opt.solve( + model, + strategy='OA', + mip_solver=mip_solver, + nlp_solver=required_nlp_solvers, + single_tree=True, + add_regularization='level_L2', + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_RLPNLP_Linf(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: - results = opt.solve(model, strategy='OA', - mip_solver=mip_solver, - nlp_solver=required_nlp_solvers, - single_tree=True, - add_regularization='level_L_infinity') - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + results = opt.solve( + model, + strategy='OA', + mip_solver=mip_solver, + nlp_solver=required_nlp_solvers, + single_tree=True, + add_regularization='level_L_infinity', + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_RLPNLP_grad_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: - results = opt.solve(model, strategy='OA', - mip_solver=mip_solver, - nlp_solver=required_nlp_solvers, - single_tree=True, - add_regularization='grad_lag') - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + results = opt.solve( + model, + strategy='OA', + mip_solver=mip_solver, + nlp_solver=required_nlp_solvers, + single_tree=True, + add_regularization='grad_lag', + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_RLPNLP_hess_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: if known_solver_failure(mip_solver, model): continue - results = opt.solve(model, strategy='OA', - mip_solver=mip_solver, - nlp_solver=required_nlp_solvers, - single_tree=True, - add_regularization='hess_lag') - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + results = opt.solve( + model, + strategy='OA', + mip_solver=mip_solver, + nlp_solver=required_nlp_solvers, + single_tree=True, + add_regularization='hess_lag', + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_RLPNLP_hess_only_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: if known_solver_failure(mip_solver, model): continue - results = opt.solve(model, strategy='OA', - mip_solver=mip_solver, - nlp_solver=required_nlp_solvers, - single_tree=True, - add_regularization='hess_only_lag') - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + results = opt.solve( + model, + strategy='OA', + mip_solver=mip_solver, + nlp_solver=required_nlp_solvers, + single_tree=True, + add_regularization='hess_only_lag', + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_RLPNLP_sqp_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: + model = model.clone() for mip_solver in available_mip_solvers: if known_solver_failure(mip_solver, model): continue - results = opt.solve(model, strategy='OA', - mip_solver=mip_solver, - nlp_solver=required_nlp_solvers, - single_tree=True, - add_regularization='sqp_lag') - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + results = opt.solve( + model, + strategy='OA', + mip_solver=mip_solver, + nlp_solver=required_nlp_solvers, + single_tree=True, + add_regularization='sqp_lag', + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py index fbebc3c7494..4c2ae4d1220 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_regularization.py @@ -1,191 +1,260 @@ # -*- coding: utf-8 -*- """Tests for the MindtPy solver.""" import pyomo.common.unittest as unittest -from pyomo.contrib.mindtpy.tests.eight_process_problem import \ - EightProcessFlowsheet -from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample +from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet +from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ( + ConstraintQualificationExample, +) from pyomo.environ import SolverFactory, value from pyomo.opt import TerminationCondition required_solvers = ('ipopt', 'cplex') # required_solvers = ('gams', 'gams') -if all(SolverFactory(s).available() for s in required_solvers): +if all(SolverFactory(s).available(exception_flag=False) for s in required_solvers): subsolvers_available = True else: subsolvers_available = False -model_list = [EightProcessFlowsheet(convex=True), - ConstraintQualificationExample() - ] +model_list = [EightProcessFlowsheet(convex=True), ConstraintQualificationExample()] -@unittest.skipIf(not subsolvers_available, - 'Required subsolvers %s are not available' - % (required_solvers,)) +@unittest.skipIf( + not subsolvers_available, + 'Required subsolvers %s are not available' % (required_solvers,), +) class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" def check_optimal_solution(self, model, places=1): for var in model.optimal_solution: - self.assertAlmostEqual(var.value, model.optimal_solution[var], places=places) + self.assertAlmostEqual( + var.value, model.optimal_solution[var], places=places + ) def test_ROA_L1(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_regularization='level_L1') + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_regularization='level_L1', + ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_ROA_L2(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_regularization='level_L2') + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_regularization='level_L2', + ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_ROA_Linf(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_regularization='level_L_infinity') + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_regularization='level_L_infinity', + ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_ROA_grad_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_regularization='grad_lag') + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_regularization='grad_lag', + ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_ROA_hess_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_regularization='hess_lag') + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_regularization='hess_lag', + ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_ROA_hess_only_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_regularization='hess_only_lag') + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_regularization='hess_only_lag', + ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_ROA_sqp_lag(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_regularization='sqp_lag') + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_regularization='sqp_lag', + ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_ROA_sqp_lag_equality_relaxation(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_regularization='sqp_lag', - equality_relaxation=True, - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_regularization='sqp_lag', + equality_relaxation=True, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_ROA_sqp_lag_add_no_good_cuts(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_regularization='sqp_lag', - equality_relaxation=True, - add_no_good_cuts=True, - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_regularization='sqp_lag', + equality_relaxation=True, + add_no_good_cuts=True, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) def test_ROA_sqp_lag_level_coef(self): """Test the LP/NLP decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - add_regularization='sqp_lag', - equality_relaxation=True, - level_coef=0.4 - ) - - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + add_regularization='sqp_lag', + equality_relaxation=True, + level_coef=0.4, + ) + + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=1) + value(model.objective.expr), model.optimal_value, places=1 + ) self.check_optimal_solution(model) diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py index e38ab8f7770..e8ad85ad9bc 100644 --- a/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py +++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_solution_pool.py @@ -1,97 +1,129 @@ """Tests for solution pool in the MindtPy solver.""" from pyomo.core.expr.calculus.diff_with_sympy import differentiate_available import pyomo.common.unittest as unittest -from pyomo.contrib.mindtpy.tests.eight_process_problem import \ - EightProcessFlowsheet +from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2 -from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample +from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ( + ConstraintQualificationExample, +) from pyomo.environ import SolverFactory, value, maximize from pyomo.opt import TerminationCondition -model_list = [EightProcessFlowsheet(convex=True), - ConstraintQualificationExample(), - SimpleMINLP2(), - ] +model_list = [ + EightProcessFlowsheet(convex=True), + ConstraintQualificationExample(), + SimpleMINLP2(), +] try: import cplex + cplexpy_available = True except ImportError: cplexpy_available = False required_solvers = ('ipopt', 'cplex_persistent', 'gurobi_persistent') ipopt_available = SolverFactory('ipopt').available() -cplex_persistent_available = SolverFactory( - 'cplex_persistent').available(exception_flag=False) -gurobi_persistent_available = SolverFactory( - 'gurobi_persistent').available(exception_flag=False) +cplex_persistent_available = SolverFactory('cplex_persistent').available( + exception_flag=False +) +gurobi_persistent_available = SolverFactory('gurobi_persistent').available( + exception_flag=False +) -@unittest.skipIf(not differentiate_available, - 'Symbolic differentiation is not available') +@unittest.skipIf( + not differentiate_available, 'Symbolic differentiation is not available' +) class TestMindtPy(unittest.TestCase): """Tests for the MindtPy solver plugin.""" def check_optimal_solution(self, model, places=1): for var in model.optimal_solution: - self.assertAlmostEqual(var.value, model.optimal_solution[var], places=places) + self.assertAlmostEqual( + var.value, model.optimal_solution[var], places=places + ) - @unittest.skipIf(not(ipopt_available and cplex_persistent_available and cplexpy_available), - 'Required subsolvers are not available') + @unittest.skipIf( + not (ipopt_available and cplex_persistent_available and cplexpy_available), + 'Required subsolvers are not available', + ) def test_OA_solution_pool_cplex(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - init_strategy='rNLP', - solution_pool=True, - mip_solver=required_solvers[1], - nlp_solver=required_solvers[0], - ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='rNLP', + solution_pool=True, + mip_solver=required_solvers[1], + nlp_solver=required_solvers[0], + ) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=2) + value(model.objective.expr), model.optimal_value, places=2 + ) self.check_optimal_solution(model) - @unittest.skipIf(not(ipopt_available and gurobi_persistent_available), - 'Required subsolvers are not available') + @unittest.skipIf( + not (ipopt_available and gurobi_persistent_available), + 'Required subsolvers are not available', + ) def test_OA_solution_pool_gurobi(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - init_strategy='rNLP', - solution_pool=True, - mip_solver=required_solvers[2], - nlp_solver=required_solvers[0], - ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='rNLP', + solution_pool=True, + mip_solver=required_solvers[2], + nlp_solver=required_solvers[0], + ) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=2) + value(model.objective.expr), model.optimal_value, places=2 + ) self.check_optimal_solution(model) # the following tests are used to increase the code coverage - @unittest.skipIf(not(ipopt_available and cplex_persistent_available), - 'Required subsolvers are not available') + @unittest.skipIf( + not (ipopt_available and cplex_persistent_available), + 'Required subsolvers are not available', + ) def test_OA_solution_pool_coverage1(self): """Test the outer approximation decomposition algorithm.""" with SolverFactory('mindtpy') as opt: for model in model_list: - results = opt.solve(model, strategy='OA', - init_strategy='rNLP', - solution_pool=True, - mip_solver='glpk', - nlp_solver=required_solvers[0], - num_solution_iteration=1 - ) - self.assertIn(results.solver.termination_condition, - [TerminationCondition.optimal, TerminationCondition.feasible]) + model = model.clone() + results = opt.solve( + model, + strategy='OA', + init_strategy='rNLP', + solution_pool=True, + mip_solver='glpk', + nlp_solver=required_solvers[0], + num_solution_iteration=1, + ) + self.assertIn( + results.solver.termination_condition, + [TerminationCondition.optimal, TerminationCondition.feasible], + ) self.assertAlmostEqual( - value(model.objective.expr), model.optimal_value, places=2) + value(model.objective.expr), model.optimal_value, places=2 + ) self.check_optimal_solution(model) diff --git a/pyomo/contrib/mindtpy/tests/unit_test.py b/pyomo/contrib/mindtpy/tests/unit_test.py deleted file mode 100644 index 6bc97a7859b..00000000000 --- a/pyomo/contrib/mindtpy/tests/unit_test.py +++ /dev/null @@ -1,363 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -"""Unit tests for the MindtPy solver.""" -import pyomo.common.unittest as unittest -from pyomo.contrib.mindtpy.tests.eight_process_problem import \ - EightProcessFlowsheet -from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP -from pyomo.environ import SolverFactory, maximize -from pyomo.solvers.tests.models.LP_unbounded import LP_unbounded -from pyomo.solvers.tests.models.QCP_simple import QCP_simple -from pyomo.contrib.mindtpy.config_options import _get_MindtPy_config, check_config -from pyomo.contrib.mindtpy.util import get_primal_integral, get_dual_integral, set_up_solve_data, add_feas_slacks, set_solver_options -from pyomo.contrib.mindtpy.nlp_solve import handle_subproblem_other_termination, handle_feasibility_subproblem_tc, solve_subproblem, handle_nlp_subproblem_tc -from pyomo.core.base import TransformationFactory -from pyomo.opt import TerminationCondition as tc -from pyomo.contrib.gdpopt.util import time_code -from pyomo.contrib.mindtpy.util import create_utility_block, process_objective, setup_results_object -from pyomo.contrib.mindtpy.initialization import MindtPy_initialize_main, init_rNLP -from pyomo.contrib.mindtpy.feasibility_pump import generate_norm_constraint, handle_fp_main_tc -from pyomo.core import Block, ConstraintList -from pyomo.contrib.mindtpy.mip_solve import solve_main, handle_main_other_conditions -from pyomo.opt import SolutionStatus, SolverStatus -from pyomo.core import (Constraint, Objective, - TransformationFactory, minimize, Var, RangeSet, NonNegativeReals) -from pyomo.contrib.mindtpy.iterate import algorithm_should_terminate - -nonconvex_model_list = [EightProcessFlowsheet(convex=False)] - -LP_model = LP_unbounded() -LP_model._generate_model() - -QCP_model = QCP_simple() -QCP_model._generate_model() -extreme_model_list = [LP_model.model, QCP_model.model] - -required_solvers = ('ipopt', 'glpk') -# required_solvers = ('gams', 'gams') -if all(SolverFactory(s).available() for s in required_solvers): - subsolvers_available = True -else: - subsolvers_available = False - - -@unittest.skipIf(not subsolvers_available, - 'Required subsolvers %s are not available' - % (required_solvers,)) -class TestMindtPy(unittest.TestCase): - """Tests for the MindtPy solver plugin.""" - - def test_handle_termination_condition(self): - """Test the outer approximation decomposition algorithm.""" - model = SimpleMINLP() - config = _get_MindtPy_config() - solve_data = set_up_solve_data(model, config) - with time_code(solve_data.timing, 'total', is_main_timer=True), \ - create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data): - - MindtPy = solve_data.working_model.MindtPy_utils - - MindtPy = solve_data.working_model.MindtPy_utils - setup_results_object(solve_data, config) - process_objective(solve_data, config, - move_objective=(config.init_strategy == 'FP' - or config.add_regularization is not None), - use_mcpp=config.use_mcpp, - update_var_con_list=config.add_regularization is None - ) - feas = MindtPy.feas_opt = Block() - feas.deactivate() - feas.feas_constraints = ConstraintList( - doc='Feasibility Problem Constraints') - - lin = MindtPy.cuts = Block() - lin.deactivate() - - if config.feasibility_norm == 'L1' or config.feasibility_norm == 'L2': - feas.nl_constraint_set = RangeSet(len(MindtPy.nonlinear_constraint_list), - doc='Integer index set over the nonlinear constraints.') - # Create slack variables for feasibility problem - feas.slack_var = Var(feas.nl_constraint_set, - domain=NonNegativeReals, initialize=1) - else: - feas.slack_var = Var(domain=NonNegativeReals, initialize=1) - - # no-good cuts exclude particular discrete decisions - lin.no_good_cuts = ConstraintList(doc='no-good cuts') - - fixed_nlp = solve_data.working_model.clone() - TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp) - - MindtPy_initialize_main(solve_data, config) - - # test handle_subproblem_other_termination - termination_condition = tc.maxIterations - config.add_no_good_cuts = True - handle_subproblem_other_termination(fixed_nlp, termination_condition, - solve_data, config) - self.assertEqual( - len(solve_data.mip.MindtPy_utils.cuts.no_good_cuts), 1) - - # test handle_main_other_conditions - main_mip, main_mip_results = solve_main(solve_data, config) - main_mip_results.solver.termination_condition = tc.infeasible - handle_main_other_conditions( - solve_data.mip, main_mip_results, solve_data, config) - self.assertIs( - solve_data.results.solver.termination_condition, tc.feasible) - - main_mip_results.solver.termination_condition = tc.unbounded - handle_main_other_conditions( - solve_data.mip, main_mip_results, solve_data, config) - self.assertIn(main_mip.MindtPy_utils.objective_bound, - main_mip.component_data_objects(ctype=Constraint)) - - main_mip.MindtPy_utils.del_component('objective_bound') - main_mip_results.solver.termination_condition = tc.infeasibleOrUnbounded - handle_main_other_conditions( - solve_data.mip, main_mip_results, solve_data, config) - self.assertIn(main_mip.MindtPy_utils.objective_bound, - main_mip.component_data_objects(ctype=Constraint)) - - main_mip_results.solver.termination_condition = tc.maxTimeLimit - handle_main_other_conditions( - solve_data.mip, main_mip_results, solve_data, config) - self.assertIs( - solve_data.results.solver.termination_condition, tc.maxTimeLimit) - - main_mip_results.solver.termination_condition = tc.other - main_mip_results.solution.status = SolutionStatus.feasible - handle_main_other_conditions( - solve_data.mip, main_mip_results, solve_data, config) - for v1, v2 in zip(main_mip.MindtPy_utils.variable_list, solve_data.working_model.MindtPy_utils.variable_list): - self.assertEqual(v1.value, v2.value) - - # test handle_feasibility_subproblem_tc - feas_subproblem = solve_data.working_model.clone() - add_feas_slacks(feas_subproblem, config) - MindtPy = feas_subproblem.MindtPy_utils - MindtPy.feas_opt.activate() - if config.feasibility_norm == 'L1': - MindtPy.feas_obj = Objective( - expr=sum(s for s in MindtPy.feas_opt.slack_var[...]), - sense=minimize) - elif config.feasibility_norm == 'L2': - MindtPy.feas_obj = Objective( - expr=sum(s*s for s in MindtPy.feas_opt.slack_var[...]), - sense=minimize) - else: - MindtPy.feas_obj = Objective( - expr=MindtPy.feas_opt.slack_var, - sense=minimize) - - handle_feasibility_subproblem_tc( - tc.optimal, MindtPy, solve_data, config) - handle_feasibility_subproblem_tc( - tc.infeasible, MindtPy, solve_data, config) - self.assertIs(solve_data.should_terminate, True) - self.assertIs(solve_data.results.solver.status, SolverStatus.error) - - solve_data.should_terminate = False - solve_data.results.solver.status = None - handle_feasibility_subproblem_tc( - tc.maxIterations, MindtPy, solve_data, config) - self.assertIs(solve_data.should_terminate, True) - self.assertIs(solve_data.results.solver.status, SolverStatus.error) - - solve_data.should_terminate = False - solve_data.results.solver.status = None - handle_feasibility_subproblem_tc( - tc.solverFailure, MindtPy, solve_data, config) - self.assertIs(solve_data.should_terminate, True) - self.assertIs(solve_data.results.solver.status, SolverStatus.error) - - # test NLP subproblem infeasible - solve_data.working_model.Y[1].value = 0 - solve_data.working_model.Y[2].value = 0 - solve_data.working_model.Y[3].value = 0 - fixed_nlp, fixed_nlp_results = solve_subproblem(solve_data, config) - solve_data.working_model.Y[1].value = None - solve_data.working_model.Y[2].value = None - solve_data.working_model.Y[3].value = None - - # test handle_nlp_subproblem_tc - fixed_nlp_results.solver.termination_condition = tc.maxTimeLimit - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_results, solve_data, config) - self.assertIs(solve_data.should_terminate, True) - self.assertIs( - solve_data.results.solver.termination_condition, tc.maxTimeLimit) - - fixed_nlp_results.solver.termination_condition = tc.maxEvaluations - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_results, solve_data, config) - self.assertIs(solve_data.should_terminate, True) - self.assertIs( - solve_data.results.solver.termination_condition, tc.maxEvaluations) - - fixed_nlp_results.solver.termination_condition = tc.maxIterations - handle_nlp_subproblem_tc( - fixed_nlp, fixed_nlp_results, solve_data, config) - self.assertIs(solve_data.should_terminate, True) - self.assertIs( - solve_data.results.solver.termination_condition, tc.maxEvaluations) - - # test handle_fp_main_tc - config.init_strategy = 'FP' - solve_data.fp_iter = 1 - init_rNLP(solve_data, config) - feas_main, feas_main_results = solve_main( - solve_data, config, fp=True) - feas_main_results.solver.termination_condition = tc.optimal - fp_should_terminate = handle_fp_main_tc( - feas_main_results, solve_data, config) - self.assertIs(fp_should_terminate, False) - - feas_main_results.solver.termination_condition = tc.maxTimeLimit - fp_should_terminate = handle_fp_main_tc( - feas_main_results, solve_data, config) - self.assertIs(fp_should_terminate, True) - self.assertIs( - solve_data.results.solver.termination_condition, tc.maxTimeLimit) - - feas_main_results.solver.termination_condition = tc.infeasible - fp_should_terminate = handle_fp_main_tc( - feas_main_results, solve_data, config) - self.assertIs(fp_should_terminate, True) - - feas_main_results.solver.termination_condition = tc.unbounded - fp_should_terminate = handle_fp_main_tc( - feas_main_results, solve_data, config) - self.assertIs(fp_should_terminate, True) - - feas_main_results.solver.termination_condition = tc.other - feas_main_results.solution.status = SolutionStatus.feasible - fp_should_terminate = handle_fp_main_tc( - feas_main_results, solve_data, config) - self.assertIs(fp_should_terminate, False) - - feas_main_results.solver.termination_condition = tc.solverFailure - fp_should_terminate = handle_fp_main_tc( - feas_main_results, solve_data, config) - self.assertIs(fp_should_terminate, True) - - # test generate_norm_constraint - fp_nlp = solve_data.working_model.clone() - config.fp_main_norm = 'L1' - generate_norm_constraint(fp_nlp, solve_data, config) - self.assertIsNotNone(fp_nlp.MindtPy_utils.find_component( - 'L1_norm_constraint')) - - config.fp_main_norm = 'L2' - generate_norm_constraint(fp_nlp, solve_data, config) - self.assertIsNotNone(fp_nlp.find_component('norm_constraint')) - - fp_nlp.del_component('norm_constraint') - config.fp_main_norm = 'L_infinity' - generate_norm_constraint(fp_nlp, solve_data, config) - self.assertIsNotNone(fp_nlp.find_component('norm_constraint')) - - # test set_solver_options - config.mip_solver = 'gams' - config.threads = 1 - opt = SolverFactory(config.mip_solver) - set_solver_options(opt, solve_data, config, - 'mip', regularization=False) - - config.mip_solver = 'gurobi' - config.mip_regularization_solver = 'gurobi' - config.regularization_mip_threads = 1 - opt = SolverFactory(config.mip_solver) - set_solver_options(opt, solve_data, config, - 'mip', regularization=True) - - config.nlp_solver = 'gams' - config.nlp_solver_args['solver'] = 'ipopt' - set_solver_options(opt, solve_data, config, - 'nlp', regularization=False) - - config.nlp_solver_args['solver'] = 'ipopth' - set_solver_options(opt, solve_data, config, - 'nlp', regularization=False) - - config.nlp_solver_args['solver'] = 'conopt' - set_solver_options(opt, solve_data, config, - 'nlp', regularization=False) - - config.nlp_solver_args['solver'] = 'msnlp' - set_solver_options(opt, solve_data, config, - 'nlp', regularization=False) - - config.nlp_solver_args['solver'] = 'baron' - set_solver_options(opt, solve_data, config, - 'nlp', regularization=False) - - # test algorithm_should_terminate - solve_data.should_terminate = True - solve_data.primal_bound = float('inf') - self.assertIs(algorithm_should_terminate( - solve_data, config, check_cycling=False), True) - self.assertIs( - solve_data.results.solver.termination_condition, tc.noSolution) - - solve_data.primal_bound = 100 - self.assertIs(algorithm_should_terminate( - solve_data, config, check_cycling=False), True) - self.assertIs( - solve_data.results.solver.termination_condition, tc.feasible) - - solve_data.primal_bound_progress = [float('inf'), 5, 4, 3, 2, 1] - solve_data.primal_bound_progress_time = [1, 2, 3, 4, 5, 6] - solve_data.primal_bound = 1 - self.assertEqual(get_primal_integral(solve_data, config), 14.5) - - solve_data.dual_bound_progress = [float('-inf'), 1, 2, 3, 4, 5] - solve_data.dual_bound_progress_time = [1, 2, 3, 4, 5, 6] - solve_data.dual_bound = 5 - self.assertEqual(get_dual_integral(solve_data, config), 14.1) - - # test check_config - config.add_regularization = 'level_L1' - config.regularization_mip_threads = 0 - config.threads = 8 - check_config(config) - self.assertEqual(config.regularization_mip_threads, 8) - - config.strategy = 'GOA' - config.add_slack = True - config.use_mcpp = False - config.equality_relaxation = True - config.use_fbbt = False - config.add_no_good_cuts = False - config.use_tabu_list = False - check_config(config) - self.assertTrue(config.use_mcpp) - self.assertTrue(config.use_fbbt) - self.assertFalse(config.add_slack) - self.assertFalse(config.equality_relaxation) - self.assertTrue(config.add_no_good_cuts) - self.assertFalse(config.use_tabu_list) - - config.single_tree = False - config.strategy = 'FP' - config.init_strategy = 'rNLP' - config.iteration_limit = 100 - config.add_no_good_cuts = False - config.use_tabu_list = True - check_config(config) - self.assertEqual(config.init_strategy, 'FP') - self.assertEqual(config.iteration_limit, 0) - self.assertEqual(config.add_no_good_cuts, True) - self.assertEqual(config.use_tabu_list, False) - -if __name__ == '__main__': - unittest.main() diff --git a/pyomo/contrib/mindtpy/util.py b/pyomo/contrib/mindtpy/util.py index 63ec470f0e4..e336715cc8f 100644 --- a/pyomo/contrib/mindtpy/util.py +++ b/pyomo/contrib/mindtpy/util.py @@ -10,25 +10,28 @@ # ___________________________________________________________________________ """Utility functions and classes for the MindtPy solver.""" -from __future__ import division -from contextlib import contextmanager import logging -from pyomo.common.collections import ComponentMap, Bunch, ComponentSet -from pyomo.core import (Block, Constraint, VarList, - Objective, Reals, Suffix, Var, minimize, RangeSet, ConstraintList, TransformationFactory) -from pyomo.gdp import Disjunct, Disjunction +from pyomo.common.collections import ComponentMap +from pyomo.core import ( + Block, + Constraint, + VarList, + Objective, + Reals, + Var, + minimize, + RangeSet, + ConstraintList, + TransformationFactory, +) from pyomo.repn import generate_standard_repn from pyomo.contrib.mcpp.pyomo_mcpp import mcpp_available, McCormick from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr -from pyomo.core.expr import differentiate -from pyomo.core.expr import current as EXPR -from pyomo.opt import SolverFactory, SolverResults, ProblemSense -from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver +import pyomo.core.expr as EXPR +from pyomo.opt import ProblemSense from pyomo.contrib.gdpopt.util import get_main_elapsed_time, time_code from pyomo.util.model_size import build_model_size_report -from pyomo.core.expr.calculus.derivatives import differentiate from pyomo.common.dependencies import attempt_import -from pyomo.contrib.fbbt.fbbt import fbbt from pyomo.solvers.plugins.solvers.gurobi_direct import gurobipy from pyomo.solvers.plugins.solvers.gurobi_persistent import GurobiPersistent import math @@ -37,102 +40,38 @@ numpy = attempt_import('numpy')[0] -class MindtPySolveData(object): - """Data container to hold solve-instance data. - """ - pass - - -def model_is_valid(solve_data, config): - """Determines whether the model is solvable by MindtPy. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - config : ConfigBlock - The specific configurations for MindtPy. - - Returns - ------- - bool - True if model is solvable in MindtPy, False otherwise. - """ - m = solve_data.working_model - MindtPy = m.MindtPy_utils - - # Handle LP/NLP being passed to the solver - prob = solve_data.results.problem - if len(MindtPy.discrete_variable_list) == 0: - config.logger.info('Problem has no discrete decisions.') - obj = next(m.component_data_objects(ctype=Objective, active=True)) - if (any(c.body.polynomial_degree() not in solve_data.mip_constraint_polynomial_degree for c in MindtPy.constraint_list) or - obj.expr.polynomial_degree() not in solve_data.mip_objective_polynomial_degree): - config.logger.info( - 'Your model is a NLP (nonlinear program). ' - 'Using NLP solver %s to solve.' % config.nlp_solver) - nlpopt = SolverFactory(config.nlp_solver) - set_solver_options(nlpopt, solve_data, config, solver_type='nlp') - nlpopt.solve(solve_data.original_model, - tee=config.nlp_solver_tee, **config.nlp_solver_args) - return False - else: - config.logger.info( - 'Your model is an LP (linear program). ' - 'Using LP solver %s to solve.' % config.mip_solver) - mainopt = SolverFactory(config.mip_solver) - if isinstance(mainopt, PersistentSolver): - mainopt.set_instance(solve_data.original_model) - set_solver_options(mainopt, solve_data, - config, solver_type='mip') - results = mainopt.solve(solve_data.original_model, - tee=config.mip_solver_tee, - load_solutions=False, - **config.mip_solver_args - ) - if len(results.solution) > 0: - solve_data.original_model.solutions.load_from(results) - return False - - if not hasattr(m, 'dual') and config.calculate_dual_at_solution: # Set up dual value reporting - m.dual = Suffix(direction=Suffix.IMPORT) - - # TODO if any continuous variables are multiplied with binary ones, - # need to do some kind of transformation (Glover?) or throw an error message - return True - - -def calc_jacobians(solve_data, config): +def calc_jacobians(model, config): """Generates a map of jacobians for the variables in the model. This function generates a map of jacobians corresponding to the variables in the - model and adds this ComponentMap to solve_data. + model. Parameters ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. + model : Pyomo model + Target model to calculate jacobian. config : ConfigBlock The specific configurations for MindtPy. """ # Map nonlinear_constraint --> Map( - # variable --> jacobian of constraint wrt. variable) - solve_data.jacobians = ComponentMap() + # variable --> jacobian of constraint w.r.t. variable) + jacobians = ComponentMap() if config.differentiate_mode == 'reverse_symbolic': - mode = differentiate.Modes.reverse_symbolic + mode = EXPR.differentiate.Modes.reverse_symbolic elif config.differentiate_mode == 'sympy': - mode = differentiate.Modes.sympy - for c in solve_data.mip.MindtPy_utils.nonlinear_constraint_list: + mode = EXPR.differentiate.Modes.sympy + for c in model.MindtPy_utils.nonlinear_constraint_list: vars_in_constr = list(EXPR.identify_variables(c.body)) - jac_list = differentiate( - c.body, wrt_list=vars_in_constr, mode=mode) - solve_data.jacobians[c] = ComponentMap( - (var, jac_wrt_var) - for var, jac_wrt_var in zip(vars_in_constr, jac_list)) + jac_list = EXPR.differentiate(c.body, wrt_list=vars_in_constr, mode=mode) + jacobians[c] = ComponentMap( + (var, jac_wrt_var) for var, jac_wrt_var in zip(vars_in_constr, jac_list) + ) + return jacobians -def add_feas_slacks(m, config): +def initialize_feas_subproblem(m, config): """Adds feasibility slack variables according to config.feasibility_norm (given an infeasible problem). + Defines the objective function of the feasibility subproblem. Parameters ---------- @@ -147,38 +86,49 @@ def add_feas_slacks(m, config): if constr.has_ub(): if config.feasibility_norm in {'L1', 'L2'}: MindtPy.feas_opt.feas_constraints.add( - constr.body - constr.upper - <= MindtPy.feas_opt.slack_var[i]) + constr.body - constr.upper <= MindtPy.feas_opt.slack_var[i] + ) else: MindtPy.feas_opt.feas_constraints.add( - constr.body - constr.upper - <= MindtPy.feas_opt.slack_var) + constr.body - constr.upper <= MindtPy.feas_opt.slack_var + ) if constr.has_lb(): if config.feasibility_norm in {'L1', 'L2'}: MindtPy.feas_opt.feas_constraints.add( - constr.body - constr.lower - >= -MindtPy.feas_opt.slack_var[i]) + constr.body - constr.lower >= -MindtPy.feas_opt.slack_var[i] + ) else: MindtPy.feas_opt.feas_constraints.add( - constr.body - constr.lower - >= -MindtPy.feas_opt.slack_var) + constr.body - constr.lower >= -MindtPy.feas_opt.slack_var + ) + # Setup objective function for the feasibility subproblem. + if config.feasibility_norm == 'L1': + MindtPy.feas_obj = Objective( + expr=sum(s for s in MindtPy.feas_opt.slack_var.values()), sense=minimize + ) + elif config.feasibility_norm == 'L2': + MindtPy.feas_obj = Objective( + expr=sum(s * s for s in MindtPy.feas_opt.slack_var.values()), sense=minimize + ) + else: + MindtPy.feas_obj = Objective(expr=MindtPy.feas_opt.slack_var, sense=minimize) + MindtPy.feas_obj.deactivate() -def add_var_bound(solve_data, config): +def add_var_bound(model, config): """This function will add bounds for variables in nonlinear constraints if they are not bounded. - This is to avoid an unbounded main problem in the LP/NLP algorithm. Thus, the model will be + This is to avoid an unbounded main problem in the LP/NLP algorithm. Thus, the model will be updated to include bounds for the unbounded variables in nonlinear constraints. Parameters ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. + model : PyomoModel + Target model to add bound for its variables. config : ConfigBlock The specific configurations for MindtPy. """ - m = solve_data.working_model - MindtPy = m.MindtPy_utils + MindtPy = model.MindtPy_utils for c in MindtPy.nonlinear_constraint_list: for var in EXPR.identify_variables(c.body): if var.has_lb() and var.has_ub(): @@ -196,7 +146,8 @@ def add_var_bound(solve_data, config): def generate_norm2sq_objective_function(model, setpoint_model, discrete_only=False): - r"""This function generates objective (FP-NLP subproblem) for minimum euclidean distance to setpoint_model. + r"""This function generates objective (FP-NLP subproblem) for minimum + euclidean distance to setpoint_model. L2 distance of (x,y) = \sqrt{\sum_i (x_i - y_i)^2}. @@ -207,7 +158,8 @@ def generate_norm2sq_objective_function(model, setpoint_model, discrete_only=Fal setpoint_model : Pyomo model The model that provides the base point for us to calculate the distance. discrete_only : bool, optional - Whether to only optimize on distance between the discrete variables, by default False. + Whether to only optimize on distance between the discrete + variables, by default False. Returns ------- @@ -215,24 +167,43 @@ def generate_norm2sq_objective_function(model, setpoint_model, discrete_only=Fal The norm2 square objective function. """ # skip objective_value variable and slack_var variables - var_filter = (lambda v: v[1].is_integer()) if discrete_only \ - else (lambda v: 'MindtPy_utils.objective_value' not in v[1].name and - 'MindtPy_utils.feas_opt.slack_var' not in v[1].name) - - model_vars, setpoint_vars = zip(*filter(var_filter, - zip(model.MindtPy_utils.variable_list, - setpoint_model.MindtPy_utils.variable_list))) + var_filter = ( + (lambda v: v[1].is_integer()) + if discrete_only + else ( + lambda v: 'MindtPy_utils.objective_value' not in v[1].name + and 'MindtPy_utils.feas_opt.slack_var' not in v[1].name + ) + ) + + model_vars, setpoint_vars = zip( + *filter( + var_filter, + zip( + model.MindtPy_utils.variable_list, + setpoint_model.MindtPy_utils.variable_list, + ), + ) + ) assert len(model_vars) == len( - setpoint_vars), 'Trying to generate Squared Norm2 objective function for models with different number of variables' + setpoint_vars + ), 'Trying to generate Squared Norm2 objective function for models with different number of variables' - return Objective(expr=( - sum([(model_var - setpoint_var.value)**2 - for (model_var, setpoint_var) in - zip(model_vars, setpoint_vars)]))) + return Objective( + expr=( + sum( + [ + (model_var - setpoint_var.value) ** 2 + for (model_var, setpoint_var) in zip(model_vars, setpoint_vars) + ] + ) + ) + ) def generate_norm1_objective_function(model, setpoint_model, discrete_only=False): - r"""This function generates objective (PF-OA main problem) for minimum Norm1 distance to setpoint_model. + r"""This function generates objective (PF-OA main problem) for minimum + Norm1 distance to setpoint_model. Norm1 distance of (x,y) = \sum_i |x_i - y_i|. @@ -243,36 +214,47 @@ def generate_norm1_objective_function(model, setpoint_model, discrete_only=False setpoint_model : Pyomo model The model that provides the base point for us to calculate the distance. discrete_only : bool, optional - Whether to only optimize on distance between the discrete variables, by default False. + Whether to only optimize on distance between the discrete + variables, by default False. Returns ------- Objective The norm1 objective function. + """ # skip objective_value variable and slack_var variables - var_filter = (lambda v: v.is_integer()) if discrete_only \ - else (lambda v: 'MindtPy_utils.objective_value' not in v.name and - 'MindtPy_utils.feas_opt.slack_var' not in v.name) + var_filter = ( + (lambda v: v.is_integer()) + if discrete_only + else ( + lambda v: 'MindtPy_utils.objective_value' not in v.name + and 'MindtPy_utils.feas_opt.slack_var' not in v.name + ) + ) model_vars = list(filter(var_filter, model.MindtPy_utils.variable_list)) - setpoint_vars = list( - filter(var_filter, setpoint_model.MindtPy_utils.variable_list)) + setpoint_vars = list(filter(var_filter, setpoint_model.MindtPy_utils.variable_list)) assert len(model_vars) == len( - setpoint_vars), 'Trying to generate Norm1 objective function for models with different number of variables' + setpoint_vars + ), 'Trying to generate Norm1 objective function for models with different number of variables' model.MindtPy_utils.del_component('L1_obj') - obj_blk = model.MindtPy_utils.L1_obj = Block() - obj_blk.L1_obj_idx = RangeSet(len(model_vars)) - obj_blk.L1_obj_var = Var( - obj_blk.L1_obj_idx, domain=Reals, bounds=(0, None)) - obj_blk.abs_reform = ConstraintList() - for idx, v_model, v_setpoint in zip(obj_blk.L1_obj_idx, model_vars, - setpoint_vars): - obj_blk.abs_reform.add( - expr=v_model - v_setpoint.value >= -obj_blk.L1_obj_var[idx]) - obj_blk.abs_reform.add( - expr=v_model - v_setpoint.value <= obj_blk.L1_obj_var[idx]) - - return Objective(expr=sum(obj_blk.L1_obj_var[idx] for idx in obj_blk.L1_obj_idx)) + obj_block = model.MindtPy_utils.L1_obj = Block() + obj_block.L1_obj_idx = RangeSet(len(model_vars)) + obj_block.L1_obj_var = Var(obj_block.L1_obj_idx, domain=Reals, bounds=(0, None)) + obj_block.abs_reform = ConstraintList() + for idx, v_model, v_setpoint in zip( + obj_block.L1_obj_idx, model_vars, setpoint_vars + ): + obj_block.abs_reform.add( + expr=v_model - v_setpoint.value >= -obj_block.L1_obj_var[idx] + ) + obj_block.abs_reform.add( + expr=v_model - v_setpoint.value <= obj_block.L1_obj_var[idx] + ) + + return Objective( + expr=sum(obj_block.L1_obj_var[idx] for idx in obj_block.L1_obj_idx) + ) def generate_norm_inf_objective_function(model, setpoint_model, discrete_only=False): @@ -295,29 +277,37 @@ def generate_norm_inf_objective_function(model, setpoint_model, discrete_only=Fa The norm infinity objective function. """ # skip objective_value variable and slack_var variables - var_filter = (lambda v: v.is_integer()) if discrete_only \ - else (lambda v: 'MindtPy_utils.objective_value' not in v.name and - 'MindtPy_utils.feas_opt.slack_var' not in v.name) + var_filter = ( + (lambda v: v.is_integer()) + if discrete_only + else ( + lambda v: 'MindtPy_utils.objective_value' not in v.name + and 'MindtPy_utils.feas_opt.slack_var' not in v.name + ) + ) model_vars = list(filter(var_filter, model.MindtPy_utils.variable_list)) - setpoint_vars = list( - filter(var_filter, setpoint_model.MindtPy_utils.variable_list)) + setpoint_vars = list(filter(var_filter, setpoint_model.MindtPy_utils.variable_list)) assert len(model_vars) == len( - setpoint_vars), 'Trying to generate Norm Infinity objective function for models with different number of variables' + setpoint_vars + ), 'Trying to generate Norm Infinity objective function for models with different number of variables' model.MindtPy_utils.del_component('L_infinity_obj') - obj_blk = model.MindtPy_utils.L_infinity_obj = Block() - obj_blk.L_infinity_obj_var = Var(domain=Reals, bounds=(0, None)) - obj_blk.abs_reform = ConstraintList() - for v_model, v_setpoint in zip(model_vars, - setpoint_vars): - obj_blk.abs_reform.add( - expr=v_model - v_setpoint.value >= -obj_blk.L_infinity_obj_var) - obj_blk.abs_reform.add( - expr=v_model - v_setpoint.value <= obj_blk.L_infinity_obj_var) - - return Objective(expr=obj_blk.L_infinity_obj_var) - - -def generate_lag_objective_function(model, setpoint_model, config, solve_data, discrete_only=False): + obj_block = model.MindtPy_utils.L_infinity_obj = Block() + obj_block.L_infinity_obj_var = Var(domain=Reals, bounds=(0, None)) + obj_block.abs_reform = ConstraintList() + for v_model, v_setpoint in zip(model_vars, setpoint_vars): + obj_block.abs_reform.add( + expr=v_model - v_setpoint.value >= -obj_block.L_infinity_obj_var + ) + obj_block.abs_reform.add( + expr=v_model - v_setpoint.value <= obj_block.L_infinity_obj_var + ) + + return Objective(expr=obj_block.L_infinity_obj_var) + + +def generate_lag_objective_function( + model, setpoint_model, config, timing, discrete_only=False +): """The function generates the second-order Taylor approximation of the Lagrangean. Parameters @@ -328,15 +318,15 @@ def generate_lag_objective_function(model, setpoint_model, config, solve_data, d The model that provides the base point for us to calculate the distance. config : ConfigBlock The specific configurations for MindtPy. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + timing : Timing + Timing discrete_only : bool, optional Whether to only optimize on distance between the discrete variables, by default False. Returns ------- Objective - The taylor extension(1st order or 2nd order) of the Lagrangean function. + The taylor extension(1st order or 2nd order) of the Lagrangean function. """ temp_model = setpoint_model.clone() for var in temp_model.MindtPy_utils.variable_list: @@ -351,10 +341,14 @@ def generate_lag_objective_function(model, setpoint_model, config, solve_data, d # Implementation 1 # First calculate Jacobian and Hessian without assigning variable and constraint sequence, then use get_primal_indices to get the indices. - with time_code(solve_data.timing, 'PyomoNLP'): + with time_code(timing, 'PyomoNLP'): nlp = pyomo_nlp.PyomoNLP(temp_model) - lam = [-temp_model.dual[constr] if abs(temp_model.dual[constr]) > config.zero_tolerance else 0 - for constr in nlp.get_pyomo_constraints()] + lam = [ + -temp_model.dual[constr] + if abs(temp_model.dual[constr]) > config.zero_tolerance + else 0 + for constr in nlp.get_pyomo_constraints() + ] nlp.set_duals(lam) obj_grad = nlp.evaluate_grad_objective().reshape(-1, 1) jac = nlp.evaluate_jacobian().toarray() @@ -365,8 +359,14 @@ def generate_lag_objective_function(model, setpoint_model, config, solve_data, d if 'MindtPy_utils.objective_value' not in var.name: jac_lag[nlp.get_primal_indices([var])[0]] = 0 nlp_var = set([i.name for i in nlp.get_pyomo_variables()]) - first_order_term = sum(float(jac_lag[nlp.get_primal_indices([temp_var])[0]]) * (var - temp_var.value) for var, - temp_var in zip(model.MindtPy_utils.variable_list, temp_model.MindtPy_utils.variable_list) if temp_var.name in nlp_var) + first_order_term = sum( + jac_lag[nlp.get_primal_indices([temp_var])[0]][0] * (var - temp_var.value) + for var, temp_var in zip( + model.MindtPy_utils.variable_list, + temp_model.MindtPy_utils.variable_list, + ) + if temp_var.name in nlp_var + ) if config.add_regularization == 'grad_lag': return Objective(expr=first_order_term, sense=minimize) @@ -374,41 +374,80 @@ def generate_lag_objective_function(model, setpoint_model, config, solve_data, d # Implementation 1 hess_lag = nlp.evaluate_hessian_lag().toarray() hess_lag[abs(hess_lag) < config.zero_tolerance] = 0 - second_order_term = 0.5 * sum((var_i - temp_var_i.value) * float(hess_lag[nlp.get_primal_indices([temp_var_i])[0]][nlp.get_primal_indices([temp_var_j])[0]]) * (var_j - temp_var_j.value) - for var_i, temp_var_i in zip(model.MindtPy_utils.variable_list, temp_model.MindtPy_utils.variable_list) - for var_j, temp_var_j in zip(model.MindtPy_utils.variable_list, temp_model.MindtPy_utils.variable_list) - if (temp_var_i.name in nlp_var and temp_var_j.name in nlp_var)) + second_order_term = 0.5 * sum( + (var_i - temp_var_i.value) + * float( + hess_lag[nlp.get_primal_indices([temp_var_i])[0]][ + nlp.get_primal_indices([temp_var_j])[0] + ] + ) + * (var_j - temp_var_j.value) + for var_i, temp_var_i in zip( + model.MindtPy_utils.variable_list, + temp_model.MindtPy_utils.variable_list, + ) + for var_j, temp_var_j in zip( + model.MindtPy_utils.variable_list, + temp_model.MindtPy_utils.variable_list, + ) + if (temp_var_i.name in nlp_var and temp_var_j.name in nlp_var) + ) if config.add_regularization == 'hess_lag': - return Objective(expr=first_order_term + second_order_term, sense=minimize) + return Objective( + expr=first_order_term + second_order_term, sense=minimize + ) elif config.add_regularization == 'hess_only_lag': return Objective(expr=second_order_term, sense=minimize) elif config.add_regularization == 'sqp_lag': - var_filter = (lambda v: v[1].is_integer()) if discrete_only \ - else (lambda v: 'MindtPy_utils.objective_value' not in v[1].name and - 'MindtPy_utils.feas_opt.slack_var' not in v[1].name) - - model_vars, setpoint_vars = zip(*filter(var_filter, - zip(model.MindtPy_utils.variable_list, - setpoint_model.MindtPy_utils.variable_list))) + var_filter = ( + (lambda v: v[1].is_integer()) + if discrete_only + else ( + lambda v: 'MindtPy_utils.objective_value' not in v[1].name + and 'MindtPy_utils.feas_opt.slack_var' not in v[1].name + ) + ) + + model_vars, setpoint_vars = zip( + *filter( + var_filter, + zip( + model.MindtPy_utils.variable_list, + setpoint_model.MindtPy_utils.variable_list, + ), + ) + ) assert len(model_vars) == len( - setpoint_vars), 'Trying to generate Squared Norm2 objective function for models with different number of variables' + setpoint_vars + ), 'Trying to generate Squared Norm2 objective function for models with different number of variables' if config.sqp_lag_scaling_coef is None: rho = 1 elif config.sqp_lag_scaling_coef == 'fixed': r = 1 - rho = numpy.linalg.norm(jac_lag/(2*r)) + rho = numpy.linalg.norm(jac_lag / (2 * r)) elif config.sqp_lag_scaling_coef == 'variable_dependent': - r = numpy.sqrt( - len(temp_model.MindtPy_utils.discrete_variable_list)) - rho = numpy.linalg.norm(jac_lag/(2*r)) - - return Objective(expr=first_order_term + rho*sum([(model_var - setpoint_var.value)**2 for (model_var, setpoint_var) in zip(model_vars, setpoint_vars)])) + r = numpy.sqrt(len(temp_model.MindtPy_utils.discrete_variable_list)) + rho = numpy.linalg.norm(jac_lag / (2 * r)) + + return Objective( + expr=first_order_term + + rho + * sum( + [ + (model_var - setpoint_var.value) ** 2 + for (model_var, setpoint_var) in zip(model_vars, setpoint_vars) + ] + ) + ) def generate_norm1_norm_constraint(model, setpoint_model, config, discrete_only=True): - r"""This function generates constraint (PF-OA main problem) for minimum Norm1 distance to setpoint_model. + r"""This function generates constraint (PF-OA main problem) for minimum + Norm1 distance to setpoint_model. + + Norm constraint is used to guarantees the monotonicity of the norm + objective value sequence of all iterations. - Norm constraint is used to guarantees the monotonicity of the norm objective value sequence of all iterations Norm1 distance of (x,y) = \sum_i |x_i - y_i|. Ref: Paper 'A storm of feasibility pumps for nonconvex MINLP' Eq. (16). @@ -421,156 +460,168 @@ def generate_norm1_norm_constraint(model, setpoint_model, config, discrete_only= config : ConfigBlock The specific configurations for MindtPy. discrete_only : bool, optional - Whether to only optimize on distance between the discrete variables, by default True. + Whether to only optimize on distance between the discrete + variables, by default True. + """ - var_filter = (lambda v: v.is_integer()) if discrete_only \ - else (lambda v: True) + var_filter = (lambda v: v.is_integer()) if discrete_only else (lambda v: True) model_vars = list(filter(var_filter, model.MindtPy_utils.variable_list)) - setpoint_vars = list( - filter(var_filter, setpoint_model.MindtPy_utils.variable_list)) + setpoint_vars = list(filter(var_filter, setpoint_model.MindtPy_utils.variable_list)) assert len(model_vars) == len( - setpoint_vars), 'Trying to generate Norm1 norm constraint for models with different number of variables' - norm_constraint_blk = model.MindtPy_utils.L1_norm_constraint = Block() - norm_constraint_blk.L1_slack_idx = RangeSet(len(model_vars)) - norm_constraint_blk.L1_slack_var = Var( - norm_constraint_blk.L1_slack_idx, domain=Reals, bounds=(0, None)) - norm_constraint_blk.abs_reform = ConstraintList() - for idx, v_model, v_setpoint in zip(norm_constraint_blk.L1_slack_idx, model_vars, - setpoint_vars): - norm_constraint_blk.abs_reform.add( - expr=v_model - v_setpoint.value >= -norm_constraint_blk.L1_slack_var[idx]) - norm_constraint_blk.abs_reform.add( - expr=v_model - v_setpoint.value <= norm_constraint_blk.L1_slack_var[idx]) - rhs = config.fp_norm_constraint_coef * \ - sum(abs(v_model.value-v_setpoint.value) - for v_model, v_setpoint in zip(model_vars, setpoint_vars)) - norm_constraint_blk.sum_slack = Constraint( - expr=sum(norm_constraint_blk.L1_slack_var[idx] for idx in norm_constraint_blk.L1_slack_idx) <= rhs) - - -def set_solver_options(opt, solve_data, config, solver_type, regularization=False): - """Set options for MIP/NLP solvers. + setpoint_vars + ), 'Trying to generate Norm1 norm constraint for models with different number of variables' + norm_constraint_block = model.MindtPy_utils.L1_norm_constraint = Block() + norm_constraint_block.L1_slack_idx = RangeSet(len(model_vars)) + norm_constraint_block.L1_slack_var = Var( + norm_constraint_block.L1_slack_idx, domain=Reals, bounds=(0, None) + ) + norm_constraint_block.abs_reform = ConstraintList() + for idx, v_model, v_setpoint in zip( + norm_constraint_block.L1_slack_idx, model_vars, setpoint_vars + ): + norm_constraint_block.abs_reform.add( + expr=v_model - v_setpoint.value >= -norm_constraint_block.L1_slack_var[idx] + ) + norm_constraint_block.abs_reform.add( + expr=v_model - v_setpoint.value <= norm_constraint_block.L1_slack_var[idx] + ) + rhs = config.fp_norm_constraint_coef * sum( + abs(v_model.value - v_setpoint.value) + for v_model, v_setpoint in zip(model_vars, setpoint_vars) + ) + norm_constraint_block.sum_slack = Constraint( + expr=sum( + norm_constraint_block.L1_slack_var[idx] + for idx in norm_constraint_block.L1_slack_idx + ) + <= rhs + ) + + +def update_solver_timelimit(opt, solver_name, timing, config): + """Updates the time limit for subsolvers. Parameters ---------- - opt : SolverFactory - The MIP/NLP solver. - solve_data : MindtPySolveData - Data container that holds solve-instance data. + opt : Solvers + The solver object. + solver_name : String + The name of solver. + timing : Timing + Timing config : ConfigBlock The specific configurations for MindtPy. - solver_type : str - The type of the solver, i.e. mip or nlp. - regularization : bool, optional - Whether the solver is used to solve the regularization problem, by default False. """ - # TODO: integrate nlp_args here - # nlp_args = dict(config.nlp_solver_args) - elapsed = get_main_elapsed_time(solve_data.timing) - remaining = int(max(config.time_limit - elapsed, 1)) - if solver_type == 'mip': - if regularization: - solver_name = config.mip_regularization_solver - if config.regularization_mip_threads > 0: - opt.options['threads'] = config.regularization_mip_threads - else: - solver_name = config.mip_solver - if config.threads > 0: - opt.options['threads'] = config.threads - elif solver_type == 'nlp': - solver_name = config.nlp_solver - # TODO: opt.name doesn't work for GAMS - if solver_name in {'cplex', 'gurobi', 'gurobi_persistent', 'appsi_gurobi'}: + elapsed = get_main_elapsed_time(timing) + remaining = math.ceil(max(config.time_limit - elapsed, 1)) + if solver_name in { + 'cplex', + 'appsi_cplex', + 'cplex_persistent', + 'gurobi', + 'gurobi_persistent', + 'appsi_gurobi', + }: opt.options['timelimit'] = remaining - opt.options['mipgap'] = config.mip_solver_mipgap - if solver_name == 'gurobi_persistent' and config.single_tree: - # PreCrush: Controls presolve reductions that affect user cuts - # You should consider setting this parameter to 1 if you are using callbacks to add your own cuts. - opt.options['PreCrush'] = 1 - opt.options['LazyConstraints'] = 1 - if regularization == True: - if solver_name == 'cplex': - if config.solution_limit is not None: - opt.options['mip limits solutions'] = config.solution_limit - opt.options['mip strategy presolvenode'] = 3 - # TODO: need to discuss if this option should be added. - if config.add_regularization in {'hess_lag', 'hess_only_lag'}: - opt.options['optimalitytarget'] = 3 - elif solver_name == 'gurobi': - if config.solution_limit is not None: - opt.options['SolutionLimit'] = config.solution_limit - opt.options['Presolve'] = 2 - elif solver_name == 'cplex_persistent': - opt.options['timelimit'] = remaining - opt._solver_model.parameters.mip.tolerances.mipgap.set( - config.mip_solver_mipgap) - if regularization is True: - if config.solution_limit is not None: - opt._solver_model.parameters.mip.limits.solutions.set( - config.solution_limit) - opt._solver_model.parameters.mip.strategy.presolvenode.set(3) - if config.add_regularization in {'hess_lag', 'hess_only_lag'}: - opt._solver_model.parameters.optimalitytarget.set(3) - elif solver_name == 'appsi_cplex': - opt.options['timelimit'] = remaining - opt.options['mip_tolerances_mipgap'] = config.mip_solver_mipgap - if regularization is True: - if config.solution_limit is not None: - opt.options['mip_limits_solutions'] = config.solution_limit - opt.options['mip_strategy_presolvenode'] = 3 - if config.add_regularization in {'hess_lag', 'hess_only_lag'}: - opt.options['optimalitytarget'] = 3 + elif solver_name == 'appsi_highs': + opt.config.time_limit = remaining + elif solver_name == 'cyipopt': + opt.config.options['max_cpu_time'] = float(remaining) elif solver_name == 'glpk': opt.options['tmlim'] = remaining - opt.options['mipgap'] = config.mip_solver_mipgap elif solver_name == 'baron': opt.options['MaxTime'] = remaining - opt.options['AbsConFeasTol'] = config.zero_tolerance elif solver_name in {'ipopt', 'appsi_ipopt'}: opt.options['max_cpu_time'] = remaining + elif solver_name == 'gams': + opt.options['add_options'].append('option Reslim=%s;' % remaining) + + +def set_solver_mipgap(opt, solver_name, config): + """Set mipgap for subsolvers. + + Parameters + ---------- + opt : Solvers + The solver object. + solver_name : String + The name of solver. + config : ConfigBlock + The specific configurations for MindtPy. + """ + if solver_name in { + 'cplex', + 'cplex_persistent', + 'gurobi', + 'gurobi_persistent', + 'appsi_gurobi', + 'glpk', + }: + opt.options['mipgap'] = config.mip_solver_mipgap + elif solver_name == 'appsi_cplex': + opt.options['mip_tolerances_mipgap'] = config.mip_solver_mipgap + elif solver_name == 'appsi_highs': + opt.config.mip_gap = config.mip_solver_mipgap + elif solver_name == 'gams': + opt.options['add_options'].append('option optcr=%s;' % config.mip_solver_mipgap) + + +def set_solver_constraint_violation_tolerance(opt, solver_name, config): + """Set constraint violation tolerance for solvers. + + Parameters + ---------- + opt : Solvers + The solver object. + solver_name : String + The name of solver. + config : ConfigBlock + The specific configurations for MindtPy. + """ + if solver_name == 'baron': + opt.options['AbsConFeasTol'] = config.zero_tolerance + elif solver_name in {'ipopt', 'appsi_ipopt'}: opt.options['constr_viol_tol'] = config.zero_tolerance + elif solver_name == 'cyipopt': + opt.config.options['constr_viol_tol'] = config.zero_tolerance elif solver_name == 'gams': - if solver_type == 'mip': - opt.options['add_options'] = ['option optcr=%s;' % config.mip_solver_mipgap, - 'option reslim=%s;' % remaining] - elif solver_type == 'nlp': - opt.options['add_options'] = ['option reslim=%s;' % remaining] - if config.nlp_solver_args.__contains__('solver'): - if config.nlp_solver_args['solver'] in {'ipopt', 'ipopth', 'msnlp', 'conopt', 'baron'}: - if config.nlp_solver_args['solver'] == 'ipopt': - opt.options['add_options'].append( - '$onecho > ipopt.opt') - opt.options['add_options'].append( - 'constr_viol_tol ' + str(config.zero_tolerance)) - elif config.nlp_solver_args['solver'] == 'ipopth': - opt.options['add_options'].append( - '$onecho > ipopth.opt') - opt.options['add_options'].append( - 'constr_viol_tol ' + str(config.zero_tolerance)) - # TODO: Ipopt warmstart option - # opt.options['add_options'].append('warm_start_init_point yes\n' - # 'warm_start_bound_push 1e-9\n' - # 'warm_start_bound_frac 1e-9\n' - # 'warm_start_slack_bound_frac 1e-9\n' - # 'warm_start_slack_bound_push 1e-9\n' - # 'warm_start_mult_bound_push 1e-9\n') - elif config.nlp_solver_args['solver'] == 'conopt': - opt.options['add_options'].append( - '$onecho > conopt.opt') - opt.options['add_options'].append( - 'RTNWMA ' + str(config.zero_tolerance)) - elif config.nlp_solver_args['solver'] == 'msnlp': - opt.options['add_options'].append( - '$onecho > msnlp.opt') - opt.options['add_options'].append( - 'feasibility_tolerance ' + str(config.zero_tolerance)) - elif config.nlp_solver_args['solver'] == 'baron': - opt.options['add_options'].append( - '$onecho > baron.opt') - opt.options['add_options'].append( - 'AbsConFeasTol ' + str(config.zero_tolerance)) - opt.options['add_options'].append('$offecho') - opt.options['add_options'].append('GAMS_MODEL.optfile=1') + if config.nlp_solver_args['solver'] in { + 'ipopt', + 'ipopth', + 'msnlp', + 'conopt', + 'baron', + }: + opt.options['add_options'].append('GAMS_MODEL.optfile=1') + opt.options['add_options'].append( + '$onecho > ' + config.nlp_solver_args['solver'] + '.opt' + ) + if config.nlp_solver_args['solver'] in {'ipopt', 'ipopth'}: + opt.options['add_options'].append( + 'constr_viol_tol ' + str(config.zero_tolerance) + ) + # Ipopt warmstart options + opt.options['add_options'].append( + 'warm_start_init_point yes\n' + 'warm_start_bound_push 1e-9\n' + 'warm_start_bound_frac 1e-9\n' + 'warm_start_slack_bound_frac 1e-9\n' + 'warm_start_slack_bound_push 1e-9\n' + 'warm_start_mult_bound_push 1e-9\n' + ) + elif config.nlp_solver_args['solver'] == 'conopt': + opt.options['add_options'].append( + 'RTNWMA ' + str(config.zero_tolerance) + ) + elif config.nlp_solver_args['solver'] == 'msnlp': + opt.options['add_options'].append( + 'feasibility_tolerance ' + str(config.zero_tolerance) + ) + elif config.nlp_solver_args['solver'] == 'baron': + opt.options['add_options'].append( + 'AbsConFeasTol ' + str(config.zero_tolerance) + ) + opt.options['add_options'].append('$offecho') def get_integer_solution(model, string_zero=False): @@ -592,7 +643,8 @@ def get_integer_solution(model, string_zero=False): for var in model.MindtPy_utils.discrete_variable_list: if string_zero: if var.value == 0: - # In cplex, negative zero is different from zero, so we use string to denote this(Only in singletree) + # In CPLEX, negative zero is different from zero, + # so we use string to denote this (Only in singletree). temp.append(str(var.value)) else: temp.append(int(round(var.value))) @@ -601,105 +653,15 @@ def get_integer_solution(model, string_zero=False): return tuple(temp) -def set_up_solve_data(model, config): - """Set up the solve data. - - Parameters - ---------- - model : Pyomo model - The original model to be solved in MindtPy. - config : ConfigBlock - The specific configurations for MindtPy. - - Returns - ------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - """ - solve_data = MindtPySolveData() - solve_data.results = SolverResults() - solve_data.timing = Bunch() - solve_data.curr_int_sol = [] - solve_data.should_terminate = False - solve_data.integer_list = [] - - # if the objective function is a constant, dual bound constraint is not added. - obj = next(model.component_data_objects(ctype=Objective, active=True)) - if obj.expr.polynomial_degree() == 0: - config.use_dual_bound = False - - if config.use_fbbt: - fbbt(model) - # TODO: logging_level is not logging.INFO here - config.logger.info( - 'Use the fbbt to tighten the bounds of variables') - - solve_data.original_model = model - solve_data.working_model = model.clone() - - # Set up iteration counters - solve_data.nlp_iter = 0 - solve_data.mip_iter = 0 - solve_data.mip_subiter = 0 - solve_data.nlp_infeasible_counter = 0 - if config.init_strategy == 'FP': - solve_data.fp_iter = 1 - - # set up bounds - if obj.sense == minimize: - solve_data.primal_bound = float('inf') - solve_data.dual_bound = float('-inf') - else: - solve_data.primal_bound = float('-inf') - solve_data.dual_bound = float('inf') - solve_data.primal_bound_progress = [solve_data.primal_bound] - solve_data.dual_bound_progress = [solve_data.dual_bound] - solve_data.primal_bound_progress_time = [0] - solve_data.dual_bound_progress_time = [0] - solve_data.abs_gap = float('inf') - solve_data.rel_gap = float('inf') - solve_data.log_formatter = ' {:>9} {:>15} {:>15g} {:>12g} {:>12g} {:>7.2%} {:>7.2f}' - solve_data.fixed_nlp_log_formatter = '{:1}{:>9} {:>15} {:>15g} {:>12g} {:>12g} {:>7.2%} {:>7.2f}' - solve_data.log_note_formatter = ' {:>9} {:>15} {:>15}' - if config.add_regularization is not None: - if config.add_regularization in {'level_L1', 'level_L_infinity', 'grad_lag'}: - solve_data.regularization_mip_type = 'MILP' - elif config.add_regularization in {'level_L2', 'hess_lag', 'hess_only_lag', 'sqp_lag'}: - solve_data.regularization_mip_type = 'MIQP' - - if config.single_tree and (config.add_no_good_cuts or config.use_tabu_list): - solve_data.stored_bound = {} - if config.strategy == 'GOA' and (config.add_no_good_cuts or config.use_tabu_list): - solve_data.num_no_good_cuts_added = {} - - # Flag indicating whether the solution improved in the past - # iteration or not - solve_data.primal_bound_improved = False - solve_data.dual_bound_improved = False - - if config.nlp_solver == 'ipopt': - if not hasattr(solve_data.working_model, 'ipopt_zL_out'): - solve_data.working_model.ipopt_zL_out = Suffix( - direction=Suffix.IMPORT) - if not hasattr(solve_data.working_model, 'ipopt_zU_out'): - solve_data.working_model.ipopt_zU_out = Suffix( - direction=Suffix.IMPORT) - - if config.quadratic_strategy == 0: - solve_data.mip_objective_polynomial_degree = {0, 1} - solve_data.mip_constraint_polynomial_degree = {0, 1} - elif config.quadratic_strategy == 1: - solve_data.mip_objective_polynomial_degree = {0, 1, 2} - solve_data.mip_constraint_polynomial_degree = {0, 1} - elif config.quadratic_strategy == 2: - solve_data.mip_objective_polynomial_degree = {0, 1, 2} - solve_data.mip_constraint_polynomial_degree = {0, 1, 2} - - return solve_data - - -def copy_var_list_values_from_solution_pool(from_list, to_list, config, solver_model, var_map, solution_name, - ignore_integrality=False): +def copy_var_list_values_from_solution_pool( + from_list, + to_list, + config, + solver_model, + var_map, + solution_name, + ignore_integrality=False, +): """Copy variable values from the solution pool to another list. Parameters @@ -723,10 +685,10 @@ def copy_var_list_values_from_solution_pool(from_list, to_list, config, solver_m try: if config.mip_solver == 'cplex_persistent': var_val = solver_model.solution.pool.get_values( - solution_name, var_map[v_from]) + solution_name, var_map[v_from] + ) elif config.mip_solver == 'gurobi_persistent': - solver_model.setParam( - gurobipy.GRB.Param.SolutionNumber, solution_name) + solver_model.setParam(gurobipy.GRB.Param.SolutionNumber, solution_name) var_val = var_map[v_from].Xn # We don't want to trigger the reset of the global stale # indicator, so we will set this variable to be "stale", @@ -738,27 +700,27 @@ def copy_var_list_values_from_solution_pool(from_list, to_list, config, solver_m # instead log warnings). This means that the following will # always succeed and the ValueError should never be raised. v_to.set_value(var_val, skip_validation=True) - except ValueError as err: - err_msg = getattr(err, 'message', str(err)) + except ValueError as e: + config.logger.error(e) rounded_val = int(round(var_val)) # Check to see if this is just a tolerance issue if ignore_integrality and v_to.is_integer(): v_to.set_value(var_val, skip_validation=True) elif v_to.is_integer() and ( - abs(var_val - rounded_val) <= config.integer_tolerance): + abs(var_val - rounded_val) <= config.integer_tolerance + ): v_to.set_value(rounded_val, skip_validation=True) elif abs(var_val) <= config.zero_tolerance and 0 in v_to.domain: v_to.set_value(0, skip_validation=True) else: config.logger.error( - 'Unknown validation domain error setting variable %s' % - (v_to.name,) + 'Unknown validation domain error setting variable %s' % (v_to.name,) ) raise class GurobiPersistent4MindtPy(GurobiPersistent): - """ A new persistent interface to Gurobi. + """A new persistent interface to Gurobi. Args: GurobiPersistent (PersistentSolver): A class that provides a persistent interface to Gurobi. @@ -769,97 +731,14 @@ def f(gurobi_model, where): """Callback function for Gurobi. Args: - gurobi_model (gurobi model): the gurobi model derived from pyomo model. + gurobi_model (Gurobi model): the Gurobi model derived from pyomo model. where (int): an enum member of gurobipy.GRB.Callback. """ - self._callback_func(self._pyomo_model, self, - where, self.solve_data, self.config) - return f - - -def update_gap(solve_data): - """Update the relative gap and the absolute gap. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - """ - solve_data.abs_gap = abs(solve_data.primal_bound - solve_data.dual_bound) - solve_data.rel_gap = solve_data.abs_gap / (abs(solve_data.primal_bound) + 1E-10) - + self._callback_func( + self._pyomo_model, self, where, self.mindtpy_solver, self.config + ) -def update_dual_bound(solve_data, bound_value): - """Update the dual bound. - - Call after solving relaxed problem, including relaxed NLP and MIP master problem. - Use the optimal primal bound of the relaxed problem to update the dual bound. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - bound_value : float - The input value used to update the dual bound. - """ - if math.isnan(bound_value): - return - if solve_data.objective_sense == minimize: - solve_data.dual_bound = max(bound_value, solve_data.dual_bound) - solve_data.dual_bound_improved = solve_data.dual_bound > solve_data.dual_bound_progress[-1] - else: - solve_data.dual_bound = min(bound_value, solve_data.dual_bound) - solve_data.dual_bound_improved = solve_data.dual_bound < solve_data.dual_bound_progress[-1] - solve_data.dual_bound_progress.append(solve_data.dual_bound) - solve_data.dual_bound_progress_time.append(get_main_elapsed_time(solve_data.timing)) - if solve_data.dual_bound_improved: - update_gap(solve_data) - - -def update_suboptimal_dual_bound(solve_data, results): - """If the relaxed problem is not solved to optimality, the dual bound is updated - according to the dual bound of relaxed problem. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - results : SolverResults - Results from solving the relaxed problem. - The dual bound of the relaxed problem can only be obtained from the result object. - """ - if solve_data.objective_sense == minimize: - bound_value = results.problem.lower_bound - else: - bound_value = results.problem.upper_bound - update_dual_bound(solve_data, bound_value) - - -def update_primal_bound(solve_data, bound_value): - """Update the primal bound. - - Call after solve fixed NLP subproblem. - Use the optimal primal bound of the relaxed problem to update the dual bound. - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - bound_value : float - The input value used to update the primal bound. - """ - if math.isnan(bound_value): - return - if solve_data.objective_sense == minimize: - solve_data.primal_bound = min(bound_value, solve_data.primal_bound) - solve_data.primal_bound_improved = solve_data.primal_bound < solve_data.primal_bound_progress[-1] - else: - solve_data.primal_bound = max(bound_value, solve_data.primal_bound) - solve_data.primal_bound_improved = solve_data.primal_bound > solve_data.primal_bound_progress[-1] - solve_data.primal_bound_progress.append(solve_data.primal_bound) - solve_data.primal_bound_progress_time.append(get_main_elapsed_time(solve_data.timing)) - if solve_data.primal_bound_improved: - update_gap(solve_data) + return f def set_up_logger(config): @@ -881,78 +760,6 @@ def set_up_logger(config): config.logger.addHandler(ch) -def get_dual_integral(solve_data, config): - """Calculate the dual integral. - Ref: The confined primal integral. [http://www.optimization-online.org/DB_FILE/2020/07/7910.pdf] - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - - Returns - ------- - float - The dual integral. - """ - dual_integral = 0 - dual_bound_progress = solve_data.dual_bound_progress.copy() - # Initial dual bound is set to inf or -inf. To calculate dual integral, we set - # initial_dual_bound to 10% greater or smaller than the first_found_dual_bound. - # TODO: check if the calculation of initial_dual_bound needs to be modified. - for dual_bound in dual_bound_progress: - if dual_bound != dual_bound_progress[0]: - break - for i in range(len(dual_bound_progress)): - if dual_bound_progress[i] == solve_data.dual_bound_progress[0]: - dual_bound_progress[i] = dual_bound * (1 - config.initial_bound_coef * solve_data.objective_sense * math.copysign(1,dual_bound)) - else: - break - for i in range(len(dual_bound_progress)): - if i == 0: - dual_integral += abs(dual_bound_progress[i] - solve_data.dual_bound) * (solve_data.dual_bound_progress_time[i]) - else: - dual_integral += abs(dual_bound_progress[i] - solve_data.dual_bound) * (solve_data.dual_bound_progress_time[i] - solve_data.dual_bound_progress_time[i-1]) - config.logger.info(' {:<25}: {:>7.4f} '.format('Dual integral', dual_integral)) - return dual_integral - - -def get_primal_integral(solve_data, config): - """Calculate the primal integral. - Ref: The confined primal integral. [http://www.optimization-online.org/DB_FILE/2020/07/7910.pdf] - - Parameters - ---------- - solve_data : MindtPySolveData - Data container that holds solve-instance data. - - Returns - ------- - float - The primal integral. - """ - primal_integral = 0 - primal_bound_progress = solve_data.primal_bound_progress.copy() - # Initial primal bound is set to inf or -inf. To calculate primal integral, we set - # initial_primal_bound to 10% greater or smaller than the first_found_primal_bound. - # TODO: check if the calculation of initial_primal_bound needs to be modified. - for primal_bound in primal_bound_progress: - if primal_bound != primal_bound_progress[0]: - break - for i in range(len(primal_bound_progress)): - if primal_bound_progress[i] == solve_data.primal_bound_progress[0]: - primal_bound_progress[i] = primal_bound * (1 + config.initial_bound_coef * solve_data.objective_sense * math.copysign(1,primal_bound)) - else: - break - for i in range(len(primal_bound_progress)): - if i == 0: - primal_integral += abs(primal_bound_progress[i] - solve_data.primal_bound) * (solve_data.primal_bound_progress_time[i]) - else: - primal_integral += abs(primal_bound_progress[i] - solve_data.primal_bound) * (solve_data.primal_bound_progress_time[i] - solve_data.primal_bound_progress_time[i-1]) - - config.logger.info(' {:<25}: {:>7.4f} '.format('Primal integral', primal_integral)) - return primal_integral - def epigraph_reformulation(exp, slack_var_list, constraint_list, use_mcpp, sense): """Epigraph reformulation. @@ -988,100 +795,23 @@ def epigraph_reformulation(exp, slack_var_list, constraint_list, use_mcpp, sense else: constraint_list.add(expr=slack_var <= exp) -def build_ordered_component_lists(model, solve_data): - """Define lists used for future data transfer. - - Also attaches ordered lists of the variables, constraints, disjuncts, and - disjunctions to the model so that they can be used for mapping back and - forth. - """ - util_blk = getattr(model, solve_data.util_block_name) - var_set = ComponentSet() - setattr( - util_blk, 'constraint_list', list( - model.component_data_objects( - ctype=Constraint, active=True, - descend_into=(Block, Disjunct)))) - if hasattr(solve_data,'mip_constraint_polynomial_degree'): - mip_constraint_polynomial_degree = solve_data.mip_constraint_polynomial_degree - else: - mip_constraint_polynomial_degree = {0, 1} - setattr( - util_blk, 'linear_constraint_list', list( - c for c in model.component_data_objects( - ctype=Constraint, active=True, descend_into=(Block, Disjunct)) - if c.body.polynomial_degree() in mip_constraint_polynomial_degree)) - setattr( - util_blk, 'nonlinear_constraint_list', list( - c for c in model.component_data_objects( - ctype=Constraint, active=True, descend_into=(Block, Disjunct)) - if c.body.polynomial_degree() not in mip_constraint_polynomial_degree)) - setattr( - util_blk, 'disjunct_list', list( - model.component_data_objects( - ctype=Disjunct, active=True, - descend_into=(Block, Disjunct)))) - setattr( - util_blk, 'disjunction_list', list( - model.component_data_objects( - ctype=Disjunction, active=True, - descend_into=(Disjunct, Block)))) - setattr( - util_blk, 'objective_list', list( - model.component_data_objects( - ctype=Objective, active=True, - descend_into=(Block)))) - - # Identify the non-fixed variables in (potentially) active constraints and - # objective functions - for constr in getattr(util_blk, 'constraint_list'): - for v in EXPR.identify_variables(constr.body, include_fixed=False): - var_set.add(v) - for obj in model.component_data_objects(ctype=Objective, active=True): - for v in EXPR.identify_variables(obj.expr, include_fixed=False): - var_set.add(v) - # Disjunct indicator variables might not appear in active constraints. In - # fact, if we consider them Logical variables, they should not appear in - # active algebraic constraints. For now, they need to be added to the - # variable set. - for disj in getattr(util_blk, 'disjunct_list'): - var_set.add(disj.binary_indicator_var) - - # We use component_data_objects rather than list(var_set) in order to - # preserve a deterministic ordering. - var_list = list( - v for v in model.component_data_objects( - ctype=Var, descend_into=(Block, Disjunct)) - if v in var_set) - setattr(util_blk, 'variable_list', var_list) - discrete_variable_list = list( - v for v in model.component_data_objects( - ctype=Var, descend_into=(Block, Disjunct)) - if v in var_set and v.is_integer()) - setattr(util_blk, 'discrete_variable_list', discrete_variable_list) - continuous_variable_list = list( - v for v in model.component_data_objects( - ctype=Var, descend_into=(Block, Disjunct)) - if v in var_set and v.is_continuous()) - setattr(util_blk, 'continuous_variable_list', continuous_variable_list) - -def setup_results_object(solve_data, config): +def setup_results_object(results, model, config): """Record problem statistics for original model.""" # Create the solver results object - res = solve_data.results + res = results prob = res.problem - res.problem.name = solve_data.original_model.name + res.problem.name = model.name res.problem.number_of_nonzeros = None # TODO - # TODO work on termination condition and message res.solver.termination_condition = None res.solver.message = None res.solver.user_time = None - res.solver.system_time = None res.solver.wallclock_time = None res.solver.termination_message = None + # Record solver name + res.solver.name = 'MindtPy' + str(config.strategy) - num_of = build_model_size_report(solve_data.original_model) + num_of = build_model_size_report(model) # Get count of constraints and variables prob.number_of_constraints = num_of.activated.constraints @@ -1095,193 +825,141 @@ def setup_results_object(solve_data, config): "Original model has %s constraints (%s nonlinear) " "and %s disjunctions, " "with %s variables, of which %s are binary, %s are integer, " - "and %s are continuous." % - (num_of.activated.constraints, - num_of.activated.nonlinear_constraints, - num_of.activated.disjunctions, - num_of.activated.variables, - num_of.activated.binary_variables, - num_of.activated.integer_variables, - num_of.activated.continuous_variables)) - -def process_objective(solve_data, config, move_objective=False, - use_mcpp=False, update_var_con_list=True, - partition_nonlinear_terms=True, - obj_handleable_polynomial_degree={0, 1}, - constr_handleable_polynomial_degree={0, 1}): - """Process model objective function. - Check that the model has only 1 valid objective. - If the objective is nonlinear, move it into the constraints. - If no objective function exists, emit a warning and create a dummy - objective. + "and %s are continuous." + % ( + num_of.activated.constraints, + num_of.activated.nonlinear_constraints, + num_of.activated.disjunctions, + num_of.activated.variables, + num_of.activated.binary_variables, + num_of.activated.integer_variables, + num_of.activated.continuous_variables, + ) + ) + config.logger.info( + '{} is the initial strategy being used.\n'.format(config.init_strategy) + ) + config.logger.info( + ' ===============================================================================================' + ) + config.logger.info( + ' {:>9} | {:>15} | {:>15} | {:>12} | {:>12} | {:^7} | {:>7}\n'.format( + 'Iteration', + 'Subproblem Type', + 'Objective Value', + 'Primal Bound', + 'Dual Bound', + ' Gap ', + 'Time(s)', + ) + ) + + +def fp_converged(working_model, mip_model, proj_zero_tolerance, discrete_only=True): + """Calculates the euclidean norm between the discrete variables in the MIP and NLP models. + Parameters ---------- - solve_data (GDPoptSolveData): solver environment data class - config (ConfigBlock): solver configuration options - move_objective (bool): if True, move even linear - objective functions to the constraints - update_var_con_list (bool): if True, the variable/constraint/objective lists will not be updated. - This arg is set to True by default. Currently, update_var_con_list will be set to False only when - add_regularization is not None in MindtPy. - partition_nonlinear_terms (bool): if True, partition sum of nonlinear terms in the objective function. + working_model : Pyomo model + The working model(original model). + mip_model : Pyomo model + The mip model. + proj_zero_tolerance : Float + The projection zero tolerance of Feasibility Pump. + discrete_only : bool, optional + Whether to only optimize on distance between the discrete variables, by default True. + + Returns + ------- + distance : float + The euclidean norm between the discrete variables in the MIP and NLP models. """ - m = solve_data.working_model - util_blk = getattr(m, solve_data.util_block_name) - # Handle missing or multiple objectives - active_objectives = list(m.component_data_objects( - ctype=Objective, active=True, descend_into=True)) - solve_data.results.problem.number_of_objectives = len(active_objectives) - if len(active_objectives) == 0: - config.logger.warning( - 'Model has no active objectives. Adding dummy objective.') - util_blk.dummy_objective = Objective(expr=1) - main_obj = util_blk.dummy_objective - elif len(active_objectives) > 1: - raise ValueError('Model has multiple active objectives.') - else: - main_obj = active_objectives[0] - solve_data.results.problem.sense = ProblemSense.minimize if \ - main_obj.sense == 1 else \ - ProblemSense.maximize - solve_data.objective_sense = main_obj.sense - - # Move the objective to the constraints if it is nonlinear or move_objective is True. - if main_obj.expr.polynomial_degree() not in obj_handleable_polynomial_degree or move_objective: - if move_objective: - config.logger.info("Moving objective to constraint set.") - else: - config.logger.info( - "Objective is nonlinear. Moving it to constraint set.") - util_blk.objective_value = VarList(domain=Reals, initialize=0) - util_blk.objective_constr = ConstraintList() - if main_obj.expr.polynomial_degree() not in obj_handleable_polynomial_degree and partition_nonlinear_terms and main_obj.expr.__class__ is EXPR.SumExpression: - repn = generate_standard_repn(main_obj.expr, quadratic=2 in obj_handleable_polynomial_degree) - # the following code will also work if linear_subexpr is a constant. - linear_subexpr = repn.constant + sum(coef*var for coef, var in zip(repn.linear_coefs, repn.linear_vars)) \ - + sum(coef*var1*var2 for coef, (var1, var2) in zip(repn.quadratic_coefs, repn.quadratic_vars)) - # only need to generate one epigraph constraint for the sum of all linear terms and constant - epigraph_reformulation(linear_subexpr, util_blk.objective_value, util_blk.objective_constr, use_mcpp, main_obj.sense) - nonlinear_subexpr = repn.nonlinear_expr - if nonlinear_subexpr.__class__ is EXPR.SumExpression: - for subsubexpr in nonlinear_subexpr.args: - epigraph_reformulation(subsubexpr, util_blk.objective_value, util_blk.objective_constr, use_mcpp, main_obj.sense) - else: - epigraph_reformulation(nonlinear_subexpr, util_blk.objective_value, util_blk.objective_constr, use_mcpp, main_obj.sense) - else: - epigraph_reformulation(main_obj.expr, util_blk.objective_value, util_blk.objective_constr, use_mcpp, main_obj.sense) - - main_obj.deactivate() - util_blk.objective = Objective(expr=sum(util_blk.objective_value[:]), sense=main_obj.sense) - - if main_obj.expr.polynomial_degree() not in obj_handleable_polynomial_degree or \ - (move_objective and update_var_con_list): - util_blk.variable_list.extend(util_blk.objective_value[:]) - util_blk.continuous_variable_list.extend(util_blk.objective_value[:]) - util_blk.constraint_list.extend(util_blk.objective_constr[:]) - util_blk.objective_list.append(util_blk.objective) - for constr in util_blk.objective_constr[:]: - if constr.body.polynomial_degree() in constr_handleable_polynomial_degree: - util_blk.linear_constraint_list.append(constr) - else: - util_blk.nonlinear_constraint_list.append(constr) + distance = max( + (nlp_var.value - milp_var.value) ** 2 + for (nlp_var, milp_var) in zip( + working_model.MindtPy_utils.variable_list, + mip_model.MindtPy_utils.variable_list, + ) + if (not discrete_only) or milp_var.is_integer() + ) + return distance <= proj_zero_tolerance + -def build_ordered_component_lists(model, solve_data): - """Define lists used for future data transfer. +def add_orthogonality_cuts(working_model, mip_model, config): + """Add orthogonality cuts. - Also attaches ordered lists of the variables, constraints, disjuncts, and - disjunctions to the model so that they can be used for mapping back and - forth. + This function adds orthogonality cuts to avoid cycling when the independence constraint qualification is not satisfied. + Parameters + ---------- + working_model : Pyomo model + The working model(original model). + mip_model : Pyomo model + The mip model. + config : ConfigBlock + The specific configurations for MindtPy. """ - util_blk = getattr(model, solve_data.util_block_name) - var_set = ComponentSet() - setattr( - util_blk, 'constraint_list', list( - model.component_data_objects( - ctype=Constraint, active=True, - descend_into=(Block, Disjunct)))) - if hasattr(solve_data,'mip_constraint_polynomial_degree'): - mip_constraint_polynomial_degree = solve_data.mip_constraint_polynomial_degree - else: - mip_constraint_polynomial_degree = {0, 1} - setattr( - util_blk, 'linear_constraint_list', list( - c for c in model.component_data_objects( - ctype=Constraint, active=True, descend_into=(Block, Disjunct)) - if c.body.polynomial_degree() in mip_constraint_polynomial_degree)) - setattr( - util_blk, 'nonlinear_constraint_list', list( - c for c in model.component_data_objects( - ctype=Constraint, active=True, descend_into=(Block, Disjunct)) - if c.body.polynomial_degree() not in mip_constraint_polynomial_degree)) - setattr( - util_blk, 'disjunct_list', list( - model.component_data_objects( - ctype=Disjunct, active=True, - descend_into=(Block, Disjunct)))) - setattr( - util_blk, 'disjunction_list', list( - model.component_data_objects( - ctype=Disjunction, active=True, - descend_into=(Disjunct, Block)))) - setattr( - util_blk, 'objective_list', list( - model.component_data_objects( - ctype=Objective, active=True, - descend_into=(Block)))) - - # Identify the non-fixed variables in (potentially) active constraints and - # objective functions - for constr in getattr(util_blk, 'constraint_list'): - for v in EXPR.identify_variables(constr.body, include_fixed=False): - var_set.add(v) - for obj in model.component_data_objects(ctype=Objective, active=True): - for v in EXPR.identify_variables(obj.expr, include_fixed=False): - var_set.add(v) - # Disjunct indicator variables might not appear in active constraints. In - # fact, if we consider them Logical variables, they should not appear in - # active algebraic constraints. For now, they need to be added to the - # variable set. - for disj in getattr(util_blk, 'disjunct_list'): - var_set.add(disj.binary_indicator_var) - - # We use component_data_objects rather than list(var_set) in order to - # preserve a deterministic ordering. - var_list = list( - v for v in model.component_data_objects( - ctype=Var, descend_into=(Block, Disjunct)) - if v in var_set) - setattr(util_blk, 'variable_list', var_list) - discrete_variable_list = list( - v for v in model.component_data_objects( - ctype=Var, descend_into=(Block, Disjunct)) - if v in var_set and v.is_integer()) - setattr(util_blk, 'discrete_variable_list', discrete_variable_list) - continuous_variable_list = list( - v for v in model.component_data_objects( - ctype=Var, descend_into=(Block, Disjunct)) - if v in var_set and v.is_continuous()) - setattr(util_blk, 'continuous_variable_list', continuous_variable_list) - -@contextmanager -def create_utility_block(model, name, solve_data): - created_util_block = False - # Create a model block on which to store GDPopt-specific utility - # modeling objects. - if hasattr(model, name): - raise RuntimeError( - "GDPopt needs to create a Block named %s " - "on the model object, but an attribute with that name " - "already exists." % name) - else: - created_util_block = True - setattr(model, name, Block( - doc="Container for GDPopt solver utility modeling objects")) - solve_data.util_block_name = name - - # Save ordered lists of main modeling components, so that data can - # be easily transferred between future model clones. - build_ordered_component_lists(model, solve_data) - yield - if created_util_block: - model.del_component(name) + mip_integer_vars = mip_model.MindtPy_utils.discrete_variable_list + nlp_integer_vars = working_model.MindtPy_utils.discrete_variable_list + orthogonality_cut = ( + sum( + (nlp_v.value - mip_v.value) * (mip_v - nlp_v.value) + for mip_v, nlp_v in zip(mip_integer_vars, nlp_integer_vars) + ) + >= 0 + ) + mip_model.MindtPy_utils.cuts.fp_orthogonality_cuts.add(orthogonality_cut) + if config.fp_projcuts: + orthogonality_cut = ( + sum( + (nlp_v.value - mip_v.value) * (nlp_v - nlp_v.value) + for mip_v, nlp_v in zip(mip_integer_vars, nlp_integer_vars) + ) + >= 0 + ) + working_model.MindtPy_utils.cuts.fp_orthogonality_cuts.add(orthogonality_cut) + + +def generate_norm_constraint(fp_nlp_model, mip_model, config): + """Generate the norm constraint for the FP-NLP subproblem. + + Parameters + ---------- + fp_nlp_model : Pyomo model + The feasibility pump NLP subproblem. + mip_model : Pyomo model + The mip_model model. + config : ConfigBlock + The specific configurations for MindtPy. + """ + if config.fp_main_norm == 'L1': + # TODO: check if we can access the block defined in FP-main problem + generate_norm1_norm_constraint( + fp_nlp_model, mip_model, config, discrete_only=True + ) + elif config.fp_main_norm == 'L2': + fp_nlp_model.norm_constraint = Constraint( + expr=sum( + (nlp_var - mip_var.value) ** 2 + - config.fp_norm_constraint_coef * (nlp_var.value - mip_var.value) ** 2 + for nlp_var, mip_var in zip( + fp_nlp_model.MindtPy_utils.discrete_variable_list, + mip_model.MindtPy_utils.discrete_variable_list, + ) + ) + <= 0 + ) + elif config.fp_main_norm == 'L_infinity': + fp_nlp_model.norm_constraint = ConstraintList() + rhs = config.fp_norm_constraint_coef * max( + nlp_var.value - mip_var.value + for nlp_var, mip_var in zip( + fp_nlp_model.MindtPy_utils.discrete_variable_list, + mip_model.MindtPy_utils.discrete_variable_list, + ) + ) + for nlp_var, mip_var in zip( + fp_nlp_model.MindtPy_utils.discrete_variable_list, + mip_model.MindtPy_utils.discrete_variable_list, + ): + fp_nlp_model.norm_constraint.add(nlp_var - mip_var.value <= rhs) diff --git a/pyomo/checker/plugins/checkers/py3k/__init__.py b/pyomo/contrib/mpc/__init__.py similarity index 69% rename from pyomo/checker/plugins/checkers/py3k/__init__.py rename to pyomo/contrib/mpc/__init__.py index 970bf8cde7b..da977f365d2 100644 --- a/pyomo/checker/plugins/checkers/py3k/__init__.py +++ b/pyomo/contrib/mpc/__init__.py @@ -9,5 +9,8 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -import pyomo.checker.plugins.checkers.py3k.printing -import pyomo.checker.plugins.checkers.py3k.range +from .interfaces.model_interface import DynamicModelInterface +from .data.series_data import TimeSeriesData +from .data.interval_data import IntervalData +from .data.scalar_data import ScalarData +from .data.get_cuid import get_indexed_cuid diff --git a/pyomo/checker/plugins/checkers/sample/printing.py b/pyomo/contrib/mpc/data/__init__.py similarity index 57% rename from pyomo/checker/plugins/checkers/sample/printing.py rename to pyomo/contrib/mpc/data/__init__.py index 7b18bb09f2e..9061fda4bfd 100644 --- a/pyomo/checker/plugins/checkers/sample/printing.py +++ b/pyomo/contrib/mpc/data/__init__.py @@ -9,15 +9,15 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.checker.plugins.checker import IterativeTreeChecker +from .scalar_data import ScalarData +from .series_data import TimeSeriesData +from .interval_data import IntervalData +from .convert import series_to_interval, interval_to_series -class PrintASTNodes(IterativeTreeChecker): +__doc__ = """A module containing data structures for storing values associated + with time-indexed Pyomo variables. - def __init__(self): - self.disable() + This is the core of the mpc package. Code in this module should not + import from other parts of mpc. - def check(self, runner, script, info): - if 'lineno' in dir(info): - self.problem(str(info), lineno = info.lineno) - else: - self.problem(str(info)) + """ diff --git a/pyomo/contrib/mpc/data/convert.py b/pyomo/contrib/mpc/data/convert.py new file mode 100644 index 00000000000..f1d35592a9f --- /dev/null +++ b/pyomo/contrib/mpc/data/convert.py @@ -0,0 +1,170 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from collections.abc import MutableMapping +from pyomo.contrib.mpc.data.dynamic_data_base import _is_iterable, _DynamicDataBase +from pyomo.contrib.mpc.data.scalar_data import ScalarData +from pyomo.contrib.mpc.data.series_data import TimeSeriesData +from pyomo.contrib.mpc.data.interval_data import IntervalData +from pyomo.contrib.mpc.data.find_nearest_index import find_nearest_interval_index + + +def _process_to_dynamic_data(data, time_set=None): + """Processes a user's data to convert it to the appropriate type + of dynamic data + + Mappings are converted to ScalarData, and length-two tuples are converted + to TimeSeriesData or IntervalData, depending on the contents of the + second item (the list of time points or intervals). + + Arguments + --------- + data: Dict, ComponentMap, or Tuple + Data to convert to either ScalarData, TimeSeriesData, or + IntervalData, depending on type. + + Returns + ------- + ScalarData, TimeSeriesData, or IntervalData + + """ + if isinstance(data, _DynamicDataBase): + return data + if isinstance(data, MutableMapping): + return ScalarData(data, time_set=time_set) + elif isinstance(data, tuple): + if len(data) != 2: + raise TypeError( + "_process_to_dynamic_data only accepts a mapping or a" + " tuple of length two. Got tuple of length %s" % len(data) + ) + if not isinstance(data[0], MutableMapping): + raise TypeError( + "First entry of data tuple must be instance of MutableMapping," + "e.g. dict or ComponentMap. Got %s" % type(data[0]) + ) + elif len(data[1]) == 0: + raise ValueError( + "Time sequence provided in data tuple is empty." + " Cannot infer whether this is a list of points or intervals." + ) + elif all(not _is_iterable(item) for item in data[1]): + return TimeSeriesData(*data) + elif all(_is_iterable(item) and len(item) == 2 for item in data[1]): + return IntervalData(*data) + else: + raise TypeError( + "Second entry of data tuple must be a non-empty iterable of" + " scalars (time points) or length-two tuples (intervals)." + " Got %s" % str(data[1]) + ) + + +def interval_to_series( + data, time_points=None, tolerance=0.0, use_left_endpoints=False, prefer_left=True +): + """ + Arguments + --------- + data: IntervalData + Data to convert to a TimeSeriesData object + time_points: Iterable (optional) + Points at which time series will be defined. Values are taken + from the interval in which each point lives. The default is to + use the right endpoint of each interval. + tolerance: Float (optional) + Tolerance within which time points are considered equal. + Default is zero. + use_left_endpoints: Bool (optional) + Whether the left endpoints should be used in the case when + time_points is not provided. Default is False, meaning that + the right interval endpoints will be used. Should not be set + if time points are provided. + prefer_left: Bool (optional) + If time_points is provided, and a time point is equal (within + tolerance) to a boundary between two intervals, this flag + controls which interval is used. + + Returns + ------- + TimeSeriesData + + """ + if time_points is None: + # TODO: Should first or last data points of first or last + # intervals be included? + if use_left_endpoints: + time_points = [t for t, _ in data.get_intervals()] + else: + time_points = [t for _, t in data.get_intervals()] + series_data = data.get_data() + # TODO: Should TimeSeriesData be constructed with the original time set? + return TimeSeriesData(series_data, time_points) + if use_left_endpoints: + raise RuntimeError("Cannot provide time_points with use_left_endpoints=True") + + intervals = data.get_intervals() + data_dict = data.get_data() + # NOTE: This implementation is O(len(time_points)*log(len(intervals))). + # Could potentially do better with an O(len(time_points) + len(intervals)) + # implementation. + idx_list = [ + find_nearest_interval_index( + intervals, t, tolerance=tolerance, prefer_left=prefer_left + ) + for t in time_points + ] + for i, t in enumerate(time_points): + if idx_list[i] is None: + raise RuntimeError( + "Time point %s cannot be found in intervals within" + " tolerance %s." % (t, tolerance) + ) + new_data = {key: [vals[i] for i in idx_list] for key, vals in data_dict.items()} + # TODO: Should TimeSeriesData be constructed with the original time set? + return TimeSeriesData(new_data, time_points) + + +def series_to_interval(data, use_left_endpoints=False): + """ + Arguments + --------- + data: TimeSeriesData + Data that will be converted into an IntervalData object + use_left_endpoints: Bool (optional) + Flag indicating whether values on intervals should come + from the values at the left or right endpoints of the + intervals + + Returns + ------- + IntervalData + + """ + time = data.get_time_points() + data_dict = data.get_data() + n_t = len(time) + if n_t == 1: + t0 = time[0] + # TODO: Copy data dict? + # TODO: Should we raise an error if time list has length one? + return IntervalData(data_dict, [(t0, t0)]) + else: + # This covers the case of n_t > 1 and n_t == 0 + new_data = {} + intervals = [(time[i - 1], time[i]) for i in range(1, n_t)] + for key, values in data_dict.items(): + interval_values = [ + values[i - 1] if use_left_endpoints else values[i] + for i in range(1, n_t) + ] + new_data[key] = interval_values + return IntervalData(new_data, intervals) diff --git a/pyomo/contrib/mpc/data/dynamic_data_base.py b/pyomo/contrib/mpc/data/dynamic_data_base.py new file mode 100644 index 00000000000..c0223d2dcbe --- /dev/null +++ b/pyomo/contrib/mpc/data/dynamic_data_base.py @@ -0,0 +1,144 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.core.base.set import Set +from pyomo.contrib.mpc.data.get_cuid import get_indexed_cuid + + +def _is_iterable(obj): + if not hasattr(obj, "__iter__") and not hasattr(obj, "__getitem__"): + # Here we know obj is not iterable. + return False + elif hasattr(obj, "is_indexed"): + # Pyomo scalar components define __iter__ and __getitem__, + # however for our purpose we would like to consider them + # as not iterable. + # + # Note that sets implement is_indexed but are always iterable. + return obj.is_indexed() or isinstance(obj, Set) + else: + try: + iter(obj) + return True + except TypeError as err: + if "not iterable" in str(err): + # Hopefully this error message is not implementation + # or version specific. Tested in CPython 3.7.8 and + # PyPy 3.7.10. + return False + else: + raise err + + +class _DynamicDataBase(object): + """ + A base class for storing data associated with time-indexed variables. + + """ + + def __init__(self, data, time_set=None, context=None): + """ + Processes keys of the data dict. + + """ + # This is used if we ever need to process a VarData to get + # a time-indexed CUID. We need to know what set to slice. + self._orig_time_set = time_set + + self._data = { + get_indexed_cuid(key, (self._orig_time_set,), context=context): val + for key, val in data.items() + } + + def __eq__(self, other): + if isinstance(other, _DynamicDataBase): + return self._data == other._data + else: + # Should this return False or raise TypeError? + raise TypeError( + "%s and %s are not comparable" % (self.__class__, other.__class__) + ) + + def get_data(self): + """ + Return a dictionary mapping CUIDs to values + + """ + return self._data + + def get_cuid(self, key, context=None): + """ + Get the time-indexed CUID corresponding to the provided key + """ + return get_indexed_cuid(key, (self._orig_time_set,), context=context) + + def get_data_from_key(self, key, context=None): + """ + Returns the value associated with the given key. + + """ + cuid = get_indexed_cuid(key, (self._orig_time_set,), context=context) + return self._data[cuid] + + def contains_key(self, key, context=None): + """ + Returns whether this object's dict contains the given key. + + """ + cuid = get_indexed_cuid(key, (self._orig_time_set,), context=context) + return cuid in self._data + + def update_data(self, other, context=None): + """ + Updates this object's data dict. + + """ + if isinstance(other, _DynamicDataBase): + self._data.update(other.get_data()) + else: + other = { + get_indexed_cuid(key, (self._orig_time_set,), context=context): val + for key, val in other.items() + } + self._data.update(other) + + def to_serializable(self): + """ + Returns a json-serializable object. + + """ + # We have no idea whether the values in this object's dict are + # json-serializable. + raise NotImplementedError( + "to_serializable has not been implemented by %s" % self.__class__ + ) + + def extract_variables(self, variables, context=None, copy_values=False): + """ + Return a new object that only keeps data values for the variables + specified. + + """ + if copy_values: + # We don't know what this object uses as values in its dict, + # so we don't know how to copy them. + raise NotImplementedError( + "extract_variables with copy_values=True has not been" + " implemented by %s" % self.__class__ + ) + data = {} + for var in variables: + cuid = get_indexed_cuid(var, (self._orig_time_set,), context=context) + data[cuid] = self._data[cuid] + MyClass = self.__class__ + # Subclasses likely have different construction signatures, + # so this maybe shouldn't be implemented on the base class. + return MyClass(data, time_set=self._orig_time_set) diff --git a/pyomo/contrib/mpc/data/find_nearest_index.py b/pyomo/contrib/mpc/data/find_nearest_index.py new file mode 100644 index 00000000000..0875bde63e9 --- /dev/null +++ b/pyomo/contrib/mpc/data/find_nearest_index.py @@ -0,0 +1,131 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import bisect + + +def find_nearest_index(array, target, tolerance=None): + # array needs to be sorted and we assume it is zero-indexed + lo = 0 + hi = len(array) + i = bisect.bisect_right(array, target, lo=lo, hi=hi) + # i is the index at which target should be inserted if it is to be + # right of any equal components. + + if i == lo: + # target is less than every entry of the set + nearest_index = i + delta = array[nearest_index] - target + elif i == hi: + # target is greater than or equal to every entry of the set + nearest_index = i - 1 + delta = target - array[nearest_index] + else: + # p_le <= target < p_g + # delta_left = target - p_le + # delta_right = p_g - target + # delta = min(delta_left, delta_right) + # Tie goes to the index on the left. + delta, nearest_index = min((abs(target - array[j]), j) for j in [i - 1, i]) + + if tolerance is not None: + if delta > tolerance: + return None + return nearest_index + + +def _distance_from_interval(point, interval, tolerance=None): + lo, hi = interval + if tolerance is None: + tolerance = 0.0 + if point < lo - tolerance: + return lo - point + elif lo - tolerance <= point and point <= hi + tolerance: + return 0.0 + elif point > hi + tolerance: + return point - hi + + +def find_nearest_interval_index( + interval_array, target, tolerance=None, prefer_left=True +): + # NOTE: This function quickly begins to behave badly if tolerance + # gets too large, e.g. greater than the width of the smallest + # interval. For this reason, intervals that represent a single + # point, e.g. (1.0, 1.0) should not be supported. + array_lo = 0 + array_hi = len(interval_array) + target_tuple = (target,) + i = bisect.bisect_right(interval_array, target_tuple, lo=array_lo, hi=array_hi) + distance_tol = 0.0 if tolerance is None else tolerance + if i == array_lo: + # We are at or to the left of the left endpoint of the + # first interval. + nearest_index = i + delta = _distance_from_interval( + target, interval_array[i], tolerance=distance_tol + ) + elif i == array_hi: + # We are within or to the right of the last interval. + nearest_index = i - 1 + delta = _distance_from_interval( + target, interval_array[i - 1], tolerance=distance_tol + ) + else: + # Find closest interval + if prefer_left: + # In the case of a tie, we return the left interval + # by default. + delta, nearest_index = min( + ( + _distance_from_interval( + target, interval_array[j], tolerance=distance_tol + ), + j, + ) + for j in [i - 1, i] + ) + else: + # If prefer_left=False, we return the right interval. + delta, neg_nearest_index = min( + ( + _distance_from_interval( + target, interval_array[j], tolerance=distance_tol + ), + -j, + ) + for j in [i - 1, i] + ) + nearest_index = -neg_nearest_index + + # If we have two adjacent intervals, e.g. [(0, 1), (1, 2)], and are just + # to the right of the boundary, we will not check the left interval as + # bisect places our tuple, e.g. (1.0+1e-10,), to the right of the right + # interval. + if prefer_left and nearest_index >= array_lo + 1: + delta_left = _distance_from_interval( + target, interval_array[nearest_index - 1], tolerance=distance_tol + ) + if delta_left <= delta: + nearest_index = nearest_index - 1 + delta = delta_left + elif not prefer_left and nearest_index < array_hi - 1: + delta_right = _distance_from_interval( + target, interval_array[nearest_index + 1], tolerance=distance_tol + ) + if delta_right <= delta: + nearest_index = nearest_index + 1 + delta = delta_right + + if tolerance is not None: + if delta > tolerance: + return None + return nearest_index diff --git a/pyomo/contrib/mpc/data/get_cuid.py b/pyomo/contrib/mpc/data/get_cuid.py new file mode 100644 index 00000000000..1f229b35645 --- /dev/null +++ b/pyomo/contrib/mpc/data/get_cuid.py @@ -0,0 +1,94 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.core.base.componentuid import ComponentUID +from pyomo.util.slices import slice_component_along_sets +from pyomo.core.base.indexed_component_slice import IndexedComponent_slice +from pyomo.dae.flatten import get_slice_for_set + + +def get_indexed_cuid(var, sets=None, dereference=None, context=None): + """ + Attempts to convert the provided "var" object into a CUID with + with wildcards. + + Arguments + --------- + var: + Object to process + sets: Tuple of sets + Sets to use if slicing a vardata object + dereference: None or int + Number of times we may access referent attribute to recover a + "base component" from a reference. + context: Block + Block with respect to which slices and CUIDs will be generated + + """ + # TODO: Does this function have a good name? + # Should this function be generalized beyond a single indexing set? + if isinstance(var, ComponentUID): + return var + elif isinstance(var, (str, IndexedComponent_slice)): + # TODO: Raise error if string and context is None + return ComponentUID(var, context=context) + # At this point we are assuming var is a Pyomo Var or VarData object. + + # Is allowing dereference to be an integer worth the confusion it might + # add? + if dereference is None: + # Does this branch make sense? If given an unattached component, + # we dereference, otherwise we don't dereference. + remaining_dereferences = int(var.parent_block() is None) + else: + remaining_dereferences = int(dereference) + if var.is_indexed(): + if var.is_reference() and remaining_dereferences: + remaining_dereferences -= 1 + referent = var.referent + if isinstance(referent, IndexedComponent_slice): + return ComponentUID(referent, context=context) + else: + # If dereference is None, we propagate None, dereferencing + # until we either reach a component attached to a block + # or reach a non-reference component. + dereference = ( + dereference if dereference is None else remaining_dereferences + ) + # NOTE: Calling this function recursively + return get_indexed_cuid(referent, sets, dereference=dereference) + else: + # Assume that var is indexed only by time + # TODO: Should we call slice_component_along_sets here as well? + # To cover the case of b[t0].var, where var is indexed + # by a set we care about, and we also care about time... + # But then maybe we should slice only the sets we care about... + # Don't want to do anything with these sets unless we're + # presented with a vardata... + # + # Should we call flatten.slice_component_along_sets? Then we + # might need to return/yield multiple components here... + # I like making this a "simple" function. The caller can call + # slice_component_along_set on their input data if they expect + # to have components indexed by multiple sets. + # + # TODO: Assert that we're only indexed by the specified set(s)? + # (If these sets are provided, of course...) + index = tuple(get_slice_for_set(s) for s in var.index_set().subsets()) + return ComponentUID(var[index], context=context) + else: + if sets is None: + raise ValueError( + "A ComponentData %s was provided but no set. We need to know\n" + "what set this component should be indexed by." % var.name + ) + slice_ = slice_component_along_sets(var, sets) + return ComponentUID(slice_, context=context) diff --git a/pyomo/contrib/mpc/data/interval_data.py b/pyomo/contrib/mpc/data/interval_data.py new file mode 100644 index 00000000000..cdd3b0e37dc --- /dev/null +++ b/pyomo/contrib/mpc/data/interval_data.py @@ -0,0 +1,203 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 +# by the software owners: The Regents of the University of California, through +# Lawrence Berkeley National Laboratory, National Technology & Engineering +# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University +# Research Corporation, et al. All rights reserved. +# +# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and +# license information. +################################################################################# + +from collections import namedtuple +from pyomo.core.expr.numvalue import value as pyo_value +from pyomo.contrib.mpc.data.get_cuid import get_indexed_cuid +from pyomo.contrib.mpc.data.dynamic_data_base import _is_iterable, _DynamicDataBase +from pyomo.contrib.mpc.data.scalar_data import ScalarData +from pyomo.contrib.mpc.data.find_nearest_index import ( + find_nearest_index, + find_nearest_interval_index, +) + + +IntervalDataTuple = namedtuple("IntervalTuple", ["data", "intervals"]) + + +def assert_disjoint_intervals(intervals): + """ + This function takes intervals in the form of tuples and makes sure + that they are disjoint. + + Arguments + --------- + intervals: iterable + Iterable of tuples, each containing the low and high values of an + interval. + + """ + intervals = list(sorted(intervals)) + for i, (lo, hi) in enumerate(intervals): + if not lo <= hi: + raise RuntimeError( + "Lower endpoint of interval is higher than upper endpoint" + ) + if i != 0: + prev_lo, prev_hi = intervals[i - 1] + if not prev_hi <= lo: + raise RuntimeError( + "Intervals %s and %s are not disjoint" + % ((prev_lo, prev_hi), (lo, hi)) + ) + + +class IntervalData(_DynamicDataBase): + def __init__(self, data, intervals, time_set=None, context=None): + intervals = list(intervals) + if not intervals == list(sorted(intervals)): + raise RuntimeError("Intervals are not sorted in increasing order.") + assert_disjoint_intervals(intervals) + self._intervals = intervals + + # First make sure provided lists of variable data have the + # same lengths as the provided time list. + for key, data_list in data.items(): + if len(data_list) != len(intervals): + raise ValueError( + "Data lists must have same length as time. " + "Length of time is %s while length of data for " + "key %s is %s." % (len(intervals), key, len(data_list)) + ) + super().__init__(data, time_set=time_set, context=context) + + def __eq__(self, other): + if isinstance(other, IntervalData): + return ( + self._data == other.get_data() + and self._intervals == other.get_intervals() + ) + else: + raise TypeError( + "%s and %s are not comparable" % (self.__class__, other.__class__) + ) + + def get_intervals(self): + return self._intervals + + def get_data_at_interval_indices(self, indices): + # NOTE: Much of this code is repeated from TimeSeriesData. + # TODO: Find some way to consolidate. + if _is_iterable(indices): + index_list = list(sorted(indices)) + interval_list = [self._intervals[i] for i in indices] + data = { + cuid: [values[idx] for idx in index_list] + for cuid, values in self._data.items() + } + time_set = self._orig_time_set + return IntervalData(data, interval_list, time_set=time_set) + else: + return ScalarData( + {cuid: values[indices] for cuid, values in self._data.items()} + ) + + # TODO: get_data_at_interval, get_data_at_time + def get_data_at_time(self, time, tolerance=None, prefer_left=True): + if not _is_iterable(time): + index = find_nearest_interval_index( + self._intervals, time, tolerance=tolerance, prefer_left=prefer_left + ) + if index is None: + raise RuntimeError( + "Time point %s not found in an interval within" + " tolerance %s" % (time, tolerance) + ) + else: + raise RuntimeError( + "get_data_at_time is not supported with multiple time points" + " for IntervalData. To sample the piecewise-constant data at" + " particular time points, please use interval_to_series from" + " pyomo.contrib.mpc.data.convert" + ) + return self.get_data_at_interval_indices(index) + + def to_serializable(self): + """ + Convert to json-serializable object. + + """ + intervals = self._intervals + data = { + str(cuid): [pyo_value(val) for val in values] + for cuid, values in self._data.items() + } + return IntervalDataTuple(data, intervals) + + def concatenate(self, other, tolerance=0.0): + """ + Extend interval list and variable data lists with the intervals + and variable values in the provided IntervalData + + """ + other_intervals = other.get_intervals() + intervals = self._intervals + if len(other_intervals) == 0: + return + if other_intervals[0][0] < intervals[-1][1] + tolerance: + # First point of new intervals is less than (within + # tolerance) + raise ValueError( + "Initial time point of target, %s, is not greater than" + " final time point of source, %s, within tolerance %s." + % (other_time[0][0], time[-1][1], tolerance) + ) + self._intervals.extend(other_intervals) + + data = self._data + other_data = other.get_data() + for cuid, values in data.items(): + # We assume that other contains all the cuids in self. + # We make no assumption the other way around. + values.extend(other_data[cuid]) + + def shift_time_points(self, offset): + """ + Apply an offset to stored time points. + + """ + # Note that this is different from what we are doing in + # shift_values_by_time in the helper class. + self._intervals = [(lo + offset, hi + offset) for lo, hi in self._intervals] + + def extract_variables(self, variables, context=None, copy_values=False): + """ + Only keep variables specified. + + """ + if copy_values: + raise NotImplementedError( + "extract_variables with copy_values=True has not been" + " implemented by %s" % self.__class__ + ) + data = {} + if not isinstance(variables, (list, tuple)): + # If variables is not a sequence and is instead a slice (or + # indexed variable), we get either a confusing error message + # or a lot of repeated work. + raise TypeError("extract_values only accepts a list or tuple of variables") + for var in variables: + cuid = get_indexed_cuid(var, (self._orig_time_set,), context=context) + data[cuid] = self._data[cuid] + return IntervalData(data, self._intervals, time_set=self._orig_time_set) diff --git a/pyomo/contrib/mpc/data/scalar_data.py b/pyomo/contrib/mpc/data/scalar_data.py new file mode 100644 index 00000000000..5426921ef06 --- /dev/null +++ b/pyomo/contrib/mpc/data/scalar_data.py @@ -0,0 +1,44 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.core.expr.numvalue import value as pyo_value +from pyomo.contrib.mpc.data.dynamic_data_base import _is_iterable, _DynamicDataBase +from pyomo.contrib.mpc.data.get_cuid import get_indexed_cuid + + +class ScalarData(_DynamicDataBase): + """ + An object to store scalar data associated with time-indexed + variables. + """ + + def __init__(self, data, time_set=None, context=None): + """ + Arguments: + ---------- + data: dict or ComponentMap + Maps variables, names, or CUIDs to lists of values + + """ + for key, val in data.items(): + if _is_iterable(val): + raise TypeError( + "Value %s corresponding to key %s is not a scalar" % (val, key) + ) + super().__init__(data, time_set=time_set, context=context) + + def to_serializable(self): + """ + Convert to json-serializable object. + + """ + data = {str(cuid): pyo_value(val) for cuid, val in self._data.items()} + return data diff --git a/pyomo/contrib/mpc/data/series_data.py b/pyomo/contrib/mpc/data/series_data.py new file mode 100644 index 00000000000..d09ab8cae24 --- /dev/null +++ b/pyomo/contrib/mpc/data/series_data.py @@ -0,0 +1,218 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from collections import namedtuple +from pyomo.core.expr.numvalue import value as pyo_value +from pyomo.contrib.mpc.data.find_nearest_index import find_nearest_index +from pyomo.contrib.mpc.data.get_cuid import get_indexed_cuid +from pyomo.contrib.mpc.data.dynamic_data_base import _is_iterable, _DynamicDataBase +from pyomo.contrib.mpc.data.scalar_data import ScalarData + + +TimeSeriesTuple = namedtuple("TimeSeriesTuple", ["data", "time"]) + + +class TimeSeriesData(_DynamicDataBase): + """ + An object to store time series data associated with time-indexed + variables. + + """ + + def __init__(self, data, time, time_set=None, context=None): + """ + Arguments: + ---------- + data: dict or ComponentMap + Maps variables, names, or CUIDs to lists of values + time: list + Contains the time points corresponding to variable data points. + + """ + _time = list(time) + if _time != list(sorted(time)): + raise ValueError("Time points are not sorted in increasing order") + self._time = _time + + # When looking up a value at a particular time point, we will use + # this map to try and find the index of the time point. If this lookup + # fails, we will use binary search-within-tolerance to attempt to find + # a point that is close enough. + # + # WARNING: If the list of time points is updated, e.g. via + # shift_time_points or concatenate, then this map needs to be + # updated as well. + self._time_idx_map = {t: idx for idx, t in enumerate(time)} + + # First make sure provided lists of variable data have the + # same lengths as the provided time list. + for key, data_list in data.items(): + if len(data_list) != len(time): + raise ValueError( + "Data lists must have same length as time. " + "Length of time is %s while length of data for " + "key %s is %s." % (len(time), key, len(data_list)) + ) + super().__init__(data, time_set=time_set, context=context) + + def __eq__(self, other): + if isinstance(other, TimeSeriesData): + return self._data == other._data and self._time == other._time + else: + # Should this return False or raise TypeError? + raise TypeError( + "%s and %s are not comparable" % (self.__class__, other.__class__) + ) + + def get_time_points(self): + """ + Get time points of the time series data + + """ + return self._time + + def get_data_at_time_indices(self, indices): + """ + Returns data at the specified index or indices of this object's list + of time points. + + """ + if _is_iterable(indices): + # Raise error if indices not sorted? + index_list = list(sorted(indices)) + time_list = [self._time[i] for i in indices] + data = { + cuid: [values[idx] for idx in index_list] + for cuid, values in self._data.items() + } + time_set = self._orig_time_set + return TimeSeriesData(data, time_list, time_set=time_set) + else: + # indices is a scalar + return ScalarData( + {cuid: values[indices] for cuid, values in self._data.items()} + ) + + def get_data_at_time(self, time=None, tolerance=0.0): + """ + Returns the data associated with the provided time point or points. + This function attempts to map time points to indices, then uses + get_data_at_time_indices to actually extract the data. If a provided + time point does not exist in the time-index map, binary search is + used to find the closest value within a tolerance. + + Parameters + ---------- + time: Float or iterable + The time point or points corresponding to returned data. + tolerance: Float + Tolerance within which we will search for a matching time point. + The default is 0.0, meaning time points must be specified exactly. + + Returns + ------- + TimeSeriesData or ScalarData + TimeSeriesData containing only the specified time points + or dict mapping CUIDs to values at the specified scalar time + point. + + """ + if time is None: + # If time is not specified, assume we want the entire time + # set. Skip all the overhead, don't create a new object, and + # return self. + return self + is_iterable = _is_iterable(time) + time_iter = iter(time) if is_iterable else (time,) + indices = [] + # Allocate indices list dynamically to support a general iterator + # for time. Not sure if this will ever matter... + for t in time_iter: + if t in self._time_idx_map: + idx = self._time_idx_map[t] + else: + idx = find_nearest_index(self._time, t, tolerance=tolerance) + if idx is None: + raise RuntimeError( + "Time point %s is invalid within tolerance %s" % (t, tolerance) + ) + indices.append(idx) + if not is_iterable: + indices = indices[0] + return self.get_data_at_time_indices(indices) + + def to_serializable(self): + """ + Convert to json-serializable object. + + """ + time = self._time + data = { + str(cuid): [pyo_value(val) for val in values] + for cuid, values in self._data.items() + } + return TimeSeriesTuple(data, time) + + def concatenate(self, other, tolerance=0.0): + """ + Extend time list and variable data lists with the time points + and variable values in the provided TimeSeriesData. + The new time points must be strictly greater than the old time + points. + + """ + other_time = other.get_time_points() + time = self._time + if other_time[0] < time[-1] + tolerance: + raise ValueError( + "Initial time point of target, %s, is not greater than" + " final time point of source, %s, within tolerance %s." + % (other_time[0], time[-1], tolerance) + ) + self._time.extend(other.get_time_points()) + + # Update _time_idx_map as we have altered the list of time points. + n_time = len(time) + for i, t in enumerate(other_time): + self._time_idx_map[t] = n_time + i + + data = self._data + other_data = other.get_data() + for cuid, values in data.items(): + # We assume that other contains all the cuids in self. + # We make no assumption the other way around. + values.extend(other_data[cuid]) + + def shift_time_points(self, offset): + """ + Apply an offset to stored time points. + + """ + # Note that this is different from what we are doing in + # shift_values_by_time in the helper class. + self._time = [t + offset for t in self._time] + self._time_idx_map = {t: idx for idx, t in enumerate(self._time)} + + def extract_variables(self, variables, context=None, copy_values=False): + """ + Only keep variables specified. + + """ + if copy_values: + raise NotImplementedError( + "extract_variables with copy_values=True has not been" + " implemented by %s" % self.__class__ + ) + data = {} + for var in variables: + cuid = get_indexed_cuid(var, (self._orig_time_set,), context=context) + data[cuid] = self._data[cuid] + return TimeSeriesData(data, self._time, time_set=self._orig_time_set) diff --git a/pyomo/contrib/mpc/data/tests/__init__.py b/pyomo/contrib/mpc/data/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pyomo/contrib/mpc/data/tests/test_convert.py b/pyomo/contrib/mpc/data/tests/test_convert.py new file mode 100644 index 00000000000..0f8a4623e20 --- /dev/null +++ b/pyomo/contrib/mpc/data/tests/test_convert.py @@ -0,0 +1,188 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +import pytest +import random + +import pyomo.environ as pyo +import pyomo.dae as dae +from pyomo.common.collections import ComponentMap +from pyomo.contrib.mpc.data.scalar_data import ScalarData +from pyomo.contrib.mpc.data.series_data import TimeSeriesData +from pyomo.contrib.mpc.data.interval_data import IntervalData +from pyomo.contrib.mpc.data.convert import ( + _process_to_dynamic_data, + interval_to_series, + series_to_interval, +) + + +def _make_model(): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=[0.1 * i for i in range(11)]) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var(m.time, m.comp, initialize=1.0) + return m + + +class TestIntervalToSeries(unittest.TestCase): + def test_no_time_points(self): + m = _make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.7, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + + series_data = interval_to_series(interval_data) + # Default uses right endpoint of each interval + pred_time_points = [0.2, 0.5, 1.0] + pred_data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + self.assertEqual(series_data, TimeSeriesData(pred_data, pred_time_points)) + + def test_no_time_points_left_endpoints(self): + m = _make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.7, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + + series_data = interval_to_series(interval_data, use_left_endpoints=True) + pred_time_points = [0.0, 0.2, 0.7] + pred_data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + self.assertEqual(series_data, TimeSeriesData(pred_data, pred_time_points)) + + def test_time_points_provided_no_boundary(self): + m = _make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + + # Choose some time points that don't lie on interval boundaries + time_points = [0.05 + i * 0.1 for i in range(10)] + series_data = interval_to_series(interval_data, time_points=time_points) + pred_data = { + m.var[:, "A"]: [1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0], + m.var[:, "B"]: [4.0, 4.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0], + } + self.assertEqual(series_data, TimeSeriesData(pred_data, time_points)) + + def test_time_points_provided_some_on_boundary(self): + m = _make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + + time_points = [0.1 * i for i in range(11)] + series_data = interval_to_series(interval_data, time_points=time_points) + # Some of the time points are on interval boundaries. By default we + # use the values from the intervals on the left. + pred_data = { + m.var[:, "A"]: [1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0], + m.var[:, "B"]: [4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0], + } + self.assertEqual(series_data, TimeSeriesData(pred_data, time_points)) + + def test_time_points_provided_some_on_boundary_use_right(self): + m = _make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + + time_points = [0.1 * i for i in range(11)] + series_data = interval_to_series( + interval_data, time_points=time_points, prefer_left=False + ) + # Some of the time points are on interval boundaries. By default we + # use the values from the intervals on the left. + pred_data = { + m.var[:, "A"]: [1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0], + m.var[:, "B"]: [4.0, 4.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0, 6.0], + } + self.assertEqual(series_data, TimeSeriesData(pred_data, time_points)) + + def test_with_roundoff_error(self): + m = _make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + + # Simulate roundoff error in these time points. + random.seed(12710) + time_points = [i * 0.1 + random.uniform(-1e-8, 1e-8) for i in range(11)] + series_data = interval_to_series( + interval_data, time_points=time_points, tolerance=1e-7 + ) + pred_data = { + m.var[:, "A"]: [1.0, 1.0, 1.0, 2.0, 2.0, 2.0, 3.0, 3.0, 3.0, 3.0, 3.0], + m.var[:, "B"]: [4.0, 4.0, 4.0, 5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 6.0, 6.0], + } + self.assertEqual(series_data, TimeSeriesData(pred_data, time_points)) + + +class TestSeriesToInterval(unittest.TestCase): + def test_singleton(self): + m = _make_model() + time_points = [0.1] + data = {m.var[:, "A"]: [0.5], m.var[:, "B"]: [2.0]} + series_data = TimeSeriesData(data, time_points) + interval_data = series_to_interval(series_data) + pred_data = IntervalData( + {m.var[:, "A"]: [0.5], m.var[:, "B"]: [2.0]}, [(0.1, 0.1)] + ) + self.assertEqual(interval_data, pred_data) + + def test_convert(self): + m = _make_model() + time_points = [0.1, 0.2, 0.3, 0.4, 0.5] + data = { + m.var[:, "A"]: [1.0, 2.0, 3.0, 4.0, 5.0], + m.var[:, "B"]: [6.0, 7.0, 8.0, 9.0, 10.0], + } + series_data = TimeSeriesData(data, time_points) + interval_data = series_to_interval(series_data) + + pred_data = IntervalData( + {m.var[:, "A"]: [2.0, 3.0, 4.0, 5.0], m.var[:, "B"]: [7.0, 8.0, 9.0, 10.0]}, + [(0.1, 0.2), (0.2, 0.3), (0.3, 0.4), (0.4, 0.5)], + ) + self.assertEqual(pred_data, interval_data) + + def test_convert_use_right(self): + m = _make_model() + time_points = [0.1, 0.2, 0.3, 0.4, 0.5] + data = { + m.var[:, "A"]: [1.0, 2.0, 3.0, 4.0, 5.0], + m.var[:, "B"]: [6.0, 7.0, 8.0, 9.0, 10.0], + } + series_data = TimeSeriesData(data, time_points) + interval_data = series_to_interval(series_data, use_left_endpoints=True) + + pred_data = IntervalData( + {m.var[:, "A"]: [1.0, 2.0, 3.0, 4.0], m.var[:, "B"]: [6.0, 7.0, 8.0, 9.0]}, + [(0.1, 0.2), (0.2, 0.3), (0.3, 0.4), (0.4, 0.5)], + ) + self.assertEqual(pred_data, interval_data) + + +class TestProcessToDynamic(unittest.TestCase): + def test_non_time_indexed_data(self): + m = _make_model() + m.scalar_var = pyo.Var(m.comp, initialize=3.0) + data = ComponentMap([(m.scalar_var["A"], 3.1), (m.scalar_var["B"], 3.2)]) + # Passing non-time-indexed data to ScalarData just returns + # a ScalarData object with the non-time-indexed CUIDs as keys. + dyn_data = _process_to_dynamic_data(data) + self.assertTrue(isinstance(dyn_data, ScalarData)) + self.assertIn(pyo.ComponentUID(m.scalar_var["A"]), dyn_data.get_data()) + self.assertIn(pyo.ComponentUID(m.scalar_var["B"]), dyn_data.get_data()) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/data/tests/test_find_nearest_index.py b/pyomo/contrib/mpc/data/tests/test_find_nearest_index.py new file mode 100644 index 00000000000..e90024ef108 --- /dev/null +++ b/pyomo/contrib/mpc/data/tests/test_find_nearest_index.py @@ -0,0 +1,254 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +import pytest + +from pyomo.contrib.mpc.data.find_nearest_index import ( + find_nearest_index, + find_nearest_interval_index, +) + + +class TestFindNearestIndex(unittest.TestCase): + def test_two_points(self): + array = [0, 5] + + i = find_nearest_index(array, 1) + self.assertEqual(i, 0) + i = find_nearest_index(array, 1, tolerance=0.5) + self.assertEqual(i, None) + + i = find_nearest_index(array, -0.01, tolerance=0.1) + self.assertEqual(i, 0) + i = find_nearest_index(array, -0.01, tolerance=0.001) + self.assertEqual(i, None) + + i = find_nearest_index(array, 6, tolerance=2) + self.assertEqual(i, 1) + i = find_nearest_index(array, 6, tolerance=1) + self.assertEqual(i, 1) + + # This test relies on the behavior for tiebreaks + i = find_nearest_index(array, 2.5) + self.assertEqual(i, 0) + + def test_array_with_floats(self): + array = [] + for i in range(5): + i0 = float(i) + i1 = round((i + 0.15) * 1e4) / 1e4 + i2 = round((i + 0.64) * 1e4) / 1e4 + array.extend([i, i1, i2]) + array.append(5.0) + + i = find_nearest_index(array, 1.01, tolerance=0.1) + self.assertEqual(i, 3) + i = find_nearest_index(array, 1.01, tolerance=0.001) + self.assertEqual(i, None) + + i = find_nearest_index(array, 3.5) + self.assertEqual(i, 11) + i = find_nearest_index(array, 3.5, tolerance=0.1) + self.assertEqual(i, None) + + i = find_nearest_index(array, -1) + self.assertEqual(i, 0) + i = find_nearest_index(array, -1, tolerance=1) + self.assertEqual(i, 0) + + i = find_nearest_index(array, 5.5) + self.assertEqual(i, 15) + i = find_nearest_index(array, 5.5, tolerance=0.49) + self.assertEqual(i, None) + + i = find_nearest_index(array, 2.64, tolerance=1e-8) + self.assertEqual(i, 8) + i = find_nearest_index(array, 2.64, tolerance=0) + self.assertEqual(i, 8) + + i = find_nearest_index(array, 5, tolerance=0) + self.assertEqual(i, 15) + + i = find_nearest_index(array, 0, tolerance=0) + self.assertEqual(i, 0) + + +class TestFindNearestIntervalIndex(unittest.TestCase): + def test_find_interval(self): + intervals = [(0.0, 0.1), (0.1, 0.5), (0.7, 1.0)] + target = 0.05 + idx = find_nearest_interval_index(intervals, target) + self.assertEqual(idx, 0) + + target = 0.099 + idx = find_nearest_interval_index(intervals, target) + self.assertEqual(idx, 0) + + target = 0.1 + idx = find_nearest_interval_index(intervals, target) + self.assertEqual(idx, 0) + + target = 0.1 + idx = find_nearest_interval_index(intervals, target, prefer_left=False) + self.assertEqual(idx, 1) + + target = 0.55 + idx = find_nearest_interval_index(intervals, target) + self.assertEqual(idx, 1) + + target = 0.60 + idx = find_nearest_interval_index(intervals, target) + self.assertEqual(idx, 1) + + target = 0.6999 + idx = find_nearest_interval_index(intervals, target) + self.assertEqual(idx, 2) + + target = 1.0 + idx = find_nearest_interval_index(intervals, target) + self.assertEqual(idx, 2) + + target = -0.1 + idx = find_nearest_interval_index(intervals, target) + self.assertEqual(idx, 0) + + target = 1.1 + idx = find_nearest_interval_index(intervals, target) + self.assertEqual(idx, 2) + + def test_find_interval_tolerance(self): + intervals = [(0.0, 0.1), (0.1, 0.5), (0.7, 1.0)] + + target = 0.501 + idx = find_nearest_interval_index(intervals, target, tolerance=None) + self.assertEqual(idx, 1) + + idx = find_nearest_interval_index(intervals, target, tolerance=1e-5) + self.assertEqual(idx, None) + + idx = find_nearest_interval_index(intervals, target, tolerance=1e-2) + self.assertEqual(idx, 1) + + target = 1.001 + idx = find_nearest_interval_index(intervals, target, tolerance=1e-2) + self.assertEqual(idx, 2) + + # + # Behavior when distance between target and nearest interval "equals" + # the tolerance is not well-defined. Here the computed distance may + # not be exactly 1e-3 due to roundoff error. + # + # idx = find_nearest_interval_index(intervals, target, tolerance=1e-3) + # self.assertEqual(idx, None) + + idx = find_nearest_interval_index(intervals, target, tolerance=1e-4) + self.assertEqual(idx, None) + + def test_find_interval_with_tolerance_on_boundary(self): + # Our target is on the boundary between two intervals. + intervals = [(0.0, 0.1), (0.1, 0.5), (0.5, 1.0)] + target = 0.1001 + idx = find_nearest_interval_index( + intervals, target, tolerance=None, prefer_left=True + ) + self.assertEqual(idx, 1) + + # target != 0.1 (the interval boundary) within tolerance. We are + # within interval 1. + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-5, prefer_left=True + ) + self.assertEqual(idx, 1) + # This is true even if we prefer the right interval + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-5, prefer_left=False + ) + self.assertEqual(idx, 1) + + # target == 0.1 within tolerance. We are on the boundary, and + # should return the "preferred" interval. + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=True + ) + self.assertEqual(idx, 0) + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=False + ) + self.assertEqual(idx, 1) + + target = 0.4999 + # We are not equal to boundary (0.5) within tolerance + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-5, prefer_left=True + ) + self.assertEqual(idx, 1) + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-5, prefer_left=False + ) + self.assertEqual(idx, 1) + + # We are equal to boundary within tolerance + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=True + ) + self.assertEqual(idx, 1) + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=False + ) + self.assertEqual(idx, 2) + + def test_find_interval_with_tolerance_singleton(self): + intervals = [(0.0, 0.1), (0.1, 0.1), (0.5, 0.5), (0.5, 1.0)] + + target = 0.1001 + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=True + ) + self.assertEqual(idx, 0) + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=False + ) + self.assertEqual(idx, 1) + + target = 0.0999 + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=True + ) + self.assertEqual(idx, 0) + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=False + ) + self.assertEqual(idx, 1) + + target = 0.4999 + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=True + ) + self.assertEqual(idx, 2) + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=False + ) + self.assertEqual(idx, 3) + + target = 0.5001 + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=True + ) + self.assertEqual(idx, 2) + idx = find_nearest_interval_index( + intervals, target, tolerance=1e-3, prefer_left=False + ) + self.assertEqual(idx, 3) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/data/tests/test_get_cuid.py b/pyomo/contrib/mpc/data/tests/test_get_cuid.py new file mode 100644 index 00000000000..30ba2b58b1b --- /dev/null +++ b/pyomo/contrib/mpc/data/tests/test_get_cuid.py @@ -0,0 +1,110 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +import pyomo.environ as pyo +from pyomo.contrib.mpc.data.get_cuid import get_indexed_cuid + + +class TestGetCUID(unittest.TestCase): + def _make_model(self): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=[0.0, 0.1, 0.2]) + m.space = pyo.Set(initialize=[1.0, 1.5, 2.0]) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var(m.time, m.comp, initialize=1.0) + m.txc_var = pyo.Var(m.time, m.space, m.comp, initialize=2.0) + + @m.Block(m.time, m.space) + def b(b, t, x): + b.bvar1 = pyo.Var(initialize=3.0) + b.bvar2 = pyo.Var(m.comp, initialize=3.0) + + return m + + def test_get_cuid(self): + m = self._make_model() + + pred_cuid = pyo.ComponentUID(m.var[:, "A"]) + self.assertEqual(get_indexed_cuid(m.var[:, "A"]), pred_cuid) + self.assertEqual(get_indexed_cuid(pyo.Reference(m.var[:, "A"])), pred_cuid) + self.assertEqual(get_indexed_cuid("var[*,A]"), pred_cuid) + self.assertEqual(get_indexed_cuid("var[*,'A']"), pred_cuid) + self.assertEqual(get_indexed_cuid(m.var[0, "A"], sets=(m.time,)), pred_cuid) + + def test_get_cuid_twosets(self): + m = self._make_model() + + pred_cuid = pyo.ComponentUID(m.b[:, :].bvar2["A"]) + self.assertEqual(get_indexed_cuid(m.b[:, :].bvar2["A"]), pred_cuid) + self.assertEqual( + get_indexed_cuid(pyo.Reference(m.b[:, :].bvar2["A"])), pred_cuid + ) + self.assertEqual(get_indexed_cuid("b[*,*].bvar2[A]"), pred_cuid) + self.assertEqual( + get_indexed_cuid(m.b[0, 1].bvar2["A"], sets=(m.time, m.space)), pred_cuid + ) + + def test_get_cuid_dereference(self): + m = self._make_model() + m.ref = pyo.Reference(m.var[:, "A"]) + m.ref2 = pyo.Reference(m.ref) + + pred_cuid = pyo.ComponentUID(m.var[:, "A"]) + + # ref is attached to the model, so by default we don't "dereference" + self.assertNotEqual(get_indexed_cuid(m.ref), pred_cuid) + self.assertEqual(get_indexed_cuid(m.ref), pyo.ComponentUID(m.ref[:])) + + # If we use dereference=True, we do dereference and get the CUID of + # the underlying slice. + self.assertEqual(get_indexed_cuid(m.ref, dereference=True), pred_cuid) + + # However, we only dereference once, so a reference-to-reference + # does not reveal the underlying slice (of the original reference) + self.assertNotEqual(get_indexed_cuid(m.ref2, dereference=True), pred_cuid) + self.assertEqual( + get_indexed_cuid(m.ref2, dereference=True), pyo.ComponentUID(m.ref[:]) + ) + + # But if we use dereference=2, we allow two dereferences, and get + # the original slice + self.assertEqual(get_indexed_cuid(m.ref2, dereference=2), pred_cuid) + + def test_get_cuid_context(self): + m = self._make_model() + top = pyo.ConcreteModel() + top.m = m + + pred_cuid = pyo.ComponentUID(m.var[:, "A"], context=m) + self.assertEqual(get_indexed_cuid(m.var[:, "A"], context=m), pred_cuid) + self.assertEqual( + get_indexed_cuid(pyo.Reference(m.var[:, "A"]), context=m), pred_cuid + ) + + # This is what we would expect without a context arg + full_cuid = pyo.ComponentUID(m.var[:, "A"]) + self.assertNotEqual(get_indexed_cuid("m.var[*,A]"), pred_cuid) + self.assertEqual(get_indexed_cuid("m.var[*,A]"), full_cuid) + + msg = "Context is not allowed" + with self.assertRaisesRegex(ValueError, msg): + # Passing context with a string raises a reasonable + # error from the CUID constructor + get_indexed_cuid("m.var[*,A]", context=m) + + self.assertEqual( + get_indexed_cuid(m.var[0, "A"], sets=(m.time,), context=m), pred_cuid + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/data/tests/test_interval_data.py b/pyomo/contrib/mpc/data/tests/test_interval_data.py new file mode 100644 index 00000000000..8afe3eb3021 --- /dev/null +++ b/pyomo/contrib/mpc/data/tests/test_interval_data.py @@ -0,0 +1,354 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 +# by the software owners: The Regents of the University of California, through +# Lawrence Berkeley National Laboratory, National Technology & Engineering +# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University +# Research Corporation, et al. All rights reserved. +# +# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and +# license information. +################################################################################# + +import pyomo.common.unittest as unittest +import pytest + +import pyomo.environ as pyo +import pyomo.dae as dae +import pyomo.contrib.mpc as mpc +from pyomo.contrib.mpc.data.scalar_data import ScalarData +from pyomo.contrib.mpc.data.interval_data import assert_disjoint_intervals, IntervalData + + +class TestIntervalData(unittest.TestCase): + def _make_model(self): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=[0.1 * i for i in range(11)]) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var(m.time, m.comp, initialize=1.0) + return m + + def test_construct(self): + m = self._make_model() + intervals = [(0.0, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0], m.var[:, "B"]: [3.0, 4.0]} + interval_data = IntervalData(data, intervals) + + self.assertEqual( + interval_data.get_data(), + {pyo.ComponentUID(key): val for key, val in data.items()}, + ) + self.assertEqual(intervals, interval_data.get_intervals()) + + def test_eq(self): + m = self._make_model() + intervals = [(0.0, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0], m.var[:, "B"]: [3.0, 4.0]} + interval_data_1 = IntervalData(data, intervals) + + data = {m.var[:, "A"]: [1.0, 2.0], m.var[:, "B"]: [3.0, 4.0]} + interval_data_2 = IntervalData(data, intervals) + + self.assertEqual(interval_data_1, interval_data_2) + + data = {m.var[:, "A"]: [1.0, 3.0], m.var[:, "B"]: [3.0, 4.0]} + interval_data_3 = IntervalData(data, intervals) + + self.assertNotEqual(interval_data_1, interval_data_3) + + def test_get_data_at_indices_multiple(self): + m = self._make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + data = interval_data.get_data_at_interval_indices([0, 2]) + + pred_data = IntervalData( + {m.var[:, "A"]: [1.0, 3.0], m.var[:, "B"]: [4.0, 6.0]}, + [(0.0, 0.2), (0.5, 1.0)], + ) + self.assertEqual(pred_data, data) + + def test_get_data_at_indices_singleton(self): + m = self._make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + data = interval_data.get_data_at_interval_indices(1) + pred_data = ScalarData({m.var[:, "A"]: 2.0, m.var[:, "B"]: 5.0}) + self.assertEqual(data, pred_data) + + def test_get_data_at_time_scalar(self): + m = self._make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + + data = interval_data.get_data_at_time(0.1) + pred_data = ScalarData({m.var[:, "A"]: 1.0, m.var[:, "B"]: 4.0}) + self.assertEqual(data, pred_data) + + # Default is to allow time points outside of intervals + # (finds the nearest interval) + data = interval_data.get_data_at_time(1.1) + pred_data = ScalarData({m.var[:, "A"]: 3.0, m.var[:, "B"]: 6.0}) + self.assertEqual(data, pred_data) + + msg = "Time point.*not found" + with self.assertRaisesRegex(RuntimeError, msg): + data = interval_data.get_data_at_time(1.1, tolerance=1e-3) + + # If a point on an interval boundary is supplied, default is to + # use value on left. + data = interval_data.get_data_at_time(0.5) + pred_data = ScalarData({m.var[:, "A"]: 2.0, m.var[:, "B"]: 5.0}) + self.assertEqual(data, pred_data) + + data = interval_data.get_data_at_time(0.5, prefer_left=False) + pred_data = ScalarData({m.var[:, "A"]: 3.0, m.var[:, "B"]: 6.0}) + self.assertEqual(data, pred_data) + + def test_to_serializable(self): + m = self._make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + json_data = interval_data.to_serializable() + self.assertEqual( + json_data, + ( + {"var[*,A]": [1.0, 2.0, 3.0], "var[*,B]": [4.0, 5.0, 6.0]}, + [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)], + ), + ) + + def test_concatenate(self): + m = self._make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data_1 = IntervalData(data, intervals) + + intervals = [(1.0, 1.5), (2.0, 3.0)] + data = {m.var[:, "A"]: [7.0, 8.0], m.var[:, "B"]: [9.0, 10.0]} + interval_data_2 = IntervalData(data, intervals) + + interval_data_1.concatenate(interval_data_2) + + new_intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0), (1.0, 1.5), (2.0, 3.0)] + new_values = { + m.var[:, "A"]: [1.0, 2.0, 3.0, 7.0, 8.0], + m.var[:, "B"]: [4.0, 5.0, 6.0, 9.0, 10.0], + } + new_data = IntervalData(new_values, new_intervals) + self.assertEqual(interval_data_1, new_data) + + def test_shift_time_points(self): + m = self._make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals) + interval_data.shift_time_points(1.0) + + intervals = [(1.0, 1.2), (1.2, 1.5), (1.5, 2.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + new_interval_data = IntervalData(data, intervals) + self.assertEqual(interval_data, new_interval_data) + + def test_extract_variables(self): + m = self._make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals, time_set=m.time) + new_data = interval_data.extract_variables([m.var[:, "B"]]) + value_dict = {m.var[:, "B"]: [4.0, 5.0, 6.0]} + pred_data = IntervalData(value_dict, intervals) + self.assertEqual(new_data, pred_data) + + def test_extract_variables_exception(self): + m = self._make_model() + intervals = [(0.0, 0.2), (0.2, 0.5), (0.5, 1.0)] + data = {m.var[:, "A"]: [1.0, 2.0, 3.0], m.var[:, "B"]: [4.0, 5.0, 6.0]} + interval_data = IntervalData(data, intervals, time_set=m.time) + msg = "only accepts a list or tuple" + with self.assertRaisesRegex(TypeError, msg): + new_data = interval_data.extract_variables(m.var[:, "B"]) + + +class TestAssertDisjoint(unittest.TestCase): + def test_disjoint(self): + intervals = [(0, 1), (1, 2)] + assert_disjoint_intervals(intervals) + + intervals = [(2, 3), (0, 1)] + assert_disjoint_intervals(intervals) + + intervals = [(0, 1), (1, 1)] + assert_disjoint_intervals(intervals) + + def test_backwards_endpoints(self): + intervals = [(0, 1), (3, 2)] + msg = "Lower endpoint of interval is higher" + with self.assertRaisesRegex(RuntimeError, msg): + assert_disjoint_intervals(intervals) + + def test_not_disjoint(self): + intervals = [(0, 2), (1, 3)] + msg = "are not disjoint" + with self.assertRaisesRegex(RuntimeError, msg): + assert_disjoint_intervals(intervals) + + +class TestLoadInputs(unittest.TestCase): + def make_model(self): + m = pyo.ConcreteModel() + m.time = dae.ContinuousSet(initialize=[0, 1, 2, 3, 4, 5, 6]) + m.v = pyo.Var(m.time, initialize=0) + return m + + def test_load_inputs_some_time(self): + m = self.make_model() + interface = mpc.DynamicModelInterface(m, m.time) + inputs = mpc.IntervalData({"v": [1.0]}, [(2, 4)]) + interface.load_data(inputs) + + for t in m.time: + # Note that by default, the left endpoint is not loaded. + if t == 3 or t == 4: + self.assertEqual(m.v[t].value, 1.0) + else: + self.assertEqual(m.v[t].value, 0.0) + + def test_load_inputs_some_time_include_endpoints(self): + m = self.make_model() + interface = mpc.DynamicModelInterface(m, m.time) + inputs = mpc.IntervalData({"v": [1.0]}, [(2, 4)]) + + # Default is to exclude right and include left + interface.load_data(inputs, exclude_left_endpoint=False) + + for t in m.time: + if t == 2 or t == 3 or t == 4: + self.assertEqual(m.v[t].value, 1.0) + else: + self.assertEqual(m.v[t].value, 0.0) + + def test_load_inputs_some_time_exclude_endpoints(self): + m = self.make_model() + interface = mpc.DynamicModelInterface(m, m.time) + inputs = mpc.IntervalData({"v": [1.0]}, [(2, 4)]) + + # Default is to exclude right and include left + interface.load_data(inputs, exclude_right_endpoint=True) + + for t in m.time: + if t == 3: + self.assertEqual(m.v[t].value, 1.0) + else: + self.assertEqual(m.v[t].value, 0.0) + + def test_load_inputs_all_time_default(self): + m = self.make_model() + interface = mpc.DynamicModelInterface(m, m.time) + inputs = mpc.IntervalData({"v": [1.0, 2.0]}, [(0, 3), (3, 6)]) + interface.load_data(inputs) + for t in m.time: + if t == 0: + self.assertEqual(m.v[t].value, 0.0) + elif t <= 3: + self.assertEqual(m.v[t].value, 1.0) + else: + self.assertEqual(m.v[t].value, 2.0) + + def test_load_inputs_all_time_prefer_right(self): + m = self.make_model() + interface = mpc.DynamicModelInterface(m, m.time) + inputs = mpc.IntervalData({"v": [1.0, 2.0]}, [(0, 3), (3, 6)]) + interface.load_data(inputs, prefer_left=False) + for t in m.time: + if t < 3: + self.assertEqual(m.v[t].value, 1.0) + elif t == 6: + # By default, preferring intervals to the right of time + # points will exclude the right endpoints of intervals. + self.assertEqual(m.v[t].value, 0.0) + else: + self.assertEqual(m.v[t].value, 2.0) + + def test_load_inputs_all_time_prefer_right_dont_exclude(self): + m = self.make_model() + interface = mpc.DynamicModelInterface(m, m.time) + inputs = mpc.IntervalData({"v": [1.0, 2.0]}, [(0, 3), (3, 6)]) + interface.load_data(inputs, prefer_left=False, exclude_right_endpoint=False) + # Note that all time points have been set. + for t in m.time: + if t < 3: + self.assertEqual(m.v[t].value, 1.0) + else: + self.assertEqual(m.v[t].value, 2.0) + + def load_inputs_invalid_time(self): + m = self.make_model() + interface = mpc.DynamicModelInterface(m, m.time) + inputs = mpc.IntervalData({"v": [1.0, 2.0, 3.0]}, [(0, 3), (3, 6), (6, 9)]) + interface.load_data(inputs) + for t in m.time: + if t == 0: + self.assertEqual(m.v[t].value, 0.0) + elif t <= 3: + self.assertEqual(m.v[t].value, 1.0) + else: + self.assertEqual(m.v[t].value, 2.0) + + def load_inputs_exception(self): + m = self.make_model() + interface = mpc.DynamicModelInterface(m, m.time) + inputs = {"_v": {(0, 3): 1.0, (3, 6): 2.0, (6, 9): 3.0}} + inputs = mpc.IntervalData({"_v": [1.0, 2.0, 3.0]}, [(0, 3), (3, 6), (6, 9)]) + with self.assertRaisesRegex(RuntimeError, "Cannot find"): + interface.load_data(inputs) + + +class TestIntervalFromTimeSeries(unittest.TestCase): + def test_singleton(self): + name = "name" + series = mpc.TimeSeriesData({name: [2.0]}, [1.0]) + interval = mpc.data.convert.series_to_interval(series) + self.assertEqual(interval, IntervalData({name: [2.0]}, [(1.0, 1.0)])) + + def test_empty(self): + name = "name" + series = mpc.TimeSeriesData({name: []}, []) + interval = mpc.data.convert.series_to_interval(series) + self.assertEqual(interval, mpc.IntervalData({name: []}, [])) + + def test_interval_from_series(self): + name = "name" + series = mpc.TimeSeriesData({name: [4.0, 5.0, 6.0]}, [1, 2, 3]) + interval = mpc.data.convert.series_to_interval(series) + self.assertEqual( + interval, mpc.IntervalData({name: [5.0, 6.0]}, [(1, 2), (2, 3)]) + ) + + def test_use_left_endpoint(self): + name = "name" + series = mpc.TimeSeriesData({name: [4.0, 5.0, 6.0]}, [1, 2, 3]) + interval = mpc.data.convert.series_to_interval(series, use_left_endpoints=True) + self.assertEqual( + interval, mpc.IntervalData({name: [4.0, 5.0]}, [(1, 2), (2, 3)]) + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/data/tests/test_scalar_data.py b/pyomo/contrib/mpc/data/tests/test_scalar_data.py new file mode 100644 index 00000000000..110ed749bda --- /dev/null +++ b/pyomo/contrib/mpc/data/tests/test_scalar_data.py @@ -0,0 +1,119 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest + +import pyomo.environ as pyo +from pyomo.contrib.mpc.data.scalar_data import ScalarData + + +class TestScalarData(unittest.TestCase): + def _make_model(self): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=[0, 1, 2]) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var(m.time, m.comp, initialize=1.0) + return m + + def test_construct_and_get_data(self): + m = self._make_model() + data = ScalarData({m.var[:, "A"]: 0.5, m.var[:, "B"]: 2.0}) + data_dict = data.get_data() + pred_data_dict = { + pyo.ComponentUID("var[*,A]"): 0.5, + pyo.ComponentUID("var[*,B]"): 2.0, + } + self.assertEqual(data_dict, pred_data_dict) + + def test_construct_exception(self): + m = self._make_model() + msg = "Value.*not a scalar" + with self.assertRaisesRegex(TypeError, msg): + data = ScalarData({m.var[:, "A"]: [1, 2]}) + + def test_eq(self): + m = self._make_model() + data1 = ScalarData({m.var[:, "A"]: 0.5, m.var[:, "B"]: 2.0}) + data2 = ScalarData({m.var[:, "A"]: 0.5, m.var[:, "B"]: 2.0}) + data3 = ScalarData({m.var[:, "A"]: 0.5, m.var[:, "B"]: 3.0}) + self.assertEqual(data1, data2) + self.assertNotEqual(data1, data3) + data_dict = data2.get_data() + msg = "not comparable" + with self.assertRaisesRegex(TypeError, msg): + data1 == data_dict + + def test_get_data_from_key(self): + m = self._make_model() + data = ScalarData({m.var[:, "A"]: 0.5, m.var[:, "B"]: 2.0}, time_set=m.time) + val = data.get_data_from_key(m.var[:, "A"]) + self.assertEqual(val, 0.5) + val = data.get_data_from_key(pyo.Reference(m.var[:, "A"])) + self.assertEqual(val, 0.5) + + val = data.get_data_from_key(m.var[0, "A"]) + self.assertEqual(val, 0.5) + + val = data.get_data_from_key("var[*,A]") + self.assertEqual(val, 0.5) + + def test_contains_key(self): + m = self._make_model() + data = ScalarData({m.var[:, "A"]: 0.5}, time_set=m.time) + self.assertTrue(data.contains_key(m.var[:, "A"])) + self.assertFalse(data.contains_key(m.var[:, "B"])) + + def test_update_data(self): + m = self._make_model() + data = ScalarData({m.var[:, "A"]: 0.5, m.var[:, "B"]: 2.0}) + new_data = ScalarData({m.var[:, "A"]: 0.1}) + data.update_data(new_data) + self.assertEqual( + data.get_data(), + { + pyo.ComponentUID(m.var[:, "A"]): 0.1, + pyo.ComponentUID(m.var[:, "B"]): 2.0, + }, + ) + + new_data = {m.var[:, "A"]: 0.2} + data.update_data(new_data) + self.assertEqual( + data.get_data(), + { + pyo.ComponentUID(m.var[:, "A"]): 0.2, + pyo.ComponentUID(m.var[:, "B"]): 2.0, + }, + ) + + def test_to_serializable(self): + m = self._make_model() + data = ScalarData({m.var[:, "A"]: 0.5, m.var[:, "B"]: 2.0}) + pred_json_dict = {"var[*,A]": 0.5, "var[*,B]": 2.0} + self.assertEqual(data.to_serializable(), pred_json_dict) + + def test_extract_variables(self): + m = self._make_model() + data = ScalarData({m.var[:, "A"]: 0.5, m.var[:, "B"]: 2.0}) + data = data.extract_variables([m.var[:, "A"]]) + pred_data_dict = {pyo.ComponentUID(m.var[:, "A"]): 0.5} + self.assertEqual(data.get_data(), pred_data_dict) + + def test_extract_variables_exception(self): + m = self._make_model() + data = ScalarData({m.var[:, "A"]: 0.5, m.var[:, "B"]: 2.0}) + msg = "extract_variables with copy_values=True" + with self.assertRaisesRegex(NotImplementedError, msg): + data = data.extract_variables([m.var[:, "A"]], copy_values=True) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/data/tests/test_series_data.py b/pyomo/contrib/mpc/data/tests/test_series_data.py new file mode 100644 index 00000000000..e32559ac074 --- /dev/null +++ b/pyomo/contrib/mpc/data/tests/test_series_data.py @@ -0,0 +1,211 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest + +import pyomo.environ as pyo +from pyomo.contrib.mpc.data.scalar_data import ScalarData +from pyomo.contrib.mpc.data.series_data import TimeSeriesData + + +class TestSeriesData(unittest.TestCase): + def _make_model(self): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=[0.0, 0.1, 0.2]) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var(m.time, m.comp, initialize=1.0) + return m + + def test_construct_and_get_data(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + data = TimeSeriesData(data_dict, m.time) + + processed_data_dict = { + pyo.ComponentUID(key): val for key, val in data_dict.items() + } + self.assertEqual(data.get_data(), processed_data_dict) + + def test_construct_exception(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4]} + msg = "must have same length" + with self.assertRaisesRegex(ValueError, msg): + data = TimeSeriesData(data_dict, m.time) + + data_dict = {m.var[:, "A"]: [1, 2], m.var[:, "B"]: [2, 4]} + with self.assertRaisesRegex(ValueError, msg): + # series don't have same lengths as time + data = TimeSeriesData(data_dict, m.time) + + msg = "not sorted" + with self.assertRaisesRegex(ValueError, msg): + # Time list has right number of points, but is not sorted + # increasing. + data = TimeSeriesData(data_dict, [0, -1]) + + def test_get_time_points(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + data = TimeSeriesData(data_dict, m.time) + self.assertEqual(data.get_time_points(), list(m.time)) + + new_time_list = [3, 4, 5] + data = TimeSeriesData(data_dict, new_time_list) + self.assertEqual(data.get_time_points(), new_time_list) + + def test_get_data_at_time_indices(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + data = TimeSeriesData(data_dict, m.time) + new_data = data.get_data_at_time_indices(1) + self.assertEqual(ScalarData({m.var[:, "A"]: 2, m.var[:, "B"]: 4}), new_data) + + new_data = data.get_data_at_time_indices([1]) + t1 = m.time.at(2) # Sets are indexed starting from 1... + self.assertEqual( + TimeSeriesData({m.var[:, "A"]: [2], m.var[:, "B"]: [4]}, [t1]), new_data + ) + + new_t = [m.time.at(1), m.time.at(3)] + new_data = data.get_data_at_time_indices([0, 2]) + self.assertEqual( + TimeSeriesData({m.var[:, "A"]: [1, 3], m.var[:, "B"]: [2, 6]}, new_t), + new_data, + ) + + def test_get_data_at_time(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + data = TimeSeriesData(data_dict, m.time) + new_data = data.get_data_at_time(0.1) + self.assertEqual(ScalarData({m.var[:, "A"]: 2, m.var[:, "B"]: 4}), new_data) + + t1 = 0.1 + new_data = data.get_data_at_time([t1]) + self.assertEqual( + TimeSeriesData({m.var[:, "A"]: [2], m.var[:, "B"]: [4]}, [t1]), new_data + ) + + new_t = [0.0, 0.2] + new_data = data.get_data_at_time(new_t) + self.assertEqual( + TimeSeriesData({m.var[:, "A"]: [1, 3], m.var[:, "B"]: [2, 6]}, new_t), + new_data, + ) + + def test_get_data_at_time_with_tolerance(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + data = TimeSeriesData(data_dict, m.time) + + # Test an invalid time value. A tolerance of None gives us + # the closest index + new_data = data.get_data_at_time(-0.1, tolerance=None) + self.assertEqual(ScalarData({m.var[:, "A"]: 1, m.var[:, "B"]: 2}), new_data) + + # Test a value that is only valid within tolerance + new_data = data.get_data_at_time(-0.0001, tolerance=1e-3) + self.assertEqual(ScalarData({m.var[:, "A"]: 1, m.var[:, "B"]: 2}), new_data) + + # The default is to raise an error in the case of any discrepancy. + msg = "Time point.*is invalid" + with self.assertRaisesRegex(RuntimeError, msg): + new_data = data.get_data_at_time(-0.0001) + + # Test a value that is invalid within tolerance + msg = "Time point.*is invalid" + with self.assertRaisesRegex(RuntimeError, msg): + new_data = data.get_data_at_time(-0.01, tolerance=1e-3) + + def test_to_serializable(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + data = TimeSeriesData(data_dict, m.time).to_serializable() + + pred_json_obj = ( + {str(pyo.ComponentUID(key)): val for key, val in data_dict.items()}, + list(m.time), + ) + self.assertEqual(data, pred_json_obj) + + # Test attributes of the TimeSeriesTuple namedtuple + self.assertEqual(data.time, list(m.time)) + self.assertEqual( + data.data, + {str(pyo.ComponentUID(key)): val for key, val in data_dict.items()}, + ) + + def test_concatenate(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + time1 = [t for t in m.time] + data1 = TimeSeriesData(data_dict, time1) + + time2 = [t + 1.0 for t in m.time] + data2 = TimeSeriesData(data_dict, time2) + + data1.concatenate(data2) + pred_time = time1 + time2 + pred_data = { + m.var[:, "A"]: [1, 2, 3, 1, 2, 3], + m.var[:, "B"]: [2, 4, 6, 2, 4, 6], + } + # Note that data1 has been modified in place + self.assertEqual(TimeSeriesData(pred_data, pred_time), data1) + + def test_concatenate_exception(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + time1 = [t for t in m.time] + data1 = TimeSeriesData(data_dict, time1) + + msg = "Initial time point.*is not greater" + with self.assertRaisesRegex(ValueError, msg): + data1.concatenate(data1) + + def test_shift_time_points(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + data = TimeSeriesData(data_dict, m.time) + + offset = 1.0 + data.shift_time_points(offset) + self.assertEqual(data.get_time_points(), [t + offset for t in m.time]) + + def test_extract_variables(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + data = TimeSeriesData(data_dict, m.time) + + new_data = data.extract_variables([m.var[:, "A"]]) + self.assertEqual(new_data, TimeSeriesData({m.var[:, "A"]: [1, 2, 3]}, m.time)) + + def test_shift_then_get_data(self): + m = self._make_model() + data_dict = {m.var[:, "A"]: [1, 2, 3], m.var[:, "B"]: [2, 4, 6]} + data = TimeSeriesData(data_dict, m.time) + + offset = 0.1 + data.shift_time_points(offset) + self.assertEqual(data.get_time_points(), [t + offset for t in m.time]) + + # A time point of zero is no longer valid + msg = "Time point.*is invalid" + with self.assertRaisesRegex(RuntimeError, msg): + t0_data = data.get_data_at_time(0.0, tolerance=1e-3) + + t1_data = data.get_data_at_time(0.1) + self.assertEqual(t1_data, ScalarData({m.var[:, "A"]: 1, m.var[:, "B"]: 2})) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/examples/README.md b/pyomo/contrib/mpc/examples/README.md new file mode 100644 index 00000000000..54894e3e241 --- /dev/null +++ b/pyomo/contrib/mpc/examples/README.md @@ -0,0 +1,6 @@ +### Examples of rolling horizon and dynamic optimization data structures + +This repository contains examples using the data structures and interfaces +in this package for rolling horizon dynamic simulation and optimization. + +Code in this directory should not be imported from elsewhere in the codebase. diff --git a/pyomo/contrib/mpc/examples/__init__.py b/pyomo/contrib/mpc/examples/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pyomo/contrib/mpc/examples/cstr/__init__.py b/pyomo/contrib/mpc/examples/cstr/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pyomo/contrib/mpc/examples/cstr/model.py b/pyomo/contrib/mpc/examples/cstr/model.py new file mode 100644 index 00000000000..d794084f122 --- /dev/null +++ b/pyomo/contrib/mpc/examples/cstr/model.py @@ -0,0 +1,178 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.environ as pyo +import pyomo.dae as dae +from pyomo.contrib.incidence_analysis import IncidenceGraphInterface +from pyomo.common.dependencies.matplotlib import pyplot as plt + + +def _flow_eqn_rule(m, t): + return m.flow_in[t] - m.flow_out[t] == 0 + + +def _conc_out_eqn_rule(m, t, j): + return m.conc[t, j] - m.conc_out[t, j] == 0 + + +def _rate_eqn_rule(m, t, j): + return m.rate_gen[t, j] - m.stoich[j] * m.k_rxn * m.conc[t, "A"] == 0 + + +def _conc_diff_eqn_rule(m, t, j): + return ( + m.dcdt[t, j] + - ( + m.flow_in[t] * m.conc_in[t, j] + - m.flow_out[t] * m.conc_out[t, j] + + m.rate_gen[t, j] + ) + == 0 + ) + + +def _conc_steady_eqn_rule(m, t, j): + return ( + m.flow_in[t] * m.conc_in[t, j] + - m.flow_out[t] * m.conc_out[t, j] + + m.rate_gen[t, j] + ) == 0 + + +def make_model(dynamic=True, horizon=10.0): + m = pyo.ConcreteModel() + m.comp = pyo.Set(initialize=["A", "B"]) + if dynamic: + m.time = dae.ContinuousSet(initialize=[0, horizon]) + else: + m.time = pyo.Set(initialize=[0]) + time = m.time + comp = m.comp + + m.stoich = pyo.Param(m.comp, initialize={"A": -1, "B": 1}, mutable=True) + m.k_rxn = pyo.Param(initialize=1.0, mutable=True) + + m.conc = pyo.Var(m.time, m.comp) + if dynamic: + m.dcdt = dae.DerivativeVar(m.conc, wrt=m.time) + + m.flow_in = pyo.Var(time, bounds=(0, None)) + m.flow_out = pyo.Var(time, bounds=(0, None)) + m.flow_eqn = pyo.Constraint(time, rule=_flow_eqn_rule) + + m.conc_in = pyo.Var(time, comp, bounds=(0, None)) + m.conc_out = pyo.Var(time, comp, bounds=(0, None)) + m.conc_out_eqn = pyo.Constraint(time, comp, rule=_conc_out_eqn_rule) + + m.rate_gen = pyo.Var(time, comp) + m.rate_eqn = pyo.Constraint(time, comp, rule=_rate_eqn_rule) + + if dynamic: + m.conc_diff_eqn = pyo.Constraint(time, comp, rule=_conc_diff_eqn_rule) + else: + m.conc_steady_eqn = pyo.Constraint(time, comp, rule=_conc_steady_eqn_rule) + + return m + + +def initialize_model(m, dynamic=True, ntfe=None): + if ntfe is not None and not dynamic: + raise RuntimeError("Cannot provide ntfe to initialize steady model") + elif dynamic and ntfe is None: + ntfe = 10 + if dynamic: + disc = pyo.TransformationFactory("dae.finite_difference") + disc.apply_to(m, wrt=m.time, nfe=ntfe, scheme="BACKWARD") + + t0 = m.time.first() + + # Fix inputs + m.conc_in[:, "A"].fix(5.0) + m.conc_in[:, "B"].fix(0.01) + m.flow_in[:].fix(1.0) + m.flow_in[t0].fix(0.1) + + if dynamic: + # Fix initial conditions if dynamic + m.conc[t0, "A"].fix(1.0) + m.conc[t0, "B"].fix(0.0) + + +def create_instance(dynamic=True, horizon=None, ntfe=None): + if horizon is None and dynamic: + horizon = 10.0 + if ntfe is None and dynamic: + ntfe = 10 + m = make_model(horizon=horizon, dynamic=dynamic) + initialize_model(m, ntfe=ntfe, dynamic=dynamic) + return m + + +def _plot_time_indexed_variables( + data, keys, show=False, save=False, fname=None, transparent=False +): + fig, ax = plt.subplots() + time = data.get_time_points() + for i, key in enumerate(keys): + data_list = data.get_data_from_key(key) + label = str(data.get_cuid(key)) + ax.plot(time, data_list, label=label) + ax.legend() + + if show: + plt.show() + if save: + if fname is None: + fname = "states.png" + fig.savefig(fname, transparent=transparent) + + return fig, ax + + +def _step_time_indexed_variables( + data, keys, show=False, save=False, fname=None, transparent=False +): + fig, ax = plt.subplots() + time = data.get_time_points() + for i, key in enumerate(keys): + data_list = data.get_data_from_key(key) + label = str(data.get_cuid(key)) + ax.step(time, data_list, label=label) + ax.legend() + + if show: + plt.show() + if save: + if fname is None: + fname = "inputs.png" + fig.savefig(fname, transparent=transparent) + + return fig, ax + + +def main(): + # Make sure steady and dynamic models are square, structurally + # nonsingular models. + m_steady = create_instance(dynamic=False) + steady_igraph = IncidenceGraphInterface(m_steady) + assert len(steady_igraph.variables) == len(steady_igraph.constraints) + steady_vdmp, steady_cdmp = steady_igraph.dulmage_mendelsohn() + assert not steady_vdmp.unmatched and not steady_cdmp.unmatched + + m = create_instance(horizon=100.0, ntfe=100) + igraph = IncidenceGraphInterface(m) + assert len(igraph.variables) == len(igraph.constraints) + vdmp, cdmp = igraph.dulmage_mendelsohn() + assert not vdmp.unmatched and not cdmp.unmatched + + +if __name__ == "__main__": + main() diff --git a/pyomo/contrib/mpc/examples/cstr/run_mpc.py b/pyomo/contrib/mpc/examples/cstr/run_mpc.py new file mode 100644 index 00000000000..86ae7e4e47b --- /dev/null +++ b/pyomo/contrib/mpc/examples/cstr/run_mpc.py @@ -0,0 +1,154 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.environ as pyo +import pyomo.contrib.mpc as mpc +from pyomo.contrib.mpc.examples.cstr.model import ( + create_instance, + _plot_time_indexed_variables, + _step_time_indexed_variables, +) + + +def get_steady_state_data(target, tee=False): + m = create_instance(dynamic=False) + interface = mpc.DynamicModelInterface(m, m.time) + var_set, tr_cost = interface.get_penalty_from_target(target) + m.target_set = var_set + m.tracking_cost = tr_cost + m.objective = pyo.Objective(expr=sum(m.tracking_cost[:, 0])) + m.flow_in[:].unfix() + solver = pyo.SolverFactory("ipopt") + solver.solve(m, tee=tee) + return interface.get_data_at_time(0) + + +def run_cstr_mpc( + initial_data, + setpoint_data, + samples_per_controller_horizon=5, + sample_time=2.0, + ntfe_per_sample_controller=2, + ntfe_plant=5, + simulation_steps=5, + tee=False, +): + controller_horizon = sample_time * samples_per_controller_horizon + ntfe = ntfe_per_sample_controller * samples_per_controller_horizon + m_controller = create_instance(horizon=controller_horizon, ntfe=ntfe) + controller_interface = mpc.DynamicModelInterface(m_controller, m_controller.time) + t0_controller = m_controller.time.first() + + m_plant = create_instance(horizon=sample_time, ntfe=ntfe_plant) + plant_interface = mpc.DynamicModelInterface(m_plant, m_plant.time) + + # Sets initial conditions and initializes + controller_interface.load_data(initial_data) + plant_interface.load_data(initial_data) + + # + # Add objective to controller model + # + setpoint_variables = [m_controller.conc[:, "A"], m_controller.conc[:, "B"]] + vset, tr_cost = controller_interface.get_penalty_from_target( + setpoint_data, variables=setpoint_variables + ) + m_controller.setpoint_set = vset + m_controller.tracking_cost = tr_cost + m_controller.objective = pyo.Objective( + expr=sum( + m_controller.tracking_cost[i, t] + for i in m_controller.setpoint_set + for t in m_controller.time + if t != m_controller.time.first() + ) + ) + + # + # Unfix input in controller model + # + m_controller.flow_in[:].unfix() + m_controller.flow_in[t0_controller].fix() + sample_points = [i * sample_time for i in range(samples_per_controller_horizon + 1)] + input_set, pwc_con = controller_interface.get_piecewise_constant_constraints( + [m_controller.flow_in], sample_points + ) + m_controller.input_set = input_set + m_controller.pwc_con = pwc_con + + sim_t0 = 0.0 + + # + # Initialize data structure to hold results of "rolling horizon" + # simulation. + # + sim_data = plant_interface.get_data_at_time([sim_t0]) + + solver = pyo.SolverFactory("ipopt") + non_initial_plant_time = list(m_plant.time)[1:] + ts = sample_time + t0_controller + for i in range(simulation_steps): + # The starting point of this part of the simulation + # in "real" time (rather than the model's time set) + sim_t0 = i * sample_time + + # + # Solve controller model to get inputs + # + res = solver.solve(m_controller, tee=tee) + pyo.assert_optimal_termination(res) + ts_data = controller_interface.get_data_at_time(ts) + input_data = ts_data.extract_variables([m_controller.flow_in]) + + plant_interface.load_data(input_data) + + # + # Solve plant model to simulate + # + res = solver.solve(m_plant, tee=tee) + pyo.assert_optimal_termination(res) + + # + # Extract data from simulated model + # + m_data = plant_interface.get_data_at_time(non_initial_plant_time) + m_data.shift_time_points(sim_t0 - m_plant.time.first()) + sim_data.concatenate(m_data) + + # + # Re-initialize plant model + # + tf_data = plant_interface.get_data_at_time(m_plant.time.last()) + plant_interface.load_data(tf_data) + + # + # Re-initialize controller model + # + controller_interface.shift_values_by_time(sample_time) + controller_interface.load_data(tf_data, time_points=t0_controller) + + return m_plant, sim_data + + +def main(): + init_steady_target = mpc.ScalarData({"flow_in[*]": 0.3}) + init_data = get_steady_state_data(init_steady_target, tee=False) + setpoint_target = mpc.ScalarData({"flow_in[*]": 1.2}) + setpoint_data = get_steady_state_data(setpoint_target, tee=False) + + m, sim_data = run_cstr_mpc(init_data, setpoint_data, tee=False) + + _plot_time_indexed_variables(sim_data, [m.conc[:, "A"], m.conc[:, "B"]], show=True) + _step_time_indexed_variables(sim_data, [m.flow_in[:]], show=True) + + +if __name__ == "__main__": + main() diff --git a/pyomo/contrib/mpc/examples/cstr/run_openloop.py b/pyomo/contrib/mpc/examples/cstr/run_openloop.py new file mode 100644 index 00000000000..36ddb990545 --- /dev/null +++ b/pyomo/contrib/mpc/examples/cstr/run_openloop.py @@ -0,0 +1,86 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.environ as pyo +import pyomo.contrib.mpc as mpc +from pyomo.contrib.mpc.examples.cstr.model import ( + create_instance, + _plot_time_indexed_variables, + _step_time_indexed_variables, +) + + +def get_input_sequence(): + input_sequence = mpc.TimeSeriesData( + {"flow_in[*]": [0.1, 1.0, 0.5, 1.3, 1.0, 0.3]}, [0.0, 2.0, 4.0, 6.0, 8.0, 15.0] + ) + return mpc.data.convert.series_to_interval(input_sequence) + + +def run_cstr_openloop( + inputs, model_horizon=1.0, ntfe=10, simulation_steps=15, tee=False +): + m = create_instance(horizon=model_horizon, ntfe=ntfe) + dynamic_interface = mpc.DynamicModelInterface(m, m.time) + + sim_t0 = 0.0 + + # + # Initialize data structure to hold results of "rolling horizon" + # simulation. + # + sim_data = dynamic_interface.get_data_at_time([sim_t0]) + + solver = pyo.SolverFactory("ipopt") + non_initial_model_time = list(m.time)[1:] + for i in range(simulation_steps): + # The starting point of this part of the simulation + # in "real" time (rather than the model's time set) + sim_t0 = i * model_horizon + + sim_time = [sim_t0 + t for t in m.time] + new_inputs = mpc.data.convert.interval_to_series(inputs, time_points=sim_time) + new_inputs.shift_time_points(m.time.first() - sim_t0) + dynamic_interface.load_data(new_inputs, tolerance=1e-6) + + # + # Solve square model to simulate + # + res = solver.solve(m, tee=tee) + pyo.assert_optimal_termination(res) + + # + # Extract data from simulated model + # + m_data = dynamic_interface.get_data_at_time(non_initial_model_time) + m_data.shift_time_points(sim_t0 - m.time.first()) + sim_data.concatenate(m_data) + + # + # Re-initialize (initial conditions and variable values) + # + # The default is to load this ScalarData at all points in the + # model's time set. + tf_data = dynamic_interface.get_data_at_time(m.time.last()) + dynamic_interface.load_data(tf_data) + + return m, sim_data + + +def main(): + input_sequence = get_input_sequence() + m, sim_data = run_cstr_openloop(input_sequence, tee=False) + _plot_time_indexed_variables(sim_data, [m.conc[:, "A"], m.conc[:, "B"]], show=True) + _step_time_indexed_variables(sim_data, [m.flow_in[:]], show=True) + + +if __name__ == "__main__": + main() diff --git a/pyomo/contrib/mpc/examples/cstr/tests/__init__.py b/pyomo/contrib/mpc/examples/cstr/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pyomo/contrib/mpc/examples/cstr/tests/test_mpc.py b/pyomo/contrib/mpc/examples/cstr/tests/test_mpc.py new file mode 100644 index 00000000000..741a1533da3 --- /dev/null +++ b/pyomo/contrib/mpc/examples/cstr/tests/test_mpc.py @@ -0,0 +1,128 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +import pyomo.environ as pyo +import pyomo.contrib.mpc as mpc +from pyomo.contrib.mpc.examples.cstr.run_mpc import get_steady_state_data, run_cstr_mpc + + +ipopt_available = pyo.SolverFactory("ipopt").available() + + +@unittest.skipIf(not ipopt_available, "ipopt is not available") +class TestCSTRMPC(unittest.TestCase): + # This data was obtained from a run of this code. The test is + # intended to make sure that values do not change, not that + # they are correct in some absolute sense. + _pred_A_data = [ + 1.15385, + 2.11629, + 2.59104, + 2.82521, + 2.94072, + 2.99770, + 2.84338, + 2.76022, + 2.71541, + 2.69127, + 2.67826, + 2.70659, + 2.72163, + 2.72961, + 2.73384, + 2.73609, + 2.73100, + 2.72830, + 2.72686, + 2.72609, + 2.72568, + 2.72660, + 2.72709, + 2.72735, + 2.72749, + 2.72756, + ] + _pred_B_data = [ + 3.85615, + 2.89371, + 2.41896, + 2.18479, + 2.06928, + 2.01230, + 2.16662, + 2.24978, + 2.29459, + 2.31873, + 2.33174, + 2.30341, + 2.28837, + 2.28039, + 2.27616, + 2.27391, + 2.27900, + 2.28170, + 2.28314, + 2.28391, + 2.28432, + 2.28340, + 2.28291, + 2.28265, + 2.28251, + 2.28244, + ] + + def _get_initial_data(self): + initial_data = mpc.ScalarData({"flow_in[*]": 0.3}) + return get_steady_state_data(initial_data) + + def _get_setpoint_data(self): + setpoint_data = mpc.ScalarData({"flow_in[*]": 1.2}) + return get_steady_state_data(setpoint_data) + + def test_mpc_simulation(self): + initial_data = self._get_initial_data() + setpoint_data = self._get_setpoint_data() + sample_time = 2.0 + samples_per_horizon = 5 + ntfe_per_sample = 2 + ntfe_plant = 5 + simulation_steps = 5 + m_plant, sim_data = run_cstr_mpc( + initial_data, + setpoint_data, + samples_per_controller_horizon=samples_per_horizon, + sample_time=sample_time, + ntfe_per_sample_controller=ntfe_per_sample, + ntfe_plant=ntfe_plant, + simulation_steps=simulation_steps, + ) + sim_time_points = [ + sample_time / ntfe_plant * i + for i in range(simulation_steps * ntfe_plant + 1) + ] + + AB_data = sim_data.extract_variables( + [m_plant.conc[:, "A"], m_plant.conc[:, "B"]] + ) + + A_cuid = sim_data.get_cuid(m_plant.conc[:, "A"]) + B_cuid = sim_data.get_cuid(m_plant.conc[:, "B"]) + pred_data = {A_cuid: self._pred_A_data, B_cuid: self._pred_B_data} + + self.assertStructuredAlmostEqual(pred_data, AB_data.get_data(), delta=1e-3) + self.assertStructuredAlmostEqual( + sim_time_points, AB_data.get_time_points(), delta=1e-7 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/examples/cstr/tests/test_openloop.py b/pyomo/contrib/mpc/examples/cstr/tests/test_openloop.py new file mode 100644 index 00000000000..218865ceabb --- /dev/null +++ b/pyomo/contrib/mpc/examples/cstr/tests/test_openloop.py @@ -0,0 +1,144 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +import pyomo.environ as pyo +import pyomo.contrib.mpc as mpc +from pyomo.contrib.mpc.examples.cstr.run_openloop import run_cstr_openloop + + +ipopt_available = pyo.SolverFactory("ipopt").available() + + +@unittest.skipIf(not ipopt_available, "ipopt is not available") +class TestCSTROpenLoop(unittest.TestCase): + # This data was obtained from a run of this code. The test is + # intended to make sure that values do not change, not that + # they are correct in some absolute sense. + _pred_A_data = [ + 1.00000, + 1.50000, + 1.83333, + 2.05556, + 2.20370, + 2.30247, + 2.36831, + 2.41221, + 2.44147, + 2.46098, + 2.47399, + 2.48266, + 2.48844, + 2.36031, + 2.27039, + 2.20729, + 2.16301, + 2.13194, + 2.11013, + 2.09483, + 2.08409, + 2.07656, + 2.07127, + 2.06756, + 2.06495, + 2.16268, + 2.22893, + 2.27385, + 2.30431, + 2.32495, + 2.33895, + 2.34844, + 2.35488, + 2.35924, + 2.36220, + 2.36420, + 2.36556, + 2.36648, + 2.36711, + 2.36753, + 2.36782, + ] + _pred_B_data = [ + 0.00000, + 0.30200, + 0.61027, + 0.90132, + 1.16380, + 1.39353, + 1.59049, + 1.75683, + 1.89576, + 2.01081, + 2.10544, + 2.18289, + 2.24600, + 2.41517, + 2.54001, + 2.63284, + 2.70242, + 2.75502, + 2.79516, + 2.82605, + 2.85007, + 2.86890, + 2.88380, + 2.89569, + 2.90526, + 2.81484, + 2.75455, + 2.71450, + 2.68802, + 2.67062, + 2.65927, + 2.65195, + 2.64728, + 2.64436, + 2.64258, + 2.64153, + 2.64096, + 2.64067, + 2.64057, + 2.64058, + 2.64064, + ] + + def _get_input_sequence(self): + input_sequence = mpc.TimeSeriesData( + {"flow_in[*]": [0.1, 1.0, 0.7, 0.9]}, [0.0, 3.0, 6.0, 10.0] + ) + return mpc.data.convert.series_to_interval(input_sequence) + + def test_openloop_simulation(self): + input_sequence = self._get_input_sequence() + ntfe = 4 + model_horizon = 1.0 + simulation_steps = 10 + m, sim_data = run_cstr_openloop( + input_sequence, model_horizon=1.0, ntfe=4, simulation_steps=10 + ) + sim_time_points = [ + model_horizon / ntfe * i for i in range(simulation_steps * ntfe + 1) + ] + + AB_data = sim_data.extract_variables([m.conc[:, "A"], m.conc[:, "B"]]) + + A_cuid = sim_data.get_cuid(m.conc[:, "A"]) + B_cuid = sim_data.get_cuid(m.conc[:, "B"]) + pred_data = {A_cuid: self._pred_A_data, B_cuid: self._pred_B_data} + + self.assertStructuredAlmostEqual(pred_data, AB_data.get_data(), delta=1e-3) + self.assertStructuredAlmostEqual( + sim_time_points, AB_data.get_time_points(), delta=1e-7 + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/checker/tests/examples/model/ModelValue_repeatif.py b/pyomo/contrib/mpc/interfaces/__init__.py similarity index 66% rename from pyomo/checker/tests/examples/model/ModelValue_repeatif.py rename to pyomo/contrib/mpc/interfaces/__init__.py index 7bb973390ed..8e02003f99e 100644 --- a/pyomo/checker/tests/examples/model/ModelValue_repeatif.py +++ b/pyomo/contrib/mpc/interfaces/__init__.py @@ -9,18 +9,10 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.environ import AbstractModel, Var, value +__doc__ = """A module providing interface classes to ease use of the data + structures and modeling utilities provided elsewhere in mpc. -model = AbstractModel() -model.X = Var() + Code in this module may import from mpc.data and mpc.modeling + and should not be imported into these other modules. -if model.X >= 10.0: - pass -if value(model.X) >= 10.0: - pass -if model.X >= 10.0: - pass - -if model.X >= 10.0: - if model.X >= 10.0: - pass + """ diff --git a/pyomo/contrib/mpc/interfaces/copy_values.py b/pyomo/contrib/mpc/interfaces/copy_values.py new file mode 100644 index 00000000000..896656b230d --- /dev/null +++ b/pyomo/contrib/mpc/interfaces/copy_values.py @@ -0,0 +1,52 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.core.expr.numvalue import value as pyo_value + + +iterable_scalars = (str, bytes) + + +def _to_iterable(item): + if hasattr(item, "__iter__"): + if isinstance(item, iterable_scalars): + yield item + else: + for obj in item: + yield obj + else: + yield item + + +def copy_values_at_time( + source_vars, target_vars, source_time_points, target_time_points +): + # Process input arguments to wrap scalars in a list + source_time_points = list(_to_iterable(source_time_points)) + target_time_points = list(_to_iterable(target_time_points)) + if ( + len(source_time_points) != len(target_time_points) + and len(source_time_points) != 1 + ): + raise ValueError( + "copy_values_at_time can only copy values when lists of time\n" + "points have the same length or the source list has length one." + ) + n_points = len(target_time_points) + if len(source_time_points) == 1: + source_time_points = source_time_points * n_points + for s_var, t_var in zip(source_vars, target_vars): + for s_t, t_t in zip(source_time_points, target_time_points): + # Using the value function allows expressions to substitute + # for variables. However, it raises an error if the expression + # cannot be evaluated (e.g. has value None). + # t_var[t_t].set_value(pyo_value(s_var[s_t])) + t_var[t_t].set_value(s_var[s_t].value) diff --git a/pyomo/contrib/mpc/interfaces/load_data.py b/pyomo/contrib/mpc/interfaces/load_data.py new file mode 100644 index 00000000000..efa9515901e --- /dev/null +++ b/pyomo/contrib/mpc/interfaces/load_data.py @@ -0,0 +1,181 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.contrib.mpc.data.dynamic_data_base import _is_iterable +from pyomo.contrib.mpc.data.find_nearest_index import ( + find_nearest_index, + find_nearest_interval_index, +) + + +def _raise_invalid_cuid(cuid, model): + raise RuntimeError("Cannot find a component %s on block %s" % (cuid, model)) + + +def load_data_from_scalar(data, model, time): + """A function to load ScalarData into a model + + Arguments + --------- + data: ScalarData + model: BlockData + time: Iterable + + """ + data = data.get_data() + t_iter = time if _is_iterable(time) else (time,) + for cuid, val in data.items(): + var = model.find_component(cuid) + if var is None: + _raise_invalid_cuid(cuid, model) + # TODO: Time points should probably use find_nearest_index + # This will have to happen in the calling function, as data + # doesn't have a list of time points to check. + if var.is_indexed(): + for t in t_iter: + var[t].set_value(val) + else: + var.set_value(val) + + +def load_data_from_series(data, model, time, tolerance=0.0): + """A function to load TimeSeriesData into a model + + Arguments + --------- + data: TimeSeriesData + model: BlockData + time: Iterable + + """ + time_list = list(time) + time_indices = [ + find_nearest_index(time_list, t, tolerance=tolerance) + for t in data.get_time_points() + ] + for idx, t in zip(time_indices, data.get_time_points()): + if idx is None: + raise RuntimeError("Time point %s not found time set" % t) + if len(time_list) != len(data.get_time_points()): + raise RuntimeError( + "TimeSeriesData object and model must have same number" + " of time points to load data from series" + ) + data = data.get_data() + for cuid, vals in data.items(): + var = model.find_component(cuid) + if var is None: + _raise_invalid_cuid(cuid, model) + for idx, val in zip(time_indices, vals): + t = time_list[idx] + var[t].set_value(val) + + +def load_data_from_interval( + data, + model, + time, + tolerance=0.0, + prefer_left=True, + exclude_left_endpoint=True, + exclude_right_endpoint=False, +): + """A function to load IntervalData into a model + + Loads values into specified variables at time points that are + within the intervals specified. If a time point is on the boundary + of two intervals, the default is to use the interval on the left. + Often, intervals should be treated as half-open, i.e. one of the + left or right endpoints should be excluded. This can be enforced + with the corresponding optional arguments. + + Arguments + --------- + data: IntervalData + model: BlockData + time: Iterable + tolerance: Float + prefer_left: Bool + exclude_left_endpoint: Bool + exclude_right_endpoint: Bool + + """ + if prefer_left and exclude_right_endpoint and not exclude_left_endpoint: + raise RuntimeError( + "Cannot use prefer_left=True with exclude_left_endpoint=False" + " and exclude_right_endpoint=True." + ) + elif not prefer_left and exclude_left_endpoint and not exclude_right_endpoint: + raise RuntimeError( + "Cannot use prefer_left=False with exclude_left_endpoint=True" + " and exclude_right_endpoint=False." + ) + intervals = data.get_intervals() + left_endpoints = [t for t, _ in intervals] + right_endpoints = [t for _, t in intervals] + # NOTE: O(len(time)*log(len(intervals))) + idx_list = [ + find_nearest_interval_index( + intervals, t, tolerance=tolerance, prefer_left=prefer_left + ) + for t in time + ] + left_endpoint_indices = [ + # index of interval which t is the left endpoint of + find_nearest_index(left_endpoints, t, tolerance=tolerance) + for t in time + ] + right_endpoint_indices = [ + # index of interval which t is the right endpoint of + find_nearest_index(right_endpoints, t, tolerance=tolerance) + for t in time + ] + + # Post-process indices to exclude endpoints + for i, t in enumerate(time): + if ( + exclude_left_endpoint + and left_endpoint_indices[i] is not None + and right_endpoint_indices[i] is None + ): + # If t is a left endpoint but not a right endpoint, + # do not load a value at t. + idx_list[i] = None + elif ( + exclude_right_endpoint + and right_endpoint_indices[i] is not None + and left_endpoint_indices[i] is None + ): + # If t is a right endpoint but not a left endpoint, + # do not load a value at t. + idx_list[i] = None + elif ( + exclude_left_endpoint + and exclude_right_endpoint + and right_endpoint_indices[i] is not None + and left_endpoint_indices[i] is not None + ): + # t is both a left endpoint and a right endpoint + idx_list[i] = None + + data = data.get_data() + for cuid, vals in data.items(): + var = model.find_component(cuid) + if var is None: + _raise_invalid_cuid(cuid, model) + for i, t in zip(idx_list, time): + if i is None: + # t could not be found in an interval. This is fine. + # We don't necessarily require that the interval data + # cover the entire time set. + continue + else: + var[t].set_value(vals[i]) diff --git a/pyomo/contrib/mpc/interfaces/model_interface.py b/pyomo/contrib/mpc/interfaces/model_interface.py new file mode 100644 index 00000000000..35f81af4a7a --- /dev/null +++ b/pyomo/contrib/mpc/interfaces/model_interface.py @@ -0,0 +1,403 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.dae.flatten import flatten_dae_components +from pyomo.common.modeling import NOTSET +from pyomo.core.base.var import Var +from pyomo.core.base.expression import Expression +from pyomo.core.base.componentuid import ComponentUID +from pyomo.core.expr.numeric_expr import value as pyo_value + +from pyomo.contrib.mpc.interfaces.load_data import ( + load_data_from_scalar, + load_data_from_series, + load_data_from_interval, +) +from pyomo.contrib.mpc.interfaces.copy_values import copy_values_at_time +from pyomo.contrib.mpc.data.find_nearest_index import find_nearest_index +from pyomo.contrib.mpc.data.get_cuid import get_indexed_cuid +from pyomo.contrib.mpc.data.dynamic_data_base import _is_iterable +from pyomo.contrib.mpc.data.series_data import TimeSeriesData +from pyomo.contrib.mpc.data.interval_data import IntervalData +from pyomo.contrib.mpc.data.scalar_data import ScalarData +from pyomo.contrib.mpc.data.convert import _process_to_dynamic_data +from pyomo.contrib.mpc.modeling.cost_expressions import ( + get_penalty_from_constant_target, + get_penalty_from_target, +) +from pyomo.contrib.mpc.modeling.constraints import get_piecewise_constant_constraints + +iterable_scalars = (str, bytes) + + +def _to_iterable(item): + if hasattr(item, "__iter__"): + if isinstance(item, iterable_scalars): + yield item + else: + for obj in item: + yield obj + else: + yield item + + +class DynamicModelInterface(object): + """A helper class for working with dynamic models, e.g. those where + many components are indexed by some ordered set referred to as "time." + + This class provides methods for interacting with time-indexed + components, for instance, loading and extracting data or shifting + values by some time offset. It also provides methods for constructing + components useful for dynamic optimization. + + """ + + def __init__(self, model, time, context=NOTSET): + """ + Construct with a model and a set. We will flatten the model + with respect to this set and generate CUIDs with wildcards. + + """ + scalar_vars, dae_vars = flatten_dae_components(model, time, Var) + scalar_expr, dae_expr = flatten_dae_components(model, time, Expression) + self.model = model + self.time = time + self._scalar_vars = scalar_vars + self._dae_vars = dae_vars + self._scalar_expr = scalar_expr + self._dae_expr = dae_expr + + if context is NOTSET: + context = model + + # Use buffer to reduce repeated work during name/cuid generation + cuid_buffer = {} + self._scalar_var_cuids = [ + ComponentUID(var, cuid_buffer=cuid_buffer, context=context) + for var in self._scalar_vars + ] + self._dae_var_cuids = [ + ComponentUID(var.referent, cuid_buffer=cuid_buffer, context=context) + for var in self._dae_vars + ] + self._dae_expr_cuids = [ + ComponentUID(expr.referent, cuid_buffer=cuid_buffer, context=context) + for expr in self._dae_expr + ] + + def get_scalar_variables(self): + return self._scalar_vars + + def get_indexed_variables(self): + return self._dae_vars + + def get_scalar_expressions(self): + return self._scalar_expr + + def get_indexed_expressions(self): + return self._dae_expr + + def get_scalar_variable_data(self): + """ + Get data corresponding to non-time-indexed variables. + + Returns + ------- + dict + Maps CUIDs of non-time-indexed variables to the value of these + variables. + + """ + return { + cuid: var.value + for cuid, var in zip(self._scalar_var_cuids, self._scalar_vars) + } + + def get_data_at_time(self, time=None, include_expr=False): + """ + Gets data at a single time point or set of time points. Note that + the returned type changes depending on whether a scalar or iterable + is supplied. + + """ + if time is None: + # Default is to use the entire time set, treating a singleton + # as a scalar. + time = self.time if len(self.time) > 1 else self.time.at(1) + if _is_iterable(time): + # Assume time is iterable + time_list = list(time) + data = { + cuid: [var[t].value for t in time] + for cuid, var in zip(self._dae_var_cuids, self._dae_vars) + } + if include_expr: + data.update( + { + cuid: [pyo_value(expr[t]) for t in time] + for cuid, expr in zip(self._dae_expr_cuids, self._dae_expr) + } + ) + # Return a TimeSeriesData object + return TimeSeriesData(data, time_list, time_set=self.time) + else: + # time is a scalar + data = { + cuid: var[time].value + for cuid, var in zip(self._dae_var_cuids, self._dae_vars) + } + if include_expr: + data.update( + { + cuid: pyo_value(expr[time]) + for cuid, expr in zip(self._dae_expr_cuids, self._dae_expr) + } + ) + # Return ScalarData object + return ScalarData(data) + + def load_data( + self, + data, + time_points=None, + tolerance=0.0, + prefer_left=None, + exclude_left_endpoint=None, + exclude_right_endpoint=None, + ): + """Method to load data into the model. + + Loads data into indicated variables in the model, possibly + at specified time points. + + Arguments + --------- + data: ScalarData, TimeSeriesData, or mapping + If ScalarData, loads values into indicated variables at + all (or specified) time points. If TimeSeriesData, loads + lists of values into time points. + If mapping, checks whether each variable and value is + indexed or iterable and correspondingly loads data into + variables. + time_points: Iterable (optional) + Subset of time points into which data should be loaded. + Default of None corresponds to loading into all time points. + + """ + if time_points is None: + time_points = self.time + data = _process_to_dynamic_data(data, time_set=self.time) + + def _error_if_used(prefer_left, excl_left, excl_right, dtype): + if any(a is not None for a in (prefer_left, excl_left, excl_right)): + raise RuntimeError( + "prefer_left, exclude_left_endpoint, and exclude_right_endpoint" + " can only be set if data is IntervalData-compatible. Got" + " prefer_left=%s, exclude_left_endpoint=%s, and" + " exclude_right_endpoint=%s while loading data of type %s" + % (prefer_left, excl_left, excl_right, dtype) + ) + + excl_left = exclude_left_endpoint + excl_right = exclude_right_endpoint + if isinstance(data, ScalarData): + # This covers the case of non-time-indexed variables + # as keys. + _error_if_used(prefer_left, excl_left, excl_right, type(data)) + load_data_from_scalar(data, self.model, time_points) + elif isinstance(data, TimeSeriesData): + _error_if_used(prefer_left, excl_left, excl_right, type(data)) + load_data_from_series(data, self.model, time_points, tolerance=tolerance) + elif isinstance(data, IntervalData): + prefer_left = True if prefer_left is None else prefer_left + excl_left = prefer_left if excl_left is None else excl_left + excl_right = (not prefer_left) if excl_right is None else excl_right + load_data_from_interval( + data, + self.model, + time_points, + tolerance=tolerance, + prefer_left=prefer_left, + exclude_left_endpoint=excl_left, + exclude_right_endpoint=excl_right, + ) + + def copy_values_at_time(self, source_time=None, target_time=None): + """ + Copy values of all time-indexed variables from source time point + to target time points. + + Parameters + ---------- + source_time: Float + Time point from which to copy values. + target_time: Float or iterable + Time point or points to which to copy values. + + """ + if source_time is None: + source_time = self.time.first() + if target_time is None: + target_time = self.time + copy_values_at_time(self._dae_vars, self._dae_vars, source_time, target_time) + + def shift_values_by_time(self, dt): + """ + Shift values in time indexed variables by a specified time offset. + """ + seen = set() + t0 = self.time.first() + tf = self.time.last() + time_map = {} + time_list = list(self.time) + for var in self._dae_vars: + if id(var[tf]) in seen: + # Assume that if var[tf] has been encountered, this is a + # reference to a "variable" we have already processed. + continue + else: + seen.add(id(var[tf])) + new_values = [] + for t in time_list: + if t not in time_map: + # Build up a map from target to source time points, + # as I don't want to call find_nearest_index more + # frequently than I have to. + t_new = t + dt + idx = find_nearest_index(time_list, t_new, tolerance=None) + # If t_new is not a valid time point, we proceed with the + # closest valid time point. + # We're relying on the fact that indices of t0 or tf are + # returned if t_new is outside the bounds of the time set. + t_new = time_list[idx] + time_map[t] = t_new + t_new = time_map[t] + new_values.append(var[t_new].value) + for i, t in enumerate(self.time): + var[t].set_value(new_values[i]) + + def get_penalty_from_target( + self, + target_data, + time=None, + variables=None, + weight_data=None, + variable_set=None, + tolerance=None, + prefer_left=None, + ): + """A method to get a quadratic penalty expression from a provided + setpoint data structure + + Parameters + ---------- + target_data: ScalarData, TimeSeriesData, or IntervalData + Holds target values for variables + time: Set (optional) + Points at which to apply the tracking cost. Default will use + the model's time set. + variables: List of Pyomo VarData (optional) + Subset of variables supplied in setpoint_data to use in the + tracking cost. Default is to use all variables supplied. + weight_data: ScalarData (optional) + Holds the weights to use in the tracking cost for each variable + variable_set: Set (optional) + A set indexing the list of provided variables, if one already + exists. + tolerance: Float (optional) + Tolerance for checking inclusion in an interval. Only may be + provided if IntervalData is provided for target_data. In this + case the default is 0.0. + prefer_left: Bool (optional) + Flag indicating whether the left end point of intervals should + be preferred over the right end point. Only may be provided if + IntervalData is provided for target_data. In this case the + default is False. + + Returns + ------- + Set, Expression + Set indexing the list of variables to be penalized, and + Expression indexed by this set and time. This Expression contains + the weighted tracking cost for each variable at each point in + time. + + """ + if time is None: + time = self.time + target_data = _process_to_dynamic_data(target_data, time_set=self.time) + if variables is None: + # Use variables provided by the setpoint. + # NOTE: Nondeterministic order in non-C Python < 3.7 + # Should these data structures use OrderedDicts internally + # to enforce an order here? + variables = [ + self.model.find_component(key) for key in target_data.get_data().keys() + ] + else: + # Variables were provided. These could be anything. Process them + # to get time-indexed variables on the model. + variables = [ + self.model.find_component(get_indexed_cuid(var, (self.time,))) + for var in variables + ] + return get_penalty_from_target( + variables, + time, + target_data, + weight_data=weight_data, + variable_set=variable_set, + tolerance=tolerance, + prefer_left=prefer_left, + ) + + def get_piecewise_constant_constraints( + self, variables, sample_points, use_next=True, tolerance=0.0 + ): + """A method to get an indexed constraint ensuring that inputs + are piecewise constant. + + Parameters + ---------- + variables: List of Pyomo Vars + Variables to enforce piecewise constant + sample_points: List of floats + Points marking the boundaries of intervals within which + variables must be constant + use_next: Bool (optional) + Whether to enforce constancy by setting each variable equal + to itself at the next point in time (as opposed to at the + previous point in time). Default is True. + tolerance: Float (optional) + Absolute tolerance used to determine whether provided sample + points are in the model's time set. + + Returns + ------- + Tuple: + First entry is a Set indexing the list of provided variables + (with integers). Second entry is a constraint indexed by this + set and time enforcing the piecewise constant condition via + equality constraints. + + """ + cuids = [get_indexed_cuid(var, (self.time,)) for var in variables] + variables = [self.model.find_component(cuid) for cuid in cuids] + time_list = list(self.time) + # Make sure that sample points exist (within tolerance) in the time + # set. + sample_point_indices = [ + find_nearest_index(time_list, t, tolerance=tolerance) for t in sample_points + ] + sample_points = [time_list[i] for i in sample_point_indices] + return get_piecewise_constant_constraints( + variables, self.time, sample_points, use_next=use_next + ) diff --git a/pyomo/contrib/mpc/interfaces/tests/__init__.py b/pyomo/contrib/mpc/interfaces/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pyomo/contrib/mpc/interfaces/tests/test_interface.py b/pyomo/contrib/mpc/interfaces/tests/test_interface.py new file mode 100644 index 00000000000..65ffc7bb40a --- /dev/null +++ b/pyomo/contrib/mpc/interfaces/tests/test_interface.py @@ -0,0 +1,612 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +import pyomo.environ as pyo +from pyomo.common.collections import ComponentSet +from pyomo.core.expr.compare import compare_expressions +from pyomo.contrib.mpc.interfaces.model_interface import DynamicModelInterface +from pyomo.contrib.mpc.data.series_data import TimeSeriesData +from pyomo.contrib.mpc.data.scalar_data import ScalarData +from pyomo.contrib.mpc.data.interval_data import IntervalData + + +class TestDynamicModelInterface(unittest.TestCase): + def _make_model(self, n_time_points=3): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=range(n_time_points)) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var( + m.time, + m.comp, + initialize={(i, j): 1.0 + i * 0.1 for i, j in m.time * m.comp}, + ) + m.input = pyo.Var(m.time, initialize={i: 1.0 - i * 0.1 for i in m.time}) + m.scalar = pyo.Var(initialize=0.5) + m.var_squared = pyo.Expression( + m.time, + m.comp, + initialize={(i, j): m.var[i, j] ** 2 for i, j in m.time * m.comp}, + ) + return m + + def _hashRef(self, reference): + return tuple(id(obj) for obj in reference.values()) + + def test_interface_construct(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + + scalar_vars = interface.get_scalar_variables() + self.assertEqual(len(scalar_vars), 1) + self.assertIs(scalar_vars[0], m.scalar) + + dae_vars = interface.get_indexed_variables() + self.assertEqual(len(dae_vars), 3) + dae_var_set = set(self._hashRef(var) for var in dae_vars) + pred_dae_var = [ + pyo.Reference(m.var[:, "A"]), + pyo.Reference(m.var[:, "B"]), + m.input, + ] + for var in pred_dae_var: + self.assertIn(self._hashRef(var), dae_var_set) + + dae_expr = interface.get_indexed_expressions() + dae_expr_set = set(self._hashRef(expr) for expr in dae_expr) + self.assertEqual(len(dae_expr), 2) + pred_dae_expr = [ + pyo.Reference(m.var_squared[:, "A"]), + pyo.Reference(m.var_squared[:, "B"]), + ] + for expr in pred_dae_expr: + self.assertIn(self._hashRef(expr), dae_expr_set) + + def test_get_scalar_var_data(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data = interface.get_scalar_variable_data() + self.assertEqual({pyo.ComponentUID(m.scalar): 0.5}, data) + + def test_get_data_at_time_all_points(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data = interface.get_data_at_time(include_expr=True) + pred_data = TimeSeriesData( + { + m.var[:, "A"]: [1.0, 1.1, 1.2], + m.var[:, "B"]: [1.0, 1.1, 1.2], + m.input[:]: [1.0, 0.9, 0.8], + m.var_squared[:, "A"]: [1.0**2, 1.1**2, 1.2**2], + m.var_squared[:, "B"]: [1.0**2, 1.1**2, 1.2**2], + }, + m.time, + ) + self.assertEqual(data, pred_data) + + def test_get_data_at_time_subset(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data = interface.get_data_at_time(time=[0, 2]) + pred_data = TimeSeriesData( + { + m.var[:, "A"]: [1.0, 1.2], + m.var[:, "B"]: [1.0, 1.2], + m.input[:]: [1.0, 0.8], + }, + [0, 2], + ) + self.assertEqual(data, pred_data) + + def test_get_data_at_time_singleton(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data = interface.get_data_at_time(time=1, include_expr=True) + pred_data = ScalarData( + { + m.var[:, "A"]: 1.1, + m.var[:, "B"]: 1.1, + m.input[:]: 0.9, + m.var_squared[:, "A"]: 1.1**2, + m.var_squared[:, "B"]: 1.1**2, + } + ) + self.assertEqual(data, pred_data) + + def test_load_scalar_data(self): + # load_scalar_data has been removed. Instead we simply call + # load_data + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data = {pyo.ComponentUID(m.scalar): 6.0} + interface.load_data(data) + self.assertEqual(m.scalar.value, 6.0) + + def test_load_data_at_time_all(self): + # NOTE: load_data_at_time has been deprecated + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data = ScalarData({m.var[:, "A"]: 5.5, m.input[:]: 6.6}) + interface.load_data(data) + + B_data = [m.var[t, "B"].value for t in m.time] + # var[:,B] has not been changed + self.assertEqual(B_data, [1.0, 1.1, 1.2]) + + for t in m.time: + self.assertEqual(m.var[t, "A"].value, 5.5) + self.assertEqual(m.input[t].value, 6.6) + + def test_load_data_at_time_subset(self): + # NOTE: load_data_at_time has been deprecated + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + + old_A = {t: m.var[t, "A"].value for t in m.time} + old_input = {t: m.input[t].value for t in m.time} + + data = ScalarData({m.var[:, "A"]: 5.5, m.input[:]: 6.6}) + time_points = [1, 2] + time_set = set(time_points) + interface.load_data(data, time_points=[1, 2]) + + B_data = [m.var[t, "B"].value for t in m.time] + # var[:,B] has not been changed + self.assertEqual(B_data, [1.0, 1.1, 1.2]) + + for t in m.time: + if t in time_set: + self.assertEqual(m.var[t, "A"].value, 5.5) + self.assertEqual(m.input[t].value, 6.6) + else: + self.assertEqual(m.var[t, "A"].value, old_A[t]) + self.assertEqual(m.input[t].value, old_input[t]) + + def test_load_data_from_dict_scalar_var(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data = {pyo.ComponentUID(m.scalar): 6.0} + interface.load_data(data) + self.assertEqual(m.scalar.value, 6.0) + + def test_load_data_from_dict_indexed_var(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data = {pyo.ComponentUID(m.input): 6.0} + interface.load_data(data) + for t in m.time: + self.assertEqual(m.input[t].value, 6.0) + + def test_load_data_from_dict_indexed_var_list_data(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data_list = [2, 3, 4] + data = {pyo.ComponentUID(m.input): data_list} + # Need to provide data to load_data that can be interpreted + # as a TimeSeriesData + interface.load_data((data, m.time)) + for i, t in enumerate(m.time): + self.assertEqual(m.input[t].value, data_list[i]) + + def test_load_data_from_ScalarData_to_point(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data = ScalarData({m.var[:, "A"]: 5.5, m.input[:]: 6.6}) + interface.load_data(data, time_points=1) + + B_data = [m.var[t, "B"].value for t in m.time] + # var[:,B] has not been changed + self.assertEqual(B_data, [1.0, 1.1, 1.2]) + + old_A = [1.0, 1.1, 1.2] + old_input = [1.0, 0.9, 0.8] + for i, t in enumerate(m.time): + if t == 1: + self.assertEqual(m.var[t, "A"].value, 5.5) + self.assertEqual(m.input[t].value, 6.6) + else: + self.assertEqual(m.var[t, "A"].value, old_A[i]) + self.assertEqual(m.input[t].value, old_input[i]) + + def test_load_data_from_ScalarData_toall(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + data = ScalarData({m.var[:, "A"]: 5.5, m.input[:]: 6.6}) + interface.load_data(data) + + B_data = [m.var[t, "B"].value for t in m.time] + # var[:,B] has not been changed + self.assertEqual(B_data, [1.0, 1.1, 1.2]) + + for t in m.time: + self.assertEqual(m.var[t, "A"].value, 5.5) + self.assertEqual(m.input[t].value, 6.6) + + def test_load_data_from_ScalarData_tosubset(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + + old_A = {t: m.var[t, "A"].value for t in m.time} + old_input = {t: m.input[t].value for t in m.time} + + data = ScalarData({m.var[:, "A"]: 5.5, m.input[:]: 6.6}) + time_points = [1, 2] + time_set = set(time_points) + interface.load_data(data, time_points=[1, 2]) + + B_data = [m.var[t, "B"].value for t in m.time] + # var[:,B] has not been changed + self.assertEqual(B_data, [1.0, 1.1, 1.2]) + + for t in m.time: + if t in time_set: + self.assertEqual(m.var[t, "A"].value, 5.5) + self.assertEqual(m.input[t].value, 6.6) + else: + self.assertEqual(m.var[t, "A"].value, old_A[t]) + self.assertEqual(m.input[t].value, old_input[t]) + + def test_load_data_from_TimeSeriesData(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + new_A = [1.0, 2.0, 3.0] + new_input = [4.0, 5.0, 6.0] + data = TimeSeriesData({m.var[:, "A"]: new_A, m.input[:]: new_input}, m.time) + interface.load_data(data) + + B_data = [m.var[t, "B"].value for t in m.time] + # var[:,B] has not been changed + self.assertEqual(B_data, [1.0, 1.1, 1.2]) + + for i, t in enumerate(m.time): + self.assertEqual(m.var[t, "A"].value, new_A[i]) + self.assertEqual(m.input[t].value, new_input[i]) + + def test_load_data_from_TimeSeriesData_tuple(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + new_A = [1.0, 2.0, 3.0] + new_input = [4.0, 5.0, 6.0] + data = ({m.var[:, "A"]: new_A, m.input[:]: new_input}, m.time) + interface.load_data(data) + + B_data = [m.var[t, "B"].value for t in m.time] + # var[:,B] has not been changed + self.assertEqual(B_data, [1.0, 1.1, 1.2]) + + for i, t in enumerate(m.time): + self.assertEqual(m.var[t, "A"].value, new_A[i]) + self.assertEqual(m.input[t].value, new_input[i]) + + def test_load_data_from_IntervalData(self): + m = self._make_model(5) + interface = DynamicModelInterface(m, m.time) + new_A = [-1.1, -1.2, -1.3] + new_input = [3.0, 2.9, 2.8] + data = IntervalData( + {m.var[:, "A"]: new_A, m.input[:]: new_input}, + [(0.0, 0.0), (0.0, 2.0), (2.0, 4.0)], + ) + interface.load_data(data) + B_data = [m.var[t, "B"].value for t in m.time] + self.assertEqual(B_data, [1.0, 1.1, 1.2, 1.3, 1.4]) + for t in m.time: + if t == 0: + idx = 0 + elif t <= 2.0: + idx = 1 + elif t <= 4.0: + idx = 2 + self.assertEqual(m.var[t, "A"].value, new_A[idx]) + self.assertEqual(m.input[t].value, new_input[idx]) + + def test_load_data_from_IntervalData_tuple(self): + m = self._make_model(5) + interface = DynamicModelInterface(m, m.time) + new_A = [-1.1, -1.2, -1.3] + new_input = [3.0, 2.9, 2.8] + data = ( + {m.var[:, "A"]: new_A, m.input[:]: new_input}, + [(0.0, 0.0), (0.0, 2.0), (2.0, 4.0)], + ) + interface.load_data(data) + B_data = [m.var[t, "B"].value for t in m.time] + self.assertEqual(B_data, [1.0, 1.1, 1.2, 1.3, 1.4]) + for t in m.time: + if t == 0: + idx = 0 + elif t <= 2.0: + idx = 1 + elif t <= 4.0: + idx = 2 + self.assertEqual(m.var[t, "A"].value, new_A[idx]) + self.assertEqual(m.input[t].value, new_input[idx]) + + def test_load_data_bad_arg(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + new_A = [1.0, 2.0, 3.0] + new_input = [4.0, 5.0, 6.0] + data = ({m.var[:, "A"]: new_A, m.input[:]: new_input}, m.time) + msg = "can only be set if data is IntervalData-compatible" + with self.assertRaisesRegex(RuntimeError, msg): + interface.load_data(data, prefer_left=True) + + def test_copy_values_at_time_default(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + interface.copy_values_at_time() + # Default is to copy values from t0 to all points in time + for t in m.time: + self.assertEqual(m.var[t, "A"].value, 1.0) + self.assertEqual(m.var[t, "B"].value, 1.0) + self.assertEqual(m.input[t].value, 1.0) + + def test_copy_values_at_time_toall(self): + m = self._make_model() + tf = m.time.last() + interface = DynamicModelInterface(m, m.time) + interface.copy_values_at_time(source_time=tf) + # Default is to copy values to all points in time + for t in m.time: + self.assertEqual(m.var[t, "A"].value, 1.2) + self.assertEqual(m.var[t, "B"].value, 1.2) + self.assertEqual(m.input[t].value, 0.8) + + def test_copy_values_at_time_tosubset(self): + m = self._make_model() + tf = m.time.last() + interface = DynamicModelInterface(m, m.time) + target_points = [t for t in m.time if t != m.time.first()] + target_subset = set(target_points) + interface.copy_values_at_time(source_time=tf, target_time=target_points) + # Default is to copy values to all points in time + for t in m.time: + if t in target_subset: + self.assertEqual(m.var[t, "A"].value, 1.2) + self.assertEqual(m.var[t, "B"].value, 1.2) + self.assertEqual(m.input[t].value, 0.8) + else: + # t0 has not been altered. + self.assertEqual(m.var[t, "A"].value, 1.0) + self.assertEqual(m.var[t, "B"].value, 1.0) + self.assertEqual(m.input[t].value, 1.0) + + def test_copy_values_at_time_exception(self): + m = self._make_model() + tf = m.time.last() + interface = DynamicModelInterface(m, m.time) + msg = "copy_values_at_time can only copy" + with self.assertRaisesRegex(ValueError, msg): + interface.copy_values_at_time(source_time=m.time, target_time=tf) + + def test_shift_values_by_time(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + dt = 1.0 + interface.shift_values_by_time(dt) + + t = 0 + self.assertEqual(m.var[t, "A"].value, 1.1) + self.assertEqual(m.var[t, "B"].value, 1.1) + self.assertEqual(m.input[t].value, 0.9) + + t = 1 + self.assertEqual(m.var[t, "A"].value, 1.2) + self.assertEqual(m.var[t, "B"].value, 1.2) + self.assertEqual(m.input[t].value, 0.8) + + t = 2 + # For values within dt of the endpoint, the value at + # the boundary is copied. + self.assertEqual(m.var[t, "A"].value, 1.2) + self.assertEqual(m.var[t, "B"].value, 1.2) + self.assertEqual(m.input[t].value, 0.8) + + def test_get_penalty_from_constant_target(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + setpoint_data = ScalarData({m.var[:, "A"]: 1.0, m.var[:, "B"]: 2.0}) + weight_data = ScalarData({m.var[:, "A"]: 10.0, m.var[:, "B"]: 0.1}) + + vset, tr_cost = interface.get_penalty_from_target( + setpoint_data, weight_data=weight_data + ) + m.var_set = vset + m.tracking_cost = tr_cost + for t in m.time: + for i in m.var_set: + pred_expr = ( + 10.0 * (m.var[t, "A"] - 1.0) ** 2 + if i == 0 + else 0.1 * (m.var[t, "B"] - 2.0) ** 2 + ) + self.assertEqual(pyo.value(pred_expr), pyo.value(m.tracking_cost[i, t])) + self.assertTrue( + compare_expressions(pred_expr, m.tracking_cost[i, t].expr) + ) + + def test_get_penalty_from_constant_target_var_subset(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + setpoint_data = ScalarData( + {m.var[:, "A"]: 1.0, m.var[:, "B"]: 2.0, m.input[:]: 3.0} + ) + weight_data = ScalarData( + {m.var[:, "A"]: 10.0, m.var[:, "B"]: 0.1, m.input[:]: 0.01} + ) + + variables = [m.var[:, "A"], m.var[:, "B"]] + m.variable_set = pyo.Set(initialize=range(len(variables))) + new_set, tr_cost = interface.get_penalty_from_target( + setpoint_data, + variables=variables, + weight_data=weight_data, + variable_set=m.variable_set, + ) + m.tracking_cost = tr_cost + self.assertIs(m.variable_set, new_set) + for t in m.time: + for i in m.variable_set: + pred_expr = ( + 10.0 * (m.var[t, "A"] - 1.0) ** 2 + if i == 0 + else +0.1 * (m.var[t, "B"] - 2.0) ** 2 + ) + self.assertEqual(pyo.value(pred_expr), pyo.value(m.tracking_cost[i, t])) + self.assertTrue( + compare_expressions(pred_expr, m.tracking_cost[i, t].expr) + ) + + +class TestGetPenaltyFromTarget(unittest.TestCase): + def _make_model(self, n_time_points=3): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=list(range(n_time_points))) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var( + m.time, m.comp, initialize={(i, j): 1.1 * i for i, j in m.time * m.comp} + ) + return m + + def test_constant_setpoint(self): + m = self._make_model() + interface = DynamicModelInterface(m, m.time) + setpoint = {m.var[:, "A"]: 0.3, m.var[:, "B"]: 0.4} + m.var_set, m.penalty = interface.get_penalty_from_target(setpoint) + + # Note that the order of the variables here is not deterministic + # in some Python <=3.6 implementations + pred_expr = { + (i, t): ( + (m.var[t, "A"] - 0.3) ** 2 if i == 0 else (m.var[t, "B"] - 0.4) ** 2 + ) + for i, t in m.var_set * m.time + } + for t in m.time: + for i in m.var_set: + self.assertTrue( + compare_expressions(pred_expr[i, t], m.penalty[i, t].expr) + ) + self.assertEqual(pyo.value(pred_expr[i, t]), pyo.value(m.penalty[i, t])) + + def test_varying_setpoint(self): + m = self._make_model(n_time_points=5) + interface = DynamicModelInterface(m, m.time) + A_target = [0.4, 0.6, 0.1, 0.0, 1.1] + B_target = [0.8, 0.9, 1.3, 1.5, 1.4] + setpoint = ({m.var[:, "A"]: A_target, m.var[:, "B"]: B_target}, m.time) + m.var_set, m.penalty = interface.get_penalty_from_target(setpoint) + + target = { + (i, t): A_target[j] if i == 0 else B_target[t] + for i in m.var_set + for (j, t) in enumerate(m.time) + } + for i, t in m.var_set * m.time: + var = m.var[t, "A"] if i == 0 else m.var[t, "B"] + pred_expr = (var - target[i, t]) ** 2 + self.assertTrue(compare_expressions(pred_expr, m.penalty[i, t].expr)) + self.assertEqual(pyo.value(pred_expr), pyo.value(m.penalty[i, t])) + + def test_piecewise_constant_setpoint(self): + m = self._make_model(n_time_points=5) + interface = DynamicModelInterface(m, m.time) + A_target = [0.3, 0.9, 0.7] + B_target = [1.1, 0.1, 0.5] + setpoint = ( + {m.var[:, "A"]: A_target, m.var[:, "B"]: B_target}, + [(0.0, 0.0), (0.0, 2.0), (2.0, 4.0)], + ) + m.var_set, m.penalty = interface.get_penalty_from_target(setpoint) + target = { + (i, j): A_target[j] if i == 0 else B_target[j] + for i in m.var_set + for j in range(len(A_target)) + } + for i, t in m.var_set * m.time: + if t == 0: + idx = 0 + elif t <= 2.0: + idx = 1 + elif t <= 4.0: + idx = 2 + var = m.var[t, "A"] if i == 0 else m.var[t, "B"] + pred_expr = (var - target[i, idx]) ** 2 + self.assertTrue(compare_expressions(pred_expr, m.penalty[i, t].expr)) + self.assertEqual(pyo.value(pred_expr), pyo.value(m.penalty[i, t])) + + def test_piecewise_constant_setpoint_with_specified_variables(self): + m = self._make_model(n_time_points=5) + interface = DynamicModelInterface(m, m.time) + A_target = [0.3, 0.9, 0.7] + B_target = [1.1, 0.1, 0.5] + setpoint = ( + {m.var[:, "A"]: A_target, m.var[:, "B"]: B_target}, + [(0.0, 0.0), (0.0, 2.0), (2.0, 4.0)], + ) + variables = [pyo.Reference(m.var[:, "B"])] + m.var_set, m.penalty = interface.get_penalty_from_target( + setpoint, variables=variables + ) + self.assertEqual(len(m.var_set), 1) + self.assertEqual(m.var_set[1], 0) + for i, t in m.var_set * m.time: + if t == 0: + idx = 0 + elif t <= 2.0: + idx = 1 + elif t <= 4.0: + idx = 2 + var = m.var[t, "B"] + pred_expr = (var - B_target[idx]) ** 2 + self.assertTrue(compare_expressions(pred_expr, m.penalty[i, t].expr)) + self.assertEqual(pyo.value(pred_expr), pyo.value(m.penalty[i, t])) + + def test_piecewise_constant_setpoint_time_subset(self): + m = self._make_model(n_time_points=5) + interface = DynamicModelInterface(m, m.time) + A_target = [0.3, 0.9, 0.7] + B_target = [1.1, 0.1, 0.5] + setpoint = ( + {m.var[:, "A"]: A_target, m.var[:, "B"]: B_target}, + [(0.0, 0.0), (0.0, 2.0), (2.0, 4.0)], + ) + m.sample_points = pyo.Set(initialize=[0.0, 2.0, 4.0]) + m.var_set, m.penalty = interface.get_penalty_from_target( + setpoint, time=m.sample_points + ) + idx_sets = ComponentSet(m.penalty.index_set().subsets()) + self.assertIn(m.var_set, idx_sets) + self.assertIn(m.sample_points, idx_sets) + target = { + (i, j): A_target[j] if i == 0 else B_target[j] + for i in m.var_set + for j in range(len(A_target)) + } + for i, t in m.var_set * m.time: + if t == 0: + idx = 0 + elif t <= 2.0: + idx = 1 + elif t <= 4.0: + idx = 2 + if t in m.sample_points: + var = m.var[t, "A"] if i == 0 else m.var[t, "B"] + pred_expr = (var - target[i, idx]) ** 2 + self.assertTrue(compare_expressions(pred_expr, m.penalty[i, t].expr)) + self.assertEqual(pyo.value(pred_expr), pyo.value(m.penalty[i, t])) + else: + self.assertNotIn((i, t), m.penalty) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/interfaces/tests/test_var_linker.py b/pyomo/contrib/mpc/interfaces/tests/test_var_linker.py new file mode 100644 index 00000000000..ceec9fada36 --- /dev/null +++ b/pyomo/contrib/mpc/interfaces/tests/test_var_linker.py @@ -0,0 +1,143 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +import pyomo.environ as pyo +from pyomo.contrib.mpc.interfaces.var_linker import DynamicVarLinker + + +class TestVarLinker(unittest.TestCase): + def _make_models(self, n_time_points_1=3, n_time_points_2=3): + m1 = pyo.ConcreteModel() + m1.time = pyo.Set(initialize=range(n_time_points_1)) + m1.comp = pyo.Set(initialize=["A", "B"]) + m1.var = pyo.Var( + m1.time, + m1.comp, + initialize={(i, j): 1.0 + i * 0.1 for i, j in m1.time * m1.comp}, + ) + m1.input = pyo.Var(m1.time, initialize={i: 1.0 - i * 0.1 for i in m1.time}) + + m2 = pyo.ConcreteModel() + m2.time = pyo.Set(initialize=range(n_time_points_2)) + m2.x1 = pyo.Var(m2.time, initialize=2.1) + m2.x2 = pyo.Var(m2.time, initialize=2.2) + m2.x3 = pyo.Var(m2.time, initialize=2.3) + m2.x4 = pyo.Var(m2.time, initialize=2.4) + + return m1, m2 + + def test_transfer_one_to_one(self): + m1, m2 = self._make_models() + vars1 = [pyo.Reference(m1.var[:, "A"]), pyo.Reference(m1.var[:, "B"]), m1.input] + vars2 = [m2.x1, m2.x2, m2.x3] + + linker = DynamicVarLinker(vars1, vars2) + t_source = 0 + t_target = 2 + linker.transfer(t_source=0, t_target=2) + + pred_AB = lambda t: 1.0 + t * 0.1 + pred_input = lambda t: 1.0 - t * 0.1 + for t in m1.time: + # Both models have same time set + + # Values in source variables have not changed + self.assertEqual(m1.var[t, "A"].value, pred_AB(t)) + self.assertEqual(m1.var[t, "B"].value, pred_AB(t)) + self.assertEqual(m1.input[t].value, pred_input(t)) + + if t == t_target: + self.assertEqual(m2.x1[t].value, pred_AB(t_source)) + self.assertEqual(m2.x2[t].value, pred_AB(t_source)) + self.assertEqual(m2.x3[t].value, pred_input(t_source)) + self.assertEqual(m2.x4[t].value, 2.4) + else: + self.assertEqual(m2.x1[t].value, 2.1) + self.assertEqual(m2.x2[t].value, 2.2) + self.assertEqual(m2.x3[t].value, 2.3) + self.assertEqual(m2.x4[t].value, 2.4) + + def test_transfer_one_to_all(self): + m1, m2 = self._make_models() + vars1 = [pyo.Reference(m1.var[:, "A"]), pyo.Reference(m1.var[:, "B"]), m1.input] + vars2 = [m2.x1, m2.x2, m2.x3] + + linker = DynamicVarLinker(vars1, vars2) + t_source = 0 + t_target = 2 + linker.transfer(t_source=0, t_target=m2.time) + + pred_AB = lambda t: 1.0 + t * 0.1 + pred_input = lambda t: 1.0 - t * 0.1 + for t in m1.time: + # Both models have same time set + + # Values in source variables have not changed + self.assertEqual(m1.var[t, "A"].value, pred_AB(t)) + self.assertEqual(m1.var[t, "B"].value, pred_AB(t)) + self.assertEqual(m1.input[t].value, pred_input(t)) + + # Target variables have been updated + self.assertEqual(m2.x1[t].value, pred_AB(t_source)) + self.assertEqual(m2.x2[t].value, pred_AB(t_source)) + self.assertEqual(m2.x3[t].value, pred_input(t_source)) + self.assertEqual(m2.x4[t].value, 2.4) + + def test_transfer_all_to_all(self): + m1, m2 = self._make_models() + vars1 = [pyo.Reference(m1.var[:, "A"]), pyo.Reference(m1.var[:, "B"]), m1.input] + vars2 = [m2.x1, m2.x2, m2.x3] + + linker = DynamicVarLinker(vars1, vars2) + t_source = 0 + t_target = 2 + linker.transfer(t_source=m1.time, t_target=m2.time) + + pred_AB = lambda t: 1.0 + t * 0.1 + pred_input = lambda t: 1.0 - t * 0.1 + for t in m1.time: + # Both models have same time set + + # Values in source variables have not changed + self.assertEqual(m1.var[t, "A"].value, pred_AB(t)) + self.assertEqual(m1.var[t, "B"].value, pred_AB(t)) + self.assertEqual(m1.input[t].value, pred_input(t)) + + # Target variables have been updated + self.assertEqual(m2.x1[t].value, pred_AB(t)) + self.assertEqual(m2.x2[t].value, pred_AB(t)) + self.assertEqual(m2.x3[t].value, pred_input(t)) + self.assertEqual(m2.x4[t].value, 2.4) + + def test_transfer_exceptions(self): + m1, m2 = self._make_models() + vars1 = [pyo.Reference(m1.var[:, "A"]), pyo.Reference(m1.var[:, "B"])] + vars2 = [m2.x1, m2.x2, m2.x3] + + msg = "must be provided two lists.*of equal length" + with self.assertRaisesRegex(ValueError, msg): + linker = DynamicVarLinker(vars1, vars2) + + vars1 = [pyo.Reference(m1.var[:, "A"]), pyo.Reference(m1.var[:, "B"]), m1.input] + vars2 = [m2.x1, m2.x2, m2.x3] + linker = DynamicVarLinker(vars1, vars2) + msg = "Source time points were not provided" + with self.assertRaisesRegex(RuntimeError, msg): + linker.transfer(t_target=m2.time) + + msg = "Target time points were not provided" + with self.assertRaisesRegex(RuntimeError, msg): + linker.transfer(t_source=m1.time.first()) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/interfaces/var_linker.py b/pyomo/contrib/mpc/interfaces/var_linker.py new file mode 100644 index 00000000000..fd831c9a2c1 --- /dev/null +++ b/pyomo/contrib/mpc/interfaces/var_linker.py @@ -0,0 +1,58 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.contrib.mpc.interfaces.copy_values import copy_values_at_time + + +class DynamicVarLinker(object): + """ + The purpose of this class is so that we do not have + to call find_component or construct ComponentUIDs in a loop + when transferring values between two different dynamic models. + It also allows us to transfer values between variables that + have different names in different models. + + """ + + def __init__( + self, source_variables, target_variables, source_time=None, target_time=None + ): + # Right now all the transfers I can think of only happen + # in one direction + if len(source_variables) != len(target_variables): + raise ValueError( + "%s must be provided two lists of time-indexed variables " + "of equal length. Got lengths %s and %s" + % (type(self), len(source_variables), len(target_variables)) + ) + self._source_variables = source_variables + self._target_variables = target_variables + self._source_time = source_time + self._target_time = target_time + + def transfer(self, t_source=None, t_target=None): + if t_source is None and self._source_time is None: + raise RuntimeError( + "Source time points were not provided in the transfer method " + "or in the constructor." + ) + elif t_source is None: + t_source = self._source_time + if t_target is None and self._target_time is None: + raise RuntimeError( + "Target time points were not provided in the transfer method " + "or in the constructor." + ) + elif t_target is None: + t_target = self._target_time + copy_values_at_time( + self._source_variables, self._target_variables, t_source, t_target + ) diff --git a/pyomo/checker/tests/examples/model/ModelArgument_firstarg.py b/pyomo/contrib/mpc/modeling/__init__.py similarity index 70% rename from pyomo/checker/tests/examples/model/ModelArgument_firstarg.py rename to pyomo/contrib/mpc/modeling/__init__.py index ea0d6ce1aec..0eb255a9f56 100644 --- a/pyomo/checker/tests/examples/model/ModelArgument_firstarg.py +++ b/pyomo/contrib/mpc/modeling/__init__.py @@ -9,12 +9,10 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.environ import ConcreteModel, RangeSet, Var, Constraint +__doc__ = """A module with utilities for constructing modeling components that are + useful for dynamic optimization. -model = ConcreteModel() -model.S = RangeSet(10) -model.X = Var(model.S) + Code in this module may import from mpc.data, but should not import + from mpc.interfaces. -def C_rule(m, i): - return m.X[i] >= 10.0 -model.C = Constraint(rule=C_rule) + """ diff --git a/pyomo/contrib/mpc/modeling/constraints.py b/pyomo/contrib/mpc/modeling/constraints.py new file mode 100644 index 00000000000..6fb6a311afb --- /dev/null +++ b/pyomo/contrib/mpc/modeling/constraints.py @@ -0,0 +1,62 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.core.base.constraint import Constraint +from pyomo.core.base.set import Set + + +def get_piecewise_constant_constraints(inputs, time, sample_points, use_next=True): + """Returns an IndexedConstraint that constrains the provided variables + to be constant between the provided sample points + + Arguments + --------- + inputs: list of variables + Time-indexed variables that will be constrained piecewise constant + time: Set + Set of points at which provided variables will be constrained + sample_points: List of floats + Points at which "constant constraints" will be omitted; these are + points at which the provided variables may vary. + use_next: Bool (default True) + Whether the next time point will be used in the constant constraint + at each point in time. Otherwise, the previous time point is used. + + Returns + ------- + Set, IndexedConstraint + A RangeSet indexing the list of variables provided and a Constraint + indexed by the product of this RangeSet and time. + + """ + input_set = Set(initialize=range(len(inputs))) + sample_point_set = set(sample_points) + + def piecewise_constant_rule(m, i, t): + if t in sample_point_set: + return Constraint.Skip + else: + # I think whether we want prev or next here depends on whether + # we use an explicit or implicit time discretization. I.e. whether + # an input is applied to the finite element in front of or behind + # its time point. If the wrong direction for a discretization + # is used, we could have different inputs applied within the same + # finite element, which I think we never want. + var = inputs[i] + if use_next: + t_next = time.next(t) + return var[t] - var[t_next] == 0 + else: + t_prev = time.prev(t) + return var[t_prev] - var[t] == 0 + + pwc_con = Constraint(input_set, time, rule=piecewise_constant_rule) + return input_set, pwc_con diff --git a/pyomo/contrib/mpc/modeling/cost_expressions.py b/pyomo/contrib/mpc/modeling/cost_expressions.py new file mode 100644 index 00000000000..65a376e42d2 --- /dev/null +++ b/pyomo/contrib/mpc/modeling/cost_expressions.py @@ -0,0 +1,325 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 +# by the software owners: The Regents of the University of California, through +# Lawrence Berkeley National Laboratory, National Technology & Engineering +# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University +# Research Corporation, et al. All rights reserved. +# +# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and +# license information. +################################################################################# + +from pyomo.common.collections import ComponentMap +from pyomo.core.base.componentuid import ComponentUID +from pyomo.core.base.expression import Expression +from pyomo.core.base.set import Set + +from pyomo.contrib.mpc.data.series_data import get_indexed_cuid +from pyomo.contrib.mpc.data.scalar_data import ScalarData +from pyomo.contrib.mpc.data.series_data import TimeSeriesData +from pyomo.contrib.mpc.data.interval_data import IntervalData +from pyomo.contrib.mpc.data.convert import interval_to_series, _process_to_dynamic_data + + +def get_penalty_from_constant_target( + variables, time, setpoint_data, weight_data=None, variable_set=None +): + """ + This function returns a tracking cost IndexedExpression for the given + time-indexed variables and associated setpoint data. + + Arguments + --------- + variables: list + List of time-indexed variables to include in the tracking cost + expression + time: iterable + Set of variable indices for which a cost expression will be + created + setpoint_data: ScalarData, dict, or ComponentMap + Maps variable names to setpoint values + weight_data: ScalarData, dict, or ComponentMap + Optional. Maps variable names to tracking cost weights. If not + provided, weights of one are used. + variable_set: Set + Optional. A set of indices into the provided list of variables + by which the cost expression will be indexed. + + Returns + ------- + Set, Expression + RangeSet that indexes the list of variables provided and an Expression + indexed by the RangeSet and time containing the cost term for each + variable at each point in time. + + """ + if weight_data is None: + weight_data = ScalarData(ComponentMap((var, 1.0) for var in variables)) + if not isinstance(weight_data, ScalarData): + weight_data = ScalarData(weight_data) + if not isinstance(setpoint_data, ScalarData): + setpoint_data = ScalarData(setpoint_data) + if variable_set is None: + variable_set = Set(initialize=range(len(variables))) + + # Make sure data have keys for each var + for var in variables: + if not setpoint_data.contains_key(var): + raise KeyError( + "Setpoint data dictionary does not contain a" + " key for variable %s" % var.name + ) + if not weight_data.contains_key(var): + raise KeyError( + "Tracking weight dictionary does not contain a" + " key for variable %s" % var.name + ) + + # Set up data structures so we don't have to re-process keys for each + # time index in the rule. + cuids = [get_indexed_cuid(var) for var in variables] + setpoint_data = setpoint_data.get_data() + weight_data = weight_data.get_data() + + def tracking_rule(m, i, t): + return get_quadratic_penalty_at_time( + variables[i], t, setpoint_data[cuids[i]], weight=weight_data[cuids[i]] + ) + + tracking_expr = Expression(variable_set, time, rule=tracking_rule) + return variable_set, tracking_expr + + +def get_penalty_from_piecewise_constant_target( + variables, + time, + setpoint_data, + weight_data=None, + variable_set=None, + tolerance=0.0, + prefer_left=True, +): + """Returns an IndexedExpression penalizing deviation between + the specified variables and piecewise constant target data. + + Arguments + --------- + variables: List of Pyomo variables + Variables that participate in the cost expressions. + time: Iterable + Index used for the cost expression + setpoint_data: IntervalData + Holds the piecewise constant values that will be used as + setpoints + weight_data: ScalarData (optional) + Weights for variables. Default is all ones. + tolerance: Float (optional) + Tolerance used for determining whether a time point + is within an interval. Default is zero. + prefer_left: Bool (optional) + If a time point lies at the boundary of two intervals, whether + the value on the left will be chosen. Default is True. + + Returns + ------- + Set, Expression + Pyomo Expression, indexed by time, for the total weighted + tracking cost with respect to the provided setpoint. + + """ + if variable_set is None: + variable_set = Set(initialize=range(len(variables))) + if isinstance(setpoint_data, IntervalData): + setpoint_time_series = interval_to_series( + setpoint_data, + time_points=time, + tolerance=tolerance, + prefer_left=prefer_left, + ) + else: + setpoint_time_series = IntervalData(*setpoint_data) + var_set, tracking_cost = get_penalty_from_time_varying_target( + variables, + time, + setpoint_time_series, + weight_data=weight_data, + variable_set=variable_set, + ) + return var_set, tracking_cost + + +def get_quadratic_penalty_at_time(var, t, setpoint, weight=None): + if weight is None: + weight = 1.0 + return weight * (var[t] - setpoint) ** 2 + + +def _get_penalty_expressions_from_time_varying_target( + variables, time, setpoint_data, weight_data=None +): + if weight_data is None: + weight_data = ScalarData(ComponentMap((var, 1.0) for var in variables)) + if not isinstance(weight_data, ScalarData): + weight_data = ScalarData(weight_data) + if not isinstance(setpoint_data, TimeSeriesData): + setpoint_data = TimeSeriesData(*setpoint_data) + + # Validate incoming data + if list(time) != setpoint_data.get_time_points(): + raise RuntimeError( + "Mismatch in time points between time set and points" + " in the setpoint data structure" + ) + for var in variables: + if not setpoint_data.contains_key(var): + raise KeyError("Setpoint data does not contain a key for variable %s" % var) + if not weight_data.contains_key(var): + raise KeyError( + "Tracking weight does not contain a key for variable %s" % var + ) + + # Get lists of weights and setpoints so we don't have to process + # the variables (to get CUIDs) and hash the CUIDs for every + # time index. + cuids = [get_indexed_cuid(var, sets=(time,)) for var in variables] + weights = [weight_data.get_data_from_key(var) for var in variables] + setpoints = [setpoint_data.get_data_from_key(var) for var in variables] + tracking_costs = [ + { + t: get_quadratic_penalty_at_time(var, t, setpoints[j][i], weights[j]) + for i, t in enumerate(time) + } + for j, var in enumerate(variables) + ] + return tracking_costs + + +def get_penalty_from_time_varying_target( + variables, time, setpoint_data, weight_data=None, variable_set=None +): + """Constructs a penalty expression for the specified variables and + specified time-varying target data. + + Arguments + --------- + variables: List of Pyomo variables + Variables that participate in the cost expressions. + time: Iterable + Index used for the cost expression + setpoint_data: TimeSeriesData + Holds the trajectory values that will be used as a setpoint + weight_data: ScalarData (optional) + Weights for variables. Default is all ones. + variable_set: Set (optional) + Set indexing the list of provided variables, if one exists already. + + Returns + ------- + Set, Expression + Set indexing the list of provided variables and Expression, indexed + by the variable set and time, for the total weighted penalty with + respect to the provided setpoint. + + """ + if variable_set is None: + variable_set = Set(initialize=range(len(variables))) + + # This is a list of dictionaries, one for each variable and each + # mapping each time point to the quadratic weighted tracking cost term + # at that time point. + tracking_costs = _get_penalty_expressions_from_time_varying_target( + variables, time, setpoint_data, weight_data=weight_data + ) + + def tracking_rule(m, i, t): + return tracking_costs[i][t] + + tracking_cost = Expression(variable_set, time, rule=tracking_rule) + return variable_set, tracking_cost + + +def get_penalty_from_target( + variables, + time, + setpoint_data, + weight_data=None, + variable_set=None, + tolerance=None, + prefer_left=None, +): + """A function to get a penalty expression for specified variables from + a target that is constant, piecewise constant, or time-varying. + + This function accepts ScalarData, IntervalData, or TimeSeriesData objects, + or compatible mappings/tuples as the target, and builds the appropriate + penalty expression for each. Mappings are converted to ScalarData, and + tuples (of data dict, time list) are unpacked and converted to IntervalData + or TimeSeriesData depending on the contents of the time list. + + Arguments + --------- + variables: List + List of time-indexed variables to be penalized + time: Set + Set of time points at which to construct penalty expressions. + Also indexes the returned Expression. + setpoint_data: ScalarData, TimeSeriesData, or IntervalData + Data structure representing the possibly time-varying or piecewise + constant setpoint + weight_data: ScalarData (optional) + Data structure holding the weights to be applied to each variable + variable_set: Set (optional) + Set indexing the provided variables, if one already exists. Also + indexes the returned Expression. + tolerance: Float (optional) + Tolerance for checking inclusion within an interval. Only may be + provided if IntervalData is provided as the setpoint. + prefer_left: Bool (optional) + Flag indicating whether left endpoints of intervals should take + precedence over right endpoints. Default is False. Only may be + provided if IntervalData is provided as the setpoint. + + Returns + ------- + Set, Expression + Set indexing the list of provided variables and an Expression, + indexed by this set and the provided time set, containing the + penalties for each variable at each point in time. + + """ + setpoint_data = _process_to_dynamic_data(setpoint_data) + args = (variables, time, setpoint_data) + kwds = dict(weight_data=weight_data, variable_set=variable_set) + + def _error_if_used(tolerance, prefer_left, sp_type): + if tolerance is not None or prefer_left is not None: + raise RuntimeError( + "tolerance and prefer_left arguments can only be used if" + " IntervalData-compatible setpoint is provided. Got" + " tolerance=%s, prefer_left=%s when using %s as a target." + % (tolerance, prefer_left, sp_type) + ) + + if isinstance(setpoint_data, ScalarData): + _error_if_used(tolerance, prefer_left, type(setpoint_data)) + return get_penalty_from_constant_target(*args, **kwds) + elif isinstance(setpoint_data, TimeSeriesData): + _error_if_used(tolerance, prefer_left, type(setpoint_data)) + return get_penalty_from_time_varying_target(*args, **kwds) + elif isinstance(setpoint_data, IntervalData): + tolerance = 0.0 if tolerance is None else tolerance + prefer_left = True if prefer_left is None else prefer_left + kwds.update(prefer_left=prefer_left, tolerance=tolerance) + return get_penalty_from_piecewise_constant_target(*args, **kwds) diff --git a/pyomo/contrib/mpc/modeling/terminal.py b/pyomo/contrib/mpc/modeling/terminal.py new file mode 100644 index 00000000000..c25efca280a --- /dev/null +++ b/pyomo/contrib/mpc/modeling/terminal.py @@ -0,0 +1,163 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 +# by the software owners: The Regents of the University of California, through +# Lawrence Berkeley National Laboratory, National Technology & Engineering +# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University +# Research Corporation, et al. All rights reserved. +# +# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and +# license information. +################################################################################# + +from pyomo.core.base.componentuid import ComponentUID +from pyomo.core.base.expression import Expression +from pyomo.core.base.set import Set + +from pyomo.contrib.mpc.data.series_data import get_indexed_cuid +from pyomo.contrib.mpc.data.scalar_data import ScalarData + + +def _get_quadratic_penalty_at_time(var, t, target, weight=None): + if weight is None: + weight = 1.0 + return weight * (var[t] - target) ** 2 + + +def _get_penalty_expressions_at_time( + variables, t, target_data, weight_data=None, time_set=None +): + """A private helper function to process data and construct penalty + expressions + + """ + if weight_data is None: + weight_data = ScalarData(ComponentMap((var, 1.0) for var in variables)) + if not isinstance(weight_data, ScalarData): + # We pass time_set as an argument in case the user provides a + # ComponentMap of VarData -> values. In this case knowing the + # time set is necessary to recover the indexed CUID. + weight_data = ScalarData(weight_data, time_set=time_set) + if not isinstance(target_data, ScalarData): + target_data = ScalarData(target_data, time_set=time_set) + + for var in variables: + if not target_data.contains_key(var): + raise KeyError( + "Target data does not contain a key for variable %s" % var.name + ) + if not weight_data.contains_key(var): + raise KeyError( + "Penalty weight data does not contain a key for variable %s" % var.name + ) + + penalties = [ + _get_quadratic_penalty_at_time( + var, + t, + target_data.get_data_from_key(var), + weight_data.get_data_from_key(var), + ) + for var in variables + ] + return penalties + + +def get_penalty_at_time( + variables, t, target_data, weight_data=None, time_set=None, variable_set=None +): + """Returns an Expression penalizing the deviation of the specified + variables at the specified point in time from the specified target + + Arguments + --------- + variables: List + List of time-indexed variables that will be penalized + t: Float + Time point at which to apply the penalty + target_data: ScalarData + ScalarData object containing the target for (at least) the variables + to be penalized + weight_data: ScalarData (optional) + ScalarData object containing the penalty weights for (at least) the + variables to be penalized + time_set: Set (optional) + Time set that indexes the provided variables. This is only used if + target or weight data are provided as a ComponentMap with VarData + as keys. In this case the Set is necessary to recover the CUIDs + used internally as keys + variable_set: Set (optional) + Set indexing the list of variables provided, if such a set already + exists + + Returns + ------- + Set, Expression + Set indexing the list of variables provided and an Expression, + indexed by this set, containing the weighted penalty expressions + + """ + if variable_set is None: + variable_set = Set(initialize=range(len(variables))) + penalty_expressions = _get_penalty_expressions_at_time( + variables, t, target_data, weight_data=weight_data, time_set=time_set + ) + + def penalty_rule(m, i): + return penalty_expressions[i] + + penalty = Expression(variable_set, rule=penalty_rule) + return variable_set, penalty + + +def get_terminal_penalty( + variables, time_set, target_data, weight_data=None, variable_set=None +): + """Returns an Expression penalizing the deviation of the specified + variables at the final point in time from the specified target + + Arguments + --------- + variables: List + List of time-indexed variables that will be penalized + time_set: Set + Time set that indexes the provided variables. Penalties are applied + at the last point in this set. + target_data: ScalarData + ScalarData object containing the target for (at least) the variables + to be penalized + weight_data: ScalarData (optional) + ScalarData object containing the penalty weights for (at least) the + variables to be penalized + variable_set: Set (optional) + Set indexing the list of variables provided, if such a set already + exists + + Returns + ------- + Set, Expression + Set indexing the list of variables provided and an Expression, + indexed by this set, containing the weighted penalty expressions + + """ + t = time_set.last() + return get_penalty_at_time( + variables, + t, + target_data, + weight_data=weight_data, + time_set=time_set, + variable_set=variable_set, + ) diff --git a/pyomo/contrib/mpc/modeling/tests/__init__.py b/pyomo/contrib/mpc/modeling/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pyomo/contrib/mpc/modeling/tests/test_cost_expressions.py b/pyomo/contrib/mpc/modeling/tests/test_cost_expressions.py new file mode 100644 index 00000000000..5db390ffa47 --- /dev/null +++ b/pyomo/contrib/mpc/modeling/tests/test_cost_expressions.py @@ -0,0 +1,507 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 +# by the software owners: The Regents of the University of California, through +# Lawrence Berkeley National Laboratory, National Technology & Engineering +# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University +# Research Corporation, et al. All rights reserved. +# +# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and +# license information. +################################################################################# +import pyomo.common.unittest as unittest + +import pyomo.environ as pyo +from pyomo.common.collections import ComponentSet +from pyomo.core.expr.visitor import identify_variables +from pyomo.core.expr.compare import compare_expressions +from pyomo.contrib.mpc.modeling.cost_expressions import ( + get_penalty_from_constant_target, + get_penalty_from_piecewise_constant_target, + get_penalty_from_time_varying_target, + get_penalty_from_target, +) +from pyomo.contrib.mpc.data.scalar_data import ScalarData +from pyomo.contrib.mpc.data.series_data import TimeSeriesData +from pyomo.contrib.mpc.data.interval_data import IntervalData + + +class TestTrackingCostConstantSetpoint(unittest.TestCase): + def test_penalty_no_weights(self): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=[1, 2, 3]) + m.v1 = pyo.Var(m.time, initialize={i: 1 * i for i in m.time}) + m.v2 = pyo.Var(m.time, initialize={i: 2 * i for i in m.time}) + + setpoint_data = ScalarData({m.v1[:]: 3.0, m.v2[:]: 4.0}) + variables = [m.v1, m.v2] + m.var_set, m.tracking_expr = get_penalty_from_constant_target( + variables, m.time, setpoint_data + ) + self.assertEqual(len(m.var_set), 2) + self.assertIn(0, m.var_set) + self.assertIn(1, m.var_set) + + var_sets = { + (i, t): ComponentSet(identify_variables(m.tracking_expr[i, t])) + for i in m.var_set + for t in m.time + } + for i in m.time: + for j in m.var_set: + self.assertIn(variables[j][i], var_sets[j, i]) + pred_value = (1 * i - 3) ** 2 if j == 0 else (2 * i - 4) ** 2 + self.assertEqual(pred_value, pyo.value(m.tracking_expr[j, i])) + pred_expr = (m.v1[i] - 3) ** 2 if j == 0 else (m.v2[i] - 4) ** 2 + self.assertTrue( + compare_expressions(pred_expr, m.tracking_expr[j, i].expr) + ) + + def test_penalty_with_weights(self): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=[1, 2, 3]) + m.v1 = pyo.Var(m.time, initialize={i: 1 * i for i in m.time}) + m.v2 = pyo.Var(m.time, initialize={i: 2 * i for i in m.time}) + + setpoint_data = ScalarData({m.v1[:]: 3.0, m.v2[:]: 4.0}) + weight_data = ScalarData({m.v1[:]: 0.1, m.v2[:]: 0.5}) + m.var_set = pyo.Set(initialize=[0, 1]) + variables = [m.v1, m.v2] + new_set, m.tracking_expr = get_penalty_from_constant_target( + variables, + m.time, + setpoint_data, + weight_data=weight_data, + variable_set=m.var_set, + ) + self.assertIs(new_set, m.var_set) + + var_sets = { + (i, t): ComponentSet(identify_variables(m.tracking_expr[i, t])) + for i in m.var_set + for t in m.time + } + for i in m.time: + for j in m.var_set: + self.assertIn(variables[j][i], var_sets[j, i]) + pred_value = ( + 0.1 * (1 * i - 3) ** 2 if j == 0 else 0.5 * (2 * i - 4) ** 2 + ) + self.assertAlmostEqual(pred_value, pyo.value(m.tracking_expr[j, i])) + pred_expr = ( + 0.1 * (m.v1[i] - 3) ** 2 if j == 0 else 0.5 * (m.v2[i] - 4) ** 2 + ) + self.assertTrue( + compare_expressions(pred_expr, m.tracking_expr[j, i].expr) + ) + + def test_exceptions(self): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=[1, 2, 3]) + m.v1 = pyo.Var(m.time, initialize={i: 1 * i for i in m.time}) + m.v2 = pyo.Var(m.time, initialize={i: 2 * i for i in m.time}) + + setpoint_data = ScalarData({m.v1[:]: 3.0}) + weight_data = ScalarData({m.v2[:]: 0.1}) + with self.assertRaisesRegex(KeyError, "Setpoint data"): + _, m.tracking_expr = get_penalty_from_constant_target( + [m.v1, m.v2], m.time, setpoint_data + ) + + setpoint_data = ScalarData({m.v1[:]: 3.0, m.v2[:]: 4.0}) + with self.assertRaisesRegex(KeyError, "Tracking weight"): + _, m.tracking_expr = get_penalty_from_constant_target( + [m.v1, m.v2], m.time, setpoint_data, weight_data=weight_data + ) + + def test_add_set_after_expr(self): + # A small gotcha that may come up. This is known behavior + # due to Pyomo's "implicit set" addition. + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=[1, 2, 3]) + m.v1 = pyo.Var(m.time, initialize={i: 1 * i for i in m.time}) + m.v2 = pyo.Var(m.time, initialize={i: 2 * i for i in m.time}) + + setpoint_data = ScalarData({m.v1[:]: 3.0, m.v2[:]: 4.0}) + weight_data = ScalarData({m.v1[:]: 0.1, m.v2[:]: 0.5}) + m.var_set = pyo.Set(initialize=[0, 1]) + variables = [m.v1, m.v2] + new_set, tr_expr = get_penalty_from_constant_target( + variables, + m.time, + setpoint_data, + weight_data=weight_data, + variable_set=m.var_set, + ) + m.tracking_expr = tr_expr # new_set gets added and assigned a name + msg = "Attempting to re-assign" + with self.assertRaisesRegex(RuntimeError, msg): + # attempting to add the same component twice + m.variable_set = new_set + + +class TestTrackingCostPiecewiseSetpoint(unittest.TestCase): + def _make_model(self, n_time_points=3): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=list(range(n_time_points))) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var( + m.time, m.comp, initialize={(i, j): 1.1 * i for i, j in m.time * m.comp} + ) + return m + + def test_piecewise_penalty_no_weights(self): + m = self._make_model(n_time_points=5) + + variables = [pyo.Reference(m.var[:, "A"]), pyo.Reference(m.var[:, "B"])] + setpoint_data = IntervalData( + {m.var[:, "A"]: [2.0, 2.5], m.var[:, "B"]: [3.0, 3.5]}, [(0, 2), (2, 4)] + ) + m.var_set, m.tracking_cost = get_penalty_from_piecewise_constant_target( + variables, m.time, setpoint_data + ) + for i in m.time: + for j in m.var_set: + if i <= 2: + pred_expr = ( + (m.var[i, "A"] - 2.0) ** 2 + if j == 0 + else (m.var[i, "B"] - 3.0) ** 2 + ) + else: + pred_expr = ( + (m.var[i, "A"] - 2.5) ** 2 + if j == 0 + else (m.var[i, "B"] - 3.5) ** 2 + ) + pred_value = pyo.value(pred_expr) + self.assertEqual(pred_value, pyo.value(m.tracking_cost[j, i])) + self.assertTrue( + compare_expressions(pred_expr, m.tracking_cost[j, i].expr) + ) + + def test_piecewise_penalty_with_weights(self): + m = self._make_model(n_time_points=5) + + variables = [pyo.Reference(m.var[:, "A"]), pyo.Reference(m.var[:, "B"])] + setpoint_data = IntervalData( + {m.var[:, "A"]: [2.0, 2.5], m.var[:, "B"]: [3.0, 3.5]}, [(0, 2), (2, 4)] + ) + weight_data = { + pyo.ComponentUID(m.var[:, "A"]): 10.0, + pyo.ComponentUID(m.var[:, "B"]): 0.1, + } + m.var_set, m.tracking_cost = get_penalty_from_piecewise_constant_target( + variables, m.time, setpoint_data, weight_data=weight_data + ) + for i in m.time: + for j in m.var_set: + if i <= 2: + pred_expr = ( + 10.0 * (m.var[i, "A"] - 2.0) ** 2 + if j == 0 + else 0.1 * (m.var[i, "B"] - 3.0) ** 2 + ) + else: + pred_expr = ( + 10.0 * (m.var[i, "A"] - 2.5) ** 2 + if j == 0 + else 0.1 * (m.var[i, "B"] - 3.5) ** 2 + ) + pred_value = pyo.value(pred_expr) + self.assertEqual(pred_value, pyo.value(m.tracking_cost[j, i])) + self.assertTrue( + compare_expressions(pred_expr, m.tracking_cost[j, i].expr) + ) + + def test_piecewise_penalty_exceptions(self): + m = self._make_model(n_time_points=5) + + variables = [pyo.Reference(m.var[:, "A"]), pyo.Reference(m.var[:, "B"])] + setpoint_data = IntervalData({m.var[:, "A"]: [2.0, 2.5]}, [(0, 2), (2, 4)]) + weight_data = { + pyo.ComponentUID(m.var[:, "A"]): 10.0, + pyo.ComponentUID(m.var[:, "B"]): 0.1, + } + msg = "Setpoint data does not contain" + with self.assertRaisesRegex(KeyError, msg): + tr_cost = get_penalty_from_piecewise_constant_target( + variables, m.time, setpoint_data, weight_data=weight_data + ) + + setpoint_data = IntervalData( + {m.var[:, "A"]: [2.0, 2.5], m.var[:, "B"]: [3.0, 3.5]}, [(0, 2), (2, 4)] + ) + weight_data = {pyo.ComponentUID(m.var[:, "A"]): 10.0} + msg = "Tracking weight does not contain" + with self.assertRaisesRegex(KeyError, msg): + tr_cost = get_penalty_from_piecewise_constant_target( + variables, m.time, setpoint_data, weight_data=weight_data + ) + + +class TestTrackingCostVaryingSetpoint(unittest.TestCase): + def _make_model(self, n_time_points=3): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=list(range(n_time_points))) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var( + m.time, m.comp, initialize={(i, j): 1.1 * i for i, j in m.time * m.comp} + ) + return m + + def test_varying_setpoint_no_weights(self): + m = self._make_model(n_time_points=5) + variables = [pyo.Reference(m.var[:, "A"]), pyo.Reference(m.var[:, "B"])] + A_setpoint = [1.0 - 0.1 * i for i in range(len(m.time))] + B_setpoint = [5.0 + 0.1 * i for i in range(len(m.time))] + setpoint_data = TimeSeriesData( + {m.var[:, "A"]: A_setpoint, m.var[:, "B"]: B_setpoint}, m.time + ) + m.var_set, m.tracking_cost = get_penalty_from_time_varying_target( + variables, m.time, setpoint_data + ) + for i, t in enumerate(m.time): + for j in m.var_set: + pred_expr = ( + (m.var[t, "A"] - A_setpoint[i]) ** 2 + if j == 0 + else (m.var[t, "B"] - B_setpoint[i]) ** 2 + ) + pred_value = pyo.value(pred_expr) + self.assertEqual(pred_value, pyo.value(m.tracking_cost[j, t])) + self.assertTrue( + compare_expressions(pred_expr, m.tracking_cost[j, t].expr) + ) + + def test_varying_setpoint_with_weights(self): + m = self._make_model(n_time_points=5) + variables = [pyo.Reference(m.var[:, "A"]), pyo.Reference(m.var[:, "B"])] + A_setpoint = [1.0 - 0.1 * i for i in range(len(m.time))] + B_setpoint = [5.0 + 0.1 * i for i in range(len(m.time))] + setpoint_data = TimeSeriesData( + {m.var[:, "A"]: A_setpoint, m.var[:, "B"]: B_setpoint}, m.time + ) + weight_data = { + pyo.ComponentUID(m.var[:, "A"]): 10.0, + pyo.ComponentUID(m.var[:, "B"]): 0.1, + } + m.var_set, m.tracking_cost = get_penalty_from_time_varying_target( + variables, m.time, setpoint_data, weight_data=weight_data + ) + for i, t in enumerate(m.time): + for j in m.var_set: + pred_expr = ( + 10.0 * (m.var[t, "A"] - A_setpoint[i]) ** 2 + if j == 0 + else 0.1 * (m.var[t, "B"] - B_setpoint[i]) ** 2 + ) + pred_value = pyo.value(pred_expr) + self.assertEqual(pred_value, pyo.value(m.tracking_cost[j, t])) + self.assertTrue( + compare_expressions(pred_expr, m.tracking_cost[j, t].expr) + ) + + def test_varying_setpoint_exceptions(self): + m = self._make_model(n_time_points=5) + variables = [pyo.Reference(m.var[:, "A"]), pyo.Reference(m.var[:, "B"])] + A_setpoint = [1.0 - 0.1 * i for i in range(len(m.time))] + B_setpoint = [5.0 + 0.1 * i for i in range(len(m.time))] + setpoint_data = TimeSeriesData( + {m.var[:, "A"]: A_setpoint, m.var[:, "B"]: B_setpoint}, + [i + 10 for i in m.time], + ) + weight_data = { + pyo.ComponentUID(m.var[:, "A"]): 10.0, + pyo.ComponentUID(m.var[:, "B"]): 0.1, + } + msg = "Mismatch in time points" + with self.assertRaisesRegex(RuntimeError, msg): + # Time-varying setpoint specifies different time points + # from our time set. + var_set, tr_cost = get_penalty_from_time_varying_target( + variables, m.time, setpoint_data, weight_data=weight_data + ) + + setpoint_data = TimeSeriesData({m.var[:, "A"]: A_setpoint}, m.time) + msg = "Setpoint data does not contain" + with self.assertRaisesRegex(KeyError, msg): + var_set, tr_cost = get_penalty_from_time_varying_target( + variables, m.time, setpoint_data, weight_data=weight_data + ) + + setpoint_data = TimeSeriesData( + {m.var[:, "A"]: A_setpoint, m.var[:, "B"]: B_setpoint}, m.time + ) + weight_data = {pyo.ComponentUID(m.var[:, "A"]): 10.0} + msg = "Tracking weight does not contain" + with self.assertRaisesRegex(KeyError, msg): + tr_cost = get_penalty_from_time_varying_target( + variables, m.time, setpoint_data, weight_data=weight_data + ) + + +class TestGetPenaltyFromTarget(unittest.TestCase): + def _make_model(self, n_time_points=3): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=list(range(n_time_points))) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var( + m.time, m.comp, initialize={(i, j): 1.1 * i for i, j in m.time * m.comp} + ) + return m + + def test_constant_setpoint(self): + m = self._make_model() + setpoint = {m.var[:, "A"]: 0.3, m.var[:, "B"]: 0.4} + variables = [pyo.Reference(m.var[:, "B"]), pyo.Reference(m.var[:, "A"])] + m.var_set, m.penalty = get_penalty_from_target(variables, m.time, setpoint) + pred_expr = { + (i, t): ( + (m.var[t, "B"] - 0.4) ** 2 if i == 0 else (m.var[t, "A"] - 0.3) ** 2 + ) + for i, t in m.var_set * m.time + } + for t in m.time: + for i in m.var_set: + self.assertTrue( + compare_expressions(pred_expr[i, t], m.penalty[i, t].expr) + ) + self.assertEqual(pyo.value(pred_expr[i, t]), pyo.value(m.penalty[i, t])) + + def test_constant_setpoint_with_ScalarData(self): + m = self._make_model() + setpoint = ScalarData({m.var[:, "A"]: 0.3, m.var[:, "B"]: 0.4}) + variables = [pyo.Reference(m.var[:, "B"]), pyo.Reference(m.var[:, "A"])] + m.var_set, m.penalty = get_penalty_from_target(variables, m.time, setpoint) + pred_expr = { + (i, t): ( + (m.var[t, "B"] - 0.4) ** 2 if i == 0 else (m.var[t, "A"] - 0.3) ** 2 + ) + for i, t in m.var_set * m.time + } + for t in m.time: + for i in m.var_set: + self.assertTrue( + compare_expressions(pred_expr[i, t], m.penalty[i, t].expr) + ) + self.assertEqual(pyo.value(pred_expr[i, t]), pyo.value(m.penalty[i, t])) + + def test_varying_setpoint(self): + m = self._make_model(n_time_points=5) + A_target = [0.4, 0.6, 0.1, 0.0, 1.1] + B_target = [0.8, 0.9, 1.3, 1.5, 1.4] + setpoint = ({m.var[:, "A"]: A_target, m.var[:, "B"]: B_target}, m.time) + variables = [pyo.Reference(m.var[:, "B"]), pyo.Reference(m.var[:, "A"])] + m.var_set, m.penalty = get_penalty_from_target(variables, m.time, setpoint) + + target = { + (i, t): A_target[j] if i == 1 else B_target[t] + for i in m.var_set + for (j, t) in enumerate(m.time) + } + for i, t in m.var_set * m.time: + pred_expr = (variables[i][t] - target[i, t]) ** 2 + self.assertTrue(compare_expressions(pred_expr, m.penalty[i, t].expr)) + self.assertEqual(pyo.value(pred_expr), pyo.value(m.penalty[i, t])) + + def test_piecewise_constant_setpoint(self): + m = self._make_model(n_time_points=5) + A_target = [0.3, 0.9, 0.7] + B_target = [1.1, 0.1, 0.5] + setpoint = ( + {m.var[:, "A"]: A_target, m.var[:, "B"]: B_target}, + [(0.0, 0.0), (0.0, 2.0), (2.0, 4.0)], + ) + variables = [pyo.Reference(m.var[:, "B"]), pyo.Reference(m.var[:, "A"])] + m.var_set, m.penalty = get_penalty_from_target(variables, m.time, setpoint) + target = { + (i, j): A_target[j] if i == 1 else B_target[j] + for i in m.var_set + for j in range(len(A_target)) + } + for i, t in m.var_set * m.time: + if t == 0: + idx = 0 + elif t <= 2.0: + idx = 1 + elif t <= 4.0: + idx = 2 + pred_expr = (variables[i][t] - target[i, idx]) ** 2 + self.assertTrue(compare_expressions(pred_expr, m.penalty[i, t].expr)) + self.assertEqual(pyo.value(pred_expr), pyo.value(m.penalty[i, t])) + + def test_bad_argument(self): + m = self._make_model(n_time_points=3) + A_target = [0.4, 0.6, 0.1] + B_target = [0.8, 0.9, 1.3] + setpoint = ({m.var[:, "A"]: A_target, m.var[:, "B"]: B_target}, m.time) + variables = [pyo.Reference(m.var[:, "B"]), pyo.Reference(m.var[:, "A"])] + msg = "tolerance.*can only be used" + with self.assertRaisesRegex(RuntimeError, msg): + m.var_set, m.penalty = get_penalty_from_target( + variables, m.time, setpoint, tolerance=1e-8 + ) + + def test_bad_data_tuple(self): + m = self._make_model(n_time_points=3) + A_target = [0.4, 0.6, 0.1] + B_target = [0.8, 0.9, 1.3] + setpoint = ( + {m.var[:, "A"]: A_target, m.var[:, "B"]: B_target}, + m.time, + "something else", + ) + variables = [pyo.Reference(m.var[:, "B"]), pyo.Reference(m.var[:, "A"])] + msg = "tuple of length two" + with self.assertRaisesRegex(TypeError, msg): + m.var_set, m.penalty = get_penalty_from_target(variables, m.time, setpoint) + + def test_bad_data_tuple_entry_0(self): + m = self._make_model(n_time_points=3) + A_target = [0.4, 0.6, 0.1] + B_target = [0.8, 0.9, 1.3] + setpoint = ([(m.var[:, "A"], A_target), (m.var[:, "B"], B_target)], m.time) + variables = [pyo.Reference(m.var[:, "B"]), pyo.Reference(m.var[:, "A"])] + msg = "must be instance of MutableMapping" + with self.assertRaisesRegex(TypeError, msg): + m.var_set, m.penalty = get_penalty_from_target(variables, m.time, setpoint) + + def test_empty_time_list(self): + m = self._make_model(n_time_points=3) + A_target = [] + B_target = [] + setpoint = ({m.var[:, "A"]: A_target, m.var[:, "B"]: B_target}, []) + variables = [pyo.Reference(m.var[:, "B"]), pyo.Reference(m.var[:, "A"])] + msg = "Time sequence.*is empty" + with self.assertRaisesRegex(ValueError, msg): + m.var_set, m.penalty = get_penalty_from_target(variables, m.time, setpoint) + + def test_bad_time_list(self): + m = self._make_model(n_time_points=3) + A_target = [0.4, 0.6, 0.1] + B_target = [0.8, 0.9, 1.3] + setpoint = ( + dict([(m.var[:, "A"], A_target), (m.var[:, "B"], B_target)]), + [0.0, (0.1, 0.2), 0.3], + ) + variables = [pyo.Reference(m.var[:, "B"]), pyo.Reference(m.var[:, "A"])] + msg = "Second entry of data tuple must be" + with self.assertRaisesRegex(TypeError, msg): + m.var_set, m.penalty = get_penalty_from_target(variables, m.time, setpoint) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/modeling/tests/test_input_constraints.py b/pyomo/contrib/mpc/modeling/tests/test_input_constraints.py new file mode 100644 index 00000000000..e3ba3bf3760 --- /dev/null +++ b/pyomo/contrib/mpc/modeling/tests/test_input_constraints.py @@ -0,0 +1,111 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 +# by the software owners: The Regents of the University of California, through +# Lawrence Berkeley National Laboratory, National Technology & Engineering +# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University +# Research Corporation, et al. All rights reserved. +# +# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and +# license information. +################################################################################# +import pyomo.common.unittest as unittest + +import pyomo.environ as pyo +from pyomo.core.expr.compare import compare_expressions +from pyomo.contrib.mpc.modeling.constraints import get_piecewise_constant_constraints + + +class TestPiecewiseConstantConstraints(unittest.TestCase): + def _make_model(self, n_time_points=3): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=list(range(n_time_points))) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var( + m.time, m.comp, initialize={(i, j): 1.1 * i for i, j in m.time * m.comp} + ) + m.input = pyo.Var(m.time, initialize={i: 3.3 * i for i in m.time}) + return m + + def test_pwc_constraint_backward(self): + # Tests the form of piecewise constant constraints that + # should be used for a backward discretization, i.e. + # that each input at non-sampling points is set equal to + # the next value in the time set, rather than the previous. + # This is the default. + n_time_points = 5 + sample_points = [0, 2, 4] + sample_points_set = set(sample_points) + m = self._make_model(n_time_points=n_time_points) + inputs = [pyo.Reference(m.var[:, "B"]), m.input] + m.input_set, m.pwc_con = get_piecewise_constant_constraints( + inputs, m.time, sample_points + ) + pred_expr = { + # Here we rely on knowledge that delta t == 1 + (i, t): inputs[i][t] - inputs[i][t + 1] == 0 + for t in m.time + if t not in sample_points_set + for i in range(len(inputs)) + } + self.assertEqual(list(m.input_set), list(range(len(inputs)))) + for i in range(len(inputs)): + for t in m.time: + if t in sample_points_set: + self.assertNotIn((i, t), m.pwc_con) + else: + self.assertIn((i, t), m.pwc_con) + self.assertEqual( + pyo.value(pred_expr[i, t]), pyo.value(m.pwc_con[i, t].expr) + ) + self.assertTrue( + compare_expressions(pred_expr[i, t], m.pwc_con[i, t].expr) + ) + + def test_pwc_constraint_forward(self): + # The form of piecewise constant constraints that + # should be used for a forward discretization. + n_time_points = 5 + sample_points = [0, 2, 4] + sample_points_set = set(sample_points) + m = self._make_model(n_time_points=n_time_points) + inputs = [pyo.Reference(m.var[:, "B"]), m.input] + m.input_set, m.pwc_con = get_piecewise_constant_constraints( + inputs, m.time, sample_points, use_next=False + ) + pred_expr = { + # Here we rely on knowledge that delta t == 1 + (i, t): inputs[i][t - 1] - inputs[i][t] == 0 + for t in m.time + if t not in sample_points_set + for i in range(len(inputs)) + } + self.assertEqual(list(m.input_set), list(range(len(inputs)))) + for i in range(len(inputs)): + for t in m.time: + if t in sample_points_set: + self.assertNotIn((i, t), m.pwc_con) + else: + self.assertIn((i, t), m.pwc_con) + self.assertEqual( + pyo.value(pred_expr[i, t]), pyo.value(m.pwc_con[i, t].expr) + ) + self.assertTrue( + compare_expressions(pred_expr[i, t], m.pwc_con[i, t].expr) + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/mpc/modeling/tests/test_terminal.py b/pyomo/contrib/mpc/modeling/tests/test_terminal.py new file mode 100644 index 00000000000..b835f0b1087 --- /dev/null +++ b/pyomo/contrib/mpc/modeling/tests/test_terminal.py @@ -0,0 +1,93 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +################################################################################# +# The Institute for the Design of Advanced Energy Systems Integrated Platform +# Framework (IDAES IP) was produced under the DOE Institute for the +# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 +# by the software owners: The Regents of the University of California, through +# Lawrence Berkeley National Laboratory, National Technology & Engineering +# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University +# Research Corporation, et al. All rights reserved. +# +# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and +# license information. +################################################################################# +import pyomo.common.unittest as unittest + +import pyomo.environ as pyo +from pyomo.core.expr.compare import compare_expressions +from pyomo.contrib.mpc.modeling.terminal import ( + get_penalty_at_time, + get_terminal_penalty, +) +from pyomo.contrib.mpc.data.scalar_data import ScalarData + + +class TestTerminalPenalty(unittest.TestCase): + def _make_model(self, n_time_points=3): + m = pyo.ConcreteModel() + m.time = pyo.Set(initialize=list(range(n_time_points))) + m.comp = pyo.Set(initialize=["A", "B"]) + m.var = pyo.Var( + m.time, m.comp, initialize={(i, j): 1.1 * i for i, j in m.time * m.comp} + ) + m.input = pyo.Var(m.time, initialize={i: 3.3 * i for i in m.time}) + return m + + def test_get_penalty(self): + m = self._make_model() + + variables = [pyo.Reference(m.var[:, "A"]), m.input] + target = ScalarData({m.var[:, "A"]: 4.4, m.input[:]: 5.5}) + weight_data = { + pyo.ComponentUID(m.var[:, "A"]): 10.0, + pyo.ComponentUID(m.input[:]): 0.2, + } + tp = 1 + m.var_set, m.penalty = get_penalty_at_time( + variables, tp, target, weight_data=weight_data + ) + for i in m.var_set: + pred_expr = ( + 10.0 * (m.var[tp, "A"] - 4.4) ** 2 + if i == 0 + else 0.2 * (m.input[tp] - 5.5) ** 2 + ) + self.assertEqual(pyo.value(pred_expr), pyo.value(m.penalty[i].expr)) + self.assertTrue(compare_expressions(pred_expr, m.penalty[i].expr)) + + def test_get_terminal_penalty(self): + m = self._make_model() + + variables = [pyo.Reference(m.var[:, "A"]), m.input] + target = ScalarData({m.var[:, "A"]: 4.4, m.input[:]: 5.5}) + weight_data = { + pyo.ComponentUID(m.var[:, "A"]): 10.0, + pyo.ComponentUID(m.input[:]): 0.2, + } + m.var_set, m.penalty = get_terminal_penalty( + variables, m.time, target, weight_data=weight_data + ) + + for i in m.var_set: + tf = m.time.last() + pred_expr = ( + 10.0 * (m.var[tf, "A"] - 4.4) ** 2 + if i == 0 + else 0.2 * (m.input[tf] - 5.5) ** 2 + ) + self.assertEqual(pyo.value(pred_expr), pyo.value(m.penalty[i].expr)) + self.assertTrue(compare_expressions(pred_expr, m.penalty[i].expr)) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/multistart/high_conf_stop.py b/pyomo/contrib/multistart/high_conf_stop.py index 853e5b7d36e..e18467c1741 100644 --- a/pyomo/contrib/multistart/high_conf_stop.py +++ b/pyomo/contrib/multistart/high_conf_stop.py @@ -28,11 +28,12 @@ def num_one_occurrences(observed_obj_vals, tolerance): if count == 1: # look at previous and next elements to make sure that they are # not within the tolerance - if (i > 0 and - obj_val - sorted_histogram[i - 1][0] <= tolerance): + if i > 0 and obj_val - sorted_histogram[i - 1][0] <= tolerance: continue - if (i < len(sorted_histogram) - 1 and - sorted_histogram[i + 1][0] - obj_val <= tolerance): + if ( + i < len(sorted_histogram) - 1 + and sorted_histogram[i + 1][0] - obj_val <= tolerance + ): continue num_obj_vals_only_observed_once += 1 return num_obj_vals_only_observed_once @@ -49,6 +50,5 @@ def should_stop(solutions, stopping_mass, stopping_delta, tolerance): return False # Do not stop if no solutions have been found. d = stopping_delta c = stopping_mass - confidence = f / n + (2 * sqrt(2) + sqrt(3) - ) * sqrt(log(3 / d) / n) + confidence = f / n + (2 * sqrt(2) + sqrt(3)) * sqrt(log(3 / d) / n) return confidence < c diff --git a/pyomo/contrib/multistart/multi.py b/pyomo/contrib/multistart/multi.py index 2604734c0b1..a0e424d2c95 100644 --- a/pyomo/contrib/multistart/multi.py +++ b/pyomo/contrib/multistart/multi.py @@ -15,7 +15,10 @@ import logging from pyomo.common.config import ( - ConfigBlock, ConfigValue, In, add_docstring_list + ConfigBlock, + ConfigValue, + In, + document_kwargs_from_configdict, ) from pyomo.common.modeling import unique_component_name from pyomo.contrib.multistart.high_conf_stop import should_stop @@ -27,8 +30,8 @@ logger = logging.getLogger('pyomo.contrib.multistart') -@SolverFactory.register('multistart', - doc='MultiStart solver for NLPs') +@SolverFactory.register('multistart', doc='MultiStart solver for NLPs') +@document_kwargs_from_configdict('CONFIG') class MultiStart(object): """Solver wrapper that initializes at multiple starting points. @@ -42,61 +45,85 @@ class MultiStart(object): """ CONFIG = ConfigBlock("MultiStart") - CONFIG.declare("strategy", ConfigValue( - default="rand", domain=In(strategies.keys()), - description="Specify the restart strategy. Defaults to rand.", - doc="""Specify the restart strategy. + CONFIG.declare( + "strategy", + ConfigValue( + default="rand", + domain=In(strategies.keys()), + description="Specify the restart strategy. Defaults to rand.", + doc="""Specify the restart strategy. - "rand": random choice between variable bounds - "midpoint_guess_and_bound": midpoint between current value and farthest bound - "rand_guess_and_bound": random choice between current value and farthest bound - "rand_distributed": random choice among evenly distributed values - "midpoint": exact midpoint between the bounds. If using this option, multiple iterations are useless. - """ - )) - CONFIG.declare("solver", ConfigValue( - default="ipopt", - description="solver to use, defaults to ipopt" - )) - CONFIG.declare("solver_args", ConfigValue( - default={}, - description="Dictionary of keyword arguments to pass to the solver." - )) - CONFIG.declare("iterations", ConfigValue( - default=10, - description="Specify the number of iterations, defaults to 10. " - "If -1 is specified, the high confidence stopping rule will be used" - )) - CONFIG.declare("stopping_mass", ConfigValue( - default=0.5, - description="Maximum allowable estimated missing mass of optima.", - doc="""Maximum allowable estimated missing mass of optima for the + """, + ), + ) + CONFIG.declare( + "solver", + ConfigValue(default="ipopt", description="solver to use, defaults to ipopt"), + ) + CONFIG.declare( + "solver_args", + ConfigValue( + default={}, + description="Dictionary of keyword arguments to pass to the solver.", + ), + ) + CONFIG.declare( + "iterations", + ConfigValue( + default=10, + description="Specify the number of iterations, defaults to 10. " + "If -1 is specified, the high confidence stopping rule will be used", + ), + ) + CONFIG.declare( + "stopping_mass", + ConfigValue( + default=0.5, + description="Maximum allowable estimated missing mass of optima.", + doc="""Maximum allowable estimated missing mass of optima for the high confidence stopping rule, only used with the random strategy. The lower the parameter, the stricter the rule. - Value bounded in (0, 1].""" - )) - CONFIG.declare("stopping_delta", ConfigValue( - default=0.5, - description="1 minus the confidence level required for the stopping rule.", - doc="""1 minus the confidence level required for the stopping rule for the + Value bounded in (0, 1].""", + ), + ) + CONFIG.declare( + "stopping_delta", + ConfigValue( + default=0.5, + description="1 minus the confidence level required for the stopping rule.", + doc="""1 minus the confidence level required for the stopping rule for the high confidence stopping rule, only used with the random strategy. The lower the parameter, the stricter the rule. - Value bounded in (0, 1].""" - )) - CONFIG.declare("suppress_unbounded_warning", ConfigValue( - default=False, domain=bool, - description="True to suppress warning for skipping unbounded variables." - )) - CONFIG.declare("HCS_max_iterations", ConfigValue( - default=1000, - description="Maximum number of iterations before interrupting the high confidence stopping rule." - )) - CONFIG.declare("HCS_tolerance", ConfigValue( - default=0, - description="Tolerance on HCS objective value equality. Defaults to Python float equality precision." - )) - - __doc__ = add_docstring_list(__doc__, CONFIG) + Value bounded in (0, 1].""", + ), + ) + CONFIG.declare( + "suppress_unbounded_warning", + ConfigValue( + default=False, + domain=bool, + description="True to suppress warning for skipping unbounded variables.", + ), + ) + CONFIG.declare( + "HCS_max_iterations", + ConfigValue( + default=1000, + description="Maximum number of iterations before interrupting the high confidence stopping rule.", + ), + ) + CONFIG.declare( + "HCS_tolerance", + ConfigValue( + default=0, + description="Tolerance on HCS objective value equality. Defaults to Python float equality precision.", + ), + ) def available(self, exception_flag=True): """Check if solver is available. @@ -121,16 +148,19 @@ def solve(self, model, **kwds): # Model sense objectives = model.component_data_objects(Objective, active=True) obj = next(objectives, None) - #Check model validity + # Check model validity if next(objectives, None) is not None: raise RuntimeError( - "Multistart solver is unable to handle model with multiple active objectives.") + "Multistart solver is unable to handle model with multiple active objectives." + ) if obj is None: raise RuntimeError( - "Multistart solver is unable to handle model with no active objective.") - if obj.polynomial_degree()==0: + "Multistart solver is unable to handle model with no active objective." + ) + if obj.polynomial_degree() == 0: raise RuntimeError( - "Multistart solver received model with constant objective") + "Multistart solver received model with constant objective" + ) # store objective values and objective/result information for best # solution obtained @@ -143,13 +173,17 @@ def solve(self, model, **kwds): try: # create temporary variable list for value transfer tmp_var_list_name = unique_component_name(model, "_vars_list") - setattr(model, tmp_var_list_name, - list(model.component_data_objects( - ctype=Var, descend_into=True))) + setattr( + model, + tmp_var_list_name, + list(model.component_data_objects(ctype=Var, descend_into=True)), + ) best_result = result = solver.solve(model, **config.solver_args) - if (result.solver.status is SolverStatus.ok and - result.solver.termination_condition is tc.optimal): + if ( + result.solver.status is SolverStatus.ok + and result.solver.termination_condition is tc.optimal + ): obj_val = value(obj.expr) best_objective = obj_val objectives.append(obj_val) @@ -160,14 +194,18 @@ def solve(self, model, **kwds): using_HCS = config.iterations == -1 HCS_completed = False if using_HCS: - assert config.strategy == "rand", \ - "High confidence stopping rule requires rand strategy." + assert ( + config.strategy == "rand" + ), "High confidence stopping rule requires rand strategy." max_iter = config.HCS_max_iterations while num_iter < max_iter: if using_HCS and should_stop( - objectives, config.stopping_mass, - config.stopping_delta, config.HCS_tolerance): + objectives, + config.stopping_mass, + config.stopping_delta, + config.HCS_tolerance, + ): HCS_completed = True break num_iter += 1 @@ -175,8 +213,10 @@ def solve(self, model, **kwds): m = model.clone() if num_iter > 1 else model reinitialize_variables(m, config) result = solver.solve(m, **config.solver_args) - if (result.solver.status is SolverStatus.ok and - result.solver.termination_condition is tc.optimal): + if ( + result.solver.status is SolverStatus.ok + and result.solver.termination_condition is tc.optimal + ): model_objectives = m.component_data_objects(Objective, active=True) mobj = next(model_objectives) obj_val = value(mobj.expr) @@ -197,7 +237,8 @@ def solve(self, model, **kwds): logger.warning( "High confidence stopping rule was unable to complete " "after %s iterations. To increase this limit, change the " - "HCS_max_iterations flag." % num_iter) + "HCS_max_iterations flag." % num_iter + ) # if no better result was found than initial solve, then return # that without needing to copy variables. diff --git a/pyomo/contrib/multistart/reinit.py b/pyomo/contrib/multistart/reinit.py index 0102310691b..214192df648 100644 --- a/pyomo/contrib/multistart/reinit.py +++ b/pyomo/contrib/multistart/reinit.py @@ -64,10 +64,11 @@ def reinitialize_variables(model, config): 'Skipping reinitialization of unbounded variable ' '%s with bounds (%s, %s). ' 'To suppress this message, set the ' - 'suppress_unbounded_warning flag.' - % (var.name, var.lb, var.ub)) + 'suppress_unbounded_warning flag.' % (var.name, var.lb, var.ub) + ) continue val = var.value if var.value is not None else (var.lb + var.ub) / 2 # apply reinitialization strategy to variable - var.set_value(strategies[config.strategy](val, var.lb, var.ub), - skip_validation=True) + var.set_value( + strategies[config.strategy](val, var.lb, var.ub), skip_validation=True + ) diff --git a/pyomo/contrib/multistart/test_multi.py b/pyomo/contrib/multistart/test_multi.py index c833f427297..16c8563ae9e 100644 --- a/pyomo/contrib/multistart/test_multi.py +++ b/pyomo/contrib/multistart/test_multi.py @@ -8,8 +8,15 @@ from pyomo.contrib.multistart.high_conf_stop import should_stop from pyomo.contrib.multistart.reinit import strategies from pyomo.environ import ( - ConcreteModel, Constraint, NonNegativeReals, Objective, SolverFactory, Var, - maximize, sin, value + ConcreteModel, + Constraint, + NonNegativeReals, + Objective, + SolverFactory, + Var, + maximize, + sin, + value, ) @@ -25,16 +32,24 @@ class MultistartTests(unittest.TestCase): def test_as_good_as_standard(self): standard_model = build_model() SolverFactory('ipopt').solve(standard_model) - standard_objective_value = value(next(standard_model.component_data_objects(Objective, active=True))) + standard_objective_value = value( + next(standard_model.component_data_objects(Objective, active=True)) + ) fresh_model = build_model() multistart_iterations = 10 test_trials = 10 for strategy, _ in product(strategies.keys(), range(test_trials)): m2 = fresh_model.clone() - SolverFactory('multistart').solve(m2, iterations=multistart_iterations, strategy=strategy) - clone_objective_value = value(next(m2.component_data_objects(Objective, active=True))) - self.assertGreaterEqual(clone_objective_value, standard_objective_value) # assumes maximization + SolverFactory('multistart').solve( + m2, iterations=multistart_iterations, strategy=strategy + ) + clone_objective_value = value( + next(m2.component_data_objects(Objective, active=True)) + ) + self.assertGreaterEqual( + clone_objective_value, standard_objective_value + ) # assumes maximization def test_as_good_with_HCS_rule(self): """test that the high confidence stopping rule with very lenient @@ -48,13 +63,14 @@ def test_as_good_with_HCS_rule(self): for i in range(5): m2 = build_model() SolverFactory('multistart').solve( - m2, iterations=-1, stopping_mass=0.99, stopping_delta=0.99) + m2, iterations=-1, stopping_mass=0.99, stopping_delta=0.99 + ) m_objectives = m.component_data_objects(Objective, active=True) m_obj = next(m_objectives, None) m2_objectives = m2.component_data_objects(Objective, active=True) - m2_obj = next(m2_objectives,None) + m2_obj = next(m2_objectives, None) # Assert that multistart solver does no worse than standard solver - self.assertTrue((value(m2_obj.expr)) >= (value(m_obj.expr) - .001)) + self.assertTrue((value(m2_obj.expr)) >= (value(m_obj.expr) - 0.001)) del m2 def test_missing_bounds(self): @@ -64,9 +80,11 @@ def test_missing_bounds(self): output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.multistart', logging.WARNING): SolverFactory('multistart').solve(m) - self.assertIn("Skipping reinitialization of unbounded " - "variable x with bounds (0, None).", - output.getvalue().strip()) + self.assertIn( + "Skipping reinitialization of unbounded " + "variable x with bounds (0, None).", + output.getvalue().strip(), + ) def test_var_value_None(self): m = ConcreteModel() @@ -82,11 +100,12 @@ def test_model_infeasible(self): SolverFactory('multistart').solve(m, iterations=2) output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.multistart', logging.WARNING): - SolverFactory('multistart').solve( - m, iterations=-1, HCS_max_iterations=3) - self.assertIn("High confidence stopping rule was unable to " - "complete after 3 iterations.", - output.getvalue().strip()) + SolverFactory('multistart').solve(m, iterations=-1, HCS_max_iterations=3) + self.assertIn( + "High confidence stopping rule was unable to " + "complete after 3 iterations.", + output.getvalue().strip(), + ) def test_should_stop(self): soln = [0] * 149 @@ -115,7 +134,7 @@ def test_no_obj(self): def test_const_obj(self): m = ConcreteModel() m.x = Var() - m.o = Objective(expr = 5) + m.o = Objective(expr=5) with self.assertRaisesRegex(RuntimeError, "constant objective"): SolverFactory('multistart').solve(m) diff --git a/pyomo/contrib/parmest/__init__.py b/pyomo/contrib/parmest/__init__.py index fe87518c8fe..d340885b3fd 100644 --- a/pyomo/contrib/parmest/__init__.py +++ b/pyomo/contrib/parmest/__init__.py @@ -12,20 +12,16 @@ from pyomo.common.deprecation import relocated_module_attribute relocated_module_attribute( - 'create_ef', - 'pyomo.contrib.parmest.utils.create_ef', - 'TBD') + 'create_ef', 'pyomo.contrib.parmest.utils.create_ef', version='6.4.2' +) relocated_module_attribute( - 'ipopt_solver_wrapper', - 'pyomo.contrib.parmest.utils.ipopt_solver_wrapper', - 'TBD') + 'ipopt_solver_wrapper', + 'pyomo.contrib.parmest.utils.ipopt_solver_wrapper', + version='6.4.2', +) relocated_module_attribute( - 'mpi_utils', - 'pyomo.contrib.parmest.utils.mpi_utils', - 'TBD') + 'mpi_utils', 'pyomo.contrib.parmest.utils.mpi_utils', version='6.4.2' +) relocated_module_attribute( - 'scenario_tree', - 'pyomo.contrib.parmest.utils.scenario_tree', - 'TBD') - - + 'scenario_tree', 'pyomo.contrib.parmest.utils.scenario_tree', version='6.4.2' +) diff --git a/pyomo/contrib/parmest/examples/__init__.py b/pyomo/contrib/parmest/examples/__init__.py index 9320e403e95..d93cfd77b3c 100644 --- a/pyomo/contrib/parmest/examples/__init__.py +++ b/pyomo/contrib/parmest/examples/__init__.py @@ -8,4 +8,3 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - diff --git a/pyomo/contrib/parmest/examples/reaction_kinetics/simple_reaction_parmest_example.py b/pyomo/contrib/parmest/examples/reaction_kinetics/simple_reaction_parmest_example.py index f5d185bbc51..719a930251c 100644 --- a/pyomo/contrib/parmest/examples/reaction_kinetics/simple_reaction_parmest_example.py +++ b/pyomo/contrib/parmest/examples/reaction_kinetics/simple_reaction_parmest_example.py @@ -18,13 +18,23 @@ Code provided by Paul Akula. ''' -from pyomo.environ import (ConcreteModel, Param, Var, PositiveReals, Objective, - Constraint, RangeSet, Expression, minimize, exp, value) +from pyomo.environ import ( + ConcreteModel, + Param, + Var, + PositiveReals, + Objective, + Constraint, + RangeSet, + Expression, + minimize, + exp, + value, +) import pyomo.contrib.parmest.parmest as parmest def simple_reaction_model(data): - # Create the concrete model model = ConcreteModel() @@ -37,47 +47,50 @@ def simple_reaction_model(data): model.k = Var(model.rxn, initialize=initial_guess, within=PositiveReals) # reaction product - model.y = Expression(expr=exp(-model.k[1] * - model.x1 * exp(-model.k[2] / model.x2))) - + model.y = Expression(expr=exp(-model.k[1] * model.x1 * exp(-model.k[2] / model.x2))) + # fix all of the regressed parameters model.k.fix() - - #=================================================================== + # =================================================================== # Stage-specific cost computations def ComputeFirstStageCost_rule(model): return 0 + model.FirstStageCost = Expression(rule=ComputeFirstStageCost_rule) def AllMeasurements(m): return (float(data['y']) - m.y) ** 2 + model.SecondStageCost = Expression(rule=AllMeasurements) def total_cost_rule(m): return m.FirstStageCost + m.SecondStageCost - model.Total_Cost_Objective = Objective(rule=total_cost_rule, - sense=minimize) + + model.Total_Cost_Objective = Objective(rule=total_cost_rule, sense=minimize) return model + def main(): # Data from Table 5.2 in Y. Bard, "Nonlinear Parameter Estimation", (pg. 124) - data = [{'experiment': 1, 'x1': 0.1, 'x2': 100, 'y': 0.98}, - {'experiment': 2, 'x1': 0.2, 'x2': 100, 'y': 0.983}, - {'experiment': 3, 'x1': 0.3, 'x2': 100, 'y': 0.955}, - {'experiment': 4, 'x1': 0.4, 'x2': 100, 'y': 0.979}, - {'experiment': 5, 'x1': 0.5, 'x2': 100, 'y': 0.993}, - {'experiment': 6, 'x1': 0.05, 'x2': 200, 'y': 0.626}, - {'experiment': 7, 'x1': 0.1, 'x2': 200, 'y': 0.544}, - {'experiment': 8, 'x1': 0.15, 'x2': 200, 'y': 0.455}, - {'experiment': 9, 'x1': 0.2, 'x2': 200, 'y': 0.225}, - {'experiment': 10, 'x1': 0.25, 'x2': 200, 'y': 0.167}, - {'experiment': 11, 'x1': 0.02, 'x2': 300, 'y': 0.566}, - {'experiment': 12, 'x1': 0.04, 'x2': 300, 'y': 0.317}, - {'experiment': 13, 'x1': 0.06, 'x2': 300, 'y': 0.034}, - {'experiment': 14, 'x1': 0.08, 'x2': 300, 'y': 0.016}, - {'experiment': 15, 'x1': 0.1, 'x2': 300, 'y': 0.006}] + data = [ + {'experiment': 1, 'x1': 0.1, 'x2': 100, 'y': 0.98}, + {'experiment': 2, 'x1': 0.2, 'x2': 100, 'y': 0.983}, + {'experiment': 3, 'x1': 0.3, 'x2': 100, 'y': 0.955}, + {'experiment': 4, 'x1': 0.4, 'x2': 100, 'y': 0.979}, + {'experiment': 5, 'x1': 0.5, 'x2': 100, 'y': 0.993}, + {'experiment': 6, 'x1': 0.05, 'x2': 200, 'y': 0.626}, + {'experiment': 7, 'x1': 0.1, 'x2': 200, 'y': 0.544}, + {'experiment': 8, 'x1': 0.15, 'x2': 200, 'y': 0.455}, + {'experiment': 9, 'x1': 0.2, 'x2': 200, 'y': 0.225}, + {'experiment': 10, 'x1': 0.25, 'x2': 200, 'y': 0.167}, + {'experiment': 11, 'x1': 0.02, 'x2': 300, 'y': 0.566}, + {'experiment': 12, 'x1': 0.04, 'x2': 300, 'y': 0.317}, + {'experiment': 13, 'x1': 0.06, 'x2': 300, 'y': 0.034}, + {'experiment': 14, 'x1': 0.08, 'x2': 300, 'y': 0.016}, + {'experiment': 15, 'x1': 0.1, 'x2': 300, 'y': 0.006}, + ] # ======================================================================= # Parameter estimation without covariance estimate @@ -89,16 +102,17 @@ def main(): print(obj) print(theta) print() - - #======================================================================= + + # ======================================================================= # Estimate both k1 and k2 and compute the covariance matrix theta_names = ['k'] pest = parmest.Estimator(simple_reaction_model, data, theta_names) - n = 15 # total number of data points used in the objective (y in 15 scenarios) + n = 15 # total number of data points used in the objective (y in 15 scenarios) obj, theta, cov = pest.theta_est(calc_cov=True, cov_n=n) print(obj) print(theta) print(cov) + if __name__ == "__main__": main() diff --git a/pyomo/contrib/parmest/examples/reactor_design/__init__.py b/pyomo/contrib/parmest/examples/reactor_design/__init__.py index 9320e403e95..d93cfd77b3c 100644 --- a/pyomo/contrib/parmest/examples/reactor_design/__init__.py +++ b/pyomo/contrib/parmest/examples/reactor_design/__init__.py @@ -8,4 +8,3 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - diff --git a/pyomo/contrib/parmest/examples/reactor_design/bootstrap_example.py b/pyomo/contrib/parmest/examples/reactor_design/bootstrap_example.py index 9bea6cb8884..cf1b8a2de23 100644 --- a/pyomo/contrib/parmest/examples/reactor_design/bootstrap_example.py +++ b/pyomo/contrib/parmest/examples/reactor_design/bootstrap_example.py @@ -12,40 +12,49 @@ import pandas as pd from os.path import join, abspath, dirname import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model +from pyomo.contrib.parmest.examples.reactor_design.reactor_design import ( + reactor_design_model, +) def main(): # Vars to estimate theta_names = ['k1', 'k2', 'k3'] - + # Data file_dirname = dirname(abspath(str(__file__))) file_name = abspath(join(file_dirname, 'reactor_data.csv')) - data = pd.read_csv(file_name) - + data = pd.read_csv(file_name) + # Sum of squared error function - def SSE(model, data): - expr = (float(data['ca']) - model.ca)**2 + \ - (float(data['cb']) - model.cb)**2 + \ - (float(data['cc']) - model.cc)**2 + \ - (float(data['cd']) - model.cd)**2 + def SSE(model, data): + expr = ( + (float(data['ca']) - model.ca) ** 2 + + (float(data['cb']) - model.cb) ** 2 + + (float(data['cc']) - model.cc) ** 2 + + (float(data['cd']) - model.cd) ** 2 + ) return expr - + # Create an instance of the parmest estimator pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE) - + # Parameter estimation obj, theta = pest.theta_est() - - # Parameter estimation with bootstrap resampling + + # Parameter estimation with bootstrap resampling bootstrap_theta = pest.theta_est_bootstrap(50) - + # Plot results parmest.graphics.pairwise_plot(bootstrap_theta, title='Bootstrap theta') - parmest.graphics.pairwise_plot(bootstrap_theta, theta, 0.8, ['MVN', 'KDE', 'Rect'], - title='Bootstrap theta with confidence regions') + parmest.graphics.pairwise_plot( + bootstrap_theta, + theta, + 0.8, + ['MVN', 'KDE', 'Rect'], + title='Bootstrap theta with confidence regions', + ) + if __name__ == "__main__": main() - diff --git a/pyomo/contrib/parmest/examples/reactor_design/datarec_example.py b/pyomo/contrib/parmest/examples/reactor_design/datarec_example.py index 8ce11b8426e..811571e20ed 100644 --- a/pyomo/contrib/parmest/examples/reactor_design/datarec_example.py +++ b/pyomo/contrib/parmest/examples/reactor_design/datarec_example.py @@ -12,7 +12,9 @@ import numpy as np import pandas as pd import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model +from pyomo.contrib.parmest.examples.reactor_design.reactor_design import ( + reactor_design_model, +) np.random.seed(1234) @@ -21,9 +23,10 @@ def reactor_design_model_for_datarec(data): # Unfix inlet concentration for data rec model = reactor_design_model(data) model.caf.fixed = False - + return model + def generate_data(): ### Generate data based on real sv, caf, ca, cb, cc, and cd sv_real = 1.05 @@ -32,63 +35,66 @@ def generate_data(): cb_real = 1060.8 cc_real = 1683.9 cd_real = 1898.5 - - data = pd.DataFrame() + + data = pd.DataFrame() ndata = 200 # Normal distribution, mean = 3400, std = 500 data['ca'] = 500 * np.random.randn(ndata) + 3400 # Random distribution between 500 and 1500 - data['cb'] = np.random.rand(ndata)*1000+500 + data['cb'] = np.random.rand(ndata) * 1000 + 500 # Lognormal distribution - data['cc'] = np.random.lognormal(np.log(1600),0.25,ndata) + data['cc'] = np.random.lognormal(np.log(1600), 0.25, ndata) # Triangular distribution between 1000 and 2000 - data['cd'] = np.random.triangular(1000,1800,3000,size=ndata) - + data['cd'] = np.random.triangular(1000, 1800, 3000, size=ndata) + data['sv'] = sv_real data['caf'] = caf_real - + return data + def main(): # Generate data data = generate_data() data_std = data.std() - + # Define sum of squared error objective function for data rec - def SSE(model, data): - expr = ((float(data['ca']) - model.ca)/float(data_std['ca']))**2 + \ - ((float(data['cb']) - model.cb)/float(data_std['cb']))**2 + \ - ((float(data['cc']) - model.cc)/float(data_std['cc']))**2 + \ - ((float(data['cd']) - model.cd)/float(data_std['cd']))**2 + def SSE(model, data): + expr = ( + ((float(data['ca']) - model.ca) / float(data_std['ca'])) ** 2 + + ((float(data['cb']) - model.cb) / float(data_std['cb'])) ** 2 + + ((float(data['cc']) - model.cc) / float(data_std['cc'])) ** 2 + + ((float(data['cd']) - model.cd) / float(data_std['cd'])) ** 2 + ) return expr - + ### Data reconciliation - theta_names = [] # no variables to estimate, use initialized values - + theta_names = [] # no variables to estimate, use initialized values + pest = parmest.Estimator(reactor_design_model_for_datarec, data, theta_names, SSE) - + obj, theta, data_rec = pest.theta_est(return_values=['ca', 'cb', 'cc', 'cd', 'caf']) print(obj) print(theta) - - parmest.graphics.grouped_boxplot(data[['ca', 'cb', 'cc', 'cd']], - data_rec[['ca', 'cb', 'cc', 'cd']], - group_names=['Data', 'Data Rec']) - - + + parmest.graphics.grouped_boxplot( + data[['ca', 'cb', 'cc', 'cd']], + data_rec[['ca', 'cb', 'cc', 'cd']], + group_names=['Data', 'Data Rec'], + ) + ### Parameter estimation using reconciled data theta_names = ['k1', 'k2', 'k3'] data_rec['sv'] = data['sv'] - + pest = parmest.Estimator(reactor_design_model, data_rec, theta_names, SSE) obj, theta = pest.theta_est() print(obj) print(theta) - - theta_real = {'k1': 5.0/6.0, - 'k2': 5.0/3.0, - 'k3': 1.0/6000.0} + + theta_real = {'k1': 5.0 / 6.0, 'k2': 5.0 / 3.0, 'k3': 1.0 / 6000.0} print(theta_real) + if __name__ == "__main__": main() diff --git a/pyomo/contrib/parmest/examples/reactor_design/leaveNout_example.py b/pyomo/contrib/parmest/examples/reactor_design/leaveNout_example.py index fbb29930e61..95af53e63d3 100644 --- a/pyomo/contrib/parmest/examples/reactor_design/leaveNout_example.py +++ b/pyomo/contrib/parmest/examples/reactor_design/leaveNout_example.py @@ -13,76 +13,86 @@ import pandas as pd from os.path import join, abspath, dirname import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model +from pyomo.contrib.parmest.examples.reactor_design.reactor_design import ( + reactor_design_model, +) + def main(): - # Vars to estimate theta_names = ['k1', 'k2', 'k3'] - + # Data file_dirname = dirname(abspath(str(__file__))) file_name = abspath(join(file_dirname, 'reactor_data.csv')) - data = pd.read_csv(file_name) - + data = pd.read_csv(file_name) + # Create more data for the example N = 50 df_std = data.std().to_frame().transpose() df_rand = pd.DataFrame(np.random.normal(size=N)) df_sample = data.sample(N, replace=True).reset_index(drop=True) - data = df_sample + df_rand.dot(df_std)/10 - + data = df_sample + df_rand.dot(df_std) / 10 + # Sum of squared error function - def SSE(model, data): - expr = (float(data['ca']) - model.ca)**2 + \ - (float(data['cb']) - model.cb)**2 + \ - (float(data['cc']) - model.cc)**2 + \ - (float(data['cd']) - model.cd)**2 + def SSE(model, data): + expr = ( + (float(data['ca']) - model.ca) ** 2 + + (float(data['cb']) - model.cb) ** 2 + + (float(data['cc']) - model.cc) ** 2 + + (float(data['cd']) - model.cd) ** 2 + ) return expr - + # Create an instance of the parmest estimator pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE) - + # Parameter estimation obj, theta = pest.theta_est() print(obj) print(theta) - + ### Parameter estimation with 'leave-N-out' - # Example use case: For each combination of data where one data point is left + # Example use case: For each combination of data where one data point is left # out, estimate theta - lNo_theta = pest.theta_est_leaveNout(1) + lNo_theta = pest.theta_est_leaveNout(1) print(lNo_theta.head()) - + parmest.graphics.pairwise_plot(lNo_theta, theta) - + ### Leave one out/boostrap analysis - # Example use case: leave 25 data points out, run 20 bootstrap samples with the - # remaining points, determine if the theta estimate using the points left out - # is inside or outside an alpha region based on the bootstrap samples, repeat + # Example use case: leave 25 data points out, run 20 bootstrap samples with the + # remaining points, determine if the theta estimate using the points left out + # is inside or outside an alpha region based on the bootstrap samples, repeat # 5 times. Results are stored as a list of tuples, see API docs for information. lNo = 25 lNo_samples = 5 bootstrap_samples = 20 dist = 'MVN' alphas = [0.7, 0.8, 0.9] - - results = pest.leaveNout_bootstrap_test(lNo, lNo_samples, bootstrap_samples, - dist, alphas, seed=524) - + + results = pest.leaveNout_bootstrap_test( + lNo, lNo_samples, bootstrap_samples, dist, alphas, seed=524 + ) + # Plot results for a single value of alpha alpha = 0.8 for i in range(lNo_samples): theta_est_N = results[i][1] bootstrap_results = results[i][2] - parmest.graphics.pairwise_plot(bootstrap_results, theta_est_N, alpha, ['MVN'], - title= 'Alpha: '+ str(alpha) + ', '+ \ - str(theta_est_N.loc[0,alpha])) - + parmest.graphics.pairwise_plot( + bootstrap_results, + theta_est_N, + alpha, + ['MVN'], + title='Alpha: ' + str(alpha) + ', ' + str(theta_est_N.loc[0, alpha]), + ) + # Extract the percent of points that are within the alpha region - r = [results[i][1].loc[0,alpha] for i in range(lNo_samples)] - percent_true = sum(r)/len(r) + r = [results[i][1].loc[0, alpha] for i in range(lNo_samples)] + percent_true = sum(r) / len(r) print(percent_true) + if __name__ == "__main__": main() diff --git a/pyomo/contrib/parmest/examples/reactor_design/likelihood_ratio_example.py b/pyomo/contrib/parmest/examples/reactor_design/likelihood_ratio_example.py index c20380c5a66..13a40774740 100644 --- a/pyomo/contrib/parmest/examples/reactor_design/likelihood_ratio_example.py +++ b/pyomo/contrib/parmest/examples/reactor_design/likelihood_ratio_example.py @@ -14,46 +14,51 @@ from itertools import product from os.path import join, abspath, dirname import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model +from pyomo.contrib.parmest.examples.reactor_design.reactor_design import ( + reactor_design_model, +) def main(): # Vars to estimate theta_names = ['k1', 'k2', 'k3'] - + # Data file_dirname = dirname(abspath(str(__file__))) file_name = abspath(join(file_dirname, 'reactor_data.csv')) - data = pd.read_csv(file_name) - + data = pd.read_csv(file_name) + # Sum of squared error function - def SSE(model, data): - expr = (float(data['ca']) - model.ca)**2 + \ - (float(data['cb']) - model.cb)**2 + \ - (float(data['cc']) - model.cc)**2 + \ - (float(data['cd']) - model.cd)**2 + def SSE(model, data): + expr = ( + (float(data['ca']) - model.ca) ** 2 + + (float(data['cb']) - model.cb) ** 2 + + (float(data['cc']) - model.cc) ** 2 + + (float(data['cd']) - model.cd) ** 2 + ) return expr - + # Create an instance of the parmest estimator pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE) - + # Parameter estimation obj, theta = pest.theta_est() - + # Find the objective value at each theta estimate k1 = [0.8, 0.85, 0.9] k2 = [1.6, 1.65, 1.7] k3 = [0.00016, 0.000165, 0.00017] theta_vals = pd.DataFrame(list(product(k1, k2, k3)), columns=['k1', 'k2', 'k3']) obj_at_theta = pest.objective_at_theta(theta_vals) - + # Run the likelihood ratio test LR = pest.likelihood_ratio_test(obj_at_theta, obj, [0.8, 0.85, 0.9, 0.95]) - + # Plot results - parmest.graphics.pairwise_plot(LR, theta, 0.9, - title='LR results within 90% confidence region') - + parmest.graphics.pairwise_plot( + LR, theta, 0.9, title='LR results within 90% confidence region' + ) + + if __name__ == "__main__": main() - diff --git a/pyomo/contrib/parmest/examples/reactor_design/multisensor_data_example.py b/pyomo/contrib/parmest/examples/reactor_design/multisensor_data_example.py index 5c7c4062441..bc564cbdfd3 100644 --- a/pyomo/contrib/parmest/examples/reactor_design/multisensor_data_example.py +++ b/pyomo/contrib/parmest/examples/reactor_design/multisensor_data_example.py @@ -12,35 +12,40 @@ import pandas as pd from os.path import join, abspath, dirname import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model +from pyomo.contrib.parmest.examples.reactor_design.reactor_design import ( + reactor_design_model, +) def main(): # Parameter estimation using multisensor data - + # Vars to estimate theta_names = ['k1', 'k2', 'k3'] - + # Data, includes multiple sensors for ca and cc file_dirname = dirname(abspath(str(__file__))) file_name = abspath(join(file_dirname, 'reactor_data_multisensor.csv')) - data = pd.read_csv(file_name) - + data = pd.read_csv(file_name) + # Sum of squared error function - def SSE_multisensor(model, data): - expr = ((float(data['ca1']) - model.ca)**2)*(1/3) + \ - ((float(data['ca2']) - model.ca)**2)*(1/3) + \ - ((float(data['ca3']) - model.ca)**2)*(1/3) + \ - (float(data['cb']) - model.cb)**2 + \ - ((float(data['cc1']) - model.cc)**2)*(1/2) + \ - ((float(data['cc2']) - model.cc)**2)*(1/2) + \ - (float(data['cd']) - model.cd)**2 + def SSE_multisensor(model, data): + expr = ( + ((float(data['ca1']) - model.ca) ** 2) * (1 / 3) + + ((float(data['ca2']) - model.ca) ** 2) * (1 / 3) + + ((float(data['ca3']) - model.ca) ** 2) * (1 / 3) + + (float(data['cb']) - model.cb) ** 2 + + ((float(data['cc1']) - model.cc) ** 2) * (1 / 2) + + ((float(data['cc2']) - model.cc) ** 2) * (1 / 2) + + (float(data['cd']) - model.cd) ** 2 + ) return expr - + pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE_multisensor) obj, theta = pest.theta_est() print(obj) print(theta) - + + if __name__ == "__main__": main() diff --git a/pyomo/contrib/parmest/examples/reactor_design/parameter_estimation_example.py b/pyomo/contrib/parmest/examples/reactor_design/parameter_estimation_example.py index 3e25b349fc6..334dfa264a4 100644 --- a/pyomo/contrib/parmest/examples/reactor_design/parameter_estimation_example.py +++ b/pyomo/contrib/parmest/examples/reactor_design/parameter_estimation_example.py @@ -12,43 +12,47 @@ import pandas as pd from os.path import join, abspath, dirname import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model +from pyomo.contrib.parmest.examples.reactor_design.reactor_design import ( + reactor_design_model, +) def main(): # Vars to estimate theta_names = ['k1', 'k2', 'k3'] - + # Data file_dirname = dirname(abspath(str(__file__))) file_name = abspath(join(file_dirname, 'reactor_data.csv')) - data = pd.read_csv(file_name) - + data = pd.read_csv(file_name) + # Sum of squared error function - def SSE(model, data): - expr = (float(data['ca']) - model.ca)**2 + \ - (float(data['cb']) - model.cb)**2 + \ - (float(data['cc']) - model.cc)**2 + \ - (float(data['cd']) - model.cd)**2 + def SSE(model, data): + expr = ( + (float(data['ca']) - model.ca) ** 2 + + (float(data['cb']) - model.cb) ** 2 + + (float(data['cc']) - model.cc) ** 2 + + (float(data['cd']) - model.cd) ** 2 + ) return expr - + # Create an instance of the parmest estimator pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE) - + # Parameter estimation obj, theta = pest.theta_est() - - # Assert statements compare parameter estimation (theta) to an expected value - k1_expected = 5.0/6.0 - k2_expected = 5.0/3.0 - k3_expected = 1.0/6000.0 - relative_error = abs(theta['k1'] - k1_expected)/k1_expected + + # Assert statements compare parameter estimation (theta) to an expected value + k1_expected = 5.0 / 6.0 + k2_expected = 5.0 / 3.0 + k3_expected = 1.0 / 6000.0 + relative_error = abs(theta['k1'] - k1_expected) / k1_expected assert relative_error < 0.05 - relative_error = abs(theta['k2'] - k2_expected)/k2_expected + relative_error = abs(theta['k2'] - k2_expected) / k2_expected assert relative_error < 0.05 - relative_error = abs(theta['k3'] - k3_expected)/k3_expected + relative_error = abs(theta['k3'] - k3_expected) / k3_expected assert relative_error < 0.05 - + + if __name__ == "__main__": main() - diff --git a/pyomo/contrib/parmest/examples/reactor_design/reactor_design.py b/pyomo/contrib/parmest/examples/reactor_design/reactor_design.py index bfd79cd4cca..f4cd6c8dbf5 100644 --- a/pyomo/contrib/parmest/examples/reactor_design/reactor_design.py +++ b/pyomo/contrib/parmest/examples/reactor_design/reactor_design.py @@ -14,52 +14,67 @@ """ import pandas as pd from pyomo.environ import ( - ConcreteModel, Param, Var, PositiveReals, Objective, Constraint, maximize, - SolverFactory + ConcreteModel, + Param, + Var, + PositiveReals, + Objective, + Constraint, + maximize, + SolverFactory, ) def reactor_design_model(data): - # Create the concrete model model = ConcreteModel() - + # Rate constants - model.k1 = Param(initialize = 5.0/6.0, within=PositiveReals, mutable=True) # min^-1 - model.k2 = Param(initialize = 5.0/3.0, within=PositiveReals, mutable=True) # min^-1 - model.k3 = Param(initialize = 1.0/6000.0, within=PositiveReals, mutable=True) # m^3/(gmol min) + model.k1 = Param(initialize=5.0 / 6.0, within=PositiveReals, mutable=True) # min^-1 + model.k2 = Param(initialize=5.0 / 3.0, within=PositiveReals, mutable=True) # min^-1 + model.k3 = Param( + initialize=1.0 / 6000.0, within=PositiveReals, mutable=True + ) # m^3/(gmol min) # Inlet concentration of A, gmol/m^3 - model.caf = Param(initialize = float(data['caf']), within=PositiveReals) - - # Space velocity (flowrate/volume) - model.sv = Param(initialize = float(data['sv']), within=PositiveReals) - + model.caf = Param(initialize=float(data['caf']), within=PositiveReals) + + # Space velocity (flowrate/volume) + model.sv = Param(initialize=float(data['sv']), within=PositiveReals) + # Outlet concentration of each component - model.ca = Var(initialize = 5000.0, within=PositiveReals) - model.cb = Var(initialize = 2000.0, within=PositiveReals) - model.cc = Var(initialize = 2000.0, within=PositiveReals) - model.cd = Var(initialize = 1000.0, within=PositiveReals) - + model.ca = Var(initialize=5000.0, within=PositiveReals) + model.cb = Var(initialize=2000.0, within=PositiveReals) + model.cc = Var(initialize=2000.0, within=PositiveReals) + model.cd = Var(initialize=1000.0, within=PositiveReals) + # Objective - model.obj = Objective(expr = model.cb, sense=maximize) - + model.obj = Objective(expr=model.cb, sense=maximize) + # Constraints - model.ca_bal = Constraint(expr = (0 == model.sv * model.caf \ - - model.sv * model.ca - model.k1 * model.ca \ - - 2.0 * model.k3 * model.ca ** 2.0)) - - model.cb_bal = Constraint(expr=(0 == -model.sv * model.cb \ - + model.k1 * model.ca - model.k2 * model.cb)) - - model.cc_bal = Constraint(expr=(0 == -model.sv * model.cc \ - + model.k2 * model.cb)) - - model.cd_bal = Constraint(expr=(0 == -model.sv * model.cd \ - + model.k3 * model.ca ** 2.0)) - + model.ca_bal = Constraint( + expr=( + 0 + == model.sv * model.caf + - model.sv * model.ca + - model.k1 * model.ca + - 2.0 * model.k3 * model.ca**2.0 + ) + ) + + model.cb_bal = Constraint( + expr=(0 == -model.sv * model.cb + model.k1 * model.ca - model.k2 * model.cb) + ) + + model.cc_bal = Constraint(expr=(0 == -model.sv * model.cc + model.k2 * model.cb)) + + model.cd_bal = Constraint( + expr=(0 == -model.sv * model.cd + model.k3 * model.ca**2.0) + ) + return model + def main(): # For a range of sv values, return ca, cb, cc, and cd results = [] @@ -70,9 +85,10 @@ def main(): solver = SolverFactory('ipopt') solver.solve(model) results.append([sv, caf, model.ca(), model.cb(), model.cc(), model.cd()]) - + results = pd.DataFrame(results, columns=['sv', 'caf', 'ca', 'cb', 'cc', 'cd']) print(results) - + + if __name__ == "__main__": main() diff --git a/pyomo/contrib/parmest/examples/reactor_design/timeseries_data_example.py b/pyomo/contrib/parmest/examples/reactor_design/timeseries_data_example.py index 76b74c445fe..da2ab1874c9 100644 --- a/pyomo/contrib/parmest/examples/reactor_design/timeseries_data_example.py +++ b/pyomo/contrib/parmest/examples/reactor_design/timeseries_data_example.py @@ -13,40 +13,43 @@ from os.path import join, abspath, dirname import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model +from pyomo.contrib.parmest.examples.reactor_design.reactor_design import ( + reactor_design_model, +) def main(): # Parameter estimation using timeseries data - + # Vars to estimate theta_names = ['k1', 'k2', 'k3'] - + # Data, includes multiple sensors for ca and cc file_dirname = dirname(abspath(str(__file__))) file_name = abspath(join(file_dirname, 'reactor_data_timeseries.csv')) - data = pd.read_csv(file_name) - + data = pd.read_csv(file_name) + # Group time series data into experiments, return the mean value for sv and caf # Returns a list of dictionaries data_ts = parmest.group_data(data, 'experiment', ['sv', 'caf']) - - def SSE_timeseries(model, data): + + def SSE_timeseries(model, data): expr = 0 for val in data['ca']: - expr = expr + ((float(val) - model.ca)**2)*(1/len(data['ca'])) + expr = expr + ((float(val) - model.ca) ** 2) * (1 / len(data['ca'])) for val in data['cb']: - expr = expr + ((float(val) - model.cb)**2)*(1/len(data['cb'])) + expr = expr + ((float(val) - model.cb) ** 2) * (1 / len(data['cb'])) for val in data['cc']: - expr = expr + ((float(val) - model.cc)**2)*(1/len(data['cc'])) + expr = expr + ((float(val) - model.cc) ** 2) * (1 / len(data['cc'])) for val in data['cd']: - expr = expr + ((float(val) - model.cd)**2)*(1/len(data['cd'])) + expr = expr + ((float(val) - model.cd) ** 2) * (1 / len(data['cd'])) return expr - + pest = parmest.Estimator(reactor_design_model, data_ts, theta_names, SSE_timeseries) obj, theta = pest.theta_est() print(obj) print(theta) + if __name__ == "__main__": main() diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/bootstrap_example.py b/pyomo/contrib/parmest/examples/rooney_biegler/bootstrap_example.py index cfc84548eb0..f686bbd933d 100644 --- a/pyomo/contrib/parmest/examples/rooney_biegler/bootstrap_example.py +++ b/pyomo/contrib/parmest/examples/rooney_biegler/bootstrap_example.py @@ -11,38 +11,47 @@ import pandas as pd import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import rooney_biegler_model +from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import ( + rooney_biegler_model, +) def main(): # Vars to estimate theta_names = ['asymptote', 'rate_constant'] - + # Data - data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0], - [4,16.0],[5,15.6],[7,19.8]], - columns=['hour', 'y']) - + data = pd.DataFrame( + data=[[1, 8.3], [2, 10.3], [3, 19.0], [4, 16.0], [5, 15.6], [7, 19.8]], + columns=['hour', 'y'], + ) + # Sum of squared error function - def SSE(model, data): - expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index) + def SSE(model, data): + expr = sum( + (data.y[i] - model.response_function[data.hour[i]]) ** 2 for i in data.index + ) return expr - + # Create an instance of the parmest estimator pest = parmest.Estimator(rooney_biegler_model, data, theta_names, SSE) - + # Parameter estimation obj, theta = pest.theta_est() - + # Parameter estimation with bootstrap resampling bootstrap_theta = pest.theta_est_bootstrap(50, seed=4581) - + # Plot results parmest.graphics.pairwise_plot(bootstrap_theta, title='Bootstrap theta') - parmest.graphics.pairwise_plot(bootstrap_theta, theta, 0.8, ['MVN', 'KDE', 'Rect'], - title='Bootstrap theta with confidence regions') - + parmest.graphics.pairwise_plot( + bootstrap_theta, + theta, + 0.8, + ['MVN', 'KDE', 'Rect'], + title='Bootstrap theta with confidence regions', + ) + + if __name__ == "__main__": main() - - \ No newline at end of file diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/likelihood_ratio_example.py b/pyomo/contrib/parmest/examples/rooney_biegler/likelihood_ratio_example.py index 392fc5d023c..5e54a33abda 100644 --- a/pyomo/contrib/parmest/examples/rooney_biegler/likelihood_ratio_example.py +++ b/pyomo/contrib/parmest/examples/rooney_biegler/likelihood_ratio_example.py @@ -13,43 +13,50 @@ import pandas as pd from itertools import product import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import rooney_biegler_model +from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import ( + rooney_biegler_model, +) def main(): # Vars to estimate theta_names = ['asymptote', 'rate_constant'] - + # Data - data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0], - [4,16.0],[5,15.6],[7,19.8]], - columns=['hour', 'y']) - + data = pd.DataFrame( + data=[[1, 8.3], [2, 10.3], [3, 19.0], [4, 16.0], [5, 15.6], [7, 19.8]], + columns=['hour', 'y'], + ) + # Sum of squared error function - def SSE(model, data): - expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index) + def SSE(model, data): + expr = sum( + (data.y[i] - model.response_function[data.hour[i]]) ** 2 for i in data.index + ) return expr - + # Create an instance of the parmest estimator pest = parmest.Estimator(rooney_biegler_model, data, theta_names, SSE) - + # Parameter estimation obj, theta = pest.theta_est() - + # Find the objective value at each theta estimate asym = np.arange(10, 30, 2) rate = np.arange(0, 1.5, 0.1) - theta_vals = pd.DataFrame(list(product(asym, rate)), columns=['asymptote', 'rate_constant']) + theta_vals = pd.DataFrame( + list(product(asym, rate)), columns=['asymptote', 'rate_constant'] + ) obj_at_theta = pest.objective_at_theta(theta_vals) - + # Run the likelihood ratio test LR = pest.likelihood_ratio_test(obj_at_theta, obj, [0.8, 0.85, 0.9, 0.95]) - + # Plot results - parmest.graphics.pairwise_plot(LR, theta, 0.8, - title='LR results within 80% confidence region') + parmest.graphics.pairwise_plot( + LR, theta, 0.8, title='LR results within 80% confidence region' + ) + if __name__ == "__main__": main() - - \ No newline at end of file diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/parameter_estimation_example.py b/pyomo/contrib/parmest/examples/rooney_biegler/parameter_estimation_example.py index fa8ff310396..9af33217fe4 100644 --- a/pyomo/contrib/parmest/examples/rooney_biegler/parameter_estimation_example.py +++ b/pyomo/contrib/parmest/examples/rooney_biegler/parameter_estimation_example.py @@ -11,41 +11,50 @@ import pandas as pd import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import rooney_biegler_model +from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import ( + rooney_biegler_model, +) def main(): # Vars to estimate theta_names = ['asymptote', 'rate_constant'] - + # Data - data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0], - [4,16.0],[5,15.6],[7,19.8]], - columns=['hour', 'y']) - + data = pd.DataFrame( + data=[[1, 8.3], [2, 10.3], [3, 19.0], [4, 16.0], [5, 15.6], [7, 19.8]], + columns=['hour', 'y'], + ) + # Sum of squared error function - def SSE(model, data): - expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index) + def SSE(model, data): + expr = sum( + (data.y[i] - model.response_function[data.hour[i]]) ** 2 for i in data.index + ) return expr - + # Create an instance of the parmest estimator pest = parmest.Estimator(rooney_biegler_model, data, theta_names, SSE) - + # Parameter estimation and covariance - n = 6 # total number of data points used in the objective (y in 6 scenarios) + n = 6 # total number of data points used in the objective (y in 6 scenarios) obj, theta, cov = pest.theta_est(calc_cov=True, cov_n=n) - + # Plot theta estimates using a multivariate Gaussian distribution - parmest.graphics.pairwise_plot((theta, cov, 100), theta_star=theta, alpha=0.8, - distributions=['MVN'], title='Theta estimates within 80% confidence region') - - # Assert statements compare parameter estimation (theta) to an expected value - relative_error = abs(theta['asymptote'] - 19.1426)/19.1426 + parmest.graphics.pairwise_plot( + (theta, cov, 100), + theta_star=theta, + alpha=0.8, + distributions=['MVN'], + title='Theta estimates within 80% confidence region', + ) + + # Assert statements compare parameter estimation (theta) to an expected value + relative_error = abs(theta['asymptote'] - 19.1426) / 19.1426 assert relative_error < 0.01 - relative_error = abs(theta['rate_constant'] - 0.5311)/0.5311 + relative_error = abs(theta['rate_constant'] - 0.5311) / 0.5311 assert relative_error < 0.01 + if __name__ == "__main__": main() - - \ No newline at end of file diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py b/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py index 49859c46d76..5a0e1238e85 100644 --- a/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py +++ b/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py @@ -18,28 +18,35 @@ import pandas as pd import pyomo.environ as pyo -def rooney_biegler_model(data): +def rooney_biegler_model(data): model = pyo.ConcreteModel() - model.asymptote = pyo.Var(initialize = 15) - model.rate_constant = pyo.Var(initialize = 0.5) + model.asymptote = pyo.Var(initialize=15) + model.rate_constant = pyo.Var(initialize=0.5) def response_rule(m, h): expr = m.asymptote * (1 - pyo.exp(-m.rate_constant * h)) return expr - model.response_function = pyo.Expression(data.hour, rule = response_rule) + + model.response_function = pyo.Expression(data.hour, rule=response_rule) def SSE_rule(m): - return sum((data.y[i] - m.response_function[data.hour[i]])**2 for i in data.index) - model.SSE = pyo.Objective(rule = SSE_rule, sense=pyo.minimize) + return sum( + (data.y[i] - m.response_function[data.hour[i]]) ** 2 for i in data.index + ) + + model.SSE = pyo.Objective(rule=SSE_rule, sense=pyo.minimize) return model + def main(): # These were taken from Table A1.4 in Bates and Watts (1988). - data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0],[4,16.0],[5,15.6],[7,19.8]], - columns=['hour', 'y']) + data = pd.DataFrame( + data=[[1, 8.3], [2, 10.3], [3, 19.0], [4, 16.0], [5, 15.6], [7, 19.8]], + columns=['hour', 'y'], + ) model = rooney_biegler_model(data) solver = pyo.SolverFactory('ipopt') @@ -47,7 +54,7 @@ def main(): print('asymptote = ', model.asymptote()) print('rate constant = ', model.rate_constant()) - + + if __name__ == '__main__': main() - diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler_with_constraint.py b/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler_with_constraint.py new file mode 100644 index 00000000000..2582e3fe928 --- /dev/null +++ b/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler_with_constraint.py @@ -0,0 +1,63 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +""" +Rooney Biegler model, based on Rooney, W. C. and Biegler, L. T. (2001). Design for +model parameter uncertainty using nonlinear confidence regions. AIChE Journal, +47(8), 1794-1804. +""" + +import pandas as pd +import pyomo.environ as pyo + + +def rooney_biegler_model_with_constraint(data): + model = pyo.ConcreteModel() + + model.asymptote = pyo.Var(initialize=15) + model.rate_constant = pyo.Var(initialize=0.5) + model.response_function = pyo.Var(data.hour, initialize=0.0) + + # changed from expression to constraint + def response_rule(m, h): + return m.response_function[h] == m.asymptote * ( + 1 - pyo.exp(-m.rate_constant * h) + ) + + model.response_function_constraint = pyo.Constraint(data.hour, rule=response_rule) + + def SSE_rule(m): + return sum( + (data.y[i] - m.response_function[data.hour[i]]) ** 2 for i in data.index + ) + + model.SSE = pyo.Objective(rule=SSE_rule, sense=pyo.minimize) + + return model + + +def main(): + # These were taken from Table A1.4 in Bates and Watts (1988). + data = pd.DataFrame( + data=[[1, 8.3], [2, 10.3], [3, 19.0], [4, 16.0], [5, 15.6], [7, 19.8]], + columns=['hour', 'y'], + ) + + model = rooney_biegler_model_with_constraint(data) + solver = pyo.SolverFactory('ipopt') + solver.solve(model) + + print('asymptote = ', model.asymptote()) + print('rate constant = ', model.rate_constant()) + + +if __name__ == '__main__': + main() diff --git a/pyomo/contrib/parmest/examples/semibatch/__init__.py b/pyomo/contrib/parmest/examples/semibatch/__init__.py index 9320e403e95..d93cfd77b3c 100644 --- a/pyomo/contrib/parmest/examples/semibatch/__init__.py +++ b/pyomo/contrib/parmest/examples/semibatch/__init__.py @@ -8,4 +8,3 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - diff --git a/pyomo/contrib/parmest/examples/semibatch/parallel_example.py b/pyomo/contrib/parmest/examples/semibatch/parallel_example.py index 663d5b0cb3c..ff1287811cf 100644 --- a/pyomo/contrib/parmest/examples/semibatch/parallel_example.py +++ b/pyomo/contrib/parmest/examples/semibatch/parallel_example.py @@ -21,35 +21,37 @@ import pyomo.contrib.parmest.parmest as parmest from pyomo.contrib.parmest.examples.semibatch.semibatch import generate_model + def main(): # Vars to estimate theta_names = ['k1', 'k2', 'E1', 'E2'] - + # Data, list of json file names - data = [] + data = [] file_dirname = dirname(abspath(str(__file__))) for exp_num in range(10): - file_name = abspath(join(file_dirname, 'exp'+str(exp_num+1)+'.out')) + file_name = abspath(join(file_dirname, 'exp' + str(exp_num + 1) + '.out')) data.append(file_name) - - # Note, the model already includes a 'SecondStageCost' expression + + # Note, the model already includes a 'SecondStageCost' expression # for sum of squared error that will be used in parameter estimation - + pest = parmest.Estimator(generate_model, data, theta_names) - + ### Parameter estimation with bootstrap resampling bootstrap_theta = pest.theta_est_bootstrap(100) bootstrap_theta.to_csv('bootstrap_theta.csv') - + ### Compute objective at theta for likelihood ratio test k1 = np.arange(4, 24, 3) k2 = np.arange(40, 160, 40) E1 = np.arange(29000, 32000, 500) E2 = np.arange(38000, 42000, 500) theta_vals = pd.DataFrame(list(product(k1, k2, E1, E2)), columns=theta_names) - + obj_at_theta = pest.objective_at_theta(theta_vals) obj_at_theta.to_csv('obj_at_theta.csv') + if __name__ == "__main__": main() diff --git a/pyomo/contrib/parmest/examples/semibatch/parameter_estimation_example.py b/pyomo/contrib/parmest/examples/semibatch/parameter_estimation_example.py index 36fc8b9b6ec..fc4c9f5c675 100644 --- a/pyomo/contrib/parmest/examples/semibatch/parameter_estimation_example.py +++ b/pyomo/contrib/parmest/examples/semibatch/parameter_estimation_example.py @@ -14,28 +14,29 @@ import pyomo.contrib.parmest.parmest as parmest from pyomo.contrib.parmest.examples.semibatch.semibatch import generate_model + def main(): - # Vars to estimate theta_names = ['k1', 'k2', 'E1', 'E2'] - + # Data, list of dictionaries - data = [] + data = [] file_dirname = dirname(abspath(str(__file__))) for exp_num in range(10): - file_name = abspath(join(file_dirname, 'exp'+str(exp_num+1)+'.out')) - with open(file_name,'r') as infile: + file_name = abspath(join(file_dirname, 'exp' + str(exp_num + 1) + '.out')) + with open(file_name, 'r') as infile: d = json.load(infile) data.append(d) - - # Note, the model already includes a 'SecondStageCost' expression + + # Note, the model already includes a 'SecondStageCost' expression # for sum of squared error that will be used in parameter estimation - + pest = parmest.Estimator(generate_model, data, theta_names) - + obj, theta = pest.theta_est() print(obj) print(theta) + if __name__ == '__main__': main() diff --git a/pyomo/contrib/parmest/examples/semibatch/scenario_example.py b/pyomo/contrib/parmest/examples/semibatch/scenario_example.py index 8902ff874b4..071e53236c4 100644 --- a/pyomo/contrib/parmest/examples/semibatch/scenario_example.py +++ b/pyomo/contrib/parmest/examples/semibatch/scenario_example.py @@ -17,16 +17,15 @@ def main(): - # Vars to estimate in parmest theta_names = ['k1', 'k2', 'E1', 'E2'] # Data: list of dictionaries - data = [] + data = [] file_dirname = dirname(abspath(str(__file__))) for exp_num in range(10): - fname = join(file_dirname, 'exp'+str(exp_num+1)+'.out') - with open(fname,'r') as infile: + fname = join(file_dirname, 'exp' + str(exp_num + 1) + '.out') + with open(fname, 'r') as infile: d = json.load(infile) data.append(d) @@ -39,14 +38,15 @@ def main(): experimentscens = sc.ScenarioSet("Experiments") scenmaker.ScenariosFromExperiments(experimentscens) experimentscens.write_csv(output_file) - + # Use the bootstrap to make 3 scenarios and print bootscens = sc.ScenarioSet("Bootstrap") - scenmaker.ScenariosFromBoostrap(bootscens, 3) + scenmaker.ScenariosFromBootstrap(bootscens, 3) for s in bootscens.ScensIterator(): print("{}, {}".format(s.name, s.probability)) - for n,v in s.ThetaVals.items(): + for n, v in s.ThetaVals.items(): print(" {}={}".format(n, v)) + if __name__ == "__main__": main() diff --git a/pyomo/contrib/parmest/examples/semibatch/semibatch.py b/pyomo/contrib/parmest/examples/semibatch/semibatch.py index d3840286550..8cda262c019 100644 --- a/pyomo/contrib/parmest/examples/semibatch/semibatch.py +++ b/pyomo/contrib/parmest/examples/semibatch/semibatch.py @@ -16,21 +16,34 @@ """ import json from os.path import join, abspath, dirname -from pyomo.environ import ConcreteModel, Set, Param, Var, Constraint, ConstraintList, Expression, Objective, TransformationFactory, SolverFactory, exp, minimize +from pyomo.environ import ( + ConcreteModel, + Set, + Param, + Var, + Constraint, + ConstraintList, + Expression, + Objective, + TransformationFactory, + SolverFactory, + exp, + minimize, +) from pyomo.dae import ContinuousSet, DerivativeVar -def generate_model(data): +def generate_model(data): # unpack and fix the data cameastemp = data['Ca_meas'] cbmeastemp = data['Cb_meas'] ccmeastemp = data['Cc_meas'] trmeastemp = data['Tr_meas'] - cameas={} - cbmeas={} - ccmeas={} - trmeas={} + cameas = {} + cbmeas = {} + ccmeas = {} + trmeas = {} for i in cameastemp.keys(): cameas[float(i)] = cameastemp[i] cbmeas[float(i)] = cbmeastemp[i] @@ -51,26 +64,26 @@ def generate_model(data): # # Parameters for semi-batch reactor model # - m.R = Param(initialize=8.314) # kJ/kmol/K - m.Mwa = Param(initialize=50.0) # kg/kmol - m.rhor = Param(initialize=1000.0) # kg/m^3 - m.cpr = Param(initialize=3.9) # kJ/kg/K - m.Tf = Param(initialize=300) # K - m.deltaH1 = Param(initialize=-40000.0) # kJ/kmol - m.deltaH2 = Param(initialize=-50000.0) # kJ/kmol - m.alphaj = Param(initialize=0.8) # kJ/s/m^2/K - m.alphac = Param(initialize=0.7) # kJ/s/m^2/K - m.Aj = Param(initialize=5.0) # m^2 - m.Ac = Param(initialize=3.0) # m^2 - m.Vj = Param(initialize=0.9) # m^3 - m.Vc = Param(initialize=0.07) # m^3 - m.rhow = Param(initialize=700.0) # kg/m^3 - m.cpw = Param(initialize=3.1) # kJ/kg/K - m.Ca0 = Param(initialize=data['Ca0']) # kmol/m^3) - m.Cb0 = Param(initialize=data['Cb0']) # kmol/m^3) - m.Cc0 = Param(initialize=data['Cc0']) # kmol/m^3) - m.Tr0 = Param(initialize=300.0) # K - m.Vr0 = Param(initialize=1.0) # m^3 + m.R = Param(initialize=8.314) # kJ/kmol/K + m.Mwa = Param(initialize=50.0) # kg/kmol + m.rhor = Param(initialize=1000.0) # kg/m^3 + m.cpr = Param(initialize=3.9) # kJ/kg/K + m.Tf = Param(initialize=300) # K + m.deltaH1 = Param(initialize=-40000.0) # kJ/kmol + m.deltaH2 = Param(initialize=-50000.0) # kJ/kmol + m.alphaj = Param(initialize=0.8) # kJ/s/m^2/K + m.alphac = Param(initialize=0.7) # kJ/s/m^2/K + m.Aj = Param(initialize=5.0) # m^2 + m.Ac = Param(initialize=3.0) # m^2 + m.Vj = Param(initialize=0.9) # m^3 + m.Vc = Param(initialize=0.07) # m^3 + m.rhow = Param(initialize=700.0) # kg/m^3 + m.cpw = Param(initialize=3.1) # kJ/kg/K + m.Ca0 = Param(initialize=data['Ca0']) # kmol/m^3) + m.Cb0 = Param(initialize=data['Cb0']) # kmol/m^3) + m.Cc0 = Param(initialize=data['Cc0']) # kmol/m^3) + m.Tr0 = Param(initialize=300.0) # K + m.Vr0 = Param(initialize=1.0) # m^3 m.time = ContinuousSet(bounds=(0, 21600), initialize=m.measT) # Time in seconds @@ -82,35 +95,42 @@ def _initTc(m, t): return data['Tc1'] else: return data['Tc2'] - m.Tc = Param(m.time, initialize=_initTc, default=_initTc) # bounds= (288,432) Cooling coil temp, control input + + m.Tc = Param( + m.time, initialize=_initTc, default=_initTc + ) # bounds= (288,432) Cooling coil temp, control input def _initFa(m, t): if t < 10800: return data['Fa1'] else: return data['Fa2'] - m.Fa = Param(m.time, initialize=_initFa, default=_initFa) # bounds=(0,0.05) Inlet flow rate, control input + + m.Fa = Param( + m.time, initialize=_initFa, default=_initFa + ) # bounds=(0,0.05) Inlet flow rate, control input # # Parameters being estimated # - m.k1 = Var(initialize=14, bounds=(2,100)) # 1/s Actual: 15.01 - m.k2 = Var(initialize=90, bounds=(2,150)) # 1/s Actual: 85.01 - m.E1 = Var(initialize=27000.0, bounds=(25000,40000)) # kJ/kmol Actual: 30000 - m.E2 = Var(initialize=45000.0, bounds=(35000,50000)) # kJ/kmol Actual: 40000 + m.k1 = Var(initialize=14, bounds=(2, 100)) # 1/s Actual: 15.01 + m.k2 = Var(initialize=90, bounds=(2, 150)) # 1/s Actual: 85.01 + m.E1 = Var(initialize=27000.0, bounds=(25000, 40000)) # kJ/kmol Actual: 30000 + m.E2 = Var(initialize=45000.0, bounds=(35000, 50000)) # kJ/kmol Actual: 40000 # m.E1.fix(30000) # m.E2.fix(40000) - # # Time dependent variables # - m.Ca = Var(m.time, initialize=m.Ca0, bounds=(0,25)) - m.Cb = Var(m.time, initialize=m.Cb0, bounds=(0,25)) - m.Cc = Var(m.time, initialize=m.Cc0, bounds=(0,25)) + m.Ca = Var(m.time, initialize=m.Ca0, bounds=(0, 25)) + m.Cb = Var(m.time, initialize=m.Cb0, bounds=(0, 25)) + m.Cc = Var(m.time, initialize=m.Cc0, bounds=(0, 25)) m.Vr = Var(m.time, initialize=m.Vr0) m.Tr = Var(m.time, initialize=m.Tr0) - m.Tj = Var(m.time, initialize=310.0, bounds=(288,None)) # Cooling jacket temp, follows coil temp until failure + m.Tj = Var( + m.time, initialize=310.0, bounds=(288, None) + ) # Cooling jacket temp, follows coil temp until failure # # Derivatives in the model @@ -125,44 +145,61 @@ def _initFa(m, t): # Differential Equations in the model # - def _dCacon(m,t): + def _dCacon(m, t): if t == 0: return Constraint.Skip - return m.dCa[t] == m.Fa[t]/m.Vr[t] - m.k1*exp(-m.E1/(m.R*m.Tr[t]))*m.Ca[t] + return ( + m.dCa[t] + == m.Fa[t] / m.Vr[t] - m.k1 * exp(-m.E1 / (m.R * m.Tr[t])) * m.Ca[t] + ) + m.dCacon = Constraint(m.time, rule=_dCacon) - def _dCbcon(m,t): + def _dCbcon(m, t): if t == 0: return Constraint.Skip - return m.dCb[t] == m.k1*exp(-m.E1/(m.R*m.Tr[t]))*m.Ca[t] - \ - m.k2*exp(-m.E2/(m.R*m.Tr[t]))*m.Cb[t] + return ( + m.dCb[t] + == m.k1 * exp(-m.E1 / (m.R * m.Tr[t])) * m.Ca[t] + - m.k2 * exp(-m.E2 / (m.R * m.Tr[t])) * m.Cb[t] + ) + m.dCbcon = Constraint(m.time, rule=_dCbcon) - def _dCccon(m,t): + def _dCccon(m, t): if t == 0: return Constraint.Skip - return m.dCc[t] == m.k2*exp(-m.E2/(m.R*m.Tr[t]))*m.Cb[t] + return m.dCc[t] == m.k2 * exp(-m.E2 / (m.R * m.Tr[t])) * m.Cb[t] + m.dCccon = Constraint(m.time, rule=_dCccon) - def _dVrcon(m,t): + def _dVrcon(m, t): if t == 0: return Constraint.Skip - return m.dVr[t] == m.Fa[t]*m.Mwa/m.rhor + return m.dVr[t] == m.Fa[t] * m.Mwa / m.rhor + m.dVrcon = Constraint(m.time, rule=_dVrcon) - def _dTrcon(m,t): + def _dTrcon(m, t): if t == 0: return Constraint.Skip - return m.rhor*m.cpr*m.dTr[t] == \ - m.Fa[t]*m.Mwa*m.cpr/m.Vr[t]*(m.Tf-m.Tr[t]) - \ - m.k1*exp(-m.E1/(m.R*m.Tr[t]))*m.Ca[t]*m.deltaH1 - \ - m.k2*exp(-m.E2/(m.R*m.Tr[t]))*m.Cb[t]*m.deltaH2 + \ - m.alphaj*m.Aj/m.Vr0*(m.Tj[t]-m.Tr[t]) + \ - m.alphac*m.Ac/m.Vr0*(m.Tc[t]-m.Tr[t]) + return m.rhor * m.cpr * m.dTr[t] == m.Fa[t] * m.Mwa * m.cpr / m.Vr[t] * ( + m.Tf - m.Tr[t] + ) - m.k1 * exp(-m.E1 / (m.R * m.Tr[t])) * m.Ca[t] * m.deltaH1 - m.k2 * exp( + -m.E2 / (m.R * m.Tr[t]) + ) * m.Cb[ + t + ] * m.deltaH2 + m.alphaj * m.Aj / m.Vr0 * ( + m.Tj[t] - m.Tr[t] + ) + m.alphac * m.Ac / m.Vr0 * ( + m.Tc[t] - m.Tr[t] + ) + m.dTrcon = Constraint(m.time, rule=_dTrcon) - def _singlecooling(m,t): + def _singlecooling(m, t): return m.Tc[t] == m.Tj[t] + m.singlecooling = Constraint(m.time, rule=_singlecooling) # Initial Conditions @@ -172,6 +209,7 @@ def _initcon(m): yield m.Cc[m.time.first()] == m.Cc0 yield m.Vr[m.time.first()] == m.Vr0 yield m.Tr[m.time.first()] == m.Tr0 + m.initcon = ConstraintList(rule=_initcon) # @@ -179,28 +217,40 @@ def _initcon(m): # def ComputeFirstStageCost_rule(model): return 0 + m.FirstStageCost = Expression(rule=ComputeFirstStageCost_rule) def AllMeasurements(m): - return sum((m.Ca[t] - m.Ca_meas[t]) ** 2 + (m.Cb[t] - m.Cb_meas[t]) ** 2 - + (m.Cc[t] - m.Cc_meas[t]) ** 2 - + 0.01 * (m.Tr[t] - m.Tr_meas[t]) ** 2 for t in m.measT) + return sum( + (m.Ca[t] - m.Ca_meas[t]) ** 2 + + (m.Cb[t] - m.Cb_meas[t]) ** 2 + + (m.Cc[t] - m.Cc_meas[t]) ** 2 + + 0.01 * (m.Tr[t] - m.Tr_meas[t]) ** 2 + for t in m.measT + ) def MissingMeasurements(m): if data['experiment'] == 1: - return sum((m.Ca[t] - m.Ca_meas[t]) ** 2 + (m.Cb[t] - m.Cb_meas[t]) ** 2 - + (m.Cc[t] - m.Cc_meas[t]) ** 2 - + (m.Tr[t] - m.Tr_meas[t]) ** 2 for t in m.measT) + return sum( + (m.Ca[t] - m.Ca_meas[t]) ** 2 + + (m.Cb[t] - m.Cb_meas[t]) ** 2 + + (m.Cc[t] - m.Cc_meas[t]) ** 2 + + (m.Tr[t] - m.Tr_meas[t]) ** 2 + for t in m.measT + ) elif data['experiment'] == 2: return sum((m.Tr[t] - m.Tr_meas[t]) ** 2 for t in m.measT) else: - return sum((m.Cb[t] - m.Cb_meas[t]) ** 2 - + (m.Tr[t] - m.Tr_meas[t]) ** 2 for t in m.measT) + return sum( + (m.Cb[t] - m.Cb_meas[t]) ** 2 + (m.Tr[t] - m.Tr_meas[t]) ** 2 + for t in m.measT + ) m.SecondStageCost = Expression(rule=MissingMeasurements) def total_cost_rule(model): return model.FirstStageCost + model.SecondStageCost + m.Total_Cost_Objective = Objective(rule=total_cost_rule, sense=minimize) # Discretize model @@ -208,20 +258,21 @@ def total_cost_rule(model): disc.apply_to(m, nfe=20, ncp=4) return m + def main(): # Data loaded from files file_dirname = dirname(abspath(str(__file__))) file_name = abspath(join(file_dirname, 'exp2.out')) - with open(file_name,'r') as infile: + with open(file_name, 'r') as infile: data = json.load(infile) data['experiment'] = 2 - + model = generate_model(data) solver = SolverFactory('ipopt') solver.solve(model) print('k1 = ', model.k1()) print('E1 = ', model.E1()) - + + if __name__ == '__main__': main() - diff --git a/pyomo/contrib/parmest/graphics.py b/pyomo/contrib/parmest/graphics.py index 3295a5f0a98..f01622d2d17 100644 --- a/pyomo/contrib/parmest/graphics.py +++ b/pyomo/contrib/parmest/graphics.py @@ -11,11 +11,16 @@ import itertools from pyomo.common.dependencies import ( - matplotlib, matplotlib_available, - numpy as np, numpy_available, - pandas as pd, pandas_available, - scipy, scipy_available, - check_min_version, attempt_import + matplotlib, + matplotlib_available, + numpy as np, + numpy_available, + pandas as pd, + pandas_available, + scipy, + scipy_available, + check_min_version, + attempt_import, ) from pyomo.common.dependencies.matplotlib import pyplot as plt from pyomo.common.dependencies.scipy import stats @@ -27,57 +32,67 @@ 'seaborn', catch_exceptions=(ImportError, SyntaxError) ) -imports_available = numpy_available & scipy_available & pandas_available \ - & matplotlib_available & seaborn_available +imports_available = ( + numpy_available + & scipy_available + & pandas_available + & matplotlib_available + & seaborn_available +) + -def _get_variables(ax,columns): +def _get_variables(ax, columns): sps = ax.get_subplotspec() nx = sps.get_geometry()[1] ny = sps.get_geometry()[0] cell = sps.get_geometry()[2] - xloc = int(np.mod(cell,nx)) - yloc = int(np.mod((cell-xloc)/nx, ny)) + xloc = int(np.mod(cell, nx)) + yloc = int(np.mod((cell - xloc) / nx, ny)) xvar = columns[xloc] yvar = columns[yloc] - #print(sps.get_geometry(), cell, xloc, yloc, xvar, yvar) - + # print(sps.get_geometry(), cell, xloc, yloc, xvar, yvar) + return xvar, yvar, (xloc, yloc) -def _get_XYgrid(x,y,ncells): - xlin = np.linspace(min(x)-abs(max(x)-min(x))/2, max(x)+abs(max(x)-min(x))/2, ncells) - ylin = np.linspace(min(y)-abs(max(y)-min(y))/2, max(y)+abs(max(y)-min(y))/2, ncells) +def _get_XYgrid(x, y, ncells): + xlin = np.linspace( + min(x) - abs(max(x) - min(x)) / 2, max(x) + abs(max(x) - min(x)) / 2, ncells + ) + ylin = np.linspace( + min(y) - abs(max(y) - min(y)) / 2, max(y) + abs(max(y) - min(y)) / 2, ncells + ) X, Y = np.meshgrid(xlin, ylin) - - return X,Y + return X, Y -def _get_data_slice(xvar,yvar,columns,data,theta_star): - search_ranges = {} +def _get_data_slice(xvar, yvar, columns, data, theta_star): + search_ranges = {} for var in columns: - if var in [xvar,yvar]: + if var in [xvar, yvar]: search_ranges[var] = data[var].unique() else: search_ranges[var] = [theta_star[var]] - data_slice = pd.DataFrame(list(itertools.product(*search_ranges.values())), - columns=search_ranges.keys()) - - # griddata will not work with linear interpolation if the data + data_slice = pd.DataFrame( + list(itertools.product(*search_ranges.values())), columns=search_ranges.keys() + ) + + # griddata will not work with linear interpolation if the data # values are constant in any dimension for col in data[columns].columns: - cv = data[col].std()/data[col].mean() # Coefficient of variation - if cv < 1e-8: + cv = data[col].std() / data[col].mean() # Coefficient of variation + if cv < 1e-8: temp = data.copy() # Add variation (the interpolation is later scaled) if cv == 0: - temp[col] = temp[col] + data[col].mean()/10 + temp[col] = temp[col] + data[col].mean() / 10 else: temp[col] = temp[col] + data[col].std() - data = data.append(temp, ignore_index=True) - + data = pd.concat([data, temp], ignore_index=True) + data_slice['obj'] = scipy.interpolate.griddata( np.array(data[columns]), np.array(data[['obj']]), @@ -85,152 +100,165 @@ def _get_data_slice(xvar,yvar,columns,data,theta_star): method='linear', rescale=True, ) - + X = data_slice[xvar] Y = data_slice[yvar] Z = data_slice['obj'] - - return X,Y,Z - -# Note: seaborn 0.11 no longer expects color and label to be passed to the + + return X, Y, Z + + +# Note: seaborn 0.11 no longer expects color and label to be passed to the # plotting functions. label is kept here for backward compatibility -def _add_scatter(x,y,color,columns,theta_star,label=None): +def _add_scatter(x, y, color, columns, theta_star, label=None): ax = plt.gca() xvar, yvar, loc = _get_variables(ax, columns) - + ax.scatter(theta_star[xvar], theta_star[yvar], c=color, s=35) - - -def _add_rectangle_CI(x,y,color,columns,lower_bound,upper_bound,label=None): + + +def _add_rectangle_CI(x, y, color, columns, lower_bound, upper_bound, label=None): ax = plt.gca() - xvar, yvar, loc = _get_variables(ax,columns) + xvar, yvar, loc = _get_variables(ax, columns) xmin = lower_bound[xvar] ymin = lower_bound[yvar] xmax = upper_bound[xvar] ymax = upper_bound[yvar] - + ax.plot([xmin, xmax], [ymin, ymin], color=color) ax.plot([xmax, xmax], [ymin, ymax], color=color) ax.plot([xmax, xmin], [ymax, ymax], color=color) ax.plot([xmin, xmin], [ymax, ymin], color=color) -def _add_scipy_dist_CI(x,y,color,columns,ncells,alpha,dist,theta_star,label=None): +def _add_scipy_dist_CI( + x, y, color, columns, ncells, alpha, dist, theta_star, label=None +): ax = plt.gca() - xvar, yvar, loc = _get_variables(ax,columns) - - X,Y = _get_XYgrid(x,y,ncells) - + xvar, yvar, loc = _get_variables(ax, columns) + + X, Y = _get_XYgrid(x, y, ncells) + data_slice = [] - + if isinstance(dist, stats._multivariate.multivariate_normal_frozen): for var in theta_star.index: if var == xvar: data_slice.append(X) elif var == yvar: data_slice.append(Y) - elif var not in [xvar,yvar]: - data_slice.append(np.array([[theta_star[var]]*ncells]*ncells)) + elif var not in [xvar, yvar]: + data_slice.append(np.array([[theta_star[var]] * ncells] * ncells)) data_slice = np.dstack(tuple(data_slice)) - + elif isinstance(dist, stats.kde.gaussian_kde): for var in theta_star.index: if var == xvar: data_slice.append(X.ravel()) elif var == yvar: data_slice.append(Y.ravel()) - elif var not in [xvar,yvar]: - data_slice.append(np.array([theta_star[var]]*ncells*ncells)) + elif var not in [xvar, yvar]: + data_slice.append(np.array([theta_star[var]] * ncells * ncells)) data_slice = np.array(data_slice) else: return - + Z = dist.pdf(data_slice) Z = Z.reshape((ncells, ncells)) - - ax.contour(X,Y,Z, levels=[alpha], colors=color) - - -def _add_obj_contour(x,y,color,columns,data,theta_star,label=None): + + ax.contour(X, Y, Z, levels=[alpha], colors=color) + + +def _add_obj_contour(x, y, color, columns, data, theta_star, label=None): ax = plt.gca() - xvar, yvar, loc = _get_variables(ax,columns) + xvar, yvar, loc = _get_variables(ax, columns) try: - X, Y, Z = _get_data_slice(xvar,yvar,columns,data,theta_star) - + X, Y, Z = _get_data_slice(xvar, yvar, columns, data, theta_star) + triang = matplotlib.tri.Triangulation(X, Y) cmap = plt.cm.get_cmap('Greys') - - plt.tricontourf(triang,Z,cmap=cmap) + + plt.tricontourf(triang, Z, cmap=cmap) except: - print('Objective contour plot for', xvar, yvar,'slice failed') + print('Objective contour plot for', xvar, yvar, 'slice failed') + def _set_axis_limits(g, axis_limits, theta_vals, theta_star): - if theta_star is not None: - theta_vals = theta_vals.append(theta_star, ignore_index=True) - + theta_vals = pd.concat([theta_vals, theta_star], ignore_index=True) + if axis_limits is None: axis_limits = {} for col in theta_vals.columns: theta_range = np.abs(theta_vals[col].max() - theta_vals[col].min()) if theta_range < 1e-10: - theta_range = theta_vals[col].max()/10 - axis_limits[col] = [theta_vals[col].min() - theta_range/4, - theta_vals[col].max() + theta_range/4] + theta_range = theta_vals[col].max() / 10 + axis_limits[col] = [ + theta_vals[col].min() - theta_range / 4, + theta_vals[col].max() + theta_range / 4, + ] for ax in g.fig.get_axes(): - xvar, yvar, (xloc, yloc) = _get_variables(ax,theta_vals.columns) - if xloc != yloc: # not on diagonal + xvar, yvar, (xloc, yloc) = _get_variables(ax, theta_vals.columns) + if xloc != yloc: # not on diagonal ax.set_ylim(axis_limits[yvar]) ax.set_xlim(axis_limits[xvar]) - else: # on diagonal + else: # on diagonal ax.set_xlim(axis_limits[xvar]) - -def pairwise_plot(theta_values, theta_star=None, alpha=None, distributions=[], - axis_limits=None, title=None, add_obj_contour=True, - add_legend=True, filename=None): + +def pairwise_plot( + theta_values, + theta_star=None, + alpha=None, + distributions=[], + axis_limits=None, + title=None, + add_obj_contour=True, + add_legend=True, + filename=None, +): """ - Plot pairwise relationship for theta values, and optionally alpha-level + Plot pairwise relationship for theta values, and optionally alpha-level confidence intervals and objective value contours - + Parameters ---------- theta_values: DataFrame or tuple - - * If theta_values is a DataFrame, then it contains one column for each theta variable - and (optionally) an objective value column ('obj') and columns that contains - Boolean results from confidence interval tests (labeled using the alpha value). + + * If theta_values is a DataFrame, then it contains one column for each theta variable + and (optionally) an objective value column ('obj') and columns that contains + Boolean results from confidence interval tests (labeled using the alpha value). Each row is a sample. - - * Theta variables can be computed from ``theta_est_bootstrap``, + + * Theta variables can be computed from ``theta_est_bootstrap``, ``theta_est_leaveNout``, and ``leaveNout_bootstrap_test``. * The objective value can be computed using the ``likelihood_ratio_test``. - * Results from confidence interval tests can be computed using the - ``leaveNout_bootstrap_test``, ``likelihood_ratio_test``, and + * Results from confidence interval tests can be computed using the + ``leaveNout_bootstrap_test``, ``likelihood_ratio_test``, and ``confidence_region_test``. - * If theta_values is a tuple, then it contains a mean, covariance, and number - of samples (mean, cov, n) where mean is a dictionary or Series - (indexed by variable name), covariance is a DataFrame (indexed by + * If theta_values is a tuple, then it contains a mean, covariance, and number + of samples (mean, cov, n) where mean is a dictionary or Series + (indexed by variable name), covariance is a DataFrame (indexed by variable name, one column per variable name), and n is an integer. - The mean and covariance are used to create a multivariate normal - sample of n theta values. The covariance can be computed using + The mean and covariance are used to create a multivariate normal + sample of n theta values. The covariance can be computed using ``theta_est(calc_cov=True)``. - + theta_star: dict or Series, optional - Estimated value of theta. The dictionary or Series is indexed by variable name. + Estimated value of theta. The dictionary or Series is indexed by variable name. Theta_star is used to slice higher dimensional contour intervals in 2D alpha: float, optional - Confidence interval value, if an alpha value is given and the - distributions list is empty, the data will be filtered by True/False + Confidence interval value, if an alpha value is given and the + distributions list is empty, the data will be filtered by True/False values using the column name whose value equals alpha (see results from - ``leaveNout_bootstrap_test``, ``likelihood_ratio_test``, and + ``leaveNout_bootstrap_test``, ``likelihood_ratio_test``, and ``confidence_region_test``) distributions: list of strings, optional - Statistical distribution used to define a confidence region, - options = 'MVN' for multivariate_normal, 'KDE' for gaussian_kde, and + Statistical distribution used to define a confidence region, + options = 'MVN' for multivariate_normal, 'KDE' for gaussian_kde, and 'Rect' for rectangular. Confidence interval is a 2D slice, using linear interpolation at theta_star. axis_limits: dict, optional @@ -254,10 +282,10 @@ def pairwise_plot(theta_values, theta_star=None, alpha=None, distributions=[], assert isinstance(title, (type(None), str)) assert isinstance(add_obj_contour, bool) assert isinstance(filename, (type(None), str)) - + # If theta_values is a tuple containing (mean, cov, n), create a DataFrame of values if isinstance(theta_values, tuple): - assert(len(theta_values) == 3) + assert len(theta_values) == 3 mean = theta_values[0] cov = theta_values[1] n = theta_values[2] @@ -265,31 +293,38 @@ def pairwise_plot(theta_values, theta_star=None, alpha=None, distributions=[], mean = pd.Series(mean) theta_names = mean.index mvn_dist = stats.multivariate_normal(mean, cov) - theta_values = pd.DataFrame(mvn_dist.rvs(n, random_state=1), columns=theta_names) - - assert(theta_values.shape[0] > 0) - + theta_values = pd.DataFrame( + mvn_dist.rvs(n, random_state=1), columns=theta_names + ) + + assert theta_values.shape[0] > 0 + if isinstance(theta_star, dict): theta_star = pd.Series(theta_star) if isinstance(theta_star, pd.DataFrame): - theta_star = theta_star.loc[0,:] - - theta_names = [col for col in theta_values.columns if (col not in ['obj']) - and (not isinstance(col, float)) and (not isinstance(col, int))] - + theta_star = theta_star.loc[0, :] + + theta_names = [ + col + for col in theta_values.columns + if (col not in ['obj']) + and (not isinstance(col, float)) + and (not isinstance(col, int)) + ] + # Filter data by alpha if (alpha in theta_values.columns) and (len(distributions) == 0): thetas = theta_values.loc[theta_values[alpha] == True, theta_names] else: thetas = theta_values[theta_names] - + if theta_star is not None: theta_star = theta_star[theta_names] - + legend_elements = [] - + g = sns.PairGrid(thetas) - + # Plot histogram on the diagonal # Note: distplot is deprecated and will be removed in a future # version of seaborn, use histplot. distplot is kept for older @@ -297,169 +332,216 @@ def pairwise_plot(theta_values, theta_star=None, alpha=None, distributions=[], if check_min_version(sns, "0.11"): g.map_diag(sns.histplot) else: - g.map_diag(sns.distplot, kde=False, hist=True, norm_hist=False) - + g.map_diag(sns.distplot, kde=False, hist=True, norm_hist=False) + # Plot filled contours using all theta values based on obj if 'obj' in theta_values.columns and add_obj_contour: - g.map_offdiag(_add_obj_contour, columns=theta_names, data=theta_values, - theta_star=theta_star) - + g.map_offdiag( + _add_obj_contour, + columns=theta_names, + data=theta_values, + theta_star=theta_star, + ) + # Plot thetas g.map_offdiag(plt.scatter, s=10) - legend_elements.append(matplotlib.lines.Line2D( - [0], [0], marker='o', color='w', label='thetas', - markerfacecolor='cadetblue', markersize=5)) - + legend_elements.append( + matplotlib.lines.Line2D( + [0], + [0], + marker='o', + color='w', + label='thetas', + markerfacecolor='cadetblue', + markersize=5, + ) + ) + # Plot theta* if theta_star is not None: - g.map_offdiag(_add_scatter, color='k', columns=theta_names, theta_star=theta_star) - - legend_elements.append(matplotlib.lines.Line2D( - [0], [0], marker='o', color='w', label='theta*', - markerfacecolor='k', markersize=6)) - + g.map_offdiag( + _add_scatter, color='k', columns=theta_names, theta_star=theta_star + ) + + legend_elements.append( + matplotlib.lines.Line2D( + [0], + [0], + marker='o', + color='w', + label='theta*', + markerfacecolor='k', + markersize=6, + ) + ) + # Plot confidence regions colors = ['r', 'mediumblue', 'darkgray'] if (alpha is not None) and (len(distributions) > 0): - if theta_star is None: - print("""theta_star is not defined, confidence region slice will be - plotted at the mean value of theta""") + print( + """theta_star is not defined, confidence region slice will be + plotted at the mean value of theta""" + ) theta_star = thetas.mean() - + mvn_dist = None kde_dist = None for i, dist in enumerate(distributions): if dist == 'Rect': lb, ub = fit_rect_dist(thetas, alpha) - g.map_offdiag(_add_rectangle_CI, color=colors[i], columns=theta_names, - lower_bound=lb, upper_bound=ub) - legend_elements.append(matplotlib.lines.Line2D( - [0], [0], color=colors[i], lw=1, label=dist)) - + g.map_offdiag( + _add_rectangle_CI, + color=colors[i], + columns=theta_names, + lower_bound=lb, + upper_bound=ub, + ) + legend_elements.append( + matplotlib.lines.Line2D([0], [0], color=colors[i], lw=1, label=dist) + ) + elif dist == 'MVN': mvn_dist = fit_mvn_dist(thetas) Z = mvn_dist.pdf(thetas) - score = stats.scoreatpercentile(Z, (1-alpha)*100) - g.map_offdiag(_add_scipy_dist_CI, color=colors[i], columns=theta_names, - ncells=100, alpha=score, dist=mvn_dist, - theta_star=theta_star) - legend_elements.append(matplotlib.lines.Line2D( - [0], [0], color=colors[i], lw=1, label=dist)) - + score = stats.scoreatpercentile(Z, (1 - alpha) * 100) + g.map_offdiag( + _add_scipy_dist_CI, + color=colors[i], + columns=theta_names, + ncells=100, + alpha=score, + dist=mvn_dist, + theta_star=theta_star, + ) + legend_elements.append( + matplotlib.lines.Line2D([0], [0], color=colors[i], lw=1, label=dist) + ) + elif dist == 'KDE': kde_dist = fit_kde_dist(thetas) Z = kde_dist.pdf(thetas.transpose()) - score = stats.scoreatpercentile(Z, (1-alpha)*100) - g.map_offdiag(_add_scipy_dist_CI, color=colors[i], columns=theta_names, - ncells=100, alpha=score, dist=kde_dist, - theta_star=theta_star) - legend_elements.append(matplotlib.lines.Line2D( - [0], [0], color=colors[i], lw=1, label=dist)) - + score = stats.scoreatpercentile(Z, (1 - alpha) * 100) + g.map_offdiag( + _add_scipy_dist_CI, + color=colors[i], + columns=theta_names, + ncells=100, + alpha=score, + dist=kde_dist, + theta_star=theta_star, + ) + legend_elements.append( + matplotlib.lines.Line2D([0], [0], color=colors[i], lw=1, label=dist) + ) + _set_axis_limits(g, axis_limits, thetas, theta_star) - + for ax in g.axes.flatten(): - ax.ticklabel_format(style='sci', scilimits=(-2,2), axis='both') - + ax.ticklabel_format(style='sci', scilimits=(-2, 2), axis='both') + if add_legend: xvar, yvar, loc = _get_variables(ax, theta_names) - if loc == (len(theta_names)-1,0): + if loc == (len(theta_names) - 1, 0): ax.legend(handles=legend_elements, loc='best', prop={'size': 8}) if title: g.fig.subplots_adjust(top=0.9) - g.fig.suptitle(title) - + g.fig.suptitle(title) + # Work in progress # Plot lower triangle graphics in separate figures, useful for presentations lower_triangle_only = False if lower_triangle_only: for ax in g.axes.flatten(): xvar, yvar, (xloc, yloc) = _get_variables(ax, theta_names) - if xloc < yloc: # lower triangle + if xloc < yloc: # lower triangle ax.remove() - + ax.set_xlabel(xvar) ax.set_ylabel(yvar) - + fig = plt.figure() - ax.figure=fig + ax.figure = fig fig.axes.append(ax) fig.add_axes(ax) - + f, dummy = plt.subplots() bbox = dummy.get_position() - ax.set_position(bbox) + ax.set_position(bbox) dummy.remove() plt.close(f) ax.tick_params(reset=True) - + if add_legend: ax.legend(handles=legend_elements, loc='best', prop={'size': 8}) - + plt.close(g.fig) - + if filename is None: plt.show() else: plt.savefig(filename) plt.close() - + + def fit_rect_dist(theta_values, alpha): """ Fit an alpha-level rectangular distribution to theta values - + Parameters ---------- theta_values: DataFrame Theta values, columns = variable names alpha: float, optional Confidence interval value - + Returns --------- tuple containing lower bound and upper bound for each variable """ assert isinstance(theta_values, pd.DataFrame) assert isinstance(alpha, (int, float)) - - tval = stats.t.ppf(1-(1-alpha)/2, len(theta_values)-1) # Two-tail + + tval = stats.t.ppf(1 - (1 - alpha) / 2, len(theta_values) - 1) # Two-tail m = theta_values.mean() s = theta_values.std() - lower_bound = m-tval*s - upper_bound = m+tval*s - + lower_bound = m - tval * s + upper_bound = m + tval * s + return lower_bound, upper_bound - + + def fit_mvn_dist(theta_values): """ Fit a multivariate normal distribution to theta values - + Parameters ---------- theta_values: DataFrame Theta values, columns = variable names - + Returns --------- scipy.stats.multivariate_normal distribution """ assert isinstance(theta_values, pd.DataFrame) - + dist = stats.multivariate_normal( - theta_values.mean(), theta_values.cov(), allow_singular=True) + theta_values.mean(), theta_values.cov(), allow_singular=True + ) return dist + def fit_kde_dist(theta_values): """ Fit a Gaussian kernel-density distribution to theta values - + Parameters ---------- theta_values: DataFrame Theta values, columns = variable names - + Returns --------- scipy.stats.gaussian_kde distribution @@ -467,33 +549,35 @@ def fit_kde_dist(theta_values): assert isinstance(theta_values, pd.DataFrame) dist = stats.gaussian_kde(theta_values.transpose().values) - + return dist + def _get_grouped_data(data1, data2, normalize, group_names): if normalize: data_median = data1.median() data_std = data1.std() - data1 = (data1 - data_median)/data_std - data2 = (data2 - data_median)/data_std - + data1 = (data1 - data_median) / data_std + data2 = (data2 - data_median) / data_std + # Combine data1 and data2 to create a grouped histogram - data = pd.concat({group_names[0]: data1, - group_names[1]: data2}) + data = pd.concat({group_names[0]: data1, group_names[1]: data2}) data.reset_index(level=0, inplace=True) data.rename(columns={'level_0': 'set'}, inplace=True) - + data = data.melt(id_vars='set', value_vars=data1.columns, var_name='columns') - + return data -def grouped_boxplot(data1, data2, normalize=False, group_names=['data1', 'data2'], - filename=None): + +def grouped_boxplot( + data1, data2, normalize=False, group_names=['data1', 'data2'], filename=None +): """ Plot a grouped boxplot to compare two datasets - + The datasets can be normalized by the median and standard deviation of data1. - + Parameters ---------- data1: DataFrame @@ -512,30 +596,31 @@ def grouped_boxplot(data1, data2, normalize=False, group_names=['data1', 'data2' assert isinstance(normalize, bool) assert isinstance(group_names, list) assert isinstance(filename, (type(None), str)) - + data = _get_grouped_data(data1, data2, normalize, group_names) - + plt.figure() - sns.boxplot(data=data, hue='set', y='value', x='columns', - order=data1.columns) + sns.boxplot(data=data, hue='set', y='value', x='columns', order=data1.columns) plt.gca().legend().set_title('') plt.gca().set_xlabel('') plt.gca().set_ylabel('') - + if filename is None: plt.show() else: plt.savefig(filename) plt.close() -def grouped_violinplot(data1, data2, normalize=False, group_names=['data1', 'data2'], - filename=None): + +def grouped_violinplot( + data1, data2, normalize=False, group_names=['data1', 'data2'], filename=None +): """ Plot a grouped violinplot to compare two datasets - + The datasets can be normalized by the median and standard deviation of data1. - + Parameters ---------- data1: DataFrame @@ -554,17 +639,18 @@ def grouped_violinplot(data1, data2, normalize=False, group_names=['data1', 'dat assert isinstance(normalize, bool) assert isinstance(group_names, list) assert isinstance(filename, (type(None), str)) - + data = _get_grouped_data(data1, data2, normalize, group_names) - + plt.figure() - sns.violinplot(data=data, hue='set', y='value', x='columns', - order=data1.columns, split=True) - + sns.violinplot( + data=data, hue='set', y='value', x='columns', order=data1.columns, split=True + ) + plt.gca().legend().set_title('') plt.gca().set_xlabel('') plt.gca().set_ylabel('') - + if filename is None: plt.show() else: diff --git a/pyomo/contrib/parmest/ipopt_solver_wrapper.py b/pyomo/contrib/parmest/ipopt_solver_wrapper.py index fc321507b3b..a6d5e0506fb 100644 --- a/pyomo/contrib/parmest/ipopt_solver_wrapper.py +++ b/pyomo/contrib/parmest/ipopt_solver_wrapper.py @@ -10,5 +10,5 @@ # ___________________________________________________________________________ from pyomo.common.deprecation import relocated_module -relocated_module('pyomo.contrib.parmest.utils.ipopt_solver_wrapper', - version='6.4.2') + +relocated_module('pyomo.contrib.parmest.utils.ipopt_solver_wrapper', version='6.4.2') diff --git a/pyomo/contrib/parmest/parmest.py b/pyomo/contrib/parmest/parmest.py index 5a355855ece..cbdc9179f35 100644 --- a/pyomo/contrib/parmest/parmest.py +++ b/pyomo/contrib/parmest/parmest.py @@ -12,12 +12,21 @@ #### Adding option for "local" EF starting Sept 2020 #### Wrapping mpi-sppy functionality and local option Jan 2021, Feb 2021 +# TODO: move use_mpisppy to a Pyomo configuration option +# # False implies always use the EF that is local to parmest use_mpisppy = True # Use it if we can but use local if not. if use_mpisppy: try: - import mpisppy.utils.sputils as sputils - except: + # MPI-SPPY has an unfortunate side effect of outputting + # "[ 0.00] Initializing mpi-sppy" when it is imported. This can + # cause things like doctests to fail. We will suppress that + # information here. + from pyomo.common.tee import capture_output + + with capture_output(): + import mpisppy.utils.sputils as sputils + except ImportError: use_mpisppy = False # we can't use it if use_mpisppy: # These things should be outside the try block. @@ -37,9 +46,12 @@ from pyomo.common.dependencies import ( attempt_import, - numpy as np, numpy_available, - pandas as pd, pandas_available, - scipy, scipy_available, + numpy as np, + numpy_available, + pandas as pd, + pandas_available, + scipy, + scipy_available, ) import pyomo.environ as pyo @@ -49,14 +61,17 @@ import pyomo.contrib.parmest.utils as utils import pyomo.contrib.parmest.graphics as graphics +from pyomo.dae import ContinuousSet parmest_available = numpy_available & pandas_available & scipy_available inverse_reduced_hessian, inverse_reduced_hessian_available = attempt_import( - 'pyomo.contrib.interior_point.inverse_reduced_hessian') + 'pyomo.contrib.interior_point.inverse_reduced_hessian' +) logger = logging.getLogger(__name__) + def ef_nonants(ef): # Wrapper to call someone's ef_nonants # (the function being called is very short, but it might be changed) @@ -66,7 +81,9 @@ def ef_nonants(ef): return local_ef.ef_nonants(ef) -def _experiment_instance_creation_callback(scenario_name, node_names=None, cb_data=None): +def _experiment_instance_creation_callback( + scenario_name, node_names=None, cb_data=None +): """ This is going to be called by mpi-sppy or the local EF and it will call into the user's model's callback. @@ -75,13 +92,13 @@ def _experiment_instance_creation_callback(scenario_name, node_names=None, cb_da ----------- scenario_name: `str` Scenario name should end with a number node_names: `None` ( Not used here ) - cb_data : dict with ["callback"], ["BootList"], + cb_data : dict with ["callback"], ["BootList"], ["theta_names"], ["cb_data"], etc. "cb_data" is passed through to user's callback function that is the "callback" value. "BootList" is None or bootstrap experiment number list. (called cb_data by mpisppy) - + Returns: -------- @@ -92,22 +109,23 @@ def _experiment_instance_creation_callback(scenario_name, node_names=None, cb_da ---- There is flexibility both in how the function is passed and its signature. """ - assert(cb_data is not None) + assert cb_data is not None outer_cb_data = cb_data scen_num_str = re.compile(r'(\d+)$').search(scenario_name).group(1) scen_num = int(scen_num_str) - basename = scenario_name[:-len(scen_num_str)] # to reconstruct name + basename = scenario_name[: -len(scen_num_str)] # to reconstruct name CallbackFunction = outer_cb_data["callback"] - + if callable(CallbackFunction): callback = CallbackFunction else: cb_name = CallbackFunction if "CallbackModule" not in outer_cb_data: - raise RuntimeError(\ - "Internal Error: need CallbackModule in parmest callback") + raise RuntimeError( + "Internal Error: need CallbackModule in parmest callback" + ) else: modname = outer_cb_data["CallbackModule"] @@ -122,12 +140,12 @@ def _experiment_instance_creation_callback(scenario_name, node_names=None, cb_da try: callback = getattr(cb_module, cb_name) except: - print("Error getting function="+cb_name+" from module="+str(modname)) + print("Error getting function=" + cb_name + " from module=" + str(modname)) raise if "BootList" in outer_cb_data: bootlist = outer_cb_data["BootList"] - #print("debug in callback: using bootlist=",str(bootlist)) + # print("debug in callback: using bootlist=",str(bootlist)) # assuming bootlist itself is zero based exp_num = bootlist[scen_num] else: @@ -135,14 +153,16 @@ def _experiment_instance_creation_callback(scenario_name, node_names=None, cb_da scen_name = basename + str(exp_num) - cb_data = outer_cb_data["cb_data"] # cb_data might be None. + cb_data = outer_cb_data["cb_data"] # cb_data might be None. # at least three signatures are supported. The first is preferred try: - instance = callback(experiment_number = exp_num, cb_data = cb_data) + instance = callback(experiment_number=exp_num, cb_data=cb_data) except TypeError: - raise RuntimeError("Only one callback signature is supported: " - "callback(experiment_number, cb_data) ") + raise RuntimeError( + "Only one callback signature is supported: " + "callback(experiment_number, cb_data) " + ) """ try: instance = callback(scenario_tree_model, scen_name, node_names) @@ -157,17 +177,33 @@ def _experiment_instance_creation_callback(scenario_name, node_names=None, cb_da raise """ if hasattr(instance, "_mpisppy_node_list"): - raise RuntimeError (f"scenario for experiment {exp_num} has _mpisppy_node_list") - nonant_list = [instance.find_component(vstr) for vstr in\ - outer_cb_data["theta_names"]] - instance._mpisppy_node_list = [scenario_tree.ScenarioNode( - name="ROOT", - cond_prob=1.0, - stage=1, - cost_expression=instance.FirstStageCost, - scen_name_list=None, # Deprecated? - nonant_list=nonant_list, - scen_model=instance)] + raise RuntimeError(f"scenario for experiment {exp_num} has _mpisppy_node_list") + nonant_list = [ + instance.find_component(vstr) for vstr in outer_cb_data["theta_names"] + ] + if use_mpisppy: + instance._mpisppy_node_list = [ + scenario_tree.ScenarioNode( + name="ROOT", + cond_prob=1.0, + stage=1, + cost_expression=instance.FirstStageCost, + nonant_list=nonant_list, + scen_model=instance, + ) + ] + else: + instance._mpisppy_node_list = [ + scenario_tree.ScenarioNode( + name="ROOT", + cond_prob=1.0, + stage=1, + cost_expression=instance.FirstStageCost, + scen_name_list=None, + nonant_list=nonant_list, + scen_model=instance, + ) + ] if "ThetaVals" in outer_cb_data: thetavals = outer_cb_data["ThetaVals"] @@ -177,21 +213,22 @@ def _experiment_instance_creation_callback(scenario_name, node_names=None, cb_da theta_cuid = ComponentUID(vstr) theta_object = theta_cuid.find_component_on(instance) if thetavals[vstr] is not None: - #print("Fixing",vstr,"at",str(thetavals[vstr])) + # print("Fixing",vstr,"at",str(thetavals[vstr])) theta_object.fix(thetavals[vstr]) else: - #print("Freeing",vstr) + # print("Freeing",vstr) theta_object.unfix() return instance -#============================================= + +# ============================================= def _treemaker(scenlist): """ Makes a scenario tree (avoids dependence on daps) - + Parameters - ---------- + ---------- scenlist (list of `int`): experiment (i.e. scenario) numbers Returns @@ -200,14 +237,14 @@ def _treemaker(scenlist): """ num_scenarios = len(scenlist) - m = scenariotree.tree_structure_model.CreateAbstractScenarioTreeModel() + m = scenario_tree.tree_structure_model.CreateAbstractScenarioTreeModel() m = m.create_instance() m.Stages.add('Stage1') m.Stages.add('Stage2') m.Nodes.add('RootNode') for i in scenlist: - m.Nodes.add('LeafNode_Experiment'+str(i)) - m.Scenarios.add('Experiment'+str(i)) + m.Nodes.add('LeafNode_Experiment' + str(i)) + m.Scenarios.add('Experiment' + str(i)) m.NodeStage['RootNode'] = 'Stage1' m.ConditionalProbability['RootNode'] = 1.0 for node in m.Nodes: @@ -215,16 +252,16 @@ def _treemaker(scenlist): m.NodeStage[node] = 'Stage2' m.Children['RootNode'].add(node) m.Children[node].clear() - m.ConditionalProbability[node] = 1.0/num_scenarios - m.ScenarioLeafNode[node.replace('LeafNode_','')] = node + m.ConditionalProbability[node] = 1.0 / num_scenarios + m.ScenarioLeafNode[node.replace('LeafNode_', '')] = node return m - + def group_data(data, groupby_column_name, use_mean=None): """ Group data by scenario - + Parameters ---------- data: DataFrame @@ -232,9 +269,9 @@ def group_data(data, groupby_column_name, use_mean=None): groupby_column_name: strings Name of data column which contains scenario numbers use_mean: list of column names or None, optional - Name of data columns which should be reduced to a single value per + Name of data columns which should be reduced to a single value per scenario by taking the mean - + Returns ---------- grouped_data: list of dictionaries @@ -244,7 +281,7 @@ def group_data(data, groupby_column_name, use_mean=None): use_mean_list = [] else: use_mean_list = use_mean - + grouped_data = [] for exp_num, group in data.groupby(data[groupby_column_name]): d = {} @@ -262,9 +299,11 @@ class _SecondStageCostExpr(object): """ Class to pass objective expression into the Pyomo model """ + def __init__(self, ssc_function, data): self._ssc_function = ssc_function self._data = data + def __call__(self, model): return self._ssc_function(model, self._data) @@ -276,17 +315,17 @@ class Estimator(object): Parameters ---------- model_function: function - Function that generates an instance of the Pyomo model using 'data' + Function that generates an instance of the Pyomo model using 'data' as the input argument data: pd.DataFrame, list of dictionaries, list of dataframes, or list of json file names - Data that is used to build an instance of the Pyomo model and build + Data that is used to build an instance of the Pyomo model and build the objective function theta_names: list of strings List of Var names to estimate obj_function: function, optional Function used to formulate parameter estimation objective, generally - sum of squared error between measurements and model variables. - If no function is specified, the model is used + sum of squared error between measurements and model variables. + If no function is specified, the model is used "as is" and should be defined with a "FirstStageCost" and "SecondStageCost" expression that are used to build an objective. tee: bool, optional @@ -296,69 +335,110 @@ class Estimator(object): solver_options: dict, optional Provides options to the solver (also the name of an attribute) """ - def __init__(self, model_function, data, theta_names, obj_function=None, - tee=False, diagnostic_mode=False, solver_options=None): - + + def __init__( + self, + model_function, + data, + theta_names, + obj_function=None, + tee=False, + diagnostic_mode=False, + solver_options=None, + ): self.model_function = model_function - - assert isinstance(data, (list, pd.DataFrame)), "Data must be a list or DataFrame" + + assert isinstance( + data, (list, pd.DataFrame) + ), "Data must be a list or DataFrame" # convert dataframe into a list of dataframes, each row = one scenario if isinstance(data, pd.DataFrame): - self.callback_data = [data.loc[i,:].to_frame().transpose() for i in data.index] + self.callback_data = [ + data.loc[i, :].to_frame().transpose() for i in data.index + ] else: self.callback_data = data - assert isinstance(self.callback_data[0], (dict, pd.DataFrame, str)), "The scenarios in data must be a dictionary, DataFrame or filename" - + assert isinstance( + self.callback_data[0], (dict, pd.DataFrame, str) + ), "The scenarios in data must be a dictionary, DataFrame or filename" + if len(theta_names) == 0: self.theta_names = ['parmest_dummy_var'] else: - self.theta_names = theta_names - - self.obj_function = obj_function + self.theta_names = theta_names + + self.obj_function = obj_function self.tee = tee self.diagnostic_mode = diagnostic_mode self.solver_options = solver_options - + self._second_stage_cost_exp = "SecondStageCost" + # boolean to indicate if model is initialized using a square solve + self.model_initialized = False + def _return_theta_names(self): + """ + Return list of fitted model parameter names + """ + # if fitted model parameter names differ from theta_names created when Estimator object is created + if hasattr(self, 'theta_names_updated'): + return self.theta_names_updated + + else: + return ( + self.theta_names + ) # default theta_names, created when Estimator object is created def _create_parmest_model(self, data): """ Modify the Pyomo model for parameter estimation """ model = self.model_function(data) - - if (len(self.theta_names) == 1) and (self.theta_names[0] == 'parmest_dummy_var'): - model.parmest_dummy_var = pyo.Var(initialize = 1.0) - + + if (len(self.theta_names) == 1) and ( + self.theta_names[0] == 'parmest_dummy_var' + ): + model.parmest_dummy_var = pyo.Var(initialize=1.0) + # Add objective function (optional) if self.obj_function: for obj in model.component_objects(pyo.Objective): if obj.name in ["Total_Cost_Objective"]: - raise RuntimeError("Parmest will not override the existing model Objective named "+ obj.name) + raise RuntimeError( + "Parmest will not override the existing model Objective named " + + obj.name + ) obj.deactivate() - + for expr in model.component_data_objects(pyo.Expression): if expr.name in ["FirstStageCost", "SecondStageCost"]: - raise RuntimeError("Parmest will not override the existing model Expression named "+ expr.name) + raise RuntimeError( + "Parmest will not override the existing model Expression named " + + expr.name + ) model.FirstStageCost = pyo.Expression(expr=0) - model.SecondStageCost = pyo.Expression(rule=_SecondStageCostExpr(self.obj_function, data)) - + model.SecondStageCost = pyo.Expression( + rule=_SecondStageCostExpr(self.obj_function, data) + ) + def TotalCost_rule(model): return model.FirstStageCost + model.SecondStageCost - model.Total_Cost_Objective = pyo.Objective(rule=TotalCost_rule, sense=pyo.minimize) - + + model.Total_Cost_Objective = pyo.Objective( + rule=TotalCost_rule, sense=pyo.minimize + ) + # Convert theta Params to Vars, and unfix theta Vars model = utils.convert_params_to_vars(model, self.theta_names) - + # Update theta names list to use CUID string representation for i, theta in enumerate(self.theta_names): var_cuid = ComponentUID(theta) var_validate = var_cuid.find_component_on(model) if var_validate is None: logger.warning( - "theta_name[%s] (%s) was not found on the model", - (i, theta)) + "theta_name[%s] (%s) was not found on the model", (i, theta) + ) else: try: # If the component is not a variable, @@ -370,37 +450,41 @@ def TotalCost_rule(model): logger.warning(theta + ' is not a variable') self.parmest_model = model - + return model - - + def _instance_creation_callback(self, experiment_number=None, cb_data=None): - # cb_data is a list of dictionaries, list of dataframes, OR list of json file names exp_data = cb_data[experiment_number] if isinstance(exp_data, (dict, pd.DataFrame)): pass elif isinstance(exp_data, str): try: - with open(exp_data,'r') as infile: + with open(exp_data, 'r') as infile: exp_data = json.load(infile) except: raise RuntimeError(f'Could not read {exp_data} as json') else: raise RuntimeError(f'Unexpected data format for cb_data={cb_data}') model = self._create_parmest_model(exp_data) - + return model - - def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", - return_values=[], bootlist=None, calc_cov=False, cov_n=None): + def _Q_opt( + self, + ThetaVals=None, + solver="ef_ipopt", + return_values=[], + bootlist=None, + calc_cov=False, + cov_n=None, + ): """ Set up all thetas as first stage Vars, return resulting theta values as well as the objective function value. """ - if (solver == "k_aug"): + if solver == "k_aug": raise RuntimeError("k_aug no longer supported.") # (Bootstrap scenarios will use indirection through the bootlist) @@ -423,22 +507,25 @@ def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", options = {"solver": "ipopt"} scenario_creator_options = {"cb_data": outer_cb_data} if use_mpisppy: - ef = sputils.create_EF(scen_names, - _experiment_instance_creation_callback, - EF_name = "_Q_opt", - suppress_warnings=True, - scenario_creator_kwargs=scenario_creator_options) + ef = sputils.create_EF( + scen_names, + _experiment_instance_creation_callback, + EF_name="_Q_opt", + suppress_warnings=True, + scenario_creator_kwargs=scenario_creator_options, + ) else: - ef = local_ef.create_EF(scen_names, - _experiment_instance_creation_callback, - EF_name = "_Q_opt", - suppress_warnings=True, - scenario_creator_kwargs=scenario_creator_options) + ef = local_ef.create_EF( + scen_names, + _experiment_instance_creation_callback, + EF_name="_Q_opt", + suppress_warnings=True, + scenario_creator_kwargs=scenario_creator_options, + ) self.ef_instance = ef - + # Solve the extensive form with ipopt if solver == "ef_ipopt": - if not calc_cov: # Do not calculate the reduced hessian @@ -447,12 +534,12 @@ def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", for key in self.solver_options: solver.options[key] = self.solver_options[key] - solve_result = solver.solve(ef, tee = self.tee) + solve_result = solver.solve(self.ef_instance, tee=self.tee) # The import error will be raised when we attempt to use # inv_reduced_hessian_barrier below. # - #elif not asl_available: + # elif not asl_available: # raise ImportError("parmest requires ASL to calculate the " # "covariance matrix with solver 'ipopt'") else: @@ -461,70 +548,89 @@ def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", for ndname, Var, solval in ef_nonants(ef): ind_vars.append(Var) # calculate the reduced hessian - solve_result, inv_red_hes = \ - inverse_reduced_hessian.inv_reduced_hessian_barrier( - self.ef_instance, - independent_variables= ind_vars, - solver_options=self.solver_options, - tee=self.tee) - + ( + solve_result, + inv_red_hes, + ) = inverse_reduced_hessian.inv_reduced_hessian_barrier( + self.ef_instance, + independent_variables=ind_vars, + solver_options=self.solver_options, + tee=self.tee, + ) + if self.diagnostic_mode: - print(' Solver termination condition = ', - str(solve_result.solver.termination_condition)) + print( + ' Solver termination condition = ', + str(solve_result.solver.termination_condition), + ) # assume all first stage are thetas... thetavals = {} for ndname, Var, solval in ef_nonants(ef): # process the name # the scenarios are blocks, so strip the scenario name - vname = Var.name[Var.name.find(".")+1:] + vname = Var.name[Var.name.find(".") + 1 :] thetavals[vname] = solval objval = pyo.value(ef.EF_Obj) - + if calc_cov: # Calculate the covariance matrix - - # Number of data points considered + + # Number of data points considered n = cov_n - + # Extract number of fitted parameters l = len(thetavals) - + # Assumption: Objective value is sum of squared errors sse = objval - + '''Calculate covariance assuming experimental observation errors are - independent and follow a Gaussian + independent and follow a Gaussian distribution with constant variance. - + The formula used in parmest was verified against equations (7-5-15) and (7-5-16) in "Nonlinear Parameter Estimation", Y. Bard, 1974. - + This formula is also applicable if the objective is scaled by a constant; the constant cancels out. (was scaled by 1/n because it computes an expected value.) ''' cov = 2 * sse / (n - l) * inv_red_hes - cov = pd.DataFrame(cov, index=thetavals.keys(), columns=thetavals.keys()) - + cov = pd.DataFrame( + cov, index=thetavals.keys(), columns=thetavals.keys() + ) + thetavals = pd.Series(thetavals) - + if len(return_values) > 0: var_values = [] - for exp_i in self.ef_instance.component_objects(Block, descend_into=False): + if len(scen_names) > 1: # multiple scenarios + block_objects = self.ef_instance.component_objects( + Block, descend_into=False + ) + else: # single scenario + block_objects = [self.ef_instance] + for exp_i in block_objects: vals = {} for var in return_values: exp_i_var = exp_i.find_component(str(var)) - if exp_i_var is None: # we might have a block such as _mpisppy_data + if ( + exp_i_var is None + ): # we might have a block such as _mpisppy_data continue - temp = [pyo.value(_) for _ in exp_i_var.values()] + # if value to return is ContinuousSet + if type(exp_i_var) == ContinuousSet: + temp = list(exp_i_var) + else: + temp = [pyo.value(_) for _ in exp_i_var.values()] if len(temp) == 1: vals[var] = temp[0] else: vals[var] = temp if len(vals) > 0: - var_values.append(vals) + var_values.append(vals) var_values = pd.DataFrame(var_values) if calc_cov: return objval, thetavals, var_values, cov @@ -532,24 +638,26 @@ def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", return objval, thetavals, var_values if calc_cov: - return objval, thetavals, cov else: return objval, thetavals - + else: - raise RuntimeError("Unknown solver in Q_Opt="+solver) - + raise RuntimeError("Unknown solver in Q_Opt=" + solver) - def _Q_at_theta(self, thetavals): + def _Q_at_theta(self, thetavals, initialize_parmest_model=False): """ Return the objective function value with fixed theta values. - + Parameters ---------- thetavals: dict A dictionary of theta values. + initialize_parmest_model: boolean + If True: Solve square problem instance, build extensive form of the model for + parameter estimation, and set flag model_initialized to True + Returns ------- objectiveval: float @@ -558,99 +666,220 @@ def _Q_at_theta(self, thetavals): A dictionary of all values for theta that were input. solvertermination: Pyomo TerminationCondition Tries to return the "worst" solver status across the scenarios. - pyo.TerminationCondition.optimal is the best and + pyo.TerminationCondition.optimal is the best and pyo.TerminationCondition.infeasible is the worst. """ - dummy_cb = {"callback": self._instance_creation_callback, - "ThetaVals": thetavals, - "theta_names": self.theta_names, - "cb_data": self.callback_data} - optimizer = pyo.SolverFactory('ipopt') - + + if len(thetavals) > 0: + dummy_cb = { + "callback": self._instance_creation_callback, + "ThetaVals": thetavals, + "theta_names": self._return_theta_names(), + "cb_data": self.callback_data, + } + else: + dummy_cb = { + "callback": self._instance_creation_callback, + "theta_names": self._return_theta_names(), + "cb_data": self.callback_data, + } + if self.diagnostic_mode: - print(' Compute objective at theta = ',str(thetavals)) + if len(thetavals) > 0: + print(' Compute objective at theta = ', str(thetavals)) + else: + print(' Compute objective at initial theta') # start block of code to deal with models with no constraints # (ipopt will crash or complain on such problems without special care) instance = _experiment_instance_creation_callback("FOO0", None, dummy_cb) - try: # deal with special problems so Ipopt will not crash + try: # deal with special problems so Ipopt will not crash first = next(instance.component_objects(pyo.Constraint, active=True)) active_constraints = True except: - active_constraints = False + active_constraints = False # end block of code to deal with models with no constraints WorstStatus = pyo.TerminationCondition.optimal totobj = 0 - senario_numbers = list(range(len(self.callback_data))) - for snum in senario_numbers: - sname = "scenario_NODE"+str(snum) + scenario_numbers = list(range(len(self.callback_data))) + if initialize_parmest_model: + # create dictionary to store pyomo model instances (scenarios) + scen_dict = dict() + + for snum in scenario_numbers: + sname = "scenario_NODE" + str(snum) instance = _experiment_instance_creation_callback(sname, None, dummy_cb) + + if initialize_parmest_model: + # list to store fitted parameter names that will be unfixed + # after initialization + theta_init_vals = [] + # use appropriate theta_names member + theta_ref = self._return_theta_names() + + for i, theta in enumerate(theta_ref): + # Use parser in ComponentUID to locate the component + var_cuid = ComponentUID(theta) + var_validate = var_cuid.find_component_on(instance) + if var_validate is None: + logger.warning( + "theta_name %s was not found on the model", (theta) + ) + else: + try: + if len(thetavals) == 0: + var_validate.fix() + else: + var_validate.fix(thetavals[theta]) + theta_init_vals.append(var_validate) + except: + logger.warning( + 'Unable to fix model parameter value for %s (not a Pyomo model Var)', + (theta), + ) + if active_constraints: if self.diagnostic_mode: - print(' Experiment = ',snum) - print(' First solve with with special diagnostics wrapper') - status_obj, solved, iters, time, regu \ - = utils.ipopt_solve_with_stats(instance, optimizer, max_iter=500, max_cpu_time=120) - print(" status_obj, solved, iters, time, regularization_stat = ", - str(status_obj), str(solved), str(iters), str(time), str(regu)) + print(' Experiment = ', snum) + print(' First solve with special diagnostics wrapper') + ( + status_obj, + solved, + iters, + time, + regu, + ) = utils.ipopt_solve_with_stats( + instance, optimizer, max_iter=500, max_cpu_time=120 + ) + print( + " status_obj, solved, iters, time, regularization_stat = ", + str(status_obj), + str(solved), + str(iters), + str(time), + str(regu), + ) results = optimizer.solve(instance) if self.diagnostic_mode: - print('standard solve solver termination condition=', - str(results.solver.termination_condition)) - - if results.solver.termination_condition \ - != pyo.TerminationCondition.optimal : + print( + 'standard solve solver termination condition=', + str(results.solver.termination_condition), + ) + + if ( + results.solver.termination_condition + != pyo.TerminationCondition.optimal + ): # DLW: Aug2018: not distinguishing "middlish" conditions if WorstStatus != pyo.TerminationCondition.infeasible: WorstStatus = results.solver.termination_condition - + if initialize_parmest_model: + if self.diagnostic_mode: + print( + "Scenario {:d} infeasible with initialized parameter values".format( + snum + ) + ) + else: + if initialize_parmest_model: + if self.diagnostic_mode: + print( + "Scenario {:d} initialization successful with initial parameter values".format( + snum + ) + ) + if initialize_parmest_model: + # unfix parameters after initialization + for theta in theta_init_vals: + theta.unfix() + scen_dict[sname] = instance + else: + if initialize_parmest_model: + # unfix parameters after initialization + for theta in theta_init_vals: + theta.unfix() + scen_dict[sname] = instance + objobject = getattr(instance, self._second_stage_cost_exp) objval = pyo.value(objobject) totobj += objval - - retval = totobj / len(senario_numbers) # -1?? + + retval = totobj / len(scenario_numbers) # -1?? + if initialize_parmest_model and not hasattr(self, 'ef_instance'): + # create extensive form of the model using scenario dictionary + if len(scen_dict) > 0: + for scen in scen_dict.values(): + scen._mpisppy_probability = 1 / len(scen_dict) + + if use_mpisppy: + EF_instance = sputils._create_EF_from_scen_dict( + scen_dict, + EF_name="_Q_at_theta", + # suppress_warnings=True + ) + else: + EF_instance = local_ef._create_EF_from_scen_dict( + scen_dict, EF_name="_Q_at_theta", nonant_for_fixed_vars=True + ) + + self.ef_instance = EF_instance + # set self.model_initialized flag to True to skip extensive form model + # creation using theta_est() + self.model_initialized = True + + # return initialized theta values + if len(thetavals) == 0: + # use appropriate theta_names member + theta_ref = self._return_theta_names() + for i, theta in enumerate(theta_ref): + thetavals[theta] = theta_init_vals[i]() return retval, thetavals, WorstStatus def _get_sample_list(self, samplesize, num_samples, replacement=True): - samplelist = list() - - senario_numbers = list(range(len(self.callback_data))) - + + scenario_numbers = list(range(len(self.callback_data))) + if num_samples is None: # This could get very large - for i, l in enumerate(combinations(senario_numbers, samplesize)): + for i, l in enumerate(combinations(scenario_numbers, samplesize)): samplelist.append((i, np.sort(l))) else: for i in range(num_samples): attempts = 0 - unique_samples = 0 # check for duplicates in each sample - duplicate = False # check for duplicates between samples - while (unique_samples <= len(self.theta_names)) and (not duplicate): - sample = np.random.choice(senario_numbers, - samplesize, - replace=replacement) + unique_samples = 0 # check for duplicates in each sample + duplicate = False # check for duplicates between samples + while (unique_samples <= len(self._return_theta_names())) and ( + not duplicate + ): + sample = np.random.choice( + scenario_numbers, samplesize, replace=replacement + ) sample = np.sort(sample).tolist() unique_samples = len(np.unique(sample)) if sample in samplelist: duplicate = True - + attempts += 1 - if attempts > num_samples: # arbitrary timeout limit - raise RuntimeError("""Internal error: timeout constructing - a sample, the dim of theta may be too - close to the samplesize""") - + if attempts > num_samples: # arbitrary timeout limit + raise RuntimeError( + """Internal error: timeout constructing + a sample, the dim of theta may be too + close to the samplesize""" + ) + samplelist.append((i, sample)) - + return samplelist - - def theta_est(self, solver="ef_ipopt", return_values=[], calc_cov=False, cov_n=None): + + def theta_est( + self, solver="ef_ipopt", return_values=[], calc_cov=False, cov_n=None + ): """ Parameter estimation using all scenarios in the data @@ -663,9 +892,9 @@ def theta_est(self, solver="ef_ipopt", return_values=[], calc_cov=False, cov_n=N calc_cov: boolean, optional If True, calculate and return the covariance matrix (only for "ef_ipopt" solver) cov_n: int, optional - If calc_cov=True, then the user needs to supply the number of datapoints + If calc_cov=True, then the user needs to supply the number of datapoints that are used in the objective function - + Returns ------- objectiveval: float @@ -681,15 +910,29 @@ def theta_est(self, solver="ef_ipopt", return_values=[], calc_cov=False, cov_n=N assert isinstance(return_values, list) assert isinstance(calc_cov, bool) if calc_cov: - assert isinstance(cov_n, int), "The number of datapoints that are used in the objective function is required to calculate the covariance matrix" - assert cov_n > len(self.theta_names), "The number of datapoints must be greater than the number of parameters to estimate" - - return self._Q_opt(solver=solver, return_values=return_values, - bootlist=None, calc_cov=calc_cov, cov_n=cov_n) - - - def theta_est_bootstrap(self, bootstrap_samples, samplesize=None, - replacement=True, seed=None, return_samples=False): + assert isinstance( + cov_n, int + ), "The number of datapoints that are used in the objective function is required to calculate the covariance matrix" + assert cov_n > len( + self._return_theta_names() + ), "The number of datapoints must be greater than the number of parameters to estimate" + + return self._Q_opt( + solver=solver, + return_values=return_values, + bootlist=None, + calc_cov=calc_cov, + cov_n=cov_n, + ) + + def theta_est_bootstrap( + self, + bootstrap_samples, + samplesize=None, + replacement=True, + seed=None, + return_samples=False, + ): """ Parameter estimation using bootstrap resampling of the data @@ -698,7 +941,7 @@ def theta_est_bootstrap(self, bootstrap_samples, samplesize=None, bootstrap_samples: int Number of bootstrap samples to draw from the data samplesize: int or None, optional - Size of each bootstrap sample. If samplesize=None, samplesize will be + Size of each bootstrap sample. If samplesize=None, samplesize will be set to the number of samples in the data replacement: bool, optional Sample with or without replacement @@ -706,11 +949,11 @@ def theta_est_bootstrap(self, bootstrap_samples, samplesize=None, Random seed return_samples: bool, optional Return a list of sample numbers used in each bootstrap estimation - + Returns ------- - bootstrap_theta: pd.DataFrame - Theta values for each sample and (if return_samples = True) + bootstrap_theta: pd.DataFrame + Theta values for each sample and (if return_samples = True) the sample numbers used in each estimation """ assert isinstance(bootstrap_samples, int) @@ -718,15 +961,14 @@ def theta_est_bootstrap(self, bootstrap_samples, samplesize=None, assert isinstance(replacement, bool) assert isinstance(seed, (type(None), int)) assert isinstance(return_samples, bool) - + if samplesize is None: samplesize = len(self.callback_data) - + if seed is not None: np.random.seed(seed) - - global_list = self._get_sample_list(samplesize, bootstrap_samples, - replacement) + + global_list = self._get_sample_list(samplesize, bootstrap_samples, replacement) task_mgr = utils.ParallelTaskManager(bootstrap_samples) local_list = task_mgr.global_to_local_data(global_list) @@ -736,18 +978,18 @@ def theta_est_bootstrap(self, bootstrap_samples, samplesize=None, objval, thetavals = self._Q_opt(bootlist=list(sample)) thetavals['samples'] = sample bootstrap_theta.append(thetavals) - + global_bootstrap_theta = task_mgr.allgather_global_data(bootstrap_theta) - bootstrap_theta = pd.DataFrame(global_bootstrap_theta) + bootstrap_theta = pd.DataFrame(global_bootstrap_theta) if not return_samples: del bootstrap_theta['samples'] - + return bootstrap_theta - - - def theta_est_leaveNout(self, lNo, lNo_samples=None, seed=None, - return_samples=False): + + def theta_est_leaveNout( + self, lNo, lNo_samples=None, seed=None, return_samples=False + ): """ Parameter estimation where N data points are left out of each sample @@ -756,55 +998,55 @@ def theta_est_leaveNout(self, lNo, lNo_samples=None, seed=None, lNo: int Number of data points to leave out for parameter estimation lNo_samples: int - Number of leave-N-out samples. If lNo_samples=None, the maximum + Number of leave-N-out samples. If lNo_samples=None, the maximum number of combinations will be used seed: int or None, optional Random seed return_samples: bool, optional Return a list of sample numbers that were left out - + Returns ------- - lNo_theta: pd.DataFrame - Theta values for each sample and (if return_samples = True) + lNo_theta: pd.DataFrame + Theta values for each sample and (if return_samples = True) the sample numbers left out of each estimation """ assert isinstance(lNo, int) assert isinstance(lNo_samples, (type(None), int)) assert isinstance(seed, (type(None), int)) assert isinstance(return_samples, bool) - - samplesize = len(self.callback_data)-lNo + + samplesize = len(self.callback_data) - lNo if seed is not None: np.random.seed(seed) - + global_list = self._get_sample_list(samplesize, lNo_samples, replacement=False) - + task_mgr = utils.ParallelTaskManager(len(global_list)) local_list = task_mgr.global_to_local_data(global_list) - + lNo_theta = list() for idx, sample in local_list: objval, thetavals = self._Q_opt(bootlist=list(sample)) lNo_s = list(set(range(len(self.callback_data))) - set(sample)) thetavals['lNo'] = np.sort(lNo_s) lNo_theta.append(thetavals) - + global_bootstrap_theta = task_mgr.allgather_global_data(lNo_theta) - lNo_theta = pd.DataFrame(global_bootstrap_theta) - + lNo_theta = pd.DataFrame(global_bootstrap_theta) + if not return_samples: del lNo_theta['lNo'] - + return lNo_theta - - - def leaveNout_bootstrap_test(self, lNo, lNo_samples, bootstrap_samples, - distribution, alphas, seed=None): + + def leaveNout_bootstrap_test( + self, lNo, lNo_samples, bootstrap_samples, distribution, alphas, seed=None + ): """ - Leave-N-out bootstrap test to compare theta values where N data points are - left out to a bootstrap analysis using the remaining data, + Leave-N-out bootstrap test to compare theta values where N data points are + left out to a bootstrap analysis using the remaining data, results indicate if theta is within a confidence region determined by the bootstrap analysis @@ -813,33 +1055,33 @@ def leaveNout_bootstrap_test(self, lNo, lNo_samples, bootstrap_samples, lNo: int Number of data points to leave out for parameter estimation lNo_samples: int - Leave-N-out sample size. If lNo_samples=None, the maximum number + Leave-N-out sample size. If lNo_samples=None, the maximum number of combinations will be used bootstrap_samples: int: Bootstrap sample size distribution: string - Statistical distribution used to define a confidence region, - options = 'MVN' for multivariate_normal, 'KDE' for gaussian_kde, + Statistical distribution used to define a confidence region, + options = 'MVN' for multivariate_normal, 'KDE' for gaussian_kde, and 'Rect' for rectangular. alphas: list - List of alpha values used to determine if theta values are inside + List of alpha values used to determine if theta values are inside or outside the region. seed: int or None, optional Random seed - + Returns ---------- List of tuples with one entry per lNo_sample: - - * The first item in each tuple is the list of N samples that are left + + * The first item in each tuple is the list of N samples that are left out. - * The second item in each tuple is a DataFrame of theta estimated using + * The second item in each tuple is a DataFrame of theta estimated using the N samples. - * The third item in each tuple is a DataFrame containing results from + * The third item in each tuple is a DataFrame containing results from the bootstrap analysis using the remaining samples. - - For each DataFrame a column is added for each value of alpha which - indicates if the theta estimate is in (True) or out (False) of the + + For each DataFrame a column is added for each value of alpha which + indicates if the theta estimate is in (True) or out (False) of the alpha region for a given distribution (based on the bootstrap results) """ assert isinstance(lNo, int) @@ -848,40 +1090,41 @@ def leaveNout_bootstrap_test(self, lNo, lNo_samples, bootstrap_samples, assert distribution in ['Rect', 'MVN', 'KDE'] assert isinstance(alphas, list) assert isinstance(seed, (type(None), int)) - + if seed is not None: np.random.seed(seed) - + data = self.callback_data.copy() - + global_list = self._get_sample_list(lNo, lNo_samples, replacement=False) - + results = [] for idx, sample in global_list: - # Reset callback_data to only include the sample self.callback_data = [data[i] for i in sample] obj, theta = self.theta_est() - + # Reset callback_data to include all scenarios except the sample self.callback_data = [data[i] for i in range(len(data)) if i not in sample] bootstrap_theta = self.theta_est_bootstrap(bootstrap_samples) - - training, test = self.confidence_region_test(bootstrap_theta, - distribution=distribution, alphas=alphas, - test_theta_values=theta) - + + training, test = self.confidence_region_test( + bootstrap_theta, + distribution=distribution, + alphas=alphas, + test_theta_values=theta, + ) + results.append((sample, test, training)) - + # Reset callback_data (back to full data set) self.callback_data = data - + return results - - - def objective_at_theta(self, theta_values): + + def objective_at_theta(self, theta_values=None, initialize_parmest_model=False): """ Objective value for each theta @@ -889,46 +1132,116 @@ def objective_at_theta(self, theta_values): ---------- theta_values: pd.DataFrame, columns=theta_names Values of theta used to compute the objective - + + initialize_parmest_model: boolean + If True: Solve square problem instance, build extensive form of the model for + parameter estimation, and set flag model_initialized to True + + Returns ------- obj_at_theta: pd.DataFrame - Objective value for each theta (infeasible solutions are + Objective value for each theta (infeasible solutions are omitted). """ - assert isinstance(theta_values, pd.DataFrame) - - # for parallel code we need to use lists and dicts in the loop - theta_names = theta_values.columns - all_thetas = theta_values.to_dict('records') - task_mgr = utils.ParallelTaskManager(len(all_thetas)) - local_thetas = task_mgr.global_to_local_data(all_thetas) - + if len(self.theta_names) == 1 and self.theta_names[0] == 'parmest_dummy_var': + pass # skip assertion if model has no fitted parameters + else: + # create a local instance of the pyomo model to access model variables and parameters + model_temp = self._create_parmest_model(self.callback_data[0]) + model_theta_list = [] # list to store indexed and non-indexed parameters + # iterate over original theta_names + for theta_i in self.theta_names: + var_cuid = ComponentUID(theta_i) + var_validate = var_cuid.find_component_on(model_temp) + # check if theta in theta_names are indexed + try: + # get component UID of Set over which theta is defined + set_cuid = ComponentUID(var_validate.index_set()) + # access and iterate over the Set to generate theta names as they appear + # in the pyomo model + set_validate = set_cuid.find_component_on(model_temp) + for s in set_validate: + self_theta_temp = repr(var_cuid) + "[" + repr(s) + "]" + # generate list of theta names + model_theta_list.append(self_theta_temp) + # if theta is not indexed, copy theta name to list as-is + except AttributeError: + self_theta_temp = repr(var_cuid) + model_theta_list.append(self_theta_temp) + except: + raise + # if self.theta_names is not the same as temp model_theta_list, + # create self.theta_names_updated + if set(self.theta_names) == set(model_theta_list) and len( + self.theta_names + ) == set(model_theta_list): + pass + else: + self.theta_names_updated = model_theta_list + + if theta_values is None: + all_thetas = {} # dictionary to store fitted variables + # use appropriate theta names member + theta_names = self._return_theta_names() + else: + assert isinstance(theta_values, pd.DataFrame) + # for parallel code we need to use lists and dicts in the loop + theta_names = theta_values.columns + # # check if theta_names are in model + for theta in list(theta_names): + theta_temp = theta.replace("'", "") # cleaning quotes from theta_names + + assert theta_temp in [ + t.replace("'", "") for t in model_theta_list + ], "Theta name {} in 'theta_values' not in 'theta_names' {}".format( + theta_temp, model_theta_list + ) + assert len(list(theta_names)) == len(model_theta_list) + + all_thetas = theta_values.to_dict('records') + + if all_thetas: + task_mgr = utils.ParallelTaskManager(len(all_thetas)) + local_thetas = task_mgr.global_to_local_data(all_thetas) + else: + if initialize_parmest_model: + task_mgr = utils.ParallelTaskManager( + 1 + ) # initialization performed using just 1 set of theta values # walk over the mesh, return objective function all_obj = list() - for Theta in local_thetas: - obj, thetvals, worststatus = self._Q_at_theta(Theta) + if len(all_thetas) > 0: + for Theta in local_thetas: + obj, thetvals, worststatus = self._Q_at_theta( + Theta, initialize_parmest_model=initialize_parmest_model + ) + if worststatus != pyo.TerminationCondition.infeasible: + all_obj.append(list(Theta.values()) + [obj]) + # DLW, Aug2018: should we also store the worst solver status? + else: + obj, thetvals, worststatus = self._Q_at_theta( + thetavals={}, initialize_parmest_model=initialize_parmest_model + ) if worststatus != pyo.TerminationCondition.infeasible: - all_obj.append(list(Theta.values()) + [obj]) - # DLW, Aug2018: should we also store the worst solver status? - + all_obj.append(list(thetvals.values()) + [obj]) + global_all_obj = task_mgr.allgather_global_data(all_obj) dfcols = list(theta_names) + ['obj'] obj_at_theta = pd.DataFrame(data=global_all_obj, columns=dfcols) - return obj_at_theta - - - def likelihood_ratio_test(self, obj_at_theta, obj_value, alphas, - return_thresholds=False): + + def likelihood_ratio_test( + self, obj_at_theta, obj_value, alphas, return_thresholds=False + ): r""" - Likelihood ratio test to identify theta values within a confidence + Likelihood ratio test to identify theta values within a confidence region using the :math:`\chi^2` distribution - + Parameters ---------- obj_at_theta: pd.DataFrame, columns = theta_names + 'obj' - Objective values for each theta value (returned by + Objective values for each theta value (returned by objective_at_theta) obj_value: int or float Objective value from parameter estimation using all data @@ -936,11 +1249,11 @@ def likelihood_ratio_test(self, obj_at_theta, obj_value, alphas, List of alpha values to use in the chi2 test return_thresholds: bool, optional Return the threshold value for each alpha - + Returns ------- - LR: pd.DataFrame - Objective values for each theta value along with True or False for + LR: pd.DataFrame + Objective values for each theta value along with True or False for each alpha thresholds: pd.Series If return_threshold = True, the thresholds are also returned. @@ -949,7 +1262,7 @@ def likelihood_ratio_test(self, obj_at_theta, obj_value, alphas, assert isinstance(obj_value, (int, float)) assert isinstance(alphas, list) assert isinstance(return_thresholds, bool) - + LR = obj_at_theta.copy() S = len(self.callback_data) thresholds = {} @@ -957,92 +1270,96 @@ def likelihood_ratio_test(self, obj_at_theta, obj_value, alphas, chi2_val = scipy.stats.chi2.ppf(a, 2) thresholds[a] = obj_value * ((chi2_val / (S - 2)) + 1) LR[a] = LR['obj'] < thresholds[a] - + thresholds = pd.Series(thresholds) - + if return_thresholds: return LR, thresholds else: return LR - def confidence_region_test(self, theta_values, distribution, alphas, - test_theta_values=None): + def confidence_region_test( + self, theta_values, distribution, alphas, test_theta_values=None + ): """ - Confidence region test to determine if theta values are within a - rectangular, multivariate normal, or Gaussian kernel density distribution + Confidence region test to determine if theta values are within a + rectangular, multivariate normal, or Gaussian kernel density distribution for a range of alpha values - + Parameters ---------- theta_values: pd.DataFrame, columns = theta_names - Theta values used to generate a confidence region + Theta values used to generate a confidence region (generally returned by theta_est_bootstrap) distribution: string - Statistical distribution used to define a confidence region, - options = 'MVN' for multivariate_normal, 'KDE' for gaussian_kde, + Statistical distribution used to define a confidence region, + options = 'MVN' for multivariate_normal, 'KDE' for gaussian_kde, and 'Rect' for rectangular. alphas: list - List of alpha values used to determine if theta values are inside + List of alpha values used to determine if theta values are inside or outside the region. test_theta_values: pd.Series or pd.DataFrame, keys/columns = theta_names, optional Additional theta values that are compared to the confidence region to determine if they are inside or outside. - + Returns - training_results: pd.DataFrame - Theta value used to generate the confidence region along with True + training_results: pd.DataFrame + Theta value used to generate the confidence region along with True (inside) or False (outside) for each alpha - test_results: pd.DataFrame - If test_theta_values is not None, returns test theta value along + test_results: pd.DataFrame + If test_theta_values is not None, returns test theta value along with True (inside) or False (outside) for each alpha """ assert isinstance(theta_values, pd.DataFrame) assert distribution in ['Rect', 'MVN', 'KDE'] assert isinstance(alphas, list) - assert isinstance(test_theta_values, (type(None), dict, pd.Series, pd.DataFrame)) - + assert isinstance( + test_theta_values, (type(None), dict, pd.Series, pd.DataFrame) + ) + if isinstance(test_theta_values, (dict, pd.Series)): test_theta_values = pd.Series(test_theta_values).to_frame().transpose() - + training_results = theta_values.copy() - + if test_theta_values is not None: test_result = test_theta_values.copy() - + for a in alphas: - if distribution == 'Rect': lb, ub = graphics.fit_rect_dist(theta_values, a) - training_results[a] = ((theta_values > lb).all(axis=1) & \ - (theta_values < ub).all(axis=1)) - + training_results[a] = (theta_values > lb).all(axis=1) & ( + theta_values < ub + ).all(axis=1) + if test_theta_values is not None: # use upper and lower bound from the training set - test_result[a] = ((test_theta_values > lb).all(axis=1) & \ - (test_theta_values < ub).all(axis=1)) - + test_result[a] = (test_theta_values > lb).all(axis=1) & ( + test_theta_values < ub + ).all(axis=1) + elif distribution == 'MVN': dist = graphics.fit_mvn_dist(theta_values) Z = dist.pdf(theta_values) - score = scipy.stats.scoreatpercentile(Z, (1-a)*100) - training_results[a] = (Z >= score) - + score = scipy.stats.scoreatpercentile(Z, (1 - a) * 100) + training_results[a] = Z >= score + if test_theta_values is not None: # use score from the training set Z = dist.pdf(test_theta_values) - test_result[a] = (Z >= score) - + test_result[a] = Z >= score + elif distribution == 'KDE': dist = graphics.fit_kde_dist(theta_values) Z = dist.pdf(theta_values.transpose()) - score = scipy.stats.scoreatpercentile(Z, (1-a)*100) - training_results[a] = (Z >= score) - + score = scipy.stats.scoreatpercentile(Z, (1 - a) * 100) + training_results[a] = Z >= score + if test_theta_values is not None: # use score from the training set Z = dist.pdf(test_theta_values.transpose()) - test_result[a] = (Z >= score) - + test_result[a] = Z >= score + if test_theta_values is not None: return training_results, test_result else: diff --git a/pyomo/contrib/parmest/scenariocreator.py b/pyomo/contrib/parmest/scenariocreator.py index 9a784153c16..58d2d4da722 100644 --- a/pyomo/contrib/parmest/scenariocreator.py +++ b/pyomo/contrib/parmest/scenariocreator.py @@ -29,60 +29,54 @@ def __init__(self, name): self._scens = list() # use a df instead? self.name = name # might be "" - def _firstscen(self): # Return the first scenario for testing and to get Theta names. - assert(len(self._scens) > 0) + assert len(self._scens) > 0 return self._scens[0] - def ScensIterator(self): - """ Usage: for scenario in ScensIterator()""" + """Usage: for scenario in ScensIterator()""" return iter(self._scens) - def ScenarioNumber(self, scennum): - """ Returns the scenario with the given, zero-based number""" + """Returns the scenario with the given, zero-based number""" return self._scens[scennum] - def addone(self, scen): - """ Add a scenario to the set + """Add a scenario to the set Args: scen (ParmestScen): the scenario to add """ - assert(isinstance(self._scens, list)) + assert isinstance(self._scens, list) self._scens.append(scen) - def append_bootstrap(self, bootstrap_theta): - """ Append a boostrap theta df to the scenario set; equally likely + """Append a bootstrap theta df to the scenario set; equally likely Args: - boostrap_theta (dataframe): created by the bootstrap + bootstrap_theta (dataframe): created by the bootstrap Note: this can be cleaned up a lot with the list becomes a df, which is why I put it in the ScenarioSet class. """ - assert(len(bootstrap_theta) > 0) - prob = 1. / len(bootstrap_theta) + assert len(bootstrap_theta) > 0 + prob = 1.0 / len(bootstrap_theta) # dict of ThetaVal dicts dfdict = bootstrap_theta.to_dict(orient='index') for index, ThetaVals in dfdict.items(): - name = "Boostrap"+str(index) + name = "Bootstrap" + str(index) self.addone(ParmestScen(name, ThetaVals, prob)) - def write_csv(self, filename): - """ write a csv file with the scenarios in the set + """write a csv file with the scenarios in the set Args: filename (str): full path and full name of file """ if len(self._scens) == 0: - print ("Empty scenario set, not writing file={}".format(filename)) + print("Empty scenario set, not writing file={}".format(filename)) return with open(filename, "w") as f: f.write("Name,Probability") @@ -97,25 +91,26 @@ def write_csv(self, filename): class ParmestScen(object): - """ A little container for scenarios; the Args are the attributes. + """A little container for scenarios; the Args are the attributes. Args: name (str): name for reporting; might be "" ThetaVals (dict): ThetaVals[name]=val - probability (float): probability of occurance "near" these ThetaVals + probability (float): probability of occurrence "near" these ThetaVals """ def __init__(self, name, ThetaVals, probability): self.name = name - assert(isinstance(ThetaVals, dict)) + assert isinstance(ThetaVals, dict) self.ThetaVals = ThetaVals self.probability = probability + ############################################################ class ScenarioCreator(object): - """ Create scenarios from parmest. + """Create scenarios from parmest. Args: pest (Estimator): the parmest object @@ -127,7 +122,6 @@ def __init__(self, pest, solvername): self.pest = pest self.solvername = solvername - def ScenariosFromExperiments(self, addtoSet): """Creates new self.Scenarios list using the experiments only. @@ -137,27 +131,28 @@ def ScenariosFromExperiments(self, addtoSet): a ScenarioSet """ - assert(isinstance(addtoSet, ScenarioSet)) - - senario_numbers = list(range(len(self.pest.callback_data))) - - prob = 1. / len(senario_numbers) - for exp_num in senario_numbers: + assert isinstance(addtoSet, ScenarioSet) + + scenario_numbers = list(range(len(self.pest.callback_data))) + + prob = 1.0 / len(scenario_numbers) + for exp_num in scenario_numbers: ##print("Experiment number=", exp_num) - model = self.pest._instance_creation_callback(exp_num, - self.pest.callback_data) + model = self.pest._instance_creation_callback( + exp_num, self.pest.callback_data + ) opt = pyo.SolverFactory(self.solvername) results = opt.solve(model) # solves and updates model ## pyo.check_termination_optimal(results) ThetaVals = dict() for theta in self.pest.theta_names: - tvar = eval('model.'+theta) + tvar = eval('model.' + theta) tval = pyo.value(tvar) ##print(" theta, tval=", tvar, tval) ThetaVals[theta] = tval - addtoSet.addone(ParmestScen("ExpScen"+str(exp_num), ThetaVals, prob)) - - def ScenariosFromBoostrap(self, addtoSet, numtomake, seed=None): + addtoSet.addone(ParmestScen("ExpScen" + str(exp_num), ThetaVals, prob)) + + def ScenariosFromBootstrap(self, addtoSet, numtomake, seed=None): """Creates new self.Scenarios list using the experiments only. Args: @@ -165,7 +160,7 @@ def ScenariosFromBoostrap(self, addtoSet, numtomake, seed=None): numtomake (int) : number of scenarios to create """ - assert(isinstance(addtoSet, ScenarioSet)) + assert isinstance(addtoSet, ScenarioSet) bootstrap_thetas = self.pest.theta_est_bootstrap(numtomake, seed=seed) addtoSet.append_bootstrap(bootstrap_thetas) diff --git a/pyomo/contrib/parmest/tests/__init__.py b/pyomo/contrib/parmest/tests/__init__.py index 9320e403e95..d93cfd77b3c 100644 --- a/pyomo/contrib/parmest/tests/__init__.py +++ b/pyomo/contrib/parmest/tests/__init__.py @@ -8,4 +8,3 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - diff --git a/pyomo/contrib/parmest/tests/test_examples.py b/pyomo/contrib/parmest/tests/test_examples.py index 32afa236174..67e06130384 100644 --- a/pyomo/contrib/parmest/tests/test_examples.py +++ b/pyomo/contrib/parmest/tests/test_examples.py @@ -11,18 +11,17 @@ import pyomo.common.unittest as unittest import pyomo.contrib.parmest.parmest as parmest -from pyomo.contrib.parmest.graphics import ( - matplotlib_available, seaborn_available -) +from pyomo.contrib.parmest.graphics import matplotlib_available, seaborn_available from pyomo.opt import SolverFactory -ipopt_available = SolverFactory('ipopt').available() +ipopt_available = SolverFactory("ipopt").available() -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") -@unittest.skipIf(not ipopt_available, - "The 'ipopt' solver is not available") +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) +@unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") class TestRooneyBieglerExamples(unittest.TestCase): @classmethod def setUpClass(self): @@ -34,28 +33,44 @@ def tearDownClass(self): def test_model(self): from pyomo.contrib.parmest.examples.rooney_biegler import rooney_biegler + rooney_biegler.main() + def test_model_with_constraint(self): + from pyomo.contrib.parmest.examples.rooney_biegler import ( + rooney_biegler_with_constraint, + ) + + rooney_biegler_with_constraint.main() + @unittest.skipUnless(seaborn_available, "test requires seaborn") def test_parameter_estimation_example(self): - from pyomo.contrib.parmest.examples.rooney_biegler import parameter_estimation_example + from pyomo.contrib.parmest.examples.rooney_biegler import ( + parameter_estimation_example, + ) + parameter_estimation_example.main() @unittest.skipUnless(seaborn_available, "test requires seaborn") def test_bootstrap_example(self): from pyomo.contrib.parmest.examples.rooney_biegler import bootstrap_example + bootstrap_example.main() @unittest.skipUnless(seaborn_available, "test requires seaborn") def test_likelihood_ratio_example(self): - from pyomo.contrib.parmest.examples.rooney_biegler import likelihood_ratio_example + from pyomo.contrib.parmest.examples.rooney_biegler import ( + likelihood_ratio_example, + ) + likelihood_ratio_example.main() -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") -@unittest.skipIf(not ipopt_available, - "The 'ipopt' solver is not available") +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) +@unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") class TestReactionKineticsExamples(unittest.TestCase): @classmethod def setUpClass(self): @@ -66,14 +81,18 @@ def tearDownClass(self): pass def test_example(self): - from pyomo.contrib.parmest.examples.reaction_kinetics import simple_reaction_parmest_example + from pyomo.contrib.parmest.examples.reaction_kinetics import ( + simple_reaction_parmest_example, + ) + simple_reaction_parmest_example.main() -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") -@unittest.skipIf(not ipopt_available, - "The 'ipopt' solver is not available") +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) +@unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") class TestSemibatchExamples(unittest.TestCase): @classmethod def setUpClass(self): @@ -85,21 +104,27 @@ def tearDownClass(self): def test_model(self): from pyomo.contrib.parmest.examples.semibatch import semibatch + semibatch.main() def test_parameter_estimation_example(self): - from pyomo.contrib.parmest.examples.semibatch import parameter_estimation_example + from pyomo.contrib.parmest.examples.semibatch import ( + parameter_estimation_example, + ) + parameter_estimation_example.main() def test_scenario_example(self): from pyomo.contrib.parmest.examples.semibatch import scenario_example + scenario_example.main() -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") -@unittest.skipIf(not ipopt_available, - "The 'ipopt' solver is not available") +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) +@unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") class TestReactorDesignExamples(unittest.TestCase): @classmethod def setUpClass(self): @@ -112,38 +137,54 @@ def tearDownClass(self): @unittest.pytest.mark.expensive def test_model(self): from pyomo.contrib.parmest.examples.reactor_design import reactor_design + reactor_design.main() def test_parameter_estimation_example(self): - from pyomo.contrib.parmest.examples.reactor_design import parameter_estimation_example + from pyomo.contrib.parmest.examples.reactor_design import ( + parameter_estimation_example, + ) + parameter_estimation_example.main() @unittest.skipUnless(seaborn_available, "test requires seaborn") def test_bootstrap_example(self): from pyomo.contrib.parmest.examples.reactor_design import bootstrap_example + bootstrap_example.main() @unittest.pytest.mark.expensive def test_likelihood_ratio_example(self): - from pyomo.contrib.parmest.examples.reactor_design import likelihood_ratio_example + from pyomo.contrib.parmest.examples.reactor_design import ( + likelihood_ratio_example, + ) + likelihood_ratio_example.main() @unittest.pytest.mark.expensive def test_leaveNout_example(self): from pyomo.contrib.parmest.examples.reactor_design import leaveNout_example + leaveNout_example.main() def test_timeseries_data_example(self): - from pyomo.contrib.parmest.examples.reactor_design import timeseries_data_example + from pyomo.contrib.parmest.examples.reactor_design import ( + timeseries_data_example, + ) + timeseries_data_example.main() def test_multisensor_data_example(self): - from pyomo.contrib.parmest.examples.reactor_design import multisensor_data_example + from pyomo.contrib.parmest.examples.reactor_design import ( + multisensor_data_example, + ) + multisensor_data_example.main() @unittest.skipUnless(matplotlib_available, "test requires matplotlib") def test_datarec_example(self): from pyomo.contrib.parmest.examples.reactor_design import datarec_example + datarec_example.main() diff --git a/pyomo/contrib/parmest/tests/test_graphics.py b/pyomo/contrib/parmest/tests/test_graphics.py index fae672c7d28..c18659e9948 100644 --- a/pyomo/contrib/parmest/tests/test_graphics.py +++ b/pyomo/contrib/parmest/tests/test_graphics.py @@ -10,13 +10,18 @@ # ___________________________________________________________________________ from pyomo.common.dependencies import ( - numpy as np, numpy_available, - pandas as pd, pandas_available, - scipy, scipy_available, - matplotlib, matplotlib_available, + numpy as np, + numpy_available, + pandas as pd, + pandas_available, + scipy, + scipy_available, + matplotlib, + matplotlib_available, ) import platform + is_osx = platform.mac_ver()[0] != '' import pyomo.common.unittest as unittest @@ -29,26 +34,35 @@ testdir = os.path.dirname(os.path.abspath(__file__)) -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") -@unittest.skipIf(not graphics.imports_available, - "parmest.graphics imports are unavailable") -@unittest.skipIf(is_osx, "Disabling graphics tests on OSX due to issue in Matplotlib, see Pyomo PR #1337") +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) +@unittest.skipIf( + not graphics.imports_available, "parmest.graphics imports are unavailable" +) +@unittest.skipIf( + is_osx, + "Disabling graphics tests on OSX due to issue in Matplotlib, see Pyomo PR #1337", +) class TestGraphics(unittest.TestCase): - def setUp(self): - self.A = pd.DataFrame(np.random.randint(0,100,size=(100,4)), columns=list('ABCD')) - self.B = pd.DataFrame(np.random.randint(0,100,size=(100,4)), columns=list('ABCD')) + self.A = pd.DataFrame( + np.random.randint(0, 100, size=(100, 4)), columns=list('ABCD') + ) + self.B = pd.DataFrame( + np.random.randint(0, 100, size=(100, 4)), columns=list('ABCD') + ) def test_pairwise_plot(self): graphics.pairwise_plot(self.A, alpha=0.8, distributions=['Rect', 'MVN', 'KDE']) def test_grouped_boxplot(self): - graphics.grouped_boxplot(self.A, self.B, normalize=True, - group_names=['A', 'B']) + graphics.grouped_boxplot(self.A, self.B, normalize=True, group_names=['A', 'B']) def test_grouped_violinplot(self): graphics.grouped_violinplot(self.A, self.B) + if __name__ == '__main__': unittest.main() diff --git a/pyomo/contrib/parmest/tests/test_parmest.py b/pyomo/contrib/parmest/tests/test_parmest.py index 4d3caf71a2e..f26ecec2fce 100644 --- a/pyomo/contrib/parmest/tests/test_parmest.py +++ b/pyomo/contrib/parmest/tests/test_parmest.py @@ -10,13 +10,18 @@ # ___________________________________________________________________________ from pyomo.common.dependencies import ( - numpy as np, numpy_available, - pandas as pd, pandas_available, - scipy, scipy_available, - matplotlib, matplotlib_available, + numpy as np, + numpy_available, + pandas as pd, + pandas_available, + scipy, + scipy_available, + matplotlib, + matplotlib_available, ) import platform + is_osx = platform.mac_ver()[0] != '' import pyomo.common.unittest as unittest @@ -32,81 +37,103 @@ import pyomo.dae as dae from pyomo.opt import SolverFactory + ipopt_available = SolverFactory('ipopt').available() from pyomo.common.fileutils import find_library + pynumero_ASL_available = False if find_library('pynumero_ASL') is None else True testdir = os.path.dirname(os.path.abspath(__file__)) -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") class TestRooneyBiegler(unittest.TestCase): - def setUp(self): - from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import rooney_biegler_model + from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import ( + rooney_biegler_model, + ) # Note, the data used in this test has been corrected to use data.loc[5,'hour'] = 7 (instead of 6) - data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0], - [4,16.0],[5,15.6],[7,19.8]], columns=['hour', 'y']) + data = pd.DataFrame( + data=[[1, 8.3], [2, 10.3], [3, 19.0], [4, 16.0], [5, 15.6], [7, 19.8]], + columns=['hour', 'y'], + ) theta_names = ['asymptote', 'rate_constant'] def SSE(model, data): - expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index) + expr = sum( + (data.y[i] - model.response_function[data.hour[i]]) ** 2 + for i in data.index + ) return expr - solver_options = { - 'tol': 1e-8, - } + solver_options = {'tol': 1e-8} self.data = data - self.pest = parmest.Estimator(rooney_biegler_model, data, theta_names, SSE, - solver_options=solver_options) + self.pest = parmest.Estimator( + rooney_biegler_model, + data, + theta_names, + SSE, + solver_options=solver_options, + tee=True, + ) def test_theta_est(self): objval, thetavals = self.pest.theta_est() self.assertAlmostEqual(objval, 4.3317112, places=2) - self.assertAlmostEqual(thetavals['asymptote'], 19.1426, places=2) # 19.1426 from the paper - self.assertAlmostEqual(thetavals['rate_constant'], 0.5311, places=2) # 0.5311 from the paper + self.assertAlmostEqual( + thetavals['asymptote'], 19.1426, places=2 + ) # 19.1426 from the paper + self.assertAlmostEqual( + thetavals['rate_constant'], 0.5311, places=2 + ) # 0.5311 from the paper - @unittest.skipIf(not graphics.imports_available, - "parmest.graphics imports are unavailable") + @unittest.skipIf( + not graphics.imports_available, "parmest.graphics imports are unavailable" + ) def test_bootstrap(self): objval, thetavals = self.pest.theta_est() - num_bootstraps=10 + num_bootstraps = 10 theta_est = self.pest.theta_est_bootstrap(num_bootstraps, return_samples=True) num_samples = theta_est['samples'].apply(len) self.assertTrue(len(theta_est.index), 10) - self.assertTrue(num_samples.equals(pd.Series([6]*10))) + self.assertTrue(num_samples.equals(pd.Series([6] * 10))) del theta_est['samples'] - # apply cofidence region test + # apply confidence region test CR = self.pest.confidence_region_test(theta_est, 'MVN', [0.5, 0.75, 1.0]) self.assertTrue(set(CR.columns) >= set([0.5, 0.75, 1.0])) self.assertTrue(CR[0.5].sum() == 5) self.assertTrue(CR[0.75].sum() == 7) - self.assertTrue(CR[1.0].sum() == 10) # all true + self.assertTrue(CR[1.0].sum() == 10) # all true graphics.pairwise_plot(theta_est) graphics.pairwise_plot(theta_est, thetavals) graphics.pairwise_plot(theta_est, thetavals, 0.8, ['MVN', 'KDE', 'Rect']) - @unittest.skipIf(not graphics.imports_available, - "parmest.graphics imports are unavailable") + @unittest.skipIf( + not graphics.imports_available, "parmest.graphics imports are unavailable" + ) def test_likelihood_ratio(self): objval, thetavals = self.pest.theta_est() asym = np.arange(10, 30, 2) rate = np.arange(0, 1.5, 0.25) - theta_vals = pd.DataFrame(list(product(asym, rate)), columns=self.pest.theta_names) + theta_vals = pd.DataFrame( + list(product(asym, rate)), columns=self.pest.theta_names + ) obj_at_theta = self.pest.objective_at_theta(theta_vals) @@ -115,26 +142,28 @@ def test_likelihood_ratio(self): self.assertTrue(set(LR.columns) >= set([0.8, 0.9, 1.0])) self.assertTrue(LR[0.8].sum() == 6) self.assertTrue(LR[0.9].sum() == 10) - self.assertTrue(LR[1.0].sum() == 60) # all true + self.assertTrue(LR[1.0].sum() == 60) # all true graphics.pairwise_plot(LR, thetavals, 0.8) def test_leaveNout(self): lNo_theta = self.pest.theta_est_leaveNout(1) - self.assertTrue(lNo_theta.shape == (6,2)) + self.assertTrue(lNo_theta.shape == (6, 2)) - results = self.pest.leaveNout_bootstrap_test(1, None, 3, 'Rect', [0.5, 1.0], seed=5436) - self.assertTrue(len(results) == 6) # 6 lNo samples + results = self.pest.leaveNout_bootstrap_test( + 1, None, 3, 'Rect', [0.5, 1.0], seed=5436 + ) + self.assertTrue(len(results) == 6) # 6 lNo samples i = 1 - samples = results[i][0] # list of N samples that are left out + samples = results[i][0] # list of N samples that are left out lno_theta = results[i][1] bootstrap_theta = results[i][2] - self.assertTrue(samples == [1]) # sample 1 was left out - self.assertTrue(lno_theta.shape[0] == 1) # lno estimate for sample 1 + self.assertTrue(samples == [1]) # sample 1 was left out + self.assertTrue(lno_theta.shape[0] == 1) # lno estimate for sample 1 self.assertTrue(set(lno_theta.columns) >= set([0.5, 1.0])) - self.assertTrue(lno_theta[1.0].sum() == 1) # all true - self.assertTrue(bootstrap_theta.shape[0] == 3) # bootstrap for sample 1 - self.assertTrue(bootstrap_theta[1.0].sum() == 3) # all true + self.assertTrue(lno_theta[1.0].sum() == 1) # all true + self.assertTrue(bootstrap_theta.shape[0] == 3) # bootstrap for sample 1 + self.assertTrue(bootstrap_theta[1.0].sum() == 3) # all true def test_diagnostic_mode(self): self.pest.diagnostic_mode = True @@ -143,7 +172,9 @@ def test_diagnostic_mode(self): asym = np.arange(10, 30, 2) rate = np.arange(0, 1.5, 0.25) - theta_vals = pd.DataFrame(list(product(asym, rate)), columns=self.pest.theta_names) + theta_vals = pd.DataFrame( + list(product(asym, rate)), columns=self.pest.theta_names + ) obj_at_theta = self.pest.objective_at_theta(theta_vals) @@ -151,21 +182,28 @@ def test_diagnostic_mode(self): @unittest.skip("Presently having trouble with mpiexec on appveyor") def test_parallel_parmest(self): - """ use mpiexec and mpi4py """ + """use mpiexec and mpi4py""" p = str(parmestbase.__path__) l = p.find("'") - r = p.find("'", l+1) - parmestpath = p[l+1:r] - rbpath = parmestpath + os.sep + "examples" + os.sep + \ - "rooney_biegler" + os.sep + "rooney_biegler_parmest.py" - rbpath = os.path.abspath(rbpath) # paranoia strikes deep... + r = p.find("'", l + 1) + parmestpath = p[l + 1 : r] + rbpath = ( + parmestpath + + os.sep + + "examples" + + os.sep + + "rooney_biegler" + + os.sep + + "rooney_biegler_parmest.py" + ) + rbpath = os.path.abspath(rbpath) # paranoia strikes deep... rlist = ["mpiexec", "--allow-run-as-root", "-n", "2", sys.executable, rbpath] - if sys.version_info >= (3,5): + if sys.version_info >= (3, 5): ret = subprocess.run(rlist) retcode = ret.returncode else: retcode = subprocess.call(rlist) - assert(retcode == 0) + assert retcode == 0 @unittest.skip("Most folks don't have k_aug installed") def test_theta_k_aug_for_Hessian(self): @@ -174,20 +212,32 @@ def test_theta_k_aug_for_Hessian(self): self.assertAlmostEqual(objval, 4.4675, places=2) @unittest.skipIf(not pynumero_ASL_available, "pynumero ASL is not available") - @unittest.skipIf(not parmest.inverse_reduced_hessian_available, - "Cannot test covariance matrix: required ASL dependency is missing") + @unittest.skipIf( + not parmest.inverse_reduced_hessian_available, + "Cannot test covariance matrix: required ASL dependency is missing", + ) def test_theta_est_cov(self): objval, thetavals, cov = self.pest.theta_est(calc_cov=True, cov_n=6) self.assertAlmostEqual(objval, 4.3317112, places=2) - self.assertAlmostEqual(thetavals['asymptote'], 19.1426, places=2) # 19.1426 from the paper - self.assertAlmostEqual(thetavals['rate_constant'], 0.5311, places=2) # 0.5311 from the paper + self.assertAlmostEqual( + thetavals['asymptote'], 19.1426, places=2 + ) # 19.1426 from the paper + self.assertAlmostEqual( + thetavals['rate_constant'], 0.5311, places=2 + ) # 0.5311 from the paper # Covariance matrix - self.assertAlmostEqual(cov.iloc[0,0], 6.30579403, places=2) # 6.22864 from paper - self.assertAlmostEqual(cov.iloc[0,1], -0.4395341, places=2) # -0.4322 from paper - self.assertAlmostEqual(cov.iloc[1,0], -0.4395341, places=2) # -0.4322 from paper - self.assertAlmostEqual(cov.iloc[1,1], 0.04124, places=2) # 0.04124 from paper + self.assertAlmostEqual( + cov.iloc[0, 0], 6.30579403, places=2 + ) # 6.22864 from paper + self.assertAlmostEqual( + cov.iloc[0, 1], -0.4395341, places=2 + ) # -0.4322 from paper + self.assertAlmostEqual( + cov.iloc[1, 0], -0.4395341, places=2 + ) # -0.4322 from paper + self.assertAlmostEqual(cov.iloc[1, 1], 0.04124, places=2) # 0.04124 from paper ''' Why does the covariance matrix from parmest not match the paper? Parmest is calculating the exact reduced Hessian. The paper (Rooney and Bielger, 2001) likely @@ -196,27 +246,28 @@ def test_theta_est_cov(self): The formula used in parmest was verified against equations (7-5-15) and (7-5-16) in "Nonlinear Parameter Estimation", Y. Bard, 1974. ''' + def test_cov_scipy_least_squares_comparison(self): ''' Scipy results differ in the 3rd decimal place from the paper. It is possible the paper used an alternative finite difference approximation for the Jacobian. ''' - + def model(theta, t): ''' Model to be fitted y = model(theta, t) Arguments: theta: vector of fitted parameters t: independent variable [hours] - + Returns: y: model predictions [need to check paper for units] ''' asymptote = theta[0] rate_constant = theta[1] - + return asymptote * (1 - np.exp(-rate_constant * t)) - + def residual(theta, t, y): ''' Calculate residuals @@ -226,410 +277,682 @@ def residual(theta, t, y): y: dependent variable [?] ''' return y - model(theta, t) - + # define data t = self.data['hour'].to_numpy() y = self.data['y'].to_numpy() - + # define initial guess theta_guess = np.array([15, 0.5]) - + ## solve with optimize.least_squares - sol = scipy.optimize.least_squares(residual, theta_guess,method='trf',args=(t,y),verbose=2) + sol = scipy.optimize.least_squares( + residual, theta_guess, method='trf', args=(t, y), verbose=2 + ) theta_hat = sol.x - - self.assertAlmostEqual(theta_hat[0], 19.1426, places=2) # 19.1426 from the paper - self.assertAlmostEqual(theta_hat[1], 0.5311, places=2) # 0.5311 from the paper - + + self.assertAlmostEqual( + theta_hat[0], 19.1426, places=2 + ) # 19.1426 from the paper + self.assertAlmostEqual(theta_hat[1], 0.5311, places=2) # 0.5311 from the paper + # calculate residuals r = residual(theta_hat, t, y) - + # calculate variance of the residuals # -2 because there are 2 fitted parameters sigre = np.matmul(r.T, r / (len(y) - 2)) - + # approximate covariance # Need to divide by 2 because optimize.least_squares scaled the objective by 1/2 cov = sigre * np.linalg.inv(np.matmul(sol.jac.T, sol.jac)) - - self.assertAlmostEqual(cov[0,0], 6.22864, places=2) # 6.22864 from paper - self.assertAlmostEqual(cov[0,1], -0.4322, places=2) # -0.4322 from paper - self.assertAlmostEqual(cov[1,0], -0.4322, places=2) # -0.4322 from paper - self.assertAlmostEqual(cov[1,1], 0.04124, places=2) # 0.04124 from paper - + + self.assertAlmostEqual(cov[0, 0], 6.22864, places=2) # 6.22864 from paper + self.assertAlmostEqual(cov[0, 1], -0.4322, places=2) # -0.4322 from paper + self.assertAlmostEqual(cov[1, 0], -0.4322, places=2) # -0.4322 from paper + self.assertAlmostEqual(cov[1, 1], 0.04124, places=2) # 0.04124 from paper + def test_cov_scipy_curve_fit_comparison(self): ''' Scipy results differ in the 3rd decimal place from the paper. It is possible the paper used an alternative finite difference approximation for the Jacobian. ''' + ## solve with optimize.curve_fit def model(t, asymptote, rate_constant): return asymptote * (1 - np.exp(-rate_constant * t)) - + # define data t = self.data['hour'].to_numpy() y = self.data['y'].to_numpy() - + # define initial guess theta_guess = np.array([15, 0.5]) - + theta_hat, cov = scipy.optimize.curve_fit(model, t, y, p0=theta_guess) - - self.assertAlmostEqual(theta_hat[0], 19.1426, places=2) # 19.1426 from the paper - self.assertAlmostEqual(theta_hat[1], 0.5311, places=2) # 0.5311 from the paper - - self.assertAlmostEqual(cov[0,0], 6.22864, places=2) # 6.22864 from paper - self.assertAlmostEqual(cov[0,1], -0.4322, places=2) # -0.4322 from paper - self.assertAlmostEqual(cov[1,0], -0.4322, places=2) # -0.4322 from paper - self.assertAlmostEqual(cov[1,1], 0.04124, places=2) # 0.04124 from paper - - - -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") + + self.assertAlmostEqual( + theta_hat[0], 19.1426, places=2 + ) # 19.1426 from the paper + self.assertAlmostEqual(theta_hat[1], 0.5311, places=2) # 0.5311 from the paper + + self.assertAlmostEqual(cov[0, 0], 6.22864, places=2) # 6.22864 from paper + self.assertAlmostEqual(cov[0, 1], -0.4322, places=2) # -0.4322 from paper + self.assertAlmostEqual(cov[1, 0], -0.4322, places=2) # -0.4322 from paper + self.assertAlmostEqual(cov[1, 1], 0.04124, places=2) # 0.04124 from paper + + +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") class TestModelVariants(unittest.TestCase): - def setUp(self): - - self.data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0], - [4,16.0],[5,15.6],[7,19.8]], columns=['hour', 'y']) + self.data = pd.DataFrame( + data=[[1, 8.3], [2, 10.3], [3, 19.0], [4, 16.0], [5, 15.6], [7, 19.8]], + columns=['hour', 'y'], + ) def rooney_biegler_params(data): - model = pyo.ConcreteModel() - model.asymptote = pyo.Param(initialize = 15, mutable=True) - model.rate_constant = pyo.Param(initialize = 0.5, mutable=True) - + model.asymptote = pyo.Param(initialize=15, mutable=True) + model.rate_constant = pyo.Param(initialize=0.5, mutable=True) + def response_rule(m, h): expr = m.asymptote * (1 - pyo.exp(-m.rate_constant * h)) return expr - model.response_function = pyo.Expression(data.hour, rule = response_rule) - + + model.response_function = pyo.Expression(data.hour, rule=response_rule) + return model - + def rooney_biegler_indexed_params(data): - model = pyo.ConcreteModel() - - model.param_names = pyo.Set(initialize=['asymptote','rate_constant']) - model.theta = pyo.Param(model.param_names, initialize={'asymptote':15, 'rate_constant':0.5}, mutable=True) - + + model.param_names = pyo.Set(initialize=['asymptote', 'rate_constant']) + model.theta = pyo.Param( + model.param_names, + initialize={'asymptote': 15, 'rate_constant': 0.5}, + mutable=True, + ) + def response_rule(m, h): - expr = m.theta['asymptote'] * (1 - pyo.exp(-m.theta['rate_constant'] * h)) + expr = m.theta['asymptote'] * ( + 1 - pyo.exp(-m.theta['rate_constant'] * h) + ) return expr - model.response_function = pyo.Expression(data.hour, rule = response_rule) - + + model.response_function = pyo.Expression(data.hour, rule=response_rule) + return model - + def rooney_biegler_vars(data): - model = pyo.ConcreteModel() - model.asymptote = pyo.Var(initialize = 15) - model.rate_constant = pyo.Var(initialize = 0.5) - model.asymptote.fixed = True # parmest will unfix theta variables + model.asymptote = pyo.Var(initialize=15) + model.rate_constant = pyo.Var(initialize=0.5) + model.asymptote.fixed = True # parmest will unfix theta variables model.rate_constant.fixed = True - + def response_rule(m, h): expr = m.asymptote * (1 - pyo.exp(-m.rate_constant * h)) return expr - model.response_function = pyo.Expression(data.hour, rule = response_rule) - + + model.response_function = pyo.Expression(data.hour, rule=response_rule) + return model - + def rooney_biegler_indexed_vars(data): - model = pyo.ConcreteModel() - - model.var_names = pyo.Set(initialize=['asymptote','rate_constant']) - model.theta = pyo.Var(model.var_names, initialize={'asymptote':15, 'rate_constant':0.5}) - model.theta['asymptote'].fixed = True # parmest will unfix theta variables, even when they are indexed + + model.var_names = pyo.Set(initialize=['asymptote', 'rate_constant']) + model.theta = pyo.Var( + model.var_names, initialize={'asymptote': 15, 'rate_constant': 0.5} + ) + model.theta[ + 'asymptote' + ].fixed = ( + True # parmest will unfix theta variables, even when they are indexed + ) model.theta['rate_constant'].fixed = True - + def response_rule(m, h): - expr = m.theta['asymptote'] * (1 - pyo.exp(-m.theta['rate_constant'] * h)) + expr = m.theta['asymptote'] * ( + 1 - pyo.exp(-m.theta['rate_constant'] * h) + ) return expr - model.response_function = pyo.Expression(data.hour, rule = response_rule) - + + model.response_function = pyo.Expression(data.hour, rule=response_rule) + return model def SSE(model, data): - expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index) + expr = sum( + (data.y[i] - model.response_function[data.hour[i]]) ** 2 + for i in data.index + ) return expr - + self.objective_function = SSE - + theta_vals = pd.DataFrame([20, 1], index=['asymptote', 'rate_constant']).T - theta_vals_index = pd.DataFrame([20, 1], index=["theta['asymptote']", "theta['rate_constant']"]).T - + theta_vals_index = pd.DataFrame( + [20, 1], index=["theta['asymptote']", "theta['rate_constant']"] + ).T + self.input = { - 'param': { - 'model': rooney_biegler_params, - 'theta_names': ['asymptote', 'rate_constant'], - 'theta_vals': theta_vals}, - 'param_index': { - 'model': rooney_biegler_indexed_params, - 'theta_names': ['theta'], - 'theta_vals': theta_vals_index}, - 'vars': { - 'model': rooney_biegler_vars, - 'theta_names': ['asymptote', 'rate_constant'], - 'theta_vals': theta_vals}, - 'vars_index': { - 'model': rooney_biegler_indexed_vars, - 'theta_names': ['theta'], - 'theta_vals': theta_vals_index}, - 'vars_quoted_index': { - 'model': rooney_biegler_indexed_vars, - 'theta_names': ["theta['asymptote']", "theta['rate_constant']"], - 'theta_vals': theta_vals_index}, - 'vars_str_index': { - 'model': rooney_biegler_indexed_vars, - 'theta_names': ["theta[asymptote]", "theta[rate_constant]"], - 'theta_vals': theta_vals_index}, - } - + 'param': { + 'model': rooney_biegler_params, + 'theta_names': ['asymptote', 'rate_constant'], + 'theta_vals': theta_vals, + }, + 'param_index': { + 'model': rooney_biegler_indexed_params, + 'theta_names': ['theta'], + 'theta_vals': theta_vals_index, + }, + 'vars': { + 'model': rooney_biegler_vars, + 'theta_names': ['asymptote', 'rate_constant'], + 'theta_vals': theta_vals, + }, + 'vars_index': { + 'model': rooney_biegler_indexed_vars, + 'theta_names': ['theta'], + 'theta_vals': theta_vals_index, + }, + 'vars_quoted_index': { + 'model': rooney_biegler_indexed_vars, + 'theta_names': ["theta['asymptote']", "theta['rate_constant']"], + 'theta_vals': theta_vals_index, + }, + 'vars_str_index': { + 'model': rooney_biegler_indexed_vars, + 'theta_names': ["theta[asymptote]", "theta[rate_constant]"], + 'theta_vals': theta_vals_index, + }, + } + @unittest.skipIf(not pynumero_ASL_available, "pynumero ASL is not available") @unittest.skipIf( not parmest.inverse_reduced_hessian_available, - "Cannot test covariance matrix: required ASL dependency is missing") + "Cannot test covariance matrix: required ASL dependency is missing", + ) def test_parmest_basics(self): - for model_type, parmest_input in self.input.items(): - pest = parmest.Estimator(parmest_input['model'], self.data, parmest_input['theta_names'], self.objective_function) - + pest = parmest.Estimator( + parmest_input['model'], + self.data, + parmest_input['theta_names'], + self.objective_function, + ) + objval, thetavals, cov = pest.theta_est(calc_cov=True, cov_n=6) self.assertAlmostEqual(objval, 4.3317112, places=2) - self.assertAlmostEqual(cov.iloc[0,0], 6.30579403, places=2) # 6.22864 from paper - self.assertAlmostEqual(cov.iloc[0,1], -0.4395341, places=2) # -0.4322 from paper - self.assertAlmostEqual(cov.iloc[1,0], -0.4395341, places=2) # -0.4322 from paper - self.assertAlmostEqual(cov.iloc[1,1], 0.04193591, places=2) # 0.04124 from paper - + self.assertAlmostEqual( + cov.iloc[0, 0], 6.30579403, places=2 + ) # 6.22864 from paper + self.assertAlmostEqual( + cov.iloc[0, 1], -0.4395341, places=2 + ) # -0.4322 from paper + self.assertAlmostEqual( + cov.iloc[1, 0], -0.4395341, places=2 + ) # -0.4322 from paper + self.assertAlmostEqual( + cov.iloc[1, 1], 0.04193591, places=2 + ) # 0.04124 from paper + obj_at_theta = pest.objective_at_theta(parmest_input['theta_vals']) self.assertAlmostEqual(obj_at_theta['obj'][0], 16.531953, places=2) + def test_parmest_basics_with_initialize_parmest_model_option(self): + for model_type, parmest_input in self.input.items(): + pest = parmest.Estimator( + parmest_input['model'], + self.data, + parmest_input['theta_names'], + self.objective_function, + ) -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") -@unittest.skipIf(not ipopt_available, - "The 'ipopt' solver is not available") -class TestReactorDesign(unittest.TestCase): + objval, thetavals, cov = pest.theta_est(calc_cov=True, cov_n=6) + + self.assertAlmostEqual(objval, 4.3317112, places=2) + self.assertAlmostEqual( + cov.iloc[0, 0], 6.30579403, places=2 + ) # 6.22864 from paper + self.assertAlmostEqual( + cov.iloc[0, 1], -0.4395341, places=2 + ) # -0.4322 from paper + self.assertAlmostEqual( + cov.iloc[1, 0], -0.4395341, places=2 + ) # -0.4322 from paper + self.assertAlmostEqual( + cov.iloc[1, 1], 0.04193591, places=2 + ) # 0.04124 from paper + + obj_at_theta = pest.objective_at_theta( + parmest_input['theta_vals'], initialize_parmest_model=True + ) + + self.assertAlmostEqual(obj_at_theta['obj'][0], 16.531953, places=2) + + def test_parmest_basics_with_square_problem_solve(self): + for model_type, parmest_input in self.input.items(): + pest = parmest.Estimator( + parmest_input['model'], + self.data, + parmest_input['theta_names'], + self.objective_function, + ) + + obj_at_theta = pest.objective_at_theta( + parmest_input['theta_vals'], initialize_parmest_model=True + ) + + objval, thetavals, cov = pest.theta_est(calc_cov=True, cov_n=6) + + self.assertAlmostEqual(objval, 4.3317112, places=2) + self.assertAlmostEqual( + cov.iloc[0, 0], 6.30579403, places=2 + ) # 6.22864 from paper + self.assertAlmostEqual( + cov.iloc[0, 1], -0.4395341, places=2 + ) # -0.4322 from paper + self.assertAlmostEqual( + cov.iloc[1, 0], -0.4395341, places=2 + ) # -0.4322 from paper + self.assertAlmostEqual( + cov.iloc[1, 1], 0.04193591, places=2 + ) # 0.04124 from paper + + self.assertAlmostEqual(obj_at_theta['obj'][0], 16.531953, places=2) + + def test_parmest_basics_with_square_problem_solve_no_theta_vals(self): + for model_type, parmest_input in self.input.items(): + pest = parmest.Estimator( + parmest_input['model'], + self.data, + parmest_input['theta_names'], + self.objective_function, + ) + + obj_at_theta = pest.objective_at_theta(initialize_parmest_model=True) + objval, thetavals, cov = pest.theta_est(calc_cov=True, cov_n=6) + + self.assertAlmostEqual(objval, 4.3317112, places=2) + self.assertAlmostEqual( + cov.iloc[0, 0], 6.30579403, places=2 + ) # 6.22864 from paper + self.assertAlmostEqual( + cov.iloc[0, 1], -0.4395341, places=2 + ) # -0.4322 from paper + self.assertAlmostEqual( + cov.iloc[1, 0], -0.4395341, places=2 + ) # -0.4322 from paper + self.assertAlmostEqual( + cov.iloc[1, 1], 0.04193591, places=2 + ) # 0.04124 from paper + + +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) +@unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") +class TestReactorDesign(unittest.TestCase): def setUp(self): - from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model + from pyomo.contrib.parmest.examples.reactor_design.reactor_design import ( + reactor_design_model, + ) # Data from the design - data = pd.DataFrame(data=[[1.05, 10000, 3458.4, 1060.8, 1683.9, 1898.5], - [1.10, 10000, 3535.1, 1064.8, 1613.3, 1893.4], - [1.15, 10000, 3609.1, 1067.8, 1547.5, 1887.8], - [1.20, 10000, 3680.7, 1070.0, 1486.1, 1881.6], - [1.25, 10000, 3750.0, 1071.4, 1428.6, 1875.0], - [1.30, 10000, 3817.1, 1072.2, 1374.6, 1868.0], - [1.35, 10000, 3882.2, 1072.4, 1324.0, 1860.7], - [1.40, 10000, 3945.4, 1072.1, 1276.3, 1853.1], - [1.45, 10000, 4006.7, 1071.3, 1231.4, 1845.3], - [1.50, 10000, 4066.4, 1070.1, 1189.0, 1837.3], - [1.55, 10000, 4124.4, 1068.5, 1148.9, 1829.1], - [1.60, 10000, 4180.9, 1066.5, 1111.0, 1820.8], - [1.65, 10000, 4235.9, 1064.3, 1075.0, 1812.4], - [1.70, 10000, 4289.5, 1061.8, 1040.9, 1803.9], - [1.75, 10000, 4341.8, 1059.0, 1008.5, 1795.3], - [1.80, 10000, 4392.8, 1056.0, 977.7, 1786.7], - [1.85, 10000, 4442.6, 1052.8, 948.4, 1778.1], - [1.90, 10000, 4491.3, 1049.4, 920.5, 1769.4], - [1.95, 10000, 4538.8, 1045.8, 893.9, 1760.8]], - columns=['sv', 'caf', 'ca', 'cb', 'cc', 'cd']) + data = pd.DataFrame( + data=[ + [1.05, 10000, 3458.4, 1060.8, 1683.9, 1898.5], + [1.10, 10000, 3535.1, 1064.8, 1613.3, 1893.4], + [1.15, 10000, 3609.1, 1067.8, 1547.5, 1887.8], + [1.20, 10000, 3680.7, 1070.0, 1486.1, 1881.6], + [1.25, 10000, 3750.0, 1071.4, 1428.6, 1875.0], + [1.30, 10000, 3817.1, 1072.2, 1374.6, 1868.0], + [1.35, 10000, 3882.2, 1072.4, 1324.0, 1860.7], + [1.40, 10000, 3945.4, 1072.1, 1276.3, 1853.1], + [1.45, 10000, 4006.7, 1071.3, 1231.4, 1845.3], + [1.50, 10000, 4066.4, 1070.1, 1189.0, 1837.3], + [1.55, 10000, 4124.4, 1068.5, 1148.9, 1829.1], + [1.60, 10000, 4180.9, 1066.5, 1111.0, 1820.8], + [1.65, 10000, 4235.9, 1064.3, 1075.0, 1812.4], + [1.70, 10000, 4289.5, 1061.8, 1040.9, 1803.9], + [1.75, 10000, 4341.8, 1059.0, 1008.5, 1795.3], + [1.80, 10000, 4392.8, 1056.0, 977.7, 1786.7], + [1.85, 10000, 4442.6, 1052.8, 948.4, 1778.1], + [1.90, 10000, 4491.3, 1049.4, 920.5, 1769.4], + [1.95, 10000, 4538.8, 1045.8, 893.9, 1760.8], + ], + columns=['sv', 'caf', 'ca', 'cb', 'cc', 'cd'], + ) theta_names = ['k1', 'k2', 'k3'] def SSE(model, data): - expr = (float(data['ca']) - model.ca)**2 + \ - (float(data['cb']) - model.cb)**2 + \ - (float(data['cc']) - model.cc)**2 + \ - (float(data['cd']) - model.cd)**2 + expr = ( + (float(data['ca']) - model.ca) ** 2 + + (float(data['cb']) - model.cb) ** 2 + + (float(data['cc']) - model.cc) ** 2 + + (float(data['cd']) - model.cd) ** 2 + ) return expr solver_options = {"max_iter": 6000} - self.pest = parmest.Estimator(reactor_design_model, data, - theta_names, SSE, solver_options=solver_options) + self.pest = parmest.Estimator( + reactor_design_model, data, theta_names, SSE, solver_options=solver_options + ) def test_theta_est(self): # used in data reconciliation objval, thetavals = self.pest.theta_est() - self.assertAlmostEqual(thetavals['k1'], 5.0/6.0, places=4) - self.assertAlmostEqual(thetavals['k2'], 5.0/3.0, places=4) - self.assertAlmostEqual(thetavals['k3'], 1.0/6000.0, places=7) + self.assertAlmostEqual(thetavals['k1'], 5.0 / 6.0, places=4) + self.assertAlmostEqual(thetavals['k2'], 5.0 / 3.0, places=4) + self.assertAlmostEqual(thetavals['k3'], 1.0 / 6000.0, places=7) def test_return_values(self): - objval, thetavals, data_rec =\ - self.pest.theta_est(return_values=['ca', 'cb', 'cc', 'cd', 'caf']) + objval, thetavals, data_rec = self.pest.theta_est( + return_values=['ca', 'cb', 'cc', 'cd', 'caf'] + ) self.assertAlmostEqual(data_rec["cc"].loc[18], 893.84924, places=3) -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") -@unittest.skipIf(not ipopt_available, - "The 'ipopt' solver is not available") -class TestReactorDesign_DAE(unittest.TestCase): - # Based on a reactor example in `Chemical Reactor Analysis and Design Fundamentals`, +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) +@unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") +class TestReactorDesign_DAE(unittest.TestCase): + # Based on a reactor example in `Chemical Reactor Analysis and Design Fundamentals`, # https://sites.engineering.ucsb.edu/~jbraw/chemreacfun/ # https://sites.engineering.ucsb.edu/~jbraw/chemreacfun/fig-html/appendix/fig-A-10.html - + def setUp(self): - def ABC_model(data): - ca_meas = data['ca'] cb_meas = data['cb'] cc_meas = data['cc'] - + if isinstance(data, pd.DataFrame): - meas_t = data.index # time index - else: # dictionary - meas_t = list(ca_meas.keys()) # nested dictionary - + meas_t = data.index # time index + else: # dictionary + meas_t = list(ca_meas.keys()) # nested dictionary + ca0 = 1.0 cb0 = 0.0 cc0 = 0.0 - + m = pyo.ConcreteModel() - - m.k1 =pyo.Var(initialize = 0.5, bounds = (1e-4, 10)) - m.k2 = pyo.Var(initialize = 3.0, bounds = (1e-4, 10)) - - m.time = dae.ContinuousSet(bounds = (0.0, 5.0), initialize = meas_t) - + + m.k1 = pyo.Var(initialize=0.5, bounds=(1e-4, 10)) + m.k2 = pyo.Var(initialize=3.0, bounds=(1e-4, 10)) + + m.time = dae.ContinuousSet(bounds=(0.0, 5.0), initialize=meas_t) + # initialization and bounds - m.ca = pyo.Var(m.time, initialize = ca0, bounds = (-1e-3, ca0+1e-3)) - m.cb = pyo.Var(m.time, initialize = cb0, bounds = (-1e-3, ca0+1e-3)) - m.cc = pyo.Var(m.time, initialize = cc0, bounds = (-1e-3, ca0+1e-3)) - - m.dca = dae.DerivativeVar(m.ca, wrt = m.time) - m.dcb = dae.DerivativeVar(m.cb, wrt = m.time) - m.dcc = dae.DerivativeVar(m.cc, wrt = m.time) - + m.ca = pyo.Var(m.time, initialize=ca0, bounds=(-1e-3, ca0 + 1e-3)) + m.cb = pyo.Var(m.time, initialize=cb0, bounds=(-1e-3, ca0 + 1e-3)) + m.cc = pyo.Var(m.time, initialize=cc0, bounds=(-1e-3, ca0 + 1e-3)) + + m.dca = dae.DerivativeVar(m.ca, wrt=m.time) + m.dcb = dae.DerivativeVar(m.cb, wrt=m.time) + m.dcc = dae.DerivativeVar(m.cc, wrt=m.time) + def _dcarate(m, t): if t == 0: return pyo.Constraint.Skip else: return m.dca[t] == -m.k1 * m.ca[t] - m.dcarate = pyo.Constraint(m.time, rule = _dcarate) - + + m.dcarate = pyo.Constraint(m.time, rule=_dcarate) + def _dcbrate(m, t): if t == 0: return pyo.Constraint.Skip else: return m.dcb[t] == m.k1 * m.ca[t] - m.k2 * m.cb[t] - m.dcbrate = pyo.Constraint(m.time, rule = _dcbrate) - + + m.dcbrate = pyo.Constraint(m.time, rule=_dcbrate) + def _dccrate(m, t): if t == 0: return pyo.Constraint.Skip else: return m.dcc[t] == m.k2 * m.cb[t] - m.dccrate = pyo.Constraint(m.time, rule = _dccrate) - + + m.dccrate = pyo.Constraint(m.time, rule=_dccrate) + def ComputeFirstStageCost_rule(m): return 0 + m.FirstStageCost = pyo.Expression(rule=ComputeFirstStageCost_rule) - + def ComputeSecondStageCost_rule(m): - return sum((m.ca[t] - ca_meas[t]) ** 2 + (m.cb[t] - cb_meas[t]) ** 2 - + (m.cc[t] - cc_meas[t]) ** 2 for t in meas_t) + return sum( + (m.ca[t] - ca_meas[t]) ** 2 + + (m.cb[t] - cb_meas[t]) ** 2 + + (m.cc[t] - cc_meas[t]) ** 2 + for t in meas_t + ) + m.SecondStageCost = pyo.Expression(rule=ComputeSecondStageCost_rule) - + def total_cost_rule(model): return model.FirstStageCost + model.SecondStageCost - m.Total_Cost_Objective = pyo.Objective(rule=total_cost_rule, sense=pyo.minimize) - + + m.Total_Cost_Objective = pyo.Objective( + rule=total_cost_rule, sense=pyo.minimize + ) + disc = pyo.TransformationFactory('dae.collocation') disc.apply_to(m, nfe=20, ncp=2) - + return m - + # This example tests data formatted in 3 ways # Each format holds 1 scenario # 1. dataframe with time index # 2. nested dictionary {ca: {t, val pairs}, ... } - data = [[0.000, 0.957, -0.031, -0.015], - [0.263, 0.557, 0.330, 0.044], - [0.526, 0.342, 0.512, 0.156], - [0.789, 0.224, 0.499, 0.310], - [1.053, 0.123, 0.428, 0.454], - [1.316, 0.079, 0.396, 0.556], - [1.579, 0.035, 0.303, 0.651], - [1.842, 0.029, 0.287, 0.658], - [2.105, 0.025, 0.221, 0.750], - [2.368, 0.017, 0.148, 0.854], - [2.632, -0.002, 0.182, 0.845], - [2.895, 0.009, 0.116, 0.893], - [3.158, -0.023, 0.079, 0.942], - [3.421, 0.006, 0.078, 0.899], - [3.684, 0.016, 0.059, 0.942], - [3.947, 0.014, 0.036, 0.991], - [4.211, -0.009, 0.014, 0.988], - [4.474, -0.030, 0.036, 0.941], - [4.737, 0.004, 0.036, 0.971], - [5.000, -0.024, 0.028, 0.985]] + data = [ + [0.000, 0.957, -0.031, -0.015], + [0.263, 0.557, 0.330, 0.044], + [0.526, 0.342, 0.512, 0.156], + [0.789, 0.224, 0.499, 0.310], + [1.053, 0.123, 0.428, 0.454], + [1.316, 0.079, 0.396, 0.556], + [1.579, 0.035, 0.303, 0.651], + [1.842, 0.029, 0.287, 0.658], + [2.105, 0.025, 0.221, 0.750], + [2.368, 0.017, 0.148, 0.854], + [2.632, -0.002, 0.182, 0.845], + [2.895, 0.009, 0.116, 0.893], + [3.158, -0.023, 0.079, 0.942], + [3.421, 0.006, 0.078, 0.899], + [3.684, 0.016, 0.059, 0.942], + [3.947, 0.014, 0.036, 0.991], + [4.211, -0.009, 0.014, 0.988], + [4.474, -0.030, 0.036, 0.941], + [4.737, 0.004, 0.036, 0.971], + [5.000, -0.024, 0.028, 0.985], + ] data = pd.DataFrame(data, columns=['t', 'ca', 'cb', 'cc']) data_df = data.set_index('t') - data_dict = {'ca': {k:v for (k, v) in zip(data.t, data.ca)}, - 'cb': {k:v for (k, v) in zip(data.t, data.cb)}, - 'cc': {k:v for (k, v) in zip(data.t, data.cc)} } - + data_dict = { + 'ca': {k: v for (k, v) in zip(data.t, data.ca)}, + 'cb': {k: v for (k, v) in zip(data.t, data.cb)}, + 'cc': {k: v for (k, v) in zip(data.t, data.cc)}, + } + theta_names = ['k1', 'k2'] - + self.pest_df = parmest.Estimator(ABC_model, [data_df], theta_names) self.pest_dict = parmest.Estimator(ABC_model, [data_dict], theta_names) - + + # Estimator object with multiple scenarios + self.pest_df_multiple = parmest.Estimator( + ABC_model, [data_df, data_df], theta_names + ) + self.pest_dict_multiple = parmest.Estimator( + ABC_model, [data_dict, data_dict], theta_names + ) + # Create an instance of the model self.m_df = ABC_model(data_df) self.m_dict = ABC_model(data_dict) - - + def test_dataformats(self): - obj1, theta1 = self.pest_df.theta_est() obj2, theta2 = self.pest_dict.theta_est() - + self.assertAlmostEqual(obj1, obj2, places=6) self.assertAlmostEqual(theta1['k1'], theta2['k1'], places=6) self.assertAlmostEqual(theta1['k2'], theta2['k2'], places=6) - + + def test_return_continuous_set(self): + ''' + test if ContinuousSet elements are returned correctly from theta_est() + ''' + obj1, theta1, return_vals1 = self.pest_df.theta_est(return_values=['time']) + obj2, theta2, return_vals2 = self.pest_dict.theta_est(return_values=['time']) + self.assertAlmostEqual(return_vals1['time'].loc[0][18], 2.368, places=3) + self.assertAlmostEqual(return_vals2['time'].loc[0][18], 2.368, places=3) + + def test_return_continuous_set_multiple_datasets(self): + ''' + test if ContinuousSet elements are returned correctly from theta_est() + ''' + obj1, theta1, return_vals1 = self.pest_df_multiple.theta_est( + return_values=['time'] + ) + obj2, theta2, return_vals2 = self.pest_dict_multiple.theta_est( + return_values=['time'] + ) + self.assertAlmostEqual(return_vals1['time'].loc[1][18], 2.368, places=3) + self.assertAlmostEqual(return_vals2['time'].loc[1][18], 2.368, places=3) + def test_covariance(self): - - from pyomo.contrib.interior_point.inverse_reduced_hessian import inv_reduced_hessian_barrier - - # Number of datapoints. + from pyomo.contrib.interior_point.inverse_reduced_hessian import ( + inv_reduced_hessian_barrier, + ) + + # Number of datapoints. # 3 data components (ca, cb, cc), 20 timesteps, 1 scenario = 60 - # In this example, this is the number of data points in data_df, but that's - # only because the data is indexed by time and contains no additional inforamtion. + # In this example, this is the number of data points in data_df, but that's + # only because the data is indexed by time and contains no additional information. n = 60 - + # Compute covariance using parmest obj, theta, cov = self.pest_df.theta_est(calc_cov=True, cov_n=n) - + # Compute covariance using interior_point vars_list = [self.m_df.k1, self.m_df.k2] - solve_result, inv_red_hes = inv_reduced_hessian_barrier(self.m_df, - independent_variables= vars_list, - tee=True) + solve_result, inv_red_hes = inv_reduced_hessian_barrier( + self.m_df, independent_variables=vars_list, tee=True + ) l = len(vars_list) cov_interior_point = 2 * obj / (n - l) * inv_red_hes - cov_interior_point = pd.DataFrame(cov_interior_point, ['k1', 'k2'], ['k1', 'k2']) - + cov_interior_point = pd.DataFrame( + cov_interior_point, ['k1', 'k2'], ['k1', 'k2'] + ) + cov_diff = (cov - cov_interior_point).abs().sum().sum() - + self.assertTrue(cov.loc['k1', 'k1'] > 0) self.assertTrue(cov.loc['k2', 'k2'] > 0) self.assertAlmostEqual(cov_diff, 0, places=6) - - + +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) +@unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") +class TestSquareInitialization_RooneyBiegler(unittest.TestCase): + def setUp(self): + from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler_with_constraint import ( + rooney_biegler_model_with_constraint, + ) + + # Note, the data used in this test has been corrected to use data.loc[5,'hour'] = 7 (instead of 6) + data = pd.DataFrame( + data=[[1, 8.3], [2, 10.3], [3, 19.0], [4, 16.0], [5, 15.6], [7, 19.8]], + columns=['hour', 'y'], + ) + + theta_names = ['asymptote', 'rate_constant'] + + def SSE(model, data): + expr = sum( + (data.y[i] - model.response_function[data.hour[i]]) ** 2 + for i in data.index + ) + return expr + + solver_options = {'tol': 1e-8} + + self.data = data + self.pest = parmest.Estimator( + rooney_biegler_model_with_constraint, + data, + theta_names, + SSE, + solver_options=solver_options, + tee=True, + ) + + def test_theta_est_with_square_initialization(self): + obj_init = self.pest.objective_at_theta(initialize_parmest_model=True) + objval, thetavals = self.pest.theta_est() + + self.assertAlmostEqual(objval, 4.3317112, places=2) + self.assertAlmostEqual( + thetavals['asymptote'], 19.1426, places=2 + ) # 19.1426 from the paper + self.assertAlmostEqual( + thetavals['rate_constant'], 0.5311, places=2 + ) # 0.5311 from the paper + + def test_theta_est_with_square_initialization_and_custom_init_theta(self): + theta_vals_init = pd.DataFrame( + data=[[19.0, 0.5]], columns=['asymptote', 'rate_constant'] + ) + obj_init = self.pest.objective_at_theta( + theta_values=theta_vals_init, initialize_parmest_model=True + ) + objval, thetavals = self.pest.theta_est() + self.assertAlmostEqual(objval, 4.3317112, places=2) + self.assertAlmostEqual( + thetavals['asymptote'], 19.1426, places=2 + ) # 19.1426 from the paper + self.assertAlmostEqual( + thetavals['rate_constant'], 0.5311, places=2 + ) # 0.5311 from the paper + + def test_theta_est_with_square_initialization_diagnostic_mode_true(self): + self.pest.diagnostic_mode = True + obj_init = self.pest.objective_at_theta(initialize_parmest_model=True) + objval, thetavals = self.pest.theta_est() + + self.assertAlmostEqual(objval, 4.3317112, places=2) + self.assertAlmostEqual( + thetavals['asymptote'], 19.1426, places=2 + ) # 19.1426 from the paper + self.assertAlmostEqual( + thetavals['rate_constant'], 0.5311, places=2 + ) # 0.5311 from the paper + + self.pest.diagnostic_mode = False + + if __name__ == '__main__': unittest.main() diff --git a/pyomo/contrib/parmest/tests/test_scenariocreator.py b/pyomo/contrib/parmest/tests/test_scenariocreator.py index 350db7db0f3..a2dcf4c2739 100644 --- a/pyomo/contrib/parmest/tests/test_scenariocreator.py +++ b/pyomo/contrib/parmest/tests/test_scenariocreator.py @@ -9,9 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.common.dependencies import ( - pandas as pd, pandas_available, -) +from pyomo.common.dependencies import pandas as pd, pandas_available uuid_available = True try: @@ -25,48 +23,60 @@ import pyomo.contrib.parmest.scenariocreator as sc import pyomo.environ as pyo from pyomo.environ import SolverFactory + ipopt_available = SolverFactory('ipopt').available() testdir = os.path.dirname(os.path.abspath(__file__)) -@unittest.skipIf(not parmest.parmest_available, "Cannot test parmest: required dependencies are missing") + +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") class TestScenarioReactorDesign(unittest.TestCase): - def setUp(self): - from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model - - # Data from the design - data = pd.DataFrame(data=[[1.05, 10000, 3458.4, 1060.8, 1683.9, 1898.5], - [1.10, 10000, 3535.1, 1064.8, 1613.3, 1893.4], - [1.15, 10000, 3609.1, 1067.8, 1547.5, 1887.8], - [1.20, 10000, 3680.7, 1070.0, 1486.1, 1881.6], - [1.25, 10000, 3750.0, 1071.4, 1428.6, 1875.0], - [1.30, 10000, 3817.1, 1072.2, 1374.6, 1868.0], - [1.35, 10000, 3882.2, 1072.4, 1324.0, 1860.7], - [1.40, 10000, 3945.4, 1072.1, 1276.3, 1853.1], - [1.45, 10000, 4006.7, 1071.3, 1231.4, 1845.3], - [1.50, 10000, 4066.4, 1070.1, 1189.0, 1837.3], - [1.55, 10000, 4124.4, 1068.5, 1148.9, 1829.1], - [1.60, 10000, 4180.9, 1066.5, 1111.0, 1820.8], - [1.65, 10000, 4235.9, 1064.3, 1075.0, 1812.4], - [1.70, 10000, 4289.5, 1061.8, 1040.9, 1803.9], - [1.75, 10000, 4341.8, 1059.0, 1008.5, 1795.3], - [1.80, 10000, 4392.8, 1056.0, 977.7, 1786.7], - [1.85, 10000, 4442.6, 1052.8, 948.4, 1778.1], - [1.90, 10000, 4491.3, 1049.4, 920.5, 1769.4], - [1.95, 10000, 4538.8, 1045.8, 893.9, 1760.8]], - columns=['sv', 'caf', 'ca', 'cb', 'cc', 'cd']) + from pyomo.contrib.parmest.examples.reactor_design.reactor_design import ( + reactor_design_model, + ) + + # Data from the design + data = pd.DataFrame( + data=[ + [1.05, 10000, 3458.4, 1060.8, 1683.9, 1898.5], + [1.10, 10000, 3535.1, 1064.8, 1613.3, 1893.4], + [1.15, 10000, 3609.1, 1067.8, 1547.5, 1887.8], + [1.20, 10000, 3680.7, 1070.0, 1486.1, 1881.6], + [1.25, 10000, 3750.0, 1071.4, 1428.6, 1875.0], + [1.30, 10000, 3817.1, 1072.2, 1374.6, 1868.0], + [1.35, 10000, 3882.2, 1072.4, 1324.0, 1860.7], + [1.40, 10000, 3945.4, 1072.1, 1276.3, 1853.1], + [1.45, 10000, 4006.7, 1071.3, 1231.4, 1845.3], + [1.50, 10000, 4066.4, 1070.1, 1189.0, 1837.3], + [1.55, 10000, 4124.4, 1068.5, 1148.9, 1829.1], + [1.60, 10000, 4180.9, 1066.5, 1111.0, 1820.8], + [1.65, 10000, 4235.9, 1064.3, 1075.0, 1812.4], + [1.70, 10000, 4289.5, 1061.8, 1040.9, 1803.9], + [1.75, 10000, 4341.8, 1059.0, 1008.5, 1795.3], + [1.80, 10000, 4392.8, 1056.0, 977.7, 1786.7], + [1.85, 10000, 4442.6, 1052.8, 948.4, 1778.1], + [1.90, 10000, 4491.3, 1049.4, 920.5, 1769.4], + [1.95, 10000, 4538.8, 1045.8, 893.9, 1760.8], + ], + columns=['sv', 'caf', 'ca', 'cb', 'cc', 'cd'], + ) theta_names = ['k1', 'k2', 'k3'] - - def SSE(model, data): - expr = (float(data['ca']) - model.ca)**2 + \ - (float(data['cb']) - model.cb)**2 + \ - (float(data['cc']) - model.cc)**2 + \ - (float(data['cd']) - model.cd)**2 + + def SSE(model, data): + expr = ( + (float(data['ca']) - model.ca) ** 2 + + (float(data['cb']) - model.cb) ** 2 + + (float(data['cc']) - model.cc) ** 2 + + (float(data['cd']) - model.cd) ** 2 + ) return expr - + self.pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE) def test_scen_from_exps(self): @@ -77,30 +87,30 @@ def test_scen_from_exps(self): df = pd.read_csv("delme_exp_csv.csv") os.remove("delme_exp_csv.csv") # March '20: all reactor_design experiments have the same theta values! - k1val = df.loc[5].at["k1"] - self.assertAlmostEqual(k1val, 5.0/6.0, places=2) + k1val = df.loc[5].at["k1"] + self.assertAlmostEqual(k1val, 5.0 / 6.0, places=2) tval = experimentscens.ScenarioNumber(0).ThetaVals["k1"] - self.assertAlmostEqual(tval, 5.0/6.0, places=2) + self.assertAlmostEqual(tval, 5.0 / 6.0, places=2) - @unittest.skipIf(not uuid_available, "The uuid module is not available") def test_no_csv_if_empty(self): # low level test of scenario sets # verify that nothing is written, but no errors with empty set emptyset = sc.ScenarioSet("empty") - tfile = uuid.uuid4().hex+".csv" + tfile = uuid.uuid4().hex + ".csv" emptyset.write_csv(tfile) - self.assertFalse(os.path.exists(tfile), - "ScenarioSet wrote csv in spite of empty set") - - + self.assertFalse( + os.path.exists(tfile), "ScenarioSet wrote csv in spite of empty set" + ) -@unittest.skipIf(not parmest.parmest_available, "Cannot test parmest: required dependencies are missing") +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") class TestScenarioSemibatch(unittest.TestCase): - def setUp(self): import pyomo.contrib.parmest.examples.semibatch.semibatch as sb import json @@ -108,30 +118,29 @@ def setUp(self): # Vars to estimate in parmest theta_names = ['k1', 'k2', 'E1', 'E2'] - self.fbase = os.path.join(testdir,"..","examples","semibatch") + self.fbase = os.path.join(testdir, "..", "examples", "semibatch") # Data, list of dictionaries - data = [] + data = [] for exp_num in range(10): - fname = "exp"+str(exp_num+1)+".out" + fname = "exp" + str(exp_num + 1) + ".out" fullname = os.path.join(self.fbase, fname) - with open(fullname,'r') as infile: + with open(fullname, 'r') as infile: d = json.load(infile) data.append(d) - # Note, the model already includes a 'SecondStageCost' expression + # Note, the model already includes a 'SecondStageCost' expression # for the sum of squared error that will be used in parameter estimation self.pest = parmest.Estimator(sb.generate_model, data, theta_names) - def test_semibatch_bootstrap(self): - scenmaker = sc.ScenarioCreator(self.pest, "ipopt") bootscens = sc.ScenarioSet("Bootstrap") numtomake = 2 - scenmaker.ScenariosFromBoostrap(bootscens, numtomake, seed=1134) + scenmaker.ScenariosFromBootstrap(bootscens, numtomake, seed=1134) tval = bootscens.ScenarioNumber(0).ThetaVals["k1"] self.assertAlmostEqual(tval, 20.64, places=1) + if __name__ == '__main__': unittest.main() diff --git a/pyomo/contrib/parmest/tests/test_solver.py b/pyomo/contrib/parmest/tests/test_solver.py index 918c2ef053f..eb655023b9b 100644 --- a/pyomo/contrib/parmest/tests/test_solver.py +++ b/pyomo/contrib/parmest/tests/test_solver.py @@ -10,13 +10,18 @@ # ___________________________________________________________________________ from pyomo.common.dependencies import ( - numpy as np, numpy_available, - pandas as pd, pandas_available, - scipy, scipy_available, - matplotlib, matplotlib_available, + numpy as np, + numpy_available, + pandas as pd, + pandas_available, + scipy, + scipy_available, + matplotlib, + matplotlib_available, ) import platform + is_osx = platform.mac_ver()[0] != '' import pyomo.common.unittest as unittest @@ -27,34 +32,40 @@ import pyomo.environ as pyo from pyomo.opt import SolverFactory + ipopt_available = SolverFactory('ipopt').available() from pyomo.common.fileutils import find_library + pynumero_ASL_available = False if find_library('pynumero_ASL') is None else True testdir = os.path.dirname(os.path.abspath(__file__)) -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) @unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available") class TestSolver(unittest.TestCase): - def setUp(self): pass def test_ipopt_solve_with_stats(self): - - from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import rooney_biegler_model + from pyomo.contrib.parmest.examples.rooney_biegler.rooney_biegler import ( + rooney_biegler_model, + ) from pyomo.contrib.parmest.utils import ipopt_solve_with_stats - - data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0], - [4,16.0],[5,15.6],[7,19.8]], columns=['hour', 'y']) - + + data = pd.DataFrame( + data=[[1, 8.3], [2, 10.3], [3, 19.0], [4, 16.0], [5, 15.6], [7, 19.8]], + columns=['hour', 'y'], + ) + model = rooney_biegler_model(data) solver = pyo.SolverFactory('ipopt') solver.solve(model) - + status_obj, solved, iters, time, regu = ipopt_solve_with_stats(model, solver) self.assertEqual(solved, True) diff --git a/pyomo/contrib/parmest/tests/test_utils.py b/pyomo/contrib/parmest/tests/test_utils.py index 45ec2875062..bd0706ac38d 100644 --- a/pyomo/contrib/parmest/tests/test_utils.py +++ b/pyomo/contrib/parmest/tests/test_utils.py @@ -9,55 +9,60 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.common.dependencies import ( - pandas as pd, pandas_available, -) +from pyomo.common.dependencies import pandas as pd, pandas_available import pyomo.environ as pyo import pyomo.common.unittest as unittest import pyomo.contrib.parmest.parmest as parmest from pyomo.opt import SolverFactory -ipopt_available = SolverFactory('ipopt').available() +ipopt_available = SolverFactory('ipopt').available() -@unittest.skipIf(not parmest.parmest_available, - "Cannot test parmest: required dependencies are missing") -@unittest.skipIf(not ipopt_available, - "The 'ipopt' solver is not available") +@unittest.skipIf( + not parmest.parmest_available, + "Cannot test parmest: required dependencies are missing", +) +@unittest.skipIf(not ipopt_available, "The 'ipopt' solver is not available") class TestUtils(unittest.TestCase): @classmethod def setUpClass(self): pass - + @classmethod def tearDownClass(self): pass @unittest.pytest.mark.expensive def test_convert_param_to_var(self): - - from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model - - data = pd.DataFrame(data=[[1.05, 10000, 3458.4, 1060.8, 1683.9, 1898.5], - [1.10, 10000, 3535.1, 1064.8, 1613.3, 1893.4], - [1.15, 10000, 3609.1, 1067.8, 1547.5, 1887.8]], - columns=['sv', 'caf', 'ca', 'cb', 'cc', 'cd']) + from pyomo.contrib.parmest.examples.reactor_design.reactor_design import ( + reactor_design_model, + ) + + data = pd.DataFrame( + data=[ + [1.05, 10000, 3458.4, 1060.8, 1683.9, 1898.5], + [1.10, 10000, 3535.1, 1064.8, 1613.3, 1893.4], + [1.15, 10000, 3609.1, 1067.8, 1547.5, 1887.8], + ], + columns=['sv', 'caf', 'ca', 'cb', 'cc', 'cd'], + ) theta_names = ['k1', 'k2', 'k3'] - + instance = reactor_design_model(data.loc[0]) solver = pyo.SolverFactory('ipopt') solver.solve(instance) - - instance_vars = parmest.utils.convert_params_to_vars(instance, theta_names, fix_vars=True) + + instance_vars = parmest.utils.convert_params_to_vars( + instance, theta_names, fix_vars=True + ) solver.solve(instance_vars) - + assert instance.k1() == instance_vars.k1() assert instance.k2() == instance_vars.k2() assert instance.k3() == instance_vars.k3() - if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/parmest/utils/__init__.py b/pyomo/contrib/parmest/utils/__init__.py index 7b8245f7cad..1615ab206f7 100644 --- a/pyomo/contrib/parmest/utils/__init__.py +++ b/pyomo/contrib/parmest/utils/__init__.py @@ -9,17 +9,17 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.contrib.parmest.utils.create_ef import (get_objs, - create_EF, - find_active_objective, - ef_nonants) +from pyomo.contrib.parmest.utils.create_ef import ( + get_objs, + create_EF, + find_active_objective, + ef_nonants, +) from pyomo.contrib.parmest.utils.ipopt_solver_wrapper import ipopt_solve_with_stats from pyomo.contrib.parmest.utils.model_utils import convert_params_to_vars -from pyomo.contrib.parmest.utils.mpi_utils import (MPIInterface, - ParallelTaskManager) +from pyomo.contrib.parmest.utils.mpi_utils import MPIInterface, ParallelTaskManager -from pyomo.contrib.parmest.utils.scenario_tree import (build_vardatalist, - ScenarioNode) +from pyomo.contrib.parmest.utils.scenario_tree import build_vardatalist, ScenarioNode diff --git a/pyomo/contrib/parmest/utils/create_ef.py b/pyomo/contrib/parmest/utils/create_ef.py index ff6949649df..2e6c8541fa1 100644 --- a/pyomo/contrib/parmest/utils/create_ef.py +++ b/pyomo/contrib/parmest/utils/create_ef.py @@ -11,77 +11,89 @@ import pyomo.environ as pyo from pyomo.core import Objective + def get_objs(scenario_instance): - """ return the list of objective functions for scenario_instance""" - scenario_objs = scenario_instance.component_data_objects(pyo.Objective, - active=True, descend_into=True) + """return the list of objective functions for scenario_instance""" + scenario_objs = scenario_instance.component_data_objects( + pyo.Objective, active=True, descend_into=True + ) scenario_objs = list(scenario_objs) - if (len(scenario_objs) == 0): - raise RuntimeError("Scenario " + sname + " has no active " - "objective functions.") - if (len(scenario_objs) > 1): - print("WARNING: Scenario", sname, "has multiple active " - "objectives. Selecting the first objective for " - "inclusion in the extensive form.") + if len(scenario_objs) == 0: + raise RuntimeError("Scenario " + sname + " has no active objective functions.") + if len(scenario_objs) > 1: + print( + "WARNING: Scenario", + sname, + "has multiple active " + "objectives. Selecting the first objective for " + "inclusion in the extensive form.", + ) return scenario_objs def _models_have_same_sense(models): - """ Check if every model in the provided dict has the same objective sense. - - Input: - models (dict) -- Keys are scenario names, values are Pyomo - ConcreteModel objects. - Returns: - is_minimizing (bool) -- True if and only if minimizing. None if the - check fails. - check (bool) -- True only if all the models have the same sense (or - no models were provided) - Raises: - ValueError -- If any of the models has either none or multiple - active objectives. + """Check if every model in the provided dict has the same objective sense. + + Input: + models (dict) -- Keys are scenario names, values are Pyomo + ConcreteModel objects. + Returns: + is_minimizing (bool) -- True if and only if minimizing. None if the + check fails. + check (bool) -- True only if all the models have the same sense (or + no models were provided) + Raises: + ValueError -- If any of the models has either none or multiple + active objectives. """ - if (len(models) == 0): + if len(models) == 0: return True, True - senses = [find_active_objective(scenario).is_minimizing() - for scenario in models.values()] + senses = [ + find_active_objective(scenario).is_minimizing() for scenario in models.values() + ] sense = senses[0] check = all(val == sense for val in senses) - if (check): + if check: return (sense == pyo.minimize), check return None, check -def create_EF(scenario_names, scenario_creator, scenario_creator_kwargs=None, - EF_name=None, suppress_warnings=False, - nonant_for_fixed_vars=True): - """ Create a ConcreteModel of the extensive form. - - Args: - scenario_names (list of str): - Names for each scenario to be passed to the scenario_creator - function. - scenario_creator (callable): - Function which takes a scenario name as its first argument and - returns a concrete model corresponding to that scenario. - scenario_creator_kwargs (dict, optional): - Options to pass to `scenario_creator`. - EF_name (str, optional): - Name of the ConcreteModel of the EF. - suppress_warnings (boolean, optional): - If true, do not display warnings. Default False. - nonant_for_fixed_vars (bool--optional): If True, enforces - non-anticipativity constraints for all variables, including - those which have been fixed. Default is True. - - Returns: - EF_instance (ConcreteModel): - ConcreteModel of extensive form with explicit - non-anticipativity constraints. - - Note: - If any of the scenarios produced by scenario_creator do not have a - ._mpisppy_probability attribute, this function displays a warning, and assumes - that all scenarios are equally likely. + +def create_EF( + scenario_names, + scenario_creator, + scenario_creator_kwargs=None, + EF_name=None, + suppress_warnings=False, + nonant_for_fixed_vars=True, +): + """Create a ConcreteModel of the extensive form. + + Args: + scenario_names (list of str): + Names for each scenario to be passed to the scenario_creator + function. + scenario_creator (callable): + Function which takes a scenario name as its first argument and + returns a concrete model corresponding to that scenario. + scenario_creator_kwargs (dict, optional): + Options to pass to `scenario_creator`. + EF_name (str, optional): + Name of the ConcreteModel of the EF. + suppress_warnings (boolean, optional): + If true, do not display warnings. Default False. + nonant_for_fixed_vars (bool--optional): If True, enforces + non-anticipativity constraints for all variables, including + those which have been fixed. Default is True. + + Returns: + EF_instance (ConcreteModel): + ConcreteModel of extensive form with explicit + non-anticipativity constraints. + + Note: + If any of the scenarios produced by scenario_creator do not have a + ._mpisppy_probability attribute, this function displays a warning, and assumes + that all scenarios are equally likely. """ if scenario_creator_kwargs is None: scenario_creator_kwargs = dict() @@ -90,9 +102,9 @@ def create_EF(scenario_names, scenario_creator, scenario_creator_kwargs=None, for name in scenario_names } - if (len(scen_dict) == 0): + if len(scen_dict) == 0: raise RuntimeError("create_EF() received empty scenario list") - elif (len(scen_dict) == 1): + elif len(scen_dict) == 1: scenario_instance = list(scen_dict.values())[0] if not suppress_warnings: print("WARNING: passed single scenario to create_EF()") @@ -100,76 +112,83 @@ def create_EF(scenario_names, scenario_creator, scenario_creator_kwargs=None, scenario_instance.ref_vars = dict() for node in scenario_instance._mpisppy_node_list: ndn = node.name - nlens = {node.name: len(node.nonant_vardata_list) - for node in scenario_instance._mpisppy_node_list} + nlens = { + node.name: len(node.nonant_vardata_list) + for node in scenario_instance._mpisppy_node_list + } for i in range(nlens[ndn]): v = node.nonant_vardata_list[i] if (ndn, i) not in scenario_instance.ref_vars: scenario_instance.ref_vars[(ndn, i)] = v - # patch in EF_Obj - scenario_objs = get_objs(scenario_instance) + # patch in EF_Obj + scenario_objs = get_objs(scenario_instance) for obj_func in scenario_objs: obj_func.deactivate() - obj = scenario_objs[0] + obj = scenario_objs[0] sense = pyo.minimize if obj.is_minimizing() else pyo.maximize scenario_instance.EF_Obj = pyo.Objective(expr=obj.expr, sense=sense) return scenario_instance #### special return for single scenario # Check if every scenario has a specified probability - probs_specified = \ - all([hasattr(scen, '_mpisppy_probability') for scen in scen_dict.values()]) + probs_specified = all( + [hasattr(scen, '_mpisppy_probability') for scen in scen_dict.values()] + ) if not probs_specified: for scen in scen_dict.values(): scen._mpisppy_probability = 1 / len(scen_dict) if not suppress_warnings: - print('WARNING: At least one scenario is missing _mpisppy_probability attribute.', - 'Assuming equally-likely scenarios...') - - EF_instance = _create_EF_from_scen_dict(scen_dict, - EF_name=EF_name, - nonant_for_fixed_vars=True) + print( + 'WARNING: At least one scenario is missing _mpisppy_probability attribute.', + 'Assuming equally-likely scenarios...', + ) + + EF_instance = _create_EF_from_scen_dict( + scen_dict, EF_name=EF_name, nonant_for_fixed_vars=True + ) return EF_instance -def _create_EF_from_scen_dict(scen_dict, EF_name=None, - nonant_for_fixed_vars=True): - """ Create a ConcreteModel of the extensive form from a scenario - dictionary. - - Args: - scen_dict (dict): Dictionary whose keys are scenario names and - values are ConcreteModel objects corresponding to each - scenario. - EF_name (str--optional): Name of the resulting EF model. - nonant_for_fixed_vars (bool--optional): If True, enforces - non-anticipativity constraints for all variables, including - those which have been fixed. Default is True. - - Returns: - EF_instance (ConcreteModel): ConcreteModel of extensive form with - explicity non-anticipativity constraints. - - Notes: - The non-anticipativity constraints are enforced by creating - "reference variables" at each node in the scenario tree (excluding - leaves) and enforcing that all the variables for each scenario at - that node are equal to the reference variables. - - This function is called directly when creating bundles for PH. - - Does NOT assume that each scenario is equally likely. Raises an - AttributeError if a scenario object is encountered which does not - have a ._mpisppy_probability attribute. - - Added the flag nonant_for_fixed_vars because original code only - enforced non-anticipativity for non-fixed vars, which is not always - desirable in the context of bundling. This allows for more - fine-grained control. + +def _create_EF_from_scen_dict(scen_dict, EF_name=None, nonant_for_fixed_vars=True): + """Create a ConcreteModel of the extensive form from a scenario + dictionary. + + Args: + scen_dict (dict): Dictionary whose keys are scenario names and + values are ConcreteModel objects corresponding to each + scenario. + EF_name (str--optional): Name of the resulting EF model. + nonant_for_fixed_vars (bool--optional): If True, enforces + non-anticipativity constraints for all variables, including + those which have been fixed. Default is True. + + Returns: + EF_instance (ConcreteModel): ConcreteModel of extensive form with + explicitly non-anticipativity constraints. + + Notes: + The non-anticipativity constraints are enforced by creating + "reference variables" at each node in the scenario tree (excluding + leaves) and enforcing that all the variables for each scenario at + that node are equal to the reference variables. + + This function is called directly when creating bundles for PH. + + Does NOT assume that each scenario is equally likely. Raises an + AttributeError if a scenario object is encountered which does not + have a ._mpisppy_probability attribute. + + Added the flag nonant_for_fixed_vars because original code only + enforced non-anticipativity for non-fixed vars, which is not always + desirable in the context of bundling. This allows for more + fine-grained control. """ is_min, clear = _models_have_same_sense(scen_dict) - if (not clear): - raise RuntimeError('Cannot build the extensive form out of models ' - 'with different objective senses') + if not clear: + raise RuntimeError( + 'Cannot build the extensive form out of models ' + 'with different objective senses' + ) sense = pyo.minimize if is_min else pyo.maximize EF_instance = pyo.ConcreteModel(name=EF_name) EF_instance.EF_Obj = pyo.Objective(expr=0.0, sense=sense) @@ -177,61 +196,72 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None, # we don't strict need these here, but it allows for eliding # of single scenarios and bundles when convenient EF_instance._mpisppy_data = pyo.Block(name="For non-Pyomo mpi-sppy data") - EF_instance._mpisppy_model = pyo.Block(name="For mpi-sppy Pyomo additions to the scenario model") + EF_instance._mpisppy_model = pyo.Block( + name="For mpi-sppy Pyomo additions to the scenario model" + ) EF_instance._mpisppy_data.scenario_feasible = None EF_instance._ef_scenario_names = [] EF_instance._mpisppy_probability = 0 - for (sname, scenario_instance) in scen_dict.items(): + for sname, scenario_instance in scen_dict.items(): EF_instance.add_component(sname, scenario_instance) EF_instance._ef_scenario_names.append(sname) # Now deactivate the scenario instance Objective scenario_objs = get_objs(scenario_instance) for obj_func in scenario_objs: obj_func.deactivate() - obj_func = scenario_objs[0] # Select the first objective + obj_func = scenario_objs[0] # Select the first objective try: - EF_instance.EF_Obj.expr += scenario_instance._mpisppy_probability * obj_func.expr - EF_instance._mpisppy_probability += scenario_instance._mpisppy_probability + EF_instance.EF_Obj.expr += ( + scenario_instance._mpisppy_probability * obj_func.expr + ) + EF_instance._mpisppy_probability += scenario_instance._mpisppy_probability except AttributeError as e: - raise AttributeError("Scenario " + sname + " has no specified " - "probability. Specify a value for the attribute " - " _mpisppy_probability and try again.") from e + raise AttributeError( + "Scenario " + sname + " has no specified " + "probability. Specify a value for the attribute " + " _mpisppy_probability and try again." + ) from e # Normalization does nothing when solving the full EF, but is required for - # appropraite scaling of EFs used as bundles. + # appropriate scaling of EFs used as bundles. EF_instance.EF_Obj.expr /= EF_instance._mpisppy_probability # For each node in the scenario tree, we need to collect the # nonanticipative vars and create the constraints for them, # which we do using a reference variable. - ref_vars = dict() # keys are _nonant_indices (i.e. a node name and a - # variable number) + ref_vars = dict() # keys are _nonant_indices (i.e. a node name and a + # variable number) ref_suppl_vars = dict() - EF_instance._nlens = dict() + EF_instance._nlens = dict() nonant_constr = pyo.Constraint(pyo.Any, name='_C_EF_') EF_instance.add_component('_C_EF_', nonant_constr) - nonant_constr_suppl = pyo.Constraint(pyo.Any, name='_C_EF_suppl') EF_instance.add_component('_C_EF_suppl', nonant_constr_suppl) - for (sname, s) in scen_dict.items(): - nlens = {node.name: len(node.nonant_vardata_list) - for node in s._mpisppy_node_list} - - for (node_name, num_nonant_vars) in nlens.items(): # copy nlens to EF - if (node_name in EF_instance._nlens.keys() and - num_nonant_vars != EF_instance._nlens[node_name]): - raise RuntimeError("Number of non-anticipative variables is " - "not consistent at node " + node_name + " in scenario " + - sname) + for sname, s in scen_dict.items(): + nlens = { + node.name: len(node.nonant_vardata_list) for node in s._mpisppy_node_list + } + + for node_name, num_nonant_vars in nlens.items(): # copy nlens to EF + if ( + node_name in EF_instance._nlens.keys() + and num_nonant_vars != EF_instance._nlens[node_name] + ): + raise RuntimeError( + "Number of non-anticipative variables is " + "not consistent at node " + node_name + " in scenario " + sname + ) EF_instance._nlens[node_name] = num_nonant_vars - nlens_ef_suppl = {node.name: len(node.nonant_ef_suppl_vardata_list) - for node in s._mpisppy_node_list} + nlens_ef_suppl = { + node.name: len(node.nonant_ef_suppl_vardata_list) + for node in s._mpisppy_node_list + } for node in s._mpisppy_node_list: ndn = node.name @@ -244,10 +274,12 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None, # Add a non-anticipativity constraint, except in the case when # the variable is fixed and nonant_for_fixed_vars=False. elif (nonant_for_fixed_vars) or (not v.is_fixed()): - expr = LinearExpression(linear_coefs=[1,-1], - linear_vars=[v,ref_vars[(ndn,i)]], - constant=0.) - nonant_constr[(ndn,i,sname)] = (expr, 0.0) + expr = LinearExpression( + linear_coefs=[1, -1], + linear_vars=[v, ref_vars[(ndn, i)]], + constant=0.0, + ) + nonant_constr[(ndn, i, sname)] = (expr, 0.0) for i in range(nlens_ef_suppl[ndn]): v = node.nonant_ef_suppl_vardata_list[i] @@ -258,29 +290,35 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None, # Add a non-anticipativity constraint, expect in the case when # the variable is fixed and nonant_for_fixed_vars=False. elif (nonant_for_fixed_vars) or (not v.is_fixed()): - expr = LinearExpression(linear_coefs=[1,-1], - linear_vars=[v,ref_suppl_vars[(ndn,i)]], - constant=0.) - nonant_constr_suppl[(ndn,i,sname)] = (expr, 0.0) + expr = LinearExpression( + linear_coefs=[1, -1], + linear_vars=[v, ref_suppl_vars[(ndn, i)]], + constant=0.0, + ) + nonant_constr_suppl[(ndn, i, sname)] = (expr, 0.0) EF_instance.ref_vars = ref_vars EF_instance.ref_suppl_vars = ref_suppl_vars - + return EF_instance def find_active_objective(pyomomodel): # return the only active objective or raise and error - obj = list(pyomomodel.component_data_objects( - Objective, active=True, descend_into=True)) + obj = list( + pyomomodel.component_data_objects(Objective, active=True, descend_into=True) + ) if len(obj) != 1: - raise RuntimeError("Could not identify exactly one active " - "Objective for model '%s' (found %d objectives)" - % (pyomomodel.name, len(obj))) + raise RuntimeError( + "Could not identify exactly one active " + "Objective for model '%s' (found %d objectives)" + % (pyomomodel.name, len(obj)) + ) return obj[0] + def ef_nonants(ef): - """ An iterator to give representative Vars subject to non-anticipitivity + """An iterator to give representative Vars subject to non-anticipitivity Args: ef (ConcreteModel): the full extensive form model diff --git a/pyomo/contrib/parmest/utils/ipopt_solver_wrapper.py b/pyomo/contrib/parmest/utils/ipopt_solver_wrapper.py index 889a4c6dafb..7d8289cd181 100644 --- a/pyomo/contrib/parmest/utils/ipopt_solver_wrapper.py +++ b/pyomo/contrib/parmest/utils/ipopt_solver_wrapper.py @@ -12,6 +12,7 @@ from pyomo.common.tempfiles import TempfileManager from pyomo.opt import TerminationCondition + def ipopt_solve_with_stats(model, solver, max_iter=500, max_cpu_time=120): """ Run the solver (must be ipopt) and return the convergence statistics @@ -38,9 +39,7 @@ def ipopt_solve_with_stats(model, solver, max_iter=500, max_cpu_time=120): TempfileManager.push() tempfile = TempfileManager.create_tempfile(suffix='ipopt_out', text=True) - opts = {'output_file': tempfile, - 'max_iter': max_iter, - 'max_cpu_time': max_cpu_time} + opts = {'output_file': tempfile, 'max_iter': max_iter, 'max_cpu_time': max_cpu_time} status_obj = solver.solve(model, options=opts, tee=True) solved = True @@ -59,10 +58,14 @@ def ipopt_solve_with_stats(model, solver, max_iter=500, max_cpu_time=120): iters = int(tokens[3]) tokens_m_2 = line_m_2.split() regu = str(tokens_m_2[6]) - elif line.startswith('Total CPU secs in IPOPT (w/o function evaluations) ='): + elif line.startswith( + 'Total CPU secs in IPOPT (w/o function evaluations) =' + ): tokens = line.split() time += float(tokens[9]) - elif line.startswith('Total CPU secs in NLP function evaluations ='): + elif line.startswith( + 'Total CPU secs in NLP function evaluations =' + ): tokens = line.split() time += float(tokens[8]) line_m_2 = line_m_1 diff --git a/pyomo/contrib/parmest/utils/model_utils.py b/pyomo/contrib/parmest/utils/model_utils.py index d3662b22a82..c3c71dc2d6c 100644 --- a/pyomo/contrib/parmest/utils/model_utils.py +++ b/pyomo/contrib/parmest/utils/model_utils.py @@ -12,8 +12,7 @@ import logging import pyomo.environ as pyo -from pyomo.core.expr.current import (replace_expressions, - identify_mutable_parameters) +from pyomo.core.expr import replace_expressions, identify_mutable_parameters from pyomo.core.base.var import IndexedVar from pyomo.core.base.param import IndexedParam @@ -21,10 +20,11 @@ logger = logging.getLogger(__name__) + def convert_params_to_vars(model, param_names=None, fix_vars=False): """ Convert select Params to Vars - + Parameters ---------- model : Pyomo concrete model @@ -33,24 +33,24 @@ def convert_params_to_vars(model, param_names=None, fix_vars=False): List of parameter names to convert, if None then all Params are converted fix_vars : bool Fix the new variables, default is False - + Returns ------- model : Pyomo concrete model Model with select Params converted to Vars """ - + model = model.clone() - + if param_names is None: param_names = [param.name for param in model.component_data_objects(pyo.Param)] - + indexed_param_names = [] - + # Convert Params to Vars, unfix Vars, and create a substitution map substitution_map = {} for i, param_name in enumerate(param_names): - # Leverage the parser in ComponentUID to locate the component. + # Leverage the parser in ComponentUID to locate the component. theta_cuid = ComponentUID(param_name) theta_object = theta_cuid.find_component_on(model) @@ -58,14 +58,14 @@ def convert_params_to_vars(model, param_names=None, fix_vars=False): if theta_object.is_parameter_type(): # Delete Param, add Var vals = theta_object.extract_values() - model.del_component(theta_object) - model.add_component(theta_object.name, pyo.Var(initialize = vals[None])) - + model.del_component(theta_object) + model.add_component(theta_object.name, pyo.Var(initialize=vals[None])) + # Update substitution map theta_var_cuid = ComponentUID(theta_object.name) theta_var_object = theta_var_cuid.find_component_on(model) substitution_map[id(theta_object)] = theta_var_object - + # Indexed Param elif isinstance(theta_object, IndexedParam): # Delete Param, add Var @@ -77,34 +77,39 @@ def convert_params_to_vars(model, param_names=None, fix_vars=False): theta_cuid = ComponentUID(indexed_param_name) param_theta_objects.append(theta_cuid.find_component_on(model)) indexed_param_names.append(indexed_param_name) - - model.del_component(theta_object) - + + model.del_component(theta_object) + index_name = theta_object.index_set().name index_cuid = ComponentUID(index_name) index_object = index_cuid.find_component_on(model) - model.add_component(theta_object.name, pyo.Var(index_object, - initialize = vals)) - + model.add_component( + theta_object.name, pyo.Var(index_object, initialize=vals) + ) + # Update substitution map (map each indexed param to indexed var) theta_var_cuid = ComponentUID(theta_object.name) theta_var_object = theta_var_cuid.find_component_on(model) var_theta_objects = [] for theta_obj in theta_var_object: - theta_cuid = ComponentUID(theta_var_object.name + '[' + str(theta_obj) + ']') + theta_cuid = ComponentUID( + theta_var_object.name + '[' + str(theta_obj) + ']' + ) var_theta_objects.append(theta_cuid.find_component_on(model)) - - for param_theta_obj, var_theta_obj in zip(param_theta_objects, var_theta_objects): + + for param_theta_obj, var_theta_obj in zip( + param_theta_objects, var_theta_objects + ): substitution_map[id(param_theta_obj)] = var_theta_obj - + # Var or Indexed Var elif isinstance(theta_object, IndexedVar) or theta_object.is_variable_type(): theta_var_object = theta_object - + else: logger.warning("%s is not a Param or Var on the model", (param_name)) return model - + if fix_vars: theta_var_object.fix() else: @@ -113,50 +118,73 @@ def convert_params_to_vars(model, param_names=None, fix_vars=False): # If no substitutions are needed, return the model if len(substitution_map) == 0: return model - + # Update the list of param_names if the parameters were indexed if len(indexed_param_names) > 0: param_names = indexed_param_names - + # Convert Params to Vars in Expressions for expr in model.component_data_objects(pyo.Expression): - if expr.active and any(v.name in param_names for v in identify_mutable_parameters(expr)): + if expr.active and any( + v.name in param_names for v in identify_mutable_parameters(expr) + ): new_expr = replace_expressions(expr=expr, substitution_map=substitution_map) - model.del_component(expr) + model.del_component(expr) model.add_component(expr.name, pyo.Expression(rule=new_expr)) - + # Convert Params to Vars in Constraint expressions num_constraints = len(list(model.component_objects(pyo.Constraint, active=True))) if num_constraints > 0: model.constraints = pyo.ConstraintList() for c in model.component_data_objects(pyo.Constraint): - if c.active and any(v.name in param_names for v in identify_mutable_parameters(c.expr)): + if c.active and any( + v.name in param_names for v in identify_mutable_parameters(c.expr) + ): if c.equality: model.constraints.add( - replace_expressions(expr=c.lower, substitution_map=substitution_map) == - replace_expressions(expr=c.body, substitution_map=substitution_map)) + replace_expressions( + expr=c.lower, substitution_map=substitution_map + ) + == replace_expressions( + expr=c.body, substitution_map=substitution_map + ) + ) elif c.lower is not None: model.constraints.add( - replace_expressions(expr=c.lower, substitution_map=substitution_map) <= - replace_expressions(expr=c.body, substitution_map=substitution_map)) + replace_expressions( + expr=c.lower, substitution_map=substitution_map + ) + <= replace_expressions( + expr=c.body, substitution_map=substitution_map + ) + ) elif c.upper is not None: model.constraints.add( - replace_expressions(expr=c.upper, substitution_map=substitution_map) >= - replace_expressions(expr=c.body, substitution_map=substitution_map)) + replace_expressions( + expr=c.upper, substitution_map=substitution_map + ) + >= replace_expressions( + expr=c.body, substitution_map=substitution_map + ) + ) else: - raise ValueError("Unable to parse constraint to convert params to vars.") - c.deactivate() - + raise ValueError( + "Unable to parse constraint to convert params to vars." + ) + c.deactivate() + # Convert Params to Vars in Objective expressions for obj in model.component_data_objects(pyo.Objective): - if obj.active and any(v.name in param_names for v in identify_mutable_parameters(obj)): + if obj.active and any( + v.name in param_names for v in identify_mutable_parameters(obj) + ): expr = replace_expressions(expr=obj.expr, substitution_map=substitution_map) - model.del_component(obj) + model.del_component(obj) model.add_component(obj.name, pyo.Objective(rule=expr, sense=obj.sense)) - - #print('--- Updated Model ---') - #model.pprint() - #solver = pyo.SolverFactory('ipopt') - #solver.solve(model) - + + # print('--- Updated Model ---') + # model.pprint() + # solver = pyo.SolverFactory('ipopt') + # solver.solve(model) + return model diff --git a/pyomo/contrib/parmest/utils/mpi_utils.py b/pyomo/contrib/parmest/utils/mpi_utils.py index ba912c01652..35c4bf137bc 100644 --- a/pyomo/contrib/parmest/utils/mpi_utils.py +++ b/pyomo/contrib/parmest/utils/mpi_utils.py @@ -11,6 +11,7 @@ from collections import OrderedDict import importlib + """ This module is a collection of classes that provide a friendlier interface to MPI (through mpi4py). They help @@ -25,6 +26,7 @@ class MPIInterface: __have_mpi__ = None + def __init__(self): if MPIInterface.__have_mpi__ is None: # This is trying to import mpy4py.MPI, and setting a flag to indicate @@ -58,15 +60,16 @@ def have_mpi(self): @property def comm(self): return self._comm - + @property def rank(self): return self._rank - + @property def size(self): return self._size + class ParallelTaskManager: def __init__(self, n_total_tasks, mpi_interface=None): if mpi_interface is None: @@ -110,23 +113,25 @@ def is_root(self): def global_to_local_data(self, global_data): if type(global_data) is list: local_data = list() - assert (len(global_data) == self._n_total_tasks) + assert len(global_data) == self._n_total_tasks for i in self._local_map: local_data.append(global_data[i]) return local_data elif type(global_data) is OrderedDict: local_data = OrderedDict() - assert (len(global_data) == self._n_total_tasks) + assert len(global_data) == self._n_total_tasks idx = 0 for k, v in global_data.items(): if idx in self._local_map: local_data[k] = v idx += idx return local_data - raise ValueError('Unknown type passed to global_to_local_data. Expected list or OrderedDict.') + raise ValueError( + 'Unknown type passed to global_to_local_data. Expected list or OrderedDict.' + ) def allgather_global_data(self, local_data): - assert (len(local_data) == len(self._local_map)) + assert len(local_data) == len(self._local_map) if not self._mpi_interface.have_mpi: return list(local_data) @@ -136,7 +141,7 @@ def allgather_global_data(self, local_data): return self._stack_global_data(global_data_list_of_lists) def gather_global_data(self, local_data): - assert (len(local_data) == len(self._local_map)) + assert len(local_data) == len(self._local_map) if not self._mpi_interface.have_mpi: return list(local_data) @@ -149,7 +154,6 @@ def gather_global_data(self, local_data): assert self.is_root() == False return None - def _stack_global_data(self, global_data_list_of_lists): # stack the list of lists into one global data list # ToDo: test that this is equivalent to [d for sublist in global_data_list_of_lists for d in sublist] diff --git a/pyomo/contrib/parmest/utils/scenario_tree.py b/pyomo/contrib/parmest/utils/scenario_tree.py index b324115f991..d46a8f2c5f0 100644 --- a/pyomo/contrib/parmest/utils/scenario_tree.py +++ b/pyomo/contrib/parmest/utils/scenario_tree.py @@ -1,5 +1,5 @@ # This software is distributed under the 3-clause BSD License. -# Copied with minor modifications from mpisppy/scenario_tree.py +# Copied with minor modifications from mpisppy/scenario_tree.py # from the mpi-sppy library (https://github.com/Pyomo/mpi-sppy). # ALL INDEXES ARE ZERO-BASED @@ -11,6 +11,7 @@ import pyomo.environ as pyo + def build_vardatalist(self, model, varlist=None): """ Convert a list of pyomo variables to a list of ScalarVar and _GeneralVarData. If varlist is none, builds a @@ -26,7 +27,9 @@ def build_vardatalist(self, model, varlist=None): # if the varlist is None, then assume we want all the active variables if varlist is None: raise RuntimeError("varlist is None in scenario_tree.build_vardatalist") - vardatalist = [v for v in model.component_data_objects(pyo.Var, active=True, sort=True)] + vardatalist = [ + v for v in model.component_data_objects(pyo.Var, active=True, sort=True) + ] elif isinstance(varlist, pyo.Var): # user provided a variable, not a list of variables. Let's work with it anyway varlist = [varlist] @@ -41,19 +44,20 @@ def build_vardatalist(self, model, varlist=None): else: vardatalist.append(v) return vardatalist - + + class ScenarioNode: """Store a node in the scenario tree. Note: - This can only be created programatically from a scenario + This can only be created programmatically from a scenario creation function. (maybe that function reads data) Args: name (str): name of the node; one node must be named "ROOT" cond_prob (float): conditional probability stage (int): stage number (root is 1) - cost_expression (pyo Expression or Var): stage cost + cost_expression (pyo Expression or Var): stage cost scen_name_list (str): OPTIONAL scenario names at the node just for debugging and reporting; not really used as of Dec 31 nonant_list (list of pyo Var, Vardata or slices): the Vars that @@ -62,15 +66,25 @@ class ScenarioNode: nonant_ef_suppl_list (list of pyo Var, Vardata or slices): vars for which nonanticipativity constraints tighten the EF (important for bundling) - parent_name (str): name of the parent node + parent_name (str): name of the parent node Lists: nonant_vardata(list of vardata objects): vardatas to blend x_bar_list(list of floats): bound by index to nonant_vardata """ - def __init__(self, name, cond_prob, stage, cost_expression, scen_name_list, - nonant_list, scen_model, nonant_ef_suppl_list=None, - parent_name=None): + + def __init__( + self, + name, + cond_prob, + stage, + cost_expression, + scen_name_list, + nonant_list, + scen_model, + nonant_ef_suppl_list=None, + parent_name=None, + ): """Initialize a ScenarioNode object. Assume most error detection is done elsewhere. """ @@ -80,23 +94,22 @@ def __init__(self, name, cond_prob, stage, cost_expression, scen_name_list, self.cost_expression = cost_expression self.nonant_list = nonant_list self.nonant_ef_suppl_list = nonant_ef_suppl_list - self.parent_name = parent_name # None for ROOT + self.parent_name = parent_name # None for ROOT # now make the vardata lists if self.nonant_list is not None: - self.nonant_vardata_list = build_vardatalist(self, - scen_model, - self.nonant_list) + self.nonant_vardata_list = build_vardatalist( + self, scen_model, self.nonant_list + ) else: - print("nonant_list is empty for node {},".format(node) +\ - "No nonanticipativity will be enforced at this node by default") + print( + "nonant_list is empty for node {},".format(node) + + "No nonanticipativity will be enforced at this node by default" + ) self.nonant_vardata_list = [] if self.nonant_ef_suppl_list is not None: - self.nonant_ef_suppl_vardata_list = build_vardatalist(self, - scen_model, - self.nonant_ef_suppl_list) + self.nonant_ef_suppl_vardata_list = build_vardatalist( + self, scen_model, self.nonant_ef_suppl_list + ) else: self.nonant_ef_suppl_vardata_list = [] - - - diff --git a/pyomo/contrib/piecewise/__init__.py b/pyomo/contrib/piecewise/__init__.py new file mode 100644 index 00000000000..33cfc6f1606 --- /dev/null +++ b/pyomo/contrib/piecewise/__init__.py @@ -0,0 +1,24 @@ +from pyomo.contrib.piecewise.piecewise_linear_expression import ( + PiecewiseLinearExpression, +) +from pyomo.contrib.piecewise.piecewise_linear_function import PiecewiseLinearFunction + +## register transformations +from pyomo.contrib.piecewise.transform.inner_representation_gdp import ( + InnerRepresentationGDPTransformation, +) +from pyomo.contrib.piecewise.transform.disaggregated_convex_combination import ( + DisaggregatedConvexCombinationTransformation, +) +from pyomo.contrib.piecewise.transform.outer_representation_gdp import ( + OuterRepresentationGDPTransformation, +) +from pyomo.contrib.piecewise.transform.multiple_choice import ( + MultipleChoiceTransformation, +) +from pyomo.contrib.piecewise.transform.reduced_inner_representation_gdp import ( + ReducedInnerRepresentationGDPTransformation, +) +from pyomo.contrib.piecewise.transform.convex_combination import ( + ConvexCombinationTransformation, +) diff --git a/pyomo/contrib/piecewise/piecewise_linear_expression.py b/pyomo/contrib/piecewise/piecewise_linear_expression.py new file mode 100644 index 00000000000..ea1d95b0f51 --- /dev/null +++ b/pyomo/contrib/piecewise/piecewise_linear_expression.py @@ -0,0 +1,49 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.autoslots import AutoSlots +from pyomo.core.expr.numeric_expr import NumericExpression +from weakref import ref as weakref_ref + + +class PiecewiseLinearExpression(NumericExpression): + """ + A numeric expression node representing a specific instantiation of a + PiecewiseLinearFunction. + + Args: + args (list or tuple): Children of this node + pw_linear_function (PiecewiseLinearFunction): piece-wise linear function + of which this node is an instance. + """ + + __slots__ = ('_pw_linear_function',) + __autoslot_mappers__ = {'_pw_linear_function': AutoSlots.weakref_mapper} + + def __init__(self, args, pw_linear_function): + super().__init__(args) + self._pw_linear_function = weakref_ref(pw_linear_function) + + def nargs(self): + return len(self._args_) + + @property + def pw_linear_function(self): + return self._pw_linear_function() + + def create_node_with_local_data(self, args): + return self.__class__(args, pw_linear_function=self.pw_linear_function) + + def _to_string(self, values, verbose, smap): + return "%s(%s)" % (str(self.pw_linear_function), ', '.join(values)) + + def polynomial_degree(self): + return None diff --git a/pyomo/contrib/piecewise/piecewise_linear_function.py b/pyomo/contrib/piecewise/piecewise_linear_function.py new file mode 100644 index 00000000000..6d4fa658f88 --- /dev/null +++ b/pyomo/contrib/piecewise/piecewise_linear_function.py @@ -0,0 +1,518 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import logging + +from pyomo.common import DeveloperError +from pyomo.common.autoslots import AutoSlots +from pyomo.common.collections import ComponentMap +from pyomo.common.dependencies import numpy as np +from pyomo.common.dependencies.scipy import spatial +from pyomo.contrib.piecewise.piecewise_linear_expression import ( + PiecewiseLinearExpression, +) +from pyomo.core import Any, NonNegativeIntegers, value, Var +from pyomo.core.base.block import _BlockData, Block +from pyomo.core.base.component import ModelComponentFactory +from pyomo.core.base.expression import Expression +from pyomo.core.base.global_set import UnindexedComponent_index +from pyomo.core.base.indexed_component import UnindexedComponent_set +from pyomo.core.base.initializer import Initializer +import pyomo.core.expr as EXPR + +# This is the default absolute tolerance in np.isclose... Not sure if it's +# enough, but we need to make sure that 'barely negative' values are assumed to +# be zero. +ZERO_TOLERANCE = 1e-8 + +logger = logging.getLogger(__name__) + + +class PiecewiseLinearFunctionData(_BlockData): + _Block_reserved_words = Any + + def __init__(self, component=None): + _BlockData.__init__(self, component) + + with self._declare_reserved_components(): + self._expressions = Expression(NonNegativeIntegers) + self._transformed_exprs = ComponentMap() + self._simplices = None + # These will always be tuples, even when we only have one dimension. + self._points = [] + self._linear_functions = [] + + def __call__(self, *args): + """ + Returns a PiecewiseLinearExpression which is an instance of this + function applied to the variables and/or constants specified in args. + """ + if all( + type(arg) in EXPR.native_types or not arg.is_potentially_variable() + for arg in args + ): + # We need to actually evaluate + return self._evaluate(*args) + else: + expr = PiecewiseLinearExpression(args, self) + idx = id(expr) + self._expressions[idx] = expr + return self._expressions[idx] + + def _evaluate(self, *args): + # ESJ: This is a very inefficient implementation in high dimensions, but + # for now we will just do a linear scan of the simplices. + if self._simplices is None: + raise RuntimeError( + "Cannot evaluate PiecewiseLinearFunction--it " + "appears it is not fully defined. (No simplices " + "are stored.)" + ) + + pt = [value(arg) for arg in args] + for simplex, func in zip(self._simplices, self._linear_functions): + if self._pt_in_simplex(pt, simplex): + return func(*args) + + raise ValueError( + "Unsuccessful evaluation of PiecewiseLinearFunction " + "'%s' at point (%s). Is the point in the function's " + "domain?" % (self.name, ', '.join(str(arg) for arg in args)) + ) + + def _pt_in_simplex(self, pt, simplex): + dim = len(pt) + if dim == 1: + return ( + self._points[simplex[0]][0] <= pt[0] + and self._points[simplex[1]][0] >= pt[0] + ) + # Otherwise, we check if pt is a convex combination of the simplex's + # extreme points + A = np.ones((dim + 1, dim + 1)) + b = np.array([x for x in pt] + [1]) + for j, extreme_point in enumerate(simplex): + for i, coord in enumerate(self._points[extreme_point]): + A[i, j] = coord + if np.linalg.det(A) == 0: + # A is singular, so the system has no solutions + return False + else: + lambdas = np.linalg.solve(A, b) + for l in lambdas: + if l < -ZERO_TOLERANCE: + return False + return True + + def _get_simplices_from_arg(self, simplices): + self._simplices = [] + known_points = set() + point_to_index = {} + for simplex in simplices: + extreme_pts = [] + for pt in simplex: + if pt not in known_points: + known_points.add(pt) + if hasattr(pt, '__len__'): + self._points.append(pt) + else: + self._points.append((pt,)) + point_to_index[pt] = len(self._points) - 1 + extreme_pts.append(point_to_index[pt]) + self._simplices.append(tuple(extreme_pts)) + + def map_transformation_var(self, pw_expr, v): + """ + Records on the PiecewiseLinearFunction object that the transformed + form of the PiecewiseLinearExpression object pw_expr is the Var v. + """ + self._transformed_exprs[self._expressions[id(pw_expr)]] = v + + def get_transformation_var(self, pw_expr): + """ + Returns the Var that replaced the PiecewiseLinearExpression 'pw_expr' + after transformation, or None if 'pw_expr' has not been transformed. + """ + if pw_expr in self._transformed_exprs: + return self._transformed_exprs[pw_expr] + else: + return None + + +class _univariate_linear_functor(AutoSlots.Mixin): + __slots__ = ('slope', 'intercept') + + def __init__(self, slope, intercept): + self.slope = slope + self.intercept = intercept + + def __call__(self, x): + return self.slope * x + self.intercept + + +class _multivariate_linear_functor(AutoSlots.Mixin): + __slots__ = 'normal' + + def __init__(self, normal): + self.normal = normal + + def __call__(self, *args): + return sum(self.normal[i] * arg for i, arg in enumerate(args)) + self.normal[-1] + + +class _tabular_data_functor(AutoSlots.Mixin): + __slots__ = ('tabular_data',) + + def __init__(self, tabular_data, tupleize=False): + if not tupleize: + self.tabular_data = tabular_data + else: + self.tabular_data = {(pt,): val for pt, val in tabular_data.items()} + + def __call__(self, *args): + return self.tabular_data[args] + + +def _define_handler(handle_map, *key): + def _wrapper(obj): + assert key not in handle_map + handle_map[key] = obj + return obj + + return _wrapper + + +@ModelComponentFactory.register("Multidimensional piecewise linear function") +class PiecewiseLinearFunction(Block): + """A piecewise linear function, which may be defined over an index. + + Can be specified in one of several ways: + 1) List of points and a nonlinear function to approximate. In + this case, the points will be used to derive a triangulation + of the part of the domain of interest, and a linear function + approximating the given function will be calculated for each + of the simplices in the triangulation. In this case, scipy is + required (for multivariate functions). + 2) List of simplices and a nonlinear function to approximate. In + this case, a linear function approximating the given function + will be calculated for each simplex. For multivariate functions, + numpy is required. + 3) List of simplices and list of functions that return linear function + expressions. These are the desired piecewise functions + corresponding to each simplex. + 4) Mapping of function values to points of the domain, allowing for + the construction of a piecewise linear function from tabular data. + + Args: + function: Nonlinear function to approximate: must be callable + function_rule: Function that returns a nonlinear function to + approximate for each index in an IndexedPiecewiseLinearFunction + points: List of points in the same dimension as the domain of the + function being approximated. Note that if the pieces of the + function are specified this way, we require scipy. + simplices: A list of lists of points, where each list specifies the + extreme points of a a simplex over which the nonlinear function + will be approximated as a linear function. + linear_functions: A list of functions, each of which returns an + expression for a linear function of the arguments. + tabular_data: A dictionary mapping values of the nonlinear function + to points in the domain + """ + + _ComponentDataClass = PiecewiseLinearFunctionData + + # Map 5-tuple of bool to handler: "(f, pts, simplices, linear_funcs, + # tabular_data) : handler" + _handlers = {} + + def __new__(cls, *args, **kwds): + if cls is not PiecewiseLinearFunction: + return super(PiecewiseLinearFunction, cls).__new__(cls) + if not args or (args[0] is UnindexedComponent_set and len(args) == 1): + return PiecewiseLinearFunction.__new__(ScalarPiecewiseLinearFunction) + else: + return IndexedPiecewiseLinearFunction.__new__( + IndexedPiecewiseLinearFunction + ) + + def __init__(self, *args, **kwargs): + _func_arg = kwargs.pop('function', None) + _func_rule_arg = kwargs.pop('function_rule', None) + _points_arg = kwargs.pop('points', None) + _simplices_arg = kwargs.pop('simplices', None) + _linear_functions = kwargs.pop('linear_functions', None) + _tabular_data_arg = kwargs.pop('tabular_data', None) + _tabular_data_rule_arg = kwargs.pop('tabular_data_rule', None) + + kwargs.setdefault('ctype', PiecewiseLinearFunction) + Block.__init__(self, *args, **kwargs) + + # This cannot be a rule. + self._func = _func_arg + self._func_rule = Initializer(_func_rule_arg) + self._points_rule = Initializer(_points_arg, treat_sequences_as_mappings=False) + self._simplices_rule = Initializer( + _simplices_arg, treat_sequences_as_mappings=False + ) + self._linear_funcs_rule = Initializer( + _linear_functions, treat_sequences_as_mappings=False + ) + self._tabular_data = _tabular_data_arg + self._tabular_data_rule = Initializer( + _tabular_data_rule_arg, treat_sequences_as_mappings=False + ) + + def _get_dimension_from_points(self, points): + if len(points) < 1: + raise ValueError( + "Cannot construct PiecewiseLinearFunction from " + "points list of length 0." + ) + + if hasattr(points[0], '__len__'): + dimension = len(points[0]) + else: + dimension = 1 + + return dimension + + def _construct_simplices_from_multivariate_points(self, obj, points, dimension): + try: + triangulation = spatial.Delaunay(points) + except (spatial.QhullError, ValueError) as error: + logger.error("Unable to triangulate the set of input points.") + raise + + # Get the points for the triangulation because they might not all be + # there if any were coplanar. + obj._points = [pt for pt in map(tuple, triangulation.points)] + obj._simplices = [] + for simplex in triangulation.simplices: + # For each simplex, check whether or not the simplex is + # degenerate. If it is, we will just drop it. + + # We have n + 1 points in n dimensions. + # We put them in a n x (n + 1) matrix: [p_0 p_1 ... p_n] + points = triangulation.points[simplex].transpose() + # The question is if they span R^n: We construct the square matrix + # [p_1 - p_0 p_2 - p_1 ... p_n - p_{n-1}] and check if it is full + # rank. Note that we use numpy's matrix_rank function rather than + # checking the determinant because matrix_rank will by default calculate a + # tolerance based on the input to account for numerical errors in the + # SVD computation. + if ( + np.linalg.matrix_rank( + points[:, 1:] + - np.append(points[:, : dimension - 1], points[:, [0]], axis=1) + ) + == dimension + ): + obj._simplices.append(tuple(sorted(simplex))) + + # It's possible that qhull dropped some points if there were numerical + # issues with them (e.g., if they were redundant). We'll be polite and + # tell the user: + for pt in triangulation.coplanar: + logger.info( + "The Delaunay triangulation dropped the point with index " + "%s from the triangulation." % pt[0] + ) + + def _construct_one_dimensional_simplices_from_points(self, obj, points): + points.sort() + obj._simplices = [] + for i in range(len(points) - 1): + obj._simplices.append((i, i + 1)) + obj._points.append((points[i],)) + # Add the last one + obj._points.append((points[-1],)) + + @_define_handler(_handlers, True, True, False, False, False) + def _construct_from_function_and_points(self, obj, parent, nonlinear_function): + idx = obj._index + + points = self._points_rule(parent, idx) + dimension = self._get_dimension_from_points(points) + + if dimension == 1: + # This is univariate and we'll handle it separately in order to + # avoid a dependence on scipy. + self._construct_one_dimensional_simplices_from_points(obj, points) + return self._construct_from_univariate_function_and_segments( + obj, nonlinear_function + ) + + self._construct_simplices_from_multivariate_points(obj, points, dimension) + return self._construct_from_function_and_simplices( + obj, parent, nonlinear_function, simplices_are_user_defined=False + ) + + def _construct_from_univariate_function_and_segments(self, obj, func): + for idx1, idx2 in obj._simplices: + x1 = obj._points[idx1][0] + x2 = obj._points[idx2][0] + y = {x: func(x) for x in [x1, x2]} + slope = (y[x2] - y[x1]) / (x2 - x1) + intercept = y[x1] - slope * x1 + obj._linear_functions.append(_univariate_linear_functor(slope, intercept)) + + return obj + + @_define_handler(_handlers, True, False, True, False, False) + def _construct_from_function_and_simplices( + self, obj, parent, nonlinear_function, simplices_are_user_defined=True + ): + if obj._simplices is None: + obj._get_simplices_from_arg(self._simplices_rule(parent, obj._index)) + simplices = obj._simplices + + if len(simplices) < 1: + raise ValueError( + "Cannot construct PiecewiseLinearFunction " + "with empty list of simplices" + ) + + dimension = len(simplices[0]) - 1 + if dimension == 1: + # Back to high school with us--this is univariate and we'll handle + # it separately in order to avoid a kind of silly dependence on + # numpy. + return self._construct_from_univariate_function_and_segments( + obj, nonlinear_function + ) + + # evaluate the function at each of the points and form the homogeneous + # system of equations + A = np.ones((dimension + 2, dimension + 2)) + b = np.zeros(dimension + 2) + b[-1] = 1 + + for num_piece, simplex in enumerate(simplices): + for i, pt_idx in enumerate(simplex): + pt = obj._points[pt_idx] + for j, val in enumerate(pt): + A[i, j] = val + A[i, j + 1] = nonlinear_function(*pt) + A[i + 1, :] = 0 + A[i + 1, dimension] = -1 + # This system has a solution unless there's a bug--we filtered the + # simplices to make sure they are full-dimensional, so we know there + # is a hyperplane that passes through these dimension + 1 points (and the + # last equation scales it so that the coefficient for the output of + # the nonlinear function dimension is -1, so we can just read off + # the linear equation in the x space). + try: + normal = np.linalg.solve(A, b) + except np.linalg.LinAlgError as e: + logger.warning('LinAlgError: %s' % e) + msg = ( + "When calculating the hyperplane approximation over the simplex " + "with index %s, the matrix was unexpectedly singular. This " + "likely means that this simplex is degenerate" % num_piece + ) + + if simplices_are_user_defined: + raise ValueError(msg) + # otherwise it's our fault, and I was hoping this is unreachable + # code... + raise DeveloperError( + msg + + " and that it should have been filtered out of the triangulation" + ) + + obj._linear_functions.append(_multivariate_linear_functor(normal)) + + return obj + + @_define_handler(_handlers, False, False, True, True, False) + def _construct_from_linear_functions_and_simplices( + self, obj, parent, nonlinear_function + ): + # We know that we have simplices because else this handler wouldn't + # have been called. + obj._get_simplices_from_arg(self._simplices_rule(parent, obj._index)) + obj._linear_functions = [f for f in self._linear_funcs_rule(parent, obj._index)] + return obj + + @_define_handler(_handlers, False, False, False, False, True) + def _construct_from_tabular_data(self, obj, parent, nonlinear_function): + idx = obj._index + + tabular_data = self._tabular_data + if tabular_data is None: + tabular_data = self._tabular_data_rule(parent, idx) + points = [pt for pt in tabular_data.keys()] + dimension = self._get_dimension_from_points(points) + + if dimension == 1: + # This is univariate and we'll handle it separately in order to + # avoid a dependence on scipy. + self._construct_one_dimensional_simplices_from_points(obj, points) + return self._construct_from_univariate_function_and_segments( + obj, _tabular_data_functor(tabular_data, tupleize=True) + ) + + self._construct_simplices_from_multivariate_points(obj, points, dimension) + return self._construct_from_function_and_simplices( + obj, parent, _tabular_data_functor(tabular_data) + ) + + def _getitem_when_not_present(self, index): + if index is None and not self.is_indexed(): + obj = self._data[index] = self + else: + obj = self._data[index] = self._ComponentDataClass(component=self) + obj._index = index + parent = obj.parent_block() + + # Get the nonlinear function, if we have one. + nonlinear_function = None + if self._func_rule is not None: + nonlinear_function = self._func_rule(parent, index) + elif self._func is not None: + nonlinear_function = self._func + + handler = self._handlers.get( + ( + nonlinear_function is not None, + self._points_rule is not None, + self._simplices_rule is not None, + self._linear_funcs_rule is not None, + self._tabular_data is not None or self._tabular_data_rule is not None, + ) + ) + if handler is None: + raise ValueError( + "Unsupported set of arguments given for " + "constructing PiecewiseLinearFunction. " + "Expected a nonlinear function and a list" + "of breakpoints, a nonlinear function and a list " + "of simplices, a list of linear functions and " + "a list of corresponding simplices, or a dictionary " + "mapping points to nonlinear function values." + ) + return handler(self, obj, parent, nonlinear_function) + + +class ScalarPiecewiseLinearFunction( + PiecewiseLinearFunctionData, PiecewiseLinearFunction +): + def __init__(self, *args, **kwds): + self._suppress_ctypes = set() + + PiecewiseLinearFunctionData.__init__(self, self) + PiecewiseLinearFunction.__init__(self, *args, **kwds) + self._data[None] = self + self._index = UnindexedComponent_index + + +class IndexedPiecewiseLinearFunction(PiecewiseLinearFunction): + pass diff --git a/pyomo/contrib/piecewise/tests/__init__.py b/pyomo/contrib/piecewise/tests/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pyomo/contrib/piecewise/tests/common_tests.py b/pyomo/contrib/piecewise/tests/common_tests.py new file mode 100644 index 00000000000..c77d7064544 --- /dev/null +++ b/pyomo/contrib/piecewise/tests/common_tests.py @@ -0,0 +1,84 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.contrib.piecewise.tests.models as models +from pyomo.core import Var +from pyomo.core.base import TransformationFactory +from pyomo.environ import value +from pyomo.gdp import Disjunct, Disjunction + + +def check_trans_block_structure(test, block): + # One (indexed) disjunct + test.assertEqual(len(block.component_map(Disjunct)), 1) + # One disjunction + test.assertEqual(len(block.component_map(Disjunction)), 1) + # The 'z' var (that we will substitute in for the function being + # approximated) is here: + test.assertEqual(len(block.component_map(Var)), 1) + test.assertIsInstance(block.substitute_var, Var) + + +def check_log_x_model_soln(test, m): + test.assertAlmostEqual(value(m.x), 4) + test.assertAlmostEqual(value(m.x1), 1) + test.assertAlmostEqual(value(m.x2), 1) + test.assertAlmostEqual(value(m.obj), m.f2(4)) + + +def check_transformation_do_not_descend(test, transformation): + m = models.make_log_x_model() + transform = TransformationFactory(transformation) + transform.apply_to(m) + + test.check_pw_log(m) + test.check_pw_paraboloid(m) + + +def check_transformation_PiecewiseLinearFunction_targets(test, transformation): + m = models.make_log_x_model() + transform = TransformationFactory(transformation) + transform.apply_to(m, targets=[m.pw_log]) + + test.check_pw_log(m) + + # And check that the paraboloid was *not* transformed. + test.assertIsNone(m.pw_paraboloid.get_transformation_var(m.paraboloid_expr)) + + +def check_descend_into_expressions(test, transformation): + m = models.make_log_x_model() + transform = TransformationFactory(transformation) + transform.apply_to(m, descend_into_expressions=True) + + # Everything should be transformed + test.check_pw_log(m) + test.check_pw_paraboloid(m) + + +def check_descend_into_expressions_constraint_target(test, transformation): + m = models.make_log_x_model() + transform = TransformationFactory(transformation) + transform.apply_to(m, descend_into_expressions=True, targets=[m.indexed_c]) + + test.check_pw_paraboloid(m) + # And check that the log was *not* transformed. + test.assertIsNone(m.pw_log.get_transformation_var(m.log_expr)) + + +def check_descend_into_expressions_objective_target(test, transformation): + m = models.make_log_x_model() + transform = TransformationFactory(transformation) + transform.apply_to(m, descend_into_expressions=True, targets=[m.obj]) + + test.check_pw_log(m) + # And check that the paraboloid was *not* transformed. + test.assertIsNone(m.pw_paraboloid.get_transformation_var(m.paraboloid_expr)) diff --git a/pyomo/contrib/piecewise/tests/models.py b/pyomo/contrib/piecewise/tests/models.py new file mode 100644 index 00000000000..be2811a70a4 --- /dev/null +++ b/pyomo/contrib/piecewise/tests/models.py @@ -0,0 +1,72 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.contrib.piecewise import PiecewiseLinearFunction +from pyomo.environ import ConcreteModel, Constraint, log, Objective, Var + + +def make_log_x_model(): + m = ConcreteModel() + m.x = Var(bounds=(1, 10)) + m.pw_log = PiecewiseLinearFunction(points=[1, 3, 6, 10], function=log) + + # Here are the linear functions, for safe keeping. + def f1(x): + return (log(3) / 2) * x - log(3) / 2 + + m.f1 = f1 + + def f2(x): + return (log(2) / 3) * x + log(3 / 2) + + m.f2 = f2 + + def f3(x): + return (log(5 / 3) / 4) * x + log(6 / ((5 / 3) ** (3 / 2))) + + m.f3 = f3 + + m.log_expr = m.pw_log(m.x) + m.obj = Objective(expr=m.log_expr) + + m.x1 = Var(bounds=(0, 3)) + m.x2 = Var(bounds=(1, 7)) + + ## apprximates paraboloid x1**2 + x2**2 + def g1(x1, x2): + return 3 * x1 + 5 * x2 - 4 + + m.g1 = g1 + + def g2(x1, x2): + return 3 * x1 + 11 * x2 - 28 + + m.g2 = g2 + simplices = [ + [(0, 1), (0, 4), (3, 4)], + [(0, 1), (3, 4), (3, 1)], + [(3, 4), (3, 7), (0, 7)], + [(0, 7), (0, 4), (3, 4)], + ] + m.pw_paraboloid = PiecewiseLinearFunction( + simplices=simplices, linear_functions=[g1, g1, g2, g2] + ) + m.paraboloid_expr = m.pw_paraboloid(m.x1, m.x2) + + def c_rule(m, i): + if i == 0: + return m.x >= m.paraboloid_expr + else: + return (1, m.x1, 2) + + m.indexed_c = Constraint([0, 1], rule=c_rule) + + return m diff --git a/pyomo/contrib/piecewise/tests/test_inner_repn_gdp.py b/pyomo/contrib/piecewise/tests/test_inner_repn_gdp.py new file mode 100644 index 00000000000..bba85a6bd7b --- /dev/null +++ b/pyomo/contrib/piecewise/tests/test_inner_repn_gdp.py @@ -0,0 +1,177 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +from pyomo.contrib.piecewise.tests import models +import pyomo.contrib.piecewise.tests.common_tests as ct +from pyomo.core.base import TransformationFactory +from pyomo.core.expr.compare import ( + assertExpressionsEqual, + assertExpressionsStructurallyEqual, +) +from pyomo.gdp import Disjunct, Disjunction +from pyomo.environ import Constraint, SolverFactory, Var + + +class TestTransformPiecewiseModelToInnerRepnGDP(unittest.TestCase): + def check_log_disjunct(self, d, pts, f, substitute_var, x): + self.assertEqual(len(d.component_map(Constraint)), 3) + # lambdas and indicator_var + self.assertEqual(len(d.component_map(Var)), 2) + self.assertIsInstance(d.lambdas, Var) + self.assertEqual(len(d.lambdas), 2) + for lamb in d.lambdas.values(): + self.assertEqual(lamb.lb, 0) + self.assertEqual(lamb.ub, 1) + self.assertIsInstance(d.convex_combo, Constraint) + assertExpressionsEqual( + self, d.convex_combo.expr, d.lambdas[0] + d.lambdas[1] == 1 + ) + self.assertIsInstance(d.set_substitute, Constraint) + assertExpressionsEqual( + self, d.set_substitute.expr, substitute_var == f(x), places=7 + ) + self.assertIsInstance(d.linear_combo, Constraint) + self.assertEqual(len(d.linear_combo), 1) + assertExpressionsEqual( + self, + d.linear_combo[0].expr, + x == pts[0] * d.lambdas[0] + pts[1] * d.lambdas[1], + ) + + def check_paraboloid_disjunct(self, d, pts, f, substitute_var, x1, x2): + self.assertEqual(len(d.component_map(Constraint)), 3) + # lambdas and indicator_var + self.assertEqual(len(d.component_map(Var)), 2) + self.assertIsInstance(d.lambdas, Var) + self.assertEqual(len(d.lambdas), 3) + for lamb in d.lambdas.values(): + self.assertEqual(lamb.lb, 0) + self.assertEqual(lamb.ub, 1) + self.assertIsInstance(d.convex_combo, Constraint) + assertExpressionsEqual( + self, d.convex_combo.expr, d.lambdas[0] + d.lambdas[1] + d.lambdas[2] == 1 + ) + self.assertIsInstance(d.set_substitute, Constraint) + assertExpressionsEqual( + self, d.set_substitute.expr, substitute_var == f(x1, x2), places=7 + ) + self.assertIsInstance(d.linear_combo, Constraint) + self.assertEqual(len(d.linear_combo), 2) + assertExpressionsEqual( + self, + d.linear_combo[0].expr, + x1 + == pts[0][0] * d.lambdas[0] + + pts[1][0] * d.lambdas[1] + + pts[2][0] * d.lambdas[2], + ) + assertExpressionsEqual( + self, + d.linear_combo[1].expr, + x2 + == pts[0][1] * d.lambdas[0] + + pts[1][1] * d.lambdas[1] + + pts[2][1] * d.lambdas[2], + ) + + def check_pw_log(self, m): + ## + # Check the transformation of the approximation of log(x) + ## + z = m.pw_log.get_transformation_var(m.log_expr) + self.assertIsInstance(z, Var) + # Now we can use those Vars to check on what the transformation created + log_block = z.parent_block() + ct.check_trans_block_structure(self, log_block) + + # Check that all of the Disjuncts have what they should + self.assertEqual(len(log_block.disjuncts), 3) + disjuncts_dict = { + log_block.disjuncts[0]: ((1, 3), m.f1), + log_block.disjuncts[1]: ((3, 6), m.f2), + log_block.disjuncts[2]: ((6, 10), m.f3), + } + for d, (pts, f) in disjuncts_dict.items(): + self.check_log_disjunct(d, pts, f, log_block.substitute_var, m.x) + + # Check the Disjunction + self.assertIsInstance(log_block.pick_a_piece, Disjunction) + self.assertEqual(len(log_block.pick_a_piece.disjuncts), 3) + for i in range(2): + self.assertIs(log_block.pick_a_piece.disjuncts[i], log_block.disjuncts[i]) + + # And check the substitute Var is in the objective now. + self.assertIs(m.obj.expr.expr, log_block.substitute_var) + + def check_pw_paraboloid(self, m): + ## + # Check the approximation of the transformation of the paraboloid + ## + z = m.pw_paraboloid.get_transformation_var(m.paraboloid_expr) + self.assertIsInstance(z, Var) + paraboloid_block = z.parent_block() + ct.check_trans_block_structure(self, paraboloid_block) + + self.assertEqual(len(paraboloid_block.disjuncts), 4) + disjuncts_dict = { + paraboloid_block.disjuncts[0]: ([(0, 1), (0, 4), (3, 4)], m.g1), + paraboloid_block.disjuncts[1]: ([(0, 1), (3, 4), (3, 1)], m.g1), + paraboloid_block.disjuncts[2]: ([(3, 4), (3, 7), (0, 7)], m.g2), + paraboloid_block.disjuncts[3]: ([(0, 7), (0, 4), (3, 4)], m.g2), + } + for d, (pts, f) in disjuncts_dict.items(): + self.check_paraboloid_disjunct( + d, pts, f, paraboloid_block.substitute_var, m.x1, m.x2 + ) + + # Check the Disjunction + self.assertIsInstance(paraboloid_block.pick_a_piece, Disjunction) + self.assertEqual(len(paraboloid_block.pick_a_piece.disjuncts), 4) + for i in range(3): + self.assertIs( + paraboloid_block.pick_a_piece.disjuncts[i], + paraboloid_block.disjuncts[i], + ) + + # And check the substitute Var is in the objective now. + self.assertIs(m.indexed_c[0].body.args[0].expr, paraboloid_block.substitute_var) + + def test_transformation_do_not_descend(self): + ct.check_transformation_do_not_descend(self, 'contrib.piecewise.inner_repn_gdp') + + def test_transformation_PiecewiseLinearFunction_targets(self): + ct.check_transformation_PiecewiseLinearFunction_targets( + self, 'contrib.piecewise.inner_repn_gdp' + ) + + def test_descend_into_expressions(self): + ct.check_descend_into_expressions(self, 'contrib.piecewise.inner_repn_gdp') + + def test_descend_into_expressions_constraint_target(self): + ct.check_descend_into_expressions_constraint_target( + self, 'contrib.piecewise.inner_repn_gdp' + ) + + def test_descend_into_expressions_objective_target(self): + ct.check_descend_into_expressions_objective_target( + self, 'contrib.piecewise.inner_repn_gdp' + ) + + @unittest.skipUnless(SolverFactory('gurobi').available(), 'Gurobi is not available') + def test_solve_disaggregated_convex_combo_model(self): + m = models.make_log_x_model() + TransformationFactory( + 'contrib.piecewise.disaggregated_convex_combination' + ).apply_to(m) + SolverFactory('gurobi').solve(m) + + ct.check_log_x_model_soln(self, m) diff --git a/pyomo/contrib/piecewise/tests/test_outer_repn_gdp.py b/pyomo/contrib/piecewise/tests/test_outer_repn_gdp.py new file mode 100644 index 00000000000..d1b600075d2 --- /dev/null +++ b/pyomo/contrib/piecewise/tests/test_outer_repn_gdp.py @@ -0,0 +1,183 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from math import sqrt +from pyomo.common.dependencies import scipy_available +import pyomo.common.unittest as unittest +from pyomo.contrib.piecewise.tests import models +import pyomo.contrib.piecewise.tests.common_tests as ct +from pyomo.core.base import TransformationFactory +from pyomo.core.expr.compare import ( + assertExpressionsEqual, + assertExpressionsStructurallyEqual, +) +from pyomo.gdp import Disjunct, Disjunction +from pyomo.environ import Constraint, SolverFactory, Var + + +class TestTransformPiecewiseModelToOuterRepnGDP(unittest.TestCase): + def check_log_disjunct(self, d, pts, f, substitute_var, x): + # We can fit both bounds constraints in one constraint, then we have the + # linear function + self.assertEqual(len(d.component_map(Constraint)), 2) + # indicator_var + self.assertEqual(len(d.component_map(Var)), 1) + self.assertIsInstance(d.simplex_halfspaces, Constraint) + self.assertEqual(d.simplex_halfspaces.lower, pts[0]) + self.assertEqual(d.simplex_halfspaces.upper, pts[1]) + self.assertIs(d.simplex_halfspaces.body, x) + + self.assertIsInstance(d.set_substitute, Constraint) + assertExpressionsEqual( + self, d.set_substitute.expr, substitute_var == f(x), places=7 + ) + + def check_paraboloid_disjunct(self, d, constraint_coefs, f, substitute_var, x1, x2): + self.assertEqual(len(d.component_map(Constraint)), 2) + # just indicator_var + self.assertEqual(len(d.component_map(Var)), 1) + for i, cons in d.simplex_halfspaces.items(): + coefs = constraint_coefs[i] + assertExpressionsEqual( + self, cons.expr, coefs[0] * x1 + coefs[1] * x2 + coefs[2] <= 0, places=6 + ) + + self.assertIsInstance(d.set_substitute, Constraint) + assertExpressionsEqual( + self, d.set_substitute.expr, substitute_var == f(x1, x2), places=7 + ) + + def check_pw_log(self, m): + ## + # Check the transformation of the approximation of log(x) + ## + z = m.pw_log.get_transformation_var(m.log_expr) + self.assertIsInstance(z, Var) + # Now we can use those Vars to check on what the transformation created + log_block = z.parent_block() + ct.check_trans_block_structure(self, log_block) + + # Check that all of the Disjuncts have what they should + self.assertEqual(len(log_block.disjuncts), 3) + disjuncts_dict = { + log_block.disjuncts[0]: ((1, 3), m.f1), + log_block.disjuncts[1]: ((3, 6), m.f2), + log_block.disjuncts[2]: ((6, 10), m.f3), + } + for d, (pts, f) in disjuncts_dict.items(): + self.check_log_disjunct(d, pts, f, log_block.substitute_var, m.x) + + # Check the Disjunction + self.assertIsInstance(log_block.pick_a_piece, Disjunction) + self.assertEqual(len(log_block.pick_a_piece.disjuncts), 3) + for i in range(2): + self.assertIs(log_block.pick_a_piece.disjuncts[i], log_block.disjuncts[i]) + + # And check the substitute Var is in the objective now. + self.assertIs(m.obj.expr.expr, log_block.substitute_var) + + def check_pw_paraboloid(self, m): + ## + # Check the approximation of the transformation of the paraboloid + ## + z = m.pw_paraboloid.get_transformation_var(m.paraboloid_expr) + self.assertIsInstance(z, Var) + paraboloid_block = z.parent_block() + ct.check_trans_block_structure(self, paraboloid_block) + + self.assertEqual(len(paraboloid_block.disjuncts), 4) + disjuncts_dict = { + # the normal vectors of the faces are normalized when we get + # them from scipy: + paraboloid_block.disjuncts[0]: ( + [ + [sqrt(2) / 2, -sqrt(2) / 2, sqrt(2) / 2], + [-1.0, 0.0, 0.0], + [0.0, 1.0, -4.0], + ], + m.g1, + ), + paraboloid_block.disjuncts[1]: ( + [ + [-sqrt(2) / 2, sqrt(2) / 2, -sqrt(2) / 2], + [0.0, -1.0, 1.0], + [1.0, 0.0, -3.0], + ], + m.g1, + ), + paraboloid_block.disjuncts[2]: ( + [ + [-sqrt(2) / 2, -sqrt(2) / 2, 7 * sqrt(2) / 2], + [0.0, 1.0, -7.0], + [1.0, 0.0, -3.0], + ], + m.g2, + ), + paraboloid_block.disjuncts[3]: ( + [ + [sqrt(2) / 2, sqrt(2) / 2, -7 * sqrt(2) / 2], + [-1.0, 0.0, 0.0], + [0.0, -1.0, 4.0], + ], + m.g2, + ), + } + for d, (constraint_coefs, f) in disjuncts_dict.items(): + self.check_paraboloid_disjunct( + d, constraint_coefs, f, paraboloid_block.substitute_var, m.x1, m.x2 + ) + + # Check the Disjunction + self.assertIsInstance(paraboloid_block.pick_a_piece, Disjunction) + self.assertEqual(len(paraboloid_block.pick_a_piece.disjuncts), 4) + for i in range(3): + self.assertIs( + paraboloid_block.pick_a_piece.disjuncts[i], + paraboloid_block.disjuncts[i], + ) + + # And check the substitute Var is in the objective now. + self.assertIs(m.indexed_c[0].body.args[0].expr, paraboloid_block.substitute_var) + + @unittest.skipUnless(scipy_available, "Scipy is not available") + def test_transformation_do_not_descend(self): + ct.check_transformation_do_not_descend(self, 'contrib.piecewise.outer_repn_gdp') + + def test_transformation_PiecewiseLinearFunction_targets(self): + ct.check_transformation_PiecewiseLinearFunction_targets( + self, 'contrib.piecewise.outer_repn_gdp' + ) + + @unittest.skipUnless(scipy_available, "Scipy is not available") + def test_descend_into_expressions(self): + ct.check_descend_into_expressions(self, 'contrib.piecewise.outer_repn_gdp') + + @unittest.skipUnless(scipy_available, "Scipy is not available") + def test_descend_into_expressions_constraint_target(self): + ct.check_descend_into_expressions_constraint_target( + self, 'contrib.piecewise.outer_repn_gdp' + ) + + def test_descend_into_expressions_objective_target(self): + ct.check_descend_into_expressions_objective_target( + self, 'contrib.piecewise.outer_repn_gdp' + ) + + @unittest.skipUnless( + SolverFactory('gurobi').available() and scipy_available, + 'Gurobi and/or scipy is not available', + ) + def test_solve_multiple_choice_model(self): + m = models.make_log_x_model() + TransformationFactory('contrib.piecewise.multiple_choice').apply_to(m) + SolverFactory('gurobi').solve(m) + + ct.check_log_x_model_soln(self, m) diff --git a/pyomo/contrib/piecewise/tests/test_piecewise_linear_function.py b/pyomo/contrib/piecewise/tests/test_piecewise_linear_function.py new file mode 100644 index 00000000000..e740e5e3384 --- /dev/null +++ b/pyomo/contrib/piecewise/tests/test_piecewise_linear_function.py @@ -0,0 +1,583 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from io import StringIO +import logging +import pickle + +from pyomo.common.dependencies import attempt_import +from pyomo.common.log import LoggingIntercept +import pyomo.common.unittest as unittest +from pyomo.contrib.piecewise import PiecewiseLinearFunction +from pyomo.core.expr.compare import ( + assertExpressionsEqual, + assertExpressionsStructurallyEqual, +) +from pyomo.environ import ConcreteModel, Constraint, log, Var + +np, numpy_available = attempt_import('numpy') +scipy, scipy_available = attempt_import('scipy') + + +def f(x): + return log(x) + + +def f1(x): + return (log(3) / 2) * x - log(3) / 2 + + +def f2(x): + return (log(2) / 3) * x + log(3 / 2) + + +def f3(x): + return (log(5 / 3) / 4) * x + log(6 / ((5 / 3) ** (3 / 2))) + + +class TestPiecewiseLinearFunction2D(unittest.TestCase): + def make_ln_x_model(self): + m = ConcreteModel() + m.x = Var(bounds=(1, 10)) + m.f = f + m.f1 = f1 + m.f2 = f2 + m.f3 = f3 + + return m + + def check_ln_x_approx(self, pw, x): + self.assertEqual(len(pw._simplices), 3) + self.assertEqual(len(pw._linear_functions), 3) + # indices of extreme points. + simplices = [(0, 1), (1, 2), (2, 3)] + for idx, simplex in enumerate(simplices): + self.assertEqual(pw._simplices[idx], simplices[idx]) + + assertExpressionsEqual( + self, pw._linear_functions[0](x), (log(3) / 2) * x - log(3) / 2, places=7 + ) + assertExpressionsEqual( + self, pw._linear_functions[1](x), (log(2) / 3) * x + log(3 / 2), places=7 + ) + assertExpressionsEqual( + self, + pw._linear_functions[2](x), + (log(5 / 3) / 4) * x + log(6 / ((5 / 3) ** (3 / 2))), + places=7, + ) + + def check_x_squared_approx(self, pw, x): + self.assertEqual(len(pw._simplices), 3) + self.assertEqual(len(pw._linear_functions), 3) + # indices of extreme points. + simplices = [(0, 1), (1, 2), (2, 3)] + for idx, simplex in enumerate(simplices): + self.assertEqual(pw._simplices[idx], simplices[idx]) + + assertExpressionsStructurallyEqual( + self, pw._linear_functions[0](x), 4 * x - 3, places=7 + ) + assertExpressionsStructurallyEqual( + self, pw._linear_functions[1](x), 9 * x - 18, places=7 + ) + assertExpressionsStructurallyEqual( + self, pw._linear_functions[2](x), 16 * x - 60, places=7 + ) + + def test_pw_linear_approx_of_ln_x_simplices(self): + m = self.make_ln_x_model() + simplices = [(1, 3), (3, 6), (6, 10)] + m.pw = PiecewiseLinearFunction(simplices=simplices, function=m.f) + self.check_ln_x_approx(m.pw, m.x) + + def test_pw_linear_approx_of_ln_x_points(self): + m = self.make_ln_x_model() + m.pw = PiecewiseLinearFunction(points=[1, 3, 6, 10], function=m.f) + self.check_ln_x_approx(m.pw, m.x) + + def test_pw_linear_approx_of_ln_x_linear_funcs(self): + m = self.make_ln_x_model() + m.pw = PiecewiseLinearFunction( + simplices=[(1, 3), (3, 6), (6, 10)], linear_functions=[m.f1, m.f2, m.f3] + ) + self.check_ln_x_approx(m.pw, m.x) + + def test_pw_linear_approx_of_ln_x_tabular_data(self): + m = self.make_ln_x_model() + m.pw = PiecewiseLinearFunction( + tabular_data={1: 0, 3: log(3), 6: log(6), 10: log(10)} + ) + self.check_ln_x_approx(m.pw, m.x) + + def test_use_pw_function_in_constraint(self): + m = self.make_ln_x_model() + m.pw = PiecewiseLinearFunction( + simplices=[(1, 3), (3, 6), (6, 10)], linear_functions=[m.f1, m.f2, m.f3] + ) + m.c = Constraint(expr=m.pw(m.x) <= 1) + self.assertEqual(str(m.c.body.expr), "pw(x)") + + def test_evaluate_pw_function(self): + m = self.make_ln_x_model() + m.pw = PiecewiseLinearFunction( + simplices=[(1, 3), (3, 6), (6, 10)], linear_functions=[m.f1, m.f2, m.f3] + ) + self.assertAlmostEqual(m.pw(1), 0) + self.assertAlmostEqual(m.pw(2), m.f1(2)) + self.assertAlmostEqual(m.pw(3), log(3)) + self.assertAlmostEqual(m.pw(4.5), m.f2(4.5)) + self.assertAlmostEqual(m.pw(9.2), m.f3(9.2)) + self.assertAlmostEqual(m.pw(10), log(10)) + + def test_indexed_pw_linear_function_approximate_over_simplices(self): + m = self.make_ln_x_model() + m.z = Var([1, 2], bounds=(-10, 10)) + + def g1(x): + return x**2 + + def g2(x): + return log(x) + + m.funcs = {1: g1, 2: g2} + simplices = [(1, 3), (3, 6), (6, 10)] + m.pw = PiecewiseLinearFunction( + [1, 2], simplices=simplices, function_rule=lambda m, i: m.funcs[i] + ) + self.check_ln_x_approx(m.pw[2], m.z[2]) + self.check_x_squared_approx(m.pw[1], m.z[1]) + + def test_indexed_pw_linear_function_approximate_over_points(self): + m = self.make_ln_x_model() + m.z = Var([1, 2], bounds=(-10, 10)) + + def g1(x): + return x**2 + + def g2(x): + return log(x) + + m.funcs = {1: g1, 2: g2} + + def silly_pts_rule(m, i): + return [1, 3, 6, 10] + + m.pw = PiecewiseLinearFunction( + [1, 2], points=silly_pts_rule, function_rule=lambda m, i: m.funcs[i] + ) + self.check_ln_x_approx(m.pw[2], m.z[2]) + self.check_x_squared_approx(m.pw[1], m.z[1]) + + def test_indexed_pw_linear_function_tabular_data(self): + m = self.make_ln_x_model() + m.z = Var([1, 2], bounds=(-10, 10)) + + def silly_tabular_data_rule(m, i): + if i == 1: + return {1: 1, 3: 9, 6: 36, 10: 100} + if i == 2: + return {1: 0, 3: log(3), 6: log(6), 10: log(10)} + + m.pw = PiecewiseLinearFunction( + [1, 2], tabular_data_rule=silly_tabular_data_rule + ) + self.check_ln_x_approx(m.pw[2], m.z[2]) + self.check_x_squared_approx(m.pw[1], m.z[1]) + + def test_indexed_pw_linear_function_linear_funcs_and_simplices(self): + m = self.make_ln_x_model() + m.z = Var([1, 2], bounds=(-10, 10)) + + def silly_simplex_rule(m, i): + return [(1, 3), (3, 6), (6, 10)] + + def h1(x): + return 4 * x - 3 + + def h2(x): + return 9 * x - 18 + + def h3(x): + return 16 * x - 60 + + def silly_linear_func_rule(m, i): + return [h1, h2, h3] + + m.pw = PiecewiseLinearFunction( + [1, 2], + simplices=silly_simplex_rule, + linear_functions=silly_linear_func_rule, + ) + self.check_x_squared_approx(m.pw[1], m.z[1]) + self.check_x_squared_approx(m.pw[2], m.z[2]) + + def test_pickle(self): + m = self.make_ln_x_model() + m.pw = PiecewiseLinearFunction(points=[1, 3, 6, 10], function=m.f) + m.c = Constraint(expr=m.pw(m.x) >= 0.35) + + # pickle and unpickle + unpickle = pickle.loads(pickle.dumps(m)) + + # Check that the pprint is equal + m_buf = StringIO() + m.pprint(ostream=m_buf) + m_output = m_buf.getvalue() + + unpickle_buf = StringIO() + unpickle.pprint(ostream=unpickle_buf) + unpickle_output = unpickle_buf.getvalue() + self.assertMultiLineEqual(m_output, unpickle_output) + + +# Here's a cute paraboloid: +def g(x, y): + return x**2 + y**2 + + +class TestPiecewiseLinearFunction3D(unittest.TestCase): + simplices = [ + [(0, 1), (0, 4), (3, 4)], + [(0, 1), (3, 4), (3, 1)], + [(3, 4), (3, 7), (0, 7)], + [(0, 7), (0, 4), (3, 4)], + ] + + def make_model(self): + m = ConcreteModel() + m.x1 = Var(bounds=(0, 3)) + m.x2 = Var(bounds=(1, 7)) + m.g = g + return m + + def check_pw_linear_approximation(self, m): + self.assertEqual(len(m.pw._simplices), 4) + for i, simplex in enumerate(m.pw._simplices): + for idx in simplex: + self.assertIn(m.pw._points[idx], self.simplices[i]) + + self.assertEqual(len(m.pw._linear_functions), 4) + + assertExpressionsStructurallyEqual( + self, + m.pw._linear_functions[0](m.x1, m.x2), + 3 * m.x1 + 5 * m.x2 - 4, + places=7, + ) + assertExpressionsStructurallyEqual( + self, + m.pw._linear_functions[1](m.x1, m.x2), + 3 * m.x1 + 5 * m.x2 - 4, + places=7, + ) + assertExpressionsStructurallyEqual( + self, + m.pw._linear_functions[2](m.x1, m.x2), + 3 * m.x1 + 11 * m.x2 - 28, + places=7, + ) + assertExpressionsStructurallyEqual( + self, + m.pw._linear_functions[3](m.x1, m.x2), + 3 * m.x1 + 11 * m.x2 - 28, + places=7, + ) + + @unittest.skipUnless( + scipy_available and numpy_available, "scipy and/or numpy are not available" + ) + def test_pw_linear_approx_of_paraboloid_points(self): + m = self.make_model() + m.pw = PiecewiseLinearFunction( + points=[(0, 1), (0, 4), (0, 7), (3, 1), (3, 4), (3, 7)], function=m.g + ) + self.check_pw_linear_approximation(m) + + @unittest.skipUnless(scipy_available, "scipy is not available") + def test_pw_linear_approx_tabular_data(self): + m = self.make_model() + + m.pw = PiecewiseLinearFunction( + tabular_data={ + (0, 1): g(0, 1), + (0, 4): g(0, 4), + (0, 7): g(0, 7), + (3, 1): g(3, 1), + (3, 4): g(3, 4), + (3, 7): g(3, 7), + } + ) + self.check_pw_linear_approximation(m) + + @unittest.skipUnless(numpy_available, "numpy are not available") + def test_pw_linear_approx_of_paraboloid_simplices(self): + m = self.make_model() + m.pw = PiecewiseLinearFunction(function=m.g, simplices=self.simplices) + self.check_pw_linear_approximation(m) + + def test_pw_linear_approx_of_paraboloid_linear_funcs(self): + m = self.make_model() + + def g1(x1, x2): + return 3 * x1 + 5 * x2 - 4 + + def g2(x1, x2): + return 3 * x1 + 11 * x2 - 28 + + m.pw = PiecewiseLinearFunction( + simplices=self.simplices, linear_functions=[g1, g1, g2, g2] + ) + self.check_pw_linear_approximation(m) + + def test_use_pw_linear_approx_in_constraint(self): + m = self.make_model() + + def g1(x1, x2): + return 3 * x1 + 5 * x2 - 4 + + def g2(x1, x2): + return 3 * x1 + 11 * x2 - 28 + + m.pw = PiecewiseLinearFunction( + simplices=self.simplices, linear_functions=[g1, g1, g2, g2] + ) + + m.c = Constraint(expr=m.pw(m.x1, m.x2) <= 5) + self.assertEqual(str(m.c.body.expr), "pw(x1, x2)") + self.assertIs(m.c.body.expr.pw_linear_function, m.pw) + + @unittest.skipUnless(numpy_available, "numpy is not available") + def test_evaluate_pw_linear_function(self): + # NOTE: This test requires numpy because it is used to check which + # simplex a point is in + m = self.make_model() + + def g1(x1, x2): + return 3 * x1 + 5 * x2 - 4 + + def g2(x1, x2): + return 3 * x1 + 11 * x2 - 28 + + m.pw = PiecewiseLinearFunction( + simplices=self.simplices, linear_functions=[g1, g1, g2, g2] + ) + # check it's equal to the original function at all the extreme points of + # the simplices + for x1, x2 in m.pw._points: + self.assertAlmostEqual(m.pw(x1, x2), m.g(x1, x2)) + # check some points in the approximation + self.assertAlmostEqual(m.pw(1, 3), g1(1, 3)) + self.assertAlmostEqual(m.pw(2.5, 6), g2(2.5, 6)) + self.assertAlmostEqual(m.pw(0.2, 4.3), g2(0.2, 4.3)) + + +class TestTriangulationProducesDegenerateSimplices(unittest.TestCase): + cube_extreme_pt_indices = [ + {10, 11, 13, 14, 19, 20, 22, 23}, # right bottom back + {9, 10, 12, 13, 18, 19, 21, 22}, # right bottom front + {0, 1, 3, 4, 9, 10, 12, 13}, # left bottom front + {1, 2, 4, 5, 10, 11, 13, 14}, # left bottom back + {3, 4, 6, 7, 12, 13, 15, 16}, # left top front + {4, 5, 7, 8, 13, 14, 16, 17}, # left top back + {12, 13, 15, 16, 21, 22, 24, 25}, # right top front + {13, 14, 16, 17, 22, 23, 25, 26}, # right top back + ] + + def make_model(self): + m = ConcreteModel() + + m.f = lambda x1, x2, y: x1 * x2 + y + # This is a 2x2 stack of cubes, so there are 8 total cubes, each of which + # will get divided into 6 simplices. + m.points = [ + (-2.0, 0.0, 1.0), + (-2.0, 0.0, 4.0), + (-2.0, 0.0, 7.0), + (-2.0, 1.5, 1.0), + (-2.0, 1.5, 4.0), + (-2.0, 1.5, 7.0), + (-2.0, 3.0, 1.0), + (-2.0, 3.0, 4.0), + (-2.0, 3.0, 7.0), + (-1.5, 0.0, 1.0), + (-1.5, 0.0, 4.0), + (-1.5, 0.0, 7.0), + (-1.5, 1.5, 1.0), + (-1.5, 1.5, 4.0), + (-1.5, 1.5, 7.0), + (-1.5, 3.0, 1.0), + (-1.5, 3.0, 4.0), + (-1.5, 3.0, 7.0), + (-1.0, 0.0, 1.0), + (-1.0, 0.0, 4.0), + (-1.0, 0.0, 7.0), + (-1.0, 1.5, 1.0), + (-1.0, 1.5, 4.0), + (-1.0, 1.5, 7.0), + (-1.0, 3.0, 1.0), + (-1.0, 3.0, 4.0), + (-1.0, 3.0, 7.0), + ] + return m + + @unittest.skipUnless( + scipy_available and numpy_available, "scipy and/or numpy are not available" + ) + def test_degenerate_simplices_filtered(self): + m = self.make_model() + pw = m.approx = PiecewiseLinearFunction(points=m.points, function=m.f) + + # check that all the points got used + self.assertEqual(len(pw._points), 27) + for p_model, p_pw in zip(m.points, pw._points): + self.assertEqual(p_model, p_pw) + + # Started with a 2x2 grid of cubes, and each is divided into 6 + # simplices. It's crazy degenerate in terms of *how* this is done, but + # that's the point of this test. + self.assertEqual(len(pw._simplices), 48) + simplex_in_cube = {idx: 0 for idx in range(8)} + for simplex in pw._simplices: + for i, vertex_set in enumerate(self.cube_extreme_pt_indices): + if set(simplex).issubset(vertex_set): + simplex_in_cube[i] += 1 + # verify the simplex is full-dimensional + pts = np.array([pw._points[j] for j in simplex]).transpose() + A = pts[:, 1:] - np.append(pts[:, :2], pts[:, [0]], axis=1) + self.assertNotEqual(np.linalg.det(A), 0) + + # Check that they are 6 to a cube, as expected + for num in simplex_in_cube.values(): + self.assertEqual(num, 6) + + @unittest.skipUnless( + scipy_available and numpy_available, "scipy and/or numpy are not available" + ) + def test_redundant_points_logged(self): + m = self.make_model() + # add a redundant point + m.points.append((-2, 0, 1)) + + out = StringIO() + with LoggingIntercept( + out, 'pyomo.contrib.piecewise.piecewise_linear_function', level=logging.INFO + ): + m.approx = PiecewiseLinearFunction(points=m.points, function=m.f) + + self.assertIn( + "The Delaunay triangulation dropped the point with index 27 " + "from the triangulation", + out.getvalue(), + ) + + @unittest.skipUnless(numpy_available, "numpy is not available") + def test_user_given_degenerate_simplex_error(self): + m = self.make_model() + with self.assertRaisesRegex( + ValueError, + "When calculating the hyperplane approximation over the simplex " + "with index 0, the matrix was unexpectedly singular. This " + "likely means that this simplex is degenerate", + ): + m.pw = PiecewiseLinearFunction( + simplices=[ + ( + (-2.0, 0.0, 1.0), + (-2.0, 0.0, 4.0), + (-2.0, 1.5, 1.0), + (-2.0, 1.5, 4.0), + ) + ], + function=m.f, + ) + + @unittest.skipUnless( + scipy_available and numpy_available, "scipy and/or numpy are not available" + ) + def test_simplex_not_numerically_full_rank_but_determinant_nonzero(self): + m = ConcreteModel() + + def f(x3, x6, x9, x4): + return -x6 * (0.01 * x4 * x9 + x3) + 0.98 * x3 + + points = [ + (0, 0.85, 1.2, 0), + (0.07478, 0.86396, 1.8668, 5), + (0, 0.85, 1.8668, 0), + (0.07478, 0.86396, 2.18751, 5), + (0, 0.86396, 1.2, 0), + (0.07478, 0.87971, 2.18751, 5), + (0, 0.87971, 1.2, 0), + (0.07478, 0.89001, 2.18751, 5), + (0.07478, 0.85, 1.2, 0), + (0.28333, 0.86396, 2.18751, 5), + (0.07478, 0.86396, 1.2, 0), + (0.28333, 0.89001, 2.18751, 5), + (0.28333, 0.85, 1.2, 0), + (0.31332, 0.89001, 2.18751, 5), + (0.31332, 0.85, 1.2, 0), + (1.2, 0.89001, 2.18751, 5), + (0, 0.89001, 1.2, 0), + (0.07478, 0.91727, 1.8668, 5), + (0, 0.89001, 1.8668, 0), + (0.07478, 0.91727, 2.18751, 5), + (0, 0.91727, 1.2, 0), + (0.07478, 0.93, 2.18751, 5), + (0.07478, 0.89001, 1.2, 0), + (0.28333, 0.91727, 2.18751, 5), + (0.07478, 0.91727, 1.2, 0), + (0.28333, 0.93, 2.18751, 5), + (0.28333, 0.89001, 1.2, 0), + (0.31332, 0.93, 2.18751, 5), + (0.31332, 0.89001, 1.2, 0), + (1.2, 0.93, 2.18751, 5), + (0, 0.85, 2.18751, 0), + (0.07478, 0.86396, 3.49134, 5), + (0, 0.85, 3.49134, 0), + (0.07478, 0.86396, 4, 5), + (0, 0.86396, 2.18751, 0), + (0.07478, 0.87971, 4, 5), + (0, 0.87971, 2.18751, 0), + (0.07478, 0.89001, 4, 5), + (0.07478, 0.85, 2.18751, 0), + (0.28333, 0.86396, 4, 5), + (0.07478, 0.86396, 2.18751, 0), + (0.28333, 0.89001, 4, 5), + (0.28333, 0.85, 2.18751, 0), + (0.31332, 0.89001, 4, 5), + (0.31332, 0.85, 2.18751, 0), + (1.2, 0.89001, 4, 5), + (0, 0.89001, 2.18751, 0), + (0.07478, 0.91727, 3.49134, 5), + (0, 0.89001, 3.49134, 0), + (0.07478, 0.91727, 4, 5), + (0, 0.91727, 2.18751, 0), + (0.07478, 0.93, 3.49134, 5), + (0, 0.91727, 3.49134, 0), + (0.07478, 0.93, 4, 5), + (0.07478, 0.89001, 2.18751, 0), + (0.28333, 0.91727, 4, 5), + (0.07478, 0.91727, 2.18751, 0), + (0.28333, 0.93, 4, 5), + (0.28333, 0.89001, 2.18751, 0), + (0.31332, 0.93, 4, 5), + (0.31332, 0.89001, 2.18751, 0), + (1.2, 0.93, 4, 5), + ] + m.pw = PiecewiseLinearFunction(points=points, function=f) + + # The big win is if the above runs, but we'll check the approximation + # computationally at least, to make sure that at all the points we gave, + # the pw linear approximation evaluates to the same value as the + # original nonlinear function. + for pt in points: + self.assertAlmostEqual(m.pw(*pt), f(*pt)) diff --git a/pyomo/contrib/piecewise/tests/test_reduced_inner_repn.py b/pyomo/contrib/piecewise/tests/test_reduced_inner_repn.py new file mode 100644 index 00000000000..b10edaac737 --- /dev/null +++ b/pyomo/contrib/piecewise/tests/test_reduced_inner_repn.py @@ -0,0 +1,256 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +from pyomo.contrib.piecewise.tests import models +import pyomo.contrib.piecewise.tests.common_tests as ct +from pyomo.core.base import TransformationFactory +from pyomo.core.expr.compare import ( + assertExpressionsEqual, + assertExpressionsStructurallyEqual, +) +from pyomo.gdp import Disjunct, Disjunction +from pyomo.environ import Constraint, SolverFactory, Var + + +class TestTransformPiecewiseModelToReducedInnerRepnGDP(unittest.TestCase): + def check_disjunct(self, d, not_pts): + self.assertEqual(len(d.component_map(Constraint)), 1) + # just the indicator_var + self.assertEqual(len(d.component_map(Var)), 1) + self.assertIsInstance(d.lambdas_zero_for_other_simplices, Constraint) + self.assertEqual(len(d.lambdas_zero_for_other_simplices), len(not_pts)) + transBlock = d.parent_block() + for i, cons in zip(not_pts, d.lambdas_zero_for_other_simplices.values()): + assertExpressionsEqual(self, cons.expr, transBlock.lambdas[i] <= 0) + + def check_log_trans_block_structure(self, transBlock): + m = transBlock.model() + # One (indexed) disjunct + self.assertEqual(len(transBlock.component_map(Disjunct)), 1) + # One disjunction + self.assertEqual(len(transBlock.component_map(Disjunction)), 1) + # substitute Var and lambdas: + self.assertEqual(len(transBlock.component_map(Var)), 2) + # The 'z' var (that we will substitute in for the function being + # approximated) is here: + self.assertIsInstance(transBlock.substitute_var, Var) + + self.assertIsInstance(transBlock.lambdas, Var) + self.assertEqual(len(transBlock.lambdas), 4) + for lamb in transBlock.lambdas.values(): + self.assertEqual(lamb.lb, 0) + self.assertEqual(lamb.ub, 1) + self.assertIsInstance(transBlock.convex_combo, Constraint) + assertExpressionsEqual( + self, + transBlock.convex_combo.expr, + transBlock.lambdas[0] + + transBlock.lambdas[1] + + transBlock.lambdas[2] + + transBlock.lambdas[3] + == 1, + ) + self.assertIsInstance(transBlock.linear_combo, Constraint) + self.assertEqual(len(transBlock.linear_combo), 1) + pts = m.pw_log._points + assertExpressionsEqual( + self, + transBlock.linear_combo[0].expr, + m.x + == pts[0][0] * transBlock.lambdas[0] + + pts[1][0] * transBlock.lambdas[1] + + pts[2][0] * transBlock.lambdas[2] + + pts[3][0] * transBlock.lambdas[3], + ) + + self.assertIsInstance(transBlock.linear_func, Constraint) + self.assertEqual(len(transBlock.linear_func), 1) + assertExpressionsEqual( + self, + transBlock.linear_func.expr, + transBlock.lambdas[0] * m.f1(1) + + transBlock.lambdas[1] * m.f1(3) + + transBlock.lambdas[2] * m.f2(6) + + transBlock.lambdas[3] * m.f3(10) + == transBlock.substitute_var, + places=7, + ) + + def check_paraboloid_trans_block_structure(self, transBlock): + m = transBlock.model() + # One (indexed) disjunct + self.assertEqual(len(transBlock.component_map(Disjunct)), 1) + # One disjunction + self.assertEqual(len(transBlock.component_map(Disjunction)), 1) + # substitute Var and lambdas: + self.assertEqual(len(transBlock.component_map(Var)), 2) + # 3 constraints: The convexity one, the x-is-a-linear-combo of extreme + # points one, and the + # z-is-a-linear-combo-of-pw-linear-function-values-at-extreme-ppoints + # one: + self.assertEqual(len(transBlock.component_map(Constraint)), 3) + + # The 'z' var (that we will substitute in for the function being + # approximated) is here: + self.assertIsInstance(transBlock.substitute_var, Var) + + self.assertIsInstance(transBlock.lambdas, Var) + self.assertEqual(len(transBlock.lambdas), 6) + for lamb in transBlock.lambdas.values(): + self.assertEqual(lamb.lb, 0) + self.assertEqual(lamb.ub, 1) + self.assertIsInstance(transBlock.convex_combo, Constraint) + assertExpressionsEqual( + self, + transBlock.convex_combo.expr, + transBlock.lambdas[0] + + transBlock.lambdas[1] + + transBlock.lambdas[2] + + transBlock.lambdas[3] + + transBlock.lambdas[4] + + transBlock.lambdas[5] + == 1, + ) + self.assertIsInstance(transBlock.linear_combo, Constraint) + self.assertEqual(len(transBlock.linear_combo), 2) + pts = m.pw_paraboloid._points + assertExpressionsEqual( + self, + transBlock.linear_combo[0].expr, + m.x1 + == pts[0][0] * transBlock.lambdas[0] + + pts[1][0] * transBlock.lambdas[1] + + pts[2][0] * transBlock.lambdas[2] + + pts[3][0] * transBlock.lambdas[3] + + pts[4][0] * transBlock.lambdas[4] + + pts[5][0] * transBlock.lambdas[5], + ) + assertExpressionsEqual( + self, + transBlock.linear_combo[1].expr, + m.x2 + == pts[0][1] * transBlock.lambdas[0] + + pts[1][1] * transBlock.lambdas[1] + + pts[2][1] * transBlock.lambdas[2] + + pts[3][1] * transBlock.lambdas[3] + + pts[4][1] * transBlock.lambdas[4] + + pts[5][1] * transBlock.lambdas[5], + ) + + self.assertIsInstance(transBlock.linear_func, Constraint) + self.assertEqual(len(transBlock.linear_func), 1) + assertExpressionsEqual( + self, + transBlock.linear_func.expr, + transBlock.lambdas[0] * m.g1(0, 1) + + transBlock.lambdas[1] * m.g1(0, 4) + + transBlock.lambdas[2] * m.g1(3, 4) + + transBlock.lambdas[3] * m.g1(3, 1) + + transBlock.lambdas[4] * m.g2(3, 7) + + transBlock.lambdas[5] * m.g2(0, 7) + == transBlock.substitute_var, + ) + + def check_pw_log(self, m): + ## + # Check the transformation of the approximation of log(x) + ## + z = m.pw_log.get_transformation_var(m.log_expr) + self.assertIsInstance(z, Var) + # Now we can use those Vars to check on what the transformation created + log_block = z.parent_block() + self.check_log_trans_block_structure(log_block) + + # Check that all of the Disjuncts have what they should + self.assertEqual(len(log_block.disjuncts), 3) + disjuncts_dict = { + # disjunct : [extreme points *not* in corresponding x domain] + log_block.disjuncts[0]: (2, 3), + log_block.disjuncts[1]: (0, 3), + log_block.disjuncts[2]: (0, 1), + } + for d, not_pts in disjuncts_dict.items(): + self.check_disjunct(d, not_pts) + + # Check the Disjunction + self.assertIsInstance(log_block.pick_a_piece, Disjunction) + self.assertEqual(len(log_block.pick_a_piece.disjuncts), 3) + for i in range(2): + self.assertIs(log_block.pick_a_piece.disjuncts[i], log_block.disjuncts[i]) + + # And check the substitute Var is in the objective now. + self.assertIs(m.obj.expr.expr, log_block.substitute_var) + + def check_pw_paraboloid(self, m): + ## + # Check the approximation of the transformation of the paraboloid + ## + z = m.pw_paraboloid.get_transformation_var(m.paraboloid_expr) + self.assertIsInstance(z, Var) + paraboloid_block = z.parent_block() + self.check_paraboloid_trans_block_structure(paraboloid_block) + + self.assertEqual(len(paraboloid_block.disjuncts), 4) + disjuncts_dict = { + # disjunct : [extreme points *not* in corresponding (x1, x2) domain] + paraboloid_block.disjuncts[0]: [3, 4, 5], + paraboloid_block.disjuncts[1]: [1, 4, 5], + paraboloid_block.disjuncts[2]: [0, 1, 3], + paraboloid_block.disjuncts[3]: [0, 3, 4], + } + for d, not_pts in disjuncts_dict.items(): + self.check_disjunct(d, not_pts) + + # Check the Disjunction + self.assertIsInstance(paraboloid_block.pick_a_piece, Disjunction) + self.assertEqual(len(paraboloid_block.pick_a_piece.disjuncts), 4) + for i in range(3): + self.assertIs( + paraboloid_block.pick_a_piece.disjuncts[i], + paraboloid_block.disjuncts[i], + ) + + # And check the substitute Var is in the objective now. + self.assertIs(m.indexed_c[0].body.args[0].expr, paraboloid_block.substitute_var) + + def test_transformation_do_not_descend(self): + ct.check_transformation_do_not_descend( + self, 'contrib.piecewise.reduced_inner_repn_gdp' + ) + + def test_transformation_PiecewiseLinearFunction_targets(self): + ct.check_transformation_PiecewiseLinearFunction_targets( + self, 'contrib.piecewise.reduced_inner_repn_gdp' + ) + + def test_descend_into_expressions(self): + ct.check_descend_into_expressions( + self, 'contrib.piecewise.reduced_inner_repn_gdp' + ) + + def test_descend_into_expressions_constraint_target(self): + ct.check_descend_into_expressions_constraint_target( + self, 'contrib.piecewise.reduced_inner_repn_gdp' + ) + + def test_descend_into_expressions_objective_target(self): + ct.check_descend_into_expressions_objective_target( + self, 'contrib.piecewise.reduced_inner_repn_gdp' + ) + + @unittest.skipUnless(SolverFactory('gurobi').available(), 'Gurobi is not available') + def test_solve_convex_combo_model(self): + m = models.make_log_x_model() + TransformationFactory('contrib.piecewise.convex_combination').apply_to(m) + SolverFactory('gurobi').solve(m) + + ct.check_log_x_model_soln(self, m) diff --git a/pyomo/contrib/piecewise/transform/__init__.py b/pyomo/contrib/piecewise/transform/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pyomo/contrib/piecewise/transform/convex_combination.py b/pyomo/contrib/piecewise/transform/convex_combination.py new file mode 100644 index 00000000000..abfeac27129 --- /dev/null +++ b/pyomo/contrib/piecewise/transform/convex_combination.py @@ -0,0 +1,42 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.core.base import Transformation, TransformationFactory +import pyomo.gdp.plugins.multiple_bigm + + +@TransformationFactory.register( + 'contrib.piecewise.convex_combination', + doc="Convert piecewise-linear model to a GDP " + "to 'Convex Combination' MIP formulation.", +) +class ConvexCombinationTransformation(Transformation): + """ + Converts a model containing PiecewiseLinearFunctions to a an equivalent + MIP via the Convex Combination method from [1]. Note that, + while this model probably resolves to the model described in [1] after + presolve, the Pyomo version is not as simplified. + + References + ---------- + [1] J.P. Vielma, S. Ahmed, and G. Nemhauser, "Mixed-integer models + for nonseparable piecewise-linear optimization: unifying framework + and extensions," Operations Research, vol. 58, no. 2, pp. 305-315, + 2010. + """ + + def _apply_to(self, instance, **kwds): + TransformationFactory('contrib.piecewise.reduced_inner_repn_gdp').apply_to( + instance + ) + TransformationFactory('gdp.mbigm').apply_to( + instance, reduce_bound_constraints=True + ) diff --git a/pyomo/contrib/piecewise/transform/disaggregated_convex_combination.py b/pyomo/contrib/piecewise/transform/disaggregated_convex_combination.py new file mode 100644 index 00000000000..44059935e09 --- /dev/null +++ b/pyomo/contrib/piecewise/transform/disaggregated_convex_combination.py @@ -0,0 +1,39 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.core.base import Transformation, TransformationFactory +import pyomo.gdp.plugins.hull + + +@TransformationFactory.register( + 'contrib.piecewise.disaggregated_convex_combination', + doc="Convert piecewise-linear model to a GDP " + "to 'Disaggregated Convex Combination' MIP " + "formulation.", +) +class DisaggregatedConvexCombinationTransformation(Transformation): + """ + Converts a model containing PiecewiseLinearFunctions to a an equivalent + MIP via the Diaggregated Convex Combination method from [1]. Note that, + while this model probably resolves to the model described in [1] after + presolve, the Pyomo version is not as simplified. + + References + ---------- + [1] J.P. Vielma, S. Ahmed, and G. Nemhauser, "Mixed-integer models + for nonseparable piecewise-linear optimization: unifying framework + and extensions," Operations Research, vol. 58, no. 2, pp. 305-315, + 2010. + """ + + def _apply_to(self, instance, **kwds): + TransformationFactory('contrib.piecewise.inner_repn_gdp').apply_to(instance) + TransformationFactory('gdp.hull').apply_to(instance) diff --git a/pyomo/contrib/piecewise/transform/inner_representation_gdp.py b/pyomo/contrib/piecewise/transform/inner_representation_gdp.py new file mode 100644 index 00000000000..627e41aeae9 --- /dev/null +++ b/pyomo/contrib/piecewise/transform/inner_representation_gdp.py @@ -0,0 +1,106 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr +from pyomo.contrib.piecewise.transform.piecewise_to_gdp_transformation import ( + PiecewiseLinearToGDP, +) +from pyomo.core import Constraint, NonNegativeIntegers, Suffix, Var +from pyomo.core.base import TransformationFactory +from pyomo.gdp import Disjunct, Disjunction + + +@TransformationFactory.register( + 'contrib.piecewise.inner_repn_gdp', + doc="Convert piecewise-linear model to a GDP " + "using an inner representation of the " + "simplices that are the domains of the linear " + "functions.", +) +class InnerRepresentationGDPTransformation(PiecewiseLinearToGDP): + """ + Convert a model involving piecewise linear expressions into a GDP by + representing the piecewise linear functions as Disjunctions where the + simplices over which the linear functions are defined are represented + in an "inner" representation--as convex combinations of their extreme + points. The multipliers defining the convex combination are local to + each Disjunct, so there is one per extreme point in each simplex. + + This transformation can be called in one of two ways: + 1) The default, where 'descend_into_expressions' is False. This is + more computationally efficient, but relies on the + PiecewiseLinearFunctions being declared on the same Block in which + they are used in Expressions (if you are hoping to maintain the + original hierarchical structure of the model). In this mode, + targets must be Blocks and/or PiecewiseLinearFunctions. + 2) With 'descend_into_expressions' True. This is less computationally + efficient, but will respect hierarchical structure by finding + uses of PiecewiseLinearFunctions in Constraint and Obective + expressions and putting their transformed counterparts on the same + parent Block as the component owning their parent expression. In + this mode, targets must be Blocks, Constraints, and/or Objectives. + """ + + CONFIG = PiecewiseLinearToGDP.CONFIG() + _transformation_name = 'pw_linear_inner_repn' + + def _transform_pw_linear_expr(self, pw_expr, pw_linear_func, transformation_block): + transBlock = transformation_block.transformed_functions[ + len(transformation_block.transformed_functions) + ] + + # get the PiecewiseLinearFunctionExpression + dimension = pw_expr.nargs() + transBlock.disjuncts = Disjunct(NonNegativeIntegers) + substitute_var = transBlock.substitute_var = Var() + pw_linear_func.map_transformation_var(pw_expr, substitute_var) + substitute_var_lb = float('inf') + substitute_var_ub = -float('inf') + for simplex, linear_func in zip( + pw_linear_func._simplices, pw_linear_func._linear_functions + ): + disj = transBlock.disjuncts[len(transBlock.disjuncts)] + disj.lambdas = Var(NonNegativeIntegers, dense=False, bounds=(0, 1)) + extreme_pts = [] + for idx in simplex: + extreme_pts.append(pw_linear_func._points[idx]) + + disj.convex_combo = Constraint( + expr=sum(disj.lambdas[i] for i in range(len(extreme_pts))) == 1 + ) + linear_func_expr = linear_func(*pw_expr.args) + disj.set_substitute = Constraint(expr=substitute_var == linear_func_expr) + (lb, ub) = compute_bounds_on_expr(linear_func_expr) + if lb is not None and lb < substitute_var_lb: + substitute_var_lb = lb + if ub is not None and ub > substitute_var_ub: + substitute_var_ub = ub + + @disj.Constraint(range(dimension)) + def linear_combo(disj, i): + return pw_expr.args[i] == sum( + disj.lambdas[j] * pt[i] for j, pt in enumerate(extreme_pts) + ) + + # Mark the lambdas as local so that we don't do anything silly in + # the hull transformation. + disj.LocalVars = Suffix(direction=Suffix.LOCAL) + disj.LocalVars[disj] = [v for v in disj.lambdas.values()] + + if substitute_var_lb < float('inf'): + transBlock.substitute_var.setlb(substitute_var_lb) + if substitute_var_ub > -float('inf'): + transBlock.substitute_var.setub(substitute_var_ub) + transBlock.pick_a_piece = Disjunction( + expr=[d for d in transBlock.disjuncts.values()] + ) + + return transBlock.substitute_var diff --git a/pyomo/contrib/piecewise/transform/multiple_choice.py b/pyomo/contrib/piecewise/transform/multiple_choice.py new file mode 100644 index 00000000000..97dc8e9d2b3 --- /dev/null +++ b/pyomo/contrib/piecewise/transform/multiple_choice.py @@ -0,0 +1,43 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.core.base import Transformation, TransformationFactory +import pyomo.gdp.plugins.hull + + +@TransformationFactory.register( + 'contrib.piecewise.multiple_choice', + doc="Convert piecewise-linear model to a GDP " + "to 'Multiple Choice' MIP " + "formulation.", +) +class MultipleChoiceTransformation(Transformation): + """ + Converts a model containing PiecewiseLinearFunctions to a an equivalent + MIP via the Multiple Choice method from [1]. Note that, + while this model probably resolves to the model described in [1] after + presolve, the Pyomo version is not as simplified. Specifically, in [1], the + the 'z' variables (representing the value of the piecewise-linear function + in each Disjunct) are not disaggregated. In this transformation's output + they will be, but a linear combination of inequalities yields a model + equivalent to the Multiple Choice model in [1]. + + References + ---------- + [1] J.P. Vielma, S. Ahmed, and G. Nemhauser, "Mixed-integer models + for nonseparable piecewise-linear optimization: unifying framework + and extensions," Operations Research, vol. 58, no. 2, pp. 305-315, + 2010. + """ + + def _apply_to(self, instance, **kwds): + TransformationFactory('contrib.piecewise.outer_repn_gdp').apply_to(instance) + TransformationFactory('gdp.hull').apply_to(instance) diff --git a/pyomo/contrib/piecewise/transform/outer_representation_gdp.py b/pyomo/contrib/piecewise/transform/outer_representation_gdp.py new file mode 100644 index 00000000000..04cd01e1246 --- /dev/null +++ b/pyomo/contrib/piecewise/transform/outer_representation_gdp.py @@ -0,0 +1,122 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.dependencies.numpy as np +from pyomo.common.dependencies.scipy import spatial +from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr +from pyomo.contrib.piecewise.transform.piecewise_to_gdp_transformation import ( + PiecewiseLinearToGDP, +) +from pyomo.core import Constraint, NonNegativeIntegers, Suffix, Var +from pyomo.core.base import TransformationFactory +from pyomo.gdp import Disjunct, Disjunction + + +@TransformationFactory.register( + 'contrib.piecewise.outer_repn_gdp', + doc="Convert piecewise-linear model to a GDP " + "using an outer (Ax <= b) representation of " + "the simplices that are the domains of the " + "linear functions.", +) +class OuterRepresentationGDPTransformation(PiecewiseLinearToGDP): + """ + Convert a model involving piecewise linear expressions into a GDP by + representing the piecewise linear functions as Disjunctions where the + simplices over which the linear functions are defined are represented + in an "outer" representation--in sets of constraints of the form Ax <= b. + + This transformation can be called in one of two ways: + 1) The default, where 'descend_into_expressions' is False. This is + more computationally efficient, but relies on the + PiecewiseLinearFunctions being declared on the same Block in which + they are used in Expressions (if you are hoping to maintain the + original hierarchical structure of the model). In this mode, + targets must be Blocks and/or PiecewiseLinearFunctions. + 2) With 'descend_into_expressions' True. This is less computationally + efficient, but will respect hierarchical structure by finding + uses of PiecewiseLinearFunctions in Constraint and Obective + expressions and putting their transformed counterparts on the same + parent Block as the component owning their parent expression. In + this mode, targets must be Blocks, Constraints, and/or Objectives. + """ + + CONFIG = PiecewiseLinearToGDP.CONFIG() + _transformation_name = 'pw_linear_outer_repn' + + def _transform_pw_linear_expr(self, pw_expr, pw_linear_func, transformation_block): + transBlock = transformation_block.transformed_functions[ + len(transformation_block.transformed_functions) + ] + + # get the PiecewiseLinearFunctionExpression + dimension = pw_expr.nargs() + transBlock.disjuncts = Disjunct(NonNegativeIntegers) + substitute_var = transBlock.substitute_var = Var() + pw_linear_func.map_transformation_var(pw_expr, substitute_var) + substitute_var_lb = float('inf') + substitute_var_ub = -float('inf') + if dimension > 1: + A = np.ones((dimension + 1, dimension + 1)) + b = np.zeros(dimension + 1) + b[-1] = 1 + + for simplex, linear_func in zip( + pw_linear_func._simplices, pw_linear_func._linear_functions + ): + disj = transBlock.disjuncts[len(transBlock.disjuncts)] + + if dimension == 1: + # We don't need scipy, and the polytopes are 1-dimensional + # simplices, so they are defined by two bounds constraints: + disj.simplex_halfspaces = Constraint( + expr=( + pw_linear_func._points[simplex[0]][0], + pw_expr.args[0], + pw_linear_func._points[simplex[1]][0], + ) + ) + else: + disj.simplex_halfspaces = Constraint(range(dimension + 1)) + # we will use scipy to get the convex hull of the extreme + # points of the simplex + extreme_pts = [] + for idx in simplex: + extreme_pts.append(pw_linear_func._points[idx]) + chull = spatial.ConvexHull(extreme_pts) + vars = pw_expr.args + for i, eqn in enumerate(chull.equations): + # The equations are given as normal vectors (A) followed by + # offsets (b) such that Ax + b <= 0 gives the halfspaces + # defining the simplex. (See Qhull documentation) + disj.simplex_halfspaces[i] = ( + sum(eqn[j] * v for j, v in enumerate(vars)) + + float(eqn[dimension]) + <= 0 + ) + + linear_func_expr = linear_func(*pw_expr.args) + disj.set_substitute = Constraint(expr=substitute_var == linear_func_expr) + (lb, ub) = compute_bounds_on_expr(linear_func_expr) + if lb is not None and lb < substitute_var_lb: + substitute_var_lb = lb + if ub is not None and ub > substitute_var_ub: + substitute_var_ub = ub + + if substitute_var_lb < float('inf'): + transBlock.substitute_var.setlb(substitute_var_lb) + if substitute_var_ub > -float('inf'): + transBlock.substitute_var.setub(substitute_var_ub) + transBlock.pick_a_piece = Disjunction( + expr=[d for d in transBlock.disjuncts.values()] + ) + + return transBlock.substitute_var diff --git a/pyomo/contrib/piecewise/transform/piecewise_to_gdp_transformation.py b/pyomo/contrib/piecewise/transform/piecewise_to_gdp_transformation.py new file mode 100644 index 00000000000..ed4902ae6d5 --- /dev/null +++ b/pyomo/contrib/piecewise/transform/piecewise_to_gdp_transformation.py @@ -0,0 +1,254 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.config import ConfigDict, ConfigValue +from pyomo.common.errors import DeveloperError +from pyomo.common.modeling import unique_component_name +from pyomo.contrib.piecewise import PiecewiseLinearFunction +from pyomo.contrib.piecewise.transform.piecewise_to_mip_visitor import ( + PiecewiseLinearToMIP, +) +from pyomo.core import ( + Constraint, + Objective, + Var, + BooleanVar, + Expression, + Suffix, + Param, + Set, + SetOf, + RangeSet, + ExternalFunction, + Connector, + SortComponents, + Any, +) +from pyomo.core.base import Transformation +from pyomo.core.base.block import _BlockData, Block +from pyomo.core.util import target_list +from pyomo.gdp import Disjunct, Disjunction +from pyomo.gdp.util import is_child_of +from pyomo.network import Port + + +class PiecewiseLinearToGDP(Transformation): + """ + Base class for transformations of piecewise-linear models to GDPs + """ + + CONFIG = ConfigDict('contrib.piecewise_to_gdp') + CONFIG.declare( + 'targets', + ConfigValue( + default=None, + domain=target_list, + description="target or list of targets that will be transformed", + doc=""" + This specifies the list of components to transform. If None (default), + the entire model is transformed. Note that if the transformation is + done out of place, the list of targets should be attached to the model + before it is cloned, and the list will specify the targets on the cloned + instance.""", + ), + ) + CONFIG.declare( + 'descend_into_expressions', + ConfigValue( + default=False, + domain=bool, + description="Whether to look for uses of PiecewiseLinearFunctions in " + "the Constraint and Objective expressions, rather than assuming " + "all PiecewiseLinearFunctions are on the active tree(s) of 'instance' " + "and 'targets.'", + doc=""" + It is *strongly* recommended that, in hierarchical models, the + PiecewiseLinearFunction components are on the same Block as where + they are used in expressions. If you follow this recommendation, + this option can remain False, which will make this transformation + more efficient. However, if you do not follow the recommendation, + unless you know what you are doing, turn this option to 'True' to + ensure that all of the uses of PiecewiseLinearFunctions are + transformed. + """, + ), + ) + + def __init__(self): + super().__init__() + self.handlers = { + Constraint: self._transform_constraint, + Objective: self._transform_objective, + Var: False, + BooleanVar: False, + Connector: False, + Expression: False, + Suffix: False, + Param: False, + Set: False, + SetOf: False, + RangeSet: False, + Disjunction: False, + Disjunct: False, + Block: self._transform_block, + ExternalFunction: False, + Port: False, + PiecewiseLinearFunction: self._transform_piecewise_linear_function, + } + self._transformation_blocks = {} + + def _apply_to(self, instance, **kwds): + try: + self._apply_to_impl(instance, **kwds) + finally: + self._transformation_blocks.clear() + + def _apply_to_impl(self, instance, **kwds): + config = self.CONFIG(kwds.pop('options', {})) + config.set_value(kwds) + + targets = config.targets + if targets is None: + targets = (instance,) + + knownBlocks = {} + not_walking_exprs_msg = ( + "When not descending into expressions, Constraints " + "and Objectives are not valid targets. Please specify " + "PiecewiseLinearFunction component and the Blocks " + "containing them, or (at the cost of some performance " + "in this transformation), set the 'descend_into_expressions' " + "option to 'True'." + ) + for t in targets: + if not is_child_of(parent=instance, child=t, knownBlocks=knownBlocks): + raise ValueError( + "Target '%s' is not a component on instance " + "'%s'!" % (t.name, instance.name) + ) + if t.ctype is PiecewiseLinearFunction: + if config.descend_into_expressions: + raise ValueError( + "When descending into expressions, the transformation " + "cannot take PiecewiseLinearFunction components as " + "targets. Please instead specify the Blocks, " + "Constraints, and Objectives where your " + "PiecewiseLinearFunctions have been used in " + "expressions." + ) + self._transform_piecewise_linear_function( + t, config.descend_into_expressions + ) + elif t.ctype is Block or isinstance(t, _BlockData): + self._transform_block(t, config.descend_into_expressions) + elif t.ctype is Constraint: + if not config.descend_into_expressions: + raise ValueError( + "Encountered Constraint target '%s':\n%s" + % (t.name, not_walking_exprs_msg) + ) + self._transform_constraint(t, config.descend_into_expressions) + elif t.ctype is Objective: + if not config.descend_into_expressions: + raise ValueError( + "Encountered Objective target '%s':\n%s" + % (t.name, not_walking_exprs_msg) + ) + self._transform_objective(t, config.descend_into_expressions) + else: + raise ValueError( + "Target '%s' is not a PiecewiseLinearFunction, Block or " + "Constraint. It was of type '%s' and can't be transformed." + % (t.name, type(t)) + ) + + def _get_transformation_block(self, parent): + if parent in self._transformation_blocks: + return self._transformation_blocks[parent] + + nm = unique_component_name( + parent, '_pyomo_contrib_%s' % self._transformation_name + ) + self._transformation_blocks[parent] = transBlock = Block() + parent.add_component(nm, transBlock) + + transBlock.transformed_functions = Block(Any) + return transBlock + + def _transform_block(self, block, descend_into_expressions): + blocks = block.values() if block.is_indexed() else (block,) + for b in blocks: + for obj in b.component_objects( + active=True, + descend_into=(Block, Disjunct), + sort=SortComponents.deterministic, + ): + handler = self.handlers.get(obj.ctype, None) + if not handler: + if handler is None: + raise RuntimeError( + "No transformation handler registered for modeling " + "components of type '%s'." % obj.ctype + ) + continue + handler(obj, descend_into_expressions) + + def _transform_piecewise_linear_function( + self, pw_linear_func, descend_into_expressions + ): + if descend_into_expressions: + return + + transBlock = self._get_transformation_block(pw_linear_func.parent_block()) + _functions = ( + pw_linear_func.values() + if pw_linear_func.is_indexed() + else (pw_linear_func,) + ) + for pw_func in _functions: + for pw_expr in pw_func._expressions.values(): + substitute_var = self._transform_pw_linear_expr( + pw_expr.expr, pw_func, transBlock + ) + # We change the named expression to point to the variable that + # will take the appropriate value of the piecewise linear + # function. + pw_expr.expr = substitute_var + + # Deactivate so that modern writers don't complain + pw_linear_func.deactivate() + + def _transform_constraint(self, constraint, descend_into_expressions): + if not descend_into_expressions: + return + + transBlock = self._get_transformation_block(constraint.parent_block()) + visitor = PiecewiseLinearToMIP(self._transform_pw_linear_expr, transBlock) + + _constraints = constraint.values() if constraint.is_indexed() else (constraint,) + for c in _constraints: + visitor.walk_expression((c.expr, c, 0)) + + def _transform_objective(self, objective, descend_into_expressions): + if not descend_into_expressions: + return + + transBlock = self._get_transformation_block(objective.parent_block()) + visitor = PiecewiseLinearToMIP(self._transform_pw_linear_expr, transBlock) + + _objectives = objective.values() if objective.is_indexed() else (objective,) + for o in _objectives: + visitor.walk_expression((o.expr, o, 0)) + + def _transform_pw_linear_expr(self, pw_expr, pw_linear_func, transformation_block): + raise DeveloperError( + "Derived class failed to implement '_transform_pw_linear_expr'" + ) diff --git a/pyomo/contrib/piecewise/transform/piecewise_to_mip_visitor.py b/pyomo/contrib/piecewise/transform/piecewise_to_mip_visitor.py new file mode 100644 index 00000000000..e3347cf206a --- /dev/null +++ b/pyomo/contrib/piecewise/transform/piecewise_to_mip_visitor.py @@ -0,0 +1,56 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.contrib.piecewise.piecewise_linear_expression import ( + PiecewiseLinearExpression, +) +from pyomo.core import Expression +from pyomo.core.expr.visitor import StreamBasedExpressionVisitor + + +class PiecewiseLinearToMIP(StreamBasedExpressionVisitor): + """ + Expression walker to replace PiecewiseLinearExpressions when creating + equivalent MIP formulations. + + Args: + transform_pw_linear_expression (function): a callback that accepts + a PiecewiseLinearExpression, its parent PiecewiseLinearFunction, + and a transformation Block. It is expected to convert the + PiecewiseLinearExpression to MIP form, and return the Var (or + other expression) that will replace it in the expression. + transBlock (Block): transformation Block to pass to the above + callback + """ + + def __init__(self, transform_pw_linear_expression, transBlock): + self.transform_pw_linear_expression = transform_pw_linear_expression + self.transBlock = transBlock + self._process_node = self._process_node_bx + + def initializeWalker(self, expr): + expr, src, src_idx = expr + # always walk + return True, expr + + def beforeChild(self, node, child, child_idx): + return True, None + + def exitNode(self, node, data): + if node.__class__ is PiecewiseLinearExpression: + parent = node.pw_linear_function + substitute_var = self.transform_pw_linear_expression( + node, parent, self.transBlock + ) + parent._expressions[id(node)] = substitute_var + return node + + finalizeResult = None diff --git a/pyomo/contrib/piecewise/transform/reduced_inner_representation_gdp.py b/pyomo/contrib/piecewise/transform/reduced_inner_representation_gdp.py new file mode 100644 index 00000000000..b89852530d9 --- /dev/null +++ b/pyomo/contrib/piecewise/transform/reduced_inner_representation_gdp.py @@ -0,0 +1,138 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr +from pyomo.contrib.piecewise.transform.piecewise_to_gdp_transformation import ( + PiecewiseLinearToGDP, +) +from pyomo.core import Constraint, NonNegativeIntegers, Var +from pyomo.core.base import TransformationFactory +from pyomo.gdp import Disjunct, Disjunction + + +@TransformationFactory.register( + 'contrib.piecewise.reduced_inner_repn_gdp', + doc="Convert piecewise-linear model to a GDP " + "using an inner representation of the " + "simplices that are the domains of the linear " + "functions.", +) +class ReducedInnerRepresentationGDPTransformation(PiecewiseLinearToGDP): + """ + Convert a model involving piecewise linear expressions into a GDP by + representing the piecewise linear functions as Disjunctions where the + simplices over which the linear functions are defined are represented + in a reduced "inner" representation--as convex combinations of their extreme + points. We refer to this as 'reduced' since we create only one multiplier + for each extreme point in the union of the extreme points over all the + simplices. Within the Disjuncts, we then enforce that all of the multipliers + for extreme points not in the simplex are 0. + + This transformation can be called in one of two ways: + 1) The default, where 'descend_into_expressions' is False. This is + more computationally efficient, but relies on the + PiecewiseLinearFunctions being declared on the same Block in which + they are used in Expressions (if you are hoping to maintain the + original hierarchical structure of the model). In this mode, + targets must be Blocks and/or PiecewiseLinearFunctions. + 2) With 'descend_into_expressions' True. This is less computationally + efficient, but will respect hierarchical structure by finding + uses of PiecewiseLinearFunctions in Constraint and Obective + expressions and putting their transformed counterparts on the same + parent Block as the component owning their parent expression. In + this mode, targets must be Blocks, Constraints, and/or Objectives. + """ + + CONFIG = PiecewiseLinearToGDP.CONFIG() + _transformation_name = 'pw_linear_reduced_inner_repn' + + def _transform_pw_linear_expr(self, pw_expr, pw_linear_func, transformation_block): + transBlock = transformation_block.transformed_functions[ + len(transformation_block.transformed_functions) + ] + + # get the PiecewiseLinearFunctionExpression + dimension = pw_expr.nargs() + transBlock.disjuncts = Disjunct(NonNegativeIntegers) + substitute_var = transBlock.substitute_var = Var() + pw_linear_func.map_transformation_var(pw_expr, substitute_var) + substitute_var_lb = float('inf') + substitute_var_ub = -float('inf') + extreme_pts_by_simplex = {} + linear_func_by_extreme_pt = {} + # Save all the extreme points as sets since we will need to check set + # containment to build the constraints fixing the multipliers to 0. We + # can also build the data structure that will allow us to later build + # the linear func expression + for simplex, linear_func in zip( + pw_linear_func._simplices, pw_linear_func._linear_functions + ): + extreme_pts = extreme_pts_by_simplex[simplex] = set() + for idx in simplex: + extreme_pts.add(idx) + if idx not in linear_func_by_extreme_pt: + linear_func_by_extreme_pt[idx] = linear_func + + # We're going to want bounds on the substitute var, so we use + # interval arithmetic to figure those out as we go. + (lb, ub) = compute_bounds_on_expr(linear_func(*pw_expr.args)) + if lb is not None and lb < substitute_var_lb: + substitute_var_lb = lb + if ub is not None and ub > substitute_var_ub: + substitute_var_ub = ub + + # set the bounds on the substitute var + if substitute_var_lb < float('inf'): + transBlock.substitute_var.setlb(substitute_var_lb) + if substitute_var_ub > -float('inf'): + transBlock.substitute_var.setub(substitute_var_ub) + + num_extreme_pts = len(pw_linear_func._points) + # lambda[i] will be the multiplier for the extreme point with index i in + # pw_linear_fun._points + transBlock.lambdas = Var(range(num_extreme_pts), bounds=(0, 1)) + + # Now that we have all of the extreme points, we can make the + # disjunctive constraints + for simplex in pw_linear_func._simplices: + disj = transBlock.disjuncts[len(transBlock.disjuncts)] + cons = disj.lambdas_zero_for_other_simplices = Constraint( + NonNegativeIntegers + ) + extreme_pts = extreme_pts_by_simplex[simplex] + for i in range(num_extreme_pts): + if i not in extreme_pts: + cons[len(cons)] = transBlock.lambdas[i] <= 0 + # Make the disjunction + transBlock.pick_a_piece = Disjunction( + expr=[d for d in transBlock.disjuncts.values()] + ) + + # Now we make the global constraints + transBlock.convex_combo = Constraint( + expr=sum(transBlock.lambdas[i] for i in range(num_extreme_pts)) == 1 + ) + transBlock.linear_func = Constraint( + expr=sum( + linear_func_by_extreme_pt[j](*pt) * transBlock.lambdas[j] + for (j, pt) in enumerate(pw_linear_func._points) + ) + == substitute_var + ) + + @transBlock.Constraint(range(dimension)) + def linear_combo(b, i): + return pw_expr.args[i] == sum( + pt[i] * transBlock.lambdas[j] + for (j, pt) in enumerate(pw_linear_func._points) + ) + + return transBlock.substitute_var diff --git a/pyomo/contrib/preprocessing/plugins/bounds_to_vars.py b/pyomo/contrib/preprocessing/plugins/bounds_to_vars.py index 3981e3765e0..ece2376774c 100644 --- a/pyomo/contrib/preprocessing/plugins/bounds_to_vars.py +++ b/pyomo/contrib/preprocessing/plugins/bounds_to_vars.py @@ -17,16 +17,23 @@ import math from pyomo.core.base.transformation import TransformationFactory -from pyomo.common.config import (ConfigBlock, ConfigValue, NonNegativeFloat, - add_docstring_list) +from pyomo.common.config import ( + ConfigBlock, + ConfigValue, + NonNegativeFloat, + document_kwargs_from_configdict, +) from pyomo.core.base.constraint import Constraint from pyomo.core.expr.numvalue import value from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation from pyomo.repn import generate_standard_repn -@TransformationFactory.register('contrib.constraints_to_var_bounds', - doc="Change constraints to be a bound on the variable.") +@TransformationFactory.register( + 'contrib.constraints_to_var_bounds', + doc="Change constraints to be a bound on the variable.", +) +@document_kwargs_from_configdict('CONFIG') class ConstraintToVarBoundTransform(IsomorphicTransformation): """Change constraints to be a bound on the variable. @@ -40,23 +47,30 @@ class ConstraintToVarBoundTransform(IsomorphicTransformation): """ CONFIG = ConfigBlock("ConstraintToVarBounds") - CONFIG.declare("tolerance", ConfigValue( - default=1E-13, domain=NonNegativeFloat, - description="tolerance on bound equality (:math:`LB = UB`)" - )) - CONFIG.declare("detect_fixed", ConfigValue( - default=True, domain=bool, - description="If True, fix variable when " - ":math:`| LB - UB | \\leq tolerance`." - )) - - __doc__ = add_docstring_list(__doc__, CONFIG) + CONFIG.declare( + "tolerance", + ConfigValue( + default=1e-13, + domain=NonNegativeFloat, + description="tolerance on bound equality (:math:`LB = UB`)", + ), + ) + CONFIG.declare( + "detect_fixed", + ConfigValue( + default=True, + domain=bool, + description="If True, fix variable when " + ":math:`| LB - UB | \\leq tolerance`.", + ), + ) def _apply_to(self, model, **kwds): config = self.CONFIG(kwds) for constr in model.component_data_objects( - ctype=Constraint, active=True, descend_into=True): + ctype=Constraint, active=True, descend_into=True + ): # Check if the constraint is k * x + c1 <= c2 or c2 <= k * x + c1 repn = generate_standard_repn(constr.body) if not repn.is_linear() or len(repn.linear_vars) != 1: @@ -94,11 +108,20 @@ def _apply_to(self, model, **kwds): # Make sure that the lb and ub are integral. Use safe # construction if near to integer. if var.has_lb(): - var.setlb(int(min(math.ceil(var.lb - config.tolerance), - math.ceil(var.lb)))) + var.setlb( + int( + min(math.ceil(var.lb - config.tolerance), math.ceil(var.lb)) + ) + ) if var.has_ub(): - var.setub(int(max(math.floor(var.ub + config.tolerance), - math.floor(var.ub)))) + var.setub( + int( + max( + math.floor(var.ub + config.tolerance), + math.floor(var.ub), + ) + ) + ) if var is not None and var.value is not None: _adjust_var_value_if_not_feasible(var) diff --git a/pyomo/contrib/preprocessing/plugins/constraint_tightener.py b/pyomo/contrib/preprocessing/plugins/constraint_tightener.py index b44befd7912..4c8b28e0319 100644 --- a/pyomo/contrib/preprocessing/plugins/constraint_tightener.py +++ b/pyomo/contrib/preprocessing/plugins/constraint_tightener.py @@ -10,13 +10,15 @@ @TransformationFactory.register( 'core.tighten_constraints_from_vars', - doc="[DEPRECATED] Tightens upper and lower bound on linear constraints.") + doc="[DEPRECATED] Tightens upper and lower bound on linear constraints.", +) @deprecated( "Use of the constraint tightener transformation is deprecated. " "Its functionality may be partially replicated using " "`pyomo.contrib.fbbt.compute_bounds_on_expr(constraint.body)`.", - version='5.7') -class TightenContraintFromVars(IsomorphicTransformation): + version='5.7', +) +class TightenConstraintFromVars(IsomorphicTransformation): """Tightens upper and lower bound on constraints based on variable bounds. Iterates through each variable and tightens the constraint bounds using @@ -27,12 +29,13 @@ class TightenContraintFromVars(IsomorphicTransformation): """ def __init__(self): - super(TightenContraintFromVars, self).__init__() + super(TightenConstraintFromVars, self).__init__() def _apply_to(self, model): """Apply the transformation.""" for constr in model.component_data_objects( - ctype=Constraint, active=True, descend_into=True): + ctype=Constraint, active=True, descend_into=True + ): repn = generate_standard_repn(constr.body) if not repn.is_linear(): continue @@ -42,7 +45,7 @@ def _apply_to(self, model): if repn.constant: LB = UB = repn.constant - # loop through each coefficent and variable pair + # loop through each coefficient and variable pair for var, coef in zip(repn.linear_vars, repn.linear_coefs): # Calculate bounds using interval arithmetic if coef >= 0: @@ -74,5 +77,6 @@ def _apply_to(self, model): if UB < LB: logger.error( "Infeasible variable bounds: " - "Constraint %s has inferred LB %s > UB %s" % - (constr.name, new_lb, new_ub)) + "Constraint %s has inferred LB %s > UB %s" + % (constr.name, new_lb, new_ub) + ) diff --git a/pyomo/contrib/preprocessing/plugins/deactivate_trivial_constraints.py b/pyomo/contrib/preprocessing/plugins/deactivate_trivial_constraints.py index acb085aa8f5..a91e0a292f2 100644 --- a/pyomo/contrib/preprocessing/plugins/deactivate_trivial_constraints.py +++ b/pyomo/contrib/preprocessing/plugins/deactivate_trivial_constraints.py @@ -1,5 +1,4 @@ # -*- coding: utf-8 -*- - # ___________________________________________________________________________ # # Pyomo: Python Optimization Modeling Objects @@ -15,8 +14,12 @@ import logging from pyomo.common.collections import ComponentSet -from pyomo.common.config import (ConfigBlock, ConfigValue, NonNegativeFloat, - add_docstring_list) +from pyomo.common.config import ( + ConfigBlock, + ConfigValue, + NonNegativeFloat, + document_kwargs_from_configdict, +) from pyomo.common.errors import InfeasibleConstraintException from pyomo.core.base.constraint import Constraint from pyomo.core.base.transformation import TransformationFactory @@ -26,9 +29,11 @@ logger = logging.getLogger('pyomo.contrib.preprocessing') + @TransformationFactory.register( - 'contrib.deactivate_trivial_constraints', - doc="Deactivate trivial constraints.") + 'contrib.deactivate_trivial_constraints', doc="Deactivate trivial constraints." +) +@document_kwargs_from_configdict('CONFIG') class TrivialConstraintDeactivator(IsomorphicTransformation): """Deactivates trivial constraints. @@ -42,75 +47,90 @@ class TrivialConstraintDeactivator(IsomorphicTransformation): """ CONFIG = ConfigBlock("TrivialConstraintDeactivator") - CONFIG.declare("tmp", ConfigValue( - default=False, domain=bool, - description="True to store a set of transformed constraints for future" - " reversion of the transformation." - )) - CONFIG.declare("ignore_infeasible", ConfigValue( - default=False, domain=bool, - description="True to skip over trivial constraints that are " - "infeasible instead of raising an InfeasibleConstraintException." - )) - CONFIG.declare("return_trivial", ConfigValue( - default=[], - description="a list to which the deactivated trivial" - "constraints are appended (side effect)" - )) - CONFIG.declare("tolerance", ConfigValue( - default=1E-13, domain=NonNegativeFloat, - description="tolerance on constraint violations" - )) - - __doc__ = add_docstring_list(__doc__, CONFIG) - + CONFIG.declare( + "tmp", + ConfigValue( + default=False, + domain=bool, + description="True to store a set of transformed constraints for future" + " reversion of the transformation.", + ), + ) + CONFIG.declare( + "ignore_infeasible", + ConfigValue( + default=False, + domain=bool, + description="True to skip over trivial constraints that are " + "infeasible instead of raising an InfeasibleConstraintException.", + ), + ) + CONFIG.declare( + "return_trivial", + ConfigValue( + default=[], + description="a list to which the deactivated trivial" + "constraints are appended (side effect)", + ), + ) + CONFIG.declare( + "tolerance", + ConfigValue( + default=1e-13, + domain=NonNegativeFloat, + description="tolerance on constraint violations", + ), + ) def _apply_to(self, instance, **kwargs): config = self.CONFIG(kwargs) - if config.tmp and not hasattr(instance, - '_tmp_trivial_deactivated_constrs'): + if config.tmp and not hasattr(instance, '_tmp_trivial_deactivated_constrs'): instance._tmp_trivial_deactivated_constrs = ComponentSet() elif config.tmp: logger.warning( 'Deactivating trivial constraints on the block {} for which ' 'trivial constraints were previously deactivated. ' 'Reversion will affect all deactivated constraints.'.format( - instance.name)) + instance.name + ) + ) # Trivial constraints are those that do not contain any variables, ie. # the polynomial degree is 0 - for constr in instance.component_data_objects(ctype=Constraint, - active=True, - descend_into=True): + for constr in instance.component_data_objects( + ctype=Constraint, active=True, descend_into=True + ): repn = generate_standard_repn(constr.body) if not repn.is_constant(): # This constraint is not trivial continue # We need to check each constraint to sure that it is not violated. - constr_lb = value( - constr.lower) if constr.has_lb() else float('-inf') - constr_ub = value( - constr.upper) if constr.has_ub() else float('inf') + constr_lb = value(constr.lower) if constr.has_lb() else float('-inf') + constr_ub = value(constr.upper) if constr.has_ub() else float('inf') constr_value = repn.constant # Check if the lower bound is violated outside a given tolerance - if (constr_value + config.tolerance <= constr_lb): + if constr_value + config.tolerance <= constr_lb: if config.ignore_infeasible: continue else: raise InfeasibleConstraintException( - 'Trivial constraint {} violates LB {} ≤ BODY {}.' - .format(constr.name, constr_lb, constr_value)) + 'Trivial constraint {} violates LB {} ≤ BODY {}.'.format( + constr.name, constr_lb, constr_value + ) + ) # Check if the upper bound is violated outside a given tolerance - if (constr_value >= constr_ub + config.tolerance): + if constr_value >= constr_ub + config.tolerance: if config.ignore_infeasible: continue else: raise InfeasibleConstraintException( - 'Trivial constraint {} violates BODY {} ≤ UB {}.' - .format(constr.name, constr_value, constr_ub)) + 'Trivial constraint {} violates BODY {} ≤ UB {}.'.format( + constr.name, constr_value, constr_ub + ) + ) # Constraint is not infeasible. Deactivate it. if config.tmp: diff --git a/pyomo/contrib/preprocessing/plugins/detect_fixed_vars.py b/pyomo/contrib/preprocessing/plugins/detect_fixed_vars.py index dbef3941c6a..bafbec7b8bd 100644 --- a/pyomo/contrib/preprocessing/plugins/detect_fixed_vars.py +++ b/pyomo/contrib/preprocessing/plugins/detect_fixed_vars.py @@ -14,16 +14,22 @@ from pyomo.core.base.transformation import TransformationFactory from pyomo.common.collections import ComponentMap -from pyomo.common.config import (ConfigBlock, ConfigValue, NonNegativeFloat, - add_docstring_list) +from pyomo.common.config import ( + ConfigBlock, + ConfigValue, + NonNegativeFloat, + document_kwargs_from_configdict, +) from pyomo.core.base.var import Var from pyomo.core.expr.numvalue import value from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation @TransformationFactory.register( - 'contrib.detect_fixed_vars', - doc="Detect variables that are de-facto fixed but not considered fixed.") + 'contrib.detect_fixed_vars', + doc="Detect variables that are de-facto fixed but not considered fixed.", +) +@document_kwargs_from_configdict('CONFIG') class FixedVarDetector(IsomorphicTransformation): """Detects variables that are de-facto fixed but not considered fixed. @@ -37,17 +43,23 @@ class FixedVarDetector(IsomorphicTransformation): """ CONFIG = ConfigBlock("FixedVarDetector") - CONFIG.declare("tmp", ConfigValue( - default=False, domain=bool, - description="True to store the set of transformed variables and " - "their old values so that they can be restored." - )) - CONFIG.declare("tolerance", ConfigValue( - default=1E-13, domain=NonNegativeFloat, - description="tolerance on bound equality (LB == UB)" - )) - - __doc__ = add_docstring_list(__doc__, CONFIG) + CONFIG.declare( + "tmp", + ConfigValue( + default=False, + domain=bool, + description="True to store the set of transformed variables and " + "their old values so that they can be restored.", + ), + ) + CONFIG.declare( + "tolerance", + ConfigValue( + default=1e-13, + domain=NonNegativeFloat, + description="tolerance on bound equality (LB == UB)", + ), + ) def _apply_to(self, instance, **kwargs): config = self.CONFIG(kwargs) @@ -55,16 +67,14 @@ def _apply_to(self, instance, **kwargs): if config.tmp: instance._xfrm_detect_fixed_vars_old_values = ComponentMap() - for var in instance.component_data_objects( - ctype=Var, descend_into=True): + for var in instance.component_data_objects(ctype=Var, descend_into=True): if var.fixed or var.lb is None or var.ub is None: # if the variable is already fixed, or if it is missing a # bound, we skip it. continue if fabs(value(var.lb) - value(var.ub)) <= config.tolerance: if config.tmp: - instance._xfrm_detect_fixed_vars_old_values[var] = \ - var.value + instance._xfrm_detect_fixed_vars_old_values[var] = var.value var.fix(var.lb) def revert(self, instance): diff --git a/pyomo/contrib/preprocessing/plugins/equality_propagate.py b/pyomo/contrib/preprocessing/plugins/equality_propagate.py index fdbe8ae9320..03e2e11dadb 100644 --- a/pyomo/contrib/preprocessing/plugins/equality_propagate.py +++ b/pyomo/contrib/preprocessing/plugins/equality_propagate.py @@ -18,7 +18,11 @@ from pyomo.core.expr.numvalue import value from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation from pyomo.repn.standard_repn import generate_standard_repn -from pyomo.common.config import ConfigBlock, ConfigValue, add_docstring_list +from pyomo.common.config import ( + ConfigBlock, + ConfigValue, + document_kwargs_from_configdict, +) from pyomo.common.errors import InfeasibleConstraintException @@ -34,18 +38,23 @@ def _build_equality_set(m): #: dict: map of var UID to the set of all equality-linked var UIDs eq_var_map = ComponentMap() relevant_vars = ComponentSet() - for constr in m.component_data_objects(ctype=Constraint, - active=True, - descend_into=True): + for constr in m.component_data_objects( + ctype=Constraint, active=True, descend_into=True + ): # Check to make sure the constraint is of form v1 - v2 == 0 - if (value(constr.lower) == 0 and value(constr.upper) == 0 and - constr.body.polynomial_degree() == 1): + if ( + value(constr.lower) == 0 + and value(constr.upper) == 0 + and constr.body.polynomial_degree() == 1 + ): repn = generate_standard_repn(constr.body) # only take the variables with nonzero coefficients - vars_ = [v for i, v in enumerate(repn.linear_vars) - if repn.linear_coefs[i]] - if (len(vars_) == 2 and repn.constant == 0 and - sorted(l for l in repn.linear_coefs if l) == [-1, 1]): + vars_ = [v for i, v in enumerate(repn.linear_vars) if repn.linear_coefs[i]] + if ( + len(vars_) == 2 + and repn.constant == 0 + and sorted(l for l in repn.linear_coefs if l) == [-1, 1] + ): # this is an a == b constraint. v1 = vars_[0] v2 = vars_[1] @@ -62,9 +71,9 @@ def _build_equality_set(m): def _detect_fixed_variables(m): """Detect fixed variables due to constraints of form var = const.""" new_fixed_vars = ComponentSet() - for constr in m.component_data_objects(ctype=Constraint, - active=True, - descend_into=True): + for constr in m.component_data_objects( + ctype=Constraint, active=True, descend_into=True + ): if constr.equality and constr.body.polynomial_degree() == 1: repn = generate_standard_repn(constr.body) if len(repn.linear_vars) == 1 and repn.linear_coefs[0]: @@ -77,9 +86,11 @@ def _detect_fixed_variables(m): return new_fixed_vars - -@TransformationFactory.register('contrib.propagate_fixed_vars', - doc="Propagate variable fixing for equalities of type x = y.") +@TransformationFactory.register( + 'contrib.propagate_fixed_vars', + doc="Propagate variable fixing for equalities of type x = y.", +) +@document_kwargs_from_configdict('CONFIG') class FixedVarPropagator(IsomorphicTransformation): """Propagate variable fixing for equalities of type :math:`x = y`. @@ -95,13 +106,15 @@ class FixedVarPropagator(IsomorphicTransformation): """ CONFIG = ConfigBlock() - CONFIG.declare("tmp", ConfigValue( - default=False, domain=bool, - description="True to store the set of transformed variables and " - "their old states so that they can be later restored." - )) - - __doc__ = add_docstring_list(__doc__, CONFIG) + CONFIG.declare( + "tmp", + ConfigValue( + default=False, + domain=bool, + description="True to store the set of transformed variables and " + "their old states so that they can be later restored.", + ), + ) def _apply_to(self, instance, **kwds): config = self.CONFIG(kwds) @@ -116,7 +129,7 @@ def _apply_to(self, instance, **kwds): fixed_vars.update(newly_fixed) processed = ComponentSet() # Go through each fixed variable to propagate the 'fixed' status to all - # equality-linked variabes. + # equality-linked variables. for v1 in fixed_vars: # If we have already processed the variable, skip it. if v1 in processed: @@ -124,15 +137,14 @@ def _apply_to(self, instance, **kwds): eq_set = eq_var_map.get(v1, ComponentSet([v1])) for v2 in eq_set: - if (v2.fixed and value(v1) != value(v2)): + if v2.fixed and value(v1) != value(v2): raise InfeasibleConstraintException( 'Variables {} and {} have conflicting fixed ' 'values of {} and {}, but are linked by ' - 'equality constraints.' - .format(v1.name, - v2.name, - value(v1), - value(v2))) + 'equality constraints.'.format( + v1.name, v2.name, value(v1), value(v2) + ) + ) elif not v2.fixed: v2.fix(value(v1)) if config.tmp: @@ -148,8 +160,11 @@ def revert(self, instance): del instance._tmp_propagate_fixed -@TransformationFactory.register('contrib.propagate_eq_var_bounds', - doc="Propagate variable bounds for equalities of type x = y.") +@TransformationFactory.register( + 'contrib.propagate_eq_var_bounds', + doc="Propagate variable bounds for equalities of type x = y.", +) +@document_kwargs_from_configdict('CONFIG') class VarBoundPropagator(IsomorphicTransformation): """Propagate variable bounds for equalities of type :math:`x = y`. @@ -162,19 +177,20 @@ class VarBoundPropagator(IsomorphicTransformation): """ CONFIG = ConfigBlock() - CONFIG.declare("tmp", ConfigValue( - default=False, domain=bool, - description="True to store the set of transformed variables and " - "their old states so that they can be later restored." - )) - - __doc__ = add_docstring_list(__doc__, CONFIG) + CONFIG.declare( + "tmp", + ConfigValue( + default=False, + domain=bool, + description="True to store the set of transformed variables and " + "their old states so that they can be later restored.", + ), + ) def _apply_to(self, instance, **kwds): config = self.CONFIG(kwds) if config.tmp and not hasattr(instance, '_tmp_propagate_original_bounds'): - instance._tmp_propagate_original_bounds = Suffix( - direction=Suffix.LOCAL) + instance._tmp_propagate_original_bounds = Suffix(direction=Suffix.LOCAL) eq_var_map, relevant_vars = _build_equality_set(instance) processed = ComponentSet() # Go through each variable in an equality set to propagate the variable @@ -203,14 +219,15 @@ def _apply_to(self, instance, **kwds): raise InfeasibleConstraintException( 'Variable {} has a lower bound {} ' '> the upper bound {} of variable {}, ' - 'but they are linked by equality constraints.' - .format(v1.name, value(v1.lb), value(v2.ub), v2.name)) + 'but they are linked by equality constraints.'.format( + v1.name, value(v1.lb), value(v2.ub), v2.name + ) + ) for v in var_equality_set: if config.tmp: # TODO warn if overwriting - instance._tmp_propagate_original_bounds[v] = ( - v.lb, v.ub) + instance._tmp_propagate_original_bounds[v] = (v.lb, v.ub) v.setlb(max_lb) v.setub(min_ub) diff --git a/pyomo/contrib/preprocessing/plugins/induced_linearity.py b/pyomo/contrib/preprocessing/plugins/induced_linearity.py index 5dbdd3b2597..88c062fdee2 100644 --- a/pyomo/contrib/preprocessing/plugins/induced_linearity.py +++ b/pyomo/contrib/preprocessing/plugins/induced_linearity.py @@ -24,14 +24,27 @@ from math import fabs from pyomo.common.collections import ComponentMap, ComponentSet -from pyomo.common.config import (ConfigBlock, ConfigValue, NonNegativeFloat, - add_docstring_list) +from pyomo.common.config import ( + ConfigBlock, + ConfigValue, + NonNegativeFloat, + document_kwargs_from_configdict, +) from pyomo.common.modeling import unique_component_name from pyomo.contrib.preprocessing.util import SuppressConstantObjectiveWarning -from pyomo.core import (Binary, Block, Constraint, Objective, Set, - TransformationFactory, Var, summation, value) +from pyomo.core import ( + Binary, + Block, + Constraint, + Objective, + Set, + TransformationFactory, + Var, + summation, + value, +) from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation -from pyomo.gdp import Disjunct +from pyomo.gdp import Disjunct, Disjunction from pyomo.opt import TerminationCondition as tc from pyomo.opt import SolverFactory from pyomo.repn import generate_standard_repn @@ -39,8 +52,11 @@ logger = logging.getLogger('pyomo.contrib.preprocessing') -@TransformationFactory.register('contrib.induced_linearity', - doc="Reformulate nonlinear constraints with induced linearity.") +@TransformationFactory.register( + 'contrib.induced_linearity', + doc="Reformulate nonlinear constraints with induced linearity.", +) +@document_kwargs_from_configdict('CONFIG') class InducedLinearity(IsomorphicTransformation): """Reformulate nonlinear constraints with induced linearity. @@ -66,31 +82,33 @@ class InducedLinearity(IsomorphicTransformation): """ CONFIG = ConfigBlock("contrib.induced_linearity") - CONFIG.declare('equality_tolerance', ConfigValue( - default=1E-6, - domain=NonNegativeFloat, - description="Tolerance on equality constraints." - )) - CONFIG.declare('pruning_solver', ConfigValue( - default='glpk', - description="Solver to use when pruning possible values." - )) - - __doc__ = add_docstring_list(__doc__, CONFIG) + CONFIG.declare( + 'equality_tolerance', + ConfigValue( + default=1e-6, + domain=NonNegativeFloat, + description="Tolerance on equality constraints.", + ), + ) + CONFIG.declare( + 'pruning_solver', + ConfigValue( + default='glpk', description="Solver to use when pruning possible values." + ), + ) def _apply_to(self, model, **kwds): """Apply the transformation to the given model.""" config = self.CONFIG(kwds.pop('options', {})) config.set_value(kwds) - _process_container(model, config) _process_subcontainers(model, config) + _process_container(model, config) def _process_subcontainers(blk, config): - for disj in blk.component_data_objects( - Disjunct, active=True, descend_into=True): - _process_container(disj, config) + for disj in blk.component_data_objects(Disjunct, active=True, descend_into=False): _process_subcontainers(disj, config) + _process_container(disj, config) def _process_container(blk, config): @@ -98,8 +116,7 @@ def _process_container(blk, config): blk._induced_linearity_info = Block() else: assert blk._induced_linearity_info.ctype == Block - eff_discr_vars = detect_effectively_discrete_vars( - blk, config.equality_tolerance) + eff_discr_vars = detect_effectively_discrete_vars(blk, config.equality_tolerance) # TODO will need to go through this for each disjunct, since it does # not (should not) descend into Disjuncts. @@ -118,8 +135,7 @@ def _process_container(blk, config): for v2, bilinear_constrs in v1_pairs.items(): if (v1, v2) in processed_pairs: continue - _process_bilinear_constraints( - blk, v1, v2, var_values, bilinear_constrs) + _process_bilinear_constraints(blk, v1, v2, var_values, bilinear_constrs) processed_pairs.add((v2, v1)) # processed_pairs.add((v1, v2)) # TODO is this necessary? @@ -132,7 +148,7 @@ def determine_valid_values(block, discr_var_to_constrs_map, config): Right now, we select a naive approach where we look for variables in the discreteness-inducing constraints. We then adjust their values and see if - things are stil feasible. Based on their coefficient values, we can infer a + things are still feasible. Based on their coefficient values, we can infer a set of allowable values for the effectively discrete variable. Args: @@ -146,8 +162,11 @@ def determine_valid_values(block, discr_var_to_constrs_map, config): # constraints for constr in constrs: repn = generate_standard_repn(constr.body) - var_coef = sum(coef for i, coef in enumerate(repn.linear_coefs) - if repn.linear_vars[i] is eff_discr_var) + var_coef = sum( + coef + for i, coef in enumerate(repn.linear_coefs) + if repn.linear_vars[i] is eff_discr_var + ) const = -(repn.constant - constr.upper) / var_coef possible_vals = set((const,)) for i, var in enumerate(repn.linear_vars): @@ -160,10 +179,12 @@ def determine_valid_values(block, discr_var_to_constrs_map, config): var_values = [v * coef for v in range(var.lb, var.ub + 1)] else: raise ValueError( - '%s has unacceptable variable domain: %s' % - (var.name, var.domain)) + '%s has unacceptable variable domain: %s' + % (var.name, var.domain) + ) possible_vals = set( - (v1 + v2 for v1 in possible_vals for v2 in var_values)) + (v1 + v2 for v1 in possible_vals for v2 in var_values) + ) old_possible_vals = possible_values.get(eff_discr_var, None) if old_possible_vals is not None: possible_values[eff_discr_var] = old_possible_vals & possible_vals @@ -179,8 +200,7 @@ def prune_possible_values(block_scope, possible_values, config): # Prune the set of possible values by solving a series of feasibility # problems top_level_scope = block_scope.model() - tmp_name = unique_component_name( - top_level_scope, '_induced_linearity_prune_data') + tmp_name = unique_component_name(top_level_scope, '_induced_linearity_prune_data') tmp_orig_blk = Block() setattr(top_level_scope, tmp_name, tmp_orig_blk) tmp_orig_blk._possible_values = possible_values @@ -191,13 +211,23 @@ def prune_possible_values(block_scope, possible_values, config): for obj in model.component_data_objects(Objective, active=True): obj.deactivate() for constr in model.component_data_objects( - Constraint, active=True, descend_into=(Block, Disjunct)): + Constraint, active=True, descend_into=(Block, Disjunct) + ): if constr.body.polynomial_degree() not in (1, 0): constr.deactivate() if block_scope.ctype == Disjunct: disj = tmp_clone_blk._tmp_block_scope[0] disj.indicator_var.fix(1) TransformationFactory('gdp.bigm').apply_to(model) + # FIXME: this whole transformation should be reworked to solve + # feasibility checks on independent models (using References) and + # NOT have to clone the model / deactivate components in place. + # + # Deactivate any Disjuncts / Disjunctions (so the writers don't complain) + for d in model.component_data_objects(Disjunction): + d.deactivate() + for d in model.component_data_objects(Disjunct): + d._deactivate_without_fixing_indicator() tmp_clone_blk.test_feasible = Constraint() tmp_clone_blk._obj = Objective(expr=1) for eff_discr_var, vals in tmp_clone_blk._possible_values.items(): @@ -209,11 +239,14 @@ def prune_possible_values(block_scope, possible_values, config): if res.solver.termination_condition is tc.infeasible: val_feasible[val] = False tmp_clone_blk._possible_values[eff_discr_var] = set( - v for v in tmp_clone_blk._possible_values[eff_discr_var] - if val_feasible.get(v, True)) + v + for v in tmp_clone_blk._possible_values[eff_discr_var] + if val_feasible.get(v, True) + ) for i, var in enumerate(tmp_orig_blk._possible_value_vars): possible_values[var] = tmp_clone_blk._possible_values[ - tmp_clone_blk._possible_value_vars[i]] + tmp_clone_blk._possible_value_vars[i] + ] return possible_values @@ -221,23 +254,32 @@ def prune_possible_values(block_scope, possible_values, config): def _process_bilinear_constraints(block, v1, v2, var_values, bilinear_constrs): # TODO check that the appropriate variable bounds exist. if not (v2.has_lb() and v2.has_ub()): - logger.warning(textwrap.dedent("""\ + logger.warning( + textwrap.dedent( + """\ Attempting to transform bilinear term {v1} * {v2} using effectively discrete variable {v1}, but {v2} is missing a lower or upper bound: ({v2lb}, {v2ub}). - """.format(v1=v1, v2=v2, v2lb=v2.lb, v2ub=v2.ub)).strip()) + """.format( + v1=v1, v2=v2, v2lb=v2.lb, v2ub=v2.ub + ) + ).strip() + ) return False blk = Block() unique_name = unique_component_name( - block, ("%s_%s_bilinear" % (v1.local_name, v2.local_name)) - .replace('[', '').replace(']', '')) + block, + ("%s_%s_bilinear" % (v1.local_name, v2.local_name)) + .replace('[', '') + .replace(']', ''), + ) block._induced_linearity_info.add_component(unique_name, blk) # TODO think about not using floats as indices in a set blk.valid_values = Set(initialize=sorted(var_values)) blk.x_active = Var(blk.valid_values, domain=Binary, initialize=1) blk.v_increment = Var( - blk.valid_values, domain=v2.domain, - bounds=(v2.lb, v2.ub), initialize=v2.value) + blk.valid_values, domain=v2.domain, bounds=(v2.lb, v2.ub), initialize=v2.value + ) blk.v_defn = Constraint(expr=v2 == summation(blk.v_increment)) @blk.Constraint(blk.valid_values) @@ -247,6 +289,7 @@ def v_lb(blk, val): @blk.Constraint(blk.valid_values) def v_ub(blk, val): return blk.v_increment[val] <= v2.ub * blk.x_active[val] + blk.select_one_value = Constraint(expr=summation(blk.x_active) == 1) # Categorize as case 1 or case 2 for bilinear_constr in bilinear_constrs: @@ -271,22 +314,27 @@ def v_ub(blk, val): def _reformulate_case_2(blk, v1, v2, bilinear_constr): repn = generate_standard_repn(bilinear_constr.body) replace_index = next( - i for i, var_tup in enumerate(repn.quadratic_vars) - if (var_tup[0] is v1 and var_tup[1] is v2) or - (var_tup[0] is v2 and var_tup[1] is v1)) - bilinear_constr.set_value(( - bilinear_constr.lower, - sum(coef * repn.linear_vars[i] - for i, coef in enumerate(repn.linear_coefs)) + - repn.quadratic_coefs[replace_index] * sum( - val * blk.v_increment[val] for val in blk.valid_values) + - sum(repn.quadratic_coefs[i] * var_tup[0] * var_tup[1] - for i, var_tup in enumerate(repn.quadratic_vars) - if not i == replace_index) + - repn.constant + - zero_if_None(repn.nonlinear_expr), - bilinear_constr.upper - )) + i + for i, var_tup in enumerate(repn.quadratic_vars) + if (var_tup[0] is v1 and var_tup[1] is v2) + or (var_tup[0] is v2 and var_tup[1] is v1) + ) + bilinear_constr.set_value( + ( + bilinear_constr.lower, + sum(coef * repn.linear_vars[i] for i, coef in enumerate(repn.linear_coefs)) + + repn.quadratic_coefs[replace_index] + * sum(val * blk.v_increment[val] for val in blk.valid_values) + + sum( + repn.quadratic_coefs[i] * var_tup[0] * var_tup[1] + for i, var_tup in enumerate(repn.quadratic_vars) + if not i == replace_index + ) + + repn.constant + + zero_if_None(repn.nonlinear_expr), + bilinear_constr.upper, + ) + ) def zero_if_None(val): @@ -304,7 +352,8 @@ def _bilinear_expressions(model): # x --> (y --> [constr1, constr2, ...], z --> [constr2, constr3]) bilinear_map = ComponentMap() for constr in model.component_data_objects( - Constraint, active=True, descend_into=(Block, Disjunct)): + Constraint, active=True, descend_into=(Block, Disjunct) + ): if constr.body.polynomial_degree() in (1, 0): continue # Skip trivial and linear constraints repn = generate_standard_repn(constr.body) @@ -337,9 +386,8 @@ def detect_effectively_discrete_vars(block, equality_tolerance): for constr in block.component_data_objects(Constraint, active=True): if constr.lower is None or constr.upper is None: continue # skip inequality constraints - if fabs(value(constr.lower) - value(constr.upper) - ) > equality_tolerance: - continue # not equality constriant. Skip. + if fabs(value(constr.lower) - value(constr.upper)) > equality_tolerance: + continue # not equality constraint. Skip. if constr.body.polynomial_degree() not in (1, 0): continue # skip nonlinear expressions repn = generate_standard_repn(constr.body) @@ -349,8 +397,7 @@ def detect_effectively_discrete_vars(block, equality_tolerance): # preprocessed before this, or we will end up reformulating # expressions that we do not need to here. continue - non_discrete_vars = list(v for v in repn.linear_vars - if v.is_continuous()) + non_discrete_vars = list(v for v in repn.linear_vars if v.is_continuous()) if len(non_discrete_vars) == 1: # We know that this is an effectively discrete continuous # variable. Add it to our identified variable list. diff --git a/pyomo/contrib/preprocessing/plugins/init_vars.py b/pyomo/contrib/preprocessing/plugins/init_vars.py index 06ad0e14aa3..2b37e13e4cd 100644 --- a/pyomo/contrib/preprocessing/plugins/init_vars.py +++ b/pyomo/contrib/preprocessing/plugins/init_vars.py @@ -19,8 +19,9 @@ @TransformationFactory.register( - 'contrib.init_vars_midpoint', - doc="Initialize non-fixed variables to the midpoint of their bounds.") + 'contrib.init_vars_midpoint', + doc="Initialize non-fixed variables to the midpoint of their bounds.", +) class InitMidpoint(IsomorphicTransformation): """Initialize non-fixed variables to the midpoint of their bounds. @@ -36,8 +37,7 @@ def _apply_to(self, instance, overwrite=False): overwrite: if False, transformation will not overwrite existing variable values. """ - for var in instance.component_data_objects( - ctype=Var, descend_into=True): + for var in instance.component_data_objects(ctype=Var, descend_into=True): if var.fixed: continue if var.value is not None and not overwrite: @@ -52,12 +52,12 @@ def _apply_to(self, instance, overwrite=False): # if one bound does not exist, set variable value to the other var.set_value(value(var.lb)) else: - var.set_value((value(var.lb) + value(var.ub)) / 2.) + var.set_value((value(var.lb) + value(var.ub)) / 2.0) @TransformationFactory.register( - 'contrib.init_vars_zero', - doc="Initialize non-fixed variables to zero.") + 'contrib.init_vars_zero', doc="Initialize non-fixed variables to zero." +) class InitZero(IsomorphicTransformation): """Initialize non-fixed variables to zero. @@ -73,8 +73,7 @@ def _apply_to(self, instance, overwrite=False): overwrite: if False, transformation will not overwrite existing variable values. """ - for var in instance.component_data_objects( - ctype=Var, descend_into=True): + for var in instance.component_data_objects(ctype=Var, descend_into=True): if var.fixed: continue if var.value is not None and not overwrite: diff --git a/pyomo/contrib/preprocessing/plugins/int_to_binary.py b/pyomo/contrib/preprocessing/plugins/int_to_binary.py index 7bde2a38e12..8b264868ba5 100644 --- a/pyomo/contrib/preprocessing/plugins/int_to_binary.py +++ b/pyomo/contrib/preprocessing/plugins/int_to_binary.py @@ -6,11 +6,20 @@ from pyomo.common.collections import ComponentSet from pyomo.common.config import ConfigBlock, ConfigValue, In -from pyomo.core import TransformationFactory, Var, Block, Constraint, Any, Binary, value, RangeSet, \ - Reals +from pyomo.core import ( + TransformationFactory, + Var, + Block, + Constraint, + Any, + Binary, + value, + RangeSet, + Reals, +) from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation from pyomo.gdp import Disjunct -from pyomo.core.expr.current import identify_variables +from pyomo.core.expr import identify_variables from pyomo.common.modeling import unique_component_name logger = logging.getLogger('pyomo.contrib.preprocessing') @@ -18,7 +27,8 @@ @TransformationFactory.register( 'contrib.integer_to_binary', - doc="Reformulate integer variables into binary variables.") + doc="Reformulate integer variables into binary variables.", +) class IntegerToBinary(IsomorphicTransformation): """Reformulate integer variables to binary variables and constraints. @@ -26,24 +36,33 @@ class IntegerToBinary(IsomorphicTransformation): """ CONFIG = ConfigBlock("contrib.integer_to_binary") - CONFIG.declare("strategy", ConfigValue( - default='base2', - domain=In('base2', ), - description="Reformulation method", - # TODO: eventually we will support other methods, but not yet. - )) - CONFIG.declare("ignore_unused", ConfigValue( - default=False, - domain=bool, - description="Ignore variables that do not appear in (potentially) active constraints. " - "These variables are unlikely to be passed to the solver." - )) - CONFIG.declare("relax_integrality", ConfigValue( - default=True, - domain=bool, - description="Relax the integrality of the integer variables " - "after adding in the binary variables and constraints." - )) + CONFIG.declare( + "strategy", + ConfigValue( + default='base2', + domain=In('base2'), + description="Reformulation method", + # TODO: eventually we will support other methods, but not yet. + ), + ) + CONFIG.declare( + "ignore_unused", + ConfigValue( + default=False, + domain=bool, + description="Ignore variables that do not appear in (potentially) active constraints. " + "These variables are unlikely to be passed to the solver.", + ), + ) + CONFIG.declare( + "relax_integrality", + ConfigValue( + default=True, + domain=bool, + description="Relax the integrality of the integer variables " + "after adding in the binary variables and constraints.", + ), + ) def _apply_to(self, model, **kwds): """Apply the transformation to the given model.""" @@ -51,19 +70,25 @@ def _apply_to(self, model, **kwds): config.set_value(kwds) integer_vars = list( - v for v in model.component_data_objects( - ctype=Var, descend_into=(Block, Disjunct)) - if v.is_integer() and not v.is_binary() and not v.fixed) + v + for v in model.component_data_objects( + ctype=Var, descend_into=(Block, Disjunct) + ) + if v.is_integer() and not v.is_binary() and not v.fixed + ) if len(integer_vars) == 0: - logger.info( - "Model has no free integer variables. No reformulation needed.") + logger.info("Model has no free integer variables. No reformulation needed.") return vars_on_constr = ComponentSet() for c in model.component_data_objects( - ctype=Constraint, descend_into=(Block, Disjunct), active=True): - vars_on_constr.update(v for v in identify_variables(c.body, include_fixed=False) - if v.is_integer()) + ctype=Constraint, descend_into=(Block, Disjunct), active=True + ): + vars_on_constr.update( + v + for v in identify_variables(c.body, include_fixed=False) + if v.is_integer() + ) if config.ignore_unused: num_vars_not_on_constr = len(integer_vars) - len(vars_on_constr) @@ -75,26 +100,31 @@ def _apply_to(self, model, **kwds): integer_vars = list(vars_on_constr) logger.info( - "Reformulating integer variables using the %s strategy." - % config.strategy) + "Reformulating integer variables using the %s strategy." % config.strategy + ) # Set up reformulation block blk_name = unique_component_name(model, "_int_to_binary_reform") reform_block = Block( doc="Holds variables and constraints for reformulating " - "integer variables to binary variables." + "integer variables to binary variables." ) setattr(model, blk_name, reform_block) reform_block.int_var_set = RangeSet(0, len(integer_vars) - 1) reform_block.new_binary_var = Var( - Any, domain=Binary, dense=False, initialize=0, - doc="Binary variable with index (int_var_idx, idx)") + Any, + domain=Binary, + dense=False, + initialize=0, + doc="Binary variable with index (int_var_idx, idx)", + ) reform_block.integer_to_binary_constraint = Constraint( reform_block.int_var_set, doc="Equality constraints mapping the binary variable values " - "to the integer variable value.") + "to the integer variable value.", + ) # check that variables are bounded for idx, int_var in enumerate(integer_vars): @@ -103,16 +133,21 @@ def _apply_to(self, model, **kwds): "Integer variable %s is missing an " "upper or lower bound. LB: %s; UB: %s. " "Integer to binary reformulation does not support unbounded integer variables." - % (int_var.name, int_var.lb, int_var.ub)) + % (int_var.name, int_var.lb, int_var.ub) + ) # do the reformulation highest_power = int(floor(log(value(int_var.ub - int_var.lb), 2))) # TODO potentially fragile due to floating point reform_block.integer_to_binary_constraint.add( - idx, expr=int_var == sum( - reform_block.new_binary_var[idx, pwr] * (2 ** pwr) - for pwr in range(0, highest_power + 1)) - + int_var.lb) + idx, + expr=int_var + == sum( + reform_block.new_binary_var[idx, pwr] * (2**pwr) + for pwr in range(0, highest_power + 1) + ) + + int_var.lb, + ) # Relax the original integer variable if config.relax_integrality: @@ -121,5 +156,9 @@ def _apply_to(self, model, **kwds): logger.info( "Reformulated %s integer variables using " "%s binary variables and %s constraints." - % (len(integer_vars), len(reform_block.new_binary_var), - len(reform_block.integer_to_binary_constraint))) + % ( + len(integer_vars), + len(reform_block.new_binary_var), + len(reform_block.integer_to_binary_constraint), + ) + ) diff --git a/pyomo/contrib/preprocessing/plugins/remove_zero_terms.py b/pyomo/contrib/preprocessing/plugins/remove_zero_terms.py index 71dc4215e9f..7cce719f98d 100644 --- a/pyomo/contrib/preprocessing/plugins/remove_zero_terms.py +++ b/pyomo/contrib/preprocessing/plugins/remove_zero_terms.py @@ -16,15 +16,15 @@ from pyomo.core import quicksum from pyomo.core.base.constraint import Constraint from pyomo.core.base.transformation import TransformationFactory -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation from pyomo.repn import generate_standard_repn from pyomo.common.config import ConfigDict, ConfigValue @TransformationFactory.register( - 'contrib.remove_zero_terms', - doc="Remove terms 0 * var in constraints") + 'contrib.remove_zero_terms', doc="Remove terms 0 * var in constraints" +) class RemoveZeroTerms(IsomorphicTransformation): """Looks for :math:`0 v` in a constraint and removes it. @@ -34,12 +34,16 @@ class RemoveZeroTerms(IsomorphicTransformation): .. note:: TODO: support nonlinear expressions """ + CONFIG = ConfigDict("RemoveZeroTerms") - CONFIG.declare("constraints_modified", ConfigValue( - default={}, - description="A dictionary that maps the constraints modified during " - "the transformation to a tuple: (original_expr, modified_expr)" - )) + CONFIG.declare( + "constraints_modified", + ConfigValue( + default={}, + description="A dictionary that maps the constraints modified during " + "the transformation to a tuple: (original_expr, modified_expr)", + ), + ) def _apply_to(self, model, **kwargs): """Apply the transformation.""" @@ -47,26 +51,33 @@ def _apply_to(self, model, **kwargs): m = model for constr in m.component_data_objects( - ctype=Constraint, active=True, descend_into=True): + ctype=Constraint, active=True, descend_into=True + ): repn = generate_standard_repn(constr.body) if not repn.is_linear() or repn.is_constant(): continue # we currently only process linear constraints, and we - # assume that trivial constraints have already been - # deactivated or will be deactivated in a different - # step + # assume that trivial constraints have already been + # deactivated or will be deactivated in a different + # step original_expr = constr.expr # get the index of all nonzero coefficient variables nonzero_vars_indx = [ - i for i, _ in enumerate(repn.linear_vars) + i + for i, _ in enumerate(repn.linear_vars) if not repn.linear_coefs[i] == 0 ] const = repn.constant # reconstitute the constraint, including only variable terms with # nonzero coefficients - constr_body = quicksum(repn.linear_coefs[i] * repn.linear_vars[i] - for i in nonzero_vars_indx) + const + constr_body = ( + quicksum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in nonzero_vars_indx + ) + + const + ) if constr.equality: new_expr = constr_body == constr.upper elif constr.has_lb() and not constr.has_ub(): @@ -75,7 +86,6 @@ def _apply_to(self, model, **kwargs): new_expr = constr_body <= constr.upper else: # constraint is a bounded inequality of form a <= x <= b. - new_expr = EXPR.inequality( constr.lower, constr_body, - constr.upper) + new_expr = EXPR.inequality(constr.lower, constr_body, constr.upper) constr.set_value(new_expr) config.constraints_modified[constr] = (original_expr, new_expr) diff --git a/pyomo/contrib/preprocessing/plugins/strip_bounds.py b/pyomo/contrib/preprocessing/plugins/strip_bounds.py index 2e937af2868..51704bc9d58 100644 --- a/pyomo/contrib/preprocessing/plugins/strip_bounds.py +++ b/pyomo/contrib/preprocessing/plugins/strip_bounds.py @@ -12,15 +12,21 @@ """Transformation to strip variable bounds from a model.""" from pyomo.common.collections import ComponentMap -from pyomo.common.config import ConfigBlock, ConfigValue, add_docstring_list +from pyomo.common.config import ( + ConfigBlock, + ConfigValue, + document_kwargs_from_configdict, +) from pyomo.core.base.transformation import TransformationFactory from pyomo.core.base.var import Var from pyomo.core.base.set_types import Reals from pyomo.core.plugins.transform.hierarchy import NonIsomorphicTransformation -@TransformationFactory.register('contrib.strip_var_bounds', - doc="Strip bounds from varaibles.") +@TransformationFactory.register( + 'contrib.strip_var_bounds', doc="Strip bounds from variables." +) +@document_kwargs_from_configdict('CONFIG') class VariableBoundStripper(NonIsomorphicTransformation): """Strip bounds from variables. @@ -30,29 +36,40 @@ class VariableBoundStripper(NonIsomorphicTransformation): """ CONFIG = ConfigBlock() - CONFIG.declare("strip_domains", ConfigValue( - default=True, domain=bool, - description="strip the domain for discrete variables as well" - )) - CONFIG.declare("reversible", ConfigValue( - default=False, domain=bool, - description="Whether the bound stripping will be temporary. " - "If so, store information for reversion." - )) - - __doc__ = add_docstring_list(__doc__, CONFIG) + CONFIG.declare( + "strip_domains", + ConfigValue( + default=True, + domain=bool, + description="strip the domain for discrete variables as well", + ), + ) + CONFIG.declare( + "reversible", + ConfigValue( + default=False, + domain=bool, + description="Whether the bound stripping will be temporary. " + "If so, store information for reversion.", + ), + ) def _apply_to(self, instance, **kwds): config = self.CONFIG(kwds) if config.reversible: - if any(hasattr(instance, map_name) for map_name in [ + if any( + hasattr(instance, map_name) + for map_name in [ '_tmp_var_bound_strip_lb', '_tmp_var_bound_strip_ub', - '_tmp_var_bound_strip_domain']): + '_tmp_var_bound_strip_domain', + ] + ): raise RuntimeError( 'Variable stripping reversion component maps already ' 'exist. Did you already apply a temporary transformation ' - 'without a subsequent reversion?') + 'without a subsequent reversion?' + ) # Component maps to store data for reversion. instance._tmp_var_bound_strip_lb = ComponentMap() instance._tmp_var_bound_strip_ub = ComponentMap() @@ -73,8 +90,7 @@ def _apply_to(self, instance, **kwds): def revert(self, instance): """Revert variable bounds and domains changed by the transformation.""" - for var in instance.component_data_objects( - ctype=Var, descend_into=True): + for var in instance.component_data_objects(ctype=Var, descend_into=True): if var in instance._tmp_var_bound_strip_lb: var.setlb(instance._tmp_var_bound_strip_lb[var]) if var in instance._tmp_var_bound_strip_ub: diff --git a/pyomo/contrib/preprocessing/plugins/var_aggregator.py b/pyomo/contrib/preprocessing/plugins/var_aggregator.py index d1f748e2a1d..0a429cb5a67 100644 --- a/pyomo/contrib/preprocessing/plugins/var_aggregator.py +++ b/pyomo/contrib/preprocessing/plugins/var_aggregator.py @@ -14,10 +14,8 @@ from __future__ import division from pyomo.common.collections import ComponentMap, ComponentSet -from pyomo.core.base import ( - Block, Constraint, VarList, Objective, TransformationFactory, -) -from pyomo.core.expr.current import ExpressionReplacementVisitor +from pyomo.core.base import Block, Constraint, VarList, Objective, TransformationFactory +from pyomo.core.expr import ExpressionReplacementVisitor from pyomo.core.expr.numvalue import value from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation from pyomo.repn import generate_standard_repn @@ -41,22 +39,25 @@ def _get_equality_linked_variables(constraint): # Generate the standard linear representation repn = generate_standard_repn(constraint.body) - nonzero_coef_vars = tuple(v for i, v in enumerate(repn.linear_vars) - # if coefficient on variable is nonzero - if repn.linear_coefs[i] != 0) + nonzero_coef_vars = tuple( + v + for i, v in enumerate(repn.linear_vars) + # if coefficient on variable is nonzero + if repn.linear_coefs[i] != 0 + ) if len(nonzero_coef_vars) != 2: - # Expect two variables with nonzero cofficient in constraint; + # Expect two variables with nonzero coefficient in constraint; # otherwise, return empty tuple. return () if sorted(coef for coef in repn.linear_coefs if coef != 0) != [-1, 1]: # Expect a constraint of form x == y --> 0 == -1 * x + 1 * y; # otherwise, return empty tuple. return () - # Above checks are satisifed. Return the variables. + # Above checks are satisfied. Return the variables. return nonzero_coef_vars -def _fix_equality_fixed_variables(model, scaling_tolerance=1E-10): +def _fix_equality_fixed_variables(model, scaling_tolerance=1e-10): """Detects variables fixed by a constraint: ax=b. Fixes the variable to the constant value (b/a) and deactivates the relevant @@ -87,11 +88,12 @@ def _fix_equality_fixed_variables(model, scaling_tolerance=1E-10): (repn.linear_coefs[i], v) for i, v in enumerate(repn.linear_vars) # if coefficient on variable is nonzero - if repn.linear_coefs[i] != 0) + if repn.linear_coefs[i] != 0 + ) # get the coefficient and variable object coef, var = next(nonzero_coef_vars) if next(nonzero_coef_vars, None) is not None: - # Expect one variable with nonzero cofficient in constraint; + # Expect one variable with nonzero coefficient in constraint; # otherwise, skip. continue # Constant term on the constraint body @@ -101,9 +103,17 @@ def _fix_equality_fixed_variables(model, scaling_tolerance=1E-10): logger.warning( "Skipping fixed variable processing for constraint %s: " "%s * %s + %s = %s because coefficient %s is below " - "tolerance of %s. Check your problem scaling." % - (constraint.name, coef, var.name, const, - value(constraint.lower), coef, scaling_tolerance)) + "tolerance of %s. Check your problem scaling." + % ( + constraint.name, + coef, + var.name, + const, + value(constraint.lower), + coef, + scaling_tolerance, + ) + ) continue # Constraint has form lower <= coef * var + const <= upper. We know that @@ -128,7 +138,8 @@ def _build_equality_set(model): # Loop through all the active constraints in the model for constraint in model.component_data_objects( - ctype=Constraint, active=True, descend_into=True): + ctype=Constraint, active=True, descend_into=True + ): eq_linked_vars = _get_equality_linked_variables(constraint) if not eq_linked_vars: continue # if we get an empty tuple, skip to next constraint. @@ -171,8 +182,10 @@ def max_if_not_None(iterable): return max(non_nones or [None]) # min( [] or [None] ) -> None -@TransformationFactory.register('contrib.aggregate_vars', - doc="Aggregate model variables that are linked by equality constraints.") +@TransformationFactory.register( + 'contrib.aggregate_vars', + doc="Aggregate model variables that are linked by equality constraints.", +) class VariableAggregator(IsomorphicTransformation): """Aggregate model variables that are linked by equality constraints. @@ -208,7 +221,8 @@ def _apply_to(self, model, detect_fixed_vars=True): # Generate aggregation infrastructure model._var_aggregator_info = Block( doc="Holds information for the variable aggregation " - "transformation system.") + "transformation system." + ) z = model._var_aggregator_info.z = VarList(doc="Aggregated variables.") # Map of the aggregate var to the equalty set (ComponentSet) z_to_vars = model._var_aggregator_info.z_to_vars = ComponentMap() @@ -219,8 +233,7 @@ def _apply_to(self, model, detect_fixed_vars=True): # TODO This iteritems is sorted by the variable name of the key in # order to preserve determinism. Unfortunately, var.name() is an # expensive operation right now. - for var, eq_set in sorted(eq_var_map.items(), - key=lambda tup: tup[0].name): + for var, eq_set in sorted(eq_var_map.items(), key=lambda tup: tup[0].name): if var in processed_vars: continue # Skip already-process variables @@ -241,61 +254,60 @@ def _apply_to(self, model, detect_fixed_vars=True): fixed_vars = [v for v in eq_set if v.fixed] if fixed_vars: # Check to make sure all the fixed values are the same. - if any(var.value != fixed_vars[0].value - for var in fixed_vars[1:]): + if any(var.value != fixed_vars[0].value for var in fixed_vars[1:]): raise ValueError( "Aggregate variable for equality set is fixed to " - "multiple different values: %s" % (fixed_vars,)) + "multiple different values: %s" % (fixed_vars,) + ) z_agg.fix(fixed_vars[0].value) # Check that the fixed value lies within bounds. if z_agg.has_lb() and z_agg.value < value(z_agg.lb): raise ValueError( "Aggregate variable for equality set is fixed to " - "a value less than its lower bound: %s < LB %s" % - (z_agg.value, value(z_agg.lb)) + "a value less than its lower bound: %s < LB %s" + % (z_agg.value, value(z_agg.lb)) ) if z_agg.has_ub() and z_agg.value > value(z_agg.ub): raise ValueError( "Aggregate variable for equality set is fixed to " - "a value greater than its upper bound: %s > UB %s" % - (z_agg.value, value(z_agg.ub)) + "a value greater than its upper bound: %s > UB %s" + % (z_agg.value, value(z_agg.ub)) ) else: # Set the value to be the average of the values within the # bounds only if the value is not already fixed. values_within_bounds = [ - v.value for v in eq_set if ( + v.value + for v in eq_set + if ( v.value is not None and (not z_agg.has_lb() or v.value >= value(z_agg.lb)) and (not z_agg.has_ub() or v.value <= value(z_agg.ub)) - )] + ) + ] if values_within_bounds: - z_agg.set_value(sum(values_within_bounds) / - len(values_within_bounds), - skip_validation=True) + z_agg.set_value( + sum(values_within_bounds) / len(values_within_bounds), + skip_validation=True, + ) processed_vars.update(eq_set) # Do the substitution - substitution_map = {id(var): z_var - for var, z_var in var_to_z.items()} + substitution_map = {id(var): z_var for var, z_var in var_to_z.items()} visitor = ExpressionReplacementVisitor( substitute=substitution_map, descend_into_named_expressions=True, remove_named_expressions=False, ) - for constr in model.component_data_objects( - ctype=Constraint, active=True - ): + for constr in model.component_data_objects(ctype=Constraint, active=True): orig_body = constr.body new_body = visitor.walk_expression(constr.body) if orig_body is not new_body: constr.set_value((constr.lower, new_body, constr.upper)) - for objective in model.component_data_objects( - ctype=Objective, active=True - ): + for objective in model.component_data_objects(ctype=Objective, active=True): orig_expr = objective.expr new_expr = visitor.walk_expression(objective.expr) if orig_expr is not new_expr: diff --git a/pyomo/contrib/preprocessing/plugins/zero_sum_propagator.py b/pyomo/contrib/preprocessing/plugins/zero_sum_propagator.py index 143ace484a1..16c6614cb3b 100644 --- a/pyomo/contrib/preprocessing/plugins/zero_sum_propagator.py +++ b/pyomo/contrib/preprocessing/plugins/zero_sum_propagator.py @@ -18,8 +18,10 @@ from pyomo.repn.standard_repn import generate_standard_repn -@TransformationFactory.register('contrib.propagate_zero_sum', - doc="Propagate fixed-to-zero for sums of only positive (or negative) vars.") +@TransformationFactory.register( + 'contrib.propagate_zero_sum', + doc="Propagate fixed-to-zero for sums of only positive (or negative) vars.", +) class ZeroSumPropagator(IsomorphicTransformation): """Propagates fixed-to-zero for sums of only positive (or negative) vars. @@ -31,51 +33,65 @@ class ZeroSumPropagator(IsomorphicTransformation): """ def _apply_to(self, instance): - for constr in instance.component_data_objects(ctype=Constraint, - active=True, - descend_into=True): + for constr in instance.component_data_objects( + ctype=Constraint, active=True, descend_into=True + ): if not constr.body.polynomial_degree() == 1: continue # constraint not linear. Skip. repn = generate_standard_repn(constr.body) - if (constr.has_ub() and ( - (repn.constant is None and value(constr.upper) == 0) or - repn.constant == value(constr.upper) - )): + if constr.has_ub() and ( + (repn.constant is None and value(constr.upper) == 0) + or repn.constant == value(constr.upper) + ): # term1 + term2 + term3 + ... <= 0 # all var terms need to be non-negative if all( # variable has 0 coefficient coef == 0 or # variable is non-negative and has non-negative coefficient - (repn.linear_vars[i].has_lb() and - value(repn.linear_vars[i].lb) >= 0 and - coef >= 0) or + ( + repn.linear_vars[i].has_lb() + and value(repn.linear_vars[i].lb) >= 0 + and coef >= 0 + ) + or # variable is non-positive and has non-positive coefficient - (repn.linear_vars[i].has_ub() and - value(repn.linear_vars[i].ub) <= 0 and - coef <= 0) for i, coef in enumerate(repn.linear_coefs)): + ( + repn.linear_vars[i].has_ub() + and value(repn.linear_vars[i].ub) <= 0 + and coef <= 0 + ) + for i, coef in enumerate(repn.linear_coefs) + ): for i, coef in enumerate(repn.linear_coefs): if not coef == 0: repn.linear_vars[i].fix(0) continue - if (constr.has_lb() and ( - (repn.constant is None and value(constr.lower) == 0) or - repn.constant == value(constr.lower) - )): + if constr.has_lb() and ( + (repn.constant is None and value(constr.lower) == 0) + or repn.constant == value(constr.lower) + ): # term1 + term2 + term3 + ... >= 0 # all var terms need to be non-positive if all( # variable has 0 coefficient coef == 0 or # variable is non-negative and has non-positive coefficient - (repn.linear_vars[i].has_lb() and - value(repn.linear_vars[i].lb) >= 0 and - coef <= 0) or + ( + repn.linear_vars[i].has_lb() + and value(repn.linear_vars[i].lb) >= 0 + and coef <= 0 + ) + or # variable is non-positive and has non-negative coefficient - (repn.linear_vars[i].has_ub() and - value(repn.linear_vars[i].ub) <= 0 and - coef >= 0) for i, coef in enumerate(repn.linear_coefs)): + ( + repn.linear_vars[i].has_ub() + and value(repn.linear_vars[i].ub) <= 0 + and coef >= 0 + ) + for i, coef in enumerate(repn.linear_coefs) + ): for i, coef in enumerate(repn.linear_coefs): if not coef == 0: repn.linear_vars[i].fix(0) diff --git a/pyomo/contrib/preprocessing/tests/test_bounds_to_vars_xfrm.py b/pyomo/contrib/preprocessing/tests/test_bounds_to_vars_xfrm.py index 5b849508132..5770b23eb11 100644 --- a/pyomo/contrib/preprocessing/tests/test_bounds_to_vars_xfrm.py +++ b/pyomo/contrib/preprocessing/tests/test_bounds_to_vars_xfrm.py @@ -1,7 +1,13 @@ """Tests explicit bound to variable bound transformation module.""" import pyomo.common.unittest as unittest -from pyomo.environ import (ConcreteModel, Constraint, TransformationFactory, - Var, value, Integers) +from pyomo.environ import ( + ConcreteModel, + Constraint, + TransformationFactory, + Var, + value, + Integers, +) class TestConstraintToVarBoundTransform(unittest.TestCase): @@ -27,8 +33,7 @@ def test_constraint_to_var_bound(self): m.c7 = Constraint(expr=m.v7 + 2 >= 2.01) m.c8 = Constraint(expr=m.v8 + 2 >= 2.0001) - m2 = TransformationFactory( - 'contrib.constraints_to_var_bounds').create_using(m) + m2 = TransformationFactory('contrib.constraints_to_var_bounds').create_using(m) self.assertEqual(value(m2.v1.lb), 2) self.assertEqual(value(m2.v1.ub), 2) self.assertTrue(m2.v1.fixed) @@ -48,7 +53,9 @@ def test_constraint_to_var_bound(self): del m2 # to keep from accidentally using it below - TransformationFactory('contrib.constraints_to_var_bounds').apply_to(m, tolerance=1e-3) + TransformationFactory('contrib.constraints_to_var_bounds').apply_to( + m, tolerance=1e-3 + ) self.assertEqual(value(m.v1.lb), 2) self.assertEqual(value(m.v1.ub), 2) self.assertTrue(m.v1.fixed) @@ -75,7 +82,6 @@ def test_skip_trivial_constraints(self): m.z.fix(0) m.y.fix(0) TransformationFactory('contrib.constraints_to_var_bounds').apply_to(m) - self.assertEqual(m.c.body.polynomial_degree(), 1) self.assertTrue(m.c.active) self.assertFalse(m.x.has_lb()) self.assertFalse(m.x.has_ub()) @@ -85,10 +91,13 @@ def test_detect_fixed_false(self): m.x = Var() m.c = Constraint(expr=m.x == 3) TransformationFactory('contrib.constraints_to_var_bounds').apply_to( - m, detect_fixed=False) + m, detect_fixed=False + ) self.assertFalse(m.c.active) self.assertTrue(m.x.has_lb()) + self.assertEqual(m.x.lb, 3) self.assertTrue(m.x.has_ub()) + self.assertEqual(m.x.ub, 3) self.assertFalse(m.x.fixed) diff --git a/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py b/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py index f93aa8a88b5..aa7fa52d272 100644 --- a/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py +++ b/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py @@ -1,6 +1,6 @@ """Tests the Bounds Tightening module.""" import pyomo.common.unittest as unittest -from pyomo.environ import (ConcreteModel, Constraint, TransformationFactory, Var, value) +from pyomo.environ import ConcreteModel, Constraint, TransformationFactory, Var, value class TestIntervalTightener(unittest.TestCase): diff --git a/pyomo/contrib/preprocessing/tests/test_deactivate_trivial_constraints.py b/pyomo/contrib/preprocessing/tests/test_deactivate_trivial_constraints.py index 87c9894ff5e..fa0ca6cfa9a 100644 --- a/pyomo/contrib/preprocessing/tests/test_deactivate_trivial_constraints.py +++ b/pyomo/contrib/preprocessing/tests/test_deactivate_trivial_constraints.py @@ -2,8 +2,7 @@ """Tests deactivation of trivial constraints.""" import pyomo.common.unittest as unittest from pyomo.common.errors import InfeasibleConstraintException -from pyomo.environ import (Constraint, ConcreteModel, TransformationFactory, - Var) +from pyomo.environ import Constraint, ConcreteModel, TransformationFactory, Var class TestTrivialConstraintDeactivator(unittest.TestCase): @@ -20,8 +19,7 @@ def test_deactivate_trivial_constraints(self): m.c3 = Constraint(expr=m.v1 <= 5) m.v1.fix() - TransformationFactory( - 'contrib.deactivate_trivial_constraints').apply_to(m) + TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(m) self.assertTrue(m.c.active) self.assertTrue(m.c2.active) self.assertFalse(m.c3.active) @@ -38,9 +36,9 @@ def test_deactivate_trivial_constraints_return_list(self): m.v1.fix() trivial = [] - TransformationFactory( - 'contrib.deactivate_trivial_constraints').apply_to( - m, return_trivial=trivial) + TransformationFactory('contrib.deactivate_trivial_constraints').apply_to( + m, return_trivial=trivial + ) self.assertTrue(m.c.active) self.assertTrue(m.c2.active) self.assertFalse(m.c3.active) @@ -58,8 +56,7 @@ def test_deactivate_trivial_constraints_revert(self): m.c3 = Constraint(expr=m.v1 <= 5) m.v1.fix() - xfrm = TransformationFactory( - 'contrib.deactivate_trivial_constraints') + xfrm = TransformationFactory('contrib.deactivate_trivial_constraints') xfrm.apply_to(m, tmp=True) self.assertTrue(m.c.active) self.assertTrue(m.c2.active) @@ -71,8 +68,9 @@ def test_deactivate_trivial_constraints_revert(self): def test_trivial_constraints_lb_conflict(self): """Test for violated trivial constraint lower bound.""" with self.assertRaisesRegex( - InfeasibleConstraintException, - "Trivial constraint c violates LB 2.0 ≤ BODY 1."): + InfeasibleConstraintException, + "Trivial constraint c violates LB 2.0 ≤ BODY 1.", + ): self._trivial_constraints_lb_conflict() def _trivial_constraints_lb_conflict(self): @@ -80,14 +78,14 @@ def _trivial_constraints_lb_conflict(self): m.v1 = Var(initialize=1) m.c = Constraint(expr=m.v1 >= 2) m.v1.fix() - TransformationFactory( - 'contrib.deactivate_trivial_constraints').apply_to(m) + TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(m) def test_trivial_constraints_ub_conflict(self): """Test for violated trivial constraint upper bound.""" with self.assertRaisesRegex( - InfeasibleConstraintException, - "Trivial constraint c violates BODY 1 ≤ UB 0.0."): + InfeasibleConstraintException, + "Trivial constraint c violates BODY 1 ≤ UB 0.0.", + ): self._trivial_constraints_ub_conflict() def _trivial_constraints_ub_conflict(self): @@ -95,18 +93,16 @@ def _trivial_constraints_ub_conflict(self): m.v1 = Var(initialize=1) m.c = Constraint(expr=m.v1 <= 0) m.v1.fix() - TransformationFactory( - 'contrib.deactivate_trivial_constraints').apply_to(m) + TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(m) def test_trivial_constraint_due_to_0_coefficient(self): m = ConcreteModel() m.x = Var() m.y = Var() m.y.fix(0) - m.c = Constraint(expr=m.x*m.y >= 0) + m.c = Constraint(expr=m.x * m.y >= 0) - TransformationFactory( - 'contrib.deactivate_trivial_constraints').apply_to(m) + TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(m) self.assertFalse(m.c.active) @@ -115,10 +111,9 @@ def test_higher_degree_trivial_constraint(self): m.x = Var() m.y = Var() m.z = Var() - m.c = Constraint(expr=(m.x**2 + m.y)*m.z >= -8) + m.c = Constraint(expr=(m.x**2 + m.y) * m.z >= -8) m.z.fix(0) - TransformationFactory( - 'contrib.deactivate_trivial_constraints').apply_to(m) + TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(m) self.assertFalse(m.c.active) def test_trivial_linear_constraint_due_to_cancellation(self): @@ -126,8 +121,7 @@ def test_trivial_linear_constraint_due_to_cancellation(self): m.x = Var() m.c = Constraint(expr=m.x - m.x <= 0) - TransformationFactory( - 'contrib.deactivate_trivial_constraints').apply_to(m) + TransformationFactory('contrib.deactivate_trivial_constraints').apply_to(m) self.assertFalse(m.c.active) diff --git a/pyomo/contrib/preprocessing/tests/test_equality_propagate.py b/pyomo/contrib/preprocessing/tests/test_equality_propagate.py index 6d682af2afd..40e1d7eecb9 100644 --- a/pyomo/contrib/preprocessing/tests/test_equality_propagate.py +++ b/pyomo/contrib/preprocessing/tests/test_equality_propagate.py @@ -2,8 +2,14 @@ import pyomo.common.unittest as unittest from pyomo.common.errors import InfeasibleConstraintException -from pyomo.environ import (ConcreteModel, Constraint, RangeSet, - TransformationFactory, Var, value) +from pyomo.environ import ( + ConcreteModel, + Constraint, + RangeSet, + TransformationFactory, + Var, + value, +) class TestEqualityPropagate(unittest.TestCase): @@ -101,18 +107,18 @@ def test_var_fix_revert(self): def test_var_fix_accounts_for_constants(self): """Test to make sure that constraints of the form x == y + constant - are handled correctly when propogating fixed variables.""" + are handled correctly when propagating fixed variables.""" m = ConcreteModel() m.v = Var(initialize=1.0) m.v2 = Var(initialize=1.0) m.v3 = Var(initialize=1.0) - m.c = Constraint(expr = m.v - m.v2 + m.v3 == 0) + m.c = Constraint(expr=m.v - m.v2 + m.v3 == 0) m.v.fix() m.v4 = Var(initialize=1.0) m.c2 = Constraint(expr=m.v2 == m.v4) m.v4.fix() TransformationFactory('contrib.propagate_fixed_vars').apply_to(m) - + self.assertTrue(m.v.fixed) self.assertEqual(value(m.v), 1) self.assertTrue(m.v4.fixed) @@ -182,10 +188,12 @@ def test_var_bound_propagate_crossover(self): m.v2 = Var(initialize=5, bounds=(4, 8)) m.c1 = Constraint(expr=m.v1 == m.v2) xfrm = TransformationFactory('contrib.propagate_eq_var_bounds') - with self.assertRaisesRegex(InfeasibleConstraintException, - "Variable v2 has a lower bound 4 > the " - "upper bound 3 of variable v1, but they " - "are linked by equality constraints"): + with self.assertRaisesRegex( + InfeasibleConstraintException, + "Variable v2 has a lower bound 4 > the " + "upper bound 3 of variable v1, but they " + "are linked by equality constraints", + ): xfrm.apply_to(m) def test_var_bound_propagate_revert(self): diff --git a/pyomo/contrib/preprocessing/tests/test_induced_linearity.py b/pyomo/contrib/preprocessing/tests/test_induced_linearity.py index 2a161c5e536..c2c24c33f14 100644 --- a/pyomo/contrib/preprocessing/tests/test_induced_linearity.py +++ b/pyomo/contrib/preprocessing/tests/test_induced_linearity.py @@ -14,11 +14,21 @@ from pyomo.contrib.preprocessing.plugins.induced_linearity import ( _bilinear_expressions, detect_effectively_discrete_vars, - determine_valid_values) + determine_valid_values, +) from pyomo.common.collections import ComponentSet, Bunch -from pyomo.environ import (Binary, ConcreteModel, Constraint, ConstraintList, - Integers, RangeSet, SolverFactory, - TransformationFactory, Var, exp) +from pyomo.environ import ( + Binary, + ConcreteModel, + Constraint, + ConstraintList, + Integers, + RangeSet, + SolverFactory, + TransformationFactory, + Var, + exp, +) from pyomo.gdp import Disjunct, Disjunction from pyomo.repn import generate_standard_repn @@ -34,8 +44,12 @@ def test_detect_bilinear_vars(self): m.y = Var() m.z = Var() m.c = Constraint( - expr=(m.x - 3) * (m.y + 2) - (m.z + 4) * m.y + (m.x + 2) ** 2 - + exp(m.y ** 2) * m.x <= m.z) + expr=(m.x - 3) * (m.y + 2) + - (m.z + 4) * m.y + + (m.x + 2) ** 2 + + exp(m.y**2) * m.x + <= m.z + ) m.c2 = Constraint(expr=m.x * m.y == 3) bilinear_map = _bilinear_expressions(m) self.assertEqual(len(bilinear_map), 3) @@ -55,16 +69,16 @@ def test_detect_effectively_discrete_vars(self): m.z = Var(domain=Integers) m.constr = Constraint(expr=m.x == m.y + m.z) m.ignore_inequality = Constraint(expr=m.x <= m.y + m.z) - m.ignore_nonlinear = Constraint(expr=m.x ** 2 == m.y + m.z) + m.ignore_nonlinear = Constraint(expr=m.x**2 == m.y + m.z) m.a = Var() m.b = Var(domain=Binary) m.c = Var(domain=Integers) m.disj = Disjunct() m.disj.constr = Constraint(expr=m.a == m.b + m.c) - effectively_discrete = detect_effectively_discrete_vars(m, 1E-6) + effectively_discrete = detect_effectively_discrete_vars(m, 1e-6) self.assertEqual(len(effectively_discrete), 1) self.assertEqual(effectively_discrete[m.x], [m.constr]) - effectively_discrete = detect_effectively_discrete_vars(m.disj, 1E-6) + effectively_discrete = detect_effectively_discrete_vars(m.disj, 1e-6) self.assertEqual(len(effectively_discrete), 1) self.assertEqual(effectively_discrete[m.a], [m.disj.constr]) @@ -75,15 +89,17 @@ def test_determine_valid_values(self): m.y = Var(RangeSet(4), domain=Binary) m.z = Var(domain=Integers, bounds=(-1, 2)) m.constr = Constraint( - expr=m.x == m.y[1] + 2 * m.y[2] + m.y[3] + 2 * m.y[4] + m.z) + expr=m.x == m.y[1] + 2 * m.y[2] + m.y[3] + 2 * m.y[4] + m.z + ) m.logical = ConstraintList() m.logical.add(expr=m.y[1] + m.y[2] == 1) m.logical.add(expr=m.y[3] + m.y[4] == 1) m.logical.add(expr=m.y[2] + m.y[4] <= 1) var_to_values_map = determine_valid_values( - m, detect_effectively_discrete_vars(m, 1E-6), Bunch( - equality_tolerance=1E-6, - pruning_solver='glpk')) + m, + detect_effectively_discrete_vars(m, 1e-6), + Bunch(equality_tolerance=1e-6, pruning_solver='glpk'), + ) valid_values = set([1, 2, 3, 4, 5]) self.assertEqual(set(var_to_values_map[m.x]), valid_values) @@ -94,7 +110,8 @@ def test_induced_linearity_case2(self): m.y = Var(RangeSet(4), domain=Binary) m.z = Var(domain=Integers, bounds=(-1, 2)) m.constr = Constraint( - expr=m.x[0] == m.y[1] + 2 * m.y[2] + m.y[3] + 2 * m.y[4] + m.z) + expr=m.x[0] == m.y[1] + 2 * m.y[2] + m.y[3] + 2 * m.y[4] + m.z + ) m.logical = ConstraintList() m.logical.add(expr=m.y[1] + m.y[2] == 1) m.logical.add(expr=m.y[3] + m.y[4] == 1) @@ -102,17 +119,17 @@ def test_induced_linearity_case2(self): m.b = Var(bounds=(-2, 7)) m.c = Var() m.bilinear = Constraint( - expr=(m.x[0] - 3) * (m.b + 2) - (m.c + 4) * m.b + - exp(m.b ** 2) * m.x[0] <= m.c) + expr=(m.x[0] - 3) * (m.b + 2) - (m.c + 4) * m.b + exp(m.b**2) * m.x[0] + <= m.c + ) TransformationFactory('contrib.induced_linearity').apply_to(m) xfrmed_blk = m._induced_linearity_info.x0_b_bilinear - self.assertSetEqual( - set(xfrmed_blk.valid_values), set([1, 2, 3, 4, 5])) - select_one_repn = generate_standard_repn( - xfrmed_blk.select_one_value.body) + self.assertSetEqual(set(xfrmed_blk.valid_values), set([1, 2, 3, 4, 5])) + select_one_repn = generate_standard_repn(xfrmed_blk.select_one_value.body) self.assertEqual( ComponentSet(select_one_repn.linear_vars), - ComponentSet(xfrmed_blk.x_active[i] for i in xfrmed_blk.valid_values)) + ComponentSet(xfrmed_blk.x_active[i] for i in xfrmed_blk.valid_values), + ) @unittest.skipIf(not glpk_available, 'GLPK not available') def test_bilinear_in_disjuncts(self): @@ -121,7 +138,8 @@ def test_bilinear_in_disjuncts(self): m.y = Var(RangeSet(4), domain=Binary) m.z = Var(domain=Integers, bounds=(-1, 2)) m.constr = Constraint( - expr=m.x[0] == m.y[1] + 2 * m.y[2] + m.y[3] + 2 * m.y[4] + m.z) + expr=m.x[0] == m.y[1] + 2 * m.y[2] + m.y[3] + 2 * m.y[4] + m.z + ) m.logical = ConstraintList() m.logical.add(expr=m.y[1] + m.y[2] == 1) m.logical.add(expr=m.y[3] + m.y[4] == 1) @@ -132,17 +150,19 @@ def test_bilinear_in_disjuncts(self): m.v[2].setlb(-4) m.v[2].setub(5) m.bilinear = Constraint( - expr=(m.x[0] - 3) * (m.v[1] + 2) - (m.v[2] + 4) * m.v[1] + - exp(m.v[1] ** 2) * m.x[0] <= m.v[2]) - m.disjctn = Disjunction(expr=[ - [m.x[0] * m.v[1] <= 4], - [m.x[0] * m.v[2] >= 6] - ]) + expr=(m.x[0] - 3) * (m.v[1] + 2) + - (m.v[2] + 4) * m.v[1] + + exp(m.v[1] ** 2) * m.x[0] + <= m.v[2] + ) + m.disjctn = Disjunction(expr=[[m.x[0] * m.v[1] <= 4], [m.x[0] * m.v[2] >= 6]]) TransformationFactory('contrib.induced_linearity').apply_to(m) self.assertEqual( - m.disjctn.disjuncts[0].constraint[1].body.polynomial_degree(), 1) + m.disjctn.disjuncts[0].constraint[1].body.polynomial_degree(), 1 + ) self.assertEqual( - m.disjctn.disjuncts[1].constraint[1].body.polynomial_degree(), 1) + m.disjctn.disjuncts[1].constraint[1].body.polynomial_degree(), 1 + ) @unittest.skipIf(not glpk_available, 'GLPK not available') def test_induced_linear_in_disjunct(self): @@ -154,20 +174,21 @@ def test_induced_linear_in_disjunct(self): m.v = Var([1]) m.v[1].setlb(-2) m.v[1].setub(7) - m.bilinear_outside = Constraint( - expr=m.x[0] * m.v[1] >= 2) - m.disjctn = Disjunction(expr=[ - [m.x[0] * m.v[1] == 3, - 2 * m.x[0] == m.y[1] + m.y[2]], - [m.x[0] * m.v[1] == 4] - ]) + m.bilinear_outside = Constraint(expr=m.x[0] * m.v[1] >= 2) + m.disjctn = Disjunction( + expr=[ + [m.x[0] * m.v[1] == 3, 2 * m.x[0] == m.y[1] + m.y[2]], + [m.x[0] * m.v[1] == 4], + ] + ) TransformationFactory('contrib.induced_linearity').apply_to(m) self.assertEqual( - m.disjctn.disjuncts[0].constraint[1].body.polynomial_degree(), 1) + m.disjctn.disjuncts[0].constraint[1].body.polynomial_degree(), 1 + ) + self.assertEqual(m.bilinear_outside.body.polynomial_degree(), 2) self.assertEqual( - m.bilinear_outside.body.polynomial_degree(), 2) - self.assertEqual( - m.disjctn.disjuncts[1].constraint[1].body.polynomial_degree(), 2) + m.disjctn.disjuncts[1].constraint[1].body.polynomial_degree(), 2 + ) if __name__ == '__main__': diff --git a/pyomo/contrib/preprocessing/tests/test_init_vars.py b/pyomo/contrib/preprocessing/tests/test_init_vars.py index 6226a99cbb5..f65773f7dbb 100644 --- a/pyomo/contrib/preprocessing/tests/test_init_vars.py +++ b/pyomo/contrib/preprocessing/tests/test_init_vars.py @@ -1,7 +1,6 @@ """Tests initialization of uninitialized variables.""" import pyomo.common.unittest as unittest -from pyomo.environ import (ConcreteModel, TransformationFactory, - value, Var) +from pyomo.environ import ConcreteModel, TransformationFactory, value, Var class TestInitVars(unittest.TestCase): @@ -30,8 +29,7 @@ def test_midpoint_var_init(self): self.assertEqual(value(m.v5), 2) self.assertEqual(value(m.v6), 3) - TransformationFactory('contrib.init_vars_midpoint').apply_to( - m, overwrite=True) + TransformationFactory('contrib.init_vars_midpoint').apply_to(m, overwrite=True) self.assertEqual(value(m.v1), 0) self.assertEqual(value(m.v2), 2) self.assertEqual(value(m.v3), 2) @@ -62,8 +60,7 @@ def test_zero_var_init(self): self.assertEqual(value(m.v5), 2) self.assertEqual(value(m.v6), 3) - TransformationFactory('contrib.init_vars_zero').apply_to( - m, overwrite=True) + TransformationFactory('contrib.init_vars_zero').apply_to(m, overwrite=True) self.assertEqual(value(m.v1), 0) self.assertEqual(value(m.v2), 2) self.assertEqual(value(m.v3), -2) diff --git a/pyomo/contrib/preprocessing/tests/test_int_to_binary.py b/pyomo/contrib/preprocessing/tests/test_int_to_binary.py index c7464b23187..bb75a075592 100644 --- a/pyomo/contrib/preprocessing/tests/test_int_to_binary.py +++ b/pyomo/contrib/preprocessing/tests/test_int_to_binary.py @@ -18,6 +18,7 @@ import logging from io import StringIO + class TestIntToBinary(unittest.TestCase): """Tests integer to binary variable reformulation.""" @@ -27,7 +28,10 @@ def test_int_to_binary(self): output = StringIO() with LoggingIntercept(output, 'pyomo.contrib.preprocessing', logging.INFO): xfrm('contrib.integer_to_binary').apply_to(m) - self.assertIn("Reformulating integer variables using the base2 strategy.", output.getvalue()) + self.assertIn( + "Reformulating integer variables using the base2 strategy.", + output.getvalue(), + ) reform_blk = m._int_to_binary_reform self.assertEqual(len(reform_blk.int_var_set), 1) reform_blk.new_binary_var[0, 0].value = 1 diff --git a/pyomo/contrib/preprocessing/tests/test_strip_bounds.py b/pyomo/contrib/preprocessing/tests/test_strip_bounds.py index 77a8852fcb4..deb1b6c8b37 100644 --- a/pyomo/contrib/preprocessing/tests/test_strip_bounds.py +++ b/pyomo/contrib/preprocessing/tests/test_strip_bounds.py @@ -1,8 +1,16 @@ """Tests stripping of variable bounds.""" import pyomo.common.unittest as unittest -from pyomo.environ import (Binary, ConcreteModel, Integers, NonNegativeReals, - PositiveReals, Reals, TransformationFactory, Var) +from pyomo.environ import ( + Binary, + ConcreteModel, + Integers, + NonNegativeReals, + PositiveReals, + Reals, + TransformationFactory, + Var, +) class TestStripBounds(unittest.TestCase): diff --git a/pyomo/contrib/preprocessing/tests/test_var_aggregator.py b/pyomo/contrib/preprocessing/tests/test_var_aggregator.py index a32169a8ad0..d44f8abdeb2 100644 --- a/pyomo/contrib/preprocessing/tests/test_var_aggregator.py +++ b/pyomo/contrib/preprocessing/tests/test_var_aggregator.py @@ -5,11 +5,18 @@ _build_equality_set, _get_equality_linked_variables, max_if_not_None, - min_if_not_None + min_if_not_None, +) +from pyomo.environ import ( + ConcreteModel, + Constraint, + ConstraintList, + Objective, + RangeSet, + SolverFactory, + TransformationFactory, + Var, ) -from pyomo.environ import (ConcreteModel, Constraint, ConstraintList, - Objective, RangeSet, SolverFactory, - TransformationFactory, Var) class TestVarAggregate(unittest.TestCase): @@ -78,7 +85,7 @@ def test_fixed_var_out_of_bounds_ub(self): with self.assertRaises(ValueError): TransformationFactory('contrib.aggregate_vars').apply_to(m) - def test_do_not_tranform_deactivated_constraints(self): + def test_do_not_transform_deactivated_constraints(self): m = ConcreteModel() m.x = Var() m.y = Var() @@ -113,17 +120,17 @@ def test_equality_set(self): self.assertEqual(eq_var_map[m.v3], ComponentSet([m.v3, m.v4])) self.assertEqual(eq_var_map[m.v4], ComponentSet([m.v3, m.v4])) self.assertEqual( - eq_var_map[m.x[1]], - ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]])) + eq_var_map[m.x[1]], ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]]) + ) self.assertEqual( - eq_var_map[m.x[2]], - ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]])) + eq_var_map[m.x[2]], ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]]) + ) self.assertEqual( - eq_var_map[m.x[3]], - ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]])) + eq_var_map[m.x[3]], ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]]) + ) self.assertEqual( - eq_var_map[m.x[4]], - ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]])) + eq_var_map[m.x[4]], ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]]) + ) self.assertEqual(eq_var_map[m.y[1]], ComponentSet([m.y[1], m.y[2]])) self.assertEqual(eq_var_map[m.y[2]], ComponentSet([m.y[1], m.y[2]])) @@ -135,14 +142,11 @@ def test_var_aggregate(self): z_to_vars = m._var_aggregator_info.z_to_vars var_to_z = m._var_aggregator_info.var_to_z z = m._var_aggregator_info.z + self.assertEqual(z_to_vars[z[1]], ComponentSet([m.v3, m.v4])) self.assertEqual( - z_to_vars[z[1]], ComponentSet([m.v3, m.v4])) - self.assertEqual( - z_to_vars[z[2]], - ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]])) - self.assertEqual( - z_to_vars[z[3]], - ComponentSet([m.y[1], m.y[2]])) + z_to_vars[z[2]], ComponentSet([m.x[1], m.x[2], m.x[3], m.x[4]]) + ) + self.assertEqual(z_to_vars[z[3]], ComponentSet([m.y[1], m.y[2]])) self.assertIs(var_to_z[m.v3], z[1]) self.assertIs(var_to_z[m.v4], z[1]) self.assertIs(var_to_z[m.x[1]], z[2]) @@ -174,8 +178,9 @@ def test_max_if_not_None(self): self.assertEqual(max_if_not_None([0]), 0) self.assertEqual(max_if_not_None([0, None]), 0) - @unittest.skipIf(not SolverFactory('glpk').available(), - "GLPK solver is not available.") + @unittest.skipIf( + not SolverFactory('glpk').available(), "GLPK solver is not available." + ) def test_var_update(self): m = ConcreteModel() m.x = Var() diff --git a/pyomo/contrib/preprocessing/tests/test_zero_sum_propagate.py b/pyomo/contrib/preprocessing/tests/test_zero_sum_propagate.py index c71e34be823..e5dc132628b 100644 --- a/pyomo/contrib/preprocessing/tests/test_zero_sum_propagate.py +++ b/pyomo/contrib/preprocessing/tests/test_zero_sum_propagate.py @@ -1,7 +1,14 @@ """Tests the zero sum propagation module.""" import pyomo.common.unittest as unittest -from pyomo.environ import (ConcreteModel, Constraint, TransformationFactory, - Var, NonNegativeReals, NonPositiveReals, Binary) +from pyomo.environ import ( + ConcreteModel, + Constraint, + TransformationFactory, + Var, + NonNegativeReals, + NonPositiveReals, + Binary, +) class TestZeroSumPropagate(unittest.TestCase): diff --git a/pyomo/contrib/preprocessing/tests/test_zero_term_removal.py b/pyomo/contrib/preprocessing/tests/test_zero_term_removal.py index a5644c1daa2..7ff40b6ae32 100644 --- a/pyomo/contrib/preprocessing/tests/test_zero_term_removal.py +++ b/pyomo/contrib/preprocessing/tests/test_zero_term_removal.py @@ -1,8 +1,7 @@ """Tests detection of zero terms.""" import pyomo.common.unittest as unittest -from pyomo.environ import (ConcreteModel, Constraint, TransformationFactory, - Var) -from pyomo.core.expr import current as EXPR +from pyomo.environ import ConcreteModel, Constraint, TransformationFactory, Var +import pyomo.core.expr as EXPR from pyomo.repn import generate_standard_repn @@ -25,21 +24,25 @@ def test_zero_term_removal(self): TransformationFactory('contrib.remove_zero_terms').apply_to(m) m.v1.unfix() # Check that the term no longer exists - self.assertFalse(any(id(m.v1) == id(v) - for v in EXPR.identify_variables(m.c.body))) - self.assertFalse(any(id(m.v1) == id(v) - for v in EXPR.identify_variables(m.c2.body))) - self.assertFalse(any(id(m.v1) == id(v) - for v in EXPR.identify_variables(m.c3.body))) - self.assertFalse(any(id(m.v1) == id(v) - for v in EXPR.identify_variables(m.c4.body))) + self.assertFalse( + any(id(m.v1) == id(v) for v in EXPR.identify_variables(m.c.body)) + ) + self.assertFalse( + any(id(m.v1) == id(v) for v in EXPR.identify_variables(m.c2.body)) + ) + self.assertFalse( + any(id(m.v1) == id(v) for v in EXPR.identify_variables(m.c3.body)) + ) + self.assertFalse( + any(id(m.v1) == id(v) for v in EXPR.identify_variables(m.c4.body)) + ) def test_trivial_constraints_skipped(self): m = ConcreteModel() m.x = Var() m.y = Var() m.z = Var() - m.c = Constraint(expr=(m.x + m.y)*m.z >= 8) + m.c = Constraint(expr=(m.x + m.y) * m.z >= 8) m.z.fix(0) TransformationFactory('contrib.remove_zero_terms').apply_to(m) m.z.unfix() @@ -56,5 +59,6 @@ def test_trivial_constraints_skipped(self): self.assertIs(repn.quadratic_vars[1][1], m.z) self.assertEqual(repn.constant, 0) + if __name__ == '__main__': unittest.main() diff --git a/pyomo/contrib/preprocessing/util.py b/pyomo/contrib/preprocessing/util.py index 5f53b984888..69182f56656 100644 --- a/pyomo/contrib/preprocessing/util.py +++ b/pyomo/contrib/preprocessing/util.py @@ -18,4 +18,5 @@ class SuppressConstantObjectiveWarning(LoggingIntercept): def __init__(self): super(SuppressConstantObjectiveWarning, self).__init__( - StringIO(), 'pyomo.core', logging.WARNING) + StringIO(), 'pyomo.core', logging.WARNING + ) diff --git a/pyomo/contrib/pynumero/algorithms/__init__.py b/pyomo/contrib/pynumero/algorithms/__init__.py index 9320e403e95..d93cfd77b3c 100644 --- a/pyomo/contrib/pynumero/algorithms/__init__.py +++ b/pyomo/contrib/pynumero/algorithms/__init__.py @@ -8,4 +8,3 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - diff --git a/pyomo/contrib/pynumero/algorithms/solvers/cyipopt_solver.py b/pyomo/contrib/pynumero/algorithms/solvers/cyipopt_solver.py index 2e7f798254c..766ef96322a 100644 --- a/pyomo/contrib/pynumero/algorithms/solvers/cyipopt_solver.py +++ b/pyomo/contrib/pynumero/algorithms/solvers/cyipopt_solver.py @@ -9,16 +9,11 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ """ -The cyipopt_solver module includes the python interface to the -Cythonized ipopt solver cyipopt (see more: -https://github.com/mechmotum/cyipopt.git). To use the solver, -you can create a derived implementation from the abstract base class -CyIpoptProblemInterface that provides the necessary methods. - -Note: This module also includes a default implementation CyIpopt -that works with problems derived from AslNLP as long as those -classes return numpy ndarray objects for the vectors and coo_matrix -objects for the matrices (e.g., AmplNLP and PyomoNLP) +The cyipopt_solver module includes two solvers that call CyIpopt. One, +CyIpoptSolver, is a solver that operates on a CyIpoptProblemInterface +(such as CyIpoptNLP). The other, PyomoCyIpoptSolver, operates directly on a +Pyomo model. + """ import io import sys @@ -26,369 +21,162 @@ import os import abc -from pyomo.common.dependencies import ( - attempt_import, - numpy as np, numpy_available, -) +from pyomo.common.deprecation import relocated_module_attribute +from pyomo.common.dependencies import attempt_import, numpy as np, numpy_available from pyomo.common.tee import redirect_fd, TeeStream -def _cyipopt_importer(): - import cyipopt - # cyipopt before version 1.0.3 called the problem class "Problem" - if not hasattr(cyipopt, 'Problem'): - cyipopt.Problem = cyipopt.problem - # cyipopt before version 1.0.3 put the __version__ flag in the ipopt - # module (which was deprecated starting in 1.0.3) - if not hasattr(cyipopt, '__version__'): - import ipopt - cyipopt.__version__ = ipopt.__version__ - # Beginning in 1.0.3, STATUS_MESSAGES is in a separate - # ipopt_wrapper module - if not hasattr(cyipopt, 'STATUS_MESSAGES'): - import ipopt_wrapper - cyipopt.STATUS_MESSAGES = ipopt_wrapper.STATUS_MESSAGES - return cyipopt - -cyipopt, cyipopt_available = attempt_import( - 'ipopt', - error_message='cyipopt solver relies on the ipopt module from cyipopt. ' - 'See https://github.com/mechmotum/cyipopt.git for cyipopt ' - 'installation instructions.', - importer=_cyipopt_importer, -) - # Because pynumero.interfaces requires numpy, we will leverage deferred # imports here so that the solver can be registered even when numpy is # not available. -pyomo_nlp = attempt_import('pyomo.contrib.pynumero.interfaces.pyomo_nlp')[0] -pyomo_grey_box = attempt_import('pyomo.contrib.pynumero.interfaces.pyomo_grey_box_nlp')[0] -egb = attempt_import('pyomo.contrib.pynumero.interfaces.external_grey_box')[0] +pyomo_nlp = attempt_import("pyomo.contrib.pynumero.interfaces.pyomo_nlp")[0] +pyomo_grey_box = attempt_import("pyomo.contrib.pynumero.interfaces.pyomo_grey_box_nlp")[ + 0 +] +egb = attempt_import("pyomo.contrib.pynumero.interfaces.external_grey_box")[0] + +# Defer this import so that importing this module (PyomoCyIpoptSolver in +# particular) does not rely on an attempted cyipopt import. +cyipopt_interface, _ = attempt_import( + "pyomo.contrib.pynumero.interfaces.cyipopt_interface" +) + +# These attributes should no longer be imported from this module. These +# deprecation paths provide a deferred import to these attributes so (a) they +# can still be used until these paths are removed, and (b) the imports are not +# triggered when this module is imported. +relocated_module_attribute( + "cyipopt_available", + "pyomo.contrib.pynumero.interfaces.cyipopt_interface.cyipopt_available", + "6.6.0", +) +relocated_module_attribute( + "CyIpoptProblemInterface", + "pyomo.contrib.pynumero.interfaces.cyipopt_interface.CyIpoptProblemInterface", + "6.6.0", +) +relocated_module_attribute( + "CyIpoptNLP", + "pyomo.contrib.pynumero.interfaces.cyipopt_interface.CyIpoptNLP", + "6.6.0", +) from pyomo.common.config import ConfigBlock, ConfigValue from pyomo.common.timing import TicTocTimer from pyomo.core.base import Block, Objective, minimize -from pyomo.opt import ( - SolverStatus, SolverResults, TerminationCondition, ProblemSense -) +from pyomo.opt import SolverStatus, SolverResults, TerminationCondition, ProblemSense +from pyomo.opt.results.solution import Solution logger = logging.getLogger(__name__) # This maps the cyipopt STATUS_MESSAGES back to string representations # of the Ipopt ApplicationReturnStatus enum _cyipopt_status_enum = [ - "Solve_Succeeded", (b"Algorithm terminated successfully at a locally " - b"optimal point, satisfying the convergence tolerances " - b"(can be specified by options)."), - "Solved_To_Acceptable_Level", (b"Algorithm stopped at a point that was " - b"converged, not to \"desired\" tolerances, " - b"but to \"acceptable\" tolerances (see the " - b"acceptable-... options)."), - "Infeasible_Problem_Detected", (b"Algorithm converged to a point of local " - b"infeasibility. Problem may be " - b"infeasible."), - "Search_Direction_Becomes_Too_Small", (b"Algorithm proceeds with very " - b"little progress."), - "Diverging_Iterates", b"It seems that the iterates diverge.", - "User_Requested_Stop", (b"The user call-back function intermediate_callback " - b"(see Section 3.3.4 in the documentation) returned " - b"false, i.e., the user code requested a premature " - b"termination of the optimization."), - "Feasible_Point_Found", b"Feasible point for square problem found.", - "Maximum_Iterations_Exceeded", (b"Maximum number of iterations exceeded " - b"(can be specified by an option)."), - "Restoration_Failed", (b"Restoration phase failed, algorithm doesn\'t know " - b"how to proceed."), - "Error_In_Step_Computation", (b"An unrecoverable error occurred while Ipopt " - b"tried to compute the search direction."), - "Maximum_CpuTime_Exceeded", b"Maximum CPU time exceeded.", - "Not_Enough_Degrees_Of_Freedom", b"Problem has too few degrees of freedom.", - "Invalid_Problem_Definition", b"Invalid problem definition.", - "Invalid_Option", b"Invalid option encountered.", - "Invalid_Number_Detected", (b"Algorithm received an invalid number (such as " - b"NaN or Inf) from the NLP; see also option " - b"check_derivatives_for_naninf."), + "Solve_Succeeded", + ( + b"Algorithm terminated successfully at a locally " + b"optimal point, satisfying the convergence tolerances " + b"(can be specified by options)." + ), + "Solved_To_Acceptable_Level", + ( + b"Algorithm stopped at a point that was " + b'converged, not to "desired" tolerances, ' + b'but to "acceptable" tolerances (see the ' + b"acceptable-... options)." + ), + "Infeasible_Problem_Detected", + ( + b"Algorithm converged to a point of local " + b"infeasibility. Problem may be " + b"infeasible." + ), + "Search_Direction_Becomes_Too_Small", + (b"Algorithm proceeds with very little progress."), + "Diverging_Iterates", + b"It seems that the iterates diverge.", + "User_Requested_Stop", + ( + b"The user call-back function intermediate_callback " + b"(see Section 3.3.4 in the documentation) returned " + b"false, i.e., the user code requested a premature " + b"termination of the optimization." + ), + "Feasible_Point_Found", + b"Feasible point for square problem found.", + "Maximum_Iterations_Exceeded", + (b"Maximum number of iterations exceeded (can be specified by an option)."), + "Restoration_Failed", + (b"Restoration phase failed, algorithm doesn't know how to proceed."), + "Error_In_Step_Computation", + ( + b"An unrecoverable error occurred while Ipopt " + b"tried to compute the search direction." + ), + "Maximum_CpuTime_Exceeded", + b"Maximum CPU time exceeded.", + "Not_Enough_Degrees_Of_Freedom", + b"Problem has too few degrees of freedom.", + "Invalid_Problem_Definition", + b"Invalid problem definition.", + "Invalid_Option", + b"Invalid option encountered.", + "Invalid_Number_Detected", + ( + b"Algorithm received an invalid number (such as " + b"NaN or Inf) from the NLP; see also option " + b"check_derivatives_for_naninf." + ), # Note that the concluding "." was missing before cyipopt 1.0.3 - "Invalid_Number_Detected", (b"Algorithm received an invalid number (such as " - b"NaN or Inf) from the NLP; see also option " - b"check_derivatives_for_naninf"), - "Unrecoverable_Exception", b"Some uncaught Ipopt exception encountered.", - "NonIpopt_Exception_Thrown", b"Unknown Exception caught in Ipopt.", + "Invalid_Number_Detected", + ( + b"Algorithm received an invalid number (such as " + b"NaN or Inf) from the NLP; see also option " + b"check_derivatives_for_naninf" + ), + "Unrecoverable_Exception", + b"Some uncaught Ipopt exception encountered.", + "NonIpopt_Exception_Thrown", + b"Unknown Exception caught in Ipopt.", # Note that the concluding "." was missing before cyipopt 1.0.3 - "NonIpopt_Exception_Thrown", b"Unknown Exception caught in Ipopt", - "Insufficient_Memory", b"Not enough memory.", - "Internal_Error", (b"An unknown internal error occurred. Please contact " - b"the Ipopt authors through the mailing list."), + "NonIpopt_Exception_Thrown", + b"Unknown Exception caught in Ipopt", + "Insufficient_Memory", + b"Not enough memory.", + "Internal_Error", + ( + b"An unknown internal error occurred. Please contact " + b"the Ipopt authors through the mailing list." + ), ] _cyipopt_status_enum = { - _cyipopt_status_enum[i+1]: _cyipopt_status_enum[i] + _cyipopt_status_enum[i + 1]: _cyipopt_status_enum[i] for i in range(0, len(_cyipopt_status_enum), 2) } # This maps Ipopt ApplicationReturnStatus enum strings to an appropriate # Pyomo TerminationCondition _ipopt_term_cond = { - 'Solve_Succeeded': TerminationCondition.optimal, - 'Solved_To_Acceptable_Level': TerminationCondition.feasible, - 'Infeasible_Problem_Detected': TerminationCondition.infeasible, - 'Search_Direction_Becomes_Too_Small': TerminationCondition.minStepLength, - 'Diverging_Iterates': TerminationCondition.unbounded, - 'User_Requested_Stop': TerminationCondition.userInterrupt, - 'Feasible_Point_Found': TerminationCondition.feasible, - 'Maximum_Iterations_Exceeded': TerminationCondition.maxIterations, - 'Restoration_Failed': TerminationCondition.noSolution, - 'Error_In_Step_Computation': TerminationCondition.solverFailure, - 'Maximum_CpuTime_Exceeded': TerminationCondition.maxTimeLimit, - 'Not_Enough_Degrees_Of_Freedom': TerminationCondition.invalidProblem, - 'Invalid_Problem_Definition': TerminationCondition.invalidProblem, - 'Invalid_Option': TerminationCondition.error, - 'Invalid_Number_Detected': TerminationCondition.internalSolverError, - 'Unrecoverable_Exception': TerminationCondition.internalSolverError, - 'NonIpopt_Exception_Thrown': TerminationCondition.error, - 'Insufficient_Memory': TerminationCondition.resourceInterrupt, - 'Internal_Error': TerminationCondition.internalSolverError, + "Solve_Succeeded": TerminationCondition.optimal, + "Solved_To_Acceptable_Level": TerminationCondition.feasible, + "Infeasible_Problem_Detected": TerminationCondition.infeasible, + "Search_Direction_Becomes_Too_Small": TerminationCondition.minStepLength, + "Diverging_Iterates": TerminationCondition.unbounded, + "User_Requested_Stop": TerminationCondition.userInterrupt, + "Feasible_Point_Found": TerminationCondition.feasible, + "Maximum_Iterations_Exceeded": TerminationCondition.maxIterations, + "Restoration_Failed": TerminationCondition.noSolution, + "Error_In_Step_Computation": TerminationCondition.solverFailure, + "Maximum_CpuTime_Exceeded": TerminationCondition.maxTimeLimit, + "Not_Enough_Degrees_Of_Freedom": TerminationCondition.invalidProblem, + "Invalid_Problem_Definition": TerminationCondition.invalidProblem, + "Invalid_Option": TerminationCondition.error, + "Invalid_Number_Detected": TerminationCondition.internalSolverError, + "Unrecoverable_Exception": TerminationCondition.internalSolverError, + "NonIpopt_Exception_Thrown": TerminationCondition.error, + "Insufficient_Memory": TerminationCondition.resourceInterrupt, + "Internal_Error": TerminationCondition.internalSolverError, } -class CyIpoptProblemInterface(object, metaclass=abc.ABCMeta): - @abc.abstractmethod - def x_init(self): - """Return the initial values for x as a numpy ndarray - """ - pass - - @abc.abstractmethod - def x_lb(self): - """Return the lower bounds on x as a numpy ndarray - """ - pass - - @abc.abstractmethod - def x_ub(self): - """Return the upper bounds on x as a numpy ndarray - """ - pass - - @abc.abstractmethod - def g_lb(self): - """Return the lower bounds on the constraints as a numpy ndarray - """ - pass - - @abc.abstractmethod - def g_ub(self): - """Return the upper bounds on the constraints as a numpy ndarray - """ - pass - - @abc.abstractmethod - def scaling_factors(self): - """Return the values for scaling factors as a tuple - (objective_scaling, x_scaling, g_scaling). Return None - if the scaling factors are to be ignored - """ - pass - - @abc.abstractmethod - def objective(self, x): - """Return the value of the objective - function evaluated at x - """ - pass - - @abc.abstractmethod - def gradient(self, x): - """Return the gradient of the objective - function evaluated at x as a numpy ndarray - """ - pass - - @abc.abstractmethod - def constraints(self, x): - """Return the residuals of the constraints - evaluated at x as a numpy ndarray - """ - pass - - @abc.abstractmethod - def jacobianstructure(self): - """Return the structure of the jacobian - in coordinate format. That is, return (rows,cols) - where rows and cols are both numpy ndarray - objects that contain the row and column indices - for each of the nonzeros in the jacobian. - """ - pass - - @abc.abstractmethod - def jacobian(self, x): - """Return the values for the jacobian evaluated at x - as a numpy ndarray of nonzero values corresponding - to the rows and columns specified in the jacobianstructure - """ - pass - - @abc.abstractmethod - def hessianstructure(self): - """Return the structure of the hessian - in coordinate format. That is, return (rows,cols) - where rows and cols are both numpy ndarray - objects that contain the row and column indices - for each of the nonzeros in the hessian. - Note: return ONLY the lower diagonal of this symmetric matrix. - """ - pass - - @abc.abstractmethod - def hessian(self, x, y, obj_factor): - """Return the values for the hessian evaluated at x - as a numpy ndarray of nonzero values corresponding - to the rows and columns specified in the - hessianstructure method. - Note: return ONLY the lower diagonal of this symmetric matrix. - """ - pass - - def intermediate(self, alg_mod, iter_count, obj_value, - inf_pr, inf_du, mu, d_norm, regularization_size, - alpha_du, alpha_pr, ls_trials): - """Callback that can be used to examine or report intermediate - results. This method is called each iteration - """ - # TODO: Document the arguments - pass - - -class CyIpoptNLP(CyIpoptProblemInterface): - def __init__(self, nlp, intermediate_callback=None): - """This class provides a CyIpoptProblemInterface for use - with the CyIpoptSolver class that can take in an NLP - as long as it provides vectors as numpy ndarrays and - matrices as scipy.sparse.coo_matrix objects. This class - provides the interface between AmplNLP or PyomoNLP objects - and the CyIpoptSolver - """ - self._nlp = nlp - self._intermediate_callback = intermediate_callback - - x = nlp.init_primals() - y = nlp.init_duals() - if np.any(np.isnan(y)): - # did not get initial values for y, use this default - y.fill(1.0) - - self._cached_x = x.copy() - self._cached_y = y.copy() - self._cached_obj_factor = 1.0 - - nlp.set_primals(self._cached_x) - nlp.set_duals(self._cached_y) - - # get jacobian and hessian structures - self._jac_g = nlp.evaluate_jacobian() - try: - self._hess_lag = nlp.evaluate_hessian_lag() - self._hess_lower_mask = self._hess_lag.row >= self._hess_lag.col - self._hessian_available = True - except (AttributeError, NotImplementedError): - self._hessian_available = False - self._hess_lag = None - self._hess_lower_mask = None - - def _set_primals_if_necessary(self, x): - if not np.array_equal(x, self._cached_x): - self._nlp.set_primals(x) - self._cached_x = x.copy() - - def _set_duals_if_necessary(self, y): - if not np.array_equal(y, self._cached_y): - self._nlp.set_duals(y) - self._cached_y = y.copy() - - def _set_obj_factor_if_necessary(self, obj_factor): - if obj_factor != self._cached_obj_factor: - self._nlp.set_obj_factor(obj_factor) - self._cached_obj_factor = obj_factor - - def x_init(self): - return self._nlp.init_primals() - - def x_lb(self): - return self._nlp.primals_lb() - - def x_ub(self): - return self._nlp.primals_ub() - - def g_lb(self): - return self._nlp.constraints_lb() - - def g_ub(self): - return self._nlp.constraints_ub() - - def scaling_factors(self): - obj_scaling = self._nlp.get_obj_scaling() - x_scaling = self._nlp.get_primals_scaling() - g_scaling = self._nlp.get_constraints_scaling() - return obj_scaling, x_scaling, g_scaling - - def objective(self, x): - self._set_primals_if_necessary(x) - return self._nlp.evaluate_objective() - - def gradient(self, x): - self._set_primals_if_necessary(x) - return self._nlp.evaluate_grad_objective() - - def constraints(self, x): - self._set_primals_if_necessary(x) - return self._nlp.evaluate_constraints() - - def jacobianstructure(self): - return self._jac_g.row, self._jac_g.col - - def jacobian(self, x): - self._set_primals_if_necessary(x) - self._nlp.evaluate_jacobian(out=self._jac_g) - return self._jac_g.data - - def hessianstructure(self): - if not self._hessian_available: - return np.zeros(0), np.zeros(0) - - row = np.compress(self._hess_lower_mask, self._hess_lag.row) - col = np.compress(self._hess_lower_mask, self._hess_lag.col) - return row, col - - - def hessian(self, x, y, obj_factor): - if not self._hessian_available: - raise ValueError("Hessian requested, but not supported by the NLP") - - self._set_primals_if_necessary(x) - self._set_duals_if_necessary(y) - self._set_obj_factor_if_necessary(obj_factor) - self._nlp.evaluate_hessian_lag(out=self._hess_lag) - data = np.compress(self._hess_lower_mask, self._hess_lag.data) - return data - - def intermediate( - self, - alg_mod, - iter_count, - obj_value, - inf_pr, - inf_du, - mu, - d_norm, - regularization_size, - alpha_du, - alpha_pr, - ls_trials - ): - if self._intermediate_callback is not None: - return self._intermediate_callback(self._nlp, alg_mod, iter_count, obj_value, - inf_pr, inf_du, mu, d_norm, regularization_size, - alpha_du, alpha_pr, ls_trials) - return True - class CyIpoptSolver(object): def __init__(self, problem_interface, options=None): @@ -407,27 +195,10 @@ def __init__(self, problem_interface, options=None): self._options = dict() def solve(self, x0=None, tee=False): - xl = self._problem.x_lb() - xu = self._problem.x_ub() - gl = self._problem.g_lb() - gu = self._problem.g_ub() - if x0 is None: x0 = self._problem.x_init() xstart = x0 - - nx = len(xstart) - ng = len(gl) - - cyipopt_solver = cyipopt.Problem( - n=nx, - m=ng, - problem_obj=self._problem, - lb=xl, - ub=xu, - cl=gl, - cu=gu - ) + cyipopt_solver = self._problem # check if we need scaling obj_scaling, x_scaling, g_scaling = self._problem.scaling_factors() @@ -458,7 +229,7 @@ def solve(self, x0=None, tee=False): # We preemptively set up the TeeStream, even if we aren't # going to use it: the implementation is such that the # context manager does nothing (i.e., doesn't start up any - # processing threads) until afer a client accesses + # processing threads) until after a client accesses # STDOUT/STDERR with TeeStream(sys.stdout) as _teeStream: if tee: @@ -479,36 +250,45 @@ def solve(self, x0=None, tee=False): def _numpy_vector(val): ans = np.array(val, np.float64) if len(ans.shape) != 1: - raise ValueError("expected a vector, but recieved a matrix " - "with shape %s" % (ans.shape,)) + raise ValueError( + "expected a vector, but received a matrix with shape %s" % (ans.shape,) + ) return ans class PyomoCyIpoptSolver(object): - CONFIG = ConfigBlock("cyipopt") - CONFIG.declare("tee", ConfigValue( - default=False, - domain=bool, - description="Stream solver output to console", - )) - CONFIG.declare("load_solutions", ConfigValue( - default=True, - domain=bool, - description="Store the final solution into the original Pyomo model", - )) - CONFIG.declare("return_nlp", ConfigValue( - default=False, - domain=bool, - description="Return the results object and the underlying nlp" - " NLP object from the solve call.", - )) + CONFIG.declare( + "tee", + ConfigValue( + default=False, domain=bool, description="Stream solver output to console" + ), + ) + CONFIG.declare( + "load_solutions", + ConfigValue( + default=True, + domain=bool, + description="Store the final solution into the original Pyomo model", + ), + ) + CONFIG.declare( + "return_nlp", + ConfigValue( + default=False, + domain=bool, + description="Return the results object and the underlying nlp" + " NLP object from the solve call.", + ), + ) CONFIG.declare("options", ConfigBlock(implicit=True)) - CONFIG.declare("intermediate_callback", ConfigValue( - default=None, - description="Set the function that will be called each" - " iteration." - )) + CONFIG.declare( + "intermediate_callback", + ConfigValue( + default=None, + description="Set the function that will be called each iteration.", + ), + ) def __init__(self, **kwds): """Create an instance of the CyIpoptSolver. You must @@ -524,50 +304,39 @@ def _set_model(self, model): self._model = model def available(self, exception_flag=False): - return bool(numpy_available and cyipopt_available) + return bool(numpy_available and cyipopt_interface.cyipopt_available) def license_is_valid(self): return True def version(self): - return tuple(int(_) for _ in cyipopt.__version__.split('.')) + return tuple(int(_) for _ in cyipopt.__version__.split(".")) def solve(self, model, **kwds): config = self.config(kwds, preserve_implicit=True) if not isinstance(model, Block): - raise ValueError("PyomoCyIpoptSolver.solve(model): model " - "must be a Pyomo Block") + raise ValueError( + "PyomoCyIpoptSolver.solve(model): model must be a Pyomo Block" + ) # If this is a Pyomo model / block, then we need to create # the appropriate PyomoNLP, then wrap it in a CyIpoptNLP - grey_box_blocks = list(model.component_data_objects( - egb.ExternalGreyBoxBlock, active=True)) + grey_box_blocks = list( + model.component_data_objects(egb.ExternalGreyBoxBlock, active=True) + ) if grey_box_blocks: # nlp = pyomo_nlp.PyomoGreyBoxNLP(model) nlp = pyomo_grey_box.PyomoNLPWithGreyBoxBlocks(model) else: nlp = pyomo_nlp.PyomoNLP(model) - problem = CyIpoptNLP(nlp, intermediate_callback=config.intermediate_callback) - - xl = problem.x_lb() - xu = problem.x_ub() - gl = problem.g_lb() - gu = problem.g_ub() - - nx = len(xl) - ng = len(gl) - - cyipopt_solver = cyipopt.Problem( - n=nx, - m=ng, - problem_obj=problem, - lb=xl, - ub=xu, - cl=gl, - cu=gu + problem = cyipopt_interface.CyIpoptNLP( + nlp, intermediate_callback=config.intermediate_callback ) + ng = len(problem.g_lb()) + nx = len(problem.x_lb()) + cyipopt_solver = problem # check if we need scaling obj_scaling, x_scaling, g_scaling = problem.scaling_factors() @@ -600,7 +369,7 @@ def solve(self, model, **kwds): # We preemptively set up the TeeStream, even if we aren't # going to use it: the implementation is such that the # context manager does nothing (i.e., doesn't start up any - # processing threads) until afer a client accesses + # processing threads) until after a client accesses # STDOUT/STDERR with TeeStream(sys.stdout) as _teeStream: if config.tee: @@ -627,31 +396,35 @@ def solve(self, model, **kwds): if config.load_solutions: nlp.set_primals(x) - nlp.set_duals(info['mult_g']) + nlp.set_duals(info["mult_g"]) nlp.load_state_into_pyomo( - bound_multipliers=(info['mult_x_L'], info['mult_x_U'])) + bound_multipliers=(info["mult_x_L"], info["mult_x_U"]) + ) else: - soln = results.solution.add() + soln = Solution() + sm = nlp.symbol_map soln.variable.update( - (i, {'Value':j, 'ipopt_zL_out': zl, 'ipopt_zU_out': zu}) - for i,j,zl,zu in zip( nlp.variable_names(), - x, - info['mult_x_L'], - info['mult_x_U'] ) + (sm.getSymbol(i), {'Value': j, 'ipopt_zL_out': zl, 'ipopt_zU_out': zu}) + for i, j, zl, zu in zip( + nlp.get_pyomo_variables(), x, info['mult_x_L'], info['mult_x_U'] + ) ) soln.constraint.update( - (i, {'Dual':j}) for i,j in zip( - nlp.constraint_names(), info['mult_g'])) - + (sm.getSymbol(i), {'Dual': j}) + for i, j in zip(nlp.get_pyomo_constraints(), info['mult_g']) + ) + model.solutions.add_symbol_map(sm) + results._smap_id = id(sm) + results.solution.insert(soln) results.problem.name = model.name obj = next(model.component_data_objects(Objective, active=True)) if obj.sense == minimize: results.problem.sense = ProblemSense.minimize - results.problem.upper_bound = info['obj_val'] + results.problem.upper_bound = info["obj_val"] else: results.problem.sense = ProblemSense.maximize - results.problem.lower_bound = info['obj_val'] + results.problem.lower_bound = info["obj_val"] results.problem.number_of_objectives = 1 results.problem.number_of_constraints = ng results.problem.number_of_variables = nx @@ -660,14 +433,17 @@ def solve(self, model, **kwds): results.problem.number_of_continuous_variables = nx # TODO: results.problem.number_of_nonzeros - results.solver.name = 'cyipopt' - results.solver.return_code = info['status'] - results.solver.message = info['status_msg'] + results.solver.name = "cyipopt" + results.solver.return_code = info["status"] + results.solver.message = info["status_msg"] results.solver.wallclock_time = wall_time - status_enum = _cyipopt_status_enum[info['status_msg']] + status_enum = _cyipopt_status_enum[info["status_msg"]] results.solver.termination_condition = _ipopt_term_cond[status_enum] results.solver.status = TerminationCondition.to_solver_status( - results.solver.termination_condition) + results.solver.termination_condition + ) + + problem.close() if config.return_nlp: return results, nlp diff --git a/pyomo/contrib/pynumero/algorithms/solvers/implicit_functions.py b/pyomo/contrib/pynumero/algorithms/solvers/implicit_functions.py new file mode 100644 index 00000000000..e0bc0170d33 --- /dev/null +++ b/pyomo/contrib/pynumero/algorithms/solvers/implicit_functions.py @@ -0,0 +1,618 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.collections import ComponentSet, ComponentMap +from pyomo.common.timing import HierarchicalTimer +from pyomo.common.dependencies import attempt_import, numpy as np +from pyomo.core.base.objective import Objective +from pyomo.core.base.suffix import Suffix +from pyomo.core.expr.visitor import identify_variables +from pyomo.util.calc_var_value import calculate_variable_from_constraint +from pyomo.util.subsystems import ( + TemporarySubsystemManager, + create_subsystem_block, + generate_subsystem_blocks, +) + +# Use attempt_import here due to unguarded NumPy import in these files +pyomo_nlp = attempt_import('pyomo.contrib.pynumero.interfaces.pyomo_nlp')[0] +nlp_proj = attempt_import('pyomo.contrib.pynumero.interfaces.nlp_projections')[0] +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptSolver +from pyomo.contrib.pynumero.interfaces.cyipopt_interface import CyIpoptNLP +from pyomo.contrib.pynumero.algorithms.solvers.scipy_solvers import ( + FsolveNlpSolver, + NewtonNlpSolver, + SecantNewtonNlpSolver, +) +from pyomo.contrib.incidence_analysis import IncidenceGraphInterface +from pyomo.contrib.incidence_analysis.scc_solver import ( + generate_strongly_connected_components, +) + + +class NlpSolverBase(object): + """A base class that solves an NLP object + + Subclasses should implement this interface for compatibility with + ImplicitFunctionSolver objects. + + """ + + def __init__(self, nlp, options=None, timer=None): + raise NotImplementedError( + "%s has not implemented the __init__ method" % type(self) + ) + + def solve(self, **kwds): + raise NotImplementedError( + "%s has not implemented the solve method" % type(self) + ) + + +class CyIpoptSolverWrapper(NlpSolverBase): + """A wrapper for CyIpoptNLP and CyIpoptSolver that implements the + NlpSolverBase API + + """ + + def __init__(self, nlp, options=None, timer=None): + self._cyipopt_nlp = CyIpoptNLP(nlp) + self._cyipopt_solver = CyIpoptSolver(self._cyipopt_nlp, options=options) + + def solve(self, **kwds): + return self._cyipopt_solver.solve(**kwds) + + +class ScipySolverWrapper(NlpSolverBase): + """A wrapper for SciPy NLP solvers that implements the NlpSolverBase API + + This solver uses scipy.optimize.fsolve for "vector-valued" NLPs (with more + than one primal variable and equality constraint) and the Secant-Newton + hybrid for "scalar-valued" NLPs. + + """ + + def __init__(self, nlp, timer=None, options=None): + if options is None: + options = {} + for key in options: + if ( + key not in SecantNewtonNlpSolver.OPTIONS + and key not in FsolveNlpSolver.OPTIONS + ): + raise ValueError( + "Option %s is invalid for both SecantNewtonNlpSolver and" + " FsolveNlpSolver" % key + ) + # Note that options currently contain the options for both solvers. + # There is currently no way to specify, e.g., different tolerances + # for the two solvers. This can be updated if there is demand for it. + newton_options = { + key: value + for key, value in options.items() + if key in SecantNewtonNlpSolver.OPTIONS + } + fsolve_options = { + key: value + for key, value in options.items() + if key in FsolveNlpSolver.OPTIONS + } + if nlp.n_primals() == 1: + solver = SecantNewtonNlpSolver(nlp, timer=timer, options=newton_options) + else: + solver = FsolveNlpSolver(nlp, timer=timer, options=fsolve_options) + self._nlp = nlp + self._options = options + self._solver = solver + + def solve(self, x0=None): + res = self._solver.solve(x0=x0) + return res + + +class PyomoImplicitFunctionBase(object): + """A base class defining an API for implicit functions defined using + Pyomo components. In particular, this is the API required by + ExternalPyomoModel. + + Implicit functions are defined by two lists of Pyomo VarData and + one list of Pyomo ConstraintData. The first list of VarData corresponds + to "variables" defining the outputs of the implicit function. + The list of ConstraintData are equality constraints that are converged + to evaluate the implicit function. The second list of VarData are + variables to be treated as "parameters" or inputs to the implicit + function. + + """ + + def __init__(self, variables, constraints, parameters): + """ + Arguments + --------- + variables: List of VarData + Variables to be treated as outputs of the implicit function + constraints: List of ConstraintData + Constraints that are converged to evaluate the implicit function + parameters: List of VarData + Variables to be treated as inputs to the implicit function + + """ + self._variables = variables + self._constraints = constraints + self._parameters = parameters + self._block_variables = variables + parameters + self._block = create_subsystem_block(constraints, self._block_variables) + + def get_variables(self): + return self._variables + + def get_constraints(self): + return self._constraints + + def get_parameters(self): + return self._parameters + + def get_block(self): + return self._block + + def set_parameters(self, values): + """Sets the parameters of the system that defines the implicit + function. + + This method does not necessarily need to update values of the Pyomo + variables, as long as the next evaluation of this implicit function + is consistent with these inputs. + + Arguments + --------- + values: NumPy array + Array of values to set for the "parameter variables" in the order + they were specified in the constructor + + """ + raise NotImplementedError() + + def evaluate_outputs(self): + """Returns the values of the variables that are treated as outputs + of the implicit function + + The returned values do not necessarily need to be the values stored + in the Pyomo variables, as long as they are consistent with the + latest parameters that have been set. + + Returns + ------- + NumPy array + Array with values corresponding to the "output variables" in + the order they were specified in the constructor + + """ + raise NotImplementedError() + + def update_pyomo_model(self): + """Sets values of "parameter variables" and "output variables" + to the most recent values set or computed in this implicit function + + """ + raise NotImplementedError() + + +class ImplicitFunctionSolver(PyomoImplicitFunctionBase): + """A basic implicit function solver that uses a ProjectedNLP to solve + the parameterized system without repeated file writes when parameters + are updated + + """ + + def __init__( + self, + variables, + constraints, + parameters, + solver_class=None, + solver_options=None, + timer=None, + ): + if timer is None: + timer = HierarchicalTimer() + self._timer = timer + if solver_options is None: + solver_options = {} + + self._timer.start("__init__") + + super().__init__(variables, constraints, parameters) + block = self.get_block() + + # PyomoNLP requires an objective + block._obj = Objective(expr=0.0) + # CyIpoptSolver requires a non-empty scaling factor + block.scaling_factor = Suffix(direction=Suffix.EXPORT) + block.scaling_factor[block._obj] = 1.0 + + self._timer.start("PyomoNLP") + self._nlp = pyomo_nlp.PyomoNLP(block) + self._timer.stop("PyomoNLP") + primals_ordering = [var.name for var in variables] + self._proj_nlp = nlp_proj.ProjectedExtendedNLP(self._nlp, primals_ordering) + + self._timer.start("NlpSolver") + if solver_class is None: + self._solver = ScipySolverWrapper( + self._proj_nlp, options=solver_options, timer=timer + ) + else: + self._solver = solver_class( + self._proj_nlp, options=solver_options, timer=timer + ) + self._timer.stop("NlpSolver") + + vars_in_cons = [] + _seen = set() + for con in constraints: + for var in identify_variables(con.expr, include_fixed=False): + if id(var) not in _seen: + _seen.add(id(var)) + vars_in_cons.append(var) + self._active_var_set = ComponentSet(vars_in_cons) + + # It is possible (and fairly common) for variables specified as + # parameters to not appear in any of the specified constraints. + # We will fail if we try to get their coordinates in the NLP. + # + # Technically, this could happen for the variables as well. However, + # this would guarantee that the Jacobian is singular. I will worry + # about this when I encounter such a case. + self._active_param_mask = np.array( + [(p in self._active_var_set) for p in parameters] + ) + self._active_parameters = [ + p for i, p in enumerate(parameters) if self._active_param_mask[i] + ] + if any((var not in self._active_var_set) for var in variables): + raise RuntimeError( + "Invalid model. All variables must appear in specified constraints." + ) + + # These are coordinates in the original NLP + self._variable_coords = self._nlp.get_primal_indices(variables) + self._active_parameter_coords = self._nlp.get_primal_indices( + self._active_parameters + ) + + # NOTE: With this array, we are storing the same data in two locations. + # Once here, and once in the NLP. We do this because parameters do not + # *need* to exist in the NLP. However, we still need to be able to + # update the "parameter variables" in the Pyomo model. If we only stored + # the parameters in the NLP, we would lose the values for parameters + # that don't appear in the active constraints. + self._parameter_values = np.array([var.value for var in parameters]) + + self._timer.start("__init__") + + def set_parameters(self, values, **kwds): + self._timer.start("set_parameters") + # I am not 100% sure the values will always be an array (as opposed to + # list), so explicitly convert here. + values = np.array(values) + self._parameter_values = values + values = np.compress(self._active_param_mask, values) + primals = self._nlp.get_primals() + # Will it cause a problem that _active_parameter_coords is a list + # rather than array? + primals[self._active_parameter_coords] = values + self._nlp.set_primals(primals) + self._timer.start("solve") + results = self._solver.solve(**kwds) + self._timer.stop("solve") + self._timer.stop("set_parameters") + return results + + def evaluate_outputs(self): + primals = self._nlp.get_primals() + outputs = primals[self._variable_coords] + return outputs + + def update_pyomo_model(self): + primals = self._nlp.get_primals() + for i, var in enumerate(self.get_variables()): + var.set_value(primals[self._variable_coords[i]], skip_validation=True) + for var, value in zip(self._parameters, self._parameter_values): + var.set_value(value, skip_validation=True) + + +class DecomposedImplicitFunctionBase(PyomoImplicitFunctionBase): + """A base class for an implicit function that applies a partition + to its variables and constraints and converges the system by solving + subsets sequentially + + Subclasses should implement the partition_system method, which + determines how variables and constraints are partitioned into subsets. + + """ + + def __init__( + self, + variables, + constraints, + parameters, + solver_class=None, + solver_options=None, + timer=None, + use_calc_var=True, + ): + if timer is None: + timer = HierarchicalTimer() + self._timer = timer + self._timer.start("__init__") + if solver_class is None: + solver_class = ScipySolverWrapper + self._solver_class = solver_class + if solver_options is None: + solver_options = {} + self._solver_options = solver_options + self._calc_var_cutoff = 1 if use_calc_var else 0 + # NOTE: This super call is only necessary so the get_* methods work + super().__init__(variables, constraints, parameters) + + subsystem_list = [ + # Switch order in list for compatibility with generate_subsystem_blocks + (cons, vars) + for vars, cons in self.partition_system(variables, constraints) + ] + + var_param_set = ComponentSet(variables + parameters) + # We will treat variables that are neither variables nor parameters + # as "constants". These are usually things like area, volume, or some + # other global parameter that is treated as a variable and "fixed" with + # an equality constraint. + constants = [] + constant_set = ComponentSet() + for con in constraints: + for var in identify_variables(con.expr, include_fixed=False): + if var not in constant_set and var not in var_param_set: + # If this is a newly encountered variable that is neither + # a var nor param, treat it as a "constant" + constant_set.add(var) + constants.append(var) + + with TemporarySubsystemManager(to_fix=constants): + # Temporarily fix "constant" variables so (a) they don't show + # up in the local inputs of the subsystem blocks and (b) so + # they don't appear as additional columns in the NLPs and + # ProjectedNLPs. + + self._subsystem_list = list(generate_subsystem_blocks(subsystem_list)) + # These are subsystems that need an external solver, rather than + # calculate_variable_from_constraint. _calc_var_cutoff should be either + # 0 or 1. + self._solver_subsystem_list = [ + (block, inputs) + for block, inputs in self._subsystem_list + if len(block.vars) > self._calc_var_cutoff + ] + + # Need a dummy objective to create an NLP + for block, inputs in self._solver_subsystem_list: + block._obj = Objective(expr=0.0) + # I need scaling_factor so Pyomo NLPs I create from these blocks + # don't break when ProjectedNLP calls get_primals_scaling + block.scaling_factor = Suffix(direction=Suffix.EXPORT) + # HACK: scaling_factor just needs to be nonempty + block.scaling_factor[block._obj] = 1.0 + + # Original PyomoNLP for each subset in the partition + # Since we are creating these NLPs with "constants" fixed, these + # variables will not show up in the NLPs + self._timer.start("PyomoNLP") + self._solver_subsystem_nlps = [ + pyomo_nlp.PyomoNLP(block) + for block, inputs in self._solver_subsystem_list + ] + self._timer.stop("PyomoNLP") + + # "Output variable" names are required to construct ProjectedNLPs. + # Ideally, we can eventually replace these with variable indices. + self._solver_subsystem_var_names = [ + [var.name for var in block.vars.values()] + for block, inputs in self._solver_subsystem_list + ] + self._solver_proj_nlps = [ + nlp_proj.ProjectedExtendedNLP(nlp, names) + for nlp, names in zip( + self._solver_subsystem_nlps, self._solver_subsystem_var_names + ) + ] + + # We will solve the ProjectedNLPs rather than the original NLPs + self._timer.start("NlpSolver") + self._nlp_solvers = [ + self._solver_class(nlp, timer=self._timer, options=self._solver_options) + for nlp in self._solver_proj_nlps + ] + self._timer.stop("NlpSolver") + self._solver_subsystem_input_coords = [ + # Coordinates in the NLP, not ProjectedNLP + nlp.get_primal_indices(inputs) + for nlp, (subsystem, inputs) in zip( + self._solver_subsystem_nlps, self._solver_subsystem_list + ) + ] + + self._n_variables = len(variables) + self._n_constraints = len(constraints) + self._n_parameters = len(parameters) + + # This is a global (wrt individual subsystems) array that stores + # the current values of variables and parameters. This is useful + # for updating values in between subsystem solves. + # + # NOTE: This could also be implemented as a tuple of + # (subsystem_coord, primal_coord), which would eliminate the need to + # store data in two locations. The current implementation is easier, + # however. + self._global_values = np.array([var.value for var in variables + parameters]) + self._global_indices = ComponentMap( + (var, i) for i, var in enumerate(variables + parameters) + ) + # Cache the global array-coordinates of each subset of "input" + # variables. These are used for updating before each solve. + self._local_input_global_coords = [ + # If I do not fix "constants" above, I get errors here + # that only show up in the CLC models. + # TODO: Add a test that covers this edge case. + np.array([self._global_indices[var] for var in inputs], dtype=int) + for (_, inputs) in self._solver_subsystem_list + ] + + # Cache the global array-coordinates of each subset of "output" + # variables. These are used for updating after each solve. + self._output_coords = [ + np.array( + [self._global_indices[var] for var in block.vars.values()], dtype=int + ) + for (block, _) in self._solver_subsystem_list + ] + + self._timer.stop("__init__") + + def n_subsystems(self): + """Returns the number of subsystems in the partition of variables + and equations used to converge the system defining the implicit + function + + """ + return len(self._subsystem_list) + + def partition_system(self, variables, constraints): + """Partitions the systems of equations defined by the provided + variables and constraints + + Each subset of the partition should have an equal number of variables + and equations. These subsets, or "subsystems", will be solved + sequentially in the order provided by this method instead of solving + the entire system simultaneously. Subclasses should implement this + method to define the partition that their implicit function solver + will use. Partitions are defined as a list of tuples of lists. + Each tuple has two entries, the first a list of variables, and the + second a list of constraints. These inner lists should have the + same number of entries. + + Arguments + --------- + variables: list + List of VarData in the system to be partitioned + constraints: list + List of ConstraintData (equality constraints) defining the + equations of the system to be partitioned + + Returns + ------- + List of tuples + List of tuples describing the ordered partition. Each tuple + contains equal-length subsets of variables and constraints. + + """ + # Subclasses should implement this method, which returns an ordered + # partition (two lists-of-lists) of variables and constraints. + raise NotImplementedError( + "%s has not implemented the partition_system method" % type(self) + ) + + def set_parameters(self, values): + self._timer.start("set_parameters") + values = np.array(values) + # + # Set parameter values + # + # NOTE: Here I rely on the fact that the "global array" is in the + # order (variables, parameters) + self._global_values[self._n_variables :] = values + + # + # Solve subsystems one-by-one + # + # The basic procedure is: update local information from the global + # array, solve the subsystem, then update the global array with + # new values. + solver_subsystem_idx = 0 + for block, inputs in self._subsystem_list: + if len(block.vars) <= self._calc_var_cutoff: + # Update model values from global array. + for var in inputs: + idx = self._global_indices[var] + var.set_value(self._global_values[idx], skip_validation=True) + # Solve using calculate_variable_from_constraint + var = block.vars[0] + con = block.cons[0] + self._timer.start("solve") + self._timer.start("calc_var") + calculate_variable_from_constraint(var, con) + self._timer.stop("calc_var") + self._timer.stop("solve") + # Update global array with values from solve + self._global_values[self._global_indices[var]] = var.value + else: + # Transfer variable values into the projected NLP, solve, + # and extract values. + + i = solver_subsystem_idx + nlp = self._solver_subsystem_nlps[i] + proj_nlp = self._solver_proj_nlps[i] + input_coords = self._solver_subsystem_input_coords[i] + input_global_coords = self._local_input_global_coords[i] + output_global_coords = self._output_coords[i] + + nlp_solver = self._nlp_solvers[solver_subsystem_idx] + + # Get primals, load potentially new input values into primals, + # then load primals into NLP + primals = nlp.get_primals() + primals[input_coords] = self._global_values[input_global_coords] + + # Set primals in the original NLP. This is necessary so the + # parameters get updated. + nlp.set_primals(primals) + + # Get initial guess in the space of variables we solve for + x0 = proj_nlp.get_primals() + self._timer.start("solve") + self._timer.start("solve_nlp") + nlp_solver.solve(x0=x0) + self._timer.stop("solve_nlp") + self._timer.stop("solve") + + # Set values in global array. Here we rely on the fact that + # the projected NLP's primals are in the order that variables + # were initially specified. + self._global_values[output_global_coords] = proj_nlp.get_primals() + + solver_subsystem_idx += 1 + + self._timer.stop("set_parameters") + + def evaluate_outputs(self): + return self._global_values[: self._n_variables] + + def update_pyomo_model(self): + # NOTE: Here we rely on the fact that global_values is in the + # order (variables, parameters) + for i, var in enumerate(self.get_variables() + self.get_parameters()): + var.set_value(self._global_values[i], skip_validation=True) + + +class SccImplicitFunctionSolver(DecomposedImplicitFunctionBase): + def partition_system(self, variables, constraints): + self._timer.start("partition") + igraph = IncidenceGraphInterface() + var_blocks, con_blocks = igraph.block_triangularize(variables, constraints) + self._timer.stop("partition") + return zip(var_blocks, con_blocks) diff --git a/pyomo/contrib/pynumero/algorithms/solvers/pyomo_ext_cyipopt.py b/pyomo/contrib/pynumero/algorithms/solvers/pyomo_ext_cyipopt.py index 9918cfcbb66..b234d2f0890 100644 --- a/pyomo/contrib/pynumero/algorithms/solvers/pyomo_ext_cyipopt.py +++ b/pyomo/contrib/pynumero/algorithms/solvers/pyomo_ext_cyipopt.py @@ -10,7 +10,9 @@ # ___________________________________________________________________________ import numpy as np import abc -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptProblemInterface +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( + CyIpoptProblemInterface, +) from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP from pyomo.contrib.pynumero.sparse.block_vector import BlockVector from pyomo.environ import Var, Constraint, value @@ -42,11 +44,13 @@ variables """ + class ExternalInputOutputModel(object, metaclass=abc.ABCMeta): """ This is the base class for building external input output models for use with Pyomo and CyIpopt """ + def __init__(self): pass @@ -55,7 +59,7 @@ def set_inputs(self, input_values): """ This method is called by the solver to set the current values for the input variables. The derived class must cache these if - necessary for any subsequent calls to evalute_outputs or + necessary for any subsequent calls to evaluate_outputs or evaluate_derivatives. """ pass @@ -81,9 +85,17 @@ def evaluate_derivatives(self): # ToDo: Hessians not yet handled + class PyomoExternalCyIpoptProblem(CyIpoptProblemInterface): - def __init__(self, pyomo_model, ex_input_output_model, inputs, outputs, - outputs_eqn_scaling=None, nl_file_options=None): + def __init__( + self, + pyomo_model, + ex_input_output_model, + inputs, + outputs, + outputs_eqn_scaling=None, + nl_file_options=None, + ): """ Create an instance of this class to pass as a problem to CyIpopt. @@ -98,8 +110,8 @@ def __init__(self, pyomo_model, ex_input_output_model, inputs, outputs, the methods to compute the outputs and the derivatives. inputs : list of Pyomo variables (_VarData) - The Pyomo model needs to have variables to represent the inputs to the - external model. This is the list of those input variables in the order + The Pyomo model needs to have variables to represent the inputs to the + external model. This is the list of those input variables in the order that corresponds to the input_values vector provided in the set_inputs call. outputs : list of Pyomo variables (_VarData) @@ -119,30 +131,38 @@ def __init__(self, pyomo_model, ex_input_output_model, inputs, outputs, self._inputs = [v for v in inputs] for v in self._inputs: if not isinstance(v, _VarData): - raise RuntimeError('Argument inputs passed to PyomoExternalCyIpoptProblem must be' - ' a list of VarData objects. Note: if you have an indexed variable, pass' - ' each index as a separate entry in the list (e.g., inputs=[m.x[1], m.x[2]]).') + raise RuntimeError( + 'Argument inputs passed to PyomoExternalCyIpoptProblem must be' + ' a list of VarData objects. Note: if you have an indexed variable, pass' + ' each index as a separate entry in the list (e.g., inputs=[m.x[1], m.x[2]]).' + ) self._outputs = [v for v in outputs] for v in self._outputs: if not isinstance(v, _VarData): - raise RuntimeError('Argument outputs passed to PyomoExternalCyIpoptProblem must be' - ' a list of VarData objects. Note: if you have an indexed variable, pass' - ' each index as a separate entry in the list (e.g., inputs=[m.x[1], m.x[2]]).') + raise RuntimeError( + 'Argument outputs passed to PyomoExternalCyIpoptProblem must be' + ' a list of VarData objects. Note: if you have an indexed variable, pass' + ' each index as a separate entry in the list (e.g., inputs=[m.x[1], m.x[2]]).' + ) # we need to add a dummy variable and constraint to the pyomo_nlp # to make sure it does not remove variables that do not # appear in the pyomo part of the model - also ensure unique name in case model # is used in more than one instance of this class # ToDo: Improve this by convincing Pyomo not to remove the inputs and outputs - dummy_var_name = unique_component_name(self._pyomo_model, '_dummy_variable_CyIpoptPyomoExNLP') + dummy_var_name = unique_component_name( + self._pyomo_model, '_dummy_variable_CyIpoptPyomoExNLP' + ) dummy_var = Var() setattr(self._pyomo_model, dummy_var_name, dummy_var) - dummy_con_name = unique_component_name(self._pyomo_model, '_dummy_constraint_CyIpoptPyomoExNLP') + dummy_con_name = unique_component_name( + self._pyomo_model, '_dummy_constraint_CyIpoptPyomoExNLP' + ) dummy_con = Constraint( - expr = getattr(self._pyomo_model, dummy_var_name) == \ - sum(v for v in self._inputs) + sum(v for v in self._outputs) - ) + expr=getattr(self._pyomo_model, dummy_var_name) + == sum(v for v in self._inputs) + sum(v for v in self._outputs) + ) setattr(self._pyomo_model, dummy_con_name, dummy_con) # initialize the dummy var to the right hand side @@ -157,7 +177,7 @@ def __init__(self, pyomo_model, ex_input_output_model, inputs, outputs, # make an nlp interface from the pyomo model self._pyomo_nlp = PyomoNLP(self._pyomo_model, nl_file_options) - + # create initial value vectors for primals and duals init_primals = self._pyomo_nlp.init_primals() init_duals_pyomo = self._pyomo_nlp.init_duals() @@ -172,12 +192,12 @@ def __init__(self, pyomo_model, ex_input_output_model, inputs, outputs, # build the map from inputs and outputs to the full x vector self._input_columns = self._pyomo_nlp.get_primal_indices(self._inputs) - #self._input_x_mask = np.zeros(self._pyomo_nlp.n_primals(), dtype=np.float64) - #self._input_x_mask[self._input_columns] = 1.0 + # self._input_x_mask = np.zeros(self._pyomo_nlp.n_primals(), dtype=np.float64) + # self._input_x_mask[self._input_columns] = 1.0 self._output_columns = self._pyomo_nlp.get_primal_indices(self._outputs) - #self._output_x_mask = np.zeros(self._pyomo_nlp.n_primals(), dtype=np.float64) - #self._output_x_mask[self._output_columns] = 1.0 - + # self._output_x_mask = np.zeros(self._pyomo_nlp.n_primals(), dtype=np.float64) + # self._output_x_mask[self._output_columns] = 1.0 + # create caches for primals and duals self._cached_primals = init_primals.copy() self._cached_duals = init_duals.clone(copy=True) @@ -207,13 +227,16 @@ def __init__(self, pyomo_model, ex_input_output_model, inputs, outputs, # outputs_eqn_scaling if pyomo_constraints_scaling is not None or outputs_eqn_scaling is not None: if pyomo_constraints_scaling is None: - pyomo_constraints_scaling = np.ones(self._pyomo_nlp.n_primals(), dtype=np.float64) + pyomo_constraints_scaling = np.ones( + self._pyomo_nlp.n_primals(), dtype=np.float64 + ) if outputs_eqn_scaling is None: outputs_eqn_scaling = np.ones(len(self._outputs), dtype=np.float64) if type(outputs_eqn_scaling) is list: outputs_eqn_scaling = np.asarray(outputs_eqn_scaling, dtype=np.float64) - self._constraints_scaling = np.concatenate((pyomo_constraints_scaling, - outputs_eqn_scaling)) + self._constraints_scaling = np.concatenate( + (pyomo_constraints_scaling, outputs_eqn_scaling) + ) ### setup the jacobian structures self._jac_pyomo = self._pyomo_nlp.evaluate_jacobian() @@ -230,7 +253,7 @@ def __init__(self, pyomo_model, ex_input_output_model, inputs, outputs, jac_ex_irows = np.copy(jac_ex.row) jac_ex_irows += ex_start_row jac_ex_jcols = np.copy(jac_ex.col) - for z,col in enumerate(jac_ex_jcols): + for z, col in enumerate(jac_ex_jcols): jac_ex_jcols[z] = self._input_columns[col] jac_ex_data = np.copy(jac_ex.data) @@ -247,16 +270,26 @@ def __init__(self, pyomo_model, ex_input_output_model, inputs, outputs, # add the jac for output variables from the extra equations for i in range(len(self._outputs)): - jac_ex_output_irows.append(ex_start_row + i) - jac_ex_output_jcols.append(self._output_columns[i]) - jac_ex_output_data.append(-1.0) - - self._full_jac_irows = np.concatenate((self._jac_pyomo.row, jac_ex_irows, jac_ex_output_irows)) - self._full_jac_jcols = np.concatenate((self._jac_pyomo.col, jac_ex_jcols, jac_ex_output_jcols)) - self._full_jac_data = np.concatenate((self._jac_pyomo.data, jac_ex_data, jac_ex_output_data)) + jac_ex_output_irows.append(ex_start_row + i) + jac_ex_output_jcols.append(self._output_columns[i]) + jac_ex_output_data.append(-1.0) + + self._full_jac_irows = np.concatenate( + (self._jac_pyomo.row, jac_ex_irows, jac_ex_output_irows) + ) + self._full_jac_jcols = np.concatenate( + (self._jac_pyomo.col, jac_ex_jcols, jac_ex_output_jcols) + ) + self._full_jac_data = np.concatenate( + (self._jac_pyomo.data, jac_ex_data, jac_ex_output_data) + ) # currently, this interface does not do anything with Hessians + # Call CyIpoptProblemInterface.__init__, which calls + # cyipopt.Problem.__init__ + super(PyomoExternalCyIpoptProblem, self).__init__() + def load_x_into_pyomo(self, primals): """ Use this method to load a numpy array of values into the corresponding @@ -270,7 +303,7 @@ def load_x_into_pyomo(self, primals): internally. """ pyomo_variables = self._pyomo_nlp.get_pyomo_variables() - for i,v in enumerate(primals): + for i, v in enumerate(primals): pyomo_variables[i].set_value(v) def _set_primals_if_necessary(self, primals): @@ -295,7 +328,7 @@ def x_init(self): def x_lb(self): return self._pyomo_nlp.primals_lb() - + def x_ub(self): return self._pyomo_nlp.primals_ub() @@ -320,7 +353,9 @@ def constraints(self, primals): self._set_primals_if_necessary(primals) pyomo_constraints = self._pyomo_nlp.evaluate_constraints() ex_io_outputs = self._ex_io_model.evaluate_outputs() - ex_io_constraints = ex_io_outputs - self._ex_io_outputs_from_full_primals(primals) + ex_io_constraints = ex_io_outputs - self._ex_io_outputs_from_full_primals( + primals + ) constraints = BlockVector(2) constraints.set_block(0, pyomo_constraints) constraints.set_block(1, ex_io_constraints) @@ -328,15 +363,17 @@ def constraints(self, primals): def jacobianstructure(self): return self._full_jac_irows, self._full_jac_jcols - + def jacobian(self, primals): self._set_primals_if_necessary(primals) self._pyomo_nlp.evaluate_jacobian(out=self._jac_pyomo) pyomo_data = self._jac_pyomo.data ex_io_deriv = self._ex_io_model.evaluate_derivatives() # CDL: dense version: ex_io_deriv = self._ex_io_model.evaluate_derivatives().flatten('C') - self._full_jac_data[0:len(pyomo_data)] = pyomo_data - self._full_jac_data[len(pyomo_data):len(pyomo_data)+len(ex_io_deriv.data)] = ex_io_deriv.data + self._full_jac_data[0 : len(pyomo_data)] = pyomo_data + self._full_jac_data[ + len(pyomo_data) : len(pyomo_data) + len(ex_io_deriv.data) + ] = ex_io_deriv.data # CDL: dense version: self._full_jac_data[len(pyomo_data):len(pyomo_data)+len(ex_io_deriv)] = ex_io_deriv # the -1s for the output variables should still be here @@ -344,19 +381,15 @@ def jacobian(self, primals): def hessianstructure(self): return np.zeros(0), np.zeros(0) - #raise NotImplementedError('No Hessians for now') + # raise NotImplementedError('No Hessians for now') def hessian(self, x, y, obj_factor): raise NotImplementedError('No Hessians for now') def _ex_io_inputs_from_full_primals(self, primals): return primals[self._input_columns] - #return np.compress(self._input_x_mask, primals) + # return np.compress(self._input_x_mask, primals) def _ex_io_outputs_from_full_primals(self, primals): return primals[self._output_columns] - #return np.compress(self._output_x_mask, primals) - - - - + # return np.compress(self._output_x_mask, primals) diff --git a/pyomo/contrib/pynumero/algorithms/solvers/scipy_solvers.py b/pyomo/contrib/pynumero/algorithms/solvers/scipy_solvers.py new file mode 100644 index 00000000000..53f657c984f --- /dev/null +++ b/pyomo/contrib/pynumero/algorithms/solvers/scipy_solvers.py @@ -0,0 +1,505 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from collections import namedtuple +from pyomo.core.base.objective import Objective +from pyomo.common.timing import HierarchicalTimer +from pyomo.common.modeling import unique_component_name +from pyomo.common.config import ConfigBlock, ConfigValue, In +from pyomo.contrib.pynumero.algorithms.solvers.square_solver_base import ( + DenseSquareNlpSolver, + ScalarDenseSquareNlpSolver, +) +from pyomo.opt import SolverResults, TerminationCondition +from pyomo.common.dependencies import ( + attempt_import, + numpy as np, + numpy_available, + scipy as sp, + scipy_available, +) + +# Use attempt_import here so that we can register the solver even if SciPy is +# not available. +pyomo_nlp, _ = attempt_import("pyomo.contrib.pynumero.interfaces.pyomo_nlp") + + +class FsolveNlpSolver(DenseSquareNlpSolver): + OPTIONS = DenseSquareNlpSolver.OPTIONS( + description="Options for SciPy fsolve wrapper" + ) + OPTIONS.declare( + "xtol", + ConfigValue( + default=1e-8, + domain=float, + description="Tolerance for convergence of variable vector", + ), + ) + OPTIONS.declare( + "maxfev", + ConfigValue( + default=100, + domain=int, + description="Maximum number of function evaluations per solve", + ), + ) + OPTIONS.declare( + "tol", + ConfigValue( + default=None, + domain=float, + description="Tolerance for convergence of function residual", + ), + ) + OPTIONS.declare("full_output", ConfigValue(default=True, domain=bool)) + + def solve(self, x0=None): + if x0 is None: + x0 = self._nlp.get_primals() + + res = sp.optimize.fsolve( + self.evaluate_function, + x0, + fprime=self.evaluate_jacobian, + full_output=self.options.full_output, + xtol=self.options.xtol, + maxfev=self.options.maxfev, + ) + if self.options.full_output: + x, info, ier, msg = res + else: + x, ier, msg = res + + # + # fsolve converges with a tolerance specified on the variable + # vector x. We may also want to enforce a tolerance on function + # value, which we check here. + # + if self.options.tol is not None: + if self.options.full_output: + fcn_val = info["fvec"] + else: + fcn_val = self.evaluate_function(x) + if not np.all(np.abs(fcn_val) <= self.options.tol): + raise RuntimeError( + "fsolve converged to a solution that does not satisfy the" + " function tolerance 'tol' of %s." + " You may need to relax the 'tol' option or tighten the" + " 'xtol' option (currently 'xtol' is %s)." + % (self.options.tol, self.options.xtol) + ) + + return res + + +class RootNlpSolver(DenseSquareNlpSolver): + OPTIONS = DenseSquareNlpSolver.OPTIONS( + description="Options for SciPy fsolve wrapper" + ) + OPTIONS.declare( + "tol", + ConfigValue(default=1e-8, domain=float, description="Convergence tolerance"), + ) + OPTIONS.declare( + "method", + ConfigValue( + default="hybr", + domain=In({"hybr", "lm"}), + description="Method used to solve for the function root", + doc=( + """The 'method' argument in the scipy.optimize.root function. + For now only 'hybr' (Powell hybrid method from MINPACK) and + 'lm' (Levenberg-Marquardt from MINPACK) are supported. + """ + ), + ), + ) + + def solve(self, x0=None): + if x0 is None: + x0 = self._nlp.get_primals() + + results = sp.optimize.root( + self.evaluate_function, + x0, + jac=self.evaluate_jacobian, + tol=self.options.tol, + method=self.options.method, + ) + return results + + +class NewtonNlpSolver(ScalarDenseSquareNlpSolver): + """A wrapper around the SciPy scalar Newton solver for NLP objects""" + + OPTIONS = ScalarDenseSquareNlpSolver.OPTIONS( + description="Options for SciPy newton wrapper" + ) + OPTIONS.declare( + "tol", + ConfigValue(default=1e-8, domain=float, description="Convergence tolerance"), + ) + OPTIONS.declare( + "secant", + ConfigValue( + default=False, + domain=bool, + description="Whether to use SciPy's secant method", + ), + ) + OPTIONS.declare( + "full_output", + ConfigValue( + default=True, + domain=bool, + description="Whether underlying solver should return its full output", + ), + ) + OPTIONS.declare( + "maxiter", + ConfigValue( + default=50, + domain=int, + description="Maximum number of function evaluations per solve", + ), + ) + + def solve(self, x0=None): + if x0 is None: + x0 = self._nlp.get_primals() + + if self.options.secant: + fprime = None + else: + fprime = lambda x: self.evaluate_jacobian(np.array([x]))[0, 0] + results = sp.optimize.newton( + lambda x: self.evaluate_function(np.array([x]))[0], + x0[0], + fprime=fprime, + tol=self.options.tol, + full_output=self.options.full_output, + maxiter=self.options.maxiter, + ) + return results + + +class SecantNewtonNlpSolver(NewtonNlpSolver): + """A wrapper around the SciPy scalar Newton solver for NLP objects + that takes a specified number of secant iterations (default is 2) to + try to converge a linear equation quickly then switches to Newton's + method if this is not successful. This strategy is inspired by + calculate_variable_from_constraint in pyomo.util.calc_var_value. + + """ + + OPTIONS = ConfigBlock(description="Options for the SciPy Newton-secant hybrid") + OPTIONS.declare_from(NewtonNlpSolver.OPTIONS, skip={"maxiter", "secant"}) + OPTIONS.declare( + "secant_iter", + ConfigValue( + default=2, + domain=int, + description=( + "Number of secant iterations to perform before switching" + " to Newton's method." + ), + ), + ) + OPTIONS.declare( + "newton_iter", + ConfigValue( + default=50, + domain=int, + description="Maximum iterations for the Newton solve", + ), + ) + + def __init__(self, nlp, timer=None, options=None): + super().__init__(nlp, timer=timer, options=options) + self.converged_with_secant = None + + def solve(self, x0=None): + if x0 is None: + x0 = self._nlp.get_primals() + + try: + results = sp.optimize.newton( + lambda x: self.evaluate_function(np.array([x]))[0], + x0[0], + fprime=None, + tol=self.options.tol, + maxiter=self.options.secant_iter, + full_output=self.options.full_output, + ) + self.converged_with_secant = True + except RuntimeError: + self.converged_with_secant = False + x0 = self._nlp.get_primals() + results = sp.optimize.newton( + lambda x: self.evaluate_function(np.array([x]))[0], + x0[0], + fprime=lambda x: self.evaluate_jacobian(np.array([x]))[0, 0], + tol=self.options.tol, + maxiter=self.options.newton_iter, + full_output=self.options.full_output, + ) + return results + + +class PyomoScipySolver(object): + def __init__(self, options=None): + if options is None: + options = {} + self._nlp = None + self._nlp_solver = None + self._full_output = None + self.options = options + + def available(self, exception_flag=False): + return bool(numpy_available and scipy_available) + + def license_is_valid(self): + return True + + def version(self): + return tuple(int(_) for _ in sp.__version__.split('.')) + + def set_options(self, options): + self.options = options + + def solve(self, model, timer=None, tee=False): + """ + Parameters + ---------- + model: BlockData + The model that will be solved + timer: HierarchicalTimer + A HierarchicalTimer that "sub-timers" created by this object + will be attached to. If not provided, a new timer is created. + tee: Bool + A dummy flag indicating whether solver output should be displayed. + The current SciPy solvers supported have no output, so setting this + flag does not do anything. + + Returns + ------- + SolverResults + Contains the results of the solve + + """ + if timer is None: + timer = HierarchicalTimer() + self._timer = timer + self._timer.start("solve") + active_objs = list(model.component_data_objects(Objective, active=True)) + if len(active_objs) == 0: + obj_name = unique_component_name(model, "_obj") + obj = Objective(expr=0.0) + model.add_component(obj_name, obj) + + nlp = pyomo_nlp.PyomoNLP(model) + self._nlp = nlp + + if len(active_objs) == 0: + model.del_component(obj_name) + + # Call to solve(nlp) + self._nlp_solver = self.create_nlp_solver(options=self.options) + x0 = nlp.get_primals() + results = self._nlp_solver.solve(x0=x0) + + # Transfer values back to Pyomo model + for var, val in zip(nlp.get_pyomo_variables(), nlp.get_primals()): + var.set_value(val) + + self._timer.stop("solve") + + # Translate results into a Pyomo-compatible results structure + pyomo_results = self.get_pyomo_results(model, results) + + return pyomo_results + + def get_nlp(self): + return self._nlp + + def create_nlp_solver(self, **kwds): + raise NotImplementedError( + "%s has not implemented the create_nlp_solver method" % self.__class__ + ) + + def get_pyomo_results(self, model, scipy_results): + raise NotImplementedError( + "%s has not implemented the get_results method" % self.__class__ + ) + + # + # Support "with" statements. + # + def __enter__(self): + return self + + def __exit__(self, t, v, traceback): + pass + + +class PyomoFsolveSolver(PyomoScipySolver): + # Note that scipy.optimize.fsolve does not return a + # scipy.optimize.OptimizeResult object (as of SciPy 1.9.3). + # To assess convergence, we must check the integer flag "ier" + # that is the third (or second if full_output=False) entry + # of the returned tuple. This dict maps documented "ier" values + # to Pyomo termination conditions. + _term_cond = {1: TerminationCondition.feasible} + + def create_nlp_solver(self, **kwds): + nlp = self.get_nlp() + solver = FsolveNlpSolver(nlp, **kwds) + return solver + + def get_pyomo_results(self, model, scipy_results): + nlp = self.get_nlp() + if self._nlp_solver.options.full_output: + x, info, ier, msg = scipy_results + else: + x, ier, msg = scipy_results + results = SolverResults() + + # Record problem data + results.problem.name = model.name + results.problem.number_of_constraints = nlp.n_eq_constraints() + results.problem.number_of_variables = nlp.n_primals() + results.problem.number_of_binary_variables = 0 + results.problem.number_of_integer_variables = 0 + results.problem.number_of_continuous_variables = nlp.n_primals() + + # Record solver data + results.solver.name = "scipy.fsolve" + results.solver.return_code = ier + results.solver.message = msg + results.solver.wallclock_time = self._timer.timers["solve"].total_time + results.solver.termination_condition = self._term_cond.get( + ier, TerminationCondition.error + ) + results.solver.status = TerminationCondition.to_solver_status( + results.solver.termination_condition + ) + if self._nlp_solver.options.full_output: + results.solver.number_of_function_evaluations = info["nfev"] + results.solver.number_of_gradient_evaluations = info["njev"] + return results + + +class PyomoRootSolver(PyomoScipySolver): + def create_nlp_solver(self, **kwds): + nlp = self.get_nlp() + solver = RootNlpSolver(nlp, **kwds) + return solver + + def get_pyomo_results(self, model, scipy_results): + nlp = self.get_nlp() + results = SolverResults() + + # Record problem data + results.problem.name = model.name + results.problem.number_of_constraints = nlp.n_eq_constraints() + results.problem.number_of_variables = nlp.n_primals() + results.problem.number_of_binary_variables = 0 + results.problem.number_of_integer_variables = 0 + results.problem.number_of_continuous_variables = nlp.n_primals() + + # Record solver data + results.solver.name = "scipy.root" + results.solver.return_code = scipy_results.status + results.solver.message = scipy_results.message + results.solver.wallclock_time = self._timer.timers["solve"].total_time + + # Check the "success" field of the scipy results object as status + # appears to be different between solvers (i.e. "hybrid" vs "lm") + # and not well documented as of SciPy 1.9.3 + if scipy_results.success: + results.solver.termination_condition = TerminationCondition.feasible + else: + results.solver.termination_condition = TerminationCondition.error + + results.solver.status = TerminationCondition.to_solver_status( + results.solver.termination_condition + ) + # This attribute is in the SciPy documentation but appears not to + # be implemented for "hybr" or "lm" solvers... + # results.solver.number_of_iterations = scipy_results.nit + results.solver.number_of_function_evaluations = scipy_results.nfev + results.solver.number_of_gradient_evaluations = scipy_results.njev + + return results + + +class PyomoNewtonSolver(PyomoScipySolver): + _solver_name = "scipy.newton" + + def create_nlp_solver(self, **kwds): + nlp = self.get_nlp() + solver = NewtonNlpSolver(nlp, **kwds) + return solver + + def get_pyomo_results(self, model, scipy_results): + nlp = self.get_nlp() + results = SolverResults() + + if self._nlp_solver.options.full_output: + root, res = scipy_results + else: + root = scipy_results + + # Record problem data + results.problem.name = model.name + results.problem.number_of_constraints = nlp.n_eq_constraints() + results.problem.number_of_variables = nlp.n_primals() + results.problem.number_of_binary_variables = 0 + results.problem.number_of_integer_variables = 0 + results.problem.number_of_continuous_variables = nlp.n_primals() + + # Record solver data + results.solver.name = self._solver_name + + results.solver.wallclock_time = self._timer.timers["solve"].total_time + + if self._nlp_solver.options.full_output: + # We only have access to any of this information if the solver was + # requested to return its full output. + + # For this solver, res.flag is a string. + # If successful, it is 'converged' + results.solver.message = res.flag + + if res.converged: + term_cond = TerminationCondition.feasible + else: + term_cond = TerminationCondition.Error + results.solver.termination_condition = term_cond + results.solver.status = TerminationCondition.to_solver_status( + results.solver.termination_condition + ) + + results.solver.number_of_function_evaluations = res.function_calls + return results + + +class PyomoSecantNewtonSolver(PyomoNewtonSolver): + _solver_name = "scipy.secant-newton" + + def converged_with_secant(self): + return self._nlp_solver.converged_with_secant + + def create_nlp_solver(self, **kwds): + nlp = self.get_nlp() + solver = SecantNewtonNlpSolver(nlp, **kwds) + return solver diff --git a/pyomo/contrib/pynumero/algorithms/solvers/square_solver_base.py b/pyomo/contrib/pynumero/algorithms/solvers/square_solver_base.py new file mode 100644 index 00000000000..c4a33d97611 --- /dev/null +++ b/pyomo/contrib/pynumero/algorithms/solvers/square_solver_base.py @@ -0,0 +1,114 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from collections import namedtuple +from pyomo.common.timing import HierarchicalTimer +from pyomo.common.config import ConfigBlock +from pyomo.util.subsystems import create_subsystem_block + + +class SquareNlpSolverBase(object): + """A base class for NLP solvers that act on a square system + of equality constraints. + + """ + + # Ideally, this ConfigBlock would contain options that are valid for any + # square NLP solver. However, no such options seem to exist while + # preserving the names of the SciPy function arguments. E.g., tolerance + # is "tol" in some solvers and "xtol" in others, and some solvers + # support "maxiter" while others support "maxfev". It may be useful to + # attempt some standardization by, e.g., mapping tol->xtol, then + # specifying "universal" options here, but this can happen at a later + # date as these solvers see more use. + OPTIONS = ConfigBlock() + + def __init__(self, nlp, timer=None, options=None): + """ + Arguments + --------- + nlp: ExtendedNLP + An instance of ExtendedNLP that will be solved. + ExtendedNLP is required to ensure that the NLP has equal + numbers of primal variables and equality constraints. + + """ + if timer is None: + timer = HierarchicalTimer() + if options is None: + options = {} + self.options = self.OPTIONS(options) + + self._timer = timer + self._nlp = nlp + self._function_values = None + self._jacobian = None + + if self._nlp.n_eq_constraints() != self._nlp.n_primals(): + raise RuntimeError( + "Cannot construct a square solver for an NLP that" + " does not have the same numbers of variables as" + " equality constraints. Got %s variables and %s" + " equalities." % (self._nlp.n_primals(), self._nlp.n_eq_constraints()) + ) + # Checking for a square system of equalities is easy, but checking + # bounds is a little difficult. We don't know how an NLP will + # implement bounds (no bound could be None, np.nan, or np.inf), + # so it is annoying to check that bounds are not present. + # Instead, we just ignore bounds, and the user must know that the + # result of this solver is not guaranteed to respect bounds. + # While it is easier to check that inequalities are absent, + # for consistency, we take the same approach and simply ignore + # them. + + def solve(self, x0=None): + # the NLP has a natural initial guess - the cached primal + # values. x0 may be provided if a different initial guess + # is desired. + raise NotImplementedError( + "%s has not implemented the solve method" % self.__class__ + ) + + def evaluate_function(self, x0): + # NOTE: NLP object should handle any caching + self._nlp.set_primals(x0) + values = self._nlp.evaluate_eq_constraints() + return values + + def evaluate_jacobian(self, x0): + # NOTE: NLP object should handle any caching + self._nlp.set_primals(x0) + self._jacobian = self._nlp.evaluate_jacobian_eq(out=self._jacobian) + return self._jacobian + + +class DenseSquareNlpSolver(SquareNlpSolverBase): + """A square NLP solver that uses a dense Jacobian""" + + def evaluate_jacobian(self, x0): + sparse_jac = super().evaluate_jacobian(x0) + dense_jac = sparse_jac.toarray() + return dense_jac + + +class ScalarDenseSquareNlpSolver(DenseSquareNlpSolver): + # A base class for solvers for scalar equations. + # Not intended to be instantiated directly. Instead, + # NewtonNlpSolver or SecantNewtonNlpSolver should be used. + + def __init__(self, nlp, timer=None, options=None): + super().__init__(nlp, timer=timer, options=options) + if nlp.n_primals() != 1: + raise RuntimeError( + "Cannot use the scipy.optimize.newton solver on an NLP with" + " more than one variable and equality constraint. Got %s" + " primals. Please use RootNlpSolver or FsolveNlpSolver instead." + ) diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py index 90d1e8b9772..119c4604f19 100644 --- a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py +++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py @@ -13,7 +13,10 @@ import pyomo.environ as pyo from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) from pyomo.common.dependencies.scipy import sparse as spa @@ -21,19 +24,20 @@ raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): raise unittest.SkipTest( - "Pynumero needs the ASL extension to run CyIpoptSolver tests") + "Pynumero needs the ASL extension to run CyIpoptSolver tests" + ) from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - cyipopt_available -) +from pyomo.contrib.pynumero.interfaces.cyipopt_interface import cyipopt_available + if not cyipopt_available: raise unittest.SkipTest("Pynumero needs cyipopt to run CyIpoptSolver tests") -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptNLP +from pyomo.contrib.pynumero.interfaces.cyipopt_interface import CyIpoptNLP def create_model1(): @@ -46,8 +50,8 @@ def create_model1(): m.x[2].setlb(0.0) return m -class TestCyIpoptNLP(unittest.TestCase): +class TestCyIpoptNLP(unittest.TestCase): def test_model1_CyIpoptNLP(self): model = create_model1() nlp = PyomoNLP(model) @@ -58,7 +62,7 @@ def test_model1_CyIpoptNLP_scaling(self): m = create_model1() m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - m.scaling_factor[m.o] = 1e-6 # scale the objective + m.scaling_factor[m.o] = 1e-6 # scale the objective m.scaling_factor[m.c] = 2.0 # scale the equality constraint m.scaling_factor[m.d] = 3.0 # scale the inequality constraint m.scaling_factor[m.x[1]] = 4.0 # scale one of the x variables @@ -80,7 +84,7 @@ def test_model1_CyIpoptNLP_scaling(self): m = create_model1() m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - #m.scaling_factor[m.o] = 1e-6 # scale the objective + # m.scaling_factor[m.o] = 1e-6 # scale the objective m.scaling_factor[m.c] = 2.0 # scale the equality constraint m.scaling_factor[m.d] = 3.0 # scale the inequality constraint m.scaling_factor[m.x[1]] = 4.0 # scale the x variable @@ -102,10 +106,10 @@ def test_model1_CyIpoptNLP_scaling(self): m = create_model1() m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - m.scaling_factor[m.o] = 1e-6 # scale the objective + m.scaling_factor[m.o] = 1e-6 # scale the objective m.scaling_factor[m.c] = 2.0 # scale the equality constraint m.scaling_factor[m.d] = 3.0 # scale the inequality constraint - #m.scaling_factor[m.x] = 4.0 # scale the x variable + # m.scaling_factor[m.x] = 4.0 # scale the x variable cynlp = CyIpoptNLP(PyomoNLP(m)) obj_scaling, x_scaling, g_scaling = cynlp.scaling_factors() @@ -124,8 +128,8 @@ def test_model1_CyIpoptNLP_scaling(self): m = create_model1() m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - m.scaling_factor[m.o] = 1e-6 # scale the objective - #m.scaling_factor[m.c] = 2.0 # scale the equality constraint + m.scaling_factor[m.o] = 1e-6 # scale the objective + # m.scaling_factor[m.c] = 2.0 # scale the equality constraint m.scaling_factor[m.d] = 3.0 # scale the inequality constraint m.scaling_factor[m.x[1]] = 4.0 # scale the x variable @@ -144,18 +148,18 @@ def test_model1_CyIpoptNLP_scaling(self): # test missing all m = create_model1() - #m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - #m.scaling_factor[m.o] = 1e-6 # scale the objective - #m.scaling_factor[m.c] = 2.0 # scale the equality constraint - #m.scaling_factor[m.d] = 3.0 # scale the inequality constraint - #m.scaling_factor[m.x] = 4.0 # scale the x variable + # m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + # m.scaling_factor[m.o] = 1e-6 # scale the objective + # m.scaling_factor[m.c] = 2.0 # scale the equality constraint + # m.scaling_factor[m.d] = 3.0 # scale the inequality constraint + # m.scaling_factor[m.x] = 4.0 # scale the x variable cynlp = CyIpoptNLP(PyomoNLP(m)) obj_scaling, x_scaling, g_scaling = cynlp.scaling_factors() self.assertTrue(obj_scaling is None) self.assertTrue(x_scaling is None) self.assertTrue(g_scaling is None) - + def _check_model1(self, nlp, cynlp): # test x_init expected_xinit = np.asarray([4.0, 4.0, 4.0], dtype=np.float64) @@ -205,14 +209,16 @@ def _check_model1(self, nlp, cynlp): # test constraints expected = np.asarray([20, -5], dtype=np.float64) constraints = cynlp.constraints(x) - self.assertTrue(np.allclose(expected, constraints)) - + self.assertTrue(np.allclose(expected, constraints)) + # test jacobian - expected = np.asarray([[8.0, 0, 1.0],[0.0, 8.0, 1.0]]) + expected = np.asarray([[8.0, 0, 1.0], [0.0, 8.0, 1.0]]) spexpected = spa.coo_matrix(expected).todense() rows, cols = cynlp.jacobianstructure() values = cynlp.jacobian(x) - jac = spa.coo_matrix((values, (rows,cols)), shape=(len(constraints), len(x))).todense() + jac = spa.coo_matrix( + (values, (rows, cols)), shape=(len(constraints), len(x)) + ).todense() self.assertTrue(np.allclose(spexpected, jac)) # test hessian @@ -220,6 +226,11 @@ def _check_model1(self, nlp, cynlp): y.fill(1.0) rows, cols = cynlp.hessianstructure() values = cynlp.hessian(x, y, obj_factor=1.0) - hess_lower = spa.coo_matrix((values, (rows,cols)), shape=(len(x), len(x))).todense() - expected_hess_lower = np.asarray([[-286.0, 0.0, 0.0], [0.0, 4.0, 0.0], [-144.0, 0.0, 192.0]], dtype=np.float64) + hess_lower = spa.coo_matrix( + (values, (rows, cols)), shape=(len(x), len(x)) + ).todense() + expected_hess_lower = np.asarray( + [[-286.0, 0.0, 0.0], [0.0, 4.0, 0.0], [-144.0, 0.0, 192.0]], + dtype=np.float64, + ) self.assertTrue(np.allclose(expected_hess_lower, hess_lower)) diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py index 094bb7d9238..2a7edb430d4 100644 --- a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py +++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py @@ -14,7 +14,10 @@ import os from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) from pyomo.common.dependencies.scipy import sparse as spa @@ -22,21 +25,20 @@ raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): raise unittest.SkipTest( - "Pynumero needs the ASL extension to run CyIpoptSolver tests") + "Pynumero needs the ASL extension to run CyIpoptSolver tests" + ) from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - cyipopt_available +from pyomo.contrib.pynumero.interfaces.cyipopt_interface import ( + cyipopt_available, + CyIpoptNLP, ) -if not cyipopt_available: - raise unittest.SkipTest("Pynumero needs cyipopt to run CyIpoptSolver tests") -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - CyIpoptSolver, CyIpoptNLP -) +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptSolver def create_model1(): @@ -72,13 +74,18 @@ def create_model3(G, A, b, c): model.con_ids = range(nl) model.x = pyo.Var(model.var_ids, initialize=0.0) - model.hessian_f = pyo.Param(model.var_ids, model.var_ids, mutable=True, rule=lambda m, i, j: G[i, j]) - model.jacobian_c = pyo.Param(model.con_ids, model.var_ids, mutable=True, rule=lambda m, i, j: A[i, j]) + model.hessian_f = pyo.Param( + model.var_ids, model.var_ids, mutable=True, rule=lambda m, i, j: G[i, j] + ) + model.jacobian_c = pyo.Param( + model.con_ids, model.var_ids, mutable=True, rule=lambda m, i, j: A[i, j] + ) model.rhs = pyo.Param(model.con_ids, mutable=True, rule=lambda m, i: b[i]) model.grad_f = pyo.Param(model.var_ids, mutable=True, rule=lambda m, i: c[i]) def equality_constraint_rule(m, i): return sum(m.jacobian_c[i, j] * m.x[j] for j in m.var_ids) == m.rhs[i] + model.equalities = pyo.Constraint(model.con_ids, rule=equality_constraint_rule) def objective_rule(m): @@ -93,6 +100,7 @@ def objective_rule(m): return model + def create_model4(): m = pyo.ConcreteModel() m.x = pyo.Var([1, 2], initialize=1.0) @@ -108,7 +116,11 @@ def create_model6(): model.x = pyo.Var(model.S, initialize=1.0) def f(model): - return model.x[1] ** 4 + (model.x[1] + model.x[2]) ** 2 + (-1.0 + pyo.exp(model.x[2])) ** 2 + return ( + model.x[1] ** 4 + + (model.x[1] + model.x[2]) ** 2 + + (-1.0 + pyo.exp(model.x[2])) ** 2 + ) model.f = pyo.Objective(rule=f) return model @@ -120,16 +132,19 @@ def create_model9(): p = 71 wght = -0.1 - hp2 = 0.5 * p ** 2 + hp2 = 0.5 * p**2 model.x = pyo.Var(pyo.RangeSet(1, p), pyo.RangeSet(1, p), initialize=0.0) def f(model): - return sum(0.5 * (model.x[i, j] - model.x[i, j - 1]) ** 2 + \ - 0.5 * (model.x[i, j] - model.x[i - 1, j]) ** 2 + \ - hp2 * (model.x[i, j] - model.x[i, j - 1]) ** 4 + \ - hp2 * (model.x[i, j] - model.x[i - 1, j]) ** 4 \ - for i in range(2, p + 1) for j in range(2, p + 1)) + (wght * model.x[p, p]) + return sum( + 0.5 * (model.x[i, j] - model.x[i, j - 1]) ** 2 + + 0.5 * (model.x[i, j] - model.x[i - 1, j]) ** 2 + + hp2 * (model.x[i, j] - model.x[i, j - 1]) ** 4 + + hp2 * (model.x[i, j] - model.x[i - 1, j]) ** 4 + for i in range(2, p + 1) + for j in range(2, p + 1) + ) + (wght * model.x[p, p]) model.f = pyo.Objective(rule=f) @@ -140,8 +155,18 @@ def f(model): return model -class TestCyIpoptSolver(unittest.TestCase): +@unittest.skipIf(cyipopt_available, "cyipopt is available") +class TestCyIpoptNotAvailable(unittest.TestCase): + def test_not_available_exception(self): + model = create_model1() + nlp = PyomoNLP(model) + msg = "cyipopt is required" + with self.assertRaisesRegex(RuntimeError, msg): + solver = CyIpoptSolver(CyIpoptNLP(nlp)) + +@unittest.skipUnless(cyipopt_available, "cyipopt is not available") +class TestCyIpoptSolver(unittest.TestCase): def test_model1(self): model = create_model1() nlp = PyomoNLP(model) @@ -158,21 +183,24 @@ def test_model1(self): def test_model1_with_scaling(self): m = create_model1() m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - m.scaling_factor[m.o] = 1e-6 # scale the objective + m.scaling_factor[m.o] = 1e-6 # scale the objective m.scaling_factor[m.c] = 2.0 # scale the equality constraint m.scaling_factor[m.d] = 3.0 # scale the inequality constraint m.scaling_factor[m.x[1]] = 4.0 # scale one of the x variables cynlp = CyIpoptNLP(PyomoNLP(m)) - options={'nlp_scaling_method': 'user-scaling', - 'output_file': '_cyipopt-scaling.log', - 'file_print_level':10, - 'max_iter': 0} + options = { + 'nlp_scaling_method': 'user-scaling', + 'output_file': '_cyipopt-scaling.log', + 'file_print_level': 10, + 'max_iter': 0, + } solver = CyIpoptSolver(cynlp, options=options) x, info = solver.solve() with open('_cyipopt-scaling.log', 'r') as fd: solver_trace = fd.read() + cynlp.close() os.remove('_cyipopt-scaling.log') # check for the following strings in the log and then delete the log @@ -215,7 +243,7 @@ def test_model3(self): solver = CyIpoptSolver(CyIpoptNLP(nlp)) x, info = solver.solve(tee=False) x_sol = np.array([2.0, -1.0, 1.0]) - y_sol = np.array([-3., 2.]) + y_sol = np.array([-3.0, 2.0]) self.assertTrue(np.allclose(x, x_sol, rtol=1e-4)) nlp.set_primals(x) nlp.set_duals(y_sol) @@ -228,4 +256,4 @@ def test_options(self): solver = CyIpoptSolver(CyIpoptNLP(nlp), options={'max_iter': 1}) x, info = solver.solve(tee=False) nlp.set_primals(x) - self.assertAlmostEqual(nlp.evaluate_objective(), -5.0879028e+02, places=5) + self.assertAlmostEqual(nlp.evaluate_objective(), -5.0879028e02, places=5) diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_implicit_functions.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_implicit_functions.py new file mode 100644 index 00000000000..04d4ed321f1 --- /dev/null +++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_implicit_functions.py @@ -0,0 +1,364 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import itertools +import pyomo.common.unittest as unittest +import pyomo.environ as pyo +from pyomo.common.dependencies import ( + scipy, + scipy_available, + numpy as np, + numpy_available, + networkx_available, +) +from pyomo.contrib.pynumero.asl import AmplInterface +from pyomo.contrib.pynumero.algorithms.solvers.implicit_functions import ( + CyIpoptSolverWrapper, + ImplicitFunctionSolver, + DecomposedImplicitFunctionBase, + SccImplicitFunctionSolver, +) +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import cyipopt_available + +if not scipy_available or not numpy_available: + # SciPy is only really necessary as it is a dependency of AmplInterface. + # NumPy is directly used by the implicit function solvers. + raise unittest.SkipTest( + "NumPy and SciPy are needed to test the implicit function solvers" + ) +if not AmplInterface.available(): + # AmplInterface is not theoretically necessary for these solvers, + # however it is the only AD backend implemented for PyomoNLP. + raise unittest.SkipTest( + "PyNumero ASL extension is necessary to test implicit function solvers" + ) + + +class ImplicitFunction1(object): + def __init__(self): + self._model = self._make_model() + + def _make_model(self): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=[1, 2, 3]) + m.J = pyo.Set(initialize=[1, 2]) + m.x = pyo.Var(m.I, initialize=1.0) + m.p = pyo.Var(m.J, initialize=1.0) + # Note that this system of constraints decomposes. First con1 + # and con3 are used to solve for x[2] and x[3], then con2 + # is used to solve for x[1] + m.con1 = pyo.Constraint(expr=m.x[2] ** 2 + m.x[3] ** 2 == m.p[1]) + m.con2 = pyo.Constraint( + expr=2 * m.x[1] + 3 * m.x[2] - 4 * m.x[3] == m.p[1] ** 2 - m.p[2] + ) + m.con3 = pyo.Constraint(expr=m.p[2] ** 1.5 == 2 * pyo.exp(m.x[2] / m.x[3])) + m.obj = pyo.Objective(expr=0.0) + return m + + def get_parameters(self): + m = self._model + return [m.p[1], m.p[2]] + + def get_variables(self): + m = self._model + return [m.x[1], m.x[2], m.x[3]] + + def get_equations(self): + m = self._model + return [m.con1, m.con2, m.con3] + + def get_input_output_sequence(self): + p1_inputs = [1.0, 2.0, 3.0] + p2_inputs = [1.0, 2.0, 3.0] + inputs = list(itertools.product(p1_inputs, p2_inputs)) + + outputs = [ + # Outputs computed by solving system with Ipopt + (2.498253, -0.569676, 0.821869), + (0.898530, 0.327465, 0.944863), + (-0.589294, 0.690561, 0.723274), + (5.033063, -0.805644, 1.162299), + (2.977820, 0.463105, 1.336239), + (1.080826, 0.976601, 1.022864), + (8.327101, -0.986708, 1.423519), + (5.922325, 0.567186, 1.636551), + (3.711364, 1.196087, 1.252747), + ] + # We will iterate over these input/output pairs, set inputs, + # solve, and check outputs. Note that these values are computed + # with Ipopt, and the default solver for the system defining the + # implicit function is scipy.optimize.fsolve. There is no guarantee + # that these algorithms converge to the same solution for the + # highly nonlinear system defining this implicit function. If some + # of these tests start failing (e.g. because one of these algorithms + # changes), some of these inputs may need to be omitted. + return list(zip(inputs, outputs)) + + +class ImplicitFunctionWithExtraVariables(ImplicitFunction1): + """This is the same system as ImplicitFunction1, but now some + of the hand-coded constants have been replaced by unfixed variables. + These variables will be completely ignored and treated as constants + by the implicit functions. + + """ + + def _make_model(self): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=[1, 2, 3]) + m.J = pyo.Set(initialize=[1, 2]) + m.K = pyo.Set(initialize=[1, 2, 3]) + m.x = pyo.Var(m.I, initialize=1.0) + m.p = pyo.Var(m.J, initialize=1.0) + + # These variables will be treated as neither outputs nor + # inputs. They are simply treated as constants. + m.const = pyo.Var(m.K, initialize=1.0) + m.const[1].set_value(1.0) + m.const[2].set_value(2.0) + m.const[3].set_value(1.5) + + m.con1 = pyo.Constraint(expr=m.const[1] * m.x[2] ** 2 + m.x[3] ** 2 == m.p[1]) + m.con2 = pyo.Constraint( + expr=m.const[2] * m.x[1] + 3 * m.x[2] - 4 * m.x[3] == m.p[1] ** 2 - m.p[2] + ) + m.con3 = pyo.Constraint( + expr=m.p[2] ** m.const[3] == 2 * pyo.exp(m.x[2] / m.x[3]) + ) + m.obj = pyo.Objective(expr=0.0) + return m + + +class ImplicitFunctionInputsDontAppear(object): + """This is an implicit function designed to test the edge case + where inputs do not appear in the system defining the implicit + function (i.e. the function is constant). + + """ + + def __init__(self): + self._model = self._make_model() + + def _make_model(self): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=[1, 2, 3]) + m.J = pyo.Set(initialize=[1, 2]) + m.x = pyo.Var(m.I, initialize=1.0) + m.p = pyo.Var(m.J, initialize=1.0) + m.con1 = pyo.Constraint(expr=m.x[2] ** 2 + m.x[3] ** 2 == 1.0) + m.con2 = pyo.Constraint(expr=2 * m.x[1] + 3 * m.x[2] - 4 * m.x[3] == 0.0) + m.con3 = pyo.Constraint(expr=1.0 == 2 * pyo.exp(m.x[2] / m.x[3])) + m.obj = pyo.Objective(expr=0.0) + return m + + def get_parameters(self): + m = self._model + return [m.p[1], m.p[2]] + + def get_variables(self): + m = self._model + return [m.x[1], m.x[2], m.x[3]] + + def get_equations(self): + m = self._model + return [m.con1, m.con2, m.con3] + + def get_input_output_sequence(self): + # As the implicit function is constant, these parameter + # values don't matter + p1_inputs = [-1.0, 0.0] + p2_inputs = [1.0] + inputs = list(itertools.product(p1_inputs, p2_inputs)) + + outputs = [ + # Outputs computed by solving system with Ipopt + (2.498253, -0.569676, 0.821869), + (2.498253, -0.569676, 0.821869), + ] + return list(zip(inputs, outputs)) + + +class ImplicitFunctionNoInputs(ImplicitFunctionInputsDontAppear): + """The same system as with inputs that don't appear, but now the + inputs are not provided to the implicit function solver + + """ + + def get_parameters(self): + return [] + + def get_input_output_sequence(self): + inputs = [()] + outputs = [ + # Outputs computed by solving system with Ipopt + (2.498253, -0.569676, 0.821869) + ] + return list(zip(inputs, outputs)) + + +class _TestSolver(unittest.TestCase): + """A suite of basic tests for implicit function solvers. + + A "concrete" subclass should be defined for each implicit function + solver. This subclass should implement get_solver_class, then + add "test" methods that call the following methods: + + _test_implicit_function_1 + _test_implicit_function_inputs_dont_appear + _test_implicit_function_no_inputs + _test_implicit_function_with_extra_variables + + These methods are private so they don't get picked up on the base + class by pytest. + + """ + + def get_solver_class(self): + raise NotImplementedError() + + def _test_implicit_function(self, ImplicitFunctionClass, **kwds): + SolverClass = self.get_solver_class() + fcn = ImplicitFunctionClass() + variables = fcn.get_variables() + parameters = fcn.get_parameters() + equations = fcn.get_equations() + + solver = SolverClass(variables, equations, parameters, **kwds) + + for inputs, pred_outputs in fcn.get_input_output_sequence(): + solver.set_parameters(inputs) + outputs = solver.evaluate_outputs() + self.assertStructuredAlmostEqual( + list(outputs), list(pred_outputs), reltol=1e-5, abstol=1e-5 + ) + + solver.update_pyomo_model() + for i, var in enumerate(variables): + self.assertAlmostEqual(var.value, pred_outputs[i], delta=1e-5) + + def _test_implicit_function_1(self, **kwds): + self._test_implicit_function(ImplicitFunction1, **kwds) + + def _test_implicit_function_inputs_dont_appear(self): + self._test_implicit_function(ImplicitFunctionInputsDontAppear) + + def _test_implicit_function_no_inputs(self): + self._test_implicit_function(ImplicitFunctionNoInputs) + + def _test_implicit_function_with_extra_variables(self): + self._test_implicit_function(ImplicitFunctionWithExtraVariables) + + +class TestImplicitFunctionSolver(_TestSolver): + def get_solver_class(self): + return ImplicitFunctionSolver + + def test_bad_option(self): + msg = "Option.*is invalid" + with self.assertRaisesRegex(ValueError, msg): + self._test_implicit_function_1(solver_options=dict(bad_option=None)) + + def test_implicit_function_1(self): + self._test_implicit_function_1() + + @unittest.skipUnless(cyipopt_available, "CyIpopt is not available") + def test_implicit_function_1_with_cyipopt(self): + self._test_implicit_function_1(solver_class=CyIpoptSolverWrapper) + + def test_implicit_function_inputs_dont_appear(self): + self._test_implicit_function_inputs_dont_appear() + + def test_implicit_function_no_inputs(self): + self._test_implicit_function_no_inputs() + + def test_implicit_function_with_extra_variables(self): + self._test_implicit_function_with_extra_variables() + + +@unittest.skipUnless(networkx_available, "NetworkX is not available") +class TestSccImplicitFunctionSolver(_TestSolver): + def get_solver_class(self): + return SccImplicitFunctionSolver + + def test_partition_not_implemented(self): + fcn = ImplicitFunction1() + variables = fcn.get_variables() + parameters = fcn.get_parameters() + equations = fcn.get_equations() + msg = "has not implemented" + with self.assertRaisesRegex(NotImplementedError, msg): + solver = DecomposedImplicitFunctionBase(variables, equations, parameters) + + def test_n_subsystems(self): + SolverClass = self.get_solver_class() + fcn = ImplicitFunction1() + variables = fcn.get_variables() + parameters = fcn.get_parameters() + equations = fcn.get_equations() + solver = SolverClass(variables, equations, parameters) + + # Assert that the system decomposes into two subsystems. + self.assertEqual(solver.n_subsystems(), 2) + + def test_implicit_function_1(self): + self._test_implicit_function_1() + + @unittest.skipUnless(cyipopt_available, "CyIpopt is not available") + def test_implicit_function_1_with_cyipopt(self): + self._test_implicit_function_1(solver_class=CyIpoptSolverWrapper) + + def test_implicit_function_1_no_calc_var(self): + self._test_implicit_function_1( + use_calc_var=False, solver_options={"maxfev": 20} + ) + + def test_implicit_function_inputs_dont_appear(self): + self._test_implicit_function_inputs_dont_appear() + + def test_implicit_function_no_inputs(self): + self._test_implicit_function_no_inputs() + + def test_implicit_function_with_extra_variables(self): + self._test_implicit_function_with_extra_variables() + + +def _solve_with_ipopt(): + from pyomo.util.subsystems import TemporarySubsystemManager + + ipopt = pyo.SolverFactory("ipopt") + fcn = ImplicitFunctionInputsDontAppear() + m = fcn._model + params = fcn.get_parameters() + variables = fcn.get_variables() + input_list = [] + output_list = [] + error_list = [] + for (p1, p2), _ in fcn.get_input_output_sequence(): + params[0].set_value(p1) + params[1].set_value(p2) + input_list.append((p1, p2)) + with TemporarySubsystemManager(to_fix=params): + try: + res = ipopt.solve(m, tee=True) + pyo.assert_optimal_termination(res) + error_list.append(False) + except (ValueError, AssertionError, RuntimeError): + error_list.append(True) + output_list.append(tuple(var.value for var in variables)) + for i, (inputs, outputs) in enumerate(zip(input_list, output_list)): + print(inputs, outputs, error_list[i]) + for outputs in output_list: + print("(%1.6f, %1.6f, %1.6f)," % outputs) + + +if __name__ == "__main__": + # unittest.main() + _solve_with_ipopt() diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py index 6eb276c473d..82a37873d5f 100644 --- a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py +++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py @@ -14,7 +14,10 @@ import pyomo.environ as pyo from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) from pyomo.common.dependencies.scipy import sparse as spa @@ -22,17 +25,21 @@ raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): raise unittest.SkipTest( - "Pynumero needs the ASL extension to run CyIpoptSolver tests") + "Pynumero needs the ASL extension to run CyIpoptSolver tests" + ) + +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import cyipopt_available -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - cyipopt_available -) if not cyipopt_available: raise unittest.SkipTest("Pynumero needs cyipopt to run CyIpoptSolver tests") -from pyomo.contrib.pynumero.algorithms.solvers.pyomo_ext_cyipopt import ExternalInputOutputModel, PyomoExternalCyIpoptProblem +from pyomo.contrib.pynumero.algorithms.solvers.pyomo_ext_cyipopt import ( + ExternalInputOutputModel, + PyomoExternalCyIpoptProblem, +) from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptSolver @@ -51,19 +58,20 @@ def set_inputs(self, input_values): self._F = input_values[3] def evaluate_outputs(self): - P1 = self._Pin - self._c1*self._F**2 - P2 = P1 - self._c2*self._F**2 + P1 = self._Pin - self._c1 * self._F**2 + P2 = P1 - self._c2 * self._F**2 return np.asarray([P1, P2], dtype=np.float64) def evaluate_derivatives(self): - jac = [[1, -self._F**2, 0, -2*self._c1*self._F], - [1, -self._F**2, -self._F**2, -2*self._F*(self._c1 + self._c2)]] + jac = [ + [1, -self._F**2, 0, -2 * self._c1 * self._F], + [1, -self._F**2, -self._F**2, -2 * self._F * (self._c1 + self._c2)], + ] jac = np.asarray(jac, dtype=np.float64) return spa.coo_matrix(jac) class TestExternalInputOutputModel(unittest.TestCase): - def test_interface(self): # weird, this is really a test of the test class above # but we could add code later, so... @@ -79,35 +87,43 @@ def test_interface(self): def test_pyomo_external_model(self): m = pyo.ConcreteModel() - m.Pin = pyo.Var(initialize=100, bounds=(0,None)) - m.c1 = pyo.Var(initialize=1.0, bounds=(0,None)) - m.c2 = pyo.Var(initialize=1.0, bounds=(0,None)) - m.F = pyo.Var(initialize=10, bounds=(0,None)) + m.Pin = pyo.Var(initialize=100, bounds=(0, None)) + m.c1 = pyo.Var(initialize=1.0, bounds=(0, None)) + m.c2 = pyo.Var(initialize=1.0, bounds=(0, None)) + m.F = pyo.Var(initialize=10, bounds=(0, None)) m.P1 = pyo.Var() m.P2 = pyo.Var() - m.F_con = pyo.Constraint(expr = m.F == 10) - m.Pin_con = pyo.Constraint(expr = m.Pin == 100) + m.F_con = pyo.Constraint(expr=m.F == 10) + m.Pin_con = pyo.Constraint(expr=m.Pin == 100) # simple parameter estimation test - m.obj = pyo.Objective(expr= (m.P1 - 90)**2 + (m.P2 - 40)**2) + m.obj = pyo.Objective(expr=(m.P1 - 90) ** 2 + (m.P2 - 40) ** 2) - cyipopt_problem = \ - PyomoExternalCyIpoptProblem(m, - PressureDropModel(), - [m.Pin, m.c1, m.c2, m.F], - [m.P1, m.P2] - ) + cyipopt_problem = PyomoExternalCyIpoptProblem( + m, PressureDropModel(), [m.Pin, m.c1, m.c2, m.F], [m.P1, m.P2] + ) # check that the dummy variable is initialized - expected_dummy_var_value = pyo.value(m.Pin) + pyo.value(m.c1) + pyo.value(m.c2) + pyo.value(m.F) \ - + 0 + 0 - # + pyo.value(m.P1) + pyo.value(m.P2) # not initialized - therefore should use zero - self.assertAlmostEqual(pyo.value(m._dummy_variable_CyIpoptPyomoExNLP), expected_dummy_var_value) + # + pyo.value(m.P1) + pyo.value(m.P2) + # not initialized - therefore should use zero + expected_dummy_var_value = ( + pyo.value(m.Pin) + + pyo.value(m.c1) + + pyo.value(m.c2) + + pyo.value(m.F) + + 0 + + 0 + ) + self.assertAlmostEqual( + pyo.value(m._dummy_variable_CyIpoptPyomoExNLP), expected_dummy_var_value + ) # solve the problem - solver = CyIpoptSolver(cyipopt_problem, {'hessian_approximation':'limited-memory'}) + solver = CyIpoptSolver( + cyipopt_problem, {'hessian_approximation': 'limited-memory'} + ) x, info = solver.solve(tee=False) cyipopt_problem.load_x_into_pyomo(x) self.assertAlmostEqual(pyo.value(m.c1), 0.1, places=5) @@ -115,52 +131,55 @@ def test_pyomo_external_model(self): def test_pyomo_external_model_scaling(self): m = pyo.ConcreteModel() - m.Pin = pyo.Var(initialize=100, bounds=(0,None)) - m.c1 = pyo.Var(initialize=1.0, bounds=(0,None)) - m.c2 = pyo.Var(initialize=1.0, bounds=(0,None)) - m.F = pyo.Var(initialize=10, bounds=(0,None)) + m.Pin = pyo.Var(initialize=100, bounds=(0, None)) + m.c1 = pyo.Var(initialize=1.0, bounds=(0, None)) + m.c2 = pyo.Var(initialize=1.0, bounds=(0, None)) + m.F = pyo.Var(initialize=10, bounds=(0, None)) m.P1 = pyo.Var() m.P2 = pyo.Var() - m.F_con = pyo.Constraint(expr = m.F == 10) - m.Pin_con = pyo.Constraint(expr = m.Pin == 100) + m.F_con = pyo.Constraint(expr=m.F == 10) + m.Pin_con = pyo.Constraint(expr=m.Pin == 100) # simple parameter estimation test - m.obj = pyo.Objective(expr= (m.P1 - 90)**2 + (m.P2 - 40)**2) + m.obj = pyo.Objective(expr=(m.P1 - 90) ** 2 + (m.P2 - 40) ** 2) # set scaling parameters for the pyomo variables and constraints m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - m.scaling_factor[m.obj] = 0.1 # scale the objective - m.scaling_factor[m.Pin] = 2.0 # scale the variable - m.scaling_factor[m.c1] = 3.0 # scale the variable - m.scaling_factor[m.c2] = 4.0 # scale the variable - m.scaling_factor[m.F] = 5.0 # scale the variable - m.scaling_factor[m.P1] = 6.0 # scale the variable - m.scaling_factor[m.P2] = 7.0 # scale the variable - m.scaling_factor[m.F_con] = 8.0 # scale the pyomo constraint - m.scaling_factor[m.Pin_con] = 9.0 # scale the pyomo constraint - - cyipopt_problem = \ - PyomoExternalCyIpoptProblem(pyomo_model=m, - ex_input_output_model=PressureDropModel(), - inputs=[m.Pin, m.c1, m.c2, m.F], - outputs=[m.P1, m.P2], - outputs_eqn_scaling=[10.0, 11.0], - nl_file_options={'file_determinism': 3}, - ) + m.scaling_factor[m.obj] = 0.1 # scale the objective + m.scaling_factor[m.Pin] = 2.0 # scale the variable + m.scaling_factor[m.c1] = 3.0 # scale the variable + m.scaling_factor[m.c2] = 4.0 # scale the variable + m.scaling_factor[m.F] = 5.0 # scale the variable + m.scaling_factor[m.P1] = 6.0 # scale the variable + m.scaling_factor[m.P2] = 7.0 # scale the variable + m.scaling_factor[m.F_con] = 8.0 # scale the pyomo constraint + m.scaling_factor[m.Pin_con] = 9.0 # scale the pyomo constraint + + cyipopt_problem = PyomoExternalCyIpoptProblem( + pyomo_model=m, + ex_input_output_model=PressureDropModel(), + inputs=[m.Pin, m.c1, m.c2, m.F], + outputs=[m.P1, m.P2], + outputs_eqn_scaling=[10.0, 11.0], + nl_file_options={'file_determinism': 2}, + ) # solve the problem - options={'hessian_approximation':'limited-memory', - 'nlp_scaling_method': 'user-scaling', - 'output_file': '_cyipopt-pyomo-ext-scaling.log', - 'file_print_level':10, - 'max_iter': 0} + options = { + 'hessian_approximation': 'limited-memory', + 'nlp_scaling_method': 'user-scaling', + 'output_file': '_cyipopt-pyomo-ext-scaling.log', + 'file_print_level': 10, + 'max_iter': 0, + } solver = CyIpoptSolver(cyipopt_problem, options=options) x, info = solver.solve(tee=False) with open('_cyipopt-pyomo-ext-scaling.log', 'r') as fd: solver_trace = fd.read() + cyipopt_problem.close() os.remove('_cyipopt-pyomo-ext-scaling.log') self.assertIn('nlp_scaling_method = user-scaling', solver_trace) @@ -186,58 +205,62 @@ def test_pyomo_external_model_scaling(self): def test_pyomo_external_model_ndarray_scaling(self): m = pyo.ConcreteModel() - m.Pin = pyo.Var(initialize=100, bounds=(0,None)) - m.c1 = pyo.Var(initialize=1.0, bounds=(0,None)) - m.c2 = pyo.Var(initialize=1.0, bounds=(0,None)) - m.F = pyo.Var(initialize=10, bounds=(0,None)) + m.Pin = pyo.Var(initialize=100, bounds=(0, None)) + m.c1 = pyo.Var(initialize=1.0, bounds=(0, None)) + m.c2 = pyo.Var(initialize=1.0, bounds=(0, None)) + m.F = pyo.Var(initialize=10, bounds=(0, None)) m.P1 = pyo.Var() m.P2 = pyo.Var() - m.F_con = pyo.Constraint(expr = m.F == 10) - m.Pin_con = pyo.Constraint(expr = m.Pin == 100) + m.F_con = pyo.Constraint(expr=m.F == 10) + m.Pin_con = pyo.Constraint(expr=m.Pin == 100) # simple parameter estimation test - m.obj = pyo.Objective(expr= (m.P1 - 90)**2 + (m.P2 - 40)**2) + m.obj = pyo.Objective(expr=(m.P1 - 90) ** 2 + (m.P2 - 40) ** 2) # set scaling parameters for the pyomo variables and constraints m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - m.scaling_factor[m.obj] = 0.1 # scale the objective - m.scaling_factor[m.Pin] = 2.0 # scale the variable - m.scaling_factor[m.c1] = 3.0 # scale the variable - m.scaling_factor[m.c2] = 4.0 # scale the variable - m.scaling_factor[m.F] = 5.0 # scale the variable - m.scaling_factor[m.P1] = 6.0 # scale the variable - m.scaling_factor[m.P2] = 7.0 # scale the variable - m.scaling_factor[m.F_con] = 8.0 # scale the pyomo constraint - m.scaling_factor[m.Pin_con] = 9.0 # scale the pyomo constraint + m.scaling_factor[m.obj] = 0.1 # scale the objective + m.scaling_factor[m.Pin] = 2.0 # scale the variable + m.scaling_factor[m.c1] = 3.0 # scale the variable + m.scaling_factor[m.c2] = 4.0 # scale the variable + m.scaling_factor[m.F] = 5.0 # scale the variable + m.scaling_factor[m.P1] = 6.0 # scale the variable + m.scaling_factor[m.P2] = 7.0 # scale the variable + m.scaling_factor[m.F_con] = 8.0 # scale the pyomo constraint + m.scaling_factor[m.Pin_con] = 9.0 # scale the pyomo constraint # test that this all works with ndarray input as well - cyipopt_problem = \ - PyomoExternalCyIpoptProblem( - pyomo_model=m, - ex_input_output_model=PressureDropModel(), - inputs=[m.Pin, m.c1, m.c2, m.F], - outputs=[m.P1, m.P2], - outputs_eqn_scaling=np.asarray([10.0, 11.0], dtype=np.float64), - nl_file_options={'file_determinism': 3}, - ) + cyipopt_problem = PyomoExternalCyIpoptProblem( + pyomo_model=m, + ex_input_output_model=PressureDropModel(), + inputs=[m.Pin, m.c1, m.c2, m.F], + outputs=[m.P1, m.P2], + outputs_eqn_scaling=np.asarray([10.0, 11.0], dtype=np.float64), + nl_file_options={'file_determinism': 2}, + ) # solve the problem - options={'hessian_approximation':'limited-memory', - 'nlp_scaling_method': 'user-scaling', - 'output_file': '_cyipopt-pyomo-ext-scaling-ndarray.log', - 'file_print_level':10, - 'max_iter': 0} + options = { + 'hessian_approximation': 'limited-memory', + 'nlp_scaling_method': 'user-scaling', + 'output_file': '_cyipopt-pyomo-ext-scaling-ndarray.log', + 'file_print_level': 10, + 'max_iter': 0, + } solver = CyIpoptSolver(cyipopt_problem, options=options) x, info = solver.solve(tee=False) with open('_cyipopt-pyomo-ext-scaling-ndarray.log', 'r') as fd: solver_trace = fd.read() + cyipopt_problem.close() os.remove('_cyipopt-pyomo-ext-scaling-ndarray.log') self.assertIn('nlp_scaling_method = user-scaling', solver_trace) - self.assertIn('output_file = _cyipopt-pyomo-ext-scaling-ndarray.log', solver_trace) + self.assertIn( + 'output_file = _cyipopt-pyomo-ext-scaling-ndarray.log', solver_trace + ) self.assertIn('objective scaling factor = 0.1', solver_trace) self.assertIn('x scaling provided', solver_trace) self.assertIn('c scaling provided', solver_trace) @@ -259,39 +282,51 @@ def test_pyomo_external_model_ndarray_scaling(self): def test_pyomo_external_model_dummy_var_initialization(self): m = pyo.ConcreteModel() - m.Pin = pyo.Var(initialize=100, bounds=(0,None)) - m.c1 = pyo.Var(initialize=1.0, bounds=(0,None)) - m.c2 = pyo.Var(initialize=1.0, bounds=(0,None)) - m.F = pyo.Var(initialize=10, bounds=(0,None)) + m.Pin = pyo.Var(initialize=100, bounds=(0, None)) + m.c1 = pyo.Var(initialize=1.0, bounds=(0, None)) + m.c2 = pyo.Var(initialize=1.0, bounds=(0, None)) + m.F = pyo.Var(initialize=10, bounds=(0, None)) m.P1 = pyo.Var(initialize=75.0) m.P2 = pyo.Var(initialize=50.0) - m.F_con = pyo.Constraint(expr = m.F == 10) - m.Pin_con = pyo.Constraint(expr = m.Pin == 100) + m.F_con = pyo.Constraint(expr=m.F == 10) + m.Pin_con = pyo.Constraint(expr=m.Pin == 100) # simple parameter estimation test - m.obj = pyo.Objective(expr= (m.P1 - 90)**2 + (m.P2 - 40)**2) + m.obj = pyo.Objective(expr=(m.P1 - 90) ** 2 + (m.P2 - 40) ** 2) - cyipopt_problem = \ - PyomoExternalCyIpoptProblem(m, - PressureDropModel(), - [m.Pin, m.c1, m.c2, m.F], - [m.P1, m.P2] - ) + cyipopt_problem = PyomoExternalCyIpoptProblem( + m, PressureDropModel(), [m.Pin, m.c1, m.c2, m.F], [m.P1, m.P2] + ) # check that the dummy variable is initialized - expected_dummy_var_value = pyo.value(m.Pin) + pyo.value(m.c1) + pyo.value(m.c2) + pyo.value(m.F) \ - + pyo.value(m.P1) + pyo.value(m.P2) - self.assertAlmostEqual(pyo.value(m._dummy_variable_CyIpoptPyomoExNLP), expected_dummy_var_value) + expected_dummy_var_value = ( + pyo.value(m.Pin) + + pyo.value(m.c1) + + pyo.value(m.c2) + + pyo.value(m.F) + + pyo.value(m.P1) + + pyo.value(m.P2) + ) + self.assertAlmostEqual( + pyo.value(m._dummy_variable_CyIpoptPyomoExNLP), expected_dummy_var_value + ) # check that the dummy constraint is satisfied - self.assertAlmostEqual(pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.body),pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.lower)) - self.assertAlmostEqual(pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.body),pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.upper)) + self.assertAlmostEqual( + pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.body), + pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.lower), + ) + self.assertAlmostEqual( + pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.body), + pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.upper), + ) # solve the problem - solver = CyIpoptSolver(cyipopt_problem, {'hessian_approximation':'limited-memory'}) + solver = CyIpoptSolver( + cyipopt_problem, {'hessian_approximation': 'limited-memory'} + ) x, info = solver.solve(tee=False) cyipopt_problem.load_x_into_pyomo(x) self.assertAlmostEqual(pyo.value(m.c1), 0.1, places=5) self.assertAlmostEqual(pyo.value(m.c2), 0.5, places=5) - diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_scipy_solvers.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_scipy_solvers.py new file mode 100644 index 00000000000..6636dc3d6e2 --- /dev/null +++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_scipy_solvers.py @@ -0,0 +1,552 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest +from pyomo.common.dependencies import scipy, scipy_available +import pyomo.environ as pyo + +if not scipy_available: + raise unittest.SkipTest("SciPy is needed to test the SciPy solvers") + +from pyomo.contrib.pynumero.asl import AmplInterface +from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP +from pyomo.contrib.pynumero.algorithms.solvers.square_solver_base import ( + SquareNlpSolverBase, +) +from pyomo.contrib.pynumero.algorithms.solvers.scipy_solvers import ( + FsolveNlpSolver, + RootNlpSolver, + PyomoScipySolver, + SecantNewtonNlpSolver, +) + + +def make_simple_model(): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=[1, 2, 3]) + m.x = pyo.Var(m.I, initialize=1.0) + m.con1 = pyo.Constraint(expr=m.x[1] ** 2 + m.x[2] ** 2 + m.x[3] ** 2 == 1) + m.con2 = pyo.Constraint(expr=2 * m.x[1] + 3 * m.x[2] - 4 * m.x[3] == 0) + m.con3 = pyo.Constraint(expr=m.x[1] == 2 * pyo.exp(m.x[2] / m.x[3])) + m.obj = pyo.Objective(expr=0.0) + nlp = PyomoNLP(m) + return m, nlp + + +def make_scalar_model(): + m = pyo.ConcreteModel() + m.x = pyo.Var(initialize=1.0, bounds=(0.0, None)) + m.con = pyo.Constraint(expr=(m.x - 2) ** 3 - 5 * m.x == 0) + m.obj = pyo.Objective(expr=0.0) + nlp = PyomoNLP(m) + return m, nlp + + +def make_linear_scalar_model(): + m = pyo.ConcreteModel() + m.x = pyo.Var(initialize=1.0, bounds=(0.0, None)) + m.con = pyo.Constraint(expr=-12.5 * m.x + 30.1 == 0) + m.obj = pyo.Objective(expr=0.0) + nlp = PyomoNLP(m) + return m, nlp + + +@unittest.skipUnless(AmplInterface.available(), "AmplInterface is not available") +class TestSquareSolverBase(unittest.TestCase): + def test_not_implemented_solve(self): + m, nlp = make_simple_model() + solver = SquareNlpSolverBase(nlp) + msg = "has not implemented the solve method" + with self.assertRaisesRegex(NotImplementedError, msg): + solver.solve() + + def test_not_square(self): + m, _ = make_simple_model() + m.con4 = pyo.Constraint(expr=m.x[1] == m.x[2]) + nlp = PyomoNLP(m) + msg = "same numbers of variables as equality constraints" + with self.assertRaisesRegex(RuntimeError, msg): + solver = SquareNlpSolverBase(nlp) + + def test_bounds_and_ineq_okay(self): + m, _ = make_simple_model() + m.x[1].setlb(0.0) + m.x[1].setub(1.0) + m.con4 = pyo.Constraint(expr=m.x[1] <= m.x[2]) + nlp = PyomoNLP(m) + # Just construct the solver and get no error + solver = SquareNlpSolverBase(nlp) + + +@unittest.skipUnless(AmplInterface.available(), "AmplInterface is not available") +class TestFsolveNLP(unittest.TestCase): + def test_solve_simple_nlp(self): + m, nlp = make_simple_model() + solver = FsolveNlpSolver(nlp, options=dict(xtol=1e-9, maxfev=20, tol=1e-8)) + x, info, ier, msg = solver.solve() + self.assertEqual(ier, 1) + + variables = [m.x[1], m.x[2], m.x[3]] + predicted_xorder = [0.92846891, -0.22610731, 0.29465397] + indices = nlp.get_primal_indices(variables) + nlp_to_x_indices = [None] * len(variables) + for i, j in enumerate(indices): + nlp_to_x_indices[j] = i + predicted_nlporder = [predicted_xorder[i] for i in nlp_to_x_indices] + self.assertStructuredAlmostEqual(nlp.get_primals().tolist(), predicted_nlporder) + + def test_solve_max_iter(self): + m, nlp = make_simple_model() + solver = FsolveNlpSolver(nlp, options=dict(xtol=1e-9, maxfev=10)) + x, info, ier, msg = solver.solve() + self.assertNotEqual(ier, 1) + self.assertIn("has reached maxfev", msg) + + def test_solve_too_tight_tol(self): + m, nlp = make_simple_model() + solver = FsolveNlpSolver(nlp, options=dict(xtol=1e-3, maxfev=20, tol=1e-8)) + msg = "does not satisfy the function tolerance" + with self.assertRaisesRegex(RuntimeError, msg): + x, info, ier, msg = solver.solve() + + +@unittest.skipUnless(AmplInterface.available(), "AmplInterface is not available") +class TestPyomoScipySolver(unittest.TestCase): + def test_available_and_version(self): + solver = PyomoScipySolver() + self.assertTrue(solver.available()) + self.assertTrue(solver.license_is_valid()) + + sp_version = tuple(int(num) for num in scipy.__version__.split('.')) + self.assertEqual(sp_version, solver.version()) + + +@unittest.skipUnless(AmplInterface.available(), "AmplInterface is not available") +class TestFsolvePyomo(unittest.TestCase): + def test_available_and_version(self): + solver = pyo.SolverFactory("scipy.fsolve") + self.assertTrue(solver.available()) + self.assertTrue(solver.license_is_valid()) + + sp_version = tuple(int(num) for num in scipy.__version__.split('.')) + self.assertEqual(sp_version, solver.version()) + + def test_solve_simple_nlp(self): + m, _ = make_simple_model() + solver = pyo.SolverFactory("scipy.fsolve") + + # Just want to make sure this option works + solver.set_options(dict(full_output=False)) + + results = solver.solve(m) + solution = [m.x[1].value, m.x[2].value, m.x[3].value] + predicted = [0.92846891, -0.22610731, 0.29465397] + self.assertStructuredAlmostEqual(solution, predicted) + + def test_solve_results_obj(self): + m, _ = make_simple_model() + solver = pyo.SolverFactory("scipy.fsolve") + results = solver.solve(m) + solution = [m.x[1].value, m.x[2].value, m.x[3].value] + predicted = [0.92846891, -0.22610731, 0.29465397] + self.assertStructuredAlmostEqual(solution, predicted) + + self.assertEqual(results.problem.number_of_constraints, 3) + self.assertEqual(results.problem.number_of_variables, 3) + + # Note that the solver returns termination condition feasible + # rather than optimal... + self.assertEqual( + results.solver.termination_condition, pyo.TerminationCondition.feasible + ) + msg = "Solver failed to return an optimal solution" + with self.assertRaisesRegex(RuntimeError, msg): + pyo.assert_optimal_termination(results) + self.assertEqual(results.solver.status, pyo.SolverStatus.ok) + + def test_solve_max_iter(self): + m, _ = make_simple_model() + solver = pyo.SolverFactory("scipy.fsolve") + solver.set_options(dict(xtol=1e-9, maxfev=10)) + res = solver.solve(m) + self.assertNotEqual(res.solver.return_code, 1) + self.assertIn("has reached maxfev", res.solver.message) + + def test_solve_too_tight_tol(self): + m, _ = make_simple_model() + solver = pyo.SolverFactory( + "scipy.fsolve", options=dict(xtol=1e-3, maxfev=20, tol=1e-8) + ) + msg = "does not satisfy the function tolerance" + with self.assertRaisesRegex(RuntimeError, msg): + res = solver.solve(m) + + def test_with_scalar_model_bad_starting_point(self): + # NOTE: fsolve fails to solve this very simple scalar-valued + # equation with a default starting point (x=1). This may be + # worth looking into. + m, _ = make_scalar_model() + solver = pyo.SolverFactory("scipy.fsolve") + res = solver.solve(m) + predicted_x = 4.90547401 + self.assertNotEqual(predicted_x, m.x.value) + + def test_with_scalar_model_good_starting_point(self): + # NOTE: fsolve can solve this equation with a good starting point. + m, _ = make_scalar_model() + m.x.set_value(4.0) + solver = pyo.SolverFactory("scipy.fsolve") + res = solver.solve(m) + predicted_x = 4.90547401 + self.assertAlmostEqual(predicted_x, m.x.value) + + +@unittest.skipUnless(AmplInterface.available(), "AmplInterface is not available") +class TestRootNLP(unittest.TestCase): + def test_solve_simple_nlp(self): + m, nlp = make_simple_model() + solver = RootNlpSolver(nlp) + results = solver.solve() + self.assertTrue(results.success) + + variables = [m.x[1], m.x[2], m.x[3]] + predicted_xorder = [0.92846891, -0.22610731, 0.29465397] + indices = nlp.get_primal_indices(variables) + nlp_to_x_indices = [None] * len(variables) + for i, j in enumerate(indices): + nlp_to_x_indices[j] = i + predicted_nlporder = [predicted_xorder[i] for i in nlp_to_x_indices] + self.assertStructuredAlmostEqual(results.x.tolist(), predicted_nlporder) + + def test_solve_simple_nlp_levenberg_marquardt(self): + m, nlp = make_simple_model() + solver = RootNlpSolver(nlp, options=dict(method="lm")) + results = solver.solve() + self.assertTrue(results.success) + + variables = [m.x[1], m.x[2], m.x[3]] + predicted_xorder = [0.92846891, -0.22610731, 0.29465397] + indices = nlp.get_primal_indices(variables) + nlp_to_x_indices = [None] * len(variables) + for i, j in enumerate(indices): + nlp_to_x_indices[j] = i + predicted_nlporder = [predicted_xorder[i] for i in nlp_to_x_indices] + self.assertStructuredAlmostEqual(results.x.tolist(), predicted_nlporder) + + +@unittest.skipUnless(AmplInterface.available(), "AmplInterface is not available") +class TestRootPyomo(unittest.TestCase): + def test_available_and_version(self): + solver = pyo.SolverFactory("scipy.root") + self.assertTrue(solver.available()) + self.assertTrue(solver.license_is_valid()) + + sp_version = tuple(int(num) for num in scipy.__version__.split('.')) + self.assertEqual(sp_version, solver.version()) + + def test_solve_simple_nlp(self): + m, _ = make_simple_model() + solver = pyo.SolverFactory("scipy.root") + + solver.set_options(dict(tol=1e-7)) + + results = solver.solve(m) + solution = [m.x[1].value, m.x[2].value, m.x[3].value] + predicted = [0.92846891, -0.22610731, 0.29465397] + self.assertStructuredAlmostEqual(solution, predicted) + + def test_solve_simple_nlp_levenberg_marquardt(self): + m, _ = make_simple_model() + solver = pyo.SolverFactory("scipy.root") + + solver.set_options(dict(tol=1e-7, method="lm")) + + results = solver.solve(m) + solution = [m.x[1].value, m.x[2].value, m.x[3].value] + predicted = [0.92846891, -0.22610731, 0.29465397] + self.assertStructuredAlmostEqual(solution, predicted) + + def test_bad_method(self): + m, _ = make_simple_model() + solver = pyo.SolverFactory("scipy.root") + + solver.set_options(dict(tol=1e-7, method="some-solver")) + with self.assertRaisesRegex(ValueError, "not in domain"): + results = solver.solve(m) + + def test_solver_results_obj(self): + m, _ = make_simple_model() + solver = pyo.SolverFactory("scipy.root") + + solver.set_options(dict(tol=1e-7)) + + results = solver.solve(m) + solution = [m.x[1].value, m.x[2].value, m.x[3].value] + predicted = [0.92846891, -0.22610731, 0.29465397] + self.assertStructuredAlmostEqual(solution, predicted) + + self.assertEqual(results.problem.number_of_constraints, 3) + self.assertEqual(results.problem.number_of_variables, 3) + self.assertEqual(results.solver.return_code, 1) + self.assertEqual( + results.solver.termination_condition, pyo.TerminationCondition.feasible + ) + self.assertEqual(results.solver.message, "The solution converged.") + + def test_solver_results_obj_levenberg_marquardt(self): + m, _ = make_simple_model() + solver = pyo.SolverFactory("scipy.root") + + solver.set_options(dict(tol=1e-7, method="lm")) + + results = solver.solve(m) + solution = [m.x[1].value, m.x[2].value, m.x[3].value] + predicted = [0.92846891, -0.22610731, 0.29465397] + self.assertStructuredAlmostEqual(solution, predicted) + + self.assertEqual(results.problem.number_of_constraints, 3) + self.assertEqual(results.problem.number_of_variables, 3) + + # NOTE: Return code (the scipy OptimizeResult.status field) is not + # documented in SciPy 1.9.3, so we cannot assert anything about it. + # self.assertEqual(results.solver.return_code, 1) + + self.assertEqual( + results.solver.termination_condition, pyo.TerminationCondition.feasible + ) + self.assertIn( + "The relative error between two consecutive iterates", + results.solver.message, + ) + + +@unittest.skipUnless(AmplInterface.available(), "AmplInterface is not available") +class TestNewtonPyomo(unittest.TestCase): + def test_available(self): + solver = pyo.SolverFactory("scipy.newton") + self.assertTrue(solver.available()) + self.assertTrue(solver.license_is_valid()) + + sp_version = tuple(int(num) for num in scipy.__version__.split('.')) + self.assertEqual(sp_version, solver.version()) + + def test_solve(self): + m, _ = make_scalar_model() + solver = pyo.SolverFactory("scipy.newton") + results = solver.solve(m, tee=True) + predicted_x = 4.90547401 + self.assertAlmostEqual(predicted_x, m.x.value) + + def test_solve_doesnt_converge(self): + m, _ = make_scalar_model() + m.x.set_value(3e10) + solver = pyo.SolverFactory("scipy.newton") + with self.assertRaisesRegex(RuntimeError, "Failed to converge"): + # scipy.optimize.newton raises a RuntimeError when it fails to + # converge to a solution (contrary to fsolve, which happily + # returns the result). This behavior makes it hard to test + # for cases where TerminationCondition is not feasible. + # Should the underlying scipy.optimize.newton call be wrapped + # with try/except to catch this case and return an infeasible + # TerminationCondition? + results = solver.solve(m) + + def test_too_many_iter(self): + m, _ = make_scalar_model() + solver = pyo.SolverFactory("scipy.newton") + solver.set_options({"maxiter": 5}) + with self.assertRaisesRegex(RuntimeError, "Failed to converge"): + results = solver.solve(m) + + def test_results_object(self): + m, _ = make_scalar_model() + solver = pyo.SolverFactory("scipy.newton") + results = solver.solve(m) + predicted_x = 4.90547401 + self.assertAlmostEqual(predicted_x, m.x.value) + + # Check results.problem + self.assertEqual(results.problem.number_of_constraints, 1) + self.assertEqual(results.problem.number_of_variables, 1) + self.assertEqual(results.problem.number_of_continuous_variables, 1) + self.assertEqual(results.problem.number_of_binary_variables, 0) + self.assertEqual(results.problem.number_of_integer_variables, 0) + + # Assert some reasonable things about the returned results + self.assertGreater(results.solver.wallclock_time, 0.0) + self.assertEqual( + results.solver.termination_condition, pyo.TerminationCondition.feasible + ) + self.assertEqual(results.solver.status, pyo.SolverStatus.ok) + self.assertGreater(results.solver.number_of_function_evaluations, 0) + + def test_results_object_without_full_output(self): + m, _ = make_scalar_model() + solver = pyo.SolverFactory("scipy.newton") + solver.set_options(dict(full_output=False)) + + results = solver.solve(m) + predicted_x = 4.90547401 + self.assertAlmostEqual(predicted_x, m.x.value) + + # Check results.problem + self.assertEqual(results.problem.number_of_constraints, 1) + self.assertEqual(results.problem.number_of_variables, 1) + self.assertEqual(results.problem.number_of_continuous_variables, 1) + self.assertEqual(results.problem.number_of_binary_variables, 0) + self.assertEqual(results.problem.number_of_integer_variables, 0) + + # Assert some reasonable things about the returned results + self.assertGreater(results.solver.wallclock_time, 0.0) + + # Now assert that termination condition and solver status have + # not been reported. + # + # This will break if Pyomo changes its default behavior. + self.assertIs( + results.solver.termination_condition, pyo.TerminationCondition.unknown + ) + # The default SolverStatus appears to be ok... + # self.assertIsNot(results.solver.status, pyo.SolverStatus.ok) + + with self.assertRaises(AttributeError): + # This attribute has no default, I guess. + # Assert that it hasn't been set. + n_eval = results.solver.number_of_function_evaluations + + +@unittest.skipUnless(AmplInterface.available(), "AmplInterface is not available") +class TestSecantNewton(unittest.TestCase): + def test_inherited_options_skipped(self): + m, nlp = make_scalar_model() + options = SecantNewtonNlpSolver.OPTIONS + self.assertNotIn("maxiter", options) + self.assertNotIn("secant", options) + self.assertIn("secant_iter", options) + self.assertIn("newton_iter", options) + + with self.assertRaisesRegex(ValueError, "implicit.*keys are not allowed"): + solver = SecantNewtonNlpSolver(nlp, options={"maxiter": 10}) + + +@unittest.skipUnless(AmplInterface.available(), "AmplInterface is not available") +class TestSecantNewtonPyomo(unittest.TestCase): + def test_available(self): + solver = pyo.SolverFactory("scipy.secant-newton") + self.assertTrue(solver.available()) + self.assertTrue(solver.license_is_valid()) + + sp_version = tuple(int(num) for num in scipy.__version__.split('.')) + self.assertEqual(sp_version, solver.version()) + + def test_solve(self): + m, _ = make_scalar_model() + solver = pyo.SolverFactory("scipy.secant-newton") + results = solver.solve(m) + predicted_x = 4.90547401 + self.assertAlmostEqual(predicted_x, m.x.value) + + self.assertFalse(solver.converged_with_secant()) + + def test_solve_doesnt_converge(self): + m, _ = make_scalar_model() + m.x.set_value(3e10) + solver = pyo.SolverFactory("scipy.secant-newton") + with self.assertRaisesRegex(RuntimeError, "Failed to converge"): + # scipy.optimize.newton raises a RuntimeError when it fails to + # converge to a solution (contrary to fsolve, which happily + # returns the result). This behavior makes it hard to test + # for cases where TerminationCondition is not feasible. + # Should the underlying scipy.optimize.newton call be wrapped + # with try/except to catch this case and return an infeasible + # TerminationCondition? + results = solver.solve(m) + + self.assertFalse(solver.converged_with_secant()) + + def test_too_many_iter(self): + m, _ = make_scalar_model() + solver = pyo.SolverFactory("scipy.secant-newton") + + # We now have a different API (from SolverFactory("newton"), + # in that we specify the number of iterations for the newton + # or secant sub-solvers. + solver.set_options({"newton_iter": 5}) + with self.assertRaisesRegex(RuntimeError, "Failed to converge"): + results = solver.solve(m) + + def test_results_object(self): + m, _ = make_scalar_model() + solver = pyo.SolverFactory("scipy.secant-newton") + results = solver.solve(m) + predicted_x = 4.90547401 + self.assertAlmostEqual(predicted_x, m.x.value) + + # Check results.problem + self.assertEqual(results.problem.number_of_constraints, 1) + self.assertEqual(results.problem.number_of_variables, 1) + self.assertEqual(results.problem.number_of_continuous_variables, 1) + self.assertEqual(results.problem.number_of_binary_variables, 0) + self.assertEqual(results.problem.number_of_integer_variables, 0) + + # Assert some reasonable things about the returned results + self.assertGreater(results.solver.wallclock_time, 0.0) + self.assertEqual( + results.solver.termination_condition, pyo.TerminationCondition.feasible + ) + self.assertEqual(results.solver.status, pyo.SolverStatus.ok) + self.assertGreater(results.solver.number_of_function_evaluations, 0) + + def test_results_object_without_full_output(self): + m, _ = make_scalar_model() + solver = pyo.SolverFactory("scipy.secant-newton") + solver.set_options(dict(full_output=False)) + + results = solver.solve(m) + predicted_x = 4.90547401 + self.assertAlmostEqual(predicted_x, m.x.value) + + # Check results.problem + self.assertEqual(results.problem.number_of_constraints, 1) + self.assertEqual(results.problem.number_of_variables, 1) + self.assertEqual(results.problem.number_of_continuous_variables, 1) + self.assertEqual(results.problem.number_of_binary_variables, 0) + self.assertEqual(results.problem.number_of_integer_variables, 0) + + # Assert some reasonable things about the returned results + self.assertGreater(results.solver.wallclock_time, 0.0) + + # Now assert that termination condition and solver status have + # not been reported. + # + # This will break if Pyomo changes its default behavior. + self.assertIs( + results.solver.termination_condition, pyo.TerminationCondition.unknown + ) + # The default SolverStatus appears to be ok... + # self.assertIsNot(results.solver.status, pyo.SolverStatus.ok) + + with self.assertRaises(AttributeError): + # This attribute has no default, I guess. + # Assert that it hasn't been set. + n_eval = results.solver.number_of_function_evaluations + + def test_solve_linear(self): + m, _ = make_linear_scalar_model() + solver = pyo.SolverFactory("scipy.secant-newton") + results = solver.solve(m) + self.assertAlmostEqual(m.x.value, 30.1 / 12.5) + # This linear equation converges with the secant subsolver. + self.assertTrue(solver.converged_with_secant()) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/pynumero/asl.py b/pyomo/contrib/pynumero/asl.py index c87f3d80528..a28741fb230 100644 --- a/pyomo/contrib/pynumero/asl.py +++ b/pyomo/contrib/pynumero/asl.py @@ -11,6 +11,7 @@ from pyomo.common.fileutils import find_library from pyomo.common.dependencies import numpy as np +from pyomo.contrib.pynumero.exceptions import PyNumeroEvaluationError import ctypes import logging import os @@ -19,17 +20,19 @@ CURRENT_INTERFACE_VERSION = 3 + class _NotSet: pass + def _LoadASLInterface(libname): ASLib = ctypes.cdll.LoadLibrary(libname) # define 1d array array_1d_double = np.ctypeslib.ndpointer( - dtype=np.double, ndim=1, flags='CONTIGUOUS') - array_1d_int = np.ctypeslib.ndpointer( - dtype=np.intc, ndim=1, flags='CONTIGUOUS') + dtype=np.double, ndim=1, flags='CONTIGUOUS' + ) + array_1d_int = np.ctypeslib.ndpointer(dtype=np.intc, ndim=1, flags='CONTIGUOUS') # library version try: @@ -49,13 +52,16 @@ def _LoadASLInterface(libname): ASLib.EXTERNAL_AmplInterface_new.restype = ctypes.c_void_p if interface_version >= 2: - ASLib.EXTERNAL_AmplInterface_new_file.argtypes = [ctypes.c_char_p, ctypes.c_char_p] + ASLib.EXTERNAL_AmplInterface_new_file.argtypes = [ + ctypes.c_char_p, + ctypes.c_char_p, + ] else: ASLib.EXTERNAL_AmplInterface_new_file.argtypes = [ctypes.c_char_p] ASLib.EXTERNAL_AmplInterface_new_file.restype = ctypes.c_void_p - #ASLib.EXTERNAL_AmplInterface_new_str.argtypes = [ctypes.c_char_p] - #ASLib.EXTERNAL_AmplInterface_new_str.restype = ctypes.c_void_p + # ASLib.EXTERNAL_AmplInterface_new_str.argtypes = [ctypes.c_char_p] + # ASLib.EXTERNAL_AmplInterface_new_str.restype = ctypes.c_void_p # number of variables ASLib.EXTERNAL_AmplInterface_n_vars.argtypes = [ctypes.c_void_p] @@ -74,83 +80,107 @@ def _LoadASLInterface(libname): ASLib.EXTERNAL_AmplInterface_nnz_hessian_lag.restype = ctypes.c_int # lower bounds on x - ASLib.EXTERNAL_AmplInterface_x_lower_bounds.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_x_lower_bounds.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_x_lower_bounds.restype = None # upper bounds on x - ASLib.EXTERNAL_AmplInterface_x_upper_bounds.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_x_upper_bounds.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_x_upper_bounds.restype = None # lower bounds on g - ASLib.EXTERNAL_AmplInterface_g_lower_bounds.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_g_lower_bounds.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_g_lower_bounds.restype = None # upper bounds on g - ASLib.EXTERNAL_AmplInterface_g_upper_bounds.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_g_upper_bounds.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_g_upper_bounds.restype = None # initial value x - ASLib.EXTERNAL_AmplInterface_get_init_x.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_get_init_x.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_get_init_x.restype = None # initial value multipliers - ASLib.EXTERNAL_AmplInterface_get_init_multipliers.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_get_init_multipliers.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_get_init_multipliers.restype = None # evaluate objective - ASLib.EXTERNAL_AmplInterface_eval_f.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int, - ctypes.POINTER(ctypes.c_double)] + ASLib.EXTERNAL_AmplInterface_eval_f.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + ctypes.POINTER(ctypes.c_double), + ] ASLib.EXTERNAL_AmplInterface_eval_f.restype = ctypes.c_bool # gradient objective - ASLib.EXTERNAL_AmplInterface_eval_deriv_f.argtypes = [ctypes.c_void_p, - array_1d_double, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_eval_deriv_f.argtypes = [ + ctypes.c_void_p, + array_1d_double, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_eval_deriv_f.restype = ctypes.c_bool # structure jacobian of constraints - ASLib.EXTERNAL_AmplInterface_struct_jac_g.argtypes = [ctypes.c_void_p, - array_1d_int, - array_1d_int, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_struct_jac_g.argtypes = [ + ctypes.c_void_p, + array_1d_int, + array_1d_int, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_struct_jac_g.restype = None # structure hessian of Lagrangian - ASLib.EXTERNAL_AmplInterface_struct_hes_lag.argtypes = [ctypes.c_void_p, - array_1d_int, - array_1d_int, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_struct_hes_lag.argtypes = [ + ctypes.c_void_p, + array_1d_int, + array_1d_int, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_struct_hes_lag.restype = None # evaluate constraints - ASLib.EXTERNAL_AmplInterface_eval_g.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_eval_g.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_eval_g.restype = ctypes.c_bool # evaluate jacobian constraints - ASLib.EXTERNAL_AmplInterface_eval_jac_g.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_eval_jac_g.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_eval_jac_g.restype = ctypes.c_bool # temporary try/except block while changes get merged in pynumero_libraries @@ -158,35 +188,41 @@ def _LoadASLInterface(libname): ASLib.EXTERNAL_AmplInterface_dummy.argtypes = [ctypes.c_void_p] ASLib.EXTERNAL_AmplInterface_dummy.restype = None # evaluate hessian Lagrangian - ASLib.EXTERNAL_AmplInterface_eval_hes_lag.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int, - array_1d_double, - ctypes.c_int, - array_1d_double, - ctypes.c_int, - ctypes.c_double] + ASLib.EXTERNAL_AmplInterface_eval_hes_lag.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + array_1d_double, + ctypes.c_int, + array_1d_double, + ctypes.c_int, + ctypes.c_double, + ] ASLib.EXTERNAL_AmplInterface_eval_hes_lag.restype = ctypes.c_bool except Exception: # evaluate hessian Lagrangian - ASLib.EXTERNAL_AmplInterface_eval_hes_lag.argtypes = [ctypes.c_void_p, - array_1d_double, - ctypes.c_int, - array_1d_double, - ctypes.c_int, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_eval_hes_lag.argtypes = [ + ctypes.c_void_p, + array_1d_double, + ctypes.c_int, + array_1d_double, + ctypes.c_int, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_eval_hes_lag.restype = ctypes.c_bool interface_version = 0 # finalize solution - ASLib.EXTERNAL_AmplInterface_finalize_solution.argtypes = [ctypes.c_void_p, - ctypes.c_int, - ctypes.c_char_p, - array_1d_double, - ctypes.c_int, - array_1d_double, - ctypes.c_int] + ASLib.EXTERNAL_AmplInterface_finalize_solution.argtypes = [ + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_char_p, + array_1d_double, + ctypes.c_int, + array_1d_double, + ctypes.c_int, + ] ASLib.EXTERNAL_AmplInterface_finalize_solution.restype = None # destructor @@ -197,13 +233,13 @@ def _LoadASLInterface(libname): logger.warning( 'The current pynumero_ASL library is version=%s, but found ' 'version=%s. Please recompile / update your pynumero_ASL ' - 'library.' % (CURRENT_INTERFACE_VERSION, interface_version)) + 'library.' % (CURRENT_INTERFACE_VERSION, interface_version) + ) return ASLib, interface_version class AmplInterface(object): - libname = _NotSet ASLib = None interface_version = None @@ -218,13 +254,13 @@ def available(cls): return os.path.exists(cls.libname) def __init__(self, filename=None, nl_buffer=None): - if not AmplInterface.available(): - raise RuntimeError( - "Cannot load the PyNumero ASL interface (pynumero_ASL)") + raise RuntimeError("Cannot load the PyNumero ASL interface (pynumero_ASL)") if nl_buffer is not None: - raise NotImplementedError("AmplInterface only supported form NL-file for now") + raise NotImplementedError( + "AmplInterface only supported form NL-file for now" + ) # Be sure to remove AMPLFUNC from the environment before loading # the ASL. This should prevent it from potentially caching an @@ -233,11 +269,11 @@ def __init__(self, filename=None, nl_buffer=None): amplfunc = os.environ.pop('AMPLFUNC', '') if AmplInterface.ASLib is None: - AmplInterface.ASLib, AmplInterface.interface_version \ - = _LoadASLInterface(AmplInterface.libname) + AmplInterface.ASLib, AmplInterface.interface_version = _LoadASLInterface( + AmplInterface.libname + ) if AmplInterface.interface_version >= 3: - AmplInterface.asl_date \ - = AmplInterface.ASLib.EXTERNAL_get_asl_date() + AmplInterface.asl_date = AmplInterface.ASLib.EXTERNAL_get_asl_date() else: AmplInterface.asl_date = 0 @@ -296,13 +332,9 @@ def get_bounds_info(self, xl, xu, gl, gu): ng = len(g_l) assert nx == len(x_u), "lower and upper bound x vectors must be the same size" assert ng == len(g_u), "lower and upper bound g vectors must be the same size" - self.ASLib.EXTERNAL_AmplInterface_get_bounds_info(self._obj, - x_l, - x_u, - nx, - g_l, - g_u, - ng) + self.ASLib.EXTERNAL_AmplInterface_get_bounds_info( + self._obj, x_l, x_u, nx, g_l, g_u, ng + ) def get_x_lower_bounds(self, invec): self.ASLib.EXTERNAL_AmplInterface_x_lower_bounds(self._obj, invec, len(invec)) @@ -320,102 +352,117 @@ def get_init_x(self, invec): self.ASLib.EXTERNAL_AmplInterface_get_init_x(self._obj, invec, len(invec)) def get_init_multipliers(self, invec): - self.ASLib.EXTERNAL_AmplInterface_get_init_multipliers(self._obj, invec, len(invec)) + self.ASLib.EXTERNAL_AmplInterface_get_init_multipliers( + self._obj, invec, len(invec) + ) def eval_f(self, x): - assert x.size == self._nx, "Error: Dimension missmatch." - assert x.dtype == np.double, "Error: array type. Function eval_deriv_f expects an array of type double" + assert x.size == self._nx, "Error: Dimension mismatch." + assert ( + x.dtype == np.double + ), "Error: array type. Function eval_deriv_f expects an array of type double" sol = ctypes.c_double() - res = self.ASLib.EXTERNAL_AmplInterface_eval_f(self._obj, x, self._nx, ctypes.byref(sol)) - assert res, "Error in AMPL evaluation" + res = self.ASLib.EXTERNAL_AmplInterface_eval_f( + self._obj, x, self._nx, ctypes.byref(sol) + ) + if not res: + raise PyNumeroEvaluationError("Error in AMPL evaluation") return sol.value def eval_deriv_f(self, x, df): - assert x.size == self._nx, "Error: Dimension missmatch." - assert x.dtype == np.double, "Error: array type. Function eval_deriv_f expects an array of type double" + assert x.size == self._nx, "Error: Dimension mismatch." + assert ( + x.dtype == np.double + ), "Error: array type. Function eval_deriv_f expects an array of type double" res = self.ASLib.EXTERNAL_AmplInterface_eval_deriv_f(self._obj, x, df, len(x)) - assert res, "Error in AMPL evaluation" + if not res: + raise PyNumeroEvaluationError("Error in AMPL evaluation") def struct_jac_g(self, irow, jcol): irow_p = irow.astype(np.intc, casting='safe', copy=False) jcol_p = jcol.astype(np.intc, casting='safe', copy=False) - assert len(irow) == len(jcol), "Error: Dimension missmatch. Arrays irow and jcol must be of the same size" - assert len(irow) == self._nnz_jac_g, "Error: Dimension missmatch. Jacobian has {} nnz".format(self._nnz_jac_g) - self.ASLib.EXTERNAL_AmplInterface_struct_jac_g(self._obj, - irow_p, - jcol_p, - len(irow)) - + assert len(irow) == len( + jcol + ), "Error: Dimension mismatch. Arrays irow and jcol must be of the same size" + assert ( + len(irow) == self._nnz_jac_g + ), "Error: Dimension mismatch. Jacobian has {} nnz".format(self._nnz_jac_g) + self.ASLib.EXTERNAL_AmplInterface_struct_jac_g( + self._obj, irow_p, jcol_p, len(irow) + ) def struct_hes_lag(self, irow, jcol): irow_p = irow.astype(np.intc, casting='safe', copy=False) jcol_p = jcol.astype(np.intc, casting='safe', copy=False) - assert len(irow) == len(jcol), "Error: Dimension missmatch. Arrays irow and jcol must be of the same size" - assert len(irow) == self._nnz_hess, "Error: Dimension missmatch. Hessian has {} nnz".format(self._nnz_hess) - self.ASLib.EXTERNAL_AmplInterface_struct_hes_lag(self._obj, - irow_p, - jcol_p, - len(irow)) + assert len(irow) == len( + jcol + ), "Error: Dimension mismatch. Arrays irow and jcol must be of the same size" + assert ( + len(irow) == self._nnz_hess + ), "Error: Dimension mismatch. Hessian has {} nnz".format(self._nnz_hess) + self.ASLib.EXTERNAL_AmplInterface_struct_hes_lag( + self._obj, irow_p, jcol_p, len(irow) + ) def eval_jac_g(self, x, jac_g_values): - assert x.size == self._nx, "Error: Dimension missmatch." - assert jac_g_values.size == self._nnz_jac_g, "Error: Dimension missmatch." + assert x.size == self._nx, "Error: Dimension mismatch." + assert jac_g_values.size == self._nnz_jac_g, "Error: Dimension mismatch." xeval = x.astype(np.double, casting='safe', copy=False) jac_eval = jac_g_values.astype(np.double, casting='safe', copy=False) - res = self.ASLib.EXTERNAL_AmplInterface_eval_jac_g(self._obj, - xeval, - self._nx, - jac_eval, - self._nnz_jac_g) - assert res, "Error in AMPL evaluation" + res = self.ASLib.EXTERNAL_AmplInterface_eval_jac_g( + self._obj, xeval, self._nx, jac_eval, self._nnz_jac_g + ) + if not res: + raise PyNumeroEvaluationError("Error in AMPL evaluation") def eval_g(self, x, g): - assert x.size == self._nx, "Error: Dimension missmatch." - assert g.size == self._ny, "Error: Dimension missmatch." - assert x.dtype == np.double, "Error: array type. Function eval_g expects an array of type double" - assert g.dtype == np.double, "Error: array type. Function eval_g expects an array of type double" - res = self.ASLib.EXTERNAL_AmplInterface_eval_g(self._obj, - x, - self._nx, - g, - self._ny) - assert res, "Error in AMPL evaluation" + assert x.size == self._nx, "Error: Dimension mismatch." + assert g.size == self._ny, "Error: Dimension mismatch." + assert ( + x.dtype == np.double + ), "Error: array type. Function eval_g expects an array of type double" + assert ( + g.dtype == np.double + ), "Error: array type. Function eval_g expects an array of type double" + res = self.ASLib.EXTERNAL_AmplInterface_eval_g( + self._obj, x, self._nx, g, self._ny + ) + if not res: + raise PyNumeroEvaluationError("Error in AMPL evaluation") def eval_hes_lag(self, x, lam, hes_lag, obj_factor=1.0): - assert x.size == self._nx, "Error: Dimension missmatch." - assert lam.size == self._ny, "Error: Dimension missmatch." - assert hes_lag.size == self._nnz_hess, "Error: Dimension missmatch." - assert x.dtype == np.double, "Error: array type. Function eval_hes_lag expects an array of type double" - assert lam.dtype == np.double, "Error: array type. Function eval_hes_lag expects an array of type double" - assert hes_lag.dtype == np.double, "Error: array type. Function eval_hes_lag expects an array of type double" + assert x.size == self._nx, "Error: Dimension mismatch." + assert lam.size == self._ny, "Error: Dimension mismatch." + assert hes_lag.size == self._nnz_hess, "Error: Dimension mismatch." + assert ( + x.dtype == np.double + ), "Error: array type. Function eval_hes_lag expects an array of type double" + assert ( + lam.dtype == np.double + ), "Error: array type. Function eval_hes_lag expects an array of type double" + assert ( + hes_lag.dtype == np.double + ), "Error: array type. Function eval_hes_lag expects an array of type double" if self.interface_version >= 1: - res = self.ASLib.EXTERNAL_AmplInterface_eval_hes_lag(self._obj, - x, - self._nx, - lam, - self._ny, - hes_lag, - self._nnz_hess, - obj_factor) + res = self.ASLib.EXTERNAL_AmplInterface_eval_hes_lag( + self._obj, + x, + self._nx, + lam, + self._ny, + hes_lag, + self._nnz_hess, + obj_factor, + ) else: - res = self.ASLib.EXTERNAL_AmplInterface_eval_hes_lag(self._obj, - x, - self._nx, - lam, - self._ny, - hes_lag, - self._nnz_hess) - assert res, "Error in AMPL evaluation" + res = self.ASLib.EXTERNAL_AmplInterface_eval_hes_lag( + self._obj, x, self._nx, lam, self._ny, hes_lag, self._nnz_hess + ) + if not res: + raise PyNumeroEvaluationError("Error in AMPL evaluation") def finalize_solution(self, ampl_solve_status_num, msg, x, lam): b_msg = msg.encode('utf-8') - self.ASLib.EXTERNAL_AmplInterface_finalize_solution(self._obj, - ampl_solve_status_num, - b_msg, - x, - len(x), - lam, - len(lam)) - - - + self.ASLib.EXTERNAL_AmplInterface_finalize_solution( + self._obj, ampl_solve_status_num, b_msg, x, len(x), lam, len(lam) + ) diff --git a/pyomo/contrib/pynumero/build.py b/pyomo/contrib/pynumero/build.py index 7f3488332c5..08b5c512ab7 100644 --- a/pyomo/contrib/pynumero/build.py +++ b/pyomo/contrib/pynumero/build.py @@ -12,6 +12,7 @@ import sys from pyomo.common.cmake_builder import build_cmake_project + def build_pynumero(user_args=[], parallel=None): return build_cmake_project( targets=['src'], @@ -21,9 +22,11 @@ def build_pynumero(user_args=[], parallel=None): parallel=parallel, ) + class PyNumeroBuilder(object): def __call__(self, parallel): return build_pynumero(parallel=parallel) + if __name__ == "__main__": build_pynumero(sys.argv[1:]) diff --git a/pyomo/contrib/pynumero/dependencies.py b/pyomo/contrib/pynumero/dependencies.py index 988b258afe5..d386bbc3dda 100644 --- a/pyomo/contrib/pynumero/dependencies.py +++ b/pyomo/contrib/pynumero/dependencies.py @@ -17,7 +17,8 @@ 'numpy', 'Pynumero requires the optional Pyomo dependency "numpy"', minimum_version='1.13.0', - defer_check=False) + defer_check=False, +) if not numpy_available: numpy.log_import_warning('pyomo.contrib.pynumero') diff --git a/pyomo/contrib/pynumero/examples/callback/cyipopt_callback.py b/pyomo/contrib/pynumero/examples/callback/cyipopt_callback.py index 66c9ae3ec9b..6bd86c006a1 100644 --- a/pyomo/contrib/pynumero/examples/callback/cyipopt_callback.py +++ b/pyomo/contrib/pynumero/examples/callback/cyipopt_callback.py @@ -3,27 +3,41 @@ import logging """ -This example uses an interation callback to print the values +This example uses an iteration callback to print the values of the constraint residuals at each iteration of the CyIpopt solver """ -def iteration_callback(nlp, alg_mod, iter_count, obj_value, - inf_pr, inf_du, mu, d_norm, regularization_size, - alpha_du, alpha_pr, ls_trials): + + +def iteration_callback( + nlp, + alg_mod, + iter_count, + obj_value, + inf_pr, + inf_du, + mu, + d_norm, + regularization_size, + alpha_du, + alpha_pr, + ls_trials, +): logger = logging.getLogger('pyomo') constraint_names = nlp.constraint_names() residuals = nlp.evaluate_constraints() logger.info(' ...Residuals for iteration {}'.format(iter_count)) - for i,nm in enumerate(constraint_names): + for i, nm in enumerate(constraint_names): logger.info(' ...{}: {}'.format(nm, residuals[i])) + def main(): solver = pyo.SolverFactory('cyipopt') - status, nlp = solver.solve(m, tee=False, return_nlp=True, - intermediate_callback=iteration_callback) + status, nlp = solver.solve( + m, tee=False, return_nlp=True, intermediate_callback=iteration_callback + ) + if __name__ == '__main__': logging.getLogger('pyomo').setLevel(logging.INFO) main() - - diff --git a/pyomo/contrib/pynumero/examples/callback/cyipopt_callback_halt.py b/pyomo/contrib/pynumero/examples/callback/cyipopt_callback_halt.py index 1ad30addf7a..18fad2bbcd8 100644 --- a/pyomo/contrib/pynumero/examples/callback/cyipopt_callback_halt.py +++ b/pyomo/contrib/pynumero/examples/callback/cyipopt_callback_halt.py @@ -4,21 +4,33 @@ """ This example uses an iteration callback to halt the solver """ -def iteration_callback(nlp, alg_mod, iter_count, obj_value, - inf_pr, inf_du, mu, d_norm, regularization_size, - alpha_du, alpha_pr, ls_trials): + + +def iteration_callback( + nlp, + alg_mod, + iter_count, + obj_value, + inf_pr, + inf_du, + mu, + d_norm, + regularization_size, + alpha_du, + alpha_pr, + ls_trials, +): if iter_count >= 4: return False return True + def main(): solver = pyo.SolverFactory('cyipopt') - status = solver.solve(m, tee=False, - intermediate_callback=iteration_callback) + status = solver.solve(m, tee=False, intermediate_callback=iteration_callback) return status + if __name__ == '__main__': status = main() print(status) - - diff --git a/pyomo/contrib/pynumero/examples/callback/cyipopt_functor_callback.py b/pyomo/contrib/pynumero/examples/callback/cyipopt_functor_callback.py index 8befed75774..f977a2701a2 100644 --- a/pyomo/contrib/pynumero/examples/callback/cyipopt_functor_callback.py +++ b/pyomo/contrib/pynumero/examples/callback/cyipopt_functor_callback.py @@ -3,37 +3,50 @@ import pandas as pd """ -This example uses an interation callback with a functor to store +This example uses an iteration callback with a functor to store values from each iteration in a class """ + + class ResidualsTableCallback(object): def __init__(self): self._residuals = None - def __call__(self, nlp, alg_mod, iter_count, obj_value, - inf_pr, inf_du, mu, d_norm, regularization_size, - alpha_du, alpha_pr, ls_trials): + def __call__( + self, + nlp, + alg_mod, + iter_count, + obj_value, + inf_pr, + inf_du, + mu, + d_norm, + regularization_size, + alpha_du, + alpha_pr, + ls_trials, + ): constraint_names = nlp.constraint_names() if self._residuals is None: self._residuals = {nm: [] for nm in constraint_names} self._residuals['iter'] = [] residuals = nlp.evaluate_constraints() - for i,nm in enumerate(constraint_names): + for i, nm in enumerate(constraint_names): self._residuals[nm].append(residuals[i]) self._residuals['iter'].append(iter_count) def get_residual_dataframe(self): return pd.DataFrame(self._residuals) + def main(): solver = pyo.SolverFactory('cyipopt') resid_table_by_iter = ResidualsTableCallback() - status = solver.solve(m, tee=False, - intermediate_callback=resid_table_by_iter) + status = solver.solve(m, tee=False, intermediate_callback=resid_table_by_iter) return resid_table_by_iter.get_residual_dataframe() + if __name__ == '__main__': df = main() print(df) - - diff --git a/pyomo/contrib/pynumero/examples/callback/reactor_design.py b/pyomo/contrib/pynumero/examples/callback/reactor_design.py index dc4bc7fb1af..927b25f9bc9 100644 --- a/pyomo/contrib/pynumero/examples/callback/reactor_design.py +++ b/pyomo/contrib/pynumero/examples/callback/reactor_design.py @@ -5,31 +5,36 @@ model = ConcreteModel() # set the data (native python data) -k1 = 5.0/6.0 # min^-1 -k2 = 5.0/3.0 # min^-1 -k3 = 1.0/6000.0 # m^3/(gmol min) -caf = 10000.0 # gmol/m^3 +k1 = 5.0 / 6.0 # min^-1 +k2 = 5.0 / 3.0 # min^-1 +k3 = 1.0 / 6000.0 # m^3/(gmol min) +caf = 10000.0 # gmol/m^3 # create the variables -model.sv = Var(initialize = 1.0, within=PositiveReals) -model.ca = Var(initialize = 5000.0, bounds=(0,10000)) -model.cb = Var(initialize = 2000.0, within=PositiveReals) -model.cc = Var(initialize = 2000.0, within=PositiveReals) -model.cd = Var(initialize = 1000.0, within=PositiveReals) +model.sv = Var(initialize=1.0, within=PositiveReals) +model.ca = Var(initialize=5000.0, bounds=(0, 10000)) +model.cb = Var(initialize=2000.0, within=PositiveReals) +model.cc = Var(initialize=2000.0, within=PositiveReals) +model.cd = Var(initialize=1000.0, within=PositiveReals) # create the objective -model.obj = Objective(expr = model.cb, sense=maximize) +model.obj = Objective(expr=model.cb, sense=maximize) # create the constraints -model.ca_bal = Constraint(expr = (0 == model.sv * caf \ - - model.sv * model.ca - k1 * model.ca \ - - 2.0 * k3 * model.ca ** 2.0)) - -model.cb_bal = Constraint(expr=(0 == -model.sv * model.cb \ - + k1 * model.ca - k2 * model.cb)) - -model.cc_bal = Constraint(expr=(0 == -model.sv * model.cc \ - + k2 * model.cb)) - -model.cd_bal = Constraint(expr=(0 == -model.sv * model.cd \ - + k3 * model.ca ** 2.0)) +model.ca_bal = Constraint( + expr=( + 0 + == model.sv * caf + - model.sv * model.ca + - k1 * model.ca + - 2.0 * k3 * model.ca**2.0 + ) +) + +model.cb_bal = Constraint( + expr=(0 == -model.sv * model.cb + k1 * model.ca - k2 * model.cb) +) + +model.cc_bal = Constraint(expr=(0 == -model.sv * model.cc + k2 * model.cb)) + +model.cd_bal = Constraint(expr=(0 == -model.sv * model.cd + k3 * model.ca**2.0)) diff --git a/pyomo/contrib/pynumero/examples/external_grey_box/param_est/generate_data.py b/pyomo/contrib/pynumero/examples/external_grey_box/param_est/generate_data.py index ba2e7210c9f..3588ba3853d 100644 --- a/pyomo/contrib/pynumero/examples/external_grey_box/param_est/generate_data.py +++ b/pyomo/contrib/pynumero/examples/external_grey_box/param_est/generate_data.py @@ -3,25 +3,25 @@ import pyomo.contrib.pynumero.examples.external_grey_box.param_est.models as pm import pandas as pd + def generate_data(N, UA_mean, UA_std, seed=42): rnd.seed(seed) m = pyo.ConcreteModel() pm.build_single_point_model_pyomo_only(m) # dummy objective since this is a square problem - m.obj = pyo.Objective(expr=1) + m.obj = pyo.Objective(expr=1) # create the ipopt solver solver = pyo.SolverFactory('ipopt') - data = {'run': [], 'Th_in': [], 'Tc_in': [], 'Th_out':[], - 'Tc_out': []} + data = {'run': [], 'Th_in': [], 'Tc_in': [], 'Th_out': [], 'Tc_out': []} for i in range(N): # draw a random value for the parameters ua = float(rnd.normal(UA_mean, UA_std)) # draw a noisy value for the test input conditions Th_in = 100 + float(rnd.normal(0, 2)) - Tc_in = 30 + float(rnd.normal(0, 2)) + Tc_in = 30 + float(rnd.normal(0, 2)) m.UA.fix(ua) m.Th_in.fix(Th_in) m.Tc_in.fix(Tc_in) @@ -35,6 +35,7 @@ def generate_data(N, UA_mean, UA_std, seed=42): return pd.DataFrame(data) + def generate_data_external(N, UA_mean, UA_std, seed=42): rnd.seed(seed) m = pyo.ConcreteModel() @@ -48,21 +49,19 @@ def generate_data_external(N, UA_mean, UA_std, seed=42): m.Th_in_spec_con = pyo.Constraint(expr=m.egb.inputs['Th_in'] == m.Th_in_spec) m.Tc_in_spec_con = pyo.Constraint(expr=m.egb.inputs['Tc_in'] == m.Tc_in_spec) - # dummy objective since this is a square problem - m.obj = pyo.Objective(expr=(m.egb.inputs['UA'] - m.UA_spec)**2) + m.obj = pyo.Objective(expr=(m.egb.inputs['UA'] - m.UA_spec) ** 2) # create the ipopt solver solver = pyo.SolverFactory('cyipopt') - data = {'run': [], 'Th_in': [], 'Tc_in': [], 'Th_out':[], - 'Tc_out': []} + data = {'run': [], 'Th_in': [], 'Tc_in': [], 'Th_out': [], 'Tc_out': []} for i in range(N): # draw a random value for the parameters UA = float(rnd.normal(UA_mean, UA_std)) # draw a noisy value for the test input conditions Th_in = 100 + float(rnd.normal(0, 2)) - Tc_in = 30 + float(rnd.normal(0, 2)) + Tc_in = 30 + float(rnd.normal(0, 2)) m.UA_spec.value = UA m.Th_in_spec.value = Th_in m.Tc_in_spec.value = Tc_in @@ -73,12 +72,11 @@ def generate_data_external(N, UA_mean, UA_std, seed=42): data['Tc_in'].append(pyo.value(m.egb.inputs['Tc_in'])) data['Th_out'].append(pyo.value(m.egb.inputs['Th_out'])) data['Tc_out'].append(pyo.value(m.egb.inputs['Tc_out'])) - + return pd.DataFrame(data) + if __name__ == '__main__': - #df = generate_data(50, 200, 5) + # df = generate_data(50, 200, 5) df = generate_data_external(50, 200, 5) df.to_csv('data.csv', index=False) - - diff --git a/pyomo/contrib/pynumero/examples/external_grey_box/param_est/models.py b/pyomo/contrib/pynumero/examples/external_grey_box/param_est/models.py index 84b308ce10e..a8b9befb188 100644 --- a/pyomo/contrib/pynumero/examples/external_grey_box/param_est/models.py +++ b/pyomo/contrib/pynumero/examples/external_grey_box/param_est/models.py @@ -1,5 +1,8 @@ import pyomo.environ as pyo -from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxModel, ExternalGreyBoxBlock +from pyomo.contrib.pynumero.interfaces.external_grey_box import ( + ExternalGreyBoxModel, + ExternalGreyBoxBlock, +) import scipy.sparse as spa import numpy as np import math @@ -31,55 +34,74 @@ """ + def build_single_point_model_pyomo_only(m): # fixed parameters - m.Cp_h = 2131 # heat cap hot - m.Cp_c = 4178 # heat cap cold - m.Fh = 0.1 # flow hot - m.Fc = 0.2 # flow cold - + m.Cp_h = 2131 # heat cap hot + m.Cp_c = 4178 # heat cap cold + m.Fh = 0.1 # flow hot + m.Fc = 0.2 # flow cold # model inputs - m.Th_in = pyo.Var(initialize=100, bounds=(10,None)) - m.Th_out = pyo.Var(initialize=50, bounds=(10,None)) - m.Tc_in = pyo.Var(initialize=30, bounds=(10,None)) - m.Tc_out = pyo.Var(initialize=50, bounds=(10,None)) + m.Th_in = pyo.Var(initialize=100, bounds=(10, None)) + m.Th_out = pyo.Var(initialize=50, bounds=(10, None)) + m.Tc_in = pyo.Var(initialize=30, bounds=(10, None)) + m.Tc_out = pyo.Var(initialize=50, bounds=(10, None)) m.UA = pyo.Var(initialize=100) - m.Q = pyo.Var(initialize=10000, bounds=(0,None)) - m.lmtd = pyo.Var(initialize=20, bounds=(0,None)) - m.dt1 = pyo.Var(initialize=20, bounds=(0,None)) - m.dt2 = pyo.Var(initialize=20, bounds=(0,None)) + m.Q = pyo.Var(initialize=10000, bounds=(0, None)) + m.lmtd = pyo.Var(initialize=20, bounds=(0, None)) + m.dt1 = pyo.Var(initialize=20, bounds=(0, None)) + m.dt2 = pyo.Var(initialize=20, bounds=(0, None)) # model constraints - m.dt1_con = pyo.Constraint(expr = m.dt1 == m.Th_in - m.Tc_out) - m.dt2_con = pyo.Constraint(expr = m.dt2 == m.Th_out - m.Tc_in) - m.lmtd_con = pyo.Constraint(expr = m.lmtd * pyo.log(m.dt2/m.dt1) == (m.dt2 - m.dt1)) - - m.ua_con = pyo.Constraint(expr = m.Q == m.UA * m.lmtd) - m.Qh_con = pyo.Constraint(expr = m.Q == m.Fh*m.Cp_h*(m.Th_in - m.Th_out)) - m.Qc_con = pyo.Constraint(expr = m.Q == m.Fc*m.Cp_c*(m.Tc_out - m.Tc_in)) + m.dt1_con = pyo.Constraint(expr=m.dt1 == m.Th_in - m.Tc_out) + m.dt2_con = pyo.Constraint(expr=m.dt2 == m.Th_out - m.Tc_in) + m.lmtd_con = pyo.Constraint(expr=m.lmtd * pyo.log(m.dt2 / m.dt1) == (m.dt2 - m.dt1)) + + m.ua_con = pyo.Constraint(expr=m.Q == m.UA * m.lmtd) + m.Qh_con = pyo.Constraint(expr=m.Q == m.Fh * m.Cp_h * (m.Th_in - m.Th_out)) + m.Qc_con = pyo.Constraint(expr=m.Q == m.Fc * m.Cp_c * (m.Tc_out - m.Tc_in)) + def build_single_point_model_external(m): ex_model = UAModelExternal() m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_model) + class UAModelExternal(ExternalGreyBoxModel): def __init__(self): super(UAModelExternal, self).__init__() - self._input_names = ['Th_in', 'Th_out', 'Tc_in', 'Tc_out', 'UA', 'Q', - 'lmtd', 'dT1', 'dT2'] + self._input_names = [ + 'Th_in', + 'Th_out', + 'Tc_in', + 'Tc_out', + 'UA', + 'Q', + 'lmtd', + 'dT1', + 'dT2', + ] self._input_values = np.zeros(self.n_inputs(), dtype=np.float64) - self._eq_constraint_names = \ - ['dT1_con', 'dT2_con', 'lmtd_con', 'QUA_con', 'Qhot_con', 'Qcold_con'] - self._eq_constraint_multipliers = np.zeros(self.n_equality_constraints(), dtype=np.float64) + self._eq_constraint_names = [ + 'dT1_con', + 'dT2_con', + 'lmtd_con', + 'QUA_con', + 'Qhot_con', + 'Qcold_con', + ] + self._eq_constraint_multipliers = np.zeros( + self.n_equality_constraints(), dtype=np.float64 + ) # parameters self._Cp_h = 2131 self._Cp_c = 4178 self._Fh = 0.1 self._Fc = 0.2 - + def n_inputs(self): return len(self.input_names()) @@ -94,7 +116,7 @@ def input_names(self): def equality_constraint_names(self): return self._eq_constraint_names - + def output_names(self): return [] @@ -110,7 +132,7 @@ def finalize_block_construction(self, pyomo_block): pyomo_block.inputs['Tc_out'].set_value(50) pyomo_block.inputs['UA'].set_value(100) - + pyomo_block.inputs['Q'].setlb(0) pyomo_block.inputs['Q'].set_value(10000) @@ -144,7 +166,7 @@ def evaluate_equality_constraints(self): resid = np.zeros(self.n_equality_constraints()) resid[0] = Th_in - Tc_out - dT1 resid[1] = Th_out - Tc_in - dT2 - resid[2] = lmtd * math.log(dT2/dT1) - (dT2-dT1) + resid[2] = lmtd * math.log(dT2 / dT1) - (dT2 - dT1) resid[3] = UA * lmtd - Q resid[4] = self._Fh * self._Cp_h * (Th_in - Th_out) - Q resid[5] = -self._Fc * self._Cp_c * (Tc_in - Tc_out) - Q @@ -179,11 +201,11 @@ def evaluate_jacobian_equality_constraints(self): idx += 1 row[idx], col[idx], data[idx] = (1, 8, -1.0) idx += 1 - row[idx], col[idx], data[idx] = (2, 6, math.log(dT2/dT1)) + row[idx], col[idx], data[idx] = (2, 6, math.log(dT2 / dT1)) idx += 1 - row[idx], col[idx], data[idx] = (2, 7, -lmtd/dT1 + 1) + row[idx], col[idx], data[idx] = (2, 7, -lmtd / dT1 + 1) idx += 1 - row[idx], col[idx], data[idx] = (2, 8, lmtd/dT2 - 1) + row[idx], col[idx], data[idx] = (2, 8, lmtd / dT2 - 1) idx += 1 row[idx], col[idx], data[idx] = (3, 4, lmtd) idx += 1 @@ -191,21 +213,21 @@ def evaluate_jacobian_equality_constraints(self): idx += 1 row[idx], col[idx], data[idx] = (3, 6, UA) idx += 1 - row[idx], col[idx], data[idx] = (4, 0, self._Fh*self._Cp_h) + row[idx], col[idx], data[idx] = (4, 0, self._Fh * self._Cp_h) idx += 1 - row[idx], col[idx], data[idx] = (4, 1, -self._Fh*self._Cp_h) + row[idx], col[idx], data[idx] = (4, 1, -self._Fh * self._Cp_h) idx += 1 row[idx], col[idx], data[idx] = (4, 5, -1) idx += 1 - row[idx], col[idx], data[idx] = (5, 2, -self._Fc*self._Cp_c) + row[idx], col[idx], data[idx] = (5, 2, -self._Fc * self._Cp_c) idx += 1 - row[idx], col[idx], data[idx] = (5, 3, self._Fc*self._Cp_c) + row[idx], col[idx], data[idx] = (5, 3, self._Fc * self._Cp_c) idx += 1 row[idx], col[idx], data[idx] = (5, 5, -1.0) idx += 1 assert idx == 18 - return spa.coo_matrix( (data, (row, col)), shape=(6,9) ) + return spa.coo_matrix((data, (row, col)), shape=(6, 9)) def evaluate_hessian_equality_constraints(self): Th_in = self._input_values[0] @@ -225,21 +247,21 @@ def evaluate_hessian_equality_constraints(self): idx = 0 # lmtd_con - row[idx], col[idx], data[idx] = (7, 6, lam[2]*(-1)/dT1) + row[idx], col[idx], data[idx] = (7, 6, lam[2] * (-1) / dT1) idx += 1 - row[idx], col[idx], data[idx] = (7, 7, lam[2]*lmtd/(dT1**2)) + row[idx], col[idx], data[idx] = (7, 7, lam[2] * lmtd / (dT1**2)) idx += 1 - row[idx], col[idx], data[idx] = (8, 6, lam[2]*1/dT2) + row[idx], col[idx], data[idx] = (8, 6, lam[2] * 1 / dT2) idx += 1 - row[idx], col[idx], data[idx] = (8, 8, lam[2]*(-lmtd)/(dT2**2)) + row[idx], col[idx], data[idx] = (8, 8, lam[2] * (-lmtd) / (dT2**2)) idx += 1 # QUA_con - row[idx], col[idx], data[idx] = (6, 4, lam[3]*(1)) + row[idx], col[idx], data[idx] = (6, 4, lam[3] * (1)) idx += 1 assert idx == 5 - - return spa.coo_matrix( (data, (row, col)), shape=(9,9) ) + + return spa.coo_matrix((data, (row, col)), shape=(9, 9)) # # Implement the following methods to provide support for diff --git a/pyomo/contrib/pynumero/examples/external_grey_box/param_est/perform_estimation.py b/pyomo/contrib/pynumero/examples/external_grey_box/param_est/perform_estimation.py index 5307611a56b..29ca7145475 100644 --- a/pyomo/contrib/pynumero/examples/external_grey_box/param_est/perform_estimation.py +++ b/pyomo/contrib/pynumero/examples/external_grey_box/param_est/perform_estimation.py @@ -4,6 +4,7 @@ import pandas as pd import pyomo.contrib.pynumero.examples.external_grey_box.param_est.models as po + def perform_estimation_pyomo_only(data_fname, solver_trace=False): # read in our data file - careful with formats df = pd.read_csv(data_fname) @@ -17,12 +18,15 @@ def perform_estimation_pyomo_only(data_fname, solver_trace=False): # create a separate Pyomo block for each data point def _model_i(b, i): po.build_single_point_model_pyomo_only(b) + m.model_i = pyo.Block(m.PTS, rule=_model_i) # we want the parameters to be the same across all the data pts m.UA = pyo.Var() + def _eq_parameter(m, i): return m.UA == m.model_i[i].UA + m.eq_parameter = pyo.Constraint(m.PTS, rule=_eq_parameter) # define the least squares objective function @@ -32,13 +36,14 @@ def _least_squares(m): row = m.df.iloc[i] # error in inputs measured - obj += (m.model_i[i].Th_in - float(row['Th_in']))**2 - obj += (m.model_i[i].Tc_in - float(row['Tc_in']))**2 + obj += (m.model_i[i].Th_in - float(row['Th_in'])) ** 2 + obj += (m.model_i[i].Tc_in - float(row['Tc_in'])) ** 2 # error in outputs - obj += (m.model_i[i].Th_out - float(row['Th_out']))**2 - obj += (m.model_i[i].Tc_out - float(row['Tc_out']))**2 + obj += (m.model_i[i].Th_out - float(row['Th_out'])) ** 2 + obj += (m.model_i[i].Tc_out - float(row['Tc_out'])) ** 2 return obj + m.obj = pyo.Objective(rule=_least_squares) solver = pyo.SolverFactory('ipopt') @@ -46,6 +51,7 @@ def _least_squares(m): return m + def perform_estimation_external(data_fname, solver_trace=False): # read in our data file - careful with formats df = pd.read_csv(data_fname) @@ -59,14 +65,17 @@ def perform_estimation_external(data_fname, solver_trace=False): # create a separate Pyomo block for each data point def _model_i(b, i): po.build_single_point_model_external(b) + m.model_i = pyo.Block(m.PTS, rule=_model_i) # we want the parameters to be the same across all the data pts # create a global parameter and provide equality constraints to # the parameters in each model instance m.UA = pyo.Var() + def _eq_parameter(m, i): return m.UA == m.model_i[i].egb.inputs['UA'] + m.eq_parameter = pyo.Constraint(m.PTS, rule=_eq_parameter) # define the least squares objective function @@ -76,13 +85,14 @@ def _least_squares(m): row = m.df.iloc[i] # error in inputs measured - obj += (m.model_i[i].egb.inputs['Th_in'] - float(row['Th_in']))**2 - obj += (m.model_i[i].egb.inputs['Tc_in'] - float(row['Tc_in']))**2 + obj += (m.model_i[i].egb.inputs['Th_in'] - float(row['Th_in'])) ** 2 + obj += (m.model_i[i].egb.inputs['Tc_in'] - float(row['Tc_in'])) ** 2 # error in outputs - obj += (m.model_i[i].egb.inputs['Th_out'] - float(row['Th_out']))**2 - obj += (m.model_i[i].egb.inputs['Tc_out'] - float(row['Tc_out']))**2 + obj += (m.model_i[i].egb.inputs['Th_out'] - float(row['Th_out'])) ** 2 + obj += (m.model_i[i].egb.inputs['Tc_out'] - float(row['Tc_out'])) ** 2 return obj + m.obj = pyo.Objective(rule=_least_squares) solver = pyo.SolverFactory('cyipopt') @@ -93,13 +103,13 @@ def _least_squares(m): # for example: names = nlp.primals_names() values = nlp.evaluate_grad_objective() - print({names[i]:values[i] for i in range(len(names))}) + print({names[i]: values[i] for i in range(len(names))}) return m + if __name__ == '__main__': m = perform_estimation_pyomo_only(sys.argv[1]) print(pyo.value(m.UA)) m = perform_estimation_external(sys.argv[1]) print(pyo.value(m.UA)) - diff --git a/pyomo/contrib/pynumero/examples/external_grey_box/react_example/maximize_cb_outputs.py b/pyomo/contrib/pynumero/examples/external_grey_box/react_example/maximize_cb_outputs.py index 2d6bbc9228c..eff4f34cabc 100644 --- a/pyomo/contrib/pynumero/examples/external_grey_box/react_example/maximize_cb_outputs.py +++ b/pyomo/contrib/pynumero/examples/external_grey_box/react_example/maximize_cb_outputs.py @@ -11,9 +11,11 @@ from __future__ import division import pyomo.environ as pyo -from pyomo.contrib.pynumero.interfaces.external_grey_box import \ - ExternalGreyBoxBlock -from pyomo.contrib.pynumero.examples.external_grey_box.react_example.reactor_model_outputs import ReactorConcentrationsOutputModel +from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxBlock +from pyomo.contrib.pynumero.examples.external_grey_box.react_example.reactor_model_outputs import ( + ReactorConcentrationsOutputModel, +) + def maximize_cb_outputs(show_solver_log=False): # in this simple example, we will use an external grey box model representing @@ -22,15 +24,13 @@ def maximize_cb_outputs(show_solver_log=False): m = pyo.ConcreteModel() # create a block to store the external reactor model - m.reactor = ExternalGreyBoxBlock( - external_model=ReactorConcentrationsOutputModel() - ) + m.reactor = ExternalGreyBoxBlock(external_model=ReactorConcentrationsOutputModel()) # The reaction rate constants and the feed concentration will # be fixed for this example - m.k1con = pyo.Constraint(expr=m.reactor.inputs['k1'] == 5/6) - m.k2con = pyo.Constraint(expr=m.reactor.inputs['k2'] == 5/3) - m.k3con = pyo.Constraint(expr=m.reactor.inputs['k3'] == 1/6000) + m.k1con = pyo.Constraint(expr=m.reactor.inputs['k1'] == 5 / 6) + m.k2con = pyo.Constraint(expr=m.reactor.inputs['k2'] == 5 / 3) + m.k3con = pyo.Constraint(expr=m.reactor.inputs['k3'] == 1 / 6000) m.cafcon = pyo.Constraint(expr=m.reactor.inputs['caf'] == 10000) # add an objective function that maximizes the concentration @@ -43,8 +43,7 @@ def maximize_cb_outputs(show_solver_log=False): pyo.assert_optimal_termination(results) return m + if __name__ == '__main__': m = maximize_cb_outputs(show_solver_log=True) m.pprint() - - diff --git a/pyomo/contrib/pynumero/examples/external_grey_box/react_example/maximize_cb_ratio_residuals.py b/pyomo/contrib/pynumero/examples/external_grey_box/react_example/maximize_cb_ratio_residuals.py index 7b5e290d555..26d70c7921e 100644 --- a/pyomo/contrib/pynumero/examples/external_grey_box/react_example/maximize_cb_ratio_residuals.py +++ b/pyomo/contrib/pynumero/examples/external_grey_box/react_example/maximize_cb_ratio_residuals.py @@ -11,10 +11,18 @@ import pyomo.environ as pyo from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxBlock -from pyomo.contrib.pynumero.examples.external_grey_box.react_example.reactor_model_residuals import ReactorModel, ReactorModelWithHessian, \ - ReactorModelNoOutputs, ReactorModelScaled, create_pyomo_reactor_model - -def maximize_cb_ratio_residuals_with_output(show_solver_log=False, additional_options={}): +from pyomo.contrib.pynumero.examples.external_grey_box.react_example.reactor_model_residuals import ( + ReactorModel, + ReactorModelWithHessian, + ReactorModelNoOutputs, + ReactorModelScaled, + create_pyomo_reactor_model, +) + + +def maximize_cb_ratio_residuals_with_output( + show_solver_log=False, additional_options={} +): # in this simple example, we will use an external grey box model representing # a steady-state reactor, and solve for the space velocity that maximizes # the ratio of B to the other components coming out of the reactor @@ -27,20 +35,23 @@ def maximize_cb_ratio_residuals_with_output(show_solver_log=False, additional_op # The feed concentration will be fixed for this example m.cafcon = pyo.Constraint(expr=m.reactor.inputs['caf'] == 10000) - + # add an objective function that maximizes the concentration # of cb coming out of the reactor m.obj = pyo.Objective(expr=m.reactor.outputs['cb_ratio'], sense=pyo.maximize) solver = pyo.SolverFactory('cyipopt') solver.config.options['hessian_approximation'] = 'limited-memory' - for k,v in additional_options.items(): + for k, v in additional_options.items(): solver.config.options[k] = v results = solver.solve(m, tee=show_solver_log) pyo.assert_optimal_termination(results) return m -def maximize_cb_ratio_residuals_with_hessian_with_output(show_solver_log=False, additional_options={}): + +def maximize_cb_ratio_residuals_with_hessian_with_output( + show_solver_log=False, additional_options={} +): # in this simple example, we will use an external grey box model representing # a steady-state reactor, and solve for the space velocity that maximizes # the ratio of B to the other components coming out of the reactor @@ -53,31 +64,37 @@ def maximize_cb_ratio_residuals_with_hessian_with_output(show_solver_log=False, # The feed concentration will be fixed for this example m.cafcon = pyo.Constraint(expr=m.reactor.inputs['caf'] == 10000) - + # add an objective function that maximizes the concentration # of cb coming out of the reactor m.obj = pyo.Objective(expr=m.reactor.outputs['cb_ratio'], sense=pyo.maximize) solver = pyo.SolverFactory('cyipopt') - for k,v in additional_options.items(): + for k, v in additional_options.items(): solver.config.options[k] = v results = solver.solve(m, tee=show_solver_log) pyo.assert_optimal_termination(results) return m -def maximize_cb_ratio_residuals_with_hessian_with_output_pyomo(show_solver_log=False, additional_options={}): + +def maximize_cb_ratio_residuals_with_hessian_with_output_pyomo( + show_solver_log=False, additional_options={} +): # this example is the same as the one above, but solves with a pure # pyomo model - this is mostly for comparison and testing m = create_pyomo_reactor_model() solver = pyo.SolverFactory('ipopt') - for k,v in additional_options.items(): + for k, v in additional_options.items(): solver.options[k] = v solver.options['linear_solver'] = 'mumps' results = solver.solve(m, tee=show_solver_log) pyo.assert_optimal_termination(results) return m -def maximize_cb_ratio_residuals_with_output_scaling(show_solver_log=False, additional_options={}): + +def maximize_cb_ratio_residuals_with_output_scaling( + show_solver_log=False, additional_options={} +): # in this simple example, we will use an external grey box model representing # a steady-state reactor, and solve for the space velocity that maximizes # the ratio of B to the other components coming out of the reactor @@ -111,19 +128,20 @@ def maximize_cb_ratio_residuals_with_output_scaling(show_solver_log=False, addit # set a scaling factor for this constraint - if we had additional pyomo # variables, we could set them the same way m.scaling_factor[m.cafcon] = 42.0 - + # add an objective function that maximizes the concentration # of cb coming out of the reactor m.obj = pyo.Objective(expr=m.reactor.outputs['cb_ratio'], sense=pyo.maximize) solver = pyo.SolverFactory('cyipopt') solver.config.options['hessian_approximation'] = 'limited-memory' - for k,v in additional_options.items(): + for k, v in additional_options.items(): solver.config.options[k] = v results = solver.solve(m, tee=show_solver_log) pyo.assert_optimal_termination(results) return m + def maximize_cb_ratio_residuals_with_obj(show_solver_log=False, additional_options={}): # in this simple example, we will use an external grey box model representing # a steady-state reactor, and solve for the space velocity that maximizes @@ -143,17 +161,22 @@ def maximize_cb_ratio_residuals_with_obj(show_solver_log=False, additional_optio # add an objective function that maximizes the concentration # of cb coming out of the reactor u = m.reactor.inputs - m.obj = pyo.Objective(expr=u['cb']/(u['ca']+u['cc']+u['cd']), sense=pyo.maximize) + m.obj = pyo.Objective( + expr=u['cb'] / (u['ca'] + u['cc'] + u['cd']), sense=pyo.maximize + ) solver = pyo.SolverFactory('cyipopt') solver.config.options['hessian_approximation'] = 'limited-memory' - for k,v in additional_options.items(): + for k, v in additional_options.items(): solver.config.options[k] = v results = solver.solve(m, tee=show_solver_log) pyo.assert_optimal_termination(results) return m -def maximize_cb_ratio_residuals_with_pyomo_variables(show_solver_log=False, additional_options={}): + +def maximize_cb_ratio_residuals_with_pyomo_variables( + show_solver_log=False, additional_options={} +): # in this simple example, we will use an external grey box model representing # a steady-state reactor, and solve for the space velocity that maximizes # the ratio of B to the other components coming out of the reactor @@ -169,8 +192,9 @@ def maximize_cb_ratio_residuals_with_pyomo_variables(show_solver_log=False, addi # add a variable and constraint for the cb ratio m.cb_ratio = pyo.Var(initialize=1) u = m.reactor.inputs - m.cb_ratio_con = pyo.Constraint(expr = \ - u['cb']/(u['ca']+u['cc']+u['cd']) - m.cb_ratio == 0) + m.cb_ratio_con = pyo.Constraint( + expr=u['cb'] / (u['ca'] + u['cc'] + u['cd']) - m.cb_ratio == 0 + ) # The feed concentration will be fixed for this example m.cafcon = pyo.Constraint(expr=m.reactor.inputs['caf'] == 10000) @@ -181,25 +205,30 @@ def maximize_cb_ratio_residuals_with_pyomo_variables(show_solver_log=False, addi solver = pyo.SolverFactory('cyipopt') solver.config.options['hessian_approximation'] = 'limited-memory' - for k,v in additional_options.items(): + for k, v in additional_options.items(): solver.config.options[k] = v results = solver.solve(m, tee=show_solver_log) pyo.assert_optimal_termination(results) return m + if __name__ == '__main__': m = maximize_cb_ratio_residuals_with_output(show_solver_log=True) # the next two are the same model with pyomo/ipopt and external/cyipopt m = maximize_cb_ratio_residuals_with_hessian_with_output_pyomo(show_solver_log=True) m = maximize_cb_ratio_residuals_with_hessian_with_output(show_solver_log=True) - - aoptions={'hessian_approximation':'limited-memory', - #'limited_memory_update_type': 'sr1', - 'nlp_scaling_method': 'user-scaling', - 'print_level':10} - m = maximize_cb_ratio_residuals_with_output_scaling(show_solver_log=True, additional_options=aoptions) + + aoptions = { + 'hessian_approximation': 'limited-memory', + #'limited_memory_update_type': 'sr1', + 'nlp_scaling_method': 'user-scaling', + 'print_level': 10, + } + m = maximize_cb_ratio_residuals_with_output_scaling( + show_solver_log=True, additional_options=aoptions + ) m = maximize_cb_ratio_residuals_with_obj(show_solver_log=True) - + m = maximize_cb_ratio_residuals_with_pyomo_variables(show_solver_log=True) diff --git a/pyomo/contrib/pynumero/examples/external_grey_box/react_example/reactor_model_outputs.py b/pyomo/contrib/pynumero/examples/external_grey_box/react_example/reactor_model_outputs.py index 0bdcdd1d280..7570a20b066 100644 --- a/pyomo/contrib/pynumero/examples/external_grey_box/react_example/reactor_model_outputs.py +++ b/pyomo/contrib/pynumero/examples/external_grey_box/react_example/reactor_model_outputs.py @@ -28,25 +28,28 @@ from scipy.sparse import coo_matrix from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxModel + def reactor_outlet_concentrations(sv, caf, k1, k2, k3): def _model(x, sv, caf, k1, k2, k3): ca, cb, cc, cd = x[0], x[1], x[2], x[3] # compute the residuals r = np.zeros(4) - r[0] = sv*caf + (-sv-k1)*ca - 2*k3*ca**2 - r[1] = k1*ca + (-sv-k2)*cb - r[2] = k2*cb - sv*cc - r[3] = k3*ca**2 - sv*cd + r[0] = sv * caf + (-sv - k1) * ca - 2 * k3 * ca**2 + r[1] = k1 * ca + (-sv - k2) * cb + r[2] = k2 * cb - sv * cc + r[3] = k3 * ca**2 - sv * cd return r - concentrations = \ - fsolve(lambda x: _model(x, sv, caf, k1, k2, k3), np.ones(4), xtol=1e-8) + concentrations = fsolve( + lambda x: _model(x, sv, caf, k1, k2, k3), np.ones(4), xtol=1e-8 + ) # Todo: check solve status return concentrations + class ReactorConcentrationsOutputModel(ExternalGreyBoxModel): def input_names(self): return ['sv', 'caf', 'k1', 'k2', 'k3'] @@ -68,14 +71,14 @@ def finalize_block_construction(self, pyomo_block): # initialize the variables pyomo_block.inputs['sv'].value = 5 pyomo_block.inputs['caf'].value = 10000 - pyomo_block.inputs['k1'].value = 5/6 - pyomo_block.inputs['k2'].value = 5/3 - pyomo_block.inputs['k3'].value = 1/6000 + pyomo_block.inputs['k1'].value = 5 / 6 + pyomo_block.inputs['k2'].value = 5 / 3 + pyomo_block.inputs['k3'].value = 1 / 6000 pyomo_block.outputs['ca'].value = 1 pyomo_block.outputs['cb'].value = 1 pyomo_block.outputs['cc'].value = 1 pyomo_block.outputs['cd'].value = 1 - + def evaluate_outputs(self): sv = self._input_values[0] caf = self._input_values[1] @@ -91,17 +94,17 @@ def evaluate_jacobian_outputs(self): delta = 1e-6 u0 = np.copy(self._input_values) y0 = self.evaluate_outputs() - jac = np.empty((4,5)) + jac = np.empty((4, 5)) u = np.copy(self._input_values) for j in range(len(u)): # perturb the variables u[j] += delta self.set_input_values(u) yperturb = self.evaluate_outputs() - jac_col = (yperturb - y0)/delta - jac[:,j] = jac_col + jac_col = (yperturb - y0) / delta + jac[:, j] = jac_col u[j] = u0[j] - + # return us back to our starting state self.set_input_values(u0) @@ -114,17 +117,16 @@ def evaluate_jacobian_outputs(self): for c in range(5): row.append(r) col.append(c) - data.append(jac[r,c]) - - return coo_matrix((data, (row, col)), shape=(4,5)) + data.append(jac[r, c]) + + return coo_matrix((data, (row, col)), shape=(4, 5)) + if __name__ == '__main__': sv = 1.34 caf = 10000 - k1 = 5/6 - k2 = 5/3 - k3 = 1/6000 + k1 = 5 / 6 + k2 = 5 / 3 + k3 = 1 / 6000 concentrations = reactor_outlet_concentrations(sv, caf, k1, k2, k3) print(concentrations) - - diff --git a/pyomo/contrib/pynumero/examples/external_grey_box/react_example/reactor_model_residuals.py b/pyomo/contrib/pynumero/examples/external_grey_box/react_example/reactor_model_residuals.py index ef5371ee210..6a6ae9bb652 100644 --- a/pyomo/contrib/pynumero/examples/external_grey_box/react_example/reactor_model_residuals.py +++ b/pyomo/contrib/pynumero/examples/external_grey_box/react_example/reactor_model_residuals.py @@ -26,6 +26,7 @@ from scipy.sparse import coo_matrix from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxModel + def create_pyomo_reactor_model(): # this function is here to show what the "ReactorModel" would # look like if it was coded directly in Pyomo @@ -33,52 +34,50 @@ def create_pyomo_reactor_model(): m = pyo.ConcreteModel() # set the data (native python data) - k1 = 5.0/6.0 # min^-1 - k2 = 5.0/3.0 # min^-1 - k3 = 1.0/6000.0 # m^3/(gmol min) - #caf = 10000.0 # gmol/m^3 + k1 = 5.0 / 6.0 # min^-1 + k2 = 5.0 / 3.0 # min^-1 + k3 = 1.0 / 6000.0 # m^3/(gmol min) + # caf = 10000.0 # gmol/m^3 # create the variables - m.sv = pyo.Var(initialize=1.0, bounds=(0,None)) + m.sv = pyo.Var(initialize=1.0, bounds=(0, None)) m.caf = pyo.Var(initialize=1.0) - m.ca = pyo.Var(initialize=1.0, bounds=(0,None)) - m.cb = pyo.Var(initialize=1.0, bounds=(0,None)) - m.cc = pyo.Var(initialize=1.0, bounds=(0,None)) - m.cd = pyo.Var(initialize=1.0, bounds=(0,None)) + m.ca = pyo.Var(initialize=1.0, bounds=(0, None)) + m.cb = pyo.Var(initialize=1.0, bounds=(0, None)) + m.cc = pyo.Var(initialize=1.0, bounds=(0, None)) + m.cd = pyo.Var(initialize=1.0, bounds=(0, None)) m.cb_ratio = pyo.Var(initialize=1.0) # create the objective m.obj = pyo.Objective(expr=m.cb_ratio, sense=pyo.maximize) - + # create the constraints - m.ca_bal = pyo.Constraint(expr = (0 == m.sv * m.caf \ - - m.sv * m.ca - k1 * m.ca \ - - 2.0 * k3 * m.ca ** 2.0)) + m.ca_bal = pyo.Constraint( + expr=(0 == m.sv * m.caf - m.sv * m.ca - k1 * m.ca - 2.0 * k3 * m.ca**2.0) + ) - m.cb_bal = pyo.Constraint(expr=(0 == -m.sv * m.cb \ - + k1 * m.ca - k2 * m.cb)) + m.cb_bal = pyo.Constraint(expr=(0 == -m.sv * m.cb + k1 * m.ca - k2 * m.cb)) - m.cc_bal = pyo.Constraint(expr=(0 == -m.sv * m.cc \ - + k2 * m.cb)) + m.cc_bal = pyo.Constraint(expr=(0 == -m.sv * m.cc + k2 * m.cb)) - m.cd_bal = pyo.Constraint(expr=(0 == -m.sv * m.cd \ - + k3 * m.ca ** 2.0)) + m.cd_bal = pyo.Constraint(expr=(0 == -m.sv * m.cd + k3 * m.ca**2.0)) - m.cb_ratio_con = pyo.Constraint(expr=m.cb/(m.ca + m.cc + m.cd) - m.cb_ratio == 0) + m.cb_ratio_con = pyo.Constraint(expr=m.cb / (m.ca + m.cc + m.cd) - m.cb_ratio == 0) m.cafcon = pyo.Constraint(expr=m.caf == 10000) return m + class ReactorModel(ExternalGreyBoxModel): def __init__(self, use_exact_derivatives=True): self._use_exact_derivatives = use_exact_derivatives - + def input_names(self): return ['sv', 'caf', 'ca', 'cb', 'cc', 'cd'] def equality_constraint_names(self): return ['ca_bal', 'cb_bal', 'cc_bal', 'cd_bal'] - + def output_names(self): return ['cb_ratio'] @@ -109,22 +108,22 @@ def evaluate_equality_constraints(self): cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - k1 = 5/6 - k2 = 5/3 - k3 = 1/6000 + k1 = 5 / 6 + k2 = 5 / 3 + k3 = 1 / 6000 r = np.zeros(4) - r[0] = sv*caf + (-sv-k1)*ca - 2*k3*ca**2 - r[1] = k1*ca + (-sv-k2)*cb - r[2] = k2*cb - sv*cc - r[3] = k3*ca**2 - sv*cd + r[0] = sv * caf + (-sv - k1) * ca - 2 * k3 * ca**2 + r[1] = k1 * ca + (-sv - k2) * cb + r[2] = k2 * cb - sv * cc + r[3] = k3 * ca**2 - sv * cd return r - + def evaluate_outputs(self): ca = self._input_values[2] cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - cb_ratio = cb/(ca+cc+cd) + cb_ratio = cb / (ca + cc + cd) return np.asarray([cb_ratio], dtype=np.float64) def evaluate_jacobian_equality_constraints(self): @@ -134,41 +133,41 @@ def evaluate_jacobian_equality_constraints(self): cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - k1 = 5/6 - k2 = 5/3 - k3 = 1/6000 + k1 = 5 / 6 + k2 = 5 / 3 + k3 = 1 / 6000 if self._use_exact_derivatives: row = np.zeros(12) col = np.zeros(12) data = np.zeros(12) - row[0], col[0], data[0] = (0, 0, caf-ca) + row[0], col[0], data[0] = (0, 0, caf - ca) row[1], col[1], data[1] = (0, 1, sv) - row[2], col[2], data[2] = (0, 2, -sv-k1-4*k3*ca) + row[2], col[2], data[2] = (0, 2, -sv - k1 - 4 * k3 * ca) row[3], col[3], data[3] = (1, 0, -cb) row[4], col[4], data[4] = (1, 2, k1) - row[5], col[5], data[5] = (1, 3, -sv-k2) + row[5], col[5], data[5] = (1, 3, -sv - k2) row[6], col[6], data[6] = (2, 0, -cc) row[7], col[7], data[7] = (2, 3, k2) row[8], col[8], data[8] = (2, 4, -sv) row[9], col[9], data[9] = (3, 0, -cd) - row[10], col[10], data[10] = (3, 2, 2*k3*ca) + row[10], col[10], data[10] = (3, 2, 2 * k3 * ca) row[11], col[11], data[11] = (3, 5, -sv) - ret = coo_matrix((data, (row, col)), shape=(4,6)) + ret = coo_matrix((data, (row, col)), shape=(4, 6)) return ret else: delta = 1e-8 u0 = np.copy(self._input_values) y0 = self.evaluate_equality_constraints() - jac = np.empty((4,6)) + jac = np.empty((4, 6)) u = np.copy(self._input_values) for j in range(len(u)): # perturb the variables u[j] += delta self.set_input_values(u) yperturb = self.evaluate_equality_constraints() - jac_col = (yperturb - y0)/delta - jac[:,j] = jac_col + jac_col = (yperturb - y0) / delta + jac[:, j] = jac_col u[j] = u0[j] # return us back to our starting state @@ -183,8 +182,8 @@ def evaluate_jacobian_equality_constraints(self): for c in range(6): row.append(r) col.append(c) - data.append(jac[r,c]) - ret = coo_matrix((data, (row, col)), shape=(4,6)) + data.append(jac[r, c]) + ret = coo_matrix((data, (row, col)), shape=(4, 6)) return ret def evaluate_jacobian_outputs(self): @@ -192,20 +191,20 @@ def evaluate_jacobian_outputs(self): cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - cb_ratio = cb/(ca+cc+cd) + cb_ratio = cb / (ca + cc + cd) row = np.zeros(4) col = np.zeros(4) data = np.zeros(4) - row[0], col[0], data[0] = (0, 2, -cb/(ca+cc+cd)**2) - row[1], col[1], data[1] = (0, 3, 1/(ca+cc+cd)) - row[2], col[2], data[2] = (0, 4, -cb/(ca+cc+cd)**2) - row[3], col[3], data[3] = (0, 5, -cb/(ca+cc+cd)**2) - return coo_matrix((data, (row, col)), shape=(1,6)) + row[0], col[0], data[0] = (0, 2, -cb / (ca + cc + cd) ** 2) + row[1], col[1], data[1] = (0, 3, 1 / (ca + cc + cd)) + row[2], col[2], data[2] = (0, 4, -cb / (ca + cc + cd) ** 2) + row[3], col[3], data[3] = (0, 5, -cb / (ca + cc + cd) ** 2) + return coo_matrix((data, (row, col)), shape=(1, 6)) class ReactorModelWithHessian(ReactorModel): def __init__(self): - super(ReactorModelWithHessian,self).__init__(True) + super(ReactorModelWithHessian, self).__init__(True) self._eq_con_mult_values = np.zeros(4) self._output_con_mult_values = np.zeros(1) @@ -224,36 +223,36 @@ def evaluate_hessian_equality_constraints(self): cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - k1 = 5/6 - k2 = 5/3 - k3 = 1/6000 + k1 = 5 / 6 + k2 = 5 / 3 + k3 = 1 / 6000 lam = self._eq_con_mult_values - + nnz = 7 irow = np.zeros(nnz, dtype=np.int64) jcol = np.zeros(nnz, dtype=np.int64) data = np.zeros(nnz, dtype=np.float64) idx = 0 - irow[idx], jcol[idx], data[idx] = (1, 0, lam[0]*1.0) + irow[idx], jcol[idx], data[idx] = (1, 0, lam[0] * 1.0) idx += 1 - irow[idx], jcol[idx], data[idx] = (2, 0, lam[0]*(-1.0)) + irow[idx], jcol[idx], data[idx] = (2, 0, lam[0] * (-1.0)) idx += 1 - irow[idx], jcol[idx], data[idx] = (2, 2, lam[0]*(-4*k3)) + irow[idx], jcol[idx], data[idx] = (2, 2, lam[0] * (-4 * k3)) idx += 1 - irow[idx], jcol[idx], data[idx] = (3, 0, lam[1]*(-1)) + irow[idx], jcol[idx], data[idx] = (3, 0, lam[1] * (-1)) idx += 1 - irow[idx], jcol[idx], data[idx] = (4, 0, lam[2]*(-1)) + irow[idx], jcol[idx], data[idx] = (4, 0, lam[2] * (-1)) idx += 1 - irow[idx], jcol[idx], data[idx] = (2, 2, lam[3]*(2*k3)) + irow[idx], jcol[idx], data[idx] = (2, 2, lam[3] * (2 * k3)) idx += 1 - irow[idx], jcol[idx], data[idx] = (5, 0, lam[3]*(-1)) + irow[idx], jcol[idx], data[idx] = (5, 0, lam[3] * (-1)) idx += 1 assert idx == nnz - hess = coo_matrix( (data, (irow,jcol)), shape=(6,6) ) + hess = coo_matrix((data, (irow, jcol)), shape=(6, 6)) return hess def evaluate_hessian_outputs(self): @@ -263,43 +262,43 @@ def evaluate_hessian_outputs(self): cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - k1 = 5/6 - k2 = 5/3 - k3 = 1/6000 + k1 = 5 / 6 + k2 = 5 / 3 + k3 = 1 / 6000 lam = self._output_con_mult_values - + nnz = 9 irow = np.zeros(nnz, dtype=np.int64) jcol = np.zeros(nnz, dtype=np.int64) data = np.zeros(nnz, dtype=np.float64) - h1 = 2*cb/((ca+cc+cd)**3) - h2 = -1.0/((ca+cc+cd)**2) - + h1 = 2 * cb / ((ca + cc + cd) ** 3) + h2 = -1.0 / ((ca + cc + cd) ** 2) + idx = 0 - irow[idx], jcol[idx], data[idx] = (2, 2, lam[0]*h1) + irow[idx], jcol[idx], data[idx] = (2, 2, lam[0] * h1) idx += 1 - irow[idx], jcol[idx], data[idx] = (3, 2, lam[0]*h2) + irow[idx], jcol[idx], data[idx] = (3, 2, lam[0] * h2) idx += 1 - irow[idx], jcol[idx], data[idx] = (4, 2, lam[0]*h1) + irow[idx], jcol[idx], data[idx] = (4, 2, lam[0] * h1) idx += 1 - irow[idx], jcol[idx], data[idx] = (4, 3, lam[0]*h2) + irow[idx], jcol[idx], data[idx] = (4, 3, lam[0] * h2) idx += 1 - irow[idx], jcol[idx], data[idx] = (4, 4, lam[0]*h1) + irow[idx], jcol[idx], data[idx] = (4, 4, lam[0] * h1) idx += 1 - irow[idx], jcol[idx], data[idx] = (5, 2, lam[0]*h1) + irow[idx], jcol[idx], data[idx] = (5, 2, lam[0] * h1) idx += 1 - irow[idx], jcol[idx], data[idx] = (5, 3, lam[0]*h2) + irow[idx], jcol[idx], data[idx] = (5, 3, lam[0] * h2) idx += 1 - irow[idx], jcol[idx], data[idx] = (5, 4, lam[0]*h1) + irow[idx], jcol[idx], data[idx] = (5, 4, lam[0] * h1) idx += 1 - irow[idx], jcol[idx], data[idx] = (5, 5, lam[0]*h1) + irow[idx], jcol[idx], data[idx] = (5, 5, lam[0] * h1) idx += 1 assert idx == nnz - hess = coo_matrix( (data, (irow,jcol)), shape=(6,6) ) + hess = coo_matrix((data, (irow, jcol)), shape=(6, 6)) return hess - + class ReactorModelNoOutputs(ExternalGreyBoxModel): def input_names(self): @@ -307,7 +306,7 @@ def input_names(self): def equality_constraint_names(self): return ['ca_bal', 'cb_bal', 'cc_bal', 'cd_bal'] - + def output_names(self): return [] @@ -337,16 +336,16 @@ def evaluate_equality_constraints(self): cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - k1 = 5/6 - k2 = 5/3 - k3 = 1/6000 + k1 = 5 / 6 + k2 = 5 / 3 + k3 = 1 / 6000 r = np.zeros(4) - r[0] = sv*caf + (-sv-k1)*ca - 2*k3*ca**2 - r[1] = k1*ca + (-sv-k2)*cb - r[2] = k2*cb - sv*cc - r[3] = k3*ca**2 - sv*cd + r[0] = sv * caf + (-sv - k1) * ca - 2 * k3 * ca**2 + r[1] = k1 * ca + (-sv - k2) * cb + r[2] = k2 * cb - sv * cc + r[3] = k3 * ca**2 - sv * cd return r - + def evaluate_outputs(self): raise NotImplementedError() @@ -357,38 +356,39 @@ def evaluate_jacobian_equality_constraints(self): cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - k1 = 5/6 - k2 = 5/3 - k3 = 1/6000 + k1 = 5 / 6 + k2 = 5 / 3 + k3 = 1 / 6000 row = np.zeros(12) col = np.zeros(12) data = np.zeros(12) - row[0], col[0], data[0] = (0, 0, caf-ca) + row[0], col[0], data[0] = (0, 0, caf - ca) row[1], col[1], data[1] = (0, 1, sv) - row[2], col[2], data[2] = (0, 2, -sv-k1-4*k3*ca) + row[2], col[2], data[2] = (0, 2, -sv - k1 - 4 * k3 * ca) row[3], col[3], data[3] = (1, 0, -cb) row[4], col[4], data[4] = (1, 2, k1) - row[5], col[5], data[5] = (1, 3, -sv-k2) + row[5], col[5], data[5] = (1, 3, -sv - k2) row[6], col[6], data[6] = (2, 0, -cc) row[7], col[7], data[7] = (2, 3, k2) row[8], col[8], data[8] = (2, 4, -sv) row[9], col[9], data[9] = (3, 0, -cd) - row[10], col[10], data[10] = (3, 2, 2*k3*ca) + row[10], col[10], data[10] = (3, 2, 2 * k3 * ca) row[11], col[11], data[11] = (3, 5, -sv) - ret = coo_matrix((data, (row, col)), shape=(4,6)) + ret = coo_matrix((data, (row, col)), shape=(4, 6)) return ret def evaluate_jacobian_outputs(self): raise NotImplementedError() - + + class ReactorModelScaled(ExternalGreyBoxModel): def input_names(self): return ['sv', 'caf', 'ca', 'cb', 'cc', 'cd'] def equality_constraint_names(self): return ['ca_bal', 'cb_bal', 'cc_bal', 'cd_bal'] - + def output_names(self): return ['cb_ratio'] @@ -438,22 +438,22 @@ def evaluate_equality_constraints(self): cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - k1 = 5/6 - k2 = 5/3 - k3 = 1/6000 + k1 = 5 / 6 + k2 = 5 / 3 + k3 = 1 / 6000 r = np.zeros(4) - r[0] = sv*caf + (-sv-k1)*ca - 2*k3*ca**2 - r[1] = k1*ca + (-sv-k2)*cb - r[2] = k2*cb - sv*cc - r[3] = k3*ca**2 - sv*cd + r[0] = sv * caf + (-sv - k1) * ca - 2 * k3 * ca**2 + r[1] = k1 * ca + (-sv - k2) * cb + r[2] = k2 * cb - sv * cc + r[3] = k3 * ca**2 - sv * cd return r - + def evaluate_outputs(self): ca = self._input_values[2] cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - cb_ratio = cb/(ca+cc+cd) + cb_ratio = cb / (ca + cc + cd) return np.asarray([cb_ratio], dtype=np.float64) def evaluate_jacobian_equality_constraints(self): @@ -463,26 +463,26 @@ def evaluate_jacobian_equality_constraints(self): cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - k1 = 5/6 - k2 = 5/3 - k3 = 1/6000 + k1 = 5 / 6 + k2 = 5 / 3 + k3 = 1 / 6000 row = np.zeros(12) col = np.zeros(12) data = np.zeros(12) - row[0], col[0], data[0] = (0, 0, caf-ca) + row[0], col[0], data[0] = (0, 0, caf - ca) row[1], col[1], data[1] = (0, 1, sv) - row[2], col[2], data[2] = (0, 2, -sv-k1-4*k3*ca) + row[2], col[2], data[2] = (0, 2, -sv - k1 - 4 * k3 * ca) row[3], col[3], data[3] = (1, 0, -cb) row[4], col[4], data[4] = (1, 2, k1) - row[5], col[5], data[5] = (1, 3, -sv-k2) + row[5], col[5], data[5] = (1, 3, -sv - k2) row[6], col[6], data[6] = (2, 0, -cc) row[7], col[7], data[7] = (2, 3, k2) row[8], col[8], data[8] = (2, 4, -sv) row[9], col[9], data[9] = (3, 0, -cd) - row[10], col[10], data[10] = (3, 2, 2*k3*ca) + row[10], col[10], data[10] = (3, 2, 2 * k3 * ca) row[11], col[11], data[11] = (3, 5, -sv) - ret = coo_matrix((data, (row, col)), shape=(4,6)) + ret = coo_matrix((data, (row, col)), shape=(4, 6)) return ret def evaluate_jacobian_outputs(self): @@ -490,12 +490,12 @@ def evaluate_jacobian_outputs(self): cb = self._input_values[3] cc = self._input_values[4] cd = self._input_values[5] - cb_ratio = cb/(ca+cc+cd) + cb_ratio = cb / (ca + cc + cd) row = np.zeros(4) col = np.zeros(4) data = np.zeros(4) - row[0], col[0], data[0] = (0, 2, -cb/(ca+cc+cd)**2) - row[1], col[1], data[1] = (0, 3, 1/(ca+cc+cd)) - row[2], col[2], data[2] = (0, 4, -cb/(ca+cc+cd)**2) - row[3], col[3], data[3] = (0, 5, -cb/(ca+cc+cd)**2) - return coo_matrix((data, (row, col)), shape=(1,6)) + row[0], col[0], data[0] = (0, 2, -cb / (ca + cc + cd) ** 2) + row[1], col[1], data[1] = (0, 3, 1 / (ca + cc + cd)) + row[2], col[2], data[2] = (0, 4, -cb / (ca + cc + cd) ** 2) + row[3], col[3], data[3] = (0, 5, -cb / (ca + cc + cd) ** 2) + return coo_matrix((data, (row, col)), shape=(1, 6)) diff --git a/pyomo/contrib/pynumero/examples/feasibility.py b/pyomo/contrib/pynumero/examples/feasibility.py index 3cef730ef7e..94baabb7bec 100644 --- a/pyomo/contrib/pynumero/examples/feasibility.py +++ b/pyomo/contrib/pynumero/examples/feasibility.py @@ -10,15 +10,16 @@ # ___________________________________________________________________________ from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP -from pyomo.contrib.pynumero.interfaces.utils import (build_bounds_mask, - build_compression_matrix, - full_to_compressed) +from pyomo.contrib.pynumero.interfaces.utils import ( + build_bounds_mask, + build_compression_matrix, + full_to_compressed, +) import pyomo.environ as pyo import numpy as np def create_basic_model(): - m = pyo.ConcreteModel() m.x = pyo.Var([1, 2, 3], domain=pyo.Reals) for i in range(1, 4): @@ -31,7 +32,7 @@ def create_basic_model(): m.x[2].setlb(0.0) m.x[3].setlb(0.0) m.x[2].setub(100.0) - m.obj = pyo.Objective(expr=m.x[2]**2) + m.obj = pyo.Objective(expr=m.x[2] ** 2) return m @@ -95,14 +96,18 @@ def main(): # lower and upper inequalities residual res_ineq_lb = Cineq_ineqlb * res_ineq - compressed_ineq_lb - res_ineq_ub = compressed_ineq_ub - Cineq_inequb*res_ineq + res_ineq_ub = compressed_ineq_ub - Cineq_inequb * res_ineq print("Residuals of inequality constraints lower bounds:", res_ineq_lb) print("Residuals of inequality constraints upper bounds:", res_ineq_ub) feasible = False - if np.all(res_xl >= 0) and np.all(res_xu >= 0) \ - and np.all(res_ineq_lb >= 0) and np.all(res_ineq_ub >= 0) and \ - np.allclose(res_eq, np.zeros(nlp.n_eq_constraints()), atol=1e-5): + if ( + np.all(res_xl >= 0) + and np.all(res_xu >= 0) + and np.all(res_ineq_lb >= 0) + and np.all(res_ineq_ub >= 0) + and np.allclose(res_eq, np.zeros(nlp.n_eq_constraints()), atol=1e-5) + ): feasible = True print("Is x0 feasible:", feasible) diff --git a/pyomo/contrib/pynumero/examples/mumps_example.py b/pyomo/contrib/pynumero/examples/mumps_example.py index 93aaa1d20e9..938fab99279 100644 --- a/pyomo/contrib/pynumero/examples/mumps_example.py +++ b/pyomo/contrib/pynumero/examples/mumps_example.py @@ -1,13 +1,17 @@ import numpy as np import scipy.sparse as sp from scipy.linalg import hilbert -from pyomo.contrib.pynumero.linalg.mumps_interface import MumpsCentralizedAssembledLinearSolver +from pyomo.contrib.pynumero.linalg.mumps_interface import ( + MumpsCentralizedAssembledLinearSolver, +) def main(): # create the matrix and the right hand sides N = 1000 - A = sp.coo_matrix(hilbert(N) + np.identity(N)) # a well-condition, symmetric, positive-definite matrix with off-diagonal entries + A = sp.coo_matrix( + hilbert(N) + np.identity(N) + ) # a well-condition, symmetric, positive-definite matrix with off-diagonal entries true_x1 = np.arange(N) true_x2 = np.array(list(reversed(np.arange(N)))) b1 = A * true_x1 @@ -44,14 +48,20 @@ def main(): assert np.allclose(x1, true_x1) # Set options - solver = MumpsCentralizedAssembledLinearSolver(icntl_options={11: 2}) # compute error stats - solver.set_cntl(2, 1e-4) # set the stopping criteria for iterative refinement - solver.set_icntl(10, 5) # set the maximum number of iterations for iterative refinement to 5 + solver = MumpsCentralizedAssembledLinearSolver( + icntl_options={11: 2} + ) # compute error stats + solver.set_cntl(2, 1e-4) # set the stopping criteria for iterative refinement + solver.set_icntl( + 10, 5 + ) # set the maximum number of iterations for iterative refinement to 5 x1, res = solver.solve(A, b1) assert np.allclose(x1, true_x1) # Get information after the solve - print('Number of iterations of iterative refinement performed: ', solver.get_infog(15)) + print( + 'Number of iterations of iterative refinement performed: ', solver.get_infog(15) + ) print('scaled residual: ', solver.get_rinfog(6)) diff --git a/pyomo/contrib/pynumero/examples/nlp_interface_2.py b/pyomo/contrib/pynumero/examples/nlp_interface_2.py index c3cf043be2e..ecd63d28c49 100644 --- a/pyomo/contrib/pynumero/examples/nlp_interface_2.py +++ b/pyomo/contrib/pynumero/examples/nlp_interface_2.py @@ -27,7 +27,7 @@ def create_problem(begin, end): def _x1dot(M, i): if i == M.t.first(): return pyo.Constraint.Skip - return M.xdot[1, i] == (1-M.x[2, i] ** 2) * M.x[1, i] - M.x[2, i] + M.u[i] + return M.xdot[1, i] == (1 - M.x[2, i] ** 2) * M.x[1, i] - M.x[2, i] + M.u[i] m.x1dotcon = pyo.Constraint(m.t, rule=_x1dot) @@ -65,10 +65,9 @@ def main(show_plot=True): # Discretize model using Orthogonal Collocation discretizer = pyo.TransformationFactory('dae.collocation') discretizer.apply_to(instance, nfe=100, ncp=3, scheme='LAGRANGE-RADAU') - discretizer.reduce_collocation_points(instance, - var=instance.u, - ncp=1, - contset=instance.t) + discretizer.reduce_collocation_points( + instance, var=instance.u, ncp=1, contset=instance.t + ) # Interface pyomo model with nlp nlp = PyomoNLP(instance) @@ -106,4 +105,4 @@ def main(show_plot=True): if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/pyomo/contrib/pynumero/examples/parallel_matvec.py b/pyomo/contrib/pynumero/examples/parallel_matvec.py index c64886dceb9..26a2ec9a632 100644 --- a/pyomo/contrib/pynumero/examples/parallel_matvec.py +++ b/pyomo/contrib/pynumero/examples/parallel_matvec.py @@ -1,37 +1,35 @@ import numpy as np -from mpi4py import MPI +from pyomo.common.dependencies import mpi4py from pyomo.contrib.pynumero.sparse.mpi_block_vector import MPIBlockVector from pyomo.contrib.pynumero.sparse.mpi_block_matrix import MPIBlockMatrix from scipy.sparse import random def main(): - comm = MPI.COMM_WORLD + comm = mpi4py.MPI.COMM_WORLD rank = comm.Get_rank() - + owners = [0, 1, 2, -1] x = MPIBlockVector(4, rank_owner=owners, mpi_comm=comm) - - owners = np.array([[ 0, -1, -1, 0], - [-1, 1, -1, 1], - [-1, -1, 2, 2]]) + + owners = np.array([[0, -1, -1, 0], [-1, 1, -1, 1], [-1, -1, 2, 2]]) a = MPIBlockMatrix(3, 4, rank_ownership=owners, mpi_comm=comm) - + np.random.seed(0) x.set_block(3, np.random.uniform(-10, 10, size=10)) - + np.random.seed(rank) x.set_block(rank, np.random.uniform(-10, 10, size=10)) a.set_block(rank, rank, random(10, 10, density=0.1)) a.set_block(rank, 3, random(10, 10, density=0.1)) - + b = a * x # parallel matrix-vector dot product - + # check the answer local_x = x.make_local_copy().flatten() local_a = a.to_local_array() local_b = b.make_local_copy().flatten() - + err = np.abs(local_a.dot(local_x) - local_b).max() if rank == 0: diff --git a/pyomo/contrib/pynumero/examples/parallel_vector_ops.py b/pyomo/contrib/pynumero/examples/parallel_vector_ops.py index f9daa35ec4e..4b155ce7493 100644 --- a/pyomo/contrib/pynumero/examples/parallel_vector_ops.py +++ b/pyomo/contrib/pynumero/examples/parallel_vector_ops.py @@ -1,25 +1,25 @@ import numpy as np -from mpi4py import MPI +from pyomo.common.dependencies import mpi4py from pyomo.contrib.pynumero.sparse.mpi_block_vector import MPIBlockVector def main(): - comm = MPI.COMM_WORLD + comm = mpi4py.MPI.COMM_WORLD rank = comm.Get_rank() - + owners = [2, 0, 1, -1] x = MPIBlockVector(4, rank_owner=owners, mpi_comm=comm) - x.set_block(owners.index(rank), np.ones(3)*(rank + 1)) + x.set_block(owners.index(rank), np.ones(3) * (rank + 1)) x.set_block(3, np.array([1, 2, 3])) - + y = MPIBlockVector(4, rank_owner=owners, mpi_comm=comm) - y.set_block(owners.index(rank), np.ones(3)*(rank + 1)) + y.set_block(owners.index(rank), np.ones(3) * (rank + 1)) y.set_block(3, np.array([1, 2, 3])) - + z1: MPIBlockVector = x + y # add x and y z2 = x.dot(y) # dot product z3 = np.abs(x).max() # infinity norm - + z1_local = z1.make_local_copy() if rank == 0: print(z1_local.flatten()) diff --git a/pyomo/contrib/pynumero/examples/sensitivity.py b/pyomo/contrib/pynumero/examples/sensitivity.py index f1775cea4b1..a3927d637b3 100644 --- a/pyomo/contrib/pynumero/examples/sensitivity.py +++ b/pyomo/contrib/pynumero/examples/sensitivity.py @@ -31,14 +31,19 @@ def create_model(eta1, eta2): model.nominal_eta2 = pyo.Param(initialize=eta2, mutable=True) # constraints + objective - model.const1 = pyo.Constraint(expr=6*model.x1+3*model.x2+2*model.x3 - model.eta1 == 0) - model.const2 = pyo.Constraint(expr=model.eta2*model.x1+model.x2-model.x3-1 == 0) + model.const1 = pyo.Constraint( + expr=6 * model.x1 + 3 * model.x2 + 2 * model.x3 - model.eta1 == 0 + ) + model.const2 = pyo.Constraint( + expr=model.eta2 * model.x1 + model.x2 - model.x3 - 1 == 0 + ) model.cost = pyo.Objective(expr=model.x1**2 + model.x2**2 + model.x3**2) model.consteta1 = pyo.Constraint(expr=model.eta1 == model.nominal_eta1) model.consteta2 = pyo.Constraint(expr=model.eta2 == model.nominal_eta2) return model + def compute_init_lam(nlp, x=None, lam_max=1e3): if x is None: x = nlp.init_primals() @@ -46,7 +51,9 @@ def compute_init_lam(nlp, x=None, lam_max=1e3): assert x.size == nlp.n_primals() nlp.set_primals(x) - assert nlp.n_ineq_constraints() == 0, "only supported for equality constrained nlps for now" + assert ( + nlp.n_ineq_constraints() == 0 + ), "only supported for equality constrained nlps for now" nx = nlp.n_primals() nc = nlp.n_constraints() @@ -58,7 +65,7 @@ def compute_init_lam(nlp, x=None, lam_max=1e3): df = nlp.evaluate_grad_objective() # create KKT system - kkt = BlockMatrix(2,2) + kkt = BlockMatrix(2, 2) kkt.set_block(0, 0, identity(nx)) kkt.set_block(1, 0, jac) kkt.set_block(0, 1, jac.transpose()) @@ -86,17 +93,34 @@ def main(): nlp.set_primals(x) nlp.set_duals(y) - J = nlp.extract_submatrix_jacobian(pyomo_variables=[m.x1, m.x2, m.x3], pyomo_constraints=[m.const1, m.const2]) - H = nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=[m.x1, m.x2, m.x3], pyomo_variables_cols=[m.x1, m.x2, m.x3]) + J = nlp.extract_submatrix_jacobian( + pyomo_variables=[m.x1, m.x2, m.x3], pyomo_constraints=[m.const1, m.const2] + ) + H = nlp.extract_submatrix_hessian_lag( + pyomo_variables_rows=[m.x1, m.x2, m.x3], pyomo_variables_cols=[m.x1, m.x2, m.x3] + ) - M = BlockMatrix(2,2) + M = BlockMatrix(2, 2) M.set_block(0, 0, H) M.set_block(1, 0, J) M.set_block(0, 1, J.transpose()) Np = BlockMatrix(2, 1) - Np.set_block(0, 0, nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=[m.x1, m.x2, m.x3], pyomo_variables_cols=[m.eta1, m.eta2])) - Np.set_block(1, 0, nlp.extract_submatrix_jacobian(pyomo_variables=[m.eta1, m.eta2], pyomo_constraints=[m.const1, m.const2])) + Np.set_block( + 0, + 0, + nlp.extract_submatrix_hessian_lag( + pyomo_variables_rows=[m.x1, m.x2, m.x3], + pyomo_variables_cols=[m.eta1, m.eta2], + ), + ) + Np.set_block( + 1, + 0, + nlp.extract_submatrix_jacobian( + pyomo_variables=[m.eta1, m.eta2], pyomo_constraints=[m.const1, m.const2] + ), + ) ds = spsolve(M.tocsc(), -Np.tocsc()) diff --git a/pyomo/contrib/pynumero/examples/sqp.py b/pyomo/contrib/pynumero/examples/sqp.py index ead348f3e07..7d321676817 100644 --- a/pyomo/contrib/pynumero/examples/sqp.py +++ b/pyomo/contrib/pynumero/examples/sqp.py @@ -98,7 +98,10 @@ def _pde(m, x, t): else: # print(foo.last_t, t-dt, abs(foo.last_t - (t-dt))) # assert math.isclose(foo.last_t, t - dt, abs_tol=1e-6) - e = m.dydt[x, t] - m.v * m.dydx2[x, t] + m.dydx[x, t] * m.y[x, t] == m.r + m.u[x, t] + e = ( + m.dydt[x, t] - m.v * m.dydx2[x, t] + m.dydx[x, t] * m.y[x, t] + == m.r + m.u[x, t] + ) return e m.pde = pe.Constraint(m.x, m.t, rule=_pde) @@ -133,10 +136,11 @@ def _obj(m): return m -def sqp(nlp: NLP, linear_solver: LinearSolverInterface, - max_iter=100, tol=1e-8, output=True): +def sqp( + nlp: NLP, linear_solver: LinearSolverInterface, max_iter=100, tol=1e-8, output=True +): """ - An example of a simple SQP algoritm for + An example of a simple SQP algorithm for equality-constrained NLPs. Parameters @@ -149,7 +153,7 @@ def sqp(nlp: NLP, linear_solver: LinearSolverInterface, The convergence tolerance """ t0 = time.time() - + # setup KKT matrix kkt = BlockMatrix(2, 2) rhs = BlockVector(2) @@ -160,22 +164,27 @@ def sqp(nlp: NLP, linear_solver: LinearSolverInterface, z.set_block(1, nlp.get_duals()) if output: - print(f"{'Iter':<12}{'Objective':<12}{'Primal Infeasibility':<25}{'Dual Infeasibility':<25}{'Elapsed Time':<15}") + print( + f"{'Iter':<12}{'Objective':<12}{'Primal Infeasibility':<25}{'Dual Infeasibility':<25}{'Elapsed Time':<15}" + ) # main iteration loop for _iter in range(max_iter): nlp.set_primals(z.get_block(0)) nlp.set_duals(z.get_block(1)) - grad_lag = (nlp.evaluate_grad_objective() + - nlp.evaluate_jacobian_eq().transpose() * z.get_block(1)) + grad_lag = ( + nlp.evaluate_grad_objective() + + nlp.evaluate_jacobian_eq().transpose() * z.get_block(1) + ) residuals = nlp.evaluate_eq_constraints() if output: - print(f"{_iter:<12}{nlp.evaluate_objective():<12.2e}{np.abs(residuals).max():<25.2e}{np.abs(grad_lag).max():<25.2e}{time.time()-t0:<15.2e}") + print( + f"{_iter:<12}{nlp.evaluate_objective():<12.2e}{np.abs(residuals).max():<25.2e}{np.abs(grad_lag).max():<25.2e}{time.time()-t0:<15.2e}" + ) - if (np.abs(grad_lag).max() <= tol and - np.abs(residuals).max() <= tol): + if np.abs(grad_lag).max() <= tol and np.abs(residuals).max() <= tol: break kkt.set_block(0, 0, nlp.evaluate_hessian_lag()) @@ -189,7 +198,7 @@ def sqp(nlp: NLP, linear_solver: LinearSolverInterface, assert res.status == LinearSolverStatus.successful z += delta - + def load_solution(m: pe.ConcreteModel(), nlp: PyomoNLP): primals = nlp.get_primals() pyomo_vars = nlp.get_pyomo_variables() @@ -208,7 +217,7 @@ def main(linear_solver, nfe_x=100, nfe_t=200): if __name__ == '__main__': # create the linear solver linear_solver = MA27() - linear_solver.set_cntl(1, 1e-6) # pivot tolerance + linear_solver.set_cntl(1, 1e-6) # pivot tolerance optimal_obj = main(linear_solver) print(f'Optimal Objective: {optimal_obj}') diff --git a/pyomo/contrib/pynumero/examples/tests/test_cyipopt_examples.py b/pyomo/contrib/pynumero/examples/tests/test_cyipopt_examples.py index 22fa020bf50..167b0601f7a 100644 --- a/pyomo/contrib/pynumero/examples/tests/test_cyipopt_examples.py +++ b/pyomo/contrib/pynumero/examples/tests/test_cyipopt_examples.py @@ -20,7 +20,10 @@ import logging from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) from pyomo.common.dependencies.scipy import sparse as spa @@ -32,21 +35,23 @@ 'One of the tests below requires a recent version of pandas for' ' comparing with a tolerance.', minimum_version='1.1.0', - defer_check=False) + defer_check=False, +) from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): - raise unittest.SkipTest( - "Pynumero needs the ASL extension to run CyIpopt tests") + raise unittest.SkipTest("Pynumero needs the ASL extension to run CyIpopt tests") import pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver as cyipopt_solver + if not cyipopt_solver.cyipopt_available: - raise unittest.SkipTest( - "PyNumero needs CyIpopt installed to run CyIpopt tests") + raise unittest.SkipTest("PyNumero needs CyIpopt installed to run CyIpopt tests") import cyipopt as cyipopt_core example_dir = os.path.join(this_file_dir(), '..') + class TestPyomoCyIpoptSolver(unittest.TestCase): def test_status_maps(self): # verify that all status messages from cyipopy can be cleanly @@ -59,27 +64,51 @@ def test_status_maps(self): class TestExamples(unittest.TestCase): def test_external_grey_box_react_example_maximize_cb_outputs(self): - ex = import_file(os.path.join(example_dir, 'external_grey_box', 'react_example', 'maximize_cb_outputs.py')) + ex = import_file( + os.path.join( + example_dir, + 'external_grey_box', + 'react_example', + 'maximize_cb_outputs.py', + ) + ) m = ex.maximize_cb_outputs() self.assertAlmostEqual(pyo.value(m.reactor.inputs['sv']), 1.34381, places=3) self.assertAlmostEqual(pyo.value(m.reactor.outputs['cb']), 1072.4372, places=2) def test_external_grey_box_react_example_maximize_cb_outputs_scaling(self): - ex = import_file(os.path.join(example_dir, 'external_grey_box', 'react_example', 'maximize_cb_ratio_residuals.py')) - aoptions={'nlp_scaling_method': 'user-scaling', - 'output_file': '_cyipopt-external-greybox-react-scaling.log', - 'file_print_level':10} - m = ex.maximize_cb_ratio_residuals_with_output_scaling(additional_options=aoptions) + ex = import_file( + os.path.join( + example_dir, + 'external_grey_box', + 'react_example', + 'maximize_cb_ratio_residuals.py', + ) + ) + aoptions = { + 'nlp_scaling_method': 'user-scaling', + 'output_file': '_cyipopt-external-greybox-react-scaling.log', + 'file_print_level': 10, + } + m = ex.maximize_cb_ratio_residuals_with_output_scaling( + additional_options=aoptions + ) self.assertAlmostEqual(pyo.value(m.reactor.inputs['sv']), 1.26541996, places=3) - self.assertAlmostEqual(pyo.value(m.reactor.inputs['cb']), 1071.7410089, places=2) - self.assertAlmostEqual(pyo.value(m.reactor.outputs['cb_ratio']), 0.15190409266, places=3) + self.assertAlmostEqual( + pyo.value(m.reactor.inputs['cb']), 1071.7410089, places=2 + ) + self.assertAlmostEqual( + pyo.value(m.reactor.outputs['cb_ratio']), 0.15190409266, places=3 + ) with open('_cyipopt-external-greybox-react-scaling.log', 'r') as fd: solver_trace = fd.read() os.remove('_cyipopt-external-greybox-react-scaling.log') self.assertIn('nlp_scaling_method = user-scaling', solver_trace) - self.assertIn('output_file = _cyipopt-external-greybox-react-scaling.log', solver_trace) + self.assertIn( + 'output_file = _cyipopt-external-greybox-react-scaling.log', solver_trace + ) self.assertIn('objective scaling factor = 1', solver_trace) self.assertIn('x scaling provided', solver_trace) self.assertIn('c scaling provided', solver_trace) @@ -101,54 +130,115 @@ def test_external_grey_box_react_example_maximize_cb_outputs_scaling(self): self.assertIn('c scaling vector[ 6]= 1.0000000000000000e+01', solver_trace) def test_external_grey_box_react_example_maximize_with_output(self): - ex = import_file(os.path.join(example_dir, 'external_grey_box', 'react_example', 'maximize_cb_ratio_residuals.py')) + ex = import_file( + os.path.join( + example_dir, + 'external_grey_box', + 'react_example', + 'maximize_cb_ratio_residuals.py', + ) + ) m = ex.maximize_cb_ratio_residuals_with_output() self.assertAlmostEqual(pyo.value(m.reactor.inputs['sv']), 1.26541996, places=3) - self.assertAlmostEqual(pyo.value(m.reactor.inputs['cb']), 1071.7410089, places=2) - self.assertAlmostEqual(pyo.value(m.reactor.outputs['cb_ratio']), 0.15190409266, places=3) + self.assertAlmostEqual( + pyo.value(m.reactor.inputs['cb']), 1071.7410089, places=2 + ) + self.assertAlmostEqual( + pyo.value(m.reactor.outputs['cb_ratio']), 0.15190409266, places=3 + ) def test_external_grey_box_react_example_maximize_with_hessian_with_output(self): - ex = import_file(os.path.join(example_dir, 'external_grey_box', 'react_example', 'maximize_cb_ratio_residuals.py')) + ex = import_file( + os.path.join( + example_dir, + 'external_grey_box', + 'react_example', + 'maximize_cb_ratio_residuals.py', + ) + ) m = ex.maximize_cb_ratio_residuals_with_hessian_with_output() self.assertAlmostEqual(pyo.value(m.reactor.inputs['sv']), 1.26541996, places=3) - self.assertAlmostEqual(pyo.value(m.reactor.inputs['cb']), 1071.7410089, places=2) - self.assertAlmostEqual(pyo.value(m.reactor.outputs['cb_ratio']), 0.15190409266, places=3) + self.assertAlmostEqual( + pyo.value(m.reactor.inputs['cb']), 1071.7410089, places=2 + ) + self.assertAlmostEqual( + pyo.value(m.reactor.outputs['cb_ratio']), 0.15190409266, places=3 + ) - def test_external_grey_box_react_example_maximize_with_hessian_with_output_pyomo(self): - ex = import_file(os.path.join(example_dir, 'external_grey_box', 'react_example', 'maximize_cb_ratio_residuals.py')) + def test_external_grey_box_react_example_maximize_with_hessian_with_output_pyomo( + self, + ): + ex = import_file( + os.path.join( + example_dir, + 'external_grey_box', + 'react_example', + 'maximize_cb_ratio_residuals.py', + ) + ) m = ex.maximize_cb_ratio_residuals_with_hessian_with_output_pyomo() self.assertAlmostEqual(pyo.value(m.sv), 1.26541996, places=3) self.assertAlmostEqual(pyo.value(m.cb), 1071.7410089, places=2) self.assertAlmostEqual(pyo.value(m.cb_ratio), 0.15190409266, places=3) def test_pyomo_react_example_maximize_with_obj(self): - ex = import_file(os.path.join(example_dir, 'external_grey_box', 'react_example', 'maximize_cb_ratio_residuals.py')) + ex = import_file( + os.path.join( + example_dir, + 'external_grey_box', + 'react_example', + 'maximize_cb_ratio_residuals.py', + ) + ) m = ex.maximize_cb_ratio_residuals_with_obj() self.assertAlmostEqual(pyo.value(m.reactor.inputs['sv']), 1.26541996, places=3) - self.assertAlmostEqual(pyo.value(m.reactor.inputs['cb']), 1071.7410089, places=2) + self.assertAlmostEqual( + pyo.value(m.reactor.inputs['cb']), 1071.7410089, places=2 + ) self.assertAlmostEqual(pyo.value(m.obj), 0.15190409266, places=3) - def test_external_grey_box_react_example_maximize_with_additional_pyomo_variables(self): - ex = import_file(os.path.join(example_dir, 'external_grey_box', 'react_example', 'maximize_cb_ratio_residuals.py')) + def test_external_grey_box_react_example_maximize_with_additional_pyomo_variables( + self, + ): + ex = import_file( + os.path.join( + example_dir, + 'external_grey_box', + 'react_example', + 'maximize_cb_ratio_residuals.py', + ) + ) m = ex.maximize_cb_ratio_residuals_with_pyomo_variables() self.assertAlmostEqual(pyo.value(m.reactor.inputs['sv']), 1.26541996, places=3) - self.assertAlmostEqual(pyo.value(m.reactor.inputs['cb']), 1071.7410089, places=2) + self.assertAlmostEqual( + pyo.value(m.reactor.inputs['cb']), 1071.7410089, places=2 + ) self.assertAlmostEqual(pyo.value(m.cb_ratio), 0.15190409266, places=3) @unittest.skipIf(not pandas_available, "Test uses pandas for data") def test_parameter_estimation(self): - data_fname = os.path.join(example_dir, 'external_grey_box', 'param_est', 'smalldata.csv') + data_fname = os.path.join( + example_dir, 'external_grey_box', 'param_est', 'smalldata.csv' + ) baseline = pandas.read_csv(data_fname) # test the data generator - ex = import_file(os.path.join(example_dir, 'external_grey_box', 'param_est', 'generate_data.py')) + ex = import_file( + os.path.join( + example_dir, 'external_grey_box', 'param_est', 'generate_data.py' + ) + ) df1 = ex.generate_data(5, 200, 5, 42) df2 = ex.generate_data_external(5, 200, 5, 42) pandas.testing.assert_frame_equal(df1, baseline, atol=1e-3) pandas.testing.assert_frame_equal(df2, baseline, atol=1e-3) # test the estimation - ex = import_file(os.path.join(example_dir, 'external_grey_box', 'param_est', 'perform_estimation.py')) + ex = import_file( + os.path.join( + example_dir, 'external_grey_box', 'param_est', 'perform_estimation.py' + ) + ) m = ex.perform_estimation_external(data_fname, solver_trace=False) self.assertAlmostEqual(pyo.value(m.UA), 204.43761, places=3) @@ -163,12 +253,13 @@ def test_cyipopt_callbacks(self): with LoggingIntercept(output, 'pyomo', logging.INFO): ex.main() - self.assertIn("Residuals for iteration 2", - output.getvalue().strip()) + self.assertIn("Residuals for iteration 2", output.getvalue().strip()) @unittest.skipIf(not pandas_available, "pandas needed to run this example") def test_cyipopt_functor(self): - ex = import_file(os.path.join(example_dir, 'callback', 'cyipopt_functor_callback.py')) + ex = import_file( + os.path.join(example_dir, 'callback', 'cyipopt_functor_callback.py') + ) df = ex.main() self.assertEqual(df.shape, (7, 5)) # check one of the residuals @@ -176,7 +267,10 @@ def test_cyipopt_functor(self): self.assertAlmostEqual(s.iloc[6], 0, places=3) def test_cyipopt_callback_halt(self): - ex = import_file(os.path.join(example_dir, 'callback', 'cyipopt_callback_halt.py')) + ex = import_file( + os.path.join(example_dir, 'callback', 'cyipopt_callback_halt.py') + ) status = ex.main() - self.assertEqual(status.solver.termination_condition, TerminationCondition.userInterrupt) - + self.assertEqual( + status.solver.termination_condition, TerminationCondition.userInterrupt + ) diff --git a/pyomo/contrib/pynumero/examples/tests/test_examples.py b/pyomo/contrib/pynumero/examples/tests/test_examples.py index cd083094cf2..5c7993ebbb6 100644 --- a/pyomo/contrib/pynumero/examples/tests/test_examples.py +++ b/pyomo/contrib/pynumero/examples/tests/test_examples.py @@ -7,6 +7,7 @@ import numpy as np from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): raise unittest.SkipTest('Pynumero examples need ASL') @@ -14,15 +15,18 @@ from pyomo.contrib.pynumero.linalg.scipy_interface import ScipyLU import pyomo.environ as pe + ipopt_opt = pe.SolverFactory('ipopt') ipopt_available = ipopt_opt.available(exception_flag=False) -from pyomo.contrib.pynumero.examples import (nlp_interface, - nlp_interface_2, - feasibility, - mumps_example, - sensitivity, - sqp) +from pyomo.contrib.pynumero.examples import ( + nlp_interface, + nlp_interface_2, + feasibility, + mumps_example, + sensitivity, + sqp, +) class TestPyNumeroExamples(unittest.TestCase): @@ -32,19 +36,16 @@ def test_nlp_interface(self): def test_nlp_interface_2(self): nlp_interface_2.main(show_plot=False) - @unittest.skipIf(not ipopt_available, - "feasibility example requires ipopt") + @unittest.skipIf(not ipopt_available, "feasibility example requires ipopt") def test_feasibility(self): is_feasible = feasibility.main() self.assertTrue(is_feasible) - @unittest.skipIf(not mumps_available, - 'mumps example needs pymumps') + @unittest.skipIf(not mumps_available, 'mumps example needs pymumps') def test_mumps_example(self): mumps_example.main() - @unittest.skipIf(not ipopt_available, - "sensitivity example requires ipopt") + @unittest.skipIf(not ipopt_available, "sensitivity example requires ipopt") def test_sensitivity(self): x_sens, x_correct = sensitivity.main() self.assertTrue(np.allclose(x_sens, x_correct, rtol=1e-3, atol=1e-4)) diff --git a/pyomo/contrib/pynumero/examples/tests/test_mpi_examples.py b/pyomo/contrib/pynumero/examples/tests/test_mpi_examples.py index d0d3e9d2127..3c47d58754e 100644 --- a/pyomo/contrib/pynumero/examples/tests/test_mpi_examples.py +++ b/pyomo/contrib/pynumero/examples/tests/test_mpi_examples.py @@ -1,30 +1,30 @@ import pyomo.common.unittest as unittest from pyomo.contrib.pynumero.dependencies import ( - numpy_available, scipy_available, numpy as np + numpy_available, + scipy_available, + numpy as np, ) -SKIPTESTS=[] +SKIPTESTS = [] if numpy_available and scipy_available: pass else: - SKIPTESTS.append( - "Pynumero needs scipy and numpy>=1.13.0 to run BlockMatrix tests" - ) + SKIPTESTS.append("Pynumero needs scipy and numpy>=1.13.0 to run BlockMatrix tests") try: from mpi4py import MPI + comm = MPI.COMM_WORLD if comm.Get_size() != 3: - SKIPTESTS.append( - "Pynumero MPI examples require exactly 3 processes" - ) + SKIPTESTS.append("Pynumero MPI examples require exactly 3 processes") except ImportError: SKIPTESTS.append("Pynumero MPI examples require exactly 3 processes") if not SKIPTESTS: from pyomo.contrib.pynumero.examples import parallel_vector_ops, parallel_matvec + @unittest.pytest.mark.mpi @unittest.skipIf(SKIPTESTS, SKIPTESTS) class TestExamples(unittest.TestCase): diff --git a/pyomo/checker/tests/examples/model/ModelValue_globalif.py b/pyomo/contrib/pynumero/exceptions.py similarity index 66% rename from pyomo/checker/tests/examples/model/ModelValue_globalif.py rename to pyomo/contrib/pynumero/exceptions.py index 8f200fc657f..dc2167d75d2 100644 --- a/pyomo/checker/tests/examples/model/ModelValue_globalif.py +++ b/pyomo/contrib/pynumero/exceptions.py @@ -9,12 +9,12 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.environ import AbstractModel, Var, value -model = AbstractModel() -model.X = Var() +class PyNumeroEvaluationError(ArithmeticError): + """An exception to be raised by PyNumero evaluation backends in the event + of a failed function evaluation. This should be caught by solver interfaces + and translated to the solver-specific evaluation error API. + + """ -if model.X >= 10.0: - pass -if value(model.X) >= 10.0: pass diff --git a/pyomo/contrib/pynumero/interfaces/__init__.py b/pyomo/contrib/pynumero/interfaces/__init__.py index 4093769f23f..debe453e175 100644 --- a/pyomo/contrib/pynumero/interfaces/__init__.py +++ b/pyomo/contrib/pynumero/interfaces/__init__.py @@ -9,10 +9,10 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -#from ..dependencies import numpy_available, scipy_available +# from ..dependencies import numpy_available, scipy_available # TODO: What do we want to import from interfaces? -#if numpy_available and scipy_available: +# if numpy_available and scipy_available: # from .nlp import AmplNLP, PyomoNLP # from .ampl_nlp import AslNLP # from .nlp_compositions import TwoStageStochasticNLP diff --git a/pyomo/contrib/pynumero/interfaces/ampl_nlp.py b/pyomo/contrib/pynumero/interfaces/ampl_nlp.py index 785259edfb9..f5bd56696cf 100644 --- a/pyomo/contrib/pynumero/interfaces/ampl_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/ampl_nlp.py @@ -16,8 +16,10 @@ import pyomo.contrib.pynumero.asl as _asl except ImportError as e: print('{}'.format(e)) - raise ImportError('Error importing asl.' - 'Make sure libpynumero_ASL is installed and added to path.') + raise ImportError( + 'Error importing asl.' + 'Make sure libpynumero_ASL is installed and added to path.' + ) from scipy.sparse import coo_matrix import os @@ -27,16 +29,17 @@ __all__ = ['AslNLP', 'AmplNLP'] + # ToDo: need to add support for modifying bounds. # support for changing variable bounds seems possible. # support for changing inequality bounds would require more work. (this is less frequent?) -# TODO: check performance impacts of cacheing - memory and computational time. +# TODO: check performance impacts of caching - memory and computational time. # TODO: only create and cache data for ExtendedNLP methods if they are ever asked for # TODO: There are todos in the code below class AslNLP(ExtendedNLP): def __init__(self, nl_file): """ - Base class for NLP classes based on the Ampl Solver Library and + Base class for NLP classes based on the Ampl Solver Library and NL files. Parameters @@ -65,20 +68,36 @@ def __init__(self, nl_file): self._cached_objective = None self._cached_grad_objective = self.create_new_vector('primals') self._cached_con_full = np.zeros(self._n_con_full, dtype=np.float64) - self._cached_jac_full = coo_matrix((np.zeros(self._nnz_jac_full, dtype=np.float64), - (self._irows_jac_full, self._jcols_jac_full)), - shape=(self._n_con_full, self._n_primals)) + self._cached_jac_full = coo_matrix( + ( + np.zeros(self._nnz_jac_full, dtype=np.float64), + (self._irows_jac_full, self._jcols_jac_full), + ), + shape=(self._n_con_full, self._n_primals), + ) # these are only being cached for quicker copy of the matrix with the nonzero structure # TODO: only create these caches if the ExtendedNLP methods are asked for? - self._cached_jac_eq = coo_matrix((np.zeros(self._nnz_jac_eq, dtype=np.float64), - (self._irows_jac_eq, self._jcols_jac_eq)), - shape=(self._n_con_eq, self._n_primals)) - self._cached_jac_ineq = coo_matrix((np.zeros(self._nnz_jac_ineq), - (self._irows_jac_ineq, self._jcols_jac_ineq)), - shape=(self._n_con_ineq, self._n_primals)) - self._cached_hessian_lag = coo_matrix((np.zeros(self._nnz_hessian_lag, dtype=np.float64), - (self._irows_hess, self._jcols_hess)), - shape=(self._n_primals, self._n_primals)) + self._cached_jac_eq = coo_matrix( + ( + np.zeros(self._nnz_jac_eq, dtype=np.float64), + (self._irows_jac_eq, self._jcols_jac_eq), + ), + shape=(self._n_con_eq, self._n_primals), + ) + self._cached_jac_ineq = coo_matrix( + ( + np.zeros(self._nnz_jac_ineq), + (self._irows_jac_ineq, self._jcols_jac_ineq), + ), + shape=(self._n_con_ineq, self._n_primals), + ) + self._cached_hessian_lag = coo_matrix( + ( + np.zeros(self._nnz_hessian_lag, dtype=np.float64), + (self._irows_hess, self._jcols_hess), + ), + shape=(self._n_primals, self._n_primals), + ) self._invalidate_primals_cache() self._invalidate_duals_cache() @@ -107,7 +126,7 @@ def _collect_nlp_structure(self): self._nnz_jac_full = self._asl.get_nnz_jac_g() self._nnz_hess_lag_lower = self._asl.get_nnz_hessian_lag() - # get the initial values for the primals + # get the initial values for the primals self._init_primals = np.zeros(self._n_primals, dtype=np.float64) self._init_duals_full = np.zeros(self._n_con_full, dtype=np.float64) self._asl.get_init_x(self._init_primals) @@ -134,7 +153,9 @@ def _collect_nlp_structure(self): bounds_difference = self._primals_ub - self._primals_lb if np.any(bounds_difference < 0): print(np.where(bounds_difference < 0)) - raise RuntimeError("Some variables have lower bounds that are greater than the upper bounds.") + raise RuntimeError( + "Some variables have lower bounds that are greater than the upper bounds." + ) # Build the maps for converting from the full constraint # vector (which includes all equality and inequality constraints) @@ -150,7 +171,9 @@ def _collect_nlp_structure(self): # get the initial values for the dual variables self._init_duals_eq = np.compress(self._con_full_eq_mask, self._init_duals_full) - self._init_duals_ineq = np.compress(self._con_full_ineq_mask, self._init_duals_full) + self._init_duals_ineq = np.compress( + self._con_full_ineq_mask, self._init_duals_full + ) self._init_duals_eq.flags.writeable = False self._init_duals_ineq.flags.writeable = False @@ -164,7 +187,7 @@ def _collect_nlp_structure(self): self._con_full_ub[self._con_full_eq_mask] = 0.0 self._con_full_lb.flags.writeable = False self._con_full_ub.flags.writeable = False - + # set number of equatity and inequality constraints from maps self._n_con_eq = len(self._con_eq_full_map) self._n_con_ineq = len(self._con_ineq_full_map) @@ -180,19 +203,31 @@ def _collect_nlp_structure(self): self._nz_con_full_eq_mask = np.isin(self._irows_jac_full, self._con_eq_full_map) self._nz_con_full_ineq_mask = np.logical_not(self._nz_con_full_eq_mask) - self._irows_jac_eq = np.compress(self._nz_con_full_eq_mask, self._irows_jac_full) - self._jcols_jac_eq = np.compress(self._nz_con_full_eq_mask, self._jcols_jac_full) - self._irows_jac_ineq = np.compress(self._nz_con_full_ineq_mask, self._irows_jac_full) - self._jcols_jac_ineq = np.compress(self._nz_con_full_ineq_mask, self._jcols_jac_full) + self._irows_jac_eq = np.compress( + self._nz_con_full_eq_mask, self._irows_jac_full + ) + self._jcols_jac_eq = np.compress( + self._nz_con_full_eq_mask, self._jcols_jac_full + ) + self._irows_jac_ineq = np.compress( + self._nz_con_full_ineq_mask, self._irows_jac_full + ) + self._jcols_jac_ineq = np.compress( + self._nz_con_full_ineq_mask, self._jcols_jac_full + ) self._nnz_jac_eq = len(self._irows_jac_eq) self._nnz_jac_ineq = len(self._irows_jac_ineq) # this is expensive but only done once - can we do this with numpy somehow? - self._con_full_eq_map = full_eq_map = {self._con_eq_full_map[i]: i for i in range(self._n_con_eq)} + self._con_full_eq_map = full_eq_map = { + self._con_eq_full_map[i]: i for i in range(self._n_con_eq) + } for i, v in enumerate(self._irows_jac_eq): self._irows_jac_eq[i] = full_eq_map[v] - self._con_full_ineq_map = full_ineq_map = {self._con_ineq_full_map[i]: i for i in range(self._n_con_ineq)} + self._con_full_ineq_map = full_ineq_map = { + self._con_ineq_full_map[i]: i for i in range(self._n_con_ineq) + } for i, v in enumerate(self._irows_jac_ineq): self._irows_jac_ineq[i] = full_ineq_map[v] @@ -233,7 +268,9 @@ def _build_constraint_maps(self): bounds_difference = self._con_full_ub - self._con_full_lb inconsistent_bounds = np.any(bounds_difference < 0.0) if inconsistent_bounds: - raise RuntimeError("Bounds on range constraints found with upper bounds set below the lower bounds.") + raise RuntimeError( + "Bounds on range constraints found with upper bounds set below the lower bounds." + ) # build maps from con_full to con_eq and con_ineq abs_bounds_difference = np.absolute(bounds_difference) @@ -284,7 +321,7 @@ def n_ineq_constraints(self): # overloaded from NLP def nnz_jacobian(self): return self._nnz_jac_full - + # overloaded from ExtendedNLP def nnz_jacobian_eq(self): return self._nnz_jac_eq @@ -308,11 +345,11 @@ def primals_ub(self): # overloaded from NLP def constraints_lb(self): return self._con_full_lb - + # overloaded from NLP def constraints_ub(self): return self._con_full_ub - + # overloaded from ExtendedNLP def ineq_lb(self): return self._con_ineq_lb @@ -340,7 +377,7 @@ def init_duals_ineq(self): # overloaded from NLP / Extended NLP def create_new_vector(self, vector_type): """ - Creates a vector of the appropriate length and structure as + Creates a vector of the appropriate length and structure as requested Parameters @@ -371,7 +408,7 @@ def set_primals(self, primals): # overloaded from NLP def get_primals(self): - return self._primals.copy() + return self._primals.copy() # overloaded from NLP def set_duals(self, duals): @@ -393,7 +430,7 @@ def set_obj_factor(self, obj_factor): # overloaded from NLP def get_obj_factor(self): return self._obj_factor - + # overloaded from ExtendedNLP def set_duals_eq(self, duals_eq): self._invalidate_duals_cache() @@ -432,16 +469,14 @@ def get_constraints_scaling(self): def get_eq_constraints_scaling(self): constraints_scaling = self.get_constraints_scaling() if constraints_scaling is not None: - return np.compress(self._con_full_eq_mask, - constraints_scaling) + return np.compress(self._con_full_eq_mask, constraints_scaling) return None # overloaded from ExtendedNLP def get_ineq_constraints_scaling(self): constraints_scaling = self.get_constraints_scaling() if constraints_scaling is not None: - return np.compress(self._con_full_ineq_mask, - constraints_scaling) + return np.compress(self._con_full_ineq_mask, constraints_scaling) return None def _evaluate_objective_and_cache_if_necessary(self): @@ -462,7 +497,11 @@ def evaluate_grad_objective(self, out=None): if out is not None: if not isinstance(out, np.ndarray) or out.size != self._n_primals: - raise RuntimeError('Called evaluate_grad_objective with an invalid "out" argument - should take an ndarray of size {}'.format(self._n_primals)) + raise RuntimeError( + 'Called evaluate_grad_objective with an invalid "out" argument - should take an ndarray of size {}'.format( + self._n_primals + ) + ) np.copyto(out, self._cached_grad_objective) return out else: @@ -482,9 +521,11 @@ def evaluate_constraints(self, out=None): if out is not None: if not isinstance(out, np.ndarray) or out.size != self._n_con_full: - raise RuntimeError('Called evaluate_constraints with an invalid' - ' "out" argument - should take an ndarray of ' - 'size {}'.format(self._n_con_full)) + raise RuntimeError( + 'Called evaluate_constraints with an invalid' + ' "out" argument - should take an ndarray of ' + 'size {}'.format(self._n_con_full) + ) np.copyto(out, self._cached_con_full) return out else: @@ -496,11 +537,13 @@ def evaluate_eq_constraints(self, out=None): if out is not None: if not isinstance(out, np.ndarray) or out.size != self._n_con_eq: - raise RuntimeError('Called evaluate_eq_constraints with an invalid' - ' "out" argument - should take an ndarray of ' - 'size {}'.format(self._n_con_eq)) + raise RuntimeError( + 'Called evaluate_eq_constraints with an invalid' + ' "out" argument - should take an ndarray of ' + 'size {}'.format(self._n_con_eq) + ) self._cached_con_full.compress(self._con_full_eq_mask, out=out) - return out + return out else: return self._cached_con_full.compress(self._con_full_eq_mask) @@ -510,9 +553,11 @@ def evaluate_ineq_constraints(self, out=None): if out is not None: if not isinstance(out, np.ndarray) or out.size != self._n_con_ineq: - raise RuntimeError('Called evaluate_ineq_constraints with an invalid' - ' "out" argument - should take an ndarray of ' - 'size {}'.format(self._n_con_ineq)) + raise RuntimeError( + 'Called evaluate_ineq_constraints with an invalid' + ' "out" argument - should take an ndarray of ' + 'size {}'.format(self._n_con_ineq) + ) self._cached_con_full.compress(self._con_full_ineq_mask, out=out) return out else: @@ -530,14 +575,19 @@ def evaluate_jacobian(self, out=None): self._evaluate_jacobians_and_cache_if_necessary() if out is not None: - if not isinstance(out, coo_matrix) \ - or out.shape[0] != self._n_con_full \ - or out.shape[1] != self._n_primals \ - or out.nnz != self._nnz_jac_full: - raise RuntimeError('evaluate_jacobian called with an "out" argument' - ' that is invalid. This should be a coo_matrix with' - ' shape=({},{}) and nnz={}' - .format(self._n_con_full, self._n_primals, self._nnz_jac_full)) + if ( + not isinstance(out, coo_matrix) + or out.shape[0] != self._n_con_full + or out.shape[1] != self._n_primals + or out.nnz != self._nnz_jac_full + ): + raise RuntimeError( + 'evaluate_jacobian called with an "out" argument' + ' that is invalid. This should be a coo_matrix with' + ' shape=({},{}) and nnz={}'.format( + self._n_con_full, self._n_primals, self._nnz_jac_full + ) + ) np.copyto(out.data, self._cached_jac_full.data) return out else: @@ -548,19 +598,26 @@ def evaluate_jacobian_eq(self, out=None): self._evaluate_jacobians_and_cache_if_necessary() if out is not None: - if not isinstance(out, coo_matrix) \ - or out.shape[0] != self._n_con_eq \ - or out.shape[1] != self._n_primals \ - or out.nnz != self._nnz_jac_eq: - raise RuntimeError('evaluate_jacobian_eq called with an "out" argument' - ' that is invalid. This should be a coo_matrix with' - ' shape=({},{}) and nnz={}' - .format(self._n_con_eq, self._n_primals, self._nnz_jac_eq)) - + if ( + not isinstance(out, coo_matrix) + or out.shape[0] != self._n_con_eq + or out.shape[1] != self._n_primals + or out.nnz != self._nnz_jac_eq + ): + raise RuntimeError( + 'evaluate_jacobian_eq called with an "out" argument' + ' that is invalid. This should be a coo_matrix with' + ' shape=({},{}) and nnz={}'.format( + self._n_con_eq, self._n_primals, self._nnz_jac_eq + ) + ) + self._cached_jac_full.data.compress(self._nz_con_full_eq_mask, out=out.data) return out else: - self._cached_jac_full.data.compress(self._nz_con_full_eq_mask, out=self._cached_jac_eq.data) + self._cached_jac_full.data.compress( + self._nz_con_full_eq_mask, out=self._cached_jac_eq.data + ) return self._cached_jac_eq.copy() # overloaded from NLP @@ -568,19 +625,28 @@ def evaluate_jacobian_ineq(self, out=None): self._evaluate_jacobians_and_cache_if_necessary() if out is not None: - if not isinstance(out, coo_matrix) \ - or out.shape[0] != self._n_con_ineq \ - or out.shape[1] != self._n_primals \ - or out.nnz != self._nnz_jac_ineq: - raise RuntimeError('evaluate_jacobian_ineq called with an "out" argument' - ' that is invalid. This should be a coo_matrix with' - ' shape=({},{}) and nnz={}' - .format(self._n_con_ineq, self._n_primals, self._nnz_jac_ineq)) - - self._cached_jac_full.data.compress(self._nz_con_full_ineq_mask, out=out.data) + if ( + not isinstance(out, coo_matrix) + or out.shape[0] != self._n_con_ineq + or out.shape[1] != self._n_primals + or out.nnz != self._nnz_jac_ineq + ): + raise RuntimeError( + 'evaluate_jacobian_ineq called with an "out" argument' + ' that is invalid. This should be a coo_matrix with' + ' shape=({},{}) and nnz={}'.format( + self._n_con_ineq, self._n_primals, self._nnz_jac_ineq + ) + ) + + self._cached_jac_full.data.compress( + self._nz_con_full_ineq_mask, out=out.data + ) return out else: - self._cached_jac_full.data.compress(self._nz_con_full_ineq_mask, out=self._cached_jac_ineq.data) + self._cached_jac_full.data.compress( + self._nz_con_full_ineq_mask, out=self._cached_jac_ineq.data + ) return self._cached_jac_ineq.copy() def evaluate_hessian_lag(self, out=None): @@ -592,8 +658,9 @@ def evaluate_hessian_lag(self, out=None): # get the hessian data = np.zeros(self._nnz_hess_lag_lower, np.float64) - self._asl.eval_hes_lag(self._primals, self._duals_full, - data, obj_factor=self._obj_factor) + self._asl.eval_hes_lag( + self._primals, self._duals_full, data, obj_factor=self._obj_factor + ) values = np.concatenate((data, data[self._lower_hess_mask])) # note: this was done to ensure that scipy did not change # the structure of a sparse matrix if one of the nonzeros @@ -605,26 +672,36 @@ def evaluate_hessian_lag(self, out=None): self._hessian_lag_is_cached = True if out is not None: - if not isinstance(out, coo_matrix) or out.shape[0] != self._n_primals or \ - out.shape[1] != self._n_primals or out.nnz != self._nnz_hessian_lag: - raise RuntimeError('evaluate_hessian_lag called with an "out" argument' - ' that is invalid. This should be a coo_matrix with' - ' shape=({},{}) adn nnz={}' - .format(self._n_primals, self._n_primals, self._nnz_hessian_lag)) + if ( + not isinstance(out, coo_matrix) + or out.shape[0] != self._n_primals + or out.shape[1] != self._n_primals + or out.nnz != self._nnz_hessian_lag + ): + raise RuntimeError( + 'evaluate_hessian_lag called with an "out" argument' + ' that is invalid. This should be a coo_matrix with' + ' shape=({},{}) and nnz={}'.format( + self._n_primals, self._n_primals, self._nnz_hessian_lag + ) + ) np.copyto(out.data, self._cached_hessian_lag.data) return out else: return self._cached_hessian_lag.copy() def report_solver_status(self, status_code, status_message): - self._asl.finalize_solution(status_code, status_message, self._primals, self._duals) + self._asl.finalize_solution( + status_code, status_message, self._primals, self._duals + ) + class AmplNLP(AslNLP): def __init__(self, nl_file, row_filename=None, col_filename=None): """ AMPL nonlinear program interface. If row_filename and col_filename are not provided, the interface - will see if files exist (with same name as nl_file but the .row + will see if files exist (with same name as nl_file but the .row and .col extensions) Parameters @@ -659,7 +736,9 @@ def __init__(self, nl_file, row_filename=None, col_filename=None): self._name_to_vidx = None if col_filename is not None: self._vidx_to_name = self._build_component_names_list(col_filename) - self._name_to_vidx = {self._vidx_to_name[vidx]: vidx for vidx in range(self._n_primals)} + self._name_to_vidx = { + self._vidx_to_name[vidx]: vidx for vidx in range(self._n_primals) + } # create containers with names of constraints and objective self._con_full_idx_to_name = None @@ -672,17 +751,31 @@ def __init__(self, nl_file, row_filename=None, col_filename=None): self._obj_name = all_names[-1] del all_names[-1] self._con_full_idx_to_name = all_names - self._con_eq_idx_to_name = [all_names[self._con_eq_full_map[i]] for i in range(self._n_con_eq)] - self._con_ineq_idx_to_name = [all_names[self._con_ineq_full_map[i]] for i in range(self._n_con_ineq)] - self._name_to_con_full_idx = {all_names[cidx]: cidx for cidx in range(self._n_con_full)} - self._name_to_con_eq_idx = {name:idx for idx,name in enumerate(self._con_eq_idx_to_name)} - self._name_to_con_ineq_idx = {name:idx for idx,name in enumerate(self._con_ineq_idx_to_name)} + self._con_eq_idx_to_name = [ + all_names[self._con_eq_full_map[i]] for i in range(self._n_con_eq) + ] + self._con_ineq_idx_to_name = [ + all_names[self._con_ineq_full_map[i]] for i in range(self._n_con_ineq) + ] + self._name_to_con_full_idx = { + all_names[cidx]: cidx for cidx in range(self._n_con_full) + } + self._name_to_con_eq_idx = { + name: idx for idx, name in enumerate(self._con_eq_idx_to_name) + } + self._name_to_con_ineq_idx = { + name: idx for idx, name in enumerate(self._con_ineq_idx_to_name) + } def primals_names(self): """Returns ordered list with names of primal variables""" return list(self._vidx_to_name) - @deprecated(msg='This method has been replaced with primals_names', version='6.0.0.dev0', remove_in='6.0') + @deprecated( + msg='This method has been replaced with primals_names', + version='6.0.0', + remove_in='6.0', + ) def variable_names(self): """Returns ordered list with names of primal variables""" return self.primals_names() @@ -702,7 +795,11 @@ def ineq_constraint_names(self): (corresponding to evaluate_ineq_constraints)""" return list(self._con_ineq_idx_to_name) - @deprecated(msg='This method has been replaced with primal_idx', version='6.0.0.dev0', remove_in='6.0') + @deprecated( + msg='This method has been replaced with primal_idx', + version='6.0.0', + remove_in='6.0', + ) def variable_idx(self, var_name): return self.primal_idx(var_name) @@ -774,9 +871,9 @@ def ineq_constraint_idx(self, con_name): @staticmethod def _build_component_names_list(filename): - """ Builds an ordered list of strings from a file + """Builds an ordered list of strings from a file containing strings on separate lines (e.g., the row - and col files """ + and col files""" ordered_names = list() with open(filename, 'r') as f: for line in f: diff --git a/pyomo/contrib/pynumero/interfaces/cyipopt_interface.py b/pyomo/contrib/pynumero/interfaces/cyipopt_interface.py new file mode 100644 index 00000000000..19e74625d03 --- /dev/null +++ b/pyomo/contrib/pynumero/interfaces/cyipopt_interface.py @@ -0,0 +1,398 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +""" +The cyipopt_interface module includes the python interface to the +Cythonized ipopt solver cyipopt (see more: +https://github.com/mechmotum/cyipopt.git). To use the interface, +you can create a derived implementation from the abstract base class +CyIpoptProblemInterface that provides the necessary methods. + +Note: This module also includes a default implementation CyIpopt +that works with problems derived from AslNLP as long as those +classes return numpy ndarray objects for the vectors and coo_matrix +objects for the matrices (e.g., AmplNLP and PyomoNLP) +""" +import abc + +from pyomo.common.dependencies import attempt_import, numpy as np, numpy_available + + +def _cyipopt_importer(): + import cyipopt + + # cyipopt before version 1.0.3 called the problem class "Problem" + if not hasattr(cyipopt, "Problem"): + cyipopt.Problem = cyipopt.problem + # cyipopt before version 1.0.3 put the __version__ flag in the ipopt + # module (which was deprecated starting in 1.0.3) + if not hasattr(cyipopt, "__version__"): + import ipopt + + cyipopt.__version__ = ipopt.__version__ + # Beginning in 1.0.3, STATUS_MESSAGES is in a separate + # ipopt_wrapper module + if not hasattr(cyipopt, "STATUS_MESSAGES"): + import ipopt_wrapper + + cyipopt.STATUS_MESSAGES = ipopt_wrapper.STATUS_MESSAGES + return cyipopt + + +cyipopt, cyipopt_available = attempt_import( + "ipopt", + error_message="cyipopt solver relies on the ipopt module from cyipopt. " + "See https://github.com/mechmotum/cyipopt.git for cyipopt " + "installation instructions.", + importer=_cyipopt_importer, +) + +# If cyipopt is not available, we will use object as our base class for +# CyIpoptProblemInterface so we don't require cyipopt to import from +# this file. +# Note that this *does* trigger the import attempt and therefore is +# moderately time-consuming. +cyipopt_Problem = cyipopt.Problem if cyipopt_available else object + + +class CyIpoptProblemInterface(cyipopt_Problem, metaclass=abc.ABCMeta): + """Abstract subclass of ``cyipopt.Problem`` defining an object that can be + used as an interface to CyIpopt. Subclasses must define all methods necessary + for the CyIpopt solve and must call this class's ``__init__`` method to + initialize Ipopt's data structures. + + Note that, if "output_file" is provided as an Ipopt option, the log file + is open until this object (and thus the underlying Ipopt NLP object) is + deallocated. To force this deallocation, call the ``close()`` method, which + is defined by ``cyipopt.Problem``. + + """ + + # Flag used to determine whether the underlying IpoptProblem struct + # has been initialized. This is used to prevent segfaults when calling + # cyipopt.Problem's solve method if cyipopt.Problem.__init__ hasn't been + # called. + _problem_initialized = False + + def __init__(self): + """Initialize the problem interface + + This method calls ``cyipopt.Problem.__init__``, and *must* be called + by any subclass's ``__init__`` method. If not, we will segfault when + we call ``cyipopt.Problem.solve`` from this object. + + """ + if not cyipopt_available: + raise RuntimeError( + "cyipopt is required to instantiate CyIpoptProblemInterface" + ) + + # Call cyipopt.Problem.__init__ + xl = self.x_lb() + xu = self.x_ub() + gl = self.g_lb() + gu = self.g_ub() + nx = len(xl) + ng = len(gl) + super(CyIpoptProblemInterface, self).__init__( + n=nx, m=ng, lb=xl, ub=xu, cl=gl, cu=gu + ) + # Set a flag to indicate that the IpoptProblem struct has been + # initialized + self._problem_initialized = True + + def solve(self, x, lagrange=None, zl=None, zu=None): + """Solve a CyIpopt Problem + + Checks whether __init__ has been called before calling + cyipopt.Problem.solve + + """ + lagrange = [] if lagrange is None else lagrange + zl = [] if zl is None else zl + zu = [] if zu is None else zu + # Check a flag to make sure __init__ has been called. This is to prevent + # segfaults if we try to call solve from a subclass that has not called + # super().__init__ + # + # Note that we can still segfault if a user overrides solve and does not + # call cyipopt.Problem.__init__, but in this case we assume they know what + # they are doing. + if not self._problem_initialized: + raise RuntimeError( + "Attempting to call cyipopt.Problem.solve when" + " cyipopt.Problem.__init__ has not been called. This can happen" + " if a subclass of CyIpoptProblemInterface overrides __init__" + " without calling CyIpoptProblemInterface.__init__ or setting" + " the CyIpoptProblemInterface._problem_initialized flag." + ) + return super(CyIpoptProblemInterface, self).solve( + x, lagrange=lagrange, zl=zl, zu=zu + ) + + @abc.abstractmethod + def x_init(self): + """Return the initial values for x as a numpy ndarray""" + pass + + @abc.abstractmethod + def x_lb(self): + """Return the lower bounds on x as a numpy ndarray""" + pass + + @abc.abstractmethod + def x_ub(self): + """Return the upper bounds on x as a numpy ndarray""" + pass + + @abc.abstractmethod + def g_lb(self): + """Return the lower bounds on the constraints as a numpy ndarray""" + pass + + @abc.abstractmethod + def g_ub(self): + """Return the upper bounds on the constraints as a numpy ndarray""" + pass + + @abc.abstractmethod + def scaling_factors(self): + """Return the values for scaling factors as a tuple + (objective_scaling, x_scaling, g_scaling). Return None + if the scaling factors are to be ignored + """ + pass + + @abc.abstractmethod + def objective(self, x): + """Return the value of the objective + function evaluated at x + """ + pass + + @abc.abstractmethod + def gradient(self, x): + """Return the gradient of the objective + function evaluated at x as a numpy ndarray + """ + pass + + @abc.abstractmethod + def constraints(self, x): + """Return the residuals of the constraints + evaluated at x as a numpy ndarray + """ + pass + + @abc.abstractmethod + def jacobianstructure(self): + """Return the structure of the jacobian + in coordinate format. That is, return (rows,cols) + where rows and cols are both numpy ndarray + objects that contain the row and column indices + for each of the nonzeros in the jacobian. + """ + pass + + @abc.abstractmethod + def jacobian(self, x): + """Return the values for the jacobian evaluated at x + as a numpy ndarray of nonzero values corresponding + to the rows and columns specified in the jacobianstructure + """ + pass + + @abc.abstractmethod + def hessianstructure(self): + """Return the structure of the hessian + in coordinate format. That is, return (rows,cols) + where rows and cols are both numpy ndarray + objects that contain the row and column indices + for each of the nonzeros in the hessian. + Note: return ONLY the lower diagonal of this symmetric matrix. + """ + pass + + @abc.abstractmethod + def hessian(self, x, y, obj_factor): + """Return the values for the hessian evaluated at x + as a numpy ndarray of nonzero values corresponding + to the rows and columns specified in the + hessianstructure method. + Note: return ONLY the lower diagonal of this symmetric matrix. + """ + pass + + def intermediate( + self, + alg_mod, + iter_count, + obj_value, + inf_pr, + inf_du, + mu, + d_norm, + regularization_size, + alpha_du, + alpha_pr, + ls_trials, + ): + """Callback that can be used to examine or report intermediate + results. This method is called each iteration + """ + # TODO: Document the arguments + pass + + +class CyIpoptNLP(CyIpoptProblemInterface): + def __init__(self, nlp, intermediate_callback=None): + """This class provides a CyIpoptProblemInterface for use + with the CyIpoptSolver class that can take in an NLP + as long as it provides vectors as numpy ndarrays and + matrices as scipy.sparse.coo_matrix objects. This class + provides the interface between AmplNLP or PyomoNLP objects + and the CyIpoptSolver + """ + self._nlp = nlp + self._intermediate_callback = intermediate_callback + + x = nlp.init_primals() + y = nlp.init_duals() + if np.any(np.isnan(y)): + # did not get initial values for y, use this default + y.fill(1.0) + + self._cached_x = x.copy() + self._cached_y = y.copy() + self._cached_obj_factor = 1.0 + + nlp.set_primals(self._cached_x) + nlp.set_duals(self._cached_y) + + # get jacobian and hessian structures + self._jac_g = nlp.evaluate_jacobian() + try: + self._hess_lag = nlp.evaluate_hessian_lag() + self._hess_lower_mask = self._hess_lag.row >= self._hess_lag.col + self._hessian_available = True + except (AttributeError, NotImplementedError): + self._hessian_available = False + self._hess_lag = None + self._hess_lower_mask = None + + # Call CyIpoptProblemInterface.__init__, which calls + # cyipopt.Problem.__init__ + super(CyIpoptNLP, self).__init__() + + def _set_primals_if_necessary(self, x): + if not np.array_equal(x, self._cached_x): + self._nlp.set_primals(x) + self._cached_x = x.copy() + + def _set_duals_if_necessary(self, y): + if not np.array_equal(y, self._cached_y): + self._nlp.set_duals(y) + self._cached_y = y.copy() + + def _set_obj_factor_if_necessary(self, obj_factor): + if obj_factor != self._cached_obj_factor: + self._nlp.set_obj_factor(obj_factor) + self._cached_obj_factor = obj_factor + + def x_init(self): + return self._nlp.init_primals() + + def x_lb(self): + return self._nlp.primals_lb() + + def x_ub(self): + return self._nlp.primals_ub() + + def g_lb(self): + return self._nlp.constraints_lb() + + def g_ub(self): + return self._nlp.constraints_ub() + + def scaling_factors(self): + obj_scaling = self._nlp.get_obj_scaling() + x_scaling = self._nlp.get_primals_scaling() + g_scaling = self._nlp.get_constraints_scaling() + return obj_scaling, x_scaling, g_scaling + + def objective(self, x): + self._set_primals_if_necessary(x) + return self._nlp.evaluate_objective() + + def gradient(self, x): + self._set_primals_if_necessary(x) + return self._nlp.evaluate_grad_objective() + + def constraints(self, x): + self._set_primals_if_necessary(x) + return self._nlp.evaluate_constraints() + + def jacobianstructure(self): + return self._jac_g.row, self._jac_g.col + + def jacobian(self, x): + self._set_primals_if_necessary(x) + self._nlp.evaluate_jacobian(out=self._jac_g) + return self._jac_g.data + + def hessianstructure(self): + if not self._hessian_available: + return np.zeros(0), np.zeros(0) + + row = np.compress(self._hess_lower_mask, self._hess_lag.row) + col = np.compress(self._hess_lower_mask, self._hess_lag.col) + return row, col + + def hessian(self, x, y, obj_factor): + if not self._hessian_available: + raise ValueError("Hessian requested, but not supported by the NLP") + + self._set_primals_if_necessary(x) + self._set_duals_if_necessary(y) + self._set_obj_factor_if_necessary(obj_factor) + self._nlp.evaluate_hessian_lag(out=self._hess_lag) + data = np.compress(self._hess_lower_mask, self._hess_lag.data) + return data + + def intermediate( + self, + alg_mod, + iter_count, + obj_value, + inf_pr, + inf_du, + mu, + d_norm, + regularization_size, + alpha_du, + alpha_pr, + ls_trials, + ): + if self._intermediate_callback is not None: + return self._intermediate_callback( + self._nlp, + alg_mod, + iter_count, + obj_value, + inf_pr, + inf_du, + mu, + d_norm, + regularization_size, + alpha_du, + alpha_pr, + ls_trials, + ) + return True diff --git a/pyomo/contrib/pynumero/interfaces/external_grey_box.py b/pyomo/contrib/pynumero/interfaces/external_grey_box.py index 87c304f868a..8fd728a7c9b 100644 --- a/pyomo/contrib/pynumero/interfaces/external_grey_box.py +++ b/pyomo/contrib/pynumero/interfaces/external_grey_box.py @@ -33,7 +33,7 @@ This module is used for interfacing an external model as a block in a Pyomo model. -An ExternalGreyBoxModel is model is a model that does not +An ExternalGreyBoxModel is a model that does not provide constraints explicitly as algebraic expressions, but instead provides a set of methods that can compute the residuals of the constraints (or outputs) and their derivatives. @@ -62,7 +62,7 @@ ExternalGreyBoxModel, it will automatically create pyomo variables to represent the inputs and the outputs from the external model. You can implement a callback to modify the Pyomo block after it is - constructed. This also provides a mechanism to initalize variables, + constructed. This also provides a mechanism to initialize variables, etc. * Create a PyomoGreyBoxNLP and provide it with the Pyomo model @@ -83,6 +83,7 @@ """ + class ExternalGreyBoxModel(object): """ This is the base class for building external input output models @@ -131,19 +132,19 @@ def evaluate_hessian_outputs(self): """ def n_inputs(self): - """ This method returns the number of inputs. You do not + """This method returns the number of inputs. You do not need to overload this method in derived classes. """ return len(self.input_names()) def n_equality_constraints(self): - """ This method returns the number of equality constraints. + """This method returns the number of equality constraints. You do not need to overload this method in derived classes. """ return len(self.equality_constraint_names()) def n_outputs(self): - """ This method returns the number of outputs. You do not + """This method returns the number of outputs. You do not need to overload this method in derived classes. """ return len(self.output_names()) @@ -154,7 +155,9 @@ def input_names(self): of this external model. These should be returned in the same order that they are to be used in set_input_values. """ - raise NotImplementedError('Derived ExternalGreyBoxModel classes need to implement the method: input_names') + raise NotImplementedError( + 'Derived ExternalGreyBoxModel classes need to implement the method: input_names' + ) def equality_constraint_names(self): """ @@ -190,11 +193,13 @@ def set_input_values(self, input_values): """ This method is called by the solver to set the current values for the input variables. The derived class must cache these if - necessary for any subsequent calls to evalute_outputs or + necessary for any subsequent calls to evaluate_outputs or evaluate_derivatives. """ - raise NotImplementedError('Derived ExternalGreyBoxModel classes need' - ' to implement the method: set_input_values') + raise NotImplementedError( + 'Derived ExternalGreyBoxModel classes need' + ' to implement the method: set_input_values' + ) def set_equality_constraint_multipliers(self, eq_con_multiplier_values): """ @@ -204,14 +209,18 @@ class must cache these if necessary for any subsequent calls to evaluate_hessian_equality_constraints """ # we should check these for efficiency - assert self.n_equality_constraints() == len(eq_con_multiplier_values) - if not hasattr(self, 'evaluate_hessian_equality_constraints') \ - or self.n_equality_constraints() == 0: + assert self.n_equality_constraints() == len(eq_con_multiplier_values) + if ( + not hasattr(self, 'evaluate_hessian_equality_constraints') + or self.n_equality_constraints() == 0 + ): return - - raise NotImplementedError('Derived ExternalGreyBoxModel classes need to implement' - ' set_equality_constraint_multlipliers when they' - ' support Hessian computations.') + + raise NotImplementedError( + 'Derived ExternalGreyBoxModel classes need to implement' + ' set_equality_constraint_multipliers when they' + ' support Hessian computations.' + ) def set_output_constraint_multipliers(self, output_con_multiplier_values): """ @@ -222,13 +231,17 @@ class must cache these if necessary for any subsequent calls """ # we should check these for efficiency assert self.n_outputs() == len(output_con_multiplier_values) - if not hasattr(self, 'evaluate_hessian_output_constraints') \ - or self.n_outputs() == 0: + if ( + not hasattr(self, 'evaluate_hessian_output_constraints') + or self.n_outputs() == 0 + ): return - raise NotImplementedError('Derived ExternalGreyBoxModel classes need to implement' - ' set_output_constraint_multlipliers when they' - ' support Hessian computations.') + raise NotImplementedError( + 'Derived ExternalGreyBoxModel classes need to implement' + ' set_output_constraint_multipliers when they' + ' support Hessian computations.' + ) def get_equality_constraint_scaling_factors(self): """ @@ -254,16 +267,19 @@ def evaluate_equality_constraints(self): Compute the residuals from the model (using the values set in input_values) and return as a numpy array """ - raise NotImplementedError('evaluate_equality_constraints called ' - 'but not implemented in the derived class.') + raise NotImplementedError( + 'evaluate_equality_constraints called ' + 'but not implemented in the derived class.' + ) def evaluate_outputs(self): """ Compute the outputs from the model (using the values set in input_values) and return as a numpy array """ - raise NotImplementedError('evaluate_outputs called ' - 'but not implemented in the derived class.') + raise NotImplementedError( + 'evaluate_outputs called but not implemented in the derived class.' + ) def evaluate_jacobian_equality_constraints(self): """ @@ -273,8 +289,10 @@ def evaluate_jacobian_equality_constraints(self): the order of the residual names and the cols in the order of the input variables. """ - raise NotImplementedError('evaluate_jacobian_equality_constraints called ' - 'but not implemented in the derived class.') + raise NotImplementedError( + 'evaluate_jacobian_equality_constraints called ' + 'but not implemented in the derived class.' + ) def evaluate_jacobian_outputs(self): """ @@ -284,8 +302,10 @@ def evaluate_jacobian_outputs(self): the order of the output variables and the cols in the order of the input variables. """ - raise NotImplementedError('evaluate_equality_outputs called ' - 'but not implemented in the derived class.') + raise NotImplementedError( + 'evaluate_equality_outputs called ' + 'but not implemented in the derived class.' + ) # # Implement the following methods to provide support for @@ -297,12 +317,7 @@ def evaluate_jacobian_outputs(self): class ExternalGreyBoxBlockData(_BlockData): - - def set_external_model(self, - external_grey_box_model, - inputs=None, - outputs=None, - ): + def set_external_model(self, external_grey_box_model, inputs=None, outputs=None): """ Parameters ---------- @@ -329,7 +344,8 @@ def set_external_model(self, if self._input_names is None or len(self._input_names) == 0: raise ValueError( 'No input_names specified for external_grey_box_model.' - ' Must specify at least one input.') + ' Must specify at least one input.' + ) self._input_names_set = Set(initialize=self._input_names, ordered=True) @@ -341,7 +357,7 @@ def set_external_model(self, "Dimension mismatch in provided input vars for external " "model.\nExpected %s input vars, got %s." % (ex_model.n_inputs(), len(inputs)) - ) + ) self.inputs = Reference(inputs) self._equality_constraint_names = ex_model.equality_constraint_names() @@ -357,7 +373,7 @@ def set_external_model(self, "Dimension mismatch in provided output vars for external " "model.\nExpected %s output vars, got %s." % (ex_model.n_outputs(), len(outputs)) - ) + ) self.outputs = Reference(outputs) # call the callback so the model can set initialization, bounds, etc. @@ -368,7 +384,6 @@ def get_external_model(self): class ExternalGreyBoxBlock(Block): - _ComponentDataClass = ExternalGreyBoxBlockData def __new__(cls, *args, **kwds): @@ -395,8 +410,7 @@ def construct(self, data=None): timer = ConstructionTimer(self) if is_debug_set(logger): - logger.debug("Constructing external grey box model %s" - % (self.name)) + logger.debug("Constructing external grey box model %s" % (self.name)) super(ExternalGreyBoxBlock, self).construct(data) diff --git a/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py b/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py index de1541e58b7..d0e6c21fa64 100644 --- a/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py +++ b/pyomo/contrib/pynumero/interfaces/external_pyomo_model.py @@ -10,30 +10,16 @@ # ___________________________________________________________________________ import itertools -from pyomo.environ import SolverFactory from pyomo.core.base.var import Var from pyomo.core.base.constraint import Constraint from pyomo.core.base.objective import Objective from pyomo.core.expr.visitor import identify_variables -from pyomo.common.collections import ComponentSet -from pyomo.core.base.suffix import Suffix -from pyomo.util.calc_var_value import calculate_variable_from_constraint -from pyomo.util.subsystems import ( - create_subsystem_block, - TemporarySubsystemManager, -) +from pyomo.common.timing import HierarchicalTimer +from pyomo.util.subsystems import create_subsystem_block from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP -from pyomo.contrib.pynumero.interfaces.external_grey_box import ( - ExternalGreyBoxModel, -) -from pyomo.contrib.pynumero.interfaces.nlp_projections import ProjectedNLP -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - cyipopt_available, - CyIpoptNLP, - CyIpoptSolver, -) -from pyomo.contrib.incidence_analysis.util import ( - generate_strongly_connected_components, +from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxModel +from pyomo.contrib.pynumero.algorithms.solvers.implicit_functions import ( + SccImplicitFunctionSolver, ) import numpy as np import scipy.sparse as sps @@ -63,7 +49,7 @@ def _dense_to_full_sparse(matrix): for i, j in itertools.product(range(nrow), range(ncol)): row.append(i) col.append(j) - data.append(matrix[i,j]) + data.append(matrix[i, j]) row = np.array(row) col = np.array(col) data = np.array(data) @@ -78,7 +64,7 @@ def get_hessian_of_constraint(constraint, wrt1=None, wrt2=None, nlp=None): wrt2 = variables elif wrt1 is not None and wrt2 is not None: variables = wrt1 + wrt2 - elif wrt1 is not None: # but wrt2 is None + elif wrt1 is not None: # but wrt2 is None wrt2 = wrt1 variables = wrt1 else: @@ -143,14 +129,16 @@ class ExternalPyomoModel(ExternalGreyBoxModel): """ - def __init__(self, - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=None, - solver=None, - ): + def __init__( + self, + input_vars, + external_vars, + residual_cons, + external_cons, + solver_class=None, + solver_options=None, + timer=None, + ): """ Arguments: ---------- @@ -164,94 +152,45 @@ def __init__(self, external_cons: list List of equality constraints used to solve for the external variables - use_cyipopt: bool - Whether to use CyIpopt to solve strongly connected components of - the implicit function that have dimension greater than one. - solver: Pyomo solver object - Used to solve strongly connected components of the implicit function - that have dimension greater than one. Only used if use_cyipopt - is False. + solver_class: Subclass of ImplicitFunctionSolver + The solver object that is used to converge the system of + equations defining the implicit function. + solver_options: dict + Options dict for the ImplicitFunctionSolver + timer: HierarchicalTimer + HierarchicalTimer object to which new timing categories introduced + will be attached. If None, a new timer will be created. """ - if use_cyipopt is None: - use_cyipopt = cyipopt_available - if use_cyipopt and not cyipopt_available: - raise RuntimeError( - "Constructing an ExternalPyomoModel with CyIpopt unavailable. " - "Please set the use_cyipopt argument to False." - ) - if solver is not None and use_cyipopt: - raise RuntimeError( - "Constructing an ExternalPyomoModel with a solver specified " - "and use_cyipopt set to True. Please set use_cyipopt to False " - "to use the desired solver." - ) - elif solver is None and not use_cyipopt: - solver = SolverFactory("ipopt") - # If use_cyipopt is True, this solver is None and will not be used. - self._solver = solver - self._use_cyipopt = use_cyipopt + if timer is None: + timer = HierarchicalTimer() + self._timer = timer + if solver_class is None: + solver_class = SccImplicitFunctionSolver + self._solver_class = solver_class + if solver_options is None: + solver_options = {} + + self._timer.start("__init__") # We only need this block to construct the NLP, which wouldn't # be necessary if we could compute Hessians of Pyomo constraints. self._block = create_subsystem_block( - residual_cons+external_cons, - input_vars+external_vars, - ) + residual_cons + external_cons, input_vars + external_vars + ) self._block._obj = Objective(expr=0.0) + self._timer.start("PyomoNLP") self._nlp = PyomoNLP(self._block) + self._timer.stop("PyomoNLP") - self._scc_list = list(generate_strongly_connected_components( - external_cons, variables=external_vars - )) - - if use_cyipopt: - # Using CyIpopt allows us to solve inner problems without - # costly rewriting of the nl file. It requires quite a bit - # of preprocessing, however, to construct the ProjectedNLP - # for each block of the decomposition. - - # Get "vector-valued" SCCs, those of dimension > 0. - # We will solve these with a direct IPOPT interface, which requires - # some preprocessing. - self._vector_scc_list = [ - (scc, inputs) for scc, inputs in self._scc_list - if len(scc.vars) > 1 - ] - - # Need a dummy objective to create an NLP - for scc, inputs in self._vector_scc_list: - scc._obj = Objective(expr=0.0) - - # I need scaling_factor so Pyomo NLPs I create from these blocks - # don't break when ProjectedNLP calls get_primals_scaling - scc.scaling_factor = Suffix(direction=Suffix.EXPORT) - # HACK: scaling_factor just needs to be nonempty. - scc.scaling_factor[scc._obj] = 1.0 - - # These are the "original NLPs" that will be projected - self._vector_scc_nlps = [ - PyomoNLP(scc) for scc, inputs in self._vector_scc_list - ] - self._vector_scc_var_names = [ - [var.name for var in scc.vars.values()] - for scc, inputs in self._vector_scc_list - ] - self._vector_proj_nlps = [ - ProjectedNLP(nlp, names) for nlp, names in - zip(self._vector_scc_nlps, self._vector_scc_var_names) - ] - - # We will solve the ProjectedNLPs rather than the original NLPs - self._cyipopt_nlps = [CyIpoptNLP(nlp) for nlp in self._vector_proj_nlps] - self._cyipopt_solvers = [ - CyIpoptSolver(nlp) for nlp in self._cyipopt_nlps - ] - self._vector_scc_input_coords = [ - nlp.get_primal_indices(inputs) - for nlp, (scc, inputs) in - zip(self._vector_scc_nlps, self._vector_scc_list) - ] + # Instantiate a solver with the ImplicitFunctionSolver API: + self._solver = self._solver_class( + external_vars, + external_cons, + input_vars, + timer=self._timer, + **solver_options, + ) assert len(external_vars) == len(external_cons) @@ -263,6 +202,12 @@ def __init__(self, self.residual_con_multipliers = [None for _ in residual_cons] self.residual_scaling_factors = None + self._input_output_coords = self._nlp.get_primal_indices( + input_vars + external_vars + ) + + self._timer.stop("__init__") + def n_inputs(self): return len(self.input_vars) @@ -272,78 +217,32 @@ def n_equality_constraints(self): # I would like to try to get by without using the following "name" methods. def input_names(self): return ["input_%i" % i for i in range(self.n_inputs())] + def equality_constraint_names(self): return ["residual_%i" % i for i in range(self.n_equality_constraints())] def set_input_values(self, input_values): + self._timer.start("set_inputs") + solver = self._solver external_cons = self.external_cons external_vars = self.external_vars input_vars = self.input_vars - for var, val in zip(input_vars, input_values): - var.set_value(val, skip_validation=True) - - vector_scc_idx = 0 - for block, inputs in self._scc_list: - if len(block.vars) == 1: - calculate_variable_from_constraint( - block.vars[0], block.cons[0] - ) - else: - if self._use_cyipopt: - # Transfer variable values into the projected NLP, solve, - # and extract values. - - nlp = self._vector_scc_nlps[vector_scc_idx] - proj_nlp = self._vector_proj_nlps[vector_scc_idx] - input_coords = self._vector_scc_input_coords[vector_scc_idx] - cyipopt = self._cyipopt_solvers[vector_scc_idx] - _, local_inputs = self._vector_scc_list[vector_scc_idx] - - primals = nlp.get_primals() - variables = nlp.get_pyomo_variables() - - # Set values and bounds from inputs to the SCC. - # This works because values have been set in the original - # pyomo model, either by a previous SCC solve, or from the - # "global inputs" - for i, var in zip(input_coords, local_inputs): - # Set primals (inputs) in the original NLP - primals[i] = var.value - # This affects future evaluations in the ProjectedNLP - nlp.set_primals(primals) - x0 = proj_nlp.get_primals() - sol, _ = cyipopt.solve(x0=x0) - - # Set primals from solution in projected NLP. This updates - # values in the original NLP - proj_nlp.set_primals(sol) - # I really only need to set new primals for the variables in - # the ProjectedNLP. However, I can only get a list of variables - # from the original Pyomo NLP, so here some of the values I'm - # setting are redundant. - new_primals = nlp.get_primals() - assert len(new_primals) == len(variables) - for var, val in zip(variables, new_primals): - var.set_value(val, skip_validation=True) - - else: - # Use a Pyomo solver to solve this strongly connected - # component. - with TemporarySubsystemManager(to_fix=inputs): - solver.solve(block) - - vector_scc_idx += 1 + solver.set_parameters(input_values) + outputs = solver.evaluate_outputs() + solver.update_pyomo_model() + # # Send updated variable values to NLP for dervative evaluation + # primals = self._nlp.get_primals() - to_update = input_vars + external_vars - indices = self._nlp.get_primal_indices(to_update) - values = np.fromiter((var.value for var in to_update), float) - primals[indices] = values + values = np.concatenate((input_values, outputs)) + primals[self._input_output_coords] = values self._nlp.set_primals(primals) + self._timer.stop("set_inputs") + def set_equality_constraint_multipliers(self, eq_con_multipliers): """ Sets multipliers for residual equality constraints seen by the @@ -356,7 +255,7 @@ def set_equality_constraint_multipliers(self, eq_con_multipliers): def set_external_constraint_multipliers(self, eq_con_multipliers): eq_con_multipliers = np.array(eq_con_multipliers) external_multipliers = self.calculate_external_constraint_multipliers( - eq_con_multipliers, + eq_con_multipliers ) multipliers = np.concatenate((eq_con_multipliers, external_multipliers)) cons = self.residual_cons + self.external_cons @@ -389,7 +288,7 @@ def calculate_external_constraint_multipliers(self, resid_multipliers): jgy_t = jgy.transpose() jfy_t = jfy.transpose() - dfdg = - sps.linalg.splu(jgy_t.tocsc()).solve(jfy_t.toarray()) + dfdg = -sps.linalg.splu(jgy_t.tocsc()).solve(jfy_t.toarray()) resid_multipliers = np.array(resid_multipliers) external_multipliers = dfdg.dot(resid_multipliers) return external_multipliers @@ -435,6 +334,8 @@ def evaluate_equality_constraints(self): return self._nlp.extract_subvector_constraints(self.residual_cons) def evaluate_jacobian_equality_constraints(self): + self._timer.start("jacobian") + nlp = self._nlp x = self.input_vars y = self.external_vars @@ -447,7 +348,7 @@ def evaluate_jacobian_equality_constraints(self): nf = len(f) nx = len(x) - n_entries = nf*nx + n_entries = nf * nx # TODO: Does it make sense to cast dydx to a sparse matrix? # My intuition is that it does only if jgy is "decomposable" @@ -459,7 +360,10 @@ def evaluate_jacobian_equality_constraints(self): # be nonzero. Here, this is all of the entries. dfdx = jfx + jfy.dot(dydx) - return _dense_to_full_sparse(dfdx) + full_sparse = _dense_to_full_sparse(dfdx) + + self._timer.stop("jacobian") + return full_sparse def evaluate_jacobian_external_variables(self): nlp = self._nlp @@ -486,15 +390,15 @@ def evaluate_hessian_external_variables(self): ny = len(y) nx = len(x) - hgxx = np.array([ - get_hessian_of_constraint(con, x, nlp=nlp).toarray() for con in g - ]) - hgxy = np.array([ - get_hessian_of_constraint(con, x, y, nlp=nlp).toarray() for con in g - ]) - hgyy = np.array([ - get_hessian_of_constraint(con, y, nlp=nlp).toarray() for con in g - ]) + hgxx = np.array( + [get_hessian_of_constraint(con, x, nlp=nlp).toarray() for con in g] + ) + hgxy = np.array( + [get_hessian_of_constraint(con, x, y, nlp=nlp).toarray() for con in g] + ) + hgyy = np.array( + [get_hessian_of_constraint(con, y, nlp=nlp).toarray() for con in g] + ) # This term is sparse, but we do not exploit it. term1 = hgxx @@ -512,7 +416,7 @@ def evaluate_hessian_external_variables(self): rhs = term1 + term2 + term3 - rhs.shape = (ny, nx*nx) + rhs.shape = (ny, nx * nx) sol = jgy_fact.solve(rhs) sol.shape = (ny, nx, nx) d2ydx2 = -sol @@ -539,15 +443,15 @@ def evaluate_hessians_of_residuals(self): nf = len(f) nx = len(x) - hfxx = np.array([ - get_hessian_of_constraint(con, x, nlp=nlp).toarray() for con in f - ]) - hfxy = np.array([ - get_hessian_of_constraint(con, x, y, nlp=nlp).toarray() for con in f - ]) - hfyy = np.array([ - get_hessian_of_constraint(con, y, nlp=nlp).toarray() for con in f - ]) + hfxx = np.array( + [get_hessian_of_constraint(con, x, nlp=nlp).toarray() for con in f] + ) + hfxy = np.array( + [get_hessian_of_constraint(con, x, y, nlp=nlp).toarray() for con in f] + ) + hfyy = np.array( + [get_hessian_of_constraint(con, y, nlp=nlp).toarray() for con in f] + ) d2ydx2 = self.evaluate_hessian_external_variables() @@ -556,7 +460,7 @@ def evaluate_hessians_of_residuals(self): term2 = prod + prod.transpose((0, 2, 1)) term3 = hfyy.dot(dydx).transpose((0, 2, 1)).dot(dydx) - d2ydx2.shape = (ny, nx*nx) + d2ydx2.shape = (ny, nx * nx) term4 = jfy.dot(d2ydx2) term4.shape = (nf, nx, nx) @@ -570,6 +474,8 @@ def evaluate_hessian_equality_constraints(self): due to these equality constraints. """ + self._timer.start("hessian") + # External multipliers must be calculated after both primals and duals # are set, and are only necessary for this Hessian calculation. # We know this Hessian calculation wants to use the most recently @@ -585,7 +491,9 @@ def evaluate_hessian_equality_constraints(self): # Hessian-of-Lagrangian term in the full space. hess_lag = self.calculate_reduced_hessian_lagrangian(hlxx, hlxy, hlyy) sparse = _dense_to_full_sparse(hess_lag) - return sps.tril(sparse) + lower_triangle = sps.tril(sparse) + self._timer.stop("hessian") + return lower_triangle def set_equality_constraint_scaling_factors(self, scaling_factors): """ diff --git a/pyomo/contrib/pynumero/interfaces/nlp.py b/pyomo/contrib/pynumero/interfaces/nlp.py index dde4ee73866..95c05f06a61 100644 --- a/pyomo/contrib/pynumero/interfaces/nlp.py +++ b/pyomo/contrib/pynumero/interfaces/nlp.py @@ -13,7 +13,7 @@ classes that provide different representations for the NLP. The first interface (NLP) presents the NLP in the following form -(where all equality and inequality constaints are combined) +(where all equality and inequality constraints are combined) minimize f(x) subject to g_L <= g(x) <= g_U @@ -21,7 +21,7 @@ where x \in R^{n_x} are the primal variables, x_L \in R^{n_x} are the lower bounds of the primal variables, - x_U \in R^{n_x} are the uppper bounds of the primal variables, + x_U \in R^{n_x} are the upper bounds of the primal variables, g: R^{n_x} \rightarrow R^{n_c} are constraints (combined equality and inequality) @@ -36,7 +36,7 @@ where x \in R^{n_x} are the primal variables, x_L \in R^{n_x} are the lower bounds of the primal variables, - x_U \in R^{n_x} are the uppper bounds of the primal variables, + x_U \in R^{n_x} are the upper bounds of the primal variables, h: R^{n_x} \rightarrow R^{n_eq} are the equality constraints q: R^{n_x} \rightarrow R^{n_ineq} are the inequality constraints @@ -58,7 +58,7 @@ class NLP(object, metaclass=abc.ABCMeta): def __init__(self): pass - + @abc.abstractmethod def n_primals(self): """ @@ -84,7 +84,7 @@ def constraint_names(self): Override this to provide string names for the constraints """ return [str(i) for i in range(self.n_constraints())] - + @abc.abstractmethod def nnz_jacobian(self): """ @@ -157,7 +157,7 @@ def init_primals(self): @abc.abstractmethod def init_duals(self): """ - Returns vector with initial values for the dual variables + Returns vector with initial values for the dual variables of the constraints """ pass @@ -165,7 +165,7 @@ def init_duals(self): @abc.abstractmethod def create_new_vector(self, vector_type): """ - Creates a vector of the appropriate length and structure as + Creates a vector of the appropriate length and structure as requested Parameters @@ -235,7 +235,7 @@ def set_obj_factor(self, obj_factor): @abc.abstractmethod def get_obj_factor(self): - """Get the value of the objective function factor as + """Get the value of the objective function factor as set by set_obj_factor. This is the value that will be used in calls to the evaluation of the hessian of the lagrangian (evaluate_hessian_lag) @@ -244,7 +244,7 @@ def get_obj_factor(self): @abc.abstractmethod def get_obj_scaling(self): - """ Return the desired scaling factor to use for the + """Return the desired scaling factor to use for the for the objective function. None indicates no scaling. This indicates potential scaling for the model, but the evaluation methods should return *unscaled* values @@ -257,7 +257,7 @@ def get_obj_scaling(self): @abc.abstractmethod def get_primals_scaling(self): - """ Return the desired scaling factors to use for the + """Return the desired scaling factors to use for the for the primals. None indicates no scaling. This indicates potential scaling for the model, but the evaluation methods should return *unscaled* values @@ -270,7 +270,7 @@ def get_primals_scaling(self): @abc.abstractmethod def get_constraints_scaling(self): - """ Return the desired scaling factors to use for the + """Return the desired scaling factors to use for the for the constraints. None indicates no scaling. This indicates potential scaling for the model, but the evaluation methods should return *unscaled* values @@ -283,7 +283,7 @@ def get_constraints_scaling(self): @abc.abstractmethod def evaluate_objective(self): - """Returns value of objective function evaluated at the + """Returns value of objective function evaluated at the values given for the primal variables in set_primals Returns @@ -294,7 +294,7 @@ def evaluate_objective(self): @abc.abstractmethod def evaluate_grad_objective(self, out=None): - """Returns gradient of the objective function evaluated at the + """Returns gradient of the objective function evaluated at the values given for the primal variables in set_primals Parameters @@ -361,33 +361,34 @@ def evaluate_hessian_lag(self, out=None): @abc.abstractmethod def report_solver_status(self, status_code, status_message): - """Report the solver status to NLP class using the values for the + """Report the solver status to NLP class using the values for the primals and duals defined in the set methods""" pass class ExtendedNLP(NLP, metaclass=abc.ABCMeta): - """ This interface extends the NLP interface to support a presentation + """This interface extends the NLP interface to support a presentation of the problem that separates equality and inequality constraints """ + def __init__(self): super(ExtendedNLP, self).__init__() pass - + @abc.abstractmethod def n_eq_constraints(self): """ Returns number of equality constraints """ pass - + @abc.abstractmethod def n_ineq_constraints(self): """ Returns number of inequality constraints """ pass - + @abc.abstractmethod def nnz_jacobian_eq(self): """ @@ -445,7 +446,7 @@ def init_duals_ineq(self): @abc.abstractmethod def create_new_vector(self, vector_type): """ - Creates a vector of the appropriate length and structure as + Creates a vector of the appropriate length and structure as requested Parameters @@ -502,7 +503,7 @@ def get_duals_ineq(self): @abc.abstractmethod def get_eq_constraints_scaling(self): - """ Return the desired scaling factors to use for the + """Return the desired scaling factors to use for the for the equality constraints. None indicates no scaling. This indicates potential scaling for the model, but the evaluation methods should return *unscaled* values @@ -515,7 +516,7 @@ def get_eq_constraints_scaling(self): @abc.abstractmethod def get_ineq_constraints_scaling(self): - """ Return the desired scaling factors to use for the + """Return the desired scaling factors to use for the for the inequality constraints. None indicates no scaling. This indicates potential scaling for the model, but the evaluation methods should return *unscaled* values @@ -591,4 +592,3 @@ def evaluate_jacobian_ineq(self, out=None): matrix_like """ pass - diff --git a/pyomo/contrib/pynumero/interfaces/nlp_projections.py b/pyomo/contrib/pynumero/interfaces/nlp_projections.py index a91b976965a..68cb0eef15f 100644 --- a/pyomo/contrib/pynumero/interfaces/nlp_projections.py +++ b/pyomo/contrib/pynumero/interfaces/nlp_projections.py @@ -1,7 +1,8 @@ -from pyomo.contrib.pynumero.interfaces.nlp import NLP +from pyomo.contrib.pynumero.interfaces.nlp import NLP, ExtendedNLP import numpy as np import scipy.sparse as sp + class _BaseNLPDelegator(NLP): def __init__(self, original_nlp): """ @@ -105,6 +106,34 @@ def report_solver_status(self, status_code, status_message): self._original_nlp.report_solver_status(status_code, status_message) +class _ExtendedNLPDelegator(_BaseNLPDelegator): + def __init__(self, original_nlp): + if not isinstance(original_nlp, ExtendedNLP): + raise TypeError( + "Original NLP must be an instance of ExtendedNLP to use in" + " an _ExtendedNLPDelegator. Got type %s" % type(original_nlp) + ) + super().__init__(original_nlp) + + def n_eq_constraints(self): + return self._original_nlp.n_eq_constraints() + + def n_ineq_constraints(self): + return self._original_nlp.n_ineq_constraints() + + def evaluate_eq_constraints(self): + return self._original_nlp.evaluate_eq_constraints() + + def evaluate_jacobian_eq(self): + return self._original_nlp.evaluate_jacobian_eq() + + def evaluate_ineq_constraints(self): + return self._original_nlp.evaluate_ineq_constraints() + + def evaluate_jacobian_ineq(self): + return self._original_nlp.evaluate_jacobian_ineq() + + class RenamedNLP(_BaseNLPDelegator): def __init__(self, original_nlp, primals_name_map): """ @@ -130,9 +159,10 @@ def __init__(self, original_nlp, primals_name_map): def _generate_new_names(self): if self._new_primals_names is None: assert self._original_nlp.n_primals() == len(self._primals_name_map) - self._new_primals_names = \ - [self._primals_name_map[nm] for nm in self._original_nlp.primals_names()] - + self._new_primals_names = [ + self._primals_name_map[nm] for nm in self._original_nlp.primals_names() + ] + def primals_names(self): return self._new_primals_names @@ -141,7 +171,7 @@ class ProjectedNLP(_BaseNLPDelegator): def __init__(self, original_nlp, primals_ordering): """ This class takes an NLP that depends on a set of primals (original - space) and converts it to an NLP that depends on a reordered set of + space) and converts it to an NLP that depends on a reordered set of primals (projected space). This will impact all the returned items associated with primal @@ -149,7 +179,7 @@ def __init__(self, original_nlp, primals_ordering): instead of the original primals ordering. Note also that this can include additional primal variables not - in the original NLP, or can exclude primal variables that were + in the original NLP, or can exclude primal variables that were in the original NLP. Parameters @@ -158,9 +188,9 @@ def __init__(self, original_nlp, primals_ordering): The original NLP object that implements the NLP interface primals_ordering: list - List of strings indicating the desired primal variable + List of strings indicating the desired primal variable ordering for this NLP. The list can contain new variables - that are not in the original NLP, thereby expanding the + that are not in the original NLP, thereby expanding the space of the primal variables. """ super(ProjectedNLP, self).__init__(original_nlp) @@ -176,18 +206,20 @@ def __init__(self, original_nlp, primals_ordering): def _generate_maps(self): if self._original_idxs is None or self._projected_idxs is None: - primals_ordering_dict = {k:i for i,k in enumerate(self._primals_ordering)} + primals_ordering_dict = {k: i for i, k in enumerate(self._primals_ordering)} original_names = self._original_nlp.primals_names() original_idxs = list() projected_idxs = list() - for i,nm in enumerate(original_names): + for i, nm in enumerate(original_names): if nm in primals_ordering_dict: # we need the reordering for this element original_idxs.append(i) projected_idxs.append(primals_ordering_dict[nm]) self._original_idxs = np.asarray(original_idxs) self._projected_idxs = np.asarray(projected_idxs) - self._original_to_projected = np.nan*np.zeros(self._original_nlp.n_primals()) + self._original_to_projected = np.nan * np.zeros( + self._original_nlp.n_primals() + ) self._original_to_projected[self._original_idxs] = self._projected_idxs def n_primals(self): @@ -209,10 +241,10 @@ def nnz_hessian_lag(self): return self._nnz_hessian_lag def _project_primals(self, default, original_primals): - projected_x = default*np.ones(self.n_primals(), dtype=np.float64) + projected_x = default * np.ones(self.n_primals(), dtype=np.float64) projected_x[self._projected_idxs] = original_primals[self._original_idxs] return projected_x - + def primals_lb(self): return self._project_primals(-np.inf, self._original_nlp.primals_lb()) @@ -243,7 +275,9 @@ def set_primals(self, primals): def get_primals(self): original_primals = self._original_nlp.get_primals() - self._projected_primals[self._projected_idxs] = original_primals[self._original_idxs] + self._projected_primals[self._projected_idxs] = original_primals[ + self._original_idxs + ] return self._projected_primals def get_primals_scaling(self): @@ -251,7 +285,7 @@ def get_primals_scaling(self): def evaluate_grad_objective(self, out=None): original_grad_objective = self._original_nlp.evaluate_grad_objective() - projected_objective = self._project_primals(0.0, original_grad_objective) + projected_objective = self._project_primals(0.0, original_grad_objective) if out is None: return projected_objective np.copyto(out, projected_objective) @@ -262,7 +296,7 @@ def evaluate_jacobian(self, out=None): if out is not None: np.copyto(out.data, original_jacobian.data[self._jacobian_nz_mask]) return out - + row = original_jacobian.row col = original_jacobian.col data = original_jacobian.data @@ -276,21 +310,26 @@ def evaluate_jacobian(self, out=None): new_row = row[self._jacobian_nz_mask] new_data = data[self._jacobian_nz_mask] - return sp.coo_matrix((new_data, (new_row,new_col)), shape=(self.n_constraints(), self.n_primals())) + return sp.coo_matrix( + (new_data, (new_row, new_col)), + shape=(self.n_constraints(), self.n_primals()), + ) def evaluate_hessian_lag(self, out=None): original_hessian = self._original_nlp.evaluate_hessian_lag() if out is not None: np.copyto(out.data, original_hessian.data[self._hessian_nz_mask]) return out - + row = original_hessian.row col = original_hessian.col data = original_hessian.data if self._hessian_nz_mask is None: # need to remap the irow, jcol to the new space and change the size - self._hessian_nz_mask = np.isin(col, self._original_idxs) & np.isin(row, self._original_idxs) + self._hessian_nz_mask = np.isin(col, self._original_idxs) & np.isin( + row, self._original_idxs + ) new_col = col[self._hessian_nz_mask] new_col = self._original_to_projected[new_col] @@ -298,7 +337,63 @@ def evaluate_hessian_lag(self, out=None): new_row = self._original_to_projected[new_row] new_data = data[self._hessian_nz_mask] - return sp.coo_matrix((new_data, (new_row,new_col)), shape=(self.n_primals(), self.n_primals())) + return sp.coo_matrix( + (new_data, (new_row, new_col)), shape=(self.n_primals(), self.n_primals()) + ) def report_solver_status(self, status_code, status_message): raise NotImplementedError('Need to think about this...') + + +class ProjectedExtendedNLP(ProjectedNLP, _ExtendedNLPDelegator): + def __init__(self, original_nlp, primals_ordering): + super(ProjectedExtendedNLP, self).__init__(original_nlp, primals_ordering) + self._jacobian_eq_nz_mask = None + self._jacobian_ineq_nz_mask = None + + def evaluate_jacobian_eq(self, out=None): + original_jacobian = self._original_nlp.evaluate_jacobian_eq() + if out is not None: + np.copyto(out.data, original_jacobian.data[self._jacobian_eq_nz_mask]) + return out + + row = original_jacobian.row + col = original_jacobian.col + data = original_jacobian.data + + if self._jacobian_eq_nz_mask is None: + # need to remap the irow, jcol to the new space and change the size + self._jacobian_eq_nz_mask = np.isin(col, self._original_idxs) + + new_col = col[self._jacobian_eq_nz_mask] + new_col = self._original_to_projected[new_col] + new_row = row[self._jacobian_eq_nz_mask] + new_data = data[self._jacobian_eq_nz_mask] + + return sp.coo_matrix( + (new_data, (new_row, new_col)), + shape=(self.n_eq_constraints(), self.n_primals()), + ) + + def evaluate_jacobian_ineq(self, out=None): + original_jacobian = self._original_nlp.evaluate_jacobian_ineq() + if out is not None: + np.copyto(out.data, original_jacobian.data[self._jacobian_ineq_nz_mask]) + return out + + row = original_jacobian.row + col = original_jacobian.col + data = original_jacobian.data + + if self._jacobian_ineq_nz_mask is None: + self._jacobian_ineq_nz_mask = np.isin(col, self._original_idxs) + + new_col = col[self._jacobian_ineq_nz_mask] + new_col = self._original_to_projected[new_col] + new_row = row[self._jacobian_ineq_nz_mask] + new_data = data[self._jacobian_ineq_nz_mask] + + return sp.coo_matrix( + (new_data, (new_row, new_col)), + shape=(self.n_ineq_constraints(), self.n_primals()), + ) diff --git a/pyomo/contrib/pynumero/interfaces/pyomo_grey_box_nlp.py b/pyomo/contrib/pynumero/interfaces/pyomo_grey_box_nlp.py index 47a58cfcac0..9a5dc50ef7b 100644 --- a/pyomo/contrib/pynumero/interfaces/pyomo_grey_box_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/pyomo_grey_box_nlp.py @@ -25,14 +25,18 @@ from pyomo.contrib.pynumero.sparse.block_vector import BlockVector from pyomo.contrib.pynumero.interfaces.nlp import NLP from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP -from pyomo.contrib.pynumero.interfaces.utils import make_lower_triangular_full, CondensedSparseSummation +from pyomo.contrib.pynumero.interfaces.utils import ( + make_lower_triangular_full, + CondensedSparseSummation, +) from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxBlock from pyomo.contrib.pynumero.interfaces.nlp_projections import ProjectedNLP + # Todo: make some of the numpy arise not writable from __init__ class PyomoNLPWithGreyBoxBlocks(NLP): def __init__(self, pyomo_model): - super(PyomoNLPWithGreyBoxBlocks,self).__init__() + super(PyomoNLPWithGreyBoxBlocks, self).__init__() # get the list of all grey box blocks and build _ExternalGreyBoxAsNLP objects greybox_components = [] @@ -47,9 +51,9 @@ def __init__(self, pyomo_model): # PyomoNLP object to expose any variables on the block to # the underlying Pyomo machinery for greybox in pyomo_model.component_objects( - ExternalGreyBoxBlock, descend_into=True): - greybox.parent_block().reclassify_component_type( - greybox, pyo.Block) + ExternalGreyBoxBlock, descend_into=True + ): + greybox.parent_block().reclassify_component_type(greybox, pyo.Block) greybox_components.append(greybox) # store the pyomo model @@ -58,17 +62,13 @@ def __init__(self, pyomo_model): # part of the model only) self._pyomo_nlp = PyomoNLP(pyomo_model) self._pyomo_model_var_names_to_datas = { - v.getname( - fully_qualified=True - ): v + v.getname(fully_qualified=True): v for v in pyomo_model.component_data_objects( ctype=pyo.Var, descend_into=True ) } self._pyomo_model_constraint_names_to_datas = { - c.getname( - fully_qualified=True - ): c + c.getname(fully_qualified=True): c for c in pyomo_model.component_data_objects( ctype=pyo.Constraint, descend_into=True ) @@ -78,13 +78,15 @@ def __init__(self, pyomo_model): # Restore the ctypes of the ExternalGreyBoxBlock components for greybox in greybox_components: greybox.parent_block().reclassify_component_type( - greybox, ExternalGreyBoxBlock) + greybox, ExternalGreyBoxBlock + ) if self._pyomo_nlp.n_primals() == 0: raise ValueError( "No variables were found in the Pyomo part of the model." " PyomoGreyBoxModel requires at least one variable" - " to be active in a Pyomo objective or constraint") + " to be active in a Pyomo objective or constraint" + ) # build the list of NLP wrappers for the greybox objects greybox_nlps = [] @@ -100,12 +102,15 @@ def __init__(self, pyomo_model): greybox_nlps.append(greybox_nlp) if fixed_vars: - logging.getLogger(__name__).error('PyomoNLPWithGreyBoxBlocks found fixed variables for the' - ' inputs and/or outputs of an ExternalGreyBoxBlock. This' - ' is not currently supported. The fixed variables were:\n\t' - + '\n\t'.join(f.getname(fully_qualified=True) for f in fixed_vars) - ) - raise NotImplementedError('PyomoNLPWithGreyBoxBlocks does not support fixed inputs or outputs') + logging.getLogger(__name__).error( + 'PyomoNLPWithGreyBoxBlocks found fixed variables for the' + ' inputs and/or outputs of an ExternalGreyBoxBlock. This' + ' is not currently supported. The fixed variables were:\n\t' + + '\n\t'.join(f.getname(fully_qualified=True) for f in fixed_vars) + ) + raise NotImplementedError( + 'PyomoNLPWithGreyBoxBlocks does not support fixed inputs or outputs' + ) # let's build up the union of all the primal variables names # RBP: Why use names here? Why not just ComponentSet of all @@ -117,14 +122,21 @@ def __init__(self, pyomo_model): # sort the names for consistency run to run self._n_primals = len(primals_names) self._primals_names = primals_names = sorted(primals_names) - self._pyomo_model_var_datas = [self._pyomo_model_var_names_to_datas[nm] for nm in self._primals_names] + self._pyomo_model_var_datas = [ + self._pyomo_model_var_names_to_datas[nm] for nm in self._primals_names + ] # get the names of all the constraints self._constraint_names = list(self._pyomo_nlp.constraint_names()) - self._constraint_datas = [self._pyomo_model_constraint_names_to_datas.get(nm) for nm in self._constraint_names] + self._constraint_datas = [ + self._pyomo_model_constraint_names_to_datas.get(nm) + for nm in self._constraint_names + ] for gbnlp in greybox_nlps: self._constraint_names.extend(gbnlp.constraint_names()) - self._constraint_datas.extend([(gbnlp._block, nm) for nm in gbnlp.constraint_names()]) + self._constraint_datas.extend( + [(gbnlp._block, nm) for nm in gbnlp.constraint_names()] + ) self._n_constraints = len(self._constraint_names) self._has_hessian_support = True @@ -134,7 +146,7 @@ def __init__(self, pyomo_model): # wrap all the nlp objects with projected nlp objects self._pyomo_nlp = ProjectedNLP(self._pyomo_nlp, primals_names) - for i,gbnlp in enumerate(greybox_nlps): + for i, gbnlp in enumerate(greybox_nlps): greybox_nlps[i] = ProjectedNLP(greybox_nlps[i], primals_names) # build a list of all the nlps in order @@ -159,33 +171,43 @@ def __init__(self, pyomo_model): self._primals_ub[mask] = np.minimum(self._primals_ub[mask], local[mask]) # all the nan's should be gone (every primal should be initialized) - if np.any(np.isnan(self._init_primals)) \ - or np.any(np.isnan(self._primals_lb)) \ - or np.any(np.isnan(self._primals_ub)): - raise ValueError('NaN values found in initialization of primals or' - ' primals_lb or primals_ub in _PyomoNLPWithGreyBoxBlocks.') + if ( + np.any(np.isnan(self._init_primals)) + or np.any(np.isnan(self._primals_lb)) + or np.any(np.isnan(self._primals_ub)) + ): + raise ValueError( + 'NaN values found in initialization of primals or' + ' primals_lb or primals_ub in _PyomoNLPWithGreyBoxBlocks.' + ) self._init_duals = BlockVector(len(nlps)) self._dual_values_blockvector = BlockVector(len(nlps)) self._constraints_lb = BlockVector(len(nlps)) self._constraints_ub = BlockVector(len(nlps)) - for i,nlp in enumerate(nlps): + for i, nlp in enumerate(nlps): self._init_duals.set_block(i, nlp.init_duals()) self._constraints_lb.set_block(i, nlp.constraints_lb()) self._constraints_ub.set_block(i, nlp.constraints_ub()) - self._dual_values_blockvector.set_block(i, np.nan*np.zeros(nlp.n_constraints())) + self._dual_values_blockvector.set_block( + i, np.nan * np.zeros(nlp.n_constraints()) + ) self._init_duals = self._init_duals.flatten() self._constraints_lb = self._constraints_lb.flatten() self._constraints_ub = self._constraints_ub.flatten() # verify that there are no nans in the init_duals - if np.any(np.isnan(self._init_duals)) \ - or np.any(np.isnan(self._constraints_lb)) \ - or np.any(np.isnan(self._constraints_ub)): - raise ValueError('NaN values found in initialization of duals or' - ' constraints_lb or constraints_ub in' - ' _PyomoNLPWithGreyBoxBlocks.') - - self._primal_values = np.nan*np.ones(self._n_primals) + if ( + np.any(np.isnan(self._init_duals)) + or np.any(np.isnan(self._constraints_lb)) + or np.any(np.isnan(self._constraints_ub)) + ): + raise ValueError( + 'NaN values found in initialization of duals or' + ' constraints_lb or constraints_ub in' + ' _PyomoNLPWithGreyBoxBlocks.' + ) + + self._primal_values = np.nan * np.ones(self._n_primals) # set the values of the primals and duals to make sure initial # values get all the way through to the underlying models self.set_primals(self._init_primals) @@ -208,12 +230,12 @@ def __init__(self, pyomo_model): scaling_suffix = pyomo_model.component('scaling_factor') if scaling_suffix and scaling_suffix.ctype is pyo.Suffix: need_scaling = True - for i,v in enumerate(self._pyomo_model_var_datas): + for i, v in enumerate(self._pyomo_model_var_datas): if v in scaling_suffix: self._primals_scaling[i] = scaling_suffix[v] self._constraints_scaling = BlockVector(len(nlps)) - for i,nlp in enumerate(nlps): + for i, nlp in enumerate(nlps): local_constraints_scaling = nlp.get_constraints_scaling() if local_constraints_scaling is None: self._constraints_scaling.set_block(i, np.ones(nlp.n_constraints())) @@ -307,7 +329,7 @@ def get_primals(self): # overloaded from NLP def set_duals(self, duals): self._dual_values_blockvector.copyfrom(duals) - for i,nlp in enumerate(self._nlps): + for i, nlp in enumerate(self._nlps): nlp.set_duals(self._dual_values_blockvector.get_block(i)) # overloaded from NLP @@ -349,7 +371,7 @@ def evaluate_grad_objective(self, out=None): def evaluate_constraints(self, out=None): # todo: implement the "out" version more efficiently ret = BlockVector(len(self._nlps)) - for i,nlp in enumerate(self._nlps): + for i, nlp in enumerate(self._nlps): ret.set_block(i, nlp.evaluate_constraints()) if out is not None: @@ -360,8 +382,8 @@ def evaluate_constraints(self, out=None): # overloaded from NLP def evaluate_jacobian(self, out=None): - ret = BlockMatrix(len(self._nlps),1) - for i,nlp in enumerate(self._nlps): + ret = BlockMatrix(len(self._nlps), 1) + for i, nlp in enumerate(self._nlps): ret.set_block(i, 0, nlp.evaluate_jacobian()) ret = ret.tocoo() @@ -393,13 +415,12 @@ def report_solver_status(self, status_code, status_message): def load_state_into_pyomo(self, bound_multipliers=None): # load the values of the primals into the pyomo primals = self.get_primals() - for value,vardata in zip(primals, self._pyomo_model_var_datas): + for value, vardata in zip(primals, self._pyomo_model_var_datas): vardata.set_value(value) # get the active suffixes m = self._pyomo_model - model_suffixes = dict( - pyo.suffix.active_import_suffix_generator(m)) + model_suffixes = dict(pyo.suffix.active_import_suffix_generator(m)) # we need to correct the sign of the multipliers based on whether or # not we are minimizing or maximizing - this is done in the ASL interface @@ -413,29 +434,35 @@ def load_state_into_pyomo(self, bound_multipliers=None): if 'dual' in model_suffixes: model_suffixes['dual'].clear() dual_values = self._dual_values_blockvector.flatten() - for value,t in zip(dual_values, self._constraint_datas): + for value, t in zip(dual_values, self._constraint_datas): if type(t) is tuple: - model_suffixes['dual'].setdefault(t[0], {})[t[1]] = -obj_sign*value + model_suffixes['dual'].setdefault(t[0], {})[t[1]] = ( + -obj_sign * value + ) else: # t is a constraint data - model_suffixes['dual'][t] = -obj_sign*value + model_suffixes['dual'][t] = -obj_sign * value if 'ipopt_zL_out' in model_suffixes: model_suffixes['ipopt_zL_out'].clear() if bound_multipliers is not None: model_suffixes['ipopt_zL_out'].update( - zip(self._pyomo_model_var_datas, obj_sign*bound_multipliers[0])) + zip(self._pyomo_model_var_datas, obj_sign * bound_multipliers[0]) + ) if 'ipopt_zU_out' in model_suffixes: model_suffixes['ipopt_zU_out'].clear() if bound_multipliers is not None: model_suffixes['ipopt_zU_out'].update( - zip(self._pyomo_model_var_datas, -obj_sign*bound_multipliers[1])) + zip(self._pyomo_model_var_datas, -obj_sign * bound_multipliers[1]) + ) + def _default_if_none(value, default): if value is None: return default return value + class _ExternalGreyBoxAsNLP(NLP): """ This class takes an ExternalGreyBoxModel and makes it look @@ -443,6 +470,7 @@ class _ExternalGreyBoxAsNLP(NLP): the ExternalGreyBoxModel supports constraints only (no objective), so some of the methods are not appropriate and raise exceptions """ + def __init__(self, external_grey_box_block): self._block = external_grey_box_block self._ex_model = external_grey_box_block.get_external_model() @@ -452,68 +480,69 @@ def __init__(self, external_grey_box_block): n_outputs = len(self._block.outputs) assert n_outputs == self._ex_model.n_outputs() - if self._ex_model.n_outputs() == 0 and \ - self._ex_model.n_equality_constraints() == 0: + if ( + self._ex_model.n_outputs() == 0 + and self._ex_model.n_equality_constraints() == 0 + ): raise ValueError( 'ExternalGreyBoxModel has no equality constraints ' 'or outputs. To use _ExternalGreyBoxAsNLP, it must' - ' have at least one or both.') + ' have at least one or both.' + ) # create the list of primals and constraint names # primals will be ordered inputs, followed by outputs self._primals_names = [ - self._block.inputs[k].getname( - fully_qualified=True - ) for k in self._block.inputs + self._block.inputs[k].getname(fully_qualified=True) + for k in self._block.inputs ] self._primals_names.extend( - self._block.outputs[k].getname( - fully_qualified=True - ) + self._block.outputs[k].getname(fully_qualified=True) for k in self._block.outputs ) n_primals = len(self._primals_names) - prefix = self._block.getname( - fully_qualified=True - ) - self._constraint_names = \ - ['{}.{}'.format(prefix, nm) \ - for nm in self._ex_model.equality_constraint_names()] + prefix = self._block.getname(fully_qualified=True) + self._constraint_names = [ + '{}.{}'.format(prefix, nm) + for nm in self._ex_model.equality_constraint_names() + ] output_var_names = [ - self._block.outputs[k].getname( - fully_qualified=False - ) for k in self._block.outputs + self._block.outputs[k].getname(fully_qualified=False) + for k in self._block.outputs ] self._constraint_names.extend( - ['{}.output_constraints[{}]'.format(prefix, nm) \ - for nm in self._ex_model.output_names()]) + [ + '{}.output_constraints[{}]'.format(prefix, nm) + for nm in self._ex_model.output_names() + ] + ) # create the numpy arrays of bounds on the primals self._primals_lb = BlockVector(2) self._primals_ub = BlockVector(2) self._init_primals = BlockVector(2) - lb = np.nan*np.zeros(n_inputs) - ub = np.nan*np.zeros(n_inputs) - init_primals = np.nan*np.zeros(n_inputs) - for i,k in enumerate(self._block.inputs): + lb = np.nan * np.zeros(n_inputs) + ub = np.nan * np.zeros(n_inputs) + init_primals = np.nan * np.zeros(n_inputs) + for i, k in enumerate(self._block.inputs): lb[i] = _default_if_none(self._block.inputs[k].lb, -np.inf) ub[i] = _default_if_none(self._block.inputs[k].ub, np.inf) init_primals[i] = _default_if_none(self._block.inputs[k].value, 0.0) - self._primals_lb.set_block(0,lb) - self._primals_ub.set_block(0,ub) + self._primals_lb.set_block(0, lb) + self._primals_ub.set_block(0, ub) self._init_primals.set_block(0, init_primals) - lb = np.nan*np.zeros(n_outputs) - ub = np.nan*np.zeros(n_outputs) - init_primals = np.nan*np.zeros(n_outputs) - for i,k in enumerate(self._block.outputs): + lb = np.nan * np.zeros(n_outputs) + ub = np.nan * np.zeros(n_outputs) + init_primals = np.nan * np.zeros(n_outputs) + for i, k in enumerate(self._block.outputs): lb[i] = _default_if_none(self._block.outputs[k].lb, -np.inf) ub[i] = _default_if_none(self._block.outputs[k].ub, np.inf) init_primals[i] = _default_if_none(self._block.outputs[k].value, 0.0) - self._primals_lb.set_block(1,lb) - self._primals_ub.set_block(1,ub) - self._init_primals.set_block(1,init_primals) + self._primals_lb.set_block(1, lb) + self._primals_ub.set_block(1, ub) + self._init_primals.set_block(1, init_primals) self._primals_lb = self._primals_lb.flatten() self._primals_ub = self._primals_ub.flatten() self._init_primals = self._init_primals.flatten() @@ -538,11 +567,13 @@ def __init__(self, external_grey_box_block): # do we have hessian support self._has_hessian_support = True - if self._ex_model.n_equality_constraints() > 0 \ - and not hasattr(self._ex_model, 'evaluate_hessian_equality_constraints'): + if self._ex_model.n_equality_constraints() > 0 and not hasattr( + self._ex_model, 'evaluate_hessian_equality_constraints' + ): self._has_hessian_support = False - if self._ex_model.n_outputs() > 0 \ - and not hasattr(self._ex_model, 'evaluate_hessian_outputs'): + if self._ex_model.n_outputs() > 0 and not hasattr( + self._ex_model, 'evaluate_hessian_outputs' + ): self._has_hessian_support = False self._nnz_jacobian = None @@ -608,7 +639,7 @@ def set_primals(self, primals): self._cache_invalidate_primals() assert len(primals) == self.n_primals() np.copyto(self._primal_values, primals) - self._ex_model.set_input_values(primals[:self._ex_model.n_inputs()]) + self._ex_model.set_input_values(primals[: self._ex_model.n_inputs()]) def get_primals(self): return np.copy(self._primal_values) @@ -622,12 +653,12 @@ def set_duals(self, duals): np.copyto(self._dual_values, duals) if self._ex_model.n_equality_constraints() > 0: self._ex_model.set_equality_constraint_multipliers( - self._dual_values[:self._ex_model.n_equality_constraints()] - ) + self._dual_values[: self._ex_model.n_equality_constraints()] + ) if self._ex_model.n_outputs() > 0: self._ex_model.set_output_constraint_multipliers( - self._dual_values[self._ex_model.n_equality_constraints():] - ) + self._dual_values[self._ex_model.n_equality_constraints() :] + ) def get_duals(self): return np.copy(self._dual_values) @@ -646,7 +677,7 @@ def get_primals_scaling(self): '_ExternalGreyBoxAsNLP does not support scaling of primals ' 'directly. This should be handled at a higher level using ' 'suffixes on the Pyomo variables.' - ) + ) def get_constraints_scaling(self): # todo: would this be better with block vectors @@ -655,12 +686,12 @@ def get_constraints_scaling(self): if self._ex_model.n_equality_constraints() > 0: eq_scaling = self._ex_model.get_equality_constraint_scaling_factors() if eq_scaling is not None: - scaling[:self._ex_model.n_equality_constraints()] = eq_scaling + scaling[: self._ex_model.n_equality_constraints()] = eq_scaling scaled = True if self._ex_model.n_outputs() > 0: output_scaling = self._ex_model.get_output_constraint_scaling_factors() if output_scaling is not None: - scaling[self._ex_model.n_equality_constraints():] = output_scaling + scaling[self._ex_model.n_equality_constraints() :] = output_scaling scaled = True if scaled: return scaling @@ -682,10 +713,10 @@ def _evaluate_constraints_if_necessary_and_cache(self): else: c.set_block(0, np.zeros(0, dtype=np.float64)) if self._ex_model.n_outputs() > 0: - output_values = self._primal_values[self._ex_model.n_inputs():] + output_values = self._primal_values[self._ex_model.n_inputs() :] c.set_block(1, self._ex_model.evaluate_outputs() - output_values) else: - c.set_block(1,np.zeros(0, dtype=np.float64)) + c.set_block(1, np.zeros(0, dtype=np.float64)) self._cached_constraint_residuals = c.flatten() def evaluate_constraints(self, out=None): @@ -699,17 +730,19 @@ def evaluate_constraints(self, out=None): def _evaluate_jacobian_if_necessary_and_cache(self): if self._cached_jacobian is None: - jac = BlockMatrix(2,2) - jac.set_row_size(0,self._ex_model.n_equality_constraints()) - jac.set_row_size(1,self._ex_model.n_outputs()) - jac.set_col_size(0,self._ex_model.n_inputs()) - jac.set_col_size(1,self._ex_model.n_outputs()) - + jac = BlockMatrix(2, 2) + jac.set_row_size(0, self._ex_model.n_equality_constraints()) + jac.set_row_size(1, self._ex_model.n_outputs()) + jac.set_col_size(0, self._ex_model.n_inputs()) + jac.set_col_size(1, self._ex_model.n_outputs()) + if self._ex_model.n_equality_constraints() > 0: - jac.set_block(0,0,self._ex_model.evaluate_jacobian_equality_constraints()) + jac.set_block( + 0, 0, self._ex_model.evaluate_jacobian_equality_constraints() + ) if self._ex_model.n_outputs() > 0: - jac.set_block(1,0,self._ex_model.evaluate_jacobian_outputs()) - jac.set_block(1,1,-1.0*identity(self._ex_model.n_outputs())) + jac.set_block(1, 0, self._ex_model.evaluate_jacobian_outputs()) + jac.set_block(1, 1, -1.0 * identity(self._ex_model.n_outputs())) self._cached_jacobian = jac.tocoo() @@ -721,16 +754,16 @@ def evaluate_jacobian(self, out=None): assert np.array_equal(jac.col, out.col) np.copyto(out.data, jac.data) return out - + return self._cached_jacobian.copy() def _evaluate_hessian_if_necessary_and_cache(self): if self._cached_hessian is None: - hess = BlockMatrix(2,2) - hess.set_row_size(0,self._ex_model.n_inputs()) - hess.set_row_size(1,self._ex_model.n_outputs()) - hess.set_col_size(0,self._ex_model.n_inputs()) - hess.set_col_size(1,self._ex_model.n_outputs()) + hess = BlockMatrix(2, 2) + hess.set_row_size(0, self._ex_model.n_inputs()) + hess.set_row_size(1, self._ex_model.n_outputs()) + hess.set_col_size(0, self._ex_model.n_inputs()) + hess.set_col_size(1, self._ex_model.n_outputs()) # get the hessian w.r.t. the equality constraints eq_hess = None @@ -738,8 +771,10 @@ def _evaluate_hessian_if_necessary_and_cache(self): eq_hess = self._ex_model.evaluate_hessian_equality_constraints() # let's check that it is lower triangular if np.any(eq_hess.row < eq_hess.col): - raise ValueError('ExternalGreyBoxModel must return lower ' - 'triangular portion of the Hessian only') + raise ValueError( + 'ExternalGreyBoxModel must return lower ' + 'triangular portion of the Hessian only' + ) eq_hess = make_lower_triangular_full(eq_hess) @@ -748,8 +783,10 @@ def _evaluate_hessian_if_necessary_and_cache(self): output_hess = self._ex_model.evaluate_hessian_outputs() # let's check that it is lower triangular if np.any(output_hess.row < output_hess.col): - raise ValueError('ExternalGreyBoxModel must return lower ' - 'triangular portion of the Hessian only') + raise ValueError( + 'ExternalGreyBoxModel must return lower ' + 'triangular portion of the Hessian only' + ) output_hess = make_lower_triangular_full(output_hess) @@ -761,14 +798,14 @@ def _evaluate_hessian_if_necessary_and_cache(self): data = np.concatenate((eq_hess.data, output_hess.data)) assert eq_hess.shape == output_hess.shape - input_hess = coo_matrix( (data, (row,col)), shape=eq_hess.shape) + input_hess = coo_matrix((data, (row, col)), shape=eq_hess.shape) elif eq_hess is not None: input_hess = eq_hess elif output_hess is not None: input_hess = output_hess - assert input_hess is not None # need equality or outputs or both + assert input_hess is not None # need equality or outputs or both - hess.set_block(0,0,input_hess) + hess.set_block(0, 0, input_hess) self._cached_hessian = hess.tocoo() def has_hessian_support(self): @@ -779,7 +816,7 @@ def evaluate_hessian_lag(self, out=None): raise NotImplementedError( 'Hessians not supported for all of the external grey box' ' models. Therefore, Hessians are not supported overall.' - ) + ) self._evaluate_hessian_if_necessary_and_cache() if out is not None: @@ -788,7 +825,7 @@ def evaluate_hessian_lag(self, out=None): assert np.array_equal(hess.col, out.col) np.copyto(out.data, hess.data) return out - + return self._cached_hessian.copy() def report_solver_status(self, status_code, status_message): diff --git a/pyomo/contrib/pynumero/interfaces/pyomo_nlp.py b/pyomo/contrib/pynumero/interfaces/pyomo_nlp.py index 7d50ccbdc7c..8017c642854 100644 --- a/pyomo/contrib/pynumero/interfaces/pyomo_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/pyomo_nlp.py @@ -1,4 +1,3 @@ -# ___________________________________________________________________________ # # Pyomo: Python Optimization Modeling Objects # Copyright (c) 2008-2022 @@ -31,6 +30,7 @@ __all__ = ['PyomoNLP'] + # TODO: There are todos in the code below class PyomoNLP(AslNLP): def __init__(self, pyomo_model, nl_file_options=None): @@ -45,8 +45,7 @@ def __init__(self, pyomo_model, nl_file_options=None): TempfileManager.push() try: # get the temp file names for the nl file - nl_file = TempfileManager.create_tempfile( - suffix='pynumero.nl') + nl_file = TempfileManager.create_tempfile(suffix='pynumero.nl') # The current AmplInterface code only supports a single # objective function Therefore, we throw an error if there @@ -59,22 +58,28 @@ def __init__(self, pyomo_model, nl_file_options=None): # handle this # # This currently addresses issue #1217 - objectives = list(pyomo_model.component_data_objects( - ctype=pyo.Objective, active=True, descend_into=True)) + objectives = list( + pyomo_model.component_data_objects( + ctype=pyo.Objective, active=True, descend_into=True + ) + ) if len(objectives) != 1: raise NotImplementedError( 'The ASL interface and PyomoNLP in PyNumero currently ' 'only support single objective problems. Deactivate ' 'any extra objectives you may have, or add a dummy ' 'objective (f(x)=0) if you have a square problem ' - '(found %s objectives).' % (len(objectives),)) + '(found %s objectives).' % (len(objectives),) + ) self._objective = objectives[0] # write the nl file for the Pyomo model and get the symbolMap if nl_file_options is None: nl_file_options = dict() fname, symbolMap = WriterFactory('nl')( - pyomo_model, nl_file, lambda x:True, nl_file_options) + pyomo_model, nl_file, lambda x: True, nl_file_options + ) + self._symbol_map = symbolMap # create component maps from vardata to idx and condata to idx self._vardata_to_idx = vdidx = ComponentMap() @@ -83,17 +88,22 @@ def __init__(self, pyomo_model, nl_file_options=None): # TODO: Are these names totally consistent? for name, obj in symbolMap.bySymbol.items(): if name[0] == 'v': - vdidx[obj()] = int(name[1:]) + vdidx[obj] = int(name[1:]) elif name[0] == 'c': - cdidx[obj()] = int(name[1:]) + cdidx[obj] = int(name[1:]) # The NL writer advertises the external function libraries # through the PYOMO_AMPLFUNC environment variable; merge it # with any preexisting AMPLFUNC definitions - amplfunc = "\n".join(filter(None, ( - os.environ.get('AMPLFUNC', None), - os.environ.get('PYOMO_AMPLFUNC', None), - ))) + amplfunc = "\n".join( + filter( + None, + ( + os.environ.get('AMPLFUNC', None), + os.environ.get('PYOMO_AMPLFUNC', None), + ), + ) + ) with CtypesEnviron(AMPLFUNC=amplfunc): super(PyomoNLP, self).__init__(nl_file) @@ -105,22 +115,25 @@ def __init__(self, pyomo_model, nl_file_options=None): full_to_equality = self._con_full_eq_map equality_mask = self._con_full_eq_mask self._condata_to_eq_idx = ComponentMap( - (con, full_to_equality[i]) - for con, i in self._condata_to_idx.items() - if equality_mask[i] - ) + (con, full_to_equality[i]) + for con, i in self._condata_to_idx.items() + if equality_mask[i] + ) full_to_inequality = self._con_full_ineq_map inequality_mask = self._con_full_ineq_mask self._condata_to_ineq_idx = ComponentMap( - (con, full_to_inequality[i]) - for con, i in self._condata_to_idx.items() - if inequality_mask[i] - ) + (con, full_to_inequality[i]) + for con, i in self._condata_to_idx.items() + if inequality_mask[i] + ) finally: # delete the nl file TempfileManager.pop() + @property + def symbol_map(self): + return self._symbol_map def pyomo_model(self): """ @@ -141,7 +154,7 @@ def get_pyomo_variables(self): the order corresponding to the primals """ # ToDo: is there a more efficient way to do this - idx_to_vardata = {i:v for v,i in self._vardata_to_idx.items()} + idx_to_vardata = {i: v for v, i in self._vardata_to_idx.items()} return [idx_to_vardata[i] for i in range(len(idx_to_vardata))] def get_pyomo_constraints(self): @@ -150,7 +163,7 @@ def get_pyomo_constraints(self): the order corresponding to the primals """ # ToDo: is there a more efficient way to do this - idx_to_condata = {i:v for v,i in self._condata_to_idx.items()} + idx_to_condata = {i: v for v, i in self._condata_to_idx.items()} return [idx_to_condata[i] for i in range(len(idx_to_condata))] def get_pyomo_equality_constraints(self): @@ -158,8 +171,7 @@ def get_pyomo_equality_constraints(self): Return an ordered list of the Pyomo ConData objects in the order corresponding to the equality constraints. """ - idx_to_condata = {i: c for c, i in - self._condata_to_eq_idx.items()} + idx_to_condata = {i: c for c, i in self._condata_to_eq_idx.items()} return [idx_to_condata[i] for i in range(len(idx_to_condata))] def get_pyomo_inequality_constraints(self): @@ -167,11 +179,14 @@ def get_pyomo_inequality_constraints(self): Return an ordered list of the Pyomo ConData objects in the order corresponding to the inequality constraints. """ - idx_to_condata = {i: c for c, i in - self._condata_to_ineq_idx.items()} + idx_to_condata = {i: c for c, i in self._condata_to_ineq_idx.items()} return [idx_to_condata[i] for i in range(len(idx_to_condata))] - @deprecated(msg='This method has been replaced with primals_names', version='6.0.0.dev0', remove_in='6.0') + @deprecated( + msg='This method has been replaced with primals_names', + version='6.0.0', + remove_in='6.0', + ) def variable_names(self): return self.primals_names() @@ -181,10 +196,7 @@ def primals_names(self): names in the order corresponding to the primals """ pyomo_variables = self.get_pyomo_variables() - return [ - v.getname(fully_qualified=True) - for v in pyomo_variables - ] + return [v.getname(fully_qualified=True) for v in pyomo_variables] def constraint_names(self): """ @@ -192,10 +204,7 @@ def constraint_names(self): names in the order corresponding to internal constraint order """ pyomo_constraints = self.get_pyomo_constraints() - return [ - v.getname(fully_qualified=True) - for v in pyomo_constraints - ] + return [v.getname(fully_qualified=True) for v in pyomo_constraints] def equality_constraint_names(self): """ @@ -203,10 +212,7 @@ def equality_constraint_names(self): the order corresponding to the equality constraints. """ equality_constraints = self.get_pyomo_equality_constraints() - return [ - v.getname(fully_qualified=True) - for v in equality_constraints - ] + return [v.getname(fully_qualified=True) for v in equality_constraints] def inequality_constraint_names(self): """ @@ -214,10 +220,7 @@ def inequality_constraint_names(self): the order corresponding to the inequality constraints. """ inequality_constraints = self.get_pyomo_inequality_constraints() - return [ - v.getname(fully_qualified=True) - for v in inequality_constraints - ] + return [v.getname(fully_qualified=True) for v in inequality_constraints] def get_primal_indices(self, pyomo_variables): """ @@ -316,7 +319,7 @@ def get_primals_scaling(self): scaling_suffix = self._pyomo_model.component('scaling_factor') if scaling_suffix and scaling_suffix.ctype is pyo.Suffix: primals_scaling = np.ones(self.n_primals()) - for i,v in enumerate(self.get_pyomo_variables()): + for i, v in enumerate(self.get_pyomo_variables()): if v in scaling_suffix: primals_scaling[i] = scaling_suffix[v] return primals_scaling @@ -327,7 +330,7 @@ def get_constraints_scaling(self): scaling_suffix = self._pyomo_model.component('scaling_factor') if scaling_suffix and scaling_suffix.ctype is pyo.Suffix: constraints_scaling = np.ones(self.n_constraints()) - for i,c in enumerate(self.get_pyomo_constraints()): + for i, c in enumerate(self.get_pyomo_constraints()): if c in scaling_suffix: constraints_scaling[i] = scaling_suffix[c] return constraints_scaling @@ -377,15 +380,18 @@ def extract_submatrix_jacobian(self, pyomo_variables, pyomo_constraints): submatrix_data = np.compress(submatrix_mask, jac.data) # ToDo: this is expensive - have to think about how to do this with numpy - row_submatrix_map = {j:i for i,j in enumerate(constraint_indices)} + row_submatrix_map = {j: i for i, j in enumerate(constraint_indices)} for i, v in enumerate(submatrix_irows): submatrix_irows[i] = row_submatrix_map[v] - col_submatrix_map = {j:i for i,j in enumerate(primal_indices)} + col_submatrix_map = {j: i for i, j in enumerate(primal_indices)} for i, v in enumerate(submatrix_jcols): submatrix_jcols[i] = col_submatrix_map[v] - return coo_matrix((submatrix_data, (submatrix_irows, submatrix_jcols)), shape=(len(constraint_indices), len(primal_indices))) + return coo_matrix( + (submatrix_data, (submatrix_irows, submatrix_jcols)), + shape=(len(constraint_indices), len(primal_indices)), + ) def extract_submatrix_hessian_lag(self, pyomo_variables_rows, pyomo_variables_cols): """ @@ -410,15 +416,18 @@ def extract_submatrix_hessian_lag(self, pyomo_variables_rows, pyomo_variables_co submatrix_data = np.compress(submatrix_mask, hess_lag.data) # ToDo: this is expensive - have to think about how to do this with numpy - submatrix_map = {j:i for i,j in enumerate(primal_indices_rows)} + submatrix_map = {j: i for i, j in enumerate(primal_indices_rows)} for i, v in enumerate(submatrix_irows): submatrix_irows[i] = submatrix_map[v] - submatrix_map = {j:i for i,j in enumerate(primal_indices_cols)} + submatrix_map = {j: i for i, j in enumerate(primal_indices_cols)} for i, v in enumerate(submatrix_jcols): submatrix_jcols[i] = submatrix_map[v] - return coo_matrix((submatrix_data, (submatrix_irows, submatrix_jcols)), shape=(len(primal_indices_rows), len(primal_indices_cols))) + return coo_matrix( + (submatrix_data, (submatrix_irows, submatrix_jcols)), + shape=(len(primal_indices_rows), len(primal_indices_cols)), + ) def load_state_into_pyomo(self, bound_multipliers=None): primals = self.get_primals() @@ -426,24 +435,25 @@ def load_state_into_pyomo(self, bound_multipliers=None): for var, val in zip(variables, primals): var.set_value(val) m = self.pyomo_model() - model_suffixes = dict( - pyo.suffix.active_import_suffix_generator(m)) + model_suffixes = dict(pyo.suffix.active_import_suffix_generator(m)) if 'dual' in model_suffixes: duals = self.get_duals() constraints = self.get_pyomo_constraints() model_suffixes['dual'].clear() - model_suffixes['dual'].update( - zip(constraints, duals)) + model_suffixes['dual'].update(zip(constraints, duals)) if 'ipopt_zL_out' in model_suffixes: model_suffixes['ipopt_zL_out'].clear() if bound_multipliers is not None: model_suffixes['ipopt_zL_out'].update( - zip(variables, bound_multipliers[0])) + zip(variables, bound_multipliers[0]) + ) if 'ipopt_zU_out' in model_suffixes: model_suffixes['ipopt_zU_out'].clear() if bound_multipliers is not None: model_suffixes['ipopt_zU_out'].update( - zip(variables, bound_multipliers[1])) + zip(variables, bound_multipliers[1]) + ) + # TODO: look for the [:-i] when i might be zero class PyomoGreyBoxNLP(NLP): @@ -456,9 +466,9 @@ def __init__(self, pyomo_model): # before calling the NL writer so that the attached Vars get # picked up by the writer. for greybox in pyomo_model.component_objects( - ExternalGreyBoxBlock, descend_into=True): - greybox.parent_block().reclassify_component_type( - greybox, pyo.Block) + ExternalGreyBoxBlock, descend_into=True + ): + greybox.parent_block().reclassify_component_type(greybox, pyo.Block) greybox_components.append(greybox) self._pyomo_model = pyomo_model @@ -468,25 +478,28 @@ def __init__(self, pyomo_model): # Restore the ctypes of the ExternalGreyBoxBlock components for greybox in greybox_components: greybox.parent_block().reclassify_component_type( - greybox, ExternalGreyBoxBlock) + greybox, ExternalGreyBoxBlock + ) # get the greybox block data objects greybox_data = [] for greybox in greybox_components: - greybox_data.extend(data for data in greybox.values() - if data.active) + greybox_data.extend(data for data in greybox.values() if data.active) if len(greybox_data) > 1: - raise NotImplementedError("The PyomoGreyBoxModel interface has not" - " been tested with Pyomo models that contain" - " more than one ExternalGreyBoxBlock. Currently," - " only a single block is supported.") + raise NotImplementedError( + "The PyomoGreyBoxModel interface has not" + " been tested with Pyomo models that contain" + " more than one ExternalGreyBoxBlock. Currently," + " only a single block is supported." + ) if self._pyomo_nlp.n_primals() == 0: raise ValueError( "No variables were found in the Pyomo part of the model." " PyomoGreyBoxModel requires at least one variable" - " to be active in a Pyomo objective or constraint") + " to be active in a Pyomo objective or constraint" + ) # check that the greybox model supports what we would expect # TODO: add support for models that do not provide jacobians @@ -520,22 +533,28 @@ def __init__(self, pyomo_model): # check that none of the inputs / outputs are fixed for v in data.inputs.values(): if v.fixed: - raise NotImplementedError('Found a grey box model input that is fixed: {}.' - ' This interface does not currently support fixed' - ' variables. Please add a constraint instead.' - ''.format(v.getname(fully_qualified=True))) + raise NotImplementedError( + 'Found a grey box model input that is fixed: {}.' + ' This interface does not currently support fixed' + ' variables. Please add a constraint instead.' + ''.format(v.getname(fully_qualified=True)) + ) for v in data.outputs.values(): if v.fixed: - raise NotImplementedError('Found a grey box model output that is fixed: {}.' - ' This interface does not currently support fixed' - ' variables. Please add a constraint instead.' - ''.format(v.getname(fully_qualified=True))) + raise NotImplementedError( + 'Found a grey box model output that is fixed: {}.' + ' This interface does not currently support fixed' + ' variables. Please add a constraint instead.' + ''.format(v.getname(fully_qualified=True)) + ) block_name = data.getname() for nm in data._ex_model.equality_constraint_names(): self._greybox_constraints_names.append('{}.{}'.format(block_name, nm)) for nm in data._ex_model.output_names(): - self._greybox_constraints_names.append('{}.{}_con'.format(block_name, nm)) + self._greybox_constraints_names.append( + '{}.{}_con'.format(block_name, nm) + ) for var in data.component_data_objects(pyo.Var): if var not in self._vardata_to_idx: @@ -544,7 +563,9 @@ def __init__(self, pyomo_model): self._vardata_to_idx[var] = n_primals n_primals += 1 greybox_primals.append(var) - self._greybox_primals_names.append(var.getname(fully_qualified=True)) + self._greybox_primals_names.append( + var.getname(fully_qualified=True) + ) self._n_greybox_primals = len(greybox_primals) self._greybox_primal_variables = greybox_primals @@ -598,7 +619,7 @@ def __init__(self, pyomo_model): scaling_suffix = self._pyomo_nlp._pyomo_model.component('scaling_factor') if scaling_suffix and scaling_suffix.ctype is pyo.Suffix: need_scaling = True - for i,v in enumerate(self.get_pyomo_variables()): + for i, v in enumerate(self.get_pyomo_variables()): if v in scaling_suffix: self._primals_scaling[i] = scaling_suffix[v] @@ -685,51 +706,45 @@ def nnz_hessian_lag(self): # overloaded from NLP def primals_lb(self): - return np.concatenate((self._pyomo_nlp.primals_lb(), - self._greybox_primals_lb, - )) + return np.concatenate((self._pyomo_nlp.primals_lb(), self._greybox_primals_lb)) # overloaded from NLP def primals_ub(self): - return np.concatenate(( - self._pyomo_nlp.primals_ub(), - self._greybox_primals_ub, - )) + return np.concatenate((self._pyomo_nlp.primals_ub(), self._greybox_primals_ub)) # overloaded from NLP def constraints_lb(self): - return np.concatenate(( - self._pyomo_nlp.constraints_lb(), - np.zeros(self._n_greybox_constraints, dtype=np.float64), - )) + return np.concatenate( + ( + self._pyomo_nlp.constraints_lb(), + np.zeros(self._n_greybox_constraints, dtype=np.float64), + ) + ) # overloaded from NLP def constraints_ub(self): - return np.concatenate(( - self._pyomo_nlp.constraints_ub(), - np.zeros(self._n_greybox_constraints, dtype=np.float64), - )) + return np.concatenate( + ( + self._pyomo_nlp.constraints_ub(), + np.zeros(self._n_greybox_constraints, dtype=np.float64), + ) + ) # overloaded from NLP def init_primals(self): - return np.concatenate(( - self._pyomo_nlp.init_primals(), - self._init_greybox_primals, - )) + return np.concatenate( + (self._pyomo_nlp.init_primals(), self._init_greybox_primals) + ) # overloaded from NLP def init_duals(self): - return np.concatenate(( - self._pyomo_nlp.init_duals(), - self._init_greybox_duals, - )) + return np.concatenate((self._pyomo_nlp.init_duals(), self._init_greybox_duals)) # overloaded from ExtendedNLP def init_duals_eq(self): - return np.concatenate(( - self._pyomo_nlp.init_duals_eq(), - self._init_greybox_duals, - )) + return np.concatenate( + (self._pyomo_nlp.init_duals_eq(), self._init_greybox_duals) + ) # overloaded from NLP / Extended NLP def create_new_vector(self, vector_type): @@ -749,11 +764,10 @@ def set_primals(self, primals): self._invalidate_greybox_primals_cache() # set the primals on the "pyomo" part of the nlp - self._pyomo_nlp.set_primals( - primals[:self._pyomo_nlp.n_primals()]) + self._pyomo_nlp.set_primals(primals[: self._pyomo_nlp.n_primals()]) # copy the values for the greybox primals - np.copyto(self._greybox_primals, primals[self._pyomo_nlp.n_primals():]) + np.copyto(self._greybox_primals, primals[self._pyomo_nlp.n_primals() :]) for external in self._external_greybox_helpers: external.set_primals(primals) @@ -763,21 +777,17 @@ def get_primals(self): # return the value of the primals that the pyomo # part knows about as well as any extra values that # are only in the greybox part - return np.concatenate(( - self._pyomo_nlp.get_primals(), - self._greybox_primals, - )) + return np.concatenate((self._pyomo_nlp.get_primals(), self._greybox_primals)) # overloaded from NLP def set_duals(self, duals): self._invalidate_greybox_duals_cache() # set the duals for the pyomo part of the nlp - self._pyomo_nlp.set_duals( - duals[:self._pyomo_nlp.n_constraints()]) + self._pyomo_nlp.set_duals(duals[: self._pyomo_nlp.n_constraints()]) # set the duals for the greybox part of the nlp - np.copyto(self._greybox_duals, duals[self._pyomo_nlp.n_constraints():]) + np.copyto(self._greybox_duals, duals[self._pyomo_nlp.n_constraints() :]) # set the duals in the helpers for the hessian computation for h in self._external_greybox_helpers: @@ -787,10 +797,7 @@ def set_duals(self, duals): def get_duals(self): # return the duals for the pyomo part of the nlp # concatenated with the greybox part - return np.concatenate(( - self._pyomo_nlp.get_duals(), - self._greybox_duals, - )) + return np.concatenate((self._pyomo_nlp.get_duals(), self._greybox_duals)) # overloaded from ExtendedNLP def set_duals_eq(self, duals): @@ -854,17 +861,23 @@ def evaluate_objective(self): # overloaded from NLP def evaluate_grad_objective(self, out=None): # objective is owned by the pyomo model - return np.concatenate(( - self._pyomo_nlp.evaluate_grad_objective(out), - np.zeros(self._n_greybox_primals))) + return np.concatenate( + ( + self._pyomo_nlp.evaluate_grad_objective(out), + np.zeros(self._n_greybox_primals), + ) + ) def _evaluate_greybox_constraints_and_cache_if_necessary(self): if self._greybox_constraints_cached: return - self._cached_greybox_constraints = np.concatenate(tuple( - external.evaluate_residuals() - for external in self._external_greybox_helpers)) + self._cached_greybox_constraints = np.concatenate( + tuple( + external.evaluate_residuals() + for external in self._external_greybox_helpers + ) + ) self._greybox_constraints_cached = True # overloaded from NLP @@ -872,28 +885,30 @@ def evaluate_constraints(self, out=None): self._evaluate_greybox_constraints_and_cache_if_necessary() if out is not None: - if not isinstance(out, np.ndarray) \ - or out.size != self.n_constraints(): + if not isinstance(out, np.ndarray) or out.size != self.n_constraints(): raise RuntimeError( 'Called evaluate_constraints with an invalid' ' "out" argument - should take an ndarray of ' - 'size {}'.format(self.n_constraints())) + 'size {}'.format(self.n_constraints()) + ) # call on the pyomo part of the nlp - self._pyomo_nlp.evaluate_constraints( - out[:-self._n_greybox_constraints]) + self._pyomo_nlp.evaluate_constraints(out[: -self._n_greybox_constraints]) # call on the greybox part of the nlp - np.copyto(out[-self._n_greybox_constraints:], - self._cached_greybox_constraints) + np.copyto( + out[-self._n_greybox_constraints :], self._cached_greybox_constraints + ) return out else: # concatenate the pyomo and external constraint residuals - return np.concatenate(( - self._pyomo_nlp.evaluate_constraints(), - self._cached_greybox_constraints, - )) + return np.concatenate( + ( + self._pyomo_nlp.evaluate_constraints(), + self._cached_greybox_constraints, + ) + ) # overloaded from ExtendedNLP def evaluate_eq_constraints(self, out=None): @@ -934,44 +949,55 @@ def evaluate_jacobian(self, out=None): self._evaluate_greybox_jacobians_and_cache_if_necessary() if out is not None: - if ( not isinstance(out, coo_matrix) - or out.shape[0] != self.n_constraints() - or out.shape[1] != self.n_primals() - or out.nnz != self.nnz_jacobian() ): + if ( + not isinstance(out, coo_matrix) + or out.shape[0] != self.n_constraints() + or out.shape[1] != self.n_primals() + or out.nnz != self.nnz_jacobian() + ): raise RuntimeError( 'evaluate_jacobian called with an "out" argument' ' that is invalid. This should be a coo_matrix with' - ' shape=({},{}) and nnz={}' - .format(self.n_constraints(), self.n_primals(), - self.nnz_jacobian())) + ' shape=({},{}) and nnz={}'.format( + self.n_constraints(), self.n_primals(), self.nnz_jacobian() + ) + ) n_pyomo_constraints = self.n_constraints() - self._n_greybox_constraints # to avoid an additional copy, we pass in a slice (numpy view) of the underlying # data, row, and col that we were handed to be populated in evaluate_jacobian self._pyomo_nlp.evaluate_jacobian( - out=coo_matrix((out.data[:-self._nnz_greybox_jac], - (out.row[:-self._nnz_greybox_jac], - out.col[:-self._nnz_greybox_jac])), - shape=(n_pyomo_constraints, self._pyomo_nlp.n_primals()))) - np.copyto(out.data[-self._nnz_greybox_jac:], - self._cached_greybox_jac.data) + out=coo_matrix( + ( + out.data[: -self._nnz_greybox_jac], + ( + out.row[: -self._nnz_greybox_jac], + out.col[: -self._nnz_greybox_jac], + ), + ), + shape=(n_pyomo_constraints, self._pyomo_nlp.n_primals()), + ) + ) + np.copyto(out.data[-self._nnz_greybox_jac :], self._cached_greybox_jac.data) return out else: base = self._pyomo_nlp.evaluate_jacobian() - base = coo_matrix((base.data, (base.row, base.col)), - shape=(base.shape[0], self.n_primals())) + base = coo_matrix( + (base.data, (base.row, base.col)), + shape=(base.shape[0], self.n_primals()), + ) - jac = BlockMatrix(2,1) + jac = BlockMatrix(2, 1) jac.set_block(0, 0, base) jac.set_block(1, 0, self._cached_greybox_jac) return jac.tocoo() # TODO: Doesn't this need a "shape" specification? - #return coo_matrix(( + # return coo_matrix(( # np.concatenate((base.data, self._cached_greybox_jac.data)), # ( np.concatenate((base.row, self._cached_greybox_jac.row)), # np.concatenate((base.col, self._cached_greybox_jac.col)) ) - #)) + # )) # overloaded from ExtendedNLP """ @@ -1026,46 +1052,65 @@ def _evaluate_greybox_hessians_and_cache_if_necessary(self): irow = np.concatenate(irow) jcol = np.concatenate(jcol) - self._cached_greybox_hess = coo_matrix( (data, (irow,jcol)), shape=(self.n_primals(), self.n_primals())) + self._cached_greybox_hess = coo_matrix( + (data, (irow, jcol)), shape=(self.n_primals(), self.n_primals()) + ) self._greybox_hess_cached = True def evaluate_hessian_lag(self, out=None): self._evaluate_greybox_hessians_and_cache_if_necessary() if out is not None: - if ( not isinstance(out, coo_matrix) - or out.shape[0] != self.n_primals() - or out.shape[1] != self.n_primals() - or out.nnz != self.nnz_hessian_lag() ): + if ( + not isinstance(out, coo_matrix) + or out.shape[0] != self.n_primals() + or out.shape[1] != self.n_primals() + or out.nnz != self.nnz_hessian_lag() + ): raise RuntimeError( 'evaluate_hessian_lag called with an "out" argument' ' that is invalid. This should be a coo_matrix with' - ' shape=({},{}) and nnz={}' - .format(self.n_primals(), self.n_primals(), - self.nnz_hessian())) + ' shape=({},{}) and nnz={}'.format( + self.n_primals(), self.n_primals(), self.nnz_hessian() + ) + ) # to avoid an additional copy, we pass in a slice (numpy view) of the underlying # data, row, and col that we were handed to be populated in evaluate_hessian_lag # the coo_matrix is simply a holder of the data, row, and col structures self._pyomo_nlp.evaluate_hessian_lag( - out=coo_matrix((out.data[:-self._nnz_greybox_hess], - (out.row[:-self._nnz_greybox_hess], - out.col[:-self._nnz_greybox_hess])), - shape=(self._pyomo_nlp.n_primals(), self._pyomo_nlp.n_primals()))) - np.copyto(out.data[-self._nnz_greybox_hess:], - self._cached_greybox_hess.data) + out=coo_matrix( + ( + out.data[: -self._nnz_greybox_hess], + ( + out.row[: -self._nnz_greybox_hess], + out.col[: -self._nnz_greybox_hess], + ), + ), + shape=(self._pyomo_nlp.n_primals(), self._pyomo_nlp.n_primals()), + ) + ) + np.copyto( + out.data[-self._nnz_greybox_hess :], self._cached_greybox_hess.data + ) return out else: hess = self._pyomo_nlp.evaluate_hessian_lag() data = np.concatenate((hess.data, self._cached_greybox_hess.data)) row = np.concatenate((hess.row, self._cached_greybox_hess.row)) col = np.concatenate((hess.col, self._cached_greybox_hess.col)) - hess = coo_matrix((data, (row, col)), shape=(self.n_primals(), self.n_primals())) + hess = coo_matrix( + (data, (row, col)), shape=(self.n_primals(), self.n_primals()) + ) return hess # overloaded from NLP def report_solver_status(self, status_code, status_message): raise NotImplementedError('Todo: implement this') - @deprecated(msg='This method has been replaced with primals_names', version='6.0.0.dev0', remove_in='6.0') + @deprecated( + msg='This method has been replaced with primals_names', + version='6.0.0', + remove_in='6.0', + ) def variable_names(self): return self.primals_names() @@ -1097,8 +1142,7 @@ def get_pyomo_variables(self): Return an ordered list of the Pyomo VarData objects in the order corresponding to the primals """ - return self._pyomo_nlp.get_pyomo_variables() + \ - self._greybox_primal_variables + return self._pyomo_nlp.get_pyomo_variables() + self._greybox_primal_variables def get_pyomo_constraints(self): """ @@ -1108,8 +1152,8 @@ def get_pyomo_constraints(self): # FIXME: what do we return for the external block constraints? # return self._pyomo_nlp.get_pyomo_constraints() raise NotImplementedError( - "returning list of all constraints when using an external " - "model is TBD") + "returning list of all constraints when using an external model is TBD" + ) def load_state_into_pyomo(self, bound_multipliers=None): primals = self.get_primals() @@ -1117,8 +1161,7 @@ def load_state_into_pyomo(self, bound_multipliers=None): for var, val in zip(variables, primals): var.set_value(val) m = self.pyomo_model() - model_suffixes = dict( - pyo.suffix.active_import_suffix_generator(m)) + model_suffixes = dict(pyo.suffix.active_import_suffix_generator(m)) if 'dual' in model_suffixes: model_suffixes['dual'].clear() # Until we sort out how to return the duals for the external @@ -1132,12 +1175,14 @@ def load_state_into_pyomo(self, bound_multipliers=None): model_suffixes['ipopt_zL_out'].clear() if bound_multipliers is not None: model_suffixes['ipopt_zL_out'].update( - zip(variables, bound_multipliers[0])) + zip(variables, bound_multipliers[0]) + ) if 'ipopt_zU_out' in model_suffixes: model_suffixes['ipopt_zU_out'].clear() if bound_multipliers is not None: model_suffixes['ipopt_zU_out'].update( - zip(variables, bound_multipliers[1])) + zip(variables, bound_multipliers[1]) + ) class _ExternalGreyBoxModelHelper(object): @@ -1176,29 +1221,41 @@ def __init__(self, ex_grey_box_block, vardata_to_idx, con_offset): # the indices in the full primals vector self._inputs_to_primals_map = np.fromiter( (vardata_to_idx[v] for v in self._block.inputs.values()), - dtype=np.int64, count=n_inputs) + dtype=np.int64, + count=n_inputs, + ) # store the map of output indices (0 .. n_outputs) to # the indices in the full primals vector self._outputs_to_primals_map = np.fromiter( (vardata_to_idx[v] for v in self._block.outputs.values()), - dtype=np.int64, count=n_outputs) - - if self._ex_model.n_outputs() == 0 and \ - self._ex_model.n_equality_constraints() == 0: + dtype=np.int64, + count=n_outputs, + ) + + if ( + self._ex_model.n_outputs() == 0 + and self._ex_model.n_equality_constraints() == 0 + ): raise ValueError( 'ExternalGreyBoxModel has no equality constraints ' - 'or outputs. It must have at least one or both.') + 'or outputs. It must have at least one or both.' + ) self._ex_eq_duals_to_full_map = None if n_eq_constraints > 0: - self._ex_eq_duals_to_full_map = \ - list(range(con_offset, con_offset + n_eq_constraints)) + self._ex_eq_duals_to_full_map = list( + range(con_offset, con_offset + n_eq_constraints) + ) self._ex_output_duals_to_full_map = None if n_outputs > 0: - self._ex_output_duals_to_full_map = \ - list(range(con_offset + n_eq_constraints, con_offset + n_eq_constraints + n_outputs)) + self._ex_output_duals_to_full_map = list( + range( + con_offset + n_eq_constraints, + con_offset + n_eq_constraints + n_outputs, + ) + ) # we need to change the column indices in the jacobian # from the 0..n_inputs provided by the external model @@ -1238,8 +1295,7 @@ def set_duals(self, duals): self._ex_model.set_output_constraint_multipliers(duals_outputs) def n_residuals(self): - return self._ex_model.n_equality_constraints() \ - + self._ex_model.n_outputs() + return self._ex_model.n_equality_constraints() + self._ex_model.n_outputs() def get_residual_scaling(self): eq_scaling = self._ex_model.get_equality_constraint_scaling_factors() @@ -1251,12 +1307,10 @@ def get_residual_scaling(self): if output_con_scaling is None: output_con_scaling = np.ones(self._ex_model.n_outputs()) - return np.concatenate(( - eq_scaling, - output_con_scaling)) + return np.concatenate((eq_scaling, output_con_scaling)) def evaluate_residuals(self): - # evalute the equality constraints and the output equations + # evaluate the equality constraints and the output equations # and return a single vector of residuals # returns residual for h(x)=0, where h(x) = [h_eq(x); h_o(x)-o] resid_list = [] @@ -1282,12 +1336,12 @@ def evaluate_jacobian(self): # The first time through, we won't have created the # mapping of external primals ('u') to the full space # primals ('x') - self._eq_jac_primal_jcol = self._inputs_to_primals_map[ - eq_jac.col] + self._eq_jac_primal_jcol = self._inputs_to_primals_map[eq_jac.col] # map the columns from the inputs "u" back to the full primals "x" eq_jac = coo_matrix( (eq_jac.data, (eq_jac.row, self._eq_jac_primal_jcol)), - (eq_jac.shape[0], self._n_primals)) + (eq_jac.shape[0], self._n_primals), + ) outputs_jac = None if self._ex_model.n_outputs() > 0: @@ -1300,13 +1354,18 @@ def evaluate_jacobian(self): # mapping of external outputs ('o') to the full space # primals ('x') self._outputs_jac_primal_jcol = self._inputs_to_primals_map[ - outputs_jac.col] - + outputs_jac.col + ] + # We also need tocreate the irow, jcol, nnz structure for the # output variable portion of h(u)-o=0 - self._additional_output_entries_irow = np.asarray(range(self._ex_model.n_outputs())) + self._additional_output_entries_irow = np.asarray( + range(self._ex_model.n_outputs()) + ) self._additional_output_entries_jcol = self._outputs_to_primals_map - self._additional_output_entries_data = -1.0*np.ones(self._ex_model.n_outputs()) + self._additional_output_entries_data = -1.0 * np.ones( + self._ex_model.n_outputs() + ) col = self._outputs_jac_primal_jcol data = outputs_jac.data @@ -1314,19 +1373,19 @@ def evaluate_jacobian(self): # add the additional entries for the -Po*x portion of the jacobian row = np.concatenate((row, self._additional_output_entries_irow)) col = np.concatenate((col, self._additional_output_entries_jcol)) - data = np.concatenate((data, self._additional_output_entries_data)) + data = np.concatenate((data, self._additional_output_entries_data)) outputs_jac = coo_matrix( - (data, (row, col)), - shape=(outputs_jac.shape[0], self._n_primals)) + (data, (row, col)), shape=(outputs_jac.shape[0], self._n_primals) + ) jac = None if eq_jac is not None: if outputs_jac is not None: # create a jacobian with both Jw_eq and Jw_o - jac = BlockMatrix(2,1) + jac = BlockMatrix(2, 1) jac.name = 'external model jacobian' - jac.set_block(0,0,eq_jac) - jac.set_block(1,0,outputs_jac) + jac.set_block(0, 0, eq_jac) + jac.set_block(1, 0, outputs_jac) else: assert self._ex_model.n_outputs() == 0 assert self._ex_model.n_equality_constraints() > 0 @@ -1358,8 +1417,10 @@ def evaluate_hessian(self): if self._eq_hess_jcol is None: # first time through, let's also check that it is lower triangular if np.any(eq_hess.row < eq_hess.col): - raise ValueError('ExternalGreyBoxModel must return lower ' - 'triangular portion of the Hessian only') + raise ValueError( + 'ExternalGreyBoxModel must return lower ' + 'triangular portion of the Hessian only' + ) # The first time through, we won't have created the # mapping of external primals ('u') to the full space @@ -1380,14 +1441,20 @@ def evaluate_hessian(self): if self._output_hess_irow is None: # first time through, let's also check that it is lower triangular if np.any(output_hess.row < output_hess.col): - raise ValueError('ExternalGreyBoxModel must return lower ' - 'triangular portion of the Hessian only') + raise ValueError( + 'ExternalGreyBoxModel must return lower ' + 'triangular portion of the Hessian only' + ) # The first time through, we won't have created the # mapping of external outputs ('o') to the full space # primals ('x') - self._output_hess_irow = row = self._inputs_to_primals_map[output_hess.row] - self._output_hess_jcol = col = self._inputs_to_primals_map[output_hess.col] + self._output_hess_irow = row = self._inputs_to_primals_map[ + output_hess.row + ] + self._output_hess_jcol = col = self._inputs_to_primals_map[ + output_hess.col + ] # mapping may have made this not lower triangular mask = col > row @@ -1400,7 +1467,6 @@ def evaluate_hessian(self): data = np.concatenate(data_list) irow = np.concatenate(irow_list) jcol = np.concatenate(jcol_list) - hess = coo_matrix( (data, (irow, jcol)), (self._n_primals, self._n_primals) ) + hess = coo_matrix((data, (irow, jcol)), (self._n_primals, self._n_primals)) return hess - diff --git a/pyomo/contrib/pynumero/interfaces/tests/compare_utils.py b/pyomo/contrib/pynumero/interfaces/tests/compare_utils.py index 0278b040274..d30cfb8f56a 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/compare_utils.py +++ b/pyomo/contrib/pynumero/interfaces/tests/compare_utils.py @@ -8,20 +8,32 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.contrib.pynumero.dependencies import (numpy as np, scipy) +from pyomo.contrib.pynumero.dependencies import numpy as np, scipy + def check_vectors_specific_order(tst, v1, v1order, v2, v2order, v1_v2_map=None): tst.assertEqual(len(v1), len(v1order)) tst.assertEqual(len(v2), len(v2order)) tst.assertEqual(len(v1), len(v2)) if v1_v2_map is None: - v2map = {s:v2order.index(s) for s in v1order} + v2map = {s: v2order.index(s) for s in v1order} else: - v2map = {s:v2order.index(v1_v2_map[s]) for s in v1order} - for i,s in enumerate(v1order): + v2map = {s: v2order.index(v1_v2_map[s]) for s in v1order} + for i, s in enumerate(v1order): tst.assertAlmostEqual(v1[i], v2[v2map[s]], places=7) -def check_sparse_matrix_specific_order(tst, m1, m1rows, m1cols, m2, m2rows, m2cols, m1_m2_rows_map=None, m1_m2_cols_map=None): + +def check_sparse_matrix_specific_order( + tst, + m1, + m1rows, + m1cols, + m2, + m2rows, + m2cols, + m1_m2_rows_map=None, + m1_m2_cols_map=None, +): tst.assertEqual(m1.shape[0], len(m1rows)) tst.assertEqual(m1.shape[1], len(m1cols)) tst.assertEqual(m2.shape[0], len(m2rows)) @@ -48,8 +60,8 @@ def check_sparse_matrix_specific_order(tst, m1, m1rows, m1cols, m2, m2rows, m2co for i in range(len(m1rows)): for j in range(len(m1cols)): - m2c[i,j] = m2d[rowmap[i], colmap[j]] + m2c[i, j] = m2d[rowmap[i], colmap[j]] for i in range(len(m1rows)): for j in range(len(m1cols)): - tst.assertAlmostEqual(m1c[i,j], m2c[i,j], places=7) + tst.assertAlmostEqual(m1c[i, j], m2c[i, j], places=7) diff --git a/pyomo/contrib/pynumero/interfaces/tests/external_grey_box_models.py b/pyomo/contrib/pynumero/interfaces/tests/external_grey_box_models.py index fe40b0c2746..1f2a5169857 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/external_grey_box_models.py +++ b/pyomo/contrib/pynumero/interfaces/tests/external_grey_box_models.py @@ -1,5 +1,8 @@ from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) from pyomo.common.dependencies.scipy import sparse as spa @@ -48,6 +51,7 @@ # [Pout - (P2 - 2*c*F^2] # h_o(u) = {empty} + # Model 4: Same as model 2, but treat P2 as an input to be converged by the solver # u = [Pin, c, F, P2] # o = [Pout] @@ -63,7 +67,7 @@ # P3 - (P2 - c*F^2); # ] # h_o(u) = [P3 - c*F^2] (or could also be [Pin - 4*c*F^2] or [P1 - 3*c*F^2] or [P2 - 2*c*F^2]) -# +# # Model 6: treat all variables as "inputs", equality only, and no output equations # u = [Pin, c, F, P1, P2, P3, Pout] # o = {empty} @@ -98,7 +102,7 @@ def evaluate_outputs(self): Pin = self._input_values[0] c = self._input_values[1] F = self._input_values[2] - Pout = Pin - 4*c*F**2 + Pout = Pin - 4 * c * F**2 return np.asarray([Pout], dtype=np.float64) def evaluate_jacobian_outputs(self): @@ -106,10 +110,11 @@ def evaluate_jacobian_outputs(self): F = self._input_values[2] irow = np.asarray([0, 0, 0], dtype=np.int64) jcol = np.asarray([0, 1, 2], dtype=np.int64) - nonzeros = np.asarray([1, -4*F**2, -4*c*2*F], dtype=np.float64) - jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(1,3)) + nonzeros = np.asarray([1, -4 * F**2, -4 * c * 2 * F], dtype=np.float64) + jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(1, 3)) return jac + class PressureDropSingleOutputWithHessian(PressureDropSingleOutput): def __init__(self): super(PressureDropSingleOutputWithHessian, self).__init__() @@ -123,10 +128,13 @@ def evaluate_hessian_outputs(self): F = self._input_values[2] irow = np.asarray([2, 2], dtype=np.int64) jcol = np.asarray([1, 2], dtype=np.int64) - data = self._output_con_mult_values[0]*np.asarray([-8*F, -8*c], dtype=np.float64) - hess = spa.coo_matrix((data, (irow, jcol)), shape=(3,3)) + data = self._output_con_mult_values[0] * np.asarray( + [-8 * F, -8 * c], dtype=np.float64 + ) + hess = spa.coo_matrix((data, (irow, jcol)), shape=(3, 3)) return hess + class PressureDropSingleEquality(ExternalGreyBoxModel): # u = [Pin, c, F, Pout] # o = {empty} @@ -152,17 +160,18 @@ def evaluate_equality_constraints(self): c = self._input_values[1] F = self._input_values[2] Pout = self._input_values[3] - return np.asarray([Pout - (Pin - 4*c*F**2)], dtype=np.float64) + return np.asarray([Pout - (Pin - 4 * c * F**2)], dtype=np.float64) def evaluate_jacobian_equality_constraints(self): c = self._input_values[1] F = self._input_values[2] irow = np.asarray([0, 0, 0, 0], dtype=np.int64) jcol = np.asarray([0, 1, 2, 3], dtype=np.int64) - nonzeros = np.asarray([-1, 4*F**2, 4*2*c*F, 1], dtype=np.float64) - jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(1,4)) + nonzeros = np.asarray([-1, 4 * F**2, 4 * 2 * c * F, 1], dtype=np.float64) + jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(1, 4)) return jac + class PressureDropSingleEqualityWithHessian(PressureDropSingleEquality): # u = [Pin, c, F, Pout] # o = {empty} @@ -181,10 +190,13 @@ def evaluate_hessian_equality_constraints(self): F = self._input_values[2] irow = np.asarray([2, 2], dtype=np.int64) jcol = np.asarray([1, 2], dtype=np.int64) - nonzeros = self._eq_con_mult_values[0]*np.asarray([8*F, 8*c], dtype=np.float64) - hess = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(4,4)) + nonzeros = self._eq_con_mult_values[0] * np.asarray( + [8 * F, 8 * c], dtype=np.float64 + ) + hess = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(4, 4)) return hess + class PressureDropTwoOutputs(ExternalGreyBoxModel): # u = [Pin, c, F] # o = [P2, Pout] @@ -213,8 +225,8 @@ def evaluate_outputs(self): Pin = self._input_values[0] c = self._input_values[1] F = self._input_values[2] - P2 = Pin - 2*c*F**2 - Pout = Pin - 4*c*F**2 + P2 = Pin - 2 * c * F**2 + Pout = Pin - 4 * c * F**2 return np.asarray([P2, Pout], dtype=np.float64) def evaluate_jacobian_outputs(self): @@ -222,10 +234,14 @@ def evaluate_jacobian_outputs(self): F = self._input_values[2] irow = np.asarray([0, 0, 0, 1, 1, 1], dtype=np.int64) jcol = np.asarray([0, 1, 2, 0, 1, 2], dtype=np.int64) - nonzeros = np.asarray([1, -2*F**2, -2*c*2*F, 1, -4*F**2, -4*c*2*F], dtype=np.float64) - jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(2,3)) + nonzeros = np.asarray( + [1, -2 * F**2, -2 * c * 2 * F, 1, -4 * F**2, -4 * c * 2 * F], + dtype=np.float64, + ) + jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(2, 3)) return jac + class PressureDropTwoOutputsWithHessian(PressureDropTwoOutputs): # u = [Pin, c, F] # o = [P2, Pout] @@ -233,7 +249,7 @@ class PressureDropTwoOutputsWithHessian(PressureDropTwoOutputs): # h_o(u) = [Pin - 2*c*F^2] # [Pin - 4*c*F^2] def __init__(self): - super(PressureDropTwoOutputsWithHessian,self).__init__() + super(PressureDropTwoOutputsWithHessian, self).__init__() self._output_con_mult_values = np.zeros(2, dtype=np.float64) def set_output_constraint_multipliers(self, output_con_multiplier_values): @@ -246,10 +262,14 @@ def evaluate_hessian_outputs(self): y2 = self._output_con_mult_values[1] irow = np.asarray([2, 2], dtype=np.int64) jcol = np.asarray([1, 2], dtype=np.int64) - nonzeros = np.asarray([y1*(-4*F) + y2*(-8*F), y1*(-4*c)+y2*(-8*c)], dtype=np.float64) - hess = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(3,3)) + nonzeros = np.asarray( + [y1 * (-4 * F) + y2 * (-8 * F), y1 * (-4 * c) + y2 * (-8 * c)], + dtype=np.float64, + ) + hess = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(3, 3)) return hess + class PressureDropTwoEqualities(ExternalGreyBoxModel): # u = [Pin, c, F, P2, Pout] # o = {empty} @@ -277,17 +297,24 @@ def evaluate_equality_constraints(self): F = self._input_values[2] P2 = self._input_values[3] Pout = self._input_values[4] - return np.asarray([P2 - (Pin - 2*c*F**2), Pout - (P2 - 2*c*F**2)], dtype=np.float64) + return np.asarray( + [P2 - (Pin - 2 * c * F**2), Pout - (P2 - 2 * c * F**2)], + dtype=np.float64, + ) def evaluate_jacobian_equality_constraints(self): c = self._input_values[1] F = self._input_values[2] irow = np.asarray([0, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) jcol = np.asarray([0, 1, 2, 3, 1, 2, 3, 4], dtype=np.int64) - nonzeros = np.asarray([-1, 2*F**2, 2*2*c*F, 1, 2*F**2, 2*2*c*F, -1, 1], dtype=np.float64) - jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(2,5)) + nonzeros = np.asarray( + [-1, 2 * F**2, 2 * 2 * c * F, 1, 2 * F**2, 2 * 2 * c * F, -1, 1], + dtype=np.float64, + ) + jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(2, 5)) return jac + class PressureDropTwoEqualitiesWithHessian(PressureDropTwoEqualities): # u = [Pin, c, F, P2, Pout] # o = {empty} @@ -295,7 +322,7 @@ class PressureDropTwoEqualitiesWithHessian(PressureDropTwoEqualities): # [Pout - (P2 - 2*c*F^2] # h_o(u) = {empty} def __init__(self): - super(PressureDropTwoEqualitiesWithHessian,self).__init__() + super(PressureDropTwoEqualitiesWithHessian, self).__init__() self._eq_con_mult_values = np.zeros(2, dtype=np.float64) def set_equality_constraint_multipliers(self, eq_con_multiplier_values): @@ -310,10 +337,13 @@ def evaluate_hessian_equality_constraints(self): irow = np.asarray([2, 2], dtype=np.int64) jcol = np.asarray([1, 2], dtype=np.int64) - nonzeros = np.asarray([y1*(4*F) + y2*(4*F), y1*(4*c)+y2*(4*c)], dtype=np.float64) - hess = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(5,5)) + nonzeros = np.asarray( + [y1 * (4 * F) + y2 * (4 * F), y1 * (4 * c) + y2 * (4 * c)], dtype=np.float64 + ) + hess = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(5, 5)) return hess + class PressureDropTwoEqualitiesTwoOutputs(ExternalGreyBoxModel): # u = [Pin, c, F, P1, P3] # o = {P2, Pout} @@ -346,22 +376,26 @@ def evaluate_equality_constraints(self): F = self._input_values[2] P1 = self._input_values[3] P3 = self._input_values[4] - return np.asarray([P1 - (Pin - c*F**2), P3 - (P1 - 2*c*F**2)], dtype=np.float64) + return np.asarray( + [P1 - (Pin - c * F**2), P3 - (P1 - 2 * c * F**2)], dtype=np.float64 + ) def evaluate_outputs(self): Pin = self._input_values[0] c = self._input_values[1] F = self._input_values[2] P1 = self._input_values[3] - return np.asarray([P1 - c*F**2, Pin - 4*c*F**2], dtype=np.float64) + return np.asarray([P1 - c * F**2, Pin - 4 * c * F**2], dtype=np.float64) def evaluate_jacobian_equality_constraints(self): c = self._input_values[1] F = self._input_values[2] irow = np.asarray([0, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) jcol = np.asarray([0, 1, 2, 3, 1, 2, 3, 4], dtype=np.int64) - nonzeros = np.asarray([-1, F**2, 2*c*F, 1, 2*F**2, 4*c*F, -1, 1], dtype=np.float64) - jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(2,5)) + nonzeros = np.asarray( + [-1, F**2, 2 * c * F, 1, 2 * F**2, 4 * c * F, -1, 1], dtype=np.float64 + ) + jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(2, 5)) return jac def evaluate_jacobian_outputs(self): @@ -369,12 +403,16 @@ def evaluate_jacobian_outputs(self): F = self._input_values[2] irow = np.asarray([0, 0, 0, 1, 1, 1], dtype=np.int64) jcol = np.asarray([1, 2, 3, 0, 1, 2], dtype=np.int64) - nonzeros = np.asarray([-F**2, -c*2*F, 1, 1, -4*F**2, -4*c*2*F], dtype=np.float64) - jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(2,5)) + nonzeros = np.asarray( + [-(F**2), -c * 2 * F, 1, 1, -4 * F**2, -4 * c * 2 * F], dtype=np.float64 + ) + jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(2, 5)) return jac -class PressureDropTwoEqualitiesTwoOutputsWithHessian(PressureDropTwoEqualitiesTwoOutputs): +class PressureDropTwoEqualitiesTwoOutputsWithHessian( + PressureDropTwoEqualitiesTwoOutputs +): # u = [Pin, c, F, P1, P3] # o = {P2, Pout} # h_eq(u) = [P1 - (Pin - c*F^2] @@ -382,7 +420,7 @@ class PressureDropTwoEqualitiesTwoOutputsWithHessian(PressureDropTwoEqualitiesTw # h_o(u) = [P1 - c*F^2] # [Pin - 4*c*F^2] def __init__(self): - super(PressureDropTwoEqualitiesTwoOutputsWithHessian,self).__init__() + super(PressureDropTwoEqualitiesTwoOutputsWithHessian, self).__init__() self._eq_con_mult_values = np.zeros(2, dtype=np.float64) self._output_con_mult_values = np.zeros(2, dtype=np.float64) @@ -401,8 +439,10 @@ def evaluate_hessian_equality_constraints(self): y2 = self._eq_con_mult_values[1] irow = np.asarray([2, 2], dtype=np.int64) jcol = np.asarray([1, 2], dtype=np.int64) - nonzeros = np.asarray([y1*(2*F) + y2*(4*F), y1*(2*c)+y2*(4*c)], dtype=np.float64) - hess = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(5,5)) + nonzeros = np.asarray( + [y1 * (2 * F) + y2 * (4 * F), y1 * (2 * c) + y2 * (4 * c)], dtype=np.float64 + ) + hess = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(5, 5)) return hess def evaluate_hessian_outputs(self): @@ -412,25 +452,36 @@ def evaluate_hessian_outputs(self): y2 = self._output_con_mult_values[1] irow = np.asarray([2, 2], dtype=np.int64) jcol = np.asarray([1, 2], dtype=np.int64) - nonzeros = np.asarray([y1*(-2*F) + y2*(-8*F), y1*(-2*c)+y2*(-8*c)], dtype=np.float64) - hess = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(5,5)) + nonzeros = np.asarray( + [y1 * (-2 * F) + y2 * (-8 * F), y1 * (-2 * c) + y2 * (-8 * c)], + dtype=np.float64, + ) + hess = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(5, 5)) return hess + class PressureDropTwoEqualitiesTwoOutputsScaleBoth(PressureDropTwoEqualitiesTwoOutputs): def get_equality_constraint_scaling_factors(self): return np.asarray([3.1, 3.2], dtype=np.float64) - + def get_output_constraint_scaling_factors(self): return np.asarray([4.1, 4.2]) -class PressureDropTwoEqualitiesTwoOutputsScaleEqualities(PressureDropTwoEqualitiesTwoOutputs): + +class PressureDropTwoEqualitiesTwoOutputsScaleEqualities( + PressureDropTwoEqualitiesTwoOutputs +): def get_equality_constraint_scaling_factors(self): return np.asarray([3.1, 3.2], dtype=np.float64) -class PressureDropTwoEqualitiesTwoOutputsScaleOutputs(PressureDropTwoEqualitiesTwoOutputs): + +class PressureDropTwoEqualitiesTwoOutputsScaleOutputs( + PressureDropTwoEqualitiesTwoOutputs +): def get_output_constraint_scaling_factors(self): return np.asarray([4.1, 4.2]) + class OneOutput(ExternalGreyBoxModel): def __init__(self): self._input_names = ['u'] @@ -458,13 +509,13 @@ def set_input_values(self, input_values): self._u = input_values[0] def evaluate_outputs(self): - return np.asarray([5*self._u]) + return np.asarray([5 * self._u]) def evaluate_jacobian_outputs(self): irow = np.asarray([0], dtype=np.int64) jcol = np.asarray([0], dtype=np.int64) nonzeros = np.asarray([5.0], dtype=np.float64) - jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(1,1)) + jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(1, 1)) return jac def set_output_constraint_multipliers(self, output_con_multiplier_values): @@ -475,9 +526,10 @@ def evaluate_hessian_outputs(self): irow = np.asarray([], dtype=np.int64) jcol = np.asarray([], dtype=np.int64) data = np.asarray([], dtype=np.float64) - hess = spa.coo_matrix((data, (irow, jcol)), shape=(1,1)) + hess = spa.coo_matrix((data, (irow, jcol)), shape=(1, 1)) return hess + class OneOutputOneEquality(ExternalGreyBoxModel): def __init__(self): self._input_names = ['u'] @@ -508,20 +560,20 @@ def evaluate_equality_constraints(self): return np.asarray([self._u**2 - 1]) def evaluate_outputs(self): - return np.asarray([5*self._u]) + return np.asarray([5 * self._u]) def evaluate_jacobian_equality_constraints(self): irow = np.asarray([0], dtype=np.int64) jcol = np.asarray([0], dtype=np.int64) - nonzeros = np.asarray([2*self._u], dtype=np.float64) - jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(1,1)) + nonzeros = np.asarray([2 * self._u], dtype=np.float64) + jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(1, 1)) return jac def evaluate_jacobian_outputs(self): irow = np.asarray([0], dtype=np.int64) jcol = np.asarray([0], dtype=np.int64) nonzeros = np.asarray([5.0], dtype=np.float64) - jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(1,1)) + jac = spa.coo_matrix((nonzeros, (irow, jcol)), shape=(1, 1)) return jac def set_equality_constraint_multipliers(self, equality_con_multiplier_values): @@ -535,13 +587,13 @@ def set_output_constraint_multipliers(self, output_con_multiplier_values): def evaluate_hessian_equality_constraints(self): irow = np.asarray([0], dtype=np.int64) jcol = np.asarray([0], dtype=np.int64) - data = np.asarray([self._equality_mult*2.0], dtype=np.float64) - hess = spa.coo_matrix((data, (irow, jcol)), shape=(1,1)) + data = np.asarray([self._equality_mult * 2.0], dtype=np.float64) + hess = spa.coo_matrix((data, (irow, jcol)), shape=(1, 1)) return hess def evaluate_hessian_outputs(self): irow = np.asarray([], dtype=np.int64) jcol = np.asarray([], dtype=np.int64) data = np.asarray([], dtype=np.float64) - hess = spa.coo_matrix((data, (irow, jcol)), shape=(1,1)) + hess = spa.coo_matrix((data, (irow, jcol)), shape=(1, 1)) return hess diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_cyipopt_interface.py b/pyomo/contrib/pynumero/interfaces/tests/test_cyipopt_interface.py new file mode 100644 index 00000000000..2c5d8ff7e4e --- /dev/null +++ b/pyomo/contrib/pynumero/interfaces/tests/test_cyipopt_interface.py @@ -0,0 +1,92 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest + +from pyomo.contrib.pynumero.dependencies import ( + numpy as np, + numpy_available, + scipy_available, +) + +if not (numpy_available and scipy_available): + raise unittest.SkipTest("Pynumero needs scipy and numpy to run CyIpopt tests") + +from pyomo.contrib.pynumero.asl import AmplInterface + +if not AmplInterface.available(): + raise unittest.SkipTest("Pynumero needs the ASL extension to run CyIpopt tests") + +from pyomo.contrib.pynumero.interfaces.cyipopt_interface import ( + cyipopt_available, + CyIpoptProblemInterface, +) + +if not cyipopt_available: + raise unittest.SkipTest("CyIpopt is not available") + + +class TestSubclassCyIpoptInterface(unittest.TestCase): + def test_subclass_no_init(self): + class MyCyIpoptProblem(CyIpoptProblemInterface): + def __init__(self): + # This subclass implements __init__ but does not call + # super().__init__ + pass + + def x_init(self): + pass + + def x_lb(self): + pass + + def x_ub(self): + pass + + def g_lb(self): + pass + + def g_ub(self): + pass + + def scaling_factors(self): + pass + + def objective(self, x): + pass + + def gradient(self, x): + pass + + def constraints(self, x): + pass + + def jacobianstructure(self): + pass + + def jacobian(self, x): + pass + + def hessianstructure(self): + pass + + def hessian(self, x, y, obj_factor): + pass + + problem = MyCyIpoptProblem() + x0 = [] + msg = "__init__ has not been called" + with self.assertRaisesRegex(RuntimeError, msg): + problem.solve(x0) + + +if __name__ == "__main__": + unittest.main() diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_dynamic_model.py b/pyomo/contrib/pynumero/interfaces/tests/test_dynamic_model.py index 0f23af10565..ddd56afb5b4 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_dynamic_model.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_dynamic_model.py @@ -15,7 +15,10 @@ import math from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) from pyomo.common.dependencies.scipy import sparse as spa @@ -23,54 +26,71 @@ raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): raise unittest.SkipTest( - "Pynumero needs the ASL extension to run CyIpoptSolver tests") + "Pynumero needs the ASL extension to run CyIpoptSolver tests" + ) -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - CyIpoptSolver, CyIpoptNLP, cyipopt_available, +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptSolver +from pyomo.contrib.pynumero.interfaces.cyipopt_interface import ( + CyIpoptNLP, + cyipopt_available, ) -from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxModel, ExternalGreyBoxBlock +from pyomo.contrib.pynumero.interfaces.external_grey_box import ( + ExternalGreyBoxModel, + ExternalGreyBoxBlock, +) from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoGreyBoxNLP, PyomoNLP -from pyomo.contrib.pynumero.interfaces.pyomo_grey_box_nlp import PyomoNLPWithGreyBoxBlocks +from pyomo.contrib.pynumero.interfaces.pyomo_grey_box_nlp import ( + PyomoNLPWithGreyBoxBlocks, +) + +from pyomo.contrib.pynumero.interfaces.tests.compare_utils import ( + check_vectors_specific_order, + check_sparse_matrix_specific_order, +) -from pyomo.contrib.pynumero.interfaces.tests.compare_utils import check_vectors_specific_order, check_sparse_matrix_specific_order def create_pyomo_model(A1, A2, c1, c2, N, dt): m = pyo.ConcreteModel() - + # timesteps m.T = pyo.Set(initialize=list(range(N)), ordered=True) m.Tu = pyo.Set(initialize=list(range(N))[1:], ordered=True) - + # inputs (controls) - m.F1 = pyo.Var(m.Tu, bounds=(0,None), initialize={t:1+0.1*t for t in m.Tu}) - m.F2 = pyo.Var(m.Tu, bounds=(0,None), initialize={t:2+0.1*t for t in m.Tu}) + m.F1 = pyo.Var(m.Tu, bounds=(0, None), initialize={t: 1 + 0.1 * t for t in m.Tu}) + m.F2 = pyo.Var(m.Tu, bounds=(0, None), initialize={t: 2 + 0.1 * t for t in m.Tu}) # state variables - m.h1 = pyo.Var(m.T, bounds=(0,None), initialize={t:3+0.1*t for t in m.T}) - m.h2 = pyo.Var(m.T, bounds=(0,None), initialize={t:4+0.1*t for t in m.T}) + m.h1 = pyo.Var(m.T, bounds=(0, None), initialize={t: 3 + 0.1 * t for t in m.T}) + m.h2 = pyo.Var(m.T, bounds=(0, None), initialize={t: 4 + 0.1 * t for t in m.T}) # algebraics (outputs) - m.F12 = pyo.Var(m.T, bounds=(0,None), initialize={t:5+0.1*t for t in m.T}) - m.Fo = pyo.Var(m.T, bounds=(0,None), initialize={t:6+0.1*t for t in m.T}) + m.F12 = pyo.Var(m.T, bounds=(0, None), initialize={t: 5 + 0.1 * t for t in m.T}) + m.Fo = pyo.Var(m.T, bounds=(0, None), initialize={t: 6 + 0.1 * t for t in m.T}) @m.Constraint(m.Tu) def h1bal(m, t): - return (m.h1[t] - m.h1[t-1]) - dt/A1 * (m.F1[t] - c1*pyo.sqrt(m.h1[t])) == 0 + return (m.h1[t] - m.h1[t - 1]) - dt / A1 * ( + m.F1[t] - c1 * pyo.sqrt(m.h1[t]) + ) == 0 @m.Constraint(m.Tu) def h2bal(m, t): - return (m.h2[t] - m.h2[t-1]) - dt/A2 * (c1*pyo.sqrt(m.h1[t]) + m.F2[t] - c2*pyo.sqrt(m.h2[t])) == 0 + return (m.h2[t] - m.h2[t - 1]) - dt / A2 * ( + c1 * pyo.sqrt(m.h1[t]) + m.F2[t] - c2 * pyo.sqrt(m.h2[t]) + ) == 0 @m.Constraint(m.T) def F12con(m, t): - return c1*pyo.sqrt(m.h1[t]) - m.F12[t] == 0 + return c1 * pyo.sqrt(m.h1[t]) - m.F12[t] == 0 @m.Constraint(m.T) def Focon(m, t): - return c2*pyo.sqrt(m.h2[t]) - m.Fo[t] == 0 + return c2 * pyo.sqrt(m.h2[t]) - m.Fo[t] == 0 @m.Constraint(m.Tu) def min_inflow(m, t): @@ -80,10 +100,12 @@ def min_inflow(m, t): def max_outflow(m, t): return m.Fo[t] <= 4.5 - m.h10 = pyo.Constraint( expr=m.h1[m.T.first()] == 1.5 ) - m.h20 = pyo.Constraint( expr=m.h2[m.T.first()] == 0.5 ) - m.obj = pyo.Objective( expr= sum((m.h1[t]-1.0)**2 + (m.h2[t]-1.5)**2 for t in m.T) ) - + m.h10 = pyo.Constraint(expr=m.h1[m.T.first()] == 1.5) + m.h20 = pyo.Constraint(expr=m.h2[m.T.first()] == 0.5) + m.obj = pyo.Objective( + expr=sum((m.h1[t] - 1.0) ** 2 + (m.h2[t] - 1.5) ** 2 for t in m.T) + ) + return m @@ -95,18 +117,20 @@ def __init__(self, A1, A2, c1, c2, N, dt): self._c2 = c2 self._N = N self._dt = dt - self._input_names = ['F1_{}'.format(t) for t in range(1,N)] - self._input_names.extend(['F2_{}'.format(t) for t in range(1,N)]) - self._input_names.extend(['h1_{}'.format(t) for t in range(0,N)]) - self._input_names.extend(['h2_{}'.format(t) for t in range(0,N)]) - self._output_names = ['F12_{}'.format(t) for t in range(0,N)] - self._output_names.extend(['Fo_{}'.format(t) for t in range(0,N)]) - self._equality_constraint_names = ['h1bal_{}'.format(t) for t in range(1,N)] - self._equality_constraint_names.extend(['h2bal_{}'.format(t) for t in range(1,N)]) + self._input_names = ['F1_{}'.format(t) for t in range(1, N)] + self._input_names.extend(['F2_{}'.format(t) for t in range(1, N)]) + self._input_names.extend(['h1_{}'.format(t) for t in range(0, N)]) + self._input_names.extend(['h2_{}'.format(t) for t in range(0, N)]) + self._output_names = ['F12_{}'.format(t) for t in range(0, N)] + self._output_names.extend(['Fo_{}'.format(t) for t in range(0, N)]) + self._equality_constraint_names = ['h1bal_{}'.format(t) for t in range(1, N)] + self._equality_constraint_names.extend( + ['h2bal_{}'.format(t) for t in range(1, N)] + ) # inputs - self._F1 = np.zeros(N) # we don't use the first one - self._F2 = np.zeros(N) # we don't use the first one + self._F1 = np.zeros(N) # we don't use the first one + self._F2 = np.zeros(N) # we don't use the first one self._h1 = np.zeros(N) self._h2 = np.zeros(N) @@ -115,8 +139,8 @@ def __init__(self, A1, A2, c1, c2, N, dt): self._Fo = np.zeros(N) # multipliers - self._eq_con_mult_values = np.ones(2*(N-1)) - self._output_con_mult_values = np.ones(2*N) + self._eq_con_mult_values = np.ones(2 * (N - 1)) + self._output_con_mult_values = np.ones(2 * N) def input_names(self): return self._input_names @@ -138,18 +162,18 @@ def finalize_block_construction(self, pyomo_block): def set_input_values(self, input_values): N = self._N - assert len(input_values) == 4*N-2 - self._F1[1:self._N] = np.copy(input_values[:N-1]) - self._F2[1:self._N] = np.copy(input_values[N-1:2*N-2]) - self._h1 = np.copy(input_values[2*N-2:3*N-2]) - self._h2 = np.copy(input_values[3*N-2:4*N-2]) + assert len(input_values) == 4 * N - 2 + self._F1[1 : self._N] = np.copy(input_values[: N - 1]) + self._F2[1 : self._N] = np.copy(input_values[N - 1 : 2 * N - 2]) + self._h1 = np.copy(input_values[2 * N - 2 : 3 * N - 2]) + self._h2 = np.copy(input_values[3 * N - 2 : 4 * N - 2]) def set_equality_constraint_multipliers(self, eq_con_multiplier_values): - assert len(eq_con_multiplier_values) == 2*(self._N-1) + assert len(eq_con_multiplier_values) == 2 * (self._N - 1) np.copyto(self._eq_con_mult_values, eq_con_multiplier_values) - + def set_output_constraint_multipliers(self, output_con_multiplier_values): - assert len(output_con_multiplier_values) == 2*self._N + assert len(output_con_multiplier_values) == 2 * self._N np.copyto(self._output_con_mult_values, output_con_multiplier_values) def evaluate_equality_constraints(self): @@ -158,30 +182,32 @@ def evaluate_equality_constraints(self): F2 = self._F2 h1 = self._h1 h2 = self._h2 - - resid = np.zeros(2*(N-1)) - for t in range(1,N): - resid[t-1] = (h1[t]-h1[t-1]) - \ - self._dt/self._A1*(F1[t] - self._c1*math.sqrt(h1[t])) + resid = np.zeros(2 * (N - 1)) + + for t in range(1, N): + resid[t - 1] = (h1[t] - h1[t - 1]) - self._dt / self._A1 * ( + F1[t] - self._c1 * math.sqrt(h1[t]) + ) - for t in range(1,N): - resid[t-2+N] = (h2[t]-h2[t-1]) - \ - self._dt/self._A2*(self._c1*math.sqrt(h1[t]) + F2[t] - self._c2*math.sqrt(h2[t])) + for t in range(1, N): + resid[t - 2 + N] = (h2[t] - h2[t - 1]) - self._dt / self._A2 * ( + self._c1 * math.sqrt(h1[t]) + F2[t] - self._c2 * math.sqrt(h2[t]) + ) return resid def evaluate_outputs(self): N = self._N h1 = self._h1 h2 = self._h2 - - resid = np.zeros(2*N) + + resid = np.zeros(2 * N) for t in range(N): - resid[t] = self._c1*math.sqrt(h1[t]) + resid[t] = self._c1 * math.sqrt(h1[t]) for t in range(N): - resid[t+N] = self._c2*math.sqrt(h2[t]) + resid[t + N] = self._c2 * math.sqrt(h2[t]) return resid @@ -197,46 +223,48 @@ def evaluate_jacobian_equality_constraints(self): c2 = self._c2 dt = self._dt - nnz = 3*(N-1)+ 4*(N-1) + nnz = 3 * (N - 1) + 4 * (N - 1) irow = np.zeros(nnz, dtype=np.int64) jcol = np.zeros(nnz, dtype=np.int64) data = np.zeros(nnz, dtype=np.float64) idx = 0 # Jac h1bal - for i in range(N-1): + for i in range(N - 1): irow[idx] = i jcol[idx] = i - data[idx] = -dt/A1 + data[idx] = -dt / A1 idx += 1 irow[idx] = i - jcol[idx] = 2*(N-1)+i + jcol[idx] = 2 * (N - 1) + i data[idx] = -1 idx += 1 irow[idx] = i - jcol[idx] = 2*(N-1)+i+1 - data[idx] = 1+dt/A1*c1*1/2*(h1[i+1])**(-0.5) + jcol[idx] = 2 * (N - 1) + i + 1 + data[idx] = 1 + dt / A1 * c1 * 1 / 2 * (h1[i + 1]) ** (-0.5) idx += 1 # Jac h2bal - for i in range(N-1): - irow[idx] = i+(N-1) - jcol[idx] = i+(N-1) - data[idx] = -dt/A2 + for i in range(N - 1): + irow[idx] = i + (N - 1) + jcol[idx] = i + (N - 1) + data[idx] = -dt / A2 idx += 1 - irow[idx] = i+(N-1) - jcol[idx] = 2*(N-1)+i+1 - data[idx] = -dt/A2*c1*1/2*(h1[i+1])**(-0.5) + irow[idx] = i + (N - 1) + jcol[idx] = 2 * (N - 1) + i + 1 + data[idx] = -dt / A2 * c1 * 1 / 2 * (h1[i + 1]) ** (-0.5) idx += 1 - irow[idx] = i+(N-1) - jcol[idx] = 2*(N-1)+N+i + irow[idx] = i + (N - 1) + jcol[idx] = 2 * (N - 1) + N + i data[idx] = -1 idx += 1 - irow[idx] = i+(N-1) - jcol[idx] = 2*(N-1)+N+i+1 - data[idx] = 1+dt/A2*c2*1/2*(h2[i+1])**(-0.5) + irow[idx] = i + (N - 1) + jcol[idx] = 2 * (N - 1) + N + i + 1 + data[idx] = 1 + dt / A2 * c2 * 1 / 2 * (h2[i + 1]) ** (-0.5) idx += 1 assert idx == nnz - return spa.coo_matrix( (data, (irow,jcol)), shape=(2*(N-1), 2*(N-1)+2*N) ) + return spa.coo_matrix( + (data, (irow, jcol)), shape=(2 * (N - 1), 2 * (N - 1) + 2 * N) + ) def evaluate_jacobian_outputs(self): N = self._N @@ -250,7 +278,7 @@ def evaluate_jacobian_outputs(self): c2 = self._c2 dt = self._dt - nnz = 2*N + nnz = 2 * N irow = np.zeros(nnz, dtype=np.int64) jcol = np.zeros(nnz, dtype=np.int64) data = np.zeros(nnz, dtype=np.float64) @@ -258,17 +286,17 @@ def evaluate_jacobian_outputs(self): # Jac F12 for i in range(N): irow[idx] = i - jcol[idx] = 2*(N-1)+i - data[idx] = 1/2*c1*h1[i]**(-0.5) + jcol[idx] = 2 * (N - 1) + i + data[idx] = 1 / 2 * c1 * h1[i] ** (-0.5) idx += 1 for i in range(N): - irow[idx] = N+i - jcol[idx] = 2*(N-1)+N+i - data[idx] = 1/2*c2*h2[i]**(-0.5) + irow[idx] = N + i + jcol[idx] = 2 * (N - 1) + N + i + data[idx] = 1 / 2 * c2 * h2[i] ** (-0.5) idx += 1 assert idx == nnz - return spa.coo_matrix( (data, (irow,jcol)), shape=(2*N, 2*(N-1)+2*N) ) + return spa.coo_matrix((data, (irow, jcol)), shape=(2 * N, 2 * (N - 1) + 2 * N)) def evaluate_hessian_equality_constraints(self): N = self._N @@ -282,26 +310,30 @@ def evaluate_hessian_equality_constraints(self): c2 = self._c2 dt = self._dt lam = self._eq_con_mult_values - - nnz = 2*(N-1) + + nnz = 2 * (N - 1) irow = np.zeros(nnz, dtype=np.int64) jcol = np.zeros(nnz, dtype=np.int64) data = np.zeros(nnz, dtype=np.float64) idx = 0 - for i in range(N-1): - irow[idx] = 2*(N-1)+i+1 - jcol[idx] = 2*(N-1)+i+1 - data[idx] = lam[i]*dt/A1*(-c1/4)*h1[i+1]**(-1.5) + lam[(N-1)+i]*dt/A2*(c1/4)*h1[i+1]**(-1.5) + for i in range(N - 1): + irow[idx] = 2 * (N - 1) + i + 1 + jcol[idx] = 2 * (N - 1) + i + 1 + data[idx] = lam[i] * dt / A1 * (-c1 / 4) * h1[i + 1] ** (-1.5) + lam[ + (N - 1) + i + ] * dt / A2 * (c1 / 4) * h1[i + 1] ** (-1.5) idx += 1 - irow[idx] = 2*(N-1)+N+i+1 - jcol[idx] = 2*(N-1)+N+i+1 - data[idx] = lam[(N-1)+i]*dt/A2*(-c2/4)*h2[i+1]**(-1.5) + irow[idx] = 2 * (N - 1) + N + i + 1 + jcol[idx] = 2 * (N - 1) + N + i + 1 + data[idx] = lam[(N - 1) + i] * dt / A2 * (-c2 / 4) * h2[i + 1] ** (-1.5) idx += 1 assert idx == nnz - hess = spa.coo_matrix( (data, (irow,jcol)), shape=(2*(N-1)+2*N, 2*(N-1)+2*N) ) + hess = spa.coo_matrix( + (data, (irow, jcol)), shape=(2 * (N - 1) + 2 * N, 2 * (N - 1) + 2 * N) + ) return hess - + def evaluate_hessian_outputs(self): N = self._N F1 = self._F1 @@ -314,8 +346,8 @@ def evaluate_hessian_outputs(self): c2 = self._c2 dt = self._dt lam = self._output_con_mult_values - - nnz = 2*N + + nnz = 2 * N irow = np.zeros(nnz, dtype=np.int64) jcol = np.zeros(nnz, dtype=np.int64) data = np.zeros(nnz, dtype=np.float64) @@ -323,21 +355,24 @@ def evaluate_hessian_outputs(self): # Hess F12_t for i in range(N): - irow[idx] = 2*(N-1)+i - jcol[idx] = 2*(N-1)+i - data[idx] = lam[i]*c1*(-1/4)*h1[i]**(-1.5) + irow[idx] = 2 * (N - 1) + i + jcol[idx] = 2 * (N - 1) + i + data[idx] = lam[i] * c1 * (-1 / 4) * h1[i] ** (-1.5) idx += 1 # Hess Fo_t for i in range(N): - irow[idx] = 2*(N-1)+N+i - jcol[idx] = 2*(N-1)+N+i - data[idx] = lam[N+i]*c2*(-1/4)*h2[i]**(-1.5) + irow[idx] = 2 * (N - 1) + N + i + jcol[idx] = 2 * (N - 1) + N + i + data[idx] = lam[N + i] * c2 * (-1 / 4) * h2[i] ** (-1.5) idx += 1 assert idx == nnz - hess = spa.coo_matrix( (data, (irow,jcol)), shape=(2*(N-1)+2*N, 2*(N-1)+2*N) ) + hess = spa.coo_matrix( + (data, (irow, jcol)), shape=(2 * (N - 1) + 2 * N, 2 * (N - 1) + 2 * N) + ) return hess + def create_pyomo_external_grey_box_model(A1, A2, c1, c2, N, dt): m2 = pyo.ConcreteModel() m2.T = pyo.Set(initialize=list(range(N)), ordered=True) @@ -347,13 +382,13 @@ def create_pyomo_external_grey_box_model(A1, A2, c1, c2, N, dt): # initialize the same as the pyomo model for t in m2.Tu: - m2.egb.inputs['F1_{}'.format(t)].value = 1+0.1*t - m2.egb.inputs['F2_{}'.format(t)].value = 2+0.1*t + m2.egb.inputs['F1_{}'.format(t)].value = 1 + 0.1 * t + m2.egb.inputs['F2_{}'.format(t)].value = 2 + 0.1 * t for t in m2.T: - m2.egb.inputs['h1_{}'.format(t)].value = 3+0.1*t - m2.egb.inputs['h2_{}'.format(t)].value = 4+0.1*t - m2.egb.outputs['F12_{}'.format(t)].value = 5+0.1*t - m2.egb.outputs['Fo_{}'.format(t)].value = 6+0.1*t + m2.egb.inputs['h1_{}'.format(t)].value = 3 + 0.1 * t + m2.egb.inputs['h2_{}'.format(t)].value = 4 + 0.1 * t + m2.egb.outputs['F12_{}'.format(t)].value = 5 + 0.1 * t + m2.egb.outputs['Fo_{}'.format(t)].value = 6 + 0.1 * t @m2.Constraint(m2.Tu) def min_inflow(m, t): @@ -365,18 +400,24 @@ def max_outflow(m, t): Fo_t = m.egb.outputs['Fo_{}'.format(t)] return Fo_t <= 4.5 - m2.h10 = pyo.Constraint( expr=m2.egb.inputs['h1_0'] == 1.5 ) - m2.h20 = pyo.Constraint( expr=m2.egb.inputs['h2_0'] == 0.5 ) - m2.obj = pyo.Objective( expr= sum((m2.egb.inputs['h1_{}'.format(t)]-1.0)**2 + (m2.egb.inputs['h2_{}'.format(t)]-1.5)**2 for t in m2.T) ) + m2.h10 = pyo.Constraint(expr=m2.egb.inputs['h1_0'] == 1.5) + m2.h20 = pyo.Constraint(expr=m2.egb.inputs['h2_0'] == 0.5) + m2.obj = pyo.Objective( + expr=sum( + (m2.egb.inputs['h1_{}'.format(t)] - 1.0) ** 2 + + (m2.egb.inputs['h2_{}'.format(t)] - 1.5) ** 2 + for t in m2.T + ) + ) return m2 class TestGreyBoxModel(unittest.TestCase): - @unittest.skipIf( not pyo.SolverFactory('ipopt').available(exception_flag=False), - "Ipopt needed to run tests with solve") + "Ipopt needed to run tests with solve", + ) def test_compare_evaluations(self): A1 = 5 A2 = 10 @@ -401,42 +442,162 @@ def test_compare_evaluations(self): m_c_order = m_nlp.constraint_names() mex_x_order = mex_nlp.primals_names() mex_c_order = mex_nlp.constraint_names() - - x1list = ['h1[0]', 'h1[1]', 'h1[2]', 'h1[3]', 'h1[4]', 'h1[5]', 'h2[0]', 'h2[1]', 'h2[2]', 'h2[3]', 'h2[4]', 'h2[5]', 'F1[1]', 'F1[2]', 'F1[3]', 'F1[4]', 'F1[5]', 'F2[1]', 'F2[2]', 'F2[3]', 'F2[4]', 'F2[5]', 'F12[0]', 'F12[1]', 'F12[2]', 'F12[3]', 'F12[4]', 'F12[5]', 'Fo[0]', 'Fo[1]', 'Fo[2]', 'Fo[3]', 'Fo[4]', 'Fo[5]'] - x2list = ['egb.inputs[h1_0]', 'egb.inputs[h1_1]', 'egb.inputs[h1_2]', 'egb.inputs[h1_3]', 'egb.inputs[h1_4]', 'egb.inputs[h1_5]', 'egb.inputs[h2_0]', 'egb.inputs[h2_1]', 'egb.inputs[h2_2]', 'egb.inputs[h2_3]', 'egb.inputs[h2_4]', 'egb.inputs[h2_5]', 'egb.inputs[F1_1]', 'egb.inputs[F1_2]', 'egb.inputs[F1_3]', 'egb.inputs[F1_4]', 'egb.inputs[F1_5]', 'egb.inputs[F2_1]', 'egb.inputs[F2_2]', 'egb.inputs[F2_3]', 'egb.inputs[F2_4]', 'egb.inputs[F2_5]', 'egb.outputs[F12_0]', 'egb.outputs[F12_1]', 'egb.outputs[F12_2]', 'egb.outputs[F12_3]', 'egb.outputs[F12_4]', 'egb.outputs[F12_5]', 'egb.outputs[Fo_0]', 'egb.outputs[Fo_1]', 'egb.outputs[Fo_2]', 'egb.outputs[Fo_3]', 'egb.outputs[Fo_4]', 'egb.outputs[Fo_5]'] + + x1list = [ + 'h1[0]', + 'h1[1]', + 'h1[2]', + 'h1[3]', + 'h1[4]', + 'h1[5]', + 'h2[0]', + 'h2[1]', + 'h2[2]', + 'h2[3]', + 'h2[4]', + 'h2[5]', + 'F1[1]', + 'F1[2]', + 'F1[3]', + 'F1[4]', + 'F1[5]', + 'F2[1]', + 'F2[2]', + 'F2[3]', + 'F2[4]', + 'F2[5]', + 'F12[0]', + 'F12[1]', + 'F12[2]', + 'F12[3]', + 'F12[4]', + 'F12[5]', + 'Fo[0]', + 'Fo[1]', + 'Fo[2]', + 'Fo[3]', + 'Fo[4]', + 'Fo[5]', + ] + x2list = [ + 'egb.inputs[h1_0]', + 'egb.inputs[h1_1]', + 'egb.inputs[h1_2]', + 'egb.inputs[h1_3]', + 'egb.inputs[h1_4]', + 'egb.inputs[h1_5]', + 'egb.inputs[h2_0]', + 'egb.inputs[h2_1]', + 'egb.inputs[h2_2]', + 'egb.inputs[h2_3]', + 'egb.inputs[h2_4]', + 'egb.inputs[h2_5]', + 'egb.inputs[F1_1]', + 'egb.inputs[F1_2]', + 'egb.inputs[F1_3]', + 'egb.inputs[F1_4]', + 'egb.inputs[F1_5]', + 'egb.inputs[F2_1]', + 'egb.inputs[F2_2]', + 'egb.inputs[F2_3]', + 'egb.inputs[F2_4]', + 'egb.inputs[F2_5]', + 'egb.outputs[F12_0]', + 'egb.outputs[F12_1]', + 'egb.outputs[F12_2]', + 'egb.outputs[F12_3]', + 'egb.outputs[F12_4]', + 'egb.outputs[F12_5]', + 'egb.outputs[Fo_0]', + 'egb.outputs[Fo_1]', + 'egb.outputs[Fo_2]', + 'egb.outputs[Fo_3]', + 'egb.outputs[Fo_4]', + 'egb.outputs[Fo_5]', + ] x1_x2_map = dict(zip(x1list, x2list)) - x1idx_x2idx_map = {i: mex_x_order.index(x1_x2_map[m_x_order[i]]) for i in range(len(m_x_order))} - - c1list = ['h1bal[1]', 'h1bal[2]', 'h1bal[3]', 'h1bal[4]', - 'h1bal[5]', 'h2bal[1]', 'h2bal[2]', 'h2bal[3]', 'h2bal[4]', - 'h2bal[5]', 'F12con[0]', 'F12con[1]', 'F12con[2]', - 'F12con[3]', 'F12con[4]', 'F12con[5]', 'Focon[0]', 'Focon[1]', - 'Focon[2]', 'Focon[3]', 'Focon[4]', 'Focon[5]', - 'min_inflow[1]', 'min_inflow[2]', 'min_inflow[3]', - 'min_inflow[4]', 'min_inflow[5]', 'max_outflow[0]', - 'max_outflow[1]', 'max_outflow[2]', 'max_outflow[3]', - 'max_outflow[4]', 'max_outflow[5]', 'h10', 'h20'] - c2list = ['egb.h1bal_1', 'egb.h1bal_2', 'egb.h1bal_3', - 'egb.h1bal_4', 'egb.h1bal_5', 'egb.h2bal_1', 'egb.h2bal_2', - 'egb.h2bal_3', 'egb.h2bal_4', 'egb.h2bal_5', - 'egb.output_constraints[F12_0]', - 'egb.output_constraints[F12_1]', - 'egb.output_constraints[F12_2]', - 'egb.output_constraints[F12_3]', - 'egb.output_constraints[F12_4]', - 'egb.output_constraints[F12_5]', - 'egb.output_constraints[Fo_0]', - 'egb.output_constraints[Fo_1]', - 'egb.output_constraints[Fo_2]', - 'egb.output_constraints[Fo_3]', - 'egb.output_constraints[Fo_4]', - 'egb.output_constraints[Fo_5]', 'min_inflow[1]', - 'min_inflow[2]', 'min_inflow[3]', 'min_inflow[4]', - 'min_inflow[5]', 'max_outflow[0]', 'max_outflow[1]', - 'max_outflow[2]', 'max_outflow[3]', 'max_outflow[4]', - 'max_outflow[5]', 'h10', 'h20'] + x1idx_x2idx_map = { + i: mex_x_order.index(x1_x2_map[m_x_order[i]]) for i in range(len(m_x_order)) + } + + c1list = [ + 'h1bal[1]', + 'h1bal[2]', + 'h1bal[3]', + 'h1bal[4]', + 'h1bal[5]', + 'h2bal[1]', + 'h2bal[2]', + 'h2bal[3]', + 'h2bal[4]', + 'h2bal[5]', + 'F12con[0]', + 'F12con[1]', + 'F12con[2]', + 'F12con[3]', + 'F12con[4]', + 'F12con[5]', + 'Focon[0]', + 'Focon[1]', + 'Focon[2]', + 'Focon[3]', + 'Focon[4]', + 'Focon[5]', + 'min_inflow[1]', + 'min_inflow[2]', + 'min_inflow[3]', + 'min_inflow[4]', + 'min_inflow[5]', + 'max_outflow[0]', + 'max_outflow[1]', + 'max_outflow[2]', + 'max_outflow[3]', + 'max_outflow[4]', + 'max_outflow[5]', + 'h10', + 'h20', + ] + c2list = [ + 'egb.h1bal_1', + 'egb.h1bal_2', + 'egb.h1bal_3', + 'egb.h1bal_4', + 'egb.h1bal_5', + 'egb.h2bal_1', + 'egb.h2bal_2', + 'egb.h2bal_3', + 'egb.h2bal_4', + 'egb.h2bal_5', + 'egb.output_constraints[F12_0]', + 'egb.output_constraints[F12_1]', + 'egb.output_constraints[F12_2]', + 'egb.output_constraints[F12_3]', + 'egb.output_constraints[F12_4]', + 'egb.output_constraints[F12_5]', + 'egb.output_constraints[Fo_0]', + 'egb.output_constraints[Fo_1]', + 'egb.output_constraints[Fo_2]', + 'egb.output_constraints[Fo_3]', + 'egb.output_constraints[Fo_4]', + 'egb.output_constraints[Fo_5]', + 'min_inflow[1]', + 'min_inflow[2]', + 'min_inflow[3]', + 'min_inflow[4]', + 'min_inflow[5]', + 'max_outflow[0]', + 'max_outflow[1]', + 'max_outflow[2]', + 'max_outflow[3]', + 'max_outflow[4]', + 'max_outflow[5]', + 'h10', + 'h20', + ] c1_c2_map = dict(zip(c1list, c2list)) - c1idx_c2idx_map = {i: mex_c_order.index(c1_c2_map[m_c_order[i]]) for i in range(len(m_c_order))} + c1idx_c2idx_map = { + i: mex_c_order.index(c1_c2_map[m_c_order[i]]) for i in range(len(m_c_order)) + } # get the primals from m and put them in the correct order for mex m_x = m_nlp.get_primals() @@ -459,27 +620,59 @@ def test_compare_evaluations(self): m_gobj = m_nlp.evaluate_grad_objective() mex_gobj = mex_nlp.evaluate_grad_objective() - check_vectors_specific_order(self, m_gobj, m_x_order, mex_gobj, mex_x_order, x1_x2_map) + check_vectors_specific_order( + self, m_gobj, m_x_order, mex_gobj, mex_x_order, x1_x2_map + ) m_c = m_nlp.evaluate_constraints() mex_c = mex_nlp.evaluate_constraints() - check_vectors_specific_order(self, m_c, m_c_order, mex_c, mex_c_order, c1_c2_map) + check_vectors_specific_order( + self, m_c, m_c_order, mex_c, mex_c_order, c1_c2_map + ) m_j = m_nlp.evaluate_jacobian() mex_j = mex_nlp.evaluate_jacobian().todense() - check_sparse_matrix_specific_order(self, m_j, m_c_order, m_x_order, mex_j, mex_c_order, mex_x_order, c1_c2_map, x1_x2_map) + check_sparse_matrix_specific_order( + self, + m_j, + m_c_order, + m_x_order, + mex_j, + mex_c_order, + mex_x_order, + c1_c2_map, + x1_x2_map, + ) m_h = m_nlp.evaluate_hessian_lag() mex_h = mex_nlp.evaluate_hessian_lag() - check_sparse_matrix_specific_order(self, m_h, m_x_order, m_x_order, mex_h, mex_x_order, mex_x_order, x1_x2_map, x1_x2_map) - - mex_h = 0*mex_h + check_sparse_matrix_specific_order( + self, + m_h, + m_x_order, + m_x_order, + mex_h, + mex_x_order, + mex_x_order, + x1_x2_map, + x1_x2_map, + ) + + mex_h = 0 * mex_h mex_nlp.evaluate_hessian_lag(out=mex_h) - check_sparse_matrix_specific_order(self, m_h, m_x_order, m_x_order, mex_h, mex_x_order, mex_x_order, x1_x2_map, x1_x2_map) - - - @unittest.skipIf(not cyipopt_available, - "CyIpopt needed to run tests with solve") + check_sparse_matrix_specific_order( + self, + m_h, + m_x_order, + m_x_order, + mex_h, + mex_x_order, + mex_x_order, + x1_x2_map, + x1_x2_map, + ) + + @unittest.skipIf(not cyipopt_available, "CyIpopt needed to run tests with solve") def test_solve(self): A1 = 5 A2 = 10 @@ -492,21 +685,45 @@ def test_solve(self): solver = pyo.SolverFactory('cyipopt') solver.config.options['linear_solver'] = 'mumps' status = solver.solve(m, tee=False) - + mex = create_pyomo_external_grey_box_model(A1, A2, c1, c2, N, dt) solver = pyo.SolverFactory('cyipopt') solver.config.options['linear_solver'] = 'mumps' status = solver.solve(mex, tee=False) for k in m.F1: - self.assertAlmostEqual(pyo.value(m.F1[k]), pyo.value(mex.egb.inputs['F1_{}'.format(k)]), places=3) - self.assertAlmostEqual(pyo.value(m.F2[k]), pyo.value(mex.egb.inputs['F2_{}'.format(k)]), places=3) + self.assertAlmostEqual( + pyo.value(m.F1[k]), + pyo.value(mex.egb.inputs['F1_{}'.format(k)]), + places=3, + ) + self.assertAlmostEqual( + pyo.value(m.F2[k]), + pyo.value(mex.egb.inputs['F2_{}'.format(k)]), + places=3, + ) for k in m.h1: - self.assertAlmostEqual(pyo.value(m.h1[k]), pyo.value(mex.egb.inputs['h1_{}'.format(k)]), places=3) - self.assertAlmostEqual(pyo.value(m.h2[k]), pyo.value(mex.egb.inputs['h2_{}'.format(k)]), places=3) + self.assertAlmostEqual( + pyo.value(m.h1[k]), + pyo.value(mex.egb.inputs['h1_{}'.format(k)]), + places=3, + ) + self.assertAlmostEqual( + pyo.value(m.h2[k]), + pyo.value(mex.egb.inputs['h2_{}'.format(k)]), + places=3, + ) for k in m.F12: - self.assertAlmostEqual(pyo.value(m.F12[k]), pyo.value(mex.egb.outputs['F12_{}'.format(k)]), places=3) - self.assertAlmostEqual(pyo.value(m.Fo[k]), pyo.value(mex.egb.outputs['Fo_{}'.format(k)]), places=3) + self.assertAlmostEqual( + pyo.value(m.F12[k]), + pyo.value(mex.egb.outputs['F12_{}'.format(k)]), + places=3, + ) + self.assertAlmostEqual( + pyo.value(m.Fo[k]), + pyo.value(mex.egb.outputs['Fo_{}'.format(k)]), + places=3, + ) """ self._input_names = ['F1_{}'.format(t) for t in range(1,N)] self._input_names.extend(['F2_{}'.format(t) for t in range(1,N)]) @@ -516,7 +733,7 @@ def test_solve(self): self._output_names.extend(['Fo_{}'.format(t) for t in range(0,N)]) """ + if __name__ == '__main__': t = TestGreyBoxModel() t.test_solve() - diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_external_asl_function.py b/pyomo/contrib/pynumero/interfaces/tests/test_external_asl_function.py index 358821bc180..88a4024aeeb 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_external_asl_function.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_external_asl_function.py @@ -12,23 +12,27 @@ import os import pyomo.common.unittest as unittest from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy_available) + numpy as np, + numpy_available, + scipy_available, +) + if not (numpy_available and scipy_available): raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): - raise unittest.SkipTest( - "Pynumero needs the ASL extension to run NLP tests") + raise unittest.SkipTest("Pynumero needs the ASL extension to run NLP tests") from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL from pyomo.environ import ConcreteModel, ExternalFunction, Var, Objective + class TestAMPLExternalFunction(unittest.TestCase): def assertListsAlmostEqual(self, first, second, places=7, msg=None): self.assertEqual(len(first), len(second)) - msg = "lists %s and %s differ at item " % ( - first, second) - for i,a in enumerate(first): + msg = "lists %s and %s differ at item " % (first, second) + for i, a in enumerate(first): self.assertAlmostEqual(a, second[i], places, msg + str(i)) def test_solve_gsl_function(self): @@ -37,7 +41,7 @@ def test_solve_gsl_function(self): self.skipTest("Could not find the amplgsl.dll library") model = ConcreteModel() model.z_func = ExternalFunction(library=DLL, function="gsl_sf_gamma") - model.x = Var(initialize=3, bounds=(1e-5,None)) + model.x = Var(initialize=3, bounds=(1e-5, None)) model.o = Objective(expr=model.z_func(model.x)) nlp = PyomoNLP(model) self.assertAlmostEqual(nlp.evaluate_objective(), 2, 7) diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_external_grey_box_model.py b/pyomo/contrib/pynumero/interfaces/tests/test_external_grey_box_model.py index 46e30a988aa..58e08a409f0 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_external_grey_box_model.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_external_grey_box_model.py @@ -14,7 +14,10 @@ import pyomo.environ as pyo from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) from pyomo.common.dependencies.scipy import sparse as spa @@ -22,21 +25,22 @@ raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): - raise unittest.SkipTest( - "Pynumero needs the ASL extension to run cyipopt tests") + raise unittest.SkipTest("Pynumero needs the ASL extension to run cyipopt tests") -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - cyipopt_available, -) +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import cyipopt_available from ..external_grey_box import ExternalGreyBoxModel, ExternalGreyBoxBlock from ..pyomo_nlp import PyomoGreyBoxNLP -from pyomo.contrib.pynumero.interfaces.tests.compare_utils import check_vectors_specific_order, check_sparse_matrix_specific_order +from pyomo.contrib.pynumero.interfaces.tests.compare_utils import ( + check_vectors_specific_order, + check_sparse_matrix_specific_order, +) import pyomo.contrib.pynumero.interfaces.tests.external_grey_box_models as ex_models -class TestExternalGreyBoxModel(unittest.TestCase): +class TestExternalGreyBoxModel(unittest.TestCase): def test_pressure_drop_single_output(self): egbm = ex_models.PressureDropSingleOutput() input_names = egbm.input_names() @@ -62,9 +66,15 @@ def test_pressure_drop_single_output(self): tmp = egbm.evaluate_jacobian_equality_constraints() jac_o = egbm.evaluate_jacobian_outputs() - self.assertTrue(np.array_equal(jac_o.row, np.asarray([0,0,0], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.col, np.asarray([0,1,2], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.data, np.asarray([1,-36,-48], dtype=np.float64))) + self.assertTrue( + np.array_equal(jac_o.row, np.asarray([0, 0, 0], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_o.col, np.asarray([0, 1, 2], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_o.data, np.asarray([1, -36, -48], dtype=np.float64)) + ) with self.assertRaises(AttributeError): eq_hess = egbm.evaluate_hessian_equality_constraints() @@ -97,16 +107,31 @@ def test_pressure_drop_single_output_with_hessian(self): tmp = egbm.evaluate_jacobian_equality_constraints() jac_o = egbm.evaluate_jacobian_outputs() - self.assertTrue(np.array_equal(jac_o.row, np.asarray([0,0,0], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.col, np.asarray([0,1,2], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.data, np.asarray([1,-36,-48], dtype=np.float64))) + self.assertTrue( + np.array_equal(jac_o.row, np.asarray([0, 0, 0], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_o.col, np.asarray([0, 1, 2], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_o.data, np.asarray([1, -36, -48], dtype=np.float64)) + ) with self.assertRaises(AttributeError): eq_hess = egbm.evaluate_hessian_equality_constraints() outputs_hess = egbm.evaluate_hessian_outputs() - self.assertTrue(np.array_equal(outputs_hess.row, np.asarray([2, 2], dtype=np.int64))) - self.assertTrue(np.array_equal(outputs_hess.col, np.asarray([1, 2], dtype=np.int64))) - self.assertTrue(np.array_equal(outputs_hess.data, np.asarray([5*(-8*3), 5*(-8*2)], dtype=np.int64))) + self.assertTrue( + np.array_equal(outputs_hess.row, np.asarray([2, 2], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(outputs_hess.col, np.asarray([1, 2], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal( + outputs_hess.data, + np.asarray([5 * (-8 * 3), 5 * (-8 * 2)], dtype=np.int64), + ) + ) def test_pressure_drop_single_equality(self): egbm = ex_models.PressureDropSingleEquality() @@ -132,9 +157,15 @@ def test_pressure_drop_single_equality(self): tmp = egbm.evaluate_jacobian_outputs() jac_eq = egbm.evaluate_jacobian_equality_constraints() - self.assertTrue(np.array_equal(jac_eq.row, np.asarray([0,0,0,0], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.col, np.asarray([0,1,2,3], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.data, np.asarray([-1, 36, 48, 1], dtype=np.float64))) + self.assertTrue( + np.array_equal(jac_eq.row, np.asarray([0, 0, 0, 0], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_eq.col, np.asarray([0, 1, 2, 3], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_eq.data, np.asarray([-1, 36, 48, 1], dtype=np.float64)) + ) with self.assertRaises(AttributeError): eq_hess = egbm.evaluate_hessian_equality_constraints() @@ -165,16 +196,26 @@ def test_pressure_drop_single_equality_with_hessian(self): tmp = egbm.evaluate_jacobian_outputs() jac_eq = egbm.evaluate_jacobian_equality_constraints() - self.assertTrue(np.array_equal(jac_eq.row, np.asarray([0,0,0,0], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.col, np.asarray([0,1,2,3], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.data, np.asarray([-1, 36, 48, 1], dtype=np.float64))) + self.assertTrue( + np.array_equal(jac_eq.row, np.asarray([0, 0, 0, 0], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_eq.col, np.asarray([0, 1, 2, 3], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_eq.data, np.asarray([-1, 36, 48, 1], dtype=np.float64)) + ) eq_hess = egbm.evaluate_hessian_equality_constraints() with self.assertRaises(AttributeError): outputs_hess = egbm.evaluate_hessian_outputs() self.assertTrue(np.array_equal(eq_hess.row, np.asarray([2, 2], dtype=np.int64))) self.assertTrue(np.array_equal(eq_hess.col, np.asarray([1, 2], dtype=np.int64))) - self.assertTrue(np.array_equal(eq_hess.data, np.asarray([5*(8*3), 5*(8*2)], dtype=np.float64))) + self.assertTrue( + np.array_equal( + eq_hess.data, np.asarray([5 * (8 * 3), 5 * (8 * 2)], dtype=np.float64) + ) + ) def test_pressure_drop_two_outputs(self): egbm = ex_models.PressureDropTwoOutputs() @@ -201,7 +242,7 @@ def test_pressure_drop_two_outputs(self): # h_eq(u) = {empty} # h_o(u) = [Pin - 2*c*F^2] # [Pin - 4*c*F^2] - + o = egbm.evaluate_outputs() self.assertTrue(np.array_equal(o, np.asarray([64, 28], dtype=np.float64))) @@ -209,9 +250,17 @@ def test_pressure_drop_two_outputs(self): tmp = egbm.evaluate_jacobian_equality_constraints() jac_o = egbm.evaluate_jacobian_outputs() - self.assertTrue(np.array_equal(jac_o.row, np.asarray([0,0,0,1,1,1], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.col, np.asarray([0,1,2,0,1,2], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.data, np.asarray([1, -18, -24, 1,-36,-48], dtype=np.float64))) + self.assertTrue( + np.array_equal(jac_o.row, np.asarray([0, 0, 0, 1, 1, 1], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_o.col, np.asarray([0, 1, 2, 0, 1, 2], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal( + jac_o.data, np.asarray([1, -18, -24, 1, -36, -48], dtype=np.float64) + ) + ) with self.assertRaises(AttributeError): hess_eq = egbm.evaluate_hessian_equality_constraints() @@ -243,7 +292,7 @@ def test_pressure_drop_two_outputs_with_hessian(self): # h_eq(u) = {empty} # h_o(u) = [Pin - 2*c*F^2] # [Pin - 4*c*F^2] - + o = egbm.evaluate_outputs() self.assertTrue(np.array_equal(o, np.asarray([64, 28], dtype=np.float64))) @@ -251,16 +300,26 @@ def test_pressure_drop_two_outputs_with_hessian(self): tmp = egbm.evaluate_jacobian_equality_constraints() jac_o = egbm.evaluate_jacobian_outputs() - self.assertTrue(np.array_equal(jac_o.row, np.asarray([0,0,0,1,1,1], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.col, np.asarray([0,1,2,0,1,2], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.data, np.asarray([1, -18, -24, 1,-36,-48], dtype=np.float64))) + self.assertTrue( + np.array_equal(jac_o.row, np.asarray([0, 0, 0, 1, 1, 1], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_o.col, np.asarray([0, 1, 2, 0, 1, 2], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal( + jac_o.data, np.asarray([1, -18, -24, 1, -36, -48], dtype=np.float64) + ) + ) with self.assertRaises(AttributeError): hess_eq = egbm.evaluate_hessian_equality_constraints() hess = egbm.evaluate_hessian_outputs() self.assertTrue(np.array_equal(hess.row, np.asarray([2, 2], dtype=np.int64))) self.assertTrue(np.array_equal(hess.col, np.asarray([1, 2], dtype=np.int64))) - self.assertTrue(np.array_equal(hess.data, np.asarray([-156.0, -104.0], dtype=np.float64))) + self.assertTrue( + np.array_equal(hess.data, np.asarray([-156.0, -104.0], dtype=np.float64)) + ) def test_pressure_drop_two_equalities(self): egbm = ex_models.PressureDropTwoEqualities() @@ -293,9 +352,22 @@ def test_pressure_drop_two_equalities(self): tmp = egbm.evaluate_jacobian_outputs() jac_eq = egbm.evaluate_jacobian_equality_constraints() - self.assertTrue(np.array_equal(jac_eq.row, np.asarray([0,0,0,0,1,1,1,1], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.col, np.asarray([0,1,2,3,1,2,3,4], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.data, np.asarray([-1, 18, 24, 1, 18, 24, -1, 1], dtype=np.float64))) + self.assertTrue( + np.array_equal( + jac_eq.row, np.asarray([0, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) + ) + ) + self.assertTrue( + np.array_equal( + jac_eq.col, np.asarray([0, 1, 2, 3, 1, 2, 3, 4], dtype=np.int64) + ) + ) + self.assertTrue( + np.array_equal( + jac_eq.data, + np.asarray([-1, 18, 24, 1, 18, 24, -1, 1], dtype=np.float64), + ) + ) with self.assertRaises(AttributeError): hess_outputs = egbm.evaluate_hessian_outputs() @@ -333,17 +405,31 @@ def test_pressure_drop_two_equalities_with_hessian(self): tmp = egbm.evaluate_jacobian_outputs() jac_eq = egbm.evaluate_jacobian_equality_constraints() - self.assertTrue(np.array_equal(jac_eq.row, np.asarray([0,0,0,0,1,1,1,1], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.col, np.asarray([0,1,2,3,1,2,3,4], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.data, np.asarray([-1, 18, 24, 1, 18, 24, -1, 1], dtype=np.float64))) + self.assertTrue( + np.array_equal( + jac_eq.row, np.asarray([0, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) + ) + ) + self.assertTrue( + np.array_equal( + jac_eq.col, np.asarray([0, 1, 2, 3, 1, 2, 3, 4], dtype=np.int64) + ) + ) + self.assertTrue( + np.array_equal( + jac_eq.data, + np.asarray([-1, 18, 24, 1, 18, 24, -1, 1], dtype=np.float64), + ) + ) with self.assertRaises(AttributeError): hess_outputs = egbm.evaluate_hessian_outputs() hess = egbm.evaluate_hessian_equality_constraints() self.assertTrue(np.array_equal(hess.row, np.asarray([2, 2], dtype=np.int64))) self.assertTrue(np.array_equal(hess.col, np.asarray([1, 2], dtype=np.int64))) - self.assertTrue(np.array_equal(hess.data, np.asarray([96.0, 64.0], dtype=np.float64))) - + self.assertTrue( + np.array_equal(hess.data, np.asarray([96.0, 64.0], dtype=np.float64)) + ) def test_pressure_drop_two_equalities_two_outputs(self): # u = [Pin, c, F, P1, P3] @@ -370,14 +456,34 @@ def test_pressure_drop_two_equalities_two_outputs(self): self.assertTrue(np.array_equal(o, np.asarray([62, 28], dtype=np.float64))) jac_eq = egbm.evaluate_jacobian_equality_constraints() - self.assertTrue(np.array_equal(jac_eq.row, np.asarray([0,0,0,0,1,1,1,1], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.col, np.asarray([0,1,2,3,1,2,3,4], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.data, np.asarray([-1, 9, 12, 1, 18, 24, -1, 1], dtype=np.float64))) + self.assertTrue( + np.array_equal( + jac_eq.row, np.asarray([0, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) + ) + ) + self.assertTrue( + np.array_equal( + jac_eq.col, np.asarray([0, 1, 2, 3, 1, 2, 3, 4], dtype=np.int64) + ) + ) + self.assertTrue( + np.array_equal( + jac_eq.data, np.asarray([-1, 9, 12, 1, 18, 24, -1, 1], dtype=np.float64) + ) + ) jac_o = egbm.evaluate_jacobian_outputs() - self.assertTrue(np.array_equal(jac_o.row, np.asarray([0,0,0,1,1,1], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.col, np.asarray([1,2,3,0,1,2], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.data, np.asarray([-9, -12, 1, 1, -36, -48], dtype=np.float64))) + self.assertTrue( + np.array_equal(jac_o.row, np.asarray([0, 0, 0, 1, 1, 1], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_o.col, np.asarray([1, 2, 3, 0, 1, 2], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal( + jac_o.data, np.asarray([-9, -12, 1, 1, -36, -48], dtype=np.float64) + ) + ) with self.assertRaises(AttributeError): hess = egbm.evaluate_hessian_equality_constraints() @@ -409,24 +515,50 @@ def test_pressure_drop_two_equalities_two_outputs_with_hessian(self): self.assertTrue(np.array_equal(o, np.asarray([62, 28], dtype=np.float64))) jac_eq = egbm.evaluate_jacobian_equality_constraints() - self.assertTrue(np.array_equal(jac_eq.row, np.asarray([0,0,0,0,1,1,1,1], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.col, np.asarray([0,1,2,3,1,2,3,4], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_eq.data, np.asarray([-1, 9, 12, 1, 18, 24, -1, 1], dtype=np.float64))) + self.assertTrue( + np.array_equal( + jac_eq.row, np.asarray([0, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64) + ) + ) + self.assertTrue( + np.array_equal( + jac_eq.col, np.asarray([0, 1, 2, 3, 1, 2, 3, 4], dtype=np.int64) + ) + ) + self.assertTrue( + np.array_equal( + jac_eq.data, np.asarray([-1, 9, 12, 1, 18, 24, -1, 1], dtype=np.float64) + ) + ) jac_o = egbm.evaluate_jacobian_outputs() - self.assertTrue(np.array_equal(jac_o.row, np.asarray([0,0,0,1,1,1], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.col, np.asarray([1,2,3,0,1,2], dtype=np.int64))) - self.assertTrue(np.array_equal(jac_o.data, np.asarray([-9, -12, 1, 1, -36, -48], dtype=np.float64))) + self.assertTrue( + np.array_equal(jac_o.row, np.asarray([0, 0, 0, 1, 1, 1], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal(jac_o.col, np.asarray([1, 2, 3, 0, 1, 2], dtype=np.int64)) + ) + self.assertTrue( + np.array_equal( + jac_o.data, np.asarray([-9, -12, 1, 1, -36, -48], dtype=np.float64) + ) + ) hess = egbm.evaluate_hessian_equality_constraints() self.assertTrue(np.array_equal(hess.row, np.asarray([2, 2], dtype=np.int64))) self.assertTrue(np.array_equal(hess.col, np.asarray([1, 2], dtype=np.int64))) - self.assertTrue(np.array_equal(hess.data, np.asarray([60.0, 40.0], dtype=np.float64))) + self.assertTrue( + np.array_equal(hess.data, np.asarray([60.0, 40.0], dtype=np.float64)) + ) hess = egbm.evaluate_hessian_outputs() self.assertTrue(np.array_equal(hess.row, np.asarray([2, 2], dtype=np.int64))) self.assertTrue(np.array_equal(hess.col, np.asarray([1, 2], dtype=np.int64))) - self.assertTrue(np.array_equal(hess.data, np.asarray([-258, -172], dtype=np.float64))) + self.assertTrue( + np.array_equal(hess.data, np.asarray([-258, -172], dtype=np.float64)) + ) + + """ def test_pressure_drop_two_equalities_two_outputs_no_hessian(self): # u = [Pin, c, F, P1, P3] @@ -472,6 +604,7 @@ def test_pressure_drop_two_equalities_two_outputs_no_hessian(self): hess = egbm.evaluate_hessian_outputs() """ + # TODO: make this work even if there is only external and no variables anywhere in pyomo part class TestPyomoGreyBoxNLP(unittest.TestCase): def test_error_no_variables(self): @@ -487,7 +620,7 @@ def test_error_fixed_inputs_outputs(self): m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_models.PressureDropSingleOutput()) m.egb.inputs['Pin'].fix(100) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) with self.assertRaises(NotImplementedError): pyomo_nlp = PyomoGreyBoxNLP(m) @@ -495,13 +628,17 @@ def test_error_fixed_inputs_outputs(self): m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_models.PressureDropTwoOutputs()) m.egb.outputs['P2'].fix(50) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) with self.assertRaises(NotImplementedError): pyomo_nlp = PyomoGreyBoxNLP(m) def test_pressure_drop_single_output(self): - self._test_pressure_drop_single_output(ex_models.PressureDropSingleOutput(),False) - self._test_pressure_drop_single_output(ex_models.PressureDropSingleOutputWithHessian(),True) + self._test_pressure_drop_single_output( + ex_models.PressureDropSingleOutput(), False + ) + self._test_pressure_drop_single_output( + ex_models.PressureDropSingleOutputWithHessian(), True + ) def _test_pressure_drop_single_output(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -519,8 +656,8 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(0) m.egb.outputs['Pout'].setub(100) - #m.dummy = pyo.Constraint(expr=sum(m.egb.inputs[i] for i in m.egb.inputs) + sum(m.egb.outputs[i] for i in m.egb.outputs) <= 1e6) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + # m.dummy = pyo.Constraint(expr=sum(m.egb.inputs[i] for i in m.egb.inputs) + sum(m.egb.outputs[i] for i in m.egb.outputs) <= 1e6) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoGreyBoxNLP(m) @@ -530,30 +667,47 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): if hessian_support: self.assertEqual(3, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.outputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.outputs[Pout]', + ] x_order = pyomo_nlp.variable_names() comparison_c_order = ['egb.Pout_con'] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(4, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(1, len(pyomo_nlp.create_new_vector('constraints'))) @@ -565,7 +719,7 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4], dtype=np.float64))) + self.assertTrue(np.array_equal(x, np.asarray([1, 2, 3, 4], dtype=np.float64))) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42], dtype=np.float64)) @@ -586,34 +740,72 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 60], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([-22], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) c = np.zeros(1) pyomo_nlp.evaluate_constraints(out=c) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) - + j = pyomo_nlp.evaluate_jacobian() comparison_j = np.asarray([[1, -36, -48, -1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (4,4)) - comparison_h = np.asarray([[0, 0, 0, 0],[0, 0, 0, 0], [0, -8*3*21, -8*2*21, 0], [0, 0, 0, 2*1]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (4, 4)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, -8 * 3 * 21, -8 * 2 * 21, 0], + [0, 0, 0, 2 * 1], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(AttributeError): h = pyomo_nlp.evaluate_hessian_lag() def test_pressure_drop_single_equality(self): - self._test_pressure_drop_single_equality(ex_models.PressureDropSingleEquality(), False) - self._test_pressure_drop_single_equality(ex_models.PressureDropSingleEqualityWithHessian(), True) + self._test_pressure_drop_single_equality( + ex_models.PressureDropSingleEquality(), False + ) + self._test_pressure_drop_single_equality( + ex_models.PressureDropSingleEqualityWithHessian(), True + ) def _test_pressure_drop_single_equality(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -631,7 +823,7 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): m.egb.inputs['Pout'].value = 50 m.egb.inputs['Pout'].setlb(0) m.egb.inputs['Pout'].setub(100) - m.obj = pyo.Objective(expr=(m.egb.inputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.inputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoGreyBoxNLP(m) self.assertEqual(4, pyomo_nlp.n_primals()) @@ -640,30 +832,47 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): if hessian_support: self.assertEqual(3, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.inputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[Pout]', + ] x_order = pyomo_nlp.variable_names() comparison_c_order = ['egb.pdrop'] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(4, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(1, len(pyomo_nlp.create_new_vector('constraints'))) @@ -675,7 +884,7 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4], dtype=np.float64))) + self.assertTrue(np.array_equal(x, np.asarray([1, 2, 3, 4], dtype=np.float64))) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42], dtype=np.float64)) @@ -696,7 +905,9 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 60], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([22], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) @@ -706,24 +917,58 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): j = pyomo_nlp.evaluate_jacobian() comparison_j = np.asarray([[-1, 36, 48, 1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (4,4)) - comparison_h = np.asarray([[0, 0, 0, 0],[0, 0, 0, 0], [0, 8*3*21, 8*2*21, 0], [0, 0, 0, 2*1]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (4, 4)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0], + [0, 0, 0, 0], + [0, 8 * 3 * 21, 8 * 2 * 21, 0], + [0, 0, 0, 2 * 1], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(AttributeError): h = pyomo_nlp.evaluate_hessian_lag() def test_pressure_drop_two_outputs(self): self._test_pressure_drop_two_outputs(ex_models.PressureDropTwoOutputs(), False) - self._test_pressure_drop_two_outputs(ex_models.PressureDropTwoOutputsWithHessian(), True) + self._test_pressure_drop_two_outputs( + ex_models.PressureDropTwoOutputsWithHessian(), True + ) def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -744,7 +989,7 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(0) m.egb.outputs['Pout'].setub(100) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoGreyBoxNLP(m) self.assertEqual(5, pyomo_nlp.n_primals()) @@ -753,30 +998,48 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): if hessian_support: self.assertEqual(3, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.outputs[P2]', 'egb.outputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + ] x_order = pyomo_nlp.variable_names() comparison_c_order = ['egb.P2_con', 'egb.Pout_con'] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 90, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 80, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(5, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(2, len(pyomo_nlp.create_new_vector('constraints'))) @@ -788,7 +1051,9 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) + ) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42, 10], dtype=np.float64)) @@ -809,7 +1074,9 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 0, 60], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([-16, -22], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) @@ -819,24 +1086,67 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): j = pyomo_nlp.evaluate_jacobian() comparison_j = np.asarray([[1, -18, -24, -1, 0], [1, -36, -48, 0, -1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (5,5)) - comparison_h = np.asarray([[0, 0, 0, 0, 0],[0, 0, 0, 0, 0], [0, (-4*3*21) + (-8*3*5), (-4*2*21) + (-8*2*5), 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 2*1]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (5, 5)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [ + 0, + (-4 * 3 * 21) + (-8 * 3 * 5), + (-4 * 2 * 21) + (-8 * 2 * 5), + 0, + 0, + ], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 2 * 1], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(AttributeError): h = pyomo_nlp.evaluate_hessian_lag() def test_pressure_drop_two_equalities(self): - self._test_pressure_drop_two_equalities(ex_models.PressureDropTwoEqualities(), False) - self._test_pressure_drop_two_equalities(ex_models.PressureDropTwoEqualitiesWithHessian(), True) + self._test_pressure_drop_two_equalities( + ex_models.PressureDropTwoEqualities(), False + ) + self._test_pressure_drop_two_equalities( + ex_models.PressureDropTwoEqualitiesWithHessian(), True + ) def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -857,7 +1167,7 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): m.egb.inputs['Pout'].value = 50 m.egb.inputs['Pout'].setlb(0) m.egb.inputs['Pout'].setub(100) - m.obj = pyo.Objective(expr=(m.egb.inputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.inputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoGreyBoxNLP(m) self.assertEqual(5, pyomo_nlp.n_primals()) @@ -866,30 +1176,48 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): if hessian_support: self.assertEqual(3, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.inputs[P2]', 'egb.inputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P2]', + 'egb.inputs[Pout]', + ] x_order = pyomo_nlp.variable_names() comparison_c_order = ['egb.pdrop2', 'egb.pdropout'] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 90, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 80, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(5, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(2, len(pyomo_nlp.create_new_vector('constraints'))) @@ -901,7 +1229,9 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) + ) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42, 10], dtype=np.float64)) @@ -922,34 +1252,73 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 0, 60], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([16, 6], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) c = np.zeros(2) pyomo_nlp.evaluate_constraints(out=c) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) - + j = pyomo_nlp.evaluate_jacobian() comparison_j = np.asarray([[-1, 18, 24, 1, 0], [0, 18, 24, -1, 1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (5,5)) - comparison_h = np.asarray([[0, 0, 0, 0, 0],[0, 0, 0, 0, 0], [0, (4*3*21) + (4*3*5), (4*2*21) + (4*2*5), 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 2*1]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (5, 5)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, (4 * 3 * 21) + (4 * 3 * 5), (4 * 2 * 21) + (4 * 2 * 5), 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 2 * 1], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(AttributeError): h = pyomo_nlp.evaluate_hessian_lag() def test_pressure_drop_two_equalities_two_outputs(self): - self._test_pressure_drop_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputs(), False) - self._test_pressure_drop_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True) + self._test_pressure_drop_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputs(), False + ) + self._test_pressure_drop_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True + ) def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -976,7 +1345,7 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(30) m.egb.outputs['Pout'].setub(70) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoGreyBoxNLP(m) self.assertEqual(7, pyomo_nlp.n_primals()) @@ -985,32 +1354,50 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo if hessian_support: self.assertEqual(5, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + ] x_order = pyomo_nlp.variable_names() comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.P2_con', 'egb.Pout_con'] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 20, 15, 30], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 90, 80, 85, 70], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 80, 70, 75, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(7, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(4, len(pyomo_nlp.create_new_vector('constraints'))) @@ -1022,12 +1409,16 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5,6,7], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=np.float64)) + ) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42, 10, 11, 12], dtype=np.float64)) y = pyomo_nlp.get_duals() - self.assertTrue(np.array_equal(y, np.asarray([42, 10, 11, 12], dtype=np.float64))) + self.assertTrue( + np.array_equal(y, np.asarray([42, 10, 11, 12], dtype=np.float64)) + ) pyomo_nlp.set_duals(np.asarray([21, 5, 6, 7], dtype=np.float64)) y = pyomo_nlp.get_duals() self.assertTrue(np.array_equal(y, np.asarray([21, 5, 6, 7], dtype=np.float64))) @@ -1043,54 +1434,99 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 0, 0, 0, 60], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([-2, 26, -13, -22], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) c = np.zeros(4) pyomo_nlp.evaluate_constraints(out=c) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) - j = pyomo_nlp.evaluate_jacobian() - comparison_j = np.asarray([[-1, 9, 12, 1, 0, 0, 0], - [ 0, 18, 24, -1, 1, 0, 0], - [ 0, -9, -12, 1, 0, -1, 0], - [ 1, -36, -48, 0, 0, 0, -1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + comparison_j = np.asarray( + [ + [-1, 9, 12, 1, 0, 0, 0], + [0, 18, 24, -1, 1, 0, 0], + [0, -9, -12, 1, 0, -1, 0], + [1, -36, -48, 0, 0, 0, -1], + ] + ) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (7,7)) - comparison_h = np.asarray([[0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, (2*3*21) + (4*3*5) + (-2*3*6) + (-8*3*7), (2*2*21) + (4*2*5) + (-2*2*6) + (-8*2*7), 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 2*1]], - dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (7, 7)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [ + 0, + (2 * 3 * 21) + (4 * 3 * 5) + (-2 * 3 * 6) + (-8 * 3 * 7), + (2 * 2 * 21) + (4 * 2 * 5) + (-2 * 2 * 6) + (-8 * 2 * 7), + 0, + 0, + 0, + 0, + ], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 2 * 1], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(AttributeError): h = pyomo_nlp.evaluate_hessian_lag() def test_external_additional_constraints_vars(self): - self._test_external_additional_constraints_vars(ex_models.PressureDropTwoEqualitiesTwoOutputs(), False) - self._test_external_additional_constraints_vars(ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True) + self._test_external_additional_constraints_vars( + ex_models.PressureDropTwoEqualitiesTwoOutputs(), False + ) + self._test_external_additional_constraints_vars( + ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True + ) def _test_external_additional_constraints_vars(self, ex_model, hessian_support): m = pyo.ConcreteModel() - m.hin = pyo.Var(bounds=(0,None), initialize=10) - m.hout = pyo.Var(bounds=(0,None)) + m.hin = pyo.Var(bounds=(0, None), initialize=10) + m.hout = pyo.Var(bounds=(0, None)) m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_model) - m.incon = pyo.Constraint(expr= 0 <= m.egb.inputs['Pin'] - 10*m.hin) - m.outcon = pyo.Constraint(expr= 0 == m.egb.outputs['Pout'] - 10*m.hout) + m.incon = pyo.Constraint(expr=0 <= m.egb.inputs['Pin'] - 10 * m.hin) + m.outcon = pyo.Constraint(expr=0 == m.egb.outputs['Pout'] - 10 * m.hout) m.egb.inputs['Pin'].value = 100 m.egb.inputs['Pin'].setlb(50) m.egb.inputs['Pin'].setub(150) @@ -1112,7 +1548,7 @@ def _test_external_additional_constraints_vars(self, ex_model, hessian_support): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(30) m.egb.outputs['Pout'].setub(70) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoGreyBoxNLP(m) self.assertEqual(9, pyomo_nlp.n_primals()) @@ -1121,33 +1557,63 @@ def _test_external_additional_constraints_vars(self, ex_model, hessian_support): if hessian_support: self.assertEqual(5, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]', - 'hin', 'hout'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + 'hin', + 'hout', + ] x_order = pyomo_nlp.variable_names() - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.P2_con', 'egb.Pout_con', 'incon', 'outcon'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.P2_con', + 'egb.Pout_con', + 'incon', + 'outcon', + ] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 20, 15, 30, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() - comparison_xub = np.asarray([150, 5, 5, 90, 80, 85, 70, np.inf, np.inf], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + comparison_xub = np.asarray( + [150, 5, 5, 90, 80, 85, 70, np.inf, np.inf], dtype=np.float64 + ) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0, 0, 0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0, 0, 0, 0, np.inf, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() - comparison_xinit = np.asarray([100, 2, 3, 80, 70, 75, 50, 10, 0], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + comparison_xinit = np.asarray( + [100, 2, 3, 80, 70, 75, 50, 10, 0], dtype=np.float64 + ) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0, 0, 0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(9, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(6, len(pyomo_nlp.create_new_vector('constraints'))) @@ -1159,15 +1625,21 @@ def _test_external_additional_constraints_vars(self, ex_model, hessian_support): pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5,6,7,8,9], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float64)) + ) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42, 10, 11, 12, 13, 14], dtype=np.float64)) y = pyomo_nlp.get_duals() - self.assertTrue(np.array_equal(y, np.asarray([42, 10, 11, 12, 13, 14], dtype=np.float64))) + self.assertTrue( + np.array_equal(y, np.asarray([42, 10, 11, 12, 13, 14], dtype=np.float64)) + ) pyomo_nlp.set_duals(np.asarray([0, 0, 21, 5, 6, 7], dtype=np.float64)) y = pyomo_nlp.get_duals() - self.assertTrue(np.array_equal(y, np.asarray([0, 0, 21, 5, 6, 7], dtype=np.float64))) + self.assertTrue( + np.array_equal(y, np.asarray([0, 0, 21, 5, 6, 7], dtype=np.float64)) + ) fac = pyomo_nlp.get_obj_factor() self.assertEqual(fac, 1) @@ -1180,7 +1652,9 @@ def _test_external_additional_constraints_vars(self, ex_model, hessian_support): gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 0, 0, 0, 60, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([-2, 26, -13, -22, 0, 50], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) @@ -1189,51 +1663,98 @@ def _test_external_additional_constraints_vars(self, ex_model, hessian_support): check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) j = pyomo_nlp.evaluate_jacobian() - comparison_j = np.asarray([[-1, 9, 12, 1, 0, 0, 0, 0, 0], - [ 0, 18, 24, -1, 1, 0, 0, 0, 0], - [ 0, -9, -12, 1, 0, -1, 0, 0, 0], - [ 1, -36, -48, 0, 0, 0, -1, 0, 0], - [ 1, 0, 0, 0, 0, 0, 0, -10, 0], - [ 0, 0, 0, 0, 0, 0, 1, 0, -10]]) - - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + comparison_j = np.asarray( + [ + [-1, 9, 12, 1, 0, 0, 0, 0, 0], + [0, 18, 24, -1, 1, 0, 0, 0, 0], + [0, -9, -12, 1, 0, -1, 0, 0, 0], + [1, -36, -48, 0, 0, 0, -1, 0, 0], + [1, 0, 0, 0, 0, 0, 0, -10, 0], + [0, 0, 0, 0, 0, 0, 1, 0, -10], + ] + ) + + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (9,9)) - comparison_h = np.asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, (2*3*21) + (4*3*5) + (-2*3*6) + (-8*3*7), (2*2*21) + (4*2*5) + (-2*2*6) + (-8*2*7), 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 2*1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], - dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (9, 9)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [ + 0, + (2 * 3 * 21) + (4 * 3 * 5) + (-2 * 3 * 6) + (-8 * 3 * 7), + (2 * 2 * 21) + (4 * 2 * 5) + (-2 * 2 * 6) + (-8 * 2 * 7), + 0, + 0, + 0, + 0, + 0, + 0, + ], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 2 * 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(AttributeError): h = pyomo_nlp.evaluate_hessian_lag() - @unittest.skipIf(not cyipopt_available, - "CyIpopt needed to run tests with solve") + @unittest.skipIf(not cyipopt_available, "CyIpopt needed to run tests with solve") def test_external_greybox_solve(self): - self._test_external_greybox_solve(ex_models.PressureDropTwoEqualitiesTwoOutputs(), False) - self._test_external_greybox_solve(ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True) + self._test_external_greybox_solve( + ex_models.PressureDropTwoEqualitiesTwoOutputs(), False + ) + self._test_external_greybox_solve( + ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True + ) def _test_external_greybox_solve(self, ex_model, hessian_support): m = pyo.ConcreteModel() - m.mu = pyo.Var(bounds=(0,None), initialize=1) + m.mu = pyo.Var(bounds=(0, None), initialize=1) m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_model) - m.ccon = pyo.Constraint(expr = m.egb.inputs['c'] == 128/(3.14*1e-4)*m.mu*m.egb.inputs['F']) - m.pcon = pyo.Constraint(expr = m.egb.inputs['Pin'] - m.egb.outputs['Pout'] <= 72) - m.pincon = pyo.Constraint(expr = m.egb.inputs['Pin'] == 100.0) + m.ccon = pyo.Constraint( + expr=m.egb.inputs['c'] == 128 / (3.14 * 1e-4) * m.mu * m.egb.inputs['F'] + ) + m.pcon = pyo.Constraint(expr=m.egb.inputs['Pin'] - m.egb.outputs['Pout'] <= 72) + m.pincon = pyo.Constraint(expr=m.egb.inputs['Pin'] == 100.0) m.egb.inputs['Pin'].value = 100 m.egb.inputs['Pin'].setlb(50) m.egb.inputs['Pin'].setub(150) @@ -1255,12 +1776,14 @@ def _test_external_greybox_solve(self, ex_model, hessian_support): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(10) m.egb.outputs['Pout'].setub(70) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2 + (m.egb.inputs['F']-3)**2) + m.obj = pyo.Objective( + expr=(m.egb.outputs['Pout'] - 20) ** 2 + (m.egb.inputs['F'] - 3) ** 2 + ) solver = pyo.SolverFactory('cyipopt') if not hessian_support: - solver.config.options = {'hessian_approximation':'limited-memory'} + solver.config.options = {'hessian_approximation': 'limited-memory'} status = solver.solve(m, tee=False) self.assertAlmostEqual(pyo.value(m.egb.inputs['F']), 3.0, places=3) @@ -1272,15 +1795,15 @@ def _test_external_greybox_solve(self, ex_model, hessian_support): self.assertAlmostEqual(pyo.value(m.egb.inputs['P3']), 46.0, places=3) self.assertAlmostEqual(pyo.value(m.egb.outputs['P2']), 64.0, places=3) self.assertAlmostEqual(pyo.value(m.egb.inputs['F']), 3.0, places=3) - + def create_model_two_equalities_two_outputs(self, external_model): m = pyo.ConcreteModel() - m.hin = pyo.Var(bounds=(0,None), initialize=10) - m.hout = pyo.Var(bounds=(0,None)) + m.hin = pyo.Var(bounds=(0, None), initialize=10) + m.hout = pyo.Var(bounds=(0, None)) m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(external_model) - m.incon = pyo.Constraint(expr= 0 <= m.egb.inputs['Pin'] - 10*m.hin) - m.outcon = pyo.Constraint(expr= 0 == m.egb.outputs['Pout'] - 10*m.hout) + m.incon = pyo.Constraint(expr=0 <= m.egb.inputs['Pin'] - 10 * m.hin) + m.outcon = pyo.Constraint(expr=0 == m.egb.outputs['Pout'] - 10 * m.hout) m.egb.inputs['Pin'].value = 100 m.egb.inputs['Pin'].setlb(50) m.egb.inputs['Pin'].setub(150) @@ -1305,8 +1828,10 @@ def create_model_two_equalities_two_outputs(self, external_model): return m def test_scaling_all_missing(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputs()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputs() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoGreyBoxNLP(m) fs = pyomo_nlp.get_obj_scaling() xs = pyomo_nlp.get_primals_scaling() @@ -1316,127 +1841,202 @@ def test_scaling_all_missing(self): self.assertIsNone(cs) def test_scaling_pyomo_model_only(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputs()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputs() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - #m.scaling_factor[m.obj] = 0.1 # scale the objective - m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable - m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable - m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable - #m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable - m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable - m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable - m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable - #m.scaling_factor[m.hin] = 1.8 + # m.scaling_factor[m.obj] = 0.1 # scale the objective + m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable + m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable + m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable + # m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable + m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable + m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable + m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable + # m.scaling_factor[m.hin] = 1.8 m.scaling_factor[m.hout] = 1.9 - #m.scaling_factor[m.incon] = 2.1 + # m.scaling_factor[m.incon] = 2.1 m.scaling_factor[m.outcon] = 2.2 pyomo_nlp = PyomoGreyBoxNLP(m) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]', - 'hin', 'hout'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + 'hin', + 'hout', + ] x_order = pyomo_nlp.variable_names() - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.P2_con', 'egb.Pout_con', 'incon', 'outcon'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.P2_con', + 'egb.Pout_con', + 'incon', + 'outcon', + ] c_order = pyomo_nlp.constraint_names() fs = pyomo_nlp.get_obj_scaling() self.assertEqual(fs, 1.0) - + xs = pyomo_nlp.get_primals_scaling() - comparison_xs = np.asarray([1.1, 1.2, 1.3, 1.0, 1.5, 1.6, 1.7, 1.0, 1.9], dtype=np.float64) - check_vectors_specific_order(self, xs, x_order, comparison_xs, comparison_x_order) + comparison_xs = np.asarray( + [1.1, 1.2, 1.3, 1.0, 1.5, 1.6, 1.7, 1.0, 1.9], dtype=np.float64 + ) + check_vectors_specific_order( + self, xs, x_order, comparison_xs, comparison_x_order + ) cs = pyomo_nlp.get_constraints_scaling() comparison_cs = np.asarray([1, 1, 1, 1, 1, 2.2], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) def test_scaling_greybox_only(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoGreyBoxNLP(m) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]', - 'hin', 'hout'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + 'hin', + 'hout', + ] x_order = pyomo_nlp.variable_names() - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.P2_con', 'egb.Pout_con', 'incon', 'outcon'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.P2_con', + 'egb.Pout_con', + 'incon', + 'outcon', + ] c_order = pyomo_nlp.constraint_names() fs = pyomo_nlp.get_obj_scaling() self.assertEqual(fs, 1.0) - + xs = pyomo_nlp.get_primals_scaling() comparison_xs = np.asarray([1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64) - check_vectors_specific_order(self, xs, x_order, comparison_xs, comparison_x_order) + check_vectors_specific_order( + self, xs, x_order, comparison_xs, comparison_x_order + ) cs = pyomo_nlp.get_constraints_scaling() comparison_cs = np.asarray([3.1, 3.2, 4.1, 4.2, 1, 1], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) - - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleEqualities()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) + + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleEqualities() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoGreyBoxNLP(m) cs = pyomo_nlp.get_constraints_scaling() comparison_cs = np.asarray([3.1, 3.2, 1, 1, 1, 1], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) - - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleOutputs()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) + + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleOutputs() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoGreyBoxNLP(m) cs = pyomo_nlp.get_constraints_scaling() comparison_cs = np.asarray([1, 1, 4.1, 4.2, 1, 1], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) def test_scaling_pyomo_model_and_greybox(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - #m.scaling_factor[m.obj] = 0.1 # scale the objective - m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable - m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable - m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable - #m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable - m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable - m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable - m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable - #m.scaling_factor[m.hin] = 1.8 + # m.scaling_factor[m.obj] = 0.1 # scale the objective + m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable + m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable + m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable + # m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable + m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable + m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable + m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable + # m.scaling_factor[m.hin] = 1.8 m.scaling_factor[m.hout] = 1.9 - #m.scaling_factor[m.incon] = 2.1 + # m.scaling_factor[m.incon] = 2.1 m.scaling_factor[m.outcon] = 2.2 pyomo_nlp = PyomoGreyBoxNLP(m) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]', - 'hin', 'hout'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + 'hin', + 'hout', + ] x_order = pyomo_nlp.variable_names() - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.P2_con', 'egb.Pout_con', 'incon', 'outcon'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.P2_con', + 'egb.Pout_con', + 'incon', + 'outcon', + ] c_order = pyomo_nlp.constraint_names() fs = pyomo_nlp.get_obj_scaling() self.assertEqual(fs, 1.0) - + xs = pyomo_nlp.get_primals_scaling() - comparison_xs = np.asarray([1.1, 1.2, 1.3, 1.0, 1.5, 1.6, 1.7, 1.0, 1.9], dtype=np.float64) - check_vectors_specific_order(self, xs, x_order, comparison_xs, comparison_x_order) + comparison_xs = np.asarray( + [1.1, 1.2, 1.3, 1.0, 1.5, 1.6, 1.7, 1.0, 1.9], dtype=np.float64 + ) + check_vectors_specific_order( + self, xs, x_order, comparison_xs, comparison_x_order + ) cs = pyomo_nlp.get_constraints_scaling() comparison_cs = np.asarray([3.1, 3.2, 4.1, 4.2, 1, 2.2], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) - @unittest.skipIf(not cyipopt_available, - "CyIpopt needed to run tests with solve") + @unittest.skipIf(not cyipopt_available, "CyIpopt needed to run tests with solve") def test_external_greybox_solve_scaling(self): m = pyo.ConcreteModel() - m.mu = pyo.Var(bounds=(0,None), initialize=1) + m.mu = pyo.Var(bounds=(0, None), initialize=1) m.egb = ExternalGreyBoxBlock() - m.egb.set_external_model(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth()) - m.ccon = pyo.Constraint(expr = m.egb.inputs['c'] == 128/(3.14*1e-4)*m.mu*m.egb.inputs['F']) - m.pcon = pyo.Constraint(expr = m.egb.inputs['Pin'] - m.egb.outputs['Pout'] <= 72) - m.pincon = pyo.Constraint(expr = m.egb.inputs['Pin'] == 100.0) + m.egb.set_external_model( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth() + ) + m.ccon = pyo.Constraint( + expr=m.egb.inputs['c'] == 128 / (3.14 * 1e-4) * m.mu * m.egb.inputs['F'] + ) + m.pcon = pyo.Constraint(expr=m.egb.inputs['Pin'] - m.egb.outputs['Pout'] <= 72) + m.pincon = pyo.Constraint(expr=m.egb.inputs['Pin'] == 100.0) m.egb.inputs['Pin'].value = 100 m.egb.inputs['Pin'].setlb(50) m.egb.inputs['Pin'].setub(150) @@ -1458,26 +2058,30 @@ def test_external_greybox_solve_scaling(self): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(10) m.egb.outputs['Pout'].setub(70) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2 + (m.egb.inputs['F']-3)**2) + m.obj = pyo.Objective( + expr=(m.egb.outputs['Pout'] - 20) ** 2 + (m.egb.inputs['F'] - 3) ** 2 + ) m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - m.scaling_factor[m.obj] = 0.1 # scale the objective - m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable - m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable - m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable - #m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable - m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable - m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable - m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable + m.scaling_factor[m.obj] = 0.1 # scale the objective + m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable + m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable + m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable + # m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable + m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable + m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable + m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable m.scaling_factor[m.mu] = 1.9 m.scaling_factor[m.pincon] = 2.2 solver = pyo.SolverFactory('cyipopt') - solver.config.options = {'hessian_approximation':'limited-memory', - 'nlp_scaling_method': 'user-scaling', - 'output_file': '_cyipopt-external-greybox-scaling.log', - 'file_print_level':10, - 'max_iter': 0} + solver.config.options = { + 'hessian_approximation': 'limited-memory', + 'nlp_scaling_method': 'user-scaling', + 'output_file': '_cyipopt-external-greybox-scaling.log', + 'file_print_level': 10, + 'max_iter': 0, + } status = solver.solve(m, tee=False) with open('_cyipopt-external-greybox-scaling.log', 'r') as fd: @@ -1485,7 +2089,9 @@ def test_external_greybox_solve_scaling(self): os.remove('_cyipopt-external-greybox-scaling.log') self.assertIn('nlp_scaling_method = user-scaling', solver_trace) - self.assertIn('output_file = _cyipopt-external-greybox-scaling.log', solver_trace) + self.assertIn( + 'output_file = _cyipopt-external-greybox-scaling.log', solver_trace + ) self.assertIn('objective scaling factor = 0.1', solver_trace) self.assertIn('x scaling provided', solver_trace) self.assertIn('c scaling provided', solver_trace) @@ -1493,23 +2099,54 @@ def test_external_greybox_solve_scaling(self): # x order: ['egb.inputs[F]', 'mu', 'egb.outputs[Pout]', 'egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[P1]', 'egb.inputs[P3]', 'egb.outputs[P2]'] # c order: ['ccon', 'pcon', 'pincon', 'egb.pdrop1', 'egb.pdrop3', 'egb.P2_con', 'egb.Pout_con'] self.assertIn('DenseVector "x scaling vector" with 8 elements:', solver_trace) - self.assertIn('x scaling vector[ 1]= 1.3000000000000000e+00', solver_trace) # F - self.assertIn('x scaling vector[ 8]= 1.8999999999999999e+00', solver_trace) # mu - self.assertIn('x scaling vector[ 7]= 1.7000000000000000e+00', solver_trace) # Pout - self.assertIn('x scaling vector[ 4]= 1.1000000000000001e+00', solver_trace) # Pin - self.assertIn('x scaling vector[ 5]= 1.2000000000000000e+00', solver_trace) # c - self.assertIn('x scaling vector[ 2]= 1.0000000000000000e+00', solver_trace) # P1 - self.assertIn('x scaling vector[ 3]= 1.5000000000000000e+00', solver_trace) # P3 - self.assertIn('x scaling vector[ 6]= 1.6000000000000001e+00', solver_trace) # P2 - self.assertIn('DenseVector "c scaling vector" with 6 elements:', solver_trace) - self.assertIn('c scaling vector[ 1]= 1.0000000000000000e+00', solver_trace) # ccon - self.assertIn('c scaling vector[ 2]= 2.2000000000000002e+00', solver_trace) # pincon - self.assertIn('c scaling vector[ 3]= 3.1000000000000001e+00', solver_trace) # pdrop1 - self.assertIn('c scaling vector[ 4]= 3.2000000000000002e+00', solver_trace) # pdrop3 - self.assertIn('c scaling vector[ 5]= 4.0999999999999996e+00', solver_trace) # P2_con - self.assertIn('c scaling vector[ 6]= 4.2000000000000002e+00', solver_trace) # Pout_con + self.assertIn( + 'x scaling vector[ 1]= 1.3000000000000000e+00', solver_trace + ) # F + self.assertIn( + 'x scaling vector[ 8]= 1.8999999999999999e+00', solver_trace + ) # mu + self.assertIn( + 'x scaling vector[ 7]= 1.7000000000000000e+00', solver_trace + ) # Pout + self.assertIn( + 'x scaling vector[ 4]= 1.1000000000000001e+00', solver_trace + ) # Pin + self.assertIn( + 'x scaling vector[ 5]= 1.2000000000000000e+00', solver_trace + ) # c + self.assertIn( + 'x scaling vector[ 2]= 1.0000000000000000e+00', solver_trace + ) # P1 + self.assertIn( + 'x scaling vector[ 3]= 1.5000000000000000e+00', solver_trace + ) # P3 + self.assertIn( + 'x scaling vector[ 6]= 1.6000000000000001e+00', solver_trace + ) # P2 + self.assertIn('DenseVector "c scaling vector" with 6 elements:', solver_trace) + self.assertIn( + 'c scaling vector[ 1]= 1.0000000000000000e+00', solver_trace + ) # ccon + self.assertIn( + 'c scaling vector[ 2]= 2.2000000000000002e+00', solver_trace + ) # pincon + self.assertIn( + 'c scaling vector[ 3]= 3.1000000000000001e+00', solver_trace + ) # pdrop1 + self.assertIn( + 'c scaling vector[ 4]= 3.2000000000000002e+00', solver_trace + ) # pdrop3 + self.assertIn( + 'c scaling vector[ 5]= 4.0999999999999996e+00', solver_trace + ) # P2_con + self.assertIn( + 'c scaling vector[ 6]= 4.2000000000000002e+00', solver_trace + ) # Pout_con self.assertIn('DenseVector "d scaling vector" with 1 elements:', solver_trace) - self.assertIn('d scaling vector[ 1]= 1.0000000000000000e+00', solver_trace) # pcon + self.assertIn( + 'd scaling vector[ 1]= 1.0000000000000000e+00', solver_trace + ) # pcon + if __name__ == '__main__': TestPyomoGreyBoxNLP().test_external_greybox_solve(self) diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_block.py b/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_block.py index 2367cbb1d29..2d758e2e1a9 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_block.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_block.py @@ -16,7 +16,10 @@ import pyomo.environ as pyo from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) if not (numpy_available and scipy_available): @@ -25,14 +28,15 @@ from pyomo.common.dependencies.scipy import sparse as sps from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): - raise unittest.SkipTest( - "Pynumero needs the ASL extension to run cyipopt tests") + raise unittest.SkipTest("Pynumero needs the ASL extension to run cyipopt tests") -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - cyipopt_available, +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import cyipopt_available +from pyomo.contrib.pynumero.algorithms.solvers.implicit_functions import ( + CyIpoptSolverWrapper, + ImplicitFunctionSolver, ) -import pyomo.contrib.pynumero.interfaces.external_pyomo_model as epm_module from pyomo.contrib.pynumero.interfaces.external_pyomo_model import ( ExternalPyomoModel, get_hessian_of_constraint, @@ -50,9 +54,7 @@ ) if not pyo.SolverFactory("ipopt").available(): - raise unittest.SkipTest( - "Need IPOPT to run ExternalPyomoModel tests" - ) + raise unittest.SkipTest("Need IPOPT to run ExternalPyomoModel tests") def _make_external_model(): @@ -66,12 +68,12 @@ def _make_external_model(): m.y_out = pyo.Var() m.c_out_1 = pyo.Constraint(expr=m.x_out - m.x == 0) m.c_out_2 = pyo.Constraint(expr=m.y_out - m.y == 0) - m.c_ex_1 = pyo.Constraint(expr= - m.x**3 - 2*m.y == m.a**2 + m.b**3 - m.r**3 - 2 - ) - m.c_ex_2 = pyo.Constraint(expr= - m.x + m.y**3 == m.a**3 + 2*m.b**2 + m.r**2 + 1 - ) + m.c_ex_1 = pyo.Constraint( + expr=m.x**3 - 2 * m.y == m.a**2 + m.b**3 - m.r**3 - 2 + ) + m.c_ex_2 = pyo.Constraint( + expr=m.x + m.y**3 == m.a**3 + 2 * m.b**2 + m.r**2 + 1 + ) return m @@ -90,8 +92,7 @@ def linking_constraint_rule(m, i): elif i == 2: return m.r == m.ex_block.inputs["input_2"] - m.linking_constraint = pyo.Constraint(range(n_inputs), - rule=linking_constraint_rule) + m.linking_constraint = pyo.Constraint(range(n_inputs), rule=linking_constraint_rule) def _add_nonlinear_linking_constraints(m): @@ -106,14 +107,13 @@ def _add_nonlinear_linking_constraints(m): def linking_constraint_rule(m, i): if i == 0: - return m.a**2 - 0.5*m.ex_block.inputs["input_0"]**2 == 0 + return m.a**2 - 0.5 * m.ex_block.inputs["input_0"] ** 2 == 0 elif i == 1: - return m.b**2 - 0.5*m.ex_block.inputs["input_1"]**2 == 0 + return m.b**2 - 0.5 * m.ex_block.inputs["input_1"] ** 2 == 0 elif i == 2: - return m.r**2 - 0.5*m.ex_block.inputs["input_2"]**2 == 0 + return m.r**2 - 0.5 * m.ex_block.inputs["input_2"] ** 2 == 0 - m.linking_constraint = pyo.Constraint(range(n_inputs), - rule=linking_constraint_rule) + m.linking_constraint = pyo.Constraint(range(n_inputs), rule=linking_constraint_rule) def make_dynamic_model(): @@ -121,7 +121,7 @@ def make_dynamic_model(): m.time = pyo.Set(initialize=[0, 1, 2]) m = pyo.ConcreteModel() - m.time = pyo.Set(initialize=[0, 1, 2]) + m.time = pyo.Set(initialize=[0, 1, 2]) t0 = m.time.first() m.h = pyo.Var(m.time, initialize=1.0) @@ -133,6 +133,7 @@ def make_dynamic_model(): def h_diff_eqn_rule(m, t): return m.dhdt[t] - (m.flow_in[t] - m.flow_out[t]) == 0 + m.h_diff_eqn = pyo.Constraint(m.time, rule=h_diff_eqn_rule) def dhdt_disc_eqn_rule(m, t): @@ -140,19 +141,20 @@ def dhdt_disc_eqn_rule(m, t): return pyo.Constraint.Skip else: t_prev = m.time.prev(t) - delta_t = (t - t_prev) - return m.dhdt[t] - delta_t*(m.h[t] - m.h[t_prev]) == 0 + delta_t = t - t_prev + return m.dhdt[t] - delta_t * (m.h[t] - m.h[t_prev]) == 0 + m.dhdt_disc_eqn = pyo.Constraint(m.time, rule=dhdt_disc_eqn_rule) def flow_out_eqn(m, t): - return m.flow_out[t] == m.flow_coef*m.h[t]**0.5 + return m.flow_out[t] == m.flow_coef * m.h[t] ** 0.5 + m.flow_out_eqn = pyo.Constraint(m.time, rule=flow_out_eqn) return m class TestExternalGreyBoxBlock(unittest.TestCase): - def test_construct_scalar(self): m = pyo.ConcreteModel() m.ex_block = ExternalGreyBoxBlock(concrete=True) @@ -165,11 +167,8 @@ def test_construct_scalar(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, external_vars, residual_cons, external_cons + ) block.set_external_model(ex_model) self.assertEqual(len(block.inputs), len(input_vars)) @@ -186,11 +185,8 @@ def test_construct_indexed(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, external_vars, residual_cons, external_cons + ) for i in block: b = block[i] @@ -211,11 +207,8 @@ def test_solve_square(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, external_vars, residual_cons, external_cons + ) block.set_external_model(ex_model) _add_linking_constraints(m) @@ -256,11 +249,8 @@ def test_optimize(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, external_vars, residual_cons, external_cons + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -268,18 +258,25 @@ def test_optimize(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) # Solve with external model embedded solver = pyo.SolverFactory("cyipopt") solver.solve(m) - m_ex.obj = pyo.Objective(expr= - (m_ex.x-2.0)**2 + (m_ex.y-2.0)**2 + (m_ex.a-2.0)**2 + - (m_ex.b-2.0)**2 + (m_ex.r-2.0)**2 - ) + m_ex.obj = pyo.Objective( + expr=(m_ex.x - 2.0) ** 2 + + (m_ex.y - 2.0) ** 2 + + (m_ex.a - 2.0) ** 2 + + (m_ex.b - 2.0) ** 2 + + (m_ex.r - 2.0) ** 2 + ) m_ex.a.set_value(0.0) m_ex.b.set_value(0.0) m_ex.r.set_value(0.0) @@ -299,8 +296,9 @@ def test_optimize(self): self.assertAlmostEqual(m_ex.y.value, y.value, delta=1e-8) @unittest.skipUnless(cyipopt_available, "cyipopt is not available") - def test_optimize_with_ipopt_for_inner_problem(self): - # Use Ipopt, rather than the default CyIpopt, for the inner problem + def test_optimize_with_cyipopt_for_inner_problem(self): + # Use CyIpopt, rather than the default SciPy solvers, + # for the inner problem m = pyo.ConcreteModel() m.ex_block = ExternalGreyBoxBlock(concrete=True) block = m.ex_block @@ -310,15 +308,19 @@ def test_optimize_with_ipopt_for_inner_problem(self): external_vars = [m_ex.x, m_ex.y] residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] - inner_solver = pyo.SolverFactory("ipopt") + + # This passes options to the internal ImplicitFunctionSolver, + # which by default is SccImplicitFunctionSolver. + # This option tells it what solver to use subsystems in its + # decomposition. + solver_options = dict(solver_class=CyIpoptSolverWrapper) ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - solver=inner_solver, - use_cyipopt=False, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + solver_options=solver_options, + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -326,18 +328,25 @@ def test_optimize_with_ipopt_for_inner_problem(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) # Solve with external model embedded solver = pyo.SolverFactory("cyipopt") solver.solve(m) - m_ex.obj = pyo.Objective(expr= - (m_ex.x-2.0)**2 + (m_ex.y-2.0)**2 + (m_ex.a-2.0)**2 + - (m_ex.b-2.0)**2 + (m_ex.r-2.0)**2 - ) + m_ex.obj = pyo.Objective( + expr=(m_ex.x - 2.0) ** 2 + + (m_ex.y - 2.0) ** 2 + + (m_ex.a - 2.0) ** 2 + + (m_ex.b - 2.0) ** 2 + + (m_ex.r - 2.0) ** 2 + ) m_ex.a.set_value(0.0) m_ex.b.set_value(0.0) m_ex.r.set_value(0.0) @@ -357,11 +366,11 @@ def test_optimize_with_ipopt_for_inner_problem(self): self.assertAlmostEqual(m_ex.y.value, y.value, delta=1e-8) @unittest.skipUnless(cyipopt_available, "cyipopt is not available") - def test_optimize_no_cyipopt_for_inner_problem(self): - # Here we don't specify a solver for the inner problem. - # This is contrived, as clearly CyIpopt is available for the outer - # solve. This test exercises the part of the ExternalPyomoModel - # constructor that sets a default solver if CyIpopt is not available. + def test_optimize_no_decomposition(self): + # This is a test that does not use the SCC decomposition + # to converge the implicit function. We do this by passing + # solver_class=ImplicitFunctionSolver rather than the default, + # SccImplicitFunctionSolver m = pyo.ConcreteModel() m.ex_block = ExternalGreyBoxBlock(concrete=True) block = m.ex_block @@ -372,11 +381,12 @@ def test_optimize_no_cyipopt_for_inner_problem(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, + external_vars, + residual_cons, + external_cons, + solver_class=ImplicitFunctionSolver, + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -384,18 +394,25 @@ def test_optimize_no_cyipopt_for_inner_problem(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) # Solve with external model embedded solver = pyo.SolverFactory("cyipopt") solver.solve(m) - m_ex.obj = pyo.Objective(expr= - (m_ex.x-2.0)**2 + (m_ex.y-2.0)**2 + (m_ex.a-2.0)**2 + - (m_ex.b-2.0)**2 + (m_ex.r-2.0)**2 - ) + m_ex.obj = pyo.Objective( + expr=(m_ex.x - 2.0) ** 2 + + (m_ex.y - 2.0) ** 2 + + (m_ex.a - 2.0) ** 2 + + (m_ex.b - 2.0) ** 2 + + (m_ex.r - 2.0) ** 2 + ) m_ex.a.set_value(0.0) m_ex.b.set_value(0.0) m_ex.r.set_value(0.0) @@ -425,20 +442,19 @@ def test_construct_dynamic(self): ext_cons = [m.flow_out_eqn] external_model_dict = { - t: ExternalPyomoModel( - [var[t] for var in inputs], - [var[t] for var in ext_vars], - [con[t] for con in residuals], - [con[t] for con in ext_cons], - ) - for t in time - } + t: ExternalPyomoModel( + [var[t] for var in inputs], + [var[t] for var in ext_vars], + [con[t] for con in residuals], + [con[t] for con in ext_cons], + ) + for t in time + } reduced_space = pyo.Block(concrete=True) reduced_space.external_block = ExternalGreyBoxBlock( - time, - external_model=external_model_dict, - ) + time, external_model=external_model_dict + ) block = reduced_space.external_block block[t0].deactivate() self.assertIs(type(block), IndexedExternalGreyBoxBlock) @@ -457,10 +473,10 @@ def test_construct_dynamic(self): pyomo_vars = list(reduced_space.component_data_objects(pyo.Var)) pyomo_cons = list(reduced_space.component_data_objects(pyo.Constraint)) # NOTE: Variables in the EGBB are not found by component_data_objects - self.assertEqual(len(pyomo_vars), len(inputs)*len(time)) + self.assertEqual(len(pyomo_vars), len(inputs) * len(time)) # "Constraints" defined by the EGBB are not found either, although # this is expected. - self.assertEqual(len(pyomo_cons), len(time)-1) + self.assertEqual(len(pyomo_cons), len(time) - 1) reduced_space._obj = pyo.Objective(expr=0) @@ -472,24 +488,24 @@ def test_construct_dynamic(self): # This is necessary for these variables to appear in the PNLPwGBB. # Otherwise they don't appear in any "real" constraints of the - # PyomoNLP. + # PyomoNLP. reduced_space.const_input_eqn = pyo.Constraint( expr=reduced_space.input_var[2] - reduced_space.input_var[1] == 0 - ) + ) nlp = PyomoNLPWithGreyBoxBlocks(reduced_space) self.assertEqual( - nlp.n_primals(), - # EGBB "inputs", dhdt, and flow_in exist for t != t0. - # h exists for all time. - (2+len(inputs))*(len(time)-1) + len(time), - ) + nlp.n_primals(), + # EGBB "inputs", dhdt, and flow_in exist for t != t0. + # h exists for all time. + (2 + len(inputs)) * (len(time) - 1) + len(time), + ) self.assertEqual( - nlp.n_constraints(), - # EGBB equality constraints and disc_eqn exist for t != t0. - # const_input_eqn is a single constraint - (len(residuals)+1)*(len(time)-1) + 1, - ) + nlp.n_constraints(), + # EGBB equality constraints and disc_eqn exist for t != t0. + # const_input_eqn is a single constraint + (len(residuals) + 1) * (len(time) - 1) + 1, + ) @unittest.skipUnless(cyipopt_available, "cyipopt is not available") def test_solve_square_dynamic(self): @@ -518,14 +534,12 @@ def test_solve_square_dynamic(self): residual_cons = [m.h_diff_eqn[t]] external_cons = [m.flow_out_eqn[t]] external_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, external_vars, residual_cons, external_cons + ) block[t].set_external_model(external_model) n_inputs = len(input_vars) + def linking_constraint_rule(m, i, t): if t == t0: return pyo.Constraint.Skip @@ -535,10 +549,8 @@ def linking_constraint_rule(m, i, t): return m.deriv_var[t] == m.external_block[t].inputs["input_1"] reduced_space.linking_constraint = pyo.Constraint( - range(n_inputs), - time, - rule=linking_constraint_rule, - ) + range(n_inputs), time, rule=linking_constraint_rule + ) # Initialize new variables for t in time: if t != t0: @@ -571,9 +583,7 @@ def test_optimize_dynamic(self): m.h[t0].fix(1.2) m.flow_in[t0].fix(1.5) - m.obj = pyo.Objective(expr=sum( - (m.h[t] - 2.0)**2 for t in m.time if t != t0 - )) + m.obj = pyo.Objective(expr=sum((m.h[t] - 2.0) ** 2 for t in m.time if t != t0)) # Create the block that will hold the reduced space model. reduced_space = pyo.Block(concrete=True) @@ -594,14 +604,12 @@ def test_optimize_dynamic(self): residual_cons = [m.h_diff_eqn[t]] external_cons = [m.flow_out_eqn[t]] external_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, external_vars, residual_cons, external_cons + ) block[t].set_external_model(external_model) n_inputs = len(input_vars) + def linking_constraint_rule(m, i, t): if t == t0: return pyo.Constraint.Skip @@ -613,10 +621,8 @@ def linking_constraint_rule(m, i, t): return m.input_var[t] == m.external_block[t].inputs["input_2"] reduced_space.linking_constraint = pyo.Constraint( - range(n_inputs), - time, - rule=linking_constraint_rule, - ) + range(n_inputs), time, rule=linking_constraint_rule + ) # Initialize new variables for t in time: if t != t0: @@ -636,10 +642,18 @@ def linking_constraint_rule(m, i, t): for t in time: if t == t0: continue - values = [m.h[t].value, m.dhdt[t].value, - m.flow_out[t].value, m.flow_in[t].value] - target_values = [h_target[t], dhdt_target[t], - flow_out_target[t], flow_in_target[t]] + values = [ + m.h[t].value, + m.dhdt[t].value, + m.flow_out[t].value, + m.flow_in[t].value, + ] + target_values = [ + h_target[t], + dhdt_target[t], + flow_out_target[t], + flow_in_target[t], + ] self.assertStructuredAlmostEqual(values, target_values, delta=1e-5) @unittest.skipUnless(cyipopt_available, "cyipopt is not available") @@ -655,9 +669,7 @@ def test_optimize_dynamic_references(self): m.h[t0].fix(1.2) m.flow_in[t0].fix(1.5) - m.obj = pyo.Objective(expr=sum( - (m.h[t] - 2.0)**2 for t in m.time if t != t0 - )) + m.obj = pyo.Objective(expr=sum((m.h[t] - 2.0) ** 2 for t in m.time if t != t0)) # Create the block that will hold the reduced space model. reduced_space = pyo.Block(concrete=True) @@ -678,11 +690,8 @@ def test_optimize_dynamic_references(self): residual_cons = [m.h_diff_eqn[t]] external_cons = [m.flow_out_eqn[t]] external_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - ) + input_vars, external_vars, residual_cons, external_cons + ) block[t].set_external_model(external_model, inputs=input_vars) solver = pyo.SolverFactory("cyipopt") @@ -697,15 +706,22 @@ def test_optimize_dynamic_references(self): for t in time: if t == t0: continue - values = [m.h[t].value, m.dhdt[t].value, - m.flow_out[t].value, m.flow_in[t].value] - target_values = [h_target[t], dhdt_target[t], - flow_out_target[t], flow_in_target[t]] + values = [ + m.h[t].value, + m.dhdt[t].value, + m.flow_out[t].value, + m.flow_in[t].value, + ] + target_values = [ + h_target[t], + dhdt_target[t], + flow_out_target[t], + flow_in_target[t], + ] self.assertStructuredAlmostEqual(values, target_values, delta=1e-5) class TestPyomoNLPWithGreyBoxBLocks(unittest.TestCase): - def test_set_and_evaluate(self): m = pyo.ConcreteModel() m.ex_block = ExternalGreyBoxBlock(concrete=True) @@ -717,12 +733,8 @@ def test_set_and_evaluate(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=False, - ) + input_vars, external_vars, residual_cons, external_cons + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -730,9 +742,13 @@ def test_set_and_evaluate(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) _add_linking_constraints(m) @@ -747,15 +763,15 @@ def test_set_and_evaluate(self): # PyomoNLPWithGreyBoxBlocks sorts variables by name primals_names = [ - "a", - "b", - "ex_block.inputs[input_0]", - "ex_block.inputs[input_1]", - "ex_block.inputs[input_2]", - "ex_block.inputs[input_3]", - "ex_block.inputs[input_4]", - "r", - ] + "a", + "b", + "ex_block.inputs[input_0]", + "ex_block.inputs[input_1]", + "ex_block.inputs[input_2]", + "ex_block.inputs[input_3]", + "ex_block.inputs[input_4]", + "r", + ] self.assertEqual(nlp.primals_names(), primals_names) np.testing.assert_equal(np.zeros(8), nlp.get_primals()) @@ -769,25 +785,26 @@ def test_set_and_evaluate(self): self.assertEqual(var.value, val) constraint_names = [ - "linking_constraint[0]", - "linking_constraint[1]", - "linking_constraint[2]", - "ex_block.residual_0", - "ex_block.residual_1", - ] + "linking_constraint[0]", + "linking_constraint[1]", + "linking_constraint[2]", + "ex_block.residual_0", + "ex_block.residual_1", + ] self.assertEqual(constraint_names, nlp.constraint_names()) - residuals = np.array([ + residuals = np.array( + [ -2.0, -2.0, 3.0, # These values were obtained by solving the same system # with Ipopt in another script. It may be better to do # the solve in this test in case the system changes. - 5.0-(-3.03051522), - 6.0-3.583839997, - ]) - np.testing.assert_allclose(residuals, nlp.evaluate_constraints(), - rtol=1e-8) + 5.0 - (-3.03051522), + 6.0 - 3.583839997, + ] + ) + np.testing.assert_allclose(residuals, nlp.evaluate_constraints(), rtol=1e-8) duals = np.array([1, 2, 3, 4, 5]) nlp.set_duals(duals) @@ -806,12 +823,8 @@ def test_jacobian(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=False, - ) + input_vars, external_vars, residual_cons, external_cons + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -819,9 +832,13 @@ def test_jacobian(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) _add_linking_constraints(m) @@ -850,27 +867,26 @@ def test_jacobian(self): # "ex_block.inputs[input_4]", # "r", # ] - row = [ - 0, 0, - 1, 1, - 2, 2, - 3, 3, 3, 3, 3, - 4, 4, 4, 4, 4, - ] - col = [ - 0, 2, - 1, 3, - 7, 4, - 2, 3, 4, 5, 6, - 2, 3, 4, 5, 6, - ] + row = [0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4] + col = [0, 2, 1, 3, 7, 4, 2, 3, 4, 5, 6, 2, 3, 4, 5, 6] data = [ - 1, -1, - 1, -1, - 1, -1, - -0.16747094, -1.00068434, 1.72383729, 1, 0, - -0.30708535, -0.28546127, -0.25235924, 0, 1, - ] + 1, + -1, + 1, + -1, + 1, + -1, + -0.16747094, + -1.00068434, + 1.72383729, + 1, + 0, + -0.30708535, + -0.28546127, + -0.25235924, + 0, + 1, + ] self.assertEqual(len(row), len(jac.row)) rcd_dict = dict(((i, j), val) for i, j, val in zip(row, col, data)) for i, j, val in zip(jac.row, jac.col, jac.data): @@ -889,12 +905,8 @@ def test_hessian_1(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=False, - ) + input_vars, external_vars, residual_cons, external_cons + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -902,9 +914,13 @@ def test_hessian_1(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) _add_nonlinear_linking_constraints(m) @@ -941,18 +957,18 @@ def test_hessian_1(self): # while writing this test, which is just meant to verify # that the different Hessians combined properly. ex_block_nonzeros = { - (2, 2): 2.0 + (-1.0) + (-0.10967928) + (-0.25595929), - (2, 3): (-0.10684633) + (0.05169308), - (3, 2): (-0.10684633) + (0.05169308), - (2, 4): (0.19329898) + (0.03823075), - (4, 2): (0.19329898) + (0.03823075), - (3, 3): 2.0 + (-1.0) + (-1.31592135) + (-0.0241836), - (3, 4): (1.13920361) + (0.01063667), - (4, 3): (1.13920361) + (0.01063667), - (4, 4): 2.0 + (-1.0) + (-1.0891866) + (0.01190218), - (5, 5): 2.0, - (6, 6): 2.0, - } + (2, 2): 2.0 + (-1.0) + (-0.10967928) + (-0.25595929), + (2, 3): (-0.10684633) + (0.05169308), + (3, 2): (-0.10684633) + (0.05169308), + (2, 4): (0.19329898) + (0.03823075), + (4, 2): (0.19329898) + (0.03823075), + (3, 3): 2.0 + (-1.0) + (-1.31592135) + (-0.0241836), + (3, 4): (1.13920361) + (0.01063667), + (4, 3): (1.13920361) + (0.01063667), + (4, 4): 2.0 + (-1.0) + (-1.0891866) + (0.01190218), + (5, 5): 2.0, + (6, 6): 2.0, + } rcd_dict.update(ex_block_nonzeros) # Because "external Hessians" are computed by factorizing matrices, @@ -981,12 +997,8 @@ def test_hessian_2(self): residual_cons = [m_ex.c_out_1, m_ex.c_out_2] external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=False, - ) + input_vars, external_vars, residual_cons, external_cons + ) block.set_external_model(ex_model) a = m.ex_block.inputs["input_0"] @@ -994,9 +1006,13 @@ def test_hessian_2(self): r = m.ex_block.inputs["input_2"] x = m.ex_block.inputs["input_3"] y = m.ex_block.inputs["input_4"] - m.obj = pyo.Objective(expr= - (x-2.0)**2 + (y-2.0)**2 + (a-2.0)**2 + (b-2.0)**2 + (r-2.0)**2 - ) + m.obj = pyo.Objective( + expr=(x - 2.0) ** 2 + + (y - 2.0) ** 2 + + (a - 2.0) ** 2 + + (b - 2.0) ** 2 + + (r - 2.0) ** 2 + ) _add_nonlinear_linking_constraints(m) @@ -1021,7 +1037,7 @@ def test_hessian_2(self): row = [0, 1, 7] col = [0, 1, 7] # Data entries are influenced by multiplier values. - data = [4.4*2.0, -3.3*2.0, 2.2*2.0] + data = [4.4 * 2.0, -3.3 * 2.0, 2.2 * 2.0] # ^ These variables only appear in linking constraints rcd_dict = dict(((i, j), val) for i, j, val in zip(row, col, data)) @@ -1032,18 +1048,18 @@ def test_hessian_2(self): # while writing this test, which is just meant to verify # that the different Hessians combined properly. ex_block_nonzeros = { - (2, 2): 2.0 + 4.4*(-1.0) + -1.1*(-0.10967928), - (2, 3): -1.1*(-0.10684633), - (3, 2): -1.1*(-0.10684633), - (2, 4): -1.1*(0.19329898), - (4, 2): -1.1*(0.19329898), - (3, 3): 2.0 + (-3.3)*(-1.0) + -1.1*(-1.31592135), - (3, 4): -1.1*(1.13920361), - (4, 3): -1.1*(1.13920361), - (4, 4): 2.0 + 2.2*(-1.0) + -1.1*(-1.0891866), - (5, 5): 2.0, - (6, 6): 2.0, - } + (2, 2): 2.0 + 4.4 * (-1.0) + -1.1 * (-0.10967928), + (2, 3): -1.1 * (-0.10684633), + (3, 2): -1.1 * (-0.10684633), + (2, 4): -1.1 * (0.19329898), + (4, 2): -1.1 * (0.19329898), + (3, 3): 2.0 + (-3.3) * (-1.0) + -1.1 * (-1.31592135), + (3, 4): -1.1 * (1.13920361), + (4, 3): -1.1 * (1.13920361), + (4, 4): 2.0 + 2.2 * (-1.0) + -1.1 * (-1.0891866), + (5, 5): 2.0, + (6, 6): 2.0, + } rcd_dict.update(ex_block_nonzeros) # Because "external Hessians" are computed by factorizing matrices, @@ -1086,7 +1102,7 @@ def _create_pressure_drop_model(self): m.c_con = pyo.Constraint(expr=m.c == 1.0) m.F_con = pyo.Constraint(expr=m.F == 10.0) m.P2_con = pyo.Constraint(expr=m.P2 <= 5.0) - m.obj = pyo.Objective(expr=(m.Pout - 3.0)**2) + m.obj = pyo.Objective(expr=(m.Pout - 3.0) ** 2) cons = [m.c_con, m.F_con, m.Pin_con, m.P2_con] inputs = [m.Pin, m.c, m.F] @@ -1095,11 +1111,7 @@ def _create_pressure_drop_model(self): # This is "model 3" from the external_grey_box_models.py file. ex_model = PressureDropTwoOutputsWithHessian() m.egb = ExternalGreyBoxBlock() - m.egb.set_external_model( - ex_model, - inputs=inputs, - outputs=outputs, - ) + m.egb.set_external_model(ex_model, inputs=inputs, outputs=outputs) return m def test_pressure_drop_model(self): @@ -1114,7 +1126,7 @@ def test_pressure_drop_model(self): # The references to inputs and outputs are not picked up twice, # as EGBB does not have ctype Block - self.assertEqual(len(pyomo_variables), len(inputs)+len(outputs)) + self.assertEqual(len(pyomo_variables), len(inputs) + len(outputs)) self.assertEqual(len(pyomo_constraints), len(cons)) # Test the inputs and outputs attributes on egb @@ -1137,20 +1149,20 @@ def test_pressure_drop_model_nlp(self): outputs = [m.P2, m.Pout] nlp = PyomoNLPWithGreyBoxBlocks(m) - + n_primals = len(inputs) + len(outputs) n_eq_con = len(cons) + len(outputs) self.assertEqual(nlp.n_primals(), n_primals) self.assertEqual(nlp.n_constraints(), n_eq_con) constraint_names = [ - "c_con", - "F_con", - "Pin_con", - "P2_con", - "egb.output_constraints[P2]", - "egb.output_constraints[Pout]", - ] + "c_con", + "F_con", + "Pin_con", + "P2_con", + "egb.output_constraints[P2]", + "egb.output_constraints[Pout]", + ] primals = inputs + outputs nlp_constraints = nlp.constraint_names() nlp_vars = nlp.primals_names() @@ -1166,8 +1178,7 @@ def test_pressure_drop_model_nlp(self): name = var.name var_idx_map[var] = nlp_vars.index(name) - incident_vars = {con.name: list(identify_variables(con.expr)) - for con in cons} + incident_vars = {con.name: list(identify_variables(con.expr)) for con in cons} incident_vars["egb.output_constraints[P2]"] = inputs + [outputs[0]] incident_vars["egb.output_constraints[Pout]"] = inputs + [outputs[1]] @@ -1185,62 +1196,5 @@ def test_pressure_drop_model_nlp(self): self.assertIn((i, j), expected_nonzeros) -class TestExceptions(unittest.TestCase): - - @unittest.skipUnless(cyipopt_available, "cyipopt is not available") - def test_solver_with_cyipopt(self): - # CyIpopt is required here just because we get a different error - # (see test below) if CyIpopt is unavailable. - m = pyo.ConcreteModel() - m.ex_block = ExternalGreyBoxBlock(concrete=True) - block = m.ex_block - - m_ex = _make_external_model() - input_vars = [m_ex.a, m_ex.b, m_ex.r, m_ex.x_out, m_ex.y_out] - external_vars = [m_ex.x, m_ex.y] - residual_cons = [m_ex.c_out_1, m_ex.c_out_2] - external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] - inner_solver = pyo.SolverFactory("ipopt") - msg = "Please set use_cyipopt to False" - with self.assertRaisesRegex(RuntimeError, msg): - ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - solver=inner_solver, - use_cyipopt=True, - ) - - def test_cyipopt_unavailable(self): - try: - # HACK: Make external_pyomo_model.py think that cyipopt is not - # available. - epm_module.cyipopt_available = False - - m = pyo.ConcreteModel() - m.ex_block = ExternalGreyBoxBlock(concrete=True) - block = m.ex_block - - m_ex = _make_external_model() - input_vars = [m_ex.a, m_ex.b, m_ex.r, m_ex.x_out, m_ex.y_out] - external_vars = [m_ex.x, m_ex.y] - residual_cons = [m_ex.c_out_1, m_ex.c_out_2] - external_cons = [m_ex.c_ex_1, m_ex.c_ex_2] - inner_solver = pyo.SolverFactory("ipopt") - msg = "Constructing an ExternalPyomoModel with CyIpopt unavailable" - with self.assertRaisesRegex(RuntimeError, msg): - ex_model = ExternalPyomoModel( - input_vars, - external_vars, - residual_cons, - external_cons, - use_cyipopt=True, - ) - finally: - # HACK: Reset the global flag - epm_module.cyipopt_available = cyipopt_available - - -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py b/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py index da6467d85bf..f808decf26c 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_external_pyomo_model.py @@ -14,7 +14,10 @@ import pyomo.environ as pyo from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) if not (numpy_available and scipy_available): @@ -23,13 +26,11 @@ from pyomo.common.dependencies.scipy import sparse as sps from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): - raise unittest.SkipTest( - "Pynumero needs the ASL extension to run cyipopt tests") + raise unittest.SkipTest("Pynumero needs the ASL extension to run cyipopt tests") -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - cyipopt_available, -) +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import cyipopt_available from pyomo.contrib.pynumero.interfaces.external_pyomo_model import ( ExternalPyomoModel, get_hessian_of_constraint, @@ -37,52 +38,45 @@ from pyomo.contrib.pynumero.interfaces.pyomo_grey_box_nlp import ( PyomoNLPWithGreyBoxBlocks, ) -from pyomo.contrib.pynumero.interfaces.external_grey_box import ( - ExternalGreyBoxBlock, -) -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - CyIpoptNLP, - CyIpoptSolver, -) +from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxBlock +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptSolver +from pyomo.contrib.pynumero.interfaces.cyipopt_interface import CyIpoptNLP if not pyo.SolverFactory("ipopt").available(): - raise unittest.SkipTest( - "Need IPOPT to run ExternalPyomoModel tests" - ) + raise unittest.SkipTest("Need IPOPT to run ExternalPyomoModel tests") class SimpleModel1(object): - def make_model(self): m = pyo.ConcreteModel() m.x = pyo.Var(initialize=2.0) m.y = pyo.Var(initialize=2.0) m.residual_eqn = pyo.Constraint(expr=m.x**2 + m.y**2 == 1.0) - m.external_eqn = pyo.Constraint(expr=m.x*m.y == 0.2) + m.external_eqn = pyo.Constraint(expr=m.x * m.y == 0.2) # The "external function constraint" exposed by the ExternalPyomoModel # will look like: x**2 + 0.04/x**2 - 1 == 0 return m def evaluate_external_variables(self, x): # y(x) - return 0.2/x + return 0.2 / x def evaluate_external_jacobian(self, x): # dydx - return -0.2/(x**2) + return -0.2 / (x**2) def evaluate_external_hessian(self, x): # d2ydx2 - return 0.4/(x**3) + return 0.4 / (x**3) def evaluate_residual(self, x): - return x**2 + 0.04/x**2 - 1 + return x**2 + 0.04 / x**2 - 1 def evaluate_jacobian(self, x): - return 2*x - 0.08/x**3 + return 2 * x - 0.08 / x**3 def evaluate_hessian(self, x): - return 2 + 0.24/x**4 + return 2 + 0.24 / x**4 class SimpleModel2(object): @@ -96,26 +90,26 @@ def make_model(self): m.x = pyo.Var(initialize=2.0) m.y = pyo.Var(initialize=2.0) m.residual_eqn = pyo.Constraint(expr=m.x**2 + m.y**2 == 1.0) - m.external_eqn = pyo.Constraint(expr=(m.x**3)*(m.y**3) == 0.2) + m.external_eqn = pyo.Constraint(expr=(m.x**3) * (m.y**3) == 0.2) return m def evaluate_external_variables(self, x): - return 0.2**(1/3)/x + return 0.2 ** (1 / 3) / x def evaluate_external_jacobian(self, x): - return -(0.2**(1/3))/(x**2) + return -(0.2 ** (1 / 3)) / (x**2) def evaluate_external_hessian(self, x): - return 2*0.2**(1/3)/(x**3) + return 2 * 0.2 ** (1 / 3) / (x**3) def evaluate_residual(self, x): - return x**2 + 0.2**(2/3)/x**2 - 1 + return x**2 + 0.2 ** (2 / 3) / x**2 - 1 def evaluate_jacobian(self, x): - return 2*x - 2*0.2**(2/3)/x**3 + return 2 * x - 2 * 0.2 ** (2 / 3) / x**3 def evaluate_hessian(self, x): - return 2 + 6*0.2**(2/3)/x**4 + return 2 + 6 * 0.2 ** (2 / 3) / x**4 class SimpleModel2by2_1(object): @@ -135,17 +129,17 @@ def residual_eqn_rule(m, i): # equality hessian calculation, i.e. to have nonlinearities # in x, y, and xy. if i == 0: - return m.x[0]**2 + m.x[0]*m.y[0] + m.y[0]**2 == 1.0 + return m.x[0] ** 2 + m.x[0] * m.y[0] + m.y[0] ** 2 == 1.0 elif i == 1: - return m.x[1]**2 + m.x[1]*m.y[1] == 2.0 + return m.x[1] ** 2 + m.x[1] * m.y[1] == 2.0 m.residual_eqn = pyo.Constraint([0, 1], rule=residual_eqn_rule) def external_eqn_rule(m, i): if i == 0: - return m.y[0] + m.y[1] + m.x[0]*m.x[1] + m.x[0]**2 == 1.0 + return m.y[0] + m.y[1] + m.x[0] * m.x[1] + m.x[0] ** 2 == 1.0 elif i == 1: - return m.y[0] + 2.0*m.x[0]*m.x[1] + m.x[1]**2 == 2.0 + return m.y[0] + 2.0 * m.x[0] * m.x[1] + m.x[1] ** 2 == 2.0 m.external_eqn = pyo.Constraint([0, 1], rule=external_eqn_rule) @@ -155,34 +149,52 @@ def external_eqn_rule(m, i): # the equality constraints exposed by the ExternalPyomoModel def evaluate_residual(self, x): f0 = ( - x[0]**2 + 2*x[0] - 2*x[0]**2*x[1] - x[1]**2*x[0] + 4 - - 8*x[0]*x[1] - 4*x[1]**2 + 4*x[0]**2*x[1]**2 - + 4*x[0]*x[1]**3 + x[1]**4 - 1.0 - ) - f1 = x[1]**2 - x[1] + x[0]*x[1]**2 + x[1]**3 - x[0]**2*x[1] - 2.0 + x[0] ** 2 + + 2 * x[0] + - 2 * x[0] ** 2 * x[1] + - x[1] ** 2 * x[0] + + 4 + - 8 * x[0] * x[1] + - 4 * x[1] ** 2 + + 4 * x[0] ** 2 * x[1] ** 2 + + 4 * x[0] * x[1] ** 3 + + x[1] ** 4 + - 1.0 + ) + f1 = x[1] ** 2 - x[1] + x[0] * x[1] ** 2 + x[1] ** 3 - x[0] ** 2 * x[1] - 2.0 return (f0, f1) def evaluate_jacobian(self, x): df0dx0 = ( - 2*x[0] + 2 - 4*x[0]*x[1] - x[1]**2 - - 8*x[1] + 8*x[0]*x[1]**2 + 4*x[1]**3 - ) + 2 * x[0] + + 2 + - 4 * x[0] * x[1] + - x[1] ** 2 + - 8 * x[1] + + 8 * x[0] * x[1] ** 2 + + 4 * x[1] ** 3 + ) df0dx1 = ( - -2*x[0]**2 - 2*x[0]*x[1] - 8*x[0] - 8*x[1] - + 8*x[0]**2*x[1] + 12*x[0]*x[1]**2 + 4*x[1]**3 - ) - df1dx0 = x[1]**2 - 2*x[0]*x[1] - df1dx1 = 2*x[1] - 1+ 2*x[0]*x[1] - x[0]**2 + 3*x[1]**2 + -2 * x[0] ** 2 + - 2 * x[0] * x[1] + - 8 * x[0] + - 8 * x[1] + + 8 * x[0] ** 2 * x[1] + + 12 * x[0] * x[1] ** 2 + + 4 * x[1] ** 3 + ) + df1dx0 = x[1] ** 2 - 2 * x[0] * x[1] + df1dx1 = 2 * x[1] - 1 + 2 * x[0] * x[1] - x[0] ** 2 + 3 * x[1] ** 2 return np.array([[df0dx0, df0dx1], [df1dx0, df1dx1]]) def evaluate_hessian(self, x): - df0dx0dx0 = 2 - 4*x[1] + 8*x[1]**2 - df0dx0dx1 = -4*x[0] - 2*x[1] - 8 + 16*x[0]*x[1] + 12*x[1]**2 - df0dx1dx1 = -2*x[0] - 8 + 8*x[0]**2 + 24*x[0]*x[1] + 12*x[1]**2 + df0dx0dx0 = 2 - 4 * x[1] + 8 * x[1] ** 2 + df0dx0dx1 = -4 * x[0] - 2 * x[1] - 8 + 16 * x[0] * x[1] + 12 * x[1] ** 2 + df0dx1dx1 = -2 * x[0] - 8 + 8 * x[0] ** 2 + 24 * x[0] * x[1] + 12 * x[1] ** 2 - df1dx0dx0 = -2*x[1] - df1dx0dx1 = 2*x[1] - 2*x[0] - df1dx1dx1 = 2 + 2*x[0] + 6*x[1] + df1dx0dx0 = -2 * x[1] + df1dx0dx1 = 2 * x[1] - 2 * x[0] + df1dx1dx1 = 2 + 2 * x[0] + 6 * x[1] d2f0 = np.array([[df0dx0dx0, df0dx0dx1], [df0dx0dx1, df0dx1dx1]]) d2f1 = np.array([[df1dx0dx0, df1dx0dx1], [df1dx0dx1, df1dx1dx1]]) return [d2f0, d2f1] @@ -190,14 +202,14 @@ def evaluate_hessian(self, x): # The following three methods are evaluation and derivatives of # the external function "hidden by" the ExternalPyomoModel def evaluate_external_variables(self, x): - y0 = 2.0 - 2.0*x[0]*x[1] - x[1]**2 - y1 = 1.0 - y0 - x[0]*x[1] - x[0]**2 + y0 = 2.0 - 2.0 * x[0] * x[1] - x[1] ** 2 + y1 = 1.0 - y0 - x[0] * x[1] - x[0] ** 2 return (y0, y1) def evaluate_external_jacobian(self, x): - dy0dx0 = -2.0*x[1] - dy0dx1 = -2.0*x[0] - 2.0*x[1] - dy1dx0 = -dy0dx0 - x[1] - 2.0*x[0] + dy0dx0 = -2.0 * x[1] + dy0dx1 = -2.0 * x[0] - 2.0 * x[1] + dy1dx0 = -dy0dx0 - x[1] - 2.0 * x[0] dy1dx1 = -dy0dx1 - x[0] return np.array([[dy0dx0, dy0dx1], [dy1dx0, dy1dx1]]) @@ -227,13 +239,13 @@ def make_model(self): m.x = pyo.Var([0, 1], initialize=2.0) m.y = pyo.Var([0, 1], initialize=2.0) - m.residual_eqn = pyo.Constraint(expr= - m.x[0]**2 + m.x[1]**2 + m.y[0]**2 + m.y[1]**2 == 1.0 - ) + m.residual_eqn = pyo.Constraint( + expr=m.x[0] ** 2 + m.x[1] ** 2 + m.y[0] ** 2 + m.y[1] ** 2 == 1.0 + ) def external_eqn_rule(m, i): if i == 0: - return (m.x[0]**2) * m.y[0] * (m.x[1]**0.5) * m.y[1] - 0.1 == 0 + return (m.x[0] ** 2) * m.y[0] * (m.x[1] ** 0.5) * m.y[1] - 0.1 == 0 elif i == 1: return m.x[0] * m.y[0] * m.x[1] - 0.2 == 0 @@ -242,34 +254,28 @@ def external_eqn_rule(m, i): return m def evaluate_external_variables(self, x): - y0 = 0.2/(x[0]*x[1]) - y1 = 0.1/(x[0]**2 * y0 * x[1]**0.5) + y0 = 0.2 / (x[0] * x[1]) + y1 = 0.1 / (x[0] ** 2 * y0 * x[1] ** 0.5) return [y0, y1] def evaluate_external_jacobian(self, x): - dy0dx0 = -0.2/(x[0]**2 * x[1]) - dy0dx1 = -0.2/(x[0] * x[1]**2) - dy1dx0 = -0.5*x[1]**0.5/x[0]**2 - dy1dx1 = 0.25/(x[0] * x[1]**0.5) + dy0dx0 = -0.2 / (x[0] ** 2 * x[1]) + dy0dx1 = -0.2 / (x[0] * x[1] ** 2) + dy1dx0 = -0.5 * x[1] ** 0.5 / x[0] ** 2 + dy1dx1 = 0.25 / (x[0] * x[1] ** 0.5) return np.array([[dy0dx0, dy0dx1], [dy1dx0, dy1dx1]]) def evaluate_external_hessian(self, x): - d2y0dx0dx0 = 0.4/(x[0]**3 * x[1]) - d2y0dx0dx1 = 0.2/(x[0]**2 * x[1]**2) - d2y0dx1dx1 = 0.4/(x[0] * x[1]**3) - - d2y1dx0dx0 = x[1]**0.5/(x[0]**3) - d2y1dx0dx1 = -0.25/(x[0]**2 * x[1]**0.5) - d2y1dx1dx1 = -0.125/(x[0] * x[1]**1.5) - - d2y0dxdx = np.array([ - [d2y0dx0dx0, d2y0dx0dx1], - [d2y0dx0dx1, d2y0dx1dx1], - ]) - d2y1dxdx = np.array([ - [d2y1dx0dx0, d2y1dx0dx1], - [d2y1dx0dx1, d2y1dx1dx1], - ]) + d2y0dx0dx0 = 0.4 / (x[0] ** 3 * x[1]) + d2y0dx0dx1 = 0.2 / (x[0] ** 2 * x[1] ** 2) + d2y0dx1dx1 = 0.4 / (x[0] * x[1] ** 3) + + d2y1dx0dx0 = x[1] ** 0.5 / (x[0] ** 3) + d2y1dx0dx1 = -0.25 / (x[0] ** 2 * x[1] ** 0.5) + d2y1dx1dx1 = -0.125 / (x[0] * x[1] ** 1.5) + + d2y0dxdx = np.array([[d2y0dx0dx0, d2y0dx0dx1], [d2y0dx0dx1, d2y0dx1dx1]]) + d2y1dxdx = np.array([[d2y1dx0dx0, d2y1dx0dx1], [d2y1dx0dx1, d2y1dx1dx1]]) return [d2y0dxdx, d2y1dxdx] def calculate_external_multipliers(self, lam, x): @@ -284,8 +290,10 @@ def calculate_external_multipliers(self, lam, x): """ y = self.evaluate_external_variables(x) - lg0 = -2*y[1]*lam[0]/(x[0]**2 * x[1]**0.5 * y[0]) - lg1 = -(2*y[0]*lam[0] + x[0]**2*x[1]**0.5*y[1]*lg0)/(x[0]*x[1]) + lg0 = -2 * y[1] * lam[0] / (x[0] ** 2 * x[1] ** 0.5 * y[0]) + lg1 = -(2 * y[0] * lam[0] + x[0] ** 2 * x[1] ** 0.5 * y[1] * lg0) / ( + x[0] * x[1] + ) return [lg0, lg1] def calculate_full_space_lagrangian_hessians(self, lam, x): @@ -299,14 +307,14 @@ def calculate_full_space_lagrangian_hessians(self, lam, x): hfxy = np.array([[0, 0], [0, 0]]) hfyy = np.array([[d2fdy0dy0, 0], [0, d2fdy1dy1]]) - dg0dx0dx0 = 2*y[0]*x[1]**0.5*y[1] - dg0dx0dx1 = x[0]*y[0]*y[1]/x[1]**0.5 - dg0dx1dx1 = -1/4*x[0]**2*y[0]*y[1]/x[1]**(3/2) - dg0dx0dy0 = 2*x[0]*x[1]**0.5*y[1] - dg0dx0dy1 = 2*x[0]*y[0]*x[1]**0.5 - dg0dx1dy0 = 0.5*x[0]**2*y[1]/x[1]**0.5 - dg0dx1dy1 = 0.5*x[0]**2*y[0]/x[1]**0.5 - dg0dy0dy1 = x[0]**2*x[1]**0.5 + dg0dx0dx0 = 2 * y[0] * x[1] ** 0.5 * y[1] + dg0dx0dx1 = x[0] * y[0] * y[1] / x[1] ** 0.5 + dg0dx1dx1 = -1 / 4 * x[0] ** 2 * y[0] * y[1] / x[1] ** (3 / 2) + dg0dx0dy0 = 2 * x[0] * x[1] ** 0.5 * y[1] + dg0dx0dy1 = 2 * x[0] * y[0] * x[1] ** 0.5 + dg0dx1dy0 = 0.5 * x[0] ** 2 * y[1] / x[1] ** 0.5 + dg0dx1dy1 = 0.5 * x[0] ** 2 * y[0] / x[1] ** 0.5 + dg0dy0dy1 = x[0] ** 2 * x[1] ** 0.5 hg0xx = np.array([[dg0dx0dx0, dg0dx0dx1], [dg0dx0dx1, dg0dx1dx1]]) hg0xy = np.array([[dg0dx0dy0, dg0dx0dy1], [dg0dx1dy0, dg0dx1dy1]]) hg0yy = np.array([[0, dg0dy0dy1], [dg0dy0dy1, 0]]) @@ -318,9 +326,9 @@ def calculate_full_space_lagrangian_hessians(self, lam, x): hg1xy = np.array([[dg1dx0dy0, 0], [dg1dx1dy0, 0]]) hg1yy = np.zeros((2, 2)) - hlxx = lam[0]*hfxx + lam_g[0]*hg0xx + lam_g[1]*hg1xx - hlxy = lam[0]*hfxy + lam_g[0]*hg0xy + lam_g[1]*hg1xy - hlyy = lam[0]*hfyy + lam_g[0]*hg0yy + lam_g[1]*hg1yy + hlxx = lam[0] * hfxx + lam_g[0] * hg0xx + lam_g[1] * hg1xx + hlxy = lam[0] * hfxy + lam_g[0] * hg0xy + lam_g[1] * hg1xy + hlyy = lam[0] * hfyy + lam_g[0] * hg0yy + lam_g[1] * hg1yy return hlxx, hlxy, hlyy def calculate_reduced_lagrangian_hessian(self, lam, x): @@ -328,13 +336,13 @@ def calculate_reduced_lagrangian_hessian(self, lam, x): hlxx, hlxy, hlyy = self.calculate_full_space_lagrangian_hessians(lam, x) return ( hlxx - + (hlxy.dot(dydx)).transpose() + hlxy.dot(dydx) + + (hlxy.dot(dydx)).transpose() + + hlxy.dot(dydx) + dydx.transpose().dot(hlyy).dot(dydx) ) class TestGetHessianOfConstraint(unittest.TestCase): - def test_simple_model_1(self): model = SimpleModel1() m = model.make_model() @@ -362,70 +370,106 @@ def test_polynomial(self): x1 = 1.1 x2 = 1.2 x3 = 1.3 - m.x = pyo.Var(range(1, n_x+1), initialize={1: x1, 2: x2, 3: x3}) - m.eqn = pyo.Constraint(expr= - 5*(m.x[1]**5) + # T1 - 5*(m.x[1]**4)*(m.x[2]) + # T2 - 5*(m.x[1]**3)*(m.x[2])*(m.x[3]) + # T3 - 5*(m.x[1])*(m.x[2]**2)*(m.x[3]**2) + # T4 - 4*(m.x[1]**2)*(m.x[2])*(m.x[3]) + # T5 - 4*(m.x[2]**2)*(m.x[3]**2) + # T6 - 4*(m.x[3]**4) + # T7 - 3*(m.x[1])*(m.x[2])*(m.x[3]) + # T8 - 3*(m.x[2]**3) + # T9 - 3*(m.x[2]**2)*(m.x[3]) + # T10 - 2*(m.x[1])*(m.x[2]) + # T11 - 2*(m.x[2])*(m.x[3]) # T12 - == 0 - ) + m.x = pyo.Var(range(1, n_x + 1), initialize={1: x1, 2: x2, 3: x3}) + m.eqn = pyo.Constraint( + expr=5 * (m.x[1] ** 5) # T1 + + 5 * (m.x[1] ** 4) * (m.x[2]) # T2 + + 5 * (m.x[1] ** 3) * (m.x[2]) * (m.x[3]) # T3 + + 5 * (m.x[1]) * (m.x[2] ** 2) * (m.x[3] ** 2) # T4 + + 4 * (m.x[1] ** 2) * (m.x[2]) * (m.x[3]) # T5 + + 4 * (m.x[2] ** 2) * (m.x[3] ** 2) # T6 + + 4 * (m.x[3] ** 4) # T7 + + 3 * (m.x[1]) * (m.x[2]) * (m.x[3]) # T8 + + 3 * (m.x[2] ** 3) # T9 + + 3 * (m.x[2] ** 2) * (m.x[3]) # T10 + + 2 * (m.x[1]) * (m.x[2]) # T11 + + 2 * (m.x[2]) * (m.x[3]) # T12 + == 0 + ) rcd = [] - rcd.append((0, 0, ( - # wrt x1, x1 - 5*5*4*x1**3 + # T1 - 5*4*3*x1**2*x2 + # T2 - 5*3*2*x1*x2*x3 + # T3 - 4*2*1*x2*x3 # T5 - ))) - rcd.append((1, 1, ( - # wrt x2, x2 - 5*x1*2*x3**2 + # T4 - 4*2*x3**2 + # T6 - 3*3*2*x2 + # T9 - 3*2*x3 # T10 - ))) - rcd.append((2, 2, ( - # wrt x3, x3 - 5*x1*x2**2*2 + # T4 - 4*x2**2*2 + # T6 - 4*4*3*x3**2 # T7 - ))) - rcd.append((1, 0, ( - # wrt x2, x1 - 5*4*x1**3 + # T2 - 5*3*x1**2*x3 + # T3 - 5*2*x2*x3**2 + # T4 - 4*2*x1*x3 + # T5 - 3*x3 + # T8 - 2 # T11 - ))) - rcd.append((2, 0, ( - # wrt x3, x1 - 5*3*x1**2*x2 + # T3 - 5*x2**2*2*x3 + # T4 - 4*2*x1*x2 + # T5 - 3*x2 # T8 - ))) - rcd.append((2, 1, ( - # wrt x3, x2 - 5*x1**3 + # T3 - 5*x1*2*x2*2*x3 + # T4 - 4*x1**2 + # T5 - 4*2*x2*2*x3 + # T6 - 3*x1 + # T8 - 3*2*x2 + # T10 - 2 # T12 - ))) + rcd.append( + ( + 0, + 0, + ( + # wrt x1, x1 + 5 * 5 * 4 * x1**3 # T1 + + 5 * 4 * 3 * x1**2 * x2 # T2 + + 5 * 3 * 2 * x1 * x2 * x3 # T3 + + 4 * 2 * 1 * x2 * x3 # T5 + ), + ) + ) + rcd.append( + ( + 1, + 1, + ( + # wrt x2, x2 + 5 * x1 * 2 * x3**2 # T4 + + 4 * 2 * x3**2 # T6 + + 3 * 3 * 2 * x2 # T9 + + 3 * 2 * x3 # T10 + ), + ) + ) + rcd.append( + ( + 2, + 2, + ( + # wrt x3, x3 + 5 * x1 * x2**2 * 2 # T4 + + 4 * x2**2 * 2 # T6 + + 4 * 4 * 3 * x3**2 # T7 + ), + ) + ) + rcd.append( + ( + 1, + 0, + ( + # wrt x2, x1 + 5 * 4 * x1**3 # T2 + + 5 * 3 * x1**2 * x3 # T3 + + 5 * 2 * x2 * x3**2 # T4 + + 4 * 2 * x1 * x3 # T5 + + 3 * x3 # T8 + + 2 # T11 + ), + ) + ) + rcd.append( + ( + 2, + 0, + ( + # wrt x3, x1 + 5 * 3 * x1**2 * x2 # T3 + + 5 * x2**2 * 2 * x3 # T4 + + 4 * 2 * x1 * x2 # T5 + + 3 * x2 # T8 + ), + ) + ) + rcd.append( + ( + 2, + 1, + ( + # wrt x3, x2 + 5 * x1**3 # T3 + + 5 * x1 * 2 * x2 * 2 * x3 # T4 + + 4 * x1**2 # T5 + + 4 * 2 * x2 * 2 * x3 # T6 + + 3 * x1 # T8 + + 3 * 2 * x2 # T10 + + 2 # T12 + ), + ) + ) row = [r for r, _, _ in rcd] col = [c for _, c, _ in rcd] @@ -433,10 +477,10 @@ def test_polynomial(self): expected_hess = sps.coo_matrix((data, (row, col)), shape=(n_x, n_x)) expected_hess_array = expected_hess.toarray() expected_hess_array = ( - expected_hess_array - + np.transpose(expected_hess_array) - - np.diag(np.diagonal(expected_hess_array)) - ) + expected_hess_array + + np.transpose(expected_hess_array) + - np.diag(np.diagonal(expected_hess_array)) + ) hess = get_hessian_of_constraint(m.eqn, list(m.x.values())) hess_array = hess.toarray() np.testing.assert_allclose(expected_hess_array, hess_array, rtol=1e-8) @@ -462,9 +506,7 @@ def test_explicit_zeros(self): row = np.array([0, 1]) col = np.array([0, 1]) data = np.array([2.0, 0.0]) - expected_hess = sps.coo_matrix( - (data, (row, col)), shape=(2,2) - ) + expected_hess = sps.coo_matrix((data, (row, col)), shape=(2, 2)) hess = get_hessian_of_constraint(m.eqn, variables) np.testing.assert_allclose(hess.row, row, atol=0) np.testing.assert_allclose(hess.col, col, atol=0) @@ -472,112 +514,83 @@ def test_explicit_zeros(self): class TestExternalPyomoModel(unittest.TestCase): - def test_evaluate_SimpleModel1(self): model = SimpleModel1() m = model.make_model() - x_init_list = [ - [-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5] - ] + x_init_list = [[-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5]] external_model = ExternalPyomoModel( - [m.x], [m.y], [m.residual_eqn], [m.external_eqn], - ) + [m.x], [m.y], [m.residual_eqn], [m.external_eqn] + ) for x in x_init_list: external_model.set_input_values(x) resid = external_model.evaluate_equality_constraints() - self.assertAlmostEqual( - resid[0], - model.evaluate_residual(x[0]), - delta=1e-8, - ) + self.assertAlmostEqual(resid[0], model.evaluate_residual(x[0]), delta=1e-8) def test_jacobian_SimpleModel1(self): model = SimpleModel1() m = model.make_model() - x_init_list = [ - [-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5] - ] + x_init_list = [[-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5]] external_model = ExternalPyomoModel( - [m.x], [m.y], [m.residual_eqn], [m.external_eqn], - ) + [m.x], [m.y], [m.residual_eqn], [m.external_eqn] + ) for x in x_init_list: external_model.set_input_values(x) jac = external_model.evaluate_jacobian_equality_constraints() self.assertAlmostEqual( - jac.toarray()[0][0], - model.evaluate_jacobian(x[0]), - delta=1e-8, - ) + jac.toarray()[0][0], model.evaluate_jacobian(x[0]), delta=1e-8 + ) def test_hessian_SimpleModel1(self): model = SimpleModel1() m = model.make_model() - x_init_list = [ - [-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5] - ] + x_init_list = [[-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5]] external_model = ExternalPyomoModel( - [m.x], [m.y], [m.residual_eqn], [m.external_eqn], - ) + [m.x], [m.y], [m.residual_eqn], [m.external_eqn] + ) for x in x_init_list: external_model.set_input_values(x) hess = external_model.evaluate_hessians_of_residuals() self.assertAlmostEqual( - hess[0][0, 0], - model.evaluate_hessian(x[0]), - delta=1e-8, - ) + hess[0][0, 0], model.evaluate_hessian(x[0]), delta=1e-8 + ) def test_external_hessian_SimpleModel1(self): model = SimpleModel1() m = model.make_model() - x_init_list = [ - [-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5] - ] + x_init_list = [[-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5]] external_model = ExternalPyomoModel( - [m.x], [m.y], [m.residual_eqn], [m.external_eqn], - ) + [m.x], [m.y], [m.residual_eqn], [m.external_eqn] + ) for x in x_init_list: external_model.set_input_values(x) hess = external_model.evaluate_hessian_external_variables() expected_hess = model.evaluate_external_hessian(x[0]) - self.assertAlmostEqual( - hess[0][0,0], - expected_hess, - delta=1e-8, - ) + self.assertAlmostEqual(hess[0][0, 0], expected_hess, delta=1e-8) def test_evaluate_SimpleModel2(self): model = SimpleModel2() m = model.make_model() - x_init_list = [ - [-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5] - ] + x_init_list = [[-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5]] external_model = ExternalPyomoModel( - [m.x], [m.y], [m.residual_eqn], [m.external_eqn], - ) + [m.x], [m.y], [m.residual_eqn], [m.external_eqn] + ) for x in x_init_list: external_model.set_input_values(x) resid = external_model.evaluate_equality_constraints() - self.assertAlmostEqual( - resid[0], - model.evaluate_residual(x[0]), - delta=1e-8, - ) + self.assertAlmostEqual(resid[0], model.evaluate_residual(x[0]), delta=1e-8) def test_jacobian_SimpleModel2(self): model = SimpleModel2() m = model.make_model() - x_init_list = [ - [-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5] - ] + x_init_list = [[-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5]] external_model = ExternalPyomoModel( - [m.x], [m.y], [m.residual_eqn], [m.external_eqn], - ) + [m.x], [m.y], [m.residual_eqn], [m.external_eqn] + ) for x in x_init_list: external_model.set_input_values(x) @@ -587,69 +600,51 @@ def test_jacobian_SimpleModel2(self): # dense matrix from this operation. I am not sure if I should # cast it to a sparse matrix. For now it is dense... self.assertAlmostEqual( - jac.toarray()[0][0], - model.evaluate_jacobian(x[0]), - delta=1e-7, - ) + jac.toarray()[0][0], model.evaluate_jacobian(x[0]), delta=1e-7 + ) def test_hessian_SimpleModel2(self): model = SimpleModel2() m = model.make_model() - x_init_list = [ - [-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5] - ] + x_init_list = [[-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5]] external_model = ExternalPyomoModel( - [m.x], [m.y], [m.residual_eqn], [m.external_eqn], - ) + [m.x], [m.y], [m.residual_eqn], [m.external_eqn] + ) for x in x_init_list: external_model.set_input_values(x) hess = external_model.evaluate_hessians_of_residuals() self.assertAlmostEqual( - hess[0][0, 0], - model.evaluate_hessian(x[0]), - delta=1e-7, - ) + hess[0][0, 0], model.evaluate_hessian(x[0]), delta=1e-7 + ) def test_external_jacobian_SimpleModel2(self): model = SimpleModel2() m = model.make_model() - x_init_list = [ - [-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5] - ] + x_init_list = [[-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5]] external_model = ExternalPyomoModel( - [m.x], [m.y], [m.residual_eqn], [m.external_eqn], - ) + [m.x], [m.y], [m.residual_eqn], [m.external_eqn] + ) for x in x_init_list: external_model.set_input_values(x) jac = external_model.evaluate_jacobian_external_variables() expected_jac = model.evaluate_external_jacobian(x[0]) - self.assertAlmostEqual( - jac[0,0], - expected_jac, - delta=1e-8, - ) + self.assertAlmostEqual(jac[0, 0], expected_jac, delta=1e-8) def test_external_hessian_SimpleModel2(self): model = SimpleModel2() m = model.make_model() - x_init_list = [ - [-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5] - ] + x_init_list = [[-5.0], [-4.0], [-3.0], [-1.5], [0.5], [1.0], [2.0], [3.5]] external_model = ExternalPyomoModel( - [m.x], [m.y], [m.residual_eqn], [m.external_eqn], - ) + [m.x], [m.y], [m.residual_eqn], [m.external_eqn] + ) for x in x_init_list: external_model.set_input_values(x) hess = external_model.evaluate_hessian_external_variables() expected_hess = model.evaluate_external_hessian(x[0]) - self.assertAlmostEqual( - hess[0][0,0], - expected_hess, - delta=1e-7, - ) + self.assertAlmostEqual(hess[0][0, 0], expected_hess, delta=1e-7) def test_external_jacobian_Model2by2(self): model = Model2by2() @@ -662,11 +657,11 @@ def test_external_jacobian_Model2by2(self): x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] x_init_list = list(itertools.product(x0_init_list, x1_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x in x_init_list: external_model.set_input_values(x) @@ -685,18 +680,17 @@ def test_external_hessian_Model2by2(self): x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] x_init_list = list(itertools.product(x0_init_list, x1_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x in x_init_list: external_model.set_input_values(x) hess = external_model.evaluate_hessian_external_variables() expected_hess = model.evaluate_external_hessian(x) for matrix1, matrix2 in zip(hess, expected_hess): - matrix2 = np.matrix(matrix2) np.testing.assert_allclose(matrix1, matrix2, rtol=1e-8) def test_external_jacobian_SimpleModel2x2_1(self): @@ -710,11 +704,11 @@ def test_external_jacobian_SimpleModel2x2_1(self): x1_init_list = [-4.5, -2.3, 0.0, 1.0, 4.1] x_init_list = list(itertools.product(x0_init_list, x1_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x in x_init_list: external_model.set_input_values(x) @@ -733,11 +727,11 @@ def test_external_hessian_SimpleModel2x2_1(self): x1_init_list = [-4.5, -2.3, 0.0, 1.0, 4.1] x_init_list = list(itertools.product(x0_init_list, x1_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x in x_init_list: external_model.set_input_values(x) @@ -757,11 +751,11 @@ def test_evaluate_SimpleModel2x2_1(self): x1_init_list = [-4.5, -2.3, 0.0, 1.0, 4.1] x_init_list = list(itertools.product(x0_init_list, x1_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x in x_init_list: external_model.set_input_values(x) @@ -780,17 +774,19 @@ def test_jacobian_SimpleModel2x2_1(self): x1_init_list = [-4.5, -2.3, 0.0, 1.0, 4.1] x_init_list = list(itertools.product(x0_init_list, x1_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x in x_init_list: external_model.set_input_values(x) jac = external_model.evaluate_jacobian_equality_constraints() expected_jac = model.evaluate_jacobian(x) - np.testing.assert_allclose(jac.toarray(), expected_jac, rtol=1e-8) + np.testing.assert_allclose( + jac.toarray(), expected_jac, rtol=1e-8, atol=1e-8 + ) def test_hessian_SimpleModel2x2_1(self): model = SimpleModel2by2_1() @@ -803,11 +799,11 @@ def test_hessian_SimpleModel2x2_1(self): x1_init_list = [-4.5, -2.3, 0.0, 1.0, 4.1] x_init_list = list(itertools.product(x0_init_list, x1_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x in x_init_list: external_model.set_input_values(x) @@ -826,11 +822,11 @@ def test_evaluate_hessian_lagrangian_SimpleModel2x2_1(self): x1_init_list = [-4.5, -2.3, 0.0, 1.0, 4.1] x_init_list = list(itertools.product(x0_init_list, x1_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x in x_init_list: external_model.set_input_values(x) @@ -861,15 +857,13 @@ def test_external_multipliers_from_residual_multipliers(self): x0_init_list = [-5.0, -3.0, 0.5, 1.0, 2.5] x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] lam_init_list = [-2.5, -0.5, 0.0, 1.0, 2.0] - init_list = list( - itertools.product(x0_init_list, x1_init_list, lam_init_list) - ) + init_list = list(itertools.product(x0_init_list, x1_init_list, lam_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x0, x1, lam in init_list: x = [x0, x1] @@ -889,15 +883,13 @@ def test_full_space_lagrangian_hessians(self): x0_init_list = [-5.0, -3.0, 0.5, 1.0, 2.5] x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] lam_init_list = [-2.5, -0.5, 0.0, 1.0, 2.0] - init_list = list( - itertools.product(x0_init_list, x1_init_list, lam_init_list) - ) + init_list = list(itertools.product(x0_init_list, x1_init_list, lam_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x0, x1, lam in init_list: x = [x0, x1] @@ -908,10 +900,12 @@ def test_full_space_lagrangian_hessians(self): # (This is wrong in the sense that the residual and external # multipliers won't necessarily correspond). external_model.set_external_constraint_multipliers(lam) - hlxx, hlxy, hlyy = \ - external_model.get_full_space_lagrangian_hessians() - pred_hlxx, pred_hlxy, pred_hlyy = \ - model.calculate_full_space_lagrangian_hessians(lam, x) + hlxx, hlxy, hlyy = external_model.get_full_space_lagrangian_hessians() + ( + pred_hlxx, + pred_hlxy, + pred_hlyy, + ) = model.calculate_full_space_lagrangian_hessians(lam, x) # TODO: Is comparing the array representation sufficient here? # Should I make sure I get the sparse representation I expect? @@ -929,15 +923,13 @@ def test_reduced_hessian_lagrangian(self): x0_init_list = [-5.0, -3.0, 0.5, 1.0, 2.5] x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] lam_init_list = [-2.5, -0.5, 0.0, 1.0, 2.0] - init_list = list( - itertools.product(x0_init_list, x1_init_list, lam_init_list) - ) + init_list = list(itertools.product(x0_init_list, x1_init_list, lam_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x0, x1, lam in init_list: x = [x0, x1] @@ -945,21 +937,16 @@ def test_reduced_hessian_lagrangian(self): external_model.set_input_values(x) # Same comment as previous test regarding calculation order external_model.set_external_constraint_multipliers(lam) - hlxx, hlxy, hlyy = \ - external_model.get_full_space_lagrangian_hessians() - hess = external_model.calculate_reduced_hessian_lagrangian( - hlxx, hlxy, hlyy - ) + hlxx, hlxy, hlyy = external_model.get_full_space_lagrangian_hessians() + hess = external_model.calculate_reduced_hessian_lagrangian(hlxx, hlxy, hlyy) pred_hess = model.calculate_reduced_lagrangian_hessian(lam, x) # This test asserts that we are doing the block reduction properly. np.testing.assert_allclose(np.array(hess), pred_hess, rtol=1e-8) from_individual = external_model.evaluate_hessians_of_residuals() - hl_from_individual = sum(l*h for l, h in zip(lam, from_individual)) + hl_from_individual = sum(l * h for l, h in zip(lam, from_individual)) # This test asserts that the block reduction is correct. - np.testing.assert_allclose( - np.array(hess), hl_from_individual, rtol=1e-8 - ) + np.testing.assert_allclose(np.array(hess), hl_from_individual, rtol=1e-8) def test_evaluate_hessian_equality_constraints(self): model = Model2by2() @@ -971,15 +958,13 @@ def test_evaluate_hessian_equality_constraints(self): x0_init_list = [-5.0, -3.0, 0.5, 1.0, 2.5] x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] lam_init_list = [-2.5, -0.5, 0.0, 1.0, 2.0] - init_list = list( - itertools.product(x0_init_list, x1_init_list, lam_init_list) - ) + init_list = list(itertools.product(x0_init_list, x1_init_list, lam_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x0, x1, lam in init_list: x = [x0, x1] @@ -989,12 +974,10 @@ def test_evaluate_hessian_equality_constraints(self): hess = external_model.evaluate_hessian_equality_constraints() pred_hess = model.calculate_reduced_lagrangian_hessian(lam, x) # This test asserts that we are doing the block reduction properly. - np.testing.assert_allclose( - hess.toarray(), np.tril(pred_hess), rtol=1e-8 - ) + np.testing.assert_allclose(hess.toarray(), np.tril(pred_hess), rtol=1e-8) from_individual = external_model.evaluate_hessians_of_residuals() - hl_from_individual = sum(l*h for l, h in zip(lam, from_individual)) + hl_from_individual = sum(l * h for l, h in zip(lam, from_individual)) # This test asserts that the block reduction is correct. np.testing.assert_allclose( hess.toarray(), np.tril(hl_from_individual), rtol=1e-8 @@ -1010,15 +993,13 @@ def test_evaluate_hessian_equality_constraints_order(self): x0_init_list = [-5.0, -3.0, 0.5, 1.0, 2.5] x1_init_list = [0.5, 1.0, 1.5, 2.5, 4.1] lam_init_list = [-2.5, -0.5, 0.0, 1.0, 2.0] - init_list = list( - itertools.product(x0_init_list, x1_init_list, lam_init_list) - ) + init_list = list(itertools.product(x0_init_list, x1_init_list, lam_init_list)) external_model = ExternalPyomoModel( - list(m.x.values()), - list(m.y.values()), - list(m.residual_eqn.values()), - list(m.external_eqn.values()), - ) + list(m.x.values()), + list(m.y.values()), + list(m.residual_eqn.values()), + list(m.external_eqn.values()), + ) for x0, x1, lam in init_list: x = [x0, x1] @@ -1032,12 +1013,10 @@ def test_evaluate_hessian_equality_constraints_order(self): hess = external_model.evaluate_hessian_equality_constraints() pred_hess = model.calculate_reduced_lagrangian_hessian(lam, x) # This test asserts that we are doing the block reduction properly. - np.testing.assert_allclose( - hess.toarray(), np.tril(pred_hess), rtol=1e-8 - ) + np.testing.assert_allclose(hess.toarray(), np.tril(pred_hess), rtol=1e-8) from_individual = external_model.evaluate_hessians_of_residuals() - hl_from_individual = sum(l*h for l, h in zip(lam, from_individual)) + hl_from_individual = sum(l * h for l, h in zip(lam, from_individual)) # This test asserts that the block reduction is correct. np.testing.assert_allclose( hess.toarray(), np.tril(hl_from_individual), rtol=1e-8 @@ -1045,15 +1024,14 @@ def test_evaluate_hessian_equality_constraints_order(self): class TestScaling(unittest.TestCase): - def con_3_body(self, x, y, u, v): - return 1e5*x**2 + 1e4*y**2 + 1e1*u**2 + 1e0*v**2 + return 1e5 * x**2 + 1e4 * y**2 + 1e1 * u**2 + 1e0 * v**2 def con_3_rhs(self): return 2.0e4 def con_4_body(self, x, y, u, v): - return 1e-2*x + 1e-3*y + 1e-4*u + 1e-4*v + return 1e-2 * x + 1e-3 * y + 1e-4 * u + 1e-4 * v def con_4_rhs(self): return 3.0e-4 @@ -1064,12 +1042,8 @@ def make_model(self): m.y = pyo.Var(initialize=1.0) m.u = pyo.Var(initialize=1.0) m.v = pyo.Var(initialize=1.0) - m.con_1 = pyo.Constraint( - expr=m.x * m.y == m.u - ) - m.con_2 = pyo.Constraint( - expr=m.x**2 * m.y**3 == m.v - ) + m.con_1 = pyo.Constraint(expr=m.x * m.y == m.u) + m.con_2 = pyo.Constraint(expr=m.x**2 * m.y**3 == m.v) m.con_3 = pyo.Constraint( expr=self.con_3_body(m.x, m.y, m.u, m.v) == self.con_3_rhs() ) @@ -1083,15 +1057,9 @@ def make_model(self): epm_model.u = pyo.Reference(m.u) epm_model.v = pyo.Reference(m.v) epm_model.epm = ExternalPyomoModel( - [m.u, m.v], - [m.x, m.y], - [m.con_3, m.con_4], - [m.con_1, m.con_2], - use_cyipopt=False, - ) - epm_model.obj = pyo.Objective( - expr=m.x**2 + m.y**2 + m.u**2 + m.v**2 + [m.u, m.v], [m.x, m.y], [m.con_3, m.con_4], [m.con_1, m.con_2] ) + epm_model.obj = pyo.Objective(expr=m.x**2 + m.y**2 + m.u**2 + m.v**2) epm_model.egb = ExternalGreyBoxBlock() epm_model.egb.set_external_model(epm_model.epm, inputs=[m.u, m.v]) return epm_model @@ -1111,6 +1079,7 @@ def test_pyomo_nlp(self): nlp_sf = nlp.get_constraints_scaling() np.testing.assert_array_equal(scaling_factors, nlp_sf) + @unittest.skipUnless(cyipopt_available, "cyipopt is not available") def test_cyipopt_nlp(self): m = self.make_model() scaling_factors = [1e-4, 1e4] @@ -1132,30 +1101,30 @@ def test_cyipopt_callback(self): scaling_factors = [1e-4, 1e4] m.epm.set_equality_constraint_scaling_factors(scaling_factors) nlp = PyomoNLPWithGreyBoxBlocks(m) - + def callback( - local_nlp, - alg_mod, - iter_count, - obj_value, - inf_pr, - inf_du, - mu, - d_norm, - regularization_size, - alpha_du, - alpha_pr, - ls_trials, - ): + local_nlp, + alg_mod, + iter_count, + obj_value, + inf_pr, + inf_du, + mu, + d_norm, + regularization_size, + alpha_du, + alpha_pr, + ls_trials, + ): primals = tuple(local_nlp.get_primals()) # I happen to know the order of the primals here u, v, x, y = primals # Calculate the scaled residuals I expect - con_3_resid = scaling_factors[0]*abs( + con_3_resid = scaling_factors[0] * abs( self.con_3_body(x, y, u, v) - self.con_3_rhs() ) - con_4_resid = scaling_factors[1]*abs( + con_4_resid = scaling_factors[1] * abs( self.con_4_body(x, y, u, v) - self.con_4_rhs() ) pred_inf_pr = max(con_3_resid, con_4_resid) @@ -1163,17 +1132,10 @@ def callback( # Make sure Ipopt is using the scaled constraints internally self.assertAlmostEqual(inf_pr, pred_inf_pr) - cyipopt_nlp = CyIpoptNLP( - nlp, - intermediate_callback=callback, - ) + cyipopt_nlp = CyIpoptNLP(nlp, intermediate_callback=callback) x0 = nlp.get_primals() cyipopt = CyIpoptSolver( - cyipopt_nlp, - options={ - "max_iter": 0, - "nlp_scaling_method": "user-scaling", - }, + cyipopt_nlp, options={"max_iter": 0, "nlp_scaling_method": "user-scaling"} ) cyipopt.solve(x0=x0) diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py b/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py index fe4320172af..38d44473a67 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py @@ -13,72 +13,83 @@ import os from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy_available + numpy as np, + numpy_available, + scipy_available, ) + if not (numpy_available and scipy_available): raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") from pyomo.contrib.pynumero.asl import AmplInterface +from pyomo.contrib.pynumero.exceptions import PyNumeroEvaluationError + if not AmplInterface.available(): - raise unittest.SkipTest( - "Pynumero needs the ASL extension to run NLP tests") + raise unittest.SkipTest("Pynumero needs the ASL extension to run NLP tests") import pyomo.environ as pyo from pyomo.contrib.pynumero.interfaces.ampl_nlp import AslNLP, AmplNLP from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP import tempfile -from pyomo.contrib.pynumero.interfaces.utils import build_bounds_mask, build_compression_matrix, \ - build_compression_mask_for_finite_values, full_to_compressed, compressed_to_full +from pyomo.contrib.pynumero.interfaces.utils import ( + build_bounds_mask, + build_compression_matrix, + build_compression_mask_for_finite_values, + full_to_compressed, + compressed_to_full, +) def create_pyomo_model1(): m = pyo.ConcreteModel() m.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT_EXPORT) - m.S = pyo.Set(initialize=[i+1 for i in range(9)]) + m.S = pyo.Set(initialize=[i + 1 for i in range(9)]) xb = dict() - xb[1] = (-1,1) - xb[2] = (2,2) - xb[3] = (-3,np.inf) + xb[1] = (-1, 1) + xb[2] = (2, 2) + xb[3] = (-3, np.inf) xb[4] = (-np.inf, np.inf) - xb[5] = (-5,5) - xb[6] = (-np.inf,6) - xb[7] = (-7,np.inf) - xb[8] = (-np.inf,np.inf) - xb[9] = (-9,9) - m.x = pyo.Var(m.S, initialize=1.0, bounds=lambda m,i: xb[i]) + xb[5] = (-5, 5) + xb[6] = (-np.inf, 6) + xb[7] = (-7, np.inf) + xb[8] = (-np.inf, np.inf) + xb[9] = (-9, 9) + m.x = pyo.Var(m.S, initialize=1.0, bounds=lambda m, i: xb[i]) cb = dict() - cb[1] = (-1,1) - cb[2] = (2,2) - cb[3] = (-3,np.inf) + cb[1] = (-1, 1) + cb[2] = (2, 2) + cb[3] = (-3, np.inf) cb[4] = (-np.inf, 4) - cb[5] = (-5,5) - cb[6] = (-6,-6) - cb[7] = (-7,np.inf) - cb[8] = (-np.inf,8) - cb[9] = (-9,9) - - def c_rule(m,i): - return (cb[i][0], sum(i*j*m.x[j] for j in m.S), cb[i][1]) + cb[5] = (-5, 5) + cb[6] = (-6, -6) + cb[7] = (-7, np.inf) + cb[8] = (-np.inf, 8) + cb[9] = (-9, 9) + + def c_rule(m, i): + return (cb[i][0], sum(i * j * m.x[j] for j in m.S), cb[i][1]) + m.c = pyo.Constraint(m.S, rule=c_rule) for i in m.S: m.dual.set_value(m.c[i], i) - m.obj = pyo.Objective(expr=sum(i*j*m.x[i]*m.x[j] for i in m.S for j in m.S)) + m.obj = pyo.Objective(expr=sum(i * j * m.x[i] * m.x[j] for i in m.S for j in m.S)) # add scaling parameters for testing m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) m.scaling_factor[m.obj] = 5 for i in m.S: - m.scaling_factor[m.x[i]] = 2*float(i) + m.scaling_factor[m.x[i]] = 2 * float(i) for i in m.S: - m.scaling_factor[m.c[i]] = 3*float(i) + m.scaling_factor[m.c[i]] = 3 * float(i) return m + def create_pyomo_model2(): m = pyo.ConcreteModel() m.x = pyo.Var([1, 2, 3], domain=pyo.Reals) @@ -92,30 +103,41 @@ def create_pyomo_model2(): m.x[2].setlb(0.0) m.x[3].setlb(0.0) m.x[2].setub(100.0) - m.obj = pyo.Objective(expr=m.x[2]**2) + m.obj = pyo.Objective(expr=m.x[2] ** 2) return m + def execute_extended_nlp_interface(self, anlp): - self.assertEqual(anlp.n_primals(),9) + self.assertEqual(anlp.n_primals(), 9) self.assertEqual(anlp.n_constraints(), 9) - self.assertEqual(anlp.n_eq_constraints(),2) - self.assertEqual(anlp.n_ineq_constraints(),7) - self.assertEqual(anlp.nnz_jacobian(), 9*9) - self.assertEqual(anlp.nnz_jacobian_eq(), 2*9) - self.assertEqual(anlp.nnz_jacobian_ineq(), 7*9) - self.assertEqual(anlp.nnz_hessian_lag(), 9*9) - - expected_primals_lb = np.asarray([-1, 2, -3, -np.inf, -5, -np.inf, -7, -np.inf, -9], dtype=np.float64) - expected_primals_ub = np.asarray([1, 2, np.inf, np.inf, 5, 6, np.inf, np.inf, 9], dtype=np.float64) + self.assertEqual(anlp.n_eq_constraints(), 2) + self.assertEqual(anlp.n_ineq_constraints(), 7) + self.assertEqual(anlp.nnz_jacobian(), 9 * 9) + self.assertEqual(anlp.nnz_jacobian_eq(), 2 * 9) + self.assertEqual(anlp.nnz_jacobian_ineq(), 7 * 9) + self.assertEqual(anlp.nnz_hessian_lag(), 9 * 9) + + expected_primals_lb = np.asarray( + [-1, 2, -3, -np.inf, -5, -np.inf, -7, -np.inf, -9], dtype=np.float64 + ) + expected_primals_ub = np.asarray( + [1, 2, np.inf, np.inf, 5, 6, np.inf, np.inf, 9], dtype=np.float64 + ) self.assertTrue(np.array_equal(expected_primals_lb, anlp.primals_lb())) self.assertTrue(np.array_equal(expected_primals_ub, anlp.primals_ub())) - expected_constraints_lb = np.asarray([-1, 0, -3, -np.inf, -5, 0, -7, -np.inf, -9], dtype=np.float64) - expected_constraints_ub = np.asarray([1, 0, np.inf, 4, 5, 0, np.inf, 8, 9], dtype=np.float64) + expected_constraints_lb = np.asarray( + [-1, 0, -3, -np.inf, -5, 0, -7, -np.inf, -9], dtype=np.float64 + ) + expected_constraints_ub = np.asarray( + [1, 0, np.inf, 4, 5, 0, np.inf, 8, 9], dtype=np.float64 + ) self.assertTrue(np.array_equal(expected_constraints_lb, anlp.constraints_lb())) self.assertTrue(np.array_equal(expected_constraints_ub, anlp.constraints_ub())) - expected_ineq_lb = np.asarray([-1, -3, -np.inf, -5, -7, -np.inf, -9], dtype=np.float64) + expected_ineq_lb = np.asarray( + [-1, -3, -np.inf, -5, -7, -np.inf, -9], dtype=np.float64 + ) expected_ineq_ub = np.asarray([1, np.inf, 4, 5, np.inf, 8, 9], dtype=np.float64) self.assertTrue(np.array_equal(expected_ineq_lb, anlp.ineq_lb())) self.assertTrue(np.array_equal(expected_ineq_ub, anlp.ineq_ub())) @@ -144,7 +166,7 @@ def execute_extended_nlp_interface(self, anlp): t = anlp.create_new_vector('duals_ineq') self.assertTrue(t.size == 7) - expected_primals = [i+1 for i in range(9)] + expected_primals = [i + 1 for i in range(9)] new_primals = np.asarray(expected_primals, dtype=np.float64) expected_primals = np.asarray(expected_primals, dtype=np.float64) anlp.set_primals(new_primals) @@ -153,7 +175,7 @@ def execute_extended_nlp_interface(self, anlp): self.assertTrue(np.array_equal(expected_primals, anlp._primals)) anlp.set_primals(np.ones(9)) - expected_duals = [i+1 for i in range(9)] + expected_duals = [i + 1 for i in range(9)] new_duals = np.asarray(expected_duals, dtype=np.float64) expected_duals = np.asarray(expected_duals, dtype=np.float64) anlp.set_duals(new_duals) @@ -166,7 +188,7 @@ def execute_extended_nlp_interface(self, anlp): self.assertTrue(np.array_equal(expected_duals_ineq, anlp._duals_ineq)) anlp.set_duals(np.ones(9)) - expected_duals_eq = [i+1 for i in range(2)] + expected_duals_eq = [i + 1 for i in range(2)] new_duals_eq = np.asarray(expected_duals_eq, dtype=np.float64) anlp.set_duals_eq(new_duals_eq) ret = anlp.get_duals_eq() @@ -180,7 +202,7 @@ def execute_extended_nlp_interface(self, anlp): expected_duals = np.asarray([1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64) self.assertTrue(np.array_equal(expected_duals, anlp._duals_full)) - expected_duals_ineq = [i+1 for i in range(7)] + expected_duals_ineq = [i + 1 for i in range(7)] new_duals_ineq = np.asarray(expected_duals_ineq, dtype=np.float64) anlp.set_duals_ineq(new_duals_ineq) ret = anlp.get_duals_ineq() @@ -195,16 +217,21 @@ def execute_extended_nlp_interface(self, anlp): self.assertTrue(np.array_equal(expected_duals, anlp._duals_full)) # objective function - expected_objective = sum((i+1)*(j+1) for i in range(9) for j in range(9)) + expected_objective = sum((i + 1) * (j + 1) for i in range(9) for j in range(9)) self.assertEqual(expected_objective, anlp.evaluate_objective()) # change the value of the primals - anlp.set_primals(2.0*np.ones(9)) - expected_objective = sum(2.0**2*(i+1)*(j+1) for i in range(9) for j in range(9)) + anlp.set_primals(2.0 * np.ones(9)) + expected_objective = sum( + 2.0**2 * (i + 1) * (j + 1) for i in range(9) for j in range(9) + ) self.assertEqual(expected_objective, anlp.evaluate_objective()) anlp.set_primals(np.ones(9)) # gradient of the objective - expected_gradient = np.asarray([2*sum((i+1)*(j+1) for j in range(9)) for i in range(9)], dtype=np.float64) + expected_gradient = np.asarray( + [2 * sum((i + 1) * (j + 1) for j in range(9)) for i in range(9)], + dtype=np.float64, + ) grad_obj = anlp.evaluate_grad_objective() self.assertTrue(np.array_equal(expected_gradient, grad_obj)) # test inplace @@ -213,8 +240,11 @@ def execute_extended_nlp_interface(self, anlp): self.assertTrue(ret is grad_obj) self.assertTrue(np.array_equal(expected_gradient, grad_obj)) # change the value of the primals - anlp.set_primals(2.0*np.ones(9)) - expected_gradient = np.asarray([2*2*sum((i+1)*(j+1) for j in range(9)) for i in range(9)], dtype=np.float64) + anlp.set_primals(2.0 * np.ones(9)) + expected_gradient = np.asarray( + [2 * 2 * sum((i + 1) * (j + 1) for j in range(9)) for i in range(9)], + dtype=np.float64, + ) grad_obj = np.ones(9) anlp.evaluate_grad_objective(out=grad_obj) self.assertTrue(np.array_equal(expected_gradient, grad_obj)) @@ -222,7 +252,9 @@ def execute_extended_nlp_interface(self, anlp): # full constraints con = anlp.evaluate_constraints() - expected_con = np.asarray([45, 88, 3*45, 4*45, 5*45, 276, 7*45, 8*45, 9*45], dtype=np.float64) + expected_con = np.asarray( + [45, 88, 3 * 45, 4 * 45, 5 * 45, 276, 7 * 45, 8 * 45, 9 * 45], dtype=np.float64 + ) self.assertTrue(np.array_equal(expected_con, con)) # test inplace con = np.zeros(9) @@ -230,10 +262,23 @@ def execute_extended_nlp_interface(self, anlp): self.assertTrue(ret is con) self.assertTrue(np.array_equal(expected_con, con)) # change the value of the primals - anlp.set_primals(2.0*np.ones(9)) + anlp.set_primals(2.0 * np.ones(9)) con = np.zeros(9) anlp.evaluate_constraints(out=con) - expected_con = np.asarray([2*45, 2*(88+2)-2, 2*3*45, 2*4*45, 2*5*45, 2*(276-6)+6, 2*7*45, 2*8*45, 2*9*45], dtype=np.float64) + expected_con = np.asarray( + [ + 2 * 45, + 2 * (88 + 2) - 2, + 2 * 3 * 45, + 2 * 4 * 45, + 2 * 5 * 45, + 2 * (276 - 6) + 6, + 2 * 7 * 45, + 2 * 8 * 45, + 2 * 9 * 45, + ], + dtype=np.float64, + ) self.assertTrue(np.array_equal(expected_con, con)) anlp.set_primals(np.ones(9)) @@ -247,16 +292,20 @@ def execute_extended_nlp_interface(self, anlp): self.assertTrue(ret is con_eq) self.assertTrue(np.array_equal(expected_con_eq, con_eq)) # change the value of the primals - anlp.set_primals(2.0*np.ones(9)) + anlp.set_primals(2.0 * np.ones(9)) con_eq = np.zeros(2) anlp.evaluate_eq_constraints(out=con_eq) - expected_con_eq = np.asarray([2*(88+2)-2, 2*(276-6)+6], dtype=np.float64) + expected_con_eq = np.asarray( + [2 * (88 + 2) - 2, 2 * (276 - 6) + 6], dtype=np.float64 + ) self.assertTrue(np.array_equal(expected_con_eq, con_eq)) anlp.set_primals(np.ones(9)) # inequality constraints con_ineq = anlp.evaluate_ineq_constraints() - expected_con_ineq = np.asarray([45, 3*45, 4*45, 5*45, 7*45, 8*45, 9*45], dtype=np.float64) + expected_con_ineq = np.asarray( + [45, 3 * 45, 4 * 45, 5 * 45, 7 * 45, 8 * 45, 9 * 45], dtype=np.float64 + ) self.assertTrue(np.array_equal(expected_con_ineq, con_ineq)) # test inplace con_ineq = np.zeros(7) @@ -264,28 +313,28 @@ def execute_extended_nlp_interface(self, anlp): self.assertTrue(ret is con_ineq) self.assertTrue(np.array_equal(expected_con_ineq, con_ineq)) # change the value of the primals - anlp.set_primals(2.0*np.ones(9)) + anlp.set_primals(2.0 * np.ones(9)) con_ineq = np.zeros(7) anlp.evaluate_ineq_constraints(out=con_ineq) - expected_con_ineq = 2.0*expected_con_ineq + expected_con_ineq = 2.0 * expected_con_ineq self.assertTrue(np.array_equal(expected_con_ineq, con_ineq)) anlp.set_primals(np.ones(9)) # jacobian of all constraints jac = anlp.evaluate_jacobian() dense_jac = jac.todense() - expected_jac = [ [(i)*(j) for j in range(1,10)] for i in range(1,10) ] + expected_jac = [[(i) * (j) for j in range(1, 10)] for i in range(1, 10)] expected_jac = np.asarray(expected_jac, dtype=np.float64) self.assertTrue(np.array_equal(dense_jac, expected_jac)) # test inplace - jac.data = 0*jac.data + jac.data = 0 * jac.data ret = anlp.evaluate_jacobian(out=jac) self.assertTrue(ret is jac) dense_jac = jac.todense() self.assertTrue(np.array_equal(dense_jac, expected_jac)) # change the value of the primals # ToDo: not a great test since this problem is linear - anlp.set_primals(2.0*np.ones(9)) + anlp.set_primals(2.0 * np.ones(9)) anlp.evaluate_jacobian(out=jac) dense_jac = jac.todense() self.assertTrue(np.array_equal(dense_jac, expected_jac)) @@ -293,18 +342,20 @@ def execute_extended_nlp_interface(self, anlp): # jacobian of equality constraints jac_eq = anlp.evaluate_jacobian_eq() dense_jac_eq = jac_eq.todense() - expected_jac_eq = np.asarray([[2, 4, 6, 8, 10, 12, 14, 16, 18], - [6, 12, 18, 24, 30, 36, 42, 48, 54]], dtype=np.float64) + expected_jac_eq = np.asarray( + [[2, 4, 6, 8, 10, 12, 14, 16, 18], [6, 12, 18, 24, 30, 36, 42, 48, 54]], + dtype=np.float64, + ) self.assertTrue(np.array_equal(dense_jac_eq, expected_jac_eq)) # test inplace - jac_eq.data = 0*jac_eq.data + jac_eq.data = 0 * jac_eq.data ret = anlp.evaluate_jacobian_eq(out=jac_eq) self.assertTrue(ret is jac_eq) dense_jac_eq = jac_eq.todense() self.assertTrue(np.array_equal(dense_jac_eq, expected_jac_eq)) # change the value of the primals # ToDo: not a great test since this problem is linear - anlp.set_primals(2.0*np.ones(9)) + anlp.set_primals(2.0 * np.ones(9)) anlp.evaluate_jacobian_eq(out=jac_eq) dense_jac_eq = jac_eq.todense() self.assertTrue(np.array_equal(dense_jac_eq, expected_jac_eq)) @@ -312,18 +363,20 @@ def execute_extended_nlp_interface(self, anlp): # jacobian of inequality constraints jac_ineq = anlp.evaluate_jacobian_ineq() dense_jac_ineq = jac_ineq.todense() - expected_jac_ineq = [ [(i)*(j) for j in range(1,10)] for i in [1, 3, 4, 5, 7, 8, 9] ] + expected_jac_ineq = [ + [(i) * (j) for j in range(1, 10)] for i in [1, 3, 4, 5, 7, 8, 9] + ] expected_jac_ineq = np.asarray(expected_jac_ineq, dtype=np.float64) self.assertTrue(np.array_equal(dense_jac_ineq, expected_jac_ineq)) # test inplace - jac_ineq.data = 0*jac_ineq.data + jac_ineq.data = 0 * jac_ineq.data ret = anlp.evaluate_jacobian_ineq(out=jac_ineq) self.assertTrue(ret is jac_ineq) dense_jac_ineq = jac_ineq.todense() self.assertTrue(np.array_equal(dense_jac_ineq, expected_jac_ineq)) # change the value of the primals # ToDo: not a great test since this problem is linear - anlp.set_primals(2.0*np.ones(9)) + anlp.set_primals(2.0 * np.ones(9)) anlp.evaluate_jacobian_ineq(out=jac_ineq) dense_jac_ineq = jac_ineq.todense() self.assertTrue(np.array_equal(dense_jac_ineq, expected_jac_ineq)) @@ -331,7 +384,7 @@ def execute_extended_nlp_interface(self, anlp): # hessian hess = anlp.evaluate_hessian_lag() dense_hess = hess.todense() - expected_hess = [ [2.0*i*j for j in range(1, 10)] for i in range(1,10) ] + expected_hess = [[2.0 * i * j for j in range(1, 10)] for i in range(1, 10)] expected_hess = np.asarray(expected_hess, dtype=np.float64) self.assertTrue(np.array_equal(dense_hess, expected_hess)) # test inplace @@ -341,7 +394,7 @@ def execute_extended_nlp_interface(self, anlp): dense_hess = hess.todense() self.assertTrue(np.array_equal(dense_hess, expected_hess)) # change the value of the primals - anlp.set_primals(2.0*np.ones(9)) + anlp.set_primals(2.0 * np.ones(9)) anlp.evaluate_hessian_lag(out=hess) dense_hess = hess.todense() self.assertTrue(np.array_equal(dense_hess, expected_hess)) @@ -349,7 +402,7 @@ def execute_extended_nlp_interface(self, anlp): anlp.set_obj_factor(2.0) hess = anlp.evaluate_hessian_lag() dense_hess = hess.todense() - expected_hess = [ [4.0*i*j for j in range(1, 10)] for i in range(1,10) ] + expected_hess = [[4.0 * i * j for j in range(1, 10)] for i in range(1, 10)] expected_hess = np.asarray(expected_hess, dtype=np.float64) self.assertTrue(np.array_equal(dense_hess, expected_hess)) @@ -360,7 +413,7 @@ def setUpClass(cls): cls.pm = create_pyomo_model1() temporary_dir = tempfile.mkdtemp() cls.filename = os.path.join(temporary_dir, "Pyomo_TestAslNLP") - cls.pm.write(cls.filename+'.nl', io_options={"symbolic_solver_labels": True}) + cls.pm.write(cls.filename + '.nl', io_options={"symbolic_solver_labels": True}) @classmethod def tearDownClass(cls): @@ -376,7 +429,8 @@ def test_nlp_interface(self): self.assertIsNone(anlp.get_constraints_scaling()) self.assertIsNone(anlp.get_eq_constraints_scaling()) self.assertIsNone(anlp.get_ineq_constraints_scaling()) - + + class TestAmplNLP(unittest.TestCase): @classmethod def setUpClass(cls): @@ -384,10 +438,12 @@ def setUpClass(cls): cls.pm2 = create_pyomo_model2() temporary_dir = tempfile.mkdtemp() cls.filename = os.path.join(temporary_dir, "Pyomo_TestAmplNLP") - cls.pm2.write(cls.filename+'.nl', io_options={"symbolic_solver_labels": True}) - cls.nlp = AmplNLP(cls.filename+'.nl', - row_filename=cls.filename+'.row', - col_filename=cls.filename+'.col') + cls.pm2.write(cls.filename + '.nl', io_options={"symbolic_solver_labels": True}) + cls.nlp = AmplNLP( + cls.filename + '.nl', + row_filename=cls.filename + '.row', + col_filename=cls.filename + '.col', + ) @classmethod def tearDownClass(cls): @@ -398,28 +454,30 @@ def test_names(self): # Note: order may not be the same as "expected" expected_variable_names = ['x[1]', 'x[2]', 'x[3]'] variable_names = self.nlp.variable_names() - self.assertEqual(len(expected_variable_names),len(variable_names)) + self.assertEqual(len(expected_variable_names), len(variable_names)) for i in range(len(expected_variable_names)): self.assertTrue(expected_variable_names[i] in variable_names) # Note: order may not be the same as "expected" expected_constraint_names = ['e1', 'e2', 'i1', 'i2', 'i3'] constraint_names = self.nlp.constraint_names() - self.assertEqual(len(expected_constraint_names),len(constraint_names)) + self.assertEqual(len(expected_constraint_names), len(constraint_names)) for i in range(len(expected_constraint_names)): self.assertTrue(expected_constraint_names[i] in constraint_names) # Note: order may not be the same as "expected" expected_eq_constraint_names = ['e1', 'e2'] eq_constraint_names = self.nlp.eq_constraint_names() - self.assertEqual(len(expected_eq_constraint_names),len(eq_constraint_names)) + self.assertEqual(len(expected_eq_constraint_names), len(eq_constraint_names)) for i in range(len(expected_eq_constraint_names)): self.assertTrue(expected_eq_constraint_names[i] in eq_constraint_names) # Note: order may not be the same as "expected" expected_ineq_constraint_names = ['i1', 'i2', 'i3'] ineq_constraint_names = self.nlp.ineq_constraint_names() - self.assertEqual(len(expected_ineq_constraint_names),len(ineq_constraint_names)) + self.assertEqual( + len(expected_ineq_constraint_names), len(ineq_constraint_names) + ) for i in range(len(expected_ineq_constraint_names)): self.assertTrue(expected_ineq_constraint_names[i] in ineq_constraint_names) @@ -459,7 +517,7 @@ class TestPyomoNLP(unittest.TestCase): def setUpClass(cls): # test problem cls.pm = create_pyomo_model1() - + @classmethod def tearDownClass(cls): pass @@ -476,34 +534,33 @@ def test_nlp_interface(self): self.assertTrue(np.array_equal(xs, expected_xs)) cs = nlp.get_constraints_scaling() - expected_cs = np.asarray([ 3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0 ]) + expected_cs = np.asarray([3.0, 6.0, 9.0, 12.0, 15.0, 18.0, 21.0, 24.0, 27.0]) self.assertTrue(np.array_equal(cs, expected_cs)) eqcs = nlp.get_eq_constraints_scaling() - expected_eqcs = np.asarray([ 6.0, 18.0 ]) + expected_eqcs = np.asarray([6.0, 18.0]) self.assertTrue(np.array_equal(eqcs, expected_eqcs)) ineqcs = nlp.get_ineq_constraints_scaling() - expected_ineqcs = np.asarray([ 3.0, 9.0, 12.0, 15.0, 21.0, 24.0, 27.0 ]) + expected_ineqcs = np.asarray([3.0, 9.0, 12.0, 15.0, 21.0, 24.0, 27.0]) self.assertTrue(np.array_equal(ineqcs, expected_ineqcs)) - def test_indices_methods(self): nlp = PyomoNLP(self.pm) # get_pyomo_variables variables = nlp.get_pyomo_variables() - expected_ids = [id(self.pm.x[i]) for i in range(1,10)] + expected_ids = [id(self.pm.x[i]) for i in range(1, 10)] ids = [id(variables[i]) for i in range(9)] self.assertTrue(expected_ids == ids) variable_names = nlp.variable_names() - expected_names = [self.pm.x[i].getname() for i in range(1,10)] + expected_names = [self.pm.x[i].getname() for i in range(1, 10)] self.assertTrue(variable_names == expected_names) # get_pyomo_constraints constraints = nlp.get_pyomo_constraints() - expected_ids = [id(self.pm.c[i]) for i in range(1,10)] + expected_ids = [id(self.pm.c[i]) for i in range(1, 10)] ids = [id(constraints[i]) for i in range(9)] self.assertTrue(expected_ids == ids) @@ -514,14 +571,16 @@ def test_indices_methods(self): # get_pyomo_equality_constraints eq_constraints = nlp.get_pyomo_equality_constraints() # 2 and 6 are the equality constraints - eq_indices = [2, 6] # "indices" here is a bit overloaded + eq_indices = [2, 6] # "indices" here is a bit overloaded expected_eq_ids = [id(self.pm.c[i]) for i in eq_indices] eq_ids = [id(con) for con in eq_constraints] self.assertEqual(eq_ids, expected_eq_ids) eq_constraint_names = nlp.equality_constraint_names() - expected_eq_names = [c.getname(fully_qualified=True) - for c in nlp.get_pyomo_equality_constraints()] + expected_eq_names = [ + c.getname(fully_qualified=True) + for c in nlp.get_pyomo_equality_constraints() + ] self.assertEqual(eq_constraint_names, expected_eq_names) # get_pyomo_inequality_constraints @@ -541,10 +600,14 @@ def test_indices_methods(self): # get_constraint_indices expected_constraint_indices = [i for i in range(9)] - self.assertTrue(expected_constraint_indices == nlp.get_constraint_indices([self.pm.c])) + self.assertTrue( + expected_constraint_indices == nlp.get_constraint_indices([self.pm.c]) + ) expected_constraint_indices = [0, 3, 8, 4] constraints = [self.pm.c[1], self.pm.c[4], self.pm.c[9], self.pm.c[5]] - self.assertTrue(expected_constraint_indices == nlp.get_constraint_indices(constraints)) + self.assertTrue( + expected_constraint_indices == nlp.get_constraint_indices(constraints) + ) # get_equality_constraint_indices pyomo_eq_indices = [2, 6] @@ -554,8 +617,7 @@ def test_indices_methods(self): eq_constraints = [self.pm.c[i] for i in pyomo_eq_indices] expected_eq_indices = [0, 1] # ^indices in the list of equality constraints - eq_constraint_indices = nlp.get_equality_constraint_indices( - eq_constraints) + eq_constraint_indices = nlp.get_equality_constraint_indices(eq_constraints) self.assertEqual(expected_eq_indices, eq_constraint_indices) # get_inequality_constraint_indices @@ -567,62 +629,80 @@ def test_indices_methods(self): expected_ineq_indices = [0, 1, 2, 3, 4, 6] # ^indices in the list of equality constraints; didn't include 8 ineq_constraint_indices = nlp.get_inequality_constraint_indices( - ineq_constraints) + ineq_constraints + ) self.assertEqual(expected_ineq_indices, ineq_constraint_indices) # extract_subvector_grad_objective - expected_gradient = np.asarray([2*sum((i+1)*(j+1) for j in range(9)) for i in range(9)], dtype=np.float64) + expected_gradient = np.asarray( + [2 * sum((i + 1) * (j + 1) for j in range(9)) for i in range(9)], + dtype=np.float64, + ) grad_obj = nlp.extract_subvector_grad_objective([self.pm.x]) self.assertTrue(np.array_equal(expected_gradient, grad_obj)) - expected_gradient = np.asarray([2*sum((i+1)*(j+1) for j in range(9)) for i in [0, 3, 8, 4]], dtype=np.float64) + expected_gradient = np.asarray( + [2 * sum((i + 1) * (j + 1) for j in range(9)) for i in [0, 3, 8, 4]], + dtype=np.float64, + ) variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]] grad_obj = nlp.extract_subvector_grad_objective(variables) self.assertTrue(np.array_equal(expected_gradient, grad_obj)) # extract_subvector_constraints - expected_con = np.asarray([45, 88, 3*45, 4*45, 5*45, 276, 7*45, 8*45, 9*45], dtype=np.float64) + expected_con = np.asarray( + [45, 88, 3 * 45, 4 * 45, 5 * 45, 276, 7 * 45, 8 * 45, 9 * 45], + dtype=np.float64, + ) con = nlp.extract_subvector_constraints([self.pm.c]) self.assertTrue(np.array_equal(expected_con, con)) - expected_con = np.asarray([45, 4*45, 9*45, 5*45], dtype=np.float64) + expected_con = np.asarray([45, 4 * 45, 9 * 45, 5 * 45], dtype=np.float64) constraints = [self.pm.c[1], self.pm.c[4], self.pm.c[9], self.pm.c[5]] con = nlp.extract_subvector_constraints(constraints) self.assertTrue(np.array_equal(expected_con, con)) # extract_submatrix_jacobian - expected_jac = [ [(i)*(j) for j in range(1,10)] for i in range(1,10) ] + expected_jac = [[(i) * (j) for j in range(1, 10)] for i in range(1, 10)] expected_jac = np.asarray(expected_jac, dtype=np.float64) - jac = nlp.extract_submatrix_jacobian(pyomo_variables=[self.pm.x], pyomo_constraints=[self.pm.c]) + jac = nlp.extract_submatrix_jacobian( + pyomo_variables=[self.pm.x], pyomo_constraints=[self.pm.c] + ) dense_jac = jac.todense() self.assertTrue(np.array_equal(dense_jac, expected_jac)) - expected_jac = [ [(i)*(j) for j in [1, 4, 9, 5]] for i in [2, 6, 4] ] + expected_jac = [[(i) * (j) for j in [1, 4, 9, 5]] for i in [2, 6, 4]] expected_jac = np.asarray(expected_jac, dtype=np.float64) variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]] constraints = [self.pm.c[2], self.pm.c[6], self.pm.c[4]] - jac = nlp.extract_submatrix_jacobian(pyomo_variables=variables, pyomo_constraints=constraints) + jac = nlp.extract_submatrix_jacobian( + pyomo_variables=variables, pyomo_constraints=constraints + ) dense_jac = jac.todense() self.assertTrue(np.array_equal(dense_jac, expected_jac)) # extract_submatrix_hessian_lag - expected_hess = [ [2.0*i*j for j in range(1, 10)] for i in range(1,10) ] + expected_hess = [[2.0 * i * j for j in range(1, 10)] for i in range(1, 10)] expected_hess = np.asarray(expected_hess, dtype=np.float64) - hess = nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=[self.pm.x], pyomo_variables_cols=[self.pm.x]) + hess = nlp.extract_submatrix_hessian_lag( + pyomo_variables_rows=[self.pm.x], pyomo_variables_cols=[self.pm.x] + ) dense_hess = hess.todense() self.assertTrue(np.array_equal(dense_hess, expected_hess)) - expected_hess = [ [2.0*i*j for j in [1, 4, 9, 5]] for i in [1, 4, 9, 5]] + expected_hess = [[2.0 * i * j for j in [1, 4, 9, 5]] for i in [1, 4, 9, 5]] expected_hess = np.asarray(expected_hess, dtype=np.float64) variables = [self.pm.x[1], self.pm.x[4], self.pm.x[9], self.pm.x[5]] - hess = nlp.extract_submatrix_hessian_lag(pyomo_variables_rows=variables, pyomo_variables_cols=variables) + hess = nlp.extract_submatrix_hessian_lag( + pyomo_variables_rows=variables, pyomo_variables_cols=variables + ) dense_hess = hess.todense() self.assertTrue(np.array_equal(dense_hess, expected_hess)) def test_no_objective(self): m = pyo.ConcreteModel() m.x = pyo.Var() - m.c = pyo.Constraint(expr=2.0*m.x>=5) + m.c = pyo.Constraint(expr=2.0 * m.x >= 5) with self.assertRaises(NotImplementedError): nlp = PyomoNLP(m) @@ -630,22 +710,25 @@ def test_invalid_bounds(self): m = pyo.ConcreteModel() m.x = pyo.Var([1, 2, 3], domain=pyo.NonNegativeReals) for i in m.x: - m.x[i].ub = i-2 + m.x[i].ub = i - 2 m.x[i].value = i m.i3 = pyo.Constraint(expr=m.x[2] + m.x[3] + m.x[1] >= -500.0) - m.obj = pyo.Objective(expr=m.x[2]**2) + m.obj = pyo.Objective(expr=m.x[2] ** 2) with self.assertRaisesRegex( - RuntimeError, "Some variables have lower bounds that " - "are greater than the upper bounds"): + RuntimeError, + "Some variables have lower bounds that " + "are greater than the upper bounds", + ): nlp = PyomoNLP(m) + class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): cls.pm = create_pyomo_model1() temporary_dir = tempfile.mkdtemp() cls.filename = os.path.join(temporary_dir, "Pyomo_TestAslNLP") - cls.pm.write(cls.filename+'.nl', io_options={"symbolic_solver_labels": True}) + cls.pm.write(cls.filename + '.nl', io_options={"symbolic_solver_labels": True}) @classmethod def tearDownClass(cls): @@ -654,53 +737,138 @@ def tearDownClass(cls): def test_util_maps(self): anlp = AslNLP(self.filename) - full_to_compressed_mask = build_compression_mask_for_finite_values(anlp.primals_lb()) + full_to_compressed_mask = build_compression_mask_for_finite_values( + anlp.primals_lb() + ) # test build_bounds_mask - should be the same as above - self.assertTrue(np.array_equal(full_to_compressed_mask, build_bounds_mask(anlp.primals_lb()))) + self.assertTrue( + np.array_equal( + full_to_compressed_mask, build_bounds_mask(anlp.primals_lb()) + ) + ) - expected_compressed_primals_lb = np.asarray([-1, 2, -3, -5, -7, -9], dtype=np.float64) + expected_compressed_primals_lb = np.asarray( + [-1, 2, -3, -5, -7, -9], dtype=np.float64 + ) # test build_compression_matrix C = build_compression_matrix(full_to_compressed_mask) - compressed_primals_lb = C*anlp.primals_lb() - self.assertTrue(np.array_equal(expected_compressed_primals_lb, compressed_primals_lb)) + compressed_primals_lb = C * anlp.primals_lb() + self.assertTrue( + np.array_equal(expected_compressed_primals_lb, compressed_primals_lb) + ) # test full_to_compressed - compressed_primals_lb = full_to_compressed(anlp.primals_lb(), full_to_compressed_mask) - self.assertTrue(np.array_equal(expected_compressed_primals_lb, compressed_primals_lb)) + compressed_primals_lb = full_to_compressed( + anlp.primals_lb(), full_to_compressed_mask + ) + self.assertTrue( + np.array_equal(expected_compressed_primals_lb, compressed_primals_lb) + ) # test in place compressed_primals_lb = np.zeros(len(expected_compressed_primals_lb)) - ret = full_to_compressed(anlp.primals_lb(), full_to_compressed_mask, out=compressed_primals_lb) + ret = full_to_compressed( + anlp.primals_lb(), full_to_compressed_mask, out=compressed_primals_lb + ) self.assertTrue(ret is compressed_primals_lb) - self.assertTrue(np.array_equal(expected_compressed_primals_lb, compressed_primals_lb)) - + self.assertTrue( + np.array_equal(expected_compressed_primals_lb, compressed_primals_lb) + ) + # test compressed_to_full - expected_full_primals_lb = np.asarray([-1, 2, -3, -np.inf, -5, -np.inf, -7, -np.inf, -9], dtype=np.float64) - full_primals_lb = compressed_to_full(compressed_primals_lb, full_to_compressed_mask, default=-np.inf) + expected_full_primals_lb = np.asarray( + [-1, 2, -3, -np.inf, -5, -np.inf, -7, -np.inf, -9], dtype=np.float64 + ) + full_primals_lb = compressed_to_full( + compressed_primals_lb, full_to_compressed_mask, default=-np.inf + ) self.assertTrue(np.array_equal(expected_full_primals_lb, full_primals_lb)) # test in place full_primals_lb.fill(0.0) - ret = compressed_to_full(compressed_primals_lb, full_to_compressed_mask, out=full_primals_lb, default=-np.inf) + ret = compressed_to_full( + compressed_primals_lb, + full_to_compressed_mask, + out=full_primals_lb, + default=-np.inf, + ) self.assertTrue(ret is full_primals_lb) self.assertTrue(np.array_equal(expected_full_primals_lb, full_primals_lb)) # test no default - expected_full_primals_lb = np.asarray([-1, 2, -3, np.nan, -5, np.nan, -7, np.nan, -9], dtype=np.float64) - full_primals_lb = compressed_to_full(compressed_primals_lb, full_to_compressed_mask) + expected_full_primals_lb = np.asarray( + [-1, 2, -3, np.nan, -5, np.nan, -7, np.nan, -9], dtype=np.float64 + ) + full_primals_lb = compressed_to_full( + compressed_primals_lb, full_to_compressed_mask + ) print(expected_full_primals_lb) print(full_primals_lb) np.testing.assert_array_equal(expected_full_primals_lb, full_primals_lb) # test in place no default - expected_full_primals_lb = np.asarray([-1, 2, -3, 0.0, -5, 0.0, -7, 0.0, -9], dtype=np.float64) + expected_full_primals_lb = np.asarray( + [-1, 2, -3, 0.0, -5, 0.0, -7, 0.0, -9], dtype=np.float64 + ) full_primals_lb.fill(0.0) - ret = compressed_to_full(compressed_primals_lb, full_to_compressed_mask, out=full_primals_lb) + ret = compressed_to_full( + compressed_primals_lb, full_to_compressed_mask, out=full_primals_lb + ) self.assertTrue(ret is full_primals_lb) self.assertTrue(np.array_equal(expected_full_primals_lb, full_primals_lb)) +class TestExceptions(unittest.TestCase): + def _make_bad_model(self): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=[1, 2, 3]) + m.x = pyo.Var(m.I, initialize=1) + + m.obj = pyo.Objective(expr=m.x[1] + m.x[2] / m.x[3]) + m.eq1 = pyo.Constraint(expr=m.x[1] == pyo.sqrt(m.x[2])) + return m + + def test_eval_error_in_constraint(self): + m = self._make_bad_model() + m.x[2] = -1 + nlp = PyomoNLP(m) + msg = "Error in AMPL evaluation" + with self.assertRaisesRegex(PyNumeroEvaluationError, msg): + residuals = nlp.evaluate_constraints() + + def test_eval_error_in_constraint_jacobian(self): + m = self._make_bad_model() + m.x[2] = -1 + nlp = PyomoNLP(m) + msg = "Error in AMPL evaluation" + with self.assertRaisesRegex(PyNumeroEvaluationError, msg): + jacobian = nlp.evaluate_jacobian() + + def test_eval_error_in_objective(self): + m = self._make_bad_model() + m.x[3] = 0 + nlp = PyomoNLP(m) + msg = "Error in AMPL evaluation" + with self.assertRaisesRegex(PyNumeroEvaluationError, msg): + objval = nlp.evaluate_objective() + + def test_eval_error_in_objective_gradient(self): + m = self._make_bad_model() + m.x[3] = 0 + nlp = PyomoNLP(m) + msg = "Error in AMPL evaluation" + with self.assertRaisesRegex(PyNumeroEvaluationError, msg): + gradient = nlp.evaluate_grad_objective() + + def test_eval_error_in_lagrangian_hessian(self): + m = self._make_bad_model() + m.x[3] = 0 + nlp = PyomoNLP(m) + msg = "Error in AMPL evaluation" + with self.assertRaisesRegex(PyNumeroEvaluationError, msg): + hessian = nlp.evaluate_hessian_lag() + + if __name__ == '__main__': TestAslNLP.setUpClass() t = TestAslNLP() t.test_create() - diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_nlp_projections.py b/pyomo/contrib/pynumero/interfaces/tests/test_nlp_projections.py index 4ad45c3d7f6..7bf693b1eb6 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_nlp_projections.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_nlp_projections.py @@ -13,40 +13,52 @@ import os from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy_available + numpy as np, + numpy_available, + scipy_available, ) + if not (numpy_available and scipy_available): raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): - raise unittest.SkipTest( - "Pynumero needs the ASL extension to run NLP tests") + raise unittest.SkipTest("Pynumero needs the ASL extension to run NLP tests") import pyomo.environ as pyo from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP -from pyomo.contrib.pynumero.interfaces.nlp_projections import RenamedNLP, ProjectedNLP +from pyomo.contrib.pynumero.interfaces.nlp_projections import ( + RenamedNLP, + ProjectedNLP, + ProjectedExtendedNLP, +) + def create_pyomo_model(): m = pyo.ConcreteModel() - m.x = pyo.Var(range(3), bounds=(-10,10), initialize={0:1.0, 1:2.0, 2:4.0}) + m.x = pyo.Var(range(3), bounds=(-10, 10), initialize={0: 1.0, 1: 2.0, 2: 4.0}) - m.obj = pyo.Objective(expr=m.x[0]**2 + m.x[0]*m.x[1] + m.x[0]*m.x[2] + m.x[2]**2) + m.obj = pyo.Objective( + expr=m.x[0] ** 2 + m.x[0] * m.x[1] + m.x[0] * m.x[2] + m.x[2] ** 2 + ) - m.con1 = pyo.Constraint(expr=m.x[0]*m.x[1] + m.x[0]*m.x[2] == 4) + m.con1 = pyo.Constraint(expr=m.x[0] * m.x[1] + m.x[0] * m.x[2] == 4) m.con2 = pyo.Constraint(expr=m.x[0] + m.x[2] == 4) return m + class TestRenamedNLP(unittest.TestCase): def test_rename(self): m = create_pyomo_model() nlp = PyomoNLP(m) expected_names = ['x[0]', 'x[1]', 'x[2]'] self.assertEqual(nlp.primals_names(), expected_names) - renamed_nlp = RenamedNLP(nlp, {'x[0]': 'y[0]', 'x[1]':'y[1]', 'x[2]':'y[2]'}) + renamed_nlp = RenamedNLP(nlp, {'x[0]': 'y[0]', 'x[1]': 'y[1]', 'x[2]': 'y[2]'}) expected_names = ['y[0]', 'y[1]', 'y[2]'] - + + class TestProjectedNLP(unittest.TestCase): def test_projected(self): m = create_pyomo_model() @@ -54,33 +66,37 @@ def test_projected(self): projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[1]', 'x[2]']) expected_names = ['x[0]', 'x[1]', 'x[2]'] self.assertEqual(projected_nlp.primals_names(), expected_names) - self.assertTrue(np.array_equal(projected_nlp.get_primals(), - np.asarray([1.0, 2.0, 4.0]))) - self.assertTrue(np.array_equal(projected_nlp.evaluate_grad_objective(), - np.asarray([8.0, 1.0, 9.0]))) + self.assertTrue( + np.array_equal(projected_nlp.get_primals(), np.asarray([1.0, 2.0, 4.0])) + ) + self.assertTrue( + np.array_equal( + projected_nlp.evaluate_grad_objective(), np.asarray([8.0, 1.0, 9.0]) + ) + ) self.assertEqual(projected_nlp.nnz_jacobian(), 5) self.assertEqual(projected_nlp.nnz_hessian_lag(), 6) J = projected_nlp.evaluate_jacobian() self.assertEqual(len(J.data), 5) denseJ = J.todense() - expected_jac = np.asarray([[6.0, 1.0, 1.0],[1.0, 0.0, 1.0]]) + expected_jac = np.asarray([[6.0, 1.0, 1.0], [1.0, 0.0, 1.0]]) self.assertTrue(np.array_equal(denseJ, expected_jac)) # test the use of "out" - J = 0.0*J + J = 0.0 * J projected_nlp.evaluate_jacobian(out=J) denseJ = J.todense() self.assertTrue(np.array_equal(denseJ, expected_jac)) H = projected_nlp.evaluate_hessian_lag() self.assertEqual(len(H.data), 6) - expectedH = np.asarray([[2.0, 1.0, 1.0],[1.0, 0.0, 0.0], [1.0, 0.0, 2.0]]) + expectedH = np.asarray([[2.0, 1.0, 1.0], [1.0, 0.0, 0.0], [1.0, 0.0, 2.0]]) denseH = H.todense() self.assertTrue(np.array_equal(denseH, expectedH)) # test the use of "out" - H = 0.0*H + H = 0.0 * H projected_nlp.evaluate_hessian_lag(out=H) denseH = H.todense() self.assertTrue(np.array_equal(denseH, expectedH)) @@ -89,32 +105,37 @@ def test_projected(self): projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[2]', 'x[1]']) expected_names = ['x[0]', 'x[2]', 'x[1]'] self.assertEqual(projected_nlp.primals_names(), expected_names) - self.assertTrue(np.array_equal(projected_nlp.get_primals(), np.asarray([1.0, 4.0, 2.0]))) - self.assertTrue(np.array_equal(projected_nlp.evaluate_grad_objective(), - np.asarray([8.0, 9.0, 1.0]))) + self.assertTrue( + np.array_equal(projected_nlp.get_primals(), np.asarray([1.0, 4.0, 2.0])) + ) + self.assertTrue( + np.array_equal( + projected_nlp.evaluate_grad_objective(), np.asarray([8.0, 9.0, 1.0]) + ) + ) self.assertEqual(projected_nlp.nnz_jacobian(), 5) self.assertEqual(projected_nlp.nnz_hessian_lag(), 6) J = projected_nlp.evaluate_jacobian() self.assertEqual(len(J.data), 5) denseJ = J.todense() - expected_jac = np.asarray([[6.0, 1.0, 1.0],[1.0, 1.0, 0.0]]) + expected_jac = np.asarray([[6.0, 1.0, 1.0], [1.0, 1.0, 0.0]]) self.assertTrue(np.array_equal(denseJ, expected_jac)) # test the use of "out" - J = 0.0*J + J = 0.0 * J projected_nlp.evaluate_jacobian(out=J) denseJ = J.todense() self.assertTrue(np.array_equal(denseJ, expected_jac)) H = projected_nlp.evaluate_hessian_lag() self.assertEqual(len(H.data), 6) - expectedH = np.asarray([[2.0, 1.0, 1.0],[1.0, 2.0, 0.0], [1.0, 0.0, 0.0]]) + expectedH = np.asarray([[2.0, 1.0, 1.0], [1.0, 2.0, 0.0], [1.0, 0.0, 0.0]]) denseH = H.todense() self.assertTrue(np.array_equal(denseH, expectedH)) # test the use of "out" - H = 0.0*H + H = 0.0 * H projected_nlp.evaluate_hessian_lag(out=H) denseH = H.todense() self.assertTrue(np.array_equal(denseH, expectedH)) @@ -123,33 +144,46 @@ def test_projected(self): projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[2]', 'y', 'x[1]']) expected_names = ['x[0]', 'x[2]', 'y', 'x[1]'] self.assertEqual(projected_nlp.primals_names(), expected_names) - np.testing.assert_equal(projected_nlp.get_primals(),np.asarray([1.0, 4.0, np.nan, 2.0])) - - self.assertTrue(np.array_equal(projected_nlp.evaluate_grad_objective(), - np.asarray([8.0, 9.0, 0.0, 1.0]))) + np.testing.assert_equal( + projected_nlp.get_primals(), np.asarray([1.0, 4.0, np.nan, 2.0]) + ) + + self.assertTrue( + np.array_equal( + projected_nlp.evaluate_grad_objective(), + np.asarray([8.0, 9.0, 0.0, 1.0]), + ) + ) self.assertEqual(projected_nlp.nnz_jacobian(), 5) self.assertEqual(projected_nlp.nnz_hessian_lag(), 6) J = projected_nlp.evaluate_jacobian() self.assertEqual(len(J.data), 5) denseJ = J.todense() - expected_jac = np.asarray([[6.0, 1.0, 0.0, 1.0],[1.0, 1.0, 0.0, 0.0]]) + expected_jac = np.asarray([[6.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0]]) self.assertTrue(np.array_equal(denseJ, expected_jac)) # test the use of "out" - J = 0.0*J + J = 0.0 * J projected_nlp.evaluate_jacobian(out=J) denseJ = J.todense() self.assertTrue(np.array_equal(denseJ, expected_jac)) H = projected_nlp.evaluate_hessian_lag() self.assertEqual(len(H.data), 6) - expectedH = np.asarray([[2.0, 1.0, 0.0, 1.0],[1.0, 2.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]) + expectedH = np.asarray( + [ + [2.0, 1.0, 0.0, 1.0], + [1.0, 2.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [1.0, 0.0, 0.0, 0.0], + ] + ) denseH = H.todense() self.assertTrue(np.array_equal(denseH, expectedH)) # test the use of "out" - H = 0.0*H + H = 0.0 * H projected_nlp.evaluate_hessian_lag(out=H) denseH = H.todense() self.assertTrue(np.array_equal(denseH, expectedH)) @@ -158,39 +192,432 @@ def test_projected(self): projected_nlp = ProjectedNLP(nlp, ['x[0]', 'x[2]']) expected_names = ['x[0]', 'x[2]'] self.assertEqual(projected_nlp.primals_names(), expected_names) - np.testing.assert_equal(projected_nlp.get_primals(),np.asarray([1.0, 4.0])) - - self.assertTrue(np.array_equal(projected_nlp.evaluate_grad_objective(), - np.asarray([8.0, 9.0]))) + np.testing.assert_equal(projected_nlp.get_primals(), np.asarray([1.0, 4.0])) + + self.assertTrue( + np.array_equal( + projected_nlp.evaluate_grad_objective(), np.asarray([8.0, 9.0]) + ) + ) self.assertEqual(projected_nlp.nnz_jacobian(), 4) self.assertEqual(projected_nlp.nnz_hessian_lag(), 4) J = projected_nlp.evaluate_jacobian() self.assertEqual(len(J.data), 4) denseJ = J.todense() - expected_jac = np.asarray([[6.0, 1.0],[1.0, 1.0]]) + expected_jac = np.asarray([[6.0, 1.0], [1.0, 1.0]]) self.assertTrue(np.array_equal(denseJ, expected_jac)) # test the use of "out" - J = 0.0*J + J = 0.0 * J projected_nlp.evaluate_jacobian(out=J) denseJ = J.todense() self.assertTrue(np.array_equal(denseJ, expected_jac)) H = projected_nlp.evaluate_hessian_lag() self.assertEqual(len(H.data), 4) - expectedH = np.asarray([[2.0, 1.0],[1.0, 2.0]]) + expectedH = np.asarray([[2.0, 1.0], [1.0, 2.0]]) denseH = H.todense() self.assertTrue(np.array_equal(denseH, expectedH)) # test the use of "out" - H = 0.0*H + H = 0.0 * H projected_nlp.evaluate_hessian_lag(out=H) denseH = H.todense() self.assertTrue(np.array_equal(denseH, expectedH)) + +class TestProjectedExtendedNLP(unittest.TestCase): + def _make_model_with_inequalities(self): + m = pyo.ConcreteModel() + m.I = pyo.Set(initialize=range(4)) + m.x = pyo.Var(m.I, initialize=1.1) + m.obj = pyo.Objective( + expr=1 * m.x[0] + 2 * m.x[1] ** 2 + 3 * m.x[1] * m.x[2] + 4 * m.x[3] ** 3 + ) + m.eq_con_1 = pyo.Constraint( + expr=m.x[0] * (m.x[1] ** 1.1) * (m.x[2] ** 1.2) == 3.0 + ) + m.eq_con_2 = pyo.Constraint(expr=m.x[0] ** 2 + m.x[3] ** 2 + m.x[1] == 2.0) + m.ineq_con_1 = pyo.Constraint(expr=m.x[0] + m.x[3] * m.x[0] <= 4.0) + m.ineq_con_2 = pyo.Constraint(expr=m.x[1] + m.x[2] >= 1.0) + m.ineq_con_3 = pyo.Constraint(expr=m.x[2] >= 0) + return m + + def _get_nlps(self): + m = self._make_model_with_inequalities() + nlp = PyomoNLP(m) + primals_ordering = ["x[1]", "x[0]"] + proj_nlp = ProjectedExtendedNLP(nlp, primals_ordering) + return m, nlp, proj_nlp + + def _x_to_nlp(self, m, nlp, values): + # We often want to set coordinates in the nlp based on some + # order of variables in the model. However, in general we don't + # know the order of primals in the NLP. This method reorders + # a list of values such that they will be sent to x[0]...x[3] + # in the NLP. + indices = nlp.get_primal_indices([m.x[0], m.x[1], m.x[2], m.x[3]]) + reordered_values = [None for _ in m.x] + for i, val in zip(indices, values): + reordered_values[i] = val + return reordered_values + + def _c_to_nlp(self, m, nlp, values): + indices = nlp.get_constraint_indices( + [m.eq_con_1, m.eq_con_2, m.ineq_con_1, m.ineq_con_2, m.ineq_con_3] + ) + reordered_values = [None] * 5 + for i, val in zip(indices, values): + reordered_values[i] = val + return reordered_values + + def _eq_to_nlp(self, m, nlp, values): + indices = nlp.get_equality_constraint_indices([m.eq_con_1, m.eq_con_2]) + reordered_values = [None] * 2 + for i, val in zip(indices, values): + reordered_values[i] = val + return reordered_values + + def _ineq_to_nlp(self, m, nlp, values): + indices = nlp.get_inequality_constraint_indices( + [m.ineq_con_1, m.ineq_con_2, m.ineq_con_3] + ) + reordered_values = [None] * 3 + for i, val in zip(indices, values): + reordered_values[i] = val + return reordered_values + + def _rc_to_nlp(self, m, nlp, rc): + var_indices = nlp.get_primal_indices(list(m.x.values())) + con_indices = nlp.get_constraint_indices( + [m.eq_con_1, m.eq_con_2, m.ineq_con_1, m.ineq_con_2, m.ineq_con_3] + ) + i, j = rc + return (con_indices[i], var_indices[j]) + + def _rc_to_proj_nlp(self, m, nlp, rc): + var_indices = [1, 0] + con_indices = nlp.get_constraint_indices( + [m.eq_con_1, m.eq_con_2, m.ineq_con_1, m.ineq_con_2, m.ineq_con_3] + ) + i, j = rc + return (con_indices[i], var_indices[j]) + + def _rc_to_proj_nlp_eq(self, m, nlp, rc): + # Expects variable coords in order [x0, x1], constraint coords + # in order [eq1, eq2] + var_indices = [1, 0] + con_indices = nlp.get_equality_constraint_indices([m.eq_con_1, m.eq_con_2]) + i, j = rc + return (con_indices[i], var_indices[j]) + + def _rc_to_proj_nlp_ineq(self, m, nlp, rc): + # Expects variable coords in order [x0, x1], constraint coords + # in order [ineq1, ineq2, ineq3] + var_indices = [1, 0] + con_indices = nlp.get_inequality_constraint_indices( + [m.ineq_con_1, m.ineq_con_2, m.ineq_con_3] + ) + i, j = rc + return (con_indices[i], var_indices[j]) + + def test_non_extended_original_nlp(self): + m, nlp, proj_nlp = self._get_nlps() + # Note that even though nlp is a PyomoNLP, and thus an ExtendedNLP, + # this projected NLP is *not* extended. + proj_nlp = ProjectedNLP(nlp, ["x[0]", "x[1]", "x[2]"]) + msg = "Original NLP must be an instance of ExtendedNLP" + with self.assertRaisesRegex(TypeError, msg): + proj_ext_nlp = ProjectedExtendedNLP(proj_nlp, ["x[1]", "x[0]"]) + + def test_n_primals_constraints(self): + m, nlp, proj_nlp = self._get_nlps() + self.assertEqual(proj_nlp.n_primals(), 2) + self.assertEqual(proj_nlp.n_constraints(), 5) + self.assertEqual(proj_nlp.n_eq_constraints(), 2) + self.assertEqual(proj_nlp.n_ineq_constraints(), 3) + + def test_set_get_primals(self): + m, nlp, proj_nlp = self._get_nlps() + primals = proj_nlp.get_primals() + np.testing.assert_array_equal(primals, [1.1, 1.1]) + nlp.set_primals(self._x_to_nlp(m, nlp, [1.2, 1.3, 1.4, 1.5])) + proj_primals = proj_nlp.get_primals() + np.testing.assert_array_equal(primals, [1.3, 1.2]) + + proj_nlp.set_primals(np.array([-1.0, -1.1])) + # Make sure we can get this vector back from ProjNLP + np.testing.assert_array_equal(proj_nlp.get_primals(), [-1.0, -1.1]) + # Make sure we can get this vector back from the original NLP + np.testing.assert_array_equal( + nlp.get_primals(), self._x_to_nlp(m, nlp, [-1.1, -1.0, 1.4, 1.5]) + ) + + def test_set_primals_with_list_error(self): + # This doesn't work. Get a TypeError due to treating list as numpy array + # when indexing another array. + m, nlp, proj_nlp = self._get_nlps() + msg = "only integer scalar arrays can be converted to a scalar index" + # This test may be too specific. If NumPy changes this error message, + # the test could fail. + with self.assertRaisesRegex(TypeError, msg): + proj_nlp.set_primals([1.0, 2.0]) + + def test_get_set_duals(self): + m, nlp, proj_nlp = self._get_nlps() + nlp.set_duals([2, 3, 4, 5, 6]) + np.testing.assert_array_equal(proj_nlp.get_duals(), [2, 3, 4, 5, 6]) + + proj_nlp.set_duals([-1, -2, -3, -4, -5]) + np.testing.assert_array_equal(proj_nlp.get_duals(), [-1, -2, -3, -4, -5]) + np.testing.assert_array_equal(nlp.get_duals(), [-1, -2, -3, -4, -5]) + + def test_eval_constraints(self): + m, nlp, proj_nlp = self._get_nlps() + x0, x1, x2, x3 = [1.2, 1.3, 1.4, 1.5] + nlp.set_primals(self._x_to_nlp(m, nlp, [x0, x1, x2, x3])) + + con_resids = nlp.evaluate_constraints() + pred_con_body = [ + x0 * x1**1.1 * x2**1.2 - 3.0, + x0**2 + x3**2 + x1 - 2.0, + x0 + x0 * x3, + x1 + x2, + x2, + ] + np.testing.assert_array_equal(con_resids, self._c_to_nlp(m, nlp, pred_con_body)) + + con_resids = proj_nlp.evaluate_constraints() + np.testing.assert_array_equal(con_resids, self._c_to_nlp(m, nlp, pred_con_body)) + + eq_resids = proj_nlp.evaluate_eq_constraints() + pred_eq_body = [x0 * x1**1.1 * x2**1.2 - 3.0, x0**2 + x3**2 + x1 - 2.0] + np.testing.assert_array_equal(eq_resids, self._eq_to_nlp(m, nlp, pred_eq_body)) + + ineq_body = proj_nlp.evaluate_ineq_constraints() + pred_ineq_body = [x0 + x0 * x3, x1 + x2, x2] + np.testing.assert_array_equal( + ineq_body, self._ineq_to_nlp(m, nlp, pred_ineq_body) + ) + + def test_eval_jacobian_orig_nlp(self): + m, nlp, proj_nlp = self._get_nlps() + x0, x1, x2, x3 = [1.2, 1.3, 1.4, 1.5] + nlp.set_primals(self._x_to_nlp(m, nlp, [x0, x1, x2, x3])) + + jac = nlp.evaluate_jacobian() + # Predicted row/col indices in the "natural ordering" of the model + pred_rc = [ + # eq 1 + (0, 0), + (0, 1), + (0, 2), + # eq 2 + (1, 0), + (1, 1), + (1, 3), + # ineq 1 + (2, 0), + (2, 3), + # ineq 2 + (3, 1), + (3, 2), + # ineq 3 + (4, 2), + ] + pred_data_dict = { + # eq 1 + (0, 0): x1**1.1 * x2**1.2, + (0, 1): 1.1 * x0 * (x1**0.1) * x2**1.2, + (0, 2): 1.2 * x0 * x1**1.1 * x2**0.2, + # eq 2 + (1, 0): 2 * x0, + (1, 1): 1.0, + (1, 3): 2 * x3, + # ineq 1 + (2, 0): 1.0 + x3, + (2, 3): x0, + # ineq 2 + (3, 1): 1.0, + (3, 2): 1.0, + # ineq 3 + (4, 2): 1.0, + } + pred_rc_set = set(self._rc_to_nlp(m, nlp, rc) for rc in pred_rc) + pred_data_dict = { + self._rc_to_nlp(m, nlp, rc): val for rc, val in pred_data_dict.items() + } + rc_set = set(zip(jac.row, jac.col)) + self.assertEqual(pred_rc_set, rc_set) + + data_dict = dict(zip(zip(jac.row, jac.col), jac.data)) + self.assertEqual(pred_data_dict, data_dict) + + def test_eval_jacobian_proj_nlp(self): + m, nlp, proj_nlp = self._get_nlps() + x0, x1, x2, x3 = [1.2, 1.3, 1.4, 1.5] + nlp.set_primals(self._x_to_nlp(m, nlp, [x0, x1, x2, x3])) + + jac = proj_nlp.evaluate_jacobian() + self.assertEqual(jac.shape, (5, 2)) + # Predicted row/col indices. In the "natural ordering" of the model. + pred_rc = [ + # eq 1 + (0, 0), + (0, 1), + # eq 2 + (1, 0), + (1, 1), + # ineq 1 + (2, 0), + # ineq 2 + (3, 1), + ] + pred_data_dict = { + # eq 1 + (0, 0): x1**1.1 * x2**1.2, + (0, 1): 1.1 * x0 * (x1**0.1) * x2**1.2, + # eq 2 + (1, 0): 2 * x0, + (1, 1): 1.0, + # ineq 1 + (2, 0): 1.0 + x3, + # ineq 2 + (3, 1): 1.0, + } + # Projected NLP has primals: [x1, x0] + pred_rc_set = set(self._rc_to_proj_nlp(m, nlp, rc) for rc in pred_rc) + pred_data_dict = { + self._rc_to_proj_nlp(m, nlp, rc): val for rc, val in pred_data_dict.items() + } + rc_set = set(zip(jac.row, jac.col)) + self.assertEqual(pred_rc_set, rc_set) + + data_dict = dict(zip(zip(jac.row, jac.col), jac.data)) + self.assertEqual(pred_data_dict, data_dict) + + def test_eval_eq_jacobian_proj_nlp(self): + m, nlp, proj_nlp = self._get_nlps() + x0, x1, x2, x3 = [1.2, 1.3, 1.4, 1.5] + nlp.set_primals(self._x_to_nlp(m, nlp, [x0, x1, x2, x3])) + + jac = proj_nlp.evaluate_jacobian_eq() + self.assertEqual(jac.shape, (2, 2)) + # Predicted row/col indices. In the "natural ordering" of the equality + # constraints (eq1, eq2) + # In list, first two are eq 1; second two are eq 2 + pred_rc = [(0, 0), (0, 1), (1, 0), (1, 1)] + pred_data_dict = { + # eq 1 + (0, 0): x1**1.1 * x2**1.2, + (0, 1): 1.1 * x0 * (x1**0.1) * x2**1.2, + # eq 2 + (1, 0): 2 * x0, + (1, 1): 1.0, + } + # Projected NLP has primals: [x1, x0] + pred_rc_set = set(self._rc_to_proj_nlp_eq(m, nlp, rc) for rc in pred_rc) + pred_data_dict = { + self._rc_to_proj_nlp_eq(m, nlp, rc): val + for rc, val in pred_data_dict.items() + } + rc_set = set(zip(jac.row, jac.col)) + self.assertEqual(pred_rc_set, rc_set) + + data_dict = dict(zip(zip(jac.row, jac.col), jac.data)) + self.assertEqual(pred_data_dict, data_dict) + + def test_eval_ineq_jacobian_proj_nlp(self): + m, nlp, proj_nlp = self._get_nlps() + x0, x1, x2, x3 = [1.2, 1.3, 1.4, 1.5] + nlp.set_primals(self._x_to_nlp(m, nlp, [x0, x1, x2, x3])) + + jac = proj_nlp.evaluate_jacobian_ineq() + self.assertEqual(jac.shape, (3, 2)) + # Predicted row/col indices. In the "natural ordering" of the inequality + # constraints (ineq1, ineq2, ineq3) + pred_rc = [(0, 0), (1, 1)] # [(ineq 1, ineq 2)] + pred_data_dict = { + # ineq 1 + (0, 0): 1.0 + x3, + # ineq 2 + (1, 1): 1.0, + } + # Projected NLP has primals: [x1, x0] + pred_rc_set = set(self._rc_to_proj_nlp_ineq(m, nlp, rc) for rc in pred_rc) + pred_data_dict = { + self._rc_to_proj_nlp_ineq(m, nlp, rc): val + for rc, val in pred_data_dict.items() + } + rc_set = set(zip(jac.row, jac.col)) + self.assertEqual(pred_rc_set, rc_set) + + data_dict = dict(zip(zip(jac.row, jac.col), jac.data)) + self.assertEqual(pred_data_dict, data_dict) + + def test_eval_eq_jacobian_proj_nlp_using_out_arg(self): + m, nlp, proj_nlp = self._get_nlps() + jac = proj_nlp.evaluate_jacobian_eq() + x0, x1, x2, x3 = [1.2, 1.3, 1.4, 1.5] + nlp.set_primals(self._x_to_nlp(m, nlp, [x0, x1, x2, x3])) + + proj_nlp.evaluate_jacobian_eq(out=jac) + self.assertEqual(jac.shape, (2, 2)) + # Predicted row/col indices. In the "natural ordering" of the equality + # constraints (eq1, eq2) + # In list, first two are eq 1; second two are eq 2 + pred_rc = [(0, 0), (0, 1), (1, 0), (1, 1)] + pred_data_dict = { + # eq 1 + (0, 0): x1**1.1 * x2**1.2, + (0, 1): 1.1 * x0 * (x1**0.1) * x2**1.2, + # eq 2 + (1, 0): 2 * x0, + (1, 1): 1.0, + } + # Projected NLP has primals: [x1, x0] + pred_rc_set = set(self._rc_to_proj_nlp_eq(m, nlp, rc) for rc in pred_rc) + pred_data_dict = { + self._rc_to_proj_nlp_eq(m, nlp, rc): val + for rc, val in pred_data_dict.items() + } + rc_set = set(zip(jac.row, jac.col)) + self.assertEqual(pred_rc_set, rc_set) + + data_dict = dict(zip(zip(jac.row, jac.col), jac.data)) + self.assertEqual(pred_data_dict, data_dict) + + def test_eval_ineq_jacobian_proj_nlp_using_out_arg(self): + m, nlp, proj_nlp = self._get_nlps() + jac = proj_nlp.evaluate_jacobian_ineq() + x0, x1, x2, x3 = [1.2, 1.3, 1.4, 1.5] + nlp.set_primals(self._x_to_nlp(m, nlp, [x0, x1, x2, x3])) + + proj_nlp.evaluate_jacobian_ineq(out=jac) + self.assertEqual(jac.shape, (3, 2)) + # Predicted row/col indices. In the "natural ordering" of the inequality + # constraints (ineq1, ineq2, ineq3) + pred_rc = [(0, 0), (1, 1)] # [(ineq 1, ineq 2)] + pred_data_dict = { + # ineq 1 + (0, 0): 1.0 + x3, + # ineq 2 + (1, 1): 1.0, + } + # Projected NLP has primals: [x1, x0] + pred_rc_set = set(self._rc_to_proj_nlp_ineq(m, nlp, rc) for rc in pred_rc) + pred_data_dict = { + self._rc_to_proj_nlp_ineq(m, nlp, rc): val + for rc, val in pred_data_dict.items() + } + rc_set = set(zip(jac.row, jac.col)) + self.assertEqual(pred_rc_set, rc_set) + + data_dict = dict(zip(zip(jac.row, jac.col), jac.data)) + self.assertEqual(pred_data_dict, data_dict) + + if __name__ == '__main__': TestRenamedNLP().test_rename() TestProjectedNLP().test_projected() - - diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_pyomo_grey_box_nlp.py b/pyomo/contrib/pynumero/interfaces/tests/test_pyomo_grey_box_nlp.py index 531655d8f23..52536dd9c06 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_pyomo_grey_box_nlp.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_pyomo_grey_box_nlp.py @@ -14,7 +14,10 @@ import pyomo.environ as pyo from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) from pyomo.common.dependencies.scipy import sparse as spa @@ -22,23 +25,32 @@ raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") from pyomo.contrib.pynumero.asl import AmplInterface + if not AmplInterface.available(): - raise unittest.SkipTest( - "Pynumero needs the ASL extension to run cyipopt tests") + raise unittest.SkipTest("Pynumero needs the ASL extension to run cyipopt tests") -from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import ( - cyipopt_available, -) +from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import cyipopt_available from pyomo.contrib.pynumero.interfaces.external_grey_box import ExternalGreyBoxBlock -from pyomo.contrib.pynumero.interfaces.pyomo_grey_box_nlp import _ExternalGreyBoxAsNLP, PyomoNLPWithGreyBoxBlocks -from pyomo.contrib.pynumero.interfaces.tests.compare_utils import check_vectors_specific_order, check_sparse_matrix_specific_order +from pyomo.contrib.pynumero.interfaces.pyomo_grey_box_nlp import ( + _ExternalGreyBoxAsNLP, + PyomoNLPWithGreyBoxBlocks, +) +from pyomo.contrib.pynumero.interfaces.tests.compare_utils import ( + check_vectors_specific_order, + check_sparse_matrix_specific_order, +) import pyomo.contrib.pynumero.interfaces.tests.external_grey_box_models as ex_models + class TestExternalGreyBoxAsNLP(unittest.TestCase): def test_pressure_drop_single_output(self): - self._test_pressure_drop_single_output(ex_models.PressureDropSingleOutput(),False) - self._test_pressure_drop_single_output(ex_models.PressureDropSingleOutputWithHessian(),True) + self._test_pressure_drop_single_output( + ex_models.PressureDropSingleOutput(), False + ) + self._test_pressure_drop_single_output( + ex_models.PressureDropSingleOutputWithHessian(), True + ) def _test_pressure_drop_single_output(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -56,7 +68,7 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(0) m.egb.outputs['Pout'].setub(100) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) egb_nlp = _ExternalGreyBoxAsNLP(m.egb) self.assertEqual(4, egb_nlp.n_primals()) @@ -65,30 +77,47 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): if hessian_support: self.assertEqual(3, egb_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.outputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.outputs[Pout]', + ] x_order = egb_nlp.primals_names() comparison_c_order = ['egb.output_constraints[Pout]'] c_order = egb_nlp.constraint_names() xlb = egb_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = egb_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = egb_nlp.constraints_lb() comparison_clb = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = egb_nlp.constraints_ub() comparison_cub = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = egb_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = egb_nlp.init_duals() comparison_duals_init = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(4, len(egb_nlp.create_new_vector('primals'))) self.assertEqual(1, len(egb_nlp.create_new_vector('constraints'))) @@ -96,7 +125,7 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): egb_nlp.set_primals(np.asarray([1, 2, 3, 4], dtype=np.float64)) x = egb_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4], dtype=np.float64))) + self.assertTrue(np.array_equal(x, np.asarray([1, 2, 3, 4], dtype=np.float64))) egb_nlp.set_primals(egb_nlp.init_primals()) egb_nlp.set_duals(np.asarray([42], dtype=np.float64)) @@ -126,25 +155,61 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): j = egb_nlp.evaluate_jacobian() comparison_j = np.asarray([[1, -36, -48, -1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j egb_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = egb_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (4,4)) + self.assertTrue(h.shape == (4, 4)) # hessian should be "full", not lower or upper triangular - comparison_h = np.asarray([[0, 0, 0, 0],[0, 0, -8*3*21, 0], [0, -8*3*21, -8*2*21, 0], [0, 0, 0, 0]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + comparison_h = np.asarray( + [ + [0, 0, 0, 0], + [0, 0, -8 * 3 * 21, 0], + [0, -8 * 3 * 21, -8 * 2 * 21, 0], + [0, 0, 0, 0], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = egb_nlp.evaluate_hessian_lag() def test_pressure_drop_single_equality(self): - self._test_pressure_drop_single_equality(ex_models.PressureDropSingleEquality(), False) - self._test_pressure_drop_single_equality(ex_models.PressureDropSingleEqualityWithHessian(), True) + self._test_pressure_drop_single_equality( + ex_models.PressureDropSingleEquality(), False + ) + self._test_pressure_drop_single_equality( + ex_models.PressureDropSingleEqualityWithHessian(), True + ) def _test_pressure_drop_single_equality(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -162,7 +227,7 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): m.egb.inputs['Pout'].value = 50 m.egb.inputs['Pout'].setlb(0) m.egb.inputs['Pout'].setub(100) - m.obj = pyo.Objective(expr=(m.egb.inputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.inputs['Pout'] - 20) ** 2) egb_nlp = _ExternalGreyBoxAsNLP(m.egb) self.assertEqual(4, egb_nlp.n_primals()) @@ -171,30 +236,47 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): if hessian_support: self.assertEqual(3, egb_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.inputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[Pout]', + ] x_order = egb_nlp.primals_names() comparison_c_order = ['egb.pdrop'] c_order = egb_nlp.constraint_names() xlb = egb_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = egb_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = egb_nlp.constraints_lb() comparison_clb = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = egb_nlp.constraints_ub() comparison_cub = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = egb_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = egb_nlp.init_duals() comparison_duals_init = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(4, len(egb_nlp.create_new_vector('primals'))) self.assertEqual(1, len(egb_nlp.create_new_vector('constraints'))) @@ -202,7 +284,7 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): egb_nlp.set_primals(np.asarray([1, 2, 3, 4], dtype=np.float64)) x = egb_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4], dtype=np.float64))) + self.assertTrue(np.array_equal(x, np.asarray([1, 2, 3, 4], dtype=np.float64))) egb_nlp.set_primals(egb_nlp.init_primals()) egb_nlp.set_duals(np.asarray([42], dtype=np.float64)) @@ -232,24 +314,58 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): j = egb_nlp.evaluate_jacobian() comparison_j = np.asarray([[-1, 36, 48, 1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j egb_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = egb_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (4,4)) - comparison_h = np.asarray([[0, 0, 0, 0],[0, 0, 8*3*21, 0], [0, 8*3*21, 8*2*21, 0], [0, 0, 0, 0]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (4, 4)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0], + [0, 0, 8 * 3 * 21, 0], + [0, 8 * 3 * 21, 8 * 2 * 21, 0], + [0, 0, 0, 0], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = egb_nlp.evaluate_hessian_lag() def test_pressure_drop_two_outputs(self): self._test_pressure_drop_two_outputs(ex_models.PressureDropTwoOutputs(), False) - self._test_pressure_drop_two_outputs(ex_models.PressureDropTwoOutputsWithHessian(), True) + self._test_pressure_drop_two_outputs( + ex_models.PressureDropTwoOutputsWithHessian(), True + ) def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -270,7 +386,7 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(0) m.egb.outputs['Pout'].setub(100) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) egb_nlp = _ExternalGreyBoxAsNLP(m.egb) self.assertEqual(5, egb_nlp.n_primals()) @@ -279,30 +395,51 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): if hessian_support: self.assertEqual(3, egb_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.outputs[P2]', 'egb.outputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + ] x_order = egb_nlp.primals_names() - comparison_c_order = ['egb.output_constraints[P2]', 'egb.output_constraints[Pout]'] + comparison_c_order = [ + 'egb.output_constraints[P2]', + 'egb.output_constraints[Pout]', + ] c_order = egb_nlp.constraint_names() xlb = egb_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = egb_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 90, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = egb_nlp.constraints_lb() comparison_clb = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = egb_nlp.constraints_ub() comparison_cub = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = egb_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 80, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = egb_nlp.init_duals() comparison_duals_init = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(5, len(egb_nlp.create_new_vector('primals'))) self.assertEqual(2, len(egb_nlp.create_new_vector('constraints'))) @@ -310,7 +447,9 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): egb_nlp.set_primals(np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) x = egb_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) + ) egb_nlp.set_primals(egb_nlp.init_primals()) egb_nlp.set_duals(np.asarray([42, 10], dtype=np.float64)) @@ -340,28 +479,67 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): j = egb_nlp.evaluate_jacobian() comparison_j = np.asarray([[1, -18, -24, -1, 0], [1, -36, -48, 0, -1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j egb_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = egb_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (5,5)) - comparison_h = np.asarray([[0, 0, 0, 0, 0], - [0, 0, (-4*3*21) + (-8*3*5), 0, 0], - [0, (-4*3*21) + (-8*3*5), (-4*2*21) + (-8*2*5), 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (5, 5)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0], + [0, 0, (-4 * 3 * 21) + (-8 * 3 * 5), 0, 0], + [ + 0, + (-4 * 3 * 21) + (-8 * 3 * 5), + (-4 * 2 * 21) + (-8 * 2 * 5), + 0, + 0, + ], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = egb_nlp.evaluate_hessian_lag() def test_pressure_drop_two_equalities(self): - self._test_pressure_drop_two_equalities(ex_models.PressureDropTwoEqualities(), False) - self._test_pressure_drop_two_equalities(ex_models.PressureDropTwoEqualitiesWithHessian(), True) + self._test_pressure_drop_two_equalities( + ex_models.PressureDropTwoEqualities(), False + ) + self._test_pressure_drop_two_equalities( + ex_models.PressureDropTwoEqualitiesWithHessian(), True + ) def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -382,7 +560,7 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): m.egb.inputs['Pout'].value = 50 m.egb.inputs['Pout'].setlb(0) m.egb.inputs['Pout'].setub(100) - m.obj = pyo.Objective(expr=(m.egb.inputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.inputs['Pout'] - 20) ** 2) egb_nlp = _ExternalGreyBoxAsNLP(m.egb) self.assertEqual(5, egb_nlp.n_primals()) @@ -391,30 +569,48 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): if hessian_support: self.assertEqual(3, egb_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.inputs[P2]', 'egb.inputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P2]', + 'egb.inputs[Pout]', + ] x_order = egb_nlp.primals_names() comparison_c_order = ['egb.pdrop2', 'egb.pdropout'] c_order = egb_nlp.constraint_names() xlb = egb_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = egb_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 90, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = egb_nlp.constraints_lb() comparison_clb = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = egb_nlp.constraints_ub() comparison_cub = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = egb_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 80, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = egb_nlp.init_duals() comparison_duals_init = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(5, len(egb_nlp.create_new_vector('primals'))) self.assertEqual(2, len(egb_nlp.create_new_vector('constraints'))) @@ -422,7 +618,9 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): egb_nlp.set_primals(np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) x = egb_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) + ) egb_nlp.set_primals(egb_nlp.init_primals()) egb_nlp.set_duals(np.asarray([42, 10], dtype=np.float64)) @@ -452,28 +650,61 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): j = egb_nlp.evaluate_jacobian() comparison_j = np.asarray([[-1, 18, 24, 1, 0], [0, 18, 24, -1, 1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j egb_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = egb_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (5,5)) - comparison_h = np.asarray([[0, 0, 0, 0, 0], - [0, 0, (4*3*21) + (4*3*5), 0, 0], - [0, (4*3*21) + (4*3*5), (4*2*21) + (4*2*5), 0, 0], - [0, 0, 0, 0, 0], - [0, 0, 0, 0, 0]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (5, 5)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0], + [0, 0, (4 * 3 * 21) + (4 * 3 * 5), 0, 0], + [0, (4 * 3 * 21) + (4 * 3 * 5), (4 * 2 * 21) + (4 * 2 * 5), 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = egb_nlp.evaluate_hessian_lag() def test_pressure_drop_two_equalities_two_outputs(self): - self._test_pressure_drop_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputs(), False) - self._test_pressure_drop_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True) + self._test_pressure_drop_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputs(), False + ) + self._test_pressure_drop_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True + ) def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -500,7 +731,7 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(30) m.egb.outputs['Pout'].setub(70) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) egb_nlp = _ExternalGreyBoxAsNLP(m.egb) self.assertEqual(7, egb_nlp.n_primals()) @@ -512,32 +743,55 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo # even if they occur in the same place self.assertEqual(6, egb_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + ] x_order = egb_nlp.primals_names() - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.output_constraints[P2]', 'egb.output_constraints[Pout]'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.output_constraints[P2]', + 'egb.output_constraints[Pout]', + ] c_order = egb_nlp.constraint_names() xlb = egb_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 20, 15, 30], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = egb_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 90, 80, 85, 70], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = egb_nlp.constraints_lb() comparison_clb = np.asarray([0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = egb_nlp.constraints_ub() comparison_cub = np.asarray([0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = egb_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 80, 70, 75, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = egb_nlp.init_duals() comparison_duals_init = np.asarray([0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(7, len(egb_nlp.create_new_vector('primals'))) self.assertEqual(4, len(egb_nlp.create_new_vector('constraints'))) @@ -545,12 +799,16 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo egb_nlp.set_primals(np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=np.float64)) x = egb_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5,6,7], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=np.float64)) + ) egb_nlp.set_primals(egb_nlp.init_primals()) egb_nlp.set_duals(np.asarray([42, 10, 11, 12], dtype=np.float64)) y = egb_nlp.get_duals() - self.assertTrue(np.array_equal(y, np.asarray([42, 10, 11, 12], dtype=np.float64))) + self.assertTrue( + np.array_equal(y, np.asarray([42, 10, 11, 12], dtype=np.float64)) + ) egb_nlp.set_duals(np.asarray([21, 5, 6, 7], dtype=np.float64)) y = egb_nlp.get_duals() self.assertTrue(np.array_equal(y, np.asarray([21, 5, 6, 7], dtype=np.float64))) @@ -574,40 +832,88 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) j = egb_nlp.evaluate_jacobian() - comparison_j = np.asarray([[-1, 9, 12, 1, 0, 0, 0], - [ 0, 18, 24, -1, 1, 0, 0], - [ 0, -9, -12, 1, 0, -1, 0], - [ 1, -36, -48, 0, 0, 0, -1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + comparison_j = np.asarray( + [ + [-1, 9, 12, 1, 0, 0, 0], + [0, 18, 24, -1, 1, 0, 0], + [0, -9, -12, 1, 0, -1, 0], + [1, -36, -48, 0, 0, 0, -1], + ] + ) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j egb_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = egb_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (7,7)) - comparison_h = np.asarray([[0, 0, 0, 0, 0, 0, 0], - [0, 0, (2*3*21) + (4*3*5) + (-2*3*6) + (-8*3*7), 0, 0, 0, 0], - [0, (2*3*21) + (4*3*5) + (-2*3*6) + (-8*3*7), (2*2*21) + (4*2*5) + (-2*2*6) + (-8*2*7), 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0]], - dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (7, 7)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0, 0, 0], + [ + 0, + 0, + (2 * 3 * 21) + (4 * 3 * 5) + (-2 * 3 * 6) + (-8 * 3 * 7), + 0, + 0, + 0, + 0, + ], + [ + 0, + (2 * 3 * 21) + (4 * 3 * 5) + (-2 * 3 * 6) + (-8 * 3 * 7), + (2 * 2 * 21) + (4 * 2 * 5) + (-2 * 2 * 6) + (-8 * 2 * 7), + 0, + 0, + 0, + 0, + ], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = egb_nlp.evaluate_hessian_lag() def create_model_two_equalities_two_outputs(self, external_model): m = pyo.ConcreteModel() - m.hin = pyo.Var(bounds=(0,None), initialize=10) - m.hout = pyo.Var(bounds=(0,None)) + m.hin = pyo.Var(bounds=(0, None), initialize=10) + m.hout = pyo.Var(bounds=(0, None)) m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(external_model) - m.incon = pyo.Constraint(expr= 0 <= m.egb.inputs['Pin'] - 10*m.hin) - m.outcon = pyo.Constraint(expr= 0 == m.egb.outputs['Pout'] - 10*m.hout) + m.incon = pyo.Constraint(expr=0 <= m.egb.inputs['Pin'] - 10 * m.hin) + m.outcon = pyo.Constraint(expr=0 == m.egb.outputs['Pout'] - 10 * m.hout) m.egb.inputs['Pin'].value = 100 m.egb.inputs['Pin'].setlb(50) m.egb.inputs['Pin'].setub(150) @@ -632,8 +938,10 @@ def create_model_two_equalities_two_outputs(self, external_model): return m def test_scaling_all_missing(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputs()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputs() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) egb_nlp = _ExternalGreyBoxAsNLP(m.egb) with self.assertRaises(NotImplementedError): fs = egb_nlp.get_obj_scaling() @@ -643,20 +951,22 @@ def test_scaling_all_missing(self): self.assertIsNone(cs) def test_scaling_pyomo_model_only(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputs()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputs() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - #m.scaling_factor[m.obj] = 0.1 # scale the objective - m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable - m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable - m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable - #m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable - m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable - m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable - m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable - #m.scaling_factor[m.hin] = 1.8 + # m.scaling_factor[m.obj] = 0.1 # scale the objective + m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable + m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable + m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable + # m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable + m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable + m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable + m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable + # m.scaling_factor[m.hin] = 1.8 m.scaling_factor[m.hout] = 1.9 - #m.scaling_factor[m.incon] = 2.1 + # m.scaling_factor[m.incon] = 2.1 m.scaling_factor[m.outcon] = 2.2 egb_nlp = _ExternalGreyBoxAsNLP(m.egb) @@ -669,11 +979,18 @@ def test_scaling_pyomo_model_only(self): self.assertIsNone(cs) def test_scaling_greybox_only(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) egb_nlp = _ExternalGreyBoxAsNLP(m.egb) - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.output_constraints[P2]', 'egb.output_constraints[Pout]'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.output_constraints[P2]', + 'egb.output_constraints[Pout]', + ] c_order = egb_nlp.constraint_names() with self.assertRaises(NotImplementedError): @@ -683,24 +1000,34 @@ def test_scaling_greybox_only(self): cs = egb_nlp.get_constraints_scaling() comparison_cs = np.asarray([3.1, 3.2, 4.1, 4.2], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) - - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleEqualities()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) + + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleEqualities() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) egb_nlp = _ExternalGreyBoxAsNLP(m.egb) cs = egb_nlp.get_constraints_scaling() comparison_cs = np.asarray([3.1, 3.2, 1, 1], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) - - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleOutputs()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) + + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleOutputs() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) egb_nlp = _ExternalGreyBoxAsNLP(m.egb) cs = egb_nlp.get_constraints_scaling() comparison_cs = np.asarray([1, 1, 4.1, 4.2], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) -class TestPyomoNLPWithGreyBoxModels(unittest.TestCase): +class TestPyomoNLPWithGreyBoxModels(unittest.TestCase): def test_error_no_variables(self): m = pyo.ConcreteModel() m.egb = ExternalGreyBoxBlock() @@ -714,7 +1041,7 @@ def test_error_fixed_inputs_outputs(self): m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_models.PressureDropSingleOutput()) m.egb.inputs['Pin'].fix(100) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) with self.assertRaises(NotImplementedError): pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) @@ -722,13 +1049,17 @@ def test_error_fixed_inputs_outputs(self): m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_models.PressureDropTwoOutputs()) m.egb.outputs['P2'].fix(50) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) with self.assertRaises(NotImplementedError): pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) def test_pressure_drop_single_output(self): - self._test_pressure_drop_single_output(ex_models.PressureDropSingleOutput(),False) - self._test_pressure_drop_single_output(ex_models.PressureDropSingleOutputWithHessian(),True) + self._test_pressure_drop_single_output( + ex_models.PressureDropSingleOutput(), False + ) + self._test_pressure_drop_single_output( + ex_models.PressureDropSingleOutputWithHessian(), True + ) def _test_pressure_drop_single_output(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -746,8 +1077,8 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(0) m.egb.outputs['Pout'].setub(100) - #m.dummy = pyo.Constraint(expr=sum(m.egb.inputs[i] for i in m.egb.inputs) + sum(m.egb.outputs[i] for i in m.egb.outputs) <= 1e6) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + # m.dummy = pyo.Constraint(expr=sum(m.egb.inputs[i] for i in m.egb.inputs) + sum(m.egb.outputs[i] for i in m.egb.outputs) <= 1e6) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) @@ -757,30 +1088,47 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): if hessian_support: self.assertEqual(4, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.outputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.outputs[Pout]', + ] x_order = pyomo_nlp.primals_names() comparison_c_order = ['egb.output_constraints[Pout]'] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(4, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(1, len(pyomo_nlp.create_new_vector('constraints'))) @@ -788,7 +1136,7 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4], dtype=np.float64))) + self.assertTrue(np.array_equal(x, np.asarray([1, 2, 3, 4], dtype=np.float64))) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42], dtype=np.float64)) @@ -809,7 +1157,9 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 60], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([-22], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) @@ -819,24 +1169,60 @@ def _test_pressure_drop_single_output(self, ex_model, hessian_support): j = pyomo_nlp.evaluate_jacobian() comparison_j = np.asarray([[1, -36, -48, -1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (4,4)) - comparison_h = np.asarray([[0, 0, 0, 0],[0, 0, -8*3*21, 0], [0, -8*3*21, -8*2*21, 0], [0, 0, 0, 2*1]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (4, 4)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0], + [0, 0, -8 * 3 * 21, 0], + [0, -8 * 3 * 21, -8 * 2 * 21, 0], + [0, 0, 0, 2 * 1], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = pyomo_nlp.evaluate_hessian_lag() def test_pressure_drop_single_equality(self): - self._test_pressure_drop_single_equality(ex_models.PressureDropSingleEquality(), False) - self._test_pressure_drop_single_equality(ex_models.PressureDropSingleEqualityWithHessian(), True) + self._test_pressure_drop_single_equality( + ex_models.PressureDropSingleEquality(), False + ) + self._test_pressure_drop_single_equality( + ex_models.PressureDropSingleEqualityWithHessian(), True + ) def _test_pressure_drop_single_equality(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -854,7 +1240,7 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): m.egb.inputs['Pout'].value = 50 m.egb.inputs['Pout'].setlb(0) m.egb.inputs['Pout'].setub(100) - m.obj = pyo.Objective(expr=(m.egb.inputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.inputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) self.assertEqual(4, pyomo_nlp.n_primals()) @@ -863,30 +1249,47 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): if hessian_support: self.assertEqual(4, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.inputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[Pout]', + ] x_order = pyomo_nlp.primals_names() comparison_c_order = ['egb.pdrop'] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(4, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(1, len(pyomo_nlp.create_new_vector('constraints'))) @@ -894,7 +1297,7 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4], dtype=np.float64))) + self.assertTrue(np.array_equal(x, np.asarray([1, 2, 3, 4], dtype=np.float64))) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42], dtype=np.float64)) @@ -915,7 +1318,9 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 60], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([22], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) @@ -925,24 +1330,58 @@ def _test_pressure_drop_single_equality(self, ex_model, hessian_support): j = pyomo_nlp.evaluate_jacobian() comparison_j = np.asarray([[-1, 36, 48, 1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (4,4)) - comparison_h = np.asarray([[0, 0, 0, 0],[0, 0, 8*3*21, 0], [0, 8*3*21, 8*2*21, 0], [0, 0, 0, 2*1]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (4, 4)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0], + [0, 0, 8 * 3 * 21, 0], + [0, 8 * 3 * 21, 8 * 2 * 21, 0], + [0, 0, 0, 2 * 1], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = pyomo_nlp.evaluate_hessian_lag() def test_pressure_drop_two_outputs(self): self._test_pressure_drop_two_outputs(ex_models.PressureDropTwoOutputs(), False) - self._test_pressure_drop_two_outputs(ex_models.PressureDropTwoOutputsWithHessian(), True) + self._test_pressure_drop_two_outputs( + ex_models.PressureDropTwoOutputsWithHessian(), True + ) def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -963,7 +1402,7 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(0) m.egb.outputs['Pout'].setub(100) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) self.assertEqual(5, pyomo_nlp.n_primals()) @@ -972,30 +1411,51 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): if hessian_support: self.assertEqual(4, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.outputs[P2]', 'egb.outputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + ] x_order = pyomo_nlp.primals_names() - comparison_c_order = ['egb.output_constraints[P2]', 'egb.output_constraints[Pout]'] + comparison_c_order = [ + 'egb.output_constraints[P2]', + 'egb.output_constraints[Pout]', + ] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 90, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 80, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(5, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(2, len(pyomo_nlp.create_new_vector('constraints'))) @@ -1003,7 +1463,9 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) + ) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42, 10], dtype=np.float64)) @@ -1024,7 +1486,9 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 0, 60], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([-16, -22], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) @@ -1034,24 +1498,67 @@ def _test_pressure_drop_two_outputs(self, ex_model, hessian_support): j = pyomo_nlp.evaluate_jacobian() comparison_j = np.asarray([[1, -18, -24, -1, 0], [1, -36, -48, 0, -1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (5,5)) - comparison_h = np.asarray([[0, 0, 0, 0, 0],[0, 0, (-4*3*21) + (-8*3*5), 0, 0], [0, (-4*3*21) + (-8*3*5), (-4*2*21) + (-8*2*5), 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 2*1]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (5, 5)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0], + [0, 0, (-4 * 3 * 21) + (-8 * 3 * 5), 0, 0], + [ + 0, + (-4 * 3 * 21) + (-8 * 3 * 5), + (-4 * 2 * 21) + (-8 * 2 * 5), + 0, + 0, + ], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 2 * 1], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = pyomo_nlp.evaluate_hessian_lag() def test_pressure_drop_two_equalities(self): - self._test_pressure_drop_two_equalities(ex_models.PressureDropTwoEqualities(), False) - self._test_pressure_drop_two_equalities(ex_models.PressureDropTwoEqualitiesWithHessian(), True) + self._test_pressure_drop_two_equalities( + ex_models.PressureDropTwoEqualities(), False + ) + self._test_pressure_drop_two_equalities( + ex_models.PressureDropTwoEqualitiesWithHessian(), True + ) def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -1072,7 +1579,7 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): m.egb.inputs['Pout'].value = 50 m.egb.inputs['Pout'].setlb(0) m.egb.inputs['Pout'].setub(100) - m.obj = pyo.Objective(expr=(m.egb.inputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.inputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) self.assertEqual(5, pyomo_nlp.n_primals()) @@ -1081,30 +1588,48 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): if hessian_support: self.assertEqual(4, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', 'egb.inputs[P2]', 'egb.inputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P2]', + 'egb.inputs[Pout]', + ] x_order = pyomo_nlp.primals_names() comparison_c_order = ['egb.pdrop2', 'egb.pdropout'] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 90, 100], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 80, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(5, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(2, len(pyomo_nlp.create_new_vector('constraints'))) @@ -1112,7 +1637,9 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5], dtype=np.float64)) + ) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42, 10], dtype=np.float64)) @@ -1133,7 +1660,9 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 0, 60], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([16, 6], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) @@ -1143,24 +1672,61 @@ def _test_pressure_drop_two_equalities(self, ex_model, hessian_support): j = pyomo_nlp.evaluate_jacobian() comparison_j = np.asarray([[-1, 18, 24, 1, 0], [0, 18, 24, -1, 1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (5,5)) - comparison_h = np.asarray([[0, 0, 0, 0, 0],[0, 0, (4*3*21) + (4*3*5), 0, 0], [0, (4*3*21) + (4*3*5), (4*2*21) + (4*2*5), 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 2*1]], dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (5, 5)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0], + [0, 0, (4 * 3 * 21) + (4 * 3 * 5), 0, 0], + [0, (4 * 3 * 21) + (4 * 3 * 5), (4 * 2 * 21) + (4 * 2 * 5), 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 2 * 1], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = pyomo_nlp.evaluate_hessian_lag() def test_pressure_drop_two_equalities_two_outputs(self): - self._test_pressure_drop_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputs(), False) - self._test_pressure_drop_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True) + self._test_pressure_drop_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputs(), False + ) + self._test_pressure_drop_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True + ) def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_support): m = pyo.ConcreteModel() @@ -1187,7 +1753,7 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(30) m.egb.outputs['Pout'].setub(70) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) self.assertEqual(7, pyomo_nlp.n_primals()) @@ -1196,32 +1762,55 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo if hessian_support: self.assertEqual(4, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + ] x_order = pyomo_nlp.primals_names() - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.output_constraints[P2]', 'egb.output_constraints[Pout]'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.output_constraints[P2]', + 'egb.output_constraints[Pout]', + ] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 20, 15, 30], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() comparison_xub = np.asarray([150, 5, 5, 90, 80, 85, 70], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() comparison_xinit = np.asarray([100, 2, 3, 80, 70, 75, 50], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(7, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(4, len(pyomo_nlp.create_new_vector('constraints'))) @@ -1229,12 +1818,16 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5,6,7], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5, 6, 7], dtype=np.float64)) + ) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42, 10, 11, 12], dtype=np.float64)) y = pyomo_nlp.get_duals() - self.assertTrue(np.array_equal(y, np.asarray([42, 10, 11, 12], dtype=np.float64))) + self.assertTrue( + np.array_equal(y, np.asarray([42, 10, 11, 12], dtype=np.float64)) + ) pyomo_nlp.set_duals(np.asarray([21, 5, 6, 7], dtype=np.float64)) y = pyomo_nlp.get_duals() self.assertTrue(np.array_equal(y, np.asarray([21, 5, 6, 7], dtype=np.float64))) @@ -1250,7 +1843,9 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 0, 0, 0, 60], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([-2, 26, -13, -22], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) @@ -1259,44 +1854,96 @@ def _test_pressure_drop_two_equalities_two_outputs(self, ex_model, hessian_suppo check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) j = pyomo_nlp.evaluate_jacobian() - comparison_j = np.asarray([[-1, 9, 12, 1, 0, 0, 0], - [ 0, 18, 24, -1, 1, 0, 0], - [ 0, -9, -12, 1, 0, -1, 0], - [ 1, -36, -48, 0, 0, 0, -1]]) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + comparison_j = np.asarray( + [ + [-1, 9, 12, 1, 0, 0, 0], + [0, 18, 24, -1, 1, 0, 0], + [0, -9, -12, 1, 0, -1, 0], + [1, -36, -48, 0, 0, 0, -1], + ] + ) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (7,7)) - comparison_h = np.asarray([[0, 0, 0, 0, 0, 0, 0], - [0, 0, (2*3*21) + (4*3*5) + (-2*3*6) + (-8*3*7), 0, 0, 0, 0], - [0, (2*3*21) + (4*3*5) + (-2*3*6) + (-8*3*7), (2*2*21) + (4*2*5) + (-2*2*6) + (-8*2*7), 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 2*1]], - dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (7, 7)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0, 0, 0], + [ + 0, + 0, + (2 * 3 * 21) + (4 * 3 * 5) + (-2 * 3 * 6) + (-8 * 3 * 7), + 0, + 0, + 0, + 0, + ], + [ + 0, + (2 * 3 * 21) + (4 * 3 * 5) + (-2 * 3 * 6) + (-8 * 3 * 7), + (2 * 2 * 21) + (4 * 2 * 5) + (-2 * 2 * 6) + (-8 * 2 * 7), + 0, + 0, + 0, + 0, + ], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 2 * 1], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = pyomo_nlp.evaluate_hessian_lag() def test_external_additional_constraints_vars(self): - self._test_external_additional_constraints_vars(ex_models.PressureDropTwoEqualitiesTwoOutputs(), False) - self._test_external_additional_constraints_vars(ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True) + self._test_external_additional_constraints_vars( + ex_models.PressureDropTwoEqualitiesTwoOutputs(), False + ) + self._test_external_additional_constraints_vars( + ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True + ) def _test_external_additional_constraints_vars(self, ex_model, hessian_support): m = pyo.ConcreteModel() - m.hin = pyo.Var(bounds=(0,None), initialize=10) - m.hout = pyo.Var(bounds=(0,None)) + m.hin = pyo.Var(bounds=(0, None), initialize=10) + m.hout = pyo.Var(bounds=(0, None)) m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_model) - m.incon = pyo.Constraint(expr= 0 <= m.egb.inputs['Pin'] - 10*m.hin) - m.outcon = pyo.Constraint(expr= 0 == m.egb.outputs['Pout'] - 10*m.hout) + m.incon = pyo.Constraint(expr=0 <= m.egb.inputs['Pin'] - 10 * m.hin) + m.outcon = pyo.Constraint(expr=0 == m.egb.outputs['Pout'] - 10 * m.hout) m.egb.inputs['Pin'].value = 100 m.egb.inputs['Pin'].setlb(50) m.egb.inputs['Pin'].setub(150) @@ -1318,7 +1965,7 @@ def _test_external_additional_constraints_vars(self, ex_model, hessian_support): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(30) m.egb.outputs['Pout'].setub(70) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) self.assertEqual(9, pyomo_nlp.n_primals()) @@ -1327,33 +1974,63 @@ def _test_external_additional_constraints_vars(self, ex_model, hessian_support): if hessian_support: self.assertEqual(4, pyomo_nlp.nnz_hessian_lag()) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]', - 'hin', 'hout'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + 'hin', + 'hout', + ] x_order = pyomo_nlp.primals_names() - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.output_constraints[P2]', 'egb.output_constraints[Pout]', 'incon', 'outcon'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.output_constraints[P2]', + 'egb.output_constraints[Pout]', + 'incon', + 'outcon', + ] c_order = pyomo_nlp.constraint_names() xlb = pyomo_nlp.primals_lb() comparison_xlb = np.asarray([50, 1, 1, 10, 20, 15, 30, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, xlb, x_order, comparison_xlb, comparison_x_order) + check_vectors_specific_order( + self, xlb, x_order, comparison_xlb, comparison_x_order + ) xub = pyomo_nlp.primals_ub() - comparison_xub = np.asarray([150, 5, 5, 90, 80, 85, 70, np.inf, np.inf], dtype=np.float64) - check_vectors_specific_order(self, xub, x_order, comparison_xub, comparison_x_order) + comparison_xub = np.asarray( + [150, 5, 5, 90, 80, 85, 70, np.inf, np.inf], dtype=np.float64 + ) + check_vectors_specific_order( + self, xub, x_order, comparison_xub, comparison_x_order + ) clb = pyomo_nlp.constraints_lb() comparison_clb = np.asarray([0, 0, 0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, clb, c_order, comparison_clb, comparison_c_order) + check_vectors_specific_order( + self, clb, c_order, comparison_clb, comparison_c_order + ) cub = pyomo_nlp.constraints_ub() comparison_cub = np.asarray([0, 0, 0, 0, np.inf, 0], dtype=np.float64) - check_vectors_specific_order(self, cub, c_order, comparison_cub, comparison_c_order) + check_vectors_specific_order( + self, cub, c_order, comparison_cub, comparison_c_order + ) xinit = pyomo_nlp.init_primals() - comparison_xinit = np.asarray([100, 2, 3, 80, 70, 75, 50, 10, 0], dtype=np.float64) - check_vectors_specific_order(self, xinit, x_order, comparison_xinit, comparison_x_order) + comparison_xinit = np.asarray( + [100, 2, 3, 80, 70, 75, 50, 10, 0], dtype=np.float64 + ) + check_vectors_specific_order( + self, xinit, x_order, comparison_xinit, comparison_x_order + ) duals_init = pyomo_nlp.init_duals() comparison_duals_init = np.asarray([0, 0, 0, 0, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, duals_init, c_order, comparison_duals_init, comparison_c_order) + check_vectors_specific_order( + self, duals_init, c_order, comparison_duals_init, comparison_c_order + ) self.assertEqual(9, len(pyomo_nlp.create_new_vector('primals'))) self.assertEqual(6, len(pyomo_nlp.create_new_vector('constraints'))) @@ -1361,15 +2038,21 @@ def _test_external_additional_constraints_vars(self, ex_model, hessian_support): pyomo_nlp.set_primals(np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float64)) x = pyomo_nlp.get_primals() - self.assertTrue(np.array_equal(x, np.asarray([1,2,3,4,5,6,7,8,9], dtype=np.float64))) + self.assertTrue( + np.array_equal(x, np.asarray([1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=np.float64)) + ) pyomo_nlp.set_primals(pyomo_nlp.init_primals()) pyomo_nlp.set_duals(np.asarray([42, 10, 11, 12, 13, 14], dtype=np.float64)) y = pyomo_nlp.get_duals() - self.assertTrue(np.array_equal(y, np.asarray([42, 10, 11, 12, 13, 14], dtype=np.float64))) + self.assertTrue( + np.array_equal(y, np.asarray([42, 10, 11, 12, 13, 14], dtype=np.float64)) + ) pyomo_nlp.set_duals(np.asarray([0, 0, 21, 5, 6, 7], dtype=np.float64)) y = pyomo_nlp.get_duals() - self.assertTrue(np.array_equal(y, np.asarray([0, 0, 21, 5, 6, 7], dtype=np.float64))) + self.assertTrue( + np.array_equal(y, np.asarray([0, 0, 21, 5, 6, 7], dtype=np.float64)) + ) fac = pyomo_nlp.get_obj_factor() self.assertEqual(fac, 1) @@ -1382,7 +2065,9 @@ def _test_external_additional_constraints_vars(self, ex_model, hessian_support): gradf = pyomo_nlp.evaluate_grad_objective() comparison_gradf = np.asarray([0, 0, 0, 0, 0, 0, 60, 0, 0], dtype=np.float64) - check_vectors_specific_order(self, gradf, x_order, comparison_gradf, comparison_x_order) + check_vectors_specific_order( + self, gradf, x_order, comparison_gradf, comparison_x_order + ) c = pyomo_nlp.evaluate_constraints() comparison_c = np.asarray([-2, 26, -13, -22, 0, 50], dtype=np.float64) check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) @@ -1391,51 +2076,108 @@ def _test_external_additional_constraints_vars(self, ex_model, hessian_support): check_vectors_specific_order(self, c, c_order, comparison_c, comparison_c_order) j = pyomo_nlp.evaluate_jacobian() - comparison_j = np.asarray([[-1, 9, 12, 1, 0, 0, 0, 0, 0], - [ 0, 18, 24, -1, 1, 0, 0, 0, 0], - [ 0, -9, -12, 1, 0, -1, 0, 0, 0], - [ 1, -36, -48, 0, 0, 0, -1, 0, 0], - [ 1, 0, 0, 0, 0, 0, 0, -10, 0], - [ 0, 0, 0, 0, 0, 0, 1, 0, -10]]) - - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) - - j = 2.0*j + comparison_j = np.asarray( + [ + [-1, 9, 12, 1, 0, 0, 0, 0, 0], + [0, 18, 24, -1, 1, 0, 0, 0, 0], + [0, -9, -12, 1, 0, -1, 0, 0, 0], + [1, -36, -48, 0, 0, 0, -1, 0, 0], + [1, 0, 0, 0, 0, 0, 0, -10, 0], + [0, 0, 0, 0, 0, 0, 1, 0, -10], + ] + ) + + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) + + j = 2.0 * j pyomo_nlp.evaluate_jacobian(out=j) - check_sparse_matrix_specific_order(self, j, c_order, x_order, comparison_j, comparison_c_order, comparison_x_order) + check_sparse_matrix_specific_order( + self, + j, + c_order, + x_order, + comparison_j, + comparison_c_order, + comparison_x_order, + ) if hessian_support: h = pyomo_nlp.evaluate_hessian_lag() - self.assertTrue(h.shape == (9,9)) - comparison_h = np.asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, (2*3*21) + (4*3*5) + (-2*3*6) + (-8*3*7), 0, 0, 0, 0, 0, 0], - [0, (2*3*21) + (4*3*5) + (-2*3*6) + (-8*3*7), (2*2*21) + (4*2*5) + (-2*2*6) + (-8*2*7), 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 2*1, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0], - [0, 0, 0, 0, 0, 0, 0, 0, 0]], - dtype=np.float64) - check_sparse_matrix_specific_order(self, h, x_order, x_order, comparison_h, comparison_x_order, comparison_x_order) + self.assertTrue(h.shape == (9, 9)) + comparison_h = np.asarray( + [ + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [ + 0, + 0, + (2 * 3 * 21) + (4 * 3 * 5) + (-2 * 3 * 6) + (-8 * 3 * 7), + 0, + 0, + 0, + 0, + 0, + 0, + ], + [ + 0, + (2 * 3 * 21) + (4 * 3 * 5) + (-2 * 3 * 6) + (-8 * 3 * 7), + (2 * 2 * 21) + (4 * 2 * 5) + (-2 * 2 * 6) + (-8 * 2 * 7), + 0, + 0, + 0, + 0, + 0, + 0, + ], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 2 * 1, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 0, 0], + ], + dtype=np.float64, + ) + check_sparse_matrix_specific_order( + self, + h, + x_order, + x_order, + comparison_h, + comparison_x_order, + comparison_x_order, + ) else: with self.assertRaises(NotImplementedError): h = pyomo_nlp.evaluate_hessian_lag() - @unittest.skipIf(not cyipopt_available, - "CyIpopt needed to run tests with solve") + @unittest.skipIf(not cyipopt_available, "CyIpopt needed to run tests with solve") def test_external_greybox_solve(self): - self._test_external_greybox_solve(ex_models.PressureDropTwoEqualitiesTwoOutputs(), False) - self._test_external_greybox_solve(ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True) + self._test_external_greybox_solve( + ex_models.PressureDropTwoEqualitiesTwoOutputs(), False + ) + self._test_external_greybox_solve( + ex_models.PressureDropTwoEqualitiesTwoOutputsWithHessian(), True + ) def _test_external_greybox_solve(self, ex_model, hessian_support): m = pyo.ConcreteModel() - m.mu = pyo.Var(bounds=(0,None), initialize=1) + m.mu = pyo.Var(bounds=(0, None), initialize=1) m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_model) - m.ccon = pyo.Constraint(expr = m.egb.inputs['c'] == 128/(3.14*1e-4)*m.mu*m.egb.inputs['F']) - m.pcon = pyo.Constraint(expr = m.egb.inputs['Pin'] - m.egb.outputs['Pout'] <= 72) - m.pincon = pyo.Constraint(expr = m.egb.inputs['Pin'] == 100.0) + m.ccon = pyo.Constraint( + expr=m.egb.inputs['c'] == 128 / (3.14 * 1e-4) * m.mu * m.egb.inputs['F'] + ) + m.pcon = pyo.Constraint(expr=m.egb.inputs['Pin'] - m.egb.outputs['Pout'] <= 72) + m.pincon = pyo.Constraint(expr=m.egb.inputs['Pin'] == 100.0) m.egb.inputs['Pin'].value = 100 m.egb.inputs['Pin'].setlb(50) m.egb.inputs['Pin'].setub(150) @@ -1457,11 +2199,13 @@ def _test_external_greybox_solve(self, ex_model, hessian_support): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(10) m.egb.outputs['Pout'].setub(70) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2 + (m.egb.inputs['F']-3)**2) + m.obj = pyo.Objective( + expr=(m.egb.outputs['Pout'] - 20) ** 2 + (m.egb.inputs['F'] - 3) ** 2 + ) solver = pyo.SolverFactory('cyipopt') if not hessian_support: - solver.config.options = {'hessian_approximation':'limited-memory'} + solver.config.options = {'hessian_approximation': 'limited-memory'} status = solver.solve(m, tee=False) self.assertAlmostEqual(pyo.value(m.egb.inputs['F']), 3.0, places=3) @@ -1475,12 +2219,12 @@ def _test_external_greybox_solve(self, ex_model, hessian_support): def create_model_two_equalities_two_outputs(self, external_model): m = pyo.ConcreteModel() - m.hin = pyo.Var(bounds=(0,None), initialize=10) - m.hout = pyo.Var(bounds=(0,None)) + m.hin = pyo.Var(bounds=(0, None), initialize=10) + m.hout = pyo.Var(bounds=(0, None)) m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(external_model) - m.incon = pyo.Constraint(expr= 0 <= m.egb.inputs['Pin'] - 10*m.hin) - m.outcon = pyo.Constraint(expr= 0 == m.egb.outputs['Pout'] - 10*m.hout) + m.incon = pyo.Constraint(expr=0 <= m.egb.inputs['Pin'] - 10 * m.hin) + m.outcon = pyo.Constraint(expr=0 == m.egb.outputs['Pout'] - 10 * m.hout) m.egb.inputs['Pin'].value = 100 m.egb.inputs['Pin'].setlb(50) m.egb.inputs['Pin'].setub(150) @@ -1505,8 +2249,10 @@ def create_model_two_equalities_two_outputs(self, external_model): return m def test_scaling_all_missing(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputs()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputs() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) fs = pyomo_nlp.get_obj_scaling() xs = pyomo_nlp.get_primals_scaling() @@ -1516,53 +2262,91 @@ def test_scaling_all_missing(self): self.assertIsNone(cs) def test_scaling_pyomo_model_only(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputs()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputs() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - #m.scaling_factor[m.obj] = 0.1 # scale the objective - m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable - m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable - m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable - #m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable - m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable - m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable - m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable - #m.scaling_factor[m.hin] = 1.8 + # m.scaling_factor[m.obj] = 0.1 # scale the objective + m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable + m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable + m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable + # m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable + m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable + m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable + m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable + # m.scaling_factor[m.hin] = 1.8 m.scaling_factor[m.hout] = 1.9 - #m.scaling_factor[m.incon] = 2.1 + # m.scaling_factor[m.incon] = 2.1 m.scaling_factor[m.outcon] = 2.2 pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]', - 'hin', 'hout'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + 'hin', + 'hout', + ] x_order = pyomo_nlp.primals_names() - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.output_constraints[P2]', 'egb.output_constraints[Pout]', 'incon', 'outcon'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.output_constraints[P2]', + 'egb.output_constraints[Pout]', + 'incon', + 'outcon', + ] c_order = pyomo_nlp.constraint_names() fs = pyomo_nlp.get_obj_scaling() self.assertEqual(fs, 1.0) xs = pyomo_nlp.get_primals_scaling() - comparison_xs = np.asarray([1.1, 1.2, 1.3, 1.0, 1.5, 1.6, 1.7, 1.0, 1.9], dtype=np.float64) - check_vectors_specific_order(self, xs, x_order, comparison_xs, comparison_x_order) + comparison_xs = np.asarray( + [1.1, 1.2, 1.3, 1.0, 1.5, 1.6, 1.7, 1.0, 1.9], dtype=np.float64 + ) + check_vectors_specific_order( + self, xs, x_order, comparison_xs, comparison_x_order + ) cs = pyomo_nlp.get_constraints_scaling() comparison_cs = np.asarray([1, 1, 1, 1, 1, 2.2], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) def test_scaling_greybox_only(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]', - 'hin', 'hout'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + 'hin', + 'hout', + ] x_order = pyomo_nlp.primals_names() - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.output_constraints[P2]', 'egb.output_constraints[Pout]', 'incon', 'outcon'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.output_constraints[P2]', + 'egb.output_constraints[Pout]', + 'incon', + 'outcon', + ] c_order = pyomo_nlp.constraint_names() fs = pyomo_nlp.get_obj_scaling() @@ -1570,73 +2354,110 @@ def test_scaling_greybox_only(self): xs = pyomo_nlp.get_primals_scaling() comparison_xs = np.asarray([1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64) - check_vectors_specific_order(self, xs, x_order, comparison_xs, comparison_x_order) + check_vectors_specific_order( + self, xs, x_order, comparison_xs, comparison_x_order + ) cs = pyomo_nlp.get_constraints_scaling() comparison_cs = np.asarray([3.1, 3.2, 4.1, 4.2, 1, 1], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) - - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleEqualities()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) + + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleEqualities() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) cs = pyomo_nlp.get_constraints_scaling() comparison_cs = np.asarray([3.1, 3.2, 1, 1, 1, 1], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) - - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleOutputs()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) + + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleOutputs() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) cs = pyomo_nlp.get_constraints_scaling() comparison_cs = np.asarray([1, 1, 4.1, 4.2, 1, 1], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) def test_scaling_pyomo_model_and_greybox(self): - m = self.create_model_two_equalities_two_outputs(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth()) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2) + m = self.create_model_two_equalities_two_outputs( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth() + ) + m.obj = pyo.Objective(expr=(m.egb.outputs['Pout'] - 20) ** 2) m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - #m.scaling_factor[m.obj] = 0.1 # scale the objective - m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable - m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable - m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable - #m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable - m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable - m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable - m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable - #m.scaling_factor[m.hin] = 1.8 + # m.scaling_factor[m.obj] = 0.1 # scale the objective + m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable + m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable + m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable + # m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable + m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable + m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable + m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable + # m.scaling_factor[m.hin] = 1.8 m.scaling_factor[m.hout] = 1.9 - #m.scaling_factor[m.incon] = 2.1 + # m.scaling_factor[m.incon] = 2.1 m.scaling_factor[m.outcon] = 2.2 pyomo_nlp = PyomoNLPWithGreyBoxBlocks(m) - comparison_x_order = ['egb.inputs[Pin]', 'egb.inputs[c]', 'egb.inputs[F]', - 'egb.inputs[P1]', 'egb.inputs[P3]', - 'egb.outputs[P2]', 'egb.outputs[Pout]', - 'hin', 'hout'] + comparison_x_order = [ + 'egb.inputs[Pin]', + 'egb.inputs[c]', + 'egb.inputs[F]', + 'egb.inputs[P1]', + 'egb.inputs[P3]', + 'egb.outputs[P2]', + 'egb.outputs[Pout]', + 'hin', + 'hout', + ] x_order = pyomo_nlp.primals_names() - comparison_c_order = ['egb.pdrop1', 'egb.pdrop3', 'egb.output_constraints[P2]', 'egb.output_constraints[Pout]', 'incon', 'outcon'] + comparison_c_order = [ + 'egb.pdrop1', + 'egb.pdrop3', + 'egb.output_constraints[P2]', + 'egb.output_constraints[Pout]', + 'incon', + 'outcon', + ] c_order = pyomo_nlp.constraint_names() fs = pyomo_nlp.get_obj_scaling() self.assertEqual(fs, 1.0) xs = pyomo_nlp.get_primals_scaling() - comparison_xs = np.asarray([1.1, 1.2, 1.3, 1.0, 1.5, 1.6, 1.7, 1.0, 1.9], dtype=np.float64) - check_vectors_specific_order(self, xs, x_order, comparison_xs, comparison_x_order) + comparison_xs = np.asarray( + [1.1, 1.2, 1.3, 1.0, 1.5, 1.6, 1.7, 1.0, 1.9], dtype=np.float64 + ) + check_vectors_specific_order( + self, xs, x_order, comparison_xs, comparison_x_order + ) cs = pyomo_nlp.get_constraints_scaling() comparison_cs = np.asarray([3.1, 3.2, 4.1, 4.2, 1, 2.2], dtype=np.float64) - check_vectors_specific_order(self, cs, c_order, comparison_cs, comparison_c_order) + check_vectors_specific_order( + self, cs, c_order, comparison_cs, comparison_c_order + ) - @unittest.skipIf(not cyipopt_available, - "CyIpopt needed to run tests with solve") + @unittest.skipIf(not cyipopt_available, "CyIpopt needed to run tests with solve") def test_external_greybox_solve_scaling(self): m = pyo.ConcreteModel() - m.mu = pyo.Var(bounds=(0,None), initialize=1) + m.mu = pyo.Var(bounds=(0, None), initialize=1) m.egb = ExternalGreyBoxBlock() - m.egb.set_external_model(ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth()) - m.ccon = pyo.Constraint(expr = m.egb.inputs['c'] == 128/(3.14*1e-4)*m.mu*m.egb.inputs['F']) - m.pcon = pyo.Constraint(expr = m.egb.inputs['Pin'] - m.egb.outputs['Pout'] <= 72) - m.pincon = pyo.Constraint(expr = m.egb.inputs['Pin'] == 100.0) + m.egb.set_external_model( + ex_models.PressureDropTwoEqualitiesTwoOutputsScaleBoth() + ) + m.ccon = pyo.Constraint( + expr=m.egb.inputs['c'] == 128 / (3.14 * 1e-4) * m.mu * m.egb.inputs['F'] + ) + m.pcon = pyo.Constraint(expr=m.egb.inputs['Pin'] - m.egb.outputs['Pout'] <= 72) + m.pincon = pyo.Constraint(expr=m.egb.inputs['Pin'] == 100.0) m.egb.inputs['Pin'].value = 100 m.egb.inputs['Pin'].setlb(50) m.egb.inputs['Pin'].setub(150) @@ -1658,26 +2479,30 @@ def test_external_greybox_solve_scaling(self): m.egb.outputs['Pout'].value = 50 m.egb.outputs['Pout'].setlb(10) m.egb.outputs['Pout'].setub(70) - m.obj = pyo.Objective(expr=(m.egb.outputs['Pout']-20)**2 + (m.egb.inputs['F']-3)**2) + m.obj = pyo.Objective( + expr=(m.egb.outputs['Pout'] - 20) ** 2 + (m.egb.inputs['F'] - 3) ** 2 + ) m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) - m.scaling_factor[m.obj] = 0.1 # scale the objective - m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable - m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable - m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable - #m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable - m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable - m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable - m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable + m.scaling_factor[m.obj] = 0.1 # scale the objective + m.scaling_factor[m.egb.inputs['Pin']] = 1.1 # scale the variable + m.scaling_factor[m.egb.inputs['c']] = 1.2 # scale the variable + m.scaling_factor[m.egb.inputs['F']] = 1.3 # scale the variable + # m.scaling_factor[m.egb.inputs['P1']] = 1.4 # scale the variable + m.scaling_factor[m.egb.inputs['P3']] = 1.5 # scale the variable + m.scaling_factor[m.egb.outputs['P2']] = 1.6 # scale the variable + m.scaling_factor[m.egb.outputs['Pout']] = 1.7 # scale the variable m.scaling_factor[m.mu] = 1.9 m.scaling_factor[m.pincon] = 2.2 solver = pyo.SolverFactory('cyipopt') - solver.config.options = {'hessian_approximation':'limited-memory', - 'nlp_scaling_method': 'user-scaling', - 'output_file': '_cyipopt-external-greybox-scaling.log', - 'file_print_level':10, - 'max_iter': 0} + solver.config.options = { + 'hessian_approximation': 'limited-memory', + 'nlp_scaling_method': 'user-scaling', + 'output_file': '_cyipopt-external-greybox-scaling.log', + 'file_print_level': 10, + 'max_iter': 0, + } status = solver.solve(m, tee=False) with open('_cyipopt-external-greybox-scaling.log', 'r') as fd: @@ -1685,7 +2510,9 @@ def test_external_greybox_solve_scaling(self): os.remove('_cyipopt-external-greybox-scaling.log') self.assertIn('nlp_scaling_method = user-scaling', solver_trace) - self.assertIn('output_file = _cyipopt-external-greybox-scaling.log', solver_trace) + self.assertIn( + 'output_file = _cyipopt-external-greybox-scaling.log', solver_trace + ) self.assertIn('objective scaling factor = 0.1', solver_trace) self.assertIn('x scaling provided', solver_trace) self.assertIn('c scaling provided', solver_trace) @@ -1693,33 +2520,62 @@ def test_external_greybox_solve_scaling(self): # x_order: ['egb.inputs[F]', 'egb.inputs[P1]', 'egb.inputs[P3]', 'egb.inputs[Pin]', 'egb.inputs[c]', 'egb.outputs[P2]', 'egb.outputs[Pout]', 'mu'] # c_order: ['ccon', 'pcon', 'pincon', 'egb.pdrop1', 'egb.pdrop3', 'egb.output_constraints[P2]', 'egb.output_constraints[Pout]'] self.assertIn('DenseVector "x scaling vector" with 8 elements:', solver_trace) - self.assertIn('x scaling vector[ 1]= 1.3000000000000000e+00', solver_trace) # F - self.assertIn('x scaling vector[ 8]= 1.8999999999999999e+00', solver_trace) # mu - self.assertIn('x scaling vector[ 7]= 1.7000000000000000e+00', solver_trace) # Pout - self.assertIn('x scaling vector[ 4]= 1.1000000000000001e+00', solver_trace) # Pin - self.assertIn('x scaling vector[ 5]= 1.2000000000000000e+00', solver_trace) # c - self.assertIn('x scaling vector[ 2]= 1.0000000000000000e+00', solver_trace) # P1 - self.assertIn('x scaling vector[ 3]= 1.5000000000000000e+00', solver_trace) # P3 - self.assertIn('x scaling vector[ 6]= 1.6000000000000001e+00', solver_trace) # P2 + self.assertIn( + 'x scaling vector[ 1]= 1.3000000000000000e+00', solver_trace + ) # F + self.assertIn( + 'x scaling vector[ 8]= 1.8999999999999999e+00', solver_trace + ) # mu + self.assertIn( + 'x scaling vector[ 7]= 1.7000000000000000e+00', solver_trace + ) # Pout + self.assertIn( + 'x scaling vector[ 4]= 1.1000000000000001e+00', solver_trace + ) # Pin + self.assertIn( + 'x scaling vector[ 5]= 1.2000000000000000e+00', solver_trace + ) # c + self.assertIn( + 'x scaling vector[ 2]= 1.0000000000000000e+00', solver_trace + ) # P1 + self.assertIn( + 'x scaling vector[ 3]= 1.5000000000000000e+00', solver_trace + ) # P3 + self.assertIn( + 'x scaling vector[ 6]= 1.6000000000000001e+00', solver_trace + ) # P2 self.assertIn('DenseVector "c scaling vector" with 6 elements:', solver_trace) - self.assertIn('c scaling vector[ 1]= 1.0000000000000000e+00', solver_trace) # ccon - self.assertIn('c scaling vector[ 2]= 2.2000000000000002e+00', solver_trace) # pincon - self.assertIn('c scaling vector[ 3]= 3.1000000000000001e+00', solver_trace) # pdrop1 - self.assertIn('c scaling vector[ 4]= 3.2000000000000002e+00', solver_trace) # pdrop3 - self.assertIn('c scaling vector[ 5]= 4.0999999999999996e+00', solver_trace) # P2_con - self.assertIn('c scaling vector[ 6]= 4.2000000000000002e+00', solver_trace) # Pout_con + self.assertIn( + 'c scaling vector[ 1]= 1.0000000000000000e+00', solver_trace + ) # ccon + self.assertIn( + 'c scaling vector[ 2]= 2.2000000000000002e+00', solver_trace + ) # pincon + self.assertIn( + 'c scaling vector[ 3]= 3.1000000000000001e+00', solver_trace + ) # pdrop1 + self.assertIn( + 'c scaling vector[ 4]= 3.2000000000000002e+00', solver_trace + ) # pdrop3 + self.assertIn( + 'c scaling vector[ 5]= 4.0999999999999996e+00', solver_trace + ) # P2_con + self.assertIn( + 'c scaling vector[ 6]= 4.2000000000000002e+00', solver_trace + ) # Pout_con self.assertIn('DenseVector "d scaling vector" with 1 elements:', solver_trace) - self.assertIn('d scaling vector[ 1]= 1.0000000000000000e+00', solver_trace) # pcon + self.assertIn( + 'd scaling vector[ 1]= 1.0000000000000000e+00', solver_trace + ) # pcon - @unittest.skipIf(not cyipopt_available, - "CyIpopt needed to run tests with solve") + @unittest.skipIf(not cyipopt_available, "CyIpopt needed to run tests with solve") def test_duals_after_solve(self): m = pyo.ConcreteModel() m.p = pyo.Var(initialize=1) m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_models.OneOutput()) - m.con = pyo.Constraint(expr=4*m.p-2*m.egb.outputs['o'] == 0) - m.obj = pyo.Objective(expr=10*m.p**2) + m.con = pyo.Constraint(expr=4 * m.p - 2 * m.egb.outputs['o'] == 0) + m.obj = pyo.Objective(expr=10 * m.p**2) # we want to check dual information so we need the suffixes m.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT_EXPORT) @@ -1733,28 +2589,40 @@ def test_duals_after_solve(self): self.assertAlmostEqual(pyo.value(m.egb.inputs['u']), 4.0, places=3) self.assertAlmostEqual(pyo.value(m.egb.outputs['o']), 20.0, places=3) self.assertAlmostEqual(pyo.value(m.dual[m.con]), 50.0, places=3) - self.assertAlmostEqual(m.dual[m.egb]['egb.output_constraints[o]'], -100.0, places=3) - self.assertAlmostEqual(pyo.value(m.ipopt_zL_out[m.egb.inputs['u']]), 500.0, places=3) - self.assertAlmostEqual(pyo.value(m.ipopt_zU_out[m.egb.inputs['u']]), 0.0, places=3) + self.assertAlmostEqual( + m.dual[m.egb]['egb.output_constraints[o]'], -100.0, places=3 + ) + self.assertAlmostEqual( + pyo.value(m.ipopt_zL_out[m.egb.inputs['u']]), 500.0, places=3 + ) + self.assertAlmostEqual( + pyo.value(m.ipopt_zU_out[m.egb.inputs['u']]), 0.0, places=3 + ) del m.obj - m.obj = pyo.Objective(expr=-10*m.p**2) + m.obj = pyo.Objective(expr=-10 * m.p**2) status = solver.solve(m, tee=False) self.assertAlmostEqual(pyo.value(m.p), 25.0, places=3) self.assertAlmostEqual(pyo.value(m.egb.inputs['u']), 10.0, places=3) self.assertAlmostEqual(pyo.value(m.egb.outputs['o']), 50.0, places=3) self.assertAlmostEqual(pyo.value(m.dual[m.con]), -125.0, places=3) - self.assertAlmostEqual(m.dual[m.egb]['egb.output_constraints[o]'], 250.0, places=3) - self.assertAlmostEqual(pyo.value(m.ipopt_zL_out[m.egb.inputs['u']]), 0.0, places=3) - self.assertAlmostEqual(pyo.value(m.ipopt_zU_out[m.egb.inputs['u']]), -1250.0, places=3) + self.assertAlmostEqual( + m.dual[m.egb]['egb.output_constraints[o]'], 250.0, places=3 + ) + self.assertAlmostEqual( + pyo.value(m.ipopt_zL_out[m.egb.inputs['u']]), 0.0, places=3 + ) + self.assertAlmostEqual( + pyo.value(m.ipopt_zU_out[m.egb.inputs['u']]), -1250.0, places=3 + ) m = pyo.ConcreteModel() m.p = pyo.Var(initialize=1) m.egb = ExternalGreyBoxBlock() m.egb.set_external_model(ex_models.OneOutputOneEquality()) - m.con = pyo.Constraint(expr=4*m.p-2*m.egb.outputs['o'] == 0) - m.obj = pyo.Objective(expr=10*m.p**2) + m.con = pyo.Constraint(expr=4 * m.p - 2 * m.egb.outputs['o'] == 0) + m.obj = pyo.Objective(expr=10 * m.p**2) # we want to check dual information so we need the suffixes m.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT_EXPORT) @@ -1766,8 +2634,11 @@ def test_duals_after_solve(self): self.assertAlmostEqual(pyo.value(m.egb.inputs['u']), 1.0, places=3) self.assertAlmostEqual(pyo.value(m.egb.outputs['o']), 5.0, places=3) self.assertAlmostEqual(pyo.value(m.dual[m.con]), 12.5, places=3) - self.assertAlmostEqual(m.dual[m.egb]['egb.output_constraints[o]'], -25.0, places=3) + self.assertAlmostEqual( + m.dual[m.egb]['egb.output_constraints[o]'], -25.0, places=3 + ) self.assertAlmostEqual(m.dual[m.egb]['egb.u2_con'], 62.5, places=3) + if __name__ == '__main__': unittest.main() diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_utils.py b/pyomo/contrib/pynumero/interfaces/tests/test_utils.py index e4704a26deb..dafe89ca2c7 100644 --- a/pyomo/contrib/pynumero/interfaces/tests/test_utils.py +++ b/pyomo/contrib/pynumero/interfaces/tests/test_utils.py @@ -11,21 +11,27 @@ import pyomo.common.unittest as unittest from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) + if not (numpy_available and scipy_available): raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") import pyomo.contrib.pynumero.interfaces.utils as utils + + class TestCondensedSparseSummation(unittest.TestCase): def test_condensed_sparse_summation(self): data = [1.0, 0.0] row = [1, 2] col = [2, 2] - A = scipy.sparse.coo_matrix( (data, (row,col)), shape=(3,3) ) + A = scipy.sparse.coo_matrix((data, (row, col)), shape=(3, 3)) data = [3.0, 0.0] - B = scipy.sparse.coo_matrix( (data, (row,col)), shape=(3,3) ) + B = scipy.sparse.coo_matrix((data, (row, col)), shape=(3, 3)) # By default, scipy will remove structural nonzeros that # have zero values @@ -34,8 +40,8 @@ def test_condensed_sparse_summation(self): # Our CondensedSparseSummation should not remove any # structural nonzeros - sparse_sum = utils.CondensedSparseSummation([A,B]) - C = sparse_sum.sum([A,B]) + sparse_sum = utils.CondensedSparseSummation([A, B]) + C = sparse_sum.sum([A, B]) expected_data = np.asarray([4.0, 0.0], dtype=np.float64) expected_row = np.asarray([1, 2], dtype=np.int64) expected_col = np.asarray([2, 2], dtype=np.int64) @@ -44,14 +50,14 @@ def test_condensed_sparse_summation(self): self.assertTrue(np.array_equal(expected_col, C.col)) B.data[1] = 5.0 - C = sparse_sum.sum([A,B]) + C = sparse_sum.sum([A, B]) expected_data = np.asarray([4.0, 5.0], dtype=np.float64) self.assertTrue(np.array_equal(expected_data, C.data)) self.assertTrue(np.array_equal(expected_row, C.row)) self.assertTrue(np.array_equal(expected_col, C.col)) B.data[1] = 0.0 - C = sparse_sum.sum([A,B]) + C = sparse_sum.sum([A, B]) expected_data = np.asarray([4.0, 0.0], dtype=np.float64) self.assertTrue(np.array_equal(expected_data, C.data)) self.assertTrue(np.array_equal(expected_row, C.row)) @@ -61,17 +67,17 @@ def test_repeated_row_col(self): data = [1.0, 0.0, 2.0] row = [1, 2, 1] col = [2, 2, 2] - A = scipy.sparse.coo_matrix( (data, (row,col)), shape=(3,3) ) + A = scipy.sparse.coo_matrix((data, (row, col)), shape=(3, 3)) data = [3.0, 0.0] row = [1, 2] col = [2, 2] - B = scipy.sparse.coo_matrix( (data, (row,col)), shape=(3,3) ) + B = scipy.sparse.coo_matrix((data, (row, col)), shape=(3, 3)) # Our CondensedSparseSummation should not remove any # structural nonzeros - sparse_sum = utils.CondensedSparseSummation([A,B]) - C = sparse_sum.sum([A,B]) + sparse_sum = utils.CondensedSparseSummation([A, B]) + C = sparse_sum.sum([A, B]) expected_data = np.asarray([6.0, 0.0], dtype=np.float64) expected_row = np.asarray([1, 2], dtype=np.int64) expected_col = np.asarray([2, 2], dtype=np.int64) @@ -82,5 +88,3 @@ def test_repeated_row_col(self): if __name__ == '__main__': TestCondensedSparseSummation().test_condensed_sparse_summation() - - diff --git a/pyomo/contrib/pynumero/interfaces/utils.py b/pyomo/contrib/pynumero/interfaces/utils.py index 80b7f604409..c7bd04eb002 100644 --- a/pyomo/contrib/pynumero/interfaces/utils.py +++ b/pyomo/contrib/pynumero/interfaces/utils.py @@ -12,12 +12,15 @@ from scipy.sparse import coo_matrix from pyomo.contrib.pynumero.sparse import BlockVector, BlockMatrix from pyomo.common.dependencies import attempt_import -mpi_block_vector, mpi_block_vector_available = attempt_import('pyomo.contrib.pynumero.sparse.mpi_block_vector') + +mpi_block_vector, mpi_block_vector_available = attempt_import( + 'pyomo.contrib.pynumero.sparse.mpi_block_vector' +) def build_bounds_mask(vector): """ - Creates masks for converting from the full vector of bounds that + Creates masks for converting from the full vector of bounds that may contain -np.inf or np.inf to a vector of bounds that are finite only. """ @@ -27,7 +30,7 @@ def build_bounds_mask(vector): def build_compression_matrix(compression_mask): """ Return a sparse matrix CM of ones such that - compressed_vector = CM*full_vector based on the + compressed_vector = CM*full_vector based on the compression mask Parameters @@ -54,12 +57,18 @@ def build_compression_matrix(compression_mask): return coo_matrix((data, (rows, cols)), shape=(nnz, len(compression_mask))) elif isinstance(compression_mask, mpi_block_vector.MPIBlockVector): from pyomo.contrib.pynumero.sparse.mpi_block_matrix import MPIBlockMatrix + n = compression_mask.nblocks rank_ownership = np.ones((n, n), dtype=np.int64) * -1 for i in range(n): rank_ownership[i, i] = compression_mask.rank_ownership[i] - res = MPIBlockMatrix(nbrows=n, nbcols=n, rank_ownership=rank_ownership, mpi_comm=compression_mask.mpi_comm, - assert_correct_owners=False) + res = MPIBlockMatrix( + nbrows=n, + nbcols=n, + rank_ownership=rank_ownership, + mpi_comm=compression_mask.mpi_comm, + assert_correct_owners=False, + ) for ndx in compression_mask.owned_blocks: block = compression_mask.get_block(ndx) sub_matrix = build_compression_matrix(block) @@ -70,18 +79,19 @@ def build_compression_matrix(compression_mask): def build_compression_mask_for_finite_values(vector): """ Creates masks for converting from the full vector of - values to the vector that contains only the finite values. This is + values to the vector that contains only the finite values. This is typically used to convert a vector of bounds (that may contain np.inf and -np.inf) to only the bounds that are finite. """ full_finite_mask = np.isfinite(vector) return full_finite_mask + # TODO: Is this needed anywhere? -#def build_expansion_map_for_finite_values(vector): +# def build_expansion_map_for_finite_values(vector): # """ # Creates a map from the compressed vector to the full -# vector based on the locations of finite values only. This is +# vector based on the locations of finite values only. This is # typically used to map a vector of bounds (that is compressed to only # contain the finite values) to a full vector (that may contain np.inf # and -np.inf). @@ -89,7 +99,8 @@ def build_compression_mask_for_finite_values(vector): # full_finite_mask = np.isfinite(vector) # finite_full_map = full_finite_mask.nonzero()[0] # return finite_full_map - + + def full_to_compressed(full_array, compression_mask, out=None): if out is not None: np.compress(compression_mask, full_array, out=out) @@ -97,6 +108,7 @@ def full_to_compressed(full_array, compression_mask, out=None): else: return np.compress(compression_mask, full_array) + def compressed_to_full(compressed_array, compression_mask, out=None, default=None): if out is None: ret = np.empty(len(compression_mask)) @@ -110,6 +122,7 @@ def compressed_to_full(compressed_array, compression_mask, out=None, default=Non return ret + def make_lower_triangular_full(lower_triangular_matrix): ''' This function takes a symmetric matrix that only has entries in the @@ -117,12 +130,19 @@ def make_lower_triangular_full(lower_triangular_matrix): ''' mask = lower_triangular_matrix.row != lower_triangular_matrix.col - row = np.concatenate((lower_triangular_matrix.row, lower_triangular_matrix.col[mask])) - col = np.concatenate((lower_triangular_matrix.col, lower_triangular_matrix.row[mask])) - data = np.concatenate((lower_triangular_matrix.data, lower_triangular_matrix.data[mask])) + row = np.concatenate( + (lower_triangular_matrix.row, lower_triangular_matrix.col[mask]) + ) + col = np.concatenate( + (lower_triangular_matrix.col, lower_triangular_matrix.row[mask]) + ) + data = np.concatenate( + (lower_triangular_matrix.data, lower_triangular_matrix.data[mask]) + ) return coo_matrix((data, (row, col)), shape=lower_triangular_matrix.shape) + class CondensedSparseSummation(object): def __init__(self, list_of_matrices): """ @@ -145,11 +165,11 @@ def _build_maps(self, list_of_matrices): # get the list of all unique nonzeros across the matrices nz_tuples = set() for m in list_of_matrices: - nz_tuples.update(zip(m.row,m.col)) + nz_tuples.update(zip(m.row, m.col)) nz_tuples = sorted(nz_tuples) self._nz_tuples = nz_tuples self._row, self._col = list(zip(*nz_tuples)) - row_col_to_nz_map = {t:i for i,t in enumerate(nz_tuples)} + row_col_to_nz_map = {t: i for i, t in enumerate(nz_tuples)} self._shape = None self._maps = list() @@ -160,7 +180,9 @@ def _build_maps(self, list_of_matrices): for i in range(nnz): map_col[i] = i map_row[i] = row_col_to_nz_map[(m.row[i], m.col[i])] - mp = coo_matrix( (np.ones(nnz), (map_row, map_col)), shape=(len(row_col_to_nz_map),nnz) ) + mp = coo_matrix( + (np.ones(nnz), (map_row, map_col)), shape=(len(row_col_to_nz_map), nnz) + ) self._maps.append(mp) if self._shape is None: self._shape = m.shape @@ -170,7 +192,9 @@ def _build_maps(self, list_of_matrices): def sum(self, list_of_matrices): data = np.zeros(len(self._row)) assert len(self._maps) == len(list_of_matrices) - for i,mp in enumerate(self._maps): + for i, mp in enumerate(self._maps): data += mp.dot(list_of_matrices[i].data) - ret = coo_matrix((data, (np.copy(self._row), np.copy(self._col))), shape=self._shape) + ret = coo_matrix( + (data, (np.copy(self._row), np.copy(self._col))), shape=self._shape + ) return ret diff --git a/pyomo/contrib/pynumero/intrinsic.py b/pyomo/contrib/pynumero/intrinsic.py index 12389776ce8..5a2dccb64e7 100644 --- a/pyomo/contrib/pynumero/intrinsic.py +++ b/pyomo/contrib/pynumero/intrinsic.py @@ -11,11 +11,12 @@ from pyomo.common.dependencies import numpy as np, attempt_import -block_vector = attempt_import('pyomo.contrib.pynumero.sparse.block_vector', - defer_check=True)[0] +block_vector = attempt_import( + 'pyomo.contrib.pynumero.sparse.block_vector', defer_check=True +)[0] -def norm(x, ord=None): +def norm(x, ord=None): f = np.linalg.norm if isinstance(x, np.ndarray): return f(x, ord=ord) @@ -25,6 +26,7 @@ def norm(x, ord=None): else: raise NotImplementedError() + def allclose(x1, x2, rtol, atol): # this needs to be implemented for parallel x1_flat = x1.flatten() @@ -37,18 +39,21 @@ def concatenate(arrays): def where(*args): - condition = args[0] if len(args) == 2: raise ValueError('either both or neither of x and y should be given') if len(args) > 3: - raise TypeError('where() takes at most 3 arguments ({} given)'.format(len(args))) + raise TypeError( + 'where() takes at most 3 arguments ({} given)'.format(len(args)) + ) n_args = len(args) if isinstance(condition, block_vector.BlockVector): if n_args == 1: - assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not condition.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' res = block_vector.BlockVector(condition.nblocks) for i in range(condition.nblocks): _args = [condition.get_block(i)] @@ -57,52 +62,100 @@ def where(*args): else: x = args[1] y = args[2] - if isinstance(x, block_vector.BlockVector) and isinstance(y, block_vector.BlockVector): - assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert condition.nblocks == x.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand' - assert x.nblocks == y.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand' + if isinstance(x, block_vector.BlockVector) and isinstance( + y, block_vector.BlockVector + ): + assert ( + not condition.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not x.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not y.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + condition.nblocks == x.nblocks + ), 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + x.nblocks == y.nblocks + ), 'Operation on BlockVectors need the same number of blocks on each operand' res = block_vector.BlockVector(condition.nblocks) for i in range(condition.nblocks): _args = [condition.get_block(i), x.get_block(i), y.get_block(i)] res.set_block(i, where(*_args)) return res elif isinstance(x, np.ndarray) and isinstance(y, block_vector.BlockVector): - assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert condition.nblocks == y.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand' - assert x.size == condition.size, 'Operation on BlockVectors need the same number of blocks on each operand' - assert x.size == y.size, 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + not condition.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not y.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + condition.nblocks == y.nblocks + ), 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + x.size == condition.size + ), 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + x.size == y.size + ), 'Operation on BlockVectors need the same number of blocks on each operand' res = block_vector.BlockVector(condition.nblocks) accum = 0 for i in range(condition.nblocks): nelements = condition._brow_lengths[i] - _args = [condition.get_block(i), x[accum: accum + nelements], y.get_block(i)] + _args = [ + condition.get_block(i), + x[accum : accum + nelements], + y.get_block(i), + ] res.set_block(i, where(*_args)) accum += nelements return res elif isinstance(x, block_vector.BlockVector) and isinstance(y, np.ndarray): - assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert condition.nblocks == x.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand' - assert x.size == condition.size, 'Operation on BlockVectors need the same number of blocks on each operand' - assert x.size == y.size, 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + not condition.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not x.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + condition.nblocks == x.nblocks + ), 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + x.size == condition.size + ), 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + x.size == y.size + ), 'Operation on BlockVectors need the same number of blocks on each operand' res = block_vector.BlockVector(condition.nblocks) accum = 0 for i in range(condition.nblocks): nelements = condition._brow_lengths[i] - _args = [condition.get_block(i), x.get_block(i), y[accum: accum + nelements]] + _args = [ + condition.get_block(i), + x.get_block(i), + y[accum : accum + nelements], + ] res.set_block(i, where(*_args)) accum += nelements return res elif np.isscalar(x) and isinstance(y, block_vector.BlockVector): - assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert condition.nblocks == y.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand' - assert condition.size == y.size, 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + not condition.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not y.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + condition.nblocks == y.nblocks + ), 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + condition.size == y.size + ), 'Operation on BlockVectors need the same number of blocks on each operand' res = block_vector.BlockVector(condition.nblocks) accum = 0 for i in range(condition.nblocks): @@ -113,10 +166,18 @@ def where(*args): return res elif isinstance(x, block_vector.BlockVector) and np.isscalar(y): - assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert condition.nblocks == x.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand' - assert x.size == condition.size, 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + not condition.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not x.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + condition.nblocks == x.nblocks + ), 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + x.size == condition.size + ), 'Operation on BlockVectors need the same number of blocks on each operand' res = block_vector.BlockVector(condition.nblocks) accum = 0 for i in range(condition.nblocks): @@ -127,44 +188,64 @@ def where(*args): return res elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray): - assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert x.size == condition.size, 'Operation on BlockVectors need the same number of blocks on each operand' - assert x.size == y.size, 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + not condition.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + x.size == condition.size + ), 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + x.size == y.size + ), 'Operation on BlockVectors need the same number of blocks on each operand' res = block_vector.BlockVector(condition.nblocks) accum = 0 for i in range(condition.nblocks): nelements = condition._brow_lengths[i] - _args = [condition.get_block(i), x[accum: accum + nelements], y[accum: accum + nelements]] + _args = [ + condition.get_block(i), + x[accum : accum + nelements], + y[accum : accum + nelements], + ] res.set_block(i, where(*_args)) accum += nelements return res elif isinstance(x, np.ndarray) and np.isscalar(y): - assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert x.size == condition.size, 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + not condition.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + x.size == condition.size + ), 'Operation on BlockVectors need the same number of blocks on each operand' res = block_vector.BlockVector(condition.nblocks) accum = 0 for i in range(condition.nblocks): nelements = condition._brow_lengths[i] - _args = [condition.get_block(i), x[accum: accum + nelements], y] + _args = [condition.get_block(i), x[accum : accum + nelements], y] res.set_block(i, where(*_args)) accum += nelements return res elif np.isscalar(x) and isinstance(y, np.ndarray): - assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert condition.size == y.size, 'Operation on BlockVectors need the same number of blocks on each operand' + assert ( + not condition.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + condition.size == y.size + ), 'Operation on BlockVectors need the same number of blocks on each operand' res = block_vector.BlockVector(condition.nblocks) accum = 0 for i in range(condition.nblocks): nelements = condition._brow_lengths[i] - _args = [condition.get_block(i), x, y[accum: accum + nelements]] + _args = [condition.get_block(i), x, y[accum : accum + nelements]] res.set_block(i, where(*_args)) accum += nelements return res elif np.isscalar(x) and np.isscalar(y): - assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not condition.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' res = block_vector.BlockVector(condition.nblocks) for i in range(condition.nblocks): _args = [condition.get_block(i), x, y] @@ -177,7 +258,6 @@ def where(*args): if n_args == 1: return np.where(*args) else: - x = args[1] y = args[2] if isinstance(x, block_vector.BlockVector): @@ -191,43 +271,60 @@ def where(*args): def isin(element, test_elements, assume_unique=False, invert=False): - - if isinstance(element, block_vector.BlockVector) and isinstance(test_elements, block_vector.BlockVector): - assert not element.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert not test_elements.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert element.nblocks == test_elements.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand' + if isinstance(element, block_vector.BlockVector) and isinstance( + test_elements, block_vector.BlockVector + ): + assert ( + not element.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not test_elements.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + element.nblocks == test_elements.nblocks + ), 'Operation on BlockVectors need the same number of blocks on each operand' res = block_vector.BlockVector(element.nblocks) for i in range(element.nblocks): - res.set_block(i, isin(element.get_block(i), - test_elements.get_block(i), - assume_unique=assume_unique, - invert=invert)) + res.set_block( + i, + isin( + element.get_block(i), + test_elements.get_block(i), + assume_unique=assume_unique, + invert=invert, + ), + ) return res - elif isinstance(element, block_vector.BlockVector) and isinstance(test_elements, np.ndarray): - - assert not element.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + elif isinstance(element, block_vector.BlockVector) and isinstance( + test_elements, np.ndarray + ): + assert ( + not element.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' res = block_vector.BlockVector(element.nblocks) for i in range(element.nblocks): - res.set_block(i, isin(element.get_block(i), - test_elements, - assume_unique=assume_unique, - invert=invert)) + res.set_block( + i, + isin( + element.get_block(i), + test_elements, + assume_unique=assume_unique, + invert=invert, + ), + ) return res elif isinstance(element, np.ndarray) and isinstance(test_elements, np.ndarray): - - return np.isin(element, - test_elements, - assume_unique=assume_unique, - invert=invert) + return np.isin( + element, test_elements, assume_unique=assume_unique, invert=invert + ) else: raise NotImplementedError() def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): - if return_indices: raise NotImplementedError() @@ -236,83 +333,122 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False): elif isinstance(ar1, np.ndarray) or isinstance(ar1, block_vector.BlockVector): x = ar1 else: - raise RuntimeError('ar1 type not recognized. Needs to be np.ndarray or BlockVector') + raise RuntimeError( + 'ar1 type not recognized. Needs to be np.ndarray or BlockVector' + ) if isinstance(ar2, tuple) and len(ar2) == 1: y = ar2[0] elif isinstance(ar2, np.ndarray) or isinstance(ar1, block_vector.BlockVector): y = ar2 else: - raise RuntimeError('ar2 type not recognized. Needs to be np.ndarray or BlockVector') - - if isinstance(x, block_vector.BlockVector) and isinstance(y, block_vector.BlockVector): + raise RuntimeError( + 'ar2 type not recognized. Needs to be np.ndarray or BlockVector' + ) + if isinstance(x, block_vector.BlockVector) and isinstance( + y, block_vector.BlockVector + ): assert x.nblocks == y.nblocks, "Number of blocks does not match" - assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not x.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not y.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' res = block_vector.BlockVector(x.nblocks) for i in range(x.nblocks): - res.set_block(i, intersect1d(x.get_block(i), y.get_block(i), assume_unique=assume_unique)) + res.set_block( + i, + intersect1d( + x.get_block(i), y.get_block(i), assume_unique=assume_unique + ), + ) return res elif isinstance(x, block_vector.BlockVector) and isinstance(y, np.ndarray): - assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not x.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' res = block_vector.BlockVector(x.nblocks) for i in range(x.nblocks): - res.set_block(i, np.intersect1d(x.get_block(i), y, assume_unique=assume_unique)) + res.set_block( + i, np.intersect1d(x.get_block(i), y, assume_unique=assume_unique) + ) return res elif isinstance(x, np.ndarray) and isinstance(y, block_vector.BlockVector): - - assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not y.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' res = block_vector.BlockVector(y.nblocks) for i in range(y.nblocks): - res.set_block(i, np.intersect1d(x, y.get_block(i), assume_unique=assume_unique)) + res.set_block( + i, np.intersect1d(x, y.get_block(i), assume_unique=assume_unique) + ) return res else: return np.intersect1d(x, y, assume_unique=assume_unique) def setdiff1d(ar1, ar2, assume_unique=False): - if isinstance(ar1, tuple) and len(ar1) == 1: x = ar1[0] elif isinstance(ar1, np.ndarray) or isinstance(ar1, block_vector.BlockVector): x = ar1 else: - raise RuntimeError('ar1 type not recognized. Needs to be np.ndarray or BlockVector') + raise RuntimeError( + 'ar1 type not recognized. Needs to be np.ndarray or BlockVector' + ) if isinstance(ar2, tuple) and len(ar2) == 1: y = ar2[0] elif isinstance(ar2, np.ndarray) or isinstance(ar1, block_vector.BlockVector): y = ar2 else: - raise RuntimeError('ar2 type not recognized. Needs to be np.ndarray or BlockVector') - - if isinstance(x, block_vector.BlockVector) and isinstance(y, block_vector.BlockVector): + raise RuntimeError( + 'ar2 type not recognized. Needs to be np.ndarray or BlockVector' + ) + if isinstance(x, block_vector.BlockVector) and isinstance( + y, block_vector.BlockVector + ): assert x.nblocks == y.nblocks, "Number of blocks does not match" - assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' - assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not x.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not y.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' res = block_vector.BlockVector(x.nblocks) for i in range(x.nblocks): - res.set_block(i, setdiff1d(x.get_block(i), y.get_block(i), assume_unique=assume_unique)) + res.set_block( + i, + setdiff1d(x.get_block(i), y.get_block(i), assume_unique=assume_unique), + ) return res elif isinstance(x, block_vector.BlockVector) and isinstance(y, np.ndarray): - assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not x.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' res = block_vector.BlockVector(x.nblocks) for i in range(x.nblocks): - res.set_block(i, np.setdiff1d(x.get_block(i), y, assume_unique=assume_unique)) + res.set_block( + i, np.setdiff1d(x.get_block(i), y, assume_unique=assume_unique) + ) return res elif isinstance(x, np.ndarray) and isinstance(y, block_vector.BlockVector): - - assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector' + assert ( + not y.has_none + ), 'Operation not allowed with None blocks. Specify all blocks in BlockVector' res = block_vector.BlockVector(y.nblocks) for i in range(y.nblocks): - res.set_block(i, np.setdiff1d(x, y.get_block(i), assume_unique=assume_unique)) + res.set_block( + i, np.setdiff1d(x, y.get_block(i), assume_unique=assume_unique) + ) return res else: return np.setdiff1d(x, y, assume_unique=assume_unique) diff --git a/pyomo/contrib/pynumero/linalg/base.py b/pyomo/contrib/pynumero/linalg/base.py index 9311fba02a5..2b4eeaef451 100644 --- a/pyomo/contrib/pynumero/linalg/base.py +++ b/pyomo/contrib/pynumero/linalg/base.py @@ -47,9 +47,7 @@ def do_numeric_factorization( @abstractmethod def do_back_solve( - self, - rhs: Union[np.ndarray, BlockVector], - raise_on_error: bool = True, + self, rhs: Union[np.ndarray, BlockVector], raise_on_error: bool = True ) -> Tuple[Optional[Union[np.ndarray, BlockVector]], LinearSolverResults]: pass @@ -59,7 +57,6 @@ def solve( rhs: Union[np.ndarray, BlockVector], raise_on_error: bool = True, ) -> Tuple[Optional[Union[np.ndarray, BlockVector]], LinearSolverResults]: - symbolic_res = self.do_symbolic_factorization( matrix, raise_on_error=raise_on_error ) diff --git a/pyomo/contrib/pynumero/linalg/ma27.py b/pyomo/contrib/pynumero/linalg/ma27.py index 29a0032a01d..21c137e837b 100644 --- a/pyomo/contrib/pynumero/linalg/ma27.py +++ b/pyomo/contrib/pynumero/linalg/ma27.py @@ -9,16 +9,14 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ from pyomo.common.fileutils import find_library -from pyomo.contrib.pynumero.linalg.utils import (validate_index, - validate_value, _NotSet) +from pyomo.contrib.pynumero.linalg.utils import validate_index, validate_value, _NotSet import numpy.ctypeslib as npct import numpy as np -import ctypes +import ctypes import os class MA27Interface(object): - libname = _NotSet @classmethod @@ -29,13 +27,9 @@ def available(cls): return False return os.path.exists(cls.libname) - def __init__(self, - iw_factor=None, - a_factor=None): - + def __init__(self, iw_factor=None, a_factor=None): if not MA27Interface.available(): - raise RuntimeError( - 'Could not find pynumero_MA27 library.') + raise RuntimeError('Could not find pynumero_MA27 library.') self.iw_factor = iw_factor self.a_factor = a_factor @@ -48,34 +42,48 @@ def __init__(self, array_1d_int = npct.ndpointer(dtype=np.intc, ndim=1, flags='CONTIGUOUS') # Declare arg and res types of functions: - + # Do I need to specify that this function takes no argument? self.lib.new_MA27_struct.restype = ctypes.c_void_p self.lib.free_MA27_struct.argtypes = [ctypes.c_void_p] - + self.lib.set_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int] # Do I need to specify that this function returns nothing? self.lib.get_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.get_icntl.restype = ctypes.c_int - + self.lib.set_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_double] self.lib.get_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.get_cntl.restype = ctypes.c_double - + self.lib.get_info.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.get_info.restype = ctypes.c_int - + self.lib.alloc_iw_a.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.alloc_iw_b.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.alloc_a.argtypes = [ctypes.c_void_p, ctypes.c_int] - self.lib.do_symbolic_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int, - ctypes.c_int, array_1d_int, array_1d_int] - self.lib.do_numeric_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int, - ctypes.c_int, array_1d_int, array_1d_int, - array_1d_double] - self.lib.do_backsolve.argtypes = [ctypes.c_void_p, ctypes.c_int, array_1d_double] + self.lib.do_symbolic_factorization.argtypes = [ + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + array_1d_int, + array_1d_int, + ] + self.lib.do_numeric_factorization.argtypes = [ + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + array_1d_int, + array_1d_int, + array_1d_double, + ] + self.lib.do_backsolve.argtypes = [ + ctypes.c_void_p, + ctypes.c_int, + array_1d_double, + ] self.icntl_len = 30 self.cntl_len = 5 @@ -92,24 +100,24 @@ def set_icntl(self, i, val): # NOTE: Use the FORTRAN indexing (same as documentation) to # set and access info/cntl arrays from Python, whereas C # functions use C indexing. Maybe this is too confusing. - self.lib.set_icntl(self._ma27, i-1, val) + self.lib.set_icntl(self._ma27, i - 1, val) def get_icntl(self, i): validate_index(i, self.icntl_len, 'ICNTL') - return self.lib.get_icntl(self._ma27, i-1) + return self.lib.get_icntl(self._ma27, i - 1) def set_cntl(self, i, val): validate_index(i, self.cntl_len, 'CNTL') validate_value(val, float, 'CNTL') - self.lib.set_cntl(self._ma27, i-1, val) + self.lib.set_cntl(self._ma27, i - 1, val) def get_cntl(self, i): validate_index(i, self.cntl_len, 'CNTL') - return self.lib.get_cntl(self._ma27, i-1) + return self.lib.get_cntl(self._ma27, i - 1) def get_info(self, i): validate_index(i, self.info_len, 'INFO') - return self.lib.get_info(self._ma27, i-1) + return self.lib.get_info(self._ma27, i - 1) def do_symbolic_factorization(self, dim, irn, icn): irn = irn.astype(np.intc, casting='safe', copy=True) @@ -119,12 +127,10 @@ def do_symbolic_factorization(self, dim, irn, icn): assert ne == icn.size, 'Dimension mismatch in row and column arrays' if self.iw_factor is not None: - min_size = 2*ne + 3*dim + 1 - self.lib.alloc_iw_a(self._ma27, - int(self.iw_factor*min_size)) + min_size = 2 * ne + 3 * dim + 1 + self.lib.alloc_iw_a(self._ma27, int(self.iw_factor * min_size)) - self.lib.do_symbolic_factorization(self._ma27, - dim, ne, irn, icn) + self.lib.do_symbolic_factorization(self._ma27, dim, ne, irn, icn) return self.get_info(1) def do_numeric_factorization(self, irn, icn, dim, entries): @@ -134,28 +140,27 @@ def do_numeric_factorization(self, irn, icn, dim, entries): ent = entries.astype(np.double, casting='safe', copy=True) ne = ent.size - assert dim == self._dim_cached,\ - ('Dimension mismatch between symbolic and numeric factorization.' - 'Please re-run symbolic factorization with the correct ' - 'dimension.') + assert dim == self._dim_cached, ( + 'Dimension mismatch between symbolic and numeric factorization.' + 'Please re-run symbolic factorization with the correct ' + 'dimension.' + ) if self.a_factor is not None: min_size = self.get_info(5) - self.lib.alloc_a(self._ma27, - int(self.a_factor*min_size)) + self.lib.alloc_a(self._ma27, int(self.a_factor * min_size)) if self.iw_factor is not None: min_size = self.get_info(6) - self.lib.alloc_iw_b(self._ma27, - int(self.iw_factor*min_size)) + self.lib.alloc_iw_b(self._ma27, int(self.iw_factor * min_size)) - self.lib.do_numeric_factorization(self._ma27, dim, ne, - irn, icn, ent) + self.lib.do_numeric_factorization(self._ma27, dim, ne, irn, icn, ent) return self.get_info(1) def do_backsolve(self, rhs, copy=True): rhs = rhs.astype(np.double, casting='safe', copy=copy) rhs_dim = rhs.size - assert rhs_dim == self._dim_cached,\ - 'Dimension mismatch in right hand side. Please correct.' + assert ( + rhs_dim == self._dim_cached + ), 'Dimension mismatch in right hand side. Please correct.' self.lib.do_backsolve(self._ma27, rhs_dim, rhs) diff --git a/pyomo/contrib/pynumero/linalg/ma27_interface.py b/pyomo/contrib/pynumero/linalg/ma27_interface.py index d848d9062e4..1ae02fe3290 100644 --- a/pyomo/contrib/pynumero/linalg/ma27_interface.py +++ b/pyomo/contrib/pynumero/linalg/ma27_interface.py @@ -27,7 +27,6 @@ def __init__( def do_symbolic_factorization( self, matrix: Union[spmatrix, BlockMatrix], raise_on_error: bool = True ) -> LinearSolverResults: - if not isspmatrix_coo(matrix): matrix = matrix.tocoo() matrix = tril(matrix) @@ -59,7 +58,6 @@ def do_symbolic_factorization( def do_numeric_factorization( self, matrix: Union[spmatrix, BlockMatrix], raise_on_error: bool = True ) -> LinearSolverResults: - if not isspmatrix_coo(matrix): matrix = matrix.tocoo() matrix = tril(matrix) @@ -96,7 +94,6 @@ def do_numeric_factorization( def do_back_solve( self, rhs: Union[np.ndarray, BlockVector], raise_on_error: bool = True ) -> Tuple[Optional[Union[np.ndarray, BlockVector]], LinearSolverResults]: - if isinstance(rhs, BlockVector): _rhs = rhs.flatten() result = _rhs diff --git a/pyomo/contrib/pynumero/linalg/ma57.py b/pyomo/contrib/pynumero/linalg/ma57.py index c12658d7cf9..1be6c8abcf7 100644 --- a/pyomo/contrib/pynumero/linalg/ma57.py +++ b/pyomo/contrib/pynumero/linalg/ma57.py @@ -10,15 +10,14 @@ # ___________________________________________________________________________ from pyomo.common.fileutils import find_library -from pyomo.contrib.pynumero.linalg.utils import (validate_index, - validate_value, _NotSet) +from pyomo.contrib.pynumero.linalg.utils import validate_index, validate_value, _NotSet import numpy.ctypeslib as npct import numpy as np -import ctypes +import ctypes import os -class MA57Interface(object): +class MA57Interface(object): libname = _NotSet @classmethod @@ -29,14 +28,9 @@ def available(cls): return False return os.path.exists(cls.libname) - def __init__(self, - work_factor=None, - fact_factor=None, - ifact_factor=None): - + def __init__(self, work_factor=None, fact_factor=None, ifact_factor=None): if not MA57Interface.available(): - raise RuntimeError( - 'Could not find pynumero_MA57 library.') + raise RuntimeError('Could not find pynumero_MA57 library.') self.work_factor = work_factor self.fact_factor = fact_factor @@ -49,25 +43,25 @@ def __init__(self, array_1d_int = npct.ndpointer(dtype=np.intc, ndim=1, flags='CONTIGUOUS') # Declare arg and res types of functions: - + # Do I need to specify that this function takes no argument? self.lib.new_MA57_struct.restype = ctypes.c_void_p # return type is pointer to MA57_struct. Why do I use c_void_p here? self.lib.free_MA57_struct.argtypes = [ctypes.c_void_p] - + self.lib.set_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int] # Do I need to specify that this function returns nothing? self.lib.get_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.get_icntl.restype = ctypes.c_int - + self.lib.set_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_double] self.lib.get_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.get_cntl.restype = ctypes.c_double - + self.lib.get_info.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.get_info.restype = ctypes.c_int - + self.lib.get_rinfo.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.get_rinfo.restype = ctypes.c_double @@ -79,17 +73,42 @@ def __init__(self, self.lib.set_nrhs.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.set_lrhs.argtypes = [ctypes.c_void_p, ctypes.c_int] self.lib.set_job.argtypes = [ctypes.c_void_p, ctypes.c_int] - - self.lib.do_symbolic_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int, - ctypes.c_int, array_1d_int, array_1d_int] - self.lib.do_numeric_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int, - ctypes.c_int, array_1d_double] - self.lib.do_backsolve.argtypes = [ctypes.c_void_p, ctypes.c_int, array_2d_double] - self.lib.do_iterative_refinement.argtypes = [ctypes.c_void_p, ctypes.c_int, - ctypes.c_int, array_1d_double, array_1d_int, array_1d_int, - array_1d_double, array_1d_double, array_1d_double] - self.lib.do_reallocation.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_double, - ctypes.c_int] + + self.lib.do_symbolic_factorization.argtypes = [ + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + array_1d_int, + array_1d_int, + ] + self.lib.do_numeric_factorization.argtypes = [ + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + array_1d_double, + ] + self.lib.do_backsolve.argtypes = [ + ctypes.c_void_p, + ctypes.c_int, + array_2d_double, + ] + self.lib.do_iterative_refinement.argtypes = [ + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_int, + array_1d_double, + array_1d_int, + array_1d_int, + array_1d_double, + array_1d_double, + array_1d_double, + ] + self.lib.do_reallocation.argtypes = [ + ctypes.c_void_p, + ctypes.c_int, + ctypes.c_double, + ctypes.c_int, + ] self.icntl_len = 20 self.cntl_len = 5 @@ -107,28 +126,28 @@ def set_icntl(self, i, val): # NOTE: Use the FORTRAN indexing (same as documentation) to # set and access info/cntl arrays from Python, whereas C # functions use C indexing. Maybe this is too confusing. - self.lib.set_icntl(self._ma57, i-1, val) + self.lib.set_icntl(self._ma57, i - 1, val) def get_icntl(self, i): validate_index(i, self.icntl_len, 'ICNTL') - return self.lib.get_icntl(self._ma57, i-1) + return self.lib.get_icntl(self._ma57, i - 1) def set_cntl(self, i, val): validate_index(i, self.cntl_len, 'CNTL') validate_value(val, float, 'CNTL') - self.lib.set_cntl(self._ma57, i-1, val) + self.lib.set_cntl(self._ma57, i - 1, val) def get_cntl(self, i): validate_index(i, self.cntl_len, 'CNTL') - return self.lib.get_cntl(self._ma57, i-1) + return self.lib.get_cntl(self._ma57, i - 1) def get_info(self, i): validate_index(i, self.info_len, 'INFO') - return self.lib.get_info(self._ma57, i-1) + return self.lib.get_info(self._ma57, i - 1) def get_rinfo(self, i): validate_index(i, self.rinfo_len, 'RINFO') - return self.lib.get_info(self._ma57, i-1) + return self.lib.get_info(self._ma57, i - 1) def do_symbolic_factorization(self, dim, irn, jcn): irn = irn.astype(np.intc, casting='safe', copy=True) @@ -138,31 +157,29 @@ def do_symbolic_factorization(self, dim, irn, jcn): self.ne_cached = ne self.dim_cached = dim assert ne == jcn.size, 'Dimension mismatch in row and column arrays' - self.lib.do_symbolic_factorization(self._ma57, - dim, ne, irn, jcn) + self.lib.do_symbolic_factorization(self._ma57, dim, ne, irn, jcn) return self.get_info(1) def do_numeric_factorization(self, dim, entries): entries = entries.astype(np.float64, casting='safe', copy=True) ne = entries.size - assert ne == self.ne_cached,\ - ('Wrong number of entries in matrix. Please re-run symbolic' - 'factorization with correct nonzero coordinates.') - assert dim == self.dim_cached,\ - ('Dimension mismatch between symbolic and numeric factorization.' - 'Please re-run symbolic factorization with the correct ' - 'dimension.') + assert ne == self.ne_cached, ( + 'Wrong number of entries in matrix. Please re-run symbolic' + 'factorization with correct nonzero coordinates.' + ) + assert dim == self.dim_cached, ( + 'Dimension mismatch between symbolic and numeric factorization.' + 'Please re-run symbolic factorization with the correct ' + 'dimension.' + ) if self.fact_factor is not None: min_size = self.get_info(9) - self.lib.alloc_fact(self._ma57, - int(self.fact_factor*min_size)) + self.lib.alloc_fact(self._ma57, int(self.fact_factor * min_size)) if self.ifact_factor is not None: min_size = self.get_info(10) - self.lib.alloc_ifact(self._ma57, - int(self.ifact_factor*min_size)) + self.lib.alloc_ifact(self._ma57, int(self.ifact_factor * min_size)) - self.lib.do_numeric_factorization(self._ma57, - dim, ne, entries) + self.lib.do_numeric_factorization(self._ma57, dim, ne, entries) return self.get_info(1) def do_backsolve(self, rhs, copy=True): @@ -175,13 +192,13 @@ def do_backsolve(self, rhs, copy=True): elif len(shape) == 2: # FIXME raise NotImplementedError( - 'Funcionality for solving a matrix of right hand ' - 'is buggy and needs fixing.') + 'Functionality for solving a matrix of right hand ' + 'is buggy and needs fixing.' + ) rhs_dim = rhs.shape[0] nrhs = rhs.shape[1] else: - raise ValueError( - 'Right hand side must be a one or two-dimensional array') + raise ValueError('Right hand side must be a one or two-dimensional array') # This does not necessarily need to be true; each RHS could have length # larger than N (for some reason). In the C interface, however, I assume # that LRHS == N @@ -189,16 +206,14 @@ def do_backsolve(self, rhs, copy=True): # TODO: Option to specify a JOB other than 1. By my understanding, # different JOBs allow partial factorizations to be performed. # Currently not supported - unclear if it should be. - + if nrhs > 1: self.lib.set_nrhs(self._ma57, nrhs) - + if self.work_factor is not None: - self.lib.alloc_work(self._ma57, - int(self.work_factor*nrhs*rhs_dim)) + self.lib.alloc_work(self._ma57, int(self.work_factor * nrhs * rhs_dim)) - self.lib.do_backsolve(self._ma57, - rhs_dim, rhs) + self.lib.do_backsolve(self._ma57, rhs_dim, rhs) if len(shape) == 1: # If the user input rhs as a 1D array, return the solution diff --git a/pyomo/contrib/pynumero/linalg/ma57_interface.py b/pyomo/contrib/pynumero/linalg/ma57_interface.py index 3610a97199a..ef80ac653cf 100644 --- a/pyomo/contrib/pynumero/linalg/ma57_interface.py +++ b/pyomo/contrib/pynumero/linalg/ma57_interface.py @@ -36,7 +36,6 @@ def __init__( def do_symbolic_factorization( self, matrix: Union[spmatrix, BlockMatrix], raise_on_error: bool = True ) -> LinearSolverResults: - if not isspmatrix_coo(matrix): matrix = matrix.tocoo() matrix = tril(matrix) @@ -70,7 +69,6 @@ def do_symbolic_factorization( def do_numeric_factorization( self, matrix: Union[spmatrix, BlockMatrix], raise_on_error: bool = True ) -> LinearSolverResults: - if not isspmatrix_coo(matrix): matrix = matrix.tocoo() matrix = tril(matrix) @@ -107,7 +105,6 @@ def do_numeric_factorization( def do_back_solve( self, rhs: Union[np.ndarray, BlockVector], raise_on_error: bool = True ) -> Tuple[Optional[Union[np.ndarray, BlockVector]], LinearSolverResults]: - if isinstance(rhs, BlockVector): _rhs = rhs.flatten() result = _rhs diff --git a/pyomo/contrib/pynumero/linalg/mumps_interface.py b/pyomo/contrib/pynumero/linalg/mumps_interface.py index 1e712d4ebee..95aca114f2f 100644 --- a/pyomo/contrib/pynumero/linalg/mumps_interface.py +++ b/pyomo/contrib/pynumero/linalg/mumps_interface.py @@ -14,22 +14,25 @@ from typing import Union, Tuple, Optional from pyomo.common.dependencies import attempt_import + mumps, mumps_available = attempt_import( - 'mumps', error_message="Error importing mumps. PyNumero's " + 'mumps', + error_message="Error importing mumps. PyNumero's " "mumps_interface requires pymumps; install it with, e.g., " - "'conda install -c conda-forge pymumps'") + "'conda install -c conda-forge pymumps'", +) from pyomo.contrib.pynumero.sparse import BlockVector, BlockMatrix class MumpsCentralizedAssembledLinearSolver(DirectLinearSolverInterface): """ - A thin wrapper around pymumps which uses the centralized assembled matrix format. + A thin wrapper around pymumps which uses the centralized assembled matrix format. In other words ICNTL(5) = 0 and ICNTL(18) = 0. Solve matrix * x = rhs for x. - See the Mumps documentation for descriptions of the parameters. The section numbers + See the Mumps documentation for descriptions of the parameters. The section numbers listed below refer to the Mumps documentation for version 5.2.1. Parameters @@ -45,9 +48,11 @@ class MumpsCentralizedAssembledLinearSolver(DirectLinearSolverInterface): icntl_options: dict, optional See section 6.1 """ + def __init__(self, sym=0, par=1, comm=None, cntl_options=None, icntl_options=None): self._nnz = None self._dim = None + self._mumps = None self._mumps = mumps.DMumpsContext(sym=sym, par=par, comm=comm) self._mumps.set_silent() self._icntl_options = dict() @@ -74,18 +79,18 @@ def _init(self): self.set_cntl(k, v) for k, v in self._icntl_options.items(): self.set_icntl(k, v) - + def do_symbolic_factorization( self, matrix: Union[spmatrix, BlockMatrix], raise_on_error: bool = True ) -> LinearSolverResults: """ - Perform Mumps analysis. + Perform Mumps analysis. Parameters ---------- matrix: scipy.sparse.spmatrix or pyomo.contrib.pynumero.sparse.BlockMatrix This matrix must have the same nonzero structure as the matrix passed into - do_numeric_factorization. The matrix will be converted to coo format if it + do_numeric_factorization. The matrix will be converted to coo format if it is not already in coo format. If sym is 1 or 2, the matrix will be converted to lower triangular. """ @@ -101,10 +106,11 @@ def do_symbolic_factorization( self._nnz = matrix.nnz try: self._mumps.set_shape(nrows) - self._mumps.set_centralized_assembled_rows_cols(matrix.row + 1, matrix.col + 1) + self._mumps.set_centralized_assembled_rows_cols( + matrix.row + 1, matrix.col + 1 + ) self._mumps.run(job=1) - self._prev_allocation = max(self.get_infog(16), - self.get_icntl(23)) + self._prev_allocation = max(self.get_infog(16), self.get_icntl(23)) # INFOG(16) is the Mumps estimate for memory usage; ICNTL(23) # is the override used in increase_memory_allocation. Both are # already rounded to MB, so neither should every be negative. @@ -128,14 +134,14 @@ def do_numeric_factorization( self, matrix: Union[spmatrix, BlockMatrix], raise_on_error: bool = True ) -> LinearSolverResults: """ - Perform Mumps factorization. Note that do_symbolic_factorization should be called - before do_numeric_factorization. + Perform Mumps factorization. Note that do_symbolic_factorization should be called + before do_numeric_factorization. Parameters ---------- matrix: scipy.sparse.spmatrix or pyomo.contrib.pynumero.sparse.BlockMatrix This matrix must have the same nonzero structure as the matrix passed into - do_symbolic_factorization. The matrix will be converted to coo format if it + do_symbolic_factorization. The matrix will be converted to coo format if it is not already in coo format. If sym is 1 or 2, the matrix will be converted to lower triangular. """ @@ -149,9 +155,13 @@ def do_numeric_factorization( if nrows != ncols: raise ValueError('matrix is not square') if self._dim != nrows: - raise ValueError('The shape of the matrix changed between symbolic and numeric factorization') + raise ValueError( + 'The shape of the matrix changed between symbolic and numeric factorization' + ) if self._nnz != matrix.nnz: - raise ValueError('The number of nonzeros changed between symbolic and numeric factorization') + raise ValueError( + 'The number of nonzeros changed between symbolic and numeric factorization' + ) try: self._mumps.set_centralized_assembled_values(matrix.data) self._mumps.run(job=2) @@ -174,22 +184,21 @@ def do_numeric_factorization( return res def do_back_solve( - self, rhs: Union[np.ndarray, BlockVector], - raise_on_error: bool = True + self, rhs: Union[np.ndarray, BlockVector], raise_on_error: bool = True ) -> Tuple[Optional[Union[np.ndarray, BlockVector]], LinearSolverResults]: """ - Perform back solve with Mumps. Note that both do_symbolic_factorization and - do_numeric_factorization should be called before do_back_solve. + Perform back solve with Mumps. Note that both do_symbolic_factorization and + do_numeric_factorization should be called before do_back_solve. Parameters ---------- rhs: numpy.ndarray or pyomo.contrib.pynumero.sparse.BlockVector The right hand side in matrix * x = rhs. - + Returns ------- result: numpy.ndarray or pyomo.contrib.pynumero.sparse.BlockVector - The x in matrix * x = rhs. If rhs is a BlockVector, then, result + The x in matrix * x = rhs. If rhs is a BlockVector, then, result will be a BlockVector with the same block structure as rhs. """ if isinstance(rhs, BlockVector): @@ -205,7 +214,7 @@ def do_back_solve( _result = rhs.copy_structure() _result.copyfrom(result) result = _result - + return result, LinearSolverResults(LinearSolverStatus.successful) def increase_memory_allocation(self, factor): @@ -213,7 +222,7 @@ def increase_memory_allocation(self, factor): if self._prev_allocation == 0: new_allocation = 1 else: - new_allocation = int(factor*self._prev_allocation) + new_allocation = int(factor * self._prev_allocation) # Here I set the memory allocation directly instead of increasing # the "percent-increase-from-predicted" parameter ICNTL(14) self.set_icntl(23, new_allocation) @@ -221,7 +230,8 @@ def increase_memory_allocation(self, factor): return new_allocation def __del__(self): - self._mumps.destroy() + if getattr(self, '_mumps', None) is not None: + self._mumps.destroy() def set_icntl(self, key, value): self._icntl_options[key] = value diff --git a/pyomo/contrib/pynumero/linalg/tests/test_ma27.py b/pyomo/contrib/pynumero/linalg/tests/test_ma27.py index 748bcfbb27c..5a02871306a 100644 --- a/pyomo/contrib/pynumero/linalg/tests/test_ma27.py +++ b/pyomo/contrib/pynumero/linalg/tests/test_ma27.py @@ -10,6 +10,7 @@ # ___________________________________________________________________________ import pyomo.common.unittest as unittest from pyomo.contrib.pynumero.dependencies import numpy as np, numpy_available + if not numpy_available: raise unittest.SkipTest('pynumero MA27 tests require numpy') import ctypes @@ -18,19 +19,18 @@ @unittest.skipIf(not MA27Interface.available(), reason='MA27 not available') class TestMA27Interface(unittest.TestCase): - def test_get_cntl(self): ma27 = MA27Interface() self.assertEqual(ma27.get_icntl(1), 6) - self.assertAlmostEqual(ma27.get_cntl(1), 1e-1) # Numerical pivot threshold - self.assertAlmostEqual(ma27.get_cntl(3), 0.0) # Null pivot threshold + self.assertAlmostEqual(ma27.get_cntl(1), 1e-1) # Numerical pivot threshold + self.assertAlmostEqual(ma27.get_cntl(3), 0.0) # Null pivot threshold def test_set_icntl(self): ma27 = MA27Interface() - ma27.set_icntl(5, 4) # Set output printing to max verbosity - ma27.set_icntl(8, 1) # Keep factors when we run out of space - # (so MA27ED can be used) + ma27.set_icntl(5, 4) # Set output printing to max verbosity + ma27.set_icntl(8, 1) # Keep factors when we run out of space + # (so MA27ED can be used) icntl5 = ma27.get_icntl(5) icntl8 = ma27.get_icntl(8) self.assertEqual(icntl5, 4) @@ -55,21 +55,21 @@ def test_do_symbolic_factorization(self): n = 5 ne = 7 - irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) - icn = np.array([1,2,3,5,3,4,5], dtype=np.intc) + irn = np.array([1, 1, 2, 2, 3, 3, 5], dtype=np.intc) + icn = np.array([1, 2, 3, 5, 3, 4, 5], dtype=np.intc) # These arrays, copied out of HSL docs, contain Fortran indices. # Interfaces accept C indices as this is what I typically expect. irn = irn - 1 icn = icn - 1 - bad_icn = np.array([1,2,3,5,3,4], dtype=np.intc) + bad_icn = np.array([1, 2, 3, 5, 3, 4], dtype=np.intc) # ^No need to update these indices ma27.do_symbolic_factorization(n, irn, icn) self.assertEqual(ma27.get_info(1), 0) - self.assertEqual(ma27.get_info(5), 14) # Min required num. integer words - self.assertEqual(ma27.get_info(6), 20) # Min required num. real words + self.assertEqual(ma27.get_info(5), 14) # Min required num. integer words + self.assertEqual(ma27.get_info(6), 20) # Min required num. real words with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'): ma27.do_symbolic_factorization(n, irn, bad_icn) @@ -79,22 +79,22 @@ def test_do_numeric_factorization(self): n = 5 ne = 7 - irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) - icn = np.array([1,2,3,5,3,4,5], dtype=np.intc) + irn = np.array([1, 1, 2, 2, 3, 3, 5], dtype=np.intc) + icn = np.array([1, 2, 3, 5, 3, 4, 5], dtype=np.intc) irn = irn - 1 icn = icn - 1 - ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double) + ent = np.array([2.0, 3.0, 4.0, 6.0, 1.0, 5.0, 1.0], dtype=np.double) ma27.do_symbolic_factorization(n, irn, icn) status = ma27.do_numeric_factorization(irn, icn, n, ent) self.assertEqual(status, 0) - expected_ent = [2.,3.,4.,6.,1.,5.,1.,] + expected_ent = [2.0, 3.0, 4.0, 6.0, 1.0, 5.0, 1.0] for i in range(ne): self.assertAlmostEqual(ent[i], expected_ent[i]) - - self.assertEqual(ma27.get_info(15), 2) # 2 negative eigenvalues - self.assertEqual(ma27.get_info(14), 1) # 1 2x2 pivot + + self.assertEqual(ma27.get_info(15), 2) # 2 negative eigenvalues + self.assertEqual(ma27.get_info(14), 1) # 1 2x2 pivot # Check that we can successfully perform another numeric factorization # with same symbolic factorization @@ -104,17 +104,17 @@ def test_do_numeric_factorization(self): self.assertEqual(status, 0) with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'): - ma27.do_numeric_factorization(irn, icn, n+1, ent) + ma27.do_numeric_factorization(irn, icn, n + 1, ent) - # Check that we can successfully perform another symbolic and + # Check that we can successfully perform another symbolic and # numeric factorization with the same ma27 struct # # n is still 5, ne has changed to 8. - irn = np.array([1,1,2,2,3,3,5,1], dtype=np.intc) - icn = np.array([1,2,3,5,3,4,5,5], dtype=np.intc) + irn = np.array([1, 1, 2, 2, 3, 3, 5, 1], dtype=np.intc) + icn = np.array([1, 2, 3, 5, 3, 4, 5, 5], dtype=np.intc) irn = irn - 1 icn = icn - 1 - ent = np.array([2.,3.,4.,6.,1.,5.,1.,3.], dtype=np.double) + ent = np.array([2.0, 3.0, 4.0, 6.0, 1.0, 5.0, 1.0, 3.0], dtype=np.double) status = ma27.do_symbolic_factorization(n, irn, icn) self.assertEqual(status, 0) status = ma27.do_numeric_factorization(irn, icn, n, ent) @@ -126,33 +126,33 @@ def test_do_backsolve(self): n = 5 ne = 7 - irn = np.array([1, 1, 2, 2, 3, 3, 5], dtype=np.intc) - icn = np.array([1, 2, 3, 5, 3, 4, 5], dtype=np.intc) - ent = np.array([2., 3., 4., 6., 1., 5., 1.], dtype=np.double) + irn = np.array([1, 1, 2, 2, 3, 3, 5], dtype=np.intc) + icn = np.array([1, 2, 3, 5, 3, 4, 5], dtype=np.intc) + ent = np.array([2.0, 3.0, 4.0, 6.0, 1.0, 5.0, 1.0], dtype=np.double) irn = irn - 1 icn = icn - 1 - rhs = np.array([8.,45.,31.,15.,17.], dtype=np.double) + rhs = np.array([8.0, 45.0, 31.0, 15.0, 17.0], dtype=np.double) status = ma27.do_symbolic_factorization(n, irn, icn) status = ma27.do_numeric_factorization(irn, icn, n, ent) sol = ma27.do_backsolve(rhs) - expected_sol = [1,2,3,4,5] - old_rhs = np.array([8.,45.,31.,15.,17.]) + expected_sol = [1, 2, 3, 4, 5] + old_rhs = np.array([8.0, 45.0, 31.0, 15.0, 17.0]) for i in range(n): self.assertAlmostEqual(sol[i], expected_sol[i]) self.assertEqual(old_rhs[i], rhs[i]) # Check that we can perform a numeric factorization with different ordering - irn_mod = np.array([1, 2, 2, 1, 3, 3, 5], dtype=np.intc) - icn_mod = np.array([2, 3, 5, 1, 3, 4, 5], dtype=np.intc) - ent_mod = np.array([3., 4., 6., 2., 1., 5., 1.], dtype=np.double) + irn_mod = np.array([1, 2, 2, 1, 3, 3, 5], dtype=np.intc) + icn_mod = np.array([2, 3, 5, 1, 3, 4, 5], dtype=np.intc) + ent_mod = np.array([3.0, 4.0, 6.0, 2.0, 1.0, 5.0, 1.0], dtype=np.double) irn_mod -= 1 icn_mod -= 1 status = ma27.do_numeric_factorization(irn_mod, icn_mod, n, ent_mod) sol = ma27.do_backsolve(rhs) self.assertTrue(np.allclose(sol, np.array(expected_sol))) - # Check that we can perform a numeric factorization with differnt lengths + # Check that we can perform a numeric factorization with different lengths # due to extra zero entries irn_mod = irn_mod[1:] icn_mod = icn_mod[1:] diff --git a/pyomo/contrib/pynumero/linalg/tests/test_ma57.py b/pyomo/contrib/pynumero/linalg/tests/test_ma57.py index d0826f9a2ef..86dbbd3ca50 100644 --- a/pyomo/contrib/pynumero/linalg/tests/test_ma57.py +++ b/pyomo/contrib/pynumero/linalg/tests/test_ma57.py @@ -11,6 +11,7 @@ import ctypes import pyomo.common.unittest as unittest from pyomo.contrib.pynumero.dependencies import numpy as np, numpy_available + if not numpy_available: raise unittest.SkipTest('pynumero MA27 tests require numpy') from pyomo.contrib.pynumero.linalg.ma57 import MA57Interface @@ -18,20 +19,19 @@ @unittest.skipIf(not MA57Interface.available(), reason='MA57 not available') class TestMA57Interface(unittest.TestCase): - def test_get_cntl(self): ma57 = MA57Interface() self.assertEqual(ma57.get_icntl(1), 6) self.assertEqual(ma57.get_icntl(7), 1) - self.assertAlmostEqual(ma57.get_cntl(1), 1e-2) # Numerical pivot threshold - self.assertAlmostEqual(ma57.get_cntl(2), 1e-20) # Null pivot threshold + self.assertAlmostEqual(ma57.get_cntl(1), 1e-2) # Numerical pivot threshold + self.assertAlmostEqual(ma57.get_cntl(2), 1e-20) # Null pivot threshold def test_set_icntl(self): ma57 = MA57Interface() - ma57.set_icntl(5, 4) # Set output printing to max verbosity - ma57.set_icntl(8, 1) # Keep factors when we run out of space - # (so MA57ED can be used) + ma57.set_icntl(5, 4) # Set output printing to max verbosity + ma57.set_icntl(8, 1) # Keep factors when we run out of space + # (so MA57ED can be used) icntl5 = ma57.get_icntl(5) icntl8 = ma57.get_icntl(8) self.assertEqual(icntl5, 4) @@ -56,22 +56,22 @@ def test_do_symbolic_factorization(self): n = 5 ne = 7 - irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) - jcn = np.array([1,2,3,5,3,4,5], dtype=np.intc) - # Copied these Fortran-style indices from HSL docs. + irn = np.array([1, 1, 2, 2, 3, 3, 5], dtype=np.intc) + jcn = np.array([1, 2, 3, 5, 3, 4, 5], dtype=np.intc) + # Copied these Fortran-style indices from HSL docs. # Interface expects C-style indices, as is typical in Python. irn = irn - 1 jcn = jcn - 1 - bad_jcn = np.array([1,2,3,5,3,4], dtype=np.intc) + bad_jcn = np.array([1, 2, 3, 5, 3, 4], dtype=np.intc) ma57.do_symbolic_factorization(n, irn, jcn) self.assertEqual(ma57.get_info(1), 0) self.assertEqual(ma57.get_info(4), 0) - self.assertEqual(ma57.get_info(9), 48) # Min required length of FACT - self.assertEqual(ma57.get_info(10), 53) # Min required length of IFACT - self.assertEqual(ma57.get_info(14), 0) # Should not yet be set + self.assertEqual(ma57.get_info(9), 48) # Min required length of FACT + self.assertEqual(ma57.get_info(10), 53) # Min required length of IFACT + self.assertEqual(ma57.get_info(14), 0) # Should not yet be set with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'): ma57.do_symbolic_factorization(n, irn, bad_jcn) @@ -81,11 +81,11 @@ def test_do_numeric_factorization(self): n = 5 ne = 7 - irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) - jcn = np.array([1,2,3,5,3,4,5], dtype=np.intc) + irn = np.array([1, 1, 2, 2, 3, 3, 5], dtype=np.intc) + jcn = np.array([1, 2, 3, 5, 3, 4, 5], dtype=np.intc) irn = irn - 1 jcn = jcn - 1 - ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double) + ent = np.array([2.0, 3.0, 4.0, 6.0, 1.0, 5.0, 1.0], dtype=np.double) ma57.do_symbolic_factorization(n, irn, jcn) ma57.fact_factor = 1.5 ma57.ifact_factor = 1.5 @@ -94,29 +94,29 @@ def test_do_numeric_factorization(self): status = ma57.do_numeric_factorization(n, ent) self.assertEqual(status, 0) - - self.assertEqual(ma57.get_info(14), 12) # 12 entries in factors - self.assertEqual(ma57.get_info(24), 2) # 2 negative eigenvalues - self.assertEqual(ma57.get_info(22), 1) # 1 2x2 pivot - self.assertEqual(ma57.get_info(23), 0) # 0 delayed pivots - ent2 = np.array([1.,5.,1.,6.,4.,3.,2.], dtype=np.double) + self.assertEqual(ma57.get_info(14), 12) # 12 entries in factors + self.assertEqual(ma57.get_info(24), 2) # 2 negative eigenvalues + self.assertEqual(ma57.get_info(22), 1) # 1 2x2 pivot + self.assertEqual(ma57.get_info(23), 0) # 0 delayed pivots + + ent2 = np.array([1.0, 5.0, 1.0, 6.0, 4.0, 3.0, 2.0], dtype=np.double) ma57.do_numeric_factorization(n, ent2) self.assertEqual(status, 0) - bad_ent = np.array([2.,3.,4.,6.,1.,5.], dtype=np.double) + bad_ent = np.array([2.0, 3.0, 4.0, 6.0, 1.0, 5.0], dtype=np.double) with self.assertRaisesRegex(AssertionError, 'Wrong number of entries'): ma57.do_numeric_factorization(n, bad_ent) with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'): - ma57.do_numeric_factorization(n+1, ent) + ma57.do_numeric_factorization(n + 1, ent) n = 5 ne = 8 - irn = np.array([1,1,2,2,3,3,5,5], dtype=np.intc) - jcn = np.array([1,2,3,5,3,4,5,1], dtype=np.intc) + irn = np.array([1, 1, 2, 2, 3, 3, 5, 5], dtype=np.intc) + jcn = np.array([1, 2, 3, 5, 3, 4, 5, 1], dtype=np.intc) irn = irn - 1 jcn = jcn - 1 - ent = np.array([2.,3.,4.,6.,1.,5.,1.,-1.3], dtype=np.double) + ent = np.array([2.0, 3.0, 4.0, 6.0, 1.0, 5.0, 1.0, -1.3], dtype=np.double) status = ma57.do_symbolic_factorization(n, irn, jcn) self.assertEqual(status, 0) status = ma57.do_numeric_factorization(n, ent) @@ -124,36 +124,35 @@ def test_do_numeric_factorization(self): self.assertEqual(ma57.get_info(24), 2) self.assertEqual(ma57.get_info(23), 0) - def test_do_backsolve(self): ma57 = MA57Interface() n = 5 ne = 7 - irn = np.array([1,1,2,2,3,3,5], dtype=np.intc) - jcn = np.array([1,2,3,5,3,4,5], dtype=np.intc) + irn = np.array([1, 1, 2, 2, 3, 3, 5], dtype=np.intc) + jcn = np.array([1, 2, 3, 5, 3, 4, 5], dtype=np.intc) irn = irn - 1 jcn = jcn - 1 - ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double) - rhs = np.array([8.,45.,31.,15.,17.], dtype=np.double) + ent = np.array([2.0, 3.0, 4.0, 6.0, 1.0, 5.0, 1.0], dtype=np.double) + rhs = np.array([8.0, 45.0, 31.0, 15.0, 17.0], dtype=np.double) status = ma57.do_symbolic_factorization(n, irn, jcn) status = ma57.do_numeric_factorization(n, ent) sol = ma57.do_backsolve(rhs) - expected_sol = [1,2,3,4,5] - old_rhs = np.array([8.,45.,31.,15.,17.]) + expected_sol = [1, 2, 3, 4, 5] + old_rhs = np.array([8.0, 45.0, 31.0, 15.0, 17.0]) for i in range(n): self.assertAlmostEqual(sol[i], expected_sol[i]) self.assertEqual(old_rhs[i], rhs[i]) - #rhs2 = np.array([[8., 17.], + # rhs2 = np.array([[8., 17.], # [45., 15.], # [31., 31.], # [15., 45.], # [17., 8.]], dtype=np.double) - #sol = ma57.do_backsolve(rhs2) + # sol = ma57.do_backsolve(rhs2) # FIXME - # This gives unexpected (incorrect) results. + # This gives unexpected (incorrect) results. # Need to investigate further. diff --git a/pyomo/contrib/pynumero/linalg/tests/test_mumps_interface.py b/pyomo/contrib/pynumero/linalg/tests/test_mumps_interface.py index 473c3de26ba..9b0aba96be1 100644 --- a/pyomo/contrib/pynumero/linalg/tests/test_mumps_interface.py +++ b/pyomo/contrib/pynumero/linalg/tests/test_mumps_interface.py @@ -9,6 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ import pyomo.common.unittest as unittest + try: import numpy as np from scipy.sparse import coo_matrix, tril @@ -16,18 +17,19 @@ raise unittest.SkipTest("Pynumero needs scipy and numpy to run linear solver tests") from pyomo.contrib.pynumero.linalg.mumps_interface import ( - mumps_available, MumpsCentralizedAssembledLinearSolver + mumps_available, + MumpsCentralizedAssembledLinearSolver, ) from pyomo.contrib.pynumero.sparse import BlockMatrix, BlockVector -@unittest.skipIf(not mumps_available, - "Pynumero needs pymumps to run linear solver tests") + +@unittest.skipIf( + not mumps_available, "Pynumero needs pymumps to run linear solver tests" +) class TestMumpsLinearSolver(unittest.TestCase): def test_mumps_linear_solver(self): - A = np.array([[ 1, 7, 3], - [ 7, 4, -5], - [ 3, -5, 6]], dtype=np.double) + A = np.array([[1, 7, 3], [7, 4, -5], [3, -5, 6]], dtype=np.double) A = coo_matrix(A) A_lower = tril(A) x1 = np.arange(3) + 1 @@ -57,12 +59,14 @@ def test_mumps_linear_solver(self): block_b1 = BlockVector(2) block_b1.set_block(0, b1[0:2]) block_b1.set_block(1, b1[2:]) - + block_b2 = BlockVector(2) block_b2.set_block(0, b2[0:2]) block_b2.set_block(1, b2[2:]) - solver = MumpsCentralizedAssembledLinearSolver(icntl_options={10: -3}, cntl_options={2: 1e-16}) + solver = MumpsCentralizedAssembledLinearSolver( + icntl_options={10: -3}, cntl_options={2: 1e-16} + ) solver.do_symbolic_factorization(block_A) solver.do_numeric_factorization(block_A) x, res = solver.do_back_solve(block_b1) diff --git a/pyomo/contrib/pynumero/linalg/utils.py b/pyomo/contrib/pynumero/linalg/utils.py index acb3eb930da..2b7a9e99142 100644 --- a/pyomo/contrib/pynumero/linalg/utils.py +++ b/pyomo/contrib/pynumero/linalg/utils.py @@ -9,25 +9,29 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def validate_index(i, array_len, array_name=''): if not isinstance(i, int): raise TypeError( - 'Index into %s array must be an integer. Got %s' - % (array_name, type(i))) + 'Index into %s array must be an integer. Got %s' % (array_name, type(i)) + ) if i < 1 or i > array_len: # NOTE: Use the FORTRAN indexing (same as documentation) to # set and access info/cntl arrays from Python, whereas C # functions use C indexing. Maybe this is too confusing. raise IndexError( 'Index %s is out of range for %s array of length %s' - % (i, array_name, array_len)) + % (i, array_name, array_len) + ) + def validate_value(val, dtype, array_name=''): if not isinstance(val, dtype): raise ValueError( 'Members of %s array must have type %s. Got %s' - % (array_name, dtype, type(val))) + % (array_name, dtype, type(val)) + ) + class _NotSet: pass - diff --git a/pyomo/contrib/pynumero/plugins.py b/pyomo/contrib/pynumero/plugins.py index cff8663de13..06bb0a5a059 100644 --- a/pyomo/contrib/pynumero/plugins.py +++ b/pyomo/contrib/pynumero/plugins.py @@ -13,10 +13,34 @@ from pyomo.opt import SolverFactory from .build import PyNumeroBuilder from .algorithms.solvers.cyipopt_solver import PyomoCyIpoptSolver +from .algorithms.solvers.scipy_solvers import ( + PyomoFsolveSolver, + PyomoRootSolver, + PyomoNewtonSolver, + PyomoSecantNewtonSolver, +) + def load(): ExtensionBuilderFactory.register('pynumero')(PyNumeroBuilder) SolverFactory.register( - 'cyipopt', - doc='Cyipopt: direct python bindings to the Ipopt NLP solver' + 'cyipopt', doc='Cyipopt: direct python bindings to the Ipopt NLP solver' )(PyomoCyIpoptSolver) + SolverFactory.register( + "scipy.fsolve", + doc=("fsolve: A SciPy wrapper around MINPACK's hybrd and hybrj algorithms"), + )(PyomoFsolveSolver) + SolverFactory.register( + "scipy.root", doc=("root: Find the root of a vector function") + )(PyomoRootSolver) + SolverFactory.register( + "scipy.newton", doc="newton: Find a zero of a scalar-valued function" + )(PyomoNewtonSolver) + SolverFactory.register( + "scipy.secant-newton", + doc=( + "secant-newton: Take a few secant iterations to try to converge" + " a potentially linear equation quickly, then switch to Newton's" + " method" + ), + )(PyomoSecantNewtonSolver) diff --git a/pyomo/contrib/pynumero/sparse/block_matrix.py b/pyomo/contrib/pynumero/sparse/block_matrix.py index c5938174c7d..97e090fec4c 100644 --- a/pyomo/contrib/pynumero/sparse/block_matrix.py +++ b/pyomo/contrib/pynumero/sparse/block_matrix.py @@ -22,7 +22,6 @@ """ -from scipy.sparse.sputils import get_index_dtype from pyomo.contrib.pynumero.sparse.block_vector import BlockVector from scipy.sparse import coo_matrix, csr_matrix, csc_matrix from scipy.sparse import isspmatrix @@ -44,12 +43,16 @@ class NotFullyDefinedBlockMatrixError(Exception): def assert_block_structure(mat): if mat.has_undefined_row_sizes(): - msgr = 'Operation not allowed with None rows. ' \ - 'Specify at least one block in every row' + msgr = ( + 'Operation not allowed with None rows. ' + 'Specify at least one block in every row' + ) raise NotFullyDefinedBlockMatrixError(msgr) if mat.has_undefined_col_sizes(): - msgc = 'Operation not allowed with None columns. ' \ - 'Specify at least one block every column' + msgc = ( + 'Operation not allowed with None columns. ' + 'Specify at least one block every column' + ) raise NotFullyDefinedBlockMatrixError(msgc) @@ -83,10 +86,10 @@ class BlockMatrix(BaseBlockMatrix): number of block-columns in the matrix """ + format = 'block_matrix' def __init__(self, nbrows, nbcols): - shape = (nbrows, nbcols) self._blocks = np.empty(shape, dtype='object') @@ -166,7 +169,11 @@ def row_block_sizes(self, copy=True): """ if self.has_undefined_row_sizes(): - raise NotFullyDefinedBlockMatrixError('Some block row lengths are not defined: {0}'.format(str(self._brow_lengths))) + raise NotFullyDefinedBlockMatrixError( + 'Some block row lengths are not defined: {0}'.format( + str(self._brow_lengths) + ) + ) if copy: return self._brow_lengths.copy() else: @@ -188,7 +195,11 @@ def col_block_sizes(self, copy=True): """ if self.has_undefined_col_sizes(): - raise NotFullyDefinedBlockMatrixError('Some block column lengths are not defined: {0}'.format(str(self._bcol_lengths))) + raise NotFullyDefinedBlockMatrixError( + 'Some block column lengths are not defined: {0}'.format( + str(self._bcol_lengths) + ) + ) if copy: return self._bcol_lengths.copy() else: @@ -196,12 +207,16 @@ def col_block_sizes(self, copy=True): def get_row_size(self, row): if row in self._undefined_brows: - raise NotFullyDefinedBlockMatrixError('The dimensions of the requested row are not defined.') + raise NotFullyDefinedBlockMatrixError( + 'The dimensions of the requested row are not defined.' + ) return int(self._brow_lengths[row]) def get_col_size(self, col): if col in self._undefined_bcols: - raise NotFullyDefinedBlockMatrixError('The dimensions of the requested column are not defined.') + raise NotFullyDefinedBlockMatrixError( + 'The dimensions of the requested column are not defined.' + ) return int(self._bcol_lengths[col]) def set_row_size(self, row, size): @@ -212,11 +227,13 @@ def set_row_size(self, row, size): self._brow_lengths = np.asarray(self._brow_lengths, dtype=np.int64) else: if self._brow_lengths[row] != size: - raise ValueError('Incompatible row dimensions for ' - 'row {row}; got {got}; ' - 'expected {exp}'.format(row=row, - got=size, - exp=self._brow_lengths[row])) + raise ValueError( + 'Incompatible row dimensions for ' + 'row {row}; got {got}; ' + 'expected {exp}'.format( + row=row, got=size, exp=self._brow_lengths[row] + ) + ) def set_col_size(self, col, size): if col in self._undefined_bcols: @@ -226,11 +243,13 @@ def set_col_size(self, col, size): self._bcol_lengths = np.asarray(self._bcol_lengths, dtype=np.int64) else: if self._bcol_lengths[col] != size: - raise ValueError('Incompatible column dimensions for ' - 'column {col}; got {got}; ' - 'expected {exp}'.format(col=col, - got=size, - exp=self._bcol_lengths[col])) + raise ValueError( + 'Incompatible column dimensions for ' + 'column {col}; got {got}; ' + 'expected {exp}'.format( + col=col, got=size, exp=self._bcol_lengths[col] + ) + ) def is_row_size_defined(self, row): return row not in self._undefined_brows @@ -373,15 +392,13 @@ def tocoo(self, copy=True): # create pointers for COO matrix (row, col, data) data = np.empty(nonzeros, dtype=dtype) - idx_dtype = get_index_dtype(maxval=max(shape)) - row = -np.ones(nonzeros, dtype=idx_dtype) - col = -np.ones(nonzeros, dtype=idx_dtype) + row = -np.ones(nonzeros, dtype=int) + col = -np.ones(nonzeros, dtype=int) # populate COO pointers nnz = 0 ii, jj = np.nonzero(self._block_mask) for i, j in zip(ii, jj): - B = self.get_block(i, j).tocoo() # get slice that contains all elements in current block idx = slice(nnz, nnz + B.nnz) @@ -476,23 +493,24 @@ def _mul_sparse_matrix(self, other): assert other.bshape[0] == self.bshape[1], "Dimension mismatch" result = BlockMatrix(self.bshape[0], other.bshape[1]) - # get dimenions from the other matrix + # get dimensions from the other matrix other_col_sizes = other.col_block_sizes(copy=False) # compute result for i in range(self.bshape[0]): for j in range(other.bshape[1]): - accum = coo_matrix((self._brow_lengths[i], - other_col_sizes[i])) + accum = coo_matrix((self._brow_lengths[i], other_col_sizes[i])) for k in range(self.bshape[1]): if self._block_mask[i, k] and not other.is_empty_block(k, j): - prod = self._blocks[i,k] * other.get_block(k, j) + prod = self._blocks[i, k] * other.get_block(k, j) accum = accum + prod result.set_block(i, j, accum) return result elif isspmatrix(other): - raise NotImplementedError('BlockMatrix multiply with spmatrix not supported. Multiply a BlockMatrix ' - 'with another BlockMatrix of compatible dimensions.') + raise NotImplementedError( + 'BlockMatrix multiply with spmatrix not supported. Multiply a BlockMatrix ' + 'with another BlockMatrix of compatible dimensions.' + ) else: raise NotImplementedError('Operation not supported by BlockMatrix') @@ -518,9 +536,13 @@ def transpose(self, axes=None, copy=True): It is difficult to support transpose without copying. A "TransposeView" object might be a better approach. """ if axes is not None: - raise ValueError(("Sparse matrices do not support " - "an 'axes' parameter because swapping " - "dimensions is the only logical permutation.")) + raise ValueError( + ( + "Sparse matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation." + ) + ) if not copy: raise ValueError('BlockMatrix only supports transpose with copy=True') @@ -594,8 +616,9 @@ def copyfrom(self, other, deep=True): """ assert_block_structure(self) if isinstance(other, BlockMatrix): - assert other.bshape == self.bshape, \ - 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape) + assert other.bshape == self.bshape, 'dimensions mismatch {} != {}'.format( + self.bshape, other.bshape + ) m, n = self.bshape if deep: @@ -611,8 +634,9 @@ def copyfrom(self, other, deep=True): self.set_block(i, j, other.get_block(i, j)) elif isspmatrix(other) or isinstance(other, np.ndarray): - assert other.shape == self.shape, \ - 'dimensions mismatch {} != {}'.format(self.shape, other.shape) + assert other.shape == self.shape, 'dimensions mismatch {} != {}'.format( + self.shape, other.shape + ) if isinstance(other, np.ndarray): # cast numpy.array to coo_matrix for ease of manipulation m = csr_matrix(other) @@ -628,9 +652,9 @@ def copyfrom(self, other, deep=True): # csc column slicing is fast # therefore, we do the row slice once for each row, then we convert to csc for the column slicing for i in range(self.bshape[0]): - mm = m[row_offsets[i]:row_offsets[i+1], :].tocsc() + mm = m[row_offsets[i] : row_offsets[i + 1], :].tocsc() for j in range(self.bshape[1]): - mmm = mm[:, col_offsets[j]:col_offsets[j+1]] + mmm = mm[:, col_offsets[j] : col_offsets[j + 1]] if self.is_empty_block(i, j) and mmm.nnz == 0: self.set_block(i, j, None) @@ -638,8 +662,10 @@ def copyfrom(self, other, deep=True): self.set_block(i, j, mmm) else: - raise NotImplementedError("Format not supported. BlockMatrix can only copy data from another BlockMatrix, " - "a numpy array, or a scipy sparse matrix.") + raise NotImplementedError( + "Format not supported. BlockMatrix can only copy data from another BlockMatrix, " + "a numpy array, or a scipy sparse matrix." + ) def copyto(self, other, deep=True): """ @@ -659,8 +685,9 @@ def copyto(self, other, deep=True): """ if isinstance(other, BlockMatrix): - assert other.bshape == self.bshape, \ - 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape) + assert other.bshape == self.bshape, 'dimensions mismatch {} != {}'.format( + self.bshape, other.bshape + ) if deep: m, n = self.bshape @@ -676,8 +703,9 @@ def copyto(self, other, deep=True): for j in range(n): other.set_block(i, j, self.get_block(i, j)) elif isspmatrix(other) or isinstance(other, np.ndarray): - assert other.shape == self.shape, \ - 'dimensions mismatch {} != {}'.format(self.shape, other.shape) + assert other.shape == self.shape, 'dimensions mismatch {} != {}'.format( + self.shape, other.shape + ) # create temporary matrix to copy tmp_matrix = self.tocoo() @@ -698,11 +726,15 @@ def copyto(self, other, deep=True): elif isinstance(other, np.ndarray): np.copyto(other, tmp_matrix.toarray()) else: - raise NotImplementedError("Format not supported. BlockMatrix can only copy data to another BlockMatrix, " - "a numpy array, or a scipy sparse coo, csr, or csc matrix.") + raise NotImplementedError( + "Format not supported. BlockMatrix can only copy data to another BlockMatrix, " + "a numpy array, or a scipy sparse coo, csr, or csc matrix." + ) else: - raise NotImplementedError("Format not supported. BlockMatrix can only copy data to another BlockMatrix, " - "a numpy array, or a scipy sparse coo, csr, or csc matrix.") + raise NotImplementedError( + "Format not supported. BlockMatrix can only copy data to another BlockMatrix, " + "a numpy array, or a scipy sparse coo, csr, or csc matrix." + ) def copy(self, deep=True): """ @@ -773,10 +805,24 @@ def _print(self, indent): else: block = self.get_block(idx, jdx) if isinstance(block, BlockMatrix): - msg += indent + str((idx, jdx)) + ': ' + block.__class__.__name__ + str(block.bshape) + '\n' - msg += block._print(indent=indent+' ') + msg += ( + indent + + str((idx, jdx)) + + ': ' + + block.__class__.__name__ + + str(block.bshape) + + '\n' + ) + msg += block._print(indent=indent + ' ') else: - msg += indent + str((idx, jdx)) + ': ' + block.__class__.__name__ + str(block.shape) + '\n' + msg += ( + indent + + str((idx, jdx)) + + ': ' + + block.__class__.__name__ + + str(block.shape) + + '\n' + ) return msg def __str__(self): @@ -784,8 +830,7 @@ def __str__(self): def get_block(self, row, col): assert row >= 0 and col >= 0, 'indices must be positive' - assert row < self.bshape[0] and \ - col < self.bshape[1], 'Indices out of range' + assert row < self.bshape[0] and col < self.bshape[1], 'Indices out of range' return self._blocks[row, col] def set_block(self, row, col, value): @@ -808,7 +853,9 @@ def set_block(self, row, col, value): warnings.warn(msg) value = coo_matrix(value) else: - assert isspmatrix(value), 'blocks need to be sparse matrices or BlockMatrices' + assert isspmatrix( + value + ), 'blocks need to be sparse matrices or BlockMatrices' nrows, ncols = value.shape self.set_row_size(row, nrows) @@ -817,25 +864,33 @@ def set_block(self, row, col, value): self._block_mask[row, col] = True def __getitem__(self, item): - raise NotImplementedError('BlockMatrix does not support __getitem__. ' - 'Use get_block or set_block to access sub-blocks.') + raise NotImplementedError( + 'BlockMatrix does not support __getitem__. ' + 'Use get_block or set_block to access sub-blocks.' + ) def __setitem__(self, item, val): - raise NotImplementedError('BlockMatrix does not support __setitem__. ' - 'Use get_block or set_block to access sub-blocks.') + raise NotImplementedError( + 'BlockMatrix does not support __setitem__. ' + 'Use get_block or set_block to access sub-blocks.' + ) def _binary_operation_helper(self, other, operation): assert_block_structure(self) result = BlockMatrix(self.bshape[0], self.bshape[1]) if isinstance(other, BlockMatrix): - assert other.bshape == self.bshape, \ - 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape) - assert other.shape == self.shape, \ - 'dimensions mismatch {} != {}'.format(self.shape, other.shape) + assert other.bshape == self.bshape, 'dimensions mismatch {} != {}'.format( + self.bshape, other.bshape + ) + assert other.shape == self.shape, 'dimensions mismatch {} != {}'.format( + self.shape, other.shape + ) assert_block_structure(other) - block_indices = np.bitwise_or(self.get_block_mask(copy=False), other.get_block_mask(copy=False)) + block_indices = np.bitwise_or( + self.get_block_mask(copy=False), other.get_block_mask(copy=False) + ) for i, j in zip(*np.nonzero(block_indices)): mat1 = self.get_block(i, j) mat2 = other.get_block(i, j) @@ -891,21 +946,20 @@ def __mul__(self, other): for i in range(bm): result.set_block(i, np.zeros(self._brow_lengths[i])) for i, j in zip(*np.nonzero(self._block_mask)): - x = other.get_block(j) - A = self._blocks[i, j] - blk = result.get_block(i) - _tmp = A*x - _tmp += blk - result.set_block(i, _tmp) + x = other.get_block(j) + A = self._blocks[i, j] + blk = result.get_block(i) + _tmp = A * x + _tmp += blk + result.set_block(i, _tmp) return result elif isinstance(other, np.ndarray): - if other.ndim != 1: raise NotImplementedError('Operation not supported by BlockMatrix') - assert self.shape[1] == other.shape[0], \ - 'Dimension mismatch {}!={}'.format(self.shape[1], - other.shape[0]) + assert self.shape[1] == other.shape[0], 'Dimension mismatch {}!={}'.format( + self.shape[1], other.shape[0] + ) assert_block_structure(self) nblocks = self.bshape[0] @@ -916,7 +970,7 @@ def __mul__(self, other): for j in range(bn): if not self.is_empty_block(i, j): A = self._blocks[i, j] - x = other[counter: counter + A.shape[1]] + x = other[counter : counter + A.shape[1]] blk = result.get_block(i) blk += A * x counter += self.get_col_size(j) @@ -949,7 +1003,9 @@ def __rmul__(self, other): result.set_block(i, j, self._blocks[i, j] * other) return result elif isspmatrix(other): - raise NotImplementedError('sparse matrix times block matrix is not supported.') + raise NotImplementedError( + 'sparse matrix times block matrix is not supported.' + ) else: raise NotImplementedError('Operation not supported by BlockMatrix') @@ -964,12 +1020,13 @@ def __abs__(self): return res def __iadd__(self, other): - if isinstance(other, BlockMatrix): - assert other.bshape == self.bshape, \ - 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape) - assert other.shape == self.shape, \ - 'dimensions mismatch {} != {}'.format(self.shape, other.shape) + assert other.bshape == self.bshape, 'dimensions mismatch {} != {}'.format( + self.bshape, other.bshape + ) + assert other.shape == self.shape, 'dimensions mismatch {} != {}'.format( + self.shape, other.shape + ) iterator = set(zip(*np.nonzero(self._block_mask))) iterator.update(zip(*np.nonzero(other._block_mask))) @@ -988,12 +1045,13 @@ def __iadd__(self, other): raise NotImplementedError('Operation not supported by BlockMatrix') def __isub__(self, other): - if isinstance(other, BlockMatrix): - assert other.bshape == self.bshape, \ - 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape) - assert other.shape == self.shape, \ - 'dimensions mismatch {} != {}'.format(self.shape, other.shape) + assert other.bshape == self.bshape, 'dimensions mismatch {} != {}'.format( + self.bshape, other.bshape + ) + assert other.shape == self.shape, 'dimensions mismatch {} != {}'.format( + self.shape, other.shape + ) iterator = set(zip(*np.nonzero(self._block_mask))) iterator.update(zip(*np.nonzero(other._block_mask))) @@ -1001,7 +1059,9 @@ def __isub__(self, other): if not self.is_empty_block(i, j) and not other.is_empty_block(i, j): self._blocks[i, j] -= other.get_block(i, j) elif not other.is_empty_block(i, j): - self.set_block(i, j, -other.get_block(i, j)) # the copy happens in __neg__ of other.get_block(i, j) + self.set_block( + i, j, -other.get_block(i, j) + ) # the copy happens in __neg__ of other.get_block(i, j) return self elif isspmatrix(other): # Note: this is not efficient but is just for flexibility. @@ -1054,7 +1114,9 @@ def _comparison_helper(self, operation, other): for i in range(m): for j in range(n): if not self.is_empty_block(i, j) and not other.is_empty_block(i, j): - result.set_block(i, j, operation(self._blocks[i, j], other.get_block(i, j))) + result.set_block( + i, j, operation(self._blocks[i, j], other.get_block(i, j)) + ) else: nrows = self._brow_lengths[i] ncols = self._bcol_lengths[j] @@ -1062,13 +1124,17 @@ def _comparison_helper(self, operation, other): if not self.is_empty_block(i, j): result.set_block(i, j, operation(self._blocks[i, j], mat)) elif not other.is_empty_block(i, j): - result.set_block(i, j, operation(mat, other.get_block(i, j))) + result.set_block( + i, j, operation(mat, other.get_block(i, j)) + ) else: result.set_block(i, j, operation(mat, mat)) return result elif isinstance(other, BlockMatrix) or isspmatrix(other): if isinstance(other, BlockMatrix): - raise NotImplementedError('Operation supported with same block structure only') + raise NotImplementedError( + 'Operation supported with same block structure only' + ) else: raise NotImplementedError('Operation not supported by BlockMatrix') elif np.isscalar(other): @@ -1080,8 +1146,8 @@ def _comparison_helper(self, operation, other): else: nrows = self._brow_lengths[i] ncols = self._bcol_lengths[j] - matc = coo_matrix((nrows, ncols)) - result.set_block(i, j, operation(matc, other)) + mat = coo_matrix((nrows, ncols)) + result.set_block(i, j, operation(mat, other)) return result else: if other.__class__.__name__ == 'MPIBlockMatrix': @@ -1129,15 +1195,17 @@ def get_block_column_index(self, index): int """ - msgc = 'Operation not allowed with None columns. ' \ - 'Specify at least one block in every column' + msgc = ( + 'Operation not allowed with None columns. ' + 'Specify at least one block in every column' + ) assert not self.has_undefined_col_sizes(), msgc bm, bn = self.bshape - # get cummulative sum of block sizes + # get cumulative sum of block sizes cum = self._bcol_lengths.cumsum() assert index >= 0, 'index out of bounds' - assert index < cum[bn-1], 'index out of bounds' + assert index < cum[bn - 1], 'index out of bounds' # exits if only has one column if bn <= 1: @@ -1165,15 +1233,17 @@ def get_block_row_index(self, index): int """ - msgr = 'Operation not allowed with None rows. ' \ - 'Specify at least one block in every row' + msgr = ( + 'Operation not allowed with None rows. ' + 'Specify at least one block in every row' + ) assert not self.has_undefined_row_sizes(), msgr bm, bn = self.bshape - # get cummulative sum of block sizes + # get cumulative sum of block sizes cum = self._brow_lengths.cumsum() - assert index >=0, 'index out of bounds' - assert index < cum[bm-1], 'index out of bounds' + assert index >= 0, 'index out of bounds' + assert index < cum[bm - 1], 'index out of bounds' # exits if only has one column if bm <= 1: @@ -1213,7 +1283,7 @@ def getcol(self, j): offset = 0 if bcol > 0: cum_sum = self._bcol_lengths.cumsum() - offset = cum_sum[bcol-1] + offset = cum_sum[bcol - 1] # build block vector result = BlockVector(bm) @@ -1223,10 +1293,10 @@ def getcol(self, j): v = np.zeros(self._brow_lengths[i]) elif isinstance(mat, BaseBlockMatrix): # this will return a block vector - v = mat.getcol(j-offset) + v = mat.getcol(j - offset) else: # if it is sparse matrix transform array to vector - v = mat.getcol(j-offset).toarray().flatten() + v = mat.getcol(j - offset).toarray().flatten() result.set_block(i, v) return result @@ -1256,7 +1326,7 @@ def getrow(self, i): offset = 0 if brow > 0: cum_sum = self._brow_lengths.cumsum() - offset = cum_sum[brow-1] + offset = cum_sum[brow - 1] # build block vector result = BlockVector(bn) @@ -1266,9 +1336,9 @@ def getrow(self, i): v = np.zeros(self._bcol_lengths[j]) elif isinstance(mat, BaseBlockMatrix): # this will return a block vector - v = mat.getcol(i-offset) + v = mat.getcol(i - offset) else: # if it is sparse matrix transform array to vector - v = mat.getcol(i-offset).toarray().flatten() + v = mat.getcol(i - offset).toarray().flatten() result.set_block(j, v) return result diff --git a/pyomo/contrib/pynumero/sparse/block_vector.py b/pyomo/contrib/pynumero/sparse/block_vector.py index cc5617f1479..00733a71752 100644 --- a/pyomo/contrib/pynumero/sparse/block_vector.py +++ b/pyomo/contrib/pynumero/sparse/block_vector.py @@ -111,30 +111,81 @@ def __array_wrap__(self, out_arr, context=None): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): """Runs ufuncs speciallizations to BlockVector""" # functions that take one vector - unary_funcs = [np.log10, np.sin, np.cos, np.exp, np.ceil, - np.floor, np.tan, np.arctan, np.arcsin, - np.arccos, np.sinh, np.cosh, np.abs, - np.tanh, np.arccosh, np.arcsinh, np.arctanh, - np.fabs, np.sqrt, np.log, np.log2, np.absolute, - np.isfinite, np.isinf, np.isnan, np.log1p, - np.logical_not, np.expm1, np.exp2, np.sign, - np.rint, np.square, np.positive, np.negative, - np.rad2deg, np.deg2rad, np.conjugate, np.reciprocal, - np.signbit] + unary_funcs = [ + np.log10, + np.sin, + np.cos, + np.exp, + np.ceil, + np.floor, + np.tan, + np.arctan, + np.arcsin, + np.arccos, + np.sinh, + np.cosh, + np.abs, + np.tanh, + np.arccosh, + np.arcsinh, + np.arctanh, + np.fabs, + np.sqrt, + np.log, + np.log2, + np.absolute, + np.isfinite, + np.isinf, + np.isnan, + np.log1p, + np.logical_not, + np.expm1, + np.exp2, + np.sign, + np.rint, + np.square, + np.positive, + np.negative, + np.rad2deg, + np.deg2rad, + np.conjugate, + np.reciprocal, + np.signbit, + ] # functions that take two vectors - binary_funcs = [np.add, np.multiply, np.divide, np.subtract, - np.greater, np.greater_equal, np.less, np.less_equal, - np.not_equal, np.maximum, np.minimum, np.fmax, - np.fmin, np.equal, np.logical_and, - np.logical_or, np.logical_xor, np.logaddexp, - np.logaddexp2, np.remainder, np.heaviside, - np.hypot] + binary_funcs = [ + np.add, + np.multiply, + np.divide, + np.subtract, + np.greater, + np.greater_equal, + np.less, + np.less_equal, + np.not_equal, + np.maximum, + np.minimum, + np.fmax, + np.fmin, + np.equal, + np.logical_and, + np.logical_or, + np.logical_xor, + np.logaddexp, + np.logaddexp2, + np.remainder, + np.heaviside, + np.hypot, + ] args = [input_ for i, input_ in enumerate(inputs)] outputs = kwargs.pop('out', None) if outputs is not None: - raise NotImplementedError(str(ufunc) + ' cannot be used with BlockVector if the out keyword argument is given.') + raise NotImplementedError( + str(ufunc) + + ' cannot be used with BlockVector if the out keyword argument is given.' + ) if ufunc in unary_funcs: results = self._unary_operation(ufunc, method, *args, **kwargs) @@ -156,8 +207,9 @@ def _unary_operation(self, ufunc, method, *args, **kwargs): v.set_block(i, self._unary_operation(ufunc, method, *_args, **kwargs)) return v elif type(x) == np.ndarray: - return super(BlockVector, self).__array_ufunc__(ufunc, method, - *args, **kwargs) + return super(BlockVector, self).__array_ufunc__( + ufunc, method, *args, **kwargs + ) else: raise NotImplementedError() @@ -169,57 +221,90 @@ def _binary_operation(self, ufunc, method, *args, **kwargs): if isinstance(x1, BlockVector) and isinstance(x2, BlockVector): assert_block_structure(x1) assert_block_structure(x2) - assert x1.nblocks == x2.nblocks, \ - 'Operation on BlockVectors need the same number of blocks on each operand' - assert x1.size == x2.size, \ - 'Dimension missmatch {}!={}'.format(x1.size, x2.size) + assert ( + x1.nblocks == x2.nblocks + ), 'Operation on BlockVectors need the same number of blocks on each operand' + assert x1.size == x2.size, 'Dimension mismatch {}!={}'.format( + x1.size, x2.size + ) res = BlockVector(x1.nblocks) for i in range(x1.nblocks): - _args = [x1.get_block(i)] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))] - res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) + _args = ( + [x1.get_block(i)] + + [x2.get_block(i)] + + [args[j] for j in range(2, len(args))] + ) + res.set_block( + i, self._binary_operation(ufunc, method, *_args, **kwargs) + ) return res - elif type(x1)==np.ndarray and isinstance(x2, BlockVector): + elif type(x1) == np.ndarray and isinstance(x2, BlockVector): assert_block_structure(x2) - assert x1.size == x2.size, \ - 'Dimension missmatch {}!={}'.format(x1.size, x2.size) + assert x1.size == x2.size, 'Dimension mismatch {}!={}'.format( + x1.size, x2.size + ) res = BlockVector(x2.nblocks) accum = 0 for i in range(x2.nblocks): nelements = x2._brow_lengths[i] - _args = [x1[accum: accum + nelements]] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))] - res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) + _args = ( + [x1[accum : accum + nelements]] + + [x2.get_block(i)] + + [args[j] for j in range(2, len(args))] + ) + res.set_block( + i, self._binary_operation(ufunc, method, *_args, **kwargs) + ) accum += nelements return res - elif type(x2)==np.ndarray and isinstance(x1, BlockVector): + elif type(x2) == np.ndarray and isinstance(x1, BlockVector): assert_block_structure(x1) - assert x1.size == x2.size, \ - 'Dimension missmatch {}!={}'.format(x1.size, x2.size) + assert x1.size == x2.size, 'Dimension mismatch {}!={}'.format( + x1.size, x2.size + ) res = BlockVector(x1.nblocks) accum = 0 for i in range(x1.nblocks): nelements = x1._brow_lengths[i] - _args = [x1.get_block(i)] + [x2[accum: accum + nelements]] + [args[j] for j in range(2, len(args))] - res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) + _args = ( + [x1.get_block(i)] + + [x2[accum : accum + nelements]] + + [args[j] for j in range(2, len(args))] + ) + res.set_block( + i, self._binary_operation(ufunc, method, *_args, **kwargs) + ) accum += nelements return res elif np.isscalar(x1) and isinstance(x2, BlockVector): assert_block_structure(x2) res = BlockVector(x2.nblocks) for i in range(x2.nblocks): - _args = [x1] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))] - res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) + _args = ( + [x1] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))] + ) + res.set_block( + i, self._binary_operation(ufunc, method, *_args, **kwargs) + ) return res elif np.isscalar(x2) and isinstance(x1, BlockVector): assert_block_structure(x1) res = BlockVector(x1.nblocks) for i in range(x1.nblocks): - _args = [x1.get_block(i)] + [x2] + [args[j] for j in range(2, len(args))] - res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) + _args = ( + [x1.get_block(i)] + [x2] + [args[j] for j in range(2, len(args))] + ) + res.set_block( + i, self._binary_operation(ufunc, method, *_args, **kwargs) + ) return res - elif (type(x1)==np.ndarray or np.isscalar(x1)) and (type(x2)==np.ndarray or np.isscalar(x2)): - return super(BlockVector, self).__array_ufunc__(ufunc, method, - *args, **kwargs) + elif (type(x1) == np.ndarray or np.isscalar(x1)) and ( + type(x2) == np.ndarray or np.isscalar(x2) + ): + return super(BlockVector, self).__array_ufunc__( + ufunc, method, *args, **kwargs + ) else: if x1.__class__.__name__ == 'MPIBlockVector': raise RuntimeError('Operation not supported by BlockVector') @@ -239,7 +324,7 @@ def bshape(self): """ Returns the number of blocks in this BlockVector in a tuple. """ - return self.nblocks, + return (self.nblocks,) @property def shape(self): @@ -247,7 +332,7 @@ def shape(self): Returns total number of elements in this BlockVector """ assert_block_structure(self) - return np.sum(self._brow_lengths), + return (np.sum(self._brow_lengths),) @property def size(self): @@ -283,7 +368,9 @@ def block_sizes(self, copy=True): def get_block_size(self, ndx): if ndx in self._undefined_brows: - raise NotFullyDefinedBlockVectorError('The dimensions of the requested block are not defined.') + raise NotFullyDefinedBlockVectorError( + 'The dimensions of the requested block are not defined.' + ) return int(self._brow_lengths[ndx]) def _set_block_size(self, ndx, size): @@ -294,11 +381,13 @@ def _set_block_size(self, ndx, size): self._brow_lengths = np.asarray(self._brow_lengths, dtype=np.int64) else: if self._brow_lengths[ndx] != size: - raise ValueError('Incompatible dimensions for ' - 'block {ndx}; got {got}; ' - 'expected {exp}'.format(ndx=ndx, - got=size, - exp=self._brow_lengths[ndx])) + raise ValueError( + 'Incompatible dimensions for ' + 'block {ndx}; got {got}; ' + 'expected {exp}'.format( + ndx=ndx, got=size, exp=self._brow_lengths[ndx] + ) + ) def is_block_defined(self, ndx): return ndx not in self._undefined_brows @@ -320,13 +409,16 @@ def dot(self, other, out=None): assert_block_structure(self) if isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) - return sum(self.get_block(i).dot(other.get_block(i)) for i in range(self.nblocks)) - elif type(other)==np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) + return sum( + self.get_block(i).dot(other.get_block(i)) for i in range(self.nblocks) + ) + elif type(other) == np.ndarray: bv = self.flatten() return bv.dot(other) else: @@ -347,8 +439,9 @@ def all(self, axis=None, out=None, keepdims=False): Returns True if all elements evaluate to True. """ assert_block_structure(self) - results = np.array([self.get_block(i).all() for i in range(self.nblocks)], - dtype=bool) + results = np.array( + [self.get_block(i).all() for i in range(self.nblocks)], dtype=bool + ) return results.all(axis=axis, out=out, keepdims=keepdims) def any(self, axis=None, out=None, keepdims=False): @@ -356,8 +449,9 @@ def any(self, axis=None, out=None, keepdims=False): Returns True if any element evaluate to True. """ assert_block_structure(self) - results = np.array([self.get_block(i).any() for i in range(self.nblocks)], - dtype=bool) + results = np.array( + [self.get_block(i).any() for i in range(self.nblocks)], dtype=bool + ) return results.any(axis=axis, out=out, keepdims=keepdims) def max(self, axis=None, out=None, keepdims=False): @@ -377,11 +471,12 @@ def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): bv = BlockVector(self.nblocks) for bid, vv in enumerate(self): if bid not in self._undefined_brows: - bv.set_block(bid, vv.astype(dtype, - order=order, - casting=casting, - subok=subok, - copy=copy)) + bv.set_block( + bid, + vv.astype( + dtype, order=order, casting=casting, subok=subok, copy=copy + ), + ) return bv raise NotImplementedError("astype not implemented for copy=False") @@ -430,22 +525,30 @@ def compress(self, condition, axis=None, out=None): if isinstance(condition, BlockVector): assert_block_structure(condition) - assert self.shape == condition.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, condition.shape) - assert self.nblocks == condition.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - condition.nblocks) + assert self.shape == condition.shape, 'Dimension mismatch {} != {}'.format( + self.shape, condition.shape + ) + assert ( + self.nblocks == condition.nblocks + ), 'Number of blocks mismatch {} != {}'.format( + self.nblocks, condition.nblocks + ) for idx in range(self.nblocks): - result.set_block(idx, self.get_block(idx).compress(condition.get_block(idx))) + result.set_block( + idx, self.get_block(idx).compress(condition.get_block(idx)) + ) return result - elif type(condition)==np.ndarray: - assert self.shape == condition.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, - condition.shape) + elif type(condition) == np.ndarray: + assert self.shape == condition.shape, 'Dimension mismatch {} != {}'.format( + self.shape, condition.shape + ) accum = 0 for idx in range(self.nblocks): nelements = self._brow_lengths[idx] - result.set_block(idx, self.get_block(idx).compress(condition[accum: accum + nelements])) + result.set_block( + idx, + self.get_block(idx).compress(condition[accum : accum + nelements]), + ) accum += nelements return result else: @@ -489,7 +592,7 @@ def ptp(self, axis=None, out=None, keepdims=False): """ assert_block_structure(self) assert out is None, 'Out keyword not supported' - return self.max()-self.min() + return self.max() - self.min() def round(self, decimals=0, out=None): """ @@ -506,13 +609,17 @@ def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): """ Returns the standard deviation of the BlockVector elements. """ - return self.flatten().std(axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims) + return self.flatten().std( + axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims + ) def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False): """ Returns the variance of the BlockVector elements. """ - return self.flatten().var(axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims) + return self.flatten().var( + axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims + ) def tofile(self, fid, sep="", format="%s"): """ @@ -538,7 +645,7 @@ def mean(self, axis=None, dtype=None, out=None, keepdims=False): n = self.size if n == 0: return 0.0 - return self.sum(axis=axis, dtype=dtype, out=out, keepdims=keepdims)/n + return self.sum(axis=axis, dtype=dtype, out=out, keepdims=keepdims) / n def prod(self, axis=None, dtype=None, out=None, keepdims=False): """ @@ -579,8 +686,8 @@ def tolist(self): def flatten(self, order='C'): """ - Converts the BlockVector to a NumPy array. This will also call flatten on the underlying NumPy arrays in - the BlockVector. + Converts the BlockVector to a NumPy array. This will also call flatten on the underlying NumPy arrays in + the BlockVector. Parameters ---------- @@ -593,14 +700,16 @@ def flatten(self, order='C'): The NumPy array resulting from concatenating all of the blocks """ assert_block_structure(self) - all_blocks = tuple(self.get_block(i).flatten(order=order) for i in range(self.nblocks)) + all_blocks = tuple( + self.get_block(i).flatten(order=order) for i in range(self.nblocks) + ) return np.concatenate(all_blocks) def ravel(self, order='C'): """ Converts the BlockVector into a NumPy array. Note that ravel is also called on all of the NumPy arrays in the BlockVector before concatenating them. - + Parameters ---------- order: str @@ -611,7 +720,9 @@ def ravel(self, order='C'): res: numpy.ndarray """ assert_block_structure(self) - all_blocks = tuple(self.get_block(i).ravel(order=order) for i in range(self.nblocks)) + all_blocks = tuple( + self.get_block(i).ravel(order=order) for i in range(self.nblocks) + ) return np.concatenate(all_blocks) def argmax(self, axis=None, out=None): @@ -692,11 +803,12 @@ def copyfrom(self, other): if isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx in range(other.nblocks): if isinstance(self.get_block(idx), BlockVector): self.get_block(idx).copyfrom(other.get_block(idx)) @@ -709,19 +821,20 @@ def copyfrom(self, other): raise RuntimeError('Input not recognized') elif self.get_block(idx) is None: if isinstance(other.get_block(idx), np.ndarray): - # this inlcude block vectors too + # this include block vectors too self.set_block(idx, other.get_block(idx).copy()) else: raise RuntimeError('Input not recognized') else: raise RuntimeError('Input not recognized') elif isinstance(other, np.ndarray): - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) offset = 0 for idx in range(self.nblocks): - subarray = other[offset: offset + self.get_block(idx).size] + subarray = other[offset : offset + self.get_block(idx).size] if isinstance(self.get_block(idx), BlockVector): self.get_block(idx).copyfrom(subarray) else: @@ -745,8 +858,9 @@ def copyto(self, other): """ if isinstance(other, BlockVector): - msgj = 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + msgj = 'Number of blocks mismatch {} != {}'.format( + self.nblocks, other.nblocks + ) assert self.nblocks == other.nblocks, msgj for idx in range(self.nblocks): if isinstance(other.get_block(idx), BlockVector): @@ -789,7 +903,12 @@ def copy_structure(self): if isinstance(self.get_block(bid), BlockVector): bv.set_block(bid, self.get_block(bid).copy_structure()) elif type(self.get_block(bid)) == np.ndarray: - bv.set_block(bid, np.zeros(self.get_block(bid).size, dtype=self.get_block(bid).dtype)) + bv.set_block( + bid, + np.zeros( + self.get_block(bid).size, dtype=self.get_block(bid).dtype + ), + ) else: raise NotImplementedError('Should never get here') return bv @@ -808,11 +927,12 @@ def set_blocks(self, blocks): None """ - assert isinstance(blocks, list), \ - 'blocks should be passed in ordered list' - assert len(blocks) == self.nblocks, \ - 'More blocks passed than allocated {} != {}'.format(len(blocks), - self.nblocks) + assert isinstance(blocks, list), 'blocks should be passed in ordered list' + assert ( + len(blocks) == self.nblocks + ), 'More blocks passed than allocated {} != {}'.format( + len(blocks), self.nblocks + ) for idx, blk in enumerate(blocks): self.set_block(idx, blk) @@ -828,21 +948,23 @@ def __add__(self, other): assert_block_structure(self) if isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): result.set_block(idx, blk + other.get_block(idx)) return result - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - result.set_block(idx, blk + other[accum: accum + nelements]) + result.set_block(idx, blk + other[accum : accum + nelements]) accum += nelements return result elif np.isscalar(other): @@ -858,28 +980,30 @@ def __radd__(self, other): # other + self return self.__add__(other) def __sub__(self, other): - # substract this BlockVector with other vector - # supports substraction with scalar, numpy.ndarray and BlockVectors + # subtract this BlockVector with other vector + # supports subtraction with scalar, numpy.ndarray and BlockVectors # returns BlockVector result = BlockVector(self.nblocks) assert_block_structure(self) if isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): result.set_block(idx, blk - other.get_block(idx)) return result - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - result.set_block(idx, blk - other[accum: accum + nelements]) + result.set_block(idx, blk - other[accum : accum + nelements]) accum += nelements return result elif np.isscalar(other): @@ -892,27 +1016,28 @@ def __sub__(self, other): raise NotImplementedError() def __rsub__(self, other): # other - self - result = BlockVector(self.nblocks) assert_block_structure(self) if isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): result.set_block(idx, other.get_block(idx) - blk) return result - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - result.set_block(idx, other[accum: accum + nelements] - blk) + result.set_block(idx, other[accum : accum + nelements] - blk) accum += nelements return result elif np.isscalar(other): @@ -932,21 +1057,23 @@ def __mul__(self, other): result = BlockVector(self.nblocks) if isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): - result.set_block(idx, blk .__mul__(other.get_block(idx))) + result.set_block(idx, blk.__mul__(other.get_block(idx))) return result - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - result.set_block(idx, blk.__mul__(other[accum: accum + nelements])) + result.set_block(idx, blk.__mul__(other[accum : accum + nelements])) accum += nelements return result elif np.isscalar(other): @@ -969,21 +1096,23 @@ def __truediv__(self, other): result = BlockVector(self.nblocks) if isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): result.set_block(idx, blk / other.get_block(idx)) return result - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - result.set_block(idx, blk / other[accum: accum + nelements]) + result.set_block(idx, blk / other[accum : accum + nelements]) accum += nelements return result elif np.isscalar(other): @@ -1000,21 +1129,23 @@ def __rtruediv__(self, other): result = BlockVector(self.nblocks) if isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): result.set_block(idx, other.get_block(idx) / blk) return result - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - result.set_block(idx, other[accum: accum + nelements] / blk) + result.set_block(idx, other[accum : accum + nelements] / blk) accum += nelements return result elif np.isscalar(other): @@ -1031,21 +1162,23 @@ def __floordiv__(self, other): result = BlockVector(self.nblocks) if isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): result.set_block(idx, blk // other.get_block(idx)) return result - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - result.set_block(idx, blk // other[accum: accum + nelements]) + result.set_block(idx, blk // other[accum : accum + nelements]) accum += nelements return result elif np.isscalar(other): @@ -1062,21 +1195,23 @@ def __rfloordiv__(self, other): result = BlockVector(self.nblocks) if isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): result.set_block(idx, other.get_block(idx) // blk) return result - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - result.set_block(idx, other[accum: accum + nelements] // blk) + result.set_block(idx, other[accum : accum + nelements] // blk) accum += nelements return result elif np.isscalar(other): @@ -1098,21 +1233,23 @@ def __iadd__(self, other): return self elif isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): blk += other.get_block(idx) return self - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - blk += other[accum: accum + nelements] + blk += other[accum : accum + nelements] accum += nelements return self else: @@ -1128,21 +1265,23 @@ def __isub__(self, other): return self elif isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): blk -= other.get_block(idx) return self - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - blk -= other[accum: accum + nelements] + blk -= other[accum : accum + nelements] accum += nelements return self else: @@ -1158,21 +1297,23 @@ def __imul__(self, other): return self elif isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): blk *= other.get_block(idx) return self - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - blk *= other[accum: accum + nelements] + blk *= other[accum : accum + nelements] accum += nelements return self else: @@ -1188,21 +1329,23 @@ def __itruediv__(self, other): return self elif isinstance(other, BlockVector): assert_block_structure(other) - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch {} != {}'.format(self.nblocks, - other.nblocks) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) for idx, blk in enumerate(self): blk /= other.get_block(idx) return self - elif type(other)==np.ndarray: - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + elif type(other) == np.ndarray: + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for idx, blk in enumerate(self): nelements = self._brow_lengths[idx] - blk /= other[accum: accum + nelements] + blk /= other[accum : accum + nelements] accum += nelements return self else: @@ -1221,10 +1364,24 @@ def _print(self, indent): msg = '' for ndx, block in enumerate(self): if isinstance(block, BlockVector): - msg += indent + str(ndx) + ': ' + block.__class__.__name__ + str(block.bshape) + '\n' - msg += block._print(indent=indent+' ') + msg += ( + indent + + str(ndx) + + ': ' + + block.__class__.__name__ + + str(block.bshape) + + '\n' + ) + msg += block._print(indent=indent + ' ') else: - msg += indent + str(ndx) + ': ' + block.__class__.__name__ + str(block.shape) + '\n' + msg += ( + indent + + str(ndx) + + ': ' + + block.__class__.__name__ + + str(block.shape) + + '\n' + ) return msg def __str__(self): @@ -1261,9 +1418,9 @@ def set_block(self, key, value): This is the block. It can be a NumPy array or another BlockVector. """ assert -self.nblocks < key < self.nblocks, 'out of range' - assert isinstance(value, np.ndarray) or \ - isinstance(value, BaseBlockVector), \ - 'Blocks need to be numpy arrays or BlockVectors' + assert isinstance(value, np.ndarray) or isinstance( + value, BaseBlockVector + ), 'Blocks need to be numpy arrays or BlockVectors' assert value.ndim == 1, 'Blocks need to be 1D' if isinstance(value, BaseBlockVector): @@ -1299,15 +1456,21 @@ def _has_equal_structure(self, other): def __getitem__(self, item): if not self._has_equal_structure(item): - raise ValueError('BlockVector.__getitem__ only accepts slices in the form of BlockVectors of the same structure') + raise ValueError( + 'BlockVector.__getitem__ only accepts slices in the form of BlockVectors of the same structure' + ) res = BlockVector(self.nblocks) for ndx, block in self: res.set_block(ndx, block[item.get_block(ndx)]) def __setitem__(self, key, value): - if not (self._has_equal_structure(key) and (self._has_equal_structure(value) or np.isscalar(value))): + if not ( + self._has_equal_structure(key) + and (self._has_equal_structure(value) or np.isscalar(value)) + ): raise ValueError( - 'BlockVector.__setitem__ only accepts slices in the form of BlockVectors of the same structure') + 'BlockVector.__setitem__ only accepts slices in the form of BlockVectors of the same structure' + ) if np.isscalar(value): for ndx, block in enumerate(self): block[key.get_block(ndx)] = value @@ -1321,14 +1484,23 @@ def _comparison_helper(self, other, operation): if isinstance(other, BlockVector): assert_block_structure(other) for ndx in range(self.nblocks): - result.set_block(ndx, operation(self.get_block(ndx), other.get_block(ndx))) + result.set_block( + ndx, operation(self.get_block(ndx), other.get_block(ndx)) + ) return result elif isinstance(other, np.ndarray): - assert self.shape == other.shape, \ - 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) accum = 0 for ndx in range(self.nblocks): - result.set_block(ndx, operation(self.get_block(ndx), other[accum : accum + self.get_block_size(ndx)])) + result.set_block( + ndx, + operation( + self.get_block(ndx), + other[accum : accum + self.get_block_size(ndx)], + ), + ) accum += self.get_block_size(ndx) return result elif np.isscalar(other): @@ -1406,13 +1578,16 @@ def toMPIBlockVector(self, rank_ownership, mpi_comm, assert_correct_owners=False from pyomo.contrib.pynumero.sparse.mpi_block_vector import MPIBlockVector assert_block_structure(self) - assert len(rank_ownership) == self.nblocks, \ - 'rank_ownership must be of size {}'.format(self.nblocks) - - mpi_bv = MPIBlockVector(self.nblocks, - rank_ownership, - mpi_comm, - assert_correct_owners=assert_correct_owners) + assert ( + len(rank_ownership) == self.nblocks + ), 'rank_ownership must be of size {}'.format(self.nblocks) + + mpi_bv = MPIBlockVector( + self.nblocks, + rank_ownership, + mpi_comm, + assert_correct_owners=assert_correct_owners, + ) # populate blocks in the right spaces for bid in mpi_bv.owned_blocks: diff --git a/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py b/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py index e99aee9787e..ee045464dec 100644 --- a/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py +++ b/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py @@ -22,12 +22,12 @@ """ from __future__ import annotations +from pyomo.common.dependencies import mpi4py from .mpi_block_vector import MPIBlockVector from .block_vector import BlockVector from .block_matrix import BlockMatrix, NotFullyDefinedBlockMatrixError from .block_matrix import assert_block_structure as block_matrix_assert_block_structure from .base_block import BaseBlockMatrix -from mpi4py import MPI import numpy as np from scipy.sparse import coo_matrix import operator @@ -86,17 +86,13 @@ class MPIBlockMatrix(BaseBlockMatrix): assert_correct_owners: bool If True, then checks will be performed to ensure that processor owners are consistent. This check - requires communication. If False, this check is + requires communication. If False, this check is skipped. """ - def __init__(self, - nbrows, - nbcols, - rank_ownership, - mpi_comm, - assert_correct_owners=False): - + def __init__( + self, nbrows, nbcols, rank_ownership, mpi_comm, assert_correct_owners=False + ): shape = (nbrows, nbcols) self._block_matrix = BlockMatrix(nbrows, nbcols) self._mpiw = mpi_comm @@ -110,8 +106,9 @@ def __init__(self, # Note: this requires communication but is disabled when assertions # are turned off if assert_correct_owners: - assert self._assert_correct_owners(), \ - 'rank_owner must be the same in all processors' + assert ( + self._assert_correct_owners() + ), 'rank_owner must be the same in all processors' # make some of the pointers unmutable self._rank_owner.flags.writeable = False @@ -148,7 +145,7 @@ def nnz(self): if not self._block_matrix.is_empty_block(i, j): local_nnz += self._block_matrix.get_block(i, j).nnz - return self._mpiw.allreduce(local_nnz, op=MPI.SUM) + return self._mpiw.allreduce(local_nnz, op=mpi4py.MPI.SUM) @property def owned_blocks(self): @@ -200,13 +197,13 @@ def set_col_size(self, col, size): def is_row_size_defined(self, row, this_process_only=True): res = self._block_matrix.is_row_size_defined(row) if not this_process_only: - res = self.mpi_comm.allreduce(res, op=MPI.LOR) + res = self.mpi_comm.allreduce(res, op=mpi4py.MPI.LOR) return bool(res) def is_col_size_defined(self, col, this_process_only=True): res = self._block_matrix.is_col_size_defined(col) if not this_process_only: - res = self.mpi_comm.allreduce(res, op=MPI.LOR) + res = self.mpi_comm.allreduce(res, op=mpi4py.MPI.LOR) return bool(res) def get_block_mask(self, copy=True): @@ -243,15 +240,21 @@ def transpose(self, axes=None, copy=True): MPIBlockMatrix with dimensions reversed """ if axes is not None: - raise ValueError(("Sparse matrices do not support " - "an 'axes' parameter because swapping " - "dimensions is the only logical permutation.")) + raise ValueError( + ( + "Sparse matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation." + ) + ) if not copy: raise ValueError('MPIBlockMatrix only supports transpose with copy=True') m = self.bshape[0] n = self.bshape[1] - result = MPIBlockMatrix(n, m, self._rank_owner.T, self._mpiw, assert_correct_owners=False) + result = MPIBlockMatrix( + n, m, self._rank_owner.T, self._mpiw, assert_correct_owners=False + ) result._block_matrix = self._block_matrix.transpose() return result @@ -353,7 +356,7 @@ def is_empty_block(self, idx, jdx, this_process_only=True): """ res = self._block_matrix.is_empty_block(idx, jdx) if not this_process_only: - res = self.mpi_comm.allreduce(res, op=MPI.LAND) + res = self.mpi_comm.allreduce(res, op=mpi4py.MPI.LAND) return bool(res) # Note: this requires communication @@ -379,8 +382,9 @@ def broadcast_block_sizes(self): send_data = np.concatenate([local_row_data, local_col_data]) - receive_data = np.empty(num_processors * (self.bshape[0] + self.bshape[1]), - dtype=np.int64) + receive_data = np.empty( + num_processors * (self.bshape[0] + self.bshape[1]), dtype=np.int64 + ) self._mpiw.Allgather(send_data, receive_data) proc_dims = np.split(receive_data, num_processors) @@ -393,19 +397,22 @@ def broadcast_block_sizes(self): for i in range(m): rows_length = set() for k in range(num_processors): - row_sizes, col_sizes = np.split(proc_dims[k], - [self.bshape[0]]) + row_sizes, col_sizes = np.split(proc_dims[k], [self.bshape[0]]) rows_length.add(row_sizes[i]) if len(rows_length) > 2: - msg = 'Row {} has more than one dimension accross processors'.format(i) + msg = 'Row {} has more than one dimension across processors'.format(i) raise RuntimeError(msg) elif len(rows_length) == 2: if -1 not in rows_length: - msg = 'Row {} has more than one dimension accross processors'.format(i) + msg = 'Row {} has more than one dimension across processors'.format( + i + ) raise RuntimeError(msg) rows_length.remove(-1) elif -1 in rows_length: - msg = 'The dimensions of block row {} were not defined in any process'.format(i) + msg = 'The dimensions of block row {} were not defined in any process'.format( + i + ) raise NotFullyDefinedBlockMatrixError(msg) # here rows_length must only have one element @@ -415,19 +422,24 @@ def broadcast_block_sizes(self): for i in range(n): cols_length = set() for k in range(num_processors): - rows_sizes, col_sizes = np.split(proc_dims[k], - [self.bshape[0]]) + rows_sizes, col_sizes = np.split(proc_dims[k], [self.bshape[0]]) cols_length.add(col_sizes[i]) if len(cols_length) > 2: - msg = 'Column {} has more than one dimension accross processors'.format(i) + msg = 'Column {} has more than one dimension across processors'.format( + i + ) raise RuntimeError(msg) elif len(cols_length) == 2: if -1 not in cols_length: - msg = 'Column {} has more than one dimension accross processors'.format(i) + msg = 'Column {} has more than one dimension across processors'.format( + i + ) raise RuntimeError(msg) cols_length.remove(-1) elif -1 in cols_length: - msg = 'The dimensions of block column {} were not defined in any process'.format(i) + msg = 'The dimensions of block column {} were not defined in any process'.format( + i + ) raise NotFullyDefinedBlockMatrixError(msg) # here rows_length must only have one element @@ -554,7 +566,9 @@ def copy(self): """ m, n = self.bshape - result = MPIBlockMatrix(m, n, self._rank_owner, self._mpiw, assert_correct_owners=False) + result = MPIBlockMatrix( + m, n, self._rank_owner, self._mpiw, assert_correct_owners=False + ) result._block_matrix = self._block_matrix.copy() return result @@ -571,7 +585,9 @@ def copy_structure(self): """ m, n = self.bshape - result = MPIBlockMatrix(m, n, self._rank_owner, self._mpiw, assert_correct_owners=False) + result = MPIBlockMatrix( + m, n, self._rank_owner, self._mpiw, assert_correct_owners=False + ) result._block_matrix = self._block_matrix.copy_structure() return result @@ -608,7 +624,11 @@ def __str__(self): msg = '{}{}\n'.format(self.__class__.__name__, self.bshape) for idx in range(self.bshape[0]): for jdx in range(self.bshape[1]): - rank = self._rank_owner[idx, jdx] if self._rank_owner[idx, jdx] >= 0 else 'A' + rank = ( + self._rank_owner[idx, jdx] + if self._rank_owner[idx, jdx] >= 0 + else 'A' + ) msg += '({}, {}): Owned by processor{}\n'.format(idx, jdx, rank) return msg @@ -616,24 +636,22 @@ def get_block(self, row, col): block = self._block_matrix.get_block(row, col) owner = self._rank_owner[row, col] rank = self._mpiw.Get_rank() - assert owner == rank or \ - owner < 0, \ - 'Block {} not owned by processor {}'.format((row, col), rank) + assert owner == rank or owner < 0, 'Block {} not owned by processor {}'.format( + (row, col), rank + ) return block def set_block(self, row, col, value): - assert row >= 0 and \ - col >= 0, 'Indices must be positive' + assert row >= 0 and col >= 0, 'Indices must be positive' - assert row < self.bshape[0] and \ - col < self.bshape[1], 'Indices out of range' + assert row < self.bshape[0] and col < self.bshape[1], 'Indices out of range' owner = self._rank_owner[row, col] rank = self._mpiw.Get_rank() - assert owner == rank or \ - owner < 0, \ - 'Block {} not owned by processor {}'.format((row, col), rank) + assert owner == rank or owner < 0, 'Block {} not owned by processor {}'.format( + (row, col), rank + ) self._block_matrix.set_block(row, col, value) @@ -646,14 +664,18 @@ def __setitem__(self, item, val): def _binary_operation_helper(self, other, operation): result = self.copy_structure() if isinstance(other, (MPIBlockMatrix, BlockMatrix)): - assert other.bshape == self.bshape, \ - 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape) + assert other.bshape == self.bshape, 'dimensions mismatch {} != {}'.format( + self.bshape, other.bshape + ) if isinstance(other, MPIBlockMatrix): - assert np.array_equal(self._rank_owner, other._rank_owner), \ - 'MPIBlockMatrices must be distributed in same processors' + assert np.array_equal( + self._rank_owner, other._rank_owner + ), 'MPIBlockMatrices must be distributed in same processors' - block_indices = np.bitwise_or(self.get_block_mask(copy=False), other.get_block_mask(copy=False)) + block_indices = np.bitwise_or( + self.get_block_mask(copy=False), other.get_block_mask(copy=False) + ) block_indices = np.bitwise_and(block_indices, self._owned_mask) ii, jj = np.nonzero(block_indices) for i, j in zip(ii, jj): @@ -666,9 +688,13 @@ def _binary_operation_helper(self, other, operation): elif mat1 is None and mat2 is not None: result.set_block(i, j, operation(0, mat2)) else: - raise ValueError('This is unexpected. Please report to the developers.') + raise ValueError( + 'This is unexpected. Please report to the developers.' + ) elif np.isscalar(other): - block_indices = np.bitwise_and(self.get_block_mask(copy=False), self._owned_mask) + block_indices = np.bitwise_and( + self.get_block_mask(copy=False), self._owned_mask + ) for i, j in zip(*np.nonzero(block_indices)): result.set_block(i, j, operation(self.get_block(i, j), other)) else: @@ -679,12 +705,14 @@ def _inplace_binary_operation_helper(self, other, operation): if isinstance(other, (MPIBlockMatrix, BlockMatrix)): assert operation in {operator.iadd, operator.isub} - assert other.bshape == self.bshape, \ - 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape) + assert other.bshape == self.bshape, 'dimensions mismatch {} != {}'.format( + self.bshape, other.bshape + ) if isinstance(other, MPIBlockMatrix): - assert np.array_equal(self._rank_owner, other._rank_owner), \ - 'MPIBlockMatrices must be distributed in same processors' + assert np.array_equal( + self._rank_owner, other._rank_owner + ), 'MPIBlockMatrices must be distributed in same processors' block_indices = other.get_block_mask(copy=False) block_indices = np.bitwise_and(block_indices, self._owned_mask) @@ -704,7 +732,9 @@ def _inplace_binary_operation_helper(self, other, operation): else: raise RuntimeError('Please report this to the developers.') elif np.isscalar(other): - block_indices = np.bitwise_and(self.get_block_mask(copy=False), self._owned_mask) + block_indices = np.bitwise_and( + self.get_block_mask(copy=False), self._owned_mask + ) for i, j in zip(*np.nonzero(block_indices)): blk = self.get_block(i, j) blk = operation(blk, other) @@ -728,7 +758,7 @@ def __rsub__(self, other): def _get_block_vector_for_dot_product(self, x): if isinstance(x, MPIBlockVector): """ - Consider a non-empty block m_{i, j} from the mpi block matrix with rank owner r_m and the + Consider a non-empty block m_{i, j} from the mpi block matrix with rank owner r_m and the corresponding block v_{j} from the mpi block vector with rank owner r_v. There are 4 cases: 1. r_m = r_v In this case, all is good. @@ -739,7 +769,9 @@ def _get_block_vector_for_dot_product(self, x): 4. If none of the above cases hold, then v_{j} must be broadcast """ n_block_rows, n_block_cols = self.bshape - blocks_needing_broadcast = np.zeros(n_block_cols, dtype=np.int64) # a value > 0 means broadcast + blocks_needing_broadcast = np.zeros( + n_block_cols, dtype=np.int64 + ) # a value > 0 means broadcast x_rank_ownership = x.rank_ownership comm = self._mpiw rank = comm.Get_rank() @@ -748,7 +780,9 @@ def _get_block_vector_for_dot_product(self, x): block_indices = self._owned_mask else: block_indices = self._unique_owned_mask - block_indices = np.bitwise_and(block_indices, self.get_block_mask(copy=False)) + block_indices = np.bitwise_and( + block_indices, self.get_block_mask(copy=False) + ) for i, j in zip(*np.nonzero(block_indices)): r_m = self._rank_owner[i, j] r_v = x_rank_ownership[j] @@ -793,7 +827,9 @@ def _get_block_vector_for_dot_product(self, x): y.copyfrom(x) return y else: - raise NotImplementedError('Dot product is not yet supported for MPIBlockMatrix*'+str(type(x))) + raise NotImplementedError( + 'Dot product is not yet supported for MPIBlockMatrix*' + str(type(x)) + ) def _block_vector_multiply(self, x): """ @@ -823,7 +859,9 @@ def _block_vector_multiply(self, x): global_blocks_that_need_reduced = np.zeros(n_block_rows, dtype=np.int64) comm.Allreduce(blocks_that_need_reduced, global_blocks_that_need_reduced) - block_indices_that_need_reduced = np.nonzero(global_blocks_that_need_reduced > 1)[0] + block_indices_that_need_reduced = np.nonzero( + global_blocks_that_need_reduced > 1 + )[0] global_res_rank_owner = np.zeros(n_block_rows, dtype=np.int64) comm.Allreduce(res_rank_owner, global_res_rank_owner) global_res_rank_owner[block_indices_that_need_reduced] = -1 @@ -840,10 +878,12 @@ def _block_vector_multiply(self, x): else: global_res_rank_owner[ndx] = -1 - res = MPIBlockVector(nblocks=n_block_rows, - rank_owner=global_res_rank_owner, - mpi_comm=comm, - assert_correct_owners=False) + res = MPIBlockVector( + nblocks=n_block_rows, + rank_owner=global_res_rank_owner, + mpi_comm=comm, + assert_correct_owners=False, + ) for ndx in np.nonzero(res.ownership_mask)[0]: res.set_block(ndx, np.zeros(self.get_row_size(ndx))) if rank == 0: @@ -948,14 +988,18 @@ def __idiv__(self, other): def __neg__(self): result = self.copy_structure() - block_indices = np.bitwise_and(self.get_block_mask(copy=False), self._owned_mask) + block_indices = np.bitwise_and( + self.get_block_mask(copy=False), self._owned_mask + ) for i, j in zip(*np.nonzero(block_indices)): result.set_block(i, j, -self.get_block(i, j)) return result def __abs__(self): result = self.copy_structure() - block_indices = np.bitwise_and(self.get_block_mask(copy=False), self._owned_mask) + block_indices = np.bitwise_and( + self.get_block_mask(copy=False), self._owned_mask + ) for i, j in zip(*np.nonzero(block_indices)): result.set_block(i, j, abs(self.get_block(i, j))) return result @@ -965,9 +1009,12 @@ def _comparison_helper(self, operation, other): result = self.copy_structure() if isinstance(other, MPIBlockMatrix): - assert other.bshape == self.bshape, 'dimension mismatch {} != {}'.format(self.bshape, other.bshape) - assert np.array_equal(self.rank_ownership, other.rank_ownership), 'MPIBlockMatrices must be distributed in ' \ - 'the same processors' + assert other.bshape == self.bshape, 'dimension mismatch {} != {}'.format( + self.bshape, other.bshape + ) + assert np.array_equal( + self.rank_ownership, other.rank_ownership + ), 'MPIBlockMatrices must be distributed in the same processors' for i, j in zip(*np.nonzero(self.ownership_mask)): mat1 = self.get_block(i, j) @@ -1034,10 +1081,10 @@ def get_block_column_index(self, index): assert_block_structure(self) bm, bn = self.bshape - # get cummulative sum of block sizes + # get cumulative sum of block sizes cum = self.col_block_sizes(copy=False).cumsum() assert index >= 0, 'index out of bounds' - assert index < cum[bn-1], 'index out of bounds' + assert index < cum[bn - 1], 'index out of bounds' # exits if only has one column if bn <= 1: @@ -1068,10 +1115,10 @@ def get_block_row_index(self, index): assert_block_structure(self) bm, bn = self.bshape - # get cummulative sum of block sizes + # get cumulative sum of block sizes cum = self.row_block_sizes(copy=False).cumsum() assert index >= 0, 'index out of bounds' - assert index < cum[bm-1], 'index out of bounds' + assert index < cum[bm - 1], 'index out of bounds' # exits if only has one column if bm <= 1: @@ -1110,16 +1157,13 @@ def getcol(self, j): for i in range(bm): col_ownership.append(self._rank_owner[i, bcol]) # create vector - bv = MPIBlockVector(bm, - col_ownership, - self._mpiw, - assert_correct_owners=False) + bv = MPIBlockVector(bm, col_ownership, self._mpiw, assert_correct_owners=False) # compute offset columns offset = 0 if bcol > 0: cum_sum = self.col_block_sizes(copy=False).cumsum() - offset = cum_sum[bcol-1] + offset = cum_sum[bcol - 1] # populate vector rank = self._mpiw.Get_rank() @@ -1129,10 +1173,10 @@ def getcol(self, j): if self._block_matrix.is_empty_block(row_bid, bcol): v = np.zeros(self.get_row_size(row_bid)) elif isinstance(sub_matrix, BaseBlockMatrix): - v = sub_matrix.getcol(j-offset) + v = sub_matrix.getcol(j - offset) else: # if it is sparse matrix transform array to vector - v = sub_matrix.getcol(j-offset).toarray().flatten() + v = sub_matrix.getcol(j - offset).toarray().flatten() bv.set_block(row_bid, v) return bv @@ -1159,34 +1203,33 @@ def getrow(self, i): row_ownership = [] bm, bn = self.bshape for j in range(bn): - row_ownership.append(self._rank_owner[brow, j]) + row_ownership.append(self._rank_owner[brow, j]) # create vector - bv = MPIBlockVector(bn, - row_ownership, - self._mpiw, - assert_correct_owners=False) + bv = MPIBlockVector(bn, row_ownership, self._mpiw, assert_correct_owners=False) # compute offset columns offset = 0 if brow > 0: cum_sum = self.row_block_sizes(copy=False).cumsum() - offset = cum_sum[brow-1] + offset = cum_sum[brow - 1] # populate vector rank = self._mpiw.Get_rank() for col_bid, owner in enumerate(row_ownership): - if rank == owner or owner<0: + if rank == owner or owner < 0: sub_matrix = self._block_matrix.get_block(brow, col_bid) if self._block_matrix.is_empty_block(brow, col_bid): v = np.zeros(self.get_col_size(col_bid)) elif isinstance(sub_matrix, BaseBlockMatrix): - v = sub_matrix.getrow(i-offset) + v = sub_matrix.getrow(i - offset) else: # if it is sparse matrix transform array to vector - v = sub_matrix.getrow(i-offset).toarray().flatten() + v = sub_matrix.getrow(i - offset).toarray().flatten() bv.set_block(col_bid, v) return bv @staticmethod - def fromBlockMatrix(block_matrix, rank_ownership, mpi_comm, assert_correct_owners=False): + def fromBlockMatrix( + block_matrix, rank_ownership, mpi_comm, assert_correct_owners=False + ): """ Creates a parallel MPIBlockMatrix from blockmatrix @@ -1206,11 +1249,13 @@ def fromBlockMatrix(block_matrix, rank_ownership, mpi_comm, assert_correct_owner # create mpi matrix bm, bn = block_matrix.bshape - mat = MPIBlockMatrix(bm, - bn, - rank_ownership, - mpi_comm, - assert_correct_owners=assert_correct_owners) + mat = MPIBlockMatrix( + bm, + bn, + rank_ownership, + mpi_comm, + assert_correct_owners=assert_correct_owners, + ) # populate matrix for i in range(bm): diff --git a/pyomo/contrib/pynumero/sparse/mpi_block_vector.py b/pyomo/contrib/pynumero/sparse/mpi_block_vector.py index 90f53d7ce45..5d89bbf5522 100644 --- a/pyomo/contrib/pynumero/sparse/mpi_block_vector.py +++ b/pyomo/contrib/pynumero/sparse/mpi_block_vector.py @@ -9,12 +9,11 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - +from pyomo.common.dependencies import mpi4py from pyomo.contrib.pynumero.sparse import BlockVector from .base_block import BaseBlockVector from .block_vector import NotFullyDefinedBlockVectorError from .block_vector import assert_block_structure as block_vector_assert_block_structure -from mpi4py import MPI import numpy as np import operator @@ -45,7 +44,7 @@ class MPIBlockVector(np.ndarray, BaseBlockVector): A communicator from the MPI space. Typically MPI.COMM_WORLD _block_vector: BlockVector Internal BlockVector. Blocks that belong to this processor are stored - in _block_vector. Blocks that do not belong to this proceesor are empty + in _block_vector. Blocks that do not belong to this processor are empty and store as numpy.zeros(0) _owned_mask: numpy.ndarray bool 1D-array that indicates if a block belongs to this processor. While @@ -60,7 +59,7 @@ class MPIBlockVector(np.ndarray, BaseBlockVector): include blocks with ownership -1. _brow_lengths: numpy.ndarray 1D-Array of size nblocks that specifies the length of each entry - in the MPIBlockVector. This is the same accross all processors. + in the MPIBlockVector. This is the same across all processors. _undefined_brows: set A set of block indices for which the blocks are still None (i.e., the dimensions have not yet ben set). Operations with BlockVectors require all entries to be @@ -86,7 +85,6 @@ class MPIBlockVector(np.ndarray, BaseBlockVector): """ def __new__(cls, nblocks, rank_owner, mpi_comm, assert_correct_owners=False): - assert isinstance(nblocks, int) assert len(rank_owner) == nblocks @@ -128,8 +126,9 @@ def __init__(self, nblocks, rank_owner, mpi_comm, assert_correct_owners=False): # Note: this requires communication but is disabled when assertions # are turned off if assert_correct_owners: - assert self._assert_correct_owners(), \ - 'rank_owner must be the same in all processors' + assert ( + self._assert_correct_owners() + ), 'rank_owner must be the same in all processors' def __array_prepare__(self, out_arr, context=None): return super(MPIBlockVector, self).__array_prepare__(self, out_arr, context) @@ -140,28 +139,79 @@ def __array_wrap__(self, out_arr, context=None): def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): """Runs ufuncs speciallizations to MPIBlockVector""" # functions that take one vector - unary_funcs = [np.log10, np.sin, np.cos, np.exp, np.ceil, - np.floor, np.tan, np.arctan, np.arcsin, - np.arccos, np.sinh, np.cosh, np.abs, - np.tanh, np.arccosh, np.arcsinh, np.arctanh, - np.fabs, np.sqrt, np.log, np.log2, np.absolute, - np.isfinite, np.isinf, np.isnan, np.log1p, - np.logical_not, np.expm1, np.exp2, np.sign, - np.rint, np.square, np.positive, np.negative, - np.rad2deg, np.deg2rad, np.conjugate, np.reciprocal, - np.signbit] + unary_funcs = [ + np.log10, + np.sin, + np.cos, + np.exp, + np.ceil, + np.floor, + np.tan, + np.arctan, + np.arcsin, + np.arccos, + np.sinh, + np.cosh, + np.abs, + np.tanh, + np.arccosh, + np.arcsinh, + np.arctanh, + np.fabs, + np.sqrt, + np.log, + np.log2, + np.absolute, + np.isfinite, + np.isinf, + np.isnan, + np.log1p, + np.logical_not, + np.expm1, + np.exp2, + np.sign, + np.rint, + np.square, + np.positive, + np.negative, + np.rad2deg, + np.deg2rad, + np.conjugate, + np.reciprocal, + np.signbit, + ] # functions that take two vectors - binary_funcs = [np.add, np.multiply, np.divide, np.subtract, - np.greater, np.greater_equal, np.less, np.less_equal, - np.not_equal, np.maximum, np.minimum, np.fmax, - np.fmin, np.equal, np.logical_and, - np.logical_or, np.logical_xor, np.logaddexp, - np.logaddexp2, np.remainder, np.heaviside, - np.hypot] + binary_funcs = [ + np.add, + np.multiply, + np.divide, + np.subtract, + np.greater, + np.greater_equal, + np.less, + np.less_equal, + np.not_equal, + np.maximum, + np.minimum, + np.fmax, + np.fmin, + np.equal, + np.logical_and, + np.logical_or, + np.logical_xor, + np.logaddexp, + np.logaddexp2, + np.remainder, + np.heaviside, + np.hypot, + ] outputs = kwargs.pop('out', None) if outputs is not None: - raise NotImplementedError(str(ufunc) + ' cannot be used with MPIBlockVector if the out keyword argument is given.') + raise NotImplementedError( + str(ufunc) + + ' cannot be used with MPIBlockVector if the out keyword argument is given.' + ) if ufunc in unary_funcs: results = self._unary_operation(ufunc, method, *inputs, **kwargs) @@ -191,8 +241,9 @@ def _unary_operation(self, ufunc, method, *args, **kwargs): v.set_block(i, self._unary_operation(ufunc, method, *_args, **kwargs)) return v elif type(x) == np.ndarray: - return super(MPIBlockVector, self).__array_ufunc__(ufunc, method, - *args, **kwargs) + return super(MPIBlockVector, self).__array_ufunc__( + ufunc, method, *args, **kwargs + ) else: raise NotImplementedError() @@ -202,15 +253,23 @@ def _binary_operation(self, ufunc, method, *args, **kwargs): x1 = args[0] x2 = args[1] if isinstance(x1, MPIBlockVector) and isinstance(x2, MPIBlockVector): - msg = 'BlockVectors must be distributed in same processors' - assert np.array_equal(x1._rank_owner, x2._rank_owner) or self._mpiw.Get_size() == 1, msg + assert ( + np.array_equal(x1._rank_owner, x2._rank_owner) + or self._mpiw.Get_size() == 1 + ), msg assert x1._mpiw == x2._mpiw, 'Need to have same communicator' res = x1.copy_structure() for i in x1._owned_blocks: - _args = [x1.get_block(i)] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))] - res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) + _args = ( + [x1.get_block(i)] + + [x2.get_block(i)] + + [args[j] for j in range(2, len(args))] + ) + res.set_block( + i, self._binary_operation(ufunc, method, *_args, **kwargs) + ) return res elif isinstance(x1, BlockVector) and isinstance(x2, MPIBlockVector): raise RuntimeError('Operation not supported by MPIBlockVector') @@ -219,27 +278,40 @@ def _binary_operation(self, ufunc, method, *args, **kwargs): elif isinstance(x1, MPIBlockVector) and np.isscalar(x2): res = x1.copy_structure() for i in x1._owned_blocks: - _args = [x1.get_block(i)] + [x2] + [args[j] for j in range(2, len(args))] - res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) + _args = ( + [x1.get_block(i)] + [x2] + [args[j] for j in range(2, len(args))] + ) + res.set_block( + i, self._binary_operation(ufunc, method, *_args, **kwargs) + ) return res elif isinstance(x2, MPIBlockVector) and np.isscalar(x1): res = x2.copy_structure() for i in x2._owned_blocks: - _args = [x1] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))] - res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs)) + _args = ( + [x1] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))] + ) + res.set_block( + i, self._binary_operation(ufunc, method, *_args, **kwargs) + ) return res - elif isinstance(x1, MPIBlockVector) and type(x2)==np.ndarray: + elif isinstance(x1, MPIBlockVector) and type(x2) == np.ndarray: raise RuntimeError('Operation not supported by MPIBlockVector') - elif isinstance(x2, MPIBlockVector) and type(x1)==np.ndarray: + elif isinstance(x2, MPIBlockVector) and type(x1) == np.ndarray: raise RuntimeError('Operation not supported by MPIBlockVector') elif isinstance(x1, np.ndarray) and isinstance(x2, np.ndarray): # this will take care of blockvector and ndarrays return self._block_vector.__array_ufunc__(ufunc, method, *args, **kwargs) - elif (type(x1)==BlockVector or np.isscalar(x1)) and (type(x2)==BlockVector or np.isscalar(x2)): + elif (type(x1) == BlockVector or np.isscalar(x1)) and ( + type(x2) == BlockVector or np.isscalar(x2) + ): return self._block_vector.__array_ufunc__(ufunc, method, *args, **kwargs) - elif (type(x1)==np.ndarray or np.isscalar(x1)) and (type(x2)==np.ndarray or np.isscalar(x2)): - return super(MPIBlockVector, self).__array_ufunc__(ufunc, method, - *args, **kwargs) + elif (type(x1) == np.ndarray or np.isscalar(x1)) and ( + type(x2) == np.ndarray or np.isscalar(x2) + ): + return super(MPIBlockVector, self).__array_ufunc__( + ufunc, method, *args, **kwargs + ) else: raise NotImplementedError() @@ -255,7 +327,7 @@ def bshape(self): """ Returns the number of blocks in this MPIBlockVector in a tuple. """ - return self.nblocks, + return (self.nblocks,) @property def shape(self): @@ -346,7 +418,9 @@ def block_sizes(self, copy=True): def get_block_size(self, ndx): res = self._brow_lengths[ndx] if np.isnan(res): - raise NotFullyDefinedBlockVectorError('The dimensions of the requested block are not defined.') + raise NotFullyDefinedBlockVectorError( + 'The dimensions of the requested block are not defined.' + ) res = int(res) return res @@ -356,10 +430,12 @@ def _set_block_size(self, ndx, size): self._brow_lengths[ndx] = size else: if self._brow_lengths[ndx] != size: - raise ValueError('Incompatible dimensions for block {ndx}; ' - 'got {got}; expected {exp}'.format(ndx=ndx, - got=size, - exp=self._brow_lengths[ndx])) + raise ValueError( + 'Incompatible dimensions for block {ndx}; ' + 'got {got}; expected {exp}'.format( + ndx=ndx, got=size, exp=self._brow_lengths[ndx] + ) + ) # Note: this operation requires communication def broadcast_block_sizes(self): @@ -376,11 +452,12 @@ def broadcast_block_sizes(self): local_length_data.fill(-1) for ndx in self.owned_blocks: if ndx in self._undefined_brows: - raise NotFullyDefinedBlockVectorError('Block {ndx} is owned by rank {rank}, ' - 'but the dimensions for block {ndx} ' - 'have not yet been specified in rank {rank}. ' - 'Please specify all owned blocks.'.format(ndx=ndx, - rank=rank)) + raise NotFullyDefinedBlockVectorError( + 'Block {ndx} is owned by rank {rank}, ' + 'but the dimensions for block {ndx} ' + 'have not yet been specified in rank {rank}. ' + 'Please specify all owned blocks.'.format(ndx=ndx, rank=rank) + ) local_length_data[ndx] = self.get_block_size(ndx) receive_data = np.empty(num_processors * self.nblocks, dtype=np.int64) self._mpiw.Allgather(local_length_data, receive_data) @@ -393,15 +470,23 @@ def broadcast_block_sizes(self): processor_sizes = proc_dims[k] block_length.add(processor_sizes[i]) if len(block_length) > 2: - msg = 'Block {} has more than one dimension accross processors'.format(i) + msg = 'Block {} has more than one dimension across processors'.format(i) raise RuntimeError(msg) elif len(block_length) == 2: if -1 not in block_length: - msg = 'Block {} has more than one dimension accross processors'.format(i) + msg = ( + 'Block {} has more than one dimension across processors'.format( + i + ) + ) raise RuntimeError(msg) block_length.remove(-1) elif -1 in block_length: - msg = 'The dimension of block {} was not specified in any process'.format(i) + msg = ( + 'The dimension of block {} was not specified in any process'.format( + i + ) + ) # here block_length must only have one element self._brow_lengths[i] = block_length.pop() @@ -427,7 +512,6 @@ def finalize_block_sizes(self, broadcast=True, block_sizes=None): # Note: this requires communication but is only run in __new__ def _assert_correct_owners(self, root=0): - rank = self._mpiw.Get_rank() num_processors = self._mpiw.Get_size() @@ -437,8 +521,7 @@ def _assert_correct_owners(self, root=0): local_owners = self._rank_owner.copy() receive_data = None if rank == root: - receive_data = np.empty(self.nblocks * num_processors, - dtype=np.int64) + receive_data = np.empty(self.nblocks * num_processors, dtype=np.int64) self._mpiw.Gather(local_owners, receive_data, root=root) @@ -462,7 +545,7 @@ def all(self, axis=None, out=None, keepdims=False): for i in self._owned_blocks: local *= self._block_vector.get_block(i).all() - return bool(self._mpiw.allreduce(local, op=MPI.PROD)) + return bool(self._mpiw.allreduce(local, op=mpi4py.MPI.PROD)) def any(self, axis=None, out=None, keepdims=False): """ @@ -474,7 +557,7 @@ def any(self, axis=None, out=None, keepdims=False): for i in self._owned_blocks: local += self._block_vector.get_block(i).any() - return bool(self._mpiw.allreduce(local, op=MPI.SUM)) + return bool(self._mpiw.allreduce(local, op=mpi4py.MPI.SUM)) def min(self, axis=None, out=None, keepdims=False): """ @@ -484,10 +567,16 @@ def min(self, axis=None, out=None, keepdims=False): assert_block_structure(self) local_min = np.inf for i in self._owned_blocks: - lmin = self._block_vector.get_block(i).min() - if lmin <= local_min: - local_min = lmin - return self._mpiw.allreduce(local_min, op=MPI.MIN) + block = self._block_vector.get_block(i) + if block.size > 0: + lmin = block.min() + if lmin <= local_min: + local_min = lmin + res = self._mpiw.allreduce(local_min, op=mpi4py.MPI.MIN) + if res == np.inf: + if self.size == 0: + raise ValueError('cannot get the min of a size 0 array') + return res def max(self, axis=None, out=None, keepdims=False): """ @@ -497,10 +586,16 @@ def max(self, axis=None, out=None, keepdims=False): assert_block_structure(self) local_max = -np.inf for i in self._owned_blocks: - lmax = self._block_vector.get_block(i).max() - if lmax >= local_max: - local_max = lmax - return self._mpiw.allreduce(local_max, op=MPI.MAX) + block = self._block_vector.get_block(i) + if block.size > 0: + lmax = block.max() + if lmax >= local_max: + local_max = lmax + res = self._mpiw.allreduce(local_max, op=mpi4py.MPI.MAX) + if res == -np.inf: + if self.size == 0: + raise ValueError('cannot get the max of a size 0 array') + return res def sum(self, axis=None, dtype=None, out=None, keepdims=False): """ @@ -515,7 +610,7 @@ def sum(self, axis=None, dtype=None, out=None, keepdims=False): for i in indices: local_sum += self._block_vector.get_block(i).sum(axis=axis, dtype=dtype) - return self._mpiw.allreduce(local_sum, op=MPI.SUM) + return self._mpiw.allreduce(local_sum, op=mpi4py.MPI.SUM) def prod(self, axis=None, dtype=None, out=None, keepdims=False): """ @@ -529,13 +624,13 @@ def prod(self, axis=None, dtype=None, out=None, keepdims=False): local_prod = 1.0 for i in indices: local_prod *= self._block_vector.get_block(i).prod(axis=axis, dtype=dtype) - return self._mpiw.allreduce(local_prod, op=MPI.PROD) + return self._mpiw.allreduce(local_prod, op=mpi4py.MPI.PROD) def mean(self, axis=None, dtype=None, out=None, keepdims=False): """ Returns the average of all entries in this MPIBlockVector """ - return self.sum(out=out)/self.size + return self.sum(out=out) / self.size def conj(self): """ @@ -557,8 +652,12 @@ def nonzero(self): """ Returns the indices of the elements that are non-zero. """ - result = MPIBlockVector(nblocks=self.nblocks, rank_owner=self.rank_ownership, - mpi_comm=self.mpi_comm, assert_correct_owners=False) + result = MPIBlockVector( + nblocks=self.nblocks, + rank_owner=self.rank_ownership, + mpi_comm=self.mpi_comm, + assert_correct_owners=False, + ) assert_block_structure(self) for i in self._owned_blocks: result.set_block(i, self._block_vector.get_block(i).nonzero()[0]) @@ -572,7 +671,9 @@ def round(self, decimals=0, out=None): assert_block_structure(self) result = self.copy_structure() for i in self._owned_blocks: - result.set_block(i, self._block_vector.get_block(i).round(decimals=decimals)) + result.set_block( + i, self._block_vector.get_block(i).round(decimals=decimals) + ) return result def clip(self, min=None, max=None, out=None): @@ -615,8 +716,12 @@ def compress(self, condition, axis=None, out=None): """ assert out is None, 'Out keyword not supported' assert_block_structure(self) - result = MPIBlockVector(nblocks=self.nblocks, rank_owner=self.rank_ownership, - mpi_comm=self.mpi_comm, assert_correct_owners=False) + result = MPIBlockVector( + nblocks=self.nblocks, + rank_owner=self.rank_ownership, + mpi_comm=self.mpi_comm, + assert_correct_owners=False, + ) if isinstance(condition, MPIBlockVector): # Note: do not need to check same size? this is checked implicitly msg = 'BlockVectors must be distributed in same processors' @@ -646,7 +751,9 @@ def copyfrom(self, other): """ if isinstance(other, MPIBlockVector): assert_block_structure(other) - msg = 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) + msg = 'Number of blocks mismatch {} != {}'.format( + self.nblocks, other.nblocks + ) assert self.nblocks == other.nblocks, msg msg = 'BlockVectors must be distributed in same processors' assert np.array_equal(self._rank_owner, other.rank_ownership), msg @@ -657,7 +764,9 @@ def copyfrom(self, other): elif isinstance(other, BlockVector): block_vector_assert_block_structure(other) - msg = 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks) + msg = 'Number of blocks mismatch {} != {}'.format( + self.nblocks, other.nblocks + ) assert self.nblocks == other.nblocks, msg for i in self._owned_blocks: self.set_block(i, other.get_block(i).copy()) @@ -665,11 +774,13 @@ def copyfrom(self, other): assert_block_structure(self) if not self.is_broadcasted(): self.broadcast_block_sizes() - assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape) + assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format( + self.shape, other.shape + ) offset = 0 for idx in range(self.nblocks): if self._owned_mask[idx]: - subarray = other[offset: offset + self.get_block_size(idx)] + subarray = other[offset : offset + self.get_block_size(idx)] if isinstance(self.get_block(idx), BlockVector): self.get_block(idx).copyfrom(subarray) else: @@ -719,14 +830,19 @@ def clone(self, value=None, copy=True): ---------- value: scalar, optional all entries of the cloned vector are set to this value - copy: bool, optinal + copy: bool, optional if set to true makes a deepcopy of each block in this vector. default False Returns ------- MPIBlockVector """ - result = MPIBlockVector(self.nblocks, self.rank_ownership, self.mpi_comm, assert_correct_owners=False) + result = MPIBlockVector( + self.nblocks, + self.rank_ownership, + self.mpi_comm, + assert_correct_owners=False, + ) result._block_vector = self._block_vector.clone(copy=copy) result._brow_lengths = self._brow_lengths.copy() result._undefined_brows = set(self._undefined_brows) @@ -738,7 +854,12 @@ def copy(self, order='C'): """ Returns a copy of the MPIBlockVector """ - result = MPIBlockVector(self.nblocks, self.rank_ownership, self.mpi_comm, assert_correct_owners=False) + result = MPIBlockVector( + self.nblocks, + self.rank_ownership, + self.mpi_comm, + assert_correct_owners=False, + ) result._block_vector = self._block_vector.copy(order=order) result._brow_lengths = self._brow_lengths.copy() result._undefined_brows = set(self._undefined_brows) @@ -748,9 +869,16 @@ def copy_structure(self): """ Returns a copy of the MPIBlockVector structure filled with zeros """ - result = MPIBlockVector(self.nblocks, self.rank_ownership, self.mpi_comm, assert_correct_owners=False) + result = MPIBlockVector( + self.nblocks, + self.rank_ownership, + self.mpi_comm, + assert_correct_owners=False, + ) if self.is_broadcasted(): - result.finalize_block_sizes(broadcast=False, block_sizes=self.block_sizes(copy=False)) + result.finalize_block_sizes( + broadcast=False, block_sizes=self.block_sizes(copy=False) + ) for bid in self.owned_blocks: block = self.get_block(bid) if block is not None: @@ -805,13 +933,20 @@ def dot(self, other, out=None): indices = self._unique_owned_blocks if rank != 0 else self._owned_blocks local_dot_prod = 0.0 for i in indices: - local_dot_prod += self._block_vector.get_block(i).dot(other.get_block(i)) + local_dot_prod += self._block_vector.get_block(i).dot( + other.get_block(i) + ) - return self._mpiw.allreduce(local_dot_prod, op=MPI.SUM) + return self._mpiw.allreduce(local_dot_prod, op=mpi4py.MPI.SUM) elif isinstance(other, BlockVector): - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks) - return self.dot(other.toMPIBlockVector(self.rank_ownership, self.mpi_comm, assert_correct_owners=False)) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks) + return self.dot( + other.toMPIBlockVector( + self.rank_ownership, self.mpi_comm, assert_correct_owners=False + ) + ) elif isinstance(other, np.ndarray): other_bv = self.copy_structure() other_bv.copyfrom(other) @@ -838,7 +973,9 @@ def _serialize_structure(block_vector): serialized_structure.append(blk.nblocks) serialized_structure.extend(MPIBlockVector._serialize_structure(blk)) elif isinstance(blk, MPIBlockVector): - raise NotImplementedError('Operation not supported for MPIBlockVectors containing other MPIBlockVectors') + raise NotImplementedError( + 'Operation not supported for MPIBlockVectors containing other MPIBlockVectors' + ) elif isinstance(blk, np.ndarray): serialized_structure.append(-2) serialized_structure.append(blk.size) @@ -864,9 +1001,9 @@ def _create_from_serialized_structure(serialized_structure, structure_ndx, resul structure_ndx += 1 block = BlockVector(serialized_structure[structure_ndx]) structure_ndx += 1 - structure_ndx = MPIBlockVector._create_from_serialized_structure(serialized_structure, - structure_ndx, - block) + structure_ndx = MPIBlockVector._create_from_serialized_structure( + serialized_structure, structure_ndx, block + ) result.set_block(ndx, block) elif serialized_structure[structure_ndx] == -2: structure_ndx += 1 @@ -909,31 +1046,43 @@ def make_local_structure_copy(self): blk_structure.append(blk.nblocks) blk_structure.extend(self._serialize_structure(blk)) elif isinstance(blk, MPIBlockVector): - raise NotImplementedError('Operation not supported for MPIBlockVectors containing other MPIBlockVectors') + raise NotImplementedError( + 'Operation not supported for MPIBlockVectors containing other MPIBlockVectors' + ) elif isinstance(blk, np.ndarray): blk_structure.append(-2) blk_structure.append(blk.size) else: raise NotImplementedError('Unrecognized input.') length_per_block[ndx] = len(blk_structure) - serialized_structure_by_block[ndx] = np.asarray(blk_structure, dtype=np.int64) + serialized_structure_by_block[ndx] = np.asarray( + blk_structure, dtype=np.int64 + ) global_length_per_block = np.zeros(self.nblocks, dtype=np.int64) self._mpiw.Allreduce(length_per_block, global_length_per_block) - local_serialized_structure = np.zeros(global_length_per_block.sum(), dtype=np.int64) + local_serialized_structure = np.zeros( + global_length_per_block.sum(), dtype=np.int64 + ) offset = 0 block_indices_set = set(block_indices) for ndx in range(self.nblocks): if ndx in block_indices_set: - local_serialized_structure[offset: offset+global_length_per_block[ndx]] = serialized_structure_by_block[ndx] + local_serialized_structure[ + offset : offset + global_length_per_block[ndx] + ] = serialized_structure_by_block[ndx] offset += global_length_per_block[ndx] - global_serialized_structure = np.zeros(global_length_per_block.sum(), dtype=np.int64) + global_serialized_structure = np.zeros( + global_length_per_block.sum(), dtype=np.int64 + ) self._mpiw.Allreduce(local_serialized_structure, global_serialized_structure) result = BlockVector(self.nblocks) structure_ndx = 0 - self._create_from_serialized_structure(global_serialized_structure, structure_ndx, result) + self._create_from_serialized_structure( + global_serialized_structure, structure_ndx, result + ) return result @@ -963,9 +1112,11 @@ def make_local_copy(self): if ndx in block_indices: blk = self.get_block(ndx) if isinstance(blk, BlockVector): - local_data[offset: offset + self.get_block_size(ndx)] = blk.flatten() + local_data[ + offset : offset + self.get_block_size(ndx) + ] = blk.flatten() elif isinstance(blk, np.ndarray): - local_data[offset: offset + self.get_block_size(ndx)] = blk + local_data[offset : offset + self.get_block_size(ndx)] = blk else: raise ValueError('Unrecognized block type') offset += self.get_block_size(ndx) @@ -979,11 +1130,14 @@ def _binary_operation_helper(self, other, operation): assert_block_structure(self) result = self.copy_structure() if isinstance(other, MPIBlockVector) or isinstance(other, BlockVector): - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks) if isinstance(other, MPIBlockVector): - assert np.array_equal(self._rank_owner, other._rank_owner) or self._mpiw.Get_size() == 1, \ - 'MPIBlockVectors must be distributed in same processors' + assert ( + np.array_equal(self._rank_owner, other._rank_owner) + or self._mpiw.Get_size() == 1 + ), 'MPIBlockVectors must be distributed in same processors' assert self._mpiw == other._mpiw, 'Need to have same communicator' for i in self._owned_blocks: result.set_block(i, operation(self.get_block(i), other.get_block(i))) @@ -1008,7 +1162,7 @@ def _reverse_binary_operation_helper(self, other, operation): raise RuntimeError('Operation not supported by MPIBlockVector') elif np.isscalar(other): for i in self._owned_blocks: - result.set_block(i, operation(other, self.get_block(i))) + result.set_block(i, operation(other, self.get_block(i))) return result else: raise NotImplementedError('Operation not supported by MPIBlockVector') @@ -1016,11 +1170,14 @@ def _reverse_binary_operation_helper(self, other, operation): def _inplace_binary_operation_helper(self, other, operation): assert_block_structure(self) if isinstance(other, MPIBlockVector) or isinstance(other, BlockVector): - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks) + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks) if isinstance(other, MPIBlockVector): - assert np.array_equal(self._rank_owner, other._rank_owner) or self._mpiw.Get_size() == 1, \ - 'MPIBlockVectors must be distributed in same processors' + assert ( + np.array_equal(self._rank_owner, other._rank_owner) + or self._mpiw.Get_size() == 1 + ), 'MPIBlockVectors must be distributed in same processors' assert self._mpiw == other._mpiw, 'Need to have same communicator' assert_block_structure(other) else: @@ -1107,10 +1264,13 @@ def _comparison_helper(self, other, operation): result = self.copy_structure() if isinstance(other, MPIBlockVector): assert_block_structure(other) - assert self.nblocks == other.nblocks, \ - 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks) - assert np.array_equal(self._rank_owner, other._rank_owner) or self._mpiw.Get_size() == 1, \ - 'MPIBlockVectors must be distributed in same processors' + assert ( + self.nblocks == other.nblocks + ), 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks) + assert ( + np.array_equal(self._rank_owner, other._rank_owner) + or self._mpiw.Get_size() == 1 + ), 'MPIBlockVectors must be distributed in same processors' assert self._mpiw == other._mpiw, 'Need to have same communicator' for i in self._owned_blocks: @@ -1153,21 +1313,24 @@ def __contains__(self, item): for i in self._owned_blocks: if other in self.get_block(i): contains = True - return bool(self._mpiw.allreduce(contains, op=MPI.SUM)) + return bool(self._mpiw.allreduce(contains, op=mpi4py.MPI.SUM)) else: raise NotImplementedError('Operation not supported by MPIBlockVector') def get_block(self, key): owner = self._rank_owner[key] rank = self._mpiw.Get_rank() - assert owner == rank or owner < 0, 'Block {} not own by processor {}'.format(key, rank) + assert owner == rank or owner < 0, 'Block {} not own by processor {}'.format( + key, rank + ) return self._block_vector.get_block(key) def set_block(self, key, value): owner = self._rank_owner[key] rank = self._mpiw.Get_rank() - assert owner == rank or owner < 0, \ - 'Block {} not owned by processor {}'.format(key, rank) + assert owner == rank or owner < 0, 'Block {} not owned by processor {}'.format( + key, rank + ) self._block_vector.set_block(key, value) self._set_block_size(key, value.size) @@ -1178,7 +1341,9 @@ def _has_equal_structure(self, other): if self.nblocks != other.nblocks: return False if isinstance(other, MPIBlockVector): - if (self.owned_blocks != other.owned_blocks).any() and self._mpiw.Get_size() != 1: + if ( + self.owned_blocks != other.owned_blocks + ).any() and self._mpiw.Get_size() != 1: return False for ndx in self.owned_blocks: block1 = self.get_block(ndx) @@ -1194,16 +1359,22 @@ def _has_equal_structure(self, other): def __getitem__(self, item): if not self._has_equal_structure(item): - raise ValueError('MIPBlockVector.__getitem__ only accepts slices in the form of MPIBlockVectors of the same structure') + raise ValueError( + 'MIPBlockVector.__getitem__ only accepts slices in the form of MPIBlockVectors of the same structure' + ) res = self.copy_structure() for ndx in self.owned_blocks: block = self.get_block(ndx) res.set_block(ndx, block[item.get_block(ndx)]) def __setitem__(self, key, value): - if not (self._has_equal_structure(key) and (self._has_equal_structure(value) or np.isscalar(value))): + if not ( + self._has_equal_structure(key) + and (self._has_equal_structure(value) or np.isscalar(value)) + ): raise ValueError( - 'MPIBlockVector.__setitem__ only accepts slices in the form of MPIBlockVectors of the same structure') + 'MPIBlockVector.__setitem__ only accepts slices in the form of MPIBlockVectors of the same structure' + ) if np.isscalar(value): for ndx in self.owned_blocks: block = self.get_block(ndx) @@ -1229,8 +1400,7 @@ def pprint(self, root=0): msg = self.__repr__() + '\n' num_processors = self._mpiw.Get_size() local_mask = self._owned_mask.flatten() - receive_data = np.empty(num_processors * self.nblocks, - dtype=bool) + receive_data = np.empty(num_processors * self.nblocks, dtype=bool) self._mpiw.Allgather(local_mask, receive_data) processor_to_mask = np.split(receive_data, num_processors) @@ -1246,9 +1416,9 @@ def pprint(self, root=0): disp_owner = self._rank_owner[bid] if self._rank_owner[bid] >= 0 else 'All' is_none = '' if global_mask[bid] else 'None' - repn = 'Owned by {} Shape({},){}'.format(disp_owner, - self._brow_lengths[bid], - is_none) + repn = 'Owned by {} Shape({},){}'.format( + disp_owner, self._brow_lengths[bid], is_none + ) msg += '{}: {}\n'.format(bid, repn) if self._mpiw.Get_rank() == root: print(msg) diff --git a/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py b/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py index a30aceefacc..7402881a285 100644 --- a/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py +++ b/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py @@ -11,19 +11,23 @@ import pyomo.common.unittest as unittest from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available + numpy as np, + numpy_available, + scipy, + scipy_available, ) from pyomo.common.dependencies.scipy import sparse as sp if not (numpy_available and scipy_available): - raise unittest.SkipTest( - "Pynumero needs scipy and numpy to run BlockMatrix tests") + raise unittest.SkipTest("Pynumero needs scipy and numpy to run BlockMatrix tests") from scipy.sparse import coo_matrix, bmat -from pyomo.contrib.pynumero.sparse import (BlockMatrix, - BlockVector, - NotFullyDefinedBlockMatrixError) +from pyomo.contrib.pynumero.sparse import ( + BlockMatrix, + BlockVector, + NotFullyDefinedBlockMatrixError, +) import warnings @@ -31,7 +35,7 @@ class TestBlockMatrix(unittest.TestCase): def setUp(self): row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) self.block_m = m @@ -60,7 +64,7 @@ def test_bshape(self): self.assertEqual(self.basic_m.bshape, (2, 2)) def test_shape(self): - shape = (self.block_m.shape[0]*2, self.block_m.shape[1]*2) + shape = (self.block_m.shape[0] * 2, self.block_m.shape[1] * 2) self.assertEqual(self.basic_m.shape, shape) def test_tocoo(self): @@ -79,7 +83,6 @@ def test_tocoo(self): self.assertListEqual(ddata.tolist(), sdata.tolist()) def test_tocsr(self): - block = self.block_m m = self.basic_m scipy_mat = bmat([[block, block], [None, block]], format='csr') @@ -110,7 +113,6 @@ def test_tocsc(self): self.assertListEqual(ddata.tolist(), sdata.tolist()) def test_multiply(self): - # check scalar multiplication block = self.block_m m = self.basic_m * 5.0 @@ -174,7 +176,6 @@ def test_mul_sparse_matrix(self): self.assertTrue(np.allclose(flat_prod.toarray(), prod.toarray())) def test_getitem(self): - m = BlockMatrix(3, 3) for i in range(3): for j in range(3): @@ -184,7 +185,6 @@ def test_getitem(self): self.assertEqual(m.get_block(0, 1).shape, self.block_m.shape) def test_setitem(self): - m = BlockMatrix(2, 2) m.set_block(0, 1, self.block_m) self.assertFalse(m.is_empty_block(0, 1)) @@ -202,7 +202,7 @@ def test_coo_data(self): # ToDo: add tests for block matrices with coo and csc matrices def test_nnz(self): - self.assertEqual(self.block_m.nnz*3, self.basic_m.nnz) + self.assertEqual(self.block_m.nnz * 3, self.basic_m.nnz) def test_block_shapes(self): shapes = self.basic_m.block_shapes() @@ -224,13 +224,12 @@ def test_dot(self): self.assertEqual(block_res.bshape[0], 2) m = BlockMatrix(2, 2) - sub_m = np.array([[1, 0], - [0, 1]]) + sub_m = np.array([[1, 0], [0, 1]]) sub_m = coo_matrix(sub_m) m.set_block(0, 1, sub_m.copy()) m.set_block(1, 0, sub_m.copy()) x = np.arange(4) - res = m*x + res = m * x self.assertTrue(np.allclose(res.flatten(), np.array([2, 3, 0, 1]))) def test_reset_brow(self): @@ -244,7 +243,6 @@ def test_reset_bcol(self): self.assertIsNone(self.basic_m.get_block(j, 0)) def test_to_scipy(self): - block = self.block_m m = self.basic_m scipy_mat = bmat([[block, block], [None, block]], format='coo') @@ -266,7 +264,6 @@ def test_has_undefined_col_sizes(self): self.assertFalse(self.basic_m.has_undefined_col_sizes()) def test_transpose(self): - A_dense = self.basic_m.toarray() A_block = self.basic_m A_dense_t = A_dense.transpose() @@ -283,7 +280,6 @@ def test_repr(self): self.assertEqual(len(self.basic_m.__repr__()), 17) def test_set_item(self): - self.basic_m.set_block(1, 0, None) self.assertIsNone(self.basic_m.get_block(1, 0)) self.basic_m.set_block(1, 1, None) @@ -293,7 +289,6 @@ def test_set_item(self): self.assertEqual(self.basic_m._brow_lengths[1], self.block_m.shape[0]) def test_add(self): - A_dense = self.basic_m.toarray() A_block = self.basic_m @@ -312,7 +307,7 @@ def test_add(self): r = A_block.tocoo() + A_block dense_res = A_block.toarray() + A_block.toarray() - #self.assertIsInstance(r, BlockMatrix) + # self.assertIsInstance(r, BlockMatrix) self.assertTrue(np.allclose(r.toarray(), dense_res)) r = A_block + 2 * A_block.tocoo() @@ -322,7 +317,7 @@ def test_add(self): r = 2 * A_block.tocoo() + A_block dense_res = 2 * A_block.toarray() + A_block.toarray() - #self.assertIsInstance(r, BlockMatrix) + # self.assertIsInstance(r, BlockMatrix) self.assertTrue(np.allclose(r.toarray(), dense_res)) r = A_block.T + A_block.tocoo() @@ -357,7 +352,6 @@ def test_add_copy(self): self.assertTrue(np.allclose(res.toarray(), self.dense + self.dense.transpose())) def test_sub(self): - A_dense = self.basic_m.toarray() A_block = self.basic_m A_block2 = 2 * self.basic_m @@ -401,10 +395,11 @@ def test_sub_copy(self): res = bm - bmT self.assertIsNot(res.get_block(1, 0), bmT.get_block(1, 0)) self.assertIsNot(res.get_block(0, 1), bm.get_block(0, 1)) - self.assertTrue(np.allclose(res.toarray(), self.dense - 2 * self.dense.transpose())) + self.assertTrue( + np.allclose(res.toarray(), self.dense - 2 * self.dense.transpose()) + ) def test_neg(self): - A_dense = self.basic_m.toarray() A_block = self.basic_m @@ -428,12 +423,16 @@ def test_copyfrom(self): self.assertTrue(np.allclose(bm.toarray(), self.dense)) bm.get_block(0, 0).data.fill(1.0) - self.assertAlmostEqual(bm0.toarray()[0, 0], 2) # this tests that a deep copy was done + self.assertAlmostEqual( + bm0.toarray()[0, 0], 2 + ) # this tests that a deep copy was done self.assertAlmostEqual(bm.toarray()[0, 0], 1) bm.copyfrom(bm0, deep=False) bm.get_block(0, 0).data.fill(1.0) - self.assertAlmostEqual(bm0.toarray()[0, 0], 1) # this tests that a shallow copy was done + self.assertAlmostEqual( + bm0.toarray()[0, 0], 1 + ) # this tests that a shallow copy was done self.assertAlmostEqual(bm.toarray()[0, 0], 1) def test_copyto(self): @@ -461,12 +460,16 @@ def test_copyto(self): self.assertTrue(np.allclose(bm.toarray(), self.dense)) bm.get_block(0, 0).data.fill(1.0) - self.assertAlmostEqual(bm0.toarray()[0, 0], 2) # this tests that a deep copy was done + self.assertAlmostEqual( + bm0.toarray()[0, 0], 2 + ) # this tests that a deep copy was done self.assertAlmostEqual(bm.toarray()[0, 0], 1) bm0.copyto(bm, deep=False) bm.get_block(0, 0).data.fill(1.0) - self.assertAlmostEqual(bm0.toarray()[0, 0], 1) # this tests that a shallow copy was done + self.assertAlmostEqual( + bm0.toarray()[0, 0], 1 + ) # this tests that a shallow copy was done self.assertAlmostEqual(bm.toarray()[0, 0], 1) def test_copy(self): @@ -484,7 +487,6 @@ def test_copy(self): self.assertAlmostEqual(bm.toarray()[0, 0], 1) def test_iadd(self): - A_dense = self.basic_m.toarray() A_block = self.basic_m.copy() A_dense += A_dense @@ -509,7 +511,6 @@ def test_iadd(self): A_block += 1.0 def test_isub(self): - A_dense = self.basic_m.toarray() A_block = self.basic_m A_dense -= A_dense @@ -534,7 +535,6 @@ def test_isub(self): A_block -= 1.0 def test_imul(self): - A_dense = self.basic_m.toarray() A_block = self.basic_m print(A_dense) @@ -542,7 +542,7 @@ def test_imul(self): A_dense *= 3 print(A_dense) print(A_block.toarray()) - A_block *= 3. + A_block *= 3.0 print(A_dense) print(A_block.toarray()) @@ -558,11 +558,10 @@ def test_imul(self): A_block *= A_block.toarray() def test_itruediv(self): - A_dense = self.basic_m.toarray() A_block = self.basic_m.copy() A_dense /= 3 - A_block /= 3. + A_block /= 3.0 self.assertTrue(np.allclose(A_block.toarray(), A_dense)) @@ -576,11 +575,10 @@ def test_itruediv(self): A_block /= A_block.toarray() def test_truediv(self): - A_dense = self.basic_m.toarray() A_block = self.basic_m - B_block = A_block / 3. - self.assertTrue(np.allclose(B_block.toarray(), A_dense/3.)) + B_block = A_block / 3.0 + self.assertTrue(np.allclose(B_block.toarray(), A_dense / 3.0)) with self.assertRaises(Exception) as context: b = A_block / A_block @@ -592,10 +590,9 @@ def test_truediv(self): b = A_block / A_block.toarray() with self.assertRaises(Exception) as context: - B_block = 3./ A_block + B_block = 3.0 / A_block def test_eq(self): - with warnings.catch_warnings(): warnings.simplefilter("ignore") A_flat = self.basic_m.tocoo() @@ -604,23 +601,18 @@ def test_eq(self): A_bool_flat = A_flat == 2.0 A_bool_block = A_block == 2.0 - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = A_flat == A_flat A_bool_block = A_block == A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) - + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = 2.0 != A_flat A_bool_block = 2.0 != A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) def test_ne(self): - with warnings.catch_warnings(): warnings.simplefilter("ignore") A_flat = self.basic_m.tocoo() @@ -628,22 +620,18 @@ def test_ne(self): A_bool_flat = A_flat != 2.0 A_bool_block = A_block != 2.0 - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = 2.0 != A_flat A_bool_block = 2.0 != A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = A_flat != A_flat A_bool_block = A_block != A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) def test_le(self): - with warnings.catch_warnings(): warnings.simplefilter("ignore") A_flat = self.basic_m.tocoo() @@ -651,8 +639,7 @@ def test_le(self): A_bool_flat = A_flat <= 2.0 A_bool_block = A_block <= 2.0 - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) # A_bool_flat = 2.0 <= A_flat # A_bool_block = 2.0 <= A_block @@ -662,22 +649,18 @@ def test_le(self): A_bool_flat = A_flat <= A_flat A_bool_block = A_block <= A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = A_flat <= 2 * A_flat A_bool_block = A_block <= 2 * A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = 2.0 >= A_flat A_bool_block = 2.0 >= A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) def test_lt(self): - with warnings.catch_warnings(): warnings.simplefilter("ignore") A_flat = self.basic_m.tocoo() @@ -686,8 +669,7 @@ def test_lt(self): A_bool_flat = A_flat < 2.0 A_bool_block = A_block < 2.0 - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) # A_bool_flat = 2.0 <= A_flat # A_bool_block = 2.0 <= A_block @@ -697,22 +679,18 @@ def test_lt(self): A_bool_flat = A_flat < A_flat A_bool_block = A_block < A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = A_flat < 2 * A_flat A_bool_block = A_block < 2 * A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = 2.0 > A_flat A_bool_block = 2.0 > A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) def test_ge(self): - with warnings.catch_warnings(): warnings.simplefilter("ignore") A_flat = self.basic_m.tocoo() @@ -720,28 +698,23 @@ def test_ge(self): A_bool_flat = A_flat >= 2.0 A_bool_block = A_block >= 2.0 - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = 2.0 <= A_flat A_bool_block = 2.0 <= A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = A_flat >= A_flat A_bool_block = A_block >= A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) A_bool_flat = A_flat >= 0.5 * A_flat A_bool_block = A_block >= 0.5 * A_block - self.assertTrue(np.allclose(A_bool_flat.toarray(), - A_bool_block.toarray())) + self.assertTrue(np.allclose(A_bool_flat.toarray(), A_bool_block.toarray())) def test_gt(self): - with warnings.catch_warnings(): warnings.simplefilter("ignore") A = self.basic_m.copy() @@ -752,10 +725,9 @@ def test_gt(self): self.assertTrue(np.allclose(res.toarray(), expected)) def test_abs(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = -1.0 * np.array([2., 1, 3, 4, 5, 1]) + data = -1.0 * np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) self.block_m = m @@ -769,46 +741,37 @@ def test_abs(self): abs_mat = abs(bm) self.assertIsInstance(abs_mat, BlockMatrix) - self.assertTrue(np.allclose(abs_flat.toarray(), - abs_mat.toarray())) + self.assertTrue(np.allclose(abs_flat.toarray(), abs_mat.toarray())) def test_getcol(self): - m = self.basic_m flat_mat = m.tocoo() flat_col = flat_mat.getcol(2) block_col = m.getcol(2) - self.assertTrue(np.allclose(flat_col.toarray().flatten(), - block_col.flatten())) + self.assertTrue(np.allclose(flat_col.toarray().flatten(), block_col.flatten())) flat_col = flat_mat.getcol(4) block_col = m.getcol(4) - self.assertTrue(np.allclose(flat_col.toarray().flatten(), - block_col.flatten())) + self.assertTrue(np.allclose(flat_col.toarray().flatten(), block_col.flatten())) flat_col = flat_mat.getcol(6) block_col = m.getcol(6) - self.assertTrue(np.allclose(flat_col.toarray().flatten(), - block_col.flatten())) + self.assertTrue(np.allclose(flat_col.toarray().flatten(), block_col.flatten())) def test_getrow(self): - m = self.basic_m flat_mat = m.tocoo() flat_row = flat_mat.getrow(2) block_row = m.getrow(2) - self.assertTrue(np.allclose(flat_row.toarray().flatten(), - block_row.flatten())) + self.assertTrue(np.allclose(flat_row.toarray().flatten(), block_row.flatten())) flat_row = flat_mat.getrow(7) block_row = m.getrow(7) - self.assertTrue(np.allclose(flat_row.toarray().flatten(), - block_row.flatten())) + self.assertTrue(np.allclose(flat_row.toarray().flatten(), block_row.flatten())) def test_nonzero(self): - m = self.basic_m flat_mat = m.tocoo() flat_row, flat_col = flat_mat.nonzero() @@ -816,8 +779,7 @@ def test_nonzero(self): block_row, block_col = m.nonzero() def test_get_block_column_index(self): - - m = BlockMatrix(2,4) + m = BlockMatrix(2, 4) m.set_block(0, 0, coo_matrix((3, 2))) m.set_block(0, 1, coo_matrix((3, 4))) m.set_block(0, 2, coo_matrix((3, 3))) @@ -832,8 +794,7 @@ def test_get_block_column_index(self): self.assertEqual(bcol, 3) def test_get_block_row_index(self): - - m = BlockMatrix(2,4) + m = BlockMatrix(2, 4) m.set_block(0, 0, coo_matrix((3, 2))) m.set_block(0, 1, coo_matrix((3, 4))) m.set_block(0, 2, coo_matrix((3, 3))) @@ -922,10 +883,10 @@ def test_dimensions(self): self.assertFalse(bm.has_undefined_row_sizes()) self.assertFalse(bm.has_undefined_col_sizes()) self.assertEqual(bm.shape, (8, 8)) - self.assertTrue(np.all(bm.row_block_sizes() == np.ones(2)*4)) - self.assertTrue(np.all(bm.col_block_sizes() == np.ones(2)*4)) - self.assertTrue(np.all(bm.row_block_sizes(copy=False) == np.ones(2)*4)) - self.assertTrue(np.all(bm.col_block_sizes(copy=False) == np.ones(2)*4)) + self.assertTrue(np.all(bm.row_block_sizes() == np.ones(2) * 4)) + self.assertTrue(np.all(bm.col_block_sizes() == np.ones(2) * 4)) + self.assertTrue(np.all(bm.row_block_sizes(copy=False) == np.ones(2) * 4)) + self.assertTrue(np.all(bm.col_block_sizes(copy=False) == np.ones(2) * 4)) def test_transpose_with_empty_rows(self): m = BlockMatrix(2, 2) diff --git a/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py b/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py index 844d696c71e..2d1bc7b640d 100644 --- a/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py +++ b/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py @@ -13,20 +13,22 @@ import pyomo.common.unittest as unittest from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy_available + numpy as np, + numpy_available, + scipy_available, ) + if not (numpy_available and scipy_available): - raise unittest.SkipTest( - "Pynumero needs scipy and numpy to run BlockVector tests") + raise unittest.SkipTest("Pynumero needs scipy and numpy to run BlockVector tests") from pyomo.contrib.pynumero.sparse.block_vector import ( - BlockVector, NotFullyDefinedBlockVectorError + BlockVector, + NotFullyDefinedBlockVectorError, ) -class TestBlockVector(unittest.TestCase): +class TestBlockVector(unittest.TestCase): def test_constructor(self): - v = BlockVector(2) self.assertEqual(v.nblocks, 2) self.assertEqual(v.bshape, (2,)) @@ -44,7 +46,6 @@ def test_constructor(self): BlockVector('hola') def setUp(self): - self.ones = BlockVector(3) self.list_sizes_ones = [2, 4, 3] for idx, s in enumerate(self.list_sizes_ones): @@ -57,8 +58,8 @@ def test_dot(self): v1 = self.ones self.assertEqual(v1.dot(v1), v1.size) v2 = v1.clone(3.3) - self.assertAlmostEqual(v1.dot(v2), v1.size*3.3) - self.assertAlmostEqual(v2.dot(v1.flatten()), v1.size*3.3) + self.assertAlmostEqual(v1.dot(v2), v1.size * 3.3) + self.assertAlmostEqual(v2.dot(v1.flatten()), v1.size * 3.3) with self.assertRaises(Exception) as context: v1.dot(1.0) @@ -78,7 +79,6 @@ def test_sum(self): self.assertEqual(v.sum(), 46) def test_all(self): - v = BlockVector(2) a = np.ones(5) b = np.ones(3) @@ -94,7 +94,6 @@ def test_all(self): self.assertFalse(v.all()) def test_any(self): - v = BlockVector(2) a = np.zeros(5) b = np.ones(3) @@ -138,12 +137,11 @@ def test_choose(self): v.choose(1) def test_clip(self): - v = BlockVector(3) v2 = BlockVector(3) a = np.zeros(5) - b = np.ones(3)*5.0 - c = np.ones(3)*10.0 + b = np.ones(3) * 5.0 + c = np.ones(3) * 10.0 v.set_block(0, a) v.set_block(1, b) @@ -188,7 +186,6 @@ def test_compress(self): v.compress(1.0) def test_nonzero(self): - v = BlockVector(2) a = np.ones(5) b = np.zeros(9) @@ -205,7 +202,6 @@ def test_nonzero(self): self.assertTrue(np.allclose(blk, v2.get_block(bid))) def test_ptp(self): - v = BlockVector(2) a = np.arange(5) b = np.arange(9) @@ -216,10 +212,9 @@ def test_ptp(self): self.assertEqual(vv.ptp(), v.ptp()) def test_round(self): - v = BlockVector(2) - a = np.ones(5)*1.1 - b = np.ones(9)*1.1 + a = np.ones(5) * 1.1 + b = np.ones(9) * 1.1 v.set_block(0, a) v.set_block(1, b) @@ -233,7 +228,6 @@ def test_round(self): self.assertTrue(np.allclose(blk, v.get_block(bid))) def test_std(self): - v = BlockVector(2) a = np.arange(5) b = np.arange(9) @@ -438,7 +432,6 @@ def test_argmin(self): self.assertEqual(argmin, 4) def test_cumprod(self): - v = BlockVector(3) v.set_block(0, np.arange(1, 5)) v.set_block(1, np.arange(5, 10)) @@ -465,24 +458,23 @@ def test_clone(self): w = v.clone() self.assertListEqual(w.tolist(), v.tolist()) x = v.clone(4) - self.assertListEqual(x.tolist(), [4]*v.size) + self.assertListEqual(x.tolist(), [4] * v.size) y = x.clone(copy=False) y.get_block(2)[-1] = 6 - d = np.ones(y.size)*4 + d = np.ones(y.size) * 4 d[-1] = 6 self.assertListEqual(y.tolist(), d.tolist()) self.assertListEqual(x.tolist(), d.tolist()) def test_add(self): - v = self.ones v1 = self.ones result = v + v1 - self.assertListEqual(result.tolist(), [2]*v.size) + self.assertListEqual(result.tolist(), [2] * v.size) result = v + 2 self.assertListEqual(result.tolist(), [3] * v.size) result = v + v1.flatten() - self.assertTrue(np.allclose(result.flatten(), v.flatten()+v1.flatten())) + self.assertTrue(np.allclose(result.flatten(), v.flatten() + v1.flatten())) with self.assertRaises(Exception) as context: result = v + 'hola' @@ -547,7 +539,7 @@ def test_truediv(self): v = self.ones v1 = v.clone(5.0, copy=True) result = v / v1 - self.assertListEqual(result.tolist(), [1.0/5.0] * v.size) + self.assertListEqual(result.tolist(), [1.0 / 5.0] * v.size) result = v / v1.flatten() self.assertTrue(np.allclose(result.flatten(), v.flatten() / v1.flatten())) result = 5.0 / v1 @@ -587,18 +579,18 @@ def test_rfloordiv(self): result = 2.0 // v1 self.assertTrue(np.allclose(result.flatten(), np.zeros(v1.size))) result = v1 // 2.0 - self.assertTrue(np.allclose(result.flatten(), np.ones(v1.size)*2.0)) + self.assertTrue(np.allclose(result.flatten(), np.ones(v1.size) * 2.0)) def test_iadd(self): v = self.ones v += 3 - self.assertListEqual(v.tolist(), [4]*v.size) + self.assertListEqual(v.tolist(), [4] * v.size) v.fill(1.0) v += v self.assertListEqual(v.tolist(), [2] * v.size) v.fill(1.0) - v += np.ones(v.size)*3 - self.assertTrue(np.allclose(v.flatten(), np.ones(v.size)*4)) + v += np.ones(v.size) * 3 + self.assertTrue(np.allclose(v.flatten(), np.ones(v.size) * 4)) v = BlockVector(2) a = np.ones(5) @@ -729,7 +721,7 @@ def test_imul(self): def test_itruediv(self): v = self.ones v /= 3 - self.assertTrue(np.allclose(v.flatten(), np.ones(v.size)/3)) + self.assertTrue(np.allclose(v.flatten(), np.ones(v.size) / 3)) v.fill(1.0) v /= v self.assertTrue(np.allclose(v.flatten(), np.ones(v.size))) @@ -790,7 +782,7 @@ def test_setitem(self): def test_set_blocks(self): v = self.ones - blocks = [np.ones(s)*i for i, s in enumerate(self.list_sizes_ones)] + blocks = [np.ones(s) * i for i, s in enumerate(self.list_sizes_ones)] v.set_blocks(blocks) for i, s in enumerate(self.list_sizes_ones): self.assertEqual(v.get_block(i).size, s) @@ -816,7 +808,7 @@ def test_copyfrom(self): v2 = BlockVector(len(self.list_sizes_ones)) for i, s in enumerate(self.list_sizes_ones): - v2.set_block(i, np.ones(s)*i) + v2.set_block(i, np.ones(s) * i) v.copyfrom(v2) for idx, blk in enumerate(v2): self.assertListEqual(blk.tolist(), v2.get_block(idx).tolist()) @@ -825,7 +817,7 @@ def test_copyfrom(self): v4 = v.clone(2) v3.set_block(0, v4) v3.set_block(1, np.zeros(3)) - self.assertListEqual(v3.tolist(), v4.tolist() + [0]*3) + self.assertListEqual(v3.tolist(), v4.tolist() + [0] * 3) def test_copyto(self): v = self.ones @@ -840,7 +832,6 @@ def test_copyto(self): self.assertListEqual(v.tolist(), v2.tolist()) def test_gt(self): - v = BlockVector(2) a = np.ones(5) b = np.zeros(9) @@ -865,7 +856,6 @@ def test_gt(self): self.assertTrue(np.allclose(blk, v.get_block(bid))) def test_ge(self): - v = BlockVector(2) a = np.ones(5) b = np.zeros(9) @@ -894,7 +884,6 @@ def test_ge(self): self.assertTrue(np.allclose(blk, v.get_block(bid))) def test_lt(self): - v = BlockVector(2) a = np.ones(5) b = np.zeros(9) @@ -902,8 +891,8 @@ def test_lt(self): v.set_block(1, b) flags = v < 1 - v.set_block(0, a-1) - v.set_block(1, b+1) + v.set_block(0, a - 1) + v.set_block(1, b + 1) self.assertEqual(v.nblocks, flags.nblocks) for bid, blk in enumerate(flags): self.assertTrue(np.allclose(blk, v.get_block(bid))) @@ -928,7 +917,6 @@ def test_lt(self): self.assertTrue(np.allclose(blk, v.get_block(bid))) def test_le(self): - v = BlockVector(2) a = np.ones(5) b = np.zeros(9) @@ -956,7 +944,6 @@ def test_le(self): self.assertTrue(np.allclose(blk, vv.get_block(bid))) def test_eq(self): - v = BlockVector(2) a = np.ones(5) b = np.zeros(9) @@ -981,7 +968,6 @@ def test_eq(self): self.assertTrue(np.allclose(blk, v.get_block(bid))) def test_ne(self): - v = BlockVector(2) a = np.ones(5) b = np.zeros(9) @@ -1006,7 +992,6 @@ def test_ne(self): self.assertTrue(np.allclose(blk, v.get_block(bid))) def test_contains(self): - v = BlockVector(2) a = np.ones(5) b = np.zeros(9) @@ -1015,6 +1000,7 @@ def test_contains(self): self.assertTrue(0 in v) self.assertFalse(3 in v) + # ToDo: add tests for block vectors with block vectors in them # ToDo: vector comparisons def test_copy(self): @@ -1037,7 +1023,6 @@ def test_copy_structure(self): self.assertEqual(v.get_block(1).size, v2.get_block(1).size) def test_unary_ufuncs(self): - v = BlockVector(2) a = np.ones(3) * 0.5 b = np.ones(2) * 0.8 @@ -1046,16 +1031,45 @@ def test_unary_ufuncs(self): v2 = BlockVector(2) - unary_funcs = [np.log10, np.sin, np.cos, np.exp, np.ceil, - np.floor, np.tan, np.arctan, np.arcsin, - np.arccos, np.sinh, np.cosh, np.abs, - np.tanh, np.arcsinh, np.arctanh, - np.fabs, np.sqrt, np.log, np.log2, - np.absolute, np.isfinite, np.isinf, np.isnan, - np.log1p, np.logical_not, np.exp2, np.expm1, - np.sign, np.rint, np.square, np.positive, - np.negative, np.rad2deg, np.deg2rad, - np.conjugate, np.reciprocal] + unary_funcs = [ + np.log10, + np.sin, + np.cos, + np.exp, + np.ceil, + np.floor, + np.tan, + np.arctan, + np.arcsin, + np.arccos, + np.sinh, + np.cosh, + np.abs, + np.tanh, + np.arcsinh, + np.arctanh, + np.fabs, + np.sqrt, + np.log, + np.log2, + np.absolute, + np.isfinite, + np.isinf, + np.isnan, + np.log1p, + np.logical_not, + np.exp2, + np.expm1, + np.sign, + np.rint, + np.square, + np.positive, + np.negative, + np.rad2deg, + np.deg2rad, + np.conjugate, + np.reciprocal, + ] for fun in unary_funcs: v2.set_block(0, fun(v.get_block(0))) @@ -1078,7 +1092,6 @@ def test_unary_ufuncs(self): np.cbrt(v) def test_reduce_ufuncs(self): - v = BlockVector(2) a = np.ones(3) * 0.5 b = np.ones(2) * 0.8 @@ -1094,7 +1107,6 @@ def test_reduce_ufuncs(self): self.assertAlmostEqual(fun(v), fun(v.flatten())) def test_binary_ufuncs(self): - v = BlockVector(2) a = np.ones(3) * 0.5 b = np.ones(2) * 0.8 @@ -1107,13 +1119,27 @@ def test_binary_ufuncs(self): v2.set_block(0, a2) v2.set_block(1, b2) - binary_ufuncs = [np.add, np.multiply, np.divide, np.subtract, - np.greater, np.greater_equal, np.less, - np.less_equal, np.not_equal, - np.maximum, np.minimum, - np.fmax, np.fmin, np.equal, - np.logaddexp, np.logaddexp2, np.remainder, - np.heaviside, np.hypot] + binary_ufuncs = [ + np.add, + np.multiply, + np.divide, + np.subtract, + np.greater, + np.greater_equal, + np.less, + np.less_equal, + np.not_equal, + np.maximum, + np.minimum, + np.fmax, + np.fmin, + np.equal, + np.logaddexp, + np.logaddexp2, + np.remainder, + np.heaviside, + np.hypot, + ] for fun in binary_ufuncs: flat_res = fun(v.flatten(), v2.flatten()) diff --git a/pyomo/contrib/pynumero/sparse/tests/test_intrinsics.py b/pyomo/contrib/pynumero/sparse/tests/test_intrinsics.py index 1fee07b660d..0768442c2c4 100644 --- a/pyomo/contrib/pynumero/sparse/tests/test_intrinsics.py +++ b/pyomo/contrib/pynumero/sparse/tests/test_intrinsics.py @@ -12,30 +12,32 @@ import pyomo.common.unittest as unittest from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy_available + numpy as np, + numpy_available, + scipy_available, ) + if not (numpy_available and scipy_available): raise unittest.SkipTest( - "Pynumero needs scipy and numpy to run Sparse intrinsict tests") + "Pynumero needs scipy and numpy to run Sparse intrinsic tests" + ) from pyomo.contrib.pynumero.sparse import BlockVector import pyomo.contrib.pynumero as pn class TestSparseIntrinsics(unittest.TestCase): - def setUp(self): self.v1 = np.array([1.1, 2.2, 3.3]) self.v2 = np.array([4.4, 5.5, 6.6, 7.7]) - self.v3 = np.array([1.1, 2.2, 3.3])*2 - self.v4 = np.array([4.4, 5.5, 6.6, 7.7])*2 + self.v3 = np.array([1.1, 2.2, 3.3]) * 2 + self.v4 = np.array([4.4, 5.5, 6.6, 7.7]) * 2 self.bv = BlockVector(2) self.bv2 = BlockVector(2) self.bv.set_blocks([self.v1, self.v2]) self.bv2.set_blocks([self.v3, self.v4]) def test_where(self): - bv = self.bv condition = bv >= 4.5 res = pn.where(condition)[0] @@ -79,7 +81,6 @@ def test_where(self): self.assertTrue(np.allclose(res.flatten(), res_flat)) def test_isin(self): - bv = self.bv test_bv = BlockVector(2) a = np.array([1.1, 3.3]) @@ -116,7 +117,6 @@ def test_isin(self): # ToDo: try np.copy on a blockvector def test_intersect1d(self): - vv1 = np.array([1.1, 3.3]) vv2 = np.array([4.4, 7.7]) bvv = BlockVector(2) @@ -136,7 +136,6 @@ def test_intersect1d(self): self.assertTrue(np.allclose(res.get_block(1), np.array([7.7]))) def test_setdiff1d(self): - vv1 = np.array([1.1, 3.3]) vv2 = np.array([4.4, 7.7]) bvv = BlockVector(2) diff --git a/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_matrix.py b/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_matrix.py index bc7374126ef..1415636c50d 100644 --- a/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_matrix.py +++ b/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_matrix.py @@ -13,19 +13,20 @@ import pyomo.common.unittest as unittest from pyomo.contrib.pynumero.dependencies import ( - numpy_available, scipy_available, numpy as np + numpy_available, + scipy_available, + numpy as np, ) -SKIPTESTS=[] +SKIPTESTS = [] if numpy_available and scipy_available: from scipy.sparse import coo_matrix, bmat, rand else: - SKIPTESTS.append( - "Pynumero needs scipy and numpy>=1.13.0 to run BlockMatrix tests" - ) + SKIPTESTS.append("Pynumero needs scipy and numpy>=1.13.0 to run BlockMatrix tests") try: from mpi4py import MPI + comm = MPI.COMM_WORLD if comm.Get_size() < 3: SKIPTESTS.append( @@ -38,13 +39,13 @@ from pyomo.contrib.pynumero.sparse import BlockVector, BlockMatrix from pyomo.contrib.pynumero.sparse.mpi_block_vector import MPIBlockVector from pyomo.contrib.pynumero.sparse.mpi_block_matrix import ( - MPIBlockMatrix, NotFullyDefinedBlockMatrixError + MPIBlockMatrix, + NotFullyDefinedBlockMatrixError, ) @unittest.pytest.mark.mpi class TestMPIBlockMatrix(unittest.TestCase): - # Because the setUpClass is called before decorators around the # class itself, we need to put the skipIf on the class setup and not # the class. @@ -56,7 +57,7 @@ def setUpClass(cls): row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() @@ -106,7 +107,7 @@ def setUpClass(cls): row = np.array([0, 1, 2, 3]) col = np.array([0, 1, 0, 1]) - data = np.array([1., 1., 1., 1.]) + data = np.array([1.0, 1.0, 1.0, 1.0]) m2 = coo_matrix((data, (row, col)), shape=(4, 2)) rank_ownership = [[0, -1, 0], [-1, 1, -1]] @@ -161,27 +162,29 @@ def test_coo_data(self): self.square_mpi_mat.coo_data() def test_getitem(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() with warnings.catch_warnings(): warnings.simplefilter("ignore") if rank == 0: - self.assertTrue((m == self.square_mpi_mat.get_block(0, 0)).toarray().all()) + self.assertTrue( + (m == self.square_mpi_mat.get_block(0, 0)).toarray().all() + ) if rank == 1: - self.assertTrue((m == self.square_mpi_mat.get_block(1, 1)).toarray().all()) + self.assertTrue( + (m == self.square_mpi_mat.get_block(1, 1)).toarray().all() + ) self.assertTrue((m == self.square_mpi_mat2.get_block(0, 1)).toarray().all()) def test_setitem(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() @@ -201,7 +204,6 @@ def test_nnz(self): self.assertEqual(self.rectangular_mpi_mat.nnz, 16) def test_block_shapes(self): - m, n = self.square_mpi_mat.bshape mpi_shapes = self.square_mpi_mat.block_shapes() serial_shapes = self.square_serial_mat.block_shapes() @@ -210,10 +212,9 @@ def test_block_shapes(self): self.assertEqual(serial_shapes[i][j], mpi_shapes[i][j]) def test_reset_brow(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() @@ -229,23 +230,19 @@ def test_reset_brow(self): serial_bm.set_block(0, 0, m) serial_bm.set_block(1, 1, m) - self.assertTrue(np.allclose(serial_bm.row_block_sizes(), - bm.row_block_sizes())) + self.assertTrue(np.allclose(serial_bm.row_block_sizes(), bm.row_block_sizes())) bm.reset_brow(0) serial_bm.reset_brow(0) - self.assertTrue(np.allclose(serial_bm.row_block_sizes(), - bm.row_block_sizes())) + self.assertTrue(np.allclose(serial_bm.row_block_sizes(), bm.row_block_sizes())) bm.reset_brow(1) serial_bm.reset_brow(1) - self.assertTrue(np.allclose(serial_bm.row_block_sizes(), - bm.row_block_sizes())) + self.assertTrue(np.allclose(serial_bm.row_block_sizes(), bm.row_block_sizes())) def test_reset_bcol(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() @@ -261,17 +258,14 @@ def test_reset_bcol(self): serial_bm.set_block(0, 0, m) serial_bm.set_block(1, 1, m) - self.assertTrue(np.allclose(serial_bm.row_block_sizes(), - bm.row_block_sizes())) + self.assertTrue(np.allclose(serial_bm.row_block_sizes(), bm.row_block_sizes())) bm.reset_bcol(0) serial_bm.reset_bcol(0) - self.assertTrue(np.allclose(serial_bm.col_block_sizes(), - bm.col_block_sizes())) + self.assertTrue(np.allclose(serial_bm.col_block_sizes(), bm.col_block_sizes())) bm.reset_bcol(1) serial_bm.reset_bcol(1) - self.assertTrue(np.allclose(serial_bm.col_block_sizes(), - bm.col_block_sizes())) + self.assertTrue(np.allclose(serial_bm.col_block_sizes(), bm.col_block_sizes())) def test_has_empty_rows(self): with self.assertRaises(Exception) as context: @@ -282,7 +276,6 @@ def test_has_empty_cols(self): self.square_mpi_mat.has_empty_cols() def test_transpose(self): - mat1 = self.square_mpi_mat mat2 = self.rectangular_mpi_mat @@ -294,8 +287,11 @@ def test_transpose(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray().T, - mat1.get_block(j, i).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray().T, mat1.get_block(j, i).toarray() + ) + ) res = mat2.transpose() self.assertIsInstance(res, MPIBlockMatrix) @@ -305,8 +301,11 @@ def test_transpose(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray().T, - mat2.get_block(j, i).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray().T, mat2.get_block(j, i).toarray() + ) + ) res = mat1.transpose(copy=True) self.assertIsInstance(res, MPIBlockMatrix) @@ -316,8 +315,11 @@ def test_transpose(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray().T, - mat1.get_block(j, i).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray().T, mat1.get_block(j, i).toarray() + ) + ) res = mat2.transpose(copy=True) self.assertIsInstance(res, MPIBlockMatrix) @@ -327,8 +329,11 @@ def test_transpose(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray().T, - mat2.get_block(j, i).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray().T, mat2.get_block(j, i).toarray() + ) + ) res = mat1.T self.assertIsInstance(res, MPIBlockMatrix) @@ -338,8 +343,11 @@ def test_transpose(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray().T, - mat1.get_block(j, i).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray().T, mat1.get_block(j, i).toarray() + ) + ) res = mat2.T self.assertIsInstance(res, MPIBlockMatrix) @@ -349,8 +357,11 @@ def test_transpose(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray().T, - mat2.get_block(j, i).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray().T, mat2.get_block(j, i).toarray() + ) + ) def _compare_mpi_and_serial_block_matrices(self, mpi_mat, serial_mat): self.assertTrue(np.allclose(mpi_mat.to_local_array(), serial_mat.toarray())) @@ -358,8 +369,12 @@ def _compare_mpi_and_serial_block_matrices(self, mpi_mat, serial_mat): rows, columns = np.nonzero(mpi_mat.ownership_mask) for i, j in zip(rows, columns): if mpi_mat.get_block(i, j) is not None: - self.assertTrue(np.allclose(mpi_mat.get_block(i, j).toarray(), - serial_mat.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + mpi_mat.get_block(i, j).toarray(), + serial_mat.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_mat.get_block(i, j)) @@ -390,7 +405,6 @@ def test_add(self): res = serial_mat2.tocoo() + mat1 def test_sub(self): - mat1 = self.square_mpi_mat mat2 = self.square_mpi_mat2 @@ -418,27 +432,29 @@ def test_sub(self): res = serial_mat2.tocoo() - mat1 def test_div(self): - mat1 = self.square_mpi_mat serial_mat1 = self.square_serial_mat - res = mat1 / 3.0 + res = mat1 / 3.0 serial_res = serial_mat1 / 3.0 self.assertIsInstance(res, MPIBlockMatrix) rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) def test_iadd(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() @@ -460,18 +476,21 @@ def test_iadd(self): rows, columns = np.nonzero(bm.ownership_mask) for i, j in zip(rows, columns): if bm.get_block(i, j) is not None: - self.assertTrue(np.allclose(bm.get_block(i, j).toarray(), - serial_bm.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + bm.get_block(i, j).toarray(), + serial_bm.get_block(i, j).toarray(), + ) + ) bm += serial_bm serial_bm += serial_bm self._compare_mpi_and_serial_block_matrices(bm, serial_bm) def test_isub(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() @@ -493,18 +512,21 @@ def test_isub(self): rows, columns = np.nonzero(bm.ownership_mask) for i, j in zip(rows, columns): if bm.get_block(i, j) is not None: - self.assertTrue(np.allclose(bm.get_block(i, j).toarray(), - serial_bm.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + bm.get_block(i, j).toarray(), + serial_bm.get_block(i, j).toarray(), + ) + ) bm -= serial_bm serial_bm -= serial_bm self._compare_mpi_and_serial_block_matrices(bm, serial_bm) def test_imul(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() @@ -526,14 +548,17 @@ def test_imul(self): rows, columns = np.nonzero(bm.ownership_mask) for i, j in zip(rows, columns): if bm.get_block(i, j) is not None: - self.assertTrue(np.allclose(bm.get_block(i, j).toarray(), - serial_bm.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + bm.get_block(i, j).toarray(), + serial_bm.get_block(i, j).toarray(), + ) + ) def test_idiv(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() @@ -555,14 +580,17 @@ def test_idiv(self): rows, columns = np.nonzero(bm.ownership_mask) for i, j in zip(rows, columns): if bm.get_block(i, j) is not None: - self.assertTrue(np.allclose(bm.get_block(i, j).toarray(), - serial_bm.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + bm.get_block(i, j).toarray(), + serial_bm.get_block(i, j).toarray(), + ) + ) def test_neg(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() @@ -584,14 +612,17 @@ def test_neg(self): rows, columns = np.nonzero(bm.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) def test_abs(self): - row = np.array([0, 3, 1, 2, 3, 0]) col = np.array([0, 0, 1, 2, 3, 3]) - data = np.array([2., 1, 3, 4, 5, 1]) + data = np.array([2.0, 1, 3, 4, 5, 1]) m = coo_matrix((data, (row, col)), shape=(4, 4)) rank = comm.Get_rank() @@ -613,11 +644,14 @@ def test_abs(self): rows, columns = np.nonzero(bm.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) def test_eq(self): - mat1 = self.square_mpi_mat mat2 = self.square_mpi_mat2 @@ -634,8 +668,12 @@ def test_eq(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -655,8 +693,12 @@ def test_eq(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -664,7 +706,6 @@ def test_eq(self): res = mat1 == serial_mat1 def test_ne(self): - mat1 = self.square_mpi_mat mat2 = self.square_mpi_mat2 @@ -681,8 +722,12 @@ def test_ne(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -702,8 +747,12 @@ def test_ne(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -723,13 +772,16 @@ def test_ne(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) def test_le(self): - mat1 = self.square_mpi_mat mat2 = self.square_mpi_mat2 @@ -746,8 +798,12 @@ def test_le(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -760,8 +816,12 @@ def test_le(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -778,8 +838,12 @@ def test_le(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -799,13 +863,16 @@ def test_le(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) def test_lt(self): - mat1 = self.square_mpi_mat mat2 = self.square_mpi_mat2 @@ -822,8 +889,12 @@ def test_lt(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -843,8 +914,12 @@ def test_lt(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -855,7 +930,6 @@ def test_lt(self): res = serial_mat1 < mat1 def test_ge(self): - mat1 = self.square_mpi_mat mat2 = self.square_mpi_mat2 @@ -872,8 +946,12 @@ def test_ge(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -893,8 +971,12 @@ def test_ge(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -905,7 +987,6 @@ def test_ge(self): res = serial_mat1 >= mat1 def test_gt(self): - mat1 = self.square_mpi_mat mat2 = self.square_mpi_mat2 @@ -922,8 +1003,12 @@ def test_gt(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -943,8 +1028,12 @@ def test_gt(self): rows, columns = np.nonzero(res.ownership_mask) for i, j in zip(rows, columns): if res.get_block(i, j) is not None: - self.assertTrue(np.allclose(res.get_block(i, j).toarray(), - serial_res.get_block(i, j).toarray())) + self.assertTrue( + np.allclose( + res.get_block(i, j).toarray(), + serial_res.get_block(i, j).toarray(), + ) + ) else: self.assertIsNone(serial_res.get_block(i, j)) @@ -1012,7 +1101,7 @@ def test_not_fully_defined_block_matrix(self): m.set_block(1, 1, m1) res = m.shape - self.assertEqual(res, (4,4)) + self.assertEqual(res, (4, 4)) def test_is_row_size_defined(self): m = MPIBlockMatrix(2, 2, [[0, 1], [-1, -1]], comm) @@ -1088,7 +1177,6 @@ def test_get_block_row_index(self): @unittest.pytest.mark.mpi class TestMPIMatVec(unittest.TestCase): - @classmethod @unittest.skipIf(SKIPTESTS, SKIPTESTS) def setUpClass(cls): @@ -1097,13 +1185,9 @@ def setUpClass(cls): def test_get_block_vector_for_dot_product_1(self): rank = comm.Get_rank() - rank_ownership = np.array([[0, 1, 2], - [1, 1, 2], - [0, 1, 2], - [0, 1, 2]]) + rank_ownership = np.array([[0, 1, 2], [1, 1, 2], [0, 1, 2], [0, 1, 2]]) m = MPIBlockMatrix(4, 3, rank_ownership, comm) - sub_m = np.array([[1, 0], - [0, 1]]) + sub_m = np.array([[1, 0], [0, 1]]) sub_m = coo_matrix(sub_m) m.set_block(rank, rank, sub_m.copy()) m.set_block(3, rank, sub_m.copy()) @@ -1120,13 +1204,9 @@ def test_get_block_vector_for_dot_product_1(self): def test_get_block_vector_for_dot_product_2(self): rank = comm.Get_rank() - rank_ownership = np.array([[1, 1, 2], - [0, 1, 2], - [0, 1, 2], - [0, 1, 2]]) + rank_ownership = np.array([[1, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) m = MPIBlockMatrix(4, 3, rank_ownership, comm) - sub_m = np.array([[1, 0], - [0, 1]]) + sub_m = np.array([[1, 0], [0, 1]]) sub_m = coo_matrix(sub_m) if rank == 0: m.set_block(3, rank, sub_m.copy()) @@ -1152,13 +1232,9 @@ def test_get_block_vector_for_dot_product_2(self): def test_get_block_vector_for_dot_product_3(self): rank = comm.Get_rank() - rank_ownership = np.array([[1, 1, 2], - [0, 1, 2], - [0, 1, 2], - [0, 1, 2]]) + rank_ownership = np.array([[1, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) m = MPIBlockMatrix(4, 3, rank_ownership, comm) - sub_m = np.array([[1, 0], - [0, 1]]) + sub_m = np.array([[1, 0], [0, 1]]) sub_m = coo_matrix(sub_m) if rank == 0: m.set_block(3, rank, sub_m.copy()) @@ -1192,13 +1268,9 @@ def test_get_block_vector_for_dot_product_3(self): def test_get_block_vector_for_dot_product_4(self): rank = comm.Get_rank() - rank_ownership = np.array([[-1, 1, 2], - [0, 1, 2], - [0, 1, 2], - [0, 1, 2]]) + rank_ownership = np.array([[-1, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) m = MPIBlockMatrix(4, 3, rank_ownership, comm) - sub_m = np.array([[1, 0], - [0, 1]]) + sub_m = np.array([[1, 0], [0, 1]]) sub_m = coo_matrix(sub_m) m.set_block(0, 0, sub_m.copy()) if rank == 0: @@ -1219,13 +1291,9 @@ def test_get_block_vector_for_dot_product_4(self): def test_get_block_vector_for_dot_product_5(self): rank = comm.Get_rank() - rank_ownership = np.array([[1, 1, 2], - [0, 1, 2], - [0, 1, 2], - [0, 1, 2]]) + rank_ownership = np.array([[1, 1, 2], [0, 1, 2], [0, 1, 2], [0, 1, 2]]) m = MPIBlockMatrix(4, 3, rank_ownership, comm) - sub_m = np.array([[1, 0], - [0, 1]]) + sub_m = np.array([[1, 0], [0, 1]]) sub_m = coo_matrix(sub_m) if rank == 0: m.set_block(3, rank, sub_m.copy()) @@ -1256,9 +1324,9 @@ def test_get_block_vector_for_dot_product_5(self): def test_matvec_1(self): rank = comm.Get_rank() np.random.seed(0) - orig_m = np.zeros((8,8)) + orig_m = np.zeros((8, 8)) for ndx in range(4): - start = ndx*2 + start = ndx * 2 stop = (ndx + 1) * 2 orig_m[start:stop, start:stop] = np.random.uniform(-10, 10, size=(2, 2)) orig_m[start:stop, 6:8] = np.random.uniform(-10, 10, size=(2, 2)) @@ -1266,15 +1334,14 @@ def test_matvec_1(self): orig_m[6:8, 6:8] = np.random.uniform(-10, 10, size=(2, 2)) orig_v = np.random.uniform(-10, 10, size=8) - correct_res = coo_matrix(orig_m)*orig_v + correct_res = coo_matrix(orig_m) * orig_v - rank_ownership = np.array([[0, -1, -1, 0], - [-1, 1, -1, 1], - [-1, -1, 2, 2], - [0, 1, 2, -1]]) + rank_ownership = np.array( + [[0, -1, -1, 0], [-1, 1, -1, 1], [-1, -1, 2, 2], [0, 1, 2, -1]] + ) m = MPIBlockMatrix(4, 4, rank_ownership, comm) - start = rank*2 - stop = (rank+1)*2 + start = rank * 2 + stop = (rank + 1) * 2 m.set_block(rank, rank, coo_matrix(orig_m[start:stop, start:stop])) m.set_block(rank, 3, coo_matrix(orig_m[start:stop, 6:8])) m.set_block(3, rank, coo_matrix(orig_m[6:8, start:stop])) @@ -1296,9 +1363,9 @@ def test_matvec_1(self): def test_matvec_with_block_vector(self): rank = comm.Get_rank() np.random.seed(0) - orig_m = np.zeros((8,8)) + orig_m = np.zeros((8, 8)) for ndx in range(4): - start = ndx*2 + start = ndx * 2 stop = (ndx + 1) * 2 orig_m[start:stop, start:stop] = np.random.uniform(-10, 10, size=(2, 2)) orig_m[start:stop, 6:8] = np.random.uniform(-10, 10, size=(2, 2)) @@ -1306,15 +1373,14 @@ def test_matvec_with_block_vector(self): orig_m[6:8, 6:8] = np.random.uniform(-10, 10, size=(2, 2)) orig_v = np.random.uniform(-10, 10, size=8) - correct_res = coo_matrix(orig_m)*orig_v + correct_res = coo_matrix(orig_m) * orig_v - rank_ownership = np.array([[0, -1, -1, 0], - [-1, 1, -1, 1], - [-1, -1, 2, 2], - [0, 1, 2, -1]]) + rank_ownership = np.array( + [[0, -1, -1, 0], [-1, 1, -1, 1], [-1, -1, 2, 2], [0, 1, 2, -1]] + ) m = MPIBlockMatrix(4, 4, rank_ownership, comm) - start = rank*2 - stop = (rank+1)*2 + start = rank * 2 + stop = (rank + 1) * 2 m.set_block(rank, rank, coo_matrix(orig_m[start:stop, start:stop])) m.set_block(rank, 3, coo_matrix(orig_m[start:stop, 6:8])) m.set_block(3, rank, coo_matrix(orig_m[6:8, start:stop])) @@ -1336,13 +1402,11 @@ def test_matvec_with_block_vector(self): def test_matvect_with_empty_rows(self): rank = comm.Get_rank() - rank_ownership = np.array([[ 0, -1, -1, 0], - [-1, 1, -1, 1], - [-1, -1, 2, 2], - [ 0, 1, 2, -1]]) + rank_ownership = np.array( + [[0, -1, -1, 0], [-1, 1, -1, 1], [-1, -1, 2, 2], [0, 1, 2, -1]] + ) m = MPIBlockMatrix(4, 4, rank_ownership, comm) - sub_m = np.array([[1, 0], - [0, 1]]) + sub_m = np.array([[1, 0], [0, 1]]) sub_m = coo_matrix(sub_m) m.set_block(rank, rank, sub_m.copy()) m.set_block(rank, 3, sub_m.copy()) @@ -1356,18 +1420,16 @@ def test_matvect_with_empty_rows(self): res = m.dot(v) self.assertIsInstance(res, MPIBlockVector) - self.assertTrue(np.allclose(res.get_block(rank), sub_v*2)) + self.assertTrue(np.allclose(res.get_block(rank), sub_v * 2)) self.assertTrue(np.allclose(res.get_block(3), np.zeros(2))) self.assertTrue(np.allclose(res.rank_ownership, np.array([0, 1, 2, -1]))) self.assertFalse(res.has_none) - rank_ownership = np.array([[ 0, -1, -1, 0], - [-1, 1, -1, 1], - [-1, -1, 2, 2], - [ 0, -1, -1, -1]]) + rank_ownership = np.array( + [[0, -1, -1, 0], [-1, 1, -1, 1], [-1, -1, 2, 2], [0, -1, -1, -1]] + ) m = MPIBlockMatrix(4, 4, rank_ownership, comm) - sub_m = np.array([[1, 0], - [0, 1]]) + sub_m = np.array([[1, 0], [0, 1]]) sub_m = coo_matrix(sub_m) m.set_block(rank, rank, sub_m.copy()) m.set_block(rank, 3, sub_m.copy()) @@ -1375,19 +1437,17 @@ def test_matvect_with_empty_rows(self): res = m.dot(v) self.assertIsInstance(res, MPIBlockVector) - self.assertTrue(np.allclose(res.get_block(rank), sub_v*2)) + self.assertTrue(np.allclose(res.get_block(rank), sub_v * 2)) if rank == 0: self.assertTrue(np.allclose(res.get_block(3), np.zeros(2))) self.assertTrue(np.allclose(res.rank_ownership, np.array([0, 1, 2, 0]))) self.assertFalse(res.has_none) - rank_ownership = np.array([[ 0, -1, -1, 0], - [-1, 1, -1, 1], - [-1, -1, 2, 2], - [-1, -1, -1, 0]]) + rank_ownership = np.array( + [[0, -1, -1, 0], [-1, 1, -1, 1], [-1, -1, 2, 2], [-1, -1, -1, 0]] + ) m = MPIBlockMatrix(4, 4, rank_ownership, comm) - sub_m = np.array([[1, 0], - [0, 1]]) + sub_m = np.array([[1, 0], [0, 1]]) sub_m = coo_matrix(sub_m) m.set_block(rank, rank, sub_m.copy()) m.set_block(rank, 3, sub_m.copy()) @@ -1395,7 +1455,7 @@ def test_matvect_with_empty_rows(self): res = m.dot(v) self.assertIsInstance(res, MPIBlockVector) - self.assertTrue(np.allclose(res.get_block(rank), sub_v*2)) + self.assertTrue(np.allclose(res.get_block(rank), sub_v * 2)) if rank == 0: self.assertTrue(np.allclose(res.get_block(3), np.zeros(2))) self.assertTrue(np.allclose(res.rank_ownership, np.array([0, 1, 2, 0]))) diff --git a/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py b/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py index d73ff45c13e..cd37b7543a2 100644 --- a/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py +++ b/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py @@ -11,19 +11,20 @@ import pyomo.common.unittest as unittest from pyomo.contrib.pynumero.dependencies import ( - numpy_available, scipy_available, numpy as np + numpy_available, + scipy_available, + numpy as np, ) -SKIPTESTS=[] +SKIPTESTS = [] if numpy_available and scipy_available: from scipy.sparse import coo_matrix, bmat else: - SKIPTESTS.append( - "Pynumero needs scipy and numpy>=1.13.0 to run BlockMatrix tests" - ) + SKIPTESTS.append("Pynumero needs scipy and numpy>=1.13.0 to run BlockMatrix tests") try: from mpi4py import MPI + comm = MPI.COMM_WORLD if comm.Get_size() < 3: SKIPTESTS.append( @@ -39,7 +40,6 @@ @unittest.pytest.mark.mpi class TestMPIBlockVector(unittest.TestCase): - # Because the setUpClass is called before decorators around the # class itself, we need to put the skipIf on the class setup and not # the class. @@ -49,7 +49,7 @@ class TestMPIBlockVector(unittest.TestCase): def setUpClass(cls): # test problem 1 - v1 = MPIBlockVector(4, [0,1,0,1], comm) + v1 = MPIBlockVector(4, [0, 1, 0, 1], comm) rank = comm.Get_rank() if rank == 0: @@ -60,7 +60,7 @@ def setUpClass(cls): v1.set_block(3, np.ones(2)) cls.v1 = v1 - v2 = MPIBlockVector(7, [0,0,1,1,2,2,-1], comm) + v2 = MPIBlockVector(7, [0, 0, 1, 1, 2, 2, -1], comm) rank = comm.Get_rank() if rank == 0: @@ -122,7 +122,7 @@ def test_has_none(self): self.assertFalse(self.v1.has_none) def test_any(self): - v = MPIBlockVector(2, [0,1], comm) + v = MPIBlockVector(2, [0, 1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3)) @@ -133,7 +133,7 @@ def test_any(self): self.assertTrue(self.v2.any()) def test_all(self): - v = MPIBlockVector(2, [0,1], comm) + v = MPIBlockVector(2, [0, 1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3)) @@ -169,8 +169,37 @@ def test_min(self): self.assertEqual(self.v1.min(), 0.0) self.assertEqual(self.v2.min(), 0.0) + def test_min_max_with_size0_blocks(self): + v = MPIBlockVector(3, [0, 1, 2], comm) + rank = comm.Get_rank() + if rank == 0: + v.set_block(0, np.array([8, 4, 7, 12])) + if rank == 1: + v.set_block(1, np.array([])) + if rank == 2: + v.set_block(2, np.array([5, 6, 3])) + self.assertAlmostEqual(v.min(), 3) + self.assertAlmostEqual(v.max(), 12) + + if rank == 0: + v.set_block(0, np.array([np.inf, np.inf, np.inf, np.inf])) + if rank == 2: + v.set_block(2, np.array([np.inf, np.inf, np.inf])) + self.assertEqual(v.min(), np.inf) + self.assertEqual(v.max(), np.inf) + v *= -1 + self.assertEqual(v.min(), -np.inf) + self.assertEqual(v.max(), -np.inf) + + v = MPIBlockVector(3, [0, 1, 2], comm) + v.set_block(rank, np.array([])) + with self.assertRaisesRegex(ValueError, 'cannot get the min of a size 0 array'): + v.min() + with self.assertRaisesRegex(ValueError, 'cannot get the max of a size 0 array'): + v.max() + def test_max(self): - v = MPIBlockVector(2, [0,1], comm) + v = MPIBlockVector(2, [0, 1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3) + 10) @@ -178,7 +207,7 @@ def test_max(self): v.set_block(1, np.arange(3)) self.assertEqual(v.max(), 12.0) - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3) + 10) @@ -190,7 +219,7 @@ def test_max(self): self.assertEqual(self.v2.max(), 3.0) def test_sum(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -250,18 +279,18 @@ def test_conjugate(self): self.assertTrue(np.allclose(res.get_block(j), v.get_block(j).conjugate())) def test_nonzero(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: - v.set_block(0, np.array([0,1,2])) + v.set_block(0, np.array([0, 1, 2])) if rank == 1: - v.set_block(1, np.array([0,0,2])) + v.set_block(1, np.array([0, 0, 2])) v.set_block(2, np.ones(3)) res = v.nonzero()[0] self.assertTrue(isinstance(res, MPIBlockVector)) self.assertEqual(res.nblocks, v.nblocks) if rank == 0: - self.assertTrue(np.allclose(res.get_block(0), np.array([1,2]))) + self.assertTrue(np.allclose(res.get_block(0), np.array([1, 2]))) if rank == 1: self.assertTrue(np.allclose(res.get_block(1), np.array([2]))) self.assertTrue(np.allclose(res.get_block(2), np.arange(3))) @@ -275,7 +304,7 @@ def test_nonzero(self): self.assertTrue(np.allclose(res.get_block(3), np.arange(2))) def test_round(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3) + 0.01) @@ -289,11 +318,11 @@ def test_round(self): if rank == 0: self.assertTrue(np.allclose(np.arange(3), res.get_block(0))) if rank == 1: - self.assertTrue(np.allclose(np.arange(3)+3, res.get_block(1))) - self.assertTrue(np.allclose(np.arange(3)+6, res.get_block(2))) + self.assertTrue(np.allclose(np.arange(3) + 3, res.get_block(1))) + self.assertTrue(np.allclose(np.arange(3) + 6, res.get_block(2))) def test_clip(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -305,19 +334,19 @@ def test_clip(self): self.assertTrue(isinstance(res, MPIBlockVector)) self.assertEqual(res.nblocks, v.nblocks) if rank == 0: - self.assertTrue(np.allclose(np.array([2,2,2]), res.get_block(0))) + self.assertTrue(np.allclose(np.array([2, 2, 2]), res.get_block(0))) if rank == 1: - self.assertTrue(np.allclose(np.arange(3)+3, res.get_block(1))) - self.assertTrue(np.allclose(np.arange(3)+6, res.get_block(2))) + self.assertTrue(np.allclose(np.arange(3) + 3, res.get_block(1))) + self.assertTrue(np.allclose(np.arange(3) + 6, res.get_block(2))) res = v.clip(min=2.0, max=5.0) self.assertTrue(isinstance(res, MPIBlockVector)) self.assertEqual(res.nblocks, v.nblocks) if rank == 0: - self.assertTrue(np.allclose(np.array([2,2,2]), res.get_block(0))) + self.assertTrue(np.allclose(np.array([2, 2, 2]), res.get_block(0))) if rank == 1: - self.assertTrue(np.allclose(np.array([3,4,5]), res.get_block(1))) - self.assertTrue(np.allclose(np.array([5,5,5]), res.get_block(2))) + self.assertTrue(np.allclose(np.array([3, 4, 5]), res.get_block(1))) + self.assertTrue(np.allclose(np.array([5, 5, 5]), res.get_block(2))) v1 = self.v1 res = v1.clip(max=0.5) @@ -329,7 +358,6 @@ def test_clip(self): self.assertTrue(np.allclose(np.ones(2) * 0.5, res.get_block(3))) def test_compress(self): - v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: @@ -367,7 +395,7 @@ def test_compress(self): res = v.compress(cond.flatten()) def test_owned_blocks(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -389,7 +417,7 @@ def test_owned_blocks(self): self.assertTrue(np.allclose(np.array([1, 3]), owned)) def test_shared_blocks(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -401,7 +429,7 @@ def test_shared_blocks(self): self.assertTrue(np.allclose(np.array([2]), shared)) def test_clone(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -422,7 +450,7 @@ def test_clone(self): self.assertTrue(np.allclose(vv.get_block(2), v.get_block(2))) def test_copy(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -480,13 +508,12 @@ def test_fill(self): self.assertTrue(np.allclose(np.array([2]), v.shared_blocks)) if rank == 0: - self.assertTrue(np.allclose(np.ones(3)*7.0, v.get_block(0))) + self.assertTrue(np.allclose(np.ones(3) * 7.0, v.get_block(0))) if rank == 1: - self.assertTrue(np.allclose(np.ones(4)*7.0, v.get_block(1))) - self.assertTrue(np.allclose(np.ones(2)*7.0, v.get_block(2))) + self.assertTrue(np.allclose(np.ones(4) * 7.0, v.get_block(1))) + self.assertTrue(np.allclose(np.ones(2) * 7.0, v.get_block(2))) def test_dot(self): - v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: @@ -505,7 +532,7 @@ def test_dot(self): self.assertAlmostEqual(expected, v.dot(vv.flatten())) def test_add(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -520,11 +547,11 @@ def test_add(self): if rank == 0: self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks)) - self.assertTrue(np.allclose(np.arange(3)*2, res.get_block(0))) + self.assertTrue(np.allclose(np.arange(3) * 2, res.get_block(0))) if rank == 1: self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks)) - self.assertTrue(np.allclose(np.arange(4)*2, res.get_block(1))) - self.assertTrue(np.allclose(np.arange(2)*2, res.get_block(2))) + self.assertTrue(np.allclose(np.arange(4) * 2, res.get_block(1))) + self.assertTrue(np.allclose(np.arange(2) * 2, res.get_block(2))) res = v + 5.0 self.assertTrue(isinstance(res, MPIBlockVector)) @@ -553,7 +580,7 @@ def test_add(self): self.assertTrue(np.allclose(np.arange(2) + 5.0, res.get_block(2))) def test_sub(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -601,7 +628,7 @@ def test_sub(self): self.assertTrue(np.allclose(np.arange(2) - 5.0, res.get_block(2))) def test_mul(self): - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -677,11 +704,11 @@ def test_truediv(self): if rank == 0: self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks)) - self.assertTrue(np.allclose((np.arange(3) + 1.0)/2.0, res.get_block(0))) + self.assertTrue(np.allclose((np.arange(3) + 1.0) / 2.0, res.get_block(0))) if rank == 1: self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks)) - self.assertTrue(np.allclose((np.arange(4) + 1.0)/2.0, res.get_block(1))) - self.assertTrue(np.allclose((np.arange(2) + 1.0)/2.0, res.get_block(2))) + self.assertTrue(np.allclose((np.arange(4) + 1.0) / 2.0, res.get_block(1))) + self.assertTrue(np.allclose((np.arange(2) + 1.0) / 2.0, res.get_block(2))) res = 2.0 / v self.assertTrue(isinstance(res, MPIBlockVector)) @@ -690,15 +717,14 @@ def test_truediv(self): if rank == 0: self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks)) - self.assertTrue(np.allclose(2.0/(np.arange(3) + 1.0), res.get_block(0))) + self.assertTrue(np.allclose(2.0 / (np.arange(3) + 1.0), res.get_block(0))) if rank == 1: self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks)) - self.assertTrue(np.allclose(2.0/(np.arange(4) + 1.0), res.get_block(1))) - self.assertTrue(np.allclose(2.0/(np.arange(2) + 1.0), res.get_block(2))) + self.assertTrue(np.allclose(2.0 / (np.arange(4) + 1.0), res.get_block(1))) + self.assertTrue(np.allclose(2.0 / (np.arange(2) + 1.0), res.get_block(2))) def test_floordiv(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3) + 1.0) @@ -720,9 +746,7 @@ def test_floordiv(self): self.assertTrue(np.allclose(np.ones(2), res.get_block(2))) bv = BlockVector(3) - bv.set_blocks([np.arange(3) + 1.0, - np.arange(4) + 1.0, - np.arange(2) + 1.0]) + bv.set_blocks([np.arange(3) + 1.0, np.arange(4) + 1.0, np.arange(2) + 1.0]) res1 = v // 2.0 res2 = bv // 2.0 @@ -753,8 +777,7 @@ def test_floordiv(self): self.assertTrue(np.allclose(res1.get_block(2), res2.get_block(2))) def test_isum(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -771,7 +794,7 @@ def test_isum(self): self.assertTrue(np.allclose(np.arange(4) * 2.0, v.get_block(1))) self.assertTrue(np.allclose(np.arange(2) * 2.0, v.get_block(2))) - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -779,7 +802,7 @@ def test_isum(self): v.set_block(1, np.arange(4)) v.set_block(2, np.arange(2)) - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3, dtype='d')) @@ -797,8 +820,7 @@ def test_isum(self): self.assertTrue(np.allclose(np.arange(2) + 7.0, v.get_block(2))) def test_isub(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -815,7 +837,7 @@ def test_isub(self): self.assertTrue(np.allclose(np.zeros(4), v.get_block(1))) self.assertTrue(np.allclose(np.zeros(2), v.get_block(2))) - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -823,7 +845,7 @@ def test_isub(self): v.set_block(1, np.arange(4)) v.set_block(2, np.arange(2)) - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3, dtype='d')) @@ -841,8 +863,7 @@ def test_isub(self): self.assertTrue(np.allclose(np.arange(2) - 7.0, v.get_block(2))) def test_imul(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -859,7 +880,7 @@ def test_imul(self): self.assertTrue(np.allclose(np.arange(4) * np.arange(4), v.get_block(1))) self.assertTrue(np.allclose(np.arange(2) * np.arange(2), v.get_block(2))) - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) @@ -867,7 +888,7 @@ def test_imul(self): v.set_block(1, np.arange(4)) v.set_block(2, np.arange(2)) - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3, dtype='d')) @@ -885,8 +906,7 @@ def test_imul(self): self.assertTrue(np.allclose(np.arange(2) * 7.0, v.get_block(2))) def test_itruediv(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3) + 1.0) @@ -903,7 +923,7 @@ def test_itruediv(self): self.assertTrue(np.allclose(np.ones(4), v.get_block(1))) self.assertTrue(np.allclose(np.ones(2), v.get_block(2))) - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3) + 1.0) @@ -911,7 +931,7 @@ def test_itruediv(self): v.set_block(1, np.arange(4) + 1.0) v.set_block(2, np.arange(2) + 1.0) - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3, dtype='d')) @@ -960,9 +980,7 @@ def test_le(self): self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2))) bv = BlockVector(3) - bv.set_blocks([np.ones(3) * 2, - np.ones(4) * 8, - np.ones(2) * 4]) + bv.set_blocks([np.ones(3) * 2, np.ones(4) * 8, np.ones(2) * 4]) with self.assertRaises(Exception) as context: res = v <= bv @@ -1005,8 +1023,7 @@ def test_le(self): self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2))) def test_lt(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3) * 8) @@ -1014,7 +1031,7 @@ def test_lt(self): v.set_block(1, np.ones(4) * 2) v.set_block(2, np.ones(2) * 4) - v1 = MPIBlockVector(3, [0,1,-1], comm) + v1 = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v1.set_block(0, np.ones(3) * 2) @@ -1037,9 +1054,7 @@ def test_lt(self): self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2))) bv = BlockVector(3) - bv.set_blocks([np.ones(3) * 2, - np.ones(4) * 8, - np.ones(2) * 4]) + bv.set_blocks([np.ones(3) * 2, np.ones(4) * 8, np.ones(2) * 4]) with self.assertRaises(Exception) as context: res = v < bv @@ -1082,8 +1097,7 @@ def test_lt(self): self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2))) def test_ge(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3) * 8) @@ -1091,7 +1105,7 @@ def test_ge(self): v.set_block(1, np.ones(4) * 2) v.set_block(2, np.ones(2) * 4) - v1 = MPIBlockVector(3, [0,1,-1], comm) + v1 = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v1.set_block(0, np.ones(3) * 2) @@ -1114,9 +1128,7 @@ def test_ge(self): self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2))) bv = BlockVector(3) - bv.set_blocks([np.ones(3) * 2, - np.ones(4) * 8, - np.ones(2) * 4]) + bv.set_blocks([np.ones(3) * 2, np.ones(4) * 8, np.ones(2) * 4]) with self.assertRaises(Exception) as context: res = v >= bv @@ -1159,8 +1171,7 @@ def test_ge(self): self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2))) def test_gt(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3) * 8) @@ -1168,7 +1179,7 @@ def test_gt(self): v.set_block(1, np.ones(4) * 2) v.set_block(2, np.ones(2) * 4) - v1 = MPIBlockVector(3, [0,1,-1], comm) + v1 = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v1.set_block(0, np.ones(3) * 2) @@ -1191,9 +1202,7 @@ def test_gt(self): self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2))) bv = BlockVector(3) - bv.set_blocks([np.ones(3) * 2, - np.ones(4) * 8, - np.ones(2) * 4]) + bv.set_blocks([np.ones(3) * 2, np.ones(4) * 8, np.ones(2) * 4]) with self.assertRaises(Exception) as context: res = v > bv @@ -1236,8 +1245,7 @@ def test_gt(self): self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2))) def test_eq(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3) * 8) @@ -1245,7 +1253,7 @@ def test_eq(self): v.set_block(1, np.ones(4) * 2) v.set_block(2, np.ones(2) * 4) - v1 = MPIBlockVector(3, [0,1,-1], comm) + v1 = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v1.set_block(0, np.ones(3) * 2) @@ -1268,9 +1276,7 @@ def test_eq(self): self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2))) bv = BlockVector(3) - bv.set_blocks([np.ones(3) * 2, - np.ones(4) * 8, - np.ones(2) * 4]) + bv.set_blocks([np.ones(3) * 2, np.ones(4) * 8, np.ones(2) * 4]) with self.assertRaises(Exception) as context: res = v == bv @@ -1313,8 +1319,7 @@ def test_eq(self): self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2))) def test_ne(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3) * 8) @@ -1322,7 +1327,7 @@ def test_ne(self): v.set_block(1, np.ones(4) * 2) v.set_block(2, np.ones(2) * 4) - v1 = MPIBlockVector(3, [0,1,-1], comm) + v1 = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v1.set_block(0, np.ones(3) * 2) @@ -1345,9 +1350,7 @@ def test_ne(self): self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2))) bv = BlockVector(3) - bv.set_blocks([np.ones(3) * 2, - np.ones(4) * 8, - np.ones(2) * 4]) + bv.set_blocks([np.ones(3) * 2, np.ones(4) * 8, np.ones(2) * 4]) with self.assertRaises(Exception) as context: res = v != bv @@ -1387,8 +1390,7 @@ def test_ne(self): self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2))) def test_unary_ufuncs(self): - - v = MPIBlockVector(2, [0,1], comm) + v = MPIBlockVector(2, [0, 1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3) * 0.5) @@ -1401,16 +1403,45 @@ def test_unary_ufuncs(self): bv.set_block(0, a) bv.set_block(1, b) - unary_funcs = [np.log10, np.sin, np.cos, np.exp, np.ceil, - np.floor, np.tan, np.arctan, np.arcsin, - np.arccos, np.sinh, np.cosh, np.abs, - np.tanh, np.arcsinh, np.arctanh, - np.fabs, np.sqrt, np.log, np.log2, - np.absolute, np.isfinite, np.isinf, np.isnan, - np.log1p, np.logical_not, np.exp2, np.expm1, - np.sign, np.rint, np.square, np.positive, - np.negative, np.rad2deg, np.deg2rad, - np.conjugate, np.reciprocal] + unary_funcs = [ + np.log10, + np.sin, + np.cos, + np.exp, + np.ceil, + np.floor, + np.tan, + np.arctan, + np.arcsin, + np.arccos, + np.sinh, + np.cosh, + np.abs, + np.tanh, + np.arcsinh, + np.arctanh, + np.fabs, + np.sqrt, + np.log, + np.log2, + np.absolute, + np.isfinite, + np.isinf, + np.isnan, + np.log1p, + np.logical_not, + np.exp2, + np.expm1, + np.sign, + np.rint, + np.square, + np.positive, + np.negative, + np.rad2deg, + np.deg2rad, + np.conjugate, + np.reciprocal, + ] bv2 = BlockVector(2) for fun in unary_funcs: @@ -1435,8 +1466,7 @@ def test_unary_ufuncs(self): np.cumproduct(v) def test_reduce_ufuncs(self): - - v = MPIBlockVector(2, [0,1], comm) + v = MPIBlockVector(2, [0, 1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3) * 0.5) @@ -1452,15 +1482,14 @@ def test_reduce_ufuncs(self): self.assertAlmostEqual(fun(v), fun(bv.flatten())) def test_binary_ufuncs(self): - - v = MPIBlockVector(2, [0,1], comm) + v = MPIBlockVector(2, [0, 1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3) * 0.5) if rank == 1: v.set_block(1, np.ones(2) * 0.8) - v2 = MPIBlockVector(2, [0,1], comm) + v2 = MPIBlockVector(2, [0, 1], comm) rank = comm.Get_rank() if rank == 0: v2.set_block(0, np.ones(3) * 3.0) @@ -1475,13 +1504,27 @@ def test_binary_ufuncs(self): bv2.set_block(0, np.ones(3) * 3.0) bv2.set_block(1, np.ones(2) * 2.8) - binary_ufuncs = [np.add, np.multiply, np.divide, np.subtract, - np.greater, np.greater_equal, np.less, - np.less_equal, np.not_equal, - np.maximum, np.minimum, - np.fmax, np.fmin, np.equal, - np.logaddexp, np.logaddexp2, np.remainder, - np.heaviside, np.hypot] + binary_ufuncs = [ + np.add, + np.multiply, + np.divide, + np.subtract, + np.greater, + np.greater_equal, + np.less, + np.less_equal, + np.not_equal, + np.maximum, + np.minimum, + np.fmax, + np.fmin, + np.equal, + np.logaddexp, + np.logaddexp2, + np.remainder, + np.heaviside, + np.hypot, + ] for fun in binary_ufuncs: serial_res = fun(bv, bv2) @@ -1516,15 +1559,14 @@ def test_binary_ufuncs(self): for i in res.owned_blocks: self.assertTrue(np.allclose(res.get_block(i), serial_res.get_block(i))) - - v = MPIBlockVector(2, [0,1], comm) + v = MPIBlockVector(2, [0, 1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3, dtype=bool)) if rank == 1: v.set_block(1, np.ones(2, dtype=bool)) - v2 = MPIBlockVector(2, [0,1], comm) + v2 = MPIBlockVector(2, [0, 1], comm) rank = comm.Get_rank() if rank == 0: v2.set_block(0, np.zeros(3, dtype=bool)) @@ -1557,8 +1599,7 @@ def test_binary_ufuncs(self): res = fun(bv, v2) def test_contains(self): - - v = MPIBlockVector(2, [0,1], comm) + v = MPIBlockVector(2, [0, 1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.ones(3)) @@ -1569,8 +1610,7 @@ def test_contains(self): self.assertFalse(3 in v) def test_copyfrom(self): - - v = MPIBlockVector(3, [0,1,-1], comm) + v = MPIBlockVector(3, [0, 1, -1], comm) rank = comm.Get_rank() if rank == 0: v.set_block(0, np.arange(3)) diff --git a/pyomo/contrib/pynumero/sparse/tests/test_sparse_utils.py b/pyomo/contrib/pynumero/sparse/tests/test_sparse_utils.py deleted file mode 100644 index d864797201b..00000000000 --- a/pyomo/contrib/pynumero/sparse/tests/test_sparse_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ -import pyomo.common.unittest as unittest - -from pyomo.contrib.pynumero.dependencies import ( - numpy as np, numpy_available, scipy_available -) -if not (numpy_available and scipy_available): - raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests") - -from scipy.sparse import coo_matrix, bmat - -from pyomo.contrib.pynumero.sparse.utils import is_symmetric_dense, is_symmetric_sparse - -class TestSparseUtils(unittest.TestCase): - - def setUp(self): - - row = np.array([0, 1, 4, 1, 2, 7, 2, 3, 5, 3, 4, 5, 4, 7, 5, 6, 6, 7]) - col = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 7]) - data = np.array([27, 5, 12, 56, 66, 34, 94, 31, 41, 7, 98, 72, 24, 33, 78, 47, 98, 41]) - - off_diagonal_mask = row != col - new_row = np.concatenate([row, col[off_diagonal_mask]]) - new_col = np.concatenate([col, row[off_diagonal_mask]]) - new_data = np.concatenate([data, data[off_diagonal_mask]]) - m = coo_matrix((new_data, (new_row, new_col)), shape=(8, 8)) - - self.block00 = m - - row = np.array([0, 3, 1, 0]) - col = np.array([0, 3, 1, 2]) - data = np.array([4, 5, 7, 9]) - m = coo_matrix((data, (row, col)), shape=(4, 8)) - - self.block10 = m - - row = np.array([0, 1, 2, 3]) - col = np.array([0, 1, 2, 3]) - data = np.array([1, 1, 1, 1]) - m = coo_matrix((data, (row, col)), shape=(4, 4)) - - self.block11 = m - - def test_is_symmetric_dense(self): - - m = self.block00.toarray() - self.assertTrue(is_symmetric_dense(m)) - self.assertTrue(is_symmetric_dense(2)) - with self.assertRaises(Exception) as context: - self.assertTrue(is_symmetric_dense(self.block00)) - - def test_is_symmetric_sparse(self): - m = self.block00 - self.assertTrue(is_symmetric_sparse(m)) - m = self.block00.toarray() - self.assertTrue(is_symmetric_sparse(m)) - m = self.block11 - self.assertTrue(is_symmetric_sparse(m)) - m = self.block10 - self.assertFalse(is_symmetric_sparse(m)) - self.assertTrue(is_symmetric_sparse(2)) - - row = np.array([0, 1, 2, 3]) - col = np.array([0, 1, 2, 3]) - data = np.array([1, 1, 1, 1]) - m = coo_matrix((data, (row, col)), shape=(4, 6)) - self.assertFalse(is_symmetric_sparse(m)) - - with self.assertRaises(Exception) as context: - self.assertTrue(is_symmetric_sparse(range(5))) diff --git a/pyomo/contrib/pynumero/sparse/utils.py b/pyomo/contrib/pynumero/sparse/utils.py deleted file mode 100644 index b4fffc4ab8a..00000000000 --- a/pyomo/contrib/pynumero/sparse/utils.py +++ /dev/null @@ -1,55 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ -from scipy.sparse.sputils import isscalarlike -from scipy.sparse import tril, triu, isspmatrix - -import numpy as np - - -def is_symmetric_dense(mat): - - flag = False - if isinstance(mat, np.ndarray): - if mat.ndim == 2 and mat.shape[0] == mat.shape[1]: - if np.allclose(mat, mat.T, atol=1e-6): - flag = True - elif isscalarlike(mat): - flag = True - else: - raise RuntimeError("Format not recognized {}".format(type(mat))) - return flag - - -def is_symmetric_sparse(mat): - from pyomo.contrib.pynumero.sparse.block_matrix import BlockMatrix - # Note: this check is expensive - flag = False - if isinstance(mat, np.ndarray): - flag = is_symmetric_dense(mat) - elif isscalarlike(mat): - flag = True - elif isspmatrix(mat) or isinstance(mat, BlockMatrix): - if mat.shape[0] != mat.shape[1]: - flag = False - else: - if isinstance(mat, BlockMatrix): - mat = mat.tocoo() - # get upper and lower triangular - l = tril(mat) - u = triu(mat) - diff = l - u.transpose() - z = np.zeros(diff.nnz) - flag = np.allclose(diff.data, z, atol=1e-6) - else: - raise RuntimeError("Format not recognized {}".format(type(mat))) - return flag - - diff --git a/pyomo/contrib/pynumero/src/AmplInterface.cpp b/pyomo/contrib/pynumero/src/AmplInterface.cpp index 293729d58dc..26053a9611b 100644 --- a/pyomo/contrib/pynumero/src/AmplInterface.cpp +++ b/pyomo/contrib/pynumero/src/AmplInterface.cpp @@ -450,7 +450,7 @@ AmplInterfaceStr::AmplInterfaceStr(char* nl, size_t size) nl_size(size) {} -// THIS METHOD IS DIABLED FOR NOW +// THIS METHOD IS DISABLED FOR NOW FILE* AmplInterfaceStr::open_nl(ASL_pfgh *asl, char* stub) { // Ignore the stub and use the cached NL file content diff --git a/pyomo/contrib/pynumero/src/AmplInterface.hpp b/pyomo/contrib/pynumero/src/AmplInterface.hpp index a63d730ae9f..259cf88d895 100644 --- a/pyomo/contrib/pynumero/src/AmplInterface.hpp +++ b/pyomo/contrib/pynumero/src/AmplInterface.hpp @@ -100,7 +100,7 @@ class PYNUMERO_ASL_EXPORT AmplInterface { // write the solution to the .sol file // pass in the ampl_solve_status_num (this is the "solve_status_num" from - // the AMPL documentation. It should be interpretted as follows: + // the AMPL documentation. It should be interpreted as follows: // // number string interpretation // 0 - 99 solved optimal solution found diff --git a/pyomo/contrib/pyros/CHANGELOG.txt b/pyomo/contrib/pyros/CHANGELOG.txt index ce656fc6ab3..b1866ed955c 100644 --- a/pyomo/contrib/pyros/CHANGELOG.txt +++ b/pyomo/contrib/pyros/CHANGELOG.txt @@ -2,6 +2,56 @@ PyROS CHANGELOG =============== +------------------------------------------------------------------------------- +PyROS 1.2.7 26 Apr 2023 +------------------------------------------------------------------------------- +- Refurbish separation problem routines and data structures +- Ensure implementation of separation scenario selection heuristic + is consistent with original intent +- Fix local and global separation solve time accumulators +- Prefer TicTocTimer for solve time accumulators + + +------------------------------------------------------------------------------- +PyROS 1.2.6 07 Dec 2022 +------------------------------------------------------------------------------- +- Add more judicious enforcement of PyROS time limit. +- Update PyROS solver and webpage docs + + +------------------------------------------------------------------------------- +PyROS 1.2.5 06 Dec 2022 +------------------------------------------------------------------------------- +- Add `config` argument to `UncertaintySet.bounding_model` + + +------------------------------------------------------------------------------- +PyROS 1.2.4 22 Nov 2022 +------------------------------------------------------------------------------- +- Add origin attribute to BudgetSet. + + +------------------------------------------------------------------------------- +PyROS 1.2.3 22 Nov 2022 +------------------------------------------------------------------------------- +- Generalize FactorModelSet. +- Resolve issues with FactorModelSet parameter bounds. +- Modularize construction of uncertainty set bounding problems. + + +------------------------------------------------------------------------------- +PyROS 1.2.2 09 Nov 2022 +------------------------------------------------------------------------------- +- Rewrite PyROS `UncertaintySet` module, class, and attribute docstrings + and make attribute validation more rigorous. + + +------------------------------------------------------------------------------- +PyROS 1.2.1 05 Oct 2022 +------------------------------------------------------------------------------- +- Fix subordinate optimizer SolverResults solve time access routines. + + ------------------------------------------------------------------------------- PyROS 1.2.0 09 Sep 2022 ------------------------------------------------------------------------------- diff --git a/pyomo/contrib/pyros/__init__.py b/pyomo/contrib/pyros/__init__.py index 09f6ae0e84d..aeb92eb13fd 100644 --- a/pyomo/contrib/pyros/__init__.py +++ b/pyomo/contrib/pyros/__init__.py @@ -1,13 +1,14 @@ from pyomo.contrib.pyros.pyros import PyROS from pyomo.contrib.pyros.pyros import ObjectiveType, pyrosTerminationCondition -from pyomo.contrib.pyros.uncertainty_sets import (UncertaintySet, - EllipsoidalSet, - PolyhedralSet, - CardinalitySet, - BudgetSet, - DiscreteScenarioSet, - FactorModelSet, - BoxSet, - IntersectionSet, - AxisAlignedEllipsoidalSet) - +from pyomo.contrib.pyros.uncertainty_sets import ( + UncertaintySet, + EllipsoidalSet, + PolyhedralSet, + CardinalitySet, + BudgetSet, + DiscreteScenarioSet, + FactorModelSet, + BoxSet, + IntersectionSet, + AxisAlignedEllipsoidalSet, +) diff --git a/pyomo/contrib/pyros/master_problem_methods.py b/pyomo/contrib/pyros/master_problem_methods.py index 82ede749cd1..a0e2245cab1 100644 --- a/pyomo/contrib/pyros/master_problem_methods.py +++ b/pyomo/contrib/pyros/master_problem_methods.py @@ -1,20 +1,30 @@ """ Functions for handling the construction and solving of the GRCS master problem via ROSolver """ -from pyomo.core.base import (ConcreteModel, Block, - Var, - Objective, Constraint, - ConstraintList, SortComponents) +from pyomo.core.base import ( + ConcreteModel, + Block, + Var, + Objective, + Constraint, + ConstraintList, + SortComponents, +) from pyomo.opt import TerminationCondition as tc +from pyomo.opt import SolverResults from pyomo.core.expr import value from pyomo.core.base.set_types import NonNegativeIntegers, NonNegativeReals -from pyomo.contrib.pyros.util import (selective_clone, - ObjectiveType, - pyrosTerminationCondition, - process_termination_condition_master_problem, - output_logger) -from pyomo.contrib.pyros.solve_data import (MasterProblemData, - MasterResult) +from pyomo.contrib.pyros.util import ( + selective_clone, + ObjectiveType, + pyrosTerminationCondition, + process_termination_condition_master_problem, + adjust_solver_time_settings, + revert_solver_max_time_adjustment, + get_main_elapsed_time, + output_logger, +) +from pyomo.contrib.pyros.solve_data import MasterProblemData, MasterResult from pyomo.opt.results import check_optimal_termination from pyomo.core.expr.visitor import replace_expressions, identify_variables from pyomo.common.collections import ComponentMap, ComponentSet @@ -26,6 +36,9 @@ from pyomo.common.errors import ApplicationError from pyomo.common.modeling import unique_component_name +from pyomo.common.timing import TicTocTimer +from pyomo.contrib.pyros.util import TIC_TOC_SOLVE_TIME_ATTR + def initial_construct_master(model_data): """ @@ -63,20 +76,9 @@ def get_state_vars(model, iterations): """ iter_state_var_map = dict() for itn in iterations: - fsv_set = ComponentSet( - model.scenarios[itn, 0].util.first_stage_variables) - state_vars = list() - for blk in model.scenarios[itn, :]: - ssv_set = ComponentSet(blk.util.second_stage_variables) - state_vars.extend( - v for v in blk.component_data_objects( - Var, - active=True, - descend_into=True, - sort=SortComponents.deterministic, # guarantee order - ) - if v not in fsv_set and v not in ssv_set - ) + state_vars = [ + var for blk in model.scenarios[itn, :] for var in blk.util.state_vars + ] iter_state_var_map[itn] = state_vars return iter_state_var_map @@ -105,17 +107,16 @@ def construct_master_feasibility_problem(model_data, config): # obtain mapping from master problem to master feasibility # problem variables - varmap_name = unique_component_name( + varmap_name = unique_component_name(model_data.master_model, 'pyros_var_map') + setattr( model_data.master_model, - 'pyros_var_map', + varmap_name, + list(model_data.master_model.component_data_objects(Var)), ) - setattr(model_data.master_model, varmap_name, - list(model_data.master_model.component_data_objects(Var))) model = model_data.master_model.clone() - model_data.feasibility_problem_varmap = list(zip( - getattr(model_data.master_model, varmap_name), - getattr(model, varmap_name) - )) + model_data.feasibility_problem_varmap = list( + zip(getattr(model_data.master_model, varmap_name), getattr(model, varmap_name)) + ) delattr(model_data.master_model, varmap_name) delattr(model, varmap_name) @@ -132,8 +133,7 @@ def construct_master_feasibility_problem(model_data, config): ssv_set = ComponentSet(blk.util.second_stage_variables) # get second-stage var in DR eqn. should only be one var - ssv_in_dr_eq = [var for var in vars_in_dr_eq - if var in ssv_set][0] + ssv_in_dr_eq = [var for var in vars_in_dr_eq if var in ssv_set][0] # update var value for initialization # fine since DR eqns are f(d) - z == 0 (not z - f(d) == 0) @@ -142,8 +142,8 @@ def construct_master_feasibility_problem(model_data, config): # initialize state vars to previous master solution values if iteration != 0: - stvar_map = get_state_vars(model, [iteration, iteration-1]) - for current, prev in zip(stvar_map[iteration], stvar_map[iteration-1]): + stvar_map = get_state_vars(model, [iteration, iteration - 1]) + for current, prev in zip(stvar_map[iteration], stvar_map[iteration - 1]): current.set_value(value(prev)) # constraints to which slacks should be added @@ -155,24 +155,25 @@ def construct_master_feasibility_problem(model_data, config): else: dr_eqs = list() - targets.extend([ - con for con in blk.component_data_objects( - Constraint, active=True, descend_into=True) - if con not in dr_eqs]) + targets.extend( + [ + con + for con in blk.component_data_objects( + Constraint, active=True, descend_into=True + ) + if con not in dr_eqs + ] + ) # retain original constraint exprs (for slack initialization and scaling) - pre_slack_con_exprs = ComponentMap( - (con, con.body - con.upper) for con in targets - ) + pre_slack_con_exprs = ComponentMap((con, con.body - con.upper) for con in targets) # add slack variables and objective # inequalities g(v) <= b become g(v) -- s^-<= b # equalities h(v) == b become h(v) -- s^- + s^+ == b - TransformationFactory("core.add_slack_variables").apply_to(model, - targets=targets) + TransformationFactory("core.add_slack_variables").apply_to(model, targets=targets) slack_vars = ComponentSet( - model._core_add_slack_variables.component_data_objects( - Var, descend_into=True) + model._core_add_slack_variables.component_data_objects(Var, descend_into=True) ) # initialize and scale slack variables @@ -200,13 +201,15 @@ def construct_master_feasibility_problem(model_data, config): slack_var.set_value(con_slack) # update expression replacement map - slack_substitution_map[id(slack_var)] = (scaling_coeff * slack_var) + slack_substitution_map[id(slack_var)] = scaling_coeff * slack_var # finally, scale slack(s) con.set_value( - (replace_expressions(con.lower, slack_substitution_map), - replace_expressions(con.body, slack_substitution_map), - replace_expressions(con.upper, slack_substitution_map),) + ( + replace_expressions(con.lower, slack_substitution_map), + replace_expressions(con.body, slack_substitution_map), + replace_expressions(con.upper, slack_substitution_map), + ) ) return model @@ -238,6 +241,11 @@ def solve_master_feasibility_problem(model_data, config): else: solver = config.local_solver + timer = TicTocTimer() + orig_setting, custom_setting_present = adjust_solver_time_settings( + model_data.timing, solver, config + ) + timer.tic(msg=None) try: results = solver.solve(model, tee=config.tee, load_solutions=False) except ApplicationError: @@ -250,9 +258,18 @@ def solve_master_feasibility_problem(model_data, config): f"{model_data.iteration}" ) raise + else: + setattr(results.solver, TIC_TOC_SOLVE_TIME_ATTR, timer.toc(msg=None)) + finally: + revert_solver_max_time_adjustment( + solver, orig_setting, custom_setting_present, config + ) feasible_terminations = { - tc.optimal, tc.locallyOptimal, tc.globallyOptimal, tc.feasible + tc.optimal, + tc.locallyOptimal, + tc.globallyOptimal, + tc.feasible, } if results.solver.termination_condition in feasible_terminations: model.solutions.load_from(results) @@ -282,7 +299,7 @@ def minimize_dr_vars(model_data, config): results : SolverResults Subordinate solver results for the polishing problem. """ - #config.progress_logger.info("Executing decision rule variable polishing solve.") + # config.progress_logger.info("Executing decision rule variable polishing solve.") model = model_data.master_model polishing_model = model.clone() @@ -294,24 +311,31 @@ def minimize_dr_vars(model_data, config): polishing_model.tau_vars = [] # ========== for idx in range(len(decision_rule_vars)): - polishing_model.scenarios[0,0].add_component( - "polishing_var_" + str(idx), - Var(index_set, initialize=1e6, domain=NonNegativeReals)) + polishing_model.scenarios[0, 0].add_component( + "polishing_var_" + str(idx), + Var(index_set, initialize=1e6, domain=NonNegativeReals), + ) polishing_model.tau_vars.append( - getattr(polishing_model.scenarios[0,0], "polishing_var_" + str(idx)) + getattr(polishing_model.scenarios[0, 0], "polishing_var_" + str(idx)) ) # ========== this_iter = polishing_model.scenarios[max(polishing_model.scenarios.keys())[0], 0] nom_block = polishing_model.scenarios[0, 0] if config.objective_focus == ObjectiveType.nominal: - obj_val = value(this_iter.second_stage_objective + this_iter.first_stage_objective) - polishing_model.scenarios[0,0].polishing_constraint = \ - Constraint(expr=obj_val >= nom_block.second_stage_objective + nom_block.first_stage_objective) + obj_val = value( + this_iter.second_stage_objective + this_iter.first_stage_objective + ) + polishing_model.scenarios[0, 0].polishing_constraint = Constraint( + expr=obj_val + >= nom_block.second_stage_objective + nom_block.first_stage_objective + ) elif config.objective_focus == ObjectiveType.worst_case: - polishing_model.zeta.fix() # Searching equivalent optimal solutions given optimal zeta + polishing_model.zeta.fix() # Searching equivalent optimal solutions given optimal zeta # === Make absolute value constraints on polishing_vars - polishing_model.scenarios[0,0].util.absolute_var_constraints = cons = ConstraintList() + polishing_model.scenarios[ + 0, 0 + ].util.absolute_var_constraints = cons = ConstraintList() uncertain_params = nom_block.util.uncertain_params if config.decision_rule_order == 1: for i, tau in enumerate(polishing_model.tau_vars): @@ -320,8 +344,16 @@ def minimize_dr_vars(model_data, config): cons.add(-tau[j] <= this_iter.util.decision_rule_vars[i][j]) cons.add(this_iter.util.decision_rule_vars[i][j] <= tau[j]) else: - cons.add(-tau[j] <= this_iter.util.decision_rule_vars[i][j] * uncertain_params[j - 1]) - cons.add(this_iter.util.decision_rule_vars[i][j] * uncertain_params[j - 1] <= tau[j]) + cons.add( + -tau[j] + <= this_iter.util.decision_rule_vars[i][j] + * uncertain_params[j - 1] + ) + cons.add( + this_iter.util.decision_rule_vars[i][j] + * uncertain_params[j - 1] + <= tau[j] + ) elif config.decision_rule_order == 2: l = list(range(len(uncertain_params))) index_pairs = list(it.combinations(l, 2)) @@ -335,27 +367,67 @@ def minimize_dr_vars(model_data, config): elif r <= len(uncertain_params) and r > 0: cons.add(-tau[r] <= Z[r] * uncertain_params[r - 1]) cons.add(Z[r] * uncertain_params[r - 1] <= tau[r]) - elif r <= len(indices) - len(uncertain_params) - 1 and r > len(uncertain_params): - cons.add(-tau[r] <= Z[r] * uncertain_params[index_pairs[r - len(uncertain_params) - 1][0]] * uncertain_params[ - index_pairs[r - len(uncertain_params) - 1][1]]) - cons.add(Z[r] * uncertain_params[index_pairs[r - len(uncertain_params) - 1][0]] * - uncertain_params[index_pairs[r - len(uncertain_params) - 1][1]] <= tau[r]) + elif r <= len(indices) - len(uncertain_params) - 1 and r > len( + uncertain_params + ): + cons.add( + -tau[r] + <= Z[r] + * uncertain_params[ + index_pairs[r - len(uncertain_params) - 1][0] + ] + * uncertain_params[ + index_pairs[r - len(uncertain_params) - 1][1] + ] + ) + cons.add( + Z[r] + * uncertain_params[ + index_pairs[r - len(uncertain_params) - 1][0] + ] + * uncertain_params[ + index_pairs[r - len(uncertain_params) - 1][1] + ] + <= tau[r] + ) elif r > len(indices) - len(uncertain_params) - 1: - cons.add(-tau[r] <= Z[r] * uncertain_params[r - len(index_pairs) - len(uncertain_params) - 1] ** 2) - cons.add(Z[r] * uncertain_params[r - len(index_pairs) - len(uncertain_params) - 1] ** 2 <= tau[r]) + cons.add( + -tau[r] + <= Z[r] + * uncertain_params[ + r - len(index_pairs) - len(uncertain_params) - 1 + ] + ** 2 + ) + cons.add( + Z[r] + * uncertain_params[ + r - len(index_pairs) - len(uncertain_params) - 1 + ] + ** 2 + <= tau[r] + ) else: - raise NotImplementedError("Decision rule variable polishing has not been generalized to decision_rule_order " - + str(config.decision_rule_order) + ".") + raise NotImplementedError( + "Decision rule variable polishing has not been generalized to decision_rule_order " + + str(config.decision_rule_order) + + "." + ) - polishing_model.scenarios[0,0].polishing_obj = \ - Objective(expr=sum(sum(tau[j] for j in tau.index_set()) for tau in polishing_model.tau_vars)) + polishing_model.scenarios[0, 0].polishing_obj = Objective( + expr=sum( + sum(tau[j] for j in tau.index_set()) for tau in polishing_model.tau_vars + ) + ) # === Fix design for d in first_stage_variables: d.fix() # === Unfix DR vars - num_dr_vars = len(model.scenarios[0, 0].util.decision_rule_vars[0]) # there is at least one dr var + num_dr_vars = len( + model.scenarios[0, 0].util.decision_rule_vars[0] + ) # there is at least one dr var num_uncertain_params = len(config.uncertain_params) if model.const_efficiency_applied: @@ -396,12 +468,13 @@ def minimize_dr_vars(model_data, config): solver = config.local_solver # === Solve the polishing model + timer = TicTocTimer() + orig_setting, custom_setting_present = adjust_solver_time_settings( + model_data.timing, solver, config + ) + timer.tic(msg=None) try: - results = solver.solve( - polishing_model, - tee=config.tee, - load_solutions=False, - ) + results = solver.solve(polishing_model, tee=config.tee, load_solutions=False) except ApplicationError: config.progress_logger.error( f"Optimizer {repr(solver)} encountered an exception " @@ -409,11 +482,15 @@ def minimize_dr_vars(model_data, config): f"in iteration {model_data.iteration}" ) raise + else: + setattr(results.solver, TIC_TOC_SOLVE_TIME_ATTR, timer.toc(msg=None)) + finally: + revert_solver_max_time_adjustment( + solver, orig_setting, custom_setting_present, config + ) # === Process solution by termination condition - acceptable = { - tc.globallyOptimal, tc.optimal, tc.locallyOptimal, tc.feasible, - } + acceptable = {tc.globallyOptimal, tc.optimal, tc.locallyOptimal, tc.feasible} if results.solver.termination_condition not in acceptable: # continue with "unpolished" master model solution return results @@ -427,10 +504,8 @@ def minimize_dr_vars(model_data, config): polishing_model.scenarios[idx].util.second_stage_variables, ) sv_zip = zip( - get_state_vars(model_data.master_model, [idx[0]])[idx[0]], - get_state_vars(polishing_model, [idx[0]])[idx[0]], + blk.util.state_vars, polishing_model.scenarios[idx].util.state_vars ) - for master_ssv, polish_ssv in ssv_zip: master_ssv.set_value(value(polish_ssv)) for master_sv, polish_sv in sv_zip: @@ -457,13 +532,15 @@ def add_p_robust_constraint(model_data, config): rho = config.p_robustness['rho'] model = model_data.master_model block_0 = model.scenarios[0, 0] - frac_nom_cost = (1 + rho) * (block_0.first_stage_objective + - block_0.second_stage_objective) + frac_nom_cost = (1 + rho) * ( + block_0.first_stage_objective + block_0.second_stage_objective + ) for block_k in model.scenarios[model_data.iteration, :]: model.p_robust_constraints.add( block_k.first_stage_objective + block_k.second_stage_objective - <= frac_nom_cost) + <= frac_nom_cost + ) return @@ -476,8 +553,10 @@ def add_scenario_to_master(model_data, violations): i = max(m.scenarios.keys())[0] + 1 # === Add a block to master for each violation - idx = 0 # Only supporting adding single violation back to master in v1 - new_block = selective_clone(m.scenarios[0, 0], m.scenarios[0, 0].util.first_stage_variables) + idx = 0 # Only supporting adding single violation back to master in v1 + new_block = selective_clone( + m.scenarios[0, 0], m.scenarios[0, 0].util.first_stage_variables + ) m.scenarios[i, idx].transfer_attributes_from(new_block) # === Set uncertain params in new block(s) to correct value(s) @@ -497,7 +576,9 @@ def higher_order_decision_rule_efficiency(config, model_data): # Ensure all are unfixed unless next conditions are met... for dr_var in nlp_model.scenarios[0, 0].util.decision_rule_vars: dr_var.unfix() - num_dr_vars = len(nlp_model.scenarios[0, 0].util.decision_rule_vars[0]) # there is at least one dr var + num_dr_vars = len( + nlp_model.scenarios[0, 0].util.decision_rule_vars[0] + ) # there is at least one dr var num_uncertain_params = len(config.uncertain_params) nlp_model.const_efficiency_applied = False nlp_model.linear_efficiency_applied = False @@ -506,7 +587,10 @@ def higher_order_decision_rule_efficiency(config, model_data): for dr_var in nlp_model.scenarios[0, 0].util.decision_rule_vars: for i in range(1, num_dr_vars): dr_var[i].fix(0) - elif model_data.iteration <= num_uncertain_params and config.decision_rule_order > 1: + elif ( + model_data.iteration <= num_uncertain_params + and config.decision_rule_order > 1 + ): # Only applied in DR order > 1 case for dr_var in nlp_model.scenarios[0, 0].util.decision_rule_vars: for i in range(num_uncertain_params + 1, num_dr_vars): @@ -551,7 +635,12 @@ def solver_call_master(model_data, config, solver, solve_data): higher_order_decision_rule_efficiency(config, model_data) + timer = TicTocTimer() for opt in backup_solvers: + orig_setting, custom_setting_present = adjust_solver_time_settings( + model_data.timing, opt, config + ) + timer.tic(msg=None) try: results = opt.solve( nlp_model, @@ -568,6 +657,12 @@ def solver_call_master(model_data, config, solver, solve_data): f"optimize master problem in iteration {model_data.iteration}" ) raise + else: + setattr(results.solver, TIC_TOC_SOLVE_TIME_ATTR, timer.toc(msg=None)) + finally: + revert_solver_max_time_adjustment( + solver, orig_setting, custom_setting_present, config + ) optimal_termination = check_optimal_termination(results) infeasible = results.solver.termination_condition == tc.infeasible @@ -582,12 +677,12 @@ def solver_call_master(model_data, config, solver, solve_data): solver_term_cond_dict[str(opt)] = str(results.solver.termination_condition) master_soln.termination_condition = results.solver.termination_condition master_soln.pyros_termination_condition = None - try_backup, _ = master_soln.master_subsolver_results = ( - process_termination_condition_master_problem( - config=config, - results=results, - ) - ) + ( + try_backup, + _, + ) = ( + master_soln.master_subsolver_results + ) = process_termination_condition_master_problem(config=config, results=results) master_soln.nominal_block = nlp_model.scenarios[0, 0] master_soln.results = results @@ -597,15 +692,12 @@ def solver_call_master(model_data, config, solver, solve_data): # (nominal block DOF variable and objective values) if not try_backup and not infeasible: master_soln.fsv_vals = list( - v.value - for v in nlp_model.scenarios[0, 0].util.first_stage_variables + v.value for v in nlp_model.scenarios[0, 0].util.first_stage_variables ) - if config.objective_focus is ObjectiveType.nominal: master_soln.ssv_vals = list( v.value - for v - in nlp_model.scenarios[0, 0].util.second_stage_variables + for v in nlp_model.scenarios[0, 0].util.second_stage_variables ) master_soln.second_stage_objective = value( nlp_model.scenarios[0, 0].second_stage_objective @@ -614,8 +706,7 @@ def solver_call_master(model_data, config, solver, solve_data): idx = max(nlp_model.scenarios.keys())[0] master_soln.ssv_vals = list( v.value - for v - in nlp_model.scenarios[idx, 0].util.second_stage_variables + for v in nlp_model.scenarios[idx, 0].util.second_stage_variables ) master_soln.second_stage_objective = value( nlp_model.scenarios[idx, 0].second_stage_objective @@ -628,6 +719,20 @@ def solver_call_master(model_data, config, solver, solve_data): master_soln.results = results master_soln.master_model = nlp_model + # if PyROS time limit exceeded, exit loop and return solution + elapsed = get_main_elapsed_time(model_data.timing) + if config.time_limit: + if elapsed >= config.time_limit: + try_backup = False + master_soln.master_subsolver_results = ( + None, + pyrosTerminationCondition.time_out, + ) + master_soln.pyros_termination_condition = ( + pyrosTerminationCondition.time_out + ) + output_logger(config=config, time_out=True, elapsed=elapsed) + if not try_backup: return master_soln @@ -647,7 +752,7 @@ def solver_call_master(model_data, config, solver, solve_data): + "_master_" + str(model_data.iteration) + ".bar" - ) + ), ) nlp_model.write(name, io_options={'symbolic_solver_labels': True}) output_logger( @@ -672,7 +777,37 @@ def solve_master(model_data, config): results = solve_master_feasibility_problem(model_data, config) master_soln.feasibility_problem_results = results - solver = config.global_solver if config.solve_master_globally else config.local_solver + # if pyros time limit reached, load time out status + # to master results and return to caller + elapsed = get_main_elapsed_time(model_data.timing) + if config.time_limit: + if elapsed >= config.time_limit: + # load master model + master_soln.master_model = model_data.master_model + master_soln.nominal_block = model_data.master_model.scenarios[0, 0] + + # empty results object, with master solve time of zero + master_soln.results = SolverResults() + setattr(master_soln.results.solver, TIC_TOC_SOLVE_TIME_ATTR, 0) + + # PyROS time out status + master_soln.pyros_termination_condition = ( + pyrosTerminationCondition.time_out + ) + master_soln.master_subsolver_results = ( + None, + pyrosTerminationCondition.time_out, + ) + + # log time out message + output_logger(config=config, time_out=True, elapsed=elapsed) + + return master_soln - return solver_call_master(model_data=model_data, config=config, solver=solver, - solve_data=master_soln) + solver = ( + config.global_solver if config.solve_master_globally else config.local_solver + ) + + return solver_call_master( + model_data=model_data, config=config, solver=solver, solve_data=master_soln + ) diff --git a/pyomo/contrib/pyros/pyros.py b/pyomo/contrib/pyros/pyros.py index df62997a15b..34db54b64e6 100644 --- a/pyomo/contrib/pyros/pyros.py +++ b/pyomo/contrib/pyros/pyros.py @@ -11,42 +11,41 @@ # pyros.py: Generalized Robust Cutting-Set Algorithm for Pyomo import logging +from textwrap import indent, dedent, wrap from pyomo.common.collections import Bunch, ComponentSet -from pyomo.common.config import ( - ConfigDict, ConfigValue, In, NonNegativeFloat, add_docstring_list -) +from pyomo.common.config import ConfigDict, ConfigValue, In, NonNegativeFloat from pyomo.core.base.block import Block from pyomo.core.expr import value from pyomo.core.base.var import Var, _VarData from pyomo.core.base.param import Param, _ParamData from pyomo.core.base.objective import Objective, maximize -from pyomo.contrib.pyros.util import (a_logger, - time_code, - get_main_elapsed_time) +from pyomo.contrib.pyros.util import a_logger, time_code, get_main_elapsed_time from pyomo.common.modeling import unique_component_name from pyomo.opt import SolverFactory -from pyomo.contrib.pyros.util import (model_is_valid, - recast_to_min_obj, - add_decision_rule_constraints, - add_decision_rule_variables, - load_final_solution, - pyrosTerminationCondition, - ValidEnum, - ObjectiveType, - validate_uncertainty_set, - identify_objective_functions, - validate_kwarg_inputs, - transform_to_standard_form, - turn_bounds_to_constraints, - replace_uncertain_bounds_with_constraints, - output_logger) +from pyomo.contrib.pyros.util import ( + model_is_valid, + recast_to_min_obj, + add_decision_rule_constraints, + add_decision_rule_variables, + load_final_solution, + pyrosTerminationCondition, + ValidEnum, + ObjectiveType, + validate_uncertainty_set, + identify_objective_functions, + validate_kwarg_inputs, + transform_to_standard_form, + turn_bounds_to_constraints, + replace_uncertain_bounds_with_constraints, + output_logger, +) from pyomo.contrib.pyros.solve_data import ROSolveResults from pyomo.contrib.pyros.pyros_algorithm_methods import ROSolver_iterative_solve from pyomo.contrib.pyros.uncertainty_sets import uncertainty_sets from pyomo.core.base import Constraint -__version__ = "1.2.0" +__version__ = "1.2.7" def NonNegIntOrMinusOne(obj): @@ -57,10 +56,10 @@ def NonNegIntOrMinusOne(obj): ''' ans = int(obj) if ans != float(obj) or (ans < 0 and ans != -1): - raise ValueError( - "Expected non-negative int, but received %s" % (obj,)) + raise ValueError("Expected non-negative int, but received %s" % (obj,)) return ans + def PositiveIntOrMinusOne(obj): ''' if obj is a positive int, return the int @@ -69,17 +68,15 @@ def PositiveIntOrMinusOne(obj): ''' ans = int(obj) if ans != float(obj) or (ans <= 0 and ans != -1): - raise ValueError( - "Expected positive int, but received %s" % (obj,)) + raise ValueError("Expected positive int, but received %s" % (obj,)) return ans class SolverResolvable(object): - def __call__(self, obj): ''' if obj is a string, return the Solver object for that solver name - if obj is a Solver object, return the Solver + if obj is a Solver object, return a copy of the Solver if obj is a list, and each element of list is solver resolvable, return list of solvers ''' if isinstance(obj, str): @@ -89,8 +86,11 @@ def __call__(self, obj): elif isinstance(obj, list): return [self(o) for o in obj] else: - raise ValueError("Expected a Pyomo solver or string object, " - "instead recieved {1}".format(obj.__class__.__name__)) + raise ValueError( + "Expected a Pyomo solver or string object, " + "instead received {1}".format(obj.__class__.__name__) + ) + class InputDataStandardizer(object): def __init__(self, ctype, cdatatype): @@ -109,147 +109,531 @@ def __call__(self, obj): assert isinstance(_, self.cdatatype) return ans + +class PyROSConfigValue(ConfigValue): + """ + Subclass of ``common.collections.ConfigValue``, + with a few attributes added to facilitate documentation + of the PyROS solver. + An instance of this class is used for storing and + documenting an argument to the PyROS solver. + + Attributes + ---------- + is_optional : bool + Argument is optional. + document_default : bool, optional + Document the default value of the argument + in any docstring generated from this instance, + or a `ConfigDict` object containing this instance. + dtype_spec_str : None or str, optional + String documenting valid types for this argument. + If `None` is provided, then this string is automatically + determined based on the `domain` argument to the + constructor. + + NOTES + ----- + Cleaner way to access protected attributes + (particularly _doc, _description) inherited from ConfigValue? + + """ + + def __init__( + self, + default=None, + domain=None, + description=None, + doc=None, + visibility=0, + is_optional=True, + document_default=True, + dtype_spec_str=None, + ): + """Initialize self (see class docstring).""" + + # initialize base class attributes + super(self.__class__, self).__init__( + default=default, + domain=domain, + description=description, + doc=doc, + visibility=visibility, + ) + + self.is_optional = is_optional + self.document_default = document_default + + if dtype_spec_str is None: + self.dtype_spec_str = self.domain_name() + # except AttributeError: + # self.dtype_spec_str = repr(self._domain) + else: + self.dtype_spec_str = dtype_spec_str + + def pyros_config(): CONFIG = ConfigDict('PyROS') # ================================================ # === Options common to all solvers # ================================================ - CONFIG.declare('time_limit', ConfigValue( - default=None, - domain=NonNegativeFloat, description="Optional. Default = None. " - "Total allotted time for the execution of the PyROS solver in seconds " - "(includes time spent in sub-solvers). 'None' is no time limit." - )) - CONFIG.declare('keepfiles', ConfigValue( - default=False, - domain=bool, description="Optional. Default = False. Whether or not to write files of sub-problems for use in debugging. " - "Must be paired with a writable directory supplied via ``subproblem_file_directory``." - )) - CONFIG.declare('tee', ConfigValue( - default=False, - domain=bool, description="Optional. Default = False. Sets the ``tee`` for all sub-solvers utilized." - )) - CONFIG.declare('load_solution', ConfigValue( - default=True, - domain=bool, description="Optional. Default = True. " - "Whether or not to load the final solution of PyROS into the model object." - )) + CONFIG.declare( + 'time_limit', + PyROSConfigValue( + default=None, + domain=NonNegativeFloat, + doc=( + """ + Wall time limit for the execution of the PyROS solver + in seconds (including time spent by subsolvers). + If `None` is provided, then no time limit is enforced. + """ + ), + is_optional=True, + document_default=False, + dtype_spec_str="None or NonNegativeFloat", + ), + ) + CONFIG.declare( + 'keepfiles', + PyROSConfigValue( + default=False, + domain=bool, + description=( + """ + Export subproblems with a non-acceptable termination status + for debugging purposes. + If True is provided, then the argument `subproblem_file_directory` + must also be specified. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str=None, + ), + ) + CONFIG.declare( + 'tee', + PyROSConfigValue( + default=False, + domain=bool, + description="Output subordinate solver logs for all subproblems.", + is_optional=True, + document_default=True, + dtype_spec_str=None, + ), + ) + CONFIG.declare( + 'load_solution', + PyROSConfigValue( + default=True, + domain=bool, + description=( + """ + Load final solution(s) found by PyROS to the deterministic model + provided. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str=None, + ), + ) # ================================================ # === Required User Inputs # ================================================ - CONFIG.declare("first_stage_variables", ConfigValue( - default=[], domain=InputDataStandardizer(Var, _VarData), - description="Required. List of ``Var`` objects referenced in ``model`` representing the design variables." - )) - CONFIG.declare("second_stage_variables", ConfigValue( - default=[], domain=InputDataStandardizer(Var, _VarData), - description="Required. List of ``Var`` referenced in ``model`` representing the control variables." - )) - CONFIG.declare("uncertain_params", ConfigValue( - default=[], domain=InputDataStandardizer(Param, _ParamData), - description="Required. List of ``Param`` referenced in ``model`` representing the uncertain parameters. MUST be ``mutable``. " - "Assumes entries are provided in consistent order with the entries of 'nominal_uncertain_param_vals' input." - )) - CONFIG.declare("uncertainty_set", ConfigValue( - default=None, domain=uncertainty_sets, - description="Required. ``UncertaintySet`` object representing the uncertainty space " - "that the final solutions will be robust against." - )) - CONFIG.declare("local_solver", ConfigValue( - default=None, domain=SolverResolvable(), - description="Required. ``Solver`` object to utilize as the primary local NLP solver." - )) - CONFIG.declare("global_solver", ConfigValue( - default=None, domain=SolverResolvable(), - description="Required. ``Solver`` object to utilize as the primary global NLP solver." - )) + CONFIG.declare( + "first_stage_variables", + PyROSConfigValue( + default=[], + domain=InputDataStandardizer(Var, _VarData), + description="First-stage (or design) variables.", + is_optional=False, + dtype_spec_str="list of Var", + ), + ) + CONFIG.declare( + "second_stage_variables", + PyROSConfigValue( + default=[], + domain=InputDataStandardizer(Var, _VarData), + description="Second-stage (or control) variables.", + is_optional=False, + dtype_spec_str="list of Var", + ), + ) + CONFIG.declare( + "uncertain_params", + PyROSConfigValue( + default=[], + domain=InputDataStandardizer(Param, _ParamData), + description=( + """ + Uncertain model parameters. + The `mutable` attribute for all uncertain parameter + objects should be set to True. + """ + ), + is_optional=False, + dtype_spec_str="list of Param", + ), + ) + CONFIG.declare( + "uncertainty_set", + PyROSConfigValue( + default=None, + domain=uncertainty_sets, + description=( + """ + Uncertainty set against which the + final solution(s) returned by PyROS should be certified + to be robust. + """ + ), + is_optional=False, + dtype_spec_str="UncertaintySet", + ), + ) + CONFIG.declare( + "local_solver", + PyROSConfigValue( + default=None, + domain=SolverResolvable(), + description="Subordinate local NLP solver.", + is_optional=False, + dtype_spec_str="Solver", + ), + ) + CONFIG.declare( + "global_solver", + PyROSConfigValue( + default=None, + domain=SolverResolvable(), + description="Subordinate global NLP solver.", + is_optional=False, + dtype_spec_str="Solver", + ), + ) # ================================================ # === Optional User Inputs # ================================================ - CONFIG.declare("objective_focus", ConfigValue( - default=ObjectiveType.nominal, domain=ValidEnum(ObjectiveType), - description="Optional. Default = ``ObjectiveType.nominal``. Choice of objective function to optimize in the master problems. " - "Choices are: ``ObjectiveType.worst_case``, ``ObjectiveType.nominal``. See Note for details." - )) - CONFIG.declare("nominal_uncertain_param_vals", ConfigValue( - default=[], domain=list, - description="Optional. Default = deterministic model ``Param`` values. List of nominal values for all uncertain parameters. " - "Assumes entries are provided in consistent order with the entries of ``uncertain_params`` input." - )) - CONFIG.declare("decision_rule_order", ConfigValue( - default=0, domain=In([0, 1, 2]), - description="Optional. Default = 0. Order of decision rule functions for handling second-stage variable recourse. " - "Choices are: '0' for constant recourse (a.k.a. static approximation), '1' for affine recourse " - "(a.k.a. affine decision rules), '2' for quadratic recourse." - )) - CONFIG.declare("solve_master_globally", ConfigValue( - default=False, domain=bool, - description="Optional. Default = False. 'True' for the master problems to be solved with the user-supplied global solver(s); " - "or 'False' for the master problems to be solved with the user-supplied local solver(s). " - - )) - CONFIG.declare("max_iter", ConfigValue( - default=-1, domain=PositiveIntOrMinusOne, - description="Optional. Default = -1. Iteration limit for the GRCS algorithm. '-1' is no iteration limit." - )) - CONFIG.declare("robust_feasibility_tolerance", ConfigValue( - default=1e-4, domain=NonNegativeFloat, - description="Optional. Default = 1e-4. Relative tolerance for assessing robust feasibility violation during separation phase." - )) - CONFIG.declare("separation_priority_order", ConfigValue( - default={}, domain=dict, - description="Optional. Default = {}. Dictionary mapping inequality constraint names to positive integer priorities for separation. " - "Constraints not referenced in the dictionary assume a priority of 0 (lowest priority)." - )) - CONFIG.declare("progress_logger", ConfigValue( - default="pyomo.contrib.pyros", domain=a_logger, - description="Optional. Default = \"pyomo.contrib.pyros\". The logger object to use for reporting." - )) - CONFIG.declare("backup_local_solvers", ConfigValue( - default=[], domain=SolverResolvable(), - description="Optional. Default = []. List of additional ``Solver`` objects to utilize as backup " - "whenever primary local NLP solver fails to identify solution to a sub-problem." - )) - CONFIG.declare("backup_global_solvers", ConfigValue( - default=[], domain=SolverResolvable(), - description="Optional. Default = []. List of additional ``Solver`` objects to utilize as backup " - "whenever primary global NLP solver fails to identify solution to a sub-problem." - )) - CONFIG.declare("subproblem_file_directory", ConfigValue( - default=None, domain=str, - description="Optional. Path to a directory where subproblem files and " - "logs will be written in the case that a subproblem fails to solve." - )) + CONFIG.declare( + "objective_focus", + PyROSConfigValue( + default=ObjectiveType.nominal, + domain=ValidEnum(ObjectiveType), + description=( + """ + Choice of objective focus to optimize in the master problems. + Choices are: `ObjectiveType.worst_case`, + `ObjectiveType.nominal`. + """ + ), + doc=( + """ + Objective focus for the master problems: + + - `ObjectiveType.nominal`: + Optimize the objective function subject to the nominal + uncertain parameter realization. + - `ObjectiveType.worst_case`: + Optimize the objective function subject to the worst-case + uncertain parameter realization. + + By default, `ObjectiveType.nominal` is chosen. + + A worst-case objective focus is required for certification + of robust optimality of the final solution(s) returned + by PyROS. + If a nominal objective focus is chosen, then only robust + feasibility is guaranteed. + """ + ), + is_optional=True, + document_default=False, + dtype_spec_str="ObjectiveType", + ), + ) + CONFIG.declare( + "nominal_uncertain_param_vals", + PyROSConfigValue( + default=[], + domain=list, + doc=( + """ + Nominal uncertain parameter realization. + Entries should be provided in an order consistent with the + entries of the argument `uncertain_params`. + If an empty list is provided, then the values of the `Param` + objects specified through `uncertain_params` are chosen. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str="list of float", + ), + ) + CONFIG.declare( + "decision_rule_order", + PyROSConfigValue( + default=0, + domain=In([0, 1, 2]), + description=( + """ + Order (or degree) of the polynomial decision rule functions used + for approximating the adjustability of the second stage + variables with respect to the uncertain parameters. + """ + ), + doc=( + """ + Order (or degree) of the polynomial decision rule functions used + for approximating the adjustability of the second stage + variables with respect to the uncertain parameters. + + Choices are: + + - 0: static recourse + - 1: affine recourse + - 2: quadratic recourse + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str=None, + ), + ) + CONFIG.declare( + "solve_master_globally", + PyROSConfigValue( + default=False, + domain=bool, + doc=( + """ + True to solve all master problems with the subordinate + global solver, False to solve all master problems with + the subordinate local solver. + Along with a worst-case objective focus + (see argument `objective_focus`), + solving the master problems to global optimality is required + for certification + of robust optimality of the final solution(s) returned + by PyROS. Otherwise, only robust feasibility is guaranteed. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str=None, + ), + ) + CONFIG.declare( + "max_iter", + PyROSConfigValue( + default=-1, + domain=PositiveIntOrMinusOne, + description=( + """ + Iteration limit. If -1 is provided, then no iteration + limit is enforced. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str="int", + ), + ) + CONFIG.declare( + "robust_feasibility_tolerance", + PyROSConfigValue( + default=1e-4, + domain=NonNegativeFloat, + description=( + """ + Relative tolerance for assessing maximal inequality + constraint violations during the GRCS separation step. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str=None, + ), + ) + CONFIG.declare( + "separation_priority_order", + PyROSConfigValue( + default={}, + domain=dict, + doc=( + """ + Mapping from model inequality constraint names + to positive integers specifying the priorities + of their corresponding separation subproblems. + A higher integer value indicates a higher priority. + Constraints not referenced in the `dict` assume + a priority of 0. + Separation subproblems are solved in order of decreasing + priority. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str=None, + ), + ) + CONFIG.declare( + "progress_logger", + PyROSConfigValue( + default="pyomo.contrib.pyros", + domain=a_logger, + doc=( + """ + Logger (or name thereof) used for reporting PyROS solver + progress. If a `str` is specified, then + ``logging.getLogger(progress_logger)`` is used. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str="str or logging.Logger", + ), + ) + CONFIG.declare( + "backup_local_solvers", + PyROSConfigValue( + default=[], + domain=SolverResolvable(), + doc=( + """ + Additional subordinate local NLP optimizers to invoke + in the event the primary local NLP optimizer fails + to solve a subproblem to an acceptable termination condition. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str="list of Solver", + ), + ) + CONFIG.declare( + "backup_global_solvers", + PyROSConfigValue( + default=[], + domain=SolverResolvable(), + doc=( + """ + Additional subordinate global NLP optimizers to invoke + in the event the primary global NLP optimizer fails + to solve a subproblem to an acceptable termination condition. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str="list of Solver", + ), + ) + CONFIG.declare( + "subproblem_file_directory", + PyROSConfigValue( + default=None, + domain=str, + description=( + """ + Directory to which to export subproblems not successfully + solved to an acceptable termination condition. + In the event ``keepfiles=True`` is specified, a str or + path-like referring to an existing directory must be + provided. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str="None, str, or path-like", + ), + ) + # ================================================ # === Advanced Options # ================================================ - CONFIG.declare("bypass_local_separation", ConfigValue( - default=False, domain=bool, - description="This is an advanced option. Default = False. 'True' to only use global solver(s) during separation; " - "'False' to use local solver(s) at intermediate separations, " - "using global solver(s) only before termination to certify robust feasibility. " - )) - CONFIG.declare("bypass_global_separation", ConfigValue( - default=False, domain=bool, - description="This is an advanced option. Default = False. 'True' to only use local solver(s) during separation; " - "however, robustness of the final result will not be guaranteed. Use to expedite PyROS run when " - "global solver(s) cannot (efficiently) solve separation problems." - )) - CONFIG.declare("p_robustness", ConfigValue( - default={}, domain=dict, - description="This is an advanced option. Default = {}. Whether or not to add p-robustness constraints to the master problems. " - "If the dictionary is empty (default), then p-robustness constraints are not added. " - "See Note for how to specify arguments." - )) + CONFIG.declare( + "bypass_local_separation", + PyROSConfigValue( + default=False, + domain=bool, + description=( + """ + This is an advanced option. + Solve all separation subproblems with the subordinate global + solver(s) only. + This option is useful for expediting PyROS + in the event that the subordinate global optimizer(s) provided + can quickly solve separation subproblems to global optimality. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str=None, + ), + ) + CONFIG.declare( + "bypass_global_separation", + PyROSConfigValue( + default=False, + domain=bool, + doc=( + """ + This is an advanced option. + Solve all separation subproblems with the subordinate local + solver(s) only. + If `True` is chosen, then robustness of the final solution(s) + returned by PyROS is not guaranteed, and a warning will + be issued at termination. + This option is useful for expediting PyROS + in the event that the subordinate global optimizer provided + cannot tractably solve separation subproblems to global + optimality. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str=None, + ), + ) + CONFIG.declare( + "p_robustness", + PyROSConfigValue( + default={}, + domain=dict, + doc=( + """ + This is an advanced option. + Add p-robustness constraints to all master subproblems. + If an empty dict is provided, then p-robustness constraints + are not added. + Otherwise, the dict must map a `str` of value ``'rho'`` + to a non-negative `float`. PyROS automatically + specifies ``1 + p_robustness['rho']`` + as an upper bound for the ratio of the + objective function value under any PyROS-sampled uncertain + parameter realization to the objective function under + the nominal parameter realization. + """ + ), + is_optional=True, + document_default=True, + dtype_spec_str=None, + ), + ) return CONFIG + @SolverFactory.register( "pyros", doc="Robust optimization (RO) solver implementing " - "the generalized robust cutting-set algorithm (GRCS)") + "the generalized robust cutting-set algorithm (GRCS)", +) class PyROS(object): ''' PyROS (Pyomo Robust Optimization Solver) implementing a @@ -260,8 +644,7 @@ class PyROS(object): CONFIG = pyros_config() def available(self, exception_flag=True): - """Check if solver is available. - """ + """Check if solver is available.""" return True def version(self): @@ -269,7 +652,7 @@ def version(self): return __version__ def license_is_valid(self): - ''' License for using PyROS ''' + '''License for using PyROS''' return True # The Pyomo solver API expects that solvers support the context @@ -280,34 +663,43 @@ def __enter__(self): def __exit__(self, et, ev, tb): pass - def solve(self, model, first_stage_variables, second_stage_variables, - uncertain_params, uncertainty_set, local_solver, global_solver, - **kwds): - """Solve the model. + def solve( + self, + model, + first_stage_variables, + second_stage_variables, + uncertain_params, + uncertainty_set, + local_solver, + global_solver, + **kwds, + ): + """Solve a model. Parameters ---------- model: ConcreteModel - A ``ConcreteModel`` object representing the deterministic - model, cast as a minimization problem. - first_stage_variables: List[Var] - The list of ``Var`` objects referenced in ``model`` - representing the design variables. - second_stage_variables: List[Var] - The list of ``Var`` objects referenced in ``model`` - representing the control variables. - uncertain_params: List[Param] - The list of ``Param`` objects referenced in ``model`` - representing the uncertain parameters. MUST be ``mutable``. - Assumes entries are provided in consistent order with the - entries of 'nominal_uncertain_param_vals' input. + The deterministic model. + first_stage_variables: list of Var + First-stage model variables (or design variables). + second_stage_variables: list of Var + Second-stage model variables (or control variables). + uncertain_params: list of Param + Uncertain model parameters. + The `mutable` attribute for every uncertain parameter + objects must be set to True. uncertainty_set: UncertaintySet - ``UncertaintySet`` object representing the uncertainty space - that the final solutions will be robust against. + Uncertainty set against which the solution(s) returned + will be confirmed to be robust. local_solver: Solver - ``Solver`` object to utilize as the primary local NLP solver. + Subordinate local NLP solver. global_solver: Solver - ``Solver`` object to utilize as the primary global NLP solver. + Subordinate global NLP solver. + + Returns + ------- + return_soln : ROSolveResults + Summary of PyROS termination outcome. """ @@ -320,7 +712,7 @@ def solve(self, model, first_stage_variables, second_stage_variables, config.local_solver = local_solver config.global_solver = global_solver - dev_options = kwds.pop('dev_options',{}) + dev_options = kwds.pop('dev_options', {}) config.set_value(kwds) config.set_value(dev_options) @@ -331,14 +723,20 @@ def solve(self, model, first_stage_variables, second_stage_variables, # === Validate ability of grcs RO solver to handle this model if not model_is_valid(model): - raise AttributeError("This model structure is not currently handled by the ROSolver.") + raise AttributeError( + "This model structure is not currently handled by the ROSolver." + ) # === Define nominal point if not specified if len(config.nominal_uncertain_param_vals) == 0: - config.nominal_uncertain_param_vals = list(p.value for p in config.uncertain_params) + config.nominal_uncertain_param_vals = list( + p.value for p in config.uncertain_params + ) elif len(config.nominal_uncertain_param_vals) != len(config.uncertain_params): - raise AttributeError("The nominal_uncertain_param_vals list must be the same length" - "as the uncertain_params list") + raise AttributeError( + "The nominal_uncertain_param_vals list must be the same length" + "as the uncertain_params list" + ) # === Create data containers model_data = ROSolveResults() @@ -380,9 +778,7 @@ def solve(self, model, first_stage_variables, second_stage_variables, # recast to minimization if necessary active_objs = list( model_data.working_model.component_data_objects( - Objective, - active=True, - descend_into=True, + Objective, active=True, descend_into=True ) ) assert len(active_objs) == 1 @@ -398,8 +794,9 @@ def solve(self, model, first_stage_variables, second_stage_variables, # === Replace variable bounds depending on uncertain params with # explicit inequality constraints - replace_uncertain_bounds_with_constraints(model_data.working_model, - model_data.working_model.util.uncertain_params) + replace_uncertain_bounds_with_constraints( + model_data.working_model, model_data.working_model.util.uncertain_params + ) # === Add decision rule information add_decision_rule_variables(model_data, config) @@ -428,19 +825,25 @@ def solve(self, model, first_stage_variables, second_stage_variables, # === Make control_variable_bounds array wm_util.ssv_bounds = [] - for c in model_data.working_model.component_data_objects(Constraint, descend_into=True): + for c in model_data.working_model.component_data_objects( + Constraint, descend_into=True + ): if "bound_con" in c.name: wm_util.ssv_bounds.append(c) # === Solve and load solution into model - pyros_soln, final_iter_separation_solns = ROSolver_iterative_solve(model_data, config) - + pyros_soln, final_iter_separation_solns = ROSolver_iterative_solve( + model_data, config + ) return_soln = ROSolveResults() if pyros_soln is not None and final_iter_separation_solns is not None: - if config.load_solution and \ - (pyros_soln.pyros_termination_condition is pyrosTerminationCondition.robust_optimal or - pyros_soln.pyros_termination_condition is pyrosTerminationCondition.robust_feasible): + if config.load_solution and ( + pyros_soln.pyros_termination_condition + is pyrosTerminationCondition.robust_optimal + or pyros_soln.pyros_termination_condition + is pyrosTerminationCondition.robust_feasible + ): load_final_solution(model_data, pyros_soln.master_soln, config) # === Return time info @@ -455,10 +858,16 @@ def solve(self, model, first_stage_variables, second_stage_variables, else: negation = 1 if config.objective_focus == ObjectiveType.nominal: - return_soln.final_objective_value = negation * value(pyros_soln.master_soln.master_model.obj) + return_soln.final_objective_value = negation * value( + pyros_soln.master_soln.master_model.obj + ) elif config.objective_focus == ObjectiveType.worst_case: - return_soln.final_objective_value = negation * value(pyros_soln.master_soln.master_model.zeta) - return_soln.pyros_termination_condition = pyros_soln.pyros_termination_condition + return_soln.final_objective_value = negation * value( + pyros_soln.master_soln.master_model.zeta + ) + return_soln.pyros_termination_condition = ( + pyros_soln.pyros_termination_condition + ) return_soln.time = model_data.total_cpu_time return_soln.iterations = iterations @@ -469,7 +878,9 @@ def solve(self, model, first_stage_variables, second_stage_variables, del pyros_soln.util_block del pyros_soln.working_model else: - return_soln.pyros_termination_condition = pyrosTerminationCondition.robust_infeasible + return_soln.pyros_termination_condition = ( + pyrosTerminationCondition.robust_infeasible + ) return_soln.final_objective_value = None return_soln.time = get_main_elapsed_time(model_data.timing) return_soln.iterations = 0 @@ -477,13 +888,128 @@ def solve(self, model, first_stage_variables, second_stage_variables, def _generate_filtered_docstring(): + """ + Add Numpy-style 'Keyword arguments' section to `PyROS.solve()` + docstring. + """ cfg = PyROS.CONFIG() - del cfg['first_stage_variables'] - del cfg['second_stage_variables'] - del cfg['uncertain_params'] - del cfg['uncertainty_set'] - del cfg['local_solver'] - del cfg['global_solver'] - return add_docstring_list(PyROS.solve.__doc__, cfg, indent_by=8) + + # mandatory args already documented + exclude_args = [ + "first_stage_variables", + "second_stage_variables", + "uncertain_params", + "uncertainty_set", + "local_solver", + "global_solver", + ] + + indent_by = 8 + width = 72 + before = PyROS.solve.__doc__ + section_name = "Keyword Arguments" + + indent_str = ' ' * indent_by + wrap_width = width - indent_by + cfg = pyros_config() + + arg_docs = [] + + def wrap_doc(doc, indent_by, width): + """ + Wrap a string, accounting for paragraph + breaks ('\n\n') and bullet points (paragraphs + which, when dedented, are such that each line + starts with '- ' or ' '). + """ + paragraphs = doc.split("\n\n") + wrapped_pars = [] + for par in paragraphs: + lines = dedent(par).split("\n") + has_bullets = all( + line.startswith("- ") or line.startswith(" ") + for line in lines + if line != "" + ) + if has_bullets: + # obtain strings of each bullet point + # (dedented, bullet dash and bullet indent removed) + bullet_groups = [] + new_group = False + group = "" + for line in lines: + new_group = line.startswith("- ") + if new_group: + bullet_groups.append(group) + group = "" + new_line = line[2:] + group += f"{new_line}\n" + if group != "": + # ensure last bullet not skipped + bullet_groups.append(group) + + # first entry is just ''; remove + bullet_groups = bullet_groups[1:] + + # wrap each bullet point, then add bullet + # and indents as necessary + wrapped_groups = [] + for group in bullet_groups: + wrapped_groups.append( + "\n".join( + f"{'- ' if idx == 0 else ' '}{line}" + for idx, line in enumerate( + wrap(group, width - 2 - indent_by) + ) + ) + ) + + # now combine bullets into single 'paragraph' + wrapped_pars.append( + indent("\n".join(wrapped_groups), prefix=' ' * indent_by) + ) + else: + wrapped_pars.append( + indent( + "\n".join(wrap(dedent(par), width=width - indent_by)), + prefix=' ' * indent_by, + ) + ) + + return "\n\n".join(wrapped_pars) + + section_header = indent(f"{section_name}\n" + "-" * len(section_name), indent_str) + for key, itm in cfg._data.items(): + if key in exclude_args: + continue + arg_name = key + arg_dtype = itm.dtype_spec_str + + if itm.is_optional: + if itm.document_default: + optional_str = f", default={repr(itm._default)}" + else: + optional_str = ", optional" + else: + optional_str = "" + + arg_header = f"{indent_str}{arg_name} : {arg_dtype}{optional_str}" + + # dedented_doc_str = dedent(itm.doc).replace("\n", ' ').strip() + if itm._doc is not None: + raw_arg_desc = itm._doc + else: + raw_arg_desc = itm._description + + arg_description = wrap_doc( + raw_arg_desc, width=wrap_width, indent_by=indent_by + 4 + ) + + arg_docs.append(f"{arg_header}\n{arg_description}") + + kwargs_section_doc = "\n".join([section_header] + arg_docs) + + return f"{before}\n{kwargs_section_doc}\n" + PyROS.solve.__doc__ = _generate_filtered_docstring() diff --git a/pyomo/contrib/pyros/pyros_algorithm_methods.py b/pyomo/contrib/pyros/pyros_algorithm_methods.py index 5e370a3a0d7..7a0c990d549 100644 --- a/pyomo/contrib/pyros/pyros_algorithm_methods.py +++ b/pyomo/contrib/pyros/pyros_algorithm_methods.py @@ -6,12 +6,24 @@ from pyomo.opt.results import TerminationCondition from pyomo.contrib.pyros import master_problem_methods, separation_problem_methods from pyomo.contrib.pyros.solve_data import SeparationProblemData, MasterResult -from pyomo.contrib.pyros.util import ObjectiveType, get_time_from_solver, pyrosTerminationCondition -from pyomo.contrib.pyros.util import get_main_elapsed_time, output_logger, coefficient_matching +from pyomo.contrib.pyros.uncertainty_sets import Geometry +from pyomo.contrib.pyros.util import ( + ObjectiveType, + get_time_from_solver, + pyrosTerminationCondition, +) +from pyomo.contrib.pyros.util import ( + get_main_elapsed_time, + output_logger, + coefficient_matching, +) from pyomo.core.base import value from pyomo.common.collections import ComponentSet -def update_grcs_solve_data(pyros_soln, term_cond, nominal_data, timing_data, separation_data, master_soln, k): + +def update_grcs_solve_data( + pyros_soln, term_cond, nominal_data, timing_data, separation_data, master_soln, k +): ''' This function updates the results data container object to return to the user so that they have all pertinent information from the PyROS run. @@ -34,6 +46,7 @@ def update_grcs_solve_data(pyros_soln, term_cond, nominal_data, timing_data, sep return + def ROSolver_iterative_solve(model_data, config): ''' GRCS algorithm implementation @@ -47,27 +60,36 @@ def ROSolver_iterative_solve(model_data, config): violation = list(p for p in config.nominal_uncertain_param_vals) # === Do coefficient matching - constraints = [c for c in model_data.working_model.component_data_objects(Constraint) if c.equality - and c not in ComponentSet(model_data.working_model.util.decision_rule_eqns)] + constraints = [ + c + for c in model_data.working_model.component_data_objects(Constraint) + if c.equality + and c not in ComponentSet(model_data.working_model.util.decision_rule_eqns) + ] model_data.working_model.util.h_x_q_constraints = ComponentSet() for c in constraints: - coeff_matching_success, robust_infeasible = coefficient_matching(model=model_data.working_model, constraint=c, - uncertain_params=model_data.working_model.util.uncertain_params, - config=config) + coeff_matching_success, robust_infeasible = coefficient_matching( + model=model_data.working_model, + constraint=c, + uncertain_params=model_data.working_model.util.uncertain_params, + config=config, + ) if not coeff_matching_success and not robust_infeasible: - raise ValueError("Equality constraint \"%s\" cannot be guaranteed to be robustly feasible, " - "given the current partitioning between first-stage, second-stage and state variables. " - "You might consider editing this constraint to reference some second-stage " - "and/or state variable(s)." - % c.name) + raise ValueError( + "Equality constraint \"%s\" cannot be guaranteed to be robustly feasible, " + "given the current partitioning between first-stage, second-stage and state variables. " + "You might consider editing this constraint to reference some second-stage " + "and/or state variable(s)." % c.name + ) elif not coeff_matching_success and robust_infeasible: - config.progress_logger.info("PyROS has determined that the model is robust infeasible. " - "One reason for this is that equality constraint \"%s\" cannot be satisfied " - "against all realizations of uncertainty, " - "given the current partitioning between first-stage, second-stage and state variables. " - "You might consider editing this constraint to reference some (additional) second-stage " - "and/or state variable(s)." - % c.name) + config.progress_logger.info( + "PyROS has determined that the model is robust infeasible. " + "One reason for this is that equality constraint \"%s\" cannot be satisfied " + "against all realizations of uncertainty, " + "given the current partitioning between first-stage, second-stage and state variables. " + "You might consider editing this constraint to reference some (additional) second-stage " + "and/or state variable(s)." % c.name + ) return None, None else: pass @@ -84,11 +106,14 @@ def ROSolver_iterative_solve(model_data, config): master_data.master_model.p_robust_constraints = ConstraintList() # === Add scenario_0 - master_data.master_model.scenarios[0, 0].transfer_attributes_from(master_data.original.clone()) - if len(master_data.master_model.scenarios[0,0].util.uncertain_params) != len(violation): + master_data.master_model.scenarios[0, 0].transfer_attributes_from( + master_data.original.clone() + ) + if len(master_data.master_model.scenarios[0, 0].util.uncertain_params) != len( + violation + ): raise ValueError - # === Set the nominal uncertain parameters to the violation values for i, v in enumerate(violation): master_data.master_model.scenarios[0, 0].util.uncertain_params[i].value = v @@ -96,42 +121,71 @@ def ROSolver_iterative_solve(model_data, config): # === Add objective function (assuming minimization of costs) with nominal second-stage costs if config.objective_focus is ObjectiveType.nominal: master_data.master_model.obj = Objective( - expr=master_data.master_model.scenarios[0,0].first_stage_objective + - master_data.master_model.scenarios[0,0].second_stage_objective + expr=master_data.master_model.scenarios[0, 0].first_stage_objective + + master_data.master_model.scenarios[0, 0].second_stage_objective ) elif config.objective_focus is ObjectiveType.worst_case: # === Worst-case cost objective master_data.master_model.zeta = Var( - initialize=value( - master_data.master_model.scenarios[0, 0].first_stage_objective + - master_data.master_model.scenarios[0, 0].second_stage_objective, - exception=False) + initialize=value( + master_data.master_model.scenarios[0, 0].first_stage_objective + + master_data.master_model.scenarios[0, 0].second_stage_objective, + exception=False, + ) ) master_data.master_model.obj = Objective(expr=master_data.master_model.zeta) - master_data.master_model.scenarios[0,0].epigraph_constr = Constraint(expr= - master_data.master_model.scenarios[0, 0].first_stage_objective + - master_data.master_model.scenarios[0, 0].second_stage_objective <= master_data.master_model.zeta ) - master_data.master_model.scenarios[0,0].util.first_stage_variables.append(master_data.master_model.zeta) + master_data.master_model.scenarios[0, 0].epigraph_constr = Constraint( + expr=master_data.master_model.scenarios[0, 0].first_stage_objective + + master_data.master_model.scenarios[0, 0].second_stage_objective + <= master_data.master_model.zeta + ) + master_data.master_model.scenarios[0, 0].util.first_stage_variables.append( + master_data.master_model.zeta + ) # === Add deterministic constraints to ComponentSet on original so that these become part of separation model - master_data.original.util.deterministic_constraints = \ - ComponentSet(c for c in master_data.original.component_data_objects(Constraint, descend_into=True)) + master_data.original.util.deterministic_constraints = ComponentSet( + c + for c in master_data.original.component_data_objects( + Constraint, descend_into=True + ) + ) # === Make separation problem model once before entering the solve loop - separation_model = separation_problem_methods.make_separation_problem(model_data=master_data, config=config) + separation_model = separation_problem_methods.make_separation_problem( + model_data=master_data, config=config + ) # === Create separation problem data container object and add information to catalog during solve separation_data = SeparationProblemData() separation_data.separation_model = separation_model - separation_data.points_separated = [] # contains last point separated in the separation problem - separation_data.points_added_to_master = [config.nominal_uncertain_param_vals] # explicitly robust against in master - separation_data.constraint_violations = [] # list of constraint violations for each iteration - separation_data.total_global_separation_solves = 0 # number of times global solve is used - separation_data.timing = master_data.timing # timing object + separation_data.points_separated = ( + [] + ) # contains last point separated in the separation problem + separation_data.points_added_to_master = [ + config.nominal_uncertain_param_vals + ] # explicitly robust against in master + separation_data.constraint_violations = ( + [] + ) # list of constraint violations for each iteration + separation_data.total_global_separation_solves = ( + 0 # number of times global solve is used + ) + separation_data.timing = master_data.timing # timing object # === Keep track of subsolver termination statuses from each iteration separation_data.separation_problem_subsolver_statuses = [] + # for discrete set types, keep track of scenarios added to master + if config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS: + separation_data.idxs_of_master_scenarios = [ + config.uncertainty_set.scenarios.index( + tuple(config.nominal_uncertain_param_vals) + ) + ] + else: + separation_data.idxs_of_master_scenarios = None + # === Nominal information nominal_data = Block() nominal_data.nom_fsv_vals = [] @@ -151,54 +205,68 @@ def ROSolver_iterative_solve(model_data, config): dr_var_lists_polished = [] k = 0 + master_statuses = [] while config.max_iter == -1 or k < config.max_iter: master_data.iteration = k # === Add p-robust constraint if iteration > 0 if k > 0 and config.p_robustness: - master_problem_methods.add_p_robust_constraint(model_data=master_data, config=config) + master_problem_methods.add_p_robust_constraint( + model_data=master_data, config=config + ) # === Solve Master Problem config.progress_logger.info("PyROS working on iteration %s..." % k) - master_soln = master_problem_methods.solve_master(model_data=master_data, config=config) - #config.progress_logger.info("Done solving Master Problem!") - master_soln.master_problem_subsolver_statuses = [] + master_soln = master_problem_methods.solve_master( + model_data=master_data, config=config + ) + # config.progress_logger.info("Done solving Master Problem!") # === Keep track of total time and subsolver termination conditions timing_data.total_master_solve_time += get_time_from_solver(master_soln.results) if k > 0: # master feas problem not solved for iteration 0 - timing_data.total_master_solve_time += get_time_from_solver(master_soln.feasibility_problem_results) + timing_data.total_master_solve_time += get_time_from_solver( + master_soln.feasibility_problem_results + ) - master_soln.master_problem_subsolver_statuses.append(master_soln.results.solver.termination_condition) + master_statuses.append(master_soln.results.solver.termination_condition) + master_soln.master_problem_subsolver_statuses = master_statuses # === Check for robust infeasibility or error or time-out in master problem solve - if master_soln.master_subsolver_results[1] is pyrosTerminationCondition.robust_infeasible: + if ( + master_soln.master_subsolver_results[1] + is pyrosTerminationCondition.robust_infeasible + ): term_cond = pyrosTerminationCondition.robust_infeasible output_logger(config=config, robust_infeasible=True) - elif master_soln.pyros_termination_condition is pyrosTerminationCondition.subsolver_error: + elif ( + master_soln.pyros_termination_condition + is pyrosTerminationCondition.subsolver_error + ): term_cond = pyrosTerminationCondition.subsolver_error + elif ( + master_soln.pyros_termination_condition + is pyrosTerminationCondition.time_out + ): + term_cond = pyrosTerminationCondition.time_out else: term_cond = None - if term_cond == pyrosTerminationCondition.subsolver_error or \ - term_cond == pyrosTerminationCondition.robust_infeasible: - update_grcs_solve_data(pyros_soln=model_data, k=k, term_cond=term_cond, - nominal_data=nominal_data, - timing_data=timing_data, - separation_data=separation_data, - master_soln=master_soln) + if term_cond in { + pyrosTerminationCondition.subsolver_error, + pyrosTerminationCondition.time_out, + pyrosTerminationCondition.robust_infeasible, + }: + update_grcs_solve_data( + pyros_soln=model_data, + k=k, + term_cond=term_cond, + nominal_data=nominal_data, + timing_data=timing_data, + separation_data=separation_data, + master_soln=master_soln, + ) return model_data, [] - # === Check if time limit reached - elapsed = get_main_elapsed_time(model_data.timing) - if config.time_limit: - if elapsed >= config.time_limit: - output_logger(config=config, time_out=True, elapsed=elapsed) - update_grcs_solve_data(pyros_soln=model_data, k=k, term_cond=pyrosTerminationCondition.time_out, - nominal_data=nominal_data, - timing_data=timing_data, - separation_data=separation_data, - master_soln=master_soln) - return model_data, [] # === Save nominal information if k == 0: @@ -212,30 +280,55 @@ def ROSolver_iterative_solve(model_data, config): nominal_data.nom_second_stage_cost = master_soln.second_stage_objective nominal_data.nom_obj = value(master_data.master_model.obj) - if ( - # === Decision rule polishing (do not polish on first iteration if no ssv or if decision_rule_order = 0) - (config.decision_rule_order != 0 and len(config.second_stage_variables) > 0 and k != 0) + config.decision_rule_order != 0 + and len(config.second_stage_variables) > 0 + and k != 0 ): # === Save initial values of DR vars to file - for varslist in master_data.master_model.scenarios[0,0].util.decision_rule_vars: + for varslist in master_data.master_model.scenarios[ + 0, 0 + ].util.decision_rule_vars: vals = [] for dvar in varslist.values(): vals.append(dvar.value) dr_var_lists_original.append(vals) - polishing_results = master_problem_methods.minimize_dr_vars(model_data=master_data, config=config) + polishing_results = master_problem_methods.minimize_dr_vars( + model_data=master_data, config=config + ) timing_data.total_dr_polish_time += get_time_from_solver(polishing_results) - #=== Save after polish - for varslist in master_data.master_model.scenarios[0,0].util.decision_rule_vars: + # === Save after polish + for varslist in master_data.master_model.scenarios[ + 0, 0 + ].util.decision_rule_vars: vals = [] for dvar in varslist.values(): vals.append(dvar.value) dr_var_lists_polished.append(vals) + # === Check if time limit reached + elapsed = get_main_elapsed_time(model_data.timing) + if config.time_limit: + if elapsed >= config.time_limit: + output_logger(config=config, time_out=True, elapsed=elapsed) + update_grcs_solve_data( + pyros_soln=model_data, + k=k, + term_cond=pyrosTerminationCondition.time_out, + nominal_data=nominal_data, + timing_data=timing_data, + separation_data=separation_data, + master_soln=master_soln, + ) + return model_data, [] + # === Set up for the separation problem - separation_data.opt_fsv_vals = [v.value for v in master_soln.master_model.scenarios[0,0].util.first_stage_variables] + separation_data.opt_fsv_vals = [ + v.value + for v in master_soln.master_model.scenarios[0, 0].util.first_stage_variables + ] separation_data.opt_ssv_vals = master_soln.ssv_vals # === Provide master model scenarios to separation problem for initialization options @@ -246,100 +339,121 @@ def ROSolver_iterative_solve(model_data, config): # === Solve Separation Problem separation_data.iteration = k - separation_data.master_nominal_scenario = master_data.master_model.scenarios[0,0] + separation_data.master_nominal_scenario = master_data.master_model.scenarios[ + 0, 0 + ] separation_data.master_model = master_data.master_model - separation_solns, violating_realizations, constr_violations, is_global, \ - local_sep_time, global_sep_time = \ - separation_problem_methods.solve_separation_problem(model_data=separation_data, config=config) + separation_results = separation_problem_methods.solve_separation_problem( + model_data=separation_data, config=config + ) - for sep_soln_list in separation_solns: - for s in sep_soln_list: - separation_data.separation_problem_subsolver_statuses.append(s.termination_condition) + separation_data.separation_problem_subsolver_statuses.extend( + [ + res.solver.termination_condition + for res in separation_results.generate_subsolver_results() + ] + ) - if is_global: + if separation_results.solved_globally: separation_data.total_global_separation_solves += 1 - timing_data.total_separation_local_time += local_sep_time - timing_data.total_separation_global_time += global_sep_time - - separation_data.constraint_violations.append(constr_violations) - - - if not any(s.found_violation for solve_data_list in separation_solns for s in solve_data_list): - separation_data.points_separated = [] - else: - separation_data.points_separated = violating_realizations + # make updates based on separation results + timing_data.total_separation_local_time += ( + separation_results.evaluate_local_solve_time(get_time_from_solver) + ) + timing_data.total_separation_global_time += ( + separation_results.evaluate_global_solve_time(get_time_from_solver) + ) + if separation_results.found_violation: + scaled_violations = separation_results.scaled_violations + if scaled_violations is not None: + # can be None if time out or subsolver error + # reported in separation + separation_data.constraint_violations.append(scaled_violations.values()) + separation_data.points_separated = ( + separation_results.violating_param_realization + ) - # === Check if time limit reached + # terminate on time limit elapsed = get_main_elapsed_time(model_data.timing) - if config.time_limit: - if elapsed >= config.time_limit: - output_logger(config=config, time_out=True, elapsed=elapsed) - termination_condition = pyrosTerminationCondition.time_out - update_grcs_solve_data(pyros_soln=model_data, k=k, term_cond=termination_condition, - nominal_data=nominal_data, - timing_data=timing_data, - separation_data=separation_data, - master_soln=master_soln) - return model_data, separation_solns - - # === Check if we exit due to solver returning unsatisfactory statuses (not in permitted_termination_conditions) - local_solve_term_conditions = {TerminationCondition.optimal, TerminationCondition.locallyOptimal, - TerminationCondition.globallyOptimal} - global_solve_term_conditions = {TerminationCondition.optimal, TerminationCondition.globallyOptimal} - if (is_global and any((s.termination_condition not in global_solve_term_conditions) - for sep_soln_list in separation_solns for s in sep_soln_list)) or \ - (not is_global and any((s.termination_condition not in local_solve_term_conditions) - for sep_soln_list in separation_solns for s in sep_soln_list)): + if separation_results.time_out: + output_logger(config=config, time_out=True, elapsed=elapsed) + termination_condition = pyrosTerminationCondition.time_out + update_grcs_solve_data( + pyros_soln=model_data, + k=k, + term_cond=termination_condition, + nominal_data=nominal_data, + timing_data=timing_data, + separation_data=separation_data, + master_soln=master_soln, + ) + return model_data, separation_results + + # terminate on separation subsolver error + if separation_results.subsolver_error: termination_condition = pyrosTerminationCondition.subsolver_error - update_grcs_solve_data(pyros_soln=model_data, k=k, term_cond=termination_condition, - nominal_data=nominal_data, - timing_data=timing_data, - separation_data=separation_data, - master_soln=master_soln) - return model_data, separation_solns + update_grcs_solve_data( + pyros_soln=model_data, + k=k, + term_cond=termination_condition, + nominal_data=nominal_data, + timing_data=timing_data, + separation_data=separation_data, + master_soln=master_soln, + ) + return model_data, separation_results # === Check if we terminate due to robust optimality or feasibility, # or in the event of bypassing global separation, no violations - if (not any(s.found_violation for sep_soln_list in separation_solns for s in sep_soln_list) - and (is_global or config.bypass_global_separation)): + robustness_certified = separation_results.robustness_certified + if robustness_certified: output_logger( - config=config, - bypass_global_separation=config.bypass_global_separation + config=config, bypass_global_separation=config.bypass_global_separation ) - if config.solve_master_globally and config.objective_focus is ObjectiveType.worst_case: + robust_optimal = ( + config.solve_master_globally + and config.objective_focus is ObjectiveType.worst_case + ) + if robust_optimal: output_logger(config=config, robust_optimal=True) termination_condition = pyrosTerminationCondition.robust_optimal else: output_logger(config=config, robust_feasible=True) termination_condition = pyrosTerminationCondition.robust_feasible - update_grcs_solve_data(pyros_soln=model_data, k=k, term_cond=termination_condition, - nominal_data=nominal_data, - timing_data=timing_data, - separation_data=separation_data, - master_soln=master_soln) - return model_data, separation_solns + update_grcs_solve_data( + pyros_soln=model_data, + k=k, + term_cond=termination_condition, + nominal_data=nominal_data, + timing_data=timing_data, + separation_data=separation_data, + master_soln=master_soln, + ) + return model_data, separation_results # === Add block to master at violation - master_problem_methods.add_scenario_to_master(master_data, violating_realizations) - separation_data.points_added_to_master.append(violating_realizations) + master_problem_methods.add_scenario_to_master( + model_data=master_data, + violations=separation_results.violating_param_realization, + ) + separation_data.points_added_to_master.append( + separation_results.violating_param_realization + ) k += 1 + # Iteration limit reached output_logger(config=config, max_iter=True) - update_grcs_solve_data(pyros_soln=model_data, k=k, term_cond=pyrosTerminationCondition.max_iter, - nominal_data=nominal_data, - timing_data=timing_data, - separation_data=separation_data, - master_soln=master_soln) - - # === In this case we still return the final solution objects for the last iteration - return model_data, separation_solns - - - - - - + update_grcs_solve_data( + pyros_soln=model_data, + k=k, + term_cond=pyrosTerminationCondition.max_iter, + nominal_data=nominal_data, + timing_data=timing_data, + separation_data=separation_data, + master_soln=master_soln, + ) + return model_data, separation_results diff --git a/pyomo/contrib/pyros/separation_problem_methods.py b/pyomo/contrib/pyros/separation_problem_methods.py index 87be86c6ede..2c41c869474 100644 --- a/pyomo/contrib/pyros/separation_problem_methods.py +++ b/pyomo/contrib/pyros/separation_problem_methods.py @@ -2,38 +2,50 @@ Functions for the construction and solving of the GRCS separation problem via ROsolver """ from pyomo.core.base.constraint import Constraint, ConstraintList -from pyomo.core.base.objective import (Objective, - maximize, - value) +from pyomo.core.base.objective import Objective, maximize, value from pyomo.core.base import Var, Param from pyomo.common.collections import ComponentSet, ComponentMap from pyomo.common.dependencies import numpy as np -from pyomo.contrib.pyros.util import (ObjectiveType, - get_time_from_solver, - output_logger) -from pyomo.contrib.pyros.solve_data import SeparationResult +from pyomo.contrib.pyros.util import ObjectiveType, get_time_from_solver, output_logger +from pyomo.contrib.pyros.solve_data import ( + DiscreteSeparationSolveCallResults, + SeparationSolveCallResults, + SeparationLoopResults, + SeparationResults, +) from pyomo.opt import TerminationCondition as tc -from pyomo.core.expr.current import (replace_expressions, - identify_mutable_parameters, - identify_variables) +from pyomo.core.expr import ( + replace_expressions, + identify_mutable_parameters, + identify_variables, +) from pyomo.contrib.pyros.util import get_main_elapsed_time, is_certain_parameter from pyomo.contrib.pyros.uncertainty_sets import Geometry from pyomo.common.errors import ApplicationError from pyomo.contrib.pyros.util import ABS_CON_CHECK_FEAS_TOL +from pyomo.common.timing import TicTocTimer +from pyomo.contrib.pyros.util import ( + TIC_TOC_SOLVE_TIME_ATTR, + adjust_solver_time_settings, + revert_solver_max_time_adjustment, +) import os from copy import deepcopy +from itertools import product + def add_uncertainty_set_constraints(model, config): """ Add inequality constraint(s) representing the uncertainty set. """ - model.util.uncertainty_set_constraint = \ - config.uncertainty_set.set_as_constraint( - uncertain_params=model.util.uncertain_param_vars, model=model, config=config - ) + model.util.uncertainty_set_constraint = config.uncertainty_set.set_as_constraint( + uncertain_params=model.util.uncertain_param_vars, model=model, config=config + ) - config.uncertainty_set.add_bounds_on_uncertain_parameters(model=model, config=config) + config.uncertainty_set.add_bounds_on_uncertain_parameters( + model=model, config=config + ) # === Pre-process out any uncertain parameters which have q_LB = q_ub via (q_ub - q_lb)/max(1,|q_UB|) <= TOL # before building the uncertainty set constraint(s) @@ -42,7 +54,9 @@ def add_uncertainty_set_constraints(model, config): if is_certain_parameter(uncertain_param_index=i, config=config): # This parameter is effectively certain for this set, can remove it from the uncertainty set # We do this by fixing it in separation to its nominal value - model.util.uncertain_param_vars[i].fix(config.nominal_uncertain_param_vals[i]) + model.util.uncertain_param_vars[i].fix( + config.nominal_uncertain_param_vals[i] + ) return @@ -55,21 +69,30 @@ def make_separation_objective_functions(model, config): performance_constraints = [] for c in model.component_data_objects(Constraint, active=True, descend_into=True): _vars = ComponentSet(identify_variables(expr=c.expr)) - uncertain_params_in_expr = list(v for v in model.util.uncertain_param_vars.values() if v in _vars) + uncertain_params_in_expr = list( + v for v in model.util.uncertain_param_vars.values() if v in _vars + ) state_vars_in_expr = list(v for v in model.util.state_vars if v in _vars) - second_stage_variables_in_expr = list(v for v in model.util.second_stage_variables if v in _vars) - if not c.equality and (uncertain_params_in_expr or state_vars_in_expr or second_stage_variables_in_expr): + second_stage_variables_in_expr = list( + v for v in model.util.second_stage_variables if v in _vars + ) + if not c.equality and ( + uncertain_params_in_expr + or state_vars_in_expr + or second_stage_variables_in_expr + ): # This inequality constraint depends on uncertain parameters therefore it must be separated against performance_constraints.append(c) - elif not c.equality and not (uncertain_params_in_expr or state_vars_in_expr or second_stage_variables_in_expr): - c.deactivate() # These are x \in X constraints, not active in separation because x is fixed to x* from previous master + elif not c.equality and not ( + uncertain_params_in_expr + or state_vars_in_expr + or second_stage_variables_in_expr + ): + c.deactivate() # These are x \in X constraints, not active in separation because x is fixed to x* from previous master model.util.performance_constraints = performance_constraints model.util.separation_objectives = [] map_obj_to_constr = ComponentMap() - if len(model.util.performance_constraints) == 0: - raise ValueError("No performance constraints identified for the postulated robust optimization problem.") - for idx, c in enumerate(performance_constraints): # Separation objective constraints standardized to be MAXIMIZATION of <= constraints c.deactivate() @@ -81,7 +104,9 @@ def make_separation_objective_functions(model, config): model.util.separation_objectives.append(obj) elif c.lower is not None: # This is an >= constraint, not supported - raise ValueError("All inequality constraints in model must be in standard form (<= RHS)") + raise ValueError( + "All inequality constraints in model must be in standard form (<= RHS)" + ) model.util.map_obj_to_constr = map_obj_to_constr for obj in model.util.separation_objectives: @@ -100,17 +125,23 @@ def make_separation_problem(model_data, config): separation_model.del_component("coefficient_matching_constraints_index") uncertain_params = separation_model.util.uncertain_params - separation_model.util.uncertain_param_vars = param_vars = Var(range(len(uncertain_params))) + separation_model.util.uncertain_param_vars = param_vars = Var( + range(len(uncertain_params)) + ) map_new_constraint_list_to_original_con = ComponentMap() if config.objective_focus is ObjectiveType.worst_case: separation_model.util.zeta = Param(initialize=0, mutable=True) - constr = Constraint(expr= separation_model.first_stage_objective + separation_model.second_stage_objective - - separation_model.util.zeta <= 0) + constr = Constraint( + expr=separation_model.first_stage_objective + + separation_model.second_stage_objective + - separation_model.util.zeta + <= 0 + ) separation_model.add_component("epigraph_constr", constr) substitution_map = {} - #Separation problem initialized to nominal uncertain parameter values + # Separation problem initialized to nominal uncertain parameter values for idx, var in enumerate(list(param_vars.values())): param = uncertain_params[idx] var.set_value(param.value, skip_validation=True) @@ -122,28 +153,52 @@ def make_separation_problem(model_data, config): for c in separation_model.component_data_objects(Constraint): if any(v in uncertain_param_set for v in identify_mutable_parameters(c.expr)): if c.equality: - constraints.add( - replace_expressions(expr=c.lower, substitution_map=substitution_map) == - replace_expressions(expr=c.body, substitution_map=substitution_map)) + if c in separation_model.util.h_x_q_constraints: + # ensure that constraints subject to + # coefficient matching are not involved in + # separation problem. + # keeping them may induce numerical sensitivity + # issues, possibly leading to incorrect result + c.deactivate() + else: + constraints.add( + replace_expressions( + expr=c.lower, substitution_map=substitution_map + ) + == replace_expressions( + expr=c.body, substitution_map=substitution_map + ) + ) elif c.lower is not None: constraints.add( - replace_expressions(expr=c.lower, substitution_map=substitution_map) <= - replace_expressions(expr=c.body, substitution_map=substitution_map)) + replace_expressions(expr=c.lower, substitution_map=substitution_map) + <= replace_expressions( + expr=c.body, substitution_map=substitution_map + ) + ) elif c.upper is not None: constraints.add( - replace_expressions(expr=c.upper, substitution_map=substitution_map) >= - replace_expressions(expr=c.body, substitution_map=substitution_map)) + replace_expressions(expr=c.upper, substitution_map=substitution_map) + >= replace_expressions( + expr=c.body, substitution_map=substitution_map + ) + ) else: - raise ValueError("Unable to parse constraint for building the separation problem.") + raise ValueError( + "Unable to parse constraint for building the separation problem." + ) c.deactivate() map_new_constraint_list_to_original_con[ - constraints[constraints.index_set().last()]] = c + constraints[constraints.index_set().last()] + ] = c - separation_model.util.map_new_constraint_list_to_original_con = map_new_constraint_list_to_original_con + separation_model.util.map_new_constraint_list_to_original_con = ( + map_new_constraint_list_to_original_con + ) # === Add objectives first so that the uncertainty set # Constraints do not get picked up into the set - # of performance constraints which become objectives + # of performance constraints which become objectives make_separation_objective_functions(separation_model, config) add_uncertainty_set_constraints(separation_model, config) @@ -154,218 +209,512 @@ def make_separation_problem(model_data, config): return separation_model -def get_all_sep_objective_values(model_data, config): +def get_sep_objective_values(model_data, config, perf_cons): """ - Returns all violations from separation + Evaluate performance constraint functions at current + separation solution. + + Parameters + ---------- + model_data : SeparationProblemData + Separation problem data. + config : ConfigDict + PyROS solver settings. + perf_cons : list of Constraint + Performance constraints to be evaluated. + + Returns + ------- + violations : ComponentMap + Mapping from performance constraints to violation values. """ - list_of_violations_across_objectives = [] - for o in model_data.separation_model.util.separation_objectives: + con_to_obj_map = model_data.separation_model.util.map_obj_to_constr + violations = ComponentMap() + + for perf_con in perf_cons: + obj = con_to_obj_map[perf_con] try: - list_of_violations_across_objectives.append(value(o.expr)) - except: + violations[perf_con] = value(obj.expr) + except ValueError: for v in model_data.separation_model.util.first_stage_variables: config.progress_logger.info(v.name + " " + str(v.value)) for v in model_data.separation_model.util.second_stage_variables: config.progress_logger.info(v.name + " " + str(v.value)) raise ArithmeticError( - "Objective function " + str(o) + " led to a math domain error. " - "Does this objective (meaning, its parent performance constraint) " - "contain log(x) or 1/x functions or others with tricky domains?") - return list_of_violations_across_objectives - - -def get_index_of_max_violation(model_data, config, solve_data_list): - - is_discrete_scenarios = True if config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS else False - matrix_dim=0 - indices_of_violating_realizations = [] - indices_of_violating_realizations_and_scenario = {} - if is_discrete_scenarios: - # There are num_scenarios by num_sep_objectives solutions to consider, take the worst-case per sep_objective - for idx, row in enumerate(solve_data_list): - if any(v.found_violation for v in row): - matrix_dim+=1 - if len([v for v in row if v.found_violation]) > 1: - max_val, violation_idx = max( - (val.list_of_scaled_violations[idx], the_index) for the_index, val in enumerate(row) - ) - else: - for elem in row: - if elem.found_violation: - violation_idx = row.index(elem) - indices_of_violating_realizations.append(idx) - indices_of_violating_realizations_and_scenario[idx] = violation_idx + f"Evaluation of performance constraint {perf_con.name} " + f"(separation objective {obj.name}) " + "led to a math domain error. " + "Does the performance constraint expression " + "contain log(x) or 1/x functions " + "or others with tricky domains?" + ) + + return violations + + +def get_argmax_sum_violations(solver_call_results_map, perf_cons_to_evaluate): + """ + Get key of entry of `solver_call_results_map` which contains + separation problem solution with maximal sum of performance + constraint violations over a specified sequence of performance + constraints. + + Parameters + ---------- + solver_call_results : ComponentMap + Mapping from performance constraints to corresponding + separation solver call results. + perf_cons_to_evaluate : list of Constraints + Performance constraints to consider for evaluating + maximal sum. + + Returns + ------- + worst_perf_con : None or Constraint + Performance constraint corresponding to solver call + results object containing solution with maximal sum + of violations across all performance constraints. + If ``found_violation`` attribute of all value entries of + `solver_call_results_map` is False, then `None` is + returned, as this means none of the performance constraints + were found to be violated. + """ + # get indices of performance constraints for which violation found + idx_to_perf_con_map = { + idx: perf_con for idx, perf_con in enumerate(solver_call_results_map) + } + idxs_of_violated_cons = [ + idx + for idx, perf_con in idx_to_perf_con_map.items() + if solver_call_results_map[perf_con].found_violation + ] + + num_violated_cons = len(idxs_of_violated_cons) + + if num_violated_cons == 0: + return None + + # assemble square matrix (2D array) of constraint violations. + # matrix size: number of constraints for which violation was found + # each row corresponds to a performance constraint + # each column corresponds to a separation problem solution + violations_arr = np.zeros(shape=(num_violated_cons, num_violated_cons)) + idxs_product = product( + enumerate(idxs_of_violated_cons), enumerate(idxs_of_violated_cons) + ) + for (row_idx, viol_con_idx), (col_idx, viol_param_idx) in idxs_product: + violations_arr[row_idx, col_idx] = max( + 0, + ( + # violation of this row's performance constraint + # by this column's separation solution + # if separation problems were solved globally, + # then diagonal entries should be the largest in each row + solver_call_results_map[ + idx_to_perf_con_map[viol_param_idx] + ].scaled_violations[idx_to_perf_con_map[viol_con_idx]] + ), + ) + + worst_col_idx = np.argmax(np.sum(violations_arr, axis=0)) + + return idx_to_perf_con_map[idxs_of_violated_cons[worst_col_idx]] + + +def solve_separation_problem(model_data, config): + """ + Solve PyROS separation problems. + + Parameters + ---------- + model_data : SeparationProblemData + Separation problem data. + config : ConfigDict + PyROS solver settings. + + Returns + ------- + pyros.solve_data.SeparationResults + Separation problem solve results. + """ + run_local = not config.bypass_local_separation + run_global = config.bypass_local_separation + + uncertainty_set_is_discrete = ( + config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS + ) + + if run_local: + local_separation_loop_results = perform_separation_loop( + model_data=model_data, config=config, solve_globally=False + ) + run_global = not ( + local_separation_loop_results.found_violation + or uncertainty_set_is_discrete + or local_separation_loop_results.subsolver_error + or local_separation_loop_results.time_out + or config.bypass_global_separation + ) else: - matrix_dim = len(list(result for solve_list in solve_data_list for result in solve_list if result.found_violation == True)) - idx_j = 0 - indices_of_violating_realizations.extend(i for i,x in enumerate(solve_data_list) if x[idx_j].found_violation==True) - - if matrix_dim == 0: - # no violating realizations - return None, None - - matrix_of_violations = np.zeros(shape=(matrix_dim, len(model_data.separation_model.util.performance_constraints))) - violation_dict = {} - if is_discrete_scenarios: - violation_dict = indices_of_violating_realizations_and_scenario + local_separation_loop_results = None + + if run_global: + global_separation_loop_results = perform_separation_loop( + model_data=model_data, config=config, solve_globally=True + ) else: - for k in indices_of_violating_realizations: - for l in range(len(solve_data_list[k])): - if solve_data_list[k][l].found_violation: - violation_dict[k] = l - for i in range(matrix_dim): - for j in range(len(model_data.separation_model.util.performance_constraints)): - if is_discrete_scenarios: - idx_max_violation_from_scenario = violation_dict[indices_of_violating_realizations[i]] - matrix_of_violations[i][j] = max( - solve_data_list[indices_of_violating_realizations[i]][idx_max_violation_from_scenario].list_of_scaled_violations[j], 0) - else: - matrix_of_violations[i][j] = max(solve_data_list[indices_of_violating_realizations[i]][0].list_of_scaled_violations[j], 0) + global_separation_loop_results = None - sums = [] - for i in range(matrix_of_violations.shape[1]): - sum = 0 - column = matrix_of_violations[:, i] - for j in range(len(column)): - sum += column[j] - sums.append(sum) - max_value = max(sums) - idx_i = sums.index(max_value) + return SeparationResults( + local_separation_loop_results=local_separation_loop_results, + global_separation_loop_results=global_separation_loop_results, + ) - if is_discrete_scenarios: - idx_j = violation_dict[idx_i] - return idx_i, idx_j +def evaluate_violations_by_nominal_master(model_data, performance_cons): + """ + Evaluate violation of performance constraints by + variables in nominal block of most recent master + problem. + Returns + ------- + nom_perf_con_violations : dict + Mapping from performance constraint names + to floats equal to violations by nominal master + problem variables. + """ + constraint_map_to_master = ( + model_data.separation_model.util.map_new_constraint_list_to_original_con + ) -def solve_separation_problem(model_data, config): + # get deterministic model constraints (include epigraph) + set_of_deterministic_constraints = ( + model_data.separation_model.util.deterministic_constraints + ) + if hasattr(model_data.separation_model, "epigraph_constr"): + set_of_deterministic_constraints.add( + model_data.separation_model.epigraph_constr + ) + nom_perf_con_violations = {} - # Timing variables - global_solve_time = 0 - local_solve_time = 0 + for perf_con in performance_cons: + if perf_con in set_of_deterministic_constraints: + nom_constraint = perf_con + else: + nom_constraint = constraint_map_to_master[perf_con] + nom_violation = value( + model_data.master_nominal_scenario.find_component(nom_constraint) + ) + nom_perf_con_violations[perf_con] = nom_violation - # List of objective functions - objectives_map = model_data.separation_model.util.map_obj_to_constr - constraint_map_to_master = model_data.separation_model.util.map_new_constraint_list_to_original_con + return nom_perf_con_violations + + +def group_performance_constraints_by_priority(model_data, config): + """ + Group model performance constraints by separation priority. + + Parameters + ---------- + model_data : SeparationProblemData + Separation problem data. + config : ConfigDict + User-specified PyROS solve options. - # Add additional or remaining separation objectives to the dict - # (those either not assigned an explicit priority or those added by Pyros for ssv bounds) + Returns + ------- + dict + Mapping from an int to a list of performance constraints + (Constraint objects), + for which the int is equal to the specified priority. + Keys are sorted in descending order + (i.e. highest priority first). + """ + separation_priority_groups = dict() config_sep_priority_dict = config.separation_priority_order - actual_sep_priority_dict = ComponentMap() for perf_con in model_data.separation_model.util.performance_constraints: - actual_sep_priority_dict[perf_con] = config_sep_priority_dict.get(perf_con.name, 0) + # by default, priority set to 0 + priority = config_sep_priority_dict.get(perf_con.name, 0) + cons_with_same_priority = separation_priority_groups.setdefault(priority, []) + cons_with_same_priority.append(perf_con) + + # sort separation priority groups + return { + priority: perf_cons + for priority, perf_cons in sorted( + separation_priority_groups.items(), reverse=True + ) + } - # "Bin" the objectives based on priorities - sorted_unique_priorities = sorted(list(set(actual_sep_priority_dict.values())), reverse=True) - set_of_deterministic_constraints = model_data.separation_model.util.deterministic_constraints - if hasattr(model_data.separation_model, "epigraph_constr"): - set_of_deterministic_constraints.add(model_data.separation_model.epigraph_constr) - # Determine whether to solve separation problems globally as well - if config.bypass_global_separation: - separation_cycle = [False] - elif config.bypass_local_separation: - separation_cycle = [True] +def get_worst_discrete_separation_solution( + performance_constraint, + model_data, + config, + perf_cons_to_evaluate, + discrete_solve_results, +): + """ + Determine separation solution (and therefore worst-case + uncertain parameter realization) with maximum violation + of specified performance constraint. + + Parameters + ---------- + performance_constraint : Constraint + Performance constraint of interest. + model_data : SeparationProblemData + Separation problem data. + config : ConfigDict + User-specified PyROS solver settings. + perf_cons_to_evaluate : list of Constraint + Performance constraints for which to report violations + by separation solution. + discrete_solve_results : DiscreteSeparationSolveCallResults + Separation problem solutions corresponding to the + uncertain parameter scenarios listed in + ``config.uncertainty_set.scenarios``. + + Returns + ------- + SeparationSolveCallResult + Solver call result for performance constraint of interest. + """ + # violation of specified performance constraint by separation + # problem solutions for all scenarios + violations_of_perf_con = [ + solve_call_res.scaled_violations[performance_constraint] + for solve_call_res in discrete_solve_results.solver_call_results.values() + ] + + list_of_scenario_idxs = list(discrete_solve_results.solver_call_results.keys()) + + # determine separation solution for which scaled violation of this + # performance constraint is the worst + worst_case_res = discrete_solve_results.solver_call_results[ + list_of_scenario_idxs[np.argmax(violations_of_perf_con)] + ] + worst_case_violation = np.max(violations_of_perf_con) + assert worst_case_violation in worst_case_res.scaled_violations.values() + + # evaluate violations for specified performance constraints + eval_perf_con_scaled_violations = ComponentMap( + (perf_con, worst_case_res.scaled_violations[perf_con]) + for perf_con in perf_cons_to_evaluate + ) + + # discrete separation solutions were obtained by optimizing + # just one performance constraint, as an efficiency. + # if the constraint passed to this routine is the same as the + # constraint used to obtain the solutions, then we bundle + # the separation solve call results into a single list. + # otherwise, we return an empty list, as we did not need to call + # subsolvers for the other performance constraints + is_optimized_performance_con = ( + performance_constraint is discrete_solve_results.performance_constraint + ) + if is_optimized_performance_con: + results_list = [ + res + for solve_call_results in discrete_solve_results.solver_call_results.values() + for res in solve_call_results.results_list + ] else: - separation_cycle = [False, True] - for is_global in separation_cycle: - solver = config.global_solver if is_global else config.local_solver - solve_data_list = [] - - for val in sorted_unique_priorities: - # Descending ordered by value - # The list of performance constraints with this priority - perf_constraints = [constr_name for constr_name, priority in actual_sep_priority_dict.items() if priority == val] - for perf_con in perf_constraints: - #config.progress_logger.info("Separating constraint " + str(perf_con)) - try: - separation_obj = objectives_map[perf_con] - except: - raise ValueError("Error in mapping separation objective to its master constraint form.") - separation_obj.activate() - - if perf_con in set_of_deterministic_constraints: - nom_constraint = perf_con - else: - nom_constraint = constraint_map_to_master[perf_con] - - try: - model_data.master_nominal_scenario_value = value(model_data.master_nominal_scenario.find_component(nom_constraint)) - except: - raise ValueError("Unable to access nominal scenario value for the constraint " + str(nom_constraint)) - - if config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS: - solve_data_list.append(discrete_solve(model_data=model_data, config=config, - solver=solver, is_global=is_global)) - if all(s.termination_condition in globally_acceptable for - sep_soln_list in solve_data_list for s in sep_soln_list) or \ - (is_global == False and all(s.termination_condition in locally_acceptable for - sep_soln_list in solve_data_list for s in sep_soln_list)): - exit_separation_loop = False - else: - exit_separation_loop = True - else: - solve_data = SeparationResult() - exit_separation_loop = solver_call_separation(model_data=model_data, - config=config, - solver=solver, - solve_data=solve_data, - is_global=is_global) - solve_data_list.append([solve_data]) - - # === Keep track of total solve times - if is_global: - if config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS: - for sublist in solve_data_list: - for s in sublist: - global_solve_time += get_time_from_solver(s.results) - else: - global_solve_time += get_time_from_solver(solve_data.results) - else: - if config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS: - for sublist in solve_data_list: - for s in sublist: - local_solve_time += get_time_from_solver(s.results) - else: - local_solve_time += get_time_from_solver(solve_data.results) - - # === Terminate for timing - if exit_separation_loop: - return solve_data_list, [], [], is_global, local_solve_time, global_solve_time - separation_obj.deactivate() + results_list = [] + + return SeparationSolveCallResults( + solved_globally=worst_case_res.solved_globally, + results_list=results_list, + scaled_violations=eval_perf_con_scaled_violations, + violating_param_realization=worst_case_res.violating_param_realization, + variable_values=worst_case_res.variable_values, + found_violation=(worst_case_violation > config.robust_feasibility_tolerance), + time_out=False, + subsolver_error=False, + discrete_set_scenario_index=worst_case_res.discrete_set_scenario_index, + ) - # Do we return? - # If there are multiple violations in this bucket, pick the worst-case - idx_i, idx_j = get_index_of_max_violation( - model_data=model_data, - config=config, - solve_data_list=solve_data_list, + +def perform_separation_loop(model_data, config, solve_globally): + """ + Loop through, and solve, PyROS separation problems to + desired optimality condition. + + Parameters + ---------- + model_data : SeparationProblemData + Separation problem data. + config : ConfigDict + PyROS solver settings. + solve_globally : bool + True to solve separation problems globally, + False to solve separation problems locally. + + Returns + ------- + pyros.solve_data.SeparationLoopResults + Separation problem solve results. + """ + all_performance_constraints = ( + model_data.separation_model.util.performance_constraints + ) + if not all_performance_constraints: + # robustness certified: no separation problems to solve + return SeparationLoopResults( + solver_call_results=ComponentMap(), + solved_globally=solve_globally, + worst_case_perf_con=None, ) - if (idx_i, idx_j) != (None, None): - violating_realizations = [v for v in solve_data_list[idx_i][idx_j].violating_param_realization] - violations = solve_data_list[idx_i][idx_j].list_of_scaled_violations - else: - violating_realizations = [] - violations = [] + # needed for normalizing separation solution constraint violations + model_data.nom_perf_con_violations = evaluate_violations_by_nominal_master( + model_data=model_data, performance_cons=all_performance_constraints + ) + sorted_priority_groups = group_performance_constraints_by_priority( + model_data, config + ) + uncertainty_set_is_discrete = ( + config.uncertainty_set.geometry == Geometry.DISCRETE_SCENARIOS + ) + + if uncertainty_set_is_discrete: + all_scenarios_exhausted = len(model_data.idxs_of_master_scenarios) == len( + config.uncertainty_set.scenarios + ) + if all_scenarios_exhausted: + # robustness certified: entire uncertainty set already + # accounted for in master + return SeparationLoopResults( + solver_call_results=ComponentMap(), + solved_globally=solve_globally, + worst_case_perf_con=None, + ) - if any(s.found_violation for solve_list in solve_data_list for s in solve_list): - #config.progress_logger.info( - # "Violation found in constraint %s with realization %s" % ( - # list(objectives_map.keys())[idx_i], violating_realizations)) - return solve_data_list, violating_realizations, violations, is_global, local_solve_time, global_solve_time + perf_con_to_maximize = sorted_priority_groups[ + max(sorted_priority_groups.keys()) + ][0] - return solve_data_list, [], [], is_global, local_solve_time, global_solve_time + # efficiency: evaluate all separation problem solutions in + # advance of entering loop + discrete_sep_results = discrete_solve( + model_data=model_data, + config=config, + solve_globally=solve_globally, + perf_con_to_maximize=perf_con_to_maximize, + perf_cons_to_evaluate=all_performance_constraints, + ) + termination_not_ok = ( + discrete_sep_results.time_out or discrete_sep_results.subsolver_error + ) + if termination_not_ok: + single_solver_call_res = ComponentMap() + results_list = [ + res + for solve_call_results in discrete_sep_results.solver_call_results.values() + for res in solve_call_results.results_list + ] + single_solver_call_res[perf_con_to_maximize] = ( + # not the neatest assembly, + # but should maintain accuracy of total solve times + # and overall outcome + SeparationSolveCallResults( + solved_globally=solve_globally, + results_list=results_list, + time_out=discrete_sep_results.time_out, + subsolver_error=discrete_sep_results.subsolver_error, + ) + ) + return SeparationLoopResults( + solver_call_results=single_solver_call_res, + solved_globally=solve_globally, + worst_case_perf_con=None, + ) -def get_absolute_tol(model_data, config): - nom_value = model_data.master_nominal_scenario_value - denom = float(max(1, abs(nom_value))) - tol = config.robust_feasibility_tolerance - return denom * tol, nom_value + all_solve_call_results = ComponentMap() + for priority, perf_constraints in sorted_priority_groups.items(): + priority_group_solve_call_results = ComponentMap() + for perf_con in perf_constraints: + # config.progress_logger.info( + # f"Separating constraint {perf_con.name}" + # ) + + # solve separation problem for this performance constraint + if uncertainty_set_is_discrete: + solve_call_results = get_worst_discrete_separation_solution( + performance_constraint=perf_con, + model_data=model_data, + config=config, + perf_cons_to_evaluate=all_performance_constraints, + discrete_solve_results=discrete_sep_results, + ) + else: + solve_call_results = solver_call_separation( + model_data=model_data, + config=config, + solve_globally=solve_globally, + perf_con_to_maximize=perf_con, + perf_cons_to_evaluate=all_performance_constraints, + ) + + priority_group_solve_call_results[perf_con] = solve_call_results + + termination_not_ok = ( + solve_call_results.time_out or solve_call_results.subsolver_error + ) + if termination_not_ok: + all_solve_call_results.update(priority_group_solve_call_results) + return SeparationLoopResults( + solver_call_results=all_solve_call_results, + solved_globally=solve_globally, + worst_case_perf_con=None, + ) + + all_solve_call_results.update(priority_group_solve_call_results) + + # there may be multiple separation problem solutions + # found to have violated a performance constraint. + # we choose just one for master problem of next iteration + worst_case_perf_con = get_argmax_sum_violations( + solver_call_results_map=all_solve_call_results, + perf_cons_to_evaluate=perf_constraints, + ) + if worst_case_perf_con is not None: + # take note of chosen separation solution + worst_case_res = all_solve_call_results[worst_case_perf_con] + if uncertainty_set_is_discrete: + model_data.idxs_of_master_scenarios.append( + worst_case_res.discrete_set_scenario_index + ) + + # # auxiliary log messages + # objectives_map = ( + # model_data.separation_model.util.map_obj_to_constr + # ) + # violated_con_name = list(objectives_map.keys())[ + # worst_case_perf_con + # ] + # config.progress_logger.info( + # f"Violation found for constraint {violated_con_name} " + # "under realization " + # f"{worst_case_res.violating_param_realization}" + # ) + + # violating separation problem solution now chosen. + # exit loop + break + + return SeparationLoopResults( + solver_call_results=all_solve_call_results, + solved_globally=solve_globally, + worst_case_perf_con=worst_case_perf_con, + ) -def update_solve_data_violations(model_data, config, solve_data): +def evaluate_performance_constraint_violations( + model_data, config, perf_con_to_maximize, perf_cons_to_evaluate +): """ Evaluate the inequality constraint function violations of the current separation model solution, and store the @@ -380,39 +729,61 @@ def update_solve_data_violations(model_data, config, solve_data): Object containing the separation model. config : ConfigDict PyROS solver settings. - solve_data : SeparationResult - Result for most recent separation problem. + perf_cons_to_evaluate : list of Constraint + Performance constraints whose expressions are to + be evaluated at the current separation problem + solution. + Exactly one of these constraints should be mapped + to an active Objective in the separation model. Returns ------- - : bool - True if constraint is violated, False otherwise. + violating_param_realization : list of float + Uncertain parameter realization corresponding to maximum + constraint violation. + scaled_violations : ComponentMap + Mapping from performance constraints to be evaluated + to their violations by the separation problem solution. + constraint_violated : bool + True if performance constraint mapped to active + separation model Objective is violated (beyond tolerance), + False otherwise + + Raises + ------ + ValueError + If `perf_cons_to_evaluate` does not contain exactly + 1 entry which can be mapped to an active Objective + of ``model_data.separation_model``. """ - - nom_value = model_data.master_nominal_scenario_value - denom = float(max(1, abs(nom_value))) - tol = config.robust_feasibility_tolerance - active_objective = next( - model_data.separation_model.component_data_objects( - Objective, - active=True - ) + # parameter realization for current separation problem solution + violating_param_realization = list( + param.value + for param in model_data.separation_model.util.uncertain_param_vars.values() ) - # update solve data attributes - solve_data.violating_param_realization = list( - p.value for p in - model_data.separation_model.util.uncertain_param_vars.values() + # evaluate violations for all performance constraints provided + violations_by_sep_solution = get_sep_objective_values( + model_data=model_data, config=config, perf_cons=perf_cons_to_evaluate ) - list_of_violations = get_all_sep_objective_values( - model_data=model_data, - config=config, + + # normalize constraint violation: i.e. divide by + # absolute value of constraint expression evaluated at + # nominal master solution (if expression value is large enough) + scaled_violations = ComponentMap() + for perf_con, sep_sol_violation in violations_by_sep_solution.items(): + scaled_violation = sep_sol_violation / max( + 1, abs(model_data.nom_perf_con_violations[perf_con]) + ) + scaled_violations[perf_con] = scaled_violation + if perf_con is perf_con_to_maximize: + scaled_active_obj_violation = scaled_violation + + constraint_violated = ( + scaled_active_obj_violation > config.robust_feasibility_tolerance ) - solve_data.list_of_scaled_violations = [ - l/denom for l in list_of_violations - ] - return value(active_objective) / denom > tol + return (violating_param_realization, scaled_violations, constraint_violated) def initialize_separation(model_data, config): @@ -467,8 +838,7 @@ def get_parent_master_blk(var): # may be the nominal block) parent_master_blk = get_parent_master_blk(master_var) sep_var_name = master_var.getname( - relative_to=parent_master_blk, - fully_qualified=True, + relative_to=parent_master_blk, fully_qualified=True ) # initialize separation problem var to value from master block @@ -508,12 +878,8 @@ def get_parent_master_blk(var): # check: initial point feasible? for con in sep_model.component_data_objects(Constraint, active=True): lb, val, ub = value(con.lb), value(con.body), value(con.ub) - lb_viol = ( - val < lb - ABS_CON_CHECK_FEAS_TOL if lb is not None else False - ) - ub_viol = ( - val > ub + ABS_CON_CHECK_FEAS_TOL if ub is not None else False - ) + lb_viol = val < lb - ABS_CON_CHECK_FEAS_TOL if lb is not None else False + ub_viol = val > ub + ABS_CON_CHECK_FEAS_TOL if ub is not None else False if lb_viol or ub_viol: config.progress_logger.debug(con.name, lb, val, ub) @@ -522,7 +888,9 @@ def get_parent_master_blk(var): globally_acceptable = {tc.optimal, tc.globallyOptimal} -def solver_call_separation(model_data, config, solver, solve_data, is_global): +def solver_call_separation( + model_data, config, solve_globally, perf_con_to_maximize, perf_cons_to_evaluate +): """ Invoke subordinate solver(s) on separation problem. @@ -532,34 +900,53 @@ def solver_call_separation(model_data, config, solver, solve_data, is_global): Separation problem data. config : ConfigDict PyROS solver settings. - solver : solver type - Primary subordinate optimizer with which to solve - the model. - solve_data : SeparationResult - Container for separation problem result. - is_global : bool - Is separation problem to be solved globally. + solve_globally : bool + True to solve separation problems globally, + False to solve locally. + perf_con_to_maximize : Constraint + Performance constraint for which to solve separation problem. + Informs the objective (constraint violation) to maximize. + perf_cons_to_evaluate : list of Constraint + Performance constraints whose expressions are to be + evaluated at the separation problem solution + obtained. Returns ------- - : bool - True if separation problem was not solved to an appropriate - optimality status by any of the solvers available or the - PyROS elapsed time limit is exceeded, False otherwise. + solve_call_results : pyros.solve_data.SeparationSolveCallResults + Solve results for separation problem of interest. """ - if is_global: - backup_solvers = deepcopy(config.backup_global_solvers) + # objective corresponding to specified performance constraint + objectives_map = model_data.separation_model.util.map_obj_to_constr + separation_obj = objectives_map[perf_con_to_maximize] + + if solve_globally: + solvers = [config.global_solver] + config.backup_global_solvers else: - backup_solvers = deepcopy(config.backup_local_solvers) - backup_solvers.insert(0, solver) + solvers = [config.local_solver] + config.backup_local_solvers + # keep track of solver statuses for output logging solver_status_dict = {} nlp_model = model_data.separation_model # === Initialize separation problem; fix first-stage variables initialize_separation(model_data, config) - for opt in backup_solvers: + separation_obj.activate() + + solve_call_results = SeparationSolveCallResults( + solved_globally=solve_globally, + time_out=False, + results_list=[], + found_violation=False, + subsolver_error=False, + ) + timer = TicTocTimer() + for opt in solvers: + orig_setting, custom_setting_present = adjust_solver_time_settings( + model_data.timing, opt, config + ) + timer.tic(msg=None) try: results = opt.solve( nlp_model, @@ -573,50 +960,68 @@ def solver_call_separation(model_data, config, solver, solve_data, is_global): # errors, etc.) config.progress_logger.error( f"Solver {repr(opt)} encountered exception attempting to " - f"optimize master problem in iteration {model_data.iteration}" + "optimize separation problem in iteration " + f"{model_data.iteration}" ) raise + else: + setattr(results.solver, TIC_TOC_SOLVE_TIME_ATTR, timer.toc(msg=None)) + finally: + revert_solver_max_time_adjustment( + opt, orig_setting, custom_setting_present, config + ) # record termination condition for this particular solver solver_status_dict[str(opt)] = results.solver.termination_condition - solve_data.termination_condition = results.solver.termination_condition - solve_data.results = results + solve_call_results.results_list.append(results) # has PyROS time limit been reached? elapsed = get_main_elapsed_time(model_data.timing) if config.time_limit: if elapsed >= config.time_limit: - solve_data.found_violation = False - return True + solve_call_results.time_out = True + separation_obj.deactivate() + return solve_call_results # if separation problem solved to optimality, record results # and exit acceptable_conditions = ( - globally_acceptable if is_global else locally_acceptable + globally_acceptable if solve_globally else locally_acceptable ) - optimal_termination = ( - solve_data.termination_condition in acceptable_conditions + optimal_termination = solve_call_results.termination_acceptable( + acceptable_conditions ) if optimal_termination: nlp_model.solutions.load_from(results) - solve_data.found_violation = update_solve_data_violations( - model_data, - config, - solve_data, + + # record second-stage and state variable values + solve_call_results.variable_values = ComponentMap() + for var in nlp_model.util.second_stage_variables: + solve_call_results.variable_values[var] = value(var) + for var in nlp_model.util.state_vars: + solve_call_results.variable_values[var] = value(var) + + # record uncertain parameter realization + # and constraint violations + ( + solve_call_results.violating_param_realization, + solve_call_results.scaled_violations, + solve_call_results.found_violation, + ) = evaluate_performance_constraint_violations( + model_data, config, perf_con_to_maximize, perf_cons_to_evaluate ) - return False - # problem not solved successfully, so no violation found - solve_data.found_violation = False + separation_obj.deactivate() + + return solve_call_results # All subordinate solvers failed to optimize model to appropriate # termination condition. PyROS will terminate with subsolver # error. At this point, export model if desired + solve_call_results.subsolver_error = True save_dir = config.subproblem_file_directory if save_dir and config.keepfiles: - objective = str( - list(nlp_model.component_data_objects(Objective, active=True))[0].name - ) + objective = separation_obj.name name = os.path.join( save_dir, ( @@ -630,7 +1035,7 @@ def solver_call_separation(model_data, config, solver, solve_data, is_global): + ".bar" ), ) - nlp_model.write(name, io_options={'symbolic_solver_labels':True}) + nlp_model.write(name, io_options={'symbolic_solver_labels': True}) output_logger( config=config, separation_error=True, @@ -639,46 +1044,102 @@ def solver_call_separation(model_data, config, solver, solve_data, is_global): objective=objective, status_dict=solver_status_dict, ) - return True + + separation_obj.deactivate() + + return solve_call_results -def discrete_solve(model_data, config, solver, is_global): +def discrete_solve( + model_data, config, solve_globally, perf_con_to_maximize, perf_cons_to_evaluate +): """ - Loops over discrete scenarios, solving square problem to determine constraint violation in separation objective. + Obtain separation problem solution for each scenario + of the uncertainty set not already added to the most + recent master problem. + + Parameters + ---------- + model_data : SeparationProblemData + Separation problem data. + config : ConfigDict + PyROS solver settings. + solver : solver type + Primary subordinate optimizer with which to solve + the model. + solve_globally : bool + Is separation problem to be solved globally. + perf_con_to_maximize : Constraint + Performance constraint for which to solve separation + problem. + perf_cons_to_evaluate : list of Constraint + Performance constraints whose expressions are to be + evaluated at the each of separation problem solutions + obtained. + + Returns + ------- + discrete_separation_results : DiscreteSeparationSolveCallResults + Separation solver call results on performance constraint + of interest for every scenario considered. + + Notes + ----- + Since we assume that models passed to PyROS are such that the DOF + variables and uncertain parameter values uniquely define the state + variables, this method need be only be invoked once per separation + loop. Subject to our assumption, the choice of objective + (``perf_con_to_maximize``) should not affect the solutions returned + beyond subsolver tolerances. For other performance constraints, the + optimal separation problem solution can then be evaluated by simple + enumeration of the solutions returned by this function, since for + discrete uncertainty sets, the number of feasible separation + solutions is, under our assumption, merely equal to the number + of scenarios in the uncertainty set. """ - # Constraint are grouped by dim(uncertain_param) groups for each scenario in D - solve_data_list = [] - # === Remove (skip over) already accounted for violations - chunk_size = len(model_data.separation_model.util.uncertain_param_vars) - conlist = model_data.separation_model.util.uncertainty_set_constraint - _constraints = list(conlist.values()) - constraints_to_skip = ComponentSet() - conlist.deactivate() - - for pnt in model_data.points_added_to_master: - _idx = config.uncertainty_set.scenarios.index(tuple(pnt)) - skip_index_list = list(range(chunk_size * _idx, chunk_size * _idx + chunk_size)) - for _index in range(len(_constraints)): - if _index in skip_index_list: - constraints_to_skip.add(_constraints[_index]) - constraints = list(c for c in _constraints if c not in constraints_to_skip) - - for i in range(0, len(constraints), chunk_size): - chunk = list(constraints[i:i + chunk_size]) - for idx, con in enumerate(chunk): - con.activate() - model_data.separation_model.util.uncertain_param_vars[idx].fix(con.lower) - con.deactivate() - solve_data = SeparationResult() - solver_call_separation(model_data=model_data, - config=config, - solver=solver, - solve_data=solve_data, - is_global=is_global) - solve_data_list.append(solve_data) - for con in chunk: - con.deactivate() - - return solve_data_list + # Ensure uncertainty set constraints deactivated + model_data.separation_model.util.uncertainty_set_constraint.deactivate() + uncertain_param_vars = list( + model_data.separation_model.util.uncertain_param_vars.values() + ) + # skip scenarios already added to most recent master problem + master_scenario_idxs = model_data.idxs_of_master_scenarios + scenario_idxs_to_separate = [ + idx + for idx, _ in enumerate(config.uncertainty_set.scenarios) + if idx not in master_scenario_idxs + ] + + solve_call_results_dict = {} + for scenario_idx in scenario_idxs_to_separate: + # fix uncertain parameters to scenario value + # hence, no need to activate uncertainty set constraints + scenario = config.uncertainty_set.scenarios[scenario_idx] + for param, coord_val in zip(uncertain_param_vars, scenario): + param.fix(coord_val) + + # obtain separation problem solution + solve_call_results = solver_call_separation( + model_data=model_data, + config=config, + solve_globally=solve_globally, + perf_con_to_maximize=perf_con_to_maximize, + perf_cons_to_evaluate=perf_cons_to_evaluate, + ) + solve_call_results.discrete_set_scenario_index = scenario_idx + solve_call_results_dict[scenario_idx] = solve_call_results + + # halt at first encounter of unacceptable termination + termination_not_ok = ( + solve_call_results.subsolver_error or solve_call_results.time_out + ) + if termination_not_ok: + break + + return DiscreteSeparationSolveCallResults( + solved_globally=solve_globally, + solver_call_results=solve_call_results_dict, + performance_constraint=perf_con_to_maximize, + ) diff --git a/pyomo/contrib/pyros/solve_data.py b/pyomo/contrib/pyros/solve_data.py index c8d351f280e..63e7fdd7ebd 100644 --- a/pyomo/contrib/pyros/solve_data.py +++ b/pyomo/contrib/pyros/solve_data.py @@ -1,9 +1,10 @@ -''' +""" Objects to contain all model data and solve results for the ROSolver -''' +""" + class ROSolveResults(object): - ''' + """ Container for solve-instance data returned to the user after solving with PyROS. Attributes: @@ -12,21 +13,24 @@ class ROSolveResults(object): :time: Total solver CPU time :iterations: total iterations done by PyROS solver :final_objective_value: objective function value at termination - ''' + """ + pass + class MasterProblemData(object): - ''' + """ Container for the grcs master problem Attributes: :master_model: master problem model object :base_model: block representing the original model object :iteration: current iteration of the algorithm - ''' + """ + class SeparationProblemData(object): - ''' + """ Container for the grcs separation problem Attributes: @@ -35,38 +39,668 @@ class SeparationProblemData(object): :separation_problem_subsolver_statuses: list of subordinate sub-solver statuses throughout separations :total_global_separation_solvers: Counter for number of times global solvers were employed in separation :constraint_violations: List of constraint violations identified in separation - ''' + """ + pass + class MasterResult(object): """Data class for master problem results data. - Attributes: - - termination_condition: Solver termination condition - - fsv_values: list of design variable values - - ssv_values: list of control variable values - - first_stage_objective: objective contribution due to first-stage degrees of freedom - - second_stage_objective: objective contribution due to second-stage degrees of freedom - - grcs_termination_condition: the conditions under which the grcs terminated - (max_iter, robust_optimal, error) - - pyomo_results: results object from solve() statement + Attributes: + - termination_condition: Solver termination condition + - fsv_values: list of design variable values + - ssv_values: list of control variable values + - first_stage_objective: objective contribution due to first-stage degrees of freedom + - second_stage_objective: objective contribution due to second-stage degrees of freedom + - grcs_termination_condition: the conditions under which the grcs terminated + (max_iter, robust_optimal, error) + - pyomo_results: results object from solve() statement """ -class SeparationResult(object): - """Data class for master problem results data. - Attributes: - - termination_condition: Solver termination condition - - violation_found: True if a violating parameter realization was identified in separation. For a given - separation objective function, it is considered a violation only if the parameter realization led to a - violation of the corresponding ineq. constraint used to define that objective - - is_global: True if separation problem differed to global solver, False if local solver - - separation_model: Pyomo model for separation problem at optimal solution - - control_var_values: list of control variable values - - violating_param_realization: list for the values of the uncertain_params identified as a violation - - list_of_violations: value of constraints violation for each ineq. constraint considered - in separation against the violation in violating_param_realizations - - pyomo_results: results object from solve() statement - - """ \ No newline at end of file +class SeparationSolveCallResults: + """ + Container for results of solve attempt for single separation + problem. + + Parameters + ---------- + solved_globally : bool + True if separation problem was solved globally, + False otherwise. + results_list : list of pyomo.opt.results.SolverResults, optional + Pyomo solver results for each subordinate optimizer invoked on + the separation problem. + For problems with non-discrete uncertainty set types, + each entry corresponds to a single subordinate solver. + For problems with discrete set types, the list may + be empty (didn't need to use a subordinate solver to + evaluate optimal separation solution), or the number + of entries may be as high as the product of the number of + subordinate local/global solvers provided (including backup) + and the number of scenarios in the uncertainty set. + scaled_violations : ComponentMap, optional + Mapping from performance constraints to floats equal + to their scaled violations by separation problem solution + stored in this result. + violating_param_realization : list of float, optional + Uncertain parameter realization for reported separation + problem solution. + variable_values : ComponentMap, optional + Second-stage DOF and state variable values for reported + separation problem solution. + found_violation : bool, optional + True if violation of performance constraint (i.e. constraint + expression value) by reported separation solution was found to + exceed tolerance, False otherwise. + time_out : bool, optional + True if PyROS time limit reached attempting to solve the + separation problem, False otherwise. + subsolver_error : bool, optional + True if subsolvers found to be unable to solve separation + problem of interest, False otherwise. + discrete_set_scenario_index : None or int, optional + If discrete set used to solve the problem, index of + `violating_param_realization` as listed in the + `scenarios` attribute of a ``DiscreteScenarioSet`` + instance. If discrete set not used, pass None. + + Attributes + ---------- + solved_globally + results_list + scaled_violations + violating_param_realizations + variable_values + found_violation + time_out + subsolver_error + discrete_set_scenario_index + """ + + def __init__( + self, + solved_globally, + results_list=None, + scaled_violations=None, + violating_param_realization=None, + variable_values=None, + found_violation=None, + time_out=None, + subsolver_error=None, + discrete_set_scenario_index=None, + ): + """Initialize self (see class docstring).""" + self.results_list = results_list + self.solved_globally = solved_globally + self.scaled_violations = scaled_violations + self.violating_param_realization = violating_param_realization + self.variable_values = variable_values + self.found_violation = found_violation + self.time_out = time_out + self.subsolver_error = subsolver_error + self.discrete_set_scenario_index = discrete_set_scenario_index + + def termination_acceptable(self, acceptable_terminations): + """ + Return True if termination condition for at least + one result in `self.results_list` is in list + of pre-specified acceptable terminations, False otherwise. + + Parameters + ---------- + acceptable_terminations : set of pyomo.opt.TerminationCondition + Acceptable termination conditions. + + Returns + ------- + bool + """ + return any( + res.solver.termination_condition in acceptable_terminations + for res in self.results_list + ) + + def evaluate_total_solve_time(self, evaluator_func, **evaluator_func_kwargs): + """ + Evaluate total time required by subordinate solvers + for separation problem of interest, according to Pyomo + ``SolverResults`` objects stored in ``self.results_list``. + + Parameters + ---------- + evaluator_func : callable + Solve time evaluator function. + This callable should accept an object of type + ``pyomo.opt.results.SolverResults``, and + return a float equal to the time required. + **evaluator_func_kwargs : dict, optional + Keyword arguments to evaluator function. + + Returns + ------- + float + Total time spent by solvers. + """ + return sum( + evaluator_func(res, **evaluator_func_kwargs) for res in self.results_list + ) + + +class DiscreteSeparationSolveCallResults: + """ + Container for results of solve attempt for single separation + problem. + + Parameters + ---------- + solved_globally : bool + True if separation problems solved to global optimality, + False otherwise. + solver_call_results : dict + Mapping from discrete uncertainty set scenario list + indexes to solver call results for separation problems + subject to the scenarios. + performance_constraint : Constraint + Separation problem performance constraint for which + `self` was generated. + + Attributes + ---------- + solved_globally + scenario_indexes + solver_call_results + performance_constraint + time_out + subsolver_error + """ + + def __init__( + self, solved_globally, solver_call_results=None, performance_constraint=None + ): + """Initialize self (see class docstring).""" + self.solved_globally = solved_globally + self.solver_call_results = solver_call_results + self.performance_constraint = performance_constraint + + @property + def time_out(self): + """ + bool : True if there is a time out status for at least one of + the ``SeparationSolveCallResults`` objects listed in `self`, + False otherwise. + """ + return any(res.time_out for res in self.solver_call_results.values()) + + @property + def subsolver_error(self): + """ + bool : True if there is a subsolver error status for at least + one of the the ``SeparationSolveCallResults`` objects listed + in `self`, False otherwise. + """ + return any(res.subsolver_error for res in self.solver_call_results.values()) + + def evaluate_total_solve_time(self, evaluator_func, **evaluator_func_kwargs): + """ + Evaluate total time required by subordinate solvers + for separation problem of interest. + + Parameters + ---------- + evaluator_func : callable + Solve time evaluator function. + This callable should accept an object of type + ``pyomo.opt.results.SolveResults``, and + return a float equal to the time required. + **evaluator_func_kwargs : dict, optional + Keyword arguments to evaluator function. + + Returns + ------- + float + Total time spent by solvers. + """ + return sum( + solver_call_res.evaluate_total_solve_time(evaluator_func) + for solver_call_res in self.solver_call_results.values() + ) + + +class SeparationLoopResults: + """ + Container for results of all separation problems solved + to a single desired optimality target (local or global). + + Parameters + ---------- + solved_globally : bool + True if separation problems were solved to global optimality, + False otherwise. + solver_call_results : ComponentMap + Mapping from performance constraints to corresponding + ``SeparationSolveCallResults`` objects. + worst_case_perf_con : None or int, optional + Performance constraint mapped to ``SeparationSolveCallResults`` + object in `self` corresponding to maximally violating + separation problem solution. + + Attributes + ---------- + solver_call_results + solved_globally + worst_case_perf_con + found_violation + violating_param_realization + scaled_violations + violating_separation_variable_values + subsolver_error + time_out + """ + + def __init__(self, solved_globally, solver_call_results, worst_case_perf_con): + """Initialize self (see class docstring).""" + self.solver_call_results = solver_call_results + self.solved_globally = solved_globally + self.worst_case_perf_con = worst_case_perf_con + + @property + def found_violation(self): + """ + bool : True if separation solution for at least one + ``SeparationSolveCallResults`` object listed in self + was reported to violate its corresponding performance + constraint, False otherwise. + """ + return any( + solver_call_res.found_violation + for solver_call_res in self.solver_call_results.values() + ) + + @property + def violating_param_realization(self): + """ + None or list of float : Uncertain parameter values for + for maximally violating separation problem solution, + specified according to solver call results object + listed in self at index ``self.worst_case_perf_con``. + If ``self.worst_case_perf_con`` is not specified, + then None is returned. + """ + if self.worst_case_perf_con is not None: + return self.solver_call_results[ + self.worst_case_perf_con + ].violating_param_realization + else: + return None + + @property + def scaled_violations(self): + """ + None or ComponentMap : Scaled performance constraint violations + for maximally violating separation problem solution, + specified according to solver call results object + listed in self at index ``self.worst_case_perf_con``. + If ``self.worst_case_perf_con`` is not specified, + then None is returned. + """ + if self.worst_case_perf_con is not None: + return self.solver_call_results[self.worst_case_perf_con].scaled_violations + else: + return None + + @property + def violating_separation_variable_values(self): + """ + None or ComponentMap : Second-stage and state variable values + for maximally violating separation problem solution, + specified according to solver call results object + listed in self at index ``self.worst_case_perf_con``. + If ``self.worst_case_perf_con`` is not specified, + then None is returned. + """ + if self.worst_case_perf_con is not None: + return self.solver_call_results[self.worst_case_perf_con].variable_values + else: + return None + + @property + def violated_performance_constraints(self): + """ + list of Constraint : Performance constraints for which violation + found. + """ + return [ + con + for con, solver_call_results in self.solver_call_results.items() + if solver_call_results.found_violation + ] + + @property + def subsolver_error(self): + """ + bool : Return True if subsolver error reported for + at least one ``SeparationSolveCallResults`` stored in + `self`, False otherwise. + """ + return any( + solver_call_res.subsolver_error + for solver_call_res in self.solver_call_results.values() + ) + + @property + def time_out(self): + """ + bool : Return True if time out reported for + at least one ``SeparationSolveCallResults`` stored in + `self`, False otherwise. + """ + return any( + solver_call_res.time_out + for solver_call_res in self.solver_call_results.values() + ) + + def evaluate_total_solve_time(self, evaluator_func, **evaluator_func_kwargs): + """ + Evaluate total time required by subordinate solvers + for separation problem of interest. + + Parameters + ---------- + evaluator_func : callable + Solve time evaluator function. + This callable should accept an object of type + ``pyomo.opt.results.SolveResults``, and + return a float equal to the time required. + **evaluator_func_kwargs : dict, optional + Keyword arguments to evaluator function. + + Returns + ------- + float + Total time spent by solvers. + """ + return sum( + res.evaluate_total_solve_time(evaluator_func) + for res in self.solver_call_results.values() + ) + + +class SeparationResults: + """ + Container for results of PyROS separation problem routine. + + Parameters + ---------- + local_separation_loop_results : None or SeparationLoopResults + Local separation problem loop results. + global_separation_loop_results : None or SeparationLoopResults + Global separation problem loop results. + + Attributes + ---------- + local_separation_loop_results + global_separation_loop_results + subsolver_error + time_out + solved_locally + solved_globally + found_violation + violating_param_realization + scaled_violations + violating_separation_variable_values + robustness_certified + """ + + def __init__(self, local_separation_loop_results, global_separation_loop_results): + """Initialize self (see class docstring).""" + self.local_separation_loop_results = local_separation_loop_results + self.global_separation_loop_results = global_separation_loop_results + + @property + def time_out(self): + """ + Return True if time out found for local or global + separation loop, False otherwise. + """ + local_time_out = ( + self.solved_locally and self.local_separation_loop_results.time_out + ) + global_time_out = ( + self.solved_globally and self.global_separation_loop_results.time_out + ) + return local_time_out or global_time_out + + @property + def subsolver_error(self): + """ + Return True if subsolver error found for local or global + separation loop, False otherwise. + """ + local_subsolver_error = ( + self.solved_locally and self.local_separation_loop_results.subsolver_error + ) + global_subsolver_error = ( + self.solved_globally and self.global_separation_loop_results.subsolver_error + ) + return local_subsolver_error or global_subsolver_error + + @property + def solved_locally(self): + """ + Return true if local separation loop was invoked, + False otherwise. + """ + return self.local_separation_loop_results is not None + + @property + def solved_globally(self): + """ + Return True if global separation loop was invoked, + False otherwise. + """ + return self.global_separation_loop_results is not None + + def get_violating_attr(self, attr_name): + """ + If local separation loop results specified, return + value of attribute of local separation loop results. + + Otherwise, if global separation loop results specified, + return value of attribute of global separation loop + results. + + Otherwise, return None. + + Parameters + ---------- + attr_name : str + Name of attribute to be retrieved. Should be + valid attribute name for object of type + ``SeparationLoopResults``. + + Returns + ------- + object + Attribute value. + """ + if self.solved_locally: + local_loop_val = getattr(self.local_separation_loop_results, attr_name) + else: + local_loop_val = None + + if local_loop_val is not None: + attr_val = local_loop_val + else: + if self.solved_globally: + attr_val = getattr(self.global_separation_loop_results, attr_name) + else: + attr_val = None + + return attr_val + + @property + def found_violation(self): + """ + bool: True if ``found_violation`` attribute for + local or global separation loop results found + to be True, False otherwise. + """ + found_viol = self.get_violating_attr("found_violation") + if found_viol is None: + found_viol = False + return found_viol + + @property + def violating_param_realization(self): + """ + None or list of float : Uncertain parameter values + for maximally violating separation problem solution + reported in local or global separation loop results. + If no such solution found, (i.e. ``worst_case_perf_con`` + set to None for both local and global loop results), + then None is returned. + """ + return self.get_violating_attr("violating_param_realization") + + @property + def scaled_violations(self): + """ + None or ComponentMap : Scaled performance constraint violations + for maximally violating separation problem solution + reported in local or global separation loop results. + If no such solution found, (i.e. ``worst_case_perf_con`` + set to None for both local and global loop results), + then None is returned. + """ + return self.get_violating_attr("scaled_violations") + + @property + def violating_separation_variable_values(self): + """ + None or ComponentMap : Second-stage and state variable values + for maximally violating separation problem solution + reported in local or global separation loop results. + If no such solution found, (i.e. ``worst_case_perf_con`` + set to None for both local and global loop results), + then None is returned. + """ + return self.get_violating_attr("violating_separation_variable_values") + + @property + def violated_performance_constraints(self): + """ + Return list of violated performance constraints. + """ + return self.get_violating_attr("violated_performance_constraints") + + def evaluate_local_solve_time(self, evaluator_func, **evaluator_func_kwargs): + """ + Evaluate total time required by local subordinate solvers + for separation problem of interest. + + Parameters + ---------- + evaluator_func : callable + Solve time evaluator function. + This callable should accept an object of type + ``pyomo.opt.results.SolverResults``, and + return a float equal to the time required. + **evaluator_func_kwargs : dict, optional + Keyword arguments to evaluator function. + + Returns + ------- + float + Total time spent by local solvers. + """ + if self.solved_locally: + return self.local_separation_loop_results.evaluate_total_solve_time( + evaluator_func, **evaluator_func_kwargs + ) + else: + return 0 + + def evaluate_global_solve_time(self, evaluator_func, **evaluator_func_kwargs): + """ + Evaluate total time required by global subordinate solvers + for separation problem of interest. + + Parameters + ---------- + evaluator_func : callable + Solve time evaluator function. + This callable should accept an object of type + ``pyomo.opt.results.SolverResults``, and + return a float equal to the time required. + **evaluator_func_kwargs : dict, optional + Keyword arguments to evaluator function. + + Returns + ------- + float + Total time spent by global solvers. + """ + if self.solved_globally: + return self.global_separation_loop_results.evaluate_total_solve_time( + evaluator_func, **evaluator_func_kwargs + ) + else: + return 0 + + @property + def robustness_certified(self): + """ + bool : Return True if separation results certify that + first-stage solution is robust, False otherwise. + """ + assert self.solved_locally or self.solved_globally + + if self.time_out or self.subsolver_error: + return False + + if self.solved_locally: + heuristically_robust = ( + not self.local_separation_loop_results.found_violation + ) + else: + heuristically_robust = None + + if self.solved_globally: + is_robust = not self.global_separation_loop_results.found_violation + else: + # global separation bypassed, either + # because uncertainty set is discrete + # or user opted to bypass global separation + is_robust = heuristically_robust + + return is_robust + + def generate_subsolver_results(self, include_local=True, include_global=True): + """ + Generate flattened sequence all Pyomo SolverResults objects + for all ``SeparationSolveCallResults`` objects listed in + the local and global ``SeparationLoopResults`` + attributes of `self`. + + Yields + ------ + pyomo.opt.SolverResults + """ + if include_local and self.local_separation_loop_results is not None: + all_local_call_results = ( + self.local_separation_loop_results.solver_call_results.values() + ) + for solve_call_res in all_local_call_results: + for res in solve_call_res.results_list: + yield res + + if include_global and self.global_separation_loop_results is not None: + all_global_call_results = ( + self.global_separation_loop_results.solver_call_results.values() + ) + for solve_call_res in all_global_call_results: + for res in solve_call_res.results_list: + yield res diff --git a/pyomo/contrib/pyros/tests/test_grcs.py b/pyomo/contrib/pyros/tests/test_grcs.py index 1f3165ad855..3214c987804 100644 --- a/pyomo/contrib/pyros/tests/test_grcs.py +++ b/pyomo/contrib/pyros/tests/test_grcs.py @@ -8,23 +8,57 @@ from pyomo.common.collections import ComponentSet from pyomo.common.config import ConfigBlock, ConfigValue from pyomo.core.base.set_types import NonNegativeIntegers -from pyomo.environ import * -from pyomo.core.expr.current import identify_variables, identify_mutable_parameters -from pyomo.contrib.pyros.util import selective_clone, add_decision_rule_variables, add_decision_rule_constraints, \ - model_is_valid, turn_bounds_to_constraints, transform_to_standard_form, ObjectiveType, pyrosTerminationCondition, \ - coefficient_matching +from pyomo.core.expr import identify_variables, identify_mutable_parameters +from pyomo.contrib.pyros.util import ( + selective_clone, + add_decision_rule_variables, + add_decision_rule_constraints, + model_is_valid, + turn_bounds_to_constraints, + transform_to_standard_form, + ObjectiveType, + pyrosTerminationCondition, + coefficient_matching, +) from pyomo.contrib.pyros.util import replace_uncertain_bounds_with_constraints from pyomo.contrib.pyros.util import get_vars_from_component from pyomo.contrib.pyros.util import identify_objective_functions -from pyomo.core.expr import current as EXPR +from pyomo.common.collections import Bunch +import time +from pyomo.contrib.pyros.util import time_code from pyomo.contrib.pyros.uncertainty_sets import * -from pyomo.contrib.pyros.master_problem_methods import add_scenario_to_master, initial_construct_master, solve_master, \ - minimize_dr_vars +from pyomo.contrib.pyros.master_problem_methods import ( + add_scenario_to_master, + initial_construct_master, + solve_master, + minimize_dr_vars, +) from pyomo.contrib.pyros.solve_data import MasterProblemData from pyomo.common.dependencies import numpy as np, numpy_available from pyomo.common.dependencies import scipy as sp, scipy_available from pyomo.environ import maximize as pyo_max from pyomo.common.errors import ApplicationError +from pyomo.opt import ( + SolverResults, + SolverStatus, + SolutionStatus, + TerminationCondition, + Solution, +) +from pyomo.environ import ( + Constraint, + Expression, + Objective, + Param, + SolverFactory, + Var, + cos, + exp, + log, + sin, + sqrt, + value, +) if not (numpy_available and scipy_available): @@ -36,6 +70,104 @@ global_solver_args = dict() nlp_solver_args = dict() +_baron = SolverFactory('baron') +baron_available = _baron.available(exception_flag=False) +if baron_available: + baron_license_is_valid = _baron.license_is_valid() + baron_version = _baron.version() +else: + baron_license_is_valid = False + baron_version = (0, 0, 0) + +_scip = SolverFactory('scip') +scip_available = _scip.available(exception_flag=False) +if scip_available: + scip_license_is_valid = _scip.license_is_valid() + scip_version = _scip.version() +else: + scip_license_is_valid = False + scip_version = (0, 0, 0) + + +# @SolverFactory.register("time_delay_solver") +class TimeDelaySolver(object): + """ + Solver which puts program to sleep for a specified + duration after having been invoked a specified number + of times. + """ + + def __init__(self, calls_to_sleep, max_time, sub_solver): + self.max_time = max_time + self.calls_to_sleep = calls_to_sleep + self.sub_solver = sub_solver + + self.num_calls = 0 + self.options = Bunch() + + def available(self): + return True + + def license_is_valid(self): + return True + + def __enter__(self): + return self + + def __exit__(self, et, ev, tb): + pass + + def solve(self, model, **kwargs): + """ + 'Solve' a model. + + Parameters + ---------- + model : ConcreteModel + Model of interest. + + Returns + ------- + results : SolverResults + Solver results. + """ + + # ensure only one active objective + active_objs = [ + obj for obj in model.component_data_objects(Objective, active=True) + ] + assert len(active_objs) == 1 + + if self.num_calls < self.calls_to_sleep: + # invoke subsolver + results = self.sub_solver.solve(model, **kwargs) + self.num_calls += 1 + else: + # trigger time delay + time.sleep(self.max_time) + results = SolverResults() + + # reset number of calls + self.num_calls = 0 + + # generate solution (current model variable values) + sol = Solution() + sol.variable = { + var.name: {"Value": value(var)} + for var in model.component_data_objects(Var, active=True) + } + sol._cuid = False + sol.status = SolutionStatus.stoppedByLimit + results.solution.insert(sol) + + # set up results.solver + results.solver.time = self.max_time + results.solver.termination_condition = TerminationCondition.maxTimeLimit + results.solver.status = SolverStatus.warning + + return results + + # === util.py class testSelectiveClone(unittest.TestCase): ''' @@ -53,7 +185,7 @@ def test_cloning_negative_case(self): m.x = Var(initialize=2) m.y = Var(initialize=2) m.p = Param(initialize=1) - m.con = Constraint(expr= m.x * m.p + m.y <= 0) + m.con = Constraint(expr=m.x * m.p + m.y <= 0) n = ConcreteModel() n.x = Var() @@ -61,8 +193,12 @@ def test_cloning_negative_case(self): cloned_model = selective_clone(block=m, first_stage_vars=m.first_stage_vars) - self.assertNotEqual(id(m.first_stage_vars), id(cloned_model.first_stage_vars), msg="First stage variables should" - "not be equal.") + self.assertNotEqual( + id(m.first_stage_vars), + id(cloned_model.first_stage_vars), + msg="First stage variables should not be equal.", + ) + def test_cloning_positive_case(self): ''' Testing if selective_clone works correctly for correct first_stage_var object definition. @@ -76,12 +212,23 @@ def test_cloning_positive_case(self): cloned_model = selective_clone(block=m, first_stage_vars=m.first_stage_vars) - self.assertEqual(id(m.x), id(cloned_model.x), - msg="First stage variables should" - "be equal.") - self.assertNotEqual(id(m.y), id(cloned_model.y), msg="Non-first-stage variables should not be equal.") - self.assertNotEqual(id(m.p), id(cloned_model.p), msg="Params should not be equal.") - self.assertNotEqual(id(m.con), id(cloned_model.con), msg="Constraint objects should not be equal.") + self.assertEqual( + id(m.x), id(cloned_model.x), msg="First stage variables should be equal." + ) + self.assertNotEqual( + id(m.y), + id(cloned_model.y), + msg="Non-first-stage variables should not be equal.", + ) + self.assertNotEqual( + id(m.p), id(cloned_model.p), msg="Params should not be equal." + ) + self.assertNotEqual( + id(m.con), + id(cloned_model.con), + msg="Constraint objects should not be equal.", + ) + class testAddDecisionRuleVars(unittest.TestCase): ''' @@ -114,9 +261,12 @@ def test_add_decision_rule_vars_positive_case(self): add_decision_rule_variables(model_data=m, config=config) - self.assertEqual(len(m.working_model.util.first_stage_variables), len(m.working_model.util.second_stage_variables), - msg="For static approximation decision rule the number of decision rule variables" - "added to the list of design variables should equal the number of control variables.") + self.assertEqual( + len(m.working_model.util.first_stage_variables), + len(m.working_model.util.second_stage_variables), + msg="For static approximation decision rule the number of decision rule variables" + "added to the list of design variables should equal the number of control variables.", + ) m.working_model.util.first_stage_variables = [] @@ -127,11 +277,14 @@ def test_add_decision_rule_vars_positive_case(self): add_decision_rule_variables(m, config=config) - self.assertEqual(len(m.working_model.util.first_stage_variables), - len(m.working_model.util.second_stage_variables)*(1 + len(m.working_model.util.uncertain_params)), - msg="For affine decision rule the number of decision rule variables add to the " - "list of design variables should equal the number of control variables" - "multiplied by the number of uncertain parameters plus 1.") + self.assertEqual( + len(m.working_model.util.first_stage_variables), + len(m.working_model.util.second_stage_variables) + * (1 + len(m.working_model.util.uncertain_params)), + msg="For affine decision rule the number of decision rule variables add to the " + "list of design variables should equal the number of control variables" + "multiplied by the number of uncertain parameters plus 1.", + ) m.working_model.util.first_stage_variables = [] @@ -144,14 +297,20 @@ def test_add_decision_rule_vars_positive_case(self): add_decision_rule_variables(m, config=config) - self.assertEqual(len(m.working_model.util.first_stage_variables), - len(m.working_model.util.second_stage_variables)* - int(2 * len(m.working_model.util.uncertain_params) + - sp.special.comb(N=len(m.working_model.util.uncertain_params), k=2) + 1), - msg="For quadratic decision rule the number of decision rule variables add to the " - "list of design variables should equal the number of control variables" - "multiplied by 2 time the number of uncertain parameters plus all 2-combinations" - "of uncertain parameters plus 1.") + self.assertEqual( + len(m.working_model.util.first_stage_variables), + len(m.working_model.util.second_stage_variables) + * int( + 2 * len(m.working_model.util.uncertain_params) + + sp.special.comb(N=len(m.working_model.util.uncertain_params), k=2) + + 1 + ), + msg="For quadratic decision rule the number of decision rule variables add to the " + "list of design variables should equal the number of control variables" + "multiplied by 2 time the number of uncertain parameters plus all 2-combinations" + "of uncertain parameters plus 1.", + ) + class testAddDecisionRuleConstraints(unittest.TestCase): ''' @@ -186,16 +345,19 @@ def test_correct_number_of_decision_rule_constraints(self): config = Block() config.decision_rule_order = 0 - add_decision_rule_constraints(model_data=m,config=config) + add_decision_rule_constraints(model_data=m, config=config) for c in m.working_model.component_data_objects(Constraint, descend_into=True): if "decision_rule_eqn_" in c.name: decision_rule_cons.append(c) m.working_model.del_component(c) - self.assertEqual(len(decision_rule_cons), len(m.working_model.util.second_stage_variables), - msg="The number of decision rule constraints added to model should equal" - "the number of control variables in the model.") + self.assertEqual( + len(decision_rule_cons), + len(m.working_model.util.second_stage_variables), + msg="The number of decision rule constraints added to model should equal" + "the number of control variables in the model.", + ) decision_rule_cons = [] config.decision_rule_order = 1 @@ -214,9 +376,12 @@ def test_correct_number_of_decision_rule_constraints(self): decision_rule_cons.append(c) m.working_model.del_component(c) - self.assertEqual(len(decision_rule_cons), len(m.working_model.util.second_stage_variables), - msg="The number of decision rule constraints added to model should equal" - "the number of control variables in the model.") + self.assertEqual( + len(decision_rule_cons), + len(m.working_model.util.second_stage_variables), + msg="The number of decision rule constraints added to model should equal" + "the number of control variables in the model.", + ) decision_rule_cons = [] config.decision_rule_order = 2 @@ -237,18 +402,21 @@ def test_correct_number_of_decision_rule_constraints(self): decision_rule_cons.append(c) m.working_model.del_component(c) - self.assertEqual(len(decision_rule_cons), len(m.working_model.util.second_stage_variables), - msg="The number of decision rule constraints added to model should equal" - "the number of control variables in the model.") + self.assertEqual( + len(decision_rule_cons), + len(m.working_model.util.second_stage_variables), + msg="The number of decision rule constraints added to model should equal" + "the number of control variables in the model.", + ) + class testModelIsValid(unittest.TestCase): - def test_model_is_valid_via_possible_inputs(self): m = ConcreteModel() m.x = Var() - m.obj1 = Objective(expr = m.x**2) + m.obj1 = Objective(expr=m.x**2) self.assertTrue(model_is_valid(m)) - m.obj2 = Objective(expr = m.x) + m.obj2 = Objective(expr=m.x) self.assertFalse(model_is_valid(m)) m.obj2.deactivate() self.assertTrue(model_is_valid(m)) @@ -256,29 +424,41 @@ def test_model_is_valid_via_possible_inputs(self): m.del_component("obj2") self.assertFalse(model_is_valid(m)) -class testTurnBoundsToConstraints(unittest.TestCase): +class testTurnBoundsToConstraints(unittest.TestCase): def test_bounds_to_constraints(self): m = ConcreteModel() - m.x = Var(initialize=1, bounds=(0,1)) - m.y = Var(initialize=0, bounds=(None,1)) + m.x = Var(initialize=1, bounds=(0, 1)) + m.y = Var(initialize=0, bounds=(None, 1)) m.w = Var(initialize=0, bounds=(1, None)) - m.z = Var(initialize=0, bounds=(None,None)) + m.z = Var(initialize=0, bounds=(None, None)) turn_bounds_to_constraints(m.z, m) - self.assertEqual(len(list(m.component_data_objects(Constraint))), 0, - msg="Inequality constraints were written for bounds on a variable with no bounds.") + self.assertEqual( + len(list(m.component_data_objects(Constraint))), + 0, + msg="Inequality constraints were written for bounds on a variable with no bounds.", + ) turn_bounds_to_constraints(m.y, m) - self.assertEqual(len(list(m.component_data_objects(Constraint))), 1, - msg="Inequality constraints were not " - "written correctly for a variable with an upper bound and no lower bound.") + self.assertEqual( + len(list(m.component_data_objects(Constraint))), + 1, + msg="Inequality constraints were not " + "written correctly for a variable with an upper bound and no lower bound.", + ) turn_bounds_to_constraints(m.w, m) - self.assertEqual(len(list(m.component_data_objects(Constraint))), 2, - msg="Inequality constraints were not " - "written correctly for a variable with a lower bound and no upper bound.") + self.assertEqual( + len(list(m.component_data_objects(Constraint))), + 2, + msg="Inequality constraints were not " + "written correctly for a variable with a lower bound and no upper bound.", + ) turn_bounds_to_constraints(m.x, m) - self.assertEqual(len(list(m.component_data_objects(Constraint))), 4, - msg="Inequality constraints were not " - "written correctly for a variable with both lower and upper bound.") + self.assertEqual( + len(list(m.component_data_objects(Constraint))), + 4, + msg="Inequality constraints were not " + "written correctly for a variable with both lower and upper bound.", + ) def test_uncertain_bounds_to_constraints(self): # test model @@ -294,10 +474,10 @@ def test_uncertain_bounds_to_constraints(self): m.u = Var(initialize=0, bounds=(0, m.p)) m.v = Var(initialize=1, bounds=(m.r, m.p)) m.w = Var(initialize=1, bounds=(None, None)) - m.x = Var(initialize=1, bounds=(0, exp(-1*m.p / 8) * m.q * m.s)) + m.x = Var(initialize=1, bounds=(0, exp(-1 * m.p / 8) * m.q * m.s)) m.y = Var(initialize=-1, bounds=(m.r * m.p, 0)) m.z = Var(initialize=1, bounds=(0, m.s)) - m.t = Var(initialize=1, bounds=(0, m.p ** 2)) + m.t = Var(initialize=1, bounds=(0, m.p**2)) # objective m.obj = Objective(sense=maximize, expr=m.x**2 - m.y + m.t**2 + m.v) @@ -310,10 +490,12 @@ def test_uncertain_bounds_to_constraints(self): # or active performance constraints mod.obj.deactivate() replace_uncertain_bounds_with_constraints(mod, uncertain_params) - self.assertTrue(hasattr(mod, 'uncertain_var_bound_cons'), - msg='Uncertain variable bounds erroneously added. ' - 'Check only variables participating in active ' - 'objective and constraints are added.') + self.assertTrue( + hasattr(mod, 'uncertain_var_bound_cons'), + msg='Uncertain variable bounds erroneously added. ' + 'Check only variables participating in active ' + 'objective and constraints are added.', + ) self.assertFalse(mod.uncertain_var_bound_cons) mod.obj.activate() @@ -322,8 +504,8 @@ def test_uncertain_bounds_to_constraints(self): m.add_component('perf_constraints', constraints_m) constraints_m.add(m.w == 2 * m.x + m.y) constraints_m.add(m.v + m.x + m.y >= 0) - constraints_m.add(m.y ** 2 + m.z >= 0) - constraints_m.add(m.x ** 2 + m.u <= 1) + constraints_m.add(m.y**2 + m.z >= 0) + constraints_m.add(m.x**2 + m.u <= 1) constraints_m[4].deactivate() # clone model with constraints added @@ -349,26 +531,33 @@ def test_uncertain_bounds_to_constraints(self): # active objective and activated constraints correctly determined svars_con = ComponentSet(get_vars_from_component(mod_2, Constraint)) svars_obj = ComponentSet(get_vars_from_component(mod_2, Objective)) - vars_in_active_cons = ComponentSet([mod_2.z, mod_2.w, mod_2.y, - mod_2.x, mod_2.v]) + vars_in_active_cons = ComponentSet( + [mod_2.z, mod_2.w, mod_2.y, mod_2.x, mod_2.v] + ) vars_in_active_obj = ComponentSet([mod_2.x, mod_2.y, mod_2.t, mod_2.v]) - self.assertEqual(svars_con, vars_in_active_cons, - msg='Mismatch of variables participating in ' - 'activated constraints.') - self.assertEqual(svars_obj, vars_in_active_obj, - msg='Mismatch of variables participating in ' - 'activated objectives.') + self.assertEqual( + svars_con, + vars_in_active_cons, + msg='Mismatch of variables participating in activated constraints.', + ) + self.assertEqual( + svars_obj, + vars_in_active_obj, + msg='Mismatch of variables participating in activated objectives.', + ) # replace bounds in model with performance constraints uncertain_params = [mod_2.p, mod_2.r] replace_uncertain_bounds_with_constraints(mod_2, uncertain_params) # check that same number of constraints added to model - self.assertEqual(len(list(m.component_data_objects(Constraint))), - len(list(mod_2.component_data_objects(Constraint))), - msg='Mismatch between number of explicit variable ' - 'bound inequality constraints added ' - 'automatically and added manually.') + self.assertEqual( + len(list(m.component_data_objects(Constraint))), + len(list(mod_2.component_data_objects(Constraint))), + msg='Mismatch between number of explicit variable ' + 'bound inequality constraints added ' + 'automatically and added manually.', + ) # check that explicit constraints contain correct vars and params vars_in_cons = ComponentSet() @@ -377,27 +566,29 @@ def test_uncertain_bounds_to_constraints(self): # get variables, mutable params in the explicit constraints cons = mod_2.uncertain_var_bound_cons for idx in cons: - for p in EXPR.identify_mutable_parameters(cons[idx].expr): + for p in identify_mutable_parameters(cons[idx].expr): params_in_cons.add(p) - for v in EXPR.identify_variables(cons[idx].expr): + for v in identify_variables(cons[idx].expr): vars_in_cons.add(v) # reduce only to uncertain mutable params found params_in_cons = params_in_cons & uncertain_params # expected participating variables - vars_with_bounds_removed = ComponentSet([mod_2.x, mod_2.y, mod_2.v, - mod_2.t]) + vars_with_bounds_removed = ComponentSet([mod_2.x, mod_2.y, mod_2.v, mod_2.t]) # complete the check - self.assertEqual(params_in_cons, ComponentSet([mod_2.p, mod_2.r]), - msg='Mismatch of parameters added to explicit ' - 'inequality constraints.') - self.assertEqual(vars_in_cons, vars_with_bounds_removed, - msg='Mismatch of variables added to explicit ' - 'inequality constraints.') + self.assertEqual( + params_in_cons, + ComponentSet([mod_2.p, mod_2.r]), + msg='Mismatch of parameters added to explicit inequality constraints.', + ) + self.assertEqual( + vars_in_cons, + vars_with_bounds_removed, + msg='Mismatch of variables added to explicit inequality constraints.', + ) class testTransformToStandardForm(unittest.TestCase): - def test_transform_to_std_form(self): """Check that `pyros.util.transform_to_standard_form` works correctly for an example model. That is: @@ -434,56 +625,82 @@ def test_transform_to_std_form(self): clist.add(m.x >= 1) clist.add((0, m.x, 1)) - num_orig_cons = len([con for con in - m.component_data_objects(Constraint, - active=True, - descend_into=True)]) + num_orig_cons = len( + [ + con + for con in m.component_data_objects( + Constraint, active=True, descend_into=True + ) + ] + ) # constraints with finite, distinct lower & upper bounds - num_lbub_cons = len([con for con in - m.component_data_objects(Constraint, - active=True, - descend_into=True) - if con.lower is not None - and con.upper is not None - and con.lower is not con.upper]) + num_lbub_cons = len( + [ + con + for con in m.component_data_objects( + Constraint, active=True, descend_into=True + ) + if con.lower is not None + and con.upper is not None + and con.lower is not con.upper + ] + ) # count constraints with no bounds - num_nobound_cons = len([con for con in - m.component_data_objects(Constraint, - active=True, - descend_into=True) - if con.lower is None - and con.upper is None - ]) + num_nobound_cons = len( + [ + con + for con in m.component_data_objects( + Constraint, active=True, descend_into=True + ) + if con.lower is None and con.upper is None + ] + ) transform_to_standard_form(m) - cons = [con for con in m.component_data_objects(Constraint, - active=True, - descend_into=True)] + cons = [ + con + for con in m.component_data_objects( + Constraint, active=True, descend_into=True + ) + ] for con in cons: - has_lb_or_ub = not(con.lower is None and con.upper is None) + has_lb_or_ub = not (con.lower is None and con.upper is None) if has_lb_or_ub and not con.equality: - self.assertTrue(con.lower is None, msg="Constraint %s not" - " in standard form" % con.name) + self.assertTrue( + con.lower is None, + msg="Constraint %s not in standard form" % con.name, + ) lb_is_ub = con.lower is con.upper - self.assertFalse(lb_is_ub, msg="Constraint %s should be" - " converted to equality" % con.name) + self.assertFalse( + lb_is_ub, + msg="Constraint %s should be converted to equality" % con.name, + ) if con is not m.c3: - self.assertTrue(has_lb_or_ub, msg="Constraint %s should have" - " a lower or upper bound" % con.name) - - self.assertEqual(len([con for con in - m.component_data_objects(Constraint, - active=True, - descend_into=True)]), - num_orig_cons + num_lbub_cons - num_nobound_cons, - msg="Expected number of constraints after\n " - "standardizing constraints not matched. " - "Number of constraints after\n " - "transformation" - " should be (number constraints in original " - "model) \n + (number of constraints with " - "distinct finite lower and upper bounds).") + self.assertTrue( + has_lb_or_ub, + msg="Constraint %s should have" + " a lower or upper bound" % con.name, + ) + + self.assertEqual( + len( + [ + con + for con in m.component_data_objects( + Constraint, active=True, descend_into=True + ) + ] + ), + num_orig_cons + num_lbub_cons - num_nobound_cons, + msg="Expected number of constraints after\n " + "standardizing constraints not matched. " + "Number of constraints after\n " + "transformation" + " should be (number constraints in original " + "model) \n + (number of constraints with " + "distinct finite lower and upper bounds).", + ) def test_transform_does_not_alter_num_of_constraints(self): """ @@ -498,17 +715,27 @@ def test_transform_does_not_alter_num_of_constraints(self): m.x = Var(initialize=1, bounds=(0, 1)) m.y = Var(initialize=0, bounds=(None, 1)) m.con1 = Constraint(expr=m.x >= 1 + m.y) - m.con2 = Constraint(expr=m.x**2 + m.y**2>= 9) + m.con2 = Constraint(expr=m.x**2 + m.y**2 >= 9) original_num_constraints = len(list(m.component_data_objects(Constraint))) transform_to_standard_form(m) final_num_constraints = len(list(m.component_data_objects(Constraint))) - self.assertEqual(original_num_constraints, final_num_constraints, - msg="Transform to standard form function led to a " - "different number of constraints than in the original model.") - number_of_non_standard_form_inequalities = \ - len(list(c for c in list(m.component_data_objects(Constraint)) if c.lower != None)) - self.assertEqual(number_of_non_standard_form_inequalities, 0, - msg="All inequality constraints were not transformed to standard form.") + self.assertEqual( + original_num_constraints, + final_num_constraints, + msg="Transform to standard form function led to a " + "different number of constraints than in the original model.", + ) + number_of_non_standard_form_inequalities = len( + list( + c for c in list(m.component_data_objects(Constraint)) if c.lower != None + ) + ) + self.assertEqual( + number_of_non_standard_form_inequalities, + 0, + msg="All inequality constraints were not transformed to standard form.", + ) + # === UncertaintySets.py # Mock abstract class @@ -519,11 +746,9 @@ class myUncertaintySet(UncertaintySet): ''' def set_as_constraint(self, uncertain_params, **kwargs): - - return Constraint(expr= sum(v for v in uncertain_params) <= 0) + return Constraint(expr=sum(v for v in uncertain_params) <= 0) def point_in_set(self, uncertain_params, **kwargs): - return True def geometry(self): @@ -533,7 +758,8 @@ def dim(self): self.dim = 1 def parameter_bounds(self): - return [(0,1)] + return [(0, 1)] + class testAbstractUncertaintySetClass(unittest.TestCase): ''' @@ -555,15 +781,21 @@ def test_uncertainty_set_with_correct_params(self): m.uncertain_param_vars = m.uncertain_params _set = myUncertaintySet() - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) uncertain_params_in_expr = list( - v for v in m.uncertain_param_vars if - v in ComponentSet(identify_variables(expr=m.uncertainty_set_contr.expr)) + v + for v in m.uncertain_param_vars + if v in ComponentSet(identify_variables(expr=m.uncertainty_set_contr.expr)) ) - self.assertEqual([id(u) for u in uncertain_params_in_expr], [id(u) for u in m.uncertain_param_vars], - msg="Uncertain param Var objects used to construct uncertainty set constraint must" - "be the same uncertain param Var objects in the original model.") + self.assertEqual( + [id(u) for u in uncertain_params_in_expr], + [id(u) for u in m.uncertain_param_vars], + msg="Uncertain param Var objects used to construct uncertainty set constraint must" + "be the same uncertain param Var objects in the original model.", + ) def test_uncertainty_set_with_incorrect_params(self): ''' @@ -576,21 +808,180 @@ def test_uncertainty_set_with_incorrect_params(self): m.uncertain_params = [m.p1, m.p2] _set = myUncertaintySet() - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_params) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_params + ) variables_in_constr = list( - v for v in m.uncertain_params if - v in ComponentSet(identify_variables(expr=m.uncertainty_set_contr.expr)) + v + for v in m.uncertain_params + if v in ComponentSet(identify_variables(expr=m.uncertainty_set_contr.expr)) + ) + + self.assertEqual( + len(variables_in_constr), + 0, + msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" + "variable expression.", ) - self.assertEqual(len(variables_in_constr), 0, - msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" - "variable expression.") class testEllipsoidalUncertaintySetClass(unittest.TestCase): - ''' - Ellipsoidal uncertainty sets. Required inputs are covariance matrix covar, scale, mean, and list - of uncertain params. - ''' + """ + Unit tests for the EllipsoidalSet + """ + + def test_normal_construction_and_update(self): + """ + Test EllipsoidalSet constructor and setter + work normally when arguments are appropriate. + """ + center = [0, 0] + shape_matrix = [[1, 0], [0, 2]] + scale = 2 + eset = EllipsoidalSet(center, shape_matrix, scale) + np.testing.assert_allclose( + center, eset.center, err_msg="EllipsoidalSet center not as expected" + ) + np.testing.assert_allclose( + shape_matrix, + eset.shape_matrix, + err_msg="EllipsoidalSet shape matrix not as expected", + ) + np.testing.assert_allclose( + scale, eset.scale, err_msg="EllipsoidalSet scale not as expected" + ) + + # check attributes update + new_center = [-1, -3] + new_shape_matrix = [[2, 1], [1, 3]] + new_scale = 1 + + eset.center = new_center + eset.shape_matrix = new_shape_matrix + eset.scale = new_scale + + np.testing.assert_allclose( + new_center, + eset.center, + err_msg="EllipsoidalSet center update not as expected", + ) + np.testing.assert_allclose( + new_shape_matrix, + eset.shape_matrix, + err_msg="EllipsoidalSet shape matrix update not as expected", + ) + np.testing.assert_allclose( + new_scale, eset.scale, err_msg="EllipsoidalSet scale update not as expected" + ) + + def test_error_on_ellipsoidal_dim_change(self): + """ + EllipsoidalSet dimension is considered immutable. + Test ValueError raised when center size is not equal + to set dimension. + """ + invalid_center = [0, 0] + shape_matrix = [[1, 0], [0, 1]] + scale = 2 + + eset = EllipsoidalSet([0, 0], shape_matrix, scale) + + exc_str = r"Attempting to set.*dimension 2 to value of dimension 3" + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + eset.center = [0, 0, 0] + + def test_error_on_neg_scale(self): + """ + Test ValueError raised if scale attribute set to negative + value. + """ + center = [0, 0] + shape_matrix = [[1, 0], [0, 2]] + neg_scale = -1 + + exc_str = r".*must be a non-negative real \(provided.*-1\)" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + EllipsoidalSet(center, shape_matrix, neg_scale) + + # construct a valid EllipsoidalSet + eset = EllipsoidalSet(center, shape_matrix, scale=2) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + eset.scale = neg_scale + + def test_error_on_shape_matrix_with_wrong_size(self): + """ + Test error in event EllipsoidalSet shape matrix + is not in accordance with set dimension. + """ + center = [0, 0] + invalid_shape_matrix = [[1, 0]] + scale = 1 + + exc_str = r".*must be a square matrix of size 2.*\(provided.*shape \(1, 2\)\)" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + EllipsoidalSet(center, invalid_shape_matrix, scale) + + # construct a valid EllipsoidalSet + eset = EllipsoidalSet(center, [[1, 0], [0, 1]], scale) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + eset.shape_matrix = invalid_shape_matrix + + def test_error_on_invalid_shape_matrix(self): + """ + Test exceptional cases of invalid square shape matrix + arguments + """ + center = [0, 0] + scale = 3 + + # assert error on construction + with self.assertRaisesRegex( + ValueError, + r"Shape matrix must be symmetric", + msg="Asymmetric shape matrix test failed", + ): + EllipsoidalSet(center, [[1, 1], [0, 1]], scale) + with self.assertRaises( + np.linalg.LinAlgError, msg="Singular shape matrix test failed" + ): + EllipsoidalSet(center, [[0, 0], [0, 0]], scale) + with self.assertRaisesRegex( + ValueError, + r"Non positive-definite.*", + msg="Indefinite shape matrix test failed", + ): + EllipsoidalSet(center, [[1, 0], [0, -2]], scale) + + # construct a valid EllipsoidalSet + eset = EllipsoidalSet(center, [[1, 0], [0, 2]], scale) + + # assert error on update + with self.assertRaisesRegex( + ValueError, + r"Shape matrix must be symmetric", + msg="Asymmetric shape matrix test failed", + ): + eset.shape_matrix = [[1, 1], [0, 1]] + with self.assertRaises( + np.linalg.LinAlgError, msg="Singular shape matrix test failed" + ): + eset.shape_matrix = [[0, 0], [0, 0]] + with self.assertRaisesRegex( + ValueError, + r"Non positive-definite.*", + msg="Indefinite shape matrix test failed", + ): + eset.shape_matrix = [[1, 0], [0, -2]] def test_uncertainty_set_with_correct_params(self): ''' @@ -603,19 +994,26 @@ def test_uncertainty_set_with_correct_params(self): m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) - cov = [[1,0], [0,1]] + cov = [[1, 0], [0, 1]] s = 1 _set = EllipsoidalSet(center=[0, 0], shape_matrix=cov, scale=s) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) uncertain_params_in_expr = list( - v for v in m.uncertain_param_vars.values() if - v in ComponentSet(identify_variables(expr=m.uncertainty_set_contr[1].expr)) + v + for v in m.uncertain_param_vars.values() + if v + in ComponentSet(identify_variables(expr=m.uncertainty_set_contr[1].expr)) ) - self.assertEqual([id(u) for u in uncertain_params_in_expr], [id(u) for u in m.uncertain_param_vars.values()], - msg="Uncertain param Var objects used to construct uncertainty set constraint must" - " be the same uncertain param Var objects in the original model.") + self.assertEqual( + [id(u) for u in uncertain_params_in_expr], + [id(u) for u in m.uncertain_param_vars.values()], + msg="Uncertain param Var objects used to construct uncertainty set constraint must" + " be the same uncertain param Var objects in the original model.", + ) def test_uncertainty_set_with_incorrect_params(self): ''' @@ -626,20 +1024,29 @@ def test_uncertainty_set_with_incorrect_params(self): m.p1 = Param(initialize=0, mutable=True) m.p2 = Param(initialize=0, mutable=True) m.uncertain_params = [m.p1, m.p2] - m.uncertain_param_vars = Param(range(len(m.uncertain_params)), initialize=0, mutable=True) - cov = [[1,0],[0,1]] + m.uncertain_param_vars = Param( + range(len(m.uncertain_params)), initialize=0, mutable=True + ) + cov = [[1, 0], [0, 1]] s = 1 _set = EllipsoidalSet(center=[0, 0], shape_matrix=cov, scale=s) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) variables_in_constr = list( - v for v in m.uncertain_params if - v in ComponentSet(identify_variables(expr=m.uncertainty_set_contr[1].expr)) + v + for v in m.uncertain_params + if v + in ComponentSet(identify_variables(expr=m.uncertainty_set_contr[1].expr)) ) - self.assertEqual(len(variables_in_constr), 0, - msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" - " variable expression.") + self.assertEqual( + len(variables_in_constr), + 0, + msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" + " variable expression.", + ) def test_point_in_set(self): m = ConcreteModel() @@ -651,7 +1058,9 @@ def test_point_in_set(self): s = 1 _set = EllipsoidalSet(center=[0, 0], shape_matrix=cov, scale=s) - self.assertTrue(_set.point_in_set([0,0]), msg="Point is not in the EllipsoidalSet.") + self.assertTrue( + _set.point_in_set([0, 0]), msg="Point is not in the EllipsoidalSet." + ) def test_add_bounds_on_uncertain_parameters(self): m = ConcreteModel() @@ -666,19 +1075,31 @@ def test_add_bounds_on_uncertain_parameters(self): EllipsoidalSet.add_bounds_on_uncertain_parameters(model=m, config=config) - self.assertNotEqual(m.util.uncertain_param_vars[0].lb, None, - "Bounds not added correctly for EllipsoidalSet") - self.assertNotEqual(m.util.uncertain_param_vars[0].ub, None, - "Bounds not added correctly for EllipsoidalSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].lb, None, - "Bounds not added correctly for EllipsoidalSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].ub, None, - "Bounds not added correctly for EllipsoidalSet") + self.assertNotEqual( + m.util.uncertain_param_vars[0].lb, + None, + "Bounds not added correctly for EllipsoidalSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[0].ub, + None, + "Bounds not added correctly for EllipsoidalSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].lb, + None, + "Bounds not added correctly for EllipsoidalSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].ub, + None, + "Bounds not added correctly for EllipsoidalSet", + ) def test_ellipsoidal_set_bounds(self): """Check `EllipsoidalSet` parameter bounds method correct.""" cov = [[2, 1], [1, 2]] - scales=[0.5, 2] + scales = [0.5, 2] mean = [1, 1] for scale in scales: @@ -689,10 +1110,7 @@ def test_ellipsoidal_set_bounds(self): diff = (cov[idx][idx] * scale) ** 0.5 actual_bounds.append((val - diff, val + diff)) self.assertTrue( - np.allclose( - np.array(bounds), - np.array(actual_bounds), - ), + np.allclose(np.array(bounds), np.array(actual_bounds)), msg=( f"EllipsoidalSet bounds {bounds} do not match their actual" f" values {actual_bounds} (for scale {scale}" @@ -702,10 +1120,84 @@ def test_ellipsoidal_set_bounds(self): ), ) + class testAxisAlignedEllipsoidalUncertaintySetClass(unittest.TestCase): - ''' - Axis aligned ellipsoidal uncertainty sets. Required inputs are half-lengths, nominal point, and right-hand side. - ''' + """ + Unit tests for the AxisAlignedEllipsoidalSet. + """ + + def test_normal_construction_and_update(self): + """ + Test AxisAlignedEllipsoidalSet constructor and setter + work normally when bounds are appropriate. + """ + center = [0, 0] + half_lengths = [1, 3] + aset = AxisAlignedEllipsoidalSet(center, half_lengths) + np.testing.assert_allclose( + center, + aset.center, + err_msg="AxisAlignedEllipsoidalSet center not as expected", + ) + np.testing.assert_allclose( + half_lengths, + aset.half_lengths, + err_msg="AxisAlignedEllipsoidalSet half-lengths not as expected", + ) + + # check attributes update + new_center = [-1, -3] + new_half_lengths = [0, 1] + aset.center = new_center + aset.half_lengths = new_half_lengths + + np.testing.assert_allclose( + new_center, + aset.center, + err_msg="AxisAlignedEllipsoidalSet center update not as expected", + ) + np.testing.assert_allclose( + new_half_lengths, + aset.half_lengths, + err_msg=("AxisAlignedEllipsoidalSet half lengths update not as expected"), + ) + + def test_error_on_axis_aligned_dim_change(self): + """ + AxisAlignedEllipsoidalSet dimension is considered immutable. + Test ValueError raised when attempting to alter the + box set dimension (i.e. number of rows of `bounds`). + """ + center = [0, 0] + half_lengths = [1, 3] + aset = AxisAlignedEllipsoidalSet(center, half_lengths) + + exc_str = r"Attempting to set.*dimension 2 to value of dimension 3" + with self.assertRaisesRegex(ValueError, exc_str): + aset.center = [0, 0, 1] + + with self.assertRaisesRegex(ValueError, exc_str): + aset.half_lengths = [0, 0, 1] + + def test_error_on_negative_axis_aligned_half_lengths(self): + """ + Test ValueError if half lengths for AxisAlignedEllipsoidalSet + contains a negative value. + """ + center = [1, 1] + invalid_half_lengths = [1, -1] + exc_str = r"Entry -1 of.*'half_lengths' is negative.*" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + AxisAlignedEllipsoidalSet(center, invalid_half_lengths) + + # construct a valid axis-aligned ellipsoidal set + aset = AxisAlignedEllipsoidalSet(center, [1, 0]) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + aset.half_lengths = invalid_half_lengths def test_uncertainty_set_with_correct_params(self): ''' @@ -718,16 +1210,23 @@ def test_uncertainty_set_with_correct_params(self): m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) - _set = AxisAlignedEllipsoidalSet(center=[0,0], half_lengths=[2,1]) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + _set = AxisAlignedEllipsoidalSet(center=[0, 0], half_lengths=[2, 1]) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) uncertain_params_in_expr = list( - v for v in m.uncertain_param_vars.values() if - v in ComponentSet(identify_variables(expr=m.uncertainty_set_contr[1].expr)) + v + for v in m.uncertain_param_vars.values() + if v + in ComponentSet(identify_variables(expr=m.uncertainty_set_contr[1].expr)) ) - self.assertEqual([id(u) for u in uncertain_params_in_expr], [id(u) for u in m.uncertain_param_vars.values()], - msg="Uncertain param Var objects used to construct uncertainty set constraint must" - " be the same uncertain param Var objects in the original model.") + self.assertEqual( + [id(u) for u in uncertain_params_in_expr], + [id(u) for u in m.uncertain_param_vars.values()], + msg="Uncertain param Var objects used to construct uncertainty set constraint must" + " be the same uncertain param Var objects in the original model.", + ) def test_uncertainty_set_with_incorrect_params(self): ''' @@ -738,17 +1237,26 @@ def test_uncertainty_set_with_incorrect_params(self): m.p1 = Param(initialize=0, mutable=True) m.p2 = Param(initialize=0, mutable=True) m.uncertain_params = [m.p1, m.p2] - m.uncertain_param_vars = Param(range(len(m.uncertain_params)), initialize=0, mutable=True) - _set = AxisAlignedEllipsoidalSet(center=[0,0], half_lengths=[2,1]) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + m.uncertain_param_vars = Param( + range(len(m.uncertain_params)), initialize=0, mutable=True + ) + _set = AxisAlignedEllipsoidalSet(center=[0, 0], half_lengths=[2, 1]) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) variables_in_constr = list( - v for v in m.uncertain_params if - v in ComponentSet(identify_variables(expr=m.uncertainty_set_contr[1].expr)) + v + for v in m.uncertain_params + if v + in ComponentSet(identify_variables(expr=m.uncertainty_set_contr[1].expr)) ) - self.assertEqual(len(variables_in_constr), 0, - msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" - " variable expression.") + self.assertEqual( + len(variables_in_constr), + 0, + msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" + " variable expression.", + ) def test_point_in_set(self): m = ConcreteModel() @@ -757,24 +1265,44 @@ def test_point_in_set(self): m.uncertain_params = [m.p1, m.p2] m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) _set = AxisAlignedEllipsoidalSet(center=[0, 0], half_lengths=[2, 1]) - self.assertTrue(_set.point_in_set([0, 0]), - msg="Point is not in the AxisAlignedEllipsoidalSet.") - + self.assertTrue( + _set.point_in_set([0, 0]), + msg="Point is not in the AxisAlignedEllipsoidalSet.", + ) + def test_add_bounds_on_uncertain_parameters(self): m = ConcreteModel() m.util = Block() - m.util.uncertain_param_vars = Var([0,1], initialize=0.5) + m.util.uncertain_param_vars = Var([0, 1], initialize=0.5) _set = AxisAlignedEllipsoidalSet(center=[0, 0], half_lengths=[2, 1]) config = Block() config.uncertainty_set = _set - AxisAlignedEllipsoidalSet.add_bounds_on_uncertain_parameters(model=m, config=config) + AxisAlignedEllipsoidalSet.add_bounds_on_uncertain_parameters( + model=m, config=config + ) - self.assertNotEqual(m.util.uncertain_param_vars[0].lb, None, "Bounds not added correctly for AxisAlignedEllipsoidalSet") - self.assertNotEqual(m.util.uncertain_param_vars[0].ub, None, "Bounds not added correctly for AxisAlignedEllipsoidalSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].lb, None, "Bounds not added correctly for AxisAlignedEllipsoidalSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].ub, None, "Bounds not added correctly for AxisAlignedEllipsoidalSet") + self.assertNotEqual( + m.util.uncertain_param_vars[0].lb, + None, + "Bounds not added correctly for AxisAlignedEllipsoidalSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[0].ub, + None, + "Bounds not added correctly for AxisAlignedEllipsoidalSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].lb, + None, + "Bounds not added correctly for AxisAlignedEllipsoidalSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].ub, + None, + "Bounds not added correctly for AxisAlignedEllipsoidalSet", + ) def test_set_with_zero_half_lengths(self): # construct ellipsoid @@ -810,8 +1338,9 @@ def test_set_with_zero_half_lengths(self): ), ) - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) def test_two_stg_mod_with_axis_aligned_set(self): """ Test two-stage model with `AxisAlignedEllipsoidalSet` @@ -825,17 +1354,14 @@ def test_two_stg_mod_with_axis_aligned_set(self): m.u1 = Param(initialize=1.125, mutable=True) m.u2 = Param(initialize=1, mutable=True) - m.con1 = Constraint(expr=m.x1 * m.u1**(0.5) - m.x2 * m.u1 <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u1 == m.x3) + m.con1 = Constraint(expr=m.x1 * m.u1 ** (0.5) - m.x2 * m.u1 <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u1 == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - m.u2) ** 2) # Define the uncertainty set # we take the parameter `u2` to be 'fixed' - ellipsoid = AxisAlignedEllipsoidalSet( - center=[1.125, 1], - half_lengths=[1, 0], - ) + ellipsoid = AxisAlignedEllipsoidalSet(center=[1.125, 1], half_lengths=[1, 0]) # Instantiate the PyROS solver pyros_solver = SolverFactory("pyros") @@ -856,26 +1382,127 @@ def test_two_stg_mod_with_axis_aligned_set(self): options={ "objective_focus": ObjectiveType.worst_case, "solve_master_globally": True, - } + }, ) # check successful termination self.assertEqual( results.pyros_termination_condition, pyrosTerminationCondition.robust_optimal, - msg="Did not identify robust optimal solution to problem instance." + msg="Did not identify robust optimal solution to problem instance.", ) self.assertGreater( results.iterations, 0, - msg="Robust infeasible model terminated in 0 iterations (nominal case)." + msg="Robust infeasible model terminated in 0 iterations (nominal case).", ) class testPolyhedralUncertaintySetClass(unittest.TestCase): - ''' - Polyhedral uncertainty sets. Required inputs are matrix A, right-hand-side b, and list of uncertain params. - ''' + """ + Unit tests for the Polyhedral set. + """ + + def test_normal_construction_and_update(self): + """ + Test PolyhedralSet constructor and attribute setters work + appropriately. + """ + lhs_coefficients_mat = [[1, 2, 3], [4, 5, 6]] + rhs_vec = [1, 3] + + pset = PolyhedralSet(lhs_coefficients_mat, rhs_vec) + + # check attributes are as expected + np.testing.assert_allclose(lhs_coefficients_mat, pset.coefficients_mat) + np.testing.assert_allclose(rhs_vec, pset.rhs_vec) + + # update the set + pset.coefficients_mat = [[1, 0, 1], [1, 1, 1.5]] + pset.rhs_vec = [3, 4] + + # check updates work + np.testing.assert_allclose([[1, 0, 1], [1, 1, 1.5]], pset.coefficients_mat) + np.testing.assert_allclose([3, 4], pset.rhs_vec) + + def test_error_on_polyhedral_set_dim_change(self): + """ + PolyhedralSet dimension (number columns of 'coefficients_mat') + is considered immutable. + Test ValueError raised if attempt made to change dimension. + """ + # construct valid set + pset = PolyhedralSet([[1, 2, 3], [4, 5, 6]], [1, 3]) + + exc_str = ( + r".*must have 3 columns to match set dimension \(provided.*2 columns\)" + ) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + pset.coefficients_mat = [[1, 2], [3, 4]] + + def test_error_on_inconsistent_rows(self): + """ + Number of rows of budget membership mat is immutable. + Similarly, size of rhs_vec is immutable. + Check ValueError raised in event of attempted change. + """ + coeffs_mat_exc_str = ( + r".*must have 2 rows to match shape of attribute 'rhs_vec' " + r"\(provided.*3 rows\)" + ) + rhs_vec_exc_str = ( + r".*must have 2 entries to match shape of attribute " + r"'coefficients_mat' \(provided.*3 entries\)" + ) + # assert error on construction + with self.assertRaisesRegex(ValueError, rhs_vec_exc_str): + PolyhedralSet([[1, 2], [3, 4]], rhs_vec=[1, 3, 3]) + + # construct a valid polyhedral set + # (2 x 2 coefficients, 2-vector for RHS) + pset = PolyhedralSet([[1, 2], [3, 4]], rhs_vec=[1, 3]) + + # assert error on update + with self.assertRaisesRegex(ValueError, coeffs_mat_exc_str): + # 3 x 2 matrix row mismatch + pset.coefficients_mat = [[1, 2], [3, 4], [5, 6]] + with self.assertRaisesRegex(ValueError, rhs_vec_exc_str): + # 3-vector mismatches 2 rows + pset.rhs_vec = [1, 3, 2] + + def test_error_on_empty_set(self): + """ + Check ValueError raised if nonemptiness check performed + at construction returns a negative result. + """ + exc_str = r"PolyhedralSet.*is empty.*" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + PolyhedralSet([[1], [-1]], rhs_vec=[1, -3]) + + def test_error_on_polyhedral_mat_all_zero_columns(self): + """ + Test ValueError raised if budget membership mat + has a column with all zeros. + """ + invalid_col_mat = [[0, 0, 1], [0, 0, 1], [0, 0, 1]] + rhs_vec = [1, 1, 2] + + exc_str = r".*all entries zero in columns at indexes: 0, 1.*" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + PolyhedralSet(invalid_col_mat, rhs_vec) + + # construct a valid budget set + pset = PolyhedralSet([[1, 0, 1], [1, 1, 0], [1, 1, 1]], rhs_vec) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + pset.coefficients_mat = invalid_col_mat def test_uncertainty_set_with_correct_params(self): ''' @@ -891,8 +1518,10 @@ def test_uncertainty_set_with_correct_params(self): A = [[0, 1], [1, 0]] b = [0, 0] - _set = PolyhedralSet(lhs_coefficients_mat=A, rhs_vec=b, ) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + _set = PolyhedralSet(lhs_coefficients_mat=A, rhs_vec=b) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) uncertain_params_in_expr = ComponentSet() for con in m.uncertainty_set_contr.values(): con_vars = ComponentSet(identify_variables(expr=con.expr)) @@ -900,10 +1529,12 @@ def test_uncertainty_set_with_correct_params(self): if v in con_vars: uncertain_params_in_expr.add(v) - self.assertEqual(uncertain_params_in_expr, - ComponentSet(m.uncertain_param_vars.values()), - msg="Uncertain param Var objects used to construct uncertainty set constraint must" - " be the same uncertain param Var objects in the original model.") + self.assertEqual( + uncertain_params_in_expr, + ComponentSet(m.uncertain_param_vars.values()), + msg="Uncertain param Var objects used to construct uncertainty set constraint must" + " be the same uncertain param Var objects in the original model.", + ) def test_uncertainty_set_with_incorrect_params(self): ''' @@ -915,30 +1546,38 @@ def test_uncertainty_set_with_incorrect_params(self): m.p1 = Var(initialize=0) m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] - m.uncertain_param_vars = Param(range(len(m.uncertain_params)), initialize=0, mutable=True) + m.uncertain_param_vars = Param( + range(len(m.uncertain_params)), initialize=0, mutable=True + ) A = [[0, 1], [1, 0]] b = [0, 0] _set = PolyhedralSet(lhs_coefficients_mat=A, rhs_vec=b) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) vars_in_expr = [] for con in m.uncertainty_set_contr.values(): vars_in_expr.extend( - v for v in m.uncertain_param_vars if - v in ComponentSet(identify_variables(expr=con.expr)) + v + for v in m.uncertain_param_vars + if v in ComponentSet(identify_variables(expr=con.expr)) ) - self.assertEqual(len(vars_in_expr), 0, - msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" - " variable expression.") - - def test_polyhedral_set_as_constraint(self): + self.assertEqual( + len(vars_in_expr), + 0, + msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" + " variable expression.", + ) + + def test_polyhedral_set_as_constraint(self): ''' The set_as_constraint method must return an indexed uncertainty_set_constr which has as many elements at their are dimensions in A. ''' - A = [[1, 0],[0, 1]] + A = [[1, 0], [0, 1]] b = [0, 0] m = ConcreteModel() @@ -946,11 +1585,16 @@ def test_polyhedral_set_as_constraint(self): m.p2 = Var(initialize=0) polyhedral_set = PolyhedralSet(lhs_coefficients_mat=A, rhs_vec=b) - m.uncertainty_set_constr = polyhedral_set.set_as_constraint(uncertain_params=[m.p1, m.p2]) + m.uncertainty_set_constr = polyhedral_set.set_as_constraint( + uncertain_params=[m.p1, m.p2] + ) - self.assertEqual(len(A), len(m.uncertainty_set_constr.index_set()), - msg="Polyhedral uncertainty set constraints must be as many as the" - "number of rows in the matrix A.") + self.assertEqual( + len(A), + len(m.uncertainty_set_constr.index_set()), + msg="Polyhedral uncertainty set constraints must be as many as the" + "number of rows in the matrix A.", + ) def test_point_in_set(self): A = [[1, 0], [0, 1]] @@ -962,10 +1606,12 @@ def test_point_in_set(self): m.uncertain_params = [m.p1, m.p2] m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) polyhedral_set = PolyhedralSet(lhs_coefficients_mat=A, rhs_vec=b) - self.assertTrue(polyhedral_set.point_in_set([0, 0]), - msg="Point is not in the PolyhedralSet.") - - @unittest.skipUnless(SolverFactory('baron').available(exception_flag=False), "Global NLP solver is not available.") + self.assertTrue( + polyhedral_set.point_in_set([0, 0]), + msg="Point is not in the PolyhedralSet.", + ) + + @unittest.skipUnless(baron_available, "Global NLP solver is not available.") def test_add_bounds_on_uncertain_parameters(self): m = ConcreteModel() m.util = Block() @@ -981,16 +1627,240 @@ def test_add_bounds_on_uncertain_parameters(self): PolyhedralSet.add_bounds_on_uncertain_parameters(model=m, config=config) - self.assertNotEqual(m.util.uncertain_param_vars[0].lb, None, "Bounds not added correctly for PolyhedralSet") - self.assertNotEqual(m.util.uncertain_param_vars[0].ub, None, "Bounds not added correctly for PolyhedralSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].lb, None, "Bounds not added correctly for PolyhedralSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].ub, None, "Bounds not added correctly for PolyhedralSet") + self.assertNotEqual( + m.util.uncertain_param_vars[0].lb, + None, + "Bounds not added correctly for PolyhedralSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[0].ub, + None, + "Bounds not added correctly for PolyhedralSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].lb, + None, + "Bounds not added correctly for PolyhedralSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].ub, + None, + "Bounds not added correctly for PolyhedralSet", + ) + class testBudgetUncertaintySetClass(unittest.TestCase): ''' - Budget uncertainty sets. Required inputs are matrix budget_membership_mat, rhs_vec. + Budget uncertainty sets. + Required inputs are matrix budget_membership_mat, rhs_vec. ''' + def test_normal_budget_construction_and_update(self): + """ + Test BudgetSet constructor and attribute setters work + appropriately. + """ + budget_mat = [[1, 0, 1], [0, 1, 0]] + budget_rhs_vec = [1, 3] + + # check attributes are as expected + buset = BudgetSet(budget_mat, budget_rhs_vec) + + np.testing.assert_allclose(budget_mat, buset.budget_membership_mat) + np.testing.assert_allclose(budget_rhs_vec, buset.budget_rhs_vec) + np.testing.assert_allclose( + [[1, 0, 1], [0, 1, 0], [-1, 0, 0], [0, -1, 0], [0, 0, -1]], + buset.coefficients_mat, + ) + np.testing.assert_allclose([1, 3, 0, 0, 0], buset.rhs_vec) + np.testing.assert_allclose(np.zeros(3), buset.origin) + + # update the set + buset.budget_membership_mat = [[1, 1, 0], [0, 0, 1]] + buset.budget_rhs_vec = [3, 4] + + # check updates work + np.testing.assert_allclose([[1, 1, 0], [0, 0, 1]], buset.budget_membership_mat) + np.testing.assert_allclose([3, 4], buset.budget_rhs_vec) + np.testing.assert_allclose( + [[1, 1, 0], [0, 0, 1], [-1, 0, 0], [0, -1, 0], [0, 0, -1]], + buset.coefficients_mat, + ) + np.testing.assert_allclose([3, 4, 0, 0, 0], buset.rhs_vec) + + # update origin + buset.origin = [1, 0, -1.5] + np.testing.assert_allclose([1, 0, -1.5], buset.origin) + + def test_error_on_budget_set_dim_change(self): + """ + BudgetSet dimension is considered immutable. + Test ValueError raised when attempting to alter the + budget set dimension. + """ + budget_mat = [[1, 0, 1], [0, 1, 0]] + budget_rhs_vec = [1, 3] + bu_set = BudgetSet(budget_mat, budget_rhs_vec) + + # error on budget incidence matrix update + exc_str = ( + r".*must have 3 columns to match set dimension \(provided.*1 columns\)" + ) + with self.assertRaisesRegex(ValueError, exc_str): + bu_set.budget_membership_mat = [[1], [1]] + + # error on origin update + exc_str = ( + r".*must have 3 entries to match set dimension \(provided.*4 entries\)" + ) + with self.assertRaisesRegex(ValueError, exc_str): + bu_set.origin = [1, 2, 1, 0] + + def test_error_on_budget_member_mat_row_change(self): + """ + Number of rows of budget membership mat is immutable. + Hence, size of budget_rhs_vec is also immutable. + """ + budget_mat = [[1, 0, 1], [0, 1, 0]] + budget_rhs_vec = [1, 3] + bu_set = BudgetSet(budget_mat, budget_rhs_vec) + + exc_str = ( + r".*must have 2 rows to match shape of attribute 'budget_rhs_vec' " + r"\(provided.*1 rows\)" + ) + with self.assertRaisesRegex(ValueError, exc_str): + bu_set.budget_membership_mat = [[1, 0, 1]] + + exc_str = ( + r".*must have 2 entries to match shape of attribute " + r"'budget_membership_mat' \(provided.*1 entries\)" + ) + with self.assertRaisesRegex(ValueError, exc_str): + bu_set.budget_rhs_vec = [1] + + def test_error_on_neg_budget_rhs_vec_entry(self): + """ + Test ValueError raised if budget RHS vec has entry + with negative value entry. + """ + budget_mat = [[1, 0, 1], [1, 1, 0]] + neg_val_rhs_vec = [1, -1] + + exc_str = r"Entry -1 of.*'budget_rhs_vec' is negative*" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + BudgetSet(budget_mat, neg_val_rhs_vec) + + # construct a valid budget set + buset = BudgetSet(budget_mat, [1, 1]) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + buset.budget_rhs_vec = neg_val_rhs_vec + + def test_error_on_non_bool_budget_mat_entry(self): + """ + Test ValueError raised if budget membership mat has + entry which is not a 0-1 value. + """ + invalid_budget_mat = [[1, 0, 1], [1, 1, 0.1]] + budget_rhs_vec = [1, 1] + + exc_str = r"Attempting.*entries.*not 0-1 values \(example: 0.1\).*" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + BudgetSet(invalid_budget_mat, budget_rhs_vec) + + # construct a valid budget set + buset = BudgetSet([[1, 0, 1], [1, 1, 0]], budget_rhs_vec) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + buset.budget_membership_mat = invalid_budget_mat + + def test_error_on_budget_mat_all_zero_rows(self): + """ + Test ValueError raised if budget membership mat + has a row with all zeros. + """ + invalid_row_mat = [[0, 0, 0], [1, 1, 1], [0, 0, 0]] + budget_rhs_vec = [1, 1, 2] + + exc_str = r".*all entries zero in rows at indexes: 0, 2.*" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + BudgetSet(invalid_row_mat, budget_rhs_vec) + + # construct a valid budget set + buset = BudgetSet([[1, 0, 1], [1, 1, 0], [1, 1, 1]], budget_rhs_vec) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + buset.budget_membership_mat = invalid_row_mat + + def test_error_on_budget_mat_all_zero_columns(self): + """ + Test ValueError raised if budget membership mat + has a column with all zeros. + """ + invalid_col_mat = [[0, 0, 1], [0, 0, 1], [0, 0, 1]] + budget_rhs_vec = [1, 1, 2] + + exc_str = r".*all entries zero in columns at indexes: 0, 1.*" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + BudgetSet(invalid_col_mat, budget_rhs_vec) + + # construct a valid budget set + buset = BudgetSet([[1, 0, 1], [1, 1, 0], [1, 1, 1]], budget_rhs_vec) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + buset.budget_membership_mat = invalid_col_mat + + @unittest.skipUnless( + SolverFactory("cbc").available(exception_flag=False), + "LP solver CBC not available", + ) + def test_budget_set_parameter_bounds_correct(self): + """ + If LP solver is available, test parameter bounds method + for factor model set is correct (check against + results from an LP solver). + """ + solver = SolverFactory("cbc") + + # construct budget set instances + buset1 = BudgetSet( + budget_membership_mat=[[1, 1], [0, 1]], rhs_vec=[2, 3], origin=None + ) + buset2 = BudgetSet( + budget_membership_mat=[[1, 0], [1, 1]], rhs_vec=[3, 2], origin=[1, 1] + ) + + # check parameter bounds matches LP results + # exactly for each case + for buset in [buset1, buset2]: + param_bounds = buset.parameter_bounds + lp_param_bounds = eval_parameter_bounds(buset, solver) + + self.assertTrue( + np.allclose(param_bounds, lp_param_bounds), + msg=( + "Parameter bounds not consistent with LP values for " + "BudgetSet with parameterization:\n" + f"budget_membership_mat={buset.budget_membership_mat},\n" + f"budget_rhs_vec={buset.budget_rhs_vec},\n" + f"origin={buset.origin}.\n" + f"({param_bounds} does not match {lp_param_bounds})" + ), + ) + def test_uncertainty_set_with_correct_params(self): ''' Case in which the UncertaintySet is constructed using the uncertain_param objects from the model to @@ -1004,11 +1874,15 @@ def test_uncertainty_set_with_correct_params(self): m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) # Single budget budget_membership_mat = [[1 for i in range(len(m.uncertain_param_vars))]] - rhs_vec = [0.1 * len(m.uncertain_param_vars) + sum(p.value for p in m.uncertain_param_vars.values())] - - _set = BudgetSet(budget_membership_mat=budget_membership_mat, - rhs_vec=rhs_vec) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + rhs_vec = [ + 0.1 * len(m.uncertain_param_vars) + + sum(p.value for p in m.uncertain_param_vars.values()) + ] + + _set = BudgetSet(budget_membership_mat=budget_membership_mat, rhs_vec=rhs_vec) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) uncertain_params_in_expr = [] for con in m.uncertainty_set_contr.values(): for v in m.uncertain_param_vars.values(): @@ -1017,10 +1891,12 @@ def test_uncertainty_set_with_correct_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't uncertain_params_in_expr.append(v) - - self.assertEqual([id(u) for u in uncertain_params_in_expr], [id(u) for u in m.uncertain_param_vars.values()], - msg="Uncertain param Var objects used to construct uncertainty set constraint must" - " be the same uncertain param Var objects in the original model.") + self.assertEqual( + [id(u) for u in uncertain_params_in_expr], + [id(u) for u in m.uncertain_param_vars.values()], + msg="Uncertain param Var objects used to construct uncertainty set constraint must" + " be the same uncertain param Var objects in the original model.", + ) def test_uncertainty_set_with_incorrect_params(self): ''' @@ -1032,24 +1908,34 @@ def test_uncertainty_set_with_incorrect_params(self): m.p1 = Var(initialize=0) m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] - m.uncertain_param_vars = Param(range(len(m.uncertain_params)), initialize=0, mutable=True) + m.uncertain_param_vars = Param( + range(len(m.uncertain_params)), initialize=0, mutable=True + ) # Single budget budget_membership_mat = [[1 for i in range(len(m.uncertain_param_vars))]] - rhs_vec = [0.1 * len(m.uncertain_param_vars) + sum(p.value for p in m.uncertain_param_vars.values())] - - _set = BudgetSet(budget_membership_mat=budget_membership_mat, - rhs_vec=rhs_vec) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + rhs_vec = [ + 0.1 * len(m.uncertain_param_vars) + + sum(p.value for p in m.uncertain_param_vars.values()) + ] + + _set = BudgetSet(budget_membership_mat=budget_membership_mat, rhs_vec=rhs_vec) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) vars_in_expr = [] for con in m.uncertainty_set_contr.values(): vars_in_expr.extend( - v for v in m.uncertain_param_vars.values() if - v in ComponentSet(identify_variables(expr=con.expr)) + v + for v in m.uncertain_param_vars.values() + if v in ComponentSet(identify_variables(expr=con.expr)) ) - self.assertEqual(len(vars_in_expr), 0, - msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" - " variable expression.") + self.assertEqual( + len(vars_in_expr), + 0, + msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" + " variable expression.", + ) def test_budget_set_as_constraint(self): ''' @@ -1064,15 +1950,25 @@ def test_budget_set_as_constraint(self): # Single budget budget_membership_mat = [[1 for i in range(len(m.uncertain_params))]] - rhs_vec = [0.1 * len(m.uncertain_params) + sum(p.value for p in m.uncertain_params)] + rhs_vec = [ + 0.1 * len(m.uncertain_params) + sum(p.value for p in m.uncertain_params) + ] - budget_set = BudgetSet(budget_membership_mat=budget_membership_mat, - rhs_vec=rhs_vec) - m.uncertainty_set_constr = budget_set.set_as_constraint(uncertain_params=m.uncertain_params) + budget_set = BudgetSet( + budget_membership_mat=budget_membership_mat, rhs_vec=rhs_vec + ) + m.uncertainty_set_constr = budget_set.set_as_constraint( + uncertain_params=m.uncertain_params + ) - self.assertEqual(len(budget_membership_mat), len(m.uncertainty_set_constr.index_set()), - msg="Budget uncertainty set constraints must be as many as the" - "number of rows in the matrix A.") + self.assertEqual( + len(budget_set.coefficients_mat), + len(m.uncertainty_set_constr.index_set()), + msg=( + "Number of budget set constraints should be equal to the " + "number of rows in the 'coefficients_mat' attribute" + ), + ) def test_point_in_set(self): m = ConcreteModel() @@ -1082,32 +1978,57 @@ def test_point_in_set(self): m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) budget_membership_mat = [[1 for i in range(len(m.uncertain_params))]] - rhs_vec = [0.1 * len(m.uncertain_params) + sum(p.value for p in m.uncertain_params)] + rhs_vec = [ + 0.1 * len(m.uncertain_params) + sum(p.value for p in m.uncertain_params) + ] - budget_set = BudgetSet(budget_membership_mat=budget_membership_mat, - rhs_vec=rhs_vec) - self.assertTrue(budget_set.point_in_set([0, 0]), - msg="Point is not in the BudgetSet.") + budget_set = BudgetSet( + budget_membership_mat=budget_membership_mat, rhs_vec=rhs_vec + ) + self.assertTrue( + budget_set.point_in_set([0, 0]), msg="Point is not in the BudgetSet." + ) def test_add_bounds_on_uncertain_parameters(self): m = ConcreteModel() m.util = Block() - m.util.uncertain_param_vars = Var([0,1], initialize=0.5) + m.util.uncertain_param_vars = Var([0, 1], initialize=0.5) budget_membership_mat = [[1 for i in range(len(m.util.uncertain_param_vars))]] - rhs_vec = [0.1 * len(m.util.uncertain_param_vars) + sum(value(p) for p in m.util.uncertain_param_vars.values())] + rhs_vec = [ + 0.1 * len(m.util.uncertain_param_vars) + + sum(value(p) for p in m.util.uncertain_param_vars.values()) + ] - budget_set = BudgetSet(budget_membership_mat=budget_membership_mat, - rhs_vec=rhs_vec) + budget_set = BudgetSet( + budget_membership_mat=budget_membership_mat, rhs_vec=rhs_vec + ) config = Block() config.uncertainty_set = budget_set BudgetSet.add_bounds_on_uncertain_parameters(model=m, config=config) - self.assertNotEqual(m.util.uncertain_param_vars[0].lb, None, "Bounds not added correctly for BudgetSet") - self.assertNotEqual(m.util.uncertain_param_vars[0].ub, None, "Bounds not added correctly for BudgetSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].lb, None, "Bounds not added correctly for BudgetSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].ub, None, "Bounds not added correctly for BudgetSet") + self.assertNotEqual( + m.util.uncertain_param_vars[0].lb, + None, + "Bounds not added correctly for BudgetSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[0].ub, + None, + "Bounds not added correctly for BudgetSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].lb, + None, + "Bounds not added correctly for BudgetSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].ub, + None, + "Bounds not added correctly for BudgetSet", + ) + class testCardinalityUncertaintySetClass(unittest.TestCase): ''' @@ -1115,6 +2036,99 @@ class testCardinalityUncertaintySetClass(unittest.TestCase): Because Cardinality adds cassi vars to model, must pass model to set_as_constraint() ''' + def test_normal_cardinality_construction_and_update(self): + """ + Test CardinalitySet constructor and setter work normally + when bounds are appropriate. + """ + # valid inputs + cset = CardinalitySet(origin=[0, 0], positive_deviation=[1, 3], gamma=2) + + # check attributes are as expected + np.testing.assert_allclose(cset.origin, [0, 0]) + np.testing.assert_allclose(cset.positive_deviation, [1, 3]) + np.testing.assert_allclose(cset.gamma, 2) + self.assertEqual(cset.dim, 2) + + # update the set + cset.origin = [1, 2] + cset.positive_deviation = [3, 0] + cset.gamma = 0.5 + + # check updates work + np.testing.assert_allclose(cset.origin, [1, 2]) + np.testing.assert_allclose(cset.positive_deviation, [3, 0]) + np.testing.assert_allclose(cset.gamma, 0.5) + + def test_error_on_neg_positive_deviation(self): + """ + Cardinality set positive deviation attribute should + contain nonnegative numerical entries. + + Check ValueError raised if any negative entries provided. + """ + origin = [0, 0] + positive_deviation = [1, -2] # invalid + gamma = 2 + + exc_str = r"Entry -2 of attribute 'positive_deviation' is negative value" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + cset = CardinalitySet(origin, positive_deviation, gamma) + + # construct a valid cardinality set + cset = CardinalitySet(origin, [1, 1], gamma) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + cset.positive_deviation = positive_deviation + + def test_error_on_invalid_gamma(self): + """ + Cardinality set gamma attribute should be a float-like + between 0 and the set dimension. + + Check ValueError raised if gamma attribute is set + to an invalid value. + """ + origin = [0, 0] + positive_deviation = [1, 1] + gamma = 3 # should be invalid + + exc_str = ( + r".*attribute 'gamma' must be a real number " + r"between 0 and dimension 2 \(provided value 3\)" + ) + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + CardinalitySet(origin, positive_deviation, gamma) + + # construct a valid cardinality set + cset = CardinalitySet(origin, positive_deviation, gamma=2) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + cset.gamma = gamma + + def test_error_on_cardinality_set_dim_change(self): + """ + Dimension is considered immutable. + Test ValueError raised when attempting to alter the + set dimension (i.e. number of entries of `origin`). + """ + # construct a valid cardinality set + cset = CardinalitySet(origin=[0, 0], positive_deviation=[1, 1], gamma=2) + + exc_str = r"Attempting to set.*dimension 2 to value of dimension 3" + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + cset.origin = [0, 0, 0] + with self.assertRaisesRegex(ValueError, exc_str): + cset.positive_deviation = [1, 1, 1] + @unittest.skipIf(not numpy_available, 'Numpy is not available.') def test_uncertainty_set_with_correct_params(self): ''' @@ -1133,9 +2147,12 @@ def test_uncertainty_set_with_correct_params(self): positive_deviation = list(0.3 for j in range(len(center))) gamma = np.ceil(len(m.uncertain_param_vars) / 2) - _set = CardinalitySet(origin=center, - positive_deviation=positive_deviation, gamma=gamma) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars, model=m) + _set = CardinalitySet( + origin=center, positive_deviation=positive_deviation, gamma=gamma + ) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars, model=m + ) uncertain_params_in_expr = [] for con in m.uncertainty_set_contr.values(): for v in m.uncertain_param_vars.values(): @@ -1144,11 +2161,12 @@ def test_uncertainty_set_with_correct_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't uncertain_params_in_expr.append(v) - - self.assertEqual([id(u) for u in uncertain_params_in_expr], [id(u) for u in m.uncertain_param_vars.values()], - msg="Uncertain param Var objects used to construct uncertainty set constraint must" - " be the same uncertain param Var objects in the original model.") - + self.assertEqual( + [id(u) for u in uncertain_params_in_expr], + [id(u) for u in m.uncertain_param_vars.values()], + msg="Uncertain param Var objects used to construct uncertainty set constraint must" + " be the same uncertain param Var objects in the original model.", + ) @unittest.skipIf(not numpy_available, 'Numpy is not available.') def test_uncertainty_set_with_incorrect_params(self): @@ -1162,15 +2180,20 @@ def test_uncertainty_set_with_incorrect_params(self): m.p1 = Var(initialize=0) m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] - m.uncertain_param_vars = Param(range(len(m.uncertain_params)), initialize=0, mutable=True) + m.uncertain_param_vars = Param( + range(len(m.uncertain_params)), initialize=0, mutable=True + ) center = list(p.value for p in m.uncertain_param_vars.values()) positive_deviation = list(0.3 for j in range(len(center))) gamma = np.ceil(len(m.uncertain_param_vars) / 2) - _set = CardinalitySet(origin=center, - positive_deviation=positive_deviation, gamma=gamma) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars, model=m) + _set = CardinalitySet( + origin=center, positive_deviation=positive_deviation, gamma=gamma + ) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars, model=m + ) vars_in_expr = [] for con in m.uncertainty_set_contr.values(): for v in m.uncertain_param_vars.values(): @@ -1179,10 +2202,12 @@ def test_uncertainty_set_with_incorrect_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't vars_in_expr.append(v) - self.assertEqual(len(vars_in_expr), 0, - msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" - " variable expression.") - + self.assertEqual( + len(vars_in_expr), + 0, + msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" + " variable expression.", + ) def test_point_in_set(self): m = ConcreteModel() @@ -1195,37 +2220,282 @@ def test_point_in_set(self): positive_deviation = list(0.3 for j in range(len(center))) gamma = np.ceil(len(m.uncertain_param_vars) / 2) - _set = CardinalitySet(origin=center, - positive_deviation=positive_deviation, gamma=gamma) + _set = CardinalitySet( + origin=center, positive_deviation=positive_deviation, gamma=gamma + ) - self.assertTrue(_set.point_in_set([0, 0]), - msg="Point is not in the CardinalitySet.") + self.assertTrue( + _set.point_in_set([0, 0]), msg="Point is not in the CardinalitySet." + ) def test_add_bounds_on_uncertain_parameters(self): m = ConcreteModel() m.util = Block() - m.util.uncertain_param_vars = Var([0,1], initialize=0.5) + m.util.uncertain_param_vars = Var([0, 1], initialize=0.5) center = list(p.value for p in m.util.uncertain_param_vars.values()) positive_deviation = list(0.3 for j in range(len(center))) gamma = np.ceil(len(center) / 2) - cardinality_set = CardinalitySet(origin=center, - positive_deviation=positive_deviation, gamma=gamma) + cardinality_set = CardinalitySet( + origin=center, positive_deviation=positive_deviation, gamma=gamma + ) config = Block() config.uncertainty_set = cardinality_set CardinalitySet.add_bounds_on_uncertain_parameters(model=m, config=config) - self.assertNotEqual(m.util.uncertain_param_vars[0].lb, None, "Bounds not added correctly for CardinalitySet") - self.assertNotEqual(m.util.uncertain_param_vars[0].ub, None, "Bounds not added correctly for CardinalitySet") - self.assertNotEqual(m.util.uncertain_param_vars[1].lb, None, "Bounds not added correctly for CardinalitySet") - self.assertNotEqual(m.util.uncertain_param_vars[1].ub, None, "Bounds not added correctly for CardinalitySet") + self.assertNotEqual( + m.util.uncertain_param_vars[0].lb, + None, + "Bounds not added correctly for CardinalitySet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[0].ub, + None, + "Bounds not added correctly for CardinalitySet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].lb, + None, + "Bounds not added correctly for CardinalitySet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].ub, + None, + "Bounds not added correctly for CardinalitySet", + ) + + +def eval_parameter_bounds(uncertainty_set, solver): + """ + Evaluate parameter bounds of uncertainty set by solving + bounding problems (as opposed to via the `parameter_bounds` + method). + """ + bounding_mdl = uncertainty_set.bounding_model() + + param_bounds = [] + for idx, obj in bounding_mdl.param_var_objectives.items(): + # activate objective for corresponding dimension + obj.activate() + bounds = [] + + # solve for lower bound, then upper bound + # solve should be successful + for sense in (minimize, maximize): + obj.sense = sense + solver.solve(bounding_mdl) + bounds.append(value(obj)) + + # add parameter bounds for current dimension + param_bounds.append(tuple(bounds)) + + # ensure sense is minimize when done, deactivate + obj.sense = minimize + obj.deactivate() + + return param_bounds + class testBoxUncertaintySetClass(unittest.TestCase): - ''' - Box uncertainty sets. Required input is bounds list. - ''' + """ + Unit tests for the box uncertainty set (BoxSet). + """ + + def test_normal_construction_and_update(self): + """ + Test BoxSet constructor and setter work normally + when bounds are appropriate. + """ + bounds = [[1, 2], [3, 4]] + bset = BoxSet(bounds=bounds) + np.testing.assert_allclose( + bounds, bset.bounds, err_msg="BoxSet bounds not as expected" + ) + + # check bounds update + new_bounds = [[3, 4], [5, 6]] + bset.bounds = new_bounds + np.testing.assert_allclose( + new_bounds, bset.bounds, err_msg="BoxSet bounds not as expected" + ) + + def test_error_on_box_set_dim_change(self): + """ + BoxSet dimension is considered immutable. + Test ValueError raised when attempting to alter the + box set dimension (i.e. number of rows of `bounds`). + """ + bounds = [[1, 2], [3, 4]] + bset = BoxSet(bounds=bounds) # 2-dimensional set + + exc_str = r"Attempting to set.*dimension 2 to a value of dimension 3" + with self.assertRaisesRegex(ValueError, exc_str): + bset.bounds = [[1, 2], [3, 4], [5, 6]] + + def test_error_on_lb_exceeds_ub(self): + """ + Test exception raised when an LB exceeds a UB. + """ + bad_bounds = [[1, 2], [4, 3]] + + exc_str = r"Lower bound 4 exceeds upper bound 3" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + BoxSet(bad_bounds) + + # construct a valid box set + bset = BoxSet([[1, 2], [3, 4]]) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + bset.bounds = bad_bounds + + def test_error_on_ragged_bounds_array(self): + """ + Test ValueError raised on attempting to set BoxSet bounds + to a ragged array. + + This test also validates `uncertainty_sets.is_ragged` for all + pre-defined array-like attributes of all set-types, as the + `is_ragged` method is used throughout. + """ + # example ragged arrays + ragged_arrays = ( + [[1, 2], 3], # list and int in same sequence + [[1, 2], [3, [4, 5]]], # 2nd row ragged (list and int) + [[1, 2], [3]], # variable row lengths + ) + + # construct valid box set + bset = BoxSet(bounds=[[1, 2], [3, 4]]) + + # exception message should match this regex + exc_str = r"Argument `bounds` should not be a ragged array-like.*" + for ragged_arr in ragged_arrays: + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + BoxSet(bounds=ragged_arr) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + bset.bounds = ragged_arr + + def test_error_on_invalid_bounds_shape(self): + """ + Test ValueError raised when attempting to set + Box set bounds to array of incorrect shape + (should be a 2-D array with 2 columns). + """ + # 3d array + three_d_arr = [[[1, 2], [3, 4], [5, 6]]] + exc_str = ( + r"Argument `bounds` must be a 2-dimensional.*" + r"\(detected 3 dimensions.*\)" + ) + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + BoxSet(three_d_arr) + + # construct valid box set + bset = BoxSet([[1, 2], [3, 4], [5, 6]]) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + bset.bounds = three_d_arr + + def test_error_on_wrong_number_columns(self): + """ + BoxSet bounds should be a 2D array-like with 2 columns. + ValueError raised if number columns wrong + """ + three_col_arr = [[1, 2, 3], [4, 5, 6]] + exc_str = ( + r"Attribute 'bounds' should be of shape \(\.{3},2\), " + r"but detected shape \(\.{3},3\)" + ) + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + BoxSet(three_col_arr) + + # construct a valid box set + bset = BoxSet([[1, 2], [3, 4]]) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + bset.bounds = three_col_arr + + def test_error_on_empty_last_dimension(self): + """ + Check ValueError raised when last dimension of BoxSet bounds is + empty. + """ + empty_2d_arr = [[], [], []] + exc_str = ( + r"Last dimension of argument `bounds` must be non-empty " + r"\(detected shape \(3, 0\)\)" + ) + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + BoxSet(bounds=empty_2d_arr) + + # create a valid box set + bset = BoxSet([[1, 2], [3, 4]]) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + bset.bounds = empty_2d_arr + + def test_error_on_non_numeric_bounds(self): + """ + Test that ValueError is raised if box set bounds + are set to array-like with entries of a non-numeric + type (such as int, float). + """ + # invalid bounds (contains an entry type str) + new_bounds = [[1, "test"], [3, 2]] + + exc_str = ( + r"Entry 'test' of the argument `bounds` " + r"is not a valid numeric type \(provided type 'str'\)" + ) + + # assert error on construction + with self.assertRaisesRegex(TypeError, exc_str): + BoxSet(new_bounds) + + # construct a valid box set + bset = BoxSet(bounds=[[1, 2], [3, 4]]) + + # assert error on update + with self.assertRaisesRegex(TypeError, exc_str): + bset.bounds = new_bounds + + def test_error_on_bounds_with_nan_or_inf(self): + """ + Box set bounds set to array-like with inf or nan. + """ + # construct a valid box set + bset = BoxSet(bounds=[[1, 2], [3, 4]]) + + for val_str in ["inf", "nan"]: + bad_bounds = [[1, float(val_str)], [2, 3]] + exc_str = ( + fr"Entry '{val_str}' of the argument `bounds` " + fr"is not a finite numeric value" + ) + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + BoxSet(bad_bounds) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + bset.bounds = bad_bounds def test_uncertainty_set_with_correct_params(self): ''' @@ -1238,9 +2508,11 @@ def test_uncertainty_set_with_correct_params(self): m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) - bounds = [(-1,1), (-1,1)] + bounds = [(-1, 1), (-1, 1)] _set = BoxSet(bounds=bounds) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) uncertain_params_in_expr = [] for con in m.uncertainty_set_contr.values(): for v in m.uncertain_param_vars.values(): @@ -1249,9 +2521,12 @@ def test_uncertainty_set_with_correct_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't uncertain_params_in_expr.append(v) - self.assertEqual([id(u) for u in uncertain_params_in_expr], [id(u) for u in m.uncertain_param_vars.values()], - msg="Uncertain param Var objects used to construct uncertainty set constraint must" - " be the same uncertain param Var objects in the original model.") + self.assertEqual( + [id(u) for u in uncertain_params_in_expr], + [id(u) for u in m.uncertain_param_vars.values()], + msg="Uncertain param Var objects used to construct uncertainty set constraint must" + " be the same uncertain param Var objects in the original model.", + ) def test_uncertainty_set_with_incorrect_params(self): ''' @@ -1263,10 +2538,14 @@ def test_uncertainty_set_with_incorrect_params(self): m.p1 = Var(initialize=0) m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] - m.uncertain_param_vars = Param(range(len(m.uncertain_params)), initialize=0, mutable=True) + m.uncertain_param_vars = Param( + range(len(m.uncertain_params)), initialize=0, mutable=True + ) bounds = [(-1, 1), (-1, 1)] _set = BoxSet(bounds=bounds) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) vars_in_expr = [] vars_in_expr = [] for con in m.uncertainty_set_contr.values(): @@ -1276,9 +2555,12 @@ def test_uncertainty_set_with_incorrect_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't vars_in_expr.append(v) - self.assertEqual(len(vars_in_expr), 0, - msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" - " variable expression.") + self.assertEqual( + len(vars_in_expr), + 0, + msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" + " variable expression.", + ) def test_point_in_set(self): m = ConcreteModel() @@ -1289,13 +2571,12 @@ def test_point_in_set(self): bounds = [(-1, 1), (-1, 1)] _set = BoxSet(bounds=bounds) - self.assertTrue(_set.point_in_set([0, 0]), - msg="Point is not in the BoxSet.") + self.assertTrue(_set.point_in_set([0, 0]), msg="Point is not in the BoxSet.") def test_add_bounds_on_uncertain_parameters(self): m = ConcreteModel() m.util = Block() - m.util.uncertain_param_vars = Var([0,1], initialize=0) + m.util.uncertain_param_vars = Var([0, 1], initialize=0) bounds = [(-1, 1), (-1, 1)] box_set = BoxSet(bounds=bounds) @@ -1304,30 +2585,86 @@ def test_add_bounds_on_uncertain_parameters(self): BoxSet.add_bounds_on_uncertain_parameters(model=m, config=config) - self.assertEqual(m.util.uncertain_param_vars[0].lb, -1, "Bounds not added correctly for BoxSet") - self.assertEqual(m.util.uncertain_param_vars[0].ub, 1, "Bounds not added correctly for BoxSet") - self.assertEqual(m.util.uncertain_param_vars[1].lb, -1, "Bounds not added correctly for BoxSet") - self.assertEqual(m.util.uncertain_param_vars[1].ub, 1, "Bounds not added correctly for BoxSet") + self.assertEqual( + m.util.uncertain_param_vars[0].lb, + -1, + "Bounds not added correctly for BoxSet", + ) + self.assertEqual( + m.util.uncertain_param_vars[0].ub, + 1, + "Bounds not added correctly for BoxSet", + ) + self.assertEqual( + m.util.uncertain_param_vars[1].lb, + -1, + "Bounds not added correctly for BoxSet", + ) + self.assertEqual( + m.util.uncertain_param_vars[1].ub, + 1, + "Bounds not added correctly for BoxSet", + ) + class testDiscreteUncertaintySetClass(unittest.TestCase): ''' Discrete uncertainty sets. Required inputis a scenarios list. ''' - def test_uncertainty_set_with_correct_params(self): - ''' - Case in which the UncertaintySet is constructed using the uncertain_param objects from the model to - which the uncertainty set constraint is being added. - ''' - m = ConcreteModel() - # At this stage, the separation problem has uncertain_params which are now Var objects - m.p1 = Var(initialize=0) - m.p2 = Var(initialize=0) - m.uncertain_params = [m.p1, m.p2] - m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) - scenarios = [(0,0), (1,0), (0,1), (1,1), (2,0)] + def test_normal_discrete_set_construction_and_update(self): + """ + Test DiscreteScenarioSet constructor and setter work normally + when scenarios are appropriate. + """ + scenarios = [[0, 0, 0], [1, 2, 3]] + + # normal construction should work + dset = DiscreteScenarioSet(scenarios) + + # check scenarios added appropriately + np.testing.assert_allclose( + scenarios, dset.scenarios, err_msg="BoxSet bounds not as expected" + ) + + # check scenarios updated appropriately + new_scenarios = [[0, 1, 2], [1, 2, 0], [3, 5, 4]] + dset.scenarios = new_scenarios + np.testing.assert_allclose( + new_scenarios, dset.scenarios, err_msg="BoxSet bounds not as expected" + ) + + def test_error_on_discrete_set_dim_change(self): + """ + Test ValueError raised when attempting to update + DiscreteScenarioSet dimension. + """ + scenarios = [[1, 2], [3, 4]] + dset = DiscreteScenarioSet(scenarios) # 2-dimensional set + + exc_str = ( + r".*must have 2 columns.* to match set dimension " + r"\(provided.*with 3 columns\)" + ) + with self.assertRaisesRegex(ValueError, exc_str): + dset.scenarios = [[1, 2, 3], [4, 5, 6]] + + def test_uncertainty_set_with_correct_params(self): + ''' + Case in which the UncertaintySet is constructed using the uncertain_param objects from the model to + which the uncertainty set constraint is being added. + ''' + m = ConcreteModel() + # At this stage, the separation problem has uncertain_params which are now Var objects + m.p1 = Var(initialize=0) + m.p2 = Var(initialize=0) + m.uncertain_params = [m.p1, m.p2] + m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) + scenarios = [(0, 0), (1, 0), (0, 1), (1, 1), (2, 0)] _set = DiscreteScenarioSet(scenarios=scenarios) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) uncertain_params_in_expr = [] for con in m.uncertainty_set_contr.values(): for v in m.uncertain_param_vars.values(): @@ -1336,10 +2673,12 @@ def test_uncertainty_set_with_correct_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't uncertain_params_in_expr.append(v) - - self.assertEqual([id(u) for u in uncertain_params_in_expr], [id(u) for u in m.uncertain_param_vars.values()], - msg="Uncertain param Var objects used to construct uncertainty set constraint must" - " be the same uncertain param Var objects in the original model.") + self.assertEqual( + [id(u) for u in uncertain_params_in_expr], + [id(u) for u in m.uncertain_param_vars.values()], + msg="Uncertain param Var objects used to construct uncertainty set constraint must" + " be the same uncertain param Var objects in the original model.", + ) def test_uncertainty_set_with_incorrect_params(self): ''' @@ -1351,10 +2690,14 @@ def test_uncertainty_set_with_incorrect_params(self): m.p1 = Var(initialize=0) m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] - m.uncertain_param_vars = Param(range(len(m.uncertain_params)), initialize=0, mutable=True) + m.uncertain_param_vars = Param( + range(len(m.uncertain_params)), initialize=0, mutable=True + ) scenarios = [(0, 0), (1, 0), (0, 1), (1, 1), (2, 0)] _set = DiscreteScenarioSet(scenarios=scenarios) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars + ) vars_in_expr = [] for con in m.uncertainty_set_contr.values(): for v in m.uncertain_param_vars.values(): @@ -1363,9 +2706,12 @@ def test_uncertainty_set_with_incorrect_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't vars_in_expr.append(v) - self.assertEqual(len(vars_in_expr), 0, - msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" - " variable expression.") + self.assertEqual( + len(vars_in_expr), + 0, + msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" + " variable expression.", + ) def test_point_in_set(self): m = ConcreteModel() @@ -1376,13 +2722,14 @@ def test_point_in_set(self): scenarios = [(0, 0), (1, 0), (0, 1), (1, 1), (2, 0)] _set = DiscreteScenarioSet(scenarios=scenarios) - self.assertTrue(_set.point_in_set([0, 0]), - msg="Point is not in the DiscreteScenarioSet.") + self.assertTrue( + _set.point_in_set([0, 0]), msg="Point is not in the DiscreteScenarioSet." + ) def test_add_bounds_on_uncertain_parameters(self): m = ConcreteModel() m.util = Block() - m.util.uncertain_param_vars = Var([0,1], initialize=0) + m.util.uncertain_param_vars = Var([0, 1], initialize=0) scenarios = [(0, 0), (1, 0), (0, 1), (1, 1), (2, 0)] _set = DiscreteScenarioSet(scenarios=scenarios) @@ -1391,13 +2738,30 @@ def test_add_bounds_on_uncertain_parameters(self): DiscreteScenarioSet.add_bounds_on_uncertain_parameters(model=m, config=config) - self.assertNotEqual(m.util.uncertain_param_vars[0].lb, None, "Bounds not added correctly for DiscreteScenarioSet") - self.assertNotEqual(m.util.uncertain_param_vars[0].ub, None, "Bounds not added correctly for DiscreteScenarioSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].lb, None, "Bounds not added correctly for DiscreteScenarioSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].ub, None, "Bounds not added correctly for DiscreteScenarioSet") + self.assertNotEqual( + m.util.uncertain_param_vars[0].lb, + None, + "Bounds not added correctly for DiscreteScenarioSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[0].ub, + None, + "Bounds not added correctly for DiscreteScenarioSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].lb, + None, + "Bounds not added correctly for DiscreteScenarioSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].ub, + None, + "Bounds not added correctly for DiscreteScenarioSet", + ) - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) def test_two_stg_model_discrete_set_single_scenario(self): """ Test two-stage model under discrete uncertainty with @@ -1415,15 +2779,13 @@ def test_two_stg_model_discrete_set_single_scenario(self): m.x3 = Var(initialize=0, bounds=(None, None)) # model constraints - m.con1 = Constraint(expr=m.x1 * m.u1**(0.5) - m.x2 * m.u1 <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u1 == m.x3) + m.con1 = Constraint(expr=m.x1 * m.u1 ** (0.5) - m.x2 * m.u1 <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u1 == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - m.u2) ** 2) # uncertainty set - discrete_set = DiscreteScenarioSet( - scenarios=[(1.125, 1)], - ) + discrete_set = DiscreteScenarioSet(scenarios=[(1.125, 1)]) # Instantiate PyROS solver pyros_solver = SolverFactory("pyros") @@ -1444,14 +2806,14 @@ def test_two_stg_model_discrete_set_single_scenario(self): options={ "objective_focus": ObjectiveType.worst_case, "solve_master_globally": True, - } + }, ) # check successful termination self.assertEqual( results.pyros_termination_condition, pyrosTerminationCondition.robust_optimal, - msg="Did not identify robust optimal solution to problem instance." + msg="Did not identify robust optimal solution to problem instance.", ) # only one iteration required @@ -1461,7 +2823,49 @@ def test_two_stg_model_discrete_set_single_scenario(self): msg=( "PyROS was unable to solve a singleton discrete set instance " " successfully within a single iteration." - ) + ), + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) + def test_two_stg_model_discrete_set(self): + """ + Test PyROS successfully solves two-stage model with + multiple scenarios. + """ + m = ConcreteModel() + m.x1 = Var(bounds=(0, 10)) + m.x2 = Var(bounds=(0, 10)) + m.u = Param(mutable=True, initialize=1.125) + m.con = Constraint(expr=sqrt(m.u) * m.x1 - m.u * m.x2 <= 2) + m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - m.u) ** 2) + + discrete_set = DiscreteScenarioSet(scenarios=[[0.25], [1.125], [2]]) + + global_solver = SolverFactory("baron") + pyros_solver = SolverFactory("pyros") + + res = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.u], + uncertainty_set=discrete_set, + local_solver=global_solver, + global_solver=global_solver, + decision_rule_order=0, + solve_master_globally=True, + objective_focus=ObjectiveType.worst_case, + ) + + self.assertEqual( + res.pyros_termination_condition, + pyrosTerminationCondition.robust_optimal, + msg=( + "Failed to solve discrete set multiple scenarios instance to " + "robust optimality" + ), ) @@ -1470,6 +2874,159 @@ class testFactorModelUncertaintySetClass(unittest.TestCase): FactorModelSet uncertainty sets. Required inputs are psi_matrix, number_of_factors, origin and beta. ''' + def test_normal_factor_model_construction_and_update(self): + """ + Test FactorModelSet constructor and setter work normally + when attribute values are appropriate. + """ + # valid inputs + fset = FactorModelSet( + origin=[0, 0, 1], + number_of_factors=2, + psi_mat=[[1, 2], [0, 1], [1, 0]], + beta=0.1, + ) + + # check attributes are as expected + np.testing.assert_allclose(fset.origin, [0, 0, 1]) + np.testing.assert_allclose(fset.psi_mat, [[1, 2], [0, 1], [1, 0]]) + np.testing.assert_allclose(fset.number_of_factors, 2) + np.testing.assert_allclose(fset.beta, 0.1) + self.assertEqual(fset.dim, 3) + + # update the set + fset.origin = [1, 1, 0] + fset.psi_mat = [[1, 0], [0, 1], [1, 1]] + fset.beta = 0.5 + + # check updates work + np.testing.assert_allclose(fset.origin, [1, 1, 0]) + np.testing.assert_allclose(fset.psi_mat, [[1, 0], [0, 1], [1, 1]]) + np.testing.assert_allclose(fset.beta, 0.5) + + def test_error_on_factor_model_set_dim_change(self): + """ + Test ValueError raised when attempting to change FactorModelSet + dimension (by changing number of entries in origin + or number of rows of psi_mat). + """ + origin = [0, 0, 0] + number_of_factors = 2 + psi_mat = [[1, 0], [0, 1], [1, 1]] + beta = 0.5 + + # construct factor model set + fset = FactorModelSet(origin, number_of_factors, psi_mat, beta) + + # assert error on psi mat update + exc_str = ( + r"should be of shape \(3, 2\) to match.*dimensions " + r"\(provided shape \(2, 2\)\)" + ) + with self.assertRaisesRegex(ValueError, exc_str): + fset.psi_mat = [[1, 0], [1, 2]] + + # assert error on origin update + exc_str = r"Attempting.*factor model set of dimension 3 to value of dimension 2" + with self.assertRaisesRegex(ValueError, exc_str): + fset.origin = [1, 3] + + def test_error_on_invalid_number_of_factors(self): + """ + Test ValueError raised if number of factors + is negative int, or AttributeError + if attempting to update (should be immutable). + """ + exc_str = r".*'number_of_factors' must be a positive int \(provided value -1\)" + with self.assertRaisesRegex(ValueError, exc_str): + FactorModelSet(origin=[0], number_of_factors=-1, psi_mat=[[1, 1]], beta=0.1) + + fset = FactorModelSet( + origin=[0], number_of_factors=2, psi_mat=[[1, 1]], beta=0.1 + ) + + exc_str = r".*'number_of_factors' is immutable" + with self.assertRaisesRegex(AttributeError, exc_str): + fset.number_of_factors = 3 + + def test_error_on_invalid_beta(self): + """ + Test ValueError raised if beta is invalid (exceeds 1 or + is negative) + """ + origin = [0, 0, 0] + number_of_factors = 2 + psi_mat = [[1, 0], [0, 1], [1, 1]] + neg_beta = -0.5 + big_beta = 1.5 + + # assert error on construction + neg_exc_str = ( + r".*must be a real number between 0 and 1.*\(provided value -0.5\)" + ) + big_exc_str = r".*must be a real number between 0 and 1.*\(provided value 1.5\)" + with self.assertRaisesRegex(ValueError, neg_exc_str): + FactorModelSet(origin, number_of_factors, psi_mat, neg_beta) + with self.assertRaisesRegex(ValueError, big_exc_str): + FactorModelSet(origin, number_of_factors, psi_mat, big_beta) + + # create a valid factor model set + fset = FactorModelSet(origin, number_of_factors, psi_mat, 1) + + # assert error on update + with self.assertRaisesRegex(ValueError, neg_exc_str): + fset.beta = neg_beta + with self.assertRaisesRegex(ValueError, big_exc_str): + fset.beta = big_beta + + @unittest.skipUnless( + SolverFactory("cbc").available(exception_flag=False), + "LP solver CBC not available", + ) + def test_factor_model_parameter_bounds_correct(self): + """ + If LP solver is available, test parameter bounds method + for factor model set is correct (check against + results from an LP solver). + """ + solver = SolverFactory("cbc") + + # four cases where prior parameter bounds + # approximations were probably too tight + fset1 = FactorModelSet( + origin=[0, 0], + number_of_factors=3, + psi_mat=[[1, -1, 1], [1, 0.1, 1]], + beta=1 / 6, + ) + fset2 = FactorModelSet( + origin=[0], number_of_factors=3, psi_mat=[[1, 6, 8]], beta=1 / 2 + ) + fset3 = FactorModelSet( + origin=[1], number_of_factors=2, psi_mat=[[1, 2]], beta=1 / 4 + ) + fset4 = FactorModelSet( + origin=[1], number_of_factors=3, psi_mat=[[-1, -6, -8]], beta=1 / 2 + ) + + # check parameter bounds matches LP results + # exactly for each case + for fset in [fset1, fset2, fset3, fset4]: + param_bounds = fset.parameter_bounds + lp_param_bounds = eval_parameter_bounds(fset, solver) + + self.assertTrue( + np.allclose(param_bounds, lp_param_bounds), + msg=( + "Parameter bounds not consistent with LP values for " + "FactorModelSet with parameterization:\n" + f"F={fset.number_of_factors},\n" + f"beta={fset.beta},\n" + f"psi_mat={fset.psi_mat},\n" + f"origin={fset.origin}." + ), + ) + @unittest.skipIf(not numpy_available, 'Numpy is not available.') def test_uncertainty_set_with_correct_params(self): ''' @@ -1483,14 +3040,18 @@ def test_uncertainty_set_with_correct_params(self): m.uncertain_params = [m.p1, m.p2] m.util = Block() m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) - F=1 + F = 1 psi_mat = np.zeros(shape=(len(m.uncertain_params), F)) for i in range(len(psi_mat)): random_row_entries = list(np.random.uniform(low=0, high=0.2, size=F)) for j in range(len(psi_mat[i])): psi_mat[i][j] = random_row_entries[j] - _set = FactorModelSet(origin=[0,0], psi_mat=psi_mat, number_of_factors=F, beta=1) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars, model=m) + _set = FactorModelSet( + origin=[0, 0], psi_mat=psi_mat, number_of_factors=F, beta=1 + ) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars, model=m + ) uncertain_params_in_expr = [] for con in m.uncertainty_set_contr.values(): for v in m.uncertain_param_vars.values(): @@ -1499,10 +3060,12 @@ def test_uncertainty_set_with_correct_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't uncertain_params_in_expr.append(v) - - self.assertEqual([id(u) for u in uncertain_params_in_expr], [id(u) for u in m.uncertain_param_vars.values()], - msg="Uncertain param Var objects used to construct uncertainty set constraint must" - " be the same uncertain param Var objects in the original model.") + self.assertEqual( + [id(u) for u in uncertain_params_in_expr], + [id(u) for u in m.uncertain_param_vars.values()], + msg="Uncertain param Var objects used to construct uncertainty set constraint must" + " be the same uncertain param Var objects in the original model.", + ) @unittest.skipIf(not numpy_available, 'Numpy is not available.') def test_uncertainty_set_with_incorrect_params(self): @@ -1516,15 +3079,21 @@ def test_uncertainty_set_with_incorrect_params(self): m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] m.util = Block() - m.uncertain_param_vars = Param(range(len(m.uncertain_params)), initialize=0, mutable=True) + m.uncertain_param_vars = Param( + range(len(m.uncertain_params)), initialize=0, mutable=True + ) F = 1 psi_mat = np.zeros(shape=(len(m.uncertain_params), F)) for i in range(len(psi_mat)): random_row_entries = list(np.random.uniform(low=0, high=0.2, size=F)) for j in range(len(psi_mat[i])): psi_mat[i][j] = random_row_entries[j] - _set = FactorModelSet(origin=[0, 0], psi_mat=psi_mat, number_of_factors=F, beta=1) - m.uncertainty_set_contr = _set.set_as_constraint(uncertain_params=m.uncertain_param_vars, model=m) + _set = FactorModelSet( + origin=[0, 0], psi_mat=psi_mat, number_of_factors=F, beta=1 + ) + m.uncertainty_set_contr = _set.set_as_constraint( + uncertain_params=m.uncertain_param_vars, model=m + ) vars_in_expr = [] vars_in_expr = [] for con in m.uncertainty_set_contr.values(): @@ -1534,9 +3103,12 @@ def test_uncertainty_set_with_incorrect_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't vars_in_expr.append(v) - self.assertEqual(len(vars_in_expr), 0, - msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" - " variable expression.") + self.assertEqual( + len(vars_in_expr), + 0, + msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" + " variable expression.", + ) def test_point_in_set(self): m = ConcreteModel() @@ -1551,14 +3123,17 @@ def test_point_in_set(self): random_row_entries = list(np.random.uniform(low=0, high=0.2, size=F)) for j in range(len(psi_mat[i])): psi_mat[i][j] = random_row_entries[j] - _set = FactorModelSet(origin=[0, 0], psi_mat=psi_mat, number_of_factors=F, beta=1) - self.assertTrue(_set.point_in_set([0, 0]), - msg="Point is not in the FactorModelSet.") + _set = FactorModelSet( + origin=[0, 0], psi_mat=psi_mat, number_of_factors=F, beta=1 + ) + self.assertTrue( + _set.point_in_set([0, 0]), msg="Point is not in the FactorModelSet." + ) def test_add_bounds_on_uncertain_parameters(self): m = ConcreteModel() m.util = Block() - m.util.uncertain_param_vars = Var([0,1], initialize=0) + m.util.uncertain_param_vars = Var([0, 1], initialize=0) F = 1 psi_mat = np.zeros(shape=(len(list(m.util.uncertain_param_vars.values())), F)) @@ -1566,24 +3141,218 @@ def test_add_bounds_on_uncertain_parameters(self): random_row_entries = list(np.random.uniform(low=0, high=0.2, size=F)) for j in range(len(psi_mat[i])): psi_mat[i][j] = random_row_entries[j] - _set = FactorModelSet(origin=[0, 0], psi_mat=psi_mat, number_of_factors=F, beta=1) + _set = FactorModelSet( + origin=[0, 0], psi_mat=psi_mat, number_of_factors=F, beta=1 + ) config = Block() config.uncertainty_set = _set FactorModelSet.add_bounds_on_uncertain_parameters(model=m, config=config) - self.assertNotEqual(m.util.uncertain_param_vars[0].lb, None, "Bounds not added correctly for FactorModelSet") - self.assertNotEqual(m.util.uncertain_param_vars[0].ub, None, "Bounds not added correctly for FactorModelSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].lb, None, "Bounds not added correctly for FactorModelSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].ub, None, "Bounds not added correctly for FactorModelSet") + self.assertNotEqual( + m.util.uncertain_param_vars[0].lb, + None, + "Bounds not added correctly for FactorModelSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[0].ub, + None, + "Bounds not added correctly for FactorModelSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].lb, + None, + "Bounds not added correctly for FactorModelSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].ub, + None, + "Bounds not added correctly for FactorModelSet", + ) + class testIntersectionSetClass(unittest.TestCase): - ''' - Intersection uncertainty sets. Required input is set objects to intersect, and set_as_constraint requires - a NLP solver to confirm the intersection is not empty. - ''' + """ + Unit tests for the IntersectionSet class. + Required input is set objects to intersect, + and set_as_constraint requires + an NLP solver to confirm the intersection is not empty. + """ - @unittest.skipUnless(SolverFactory('ipopt').available(exception_flag=False), "Local NLP solver is not available.") + def test_normal_construction_and_update(self): + """ + Test IntersectionSet constructor and setter + work normally when arguments are appropriate. + """ + bset = BoxSet(bounds=[[-1, 1], [-1, 1], [-1, 1]]) + aset = AxisAlignedEllipsoidalSet([0, 0, 0], [1, 1, 1]) + + iset = IntersectionSet(box_set=bset, axis_aligned_set=aset) + self.assertIn( + bset, + iset.all_sets, + msg=( + "IntersectionSet 'all_sets' attribute does not" + "contain expected BoxSet" + ), + ) + self.assertIn( + aset, + iset.all_sets, + msg=( + "IntersectionSet 'all_sets' attribute does not" + "contain expected AxisAlignedEllipsoidalSet" + ), + ) + + def test_error_on_intersecting_wrong_dims(self): + """ + Test ValueError raised if IntersectionSet sets + are not of same dimension. + """ + bset = BoxSet(bounds=[[-1, 1], [-1, 1]]) + aset = AxisAlignedEllipsoidalSet([0, 0], [2, 2]) + wrong_aset = AxisAlignedEllipsoidalSet([0, 0, 0], [1, 1, 1]) + + exc_str = r".*of dimension 2, but attempting to add set of dimension 3" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + IntersectionSet(box_set=bset, axis_set=aset, wrong_set=wrong_aset) + + # construct a valid intersection set + iset = IntersectionSet(box_set=bset, axis_set=aset) + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + iset.all_sets.append(wrong_aset) + + def test_type_error_on_invalid_arg(self): + """ + Test TypeError raised if an argument not of type + UncertaintySet is passed to the IntersectionSet + constructor or appended to 'all_sets'. + """ + bset = BoxSet(bounds=[[-1, 1], [-1, 1]]) + aset = AxisAlignedEllipsoidalSet([0, 0], [2, 2]) + + exc_str = ( + r"Entry '1' of the argument `all_sets` is not An `UncertaintySet` " + r"object.*\(provided type 'int'\)" + ) + + # assert error on construction + with self.assertRaisesRegex(TypeError, exc_str): + IntersectionSet(box_set=bset, axis_set=aset, invalid_arg=1) + + # construct a valid intersection set + iset = IntersectionSet(box_set=bset, axis_set=aset) + + # assert error on update + with self.assertRaisesRegex(TypeError, exc_str): + iset.all_sets.append(1) + + def test_error_on_intersection_dim_change(self): + """ + IntersectionSet dimension is considered immutable. + Test ValueError raised when attempting to set the + constituent sets to a different dimension. + """ + bset = BoxSet(bounds=[[-1, 1], [-1, 1]]) + aset = AxisAlignedEllipsoidalSet([0, 0], [2, 2]) + + # construct the set + iset = IntersectionSet(box_set=bset, axis_set=aset) + + exc_str = r"Attempting to set.*dimension 2 to a sequence.* of dimension 1" + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + # attempt to set to 1-dimensional sets + iset.all_sets = [BoxSet([[1, 1]]), AxisAlignedEllipsoidalSet([0], [1])] + + def test_error_on_too_few_sets(self): + """ + Check ValueError raised if too few sets are passed + to the intersection set. + """ + exc_str = r"Attempting.*minimum required length 2.*iterable of length 1" + + # assert error on construction + with self.assertRaisesRegex(ValueError, exc_str): + IntersectionSet(bset=BoxSet([[1, 2]])) + + # construct a valid intersection set + iset = IntersectionSet( + box_set=BoxSet([[1, 2]]), axis_set=AxisAlignedEllipsoidalSet([0], [1]) + ) + + # assert error on update + with self.assertRaisesRegex(ValueError, exc_str): + # attempt to set to 1-dimensional sets + iset.all_sets = [BoxSet([[1, 1]])] + + def test_intersection_uncertainty_set_list_behavior(self): + """ + Test the 'all_sets' attribute of the IntersectionSet + class behaves like a regular Python list. + """ + iset = IntersectionSet( + bset=BoxSet([[0, 2]]), aset=AxisAlignedEllipsoidalSet([0], [1]) + ) + + # an UncertaintySetList of length 2. + # should behave like a list of length 2 + all_sets = iset.all_sets + + # test append + all_sets.append(BoxSet([[1, 2]])) + del all_sets[2:] + + # test extend + all_sets.extend([BoxSet([[1, 2]]), EllipsoidalSet([0], [[1]], 2)]) + del all_sets[2:] + + # index in range. Allow slicing as well + # none of these should result in exception + all_sets[0] + all_sets[1] + all_sets[100:] + all_sets[0:2:20] + all_sets[0:2:1] + all_sets[-20:-1:2] + + # index out of range + self.assertRaises(IndexError, lambda: all_sets[2]) + self.assertRaises(IndexError, lambda: all_sets[-3]) + + # assert min length ValueError if attempting to clear + # list to length less than 2 + with self.assertRaisesRegex(ValueError, r"Length.* must be at least 2"): + all_sets[:] = all_sets[0] + with self.assertRaisesRegex(ValueError, r"Length.* must be at least 2"): + del all_sets[1] + with self.assertRaisesRegex(ValueError, r"Length.* must be at least 2"): + del all_sets[1:] + with self.assertRaisesRegex(ValueError, r"Length.* must be at least 2"): + del all_sets[:] + with self.assertRaisesRegex(ValueError, r"Length.* must be at least 2"): + all_sets.clear() + with self.assertRaisesRegex(ValueError, r"Length.* must be at least 2"): + all_sets[0:] = [] + + # assignment out of range + with self.assertRaisesRegex(IndexError, r"assignment index out of range"): + all_sets[-3] = BoxSet([[1, 1.5]]) + with self.assertRaisesRegex(IndexError, r"assignment index out of range"): + all_sets[2] = BoxSet([[1, 1.5]]) + + # assigning to slices should work fine + all_sets[3:] = [BoxSet([[1, 1.5]]), BoxSet([[1, 3]])] + + @unittest.skipUnless( + SolverFactory('ipopt').available(exception_flag=False), + "Local NLP solver is not available.", + ) def test_uncertainty_set_with_correct_params(self): ''' Case in which the UncertaintySet is constructed using the uncertain_param objects from the model to @@ -1595,7 +3364,7 @@ def test_uncertainty_set_with_correct_params(self): m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] m.uncertain_param_vars = Var(range(len(m.uncertain_params)), initialize=0) - bounds = [(-1,1), (-1,1)] + bounds = [(-1, 1), (-1, 1)] Q1 = BoxSet(bounds=bounds) Q2 = AxisAlignedEllipsoidalSet(center=[0, 0], half_lengths=[2, 1]) Q = IntersectionSet(Q1=Q1, Q2=Q2) @@ -1604,7 +3373,9 @@ def test_uncertainty_set_with_correct_params(self): solver = SolverFactory("ipopt") config.declare("global_solver", ConfigValue(default=solver)) - m.uncertainty_set_contr = Q.set_as_constraint(uncertain_params=m.uncertain_param_vars, config=config) + m.uncertainty_set_contr = Q.set_as_constraint( + uncertain_params=m.uncertain_param_vars, config=config + ) uncertain_params_in_expr = [] for con in m.uncertainty_set_contr.values(): for v in m.uncertain_param_vars.values(): @@ -1613,11 +3384,17 @@ def test_uncertainty_set_with_correct_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't uncertain_params_in_expr.append(v) - self.assertEqual([id(u) for u in uncertain_params_in_expr], [id(u) for u in m.uncertain_param_vars.values()], - msg="Uncertain param Var objects used to construct uncertainty set constraint must" - " be the same uncertain param Var objects in the original model.") + self.assertEqual( + [id(u) for u in uncertain_params_in_expr], + [id(u) for u in m.uncertain_param_vars.values()], + msg="Uncertain param Var objects used to construct uncertainty set constraint must" + " be the same uncertain param Var objects in the original model.", + ) - @unittest.skipUnless(SolverFactory('ipopt').available(exception_flag=False), "Local NLP solver is not available.") + @unittest.skipUnless( + SolverFactory('ipopt').available(exception_flag=False), + "Local NLP solver is not available.", + ) def test_uncertainty_set_with_incorrect_params(self): ''' Case in which the set is constructed using uncertain_param objects which are Params instead of @@ -1628,7 +3405,9 @@ def test_uncertainty_set_with_incorrect_params(self): m.p1 = Var(initialize=0) m.p2 = Var(initialize=0) m.uncertain_params = [m.p1, m.p2] - m.uncertain_param_vars = Param(range(len(m.uncertain_params)), initialize=0, mutable=True) + m.uncertain_param_vars = Param( + range(len(m.uncertain_params)), initialize=0, mutable=True + ) bounds = [(-1, 1), (-1, 1)] Q1 = BoxSet(bounds=bounds) @@ -1639,7 +3418,9 @@ def test_uncertainty_set_with_incorrect_params(self): config = ConfigBlock() config.declare("global_solver", ConfigValue(default=solver)) - m.uncertainty_set_contr = Q.set_as_constraint(uncertain_params=m.uncertain_param_vars, config=config) + m.uncertainty_set_contr = Q.set_as_constraint( + uncertain_params=m.uncertain_param_vars, config=config + ) vars_in_expr = [] for con in m.uncertainty_set_contr.values(): for v in m.uncertain_param_vars.values(): @@ -1648,9 +3429,12 @@ def test_uncertainty_set_with_incorrect_params(self): # Not using ID here leads to it thinking both are in the list already when they aren't vars_in_expr.append(v) - self.assertEqual(len(vars_in_expr), 0, - msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" - " variable expression.") + self.assertEqual( + len(vars_in_expr), + 0, + msg="Uncertainty set constraint contains no Var objects, consists of a not potentially" + " variable expression.", + ) def test_point_in_set(self): m = ConcreteModel() @@ -1663,10 +3447,11 @@ def test_point_in_set(self): Q1 = BoxSet(bounds=bounds) Q2 = BoxSet(bounds=[(-2, 1), (-1, 2)]) Q = IntersectionSet(Q1=Q1, Q2=Q2) - self.assertTrue(Q.point_in_set([0, 0]), - msg="Point is not in the IntersectionSet.") + self.assertTrue( + Q.point_in_set([0, 0]), msg="Point is not in the IntersectionSet." + ) - @unittest.skipUnless(SolverFactory('baron').available(exception_flag=False), "Global NLP solver is not available.") + @unittest.skipUnless(baron_available, "Global NLP solver is not available.") def test_add_bounds_on_uncertain_parameters(self): m = ConcreteModel() m.util = Block() @@ -1682,97 +3467,143 @@ def test_add_bounds_on_uncertain_parameters(self): IntersectionSet.add_bounds_on_uncertain_parameters(m, config) - self.assertNotEqual(m.util.uncertain_param_vars[0].lb, None, "Bounds not added correctly for IntersectionSet") - self.assertNotEqual(m.util.uncertain_param_vars[0].ub, None, "Bounds not added correctly for IntersectionSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].lb, None, "Bounds not added correctly for IntersectionSet") - self.assertNotEqual(m.util.uncertain_param_vars[1].ub, None, "Bounds not added correctly for IntersectionSet") + self.assertNotEqual( + m.util.uncertain_param_vars[0].lb, + None, + "Bounds not added correctly for IntersectionSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[0].ub, + None, + "Bounds not added correctly for IntersectionSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].lb, + None, + "Bounds not added correctly for IntersectionSet", + ) + self.assertNotEqual( + m.util.uncertain_param_vars[1].ub, + None, + "Bounds not added correctly for IntersectionSet", + ) + # === master_problem_methods.py class testInitialConstructMaster(unittest.TestCase): - ''' - - ''' - def test_initial_construct_master(self): model_data = MasterProblemData() model_data.timing = None model_data.working_model = ConcreteModel() master_data = initial_construct_master(model_data) - self.assertTrue(hasattr(master_data, "master_model"), - msg="Initial construction of master problem " - "did not create a master problem ConcreteModel object.") - -class testAddScenarioToMaster(unittest.TestCase): - ''' + self.assertTrue( + hasattr(master_data, "master_model"), + msg="Initial construction of master problem " + "did not create a master problem ConcreteModel object.", + ) - ''' +class testAddScenarioToMaster(unittest.TestCase): def test_add_scenario_to_master(self): working_model = ConcreteModel() - working_model.p = Param([1,2],initialize=0,mutable=True) + working_model.p = Param([1, 2], initialize=0, mutable=True) working_model.x = Var() model_data = MasterProblemData() model_data.working_model = working_model model_data.timing = None master_data = initial_construct_master(model_data) - master_data.master_model.scenarios[0,0].transfer_attributes_from(working_model.clone()) + master_data.master_model.scenarios[0, 0].transfer_attributes_from( + working_model.clone() + ) master_data.master_model.scenarios[0, 0].util = Block() - master_data.master_model.scenarios[0, 0].util.first_stage_variables = \ - [master_data.master_model.scenarios[0,0].x] - master_data.master_model.scenarios[0,0].util.uncertain_params = [master_data.master_model.scenarios[0,0].p[1], - master_data.master_model.scenarios[0,0].p[2]] - add_scenario_to_master(master_data, violations=[1,1]) + master_data.master_model.scenarios[0, 0].util.first_stage_variables = [ + master_data.master_model.scenarios[0, 0].x + ] + master_data.master_model.scenarios[0, 0].util.uncertain_params = [ + master_data.master_model.scenarios[0, 0].p[1], + master_data.master_model.scenarios[0, 0].p[2], + ] + add_scenario_to_master(master_data, violations=[1, 1]) + + self.assertEqual( + len(master_data.master_model.scenarios), + 2, + msg="Scenario not added to master correctly. Expected 2 scenarios.", + ) - self.assertEqual(len(master_data.master_model.scenarios), 2, msg="Scenario not added to master correctly. " - "Expected 2 scenarios.") global_solver = "baron" -class testSolveMaster(unittest.TestCase): - @unittest.skipUnless(SolverFactory('baron').available(exception_flag=False), "Global NLP solver is not available.") + +class testSolveMaster(unittest.TestCase): + @unittest.skipUnless(baron_available, "Global NLP solver is not available.") def test_solve_master(self): working_model = m = ConcreteModel() - m.x = Var(initialize=0.5, bounds=(0,10)) - m.y = Var(initialize=1.0, bounds=(0,5)) + m.x = Var(initialize=0.5, bounds=(0, 10)) + m.y = Var(initialize=1.0, bounds=(0, 5)) m.z = Var(initialize=0, bounds=(None, None)) m.p = Param(initialize=1, mutable=True) m.obj = Objective(expr=m.x) - m.con = Constraint(expr = m.x + m.y + m.z <= 3) + m.con = Constraint(expr=m.x + m.y + m.z <= 3) model_data = MasterProblemData() model_data.working_model = working_model model_data.timing = None model_data.iteration = 0 master_data = initial_construct_master(model_data) - master_data.master_model.scenarios[0, 0].transfer_attributes_from(working_model.clone()) + master_data.master_model.scenarios[0, 0].transfer_attributes_from( + working_model.clone() + ) master_data.master_model.scenarios[0, 0].util = Block() - master_data.master_model.scenarios[0, 0].util.first_stage_variables = \ - [master_data.master_model.scenarios[0, 0].x] + master_data.master_model.scenarios[0, 0].util.first_stage_variables = [ + master_data.master_model.scenarios[0, 0].x + ] master_data.master_model.scenarios[0, 0].util.decision_rule_vars = [] master_data.master_model.scenarios[0, 0].util.second_stage_variables = [] - master_data.master_model.scenarios[0, 0].util.uncertain_params = [master_data.master_model.scenarios[0, 0].p] + master_data.master_model.scenarios[0, 0].util.uncertain_params = [ + master_data.master_model.scenarios[0, 0].p + ] master_data.master_model.scenarios[0, 0].first_stage_objective = 0 - master_data.master_model.scenarios[0, 0].second_stage_objective = \ - Expression(expr=master_data.master_model.scenarios[0, 0].x) + master_data.master_model.scenarios[0, 0].second_stage_objective = Expression( + expr=master_data.master_model.scenarios[0, 0].x + ) master_data.iteration = 0 - box_set = BoxSet(bounds=[(0,2)]) + master_data.timing = Bunch() + + box_set = BoxSet(bounds=[(0, 2)]) solver = SolverFactory(global_solver) config = ConfigBlock() - config.declare("backup_global_solvers",ConfigValue(default=[])) + config.declare("backup_global_solvers", ConfigValue(default=[])) config.declare("backup_local_solvers", ConfigValue(default=[])) config.declare("solve_master_globally", ConfigValue(default=True)) config.declare("global_solver", ConfigValue(default=solver)) config.declare("tee", ConfigValue(default=False)) config.declare("decision_rule_order", ConfigValue(default=1)) config.declare("objective_focus", ConfigValue(default=ObjectiveType.worst_case)) - config.declare("second_stage_variables", ConfigValue(default=master_data.master_model.scenarios[0, 0].util.second_stage_variables)) + config.declare( + "second_stage_variables", + ConfigValue( + default=master_data.master_model.scenarios[ + 0, 0 + ].util.second_stage_variables + ), + ) config.declare("subproblem_file_directory", ConfigValue(default=None)) - master_soln = solve_master(master_data, config) - self.assertEqual(master_soln.termination_condition, TerminationCondition.optimal, - msg="Could not solve simple master problem with solve_master function.") + config.declare("time_limit", ConfigValue(default=None)) + + with time_code(master_data.timing, "total", is_main_timer=True): + master_soln = solve_master(master_data, config) + self.assertEqual( + master_soln.termination_condition, + TerminationCondition.optimal, + msg=( + "Could not solve simple master problem with solve_master " + "function." + ), + ) + # === regression test for the solver class coefficientMatchingTests(unittest.TestCase): - def test_coefficient_matching_correct_num_constraints_added(self): # Write the deterministic Pyomo model m = ConcreteModel() @@ -1781,7 +3612,13 @@ def test_coefficient_matching_correct_num_constraints_added(self): m.u = Param(initialize=1.125, mutable=True) m.con = Constraint(expr=m.u ** (0.5) * m.x1 - m.u * m.x2 <= 2) - m.eq_con = Constraint(expr = m.u**2 * (m.x2- 1) + m.u * (m.x1**3 + 0.5) - 5 * m.u * m.x1 * m.x2 + m.u * (m.x1 + 2) == 0) + m.eq_con = Constraint( + expr=m.u**2 * (m.x2 - 1) + + m.u * (m.x1**3 + 0.5) + - 5 * m.u * m.x1 * m.x2 + + m.u * (m.x1 + 2) + == 0 + ) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) config = Block() @@ -1797,12 +3634,23 @@ def test_coefficient_matching_correct_num_constraints_added(self): m.util.h_x_q_constraints = ComponentSet() - coeff_matching_success, robust_infeasible = coefficient_matching(m, m.eq_con, [m.u], config) + coeff_matching_success, robust_infeasible = coefficient_matching( + m, m.eq_con, [m.u], config + ) - self.assertEqual(coeff_matching_success, True, msg="Coefficient matching was unsuccessful.") - self.assertEqual(robust_infeasible, False, msg="Coefficient matching detected a robust infeasible constraint (1 == 0).") - self.assertEqual(len(m.coefficient_matching_constraints), 2, - msg="Coefficient matching produced incorrect number of h(x,q)=0 constraints.") + self.assertEqual( + coeff_matching_success, True, msg="Coefficient matching was unsuccessful." + ) + self.assertEqual( + robust_infeasible, + False, + msg="Coefficient matching detected a robust infeasible constraint (1 == 0).", + ) + self.assertEqual( + len(m.coefficient_matching_constraints), + 2, + msg="Coefficient matching produced incorrect number of h(x,q)=0 constraints.", + ) config.decision_rule_order = 1 model_data = Block() @@ -1814,11 +3662,21 @@ def test_coefficient_matching_correct_num_constraints_added(self): add_decision_rule_variables(model_data=model_data, config=config) add_decision_rule_constraints(model_data=model_data, config=config) - coeff_matching_success, robust_infeasible = coefficient_matching(m, m.eq_con, [m.u], config) - self.assertEqual(coeff_matching_success, False, msg="Coefficient matching should have been " - "unsuccessful for higher order polynomial expressions.") - self.assertEqual(robust_infeasible, False, msg="Coefficient matching is not successful, " - "but should not be proven robust infeasible.") + coeff_matching_success, robust_infeasible = coefficient_matching( + m, m.eq_con, [m.u], config + ) + self.assertEqual( + coeff_matching_success, + False, + msg="Coefficient matching should have been " + "unsuccessful for higher order polynomial expressions.", + ) + self.assertEqual( + robust_infeasible, + False, + msg="Coefficient matching is not successful, " + "but should not be proven robust infeasible.", + ) def test_coefficient_matching_robust_infeasible_proof(self): # Write the deterministic Pyomo model @@ -1828,7 +3686,13 @@ def test_coefficient_matching_robust_infeasible_proof(self): m.u = Param(initialize=1.125, mutable=True) m.con = Constraint(expr=m.u ** (0.5) * m.x1 - m.u * m.x2 <= 2) - m.eq_con = Constraint(expr = m.u * (m.x1**3 + 0.5) - 5 * m.u * m.x1 * m.x2 + m.u * (m.x1 + 2) + m.u**2 == 0) + m.eq_con = Constraint( + expr=m.u * (m.x1**3 + 0.5) + - 5 * m.u * m.x1 * m.x2 + + m.u * (m.x1 + 2) + + m.u**2 + == 0 + ) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) config = Block() @@ -1844,16 +3708,25 @@ def test_coefficient_matching_robust_infeasible_proof(self): m.util.h_x_q_constraints = ComponentSet() - coeff_matching_success, robust_infeasible = coefficient_matching(m, m.eq_con, [m.u], config) + coeff_matching_success, robust_infeasible = coefficient_matching( + m, m.eq_con, [m.u], config + ) + + self.assertEqual( + coeff_matching_success, + False, + msg="Coefficient matching should have been unsuccessful.", + ) + self.assertEqual( + robust_infeasible, + True, + msg="Coefficient matching should be proven robust infeasible.", + ) - self.assertEqual(coeff_matching_success, False, msg="Coefficient matching should have been " - "unsuccessful.") - self.assertEqual(robust_infeasible, True, msg="Coefficient matching should be proven robust infeasible.") # === regression test for the solver -@unittest.skipUnless(SolverFactory('baron').available(exception_flag=False), "Global NLP solver is not available.") +@unittest.skipUnless(baron_available, "Global NLP solver is not available.") class RegressionTest(unittest.TestCase): - def regression_test_constant_drs(self): model = m = ConcreteModel() m.name = "s381" @@ -1877,16 +3750,20 @@ def regression_test_constant_drs(self): box_set = BoxSet(bounds=[(1.8, 2.2)]) solver = SolverFactory("baron") pyros = SolverFactory("pyros") - results = pyros.solve(model=m, - first_stage_variables=m.decision_vars, - second_stage_variables=[], - uncertain_params=[m.p[1]], - uncertainty_set=box_set, - local_solver=solver, - global_solver=solver, - options={"objective_focus":ObjectiveType.nominal}) - self.assertTrue(results.pyros_termination_condition, - pyrosTerminationCondition.robust_feasible) + results = pyros.solve( + model=m, + first_stage_variables=m.decision_vars, + second_stage_variables=[], + uncertain_params=[m.p[1]], + uncertainty_set=box_set, + local_solver=solver, + global_solver=solver, + options={"objective_focus": ObjectiveType.nominal}, + ) + self.assertTrue( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_feasible, + ) def regression_test_affine_drs(self): model = m = ConcreteModel() @@ -1911,27 +3788,33 @@ def regression_test_affine_drs(self): box_set = BoxSet(bounds=[(1.8, 2.2)]) solver = SolverFactory("baron") pyros = SolverFactory("pyros") - results = pyros.solve(model=m, - first_stage_variables=m.decision_vars, - second_stage_variables=[], - uncertain_params=[m.p[1]], - uncertainty_set=box_set, - local_solver=solver, - global_solver=solver, - options={"objective_focus": ObjectiveType.nominal, - "decision_rule_order":1}) - self.assertTrue(results.pyros_termination_condition, - pyrosTerminationCondition.robust_feasible) - - def regression_test_quad_drs(self): - model = m = ConcreteModel() - m.name = "s381" - - m.x1 = Var(within=Reals, bounds=(0, None), initialize=0.1) - m.x2 = Var(within=Reals, bounds=(0, None), initialize=0.1) - m.x3 = Var(within=Reals, bounds=(0, None), initialize=0.1) - - # === State Vars = [x13] + results = pyros.solve( + model=m, + first_stage_variables=m.decision_vars, + second_stage_variables=[], + uncertain_params=[m.p[1]], + uncertainty_set=box_set, + local_solver=solver, + global_solver=solver, + options={ + "objective_focus": ObjectiveType.nominal, + "decision_rule_order": 1, + }, + ) + self.assertTrue( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_feasible, + ) + + def regression_test_quad_drs(self): + model = m = ConcreteModel() + m.name = "s381" + + m.x1 = Var(within=Reals, bounds=(0, None), initialize=0.1) + m.x2 = Var(within=Reals, bounds=(0, None), initialize=0.1) + m.x3 = Var(within=Reals, bounds=(0, None), initialize=0.1) + + # === State Vars = [x13] # === Decision Vars === m.decision_vars = [m.x1, m.x2, m.x3] @@ -1946,27 +3829,33 @@ def regression_test_quad_drs(self): box_set = BoxSet(bounds=[(1.8, 2.2)]) solver = SolverFactory("baron") pyros = SolverFactory("pyros") - results = pyros.solve(model=m, - first_stage_variables=m.decision_vars, - second_stage_variables=[], - uncertain_params=[m.p[1]], - uncertainty_set=box_set, - local_solver=solver, - global_solver=solver, - options={"objective_focus": ObjectiveType.nominal, - "decision_rule_order": 2}) - self.assertTrue(results.pyros_termination_condition, - pyrosTerminationCondition.robust_feasible) - - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + results = pyros.solve( + model=m, + first_stage_variables=m.decision_vars, + second_stage_variables=[], + uncertain_params=[m.p[1]], + uncertainty_set=box_set, + local_solver=solver, + global_solver=solver, + options={ + "objective_focus": ObjectiveType.nominal, + "decision_rule_order": 2, + }, + ) + self.assertTrue( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_feasible, + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) def test_minimize_dr_norm(self): m = ConcreteModel() m.p1 = Param(initialize=0, mutable=True) m.p2 = Param(initialize=0, mutable=True) - m.z1 = Var(initialize=0, bounds=(0,1)) - m.z2 = Var(initialize=0, bounds=(0,1)) - + m.z1 = Var(initialize=0, bounds=(0, 1)) + m.z2 = Var(initialize=0, bounds=(0, 1)) m.working_model = ConcreteModel() m.working_model.util = Block() @@ -1974,6 +3863,7 @@ def test_minimize_dr_norm(self): m.working_model.util.second_stage_variables = [m.z1, m.z2] m.working_model.util.uncertain_params = [m.p1, m.p2] m.working_model.util.first_stage_variables = [] + m.working_model.util.state_vars = [] m.working_model.util.first_stage_variables = [] config = Block() @@ -1983,6 +3873,7 @@ def test_minimize_dr_norm(self): config.uncertain_params = m.working_model.util.uncertain_params config.tee = False config.solve_master_globally = True + config.time_limit = None add_decision_rule_variables(model_data=m, config=config) add_decision_rule_constraints(model_data=m, config=config) @@ -1992,20 +3883,28 @@ def test_minimize_dr_norm(self): master.scenarios = Block(NonNegativeIntegers, NonNegativeIntegers) master.scenarios[0, 0].transfer_attributes_from(m.working_model.clone()) master.scenarios[0, 0].first_stage_objective = 0 - master.scenarios[0, 0].second_stage_objective = Expression(expr=(master.scenarios[0, 0].util.second_stage_variables[0] - 1)**2 + - (master.scenarios[0, 0].util.second_stage_variables[1] - 1)**2) + master.scenarios[0, 0].second_stage_objective = Expression( + expr=(master.scenarios[0, 0].util.second_stage_variables[0] - 1) ** 2 + + (master.scenarios[0, 0].util.second_stage_variables[1] - 1) ** 2 + ) master.obj = Objective(expr=master.scenarios[0, 0].second_stage_objective) master_data = MasterProblemData() master_data.master_model = master master_data.master_model.const_efficiency_applied = False master_data.master_model.linear_efficiency_applied = False - results = minimize_dr_vars(model_data=master_data, config=config) - self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal, - msg="Minimize dr norm did not solve to optimality.") + master_data.timing = Bunch() + with time_code(master_data.timing, "total", is_main_timer=True): + results = minimize_dr_vars(model_data=master_data, config=config) + self.assertEqual( + results.solver.termination_condition, + TerminationCondition.optimal, + msg="Minimize dr norm did not solve to optimality.", + ) - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) def test_identifying_violating_param_realization(self): m = ConcreteModel() m.x1 = Var(initialize=0, bounds=(0, None)) @@ -2013,8 +3912,8 @@ def test_identifying_violating_param_realization(self): m.x3 = Var(initialize=0, bounds=(None, None)) m.u = Param(initialize=1.125, mutable=True) - m.con1 = Constraint(expr=m.x1 * m.u**(0.5) - m.x2 * m.u <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u == m.x3) + m.con1 = Constraint(expr=m.x1 * m.u ** (0.5) - m.x2 * m.u <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) @@ -2029,25 +3928,37 @@ def test_identifying_violating_param_realization(self): global_subsolver = SolverFactory("baron") # Call the PyROS solver - results = pyros_solver.solve(model=m, - first_stage_variables=[m.x1, m.x2], - second_stage_variables=[], - uncertain_params=[m.u], - uncertainty_set=interval, - local_solver=local_subsolver, - global_solver=global_subsolver, - options={ - "objective_focus": ObjectiveType.worst_case, - "solve_master_globally": True - }) - - self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.robust_optimal, - msg="Did not identify robust optimal solution to problem instance.") - self.assertGreater(results.iterations, 0, - msg="Robust infeasible model terminated in 0 iterations (nominal case).") - - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1, m.x2], + second_stage_variables=[], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=local_subsolver, + global_solver=global_subsolver, + options={ + "objective_focus": ObjectiveType.worst_case, + "solve_master_globally": True, + }, + ) + + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_optimal, + msg="Did not identify robust optimal solution to problem instance.", + ) + self.assertGreater( + results.iterations, + 0, + msg="Robust infeasible model terminated in 0 iterations (nominal case).", + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) + @unittest.skipUnless( + baron_version < (23, 1, 5), "Test known to fail beginning with Baron 23.1.5" + ) def test_terminate_with_max_iter(self): m = ConcreteModel() m.x1 = Var(initialize=0, bounds=(0, None)) @@ -2055,8 +3966,8 @@ def test_terminate_with_max_iter(self): m.x3 = Var(initialize=0, bounds=(None, None)) m.u = Param(initialize=1.125, mutable=True) - m.con1 = Constraint(expr=m.x1 * m.u**(0.5) - m.x2 * m.u <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u == m.x3) + m.con1 = Constraint(expr=m.x1 * m.u ** (0.5) - m.x2 * m.u <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) @@ -2071,25 +3982,31 @@ def test_terminate_with_max_iter(self): global_subsolver = SolverFactory("baron") # Call the PyROS solver - results = pyros_solver.solve(model=m, - first_stage_variables=[m.x1], - second_stage_variables=[m.x2], - uncertain_params=[m.u], - uncertainty_set=interval, - local_solver=local_subsolver, - global_solver=global_subsolver, - options={ - "objective_focus": ObjectiveType.worst_case, - "solve_master_globally": True, - "max_iter":1, - "decision_rule_order":2 - }) - - self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.max_iter, - msg="Returned termination condition is not return max_iter.") - - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=local_subsolver, + global_solver=global_subsolver, + options={ + "objective_focus": ObjectiveType.worst_case, + "solve_master_globally": True, + "max_iter": 1, + "decision_rule_order": 2, + }, + ) + + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.max_iter, + msg="Returned termination condition is not return max_iter.", + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) def test_terminate_with_time_limit(self): m = ConcreteModel() m.x1 = Var(initialize=0, bounds=(0, None)) @@ -2097,8 +4014,8 @@ def test_terminate_with_time_limit(self): m.x3 = Var(initialize=0, bounds=(None, None)) m.u = Param(initialize=1.125, mutable=True) - m.con1 = Constraint(expr=m.x1 * m.u**(0.5) - m.x2 * m.u <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u == m.x3) + m.con1 = Constraint(expr=m.x1 * m.u ** (0.5) - m.x2 * m.u <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) @@ -2113,24 +4030,211 @@ def test_terminate_with_time_limit(self): global_subsolver = SolverFactory("baron") # Call the PyROS solver - results = pyros_solver.solve(model=m, - first_stage_variables=[m.x1, m.x2], - second_stage_variables=[], - uncertain_params=[m.u], - uncertainty_set=interval, - local_solver=local_subsolver, - global_solver=global_subsolver, - options={ - "objective_focus": ObjectiveType.worst_case, - "solve_master_globally": True, - "time_limit": 0.001 - }) - - self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.time_out, - msg="Returned termination condition is not return time_out.") - - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1, m.x2], + second_stage_variables=[], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=local_subsolver, + global_solver=global_subsolver, + objective_focus=ObjectiveType.worst_case, + solve_master_globally=True, + time_limit=0.001, + ) + + # validate termination condition + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.time_out, + msg="Returned termination condition is not return time_out.", + ) + + # verify subsolver options are unchanged + subsolvers = [local_subsolver, global_subsolver] + for slvr, desc in zip(subsolvers, ["Local", "Global"]): + self.assertEqual( + len(list(slvr.options.keys())), + 0, + msg=f"{desc} subsolver options were changed by PyROS", + ) + self.assertIs( + getattr(slvr.options, "MaxTime", None), + None, + msg=( + f"{desc} subsolver (BARON) MaxTime setting was added " + "by PyROS, but not reverted" + ), + ) + + @unittest.skipUnless( + SolverFactory('baron').license_is_valid(), + "Global NLP solver is not available and licensed.", + ) + def test_separation_terminate_time_limit(self): + """ + Test PyROS time limit status returned in event + separation problem times out. + """ + m = ConcreteModel() + m.x1 = Var(initialize=0, bounds=(0, None)) + m.x2 = Var(initialize=0, bounds=(0, None)) + m.x3 = Var(initialize=0, bounds=(None, None)) + m.u = Param(initialize=1.125, mutable=True) + + m.con1 = Constraint(expr=m.x1 * m.u ** (0.5) - m.x2 * m.u <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u == m.x3) + + m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) + + # Define the uncertainty set + interval = BoxSet(bounds=[(0.25, 2)]) + + # Instantiate the PyROS solver + pyros_solver = SolverFactory("pyros") + + # Define subsolvers utilized in the algorithm + local_subsolver = TimeDelaySolver( + calls_to_sleep=0, sub_solver=SolverFactory("baron"), max_time=1 + ) + global_subsolver = SolverFactory("baron") + + # Call the PyROS solver + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1, m.x2], + second_stage_variables=[], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=local_subsolver, + global_solver=global_subsolver, + objective_focus=ObjectiveType.worst_case, + solve_master_globally=True, + time_limit=1, + ) + + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.time_out, + msg="Returned termination condition is not return time_out.", + ) + + @unittest.skipUnless( + SolverFactory('gams').license_is_valid() + and SolverFactory('baron').license_is_valid(), + "Global NLP solver is not available and licensed.", + ) + def test_gams_successful_time_limit(self): + """ + Test PyROS time limit status returned in event + separation problem times out. + """ + m = ConcreteModel() + m.x1 = Var(initialize=0, bounds=(0, None)) + m.x2 = Var(initialize=0, bounds=(0, None)) + m.x3 = Var(initialize=0, bounds=(None, None)) + m.u = Param(initialize=1.125, mutable=True) + + m.con1 = Constraint(expr=m.x1 * m.u ** (0.5) - m.x2 * m.u <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u == m.x3) + + m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) + + # Define the uncertainty set + interval = BoxSet(bounds=[(0.25, 2)]) + + # Instantiate the PyROS solver + pyros_solver = SolverFactory("pyros") + + # Define subsolvers utilized in the algorithm + # two GAMS solvers, one of which has reslim set + # (overridden when invoked in PyROS) + local_subsolvers = [ + SolverFactory("gams:conopt"), + SolverFactory("gams:conopt"), + SolverFactory("ipopt"), + ] + local_subsolvers[0].options["add_options"] = ["option reslim=100;"] + global_subsolver = SolverFactory("baron") + global_subsolver.options["MaxTime"] = 300 + + # Call the PyROS solver + for idx, opt in enumerate(local_subsolvers): + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1, m.x2], + second_stage_variables=[], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=opt, + global_solver=global_subsolver, + objective_focus=ObjectiveType.worst_case, + solve_master_globally=True, + time_limit=100, + ) + + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_optimal, + msg=( + f"Returned termination condition with local " + "subsolver {idx + 1} of 2 is not robust_optimal." + ), + ) + + # check first local subsolver settings + # remain unchanged after PyROS exit + self.assertEqual( + len(list(local_subsolvers[0].options["add_options"])), + 1, + msg=( + f"Local subsolver {local_subsolvers[0]} options 'add_options'" + "were changed by PyROS" + ), + ) + self.assertEqual( + local_subsolvers[0].options["add_options"][0], + "option reslim=100;", + msg=( + f"Local subsolver {local_subsolvers[0]} setting " + "'add_options' was modified " + "by PyROS, but changes were not properly undone" + ), + ) + + # check global subsolver settings unchanged + self.assertEqual( + len(list(global_subsolver.options.keys())), + 1, + msg=(f"Global subsolver {global_subsolver} options were changed by PyROS"), + ) + self.assertEqual( + global_subsolver.options["MaxTime"], + 300, + msg=( + f"Global subsolver {global_subsolver} setting " + "'MaxTime' was modified " + "by PyROS, but changes were not properly undone" + ), + ) + + # check other local subsolvers remain unchanged + for slvr, key in zip(local_subsolvers[1:], ["add_options", "max_cpu_time"]): + # no custom options were added to the `options` + # attribute of the optimizer, so any attribute + # of `options` should be `None` + self.assertIs( + getattr(slvr.options, key, None), + None, + msg=( + f"Local subsolver {slvr} setting '{key}' was added " + "by PyROS, but not reverted" + ), + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) def test_terminate_with_application_error(self): """ Check that PyROS correctly raises ApplicationError @@ -2149,9 +4253,8 @@ def test_terminate_with_application_error(self): box_set = BoxSet(bounds=[(1, 2)]) pyros_solver = SolverFactory("pyros") with self.assertRaisesRegex( - ApplicationError, - r"Solver \(ipopt\) did not exit normally", - ): + ApplicationError, r"Solver \(ipopt\) did not exit normally" + ): pyros_solver.solve( model=m, first_stage_variables=[m.x1], @@ -2161,10 +4264,32 @@ def test_terminate_with_application_error(self): local_solver=solver, global_solver=baron, objective_focus=ObjectiveType.nominal, + time_limit=1000, ) - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + # check solver settings are unchanged + self.assertEqual( + len(list(solver.options.keys())), + 1, + msg=(f"Local subsolver {solver} options were changed by PyROS"), + ) + self.assertEqual( + solver.options["halt_on_ampl_error"], + "yes", + msg=( + f"Local subsolver {solver} option " + "'halt_on_ampl_error' was changed by PyROS" + ), + ) + self.assertEqual( + len(list(baron.options.keys())), + 0, + msg=(f"Global subsolver {baron} options were changed by PyROS"), + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) def test_master_subsolver_error(self): """ Test PyROS on a two-stage problem with a subsolver error @@ -2205,11 +4330,12 @@ def test_master_subsolver_error(self): msg=( f"Returned termination condition for separation error" "test is not {pyrosTerminationCondition.subsolver_error}.", - ) + ), ) - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) def test_separation_subsolver_error(self): """ Test PyROS on a two-stage problem with a subsolver error @@ -2248,14 +4374,139 @@ def test_separation_subsolver_error(self): self.assertEqual( res.pyros_termination_condition, pyrosTerminationCondition.subsolver_error, + msg=( + "Returned termination condition for separation error" + f"test is not {pyrosTerminationCondition.subsolver_error}." + ), + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) + def test_discrete_separation_subsolver_error(self): + """ + Test PyROS for two-stage problem with discrete type set, + subsolver error status. + """ + m = ConcreteModel() + + m.q = Param(initialize=1, mutable=True) + m.x1 = Var(initialize=1, bounds=(0, 1)) + + # upper bound induces subsolver error: separation + # max(x2 - log(m.q)) will force subsolver to q = 0 + m.x2 = Var(initialize=2, bounds=(None, log(m.q))) + + m.obj = Objective(expr=m.x1 + m.x2, sense=maximize) + + discrete_set = DiscreteScenarioSet(scenarios=[(1,), (0,)]) + + local_solver = SolverFactory("ipopt") + global_solver = SolverFactory("baron") + pyros_solver = SolverFactory("pyros") + + res = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.q], + uncertainty_set=discrete_set, + local_solver=local_solver, + global_solver=global_solver, + decision_rule_order=1, + tee=True, + ) + self.assertEqual( + res.pyros_termination_condition, + pyrosTerminationCondition.subsolver_error, + msg=( + "Returned termination condition for separation error" + f"test is not {pyrosTerminationCondition.subsolver_error}." + ), + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) + def test_pyros_math_domain_error(self): + """ + Test PyROS on a two-stage problem, discrete + set type with a math domain error evaluating + performance constraint expressions in separation. + """ + m = ConcreteModel() + m.q = Param(initialize=1, mutable=True) + m.x1 = Var(initialize=1, bounds=(0, 1)) + m.x2 = Var(initialize=2, bounds=(-m.q, log(m.q))) + m.obj = Objective(expr=m.x1 + m.x2) + + box_set = BoxSet(bounds=[[0, 1]]) + + local_solver = SolverFactory("baron") + global_solver = SolverFactory("baron") + pyros_solver = SolverFactory("pyros") + + with self.assertRaisesRegex( + expected_exception=ArithmeticError, + expected_regex=( + "Evaluation of performance constraint.*math domain error.*" + ), + msg="ValueError arising from math domain error not raised", + ): + # should raise math domain error: + # (1) lower bounding constraint on x2 solved first + # in separation, q = 0 in worst case + # (2) now tries to evaluate log(q), but q = 0 + pyros_solver.solve( + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.q], + uncertainty_set=box_set, + local_solver=local_solver, + global_solver=global_solver, + decision_rule_order=1, + tee=True, + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) + def test_pyros_no_perf_cons(self): + """ + Ensure PyROS properly accommodates models with no + performance constraints (such as effectively deterministic + models). + """ + m = ConcreteModel() + m.x = Var(bounds=(0, 1)) + m.q = Param(mutable=True, initialize=1) + + m.obj = Objective(expr=m.x * m.q) + + pyros_solver = SolverFactory("pyros") + res = pyros_solver.solve( + model=m, + first_stage_variables=[m.x], + second_stage_variables=[], + uncertain_params=[m.q], + uncertainty_set=BoxSet(bounds=[[0, 1]]), + local_solver=SolverFactory("ipopt"), + global_solver=SolverFactory("ipopt"), + solve_master_globally=True, + ) + self.assertEqual( + res.pyros_termination_condition, + pyrosTerminationCondition.robust_feasible, msg=( f"Returned termination condition for separation error" "test is not {pyrosTerminationCondition.subsolver_error}.", - ) + ), ) - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) def test_nominal_focus_robust_feasible(self): """ Test problem under nominal objective focus terminates @@ -2267,8 +4518,8 @@ def test_nominal_focus_robust_feasible(self): m.x3 = Var(initialize=0, bounds=(None, None)) m.u = Param(initialize=1.125, mutable=True) - m.con1 = Constraint(expr=m.x1 * m.u**(0.5) - m.x2 * m.u <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u == m.x3) + m.con1 = Constraint(expr=m.x1 * m.u ** (0.5) - m.x2 * m.u <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) # singleton set, guaranteed robust feasibility @@ -2294,7 +4545,7 @@ def test_nominal_focus_robust_feasible(self): bypass_local_separation=True, options={ "objective_focus": ObjectiveType.nominal, - "solve_master_globally": True + "solve_master_globally": True, }, ) # check for robust feasible termination @@ -2304,8 +4555,9 @@ def test_nominal_focus_robust_feasible(self): msg="Returned termination condition is not return robust_optimal.", ) - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) def test_discrete_separation(self): m = ConcreteModel() m.x1 = Var(initialize=0, bounds=(0, None)) @@ -2313,8 +4565,8 @@ def test_discrete_separation(self): m.x3 = Var(initialize=0, bounds=(None, None)) m.u = Param(initialize=1.125, mutable=True) - m.con1 = Constraint(expr=m.x1 * m.u**(0.5) - m.x2 * m.u <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u == m.x3) + m.con1 = Constraint(expr=m.x1 * m.u ** (0.5) - m.x2 * m.u <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) @@ -2329,23 +4581,32 @@ def test_discrete_separation(self): global_subsolver = SolverFactory("baron") # Call the PyROS solver - results = pyros_solver.solve(model=m, - first_stage_variables=[m.x1, m.x2], - second_stage_variables=[], - uncertain_params=[m.u], - uncertainty_set=discrete_scenarios, - local_solver=local_subsolver, - global_solver=global_subsolver, - options={ - "objective_focus": ObjectiveType.worst_case, - "solve_master_globally": True - }) - - self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.robust_optimal, - msg="Returned termination condition is not return robust_optimal.") - - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1, m.x2], + second_stage_variables=[], + uncertain_params=[m.u], + uncertainty_set=discrete_scenarios, + local_solver=local_subsolver, + global_solver=global_subsolver, + options={ + "objective_focus": ObjectiveType.worst_case, + "solve_master_globally": True, + }, + ) + + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_optimal, + msg="Returned termination condition is not return robust_optimal.", + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) + @unittest.skipUnless( + baron_version == (23, 1, 5), "Test runs >90 minutes with Baron 22.9.30" + ) def test_higher_order_decision_rules(self): m = ConcreteModel() m.x1 = Var(initialize=0, bounds=(0, None)) @@ -2354,76 +4615,272 @@ def test_higher_order_decision_rules(self): m.u = Param(initialize=1.125, mutable=True) m.con1 = Constraint(expr=m.x1 * m.u ** (0.5) - m.x2 * m.u <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u == m.x3) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) # Define the uncertainty set interval = BoxSet(bounds=[(0.25, 2)]) - # Instantiate the PyROS solver + # Instantiate the PyROS solver + pyros_solver = SolverFactory("pyros") + + # Define subsolvers utilized in the algorithm + local_subsolver = SolverFactory('baron') + global_subsolver = SolverFactory("baron") + + # Call the PyROS solver + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=local_subsolver, + global_solver=global_subsolver, + options={ + "objective_focus": ObjectiveType.worst_case, + "solve_master_globally": True, + "decision_rule_order": 2, + }, + ) + + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_optimal, + msg="Returned termination condition is not return robust_optimal.", + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) + def test_coefficient_matching_solve(self): + # Write the deterministic Pyomo model + m = ConcreteModel() + m.x1 = Var(initialize=0, bounds=(0, None)) + m.x2 = Var(initialize=0, bounds=(0, None)) + m.u = Param(initialize=1.125, mutable=True) + + m.con = Constraint(expr=m.u ** (0.5) * m.x1 - m.u * m.x2 <= 2) + m.eq_con = Constraint( + expr=m.u**2 * (m.x2 - 1) + + m.u * (m.x1**3 + 0.5) + - 5 * m.u * m.x1 * m.x2 + + m.u * (m.x1 + 2) + == 0 + ) + m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) + + interval = BoxSet(bounds=[(0.25, 2)]) + + # Instantiate the PyROS solver + pyros_solver = SolverFactory("pyros") + + # Define subsolvers utilized in the algorithm + local_subsolver = SolverFactory('baron') + global_subsolver = SolverFactory("baron") + + # Call the PyROS solver + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1, m.x2], + second_stage_variables=[], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=local_subsolver, + global_solver=global_subsolver, + options={ + "objective_focus": ObjectiveType.worst_case, + "solve_master_globally": True, + }, + ) + + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_optimal, + msg="Non-optimal termination condition from robust feasible coefficient matching problem.", + ) + self.assertAlmostEqual( + results.final_objective_value, + 6.0394, + 2, + msg="Incorrect objective function value.", + ) + + def create_mitsos_4_3(self): + """ + Create instance of Problem 4_3 from Mitsos (2011)'s + Test Set of semi-infinite programs. + """ + # construct the deterministic model + m = ConcreteModel() + m.u = Param(initialize=0.5, mutable=True) + m.x1 = Var(bounds=[-1000, 1000]) + m.x2 = Var(bounds=[-1000, 1000]) + m.x3 = Var(bounds=[-1000, 1000]) + m.con = Constraint(expr=exp(m.u - 1) - m.x1 - m.x2 * m.u - m.x3 * m.u**2 <= 0) + m.eq_con = Constraint( + expr=( + m.u**2 * (m.x2 - 1) + + m.u * (m.x1**3 + 0.5) + - 5 * m.u * m.x1 * m.x2 + + m.u * (m.x1 + 2) + == 0 + ) + ) + m.obj = Objective(expr=m.x1 + m.x2 / 2 + m.x3 / 3) + + return m + + @unittest.skipUnless( + baron_license_is_valid and scip_available and scip_license_is_valid, + "Global solvers BARON and SCIP not both available and licensed", + ) + def test_coeff_matching_solver_insensitive(self): + """ + Check that result for instance with constraint subject to + coefficient matching is insensitive to subsolver settings. Based + on Mitsos (2011) semi-infinite programming instance 4_3. + """ + m = self.create_mitsos_4_3() + + # instantiate BARON subsolver and PyROS solver + baron = SolverFactory("baron") + scip = SolverFactory("scip") pyros_solver = SolverFactory("pyros") - # Define subsolvers utilized in the algorithm - local_subsolver = SolverFactory('baron') - global_subsolver = SolverFactory("baron") + # solve with PyROS + solver_names = {"baron": baron, "scip": scip} + for name, solver in solver_names.items(): + res = pyros_solver.solve( + model=m, + first_stage_variables=[], + second_stage_variables=[m.x1, m.x2, m.x3], + uncertain_params=[m.u], + uncertainty_set=BoxSet(bounds=[[0, 1]]), + local_solver=solver, + global_solver=solver, + objective_focus=ObjectiveType.worst_case, + solve_master_globally=True, + bypass_local_separation=True, + robust_feasibility_tolerance=1e-4, + ) + self.assertEqual( + first=res.iterations, + second=2, + msg=( + "Iterations for Watson 43 instance solved with " + f"subsolver {name} not as expected" + ), + ) + np.testing.assert_allclose( + actual=res.final_objective_value, + desired=0.9781633, + rtol=0, + atol=5e-3, + err_msg=( + "Final objective for Watson 43 instance solved with " + f"subsolver {name} not as expected" + ), + ) - # Call the PyROS solver - results = pyros_solver.solve(model=m, - first_stage_variables=[m.x1], - second_stage_variables=[m.x2], - uncertain_params=[m.u], - uncertainty_set=interval, - local_solver=local_subsolver, - global_solver=global_subsolver, - options={ - "objective_focus": ObjectiveType.worst_case, - "solve_master_globally": True, - "decision_rule_order":2 - }) - - self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.robust_optimal, - msg="Returned termination condition is not return robust_optimal.") - - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") - def test_coefficient_matching_solve(self): + @unittest.skipUnless( + baron_license_is_valid and baron_version >= (23, 2, 27), + "BARON licensing and version requirements not met", + ) + def test_coefficient_matching_partitioning_insensitive(self): + """ + Check that result for instance with constraint subject to + coefficient matching is insensitive to DOF partitioning. Model + is based on Mitsos (2011) semi-infinite programming instance + 4_3. + """ + m = self.create_mitsos_4_3() - # Write the deterministic Pyomo model - m = ConcreteModel() - m.x1 = Var(initialize=0, bounds=(0, None)) - m.x2 = Var(initialize=0, bounds=(0, None)) - m.u = Param(initialize=1.125, mutable=True) + # instantiate BARON subsolver and PyROS solver + baron = SolverFactory("baron") + pyros_solver = SolverFactory("pyros") - m.con = Constraint(expr=m.u ** (0.5) * m.x1 - m.u * m.x2 <= 2) - m.eq_con = Constraint(expr = m.u**2 * (m.x2- 1) + m.u * (m.x1**3 + 0.5) - 5 * m.u * m.x1 * m.x2 + m.u * (m.x1 + 2) == 0) - m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) + # solve with PyROS + partitionings = [ + {"fsv": [m.x1, m.x2, m.x3], "ssv": []}, + {"fsv": [], "ssv": [m.x1, m.x2, m.x3]}, + ] + for partitioning in partitionings: + res = pyros_solver.solve( + model=m, + first_stage_variables=partitioning["fsv"], + second_stage_variables=partitioning["ssv"], + uncertain_params=[m.u], + uncertainty_set=BoxSet(bounds=[[0, 1]]), + local_solver=baron, + global_solver=baron, + objective_focus=ObjectiveType.worst_case, + solve_master_globally=True, + bypass_local_separation=True, + robust_feasibility_tolerance=1e-4, + ) + self.assertEqual( + first=res.iterations, + second=2, + msg=( + "Iterations for Watson 43 instance solved with " + f"first-stage vars {[fsv.name for fsv in partitioning['fsv']]} " + f"second-stage vars {[ssv.name for ssv in partitioning['ssv']]} " + "not as expected" + ), + ) + np.testing.assert_allclose( + actual=res.final_objective_value, + desired=0.9781633, + rtol=0, + atol=5e-3, + err_msg=( + "Final objective for Watson 43 instance solved with " + f"first-stage vars {[fsv.name for fsv in partitioning['fsv']]} " + f"second-stage vars {[ssv.name for ssv in partitioning['ssv']]} " + "not as expected" + ), + ) - interval = BoxSet(bounds=[(0.25, 2)]) + def test_coefficient_matching_raises_error_4_3(self): + """ + Check that result for instance with constraint subject to + coefficient matching results in exception certifying robustness + cannot be certified where expected. Model + is based on Mitsos (2011) semi-infinite programming instance + 4_3. + """ + m = self.create_mitsos_4_3() - # Instantiate the PyROS solver + # instantiate BARON subsolver and PyROS solver + baron = SolverFactory("baron") pyros_solver = SolverFactory("pyros") - # Define subsolvers utilized in the algorithm - local_subsolver = SolverFactory('baron') - global_subsolver = SolverFactory("baron") - - # Call the PyROS solver - results = pyros_solver.solve(model=m, - first_stage_variables=[m.x1, m.x2], - second_stage_variables=[], - uncertain_params=[m.u], - uncertainty_set=interval, - local_solver=local_subsolver, - global_solver=global_subsolver, - options={ - "objective_focus": ObjectiveType.worst_case, - "solve_master_globally": True - }) - - self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.robust_optimal, - msg="Non-optimal termination condition from robust feasible coefficient matching problem.") - self.assertAlmostEqual(results.final_objective_value, 6.0394, 2, msg="Incorrect objective function value.") + # solve with PyROS + dr_orders = [1, 2] + for dr_order in dr_orders: + with self.assertRaisesRegex( + ValueError, + expected_regex=( + "Equality constraint.*cannot be guaranteed to be robustly " + "feasible.*" + ), + ): + res = pyros_solver.solve( + model=m, + first_stage_variables=[], + second_stage_variables=[m.x1, m.x2, m.x3], + uncertain_params=[m.u], + uncertainty_set=BoxSet(bounds=[[0, 1]]), + local_solver=baron, + global_solver=baron, + objective_focus=ObjectiveType.worst_case, + decision_rule_order=dr_order, + solve_master_globally=True, + bypass_local_separation=True, + robust_feasibility_tolerance=1e-4, + ) def test_coefficient_matching_robust_infeasible_proof_in_pyros(self): # Write the deterministic Pyomo model @@ -2433,7 +4890,13 @@ def test_coefficient_matching_robust_infeasible_proof_in_pyros(self): m.u = Param(initialize=1.125, mutable=True) m.con = Constraint(expr=m.u ** (0.5) * m.x1 - m.u * m.x2 <= 2) - m.eq_con = Constraint(expr = m.u * (m.x1**3 + 0.5) - 5 * m.u * m.x1 * m.x2 + m.u * (m.x1 + 2) + m.u**2 == 0) + m.eq_con = Constraint( + expr=m.u * (m.x1**3 + 0.5) + - 5 * m.u * m.x1 * m.x2 + + m.u * (m.x1 + 2) + + m.u**2 + == 0 + ) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) interval = BoxSet(bounds=[(0.25, 2)]) @@ -2447,20 +4910,25 @@ def test_coefficient_matching_robust_infeasible_proof_in_pyros(self): # Call the PyROS solver - results = pyros_solver.solve(model=m, - first_stage_variables=[m.x1, m.x2], - second_stage_variables=[], - uncertain_params=[m.u], - uncertainty_set=interval, - local_solver=local_subsolver, - global_solver=global_subsolver, - options={ - "objective_focus": ObjectiveType.worst_case, - "solve_master_globally": True - }) - - self.assertEqual(results.pyros_termination_condition, pyrosTerminationCondition.robust_infeasible, - msg="Robust infeasible problem not identified via coefficient matching.") + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1, m.x2], + second_stage_variables=[], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=local_subsolver, + global_solver=global_subsolver, + options={ + "objective_focus": ObjectiveType.worst_case, + "solve_master_globally": True, + }, + ) + + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_infeasible, + msg="Robust infeasible problem not identified via coefficient matching.", + ) def test_coefficient_matching_nonlinear_expr(self): # Write the deterministic Pyomo model @@ -2470,7 +4938,13 @@ def test_coefficient_matching_nonlinear_expr(self): m.u = Param(initialize=1.125, mutable=True) m.con = Constraint(expr=m.u ** (0.5) * m.x1 - m.u * m.x2 <= 2) - m.eq_con = Constraint(expr = m.u**2 * (m.x2- 1) + m.u * (m.x1**3 + 0.5) - 5 * m.u * m.x1 * m.x2 + m.u * (m.x1 + 2) == 0) + m.eq_con = Constraint( + expr=m.u**2 * (m.x2 - 1) + + m.u * (m.x1**3 + 0.5) + - 5 * m.u * m.x1 * m.x2 + + m.u * (m.x1 + 2) + == 0 + ) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) interval = BoxSet(bounds=[(0.25, 2)]) @@ -2484,25 +4958,30 @@ def test_coefficient_matching_nonlinear_expr(self): # Call the PyROS solver with self.assertRaises( - ValueError, msg="ValueError should be raised for general " - "nonlinear expressions in h(x,z,q)=0 constraints."): - results = pyros_solver.solve(model=m, - first_stage_variables=[m.x1], - second_stage_variables=[m.x2], - uncertain_params=[m.u], - uncertainty_set=interval, - local_solver=local_subsolver, - global_solver=global_subsolver, - options={ - "objective_focus": ObjectiveType.worst_case, - "solve_master_globally": True, - "decision_rule_order":1 - }) - - -@unittest.skipUnless(SolverFactory('baron').available(exception_flag=False) - and SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + ValueError, + msg="ValueError should be raised for general " + "nonlinear expressions in h(x,z,q)=0 constraints.", + ): + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=local_subsolver, + global_solver=global_subsolver, + options={ + "objective_focus": ObjectiveType.worst_case, + "solve_master_globally": True, + "decision_rule_order": 1, + }, + ) + + +@unittest.skipUnless( + baron_available and baron_license_is_valid, + "Global NLP solver is not available and licensed.", +) class testBypassingSeparation(unittest.TestCase): def test_bypass_global_separation(self): """Test bypassing of global separation solve calls.""" @@ -2513,7 +4992,7 @@ def test_bypass_global_separation(self): m.u = Param(initialize=1.125, mutable=True) m.con1 = Constraint(expr=m.x1 * m.u ** (0.5) - m.x2 * m.u <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u == m.x3) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) @@ -2529,29 +5008,32 @@ def test_bypass_global_separation(self): # Call the PyROS solver results = pyros_solver.solve( - model=m, - first_stage_variables=[m.x1], - second_stage_variables=[m.x2], - uncertain_params=[m.u], - uncertainty_set=interval, - local_solver=local_subsolver, - global_solver=global_subsolver, - options={ - "objective_focus": ObjectiveType.worst_case, - "solve_master_globally": True, - "decision_rule_order":0, - "bypass_global_separation": True - } - ) - - self.assertEqual(results.pyros_termination_condition, - pyrosTerminationCondition.robust_optimal, - msg="Returned termination condition is not return robust_optimal.") - - -@unittest.skipUnless(SolverFactory('baron').available(exception_flag=False) - and SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=local_subsolver, + global_solver=global_subsolver, + options={ + "objective_focus": ObjectiveType.worst_case, + "solve_master_globally": True, + "decision_rule_order": 0, + "bypass_global_separation": True, + }, + ) + + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_optimal, + msg="Returned termination condition is not return robust_optimal.", + ) + + +@unittest.skipUnless( + baron_available and baron_license_is_valid, + "Global NLP solver is not available and licensed.", +) class testUninitializedVars(unittest.TestCase): def test_uninitialized_vars(self): """ @@ -2575,7 +5057,7 @@ def test_uninitialized_vars(self): m.w = Var(bounds=(0, 1)) # objectives - m.obj = Objective(expr=-m.x ** 2 + m.z ** 2) + m.obj = Objective(expr=-m.x**2 + m.z**2) # auxiliary constraints m.t_lb_con = Constraint(expr=m.x - m.z <= m.t) @@ -2585,9 +5067,7 @@ def test_uninitialized_vars(self): m.con1 = Constraint(expr=m.x - m.z >= 0.1) m.eq_con = Constraint(expr=m.w == 0.5 * m.t) - box_set = BoxSet( - bounds=((value(m.ell), value(m.u)),) - ) + box_set = BoxSet(bounds=((value(m.ell), value(m.u)),)) # solvers local_solver = SolverFactory("ipopt") @@ -2606,35 +5086,39 @@ def test_uninitialized_vars(self): uncertain_params = [model.p] res = pyros_solver.solve( - model=model, - first_stage_variables=fsv, - second_stage_variables=ssv, - uncertain_params=uncertain_params, - uncertainty_set=box_set, - local_solver=local_solver, - global_solver=global_solver, - objective_focus=ObjectiveType.worst_case, - decision_rule_order=2, - solve_master_globally=True + model=model, + first_stage_variables=fsv, + second_stage_variables=ssv, + uncertain_params=uncertain_params, + uncertainty_set=box_set, + local_solver=local_solver, + global_solver=global_solver, + objective_focus=ObjectiveType.worst_case, + decision_rule_order=2, + solve_master_globally=True, ) self.assertEqual( - res.pyros_termination_condition, - pyrosTerminationCondition.robust_optimal, - msg=("Returned termination condition for solve with" - f"decision rule order {dr_order} is not return " - "robust_optimal.") + res.pyros_termination_condition, + pyrosTerminationCondition.robust_optimal, + msg=( + "Returned termination condition for solve with" + f"decision rule order {dr_order} is not return " + "robust_optimal." + ), ) -@unittest.skipUnless(SolverFactory('baron').available(exception_flag=False) - and SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") +@unittest.skipUnless( + baron_available and baron_license_is_valid, + "Global NLP solver is not available and licensed.", +) class testModelMultipleObjectives(unittest.TestCase): """ This class contains tests for models with multiple Objective attributes. """ + def test_multiple_objs(self): """Test bypassing of global separation solve calls.""" m = ConcreteModel() @@ -2644,7 +5128,7 @@ def test_multiple_objs(self): m.u = Param(initialize=1.125, mutable=True) m.con1 = Constraint(expr=m.x1 * m.u ** (0.5) - m.x2 * m.u <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u == m.x3) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - 1) ** 2) @@ -2666,63 +5150,55 @@ def test_multiple_objs(self): global_subsolver = SolverFactory("baron") solve_kwargs = dict( - model=m, - first_stage_variables=[m.x1], - second_stage_variables=[m.x2], - uncertain_params=[m.u], - uncertainty_set=interval, - local_solver=local_subsolver, - global_solver=global_subsolver, - options={ - "objective_focus": ObjectiveType.worst_case, - "solve_master_globally": True, - "decision_rule_order":0, - } + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.u], + uncertainty_set=interval, + local_solver=local_subsolver, + global_solver=global_subsolver, + options={ + "objective_focus": ObjectiveType.worst_case, + "solve_master_globally": True, + "decision_rule_order": 0, + }, ) # check validation error raised due to multiple objectives with self.assertRaisesRegex( - AttributeError, - "This model structure is not currently handled by the ROSolver." - ): + AttributeError, + "This model structure is not currently handled by the ROSolver.", + ): pyros_solver.solve(**solve_kwargs) # check validation error raised due to multiple objectives m.b.obj.deactivate() with self.assertRaisesRegex( - AttributeError, - "This model structure is not currently handled by the ROSolver." - ): + AttributeError, + "This model structure is not currently handled by the ROSolver.", + ): pyros_solver.solve(**solve_kwargs) # now solve with only one active obj, # check successful termination m.obj2.deactivate() res = pyros_solver.solve(**solve_kwargs) - self.assertIs(res.pyros_termination_condition, - pyrosTerminationCondition.robust_optimal) + self.assertIs( + res.pyros_termination_condition, pyrosTerminationCondition.robust_optimal + ) # check active objectives - self.assertEqual( - len(list(m.component_data_objects(Objective, active=True))), - 1 - ) + self.assertEqual(len(list(m.component_data_objects(Objective, active=True))), 1) self.assertTrue(m.obj.active) # swap to maximization objective. # and solve again - m.obj_max = Objective( - expr=-m.obj.expr, - sense=pyo_max, - ) + m.obj_max = Objective(expr=-m.obj.expr, sense=pyo_max) m.obj.deactivate() res = pyros_solver.solve(**solve_kwargs) # check active objectives - self.assertEqual( - len(list(m.component_data_objects(Objective, active=True))), - 1 - ) + self.assertEqual(len(list(m.component_data_objects(Objective, active=True))), 1) self.assertTrue(m.obj_max.active) @@ -2732,6 +5208,7 @@ class testModelIdentifyObjectives(unittest.TestCase): determine the first-stage and second-stage portions of a two-stage expression. """ + def test_identify_objectives(self): """ Test first and second-stage objective identification @@ -2752,10 +5229,12 @@ def test_identify_objectives(self): # objective m.obj = Objective( expr=( - (m.x[0] + m.y) * - (sum(m.x[idx] * m.p[idx] for idx in range(3)) - + m.q * m.z - + m.x[0] * m.q) + (m.x[0] + m.y) + * ( + sum(m.x[idx] * m.p[idx] for idx in range(3)) + + m.q * m.z + + m.x[0] * m.q + ) + sin(m.x[0] + m.q) + cos(m.x[2] + m.z) ) @@ -2775,12 +5254,10 @@ def test_identify_objectives(self): # determine vars and uncertain params participating in # objective fsv_in_obj = ComponentSet( - var for var in identify_variables(m.obj) - if var in fsv_set + var for var in identify_variables(m.obj) if var in fsv_set ) ssv_in_obj = ComponentSet( - var for var in identify_variables(m.obj) - if var not in fsv_set + var for var in identify_variables(m.obj) if var not in fsv_set ) uncertain_params_in_obj = ComponentSet( param @@ -2791,11 +5268,11 @@ def test_identify_objectives(self): # determine vars and uncertain params participating in # first-stage objective fsv_in_first_stg_cost = ComponentSet( - var for var in identify_variables(m.first_stage_objective) - if var in fsv_set + var for var in identify_variables(m.first_stage_objective) if var in fsv_set ) ssv_in_first_stg_cost = ComponentSet( - var for var in identify_variables(m.first_stage_objective) + var + for var in identify_variables(m.first_stage_objective) if var not in fsv_set ) uncertain_params_in_first_stg_cost = ComponentSet( @@ -2807,11 +5284,13 @@ def test_identify_objectives(self): # determine vars and uncertain params participating in # second-stage objective fsv_in_second_stg_cost = ComponentSet( - var for var in identify_variables(m.second_stage_objective) + var + for var in identify_variables(m.second_stage_objective) if var in fsv_set ) ssv_in_second_stg_cost = ComponentSet( - var for var in identify_variables(m.second_stage_objective) + var + for var in identify_variables(m.second_stage_objective) if var not in fsv_set ) uncertain_params_in_second_stg_cost = ComponentSet( @@ -2830,7 +5309,7 @@ def test_identify_objectives(self): ssv_in_first_stg_cost, f"First-stage expression {str(m.first_stage_objective.expr)}" f" consists of non first-stage variables " - f"{{var.name for var in fsv_in_second_stg_cost}}" + f"{{var.name for var in fsv_in_second_stg_cost}}", ) self.assertTrue( ssv_in_second_stg_cost == ssv_in_obj, @@ -2841,12 +5320,12 @@ def test_identify_objectives(self): uncertain_params_in_first_stg_cost, f"First-stage expression {str(m.first_stage_objective.expr)}" " consists of uncertain params" - f" {{p.name for p in uncertain_params_in_first_stg_cost}}" + f" {{p.name for p in uncertain_params_in_first_stg_cost}}", ) self.assertTrue( uncertain_params_in_second_stg_cost == uncertain_params_in_obj, f"{{p.name for p in uncertain_params_in_second_stg_cost}} is not " - f"{{p.name for p in uncertain_params_in_obj}}" + f"{{p.name for p in uncertain_params_in_obj}}", ) def test_identify_objectives_var_expr(self): @@ -2875,19 +5354,14 @@ def test_identify_objectives_var_expr(self): identify_objective_functions(m, m.obj) fsv_in_second_stg_obj = list( - v.name for v in - identify_variables(m.second_stage_objective) + v.name for v in identify_variables(m.second_stage_objective) ) # perform checks - self.assertTrue( - list(identify_variables(m.first_stage_objective)) - == [m.x[1]] - ) + self.assertTrue(list(identify_variables(m.first_stage_objective)) == [m.x[1]]) self.assertFalse( fsv_in_second_stg_obj, - "Second stage objective contains variable(s) " - f"{fsv_in_second_stg_obj}" + "Second stage objective contains variable(s) " f"{fsv_in_second_stg_obj}", ) @@ -2895,8 +5369,13 @@ class testMasterFeasibilityUnitConsistency(unittest.TestCase): """ Test cases for models with unit-laden model components. """ - @unittest.skipUnless(SolverFactory('baron').license_is_valid(), - "Global NLP solver is not available and licensed.") + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) + @unittest.skipUnless( + baron_version < (23, 1, 5), "Test known to fail beginning with Baron 23.1.5" + ) def test_two_stg_mod_with_axis_aligned_set(self): """ Test two-stage model with `AxisAlignedEllipsoidalSet` @@ -2910,19 +5389,16 @@ def test_two_stg_mod_with_axis_aligned_set(self): m.x2 = Var(initialize=0, bounds=(0, None), units=u.m) m.x3 = Var(initialize=0, bounds=(None, None)) m.u1 = Param(initialize=1.125, mutable=True, units=u.s) - m.u2 = Param(initialize=1, mutable=True, units=u.m ** 2) + m.u2 = Param(initialize=1, mutable=True, units=u.m**2) - m.con1 = Constraint(expr=m.x1 * m.u1**(0.5) - m.x2 * m.u1 <= 2) - m.con2 = Constraint(expr=m.x1 ** 2 - m.x2 ** 2 * m.u1 == m.x3) + m.con1 = Constraint(expr=m.x1 * m.u1 ** (0.5) - m.x2 * m.u1 <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u1 == m.x3) m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - m.u2) ** 2) # Define the uncertainty set # we take the parameter `u2` to be 'fixed' - ellipsoid = AxisAlignedEllipsoidalSet( - center=[1.125, 1], - half_lengths=[1, 0], - ) + ellipsoid = AxisAlignedEllipsoidalSet(center=[1.125, 1], half_lengths=[1, 0]) # Instantiate the PyROS solver pyros_solver = SolverFactory("pyros") @@ -2944,7 +5420,7 @@ def test_two_stg_mod_with_axis_aligned_set(self): options={ "objective_focus": ObjectiveType.worst_case, "solve_master_globally": True, - } + }, ) # check successful termination @@ -2952,7 +5428,7 @@ def test_two_stg_mod_with_axis_aligned_set(self): self.assertEqual( results.pyros_termination_condition, pyrosTerminationCondition.robust_optimal, - msg="Did not identify robust optimal solution to problem instance." + msg="Did not identify robust optimal solution to problem instance.", ) self.assertGreater( results.iterations, @@ -2962,8 +5438,194 @@ def test_two_stg_mod_with_axis_aligned_set(self): " Hence master feasibility problem construction not tested." " Consider implementing a more challenging model for this" " test case." - ) + ), + ) + + +class TestSubsolverTiming(unittest.TestCase): + """ + Tests to confirm that the PyROS subsolver timing routines + work appropriately. + """ + + def simple_nlp_model(self): + """ + Create simple NLP for the unit tests defined + within this class + """ + # define model + m = ConcreteModel() + m.x1 = Var(initialize=0, bounds=(0, None)) + m.x2 = Var(initialize=0, bounds=(0, None)) + m.x3 = Var(initialize=0, bounds=(None, None)) + m.u1 = Param(initialize=1.125, mutable=True) + m.u2 = Param(initialize=1, mutable=True) + + m.con1 = Constraint(expr=m.x1 * m.u1 ** (0.5) - m.x2 * m.u1 <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u1 == m.x3) + + m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - m.u2) ** 2) + + return m + + @unittest.skipUnless( + SolverFactory('appsi_ipopt').available(exception_flag=False), + "Local NLP solver is not available.", + ) + def test_pyros_appsi_ipopt(self): + """ + Test PyROS usage with solver appsi ipopt + works without exceptions. + """ + m = self.simple_nlp_model() + + # Define the uncertainty set + # we take the parameter `u2` to be 'fixed' + ellipsoid = AxisAlignedEllipsoidalSet(center=[1.125, 1], half_lengths=[1, 0]) + + # Instantiate the PyROS solver + pyros_solver = SolverFactory("pyros") + + # Define subsolvers utilized in the algorithm + local_subsolver = SolverFactory('appsi_ipopt') + global_subsolver = SolverFactory("appsi_ipopt") + + # Call the PyROS solver + # note: second-stage variable and uncertain params have units + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.u1, m.u2], + uncertainty_set=ellipsoid, + local_solver=local_subsolver, + global_solver=global_subsolver, + objective_focus=ObjectiveType.worst_case, + solve_master_globally=False, + bypass_global_separation=True, + ) + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_feasible, + msg="Did not identify robust optimal solution to problem instance.", + ) + self.assertFalse( + math.isnan(results.time), + msg=( + "PyROS solve time is nan (expected otherwise since subsolver" + "time estimates are made using TicTocTimer" + ), + ) + + @unittest.skipUnless( + SolverFactory('gams:ipopt').available(exception_flag=False), + "Local NLP solver GAMS/IPOPT is not available.", + ) + def test_pyros_gams_ipopt(self): + """ + Test PyROS usage with solver GAMS ipopt + works without exceptions. + """ + m = self.simple_nlp_model() + + # Define the uncertainty set + # we take the parameter `u2` to be 'fixed' + ellipsoid = AxisAlignedEllipsoidalSet(center=[1.125, 1], half_lengths=[1, 0]) + + # Instantiate the PyROS solver + pyros_solver = SolverFactory("pyros") + + # Define subsolvers utilized in the algorithm + local_subsolver = SolverFactory('gams:ipopt') + global_subsolver = SolverFactory("gams:ipopt") + + # Call the PyROS solver + # note: second-stage variable and uncertain params have units + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1], + second_stage_variables=[m.x2], + uncertain_params=[m.u1, m.u2], + uncertainty_set=ellipsoid, + local_solver=local_subsolver, + global_solver=global_subsolver, + objective_focus=ObjectiveType.worst_case, + solve_master_globally=False, + bypass_global_separation=True, + ) + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_feasible, + msg="Did not identify robust optimal solution to problem instance.", + ) + self.assertFalse( + math.isnan(results.time), + msg=( + "PyROS solve time is nan (expected otherwise since subsolver" + "time estimates are made using TicTocTimer" + ), + ) + + @unittest.skipUnless( + baron_license_is_valid, "Global NLP solver is not available and licensed." + ) + def test_two_stg_mod_with_intersection_set(self): + """ + Test two-stage model with `AxisAlignedEllipsoidalSet` + as the uncertainty set. + """ + # define model + m = ConcreteModel() + m.x1 = Var(initialize=0, bounds=(0, None)) + m.x2 = Var(initialize=0, bounds=(0, None)) + m.x3 = Var(initialize=0, bounds=(None, None)) + m.u1 = Param(initialize=1.125, mutable=True) + m.u2 = Param(initialize=1, mutable=True) + + m.con1 = Constraint(expr=m.x1 * m.u1 ** (0.5) - m.x2 * m.u1 <= 2) + m.con2 = Constraint(expr=m.x1**2 - m.x2**2 * m.u1 == m.x3) + + m.obj = Objective(expr=(m.x1 - 4) ** 2 + (m.x2 - m.u2) ** 2) + + # construct the IntersectionSet + ellipsoid = AxisAlignedEllipsoidalSet(center=[1.125, 1], half_lengths=[1, 0]) + bset = BoxSet(bounds=[[1, 2], [0.5, 1.5]]) + iset = IntersectionSet(ellipsoid=ellipsoid, bset=bset) + + # Instantiate the PyROS solver + pyros_solver = SolverFactory("pyros") + + # Define subsolvers utilized in the algorithm + local_subsolver = SolverFactory('baron') + global_subsolver = SolverFactory("baron") + + # Call the PyROS solver + results = pyros_solver.solve( + model=m, + first_stage_variables=[m.x1, m.x2], + second_stage_variables=[], + uncertain_params=[m.u1, m.u2], + uncertainty_set=iset, + local_solver=local_subsolver, + global_solver=global_subsolver, + options={ + "objective_focus": ObjectiveType.worst_case, + "solve_master_globally": True, + }, + ) + + # check successful termination + self.assertEqual( + results.pyros_termination_condition, + pyrosTerminationCondition.robust_optimal, + msg="Did not identify robust optimal solution to problem instance.", + ) + self.assertGreater( + results.iterations, + 0, + msg="Robust infeasible model terminated in 0 iterations (nominal case).", ) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/pyros/uncertainty_sets.py b/pyomo/contrib/pyros/uncertainty_sets.py index 33768a4243f..54a268f204e 100644 --- a/pyomo/contrib/pyros/uncertainty_sets.py +++ b/pyomo/contrib/pyros/uncertainty_sets.py @@ -1,48 +1,296 @@ """ -UncertaintySet class: defines generic methods and attributes -of an uncertainty set in the context of robust optimization. UncertaintySet objects only -contain data which describes the set, and does not contain any Pyomo object information. - -Supports the following classes of uncertainty sets: - -- UncertaintySet (user defined/implemented) -- Ellipsoidal -- AxesAlignedEllipsoidal -- Polyhedral -- Box -- BudgetSet -- Cardinality/Gamma -- Discrete -- FactorModel -- IntersectedSet +Abstract and pre-defined classes for representing uncertainty sets (or +uncertain parameter spaces) of two-stage nonlinear robust optimization +models. + +Along with a ``ConcreteModel`` object representing a deterministic model +formulation, an uncertainty set object may be passed to the PyROS solver +to obtain a solution to the model's two-stage robust optimization +counterpart. + +Classes +------- +``UncertaintySet`` + Abstract base class for a generic uncertainty set. All other set + types defined in this module are subclasses. A user may implement + their own uncertainty set type as a custom-written subclass. + +``EllipsoidalSet`` + A hyperellipsoid. + +``AxisAlignedEllipsoidalSet`` + An axis-aligned hyperellipsoid. + +``PolyhedralSet`` + A bounded convex polyhedron/polytope. + +``BoxSet`` + A hyperrectangle. + +``BudgetSet`` + A budget set. + +``CardinalitySet`` + A cardinality set (or gamma set). + +``DiscreteScenarioSet`` + A discrete set of finitely many points. + +``FactorModelSet`` + A factor model set (or net-alpha model set). + +``IntersectionSet`` + An intersection of two or more sets, each represented by an + ``UncertaintySet`` object. """ + import abc -import functools import math +import functools +from numbers import Integral +from collections.abc import Iterable, MutableSequence from enum import Enum + from pyomo.common.dependencies import numpy as np, scipy as sp from pyomo.core.base import ConcreteModel, Objective, maximize, minimize, Block from pyomo.core.base.constraint import ConstraintList -from pyomo.core.base.var import Var, _VarData, IndexedVar -from pyomo.core.base.param import Param, _ParamData, IndexedParam -from pyomo.core.expr import value +from pyomo.core.base.var import Var, IndexedVar +from pyomo.core.expr.numvalue import value, native_numeric_types from pyomo.opt.results import check_optimal_termination from pyomo.contrib.pyros.util import add_bounds_for_uncertain_parameters + +valid_num_types = tuple(native_numeric_types) + + +def validate_arg_type( + arg_name, + arg_val, + valid_types, + valid_type_desc=None, + is_entry_of_arg=False, + check_numeric_type_finite=True, +): + """ + Perform type validation of an argument to a function/method. + If type is not valid, raise a TypeError with an appropriate + message. + + Parameters + ---------- + arg_name : str + Name of argument to be displayed in exception message. + arg_val : object + Value of argument to be checked. + valid_types : type or tuple of types + Valid types for the argument value. + valid_type_desc : str or None, optional + Description of valid types for the argument value; + this description is included in the exception message. + is_entry_of_arg : bool, optional + Is the argument value passed an entry of the argument + described by `arg_name` (such as entry of an array or list). + This will be indicated in the exception message. + The default is `False`. + check_numeric_type_finite : bool, optional + If the valid types comprise a sequence of numeric types, + check that the argument value is finite (and also not NaN), + as well. The default is `True`. + + Raises + ------ + TypeError + If the argument value is not a valid type. + ValueError + If the finiteness check on a numerical value returns + a negative result. + """ + if not isinstance(arg_val, valid_types): + if valid_type_desc is not None: + type_phrase = f"not {valid_type_desc}" + else: + if not isinstance(valid_types, Iterable): + valid_types = [valid_types] + valid_type_str = ", ".join(dtype.__name__ for dtype in valid_types) + type_phrase = f"not of any of the valid types ({valid_type_str})" + + if is_entry_of_arg: + raise TypeError( + f"Entry '{arg_val}' of the argument `{arg_name}` " + f"is {type_phrase} (provided type '{type(arg_val).__name__}')" + ) + else: + raise TypeError( + f"Argument `{arg_name}` is {type_phrase} " + f"(provided type '{type(arg_val).__name__}')" + ) + + # check for finiteness, if desired + if check_numeric_type_finite: + if isinstance(valid_types, type): + numeric_types_required = valid_types in valid_num_types + else: + numeric_types_required = set(valid_types).issubset(valid_num_types) + if numeric_types_required and (math.isinf(arg_val) or math.isnan(arg_val)): + if is_entry_of_arg: + raise ValueError( + f"Entry '{arg_val}' of the argument `{arg_name}` " + f"is not a finite numeric value" + ) + else: + raise ValueError( + f"Argument `{arg_name}` is not a finite numeric value " + f"(provided value '{arg_val}')" + ) + + +def is_ragged(arr, arr_types=None): + """ + Determine whether an array-like (such as a list or Numpy ndarray) + is ragged. + + NOTE: if Numpy ndarrays are considered to be arr types, + then zero-dimensional arrays are not considered to be as such. + """ + arr_types = (list, np.ndarray, tuple) if arr_types is None else arr_types + + is_zero_dim_arr = isinstance(arr, np.ndarray) and len(arr.shape) == 0 + if not isinstance(arr, arr_types) or is_zero_dim_arr: + return False + + entries_are_seqs = [] + for entry in arr: + if np.ndarray in arr_types and isinstance(entry, np.ndarray): + # account for 0-D arrays (treat as non-arrays) + entries_are_seqs.append(len(entry.shape) > 0) + else: + entries_are_seqs.append(isinstance(entry, arr_types)) + + if not any(entries_are_seqs): + return False + if not all(entries_are_seqs): + return True + + entries_ragged = [is_ragged(entry) for entry in arr] + if any(entries_ragged): + return True + else: + return any(np.array(arr[0]).shape != np.array(entry).shape for entry in arr) + + +def validate_dimensions(arr_name, arr, dim, display_value=False): + """ + Validate dimension of an array-like object. + Raise Exception if validation fails. + """ + if is_ragged(arr): + raise ValueError( + f"Argument `{arr_name}` should not be a ragged array-like " + "(nested sequence of lists, tuples, arrays of different shape)" + ) + + # check dimensions matched + array = np.asarray(arr) + if len(array.shape) != dim: + val_str = f" from provided value {str(arr)}" if display_value else "" + raise ValueError( + f"Argument `{arr_name}` must be a " + f"{dim}-dimensional array-like " + f"(detected {len(array.shape)} dimensions{val_str})" + ) + elif array.shape[-1] == 0: + raise ValueError( + f"Last dimension of argument `{arr_name}` must be non-empty " + f"(detected shape {array.shape})" + ) + + +def validate_array( + arr, arr_name, dim, valid_types, valid_type_desc=None, required_shape=None +): + """ + Validate shape and entry types of an array-like object. + + Parameters + ---------- + arr : array_like + Object to validate. + arr_name : str + A name/descriptor of the object to validate. + Usually, this is the name of an object attribute + to which the array is meant to be set. + dim : int + Required dimension of the array-like object. + valid_types : set[type] + Allowable type(s) for each entry of the array. + valid_type_desc : str or None, optional + Descriptor for the allowable types. + required_shape : list or None, optional + Specification of the length of the array in each dimension. + If `None` is provided, no specifications are imposed. + If a `list` is provided, then each entry of the list must be + an `int` specifying the required length in the dimension + corresponding to the position of the entry + or `None` (meaning no requirement for the length in the + corresponding dimension). + """ + np_arr = np.array(arr, dtype=object) + validate_dimensions(arr_name, np_arr, dim, display_value=False) + + def generate_shape_str(shape, required_shape): + shape_str = "" + assert len(shape) == len(required_shape) + for idx, (sval, rsval) in enumerate(zip(shape, required_shape)): + if rsval is None: + shape_str += "..." + else: + shape_str += f"{sval}" + if idx < len(shape) - 1: + shape_str += "," + return "(" + shape_str + ")" + + # validate shape requirements + if required_shape is not None: + assert len(required_shape) == dim + for idx, size in enumerate(required_shape): + if size is not None and size != np_arr.shape[idx]: + req_shape_str = generate_shape_str(required_shape, required_shape) + actual_shape_str = generate_shape_str(np_arr.shape, required_shape) + raise ValueError( + f"Attribute '{arr_name}' should be of shape " + f"{req_shape_str}, but detected shape " + f"{actual_shape_str}" + ) + + for val in np_arr.flat: + validate_arg_type( + arr_name, + val, + valid_types, + valid_type_desc=valid_type_desc, + is_entry_of_arg=True, + ) + + def uncertainty_sets(obj): if not isinstance(obj, UncertaintySet): - raise ValueError("Expected an UncertaintySet object, instead recieved %s" % (obj,)) + raise ValueError( + "Expected an UncertaintySet object, instead received %s" % (obj,) + ) return obj + def column(matrix, i): # Get column i of a given multi-dimensional list return [row[i] for row in matrix] + class Geometry(Enum): - ''' - Enum defining uncertainty set geometries - ''' + """ + Geometry classifications for PyROS uncertainty set objects. + """ + LINEAR = 1 CONVEX_NONLINEAR = 2 GENERAL_NONLINEAR = 3 @@ -50,24 +298,22 @@ class Geometry(Enum): class UncertaintySet(object, metaclass=abc.ABCMeta): - ''' - Base class for custom user-defined uncertainty sets. - ''' - - def __init__(self, **kwargs): - """ - Constructor for UncertaintySet base class + """ + An object representing an uncertainty set to be passed to the + PyROS solver. - Args: - kwargs: Use the kwargs for specifying data for the UncertaintySet object. This data should be used in defining constraints in the 'set_as_constraint' function. - """ - return + An `UncertaintySet` object should be viewed as merely a container + for data needed to parameterize the set it represents, + such that the object's attributes do not reference the + components of a Pyomo modeling object. + """ @property @abc.abstractmethod def dim(self): """ - Dimension of the uncertainty set, i.e., number of parameters in “uncertain_params†list. + Dimension of the uncertainty set (number of uncertain + parameters in a corresponding optimization model of interest). """ raise NotImplementedError @@ -75,11 +321,8 @@ def dim(self): @abc.abstractmethod def geometry(self): """ - UncertaintySet geometry: - 1 is linear, - 2 is convex nonlinear, - 3 is general nonlinear, - 4 is discrete. + Geometry of the uncertainty set. See the `Geometry` class + documentation. """ raise NotImplementedError @@ -87,43 +330,99 @@ def geometry(self): @abc.abstractmethod def parameter_bounds(self): """ - Bounds on the realizations of the uncertain parameters, as inferred from the uncertainty set. + Bounds for the value of each uncertain parameter constrained + by the set (i.e. bounds for each set dimension). """ raise NotImplementedError + def bounding_model(self, config=None): + """ + Make uncertain parameter value bounding problems (optimize + value of each uncertain parameter subject to constraints on the + uncertain parameters). + + Parameters + ---------- + config : None or ConfigDict, optional + If a ConfigDict is provided, then it contains + arguments passed to the PyROS solver. + + Returns + ------- + model : ConcreteModel + Bounding problem, with all Objectives deactivated. + """ + model = ConcreteModel() + model.util = Block() + + # construct param vars, initialize to nominal point + model.param_vars = Var(range(self.dim)) + + # add constraints + model.cons = self.set_as_constraint( + uncertain_params=model.param_vars, model=model, config=config + ) + + @model.Objective(range(self.dim)) + def param_var_objectives(self, idx): + return model.param_vars[idx] + + # deactivate all objectives + model.param_var_objectives.deactivate() + + return model + def is_bounded(self, config): """ - Return True if the uncertainty set is bounded, else False. - """ - # === Determine bounds on all uncertain params - bounding_model = ConcreteModel() - bounding_model.util = Block() # So that boundedness checks work for Cardinality and FactorModel sets - bounding_model.uncertain_param_vars = IndexedVar(range(len(config.uncertain_params)), initialize=1) - for idx, param in enumerate(config.uncertain_params): - bounding_model.uncertain_param_vars[idx].set_value( - param.value, skip_validation=True) - - bounding_model.add_component("uncertainty_set_constraint", - config.uncertainty_set.set_as_constraint( - uncertain_params=bounding_model.uncertain_param_vars, - model=bounding_model, - config=config - )) - - for idx, param in enumerate(list(bounding_model.uncertain_param_vars.values())): - bounding_model.add_component("lb_obj_" + str(idx), Objective(expr=param, sense=minimize)) - bounding_model.add_component("ub_obj_" + str(idx), Objective(expr=param, sense=maximize)) - - for o in bounding_model.component_data_objects(Objective): - o.deactivate() - - for i in range(len(bounding_model.uncertain_param_vars)): - for limit in ("lb", "ub"): - getattr(bounding_model, limit + "_obj_" + str(i)).activate() - res = config.global_solver.solve(bounding_model, tee=False) - getattr(bounding_model, limit + "_obj_" + str(i)).deactivate() + Determine whether the uncertainty set is bounded. + + Parameters + ---------- + config : ConfigDict + PyROS solver configuration. + + Returns + ------- + : bool + True if the uncertainty set is certified to be bounded, + and False otherwise. + + Notes + ----- + This check is carried out by solving a sequence of maximization + and minimization problems (in which the objective for each + problem is the value of a single uncertain parameter). If any of + the optimization models cannot be solved successfully to + optimality, then False is returned. + + This method is invoked during the validation step of a PyROS + solver call. + """ + bounding_model = self.bounding_model(config=config) + solver = config.global_solver + + # initialize uncertain parameter variables + for param, param_var in zip( + config.uncertain_params, bounding_model.param_vars.values() + ): + param_var.set_value(param.value, skip_validation=True) + + for idx, obj in bounding_model.param_var_objectives.items(): + # activate objective for corresponding dimension + obj.activate() + + # solve for lower bound, then upper bound + for sense in (minimize, maximize): + obj.sense = sense + res = solver.solve(bounding_model, load_solutions=False, tee=False) + if not check_optimal_termination(res): return False + + # ensure sense is minimize when done, deactivate + obj.sense = minimize + obj.deactivate() + return True def is_nonempty(self, config): @@ -134,37 +433,57 @@ def is_nonempty(self, config): def is_valid(self, config): """ - Return True if the uncertainty set is bounded and non-empty, else False. + Return True if the uncertainty set is bounded and non-empty, + else False. """ return self.is_nonempty(config=config) and self.is_bounded(config=config) @abc.abstractmethod def set_as_constraint(self, **kwargs): """ - An uncertainty set *must* have a set_as_constraint method. UncertaintySets are instantiated with "q" as - the list of uncertain param objects. Returns a Pyomo Constraint object (could - be indexed) representing the uncertainty set for use in the separation problem + Construct a (sequence of) mathematical constraint(s) + (represented by Pyomo `Constraint` objects) on the uncertain + parameters to represent the uncertainty set for use in a + two-stage robust optimization problem or subproblem (such as a + PyROS separation subproblem). - Args: - **kwargs: may be used to pass any additional information needed to generate the constraint(s) - representing the UncertaintySet + Parameters + ---------- + **kwargs : dict + Keyword arguments containing, at the very least, a sequence + of `Param` or `Var` objects representing the uncertain + parameters of interest, and any additional information + needed to generate the constraints. """ pass def point_in_set(self, point): """ - Calculates if supplied ``point`` is contained in the uncertainty set. Returns True or False. + Determine whether a given point lies in the uncertainty set. + + Parameters + ---------- + point : (N,) array-like + Point (parameter value) of interest. + + Returns + ------- + is_in_set : bool + True if the point lies in the uncertainty set, + False otherwise. - Args: - point: The point being checked for membership in the set. - The coordinates of the point should be supplied in the same order as the elements of ``uncertain_params`` - that is to be supplied to the PyROS solve statement. - This point must match the dimension of the uncertain parameters of the set. + Notes + ----- + This method is invoked at the outset of a PyROS solver call to + determine whether a user-specified nominal parameter realization + lies in the uncertainty set. """ # === Ensure point is of correct dimensionality as the uncertain parameters if len(point) != self.dim: - raise AttributeError("Point must have same dimensions as uncertain parameters.") + raise AttributeError( + "Point must have same dimensions as uncertain parameters." + ) m = ConcreteModel() the_params = [] @@ -183,12 +502,23 @@ def point_in_set(self, point): @staticmethod def add_bounds_on_uncertain_parameters(**kwargs): """ - Numerical bounds on uncertain parameters are used in separation. This method should take a separation-type model - and update the .lb() and .ub() property for each `uncertain_param_var` member of the model to a numerical value. - This could be an inferred bound based on the uncertainty set itself, or a big-M type bound. - If the bounds need to be numerically determined, return an empty list. See PolyhedralSet and IntersectedSet as examples. - :param kwargs: the separation model and uncertainty set objects should be passed here. - :return: + Specify the numerical bounds for the uncertain parameters + restricted by the set. Each uncertain parameter is represented + by a Pyomo `Var` object in a model passed to this method, + and the numerical bounds are specified by setting the + `.lb()` and `.ub()` attributes of the `Var` object. + + Parameters + ---------- + kwargs : dict + Keyword arguments consisting of a Pyomo `ConfigDict` and a + Pyomo `ConcreteModel` object, representing a PyROS solver + configuration and the optimization model of interest. + + Notes + ----- + This method is invoked in advance of a PyROS separation + subproblem. """ config = kwargs.pop('config') model = kwargs.pop('model') @@ -199,63 +529,329 @@ def add_bounds_on_uncertain_parameters(**kwargs): p.setub(parameter_bounds[i][1]) +class UncertaintySetList(MutableSequence): + """ + Wrapper around a list of uncertainty sets, all of which have + an immutable common dimension. + + Parameters + ---------- + uncertainty_sets : iterable, optional + Sequence of uncertainty sets. + name : str or None, optional + Name of the uncertainty set list. + min_length : int or None, optional + Minimum required length of the sequence. If `None` is + provided, then the minimum required length is set to 0. + """ + + def __init__(self, uncertainty_sets=[], name=None, min_length=None): + """Initialize self (see class docstring).""" + self._name = name + self._min_length = 0 if min_length is None else min_length + + # check minimum length requirement satisfied + initlist = list(uncertainty_sets) + if len(initlist) < self._min_length: + raise ValueError( + f"Attempting to initialize uncertainty set list " + f"{self._name!r} " + f"of minimum required length {self._min_length} with an " + f"iterable of length {len(initlist)}" + ) + + # validate first entry of initial list. + # The common dimension is set to that of the first entry + # if validation is successful + self._dim = None + if initlist: + self._validate(initlist[0]) + + # now initialize the list + self._list = [] + self.extend(initlist) + + def __len__(self): + """Length of the list contained in self.""" + return len(self._list) + + def __repr__(self): + """Return repr(self).""" + return f"{self.__class__.__name__}({repr(self._list)})" + + def __getitem__(self, idx): + """Return self[idx].""" + return self._list[idx] + + def __setitem__(self, idx, value): + """Set self[idx] = value.""" + if self._index_is_valid(idx): + # perform validation and length check only if + # index is valid, so that exceptions due to + # index referencing (wrong type, out of range) + # are raised in update attempt + self._validate(value) + self._check_length_update(idx, value) + + self._list[idx] = value + + def __delitem__(self, idx): + """Perform del self[idx].""" + if self._index_is_valid(idx): + self._check_length_update(idx, []) + del self._list[idx] + + def clear(self): + """Remove all items from the list.""" + self._check_length_update(slice(0, len(self)), []) + self._list.clear() + + def insert(self, idx, value): + """Insert an object before index denoted by idx.""" + if isinstance(idx, Integral): + # index should be valid. Validate value before + # inserting + self._validate(value, single_item=True) + self._list.insert(idx, value) + + def _index_is_valid(self, idx, allow_int_only=False): + """ + Object to be used as list index is within range of + list contained within self. + + Parameters + ---------- + idx : object + List index. Usually an integer type or slice. + allow_int_only : bool, optional + Being an integral type is a necessary condition + for validity. The default is True. + + Returns + ------- + : bool + True if index is valid, False otherwise. + """ + try: + self._list[idx] + except (TypeError, IndexError): + slice_valid = False + else: + slice_valid = True + + # if only integer types allowed, then must be an integer type + int_req_satisfied = not allow_int_only or isinstance(idx, Integral) + + return slice_valid and int_req_satisfied + + def _check_length_update(self, idx, value): + """ + Check whether the update ``self[idx] = value`` reduces the + length of self to a value smaller than the minimum length. + + Raises + ------ + ValueError + If minimum length requirement is violated by the update. + """ + if isinstance(idx, Integral): + slice_len = 1 + else: + slice_len = len(self._list[idx]) + + val_len = len(value) if isinstance(value, Iterable) else 1 + new_len = len(self) + val_len - slice_len + if new_len < self._min_length: + raise ValueError( + f"Length of uncertainty set list {self._name!r} must " + f"be at least {self._min_length}" + ) + + def _validate(self, value, single_item=False): + """ + Validate item or sequence of items to be inserted into self. + + Parameters + ---------- + value : object + Object to validate. + single_item : bool, optional + Do not allow validation of iterables of objects + (e.g. a list of ``UncertaintySet`` objects). + The default is `False`. + + Raises + ------ + TypeError + If object passed is not of the appropriate type + (``UncertaintySet``, or an iterable thereof). + ValueError + If object passed is (or contains) an ``UncertaintySet`` + whose dimension does not match that of other uncertainty + sets in self. + """ + if not single_item and isinstance(value, Iterable): + for val in value: + self._validate(val, single_item=True) + else: + validate_arg_type( + self._name, + value, + UncertaintySet, + "An `UncertaintySet` object", + is_entry_of_arg=True, + ) + if self._dim is None: + # common dimension is now set + self._dim = value.dim + else: + # ensure set added matches common dimension + if value.dim != self._dim: + raise ValueError( + f"Uncertainty set list with name {self._name!r} " + f"contains UncertaintySet objects of dimension " + f"{self._dim}, but attempting to add set of dimension " + f"{value.dim}" + ) + + @property + def dim(self): + """Dimension of all uncertainty sets contained in self.""" + return self._dim + + class BoxSet(UncertaintySet): """ - Hyper-rectangle (a.k.a. "Box") + A hyper-rectangle (a.k.a. "box"). + + Parameters + ---------- + bounds : (N, 2) array_like + Lower and upper bounds for each dimension of the set. + + Examples + -------- + 1D box set (interval): + + >>> from pyomo.contrib.pyros import BoxSet + >>> interval = BoxSet(bounds=[(1, 2)]) + >>> interval.bounds + array([[1, 2]]) + + 2D box set: + + >>> box_set = BoxSet(bounds=[[1, 2], [3, 4]]) + >>> box_set.bounds + array([[1, 2], + [3, 4]]) + + 5D hypercube with bounds 0 and 1 in each dimension: + + >>> hypercube_5d = BoxSet(bounds=[[0, 1] for idx in range(5)]) + >>> hypercube_5d.bounds + array([[0, 1], + [0, 1], + [0, 1], + [0, 1], + [0, 1]]) """ def __init__(self, bounds): + """Initialize self (see class docstring).""" + self.bounds = bounds + + @property + def type(self): + """ + str : Brief description of the type of the uncertainty set. """ - BoxSet constructor - - Args: - bounds: A list of tuples providing lower and upper bounds (lb, ub) for each uncertain parameter, in the same order as the 'uncertain_params' required input that is to be supplied to the PyROS solve statement. - """ - # === non-empty bounds - if len(bounds) == 0: - raise AttributeError("Vector of bounds must be non-empty") - # === Real number valued bounds - if not all(isinstance(bound, (int, float)) for tup in bounds for bound in tup): - raise AttributeError("Bounds must be real numbers.") - # === Ensure no bound is None e.g. all are bounded - if any(bound is None for tup in bounds for bound in tup): - raise AttributeError("All bounds for uncertain parameters must be real numbers, not None.") - # === Ensure each tuple has a lower and upper bound - if not all(len(b) == 2 for b in bounds): - raise AttributeError("Vector of bounds must include a finite lb and ub for each uncertain parameter") - # === Ensure each lb <= ub - if not all(bound[0] <= bound[1] for bound in bounds): - raise AttributeError("Lower bounds must be less than or equal to upper bounds") + return "box" - self.bounds = bounds - self.type = "box" + @property + def bounds(self): + """ + (N, 2) numpy.ndarray : Lower and upper bounds for each dimension + of the set. + + The bounds of a `BoxSet` instance can be changed, such that + the dimension of the set remains unchanged. + """ + return self._bounds + + @bounds.setter + def bounds(self, val): + validate_array( + arr=val, + arr_name="bounds", + dim=2, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=[None, 2], + ) + + bounds_arr = np.array(val) + + for lb, ub in bounds_arr: + if lb > ub: + raise ValueError(f"Lower bound {lb} exceeds upper bound {ub}") + + # box set dimension is immutable + if hasattr(self, "_bounds") and bounds_arr.shape[0] != self.dim: + raise ValueError( + "Attempting to set bounds of a box set of dimension " + f"{self.dim} to a value of dimension {bounds_arr.shape[0]}" + ) + self._bounds = np.array(val) @property def dim(self): """ - Dimension of the uncertainty set, i.e., number of parameters in “uncertain_params†list. + int : Dimension `N` of the box set. """ return len(self.bounds) @property def geometry(self): + """ + Geometry of the box set. + See the `Geometry` class documentation. + """ return Geometry.LINEAR @property def parameter_bounds(self): """ - Bounds on the realizations of the uncertain parameters, as inferred from the uncertainty set. + Bounds in each dimension of the box set. + This is numerically equivalent to the `bounds` attribute. + + Returns + ------- + : list of tuples + List, length `N`, of 2-tuples. Each tuple + specifies the bounds in its corresponding + dimension. """ - return self.bounds + return [tuple(bound) for bound in self.bounds] def set_as_constraint(self, uncertain_params, **kwargs): """ - Function to generate constraints for the BoxSet uncertainty set. + Construct a list of box constraints on a given sequence + of uncertain parameter objects. - Args: - uncertain_params: uncertain parameter objects for writing constraint objects + Parameters + ---------- + uncertain_params : list of Param or list of Var + Uncertain parameter objects upon which the constraints + are imposed. + **kwargs : dict, optional + Additional arguments. These arguments are currently + ignored. + + Returns + ------- + conlist : ConstraintList + The constraints on the uncertain parameters. """ - conlist = ConstraintList() conlist.construct() @@ -270,73 +866,210 @@ def set_as_constraint(self, uncertain_params, **kwargs): class CardinalitySet(UncertaintySet): """ - Cardinality-constrained (a.k.a "Gamma") uncertainty set + A cardinality-constrained (a.k.a. "gamma") set. + + Parameters + ---------- + origin : (N,) array_like + Origin of the set (e.g., nominal uncertain parameter values). + positive_deviation : (N,) array_like + Maximal non-negative coordinate deviation from the origin + in each dimension. + gamma : numeric type + Upper bound for the number of uncertain parameters which + may realize their maximal deviations from the origin + simultaneously. + + Examples + -------- + A 3D cardinality set: + + >>> from pyomo.contrib.pyros import CardinalitySet + >>> gamma_set = CardinalitySet( + ... origin=[0, 0, 0], + ... positive_deviation=[1.0, 2.0, 1.5], + ... gamma=1, + ... ) + >>> gamma_set.origin + array([0, 0, 0]) + >>> gamma_set.positive_deviation + array([1. , 2. , 1.5]) + >>> gamma_set.gamma + 1 """ def __init__(self, origin, positive_deviation, gamma): - """ - CardinalitySet constructor - - Args: - origin: The origin of the set (e.g., the nominal point). - positive_deviation: Vector (``list``) of maximal deviations of each parameter. - gamma: Scalar to bound the total number of uncertain parameters that can maximally deviate from their respective 'origin'. Setting 'gamma = 0' reduces the set to the 'origin' point. Setting 'gamma' to be equal to the number of parameters produces the hyper-rectangle [origin, origin+positive_deviation] - """ - # === Real number valued data - if not all(isinstance(elem, (int, float)) for elem in origin): - raise AttributeError("Elements of origin vector must be numeric.") - if not all(isinstance(elem, (int, float)) for elem in positive_deviation): - raise AttributeError("Elements of positive_deviation vector must be numeric") - # === Dimension of positive_deviations and origin must be same - if len(origin) != len(positive_deviation): - raise AttributeError("Vectors for origin and positive_deviation must have same dimensions.") - # === Gamma between 0,1 - if gamma < 0 or gamma > len(origin): - raise AttributeError("Gamma parameter must be in [0, n].") - # === positive_deviations must all be >= 0 - if any(elem < 0 for elem in positive_deviation): - raise AttributeError("Elements of positive_deviations vector must be non-negative.") - # === Non-emptiness is implied - + """Initialize self (see class docstring).""" self.origin = origin self.positive_deviation = positive_deviation self.gamma = gamma - self.type = "cardinality" + + @property + def type(self): + """ + str : Brief description of the type of the uncertainty set. + """ + return "cardinality" + + @property + def origin(self): + """ + (N,) numpy.ndarray : Origin of the cardinality set + (e.g. nominal parameter values). + """ + return self._origin + + @origin.setter + def origin(self, val): + validate_array( + arr=val, + arr_name="origin", + dim=1, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + ) + + # dimension of the set is immutable + val_arr = np.array(val) + if hasattr(self, "_origin"): + if val_arr.size != self.dim: + raise ValueError( + "Attempting to set attribute 'origin' of cardinality " + f"set of dimension {self.dim} " + f"to value of dimension {val_arr.size}" + ) + + self._origin = val_arr + + @property + def positive_deviation(self): + """ + (N,) numpy.ndarray : Maximal coordinate deviations from the + origin in each dimension. All entries are nonnegative. + """ + return self._positive_deviation + + @positive_deviation.setter + def positive_deviation(self, val): + validate_array( + arr=val, + arr_name="positive_deviation", + dim=1, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + ) + + for dev_val in val: + if dev_val < 0: + raise ValueError( + f"Entry {dev_val} of attribute 'positive_deviation' " + f"is negative value" + ) + + val_arr = np.array(val) + + # dimension of the set is immutable + if hasattr(self, "_origin"): + if val_arr.size != self.dim: + raise ValueError( + "Attempting to set attribute 'positive_deviation' of " + f"cardinality set of dimension {self.dim} " + f"to value of dimension {val_arr.size}" + ) + + self._positive_deviation = val_arr + + @property + def gamma(self): + """ + numeric type : Upper bound for the number of uncertain + parameters which may maximally deviate from their respective + origin values simultaneously. Must be a numerical value ranging + from 0 to the set dimension `N`. + + Note that, mathematically, setting `gamma` to 0 reduces the set + to a singleton containing the center, while setting `gamma` to + the set dimension `N` makes the set mathematically equivalent + to a `BoxSet` with bounds + ``numpy.array([origin, origin + positive_deviation]).T``. + """ + return self._gamma + + @gamma.setter + def gamma(self, val): + validate_arg_type("gamma", val, valid_num_types, "a valid numeric type", False) + if val < 0 or val > self.dim: + raise ValueError( + "Cardinality set attribute " + f"'gamma' must be a real number between 0 and dimension " + f"{self.dim} " + f"(provided value {val})" + ) + + self._gamma = val @property def dim(self): """ - Dimension of the uncertainty set, i.e., number of parameters in “uncertain_params†list. + int : Dimension `N` of the cardinality set. """ return len(self.origin) @property def geometry(self): + """ + Geometry of the cardinality set. + See the `Geometry` class documentation. + """ return Geometry.LINEAR @property def parameter_bounds(self): """ - Bounds on the realizations of the uncertain parameters, as inferred from the uncertainty set. - """ + Bounds in each dimension of the cardinality set. + Returns + ------- + : list of tuples + List, length `N`, of 2-tuples. Each tuple + specifies the bounds in its corresponding + dimension. + """ nom_val = self.origin deviation = self.positive_deviation gamma = self.gamma - parameter_bounds = [(nom_val[i], nom_val[i] + min(gamma, 1) * deviation[i]) for i in range(len(nom_val))] + parameter_bounds = [ + (nom_val[i], nom_val[i] + min(gamma, 1) * deviation[i]) + for i in range(len(nom_val)) + ] return parameter_bounds def set_as_constraint(self, uncertain_params, **kwargs): """ - Function to generate constraints for the CardinalitySet uncertainty set. + Construct a list of cardinality set constraints on + a sequence of uncertain parameter objects. - Args: - uncertain_params: uncertain parameter objects for writing constraint objects + Parameters + ---------- + uncertain_params : list of Param or list of Var + Uncertain parameter objects upon which the constraints + are imposed. + **kwargs : dict + Additional arguments. This dictionary should consist + of a `model` entry, which maps to a `ConcreteModel` + object representing the model of interest (parent model + of the uncertain parameter objects). + + Returns + ------- + conlist : ConstraintList + The constraints on the uncertain parameters. """ - # === Ensure dimensions if len(uncertain_params) != len(self.origin): - raise AttributeError("Dimensions of origin and uncertain_param lists must be equal.") + raise AttributeError( + "Dimensions of origin and uncertain_param lists must be equal." + ) model = kwargs['model'] set_i = list(range(len(uncertain_params))) @@ -346,7 +1079,10 @@ def set_as_constraint(self, uncertain_params, **kwargs): conlist = ConstraintList() conlist.construct() for i in set_i: - conlist.add(self.origin[i] + self.positive_deviation[i] * model.util.cassi[i] == uncertain_params[i]) + conlist.add( + self.origin[i] + self.positive_deviation[i] * model.util.cassi[i] + == uncertain_params[i] + ) conlist.add(sum(model.util.cassi[i] for i in set_i) <= self.gamma) @@ -354,19 +1090,26 @@ def set_as_constraint(self, uncertain_params, **kwargs): def point_in_set(self, point): """ - Calculates if supplied ``point`` is contained in the uncertainty set. Returns True or False. + Determine whether a given point lies in the cardinality set. - Args: - point: the point being checked for membership in the set - """ + Parameters + ---------- + point : (N,) array-like + Point (parameter value) of interest. + Returns + ------- + : bool + True if the point lies in the set, False otherwise. + """ cassis = [] for i in range(self.dim): if self.positive_deviation[i] > 0: - cassis.append((point[i] - self.origin[i])/self.positive_deviation[i]) + cassis.append((point[i] - self.origin[i]) / self.positive_deviation[i]) - if sum(cassi for cassi in cassis) <= self.gamma and \ - all(cassi >= 0 and cassi <= 1 for cassi in cassis): + if sum(cassi for cassi in cassis) <= self.gamma and all( + cassi >= 0 and cassi <= 1 for cassi in cassis + ): return True else: return False @@ -374,83 +1117,231 @@ def point_in_set(self, point): class PolyhedralSet(UncertaintySet): """ - Polyhedral uncertainty set + A bounded convex polyhedron or polytope. + + Parameters + ---------- + lhs_coefficients_mat : (M, N) array_like + Left-hand side coefficients for the linear + inequality constraints defining the polyhedral set. + rhs_vec : (M,) array_like + Right-hand side values for the linear inequality + constraints defining the polyhedral set. + Each entry is an upper bound for the quantity + ``lhs_coefficients_mat @ x``, where `x` is an (N,) + array representing any point in the polyhedral set. + + Examples + -------- + 2D polyhedral set with 4 defining inequalities: + + >>> from pyomo.contrib.pyros import PolyhedralSet + >>> pset = PolyhedralSet( + ... lhs_coefficients_mat=[[-1, 0], [0, -1], [-1, 1], [1, 0]], + ... rhs_vec=[0, 0, 0, 1], + ... ) + >>> pset.coefficients_mat + array([[-1, 0], + [ 0, -1], + [-1, 1], + [ 1, 0]]) + >>> pset.rhs_vec + array([0, 0, 0, 1]) """ def __init__(self, lhs_coefficients_mat, rhs_vec): + """Initialize self (see class docstring).""" + # set attributes to copies of the originals + self.coefficients_mat = lhs_coefficients_mat + self.rhs_vec = rhs_vec + + # validate nonemptiness and boundedness here. + # This check is only performed at construction. + self._validate() + + def _validate(self): + """ + Check polyhedral set attributes are such that set is nonempty + (solve a feasibility problem). + + Raises + ------ + ValueError + If set is empty, or the check was not + successfully completed due to numerical issues. + """ + # solve LP + res = sp.optimize.linprog( + c=np.zeros(self.coefficients_mat.shape[1]), + A_ub=self.coefficients_mat, + b_ub=self.rhs_vec, + method="simplex", + bounds=(None, None), + ) + + # check termination + if res.status == 1 or res.status == 4: + raise ValueError( + "Could not verify nonemptiness of the " + "polyhedral set (`scipy.optimize.linprog(method=simplex)` " + f" status {res.status}) " + ) + elif res.status == 2: + raise ValueError( + "PolyhedralSet defined by 'coefficients_mat' and " + "'rhs_vec' is empty. Check arguments" + ) + + @property + def type(self): """ - PolyhedralSet constructor - - Args: - lhs_coefficients_mat: Matrix of left-hand side coefficients for the linear inequality constraints defining the polyhedral set. - rhs_vec: Vector (``list``) of right-hand side values for the linear inequality constraints defining the polyhedral set. - """ - - # === Real valued data - mat = np.asarray(lhs_coefficients_mat) - if not all(isinstance(elem, (int, float)) for row in lhs_coefficients_mat for elem in row): - raise AttributeError("Matrix lhs_coefficients_mat must be real-valued and numeric.") - if not all(isinstance(elem, (int, float)) for elem in rhs_vec): - raise AttributeError("Vector rhs_vec must be real-valued and numeric.") - # === Check columns of A must be same length as rhs - if mat.shape[0] != len(rhs_vec): - raise AttributeError("Rows of lhs_coefficients_mat matrix must equal length of rhs_vec list.") - # === Columns are non-zero - if mat.shape[1] == 0: - raise AttributeError("Columns of lhs_coefficients_mat must be non-zero.") - # === Matrix is not all zeros - if all(np.isclose(elem, 0) for row in lhs_coefficients_mat for elem in row): - raise AttributeError("Matrix lhs_coefficients_mat cannot be all zeroes.") - # === Non-emptiness - res = sp.optimize.linprog(c=np.zeros(mat.shape[1]), A_ub=mat, b_ub=rhs_vec, method="simplex") - if not res.success: - raise AttributeError("User-defined PolyhedralSet was determined to be empty. " - "Please check the set of constraints supplied during set construction.") - # === Boundedness - if res.status == 3: - # scipy linprog status == 3 indicates unboundedness - raise AttributeError("User-defined PolyhedralSet was determined to be unbounded. " - "Please augment the set of constraints supplied during set construction.") + str : Brief description of the type of the uncertainty set. + """ + return "polyhedral" + @property + def coefficients_mat(self): + """ + (M, N) numpy.ndarray : Coefficient matrix for the (linear) + inequality constraints defining the polyhedral set. + + In tandem with the `rhs_vec` attribute, this matrix should + be such that the polyhedral set is nonempty and bounded. + Such a check is performed only at instance construction. + """ + return self._coefficients_mat + + @coefficients_mat.setter + def coefficients_mat(self, val): + validate_array( + arr=val, + arr_name="coefficients_mat", + dim=2, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + lhs_coeffs_arr = np.array(val) + + # check no change in set dimension + if hasattr(self, "_coefficients_mat"): + if lhs_coeffs_arr.shape[1] != self.dim: + raise ValueError( + f"Polyhedral set attribute 'coefficients_mat' must have " + f"{self.dim} columns to match set dimension " + f"(provided matrix with {lhs_coeffs_arr.shape[1]} columns)" + ) + + # check shape match with rhs vector + if hasattr(self, "_rhs_vec"): + if lhs_coeffs_arr.shape[0] != self.rhs_vec.size: + raise ValueError( + "PolyhedralSet attribute 'coefficients_mat' " + f"must have {self.rhs_vec.size} rows " + f"to match shape of attribute 'rhs_vec' " + f"(provided {lhs_coeffs_arr.shape[0]} rows)" + ) + + # check no column is all zeros. otherwise, set is unbounded + cols_with_all_zeros = np.nonzero( + [np.all(col == 0) for col in lhs_coeffs_arr.T] + )[0] + if cols_with_all_zeros.size > 0: + col_str = ", ".join(str(val) for val in cols_with_all_zeros) + raise ValueError( + "Attempting to set attribute 'coefficients_mat' to value " + f"with all entries zero in columns at indexes: {col_str}. " + "Ensure column has at least one nonzero entry" + ) - self.coefficients_mat = lhs_coefficients_mat - self.rhs_vec = rhs_vec - self.type = "polyhedral" + self._coefficients_mat = lhs_coeffs_arr + + @property + def rhs_vec(self): + """ + (M,) numpy.ndarray : Right-hand side values (upper bounds) for + the (linear) inequality constraints defining the polyhedral set. + """ + return self._rhs_vec + + @rhs_vec.setter + def rhs_vec(self, val): + validate_array( + arr=val, + arr_name="rhs_vec", + dim=1, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + rhs_vec_arr = np.array(val) + + # ensure shape of coefficients matrix + # and rhs vec match + if hasattr(self, "_coefficients_mat"): + if len(val) != self.coefficients_mat.shape[0]: + raise ValueError( + "PolyhedralSet attribute 'rhs_vec' " + f"must have {self.coefficients_mat.shape[0]} entries " + f"to match shape of attribute 'coefficients_mat' " + f"(provided {rhs_vec_arr.size} entries)" + ) + + self._rhs_vec = rhs_vec_arr @property def dim(self): """ - Dimension of the uncertainty set, i.e., number of parameters in “uncertain_params†list. + int : Dimension `N` of the polyhedral set. """ return len(self.coefficients_mat[0]) @property def geometry(self): + """ + Geometry of the polyhedral set. + See the `Geometry` class documentation. + """ return Geometry.LINEAR @property def parameter_bounds(self): """ - Bounds on the realizations of the uncertain parameters, as inferred from the uncertainty set. - PolyhedralSet bounds are not computed at set construction because they cannot be algebraically determined - and require access to an optimization solver. + Bounds in each dimension of the polyhedral set. + + Currently, an empty `list` is returned, as the bounds cannot, in + general, be computed without access to an optimization solver. """ - # For the PolyhedralSet, these are numerically determined - # in the algorithm therefore they cannot presently be determined at construction of the set. return [] def set_as_constraint(self, uncertain_params, **kwargs): """ - Function to generate constraints for the PolyhedralSet uncertainty set. + Construct a list of polyhedral constraints on a given sequence + of uncertain parameter objects. - Args: - uncertain_params: uncertain parameter objects for writing constraint objects + Parameters + ---------- + uncertain_params : list of Param or list of Var + Uncertain parameter objects upon which the constraints + are imposed. + **kwargs : dict, optional + Additional arguments. These arguments are currently + ignored. + + Returns + ------- + conlist : ConstraintList + The constraints on the uncertain parameters. """ # === Ensure valid dimensions of lhs and rhs w.r.t uncertain_params if np.asarray(self.coefficients_mat).shape[1] != len(uncertain_params): - raise AttributeError("Columns of coefficients_mat matrix " - "must equal length of uncertain parameters list.") + raise AttributeError( + "Columns of coefficients_mat matrix " + "must equal length of uncertain parameters list." + ) set_i = list(range(len(self.coefficients_mat))) @@ -467,219 +1358,648 @@ def set_as_constraint(self, uncertain_params, **kwargs): @staticmethod def add_bounds_on_uncertain_parameters(model, config): - ''' - Add bounds on uncertain parameters + """ + Specify the numerical bounds for each of a sequence of uncertain + parameters, represented by Pyomo `Var` objects, in a modeling + object. The numerical bounds are specified through the `.lb()` + and `.ub()` attributes of the `Var` objects. + + Parameters + ---------- + model : ConcreteModel + Model of interest (parent model of the uncertain parameter + objects for which to specify bounds). + config : ConfigDict + PyROS solver config. - Args: - model: The model to add bounds on for the uncertain parameter variable objects - config: the config object for the PyROS solver instance - ''' + Notes + ----- + This method is invoked in advance of a PyROS separation + subproblem. + """ add_bounds_for_uncertain_parameters(model=model, config=config) - return -class BudgetSet(PolyhedralSet): +class BudgetSet(UncertaintySet): """ - Budget uncertainty set + A budget set. + + Parameters + ---------- + budget_membership_mat : (L, N) array_like + Incidence matrix of the budget constraints. + Each row corresponds to a single budget constraint, + and defines which uncertain parameters + (which dimensions) participate in that row's constraint. + rhs_vec : (L,) array_like + Budget limits (upper bounds) with respect to + the origin of the set. + origin : (N,) array_like or None, optional + Origin of the budget set. If `None` is provided, then + the origin is set to the zero vector. + + Examples + -------- + 3D budget set with one budget constraint and + no origin chosen (hence origin defaults to 3D zero vector): + + >>> from pyomo.contrib.pyros import BudgetSet + >>> budget_set = BudgetSet( + ... budget_membership_mat=[[1, 1, 1]], + ... rhs_vec=[2], + ... ) + >>> budget_set.budget_membership_mat + array([[1, 1, 1]]) + >>> budget_set.budget_rhs_vec + array([2]) + >>> budget_set.origin + array([0., 0., 0.]) + + 3D budget set with two budget constraints and custom origin: + + >>> budget_custom = BudgetSet( + ... budget_membership_mat=[[1, 0, 1], [0, 1, 0]], + ... rhs_vec=[1, 1], + ... origin=[2, 2, 2], + ... ) + >>> budget_custom.budget_membership_mat + array([[1, 0, 1], + [0, 1, 0]]) + >>> budget_custom.budget_rhs_vec + array([1, 1]) + >>> budget_custom.origin + array([2, 2, 2]) """ - def __init__(self, budget_membership_mat, rhs_vec): + def __init__(self, budget_membership_mat, rhs_vec, origin=None): + """Initialize self (see class docstring).""" + self.budget_membership_mat = budget_membership_mat + self.budget_rhs_vec = rhs_vec + self.origin = np.zeros(self.dim) if origin is None else origin + + @property + def type(self): + """ + str : Brief description of the type of the uncertainty set. + """ + return "budget" + + @property + def coefficients_mat(self): """ - BudgetSet constructor + (L + N, N) numpy.ndarray : Coefficient matrix of all polyhedral + constraints defining the budget set. Composed from the incidence + matrix used for defining the budget constraints and a + coefficient matrix for individual uncertain parameter + nonnegativity constraints. - Args: - budget_membership_mat: A matrix with 0-1 entries to designate which uncertain parameters participate in each budget constraint. Here, each row is associated with a separate budget constraint. - rhs_vec: Vector (``list``) of right-hand side values for the budget constraints. + This attribute cannot be set. The budget constraint + incidence matrix may be altered through the + `budget_membership_mat` attribute. """ - # === Non-zero number of columns - mat = np.asarray(budget_membership_mat) - rhs = np.asarray(rhs_vec) + return np.append(self.budget_membership_mat, -np.identity(self.dim), axis=0) - if len(mat.shape) == 1: - cols = mat.shape - else: - cols = mat.shape[1] - if cols == 0: - raise AttributeError("Budget membership matrix must have non-zero number of columns.") - # === Assert is valid matrix (same number of columns across all rows - if not all(len(row) == cols for row in budget_membership_mat): - raise AttributeError("Budget membership matrix must be a valid matrix, " - "e.g. same number of column entries across rows.") - # === Matrix dimension compatibility - if mat.shape[0] != rhs.shape[0] : - raise AttributeError("Rows of lhs_coefficients_mat matrix must equal rows of rhs_vec lists.") - # === Ensure a 0-1 matrix - if any(not np.isclose(elem, 0) and not np.isclose(elem, 1) for row in budget_membership_mat for elem in row): - raise AttributeError("Budget membership matrix must be a matrix of 0's and 1's.") - # === No all zero rows - if all(elem == 0 for row in budget_membership_mat for elem in row): - raise AttributeError("All zero rows are not permitted in the budget membership matrix.") - - # === Ensure 0 <= rhs_i for all i - if any(rhs_vec[i] < 0 for i in range(len(rhs_vec))): - raise AttributeError("RHS vector entries must be >= 0.") - # === Non-emptiness is implied by the set - - # === Add constraints such that uncertain params are >= 0 - # === This adds >=0 bound on all parameters in the set - cols = mat.shape[1] - identity = np.identity(cols) * -1 - for row in identity: - budget_membership_mat.append(row.tolist()) - - for i in range(identity.shape[0]): - rhs_vec.append(0) - - self.coefficients_mat = budget_membership_mat - self.rhs_vec = rhs_vec + @property + def rhs_vec(self): + """ + (L + N,) numpy.ndarray : Right-hand side vector for polyhedral + constraints defining the budget set. This also includes entries + for nonnegativity constraints on the uncertain parameters. + + This attribute cannot be set, and is automatically determined + given other attributes. + """ + return np.append( + self.budget_rhs_vec + self.budget_membership_mat @ self.origin, -self.origin + ) + + @property + def budget_membership_mat(self): + """ + (L, N) numpy.ndarray : Incidence matrix of the budget + constraints. Each row corresponds to a single budget + constraint and defines which uncertain parameters + participate in that row's constraint. + """ + return self._budget_membership_mat + + @budget_membership_mat.setter + def budget_membership_mat(self, val): + validate_array( + arr=val, + arr_name="budget_membership_mat", + dim=2, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + lhs_coeffs_arr = np.array(val) + + # check dimension match + if hasattr(self, "_budget_membership_mat"): + if lhs_coeffs_arr.shape[1] != self.dim: + raise ValueError( + f"BudgetSet attribute 'budget_membership_mat' " + "must have " + f"{self.dim} columns to match set dimension " + f"(provided matrix with {lhs_coeffs_arr.shape[1]} columns)" + ) + + # check shape match with rhs vector + if hasattr(self, "_budget_rhs_vec"): + if lhs_coeffs_arr.shape[0] != self.budget_rhs_vec.size: + raise ValueError( + "BudgetSet attribute 'budget_membership_mat' " + f"must have {self.budget_rhs_vec.size} rows " + f"to match shape of attribute 'budget_rhs_vec' " + f"(provided {lhs_coeffs_arr.shape[0]} rows)" + ) + + # ensure all entries are 0-1 values + uniq_entries = np.unique(lhs_coeffs_arr) + non_bool_entries = uniq_entries[(uniq_entries != 0) & (uniq_entries != 1)] + if non_bool_entries.size > 0: + raise ValueError( + "Attempting to set attribute `budget_membership_mat` to value " + "containing entries that are not 0-1 values " + f"(example: {non_bool_entries[0]}). " + "Ensure all entries are of value 0 or 1" + ) - self.type = "budget" + # check no row is all zeros + rows_with_zero_sums = np.nonzero(lhs_coeffs_arr.sum(axis=1) == 0)[0] + if rows_with_zero_sums.size > 0: + row_str = ", ".join(str(val) for val in rows_with_zero_sums) + raise ValueError( + "Attempting to set attribute `budget_membership_mat` to value " + f"with all entries zero in rows at indexes: {row_str}. " + "Ensure each row and column has at least one nonzero entry" + ) + + # check no column is all zeros + cols_with_zero_sums = np.nonzero(lhs_coeffs_arr.sum(axis=0) == 0)[0] + if cols_with_zero_sums.size > 0: + col_str = ", ".join(str(val) for val in cols_with_zero_sums) + raise ValueError( + "Attempting to set attribute `budget_membership_mat` to value " + f"with all entries zero in columns at indexes: {col_str}. " + "Ensure each row and column has at least one nonzero entry" + ) + + # matrix is valid; update + self._budget_membership_mat = lhs_coeffs_arr + + @property + def budget_rhs_vec(self): + """ + (L,) numpy.ndarray : Budget limits (upper bounds) + with respect to the origin. + """ + return self._budget_rhs_vec + + @budget_rhs_vec.setter + def budget_rhs_vec(self, val): + validate_array( + arr=val, + arr_name="budget_rhs_vec", + dim=1, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + rhs_vec_arr = np.array(val) + + # ensure shape of coefficients matrix + # and rhs vec match + if hasattr(self, "_budget_membership_mat"): + if len(val) != self.budget_membership_mat.shape[0]: + raise ValueError( + "Budget set attribute 'budget_rhs_vec' " + f"must have {self.budget_membership_mat.shape[0]} entries " + f"to match shape of attribute 'budget_membership_mat' " + f"(provided {rhs_vec_arr.size} entries)" + ) + + # ensure all entries are nonnegative + for entry in rhs_vec_arr: + if entry < 0: + raise ValueError( + f"Entry {entry} of attribute 'budget_rhs_vec' is " + "negative. Ensure all entries are nonnegative" + ) + + self._budget_rhs_vec = rhs_vec_arr + + @property + def origin(self): + """ + (N,) numpy.ndarray : Origin of the budget set. + """ + return self._origin + + @origin.setter + def origin(self, val): + validate_array( + arr=val, + arr_name="origin", + dim=1, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + origin_arr = np.array(val) + + # ensure shape of coefficients matrix + # and rhs vec match + if len(val) != self.dim: + raise ValueError( + "Budget set attribute 'origin' " + f"must have {self.dim} entries " + f"to match set dimension " + f"(provided {origin_arr.size} entries)" + ) + + self._origin = origin_arr @property def dim(self): """ - Dimension of the uncertainty set, i.e., number of parameters in “uncertain_params†list. + int : Dimension `N` of the budget set. """ - return np.asarray(self.coefficients_mat).shape[1] + return self.budget_membership_mat.shape[1] @property def geometry(self): + """ + Geometry of the budget set. + See the `Geometry` class documentation. + """ return Geometry.LINEAR @property def parameter_bounds(self): """ - Bounds on the realizations of the uncertain parameters, as inferred from the uncertainty set. + Bounds in each dimension of the budget set. + + Returns + ------- + : list of tuples + List, length `N`, of 2-tuples. Each tuple + specifies the bounds in its corresponding + dimension. """ - membership_mat = np.asarray(self.coefficients_mat) - rhs_vec = self.rhs_vec - parameter_bounds = [] - for i in range(membership_mat.shape[1]): - col = column(membership_mat, i) - ub = min(list(col[j] * rhs_vec[j] for j in range(len(rhs_vec)))) - lb = 0 - parameter_bounds.append((lb, ub)) - return parameter_bounds + bounds = [] + for orig_val, col in zip(self.origin, self.budget_membership_mat.T): + lb = orig_val + ub = orig_val + np.min(self.budget_rhs_vec[col == 1]) + bounds.append((lb, ub)) + + return bounds def set_as_constraint(self, uncertain_params, **kwargs): """ - Function to generate constraints for the BudgetSet uncertainty set. + Construct a list of the constraints defining the budget + set on a given sequence of uncertain parameter objects. - Args: - uncertain_params: uncertain parameter objects for writing constraint objects + Parameters + ---------- + uncertain_params : list of Param or list of Var + Uncertain parameter objects upon which the constraints + are imposed. + **kwargs : dict, optional + Additional arguments. These arguments are currently + ignored. + + Returns + ------- + conlist : ConstraintList + The constraints on the uncertain parameters. """ - # === Ensure matrix cols == len uncertain params - if np.asarray(self.coefficients_mat).shape[1] != len(uncertain_params): - raise AttributeError("Budget membership matrix must have compatible " - "dimensions with uncertain parameters vector.") + if self.dim != len(uncertain_params): + raise ValueError( + f"Argument 'uncertain_params' must contain {self.dim}" + "Param objects to match BudgetSet dimension" + f"(provided {len(uncertain_params)} objects)" + ) - conlist = PolyhedralSet.set_as_constraint(self, uncertain_params) - return conlist + return PolyhedralSet.set_as_constraint(self, uncertain_params) @staticmethod def add_bounds_on_uncertain_parameters(model, config): - # In this case, we use the UncertaintySet class method because we have numerical parameter_bounds + """ + Specify the numerical bounds for each of a sequence of uncertain + parameters, represented by Pyomo `Var` objects, in a modeling + object. The numerical bounds are specified through the `.lb()` + and `.ub()` attributes of the `Var` objects. + + Parameters + ---------- + model : ConcreteModel + Model of interest (parent model of the uncertain parameter + objects for which to specify bounds). + config : ConfigDict + PyROS solver config. + + Notes + ----- + This method is invoked in advance of a PyROS separation + subproblem. + """ + # In this case, we use the UncertaintySet class method + # because we have numerical parameter_bounds UncertaintySet.add_bounds_on_uncertain_parameters(model=model, config=config) + class FactorModelSet(UncertaintySet): """ - Factor model (a.k.a. "net-alpha" model) uncertainty set + A factor model (a.k.a. "net-alpha" model) set. + + Parameters + ---------- + origin : (N,) array_like + Uncertain parameter values around which deviations are + restrained. + number_of_factors : int + Natural number representing the dimensionality of the + space to which the set projects. + psi_mat : (N, F) array_like + Matrix designating each uncertain parameter's contribution to + each factor. Each row is associated with a separate uncertain + parameter. Each column is associated with a separate factor. + Number of columns `F` of `psi_mat` should be equal to + `number_of_factors`. + beta : numeric type + Real value between 0 and 1 specifying the fraction of the + independent factors that can simultaneously attain + their extreme values. + + Examples + -------- + A 4D factor model set with a 2D factor space: + + >>> from pyomo.contrib.pyros import FactorModelSet + >>> import numpy as np + >>> fset = FactorModelSet( + ... origin=np.zeros(4), + ... number_of_factors=2, + ... psi_mat=np.full(shape=(4, 2), fill_value=0.1), + ... beta=0.5, + ... ) + >>> fset.origin + array([0., 0., 0., 0.]) + >>> fset.number_of_factors + 2 + >>> fset.psi_mat + array([[0.1, 0.1], + [0.1, 0.1], + [0.1, 0.1], + [0.1, 0.1]]) + >>> fset.beta + 0.5 """ def __init__(self, origin, number_of_factors, psi_mat, beta): - """ - FactorModelSet constructor - - Args: - origin: Vector (``list``) of uncertain parameter values around which deviations are restrained. - number_of_factors: Natural number representing the dimensionality of the space to which the set projects. - psi: Matrix with non-negative entries designating each uncertain parameter's contribution to each factor. Here, each row is associated with a separate uncertain parameter and each column with a separate factor. - beta: Number in [0,1] representing the fraction of the independent factors that can simultaneously attain their extreme values. Setting 'beta = 0' will enforce that as many factors will be above 0 as there will be below 0 (i.e., "zero-net-alpha" model). Setting 'beta = 1' produces the hyper-rectangle [origin - psi e, origin + psi e], where 'e' is the vector of ones. - """ - mat = np.asarray(psi_mat) - # === Numeric valued arrays - if not all(isinstance(elem, (int, float)) for elem in origin): - raise AttributeError("All elements of origin vector must be numeric.") - if not all(isinstance(elem, (int, float)) for row in psi_mat for elem in row): - raise AttributeError("All elements of psi_mat vector must be numeric.") - if not isinstance(beta, (int, float)): - raise AttributeError("Beta parameter must be numeric.") - if not isinstance(number_of_factors, (int)): - raise AttributeError("number_of_factors must be integer.") - # === Ensure dimensions of psi are n x F - if mat.shape != (len(origin), number_of_factors): - raise AttributeError("Psi matrix must be of dimensions n x F where n is dim(uncertain_params)" - "and F is number_of_factors.") - # === Ensure beta in [0,1] - if beta > 1 or beta < 0: - raise AttributeError("Beta parameter must be in [0,1].") - # === No all zero columns of psi_mat - for idx in range(mat.shape[1]): - if all(np.isclose(elem, 0) for elem in mat[:,idx]): - raise AttributeError("Psi matrix cannot have all zero columns.") - # === Psi must be strictly positive entries - for idx in range(mat.shape[1]): - if any(elem < 0 for elem in mat[:,idx]): - raise AttributeError("Psi matrix cannot have any negative entries. All factors must be non-negative.") - + """Initialize self (see class docstring).""" self.origin = origin self.number_of_factors = number_of_factors - self.psi_mat = psi_mat self.beta = beta - self.type = "factor_model" + self.psi_mat = psi_mat + + @property + def type(self): + """ + str : Brief description of the type of the uncertainty set. + """ + return "factor_model" + + @property + def origin(self): + """ + (N,) numpy.ndarray : Uncertain parameter values around which + deviations are restrained. + """ + return self._origin + + @origin.setter + def origin(self, val): + validate_array( + arr=val, + arr_name="origin", + dim=1, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + ) + + # dimension of the set is immutable + val_arr = np.array(val) + if hasattr(self, "_origin"): + if val_arr.size != self.dim: + raise ValueError( + "Attempting to set attribute 'origin' of factor model " + f"set of dimension {self.dim} " + f"to value of dimension {val_arr.size}" + ) + + self._origin = val_arr + + @property + def number_of_factors(self): + """ + int : Natural number representing the dimensionality `F` + of the space to which the set projects. + + This attribute is immutable, and may only be set at + object construction. Typically, the number of factors + is significantly less than the set dimension, but no + restriction to that end is imposed here. + """ + return self._number_of_factors + + @number_of_factors.setter + def number_of_factors(self, val): + if hasattr(self, "_number_of_factors"): + raise AttributeError("Attribute 'number_of_factors' is immutable") + else: + # validate type and value + validate_arg_type("number_of_factors", val, Integral) + if val < 1: + raise ValueError( + "Attribute 'number_of_factors' must be a positive int " + f"(provided value {val})" + ) + self._number_of_factors = val + + @property + def psi_mat(self): + """ + (N, F) numpy.ndarray : Matrix designating each + uncertain parameter's contribution to each factor. Each row is + associated with a separate uncertain parameter. Each column with + a separate factor. + """ + return self._psi_mat + + @psi_mat.setter + def psi_mat(self, val): + validate_array( + arr=val, + arr_name="psi_mat", + dim=2, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + psi_mat_arr = np.array(val) + + # validate shape (check it matches set dimensions) + # origin and number of factors already set + if psi_mat_arr.shape != (self.dim, self.number_of_factors): + raise ValueError( + "Psi matrix for factor model set " + f"should be of shape {self.dim, self.number_of_factors} " + f"to match the set and factor model space dimensions " + f"(provided shape {psi_mat_arr.shape})" + ) + + # check values acceptable + for column in psi_mat_arr.T: + if np.allclose(column, 0): + raise ValueError( + "Each column of attribute 'psi_mat' should have at least " + "one nonzero entry" + ) + + self._psi_mat = psi_mat_arr + + @property + def beta(self): + """ + numeric type : Real number ranging from 0 to 1 representing the + fraction of the independent factors that can simultaneously + attain their extreme values. + + Note that, mathematically, setting ``beta = 0`` will enforce + that as many factors will be above 0 as there will be below 0 + (i.e., "zero-net-alpha" model). If ``beta = 1``, + then the set is numerically equivalent to a `BoxSet` with bounds + ``[origin - psi @ np.ones(F), origin + psi @ np.ones(F)].T``. + """ + return self._beta + + @beta.setter + def beta(self, val): + if val > 1 or val < 0: + raise ValueError( + "Beta parameter must be a real number between 0 " + f"and 1 inclusive (provided value {val})" + ) + + self._beta = val @property def dim(self): """ - Dimension of the uncertainty set, i.e., number of parameters in “uncertain_params†list. + int : Dimension `N` of the factor model set. """ return len(self.origin) @property def geometry(self): + """ + Geometry of the factor model set. + See the `Geometry` class documentation. + """ return Geometry.LINEAR @property def parameter_bounds(self): """ - Bounds on the realizations of the uncertain parameters, as inferred from the uncertainty set. + Bounds in each dimension of the factor model set. + + Returns + ------- + : list of tuples + List, length `N`, of 2-tuples. Each tuple + specifies the bounds in its corresponding + dimension. """ - nom_val = self.origin + F = self.number_of_factors psi_mat = self.psi_mat - F = self.number_of_factors - beta_F = self.beta * F - floor_beta_F = math.floor(beta_F) + # evaluate some important quantities + beta_F = self.beta * self.number_of_factors + crit_pt_type = int((beta_F + F) / 2) + beta_F_fill_in = (beta_F + F) - 2 * crit_pt_type - 1 + + # argsort rows of psi_mat in descending order + row_wise_args = np.argsort(-psi_mat, axis=1) + parameter_bounds = [] - for i in range(len(nom_val)): - non_decreasing_factor_row = sorted(psi_mat[i], reverse=True) - # deviation = sum_j=1^floor(beta F) {psi_if_j} + (beta F - floor(beta F)) psi_{if_{betaF +1}} - # because indexing starts at 0, we adjust the limit on the sum and the final factor contribution - if beta_F - floor_beta_F == 0: - deviation = sum(non_decreasing_factor_row[j] for j in range(floor_beta_F - 1)) + for idx, orig_val in enumerate(self.origin): + # number nonnegative values in row + M = len(psi_mat[idx][psi_mat[idx] >= 0]) + + # argsort psi matrix row in descending order + sorted_psi_row_args = row_wise_args[idx] + sorted_psi_row = psi_mat[idx, sorted_psi_row_args] + + # now evaluate max deviation from origin + # (depends on number nonneg entries and critical point type) + if M > crit_pt_type: + max_deviation = ( + sorted_psi_row[:crit_pt_type].sum() + + beta_F_fill_in * sorted_psi_row[crit_pt_type] + - sorted_psi_row[crit_pt_type + 1 :].sum() + ) + elif M < F - crit_pt_type: + max_deviation = ( + sorted_psi_row[: F - crit_pt_type - 1].sum() + - beta_F_fill_in * sorted_psi_row[F - crit_pt_type - 1] + - sorted_psi_row[F - crit_pt_type :].sum() + ) else: - deviation = sum(non_decreasing_factor_row[j] for j in range(floor_beta_F - 1)) + ( - beta_F - floor_beta_F) * psi_mat[i][floor_beta_F] - lb = nom_val[i] - deviation - ub = nom_val[i] + deviation - if lb > ub: - raise AttributeError("The computed lower bound on uncertain parameters must be less than or equal to the upper bound.") - parameter_bounds.append((lb, ub)) + max_deviation = sorted_psi_row[:M].sum() - sorted_psi_row[M:].sum() + + # finally, evaluate the bounds for this dimension + parameter_bounds.append( + (orig_val - max_deviation, orig_val + max_deviation) + ) + return parameter_bounds def set_as_constraint(self, uncertain_params, **kwargs): """ - Function to generate constraints for the FactorModelSet uncertainty set. + Construct a list of factor model constraints on a given sequence + of uncertain parameter objects. - Args: - uncertain_params: uncertain parameter objects for writing constraint objects + Parameters + ---------- + uncertain_params : list of Param or list of Var + Uncertain parameter objects upon which the constraints + are imposed. + **kwargs : dict + Additional arguments. This dictionary should consist + of a `model` entry, which maps to a `ConcreteModel` + object representing the model of interest (parent model + of the uncertain parameter objects). + + Returns + ------- + conlist : ConstraintList + The constraints on the uncertain parameters. """ model = kwargs['model'] # === Ensure dimensions if len(uncertain_params) != len(self.origin): - raise AttributeError("Dimensions of origin and uncertain_param lists must be equal.") + raise AttributeError( + "Dimensions of origin and uncertain_param lists must be equal." + ) # Make F-dim cassi variable n = list(range(self.number_of_factors)) @@ -688,92 +2008,213 @@ def set_as_constraint(self, uncertain_params, **kwargs): conlist = ConstraintList() conlist.construct() - disturbances = [sum(self.psi_mat[i][j] * model.util.cassi[j] for j in n) - for i in range(len(uncertain_params))] + disturbances = [ + sum(self.psi_mat[i][j] * model.util.cassi[j] for j in n) + for i in range(len(uncertain_params)) + ] # Make n equality constraints for i in range(len(uncertain_params)): conlist.add(self.origin[i] + disturbances[i] == uncertain_params[i]) - conlist.add(sum(model.util.cassi[i] for i in n) <= +self.beta * self.number_of_factors) - conlist.add(sum(model.util.cassi[i] for i in n) >= -self.beta * self.number_of_factors) + conlist.add( + sum(model.util.cassi[i] for i in n) <= +self.beta * self.number_of_factors + ) + conlist.add( + sum(model.util.cassi[i] for i in n) >= -self.beta * self.number_of_factors + ) return conlist - def point_in_set(self, point): """ - Calculates if supplied ``point`` is contained in the uncertainty set. Returns True or False. + Determine whether a given point lies in the factor model set. - Args: - point: the point being checked for membership in the set + Parameters + ---------- + point : (N,) array-like + Point (parameter value) of interest. + + Returns + ------- + : bool + True if the point lies in the set, False otherwise. """ inv_psi = np.linalg.pinv(self.psi_mat) diff = np.asarray(list(point[i] - self.origin[i] for i in range(len(point)))) cassis = np.dot(inv_psi, np.transpose(diff)) - if abs(sum(cassi for cassi in cassis)) <= self.beta * self.number_of_factors and \ - all(cassi >= -1 and cassi <= 1 for cassi in cassis): + if abs( + sum(cassi for cassi in cassis) + ) <= self.beta * self.number_of_factors and all( + cassi >= -1 and cassi <= 1 for cassi in cassis + ): return True else: return False class AxisAlignedEllipsoidalSet(UncertaintySet): - ''' - Axis-aligned ellipsoidal uncertainty set - ''' + """ + An axis-aligned ellipsoid. + + Parameters + ---------- + center : (N,) array_like + Center of the ellipsoid. + half_lengths : (N,) array_like + Semi-axis lengths of the ellipsoid. + + Examples + -------- + 3D origin-centered unit hypersphere: + + >>> from pyomo.contrib.pyros import AxisAlignedEllipsoidalSet + >>> sphere = AxisAlignedEllipsoidalSet( + ... center=[0, 0, 0], + ... half_lengths=[1, 1, 1] + ... ) + >>> sphere.center + array([0, 0, 0]) + >>> sphere.half_lengths + array([1, 1, 1]) + + """ + def __init__(self, center, half_lengths): + """Initialize self (see class docstring).""" + self.center = center + self.half_lengths = half_lengths + + @property + def type(self): + """ + str : Brief description of the type of the uncertainty set. """ - AxisAlignedEllipsoidalSet constructor + return "ellipsoidal" - Args: - center: Vector (``list``) of uncertain parameter values around which deviations are restrained. - half_lengths: Vector (``list``) of half-length values representing the maximal deviations for each uncertain parameter. + @property + def center(self): """ - # === Valid data in lists - if not all(isinstance(elem, (int, float)) for elem in half_lengths): - raise AttributeError("Vector of half-lengths must be real-valued and numeric.") - if not all(isinstance(elem, (int, float)) for elem in center): - raise AttributeError("Vector center must be real-valued and numeric.") - if any(elem < 0 for elem in half_lengths): - raise AttributeError("Half length values must be nonnegative.") - # === Valid variance dimensions - if not len(center) == len(half_lengths): - raise AttributeError("Half lengths and center of ellipsoid must have same dimensions.") + (N,) numpy.ndarray : Center of the ellipsoid. + """ + return self._center + + @center.setter + def center(self, val): + validate_array( + arr=val, + arr_name="center", + dim=1, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + val_arr = np.array(val) - self.center=center - self.half_lengths=half_lengths - self.type="ellipsoidal" + # dimension of the set is immutable + if hasattr(self, "_center"): + if val_arr.size != self.dim: + raise ValueError( + "Attempting to set attribute 'center' of " + f"AxisAlignedEllipsoidalSet of dimension {self.dim} " + f"to value of dimension {val_arr.size}" + ) + + self._center = val_arr + + @property + def half_lengths(self): + """ + (N,) numpy.ndarray : Semi-axis lengths. + """ + return self._half_lengths + + @half_lengths.setter + def half_lengths(self, val): + validate_array( + arr=val, + arr_name="half_lengths", + dim=1, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + val_arr = np.array(val) + + # dimension of the set is immutable + if hasattr(self, "_center"): + if val_arr.size != self.dim: + raise ValueError( + "Attempting to set attribute 'half_lengths' of " + f"AxisAlignedEllipsoidalSet of dimension {self.dim} " + f"to value of dimension {val_arr.size}" + ) + + # ensure half-lengths are non-negative + for half_len in val_arr: + if half_len < 0: + raise ValueError( + f"Entry {half_len} of 'half_lengths' " + "is negative. All half-lengths must be nonnegative" + ) + + self._half_lengths = val_arr @property def dim(self): """ - Dimension of the uncertainty set, i.e., number of parameters in “uncertain_params†list. + int : Dimension `N` of the axis-aligned ellipsoidal set. """ return len(self.center) @property def geometry(self): + """ + Geometry of the axis-aligned ellipsoidal set. + See the `Geometry` class documentation. + """ return Geometry.CONVEX_NONLINEAR @property def parameter_bounds(self): """ - Bounds on the realizations of the uncertain parameters, as inferred from the uncertainty set. + Bounds in each dimension of the axis-aligned ellipsoidal set. + + Returns + ------- + : list of tuples + List, length `N`, of 2-tuples. Each tuple + specifies the bounds in its corresponding + dimension. """ nom_value = self.center - half_length =self.half_lengths - parameter_bounds = [(nom_value[i] - half_length[i], nom_value[i] + half_length[i]) for i in range(len(nom_value))] + half_length = self.half_lengths + parameter_bounds = [ + (nom_value[i] - half_length[i], nom_value[i] + half_length[i]) + for i in range(len(nom_value)) + ] return parameter_bounds def set_as_constraint(self, uncertain_params, model=None, config=None): """ - Generate constraint(s) for the `AxisAlignedEllipsoidSet` - class. + Construct a list of ellipsoidal constraints on a given sequence + of uncertain parameter objects. - Args: - uncertain_params: uncertain parameter objects for writing - constraint objects. Indexed parameters are accepted, and + Parameters + ---------- + uncertain_params : {IndexedParam, IndexedVar, list of Param/Var} + Uncertain parameter objects upon which the constraints + are imposed. Indexed parameters are accepted, and are unpacked for constraint generation. + **kwargs : dict, optional + Additional arguments. These arguments are currently + ignored. + + Returns + ------- + conlist : ConstraintList + The constraints on the uncertain parameters. """ all_params = list() @@ -816,105 +2257,269 @@ def set_as_constraint(self, uncertain_params, model=None, config=None): class EllipsoidalSet(UncertaintySet): """ - Ellipsoidal uncertainty set + A general ellipsoid. + + Parameters + ---------- + center : (N,) array-like + Center of the ellipsoid. + shape_matrix : (N, N) array-like + A positive definite matrix characterizing the shape + and orientation of the ellipsoid. + scale : numeric type, optional + Square of the factor by which to scale the semi-axes + of the ellipsoid (i.e. the eigenvectors of the shape + matrix). The default is `1`. + + Examples + -------- + 3D origin-centered unit hypersphere: + + >>> from pyomo.contrib.pyros import EllipsoidalSet + >>> import numpy as np + >>> hypersphere = EllipsoidalSet( + ... center=[0, 0, 0], + ... shape_matrix=np.eye(3), + ... scale=1, + ... ) + >>> hypersphere.center + array([0, 0, 0]) + >>> hypersphere.shape_matrix + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) + >>> hypersphere.scale + 1 + + A 2D ellipsoid with custom rotation and scaling: + + >>> rotated_ellipsoid = EllipsoidalSet( + ... center=[1, 1], + ... shape_matrix=[[4, 2], [2, 4]], + ... scale=0.5, + ... ) + >>> rotated_ellipsoid.center + array([1, 1]) + >>> rotated_ellipsoid.shape_matrix + array([[4, 2], + [2, 4]]) + >>> rotated_ellipsoid.scale + 0.5 + """ def __init__(self, center, shape_matrix, scale=1): + """Initialize self (see class docstring).""" + self.center = center + self.shape_matrix = shape_matrix + self.scale = scale + + @property + def type(self): + """ + str : Brief description of the type of the uncertainty set. """ - EllipsoidalSet constructor. + return "ellipsoidal" + + @property + def center(self): + """ + (N,) numpy.ndarray : Center of the ellipsoid. + """ + return self._center + + @center.setter + def center(self, val): + validate_array( + arr=val, + arr_name="center", + dim=1, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + val_arr = np.array(val) + + # dimension of the set is immutable + if hasattr(self, "_center"): + if val_arr.size != self.dim: + raise ValueError( + "Attempting to set attribute 'center' of " + f"AxisAlignedEllipsoidalSet of dimension {self.dim} " + f"to value of dimension {val_arr.size}" + ) + + self._center = val_arr + + @staticmethod + def _verify_positive_definite(matrix): + """ + Verify that a given symmetric square matrix is positive + definite. An exception is raised if the square matrix + is not positive definite. Parameters ---------- - center : (N,) array-like - Center of the ellipsoid. - shape_matrix : (N, N) array-like - A positive definite matrix characterizing the shape - and orientation of the ellipsoid. - scale : float - Square of the factor by which to scale the semi-axes - of the ellipsoid (i.e. the eigenvectors of the covariance - matrix). - """ - - # === Valid data in lists/matrixes - if not all(isinstance(elem, (int, float)) for row in shape_matrix for elem in row): - raise AttributeError("Matrix shape_matrix must be real-valued and numeric.") - if not all(isinstance(elem, (int, float)) for elem in center): - raise AttributeError("Vector center must be real-valued and numeric.") - if not isinstance(scale, (int, float)): - raise AttributeError("Ellipse scale must be a real-valued numeric.") - # === Valid matrix dimensions - num_cols = len(shape_matrix[0]) - if not all(len(row) == num_cols for row in shape_matrix): - raise AttributeError("Shape matrix must have valid matrix dimensions.") - # === Ensure shape_matrix is a square matrix - array_shape_mat = np.asarray(shape_matrix) - if array_shape_mat.shape[0] != array_shape_mat.shape[1]: - raise AttributeError("Shape matrix must be square.") - # === Ensure dimensions of shape_matrix are same as dimensions of uncertain_params - if array_shape_mat.shape[1] != len(center): - raise AttributeError("Shape matrix must be " - "same dimensions as vector of uncertain parameters.") - # === Symmetric shape_matrix - if not np.all(np.abs(array_shape_mat-array_shape_mat.T) < 1e-8): - raise AttributeError("Shape matrix must be symmetric.") - # === Ensure scale is non-negative - if scale < 0: - raise AttributeError("Scale of ellipse (rhs) must be non-negative.") - # === Check if shape matrix is invertible - try: - np.linalg.inv(shape_matrix) - except np.linalg.LinAlgError as err: - raise("Error with shape matrix supplied to EllipsoidalSet object being singular. %s" % err) - # === Check is shape matrix is positive semidefinite - if not all(np.linalg.eigvals(shape_matrix) >= 0): - raise("Non positive-semidefinite shape matrix.") - # === Ensure matrix is not degenerate, for determining inferred bounds - try: - for i in range(len(shape_matrix)): - np.power(shape_matrix[i][i], 0.5) - except: - raise AttributeError("Shape matrix must be non-degenerate.") + matrix : (N, N) array_like + Candidate matrix. + + Raises + ------ + ValueError + If matrix is not symmetric, not positive definite, + or the square roots of the diagonal entries are + not accessible. + LinAlgError + If matrix is not invertible. + """ + matrix = np.array(matrix) + + if not np.allclose(matrix, matrix.T, atol=1e-8): + raise ValueError("Shape matrix must be symmetric.") + + # Numpy raises LinAlgError if not invertible + np.linalg.inv(matrix) + + # check positive semi-definite. + # since also invertible, means positive definite + eigvals = np.linalg.eigvals(matrix) + if np.min(eigvals) < 0: + raise ValueError( + "Non positive-definite shape matrix " + f"(detected eigenvalues {eigvals})" + ) - self.center = center - self.shape_matrix = shape_matrix - self.scale = scale - self.type = "ellipsoidal" + # check roots of diagonal entries accessible + # (should theoretically be true if positive definite) + for diag_entry in np.diagonal(matrix): + if np.isnan(np.power(diag_entry, 0.5)): + raise ValueError( + "Cannot evaluate square root of the diagonal entry " + f"{diag_entry} of argument `shape_matrix`. " + "Check that this entry is nonnegative" + ) + + @property + def shape_matrix(self): + """ + (N, N) numpy.ndarray : A positive definite matrix characterizing + the shape and orientation of the ellipsoid. + """ + return self._shape_matrix + + @shape_matrix.setter + def shape_matrix(self, val): + validate_array( + arr=val, + arr_name="shape_matrix", + dim=2, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + shape_mat_arr = np.array(val) + + # check matrix shape matches set dimension + if hasattr(self, "_center"): + if not all(size == self.dim for size in shape_mat_arr.shape): + raise ValueError( + f"EllipsoidalSet attribute 'shape_matrix' " + f"must be a square matrix of size " + f"{self.dim} to match set dimension " + f"(provided matrix with shape {shape_mat_arr.shape})" + ) + + self._verify_positive_definite(shape_mat_arr) + self._shape_matrix = shape_mat_arr + + @property + def scale(self): + """ + numeric type : Square of the factor by which to scale the + semi-axes of the ellipsoid (i.e. the eigenvectors of the shape + matrix). + """ + return self._scale + + @scale.setter + def scale(self, val): + validate_arg_type("scale", val, valid_num_types, "a valid numeric type", False) + if val < 0: + raise ValueError( + "EllipsoidalSet attribute " + f"'scale' must be a non-negative real " + f"(provided value {val})" + ) + + self._scale = val @property def dim(self): """ - Dimension of the uncertainty set, i.e., number of parameters in “uncertain_params†list. + int : Dimension `N` of the ellipsoidal set. """ return len(self.center) @property def geometry(self): + """ + Geometry of the ellipsoidal set. + See the `Geometry` class documentation. + """ return Geometry.CONVEX_NONLINEAR @property def parameter_bounds(self): """ - Bounds on the realizations of the uncertain parameters, as inferred from the uncertainty set. + Bounds in each dimension of the ellipsoidal set. + + Returns + ------- + : list of tuples + List, length `N`, of 2-tuples. Each tuple + specifies the bounds in its corresponding + dimension. """ scale = self.scale nom_value = self.center P = self.shape_matrix - parameter_bounds = [(nom_value[i] - np.power(P[i][i] * scale, 0.5), - nom_value[i] + np.power(P[i][i] * scale, 0.5)) for i in range(self.dim)] + parameter_bounds = [ + ( + nom_value[i] - np.power(P[i][i] * scale, 0.5), + nom_value[i] + np.power(P[i][i] * scale, 0.5), + ) + for i in range(self.dim) + ] return parameter_bounds def set_as_constraint(self, uncertain_params, **kwargs): """ - Function to generate constraints for the EllipsoidalSet uncertainty set. + Construct a list of ellipsoidal constraints on a given sequence + of uncertain parameter objects. + + Parameters + ---------- + uncertain_params : {IndexedParam, IndexedVar, list of Param/Var} + Uncertain parameter objects upon which the constraints + are imposed. Indexed parameters are accepted, and + are unpacked for constraint generation. + **kwargs : dict, optional + Additional arguments. These arguments are currently + ignored. - Args: - uncertain_params: uncertain parameter objects for writing constraint objects + Returns + ------- + conlist : ConstraintList + The constraints on the uncertain parameters. """ inv_covar = np.linalg.inv(self.shape_matrix) if len(uncertain_params) != len(self.center): - raise AttributeError("Center of ellipsoid must be same dimensions as vector of uncertain parameters.") + raise AttributeError( + "Center of ellipsoid must be same dimensions as vector of uncertain parameters." + ) # Calculate row vector of differences diff = [] @@ -927,7 +2532,10 @@ def set_as_constraint(self, uncertain_params, **kwargs): diff.append(uncertain_params[idx] - self.center[idx]) # Calculate inner product of difference vector and covar matrix - product1 = [sum([x * y for x, y in zip(diff, column(inv_covar, i))]) for i in range(len(inv_covar))] + product1 = [ + sum([x * y for x, y in zip(diff, column(inv_covar, i))]) + for i in range(len(inv_covar)) + ] constraint = sum([x * y for x, y in zip(product1, diff)]) conlist = ConstraintList() @@ -938,71 +2546,142 @@ def set_as_constraint(self, uncertain_params, **kwargs): class DiscreteScenarioSet(UncertaintySet): """ - Set of discrete scenarios (i.e., finite collection of realizations) + A discrete set of finitely many uncertain parameter realizations + (or scenarios). + + Parameters + ---------- + scenarios : (M, N) array_like + A sequence of `M` distinct uncertain parameter realizations. + + Examples + -------- + 2D set with three scenarios: + + >>> from pyomo.contrib.pyros import DiscreteScenarioSet + >>> discrete_set = DiscreteScenarioSet( + ... scenarios=[[1, 1], [2, 1], [1, 2]], + ... ) + >>> discrete_set.scenarios + [(1, 1), (2, 1), (1, 2)] + """ def __init__(self, scenarios): - """ - DiscreteScenarioSet constructor + """Initialize self (see class docstring).""" + # Standardize to list of tuples + self.scenarios = scenarios - Args: - scenarios: Vector (``list``) of discrete scenarios where each scenario represents a realization of the uncertain parameters. + @property + def type(self): """ + str : Brief description of the type of the uncertainty set. + """ + return "discrete" - # === Non-empty - if len(scenarios) == 0: - raise AttributeError("Scenarios list must be non-empty.") - # === Each scenario must be of real numbers - if not all(isinstance(elem, (int, float)) for d in scenarios for elem in d): - raise AttributeError("Each scenario must consist of real-number values for each parameter.") - # === Confirm all scenarios are of same dimensionality - dim = len(scenarios[0]) - if not all(len(d)==dim for d in scenarios): - raise AttributeError("All points in list of scenarios must be same dimension.") - - # Standardize to list of tuples - self.scenarios = list(tuple(s) for s in scenarios) # set of discrete points which are distinct realizations of uncertain params - self.type = "discrete" + @property + def scenarios(self): + """ + list of tuples : Uncertain parameter realizations comprising the + set. Each tuple is an uncertain parameter realization. + + Note that the `scenarios` attribute may be modified, but + only such that the dimension of the set remains unchanged. + """ + return self._scenarios + + @scenarios.setter + def scenarios(self, val): + validate_array( + arr=val, + arr_name="scenarios", + dim=2, + valid_types=valid_num_types, + valid_type_desc="a valid numeric type", + required_shape=None, + ) + + scenario_arr = np.array(val) + if hasattr(self, "_scenarios"): + if scenario_arr.shape[1] != self.dim: + raise ValueError( + f"DiscreteScenarioSet attribute 'scenarios' must have " + f"{self.dim} columns to match set dimension " + f"(provided array-like with {scenario_arr.shape[1]} " + "columns)" + ) + + self._scenarios = [tuple(s) for s in val] @property def dim(self): """ - Dimension of the uncertainty set, i.e., number of parameters in “uncertain_params†list. + int : Dimension `N` of the discrete scenario set. """ return len(self.scenarios[0]) @property def geometry(self): + """ + Geometry of the discrete scenario set. + See the `Geometry` class documentation. + """ return Geometry.DISCRETE_SCENARIOS @property def parameter_bounds(self): """ - Bounds on the realizations of the uncertain parameters, as inferred from the uncertainty set. + Bounds in each dimension of the discrete scenario set. + + Returns + ------- + : list of tuples + List, length `N`, of 2-tuples. Each tuple + specifies the bounds in its corresponding + dimension. """ - parameter_bounds = [(min(s[i] for s in self.scenarios), - max(s[i] for s in self.scenarios)) for i in range(self.dim)] + parameter_bounds = [ + (min(s[i] for s in self.scenarios), max(s[i] for s in self.scenarios)) + for i in range(self.dim) + ] return parameter_bounds def is_bounded(self, config): - ''' - DiscreteScenarios is bounded by default due to finiteness of the set. - :param config: - :return: True - ''' + """ + Return True if the uncertainty set is bounded, and False + otherwise. + + By default, the discrete scenario set is bounded, + as the entries of all uncertain parameter scenarios + are finite. + """ return True def set_as_constraint(self, uncertain_params, **kwargs): """ - Function to generate constraints for the EllipsoidalSet uncertainty set. + Construct a list of constraints on a given sequence + of uncertain parameter objects. - Args: - uncertain_params: uncertain parameter objects for writing constraint objects + Parameters + ---------- + uncertain_params : list of Param or list of Var + Uncertain parameter objects upon which the constraints + are imposed. + **kwargs : dict, optional + Additional arguments. These arguments are currently + ignored. + + Returns + ------- + conlist : ConstraintList + The constraints on the uncertain parameters. """ # === Ensure point is of correct dimensionality as the uncertain parameters dim = len(uncertain_params) if any(len(d) != dim for d in self.scenarios): - raise AttributeError("All scenarios must have same dimensions as uncertain parameters.") + raise AttributeError( + "All scenarios must have same dimensions as uncertain parameters." + ) conlist = ConstraintList() conlist.construct() @@ -1016,74 +2695,144 @@ def set_as_constraint(self, uncertain_params, **kwargs): def point_in_set(self, point): """ - Calculates if supplied ``point`` is contained in the uncertainty set. Returns True or False. + Determine whether a given point lies in the discrete + scenario set. + + Parameters + ---------- + point : (N,) array-like + Point (parameter value) of interest. - Args: - point: the point being checked for membership in the set + Returns + ------- + : bool + True if the point lies in the set, False otherwise. """ # Round all double precision to a tolerance num_decimals = 8 - rounded_scenarios = list(list(round(num, num_decimals) for num in d) for d in self.scenarios) + rounded_scenarios = list( + list(round(num, num_decimals) for num in d) for d in self.scenarios + ) rounded_point = list(round(num, num_decimals) for num in point) - return any(rounded_point==rounded_d for rounded_d in rounded_scenarios) + return any(rounded_point == rounded_d for rounded_d in rounded_scenarios) class IntersectionSet(UncertaintySet): """ - Set stemming from intersecting previously constructed sets of any type + An intersection of a sequence of uncertainty sets, each of which + is represented by an `UncertaintySet` object. + + Parameters + ---------- + **unc_sets : dict + PyROS `UncertaintySet` objects of which to construct + an intersection. At least two uncertainty sets must + be provided. All sets must be of the same dimension. + + Examples + -------- + Intersection of origin-centered 2D box (square) and 2D + hypersphere (circle): + + >>> from pyomo.contrib.pyros import ( + ... BoxSet, AxisAlignedEllipsoidalSet, IntersectionSet, + ... ) + >>> square = BoxSet(bounds=[[-1.5, 1.5], [-1.5, 1.5]]) + >>> circle = AxisAlignedEllipsoidalSet( + ... center=[0, 0], + ... half_lengths=[2, 2], + ... ) + >>> # to construct intersection, pass sets as keyword arguments + >>> intersection = IntersectionSet(set1=square, set2=circle) + >>> intersection.all_sets + UncertaintySetList([...]) + """ - def __init__(self, **kwargs): + def __init__(self, **unc_sets): + """Initialize self (see class docstring).""" + self.all_sets = unc_sets + + @property + def type(self): + """ + str : Brief description of the type of the uncertainty set. + """ + return "intersection" + + @property + def all_sets(self): """ - IntersectionSet constructor + UncertaintySetList : List of the uncertainty sets of which to + take the intersection. Must be of minimum length 2. - Args: - **kwargs: Keyword arguments for specifying all PyROS UncertaintySet objects to be intersected. + This attribute may be set through any iterable of + `UncertaintySet` objects, and exhibits similar behavior + to a `list`. """ - if not all(isinstance(a_set, UncertaintySet) for a_set in kwargs.values()): - raise ValueError("SetIntersection objects can only be constructed via UncertaintySet objects.") + return self._all_sets - # === dim must be defined on all UncertaintySet objects - all_sets = list(a_set for a_set in kwargs.values()) - if len(all_sets) < 2: - raise AttributeError("SetIntersection requires 2 or more UncertaintySet objects.") + @all_sets.setter + def all_sets(self, val): + if isinstance(val, dict): + the_sets = val.values() + else: + the_sets = list(val) - a_dim = all_sets[0].dim - if not all(uncertainty_set.dim == a_dim for uncertainty_set in all_sets): - raise AttributeError("Uncertainty sets being intersected must have equal dimension.") + # type validation, ensure all entries have same dimension + all_sets = UncertaintySetList(the_sets, name="all_sets", min_length=2) - self.all_sets = all_sets - self.type = "intersection" + # set dimension is immutable + if hasattr(self, "_all_sets"): + if all_sets.dim != self.dim: + raise ValueError( + "Attempting to set attribute 'all_sets' of an " + f"IntersectionSet of dimension {self.dim} to a sequence " + f"of sets of dimension {all_sets[0].dim}" + ) + + self._all_sets = all_sets @property def dim(self): """ - Dimension of the uncertainty set, i.e., number of parameters in “uncertain_params†list. + int : Dimension of the intersection set. """ return self.all_sets[0].dim @property def geometry(self): + """ + Geometry of the intersection set. + See the `Geometry` class documentation. + """ return max(self.all_sets[i].geometry.value for i in range(len(self.all_sets))) @property def parameter_bounds(self): """ - Bounds on the realizations of the uncertain parameters, as inferred from the uncertainty set. - IntersectedSet bounds are not computed at set construction because they cannot be algebraically determined - and require access to an optimization solver. + Uncertain parameter value bounds for the intersection + set. + + Currently, an empty list, as the bounds cannot, in general, + be computed without access to an optimization solver. """ - # For the IntersectedSet, these are numerically determined - # in the algorithm therefore they cannot presently be determined at construction of the set. return [] def point_in_set(self, point): """ - Calculates if supplied ``point`` is contained in the uncertainty set. Returns True or False. + Determine whether a given point lies in the intersection set. + + Parameters + ---------- + point : (N,) array-like + Point (parameter value) of interest. - Args: - point: the point being checked for membership in the set + Returns + ------- + : bool + True if the point lies in the set, False otherwise. """ if all(a_set.point_in_set(point=point) for a_set in self.all_sets): return True @@ -1092,18 +2841,29 @@ def point_in_set(self, point): def is_empty_intersection(self, uncertain_params, nlp_solver): """ - Determine if intersection is empty + Determine if intersection is empty. + + Arguments + --------- + uncertain_params : list of Param or list of Var + List of uncertain parameter objects. + nlp_solver : Pyomo SolverFactory object + NLP solver. - Args: - uncertain_params: list of uncertain parameters - nlp_solver: a Pyomo Solver object for solving NLPs + Returns + ------- + is_empty_intersection : bool + True if the intersection is certified to be empty, + and False otherwise. """ # === Non-emptiness check for the set intersection is_empty_intersection = True if any(a_set.type == "discrete" for a_set in self.all_sets): disc_sets = (a_set for a_set in self.all_sets if a_set.type == "discrete") - disc_set = min(disc_sets, key=lambda x: len(x.scenarios)) # minimum set of scenarios + disc_set = min( + disc_sets, key=lambda x: len(x.scenarios) + ) # minimum set of scenarios # === Ensure there is at least one scenario from this discrete set which is a member of all other sets for scenario in disc_set.scenarios: if all(a_set.point_in_set(point=scenario) for a_set in self.all_sets): @@ -1112,14 +2872,19 @@ def is_empty_intersection(self, uncertain_params, nlp_solver): else: # === Compile constraints and solve NLP m = ConcreteModel() - m.obj = Objective(expr=0) # dummy objective required if using baron + m.obj = Objective(expr=0) # dummy objective required if using baron m.param_vars = Var(uncertain_params.index_set()) for a_set in self.all_sets: - m.add_component(a_set.type + "_constraints", a_set.set_as_constraint(uncertain_params=m.param_vars)) + m.add_component( + a_set.type + "_constraints", + a_set.set_as_constraint(uncertain_params=m.param_vars), + ) try: res = nlp_solver.solve(m) except: - raise ValueError("Solver terminated with an error while checking set intersection non-emptiness.") + raise ValueError( + "Solver terminated with an error while checking set intersection non-emptiness." + ) if check_optimal_termination(res): is_empty_intersection = False return is_empty_intersection @@ -1128,10 +2893,19 @@ def is_empty_intersection(self, uncertain_params, nlp_solver): @staticmethod def intersect(Q1, Q2): """ - Binary function intersecting two UncertaintySet objects - Args: - Q1: uncertainty set 1 - Q2: uncertainty set 2 + Obtain the intersection of two uncertainty sets. + + Parameters + ---------- + Q1, Q2 : UncertaintySet + Operand uncertainty sets. + + Returns + ------- + : DiscreteScenarioSet or IntersectionSet + Intersection of the sets. A `DiscreteScenarioSet` is + returned if both operand sets are `DiscreteScenarioSet` + instances; otherwise, an `IntersectionSet` is returned. """ constraints = ConstraintList() constraints.construct() @@ -1152,16 +2926,43 @@ def intersect(Q1, Q2): def set_as_constraint(self, uncertain_params, **kwargs): """ - Function to generate constraints for the IntersectedSet uncertainty set. - Args: - uncertain_params: list of uncertain param objects participating in the sets to be intersected + Construct a list of constraints on a given sequence + of uncertain parameter objects. In advance of constructing + the constraints, a check is performed to determine whether + the set is empty. + + Parameters + ---------- + uncertain_params : list of Param or list of Var + Uncertain parameter objects upon which the constraints + are imposed. + **kwargs : dict + Additional arguments. Must contain a `config` entry, + which maps to a `ConfigDict` containing an entry + entitled `global_solver`. The `global_solver` + key maps to an NLP solver, purportedly with global + optimization capabilities. + + Returns + ------- + conlist : ConstraintList + The constraints on the uncertain parameters. + + Raises + ------ + AttributeError + If the intersection set is found to be empty. """ try: nlp_solver = kwargs["config"].global_solver except: - raise AttributeError("set_as_constraint for SetIntersection requires access to an NLP solver via" - "the PyROS Solver config.") - is_empty_intersection = self.is_empty_intersection(uncertain_params=uncertain_params, nlp_solver=nlp_solver) + raise AttributeError( + "set_as_constraint for SetIntersection requires access to an NLP solver via" + "the PyROS Solver config." + ) + is_empty_intersection = self.is_empty_intersection( + uncertain_params=uncertain_params, nlp_solver=nlp_solver + ) def _intersect(Q1, Q2): return self.intersect(Q1, Q2) @@ -1175,19 +2976,38 @@ def _intersect(Q1, Q2): conlist = ConstraintList() conlist.construct() for set in Qint.all_sets: - for con in list(set.set_as_constraint(uncertain_params=uncertain_params).values()): + for con in list( + set.set_as_constraint( + uncertain_params=uncertain_params + ).values() + ): conlist.add(con.expr) return conlist else: - raise AttributeError("Set intersection is empty, cannot proceed with PyROS robust optimization.") + raise AttributeError( + "Set intersection is empty, cannot proceed with PyROS robust optimization." + ) @staticmethod def add_bounds_on_uncertain_parameters(model, config): """ - Add bounds on uncertain parameters + Specify the numerical bounds for each of a sequence of uncertain + parameters, represented by Pyomo `Var` objects, in a modeling + object. The numerical bounds are specified through the `.lb()` + and `.ub()` attributes of the `Var` objects. - Args: - model: The model to add bounds on for the uncertain parameter variable objects + Parameters + ---------- + model : ConcreteModel + Model of interest (parent model of the uncertain parameter + objects for which to specify bounds). + config : ConfigDict + PyROS solver config. + + Notes + ----- + This method is invoked in advance of a PyROS separation + subproblem. """ add_bounds_for_uncertain_parameters(model=model, config=config) diff --git a/pyomo/contrib/pyros/util.py b/pyomo/contrib/pyros/util.py index 97369123301..2c1a309ced3 100644 --- a/pyomo/contrib/pyros/util.py +++ b/pyomo/contrib/pyros/util.py @@ -5,21 +5,36 @@ from enum import Enum, auto from pyomo.common.collections import ComponentSet from pyomo.common.modeling import unique_component_name -from pyomo.core.base import (Constraint, Var, ConstraintList, - Objective, minimize, Expression, - ConcreteModel, maximize, Block, Param) +from pyomo.core.base import ( + Constraint, + Var, + ConstraintList, + Objective, + minimize, + Expression, + ConcreteModel, + maximize, + Block, + Param, +) from pyomo.core.base.var import IndexedVar from pyomo.core.base.set_types import Reals from pyomo.opt import TerminationCondition as tc from pyomo.core.expr import value -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.core.expr.numeric_expr import NPV_MaxExpression, NPV_MinExpression from pyomo.repn.standard_repn import generate_standard_repn -from pyomo.core.expr.visitor import identify_variables, identify_mutable_parameters, replace_expressions +from pyomo.core.expr.visitor import ( + identify_variables, + identify_mutable_parameters, + replace_expressions, +) from pyomo.common.dependencies import scipy as sp from pyomo.core.expr.numvalue import native_types from pyomo.util.vars_from_expressions import get_vars_from_components from pyomo.core.expr.numeric_expr import SumExpression +from pyomo.environ import SolverFactory + import itertools as it import timeit from contextlib import contextmanager @@ -34,9 +49,12 @@ COEFF_MATCH_REL_TOL = 1e-6 COEFF_MATCH_ABS_TOL = 0 ABS_CON_CHECK_FEAS_TOL = 1e-5 +TIC_TOC_SOLVE_TIME_ATTR = "pyros_tic_toc_time" + + +'''Code borrowed from gdpopt: time_code, get_main_elapsed_time, a_logger.''' -'''Code borrowed from gdpopt: time_code, get_main_ellapsed_time, a_logger.''' @contextmanager def time_code(timing_data_obj, code_block_name, is_main_timer=False): """Starts timer at entry, stores elapsed time at exit @@ -61,10 +79,150 @@ def get_main_elapsed_time(timing_data_obj): return current_time - timing_data_obj.main_timer_start_time except AttributeError as e: if 'main_timer_start_time' in str(e): - raise AttributeError( + raise AttributeError( "You need to be in a 'time_code' context to use `get_main_elapsed_time()`." ) + +def adjust_solver_time_settings(timing_data_obj, solver, config): + """ + Adjust solver max time setting based on current PyROS elapsed + time. + + Parameters + ---------- + timing_data_obj : Bunch + PyROS timekeeper. + solver : solver type + Solver for which to adjust the max time setting. + config : ConfigDict + PyROS solver config. + + Returns + ------- + original_max_time_setting : float or None + If IPOPT or BARON is used, a float is returned. + If GAMS is used, the ``options.add_options`` attribute + of ``solver`` is returned. + Otherwise, None is returned. + custom_setting_present : bool or None + If IPOPT or BARON is used, True if the max time is + specified, False otherwise. + If GAMS is used, True if the attribute ``options.add_options`` + is not None, False otherwise. + If ``config.time_limit`` is None, then None is returned. + + Note + ---- + (1) Adjustment only supported for GAMS, BARON, and IPOPT + interfaces. This routine can be generalized to other solvers + after a generic interface to the time limit setting + is introduced. + (2) For IPOPT, and probably also BARON, the CPU time limit + rather than the wallclock time limit, is adjusted, as + no interface to wallclock limit available. + For this reason, extra 30s is added to time remaining + for subsolver time limit. + (The extra 30s is large enough to ensure solver + elapsed time is not beneath elapsed time - user time limit, + but not so large as to overshoot the user-specified time limit + by an inordinate margin.) + """ + if config.time_limit is not None: + time_remaining = config.time_limit - get_main_elapsed_time(timing_data_obj) + if isinstance(solver, type(SolverFactory("gams", solver_io="shell"))): + original_max_time_setting = solver.options["add_options"] + custom_setting_present = "add_options" in solver.options + + # adjust GAMS solver time + reslim_str = f"option reslim={max(30, 30 + time_remaining)};" + if isinstance(solver.options["add_options"], list): + solver.options["add_options"].append(reslim_str) + else: + solver.options["add_options"] = [reslim_str] + else: + # determine name of option to adjust + if isinstance(solver, SolverFactory.get_class("baron")): + options_key = "MaxTime" + elif isinstance(solver, SolverFactory.get_class("ipopt")): + options_key = "max_cpu_time" + else: + options_key = None + + if options_key is not None: + custom_setting_present = options_key in solver.options + original_max_time_setting = solver.options[options_key] + + # ensure positive value assigned to avoid application error + solver.options[options_key] = max(30, 30 + time_remaining) + else: + custom_setting_present = False + original_max_time_setting = None + config.progress_logger.warning( + "Subproblem time limit setting not adjusted for " + f"subsolver of type:\n {type(solver)}.\n" + " PyROS time limit may not be honored " + ) + + return original_max_time_setting, custom_setting_present + else: + return None, None + + +def revert_solver_max_time_adjustment( + solver, original_max_time_setting, custom_setting_present, config +): + """ + Revert solver `options` attribute to its state prior to a + time limit adjustment performed via + the routine `adjust_solver_time_settings`. + + Parameters + ---------- + solver : solver type + Solver of interest. + original_max_time_setting : float, list, or None + Original solver settings. Type depends on the + solver type. + custom_setting_present : bool or None + Was the max time, or other custom solver settings, + specified prior to the adjustment? + Can be None if ``config.time_limit`` is None. + config : ConfigDict + PyROS solver config. + """ + if config.time_limit is not None: + assert isinstance(custom_setting_present, bool) + + # determine name of option to adjust + if isinstance(solver, type(SolverFactory("gams", solver_io="shell"))): + options_key = "add_options" + elif isinstance(solver, SolverFactory.get_class("baron")): + options_key = "MaxTime" + elif isinstance(solver, SolverFactory.get_class("ipopt")): + options_key = "max_cpu_time" + else: + options_key = None + + if options_key is not None: + if custom_setting_present: + # restore original setting + solver.options[options_key] = original_max_time_setting + + # if GAMS solver used, need to remove the last entry + # of 'add_options', which contains the max time setting + # added by PyROS + if isinstance(solver, type(SolverFactory("gams", solver_io="shell"))): + solver.options[options_key].pop() + else: + # remove the max time specification introduced. + # All lines are needed here to completely remove the option + # from access through getattr and dictionary reference. + delattr(solver.options, options_key) + if options_key in solver.options.keys(): + del solver.options[options_key] + + def a_logger(str_or_logger): """Returns a logger when passed either a logger name or logger object.""" if isinstance(str_or_logger, logging.Logger): @@ -72,35 +230,46 @@ def a_logger(str_or_logger): else: return logging.getLogger(str_or_logger) + def ValidEnum(enum_class): ''' Python 3 dependent format string ''' + def fcn(obj): if obj not in enum_class: - raise ValueError("Expected an {0} object, " - "instead recieved {1}".format(enum_class.__name__, obj.__class__.__name__)) + raise ValueError( + "Expected an {0} object, " + "instead received {1}".format( + enum_class.__name__, obj.__class__.__name__ + ) + ) return obj + return fcn + class pyrosTerminationCondition(Enum): - ''' - Enum class to describe termination conditions of the grcs algorithm - robust_optimal: The grcs algorithm returned with a robust_optimal solution under normal conditions - robust_feasible: The grcs algorithm determined a proven robust feasible solution. - See documentation for the distinction between robust feasible and robust optimal. - robust_infeasible: The grcs algorithm terminated with a proof of robust infeasibility. - max_iter: The grcs algorithm could not identify a robust optimal solution within the specified max_iter. - Consider increasing the max_iter config param. - subsolver_error: There was an error in the user-specified sub-solvers used in the grcs solution procedure. Check the sub-solver log files. - time_out: The grcs algorithm could not identify a robust optimal solution within the specified time_limit. - ''' + """Enumeration of all possible PyROS termination conditions.""" + robust_feasible = 0 + """Final solution is robust feasible.""" + robust_optimal = 1 + """Final solution is robust optimal.""" + robust_infeasible = 2 + """Problem is robust infeasible.""" + max_iter = 3 + """Maximum number of GRCS iteration reached.""" + subsolver_error = 4 + """Subsolver(s) provided could not solve a subproblem to + an acceptable termination status.""" + time_out = 5 + """Maximum allowable time exceeded.""" class SeparationStrategy(Enum): @@ -144,10 +313,7 @@ def model_is_valid(model): Assess whether model is valid on basis of the number of active Objectives. A valid model must contain exactly one active Objective. """ - return ( - len(list(model.component_data_objects(Objective, active=True))) - == 1 - ) + return len(list(model.component_data_objects(Objective, active=True))) == 1 def turn_bounds_to_constraints(variable, model, config=None): @@ -176,8 +342,7 @@ def turn_bounds_to_constraints(variable, model, config=None): for arg in lb_args: if arg is not None: name = unique_component_name( - model, - variable.name + f"_lower_bound_con_{count}", + model, variable.name + f"_lower_bound_con_{count}" ) model.add_component(name, Constraint(expr=arg - variable <= 0)) count += 1 @@ -187,8 +352,7 @@ def turn_bounds_to_constraints(variable, model, config=None): for arg in ub_args: if arg is not None: name = unique_component_name( - model, - variable.name + f"_upper_bound_con_{count}", + model, variable.name + f"_upper_bound_con_{count}" ) model.add_component(name, Constraint(expr=variable - arg <= 0)) count += 1 @@ -196,22 +360,43 @@ def turn_bounds_to_constraints(variable, model, config=None): def get_time_from_solver(results): - ''' - Based on the solver used (GAMS or other pyomo solver) the time is named differently. This function gets the time - based on which sub-solver type is used. - :param results: the results returned from the solver - :return: time - ''' - if hasattr(results.solver, "name"): - if type(results.solver.name) == str: - if "GAMS" in results.solver.name: - return results.solver.user_time - else: - raise ValueError("Accessing the time for this type of solver is not supported by get_time_from_solver.") - else: - return results.solver.time - else: - return results.solver.time + """ + Obtain solver time from a Pyomo `SolverResults` object. + + Returns + ------- + : float + Solver time. May be CPU time or elapsed time, + depending on the solver. If no time attribute + is found, then `float("nan")` is returned. + + NOTE + ---- + This method attempts to access solver time through the + attributes of `results.solver` in the following order + of precedence: + + 1) Attribute with name ``pyros.util.TIC_TOC_SOLVE_TIME_ATTR``. + This attribute is an estimate of the elapsed solve time + obtained using the Pyomo `TicTocTimer` at the point the + solver from which the results object is derived was invoked. + Preferred over other time attributes, as other attributes + may be in CPUs, and for purposes of evaluating overhead + time, we require wall s. + 2) `'user_time'` if the results object was returned by a GAMS + solver, `'time'` otherwise. + """ + solver_name = getattr(results.solver, "name", None) + + # is this sufficient to confirm GAMS solver used? + from_gams = solver_name is not None and str(solver_name).startswith("GAMS ") + time_attr_name = "user_time" if from_gams else "time" + for attr_name in [TIC_TOC_SOLVE_TIME_ATTR, time_attr_name]: + solve_time = getattr(results.solver, attr_name, None) + if solve_time is not None: + break + + return float("nan") if solve_time is None else solve_time def validate_uncertainty_set(config): @@ -226,17 +411,25 @@ def validate_uncertainty_set(config): # === Non-zero number of uncertain parameters if len(uncertain_params) == 0: - raise AttributeError("Must provide uncertain params, uncertain_params list length is 0.") + raise AttributeError( + "Must provide uncertain params, uncertain_params list length is 0." + ) # === No duplicate parameters if len(uncertain_params) != len(ComponentSet(uncertain_params)): raise AttributeError("No duplicates allowed for uncertain param objects.") # === Ensure nominal point is in the set - if not config.uncertainty_set.point_in_set(point=config.nominal_uncertain_param_vals): - raise AttributeError("Nominal point for uncertain parameters must be in the uncertainty set.") + if not config.uncertainty_set.point_in_set( + point=config.nominal_uncertain_param_vals + ): + raise AttributeError( + "Nominal point for uncertain parameters must be in the uncertainty set." + ) # === Check set validity via boundedness and non-emptiness if not config.uncertainty_set.is_valid(config=config): - raise AttributeError("Invalid uncertainty set detected. Check the uncertainty set object to " - "ensure non-emptiness and boundedness.") + raise AttributeError( + "Invalid uncertainty set detected. Check the uncertainty set object to " + "ensure non-emptiness and boundedness." + ) return @@ -254,20 +447,32 @@ def add_bounds_for_uncertain_parameters(model, config): uncertain_param_bounds = [] bounding_model = ConcreteModel() bounding_model.util = Block() - bounding_model.util.uncertain_param_vars = IndexedVar(model.util.uncertain_param_vars.index_set()) + bounding_model.util.uncertain_param_vars = IndexedVar( + model.util.uncertain_param_vars.index_set() + ) for tup in model.util.uncertain_param_vars.items(): bounding_model.util.uncertain_param_vars[tup[0]].set_value( - tup[1].value, skip_validation=True) + tup[1].value, skip_validation=True + ) - bounding_model.add_component("uncertainty_set_constraint", - config.uncertainty_set.set_as_constraint( - uncertain_params=bounding_model.util.uncertain_param_vars, model=bounding_model, - config=config - )) + bounding_model.add_component( + "uncertainty_set_constraint", + config.uncertainty_set.set_as_constraint( + uncertain_params=bounding_model.util.uncertain_param_vars, + model=bounding_model, + config=config, + ), + ) - for idx, param in enumerate(list(bounding_model.util.uncertain_param_vars.values())): - bounding_model.add_component("lb_obj_" + str(idx), Objective(expr=param, sense=minimize)) - bounding_model.add_component("ub_obj_" + str(idx), Objective(expr=param, sense=maximize)) + for idx, param in enumerate( + list(bounding_model.util.uncertain_param_vars.values()) + ): + bounding_model.add_component( + "lb_obj_" + str(idx), Objective(expr=param, sense=minimize) + ) + bounding_model.add_component( + "ub_obj_" + str(idx), Objective(expr=param, sense=maximize) + ) for o in bounding_model.component_data_objects(Objective): o.deactivate() @@ -311,8 +516,9 @@ def transform_to_standard_form(model): # Note: because we will be adding / modifying the number of # constraints, we want to resolve the generator to a list before # starting. - cons = list(model.component_data_objects( - Constraint, descend_into=True, active=True)) + cons = list( + model.component_data_objects(Constraint, descend_into=True, active=True) + ) for con in cons: if not con.equality: has_lb = con.lower is not None @@ -326,8 +532,7 @@ def transform_to_standard_form(model): # range inequality; split into two Constraints. uniq_name = unique_component_name(model, con.name + '_lb') model.add_component( - uniq_name, - Constraint(expr=con.lower - con.body <= 0) + uniq_name, Constraint(expr=con.lower - con.body <= 0) ) con.set_value(con.body - con.upper <= 0) elif has_lb: @@ -355,8 +560,7 @@ def get_vars_from_component(block, ctype): """ - return get_vars_from_components(block, ctype, active=True, - descend_into=True) + return get_vars_from_components(block, ctype, active=True, descend_into=True) def replace_uncertain_bounds_with_constraints(model, uncertain_params): @@ -374,9 +578,10 @@ def replace_uncertain_bounds_with_constraints(model, uncertain_params): # component for explicit inequality constraints uncertain_var_bound_constrs = ConstraintList() - model.add_component(unique_component_name(model, - 'uncertain_var_bound_cons'), - uncertain_var_bound_constrs) + model.add_component( + unique_component_name(model, 'uncertain_var_bound_cons'), + uncertain_var_bound_constrs, + ) # get all variables in active objective and constraint expression(s) vars_in_cons = ComponentSet(get_vars_from_component(model, Constraint)) @@ -426,53 +631,82 @@ def validate_kwarg_inputs(model, config): if not config.first_stage_variables and not config.second_stage_variables: # Must have non-zero DOF - raise ValueError("first_stage_variables and " - "second_stage_variables cannot both be empty lists.") + raise ValueError( + "first_stage_variables and " + "second_stage_variables cannot both be empty lists." + ) - if ComponentSet(first_stage_variables) != ComponentSet(config.first_stage_variables): - raise ValueError("All elements in first_stage_variables must be Var members of the model object.") + if ComponentSet(first_stage_variables) != ComponentSet( + config.first_stage_variables + ): + raise ValueError( + "All elements in first_stage_variables must be Var members of the model object." + ) - if ComponentSet(second_stage_variables) != ComponentSet(config.second_stage_variables): - raise ValueError("All elements in second_stage_variables must be Var members of the model object.") + if ComponentSet(second_stage_variables) != ComponentSet( + config.second_stage_variables + ): + raise ValueError( + "All elements in second_stage_variables must be Var members of the model object." + ) - if any(v in ComponentSet(second_stage_variables) for v in ComponentSet(first_stage_variables)): - raise ValueError("No common elements allowed between first_stage_variables and second_stage_variables.") + if any( + v in ComponentSet(second_stage_variables) + for v in ComponentSet(first_stage_variables) + ): + raise ValueError( + "No common elements allowed between first_stage_variables and second_stage_variables." + ) if ComponentSet(uncertain_params) != ComponentSet(config.uncertain_params): - raise ValueError("uncertain_params must be mutable Param members of the model object.") + raise ValueError( + "uncertain_params must be mutable Param members of the model object." + ) if not config.uncertainty_set: - raise ValueError("An UncertaintySet object must be provided to the PyROS solver.") + raise ValueError( + "An UncertaintySet object must be provided to the PyROS solver." + ) non_mutable_params = [] for p in config.uncertain_params: - if not (not p.is_constant() and p.is_fixed() and not p.is_potentially_variable()): + if not ( + not p.is_constant() and p.is_fixed() and not p.is_potentially_variable() + ): non_mutable_params.append(p) if non_mutable_params: - raise ValueError("Param objects which are uncertain must have attribute mutable=True. " - "Offending Params: %s" % [p.name for p in non_mutable_params]) + raise ValueError( + "Param objects which are uncertain must have attribute mutable=True. " + "Offending Params: %s" % [p.name for p in non_mutable_params] + ) # === Solvers provided check if not config.local_solver or not config.global_solver: - raise ValueError("User must designate both a local and global optimization solver via the local_solver" - " and global_solver options.") + raise ValueError( + "User must designate both a local and global optimization solver via the local_solver" + " and global_solver options." + ) if config.bypass_local_separation and config.bypass_global_separation: - raise ValueError("User cannot simultaneously enable options " - "'bypass_local_separation' and " - "'bypass_global_separation'.") + raise ValueError( + "User cannot simultaneously enable options " + "'bypass_local_separation' and " + "'bypass_global_separation'." + ) # === Degrees of freedom provided check if len(config.first_stage_variables) + len(config.second_stage_variables) == 0: - raise ValueError("User must designate at least one first- and/or second-stage variable.") + raise ValueError( + "User must designate at least one first- and/or second-stage variable." + ) # === Uncertain params provided check if len(config.uncertain_params) == 0: raise ValueError("User must designate at least one uncertain parameter.") - return + def substitute_ssv_in_dr_constraints(model, constraint): ''' Generate the standard_repn for the dr constraints. Generate new expression with replace_expression to ignore @@ -488,33 +722,42 @@ def substitute_ssv_in_dr_constraints(model, constraint): fsv = ComponentSet(model.util.first_stage_variables) if not hasattr(model, "dr_substituted_constraints"): model.dr_substituted_constraints = ConstraintList() + + substitution_map = {} for eqn in dr_eqns: repn = generate_standard_repn(eqn.body, compute_values=False) new_expression = 0 - map_linear_coeff_to_var = [x for x in zip(repn.linear_coefs, repn.linear_vars) if x[1] in ComponentSet(fsv)] - map_quad_coeff_to_var = [x for x in zip(repn.quadratic_coefs, repn.quadratic_vars) if x[1] in ComponentSet(fsv)] + map_linear_coeff_to_var = [ + x + for x in zip(repn.linear_coefs, repn.linear_vars) + if x[1] in ComponentSet(fsv) + ] + map_quad_coeff_to_var = [ + x + for x in zip(repn.quadratic_coefs, repn.quadratic_vars) + if x[1] in ComponentSet(fsv) + ] if repn.linear_coefs: for coeff, var in map_linear_coeff_to_var: new_expression += coeff * var if repn.quadratic_coefs: for coeff, var in map_quad_coeff_to_var: - new_expression += coeff * var[0] * var[1] # var here is a 2-tuple + new_expression += coeff * var[0] * var[1] # var here is a 2-tuple - model.no_ssv_dr_expr = Expression(expr=new_expression) - substitution_map = {} - substitution_map[id(repn.linear_vars[-1])] = model.no_ssv_dr_expr.expr + substitution_map[id(repn.linear_vars[-1])] = new_expression model.dr_substituted_constraints.add( - replace_expressions(expr=constraint.lower, - substitution_map=substitution_map) == - replace_expressions(expr=constraint.body, - substitution_map=substitution_map)) + replace_expressions(expr=constraint.lower, substitution_map=substitution_map) + == replace_expressions(expr=constraint.body, substitution_map=substitution_map) + ) # === Delete the original constraint model.del_component(constraint.name) - model.del_component("no_ssv_dr_expr") - return model.dr_substituted_constraints[max(model.dr_substituted_constraints.keys())] + return model.dr_substituted_constraints[ + max(model.dr_substituted_constraints.keys()) + ] + def is_certain_parameter(uncertain_param_index, config): ''' @@ -526,10 +769,15 @@ def is_certain_parameter(uncertain_param_index, config): ''' if config.uncertainty_set.parameter_bounds: param_bounds = config.uncertainty_set.parameter_bounds[uncertain_param_index] - return math.isclose(a=param_bounds[0], b=param_bounds[1], - rel_tol=PARAM_IS_CERTAIN_REL_TOL, abs_tol=PARAM_IS_CERTAIN_ABS_TOL) + return math.isclose( + a=param_bounds[0], + b=param_bounds[1], + rel_tol=PARAM_IS_CERTAIN_REL_TOL, + abs_tol=PARAM_IS_CERTAIN_ABS_TOL, + ) else: - return False # cannot be determined without bounds + return False # cannot be determined without bounds + def coefficient_matching(model, constraint, uncertain_params, config): ''' @@ -564,21 +812,29 @@ def coefficient_matching(model, constraint, uncertain_params, config): # === Determine if we need to do DR expression/ssv substitution to # make h(x,z,q) == 0 into h(x,d,q) == 0 (which is just h(x,q) == 0) - if all(v in ComponentSet(first_stage_variables) for v in variables_in_constraint) and \ - any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint): + if all( + v in ComponentSet(first_stage_variables) for v in variables_in_constraint + ) and any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint): # h(x, q) == 0 pass - elif all(v in ComponentSet(first_stage_variables + second_stage_variables) for v in variables_in_constraint) and \ - any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint): - constraint = substitute_ssv_in_dr_constraints(model=model, constraint=constraint) + elif all( + v in ComponentSet(first_stage_variables + second_stage_variables) + for v in variables_in_constraint + ) and any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint): + constraint = substitute_ssv_in_dr_constraints( + model=model, constraint=constraint + ) + variables_in_constraint = ComponentSet(identify_variables(constraint.expr)) - params_in_constraint = ComponentSet(identify_mutable_parameters(constraint.expr)) + params_in_constraint = ComponentSet( + identify_mutable_parameters(constraint.expr) + ) else: pass - if all(v in ComponentSet(first_stage_variables) for v in variables_in_constraint) and \ - any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint): - + if all( + v in ComponentSet(first_stage_variables) for v in variables_in_constraint + ) and any(q in ComponentSet(actual_uncertain_params) for q in params_in_constraint): # Swap param objects for variable objects in this constraint model.param_set = [] for i in range(len(list(variables_in_constraint))): @@ -591,8 +847,12 @@ def coefficient_matching(model, constraint, uncertain_params, config): model.add_component("x_%s" % i, Var(initialize=1)) model.variable_set.append(getattr(model, "x_%s" % i)) - original_var_to_param_map = list(zip(list(variables_in_constraint), model.param_set)) - original_param_to_vap_map = list(zip(list(actual_uncertain_params), model.variable_set)) + original_var_to_param_map = list( + zip(list(variables_in_constraint), model.param_set) + ) + original_param_to_vap_map = list( + zip(list(actual_uncertain_params), model.variable_set) + ) var_to_param_substitution_map_forward = {} # Separation problem initialized to nominal uncertain parameter values @@ -616,13 +876,20 @@ def coefficient_matching(model, constraint, uncertain_params, config): model.swapped_constraints.add( replace_expressions( - expr=replace_expressions(expr=constraint.lower, - substitution_map=param_to_var_substitution_map_forward), - substitution_map=var_to_param_substitution_map_forward) == - replace_expressions( - expr=replace_expressions(expr=constraint.body, - substitution_map=param_to_var_substitution_map_forward), - substitution_map=var_to_param_substitution_map_forward)) + expr=replace_expressions( + expr=constraint.lower, + substitution_map=param_to_var_substitution_map_forward, + ), + substitution_map=var_to_param_substitution_map_forward, + ) + == replace_expressions( + expr=replace_expressions( + expr=constraint.body, + substitution_map=param_to_var_substitution_map_forward, + ), + substitution_map=var_to_param_substitution_map_forward, + ) + ) swapped = model.swapped_constraints[max(model.swapped_constraints.keys())] @@ -630,15 +897,31 @@ def coefficient_matching(model, constraint, uncertain_params, config): if val.constant is not None: if type(val.constant) not in native_types: - temp_expr = replace_expressions(val.constant, substitution_map=var_to_param_substitution_map_reverse) - if temp_expr.is_potentially_variable(): + temp_expr = replace_expressions( + val.constant, substitution_map=var_to_param_substitution_map_reverse + ) + # We will use generate_standard_repn to generate a + # simplified expression (in particular, to remove any + # "0*..." terms) + temp_expr = generate_standard_repn(temp_expr).to_expression() + if temp_expr.__class__ not in native_types: model.coefficient_matching_constraints.add(expr=temp_expr == 0) - elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL): + elif math.isclose( + value(temp_expr), + 0, + rel_tol=COEFF_MATCH_REL_TOL, + abs_tol=COEFF_MATCH_ABS_TOL, + ): pass else: successful_matching = False robust_infeasible = True - elif math.isclose(value(val.constant), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL): + elif math.isclose( + value(val.constant), + 0, + rel_tol=COEFF_MATCH_REL_TOL, + abs_tol=COEFF_MATCH_ABS_TOL, + ): pass else: successful_matching = False @@ -646,15 +929,31 @@ def coefficient_matching(model, constraint, uncertain_params, config): if val.linear_coefs is not None: for coeff in val.linear_coefs: if type(coeff) not in native_types: - temp_expr = replace_expressions(coeff, substitution_map=var_to_param_substitution_map_reverse) - if temp_expr.is_potentially_variable(): + temp_expr = replace_expressions( + coeff, substitution_map=var_to_param_substitution_map_reverse + ) + # We will use generate_standard_repn to generate a + # simplified expression (in particular, to remove any + # "0*..." terms) + temp_expr = generate_standard_repn(temp_expr).to_expression() + if temp_expr.__class__ not in native_types: model.coefficient_matching_constraints.add(expr=temp_expr == 0) - elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL): + elif math.isclose( + value(temp_expr), + 0, + rel_tol=COEFF_MATCH_REL_TOL, + abs_tol=COEFF_MATCH_ABS_TOL, + ): pass else: successful_matching = False robust_infeasible = True - elif math.isclose(value(coeff), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL): + elif math.isclose( + value(coeff), + 0, + rel_tol=COEFF_MATCH_REL_TOL, + abs_tol=COEFF_MATCH_ABS_TOL, + ): pass else: successful_matching = False @@ -662,15 +961,31 @@ def coefficient_matching(model, constraint, uncertain_params, config): if val.quadratic_coefs: for coeff in val.quadratic_coefs: if type(coeff) not in native_types: - temp_expr = replace_expressions(coeff, substitution_map=var_to_param_substitution_map_reverse) - if temp_expr.is_potentially_variable(): + temp_expr = replace_expressions( + coeff, substitution_map=var_to_param_substitution_map_reverse + ) + # We will use generate_standard_repn to generate a + # simplified expression (in particular, to remove any + # "0*..." terms) + temp_expr = generate_standard_repn(temp_expr).to_expression() + if temp_expr.__class__ not in native_types: model.coefficient_matching_constraints.add(expr=temp_expr == 0) - elif math.isclose(value(temp_expr), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL): + elif math.isclose( + value(temp_expr), + 0, + rel_tol=COEFF_MATCH_REL_TOL, + abs_tol=COEFF_MATCH_ABS_TOL, + ): pass else: successful_matching = False robust_infeasible = True - elif math.isclose(value(coeff), 0, rel_tol=COEFF_MATCH_REL_TOL, abs_tol=COEFF_MATCH_ABS_TOL): + elif math.isclose( + value(coeff), + 0, + rel_tol=COEFF_MATCH_REL_TOL, + abs_tol=COEFF_MATCH_ABS_TOL, + ): pass else: successful_matching = False @@ -701,9 +1016,7 @@ def selective_clone(block, first_stage_vars): :param first_stage_vars: the variables which should not be cloned :return: """ - memo = { - '__block_scope__': {id(block): True, id(None): False} - } + memo = {'__block_scope__': {id(block): True, id(None): False}} for v in first_stage_vars: memo[id(v)] = v new_block = copy.deepcopy(block, memo) @@ -730,43 +1043,80 @@ def add_decision_rule_variables(model_data, config): if degree == 0: for i in range(len(second_stage_variables)): model_data.working_model.add_component( - "decision_rule_var_" + str(i), - Var(initialize=value(second_stage_variables[i], exception=False), - bounds=bounds,domain=Reals) + "decision_rule_var_" + str(i), + Var( + initialize=value(second_stage_variables[i], exception=False), + bounds=bounds, + domain=Reals, + ), + ) + first_stage_variables.extend( + getattr( + model_data.working_model, "decision_rule_var_" + str(i) + ).values() + ) + decision_rule_vars.append( + getattr(model_data.working_model, "decision_rule_var_" + str(i)) ) - first_stage_variables.extend(getattr(model_data.working_model, "decision_rule_var_" + str(i)).values()) - decision_rule_vars.append(getattr(model_data.working_model, "decision_rule_var_" + str(i))) elif degree == 1: for i in range(len(second_stage_variables)): index_set = list(range(len(uncertain_params) + 1)) - model_data.working_model.add_component("decision_rule_var_" + str(i), - Var(index_set, - initialize=0, - bounds=bounds, - domain=Reals)) + model_data.working_model.add_component( + "decision_rule_var_" + str(i), + Var(index_set, initialize=0, bounds=bounds, domain=Reals), + ) # === For affine drs, the [0]th constant term is initialized to the control variable values, all other terms are initialized to 0 - getattr(model_data.working_model, "decision_rule_var_" + str(i))[0].set_value(value(second_stage_variables[i], exception=False), skip_validation=True) - first_stage_variables.extend(list(getattr(model_data.working_model, "decision_rule_var_" + str(i)).values())) - decision_rule_vars.append(getattr(model_data.working_model, "decision_rule_var_" + str(i))) + getattr(model_data.working_model, "decision_rule_var_" + str(i))[ + 0 + ].set_value( + value(second_stage_variables[i], exception=False), skip_validation=True + ) + first_stage_variables.extend( + list( + getattr( + model_data.working_model, "decision_rule_var_" + str(i) + ).values() + ) + ) + decision_rule_vars.append( + getattr(model_data.working_model, "decision_rule_var_" + str(i)) + ) elif degree == 2 or degree == 3 or degree == 4: for i in range(len(second_stage_variables)): num_vars = int(sp.special.comb(N=len(uncertain_params) + degree, k=degree)) dict_init = {} for r in range(num_vars): if r == 0: - dict_init.update({r: value(second_stage_variables[i], exception=False)}) + dict_init.update( + {r: value(second_stage_variables[i], exception=False)} + ) else: dict_init.update({r: 0}) - model_data.working_model.add_component("decision_rule_var_" + str(i), - Var(list(range(num_vars)), initialize=dict_init, bounds=bounds, - domain=Reals)) + model_data.working_model.add_component( + "decision_rule_var_" + str(i), + Var( + list(range(num_vars)), + initialize=dict_init, + bounds=bounds, + domain=Reals, + ), + ) first_stage_variables.extend( - list(getattr(model_data.working_model, "decision_rule_var_" + str(i)).values())) - decision_rule_vars.append(getattr(model_data.working_model, "decision_rule_var_" + str(i))) + list( + getattr( + model_data.working_model, "decision_rule_var_" + str(i) + ).values() + ) + ) + decision_rule_vars.append( + getattr(model_data.working_model, "decision_rule_var_" + str(i)) + ) else: raise ValueError( - "Decision rule order " + str(config.decision_rule_order) + - " is not yet supported. PyROS supports polynomials of degree 0 (static approximation), 1, 2.") + "Decision rule order " + + str(config.decision_rule_order) + + " is not yet supported. PyROS supports polynomials of degree 0 (static approximation), 1, 2." + ) model_data.working_model.util.decision_rule_vars = decision_rule_vars @@ -791,8 +1141,9 @@ def partition_powers(n, v): # of the list. The degree for each variable is 1 less than the # difference of sequential starting points (to account for the # variable itself) - starts = (0,) + starts + (n+v,) - yield [starts[i+1] - starts[i] - 1 for i in range(v)] + starts = (0,) + starts + (n + v,) + yield [starts[i + 1] - starts[i] - 1 for i in range(v)] + def sort_partitioned_powers(powers_list): powers_list = sorted(powers_list, reverse=True) @@ -814,26 +1165,58 @@ def add_decision_rule_constraints(model_data, config): degree = config.decision_rule_order if degree == 0: for i in range(len(second_stage_variables)): - model_data.working_model.add_component("decision_rule_eqn_" + str(i), - Constraint(expr=getattr(model_data.working_model, "decision_rule_var_" + str(i)) == second_stage_variables[i])) - decision_rule_eqns.append(getattr(model_data.working_model, "decision_rule_eqn_" + str(i))) + model_data.working_model.add_component( + "decision_rule_eqn_" + str(i), + Constraint( + expr=getattr( + model_data.working_model, "decision_rule_var_" + str(i) + ) + == second_stage_variables[i] + ), + ) + decision_rule_eqns.append( + getattr(model_data.working_model, "decision_rule_eqn_" + str(i)) + ) elif degree == 1: for i in range(len(second_stage_variables)): expr = 0 - for j in range(len(getattr(model_data.working_model, "decision_rule_var_" + str(i)))): + for j in range( + len(getattr(model_data.working_model, "decision_rule_var_" + str(i))) + ): if j == 0: - expr += getattr(model_data.working_model, "decision_rule_var_" + str(i))[j] + expr += getattr( + model_data.working_model, "decision_rule_var_" + str(i) + )[j] else: - expr += getattr(model_data.working_model, "decision_rule_var_" + str(i))[j] * uncertain_params[j - 1] - model_data.working_model.add_component("decision_rule_eqn_" + str(i), Constraint(expr= expr == second_stage_variables[i])) - decision_rule_eqns.append(getattr(model_data.working_model, "decision_rule_eqn_" + str(i))) + expr += ( + getattr( + model_data.working_model, "decision_rule_var_" + str(i) + )[j] + * uncertain_params[j - 1] + ) + model_data.working_model.add_component( + "decision_rule_eqn_" + str(i), + Constraint(expr=expr == second_stage_variables[i]), + ) + decision_rule_eqns.append( + getattr(model_data.working_model, "decision_rule_eqn_" + str(i)) + ) elif degree >= 2: # Using bars and stars groupings of variable powers, construct x1^a * .... * xn^b terms for all c <= a+...+b = degree all_powers = [] - for n in range(1, degree+1): - all_powers.append(sort_partitioned_powers(list(partition_powers(n, len(uncertain_params))))) + for n in range(1, degree + 1): + all_powers.append( + sort_partitioned_powers( + list(partition_powers(n, len(uncertain_params))) + ) + ) for i in range(len(second_stage_variables)): - Z = list(z for z in getattr(model_data.working_model, "decision_rule_var_" + str(i)).values()) + Z = list( + z + for z in getattr( + model_data.working_model, "decision_rule_var_" + str(i) + ).values() + ) e = Z.pop(0) for degree_param_powers in all_powers: for param_powers in degree_param_powers: @@ -842,14 +1225,20 @@ def add_decision_rule_constraints(model_data, config): if power == 0: pass else: - product = product * uncertain_params[idx]**power + product = product * uncertain_params[idx] ** power e += Z.pop(0) * product - model_data.working_model.add_component("decision_rule_eqn_" + str(i), - Constraint(expr=e == second_stage_variables[i])) - decision_rule_eqns.append(getattr(model_data.working_model, "decision_rule_eqn_" + str(i))) + model_data.working_model.add_component( + "decision_rule_eqn_" + str(i), + Constraint(expr=e == second_stage_variables[i]), + ) + decision_rule_eqns.append( + getattr(model_data.working_model, "decision_rule_eqn_" + str(i)) + ) if len(Z) != 0: - raise RuntimeError("Construction of the decision rule functions did not work correctly! " - "Did not use all coefficient terms.") + raise RuntimeError( + "Construction of the decision rule functions did not work correctly! " + "Did not use all coefficient terms." + ) model_data.working_model.util.decision_rule_eqns = decision_rule_eqns @@ -889,11 +1278,11 @@ def identify_objective_functions(model, objective): for term in obj_args: non_first_stage_vars_in_term = ComponentSet( - v for v in identify_variables(term) - if v not in first_stage_var_set + v for v in identify_variables(term) if v not in first_stage_var_set ) uncertain_params_in_term = ComponentSet( - param for param in identify_mutable_parameters(term) + param + for param in identify_mutable_parameters(term) if param in uncertain_param_set ) @@ -919,8 +1308,13 @@ def load_final_solution(model_data, master_soln, config): elif config.objective_focus == ObjectiveType.worst_case: model = model_data.original_model indices = range(len(master_soln.master_model.scenarios)) - k = max(indices, key=lambda i: value(master_soln.master_model.scenarios[i, 0].first_stage_objective + - master_soln.master_model.scenarios[i, 0].second_stage_objective)) + k = max( + indices, + key=lambda i: value( + master_soln.master_model.scenarios[i, 0].first_stage_objective + + master_soln.master_model.scenarios[i, 0].second_stage_objective + ), + ) soln = master_soln.master_model.scenarios[k, 0] src_vars = getattr(model, 'tmp_var_list') @@ -943,11 +1337,24 @@ def process_termination_condition_master_problem(config, results): locally_acceptable = [tc.optimal, tc.locallyOptimal, tc.globallyOptimal] globally_acceptable = [tc.optimal, tc.globallyOptimal] robust_infeasible = [tc.infeasible] - try_backups = [tc.feasible, tc.maxTimeLimit, tc.maxIterations, tc.maxEvaluations, - tc.minStepLength, tc.minFunctionValue, tc.other, tc.solverFailure, - tc.internalSolverError, tc.error, - tc.unbounded, tc.infeasibleOrUnbounded, tc.invalidProblem, tc.intermediateNonInteger, - tc.noSolution, tc.unknown] + try_backups = [ + tc.feasible, + tc.maxTimeLimit, + tc.maxIterations, + tc.maxEvaluations, + tc.minStepLength, + tc.minFunctionValue, + tc.other, + tc.solverFailure, + tc.internalSolverError, + tc.error, + tc.unbounded, + tc.infeasibleOrUnbounded, + tc.invalidProblem, + tc.intermediateNonInteger, + tc.noSolution, + tc.unknown, + ] termination_condition = results.solver.termination_condition if config.solve_master_globally == False: @@ -958,8 +1365,10 @@ def process_termination_condition_master_problem(config, results): elif termination_condition in try_backups: return (True, None) else: - raise NotImplementedError("This solver return termination condition (%s) " - "is currently not supported by PyROS." % termination_condition) + raise NotImplementedError( + "This solver return termination condition (%s) " + "is currently not supported by PyROS." % termination_condition + ) else: if termination_condition in globally_acceptable: return (False, None) @@ -968,8 +1377,10 @@ def process_termination_condition_master_problem(config, results): elif termination_condition in try_backups: return (True, None) else: - raise NotImplementedError("This solver return termination condition (%s) " - "is currently not supported by PyROS." % termination_condition) + raise NotImplementedError( + "This solver return termination condition (%s) " + "is currently not supported by PyROS." % termination_condition + ) def output_logger(config, **kwargs): @@ -985,50 +1396,60 @@ def output_logger(config, **kwargs): if "preamble" in kwargs: if kwargs["preamble"]: version = str(kwargs["version"]) - preamble = "===========================================================================================\n" \ - "PyROS: Pyomo Robust Optimization Solver v.%s \n" \ - "Developed by Natalie M. Isenberg (1), John D. Siirola (2), Chrysanthos E. Gounaris (1) \n" \ - "(1) Carnegie Mellon University, Department of Chemical Engineering \n" \ - "(2) Sandia National Laboratories, Center for Computing Research\n\n" \ - "The developers gratefully acknowledge support from the U.S. Department of Energy's \n" \ - "Institute for the Design of Advanced Energy Systems (IDAES) \n" \ - "===========================================================================================" % version + preamble = ( + "===========================================================================================\n" + "PyROS: Pyomo Robust Optimization Solver v.%s \n" + "Developed by: Natalie M. Isenberg (1), Jason A. F. Sherman (1), \n" + " John D. Siirola (2), Chrysanthos E. Gounaris (1) \n" + "(1) Carnegie Mellon University, Department of Chemical Engineering \n" + "(2) Sandia National Laboratories, Center for Computing Research\n\n" + "The developers gratefully acknowledge support from the U.S. Department of Energy's \n" + "Institute for the Design of Advanced Energy Systems (IDAES) \n" + "===========================================================================================" + % version + ) print(preamble) # === DISCLAIMER if "disclaimer" in kwargs: if kwargs["disclaimer"]: - print("======================================== DISCLAIMER =======================================\n" - "PyROS is still under development. \n" - "Please provide feedback and/or report any issues by opening a Pyomo ticket.\n" - "===========================================================================================\n") + print( + "======================================== DISCLAIMER =======================================\n" + "PyROS is still under development. \n" + "Please provide feedback and/or report any issues by opening a Pyomo ticket.\n" + "===========================================================================================\n" + ) # === ALL LOGGER RETURN MESSAGES if "bypass_global_separation" in kwargs: if kwargs["bypass_global_separation"]: config.progress_logger.info( - "NOTE: Option to bypass global separation was chosen. " - "Robust feasibility and optimality of the reported " - "solution are not guaranteed." - ) + "NOTE: Option to bypass global separation was chosen. " + "Robust feasibility and optimality of the reported " + "solution are not guaranteed." + ) if "robust_optimal" in kwargs: if kwargs["robust_optimal"]: - config.progress_logger.info('Robust optimal solution identified. Exiting PyROS.') + config.progress_logger.info( + 'Robust optimal solution identified. Exiting PyROS.' + ) if "robust_feasible" in kwargs: if kwargs["robust_feasible"]: - config.progress_logger.info('Robust feasible solution identified. Exiting PyROS.') + config.progress_logger.info( + 'Robust feasible solution identified. Exiting PyROS.' + ) if "robust_infeasible" in kwargs: if kwargs["robust_infeasible"]: config.progress_logger.info('Robust infeasible problem. Exiting PyROS.') - if "time_out" in kwargs: if kwargs["time_out"]: config.progress_logger.info( 'PyROS was unable to identify robust solution ' 'before exceeding time limit of %s seconds. ' 'Consider increasing the time limit via option time_limit.' - % config.time_limit) + % config.time_limit + ) if "max_iter" in kwargs: if kwargs["max_iter"]: @@ -1036,23 +1457,26 @@ def output_logger(config, **kwargs): 'PyROS was unable to identify robust solution ' 'within %s iterations of the GRCS algorithm. ' 'Consider increasing the iteration limit via option max_iter.' - % config.max_iter) + % config.max_iter + ) if "master_error" in kwargs: if kwargs["master_error"]: status_dict = kwargs["status_dict"] filename = kwargs["filename"] # solver name to solver termination condition if kwargs["iteration"] == 0: - raise AttributeError("User-supplied solver(s) could not solve the deterministic model. " - "Returned termination conditions were: %s" - "Please ensure deterministic model is solvable by at least one of the supplied solvers. " - "Exiting PyROS." % pprint(status_dict, width=1)) + raise AttributeError( + "User-supplied solver(s) could not solve the deterministic model. " + "Returned termination conditions were: %s" + "Please ensure deterministic model is solvable by at least one of the supplied solvers. " + "Exiting PyROS." % pprint(status_dict, width=1) + ) config.progress_logger.info( "User-supplied solver(s) could not solve the master model at iteration %s.\n" "Returned termination conditions were: %s\n" - "For debugging, this problem has been written to a GAMS file titled %s. Exiting PyROS." % (kwargs["iteration"], - pprint(status_dict), - filename)) + "For debugging, this problem has been written to a GAMS file titled %s. Exiting PyROS." + % (kwargs["iteration"], pprint(status_dict), filename) + ) if "separation_error" in kwargs: if kwargs["separation_error"]: status_dict = kwargs["status_dict"] @@ -1062,9 +1486,8 @@ def output_logger(config, **kwargs): config.progress_logger.info( "User-supplied solver(s) could not solve the separation problem at iteration %s under separation objective %s.\n" "Returned termination conditions were: %s\n" - "For debugging, this problem has been written to a GAMS file titled %s. Exiting PyROS." % (iteration, - obj, - pprint(status_dict, width=1), - filename)) + "For debugging, this problem has been written to a GAMS file titled %s. Exiting PyROS." + % (iteration, obj, pprint(status_dict, width=1), filename) + ) return diff --git a/pyomo/contrib/satsolver/satsolver.py b/pyomo/contrib/satsolver/satsolver.py index 50c0e8d1622..139b5218169 100644 --- a/pyomo/contrib/satsolver/satsolver.py +++ b/pyomo/contrib/satsolver/satsolver.py @@ -1,12 +1,8 @@ -import math +import math from pyomo.common.dependencies import attempt_import from pyomo.core import value, SymbolMap, NumericLabeler, Var, Constraint -from pyomo.core.expr.logical_expr import ( - EqualityExpression, - InequalityExpression, -) -from pyomo.core.expr.numeric_expr import ( +from pyomo.core.expr import ( ProductExpression, SumExpression, PowExpression, @@ -15,13 +11,11 @@ DivisionExpression, AbsExpression, UnaryFunctionExpression, + EqualityExpression, + InequalityExpression, ) -from pyomo.core.expr.numvalue import ( - nonpyomo_leaf_types, -) -from pyomo.core.expr.visitor import ( - StreamBasedExpressionVisitor, -) +from pyomo.core.expr.numvalue import nonpyomo_leaf_types +from pyomo.core.expr.visitor import StreamBasedExpressionVisitor from pyomo.gdp import Disjunction z3, z3_available = attempt_import('z3') @@ -115,9 +109,9 @@ def _add_bound(self, var): lb = var.lb ub = var.ub if lb is not None: - self.bounds_list.append("(assert (>= " + nm + " " + str(lb) + "))\n") + self.bounds_list.append("(assert (>= " + nm + ' ' + str(lb) + "))\n") if ub is not None: - self.bounds_list.append("(assert (<= " + nm + " " + str(ub) + "))\n") + self.bounds_list.append("(assert (<= " + nm + ' ' + str(ub) + "))\n") # define variables def add_var(self, var): @@ -151,8 +145,15 @@ def _compute_disjunction_string(self, smt_djn): for disj in smt_djn[1]: cons_string = "true" for c in disj[1]: - cons_string = "(and " + cons_string + " " + c + ")" - djn_string = djn_string + "(assert (=> ( = 1 " + disj[0] + ") " + cons_string + "))\n" + cons_string = "(and " + cons_string + ' ' + c + ")" + djn_string = ( + djn_string + + "(assert (=> ( = 1 " + + disj[0] + + ") " + + cons_string + + "))\n" + ) return djn_string # converts disjunction to internal class storage @@ -163,7 +164,7 @@ def _process_active_disjunction(self, djn): constraints = [] iv = disj.binary_indicator_var label = self.add_var(iv) - or_expr = "(+ " + or_expr + " " + label + ")" + or_expr = "(+ " + or_expr + ' ' + label + ")" for c in disj.component_data_objects(ctype=Constraint, active=True): try: constraints.append(self.walker.walk_expression(c.expr)) @@ -183,7 +184,7 @@ def _process_inactive_disjunction(self, djn): for disj in djn.disjuncts: iv = disj.binary_indicator_var label = self.add_var(iv) - or_expr = "(+ " + or_expr + " " + label + ")" + or_expr = "(+ " + or_expr + ' ' + label + ")" if djn.xor: or_expr = "(assert (= 1 " + or_expr + "))\n" else: @@ -195,8 +196,16 @@ def get_SMT_string(self): variable_string = ''.join(self.variable_list) bounds_string = ''.join(self.bounds_list) expression_string = ''.join(self.expression_list) - disjunctions_string = ''.join([self._compute_disjunction_string(d) for d in self.disjunctions_list]) - smtstring = prefix_string + variable_string + bounds_string + expression_string + disjunctions_string + disjunctions_string = ''.join( + [self._compute_disjunction_string(d) for d in self.disjunctions_list] + ) + smtstring = ( + prefix_string + + variable_string + + bounds_string + + expression_string + + disjunctions_string + ) return smtstring def get_var_dict(self): @@ -225,25 +234,25 @@ def __init__(self, varmap): def exitNode(self, node, data): if isinstance(node, EqualityExpression): - ans = "(= " + data[0] + " " + data[1] + ")" + ans = "(= " + data[0] + ' ' + data[1] + ")" elif isinstance(node, InequalityExpression): - ans = "(<= " + data[0] + " " + data[1] + ")" + ans = "(<= " + data[0] + ' ' + data[1] + ")" elif isinstance(node, ProductExpression): ans = data[0] for arg in data[1:]: - ans = "(* " + ans + " " + arg + ")" + ans = "(* " + ans + ' ' + arg + ")" elif isinstance(node, SumExpression): ans = data[0] for arg in data[1:]: - ans = "(+ " + ans + " " + arg + ")" + ans = "(+ " + ans + ' ' + arg + ")" elif isinstance(node, PowExpression): - ans = "(^ " + data[0] + " " + data[1] + ")" + ans = "(^ " + data[0] + ' ' + data[1] + ")" elif isinstance(node, NegationExpression): ans = "(- 0 " + data[0] + ")" elif isinstance(node, MonomialTermExpression): - ans = "(* " + data[0] + " " + data[1] + ")" + ans = "(* " + data[0] + ' ' + data[1] + ")" elif isinstance(node, DivisionExpression): - ans = "(/ " + data[0] + " " + data[1] + ")" + ans = "(/ " + data[0] + ' ' + data[1] + ")" elif isinstance(node, AbsExpression): ans = "(abs " + data[0] + ")" elif isinstance(node, UnaryFunctionExpression): @@ -268,7 +277,9 @@ def exitNode(self, node, data): else: raise NotImplementedError("Unknown unary function: %s" % (node.name,)) else: - raise NotImplementedError(str(type(node)) + " expression not handled by z3 interface") + raise NotImplementedError( + str(type(node)) + " expression not handled by z3 interface" + ) return ans def beforeChild(self, node, child, child_idx): diff --git a/pyomo/contrib/satsolver/test_satsolver.py b/pyomo/contrib/satsolver/test_satsolver.py index 4308573fb0d..7ac7aaff03f 100644 --- a/pyomo/contrib/satsolver/test_satsolver.py +++ b/pyomo/contrib/satsolver/test_satsolver.py @@ -17,8 +17,20 @@ from pyomo.contrib.satsolver.satsolver import satisfiable, z3_available from pyomo.core.base.set_types import PositiveIntegers, NonNegativeReals, Binary from pyomo.environ import ( - ConcreteModel, Var, Constraint, Objective, sin, cos, tan, asin, acos, atan, sqrt, log, - minimize) + ConcreteModel, + Var, + Constraint, + Objective, + sin, + cos, + tan, + asin, + acos, + atan, + sqrt, + log, + minimize, +) from pyomo.gdp import Disjunct, Disjunction currdir = dirname(abspath(__file__)) @@ -27,7 +39,6 @@ @unittest.skipUnless(z3_available, "Z3 SAT solver is not available.") class SatSolverTests(unittest.TestCase): - def test_simple_sat_model(self): m = ConcreteModel() m.x = Var() @@ -218,28 +229,24 @@ def test_binary_domains(self): self.assertFalse(satisfiable(m)) def test_8PP(self): - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) m = exfile.build_eight_process_flowsheet() self.assertTrue(satisfiable(m) is not False) - def test_8PP_deactive(self): - exfile = import_file( - join(exdir, 'eight_process', 'eight_proc_model.py')) + def test_8PP_deactivate(self): + exfile = import_file(join(exdir, 'eight_process', 'eight_proc_model.py')) m = exfile.build_eight_process_flowsheet() for djn in m.component_data_objects(ctype=Disjunction): djn.deactivate() self.assertTrue(satisfiable(m) is not False) def test_strip_pack(self): - exfile = import_file( - join(exdir, 'strip_packing', 'strip_packing_concrete.py')) + exfile = import_file(join(exdir, 'strip_packing', 'strip_packing_concrete.py')) m = exfile.build_rect_strip_packing_model() self.assertTrue(satisfiable(m)) def test_constrained_layout(self): - exfile = import_file( - join(exdir, 'constrained_layout', 'cons_layout_model.py')) + exfile = import_file(join(exdir, 'constrained_layout', 'cons_layout_model.py')) m = exfile.build_constrained_layout_model() self.assertTrue(satisfiable(m) is not False) diff --git a/pyomo/contrib/sensitivity_toolbox/__init__.py b/pyomo/contrib/sensitivity_toolbox/__init__.py index 3e3786b6cfc..cac6562157e 100644 --- a/pyomo/contrib/sensitivity_toolbox/__init__.py +++ b/pyomo/contrib/sensitivity_toolbox/__init__.py @@ -11,4 +11,3 @@ """ pyomo.contrib.sensitivity_toolbox """ - diff --git a/pyomo/contrib/sensitivity_toolbox/examples/HIV_Transmission.py b/pyomo/contrib/sensitivity_toolbox/examples/HIV_Transmission.py index 1eb88db07b0..146baedd8aa 100755 --- a/pyomo/contrib/sensitivity_toolbox/examples/HIV_Transmission.py +++ b/pyomo/contrib/sensitivity_toolbox/examples/HIV_Transmission.py @@ -9,272 +9,327 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ # -# Model Reference: "A Dynamic HIV-Transmission Model for Evaluating the Costs -# and Benefits of Vaccine Programs", D.M. Edwards, R.D. Shachter, -# and D.K. Owen 1998, Interfaces +# Model Reference: "A Dynamic HIV-Transmission Model for Evaluating the Costs +# and Benefits of Vaccine Programs", D.M. Edwards, R.D. Shachter, +# and D.K. Owen 1998, Interfaces # from __future__ import division -from pyomo.environ import (ConcreteModel, Param, Var, Objective, - Constraint, Set, Expression, Suffix, - value, exp, TransformationFactory) +from pyomo.environ import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + Set, + Expression, + Suffix, + value, + exp, + TransformationFactory, +) from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.dae.simulator import Simulator from pyomo.contrib.sensitivity_toolbox.sens import sensitivity_calculation def create_model(): - m = ConcreteModel() m.tf = Param(initialize=20) - m.t = ContinuousSet(bounds=(0,m.tf)) - m.i = Set(initialize=[0,1,2,3,4,5],ordered=True) - m.j = Set(initialize=[0,1],ordered=True) - m.ij = Set(initialize=[(0,0),(0,1),(1,0),(1,1),(2,0),(2,1),(3,0),(4,0)], - ordered=True) - - #Set Parameters - m.eps = Param(initialize = 0.75, mutable=True) - - m.sig = Param(initialize = 0.15, mutable=True) - m.xi = Param(initialize = 0.983, mutable=True) - m.omeg = Param(initialize = 1.0/10, mutable=True) - - m.Y0 = Param(initialize = 55816.0, mutable=True) - m.phi0 = Param(initialize = 0.493, mutable=True) - - d={} - d[(0,0)] = 0.0222 - d[(0,1)] = 0.0222 - d[(1,0)] = 1/7.1 - d[(1,1)] = 1/7.1 - d[(2,0)] = 1/8.1 - d[(2,1)] = 1/(8.1+5) - d[(3,0)] = 1/2.7 - d[(4,0)] = 1/2.1 + m.t = ContinuousSet(bounds=(0, m.tf)) + m.i = Set(initialize=[0, 1, 2, 3, 4, 5], ordered=True) + m.j = Set(initialize=[0, 1], ordered=True) + m.ij = Set( + initialize=[(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1), (3, 0), (4, 0)], + ordered=True, + ) + + # Set Parameters + m.eps = Param(initialize=0.75, mutable=True) + + m.sig = Param(initialize=0.15, mutable=True) + m.xi = Param(initialize=0.983, mutable=True) + m.omeg = Param(initialize=1.0 / 10, mutable=True) + + m.Y0 = Param(initialize=55816.0, mutable=True) + m.phi0 = Param(initialize=0.493, mutable=True) + + d = {} + d[(0, 0)] = 0.0222 + d[(0, 1)] = 0.0222 + d[(1, 0)] = 1 / 7.1 + d[(1, 1)] = 1 / 7.1 + d[(2, 0)] = 1 / 8.1 + d[(2, 1)] = 1 / (8.1 + 5) + d[(3, 0)] = 1 / 2.7 + d[(4, 0)] = 1 / 2.1 m.dd = Param(m.ij, initialize=d, mutable=True) - - d_inv={} - d_inv[(1,0)] = 7.1 - d_inv[(2,0)] = 8.1 - d_inv[(3,0)] = 2.7 - d_inv[(4,0)] = 2.1 + + d_inv = {} + d_inv[(1, 0)] = 7.1 + d_inv[(2, 0)] = 8.1 + d_inv[(3, 0)] = 2.7 + d_inv[(4, 0)] = 2.1 m.dd_inv = Param(m.ij, initialize=d_inv, default=0, mutable=True) - - I={} - I[(0,0)] = 0.9*m.dd[(0,0)]*m.Y0 - I[(1,0)] = 0.04*m.dd[(0,0)]*m.Y0 - I[(2,0)] = 0.04*m.dd[(0,0)]*m.Y0 - I[(3,0)] = 0.02*m.dd[(0,0)]*m.Y0 - m.II=Param(m.ij, initialize=I, default=0, mutable=True) - - p={} - p[(4,0)] = 0.667 + + I = {} + I[(0, 0)] = 0.9 * m.dd[(0, 0)] * m.Y0 + I[(1, 0)] = 0.04 * m.dd[(0, 0)] * m.Y0 + I[(2, 0)] = 0.04 * m.dd[(0, 0)] * m.Y0 + I[(3, 0)] = 0.02 * m.dd[(0, 0)] * m.Y0 + m.II = Param(m.ij, initialize=I, default=0, mutable=True) + + p = {} + p[(4, 0)] = 0.667 m.pp = Param(m.ij, initialize=p, default=2, mutable=True) - - b={} - b[(1,0)] = 0.066 - b[(1,1)] = 0.066 - b[(2,0)] = 0.066 - b[(2,1)] = 0.066*(1-0.25) - b[(3,0)] = 0.147 - b[(4,0)] = 0.147 - m.bb = Param(m.ij,initialize=b, default=0, mutable=True) - - eta00={} - eta00[(0,0)] = 0.505 - eta00[(0,1)] = 0.505 - eta00[(1,0)] = 0.505 - eta00[(1,1)] = 0.505 - eta00[(2,0)] = 0.307 - eta00[(2,1)] = 0.4803 - eta00[(3,0)] = 0.235 - eta00[(4,0)] = 0.235 + + b = {} + b[(1, 0)] = 0.066 + b[(1, 1)] = 0.066 + b[(2, 0)] = 0.066 + b[(2, 1)] = 0.066 * (1 - 0.25) + b[(3, 0)] = 0.147 + b[(4, 0)] = 0.147 + m.bb = Param(m.ij, initialize=b, default=0, mutable=True) + + eta00 = {} + eta00[(0, 0)] = 0.505 + eta00[(0, 1)] = 0.505 + eta00[(1, 0)] = 0.505 + eta00[(1, 1)] = 0.505 + eta00[(2, 0)] = 0.307 + eta00[(2, 1)] = 0.4803 + eta00[(3, 0)] = 0.235 + eta00[(4, 0)] = 0.235 m.eta00 = Param(m.ij, initialize=eta00, mutable=True) - - eta01={} - eta01[(0,0)] = 0.505 - eta01[(0,1)] = 0.6287 - eta01[(1,0)] = 0.505 - eta01[(1,1)] = 0.6287 - eta01[(2,0)] = 0.307 - eta01[(2,1)] = 0.4803 - eta01[(3,0)] = 0.235 - eta01[(4,0)] = 0.235 + + eta01 = {} + eta01[(0, 0)] = 0.505 + eta01[(0, 1)] = 0.6287 + eta01[(1, 0)] = 0.505 + eta01[(1, 1)] = 0.6287 + eta01[(2, 0)] = 0.307 + eta01[(2, 1)] = 0.4803 + eta01[(3, 0)] = 0.235 + eta01[(4, 0)] = 0.235 m.eta01 = Param(m.ij, initialize=eta01, mutable=True) - - m.kp = Param(initialize = 1000.0, mutable=True) - m.kt = Param(initialize = 1000.0, mutable=True) - m.rr = Param(initialize = 0.05, mutable=True) - - c={} - c[(0,0)] = 3307 - c[(0,1)] = 3307 - c[(1,0)] = 5467 - c[(1,1)] = 5467 - c[(2,0)] = 5467 - c[(2,1)] = 5467 - c[(3,0)] = 12586 - c[(4,0)] = 35394 + + m.kp = Param(initialize=1000.0, mutable=True) + m.kt = Param(initialize=1000.0, mutable=True) + m.rr = Param(initialize=0.05, mutable=True) + + c = {} + c[(0, 0)] = 3307 + c[(0, 1)] = 3307 + c[(1, 0)] = 5467 + c[(1, 1)] = 5467 + c[(2, 0)] = 5467 + c[(2, 1)] = 5467 + c[(3, 0)] = 12586 + c[(4, 0)] = 35394 m.cc = Param(m.ij, initialize=c, mutable=True) - - q={} - q[(0,0)] = 1 - q[(0,1)] = 1 - q[(1,0)] = 1 - q[(1,1)] = 1 - q[(2,0)] = 0.83 - q[(2,1)] = 0.83 - q[(3,0)] = 0.42 - q[(4,0)] = 0.17 + + q = {} + q[(0, 0)] = 1 + q[(0, 1)] = 1 + q[(1, 0)] = 1 + q[(1, 1)] = 1 + q[(2, 0)] = 0.83 + q[(2, 1)] = 0.83 + q[(3, 0)] = 0.42 + q[(4, 0)] = 0.17 m.qq = Param(m.ij, initialize=q, mutable=True) - - m.aa = Param(initialize = 0.0001, mutable=True) - - #Set Variables - m.yy = Var(m.t,m.ij) + + m.aa = Param(initialize=0.0001, mutable=True) + + # Set Variables + m.yy = Var(m.t, m.ij) m.L = Var(m.t) - - m.vp = Var(m.t, initialize=0.75, bounds=(0,0.75)) - m.vt = Var(m.t, initialize=0.75, bounds=(0,0.75)) - + + m.vp = Var(m.t, initialize=0.75, bounds=(0, 0.75)) + m.vt = Var(m.t, initialize=0.75, bounds=(0, 0.75)) + m.dyy = DerivativeVar(m.yy, wrt=m.t) m.dL = DerivativeVar(m.L, wrt=m.t) - + def CostFunc(m): return m.L[m.tf] + m.cf = Objective(rule=CostFunc) - - + def _initDistConst(m): - return (m.phi0*m.Y0)/sum(m.dd_inv[kk] for kk in m.ij) + return (m.phi0 * m.Y0) / sum(m.dd_inv[kk] for kk in m.ij) + m.idc = Expression(rule=_initDistConst) - - m.yy[0,(0,0)].fix(value((1-m.phi0)*m.Y0)) - m.yy[0,(0,1)].fix(0) - m.yy[0,(1,0)].fix(value(m.dd_inv[(1,0)]*m.idc)) - m.yy[0,(1,1)].fix(0) - m.yy[0,(2,0)].fix(value(m.dd_inv[(2,0)]*m.idc)) - m.yy[0,(2,1)].fix(0) - m.yy[0,(3,0)].fix(value(m.dd_inv[(3,0)]*m.idc)) - m.yy[0,(4,0)].fix(value(m.dd_inv[(4,0)]*m.idc)) + + m.yy[0, (0, 0)].fix(value((1 - m.phi0) * m.Y0)) + m.yy[0, (0, 1)].fix(0) + m.yy[0, (1, 0)].fix(value(m.dd_inv[(1, 0)] * m.idc)) + m.yy[0, (1, 1)].fix(0) + m.yy[0, (2, 0)].fix(value(m.dd_inv[(2, 0)] * m.idc)) + m.yy[0, (2, 1)].fix(0) + m.yy[0, (3, 0)].fix(value(m.dd_inv[(3, 0)] * m.idc)) + m.yy[0, (4, 0)].fix(value(m.dd_inv[(4, 0)] * m.idc)) m.L[0].fix(0) - - - #ODEs - def _yy00(m, t): - return sum(m.pp[kk]*m.yy[t,kk] for kk in m.ij)*m.dyy[t,(0,0)] == \ - sum(m.pp[kk]*m.yy[t,kk] for kk in m.ij)*(m.II[(0,0)]-\ - (m.vp[t]+m.dd[(0,0)])*m.yy[t,(0,0)]+m.omeg*m.yy[t,(0,1)])-\ - m.pp[(0,0)]*sum(m.bb[kk]*m.eta00[kk]*m.pp[kk]*m.yy[t,kk] - for kk in m.ij)*m.yy[t,(0,0)] + + # ODEs + def _yy00(m, t): + return ( + sum(m.pp[kk] * m.yy[t, kk] for kk in m.ij) * m.dyy[t, (0, 0)] + == sum(m.pp[kk] * m.yy[t, kk] for kk in m.ij) + * ( + m.II[(0, 0)] + - (m.vp[t] + m.dd[(0, 0)]) * m.yy[t, (0, 0)] + + m.omeg * m.yy[t, (0, 1)] + ) + - m.pp[(0, 0)] + * sum(m.bb[kk] * m.eta00[kk] * m.pp[kk] * m.yy[t, kk] for kk in m.ij) + * m.yy[t, (0, 0)] + ) + m.yy00DiffCon = Constraint(m.t, rule=_yy00) - + def _yy01(m, t): - return sum(m.pp[kk]*m.yy[t,kk] for kk in m.ij)*m.dyy[t,(0,1)] == \ - sum(m.pp[kk]*m.yy[t,kk] - for kk in m.ij)*(m.vp[t]*m.yy[t,(0,0)]- - (m.dd[(0,0)]+m.omeg)*m.yy[t,(0,1)])-\ - m.pp[(0,1)]*(1-m.eps)*sum(m.bb[kk]*m.eta01[kk]* - m.pp[kk]*m.yy[t,kk] - for kk in m.ij)*m.yy[t,(0,1)] + return ( + sum(m.pp[kk] * m.yy[t, kk] for kk in m.ij) * m.dyy[t, (0, 1)] + == sum(m.pp[kk] * m.yy[t, kk] for kk in m.ij) + * (m.vp[t] * m.yy[t, (0, 0)] - (m.dd[(0, 0)] + m.omeg) * m.yy[t, (0, 1)]) + - m.pp[(0, 1)] + * (1 - m.eps) + * sum(m.bb[kk] * m.eta01[kk] * m.pp[kk] * m.yy[t, kk] for kk in m.ij) + * m.yy[t, (0, 1)] + ) + m.yy01DiffCon = Constraint(m.t, rule=_yy01) - + def _yy10(m, t): - return sum(m.pp[kk]*m.yy[t,kk] for kk in m.ij)*m.dyy[t,(1,0)] == \ - sum(m.pp[kk]*m.yy[t,kk] for kk in m.ij)*\ - (m.II[(1,0)]-((m.sig*m.xi)+m.vp[t]+m.dd[(1,0)]+m.dd[(0,0)])* - m.yy[t,(1,0)]+m.omeg*m.yy[t,(1,1)] - )+m.pp[(0,0)]*sum(m.bb[kk]*m.eta00[kk]* - m.pp[kk]*m.yy[t,kk] - for kk in m.ij)*m.yy[t,(0,0)] + return ( + sum(m.pp[kk] * m.yy[t, kk] for kk in m.ij) * m.dyy[t, (1, 0)] + == sum(m.pp[kk] * m.yy[t, kk] for kk in m.ij) + * ( + m.II[(1, 0)] + - ((m.sig * m.xi) + m.vp[t] + m.dd[(1, 0)] + m.dd[(0, 0)]) + * m.yy[t, (1, 0)] + + m.omeg * m.yy[t, (1, 1)] + ) + + m.pp[(0, 0)] + * sum(m.bb[kk] * m.eta00[kk] * m.pp[kk] * m.yy[t, kk] for kk in m.ij) + * m.yy[t, (0, 0)] + ) + m.yy10DiffCon = Constraint(m.t, rule=_yy10) - + def _yy11(m, t): - return sum(m.pp[kk]*m.yy[t,kk] for kk in m.ij)*m.dyy[t,(1,1)] == \ - sum(m.pp[kk]*m.yy[t,kk] for kk in m.ij)*(m.vp[t]*m.yy[t,(1,0)]-\ - (m.omeg+(m.sig*m.xi)+m.dd[(1,1)]+m.dd[(0,0)])*m.yy[t,(1,1)])+\ - m.pp[(0,1)]*(1-m.eps)*sum(m.bb[kk]*m.eta01[kk]* - m.pp[kk]*m.yy[t,kk] - for kk in m.ij)*m.yy[t,(0,1)] + return ( + sum(m.pp[kk] * m.yy[t, kk] for kk in m.ij) * m.dyy[t, (1, 1)] + == sum(m.pp[kk] * m.yy[t, kk] for kk in m.ij) + * ( + m.vp[t] * m.yy[t, (1, 0)] + - (m.omeg + (m.sig * m.xi) + m.dd[(1, 1)] + m.dd[(0, 0)]) + * m.yy[t, (1, 1)] + ) + + m.pp[(0, 1)] + * (1 - m.eps) + * sum(m.bb[kk] * m.eta01[kk] * m.pp[kk] * m.yy[t, kk] for kk in m.ij) + * m.yy[t, (0, 1)] + ) + m.yy11DiffCon = Constraint(m.t, rule=_yy11) - + def _yy20(m, t): - return m.dyy[t,(2,0)] == \ - m.II[(2,0)]+m.sig*m.xi*(m.yy[t,(1,0)]+m.yy[t,(1,1)])-\ - (m.vt[t]+m.dd[(2,0)]+m.dd[(0,0)])*m.yy[t,(2,0)] + return ( + m.dyy[t, (2, 0)] + == m.II[(2, 0)] + + m.sig * m.xi * (m.yy[t, (1, 0)] + m.yy[t, (1, 1)]) + - (m.vt[t] + m.dd[(2, 0)] + m.dd[(0, 0)]) * m.yy[t, (2, 0)] + ) + m.yy20DiffCon = Constraint(m.t, rule=_yy20) - + def _yy21(m, t): - return m.dyy[t,(2,1)] == \ - m.vt[t]*m.yy[t,(2,0)]-(m.dd[(2,1)]+m.dd[(0,0)])*m.yy[t,(2,1)] + return ( + m.dyy[t, (2, 1)] + == m.vt[t] * m.yy[t, (2, 0)] + - (m.dd[(2, 1)] + m.dd[(0, 0)]) * m.yy[t, (2, 1)] + ) + m.yy21DiffCon = Constraint(m.t, rule=_yy21) - + def _yy30(m, t): - return m.dyy[t,(3,0)] == \ - m.II[(3,0)]+m.dd[(1,0)]*m.yy[t,(1,0)]+\ - m.dd[(1,1)]*m.yy[t,(1,1)]+m.dd[(2,0)]*m.yy[t,(2,0)]+\ - m.dd[(2,1)]*m.yy[t,(2,1)]-\ - (m.dd[(3,0)]+m.dd[(0,0)])*m.yy[t,(3,0)] + return ( + m.dyy[t, (3, 0)] + == m.II[(3, 0)] + + m.dd[(1, 0)] * m.yy[t, (1, 0)] + + m.dd[(1, 1)] * m.yy[t, (1, 1)] + + m.dd[(2, 0)] * m.yy[t, (2, 0)] + + m.dd[(2, 1)] * m.yy[t, (2, 1)] + - (m.dd[(3, 0)] + m.dd[(0, 0)]) * m.yy[t, (3, 0)] + ) + m.yy30DiffCon = Constraint(m.t, rule=_yy30) - + def _yy40(m, t): - return m.dyy[t, (4,0)] == \ - m.dd[(3,0)]*m.yy[t,(3,0)]-(m.dd[(4,0)]+\ - m.dd[(0,0)])*m.yy[t,(4,0)] + return ( + m.dyy[t, (4, 0)] + == m.dd[(3, 0)] * m.yy[t, (3, 0)] + - (m.dd[(4, 0)] + m.dd[(0, 0)]) * m.yy[t, (4, 0)] + ) + m.yy40DiffCon = Constraint(m.t, rule=_yy40) - + def _L(m, t): - return m.dL[t] == \ - exp(-m.rr*t)*(m.aa*(m.kp*m.vp[t]*(m.yy[t,(0,0)]+m.yy[t,(1,0)]) \ - +(m.kt*m.vt[t]*m.yy[t,(2,0)])+sum(m.cc[kk]*m.yy[t,kk] - for kk in m.ij)) \ - -(1-m.aa)*sum(m.qq[kk]*m.yy[t,kk] for kk in m.ij)) + return m.dL[t] == exp(-m.rr * t) * ( + m.aa + * ( + m.kp * m.vp[t] * (m.yy[t, (0, 0)] + m.yy[t, (1, 0)]) + + (m.kt * m.vt[t] * m.yy[t, (2, 0)]) + + sum(m.cc[kk] * m.yy[t, kk] for kk in m.ij) + ) + - (1 - m.aa) * sum(m.qq[kk] * m.yy[t, kk] for kk in m.ij) + ) + m.LDiffCon = Constraint(m.t, rule=_L) - - return m - - -def initialize_model(m,n_sim,n_nfe,n_ncp): - vp_profile = {0:0.75} - vt_profile = {0:0.75} - - + + return m + + +def initialize_model(m, n_sim, n_nfe, n_ncp): + vp_profile = {0: 0.75} + vt_profile = {0: 0.75} + m.u_input = Suffix(direction=Suffix.LOCAL) m.u_input[m.vp] = vp_profile m.u_input[m.vt] = vt_profile - + sim = Simulator(m, package='scipy') tsim, profiles = sim.simulate(numpoints=n_sim, varying_inputs=m.u_input) - - + discretizer = TransformationFactory('dae.collocation') - discretizer.apply_to(m,nfe=n_nfe,ncp=n_ncp,scheme='LAGRANGE-RADAU') - + discretizer.apply_to(m, nfe=n_nfe, ncp=n_ncp, scheme='LAGRANGE-RADAU') + sim.initialize_model() - -if __name__=='__main__': +if __name__ == '__main__': m = create_model() - initialize_model(m,10,5,1) - - m.epsDelta = Param(initialize = 0.75001) - - q_del={} - q_del[(0,0)] = 1.001 - q_del[(0,1)] = 1.002 - q_del[(1,0)] = 1.003 - q_del[(1,1)] = 1.004 - q_del[(2,0)] = 0.83001 - q_del[(2,1)] = 0.83002 - q_del[(3,0)] = 0.42001 - q_del[(4,0)] = 0.17001 + initialize_model(m, 10, 5, 1) + + m.epsDelta = Param(initialize=0.75001) + + q_del = {} + q_del[(0, 0)] = 1.001 + q_del[(0, 1)] = 1.002 + q_del[(1, 0)] = 1.003 + q_del[(1, 1)] = 1.004 + q_del[(2, 0)] = 0.83001 + q_del[(2, 1)] = 0.83002 + q_del[(3, 0)] = 0.42001 + q_del[(4, 0)] = 0.17001 m.qqDelta = Param(m.ij, initialize=q_del) - - m.aaDelta = Param(initialize = .0001001) - - m_sipopt = sensitivity_calculation('sipopt', m,[m.eps,m.qq,m.aa], - [m.epsDelta,m.qqDelta,m.aaDelta], - tee = True) + + m.aaDelta = Param(initialize=0.0001001) + + m_sipopt = sensitivity_calculation( + 'sipopt', m, [m.eps, m.qq, m.aa], [m.epsDelta, m.qqDelta, m.aaDelta], tee=True + ) diff --git a/pyomo/contrib/sensitivity_toolbox/examples/__init__.py b/pyomo/contrib/sensitivity_toolbox/examples/__init__.py index be0a0cadc33..5223f39bbc1 100644 --- a/pyomo/contrib/sensitivity_toolbox/examples/__init__.py +++ b/pyomo/contrib/sensitivity_toolbox/examples/__init__.py @@ -12,5 +12,3 @@ """ pyomo.contrib.sensitivity_toolbox """ - - diff --git a/pyomo/contrib/sensitivity_toolbox/examples/feedbackController.py b/pyomo/contrib/sensitivity_toolbox/examples/feedbackController.py index ed00bea105b..1112a0c82b3 100644 --- a/pyomo/contrib/sensitivity_toolbox/examples/feedbackController.py +++ b/pyomo/contrib/sensitivity_toolbox/examples/feedbackController.py @@ -9,34 +9,44 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ # -# Reference: "Optimal Control Theory: An Introduction", Donald E. Kirk, +# Reference: "Optimal Control Theory: An Introduction", Donald E. Kirk, # (1970/1998) # # Example 5.2-1 # # x'(t) = a*x(t)+u(t) -# +# # min J(u) = (1/2)*H*x^2(T)+\int_0^T ((1/4)*u^2(t)dt) -from pyomo.environ import (ConcreteModel, Param, Var, Objective, Constraint, - Suffix, value, TransformationFactory, SolverFactory) +from pyomo.environ import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + Suffix, + value, + TransformationFactory, + SolverFactory, +) from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.dae.simulator import Simulator from pyomo.contrib.sensitivity_toolbox.sens import sensitivity_calculation from pyomo.common.dependencies.matplotlib import pyplot as plt + def create_model(): m = ConcreteModel() - - m.a = Param(initialize = -0.2, mutable=True) - m.H = Param(initialize = 0.5, mutable=True) + + m.a = Param(initialize=-0.2, mutable=True) + m.H = Param(initialize=0.5, mutable=True) m.T = 15 - m.t = ContinuousSet(bounds=(0,m.T)) + m.t = ContinuousSet(bounds=(0, m.T)) m.x = Var(m.t) m.F = Var(m.t) - m.u = Var(m.t,initialize=0, bounds=(-0.2,0)) + m.u = Var(m.t, initialize=0, bounds=(-0.2, 0)) m.dx = DerivativeVar(m.x, wrt=m.t) m.df0 = DerivativeVar(m.F, wrt=m.t) @@ -44,28 +54,31 @@ def create_model(): m.x[0].fix(5) m.F[0].fix(0) - def _x(m,t): - return m.dx[t]==m.a*m.x[t]+m.u[t] + def _x(m, t): + return m.dx[t] == m.a * m.x[t] + m.u[t] + m.x_dot = Constraint(m.t, rule=_x) - def _f0(m,t): - return m.df0[t]==0.25*m.u[t]**2 + def _f0(m, t): + return m.df0[t] == 0.25 * m.u[t] ** 2 + m.FDiffCon = Constraint(m.t, rule=_f0) def _Cost(m): - return 0.5*m.H*m.x[m.T]**2+m.F[m.T] + return 0.5 * m.H * m.x[m.T] ** 2 + m.F[m.T] + m.J = Objective(rule=_Cost) return m -def initialize_model(m,nfe): - u_profile = {0:-0.06} +def initialize_model(m, nfe): + u_profile = {0: -0.06} m.u_input = Suffix(direction=Suffix.LOCAL) - m.u_input[m.u]=u_profile + m.u_input[m.u] = u_profile - sim = Simulator(m,package='scipy') + sim = Simulator(m, package='scipy') tsim, profiles = sim.simulate(numpoints=100, varying_inputs=m.u_input) discretizer = TransformationFactory('dae.collocation') @@ -77,9 +90,9 @@ def initialize_model(m,nfe): def plot_optimal_solution(m): SolverFactory('ipopt').solve(m, tee=True) - x=[] - u=[] - F=[] + x = [] + u = [] + F = [] for ii in m.t: x.append(value(m.x[ii])) @@ -87,21 +100,21 @@ def plot_optimal_solution(m): F.append(value(m.F[ii])) plt.subplot(131) - plt.plot(m.t.value,x,'ro',label='x') + plt.plot(m.t.value, x, 'ro', label='x') plt.title('State Soln') plt.xlabel('time') plt.subplot(132) - plt.plot(m.t.value,u,'ro',label='u') + plt.plot(m.t.value, u, 'ro', label='u') plt.title('Control Soln') plt.xlabel('time') plt.subplot(133) - plt.plot(m.t.value,F,'ro',label='Cost Integrand') + plt.plot(m.t.value, F, 'ro', label='Cost Integrand') plt.title('Anti-derivative of \n Cost Integrand') plt.xlabel('time') - #plt.show() + # plt.show() return plt @@ -109,17 +122,21 @@ def plot_optimal_solution(m): if __name__ == '__main__': m = create_model() - initialize_model(m,100) + initialize_model(m, 100) -# plt = plot_optimal_solution(m) -# plt.show() + # plt = plot_optimal_solution(m) + # plt.show() m.perturbed_a = Param(initialize=-0.25) m.perturbed_H = Param(initialize=0.55) - m_sipopt = sensitivity_calculation('sipopt', m,[m.a,m.H], - [m.perturbed_a,m.perturbed_H], - cloneModel=True, - tee=True) + m_sipopt = sensitivity_calculation( + 'sipopt', + m, + [m.a, m.H], + [m.perturbed_a, m.perturbed_H], + cloneModel=True, + tee=True, + ) for var, val in m_sipopt.sens_sol_state_1.items(): # To load updated variable values back into the model: diff --git a/pyomo/contrib/sensitivity_toolbox/examples/parameter.py b/pyomo/contrib/sensitivity_toolbox/examples/parameter.py index e131f4f92b0..93c6124701b 100644 --- a/pyomo/contrib/sensitivity_toolbox/examples/parameter.py +++ b/pyomo/contrib/sensitivity_toolbox/examples/parameter.py @@ -14,70 +14,77 @@ # Original implementation by Hans Pirany is in pyomo/examples/pyomo/suffixes # from __future__ import print_function -from pyomo.environ import ConcreteModel, Param, Var, Objective, Constraint, NonNegativeReals, value +from pyomo.environ import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + NonNegativeReals, + value, +) from pyomo.contrib.sensitivity_toolbox.sens import sensitivity_calculation + + def create_model(): - ''' Create a concrete Pyomo model for this example - ''' + '''Create a concrete Pyomo model for this example''' m = ConcreteModel() - - m.x1 = Var(initialize = 0.15, within=NonNegativeReals) - m.x2 = Var(initialize = 0.15, within=NonNegativeReals) - m.x3 = Var(initialize = 0.0, within=NonNegativeReals) - - m.eta1 = Param(initialize=4.5,mutable=True) - m.eta2 = Param(initialize=1.0,mutable=True) - - m.const1 = Constraint(expr=6*m.x1+3*m.x2+2*m.x3-m.eta1 ==0) - m.const2 = Constraint(expr=m.eta2*m.x1+m.x2-m.x3-1 ==0) - m.cost = Objective(expr=m.x1**2+m.x2**2+m.x3**2) - - return m + + m.x1 = Var(initialize=0.15, within=NonNegativeReals) + m.x2 = Var(initialize=0.15, within=NonNegativeReals) + m.x3 = Var(initialize=0.0, within=NonNegativeReals) + + m.eta1 = Param(initialize=4.5, mutable=True) + m.eta2 = Param(initialize=1.0, mutable=True) + + m.const1 = Constraint(expr=6 * m.x1 + 3 * m.x2 + 2 * m.x3 - m.eta1 == 0) + m.const2 = Constraint(expr=m.eta2 * m.x1 + m.x2 - m.x3 - 1 == 0) + m.cost = Objective(expr=m.x1**2 + m.x2**2 + m.x3**2) + + return m + def run_example(print_flag=True): ''' Execute the example - + Arguments: print_flag: Toggle on/off printing - + Returns: sln_dict: Dictionary containing solution (used for automated testing) - - ''' - m = create_model() - m.perturbed_eta1 = Param(initialize = 4.0) - m.perturbed_eta2 = Param(initialize = 1.0) + ''' + m = create_model() + m.perturbed_eta1 = Param(initialize=4.0) + m.perturbed_eta2 = Param(initialize=1.0) - m_sipopt = sensitivity_calculation('sipopt',m,[m.eta1,m.eta2], - [m.perturbed_eta1,m.perturbed_eta2], - tee=True) - + m_sipopt = sensitivity_calculation( + 'sipopt', m, [m.eta1, m.eta2], [m.perturbed_eta1, m.perturbed_eta2], tee=True + ) - if print_flag: print("\nOriginal parameter values:") - print("\teta1 =",m.eta1()) - print("\teta2 =",m.eta2()) - + print("\teta1 =", m.eta1()) + print("\teta2 =", m.eta2()) + print("Initial point:") - print("\tObjective =",value(m.cost)) - print("\tx1 =",m.x1()) - print("\tx2 =",m.x2()) - print("\tx3 =",m.x3()) - + print("\tObjective =", value(m.cost)) + print("\tx1 =", m.x1()) + print("\tx2 =", m.x2()) + print("\tx3 =", m.x3()) + print("Solution with the original parameter values:") - print("\tObjective =",m_sipopt.cost()) - print("\tx1 =",m_sipopt.x1()) - print("\tx2 =",m_sipopt.x2()) - print("\tx3 =",m_sipopt.x3()) - + print("\tObjective =", m_sipopt.cost()) + print("\tx1 =", m_sipopt.x1()) + print("\tx2 =", m_sipopt.x2()) + print("\tx3 =", m_sipopt.x3()) + print("\nNew parameter values:") - print("\teta1 =",m_sipopt.perturbed_eta1()) - print("\teta2 =",m_sipopt.perturbed_eta2()) - + print("\teta1 =", m_sipopt.perturbed_eta1()) + print("\teta2 =", m_sipopt.perturbed_eta2()) + # This highlights one limitation of sipopt. It will only return the # perturbed solution. The user needs to calculate relevant values such as # the objective or expressions @@ -85,14 +92,14 @@ def run_example(print_flag=True): x2 = m_sipopt.sens_sol_state_1[m_sipopt.x2] x3 = m_sipopt.sens_sol_state_1[m_sipopt.x3] obj = x1**2 + x2**2 + x3**2 - + if print_flag: print("(Approximate) solution with the new parameter values:") - print("\tObjective =",obj) - print("\tx1 =",m_sipopt.sens_sol_state_1[m_sipopt.x1]) - print("\tx2 =",m_sipopt.sens_sol_state_1[m_sipopt.x2]) - print("\tx3 =",m_sipopt.sens_sol_state_1[m_sipopt.x3]) - + print("\tObjective =", obj) + print("\tx1 =", m_sipopt.sens_sol_state_1[m_sipopt.x1]) + print("\tx2 =", m_sipopt.sens_sol_state_1[m_sipopt.x2]) + print("\tx3 =", m_sipopt.sens_sol_state_1[m_sipopt.x3]) + # Save the results in a dictionary. # This is optional and makes automated testing convenient. # This code is not important for a Minimum Working Example (MWE) of sipopt @@ -112,9 +119,9 @@ def run_example(print_flag=True): d['x2_pert'] = x2 d['x3_pert'] = x3 d['cost_pert'] = obj - + return d -if __name__=='__main__': +if __name__ == '__main__': d = run_example() diff --git a/pyomo/contrib/sensitivity_toolbox/examples/parameter_kaug.py b/pyomo/contrib/sensitivity_toolbox/examples/parameter_kaug.py index ce9b9108fe9..f54e7903442 100644 --- a/pyomo/contrib/sensitivity_toolbox/examples/parameter_kaug.py +++ b/pyomo/contrib/sensitivity_toolbox/examples/parameter_kaug.py @@ -14,69 +14,69 @@ # Original implementation by Hans Pirnay is in pyomo/examples/pyomo/suffixes # -from pyomo.environ import * +from pyomo.environ import * from pyomo.contrib.sensitivity_toolbox.sens import sensitivity_calculation + def create_model(): - ''' Create a concrete Pyomo model for this example - ''' + '''Create a concrete Pyomo model for this example''' m = ConcreteModel() - m.x1 = Var(initialize = 0.15, within=NonNegativeReals) - m.x2 = Var(initialize = 0.15, within=NonNegativeReals) - m.x3 = Var(initialize = 0.0, within=NonNegativeReals) + m.x1 = Var(initialize=0.15, within=NonNegativeReals) + m.x2 = Var(initialize=0.15, within=NonNegativeReals) + m.x3 = Var(initialize=0.0, within=NonNegativeReals) m.eta1 = Param(initialize=4.5, mutable=True) m.eta2 = Param(initialize=1.0, mutable=True) - m.const1 = Constraint(expr=6*m.x1+3*m.x2+2*m.x3-m.eta1 ==0) - m.const2 = Constraint(expr=m.eta2*m.x1+m.x2-m.x3-1 ==0) - m.cost = Objective(expr=m.x1**2+m.x2**2+m.x3**2) + m.const1 = Constraint(expr=6 * m.x1 + 3 * m.x2 + 2 * m.x3 - m.eta1 == 0) + m.const2 = Constraint(expr=m.eta2 * m.x1 + m.x2 - m.x3 - 1 == 0) + m.cost = Objective(expr=m.x1**2 + m.x2**2 + m.x3**2) + + return m - return m def run_example(print_flag=True): ''' Execute the example - + Arguments: print_flag: Toggle on/off printing - + Returns sln_dict: Dictionary containing solution (used for automated testing) - - ''' - m = create_model() - m.perturbed_eta1 = Param(initialize = 4.0) - m.perturbed_eta2 = Param(initialize = 1.0) + ''' + m = create_model() + m.perturbed_eta1 = Param(initialize=4.0) + m.perturbed_eta2 = Param(initialize=1.0) - m_kaug_dsdp = sensitivity_calculation('k_aug',m,[m.eta1,m.eta2], - [m.perturbed_eta1,m.perturbed_eta2], - tee=True) + m_kaug_dsdp = sensitivity_calculation( + 'k_aug', m, [m.eta1, m.eta2], [m.perturbed_eta1, m.perturbed_eta2], tee=True + ) if print_flag: print("\nOriginal parameter values:") - print("\teta1 =",m.eta1()) - print("\teta2 =",m.eta2()) + print("\teta1 =", m.eta1()) + print("\teta2 =", m.eta2()) print("Initial point:") - print("\tObjective =",value(m.cost)) - print("\tx1 =",m.x1()) - print("\tx2 =",m.x2()) - print("\tx3 =",m.x3()) + print("\tObjective =", value(m.cost)) + print("\tx1 =", m.x1()) + print("\tx2 =", m.x2()) + print("\tx3 =", m.x3()) # Kaug saves only approximated solutions not original solutions print("\nNew parameter values:") - print("\teta1 =",m_kaug_dsdp.perturbed_eta1()) - print("\teta2 =",m_kaug_dsdp.perturbed_eta2()) + print("\teta1 =", m_kaug_dsdp.perturbed_eta1()) + print("\teta2 =", m_kaug_dsdp.perturbed_eta2()) print("(Approximate) solution with the new parameter values:") - print("\tObjective =",m_kaug_dsdp.cost()) - print("\tx1 =",m_kaug_dsdp.x1()) - print("\tx2 =",m_kaug_dsdp.x2()) - print("\tx3 =",m_kaug_dsdp.x3()) + print("\tObjective =", m_kaug_dsdp.cost()) + print("\tx1 =", m_kaug_dsdp.x1()) + print("\tx2 =", m_kaug_dsdp.x2()) + print("\tx3 =", m_kaug_dsdp.x3()) # Save the results in a dictionary. # This is optional and makes automated testing convenient. @@ -96,5 +96,6 @@ def run_example(print_flag=True): return d -if __name__=='__main__': + +if __name__ == '__main__': d = run_example() diff --git a/pyomo/contrib/sensitivity_toolbox/examples/rangeInequality.py b/pyomo/contrib/sensitivity_toolbox/examples/rangeInequality.py index f3905220942..39e4d26f695 100644 --- a/pyomo/contrib/sensitivity_toolbox/examples/rangeInequality.py +++ b/pyomo/contrib/sensitivity_toolbox/examples/rangeInequality.py @@ -16,28 +16,26 @@ def create_model(): - m = ConcreteModel() m.a = Param(initialize=0, mutable=True) m.b = Param(initialize=1, mutable=True) - - m.x = Var(initialize = 1.0) + m.x = Var(initialize=1.0) m.y = Var() - m.C_rangedIn = Constraint(expr=inequality(m.a,m.x,m.b)) - m.C_equal = Constraint(expr=m.y==m.b) - m.C_singleBnd = Constraint(expr=m.x<=m.b) - + m.C_rangedIn = Constraint(expr=inequality(m.a, m.x, m.b)) + m.C_equal = Constraint(expr=m.y == m.b) + m.C_singleBnd = Constraint(expr=m.x <= m.b) return m -if __name__=='__main__': + +if __name__ == '__main__': m = create_model() m.pert_a = Param(initialize=0.01) m.pert_b = Param(initialize=1.01) - - m_sipopt = sensitivity_calculation('sipopt', m,[m.a,m.b],[m.pert_a,m.pert_b], - tee=True) + m_sipopt = sensitivity_calculation( + 'sipopt', m, [m.a, m.b], [m.pert_a, m.pert_b], tee=True + ) diff --git a/pyomo/contrib/sensitivity_toolbox/examples/rooney_biegler.py b/pyomo/contrib/sensitivity_toolbox/examples/rooney_biegler.py index 8b0c94b0505..701d3f71bb4 100644 --- a/pyomo/contrib/sensitivity_toolbox/examples/rooney_biegler.py +++ b/pyomo/contrib/sensitivity_toolbox/examples/rooney_biegler.py @@ -18,14 +18,15 @@ import pandas as pd import pyomo.environ as pyo + def rooney_biegler_model(data): """This function generates an instance of the rooney & biegler Pyomo model using 'data' as the input argument - + Parameters ---------- data: pandas DataFrame, list of dictionaries, or list of json file names Data that is used to build an instance of the Pyomo model - + Returns ------- m: an instance of the Pyomo model @@ -33,24 +34,28 @@ def rooney_biegler_model(data): """ model = pyo.ConcreteModel() - model.asymptote = pyo.Var(initialize = 15) - model.rate_constant = pyo.Var(initialize = 0.5) - + model.asymptote = pyo.Var(initialize=15) + model.rate_constant = pyo.Var(initialize=0.5) + def response_rule(m, h): expr = m.asymptote * (1 - pyo.exp(-m.rate_constant * h)) return expr - model.response_function = pyo.Expression(data.hour, rule = response_rule) - + + model.response_function = pyo.Expression(data.hour, rule=response_rule) + def SSE_rule(m): - return sum((data.y[i] - m.response_function[data.hour[i]])**2 for i in data.index) - model.SSE = pyo.Objective(rule = SSE_rule, sense=pyo.minimize) - + return sum( + (data.y[i] - m.response_function[data.hour[i]]) ** 2 for i in data.index + ) + + model.SSE = pyo.Objective(rule=SSE_rule, sense=pyo.minimize) + return model def rooney_biegler_model_opt(): - """This function generates an instance of the rooney & biegler Pyomo model - + """This function generates an instance of the rooney & biegler Pyomo model + Returns ------- m: an instance of the Pyomo model @@ -59,9 +64,11 @@ def rooney_biegler_model_opt(): model = pyo.ConcreteModel() - model.asymptote = pyo.Var(initialize = 15) - model.rate_constant = pyo.Var(initialize = 0.5) - - model.obj = pyo.Objective(expr = model.asymptote*( 1 - pyo.exp(-model.rate_constant*10 ) ), sense=pyo.minimize) - return model + model.asymptote = pyo.Var(initialize=15) + model.rate_constant = pyo.Var(initialize=0.5) + model.obj = pyo.Objective( + expr=model.asymptote * (1 - pyo.exp(-model.rate_constant * 10)), + sense=pyo.minimize, + ) + return model diff --git a/pyomo/contrib/sensitivity_toolbox/k_aug.py b/pyomo/contrib/sensitivity_toolbox/k_aug.py index 7d1e7e43139..8d739506492 100644 --- a/pyomo/contrib/sensitivity_toolbox/k_aug.py +++ b/pyomo/contrib/sensitivity_toolbox/k_aug.py @@ -9,9 +9,7 @@ # This software is distributed under the 3-clause BSD License # ______________________________________________________________________________ import os -from pyomo.environ import ( - SolverFactory, - ) +from pyomo.environ import SolverFactory from pyomo.common.tempfiles import TempfileManager @@ -21,20 +19,19 @@ # or dot_sens. Other files generated will still be deleted, # but not saved on the K_augInterface object. known_files = [ - "dsdp_in_.in", - "conorder.txt", - "timings_k_aug_dsdp.txt", - "dot_out.out", - "delta_p.out", - "timings_dot_driver_dsdp.txt", - os.path.join(debug_dir, "kkt.in"), - os.path.join(gjh_dir, "gradient_f_print.txt"), - os.path.join(gjh_dir, "A_print.txt"), - ] + "dsdp_in_.in", + "conorder.txt", + "timings_k_aug_dsdp.txt", + "dot_out.out", + "delta_p.out", + "timings_dot_driver_dsdp.txt", + os.path.join(debug_dir, "kkt.in"), + os.path.join(gjh_dir, "gradient_f_print.txt"), + os.path.join(gjh_dir, "A_print.txt"), +] class InTempDir(object): - def __init__(self, suffix=None, prefix=None, dir=None): self._suffix = suffix self._prefix = prefix @@ -46,10 +43,8 @@ def __enter__(self): TempfileManager.push() # Create a new tempdir in this context self._tempdir = TempfileManager.create_tempdir( - suffix=self._suffix, - prefix=self._prefix, - dir=self._dir, - ) + suffix=self._suffix, prefix=self._prefix, dir=self._dir + ) os.chdir(self._tempdir) def __exit__(self, ex_type, ex_val, ex_bt): @@ -61,7 +56,7 @@ class K_augInterface(object): """ k_aug and dot_sens store information in the user's filesystem, some of which is mandatory for subsequent calls. - This class ensures that calls to these executables happen in + This class ensures that calls to these executables happen in temporary directories. The resulting files are immediately read and cached as attributes of this object, and the temporary directories deleted. If we have cached files that can be used diff --git a/pyomo/contrib/sensitivity_toolbox/sens.py b/pyomo/contrib/sensitivity_toolbox/sens.py index 46e6a7c35c4..e1c69d75974 100644 --- a/pyomo/contrib/sensitivity_toolbox/sens.py +++ b/pyomo/contrib/sensitivity_toolbox/sens.py @@ -9,12 +9,20 @@ # This software is distributed under the 3-clause BSD License # ______________________________________________________________________________ from pyomo.environ import ( - Param, Var, Block, ComponentMap, Objective, Constraint, - ConstraintList, Suffix, value, ComponentUID, + Param, + Var, + Block, + ComponentMap, + Objective, + Constraint, + ConstraintList, + Suffix, + value, + ComponentUID, ) from pyomo.common.sorting import sorted_robust -from pyomo.core.expr.current import ExpressionReplacementVisitor +from pyomo.core.expr import ExpressionReplacementVisitor from pyomo.common.modeling import unique_component_name from pyomo.common.deprecation import deprecated @@ -25,58 +33,92 @@ import os import io import shutil -from pyomo.common.dependencies import ( - numpy as np, numpy_available - ) +from pyomo.common.dependencies import numpy as np, numpy_available from pyomo.common.dependencies import scipy, scipy_available logger = logging.getLogger('pyomo.contrib.sensitivity_toolbox') -@deprecated("The sipopt function has been deprecated. Use the sensitivity_calculation() " - "function with method='sipopt' to access this functionality.", - logger='pyomo.contrib.sensitivity_toolbox', - version='6.1') -def sipopt(instance, paramSubList, perturbList, - cloneModel=True, tee=False, keepfiles=False, - streamSoln=False): - m = sensitivity_calculation('sipopt', instance, paramSubList, perturbList, - cloneModel, tee, keepfiles, solver_options=None) + +@deprecated( + "The sipopt function has been deprecated. Use the sensitivity_calculation() " + "function with method='sipopt' to access this functionality.", + logger='pyomo.contrib.sensitivity_toolbox', + version='6.1', +) +def sipopt( + instance, + paramSubList, + perturbList, + cloneModel=True, + tee=False, + keepfiles=False, + streamSoln=False, +): + m = sensitivity_calculation( + 'sipopt', + instance, + paramSubList, + perturbList, + cloneModel, + tee, + keepfiles, + solver_options=None, + ) return m -@deprecated("The kaug function has been deprecated. Use the sensitivity_calculation() " - "function with method='k_aug' to access this functionality.", - logger='pyomo.contrib.sensitivity_toolbox', - version='6.1') -def kaug(instance, paramSubList, perturbList, - cloneModel=True, tee=False, keepfiles=False, solver_options=None, - streamSoln=False): - m = sensitivity_calculation('k_aug', instance, paramSubList, perturbList, - cloneModel, tee, keepfiles, solver_options) + +@deprecated( + "The kaug function has been deprecated. Use the sensitivity_calculation() " + "function with method='k_aug' to access this functionality.", + logger='pyomo.contrib.sensitivity_toolbox', + version='6.1', +) +def kaug( + instance, + paramSubList, + perturbList, + cloneModel=True, + tee=False, + keepfiles=False, + solver_options=None, + streamSoln=False, +): + m = sensitivity_calculation( + 'k_aug', + instance, + paramSubList, + perturbList, + cloneModel, + tee, + keepfiles, + solver_options, + ) return m -_SIPOPT_SUFFIXES = { - 'sens_state_0': Suffix.EXPORT, - # ^ Not sure what this suffix does -RBP - 'sens_state_1': Suffix.EXPORT, - 'sens_state_value_1': Suffix.EXPORT, - 'sens_init_constr': Suffix.EXPORT, - 'sens_sol_state_1': Suffix.IMPORT, - 'sens_sol_state_1_z_L': Suffix.IMPORT, - 'sens_sol_state_1_z_U': Suffix.IMPORT, - } +_SIPOPT_SUFFIXES = { + 'sens_state_0': Suffix.EXPORT, + # ^ Not sure what this suffix does -RBP + 'sens_state_1': Suffix.EXPORT, + 'sens_state_value_1': Suffix.EXPORT, + 'sens_init_constr': Suffix.EXPORT, + 'sens_sol_state_1': Suffix.IMPORT, + 'sens_sol_state_1_z_L': Suffix.IMPORT, + 'sens_sol_state_1_z_U': Suffix.IMPORT, +} _K_AUG_SUFFIXES = { - 'ipopt_zL_out': Suffix.IMPORT, - 'ipopt_zU_out': Suffix.IMPORT, - 'ipopt_zL_in': Suffix.EXPORT, - 'ipopt_zU_in': Suffix.EXPORT, - 'dual': Suffix.IMPORT_EXPORT, - 'dcdp': Suffix.EXPORT, - 'DeltaP': Suffix.EXPORT, - } + 'ipopt_zL_out': Suffix.IMPORT, + 'ipopt_zU_out': Suffix.IMPORT, + 'ipopt_zL_in': Suffix.EXPORT, + 'ipopt_zU_in': Suffix.EXPORT, + 'dual': Suffix.IMPORT_EXPORT, + 'dcdp': Suffix.EXPORT, + 'DeltaP': Suffix.EXPORT, +} + def _add_sensitivity_suffixes(block): suffix_dict = {} @@ -89,9 +131,11 @@ def _add_sensitivity_suffixes(block): # assume it is the proper suffix and move on. block.add_component(name, Suffix(direction=direction)) + class _NotAnIndex(object): pass + def _generate_component_items(components): if type(components) not in {list, tuple}: components = (components,) @@ -102,13 +146,22 @@ def _generate_component_items(components): else: yield _NotAnIndex, comp -def sensitivity_calculation(method, instance, paramList, perturbList, - cloneModel=True, tee=False, keepfiles=False, solver_options=None): + +def sensitivity_calculation( + method, + instance, + paramList, + perturbList, + cloneModel=True, + tee=False, + keepfiles=False, + solver_options=None, +): """This function accepts a Pyomo ConcreteModel, a list of parameters, and their corresponding perturbation list. The model is then augmented with dummy constraints required to call sipopt or k_aug to get an approximation of the perturbed solution. - + Parameters ---------- method: string @@ -128,21 +181,21 @@ def sensitivity_calculation(method, instance, paramList, perturbList, preserve solver interface files solver_options: dict, optional Provides options to the solver (also the name of an attribute) - + Returns ------- The model that was manipulated by the sensitivity interface """ - + sens = SensitivityInterface(instance, clone_model=cloneModel) sens.setup_sensitivity(paramList) m = sens.model_instance - + if method not in {"k_aug", "sipopt"}: raise ValueError("Only methods 'k_aug' and 'sipopt' are supported'") - + if method == 'k_aug': k_aug = SolverFactory('k_aug', solver_io='nl') dot_sens = SolverFactory('dot_sens', solver_io='nl') @@ -152,7 +205,7 @@ def sensitivity_calculation(method, instance, paramList, perturbList, ipopt.solve(m, tee=tee) m.ipopt_zL_in.update(m.ipopt_zL_out) #: important! - m.ipopt_zU_in.update(m.ipopt_zU_out) #: important! + m.ipopt_zU_in.update(m.ipopt_zU_out) #: important! k_aug.options['dsdp_mode'] = "" #: sensitivity mode! k_aug_interface.k_aug(m, tee=tee) @@ -162,6 +215,8 @@ def sensitivity_calculation(method, instance, paramList, perturbList, if method == 'sipopt': ipopt_sens = SolverFactory('ipopt_sens', solver_io='nl') ipopt_sens.options['run_sens'] = 'yes' + if solver_options is not None: + ipopt_sens.options['linear_solver'] = solver_options # Send the model to ipopt_sens and collect the solution results = ipopt_sens.solve(m, keepfiles=keepfiles, tee=tee) @@ -172,6 +227,7 @@ def sensitivity_calculation(method, instance, paramList, perturbList, return m + def get_dsdp(model, theta_names, theta, tee=False): """This function calculates gradient vector of the variables with respect to the parameters (theta_names). @@ -182,7 +238,7 @@ def get_dsdp(model, theta_names, theta, tee=False): 0 <= x1, x2, x3 <= 10 p1 = 10 p2 = 5 - the function retuns dx/dp and dp/dp, and column orders. + the function returns dx/dp and dp/dp, and column orders. The following terms are used to define the output dimensions: Ncon = number of constraints @@ -250,21 +306,21 @@ def get_dsdp(model, theta_names, theta, tee=False): col = nl_data[col_file].strip("\n").split("\n") row = nl_data[row_file].strip("\n").split("\n") - dsdp = dsdp.reshape((len(theta_names), int(len(dsdp)/len(theta_names)))) - dsdp = dsdp[:len(theta_names), :len(col)] + dsdp = dsdp.reshape((len(theta_names), int(len(dsdp) / len(theta_names)))) + dsdp = dsdp[: len(theta_names), : len(col)] col = [i for i in col if sens.get_default_block_name() not in i] - dsdp_out = np.zeros((len(theta_names),len(col))) + dsdp_out = np.zeros((len(theta_names), len(col))) for i in range(len(theta_names)): for j in range(len(col)): if sens.get_default_block_name() not in col[j]: - dsdp_out[i,j] = -dsdp[i, j] # e.g) k_aug dsdp returns -dx1/dx1 = -1.0 + dsdp_out[i, j] = -dsdp[i, j] # e.g) k_aug dsdp returns -dx1/dx1 = -1.0 return scipy.sparse.csr_matrix(dsdp_out), col def get_dfds_dcds(model, theta_names, tee=False, solver_options=None): - """This function calculates gradient vector of the objective function + """This function calculates gradient vector of the objective function and constraints with respect to the variables and parameters. e.g) min f: p1*x1+ p2*(x2^2) + p1*p2 @@ -302,9 +358,9 @@ def get_dfds_dcds(model, theta_names, tee=False, solver_options=None): gradient_c: scipy.sparse.csr.csr_matrix Ncon by Nvar size sparse matrix. A Jacobian matrix of the constraints with respect to the (decision variables, parameters) - at the optimal solution. Each row contains [column number, - row number, and value], column order follows variable order in col - and index starts from 1. Note that it follows k_aug. + at the optimal solution. Each row contains [row number, + column number, and value], column order follows variable order in col + and index starts from 0. Note that it follows k_aug. If no constraint exists, return [] col: list Size Nvar list of variable names @@ -319,13 +375,13 @@ def get_dfds_dcds(model, theta_names, tee=False, solver_options=None): RuntimeError When ipopt or k_aug or dotsens is not available Exception - When ipopt fails + When ipopt fails """ # Create the solver plugin using the ASL interface - ipopt = SolverFactory('ipopt',solver_io='nl') + ipopt = SolverFactory('ipopt', solver_io='nl') if solver_options is not None: ipopt.options = solver_options - k_aug = SolverFactory('k_aug',solver_io='nl') + k_aug = SolverFactory('k_aug', solver_io='nl') if not ipopt.available(False): raise RuntimeError('ipopt is not available') if not k_aug.available(False): @@ -339,10 +395,10 @@ def get_dfds_dcds(model, theta_names, tee=False, solver_options=None): model.rh_name = Suffix(direction=Suffix.IMPORT) #: SUFFIX FOR K_AUG AS WELL k_aug.options["print_kkt"] = "" - results = ipopt.solve(model,tee=tee) + results = ipopt.solve(model, tee=tee) - # Raise exception if ipopt fails - if (results.solver.status == SolverStatus.warning): + # Raise exception if ipopt fails + if results.solver.status == SolverStatus.warning: raise Exception(results.solver.Message) for o in model.component_objects(Objective, active=True): @@ -384,17 +440,18 @@ def get_dfds_dcds(model, theta_names, tee=False, solver_options=None): # Reshape to a numpy array that matches this format. gradient_c = gradient_c.reshape((-1, 3)) - num_constraints = len(row)-1 # Objective is included as a row - if num_constraints > 0 : - row_idx = gradient_c[:,1]-1 - col_idx = gradient_c[:,0]-1 - data = gradient_c[:,2] - gradient_c = scipy.sparse.csr_matrix((data, (row_idx, col_idx)), - shape=(num_constraints, len(col))) + num_constraints = len(row) - 1 # Objective is included as a row + if num_constraints > 0: + row_idx = gradient_c[:, 1] - 1 + col_idx = gradient_c[:, 0] - 1 + data = gradient_c[:, 2] + gradient_c = scipy.sparse.csr_matrix( + (data, (row_idx, col_idx)), shape=(num_constraints, len(col)) + ) else: gradient_c = np.array([]) - return gradient_f, gradient_c, col,row, line_dic + return gradient_f, gradient_c, col, row, line_dic def line_num(file_name, target): @@ -425,13 +482,12 @@ def line_num(file_name, target): if line.strip() == target: return int(count) count += 1 - raise Exception(file_name + " does not include "+target) + raise Exception(file_name + " does not include " + target) class SensitivityInterface(object): - def __init__(self, instance, clone_model=True): - """ Constructor clones model if necessary and attaches + """Constructor clones model if necessary and attaches to this object. """ self._original_model = instance @@ -449,12 +505,12 @@ def get_default_block_name(self): @staticmethod def get_default_var_name(name): - #return '_'.join(('sens_var', name)) + # return '_'.join(('sens_var', name)) return name @staticmethod def get_default_param_name(name): - #return '_'.join(('sens_param', name)) + # return '_'.join(('sens_param', name)) return name def _process_param_list(self, paramList): @@ -466,7 +522,7 @@ def _process_param_list(self, paramList): paramList = list( ComponentUID(param, context=orig).find_component_on(instance) for param in paramList - ) + ) return paramList def _add_data_block(self, existing_block=None): @@ -478,18 +534,21 @@ def _add_data_block(self, existing_block=None): # the constructor once, then perform multiple sensitivity # calculations with the same model instance. if existing_block is not None: - if (hasattr(existing_block, '_has_replaced_expressions') and - not existing_block._has_replaced_expressions): + if ( + hasattr(existing_block, '_has_replaced_expressions') + and not existing_block._has_replaced_expressions + ): for var, _, _, _ in existing_block._sens_data_list: # Re-fix variables that the previous block was # treating as parameters. var.fix() self.model_instance.del_component(existing_block) else: - msg = ("Re-using sensitivity interface is not supported " - "when calculating sensitivity for mutable parameters. " - "Used fixed vars instead if you want to do this." - ) + msg = ( + "Re-using sensitivity interface is not supported " + "when calculating sensitivity for mutable parameters. " + "Used fixed vars instead if you want to do this." + ) raise RuntimeError(msg) # Add a block to keep track of model components necessary for this @@ -531,9 +590,9 @@ def _add_sensitivity_data(self, param_list): parent = comp.parent_component() if not parent.mutable: raise ValueError( - "Parameters within paramList must be mutable. " - "Got %s, which is not mutable." % comp.name - ) + "Parameters within paramList must be mutable. " + "Got %s, which is not mutable." % comp.name + ) # Add a Var: if comp.is_indexed(): d = {k: value(comp[k]) for k in comp.index_set()} @@ -547,9 +606,9 @@ def _add_sensitivity_data(self, param_list): if comp.is_indexed(): sens_data_list.extend( - (var[idx], param, i, idx) - for idx, param in _generate_component_items(comp) - ) + (var[idx], param, i, idx) + for idx, param in _generate_component_items(comp) + ) else: sens_data_list.append((var, comp, i, _NotAnIndex)) @@ -558,10 +617,9 @@ def _add_sensitivity_data(self, param_list): for _, data in _generate_component_items(comp): if not data.fixed: raise ValueError( - "Specified \"parameter\" variables must be " - "fixed. Got %s, which is not fixed." - % comp.name - ) + "Specified \"parameter\" variables must be " + "fixed. Got %s, which is not fixed." % comp.name + ) # Add a Param: if comp.is_indexed(): d = {k: value(comp[k]) for k in comp.index_set()} @@ -575,9 +633,9 @@ def _add_sensitivity_data(self, param_list): if comp.is_indexed(): sens_data_list.extend( - (var, param[idx], i, idx) - for idx, var in _generate_component_items(comp) - ) + (var, param[idx], i, idx) + for idx, var in _generate_component_items(comp) + ) else: sens_data_list.append((comp, param, i, _NotAnIndex)) @@ -587,9 +645,8 @@ def _replace_parameters_in_constraints(self, variableSubMap): # Visitor that we will use to replace user-provided parameters # in the objective and the constraints. param_replacer = ExpressionReplacementVisitor( - substitute=variableSubMap, - remove_named_expressions=True, - ) + substitute=variableSubMap, remove_named_expressions=True + ) # TODO: Flag to ExpressionReplacementVisitor to only replace # named expressions if a node has been replaced within that # expression. @@ -597,9 +654,9 @@ def _replace_parameters_in_constraints(self, variableSubMap): new_old_comp_map = ComponentMap() # clone Objective, add to Block, and update any Expressions - for obj in list(instance.component_data_objects(Objective, - active=True, - descend_into=True)): + for obj in list( + instance.component_data_objects(Objective, active=True, descend_into=True) + ): tempName = unique_component_name(block, obj.local_name) new_expr = param_replacer.walk_expression(obj.expr) block.add_component(tempName, Objective(expr=new_expr)) @@ -610,12 +667,13 @@ def _replace_parameters_in_constraints(self, variableSubMap): # # Unfortunate that this deactivates and replaces constraints # even if they don't contain the parameters. - # - old_con_list = list(instance.component_data_objects(Constraint, - active=True, descend_into=True)) + # + old_con_list = list( + instance.component_data_objects(Constraint, active=True, descend_into=True) + ) last_idx = 0 for con in old_con_list: - if (con.equality or con.lower is None or con.upper is None): + if con.equality or con.lower is None or con.upper is None: new_expr = param_replacer.walk_expression(con.expr) block.constList.add(expr=new_expr) last_idx += 1 @@ -641,8 +699,6 @@ def _replace_parameters_in_constraints(self, variableSubMap): return new_old_comp_map def setup_sensitivity(self, paramList): - """ - """ instance = self.model_instance paramList = self._process_param_list(paramList) @@ -659,23 +715,26 @@ def setup_sensitivity(self, paramList): var.unfix() # Map used to replace user-provided parameters. - variableSubMap = dict((id(param), var) - for var, param, list_idx, _ in sens_data_list - if paramList[list_idx].ctype is Param) + variableSubMap = dict( + (id(param), var) + for var, param, list_idx, _ in sens_data_list + if paramList[list_idx].ctype is Param + ) if variableSubMap: # We now replace the provided parameters in the user's # expressions. Only do this if we have to, i.e. the # user provided some parameters rather than all vars. - block._replaced_map = \ - self._replace_parameters_in_constraints(variableSubMap) + block._replaced_map = self._replace_parameters_in_constraints( + variableSubMap + ) # Assume that we just replaced some params block._has_replaced_expressions = True block.paramConst = ConstraintList() for var, param, _, _ in sens_data_list: - #block.paramConst.add(param - var == 0) + # block.paramConst.add(param - var == 0) block.paramConst.add(var - param == 0) # Declare Suffixes @@ -693,10 +752,7 @@ def setup_sensitivity(self, paramList): # k_aug instance.dcdp[con] = idx - - def perturb_parameters(self, perturbList): - """ - """ + def perturb_parameters(self, perturbList): # Note that entries of perturbList need not be components # of the cloned model. All we need are the values. instance = self.model_instance @@ -705,11 +761,11 @@ def perturb_parameters(self, perturbList): if len(self.block._paramList) != len(perturbList): raise ValueError( - "Length of paramList argument does not equal " - "length of perturbList") + "Length of paramList argument does not equal length of perturbList" + ) for i, (var, param, list_idx, comp_idx) in enumerate(sens_data_list): - con = paramConst[i+1] + con = paramConst[i + 1] if comp_idx is _NotAnIndex: ptb = value(perturbList[list_idx]) else: @@ -724,7 +780,7 @@ def perturb_parameters(self, perturbList): instance.sens_state_value_1[var] = ptb # k_aug - #instance.DeltaP[con] = value(ptb - var) + # instance.DeltaP[con] = value(ptb - var) instance.DeltaP[con] = value(var - ptb) # FIXME: ^ This is incorrect. DeltaP should be (ptb - current). # But at least one test doesn't pass unless I use (current - ptb). diff --git a/pyomo/contrib/sensitivity_toolbox/tests/__init__.py b/pyomo/contrib/sensitivity_toolbox/tests/__init__.py index 2d36ab2238b..557846ee521 100644 --- a/pyomo/contrib/sensitivity_toolbox/tests/__init__.py +++ b/pyomo/contrib/sensitivity_toolbox/tests/__init__.py @@ -12,5 +12,3 @@ """ pyomo.contrib.sensitivity_toolbox tests """ - - diff --git a/pyomo/contrib/sensitivity_toolbox/tests/test_k_aug_interface.py b/pyomo/contrib/sensitivity_toolbox/tests/test_k_aug_interface.py index fb210048aac..8c14cfc91d0 100644 --- a/pyomo/contrib/sensitivity_toolbox/tests/test_k_aug_interface.py +++ b/pyomo/contrib/sensitivity_toolbox/tests/test_k_aug_interface.py @@ -18,8 +18,10 @@ import pyomo.environ as pyo from pyomo.common.dependencies import ( - numpy as np, numpy_available, - pandas as pd, pandas_available, + numpy as np, + numpy_available, + pandas as pd, + pandas_available, ) from pyomo.contrib.sensitivity_toolbox.sens import SensitivityInterface from pyomo.contrib.sensitivity_toolbox.k_aug import K_augInterface @@ -36,7 +38,7 @@ def simple_model_1(): m.p = pyo.Param(mutable=True, initialize=1.0) - m.eq_con = pyo.Constraint(expr=m.v1*m.v2 - m.p == 0) + m.eq_con = pyo.Constraint(expr=m.v1 * m.v2 - m.p == 0) m.obj = pyo.Objective(expr=m.v1**2 + m.v2**2, sense=pyo.minimize) @@ -44,7 +46,6 @@ def simple_model_1(): class TestK_augInterface(unittest.TestCase): - @unittest.skipIf(not opt_k_aug.available(), "k_aug is not available") def test_clear_dir_k_aug(self): m = simple_model_1() @@ -59,7 +60,7 @@ def test_clear_dir_k_aug(self): sens_param = [m.p] sens.setup_sensitivity(sens_param) - + k_aug.k_aug(m, tee=True) # We are back in our working directory @@ -93,7 +94,7 @@ def test_clear_dir_dot_sens(self): sens_param = [m.p] sens.setup_sensitivity(sens_param) - + # Call k_aug k_aug.k_aug(m, tee=True) self.assertIsInstance(k_aug.data["dsdp_in_.in"], str) diff --git a/pyomo/contrib/sensitivity_toolbox/tests/test_sens.py b/pyomo/contrib/sensitivity_toolbox/tests/test_sens.py index f8a097ac5d3..f4b3fb5548c 100644 --- a/pyomo/contrib/sensitivity_toolbox/tests/test_sens.py +++ b/pyomo/contrib/sensitivity_toolbox/tests/test_sens.py @@ -17,18 +17,14 @@ import logging import pyomo.common.unittest as unittest -from pyomo.environ import ConcreteModel, Param, Var, Block, Suffix, value +from pyomo.environ import ConcreteModel, Param, Var, Block, Suffix, value from pyomo.opt import SolverFactory from pyomo.dae import ContinuousSet from pyomo.common.dependencies import scipy_available from pyomo.common.log import LoggingIntercept from pyomo.common.collections import ComponentMap -from pyomo.core.expr.current import identify_variables -from pyomo.contrib.sensitivity_toolbox.sens import ( - sipopt, - kaug, - sensitivity_calculation, - ) +from pyomo.core.expr import identify_variables +from pyomo.contrib.sensitivity_toolbox.sens import sipopt, kaug, sensitivity_calculation import pyomo.contrib.sensitivity_toolbox.examples.parameter as param_ex import pyomo.contrib.sensitivity_toolbox.examples.parameter_kaug as param_kaug_ex import pyomo.contrib.sensitivity_toolbox.examples.feedbackController as fc @@ -36,44 +32,56 @@ import pyomo.contrib.sensitivity_toolbox.examples.HIV_Transmission as hiv opt = SolverFactory('ipopt_sens', solver_io='nl') -opt_kaug = SolverFactory('k_aug',solver_io='nl') -opt_dotsens = SolverFactory('dot_sens',solver_io='nl') +opt_kaug = SolverFactory('k_aug', solver_io='nl') +opt_dotsens = SolverFactory('dot_sens', solver_io='nl') class FunctionDeprecationTest(unittest.TestCase): - @unittest.skipIf(not opt.available(False), "ipopt_sens is not available") def test_sipopt_deprecated(self): m = param_ex.create_model() - m.perturbed_eta1 = Param(initialize = 4.0) - m.perturbed_eta2 = Param(initialize = 1.0) + m.perturbed_eta1 = Param(initialize=4.0) + m.perturbed_eta2 = Param(initialize=1.0) output = StringIO() - with LoggingIntercept(output, 'pyomo.contrib.sensitivity_toolbox', logging.WARNING): - sipopt(m,[m.eta1,m.eta1], - [m.perturbed_eta1,m.perturbed_eta2], - cloneModel=False) - self.assertIn("DEPRECATED: The sipopt function has been deprecated. Use the " - "sensitivity_calculation() function with method='sipopt' to access", - output.getvalue().replace('\n', ' ')) - + with LoggingIntercept( + output, 'pyomo.contrib.sensitivity_toolbox', logging.WARNING + ): + sipopt( + m, + [m.eta1, m.eta1], + [m.perturbed_eta1, m.perturbed_eta2], + cloneModel=False, + ) + self.assertIn( + "DEPRECATED: The sipopt function has been deprecated. Use the " + "sensitivity_calculation() function with method='sipopt' to access", + output.getvalue().replace('\n', ' '), + ) @unittest.skipIf(not opt.available(False), "ipopt_sens is not available") def test_sipopt_equivalent(self): m1 = param_ex.create_model() - m1.perturbed_eta1 = Param(initialize = 4.0) - m1.perturbed_eta2 = Param(initialize = 1.0) + m1.perturbed_eta1 = Param(initialize=4.0) + m1.perturbed_eta2 = Param(initialize=1.0) m2 = param_ex.create_model() - m2.perturbed_eta1 = Param(initialize = 4.0) - m2.perturbed_eta2 = Param(initialize = 1.0) - - m11 = sipopt(m1,[m1.eta1,m1.eta2], - [m1.perturbed_eta1,m1.perturbed_eta2], - cloneModel=True) - m22 = sensitivity_calculation('sipopt',m2,[m2.eta1,m2.eta2], - [m2.perturbed_eta1,m2.perturbed_eta2], - cloneModel=True) + m2.perturbed_eta1 = Param(initialize=4.0) + m2.perturbed_eta2 = Param(initialize=1.0) + + m11 = sipopt( + m1, + [m1.eta1, m1.eta2], + [m1.perturbed_eta1, m1.perturbed_eta2], + cloneModel=True, + ) + m22 = sensitivity_calculation( + 'sipopt', + m2, + [m2.eta1, m2.eta2], + [m2.perturbed_eta1, m2.perturbed_eta2], + cloneModel=True, + ) out1 = StringIO() out2 = StringIO() m11._SENSITIVITY_TOOLBOX_DATA.constList.pprint(ostream=out1) @@ -81,39 +89,52 @@ def test_sipopt_equivalent(self): self.assertMultiLineEqual(out1.getvalue(), out2.getvalue()) @unittest.skipIf(not opt_kaug.available(False), "k_aug is not available") - @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") + @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") def test_kaug_deprecated(self): m = param_ex.create_model() - m.perturbed_eta1 = Param(initialize = 4.0) - m.perturbed_eta2 = Param(initialize = 1.0) + m.perturbed_eta1 = Param(initialize=4.0) + m.perturbed_eta2 = Param(initialize=1.0) output = StringIO() - with LoggingIntercept(output, 'pyomo.contrib.sensitivity_toolbox', logging.WARNING): - kaug(m,[m.eta1,m.eta1], - [m.perturbed_eta1,m.perturbed_eta2], - cloneModel=False) - self.assertIn("DEPRECATED: The kaug function has been deprecated. Use the " - "sensitivity_calculation() function with method='k_aug'", - output.getvalue().replace('\n', ' ')) - + with LoggingIntercept( + output, 'pyomo.contrib.sensitivity_toolbox', logging.WARNING + ): + kaug( + m, + [m.eta1, m.eta1], + [m.perturbed_eta1, m.perturbed_eta2], + cloneModel=False, + ) + self.assertIn( + "DEPRECATED: The kaug function has been deprecated. Use the " + "sensitivity_calculation() function with method='k_aug'", + output.getvalue().replace('\n', ' '), + ) @unittest.skipIf(not opt_kaug.available(False), "k_aug is not available") - @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") + @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") def test_kaug_equivalent(self): m1 = param_ex.create_model() - m1.perturbed_eta1 = Param(initialize = 4.0) - m1.perturbed_eta2 = Param(initialize = 1.0) + m1.perturbed_eta1 = Param(initialize=4.0) + m1.perturbed_eta2 = Param(initialize=1.0) m2 = param_ex.create_model() - m2.perturbed_eta1 = Param(initialize = 4.0) - m2.perturbed_eta2 = Param(initialize = 1.0) - - m11 = kaug(m1,[m1.eta1,m1.eta2], - [m1.perturbed_eta1,m1.perturbed_eta2], - cloneModel=True) - m22 = sensitivity_calculation('k_aug',m2,[m2.eta1,m2.eta2], - [m2.perturbed_eta1,m2.perturbed_eta2], - cloneModel=True) + m2.perturbed_eta1 = Param(initialize=4.0) + m2.perturbed_eta2 = Param(initialize=1.0) + + m11 = kaug( + m1, + [m1.eta1, m1.eta2], + [m1.perturbed_eta1, m1.perturbed_eta2], + cloneModel=True, + ) + m22 = sensitivity_calculation( + 'k_aug', + m2, + [m2.eta1, m2.eta2], + [m2.perturbed_eta1, m2.perturbed_eta2], + cloneModel=True, + ) out1 = StringIO() out2 = StringIO() m11._SENSITIVITY_TOOLBOX_DATA.constList.pprint(ostream=out1) @@ -122,11 +143,10 @@ def test_kaug_equivalent(self): class TestSensitivityToolbox(unittest.TestCase): - @unittest.skipIf(not opt.available(False), "ipopt_sens is not available") def test_bad_arg(self): m = ConcreteModel() - m.t = ContinuousSet(bounds=(0,1)) + m.t = ContinuousSet(bounds=(0, 1)) m.a = Param(initialize=1, mutable=True) m.b = Param(initialize=2, mutable=True) @@ -141,18 +161,17 @@ def test_bad_arg(self): # verify ValueError thrown when param and perturb list are different # lengths - msg = ("Length of paramList argument does" - " not equal length of perturbList") + msg = "Length of paramList argument does not equal length of perturbList" with self.assertRaisesRegex(ValueError, msg): Result = sensitivity_calculation('sipopt', m, list_one, list_two) # verify ValueError thrown when param list has an unmutable param - msg = ("Parameters within paramList must be mutable") + msg = "Parameters within paramList must be mutable" with self.assertRaisesRegex(ValueError, msg): Result = sensitivity_calculation('sipopt', m, list_four, list_one) # verify ValueError thrown when param list has an unfixed var. - msg = ("Specified \"parameter\" variables must be fixed") + msg = "Specified \"parameter\" variables must be fixed" with self.assertRaisesRegex(ValueError, msg) as context: Result = sensitivity_calculation('sipopt', m, list_three, list_one) @@ -160,179 +179,244 @@ def test_bad_arg(self): @unittest.skipIf(not scipy_available, "scipy is required for this test") @unittest.skipIf(not opt.available(False), "ipopt_sens is not available") def test_clonedModel_soln(self): - m_orig = fc.create_model() - fc.initialize_model(m_orig,100) + fc.initialize_model(m_orig, 100) m_orig.perturbed_a = Param(initialize=-0.25) m_orig.perturbed_H = Param(initialize=0.55) - m_sipopt = sensitivity_calculation('sipopt',m_orig,[m_orig.a,m_orig.H], - [m_orig.perturbed_a,m_orig.perturbed_H], - cloneModel=True) - + m_sipopt = sensitivity_calculation( + 'sipopt', + m_orig, + [m_orig.a, m_orig.H], + [m_orig.perturbed_a, m_orig.perturbed_H], + cloneModel=True, + ) + # verify cloned model has _SENSITIVITY_TOOLBOX_DATA block # and original model is untouched self.assertFalse(m_sipopt == m_orig) - self.assertTrue(hasattr(m_sipopt,'_SENSITIVITY_TOOLBOX_DATA') and - m_sipopt._SENSITIVITY_TOOLBOX_DATA.ctype is Block) + self.assertTrue( + hasattr(m_sipopt, '_SENSITIVITY_TOOLBOX_DATA') + and m_sipopt._SENSITIVITY_TOOLBOX_DATA.ctype is Block + ) - self.assertFalse(hasattr(m_orig,'_SENSITIVITY_TOOLBOX_DATA')) - self.assertFalse(hasattr(m_orig,'b')) + self.assertFalse(hasattr(m_orig, '_SENSITIVITY_TOOLBOX_DATA')) + self.assertFalse(hasattr(m_orig, 'b')) # verify variable declaration - self.assertTrue(hasattr(m_sipopt._SENSITIVITY_TOOLBOX_DATA,'a') and - m_sipopt._SENSITIVITY_TOOLBOX_DATA.a.ctype is Var) - self.assertTrue(hasattr(m_sipopt._SENSITIVITY_TOOLBOX_DATA,'H') and - m_sipopt._SENSITIVITY_TOOLBOX_DATA.H.ctype is Var) - + self.assertTrue( + hasattr(m_sipopt._SENSITIVITY_TOOLBOX_DATA, 'a') + and m_sipopt._SENSITIVITY_TOOLBOX_DATA.a.ctype is Var + ) + self.assertTrue( + hasattr(m_sipopt._SENSITIVITY_TOOLBOX_DATA, 'H') + and m_sipopt._SENSITIVITY_TOOLBOX_DATA.H.ctype is Var + ) + # verify suffixes - self.assertTrue(hasattr(m_sipopt,'sens_state_0') and - m_sipopt.sens_state_0.ctype is Suffix and - m_sipopt.sens_state_0[m_sipopt._SENSITIVITY_TOOLBOX_DATA.H]==2 and - m_sipopt.sens_state_0[m_sipopt._SENSITIVITY_TOOLBOX_DATA.a]==1) - - self.assertTrue(hasattr(m_sipopt,'sens_state_1') and - m_sipopt.sens_state_1.ctype is Suffix and - m_sipopt.sens_state_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.H]==2 and - m_sipopt.sens_state_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.a]==1) - - self.assertTrue(hasattr(m_sipopt,'sens_state_value_1') and - m_sipopt.sens_state_value_1.ctype is Suffix and - m_sipopt.sens_state_value_1[ - m_sipopt._SENSITIVITY_TOOLBOX_DATA.H]==0.55 and - m_sipopt.sens_state_value_1[ - m_sipopt._SENSITIVITY_TOOLBOX_DATA.a]==-0.25) - - self.assertTrue(hasattr(m_sipopt,'sens_init_constr') and - m_sipopt.sens_init_constr.ctype is Suffix and - m_sipopt.sens_init_constr[ - m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[1]]==1 and - m_sipopt.sens_init_constr[ - m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[2]]==2) - - self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1') and - m_sipopt.sens_sol_state_1.ctype is Suffix) + self.assertTrue( + hasattr(m_sipopt, 'sens_state_0') + and m_sipopt.sens_state_0.ctype is Suffix + and m_sipopt.sens_state_0[m_sipopt._SENSITIVITY_TOOLBOX_DATA.H] == 2 + and m_sipopt.sens_state_0[m_sipopt._SENSITIVITY_TOOLBOX_DATA.a] == 1 + ) + + self.assertTrue( + hasattr(m_sipopt, 'sens_state_1') + and m_sipopt.sens_state_1.ctype is Suffix + and m_sipopt.sens_state_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.H] == 2 + and m_sipopt.sens_state_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.a] == 1 + ) + + self.assertTrue( + hasattr(m_sipopt, 'sens_state_value_1') + and m_sipopt.sens_state_value_1.ctype is Suffix + and m_sipopt.sens_state_value_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.H] + == 0.55 + and m_sipopt.sens_state_value_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.a] + == -0.25 + ) + + self.assertTrue( + hasattr(m_sipopt, 'sens_init_constr') + and m_sipopt.sens_init_constr.ctype is Suffix + and m_sipopt.sens_init_constr[ + m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[1] + ] + == 1 + and m_sipopt.sens_init_constr[ + m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[2] + ] + == 2 + ) + + self.assertTrue( + hasattr(m_sipopt, 'sens_sol_state_1') + and m_sipopt.sens_sol_state_1.ctype is Suffix + ) self.assertAlmostEqual( - m_sipopt.sens_sol_state_1[ - m_sipopt.F[15]],-0.00102016765,8) + m_sipopt.sens_sol_state_1[m_sipopt.F[15]], -0.00102016765, 8 + ) # These tests require way too much precision for something that # just needs to enforce that bounds are not active... - self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1_z_L') and - m_sipopt.sens_sol_state_1_z_L.ctype is Suffix) + self.assertTrue( + hasattr(m_sipopt, 'sens_sol_state_1_z_L') + and m_sipopt.sens_sol_state_1_z_L.ctype is Suffix + ) self.assertAlmostEqual( - m_sipopt.sens_sol_state_1_z_L[ - m_sipopt.u[15]],-2.181712e-09,13) + m_sipopt.sens_sol_state_1_z_L[m_sipopt.u[15]], -2.181712e-09, 13 + ) - self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1_z_U') and - m_sipopt.sens_sol_state_1_z_U.ctype is Suffix) + self.assertTrue( + hasattr(m_sipopt, 'sens_sol_state_1_z_U') + and m_sipopt.sens_sol_state_1_z_U.ctype is Suffix + ) self.assertAlmostEqual( - m_sipopt.sens_sol_state_1_z_U[ - m_sipopt.u[15]],6.580899e-09,13) + m_sipopt.sens_sol_state_1_z_U[m_sipopt.u[15]], 6.580899e-09, 13 + ) # verify deactivated constraints for cloned model - self.assertFalse(m_sipopt.FDiffCon[0].active and - m_sipopt.FDiffCon[7.5].active and - m_sipopt.FDiffCon[15].active ) + self.assertFalse( + m_sipopt.FDiffCon[0].active + and m_sipopt.FDiffCon[7.5].active + and m_sipopt.FDiffCon[15].active + ) - self.assertFalse(m_sipopt.x_dot[0].active and - m_sipopt.x_dot[7.5].active and - m_sipopt.x_dot[15].active ) + self.assertFalse( + m_sipopt.x_dot[0].active + and m_sipopt.x_dot[7.5].active + and m_sipopt.x_dot[15].active + ) # verify constraints on original model are still active - self.assertTrue(m_orig.FDiffCon[0].active and - m_orig.FDiffCon[7.5].active and - m_orig.FDiffCon[15].active ) + self.assertTrue( + m_orig.FDiffCon[0].active + and m_orig.FDiffCon[7.5].active + and m_orig.FDiffCon[15].active + ) - self.assertTrue(m_orig.x_dot[0].active and - m_orig.x_dot[7.5].active and - m_orig.x_dot[15].active ) + self.assertTrue( + m_orig.x_dot[0].active + and m_orig.x_dot[7.5].active + and m_orig.x_dot[15].active + ) # verify solution # NOTE: This is the solution to the original problem, # not the result of any sensitivity update. - self.assertAlmostEqual(value(m_sipopt.J),0.0048956783,8) - + self.assertAlmostEqual(value(m_sipopt.J), 0.0048956783, 8) @unittest.skipIf(not scipy_available, "scipy is required for this test") @unittest.skipIf(not opt.available(False), "ipopt_sens is not available") def test_noClone_soln(self): - m_orig = fc.create_model() - fc.initialize_model(m_orig,100) + fc.initialize_model(m_orig, 100) m_orig.perturbed_a = Param(initialize=-0.25) m_orig.perturbed_H = Param(initialize=0.55) - m_sipopt = sensitivity_calculation('sipopt',m_orig,[m_orig.a,m_orig.H], - [m_orig.perturbed_a,m_orig.perturbed_H], - cloneModel=False) + m_sipopt = sensitivity_calculation( + 'sipopt', + m_orig, + [m_orig.a, m_orig.H], + [m_orig.perturbed_a, m_orig.perturbed_H], + cloneModel=False, + ) self.assertTrue(m_sipopt == m_orig) # test _SENSITIVITY_TOOLBOX_DATA block exists - self.assertTrue(hasattr(m_orig,'_SENSITIVITY_TOOLBOX_DATA') and - m_orig._SENSITIVITY_TOOLBOX_DATA.ctype is Block) - + self.assertTrue( + hasattr(m_orig, '_SENSITIVITY_TOOLBOX_DATA') + and m_orig._SENSITIVITY_TOOLBOX_DATA.ctype is Block + ) + # test variable declaration - self.assertTrue(hasattr(m_sipopt._SENSITIVITY_TOOLBOX_DATA,'a') and - m_sipopt._SENSITIVITY_TOOLBOX_DATA.a.ctype is Var) - self.assertTrue(hasattr(m_sipopt._SENSITIVITY_TOOLBOX_DATA,'H') and - m_sipopt._SENSITIVITY_TOOLBOX_DATA.H.ctype is Var) + self.assertTrue( + hasattr(m_sipopt._SENSITIVITY_TOOLBOX_DATA, 'a') + and m_sipopt._SENSITIVITY_TOOLBOX_DATA.a.ctype is Var + ) + self.assertTrue( + hasattr(m_sipopt._SENSITIVITY_TOOLBOX_DATA, 'H') + and m_sipopt._SENSITIVITY_TOOLBOX_DATA.H.ctype is Var + ) # test for suffixes - self.assertTrue(hasattr(m_sipopt,'sens_state_0') and - m_sipopt.sens_state_0.ctype is Suffix and - m_sipopt.sens_state_0[m_sipopt._SENSITIVITY_TOOLBOX_DATA.H]==2 and - m_sipopt.sens_state_0[m_sipopt._SENSITIVITY_TOOLBOX_DATA.a]==1) - - self.assertTrue(hasattr(m_sipopt,'sens_state_1') and - m_sipopt.sens_state_1.ctype is Suffix and - m_sipopt.sens_state_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.H]==2 and - m_sipopt.sens_state_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.a]==1) - - self.assertTrue(hasattr(m_sipopt,'sens_state_value_1') and - m_sipopt.sens_state_value_1.ctype is Suffix and - m_sipopt.sens_state_value_1[ - m_sipopt._SENSITIVITY_TOOLBOX_DATA.H]==0.55 and - m_sipopt.sens_state_value_1[ - m_sipopt._SENSITIVITY_TOOLBOX_DATA.a]==-0.25) - - self.assertTrue(hasattr(m_sipopt,'sens_init_constr') and - m_sipopt.sens_init_constr.ctype is Suffix and - m_sipopt.sens_init_constr[ - m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[1]]==1 and - m_sipopt.sens_init_constr[ - m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[2]]==2) - - self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1') and - m_sipopt.sens_sol_state_1.ctype is Suffix) + self.assertTrue( + hasattr(m_sipopt, 'sens_state_0') + and m_sipopt.sens_state_0.ctype is Suffix + and m_sipopt.sens_state_0[m_sipopt._SENSITIVITY_TOOLBOX_DATA.H] == 2 + and m_sipopt.sens_state_0[m_sipopt._SENSITIVITY_TOOLBOX_DATA.a] == 1 + ) + + self.assertTrue( + hasattr(m_sipopt, 'sens_state_1') + and m_sipopt.sens_state_1.ctype is Suffix + and m_sipopt.sens_state_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.H] == 2 + and m_sipopt.sens_state_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.a] == 1 + ) + + self.assertTrue( + hasattr(m_sipopt, 'sens_state_value_1') + and m_sipopt.sens_state_value_1.ctype is Suffix + and m_sipopt.sens_state_value_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.H] + == 0.55 + and m_sipopt.sens_state_value_1[m_sipopt._SENSITIVITY_TOOLBOX_DATA.a] + == -0.25 + ) + + self.assertTrue( + hasattr(m_sipopt, 'sens_init_constr') + and m_sipopt.sens_init_constr.ctype is Suffix + and m_sipopt.sens_init_constr[ + m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[1] + ] + == 1 + and m_sipopt.sens_init_constr[ + m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[2] + ] + == 2 + ) + + self.assertTrue( + hasattr(m_sipopt, 'sens_sol_state_1') + and m_sipopt.sens_sol_state_1.ctype is Suffix + ) self.assertAlmostEqual( - m_sipopt.sens_sol_state_1[ - m_sipopt.F[15]],-0.00102016765,8) + m_sipopt.sens_sol_state_1[m_sipopt.F[15]], -0.00102016765, 8 + ) - self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1_z_L') and - m_sipopt.sens_sol_state_1_z_L.ctype is Suffix) + self.assertTrue( + hasattr(m_sipopt, 'sens_sol_state_1_z_L') + and m_sipopt.sens_sol_state_1_z_L.ctype is Suffix + ) self.assertAlmostEqual( - m_sipopt.sens_sol_state_1_z_L[ - m_sipopt.u[15]],-2.181712e-09,13) + m_sipopt.sens_sol_state_1_z_L[m_sipopt.u[15]], -2.181712e-09, 13 + ) - self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1_z_U') and - m_sipopt.sens_sol_state_1_z_U.ctype is Suffix) + self.assertTrue( + hasattr(m_sipopt, 'sens_sol_state_1_z_U') + and m_sipopt.sens_sol_state_1_z_U.ctype is Suffix + ) self.assertAlmostEqual( - m_sipopt.sens_sol_state_1_z_U[ - m_sipopt.u[15]],6.580899e-09,13) + m_sipopt.sens_sol_state_1_z_U[m_sipopt.u[15]], 6.580899e-09, 13 + ) # verify deactivated constraints on model - self.assertFalse(m_sipopt.FDiffCon[0].active and - m_sipopt.FDiffCon[7.5].active and - m_sipopt.FDiffCon[15].active ) + self.assertFalse( + m_sipopt.FDiffCon[0].active + and m_sipopt.FDiffCon[7.5].active + and m_sipopt.FDiffCon[15].active + ) - self.assertFalse(m_sipopt.x_dot[0].active and - m_sipopt.x_dot[7.5].active and - m_sipopt.x_dot[15].active ) + self.assertFalse( + m_sipopt.x_dot[0].active + and m_sipopt.x_dot[7.5].active + and m_sipopt.x_dot[15].active + ) # test model solution # NOTE: @@ -340,123 +424,168 @@ def test_noClone_soln(self): # so all this test is doing is making sure that the # objective value doesn't change. This test does nothing to # check values of the perturbed solution. - self.assertAlmostEqual(value(m_sipopt.J),0.0048956783,8) - + self.assertAlmostEqual(value(m_sipopt.J), 0.0048956783, 8) # test indexed param mapping to var and perturbed values @unittest.skipIf(not scipy_available, "scipy is required for this test") @unittest.skipIf(not opt.available(False), "ipopt_sens is not available") def test_indexedParamsMapping(self): - m = hiv.create_model() - hiv.initialize_model(m,10,5,1) + hiv.initialize_model(m, 10, 5, 1) - m.epsDelta = Param(initialize = 0.75001) + m.epsDelta = Param(initialize=0.75001) q_del = {} - q_del[(0,0)] = 1.001 - q_del[(0,1)] = 1.002 - q_del[(1,0)] = 1.003 - q_del[(1,1)] = 1.004 - q_del[(2,0)] = 0.83001 - q_del[(2,1)] = 0.83002 - q_del[(3,0)] = 0.42001 - q_del[(4,0)] = 0.17001 - m.qqDelta = Param(m.ij, initialize = q_del) - - m.aaDelta = Param(initialize =0.0001001) - - m_sipopt = sensitivity_calculation('sipopt',m, [m.eps,m.qq,m.aa], - [m.epsDelta,m.qqDelta,m.aaDelta]) + q_del[(0, 0)] = 1.001 + q_del[(0, 1)] = 1.002 + q_del[(1, 0)] = 1.003 + q_del[(1, 1)] = 1.004 + q_del[(2, 0)] = 0.83001 + q_del[(2, 1)] = 0.83002 + q_del[(3, 0)] = 0.42001 + q_del[(4, 0)] = 0.17001 + m.qqDelta = Param(m.ij, initialize=q_del) + + m.aaDelta = Param(initialize=0.0001001) + + m_sipopt = sensitivity_calculation( + 'sipopt', m, [m.eps, m.qq, m.aa], [m.epsDelta, m.qqDelta, m.aaDelta] + ) # Make sure Param constraints have the correct form, i.e. # 0 <= _SENSITIVITY_TOOLBOX_DATA.PARAM_NAME - PARAM_NAME <= 0 - self.assertEqual( - m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[1].lower, 0.0) - self.assertEqual( - m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[1].upper, 0.0) + self.assertEqual(m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[1].lower, 0.0) + self.assertEqual(m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[1].upper, 0.0) self.assertEqual( m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[1].body.to_string(), - '_SENSITIVITY_TOOLBOX_DATA.eps - eps') - self.assertEqual( - m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[6].lower, 0.0) - self.assertEqual( - m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[6].upper, 0.0) + '_SENSITIVITY_TOOLBOX_DATA.eps - eps', + ) + self.assertEqual(m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[6].lower, 0.0) + self.assertEqual(m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[6].upper, 0.0) self.assertEqual( m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[6].body.to_string(), - '_SENSITIVITY_TOOLBOX_DATA.qq[2,0] - qq[2,0]') - self.assertEqual( - m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[10].lower, 0.0) - self.assertEqual( - m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[10].upper, 0.0) + '_SENSITIVITY_TOOLBOX_DATA.qq[2,0] - qq[2,0]', + ) + self.assertEqual(m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[10].lower, 0.0) + self.assertEqual(m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[10].upper, 0.0) self.assertEqual( m_sipopt._SENSITIVITY_TOOLBOX_DATA.paramConst[10].body.to_string(), - '_SENSITIVITY_TOOLBOX_DATA.aa - aa') - + '_SENSITIVITY_TOOLBOX_DATA.aa - aa', + ) # test Constraint substitution @unittest.skipIf(not opt.available(False), "ipopt_sens is not available") def test_constraintSub(self): - m = ri.create_model() m.pert_a = Param(initialize=0.01) m.pert_b = Param(initialize=1.01) - m_sipopt = sensitivity_calculation('sipopt',m,[m.a,m.b], [m.pert_a,m.pert_b]) + m_sipopt = sensitivity_calculation( + 'sipopt', m, [m.a, m.b], [m.pert_a, m.pert_b] + ) # verify substitutions in equality constraint - self.assertTrue(m_sipopt.C_equal.lower.ctype is Param and - m_sipopt.C_equal.upper.ctype is Param) + self.assertTrue( + m_sipopt.C_equal.lower.ctype is Param + and m_sipopt.C_equal.upper.ctype is Param + ) self.assertFalse(m_sipopt.C_equal.active) - self.assertTrue(m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[3].lower == 0.0 and - m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[3].upper == 0.0 and - len(list(identify_variables( - m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[3].body))) == 2) + self.assertTrue( + m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[3].lower == 0.0 + and m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[3].upper == 0.0 + and len( + list( + identify_variables( + m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[3].body + ) + ) + ) + == 2 + ) # verify substitutions in one-sided bounded constraint - self.assertTrue(m_sipopt.C_singleBnd.lower is None and - m_sipopt.C_singleBnd.upper.ctype is Param) + self.assertTrue( + m_sipopt.C_singleBnd.lower is None + and m_sipopt.C_singleBnd.upper.ctype is Param + ) self.assertFalse(m_sipopt.C_singleBnd.active) - self.assertTrue(m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[4].lower is None and - m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[4].upper == 0.0 and - len(list(identify_variables( - m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[4].body))) == 2) - + self.assertTrue( + m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[4].lower is None + and m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[4].upper == 0.0 + and len( + list( + identify_variables( + m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[4].body + ) + ) + ) + == 2 + ) + # verify substitutions in ranged inequality constraint - self.assertTrue(m_sipopt.C_rangedIn.lower.ctype is Param and - m_sipopt.C_rangedIn.upper.ctype is Param) + self.assertTrue( + m_sipopt.C_rangedIn.lower.ctype is Param + and m_sipopt.C_rangedIn.upper.ctype is Param + ) self.assertFalse(m_sipopt.C_rangedIn.active) - self.assertTrue(m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[1].lower is None and - m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[1].upper == 0.0 and - len(list(identify_variables( - m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[1].body))) == 2) + self.assertTrue( + m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[1].lower is None + and m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[1].upper == 0.0 + and len( + list( + identify_variables( + m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[1].body + ) + ) + ) + == 2 + ) - self.assertTrue(m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[2].lower is None and - m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[2].upper == 0.0 and - len(list(identify_variables( - m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[2].body))) == 2) + self.assertTrue( + m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[2].lower is None + and m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[2].upper == 0.0 + and len( + list( + identify_variables( + m_sipopt._SENSITIVITY_TOOLBOX_DATA.constList[2].body + ) + ) + ) + == 2 + ) # Test example `parameter.py` @unittest.skipIf(not opt.available(False), "ipopt_sens is not available") def test_parameter_example(self): - d = param_ex.run_example() - - d_correct = {'eta1':4.5, 'eta2':1.0, 'x1_init':0.15, 'x2_init':0.15, 'x3_init':0.0, - 'cost_sln':0.5, 'x1_sln':0.5, 'x2_sln':0.5, 'x3_sln':0.0, 'eta1_pert':4.0, - 'eta2_pert':1.0, 'x1_pert':0.3333333,'x2_pert':0.6666667,'x3_pert':0.0, - 'cost_pert':0.55555556} - + + d_correct = { + 'eta1': 4.5, + 'eta2': 1.0, + 'x1_init': 0.15, + 'x2_init': 0.15, + 'x3_init': 0.0, + 'cost_sln': 0.5, + 'x1_sln': 0.5, + 'x2_sln': 0.5, + 'x3_sln': 0.0, + 'eta1_pert': 4.0, + 'eta2_pert': 1.0, + 'x1_pert': 0.3333333, + 'x2_pert': 0.6666667, + 'x3_pert': 0.0, + 'cost_pert': 0.55555556, + } + for k in d_correct.keys(): - # Check each element of the 'correct' dictionary against the returned + # Check each element of the 'correct' dictionary against the returned # dictionary to 3 decimal places - self.assertAlmostEqual(d[k],d_correct[k],3) - - + self.assertAlmostEqual(d[k], d_correct[k], 3) + # Test kaug # Perform the same tests as for sipopt # test feedbackController Solution when the model gets cloned @@ -465,14 +594,18 @@ def test_parameter_example(self): @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") def test_kaug_clonedModel_soln_kaug(self): m_orig = fc.create_model() - fc.initialize_model(m_orig,100) + fc.initialize_model(m_orig, 100) m_orig.perturbed_a = Param(initialize=-0.25) m_orig.perturbed_H = Param(initialize=0.55) - m_kaug = sensitivity_calculation('k_aug',m_orig,[m_orig.a,m_orig.H], - [m_orig.perturbed_a,m_orig.perturbed_H], - cloneModel=True) + m_kaug = sensitivity_calculation( + 'k_aug', + m_orig, + [m_orig.a, m_orig.H], + [m_orig.perturbed_a, m_orig.perturbed_H], + cloneModel=True, + ) ptb_map = ComponentMap() ptb_map[m_kaug.a] = value(-(m_orig.perturbed_a - m_orig.a)) @@ -482,84 +615,110 @@ def test_kaug_clonedModel_soln_kaug(self): # and original model is untouched self.assertIsNot(m_kaug, m_orig) - self.assertTrue(hasattr(m_kaug,'_SENSITIVITY_TOOLBOX_DATA') and - m_kaug._SENSITIVITY_TOOLBOX_DATA.ctype is Block) + self.assertTrue( + hasattr(m_kaug, '_SENSITIVITY_TOOLBOX_DATA') + and m_kaug._SENSITIVITY_TOOLBOX_DATA.ctype is Block + ) - self.assertFalse(hasattr(m_orig,'_SENSITIVITY_TOOLBOX_DATA')) - self.assertFalse(hasattr(m_orig,'b')) + self.assertFalse(hasattr(m_orig, '_SENSITIVITY_TOOLBOX_DATA')) + self.assertFalse(hasattr(m_orig, 'b')) # verify variable declaration - self.assertTrue(hasattr(m_kaug._SENSITIVITY_TOOLBOX_DATA,'a') and - m_kaug._SENSITIVITY_TOOLBOX_DATA.a.ctype is Var) - self.assertTrue(hasattr(m_kaug._SENSITIVITY_TOOLBOX_DATA,'H') and - m_kaug._SENSITIVITY_TOOLBOX_DATA.H.ctype is Var) + self.assertTrue( + hasattr(m_kaug._SENSITIVITY_TOOLBOX_DATA, 'a') + and m_kaug._SENSITIVITY_TOOLBOX_DATA.a.ctype is Var + ) + self.assertTrue( + hasattr(m_kaug._SENSITIVITY_TOOLBOX_DATA, 'H') + and m_kaug._SENSITIVITY_TOOLBOX_DATA.H.ctype is Var + ) # verify suffixes - self.assertTrue(hasattr(m_kaug,'sens_state_0') and - m_kaug.sens_state_0.ctype is Suffix and - m_kaug.sens_state_0[m_kaug._SENSITIVITY_TOOLBOX_DATA.H]==2 and - m_kaug.sens_state_0[m_kaug._SENSITIVITY_TOOLBOX_DATA.a]==1) - self.assertTrue(hasattr(m_kaug,'sens_state_1') and - m_kaug.sens_state_1.ctype is Suffix and - m_kaug.sens_state_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.H]==2 and - m_kaug.sens_state_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.a]==1) - self.assertTrue(hasattr(m_kaug,'sens_state_value_1') and - m_kaug.sens_state_value_1.ctype is Suffix and - m_kaug.sens_state_value_1[ - m_kaug._SENSITIVITY_TOOLBOX_DATA.H]==0.55 and - m_kaug.sens_state_value_1[ - m_kaug._SENSITIVITY_TOOLBOX_DATA.a]==-0.25) - self.assertTrue(hasattr(m_kaug,'sens_init_constr') and - m_kaug.sens_init_constr.ctype is Suffix and - m_kaug.sens_init_constr[ - m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]]==1 and - m_kaug.sens_init_constr[ - m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]]==2) - self.assertTrue(hasattr(m_kaug,'DeltaP')) + self.assertTrue( + hasattr(m_kaug, 'sens_state_0') + and m_kaug.sens_state_0.ctype is Suffix + and m_kaug.sens_state_0[m_kaug._SENSITIVITY_TOOLBOX_DATA.H] == 2 + and m_kaug.sens_state_0[m_kaug._SENSITIVITY_TOOLBOX_DATA.a] == 1 + ) + self.assertTrue( + hasattr(m_kaug, 'sens_state_1') + and m_kaug.sens_state_1.ctype is Suffix + and m_kaug.sens_state_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.H] == 2 + and m_kaug.sens_state_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.a] == 1 + ) + self.assertTrue( + hasattr(m_kaug, 'sens_state_value_1') + and m_kaug.sens_state_value_1.ctype is Suffix + and m_kaug.sens_state_value_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.H] == 0.55 + and m_kaug.sens_state_value_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.a] == -0.25 + ) + self.assertTrue( + hasattr(m_kaug, 'sens_init_constr') + and m_kaug.sens_init_constr.ctype is Suffix + and m_kaug.sens_init_constr[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]] + == 1 + and m_kaug.sens_init_constr[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]] + == 2 + ) + self.assertTrue(hasattr(m_kaug, 'DeltaP')) self.assertTrue(m_kaug.DeltaP.ctype is Suffix) self.assertEqual( - m_kaug.DeltaP[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]], - ptb_map[m_kaug.a] - ) + m_kaug.DeltaP[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]], + ptb_map[m_kaug.a], + ) self.assertEqual( - m_kaug.DeltaP[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]], - ptb_map[m_kaug.H] - ) - self.assertTrue(hasattr(m_kaug,'dcdp') and - m_kaug.dcdp.ctype is Suffix and - m_kaug.dcdp[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]]==1 and - m_kaug.dcdp[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]]==2) - self.assertTrue(hasattr(m_kaug,'sens_sol_state_1') and - m_kaug.sens_sol_state_1.ctype is Suffix) - - self.assertTrue(hasattr(m_kaug,'ipopt_zL_in') and - m_kaug.ipopt_zL_in.ctype is Suffix) + m_kaug.DeltaP[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]], + ptb_map[m_kaug.H], + ) + self.assertTrue( + hasattr(m_kaug, 'dcdp') + and m_kaug.dcdp.ctype is Suffix + and m_kaug.dcdp[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]] == 1 + and m_kaug.dcdp[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]] == 2 + ) + self.assertTrue( + hasattr(m_kaug, 'sens_sol_state_1') + and m_kaug.sens_sol_state_1.ctype is Suffix + ) + + self.assertTrue( + hasattr(m_kaug, 'ipopt_zL_in') and m_kaug.ipopt_zL_in.ctype is Suffix + ) self.assertAlmostEqual( - m_kaug.ipopt_zL_in[ - m_kaug.u[15]],7.162686166847096e-09,13) + m_kaug.ipopt_zL_in[m_kaug.u[15]], 7.162686166847096e-09, 13 + ) - self.assertTrue(hasattr(m_kaug,'ipopt_zU_in') and - m_kaug.ipopt_zU_in.ctype is Suffix) + self.assertTrue( + hasattr(m_kaug, 'ipopt_zU_in') and m_kaug.ipopt_zU_in.ctype is Suffix + ) self.assertAlmostEqual( - m_kaug.ipopt_zU_in[ - m_kaug.u[15]],-1.2439730261288605e-08,13) + m_kaug.ipopt_zU_in[m_kaug.u[15]], -1.2439730261288605e-08, 13 + ) # verify deactivated constraints for cloned model - self.assertFalse(m_kaug.FDiffCon[0].active and - m_kaug.FDiffCon[7.5].active and - m_kaug.FDiffCon[15].active ) + self.assertFalse( + m_kaug.FDiffCon[0].active + and m_kaug.FDiffCon[7.5].active + and m_kaug.FDiffCon[15].active + ) - self.assertFalse(m_kaug.x_dot[0].active and - m_kaug.x_dot[7.5].active and - m_kaug.x_dot[15].active ) + self.assertFalse( + m_kaug.x_dot[0].active + and m_kaug.x_dot[7.5].active + and m_kaug.x_dot[15].active + ) # verify constraints on original model are still active - self.assertTrue(m_orig.FDiffCon[0].active and - m_orig.FDiffCon[7.5].active and - m_orig.FDiffCon[15].active ) + self.assertTrue( + m_orig.FDiffCon[0].active + and m_orig.FDiffCon[7.5].active + and m_orig.FDiffCon[15].active + ) - self.assertTrue(m_orig.x_dot[0].active and - m_orig.x_dot[7.5].active and - m_orig.x_dot[15].active ) + self.assertTrue( + m_orig.x_dot[0].active + and m_orig.x_dot[7.5].active + and m_orig.x_dot[15].active + ) # verify solution # This is the only test that verifies the solution. Here we @@ -576,81 +735,99 @@ def test_kaug_clonedModel_soln_kaug(self): @unittest.skipIf(not opt_kaug.available(False), "k_aug is not available") @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") def test_noClone_soln_kaug(self): - m_orig = fc.create_model() - fc.initialize_model(m_orig,100) + fc.initialize_model(m_orig, 100) m_orig.perturbed_a = Param(initialize=-0.25) m_orig.perturbed_H = Param(initialize=0.55) - m_kaug = sensitivity_calculation('k_aug',m_orig,[m_orig.a,m_orig.H], - [m_orig.perturbed_a,m_orig.perturbed_H], - cloneModel=False) + m_kaug = sensitivity_calculation( + 'k_aug', + m_orig, + [m_orig.a, m_orig.H], + [m_orig.perturbed_a, m_orig.perturbed_H], + cloneModel=False, + ) ptb_map = ComponentMap() ptb_map[m_kaug.a] = value(-(m_kaug.perturbed_a - m_kaug.a)) ptb_map[m_kaug.H] = value(-(m_kaug.perturbed_H - m_kaug.H)) self.assertTrue(m_kaug == m_orig) - + # verify suffixes - self.assertTrue(hasattr(m_kaug,'sens_state_0') and - m_kaug.sens_state_0.ctype is Suffix and - m_kaug.sens_state_0[m_kaug._SENSITIVITY_TOOLBOX_DATA.H]==2 and - m_kaug.sens_state_0[m_kaug._SENSITIVITY_TOOLBOX_DATA.a]==1) - self.assertTrue(hasattr(m_kaug,'sens_state_1') and - m_kaug.sens_state_1.ctype is Suffix and - m_kaug.sens_state_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.H]==2 and - m_kaug.sens_state_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.a]==1) - self.assertTrue(hasattr(m_kaug,'sens_state_value_1') and - m_kaug.sens_state_value_1.ctype is Suffix and - m_kaug.sens_state_value_1[ - m_kaug._SENSITIVITY_TOOLBOX_DATA.H]==0.55 and - m_kaug.sens_state_value_1[ - m_kaug._SENSITIVITY_TOOLBOX_DATA.a]==-0.25) - self.assertTrue(hasattr(m_kaug,'sens_init_constr') and - m_kaug.sens_init_constr.ctype is Suffix and - m_kaug.sens_init_constr[ - m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]]==1 and - m_kaug.sens_init_constr[ - m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]]==2) - self.assertTrue(hasattr(m_kaug,'DeltaP')) + self.assertTrue( + hasattr(m_kaug, 'sens_state_0') + and m_kaug.sens_state_0.ctype is Suffix + and m_kaug.sens_state_0[m_kaug._SENSITIVITY_TOOLBOX_DATA.H] == 2 + and m_kaug.sens_state_0[m_kaug._SENSITIVITY_TOOLBOX_DATA.a] == 1 + ) + self.assertTrue( + hasattr(m_kaug, 'sens_state_1') + and m_kaug.sens_state_1.ctype is Suffix + and m_kaug.sens_state_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.H] == 2 + and m_kaug.sens_state_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.a] == 1 + ) + self.assertTrue( + hasattr(m_kaug, 'sens_state_value_1') + and m_kaug.sens_state_value_1.ctype is Suffix + and m_kaug.sens_state_value_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.H] == 0.55 + and m_kaug.sens_state_value_1[m_kaug._SENSITIVITY_TOOLBOX_DATA.a] == -0.25 + ) + self.assertTrue( + hasattr(m_kaug, 'sens_init_constr') + and m_kaug.sens_init_constr.ctype is Suffix + and m_kaug.sens_init_constr[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]] + == 1 + and m_kaug.sens_init_constr[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]] + == 2 + ) + self.assertTrue(hasattr(m_kaug, 'DeltaP')) self.assertIs(m_kaug.DeltaP.ctype, Suffix) self.assertEqual( - m_kaug.DeltaP[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]], - ptb_map[m_kaug.a] - ) + m_kaug.DeltaP[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]], + ptb_map[m_kaug.a], + ) self.assertEqual( - m_kaug.DeltaP[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]], - ptb_map[m_kaug.H] - ) - self.assertTrue(hasattr(m_kaug,'dcdp') and - m_kaug.dcdp.ctype is Suffix and - m_kaug.dcdp[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]]==1 and - m_kaug.dcdp[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]]==2) - self.assertTrue(hasattr(m_kaug,'sens_sol_state_1') and - m_kaug.sens_sol_state_1.ctype is Suffix) - - self.assertTrue(hasattr(m_kaug,'ipopt_zL_in') and - m_kaug.ipopt_zL_in.ctype is Suffix) + m_kaug.DeltaP[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]], + ptb_map[m_kaug.H], + ) + self.assertTrue( + hasattr(m_kaug, 'dcdp') + and m_kaug.dcdp.ctype is Suffix + and m_kaug.dcdp[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1]] == 1 + and m_kaug.dcdp[m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[2]] == 2 + ) + self.assertTrue( + hasattr(m_kaug, 'sens_sol_state_1') + and m_kaug.sens_sol_state_1.ctype is Suffix + ) + + self.assertTrue( + hasattr(m_kaug, 'ipopt_zL_in') and m_kaug.ipopt_zL_in.ctype is Suffix + ) self.assertAlmostEqual( - m_kaug.ipopt_zL_in[ - m_kaug.u[15]],7.162686166847096e-09,13) + m_kaug.ipopt_zL_in[m_kaug.u[15]], 7.162686166847096e-09, 13 + ) - self.assertTrue(hasattr(m_kaug,'ipopt_zU_in') and - m_kaug.ipopt_zU_in.ctype is Suffix) + self.assertTrue( + hasattr(m_kaug, 'ipopt_zU_in') and m_kaug.ipopt_zU_in.ctype is Suffix + ) self.assertAlmostEqual( - m_kaug.ipopt_zU_in[ - m_kaug.u[15]],-1.2439730261288605e-08,13) + m_kaug.ipopt_zU_in[m_kaug.u[15]], -1.2439730261288605e-08, 13 + ) # verify deactivated constraints for cloned model - self.assertFalse(m_kaug.FDiffCon[0].active and - m_kaug.FDiffCon[7.5].active and - m_kaug.FDiffCon[15].active ) - - self.assertFalse(m_kaug.x_dot[0].active and - m_kaug.x_dot[7.5].active and - m_kaug.x_dot[15].active ) + self.assertFalse( + m_kaug.FDiffCon[0].active + and m_kaug.FDiffCon[7.5].active + and m_kaug.FDiffCon[15].active + ) + self.assertFalse( + m_kaug.x_dot[0].active + and m_kaug.x_dot[7.5].active + and m_kaug.x_dot[15].active + ) # verify solution # This is the only test that verifies the solution. Here we @@ -663,74 +840,79 @@ def test_noClone_soln_kaug(self): # 0.00263 is the value we get after sensitivity update with k_aug # using MA57 and k_aug's default regularization strategy. - # test indexed param mapping to var and perturbed values @unittest.skipIf(not scipy_available, "scipy is required for this test") @unittest.skipIf(not opt_kaug.available(False), "k_aug is not available") @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") def test_indexedParamsMapping_kaug(self): - m = hiv.create_model() - hiv.initialize_model(m,10,5,1) + hiv.initialize_model(m, 10, 5, 1) - m.epsDelta = Param(initialize = 0.75001) + m.epsDelta = Param(initialize=0.75001) q_del = {} - q_del[(0,0)] = 1.001 - q_del[(0,1)] = 1.002 - q_del[(1,0)] = 1.003 - q_del[(1,1)] = 1.004 - q_del[(2,0)] = 0.83001 - q_del[(2,1)] = 0.83002 - q_del[(3,0)] = 0.42001 - q_del[(4,0)] = 0.17001 - m.qqDelta = Param(m.ij, initialize = q_del) - - m.aaDelta = Param(initialize =0.0001001) - - m_kaug = sensitivity_calculation('k_aug',m, [m.eps,m.qq,m.aa], - [m.epsDelta,m.qqDelta,m.aaDelta]) + q_del[(0, 0)] = 1.001 + q_del[(0, 1)] = 1.002 + q_del[(1, 0)] = 1.003 + q_del[(1, 1)] = 1.004 + q_del[(2, 0)] = 0.83001 + q_del[(2, 1)] = 0.83002 + q_del[(3, 0)] = 0.42001 + q_del[(4, 0)] = 0.17001 + m.qqDelta = Param(m.ij, initialize=q_del) + + m.aaDelta = Param(initialize=0.0001001) + + m_kaug = sensitivity_calculation( + 'k_aug', m, [m.eps, m.qq, m.aa], [m.epsDelta, m.qqDelta, m.aaDelta] + ) # Make sure Param constraints have the correct form, i.e. # 0 <= _SENSITIVITY_TOOLBOX_DATA.PARAM_NAME - PARAM_NAME <= 0 - self.assertEqual( - m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1].lower, 0.0) - self.assertEqual( - m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1].upper, 0.0) + self.assertEqual(m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1].lower, 0.0) + self.assertEqual(m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1].upper, 0.0) self.assertEqual( m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[1].body.to_string(), - '_SENSITIVITY_TOOLBOX_DATA.eps - eps') - self.assertEqual( - m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[6].lower, 0.0) - self.assertEqual( - m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[6].upper, 0.0) + '_SENSITIVITY_TOOLBOX_DATA.eps - eps', + ) + self.assertEqual(m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[6].lower, 0.0) + self.assertEqual(m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[6].upper, 0.0) self.assertEqual( m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[6].body.to_string(), - '_SENSITIVITY_TOOLBOX_DATA.qq[2,0] - qq[2,0]') - self.assertEqual( - m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[10].lower, 0.0) - self.assertEqual( - m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[10].upper, 0.0) + '_SENSITIVITY_TOOLBOX_DATA.qq[2,0] - qq[2,0]', + ) + self.assertEqual(m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[10].lower, 0.0) + self.assertEqual(m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[10].upper, 0.0) self.assertEqual( m_kaug._SENSITIVITY_TOOLBOX_DATA.paramConst[10].body.to_string(), - '_SENSITIVITY_TOOLBOX_DATA.aa - aa') + '_SENSITIVITY_TOOLBOX_DATA.aa - aa', + ) # Test example `parameter_kaug.py` @unittest.skipIf(not opt_kaug.available(False), "k_aug is not available") - @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") + @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") def test_parameter_example_kaug(self): - d = param_kaug_ex.run_example() - d_correct = {'eta1':4.5, 'eta2':1.0, 'x1_init':0.15, 'x2_init':0.15, 'x3_init':0.0, - 'eta1_pert':4.0, 'eta2_pert':1.0, 'x1_pert':0.3333333,'x2_pert':0.6666667, - 'x3_pert':0.0, 'cost_pert':0.55555556} + d_correct = { + 'eta1': 4.5, + 'eta2': 1.0, + 'x1_init': 0.15, + 'x2_init': 0.15, + 'x3_init': 0.0, + 'eta1_pert': 4.0, + 'eta2_pert': 1.0, + 'x1_pert': 0.3333333, + 'x2_pert': 0.6666667, + 'x3_pert': 0.0, + 'cost_pert': 0.55555556, + } for k in d_correct.keys(): - # Check each element of the 'correct' dictionary against the returned + # Check each element of the 'correct' dictionary against the returned # dictionary to 3 decimal places - self.assertAlmostEqual(d[k],d_correct[k],3) + self.assertAlmostEqual(d[k], d_correct[k], 3) -if __name__=="__main__": +if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/sensitivity_toolbox/tests/test_sens_unit.py b/pyomo/contrib/sensitivity_toolbox/tests/test_sens_unit.py index b0560f1723d..05faada3007 100644 --- a/pyomo/contrib/sensitivity_toolbox/tests/test_sens_unit.py +++ b/pyomo/contrib/sensitivity_toolbox/tests/test_sens_unit.py @@ -18,44 +18,46 @@ import logging from pyomo.environ import ( - ConcreteModel, - Objective, - Param, - Var, - Block, - Suffix, - value, - Constraint, - inequality, - NonNegativeReals, - minimize, - exp - ) + ConcreteModel, + Objective, + Param, + Var, + Block, + Suffix, + value, + Constraint, + inequality, + NonNegativeReals, + minimize, + exp, +) from pyomo.core.base.component import ComponentData from pyomo.common.dependencies import scipy_available from pyomo.common.log import LoggingIntercept from pyomo.common.collections import ComponentMap, ComponentSet -from pyomo.core.expr.current import identify_variables -from pyomo.core.expr.visitor import identify_mutable_parameters +from pyomo.core.expr.visitor import identify_variables, identify_mutable_parameters from pyomo.contrib.sensitivity_toolbox.sens import ( - SensitivityInterface, - _NotAnIndex, - get_dsdp, - get_dfds_dcds, - line_num - ) + SensitivityInterface, + _NotAnIndex, + get_dsdp, + get_dfds_dcds, + line_num, +) import pyomo.contrib.sensitivity_toolbox.examples.parameter as param_example from pyomo.opt import SolverFactory from pyomo.common.dependencies import ( - numpy as np, numpy_available, - pandas as pd, pandas_available, + numpy as np, + numpy_available, + pandas as pd, + pandas_available, ) from pyomo.common.dependencies import scipy_available -opt_ipopt = SolverFactory('ipopt',solver_io='nl') -opt_kaug = SolverFactory('k_aug',solver_io='nl') -opt_dotsens = SolverFactory('dot_sens',solver_io='nl') +opt_ipopt = SolverFactory('ipopt', solver_io='nl') +opt_kaug = SolverFactory('k_aug', solver_io='nl') +opt_dotsens = SolverFactory('dot_sens', solver_io='nl') + def make_indexed_model(): """ @@ -64,20 +66,23 @@ def make_indexed_model(): """ m = ConcreteModel() - m.x = Var([1, 2, 3], initialize={1: 0.15, 2: 0.15, 3: 0.0}, - domain=NonNegativeReals) + m.x = Var([1, 2, 3], initialize={1: 0.15, 2: 0.15, 3: 0.0}, domain=NonNegativeReals) m.eta = Param([1, 2], initialize={1: 4.5, 2: 1.0}, mutable=True) - m.const = Constraint([1, 2], rule={ - 1: 6*m.x[1] + 3*m.x[2] + 2*m.x[3] - m.eta[1] == 0, - 2: m.eta[2]*m.x[1] + m.x[2] - m.x[3] - 1 == 0, - }) + m.const = Constraint( + [1, 2], + rule={ + 1: 6 * m.x[1] + 3 * m.x[2] + 2 * m.x[3] - m.eta[1] == 0, + 2: m.eta[2] * m.x[1] + m.x[2] - m.x[3] - 1 == 0, + }, + ) - m.cost = Objective(expr=m.x[1]**2 + m.x[2]**2 + m.x[3]**2) + m.cost = Objective(expr=m.x[1] ** 2 + m.x[2] ** 2 + m.x[3] ** 2) return m + def make_model_with_inequalities(): """ Creates a modified version of the model used in the "parameter.py" @@ -85,20 +90,23 @@ def make_model_with_inequalities(): """ m = ConcreteModel() - m.x = Var([1, 2, 3], initialize={1: 0.15, 2: 0.15, 3: 0.0}, - domain=NonNegativeReals) + m.x = Var([1, 2, 3], initialize={1: 0.15, 2: 0.15, 3: 0.0}, domain=NonNegativeReals) m.eta = Param([1, 2], initialize={1: 4.5, 2: 1.0}, mutable=True) - m.const = Constraint([1, 2], rule={ - 1: 6*m.x[1] + 3*m.x[2] + 2*m.x[3] >= m.eta[1], - 2: m.eta[2]*m.x[1] + m.x[2] - m.x[3] - 1 <= 0, - }) + m.const = Constraint( + [1, 2], + rule={ + 1: 6 * m.x[1] + 3 * m.x[2] + 2 * m.x[3] >= m.eta[1], + 2: m.eta[2] * m.x[1] + m.x[2] - m.x[3] - 1 <= 0, + }, + ) - m.cost = Objective(expr=m.x[1]**2 + m.x[2]**2 + m.x[3]**2) + m.cost = Objective(expr=m.x[1] ** 2 + m.x[2] ** 2 + m.x[3] ** 2) return m + def make_model_with_ranged_inequalities(): """ Creates a modified version of the model used in the "parameter.py" @@ -106,32 +114,34 @@ def make_model_with_ranged_inequalities(): """ m = ConcreteModel() - m.x = Var([1, 2, 3], initialize={1: 0.15, 2: 0.15, 3: 0.0}, - domain=NonNegativeReals) + m.x = Var([1, 2, 3], initialize={1: 0.15, 2: 0.15, 3: 0.0}, domain=NonNegativeReals) m.p = Param(initialize=10.0, mutable=True) m.eta = Param([1, 2], initialize={1: 4.5, 2: 1.0}, mutable=True) - m.const = Constraint([1, 2], rule={ - 1: inequality( - lower=-m.eta[1], - body=6*m.x[1] + 3*m.x[2] + 2*m.x[3], - upper=m.p + m.eta[1], + m.const = Constraint( + [1, 2], + rule={ + 1: inequality( + lower=-m.eta[1], + body=6 * m.x[1] + 3 * m.x[2] + 2 * m.x[3], + upper=m.p + m.eta[1], ), - 2: m.eta[2]*m.x[1] + m.x[2] - m.x[3] - 1 <= 0, - }) + 2: m.eta[2] * m.x[1] + m.x[2] - m.x[3] - 1 <= 0, + }, + ) - m.cost = Objective(expr=m.x[1]**2 + m.x[2]**2 + m.x[3]**2) + m.cost = Objective(expr=m.x[1] ** 2 + m.x[2] ** 2 + m.x[3] ** 2) return m -class TestSensitivityInterface(unittest.TestCase): +class TestSensitivityInterface(unittest.TestCase): def assertIsSubset(self, s1, s2): for item in s1: self.assertIn(item, s2) - + def test_get_names(self): block_name = SensitivityInterface.get_default_block_name() self.assertEqual(block_name, "_SENSITIVITY_TOOLBOX_DATA") @@ -220,28 +230,26 @@ def test_add_sensitivity_data(self): self.assertEqual(len(sens.block._sens_data_list), 7) pred_sens_data_list = [ - (model.x[1], Param, 0, 1), - (model.x[2], Param, 0, 2), - (model.x[3], Param, 0, 3), - (model.x[1], Param, 1, _NotAnIndex), - (Var, model.eta[1], 2, 1), - (Var, model.eta[2], 2, 2), - (Var, model.eta[1], 3, _NotAnIndex), - ] + (model.x[1], Param, 0, 1), + (model.x[2], Param, 0, 2), + (model.x[3], Param, 0, 3), + (model.x[1], Param, 1, _NotAnIndex), + (Var, model.eta[1], 2, 1), + (Var, model.eta[2], 2, 2), + (Var, model.eta[1], 3, _NotAnIndex), + ] for data, pred in zip(sens.block._sens_data_list, pred_sens_data_list): if isinstance(pred[0], ComponentData): self.assertIs(data[0], pred[0]) self.assertIs(data[1].ctype, pred[1]) name = data[0].parent_component().local_name - self.assertTrue( - data[1].parent_component().local_name.startswith(name)) + self.assertTrue(data[1].parent_component().local_name.startswith(name)) else: self.assertIs(data[0].ctype, pred[0]) self.assertIs(data[1], pred[1]) name = data[1].parent_component().local_name - self.assertTrue( - data[0].parent_component().local_name.startswith(name)) + self.assertTrue(data[0].parent_component().local_name.startswith(name)) self.assertEqual(data[2], pred[2]) self.assertEqual(data[3], pred[3]) @@ -278,29 +286,33 @@ def test_expression_replacement_equality(self): param_list = [instance.eta[1], instance.eta[2]] sens._add_sensitivity_data(param_list) - orig_components = (list(instance.component_data_objects(Constraint, - active=True)) + list(instance.component_data_objects(Objective, - active=True))) + orig_components = list( + instance.component_data_objects(Constraint, active=True) + ) + list(instance.component_data_objects(Objective, active=True)) orig_expr = [con.expr for con in orig_components] # These will be modified to account for expected replacements - expected_variables = ComponentMap((con, - ComponentSet(identify_variables(con.expr))) - for con in orig_components) - expected_parameters = ComponentMap((con, - ComponentSet(identify_mutable_parameters(con.expr))) - for con in orig_components) + expected_variables = ComponentMap( + (con, ComponentSet(identify_variables(con.expr))) for con in orig_components + ) + expected_parameters = ComponentMap( + (con, ComponentSet(identify_mutable_parameters(con.expr))) + for con in orig_components + ) # As constructed by the `setup_sensitivity` method: - variable_sub_map = dict((id(param), var) - for var, param, list_idx, _ in block._sens_data_list - if param_list[list_idx].ctype is Param) + variable_sub_map = dict( + (id(param), var) + for var, param, list_idx, _ in block._sens_data_list + if param_list[list_idx].ctype is Param + ) # Sanity check self.assertEqual(len(variable_sub_map), 2) # Map each param to the var that should replace it - param_var_map = ComponentMap((param, var) - for var, param, _, _ in block._sens_data_list) + param_var_map = ComponentMap( + (param, var) for var, param, _, _ in block._sens_data_list + ) # Remove parameters we expect to replace and add vars # we expect to replace with. @@ -335,7 +347,7 @@ def test_expression_replacement_equality(self): # Original components were deactivated but otherwise not altered for con, expr in zip(orig_components, orig_expr): self.assertFalse(con.active) - #self.assertIs(con.expr, expr) + # self.assertIs(con.expr, expr) # ^Why does this fail? self.assertEqual(con.expr.to_string(), expr.to_string()) @@ -349,29 +361,33 @@ def test_expression_replacement_inequality(self): param_list = [instance.eta[1], instance.eta[2]] sens._add_sensitivity_data(param_list) - orig_components = (list(instance.component_data_objects(Constraint, - active=True)) + list(instance.component_data_objects(Objective, - active=True))) + orig_components = list( + instance.component_data_objects(Constraint, active=True) + ) + list(instance.component_data_objects(Objective, active=True)) orig_expr = [con.expr for con in orig_components] # These will be modified to account for expected replacements - expected_variables = ComponentMap((con, - ComponentSet(identify_variables(con.expr))) - for con in orig_components) - expected_parameters = ComponentMap((con, - ComponentSet(identify_mutable_parameters(con.expr))) - for con in orig_components) + expected_variables = ComponentMap( + (con, ComponentSet(identify_variables(con.expr))) for con in orig_components + ) + expected_parameters = ComponentMap( + (con, ComponentSet(identify_mutable_parameters(con.expr))) + for con in orig_components + ) # As constructed by the `setup_sensitivity` method: - variable_sub_map = dict((id(param), var) - for var, param, list_idx, _ in block._sens_data_list - if param_list[list_idx].ctype is Param) + variable_sub_map = dict( + (id(param), var) + for var, param, list_idx, _ in block._sens_data_list + if param_list[list_idx].ctype is Param + ) # Sanity check self.assertEqual(len(variable_sub_map), 2) # Map each param to the var that should replace it - param_var_map = ComponentMap((param, var) - for var, param, _, _ in block._sens_data_list) + param_var_map = ComponentMap( + (param, var) for var, param, _, _ in block._sens_data_list + ) # Remove parameters we expect to replace and add vars # we expect to replace with. @@ -406,7 +422,7 @@ def test_expression_replacement_inequality(self): # Original components were deactivated but otherwise not altered for con, expr in zip(orig_components, orig_expr): self.assertFalse(con.active) - #self.assertIs(con.expr, expr) + # self.assertIs(con.expr, expr) # ^Why does this fail? self.assertEqual(con.expr.to_string(), expr.to_string()) @@ -420,29 +436,33 @@ def test_expression_replacement_ranged_inequality(self): param_list = [instance.eta[1], instance.eta[2]] sens._add_sensitivity_data(param_list) - orig_components = (list(instance.component_data_objects(Constraint, - active=True)) + list(instance.component_data_objects(Objective, - active=True))) + orig_components = list( + instance.component_data_objects(Constraint, active=True) + ) + list(instance.component_data_objects(Objective, active=True)) orig_expr = [con.expr for con in orig_components] # These will be modified to account for expected replacements - expected_variables = ComponentMap((con, - ComponentSet(identify_variables(con.expr))) - for con in orig_components) - expected_parameters = ComponentMap((con, - ComponentSet(identify_mutable_parameters(con.expr))) - for con in orig_components) + expected_variables = ComponentMap( + (con, ComponentSet(identify_variables(con.expr))) for con in orig_components + ) + expected_parameters = ComponentMap( + (con, ComponentSet(identify_mutable_parameters(con.expr))) + for con in orig_components + ) # As constructed by the `setup_sensitivity` method: - variable_sub_map = dict((id(param), var) - for var, param, list_idx, _ in block._sens_data_list - if param_list[list_idx].ctype is Param) + variable_sub_map = dict( + (id(param), var) + for var, param, list_idx, _ in block._sens_data_list + if param_list[list_idx].ctype is Param + ) # Sanity check self.assertEqual(len(variable_sub_map), 2) # Map each param to the var that should replace it - param_var_map = ComponentMap((param, var) - for var, param, _, _ in block._sens_data_list) + param_var_map = ComponentMap( + (param, var) for var, param, _, _ in block._sens_data_list + ) # Remove parameters we expect to replace and add vars # we expect to replace with. @@ -483,7 +503,7 @@ def test_expression_replacement_ranged_inequality(self): # Original components were deactivated but otherwise not altered for con, expr in zip(orig_components, orig_expr): self.assertFalse(con.active) - #self.assertIs(con.expr, expr) + # self.assertIs(con.expr, expr) # ^Why does this fail? self.assertEqual(con.expr.to_string(), expr.to_string()) @@ -497,8 +517,9 @@ def test_param_const(self): param_const = block.paramConst self.assertEqual(len(param_list), len(block.paramConst)) - param_var_map = ComponentMap((param, var) - for var, param, _, _ in block._sens_data_list) + param_var_map = ComponentMap( + (param, var) for var, param, _, _ in block._sens_data_list + ) var_list = [param_var_map[param] for param in param_list] # Here we rely on the order of paramConst @@ -514,8 +535,9 @@ def test_param_const_indexed(self): block = sens.block param_const = block.paramConst - param_var_map = ComponentMap((param, var) - for var, param, _, _ in block._sens_data_list) + param_var_map = ComponentMap( + (param, var) for var, param, _, _ in block._sens_data_list + ) for con in param_const.values(): var_list = list(identify_variables(con.expr)) @@ -523,8 +545,9 @@ def test_param_const_indexed(self): self.assertEqual(len(var_list), 1) self.assertEqual(len(mut_param_list), 1) self.assertIs(var_list[0], param_var_map[mut_param_list[0]]) - self.assertEqual(con.body.to_string(), - (var_list[0]-mut_param_list[0]).to_string()) + self.assertEqual( + con.body.to_string(), (var_list[0] - mut_param_list[0]).to_string() + ) def test_param_const_vars(self): model = make_indexed_model() @@ -537,8 +560,9 @@ def test_param_const_vars(self): param_const = block.paramConst self.assertEqual(len(var_list), len(block.paramConst)) - var_param_map = ComponentMap((var, param) - for var, param, _, _ in block._sens_data_list) + var_param_map = ComponentMap( + (var, param) for var, param, _, _ in block._sens_data_list + ) param_list = [var_param_map[var] for var in var_list] # Here we rely on the order of paramConst @@ -552,11 +576,11 @@ def test_suffixes_setup(self): sens.setup_sensitivity(param_list) for i, (var, _, _, _) in enumerate(sens.block._sens_data_list): - con = sens.block.paramConst[i+1] - self.assertEqual(model.sens_state_0[var], i+1) - self.assertEqual(model.sens_state_1[var], i+1) - self.assertEqual(model.sens_init_constr[con], i+1) - self.assertEqual(model.dcdp[con], i+1) + con = sens.block.paramConst[i + 1] + self.assertEqual(model.sens_state_0[var], i + 1) + self.assertEqual(model.sens_state_1[var], i + 1) + self.assertEqual(model.sens_init_constr[con], i + 1) + self.assertEqual(model.dcdp[con], i + 1) self.assertIs(type(model.sens_sol_state_1_z_L), Suffix) self.assertIs(type(model.sens_sol_state_1_z_U), Suffix) @@ -571,8 +595,11 @@ def test_perturb_parameters_unindexed(self): delta = 1.0 model = make_indexed_model() param_list = [model.eta[1], model.eta[2]] - model.perturbed_eta = Param([1,2], mutable=True, - initialize={i: p.value+delta for i, p in model.eta.items()}) + model.perturbed_eta = Param( + [1, 2], + mutable=True, + initialize={i: p.value + delta for i, p in model.eta.items()}, + ) ptb_list = [model.perturbed_eta[1], model.perturbed_eta[2]] sens = SensitivityInterface(model, clone_model=False) @@ -581,11 +608,13 @@ def test_perturb_parameters_unindexed(self): instance = sens.model_instance block = sens.block - param_var_map = ComponentMap((param, var) - for var, param, _, _ in sens.block._sens_data_list) - param_con_map = ComponentMap((param, block.paramConst[i+1]) - for i, (_, param, _, _) in - enumerate(sens.block._sens_data_list)) + param_var_map = ComponentMap( + (param, var) for var, param, _, _ in sens.block._sens_data_list + ) + param_con_map = ComponentMap( + (param, block.paramConst[i + 1]) + for i, (_, param, _, _) in enumerate(sens.block._sens_data_list) + ) for param, ptb in zip(param_list, ptb_list): var = param_var_map[param] con = param_con_map[param] @@ -596,8 +625,11 @@ def test_perturb_parameters_scalar(self): delta = 1.0 model = make_indexed_model() param_list = [model.eta[1], model.eta[2]] - model.perturbed_eta = Param([1,2], mutable=True, - initialize={i: p.value+delta for i, p in model.eta.items()}) + model.perturbed_eta = Param( + [1, 2], + mutable=True, + initialize={i: p.value + delta for i, p in model.eta.items()}, + ) ptb_list = [model.perturbed_eta[1].value, model.perturbed_eta[2].value] sens = SensitivityInterface(model, clone_model=False) @@ -606,11 +638,13 @@ def test_perturb_parameters_scalar(self): instance = sens.model_instance block = sens.block - param_var_map = ComponentMap((param, var) - for var, param, _, _ in sens.block._sens_data_list) - param_con_map = ComponentMap((param, block.paramConst[i+1]) - for i, (_, param, _, _) in - enumerate(sens.block._sens_data_list)) + param_var_map = ComponentMap( + (param, var) for var, param, _, _ in sens.block._sens_data_list + ) + param_con_map = ComponentMap( + (param, block.paramConst[i + 1]) + for i, (_, param, _, _) in enumerate(sens.block._sens_data_list) + ) for param, ptb in zip(param_list, ptb_list): var = param_var_map[param] con = param_con_map[param] @@ -621,8 +655,11 @@ def test_perturb_parameters_indexed(self): delta = 1.0 model = make_indexed_model() param_list = [model.eta] - model.perturbed_eta = Param([1,2], mutable=True, - initialize={i: p.value+delta for i, p in model.eta.items()}) + model.perturbed_eta = Param( + [1, 2], + mutable=True, + initialize={i: p.value + delta for i, p in model.eta.items()}, + ) ptb_list = [model.perturbed_eta] sens = SensitivityInterface(model, clone_model=False) @@ -631,19 +668,20 @@ def test_perturb_parameters_indexed(self): instance = sens.model_instance block = sens.block - param_var_map = ComponentMap((param, var) - for var, param, _, _ in sens.block._sens_data_list) - param_con_map = ComponentMap((param, block.paramConst[i+1]) - for i, (_, param, _, _) in - enumerate(sens.block._sens_data_list)) + param_var_map = ComponentMap( + (param, var) for var, param, _, _ in sens.block._sens_data_list + ) + param_con_map = ComponentMap( + (param, block.paramConst[i + 1]) + for i, (_, param, _, _) in enumerate(sens.block._sens_data_list) + ) for param, ptb in zip(param_list, ptb_list): for idx in param: obj = param[idx] ptb_data = ptb[idx] var = param_var_map[obj] con = param_con_map[obj] - self.assertEqual(instance.sens_state_value_1[var], - ptb_data.value) + self.assertEqual(instance.sens_state_value_1[var], ptb_data.value) self.assertEqual(instance.DeltaP[con], -delta) def test_perturb_indexed_parameters_with_scalar(self): @@ -657,18 +695,20 @@ def test_perturb_indexed_parameters_with_scalar(self): instance = sens.model_instance block = sens.block - param_var_map = ComponentMap((param, var) - for var, param, _, _ in sens.block._sens_data_list) - param_con_map = ComponentMap((param, block.paramConst[i+1]) - for i, (_, param, _, _) in - enumerate(sens.block._sens_data_list)) + param_var_map = ComponentMap( + (param, var) for var, param, _, _ in sens.block._sens_data_list + ) + param_con_map = ComponentMap( + (param, block.paramConst[i + 1]) + for i, (_, param, _, _) in enumerate(sens.block._sens_data_list) + ) for param, ptb in zip(param_list, ptb_list): for idx in param: obj = param[idx] var = param_var_map[obj] con = param_con_map[obj] self.assertEqual(instance.sens_state_value_1[var], ptb) - self.assertEqual(instance.DeltaP[con], obj.value-ptb) + self.assertEqual(instance.DeltaP[con], obj.value - ptb) @unittest.skipIf(not opt_kaug.available(False), "k_aug is not available") @unittest.skipIf(not opt_ipopt.available(False), "ipopt is not available") @@ -676,31 +716,34 @@ def test_perturb_indexed_parameters_with_scalar(self): def test_get_dsdp1(self): ''' It tests the function get_dsdp with a simple nonlinear programming example. - - min f: p1*x1+ p2*(x2^2) + p1*p2 + + min f: p1*x1+ p2*(x2^2) + p1*p2 s.t c1: x1 = p1 c2: x2 = p2 c3: 10 <= p1 <= 10 - c4: 5 <= p2 <= 5 + c4: 5 <= p2 <= 5 ''' variable_name = ['p1', 'p2'] - m= ConcreteModel() - m.x1 = Var(initialize = 0) - m.x2 = Var(initialize = 0) - m.p1 = Var(initialize = 0) - m.p2 = Var(initialize = 0) - m.obj = Objective(expr = m.x1*m.p1+m.x2*m.x2*m.p2 + m.p1*m.p2 , sense=minimize) - m.c1 = Constraint(expr = m.x1 == m.p1) - m.c2 = Constraint(expr = m.x2 == m.p2) - theta= {'p1': 10.0, 'p2': 5.0} + m = ConcreteModel() + m.x1 = Var(initialize=0) + m.x2 = Var(initialize=0) + m.p1 = Var(initialize=0) + m.p2 = Var(initialize=0) + m.obj = Objective( + expr=m.x1 * m.p1 + m.x2 * m.x2 * m.p2 + m.p1 * m.p2, sense=minimize + ) + m.c1 = Constraint(expr=m.x1 == m.p1) + m.c2 = Constraint(expr=m.x2 == m.p2) + theta = {'p1': 10.0, 'p2': 5.0} for v in variable_name: getattr(m, v).setlb(theta[v]) getattr(m, v).setub(theta[v]) dsdp, col = get_dsdp(m, variable_name, theta) - np.testing.assert_almost_equal(dsdp.toarray(),[[1., 0., 1., 0.],[0., 1., 0., 1.]]) - - assert col == ['x1', 'x2', 'p1', 'p2'] + ref = {'x1': [1.0, 0.0], 'x2': [0.0, 1.0], 'p1': [1.0, 0.0], 'p2': [0.0, 1.0]} + np.testing.assert_almost_equal( + dsdp.toarray(), np.vstack([ref[c] for c in col]).transpose() + ) @unittest.skipIf(not opt_kaug.available(False), "k_aug is not available") @unittest.skipIf(not opt_ipopt.available(False), "ipopt is not available") @@ -711,56 +754,66 @@ def test_get_dsdp2(self): ''' variable_name = ['asymptote', 'rate_constant'] - theta={'asymptote': 19.142575284617866, 'rate_constant': 0.53109137696521} - cov=np.array([[ 6.30579403, -0.4395341 ],[-0.4395341 , 0.04193591]]) - model_uncertain= ConcreteModel() - model_uncertain.asymptote = Var(initialize = 15) - model_uncertain.rate_constant = Var(initialize = 0.5) - model_uncertain.obj = Objective(expr=model_uncertain.asymptote*( - 1 - exp(-model_uncertain.rate_constant*10) - ), sense=minimize) - theta= {'asymptote': 19.142575284617866, 'rate_constant': 0.53109137696521} + theta = {'asymptote': 19.142575284617866, 'rate_constant': 0.53109137696521} + cov = np.array([[6.30579403, -0.4395341], [-0.4395341, 0.04193591]]) + model_uncertain = ConcreteModel() + model_uncertain.asymptote = Var(initialize=15) + model_uncertain.rate_constant = Var(initialize=0.5) + model_uncertain.obj = Objective( + expr=model_uncertain.asymptote + * (1 - exp(-model_uncertain.rate_constant * 10)), + sense=minimize, + ) + theta = {'asymptote': 19.142575284617866, 'rate_constant': 0.53109137696521} for v in variable_name: getattr(model_uncertain, v).setlb(theta[v]) getattr(model_uncertain, v).setub(theta[v]) - dsdp, col = get_dsdp(model_uncertain, variable_name, theta, {}) - np.testing.assert_almost_equal(dsdp.toarray() , [[ 1., 0.], - [ 0., 1.]]) + dsdp, col = get_dsdp(model_uncertain, variable_name, theta, {}) + np.testing.assert_almost_equal(dsdp.toarray(), [[1.0, 0.0], [0.0, 1.0]]) assert col == ['asymptote', 'rate_constant'] - @unittest.skipIf(not opt_kaug.available(False), "k_aug is not available") - @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") + @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") @unittest.skipIf(not scipy_available, "scipy is not available") def test_get_dfds_dcds(self): ''' It tests the function get_sensitivity with a simple nonlinear programming example. - - min f: p1*x1+ p2*(x2^2) + p1*p2 + + min f: p1*x1+ p2*(x2^2) + p1*p2 s.t c1: x1 = p1 c2: x2 = p2 c3: 10 <= p1 <= 10 - c4: 5 <= p2 <= 5 + c4: 5 <= p2 <= 5 ''' variable_name = ['p1', 'p2'] - m= ConcreteModel() - m.x1 = Var(initialize = 0) - m.x2 = Var(initialize = 0) - m.p1 = Var(initialize = 0) - m.p2 = Var(initialize = 0) - m.obj = Objective(expr = m.x1*m.p1+m.x2*m.x2*m.p2 + m.p1*m.p2 , sense=minimize) - m.c1 = Constraint(expr = m.x1 == m.p1) - m.c2 = Constraint(expr = m.x2 == m.p2) - theta= {'p1': 10.0, 'p2': 5.0} + m = ConcreteModel() + m.x1 = Var(initialize=0) + m.x2 = Var(initialize=0) + m.p1 = Var(initialize=0) + m.p2 = Var(initialize=0) + m.obj = Objective( + expr=m.x1 * m.p1 + m.x2 * m.x2 * m.p2 + m.p1 * m.p2, sense=minimize + ) + m.c1 = Constraint(expr=m.x1 == m.p1) + m.c2 = Constraint(expr=m.x2 == m.p2) + theta = {'p1': 10.0, 'p2': 5.0} for v in variable_name: getattr(m, v).setlb(theta[v]) getattr(m, v).setub(theta[v]) - gradient_f, gradient_c, col ,row, line_dic= get_dfds_dcds(m, variable_name) - np.testing.assert_almost_equal( gradient_f, [10., 50., 15., 35.]) - np.testing.assert_almost_equal( gradient_c.toarray(), [[ 1., 0., -1., 0.], [ 0., 1., 0., -1.]]) - assert col == ['x1', 'x2', 'p1', 'p2'] - assert row == ['c1', 'c2', 'obj'] + gradient_f, gradient_c, col, row, line_dic = get_dfds_dcds(m, variable_name) + + ref_f = {'x1': [10.0], 'x2': [50.0], 'p1': [15.0], 'p2': [35.0]} + ref_c = { + 'x1': [1.0, 0.0], + 'x2': [0.0, 1.0], + 'p1': [-1.0, 0.0], + 'p2': [0.0, -1.0], + } + np.testing.assert_almost_equal(gradient_f, np.hstack([ref_f[v] for v in col])) + np.testing.assert_almost_equal( + gradient_c.toarray(), np.vstack([ref_c[v] for v in col]).transpose() + ) @unittest.skipIf(not opt_kaug.available(False), "k_aug is not available") @unittest.skipIf(not opt_dotsens.available(False), "dot_sens is not available") @@ -770,19 +823,25 @@ def test_get_dfds_dcds2(self): It tests the function get_sensitivity with rooney & biegler's model. ''' variable_name = ['asymptote', 'rate_constant'] - theta={'asymptote': 19.142575284617866, 'rate_constant': 0.53109137696521} - cov=np.array([[ 6.30579403, -0.4395341 ],[-0.4395341 , 0.04193591]]) - model_uncertain= ConcreteModel() - model_uncertain.asymptote = Var(initialize = 15) - model_uncertain.rate_constant = Var(initialize = 0.5) - model_uncertain.obj = Objective(expr = model_uncertain.asymptote*( 1 - exp(-model_uncertain.rate_constant*10 ) ), sense=minimize) - theta= {'asymptote': 19.142575284617866, 'rate_constant': 0.53109137696521} + theta = {'asymptote': 19.142575284617866, 'rate_constant': 0.53109137696521} + cov = np.array([[6.30579403, -0.4395341], [-0.4395341, 0.04193591]]) + model_uncertain = ConcreteModel() + model_uncertain.asymptote = Var(initialize=15) + model_uncertain.rate_constant = Var(initialize=0.5) + model_uncertain.obj = Objective( + expr=model_uncertain.asymptote + * (1 - exp(-model_uncertain.rate_constant * 10)), + sense=minimize, + ) + theta = {'asymptote': 19.142575284617866, 'rate_constant': 0.53109137696521} for v in variable_name: getattr(model_uncertain, v).setlb(theta[v]) getattr(model_uncertain, v).setub(theta[v]) - gradient_f, gradient_c, col,row, line_dic= get_dfds_dcds(model_uncertain, variable_name) - np.testing.assert_almost_equal( gradient_f , [0.99506259, 0.945148]) - np.testing.assert_almost_equal( gradient_c , np.array([])) + gradient_f, gradient_c, col, row, line_dic = get_dfds_dcds( + model_uncertain, variable_name + ) + np.testing.assert_almost_equal(gradient_f, [0.99506259, 0.945148]) + np.testing.assert_almost_equal(gradient_c, np.array([])) assert col == ['asymptote', 'rate_constant'] assert row == ['obj'] @@ -791,12 +850,13 @@ def test_line_num1(self): It tests the function line_num ''' import os + file_name = "test_col.col" with open(file_name, "w") as file: file.write("var1\n") file.write("var3\n") - i= line_num(file_name,'var1') - j= line_num(file_name,'var3') + i = line_num(file_name, 'var1') + j = line_num(file_name, 'var3') self.assertEqual(i, 1) self.assertEqual(j, 2) @@ -805,13 +865,15 @@ def test_line_num2(self): It tests an exception error when file does not include target ''' import os + file_name = "test_col.col" with open(file_name, "w") as file: file.write("var1\n") file.write("var3\n") with self.assertRaises(Exception) as context: - i= line_num(file_name,'var2') + i = line_num(file_name, 'var2') self.assertTrue('test_col.col does not include var2' in str(context.exception)) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/contrib/simplemodel/__init__.py b/pyomo/contrib/simplemodel/__init__.py index 84bf9175bad..4fa4fa2dd16 100644 --- a/pyomo/contrib/simplemodel/__init__.py +++ b/pyomo/contrib/simplemodel/__init__.py @@ -9,20 +9,18 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.common.deprecation import ( - deprecation_warning, in_testing_environment, -) +from pyomo.common.deprecation import deprecation_warning, in_testing_environment try: deprecation_warning( "The use of pyomo.contrib.simple model is deprecated. " "This capability is now supported in the pyomo_simplemodel " "package, which is included in the pyomo_community distribution.", - version='5.6.9') + version='5.6.9', + ) from pyomocontrib_simplemodel import * except ImportError: # Only raise the exception if nose/pytest/sphinx are NOT running # (otherwise test discovery can result in exceptions) if not in_testing_environment(): - raise RuntimeError( - "The pyomocontrib_simplemodel package is not installed.") + raise RuntimeError("The pyomocontrib_simplemodel package is not installed.") diff --git a/pyomo/contrib/trustregion/TRF.py b/pyomo/contrib/trustregion/TRF.py index bc4bf8e7625..45e60df7658 100644 --- a/pyomo/contrib/trustregion/TRF.py +++ b/pyomo/contrib/trustregion/TRF.py @@ -19,10 +19,15 @@ import logging from pyomo.core.base.range import NumericRange -from pyomo.common.config import (ConfigDict, ConfigValue, - Bool, PositiveInt, - PositiveFloat, In, - add_docstring_list) +from pyomo.common.config import ( + ConfigDict, + ConfigValue, + Bool, + PositiveInt, + PositiveFloat, + In, + document_kwargs_from_configdict, +) from pyomo.contrib.trustregion.filter import Filter, FilterElement from pyomo.contrib.trustregion.interface import TRFInterface from pyomo.contrib.trustregion.util import IterationLogger @@ -33,10 +38,7 @@ __version__ = '0.2.0' -def trust_region_method(model, - decision_variables, - ext_fcn_surrogate_map_rule, - config): +def trust_region_method(model, decision_variables, ext_fcn_surrogate_map_rule, config): """ The main driver of the Trust Region algorithm method. @@ -49,7 +51,7 @@ def trust_region_method(model, are the degrees of freedom or decision variables within the model. ext_fcn_surrogate_map_rule : Function, optional - In the 2020 Yoshio/Biegler paper, this is refered to as + In the 2020 Yoshio/Biegler paper, this is referred to as the basis function `b(w)`. This is the low-fidelity model with which to solve the original process model problem and which is integrated into the @@ -63,8 +65,9 @@ def trust_region_method(model, # Initialize necessary TRF methods TRFLogger = IterationLogger() TRFilter = Filter() - interface = TRFInterface(model, decision_variables, - ext_fcn_surrogate_map_rule, config) + interface = TRFInterface( + model, decision_variables, ext_fcn_surrogate_map_rule, config + ) # Initialize the problem rebuildSM = False @@ -80,8 +83,9 @@ def trust_region_method(model, iteration = 0 - TRFLogger.newIteration(iteration, feasibility_k, obj_val_k, - trust_radius, step_norm_k) + TRFLogger.newIteration( + iteration, feasibility_k, obj_val_k, trust_radius, step_norm_k + ) TRFLogger.logIteration() if config.verbose: TRFLogger.printIteration() @@ -89,8 +93,9 @@ def trust_region_method(model, iteration += 1 # Check termination conditions - if ((feasibility_k <= config.feasibility_termination) - and (step_norm_k <= config.step_size_termination)): + if (feasibility_k <= config.feasibility_termination) and ( + step_norm_k <= config.step_size_termination + ): print('EXIT: Optimal solution found.') interface.model.display() break @@ -98,8 +103,9 @@ def trust_region_method(model, # If trust region very small and no progress is being made, # terminate. The following condition must hold for two # consecutive iterations. - if ((trust_radius <= config.minimum_radius) and - (abs(feasibility_k - feasibility) < config.feasibility_termination)): + if (trust_radius <= config.minimum_radius) and ( + abs(feasibility_k - feasibility) < config.feasibility_termination + ): if subopt_flag: logger.warning('WARNING: Insufficient progress.') print('EXIT: Feasible solution found.') @@ -120,16 +126,18 @@ def trust_region_method(model, # Solve the Trust Region Subproblem (TRSP) obj_val_k, step_norm_k, feasibility_k = interface.solveModel() - TRFLogger.newIteration(iteration, feasibility_k, obj_val_k, - trust_radius, step_norm_k) + TRFLogger.newIteration( + iteration, feasibility_k, obj_val_k, trust_radius, step_norm_k + ) # Check filter acceptance filterElement = FilterElement(obj_val_k, feasibility_k) if not TRFilter.isAcceptable(filterElement, config.maximum_feasibility): # Reject the step TRFLogger.iterrecord.rejected = True - trust_radius = max(config.minimum_radius, - step_norm_k*config.radius_update_param_gamma_c) + trust_radius = max( + config.minimum_radius, step_norm_k * config.radius_update_param_gamma_c + ) rebuildSM = False interface.rejectStep() # Log iteration information @@ -139,36 +147,44 @@ def trust_region_method(model, continue # Switching condition: Eq. (7) in Yoshio/Biegler (2020) - if ((obj_val - obj_val_k) >= - (config.switch_condition_kappa_theta - * pow(feasibility, config.switch_condition_gamma_s)) - and (feasibility <= config.minimum_feasibility)): + if (obj_val - obj_val_k) >= ( + config.switch_condition_kappa_theta + * pow(feasibility, config.switch_condition_gamma_s) + ) and (feasibility <= config.minimum_feasibility): # f-type step TRFLogger.iterrecord.fStep = True - trust_radius = min(max(step_norm_k*config.radius_update_param_gamma_e, - trust_radius), - config.maximum_radius) + trust_radius = min( + max(step_norm_k * config.radius_update_param_gamma_e, trust_radius), + config.maximum_radius, + ) else: # theta-type step TRFLogger.iterrecord.thetaStep = True - filterElement = FilterElement(obj_val_k - config.param_filter_gamma_f*feasibility_k, - (1 - config.param_filter_gamma_theta)*feasibility_k) + filterElement = FilterElement( + obj_val_k - config.param_filter_gamma_f * feasibility_k, + (1 - config.param_filter_gamma_theta) * feasibility_k, + ) TRFilter.addToFilter(filterElement) # Calculate ratio: Eq. (10) in Yoshio/Biegler (2020) - rho_k = ((feasibility - feasibility_k + config.feasibility_termination) / - max(feasibility, config.feasibility_termination)) + rho_k = ( + feasibility - feasibility_k + config.feasibility_termination + ) / max(feasibility, config.feasibility_termination) # Ratio tests: Eq. (8) in Yoshio/Biegler (2020) # If rho_k is between eta_1 and eta_2, trust radius stays same - if ((rho_k < config.ratio_test_param_eta_1) or - (feasibility > config.minimum_feasibility)): - trust_radius = max(config.minimum_radius, - (config.radius_update_param_gamma_c - * step_norm_k)) - elif (rho_k >= config.ratio_test_param_eta_2): - trust_radius = min(config.maximum_radius, - max(trust_radius, - (config.radius_update_param_gamma_e - * step_norm_k))) + if (rho_k < config.ratio_test_param_eta_1) or ( + feasibility > config.minimum_feasibility + ): + trust_radius = max( + config.minimum_radius, + (config.radius_update_param_gamma_c * step_norm_k), + ) + elif rho_k >= config.ratio_test_param_eta_2: + trust_radius = min( + config.maximum_radius, + max( + trust_radius, (config.radius_update_param_gamma_e * step_norm_k) + ), + ) TRFLogger.updateIteration(trustRadius=trust_radius) # Accept step and reset for next iteration @@ -181,7 +197,9 @@ def trust_region_method(model, TRFLogger.printIteration() if iteration >= config.maximum_iterations: - logger.warning('EXIT: Maximum iterations reached: {}.'.format(config.maximum_iterations)) + logger.warning( + 'EXIT: Maximum iterations reached: {}.'.format(config.maximum_iterations) + ) return interface.model @@ -213,154 +231,210 @@ def _trf_config(): CONFIG = ConfigDict('TrustRegion') ### Solver options - CONFIG.declare('solver', ConfigValue( - default='ipopt', - description='Solver to use. Default = ``ipopt``.' - )) - CONFIG.declare('keepfiles', ConfigValue( - default=False, - domain=Bool, - description="Optional. Whether or not to " - "write files of sub-problems for use in debugging. " - "Default = False." - )) - CONFIG.declare('tee', ConfigValue( - default=False, - domain=Bool, - description="Optional. Sets the ``tee`` " - "for sub-solver(s) utilized. " - "Default = False." - )) + CONFIG.declare( + 'solver', + ConfigValue(default='ipopt', description='Solver to use. Default = ``ipopt``.'), + ) + CONFIG.declare( + 'keepfiles', + ConfigValue( + default=False, + domain=Bool, + description="Optional. Whether or not to " + "write files of sub-problems for use in debugging. " + "Default = False.", + ), + ) + CONFIG.declare( + 'tee', + ConfigValue( + default=False, + domain=Bool, + description="Optional. Sets the ``tee`` " + "for sub-solver(s) utilized. " + "Default = False.", + ), + ) ### Trust Region specific options - CONFIG.declare('verbose', ConfigValue( - default=False, - domain=Bool, - description="Optional. When True, print each " - "iteration's relevant information to the console " - "as well as to the log. " - "Default = False." - )) - CONFIG.declare('trust_radius', ConfigValue( - default=1.0, - domain=PositiveFloat, - description="Initial trust region radius ``delta_0``. " - "Default = 1.0." - )) - CONFIG.declare('minimum_radius', ConfigValue( - default=1e-6, - domain=PositiveFloat, - description="Minimum allowed trust region radius ``delta_min``. " - "Default = 1e-6." - )) - CONFIG.declare('maximum_radius', ConfigValue( - default=CONFIG.trust_radius * 100, - domain=PositiveFloat, - description="Maximum allowed trust region radius. If trust region " - "radius reaches maximum allowed, solver will exit. " - "Default = 100 * trust_radius." - )) - CONFIG.declare('maximum_iterations', ConfigValue( - default=50, - domain=PositiveInt, - description="Maximum allowed number of iterations. " - "Default = 50." - )) + CONFIG.declare( + 'verbose', + ConfigValue( + default=False, + domain=Bool, + description="Optional. When True, print each " + "iteration's relevant information to the console " + "as well as to the log. " + "Default = False.", + ), + ) + CONFIG.declare( + 'trust_radius', + ConfigValue( + default=1.0, + domain=PositiveFloat, + description="Initial trust region radius ``delta_0``. Default = 1.0.", + ), + ) + CONFIG.declare( + 'minimum_radius', + ConfigValue( + default=1e-6, + domain=PositiveFloat, + description="Minimum allowed trust region radius ``delta_min``. " + "Default = 1e-6.", + ), + ) + CONFIG.declare( + 'maximum_radius', + ConfigValue( + default=CONFIG.trust_radius * 100, + domain=PositiveFloat, + description="Maximum allowed trust region radius. If trust region " + "radius reaches maximum allowed, solver will exit. " + "Default = 100 * trust_radius.", + ), + ) + CONFIG.declare( + 'maximum_iterations', + ConfigValue( + default=50, + domain=PositiveInt, + description="Maximum allowed number of iterations. Default = 50.", + ), + ) ### Termination options - CONFIG.declare('feasibility_termination', ConfigValue( - default=1e-5, - domain=PositiveFloat, - description="Feasibility measure termination tolerance ``epsilon_theta``. " - "Default = 1e-5." - )) - CONFIG.declare('step_size_termination', ConfigValue( - default=CONFIG.feasibility_termination, - domain=PositiveFloat, - description="Step size termination tolerance ``epsilon_s``. " - "Matches the feasibility termination tolerance by default." - )) + CONFIG.declare( + 'feasibility_termination', + ConfigValue( + default=1e-5, + domain=PositiveFloat, + description="Feasibility measure termination tolerance ``epsilon_theta``. " + "Default = 1e-5.", + ), + ) + CONFIG.declare( + 'step_size_termination', + ConfigValue( + default=CONFIG.feasibility_termination, + domain=PositiveFloat, + description="Step size termination tolerance ``epsilon_s``. " + "Matches the feasibility termination tolerance by default.", + ), + ) ### Switching Condition options - CONFIG.declare('minimum_feasibility', ConfigValue( - default=1e-4, - domain=PositiveFloat, - description="Minimum feasibility measure ``theta_min``. " - "Default = 1e-4." - )) - CONFIG.declare('switch_condition_kappa_theta', ConfigValue( - default=0.1, - domain=In(NumericRange(0, 1, 0, (False, False))), - description="Switching condition parameter ``kappa_theta``. " - "Contained in open set (0, 1). " - "Default = 0.1." - )) - CONFIG.declare('switch_condition_gamma_s', ConfigValue( - default=2.0, - domain=PositiveFloat, - description="Switching condition parameter ``gamma_s``. " - "Must satisfy: ``gamma_s > 1/(1+mu)`` where ``mu`` " - "is contained in set (0, 1]. " - "Default = 2.0." - )) + CONFIG.declare( + 'minimum_feasibility', + ConfigValue( + default=1e-4, + domain=PositiveFloat, + description="Minimum feasibility measure ``theta_min``. Default = 1e-4.", + ), + ) + CONFIG.declare( + 'switch_condition_kappa_theta', + ConfigValue( + default=0.1, + domain=In(NumericRange(0, 1, 0, (False, False))), + description="Switching condition parameter ``kappa_theta``. " + "Contained in open set (0, 1). " + "Default = 0.1.", + ), + ) + CONFIG.declare( + 'switch_condition_gamma_s', + ConfigValue( + default=2.0, + domain=PositiveFloat, + description="Switching condition parameter ``gamma_s``. " + "Must satisfy: ``gamma_s > 1/(1+mu)`` where ``mu`` " + "is contained in set (0, 1]. " + "Default = 2.0.", + ), + ) ### Trust region update/ratio test parameters - CONFIG.declare('radius_update_param_gamma_c', ConfigValue( - default=0.5, - domain=In(NumericRange(0, 1, 0, (False, False))), - description="Lower trust region update parameter ``gamma_c``. " - "Default = 0.5." - )) - CONFIG.declare('radius_update_param_gamma_e', ConfigValue( - default=2.5, - domain=In(NumericRange(1, None, 0)), - description="Upper trust region update parameter ``gamma_e``. " - "Default = 2.5." - )) - CONFIG.declare('ratio_test_param_eta_1', ConfigValue( - default = 0.05, - domain=In(NumericRange(0, 1, 0, (False, False))), - description="Lower ratio test parameter ``eta_1``. " - "Must satisfy: ``0 < eta_1 <= eta_2 < 1``. " - "Default = 0.05." - )) - CONFIG.declare('ratio_test_param_eta_2', ConfigValue( - default = 0.2, - domain=In(NumericRange(0, 1, 0, (False, False))), - description="Lower ratio test parameter ``eta_2``. " - "Must satisfy: ``0 < eta_1 <= eta_2 < 1``. " - "Default = 0.2." - )) + CONFIG.declare( + 'radius_update_param_gamma_c', + ConfigValue( + default=0.5, + domain=In(NumericRange(0, 1, 0, (False, False))), + description="Lower trust region update parameter ``gamma_c``. " + "Default = 0.5.", + ), + ) + CONFIG.declare( + 'radius_update_param_gamma_e', + ConfigValue( + default=2.5, + domain=In(NumericRange(1, None, 0)), + description="Upper trust region update parameter ``gamma_e``. " + "Default = 2.5.", + ), + ) + CONFIG.declare( + 'ratio_test_param_eta_1', + ConfigValue( + default=0.05, + domain=In(NumericRange(0, 1, 0, (False, False))), + description="Lower ratio test parameter ``eta_1``. " + "Must satisfy: ``0 < eta_1 <= eta_2 < 1``. " + "Default = 0.05.", + ), + ) + CONFIG.declare( + 'ratio_test_param_eta_2', + ConfigValue( + default=0.2, + domain=In(NumericRange(0, 1, 0, (False, False))), + description="Lower ratio test parameter ``eta_2``. " + "Must satisfy: ``0 < eta_1 <= eta_2 < 1``. " + "Default = 0.2.", + ), + ) ### Filter - CONFIG.declare('maximum_feasibility', ConfigValue( - default=50.0, - domain=PositiveFloat, - description="Maximum allowable feasibility measure ``theta_max``. " - "Parameter for use in filter method." - "Default = 50.0." - )) - CONFIG.declare('param_filter_gamma_theta', ConfigValue( - default=0.01, - domain=In(NumericRange(0, 1, 0, (False, False))), - description="Fixed filter parameter ``gamma_theta`` within (0, 1). " - "Default = 0.01" - )) - CONFIG.declare('param_filter_gamma_f', ConfigValue( - default=0.01, - domain=In(NumericRange(0, 1, 0, (False, False))), - description="Fixed filter parameter ``gamma_f`` within (0, 1). " - "Default = 0.01" - )) + CONFIG.declare( + 'maximum_feasibility', + ConfigValue( + default=50.0, + domain=PositiveFloat, + description="Maximum allowable feasibility measure ``theta_max``. " + "Parameter for use in filter method." + "Default = 50.0.", + ), + ) + CONFIG.declare( + 'param_filter_gamma_theta', + ConfigValue( + default=0.01, + domain=In(NumericRange(0, 1, 0, (False, False))), + description="Fixed filter parameter ``gamma_theta`` within (0, 1). " + "Default = 0.01", + ), + ) + CONFIG.declare( + 'param_filter_gamma_f', + ConfigValue( + default=0.01, + domain=In(NumericRange(0, 1, 0, (False, False))), + description="Fixed filter parameter ``gamma_f`` within (0, 1). " + "Default = 0.01", + ), + ) return CONFIG @SolverFactory.register( 'trustregion', - doc='Trust region algorithm "solver" for black box/glass box optimization') + doc='Trust region algorithm "solver" for black box/glass box optimization', +) class TrustRegionSolver(object): """ The Trust Region Solver is a 'solver' based on the 2016/2018/2020 AiChE papers by Eason (2016/2018), Yoshio (2020), and Biegler. """ + CONFIG = _trf_config() def __init__(self, **kwds): @@ -390,21 +464,27 @@ def __enter__(self): def __exit__(self, et, ev, tb): pass - def solve(self, model, degrees_of_freedom_variables, - ext_fcn_surrogate_map_rule=None, **kwds): + @document_kwargs_from_configdict(CONFIG) + def solve( + self, + model, + degrees_of_freedom_variables, + ext_fcn_surrogate_map_rule=None, + **kwds + ): """ This method calls the TRF algorithm. Parameters ---------- - model: ``ConcreteModel`` + model : ConcreteModel The model to be solved using the Trust Region Framework. - degrees_of_freedom_variables : List of Vars + degrees_of_freedom_variables : List[Var] User-supplied input. The user must provide a list of vars which are the degrees of freedom or decision variables within the model. ext_fcn_surrogate_map_rule : Function, optional - In the 2020 Yoshio/Biegler paper, this is refered to as + In the 2020 Yoshio/Biegler paper, this is referred to as the basis function `b(w)`. This is the low-fidelity model with which to solve the original process model problem and which is integrated into the @@ -417,16 +497,8 @@ def solve(self, model, degrees_of_freedom_variables, if ext_fcn_surrogate_map_rule is None: # If the user does not pass us a "basis" function, # we default to 0. - ext_fcn_surrogate_map_rule = lambda comp,ef: 0 - result = trust_region_method(model, - degrees_of_freedom_variables, - ext_fcn_surrogate_map_rule, - config) + ext_fcn_surrogate_map_rule = lambda comp, ef: 0 + result = trust_region_method( + model, degrees_of_freedom_variables, ext_fcn_surrogate_map_rule, config + ) return result - - -def _generate_filtered_docstring(): - cfg = _trf_config() - return add_docstring_list(TrustRegionSolver.solve.__doc__, cfg, indent_by=8) - -TrustRegionSolver.solve.__doc__ = _generate_filtered_docstring() diff --git a/pyomo/contrib/trustregion/__init__.py b/pyomo/contrib/trustregion/__init__.py index c361238e119..62ba0892686 100644 --- a/pyomo/contrib/trustregion/__init__.py +++ b/pyomo/contrib/trustregion/__init__.py @@ -14,4 +14,3 @@ # # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - diff --git a/pyomo/contrib/trustregion/examples/__init__.py b/pyomo/contrib/trustregion/examples/__init__.py index 6959cd5758a..62ba0892686 100644 --- a/pyomo/contrib/trustregion/examples/__init__.py +++ b/pyomo/contrib/trustregion/examples/__init__.py @@ -14,5 +14,3 @@ # # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - - diff --git a/pyomo/contrib/trustregion/examples/example1.py b/pyomo/contrib/trustregion/examples/example1.py index 71ad9786360..19965ff1cb2 100755 --- a/pyomo/contrib/trustregion/examples/example1.py +++ b/pyomo/contrib/trustregion/examples/example1.py @@ -25,37 +25,52 @@ """ from pyomo.environ import ( - ConcreteModel, Var, Reals, ExternalFunction, sin, cos, - sqrt, Constraint, Objective) + ConcreteModel, + Var, + Reals, + ExternalFunction, + sin, + cos, + sqrt, + Constraint, + Objective, +) from pyomo.opt import SolverFactory + def ext_fcn(a, b): - return sin(a - b) + return sin(a - b) + def grad_ext_fcn(args, fixed): a, b = args[:2] - return [ cos(a - b), -cos(a - b) ] + return [cos(a - b), -cos(a - b)] + def create_model(): m = ConcreteModel() m.name = 'Example 1: Eason' - m.z = Var(range(3), domain=Reals, initialize=2.) - m.x = Var(range(2), initialize=2.) + m.z = Var(range(3), domain=Reals, initialize=2.0) + m.x = Var(range(2), initialize=2.0) m.x[1] = 1.0 m.ext_fcn = ExternalFunction(ext_fcn, grad_ext_fcn) m.obj = Objective( - expr=(m.z[0]-1.0)**2 + (m.z[0]-m.z[1])**2 + (m.z[2]-1.0)**2 \ - + (m.x[0]-1.0)**4 + (m.x[1]-1.0)**6 + expr=(m.z[0] - 1.0) ** 2 + + (m.z[0] - m.z[1]) ** 2 + + (m.z[2] - 1.0) ** 2 + + (m.x[0] - 1.0) ** 4 + + (m.x[1] - 1.0) ** 6 ) m.c1 = Constraint( - expr=m.x[0] * m.z[0]**2 + m.ext_fcn(m.x[0], m.x[1]) == 2*sqrt(2.0) - ) - m.c2 = Constraint(expr=m.z[2]**4 * m.z[1]**2 + m.z[1] == 8+sqrt(2.0)) + expr=m.x[0] * m.z[0] ** 2 + m.ext_fcn(m.x[0], m.x[1]) == 2 * sqrt(2.0) + ) + m.c2 = Constraint(expr=m.z[2] ** 4 * m.z[1] ** 2 + m.z[1] == 8 + sqrt(2.0)) return m + def main(): m = create_model() optTRF = SolverFactory('trustregion', maximum_iterations=10, verbose=True) diff --git a/pyomo/contrib/trustregion/examples/example2.py b/pyomo/contrib/trustregion/examples/example2.py index af979923ab0..0c506eb6891 100644 --- a/pyomo/contrib/trustregion/examples/example2.py +++ b/pyomo/contrib/trustregion/examples/example2.py @@ -25,16 +25,18 @@ AIChE J. 2021; 67:e17054. https://doi.org/10.1002/aic.17054 """ -from pyomo.environ import ( - ConcreteModel, Var, ExternalFunction, Objective) +from pyomo.environ import ConcreteModel, Var, ExternalFunction, Objective from pyomo.opt import SolverFactory + def ext_fcn(a, b): return a**2 + b**2 + def grad_ext_fcn(args, fixed): a, b = args[:2] - return [ 2*a, 2*b ] + return [2 * a, 2 * b] + def create_model(): m = ConcreteModel() @@ -47,17 +49,16 @@ def create_model(): @m.Constraint() def con(m): - return 2*m.x1 + m.x2 + 10.0 == m.EF(m.x1, m.x2) + return 2 * m.x1 + m.x2 + 10.0 == m.EF(m.x1, m.x2) - m.obj = Objective( - expr = (m.x1 - 1)**2 + (m.x2 - 3)**2 + m.EF(m.x1, m.x2)**2 - ) + m.obj = Objective(expr=(m.x1 - 1) ** 2 + (m.x2 - 3) ** 2 + m.EF(m.x1, m.x2) ** 2) return m + def basis_rule(component, ef_expr): x = ef_expr.arg(0) y = ef_expr.arg(1) - return x**2 - y # This is the low fidelity model + return x**2 - y # This is the low fidelity model # This problem takes more than the default maximum iterations (50) to solve. @@ -65,10 +66,9 @@ def basis_rule(component, ef_expr): # it took 70 iterations. def main(): m = create_model() - optTRF = SolverFactory('trustregion', - maximum_iterations=100, - verbose=True) + optTRF = SolverFactory('trustregion', maximum_iterations=100, verbose=True) optTRF.solve(m, [m.x1], ext_fcn_surrogate_map_rule=basis_rule) + if __name__ == '__main__': main() diff --git a/pyomo/contrib/trustregion/filter.py b/pyomo/contrib/trustregion/filter.py index e424ee7dab3..2f0b20ee8f8 100644 --- a/pyomo/contrib/trustregion/filter.py +++ b/pyomo/contrib/trustregion/filter.py @@ -16,9 +16,7 @@ # ___________________________________________________________________________ - class FilterElement: - def __init__(self, objective, feasible): self.objective = objective self.feasible = feasible @@ -29,11 +27,15 @@ def compare(self, filterElement): of the filter element to determine whether or not the filter element should be added to the filter """ - if (filterElement.objective >= self.objective - and filterElement.feasible >= self.feasible): + if ( + filterElement.objective >= self.objective + and filterElement.feasible >= self.feasible + ): return -1 - if (filterElement.objective <= self.objective - and filterElement.feasible <= self.feasible): + if ( + filterElement.objective <= self.objective + and filterElement.feasible <= self.feasible + ): return 1 return 0 @@ -41,9 +43,10 @@ def compare(self, filterElement): class Filter: """ Trust region filter - + Based on original filter by Eason, Biegler (2016) """ + def __init__(self): self.TrustRegionFilter = [] @@ -54,9 +57,9 @@ def addToFilter(self, filterElement): filtercopy = list(self.TrustRegionFilter) for fe in filtercopy: acceptableMeasure = fe.compare(filterElement) - if (acceptableMeasure == 1): + if acceptableMeasure == 1: self.TrustRegionFilter.remove(fe) - elif (acceptableMeasure == -1): + elif acceptableMeasure == -1: return self.TrustRegionFilter.append(filterElement) @@ -65,9 +68,9 @@ def isAcceptable(self, filterElement, maximum_feasibility): Check whether a step is acceptable to the filter. If not, we reject the step. """ - if (filterElement.feasible > maximum_feasibility): + if filterElement.feasible > maximum_feasibility: return False for fe in self.TrustRegionFilter: - if (fe.compare(filterElement) == -1): + if fe.compare(filterElement) == -1: return False return True diff --git a/pyomo/contrib/trustregion/interface.py b/pyomo/contrib/trustregion/interface.py index 2e8143d5f9e..f68f2fdb308 100644 --- a/pyomo/contrib/trustregion/interface.py +++ b/pyomo/contrib/trustregion/interface.py @@ -22,16 +22,22 @@ from pyomo.common.modeling import unique_component_name from pyomo.contrib.trustregion.util import minIgnoreNone, maxIgnoreNone from pyomo.core import ( - Block, Param, VarList, Constraint, - Objective, value, Set, ExternalFunction, maximize, - minimize - ) + Block, + Param, + VarList, + Constraint, + Objective, + value, + Set, + ExternalFunction, + maximize, + minimize, +) from pyomo.core.expr.calculus.derivatives import differentiate -from pyomo.core.expr.visitor import (identify_variables, - ExpressionReplacementVisitor) +from pyomo.core.expr.visitor import identify_variables, ExpressionReplacementVisitor from pyomo.core.expr.numeric_expr import ExternalFunctionExpression from pyomo.core.expr.numvalue import native_types -from pyomo.opt import (SolverFactory, check_optimal_termination) +from pyomo.opt import SolverFactory, check_optimal_termination logger = logging.getLogger('pyomo.contrib.trustregion') @@ -47,9 +53,11 @@ class EFReplacement(ExpressionReplacementVisitor): NOTE: We use an empty substitution map. The EFs to be substituted are identified as part of exitNode. """ + def __init__(self, trfData, efSet): - super().__init__(descend_into_named_expressions=True, - remove_named_expressions=False) + super().__init__( + descend_into_named_expressions=True, remove_named_expressions=False + ) self.trfData = trfData self.efSet = efSet @@ -57,7 +65,11 @@ def beforeChild(self, node, child, child_idx): # We want to capture all of the variables on the model. # If we reject a step, we need to know all the vars to reset. descend, result = super().beforeChild(node, child, child_idx) - if not descend and result.__class__ not in native_types and result.is_variable_type(): + if ( + not descend + and result.__class__ not in native_types + and result.is_variable_type() + ): self.trfData.all_variables.add(result) return descend, result @@ -88,8 +100,7 @@ class TRFInterface(object): Pyomo interface for Trust Region algorithm. """ - def __init__(self, model, decision_variables, - ext_fcn_surrogate_map_rule, config): + def __init__(self, model, decision_variables, ext_fcn_surrogate_map_rule, config): self.original_model = model tmp_name = unique_component_name(self.original_model, 'tmp') setattr(self.original_model, tmp_name, decision_variables) @@ -98,8 +109,9 @@ def __init__(self, model, decision_variables, self.decision_variables = getattr(self.model, tmp_name) delattr(self.original_model, tmp_name) self.data = Block() - self.model.add_component(unique_component_name(self.model, 'trf_data'), - self.data) + self.model.add_component( + unique_component_name(self.model, 'trf_data'), self.data + ) self.basis_expression_rule = ext_fcn_surrogate_map_rule self.efSet = None self.solver = SolverFactory(self.config.solver) @@ -137,14 +149,13 @@ def _remove_ef_from_expr(self, component): if new_expr is not expr: component.set_value(new_expr) new_output_vars = list( - self.data.ef_outputs[i+1] for i in range( - next_ef_id, len(self.data.ef_outputs) - ) - ) + self.data.ef_outputs[i + 1] + for i in range(next_ef_id, len(self.data.ef_outputs)) + ) for v in new_output_vars: - self.data.basis_expressions[v] = \ - self.basis_expression_rule( - component, self.data.truth_models[v]) + self.data.basis_expressions[v] = self.basis_expression_rule( + component, self.data.truth_models[v] + ) def replaceExternalFunctionsWithVariables(self): """ @@ -175,30 +186,31 @@ def replaceExternalFunctionsWithVariables(self): self.data.ef_outputs = VarList() number_of_equality_constraints = 0 - for con in self.model.component_data_objects(Constraint, - active=True): + for con in self.model.component_data_objects(Constraint, active=True): if con.lb == con.ub and con.lb is not None: number_of_equality_constraints += 1 self._remove_ef_from_expr(con) - self.degrees_of_freedom = (len(list(self.data.all_variables)) - - number_of_equality_constraints) + self.degrees_of_freedom = ( + len(list(self.data.all_variables)) - number_of_equality_constraints + ) if self.degrees_of_freedom != len(self.decision_variables): raise ValueError( "replaceExternalFunctionsWithVariables: " "The degrees of freedom %d do not match the number of decision " - "variables supplied %d." - % (self.degrees_of_freedom, len(self.decision_variables))) + "variables supplied %d." + % (self.degrees_of_freedom, len(self.decision_variables)) + ) for var in self.decision_variables: if var not in self.data.all_variables: raise ValueError( "replaceExternalFunctionsWithVariables: " f"The supplied decision variable {var.name} cannot " - "be found in the model variables.") + "be found in the model variables." + ) - self.data.objs = list(self.model.component_data_objects(Objective, - active=True)) + self.data.objs = list(self.model.component_data_objects(Objective, active=True)) # HACK: This is a hack that we will want to remove once the NL writer # has been corrected to not send unused EFs to the solver for ef in self.model.component_objects(ExternalFunction): @@ -207,18 +219,19 @@ def replaceExternalFunctionsWithVariables(self): if len(self.data.objs) != 1: raise ValueError( "replaceExternalFunctionsWithVariables: " - "TrustRegion only supports models with a single active Objective.") + "TrustRegion only supports models with a single active Objective." + ) if self.data.objs[0].sense == maximize: - self.data.objs[0].expr = -1* self.data.objs[0].expr + self.data.objs[0].expr = -1 * self.data.objs[0].expr self.data.objs[0].sense = minimize self._remove_ef_from_expr(self.data.objs[0]) for i in self.data.ef_outputs: - self.data.ef_inputs[i] = \ - list(identify_variables( - self.data.truth_models[self.data.ef_outputs[i]], - include_fixed=False) + self.data.ef_inputs[i] = list( + identify_variables( + self.data.truth_models[self.data.ef_outputs[i]], include_fixed=False ) + ) self.data.all_variables.update(self.data.ef_outputs.values()) self.data.all_variables = list(self.data.all_variables) @@ -231,32 +244,40 @@ def createConstraints(self): are activated later as necessary. """ b = self.data + # This implements: y = b(w) from Yoshio/Biegler (2020) @b.Constraint(b.ef_outputs.index_set()) def basis_constraint(b, i): ef_output_var = b.ef_outputs[i] return ef_output_var == b.basis_expressions[ef_output_var] + b.basis_constraint.deactivate() - b.INPUT_OUTPUT = Set(initialize=( - (i, j) for i in b.ef_outputs.index_set() - for j in range(len(b.ef_inputs[i])) - )) + b.INPUT_OUTPUT = Set( + initialize=( + (i, j) + for i in b.ef_outputs.index_set() + for j in range(len(b.ef_inputs[i])) + ) + ) b.basis_model_output = Param(b.ef_outputs.index_set(), mutable=True) b.grad_basis_model_output = Param(b.INPUT_OUTPUT, mutable=True) b.truth_model_output = Param(b.ef_outputs.index_set(), mutable=True) b.grad_truth_model_output = Param(b.INPUT_OUTPUT, mutable=True) b.value_of_ef_inputs = Param(b.INPUT_OUTPUT, mutable=True) + # This implements: y = r_k(w) @b.Constraint(b.ef_outputs.index_set()) def sm_constraint_basis(b, i): ef_output_var = b.ef_outputs[i] - return ef_output_var == b.basis_expressions[ef_output_var] + \ - b.truth_model_output[i] - b.basis_model_output[i] + \ - sum((b.grad_truth_model_output[i, j] - - b.grad_basis_model_output[i, j]) - * (w - b.value_of_ef_inputs[i, j]) - for j, w in enumerate(b.ef_inputs[i])) + return ef_output_var == b.basis_expressions[ + ef_output_var + ] + b.truth_model_output[i] - b.basis_model_output[i] + sum( + (b.grad_truth_model_output[i, j] - b.grad_basis_model_output[i, j]) + * (w - b.value_of_ef_inputs[i, j]) + for j, w in enumerate(b.ef_inputs[i]) + ) + b.sm_constraint_basis.deactivate() def getCurrentDecisionVariableValues(self): @@ -279,11 +300,15 @@ def updateDecisionVariableBounds(self, radius): """ for var in self.decision_variables: var.setlb( - maxIgnoreNone(value(var) - radius, - self.initial_decision_bounds[var.name][0])) + maxIgnoreNone( + value(var) - radius, self.initial_decision_bounds[var.name][0] + ) + ) var.setub( - minIgnoreNone(value(var) + radius, - self.initial_decision_bounds[var.name][1])) + minIgnoreNone( + value(var) + radius, self.initial_decision_bounds[var.name][1] + ) + ) def updateSurrogateModel(self): """ @@ -298,11 +323,9 @@ def updateSurrogateModel(self): b.basis_model_output[i] = value(b.basis_expressions[y]) b.truth_model_output[i] = value(b.truth_models[y]) # Basis functions are Pyomo expressions (in theory) - gradBasis = differentiate(b.basis_expressions[y], - wrt_list=b.ef_inputs[i]) + gradBasis = differentiate(b.basis_expressions[y], wrt_list=b.ef_inputs[i]) # These, however, are external functions - gradTruth = differentiate(b.truth_models[y], - wrt_list=b.ef_inputs[i]) + gradTruth = differentiate(b.truth_models[y], wrt_list=b.ef_inputs[i]) for j, w in enumerate(b.ef_inputs[i]): b.grad_basis_model_output[i, j] = gradBasis[j] b.grad_truth_model_output[i, j] = gradTruth[j] @@ -313,8 +336,7 @@ def getCurrentModelState(self): Return current state of all model variables. This is necessary if we need to reject a step and move backwards. """ - return list(value(v, exception=False) - for v in self.data.all_variables) + return list(value(v, exception=False) for v in self.data.all_variables) def calculateFeasibility(self): """ @@ -322,8 +344,9 @@ def calculateFeasibility(self): || y - d(w) ||_1 """ b = self.data - return sum(abs(value(y) - value(b.truth_models[y])) - for i, y in b.ef_outputs.items()) + return sum( + abs(value(y) - value(b.truth_models[y])) for i, y in b.ef_outputs.items() + ) def calculateStepSizeInfNorm(self, original_values, new_values): """ @@ -337,8 +360,7 @@ def calculateStepSizeInfNorm(self, original_values, new_values): for var, val in original_values.items(): original_vals.append(val) new_vals.append(new_values[var]) - return max([abs(new - old) for new, old in - zip(new_vals, original_vals)]) + return max([abs(new - old) for new, old in zip(new_vals, original_vals)]) def initializeProblem(self): """ @@ -384,22 +406,24 @@ def solveModel(self): """ current_decision_values = self.getCurrentDecisionVariableValues() self.data.previous_model_state = self.getCurrentModelState() - results = self.solver.solve(self.model, - keepfiles=self.config.keepfiles, - tee=self.config.tee) - + results = self.solver.solve( + self.model, keepfiles=self.config.keepfiles, tee=self.config.tee + ) + if not check_optimal_termination(results): raise ArithmeticError( 'EXIT: Model solve failed with status {} and termination' ' condition(s) {}.'.format( str(results.solver.status), - str(results.solver.termination_condition)) + str(results.solver.termination_condition), ) + ) self.model.solutions.load_from(results) new_decision_values = self.getCurrentDecisionVariableValues() - step_norm = self.calculateStepSizeInfNorm(current_decision_values, - new_decision_values) + step_norm = self.calculateStepSizeInfNorm( + current_decision_values, new_decision_values + ) feasibility = self.calculateFeasibility() return self.data.objs[0](), step_norm, feasibility @@ -408,6 +432,5 @@ def rejectStep(self): If a step is rejected, we reset the model variables values back to their cached state - which we set in solveModel """ - for var, val in zip(self.data.all_variables, - self.data.previous_model_state): + for var, val in zip(self.data.all_variables, self.data.previous_model_state): var.set_value(val, skip_validation=True) diff --git a/pyomo/contrib/trustregion/plugins.py b/pyomo/contrib/trustregion/plugins.py index 2611f28363c..59a11986f3c 100644 --- a/pyomo/contrib/trustregion/plugins.py +++ b/pyomo/contrib/trustregion/plugins.py @@ -20,5 +20,6 @@ logger = logging.getLogger('pyomo.contrib.trustregion') + def load(): from pyomo.contrib.trustregion.TRF import TrustRegionSolver diff --git a/pyomo/contrib/trustregion/tests/__init__.py b/pyomo/contrib/trustregion/tests/__init__.py index c361238e119..62ba0892686 100644 --- a/pyomo/contrib/trustregion/tests/__init__.py +++ b/pyomo/contrib/trustregion/tests/__init__.py @@ -14,4 +14,3 @@ # # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - diff --git a/pyomo/contrib/trustregion/tests/test_TRF.py b/pyomo/contrib/trustregion/tests/test_TRF.py index 75f0c281391..e14a784b4af 100644 --- a/pyomo/contrib/trustregion/tests/test_TRF.py +++ b/pyomo/contrib/trustregion/tests/test_TRF.py @@ -22,44 +22,58 @@ import pyomo.common.unittest as unittest from pyomo.common.log import LoggingIntercept from pyomo.environ import ( - Var, ConcreteModel, Reals, ExternalFunction, - Objective, Constraint, sqrt, sin, cos, SolverFactory, value - ) + Var, + ConcreteModel, + Reals, + ExternalFunction, + Objective, + Constraint, + sqrt, + sin, + cos, + SolverFactory, + value, +) from pyomo.contrib.trustregion.TRF import trust_region_method, _trf_config logger = logging.getLogger('pyomo.contrib.trustregion') -@unittest.skipIf(not SolverFactory('ipopt').available(False), - "The IPOPT solver is not available") -class TestTrustRegionConfig(unittest.TestCase): +@unittest.skipIf( + not SolverFactory('ipopt').available(False), "The IPOPT solver is not available" +) +class TestTrustRegionConfig(unittest.TestCase): def setUp(self): - self.m = ConcreteModel() - self.m.z = Var(range(3), domain=Reals, initialize=2.) - self.m.x = Var(range(2), initialize=2.) + self.m.z = Var(range(3), domain=Reals, initialize=2.0) + self.m.x = Var(range(2), initialize=2.0) self.m.x[1] = 1.0 def blackbox(a, b): return sin(a - b) + def grad_blackbox(args, fixed): a, b = args[:2] - return [ cos(a - b), -cos(a - b) ] + return [cos(a - b), -cos(a - b)] self.m.bb = ExternalFunction(blackbox, grad_blackbox) self.m.obj = Objective( - expr=(self.m.z[0]-1.0)**2 + (self.m.z[0]-self.m.z[1])**2 - + (self.m.z[2]-1.0)**2 + (self.m.x[0]-1.0)**4 - + (self.m.x[1]-1.0)**6 + expr=(self.m.z[0] - 1.0) ** 2 + + (self.m.z[0] - self.m.z[1]) ** 2 + + (self.m.z[2] - 1.0) ** 2 + + (self.m.x[0] - 1.0) ** 4 + + (self.m.x[1] - 1.0) ** 6 ) self.m.c1 = Constraint( - expr=(self.m.x[0] * self.m.z[0]**2 - + self.m.bb(self.m.x[0], self.m.x[1]) - == 2*sqrt(2.0)) + expr=( + self.m.x[0] * self.m.z[0] ** 2 + self.m.bb(self.m.x[0], self.m.x[1]) + == 2 * sqrt(2.0) ) + ) self.m.c2 = Constraint( - expr=self.m.z[2]**4 * self.m.z[1]**2 + self.m.z[1] == 8+sqrt(2.0)) + expr=self.m.z[2] ** 4 * self.m.z[1] ** 2 + self.m.z[1] == 8 + sqrt(2.0) + ) self.decision_variables = [self.m.z[0], self.m.z[1], self.m.z[2]] def maprule(self, a, b): @@ -163,39 +177,43 @@ def test_initialize_with_kwdval_solve_with_new_kwdval(self): self.assertEqual(self.TRF.config.trust_radius, 3.0) -@unittest.skipIf(not SolverFactory('ipopt').available(False), - "The IPOPT solver is not available") +@unittest.skipIf( + not SolverFactory('ipopt').available(False), "The IPOPT solver is not available" +) class TestTrustRegionMethod(unittest.TestCase): - def setUp(self): - self.m = ConcreteModel() - self.m.z = Var(range(3), domain=Reals, initialize=2.) - self.m.x = Var(range(2), initialize=2.) + self.m.z = Var(range(3), domain=Reals, initialize=2.0) + self.m.x = Var(range(2), initialize=2.0) self.m.x[1] = 1.0 def blackbox(a, b): return sin(a - b) + def grad_blackbox(args, fixed): a, b = args[:2] - return [ cos(a - b), -cos(a - b) ] + return [cos(a - b), -cos(a - b)] self.m.bb = ExternalFunction(blackbox, grad_blackbox) self.m.obj = Objective( - expr=(self.m.z[0]-1.0)**2 + (self.m.z[0]-self.m.z[1])**2 - + (self.m.z[2]-1.0)**2 + (self.m.x[0]-1.0)**4 - + (self.m.x[1]-1.0)**6 + expr=(self.m.z[0] - 1.0) ** 2 + + (self.m.z[0] - self.m.z[1]) ** 2 + + (self.m.z[2] - 1.0) ** 2 + + (self.m.x[0] - 1.0) ** 4 + + (self.m.x[1] - 1.0) ** 6 ) self.m.c1 = Constraint( - expr=(self.m.x[0] * self.m.z[0]**2 - + self.m.bb(self.m.x[0], self.m.x[1]) - == 2*sqrt(2.0)) + expr=( + self.m.x[0] * self.m.z[0] ** 2 + self.m.bb(self.m.x[0], self.m.x[1]) + == 2 * sqrt(2.0) ) + ) self.m.c2 = Constraint( - expr=self.m.z[2]**4 * self.m.z[1]**2 + self.m.z[1] == 8+sqrt(2.0)) + expr=self.m.z[2] ** 4 * self.m.z[1] ** 2 + self.m.z[1] == 8 + sqrt(2.0) + ) self.config = _trf_config() - self.ext_fcn_surrogate_map_rule = lambda comp,ef: 0 + self.ext_fcn_surrogate_map_rule = lambda comp, ef: 0 self.decision_variables = [self.m.z[0], self.m.z[1], self.m.z[2]] def test_solver(self): @@ -204,18 +222,18 @@ def test_solver(self): # Check the printed contents print_OUTPUT = StringIO() sys.stdout = print_OUTPUT - with LoggingIntercept(log_OUTPUT, - 'pyomo.contrib.trustregion', logging.INFO): - result = trust_region_method(self.m, - self.decision_variables, - self.ext_fcn_surrogate_map_rule, - self.config) + with LoggingIntercept(log_OUTPUT, 'pyomo.contrib.trustregion', logging.INFO): + result = trust_region_method( + self.m, + self.decision_variables, + self.ext_fcn_surrogate_map_rule, + self.config, + ) sys.stdout = sys.__stdout__ # Check the log to make sure it is capturing self.assertIn('Iteration 0', log_OUTPUT.getvalue()) # Check the printed output - self.assertIn('EXIT: Optimal solution found.', - print_OUTPUT.getvalue()) + self.assertIn('EXIT: Optimal solution found.', print_OUTPUT.getvalue()) # The names of both models should be the same self.assertEqual(result.name, self.m.name) # The values should not be the same diff --git a/pyomo/contrib/trustregion/tests/test_examples.py b/pyomo/contrib/trustregion/tests/test_examples.py index b129c028054..a954b0851c7 100644 --- a/pyomo/contrib/trustregion/tests/test_examples.py +++ b/pyomo/contrib/trustregion/tests/test_examples.py @@ -22,15 +22,14 @@ import pyomo.common.unittest as unittest from pyomo.common.log import LoggingIntercept from pyomo.contrib.trustregion.examples import example1, example2 -from pyomo.environ import ( - SolverFactory -) +from pyomo.environ import SolverFactory logger = logging.getLogger('pyomo.contrib.trustregion') -@unittest.skipIf(not SolverFactory('ipopt').available(False), - "The IPOPT solver is not available") +@unittest.skipIf( + not SolverFactory('ipopt').available(False), "The IPOPT solver is not available" +) class TestTrustRegionMethod(unittest.TestCase): def test_example1(self): # Check the log contents @@ -38,8 +37,7 @@ def test_example1(self): # Check the printed contents print_OUTPUT = StringIO() sys.stdout = print_OUTPUT - with LoggingIntercept(log_OUTPUT, - 'pyomo.contrib.trustregion', logging.INFO): + with LoggingIntercept(log_OUTPUT, 'pyomo.contrib.trustregion', logging.INFO): example1.main() sys.stdout = sys.__stdout__ # Check number of iterations - which should be 4 total @@ -50,20 +48,16 @@ def test_example1(self): self.assertIn('theta-type step', log_OUTPUT.getvalue()) self.assertNotIn('f-type step', log_OUTPUT.getvalue()) # These two pieces of information are only printed, not logged - self.assertNotIn('EXIT: Optimal solution found.', - log_OUTPUT.getvalue()) - self.assertNotIn('None : True : 0.2770447887637415', - log_OUTPUT.getvalue()) + self.assertNotIn('EXIT: Optimal solution found.', log_OUTPUT.getvalue()) + self.assertNotIn('None : True : 0.2770447887637415', log_OUTPUT.getvalue()) # All of this should be printed self.assertIn('Iteration 0', print_OUTPUT.getvalue()) self.assertIn('Iteration 4', print_OUTPUT.getvalue()) self.assertNotIn('Iteration 5', print_OUTPUT.getvalue()) self.assertIn('theta-type step', print_OUTPUT.getvalue()) self.assertNotIn('f-type step', print_OUTPUT.getvalue()) - self.assertIn('EXIT: Optimal solution found.', - print_OUTPUT.getvalue()) - self.assertIn('None : True : 0.2770447887637415', - print_OUTPUT.getvalue()) + self.assertIn('EXIT: Optimal solution found.', print_OUTPUT.getvalue()) + self.assertIn('None : True : 0.2770447887637415', print_OUTPUT.getvalue()) def test_example2(self): # Check the log contents @@ -71,8 +65,7 @@ def test_example2(self): # Check the printed contents print_OUTPUT = StringIO() sys.stdout = print_OUTPUT - with LoggingIntercept(log_OUTPUT, - 'pyomo.contrib.trustregion', logging.INFO): + with LoggingIntercept(log_OUTPUT, 'pyomo.contrib.trustregion', logging.INFO): example2.main() sys.stdout = sys.__stdout__ # Check the number of iterations - which should be 70ish, but not 80 @@ -84,10 +77,8 @@ def test_example2(self): self.assertIn('f-type step', log_OUTPUT.getvalue()) self.assertIn('step rejected', log_OUTPUT.getvalue()) # These two pieces of information are only printed, not logged - self.assertNotIn('EXIT: Optimal solution found.', - log_OUTPUT.getvalue()) - self.assertNotIn('None : True : 48.383116936949', - log_OUTPUT.getvalue()) + self.assertNotIn('EXIT: Optimal solution found.', log_OUTPUT.getvalue()) + self.assertNotIn('None : True : 48.383116936949', log_OUTPUT.getvalue()) # All of this should be printed self.assertIn('Iteration 0', print_OUTPUT.getvalue()) self.assertIn('Iteration 70', print_OUTPUT.getvalue()) @@ -95,7 +86,5 @@ def test_example2(self): self.assertIn('theta-type step', print_OUTPUT.getvalue()) self.assertIn('f-type step', print_OUTPUT.getvalue()) self.assertIn('step rejected', print_OUTPUT.getvalue()) - self.assertIn('EXIT: Optimal solution found.', - print_OUTPUT.getvalue()) - self.assertIn('None : True : 48.383116936949', - print_OUTPUT.getvalue()) + self.assertIn('EXIT: Optimal solution found.', print_OUTPUT.getvalue()) + self.assertIn('None : True : 48.383116936949', print_OUTPUT.getvalue()) diff --git a/pyomo/contrib/trustregion/tests/test_filter.py b/pyomo/contrib/trustregion/tests/test_filter.py index 0b438e1fd45..1b89d8d5cd1 100644 --- a/pyomo/contrib/trustregion/tests/test_filter.py +++ b/pyomo/contrib/trustregion/tests/test_filter.py @@ -19,6 +19,7 @@ import pyomo.common.unittest as unittest from pyomo.contrib.trustregion.filter import Filter, FilterElement + class TestFilter(unittest.TestCase): def setUp(self): self.objective = 1.0 @@ -42,9 +43,7 @@ def test_addToFilter(self): def test_isAcceptable(self): fe = FilterElement(0.5, 0.25) # A sufficiently feasible element - self.assertTrue(self.tmpFilter.isAcceptable(fe, - self.theta_max)) + self.assertTrue(self.tmpFilter.isAcceptable(fe, self.theta_max)) fe = FilterElement(10.0, 15.0) # A sufficiently infeasible element - self.assertFalse(self.tmpFilter.isAcceptable(fe, - self.theta_max)) + self.assertFalse(self.tmpFilter.isAcceptable(fe, self.theta_max)) diff --git a/pyomo/contrib/trustregion/tests/test_interface.py b/pyomo/contrib/trustregion/tests/test_interface.py index 685f749053c..24517041b2c 100644 --- a/pyomo/contrib/trustregion/tests/test_interface.py +++ b/pyomo/contrib/trustregion/tests/test_interface.py @@ -20,9 +20,19 @@ import pyomo.common.unittest as unittest from pyomo.common.collections import ComponentMap, ComponentSet from pyomo.environ import ( - Var, VarList, ConcreteModel, Reals, ExternalFunction, value, - Objective, Constraint, sqrt, sin, cos, SolverFactory - ) + Var, + VarList, + ConcreteModel, + Reals, + ExternalFunction, + value, + Objective, + Constraint, + sqrt, + sin, + cos, + SolverFactory, +) from pyomo.core.base.var import _GeneralVarData from pyomo.core.expr.numeric_expr import ExternalFunctionExpression from pyomo.core.expr.visitor import identify_variables @@ -33,45 +43,52 @@ class TestTrustRegionInterface(unittest.TestCase): - def setUp(self): self.m = ConcreteModel() - self.m.z = Var(range(3), domain=Reals, initialize=2.) - self.m.x = Var(range(2), initialize=2.) + self.m.z = Var(range(3), domain=Reals, initialize=2.0) + self.m.x = Var(range(2), initialize=2.0) self.m.x[1] = 1.0 def blackbox(a, b): return sin(a - b) + def grad_blackbox(args, fixed): a, b = args[:2] - return [ cos(a - b), -cos(a - b) ] + return [cos(a - b), -cos(a - b)] self.m.bb = ExternalFunction(blackbox, grad_blackbox) self.m.obj = Objective( - expr=(self.m.z[0]-1.0)**2 + (self.m.z[0]-self.m.z[1])**2 - + (self.m.z[2]-1.0)**2 + (self.m.x[0]-1.0)**4 - + (self.m.x[1]-1.0)**6 + expr=(self.m.z[0] - 1.0) ** 2 + + (self.m.z[0] - self.m.z[1]) ** 2 + + (self.m.z[2] - 1.0) ** 2 + + (self.m.x[0] - 1.0) ** 4 + + (self.m.x[1] - 1.0) ** 6 ) self.m.c1 = Constraint( - expr=(self.m.x[0] * self.m.z[0]**2 - + self.m.bb(self.m.x[0], self.m.x[1]) - == 2*sqrt(2.0)) + expr=( + self.m.x[0] * self.m.z[0] ** 2 + self.m.bb(self.m.x[0], self.m.x[1]) + == 2 * sqrt(2.0) ) + ) self.m.c2 = Constraint( - expr=self.m.z[2]**4 * self.m.z[1]**2 + self.m.z[1] == 8+sqrt(2.0)) + expr=self.m.z[2] ** 4 * self.m.z[1] ** 2 + self.m.z[1] == 8 + sqrt(2.0) + ) self.config = _trf_config() - self.ext_fcn_surrogate_map_rule = lambda comp,ef: 0 - self.interface = TRFInterface(self.m, - [self.m.z[0], self.m.z[1], self.m.z[2]], - self.ext_fcn_surrogate_map_rule, - self.config) + self.ext_fcn_surrogate_map_rule = lambda comp, ef: 0 + self.interface = TRFInterface( + self.m, + [self.m.z[0], self.m.z[1], self.m.z[2]], + self.ext_fcn_surrogate_map_rule, + self.config, + ) def test_initializeInterface(self): self.assertEqual(self.m, self.interface.original_model) self.assertEqual(self.config, self.interface.config) - self.assertEqual(self.interface.basis_expression_rule, - self.ext_fcn_surrogate_map_rule) + self.assertEqual( + self.interface.basis_expression_rule, self.ext_fcn_surrogate_map_rule + ) self.assertEqual('ipopt', self.interface.solver.name) def test_replaceRF(self): @@ -85,13 +102,15 @@ def test_replaceRF(self): expr = self.interface.model.obj.expr new_expr = self.interface.replaceEF(expr) self.assertEqual(expr, new_expr) - # The first contraint has one EF. + # The first constraint has one EF. # Therefore, replaceEF should do a substitution expr = self.interface.model.c1.expr new_expr = self.interface.replaceEF(expr) self.assertIsNot(expr, new_expr) - self.assertEquals(str(new_expr), - 'x[0]*z[0]**2 + trf_data.ef_outputs[1] == 2.8284271247461903') + self.assertEquals( + str(new_expr), + 'x[0]*z[0]**2 + trf_data.ef_outputs[1] == 2.8284271247461903', + ) def test_remove_ef_from_expr(self): # These data objects are normally initialized by @@ -104,16 +123,20 @@ def test_remove_ef_from_expr(self): # Therefore, remove_ef_from_expr should do nothing component = self.interface.model.obj self.interface._remove_ef_from_expr(component) - self.assertEqual(str(self.interface.model.obj.expr), - '(z[0] - 1.0)**2 + (z[0] - z[1])**2 + (z[2] - 1.0)**2 + (x[0] - 1.0)**4 + (x[1] - 1.0)**6') - # The first contraint has one EF. + self.assertEqual( + str(self.interface.model.obj.expr), + '(z[0] - 1.0)**2 + (z[0] - z[1])**2 + (z[2] - 1.0)**2 + (x[0] - 1.0)**4 + (x[1] - 1.0)**6', + ) + # The first constraint has one EF. # Therefore, remove_ef_from_expr should do something component = self.interface.model.c1 str_expr = str(component.expr) self.interface._remove_ef_from_expr(component) self.assertNotEqual(str_expr, str(component.expr)) - self.assertEqual(str(component.expr), - 'x[0]*z[0]**2 + trf_data.ef_outputs[1] == 2.8284271247461903') + self.assertEqual( + str(component.expr), + 'x[0]*z[0]**2 + trf_data.ef_outputs[1] == 2.8284271247461903', + ) def test_replaceExternalFunctionsWithVariables(self): # In running this method, we not only replace EFs @@ -125,8 +148,10 @@ def test_replaceExternalFunctionsWithVariables(self): self.assertIn(var, ComponentSet(self.interface.data.all_variables)) # Check the output vars against all_variables for i in self.interface.data.ef_outputs: - self.assertIn(self.interface.data.ef_outputs[i], - ComponentSet(self.interface.data.all_variables)) + self.assertIn( + self.interface.data.ef_outputs[i], + ComponentSet(self.interface.data.all_variables), + ) # The truth models should be a mapping from the EF to # the replacement for i, k in self.interface.data.truth_models.items(): @@ -139,23 +164,25 @@ def test_replaceExternalFunctionsWithVariables(self): self.assertEqual(k, 0) self.assertEqual(i, self.interface.data.ef_outputs[1]) self.assertEqual(1, list(self.interface.data.ef_inputs.keys())[0]) - self.assertEqual(self.interface.data.ef_inputs[1], - [self.interface.model.x[0], - self.interface.model.x[1]]) + self.assertEqual( + self.interface.data.ef_inputs[1], + [self.interface.model.x[0], self.interface.model.x[1]], + ) # HACK: This was in response to a hack. # Remove when NL writer re-write is complete. # Make sure that EFs were removed from the cloned model. self.assertEqual( - list(self.interface.model.component_objects(ExternalFunction)), - []) + list(self.interface.model.component_objects(ExternalFunction)), [] + ) # TRF only supports one active Objective. # Make sure that it fails if there are multiple objs. - self.m.obj2 = Objective( - expr=(self.m.x[0]**2 - (self.m.z[1] - 3)**3)) - interface = TRFInterface(self.m, - [self.m.z[0], self.m.z[1], self.m.z[2]], - self.ext_fcn_surrogate_map_rule, - self.config) + self.m.obj2 = Objective(expr=(self.m.x[0] ** 2 - (self.m.z[1] - 3) ** 3)) + interface = TRFInterface( + self.m, + [self.m.z[0], self.m.z[1], self.m.z[2]], + self.ext_fcn_surrogate_map_rule, + self.config, + ) with self.assertRaises(ValueError): interface.replaceExternalFunctionsWithVariables() @@ -172,11 +199,15 @@ def test_createConstraints(self): self.assertEqual(len(self.interface.data.sm_constraint_basis), 1) # Because they are size 1, they should have one key self.assertEqual(list(self.interface.data.basis_constraint.keys()), [1]) - cs = ComponentSet(identify_variables(self.interface.data.basis_constraint[1].expr)) + cs = ComponentSet( + identify_variables(self.interface.data.basis_constraint[1].expr) + ) # The basis constraint only has the EF variable self.assertEqual(len(cs), 1) self.assertIn(self.interface.data.ef_outputs[1], cs) - cs = ComponentSet(identify_variables(self.interface.data.sm_constraint_basis[1].expr)) + cs = ComponentSet( + identify_variables(self.interface.data.sm_constraint_basis[1].expr) + ) # The surrogate model constraint has the EF var, with inputs # of x[0] and x[1], as seen in self.m.c1 self.assertEqual(len(cs), 3) @@ -213,7 +244,7 @@ def test_updateSurrogateModel(self): for key, val in self.interface.data.value_of_ef_inputs.items(): self.assertEqual(value(self.interface.model.x[key[1]]), value(val)) # Change the model values to something else and try again - self.interface.model.x.set_values({0 : 0, 1 : 0}) + self.interface.model.x.set_values({0: 0, 1: 0}) self.interface.updateSurrogateModel() # The basis values should still all be 0 for key, val in self.interface.data.basis_model_output.items(): @@ -243,21 +274,24 @@ def test_getCurrentDecisionVariableValues(self): self.assertIn(var.name, list(current_values.keys())) self.assertEqual(current_values[var.name], value(var)) - @unittest.skipIf(not SolverFactory('ipopt').available(False), - "The IPOPT solver is not available") + @unittest.skipIf( + not SolverFactory('ipopt').available(False), "The IPOPT solver is not available" + ) def test_updateDecisionVariableBounds(self): # Initialize the problem self.interface.initializeProblem() # Make sure the initial bounds match the current bounds for var in self.interface.decision_variables: - self.assertEqual(self.interface.initial_decision_bounds[var.name], - [var.lb, var.ub]) + self.assertEqual( + self.interface.initial_decision_bounds[var.name], [var.lb, var.ub] + ) # Update the bounds and make sure that the initial no longer match # the current bounds self.interface.updateDecisionVariableBounds(0.5) for var in self.interface.decision_variables: - self.assertNotEqual(self.interface.initial_decision_bounds[var.name], - [var.lb, var.ub]) + self.assertNotEqual( + self.interface.initial_decision_bounds[var.name], [var.lb, var.ub] + ) def test_getCurrentModelState(self): # Set up necessary data objects @@ -270,8 +304,9 @@ def test_getCurrentModelState(self): for var in self.interface.data.all_variables: self.assertIn(value(var), result) - @unittest.skipIf(not SolverFactory('ipopt').available(False), - "The IPOPT solver is not available") + @unittest.skipIf( + not SolverFactory('ipopt').available(False), "The IPOPT solver is not available" + ) def test_calculateFeasibility(self): # Set up necessary data objects self.interface.replaceExternalFunctionsWithVariables() @@ -300,8 +335,9 @@ def test_calculateFeasibility(self): self.assertEqual(feasibility, 0.09569982275514467) self.interface.data.basis_constraint.deactivate() - @unittest.skipIf(not SolverFactory('ipopt').available(False), - "The IPOPT solver is not available") + @unittest.skipIf( + not SolverFactory('ipopt').available(False), "The IPOPT solver is not available" + ) def test_calculateStepSizeInfNorm(self): # Set up necessary data objects self.interface.replaceExternalFunctionsWithVariables() @@ -318,8 +354,7 @@ def test_calculateStepSizeInfNorm(self): original_values = self.interface.getCurrentDecisionVariableValues() self.interface.updateSurrogateModel() new_values = self.interface.getCurrentDecisionVariableValues() - stepnorm = self.interface.calculateStepSizeInfNorm(original_values, - new_values) + stepnorm = self.interface.calculateStepSizeInfNorm(original_values, new_values) # Currently, we have taken NO step. # Therefore, the norm should be 0. self.assertEqual(stepnorm, 0) @@ -329,8 +364,9 @@ def test_calculateStepSizeInfNorm(self): self.assertEqual(step_norm, 3.393437471478297) self.interface.data.basis_constraint.deactivate() - @unittest.skipIf(not SolverFactory('ipopt').available(False), - "The IPOPT solver is not available") + @unittest.skipIf( + not SolverFactory('ipopt').available(False), "The IPOPT solver is not available" + ) def test_solveModel(self): # Set up initial data objects and Params self.interface.replaceExternalFunctionsWithVariables() @@ -358,25 +394,27 @@ def test_solveModel(self): self.assertEqual(step_norm, 0.0017225116628372117) self.assertEqual(feasibility, 0.00014665023773349772) - @unittest.skipIf(not SolverFactory('ipopt').available(False), - "The IPOPT solver is not available") + @unittest.skipIf( + not SolverFactory('ipopt').available(False), "The IPOPT solver is not available" + ) def test_initializeProblem(self): # Set starter values on the model self.interface.model.x[0] = 2.0 self.interface.model.z.set_values({0: 5.0, 1: 2.5, 2: -1.0}) objective, feasibility = self.interface.initializeProblem() for var in self.interface.decision_variables: - self.assertIn(var.name, - list(self.interface.initial_decision_bounds.keys())) - self.assertEqual(self.interface.initial_decision_bounds[var.name], - [var.lb, var.ub]) + self.assertIn(var.name, list(self.interface.initial_decision_bounds.keys())) + self.assertEqual( + self.interface.initial_decision_bounds[var.name], [var.lb, var.ub] + ) self.assertEqual(objective, 5.150744273013601) self.assertEqual(feasibility, 0.09569982275514467) self.assertTrue(self.interface.data.sm_constraint_basis.active) self.assertFalse(self.interface.data.basis_constraint.active) - @unittest.skipIf(not SolverFactory('ipopt').available(False), - "The IPOPT solver is not available") + @unittest.skipIf( + not SolverFactory('ipopt').available(False), "The IPOPT solver is not available" + ) def test_rejectStep(self): self.interface.model.x[1] = 1.5 self.interface.model.x[0] = 2.0 @@ -385,8 +423,10 @@ def test_rejectStep(self): self.interface.createConstraints() self.interface.data.basis_constraint.activate() _, _, _ = self.interface.solveModel() - self.assertEqual(len(self.interface.data.all_variables), - len(self.interface.data.previous_model_state)) + self.assertEqual( + len(self.interface.data.all_variables), + len(self.interface.data.previous_model_state), + ) # Make sure the values changed from the original model self.assertNotEqual(value(self.interface.model.x[0]), 2.0) self.assertNotEqual(value(self.interface.model.x[1]), 1.5) diff --git a/pyomo/contrib/trustregion/tests/test_util.py b/pyomo/contrib/trustregion/tests/test_util.py index e7a8239e6b3..3054c2c2bd5 100644 --- a/pyomo/contrib/trustregion/tests/test_util.py +++ b/pyomo/contrib/trustregion/tests/test_util.py @@ -22,9 +22,7 @@ import pyomo.common.unittest as unittest -from pyomo.contrib.trustregion.util import ( - IterationLogger, minIgnoreNone, maxIgnoreNone -) +from pyomo.contrib.trustregion.util import IterationLogger, minIgnoreNone, maxIgnoreNone from pyomo.common.log import LoggingIntercept @@ -65,14 +63,16 @@ def test_maxIgnoreNone(self): self.assertEqual(maxIgnoreNone(a, b), None) def test_IterationRecord(self): - self.iterLogger.newIteration(self.iteration, self.thetak, self.objk, - self.radius, self.stepNorm) + self.iterLogger.newIteration( + self.iteration, self.thetak, self.objk, self.radius, self.stepNorm + ) self.assertEqual(len(self.iterLogger.iterations), 1) self.assertEqual(self.iterLogger.iterations[0].objectiveValue, 5.0) def test_logIteration(self): - self.iterLogger.newIteration(self.iteration, self.thetak, self.objk, - self.radius, self.stepNorm) + self.iterLogger.newIteration( + self.iteration, self.thetak, self.objk, self.radius, self.stepNorm + ) OUTPUT = StringIO() with LoggingIntercept(OUTPUT, 'pyomo.contrib.trustregion', logging.INFO): self.iterLogger.logIteration() @@ -81,8 +81,9 @@ def test_logIteration(self): self.assertIn('stepNorm =', OUTPUT.getvalue()) def test_updateIteration(self): - self.iterLogger.newIteration(self.iteration, self.thetak, self.objk, - self.radius, self.stepNorm) + self.iterLogger.newIteration( + self.iteration, self.thetak, self.objk, self.radius, self.stepNorm + ) self.assertEqual(self.iterLogger.iterations[0].objectiveValue, self.objk) self.assertEqual(self.iterLogger.iterations[0].feasibility, self.thetak) self.assertEqual(self.iterLogger.iterations[0].trustRadius, self.radius) @@ -107,16 +108,18 @@ def test_updateIteration(self): self.assertEqual(self.iterLogger.iterations[0].feasibility, 5.0) self.assertEqual(self.iterLogger.iterations[0].trustRadius, 100) self.assertEqual(self.iterLogger.iterations[0].stepNorm, 1) - self.iterLogger.updateIteration(feasibility=10.0, objectiveValue=0.2, - trustRadius=1000, stepNorm=10) + self.iterLogger.updateIteration( + feasibility=10.0, objectiveValue=0.2, trustRadius=1000, stepNorm=10 + ) self.assertEqual(self.iterLogger.iterations[0].objectiveValue, 0.2) self.assertEqual(self.iterLogger.iterations[0].feasibility, 10.0) self.assertEqual(self.iterLogger.iterations[0].trustRadius, 1000) self.assertEqual(self.iterLogger.iterations[0].stepNorm, 10) def test_printIteration(self): - self.iterLogger.newIteration(self.iteration, self.thetak, self.objk, - self.radius, self.stepNorm) + self.iterLogger.newIteration( + self.iteration, self.thetak, self.objk, self.radius, self.stepNorm + ) OUTPUT = StringIO() sys.stdout = OUTPUT self.iterLogger.printIteration() diff --git a/pyomo/contrib/trustregion/util.py b/pyomo/contrib/trustregion/util.py index d82d3dd2c0e..f27420a2bee 100644 --- a/pyomo/contrib/trustregion/util.py +++ b/pyomo/contrib/trustregion/util.py @@ -36,6 +36,7 @@ def minIgnoreNone(a, b): return a return b + def maxIgnoreNone(a, b): """ Return the max of two numbers, ignoring None @@ -54,10 +55,16 @@ class IterationRecord: Record relevant information at each individual iteration """ - def __init__(self, iteration, feasibility=None, objectiveValue=None, - trustRadius=None, stepNorm=None): + def __init__( + self, + iteration, + feasibility=None, + objectiveValue=None, + trustRadius=None, + stepNorm=None, + ): self.iteration = iteration - self.fStep, self.thetaStep, self.rejected = [False]*3 + self.fStep, self.thetaStep, self.rejected = [False] * 3 if feasibility is not None: self.feasibility = feasibility if objectiveValue is not None: @@ -98,30 +105,35 @@ def verboseLogger(self): print("INFO: theta-type step") if self.rejected: print("INFO: step rejected") - print(25*'*') + print(25 * '*') class IterationLogger: """ Log (and print) information for all iterations """ + def __init__(self): self.iterations = [] - def newIteration(self, iteration, feasibility, objectiveValue, - trustRadius, stepNorm): + def newIteration( + self, iteration, feasibility, objectiveValue, trustRadius, stepNorm + ): """ Add a new iteration to the list of iterations """ - self.iterrecord = IterationRecord(iteration, - feasibility=feasibility, - objectiveValue=objectiveValue, - trustRadius=trustRadius, - stepNorm=stepNorm) + self.iterrecord = IterationRecord( + iteration, + feasibility=feasibility, + objectiveValue=objectiveValue, + trustRadius=trustRadius, + stepNorm=stepNorm, + ) self.iterations.append(self.iterrecord) - def updateIteration(self, feasibility=None, objectiveValue=None, - trustRadius=None, stepNorm=None): + def updateIteration( + self, feasibility=None, objectiveValue=None, trustRadius=None, stepNorm=None + ): """ Update values in current record """ diff --git a/pyomo/contrib/viewer/README.md b/pyomo/contrib/viewer/README.md index 59eda623516..cfc50b54ce2 100644 --- a/pyomo/contrib/viewer/README.md +++ b/pyomo/contrib/viewer/README.md @@ -3,7 +3,9 @@ ## Overview This is an interactive tree viewer for Pyomo models. You can inspect and change values, bound, fixed, and active attributes of Pyomo components. It also -calculates and displays constraint and named expression values. +calculates and displays constraint and named expression values. When used with +Jupyter, the graphical elements are run within the Jupyter kernel, so the UI can +be extended at runtime. ## Installation @@ -12,8 +14,8 @@ calculates and displays constraint and named expression values. The model viewer has a few additional Python package requirements beyond the standard Pyomo install. -The standard way to use the model viewer is from IPython or Jupyter. **PyQt5** -is required, and to use the stand-alone viewer, Jupyter **qtconsole** is +The standard way to use the model viewer is from IPython or Jupyter. **Pyside6** or +**PyQt5** is required, and to use the stand-alone viewer, Jupyter **qtconsole** is required. ### Install @@ -52,17 +54,17 @@ be updated, since for very large models the time required may be significant. ### Opening the Stand-Alone Version Run ```pyomo model-viewer``` to get a stand-alone model viewer. The standalone -viewer is based on the example code at -https://github.com/jupyter/qtconsole/blob/master/examples/embed_qtconsole.py. -The viewer will start with an empty Pyomo ConcreteModel called ```model```. The -advantage of the stand-alone viewer is that it will automatically set up the -environment and start the UI, saving typing a few lines of code. It also has a -few menu items to help do common tasks. In the kernel, ``pyomo.environ`` is -imported as ```pyo```. An empty ConcreteModel is available as ```model``` and -linked to the viewer. To launch the model viewer select "Show/Start Model Viewer" -from the "View" menu in the qtconsole window. After launching the model viewer -it is available as ```ui```. This provides a useful ability to script UI -actions. You can link the model viewer to other models. +viewer is the standard Jupyter qtconsole app with a few minor modifications. The +file menu, contains a run script action. This will allow you to run a script +to build a Pyomo model (this is the same as using the %run magic). The view +menu has two additional items to show and hide the Pyomo model viewer. When a +new Jupyter kernel is started, it will automatically set up the Pyomo model +viewer and ```import pyomo.environ as pyo```. + +Once the Pyomo model viewer is opened, the model viewer main window object is +available in the kernel as ```ui```. You can interact with the UI through the +Qt API, allowing you to add or modify UI elements at run time. In the qtconsole +app you can run multiple kernels and have multiple model viewers open. ### Setting the Model @@ -79,7 +81,7 @@ the ```__main__``` name space and allows you to select one to view. You can have multiple models and switch the model viewer widgets between them using ```ui.set_model(model)```, or the model selector. -# Controling the UI +# Controlling the UI You can interact with the UI through code. For example (assuming the UI object is ```ui```) to expand or collapse the tree view for variables: @@ -107,3 +109,14 @@ You can even add widgets and customize the interface while it is running. 6. Save/Load 7. Additional useful subwindows (maybe data frame views and plotting for multiple runs)? + +## Known Bugs + +If you use the Qt interface in the qtconsole app, the automatic documentation +for Qt functions doesn't work properly. To fix this, you need to ```import PySide6``` +or ```import PyQt5``` as appropriate. + +If you use the Qt API to modify the Pyomo modelviewer on the fly, it is +fairly easy to crash the kernel by providing the wrong arguments to a +function. Use caution and if you have a UI modification you regularly +use, it is probably best to run a canned script. diff --git a/pyomo/contrib/viewer/main.ui b/pyomo/contrib/viewer/main.ui index 512770b7ff4..76cd1c6f9c5 100644 --- a/pyomo/contrib/viewer/main.ui +++ b/pyomo/contrib/viewer/main.ui @@ -182,9 +182,9 @@ Move Cell &Down - + - &Interupt Kernel + &Interrupt Kernel diff --git a/pyomo/contrib/viewer/model_browser.py b/pyomo/contrib/viewer/model_browser.py index 6ce3e1ab294..8379518a4cf 100644 --- a/pyomo/contrib/viewer/model_browser.py +++ b/pyomo/contrib/viewer/model_browser.py @@ -23,8 +23,6 @@ """ A simple GUI viewer/editor for Pyomo models. """ -from __future__ import division, print_function, absolute_import - __author__ = "John Eslick" import os @@ -32,57 +30,65 @@ _log = logging.getLogger(__name__) -from pyomo.contrib.viewer.qt import * +import pyomo.contrib.viewer.qt as myqt from pyomo.contrib.viewer.report import value_no_exception, get_residual -from pyomo.core.base.block import _BlockData -from pyomo.core.base.var import _VarData -from pyomo.core.base.constraint import _ConstraintData from pyomo.core.base.param import _ParamData -from pyomo.environ import Block, Var, Constraint, Param, Expression, value - -mypath = os.path.dirname(__file__) +from pyomo.environ import ( + Block, + BooleanVar, + Var, + Constraint, + Param, + Expression, + value, + units, +) +from pyomo.common.fileutils import this_file_dir + +mypath = this_file_dir() try: - _ModelBrowserUI, _ModelBrowser = \ - uic.loadUiType(os.path.join(mypath, "model_browser.ui")) + _ModelBrowserUI, _ModelBrowser = myqt.uic.loadUiType( + os.path.join(mypath, "model_browser.ui") + ) except: # This lets the file still be imported, but you won't be able to use it class _ModelBrowserUI(object): pass + class _ModelBrowser(object): pass - class QItemEditorCreatorBase(object): - pass - class QItemDelegate(object): - pass -class LineEditCreator(QItemEditorCreatorBase): + +class LineEditCreator(myqt.QItemEditorCreatorBase): """ Class to create editor widget for int and floats in a model view type object """ + def createWidget(self, parent): - return QLineEdit(parent=parent) + return myqt.QLineEdit(parent=parent) -class NumberDelegate(QItemDelegate): +class NumberDelegate(myqt.QItemDelegate): """ Tree view item delegate. This is used here to change how items are edited. """ + def __init__(self, parent): - super(QItemDelegate, self).__init__(parent=parent) - factory = QItemEditorFactory() - factory.registerEditor(QtCore.QVariant.Int, LineEditCreator()) - factory.registerEditor(QtCore.QVariant.Double, LineEditCreator()) + super().__init__(parent=parent) + factory = myqt.QItemEditorFactory() + factory.registerEditor(myqt.QMetaType.Int, LineEditCreator()) + factory.registerEditor(myqt.QMetaType.Double, LineEditCreator()) self.setItemEditorFactory(factory) def setModelData(self, editor, model, index): - if isinstance(editor, QComboBox): + if isinstance(editor, myqt.QComboBox): value = editor.currentText() else: value = editor.text() a = model.column[index.column()] isinstance(index.internalPointer().get(a), bool) - try: # Recognize ints and floats. + try: # Recognize ints and floats. if value == "False" or value == "false": index.internalPointer().set(a, False) elif value == "True" or value == "true": @@ -91,22 +97,21 @@ def setModelData(self, editor, model, index): index.internalPointer().set(a, float(value)) else: index.internalPointer().set(a, int(value)) - except: # If not a valid number ignore + except: # If not a valid number ignore pass class ModelBrowser(_ModelBrowser, _ModelBrowserUI): - def __init__(self, ui_data, parent=None, standard="Var"): + def __init__(self, ui_data, standard="Var"): """ - Create a dock widdget with a QTreeView of a Pyomo model. + Create a dock widget with a QTreeView of a Pyomo model. Args: - parent: parent widget ui_data: Contains model and ui information - standard: A standard setup for differnt types of model components + standard: A standard setup for different types of model components {"Var", "Constraint", "Param", "Expression"} """ - super(ModelBrowser, self).__init__(parent=parent) + super().__init__() self.setupUi(self) # The default int and double spin boxes are not good for this # application. So just use regular line edits. @@ -116,8 +121,8 @@ def __init__(self, ui_data, parent=None, standard="Var"): self.treeView.setItemDelegate(number_delegate) if standard == "Var": # This if block sets up standard views - components = Var - columns = ["name", "value", "ub", "lb", "fixed", "stale"] + components = (Var, BooleanVar) + columns = ["name", "value", "ub", "lb", "fixed", "stale", "units", "domain"] editable = ["value", "ub", "lb", "fixed"] self.setWindowTitle("Variables") elif standard == "Constraint": @@ -127,27 +132,31 @@ def __init__(self, ui_data, parent=None, standard="Var"): self.setWindowTitle("Constraints") elif standard == "Param": components = Param - columns = ["name", "value", "mutable"] + columns = ["name", "value", "mutable", "units"] editable = ["value"] self.setWindowTitle("Parameters") elif standard == "Expression": components = Expression - columns = ["name", "value"] + columns = ["name", "value", "units"] editable = [] self.setWindowTitle("Expressions") else: raise ValueError("{} is not a valid view type".format(standard)) # Create a data model. This is what translates the Pyomo model into # a tree view. - datmodel = ComponentDataModel(self, ui_data=ui_data, - columns=columns, components=components, - editable=editable) + datmodel = ComponentDataModel( + self, + ui_data=ui_data, + columns=columns, + components=components, + editable=editable, + ) self.datmodel = datmodel self.treeView.setModel(datmodel) - self.treeView.setColumnWidth(0,400) + self.treeView.setColumnWidth(0, 400) # Selection behavior: select a whole row, can select multiple rows. - self.treeView.setSelectionBehavior(QAbstractItemView.SelectRows) - self.treeView.setSelectionMode(QAbstractItemView.ExtendedSelection) + self.treeView.setSelectionBehavior(myqt.QAbstractItemView.SelectRows) + self.treeView.setSelectionMode(myqt.QAbstractItemView.ExtendedSelection) def refresh(self): added = self.datmodel._update_tree() @@ -167,29 +176,38 @@ class ComponentDataItem(object): o: pyomo component object ui_data: a container for data, as of now mainly just pyomo model """ + def __init__(self, parent, o, ui_data): self.ui_data = ui_data self.data = o self.parent = parent - self.children = [] # child items + self.children = [] # child items self.ids = {} self.get_callback = { "value": self._get_value_callback, "lb": self._get_lb_callback, "ub": self._get_ub_callback, "expr": self._get_expr_callback, - "residual": self._get_residual_callback} + "residual": self._get_residual_callback, + "units": self._get_units_callback, + "domain": self._get_domain_callback, + } self.set_callback = { - "value":self._set_value_callback, - "lb":self._set_lb_callback, - "ub":self._set_ub_callback, - "active":self._set_active_callback, - "fixed":self._set_fixed_callback} + "value": self._set_value_callback, + "lb": self._set_lb_callback, + "ub": self._set_ub_callback, + "active": self._set_active_callback, + "fixed": self._set_fixed_callback, + } @property def _cache_value(self): return self.ui_data.value_cache.get(self.data, None) + @property + def _cache_units(self): + return self.ui_data.value_cache_units.get(self.data, None) + def add_child(self, o): """Add a child data item""" item = ComponentDataItem(self, o, ui_data=self.ui_data) @@ -226,24 +244,34 @@ def _get_expr_callback(self): def _get_value_callback(self): if isinstance(self.data, _ParamData): - v = value(self.data) + v = value_no_exception(self.data, div0="divide_by_0") + # Check the param value for numpy float and int, sometimes numpy + # values can sneak in especially if you set parameters from data + # and for whatever reason numpy values don't display + if isinstance(v, float): # includes numpy float + v = float(v) + elif isinstance(v, int): # includes numpy int + v = int(v) + return v + elif isinstance( + self.data, (Var._ComponentDataClass, BooleanVar._ComponentDataClass) + ): + v = value_no_exception(self.data) # Check the param value for numpy float and int, sometimes numpy # values can sneak in especially if you set parameters from data # and for whatever reason numpy values don't display - if isinstance(v, float): # includes numpy float + if isinstance(v, float): # includes numpy float v = float(v) - elif isinstance(v, int): # includes numpy int + elif isinstance(v, int): # includes numpy int v = int(v) return v - elif isinstance(self.data, _VarData): - return value(self.data, exception=False) elif isinstance(self.data, (float, int)): return self.data else: return self._cache_value def _get_lb_callback(self): - if isinstance(self.data, _VarData): + if isinstance(self.data, (Var._ComponentDataClass)): return self.data.lb elif hasattr(self.data, "lower"): return value_no_exception(self.data.lower, div0="Divide_by_0") @@ -251,7 +279,7 @@ def _get_lb_callback(self): return None def _get_ub_callback(self): - if isinstance(self.data, _VarData): + if isinstance(self.data, (Var._ComponentDataClass)): return self.data.ub elif hasattr(self.data, "upper"): return value_no_exception(self.data.upper, div0="Divide_by_0") @@ -259,41 +287,83 @@ def _get_ub_callback(self): return None def _get_residual_callback(self): - if isinstance(self.data, _ConstraintData): + if isinstance(self.data, Constraint._ComponentDataClass): return get_residual(self.ui_data, self.data) else: return None + def _get_units_callback(self): + if isinstance(self.data, (Var, Var._ComponentDataClass)): + return str(units.get_units(self.data)) + if isinstance(self.data, (Param, _ParamData)): + return str(units.get_units(self.data)) + return self._cache_units + + def _get_domain_callback(self): + if isinstance(self.data, Var._ComponentDataClass): + return str(self.data.domain) + if isinstance(self.data, (BooleanVar, BooleanVar._ComponentDataClass)): + return "BooleanVar" + return None + def _set_value_callback(self, val): - if isinstance(self.data, _VarData): + if isinstance( + self.data, (Var._ComponentDataClass, BooleanVar._ComponentDataClass) + ): try: self.data.value = val except: return + elif isinstance(self.data, (Var, BooleanVar)): + try: + for o in self.data.values(): + o.value = val + except: + return elif isinstance(self.data, _ParamData): - if not self.data.parent_component().mutable: return + if not self.data.parent_component().mutable: + return try: self.data.value = val except: return + elif isinstance(self.data, Param): + if not self.data.parent_component().mutable: + return + try: + for o in self.data.values(): + o.value = val + except: + return def _set_lb_callback(self, val): - if isinstance(self.data, _VarData): + if isinstance(self.data, (Var._ComponentDataClass)): try: self.data.setlb(val) except: return + elif isinstance(self.data, Var): + try: + for o in self.data.values(): + o.setlb(val) + except: + return def _set_ub_callback(self, val): - if isinstance(self.data, _VarData): + if isinstance(self.data, (Var._ComponentDataClass)): try: self.data.setub(val) except: return + elif isinstance(self.data, Var): + try: + for o in self.data.values(): + o.setub(val) + except: + return def _set_active_callback(self, val): - if not val or val == "False" or val == "false" or val == "0" or \ - val == "f" or val == "F": + if not val or val in ["False", "false", "0", "f", "F"]: # depending on the version of Qt, you may see a combo box that # lets you select true/false or may be able to type the combo # box will return True or False, if you have to type could be @@ -310,8 +380,7 @@ def _set_active_callback(self, val): return def _set_fixed_callback(self, val): - if not val or val == "False" or val == "false" or val == "0" or \ - val == "f" or val == "F": + if not val or val in ["False", "false", "0", "f", "F"]: # depending on the version of Qt, you may see a combo box that # lets you select true/false or may be able to type the combo # box will return True or False, if you have to type could be @@ -328,14 +397,21 @@ def _set_fixed_callback(self, val): return -class ComponentDataModel(QAbstractItemModel): +class ComponentDataModel(myqt.QAbstractItemModel): """ This is a data model to provide the tree structure and information to the tree viewer """ - def __init__(self, parent, ui_data, columns=["name", "value"], - components=(Var,), editable=[]): - super(ComponentDataModel, self).__init__(parent) + + def __init__( + self, + parent, + ui_data, + columns=["name", "value"], + components=(Var, BooleanVar), + editable=[], + ): + super().__init__(parent) self.column = columns self._col_editable = editable self.ui_data = ui_data @@ -352,49 +428,56 @@ def _update_tree(self, parent=None, o=None): components as needed. The arguments are to be used in the recursive function. Entering into this don't specify any args. """ - # Blocks are special they define the hiarchy of the model, so first - # check for blocks. Other comonent can be handled togeter - if o is None and len(self.rootItems) > 0: #top level object (no parent) - parent = self.rootItems[0] # should be single root node for now - o = parent.data # start with root node + # Blocks are special they define the hierarchy of the model, so first + # check for blocks. Other component can be handled together + if o is None and len(self.rootItems) > 0: # top level object (no parent) + parent = self.rootItems[0] # should be single root node for now + o = parent.data # start with root node for no in o.component_objects(descend_into=False): # This will traverse the whole Pyomo model tree self._update_tree(parent=parent, o=no) return - elif o is None: # if o is None, but no root nodes (when no model) + elif o is None: # if o is None, but no root nodes (when no model) return # past the root node go down here item = parent.ids.get(id(o), None) - if item is not None: # check if any children of item where deleted + if item is not None: # check if any children of item where deleted for i in item.children: try: if i.data.parent_block() is None: i.parent.children.remove(i) - del(i.parent.ids[id(i.data)]) - del(i) # probably should descend down and delete stuff + del i.parent.ids[id(i.data)] + del i # probably should descend down and delete stuff except AttributeError: # Probably an element of an indexed immutable param pass - if isinstance(o, _BlockData): #single block or element of indexed block + if isinstance( + o, Block._ComponentDataClass + ): # single block or element of indexed block if item is None: item = self._add_item(parent=parent, o=o) for no in o.component_objects(descend_into=False): self._update_tree(parent=item, o=no) - elif isinstance(o, Block): #indexed block, so need to add elements + elif isinstance(o, Block): # indexed block, so need to add elements if item is None: item = self._add_item(parent=parent, o=o) - if hasattr(o.index_set(), "is_constructed") and \ - o.index_set().is_constructed(): + if ( + hasattr(o.index_set(), "is_constructed") + and o.index_set().is_constructed() + ): for key in sorted(o.keys()): self._update_tree(parent=item, o=o[key]) - elif isinstance(o, self.components): #anything else + elif isinstance(o, self.components): # anything else if item is None: item = self._add_item(parent=parent, o=o) - if hasattr(o.index_set(), "is_constructed") and \ - o.index_set().is_constructed(): + if ( + hasattr(o.index_set(), "is_constructed") + and o.index_set().is_constructed() + ): for key in sorted(o.keys()): - if key == None: break # Single variable so skip + if key == None: + break # Single variable so skip item2 = item.ids.get(id(o[key]), None) if item2 is None: item2 = self._add_item(parent=item, o=o[key]) @@ -408,24 +491,31 @@ def _create_tree(self, parent=None, o=None): parent: a ComponentDataItem underwhich to create a TreeItem o: A Pyomo component to add to the tree """ - # Blocks are special they define the hiarchy of the model, so first - # check for blocks. Other comonent can be handled togeter - if isinstance(o, _BlockData): #single block or element of indexed block + # Blocks are special they define the hierarchy of the model, so first + # check for blocks. Other component can be handled together + if isinstance( + o, Block._ComponentDataClass + ): # single block or element of indexed block item = self._add_item(parent=parent, o=o) for no in o.component_objects(descend_into=False): self._create_tree(parent=item, o=no) - elif isinstance(o, Block): #indexed block, so need to add elements + elif isinstance(o, Block): # indexed block, so need to add elements item = self._add_item(parent=parent, o=o) - if hasattr(o.index_set(), "is_constructed") and \ - o.index_set().is_constructed(): + if ( + hasattr(o.index_set(), "is_constructed") + and o.index_set().is_constructed() + ): for key in sorted(o.keys()): self._create_tree(parent=item, o=o[key]) - elif isinstance(o, self.components): #anything else + elif isinstance(o, self.components): # anything else item = self._add_item(parent=parent, o=o) - if hasattr(o.index_set(), "is_constructed") and \ - o.index_set().is_constructed(): + if ( + hasattr(o.index_set(), "is_constructed") + and o.index_set().is_constructed() + ): for key in sorted(o.keys()): - if key == None: break #Single variable so skip + if key == None: + break # Single variable so skip self._add_item(parent=item, o=o[key]) def _add_item(self, parent, o): @@ -448,61 +538,74 @@ def _add_root_item(self, o): def parent(self, index): if not index.isValid(): - return QtCore.QModelIndex() + return myqt.QtCore.QModelIndex() item = index.internalPointer() if item.parent is None: - return QtCore.QModelIndex() + return myqt.QtCore.QModelIndex() else: return self.createIndex(0, 0, item.parent) - def index(self, row, column, parent=QtCore.QModelIndex()): + def index(self, row, column, parent=myqt.QtCore.QModelIndex()): if not parent.isValid(): return self.createIndex(row, column, self.rootItems[row]) parentItem = parent.internalPointer() return self.createIndex(row, column, parentItem.children[row]) - def columnCount(self, parent=QtCore.QModelIndex()): + def columnCount(self, parent=myqt.QtCore.QModelIndex()): """ Return the number of columns """ return len(self.column) - def rowCount(self, parent=QtCore.QModelIndex()): + def rowCount(self, parent=myqt.QtCore.QModelIndex()): if not parent.isValid(): return len(self.rootItems) return len(parent.internalPointer().children) - def data(self, index=QtCore.QModelIndex(), role=QtCore.Qt.DisplayRole): - if role==QtCore.Qt.DisplayRole or role==QtCore.Qt.EditRole: + def data( + self, index=myqt.QtCore.QModelIndex(), role=myqt.Qt.ItemDataRole.DisplayRole + ): + if ( + role == myqt.Qt.ItemDataRole.DisplayRole + or role == myqt.Qt.ItemDataRole.EditRole + ): a = self.column[index.column()] return index.internalPointer().get(a) - elif role==QtCore.Qt.ToolTipRole: + elif role == myqt.Qt.ItemDataRole.ToolTipRole: if self.column[index.column()] == "name": o = index.internalPointer() - if isinstance(o.data, _ConstraintData): + if isinstance(o.data, Constraint._ComponentDataClass): return o.get("expr") else: return o.get("doc") - elif role==QtCore.Qt.ForegroundRole: - if isinstance(index.internalPointer().data, (Block, _BlockData)): - return QtCore.QVariant(QColor(QtCore.Qt.black)) + elif role == myqt.Qt.ItemDataRole.ForegroundRole: + if isinstance( + index.internalPointer().data, (Block, Block._ComponentDataClass) + ): + return myqt.QColor(myqt.QtCore.Qt.black) else: - return QtCore.QVariant(QColor(QtCore.Qt.blue)); + return myqt.QColor(myqt.QtCore.Qt.blue) else: return - def headerData(self, i, orientation, role=QtCore.Qt.DisplayRole): + def headerData(self, i, orientation, role=myqt.Qt.ItemDataRole.DisplayRole): """ Return the column headings for the horizontal header and index numbers for the vertical header. """ - if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole: + if ( + orientation == myqt.Qt.Orientation.Horizontal + and role == myqt.Qt.ItemDataRole.DisplayRole + ): return self.column[i] return None - def flags(self, index=QtCore.QModelIndex()): + def flags(self, index=myqt.QtCore.QModelIndex()): if self.column[index.column()] in self._col_editable: - return(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | - QtCore.Qt.ItemIsEditable) + return ( + myqt.Qt.ItemFlag.ItemIsEnabled + | myqt.Qt.ItemFlag.ItemIsSelectable + | myqt.Qt.ItemFlag.ItemIsEditable + ) else: - return(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable) + return myqt.Qt.ItemFlag.ItemIsEnabled | myqt.Qt.ItemFlag.ItemIsSelectable diff --git a/pyomo/contrib/viewer/model_select.py b/pyomo/contrib/viewer/model_select.py index fb84a203a7f..3c6c4ccdf17 100644 --- a/pyomo/contrib/viewer/model_select.py +++ b/pyomo/contrib/viewer/model_select.py @@ -23,8 +23,6 @@ """ A simple GUI viewer/editor for Pyomo models. """ -from __future__ import division, print_function, absolute_import - __author__ = "John Eslick" import logging @@ -33,22 +31,26 @@ _log = logging.getLogger(__name__) import pyomo.environ as pyo -from pyomo.contrib.viewer.qt import * +import pyomo.contrib.viewer.qt as myqt +from pyomo.common.fileutils import this_file_dir -mypath = os.path.dirname(__file__) +mypath = this_file_dir() try: - _ModelSelectUI, _ModelSelect = \ - uic.loadUiType(os.path.join(mypath, "model_select.ui")) + _ModelSelectUI, _ModelSelect = myqt.uic.loadUiType( + os.path.join(mypath, "model_select.ui") + ) except: # This lets the file still be imported, but you won't be able to use it class _ModelSelectUI(object): pass + class _ModelSelect(object): pass + class ModelSelect(_ModelSelect, _ModelSelectUI): - def __init__(self, ui_data, parent=None): - super(ModelSelect, self).__init__(parent=parent) + def __init__(self, parent, ui_data): + super().__init__(parent) self.setupUi(self) self.ui_data = ui_data self.closeButton.clicked.connect(self.close) @@ -63,6 +65,7 @@ def select_model(self): def update_models(self): import __main__ + s = __main__.__dict__ keys = [] for k in s: @@ -72,16 +75,16 @@ def update_models(self): self.tableWidget.setRowCount(len(keys)) self.models = [] for row, k in enumerate(sorted(keys)): - item = QTableWidgetItem() + item = myqt.QTableWidgetItem() item.setText(k) self.tableWidget.setItem(row, 0, item) - item = QTableWidgetItem() + item = myqt.QTableWidgetItem() try: item.setText(s[k].name) except: item.setText("None") self.tableWidget.setItem(row, 1, item) - item = QTableWidgetItem() + item = myqt.QTableWidgetItem() item.setText(str(type(s[k]))) self.tableWidget.setItem(row, 2, item) self.models.append(s[k]) diff --git a/pyomo/contrib/viewer/model_select.ui b/pyomo/contrib/viewer/model_select.ui index ac5204b88b4..f794bb8ae49 100644 --- a/pyomo/contrib/viewer/model_select.ui +++ b/pyomo/contrib/viewer/model_select.ui @@ -47,7 +47,7 @@ - Qt::Horizontal + Qt::Qt.Orientation.Horizontal @@ -74,7 +74,7 @@ - Qt::Horizontal + Qt::Qt.Orientation.Horizontal diff --git a/pyomo/contrib/viewer/pyomo_viewer.py b/pyomo/contrib/viewer/pyomo_viewer.py index 414a335fdfa..a8fec745af4 100644 --- a/pyomo/contrib/viewer/pyomo_viewer.py +++ b/pyomo/contrib/viewer/pyomo_viewer.py @@ -20,181 +20,117 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -# based on the example code at: -# https://github.com/jupyter/qtconsole/blob/master/examples/embed_qtconsole.py - -from __future__ import print_function - import os -import sys -import time - +from pyomo.common.dependencies import attempt_import, UnavailableClass from pyomo.scripting.pyomo_parser import add_subparser -from pyomo.contrib.viewer.qt import * +import pyomo.contrib.viewer.qt as myqt + +qtconsole_app, qtconsole_available = attempt_import("qtconsole.qtconsoleapp") + -qtconsole_available = False -if qt_available: +class QtApp( + qtconsole_app.JupyterQtConsoleApp + if qtconsole_available + else UnavailableClass(qtconsole_app) +): + _kernel_cmd_show_ui = """try: + ui.show() +except NameError: try: - from qtconsole.rich_jupyter_widget import RichJupyterWidget - from qtconsole.manager import QtKernelManager - qtconsole_available = True - except ImportError: - pass - -if qtconsole_available: - def _start_kernel(): - km = QtKernelManager(autorestart=False) - km.start_kernel() - kc = km.client() - kc.start_channels() - kc.execute("%gui qt", silent=True) - # make sure there is no possible way the user can start the model - # viewer before the Qt Application in the kernel finishes starting - time.sleep(1.0) - # Now just do the standard imports of things you want to be available - # and whatever we may want to do to set up the environment just create - # an empty model, so you can start the model viewer right away. You - # can add to the model if you want to use it, or create a new one. - kc.execute(""" -from pyomo.contrib.viewer.ui import get_mainwindow -import pyomo.environ as pyo -model = pyo.ConcreteModel("Default Model")""", silent=True) - return km, kc - - class MainWindow(QMainWindow): - """A window that contains a single Qt console.""" - def __init__(self, kernel_manager, kernel_client): - super(MainWindow, self).__init__() - self.jupyter_widget = RichJupyterWidget() - self.jupyter_widget.kernel_manager = kernel_manager - self.jupyter_widget.kernel_client = kernel_client - kernel_client.hb_channel.kernel_died.connect(self.close) - kernel_client.iopub_channel.message_received.connect(self.mrcv) - menubar = self.menuBar() - run_script_act = QAction("&Run Script...", self) - wdir_set_act = QAction("Set &Working Directory...", self) - exit_act = QAction("&Exit", self) - show_ui_act = QAction("&Start/Show Model Viewer", self) - hide_ui_act = QAction("&Hide Model Viewer", self) - exit_act.triggered.connect(self.close) - show_ui_act.triggered.connect(self.show_ui) - hide_ui_act.triggered.connect(self.hide_ui) - wdir_set_act.triggered.connect(self.wdir_select) - run_script_act.triggered.connect(self.run_script) - file_menu = menubar.addMenu('&File') - view_menu = menubar.addMenu('&View') - file_menu.addAction(wdir_set_act) - file_menu.addAction(run_script_act) - file_menu.addAction(exit_act) - view_menu.addAction(show_ui_act) - view_menu.addAction(hide_ui_act) - self.status_bar = QStatusBar() - self.setStatusBar(self.status_bar) - self.status_bar.show() - self.setCentralWidget(self.jupyter_widget) - self._ui_created = False - - def wdir_select(self, checked=False, wdir=None): - """ - Change the current working directory. - - Args: - wdir (str): if None show a dialog to select, otherwise try to - change to this path - checked (bool): triggered signal sends this, but it is not used - Returns: - (str): new working directory path - """ - if wdir is None: - # Show a dialog box for user to select working directory - wd = QFileDialog(self, 'Working Directory', os.getcwd()) - wd.setFileMode(QFileDialog.DirectoryOnly) - if wd.exec_() == QFileDialog.Accepted: - wdir = wd.selectedFiles()[0] + model + except NameError: + model=None + ui, model = get_mainwindow(model=model, ask_close=False) +ui.setWindowTitle('Pyomo Model Viewer -- {}')""" + + _kernel_cmd_hide_ui = """try: + ui.hide() +except NameError: + pass""" + + _kernel_cmd_import_qt_magic = r"%gui qt" + + _kernel_cmd_import_ui = "from pyomo.contrib.viewer.ui import get_mainwindow" + + _kernel_cmd_import_pyomo_env = "import pyomo.environ as pyo" + + def active_widget_name(self): + current_widget = self.window.tab_widget.currentWidget() + current_widget_index = self.window.tab_widget.indexOf(current_widget) + return self.window.tab_widget.tabText(current_widget_index) + + def show_ui(self): + kc = self.window.active_frontend.kernel_client + kc.execute( + self._kernel_cmd_show_ui.format(self.active_widget_name()), silent=True + ) + + def hide_ui(self): + kc = self.window.active_frontend.kernel_client + kc.execute(self._kernel_cmd_hide_ui, silent=True) + + def run_script(self, checked=False, filename=None): + """Run a python script in the current kernel.""" + if filename is None: + # Show a dialog box for user to select working directory + filename = myqt.QtWidgets.QFileDialog.getOpenFileName( + self.window, + "Run Script", + os.getcwd(), + "py (*.py);;text (*.txt);;all (*)", + ) + if filename[0]: # returns a tuple of file and filter or ("","") + filename = filename[0] else: - wdir = None - # Change directory if one was selected - if wdir is not None: - os.chdir(wdir) - self.jupyter_widget.kernel_client.execute( - "%cd {}".format(wdir)) - return wdir - - def run_script(self, checked=False, filename=None): - """ - Change the current working directory. - - Args: - filename (str): if None show a dialog to select, otherwise try - to change run filename script - checked (bool): triggered signal sends this, but it is not used - Returns: - (str): selected script file - """ - if filename is None: - # Show a dialog box for user to select working directory - filename = QFileDialog.getOpenFileName( - self, - 'Run Script', - os.getcwd(), - "py (*.py);;text (*.txt);;all (*)") - if filename[0]: # returns a tuple of file and filter or ("","") - filename = filename[0] - else: - filename = None - # Run script if one was selected - if filename is not None: - self.jupyter_widget.kernel_client.execute( - "%run {}".format(filename)) - return filename - - def hide_ui(self): - if self._ui_created: - self.jupyter_widget.kernel_client.execute( - "ui.hide()", silent=True) - - def show_ui(self): - kc = self.jupyter_widget.kernel_client - if self._ui_created: - kc.execute("ui.show()", silent=True) - else: - self._ui_created = True - kc.execute("ui, model = get_mainwindow(model=model)", - silent=True) - - def shutdown_kernel(self): - print('Shutting down kernel...') - self.jupyter_widget.kernel_client.stop_channels() - self.jupyter_widget.kernel_manager.shutdown_kernel() - - def mrcv(self, m): - try: - stat = m['content']['execution_state'] - if stat: - self.status_bar.showMessage("Kernel Status: {}".format(stat)) - except: - pass + filename = None + # Run script if one was selected + if filename is not None: + kc = self.window.active_frontend.kernel_client + kc.execute("%run {}".format(filename)) + + def kernel_pyomo_init(self, kc): + kc.execute(self._kernel_cmd_import_qt_magic, silent=True) + kc.execute(self._kernel_cmd_import_ui, silent=True) + kc.execute(self._kernel_cmd_import_pyomo_env, silent=False) + + def init_qt_elements(self): + super().init_qt_elements() + self.kernel_pyomo_init(self.widget.kernel_client) + self.run_script_act = myqt.QAction("&Run Script...", self.window) + self.show_ui_act = myqt.QAction("&Show Pyomo Model Viewer", self.window) + self.hide_ui_act = myqt.QAction("&Hide Pyomo Model Viewer", self.window) + self.window.file_menu.addSeparator() + self.window.file_menu.addAction(self.run_script_act) + self.window.view_menu.addSeparator() + self.window.view_menu.addAction(self.show_ui_act) + self.window.view_menu.addAction(self.hide_ui_act) + self.window.view_menu.addSeparator() + self.run_script_act.triggered.connect(self.run_script) + self.show_ui_act.triggered.connect(self.show_ui) + self.hide_ui_act.triggered.connect(self.hide_ui) + + def new_frontend_master(self): + widget = super().new_frontend_master() + self.kernel_pyomo_init(widget.kernel_client) + return widget + def main(*args): - if not qtconsole_available: - print("qtconsole not available") + if not myqt.available or not qtconsole_available: + errors = list(myqt.import_errors) + if not qtconsole_available: + errors.append(qtconsole_app._moduleunavailable_message()) + print("qt not available\n " + "\n ".join(errors)) return - km, kc = _start_kernel() - app = QApplication(sys.argv) - window = MainWindow(kernel_manager=km, kernel_client=kc) - window.show() - app.aboutToQuit.connect(window.shutdown_kernel) - app.exec_() - -# Add a subparser for the download-extensions command + QtApp.launch_instance() + + +# Add a subparser for the model-viewer command add_subparser( - 'model-viewer', + "model-viewer", func=main, - help='Run the Pyomo model viewer', + help="Run the Pyomo model viewer", add_help=False, - description='This runs the Pyomo model viewer' + description="This runs the Pyomo model viewer", ) - -if __name__ == "__main__": - main() diff --git a/pyomo/contrib/viewer/qt.py b/pyomo/contrib/viewer/qt.py index bf60da23811..150fa3560f6 100644 --- a/pyomo/contrib/viewer/qt.py +++ b/pyomo/contrib/viewer/qt.py @@ -21,88 +21,109 @@ # ___________________________________________________________________________ """ -Import PyQt5 if available, then try PyQt4, then, if all else fails, use some -dummy classes to allow some testing. If anything fails to import, the exception -is logged. That should make it clear exacly what's missing, but it could be a -little annoying if you are using PyQt4 or don't need jupyter qtconsole. In the -future, will probably cut PyQt4 support, so it will be less of an issue. +Try to import PySide6, which is the current official Qt 6 Python interface. Then, +try PyQt5 if that doesn't work. If no compatible Qt Python interface is found, +use some dummy classes to allow some testing. """ __author__ = "John Eslick" -import logging -_log = logging.getLogger(__name__) +import enum +import importlib +# Supported Qt wrappers in preferred order +supported = ["PySide6", "PyQt5"] +# Import errors encountered, delay logging for testing reasons +import_errors = [] +# Set this to the Qt wrapper module is available +available = False -class DummyQtCore(object): - """ - A dummy QtCore class to allow some testing without PyQt - """ - class QModelIndex(object): - pass +for module_str in supported: + try: + qt_package = importlib.import_module(module_str) + QtWidgets = importlib.import_module(f"{module_str}.QtWidgets") + QtCore = importlib.import_module(f"{module_str}.QtCore") + QtGui = importlib.import_module(f"{module_str}.QtGui") + available = module_str + break + except Exception as e: + import_errors.append(f"{e}") + +if not available: + # If Qt is not available, we still want to be able to test as much + # as we can, so add some dummy classes that allow for testing class Qt(object): - class DisplayRole(object): + class ItemDataRole(enum.Enum): + EditRole = 1 + DisplayRole = 2 + ToolTipRole = 3 + ForegroundRole = 4 + + class QtCore(object): + """ + A dummy QtCore class to allow some testing without PyQt + """ + + class QModelIndex(object): + pass + + Qt = Qt + + class QAbstractItemModel(object): + """ + A dummy QAbstractItemModel class to allow some testing without PyQt + """ + + def __init__(*args, **kwargs): pass - class EditRole(object): + + class QAbstractTableModel(object): + """ + A dummy QAbstractTableModel class to allow some testing without PyQt + """ + + def __init__(*args, **kwargs): pass -class DummyQAbstractItemModel(object): - """ - A dummy QAbstractItemModel class to allow some testing without PyQt - """ - def __init__(*args, **kwargs): - pass + class QItemEditorCreatorBase(object): + """ + A dummy QItemEditorCreatorBase class to allow some testing without PyQt + """ -class DummyQAbstractTableModel(object): - """ - A dummy QAbstractTableModel class to allow some testing without PyQt - """ - def __init__(*args, **kwargs): pass -qt_available = False -qt_import_errors = [] + class QItemDelegate(object): + """ + A dummy QItemDelegate class to allow some testing without PyQt + """ + + pass -try: - from PyQt5 import QtCore -except: - qt_import_errors.append("Cannot import PyQt5.QtCore") - try: - from PyQt4 import QtCore - except: - qt_import_errors.append("Cannot import PyQt4.QtCore") - else: - try: - from PyQt4.QtGui import (QAbstractItemView, QFileDialog, QMainWindow, - QMessageBox, QMdiArea, QApplication, - QTableWidgetItem, QColor, QAction, - QStatusBar, QLineEdit, QItemEditorFactory, - QItemEditorCreatorBase, QStyledItemDelegate, - QItemDelegate, QComboBox) - from PyQt4.QtCore import (QAbstractItemModel, QAbstractTableModel, - QVariant) - import PyQt4.QtCore as QtCore - from PyQt4 import uic - qt_available = True - except: - qt_import_errors.append("Cannot import PyQt4") else: - try: - from PyQt5.QtWidgets import (QAbstractItemView, QFileDialog, QMainWindow, - QMessageBox, QMdiArea, QApplication, - QTableWidgetItem, QAction, QStatusBar, - QLineEdit, QItemEditorFactory, - QItemEditorCreatorBase, QStyledItemDelegate, - QItemDelegate, QComboBox) - from PyQt5.QtGui import QColor - from PyQt5.QtCore import (QAbstractItemModel, QAbstractTableModel, - QVariant) - import PyQt5.QtCore as QtCore + QAbstractItemView = QtWidgets.QAbstractItemView + QFileDialog = QtWidgets.QFileDialog + QMainWindow = QtWidgets.QMainWindow + QMainWindow = QtWidgets.QMainWindow + QMdiArea = QtWidgets.QMdiArea + QApplication = QtWidgets.QApplication + QTableWidgetItem = QtWidgets.QTableWidgetItem + QStatusBar = QtWidgets.QStatusBar + QLineEdit = QtWidgets.QLineEdit + QItemEditorFactory = QtWidgets.QItemEditorFactory + QItemEditorCreatorBase = QtWidgets.QItemEditorCreatorBase + QStyledItemDelegate = QtWidgets.QStyledItemDelegate + QItemDelegate = QtWidgets.QItemDelegate + QComboBox = QtWidgets.QComboBox + QMessageBox = QtWidgets.QMessageBox + QColor = QtGui.QColor + QAbstractItemModel = QtCore.QAbstractItemModel + QAbstractTableModel = QtCore.QAbstractTableModel + QMetaType = QtCore.QMetaType + Qt = QtCore.Qt + if available == "PySide6": + from PySide6.QtGui import QAction + from PySide6.QtCore import Signal + from PySide6 import QtUiTools as uic + elif available == "PyQt5": + from PyQt5.QtWidgets import QAction + from PyQt5.QtCore import pyqtSignal as Signal from PyQt5 import uic - qt_available = True - except: - qt_import_errors.append("Cannot import PyQt5") - -if not qt_available: - QAbstractItemModel = DummyQAbstractItemModel - QAbstractTableModel = DummyQAbstractTableModel - QtCore = DummyQtCore diff --git a/pyomo/contrib/viewer/report.py b/pyomo/contrib/viewer/report.py index 34fe6bb25ac..6f212b2fbc3 100644 --- a/pyomo/contrib/viewer/report.py +++ b/pyomo/contrib/viewer/report.py @@ -21,9 +21,10 @@ # ___________________________________________________________________________ from pyomo.common.collections import ComponentSet -from pyomo.core.expr.current import identify_variables +from pyomo.core.expr import identify_variables from pyomo.environ import Constraint, value + def value_no_exception(c, div0=None): """ Get value and ignore most exceptions (including division by 0). @@ -38,6 +39,7 @@ def value_no_exception(c, div0=None): except ZeroDivisionError: return div0 + def get_residual(ui_data, c): """ Calculate the residual (constraint violation) of a constraint. This residual @@ -52,20 +54,33 @@ def get_residual(ui_data, c): Returns: (float) residual """ - ub = value_no_exception(c.upper) - lb = value_no_exception(c.lower) + if c.upper is None: + ub = None # This is no upper bound + else: + ub = value_no_exception(c.upper, "Divide_by_0") + if ub is None or isinstance(ub, str): + # This is calc error + return ub + if c.lower is None: + lb = None # This is no lower bound + else: + lb = value_no_exception(c.lower, "Divide_by_0") + if lb is None or isinstance(lb, str): + # This is calc error + return lb try: v = ui_data.value_cache[c] except KeyError: - v = None - if v is None: - return + return None + if v is None or isinstance(v, str): + return v if lb is not None and v < lb: return lb - v if ub is not None and v > ub: return v - ub return 0.0 + def active_equalities(blk): """ Generator returning active equality constraints in a model. @@ -82,6 +97,7 @@ def active_equalities(blk): except ZeroDivisionError: pass + def active_constraint_set(blk): """ Return a set of active constraints in a model. @@ -93,6 +109,7 @@ def active_constraint_set(blk): """ return ComponentSet(blk.component_data_objects(Constraint, active=True)) + def active_equality_set(blk): """ Return a set of active equalities. @@ -104,6 +121,7 @@ def active_equality_set(blk): """ return ComponentSet(active_equalities(blk)) + def count_free_variables(blk): """ Count free variables that are in active equality constraints. Ignore @@ -111,18 +129,21 @@ def count_free_variables(blk): """ return len(free_variables_in_active_equalities_set(blk)) + def count_equality_constraints(blk): """ Count active equality constraints. """ return len(active_equality_set(blk)) + def count_constraints(blk): """ Count active constraints. """ return len(active_constraint_set(blk)) + def degrees_of_freedom(blk): """ Return the degrees of freedom. @@ -134,9 +155,10 @@ def degrees_of_freedom(blk): """ return count_free_variables(blk) - count_equality_constraints(blk) + def free_variables_in_active_equalities_set(blk): """ - Return a set of variables that are contined in active equalities. + Return a set of variables that are continued in active equalities. """ vin = ComponentSet() for c in active_equalities(blk): diff --git a/pyomo/contrib/viewer/residual_table.py b/pyomo/contrib/viewer/residual_table.py index 40aecf17a31..46a86adbce6 100644 --- a/pyomo/contrib/viewer/residual_table.py +++ b/pyomo/contrib/viewer/residual_table.py @@ -23,8 +23,6 @@ """ A simple GUI viewer/editor for Pyomo models. """ -from __future__ import division, print_function, absolute_import - __author__ = "John Eslick" import os @@ -32,24 +30,28 @@ _log = logging.getLogger(__name__) -from pyomo.contrib.viewer.qt import * +import pyomo.contrib.viewer.qt as myqt from pyomo.contrib.viewer.report import value_no_exception, get_residual import pyomo.environ as pyo +from pyomo.common.fileutils import this_file_dir -mypath = os.path.dirname(__file__) +mypath = this_file_dir() try: - _ResidualTableUI, _ResidualTable = \ - uic.loadUiType(os.path.join(mypath, "residual_table.ui")) + _ResidualTableUI, _ResidualTable = myqt.uic.loadUiType( + os.path.join(mypath, "residual_table.ui") + ) except: + class _ResidualTableUI(object): pass + class _ResidualTable(object): pass class ResidualTable(_ResidualTable, _ResidualTableUI): - def __init__(self, ui_data, parent=None): - super(ResidualTable, self).__init__(parent=parent) + def __init__(self, ui_data): + super().__init__() self.setupUi(self) self.ui_data = ui_data datmodel = ResidualDataModel(parent=self, ui_data=ui_data) @@ -71,9 +73,10 @@ def calculate(self): self.ui_data.calculate_constraints() self.refresh() -class ResidualDataModel(QAbstractTableModel): + +class ResidualDataModel(myqt.QAbstractTableModel): def __init__(self, parent, ui_data): - super(ResidualDataModel, self).__init__(parent) + super().__init__(parent) self.column = ["name", "residual", "value", "ub", "lb", "active"] self.ui_data = ui_data self.include_inactive = True @@ -85,37 +88,52 @@ def update_model(self): ac = None else: ac = True - self._items = list(self.ui_data.model.component_data_objects( - pyo.Constraint, active=ac)) + self._items = list( + self.ui_data.model.component_data_objects(pyo.Constraint, active=ac) + ) def sort(self): - self._items.sort(key= - lambda o: (o is None, get_residual(self.ui_data, o) - if get_residual(self.ui_data, o) is not None - else -float("inf")), reverse=True) - - def rowCount(self, parent=QtCore.QModelIndex()): + def _inactive_to_back(c): + if c.active: + return float("inf") + else: + return float("-inf") + + self._items.sort( + key=lambda o: ( + o is None, + get_residual(self.ui_data, o) + if get_residual(self.ui_data, o) is not None + and not isinstance(get_residual(self.ui_data, o), str) + else _inactive_to_back(o), + ), + reverse=True, + ) + + def rowCount(self, parent=myqt.QtCore.QModelIndex()): return len(self._items) - def columnCount(self, parent=QtCore.QModelIndex()): + def columnCount(self, parent=myqt.QtCore.QModelIndex()): return len(self.column) - def data(self, index=QtCore.QModelIndex(), role=QtCore.Qt.DisplayRole): + def data( + self, index=myqt.QtCore.QModelIndex(), role=myqt.Qt.ItemDataRole.DisplayRole + ): row = index.row() col = self.column[index.column()] - if role == QtCore.Qt.DisplayRole: + if role == myqt.Qt.ItemDataRole.DisplayRole: o = self._items[row] - if col=="name": + if col == "name": return str(o) - elif col=="residual": + elif col == "residual": return get_residual(self.ui_data, o) - elif col=="active": + elif col == "active": return o.active - elif col=="ub": + elif col == "ub": return value_no_exception(o.upper) - elif col=="lb": + elif col == "lb": return value_no_exception(o.lower) - elif col=="value": + elif col == "value": try: return self.ui_data.value_cache[o] except KeyError: @@ -123,11 +141,14 @@ def data(self, index=QtCore.QModelIndex(), role=QtCore.Qt.DisplayRole): else: return None - def headerData(self, i, orientation, role=QtCore.Qt.DisplayRole): - ''' - Return the column headings for the horizontal header and - index numbers for the vertical header. - ''' - if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole: + def headerData(self, i, orientation, role=myqt.Qt.ItemDataRole.DisplayRole): + """ + Return the column headings for the horizontal header and + index numbers for the vertical header. + """ + if ( + orientation == myqt.Qt.Orientation.Horizontal + and role == myqt.Qt.ItemDataRole.DisplayRole + ): return self.column[i] return None diff --git a/pyomo/contrib/viewer/residual_table.ui b/pyomo/contrib/viewer/residual_table.ui index 8ebd0d4b3ae..2d1d930f029 100644 --- a/pyomo/contrib/viewer/residual_table.ui +++ b/pyomo/contrib/viewer/residual_table.ui @@ -36,7 +36,7 @@ - Qt::Horizontal + Qt::Qt.Orientation.Horizontal diff --git a/pyomo/contrib/viewer/tests/test_data_model_item.py b/pyomo/contrib/viewer/tests/test_data_model_item.py index 66542f4b543..f3e7aaf9513 100644 --- a/pyomo/contrib/viewer/tests/test_data_model_item.py +++ b/pyomo/contrib/viewer/tests/test_data_model_item.py @@ -26,123 +26,172 @@ import pyomo.common.unittest as unittest -from pyomo.environ import ConcreteModel, Var, Block, Param, Expression, Constraint, Objective, ExternalFunction, Reals, log, sin, sqrt, expr +from pyomo.environ import ( + ConcreteModel, + Var, + BooleanVar, + Block, + Param, + Expression, + Constraint, + Objective, + ExternalFunction, + Reals, + log, + sin, + sqrt, + expr, +) +import pyomo.environ as pyo from pyomo.contrib.viewer.model_browser import ComponentDataItem from pyomo.contrib.viewer.ui_data import UIData +from pyomo.common.dependencies import DeferredImportError +try: + x = pyo.units.m + units_available = True +except DeferredImportError: + units_available = False + + +@unittest.skipIf(not units_available, "Pyomo units are not available") class TestDataModelItem(unittest.TestCase): def setUp(self): # Borrowed this test model from the trust region tests m = ConcreteModel() - m.z = Var(range(3), domain=Reals, initialize=2.) - m.x = Var(range(4), initialize=2.) + m.y = BooleanVar(range(3), initialize=False) + m.z = Var(range(3), domain=Reals, initialize=2.0, units=pyo.units.m) + m.x = Var(range(4), initialize=2.0, units=pyo.units.m) m.x[1] = 1.0 m.x[2] = 0.0 m.x[3] = None m.b1 = Block() m.b1.e1 = Expression(expr=m.x[0] + m.x[1]) - m.b1.e2 = Expression(expr=m.x[0]/m.x[2]) - m.b1.e3 = Expression(expr=m.x[3]*m.x[1]) + m.b1.e2 = Expression(expr=m.x[0] / m.x[2]) + m.b1.e3 = Expression(expr=m.x[3] * m.x[1]) m.b1.e4 = Expression(expr=log(m.x[2])) m.b1.e5 = Expression(expr=log(m.x[2] - 2)) - def blackbox(a,b): - return sin(a-b) + def blackbox(a, b): + return sin(a - b) + self.bb = ExternalFunction(blackbox) m.obj = Objective( - expr=(m.z[0]-1.0)**2 + (m.z[0]-m.z[1])**2 + (m.z[2]-1.0)**2 \ - + (m.x[0]-1.0)**4 + (m.x[1]-1.0)**6 # + m.bb(m.x[0],m.x[1]) - ) - m.c1 = Constraint(expr=m.x[0] * m.z[0]**2 + self.bb(m.x[0],m.x[1]) == 2*sqrt(2.0)) - m.c2 = Constraint(expr=m.z[2]**4 * m.z[1]**2 + m.z[1] == 8+sqrt(2.0)) + expr=(m.z[0] - 1.0) ** 2 + + (m.z[0] - m.z[1]) ** 2 + + (m.z[2] - 1.0) ** 2 + + (m.x[0] - 1.0) ** 4 + + (m.x[1] - 1.0) ** 6 # + m.bb(m.x[0],m.x[1]) + ) + m.c1 = Constraint( + expr=m.x[0] * m.z[0] ** 2 + self.bb(m.x[0], m.x[1]) == 2 * sqrt(2.0) + ) + m.c2 = Constraint(expr=m.z[2] ** 4 * m.z[1] ** 2 + m.z[1] == 8 + sqrt(2.0)) m.c3 = Constraint(expr=m.x[1] == 3) - m.c4 = Constraint(expr=0 == 3/m.x[2]) + m.c4 = Constraint(expr=0 == 3 / m.x[2]) m.c5 = Constraint(expr=0 == log(m.x[2])) - m.c6 = Constraint(expr=0 == log(m.x[2]-4)) + m.c6 = Constraint(expr=0 == log(m.x[2] - 4)) m.c7 = Constraint(expr=0 == log(m.x[3])) m.p1 = Param(mutable=True, initialize=1) - m.c8 = Constraint(expr = m.x[1] <= 1/m.p1) + m.p2 = Param(initialize=1) + m.p3 = Param(initialize=3.2) + m.p4 = Param([1, 2, 3], mutable=True, initialize=1) + m.c8 = Constraint(expr=m.x[1] <= 1 / m.p1) m.p1 = 0 self.m = m.clone() def test_expr_calc(self): cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.b1.e1) + parent=None, ui_data=UIData(model=self.m), o=self.m.b1.e1 + ) cdi.ui_data.calculate_expressions() self.assertAlmostEqual(cdi.get("value"), 3) - self.assertIsInstance(cdi.get("expr"), str) # test get expr str + self.assertIsInstance(cdi.get("expr"), str) # test get expr str def test_expr_calc_div0(self): cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.b1.e2) + parent=None, ui_data=UIData(model=self.m), o=self.m.b1.e2 + ) cdi.ui_data.calculate_expressions() self.assertEqual(cdi.get("value"), "Divide_by_0") def test_expr_calc_log0(self): cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.b1.e4) + parent=None, ui_data=UIData(model=self.m), o=self.m.b1.e4 + ) cdi.ui_data.calculate_expressions() self.assertIsNone(cdi.get("value")) def test_expr_calc_log_neg(self): cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.b1.e5) + parent=None, ui_data=UIData(model=self.m), o=self.m.b1.e5 + ) cdi.ui_data.calculate_expressions() self.assertIsNone(cdi.get("value")) def test_expr_calc_value_None(self): cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.b1.e3) + parent=None, ui_data=UIData(model=self.m), o=self.m.b1.e3 + ) cdi.ui_data.calculate_expressions() self.assertIsNone(cdi.get("value")) def test_cons_calc(self): - cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.c3) + cdi = ComponentDataItem(parent=None, ui_data=UIData(model=self.m), o=self.m.c3) cdi.ui_data.calculate_constraints() self.assertAlmostEqual(cdi.get("residual"), 2) def test_cons_calc_div0(self): - cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.c4) + cdi = ComponentDataItem(parent=None, ui_data=UIData(model=self.m), o=self.m.c4) cdi.ui_data.calculate_constraints() self.assertEqual(cdi.get("value"), "Divide_by_0") def test_cons_calc_log0(self): - cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.c5) + cdi = ComponentDataItem(parent=None, ui_data=UIData(model=self.m), o=self.m.c5) cdi.ui_data.calculate_constraints() self.assertIsNone(cdi.get("value")) def test_cons_calc_log_neg(self): - cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.c6) + cdi = ComponentDataItem(parent=None, ui_data=UIData(model=self.m), o=self.m.c6) cdi.ui_data.calculate_constraints() self.assertIsNone(cdi.get("value")) def test_cons_calc_value_None(self): - cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.c7) + cdi = ComponentDataItem(parent=None, ui_data=UIData(model=self.m), o=self.m.c7) cdi.ui_data.calculate_constraints() self.assertIsNone(cdi.get("value")) def test_cons_calc_upper_div0(self): - cdi = ComponentDataItem( - parent=None, ui_data=UIData(model=self.m), o=self.m.c8) + cdi = ComponentDataItem(parent=None, ui_data=UIData(model=self.m), o=self.m.c8) cdi.ui_data.calculate_constraints() # the ui lists the upper and lower attributes as ub and lb # this was originally so I could easily combine variables and - # constarints in the same view, but I split them up, so may want - # to reconsider that choise in the future. This is to remind myself + # constraint in the same view, but I split them up, so may want + # to reconsider that choice in the future. This is to remind myself # why I'm getting "ub" and not "upper" self.assertEqual(cdi.get("ub"), "Divide_by_0") + def test_cons_calc_lower(self): + cdi = ComponentDataItem(parent=None, ui_data=UIData(model=self.m), o=self.m.c7) + cdi.ui_data.calculate_constraints() + # the ui lists the upper and lower attributes as ub and lb + # this was originally so I could easily combine variables and + # constraint in the same view, but I split them up, so may want + # to reconsider that choice in the future. This is to remind myself + # why I'm getting "ub" and not "upper" + self.assertEqual(cdi.get("lb"), 0) + def test_var_get_value(self): cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.x[1]) self.assertAlmostEqual(cdi.get("value"), 1) - self.assertIsNone(cdi.get(expr)) #test can't get expr + self.assertIsNone(cdi.get(expr)) # test can't get expr + + def test_var_get_units(self): + cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.x[1]) + self.assertAlmostEqual(cdi.get("units"), "m") + self.assertIsNone(cdi.get(expr)) # test can't get expr def test_var_get_bounds(self): cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.x[1]) @@ -157,6 +206,12 @@ def test_var_set_bounds(self): cdi.set("ub", 8) self.assertAlmostEqual(cdi.get("lb"), 2) self.assertAlmostEqual(cdi.get("ub"), 8) + cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.x) + cdi.set("lb", 0) + cdi.set("ub", 10) + cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.x[1]) + self.assertAlmostEqual(cdi.get("lb"), 0) + self.assertAlmostEqual(cdi.get("ub"), 10) def test_var_fixed_bounds(self): cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.x[1]) @@ -172,13 +227,32 @@ def test_get_attr_that_does_not_exist(self): def test_set_func(self): cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.x[1]) self.assertIsNone(cdi.set("test_val", 5)) - self.assertIsNone(cdi.get("test_val")) # test can't set + self.assertIsNone(cdi.get("test_val")) # test can't set cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.x) self.assertIsNone(cdi.set("test_val", 5)) - self.assertEqual(cdi.get("test_val"), 5) # test can set with no callback + self.assertEqual(cdi.get("test_val"), 5) # test can set with no callback + + def test_get_set_param(self): + cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.p1) + self.assertIsNone(cdi.set("value", 5)) + self.assertEqual(cdi.get("value"), 5) + self.assertIsNone(cdi.set("value", 0)) + cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.p2) + self.assertEqual(cdi.get("value"), 1) + cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.p3) + self.assertEqual(cdi.get("value"), 3.2) + self.assertEqual(cdi.get("expr"), None) + self.assertIsNone(cdi.set("value", 5)) + self.assertEqual(cdi.get("value"), 3.2) + self.assertEqual(cdi.get("units"), "dimensionless") + cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.p4) + self.assertIsNone(cdi.set("value", 6)) + cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.p4[1]) + self.assertEqual(cdi.get("value"), 6) - def test_degrees_of_freedom(self): - import pyomo.contrib.viewer.report as rpt - # this should hit everything in report. It only exists to calculate - # degrees of freedom for display in the ui - self.assertEqual(rpt.degrees_of_freedom(self.m),0) + def test_get_set_boolvar(self): + cdi = ComponentDataItem(parent=None, ui_data=None, o=self.m.y) + self.assertIsNone(cdi.set("value", True)) + self.assertEqual(pyo.value(self.m.y[1]), True) + self.assertIsNone(cdi.set("value", False)) + self.assertEqual(pyo.value(self.m.y[1]), False) diff --git a/pyomo/contrib/viewer/tests/test_data_model_tree.py b/pyomo/contrib/viewer/tests/test_data_model_tree.py index 2f3b5cfecba..db745aee9ca 100644 --- a/pyomo/contrib/viewer/tests/test_data_model_tree.py +++ b/pyomo/contrib/viewer/tests/test_data_model_tree.py @@ -25,43 +25,80 @@ """ import pyomo.common.unittest as unittest -from pyomo.environ import ConcreteModel, Var, Constraint, Objective, Reals, Block, Expression, ExternalFunction, sin, sqrt +from pyomo.environ import ( + ConcreteModel, + Var, + BooleanVar, + Constraint, + Objective, + Reals, + Block, + Expression, + ExternalFunction, + sin, + sqrt, +) +import pyomo.environ as pyo from pyomo.contrib.viewer.model_browser import ComponentDataModel +import pyomo.contrib.viewer.qt as myqt +from pyomo.common.dependencies import DeferredImportError + try: - no_pyqt = False + _x = pyo.units.m + units_available = True +except DeferredImportError: + units_available = False + +available = myqt.available + +if available: from pyomo.contrib.viewer.ui_data import UIData import pyomo.contrib.viewer.ui as ui -except: - no_pyqt = True +else: + class UIData(object): model = None + def __init__(*args, **kwargs): pass -@unittest.skipIf(no_pyqt, "PyQt needed to test tree data model") +@unittest.skipIf(not available or not units_available, "PyQt or units not available") class TestDataModel(unittest.TestCase): def setUp(self): # Borrowed this test model from the trust region tests m = ConcreteModel(name="tm") - m.z = Var(range(3), domain=Reals, initialize=2.) - m.x = Var(range(2), initialize=2.) + m.y = BooleanVar(range(3), initialize=False) + m.z = Var( + range(3), domain=Reals, initialize=2.0, units=pyo.units.m, doc="test doc" + ) + m.x = Var(range(2), initialize=2.0, units=pyo.units.m) m.x[1] = 1.0 m.b1 = Block() m.b1.e1 = Expression(expr=m.x[0] + m.x[1]) - def blackbox(a,b): - return sin(a-b) + def blackbox(a, b): + return sin(a - b) + self.bb = ExternalFunction(blackbox) m.obj = Objective( - expr=(m.z[0]-1.0)**2 + (m.z[0]-m.z[1])**2 + (m.z[2]-1.0)**2 \ - + (m.x[0]-1.0)**4 + (m.x[1]-1.0)**6 # + m.bb(m.x[0],m.x[1]) - ) - m.c1 = Constraint(expr=m.x[0] * m.z[0]**2 + self.bb(m.x[0],m.x[1]) == 2*sqrt(2.0)) - m.c2 = Constraint(expr=m.z[2]**4 * m.z[1]**2 + m.z[1] == 8+sqrt(2.0)) + expr=(m.z[0] - 1.0) ** 2 + + (m.z[0] - m.z[1]) ** 2 + + (m.z[2] - 1.0) ** 2 + + (m.x[0] - 1.0) ** 4 + + (m.x[1] - 1.0) ** 6 # + m.bb(m.x[0],m.x[1]) + ) + m.c1 = Constraint( + expr=m.x[0] * m.z[0] ** 2 + self.bb(m.x[0], m.x[1]) == 2 * sqrt(2.0) + ) + m.c2 = Constraint(expr=m.z[2] ** 4 * m.z[1] ** 2 + m.z[1] == 8 + sqrt(2.0)) + # Add a few items to test more + m.b2 = Block([1, 2]) + m.b2[1].w = Var(initialize=4) + m.b2[2].w = Var(initialize=4) self.m = m.clone() def test_create_tree_var(self): @@ -69,132 +106,164 @@ def test_create_tree_var(self): # Defaults to variables and two columns name and value data_model = ComponentDataModel(parent=None, ui_data=ui_data) # There should be one root item - assert(len(data_model.rootItems)==1) - assert(data_model.rootItems[0].data==self.m) + assert len(data_model.rootItems) == 1 + assert data_model.rootItems[0].data == self.m # The children should be in the model construction order, # and the indexes are sorted children = data_model.rootItems[0].children - assert(children[0].data == self.m.z) - assert(children[1].data == self.m.x) - assert(children[2].data == self.m.b1) + assert children[0].data == self.m.y + assert children[1].data == self.m.z + assert children[2].data == self.m.x + assert children[3].data == self.m.b1 # Check the data display role The rows in the tree should be: # 0. Model - # 0. z + # 0. y + # 0. y[0], False + # 1. y[1], False + # 2. y[2], False + # 1. z # 0. z[0], 2 # 1. z[1], 2 # 2. z[2], 2 - # 1. x + # 2. x # 0. x[0], 2 # 1. x[1], 1 - # 2. b1 - root_index = data_model.index(0,0) - assert(data_model.data(root_index)=="tm") - zidx = data_model.index(0,0,parent=root_index) - assert(data_model.data(zidx)=="z") - xidx = data_model.index(1,0,parent=root_index) - assert(data_model.data(xidx)=="x") - b1idx = data_model.index(2,0,parent=root_index) - assert(data_model.data(b1idx)=="b1") - idx = data_model.index(0,0,parent=zidx) - assert(data_model.data(idx)=="z[0]") - idx = data_model.index(0,1,parent=zidx) - assert(abs(data_model.data(idx) - 2.0) < 0.0001) - idx = data_model.index(1,0,parent=zidx) - assert(data_model.data(idx)=="z[1]") - idx = data_model.index(1,1,parent=zidx) - assert(abs(data_model.data(idx) - 2.0) < 0.0001) + # 3. b1 + root_index = data_model.index(0, 0) + assert data_model.data(root_index) == "tm" + zidx = data_model.index(0, 0, parent=root_index) + assert data_model.data(zidx) == "y" + zidx = data_model.index(1, 0, parent=root_index) + assert data_model.data(zidx) == "z" + xidx = data_model.index(2, 0, parent=root_index) + assert data_model.data(xidx) == "x" + b1idx = data_model.index(3, 0, parent=root_index) + assert data_model.data(b1idx) == "b1" + idx = data_model.index(0, 0, parent=zidx) + assert data_model.data(idx) == "z[0]" + idx = data_model.index(0, 1, parent=zidx) + assert abs(data_model.data(idx) - 2.0) < 0.0001 + idx = data_model.index(1, 0, parent=zidx) + assert data_model.data(idx) == "z[1]" + idx = data_model.index(1, 1, parent=zidx) + assert abs(data_model.data(idx) - 2.0) < 0.0001 + # tooltip gives doc + assert "test doc" == data_model.data( + zidx, role=myqt.Qt.ItemDataRole.ToolTipRole + ) + assert myqt.QtCore.Qt.blue == data_model.data( + zidx, role=myqt.Qt.ItemDataRole.ForegroundRole + ) + assert myqt.QtCore.Qt.black == data_model.data( + root_index, role=myqt.Qt.ItemDataRole.ForegroundRole + ) def test_create_tree_con(self): ui_data = UIData(model=self.m) # Make a tree with constraints - data_model = ComponentDataModel(parent=None, ui_data=ui_data, - components=(Constraint,), - columns=["name", "active"]) + data_model = ComponentDataModel( + parent=None, + ui_data=ui_data, + components=(Constraint,), + columns=["name", "active"], + ) # There should be one root item - assert(len(data_model.rootItems)==1) - assert(data_model.rootItems[0].data==self.m) + assert len(data_model.rootItems) == 1 + assert data_model.rootItems[0].data == self.m # The children should be in the model construction order, # and the indexes are sorted children = data_model.rootItems[0].children - assert(children[0].data == self.m.b1) - assert(children[1].data == self.m.c1) - assert(children[2].data == self.m.c2) + assert children[0].data == self.m.b1 + assert children[1].data == self.m.c1 + assert children[2].data == self.m.c2 # Check the data display role The rows in the tree should be: # 0. Model # 0. c1, True # 1. c2, True # 2. b1, True - root_index = data_model.index(0,0) - assert(data_model.data(root_index)=="tm") - idx = data_model.index(0,0,parent=root_index) - assert(data_model.data(idx)=="b1") - idx = data_model.index(0,1,parent=root_index) - assert(data_model.data(idx)==True) - idx = data_model.index(1,0,parent=root_index) - assert(data_model.data(idx)=="c1") - idx = data_model.index(2,0,parent=root_index) - assert(data_model.data(idx)=="c2") + root_index = data_model.index(0, 0) + assert data_model.data(root_index) == "tm" + idx = data_model.index(0, 0, parent=root_index) + assert data_model.data(idx) == "b1" + idx = data_model.index(0, 1, parent=root_index) + assert data_model.data(idx) == True + idx = data_model.index(1, 0, parent=root_index) + assert data_model.data(idx) == "c1" + idx = data_model.index(2, 0, parent=root_index) + assert data_model.data(idx) == "c2" + c2 = idx.internalPointer() + c2.set("active", False) + assert not self.m.c2.active + c2.set("active", True) + assert self.m.c2.active + assert "z[2]" in data_model.data(idx, role=myqt.Qt.ItemDataRole.ToolTipRole) def test_create_tree_expr(self): ui_data = UIData(model=self.m) # Make a tree with constraints - data_model = ComponentDataModel(parent=None, ui_data=ui_data, - components=(Expression,), - columns=["name", "value"]) + data_model = ComponentDataModel( + parent=None, + ui_data=ui_data, + components=(Expression,), + columns=["name", "value"], + ) # There should be one root item - assert(len(data_model.rootItems)==1) - assert(data_model.rootItems[0].data==self.m) + assert len(data_model.rootItems) == 1 + assert data_model.rootItems[0].data == self.m # The children should be in the model construction order, # and the indexes are sorted children = data_model.rootItems[0].children - assert(children[0].data == self.m.b1) - assert(children[0].children[0].data == self.m.b1.e1) + assert children[0].data == self.m.b1 + assert children[0].children[0].data == self.m.b1.e1 ui_data.calculate_expressions() # Check the data display role The rows in the tree should be: # 0. Model # 0. b1, # 0. e1, value - root_index = data_model.index(0,0) - b1_index = data_model.index(0,0,parent=root_index) - e1_index0 = data_model.index(0,0,parent=b1_index) - e1_index1 = data_model.index(0,1,parent=b1_index) - assert(data_model.data(e1_index0)=="b1.e1") - assert(abs(data_model.data(e1_index1) - 3.0) < 0.0001) + root_index = data_model.index(0, 0) + b1_index = data_model.index(0, 0, parent=root_index) + e1_index0 = data_model.index(0, 0, parent=b1_index) + e1_index1 = data_model.index(0, 1, parent=b1_index) + assert data_model.data(e1_index0) == "b1.e1" + assert abs(data_model.data(e1_index1) - 3.0) < 0.0001 def test_update_tree_expr(self): ui_data = UIData(model=self.m) # Make a tree with constraints - data_model = ComponentDataModel(parent=None, ui_data=ui_data, - components=(Expression,), - columns=["name", "value"]) + data_model = ComponentDataModel( + parent=None, + ui_data=ui_data, + components=(Expression,), + columns=["name", "value"], + ) self.m.newe = Expression(expr=self.m.x[0] + self.m.x[1]) data_model._update_tree() # There should be one root item - assert(len(data_model.rootItems)==1) - assert(data_model.rootItems[0].data==self.m) + assert len(data_model.rootItems) == 1 + assert data_model.rootItems[0].data == self.m # The children should be in the model construction order, # and the indexes are sorted children = data_model.rootItems[0].children - assert(children[0].data == self.m.b1) - assert(children[0].children[0].data == self.m.b1.e1) + assert children[0].data == self.m.b1 + assert children[0].children[0].data == self.m.b1.e1 ui_data.calculate_expressions() # Check the data display role The rows in the tree should be: # 0. Model # 0. b1, # 0. e1, value - root_index = data_model.index(0,0) - b1_index = data_model.index(0,0,parent=root_index) - e1_index0 = data_model.index(0,0,parent=b1_index) - e1_index1 = data_model.index(0,1,parent=b1_index) - assert(data_model.data(e1_index0)=="b1.e1") - assert(abs(data_model.data(e1_index1) - 3.0) < 0.0001) + root_index = data_model.index(0, 0) + b1_index = data_model.index(0, 0, parent=root_index) + e1_index0 = data_model.index(0, 0, parent=b1_index) + e1_index1 = data_model.index(0, 1, parent=b1_index) + assert data_model.data(e1_index0) == "b1.e1" + assert abs(data_model.data(e1_index1) - 3.0) < 0.0001 # Check that in the update the new expression was added found = False for i in children: if id(i.data) == id(self.m.newe): found = True break - assert(found) + assert found diff --git a/pyomo/contrib/viewer/tests/test_qt.py b/pyomo/contrib/viewer/tests/test_qt.py index 9ec8eeaa7b8..38a022b6668 100644 --- a/pyomo/contrib/viewer/tests/test_qt.py +++ b/pyomo/contrib/viewer/tests/test_qt.py @@ -24,14 +24,28 @@ UI Tests """ -from pyomo.environ import (ConcreteModel, Var, Param, Constraint, Objective, - Reals, Block, Expression, ExternalFunction, - sin, sqrt, log) +from pyomo.environ import ( + ConcreteModel, + Var, + BooleanVar, + Param, + Constraint, + Objective, + Reals, + Block, + Expression, + ExternalFunction, + sin, + sqrt, + log, + value, +) import pyomo.common.unittest as unittest +import pyomo.contrib.viewer.qt as myqt +import pyomo.contrib.viewer.pyomo_viewer as pv +from pyomo.contrib.viewer.qt import available -from pyomo.contrib.viewer.qt import qt_available - -if qt_available: +if available: import contextvars from pyomo.contrib.viewer.qt import QtCore, QMessageBox from pyomo.contrib.viewer.ui import get_mainwindow, ModelBrowser @@ -43,71 +57,182 @@ def qtbot(): """Overwrite qtbot - remove test failure""" return + def get_model(): # Borrowed this test model from the trust region tests m = ConcreteModel() - m.z = Var(range(3), domain=Reals, initialize=2.) - m.x = Var(range(4), initialize=2.) + m.y = BooleanVar(range(3), initialize=False) + m.z = Var(range(3), domain=Reals, initialize=2.0) + m.x = Var(range(4), initialize=2.0) m.x[1] = 1.0 m.x[2] = 0.0 m.x[3] = None m.b1 = Block() m.b1.e1 = Expression(expr=m.x[0] + m.x[1]) - m.b1.e2 = Expression(expr=m.x[0]/m.x[2]) - m.b1.e3 = Expression(expr=m.x[3]*m.x[1]) + m.b1.e2 = Expression(expr=m.x[0] / m.x[2]) + m.b1.e3 = Expression(expr=m.x[3] * m.x[1]) m.b1.e4 = Expression(expr=log(m.x[2])) m.b1.e5 = Expression(expr=log(m.x[2] - 2)) - def blackbox(a,b): - return sin(a-b) + def blackbox(a, b): + return sin(a - b) + m.bb = ExternalFunction(blackbox) m.obj = Objective( - expr=(m.z[0]-1.0)**2 + (m.z[0]-m.z[1])**2 + (m.z[2]-1.0)**2 \ - + (m.x[0]-1.0)**4 + (m.x[1]-1.0)**6 # + m.bb(m.x[0],m.x[1]) - ) - m.c1 = Constraint(expr=m.x[0] * m.z[0]**2 + m.bb(m.x[0],m.x[1]) == 2*sqrt(2.0)) - m.c2 = Constraint(expr=m.z[2]**4 * m.z[1]**2 + m.z[1] == 8+sqrt(2.0)) + expr=(m.z[0] - 1.0) ** 2 + + (m.z[0] - m.z[1]) ** 2 + + (m.z[2] - 1.0) ** 2 + + (m.x[0] - 1.0) ** 4 + + (m.x[1] - 1.0) ** 6 # + m.bb(m.x[0],m.x[1]) + ) + m.c1 = Constraint(expr=m.x[0] * m.z[0] ** 2 + m.bb(m.x[0], m.x[1]) == 2 * sqrt(2.0)) + m.c2 = Constraint(expr=m.z[2] ** 4 * m.z[1] ** 2 + m.z[1] == 8 + sqrt(2.0)) m.c3 = Constraint(expr=m.x[1] == 3) - m.c4 = Constraint(expr=0 == 3/m.x[2]) + m.c4 = Constraint(expr=0 == 3 / m.x[2]) m.c5 = Constraint(expr=0 == log(m.x[2])) - m.c6 = Constraint(expr=0 == log(m.x[2]-4)) + m.c6 = Constraint(expr=0 == log(m.x[2] - 4)) m.c7 = Constraint(expr=0 == log(m.x[3])) m.p1 = Param(mutable=True, initialize=1) - m.c8 = Constraint(expr = m.x[1] <= 1/m.p1) + m.c8 = Constraint(expr=m.x[1] <= 1 / m.p1) m.p1 = 0 return m -@unittest.skipIf(not qt_available, - "Qt packages are not available.") + +@unittest.skipIf(not available, "Qt packages are not available.") def test_get_mainwindow(qtbot): m = get_model() mw, m = get_mainwindow(model=m, testing=True) - assert(hasattr(mw, "menuBar")) - assert(isinstance(mw.variables, ModelBrowser)) - assert(isinstance(mw.constraints, ModelBrowser)) - assert(isinstance(mw.expressions, ModelBrowser)) - assert(isinstance(mw.parameters, ModelBrowser)) + assert hasattr(mw, "menuBar") + assert isinstance(mw.variables, ModelBrowser) + assert isinstance(mw.constraints, ModelBrowser) + assert isinstance(mw.expressions, ModelBrowser) + assert isinstance(mw.parameters, ModelBrowser) + + +@unittest.skipIf(not available, "Qt packages are not available.") +def test_close_mainwindow(qtbot): + mw, m = get_mainwindow(model=None, testing=True) + mw.exit_action() -@unittest.skipIf(not qt_available, - "Qt packages are not available.") +@unittest.skipIf(not available, "Qt packages are not available.") +def test_show_model_select_no_models(qtbot): + mw, m = get_mainwindow(model=None, testing=True) + ms = mw.show_model_select() + ms.update_models() + ms.select_model() + + +@unittest.skipIf(not available, "Qt packages are not available.") def test_model_information(qtbot): m = get_model() mw, m = get_mainwindow(model=m, testing=True) mw.model_information() - assert(isinstance(mw._dialog, QMessageBox)) + assert isinstance(mw._dialog, QMessageBox) text = mw._dialog.text() mw._dialog.close() text = text.split("\n") - assert(str(text[0]).startswith("8")) # Active constraints - assert(str(text[1]).startswith("7")) # Active equalities - assert(str(text[2]).startswith("7")) # Free vars in active equalities - assert(str(text[3]).startswith("0")) # degrees of feedom - # Main window has parts it is supposed to - assert(hasattr(mw, "menuBar")) - assert(isinstance(mw.variables, ModelBrowser)) - assert(isinstance(mw.constraints, ModelBrowser)) - assert(isinstance(mw.expressions, ModelBrowser)) - assert(isinstance(mw.parameters, ModelBrowser)) + assert str(text[0]).startswith("8") # Active constraints + assert str(text[1]).startswith("7") # Active equalities + assert str(text[2]).startswith("7") # Free vars in active equalities + assert str(text[3]).startswith("0") # degrees of feedom + # Main window has parts it is supposed to + assert hasattr(mw, "menuBar") + assert isinstance(mw.variables, ModelBrowser) + assert isinstance(mw.constraints, ModelBrowser) + assert isinstance(mw.expressions, ModelBrowser) + assert isinstance(mw.parameters, ModelBrowser) + + +@unittest.skipIf(not available, "Qt packages are not available.") +def test_tree_expand_collapse(qtbot): + m = get_model() + mw, m = get_mainwindow(model=m, testing=True) + mw.variables.treeView.expandAll() + mw.variables.treeView.collapseAll() + + +@unittest.skipIf(not available, "Qt packages are not available.") +def test_residual_table(qtbot): + m = get_model() + mw, m = get_mainwindow(model=m, testing=True) + mw.residuals_restart() + mw.ui_data.calculate_expressions() + mw.residuals.calculate() + mw.residuals_restart() + mw.residuals.sort() + dm = mw.residuals.tableView.model() + # Name + assert dm.data(dm.index(0, 0)) == "c4" + # residual value + assert dm.data(dm.index(0, 1)) == "Divide_by_0" + # body value + assert dm.data(dm.index(0, 2)) == "Divide_by_0" + # upper + assert dm.data(dm.index(0, 3)) == 0 + # lower + assert dm.data(dm.index(0, 4)) == 0 + # active + assert dm.data(dm.index(0, 5)) == True + m.c4.deactivate() + mw.residuals.sort() + assert dm.data(dm.index(0, 0)) == "c5" + + +@unittest.skipIf(not available, "Qt packages are not available.") +def test_var_tree(qtbot): + m = get_model() + mw, m = get_mainwindow(model=m, testing=True) + qtbot.addWidget(mw) + mw.variables.treeView.expandAll() + root_index = mw.variables.datmodel.index(0, 0) + z_index = mw.variables.datmodel.index(1, 0, parent=root_index) + z_val_index = mw.variables.datmodel.index(1, 1, parent=root_index) + z1_val_index = mw.variables.datmodel.index(0, 1, parent=z_index) + assert mw.variables.datmodel.data(z1_val_index) == 2.0 + mw.variables.treeView.setCurrentIndex(z1_val_index) + mw.variables.treeView.openPersistentEditor(z1_val_index) + d = mw.variables.treeView.itemDelegate() + w = mw.variables.treeView.indexWidget(z1_val_index) + w.setText("Not a number") + d.setModelData(w, mw.variables.datmodel, z1_val_index) + assert (value(m.z[0]) - 2.0) < 1e-6 # unchanged + w.setText("1e5") + d.setModelData(w, mw.variables.datmodel, z1_val_index) + assert (value(m.z[0]) - 1e5) < 1e-6 # set float + w.setText("false") + d.setModelData(w, mw.variables.datmodel, z1_val_index) + assert (value(m.z[0]) - 0) < 1e-6 # bool to 0 + w.setText("true") + d.setModelData(w, mw.variables.datmodel, z1_val_index) + assert (value(m.z[0]) - 1) < 1e-6 # bool to 1 + mw.variables.treeView.closePersistentEditor(z1_val_index) + w.setText("2") + d.setModelData(w, mw.variables.datmodel, z1_val_index) + assert (value(m.z[0]) - 2) < 1e-6 # set int + mw.variables.treeView.closePersistentEditor(z1_val_index) + + +@unittest.skipIf(not available, "Qt packages are not available.") +def test_bad_view(qtbot): + m = get_model() + mw, m = get_mainwindow(model=m, testing=True) + err = None + try: + mw.badTree = mw._tree_restart( + w=mw.variables, standard="Bad Stuff", ui_data=mw.ui_data + ) + except ValueError: + err = "ValueError" + assert err == "ValueError" + + +@unittest.skipIf(not available, "Qt packages are not available.") +def test_qtconsole_app(qtbot): + app = pv.QtApp() + # empty list to prevent picking up args from pytest + app.initialize([]) + app.show_ui() + app.hide_ui() diff --git a/pyomo/contrib/viewer/tests/test_report.py b/pyomo/contrib/viewer/tests/test_report.py new file mode 100644 index 00000000000..b496e2294ff --- /dev/null +++ b/pyomo/contrib/viewer/tests/test_report.py @@ -0,0 +1,210 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# ___________________________________________________________________________ +# +# This module was originally developed as part of the IDAES PSE Framework +# +# Institute for the Design of Advanced Energy Systems Process Systems +# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the +# software owners: The Regents of the University of California, through +# Lawrence Berkeley National Laboratory, National Technology & Engineering +# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia +# University Research Corporation, et al. All rights reserved. +# +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +""" +Test data model items for QTreeView. These tests should work even without PyQt. +""" + +import pyomo.common.unittest as unittest +from pyomo.common.collections import ComponentSet +from pyomo.environ import ( + ConcreteModel, + Var, + BooleanVar, + Block, + Param, + Expression, + Constraint, + Objective, + ExternalFunction, + Reals, + log, + sin, + sqrt, + expr, +) +import pyomo.contrib.viewer.report as rpt +import pyomo.contrib.viewer.ui_data as uidata + + +class TestReportFunctions(unittest.TestCase): + def setUp(self): + # Borrowed this test model from the trust region tests + m = ConcreteModel() + m.z = Var(range(3), domain=Reals, initialize=2.0) + m.x = Var(range(4), initialize=2.0) + m.x[1] = 1.0 + m.x[2] = 0.0 + m.x[3] = None + + m.b1 = Block() + m.b1.e1 = Expression(expr=m.x[0] + m.x[1]) + m.b1.e2 = Expression(expr=m.x[0] / m.x[2]) + m.b1.e3 = Expression(expr=m.x[3] * m.x[1]) + m.b1.e4 = Expression(expr=log(m.x[2])) + m.b1.e5 = Expression(expr=log(m.x[2] - 2)) + + def blackbox(a, b): + return sin(a - b) + + self.bb = ExternalFunction(blackbox) + + m.obj = Objective( + expr=(m.z[0] - 1.0) ** 2 + + (m.z[0] - m.z[1]) ** 2 + + (m.z[2] - 1.0) ** 2 + + (m.x[0] - 1.0) ** 4 + + (m.x[1] - 1.0) ** 6 # + m.bb(m.x[0],m.x[1]) + ) + m.c1 = Constraint( + expr=m.x[0] * m.z[0] ** 2 + self.bb(m.x[0], m.x[1]) == 2 * sqrt(2.0) + ) + m.c2 = Constraint(expr=m.z[2] ** 4 * m.z[1] ** 2 + m.z[1] == 8 + sqrt(2.0)) + m.c3 = Constraint(expr=m.x[1] == 3) + m.c4 = Constraint(expr=0 == 3 / m.x[2]) + m.c5 = Constraint(expr=0 == log(m.x[2])) + m.c6 = Constraint(expr=0 == log(m.x[2] - 4)) + m.c7 = Constraint(expr=0 == log(m.x[3])) + m.p1 = Param(mutable=True, initialize=1) + m.c8 = Constraint(expr=m.x[1] <= 1 / m.p1) + m.c8b = Constraint(expr=m.x[1] >= 1 / m.p1) + m.c9 = Constraint(expr=m.x[1] <= 1) + m.c10 = Constraint(expr=m.x[1] >= 1) + m.p1 = 0 + self.m = m.clone() + + def test_value_no_exception(self): + # Try to divide by zero + self.m.x[2] = 0 + v = rpt.value_no_exception(self.m.b1.e2, div0="I like to divide by zero") + assert v == "I like to divide by zero" + # Try as calculation with None + self.m.x[2] = None + v = rpt.value_no_exception(self.m.b1.e2, div0=None) + assert v is None + # Try log of negative number + self.m.x[2] = 0.0 + v = rpt.value_no_exception(self.m.b1.e5) + assert v is None + # Try a valid calculation + self.m.x[2] = 2.0 + v = rpt.value_no_exception(self.m.b1.e2, div0=None) + self.assertAlmostEqual(v, 1) + + def test_get_residual(self): + dat = uidata.UIData(self.m) + # so that the model viewer doesn't run slow on large models, + # you have to explicitly ask for constraints and expressions + # to be calculated. Getting the residual before calculation + # should just give None + assert rpt.get_residual(dat, self.m.c3) is None + dat.calculate_constraints() + self.assertAlmostEqual(rpt.get_residual(dat, self.m.c3), 2.0) + # In c8 the bound has a divide by 0, I think this is only possible + # with a mutable param + assert rpt.get_residual(dat, self.m.c8) == "Divide_by_0" + assert rpt.get_residual(dat, self.m.c8b) == "Divide_by_0" + self.m.x[2] = 0 + assert rpt.get_residual(dat, self.m.c4) == "Divide_by_0" + self.m.x[2] = 2 + # haven't recalculated so still error + assert rpt.get_residual(dat, self.m.c4) == "Divide_by_0" + dat.calculate_constraints() + self.assertAlmostEqual(rpt.get_residual(dat, self.m.c4), 3.0 / 2.0) + self.assertAlmostEqual(rpt.get_residual(dat, self.m.c9), 0) + self.assertAlmostEqual(rpt.get_residual(dat, self.m.c10), 0) + + def test_active_equalities(self): + eq = [ + self.m.c1, + self.m.c2, + self.m.c3, + self.m.c4, + self.m.c5, + self.m.c6, + self.m.c7, + ] + for i, o in enumerate(rpt.active_equalities(self.m)): + assert o == eq[i] + + def test_active_constraint_set(self): + self.m.c4.deactivate() + assert rpt.active_constraint_set(self.m) == ComponentSet( + [ + self.m.c1, + self.m.c2, + self.m.c3, + self.m.c5, + self.m.c6, + self.m.c7, + self.m.c8, + self.m.c8b, + self.m.c9, + self.m.c10, + ] + ) + self.m.c4.activate() + assert rpt.active_constraint_set(self.m) == ComponentSet( + [ + self.m.c1, + self.m.c2, + self.m.c3, + self.m.c4, + self.m.c5, + self.m.c6, + self.m.c7, + self.m.c8, + self.m.c8b, + self.m.c9, + self.m.c10, + ] + ) + + def test_active_equality_set(self): + self.m.c4.deactivate() + assert rpt.active_equality_set(self.m) == ComponentSet( + [self.m.c1, self.m.c2, self.m.c3, self.m.c5, self.m.c6, self.m.c7] + ) + self.m.c4.activate() + assert rpt.active_equality_set(self.m) == ComponentSet( + [ + self.m.c1, + self.m.c2, + self.m.c3, + self.m.c4, + self.m.c5, + self.m.c6, + self.m.c7, + ] + ) + + def test_count_free_variables(self): + assert rpt.count_free_variables(self.m) == 7 + + def test_count_equality_constraints(self): + assert rpt.count_equality_constraints(self.m) == 7 + + def test_count_constraints(self): + assert rpt.count_constraints(self.m) == 11 + + def test_degrees_of_freedom(self): + assert rpt.degrees_of_freedom(self.m) == 0 diff --git a/pyomo/contrib/viewer/ui.py b/pyomo/contrib/viewer/ui.py index 8c6aabe8d79..8a621534b31 100644 --- a/pyomo/contrib/viewer/ui.py +++ b/pyomo/contrib/viewer/ui.py @@ -23,80 +23,83 @@ """ A simple GUI viewer/editor for Pyomo models. """ -from __future__ import division, print_function, absolute_import - __author__ = "John Eslick" import os import logging + try: from IPython import get_ipython except ImportError: + def get_ipython(): raise AttributeError("IPython not available") -import pyomo.contrib.viewer.report as rpt -import pyomo.environ as pyo -_log = logging.getLogger(__name__) -from pyomo.contrib.viewer.qt import * +import pyomo.contrib.viewer.report as rpt +import pyomo.environ as pyo +import pyomo.contrib.viewer.qt as myqt from pyomo.contrib.viewer.model_browser import ModelBrowser from pyomo.contrib.viewer.residual_table import ResidualTable from pyomo.contrib.viewer.model_select import ModelSelect from pyomo.contrib.viewer.ui_data import UIData +from pyomo.common.fileutils import this_file_dir + +_log = logging.getLogger(__name__) -_mypath = os.path.dirname(__file__) +_mypath = this_file_dir() try: - _MainWindowUI, _MainWindow = \ - uic.loadUiType(os.path.join(_mypath, "main.ui")) + _MainWindowUI, _MainWindow = myqt.uic.loadUiType(os.path.join(_mypath, "main.ui")) except: _log.exception("Failed to load UI files.") + # This lets the file still be imported, but you won't be able to use it # Allowing this to be imported will let some basic tests pass without PyQt class _MainWindowUI(object): pass + class _MainWindow(object): pass -if not qt_available: - for _err in qt_import_errors: - _log.error(_err) - _log.error("Qt is not available. Cannot create UI classes.") - raise ImportError("Could not import PyQt4 or PyQt5") -def get_mainwindow(model=None, show=True, testing=False): +for _err in myqt.import_errors: + _log.error(_err) + + +def get_mainwindow(model=None, show=True, ask_close=True, testing=False): """ Create a UI MainWindow. Args: model: A Pyomo model to work with show: show the window after it is created + ask_close: confirm close window + testing: if True, expect testing Returns: (ui, model): ui is the MainWindow widget, and model is the linked Pyomo model. If no model is provided a new ConcreteModel is created """ if model is None: model = pyo.ConcreteModel(name="Default") - ui = MainWindow(model=model, testing=testing) + ui = MainWindow(model=model, ask_close=ask_close, testing=testing) try: - get_ipython().events.register('post_execute', ui.refresh_on_execute) + get_ipython().events.register("post_execute", ui.refresh_on_execute) except AttributeError: - pass # not in ipy kernel, so is fine to not register callback - if show: ui.show() + pass # not in ipy kernel, so is fine to not register callback + if show: + ui.show() return ui, model -def get_mainwindow_nb(model=None, show=True, testing=False): - return get_mainwindow(model=model, show=show, testing=testing) - class MainWindow(_MainWindow, _MainWindowUI): def __init__(self, *args, **kwargs): model = self.model = kwargs.pop("model", None) - main = self.model = kwargs.pop("main", None) + main = self.main = kwargs.pop("main", None) + ask_close = self.ask_close = kwargs.pop("ask_close", True) self.testing = kwargs.pop("testing", False) flags = kwargs.pop("flags", 0) self.ui_data = UIData(model=model) - super(MainWindow, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.setupUi(self) self.setCentralWidget(self.mdiArea) @@ -106,7 +109,6 @@ def __init__(self, *args, **kwargs): self.expressions = None self.parameters = None self.residuals = None - self.update_model() self.ui_data.updated.connect(self.update_model) @@ -114,50 +116,46 @@ def __init__(self, *args, **kwargs): # you can edit the menus in qt-designer self.actionModel_Selector.triggered.connect(self.show_model_select) self.ui_data.exec_refresh.connect(self.refresh_on_execute) - self.actionRestart_Variable_View.triggered.connect( - self.variables_restart) - self.actionRestart_Constraint_View.triggered.connect( - self.constraints_restart) - self.actionRestart_Parameter_View.triggered.connect( - self.parameters_restart) - self.actionRestart_Expression_View.triggered.connect( - self.expressions_restart) - self.actionRestart_Residual_Table.triggered.connect( - self.residuals_restart) + self.actionRestart_Variable_View.triggered.connect(self.variables_restart) + self.actionRestart_Constraint_View.triggered.connect(self.constraints_restart) + self.actionRestart_Parameter_View.triggered.connect(self.parameters_restart) + self.actionRestart_Expression_View.triggered.connect(self.expressions_restart) + self.actionRestart_Residual_Table.triggered.connect(self.residuals_restart) self.actionInformation.triggered.connect(self.model_information) self.actionCalculateConstraints.triggered.connect( - self.ui_data.calculate_constraints) + self.ui_data.calculate_constraints + ) self.actionCalculateExpressions.triggered.connect( - self.ui_data.calculate_expressions) + self.ui_data.calculate_expressions + ) self.actionTile.triggered.connect(self.mdiArea.tileSubWindows) self.actionCascade.triggered.connect(self.mdiArea.cascadeSubWindows) self.actionTabs.triggered.connect(self.toggle_tabs) - self._dialog = None #dialog displayed so can access it easier for tests - self._dialog_test_button = None # button clicked on dialog in test mode - self.mdiArea.setViewMode(QMdiArea.TabbedView) + self._dialog = None # dialog displayed so can access it easier for tests + self._dialog_test_button = None # button clicked on dialog in test mode + self.mdiArea.setViewMode(myqt.QMdiArea.TabbedView) def toggle_tabs(self): - # Could use not here, but this is a little more future proof - if self.mdiArea.viewMode() == QMdiArea.SubWindowView: - self.mdiArea.setViewMode(QMdiArea.TabbedView) - elif self.mdiArea.viewMode() == QMdiArea.TabbedView: - self.mdiArea.setViewMode(QMdiArea.SubWindowView) + if self.mdiArea.viewMode() == myqt.QMdiArea.SubWindowView: + self.mdiArea.setViewMode(myqt.QMdiArea.TabbedView) + elif self.mdiArea.viewMode() == myqt.QMdiArea.TabbedView: + self.mdiArea.setViewMode(myqt.QMdiArea.SubWindowView) else: # There are no other modes unless there is a change in Qt so pass pass def _tree_restart(self, w, cls=ModelBrowser, **kwargs): """ - Start/Restart the variables window + Start/Restart a tree window """ try: self._refresh_list.remove(w) - except ValueError: # not in list? that's okay + except ValueError: # not in list? that's okay pass try: try: self.mdiArea.removeSubWindow(w.parent()) - except RuntimeError: # user closed with "X" button + except RuntimeError: # user closed with "X" button pass del w w = None @@ -165,29 +163,34 @@ def _tree_restart(self, w, cls=ModelBrowser, **kwargs): pass w = cls(**kwargs) self.mdiArea.addSubWindow(w) - w.parent().show() # parent is now a MdiAreaSubWindow + w.parent().show() # parent is now a MdiAreaSubWindow self._refresh_list.append(w) return w def variables_restart(self): self.variables = self._tree_restart( - w=self.variables, standard="Var", ui_data=self.ui_data) + w=self.variables, standard="Var", ui_data=self.ui_data + ) def expressions_restart(self): self.expressions = self._tree_restart( - w=self.expressions, standard="Expression", ui_data=self.ui_data) + w=self.expressions, standard="Expression", ui_data=self.ui_data + ) def parameters_restart(self): self.parameters = self._tree_restart( - w=self.parameters, standard="Param", ui_data=self.ui_data) + w=self.parameters, standard="Param", ui_data=self.ui_data + ) def constraints_restart(self): self.constraints = self._tree_restart( - w=self.constraints, standard="Constraint", ui_data=self.ui_data) + w=self.constraints, standard="Constraint", ui_data=self.ui_data + ) def residuals_restart(self): self.residuals = self._tree_restart( - w=self.residuals, cls=ResidualTable, ui_data=self.ui_data) + w=self.residuals, cls=ResidualTable, ui_data=self.ui_data + ) def set_model(self, model): self.ui_data.model = model @@ -231,17 +234,19 @@ def model_information(self): doftext = "Degree" else: doftext = "Degrees" - msg = QMessageBox() + msg = myqt.QMessageBox() msg.setStyleSheet("QLabel{min-width: 600px;}") self._dialog = msg - #msg.setIcon(QMessageBox.Information) msg.setWindowTitle("Model Information") msg.setText( -"""{} -- Active Constraints + """{} -- Active Constraints {} -- Active Equalities {} -- Free Variables -{} -- {} of Freedom""".format(cons, active_eq, free_vars, dof, doftext)) - msg.setStandardButtons(QMessageBox.Ok) +{} -- {} of Freedom""".format( + cons, active_eq, free_vars, dof, doftext + ) + ) + msg.setStandardButtons(myqt.QMessageBox.Ok) msg.setModal(False) msg.show() @@ -254,13 +259,14 @@ def refresh_on_execute(self): for w in self._refresh_list: try: w.refresh() - except RuntimeError: # window closed by user pushing "X" button + except RuntimeError: # window closed by user pushing "X" button pass def show_model_select(self): model_select = ModelSelect(parent=self, ui_data=self.ui_data) model_select.update_models() model_select.show() + return model_select def exit_action(self): """ @@ -272,18 +278,23 @@ def closeEvent(self, event): """ Handle the close event by asking for confirmation """ - msg = QMessageBox() + if not self.ask_close: + event.accept() + return + msg = myqt.QMessageBox() self._dialog = msg - msg.setIcon(QMessageBox.Question) - msg.setText("Are you sure you want to close this window?" - " You can reopen it with ui.show().") + msg.setIcon(myqt.QMessageBox.Question) + msg.setText( + "Are you sure you want to close this window?" + " You can reopen it with ui.show()." + ) msg.setWindowTitle("Close?") - msg.setStandardButtons(QMessageBox.Yes | QMessageBox.No) - if self.testing: # don't even show dialog just pretend button clicked + msg.setStandardButtons(myqt.QMessageBox.Yes | myqt.QMessageBox.No) + if self.testing: # don't even show dialog just pretend button clicked result = self._dialog_test_button else: result = msg.exec_() - if result == QMessageBox.Yes: + if result == myqt.QMessageBox.Yes: event.accept() else: event.ignore() diff --git a/pyomo/contrib/viewer/ui_data.py b/pyomo/contrib/viewer/ui_data.py index da848729a75..8bbaac14e13 100644 --- a/pyomo/contrib/viewer/ui_data.py +++ b/pyomo/contrib/viewer/ui_data.py @@ -23,8 +23,6 @@ """ UI data objects for sharing data and settings between different parts of the UI. """ -from __future__ import division, print_function, absolute_import - __author__ = "John Eslick" import logging @@ -34,11 +32,13 @@ _log = logging.getLogger(__name__) + class UIDataNoUi(object): """ This is the UIData object minus the signals. This is the base class for UIData. The class is split this way for testing when PyQt is not available. """ + def __init__(self, model=None): """ This class holds the basic UI setup, but doesn't depend on Qt. It @@ -47,10 +47,11 @@ def __init__(self, model=None): Args: model: The Pyomo model to view """ - super(UIDataNoUi, self).__init__() + super().__init__() self._model = None self._begin_update = False self.value_cache = ComponentMap() + self.value_cache_units = ComponentMap() self.begin_update() self.model = model self.end_update() @@ -91,6 +92,7 @@ def model(self): def model(self, value): self._model = value self.value_cache = ComponentMap() + self.value_cache_units = ComponentMap() self.emit_update() def calculate_constraints(self): @@ -107,24 +109,33 @@ def calculate_expressions(self): self.value_cache[o] = pyo.value(o, exception=False) except ZeroDivisionError: self.value_cache[o] = "Divide_by_0" + try: + self.value_cache_units[o] = str(pyo.units.get_units(o)) + except: + # If units aren't obtainable for whatever reason, let it go. + pass self.emit_exec_refresh() -if not qt_available: + +if not available: + class UIData(UIDataNoUi): pass + else: + class UIData(UIDataNoUi, QtCore.QObject): - updated = QtCore.pyqtSignal() - exec_refresh = QtCore.pyqtSignal() - def __init__(self, *args, **kwargs): + updated = Signal() + exec_refresh = Signal() + def __init__(self, *args, **kwargs): """ This class holds the basic UI setup Args: model: The Pyomo model to view """ - super(UIData, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) def end_update(self, emit=True): """ @@ -132,7 +143,7 @@ def end_update(self, emit=True): are changed and emit update for changes made between begin_update and end_update """ - super(UIData, self).end_update(emit=emit) + super().end_update(emit=emit) if emit: self.emit_update() diff --git a/pyomo/core/__init__.py b/pyomo/core/__init__.py index 4736e5a90c6..0f9fb5b6951 100644 --- a/pyomo/core/__init__.py +++ b/pyomo/core/__init__.py @@ -10,22 +10,51 @@ # ___________________________________________________________________________ from pyomo.core.expr.numvalue import ( - value, is_constant, is_fixed, is_variable_type, - is_potentially_variable, NumericValue, ZeroConstant, - native_numeric_types, native_types, polynomial_degree, + value, + is_constant, + is_fixed, + is_variable_type, + is_potentially_variable, + NumericValue, + ZeroConstant, + native_numeric_types, + native_types, + polynomial_degree, ) from pyomo.core.expr.boolean_value import BooleanValue -from pyomo.core.expr.numeric_expr import linear_expression, nonlinear_expression -from pyomo.core.expr.logical_expr import (land, lor, equivalent, exactly, - atleast, atmost, implies, lnot, - xor, inequality) - -from pyomo.core.expr.current import ( - log, log10, sin, cos, tan, cosh, sinh, tanh, - asin, acos, atan, exp, sqrt, asinh, acosh, - atanh, ceil, floor, +from pyomo.core.expr import ( + linear_expression, + nonlinear_expression, + land, + lor, + equivalent, + exactly, + atleast, + atmost, + implies, + lnot, + xor, + inequality, + log, + log10, + sin, + cos, + tan, + cosh, + sinh, + tanh, + asin, + acos, + atan, + exp, + sqrt, + asinh, + acosh, + atanh, + ceil, + floor, Expr_if, ) @@ -36,107 +65,153 @@ from pyomo.common.collections import ComponentMap from pyomo.core.expr.symbol_map import SymbolMap -from pyomo.core.expr import (numvalue, numeric_expr, boolean_value, - logical_expr, current, symbol_map, sympy_tools, - taylor_series, visitor, expr_common, expr_errors, - calculus) +from pyomo.core.expr import ( + numvalue, + numeric_expr, + boolean_value, + logical_expr, + symbol_map, + sympy_tools, + taylor_series, + visitor, + expr_common, + expr_errors, + calculus, +) from pyomo.core import expr, util, kernel -from pyomo.core.expr.numvalue import (nonpyomo_leaf_types, - PyomoObject, - native_numeric_types, - value, is_constant, - is_fixed, is_variable_type, - is_potentially_variable, - polynomial_degree, - NumericValue, - ZeroConstant) +from pyomo.core.expr.numvalue import ( + nonpyomo_leaf_types, + PyomoObject, + native_numeric_types, + value, + is_constant, + is_fixed, + is_variable_type, + is_potentially_variable, + polynomial_degree, + NumericValue, + ZeroConstant, +) from pyomo.core.expr.boolean_value import ( - as_boolean, BooleanConstant, BooleanValue, - native_logical_values) -from pyomo.core.kernel.objective import (minimize, - maximize) + as_boolean, + BooleanConstant, + BooleanValue, + native_logical_values, +) +from pyomo.core.kernel.objective import minimize, maximize from pyomo.core.base.config import PyomoOptions from pyomo.core.base.expression import Expression -from pyomo.core.base.label import (CuidLabeler, - CounterLabeler, NumericLabeler, - CNameLabeler, TextLabeler, - AlphaNumericTextLabeler, NameLabeler, - ShortNameLabeler) +from pyomo.core.base.label import ( + CuidLabeler, + CounterLabeler, + NumericLabeler, + CNameLabeler, + TextLabeler, + AlphaNumericTextLabeler, + NameLabeler, + ShortNameLabeler, +) # # Components # -from pyomo.core.base.component import (name, Component, ModelComponentFactory) +from pyomo.core.base.component import name, Component, ModelComponentFactory from pyomo.core.base.componentuid import ComponentUID import pyomo.core.base.indexed_component from pyomo.core.base.action import BuildAction from pyomo.core.base.check import BuildCheck -from pyomo.core.base.set import ( - Set, SetOf, simple_set_rule, RangeSet, -) +from pyomo.core.base.set import Set, SetOf, simple_set_rule, RangeSet from pyomo.core.base.param import Param -from pyomo.core.base.var import (Var, ScalarVar, VarList) -from pyomo.core.base.boolean_var import ( - BooleanVar, BooleanVarList, ScalarBooleanVar) -from pyomo.core.base.constraint import (logical_expr, - simple_constraint_rule, - simple_constraintlist_rule, - ConstraintList, Constraint) -from pyomo.core.base.logical_constraint import ( - LogicalConstraint, LogicalConstraintList) -from pyomo.core.base.objective import (simple_objective_rule, - simple_objectivelist_rule, - Objective, ObjectiveList) +from pyomo.core.base.var import Var, ScalarVar, VarList +from pyomo.core.base.boolean_var import BooleanVar, BooleanVarList, ScalarBooleanVar +from pyomo.core.base.constraint import ( + simple_constraint_rule, + simple_constraintlist_rule, + ConstraintList, + Constraint, +) +from pyomo.core.base.logical_constraint import LogicalConstraint, LogicalConstraintList +from pyomo.core.base.objective import ( + simple_objective_rule, + simple_objectivelist_rule, + Objective, + ObjectiveList, +) from pyomo.core.base.connector import Connector from pyomo.core.base.sos import SOSConstraint from pyomo.core.base.piecewise import Piecewise -from pyomo.core.base.suffix import (active_export_suffix_generator, - active_import_suffix_generator, - Suffix) +from pyomo.core.base.suffix import ( + active_export_suffix_generator, + active_import_suffix_generator, + Suffix, +) from pyomo.core.base.external import ExternalFunction from pyomo.core.base.symbol_map import symbol_map_from_instance from pyomo.core.base.reference import Reference -from pyomo.core.base.set import (Reals, PositiveReals, NonPositiveReals, - NegativeReals, NonNegativeReals, Integers, - PositiveIntegers, NonPositiveIntegers, - NegativeIntegers, NonNegativeIntegers, - Boolean, Binary, Any, AnyWithNone, EmptySet, - UnitInterval, PercentFraction, RealInterval, - IntegerInterval) +from pyomo.core.base.set import ( + Reals, + PositiveReals, + NonPositiveReals, + NegativeReals, + NonNegativeReals, + Integers, + PositiveIntegers, + NonPositiveIntegers, + NegativeIntegers, + NonNegativeIntegers, + Boolean, + Binary, + Any, + AnyWithNone, + EmptySet, + UnitInterval, + PercentFraction, + RealInterval, + IntegerInterval, +) from pyomo.core.base.misc import display -from pyomo.core.base.block import (SortComponents, TraversalStrategy, - Block, ScalarBlock, - active_components, - components, active_components_data, - components_data) -from pyomo.core.base.PyomoModel import (global_option, - Model, ConcreteModel, - AbstractModel) -from pyomo.core.base.transformation import ( - Transformation, - TransformationFactory, +from pyomo.core.base.block import ( + SortComponents, + TraversalStrategy, + Block, + ScalarBlock, + active_components, + components, + active_components_data, + components_data, ) +from pyomo.core.base.PyomoModel import ( + global_option, + Model, + ConcreteModel, + AbstractModel, +) +from pyomo.core.base.transformation import Transformation, TransformationFactory from pyomo.core.base.instance2dat import instance2dat -from pyomo.core.util import (prod, quicksum, sum_product, dot_product, - summation, sequence) +from pyomo.core.util import ( + prod, + quicksum, + sum_product, + dot_product, + summation, + sequence, +) # These APIs are deprecated and should be removed in the near future -from pyomo.core.base.set import ( - set_options, RealSet, IntegerSet, BooleanSet, -) +from pyomo.core.base.set import set_options, RealSet, IntegerSet, BooleanSet from pyomo.common.deprecation import relocated_module_attribute + relocated_module_attribute( - 'SimpleBlock', 'pyomo.core.base.block.SimpleBlock', version='6.0') -relocated_module_attribute( - 'SimpleVar', 'pyomo.core.base.var.SimpleVar', version='6.0') + 'SimpleBlock', 'pyomo.core.base.block.SimpleBlock', version='6.0' +) +relocated_module_attribute('SimpleVar', 'pyomo.core.base.var.SimpleVar', version='6.0') relocated_module_attribute( - 'SimpleBooleanVar', 'pyomo.core.base.boolean_var.SimpleBooleanVar', - version='6.0' + 'SimpleBooleanVar', 'pyomo.core.base.boolean_var.SimpleBooleanVar', version='6.0' ) del relocated_module_attribute diff --git a/pyomo/core/base/PyomoModel.py b/pyomo/core/base/PyomoModel.py index 3a8f9394cf5..f8b2710b9f2 100644 --- a/pyomo/core/base/PyomoModel.py +++ b/pyomo/core/base/PyomoModel.py @@ -23,6 +23,7 @@ from pyomo.common.deprecation import deprecated, deprecation_warning from pyomo.common.gc_manager import PauseGC from pyomo.common.log import is_debug_set +from pyomo.common.numeric_types import value from pyomo.core.staleflag import StaleFlagManager from pyomo.core.expr.symbol_map import SymbolMap from pyomo.core.base.component import ModelComponentFactory @@ -30,7 +31,6 @@ from pyomo.core.base.constraint import Constraint from pyomo.core.base.objective import Objective from pyomo.core.base.suffix import active_import_suffix_generator -from pyomo.core.base.numvalue import value from pyomo.core.base.block import ScalarBlock from pyomo.core.base.set import Set from pyomo.core.base.componentuid import ComponentUID @@ -57,8 +57,10 @@ def functor(): ... """ PyomoConfig._option[tuple(name.split('.'))] = value + def wrapper_function(*args, **kwargs): return function(*args, **kwargs) + return wrapper_function @@ -86,7 +88,6 @@ def __init__(self, *args, **kw): class ModelSolution(object): - def __init__(self): self._metadata = {} self._metadata['status'] = None @@ -94,7 +95,7 @@ def __init__(self): self._metadata['gap'] = None self._entry = {} # - # entry[name]: id -> (object weakref, entry) + # entry[name]: id -> (object, entry) # for name in ['objective', 'variable', 'constraint', 'problem']: self._entry[name] = {} @@ -104,8 +105,10 @@ def __getattr__(self, name): if name in self.__dict__: return self.__dict__[name] else: - raise AttributeError( "'%s' object has no attribute '%s'" - % (self.__class__.__name__, name) ) + raise AttributeError( + "'%s' object has no attribute '%s'" + % (self.__class__.__name__, name) + ) return self.__dict__['_metadata'][name] def __setattr__(self, name, val): @@ -115,11 +118,8 @@ def __setattr__(self, name, val): self.__dict__['_metadata'][name] = val def __getstate__(self): - state = { - '_metadata': self._metadata, - '_entry': {} - } - for (name, data) in self._entry.items(): + state = {'_metadata': self._metadata, '_entry': {}} + for name, data in self._entry.items(): tmp = state['_entry'][name] = [] # Note: We must convert all weakrefs to hard refs and # not indirect references like ComponentUIDs because @@ -128,12 +128,13 @@ def __getstate__(self): # so things like CUID.find_component will fail (return # None). for obj, entry in data.values(): - if obj is None or obj() is None: + if obj is None or obj is None: logger.warning( "Solution component in '%s' no longer " - "accessible: %s!" % ( name, entry )) + "accessible: %s!" % (name, entry) + ) else: - tmp.append( ( obj(), entry ) ) + tmp.append((obj, entry)) return state def __setstate__(self, state): @@ -142,11 +143,10 @@ def __setstate__(self, state): for name, data in state['_entry'].items(): tmp = self._entry[name] = {} for obj, entry in data: - tmp[ id(obj) ] = ( weakref_ref(obj), entry ) + tmp[id(obj)] = (obj, entry) class ModelSolutions(object): - def __init__(self, instance): self._instance = weakref_ref(instance) self.clear() @@ -185,17 +185,19 @@ def delete_symbol_map(self, smap_id): if not smap_id is None: del self.symbol_map[smap_id] - def load_from(self, - results, - allow_consistent_values_for_fixed_vars=False, - comparison_tolerance_for_fixed_vars=1e-5, - ignore_invalid_labels=False, - id=None, - delete_symbol_map=True, - clear=True, - default_variable_value=None, - select=0, - ignore_fixed_vars=True): + def load_from( + self, + results, + allow_consistent_values_for_fixed_vars=False, + comparison_tolerance_for_fixed_vars=1e-5, + ignore_invalid_labels=False, + id=None, + delete_symbol_map=True, + clear=True, + default_variable_value=None, + select=0, + ignore_fixed_vars=True, + ): """ Load solver results """ @@ -203,29 +205,32 @@ def load_from(self, # # If there is a warning, then print a warning message. # - if (results.solver.status == SolverStatus.warning): + if results.solver.status == SolverStatus.warning: tc = getattr(results.solver, 'termination_condition', None) msg = getattr(results.solver, 'message', None) logger.warning( 'Loading a SolverResults object with a ' 'warning status into model.name="%s";\n' ' - termination condition: %s\n' - ' - message from solver: %s' - % (instance.name, tc, msg)) + ' - message from solver: %s' % (instance.name, tc, msg) + ) # # If the solver status not one of either OK or Warning, then # generate an error. # elif results.solver.status != SolverStatus.ok: - if (results.solver.status == SolverStatus.aborted) and \ - (len(results.solution) > 0): + if (results.solver.status == SolverStatus.aborted) and ( + len(results.solution) > 0 + ): logger.warning( "Loading a SolverResults object with " - "an 'aborted' status, but containing a solution") + "an 'aborted' status, but containing a solution" + ) else: - raise ValueError("Cannot load a SolverResults object " - "with bad status: %s" - % str(results.solver.status)) + raise ValueError( + "Cannot load a SolverResults object " + "with bad status: %s" % str(results.solver.status) + ) if clear: # # Clear the solutions, but not the symbol map @@ -245,20 +250,24 @@ def load_from(self, smap_id = results.__dict__.get('_smap_id') cache = {} if not id is None: - self.add_solution(results.solution(id), - smap_id, - delete_symbol_map=False, - cache=cache, - ignore_invalid_labels=ignore_invalid_labels, - default_variable_value=default_variable_value) + self.add_solution( + results.solution(id), + smap_id, + delete_symbol_map=False, + cache=cache, + ignore_invalid_labels=ignore_invalid_labels, + default_variable_value=default_variable_value, + ) else: for i in range(len(results.solution)): - self.add_solution(results.solution(i), - smap_id, - delete_symbol_map=False, - cache=cache, - ignore_invalid_labels=ignore_invalid_labels, - default_variable_value=default_variable_value) + self.add_solution( + results.solution(i), + smap_id, + delete_symbol_map=False, + cache=cache, + ignore_invalid_labels=ignore_invalid_labels, + default_variable_value=default_variable_value, + ) if delete_symbol_map: self.delete_symbol_map(smap_id) @@ -271,7 +280,8 @@ def load_from(self, allow_consistent_values_for_fixed_vars=allow_consistent_values_for_fixed_vars, comparison_tolerance_for_fixed_vars=comparison_tolerance_for_fixed_vars, ignore_invalid_labels=ignore_invalid_labels, - ignore_fixed_vars=ignore_fixed_vars) + ignore_fixed_vars=ignore_fixed_vars, + ) def store_to(self, results, cuid=False, skip_stale_vars=False): """ @@ -301,7 +311,7 @@ def store_to(self, results, cuid=False, skip_stale_vars=False): else: vals = vals[1] vals['Value'] = value(obj) - soln.objective[ sm.getSymbol(obj, labeler) ] = vals + soln.objective[sm.getSymbol(obj, labeler)] = vals entry = soln_._entry['variable'] for obj in instance.component_data_objects(Var, active=True): if obj.stale and skip_stale_vars: @@ -312,7 +322,7 @@ def store_to(self, results, cuid=False, skip_stale_vars=False): else: vals = vals[1] vals['Value'] = value(obj) - soln.variable[ sm.getSymbol(obj, labeler) ] = vals + soln.variable[sm.getSymbol(obj, labeler)] = vals entry = soln_._entry['constraint'] for obj in instance.component_data_objects(Constraint, active=True): vals = entry.get(id(obj), None) @@ -320,18 +330,19 @@ def store_to(self, results, cuid=False, skip_stale_vars=False): continue else: vals = vals[1] - soln.constraint[ sm.getSymbol(obj, labeler) ] = vals - results.solution.insert( soln ) - - def add_solution(self, - solution, - smap_id, - delete_symbol_map=True, - cache=None, - ignore_invalid_labels=False, - ignore_missing_symbols=True, - default_variable_value=None): - + soln.constraint[sm.getSymbol(obj, labeler)] = vals + results.solution.insert(soln) + + def add_solution( + self, + solution, + smap_id, + delete_symbol_map=True, + cache=None, + ignore_invalid_labels=False, + ignore_missing_symbols=True, + default_variable_value=None, + ): instance = self._instance() soln = ModelSolution() @@ -367,9 +378,11 @@ def add_solution(self, if obj is None: if ignore_invalid_labels: continue - raise RuntimeError("CUID %s is missing from model %s" - % (str(cuid), instance.name)) - tmp[id(obj)] = (weakref_ref(obj), val) + raise RuntimeError( + "CUID %s is missing from model %s" + % (str(cuid), instance.name) + ) + tmp[id(obj)] = (obj, val) else: # # Loading a solution with string keys @@ -389,9 +402,11 @@ def add_solution(self, if obj is None: if ignore_invalid_labels: continue - raise RuntimeError("Symbol %s is missing from model %s" - % (symb, instance.name)) - tmp[id(obj)] = (weakref_ref(obj), val) + raise RuntimeError( + "Symbol %s is missing from model %s" + % (symb, instance.name) + ) + tmp[id(obj)] = (obj, val) else: # # Map solution @@ -406,16 +421,17 @@ def add_solution(self, obj = smap.aliases[symb] elif ignore_missing_symbols: continue - else: #pragma:nocover + else: # pragma:nocover # # This should never happen ... # raise RuntimeError( "ERROR: Symbol %s is missing from " "model %s when loading with a symbol map!" - % (symb, instance.name)) + % (symb, instance.name) + ) - tmp[id(obj())] = (obj, val) + tmp[id(obj)] = (obj, val) # # Wrap up # @@ -429,22 +445,26 @@ def add_solution(self, for vdata in instance.component_data_objects(Var): id_ = id(vdata) if vdata.fixed: - tmp[id_] = (weakref_ref(vdata), {'Value': vdata.value}) - elif (default_variable_value is not None) and \ - (smap_id is not None) and \ - (id_ in smap.byObject) and \ - (id_ not in tmp): - tmp[id_] = (weakref_ref(vdata), {'Value':default_variable_value}) + tmp[id_] = (vdata, {'Value': vdata.value}) + elif ( + (default_variable_value is not None) + and (smap_id is not None) + and (id_ in smap.byObject) + and (id_ not in tmp) + ): + tmp[id_] = (vdata, {'Value': default_variable_value}) self.solutions.append(soln) - return len(self.solutions)-1 - - def select(self, - index=0, - allow_consistent_values_for_fixed_vars=False, - comparison_tolerance_for_fixed_vars=1e-5, - ignore_invalid_labels=False, - ignore_fixed_vars=True): + return len(self.solutions) - 1 + + def select( + self, + index=0, + allow_consistent_values_for_fixed_vars=False, + comparison_tolerance_for_fixed_vars=1e-5, + ignore_invalid_labels=False, + ignore_fixed_vars=True, + ): """ Select a solution from the model's solutions. @@ -486,7 +506,7 @@ def select(self, # Load problem (model) level suffixes. These would only come from ampl # interfaced solution suffixes at this point in time. # - for id_, (pobj,entry) in soln._entry['problem'].items(): + for id_, (pobj, entry) in soln._entry['problem'].items(): for _attr_key, attr_value in entry.items(): attr_key = _attr_key[0].lower() + _attr_key[1:] if attr_key in valid_import_suffixes: @@ -495,7 +515,6 @@ def select(self, # Load objective data (suffixes) # for id_, (odata, entry) in soln._entry['objective'].items(): - odata = odata() for _attr_key, attr_value in entry.items(): attr_key = _attr_key[0].lower() + _attr_key[1:] if attr_key in valid_import_suffixes: @@ -504,25 +523,30 @@ def select(self, # Load variable data (suffixes and values) # for id_, (vdata, entry) in soln._entry['variable'].items(): - vdata = vdata() val = entry['Value'] if vdata.fixed is True: if ignore_fixed_vars: continue if not allow_consistent_values_for_fixed_vars: - msg = "Variable '%s' in model '%s' is currently fixed - new" \ - ' value is not expected in solution' + msg = ( + "Variable '%s' in model '%s' is currently fixed - new" + ' value is not expected in solution' + ) raise TypeError(msg % (vdata.name, instance.name)) if math.fabs(val - vdata.value) > comparison_tolerance_for_fixed_vars: - raise TypeError("Variable '%s' in model '%s' is currently " - "fixed - a value of '%s' in solution is " - "not within tolerance=%s of the current " - "value of '%s'" - % (vdata.name, - instance.name, - str(val), - str(comparison_tolerance_for_fixed_vars), - str(vdata.value))) + raise TypeError( + "Variable '%s' in model '%s' is currently " + "fixed - a value of '%s' in solution is " + "not within tolerance=%s of the current " + "value of '%s'" + % ( + vdata.name, + instance.name, + str(val), + str(comparison_tolerance_for_fixed_vars), + str(vdata.value), + ) + ) vdata.set_value(val, skip_validation=True) @@ -536,7 +560,6 @@ def select(self, # Load constraint data (suffixes) # for id_, (cdata, entry) in soln._entry['constraint'].items(): - cdata = cdata() for _attr_key, attr_value in entry.items(): attr_key = _attr_key[0].lower() + _attr_key[1:] if attr_key in valid_import_suffixes: @@ -547,7 +570,10 @@ def select(self, # variables to be marked as stale). StaleFlagManager.mark_all_as_stale(delayed=True) -@ModelComponentFactory.register('Model objects can be used as a component of other models.') + +@ModelComponentFactory.register( + 'Model objects can be used as a component of other models.' +) class Model(ScalarBlock): """ An optimization model. By default, this defers construction of components @@ -562,7 +588,8 @@ def __new__(cls, *args, **kwds): raise TypeError( "Directly creating the 'Model' class is not allowed. Please use the " - "AbstractModel or ConcreteModel class instead.") + "AbstractModel or ConcreteModel class instead." + ) def __init__(self, name='unknown', **kwargs): """Constructor""" @@ -606,31 +633,38 @@ def nobjectives(self): self.compute_statistics() return self.statistics.number_of_objectives - def create_instance( self, filename=None, data=None, name=None, - namespace=None, namespaces=None, - profile_memory=0, report_timing=False, - **kwds ): + def create_instance( + self, + filename=None, + data=None, + name=None, + namespace=None, + namespaces=None, + profile_memory=0, + report_timing=False, + **kwds + ): """ Create a concrete instance of an abstract model, possibly using data read in from a file. Parameters ---------- - filename: `str`, optional - The name of a Pyomo Data File that will be used to load data into + filename: `str`, optional + The name of a Pyomo Data File that will be used to load data into the model. data: `dict`, optional - A dictionary containing initialization data for the model to be + A dictionary containing initialization data for the model to be used if there is no filename name: `str`, optional The name given to the model. - namespace: `str`, optional + namespace: `str`, optional A namespace used to select data. - namespaces: `list`, optional + namespaces: `list`, optional A list of namespaces used to select data. - profile_memory: `int`, optional + profile_memory: `int`, optional A number that indicates the profiling level. - report_timing: `bool`, optional + report_timing: `bool`, optional Report timing statistics during construction. """ @@ -640,14 +674,15 @@ def create_instance( self, filename=None, data=None, name=None, # constructed, so passing in a data file is a waste of time. # if self.is_constructed() and isinstance(filename, str): - msg = "The filename=%s will not be loaded - supplied as an " \ - "argument to the create_instance() method of a "\ - "concrete instance with name=%s." % (filename, name) + msg = ( + "The filename=%s will not be loaded - supplied as an " + "argument to the create_instance() method of a " + "concrete instance with name=%s." % (filename, name) + ) logger.warning(msg) if kwds: - msg = \ -"""Model.create_instance() passed the following unrecognized keyword + msg = """Model.create_instance() passed the following unrecognized keyword arguments (which have been ignored):""" for k in kwds: msg = msg + "\n '%s'" % (k,) @@ -665,9 +700,11 @@ def create_instance( self, filename=None, data=None, name=None, name = self.local_name if filename is not None: if data is not None: - logger.warning("Model.create_instance() passed both 'filename' " - "and 'data' keyword arguments. Ignoring the " - "'data' argument") + logger.warning( + "Model.create_instance() passed both 'filename' " + "and 'data' keyword arguments. Ignoring the " + "'data' argument" + ) data = filename if data is None: data = {} @@ -694,9 +731,7 @@ def create_instance( self, filename=None, data=None, name=None, if None not in _namespaces: _namespaces.append(None) - instance.load( data, - namespaces=_namespaces, - profile_memory=profile_memory ) + instance.load(data, namespaces=_namespaces, profile_memory=profile_memory) # # Indicate that the model is concrete/constructed @@ -712,9 +747,11 @@ def create_instance( self, filename=None, data=None, name=None, instance.__class__ = ConcreteModel return instance - - @deprecated("The Model.preprocess() method is deprecated and no " - "longer performs any actions", version='6.0') + @deprecated( + "The Model.preprocess() method is deprecated and no " + "longer performs any actions", + version='6.0', + ) def preprocess(self, preprocessor=None): return @@ -730,11 +767,8 @@ def load(self, arg, namespaces=[None], profile_memory=0): dp = DataPortal(data_dict=arg, model=self) else: msg = "Cannot load model model data from with object of type '%s'" - raise ValueError(msg % str( type(arg) )) - self._load_model_data(dp, - namespaces, - profile_memory=profile_memory) - + raise ValueError(msg % str(type(arg))) + self._load_model_data(dp, namespaces, profile_memory=profile_memory) def _load_model_data(self, modeldata, namespaces, **kwds): """ @@ -747,7 +781,6 @@ def _load_model_data(self, modeldata, namespaces, **kwds): # sufficient to keep memory use under control. # with PauseGC() as pgc: - # # Unlike the standard method in the pympler summary # module, the tracker doesn't print 0-byte entries to pad @@ -758,14 +791,18 @@ def _load_model_data(self, modeldata, namespaces, **kwds): if profile_memory >= 2 and pympler_available: mem_used = pympler.muppy.get_size(muppy.get_objects()) print("") - print(" Total memory = %d bytes prior to model " - "construction" % mem_used) + print( + " Total memory = %d bytes prior to model " + "construction" % mem_used + ) if profile_memory >= 3: gc.collect() mem_used = pympler.muppy.get_size(muppy.get_objects()) - print(" Total memory = %d bytes prior to model " - "construction (after garbage collection)" % mem_used) + print( + " Total memory = %d bytes prior to model " + "construction (after garbage collection)" % mem_used + ) # # Do some error checking @@ -780,27 +817,31 @@ def _load_model_data(self, modeldata, namespaces, **kwds): # for component_name, component in self.component_map().items(): - if component.ctype is Model: continue - self._initialize_component(modeldata, namespaces, component_name, profile_memory) + self._initialize_component( + modeldata, namespaces, component_name, profile_memory + ) # Note: As is, connectors are expanded when using command-line pyomo but not calling model.create(...) in a Python script. # John says this has to do with extension points which are called from commandline but not when writing scripts. # Uncommenting the next two lines switches this (command-line fails because it tries to expand connectors twice) - #connector_expander = ConnectorExpander() - #connector_expander.apply(instance=self) + # connector_expander = ConnectorExpander() + # connector_expander.apply(instance=self) if profile_memory >= 2 and pympler_available: print("") print(" Summary of objects following instance construction") post_construction_summary = pympler.summary.summarize( - pympler.muppy.get_objects()) + pympler.muppy.get_objects() + ) pympler.summary.print_(post_construction_summary, limit=100) print("") - def _initialize_component(self, modeldata, namespaces, component_name, profile_memory): + def _initialize_component( + self, modeldata, namespaces, component_name, profile_memory + ): declaration = self.component(component_name) if component_name in modeldata._default: @@ -809,45 +850,63 @@ def _initialize_component(self, modeldata, namespaces, component_name, profile_m data = None for namespace in namespaces: - if component_name in modeldata._data.get(namespace,{}): + if component_name in modeldata._data.get(namespace, {}): data = modeldata._data[namespace][component_name] if data is not None: break generate_debug_messages = is_debug_set(logger) if generate_debug_messages: - _blockName = "Model" if self.parent_block() is None \ - else "Block '%s'" % self.name - logger.debug( "Constructing %s '%s' on %s from data=%s", - declaration.__class__.__name__, - declaration.name, _blockName, str(data) ) + _blockName = ( + "Model" if self.parent_block() is None else "Block '%s'" % self.name + ) + logger.debug( + "Constructing %s '%s' on %s from data=%s", + declaration.__class__.__name__, + declaration.name, + _blockName, + str(data), + ) try: declaration.construct(data) except: err = sys.exc_info()[1] logger.error( "Constructing component '%s' from data=%s failed:\n %s: %s", - str(declaration.name), str(data).strip(), - type(err).__name__, err ) + str(declaration.name), + str(data).strip(), + type(err).__name__, + err, + ) raise if generate_debug_messages: _out = StringIO() declaration.pprint(ostream=_out) - logger.debug("Constructed component '%s':\n %s" - % ( declaration.name, _out.getvalue())) + logger.debug( + "Constructed component '%s':\n %s" + % (declaration.name, _out.getvalue()) + ) if profile_memory >= 2 and pympler_available: mem_used = pympler.muppy.get_size(pympler.muppy.get_objects()) - print(" Total memory = %d bytes following construction of component=%s" % (mem_used, component_name)) + print( + " Total memory = %d bytes following construction of component=%s" + % (mem_used, component_name) + ) if profile_memory >= 3: gc.collect() mem_used = pympler.muppy.get_size(pympler.muppy.get_objects()) - print(" Total memory = %d bytes following construction of component=%s (after garbage collection)" % (mem_used, component_name)) + print( + " Total memory = %d bytes following construction of component=%s (after garbage collection)" + % (mem_used, component_name) + ) -@ModelComponentFactory.register('A concrete optimization model that does not defer construction of components.') +@ModelComponentFactory.register( + 'A concrete optimization model that does not defer construction of components.' +) class ConcreteModel(Model): """ A concrete optimization model that does not defer construction of @@ -859,7 +918,9 @@ def __init__(self, *args, **kwds): Model.__init__(self, *args, **kwds) -@ModelComponentFactory.register('An abstract optimization model that defers construction of components.') +@ModelComponentFactory.register( + 'An abstract optimization model that defers construction of components.' +) class AbstractModel(Model): """ An abstract optimization model that defers construction of @@ -879,4 +940,3 @@ def __init__(self, *args, **kwds): # reserved names. # Model._Block_reserved_words = set(dir(ConcreteModel())) - diff --git a/pyomo/core/base/__init__.py b/pyomo/core/base/__init__.py index 13ec9337b30..ab62f1163d4 100644 --- a/pyomo/core/base/__init__.py +++ b/pyomo/core/base/__init__.py @@ -14,100 +14,141 @@ from pyomo.common.collections import ComponentMap from pyomo.core.expr.symbol_map import SymbolMap -from pyomo.core.expr.numvalue import (nonpyomo_leaf_types, - native_types, - native_numeric_types, - value, is_constant, - is_fixed, is_variable_type, - is_potentially_variable, - polynomial_degree, - NumericValue, - ZeroConstant) +from pyomo.core.expr.numvalue import ( + nonpyomo_leaf_types, + native_types, + native_numeric_types, + value, + is_constant, + is_fixed, + is_variable_type, + is_potentially_variable, + polynomial_degree, + NumericValue, + ZeroConstant, +) from pyomo.core.expr.boolean_value import ( - as_boolean, BooleanConstant, BooleanValue, - native_logical_values) -from pyomo.core.kernel.objective import (minimize, - maximize) + as_boolean, + BooleanConstant, + BooleanValue, + native_logical_values, +) +from pyomo.core.kernel.objective import minimize, maximize from pyomo.core.base.config import PyomoOptions -from pyomo.core.base.expression import (Expression, _ExpressionData) -from pyomo.core.base.label import (CuidLabeler, - CounterLabeler, NumericLabeler, - CNameLabeler, TextLabeler, - AlphaNumericTextLabeler, NameLabeler, - ShortNameLabeler) +from pyomo.core.base.expression import Expression, _ExpressionData +from pyomo.core.base.label import ( + CuidLabeler, + CounterLabeler, + NumericLabeler, + CNameLabeler, + TextLabeler, + AlphaNumericTextLabeler, + NameLabeler, + ShortNameLabeler, +) # # Components # -from pyomo.core.base.component import (name, Component, ModelComponentFactory) +from pyomo.core.base.component import name, Component, ModelComponentFactory from pyomo.core.base.componentuid import ComponentUID from pyomo.core.base.action import BuildAction from pyomo.core.base.check import BuildCheck -from pyomo.core.base.set import ( - Set, SetOf, simple_set_rule, RangeSet, -) +from pyomo.core.base.set import Set, SetOf, simple_set_rule, RangeSet from pyomo.core.base.param import Param -from pyomo.core.base.var import (Var, _VarData, _GeneralVarData, - ScalarVar, VarList) +from pyomo.core.base.var import Var, _VarData, _GeneralVarData, ScalarVar, VarList from pyomo.core.base.boolean_var import ( - BooleanVar, _BooleanVarData, _GeneralBooleanVarData, - BooleanVarList, ScalarBooleanVar) -from pyomo.core.base.constraint import (simple_constraint_rule, - simple_constraintlist_rule, - ConstraintList, Constraint, - _ConstraintData) + BooleanVar, + _BooleanVarData, + _GeneralBooleanVarData, + BooleanVarList, + ScalarBooleanVar, +) +from pyomo.core.base.constraint import ( + simple_constraint_rule, + simple_constraintlist_rule, + ConstraintList, + Constraint, + _ConstraintData, +) from pyomo.core.base.logical_constraint import ( - LogicalConstraint, LogicalConstraintList, _LogicalConstraintData) -from pyomo.core.base.objective import (simple_objective_rule, - simple_objectivelist_rule, - Objective, ObjectiveList, - _ObjectiveData) + LogicalConstraint, + LogicalConstraintList, + _LogicalConstraintData, +) +from pyomo.core.base.objective import ( + simple_objective_rule, + simple_objectivelist_rule, + Objective, + ObjectiveList, + _ObjectiveData, +) from pyomo.core.base.connector import Connector from pyomo.core.base.sos import SOSConstraint from pyomo.core.base.piecewise import Piecewise -from pyomo.core.base.suffix import (active_export_suffix_generator, - active_import_suffix_generator, - Suffix) +from pyomo.core.base.suffix import ( + active_export_suffix_generator, + active_import_suffix_generator, + Suffix, +) from pyomo.core.base.external import ExternalFunction from pyomo.core.base.symbol_map import symbol_map_from_instance from pyomo.core.base.reference import Reference -from pyomo.core.base.set import (Reals, PositiveReals, NonPositiveReals, - NegativeReals, NonNegativeReals, Integers, - PositiveIntegers, NonPositiveIntegers, - NegativeIntegers, NonNegativeIntegers, - Boolean, Binary, Any, AnyWithNone, EmptySet, - UnitInterval, PercentFraction, RealInterval, - IntegerInterval) +from pyomo.core.base.set import ( + Reals, + PositiveReals, + NonPositiveReals, + NegativeReals, + NonNegativeReals, + Integers, + PositiveIntegers, + NonPositiveIntegers, + NegativeIntegers, + NonNegativeIntegers, + Boolean, + Binary, + Any, + AnyWithNone, + EmptySet, + UnitInterval, + PercentFraction, + RealInterval, + IntegerInterval, +) from pyomo.core.base.misc import display -from pyomo.core.base.block import (SortComponents, TraversalStrategy, - Block, ScalarBlock, active_components, - components, active_components_data, - components_data) -from pyomo.core.base.PyomoModel import (global_option, - ModelSolution, - ModelSolutions, Model, ConcreteModel, - AbstractModel) -from pyomo.core.base.transformation import ( - Transformation, - TransformationFactory, +from pyomo.core.base.block import ( + Block, + ScalarBlock, + active_components, + components, + active_components_data, + components_data, ) +from pyomo.core.base.enums import SortComponents, TraversalStrategy +from pyomo.core.base.PyomoModel import ( + global_option, + ModelSolution, + ModelSolutions, + Model, + ConcreteModel, + AbstractModel, +) +from pyomo.core.base.transformation import Transformation, TransformationFactory from pyomo.core.base.instance2dat import instance2dat # These APIs are deprecated and should be removed in the near future -from pyomo.core.base.set import ( - set_options, RealSet, IntegerSet, BooleanSet, -) +from pyomo.core.base.set import set_options, RealSet, IntegerSet, BooleanSet from pyomo.common.deprecation import relocated_module_attribute + relocated_module_attribute( - 'SimpleBlock', 'pyomo.core.base.block.SimpleBlock', version='6.0') -relocated_module_attribute( - 'SimpleVar', 'pyomo.core.base.var.SimpleVar', version='6.0') + 'SimpleBlock', 'pyomo.core.base.block.SimpleBlock', version='6.0' +) +relocated_module_attribute('SimpleVar', 'pyomo.core.base.var.SimpleVar', version='6.0') relocated_module_attribute( - 'SimpleBooleanVar', 'pyomo.core.base.boolean_var.SimpleBooleanVar', - version='6.0' + 'SimpleBooleanVar', 'pyomo.core.base.boolean_var.SimpleBooleanVar', version='6.0' ) del relocated_module_attribute diff --git a/pyomo/core/base/action.py b/pyomo/core/base/action.py index d9032a5813b..b54beab8584 100644 --- a/pyomo/core/base/action.py +++ b/pyomo/core/base/action.py @@ -23,7 +23,9 @@ logger = logging.getLogger('pyomo.core') -@ModelComponentFactory.register("A component that performs arbitrary actions during model construction. The action rule is applied to every index value.") +@ModelComponentFactory.register( + "A component that performs arbitrary actions during model construction. The action rule is applied to every index value." +) class BuildAction(IndexedComponent): """A build action, which executes a rule for all valid indices. @@ -40,23 +42,31 @@ def __init__(self, *args, **kwd): IndexedComponent.__init__(self, *args, **kwd) # if not type(self._rule) is types.FunctionType: - raise ValueError("BuildAction must have an 'rule' option specified whose value is a function") + raise ValueError( + "BuildAction must have an 'rule' option specified whose value is a function" + ) def _pprint(self): - return ([("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None), - ("Active", self.active),] - , None, None, None) + return ( + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ("Active", self.active), + ], + None, + None, + None, + ) def construct(self, data=None): - """ Apply the rule to construct values in this set """ - if is_debug_set(logger): #pragma:nocover - logger.debug("Constructing Action, name="+self.name) + """Apply the rule to construct values in this set""" + if is_debug_set(logger): # pragma:nocover + logger.debug("Constructing Action, name=" + self.name) # - if self._constructed: #pragma:nocover + if self._constructed: # pragma:nocover return timer = ConstructionTimer(self) - self._constructed=True + self._constructed = True # if not self.is_indexed(): # Scalar component @@ -66,4 +76,3 @@ def construct(self, data=None): for index in self._index_set: apply_indexed_rule(self, self._rule, self._parent(), index) timer.report() - diff --git a/pyomo/core/base/block.py b/pyomo/core/base/block.py index acf5719392b..596f52b1259 100644 --- a/pyomo/core/base/block.py +++ b/pyomo/core/base/block.py @@ -9,11 +9,20 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ['Block', 'TraversalStrategy', 'SortComponents', - 'active_components', 'components', 'active_components_data', - 'components_data', 'SimpleBlock', 'ScalarBlock'] +__all__ = [ + 'Block', + 'TraversalStrategy', + 'SortComponents', + 'active_components', + 'components', + 'active_components_data', + 'components_data', + 'SimpleBlock', + 'ScalarBlock', +] import copy +import enum import logging import sys import weakref @@ -21,30 +30,33 @@ from contextlib import contextmanager from inspect import isclass -from itertools import filterfalse +from itertools import filterfalse, chain from operator import itemgetter, attrgetter from io import StringIO from pyomo.common.pyomo_typing import overload -from pyomo.common.collections import Mapping, OrderedDict -from pyomo.common.deprecation import ( - deprecated, deprecation_warning, RenamedClass, -) +from pyomo.common.autoslots import AutoSlots +from pyomo.common.collections import Mapping +from pyomo.common.deprecation import deprecated, deprecation_warning, RenamedClass from pyomo.common.formatting import StreamIndenter from pyomo.common.gc_manager import PauseGC from pyomo.common.log import is_debug_set from pyomo.common.sorting import sorted_robust from pyomo.common.timing import ConstructionTimer from pyomo.core.base.component import ( - Component, ActiveComponentData, ModelComponentFactory + Component, + ActiveComponentData, + ModelComponentFactory, ) +from pyomo.core.base.enums import SortComponents, TraversalStrategy from pyomo.core.base.global_set import UnindexedComponent_index from pyomo.core.base.componentuid import ComponentUID from pyomo.core.base.set import Any, GlobalSetBase, _SetDataBase from pyomo.core.base.var import Var from pyomo.core.base.initializer import Initializer from pyomo.core.base.indexed_component import ( - ActiveIndexedComponent, UnindexedComponent_set, + ActiveIndexedComponent, + UnindexedComponent_set, ) from pyomo.opt.base import ProblemFormat, guess_format @@ -64,6 +76,7 @@ class _generic_component_decorator(object): (*excluding* the block argument) **kwds: keyword arguments to the Component constructor """ + def __init__(self, component, block, *args, **kwds): self._component = component self._block = block @@ -74,7 +87,7 @@ def __call__(self, rule): setattr( self._block, rule.__name__, - self._component(*self._args, rule=rule, **(self._kwds)) + self._component(*self._args, rule=rule, **(self._kwds)), ) return rule @@ -89,13 +102,13 @@ class _component_decorator(object): block: the block onto which to add the new component """ + def __init__(self, block, component): self._block = block self._component = component def __call__(self, *args, **kwds): - return _generic_component_decorator( - self._component, self._block, *args, **kwds) + return _generic_component_decorator(self._component, self._block, *args, **kwds) class SubclassOf(object): @@ -111,10 +124,10 @@ class SubclassOf(object): model.component_data_objects(Var, descend_into=SubclassOf(Block)) """ + def __init__(self, *ctype): self.ctype = ctype - self.__name__ = 'SubclassOf(%s)' % ( - ','.join(x.__name__ for x in ctype),) + self.__name__ = 'SubclassOf(%s)' % (','.join(x.__name__ for x in ctype),) def __contains__(self, item): return issubclass(item, self.ctype) @@ -148,6 +161,7 @@ class _DeduplicateInfo(object): of every data object. """ + __slots__ = ('seen_components', 'seen_comp_thru_reference', 'seen_data') def __init__(self): @@ -156,13 +170,15 @@ def __init__(self): self.seen_data = set() def unique(self, comp, items, are_values): - """Generator that filters duplicate _ComponentData objects from items + """Returns generator that filters duplicate _ComponentData objects from items Parameters ---------- comp: ComponentBase The Component (indexed or scalar) that contains all - _ComponentData returned by the `items` generator. + _ComponentData returned by the `items` generator. `comp` may + be an IndexedComponent generated by :py:func:`Reference` (and + hence may not own the component datas in `items`) items: generator Generator yielding either the values or the items from the @@ -175,20 +191,20 @@ def unique(self, comp, items, are_values): """ if comp.is_reference(): seen_components_contains = self.seen_components.__contains__ - seen_comp_thru_reference_contains \ - = self.seen_comp_thru_reference.__contains__ + seen_comp_thru_reference_contains = ( + self.seen_comp_thru_reference.__contains__ + ) seen_comp_thru_reference_add = self.seen_comp_thru_reference.add seen_data_contains = self.seen_data.__contains__ seen_data_add = self.seen_data.add - for _item in items: - _data = _item if are_values else _item[1] + def has_been_seen(data): # If the data is contained in a component we have # already processed, then it is a duplicate and we can # bypass further checks. - _id = id(_data.parent_component()) + _id = id(data.parent_component()) if seen_components_contains(_id): - continue + return True # Remember that this component has already been # partially visited (important for the case that we hit # the "natural" component later in the generator) @@ -196,11 +212,19 @@ def unique(self, comp, items, are_values): seen_comp_thru_reference_add(_id) # Yield any data objects we haven't seen yet (and # remember them) - _id = id(_data) - if not seen_data_contains(_id): + _id = id(data) + if seen_data_contains(_id): + return True + else: seen_data_add(_id) - yield _item - else: # this is a "natural" component + return False + + if are_values: + return filterfalse(has_been_seen, items) + else: + return filterfalse(lambda item: has_been_seen(item[1]), items) + + else: # this is a "natural" component # Remember that we have completely processed this component _id = id(comp) self.seen_components.add(_id) @@ -208,7 +232,7 @@ def unique(self, comp, items, are_values): # No data in this component has yet been emitted # (through a Reference), so we can just yield all the # values. - yield from items + return items else: # This component has had some data yielded (through # References). We need to check for conflicts before @@ -217,108 +241,11 @@ def unique(self, comp, items, are_values): # not reappear in natural components, we only need to # check for duplicates and not remember them. seen_data_contains = self.seen_data.__contains__ - for _item in items: - if not seen_data_contains(id( - _item if are_values else _item[0])): - yield _item - - -class SortComponents(object): - - """ - This class is a convenient wrapper for specifying various sort - ordering. We pass these objects to the "sort" argument to various - accessors / iterators to control how much work we perform sorting - the resultant list. The idea is that - "sort=SortComponents.deterministic" is more descriptive than - "sort=True". - """ - unsorted = set() - indices = set([1]) - declOrder = set([2]) - declarationOrder = declOrder - alphaOrder = set([3]) - alphabeticalOrder = alphaOrder - alphabetical = alphaOrder - # both alpha and decl orders are deterministic, so only must sort indices - deterministic = indices - sortBoth = indices | alphabeticalOrder # Same as True - alphabetizeComponentAndIndex = sortBoth - - @staticmethod - def default(): - return set() - - @staticmethod - def sorter(sort_by_names=False, sort_by_keys=False): - sort = SortComponents.default() - if sort_by_names: - sort |= SortComponents.alphabeticalOrder - if sort_by_keys: - sort |= SortComponents.indices - return sort - - @staticmethod - def sort_names(flag): - if type(flag) is bool: - return flag - else: - try: - return SortComponents.alphaOrder.issubset(flag) - except: - return False - - @staticmethod - def sort_indices(flag): - if type(flag) is bool: - return flag - else: - try: - return SortComponents.indices.issubset(flag) - except: - return False - - -class TraversalStrategy(object): - BreadthFirstSearch = (1,) - PrefixDepthFirstSearch = (2,) - PostfixDepthFirstSearch = (3,) - # aliases - BFS = BreadthFirstSearch - ParentLastDepthFirstSearch = PostfixDepthFirstSearch - PostfixDFS = PostfixDepthFirstSearch - ParentFirstDepthFirstSearch = PrefixDepthFirstSearch - PrefixDFS = PrefixDepthFirstSearch - DepthFirstSearch = PrefixDepthFirstSearch - DFS = DepthFirstSearch - - -def _sortingLevelWalker(list_of_generators): - """Utility function for iterating over all members of a list of - generators that prefixes each item with the index of the original - generator that produced it. This is useful for creating lists where - we want to preserve the original generator order but want to sort - the sub-lists. - - Note that the generators must produce tuples. - """ - lastName = '' - nameCounter = 0 - for gen in list_of_generators: - nameCounter += 1 # Each generator starts a new component name - for item in gen: - if item[0] != lastName: - nameCounter += 1 - lastName = item[0] - yield (nameCounter,) + item - - -def _levelWalker(list_of_generators): - """Simple utility function for iterating over all members of a list of - generators. - """ - for gen in list_of_generators: - yield from gen + if are_values: + has_been_seen = lambda item: seen_data_contains(id(item)) + else: + has_been_seen = lambda item: seen_data_contains(id(item[1])) + return filterfalse(has_been_seen, items) def _isNotNone(val): @@ -330,10 +257,11 @@ class _BlockConstruction(object): This class holds a "global" dict used when constructing (hierarchical) models. """ + data = {} -class PseudoMap(object): +class PseudoMap(AutoSlots.Mixin): """ This class presents a "mock" dict interface to the internal _BlockData data structures. We return this object to the @@ -352,7 +280,7 @@ def __init__(self, block, ctype, active=None, sort=False): """ self._block = block if isclass(ctype): - self._ctypes = {ctype,} + self._ctypes = {ctype} elif ctype is None: self._ctypes = Any elif ctype.__class__ is SubclassOf: @@ -360,7 +288,7 @@ def __init__(self, block, ctype, active=None, sort=False): else: self._ctypes = set(ctype) self._active = active - self._sorted = SortComponents.sort_names(sort) + self._sorted = SortComponents.ALPHABETICAL in SortComponents(sort) def __iter__(self): """ @@ -382,12 +310,13 @@ def __getitem__(self, key): msg += self._active and "active " or "inactive " if self._ctypes is not Any: if len(self._ctypes) == 1: - msg += next(iter(self._ctypes)).__name__ + " " + msg += next(iter(self._ctypes)).__name__ + ' ' else: types = sorted(x.__name__ for x in self._ctypes) msg += '%s or %s ' % (', '.join(types[:-1]), types[-1]) - raise KeyError("%scomponent '%s' not found in block %s" - % (msg, key, self._block.name)) + raise KeyError( + "%scomponent '%s' not found in block %s" % (msg, key, self._block.name) + ) def __nonzero__(self): """ @@ -424,9 +353,11 @@ def __len__(self): else: # Note that because of SubclassOf, we cannot iterate # over self._ctypes. - return sum(self._block._ctypes[x][2] - for x in self._block._ctypes - if x in self._ctypes) + return sum( + self._block._ctypes[x][2] + for x in self._block._ctypes + if x in self._ctypes + ) # # If _active is True or False, then we have to count by brute force. # @@ -463,12 +394,14 @@ def _ctypewalker(self): if self._ctypes.__class__ is set: _idx_list = [ self._block._ctypes[x][0] - for x in self._ctypes if x in self._block._ctypes + for x in self._ctypes + if x in self._block._ctypes ] else: _idx_list = [ self._block._ctypes[x][0] - for x in self._block._ctypes if x in self._ctypes + for x in self._block._ctypes + if x in self._ctypes ] _idx_list.sort(reverse=True) while _idx_list: @@ -506,8 +439,7 @@ def values(self): if self._ctypes is Any: # If there is no ctype, then we will just iterate over # all components and return them all - walker = filter( - _isNotNone, map(itemgetter(0), self._block._decl_order)) + walker = filter(_isNotNone, map(itemgetter(0), self._block._decl_order)) else: # The user specified a desired ctype; we will leverage # the _ctypewalker generator to walk the underlying linked @@ -540,24 +472,23 @@ def items(self): for obj in self.values(): yield (obj._name, obj) - @deprecated('The iterkeys method is deprecated. Use dict.keys().', - version='6.0') + @deprecated('The iterkeys method is deprecated. Use dict.keys().', version='6.0') def iterkeys(self): """ Generator returning the component names defined on the Block """ return self.keys() - @deprecated('The itervalues method is deprecated. Use dict.values().', - version='6.0') + @deprecated( + 'The itervalues method is deprecated. Use dict.values().', version='6.0' + ) def itervalues(self): """ Generator returning the components defined on the Block """ return self.values() - @deprecated('The iteritems method is deprecated. Use dict.items().', - version='6.0') + @deprecated('The iteritems method is deprecated. Use dict.items().', version='6.0') def iteritems(self): """ Generator returning (name, component) tuples for components @@ -570,8 +501,13 @@ class _BlockData(ActiveComponentData): """ This class holds the fundamental block data. """ + _Block_reserved_words = set() + # If a writer cached a repn on this block, remove it when cloning + # TODO: remove repn caching from the model + __autoslot_mappers = {'_repn': AutoSlots.encode_as_none} + def __init__(self, component): # # BLOCK DATA ELEMENTS @@ -615,35 +551,14 @@ def __init__(self, component): super(_BlockData, self).__setattr__('_decl', {}) super(_BlockData, self).__setattr__('_decl_order', []) - def __getstate__(self): - # Note: _BlockData is NOT slot-ized, so we must pickle the - # entire __dict__. However, we want the base class's - # __getstate__ to override our blanket approach here (i.e., it - # will handle the _component weakref), so we will call the base - # class's __getstate__ and allow it to overwrite the catch-all - # approach we use here. - ans = dict(self.__dict__) - ans.update(super(_BlockData, self).__getstate__()) - # Note sure why we are deleting these... - if '_repn' in ans: - del ans['_repn'] - return ans - - # - # The base class __setstate__ is sufficient (assigning all the - # pickled attributes to the object is appropriate - # - # def __setstate__(self, state): - # pass - def __getattr__(self, val): if val in ModelComponentFactory: - return _component_decorator( - self, ModelComponentFactory.get_class(val)) + return _component_decorator(self, ModelComponentFactory.get_class(val)) # Since the base classes don't support getattr, we can just # throw the "normal" AttributeError - raise AttributeError("'%s' object has no attribute '%s'" - % (self.__class__.__name__, val)) + raise AttributeError( + "'%s' object has no attribute '%s'" % (self.__class__.__name__, val) + ) def __setattr__(self, name, val): """ @@ -689,8 +604,8 @@ def __setattr__(self, name, val): "\nThis is usually indicative of a modelling error.\n" "To avoid this warning, use block.del_component() and " "block.add_component()." - % (name, type(self.component(name)), self.name, - type(val))) + % (name, type(self.component(name)), self.name, type(val)) + ) self.del_component(name) self.add_component(name, val) else: @@ -709,9 +624,9 @@ def __setattr__(self, name, val): except AttributeError: logger.error( "Expected component %s (type=%s) on block %s to have a " - "'set_value' method, but none was found." % - (name, type(self.component(name)), - self.name)) + "'set_value' method, but none was found." + % (name, type(self.component(name)), self.name) + ) raise # # Call the set_value method. @@ -746,7 +661,8 @@ def __setattr__(self, name, val): "Cannot set the '_parent' attribute of Block '%s' " "to a non-Block object (with type=%s); Did you " "try to create a model component named '_parent'?" - % (self.name, type(val))) + % (self.name, type(val)) + ) super(_BlockData, self).__setattr__(name, val) elif name == '_component': if val is not None and not isinstance(val(), _BlockData): @@ -754,7 +670,8 @@ def __setattr__(self, name, val): "Cannot set the '_component' attribute of Block '%s' " "to a non-Block object (with type=%s); Did you " "try to create a model component named '_component'?" - % (self.name, type(val))) + % (self.name, type(val)) + ) super(_BlockData, self).__setattr__(name, val) # # At this point, we should only be seeing non-component data @@ -767,8 +684,8 @@ def __setattr__(self, name, val): "on block (model).%s with a new Component\nwith type %s.\n" "This is usually indicative of a modelling error.\n" "To avoid this warning, explicitly delete the attribute:\n" - " del %s.%s" % ( - name, self.name, type(val), self.name, name)) + " del %s.%s" % (name, self.name, type(val), self.name, name) + ) delattr(self, name) self.add_component(name, val) else: @@ -808,7 +725,7 @@ def _compact_decl_storage(self): j += 1 _new_decl_order.append(entry) # Update the _decl map - self._decl = {k:idxMap[idx] for k,idx in self._decl.items()} + self._decl = {k: idxMap[idx] for k, idx in self._decl.items()} # Update the ctypes, _decl_order linked lists for ctype, info in self._ctypes.items(): idx = info[0] @@ -829,13 +746,16 @@ def _compact_decl_storage(self): self._decl_order = _new_decl_order def set_value(self, val): - raise RuntimeError(textwrap.dedent( - """\ - Block components do not support assignment or set_value(). - Use the transfer_attributes_from() method to transfer the - components and public attributes from one block to another: - model.b[1].transfer_attributes_from(other_block) - """)) + raise RuntimeError( + textwrap.dedent( + """ + Block components do not support assignment or set_value(). + Use the transfer_attributes_from() method to transfer the + components and public attributes from one block to another: + model.b[1].transfer_attributes_from(other_block) + """ + ).strip() + ) def clear(self): for name in self.component_map().keys(): @@ -879,7 +799,8 @@ def transfer_attributes_from(self, src): raise ValueError( "_BlockData.transfer_attributes_from(): Cannot set a " "sub-block (%s) to a parent block (%s): creates a " - "circular hierarchy" % (self, src)) + "circular hierarchy" % (self, src) + ) p_block = p_block.parent_block() # record the components and the non-component objects added # to the block @@ -887,39 +808,41 @@ def transfer_attributes_from(self, src): src_raw_dict = src.__dict__ del_src_comp = src.del_component elif isinstance(src, Mapping): - src_comp_map = {k: v for k, v in src.items() - if isinstance(v, Component)} + src_comp_map = {k: v for k, v in src.items() if isinstance(v, Component)} src_raw_dict = src del_src_comp = lambda x: None else: raise ValueError( "_BlockData.transfer_attributes_from(): expected a " - "Block or dict; received %s" % (type(src).__name__,)) + "Block or dict; received %s" % (type(src).__name__,) + ) if src_comp_map: # Filter out any components from src - src_raw_dict = {k: v for k, v in src_raw_dict.items() - if k not in src_comp_map} + src_raw_dict = { + k: v for k, v in src_raw_dict.items() if k not in src_comp_map + } # Use component_map for the components to preserve decl_order # Note that we will move any reserved components over as well as # any user-defined components. There is a bit of trust here # that the user knows what they are doing. with self._declare_reserved_components(): - for k,v in src_comp_map.items(): + for k, v in src_comp_map.items(): if k in self._decl: self.del_component(k) del_src_comp(k) - self.add_component(k,v) + self.add_component(k, v) # Because Blocks are not slotized and we allow the # assignment of arbitrary data to Blocks, we will move over # any other unrecognized entries in the object's __dict__: for k, v in src_raw_dict.items(): - if ( k not in self._Block_reserved_words # user-defined - or not hasattr(self, k) # reserved, but not present - or k in self._decl # reserved, but a component and the - # incoming thing is data (attempt to - # set the value) + if ( + k not in self._Block_reserved_words # user-defined + or not hasattr(self, k) # reserved, but not present + or k in self._decl # reserved, but a component and the + # incoming thing is data (attempt to + # set the value) ): setattr(self, k, v) @@ -934,28 +857,37 @@ def _add_implicit_sets(self, val): # if _component_sets is not None: for ctr, tset in enumerate(_component_sets): - if tset.parent_component().parent_block() is None \ - and not isinstance(tset.parent_component(), GlobalSetBase): + if tset.parent_component().parent_block() is None and not isinstance( + tset.parent_component(), GlobalSetBase + ): self.add_component("%s_index_%d" % (val.local_name, ctr), tset) - if getattr(val, '_index_set', None) is not None \ - and isinstance(val._index_set, _SetDataBase) \ - and val._index_set.parent_component().parent_block() is None \ - and not isinstance(val._index_set.parent_component(), GlobalSetBase): - self.add_component("%s_index" % (val.local_name,), val._index_set.parent_component()) - if getattr(val, 'initialize', None) is not None \ - and isinstance(val.initialize, _SetDataBase) \ - and val.initialize.parent_component().parent_block() is None \ - and not isinstance(val.initialize.parent_component(), GlobalSetBase): - self.add_component("%s_index_init" % (val.local_name,), val.initialize.parent_component()) - if getattr(val, 'domain', None) is not None \ - and isinstance(val.domain, _SetDataBase) \ - and val.domain.parent_block() is None \ - and not isinstance(val.domain, GlobalSetBase): + if ( + getattr(val, '_index_set', None) is not None + and isinstance(val._index_set, _SetDataBase) + and val._index_set.parent_component().parent_block() is None + and not isinstance(val._index_set.parent_component(), GlobalSetBase) + ): + self.add_component( + "%s_index" % (val.local_name,), val._index_set.parent_component() + ) + if ( + getattr(val, 'initialize', None) is not None + and isinstance(val.initialize, _SetDataBase) + and val.initialize.parent_component().parent_block() is None + and not isinstance(val.initialize.parent_component(), GlobalSetBase) + ): + self.add_component( + "%s_index_init" % (val.local_name,), val.initialize.parent_component() + ) + if ( + getattr(val, 'domain', None) is not None + and isinstance(val.domain, _SetDataBase) + and val.domain.parent_block() is None + and not isinstance(val.domain, GlobalSetBase) + ): self.add_component("%s_domain" % (val.local_name,), val.domain) - def collect_ctypes(self, - active=None, - descend_into=True): + def collect_ctypes(self, active=None, descend_into=True): """ Count all component types stored on or under this block. @@ -974,19 +906,20 @@ def collect_ctypes(self, """ assert active in (True, None) ctypes = set() - for block in self.block_data_objects(active=active, - descend_into=descend_into, - sort=SortComponents.unsorted): + for block in self.block_data_objects( + active=active, descend_into=descend_into, sort=SortComponents.UNSORTED + ): if active is None: ctypes.update(block._ctypes) else: assert active is True for ctype in block._ctypes: for component in block.component_data_objects( - ctype=ctype, - active=True, - descend_into=False, - sort=SortComponents.unsorted): + ctype=ctype, + active=True, + descend_into=False, + sort=SortComponents.UNSORTED, + ): # We only need to verify that there is at least # one active data member ctypes.add(ctype) @@ -1063,16 +996,19 @@ def add_component(self, name, val): # if not val.valid_model_component(): raise RuntimeError( - "Cannot add '%s' as a component to a block" % str(type(val))) + "Cannot add '%s' as a component to a block" % str(type(val)) + ) if name in self._Block_reserved_words: - raise ValueError("Attempting to declare a block component using " - "the name of a reserved attribute:\n\t%s" - % (name,)) + raise ValueError( + "Attempting to declare a block component using " + "the name of a reserved attribute:\n\t%s" % (name,) + ) if name in self.__dict__: raise RuntimeError( "Cannot add component '%s' (type %s) to block '%s': a " "component by that name (type %s) is already defined." - % (name, type(val), self.name, type(getattr(self, name)))) + % (name, type(val), self.name, type(getattr(self, name))) + ) # # Skip the add_component() logic if this is a # component type that is suppressed. @@ -1088,20 +1024,30 @@ def add_component(self, name, val): if val._parent() is self: msg = """ Attempting to re-assign the component '%s' to the same -block under a different name (%s).""" % (val.name, name) +block under a different name (%s).""" % ( + val.name, + name, + ) else: msg = """ Re-assigning the component '%s' from block '%s' to -block '%s' as '%s'.""" % (val._name, val._parent().name, - self.name, name) +block '%s' as '%s'.""" % ( + val._name, + val._parent().name, + self.name, + name, + ) - raise RuntimeError("""%s + raise RuntimeError( + """%s This behavior is not supported by Pyomo; components must have a single owning block (or model), and a component may not appear multiple times in a block. If you want to re-name or move this component, use the block del_component() and add_component() methods. -""" % (msg.strip(),)) +""" + % (msg.strip(),) + ) # # If the new component is a Block, then there is the chance that # it is the model(), and assigning it would create a circular @@ -1112,8 +1058,8 @@ def add_component(self, name, val): if isinstance(val, Block) and val is self.model(): raise ValueError( "Cannot assign the top-level block as a subblock of one of " - "its children (%s): creates a circular hierarchy" - % (self,)) + "its children (%s): creates a circular hierarchy" % (self,) + ) # # Set the name and parent pointer of this component. # @@ -1183,8 +1129,9 @@ def add_component(self, name, val): """As of Pyomo 4.0, Pyomo components no longer support implicit rules. You defined a component (%s) that appears to rely on an implicit rule (%s). -Components must now specify their rules explicitly using 'rule=' keywords.""" % - (val.name, _test)) +Components must now specify their rules explicitly using 'rule=' keywords.""" + % (val.name, _test) + ) # # Don't reconstruct if this component has already been constructed. # This allows a user to move a component from one block to @@ -1226,19 +1173,25 @@ def add_component(self, name, val): try: _blockName = "Block '%s'" % self.name except: - _blockName = "Block '%s[...]'" \ - % self.parent_component().name - logger.debug("Constructing %s '%s' on %s from data=%s", - val.__class__.__name__, name, - _blockName, str(data)) + _blockName = "Block '%s[...]'" % self.parent_component().name + logger.debug( + "Constructing %s '%s' on %s from data=%s", + val.__class__.__name__, + name, + _blockName, + str(data), + ) try: val.construct(data) except: err = sys.exc_info()[1] logger.error( "Constructing component '%s' from data=%s failed:\n%s: %s", - str(val.name), str(data).strip(), - type(err).__name__, err) + str(val.name), + str(data).strip(), + type(err).__name__, + err, + ) raise if generate_debug_messages: if _blockName[-1] == "'": @@ -1247,8 +1200,9 @@ def add_component(self, name, val): _blockName = "'" + _blockName + '.' + name + "'" _out = StringIO() val.pprint(ostream=_out) - logger.debug("Constructed component '%s':\n%s" - % (_blockName, _out.getvalue())) + logger.debug( + "Constructed component '%s':\n%s" % (_blockName, _out.getvalue()) + ) def del_component(self, name_or_object): """ @@ -1266,8 +1220,8 @@ def del_component(self, name_or_object): name = obj.local_name if name in self._Block_reserved_words: raise ValueError( - "Attempting to delete a reserved block component:\n\t%s" - % (obj.name,)) + "Attempting to delete a reserved block component:\n\t%s" % (obj.name,) + ) # Replace the component in the master list with a None placeholder idx = self._decl[name] @@ -1286,15 +1240,16 @@ def del_component(self, name_or_object): # Now that this component is not in the _decl map, we can call # delattr as usual. # - #del self.__dict__[name] + # del self.__dict__[name] # # Note: 'del self.__dict__[name]' is inappropriate here. The # correct way to add the attribute is to delegate the work to # the next class up the MRO. super(_BlockData, self).__delattr__(name) - def reclassify_component_type(self, name_or_object, new_ctype, - preserve_declaration_order=True): + def reclassify_component_type( + self, name_or_object, new_ctype, preserve_declaration_order=True + ): """ TODO """ @@ -1332,8 +1287,10 @@ def reclassify_component_type(self, name_or_object, new_ctype, prev = tmp tmp = self._decl_order[tmp][1] - self._decl_order[prev] = (self._decl_order[prev][0], - self._decl_order[idx][1]) + self._decl_order[prev] = ( + self._decl_order[prev][0], + self._decl_order[idx][1], + ) if ctype_info[1] == idx: ctype_info[1] = prev @@ -1364,7 +1321,7 @@ def reclassify_component_type(self, name_or_object, new_ctype, self._decl_order[prev] = (self._decl_order[prev][0], idx) self._decl_order[idx] = (obj, tmp) - def clone(self): + def clone(self, memo=None): """ TODO """ @@ -1381,22 +1338,23 @@ def clone(self): # NonNegativeReals, etc) that are not "owned" by any blocks and # should be preserved as singletons. # - try: - new_block = copy.deepcopy( - self, { - '__block_scope__': {id(self): True, id(None): False}, - '__paranoid__': False, - }) - except: - new_block = copy.deepcopy( - self, { - '__block_scope__': {id(self): True, id(None): False}, - '__paranoid__': True, - }) + pc = self.parent_component() + if pc is self: + parent = self.parent_block() + else: + parent = pc + + if memo is None: + memo = {} + memo['__block_scope__'] = {id(self): True, id(None): False} + memo[id(parent)] = parent + + with PauseGC(): + new_block = copy.deepcopy(self, memo) # We need to "detangle" the new block from the original block # hierarchy - if self.parent_component() is self: + if pc is self: new_block._parent = None else: new_block._component = None @@ -1511,7 +1469,6 @@ def _component_data_iteritems(self, ctype, active, sort, dedup): dedup: _DeduplicateInfo Deduplicator to prevent returning the same _ComponentData twice """ - _sort_indices = SortComponents.sort_indices(sort) for name, comp in PseudoMap(self, ctype, active, sort).items(): # NOTE: Suffix has a dict interface (something other derived # non-indexed Components may do as well), so we don't want @@ -1521,9 +1478,7 @@ def _component_data_iteritems(self, ctype, active, sort, dedup): # processing for the scalar components to catch the case # where there are "sparse scalar components" if comp.is_indexed(): - _items = comp.items() - if _sort_indices: - _items = sorted_robust(_items, key=itemgetter(0)) + _items = comp.items(sort) elif hasattr(comp, '_data'): # This is a Scalar component, which may be empty (e.g., # from Constraint.Skip on a scalar Constraint). Only @@ -1539,8 +1494,11 @@ def _component_data_iteritems(self, ctype, active, sort, dedup): if active is None or not isinstance(comp, ActiveIndexedComponent): _items = (((name, idx), compData) for idx, compData in _items) else: - _items = (((name, idx), compData) for idx, compData in _items - if compData.active == active) + _items = ( + ((name, idx), compData) + for idx, compData in _items + if compData.active == active + ) yield from dedup.unique(comp, _items, False) @@ -1562,15 +1520,6 @@ def _component_data_itervalues(self, ctype, active, sort, dedup): dedup: _DeduplicateInfo Deduplicator to prevent returning the same _ComponentData twice """ - if SortComponents.sort_indices(sort): - # We need the indices so that we can correctly sort. Fall - # back on _component_data_iteritems. - yield from map( - itemgetter(1), - self._component_data_iteritems(ctype, active, sort, dedup) - ) - return - for comp in PseudoMap(self, ctype, active, sort).values(): # NOTE: Suffix has a dict interface (something other derived # non-indexed Components may do as well), so we don't want @@ -1580,7 +1529,7 @@ def _component_data_itervalues(self, ctype, active, sort, dedup): # processing for the scalar components to catch the case # where there are "sparse scalar components" if comp.is_indexed(): - _values = comp.values() + _values = comp.values(sort) elif hasattr(comp, '_data'): # This is a Scalar component, which may be empty (e.g., # from Constraint.Skip on a scalar Constraint). Only @@ -1592,53 +1541,62 @@ def _component_data_itervalues(self, ctype, active, sort, dedup): _values = (comp,) if active is not None and isinstance(comp, ActiveIndexedComponent): - _values = filter(lambda cDat: cDat.active == active, _values) + _values = (filter if active else filterfalse)( + attrgetter('active'), _values + ) yield from dedup.unique(comp, _values, True) - @deprecated("The all_components method is deprecated. " - "Use the Block.component_objects() method.", - version="4.1.10486") + @deprecated( + "The all_components method is deprecated. " + "Use the Block.component_objects() method.", + version="4.1.10486", + ) def all_components(self, *args, **kwargs): return self.component_objects(*args, **kwargs) - @deprecated("The active_components method is deprecated. " - "Use the Block.component_objects() method.", - version="4.1.10486") + @deprecated( + "The active_components method is deprecated. " + "Use the Block.component_objects() method.", + version="4.1.10486", + ) def active_components(self, *args, **kwargs): kwargs['active'] = True return self.component_objects(*args, **kwargs) - @deprecated("The all_component_data method is deprecated. " - "Use the Block.component_data_objects() method.", - version="4.1.10486") + @deprecated( + "The all_component_data method is deprecated. " + "Use the Block.component_data_objects() method.", + version="4.1.10486", + ) def all_component_data(self, *args, **kwargs): return self.component_data_objects(*args, **kwargs) - @deprecated("The active_component_data method is deprecated. " - "Use the Block.component_data_objects() method.", - version="4.1.10486") + @deprecated( + "The active_component_data method is deprecated. " + "Use the Block.component_data_objects() method.", + version="4.1.10486", + ) def active_component_data(self, *args, **kwargs): kwargs['active'] = True return self.component_data_objects(*args, **kwargs) - def component_objects(self, ctype=None, active=None, sort=False, - descend_into=True, descent_order=None): + def component_objects( + self, ctype=None, active=None, sort=False, descend_into=True, descent_order=None + ): """ Return a generator that iterates through the component objects in a block. By default, the generator recursively descends into sub-blocks. """ for _block in self.block_data_objects( - active, sort, descend_into, descent_order): + active, sort, descend_into, descent_order + ): yield from _block.component_map(ctype, active, sort).values() - def component_data_objects(self, - ctype=None, - active=None, - sort=False, - descend_into=True, - descent_order=None): + def component_data_objects( + self, ctype=None, active=None, sort=False, descend_into=True, descent_order=None + ): """ Return a generator that iterates through the component data objects for all components in a @@ -1647,16 +1605,19 @@ def component_data_objects(self, """ dedup = _DeduplicateInfo() for _block in self.block_data_objects( - active, sort, descend_into, descent_order): - yield from _block._component_data_itervalues( - ctype, active, sort, dedup) - - def component_data_iterindex(self, - ctype=None, - active=None, - sort=False, - descend_into=True, - descent_order=None): + active, sort, descend_into, descent_order + ): + yield from _block._component_data_itervalues(ctype, active, sort, dedup) + + @deprecated( + "The component_data_iterindex method is deprecated. " + "Components now know their index, so it is more efficient to use the " + "Block.component_data_objects() method followed by .index().", + version="6.6.0", + ) + def component_data_iterindex( + self, ctype=None, active=None, sort=False, descend_into=True, descent_order=None + ): """ Return a generator that returns a tuple for each component data object in a block. By default, this @@ -1668,36 +1629,39 @@ def component_data_iterindex(self, """ dedup = _DeduplicateInfo() for _block in self.block_data_objects( - active, sort, descend_into, descent_order): - yield from _block._component_data_iteritems( - ctype, active, sort, dedup) - - @deprecated("The all_blocks method is deprecated. " - "Use the Block.block_data_objects() method.", - version="4.1.10486") + active, sort, descend_into, descent_order + ): + yield from _block._component_data_iteritems(ctype, active, sort, dedup) + + @deprecated( + "The all_blocks method is deprecated. " + "Use the Block.block_data_objects() method.", + version="4.1.10486", + ) def all_blocks(self, *args, **kwargs): return self.block_data_objects(*args, **kwargs) - @deprecated("The active_blocks method is deprecated. " - "Use the Block.block_data_objects() method.", - version="4.1.10486") + @deprecated( + "The active_blocks method is deprecated. " + "Use the Block.block_data_objects() method.", + version="4.1.10486", + ) def active_blocks(self, *args, **kwargs): kwargs['active'] = True return self.block_data_objects(*args, **kwargs) - def block_data_objects(self, - active=None, - sort=False, - descend_into=True, - descent_order=None): - """Generator returning this block and any matching sub-blocks. + def block_data_objects( + self, active=None, sort=False, descend_into=True, descent_order=None + ): + """Returns this block and any matching sub-blocks. This is roughly equivalent to + .. code-block:: python + iter(block for block in itertools.chain( - [self], self.component_data_objects(descend_into, ...)) - if block.active == active - ) + [self], self.component_data_objects(descend_into, ...)) + if block.active == active) Notes ----- @@ -1723,6 +1687,10 @@ def block_data_objects(self, The strategy used to walk the block hierarchy. Defaults to `TraversalStrategy.PrefixDepthFirstSearch`. + Returns + ------- + tuple or generator + """ # TODO: we should determine if that is desirable behavior(it is # historical, so there are backwards compatibility arguments to @@ -1730,10 +1698,9 @@ def block_data_objects(self, # component_data_objects, it might be desirable to always return # self. if active is not None and self.active != active: - return + return () if not descend_into: - yield self - return + return (self,) if descend_into is True: ctype = (Block,) @@ -1743,18 +1710,18 @@ def block_data_objects(self, ctype = descend_into dedup = _DeduplicateInfo() - if descent_order is None or \ - descent_order == TraversalStrategy.PrefixDepthFirstSearch: + if ( + descent_order is None + or descent_order == TraversalStrategy.PrefixDepthFirstSearch + ): walker = self._prefix_dfs_iterator(ctype, active, sort, dedup) elif descent_order == TraversalStrategy.BreadthFirstSearch: walker = self._bfs_iterator(ctype, active, sort, dedup) elif descent_order == TraversalStrategy.PostfixDepthFirstSearch: walker = self._postfix_dfs_iterator(ctype, active, sort, dedup) else: - raise RuntimeError("unrecognized traversal strategy: %s" - % (descent_order, )) - yield from walker - + raise RuntimeError("unrecognized traversal strategy: %s" % (descent_order,)) + return walker def _prefix_dfs_iterator(self, ctype, active, sort, dedup): """Helper function implementing a non-recursive prefix order @@ -1770,18 +1737,19 @@ def _prefix_dfs_iterator(self, ctype, active, sort, dedup): dedup.seen_data.add(id(self)) PM = PseudoMap(self, ctype, active, sort) - _stack = [(self,).__iter__(), ] - while _stack: + _stack = (None, (self,).__iter__()) + while _stack is not None: try: - PM._block = _block = next(_stack[-1]) + PM._block = _block = next(_stack[1]) yield _block if not PM: continue - _stack.append(_block._component_data_itervalues( - ctype, active, sort, dedup) + _stack = ( + _stack, + _block._component_data_itervalues(ctype, active, sort, dedup), ) except StopIteration: - _stack.pop() + _stack = _stack[0] def _postfix_dfs_iterator(self, ctype, active, sort, dedup): """ @@ -1797,18 +1765,22 @@ def _postfix_dfs_iterator(self, ctype, active, sort, dedup): # the list of "seen" IDs dedup.seen_data.add(id(self)) - _stack = [ - (self, self._component_data_itervalues(ctype, active, sort, dedup)) - ] - while _stack: + _stack = ( + None, + self, + self._component_data_itervalues(ctype, active, sort, dedup), + ) + while _stack is not None: try: - _sub = next(_stack[-1][1]) - _stack.append(( + _sub = next(_stack[2]) + _stack = ( + _stack, _sub, - _sub._component_data_itervalues(ctype, active, sort, dedup) - )) + _sub._component_data_itervalues(ctype, active, sort, dedup), + ) except StopIteration: - yield _stack.pop()[0] + yield _stack[1] + _stack = _stack[0] def _bfs_iterator(self, ctype, active, sort, dedup): """Helper function implementing a non-recursive breadth-first search. @@ -1824,38 +1796,15 @@ def _bfs_iterator(self, ctype, active, sort, dedup): # the list of "seen" IDs dedup.seen_data.add(id(self)) - if SortComponents.sort_indices(sort): - if SortComponents.sort_names(sort): - sorter = itemgetter(1, 2) - else: - sorter = itemgetter(0, 2) - elif SortComponents.sort_names(sort): - sorter = itemgetter(1) - else: - sorter = None - - _levelQueue = {0: (((None, None, self,),),)} - while _levelQueue: - _level = min(_levelQueue) - _queue = _levelQueue.pop(_level) - if not _queue: - break - if sorter is None: - _queue = _levelWalker(_queue) - else: - _queue = sorted(_sortingLevelWalker(_queue), key=sorter) - - _level += 1 - _levelQueue[_level] = [] - # JDS: rework the _levelQueue logic so we don't need to - # merge the key/value returned by the new - # component_data_iterindex() method. - for _items in _queue: - yield _items[-1] # _block - _levelQueue[_level].append( - tmp[0] + (tmp[1],) for tmp in - _items[-1]._component_data_iteritems( - ctype, active, sort, dedup) + _thisLevel = None + _nextLevel = [(self,)] + while _nextLevel: + _thisLevel = _nextLevel + _nextLevel = [] + for block in chain(*_thisLevel): + yield block + _nextLevel.append( + block._component_data_itervalues(ctype, active, sort, dedup) ) def fix_all_vars(self): @@ -1891,6 +1840,7 @@ def _pprint_blockdata_components(self, ostream): # that expected by a user. # import pyomo.core.base.component_order + items = list(pyomo.core.base.component_order.items) items_set = set(items) items_set.add(Block) @@ -1915,8 +1865,7 @@ def _pprint_blockdata_components(self, ostream): # # NOTE: these conditional checks should not be hard-coded. # - ostream.write("%d %s Declarations\n" - % (len(keys), item.__name__)) + ostream.write("%d %s Declarations\n" % (len(keys), item.__name__)) for key in keys: self.component(key).pprint(ostream=indented_ostream) ostream.write("\n") @@ -1924,9 +1873,10 @@ def _pprint_blockdata_components(self, ostream): # Model Order # decl_order_keys = list(self.component_map().keys()) - ostream.write("%d Declarations: %s\n" - % (len(decl_order_keys), - ' '.join(str(x) for x in decl_order_keys))) + ostream.write( + "%d Declarations: %s\n" + % (len(decl_order_keys), ' '.join(str(x) for x in decl_order_keys)) + ) def display(self, filename=None, ostream=None, prefix=""): """ @@ -1950,10 +1900,13 @@ def display(self, filename=None, ostream=None, prefix=""): # case for blocks below. I am not implementing this now as it # would break tests just before a release. [JDS 1/7/15] import pyomo.core.base.component_order + for item in pyomo.core.base.component_order.display_items: # ostream.write(prefix + "\n") - ostream.write(prefix + " %s:\n" % pyomo.core.base.component_order.display_name[item]) + ostream.write( + prefix + " %s:\n" % pyomo.core.base.component_order.display_name[item] + ) ACTIVE = self.component_map(item, active=True) if not ACTIVE: ostream.write(prefix + " None\n") @@ -1966,8 +1919,8 @@ def display(self, filename=None, ostream=None, prefix=""): if ACTIVE: ostream.write(prefix + "\n") ostream.write( - prefix + " %s:\n" % - pyomo.core.base.component_order.display_name[item]) + prefix + " %s:\n" % pyomo.core.base.component_order.display_name[item] + ) for obj in ACTIVE.values(): obj.display(prefix=prefix + " ", ostream=ostream) @@ -1981,11 +1934,7 @@ def valid_problem_types(self): Model object.""" return [ProblemFormat.pyomo] - def write(self, - filename=None, - format=None, - solver_capability=None, - io_options={}): + def write(self, filename=None, format=None, solver_capability=None, io_options={}): """ Write the model to a file, with a given format. """ @@ -2010,27 +1959,28 @@ def write(self, raise ValueError( "Could not infer file format from file name '%s'.\n" "Either provide a name with a recognized extension " - "or specify the format using the 'format' argument." - % filename) + "or specify the format using the 'format' argument." % filename + ) else: format = _format elif format != _format and _format is not None: logger.warning( "Filename '%s' likely does not match specified " - "file format (%s)" % (filename, format)) + "file format (%s)" % (filename, format) + ) problem_writer = WriterFactory(format) if problem_writer is None: raise ValueError( "Cannot write model in format '%s': no model " - "writer registered for that format" - % str(format)) + "writer registered for that format" % str(format) + ) if solver_capability is None: - def solver_capability(x): return True - (filename, smap) = problem_writer(self, - filename, - solver_capability, - io_options) + + def solver_capability(x): + return True + + (filename, smap) = problem_writer(self, filename, solver_capability, io_options) smap_id = id(smap) if not hasattr(self, 'solutions'): # This is a bit of a hack. The write() method was moved @@ -2042,6 +1992,7 @@ def solver_capability(x): return True # dependency (we only need it here because we store the # SymbolMap returned by the writer in the solutions). from pyomo.core.base.PyomoModel import ModelSolutions + self.solutions = ModelSolutions(self) self.solutions.add_symbol_map(smap) @@ -2050,21 +2001,28 @@ def solver_capability(x): return True "Writing model '%s' to file '%s' with format %s", self.name, str(filename), - str(format)) + str(format), + ) return filename, smap_id def _create_objects_for_deepcopy(self, memo, component_list): - super()._create_objects_for_deepcopy(memo, component_list) - # Blocks (and block-like things) need to pre-populate all - # Components / ComponentData objects to help prevent deepcopy() - # from violating the Python recursion limit. This step is - # recursive; however, we do not expect "super deep" Pyomo block - # hierarchies, so should be okay. - for comp in self.component_objects(descend_into=False): - comp._create_objects_for_deepcopy(memo, component_list) - - -@ModelComponentFactory.register("A component that contains one or more model components.") + _new = self.__class__.__new__(self.__class__) + _ans = memo.setdefault(id(self), _new) + if _ans is _new: + component_list.append(self) + # Blocks (and block-like things) need to pre-populate all + # Components / ComponentData objects to help prevent + # deepcopy() from violating the Python recursion limit. + # This step is recursive; however, we do not expect "super + # deep" Pyomo block hierarchies, so should be okay. + for comp in self.component_map().values(): + comp._create_objects_for_deepcopy(memo, component_list) + return _ans + + +@ModelComponentFactory.register( + "A component that contains one or more model components." +) class Block(ActiveIndexedComponent): """ Blocks are indexed components that contain other components @@ -2087,8 +2045,10 @@ def __new__(cls, *args, **kwds): # `options` is ignored since it is deprecated @overload - def __init__(self, *indexes, rule=None, concrete=False, dense=True, - name=None, doc=None): ... + def __init__( + self, *indexes, rule=None, concrete=False, dense=True, name=None, doc=None + ): + ... def __init__(self, *args, **kwargs): """Constructor""" @@ -2108,13 +2068,19 @@ def __init__(self, *args, **kwargs): "The Block 'options=' keyword is deprecated. " "Equivalent functionality can be obtained by wrapping " "the rule function to add the options dictionary to " - "the function arguments", version='5.7.2') + "the function arguments", + version='5.7.2', + ) if self.is_indexed(): + def rule_wrapper(model, *_idx): return _rule(model, *_idx, **_options) + else: + def rule_wrapper(model): return _rule(model, **_options) + self._rule = Initializer(rule_wrapper) else: self._rule = Initializer(_rule) @@ -2165,8 +2131,12 @@ def construct(self, data=None): Initialize the block """ if is_debug_set(logger): - logger.debug("Constructing %s '%s', from data=%s", - self.__class__.__name__, self.name, str(data)) + logger.debug( + "Constructing %s '%s', from data=%s", + self.__class__.__name__, + self.name, + str(data), + ) if self._constructed: return timer = ConstructionTimer(self) @@ -2190,7 +2160,8 @@ def construct(self, data=None): if self.is_indexed(): # We can only populate Blocks with finite indexing sets if self.index_set().isfinite() and ( - self._dense or self._rule is not None): + self._dense or self._rule is not None + ): for _idx in self.index_set(): # Trigger population & call the rule self._getitem_when_not_present(_idx) @@ -2264,7 +2235,6 @@ def display(self, filename=None, ostream=None, prefix=""): class ScalarBlock(_BlockData, Block): - def __init__(self, *args, **kwds): _BlockData.__init__(self, component=self) Block.__init__(self, *args, **kwds) @@ -2285,7 +2255,6 @@ class SimpleBlock(metaclass=RenamedClass): class IndexedBlock(Block): - def __init__(self, *args, **kwds): Block.__init__(self, *args, **kwds) @@ -2293,39 +2262,50 @@ def __init__(self, *args, **kwds): # # Deprecated functions. # -@deprecated("generate_cuid_names() is deprecated. " - "Use the ComponentUID.generate_cuid_string_map() static method", - version="5.7.2") +@deprecated( + "generate_cuid_names() is deprecated. " + "Use the ComponentUID.generate_cuid_string_map() static method", + version="5.7.2", +) def generate_cuid_names(block, ctype=None, descend_into=True): return ComponentUID.generate_cuid_string_map(block, ctype, descend_into) -@deprecated("The active_components function is deprecated. " - "Use the Block.component_objects() method.", - version="4.1.10486") + +@deprecated( + "The active_components function is deprecated. " + "Use the Block.component_objects() method.", + version="4.1.10486", +) def active_components(block, ctype, sort_by_names=False, sort_by_keys=False): return block.component_objects(ctype, active=True, sort=sort_by_names) -@deprecated("The components function is deprecated. " - "Use the Block.component_objects() method.", - version="4.1.10486") +@deprecated( + "The components function is deprecated. " + "Use the Block.component_objects() method.", + version="4.1.10486", +) def components(block, ctype, sort_by_names=False, sort_by_keys=False): return block.component_objects(ctype, active=False, sort=sort_by_names) -@deprecated("The active_components_data function is deprecated. " - "Use the Block.component_data_objects() method.", - version="4.1.10486") -def active_components_data(block, ctype, - sort=None, sort_by_keys=False, sort_by_names=False): +@deprecated( + "The active_components_data function is deprecated. " + "Use the Block.component_data_objects() method.", + version="4.1.10486", +) +def active_components_data( + block, ctype, sort=None, sort_by_keys=False, sort_by_names=False +): return block.component_data_objects(ctype=ctype, active=True, sort=sort) -@deprecated("The components_data function is deprecated. " - "Use the Block.component_data_objects() method.", - version="4.1.10486") -def components_data(block, ctype, - sort=None, sort_by_keys=False, sort_by_names=False): +@deprecated( + "The components_data function is deprecated. " + "Use the Block.component_data_objects() method.", + version="4.1.10486", +) +def components_data(block, ctype, sort=None, sort_by_keys=False, sort_by_names=False): return block.component_data_objects(ctype=ctype, active=False, sort=sort) @@ -2337,15 +2317,13 @@ def components_data(block, ctype, class _IndexedCustomBlockMeta(type): - """Metaclass for creating an indexed custom block. - """ + """Metaclass for creating an indexed custom block.""" pass class _ScalarCustomBlockMeta(type): - """Metaclass for creating a scalar custom block. - """ + """Metaclass for creating a scalar custom block.""" def __new__(meta, name, bases, dct): def __init__(self, *args, **kwargs): @@ -2360,39 +2338,30 @@ def __init__(self, *args, **kwargs): class CustomBlock(Block): - """ The base class used by instances of custom block components - """ + """The base class used by instances of custom block components""" def __init__(self, *args, **kwds): if self._default_ctype is not None: kwds.setdefault('ctype', self._default_ctype) Block.__init__(self, *args, **kwds) - def __new__(cls, *args, **kwds): - if cls.__name__.startswith('_Indexed') or \ - cls.__name__.startswith('_Scalar'): + if cls.__name__.startswith('_Indexed') or cls.__name__.startswith('_Scalar'): # we are entering here the second time (recursive) # therefore, we need to create what we have return super(CustomBlock, cls).__new__(cls) if not args or (args[0] is UnindexedComponent_set and len(args) == 1): n = _ScalarCustomBlockMeta( - "_Scalar%s" % (cls.__name__,), - (cls._ComponentDataClass, cls), - {} + "_Scalar%s" % (cls.__name__,), (cls._ComponentDataClass, cls), {} ) return n.__new__(n) else: - n = _IndexedCustomBlockMeta( - "_Indexed%s" % (cls.__name__,), - (cls,), - {} - ) + n = _IndexedCustomBlockMeta("_Indexed%s" % (cls.__name__,), (cls,), {}) return n.__new__(n) def declare_custom_block(name, new_ctype=None): - """ Decorator to declare components for a custom block data class + """Decorator to declare components for a custom block data class >>> @declare_custom_block(name=FooBlock) ... class FooBlockData(_BlockData): @@ -2425,8 +2394,10 @@ def proc_dec(cls): elif type(new_ctype) is type: c._default_ctype = new_ctype else: - raise ValueError("Expected new_ctype to be either type " - "or 'True'; received: %s" % (new_ctype,)) + raise ValueError( + "Expected new_ctype to be either type " + "or 'True'; received: %s" % (new_ctype,) + ) # Register the new Block type in the same module as the BlockData setattr(sys.modules[cls.__module__], name, c) @@ -2440,4 +2411,3 @@ def proc_dec(cls): return cls return proc_dec - diff --git a/pyomo/core/base/blockutil.py b/pyomo/core/base/blockutil.py index 446f9cc71e9..21e6ac4db90 100644 --- a/pyomo/core/base/blockutil.py +++ b/pyomo/core/base/blockutil.py @@ -10,7 +10,7 @@ # ___________________________________________________________________________ # the purpose of this file is to collect all utility methods that compute -# attributes of blocks, based on their contents. +# attributes of blocks, based on their contents. __all__ = ['has_discrete_variables'] @@ -18,8 +18,8 @@ from pyomo.core.base import Var -@deprecated("This function has been moved to `pyomo.util.blockutil`", - version='5.6.9') +@deprecated("This function has been moved to `pyomo.util.blockutil`", version='5.6.9') def has_discrete_variables(block): from pyomo.util.blockutil import has_discrete_variables + return has_discrete_variables(block) diff --git a/pyomo/core/base/boolean_var.py b/pyomo/core/base/boolean_var.py index fc61b7a0431..e2aebb4e466 100644 --- a/pyomo/core/base/boolean_var.py +++ b/pyomo/core/base/boolean_var.py @@ -12,18 +12,17 @@ import logging from weakref import ref as weakref_ref, ReferenceType -from pyomo.common.deprecation import RenamedClass +from pyomo.common.deprecation import deprecation_warning, RenamedClass from pyomo.common.log import is_debug_set -from pyomo.common.timing import ConstructionTimer from pyomo.common.modeling import unique_component_name, NOTSET -from pyomo.common.deprecation import deprecation_warning +from pyomo.common.timing import ConstructionTimer from pyomo.core.staleflag import StaleFlagManager from pyomo.core.expr.boolean_value import BooleanValue +from pyomo.core.expr import GetItemExpression from pyomo.core.expr.numvalue import value from pyomo.core.base.component import ComponentData, ModelComponentFactory from pyomo.core.base.global_set import UnindexedComponent_index -from pyomo.core.base.indexed_component import (IndexedComponent, - UnindexedComponent_set) +from pyomo.core.base.indexed_component import IndexedComponent, UnindexedComponent_set from pyomo.core.base.misc import apply_indexed_rule from pyomo.core.base.set import Set, BooleanSet, Binary from pyomo.core.base.util import is_functor @@ -34,6 +33,7 @@ _logical_var_types = {bool, type(None)} + class _DeprecatedImplicitAssociatedBinaryVariable(object): __slots__ = ('_boolvar',) @@ -42,27 +42,31 @@ def __init__(self, boolvar): def __call__(self): deprecation_warning( - "Relying on core.logical_to_linear to transform " - "BooleanVars that do not appear in LogicalConstraints " - "is deprecated. Please associate your own binaries if " - "you have BooleanVars not used in logical expressions.", - version='6.2') + "Relying on core.logical_to_linear to transform " + "BooleanVars that do not appear in LogicalConstraints " + "is deprecated. Please associate your own binaries if " + "you have BooleanVars not used in logical expressions.", + version='6.2', + ) parent_block = self._boolvar().parent_block() new_var = Var(domain=Binary) parent_block.add_component( - unique_component_name(parent_block, - self._boolvar().local_name + "_asbinary"), - new_var) + unique_component_name( + parent_block, self._boolvar().local_name + "_asbinary" + ), + new_var, + ) self._boolvar()._associated_binary = None self._boolvar().associate_binary_var(new_var) return new_var def __getstate__(self): - return {'_boolvar': self._boolvar()} + return self._boolvar() def __setstate__(self, state): - self._boolvar = weakref_ref(state['_boolvar']) + self._boolvar = weakref_ref(state) + class _BooleanVarData(ComponentData, BooleanValue): """ @@ -79,11 +83,11 @@ class _BooleanVarData(ComponentData, BooleanValue): other interrogation. value The numeric value of this variable. """ + __slots__ = () def __init__(self, component=None): - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET def is_fixed(self): @@ -114,8 +118,9 @@ def set_value(self, val, skip_validation=False): # name of efficiency. if val.__class__ not in _logical_var_types: if not skip_validation: - logger.warning("implicitly casting '%s' value %s to bool" - % (self.name, val)) + logger.warning( + "implicitly casting '%s' value %s to bool" % (self.name, val) + ) val = bool(val) self._value = val self._stale = StaleFlagManager.get_flag(self._stale) @@ -123,7 +128,6 @@ def set_value(self, val, skip_validation=False): def clear(self): self.value = None - def __call__(self, exception=True): """Compute the value of this variable.""" return self.value @@ -161,7 +165,7 @@ def fix(self, value=NOTSET, skip_validation=False): self.set_value(value, skip_validation) def unfix(self): - """Unfix this varaible (treat as variable) + """Unfix this variable (treat as variable) This sets the `fixed` indicator to False. @@ -173,6 +177,18 @@ def free(self): return self.unfix() +def _associated_binary_mapper(encode, val): + if val is None: + return None + if encode: + if val.__class__ is not _DeprecatedImplicitAssociatedBinaryVariable: + return val() + else: + if val.__class__ is not _DeprecatedImplicitAssociatedBinaryVariable: + return weakref_ref(val) + return val + + class _GeneralBooleanVarData(_BooleanVarData): """ This class defines the data for a single Boolean variable. @@ -197,6 +213,10 @@ class _GeneralBooleanVarData(_BooleanVarData): """ __slots__ = ('_value', 'fixed', '_stale', '_associated_binary') + __autoslot_mappers__ = { + '_associated_binary': _associated_binary_mapper, + '_stale': StaleFlagManager.stale_mapper, + } def __init__(self, component=None): # @@ -205,40 +225,14 @@ def __init__(self, component=None): # - _BooleanVarData # - ComponentData # - BooleanValue - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET self._value = None self.fixed = False - self._stale = 0 # True + self._stale = 0 # True self._associated_binary = None - def __getstate__(self): - state = super().__getstate__() - for i in _GeneralBooleanVarData.__slots__: - state[i] = getattr(self, i) - if isinstance(self._associated_binary, ReferenceType): - state['_associated_binary'] = self._associated_binary() - state['_stale'] = StaleFlagManager.is_stale(self._stale) - return state - - def __setstate__(self, state): - """Restore a picked state into this instance. - - Note: adapted from class ComponentData in pyomo.core.base.component - - """ - if state.pop('_stale', True): - state['_stale'] = 0 - else: - state['_stale'] = StaleFlagManager.get_flag(0) - super().__setstate__(state) - if self._associated_binary is not None and \ - type(self._associated_binary) is not \ - _DeprecatedImplicitAssociatedBinaryVariable: - self._associated_binary = weakref_ref(self._associated_binary) - # # Abstract Interface # @@ -249,6 +243,7 @@ def __setstate__(self, state): def value(self): """Return (or set) the value for this variable.""" return self._value + @value.setter def value(self, val): self.set_value(val) @@ -261,6 +256,7 @@ def domain(self): @property def stale(self): return StaleFlagManager.is_stale(self._stale) + @stale.setter def stale(self, val): if val: @@ -269,23 +265,30 @@ def stale(self, val): self._stale = StaleFlagManager.get_flag(0) def get_associated_binary(self): - """Get the binary _VarData associated with this + """Get the binary _VarData associated with this _GeneralBooleanVarData""" - return self._associated_binary() if self._associated_binary \ - is not None else None + return ( + self._associated_binary() if self._associated_binary is not None else None + ) def associate_binary_var(self, binary_var): """Associate a binary _VarData to this _GeneralBooleanVarData""" - if self._associated_binary is not None and \ - type(self._associated_binary) is not \ - _DeprecatedImplicitAssociatedBinaryVariable: + if ( + self._associated_binary is not None + and type(self._associated_binary) + is not _DeprecatedImplicitAssociatedBinaryVariable + ): raise RuntimeError( "Reassociating BooleanVar '%s' (currently associated " - "with '%s') with '%s' is not allowed" % ( + "with '%s') with '%s' is not allowed" + % ( self.name, self._associated_binary().name - if self._associated_binary is not None else None, - binary_var.name if binary_var is not None else None)) + if self._associated_binary is not None + else None, + binary_var.name if binary_var is not None else None, + ) + ) if binary_var is not None: self._associated_binary = weakref_ref(binary_var) @@ -303,22 +306,22 @@ class BooleanVar(IndexedComponent): variables returned by `initialize`/`rule` (False). Defaults to True. """ + _ComponentDataClass = _GeneralBooleanVarData def __new__(cls, *args, **kwds): if cls != BooleanVar: return super(BooleanVar, cls).__new__(cls) - if not args or (args[0] is UnindexedComponent_set and len(args)==1): + if not args or (args[0] is UnindexedComponent_set and len(args) == 1): return ScalarBooleanVar.__new__(ScalarBooleanVar) else: - return IndexedBooleanVar.__new__(IndexedBooleanVar) + return IndexedBooleanVar.__new__(IndexedBooleanVar) def __init__(self, *args, **kwd): initialize = kwd.pop('initialize', None) initialize = kwd.pop('rule', initialize) self._dense = kwd.pop('dense', True) - kwd.setdefault('ctype', BooleanVar) IndexedComponent.__init__(self, *args, **kwd) @@ -328,16 +331,11 @@ def __init__(self, *args, **kwd): self._value_init_value = None self._value_init_rule = None - if is_functor(initialize) and ( - not isinstance(initialize, BooleanValue)): + if is_functor(initialize) and (not isinstance(initialize, BooleanValue)): self._value_init_rule = initialize else: self._value_init_value = initialize - def is_expression_type(self): - """Returns False because this is not an expression""" - return False - def flag_as_stale(self): """ Set the 'stale' attribute of every variable data object to True. @@ -350,11 +348,12 @@ def get_values(self, include_fixed_values=True): Return a dictionary of index-value pairs. """ if include_fixed_values: - return dict((idx, vardata.value) - for idx, vardata in self._data.items()) - return dict((idx, vardata.value) - for idx, vardata in self._data.items() - if not vardata.fixed) + return dict((idx, vardata.value) for idx, vardata in self._data.items()) + return dict( + (idx, vardata.value) + for idx, vardata in self._data.items() + if not vardata.fixed + ) extract_values = get_values @@ -368,17 +367,16 @@ def set_values(self, new_values, skip_validation=False): for index, new_value in new_values.items(): self[index].set_value(new_value, skip_validation) - def construct(self, data=None): """Construct this component.""" - if is_debug_set(logger): #pragma:nocover + if is_debug_set(logger): # pragma:nocover try: name = str(self.name) except: name = type(self) logger.debug( - "Constructing Variable, name=%s, from data=%s" - % (name, str(data))) + "Constructing Variable, name=%s, from data=%s" % (name, str(data)) + ) if self._constructed: return @@ -452,10 +450,9 @@ def _initialize_members(self, init_set): if self.is_indexed(): for key in init_set: vardata = self._data[key] - val = apply_indexed_rule(self, - self._value_init_rule, - self._parent(), - key) + val = apply_indexed_rule( + self, self._value_init_rule, self._parent(), key + ) val = value(val) vardata.set_value(val) else: @@ -485,37 +482,28 @@ def _initialize_members(self, init_set): def _pprint(self): """ - Print component information. + Print component information. """ - return ( [("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None), - ], - self._data.items(), - ( "Value","Fixed","Stale"), - lambda k, v: [ v.value, - v.fixed, - v.stale, - ] - ) + return ( + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ], + self._data.items(), + ("Value", "Fixed", "Stale"), + lambda k, v: [v.value, v.fixed, v.stale], + ) class ScalarBooleanVar(_GeneralBooleanVarData, BooleanVar): - + """A single variable.""" + def __init__(self, *args, **kwd): _GeneralBooleanVarData.__init__(self, component=self) BooleanVar.__init__(self, *args, **kwd) self._index = UnindexedComponent_index - """ - # Since this class derives from Component and Component.__getstate__ - # just packs up the entire __dict__ into the state dict, we do not - # need to define the __getstate__ or __setstate__ methods. - # We just defer to the super() get/set state. Since all of our - # get/set state methods rely on super() to traverse the MRO, this - # will automatically pick up both the Component and Data base classes. - # - # # Override abstract interface methods to first check for # construction @@ -523,7 +511,6 @@ def __init__(self, *args, **kwd): # NOTE: that we can't provide these errors for # fixed and stale because they are attributes - """ @property def value(self): @@ -533,8 +520,8 @@ def value(self): raise ValueError( "Accessing the value of variable '%s' " "before the Var has been constructed (there " - "is currently no value to return)." - % (self.name)) + "is currently no value to return)." % (self.name) + ) @value.setter def value(self, val): @@ -544,8 +531,8 @@ def value(self, val): raise ValueError( "Setting the value of variable '%s' " "before the Var has been constructed (there " - "is currently nothing to set." - % (self.name)) + "is currently nothing to set." % (self.name) + ) @property def domain(self): @@ -561,8 +548,8 @@ def fix(self, value=NOTSET, skip_validation=False): raise ValueError( "Fixing variable '%s' " "before the Var has been constructed (there " - "is currently nothing to set)." - % (self.name)) + "is currently nothing to set)." % (self.name) + ) def unfix(self): """Sets the fixed indicator to False.""" @@ -571,8 +558,8 @@ def unfix(self): raise ValueError( "Freeing variable '%s' " "before the Var has been constructed (there " - "is currently nothing to set)." - % (self.name)) + "is currently nothing to set)." % (self.name) + ) class SimpleBooleanVar(metaclass=RenamedClass): @@ -596,7 +583,7 @@ def fix(self, value=NOTSET, skip_validation=False): boolean_vardata.fix(value, skip_validation) def unfix(self): - """Unfix all varaibles in this IndexedBooleanVar (treat as variable) + """Unfix all variables in this IndexedBooleanVar (treat as variable) This sets the `fixed` indicator to False for every variable in this IndexedBooleanVar. @@ -612,7 +599,19 @@ def free(self): @property def domain(self): return BooleanSet - + + # Because Emma wants crazy things... (Where crazy things are the ability to + # index BooleanVars by other (integer) Vars and integer-valued + # expressions--a thing you can do in Constraint Programming.) + def __getitem__(self, args): + tmp = args if args.__class__ is tuple else (args,) + if any( + hasattr(arg, 'is_potentially_variable') and arg.is_potentially_variable() + for arg in tmp + ): + return GetItemExpression((self,) + tmp) + return super().__getitem__(args) + @ModelComponentFactory.register("List of logical decision variables.") class BooleanVarList(IndexedBooleanVar): @@ -638,14 +637,14 @@ def construct(self, data=None): if self._value_init_value.__class__ is dict: for i in range(len(self._value_init_value)): self._index_set.add(i + self._starting_index) - super(BooleanVarList,self).construct(data) + super(BooleanVarList, self).construct(data) # Note that the current Var initializer silently ignores # initialization data that is not in the underlying index set. To # ensure that at least here all initialization data is added to the # VarList (so we get potential domain errors), we will re-set # everything. if self._value_init_value.__class__ is dict: - for k,v in self._value_init_value.items(): + for k, v in self._value_init_value.items(): self[k] = v def add(self): @@ -653,7 +652,3 @@ def add(self): next_idx = len(self._index_set) + self._starting_index self._index_set.add(next_idx) return self[next_idx] - - - - diff --git a/pyomo/core/base/check.py b/pyomo/core/base/check.py index 922ac36c897..0e9d8e889b2 100644 --- a/pyomo/core/base/check.py +++ b/pyomo/core/base/check.py @@ -23,7 +23,9 @@ logger = logging.getLogger('pyomo.core') -@ModelComponentFactory.register("A component that performs tests during model construction. The action rule is applied to every index value.") +@ModelComponentFactory.register( + "A component that performs tests during model construction. The action rule is applied to every index value." +) class BuildCheck(IndexedComponent): """ A build check, which executes a rule for all valid indices. If @@ -42,20 +44,22 @@ def __init__(self, *args, **kwd): IndexedComponent.__init__(self, *args, **kwd) # if not type(self._rule) is types.FunctionType: - raise ValueError("BuildCheck must have an 'rule' option specified whose value is a function") + raise ValueError( + "BuildCheck must have an 'rule' option specified whose value is a function" + ) def _pprint(self): return ([], None, None, None) def construct(self, data=None): - """ Apply the rule to construct values in this set """ - if is_debug_set(logger): #pragma:nocover - logger.debug("Constructing Check, name="+self.name) + """Apply the rule to construct values in this set""" + if is_debug_set(logger): # pragma:nocover + logger.debug("Constructing Check, name=" + self.name) # - if self._constructed: #pragma:nocover + if self._constructed: # pragma:nocover return timer = ConstructionTimer(self) - self._constructed=True + self._constructed = True # if not self.is_indexed(): # Scalar component @@ -67,5 +71,8 @@ def construct(self, data=None): for index in self._index_set: res = apply_indexed_rule(self, self._rule, self._parent(), index) if not res: - raise ValueError("BuildCheck %r identified error with index %r" % (self.name, str(index))) + raise ValueError( + "BuildCheck %r identified error with index %r" + % (self.name, str(index)) + ) timer.report() diff --git a/pyomo/core/base/component.py b/pyomo/core/base/component.py index 6abf147e031..a8550f8f469 100644 --- a/pyomo/core/base/component.py +++ b/pyomo/core/base/component.py @@ -17,8 +17,13 @@ import pyomo.common from pyomo.common import DeveloperError +from pyomo.common.autoslots import AutoSlots, fast_deepcopy +from pyomo.common.collections import OrderedDict from pyomo.common.deprecation import ( - deprecated, deprecation_warning, relocated_module_attribute) + deprecated, + deprecation_warning, + relocated_module_attribute, +) from pyomo.common.factory import Factory from pyomo.common.formatting import tabular_writer, StreamIndenter from pyomo.common.modeling import NOTSET @@ -30,19 +35,22 @@ logger = logging.getLogger('pyomo.core') relocated_module_attribute( - 'ComponentUID', 'pyomo.core.base.componentuid.ComponentUID', - version='5.7.2') + 'ComponentUID', 'pyomo.core.base.componentuid.ComponentUID', version='5.7.2' +) _ref_types = {type(None), weakref_ref} -class ModelComponentFactoryClass(Factory): +class ModelComponentFactoryClass(Factory): def register(self, doc=None): def fn(cls): - return super(ModelComponentFactoryClass, self).register( - cls.__name__, doc)(cls) + return super(ModelComponentFactoryClass, self).register(cls.__name__, doc)( + cls + ) + return fn + ModelComponentFactory = ModelComponentFactoryClass('model component') @@ -51,20 +59,18 @@ def name(component, index=NOTSET, fully_qualified=False, relative_to=None): Return a string representation of component for a specific index value. """ - base = component.getname( - fully_qualified=fully_qualified, relative_to=relative_to - ) + base = component.getname(fully_qualified=fully_qualified, relative_to=relative_to) if index is NOTSET: return base else: if index not in component.index_set(): - raise KeyError( "Index %s is not valid for component %s" - % (index, component.name) ) - return base + index_repr( index ) + raise KeyError( + "Index %s is not valid for component %s" % (index, component.name) + ) + return base + index_repr(index) -@deprecated(msg="The cname() function has been renamed to name()", - version='5.6.9') +@deprecated(msg="The cname() function has been renamed to name()", version='5.6.9') def cname(*args, **kwds): return name(*args, **kwds) @@ -72,6 +78,7 @@ def cname(*args, **kwds): class CloneError(pyomo.common.errors.PyomoException): pass + class _ComponentBase(PyomoObject): """A base class for Component and ComponentData @@ -79,6 +86,7 @@ class _ComponentBase(PyomoObject): expected for all Component-like objects. They are centralized here to avoid repeated code in the Component and ComponentData classes. """ + __slots__ = () _PPRINT_INDENT = " " @@ -104,42 +112,65 @@ def __deepcopy__(self, memo): # expressions can refer to container (non-Simple) components, so # we need to override __deepcopy__ for both Component and # ComponentData. - - #try: - # print("Component: %s" % (self.name,)) - #except: - # print("DANGLING ComponentData: %s on %s" % ( - # type(self),self.parent_component())) - - # Note: there is an edge case when cloning a block: the initial - # call to deepcopy (on the target block) has __block_scope__ - # defined, however, the parent block of self is either None, or - # is (by definition) out of scope. So we will check that - # id(self) is not in __block_scope__: if it is, then this is the - # top-level block and we need to do the normal deepcopy. - if '__block_scope__' in memo and \ - id(self) not in memo['__block_scope__']: - _known = memo['__block_scope__'] - _new = [] + # + if '__block_scope__' in memo: + _scope = memo['__block_scope__'] + _new = None tmp = self.parent_block() - tmpId = id(tmp) + # "Floating" components should be in scope by default (we + # will handle 'global' components like GlobalSets in the + # components) + _in_scope = tmp is None # Note: normally we would need to check that tmp does not # end up being None. However, since clone() inserts # id(None) into the __block_scope__ dictionary, we are safe - while tmpId not in _known: - _new.append(tmpId) + while id(tmp) not in _scope: + _new = (_new, id(tmp)) tmp = tmp.parent_block() - tmpId = id(tmp) + _in_scope |= _scope[id(tmp)] # Remember whether all newly-encountered blocks are in or # out of scope (prevent duplicate work) - for _id in _new: - _known[_id] = _known[tmpId] - - if not _known[tmpId]: + while _new is not None: + _new, _id = _new + _scope[_id] = _in_scope + + # Note: there is an edge case when cloning a block: the + # initial call to deepcopy (on the target block) has + # __block_scope__ defined, however, the parent block of self + # is either None, or is (by definition) out of scope. So we + # will check that id(self) is not in __block_scope__: if it + # is, then this is the top-level block and we need to do the + # normal deepcopy. We defer this check until now for + # efficiency reasons because we expect that (for sane models) + # the bulk of the components we will encounter will be *in* + # scope. + if not _in_scope and id(self) not in _scope: # component is out-of-scope. shallow copy only - ans = memo[id(self)] = self - return ans + memo[id(self)] = self + return self + # + # At this point we know we need to deepcopy this component (and + # everything under it). We can't do the "obvious", since this + # is a (partially) slot-ized class and the __dict__ structure is + # nonauthoritative: + # + # for key, val in self.__dict__.iteritems(): + # object.__setattr__(ans, key, deepcopy(val, memo)) + # + # Further, __slots__ is also nonauthoritative (this may be a + # singleton component -- in which case it also has a __dict__). + # Plus, this may be a derived class with several layers of + # slots. So, we will piggyback on the __getstate__/__setstate__ + # logic amd resort to partially "pickling" the object, + # deepcopying the state, and then restoring the copy into + # the new instance. + # + # [JDS 7/7/14] I worry about the efficiency of using both + # getstate/setstate *and* deepcopy, but we need deepcopy to + # update the _parent refs appropriately, and since this is a + # slot-ized class, we cannot overwrite the __deepcopy__ + # attribute to prevent infinite recursion. # # deepcopy() is an inherently recursive operation. This can # cause problems for highly interconnected Pyomo models (for @@ -150,7 +181,7 @@ def __deepcopy__(self, memo): # knowledge that all component references point to other # components / component datas, and NOT to attributes on the # components/datas. So, if we can first go through and stub in - # all the objects that we will need to populate,and then go + # all the objects that we will need to populate, and then go # through and deepcopy them, then we can unroll the vast # majority of the recursion. # @@ -161,43 +192,9 @@ def __deepcopy__(self, memo): # components that we expect to need, we can go through and # populate all the components. # - if '__paranoid__' not in memo: - memo['__paranoid__'] = None # The component_list is roughly in declaration order. This # means that it should be relatively safe to clone the contents # in the same order. - for comp in component_list: - comp._populate_deepcopied_object(memo) - return memo[id(self)] - - def _create_objects_for_deepcopy(self, memo, component_list): - _id = id(self) - if _id not in memo: - component_list.append(self) - memo[_id] = self.__class__.__new__(self.__class__) - - def _populate_deepcopied_object(self, memo): - # We can't do the "obvious", since this is a (partially) - # slot-ized class and the __dict__ structure is - # nonauthoritative: - # - # for key, val in self.__dict__.iteritems(): - # object.__setattr__(ans, key, deepcopy(val, memo)) - # - # Further, __slots__ is also nonauthoritative (this may be a - # singleton component -- in which case it also has a __dict__). - # Plus, as this may be a derived class with several layers of - # slots. So, we will resort to partially "pickling" the object, - # deepcopying the state dict, and then restoring the copy into - # the new instance. - # - # [JDS 7/7/14] I worry about the efficiency of using both - # getstate/setstate *and* deepcopy, but we need deepcopy to - # update the _parent refs appropriately, and since this is a - # slot-ized class, we cannot overwrite the __deepcopy__ - # attribute to prevent infinite recursion. - # - state = self.__getstate__() # # There is a particularly subtle bug with 'uncopyable' # attributes: if the exception is thrown while copying a complex @@ -206,83 +203,133 @@ def _populate_deepcopied_object(self, memo): # haven't had their state set yet. When the exception moves us # into the except block, we need to effectively "undo" those # partially copied classes. The only way is to restore the memo - # to the state it was in before we started. Right now, our - # solution is to make a (shallow) copy of the memo before each - # operation and restoring it in the case of exception. - # Unfortunately that is a lot of usually unnecessary work. - # Since *most* classes are copyable, we will avoid that - # "paranoia" unless the naive clone generated an error - in - # which case Block.clone() will switch over to the more - # "paranoid" mode. + # to the state it was in before we started. We will make use of + # the knowledge that 1) memo entries are never reassigned during + # a deepcopy(), and 2) dict are ordered by insertion order in + # Python >= 3.7. As a result, we do not need to preserve the + # whole memo before calling __getstate__/__setstate__, and can + # get away with only remembering the number of items in the + # memo. + # + # Note that entering/leaving try-except contexts has a + # not-insignificant overhead. On the hope that the user wrote a + # sane (deepcopy-able) model, we will try to do everything in + # one try-except block. + # + try: + for i, comp in enumerate(component_list): + saved_memo = len(memo) + # Note: this implementation avoids deepcopying the + # temporary 'state' list, significantly speeding things + # up. + memo[id(comp)].__setstate__( + [fast_deepcopy(field, memo) for field in comp.__getstate__()] + ) + return memo[id(self)] + except: + pass + # + # We hit an error deepcopying a component. Attempt to reset + # things and try again, but in a more cautious manner (after + # all, if one component was not deepcopyable, it stands to + # reason that several others will not be either). # + # We want to remove any new entries added to the memo during the + # failed try above. + # + for _ in range(len(memo) - saved_memo): + memo.popitem() + # + # Now we are going to continue on, but in a more cautious + # manner: we will clone entries field at a time so that we can + # get the most "complete" copy possible. + for comp in component_list[i:]: + state = comp.__getstate__() + # Note: if has_dict, then __auto_slots__.slots will be 1 + # shorter than the state (the last element is the __dict__). + # Zip will ignore it. + _deepcopy_field = comp._deepcopy_field + new_state = [ + _deepcopy_field(memo, slot, value) + for slot, value in zip(comp.__auto_slots__.slots, state) + ] + if comp.__auto_slots__.has_dict: + new_state.append( + { + slot: _deepcopy_field(memo, slot, value) + for slot, value in state[-1].items() + } + ) + memo[id(comp)].__setstate__(new_state) + return memo[id(self)] + + def _create_objects_for_deepcopy(self, memo, component_list): + _new = self.__class__.__new__(self.__class__) + _ans = memo.setdefault(id(self), _new) + if _ans is _new: + component_list.append(self) + return _ans + + def _deepcopy_field(self, memo, slot_name, value): + saved_memo = len(memo) try: - if memo['__paranoid__']: - saved_memo = dict(memo) - new_state = deepcopy(state, memo) + return fast_deepcopy(value, memo) + except CloneError: + raise except: - paranoid = memo['__paranoid__'] - if paranoid: - # Note: memo is intentionally pass-by-reference. We - # need to clear and reset the object we were handed (and - # not overwrite it) - memo.clear() - memo.update(saved_memo) - elif paranoid is not None: - raise PickleError() - new_state = {} - for k, v in state.items(): - try: - if paranoid: - saved_memo = dict(memo) - new_state[k] = deepcopy(v, memo) - except CloneError: - raise - except: - if paranoid: - memo.clear() - memo.update(saved_memo) - elif paranoid is None: - logger.warning(""" - Uncopyable field encountered when deep - copying outside the scope of Block.clone(). - There is a distinct possibility that the new - copy is not complete. To avoid this - situation, either use Block.clone() or set - 'paranoid' mode by adding '__paranoid__' == - True to the memo before calling - copy.deepcopy.""") - if self.model() is self: - what = 'Model' - else: - what = 'Component' - logger.error( - "Unable to clone Pyomo component attribute.\n" - "%s '%s' contains an uncopyable field '%s' (%s)" - % ( what, self.name, k, type(v) )) - # If this is an abstract model, then we are probably - # in the middle of create_instance, and the model - # that will eventually become the concrete model is - # missing initialization data. This is an - # exceptional event worthy of a stronger (and more - # informative) error. - if not self.parent_component()._constructed: - raise CloneError( - "Uncopyable attribute (%s) encountered when " - "cloning component %s on an abstract block. " - "The resulting instance is therefore " - "missing data from the original abstract model " - "and likely will not construct correctly. " - "Consider changing how you initialize this " - "component or using a ConcreteModel." - % ( k, self.name )) - memo[id(self)].__setstate__(new_state) - - - @deprecated("""The cname() method has been renamed to getname(). + # remove entries added to the memo + for _ in range(len(memo) - saved_memo): + memo.popitem() + # warn the user + if '__block_scope__' not in memo: + logger.warning( + """ + Uncopyable field encountered when deep + copying outside the scope of Block.clone(). + There is a distinct possibility that the new + copy is not complete. To avoid this + situation, either use Block.clone() or set + 'paranoid' mode by adding '__paranoid__' == + True to the memo before calling + copy.deepcopy.""" + ) + if self.model() is self: + what = 'Model' + else: + what = 'Component' + logger.error( + "Unable to clone Pyomo component attribute.\n" + "%s '%s' contains an uncopyable field '%s' (%s). " + "Setting field to `None` on new object" + % (what, self.name, slot_name, type(value)) + ) + # If this is an abstract model, then we are probably + # in the middle of create_instance, and the model + # that will eventually become the concrete model is + # missing initialization data. This is an + # exceptional event worthy of a stronger (and more + # informative) error. + if not self.parent_component()._constructed: + raise CloneError( + "Uncopyable attribute (%s) encountered when " + "cloning component %s on an abstract block. " + "The resulting instance is therefore " + "missing data from the original abstract model " + "and likely will not construct correctly. " + "Consider changing how you initialize this " + "component or using a ConcreteModel." % (slot_name, self.name) + ) + # Drop the offending field value. The user has been warned. + return None + + @deprecated( + """The cname() method has been renamed to getname(). The preferred method of obtaining a component name is to use the .name property, which returns the fully qualified component name. The .local_name property will return the component name only within - the context of the immediate parent container.""", version='5.0') + the context of the immediate parent container.""", + version='5.0', + ) def cname(self, *args, **kwds): return self.getname(*args, **kwds) @@ -304,11 +351,20 @@ def pprint(self, ostream=None, verbose=False, prefix=""): _name = comp.local_name else: # restrict output to only this data object - _data = iter( ((self.index(), self),) ) + _data = iter(((self.index(), self),)) _name = "{Member of %s}" % (comp.local_name,) self._pprint_base_impl( - ostream, verbose, prefix, _name, comp.doc, - comp.is_constructed(), _attr, _data, _header, _fcn) + ostream, + verbose, + prefix, + _name, + comp.doc, + comp.is_constructed(), + _attr, + _data, + _header, + _fcn, + ) @property def name(self): @@ -323,7 +379,8 @@ def name(self, val): raise ValueError( "The .name attribute is now a property method " "that returns the fully qualified component name. " - "Assignment is not allowed.") + "Assignment is not allowed." + ) @property def local_name(self): @@ -342,16 +399,28 @@ def active(self, value): """Set the active attribute to the given value""" raise AttributeError( "Setting the 'active' flag on a component that does not " - "support deactivation is not allowed.") + "support deactivation is not allowed." + ) - def _pprint_base_impl(self, ostream, verbose, prefix, _name, _doc, - _constructed, _attr, _data, _header, _fcn): + def _pprint_base_impl( + self, + ostream, + verbose, + prefix, + _name, + _doc, + _constructed, + _attr, + _data, + _header, + _fcn, + ): if ostream is None: ostream = sys.stdout if prefix: ostream = StreamIndenter(ostream, prefix) - # FIXME: HACK for backwards compatability with suppressing the + # FIXME: HACK for backwards compatibility with suppressing the # header for the top block if not _attr and self.parent_block() is None: _name = '' @@ -367,16 +436,16 @@ def _pprint_base_impl(self, ostream, verbose, prefix, _name, _doc, _attr.append(('ReferenceTo', self.referent)) if _name: - ostream.write(_name+" : ") + ostream.write(_name + " : ") if _doc: - ostream.write(_doc+'\n') + ostream.write(_doc + '\n') if _attr: - ostream.write(", ".join("%s=%s" % (k,v) for k,v in _attr)) + ostream.write(", ".join("%s=%s" % (k, v) for k, v in _attr)) if _attr or _name or _doc: ostream.write("\n") if not _constructed: - # HACK: for backwards compatability, Abstract blocks will + # HACK: for backwards compatibility, Abstract blocks will # still print their assigned components. Should we instead # always pprint unconstructed components (possibly # suppressing the table header if the table is empty)? @@ -393,7 +462,7 @@ def _pprint_base_impl(self, ostream, verbose, prefix, _name, _doc, if _fcn2 is not None: _data_dict = dict(_data) _data = _data_dict.items() - tabular_writer( ostream, '', _data, _header, _fcn ) + tabular_writer(ostream, '', _data, _header, _fcn) if _fcn2 is not None: for _key in sorted_robust(_data_dict): _fcn2(ostream, _key, _data_dict[_key]) @@ -424,97 +493,49 @@ class Component(_ComponentBase): _ctype The class type for the derived subclass """ - def __init__ (self, **kwds): + __autoslot_mappers__ = {'_parent': AutoSlots.weakref_mapper} + + def __init__(self, **kwds): # # Get arguments # self._ctype = kwds.pop('ctype', None) - self.doc = kwds.pop('doc', None) - self._name = kwds.pop('name', str(type(self).__name__)) + self.doc = kwds.pop('doc', None) + self._name = kwds.pop('name', str(type(self).__name__)) if kwds: raise ValueError( "Unexpected keyword options found while constructing '%s':\n\t%s" - % ( type(self).__name__, ','.join(sorted(kwds.keys())) )) + % (type(self).__name__, ','.join(sorted(kwds.keys()))) + ) # # Verify that ctype has been specified. # if self._ctype is None: raise DeveloperError( - "Must specify a component type for class %s!" - % ( type(self).__name__, ) ) + "Must specify a component type for class %s!" % (type(self).__name__,) + ) # - self._constructed = False - self._parent = None # Must be a weakref - - def __getstate__(self): - """ - This method must be defined to support pickling because this class - owns weakrefs for '_parent'. - """ - # - # Nominally, __getstate__() should return: - # - # state = super(Class, self).__getstate__() - # for i in Class.__dict__: - # state[i] = getattr(self,i) - # return state - # - # However, in this case, the (nominal) parent class is 'object', - # and object does not implement __getstate__. So, we will check - # to make sure that there is a base __getstate__() to call... - # - _base = super(Component,self) - if hasattr(_base, '__getstate__'): - state = _base.__getstate__() - for key,val in self.__dict__.items(): - if key not in state: - state[key] = val - else: - state = dict(self.__dict__) - if self._parent is not None: - state['_parent'] = self._parent() - return state - - def __setstate__(self, state): - """ - This method must be defined to support pickling because this class - owns weakrefs for '_parent'. - """ - if state['_parent'].__class__ not in _ref_types: - state['_parent'] = weakref_ref(state['_parent']) - # - # Note: our model for setstate is for derived classes to modify - # the state dictionary as control passes up the inheritance - # hierarchy (using super() calls). All assignment of state -> - # object attributes is handled at the last class before 'object' - # (which may -- or may not (thanks to MRO) -- be here. - # - _base = super(Component,self) - if hasattr(_base, '__setstate__'): - _base.__setstate__(state) - else: - for key, val in state.items(): - # Note: per the Python data model docs, we explicitly - # set the attribute using object.__setattr__() instead - # of setting self.__dict__[key] = val. - object.__setattr__(self, key, val) + self._constructed = False + self._parent = None # Must be a weakref @property def ctype(self): """Return the class type for this component""" return self._ctype - @deprecated("Component.type() method has been replaced by the " - ".ctype property.", version='5.7') + @deprecated( + "Component.type() method has been replaced by the .ctype property.", + version='5.7', + ) def type(self): """Return the class type for this component""" return self.ctype - def construct(self, data=None): #pragma:nocover + def construct(self, data=None): # pragma:nocover """API definition for constructing components""" pass - def is_constructed(self): #pragma:nocover + def is_constructed(self): # pragma:nocover """Return True if this class has been constructed""" return self._constructed @@ -544,8 +565,13 @@ def valid_model_component(self): def pprint(self, ostream=None, verbose=False, prefix=""): """Print component information""" self._pprint_base_impl( - ostream, verbose, prefix, self.local_name, self.doc, - self.is_constructed(), *self._pprint() + ostream, + verbose, + prefix, + self.local_name, + self.doc, + self.is_constructed(), + *self._pprint() ) def display(self, ostream=None, verbose=False, prefix=""): @@ -596,7 +622,7 @@ def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): Generate full name from nested block names relative_to: Block - Generate fully_qualified names reletive to the specified block. + Generate fully_qualified names relative to the specified block. """ local_name = self._name if fully_qualified: @@ -604,12 +630,16 @@ def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): if relative_to is None: relative_to = self.model() if pb is not None and pb is not relative_to: - ans = pb.getname(fully_qualified, name_buffer, relative_to) \ - + "." + name_repr(local_name) + ans = ( + pb.getname(fully_qualified, name_buffer, relative_to) + + "." + + name_repr(local_name) + ) elif pb is None and relative_to != self.model(): raise RuntimeError( "The relative_to argument was specified but not found " - "in the block hierarchy: %s" % str(relative_to)) + "in the block hierarchy: %s" % str(relative_to) + ) else: ans = name_repr(local_name) else: @@ -624,7 +654,9 @@ def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): "is no longer a quadratic operation. Additionally, note that " "use of this argument poses risks if the buffer contains " "names relative to different Blocks in the model hierarchy or " - "a mixture of local and fully_qualified names.", version='TODO') + "a mixture of local and fully_qualified names.", + version='TODO', + ) name_buffer[id(self)] = ans return ans @@ -633,7 +665,7 @@ def name(self): """Get the fully qualifed component name.""" return self.getname(fully_qualified=True) - # Allow setting a componet's name if it is not owned by a parent + # Allow setting a component's name if it is not owned by a parent # block (this supports, e.g., naming a model) @name.setter def name(self, val): @@ -643,7 +675,8 @@ def name(self, val): raise ValueError( "The .name attribute is not settable when the component " "is assigned to a Block.\nTriggered by attempting to set " - "component '%s' to name '%s'" % (self.name,val)) + "component '%s' to name '%s'" % (self.name, val) + ) def is_indexed(self): """Return true if this component is indexed""" @@ -653,7 +686,10 @@ def clear_suffix_value(self, suffix_or_name, expand=True): """Clear the suffix value for this component data""" if isinstance(suffix_or_name, str): import pyomo.core.base.suffix - for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator(self.model()): + + for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator( + self.model() + ): if suffix_or_name == name_: suffix_.clear_value(self, expand=expand) break @@ -664,7 +700,10 @@ def set_suffix_value(self, suffix_or_name, value, expand=True): """Set the suffix value for this component data""" if isinstance(suffix_or_name, str): import pyomo.core.base.suffix - for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator(self.model()): + + for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator( + self.model() + ): if suffix_or_name == name_: suffix_.set_value(self, value, expand=expand) break @@ -675,7 +714,10 @@ def get_suffix_value(self, suffix_or_name, default=None): """Get the suffix value for this component data""" if isinstance(suffix_or_name, str): import pyomo.core.base.suffix - for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator(self.model()): + + for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator( + self.model() + ): if suffix_or_name == name_: return suffix_.get(self, default) else: @@ -703,16 +745,15 @@ def active(self): @active.setter def active(self, value): """Set the active attribute to the given value""" - raise AttributeError( - "Assignment not allowed. Use the (de)activate methods." ) + raise AttributeError("Assignment not allowed. Use the (de)activate methods.") def activate(self): """Set the active attribute to True""" - self._active=True + self._active = True def deactivate(self): """Set the active attribute to False""" - self._active=False + self._active = False class ComponentData(_ComponentBase): @@ -732,10 +773,10 @@ class ComponentData(_ComponentBase): Private class attributes: _component A weakref to the component that owns this data object _index The index of this data object - """ + """ - __pickle_slots__ = ('_component', '_index') - __slots__ = __pickle_slots__ + ('__weakref__',) + __slots__ = ('_component', '_index', '__weakref__') + __autoslot_mappers__ = {'_component': AutoSlots.weakref_mapper} # NOTE: This constructor is in-lined in the constructors for the following # classes: _BooleanVarData, _ConnectorData, _ConstraintData, @@ -756,82 +797,6 @@ def __init__(self, component): self._component = weakref_ref(component) self._index = NOTSET - def __getstate__(self): - """Prepare a picklable state of this instance for pickling. - - Nominally, __getstate__() should return: - - state = super(Class, self).__getstate__() - for i in Class.__slots__: - state[i] = getattr(self,i) - return state - - However, in this case, the (nominal) parent class is 'object', - and object does not implement __getstate__. So, we will check - to make sure that there is a base __getstate__() to call... - You might think that there is nothing to check, but multiple - inheritance could mean that another class got stuck between - this class and "object" in the MRO. - - This method must be defined to support pickling because this - class owns weakrefs for '_component', which must be either - removed or converted to hard references prior to pickling. - - Further, since there is only a single slot, and that slot - (_component) requires special processing, we will just deal with - it explicitly. As _component is a weakref (not pickable), we - need to resolve it to a concrete object. - """ - _base = super(ComponentData,self) - if hasattr(_base, '__getstate__'): - state = _base.__getstate__() - else: - state = {} - # - if self._component is None: - state['_component'] = None - else: - state['_component'] = self._component() - state['_index'] = self._index - return state - - def __setstate__(self, state): - """Restore a pickled state into this instance - - Note: our model for setstate is for derived classes to modify - the state dictionary as control passes up the inheritance - hierarchy (using super() calls). All assignment of state -> - object attributes is handled at the last class before 'object' - (which may -- or may not (thanks to MRO) -- be here. - - This method must be defined to support unpickling because this - class owns weakrefs for '_component', which must be restored - from the hard references used in the piclke. - """ - # - # FIXME: We shouldn't have to check for weakref.ref here, but if - # we don't the model cloning appears to fail (in the Benders - # example) - # - if state['_component'].__class__ not in _ref_types: - state['_component'] = weakref_ref(state['_component']) - # - # Note: our model for setstate is for derived classes to modify - # the state dictionary as control passes up the inheritance - # hierarchy (using super() calls). All assignment of state -> - # object attributes is handled at the last class before 'object' - # (which may -- or may not (thanks to MRO) -- be here. - # - _base = super(ComponentData,self) - if hasattr(_base, '__setstate__'): - _base.__setstate__(state) - else: - for key, val in state.items(): - # Note: per the Python data model docs, we explicitly - # set the attribute using object.__setattr__() instead - # of setting self.__dict__[key] = val. - object.__setattr__(self, key, val) - @property def ctype(self): """Return the class type for this component""" @@ -840,8 +805,10 @@ def ctype(self): return None return _parent._ctype - @deprecated("Component.type() method has been replaced by the " - ".ctype property.", version='5.7') + @deprecated( + "Component.type() method has been replaced by the .ctype property.", + version='5.7', + ) def type(self): """Return the class type for this component""" return self.ctype @@ -853,7 +820,7 @@ def parent_component(self): return self._component() def parent_block(self): - """Return the parent of the component that owns this data. """ + """Return the parent of the component that owns this data.""" # This is a re-implementation of parent_component(), duplicated # for effficiency to avoid the method call if self._component is None: @@ -867,7 +834,7 @@ def parent_block(self): return comp._parent() def model(self): - """Return the model of the component that owns this data. """ + """Return the model of the component that owns this data.""" ans = self.parent_block() if ans is None: return None @@ -887,16 +854,19 @@ def index(self): to the parent component's index set. """ parent = self.parent_component() - if ( parent is not None and - self._index is not NOTSET and - parent[self._index] is not self ): + if ( + parent is not None + and self._index is not NOTSET + and parent[self._index] is not self + ): # This error message is a bit goofy, but we can't call self.name # here--it's an infinite loop! raise DeveloperError( "The '_data' dictionary and '_index' attribute are out of " "sync for indexed %s '%s': The %s entry in the '_data' " "dictionary does not map back to this component data object." - % (parent.ctype.__name__, parent.name, self._index)) + % (parent.ctype.__name__, parent.name, self._index) + ) return self._index def __str__(self): @@ -922,7 +892,9 @@ def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): "is no longer a quadratic operation. Additionally, note that " "use of this argument poses risks if the buffer contains " "names relative to different Blocks in the model hierarchy or " - "a mixture of local and fully_qualified names.", version='TODO') + "a mixture of local and fully_qualified names.", + version='TODO', + ) if id(self) in name_buffer: # Return the name if it is in the buffer return name_buffer[id(self)] @@ -934,7 +906,8 @@ def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): # Component.getname() method # return super(ComponentData, self).getname( - fully_qualified, name_buffer, relative_to) + fully_qualified, name_buffer, relative_to + ) elif c is not None: # # Get the name of the parent component @@ -964,8 +937,10 @@ def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): # return base + index_repr(self.index()) # - raise RuntimeError("Fatal error: cannot find the component data in " - "the owning component's _data dictionary.") + raise RuntimeError( + "Fatal error: cannot find the component data in " + "the owning component's _data dictionary." + ) def is_indexed(self): """Return true if this component is indexed""" @@ -975,7 +950,10 @@ def clear_suffix_value(self, suffix_or_name, expand=True): """Set the suffix value for this component data""" if isinstance(suffix_or_name, str): import pyomo.core.base.suffix - for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator(self.model()): + + for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator( + self.model() + ): if suffix_or_name == name_: suffix_.clear_value(self, expand=expand) break @@ -986,7 +964,10 @@ def set_suffix_value(self, suffix_or_name, value, expand=True): """Set the suffix value for this component data""" if isinstance(suffix_or_name, str): import pyomo.core.base.suffix - for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator(self.model()): + + for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator( + self.model() + ): if suffix_or_name == name_: suffix_.set_value(self, value, expand=expand) break @@ -997,7 +978,10 @@ def get_suffix_value(self, suffix_or_name, default=None): """Get the suffix value for this component data""" if isinstance(suffix_or_name, str): import pyomo.core.base.suffix - for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator(self.model()): + + for name_, suffix_ in pyomo.core.base.suffix.active_suffix_generator( + self.model() + ): if suffix_or_name == name_: return suffix_.get(self, default) else: @@ -1027,24 +1011,12 @@ class ActiveComponentData(ComponentData): _active A boolean that indicates whether this data is active """ - __slots__ = ( '_active', ) + __slots__ = ('_active',) def __init__(self, component): super(ActiveComponentData, self).__init__(component) self._active = True - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - result = super(ActiveComponentData, self).__getstate__() - for i in ActiveComponentData.__slots__: - result[i] = getattr(self, i) - return result - - # Since this class requires no special processing of the state - # dictionary, it does not need to implement __setstate__() - @property def active(self): """Return the active attribute""" @@ -1053,8 +1025,7 @@ def active(self): @active.setter def active(self, value): """Set the active attribute to a specified value.""" - raise AttributeError( - "Assignment not allowed. Use the (de)activate method" ) + raise AttributeError("Assignment not allowed. Use the (de)activate method") def activate(self): """Set the active attribute to True""" diff --git a/pyomo/core/base/component_namer.py b/pyomo/core/base/component_namer.py index ce96c3b0768..17d46c12fae 100644 --- a/pyomo/core/base/component_namer.py +++ b/pyomo/core/base/component_namer.py @@ -23,9 +23,10 @@ special_chars = literals + '\'":\\' re_number = re.compile( - r'(?:[-+]?(?:[0-9]+\.?[0-9]*|\.[0-9]+)(?:[eE][-+]?[0-9]+)?|-?inf|nan)') -re_special_char = re.compile( - r'[' + re.escape(special_chars) + ']') + r'(?:[-+]?(?:[0-9]+\.?[0-9]*|\.[0-9]+)(?:[eE][-+]?[0-9]+)?|-?inf|nan)' +) +re_special_char = re.compile(r'[' + re.escape(special_chars) + ']') + def name_repr(x, unknown_handler=str): if not isinstance(x, str): @@ -41,9 +42,14 @@ def name_repr(x, unknown_handler=str): return x return unquoted + def tuple_repr(x, unknown_handler=str): - return '(' + ','.join(name_repr(_, unknown_handler) for _ in x) \ + return ( + '(' + + ','.join(name_repr(_, unknown_handler) for _ in x) + (',)' if len(x) == 1 else ')') + ) + def index_repr(idx, unknown_handler=str): """ @@ -55,6 +61,7 @@ def index_repr(idx, unknown_handler=str): idx_str = name_repr(idx, unknown_handler) return "[" + idx_str + "]" + _repr_map = { slice: lambda x: '*', Ellipsis.__class__: lambda x: '**', diff --git a/pyomo/core/base/component_order.py b/pyomo/core/base/component_order.py index 48d14aa4b21..0685571ccb0 100644 --- a/pyomo/core/base/component_order.py +++ b/pyomo/core/base/component_order.py @@ -21,10 +21,14 @@ from pyomo.core.base.constraint import Constraint from pyomo.core.base.sos import SOSConstraint -items = [ Set, RangeSet, Param, Var, Expression, \ - Objective, Constraint, SOSConstraint ] +items = [Set, RangeSet, Param, Var, Expression, Objective, Constraint, SOSConstraint] -display_items = [ Var, Objective, Constraint] +display_items = [Var, Objective, Constraint] # TODO: Add Block to display_items after 4.0 release. See note in # Block.display() [JDS 1/7/15] -display_name = {Var:"Variables", Objective:"Objectives", Constraint:"Constraints", Block:"Blocks"} +display_name = { + Var: "Variables", + Objective: "Objectives", + Constraint: "Constraints", + Block: "Blocks", +} diff --git a/pyomo/core/base/componentuid.py b/pyomo/core/base/componentuid.py index 1ccf34bb105..0ab57d1c253 100644 --- a/pyomo/core/base/componentuid.py +++ b/pyomo/core/base/componentuid.py @@ -17,8 +17,10 @@ from pyomo.common.dependencies import pickle from pyomo.common.deprecation import deprecated from pyomo.core.base.component_namer import ( - literals, special_chars, - name_repr as __name_repr, index_repr as __index_repr, + literals, + special_chars, + name_repr as __name_repr, + index_repr as __index_repr, re_number as _re_number, ) from pyomo.core.base.indexed_component_slice import IndexedComponent_slice @@ -28,15 +30,19 @@ class _NotSpecified(object): pass + def _pickle(x): - return '|'+repr(pickle.dumps(x, protocol=2)) + return '|' + repr(pickle.dumps(x, protocol=2)) + def _name_repr(x): return __name_repr(x, _pickle) + def _index_repr(x): return __index_repr(x, _pickle) + class ComponentUID(object): """ A Component unique identifier @@ -56,15 +62,15 @@ class ComponentUID(object): indexes) """ - __slots__ = ( '_cids', ) + __slots__ = ('_cids',) _lex = None _repr_v1_map = { slice: lambda x: '*', Ellipsis.__class__: lambda x: '**', - int: lambda x: '#'+str(x), - float: lambda x: '#'+str(x), - str: lambda x: '$'+str(x), + int: lambda x: '#' + str(x), + float: lambda x: '#' + str(x), + str: lambda x: '$' + str(x), } def __init__(self, component, cuid_buffer=None, context=None): @@ -72,21 +78,23 @@ def __init__(self, component, cuid_buffer=None, context=None): # the string representation. if isinstance(component, str): if context is not None: - raise ValueError("Context is not allowed when initializing a " - "ComponentUID object from a string type") + raise ValueError( + "Context is not allowed when initializing a " + "ComponentUID object from a string type" + ) try: self._cids = tuple(self._parse_cuid_v2(component)) except (OSError, IOError): self._cids = tuple(self._parse_cuid_v1(component)) elif type(component) is IndexedComponent_slice: - self._cids = tuple(self._generate_cuid_from_slice( - component, - context=context, - )) + self._cids = tuple( + self._generate_cuid_from_slice(component, context=context) + ) else: - self._cids = tuple(self._generate_cuid( - component, cuid_buffer=cuid_buffer, context=context)) + self._cids = tuple( + self._generate_cuid(component, cuid_buffer=cuid_buffer, context=context) + ) def __str__(self): "Return a 'nicely formatted' string representation of the CUID" @@ -102,29 +110,28 @@ def __str__(self): def get_repr(self, version=2): if version == 1: - _unknown = lambda x: '?'+str(x) + _unknown = lambda x: '?' + str(x) a = "" for name, args in self._cids: a += '.' + name if len(args) == 0: continue a += ':' + ','.join( - self._repr_v1_map.get(x.__class__, _unknown)(x) - for x in args) + self._repr_v1_map.get(x.__class__, _unknown)(x) for x in args + ) return a[1:] # Strip off the leading '.' elif version == 2: return repr(self) else: - raise ValueError("Invalid repr version '%s'; expected 1 or 2" - % (version,)) + raise ValueError("Invalid repr version '%s'; expected 1 or 2" % (version,)) def __getstate__(self): - ans = {x:getattr(self, x) for x in ComponentUID.__slots__} + ans = {x: getattr(self, x) for x in ComponentUID.__slots__} return ans def __setstate__(self, state): for key, val in state.items(): - setattr(self,key,val) + setattr(self, key, val) def __hash__(self): """Return a deterministic hash for this ComponentUID""" @@ -132,11 +139,20 @@ def __hash__(self): return hash(self._cids) except TypeError: # Special handling for unhashable data (slices) - return hash(tuple( - (name, tuple( - (slice, x.start, x.stop, x.step) - if x.__class__ is slice else x - for x in idx)) for name, idx in self._cids)) + return hash( + tuple( + ( + name, + tuple( + (slice, x.start, x.stop, x.step) + if x.__class__ is slice + else x + for x in idx + ), + ) + for name, idx in self._cids + ) + ) def __lt__(self, other): """Return True if this CUID <= the 'other' CUID @@ -156,10 +172,13 @@ def __lt__(self, other): try: other_cids = other._cids except AttributeError: - raise TypeError("'<' not supported between instances of " - "'ComponentUID' and '%s'" % (type(other).__name__)) + raise TypeError( + "'<' not supported between instances of " + "'ComponentUID' and '%s'" % (type(other).__name__) + ) for (self_name, self_idx), (other_name, other_idx) in zip( - self._cids, other_cids): + self._cids, other_cids + ): if self_name != other_name: return self_name < other_name for self_i, other_i in zip(self_idx, other_idx): @@ -217,19 +236,25 @@ def __ne__(self, other): return not self.__eq__(other) @staticmethod - def generate_cuid_string_map(block, ctype=None, descend_into=True, - repr_version=2): + def generate_cuid_string_map(block, ctype=None, descend_into=True, repr_version=2): def _record_indexed_object_cuid_strings_v1(obj, cuid_str): - _unknown = lambda x: '?'+str(x) + _unknown = lambda x: '?' + str(x) for idx, data in obj.items(): if idx.__class__ is tuple and len(idx) > 1: - cuid_strings[data] = cuid_str + ':' + ','.join( - ComponentUID._repr_v1_map.get(x.__class__, _unknown)(x) - for x in idx) + cuid_strings[data] = ( + cuid_str + + ':' + + ','.join( + ComponentUID._repr_v1_map.get(x.__class__, _unknown)(x) + for x in idx + ) + ) else: - cuid_strings[data] \ - = cuid_str + ':' + ComponentUID._repr_v1_map.get( - idx.__class__, _unknown)(idx) + cuid_strings[data] = ( + cuid_str + + ':' + + ComponentUID._repr_v1_map.get(idx.__class__, _unknown)(idx) + ) def _record_indexed_object_cuid_strings_v2(obj, cuid_str): for idx, data in obj.items(): @@ -239,10 +264,7 @@ def _record_indexed_object_cuid_strings_v2(obj, cuid_str): 1: _record_indexed_object_cuid_strings_v1, 2: _record_indexed_object_cuid_strings_v2, }[repr_version] - _record_name = { - 1: str, - 2: _name_repr, - }[repr_version] + _record_name = {1: str, 2: _name_repr}[repr_version] model = block.model() cuid_strings = ComponentMap() @@ -287,7 +309,7 @@ def _index_from_slice_info(self, slice_info): # Assume that the keys of fixed, sliced, and ellipsis # partition the index we're describing. - return tuple( value_map[i] for i in range(len(value_map)) ) + return tuple(value_map[i] for i in range(len(value_map))) def _generate_cuid_from_slice(self, _slice, cuid_buffer=None, context=None): """ @@ -317,21 +339,24 @@ def _generate_cuid_from_slice(self, _slice, cuid_buffer=None, context=None): if call != IndexedComponent_slice.get_attribute: raise ValueError( "Cannot create a CUID with a __call__ of anything " - "other than a 'component' attribute") + "other than a 'component' attribute" + ) if arg != 'component': raise ValueError( "Cannot create a CUID from a slice with a " "call to any method other than 'component': " - "got '%s'." % arg) + "got '%s'." % arg + ) arg, name = name, None - if call & ( IndexedComponent_slice.SET_MASK - | IndexedComponent_slice.DEL_MASK ): + if call & ( + IndexedComponent_slice.SET_MASK | IndexedComponent_slice.DEL_MASK + ): raise ValueError( "Cannot create a CUID from a slice that " "contains `set` or `del` calls: got call %s " "with argument %s" % (call, arg) - ) + ) elif call == IndexedComponent_slice.slice_info: comp = arg[0] slice_info = arg[1:] @@ -341,10 +366,8 @@ def _generate_cuid_from_slice(self, _slice, cuid_buffer=None, context=None): parent = comp.parent_block() base_cuid = self._generate_cuid( - parent, - cuid_buffer=cuid_buffer, - context=context, - ) + parent, cuid_buffer=cuid_buffer, context=context + ) base_cuid.reverse() rcuid.extend(base_cuid) # We assume slice_info will only occur at the top of the @@ -353,26 +376,27 @@ def _generate_cuid_from_slice(self, _slice, cuid_buffer=None, context=None): elif call == IndexedComponent_slice.get_item: if index is not _NotSpecified: raise ValueError( - "Two `get_item` calls, %s and %s, were detected before a" - "`get_attr` call. This is not supported by 'ComponentUID'." - % (index, arg)) + "Two `get_item` calls, %s and %s, were detected before a" + "`get_attr` call. This is not supported by 'ComponentUID'." + % (index, arg) + ) # Cache `get_item` arg until a `get_attr` is encountered. index = arg elif call == IndexedComponent_slice.call: if len(arg) != 1: raise ValueError( - "Cannot create a CUID from a slice with a " - "call that has multiple arguments: got " - "arguments %s." % (arg,) - ) + "Cannot create a CUID from a slice with a " + "call that has multiple arguments: got " + "arguments %s." % (arg,) + ) # Cache argument of a call to `component` name = arg[0] if kwds != {}: raise ValueError( - "Cannot create a CUID from a slice with a " - "call that contains keywords: got keyword " - "dict %s." % (kwds,) - ) + "Cannot create a CUID from a slice with a " + "call that contains keywords: got keyword " + "dict %s." % (kwds,) + ) elif call == IndexedComponent_slice.get_attribute: if index is _NotSpecified: index = () @@ -392,12 +416,13 @@ def _generate_cuid(self, component, cuid_buffer=None, context=None): rcuid = [] while component is not context: if component is model: - raise ValueError("Context '%s' does not apply to component " - "'%s'" % (context.name, - orig_component.name)) + raise ValueError( + "Context '%s' does not apply to component " + "'%s'" % (context.name, orig_component.name) + ) c = component.parent_component() if c is component: - rcuid.append(( c.local_name, () )) + rcuid.append((c.local_name, ())) elif cuid_buffer is not None: if id(component) not in cuid_buffer: c_local_name = c.local_name @@ -450,16 +475,16 @@ def _parse_cuid_v2(self, label): elif tok.type == ')': tmp = tuple(idx_stack.pop()) idx_stack[-1].append(tmp) - elif idx_stack: # processing a component index + elif idx_stack: # processing a component index if tok.type == ',': pass elif tok.type == 'STAR': idx_stack[-1].append(tok.value) else: - assert tok.type in {'WORD','STRING','NUMBER','PICKLE'} + assert tok.type in {'WORD', 'STRING', 'NUMBER', 'PICKLE'} idx_stack[-1].append(tok.value) else: - assert tok.type in {'WORD','STRING'} + assert tok.type in {'WORD', 'STRING'} assert name is None name = tok.value assert not idx_stack @@ -476,11 +501,11 @@ def _parse_cuid_v1(self, label): cList = label.split('.') for c in cList: if c[-1] == ']': - c_info = c[:-1].split('[',1) + c_info = c[:-1].split('[', 1) else: - c_info = c.split(':',1) + c_info = c.split(':', 1) if len(c_info) == 1: - yield ( c_info[0], tuple() ) + yield (c_info[0], tuple()) else: idx = c_info[1].split(',') for i, val in enumerate(idx): @@ -490,14 +515,14 @@ def _parse_cuid_v1(self, label): idx[i] = str(val[1:]) elif val[0] == '#': idx[i] = _int_or_float(val[1:]) - elif val[0] in "\"'" and val[-1] == val[0]: + elif val[0] in "\"'" and val[-1] == val[0]: idx[i] = val[1:-1] elif _re_number.match(val): idx[i] = _int_or_float(val) if len(idx) == 1 and idx[0] == '**': - yield ( c_info[0], (Ellipsis,) ) + yield (c_info[0], (Ellipsis,)) else: - yield ( c_info[0], tuple(idx) ) + yield (c_info[0], tuple(idx)) def _resolve_cuid(self, block): obj = block @@ -517,8 +542,11 @@ def _resolve_cuid(self, block): return None return obj - @deprecated("ComponentUID.find_component() is deprecated. " - "Use ComponentUID.find_component_on()", version='5.7.2') + @deprecated( + "ComponentUID.find_component() is deprecated. " + "Use ComponentUID.find_component_on()", + version='5.7.2', + ) def find_component(self, block): return self.find_component_on(block) @@ -578,18 +606,18 @@ def matches(self, component, context=None): if s_idx_val is Ellipsis: if len(idx) < len(s_idx) - 1: return False - for _k in range(-1, j-len(s_idx), -1): + for _k in range(-1, j - len(s_idx), -1): if s_idx[_k].__class__ is slice: continue elif s_idx[_k] != idx[_k]: return False - # Everything after the elipsis matched, so we can + # Everything after the ellipsis matched, so we can # move on to the next level. break if s_idx_val != idx[j]: return False # Matched if all self._cids were consumed - return i+1 == len(self._cids) + return i + 1 == len(self._cids) def _int_or_float(n): @@ -600,6 +628,7 @@ def _int_or_float(n): _int = 0 # a random int return _int if _num == _int else _num + # Known escape sequences: # \U{8}: unicode 8-digit hex codes # \u{4}: unicode 4-digit hex codes @@ -608,12 +637,16 @@ def _int_or_float(n): # \N{...}" unicode by name # \\, \', \", \a, \b, \f, \n, \r, \t, \v _re_escape_sequences = re.compile( - r"\\U[a-fA-F0-9]{8}|\\u[a-fA-F0-9]{4}|\\x[a-fA-F0-9]{2}" + - r"|\\[0-7]{1,3}|\\N\{[^}]+\}|\\[\\'\"abfnrtv]", re.UNICODE | re.VERBOSE) + r"\\U[a-fA-F0-9]{8}|\\u[a-fA-F0-9]{4}|\\x[a-fA-F0-9]{2}" + + r"|\\[0-7]{1,3}|\\N\{[^}]+\}|\\[\\'\"abfnrtv]", + re.UNICODE | re.VERBOSE, +) + def _match_escape(match): return codecs.decode(match.group(0), 'unicode-escape') + # # NOTE: literals and _re_number from component_namer # @@ -622,20 +655,22 @@ def _match_escape(match): t_ignore = " \t\r" tokens = [ - "WORD", # unquoted string - "STRING", # quoted string - "NUMBER", # raw number - "STAR", # either * or ** - "PICKLE", # a pickled index object + "WORD", # unquoted string + "STRING", # quoted string + "NUMBER", # raw number + "STAR", # either * or ** + "PICKLE", # a pickled index object ] + # Numbers should only appear in getitem lists, so they must be followed # by a delimiter token (one of ',]') -@ply.lex.TOKEN(_re_number.pattern+r'(?=[,\]])') +@ply.lex.TOKEN(_re_number.pattern + r'(?=[,\]])') def t_NUMBER(t): t.value = _int_or_float(t.value) return t + # A "word" must start with an alphanumeric character, followed by any # number of "non-special" characters. This regex matches numbers as # well as more traditional string names, so it is important that it is @@ -645,14 +680,18 @@ def t_WORD(t): t.value = t.value.strip() return t + # A "string" is a proper quoted string _quoted_str = r"'(?:[^'\\]|\\.)*'" -_general_str = "|".join([_quoted_str, _quoted_str.replace("'",'"')]) +_general_str = "|".join([_quoted_str, _quoted_str.replace("'", '"')]) + + @ply.lex.TOKEN(_general_str) def t_STRING(t): t.value = _re_escape_sequences.sub(_match_escape, t.value[1:-1]) return t + @ply.lex.TOKEN(r'\*{1,2}') def t_STAR(t): if len(t.value) == 1: @@ -661,7 +700,8 @@ def t_STAR(t): t.value = Ellipsis return t -@ply.lex.TOKEN(r'\|b?(?:'+_general_str+")") + +@ply.lex.TOKEN(r'\|b?(?:' + _general_str + ")") def t_PICKLE(t): start = 3 if t.value[1] == 'b' else 2 unescaped = _re_escape_sequences.sub(_match_escape, t.value[start:-1]) @@ -669,8 +709,10 @@ def t_PICKLE(t): t.value = pickle.loads(rawstr) return t + # Error handling rule def t_error(t): # Note this parser does not allow "\n", so lexpos is the column number - raise IOError("ERROR: Token '%s' Line %s Column %s" - % (t.value, t.lineno, t.lexpos+1)) + raise IOError( + "ERROR: Token '%s' Line %s Column %s" % (t.value, t.lineno, t.lexpos + 1) + ) diff --git a/pyomo/core/base/config.py b/pyomo/core/base/config.py index 4a007a7d724..4c6cc06f90c 100644 --- a/pyomo/core/base/config.py +++ b/pyomo/core/base/config.py @@ -13,39 +13,39 @@ import json import pyomo.common.envvar as envvar -from pyomo.common.config import ( - ConfigBase, ConfigBlock, ConfigValue, ADVANCED_OPTION, -) +from pyomo.common.config import ConfigBase, ConfigBlock, ConfigValue, ADVANCED_OPTION from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args import logging + logger = logging.getLogger('pyomo.core') class _PyomoOptions(object): - def __init__(self): - self._options_stack = [ default_pyomo_config() ] + self._options_stack = [default_pyomo_config()] # Load the user's configuration - sources = [(json, 'json', True, 'json', {}), - (json, 'jsn', True, 'json', {})] + sources = [(json, 'json', True, 'json', {}), (json, 'jsn', True, 'json', {})] sources.append((yaml, 'yml', yaml_available, 'yaml', yaml_load_args)) sources.append((yaml, 'yaml', yaml_available, 'yaml', yaml_load_args)) for parser, suffix, available, library, parser_args in sources: - cfg_file = os.path.join(envvar.PYOMO_CONFIG_DIR, 'config.'+suffix) + cfg_file = os.path.join(envvar.PYOMO_CONFIG_DIR, 'config.' + suffix) if not os.path.exists(cfg_file): continue if not available: - logger.warning("Default configuration file (%s) cannot be " - "loaded; %s is not available" - % (cfg_file, library)) + logger.warning( + "Default configuration file (%s) cannot be " + "loaded; %s is not available" % (cfg_file, library) + ) continue fp = open(cfg_file) try: data = parser.load(fp, **parser_args) except: - logger.error("Error parsing the user's default " - "configuration file\n\t%s." % (cfg_file,)) + logger.error( + "Error parsing the user's default " + "configuration file\n\t%s." % (cfg_file,) + ) self._options_stack[0].set_value(data) def active_config(self): @@ -59,7 +59,7 @@ def __getitem__(self, key): return self.active_config().__getitem__(key) def get(self, key, default=ConfigBase.NoArgument): - return self.active_config().get(key,default) + return self.active_config().get(key, default) def __setitem__(self, key, val): return self.active_config().__setitem__(key, val) @@ -74,13 +74,13 @@ def __iter__(self): return self.active_config().__iter__() def __getattr__(self, name): - #if name in self.__dict__: + # if name in self.__dict__: # return self.__dict__[name] return self._options_stack[-1].__getattr__(name) def __setattr__(self, name, value): if name == '_options_stack': - super(_PyomoOptions,self).__setattr__(name, value) + super(_PyomoOptions, self).__setattr__(name, value) else: return self.active_config().__setattr__(name, value) @@ -130,14 +130,19 @@ def reset(self): def default_pyomo_config(): config = ConfigBlock("Pyomo configuration file") - config.declare('paranoia_level', ConfigValue( - 0, int, - 'Pyomo paranoia and error checking level', - """Higher levels of paranoia enable additional error checking and - warning messages that may assist users in identifying likely - modeling problems. - Default=0""", - visibility=ADVANCED_OPTION ) ) + config.declare( + 'paranoia_level', + ConfigValue( + 0, + int, + 'Pyomo paranoia and error checking level', + """Higher levels of paranoia enable additional error checking and + warning messages that may assist users in identifying likely + modeling problems. + Default=0""", + visibility=ADVANCED_OPTION, + ), + ) return config diff --git a/pyomo/core/base/connector.py b/pyomo/core/base/connector.py index 3f2a6cd2e2a..f3d4833b837 100644 --- a/pyomo/core/base/connector.py +++ b/pyomo/core/base/connector.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = [ 'Connector' ] +__all__ = ['Connector'] import logging import sys @@ -19,13 +19,13 @@ from pyomo.common.formatting import tabular_writer from pyomo.common.log import is_debug_set from pyomo.common.modeling import NOTSET +from pyomo.common.numeric_types import value from pyomo.common.timing import ConstructionTimer - +from pyomo.core.expr.numvalue import NumericValue from pyomo.core.base.component import ComponentData, ModelComponentFactory from pyomo.core.base.global_set import UnindexedComponent_index from pyomo.core.base.indexed_component import IndexedComponent from pyomo.core.base.misc import apply_indexed_rule -from pyomo.core.base.numvalue import NumericValue, value from pyomo.core.base.transformation import TransformationFactory logger = logging.getLogger('pyomo.core') @@ -34,7 +34,7 @@ class _ConnectorData(ComponentData, NumericValue): """Holds the actual connector information""" - __slots__ = ('vars','aggregators') + __slots__ = ('vars', 'aggregators') def __init__(self, component=None): """Constructor""" @@ -43,23 +43,11 @@ def __init__(self, component=None): # following constructors: # - ComponentData # - NumericValue - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET self.vars = {} self.aggregators = {} - - - def __getstate__(self): - state = super(_ConnectorData, self).__getstate__() - for i in _ConnectorData.__slots__: - state[i] = getattr(self, i) - return state - - # Note: None of the slots on this class need to be edited, so we - # don't need to implement a specialized __setstate__ method, and - # can quietly rely on the super() class's implementation. def set_value(self, value): msg = "Cannot specify the value of a connector '%s'" @@ -99,18 +87,18 @@ def is_integer(self): def is_continuous(self): return len(self) and all(v.is_continuous() for v in self._iter_vars()) - def add(self, var, name=None, aggregate=None): if name is None: name = var.local_name if name in self.vars: - raise ValueError("Cannot insert duplicate variable name " - "'%s' into Connector '%s'" % (name, self.name)) + raise ValueError( + "Cannot insert duplicate variable name " + "'%s' into Connector '%s'" % (name, self.name) + ) self.vars[name] = var if aggregate is not None: self.aggregators[name] = aggregate - def _iter_vars(self): for var in self.vars.values(): if not hasattr(var, 'is_indexed') or not var.is_indexed(): @@ -121,17 +109,20 @@ def _iter_vars(self): @ModelComponentFactory.register( - "A bundle of variables that can be manipulated together.") -@deprecated("Use of pyomo.connectors is deprecated. " - "Its functionality has been replaced by pyomo.network.", - version='5.6.9') + "A bundle of variables that can be manipulated together." +) +@deprecated( + "Use of pyomo.connectors is deprecated. " + "Its functionality has been replaced by pyomo.network.", + version='5.6.9', +) class Connector(IndexedComponent): """A collection of variables, which may be defined over a index The idea behind a Connector is to create a bundle of variables that can be manipulated as a single variable within constraints. While Connectors inherit from variable (mostly so that the expression - infrastucture can manipulate them), they are not actual variables + infrastructure can manipulate them), they are not actual variables that are exposed to the solver. Instead, a preprocessor (ConnectorExpander) will look for expressions that involve connectors and replace the single constraint with a list of @@ -172,15 +163,15 @@ def _getitem_when_not_present(self, idx): _conval = self._data[idx] = _ConnectorData(component=self) return _conval - def construct(self, data=None): - if is_debug_set(logger): #pragma:nocover - logger.debug( "Constructing Connector, name=%s, from data=%s" - % (self.name, data) ) + if is_debug_set(logger): # pragma:nocover + logger.debug( + "Constructing Connector, name=%s, from data=%s" % (self.name, data) + ) if self._constructed: return timer = ConstructionTimer(self) - self._constructed=True + self._constructed = True # # Construct _ConnectorData objects for all index values # @@ -195,40 +186,41 @@ def _initialize_members(self, initSet): for idx in initSet: tmp = self[idx] for key in self._implicit: - tmp.add(None,key) + tmp.add(None, key) if self._extends: for key, val in self._extends.vars.items(): - tmp.add(val,key) + tmp.add(val, key) for key, val in self._initialize.items(): - tmp.add(val,key) + tmp.add(val, key) if self._rule: - items = apply_indexed_rule( - self, self._rule, self._parent(), idx) + items = apply_indexed_rule(self, self._rule, self._parent(), idx) for key, val in items.items(): - tmp.add(val,key) - + tmp.add(val, key) def _pprint(self, ostream=None, verbose=False): """Print component information.""" - def _line_generator(k,v): + + def _line_generator(k, v): for _k, _v in sorted(v.vars.items()): if _v is None: _len = '-' elif _k in v.aggregators: _len = '*' - elif hasattr(_v,'__len__'): + elif hasattr(_v, '__len__'): _len = len(_v) else: _len = 1 yield _k, _len, str(_v) - return ( [("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None), - ], - self._data.items(), - ( "Name","Size", "Variable", ), - _line_generator - ) + return ( + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ], + self._data.items(), + ("Name", "Size", "Variable"), + _line_generator, + ) def display(self, prefix="", ostream=None): """ @@ -240,42 +232,41 @@ def display(self, prefix="", ostream=None): return if ostream is None: ostream = sys.stdout - tab=" " - ostream.write(prefix+self.local_name+" : ") - ostream.write("Size="+str(len(self))) + tab = " " + ostream.write(prefix + self.local_name + " : ") + ostream.write("Size=" + str(len(self))) ostream.write("\n") - def _line_generator(k,v): + + def _line_generator(k, v): for _k, _v in sorted(v.vars.items()): if _v is None: _val = '-' elif not hasattr(_v, 'is_indexed') or not _v.is_indexed(): - _val = str(value( _v )) + _val = str(value(_v)) else: - _val = "{%s}" % (', '.join('%r: %r' % ( - x, value(_v[x])) for x in sorted(_v._data) ),) + _val = "{%s}" % ( + ', '.join( + '%r: %r' % (x, value(_v[x])) for x in sorted(_v._data) + ), + ) yield _k, _val - tabular_writer( ostream, prefix+tab, - ((k,v) for k,v in self._data.items()), - ( "Name","Value" ), _line_generator ) + tabular_writer( + ostream, + prefix + tab, + ((k, v) for k, v in self._data.items()), + ("Name", "Value"), + _line_generator, + ) -class ScalarConnector(Connector, _ConnectorData): +class ScalarConnector(Connector, _ConnectorData): def __init__(self, *args, **kwd): _ConnectorData.__init__(self, component=self) Connector.__init__(self, *args, **kwd) self._index = UnindexedComponent_index - # - # Since this class derives from Component and Component.__getstate__ - # just packs up the entire __dict__ into the state dict, we do not - # need to define the __getstate__ or __setstate__ methods. - # We just defer to the super() get/set state. Since all of our - # get/set state methods rely on super() to traverse the MRO, this - # will automatically pick up both the Component and Data base classes. - # - class SimpleConnector(metaclass=RenamedClass): __renamed__new_class__ = ScalarConnector @@ -284,5 +275,5 @@ class SimpleConnector(metaclass=RenamedClass): class IndexedConnector(Connector): """An array of connectors""" - pass + pass diff --git a/pyomo/core/base/constraint.py b/pyomo/core/base/constraint.py index 2b5ace99dc0..8316088dbf9 100644 --- a/pyomo/core/base/constraint.py +++ b/pyomo/core/base/constraint.py @@ -1,4 +1,3 @@ - # ___________________________________________________________________________ # # Pyomo: Python Optimization Modeling Objects @@ -10,8 +9,13 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ['Constraint', '_ConstraintData', 'ConstraintList', - 'simple_constraint_rule', 'simple_constraintlist_rule'] +__all__ = [ + 'Constraint', + '_ConstraintData', + 'ConstraintList', + 'simple_constraint_rule', + 'simple_constraintlist_rule', +] import io import sys @@ -26,20 +30,34 @@ from pyomo.common.log import is_debug_set from pyomo.common.modeling import NOTSET from pyomo.common.timing import ConstructionTimer -from pyomo.core.expr import logical_expr + from pyomo.core.expr.numvalue import ( - NumericValue, value, as_numeric, is_fixed, native_numeric_types, + NumericValue, + value, + as_numeric, + is_fixed, + native_numeric_types, native_types, ) +from pyomo.core.expr import ( + ExpressionType, + EqualityExpression, + InequalityExpression, + RangedExpression, +) from pyomo.core.base.component import ActiveComponentData, ModelComponentFactory from pyomo.core.base.global_set import UnindexedComponent_index from pyomo.core.base.indexed_component import ( - ActiveIndexedComponent, UnindexedComponent_set, rule_wrapper, + ActiveIndexedComponent, + UnindexedComponent_set, + rule_wrapper, ) from pyomo.core.base.set import Set from pyomo.core.base.disable_methods import disable_methods from pyomo.core.base.initializer import ( - Initializer, IndexedCallInitializer, CountedCallInitializer, + Initializer, + IndexedCallInitializer, + CountedCallInitializer, ) @@ -47,6 +65,11 @@ _inf = float('inf') _nonfinite_values = {_inf, -_inf} +_known_relational_expressions = { + EqualityExpression, + InequalityExpression, + RangedExpression, +} _rule_returned_none_error = """Constraint '%s': rule returned None. Constraint rules must return either a valid expression, a 2- or 3-member @@ -55,6 +78,7 @@ forgetting to include the "return" statement at the end of your rule. """ + def simple_constraint_rule(rule): """ This is a decorator that translates None/True/False return @@ -70,11 +94,15 @@ def C_rule(model, i, j): model.c = Constraint(rule=simple_constraint_rule(...)) """ - return rule_wrapper(rule, { - None: Constraint.Skip, - True: Constraint.Feasible, - False: Constraint.Infeasible, - }) + return rule_wrapper( + rule, + { + None: Constraint.Skip, + True: Constraint.Feasible, + False: Constraint.Infeasible, + }, + ) + def simple_constraintlist_rule(rule): """ @@ -91,16 +119,21 @@ def C_rule(model, i, j): model.c = ConstraintList(expr=simple_constraintlist_rule(...)) """ - return rule_wrapper(rule, { - None: ConstraintList.End, - True: Constraint.Feasible, - False: Constraint.Infeasible, - }) + return rule_wrapper( + rule, + { + None: ConstraintList.End, + True: Constraint.Feasible, + False: Constraint.Infeasible, + }, + ) + # # This class is a pure interface # + class _ConstraintData(ActiveComponentData): """ This class defines the data for a single constraint. @@ -139,8 +172,7 @@ def __init__(self, component=None): # - _ConstraintData, # - ActiveComponentData # - ComponentData - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET self._active = True @@ -280,15 +312,14 @@ class _GeneralConstraintData(_ConstraintData): __slots__ = ('_body', '_lower', '_upper', '_expr') - def __init__(self, expr=None, component=None): + def __init__(self, expr=None, component=None): # # These lines represent in-lining of the # following constructors: # - _ConstraintData, # - ActiveComponentData # - ComponentData - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._active = True self._body = None @@ -298,18 +329,6 @@ def __init__(self, expr=None, component=None): if expr is not None: self.set_value(expr) - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - result = super(_GeneralConstraintData, self).__getstate__() - for i in _GeneralConstraintData.__slots__: - result[i] = getattr(self, i) - return result - - # Since this class requires no special processing of the state - # dictionary, it does not need to implement __setstate__() - # # Abstract Interface # @@ -340,8 +359,8 @@ def _lb(self): raise ValueError( "Constraint '%s' is a Ranged Inequality with a " "variable %s bound. Cannot normalize the " - "constraint or send it to a solver." - % (self.name, 'lower')) + "constraint or send it to a solver." % (self.name, 'lower') + ) return bound def _ub(self): @@ -355,8 +374,8 @@ def _ub(self): raise ValueError( "Constraint '%s' is a Ranged Inequality with a " "variable %s bound. Cannot normalize the " - "constraint or send it to a solver." - % (self.name, 'upper')) + "constraint or send it to a solver." % (self.name, 'upper') + ) return bound @property @@ -400,7 +419,8 @@ def lb(self): else: raise ValueError( "Constraint '%s' created with an invalid non-finite " - "lower bound (%s)." % (self.name, bound)) + "lower bound (%s)." % (self.name, bound) + ) return bound @property @@ -416,15 +436,16 @@ def ub(self): else: raise ValueError( "Constraint '%s' created with an invalid non-finite " - "upper bound (%s)." % (self.name, bound)) + "upper bound (%s)." % (self.name, bound) + ) return bound @property def equality(self): """A boolean indicating whether this is an equality constraint.""" - if self._expr.__class__ is logical_expr.EqualityExpression: + if self._expr.__class__ is EqualityExpression: return True - elif self._expr.__class__ is logical_expr.RangedExpression: + elif self._expr.__class__ is RangedExpression: # TODO: this is a very restrictive form of structural equality. lb = self._expr.arg(0) if lb is not None and lb is self._expr.arg(2): @@ -455,29 +476,23 @@ def set_value(self, expr): # Clear any previously-cached normalized constraint self._lower = self._upper = self._body = self._expr = None - _expr_type = expr.__class__ - if hasattr(expr, 'is_relational'): - if not expr.is_relational(): - raise ValueError( - "Constraint '%s' does not have a proper " - "value. Found '%s'\nExpecting a tuple or " - "equation. Examples:" - "\n sum(model.costs) == model.income" - "\n (0, model.price[item], 50)" - % (self.name, str(expr))) + if expr.__class__ in _known_relational_expressions: self._expr = expr - - elif _expr_type is tuple: # or expr_type is list: + elif expr.__class__ is tuple: # or expr_type is list: for arg in expr: - if arg is None or arg.__class__ in native_numeric_types \ - or isinstance(arg, NumericValue): + if ( + arg is None + or arg.__class__ in native_numeric_types + or isinstance(arg, NumericValue) + ): continue raise ValueError( "Constraint '%s' does not have a proper value. " "Constraint expressions expressed as tuples must " "contain native numeric types or Pyomo NumericValue " "objects. Tuple %s contained invalid type, %s" - % (self.name, expr, arg.__class__.__name__)) + % (self.name, expr, arg.__class__.__name__) + ) if len(expr) == 2: # # Form equality expression @@ -486,21 +501,19 @@ def set_value(self, expr): raise ValueError( "Constraint '%s' does not have a proper value. " "Equality Constraints expressed as 2-tuples " - "cannot contain None [received %s]" - % (self.name, expr,)) - self._expr = logical_expr.EqualityExpression(expr) + "cannot contain None [received %s]" % (self.name, expr) + ) + self._expr = EqualityExpression(expr) elif len(expr) == 3: # # Form (ranged) inequality expression # if expr[0] is None: - self._expr = logical_expr.InequalityExpression( - expr[1:], False) + self._expr = InequalityExpression(expr[1:], False) elif expr[2] is None: - self._expr = logical_expr.InequalityExpression( - expr[:2], False) + self._expr = InequalityExpression(expr[:2], False) else: - self._expr = logical_expr.RangedExpression(expr, False) + self._expr = RangedExpression(expr, False) else: raise ValueError( "Constraint '%s' does not have a proper value. " @@ -508,11 +521,12 @@ def set_value(self, expr): "length 2 or 3:\n" " Equality: (left, right)\n" " Inequality: (lower, expression, upper)" - % (self.name, len(expr))) + % (self.name, len(expr)) + ) # # Ignore an 'empty' constraint # - elif _expr_type is type: + elif expr.__class__ is type: del self.parent_component()[self.index()] if expr is Constraint.Skip: return @@ -521,76 +535,92 @@ def set_value(self, expr): # could be useful in the case of GDP where certain # disjuncts are trivially infeasible, but we would still # like to express the disjunction. - #del self.parent_component()[self.index()] - raise ValueError( - "Constraint '%s' is always infeasible" - % (self.name,) ) + # del self.parent_component()[self.index()] + raise ValueError("Constraint '%s' is always infeasible" % (self.name,)) else: raise ValueError( "Constraint '%s' does not have a proper " "value. Found '%s'\nExpecting a tuple or " - "equation. Examples:" + "relational expression. Examples:" "\n sum(model.costs) == model.income" - "\n (0, model.price[item], 50)" - % (self.name, str(expr))) + "\n (0, model.price[item], 50)" % (self.name, str(expr)) + ) elif expr is None: raise ValueError(_rule_returned_none_error % (self.name,)) - elif _expr_type is bool: + elif expr.__class__ is bool: raise ValueError( "Invalid constraint expression. The constraint " "expression resolved to a trivial Boolean (%s) " "instead of a Pyomo object. Please modify your " "rule to return Constraint.%s instead of %s." "\n\nError thrown for Constraint '%s'" - % (expr, "Feasible" if expr else "Infeasible", - expr, self.name)) + % (expr, "Feasible" if expr else "Infeasible", expr, self.name) + ) else: - msg = ("Constraint '%s' does not have a proper " - "value. Found '%s'\nExpecting a tuple or " - "equation. Examples:" - "\n sum(model.costs) == model.income" - "\n (0, model.price[item], 50)" - % (self.name, str(expr))) - raise ValueError(msg) + try: + if expr.is_expression_type(ExpressionType.RELATIONAL): + self._expr = expr + except AttributeError: + pass + if self._expr is None: + msg = ( + "Constraint '%s' does not have a proper " + "value. Found '%s'\nExpecting a tuple or " + "relational expression. Examples:" + "\n sum(model.costs) == model.income" + "\n (0, model.price[item], 50)" % (self.name, str(expr)) + ) + raise ValueError(msg) # # Normalize the incoming expressions, if we can # args = self._expr.args - if self._expr.__class__ is logical_expr.InequalityExpression: + if self._expr.__class__ is InequalityExpression: if self._expr.strict: raise ValueError( "Constraint '%s' encountered a strict " "inequality expression ('>' or '< '). All" " constraints must be formulated using " - "using '<=', '>=', or '=='." - % (self.name,)) - if args[1] is None or args[1].__class__ in native_numeric_types \ - or not args[1].is_potentially_variable(): + "using '<=', '>=', or '=='." % (self.name,) + ) + if ( + args[1] is None + or args[1].__class__ in native_numeric_types + or not args[1].is_potentially_variable() + ): self._body = args[0] self._upper = args[1] - elif args[0] is None or args[0].__class__ in native_numeric_types \ - or not args[0].is_potentially_variable(): + elif ( + args[0] is None + or args[0].__class__ in native_numeric_types + or not args[0].is_potentially_variable() + ): self._lower = args[0] self._body = args[1] else: self._body = args[0] - args[1] self._upper = 0 - elif self._expr.__class__ is logical_expr.EqualityExpression: + elif self._expr.__class__ is EqualityExpression: if args[0] is None or args[1] is None: # Error check: ensure equality does not have infinite RHS raise ValueError( "Equality constraint '%s' defined with " - "non-finite term (%sHS == None)." % ( - self.name, 'L' if args[0] is None else 'R')) - if args[0].__class__ in native_numeric_types or \ - not args[0].is_potentially_variable(): + "non-finite term (%sHS == None)." + % (self.name, 'L' if args[0] is None else 'R') + ) + if ( + args[0].__class__ in native_numeric_types + or not args[0].is_potentially_variable() + ): self._lower = self._upper = args[0] self._body = args[1] - elif args[1].__class__ in native_numeric_types or \ - not args[1].is_potentially_variable(): + elif ( + args[1].__class__ in native_numeric_types + or not args[1].is_potentially_variable() + ): self._lower = self._upper = args[1] self._body = args[0] else: @@ -604,25 +634,31 @@ def set_value(self, expr): # raise ValueError( # "Equality constraint '%s' defined with " # "non-finite term." % (self.name)) - elif self._expr.__class__ is logical_expr.RangedExpression: + elif self._expr.__class__ is RangedExpression: if any(self._expr.strict): raise ValueError( "Constraint '%s' encountered a strict " "inequality expression ('>' or '< '). All" - " constraints must be formulated using " - "using '<=', '>=', or '=='." - % (self.name,)) - if all(( arg is None or - arg.__class__ in native_numeric_types or - not arg.is_potentially_variable() ) - for arg in (args[0], args[2])): + " constraints must be formulated using " + "using '<=', '>=', or '=='." % (self.name,) + ) + if all( + ( + arg is None + or arg.__class__ in native_numeric_types + or not arg.is_potentially_variable() + ) + for arg in (args[0], args[2]) + ): self._lower, self._body, self._upper = args else: # Defensive programming: we currently only support three # relational expression types. This will only be hit if # someone defines a fourth... - raise DeveloperError("Unrecognized relational expression type: %s" - % (self._expr.__class__.__name__,)) + raise DeveloperError( + "Unrecognized relational expression type: %s" + % (self._expr.__class__.__name__,) + ) # We have historically forced the body to be a numeric expression. # TODO: remove this requirement @@ -639,7 +675,8 @@ def set_value(self, expr): else: raise ValueError( "Constraint '%s' created with an invalid non-finite " - "lower bound (%s)." % (self.name, self._lower)) + "lower bound (%s)." % (self.name, self._lower) + ) if self._upper.__class__ in native_numeric_types: bound = self._upper if bound in _nonfinite_values or bound != bound: @@ -649,7 +686,8 @@ def set_value(self, expr): else: raise ValueError( "Constraint '%s' created with an invalid non-finite " - "upper bound (%s)." % (self.name, self._upper)) + "upper bound (%s)." % (self.name, self._upper) + ) @ModelComponentFactory.register("General constraint expressions.") @@ -697,7 +735,10 @@ class Constraint(ActiveIndexedComponent): """ _ComponentDataClass = _GeneralConstraintData - class Infeasible(object): pass + + class Infeasible(object): + pass + Feasible = ActiveIndexedComponent.Skip NoConstraint = ActiveIndexedComponent.Skip Violated = Infeasible @@ -706,17 +747,17 @@ class Infeasible(object): pass def __new__(cls, *args, **kwds): if cls != Constraint: return super(Constraint, cls).__new__(cls) - if not args or (args[0] is UnindexedComponent_set and len(args)==1): + if not args or (args[0] is UnindexedComponent_set and len(args) == 1): return super(Constraint, cls).__new__(AbstractScalarConstraint) else: return super(Constraint, cls).__new__(IndexedConstraint) - + @overload - def __init__(self, *indexes, expr=None, rule=None, name=None, doc=None): ... - + def __init__(self, *indexes, expr=None, rule=None, name=None, doc=None): + ... + def __init__(self, *args, **kwargs): - _init = self._pop_from_kwargs( - 'Constraint', kwargs, ('rule', 'expr'), None) + _init = self._pop_from_kwargs('Constraint', kwargs, ('rule', 'expr'), None) # Special case: we accept 2- and 3-tuples as constraints if type(_init) is tuple: self.rule = Initializer(_init, treat_sequences_as_mappings=False) @@ -732,7 +773,7 @@ def construct(self, data=None): """ if self._constructed: return - self._constructed=True + self._constructed = True timer = ConstructionTimer(self) if is_debug_set(logger): @@ -751,8 +792,8 @@ def construct(self, data=None): if rule.constant() and self.is_indexed(): raise IndexError( "Constraint '%s': Cannot initialize multiple indices " - "of a constraint with a single expression" % - (self.name,) ) + "of a constraint with a single expression" % (self.name,) + ) block = self.parent_block() if rule.contains_indices(): @@ -775,10 +816,8 @@ def construct(self, data=None): logger.error( "Rule failed when generating expression for " "Constraint %s with index %s:\n%s: %s" - % (self.name, - str(index), - type(err).__name__, - err)) + % (self.name, str(index), type(err).__name__, err) + ) raise finally: timer.report() @@ -786,8 +825,7 @@ def construct(self, data=None): def _getitem_when_not_present(self, idx): if self.rule is None: raise KeyError(idx) - con = self._setitem_when_not_present( - idx, self.rule(self.parent_block(), idx)) + con = self._setitem_when_not_present(idx, self.rule(self.parent_block(), idx)) if con is None: raise KeyError(idx) return con @@ -797,18 +835,20 @@ def _pprint(self): Return data that will be printed for this component. """ return ( - [("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None), - ("Active", self.active), - ], + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ("Active", self.active), + ], self.items(), - ( "Lower","Body","Upper","Active" ), - lambda k, v: [ "-Inf" if v.lower is None else v.lower, - v.body, - "+Inf" if v.upper is None else v.upper, - v.active, - ] - ) + ("Lower", "Body", "Upper", "Active"), + lambda k, v: [ + "-Inf" if v.lower is None else v.lower, + v.body, + "+Inf" if v.upper is None else v.upper, + v.active, + ], + ) def display(self, prefix="", ostream=None): """ @@ -820,19 +860,22 @@ def display(self, prefix="", ostream=None): return if ostream is None: ostream = sys.stdout - tab=" " - ostream.write(prefix+self.local_name+" : ") - ostream.write("Size="+str(len(self))) + tab = " " + ostream.write(prefix + self.local_name + " : ") + ostream.write("Size=" + str(len(self))) ostream.write("\n") - tabular_writer( ostream, prefix+tab, - ((k,v) for k,v in self._data.items() if v.active), - ( "Lower","Body","Upper" ), - lambda k, v: [ - value(v.lower, exception=False), - value(v.body, exception=False), - value(v.upper, exception=False), - ]) + tabular_writer( + ostream, + prefix + tab, + ((k, v) for k, v in self._data.items() if v.active), + ("Lower", "Body", "Upper"), + lambda k, v: [ + value(v.lower, exception=False), + value(v.body, exception=False), + value(v.upper, exception=False), + ], + ) class ScalarConstraint(_GeneralConstraintData, Constraint): @@ -846,16 +889,6 @@ def __init__(self, *args, **kwds): Constraint.__init__(self, *args, **kwds) self._index = UnindexedComponent_index - # - # Since this class derives from Component and - # Component.__getstate__ just packs up the entire __dict__ into - # the state dict, we do not need to define the __getstate__ or - # __setstate__ methods. We just defer to the super() get/set - # state. Since all of our get/set state methods rely on super() - # to traverse the MRO, this will automatically pick up both the - # Component and Data base classes. - # - # # Singleton constraints are strange in that we want them to be # both be constructed but have len() == 0 when not initialized with @@ -874,7 +907,8 @@ def body(self): "Accessing the body of ScalarConstraint " "'%s' before the Constraint has been assigned " "an expression. There is currently " - "nothing to access." % (self.name)) + "nothing to access." % (self.name) + ) return _GeneralConstraintData.body.fget(self) @property @@ -885,7 +919,8 @@ def lower(self): "Accessing the lower bound of ScalarConstraint " "'%s' before the Constraint has been assigned " "an expression. There is currently " - "nothing to access." % (self.name)) + "nothing to access." % (self.name) + ) return _GeneralConstraintData.lower.fget(self) @property @@ -896,7 +931,8 @@ def upper(self): "Accessing the upper bound of ScalarConstraint " "'%s' before the Constraint has been assigned " "an expression. There is currently " - "nothing to access." % (self.name)) + "nothing to access." % (self.name) + ) return _GeneralConstraintData.upper.fget(self) @property @@ -907,7 +943,8 @@ def equality(self): "Accessing the equality flag of ScalarConstraint " "'%s' before the Constraint has been assigned " "an expression. There is currently " - "nothing to access." % (self.name)) + "nothing to access." % (self.name) + ) return _GeneralConstraintData.equality.fget(self) @property @@ -918,7 +955,8 @@ def strict_lower(self): "Accessing the strict_lower flag of ScalarConstraint " "'%s' before the Constraint has been assigned " "an expression. There is currently " - "nothing to access." % (self.name)) + "nothing to access." % (self.name) + ) return _GeneralConstraintData.strict_lower.fget(self) @property @@ -929,7 +967,8 @@ def strict_upper(self): "Accessing the strict_upper flag of ScalarConstraint " "'%s' before the Constraint has been assigned " "an expression. There is currently " - "nothing to access." % (self.name)) + "nothing to access." % (self.name) + ) return _GeneralConstraintData.strict_upper.fget(self) def clear(self): @@ -950,8 +989,8 @@ def add(self, index, expr): if index is not None: raise ValueError( "ScalarConstraint object '%s' does not accept " - "index values other than None. Invalid value: %s" - % (self.name, index)) + "index values other than None. Invalid value: %s" % (self.name, index) + ) self.set_value(expr) return self @@ -961,8 +1000,18 @@ class SimpleConstraint(metaclass=RenamedClass): __renamed__version__ = '6.0' -@disable_methods({'add', 'set_value', 'body', 'lower', 'upper', 'equality', - 'strict_lower', 'strict_upper'}) +@disable_methods( + { + 'add', + 'set_value', + 'body', + 'lower', + 'upper', + 'equality', + 'strict_lower', + 'strict_upper', + } +) class AbstractScalarConstraint(ScalarConstraint): pass @@ -973,7 +1022,6 @@ class AbstractSimpleConstraint(metaclass=RenamedClass): class IndexedConstraint(Constraint): - # # Leaving this method for backward compatibility reasons # @@ -994,30 +1042,27 @@ class ConstraintList(IndexedConstraint): added an index value is not specified. """ - class End(object): pass + class End(object): + pass def __init__(self, **kwargs): """Constructor""" if 'expr' in kwargs: - raise ValueError( - "ConstraintList does not accept the 'expr' keyword") + raise ValueError("ConstraintList does not accept the 'expr' keyword") _rule = kwargs.pop('rule', None) self._starting_index = kwargs.pop('starting_index', 1) args = (Set(dimen=1),) super(ConstraintList, self).__init__(*args, **kwargs) - self.rule = Initializer(_rule, - treat_sequences_as_mappings=False, - allow_generators=True) + self.rule = Initializer( + _rule, treat_sequences_as_mappings=False, allow_generators=True + ) # HACK to make the "counted call" syntax work. We wait until # after the base class is set up so that is_indexed() is # reliable. if self.rule is not None and type(self.rule) is IndexedCallInitializer: - self.rule = CountedCallInitializer( - self, self.rule, self._starting_index - ) - + self.rule = CountedCallInitializer(self, self.rule, self._starting_index) def construct(self, data=None): """ @@ -1025,11 +1070,10 @@ def construct(self, data=None): """ if self._constructed: return - self._constructed=True + self._constructed = True if is_debug_set(logger): - logger.debug("Constructing constraint list %s" - % (self.name)) + logger.debug("Constructing constraint list %s" % (self.name)) self.index_set().construct() @@ -1042,10 +1086,8 @@ def construct(self, data=None): continue self.add(cc) - def add(self, expr): """Add a constraint with an implicit index.""" next_idx = len(self._index_set) + self._starting_index self._index_set.add(next_idx) return self.__setitem__(next_idx, expr) - diff --git a/pyomo/core/base/disable_methods.py b/pyomo/core/base/disable_methods.py index ba2ff555a4d..61d63d0a385 100644 --- a/pyomo/core/base/disable_methods.py +++ b/pyomo/core/base/disable_methods.py @@ -34,6 +34,7 @@ "%s.construct()." ) + def _disable_method(fcn, msg=None, exception=RuntimeError): _name = fcn.__name__ if msg is None: @@ -42,7 +43,7 @@ def _disable_method(fcn, msg=None, exception=RuntimeError): # functools.wraps doesn't preserve the function signature until # Python 3.4, and even then, does not preserve it accurately (e.g., # calling with the incorrect number of arguments does not generate - # an error). For backwards compatability with Python 2.x, we will + # an error). For backwards compatibility with Python 2.x, we will # create a temporary (local) function using exec that matches the # function signature passed in and raises an exception sig = inspect.signature(fcn) @@ -71,7 +72,12 @@ def _disable_method(fcn, msg=None, exception=RuntimeError): _funcdef = """def %s%s: raise %s("%s" %% (_msg, type(self).__name__, self.name, _name, self.name)) -""" % (_name, args, exception.__name__, _disabled_error) +""" % ( + _name, + args, + exception.__name__, + _disabled_error, + ) exec(_funcdef, _env) return functools.wraps(fcn)(_env[_name]) @@ -95,6 +101,7 @@ def _disable_property(fcn, msg=None, exception=RuntimeError): return property(fget=getter, fset=setter, doc=fcn.__doc__) + def disable_methods(methods): """Class decorator to disable methods before construct is called. @@ -105,8 +112,9 @@ def disable_methods(methods): restored. This prevents most class methods from having to begin with "`if not self.parent_component()._constructed: raise RuntimeError`" """ + def class_decorator(cls): - assert(len(cls.__bases__) == 1) + assert len(cls.__bases__) == 1 base = cls.__bases__[0] def construct(self, data=None): @@ -114,6 +122,7 @@ def construct(self, data=None): self._name = base.__name__ self.__class__ = base return base.construct(self, data) + construct.__doc__ = base.construct.__doc__ cls.construct = construct @@ -128,7 +137,8 @@ def construct(self, data=None): if not hasattr(base, method): raise DeveloperError( "Cannot disable method %s on %s: not present on base class" - % (method, cls)) + % (method, cls) + ) base_method = getattr(base, method) if type(base_method) is property: setattr(cls, method, _disable_property(base_method, msg, exc)) diff --git a/pyomo/core/base/enums.py b/pyomo/core/base/enums.py new file mode 100644 index 00000000000..35cca4e2ac4 --- /dev/null +++ b/pyomo/core/base/enums.py @@ -0,0 +1,96 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import enum +import sys + +if sys.version_info[:2] >= (3, 11): + strictEnum = {'boundary': enum.STRICT} +else: + strictEnum = {} + + +class TraversalStrategy(enum.Enum, **strictEnum): + BreadthFirstSearch = 1 + PrefixDepthFirstSearch = 2 + PostfixDepthFirstSearch = 3 + # aliases + BFS = BreadthFirstSearch + ParentLastDepthFirstSearch = PostfixDepthFirstSearch + PostfixDFS = PostfixDepthFirstSearch + ParentFirstDepthFirstSearch = PrefixDepthFirstSearch + PrefixDFS = PrefixDepthFirstSearch + DepthFirstSearch = PrefixDepthFirstSearch + DFS = DepthFirstSearch + + +class SortComponents(enum.Flag, **strictEnum): + + """ + This class is a convenient wrapper for specifying various sort + ordering. We pass these objects to the "sort" argument to various + accessors / iterators to control how much work we perform sorting + the resultant list. The idea is that + "sort=SortComponents.deterministic" is more descriptive than + "sort=True". + """ + + UNSORTED = 0 + # Note: skip '1' so that we can map True to something other than 1 + ORDERED_INDICES = 2 + SORTED_INDICES = 4 + ALPHABETICAL = 8 + + # aliases + # TODO: deprecate some of these + unsorted = UNSORTED + indices = SORTED_INDICES + declOrder = UNSORTED + declarationOrder = declOrder + alphaOrder = ALPHABETICAL + alphabeticalOrder = alphaOrder + alphabetical = alphaOrder + # both alpha and decl orders are deterministic, so only must sort indices + deterministic = indices + sortBoth = indices | alphabeticalOrder # Same as True + alphabetizeComponentAndIndex = sortBoth + + @classmethod + def _missing_(cls, value): + if type(value) is bool: + if value: + return cls.SORTED_INDICES | cls.ALPHABETICAL + else: + return cls.UNSORTED + elif value is None: + return cls.UNSORTED + return super()._missing_(value) + + @staticmethod + def default(): + return SortComponents.UNSORTED + + @staticmethod + def sorter(sort_by_names=False, sort_by_keys=False): + sort = SortComponents.default() + if sort_by_names: + sort |= SortComponents.ALPHABETICAL + if sort_by_keys: + sort |= SortComponents.SORTED_INDICES + return sort + + @staticmethod + def sort_names(flag): + return SortComponents.ALPHABETICAL in SortComponents(flag) + + @staticmethod + def sort_indices(flag): + return SortComponents.SORTED_INDICES in SortComponents(flag) diff --git a/pyomo/core/base/expression.py b/pyomo/core/base/expression.py index 930acc48ae8..df9abf0a5a5 100644 --- a/pyomo/core/base/expression.py +++ b/pyomo/core/base/expression.py @@ -21,21 +21,25 @@ from pyomo.common.modeling import NOTSET from pyomo.common.formatting import tabular_writer from pyomo.common.timing import ConstructionTimer - +from pyomo.common.numeric_types import ( + native_types, + native_numeric_types, + check_if_numeric_type, +) + +import pyomo.core.expr as EXPR +import pyomo.core.expr.numeric_expr as numeric_expr from pyomo.core.base.component import ComponentData, ModelComponentFactory from pyomo.core.base.global_set import UnindexedComponent_index -from pyomo.core.base.indexed_component import ( - IndexedComponent, - UnindexedComponent_set, ) +from pyomo.core.base.indexed_component import IndexedComponent, UnindexedComponent_set from pyomo.core.base.misc import apply_indexed_rule -from pyomo.core.base.numvalue import (NumericValue, - as_numeric) +from pyomo.core.expr.numvalue import as_numeric from pyomo.core.base.initializer import Initializer logger = logging.getLogger('pyomo.core') -class _ExpressionData(NumericValue): +class _ExpressionData(numeric_expr.NumericValue): """ An object that defines a named expression. @@ -45,50 +49,46 @@ class _ExpressionData(NumericValue): __slots__ = () + EXPRESSION_SYSTEM = EXPR.ExpressionType.NUMERIC + PRECEDENCE = 0 + ASSOCIATIVITY = EXPR.OperatorAssociativity.NON_ASSOCIATIVE + # # Interface # def __call__(self, exception=True): """Compute the value of this expression.""" - if self.expr is None: - return None - return self.expr(exception=exception) + (arg,) = self._args_ + if arg.__class__ in native_types: + # Note: native_types includes NoneType + return arg + return arg(exception=exception) def is_named_expression_type(self): """A boolean indicating whether this in a named expression.""" return True - def is_expression_type(self): + def is_expression_type(self, expression_system=None): """A boolean indicating whether this in an expression.""" - return True + return expression_system is None or expression_system == self.EXPRESSION_SYSTEM def arg(self, index): - if index < 0 or index >= 1: + if index != 0: raise KeyError("Invalid index for expression argument: %d" % index) - return self.expr - - @property - def _args_(self): - return (self.expr,) + return self._args_[0] @property def args(self): - return (self.expr,) + return self._args_ def nargs(self): return 1 - def _precedence(self): - return 0 - - def _associativity(self): - return 0 - - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): if verbose: return "%s{%s}" % (str(self), values[0]) - if self.expr is None: + if self._args_[0] is None: return "%s{None}" % str(self) return values[0] @@ -103,6 +103,8 @@ def _apply_operation(self, result): def polynomial_degree(self): """A tuple of subexpressions involved in this expressions operation.""" + if self._args_[0] is None: + return None return self.expr.polynomial_degree() def _compute_polynomial_degree(self, result): @@ -117,8 +119,14 @@ def _is_fixed(self, values): @property def expr(self): - """Return expression on this expression.""" - raise NotImplementedError + (arg,) = self._args_ + if arg is None: + return None + return as_numeric(arg) + + @expr.setter + def expr(self, value): + self.set_value(value) def set_value(self, expr): """Set the expression on this expression.""" @@ -150,12 +158,10 @@ class _GeneralExpressionDataImpl(_ExpressionData): expr The expression owned by this data. """ - __pickle_slots__ = ('_expr',) - __slots__ = () def __init__(self, expr=None): - self._expr = as_numeric(expr) if (expr is not None) else None + self._args_ = (expr,) def create_node_with_local_data(self, values): """ @@ -167,49 +173,31 @@ def create_node_with_local_data(self, values): """ obj = ScalarExpression() obj.construct() - obj.expr = values[0] + obj._args_ = values return obj - def __getstate__(self): - state = super(_GeneralExpressionDataImpl, self).__getstate__() - for i in _GeneralExpressionDataImpl.__pickle_slots__: - state[i] = getattr(self, i) - return state - - # Note: because NONE of the slots on this class need to be edited, - # we don't need to implement a specialized __setstate__ - # method. - # # Abstract Interface # - @property - def expr(self): - """Return expression on this expression.""" - return self._expr - - @expr.setter - def expr(self, expr): - self.set_value(expr) - def set_value(self, expr): """Set the expression on this expression.""" - if expr is None: - self._expr = None + if expr is None or expr.__class__ in native_numeric_types: + self._args_ = (expr,) return - expr = as_numeric(expr) - # In-place operators will leave self as an argument. We need to - # replace that with the current expression in order to avoid - # loops in the expression tree. - if expr.is_expression_type(): - _args = expr.args - if any(arg is self for arg in _args): - new_args = _args.__class__( - arg.expr if arg is self else arg for arg in _args - ) - expr = expr.create_node_with_local_data(new_args) - self._expr = expr + try: + if expr.is_numeric_type(): + self._args_ = (expr,) + return + except AttributeError: + if check_if_numeric_type(expr): + self._args_ = (expr,) + return + raise ValueError( + f"Cannot assign {expr.__class__.__name__} to " + f"'{self.name}': {self.__class__.__name__} components only " + "allow numeric expression types." + ) def is_constant(self): """A boolean indicating whether this expression is constant.""" @@ -219,10 +207,38 @@ def is_constant(self): def is_fixed(self): """A boolean indicating whether this expression is fixed.""" - return self._expr.is_fixed() + (e,) = self._args_ + return e.__class__ in native_types or e.is_fixed() + + # Override the in-place operators here so that we can redirect the + # dispatcher based on the current contained expression type and not + # this Expression object (which would map to "other") -class _GeneralExpressionData(_GeneralExpressionDataImpl, - ComponentData): + def __iadd__(self, other): + (e,) = self._args_ + return numeric_expr._add_dispatcher[e.__class__, other.__class__](e, other) + + # Note: the default implementation of __isub__ leverages __iadd__ + # and doesn't need to be reimplemented here + + def __imul__(self, other): + (e,) = self._args_ + return numeric_expr._mul_dispatcher[e.__class__, other.__class__](e, other) + + def __idiv__(self, other): + (e,) = self._args_ + return numeric_expr._div_dispatcher[e.__class__, other.__class__](e, other) + + def __itruediv__(self, other): + (e,) = self._args_ + return numeric_expr._div_dispatcher[e.__class__, other.__class__](e, other) + + def __ipow__(self, other): + (e,) = self._args_ + return numeric_expr._pow_dispatcher[e.__class__, other.__class__](e, other) + + +class _GeneralExpressionData(_GeneralExpressionDataImpl, ComponentData): """ An object that defines an expression that is never cloned @@ -237,18 +253,18 @@ class _GeneralExpressionData(_GeneralExpressionDataImpl, _component The expression component. """ - __slots__ = _GeneralExpressionDataImpl.__pickle_slots__ + __slots__ = ('_args_',) def __init__(self, expr=None, component=None): _GeneralExpressionDataImpl.__init__(self, expr) # Inlining ComponentData.__init__ - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET @ModelComponentFactory.register( - "Named expressions that can be used in other expressions.") + "Named expressions that can be used in other expressions." +) class Expression(IndexedComponent): """ A shared expression container, which may be defined over a index. @@ -269,18 +285,21 @@ class Expression(IndexedComponent): def __new__(cls, *args, **kwds): if cls != Expression: return super(Expression, cls).__new__(cls) - if not args or (args[0] is UnindexedComponent_set and len(args)==1): + if not args or (args[0] is UnindexedComponent_set and len(args) == 1): return ScalarExpression.__new__(ScalarExpression) else: return IndexedExpression.__new__(IndexedExpression) @overload - def __init__(self, *indexes, rule=None, expr=None, initialize=None, - name=None, doc=None): ... + def __init__( + self, *indexes, rule=None, expr=None, initialize=None, name=None, doc=None + ): + ... def __init__(self, *args, **kwds): _init = self._pop_from_kwargs( - 'Expression', kwds, ('rule', 'expr', 'initialize'), None) + 'Expression', kwds, ('rule', 'expr', 'initialize'), None + ) # Historically, Expression objects were dense (but None): # setting arg_not_specified causes Initializer to recognize # _init==None as a constant initializer returning None @@ -295,15 +314,14 @@ def __init__(self, *args, **kwds): def _pprint(self): return ( - [('Size', len(self)), - ('Index', None if (not self.is_indexed()) - else self._index_set) - ], + [ + ('Size', len(self)), + ('Index', None if (not self.is_indexed()) else self._index_set), + ], self.items(), ("Expression",), - lambda k,v: \ - ["Undefined" if v.expr is None else v.expr] - ) + lambda k, v: ["Undefined" if v.expr is None else v.expr], + ) def display(self, prefix="", ostream=None): """TODO""" @@ -311,18 +329,18 @@ def display(self, prefix="", ostream=None): return if ostream is None: ostream = sys.stdout - tab=" " - ostream.write(prefix+self.local_name+" : ") - ostream.write("Size="+str(len(self))) + tab = " " + ostream.write(prefix + self.local_name + " : ") + ostream.write("Size=" + str(len(self))) ostream.write("\n") tabular_writer( ostream, - prefix+tab, - ((k,v) for k,v in self._data.items()), - ( "Value", ), - lambda k, v: \ - ["Undefined" if v.expr is None else v()]) + prefix + tab, + ((k, v) for k, v in self._data.items()), + ("Value",), + lambda k, v: ["Undefined" if v.expr is None else v()], + ) # # A utility to extract all index-value pairs defining this @@ -331,8 +349,7 @@ def display(self, prefix="", ostream=None): # expensive to extract the contents of an expression. # def extract_values(self): - return {key:expression_data.expr - for key, expression_data in self.items()} + return {key: expression_data.expr for key, expression_data in self.items()} # # takes as input a (index, value) dictionary for updating this @@ -340,13 +357,12 @@ def extract_values(self): # checked through the __getitem__ method of this class. # def store_values(self, new_values): - - if (self.is_indexed() is False) and \ - (not None in new_values): + if (self.is_indexed() is False) and (not None in new_values): raise KeyError( "Cannot store value for scalar Expression" - "="+self.name+"; no value with index " - "None in input new values map.") + "=" + self.name + "; no value with index " + "None in input new values map." + ) for index, new_value in new_values.items(): self._data[index].set_value(new_value) @@ -358,7 +374,7 @@ def _getitem_when_not_present(self, idx): # an Expression if it was not originally defined, but I am less # convinced that implicitly creating an Expression (like what # works with a Var) makes sense. [JDS 25 Nov 17] - #raise KeyError(idx) + # raise KeyError(idx) else: _init = self._rule(self.parent_block(), idx) if _init is Expression.Skip: @@ -366,7 +382,7 @@ def _getitem_when_not_present(self, idx): return self._setitem_when_not_present(idx, _init) def construct(self, data=None): - """ Apply the rule to construct values in this set """ + """Apply the rule to construct values in this set""" if self._constructed: return self._constructed = True @@ -375,7 +391,8 @@ def construct(self, data=None): if is_debug_set(logger): logger.debug( "Constructing Expression, name=%s, from data=%s" - % (self.name, str(data))) + % (self.name, str(data)) + ) try: # We do not (currently) accept data for constructing Constraints @@ -386,37 +403,37 @@ def construct(self, data=None): class ScalarExpression(_GeneralExpressionData, Expression): - def __init__(self, *args, **kwds): _GeneralExpressionData.__init__(self, expr=None, component=self) Expression.__init__(self, *args, **kwds) self._index = UnindexedComponent_index - # - # Since this class derives from Component and - # Component.__getstate__ just packs up the entire __dict__ into - # the state dict, we do not need to define the __getstate__ or - # __setstate__ methods. We just defer to the super() get/set - # state. Since all of our get/set state methods rely on super() - # to traverse the MRO, this will automatically pick up both the - # Component and Data base classes. - # - # # Override abstract interface methods to first check for # construction # + def __call__(self, exception=True): + """Return expression on this expression.""" + if self._constructed: + return super().__call__(exception) + raise ValueError( + "Evaluating the expression of Expression '%s' " + "before the Expression has been constructed (there " + "is currently no value to return)." % (self.name) + ) + @property def expr(self): """Return expression on this expression.""" if self._constructed: return _GeneralExpressionData.expr.fget(self) raise ValueError( - "Accessing the expression of expression '%s' " + "Accessing the expression of Expression '%s' " "before the Expression has been constructed (there " - "is currently no value to return)." - % (self.name)) + "is currently no value to return)." % (self.name) + ) + @expr.setter def expr(self, expr): """Set the expression on this expression.""" @@ -430,30 +447,30 @@ def set_value(self, expr): if self._constructed: return _GeneralExpressionData.set_value(self, expr) raise ValueError( - "Setting the expression of expression '%s' " + "Setting the expression of Expression '%s' " "before the Expression has been constructed (there " - "is currently no object to set)." - % (self.name)) + "is currently no object to set)." % (self.name) + ) def is_constant(self): """A boolean indicating whether this expression is constant.""" if self._constructed: return _GeneralExpressionData.is_constant(self) raise ValueError( - "Accessing the is_constant flag of expression '%s' " + "Accessing the is_constant flag of Expression '%s' " "before the Expression has been constructed (there " - "is currently no value to return)." - % (self.name)) + "is currently no value to return)." % (self.name) + ) def is_fixed(self): """A boolean indicating whether this expression is fixed.""" if self._constructed: return _GeneralExpressionData.is_fixed(self) raise ValueError( - "Accessing the is_fixed flag of expression '%s' " + "Accessing the is_fixed flag of Expression '%s' " "before the Expression has been constructed (there " - "is currently no value to return)." - % (self.name)) + "is currently no value to return)." % (self.name) + ) # # Leaving this method for backward compatibility reasons. @@ -464,14 +481,13 @@ def add(self, index, expr): if index is not None: raise KeyError( "ScalarExpression object '%s' does not accept " - "index values other than None. Invalid value: %s" - % (self.name, index)) - if (type(expr) is tuple) and \ - (expr == Expression.Skip): + "index values other than None. Invalid value: %s" % (self.name, index) + ) + if (type(expr) is tuple) and (expr == Expression.Skip): raise ValueError( "Expression.Skip can not be assigned " - "to an Expression that is not indexed: %s" - % (self.name)) + "to an Expression that is not indexed: %s" % (self.name) + ) self.set_value(expr) return self @@ -482,7 +498,6 @@ class SimpleExpression(metaclass=RenamedClass): class IndexedExpression(Expression): - # # Leaving this method for backward compatibility reasons # Note: It allows adding members outside of self._index_set. @@ -492,10 +507,8 @@ class IndexedExpression(Expression): # def add(self, index, expr): """Add an expression with a given index.""" - if (type(expr) is tuple) and \ - (expr == Expression.Skip): + if (type(expr) is tuple) and (expr == Expression.Skip): return None cdata = _GeneralExpressionData(expr, component=self) self._data[index] = cdata return cdata - diff --git a/pyomo/core/base/external.py b/pyomo/core/base/external.py index e1e9cb97fc5..8157ca4badb 100644 --- a/pyomo/core/base/external.py +++ b/pyomo/core/base/external.py @@ -15,21 +15,38 @@ from pyomo.common.pyomo_typing import overload from ctypes import ( - Structure, POINTER, CFUNCTYPE, cdll, byref, - c_int, c_long, c_ulong, c_double, c_byte, c_char_p, c_void_p ) + Structure, + POINTER, + CFUNCTYPE, + cdll, + byref, + c_int, + c_long, + c_ulong, + c_double, + c_byte, + c_char_p, + c_void_p, +) +from pyomo.common.autoslots import AutoSlots from pyomo.common.fileutils import find_library from pyomo.core.expr.numvalue import ( - native_types, native_numeric_types, pyomo_constant_types, - NonNumericValue, NumericConstant, value + native_types, + native_numeric_types, + pyomo_constant_types, + NonNumericValue, + NumericConstant, + value, ) -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.core.base.component import Component from pyomo.core.base.units_container import units -__all__ = ( 'ExternalFunction', ) +__all__ = ('ExternalFunction',) logger = logging.getLogger('pyomo.core') +nan = float('nan') class ExternalFunction(Component): @@ -50,23 +67,26 @@ class ExternalFunction(Component): :class:`AMPLExternalFunction` interface. """ + def __new__(cls, *args, **kwargs): if cls is not ExternalFunction: return super().__new__(cls) elif args: return super().__new__(PythonCallbackFunction) elif 'library' not in kwargs and any( - kw in kwargs for kw in ('function', 'fgh')): + kw in kwargs for kw in ('function', 'fgh') + ): return super().__new__(PythonCallbackFunction) else: return super().__new__(AMPLExternalFunction) @overload - def __init__(self, function=None, gradient=None, hessian=None, - *, fgh=None): ... + def __init__(self, function=None, gradient=None, hessian=None, *, fgh=None): + ... @overload - def __init__(self, *, library: str, function: str): ... + def __init__(self, *, library: str, function: str): + ... def __init__(self, *args, **kwargs): """Construct a reference to an external function. @@ -179,7 +199,7 @@ def __call__(self, *args): # 2. See if we have a potentially variable argument # pv = False - for i,arg in enumerate(args_): + for i, arg in enumerate(args_): try: # Q: Is there a better way to test if a value is an object # not in native_types and not a standard expression type? @@ -195,7 +215,6 @@ def __call__(self, *args): return EXPR.NPV_ExternalFunctionExpression(args_, self) def evaluate(self, args): - """Return the value of the function given the specified arguments Parameters @@ -210,8 +229,7 @@ def evaluate(self, args): float The return value of the function evaluated at `args` """ - args_ = [arg if arg.__class__ in native_types else value(arg) - for arg in args] + args_ = [arg if arg.__class__ in native_types else value(arg) for arg in args] return self._evaluate(args_, None, 0)[0] def evaluate_fgh(self, args, fixed=None, fgh=2): @@ -257,8 +275,7 @@ def evaluate_fgh(self, args, fixed=None, fgh=2): :math:`h[i + j*(j + 1)/2] == H_{i,j}`. """ - args_ = [arg if arg.__class__ in native_types else value(arg) - for arg in args] + args_ = [arg if arg.__class__ in native_types else value(arg) for arg in args] # Note: this is passed-by-reference, and the args_ list may be # changed by _evaluate (e.g., for PythonCallbackFunction). # Remember the original length of the list. @@ -267,11 +284,12 @@ def evaluate_fgh(self, args, fixed=None, fgh=2): # Guarantee the return value behavior documented in the docstring if fgh == 2: n = N - 1 - if len(h) - 1 != n + n*(n+1)//2: + if len(h) - 1 != n + n * (n + 1) // 2: raise RuntimeError( f"External function '{self.name}' returned an invalid " f"Hessian matrix (expected {n + n*(n+1)//2 + 1}, " - f"received {len(h)})") + f"received {len(h)})" + ) else: h = None if fgh >= 1: @@ -279,7 +297,8 @@ def evaluate_fgh(self, args, fixed=None, fgh=2): raise RuntimeError( f"External function '{self.name}' returned an invalid " f"derivative vector (expected {N}, " - f"received {len(g)})") + f"received {len(g)})" + ) else: g = None # Note: the ASL does not require clients to honor the fixed flag @@ -298,9 +317,9 @@ def evaluate_fgh(self, args, fixed=None, fgh=2): continue for j in range(N): if i <= j: - h[i + (j*(j + 1))//2] = 0 + h[i + (j * (j + 1)) // 2] = 0 else: - h[j + (i*(i + 1))//2] = 0 + h[j + (i * (i + 1)) // 2] = 0 return f, g, h def _evaluate(self, args, fixed, fgh): @@ -311,17 +330,23 @@ def _evaluate(self, args, fixed, fgh): to concrete values (it will not have Pyomo components or expressions). """ - raise NotImplementedError( - f"{type(self)} did not implement _evaluate()" ) + raise NotImplementedError(f"{type(self)} did not implement _evaluate()") class AMPLExternalFunction(ExternalFunction): + __autoslot_mappers__ = { + # Remove reference to loaded library (they are not copyable or + # picklable) + '_so': AutoSlots.encode_as_none, + '_known_functions': AutoSlots.encode_as_none, + } def __init__(self, *args, **kwargs): if args: raise ValueError( "AMPLExternalFunction constructor does not support " - "positional arguments" ) + "positional arguments" + ) self._library = kwargs.pop('library', None) self._function = kwargs.pop('function', None) self._known_functions = None @@ -335,16 +360,10 @@ def __init__(self, *args, **kwargs): else: logger.warning( 'Defining AMPL external function, but cannot locate ' - f'specified library "{self._library}"') + f'specified library "{self._library}"' + ) ExternalFunction.__init__(self, *args, **kwargs) - def __getstate__(self): - state = super().__getstate__() - # Remove reference to loaded library (they are not copyable or - # picklable) - state['_so'] = state['_known_functions'] = None - return state - def _evaluate(self, args, fixed, fgh): if self._so is None: self.load_library() @@ -352,19 +371,36 @@ def _evaluate(self, args, fixed, fgh): raise RuntimeError( "Error: external function '%s' was not registered within " "external library %s.\n\tAvailable functions: (%s)" - % ( self._function, self._library, - ', '.join(self._known_functions.keys()) ) ) + % ( + self._function, + self._library, + ', '.join(self._known_functions.keys()), + ) + ) # N = len(args) arglist = _ARGLIST(args, fgh, fixed) fcn = self._known_functions[self._function][0] f = fcn(byref(arglist)) if fgh >= 1: - g = [arglist.derivs[i] for i in range(N)] + g = [nan] * N + for i in range(N): + if arglist.at[i] < 0: + continue + g[i] = arglist.derivs[arglist.at[i]] else: g = None if fgh >= 2: - h = [arglist.hes[i] for i in range((N + N**2)//2)] + h = [nan] * ((N + N**2) // 2) + for j in range(N): + j_r = arglist.at[j] + if j_r < 0: + continue + for i in range(j + 1): + i_r = arglist.at[i] + if i_r < 0: + continue + h[i + j * (j + 1) // 2] = arglist.hes[i_r + j_r * (j_r + 1) // 2] else: h = None return f, g, h @@ -385,39 +421,60 @@ def load_library(self): self._known_functions = {} AE = _AMPLEXPORTS() AE.ASLdate = 20160307 + def addfunc(name, f, _type, nargs, funcinfo, ae): # trap for Python 3, where the name comes in as bytes() and # not a string if not isinstance(name, str): name = name.decode() self._known_functions[str(name)] = (f, _type, nargs, funcinfo, ae) + AE.Addfunc = _AMPLEXPORTS.ADDFUNC(addfunc) + def addrandinit(ae, rss, v): # TODO: This should support the randinit ASL option rss(v, 1) + AE.Addrandinit = _AMPLEXPORTS.ADDRANDINIT(addrandinit) + def atreset(ae, a, b): logger.warning( "AMPL External function: ignoring AtReset call in external " "library. This may result in a memory leak or other " - "undesirable behavior.") + "undesirable behavior." + ) + AE.AtReset = _AMPLEXPORTS.ATRESET(atreset) - FUNCADD = CFUNCTYPE( None, POINTER(_AMPLEXPORTS) ) + FUNCADD = CFUNCTYPE(None, POINTER(_AMPLEXPORTS)) FUNCADD(('funcadd_ASL', self._so))(byref(AE)) def _pprint(self): return ( - [ ('function', self._function), - ('library', self._library), - ('units', str(self._units)), - ('arg_units', [ str(u) for u in self._arg_units ] - if self._arg_units is not None else None), + [ + ('function', self._function), + ('library', self._library), + ('units', str(self._units)), + ( + 'arg_units', + [str(u) for u in self._arg_units] + if self._arg_units is not None + else None, + ), ], - (), None, None + (), + None, + None, ) +def _python_callback_fid_mapper(encode, val): + if encode: + return PythonCallbackFunction.global_registry[val]() + else: + return PythonCallbackFunction.register_instance(val) + + class _PythonCallbackFunctionID(NumericConstant): """A specialized NumericConstant to preserve FunctionIDs through deepcopy. @@ -427,27 +484,21 @@ class _PythonCallbackFunctionID(NumericConstant): model.clone()). """ + __slots__ = () + __autoslot_mappers__ = {'value': _python_callback_fid_mapper} def is_constant(self): # Return False so this object is not simplified out of expressions return False - def __getstate__(self): - state = super().__getstate__() - state['value'] = PythonCallbackFunction.global_registry[ - state['value']]() - return state - - def __setstate__(self, state): - state['value'] = PythonCallbackFunction.register_instance( - state['value']) - super().__setstate__(state) pyomo_constant_types.add(_PythonCallbackFunctionID) class PythonCallbackFunction(ExternalFunction): + __autoslot_mappers__ = {'_fcn_id': _python_callback_fid_mapper} + global_registry = [] global_id_to_fid = {} @@ -475,20 +526,22 @@ def __init__(self, *args, **kwargs): if kw in kwargs: raise ValueError( "Duplicate definition of external function through " - f"positional and keyword ('{kw}=') arguments") + f"positional and keyword ('{kw}=') arguments" + ) kwargs[kw] = args[i] if len(args) > 3: raise ValueError( "PythonCallbackFunction constructor only supports " - "0 - 3 positional arguments" ) + "0 - 3 positional arguments" + ) self._fcn = kwargs.pop('function', None) self._grad = kwargs.pop('gradient', None) self._hess = kwargs.pop('hessian', None) self._fgh = kwargs.pop('fgh', None) if self._fgh is not None and any((self._fcn, self._grad, self._hess)): raise ValueError( - "Cannot specify 'fgh' with any of " - "{'function', 'gradient', hessian'}") + "Cannot specify 'fgh' with any of {'function', 'gradient', hessian'}" + ) # There is an implicit first argument (the function pointer), we # need to add that to the arg_units @@ -504,16 +557,6 @@ def __init__(self, *args, **kwargs): ExternalFunction.__init__(self, *args, **kwargs) self._fcn_id = PythonCallbackFunction.register_instance(self) - def __getstate__(self): - state = super().__getstate__() - state['_fcn_id'] = self - return state - - def __setstate__(self, state): - state['_fcn_id'] = PythonCallbackFunction.register_instance( - state['_fcn_id']) - super().__setstate__(state) - def __call__(self, *args): # NOTE: we append the Function ID to the END of the argument # list because it is easier to update the gradient / hessian @@ -526,8 +569,7 @@ def _evaluate(self, args, fixed, fgh): if fixed is not None: fixed = fixed[:-1] if _id != self._fcn_id: - raise RuntimeError( - "PythonCallbackFunction called with invalid Global ID" ) + raise RuntimeError("PythonCallbackFunction called with invalid Global ID") if self._fgh is not None: f, g, h = self._fgh(args, fgh, fixed) else: @@ -537,7 +579,8 @@ def _evaluate(self, args, fixed, fgh): raise RuntimeError( f"ExternalFunction '{self.name}' was not defined " "with a gradient callback. Cannot evaluate the " - "derivative of the function") + "derivative of the function" + ) g = self._grad(args, fixed) else: g = None @@ -546,7 +589,8 @@ def _evaluate(self, args, fixed, fgh): raise RuntimeError( f"ExternalFunction '{self.name}' was not defined " "with a Hessian callback. Cannot evaluate the " - "second derivative of the function") + "second derivative of the function" + ) h = self._hess(args, fixed) else: h = None @@ -555,17 +599,24 @@ def _evaluate(self, args, fixed, fgh): if g is not None: g.append(0) if h is not None: - h.extend([0]*(len(args)+1)) + h.extend([0] * (len(args) + 1)) return f, g, h def _pprint(self): return ( - [ ('function', self._fcn.__qualname__), - ('units', str(self._units)), - ('arg_units', [ str(u) for u in self._arg_units[:-1] ] - if self._arg_units is not None else None), + [ + ('function', self._fcn.__qualname__), + ('units', str(self._units)), + ( + 'arg_units', + [str(u) for u in self._arg_units[:-1]] + if self._arg_units is not None + else None, + ), ], - (), None, None + (), + None, + None, ) @@ -578,47 +629,43 @@ class _ARGLIST(Structure): """ _fields_ = [ - ('n', c_int), # number of args - ('nr', c_int), # number of real input args + ('n', c_int), # number of args + ('nr', c_int), # number of real input args ('at', POINTER(c_int)), # argument types -- see DISCUSSION below ('ra', POINTER(c_double)), # pure real args (IN, OUT, and INOUT) ('sa', POINTER(c_char_p)), # symbolic IN args ('derivs', POINTER(c_double)), # for partial derivatives (if nonzero) ('hes', POINTER(c_double)), # for second partials (if nonzero) - ('dig', POINTER(c_byte)), # if (dig && dig[i]) { partials w.r.t. - # ra[i] will not be used } + # if (dig && dig[i]) { partials w.r.t. ra[i] will not be used } + ('dig', POINTER(c_byte)), ('funcinfo', c_char_p), # for use by the function (if desired) - ('AE', c_void_p), # functions made visible (via #defines below) + ('AE', c_void_p), # functions made visible (via #defines below) ('f', c_void_p), # for internal use by AMPL ('tva', c_void_p), # for internal use by AMPL ('Errmsg', c_char_p), # To indicate an error, set this to a - # description of the error. When derivs - # is nonzero and the error is that first - # derivatives cannot or are not computed, - # a single quote character (') should be - # the first character in the text assigned - # to Errmsg, followed by the actual error - # message. Similarly, if hes is nonzero - # and the error is that second derivatives - # are not or cannot be computed, a double - # quote character (") should be the first - # character in Errmsg, followed by the - # actual error message text. - ('TMI', c_void_p), # used in Tempmem calls - ('Private', c_char_p), # The following fields are relevant - # only when imported functions are called - # by AMPL commands (not declarations). - ('nin', c_int), # number of input (IN and INOUT) args + # description of the error. When derivs is nonzero and the + # error is that first derivatives cannot or are not computed, a + # single quote character (') should be the first character in + # the text assigned to Errmsg, followed by the actual error + # message. Similarly, if hes is nonzero and the error is that + # second derivatives are not or cannot be computed, a double + # quote character (") should be the first character in Errmsg, + # followed by the actual error message text. + ('TMI', c_void_p), # used in Tempmem calls + ('Private', c_char_p), + # The following fields are relevant only when imported functions + # are called by AMPL commands (not declarations). + ('nin', c_int), # number of input (IN and INOUT) args ('nout', c_int), # number of output (OUT and INOUT) args ('nsin', c_int), # number of symbolic input arguments - ('nsout', c_int), # number of symbolic OUT and INOUT args - ] + ('nsout', c_int), # number of symbolic OUT and INOUT args + ] def __init__(self, args, fgh=0, fixed=None): super().__init__() self._encoded_strings = [] self.n = len(args) - self.at = (c_int*self.n)() + self.at = (c_int * self.n)() _reals = [] _strings = [] nr = 0 @@ -646,23 +693,25 @@ def __init__(self, args, fgh=0, fixed=None): else: raise RuntimeError( f"Unknown data type, {type(arg).__name__}, passed as " - f"argument {i} for an ASL ExternalFunction") + f"argument {i} for an ASL ExternalFunction" + ) self.nr = nr - self.ra = (c_double*nr)(*_reals) - self.sa = (c_char_p*ns)(*_strings) + self.ra = (c_double * nr)(*_reals) + self.sa = (c_char_p * ns)(*_strings) if fgh >= 1: - self.derivs = (c_double*nr)(0.) + self.derivs = (c_double * nr)(0.0) if fgh >= 2: - self.hes = (c_double*((nr + nr*nr)//2))(0.) + self.hes = (c_double * ((nr + nr * nr) // 2))(0.0) if fixed: - self.dig = (c_byte*nr)(0) + self.dig = (c_byte * nr)(0) for i, v in enumerate(fixed): if v: r_idx = self.at[i] if r_idx >= 0: self.dig[r_idx] = 1 + # The following "fake" class resolves a circular reference issue in the # _AMPLEXPORTS datastructure # @@ -672,6 +721,7 @@ def __init__(self, args, fgh=0, fixed=None): class _AMPLEXPORTS(Structure): pass + class _AMPLEXPORTS(Structure): """Mock up the AmplExports structure from AMPL's funcadd.h @@ -682,84 +732,77 @@ class _AMPLEXPORTS(Structure): trickier than it sounds, and at least so far is completely unneeded. """ - AMPLFUNC = CFUNCTYPE( c_double, POINTER(_ARGLIST) ) + AMPLFUNC = CFUNCTYPE(c_double, POINTER(_ARGLIST)) ADDFUNC = CFUNCTYPE( - None, - c_char_p, AMPLFUNC, c_int, c_int, c_void_p, - POINTER(_AMPLEXPORTS) ) + None, c_char_p, AMPLFUNC, c_int, c_int, c_void_p, POINTER(_AMPLEXPORTS) + ) - RANDSEEDSETTER = CFUNCTYPE( - None, - c_void_p, c_ulong ) + RANDSEEDSETTER = CFUNCTYPE(None, c_void_p, c_ulong) - ADDRANDINIT = CFUNCTYPE( - None, - POINTER(_AMPLEXPORTS), RANDSEEDSETTER, c_void_p ) + ADDRANDINIT = CFUNCTYPE(None, POINTER(_AMPLEXPORTS), RANDSEEDSETTER, c_void_p) - ATRESET = CFUNCTYPE( - None, - POINTER(_AMPLEXPORTS), c_void_p, c_void_p ) + ATRESET = CFUNCTYPE(None, POINTER(_AMPLEXPORTS), c_void_p, c_void_p) _fields_ = [ - ('StdErr', c_void_p), - ('Addfunc', ADDFUNC), - ('ASLdate', c_long), - ('FprintF', c_void_p), - ('PrintF', c_void_p), - ('SprintF', c_void_p), - ('VfprintF', c_void_p), - ('VsprintF', c_void_p), - ('Strtod', c_void_p), - ('Crypto', c_void_p), - ('asl', c_char_p), - ('AtExit', c_void_p), - ('AtReset', ATRESET), - ('Tempmem', c_void_p), - ('Add_table_handler', c_void_p), - ('Private', c_char_p), - ('Qsortv', c_void_p), - #/* More stuff for stdio in DLLs... */ - ('StdIn', c_void_p), - ('StdOut', c_void_p), - ('Clearerr', c_void_p), - ('Fclose', c_void_p), - ('Fdopen', c_void_p), - ('Feof', c_void_p), - ('Ferror', c_void_p), - ('Fflush', c_void_p), - ('Fgetc', c_void_p), - ('Fgets', c_void_p), - ('Fileno', c_void_p), - ('Fopen', c_void_p), - ('Fputc', c_void_p), - ('Fputs', c_void_p), - ('Fread', c_void_p), - ('Freopen', c_void_p), - ('Fscanf', c_void_p), - ('Fseek', c_void_p), - ('Ftell', c_void_p), - ('Fwrite', c_void_p), - ('Pclose', c_void_p), - ('Perror', c_void_p), - ('Popen', c_void_p), - ('Puts', c_void_p), - ('Rewind', c_void_p), - ('Scanf', c_void_p), - ('Setbuf', c_void_p), - ('Setvbuf', c_void_p), - ('Sscanf', c_void_p), - ('Tempnam', c_void_p), - ('Tmpfile', c_void_p), - ('Tmpnam', c_void_p), - ('Ungetc', c_void_p), - ('AI', c_void_p), - ('Getenv', c_void_p), - ('Breakfunc', c_void_p), - ('Breakarg', c_char_p), - #/* Items available with ASLdate >= 20020501 start here. */ - ('SnprintF', c_void_p), - ('VsnprintF', c_void_p), - ('Addrand', c_void_p), - ('Addrandinit', ADDRANDINIT), - ] + ('StdErr', c_void_p), + ('Addfunc', ADDFUNC), + ('ASLdate', c_long), + ('FprintF', c_void_p), + ('PrintF', c_void_p), + ('SprintF', c_void_p), + ('VfprintF', c_void_p), + ('VsprintF', c_void_p), + ('Strtod', c_void_p), + ('Crypto', c_void_p), + ('asl', c_char_p), + ('AtExit', c_void_p), + ('AtReset', ATRESET), + ('Tempmem', c_void_p), + ('Add_table_handler', c_void_p), + ('Private', c_char_p), + ('Qsortv', c_void_p), + # /* More stuff for stdio in DLLs... */ + ('StdIn', c_void_p), + ('StdOut', c_void_p), + ('Clearerr', c_void_p), + ('Fclose', c_void_p), + ('Fdopen', c_void_p), + ('Feof', c_void_p), + ('Ferror', c_void_p), + ('Fflush', c_void_p), + ('Fgetc', c_void_p), + ('Fgets', c_void_p), + ('Fileno', c_void_p), + ('Fopen', c_void_p), + ('Fputc', c_void_p), + ('Fputs', c_void_p), + ('Fread', c_void_p), + ('Freopen', c_void_p), + ('Fscanf', c_void_p), + ('Fseek', c_void_p), + ('Ftell', c_void_p), + ('Fwrite', c_void_p), + ('Pclose', c_void_p), + ('Perror', c_void_p), + ('Popen', c_void_p), + ('Puts', c_void_p), + ('Rewind', c_void_p), + ('Scanf', c_void_p), + ('Setbuf', c_void_p), + ('Setvbuf', c_void_p), + ('Sscanf', c_void_p), + ('Tempnam', c_void_p), + ('Tmpfile', c_void_p), + ('Tmpnam', c_void_p), + ('Ungetc', c_void_p), + ('AI', c_void_p), + ('Getenv', c_void_p), + ('Breakfunc', c_void_p), + ('Breakarg', c_char_p), + # /* Items available with ASLdate >= 20020501 start here. */ + ('SnprintF', c_void_p), + ('VsnprintF', c_void_p), + ('Addrand', c_void_p), + ('Addrandinit', ADDRANDINIT), + ] diff --git a/pyomo/core/base/global_set.py b/pyomo/core/base/global_set.py index ee553f1cda1..f4d97403308 100644 --- a/pyomo/core/base/global_set.py +++ b/pyomo/core/base/global_set.py @@ -9,13 +9,22 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +from pyomo.core.pyomoobject import PyomoObject +from pyomo.core.base.range import NonNumericRange + GlobalSets = {} + + def _get_global_set(name): return GlobalSets[name] + + _get_global_set.__safe_for_unpickling__ = True -class GlobalSetBase(object): + +class GlobalSetBase(PyomoObject): """The base class for all Global sets""" + __slots__ = () def __reduce__(self): @@ -42,53 +51,136 @@ def _parent(self): def _parent(self, val): if val is None: return - val = val() # dereference the weakref + val = val() # dereference the weakref raise RuntimeError( "Cannot assign a GlobalSet '%s' to %s '%s'" - % (self.global_name, - 'model' if val.model() is val else 'block', - val.name or 'unknown') + % ( + self.global_name, + 'model' if val.model() is val else 'block', + val.name or 'unknown', + ) ) + # FIXME: This mocks up part of the Set API until we can break up the set # module to resolve circular dependencies and can make this a proper # GlobalSet (Scalar IndexedComponent objects are indexed by # UnindexedComponent_set, but we would like UnindexedComponent_set to be # a proper scalar IndexedComponent). # -#UnindexedComponent_set = set([None]) +# UnindexedComponent_set = set([None]) class _UnindexedComponent_set(GlobalSetBase): local_name = 'UnindexedComponent_set' + def __init__(self, name): self.name = name + def __contains__(self, val): return val is None + def get(self, value, default): if value is None: return value return default + def __iter__(self): return (None,).__iter__() + + def __reversed__(self): + return iter(self) + + def ordered_iter(self): + return iter(self) + + def sorted_iter(self): + return iter(self) + + def data(self): + return tuple(self) + + def ordered_data(self): + return tuple(self) + + def sorted_data(self): + return tuple(self) + def subsets(self, expand_all_set_operators=None): - return [ self ] + return [self] + def construct(self): pass + + def ranges(self): + yield NonNumericRange(None) + + def bounds(self): + return (None, None) + + def get_interval(self): + return (None, None, None) + def __len__(self): return 1 + def __eq__(self, other): return self is other + def __ne__(self, other): return self is not other + @property def dimen(self): return 0 + def isdiscrete(self): return True + def isfinite(self): return True + def isordered(self): # As this set only has a single element, it is implicitly "ordered" return True + + def at(self, index): + if index == 1: + return None + raise IndexError("%s index out of range" % (self.name,)) + + def ord(self, item): + if item is None: + return 1 + raise IndexError( + "Cannot identify position of %s in Set %s: item not in Set" + % (item, self.name) + ) + + def first(self): + return None + + def last(self): + return None + + def next(self, item, step=1): + # make sure that item is None (and raise the standard error if it is not) + self.ord(item) + if step < 0: + raise IndexError("Cannot advance before the beginning of the Set") + else: + raise IndexError("Cannot advance past the end of the Set") + + def nextw(self, item, step=1): + # make sure that item is None (and raise the standard error if it is not) + self.ord(item) + return None + + def prev(self, item, step=1): + return self.next(item, -step) + + def prevw(self, item, step=1): + return self.nextw(item, -step) + + UnindexedComponent_set = _UnindexedComponent_set('UnindexedComponent_set') GlobalSets[UnindexedComponent_set.local_name] = UnindexedComponent_set diff --git a/pyomo/core/base/indexed_component.py b/pyomo/core/base/indexed_component.py index 4157c4efd5e..6e356a8304e 100644 --- a/pyomo/core/base/indexed_component.py +++ b/pyomo/core/base/indexed_component.py @@ -11,21 +11,29 @@ __all__ = ['IndexedComponent', 'ActiveIndexedComponent'] +import enum import inspect import logging import sys import textwrap -from pyomo.core.expr.expr_errors import TemplateExpressionError -from pyomo.core.expr.numvalue import native_types, NumericNDArray +from copy import deepcopy + +import pyomo.core.expr as EXPR +from pyomo.core.expr.numeric_expr import NumericNDArray +from pyomo.core.expr.numvalue import native_types from pyomo.core.base.indexed_component_slice import IndexedComponent_slice from pyomo.core.base.initializer import Initializer from pyomo.core.base.component import Component, ActiveComponent from pyomo.core.base.config import PyomoOptions +from pyomo.core.base.enums import SortComponents from pyomo.core.base.global_set import UnindexedComponent_set +from pyomo.core.pyomoobject import PyomoObject from pyomo.common import DeveloperError +from pyomo.common.autoslots import fast_deepcopy from pyomo.common.dependencies import numpy as np, numpy_available -from pyomo.common.deprecation import deprecated +from pyomo.common.deprecation import deprecated, deprecation_warning +from pyomo.common.errors import DeveloperError, TemplateExpressionError from pyomo.common.modeling import NOTSET from pyomo.common.sorting import sorted_robust @@ -34,6 +42,8 @@ logger = logging.getLogger('pyomo.core') sequence_types = {tuple, list} + + def normalize_index(x): """Normalize a component index. @@ -68,7 +78,7 @@ def normalize_index(x): x_len += len(x[i]) - 1 # Note that casting a tuple to a tuple is cheap (no copy, no # new object) - x = x[:i] + tuple(x[i]) + x[i + 1:] + x = x[:i] + tuple(x[i]) + x[i + 1 :] elif issubclass(_xi_class, Sequence): if issubclass(_xi_class, str): # This is very difficult to get to: it would require a @@ -78,7 +88,7 @@ def normalize_index(x): else: sequence_types.add(_xi_class) x_len += len(x[i]) - 1 - x = x[:i] + tuple(x[i]) + x[i + 1:] + x = x[:i] + tuple(x[i]) + x[i + 1 :] else: i += 1 @@ -86,15 +96,19 @@ def normalize_index(x): return x[0] return x + # Pyomo will normalize indices by default normalize_index.flatten = True class _NotFound(object): pass + + class _NotSpecified(object): pass + # # Get the fully-qualified name for this index. If there isn't anything # in the _data dict (and there shouldn't be), then add something, get @@ -122,7 +136,8 @@ def _get_indexed_component_data_name(component, index): for i in range(5): try: component._data[index] = component._ComponentDataClass( - *((None,)*i), component=component) + *((None,) * i), component=component + ) i = None break except: @@ -130,8 +145,7 @@ def _get_indexed_component_data_name(component, index): if i is not None: # None of the generic positional arguments worked; raise an # exception - component._data[index] = component._ComponentDataClass( - component=component) + component._data[index] = component._ComponentDataClass(component=component) try: ans = component._data[index].name except: @@ -140,6 +154,7 @@ def _get_indexed_component_data_name(component, index): del component._data[index] return ans + _rule_returned_none_error = """%s '%s': rule returned None. %s rules must return either a valid expression, numeric value, or @@ -147,6 +162,7 @@ def _get_indexed_component_data_name(component, index): include the "return" statement at the end of your rule. """ + def rule_result_substituter(result_map): _map = result_map _map_types = set(type(key) for key in result_map) @@ -157,12 +173,21 @@ def rule_result_substituter_impl(rule, *args, **kwargs): # The argument is a trivial type and will be mapped # value = rule + elif isinstance(rule, PyomoObject): + # + # The argument is a Pyomo component. This can happen when + # the rule isn't a rule at all, but instead the decorator + # was used as a function to wrap an inline definition (not + # something I think we should support, but exists in some + # [old] examples). + # + return rule else: # # Otherwise, the argument is a functor, so call it to # generate the rule result. # - value = rule( *args, **kwargs ) + value = rule(*args, **kwargs) # # Map the returned value: # @@ -172,14 +197,15 @@ def rule_result_substituter_impl(rule, *args, **kwargs): return rule_result_substituter_impl -_map_rule_funcdef = \ -"""def wrapper_function%s: + +_map_rule_funcdef = """def wrapper_function%s: args, varargs, kwds, local_env = inspect.getargvalues( inspect.currentframe()) args = tuple(local_env[_] for _ in args) + (varargs or ()) return wrapping_fcn(rule, *args, **(kwds or {})) """ + def rule_wrapper(rule, wrapping_fcn, positional_arg_map=None): """Wrap a rule with another function @@ -216,8 +242,7 @@ def rule_wrapper(rule, wrapping_fcn, positional_arg_map=None): rule_sig = inspect.signature(rule) if positional_arg_map is not None: param = list(rule_sig.parameters.values()) - rule_sig = rule_sig.replace( - parameters=(param[i] for i in positional_arg_map)) + rule_sig = rule_sig.replace(parameters=(param[i] for i in positional_arg_map)) _funcdef = _map_rule_funcdef % (str(rule_sig),) # Create the wrapper in a temporary environment that mimics this # function's environment. @@ -255,10 +280,11 @@ class IndexedComponent(Component): component data objects _index_set The set of valid indices _implicit_subsets A temporary data element that stores - sets that are transfered to the model + sets that are transferred to the model """ - class Skip(object): pass + class Skip(object): + pass # # If an index is supplied for which there is not a _data entry @@ -271,14 +297,14 @@ class Skip(object): pass def __init__(self, *args, **kwds): from pyomo.core.base.set import process_setarg + # kwds.pop('noruleinit', None) Component.__init__(self, **kwds) # self._data = {} # - if len(args) == 0 or (len(args) == 1 and - args[0] is UnindexedComponent_set): + if len(args) == 0 or (len(args) == 1 and args[0] is UnindexedComponent_set): # # If no indexing sets are provided, generate a dummy index # @@ -310,41 +336,31 @@ def __init__(self, *args, **kwds): self._implicit_subsets = tmp self._index_set = tmp[0].cross(*tmp[1:]) - def __getstate__(self): - # Special processing of getstate so that we never copy the - # UnindexedComponent_set set - state = super(IndexedComponent, self).__getstate__() - if not self.is_indexed(): - state['_index_set'] = None - return state - - def __setstate__(self, state): - # Special processing of setstate so that we never copy the - # UnindexedComponent_set set - if state['_index_set'] is None: - state['_index_set'] = UnindexedComponent_set - super(IndexedComponent, self).__setstate__(state) - def _create_objects_for_deepcopy(self, memo, component_list): - _id = id(self) - if _id not in memo: + _new = self.__class__.__new__(self.__class__) + _ans = memo.setdefault(id(self), _new) + if _ans is _new: component_list.append(self) - memo[_id] = self.__class__.__new__(self.__class__) - # For indexed components, we need to pre-emptively clone all - # component data objects as well (as those are the objects that - # will be referenced by things like expressions) - if self.is_indexed() and not self.is_reference(): - for obj in self._data.values(): - # We need to catch things like References and *not* - # preemptively clone the data objects. - if obj.parent_component() is not self: - continue - _id = id(obj) - if _id in memo: - continue - # But everything else should be cloned. - component_list.append(obj) - memo[_id] = obj.__class__.__new__(obj.__class__) + # For indexed components, we will pre-emptively clone all + # component data objects as well (as those are the objects + # that will be referenced by things like expressions). It + # is important to only clone "normal" ComponentData obects: + # so we will want to skip this for all scalar components + # (where the _data points back to self) and references + # (where the data may be stored outside this block tree and + # therefore may not be cloned) + if self.is_indexed() and not self.is_reference(): + # Because we are already checking / updating the memo + # for the _data dict, we can effectively "deepcopy" it + # right now (almost for free!) + _src = self._data + memo[id(_src)] = _new._data = _data = _src.__class__() + for idx, obj in _src.items(): + _data[fast_deepcopy(idx, memo)] = obj._create_objects_for_deepcopy( + memo, component_list + ) + + return _ans def to_dense_data(self): """TODO""" @@ -364,7 +380,8 @@ def clear(self): else: raise DeveloperError( "Derived scalar component %s failed to define clear()." - % (self.__class__.__name__,)) + % (self.__class__.__name__,) + ) def index_set(self): """Return the index set""" @@ -406,7 +423,7 @@ def __iter__(self): """Return an iterator of the component data keys""" return self.keys() - def keys(self, ordered=False): + def keys(self, sort=SortComponents.UNSORTED, ordered=NOTSET): """Return an iterator over the component data keys This method sets the ordering of component data objects within @@ -416,14 +433,26 @@ def keys(self, ordered=False): Parameters ---------- + sort: bool or SortComponents + Iterate over the declared component keys in a specified + sorted order. See :py:class:`SortComponents` for valid + options and descriptions. + ordered: bool - If True, then the keys are returned in a deterministic - order. If the underlying indexing set is ordered then that - ordering is used. Otherwise, the keys are sorted using - :py:func:`sorted_robust`. + DEPRECATED: Please use `sort=SortComponents.ORDERED_INDICES`. + If True, then the keys are returned in a deterministic order + (using the underlying set's `ordered_iter()`). """ - sort_needed = ordered + sort = SortComponents(sort) + if ordered is not NOTSET: + deprecation_warning( + f"keys(ordered={ordered}) is deprecated. " + "Please use `sort=SortComponents.ORDERED_INDICES`", + version='6.6.0', + ) + if ordered: + sort = sort | SortComponents.ORDERED_INDICES if not self._index_set.isfinite(): # # If the index set is virtual (e.g., Any) then return the @@ -431,22 +460,36 @@ def keys(self, ordered=False): # of the underlying Set, there should be no warning if the # user iterates over the set when the _data dict is empty. # - ans = self._data.__iter__() - elif self.is_reference(): - ans = self._data.__iter__() + if ( + SortComponents.SORTED_INDICES in sort + or SortComponents.ORDERED_INDICES in sort + ): + return iter(sorted_robust(self._data)) + else: + return self._data.__iter__() + + if SortComponents.SORTED_INDICES in sort: + ans = self._index_set.sorted_iter() + elif SortComponents.ORDERED_INDICES in sort: + ans = self._index_set.ordered_iter() + else: + ans = iter(self._index_set) + + if self._data.__class__ is not dict: + # We currently only need to worry about sparse data + # structures when the underlying _data is a dict. Avoiding + # the len() and filter() below is especially important for + # References (where both can be expensive linear-time + # operations) + pass elif len(self) == len(self._index_set): # # If the data is dense then return the index iterator. # - ans = self._index_set.__iter__() - if ordered and self._index_set.isordered(): - # As this iterator is ordered, we do not need to sort it - sort_needed = False - else: - if not self._data and self._index_set and \ - PyomoOptions.paranoia_level: - logger.warning( -"""Iterating over a Component (%s) + pass + elif not self._data and self._index_set and PyomoOptions.paranoia_level: + logger.warning( + """Iterating over a Component (%s) defined by a non-empty concrete set before any data objects have actually been added to the Component. The iterator will be empty. This is usually caused by Concrete models where you declare the @@ -463,73 +506,104 @@ def keys(self, ordered=False): 3) If you intend to iterate over a component that may be empty, test if the component is empty first and avoid iteration in the case where it is empty. -""" % (self.name,) ) - - if not self._index_set.isordered(): - # - # If the index set is not ordered, then return the - # data iterator. This is in an arbitrary order, which is - # fine because the data is unordered. - # - ans = self._data.__iter__() - else: - # - # Test each element of a sparse data with an ordered - # index set in order. This is potentially *slow*: if - # the component is in fact very sparse, we could be - # iterating over a huge (dense) index in order to sort a - # small number of indices. However, this provides a - # consistent ordering that the user expects. - # - ans = filter(self._data.__contains__, self._index_set) - # As the iterator is ordered, we do not need to sort it - sort_needed = False - if sort_needed: - return iter(sorted_robust(ans)) +""" + % (self.name,) + ) else: - return ans + # + # Test each element of a sparse data with an ordered + # index set in order. This is potentially *slow*: if + # the component is in fact very sparse, we could be + # iterating over a huge (dense) index in order to sort a + # small number of indices. However, this provides a + # consistent ordering that the user expects. + # + ans = filter(self._data.__contains__, ans) + return ans - def values(self, ordered=False): + def values(self, sort=SortComponents.UNSORTED, ordered=NOTSET): """Return an iterator of the component data objects Parameters ---------- + sort: bool or SortComponents + Iterate over the declared component values in a specified + sorted order. See :py:class:`SortComponents` for valid + options and descriptions. + ordered: bool - If True, then the values are returned in a deterministic - order. If the underlying indexing set is ordered then that - ordering is used. Otherwise, the component keys are sorted - using :py:func:`sorted_robust` and the values are returned - in that order. + DEPRECATED: Please use `sort=SortComponents.ORDERED_INDICES`. + If True, then the values are returned in a deterministic order + (using the underlying set's `ordered_iter()`. """ - return map(self.__getitem__, self.keys(ordered)) + if ordered is not NOTSET: + deprecation_warning( + f"values(ordered={ordered}) is deprecated. " + "Please use `sort=SortComponents.ORDERED_INDICES`", + version='6.6.0', + ) + if ordered: + sort = SortComponents(sort) | SortComponents.ORDERED_INDICES + # Note that looking up the values in a reference may be an + # expensive operation (linear time). To avoid making this a + # quadratic time operation, we will leverage _ReferenceDict's + # values(). This may fail for references created from mappings + # or sequences, raising the TypeError + if self.is_reference(): + try: + return self._data.values(sort) + except TypeError: + pass + return map(self.__getitem__, self.keys(sort)) - def items(self, ordered=False): + def items(self, sort=SortComponents.UNSORTED, ordered=NOTSET): """Return an iterator of (index,data) component data tuples Parameters ---------- + sort: bool or SortComponents + Iterate over the declared component items in a specified + sorted order. See :py:class:`SortComponents` for valid + options and descriptions. + ordered: bool - If True, then the items are returned in a deterministic - order. If the underlying indexing set is ordered then that - ordering is used. Otherwise, the items are sorted using - :py:func:`sorted_robust`. + DEPRECATED: Please use `sort=SortComponents.ORDERED_INDICES`. + If True, then the items are returned in a deterministic order + (using the underlying set's `ordered_iter()`. """ - return((s, self[s]) for s in self.keys(ordered)) + if ordered is not NOTSET: + deprecation_warning( + f"items(ordered={ordered}) is deprecated. " + "Please use `sort=SortComponents.ORDERED_INDICES`", + version='6.6.0', + ) + if ordered: + sort = SortComponents(sort) | SortComponents.ORDERED_INDICES + # Note that looking up the values in a reference may be an + # expensive operation (linear time). To avoid making this a + # quadratic time operation, we will try and use _ReferenceDict's + # items(). This may fail for references created from mappings + # or sequences, raising the TypeError + if self.is_reference(): + try: + return self._data.items(sort) + except TypeError: + pass + return ((s, self[s]) for s in self.keys(sort)) - @deprecated('The iterkeys method is deprecated. Use dict.keys().', - version='6.0') + @deprecated('The iterkeys method is deprecated. Use dict.keys().', version='6.0') def iterkeys(self): """Return a list of keys in the dictionary""" return self.keys() - @deprecated('The itervalues method is deprecated. Use dict.values().', - version='6.0') + @deprecated( + 'The itervalues method is deprecated. Use dict.values().', version='6.0' + ) def itervalues(self): """Return a list of the component data objects in the dictionary""" return self.values() - @deprecated('The iteritems method is deprecated. Use dict.items().', - version='6.0') + @deprecated('The iteritems method is deprecated. Use dict.items().', version='6.0') def iteritems(self): """Return a list (index,data) tuples from the dictionary""" return self.items() @@ -542,7 +616,9 @@ def __getitem__(self, index): self._not_constructed_error(index) try: - obj = self._data.get(index, _NotFound) + return self._data[index] + except KeyError: + obj = _NotFound except TypeError: try: index = self._processUnhashableIndex(index) @@ -565,12 +641,7 @@ def __getitem__(self, index): obj = _NotFound if obj is _NotFound: - # Not good: we have to defer this import to now - # due to circular imports (expr imports _VarData - # imports indexed_component, but we need expr - # here - from pyomo.core.expr import current as EXPR - if index.__class__ is EXPR.GetItemExpression: + if isinstance(index, EXPR.GetItemExpression): return index validated_index = self._validate_index(index) if validated_index is not index: @@ -676,8 +747,11 @@ def __delitem__(self, index): del self._data[index] def _pop_from_kwargs(self, name, kwargs, namelist, notset=None): - args = [arg for arg in (kwargs.pop(name, notset) for name in namelist) - if arg is not notset] + args = [ + arg + for arg in (kwargs.pop(name, notset) for name in namelist) + if arg is not notset + ] if len(args) == 1: return args[0] elif not args: @@ -686,11 +760,12 @@ def _pop_from_kwargs(self, name, kwargs, namelist, notset=None): argnames = "%s%s '%s='" % ( ', '.join("'%s='" % _ for _ in namelist[:-1]), ',' if len(namelist) > 2 else '', - namelist[-1] + namelist[-1], ) raise ValueError( - "Duplicate initialization: %s() only accepts one of %s" % - (name, argnames)) + "Duplicate initialization: %s() only accepts one of %s" + % (name, argnames) + ) def _construct_from_rule_using_setitem(self): if self._rule is None: @@ -707,7 +782,7 @@ def _construct_from_rule_using_setitem(self): self._rule = rule = Initializer( rule(block, None), treat_sequences_as_mappings=False, - arg_not_specified=NOTSET + arg_not_specified=NOTSET, ) if rule.contains_indices(): @@ -734,11 +809,8 @@ def _construct_from_rule_using_setitem(self): err = sys.exc_info()[1] logger.error( "Rule failed for %s '%s' with index %s:\n%s: %s" - % (self.ctype.__name__, - self.name, - str(index), - type(err).__name__, - err)) + % (self.ctype.__name__, self.name, str(index), type(err).__name__, err) + ) raise def _not_constructed_error(self, idx): @@ -751,7 +823,8 @@ def _not_constructed_error(self, idx): idx_str = "[" + str(idx) + "]" raise ValueError( "Error retrieving component %s%s: The component has " - "not been constructed." % (self.name, idx_str,)) + "not been constructed." % (self.name, idx_str) + ) def _validate_index(self, idx): if not IndexedComponent._DEFAULT_INDEX_CHECKING_ENABLED: @@ -793,13 +866,14 @@ def _validate_index(self, idx): if not self.is_indexed(): raise KeyError( "Cannot treat the scalar component '%s' " - "as an indexed component" % ( self.name, )) + "as an indexed component" % (self.name,) + ) # # Raise an exception # raise KeyError( - "Index '%s' is not valid for indexed component '%s'" - % ( idx, self.name, )) + "Index '%s' is not valid for indexed component '%s'" % (idx, self.name) + ) def _processUnhashableIndex(self, idx): """Process a call to __getitem__ with unhashable elements @@ -810,15 +884,14 @@ def _processUnhashableIndex(self, idx): (Scalar)Component 3) the index contains an IndexTemplate """ - from pyomo.core.expr import current as EXPR # # Iterate through the index and look for slices and constant # components # + orig_idx = idx fixed = {} sliced = {} ellipsis = None - _found_numeric = False # # Setup the slice template (in fixed) # @@ -827,18 +900,22 @@ def _processUnhashableIndex(self, idx): if idx.__class__ is not tuple: idx = (idx,) - for i,val in enumerate(idx): + for i, val in enumerate(idx): if type(val) is slice: - if val.start is not None or val.stop is not None \ - or val.step is not None: + if ( + val.start is not None + or val.stop is not None + or val.step is not None + ): raise IndexError( "Indexed components can only be indexed with simple " - "slices: start and stop values are not allowed.") + "slices: start and stop values are not allowed." + ) else: if ellipsis is None: sliced[i] = val else: - sliced[i-len(idx)] = val + sliced[i - len(idx)] = val continue if val is Ellipsis: @@ -846,7 +923,8 @@ def _processUnhashableIndex(self, idx): raise IndexError( "Indexed components can only be indexed with simple " "slices: the Pyomo wildcard slice (Ellipsis; " - "e.g., '...') can only appear once") + "e.g., '...') can only appear once" + ) ellipsis = i continue @@ -857,14 +935,11 @@ def _processUnhashableIndex(self, idx): # should raise a TemplateExpressionError try: val = EXPR.evaluate_expression(val, constant=True) - _found_numeric = True - except TemplateExpressionError: # # The index is a template expression, so return the # templatized expression. # - from pyomo.core.expr import current as EXPR return EXPR.GetItemExpression((self,) + tuple(idx)) except EXPR.NonConstantExpressionError: @@ -872,24 +947,28 @@ def _processUnhashableIndex(self, idx): # The expression contains an unfixed variable # raise RuntimeError( -"""Error retrieving the value of an indexed item %s: + """Error retrieving the value of an indexed item %s: index %s is not a constant value. This is likely not what you meant to do, as if you later change the fixed value of the object this lookup will not change. If you understand the implications of using non-constant values, you can get the current value of the object using -the value() function.""" % ( self.name, i )) +the value() function.""" + % (self.name, i) + ) except EXPR.FixedExpressionError: # # The expression contains a fixed variable # raise RuntimeError( -"""Error retrieving the value of an indexed item %s: + """Error retrieving the value of an indexed item %s: index %s is a fixed but not constant value. This is likely not what you meant to do, as if you later change the fixed value of the object this lookup will not change. If you understand the implications of using fixed but not constant values, you can get the current value using the -value() function.""" % ( self.name, i )) +value() function.""" + % (self.name, i) + ) # # There are other ways we could get an exception such as # evaluating a Param / Var that is not initialized. @@ -919,7 +998,7 @@ def _processUnhashableIndex(self, idx): if slice_dim == set_dim or set_dim is None: structurally_valid = True elif type(set_dim) is type: - pass # UnknownSetDimen + pass # UnknownSetDimen elif ellipsis is not None and slice_dim < set_dim: structurally_valid = True elif set_dim == 0 and idx == (slice(None),): @@ -933,33 +1012,47 @@ def _processUnhashableIndex(self, idx): structurally_valid = True if not structurally_valid: - msg = ("Index %s contains an invalid number of entries for " - "component '%s'. Expected %s, got %s.") + msg = ( + "Index %s contains an invalid number of entries for " + "component '%s'. Expected %s, got %s." + ) if type(set_dim) is type: set_dim = set_dim.__name__ msg += '\n ' + '\n '.join( - textwrap.wrap(textwrap.dedent(""" - Slicing components relies on knowing the - underlying set dimensionality (even if the - dimensionality is None). The underlying - component set ('%s') dimensionality has not been - determined (likely because it is an empty Set). - You can avoid this error by specifying the Set - dimensionality (with the 'dimen=' keyword).""" % ( - self.index_set(), )).strip())) - raise IndexError(msg % ( - IndexedComponent_slice._getitem_args_to_str(list(idx)), - self.name, set_dim, slice_dim)) + textwrap.wrap( + textwrap.dedent( + """ + Slicing components relies on knowing the + underlying set dimensionality (even if the + dimensionality is None). The underlying + component set ('%s') dimensionality has not been + determined (likely because it is an empty Set). + You can avoid this error by specifying the Set + dimensionality (with the 'dimen=' keyword).""" + % (self.index_set(),) + ).strip() + ) + ) + raise IndexError( + msg + % ( + IndexedComponent_slice._getitem_args_to_str(list(idx)), + self.name, + set_dim, + slice_dim, + ) + ) return IndexedComponent_slice(self, fixed, sliced, ellipsis) - elif _found_numeric: + elif len(idx) == len(fixed): if len(idx) == 1: return fixed[0] else: - return tuple( fixed[i] for i in range(len(idx)) ) + return tuple(fixed[i] for i in range(len(idx))) else: raise DeveloperError( "Unknown problem encountered when trying to retrieve " - "index for component %s" % (self.name,) ) + f"index '{orig_idx}' for component '{self.name}'" + ) def _getitem_when_not_present(self, index): """Returns/initializes a value when the index is not in the _data dict. @@ -1028,23 +1121,25 @@ def set_value(self, value): raise ValueError( "Cannot set the value for the indexed component '%s' " "without specifying an index value.\n" - "\tFor example, model.%s[i] = value" - % (self.name, self.name)) + "\tFor example, model.%s[i] = value" % (self.name, self.name) + ) else: raise DeveloperError( "Derived component %s failed to define set_value() " - "for scalar instances." - % (self.__class__.__name__,)) + "for scalar instances." % (self.__class__.__name__,) + ) def _pprint(self): """Print component information.""" - return ( [("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None), - ], - self._data.items(), - ( "Object",), - lambda k, v: [ type(v) ] - ) + return ( + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ], + self._data.items(), + ("Object",), + lambda k, v: [type(v)], + ) def id_index_map(self): """ @@ -1113,21 +1208,22 @@ def __array__(self, dtype=None): if _dim is None: raise TypeError( "Cannot convert a non-dimensioned Pyomo IndexedComponent " - "(%s) into a numpy array" % (self,)) + "(%s) into a numpy array" % (self,) + ) bounds = self.index_set().bounds() if not isinstance(bounds[0], Sequence): bounds = ((bounds[0],), (bounds[1],)) if any(b != 0 for b in bounds[0]): raise TypeError( "Cannot convert a Pyomo IndexedComponent " - "(%s) with bounds [%s, %s] into a numpy array" % ( - self, bounds[0], bounds[1])) - shape = tuple(b+1 for b in bounds[1]) + "(%s) with bounds [%s, %s] into a numpy array" + % (self, bounds[0], bounds[1]) + ) + shape = tuple(b + 1 for b in bounds[1]) ans = NumericNDArray(shape=shape, dtype=object) for k, v in self.items(): ans[k] = v return ans def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return NumericNDArray.__array_ufunc__( - None, ufunc, method, *inputs, **kwargs) + return NumericNDArray.__array_ufunc__(None, ufunc, method, *inputs, **kwargs) diff --git a/pyomo/core/base/indexed_component_slice.py b/pyomo/core/base/indexed_component_slice.py index 06f7a6a5bb1..9779711a19b 100644 --- a/pyomo/core/base/indexed_component_slice.py +++ b/pyomo/core/base/indexed_component_slice.py @@ -15,8 +15,10 @@ from pyomo.common import DeveloperError from pyomo.common.collections import Sequence +from pyomo.core.base.enums import SortComponents from pyomo.core.base.global_set import UnindexedComponent_index + class IndexedComponent_slice(object): """Special class for slicing through hierarchical component trees @@ -29,6 +31,7 @@ class IndexedComponent_slice(object): calls to __getitem__ / __getattr__ / __call__ happen *before* the call to __iter__() """ + ATTR_MASK = 8 ITEM_MASK = 16 CALL_MASK = 32 @@ -96,17 +99,29 @@ def __init__(self, component, fixed=None, sliced=None, ellipsis=None): if fixed is not None: self._call_stack.append(fixed) self._len += 1 - set_attr('call_errors_generate_exceptions', - component.call_errors_generate_exceptions) - set_attr('key_errors_generate_exceptions', - component.key_errors_generate_exceptions) - set_attr('attribute_errors_generate_exceptions', - component.attribute_errors_generate_exceptions) + set_attr( + 'call_errors_generate_exceptions', + component.call_errors_generate_exceptions, + ) + set_attr( + 'key_errors_generate_exceptions', + component.key_errors_generate_exceptions, + ) + set_attr( + 'attribute_errors_generate_exceptions', + component.attribute_errors_generate_exceptions, + ) else: # Normal constructor - set_attr('_call_stack', [ - (IndexedComponent_slice.slice_info, - (component, fixed, sliced, ellipsis)) ]) + set_attr( + '_call_stack', + [ + ( + IndexedComponent_slice.slice_info, + (component, fixed, sliced, ellipsis), + ) + ], + ) set_attr('_len', 1) # Since this is an object, users may change these flags # between where they declare the slice and iterate over it. @@ -122,13 +137,13 @@ def __getstate__(self): "blanket" implementation of :py:meth:`__getattr__`, we need to explicitly implement these to avoid "accidentally" extending or evaluating this slice.""" - return {k:getattr(self,k) for k in self.__dict__} + return {k: getattr(self, k) for k in self.__dict__} def __setstate__(self, state): - """Deserialize the state into this object. """ + """Deserialize the state into this object.""" set_attr = super(IndexedComponent_slice, self).__setattr__ - for k,v in state.items(): - set_attr(k,v) + for k, v in state.items(): + set_attr(k, v) def __deepcopy__(self, memo): """Deepcopy this object (leveraging :py:meth:`__getstate__`)""" @@ -147,8 +162,9 @@ def __getattr__(self, name): IndexedComponent_slice object. Subsequent attempts to resolve attributes hit this method. """ - return IndexedComponent_slice(self, ( - IndexedComponent_slice.get_attribute, name ) ) + return IndexedComponent_slice( + self, (IndexedComponent_slice.get_attribute, name) + ) def __setattr__(self, name, value): """Override the "." operator implementing attribute assignment @@ -161,11 +177,12 @@ def __setattr__(self, name, value): """ # Don't overload any pre-existing attributes if name in self.__dict__: - return super(IndexedComponent_slice, self).__setattr__(name,value) + return super(IndexedComponent_slice, self).__setattr__(name, value) # Immediately evaluate the slice and set the attributes - for i in IndexedComponent_slice(self, ( - IndexedComponent_slice.set_attribute, name, value ) ): + for i in IndexedComponent_slice( + self, (IndexedComponent_slice.set_attribute, name, value) + ): pass return None @@ -176,8 +193,7 @@ def __getitem__(self, idx): IndexedComponent_slice object. Subsequent attempts to query items hit this method. """ - return IndexedComponent_slice(self, ( - IndexedComponent_slice.get_item, idx ) ) + return IndexedComponent_slice(self, (IndexedComponent_slice.get_item, idx)) def __setitem__(self, idx, val): """Override the "[]" operator for setting item values. @@ -189,8 +205,9 @@ def __setitem__(self, idx, val): and immediately evaluates the slice. """ # Immediately evaluate the slice and set the attributes - for i in IndexedComponent_slice(self, ( - IndexedComponent_slice.set_item, idx, val ) ): + for i in IndexedComponent_slice( + self, (IndexedComponent_slice.set_item, idx, val) + ): pass return None @@ -204,8 +221,7 @@ def __delitem__(self, idx): and immediately evaluates the slice. """ # Immediately evaluate the slice and set the attributes - for i in IndexedComponent_slice(self, ( - IndexedComponent_slice.del_item, idx ) ): + for i in IndexedComponent_slice(self, (IndexedComponent_slice.del_item, idx)): pass return None @@ -230,12 +246,13 @@ def __call__(self, *args, **kwds): # don't know why that happens, but we will trap it here and # remove the getattr(__name__) from the call stack. _len = self._len - if self._call_stack[_len-1][0] == IndexedComponent_slice.get_attribute \ - and self._call_stack[_len-1][1] == '__name__': + if ( + self._call_stack[_len - 1][0] == IndexedComponent_slice.get_attribute + and self._call_stack[_len - 1][1] == '__name__' + ): self._len -= 1 - ans = IndexedComponent_slice(self, ( - IndexedComponent_slice.call, args, kwds ) ) + ans = IndexedComponent_slice(self, (IndexedComponent_slice.call, args, kwds)) # Because we just duplicated the slice and added a new entry, we # know that the _len == len(_call_stack) if ans._call_stack[-2][1] == 'component': @@ -243,7 +260,7 @@ def __call__(self, *args, **kwds): else: # Note: simply calling "list(self)" results in infinite # recursion in python2.6 - return list( i for i in ans ) + return list(i for i in ans) @classmethod def _getitem_args_to_str(cls, args): @@ -252,9 +269,11 @@ def _getitem_args_to_str(cls, args): args[i] = '...' elif type(v) is slice: args[i] = ( - (repr(v.start) if v.start is not None else '') + ':' + - (repr(v.stop) if v.stop is not None else '') + - (':%r' % v.step if v.step is not None else '')) + (repr(v.start) if v.start is not None else '') + + ':' + + (repr(v.stop) if v.stop is not None else '') + + (':%r' % v.step if v.step is not None else '') + ) else: args[i] = repr(v) return '[' + ', '.join(args) + ']' @@ -269,8 +288,7 @@ def __str__(self): tmp.update(level[1][2]) if level[1][3] is not None: tmp[level[1][3]] = Ellipsis - ans += self._getitem_args_to_str( - [tmp[i] for i in sorted(tmp)]) + ans += self._getitem_args_to_str([tmp[i] for i in sorted(tmp)]) elif level[0] & IndexedComponent_slice.ITEM_MASK: if isinstance(level[1], Sequence): tmp = list(level[1]) @@ -281,11 +299,14 @@ def __str__(self): ans += '.' + level[1] elif level[0] & IndexedComponent_slice.CALL_MASK: ans += ( - '(' + ', '.join( + '(' + + ', '.join( itertools.chain( (repr(_) for _ in level[1]), - ('%s=%r' % kv for kv in level[2].items())) - ) + ')' + ('%s=%r' % kv for kv in level[2].items()), + ) + ) + + ')' ) if level[0] & IndexedComponent_slice.SET_MASK: ans += ' = %r' % (level[2],) @@ -294,34 +315,39 @@ def __str__(self): return ans def __hash__(self): - return hash(tuple(_freeze(x) for x in self._call_stack[:self._len])) + return hash(tuple(_freeze(x) for x in self._call_stack[: self._len])) def __eq__(self, other): if other is self: return True if type(other) is not IndexedComponent_slice: return False - return tuple(_freeze(x) for x in self._call_stack[:self._len]) \ - == tuple(_freeze(x) for x in other._call_stack[:other._len]) + return tuple(_freeze(x) for x in self._call_stack[: self._len]) == tuple( + _freeze(x) for x in other._call_stack[: other._len] + ) def __ne__(self, other): return not self.__eq__(other) def duplicate(self): ans = IndexedComponent_slice(self) - ans._call_stack = ans._call_stack[:ans._len] + ans._call_stack = ans._call_stack[: ans._len] return ans - def index_wildcard_keys(self): - _iter = _IndexedComponent_slice_iter(self, iter_over_index=True) + def index_wildcard_keys(self, sort): + _iter = _IndexedComponent_slice_iter(self, iter_over_index=True, sort=sort) return (_iter.get_last_index_wildcards() for _ in _iter) - def wildcard_keys(self): - _iter = self.__iter__() + def wildcard_keys(self, sort=SortComponents.UNSORTED): + _iter = _IndexedComponent_slice_iter(self, sort=sort) return (_iter.get_last_index_wildcards() for _ in _iter) - def wildcard_items(self): - _iter = self.__iter__() + def wildcard_values(self, sort=SortComponents.UNSORTED): + """Return an iterator over this slice""" + return _IndexedComponent_slice_iter(self, sort=sort) + + def wildcard_items(self, sort=SortComponents.UNSORTED): + _iter = _IndexedComponent_slice_iter(self, sort=sort) return ((_iter.get_last_index_wildcards(), _) for _ in _iter) def expanded_keys(self): @@ -338,9 +364,9 @@ def _freeze(info): return ( info[0], id(info[1][0]), # id of the Component - tuple(info[1][1].items()), # {idx: value} for fixed + tuple(info[1][1].items()), # {idx: value} for fixed tuple(info[1][2].keys()), # {idx: slice} for slices - info[1][3] # elipsis index + info[1][3], # ellipsis index ) elif info[0] & IndexedComponent_slice.ITEM_MASK: if type(info[1]) is not tuple: @@ -349,22 +375,21 @@ def _freeze(info): index = info[1] return ( info[0], - tuple( (x.start,x.stop,x.step) if type(x) is slice else x - for x in index ), + tuple((x.start, x.stop, x.step) if type(x) is slice else x for x in index), info[2:], ) else: return info - class _slice_generator(object): """Utility (iterator) for generating the elements of one slice Iterate through the component index and yield the component data values that match the slice template. """ - def __init__(self, component, fixed, sliced, ellipsis, iter_over_index): + + def __init__(self, component, fixed, sliced, ellipsis, iter_over_index, sort): self.component = component self.fixed = fixed self.sliced = sliced @@ -378,7 +403,8 @@ def __init__(self, component, fixed, sliced, ellipsis, iter_over_index): self.tuplize_unflattened_index = ( self.component._implicit_subsets is None - or len(self.component._implicit_subsets) == 1 ) + or len(self.component._implicit_subsets) == 1 + ) if fixed is None and sliced is None and ellipsis is None: # This is a slice rooted at a concrete component. This is @@ -394,11 +420,15 @@ def __init__(self, component, fixed, sliced, ellipsis, iter_over_index): if iter_over_index and component.index_set().isfinite(): # This should be used to iterate over all the potential # indices of a sparse IndexedComponent. - self.component_iter = component.index_set().__iter__() + if SortComponents.SORTED_INDICES in sort: + self.component_iter = component.index_set().sorted_iter() + elif SortComponents.ORDERED_INDICES in sort: + self.component_iter = component.index_set().ordered_iter() + else: + self.component_iter = iter(component.index_set()) else: # The default behavior is to iterate over the component. - self.component_iter = component.keys() - + self.component_iter = component.keys(sort) def next(self): """__next__() iterator for Py2 compatibility""" @@ -454,7 +484,7 @@ def __next__(self): # we can use the cached indices to iterate over "indices" # of a slice. # - # last_index is the most recent index encountered, not + # last_index is the most recent index encountered, not # the last index that will ever be encountered. self.last_index = _idx @@ -474,23 +504,36 @@ def __next__(self): # None. return None + # Backwards compatibility _IndexedComponent_slice = IndexedComponent_slice + # Mock up a callable object with a "check_complete" method def _advance_iter(_iter): return next(_iter) + + def _advance_iter_check_complete(): pass + + _advance_iter.check_complete = _advance_iter_check_complete + # A dummy class that we can use as a named entity below -class _NotIterable(object): pass +class _NotIterable(object): + pass class _IndexedComponent_slice_iter(object): - def __init__(self, component_slice, advance_iter=_advance_iter, - iter_over_index=False): + def __init__( + self, + component_slice, + advance_iter=_advance_iter, + iter_over_index=False, + sort=False, + ): # _iter_stack holds a list of elements X where X is either a # _slice_generator iterator (if this level in the hierarchy is a # slice) or None (if this level is either a SimpleComponent, @@ -498,16 +541,20 @@ def __init__(self, component_slice, advance_iter=_advance_iter, self._slice = component_slice self.advance_iter = advance_iter self._iter_over_index = iter_over_index + self._sort = SortComponents(sort) call_stack = self._slice._call_stack call_stack_len = self._slice._len - self._iter_stack = [None]*call_stack_len + self._iter_stack = [None] * call_stack_len # Initialize the top of the `_iter_stack` (deepest part of the # model hierarchy): if call_stack[0][0] == IndexedComponent_slice.slice_info: # The root of the _iter_stack is a generator for the # "highest-level slice" (slice closest to the model() block) self._iter_stack[0] = _slice_generator( - *call_stack[0][1], iter_over_index=self._iter_over_index) + *call_stack[0][1], + iter_over_index=self._iter_over_index, + sort=self._sort, + ) # call_stack[0][1] is a (fixed, sliced, ellipsis) tuple, where # fixed and sliced are dicts. elif call_stack[0][0] == IndexedComponent_slice.set_item: @@ -519,10 +566,11 @@ def __init__(self, component_slice, advance_iter=_advance_iter, # is not an iterable thing, so we will will use a type flag # to signal this case to __next__below. assert call_stack_len == 1 - self._iter_stack[0] = _NotIterable # Something not None + self._iter_stack[0] = _NotIterable # Something not None else: - raise DeveloperError("Unexpected call_stack flag encountered: %s" - % call_stack[0][0]) + raise DeveloperError( + "Unexpected call_stack flag encountered: %s" % call_stack[0][0] + ) def __iter__(self): """This class implements the iterator API""" @@ -539,7 +587,7 @@ def __next__(self): # # NOTE: We refer to this stack as growing "downward", just like # the model hierarchy to which it refers. - idx = len(self._iter_stack)-1 + idx = len(self._iter_stack) - 1 while True: # Flush out any non-slice levels. Since we initialize # _iter_stack with None, in the first call this will @@ -556,7 +604,7 @@ def __next__(self): try: if self._iter_stack[idx] is _NotIterable: # This happens when attempting a `set_item` call on - # a `_ReferenceDict` whose slice consists of only a + # a `_ReferenceDict` whose slice consists of only a # `slice_info` entry. # E.g. # ref = Reference(m.x[:]) @@ -622,8 +670,10 @@ def __next__(self): # things that match. We will allow users to # (silently) ignore any attribute errors generated # by concrete indices in the slice hierarchy... - if self._slice.attribute_errors_generate_exceptions \ - and not self._iter_over_index: + if ( + self._slice.attribute_errors_generate_exceptions + and not self._iter_over_index + ): raise # Break from the inner loop; next action will be to # advance the "highest-level iterator" @@ -631,15 +681,17 @@ def __next__(self): elif _call[0] == IndexedComponent_slice.get_item: try: # Get the specified index for the current component: - _comp = _comp.__getitem__( _call[1] ) + _comp = _comp.__getitem__(_call[1]) except LookupError: # Since we are slicing, we may only be # interested in things that match. We will # allow users to (silently) ignore any key # errors generated by concrete indices in the # slice hierarchy... - if self._slice.key_errors_generate_exceptions \ - and not self._iter_over_index: + if ( + self._slice.key_errors_generate_exceptions + and not self._iter_over_index + ): raise break # If the index defines a slice, add a slice generator @@ -652,12 +704,13 @@ def __next__(self): assert _comp._len == 1 self._iter_stack[idx] = _slice_generator( *_comp._call_stack[0][1], - iter_over_index=self._iter_over_index + iter_over_index=self._iter_over_index, + sort=self._sort, ) try: # Advance to get the first component defined # by this slice (so that we have a concrete - # context that we can use to decend further + # context that we can use to descend further # down the model hierarchy): _comp = self.advance_iter(self._iter_stack[idx]) # Note that the iterator will remained @@ -683,15 +736,17 @@ def __next__(self): try: # Assume the callable "comp" in our hierarchy # returns a component: - _comp = _comp( *(_call[1]), **(_call[2]) ) + _comp = _comp(*(_call[1]), **(_call[2])) except: # Since we are slicing, we may only be # interested in things that match. We will # allow users to (silently) ignore any key # errors generated by concrete indices in the # slice hierarchy... - if self._slice.call_errors_generate_exceptions \ - and not self._iter_over_index: + if ( + self._slice.call_errors_generate_exceptions + and not self._iter_over_index + ): raise break elif _call[0] == IndexedComponent_slice.set_attribute: @@ -729,7 +784,9 @@ def __next__(self): # _iter_stack value to _NotIterable. if self._iter_stack[idx] is _NotIterable: _iter = _slice_generator( - *_call[1], iter_over_index=self._iter_over_index + *_call[1], + iter_over_index=self._iter_over_index, + sort=self._sort, ) while True: # This ends when the _slice_generator raises @@ -750,22 +807,25 @@ def __next__(self): # our current advance_iter to walk it and set only # the appropriate keys try: - _tmp = _comp.__getitem__( _call[1] ) + _tmp = _comp.__getitem__(_call[1]) except KeyError: # Since we are slicing, we may only be # interested in things that match. We will # allow users to (silently) ignore any key # errors generated by concrete indices in the # slice hierarchy... - if self._slice.key_errors_generate_exceptions \ - and not self._iter_over_index: + if ( + self._slice.key_errors_generate_exceptions + and not self._iter_over_index + ): raise break if _tmp.__class__ is IndexedComponent_slice: # Extract the _slice_generator and evaluate it. assert _tmp._len == 1 _iter = _IndexedComponent_slice_iter( - _tmp, self.advance_iter) + _tmp, self.advance_iter, sort=self._sort + ) for _ in _iter: # Check to make sure the custom iterator # (i.e._fill_in_known_wildcards) is complete @@ -793,7 +853,7 @@ def __next__(self): # our current advance_iter to walk it and delete the # appropriate keys try: - _tmp = _comp.__getitem__( _call[1] ) + _tmp = _comp.__getitem__(_call[1]) except KeyError: # Since we are slicing, we may only be # interested in things that match. We will @@ -807,7 +867,8 @@ def __next__(self): # Extract the _slice_generator and evaluate it. assert _tmp._len == 1 _iter = _IndexedComponent_slice_iter( - _tmp, self.advance_iter) + _tmp, self.advance_iter, sort=self._sort + ) _idx_to_del = [] # Two passes, so that we don't edit the _data # dicts while we are iterating over them @@ -837,7 +898,8 @@ def __next__(self): else: raise DeveloperError( "Unexpected entry in IndexedComponent_slice " - "_call_stack: %s" % (_call[0],)) + "_call_stack: %s" % (_call[0],) + ) idx += 1 if idx == self._slice._len: @@ -848,10 +910,7 @@ def __next__(self): return _comp def get_last_index(self): - ans = sum( - ( x.last_index for x in self._iter_stack if x is not None ), - () - ) + ans = sum((x.last_index for x in self._iter_stack if x is not None), ()) if len(ans) == 1: return ans[0] else: @@ -870,11 +929,16 @@ def get_last_index_wildcards(self): # Extract the indices corresponding to the wildcard positions # for that slice. ans = sum( - ( tuple( x.last_index[i] - for i in range(len(x.last_index)) - if i not in x.fixed ) - for x in self._iter_stack if x is not None ), - () + ( + tuple( + x.last_index[i] + for i in range(len(x.last_index)) + if i not in x.fixed + ) + for x in self._iter_stack + if x is not None + ), + (), ) if not ans: return UnindexedComponent_index @@ -882,4 +946,3 @@ def get_last_index_wildcards(self): return ans[0] else: return ans - diff --git a/pyomo/core/base/initializer.py b/pyomo/core/base/initializer.py index cc13b0769d8..991feb0450d 100644 --- a/pyomo/core/base/initializer.py +++ b/pyomo/core/base/initializer.py @@ -9,35 +9,35 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +import collections import functools import inspect from collections.abc import Sequence from collections.abc import Mapping -from pyomo.common.dependencies import ( - numpy, numpy_available, pandas, pandas_available, -) +from pyomo.common.dependencies import numpy, numpy_available, pandas, pandas_available +from pyomo.common.modeling import NOTSET from pyomo.core.pyomoobject import PyomoObject initializer_map = {} sequence_types = set() # initialize with function, method, and method-wrapper types. -function_types = set([ - type(PyomoObject.is_expression_type), - type(PyomoObject().is_expression_type), - type(PyomoObject.is_expression_type.__call__), -]) +function_types = set( + [ + type(PyomoObject.is_expression_type), + type(PyomoObject().is_expression_type), + type(PyomoObject.is_expression_type.__call__), + ] +) -# -# The following set of "Initializer" classes are a general functionality -# and should be promoted to their own module so that we can use them on -# all Components to standardize how we process component arguments. -# -def Initializer(init, - allow_generators=False, - treat_sequences_as_mappings=True, - arg_not_specified=None): + +def Initializer( + arg, + allow_generators=False, + treat_sequences_as_mappings=True, + arg_not_specified=None, +): """Standardized processing of Component keyword arguments Component keyword arguments accept a number of possible inputs, from @@ -45,21 +45,45 @@ def Initializer(init, function standardizes the processing of keyword arguments and returns "initializer classes" that are specialized to the specific data type provided. + + Parameters + ---------- + arg: + + The argument passed to the component constructor. This could + be almost any type, including a scalar, dict, list, function, + generator, or None. + + allow_generators: bool + + If False, then we will raise an exception if ``arg`` is a generator + + treat_sequences_as_mappings: bool + + If True, then if ``arg`` is a sequence, we will treat it as if + it were a mapping (i.e., ``dict(enumerate(arg))``). Otherwise + sequences will be returned back as the value of the initializer. + + arg_not_specified: + + If ``arg`` is ``arg_not_specified``, then the function will + return None (and not an InitializerBase object). + """ - if init is arg_not_specified: + if arg is arg_not_specified: return None - if init.__class__ in initializer_map: - return initializer_map[init.__class__](init) - if init.__class__ in sequence_types: + if arg.__class__ in initializer_map: + return initializer_map[arg.__class__](arg) + if arg.__class__ in sequence_types: if treat_sequences_as_mappings: - return ItemInitializer(init) + return ItemInitializer(arg) else: - return ConstantInitializer(init) - if init.__class__ in function_types: + return ConstantInitializer(arg) + if arg.__class__ in function_types: # Note: we do not use "inspect.isfunction or inspect.ismethod" # because some function-like things (notably cythonized # functions) return False - if not allow_generators and inspect.isgeneratorfunction(init): + if not allow_generators and inspect.isgeneratorfunction(arg): raise ValueError("Generator functions are not allowed") # Historically pyomo.core.base.misc.apply_indexed_rule # accepted rules that took only the parent block (even for @@ -71,50 +95,50 @@ def Initializer(init, # the partial handling), but I have been unable to come up with # an example. The closest was getattr(), but that falls back on # getattr.__call__, which does support getfullargspec. - _args = inspect.getfullargspec(init) + _args = inspect.getfullargspec(arg) _nargs = len(_args.args) - if inspect.ismethod(init) and init.__self__ is not None: + if inspect.ismethod(arg) and arg.__self__ is not None: # Ignore 'self' for bound instance methods and 'cls' for # @classmethods _nargs -= 1 if _nargs == 1 and _args.varargs is None: return ScalarCallInitializer( - init, constant=not inspect.isgeneratorfunction(init)) + arg, constant=not inspect.isgeneratorfunction(arg) + ) else: - return IndexedCallInitializer(init) - if hasattr(init, '__len__'): - if isinstance(init, Mapping): - initializer_map[init.__class__] = ItemInitializer - elif isinstance(init, Sequence) and not isinstance(init, str): - sequence_types.add(init.__class__) - elif isinstance(init, PyomoObject): + return IndexedCallInitializer(arg) + if hasattr(arg, '__len__'): + if isinstance(arg, Mapping): + initializer_map[arg.__class__] = ItemInitializer + elif isinstance(arg, Sequence) and not isinstance(arg, str): + sequence_types.add(arg.__class__) + elif isinstance(arg, PyomoObject): # TODO: Should IndexedComponent inherit from # collections.abc.Mapping? - if init.is_component_type() and init.is_indexed(): - initializer_map[init.__class__] = ItemInitializer + if arg.is_component_type() and arg.is_indexed(): + initializer_map[arg.__class__] = ItemInitializer else: - initializer_map[init.__class__] = ConstantInitializer - elif any(c.__name__ == 'ndarray' for c in init.__class__.__mro__): - if numpy_available and isinstance(init, numpy.ndarray): - sequence_types.add(init.__class__) - elif any(c.__name__ == 'Series' for c in init.__class__.__mro__): - if pandas_available and isinstance(init, pandas.Series): - sequence_types.add(init.__class__) - elif any(c.__name__ == 'DataFrame' for c in init.__class__.__mro__): - if pandas_available and isinstance(init, pandas.DataFrame): - initializer_map[init.__class__] = DataFrameInitializer + initializer_map[arg.__class__] = ConstantInitializer + elif any(c.__name__ == 'ndarray' for c in arg.__class__.__mro__): + if numpy_available and isinstance(arg, numpy.ndarray): + sequence_types.add(arg.__class__) + elif any(c.__name__ == 'Series' for c in arg.__class__.__mro__): + if pandas_available and isinstance(arg, pandas.Series): + sequence_types.add(arg.__class__) + elif any(c.__name__ == 'DataFrame' for c in arg.__class__.__mro__): + if pandas_available and isinstance(arg, pandas.DataFrame): + initializer_map[arg.__class__] = DataFrameInitializer else: # Note: this picks up (among other things) all string instances - initializer_map[init.__class__] = ConstantInitializer + initializer_map[arg.__class__] = ConstantInitializer # recursively call Initializer to pick up the new registration return Initializer( - init, + arg, allow_generators=allow_generators, treat_sequences_as_mappings=treat_sequences_as_mappings, - arg_not_specified=arg_not_specified + arg_not_specified=arg_not_specified, ) - if ( inspect.isgenerator(init) or - hasattr(init, 'next') or hasattr(init, '__next__') ): + if inspect.isgenerator(arg) or hasattr(arg, 'next') or hasattr(arg, '__next__'): # This catches generators and iterators (like enumerate()), but # skips "reusable" iterators like range() as well as Pyomo # (finite) Set objects [they were both caught by the @@ -124,50 +148,54 @@ def Initializer(init, # Deepcopying generators is problematic (e.g., it generates a # segfault in pypy3 7.3.0). We will immediately expand the # generator into a tuple and then store it as a constant. - return ConstantInitializer(tuple(init)) - if type(init) is functools.partial: + return ConstantInitializer(tuple(arg)) + if type(arg) is functools.partial: try: - _args = inspect.getfullargspec(init.func) + _args = inspect.getfullargspec(arg.func) except: # Inspect doesn't work for some built-in callables (notably # 'int'). We will just have to assume this is a "normal" # IndexedCallInitializer - return IndexedCallInitializer(init) - if len(_args.args) - len(init.args) == 1 and _args.varargs is None: - return ScalarCallInitializer(init) + return IndexedCallInitializer(arg) + _positional_args = set(_args.args) + for key in arg.keywords: + _positional_args.discard(key) + if len(_positional_args) - len(arg.args) == 1 and _args.varargs is None: + return ScalarCallInitializer(arg) else: - return IndexedCallInitializer(init) - if isinstance(init, InitializerBase): - return init - if isinstance(init, PyomoObject): + return IndexedCallInitializer(arg) + if isinstance(arg, InitializerBase): + return arg + if isinstance(arg, PyomoObject): # We re-check for PyomoObject here, as that picks up / caches # non-components like component data objects and expressions - initializer_map[init.__class__] = ConstantInitializer - return ConstantInitializer(init) - if callable(init) and not isinstance(init, type): + initializer_map[arg.__class__] = ConstantInitializer + return ConstantInitializer(arg) + if callable(arg) and not isinstance(arg, type): # We assume any callable thing could be a functor; but, we must # filter out types, as we use types as special identifiers that # should not be called (e.g., UnknownSetDimen) - if inspect.isfunction(init) or inspect.ismethod(init): + if inspect.isfunction(arg) or inspect.ismethod(arg): # Add this to the set of known function types and try again - function_types.add(type(init)) + function_types.add(type(arg)) else: # Try again, but use the __call__ method (for supporting # things like functors and cythonized functions). __call__ # is almost certainly going to be a method-wrapper - init = init.__call__ + arg = arg.__call__ return Initializer( - init, + arg, allow_generators=allow_generators, treat_sequences_as_mappings=treat_sequences_as_mappings, arg_not_specified=arg_not_specified, ) - initializer_map[init.__class__] = ConstantInitializer - return ConstantInitializer(init) + initializer_map[arg.__class__] = ConstantInitializer + return ConstantInitializer(arg) class InitializerBase(object): """Base class for all Initializer objects""" + __slots__ = () verified = False @@ -180,7 +208,7 @@ def __getstate__(self): classes (where __slots__ are only declared on the most derived class). """ - return {k:getattr(self,k) for k in self.__slots__} + return {k: getattr(self, k) for k in self.__slots__} def __setstate__(self, state): for key, val in state.items(): @@ -200,13 +228,15 @@ def indices(self): This will raise a RuntimeError if this initializer does not contain embedded indices """ - raise RuntimeError("Initializer %s does not contain embedded indices" - % (type(self).__name__,)) + raise RuntimeError( + "Initializer %s does not contain embedded indices" % (type(self).__name__,) + ) class ConstantInitializer(InitializerBase): """Initializer for constant values""" - __slots__ = ('val','verified') + + __slots__ = ('val', 'verified') def __init__(self, val): self.val = val @@ -221,6 +251,7 @@ def constant(self): class ItemInitializer(InitializerBase): """Initializer for dict-like values supporting __getitem__()""" + __slots__ = ('_dict',) def __init__(self, _dict): @@ -240,8 +271,9 @@ def indices(self): class DataFrameInitializer(InitializerBase): - """Initializer for dict-like values supporting __getitem__()""" - __slots__ = ('_df', '_column',) + """Initializer for pandas DataFrame values""" + + __slots__ = ('_df', '_column') def __init__(self, dataframe, column=None): self._df = dataframe @@ -252,7 +284,8 @@ def __init__(self, dataframe, column=None): else: raise ValueError( "Cannot construct DataFrameInitializer for DataFrame with " - "multiple columns without also specifying the data column") + "multiple columns without also specifying the data column" + ) def __call__(self, parent, idx): return self._df.at[idx, self._column] @@ -266,6 +299,7 @@ def indices(self): class IndexedCallInitializer(InitializerBase): """Initializer for functions and callable objects""" + __slots__ = ('_fcn',) def __init__(self, _fcn): @@ -282,7 +316,6 @@ def __call__(self, parent, idx): return self._fcn(parent, idx) - class CountedCallGenerator(object): """Generator implementing the "counted call" initialization scheme @@ -290,6 +323,7 @@ class CountedCallGenerator(object): first argument past the parent block is a monotonically-increasing integer beginning at `start_at`. """ + def __init__(self, ctype, fcn, scalar, parent, idx, start_at): # Note: this is called by a component using data from a Set (so # any tuple-like type should have already been checked and @@ -321,13 +355,14 @@ def _filter(ctype, x): repeatedly with an increasing count parameter until the rule returns %s.End. None is not a valid return value in this case due to the likelihood that an error in the rule can incorrectly return None.""" - % ((ctype.__name__,)*4)) + % ((ctype.__name__,) * 4) + ) return x class CountedCallInitializer(InitializerBase): - """Initializer for functions implementing the "counted call" API. - """ + """Initializer for functions implementing the "counted call" API.""" + # Pyomo has a historical feature for some rules, where the number of # times[*1] the rule was called could be passed as an additional # argument between the block and the index. This was primarily @@ -348,7 +383,7 @@ class CountedCallInitializer(InitializerBase): # # [JDS 6/2019] We will support a slightly restricted but more # consistent form of the original implementation for backwards - # compatability, but I believe that we should deprecate this syntax + # compatibility, but I believe that we should deprecate this syntax # entirely. __slots__ = ('_fcn', '_is_counted_rule', '_scalar', '_ctype', '_start') @@ -373,7 +408,7 @@ def __call__(self, parent, idx): return self._fcn(parent, idx) if self._is_counted_rule == True: return CountedCallGenerator( - self._ctype, self._fcn, self._scalar, parent, idx, self._start, + self._ctype, self._fcn, self._scalar, parent, idx, self._start ) # Note that this code will only be called once, and only if @@ -392,6 +427,7 @@ def __call__(self, parent, idx): class ScalarCallInitializer(InitializerBase): """Initializer for functions taking only the parent block argument.""" + __slots__ = ('_fcn', '_constant') def __init__(self, _fcn, constant=True): @@ -414,13 +450,16 @@ class DefaultInitializer(InitializerBase): ---------- initializer: :py:class`InitializerBase` the Initializer instance to wrap + default: the value to return inlieu of the caught exception(s) + exceptions: Exception or tuple the single Exception or tuple of Exceptions to catch and return the default value. """ + __slots__ = ('_initializer', '_default', '_exceptions') def __init__(self, initializer, default, exceptions): @@ -444,3 +483,76 @@ def contains_indices(self): def indices(self): return self._initializer.indices() + + +_bound_sequence_types = collections.defaultdict(None.__class__) + + +class BoundInitializer(InitializerBase): + """Initializer wrapper for processing bounds (mapping scalars to 2-tuples) + + Note that this class is meant to mimic the behavior of + :py:func:`Initializer` and will return ``None`` if the initializer + that it is wrapping is ``None``. + + Parameters + ---------- + arg: + + As with :py:func:`Initializer`, this is the raw argument passed + to the component constructor. + + obj: :py:class:`Component` + + The component that "owns" the initializer. This initializer + will treat sequences as mappings only if the owning component is + indexed and the sequence passed to the initializer is not of + length 2 + + """ + + __slots__ = ('_initializer',) + + def __new__(cls, arg=None, obj=NOTSET): + # The Initializer() function returns None if the initializer is + # None. We will mock that behavior by commandeering __new__() + if arg is None and obj is not NOTSET: + return None + else: + return super().__new__(cls) + + def __init__(self, arg, obj=NOTSET): + if obj is NOTSET or obj.is_indexed(): + treat_sequences_as_mappings = not ( + isinstance(arg, Sequence) + and len(arg) == 2 + and not isinstance(arg[0], Sequence) + ) + else: + treat_sequences_as_mappings = False + self._initializer = Initializer( + arg, treat_sequences_as_mappings=treat_sequences_as_mappings + ) + + def __call__(self, parent, index): + val = self._initializer(parent, index) + if _bound_sequence_types[val.__class__]: + return val + if _bound_sequence_types[val.__class__] is None: + _bound_sequence_types[val.__class__] = isinstance( + val, Sequence + ) and not isinstance(val, str) + if _bound_sequence_types[val.__class__]: + return val + return (val, val) + + def constant(self): + """Return True if this initializer is constant across all indices""" + return self._initializer.constant() + + def contains_indices(self): + """Return True if this initializer contains embedded indices""" + return self._initializer.contains_indices() + + def indices(self): + return self._initializer.indices() diff --git a/pyomo/core/base/instance2dat.py b/pyomo/core/base/instance2dat.py index 79db89402f6..b11c0c18e11 100644 --- a/pyomo/core/base/instance2dat.py +++ b/pyomo/core/base/instance2dat.py @@ -17,12 +17,14 @@ # IMPT: Only works on non-nested block models at the moment! -def instance2dat(instance, output_filename): - output_file = open(output_filename,"w") +def instance2dat(instance, output_filename): + output_file = open(output_filename, "w") for set_name, set_object in instance.component_map(Set, active=True).items(): - if (set_object.initialize is not None) and (type(set_object.initialize) is types.FunctionType): + if (set_object.initialize is not None) and ( + type(set_object.initialize) is types.FunctionType + ): continue if (set_name.find("_index_set") == -1) and (set_name.find("_domain") == -1): @@ -31,13 +33,15 @@ def instance2dat(instance, output_filename): continue output_file.write("set " + set_name + " := \n") for element in set_object: - output_file.write(element,) + output_file.write(element) output_file.write(";\n") elif set_object.dim() == 1: for index in set_object: - output_file.write("set " + set_name + "[\""+str(index) + "\"]"+" :=") + output_file.write( + "set " + set_name + "[\"" + str(index) + "\"]" + " :=" + ) for element in set_object[index]: - output_file.write(element,) + output_file.write(element) output_file.write(";\n") else: output_file.write("***MULTIPLY INDEXED SETS NOT IMPLEMENTED!!!\n") @@ -45,26 +49,30 @@ def instance2dat(instance, output_filename): output_file.write("\n") - for param_name, param_object in instance.component_map(Param, - active=True).items(): - if (param_object._initialize is not None) and (type(param_object._initialize) is types.FunctionType): + for param_name, param_object in instance.component_map(Param, active=True).items(): + if (param_object._initialize is not None) and ( + type(param_object._initialize) is types.FunctionType + ): continue elif len(param_object) == 0: continue if None in param_object: - output_file.write("param "+param_name+" := " - + str(value(param_object[None])) + " ;\n") + output_file.write( + "param " + param_name + " := " + str(value(param_object[None])) + " ;\n" + ) output_file.write("\n") else: output_file.write("param " + param_name + " := \n") if param_object.dim() == 1: for index in param_object: - output_file.write(str(index) + str(value(param_object[index])) + "\n") + output_file.write( + str(index) + str(value(param_object[index])) + "\n" + ) else: for index in param_object: for i in index: - output_file.write(i,) + output_file.write(i) output_file.write(str(value(param_object[index])) + "\n") output_file.write(";\n") output_file.write("\n") diff --git a/pyomo/core/base/label.py b/pyomo/core/base/label.py index 2700fcaf068..b642b834146 100644 --- a/pyomo/core/base/label.py +++ b/pyomo/core/base/label.py @@ -9,9 +9,16 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ['CounterLabeler', 'NumericLabeler', 'CNameLabeler', 'TextLabeler', - 'AlphaNumericTextLabeler','NameLabeler', 'CuidLabeler', - 'ShortNameLabeler'] +__all__ = [ + 'CounterLabeler', + 'NumericLabeler', + 'CNameLabeler', + 'TextLabeler', + 'AlphaNumericTextLabeler', + 'NameLabeler', + 'CuidLabeler', + 'ShortNameLabeler', +] import re @@ -26,6 +33,7 @@ # broadly problematic symbols. if solver-specific remaps are required, # they should be handled in the corresponding solver plugin. + class _CharMapper(object): def __init__(self, preserve, translate, other): """ @@ -35,13 +43,16 @@ def __init__(self, preserve, translate, other): other: the character to return for all characters not in preserve or translate """ - self.table = {k if isinstance(k, int) else ord(k): v - for k,v in dict(translate).items() } + self.table = { + k if isinstance(k, int) else ord(k): v for k, v in dict(translate).items() + } for c in preserve: _c = ord(c) if _c in self.table and self.table[_c] != c: - raise RuntimeError("Duplicate character '%s' appears in both " - "translate table and preserve list" % (c,)) + raise RuntimeError( + "Duplicate character '%s' appears in both " + "translate table and preserve list" % (c,) + ) self.table[_c] = c self.other = other @@ -58,35 +69,42 @@ def __getitem__(self, c): def make_table(self): return ''.join(self[i] for i in range(256)) + _alpha = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJLKMNOPQRSTUVWXYZ' _digit = '1234567890' -_cpxlp_translation_table = _CharMapper( preserve=_alpha+_digit+'()_', - translate = zip('[]{}', '()()'), - other='_' ).make_table() -def cpxlp_label_from_name(name): +_cpxlp_translation_table = _CharMapper( + preserve=_alpha + _digit + '()_', translate=zip('[]{}', '()()'), other='_' +).make_table() + +def cpxlp_label_from_name(name): if name is None: - raise RuntimeError("Illegal name=None supplied to " - "cpxlp_label_from_name function") + raise RuntimeError( + "Illegal name=None supplied to cpxlp_label_from_name function" + ) return str.translate(name, _cpxlp_translation_table) -_alphanum_translation_table = _CharMapper( preserve=_alpha+_digit+'_', - translate = {}, - other='_' ).make_table() -def alphanum_label_from_name(name): +_alphanum_translation_table = _CharMapper( + preserve=_alpha + _digit + '_', translate={}, other='_' +).make_table() + + +def alphanum_label_from_name(name): if name is None: - raise RuntimeError("Illegal name=None supplied to " - "alphanum_label_from_name function") + raise RuntimeError( + "Illegal name=None supplied to alphanum_label_from_name function" + ) return str.translate(name, _alphanum_translation_table) -class CuidLabeler(object): +class CuidLabeler(object): def __call__(self, obj=None): return ComponentUID(obj) + class CounterLabeler(object): def __init__(self, start=0): self._id = start @@ -95,6 +113,7 @@ def __call__(self, obj=None): self._id += 1 return self._id + class NumericLabeler(object): def __init__(self, prefix, start=0): self.id = start @@ -104,13 +123,18 @@ def __call__(self, obj=None): self.id += 1 return self.prefix + str(self.id) - @deprecated("The 'remove_obj' method is no longer " - "necessary now that 'getname' does not " - "support the use of a name buffer", version="6.4.1") + @deprecated( + "The 'remove_obj' method is no longer " + "necessary now that 'getname' does not " + "support the use of a name buffer", + version="6.4.1", + ) def remove_obj(self, obj): pass + + # -# TODO: [JDS] I would like to rename TextLabeler to LPLabeler - as it +# TODO: [JDS] I would like to rename TextLabeler to LPFileLabeler - as it # generated LP-file-compliant labels - and make the CNameLabeler the # TextLabeler. This makes sense as the name() is the closest thing we # have to a human-readable canonical text naming convention (the @@ -123,27 +147,45 @@ class CNameLabeler(object): def __call__(self, obj): return obj.getname(True) -class TextLabeler(object): + +class LPFileLabeler(object): def __call__(self, obj): return cpxlp_label_from_name(obj.getname(True)) - @deprecated("The 'remove_obj' method is no longer " - "necessary now that 'getname' does not " - "support the use of a name buffer", version="6.4.1") + @deprecated( + "The 'remove_obj' method is no longer " + "necessary now that 'getname' does not " + "support the use of a name buffer", + version="6.4.1", + ) def remove_obj(self, obj): pass + +TextLabeler = LPFileLabeler + + class AlphaNumericTextLabeler(object): def __call__(self, obj): return alphanum_label_from_name(obj.getname(True)) + class NameLabeler(object): def __call__(self, obj): return obj.getname(True) + class ShortNameLabeler(object): - def __init__(self, limit, suffix, start=0, labeler=None, - prefix="", caseInsensitive=False, legalRegex=None): + def __init__( + self, + limit, + suffix, + start=0, + labeler=None, + prefix="", + caseInsensitive=False, + legalRegex=None, + ): self.id = start self.prefix = prefix self.suffix = suffix @@ -165,8 +207,11 @@ def __call__(self, obj=None): shorten = False if lbl_len > self.limit: shorten = True - elif lbl_len == self.limit and lbl.startswith(self.prefix) \ - and lbl.endswith(self.suffix): + elif ( + lbl_len == self.limit + and lbl.startswith(self.prefix) + and lbl.endswith(self.suffix) + ): shorten = True elif (lbl.upper() if self.caseInsensitive else lbl) in self.known_labels: shorten = True @@ -180,7 +225,8 @@ def __call__(self, obj=None): raise RuntimeError( "Too many identifiers.\n\t" "The ShortNameLabeler cannot generate a guaranteed unique " - "label limited to %d characters" % (self.limit,)) + "label limited to %d characters" % (self.limit,) + ) lbl = self.prefix + lbl[tail:] + suffix if self.known_labels is not None: self.known_labels.add(lbl.upper() if self.caseInsensitive else lbl) diff --git a/pyomo/core/base/logical_constraint.py b/pyomo/core/base/logical_constraint.py index 5f0d718a8aa..6d553c66fed 100644 --- a/pyomo/core/base/logical_constraint.py +++ b/pyomo/core/base/logical_constraint.py @@ -21,15 +21,17 @@ from pyomo.common.log import is_debug_set from pyomo.common.modeling import NOTSET from pyomo.common.timing import ConstructionTimer + from pyomo.core.base.constraint import Constraint from pyomo.core.expr.boolean_value import as_boolean, BooleanConstant from pyomo.core.expr.numvalue import native_types, native_logical_types from pyomo.core.base.component import ActiveComponentData, ModelComponentFactory from pyomo.core.base.global_set import UnindexedComponent_index -from pyomo.core.base.indexed_component import \ - (ActiveIndexedComponent, - UnindexedComponent_set, - _get_indexed_component_data_name, ) +from pyomo.core.base.indexed_component import ( + ActiveIndexedComponent, + UnindexedComponent_set, + _get_indexed_component_data_name, +) from pyomo.core.base.misc import apply_indexed_rule from pyomo.core.base.set import Set @@ -70,8 +72,7 @@ def __init__(self, component=None): # following constructors: # - ActiveComponentData # - ComponentData - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET self._active = True @@ -106,7 +107,7 @@ class _GeneralLogicalConstraintData(_LogicalConstraintData): This class defines the data for a single general logical constraint. Constructor arguments: - component The LogicalStatment object that owns this data. + component The LogicalStatement object that owns this data. expr The Pyomo expression stored in this logical constraint. Public class attributes: @@ -128,8 +129,7 @@ def __init__(self, expr=None, component=None): # - _LogicalConstraintData, # - ActiveComponentData # - ComponentData - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET self._active = True @@ -137,18 +137,6 @@ def __init__(self, expr=None, component=None): if expr is not None: self.set_value(expr) - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - result = super(_GeneralLogicalConstraintData, self).__getstate__() - for i in _GeneralLogicalConstraintData.__slots__: - result[i] = getattr(self, i) - return result - - # Since this class requires no special processing of the state - # dictionary, it does not need to implement __setstate__() - # # Abstract Interface # @@ -173,7 +161,7 @@ def set_value(self, expr): expr_type = type(expr) if expr_type in native_types and expr_type not in native_logical_types: msg = ( - "LogicalStatment '%s' does not have a proper value. " + "LogicalStatement '%s' does not have a proper value. " "Found '%s'.\n" "Expecting a logical expression or Boolean value. Examples:" "\n (m.Y1 & m.Y2).implies(m.Y3)" @@ -233,7 +221,10 @@ class LogicalConstraint(ActiveIndexedComponent): """ _ComponentDataClass = _GeneralLogicalConstraintData - class Infeasible(object): pass + + class Infeasible(object): + pass + Feasible = ActiveIndexedComponent.Skip NoConstraint = ActiveIndexedComponent.Skip Violated = Infeasible @@ -275,7 +266,8 @@ def _setitem_when_not_present(self, index, value): return None else: return super(LogicalConstraint, self)._setitem_when_not_present( - index=index, value=value) + index=index, value=value + ) def construct(self, data=None): """ @@ -297,8 +289,7 @@ def construct(self, data=None): # Utilities like DAE assume this stays around # self.rule = None - if (_init_rule is None) and \ - (_init_expr is None): + if (_init_rule is None) and (_init_expr is None): # No construction role or expression specified. return @@ -317,9 +308,8 @@ def construct(self, data=None): logger.error( "Rule failed when generating expression for " "logical constraint %s:\n%s: %s" - % (self.name, - type(err).__name__, - err)) + % (self.name, type(err).__name__, err) + ) raise self._setitem_when_not_present(None, tmp) @@ -327,24 +317,19 @@ def construct(self, data=None): if _init_expr is not None: raise IndexError( "LogicalConstraint '%s': Cannot initialize multiple indices " - "of a logical constraint with a single expression" % - (self.name,)) + "of a logical constraint with a single expression" % (self.name,) + ) for ndx in self._index_set: try: - tmp = apply_indexed_rule(self, - _init_rule, - _self_parent, - ndx) + tmp = apply_indexed_rule(self, _init_rule, _self_parent, ndx) except Exception: err = sys.exc_info()[1] logger.error( "Rule failed when generating expression for " "logical constraint %s with index %s:\n%s: %s" - % (self.name, - str(ndx), - type(err).__name__, - err)) + % (self.name, str(ndx), type(err).__name__, err) + ) raise self._setitem_when_not_present(ndx, tmp) timer.report() @@ -354,13 +339,14 @@ def _pprint(self): Return data that will be printed for this component. """ return ( - [("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None), - ("Active", self.active), - ], + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ("Active", self.active), + ], self.items(), ("Body", "Active"), - lambda k, v: [v.body, v.active, ] + lambda k, v: [v.body, v.active], ) def display(self, prefix="", ostream=None): @@ -378,10 +364,13 @@ def display(self, prefix="", ostream=None): ostream.write("Size=" + str(len(self))) ostream.write("\n") - tabular_writer(ostream, prefix + tab, - ((k, v) for k, v in self._data.items() if v.active), - ("Body",), - lambda k, v: [v.body(), ]) + tabular_writer( + ostream, + prefix + tab, + ((k, v) for k, v in self._data.items() if v.active), + ("Body",), + lambda k, v: [v.body()], + ) # # Checks flags like Constraint.Skip, etc. before actually creating a @@ -393,17 +382,20 @@ def _check_skip_add(self, index, expr): _expr_type = expr.__class__ if expr is None: raise ValueError( - _rule_returned_none_error % - (_get_indexed_component_data_name(self, index),)) + _rule_returned_none_error + % (_get_indexed_component_data_name(self, index),) + ) if expr is True: raise ValueError( "LogicalConstraint '%s' is always True." - % (_get_indexed_component_data_name(self, index),)) + % (_get_indexed_component_data_name(self, index),) + ) if expr is False: raise ValueError( "LogicalConstraint '%s' is always False." - % (_get_indexed_component_data_name(self, index),)) + % (_get_indexed_component_data_name(self, index),) + ) if _expr_type is tuple and len(expr) == 1: if expr is LogicalConstraint.Skip: @@ -412,7 +404,8 @@ def _check_skip_add(self, index, expr): if expr is LogicalConstraint.Infeasible: raise ValueError( "LogicalConstraint '%s' cannot be passed 'Infeasible' as a value." - % (_get_indexed_component_data_name(self, index),)) + % (_get_indexed_component_data_name(self, index),) + ) return expr @@ -424,21 +417,10 @@ class ScalarLogicalConstraint(_GeneralLogicalConstraintData, LogicalConstraint): """ def __init__(self, *args, **kwds): - _GeneralLogicalConstraintData.__init__( - self, component=self, expr=None) + _GeneralLogicalConstraintData.__init__(self, component=self, expr=None) LogicalConstraint.__init__(self, *args, **kwds) self._index = UnindexedComponent_index - # - # Since this class derives from Component and - # Component.__getstate__ just packs up the entire __dict__ into - # the state dict, we do not need to define the __getstate__ or - # __setstate__ methods. We just defer to the super() get/set - # state. Since all of our get/set state methods rely on super() - # to traverse the MRO, this will automatically pick up both the - # Component and Data base classes. - # - # # Override abstract interface methods to first check for # construction @@ -453,13 +435,14 @@ def body(self): "Accessing the body of ScalarLogicalConstraint " "'%s' before the LogicalConstraint has been assigned " "an expression. There is currently " - "nothing to access." % self.name) + "nothing to access." % self.name + ) return _GeneralLogicalConstraintData.body.fget(self) raise ValueError( "Accessing the body of logical constraint '%s' " "before the LogicalConstraint has been constructed (there " - "is currently no value to return)." - % self.name) + "is currently no value to return)." % self.name + ) # # Singleton logical constraints are strange in that we want them to be @@ -478,8 +461,8 @@ def set_value(self, expr): raise ValueError( "Setting the value of logical constraint '%s' " "before the LogicalConstraint has been constructed (there " - "is currently no object to set)." - % self.name) + "is currently no object to set)." % self.name + ) if len(self._data) == 0: self._data[None] = self @@ -497,8 +480,8 @@ def add(self, index, expr): if index is not None: raise ValueError( "ScalarLogicalConstraint object '%s' does not accept " - "index values other than None. Invalid value: %s" - % (self.name, index)) + "index values other than None. Invalid value: %s" % (self.name, index) + ) self.set_value(expr) return self @@ -509,7 +492,6 @@ class SimpleLogicalConstraint(metaclass=RenamedClass): class IndexedLogicalConstraint(LogicalConstraint): - # # Leaving this method for backward compatibility reasons # @@ -536,8 +518,7 @@ def __init__(self, **kwargs): """Constructor""" args = (Set(),) if 'expr' in kwargs: - raise ValueError( - "LogicalConstraintList does not accept the 'expr' keyword") + raise ValueError("LogicalConstraintList does not accept the 'expr' keyword") LogicalConstraint.__init__(self, *args, **kwargs) def construct(self, data=None): @@ -546,8 +527,7 @@ def construct(self, data=None): """ generate_debug_messages = is_debug_set(logger) if generate_debug_messages: - logger.debug("Constructing logical constraint list %s" - % self.name) + logger.debug("Constructing logical constraint list %s" % self.name) if self._constructed: return @@ -576,30 +556,25 @@ def construct(self, data=None): while True: val = len(self._index_set) + 1 if generate_debug_messages: - logger.debug( - " Constructing logical constraint index " + str(val)) - expr = apply_indexed_rule(self, - _init_rule, - _self_parent, - val) + logger.debug(" Constructing logical constraint index " + str(val)) + expr = apply_indexed_rule(self, _init_rule, _self_parent, val) if expr is None: raise ValueError( "LogicalConstraintList '%s': rule returned None " - "instead of LogicalConstraintList.End" % (self.name,)) - if (expr.__class__ is tuple) and \ - (expr == LogicalConstraintList.End): + "instead of LogicalConstraintList.End" % (self.name,) + ) + if (expr.__class__ is tuple) and (expr == LogicalConstraintList.End): return self.add(expr) else: - for expr in _generator: if expr is None: raise ValueError( "LogicalConstraintList '%s': generator returned None " - "instead of LogicalConstraintList.End" % (self.name,)) - if (expr.__class__ is tuple) and \ - (expr == LogicalConstraintList.End): + "instead of LogicalConstraintList.End" % (self.name,) + ) + if (expr.__class__ is tuple) and (expr == LogicalConstraintList.End): return self.add(expr) @@ -608,4 +583,3 @@ def add(self, expr): next_idx = len(self._index_set) + 1 self._index_set.add(next_idx) return self.__setitem__(next_idx, expr) - diff --git a/pyomo/core/base/matrix_constraint.py b/pyomo/core/base/matrix_constraint.py index 0c22ae4c869..0c55dbc15d3 100644 --- a/pyomo/core/base/matrix_constraint.py +++ b/pyomo/core/base/matrix_constraint.py @@ -19,8 +19,7 @@ from pyomo.core.expr.numvalue import value from pyomo.core.expr.numeric_expr import LinearExpression from pyomo.core.base.component import ModelComponentFactory -from pyomo.core.base.constraint import (IndexedConstraint, - _ConstraintData) +from pyomo.core.base.constraint import IndexedConstraint, _ConstraintData from pyomo.repn.standard_repn import StandardRepn from collections.abc import Mapping @@ -28,6 +27,7 @@ logger = logging.getLogger('pyomo.core') + class _MatrixConstraintData(_ConstraintData): """ This class defines the data for a single linear constraint @@ -80,8 +80,7 @@ def canonical_form(self, compute_values=True): variables = [] coefficients = [] constant = 0 - for p in range(indptr[index], - indptr[index+1]): + for p in range(indptr[index], indptr[index + 1]): v = x[indices[p]] c = data[p] if not v.fixed: @@ -139,11 +138,9 @@ def __call__(self, exception=True): indices = comp._A_indices indptr = comp._A_indptr x = comp._x - ptrs = range(indptr[index], - indptr[index+1]) + ptrs = range(indptr[index], indptr[index + 1]) try: - return sum(x[indices[p]].value * data[p] - for p in ptrs) + return sum(x[indices[p]].value * data[p] for p in ptrs) except (ValueError, TypeError): if exception: raise @@ -153,15 +150,13 @@ def has_lb(self): """Returns :const:`False` when the lower bound is :const:`None` or negative infinity""" lb = self.lower - return (lb is not None) and \ - (lb != float('-inf')) + return (lb is not None) and (lb != float('-inf')) def has_ub(self): """Returns :const:`False` when the upper bound is :const:`None` or positive infinity""" ub = self.upper - return (ub is not None) and \ - (ub != float('inf')) + return (ub is not None) and (ub != float('inf')) def lslack(self): """Lower slack (body - lb). Returns :const:`None` if @@ -226,12 +221,11 @@ def body(self): indices = comp._A_indices indptr = comp._A_indptr x = comp._x - ptrs = range(indptr[index], - indptr[index+1]) + ptrs = range(indptr[index], indptr[index + 1]) return LinearExpression( linear_vars=[x[indices[p]] for p in ptrs], linear_coefs=[data[p] for p in ptrs], - constant=0 + constant=0, ) @property @@ -256,8 +250,7 @@ def equality(self): constraint.""" comp = self.parent_component() index = self._index - if (comp._lower[index] is None) or \ - (comp._upper[index] is None): + if (comp._lower[index] is None) or (comp._upper[index] is None): return False return comp._lower[index] == comp._upper[index] @@ -275,13 +268,10 @@ def strict_upper(self): def set_value(self, expr): """Set the expression on this constraint.""" - raise NotImplementedError( - "MatrixConstraint row elements can not be updated" - ) + raise NotImplementedError("MatrixConstraint row elements can not be updated") -@ModelComponentFactory.register( - "A set of constraint expressions in Ax=b form.") +@ModelComponentFactory.register("A set of constraint expressions in Ax=b form.") class MatrixConstraint(Mapping, IndexedConstraint): """ Defines a set of linear constraints of the form: @@ -329,7 +319,6 @@ class MatrixConstraint(Mapping, IndexedConstraint): """ def __init__(self, A_data, A_indices, A_indptr, lb, ub, x): - m = len(lb) n = len(x) nnz = len(A_data) @@ -348,16 +337,16 @@ def __init__(self, A_data, A_indices, A_indptr, lb, ub, x): def construct(self, data=None): """Construct the expression(s) for this constraint.""" if is_debug_set(logger): - logger.debug("Constructing constraint %s" - % (self.name)) + logger.debug("Constructing constraint %s" % (self.name)) if self._constructed: return self._constructed = True ref = weakref.ref(self) with PauseGC(): - self._data = tuple(_MatrixConstraintData(i, ref) - for i in range(len(self._lower))) + self._data = tuple( + _MatrixConstraintData(i, ref) for i in range(len(self._lower)) + ) # # Override some IndexedComponent methods diff --git a/pyomo/core/base/misc.py b/pyomo/core/base/misc.py index b6cbe8a3eb0..cf37ad48fea 100644 --- a/pyomo/core/base/misc.py +++ b/pyomo/core/base/misc.py @@ -21,15 +21,15 @@ logger = logging.getLogger('pyomo.core') relocated_module_attribute( - 'tabular_writer', 'pyomo.common.formatting.tabular_writer', - version='6.1') + 'tabular_writer', 'pyomo.common.formatting.tabular_writer', version='6.1' +) relocated_module_attribute( - 'sorted_robust', 'pyomo.common.sorting.sorted_robust', - version='6.1') + 'sorted_robust', 'pyomo.common.sorting.sorted_robust', version='6.1' +) def display(obj, ostream=None): - """ Display data in a Pyomo object""" + """Display data in a Pyomo object""" if ostream is None: ostream = sys.stdout try: @@ -37,15 +37,16 @@ def display(obj, ostream=None): except AttributeError: raise TypeError( "Error trying to display values for object of type %s:\n" - "\tObject does not support the 'display()' method" - % (type(obj), ) ) + "\tObject does not support the 'display()' method" % (type(obj),) + ) try: display_fcn(ostream=ostream) except Exception: err = sys.exc_info()[1] logger.error( "Error trying to display values for object of type %s:\n\t%s" - % (type(obj), err) ) + % (type(obj), err) + ) raise @@ -54,9 +55,9 @@ def create_name(name, ndx): if ndx is None: return name if type(ndx) is tuple: - tmp = str(ndx).replace(', ',',') - return name+"["+tmp[1:-1]+"]" - return name+"["+str(ndx)+"]" + tmp = str(ndx).replace(', ', ',') + return name + "[" + tmp[1:-1] + "]" + return name + "[" + str(ndx) + "]" def apply_indexed_rule(obj, rule, model, index, options=None): @@ -99,6 +100,7 @@ def apply_indexed_rule(obj, rule, model, index, options=None): else: return rule(model, index, **options) + def apply_parameterized_indexed_rule(obj, rule, model, param, index): if index.__class__ is tuple: return rule(model, param, *index) diff --git a/pyomo/core/base/numvalue.py b/pyomo/core/base/numvalue.py index f3fd1e3ea86..11d45228bf5 100644 --- a/pyomo/core/base/numvalue.py +++ b/pyomo/core/base/numvalue.py @@ -11,4 +11,5 @@ import sys from pyomo.core.expr import numvalue + sys.modules[__name__] = numvalue diff --git a/pyomo/core/base/objective.py b/pyomo/core/base/objective.py index 39ed65a281e..3c625d81c2d 100644 --- a/pyomo/core/base/objective.py +++ b/pyomo/core/base/objective.py @@ -9,35 +9,41 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ('Objective', - 'simple_objective_rule', - '_ObjectiveData', - 'minimize', - 'maximize', - 'simple_objectivelist_rule', - 'ObjectiveList') +__all__ = ( + 'Objective', + 'simple_objective_rule', + '_ObjectiveData', + 'minimize', + 'maximize', + 'simple_objectivelist_rule', + 'ObjectiveList', +) import sys import logging from weakref import ref as weakref_ref from pyomo.common.pyomo_typing import overload +from pyomo.common.deprecation import RenamedClass from pyomo.common.log import is_debug_set from pyomo.common.modeling import NOTSET -from pyomo.common.deprecation import RenamedClass from pyomo.common.formatting import tabular_writer from pyomo.common.timing import ConstructionTimer + from pyomo.core.expr.numvalue import value from pyomo.core.base.component import ActiveComponentData, ModelComponentFactory from pyomo.core.base.global_set import UnindexedComponent_index from pyomo.core.base.indexed_component import ( - ActiveIndexedComponent, UnindexedComponent_set, rule_wrapper, + ActiveIndexedComponent, + UnindexedComponent_set, + rule_wrapper, ) -from pyomo.core.base.expression import (_ExpressionData, - _GeneralExpressionDataImpl) +from pyomo.core.base.expression import _ExpressionData, _GeneralExpressionDataImpl from pyomo.core.base.set import Set from pyomo.core.base.initializer import ( - Initializer, IndexedCallInitializer, CountedCallInitializer, + Initializer, + IndexedCallInitializer, + CountedCallInitializer, ) from pyomo.core.base import minimize, maximize @@ -50,6 +56,7 @@ include the "return" statement at the end of your rule. """ + def simple_objective_rule(rule): """ This is a decorator that translates None into Objective.Skip. @@ -66,6 +73,7 @@ def O_rule(model, i, j): """ return rule_wrapper(rule, {None: Objective.Skip}) + def simple_objectivelist_rule(rule): """ This is a decorator that translates None into ObjectiveList.End. @@ -82,10 +90,12 @@ def O_rule(model, i, j): """ return rule_wrapper(rule, {None: ObjectiveList.End}) + # # This class is a pure interface # + class _ObjectiveData(_ExpressionData): """ This class defines the data for a single objective. @@ -118,9 +128,10 @@ def set_sense(self, sense): """Set the sense (direction) of this objective.""" raise NotImplementedError -class _GeneralObjectiveData(_GeneralExpressionDataImpl, - _ObjectiveData, - ActiveComponentData): + +class _GeneralObjectiveData( + _GeneralExpressionDataImpl, _ObjectiveData, ActiveComponentData +): """ This class defines the data for a single objective. @@ -143,36 +154,22 @@ class _GeneralObjectiveData(_GeneralExpressionDataImpl, _active A boolean that indicates whether this data is active """ - __pickle_slots__ = ("_sense",) - __slots__ = __pickle_slots__ + _GeneralExpressionDataImpl.__pickle_slots__ + __slots__ = ("_sense", "_args_") def __init__(self, expr=None, sense=minimize, component=None): _GeneralExpressionDataImpl.__init__(self, expr) # Inlining ActiveComponentData.__init__ - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET self._active = True self._sense = sense - if (self._sense != minimize) and \ - (self._sense != maximize): - raise ValueError("Objective sense must be set to one of " - "'minimize' (%s) or 'maximize' (%s). Invalid " - "value: %s'" % (minimize, maximize, sense)) - - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - state = _GeneralExpressionDataImpl.__getstate__(self) - for i in _GeneralObjectiveData.__pickle_slots__: - state[i] = getattr(self,i) - return state - - # Note: because NONE of the slots on this class need to be edited, - # we don't need to implement a specialized __setstate__ - # method. + if (self._sense != minimize) and (self._sense != maximize): + raise ValueError( + "Objective sense must be set to one of " + "'minimize' (%s) or 'maximize' (%s). Invalid " + "value: %s'" % (minimize, maximize, sense) + ) def set_value(self, expr): if expr is None: @@ -187,6 +184,7 @@ def set_value(self, expr): def sense(self): """Access sense (direction) of this objective.""" return self._sense + @sense.setter def sense(self, sense): """Set the sense (direction) of this objective.""" @@ -197,9 +195,12 @@ def set_sense(self, sense): if sense in {minimize, maximize}: self._sense = sense else: - raise ValueError("Objective sense must be set to one of " - "'minimize' (%s) or 'maximize' (%s). Invalid " - "value: %s'" % (minimize, maximize, sense)) + raise ValueError( + "Objective sense must be set to one of " + "'minimize' (%s) or 'maximize' (%s). Invalid " + "value: %s'" % (minimize, maximize, sense) + ) + @ModelComponentFactory.register("Expressions that are minimized or maximized.") class Objective(ActiveIndexedComponent): @@ -257,27 +258,20 @@ class Objective(ActiveIndexedComponent): def __new__(cls, *args, **kwds): if cls != Objective: return super(Objective, cls).__new__(cls) - if not args or (args[0] is UnindexedComponent_set and len(args)==1): + if not args or (args[0] is UnindexedComponent_set and len(args) == 1): return ScalarObjective.__new__(ScalarObjective) else: return IndexedObjective.__new__(IndexedObjective) @overload - def __init__(self, *indexes, expr=None, rule=None, sense=minimize, - name=None, doc=None): ... + def __init__( + self, *indexes, expr=None, rule=None, sense=minimize, name=None, doc=None + ): + ... def __init__(self, *args, **kwargs): _sense = kwargs.pop('sense', minimize) - _init = tuple( _arg for _arg in ( - kwargs.pop('rule', None), kwargs.pop('expr', None) - ) if _arg is not None ) - if len(_init) == 1: - _init = _init[0] - elif not _init: - _init = None - else: - raise ValueError("Duplicate initialization: Objective() only " - "accepts one of 'rule=' and 'expr='") + _init = self._pop_from_kwargs('Objective', kwargs, ('rule', 'expr'), None) kwargs.setdefault('ctype', Objective) ActiveIndexedComponent.__init__(self, *args, **kwargs) @@ -310,8 +304,8 @@ def construct(self, data=None): if rule.constant() and self.is_indexed(): raise IndexError( "Objective '%s': Cannot initialize multiple indices " - "of an objective with a single expression" % - (self.name,) ) + "of an objective with a single expression" % (self.name,) + ) block = self.parent_block() if rule.contains_indices(): @@ -330,8 +324,7 @@ def construct(self, data=None): else: # Bypass the index validation and create the member directly for index in self.index_set(): - ans = self._setitem_when_not_present( - index, rule(block, index)) + ans = self._setitem_when_not_present(index, rule(block, index)) if ans is not None: ans.set_sense(self._init_sense(block, index)) except Exception: @@ -339,10 +332,8 @@ def construct(self, data=None): logger.error( "Rule failed when generating expression for " "Objective %s with index %s:\n%s: %s" - % (self.name, - str(index), - type(err).__name__, - err)) + % (self.name, str(index), type(err).__name__, err) + ) raise finally: timer.report() @@ -352,8 +343,7 @@ def _getitem_when_not_present(self, index): raise KeyError(index) block = self.parent_block() - obj = self._setitem_when_not_present( - index, self.rule(block, index)) + obj = self._setitem_when_not_present(index, self.rule(block, index)) if obj is None: raise KeyError(index) obj.set_sense(self._init_sense(block, index)) @@ -365,17 +355,19 @@ def _pprint(self): Return data that will be printed for this component. """ return ( - [("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None), - ("Active", self.active) - ], + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ("Active", self.active), + ], self._data.items(), - ( "Active","Sense","Expression"), - lambda k, v: [ v.active, - ("minimize" if (v.sense == minimize) else "maximize"), - v.expr - ] - ) + ("Active", "Sense", "Expression"), + lambda k, v: [ + v.active, + ("minimize" if (v.sense == minimize) else "maximize"), + v.expr, + ], + ) def display(self, prefix="", ostream=None): """Provide a verbose display of this object""" @@ -384,18 +376,26 @@ def display(self, prefix="", ostream=None): tab = " " if ostream is None: ostream = sys.stdout - ostream.write(prefix+self.local_name+" : ") - ostream.write(", ".join("%s=%s" % (k,v) for k,v in [ + ostream.write(prefix + self.local_name + " : ") + ostream.write( + ", ".join( + "%s=%s" % (k, v) + for k, v in [ ("Size", len(self)), ("Index", self._index_set if self.is_indexed() else None), ("Active", self.active), - ] )) + ] + ) + ) ostream.write("\n") - tabular_writer( ostream, prefix+tab, - ((k,v) for k,v in self._data.items() if v.active), - ( "Active","Value" ), - lambda k, v: [ v.active, value(v), ] ) + tabular_writer( + ostream, + prefix + tab, + ((k, v) for k, v in self._data.items() if v.active), + ("Active", "Value"), + lambda k, v: [v.active, value(v)], + ) class ScalarObjective(_GeneralObjectiveData, Objective): @@ -409,21 +409,27 @@ def __init__(self, *args, **kwd): Objective.__init__(self, *args, **kwd) self._index = UnindexedComponent_index - # - # Since this class derives from Component and - # Component.__getstate__ just packs up the entire __dict__ into - # the state dict, we do not need to define the __getstate__ or - # __setstate__ methods. We just defer to the super() get/set - # state. Since all of our get/set state methods rely on super() - # to traverse the MRO, this will automatically pick up both the - # Component and Data base classes. - # - # # Override abstract interface methods to first check for # construction # + def __call__(self, exception=True): + if self._constructed: + if len(self._data) == 0: + raise ValueError( + "Evaluating the expression of ScalarObjective " + "'%s' before the Objective has been assigned " + "a sense or expression (there is currently " + "no value to return)." % (self.name) + ) + return super().__call__(exception) + raise ValueError( + "Evaluating the expression of objective '%s' " + "before the Objective has been constructed (there " + "is currently no value to return)." % (self.name) + ) + @property def expr(self): """Access the expression of this objective.""" @@ -432,14 +438,16 @@ def expr(self): raise ValueError( "Accessing the expression of ScalarObjective " "'%s' before the Objective has been assigned " - "a sense or expression. There is currently " - "nothing to access." % (self.name)) + "a sense or expression (there is currently " + "no value to return)." % (self.name) + ) return _GeneralObjectiveData.expr.fget(self) raise ValueError( "Accessing the expression of objective '%s' " "before the Objective has been constructed (there " - "is currently no value to return)." - % (self.name)) + "is currently no value to return)." % (self.name) + ) + @expr.setter def expr(self, expr): """Set the expression of this objective.""" @@ -453,14 +461,16 @@ def sense(self): raise ValueError( "Accessing the sense of ScalarObjective " "'%s' before the Objective has been assigned " - "a sense or expression. There is currently " - "nothing to access." % (self.name)) + "a sense or expression (there is currently " + "no value to return)." % (self.name) + ) return _GeneralObjectiveData.sense.fget(self) raise ValueError( "Accessing the sense of objective '%s' " "before the Objective has been constructed (there " - "is currently no value to return)." - % (self.name)) + "is currently no value to return)." % (self.name) + ) + @sense.setter def sense(self, sense): """Set the sense (direction) of this objective.""" @@ -486,8 +496,8 @@ def set_value(self, expr): raise ValueError( "Setting the value of objective '%s' " "before the Objective has been constructed (there " - "is currently no object to set)." - % (self.name)) + "is currently no object to set)." % (self.name) + ) if not self._data: self._data[None] = self return super().set_value(expr) @@ -501,8 +511,8 @@ def set_sense(self, sense): raise ValueError( "Setting the sense of objective '%s' " "before the Objective has been constructed (there " - "is currently no object to set)." - % (self.name)) + "is currently no object to set)." % (self.name) + ) # # Leaving this method for backward compatibility reasons. @@ -513,8 +523,8 @@ def add(self, index, expr): if index is not None: raise ValueError( "ScalarObjective object '%s' does not accept " - "index values other than None. Invalid value: %s" - % (self.name, index)) + "index values other than None. Invalid value: %s" % (self.name, index) + ) self.set_value(expr) return self @@ -525,7 +535,6 @@ class SimpleObjective(metaclass=RenamedClass): class IndexedObjective(Objective): - # # Leaving this method for backward compatibility reasons # @@ -546,13 +555,13 @@ class ObjectiveList(IndexedObjective): an index value is not specified. """ - class End(object): pass + class End(object): + pass def __init__(self, **kwargs): """Constructor""" if 'expr' in kwargs: - raise ValueError( - "ObjectiveList does not accept the 'expr' keyword") + raise ValueError("ObjectiveList does not accept the 'expr' keyword") _rule = kwargs.pop('rule', None) self._starting_index = kwargs.pop('starting_index', 1) @@ -564,9 +573,7 @@ def __init__(self, **kwargs): # after the base class is set up so that is_indexed() is # reliable. if self.rule is not None and type(self.rule) is IndexedCallInitializer: - self.rule = CountedCallInitializer( - self, self.rule, self._starting_index - ) + self.rule = CountedCallInitializer(self, self.rule, self._starting_index) def construct(self, data=None): """ @@ -574,11 +581,10 @@ def construct(self, data=None): """ if self._constructed: return - self._constructed=True + self._constructed = True if is_debug_set(logger): - logger.debug("Constructing objective list %s" - % (self.name)) + logger.debug("Constructing objective list %s" % (self.name)) self.index_set().construct() @@ -601,4 +607,3 @@ def add(self, expr, sense=minimize): sense = sense(self.parent_block(), next_idx) ans.set_sense(sense) return ans - diff --git a/pyomo/core/base/param.py b/pyomo/core/base/param.py index c421a8fb12f..859b52702bf 100644 --- a/pyomo/core/base/param.py +++ b/pyomo/core/base/param.py @@ -17,25 +17,29 @@ from weakref import ref as weakref_ref from pyomo.common.pyomo_typing import overload +from pyomo.common.autoslots import AutoSlots from pyomo.common.deprecation import deprecation_warning, RenamedClass from pyomo.common.log import is_debug_set from pyomo.common.modeling import NOTSET +from pyomo.common.numeric_types import native_types, value as expr_value from pyomo.common.timing import ConstructionTimer +from pyomo.core.expr.numvalue import NumericValue from pyomo.core.base.component import ComponentData, ModelComponentFactory from pyomo.core.base.global_set import UnindexedComponent_index from pyomo.core.base.indexed_component import ( - IndexedComponent, UnindexedComponent_set, IndexedComponent_NDArrayMixin + IndexedComponent, + UnindexedComponent_set, + IndexedComponent_NDArrayMixin, ) from pyomo.core.base.initializer import Initializer from pyomo.core.base.misc import apply_indexed_rule, apply_parameterized_indexed_rule -from pyomo.core.base.numvalue import ( - NumericValue, native_types, value as expr_value -) -from pyomo.core.base.set import Any, GlobalSetBase, Reals +from pyomo.core.base.set import Reals, _AnySet from pyomo.core.base.units_container import units +from pyomo.core.expr import GetItemExpression logger = logging.getLogger('pyomo.core') + def _raise_modifying_immutable_error(obj, index): if obj.is_indexed(): name = "%s[%s]" % (obj.name, index) @@ -45,17 +49,21 @@ def _raise_modifying_immutable_error(obj, index): "Attempting to set the value of the immutable parameter " "%s after the parameter has been constructed. If you intend " "to change the value of this parameter dynamically, please " - "declare the parameter as mutable [i.e., Param(mutable=True)]" - % (name,)) + "declare the parameter as mutable [i.e., Param(mutable=True)]" % (name,) + ) -class _ImplicitAny(Any.__class__): +class _ImplicitAny(_AnySet): """An Any that issues a deprecation warning for non-Real values. This is a helper class to implement the deprecation warnings for the change of Param's implicit domain from Any to Reals. """ + + __slots__ = ('_owner',) + __autoslot_mappers__ = {'_owner': AutoSlots.weakref_mapper} + def __new__(cls, **kwargs): # Strip off owner / kwargs before calling base __new__ return super().__new__(cls) @@ -65,21 +73,11 @@ def __init__(self, owner, **kwargs): super().__init__(**kwargs) self._component = weakref_ref(self) self.construct() - - def __getstate__(self): - state = super().__getstate__() - state['_owner'] = None if self._owner is None else self._owner() - return state - - def __setstate__(self, state): - _owner = state.pop('_owner') - super().__setstate__(state) - self._owner = None if _owner is None else weakref_ref(_owner) - - def __deepcopy__(self, memo): - # Note: we need to start super() at GlobalSetBase to actually - # copy this object - return super(GlobalSetBase, self).__deepcopy__(memo) + # Because this is a "global set", we need to define the _bounds + # and _interval fields + object.__setattr__(self, '_parent', None) + self._bounds = (None, None) + self._interval = (None, None, None) def __contains__(self, val): if val not in Reals: @@ -94,7 +92,9 @@ def __contains__(self, val): "future. If you really intend the domain of this Param" "to be 'Any', you can suppress this warning by explicitly " "specifying 'within=Any' to the Param constructor.", - version='5.6.9', remove_in='6.0') + version='5.6.9', + remove_in='6.0', + ) return True # This should "mock up" a global set, so the "name" should always be @@ -110,6 +110,7 @@ def _parent(self): if self._owner is None or self._owner() is None: return None return self._owner()._parent + # This is not settable. However the base classes assume that it is, # so we need to define the setter and just ignore the incoming value @_parent.setter @@ -144,15 +145,6 @@ def __init__(self, component): # self._value = Param.NoValue - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - state = super(_ParamData, self).__getstate__() - for i in _ParamData.__slots__: - state[i] = getattr(self, i) - return state - # Note: because NONE of the slots on this class need to be edited, # we don't need to implement a specialized __setstate__ method. @@ -180,8 +172,8 @@ def set_value(self, value, idx=NOTSET): _src_magnitude = expr_value(value) _src_units = units.get_units(value) value = units.convert_value( - num_value=_src_magnitude, from_units=_src_units, - to_units=_comp._units) + num_value=_src_magnitude, from_units=_src_units, to_units=_comp._units + ) old_value, self._value = self._value, value try: @@ -200,8 +192,8 @@ def __call__(self, exception=True): "Error evaluating Param value (%s):\n\tThe Param value is " "currently set to an invalid value. This is\n\ttypically " "from a scalar Param or mutable Indexed Param without\n" - "\tan initial or default value." - % ( self.name, )) + "\tan initial or default value." % (self.name,) + ) else: return None return self._value @@ -210,6 +202,7 @@ def __call__(self, exception=True): def value(self): """Return the value for this variable.""" return self() + @value.setter def value(self, val): """Set the value for this variable.""" @@ -244,24 +237,26 @@ def _compute_polynomial_degree(self, result): return 0 -@ModelComponentFactory.register("Parameter data that is used to define a model instance.") +@ModelComponentFactory.register( + "Parameter data that is used to define a model instance." +) class Param(IndexedComponent, IndexedComponent_NDArrayMixin): """ A parameter value, which may be defined over an index. Constructor Arguments: - domain + domain A set that defines the type of values that each parameter must be. - within + within A set that defines the type of values that each parameter must be. - validate - A rule for validating this parameter w.r.t. data that exists in + validate + A rule for validating this parameter w.r.t. data that exists in the model - default - A scalar, rule, or dictionary that defines default values for + default + A scalar, rule, or dictionary that defines default values for this parameter - initialize - A dictionary or rule for setting up this parameter with existing + initialize + A dictionary or rule for setting up this parameter with existing model data unit: pyomo unit expression An expression containing the units for the parameter @@ -275,10 +270,12 @@ class Param(IndexedComponent, IndexedComponent_NDArrayMixin): """ DefaultMutable = False + _ComponentDataClass = _ParamData class NoValue(object): """A dummy type that is pickle-safe that we can use as the default value for Params to indicate that no valid value is present.""" + pass def __new__(cls, *args, **kwds): @@ -290,19 +287,31 @@ def __new__(cls, *args, **kwds): return super(Param, cls).__new__(IndexedParam) @overload - def __init__(self, *indexes, rule=NOTSET, initialize=NOTSET, - domain=None, within=None, validate=None, mutable=False, default=NoValue, - initialize_as_dense=False, units=None, name=None, doc=None): ... + def __init__( + self, + *indexes, + rule=NOTSET, + initialize=NOTSET, + domain=None, + within=None, + validate=None, + mutable=False, + default=NoValue, + initialize_as_dense=False, + units=None, + name=None, + doc=None, + ): + ... def __init__(self, *args, **kwd): - _init = self._pop_from_kwargs( - 'Param', kwd, ('rule', 'initialize'), NOTSET) + _init = self._pop_from_kwargs('Param', kwd, ('rule', 'initialize'), NOTSET) self.domain = self._pop_from_kwargs('Param', kwd, ('domain', 'within')) - self._validate = kwd.pop('validate', None ) - self._mutable = kwd.pop('mutable', Param.DefaultMutable ) - self._default_val = kwd.pop('default', Param.NoValue ) + self._validate = kwd.pop('validate', None) + self._mutable = kwd.pop('mutable', Param.DefaultMutable) + self._default_val = kwd.pop('default', Param.NoValue) self._dense_initialize = kwd.pop('initialize_as_dense', False) - self._units = kwd.pop('units', None) + self._units = kwd.pop('units', None) if self._units is not None: self._units = units.get_units(self._units) self._mutable = True @@ -313,9 +322,11 @@ def __init__(self, *args, **kwd): if self.domain is None: self.domain = _ImplicitAny(owner=self, name='Any') # After IndexedComponent.__init__ so we can call is_indexed(). - self._rule = Initializer(_init, - treat_sequences_as_mappings=self.is_indexed(), - arg_not_specified=NOTSET) + self._rule = Initializer( + _init, + treat_sequences_as_mappings=self.is_indexed(), + arg_not_specified=NOTSET, + ) def __len__(self): """ @@ -339,7 +350,7 @@ def __contains__(self, idx): # We do not need to override keys(), as the __len__ override will # cause the base class keys() to correctly correctly handle default # values - #def keys(self, ordered=False): + # def keys(self, sort=None): @property def mutable(self): @@ -393,19 +404,19 @@ def extract_values(self): # Thus, we need to create a temporary dictionary that contains the # values from the ParamData objects. # - return {key:param_value() for key,param_value in self.items()} + return {key: param_value() for key, param_value in self.items()} elif not self.is_indexed(): # # The parameter is a scalar, so we need to create a temporary # dictionary using the value for this parameter. # - return { None: self() } + return {None: self()} else: # # The parameter is not mutable, so iteritems() can be # converted into a dictionary containing parameter values. # - return dict( self.items() ) + return dict(self.items()) def extract_values_sparse(self): """ @@ -431,13 +442,13 @@ def extract_values_sparse(self): # The parameter is a scalar, so we need to create a temporary # dictionary using the value for this parameter. # - return { None: self() } + return {None: self()} else: # # The parameter is not mutable, so sparse_iteritems() can be # converted into a dictionary containing parameter values. # - return dict( self.sparse_iteritems() ) + return dict(self.sparse_iteritems()) def store_values(self, new_values, check=True): """ @@ -451,9 +462,10 @@ def store_values(self, new_values, check=True): _raise_modifying_immutable_error(self, '*') # _srcType = type(new_values) - _isDict = _srcType is dict or ( \ + _isDict = _srcType is dict or ( hasattr(_srcType, '__getitem__') - and not isinstance(new_values, NumericValue) ) + and not isinstance(new_values, NumericValue) + ) # if check: if _isDict: @@ -480,7 +492,7 @@ def store_values(self, new_values, check=True): else: # For scalars, we will choose an approach based on # how "dense" the Param is - if not self._data: # empty + if not self._data: # empty for index in self._index_set: p = self._data[index] = _ParamData(self) p._value = new_values @@ -500,8 +512,8 @@ def store_values(self, new_values, check=True): if None not in new_values: raise RuntimeError( "Cannot store value for scalar Param %s:\n\tNo value " - "with index None in the new values dict." - % (self.name,)) + "with index None in the new values dict." % (self.name,) + ) new_values = new_values[None] # scalars have to be handled differently self[None] = new_values @@ -512,13 +524,16 @@ def set_default(self, val): NOTE: this test will not validate the value of function return values. """ - if self._constructed \ - and val is not Param.NoValue \ - and type(val) in native_types \ - and val not in self.domain: + if ( + self._constructed + and val is not Param.NoValue + and type(val) in native_types + and val not in self.domain + ): raise ValueError( - "Default value (%s) is not valid for Param %s domain %s" % - (str(val), self.name, self.domain.name)) + "Default value (%s) is not valid for Param %s domain %s" + % (str(val), self.name, self.domain.name) + ) self._default_val = val def default(self): @@ -528,11 +543,11 @@ def default(self): Possible values: Param.NoValue No default value is provided. - Numeric - A constant value that is the default value for all undefined + Numeric + A constant value that is the default value for all undefined parameters. - Function - f(model, i) returns the value for the default value for + Function + f(model, i) returns the value for the default value for parameter i """ return self._default_val @@ -558,13 +573,13 @@ def _getitem_when_not_present(self, index): ans._index = index return ans if self.is_indexed(): - idx_str = '%s[%s]' % (self.name, index,) + idx_str = '%s[%s]' % (self.name, index) else: idx_str = '%s' % (self.name,) raise ValueError( "Error retrieving immutable Param value (%s):\n\tThe Param " - "value is undefined and no default value is specified." - % ( idx_str,) ) + "value is undefined and no default value is specified." % (idx_str,) + ) _default_type = type(val) _check_value_domain = True @@ -577,7 +592,8 @@ def _getitem_when_not_present(self, index): elif _default_type is types.FunctionType: val = apply_indexed_rule(self, val, self.parent_block(), index) elif hasattr(val, '__getitem__') and ( - not isinstance(val, NumericValue) or val.is_indexed() ): + not isinstance(val, NumericValue) or val.is_indexed() + ): # Things that look like Dictionaries should be allowable. This # includes other IndexedComponent objects. val = val[index] @@ -698,7 +714,6 @@ def _setitem_when_not_present(self, index, value, _check_domain=True): del self._data[index] raise - def _validate_value(self, index, value, validate_domain=True, data=None): """ Validate a given input/value pair. @@ -711,19 +726,21 @@ def _validate_value(self, index, value, validate_domain=True, data=None): index = data.index() raise ValueError( "Invalid parameter value: %s[%s] = '%s', value type=%s.\n" - "\tValue not in parameter domain %s" % - (self.name, index, value, type(value), self.domain.name)) + "\tValue not in parameter domain %s" + % (self.name, index, value, type(value), self.domain.name) + ) if self._validate: if index is NOTSET: index = data.index() valid = apply_parameterized_indexed_rule( - self, self._validate, self.parent_block(), value, index ) + self, self._validate, self.parent_block(), value, index + ) if not valid: raise ValueError( "Invalid parameter value: %s[%s] = '%s', value type=%s.\n" - "\tValue failed parameter validation rule" % - ( self.name, index, value, type(value) ) ) - + "\tValue failed parameter validation rule" + % (self.name, index, value, type(value)) + ) def construct(self, data=None): """ @@ -742,9 +759,10 @@ def construct(self, data=None): return timer = ConstructionTimer(self) - if is_debug_set(logger): #pragma:nocover - logger.debug("Constructing Param, name=%s, from data=%s" - % ( self.name, str(data) )) + if is_debug_set(logger): # pragma:nocover + logger.debug( + "Constructing Param, name=%s, from data=%s" % (self.name, str(data)) + ) try: # @@ -752,12 +770,15 @@ def construct(self, data=None): # the domain. # val = self._default_val - if val is not Param.NoValue \ - and type(val) in native_types \ - and val not in self.domain: + if ( + val is not Param.NoValue + and type(val) in native_types + and val not in self.domain + ): raise ValueError( - "Default value (%s) is not valid for Param %s domain %s" % - (str(val), self.name, self.domain.name)) + "Default value (%s) is not valid for Param %s domain %s" + % (str(val), self.name, self.domain.name) + ) # # Flag that we are in the "during construction" phase # @@ -777,20 +798,21 @@ def construct(self, data=None): raise ValueError( "Attempting to initialize parameter=%s with data=%s.\n" "\tData type is not a mapping type, and a Mapping is " - "expected." % (self.name, str(data)) ) + "expected." % (self.name, str(data)) + ) else: data_items = iter(()) try: for key, val in data_items: - self._setitem_when_not_present( - self._validate_index(key), val) + self._setitem_when_not_present(self._validate_index(key), val) except: msg = sys.exc_info()[1] raise RuntimeError( "Failed to set value for param=%s, index=%s, value=%s.\n" "\tsource error message=%s" - % (self.name, str(key), str(val), str(msg)) ) + % (self.name, str(key), str(val), str(msg)) + ) # # Flag that things are fully constructed now (and changing an # immutable Param is now an exception). @@ -809,15 +831,15 @@ def _pprint(self): Return data that will be printed for this component. """ if self._default_val is Param.NoValue: - default = "None" # for backwards compatibility in reporting + default = "None" # for backwards compatibility in reporting elif type(self._default_val) is types.FunctionType: default = "(function)" else: default = str(self._default_val) if self._mutable or not self.is_indexed(): - dataGen = lambda k, v: [ v._value, ] + dataGen = lambda k, v: [v._value] else: - dataGen = lambda k, v: [ v, ] + dataGen = lambda k, v: [v] headers = [ ("Size", len(self)), ("Index", self._index_set if self.is_indexed() else None), @@ -827,15 +849,10 @@ def _pprint(self): ] if self._units is not None: headers.append(('Units', str(self._units))) - return ( headers, - self.sparse_iteritems(), - ("Value",), - dataGen, - ) + return (headers, self.sparse_iteritems(), ("Value",), dataGen) class ScalarParam(_ParamData, Param): - def __init__(self, *args, **kwds): Param.__init__(self, *args, **kwds) _ParamData.__init__(self, component=self) @@ -869,7 +886,8 @@ def __call__(self, exception=True): raise ValueError( "Evaluating the numeric value of parameter '%s' before\n\t" "the Param has been constructed (there is currently no " - "value to return)." % (self.name,) ) + "value to return)." % (self.name,) + ) def set_value(self, value, index=NOTSET): if index is NOTSET: @@ -896,26 +914,48 @@ class SimpleParam(metaclass=RenamedClass): class IndexedParam(Param): - def __call__(self, exception=True): """Compute the value of the parameter""" if exception: - raise TypeError('Cannot compute the value of an indexed Param (%s)' - % (self.name,) ) + raise TypeError( + 'Cannot compute the value of an indexed Param (%s)' % (self.name,) + ) # Because IndexedParam can use a non-standard data store (i.e., the # values in the _data dict may not be ComponentData objects), we # need to override the normal scheme for pre-allocating # ComponentData objects during deepcopy. def _create_objects_for_deepcopy(self, memo, component_list): - _id = id(self) - if _id not in memo: - component_list.append(self) - memo[_id] = self.__class__.__new__(self.__class__) if self.mutable: - for obj in self._data.values(): - _id = id(obj) - if _id in memo: - continue - component_list.append(obj) - memo[id(obj)] = obj.__class__.__new__(obj.__class__) + # Normal indexed object; leverage base implementation + return super()._create_objects_for_deepcopy(memo, component_list) + # This is immutable; only add the container (not the _data) to + # the component_list. + _new = self.__class__.__new__(self.__class__) + _ans = memo.setdefault(id(self), _new) + if _ans is _new: + component_list.append(self) + return _ans + + # Because CP supports indirection [the ability to index objects by + # another (inter) Var] for certain types (including Var), we will + # catch the normal RuntimeError and return a (variable) + # GetItemExpression. + # + # FIXME: We should integrate this logic into the base implementation + # of `__getitem__()`, including the recognition / differentiation + # between potentially variable GetItemExpression objects and + # "constant" GetItemExpression objects. That will need to wait for + # the expression rework [JDS; Nov 22]. + def __getitem__(self, args): + try: + return super().__getitem__(args) + except: + tmp = args if args.__class__ is tuple else (args,) + if any( + hasattr(arg, 'is_potentially_variable') + and arg.is_potentially_variable() + for arg in tmp + ): + return GetItemExpression((self,) + tmp) + raise diff --git a/pyomo/core/base/piecewise.py b/pyomo/core/base/piecewise.py index d6e09e7c5c8..8ab6ce38ca5 100644 --- a/pyomo/core/base/piecewise.py +++ b/pyomo/core/base/piecewise.py @@ -48,6 +48,7 @@ from pyomo.common.log import is_debug_set from pyomo.common.deprecation import deprecation_warning +from pyomo.common.numeric_types import value from pyomo.common.timing import ConstructionTimer from pyomo.core.base.block import Block, _BlockData from pyomo.core.base.component import ModelComponentFactory @@ -55,27 +56,30 @@ from pyomo.core.base.sos import SOSConstraint from pyomo.core.base.var import Var, _VarData, IndexedVar from pyomo.core.base.set_types import PositiveReals, NonNegativeReals, Binary -from pyomo.core.base.numvalue import value from pyomo.core.base.util import flatten_tuple logger = logging.getLogger('pyomo.core') + + class PWRepn(str, enum.Enum): - SOS2 = 'SOS2' - BIGM_BIN = 'BIGM_BIN' + SOS2 = 'SOS2' + BIGM_BIN = 'BIGM_BIN' BIGM_SOS1 = 'BIGM_SOS1' - CC = 'CC' - DCC = 'DCC' - DLOG = 'DLOG' - LOG = 'LOG' - MC = 'MC' - INC = 'INC' + CC = 'CC' + DCC = 'DCC' + DLOG = 'DLOG' + LOG = 'LOG' + MC = 'MC' + INC = 'INC' + class Bound(str, enum.Enum): Lower = 'Lower' Upper = 'Upper' Equal = 'Equal' -# BE SURE TO CHANGE THE PIECWISE DOCSTRING + +# BE SURE TO CHANGE THE PIECEWISE DOCSTRING # IF THIS GETS CHANGED _WARNING_TOLERANCE = 1e-8 @@ -88,7 +92,8 @@ def _isNonDecreasing(vals): it = iter(vals) next(it) op = operator.ge - return all(itertools.starmap(op, zip(it,vals))) + return all(itertools.starmap(op, zip(it, vals))) + def _isNonIncreasing(vals): """ @@ -98,16 +103,18 @@ def _isNonIncreasing(vals): it = iter(vals) next(it) op = operator.le - return all(itertools.starmap(op, zip(it,vals))) + return all(itertools.starmap(op, zip(it, vals))) + def _isPowerOfTwo(x): """ checks that a number is a nonzero and positive power of 2 """ - if (x <= 0): + if x <= 0: return False else: - return ( (x & (x - 1)) == 0 ) + return (x & (x - 1)) == 0 + def _GrayCode(nbits): """ @@ -118,19 +125,20 @@ def _GrayCode(nbits): # important that we copy bitset each time graycode = [list(bitset)] - for i in range(2,(1< tol), zip(slopes, itertools.islice(slopes, 1, None)))): - msg = "**WARNING: Piecewise component '%s[%s]' has detected slopes of consecutive piecewise "\ - "segments to be within "+str(tol)+" of one another. Refer to the Piecewise help "\ - "documentation for information on how to disable this warning." + if not all( + itertools.starmap( + lambda x1, x2: (True) + if ((x1 is None) or (x2 is None)) + else (abs(x1 - x2) > tol), + zip(slopes, itertools.islice(slopes, 1, None)), + ) + ): + msg = ( + "**WARNING: Piecewise component '%s[%s]' has detected slopes of consecutive piecewise " + "segments to be within " + + str(tol) + + " of one another. Refer to the Piecewise help " + "documentation for information on how to disable this warning." + ) if index == (): index = None print(msg % (name, flatten_tuple(index))) if step is True: - return 0,values,True + return 0, values, True if _isNonDecreasing(slopes): # convex - return 1,values,False + return 1, values, False if _isNonIncreasing(slopes): # concave - return -1,values,False - return 0,values,False + return -1, values, False + return 0, values, False class _PiecewiseData(_BlockData): @@ -201,7 +227,7 @@ class _PiecewiseData(_BlockData): and piecewise constraint generators.. """ - def __init__(self,parent): + def __init__(self, parent): _BlockData.__init__(self, parent) self._constructed = True self._bound_type = None @@ -217,8 +243,7 @@ def updatePoints(self, domain_pts, range_pts): # ***Note: most (if not all) piecewise constraint generators # assume the list of domain points is sorted. if not _isNonDecreasing(domain_pts): - msg = "'%s' does not have a list of domain points "\ - "that is non-decreasing" + msg = "'%s' does not have a list of domain points that is non-decreasing" raise ValueError(msg % (self.name,)) self._domain_pts = domain_pts self._range_pts = range_pts @@ -233,51 +258,54 @@ def referenced_variables(self): def __call__(self, x): if self._constructed is False: - raise ValueError("Piecewise component %s has not " - "been constructed yet" % self.name) + raise ValueError( + "Piecewise component %s has not been constructed yet" % self.name + ) - for i in range(len(self._domain_pts)-1): + for i in range(len(self._domain_pts) - 1): xL = self._domain_pts[i] - xU = self._domain_pts[i+1] + xU = self._domain_pts[i + 1] if (xL <= x) and (x <= xU): yL = self._range_pts[i] - yU = self._range_pts[i+1] - if xL == xU: # a step function + yU = self._range_pts[i + 1] + if xL == xU: # a step function return yU # using future division - return yL + ((yU-yL)/(xU-xL))*(x-xL) - raise ValueError("The point %s is outside the list of domain " - "points for Piecewise component %s. The valid " - "point range is [%s,%s]." - % (x, self.name, - min(self._domain_pts), - max(self._domain_pts))) + return yL + ((yU - yL) / (xU - xL)) * (x - xL) + raise ValueError( + "The point %s is outside the list of domain " + "points for Piecewise component %s. The valid " + "point range is [%s,%s]." + % (x, self.name, min(self._domain_pts), max(self._domain_pts)) + ) + class _SimpleSinglePiecewise(object): """ Called when the piecewise points list has only two points """ - def construct(self,pblock,x_var,y_var): + def construct(self, pblock, x_var, y_var): x_pts = pblock._domain_pts y_pts = pblock._range_pts bound_type = pblock._bound_type - if None in [x_pts,y_pts,bound_type]: - raise RuntimeError("_SimpleSinglePiecewise: construct() called during"\ - "invalid state.") + if None in [x_pts, y_pts, bound_type]: + raise RuntimeError( + "_SimpleSinglePiecewise: construct() called duringinvalid state." + ) # create a single linear constraint LHS = y_var F_AT_XO = y_pts[0] # using future division - dF_AT_XO = (y_pts[1]-y_pts[0])/(x_pts[1]-x_pts[0]) - X_MINUS_XO = x_var-x_pts[0] + dF_AT_XO = (y_pts[1] - y_pts[0]) / (x_pts[1] - x_pts[0]) + X_MINUS_XO = x_var - x_pts[0] if bound_type == Bound.Upper: - expr= LHS <= F_AT_XO + dF_AT_XO*X_MINUS_XO + expr = LHS <= F_AT_XO + dF_AT_XO * X_MINUS_XO elif bound_type == Bound.Lower: - expr= LHS >= F_AT_XO + dF_AT_XO*X_MINUS_XO + expr = LHS >= F_AT_XO + dF_AT_XO * X_MINUS_XO elif bound_type == Bound.Equal: - expr= LHS == F_AT_XO + dF_AT_XO*X_MINUS_XO + expr = LHS == F_AT_XO + dF_AT_XO * X_MINUS_XO else: raise ValueError("Invalid Bound for _SimpleSinglePiecewise object") pblock.single_line_constraint = Constraint(expr=expr) @@ -285,12 +313,17 @@ def construct(self,pblock,x_var,y_var): # In order to enforce the same behavior as actual piecewise # constraints, we constrain the domain variable between the # outer domain pts. But in order to prevent filling the model - # with unecessary constraints, we only do this when absolutely + # with unnecessary constraints, we only do this when absolutely # necessary. if not x_var.lb is None and x_var.lb < x_pts[0]: - pblock.simplified_piecewise_domain_constraint_lower = Constraint(expr=x_pts[0] <= x_var) + pblock.simplified_piecewise_domain_constraint_lower = Constraint( + expr=x_pts[0] <= x_var + ) if not x_var.ub is None and x_var.ub > x_pts[1]: - pblock.simplified_piecewise_domain_constraint_upper = Constraint(expr=x_var <= x_pts[-1]) + pblock.simplified_piecewise_domain_constraint_upper = Constraint( + expr=x_var <= x_pts[-1] + ) + class _SimplifiedPiecewise(object): """ @@ -298,75 +331,84 @@ class _SimplifiedPiecewise(object): convex function or an upper bounding concave function """ - def construct(self,pblock,x_var,y_var): + def construct(self, pblock, x_var, y_var): x_pts = pblock._domain_pts y_pts = pblock._range_pts bound_type = pblock._bound_type - if None in [x_pts,y_pts,bound_type]: - raise RuntimeError("_SimplifiedPiecewise: construct() called during "\ - "invalid state.") + if None in [x_pts, y_pts, bound_type]: + raise RuntimeError( + "_SimplifiedPiecewise: construct() called during invalid state." + ) len_x_pts = len(x_pts) conlist = pblock.simplified_piecewise_constraint = ConstraintList() - for i in range(len_x_pts-1): + for i in range(len_x_pts - 1): F_AT_XO = y_pts[i] - dF_AT_XO = (y_pts[i+1]-y_pts[i])/(x_pts[i+1]-x_pts[i]) + dF_AT_XO = (y_pts[i + 1] - y_pts[i]) / (x_pts[i + 1] - x_pts[i]) XO = x_pts[i] if bound_type == Bound.Upper: - conlist.add((0,-y_var+F_AT_XO+dF_AT_XO*(x_var-XO),None)) + conlist.add((0, -y_var + F_AT_XO + dF_AT_XO * (x_var - XO), None)) elif bound_type == Bound.Lower: - conlist.add((None,-y_var+F_AT_XO+dF_AT_XO*(x_var-XO),0)) + conlist.add((None, -y_var + F_AT_XO + dF_AT_XO * (x_var - XO), 0)) else: raise ValueError("Invalid Bound for _SimplifiedPiecewise object") # In order to enforce the same behavior as actual piecewise # constraints, we constrain the domain variable between the # outer domain pts. But in order to prevent filling the model - # with unecessary constraints, we only do this when absolutely + # with unnecessary constraints, we only do this when absolutely # necessary. if not x_var.lb is None and x_var.lb < x_pts[0]: - pblock.simplified_piecewise_domain_constraint_lower = Constraint(expr=x_pts[0] <= x_var) + pblock.simplified_piecewise_domain_constraint_lower = Constraint( + expr=x_pts[0] <= x_var + ) if not x_var.ub is None and x_var.ub > x_pts[-1]: - pblock.simplified_piecewise_domain_constraint_upper = Constraint(expr=x_var <= x_pts[-1]) + pblock.simplified_piecewise_domain_constraint_upper = Constraint( + expr=x_var <= x_pts[-1] + ) + class _SOS2Piecewise(object): """ Called to generate Piecewise constraint using the SOS2 formulation """ - def construct(self,pblock,x_var,y_var): + def construct(self, pblock, x_var, y_var): x_pts = pblock._domain_pts y_pts = pblock._range_pts bound_type = pblock._bound_type - if None in [x_pts,y_pts,bound_type]: - raise RuntimeError("_SOS2Piecewise: construct() called during "\ - "invalid state.") + if None in [x_pts, y_pts, bound_type]: + raise RuntimeError( + "_SOS2Piecewise: construct() called during invalid state." + ) len_x_pts = len(x_pts) # create indexers sos2_index = range(len_x_pts) # create vars - sos2_y = pblock.SOS2_y = Var(sos2_index,within=NonNegativeReals) + sos2_y = pblock.SOS2_y = Var(sos2_index, within=NonNegativeReals) # create piecewise constraints conlist = pblock.SOS2_constraint = ConstraintList() - conlist.add( (x_var-sum(sos2_y[i]*x_pts[i] for i in sos2_index),0) ) + conlist.add((x_var - sum(sos2_y[i] * x_pts[i] for i in sos2_index), 0)) LHS = y_var - RHS = sum(sos2_y[i]*y_pts[i] for i in sos2_index) + RHS = sum(sos2_y[i] * y_pts[i] for i in sos2_index) expr = None if bound_type == Bound.Upper: - conlist.add( (None,LHS-RHS,0) ) + conlist.add((None, LHS - RHS, 0)) elif bound_type == Bound.Lower: - conlist.add( (0,LHS-RHS,None) ) + conlist.add((0, LHS - RHS, None)) elif bound_type == Bound.Equal: - conlist.add( (LHS-RHS,0) ) + conlist.add((LHS - RHS, 0)) else: raise ValueError("Invalid Bound for _SOS2Piecewise object") - conlist.add( (sum(sos2_y[j] for j in sos2_index),1) ) + conlist.add((sum(sos2_y[j] for j in sos2_index), 1)) + def SOS2_rule(model): return [sos2_y[i] for i in sos2_index] + pblock.SOS2_sosconstraint = SOSConstraint(initialize=SOS2_rule, sos=2) @@ -375,50 +417,57 @@ class _DCCPiecewise(object): Called to generate Piecewise constraint using the DCC formulation """ - def construct(self,pblock,x_var,y_var): + def construct(self, pblock, x_var, y_var): x_pts = pblock._domain_pts y_pts = pblock._range_pts bound_type = pblock._bound_type - if None in [x_pts,y_pts,bound_type]: - raise RuntimeError("_DCCPiecewise: construct() called during "\ - "invalid state.") + if None in [x_pts, y_pts, bound_type]: + raise RuntimeError( + "_DCCPiecewise: construct() called during invalid state." + ) len_x_pts = len(x_pts) # create indexers - polytopes = range(1,len_x_pts) - vertices = range(1,len_x_pts+1) + polytopes = range(1, len_x_pts) + vertices = range(1, len_x_pts + 1) + def polytope_verts(p): - return range(p,p+2) + return range(p, p + 2) # create vars - pblock.DCC_lambda = Var(polytopes,vertices,within=PositiveReals) + pblock.DCC_lambda = Var(polytopes, vertices, within=PositiveReals) lmda = pblock.DCC_lambda - pblock.DCC_bin_y = Var(polytopes,within=Binary) + pblock.DCC_bin_y = Var(polytopes, within=Binary) bin_y = pblock.DCC_bin_y # create piecewise constraints - pblock.DCC_constraint1 = Constraint(expr=x_var==sum(lmda[p,v]*x_pts[v-1] \ - for p in polytopes \ - for v in polytope_verts(p))) + pblock.DCC_constraint1 = Constraint( + expr=x_var + == sum( + lmda[p, v] * x_pts[v - 1] for p in polytopes for v in polytope_verts(p) + ) + ) LHS = y_var - RHS = sum(lmda[p,v]*y_pts[v-1] for p in polytopes for v in polytope_verts(p)) + RHS = sum( + lmda[p, v] * y_pts[v - 1] for p in polytopes for v in polytope_verts(p) + ) expr = None if bound_type == Bound.Upper: - expr= LHS <= RHS + expr = LHS <= RHS elif bound_type == Bound.Lower: - expr= LHS >= RHS + expr = LHS >= RHS elif bound_type == Bound.Equal: - expr= LHS == RHS + expr = LHS == RHS else: raise ValueError("Invalid Bound for _DCCPiecewise object") pblock.DCC_constraint2 = Constraint(expr=expr) - def con3_rule(model,p): - return bin_y[p] == sum(lmda[p,v] for v in polytope_verts(p)) - pblock.DCC_constraint3 = Constraint(polytopes,rule=con3_rule) - pblock.DCC_constraint4 = Constraint(expr=sum(bin_y[p] \ - for p in polytopes) == 1) + def con3_rule(model, p): + return bin_y[p] == sum(lmda[p, v] for v in polytope_verts(p)) + + pblock.DCC_constraint3 = Constraint(polytopes, rule=con3_rule) + pblock.DCC_constraint4 = Constraint(expr=sum(bin_y[p] for p in polytopes) == 1) class _DLOGPiecewise(object): @@ -426,21 +475,21 @@ class _DLOGPiecewise(object): Called to generate Piecewise constraint using the DLOG formulation """ - def _Branching_Scheme(self,L): + def _Branching_Scheme(self, L): """ Branching scheme for DLOG """ MAX = 2**L mylists1 = {} - for i in range(1,L+1): + for i in range(1, L + 1): mylists1[i] = [] start = 1 - step = int(MAX/(2**i)) - while(start < MAX): - mylists1[i].extend([j for j in range(start,start+step)]) - start += 2*step + step = int(MAX / (2**i)) + while start < MAX: + mylists1[i].extend([j for j in range(start, start + step)]) + start += 2 * step - biglist = range(1,MAX+1) + biglist = range(1, MAX + 1) mylists2 = {} for i in sorted(mylists1.keys()): mylists2[i] = [] @@ -451,65 +500,76 @@ def _Branching_Scheme(self,L): return mylists1, mylists2 - def construct(self,pblock,x_var,y_var): - if not _isPowerOfTwo(len(pblock._domain_pts)-1): - msg = "'%s' does not have a list of domain points "\ - "with length (2^n)+1" + def construct(self, pblock, x_var, y_var): + if not _isPowerOfTwo(len(pblock._domain_pts) - 1): + msg = "'%s' does not have a list of domain points with length (2^n)+1" raise ValueError(msg % (pblock.name,)) x_pts = pblock._domain_pts y_pts = pblock._range_pts bound_type = pblock._bound_type - if None in [x_pts,y_pts,bound_type]: - raise RuntimeError("_DLOGPiecewise: construct() called during "\ - "invalid state.") + if None in [x_pts, y_pts, bound_type]: + raise RuntimeError( + "_DLOGPiecewise: construct() called during invalid state." + ) len_x_pts = len(x_pts) # create branching schemes - L_i = int(math.log(len_x_pts-1,2)) - B_ZERO,B_ONE = self._Branching_Scheme(L_i) + L_i = int(math.log(len_x_pts - 1, 2)) + B_ZERO, B_ONE = self._Branching_Scheme(L_i) # create indexers - polytopes = range(1,len_x_pts) - vertices = range(1,len_x_pts+1) - bin_y_index = range(1,L_i+1) + polytopes = range(1, len_x_pts) + vertices = range(1, len_x_pts + 1) + bin_y_index = range(1, L_i + 1) + def polytope_verts(p): - return range(p,p+2) + return range(p, p + 2) # create vars - pblock.DLOG_lambda = Var(polytopes,vertices,within=PositiveReals) + pblock.DLOG_lambda = Var(polytopes, vertices, within=PositiveReals) lmda = pblock.DLOG_lambda - pblock.DLOG_bin_y = Var(bin_y_index,within=Binary) + pblock.DLOG_bin_y = Var(bin_y_index, within=Binary) bin_y = pblock.DLOG_bin_y # create piecewise constraints - pblock.DLOG_constraint1 = Constraint(expr=x_var==sum(lmda[p,v]*x_pts[v-1] \ - for p in polytopes \ - for v in polytope_verts(p))) + pblock.DLOG_constraint1 = Constraint( + expr=x_var + == sum( + lmda[p, v] * x_pts[v - 1] for p in polytopes for v in polytope_verts(p) + ) + ) LHS = y_var - RHS = sum(lmda[p,v]*y_pts[v-1] for p in polytopes for v in polytope_verts(p)) + RHS = sum( + lmda[p, v] * y_pts[v - 1] for p in polytopes for v in polytope_verts(p) + ) expr = None if bound_type == Bound.Upper: - expr= LHS <= RHS + expr = LHS <= RHS elif bound_type == Bound.Lower: - expr= LHS >= RHS + expr = LHS >= RHS elif bound_type == Bound.Equal: - expr= LHS == RHS + expr = LHS == RHS else: raise ValueError("Invalid Bound for _DLOGPiecewise object") pblock.DLOG_constraint2 = Constraint(expr=expr) - pblock.DLOG_constraint3 = Constraint(expr=sum(lmda[p,v] \ - for p in polytopes \ - for v in polytope_verts(p)) == 1) - def con4_rule(model,l): - return sum(lmda[p,v] for p in B_ZERO[l] \ - for v in polytope_verts(p)) \ - <= bin_y[l] - pblock.DLOG_constraint4 = Constraint(bin_y_index,rule=con4_rule) - def con5_rule(model,l): - return sum(lmda[p,v] for p in B_ONE[l] \ - for v in polytope_verts(p)) \ - <= (1-bin_y[l]) - pblock.DLOG_constraint5 = Constraint(bin_y_index,rule=con5_rule) + pblock.DLOG_constraint3 = Constraint( + expr=sum(lmda[p, v] for p in polytopes for v in polytope_verts(p)) == 1 + ) + + def con4_rule(model, l): + return ( + sum(lmda[p, v] for p in B_ZERO[l] for v in polytope_verts(p)) + <= bin_y[l] + ) + + pblock.DLOG_constraint4 = Constraint(bin_y_index, rule=con4_rule) + + def con5_rule(model, l): + return sum(lmda[p, v] for p in B_ONE[l] for v in polytope_verts(p)) <= ( + 1 - bin_y[l] + ) + + pblock.DLOG_constraint5 = Constraint(bin_y_index, rule=con5_rule) class _CCPiecewise(object): @@ -517,54 +577,55 @@ class _CCPiecewise(object): Called to generate Piecewise constraint using the CC formulation """ - def construct(self,pblock,x_var,y_var): + def construct(self, pblock, x_var, y_var): x_pts = pblock._domain_pts y_pts = pblock._range_pts bound_type = pblock._bound_type - if None in [x_pts,y_pts,bound_type]: - raise RuntimeError("_CCPiecewise: construct() called during "\ - "invalid state.") + if None in [x_pts, y_pts, bound_type]: + raise RuntimeError("_CCPiecewise: construct() called during invalid state.") len_x_pts = len(x_pts) # create indexers - polytopes = range(1,len_x_pts) - vertices = range(1,len_x_pts+1) + polytopes = range(1, len_x_pts) + vertices = range(1, len_x_pts + 1) + def vertex_polys(v): if v == 1: return [v] if v == len_x_pts: - return [v-1] + return [v - 1] else: - return [v-1,v] + return [v - 1, v] # create vars - pblock.CC_lambda = Var(vertices,within=NonNegativeReals) + pblock.CC_lambda = Var(vertices, within=NonNegativeReals) lmda = pblock.CC_lambda - pblock.CC_bin_y = Var(polytopes,within=Binary) + pblock.CC_bin_y = Var(polytopes, within=Binary) bin_y = pblock.CC_bin_y # create piecewise constraints - pblock.CC_constraint1 = Constraint(expr=x_var==sum(lmda[v]*x_pts[v-1] \ - for v in vertices)) + pblock.CC_constraint1 = Constraint( + expr=x_var == sum(lmda[v] * x_pts[v - 1] for v in vertices) + ) LHS = y_var - RHS = sum(lmda[v]*y_pts[v-1] for v in vertices) + RHS = sum(lmda[v] * y_pts[v - 1] for v in vertices) expr = None if bound_type == Bound.Upper: - expr= LHS <= RHS + expr = LHS <= RHS elif bound_type == Bound.Lower: - expr= LHS >= RHS + expr = LHS >= RHS elif bound_type == Bound.Equal: - expr= LHS == RHS + expr = LHS == RHS else: raise ValueError("Invalid Bound for _CCPiecewise object") pblock.CC_constraint2 = Constraint(expr=expr) - pblock.CC_constraint3 = Constraint(expr=sum(lmda[v] \ - for v in vertices) == 1) - def con4_rule(model,v): + pblock.CC_constraint3 = Constraint(expr=sum(lmda[v] for v in vertices) == 1) + + def con4_rule(model, v): return lmda[v] <= sum(bin_y[p] for p in vertex_polys(v)) - pblock.CC_constraint4 = Constraint(vertices,rule=con4_rule) - pblock.CC_constraint5 = Constraint(expr=sum(bin_y[p] \ - for p in polytopes) == 1) + + pblock.CC_constraint4 = Constraint(vertices, rule=con4_rule) + pblock.CC_constraint5 = Constraint(expr=sum(bin_y[p] for p in polytopes) == 1) class _LOGPiecewise(object): @@ -572,76 +633,92 @@ class _LOGPiecewise(object): Called to generate Piecewise constraint using the LOG formulation """ - def _Branching_Scheme(self,n): + def _Branching_Scheme(self, n): """ Branching scheme for LOG, requires a gray code """ BIGL = 2**n - S = range(1,n+1) + S = range(1, n + 1) # turn the GrayCode into a dictionary indexed # starting at 1 - G = {k:v for k,v in enumerate(_GrayCode(n),start=1)} - - L = {s:[k+1 for k in range(BIGL+1) \ - if ((k == 0) or (G[k][s-1] == 1)) \ - and ((k == BIGL) or (G[k+1][s-1] == 1))] for s in S} - R = {s:[k+1 for k in range(BIGL+1) \ - if ((k == 0) or (G[k][s-1] == 0)) \ - and ((k == BIGL) or (G[k+1][s-1] == 0))] for s in S} - - return S,L,R - - def construct(self,pblock,x_var,y_var): - if not _isPowerOfTwo(len(pblock._domain_pts)-1): - msg = "'%s' does not have a list of domain points "\ - "with length (2^n)+1" + G = {k: v for k, v in enumerate(_GrayCode(n), start=1)} + + L = { + s: [ + k + 1 + for k in range(BIGL + 1) + if ((k == 0) or (G[k][s - 1] == 1)) + and ((k == BIGL) or (G[k + 1][s - 1] == 1)) + ] + for s in S + } + R = { + s: [ + k + 1 + for k in range(BIGL + 1) + if ((k == 0) or (G[k][s - 1] == 0)) + and ((k == BIGL) or (G[k + 1][s - 1] == 0)) + ] + for s in S + } + + return S, L, R + + def construct(self, pblock, x_var, y_var): + if not _isPowerOfTwo(len(pblock._domain_pts) - 1): + msg = "'%s' does not have a list of domain points with length (2^n)+1" raise ValueError(msg % (pblock.name,)) x_pts = pblock._domain_pts y_pts = pblock._range_pts bound_type = pblock._bound_type - if None in [x_pts,y_pts,bound_type]: - raise RuntimeError("_LOGPiecewise: construct() called during "\ - "invalid state.") + if None in [x_pts, y_pts, bound_type]: + raise RuntimeError( + "_LOGPiecewise: construct() called during invalid state." + ) len_x_pts = len(x_pts) # create branching schemes - L_i = int(math.log(len_x_pts-1,2)) - S_i,B_LEFT,B_RIGHT = self._Branching_Scheme(L_i) + L_i = int(math.log(len_x_pts - 1, 2)) + S_i, B_LEFT, B_RIGHT = self._Branching_Scheme(L_i) # create indexers - polytopes = range(1,len_x_pts) - vertices = range(1,len_x_pts+1) + polytopes = range(1, len_x_pts) + vertices = range(1, len_x_pts + 1) bin_y_index = S_i # create vars - pblock.LOG_lambda = Var(vertices,within=NonNegativeReals) + pblock.LOG_lambda = Var(vertices, within=NonNegativeReals) lmda = pblock.LOG_lambda - pblock.LOG_bin_y = Var(bin_y_index,within=Binary) + pblock.LOG_bin_y = Var(bin_y_index, within=Binary) bin_y = pblock.LOG_bin_y # create piecewise constraints - pblock.LOG_constraint1 = Constraint(expr=x_var==sum(lmda[v]*x_pts[v-1] \ - for v in vertices)) + pblock.LOG_constraint1 = Constraint( + expr=x_var == sum(lmda[v] * x_pts[v - 1] for v in vertices) + ) LHS = y_var - RHS = sum(lmda[v]*y_pts[v-1] for v in vertices) + RHS = sum(lmda[v] * y_pts[v - 1] for v in vertices) expr = None if bound_type == Bound.Upper: - expr= LHS <= RHS + expr = LHS <= RHS elif bound_type == Bound.Lower: - expr= LHS >= RHS + expr = LHS >= RHS elif bound_type == Bound.Equal: - expr= LHS == RHS + expr = LHS == RHS else: raise ValueError("Invalid Bound for _LOGPiecewise object") pblock.LOG_constraint2 = Constraint(expr=expr) - pblock.LOG_constraint3 = Constraint(expr=sum(lmda[v] \ - for v in vertices) == 1) - def con4_rule(model,s): + pblock.LOG_constraint3 = Constraint(expr=sum(lmda[v] for v in vertices) == 1) + + def con4_rule(model, s): return sum(lmda[v] for v in B_LEFT[s]) <= bin_y[s] - pblock.LOG_constraint4 = Constraint(bin_y_index,rule=con4_rule) - def con5_rule(model,s): - return sum(lmda[v] for v in B_RIGHT[s]) <= (1-bin_y[s]) - pblock.LOG_constraint5 = Constraint(bin_y_index,rule=con5_rule) + + pblock.LOG_constraint4 = Constraint(bin_y_index, rule=con4_rule) + + def con5_rule(model, s): + return sum(lmda[v] for v in B_RIGHT[s]) <= (1 - bin_y[s]) + + pblock.LOG_constraint5 = Constraint(bin_y_index, rule=con5_rule) class _MCPiecewise(object): @@ -649,120 +726,134 @@ class _MCPiecewise(object): Called to generate Piecewise constraint using the MC formulation """ - def construct(self,pblock,x_var,y_var): + def construct(self, pblock, x_var, y_var): x_pts = pblock._domain_pts y_pts = pblock._range_pts bound_type = pblock._bound_type - if None in [x_pts,y_pts,bound_type]: - raise RuntimeError("_MCPiecewise: construct() called during "\ - "invalid state.") + if None in [x_pts, y_pts, bound_type]: + raise RuntimeError("_MCPiecewise: construct() called during invalid state.") len_x_pts = len(x_pts) # create indexers - polytopes = range(1,len_x_pts) + polytopes = range(1, len_x_pts) # create constants (using future division) - SLOPE = {p:(y_pts[p]-y_pts[p-1])/(x_pts[p]-x_pts[p-1]) - for p in polytopes} - INTERSEPT = {p:y_pts[p-1] - (SLOPE[p]*x_pts[p-1]) for p in polytopes} + SLOPE = { + p: (y_pts[p] - y_pts[p - 1]) / (x_pts[p] - x_pts[p - 1]) for p in polytopes + } + INTERSEPT = {p: y_pts[p - 1] - (SLOPE[p] * x_pts[p - 1]) for p in polytopes} # create vars pblock.MC_poly_x = Var(polytopes) poly_x = pblock.MC_poly_x - pblock.MC_bin_y = Var(polytopes,within=Binary) + pblock.MC_bin_y = Var(polytopes, within=Binary) bin_y = pblock.MC_bin_y # create piecewise constraints - pblock.MC_constraint1 = Constraint(expr=x_var==sum(poly_x[p] \ - for p in polytopes)) + pblock.MC_constraint1 = Constraint( + expr=x_var == sum(poly_x[p] for p in polytopes) + ) LHS = y_var - RHS = sum(poly_x[p]*SLOPE[p]+bin_y[p]*INTERSEPT[p] for p in polytopes) + RHS = sum(poly_x[p] * SLOPE[p] + bin_y[p] * INTERSEPT[p] for p in polytopes) expr = None if bound_type == Bound.Upper: - expr= LHS <= RHS + expr = LHS <= RHS elif bound_type == Bound.Lower: - expr= LHS >= RHS + expr = LHS >= RHS elif bound_type == Bound.Equal: - expr= LHS == RHS + expr = LHS == RHS else: raise ValueError("Invalid Bound for _INCPiecewise object") pblock.MC_constraint2 = Constraint(expr=expr) - def con3_rule(model,p): - return bin_y[p]*x_pts[p-1] <= poly_x[p] - pblock.MC_constraint3 = Constraint(polytopes,rule=con3_rule) - def con4_rule(model,p): - return poly_x[p] <= bin_y[p]*x_pts[p] - pblock.MC_constraint4 = Constraint(polytopes,rule=con4_rule) - pblock.MC_constraint5 = Constraint(expr=sum(bin_y[p] \ - for p in polytopes) == 1) + + def con3_rule(model, p): + return bin_y[p] * x_pts[p - 1] <= poly_x[p] + + pblock.MC_constraint3 = Constraint(polytopes, rule=con3_rule) + + def con4_rule(model, p): + return poly_x[p] <= bin_y[p] * x_pts[p] + + pblock.MC_constraint4 = Constraint(polytopes, rule=con4_rule) + pblock.MC_constraint5 = Constraint(expr=sum(bin_y[p] for p in polytopes) == 1) + class _INCPiecewise(object): """ Called to generate Piecewise constraint using the INC formulation """ - def construct(self,pblock,x_var,y_var): + def construct(self, pblock, x_var, y_var): x_pts = pblock._domain_pts y_pts = pblock._range_pts bound_type = pblock._bound_type - if None in [x_pts,y_pts,bound_type]: - raise RuntimeError("_INCPiecewise: construct() called during "\ - "invalid state.") + if None in [x_pts, y_pts, bound_type]: + raise RuntimeError( + "_INCPiecewise: construct() called during invalid state." + ) len_x_pts = len(x_pts) # create indexers - polytopes = range(1,len_x_pts) - bin_y_index = range(1,len_x_pts-1) + polytopes = range(1, len_x_pts) + bin_y_index = range(1, len_x_pts - 1) # create vars pblock.INC_delta = Var(polytopes) delta = pblock.INC_delta delta[1].setub(1) - delta[len_x_pts-1].setlb(0) - pblock.INC_bin_y = Var(bin_y_index,within=Binary) + delta[len_x_pts - 1].setlb(0) + pblock.INC_bin_y = Var(bin_y_index, within=Binary) bin_y = pblock.INC_bin_y # create piecewise constraints - pblock.INC_constraint1 = Constraint(expr=x_var==x_pts[0] + \ - sum(delta[p]*(x_pts[p]-x_pts[p-1]) \ - for p in polytopes)) + pblock.INC_constraint1 = Constraint( + expr=x_var + == x_pts[0] + sum(delta[p] * (x_pts[p] - x_pts[p - 1]) for p in polytopes) + ) LHS = y_var - RHS = y_pts[0] + sum(delta[p]*(y_pts[p]-y_pts[p-1]) for p in polytopes) + RHS = y_pts[0] + sum(delta[p] * (y_pts[p] - y_pts[p - 1]) for p in polytopes) expr = None if bound_type == Bound.Upper: - expr= LHS <= RHS + expr = LHS <= RHS elif bound_type == Bound.Lower: - expr= LHS >= RHS + expr = LHS >= RHS elif bound_type == Bound.Equal: - expr= LHS == RHS + expr = LHS == RHS else: raise ValueError("Invalid Bound for _INCPiecewise object") pblock.INC_constraint2 = Constraint(expr=expr) - def con3_rule(model,p): + + def con3_rule(model, p): if p != polytopes[-1]: - return delta[p+1] <= bin_y[p] + return delta[p + 1] <= bin_y[p] else: return Constraint.Skip - pblock.INC_constraint3 = Constraint(polytopes,rule=con3_rule) - def con4_rule(model,p): + + pblock.INC_constraint3 = Constraint(polytopes, rule=con3_rule) + + def con4_rule(model, p): if p != polytopes[-1]: return bin_y[p] <= delta[p] else: return Constraint.Skip - pblock.INC_constraint4 = Constraint(polytopes,rule=con4_rule) + + pblock.INC_constraint4 = Constraint(polytopes, rule=con4_rule) class _BIGMPiecewise(object): """ Called to generate Piecewise constraint using the BIGM formulation """ - def __init__(self,binary=True): + + def __init__(self, binary=True): self.binary = binary - if not (self.binary in [True,False]): - raise ValueError("_BIGMPiecewise must be initialized with the binary "\ - "flag set to True or False (choose one).") + if not (self.binary in [True, False]): + raise ValueError( + "_BIGMPiecewise must be initialized with the binary " + "flag set to True or False (choose one)." + ) - def construct(self,pblock,x_var,y_var): + def construct(self, pblock, x_var, y_var): # The BIGM methods currently determine tightest possible M # values. This method is implemented in such a way that # binary/sos1 variables are not created when this M is zero @@ -770,9 +861,10 @@ def construct(self,pblock,x_var,y_var): x_pts = pblock._domain_pts y_pts = pblock._range_pts bound_type = pblock._bound_type - if None in [x_pts,y_pts,bound_type]: - raise RuntimeError("_BIGMPiecewise: construct() called during "\ - "invalid state.") + if None in [x_pts, y_pts, bound_type]: + raise RuntimeError( + "_BIGMPiecewise: construct() called during invalid state." + ) len_x_pts = len(x_pts) if self.binary is True: @@ -785,14 +877,14 @@ def construct(self,pblock,x_var,y_var): OPT_M['UB'] = {} OPT_M['LB'] = {} - if bound_type in [Bound.Upper,Bound.Equal]: + if bound_type in [Bound.Upper, Bound.Equal]: OPT_M['UB'] = self._find_M(x_pts, y_pts, Bound.Upper) - if bound_type in [Bound.Lower,Bound.Equal]: + if bound_type in [Bound.Lower, Bound.Equal]: OPT_M['LB'] = self._find_M(x_pts, y_pts, Bound.Lower) all_keys = set(OPT_M['UB'].keys()).union(OPT_M['LB'].keys()) full_indices = [] - full_indices.extend(range(1,len_x_pts)) + full_indices.extend(range(1, len_x_pts)) bigm_y_index = None bigm_y = None if len(all_keys) > 0: @@ -803,54 +895,67 @@ def y_domain(): return Binary else: return NonNegativeReals - setattr(pblock,tag+'_y', Var(bigm_y_index,within=y_domain())) - bigm_y = getattr(pblock,tag+'_y') - def con1_rule(model,i): - if bound_type in [Bound.Upper,Bound.Equal]: + setattr(pblock, tag + '_y', Var(bigm_y_index, within=y_domain())) + bigm_y = getattr(pblock, tag + '_y') + + def con1_rule(model, i): + if bound_type in [Bound.Upper, Bound.Equal]: rhs = 1.0 if i not in OPT_M['UB']: rhs *= 0.0 else: - rhs *= OPT_M['UB'][i]*(1-bigm_y[i]) + rhs *= OPT_M['UB'][i] * (1 - bigm_y[i]) # using future division - return y_var - y_pts[i-1] - \ - ((y_pts[i]-y_pts[i-1])/(x_pts[i]-x_pts[i-1]))*(x_var-x_pts[i-1])\ - <= rhs + return ( + y_var + - y_pts[i - 1] + - ((y_pts[i] - y_pts[i - 1]) / (x_pts[i] - x_pts[i - 1])) + * (x_var - x_pts[i - 1]) + <= rhs + ) elif bound_type == Bound.Lower: rhs = 1.0 if i not in OPT_M['LB']: rhs *= 0.0 else: - rhs *= OPT_M['LB'][i]*(1-bigm_y[i]) + rhs *= OPT_M['LB'][i] * (1 - bigm_y[i]) # using future division - return y_var - y_pts[i-1] - \ - ((y_pts[i]-y_pts[i-1])/(x_pts[i]-x_pts[i-1]))*(x_var-x_pts[i-1])\ - >= rhs + return ( + y_var + - y_pts[i - 1] + - ((y_pts[i] - y_pts[i - 1]) / (x_pts[i] - x_pts[i - 1])) + * (x_var - x_pts[i - 1]) + >= rhs + ) def con2_rule(model): - expr = [bigm_y[i] for i in range(1,len_x_pts) if i in all_keys] + expr = [bigm_y[i] for i in range(1, len_x_pts) if i in all_keys] if len(expr) > 0: return sum(expr) == 1 else: return Constraint.Skip - def conAFF_rule(model,i): + def conAFF_rule(model, i): rhs = 1.0 if i not in OPT_M['LB']: rhs *= 0.0 else: - rhs *= OPT_M['LB'][i]*(1-bigm_y[i]) + rhs *= OPT_M['LB'][i] * (1 - bigm_y[i]) # using future division - return y_var - y_pts[i-1] - \ - ((y_pts[i]-y_pts[i-1])/(x_pts[i]-x_pts[i-1]))*(x_var-x_pts[i-1]) \ - >= rhs + return ( + y_var + - y_pts[i - 1] + - ((y_pts[i] - y_pts[i - 1]) / (x_pts[i] - x_pts[i - 1])) + * (x_var - x_pts[i - 1]) + >= rhs + ) - pblock.BIGM_constraint1 = Constraint(full_indices,rule=con1_rule) + pblock.BIGM_constraint1 = Constraint(full_indices, rule=con1_rule) if len(all_keys) > 0: pblock.BIGM_constraint2 = Constraint(rule=con2_rule) if bound_type == Bound.Equal: - pblock.BIGM_constraint3 = Constraint(full_indices,rule=conAFF_rule) + pblock.BIGM_constraint3 = Constraint(full_indices, rule=conAFF_rule) if len(all_keys) > 0: if self.binary is False: @@ -859,34 +964,62 @@ def conAFF_rule(model,i): # In order to enforce the same behavior as actual piecewise # constraints, we constrain the domain variable between the # outer domain pts. But in order to prevent filling the model - # with unecessary constraints, we only do this when absolutely + # with unnecessary constraints, we only do this when absolutely # necessary. if not x_var.lb is None and x_var.lb < x_pts[0]: pblock.bigm_domain_constraint_lower = Constraint(expr=x_pts[0] <= x_var) if not x_var.ub is None and x_var.ub > x_pts[-1]: pblock.bigm_domain_constraint_upper = Constraint(expr=x_var <= x_pts[-1]) - def _M_func(self,a,Fa,b,Fb,c,Fc): + def _M_func(self, a, Fa, b, Fb, c, Fc): # using future division - return Fa - Fb - ((a-b) * ((Fc-Fb) / (c-b))) + return Fa - Fb - ((a - b) * ((Fc - Fb) / (c - b))) - def _find_M(self,x_pts,y_pts,bound_type): + def _find_M(self, x_pts, y_pts, bound_type): len_x_pts = len(x_pts) _self_M_func = self._M_func M_final = {} - for j in range(1,len_x_pts): + for j in range(1, len_x_pts): index = j - if (bound_type == Bound.Lower): - M_final[index] = min( [0.0, min([_self_M_func(x_pts[k],y_pts[k], - x_pts[j-1],y_pts[j-1], - x_pts[j],y_pts[j]) \ - for k in range(len_x_pts)])] ) - elif (bound_type == Bound.Upper): - M_final[index] = max( [0.0, max([_self_M_func(x_pts[k],y_pts[k], - x_pts[j-1],y_pts[j-1], - x_pts[j],y_pts[j]) \ - for k in range(len_x_pts)])] ) + if bound_type == Bound.Lower: + M_final[index] = min( + [ + 0.0, + min( + [ + _self_M_func( + x_pts[k], + y_pts[k], + x_pts[j - 1], + y_pts[j - 1], + x_pts[j], + y_pts[j], + ) + for k in range(len_x_pts) + ] + ), + ] + ) + elif bound_type == Bound.Upper: + M_final[index] = max( + [ + 0.0, + max( + [ + _self_M_func( + x_pts[k], + y_pts[k], + x_pts[j - 1], + y_pts[j - 1], + x_pts[j], + y_pts[j], + ) + for k in range(len_x_pts) + ] + ), + ] + ) else: raise ValueError("Invalid Bound passed to _find_M function") if M_final[index] == 0.0: @@ -894,113 +1027,115 @@ def _find_M(self,x_pts,y_pts,bound_type): return M_final -@ModelComponentFactory.register("Constraints that contain piecewise linear expressions.") +@ModelComponentFactory.register( + "Constraints that contain piecewise linear expressions." +) class Piecewise(Block): """ - Adds piecewise constraints to a Pyomo model for functions of the - form, y = f(x). - - Usage: - model.const = Piecewise(index_1,...,index_n,yvar,xvar,**Keywords) - model.const = Piecewise(yvar,xvar,**Keywords) - - Keywords: - --pw_pts={},[],() - A dictionary of lists (keys are index set) or a single list - (for the non-indexed case or when an identical set of - breakpoints is used across all indices) defining the set of - domain breakpoints for the piecewise linear - function. **ALWAYS REQUIRED** - --pw_repn='' - Indicates the type of piecewise representation to use. This - can have a major impact on solver performance. - Choices: (Default 'SOS2') - - ~ + 'SOS2' - Standard representation using sos2 constraints - ~ 'BIGM_BIN' - BigM constraints with binary variables. - Theoretically tightest M values are automatically - determined. - ~ 'BIGM_SOS1' - BigM constraints with sos1 variables. - Theoretically tightest M values are automatically - determined. - ~*+ 'DCC' - Disaggregated convex combination model - ~*+ 'DLOG' - Logarithmic disaggregated convex combination model - ~*+ 'CC' - Convex combination model - ~*+ 'LOG' - Logarithmic branching convex combination - ~* 'MC' - Multiple choice model - ~*+ 'INC' - Incremental (delta) method - - + Supports step functions - * Source: "Mixed-Integer Models for Non-separable Piecewise Linear - Optimization: Unifying framework and Extensions" (Vielma, - Nemhauser 2008) - ~ Refer to the optional 'force_pw' keyword. - --pw_constr_type='' - Indicates the bound type of the piecewise function. - Choices: - - 'UB' - y variable is bounded above by piecewise function - 'LB' - y variable is bounded below by piecewise function - 'EQ' - y variable is equal to the piecewise function - --f_rule=f(model,i,j,...,x), {}, [], () - An object that returns a numeric value that is the range - value corresponding to each piecewise domain point. For - functions, the first argument must be a Pyomo model. The - last argument is the domain value at which the function - evaluates (Not a Pyomo Var). Intermediate arguments are the - corresponding indices of the Piecewise component (if any). - Otherwise, the object can be a dictionary of lists/tuples - (with keys the same as the indexing set) or a singe - list/tuple (when no indexing set is used or when all indices - use an identical piecewise function). - Examples: - - # A function which changes with index - def f(model,j,x): - if (j == 2): - return x**2 + 1.0 - else: - return x**2 + 5.0 - - # A nonlinear function - f = lambda model,x: return exp(x) + value(model.p) - (model.p is a Pyomo Param) - - # A step function - f = [0,0,1,1,2,2] - --force_pw=True/False - Using the given function rule and pw_pts, a check for - convexity/concavity is implemented. If (1) the function is - convex and the piecewise constraints are lower bounds or if - (2) the function is concave and the piecewise constraints - are upper bounds then the piecewise constraints will be - substituted for linear constraints. Setting 'force_pw=True' - will force the use of the original piecewise constraints - even when one of these two cases applies. - --warning_tol= Default=1e-8 - To aid in debugging, a warning is printed when consecutive - slopes of piecewise segments are within of - each other. - --warn_domain_coverage=True/False Default=True - Print a warning when the feasible region of the domain - variable is not completely covered by the piecewise - breakpoints. - --unbounded_domain_var=True/False Default=False - Allow an unbounded or partially bounded Pyomo Var to be used - as the domain variable. - **NOTE: This does not imply unbounded piecewise segments - will be constructed. The outermost piecwise - breakpoints will bound the domain variable at each - index. However, the Var attributes .lb and .ub will - not be modified. + Adds piecewise constraints to a Pyomo model for functions of the + form, y = f(x). + + Usage: + model.const = Piecewise(index_1,...,index_n,yvar,xvar,**Keywords) + model.const = Piecewise(yvar,xvar,**Keywords) + + Keywords: + + -pw_pts={},[],() + A dictionary of lists (keys are index set) or a single list + (for the non-indexed case or when an identical set of + breakpoints is used across all indices) defining the set of + domain breakpoints for the piecewise linear + function. **ALWAYS REQUIRED** + + -pw_repn='' + Indicates the type of piecewise representation to use. This + can have a major impact on solver performance. + Choices: (Default 'SOS2') + + ~ + 'SOS2' - Standard representation using sos2 constraints + ~ 'BIGM_BIN' - BigM constraints with binary variables. + Theoretically tightest M values are automatically + determined. + ~ 'BIGM_SOS1' - BigM constraints with sos1 variables. + Theoretically tightest M values are automatically + determined. + ~*+ 'DCC' - Disaggregated convex combination model + ~*+ 'DLOG' - Logarithmic disaggregated convex combination model + ~*+ 'CC' - Convex combination model + ~*+ 'LOG' - Logarithmic branching convex combination + ~* 'MC' - Multiple choice model + ~*+ 'INC' - Incremental (delta) method + + + Supports step functions + * Source: "Mixed-Integer Models for Non-separable Piecewise Linear + Optimization: Unifying framework and Extensions" (Vielma, + Nemhauser 2008) + ~ Refer to the optional 'force_pw' keyword. + + -pw_constr_type='' + Indicates the bound type of the piecewise function. + Choices: + + 'UB' - y variable is bounded above by piecewise function + 'LB' - y variable is bounded below by piecewise function + 'EQ' - y variable is equal to the piecewise function + + -f_rule=f(model,i,j,...,x), {}, [], () + An object that returns a numeric value that is the range + value corresponding to each piecewise domain point. For + functions, the first argument must be a Pyomo model. The + last argument is the domain value at which the function + evaluates (Not a Pyomo Var). Intermediate arguments are the + corresponding indices of the Piecewise component (if any). + Otherwise, the object can be a dictionary of lists/tuples + (with keys the same as the indexing set) or a singe + list/tuple (when no indexing set is used or when all indices + use an identical piecewise function). + Examples: + + # A function which changes with index + def f(model,j,x): + if (j == 2): + return x**2 + 1.0 + else: + return x**2 + 5.0 + + # A nonlinear function + f = lambda model,x: return exp(x) + value(model.p) + (model.p is a Pyomo Param) + + # A step function + f = [0,0,1,1,2,2] + + -force_pw=True/False + Using the given function rule and pw_pts, a check for + convexity/concavity is implemented. If (1) the function is + convex and the piecewise constraints are lower bounds or if + (2) the function is concave and the piecewise constraints + are upper bounds then the piecewise constraints will be + substituted for linear constraints. Setting 'force_pw=True' + will force the use of the original piecewise constraints + even when one of these two cases applies. + + -warning_tol= Default=1e-8 + To aid in debugging, a warning is printed when consecutive + slopes of piecewise segments are within of + each other. + + -warn_domain_coverage=True/False Default=True + Print a warning when the feasible region of the domain + variable is not completely covered by the piecewise + breakpoints. + + -unbounded_domain_var=True/False Default=False + Allow an unbounded or partially bounded Pyomo Var to be used + as the domain variable. + **NOTE: This does not imply unbounded piecewise segments + will be constructed. The outermost piecewise + breakpoints will bound the domain variable at each + index. However, the Var attributes .lb and .ub will + not be modified. """ _ComponentDataClass = _PiecewiseData @@ -1016,35 +1151,39 @@ def __new__(cls, *args, **kwds): def __init__(self, *args, **kwds): # this is temporary as part of a move to user inputs # using Enums rather than strings - translate_repn = {'BIGM_SOS1':PWRepn.BIGM_SOS1,\ - PWRepn.BIGM_SOS1:PWRepn.BIGM_SOS1,\ - 'BIGM_BIN':PWRepn.BIGM_BIN,\ - PWRepn.BIGM_BIN:PWRepn.BIGM_BIN,\ - 'SOS2':PWRepn.SOS2,\ - PWRepn.SOS2:PWRepn.SOS2,\ - 'CC':PWRepn.CC,\ - PWRepn.CC:PWRepn.CC,\ - 'DCC':PWRepn.DCC,\ - PWRepn.DCC:PWRepn.DCC,\ - 'DLOG':PWRepn.DLOG,\ - PWRepn.DLOG:PWRepn.DLOG,\ - 'LOG':PWRepn.LOG,\ - PWRepn.LOG:PWRepn.LOG,\ - 'MC':PWRepn.MC,\ - PWRepn.MC:PWRepn.MC,\ - 'INC':PWRepn.INC,\ - PWRepn.INC:PWRepn.INC,\ - None:None} + translate_repn = { + 'BIGM_SOS1': PWRepn.BIGM_SOS1, + PWRepn.BIGM_SOS1: PWRepn.BIGM_SOS1, + 'BIGM_BIN': PWRepn.BIGM_BIN, + PWRepn.BIGM_BIN: PWRepn.BIGM_BIN, + 'SOS2': PWRepn.SOS2, + PWRepn.SOS2: PWRepn.SOS2, + 'CC': PWRepn.CC, + PWRepn.CC: PWRepn.CC, + 'DCC': PWRepn.DCC, + PWRepn.DCC: PWRepn.DCC, + 'DLOG': PWRepn.DLOG, + PWRepn.DLOG: PWRepn.DLOG, + 'LOG': PWRepn.LOG, + PWRepn.LOG: PWRepn.LOG, + 'MC': PWRepn.MC, + PWRepn.MC: PWRepn.MC, + 'INC': PWRepn.INC, + PWRepn.INC: PWRepn.INC, + None: None, + } # this is temporary as part of a move to user inputs # using Enums rather than strings - translate_bound = {'UB':Bound.Upper,\ - Bound.Upper:Bound.Upper,\ - 'LB':Bound.Lower,\ - Bound.Lower:Bound.Lower,\ - 'EQ':Bound.Equal,\ - Bound.Equal:Bound.Equal,\ - None:None} + translate_bound = { + 'UB': Bound.Upper, + Bound.Upper: Bound.Upper, + 'LB': Bound.Lower, + Bound.Lower: Bound.Lower, + 'EQ': Bound.Equal, + Bound.Equal: Bound.Equal, + None: None, + } # TODO: Update the keyword names. I think these are more clear # pw_pts -> breakpoints @@ -1058,25 +1197,26 @@ def __init__(self, *args, **kwds): # # extract all keywords used by this class - pw_points = kwds.pop('pw_pts',None) + pw_points = kwds.pop('pw_pts', None) # translate the user input to the enum type - pw_rep = kwds.pop('pw_repn','SOS2') - pw_rep = translate_repn.get(pw_rep,pw_rep) - if (pw_rep == PWRepn.BIGM_BIN) or \ - (pw_rep == PWRepn.BIGM_SOS1): + pw_rep = kwds.pop('pw_repn', 'SOS2') + pw_rep = translate_repn.get(pw_rep, pw_rep) + if (pw_rep == PWRepn.BIGM_BIN) or (pw_rep == PWRepn.BIGM_SOS1): deprecation_warning( "The 'BIGM_BIN' and 'BIGM_SOS1' " "piecewise representations will be removed in " "a future version of Pyomo. They produce incorrect " - "results in certain cases", version='5.3') + "results in certain cases", + version='5.3', + ) # translate the user input to the enum type - bound_type = kwds.pop('pw_constr_type',None) - bound_type = translate_bound.get(bound_type,bound_type) - f_rule = kwds.pop('f_rule',None) - force_pw = kwds.pop('force_pw',False) - warning_tol = kwds.pop('warning_tol',_WARNING_TOLERANCE) - warn_domain_coverage = kwds.pop('warn_domain_coverage',True) - unbounded_domain_var = kwds.pop('unbounded_domain_var',False) + bound_type = kwds.pop('pw_constr_type', None) + bound_type = translate_bound.get(bound_type, bound_type) + f_rule = kwds.pop('f_rule', None) + force_pw = kwds.pop('force_pw', False) + warning_tol = kwds.pop('warning_tol', _WARNING_TOLERANCE) + warn_domain_coverage = kwds.pop('warn_domain_coverage', True) + unbounded_domain_var = kwds.pop('unbounded_domain_var', False) # all but the last two args should go to Block try: @@ -1103,25 +1243,35 @@ def __init__(self, *args, **kwds): # implementation is that model.pprint() labels Piecewise # blocks as simply Blocks. # - #kwds.setdefault('ctype', Piecewise) - Block.__init__(self,*args,**kwds) + # kwds.setdefault('ctype', Piecewise) + Block.__init__(self, *args, **kwds) # Check that the variables args are actually Pyomo Vars - if not( isinstance(self._domain_var,_VarData) or \ - isinstance(self._domain_var,IndexedVar) ): - msg = "Piecewise component has invalid "\ - "argument type for domain variable, %s" + if not ( + isinstance(self._domain_var, _VarData) + or isinstance(self._domain_var, IndexedVar) + ): + msg = ( + "Piecewise component has invalid " + "argument type for domain variable, %s" + ) raise TypeError(msg % (repr(self._domain_var),)) - if not( isinstance(self._range_var,_VarData) or \ - isinstance(self._range_var,IndexedVar) ): - msg = "Piecewise component has invalid "\ - "argument type for range variable, %s" + if not ( + isinstance(self._range_var, _VarData) + or isinstance(self._range_var, IndexedVar) + ): + msg = ( + "Piecewise component has invalid " + "argument type for range variable, %s" + ) raise TypeError(msg % (repr(self._range_var),)) # Test that the keyword values make sense - if f_rule.__class__ not in [type(lambda: None),dict,list,tuple]: - msg = "Piecewise component keyword 'f_rule' must "\ - "be a function, dict, list, or tuple" + if f_rule.__class__ not in [type(lambda: None), dict, list, tuple]: + msg = ( + "Piecewise component keyword 'f_rule' must " + "be a function, dict, list, or tuple" + ) raise ValueError(msg) try: bound_type = Bound(bound_type) @@ -1129,20 +1279,25 @@ def __init__(self, *args, **kwds): try: bound_type = Bound[bound_type] except KeyError: - msg = "Invalid value for Piecewise component "\ - "keyword 'pw_constr_type'" + msg = "Invalid value for Piecewise component keyword 'pw_constr_type'" raise ValueError(msg) if warning_tol.__class__ is not float: - msg = "Invalid type '%s' for Piecewise component "\ - "keyword 'warning_tol', which must be of type 'float'" + msg = ( + "Invalid type '%s' for Piecewise component " + "keyword 'warning_tol', which must be of type 'float'" + ) raise TypeError(msg % (type(warning_tol),)) - if warn_domain_coverage not in [True,False]: - msg = "Invalid value for Piecewise component "\ - "keyword 'warn_domain_coverage', which must be True or False" + if warn_domain_coverage not in [True, False]: + msg = ( + "Invalid value for Piecewise component " + "keyword 'warn_domain_coverage', which must be True or False" + ) raise ValueError(msg) - if unbounded_domain_var not in [True,False]: - msg = "Invalid value for Piecewise component "\ - "keyword 'unbounded_domain_var', which must be True or False" + if unbounded_domain_var not in [True, False]: + msg = ( + "Invalid value for Piecewise component " + "keyword 'unbounded_domain_var', which must be True or False" + ) raise ValueError(msg) self._pw_rep = pw_rep @@ -1154,23 +1309,25 @@ def __init__(self, *args, **kwds): self._unbounded_domain_var = unbounded_domain_var if self.is_indexed() is False: - if not ( isinstance(pw_points, list) or \ - isinstance(pw_points,tuple) ): - msg = "Invalid type '%s' for Piecewise component "\ - "keyword 'pw_pts', which must be of type "\ - "'list' or 'tuple' for non-indexed Piecewise component" + if not (isinstance(pw_points, list) or isinstance(pw_points, tuple)): + msg = ( + "Invalid type '%s' for Piecewise component " + "keyword 'pw_pts', which must be of type " + "'list' or 'tuple' for non-indexed Piecewise component" + ) raise TypeError(msg % (type(pw_points),)) - self._domain_points = {None:pw_points} + self._domain_points = {None: pw_points} else: - if isinstance(pw_points, list) or \ - isinstance(pw_points,tuple): - self._domain_points = {None:pw_points} - elif isinstance(pw_points,dict): + if isinstance(pw_points, list) or isinstance(pw_points, tuple): + self._domain_points = {None: pw_points} + elif isinstance(pw_points, dict): self._domain_points = pw_points else: - msg = "Invalid type '%s' for Piecewise component "\ - "keyword 'pw_pts', which must be of type "\ - "'dict', 'list', or 'tuple' for indexed Piecewise component" + msg = ( + "Invalid type '%s' for Piecewise component " + "keyword 'pw_pts', which must be of type " + "'dict', 'list', or 'tuple' for indexed Piecewise component" + ) raise TypeError(msg % (type(pw_points),)) def construct(self, *args, **kwds): @@ -1184,7 +1341,7 @@ def construct(self, *args, **kwds): timer = ConstructionTimer(self) # We need to be able to add and construct new model # components on the fly so we make this Block behave concretely - self._constructed=True + self._constructed = True # cache this because it is apparently expensive is_indexed = self.is_indexed() @@ -1197,12 +1354,11 @@ def construct(self, *args, **kwds): else: for index in self._index_set: if generate_debug_messages: - logger.debug(" Constructing Piecewise index "+str(index)) + logger.debug(" Constructing Piecewise index " + str(index)) self.add(index, _is_indexed=is_indexed) timer.report() def add(self, index, _is_indexed=None): - if _is_indexed is None: _is_indexed = self.is_indexed() @@ -1241,34 +1397,60 @@ def add(self, index, _is_indexed=None): _self_domain_pts_index = self._domain_points[None] if self._unbounded_domain_var is False: - # We add the requirment that the domain variable used by Piecewise is + # We add the requirement that the domain variable used by Piecewise is # always bounded from above and below. if (_self_xvar.lb is None) or (_self_xvar.ub is None): - msg = "Piecewise '%s[%s]' found an unbounded variable "\ - "used for the constraint domain: '%s'. "\ - "Piecewise component requires the domain variable have "\ - "lower and upper bounds. Refer to the Piecewise help "\ - "documentation for information on how to disable this "\ - "restriction" + msg = ( + "Piecewise '%s[%s]' found an unbounded variable " + "used for the constraint domain: '%s'. " + "Piecewise component requires the domain variable have " + "lower and upper bounds. Refer to the Piecewise help " + "documentation for information on how to disable this " + "restriction" + ) raise ValueError(msg % (self.name, index, _self_xvar)) if self._warn_domain_coverage is True: # Print a warning when the feasible region created by the piecewise # constraints does not include the domain variables bounds - if (_self_xvar.lb is not None) and (_self_xvar.lb < min(_self_domain_pts_index)): - msg = "**WARNING: Piecewise '%s[%s]' feasible region does not "\ - "include the lower bound of domain variable: %s.lb = %s < %s. "\ - "Refer to the Piecewise help documentation for information on "\ + if (_self_xvar.lb is not None) and ( + _self_xvar.lb < min(_self_domain_pts_index) + ): + msg = ( + "**WARNING: Piecewise '%s[%s]' feasible region does not " + "include the lower bound of domain variable: %s.lb = %s < %s. " + "Refer to the Piecewise help documentation for information on " "how to disable this warning." - print(msg % ( self.name, index, _self_xvar, _self_xvar.lb, - min(_self_domain_pts_index) )) - if (_self_xvar.ub is not None) and (_self_xvar.ub > max(_self_domain_pts_index)): - msg = "**WARNING: Piecewise '%s[%s]' feasible region does not "\ - "include the upper bound of domain variable: %s.ub = %s > %s. "\ - "Refer to the Piecewise help documentation for information on "\ - "how to disable this warning." - print(msg % ( self.name, index, _self_xvar, _self_xvar.ub, - max(_self_domain_pts_index) )) + ) + print( + msg + % ( + self.name, + index, + _self_xvar, + _self_xvar.lb, + min(_self_domain_pts_index), + ) + ) + if (_self_xvar.ub is not None) and ( + _self_xvar.ub > max(_self_domain_pts_index) + ): + msg = ( + "**WARNING: Piecewise '%s[%s]' feasible region does not " + "include the upper bound of domain variable: %s.ub = %s > %s. " + "Refer to the Piecewise help documentation for information on " + "how to disable this warning." + ) + print( + msg + % ( + self.name, + index, + _self_xvar, + _self_xvar.ub, + max(_self_domain_pts_index), + ) + ) if len(_self_domain_pts_index) <= 1: # TODO: Technically one could interpret this @@ -1282,43 +1464,50 @@ def add(self, index, _is_indexed=None): "Piecewise component '%s[%s]' failed to construct " "piecewise representation. List of breakpoints " "must contain at least two elements. Current list: %s" - % (self.name, index, str(_self_domain_pts_index))) + % (self.name, index, str(_self_domain_pts_index)) + ) # generate the list of range values using the function rule # check if convexity or concavity holds as well force_simple = False if not _is_indexed: - character,range_pts,isStep=_characterize_function(self.name, - self._warning_tol, - self._f_rule, - _self_parent, - _self_domain_pts_index) + character, range_pts, isStep = _characterize_function( + self.name, + self._warning_tol, + self._f_rule, + _self_parent, + _self_domain_pts_index, + ) else: - character,range_pts,isStep=_characterize_function(self.name, - self._warning_tol, - self._f_rule, - _self_parent, - _self_domain_pts_index, - index) - - - assert not ((isStep) and (character in [-1,1])) - if (isStep) and \ - (self._pw_rep in [PWRepn.MC, PWRepn.BIGM_BIN, PWRepn.BIGM_SOS1]): - msg = "Piecewise '%s[%s]' has detected a step function but the selected "\ - "piecewise representation '%s' does not currently support this "\ - "functionality. Refer to the Piecewise help documentation for "\ - "information about which piecewise representations support step functions." + character, range_pts, isStep = _characterize_function( + self.name, + self._warning_tol, + self._f_rule, + _self_parent, + _self_domain_pts_index, + index, + ) + + assert not ((isStep) and (character in [-1, 1])) + if (isStep) and ( + self._pw_rep in [PWRepn.MC, PWRepn.BIGM_BIN, PWRepn.BIGM_SOS1] + ): + msg = ( + "Piecewise '%s[%s]' has detected a step function but the selected " + "piecewise representation '%s' does not currently support this " + "functionality. Refer to the Piecewise help documentation for " + "information about which piecewise representations support step functions." + ) raise ValueError(msg % (self.name, index, self._pw_rep)) # Make automatic simplications to the piecewise constraints # for the special cases of convexity and lower bound # or concavity and upper bound - if (character == -1): - if (self._bound_type == Bound.Upper): + if character == -1: + if self._bound_type == Bound.Upper: force_simple = True - elif (character == 1): - if (self._bound_type == Bound.Lower): + elif character == 1: + if self._bound_type == Bound.Lower: force_simple = True # make sure the user does not want to disable the automatic @@ -1358,8 +1547,10 @@ def add(self, index, _is_indexed=None): elif self._pw_rep == PWRepn.BIGM_SOS1: func = _BIGMPiecewise(binary=False) else: - msg = "Piecewise '%s[%s]' does not have a valid "\ - "piecewise representation: '%s'" + msg = ( + "Piecewise '%s[%s]' does not have a valid " + "piecewise representation: '%s'" + ) raise ValueError(msg % (self.name, index, self._pw_rep)) if _is_indexed: @@ -1369,20 +1560,19 @@ def add(self, index, _is_indexed=None): self._data[index] = comp comp._index = index comp.updateBoundType(self._bound_type) - comp.updatePoints(_self_domain_pts_index,range_pts) - comp.build_constraints(func,_self_xvar,_self_yvar) + comp.updatePoints(_self_domain_pts_index, range_pts) + comp.build_constraints(func, _self_xvar, _self_yvar) -class SimplePiecewise(_PiecewiseData,Piecewise): +class SimplePiecewise(_PiecewiseData, Piecewise): def __init__(self, *args, **kwds): - _PiecewiseData.__init__(self,self) + _PiecewiseData.__init__(self, self) Piecewise.__init__(self, *args, **kwds) -class IndexedPiecewise(Piecewise): - def __init__(self,*args,**kwds): - Piecewise.__init__(self,*args,**kwds) +class IndexedPiecewise(Piecewise): + def __init__(self, *args, **kwds): + Piecewise.__init__(self, *args, **kwds) def __str__(self): return str(self.name) - diff --git a/pyomo/core/base/plugin.py b/pyomo/core/base/plugin.py index 303f8230c90..4ecb12d86a6 100644 --- a/pyomo/core/base/plugin.py +++ b/pyomo/core/base/plugin.py @@ -10,43 +10,56 @@ # ___________________________________________________________________________ import inspect from pyomo.common.deprecation import deprecation_warning + deprecation_warning( "The pyomo.core.base.plugin module is deprecated. " "See pyomo.core.base.transformation for Transformation and " "TransformationFactory, pyomo.core.base.component for " "ModelComponentFactory and pyomo.scripting.interface for " "IPyomoScript* interfaces.", - version='6.0', calling_frame=inspect.currentframe().f_back, + version='6.0', + calling_frame=inspect.currentframe().f_back, ) -__all__ = ['pyomo_callback', - 'IPyomoExpression', 'ExpressionFactory', 'ExpressionRegistration', - 'IPyomoPresolver', 'IPyomoPresolveAction', - 'IParamRepresentation', - 'ParamRepresentationFactory', - 'IPyomoScriptPreprocess', - 'IPyomoScriptCreateModel', - 'IPyomoScriptCreateDataPortal', - 'IPyomoScriptModifyInstance', - 'IPyomoScriptPrintModel', - 'IPyomoScriptPrintInstance', - 'IPyomoScriptSaveInstance', - 'IPyomoScriptPrintResults', - 'IPyomoScriptSaveResults', - 'IPyomoScriptPostprocess', - 'ModelComponentFactory', - 'Transformation', - 'TransformationFactory', - ] +__all__ = [ + 'pyomo_callback', + 'IPyomoExpression', + 'ExpressionFactory', + 'ExpressionRegistration', + 'IPyomoPresolver', + 'IPyomoPresolveAction', + 'IParamRepresentation', + 'ParamRepresentationFactory', + 'IPyomoScriptPreprocess', + 'IPyomoScriptCreateModel', + 'IPyomoScriptCreateDataPortal', + 'IPyomoScriptModifyInstance', + 'IPyomoScriptPrintModel', + 'IPyomoScriptPrintInstance', + 'IPyomoScriptSaveInstance', + 'IPyomoScriptPrintResults', + 'IPyomoScriptSaveResults', + 'IPyomoScriptPostprocess', + 'ModelComponentFactory', + 'Transformation', + 'TransformationFactory', +] from pyomo.core.base.component import ModelComponentFactory from pyomo.core.base.transformation import ( - Transformation, TransformationFactory, TransformationData, - TransformationInfo, TransformationTimer + Transformation, + TransformationFactory, + TransformationData, + TransformationInfo, + TransformationTimer, ) from pyomo.scripting.interface import ( - implements, Interface, Plugin, ExtensionPoint, DeprecatedInterface, + implements, + Interface, + Plugin, + ExtensionPoint, + DeprecatedInterface, pyomo_callback, IPyomoPresolver, IPyomoPresolveAction, @@ -64,7 +77,6 @@ class IPyomoExpression(DeprecatedInterface): - def type(self): """Return the type of expression""" @@ -74,4 +86,3 @@ def create(self, args): class IParamRepresentation(DeprecatedInterface): pass - diff --git a/pyomo/core/base/range.py b/pyomo/core/base/range.py index 59a1c633685..b0863f11207 100644 --- a/pyomo/core/base/range.py +++ b/pyomo/core/base/range.py @@ -15,16 +15,21 @@ try: from math import remainder except ImportError: - def remainder(a,b): + + def remainder(a, b): ans = a % b - if ans > abs(b/2.): + if ans > abs(b / 2.0): ans -= b return ans + _inf = float('inf') _infinite = {_inf, -_inf} -class RangeDifferenceError(ValueError): pass + +class RangeDifferenceError(ValueError): + pass + class NumericRange(object): """A representation of a numeric range. @@ -55,16 +60,22 @@ class NumericRange(object): of the range is closed. Open ranges are only allowed for continuous NumericRange objects. """ - __slots__ = ('start','end','step','closed') - _EPS = 1e-15 - _types_comparable_to_int = {int,} - _closedMap = {True:True, False:False, - '[':True, ']':True, '(':False, ')':False} - def __init__(self, start, end, step, closed=(True,True)): + __slots__ = ('start', 'end', 'step', 'closed') + _EPS = 1e-15 + _types_comparable_to_int = {int} + _closedMap = { + True: True, + False: False, + '[': True, + ']': True, + '(': False, + ')': False, + } + + def __init__(self, start, end, step, closed=(True, True)): if int(step) != step: - raise ValueError( - "NumericRange step must be int (got %s)" % (step,)) + raise ValueError("NumericRange step must be int (got %s)" % (step,)) step = int(step) if start is None: start = -_inf @@ -73,16 +84,18 @@ def __init__(self, start, end, step, closed=(True,True)): if step: if start == -_inf: - raise ValueError("NumericRange: start must not be None/-inf " - "for non-continuous steps") - if (end-start)*step < 0: + raise ValueError( + "NumericRange: start must not be None/-inf " + "for non-continuous steps" + ) + if (end - start) * step < 0: raise ValueError( "NumericRange: start, end ordering incompatible " - "with step direction (got [%s:%s:%s])" % (start,end,step) + "with step direction (got [%s:%s:%s])" % (start, end, step) ) if end not in _infinite: - n = int( (end - start) // step ) - new_end = start + n*step + n = int((end - start) // step) + new_end = start + n * step assert abs(end - new_end) < abs(step) end = new_end # It is important (for iterating) that all finite @@ -93,7 +106,7 @@ def __init__(self, start, end, step, closed=(True,True)): elif end < start: # and step == 0 raise ValueError( "NumericRange: start must be <= end for " - "continuous ranges (got %s..%s)" % (start,end) + "continuous ranges (got %s..%s)" % (start, end) ) if start == end: # If this is a scalar, we will force the step to be 0 (so that @@ -105,10 +118,11 @@ def __init__(self, start, end, step, closed=(True,True)): self.step = step self.closed = (self._closedMap[closed[0]], self._closedMap[closed[1]]) - if self.isdiscrete() and self.closed != (True,True): + if self.isdiscrete() and self.closed != (True, True): raise ValueError( "NumericRange %s is discrete, but passed closed=%s." - " Discrete ranges must be closed." % (self, self.closed,)) + " Discrete ranges must be closed." % (self, self.closed) + ) def __getstate__(self): """ @@ -116,7 +130,7 @@ def __getstate__(self): This method must be defined because this class uses slots. """ - state = {} #super(NumericRange, self).__getstate__() + state = {} # super(NumericRange, self).__getstate__() for i in NumericRange.__slots__: state[i] = getattr(self, i) return state @@ -137,11 +151,12 @@ def __str__(self): if not self.isdiscrete(): return "%s%s..%s%s" % ( "[" if self.closed[0] else "(", - self.start, self.end, + self.start, + self.end, "]" if self.closed[1] else ")", ) if self.start == self.end: - return "[%s]" % (self.start, ) + return "[%s]" % (self.start,) elif self.step == 1: return "[%s:%s]" % (self.start, self.end) else: @@ -152,10 +167,12 @@ def __str__(self): def __eq__(self, other): if type(other) is not NumericRange: return False - return self.start == other.start \ - and self.end == other.end \ - and self.step == other.step \ + return ( + self.start == other.start + and self.end == other.end + and self.step == other.step and self.closed == other.closed + ) def __ne__(self, other): return not self.__eq__(other) @@ -169,15 +186,19 @@ def __contains__(self, value): try: # Note: trap "value[0] is not value" to catch things like # single-character strings - if hasattr(value, '__len__') and hasattr(value, '__getitem__') \ - and len(value) == 1 and value[0] is not value: + if ( + hasattr(value, '__len__') + and hasattr(value, '__getitem__') + and len(value) == 1 + and value[0] is not value + ): return value[0] in self except: pass # See if this class behaves like a "normal" number: both # comparable and creatable try: - if not ( bool(value - 0 > 0) ^ bool(value - 0 <= 0) ): + if not (bool(value - 0 > 0) ^ bool(value - 0 <= 0)): return False elif value.__class__(0) != 0 or not value.__class__(0) == 0: return False @@ -190,13 +211,11 @@ def __contains__(self, value): _dir = math.copysign(1, self.step) _from_start = value - self.start return ( - 0 <= _dir*_from_start <= _dir*(self.end - self.start) + 0 <= _dir * _from_start <= _dir * (self.end - self.start) and abs(remainder(_from_start, self.step)) <= self._EPS ) else: - return ( - value >= self.start if self.closed[0] else value > self.start - ) and ( + return (value >= self.start if self.closed[0] else value > self.start) and ( value <= self.end if self.closed[1] else value < self.end ) @@ -233,8 +252,7 @@ def isdiscrete(self): return self.step or self.start == self.end def isfinite(self): - return (self.step and self.end not in _infinite - ) or self.end == self.start + return (self.step and self.end not in _infinite) or self.end == self.start def isdisjoint(self, other): if not isinstance(other, NumericRange): @@ -256,18 +274,15 @@ def isdisjoint(self, other): # We now need to check a continuous set is a subset of a discrete # set and the continuous set sits between discrete points if self.step: - return NumericRange._continuous_discrete_disjoint( - other, self) + return NumericRange._continuous_discrete_disjoint(other, self) elif other.step: - return NumericRange._continuous_discrete_disjoint( - self, other) + return NumericRange._continuous_discrete_disjoint(self, other) else: # 2 continuous sets, with overlapping end points: not disjoint return False # both sets are discrete if self.step == other.step: - return abs(remainder(other.start-self.start, self.step)) \ - > self._EPS + return abs(remainder(other.start - self.start, self.step)) > self._EPS # Two infinite discrete sets will *eventually* have a common # point. This is trivial for coprime integer steps. For steps # with gcd > 1, we need to ensure that the two ranges are @@ -280,11 +295,13 @@ def isdisjoint(self, other): # Personally, anyone making a discrete set with a non-integer # step is asking for trouble. Maybe the better solution is to # require that the step be integer (which is what we do). - elif self.end in _infinite and other.end in _infinite \ - and self.step*other.step > 0: + elif ( + self.end in _infinite + and other.end in _infinite + and self.step * other.step > 0 + ): gcd = NumericRange._gcd(self.step, other.step) - return abs(remainder(other.start-self.start, gcd)) \ - > self._EPS + return abs(remainder(other.start - self.start, gcd)) > self._EPS # OK - at this point, there are a finite number of set members # that can overlap. Just check all the members of one set # against the other @@ -294,11 +311,11 @@ def isdisjoint(self, other): end = max(self.end, min(other.start, other.end)) i = 0 item = self.start - while (self.step>0 and item <= end) or (self.step<0 and item >= end): + while (self.step > 0 and item <= end) or (self.step < 0 and item >= end): if item in other: return False i += 1 - item = self.start + self.step*i + item = self.start + self.step * i return True def issubset(self, other): @@ -307,7 +324,7 @@ def issubset(self, other): return True elif type(other) is NonNumericRange: return False - # Other non NumericRange objects wil generate + # Other non NumericRange objects will generate # AttributeError exceptions below # First, do a simple sanity check on the endpoints @@ -360,15 +377,15 @@ def normalize_bounds(self): return self.end, self.start, (self.closed[1], self.closed[0]) def _nooverlap(self, other): - """Return True if the ranges for self and other are strictly separate - - """ + """Return True if the ranges for self and other are strictly separate""" s1, e1, c1 = self.normalize_bounds() s2, e2, c2 = other.normalize_bounds() - if ( e1 < s2 - or e2 < s1 - or ( e1 == s2 and not ( c1[1] and c2[0] )) - or ( e2 == s1 and not ( c2[1] and c1[0] )) ): + if ( + e1 < s2 + or e2 < s1 + or (e1 == s2 and not (c1[1] and c2[0])) + or (e2 == s1 and not (c2[1] and c1[0])) + ): return True return False @@ -397,30 +414,30 @@ def _split_ranges(cnr, new_step): _dir = math.copysign(1, cnr.step) _subranges = [] for i in range(int(abs(new_step // cnr.step))): - if _dir*(cnr.start + i*cnr.step) > _dir*cnr.end: + if _dir * (cnr.start + i * cnr.step) > _dir * cnr.end: # Once we walk past the end of the range, we are done # (all remaining offsets will be farther past the end) break - _subranges.append(NumericRange( - cnr.start + i*cnr.step, cnr.end, _dir*new_step - )) + _subranges.append( + NumericRange(cnr.start + i * cnr.step, cnr.end, _dir * new_step) + ) return _subranges @staticmethod - def _gcd(a,b): + def _gcd(a, b): while b != 0: - a,b = b, a % b + a, b = b, a % b return a @staticmethod - def _lcm(a,b): - gcd = NumericRange._gcd(a,b) + def _lcm(a, b): + gcd = NumericRange._gcd(a, b) if not gcd: return 0 return a * b / gcd - def _step_lcm(self,other_ranges): + def _step_lcm(self, other_ranges): """This computes an approximate Least Common Multiple step""" # Note: scalars are discrete, but have a step of 0. Pretend the # step is 1 so that we can compute a realistic "step lcm" @@ -433,7 +450,7 @@ def _step_lcm(self,other_ranges): b = o.step or 1 else: b = 0 - lcm = NumericRange._lcm(a,b) + lcm = NumericRange._lcm(a, b) # This is a modified LCM. LCM(n,0) == 0, but for step # calculations, we want it to be n if lcm: @@ -456,8 +473,9 @@ def _push_to_discrete_element(self, val, push_to_next_larger_value): _rndFcn = math.ceil if self.step > 0 else math.floor else: _rndFcn = math.floor if self.step > 0 else math.ceil - return self.start + self.step*_rndFcn( - (val - self.start) / float(self.step) ) + return self.start + self.step * _rndFcn( + (val - self.start) / float(self.step) + ) def range_difference(self, other_ranges): """Return the difference between this range and a list of other ranges. @@ -508,7 +526,7 @@ def range_difference(self, other_ranges): if t.isdiscrete(): # s and t are discrete ranges. Note if there is a # discrete range in the list of ranges, then lcm > 0 - if s.isdiscrete() and (s.start-t.start) % lcm != 0: + if s.isdiscrete() and (s.start - t.start) % lcm != 0: # s is offset from t and cannot remove any # elements _new_subranges.append(t) @@ -520,32 +538,33 @@ def range_difference(self, other_ranges): if s.isdiscrete() and not t.isdiscrete(): # # This handles the special case of continuous-discrete - if ((s_min == -_inf and t.start == -_inf) or - (s_max == _inf and t.end == _inf)): + if (s_min == -_inf and t.start == -_inf) or ( + s_max == _inf and t.end == _inf + ): raise RangeDifferenceError( "We do not support subtracting an infinite " "discrete range %s from an infinite continuous " - "range %s" % (s,t)) + "range %s" % (s, t) + ) # At least one of s_min amd t.start must be non-inf - start = max( - s_min, s._push_to_discrete_element(t.start, True)) + start = max(s_min, s._push_to_discrete_element(t.start, True)) # At least one of s_max amd t.end must be non-inf end = min(s_max, s._push_to_discrete_element(t.end, False)) if t.start < start: - _new_subranges.append(NumericRange( - t.start, start, 0, (t.closed[0], False) - )) - if s.step: # i.e., not a single point - for i in range(int(start//s.step), int(end//s.step)): - _new_subranges.append(NumericRange( - i*s.step, (i+1)*s.step, 0, '()' - )) + _new_subranges.append( + NumericRange(t.start, start, 0, (t.closed[0], False)) + ) + if s.step: # i.e., not a single point + for i in range(int(start // s.step), int(end // s.step)): + _new_subranges.append( + NumericRange(i * s.step, (i + 1) * s.step, 0, '()') + ) if t.end > end: - _new_subranges.append(NumericRange( - end, t.end, 0, (False,t.closed[1]) - )) + _new_subranges.append( + NumericRange(end, t.end, 0, (False, t.closed[1])) + ) else: # # This handles discrete-discrete, @@ -559,7 +578,7 @@ def range_difference(self, other_ranges): _min = min(t_max, s_min) if not t.step: closed1 = not s_c[0] if _min is s_min else t_c[1] - _closed = ( t_c[0], closed1 ) + _closed = (t_c[0], closed1) _step = abs(t.step) _rng = t_min, _min if t_min == -_inf and t.step: @@ -567,8 +586,9 @@ def range_difference(self, other_ranges): _rng = _rng[1], _rng[0] _closed = _closed[1], _closed[0] - _new_subranges.append(NumericRange( - _rng[0], _rng[1], _step, _closed)) + _new_subranges.append( + NumericRange(_rng[0], _rng[1], _step, _closed) + ) elif t_min == s_min and t_c[0] and not s_c[0]: _new_subranges.append(NumericRange(t_min, t_min, 0)) @@ -580,9 +600,9 @@ def range_difference(self, other_ranges): _max = max(t_min, s_max) if not t.step: closed0 = not s_c[1] if _max is s_max else t_c[0] - _new_subranges.append(NumericRange( - _max, t_max, abs(t.step), (closed0, t_c[1]) - )) + _new_subranges.append( + NumericRange(_max, t_max, abs(t.step), (closed0, t_c[1])) + ) elif t_max == s_max and t_c[1] and not s_c[1]: _new_subranges.append(NumericRange(t_max, t_max, 0)) _this = _new_subranges @@ -591,8 +611,8 @@ def range_difference(self, other_ranges): def range_intersection(self, other_ranges): """Return the intersection between this range and a set of other ranges. - Paramters - --------- + Parameters + ---------- other_ranges: `iterable` An iterable of other range objects to intersect with this range @@ -632,7 +652,7 @@ def range_intersection(self, other_ranges): if s.isdiscrete() and t.isdiscrete(): # s and t are discrete ranges. Note if there is a # finite range in the list of ranges, then lcm > 0 - if (s.start-t.start) % lcm != 0: + if (s.start - t.start) % lcm != 0: # s is offset from t and cannot have any # elements in common continue @@ -652,7 +672,7 @@ def range_intersection(self, other_ranges): t._push_to_discrete_element(s_max, False), s._push_to_discrete_element(t_max, False), ) - c = [True,True] + c = [True, True] if intersect_start == t_min: c[0] &= t_c[0] if intersect_start == s_min: @@ -662,13 +682,13 @@ def range_intersection(self, other_ranges): if intersect_end == s_max: c[1] &= s_c[1] if step and intersect_start == -_inf: - ans.append(NumericRange( - intersect_end, intersect_start, -step, (c[1], c[0]) - )) + ans.append( + NumericRange( + intersect_end, intersect_start, -step, (c[1], c[0]) + ) + ) else: - ans.append(NumericRange( - intersect_start, intersect_end, step, c - )) + ans.append(NumericRange(intersect_start, intersect_end, step, c)) return ans @@ -707,7 +727,7 @@ def __getstate__(self): This method must be defined because this class uses slots. """ - state = {} #super(NonNumericRange, self).__getstate__() + state = {} # super(NonNumericRange, self).__getstate__() for i in NonNumericRange.__slots__: state[i] = getattr(self, i) return state @@ -795,8 +815,7 @@ def range_intersection(self, other_ranges): class RangeProduct(object): - """A range-like object for representing the cross product of ranges - """ + """A range-like object for representing the cross product of ranges""" __slots__ = ('range_lists',) @@ -809,17 +828,22 @@ def __init__(self, range_lists): assert subrange.__class__ is list def __str__(self): - return "<" + ', '.join( - str(tuple(_)) if len(_) > 1 else str(_[0]) - for _ in self.range_lists - )+">" + return ( + "<" + + ', '.join( + str(tuple(_)) if len(_) > 1 else str(_[0]) for _ in self.range_lists + ) + + ">" + ) __repr__ = __str__ def __eq__(self, other): - return isinstance(other, RangeProduct) \ - and self.range_difference([other]) == [] \ + return ( + isinstance(other, RangeProduct) + and self.range_difference([other]) == [] and other.range_difference([self]) == [] + ) def __ne__(self, other): return not self.__eq__(other) @@ -829,8 +853,10 @@ def __contains__(self, value): return False if len(value) != len(self.range_lists): return False - return all(any(val in rng for rng in rng_list) - for val, rng_list in zip(value, self.range_lists)) + return all( + any(val in rng for rng in rng_list) + for val, rng_list in zip(value, self.range_lists) + ) def __getstate__(self): """ @@ -838,7 +864,7 @@ def __getstate__(self): This method must be defined because this class uses slots. """ - state = {} #super(RangeProduct, self).__getstate__() + state = {} # super(RangeProduct, self).__getstate__() for i in RangeProduct.__slots__: state[i] = getattr(self, i) return state @@ -856,12 +882,14 @@ def __setstate__(self, state): object.__setattr__(self, key, val) def isdiscrete(self): - return all(all(rng.isdiscrete() for rng in rng_list) - for rng_list in self.range_lists) + return all( + all(rng.isdiscrete() for rng in rng_list) for rng_list in self.range_lists + ) def isfinite(self): - return all(all(rng.isfinite() for rng in rng_list) - for rng_list in self.range_lists) + return all( + all(rng.isfinite() for rng in rng_list) for rng_list in self.range_lists + ) def isdisjoint(self, other): if type(other) is AnyRange: @@ -873,7 +901,7 @@ def isdisjoint(self, other): # Remember, range_lists is a list of lists of range objects. As # isdisjoint only accepts range objects, we need to unpack # everything. Non-disjoint range products require overlaps in - # all dimentions. + # all dimensions. for s, o in zip(self.range_lists, other.range_lists): if all(s_rng.isdisjoint(o_rng) for s_rng in s for o_rng in o): return True @@ -887,7 +915,7 @@ def issubset(self, other): def range_difference(self, other_ranges): # The goal is to start with a single range product and create a # set of range products that, when combined, model the - # range_difference. This will potentally create (redundant) + # range_difference. This will potentially create (redundant) # overlapping regions, but that is OK. ans = [self] N = len(self.range_lists) @@ -906,8 +934,7 @@ def range_difference(self, other_ranges): for dim in range(N): remainder = [] for r in rp.range_lists[dim]: - remainder.extend( - r.range_difference(other.range_lists[dim])) + remainder.extend(r.range_difference(other.range_lists[dim])) if remainder: tmp.append(RangeProduct(list(rp.range_lists))) tmp[-1].range_lists[dim] = remainder @@ -917,7 +944,7 @@ def range_difference(self, other_ranges): def range_intersection(self, other_ranges): # The goal is to start with a single range product and create a # set of range products that, when combined, model the - # range_difference. This will potentally create (redundant) + # range_difference. This will potentially create (redundant) # overlapping regions, but that is OK. ans = list(self.range_lists) N = len(self.range_lists) diff --git a/pyomo/core/base/rangeset.py b/pyomo/core/base/rangeset.py index 9634f76170c..18dedb84c34 100644 --- a/pyomo/core/base/rangeset.py +++ b/pyomo/core/base/rangeset.py @@ -14,7 +14,9 @@ from .set import RangeSet from pyomo.common.deprecation import deprecation_warning + deprecation_warning( 'The pyomo.core.base.rangeset module is deprecated. ' 'Import RangeSet objects from pyomo.core.base.set or pyomo.core.', - version='5.7') + version='5.7', +) diff --git a/pyomo/core/base/reference.py b/pyomo/core/base/reference.py index 51ef4dd2e17..79ae83b97be 100644 --- a/pyomo/core/base/reference.py +++ b/pyomo/core/base/reference.py @@ -11,28 +11,39 @@ from pyomo.common import DeveloperError from pyomo.common.collections import ( - UserDict, OrderedDict, Mapping, MutableMapping, - Set as collections_Set, Sequence, + UserDict, + Mapping, + MutableMapping, + Set as collections_Set, + Sequence, ) -from pyomo.core.base.set import SetOf, OrderedSetOf, _SetDataBase +from pyomo.common.modeling import NOTSET +from pyomo.core.base.set import DeclareGlobalSet, Set, SetOf, OrderedSetOf, _SetDataBase from pyomo.core.base.component import Component, ComponentData -from pyomo.core.base.global_set import ( - UnindexedComponent_set, -) -from pyomo.core.base.indexed_component import ( - IndexedComponent, normalize_index, -) +from pyomo.core.base.global_set import UnindexedComponent_set +from pyomo.core.base.enums import SortComponents +from pyomo.core.base.indexed_component import IndexedComponent, normalize_index from pyomo.core.base.indexed_component_slice import ( - IndexedComponent_slice, _IndexedComponent_slice_iter + IndexedComponent_slice, + _IndexedComponent_slice_iter, ) from pyomo.core.base.util import flatten_tuple from pyomo.common.deprecation import deprecated -_NotSpecified = object() - _UnindexedComponent_key = list(UnindexedComponent_set) _UnindexedComponent_base_key = tuple(UnindexedComponent_set) +DeclareGlobalSet( + Set( + initialize=UnindexedComponent_set, + name='UnindexedComponent_ReferenceSet', + doc='An indexing set used by references to unindexed (scalar) ' + 'components that is equivalent to but NOT the UnindexedComponent_set', + ), + globals(), +) + + class _fill_in_known_wildcards(object): """Variant of "six.advance_iterator" that substitutes wildcard values @@ -64,9 +75,8 @@ class _fill_in_known_wildcards(object): look_in_index. [default: False] """ - def __init__(self, wildcard_values, - look_in_index=False, - get_if_not_present=False): + + def __init__(self, wildcard_values, look_in_index=False, get_if_not_present=False): self.base_key = wildcard_values self.key = list(wildcard_values) self.known_slices = set() @@ -107,13 +117,15 @@ def __call__(self, _slice): raise SliceEllipsisLookupError( "Cannot lookup elements in a _ReferenceDict when the " "underlying slice object contains ellipsis over a jagged " - "(dimen=None) Set") + "(dimen=None) Set" + ) try: # Here we assemble the index we will actually use to access # the component. idx = tuple( _slice.fixed[i] if i in _slice.fixed else self.key.pop(0) - for i in range(idx_count)) + for i in range(idx_count) + ) # _slice corresponds to some sliced entry in the call/iter stacks # that contains the information describing the slice. # Here we fill in an index with the fixed indices from the slice @@ -123,7 +135,8 @@ def __call__(self, _slice): raise KeyError( "Insufficient values for slice of indexed component '%s' " "(found evaluating slice index %s)" - % (_slice.component.name, self.base_key)) + % (_slice.component.name, self.base_key) + ) if idx in _slice.component: # We have found a matching component at this level of the @@ -149,31 +162,32 @@ def __call__(self, _slice): # creation (and return) of the new component data. if idx in _slice.component.index_set(): _slice.last_index = idx - return _slice.component[idx] if self.get_if_not_present \ - else None + return _slice.component[idx] if self.get_if_not_present else None elif len(idx) == 1 and idx[0] in _slice.component.index_set(): _slice.last_index = idx - return _slice.component[idx[0]] if self.get_if_not_present \ - else None + return _slice.component[idx[0]] if self.get_if_not_present else None raise KeyError( "Index %s is not valid for indexed component '%s' " "(found evaluating slice index %s)" - % (idx, _slice.component.name, self.base_key)) + % (idx, _slice.component.name, self.base_key) + ) def check_complete(self): if not self.key: return - if (self.key == _UnindexedComponent_key and - self.base_key == _UnindexedComponent_base_key): + if ( + self.key == _UnindexedComponent_key + and self.base_key == _UnindexedComponent_base_key + ): return - raise KeyError("Extra (unused) values for slice index %s" - % ( self.base_key, )) + raise KeyError("Extra (unused) values for slice index %s" % (self.base_key,)) class SliceEllipsisLookupError(LookupError): pass + class _ReferenceDict(MutableMapping): """A dict-like object whose values are defined by a slice. @@ -188,6 +202,7 @@ class _ReferenceDict(MutableMapping): component_slice : :py:class:`IndexedComponent_slice` The slice object that defines the "members" of this mutable mapping. """ + def __init__(self, component_slice): self._slice = component_slice @@ -226,9 +241,7 @@ def __getitem__(self, key): try: # This calls IC_slice_iter.__next__, which calls # _fill_in_known_wildcards. - return next( - self._get_iter(self._slice, key, get_if_not_present=True) - ) + return next(self._get_iter(self._slice, key, get_if_not_present=True)) except SliceEllipsisLookupError: if type(key) is tuple and len(key) == 1: key = key[0] @@ -252,20 +265,22 @@ def __setitem__(self, key, val): tmp._call_stack[-1] = ( IndexedComponent_slice.set_item, tmp._call_stack[-1][1], - val ) + val, + ) elif op == IndexedComponent_slice.slice_info: tmp._call_stack[-1] = ( IndexedComponent_slice.set_item, tmp._call_stack[-1][1], - val ) + val, + ) elif op == IndexedComponent_slice.get_attribute: tmp._call_stack[-1] = ( IndexedComponent_slice.set_attribute, tmp._call_stack[-1][1], - val ) + val, + ) else: - raise DeveloperError( - "Unexpected slice _call_stack operation: %s" % op) + raise DeveloperError("Unexpected slice _call_stack operation: %s" % op) try: next(self._get_iter(tmp, key, get_if_not_present=True)) except StopIteration: @@ -279,54 +294,56 @@ def __delitem__(self, key): # change it to delete the item tmp._call_stack[-1] = ( IndexedComponent_slice.del_item, - tmp._call_stack[-1][1] ) + tmp._call_stack[-1][1], + ) elif op == IndexedComponent_slice.slice_info: assert len(tmp._call_stack) == 1 _iter = self._get_iter(tmp, key) - try: - next(_iter) - del _iter._iter_stack[0].component[_iter.get_last_index()] - return - except StopIteration: - raise KeyError("KeyError: %s" % (key,)) + # The iterator should map all StopIteration exceptions to KeyErrors + next(_iter) + del _iter._iter_stack[0].component[_iter.get_last_index()] + return elif op == IndexedComponent_slice.get_attribute: # If the last attribute of the slice retrieves an attribute, # change it to delete the attribute tmp._call_stack[-1] = ( IndexedComponent_slice.del_attribute, - tmp._call_stack[-1][1] ) + tmp._call_stack[-1][1], + ) else: - raise DeveloperError( - "Unexpected slice _call_stack operation: %s" % op) + raise DeveloperError("Unexpected slice _call_stack operation: %s" % op) try: next(self._get_iter(tmp, key)) except StopIteration: pass def __iter__(self): - return self._slice.wildcard_keys() + return self._slice.wildcard_keys(SortComponents.UNSORTED) def __len__(self): # Note that unlike for regular dicts, len() of a _ReferenceDict # is very slow (linear time). return sum(1 for i in self._slice) - def items(self): + def keys(self, sort=SortComponents.UNSORTED): + return self._slice.wildcard_keys(sort) + + def items(self, sort=SortComponents.UNSORTED): """Return the wildcard, value tuples for this ReferenceDict This method is necessary because the default implementation iterates over the keys and looks the values up in the dictionary. Unfortunately some slices have structures that make looking up components by the wildcard keys very expensive - (linear time; e.g., the use of elipses with jagged sets). By + (linear time; e.g., the use of ellipses with jagged sets). By implementing this method without using lookups, general methods that iterate over everything (like component.pprint()) will still be linear and not quadratic time. """ - return self._slice.wildcard_items() + return self._slice.wildcard_items(sort) - def values(self): + def values(self, sort=SortComponents.UNSORTED): """Return the values for this ReferenceDict This method is necessary because the default implementation @@ -339,15 +356,15 @@ def values(self): still be linear and not quadratic time. """ - return iter(self._slice) + return self._slice.wildcard_values(sort) - @deprecated('The iteritems method is deprecated. Use dict.items().', - version='6.0') + @deprecated('The iteritems method is deprecated. Use dict.items().', version='6.0') def iteritems(self): return self.items() - @deprecated('The itervalues method is deprecated. Use dict.values().', - version='6.0') + @deprecated( + 'The itervalues method is deprecated. Use dict.values().', version='6.0' + ) def itervalues(self): return self.values() @@ -362,9 +379,7 @@ def _get_iter(self, _slice, key, get_if_not_present=False): if normalize_index.flatten: key = flatten_tuple(key) return _IndexedComponent_slice_iter( - _slice, - _fill_in_known_wildcards( - key, get_if_not_present=get_if_not_present) + _slice, _fill_in_known_wildcards(key, get_if_not_present=get_if_not_present) ) @@ -392,6 +407,7 @@ class _ReferenceSet(collections_Set): The slice object that defines the "members" of this set """ + def __init__(self, component_slice): self._slice = component_slice @@ -412,7 +428,7 @@ def __contains__(self, key): return False def __iter__(self): - return self._slice.index_wildcard_keys() + return self._slice.index_wildcard_keys(False) def __len__(self): return sum(1 for _ in self) @@ -425,12 +441,18 @@ def _get_iter(self, _slice, key): return _IndexedComponent_slice_iter( _slice, _fill_in_known_wildcards(key, look_in_index=True), - iter_over_index=True + iter_over_index=True, ) def __str__(self): return "ReferenceSet(%s)" % (self._slice,) + def ordered_iter(self): + return self._slice.index_wildcard_keys(SortComponents.ORDERED_INDICES) + + def sorted_iter(self): + return self._slice.index_wildcard_keys(SortComponents.SORTED_INDICES) + def _identify_wildcard_sets(iter_stack, index): # Note that we can only _identify_wildcard_sets for a Reference if @@ -448,7 +470,7 @@ def _identify_wildcard_sets(iter_stack, index): # _iter_stack. Each dict maps position within that level's # component's "subsets" list to the set at that position if it is a # wildcard set. - wildcard_stack = [None]*len(iter_stack) + wildcard_stack = [None] * len(iter_stack) for i, level in enumerate(iter_stack): if level is not None: offset = 0 @@ -457,8 +479,9 @@ def _identify_wildcard_sets(iter_stack, index): wildcard_sets = {} # `wildcard_sets` maps position in the current level's # "subsets list" to its set if that set is a wildcard. - for j, s in enumerate(level.component.index_set().subsets( - expand_all_set_operators=False)): + for j, s in enumerate( + level.component.index_set().subsets(expand_all_set_operators=False) + ): # Iterate over the sets that could possibly be wildcards if s is UnindexedComponent_set: wildcard_sets[j] = s @@ -472,8 +495,9 @@ def _identify_wildcard_sets(iter_stack, index): if level.fixed is None: wildcard_count = s.dimen else: - wildcard_count = sum( 1 for k in range(s.dimen) - if k+offset not in level.fixed ) + wildcard_count = sum( + 1 for k in range(s.dimen) if k + offset not in level.fixed + ) # `k+offset` is a position in the "total" (flattened) # index tuple. All the _slice_generator's information # is in terms of this total index tuple. @@ -497,7 +521,7 @@ def _identify_wildcard_sets(iter_stack, index): # with ellipsis should get caught by the check for s.dimen # above. # - #if offset != level.explicit_index_count: + # if offset != level.explicit_index_count: # return None wildcard_stack[i] = wildcard_sets if not index: @@ -526,7 +550,7 @@ def _identify_wildcard_sets(iter_stack, index): if len(index[i]) != len(level): return None # if any wildcard "subset" differs in position or set. - if any(index[i].get(j,None) is not _set for j,_set in level.items()): + if any(index[i].get(j, None) is not _set for j, _set in level.items()): return None # These checks seem to intentionally preclude # m.b1[:].v and m.b2[1,:].v @@ -539,7 +563,8 @@ def _identify_wildcard_sets(iter_stack, index): # Reference(m.c[:].v) return index -def Reference(reference, ctype=_NotSpecified): + +def Reference(reference, ctype=NOTSET): """Creates a component that references other components ``Reference`` generates a *reference component*; that is, an indexed @@ -639,79 +664,112 @@ def Reference(reference, ctype=_NotSpecified): """ referent = reference + # + # Before constructing the reference object that we will return, + # we need to know its index set, its ctype, and its _data + # dict. The following if statement sets the _data dict + # for all possible input types and sets up data structures + # necessary to determine the index set and ctype. + # if isinstance(reference, IndexedComponent_slice): - _data = _ReferenceDict(reference) - _iter = iter(reference) slice_idx = [] index = None + _data = _ReferenceDict(reference) + _iter = iter(reference) elif isinstance(reference, Component): + slice_idx = None + if reference.is_indexed(): + index = reference.index_set() + else: + index = UnindexedComponent_ReferenceSet + if ctype is NOTSET: + ctype = reference.ctype reference = reference[...] _data = _ReferenceDict(reference) - _iter = iter(reference) - slice_idx = [] - index = None + # index and ctype are now set; no need to iterate over the slice + _iter = () elif isinstance(reference, ComponentData): + slice_idx = None + index = UnindexedComponent_ReferenceSet + if ctype is NOTSET: + ctype = reference.ctype reference = IndexedComponent_slice(reference.parent_component())[ - reference.index()] + reference.index() + ] _data = _ReferenceDict(reference) - _iter = iter(reference) - slice_idx = [] - index = SetOf(UnindexedComponent_set) + # index and ctype are now set; no need to iterate over the slice + _iter = () elif isinstance(reference, Mapping): + slice_idx = None _data = _ReferenceDict_mapping(dict(reference)) _iter = _data.values() - slice_idx = None index = SetOf(_data) elif isinstance(reference, Sequence): - _data = _ReferenceDict_mapping(OrderedDict(enumerate(reference))) - _iter = _data.values() slice_idx = None + _data = _ReferenceDict_mapping(dict(enumerate(reference))) + _iter = _data.values() index = OrderedSetOf(_data) else: raise TypeError( "First argument to Reference constructors must be a " "component, component slice, Sequence, or Mapping (received %s)" - % (type(reference).__name__,)) + % (type(reference).__name__,) + ) - if ctype is _NotSpecified: + if ctype is NOTSET: ctypes = set() else: - # If the caller specified a ctype, then we will prepopulate the - # list to improve our chances of avoiding a scan of the entire - # Reference (by simulating multiple ctypes having been found, we - # can break out as soon as we know that there are not common - # subsets). - ctypes = set((1,2)) + if slice_idx is None: + # A slice was not provided. We know the ctype and that there + # cannot be common subsets (because a slice was not provided). + # We don't need to iterate over the data objects at all. + # Note that this is redundant for Component and ComponentData + # inputs, as _iter was already empty. + _iter = () for obj in _iter: - ctypes.add(obj.ctype) - if not isinstance(obj, ComponentData): - # This object is not a ComponentData (likely it is a pure - # IndexedComponent container). As the Reference will treat - # it as if it *were* a ComponentData, we will skip ctype - # identification and return a base IndexedComponent, thereby - # preventing strange exceptions in the writers and with - # things like pprint(). Of course, all of this logic is - # skipped if the User knows better and forced a ctype on us. - ctypes.add(0) + # + # We were provided a collection of ComponentData, either via + # a slice, sequence, or mapping. Now we iterate over these + # objects to attempt to infer both the ctype and, if a slice + # was provided, the index_set (determined by slice_index). + # + if ctype is NOTSET: + ctypes.add(obj.ctype) + if not isinstance(obj, ComponentData): + # This object is not a ComponentData (likely it is a pure + # IndexedComponent container). As the Reference will treat + # it as if it *were* a ComponentData, we will skip ctype + # identification and return a base IndexedComponent, thereby + # preventing strange exceptions in the writers and with + # things like pprint(). Of course, all of this logic is + # skipped if the User knows better and forced a ctype on us. + ctypes.add(0) # Note that we want to walk the entire slice, unless we can # prove that BOTH there aren't common indexing sets (i.e., index # is None) AND there is more than one ctype. if slice_idx is not None: # As long as we haven't ruled out the possibility of common # wildcard sets, then we will use _identify_wildcard_sets to - # identify the wilcards for this obj and check compatibility + # identify the wildcards for this obj and check compatibility # of the wildcards with any previously-identified wildcards. slice_idx = _identify_wildcard_sets(_iter._iter_stack, slice_idx) - elif len(ctypes) > 1: + elif ctype is not NOTSET or len(ctypes) > 1: break if index is None: + # + # index is None, i.e. a slice was provided. If a slice index + # has been identified (by all slice members having the same + # "wildcard sets"), use this to construct an indexing set. + # Otherwise, use a _ReferenceSet. + # if not slice_idx: index = SetOf(_ReferenceSet(reference)) else: - wildcards = sum((sorted(lvl.items()) for lvl in slice_idx - if lvl is not None), []) + wildcards = sum( + (sorted(lvl.items()) for lvl in slice_idx if lvl is not None), [] + ) # Wildcards is a list of (coordinate, set) tuples. Coordinate # is that within the subsets list, and set is a wildcard set. index = wildcards[0][1] @@ -724,8 +782,10 @@ def Reference(reference, ctype=_NotSpecified): index = index * idx # index is now either a single Set, or a SetProduct of the # wildcard sets. - if ctype is _NotSpecified: + if ctype is NOTSET: if len(ctypes) == 1: + # If ctype is not set and only one ctype was identified above, + # use this ctype. ctype = ctypes.pop() else: ctype = IndexedComponent diff --git a/pyomo/core/base/set.py b/pyomo/core/base/set.py index e144c2e4e6f..17f69ad4c47 100644 --- a/pyomo/core/base/set.py +++ b/pyomo/core/base/set.py @@ -17,35 +17,50 @@ import weakref from pyomo.common.pyomo_typing import overload -from pyomo.common.deprecation import ( - deprecated, deprecation_warning, RenamedClass, -) +from pyomo.common.deprecation import deprecated, deprecation_warning, RenamedClass from pyomo.common.errors import DeveloperError, PyomoException from pyomo.common.log import is_debug_set from pyomo.common.modeling import NOTSET from pyomo.common.sorting import sorted_robust from pyomo.common.timing import ConstructionTimer + from pyomo.core.expr.numvalue import ( - native_types, native_numeric_types, as_numeric, value, is_constant, + native_types, + native_numeric_types, + as_numeric, + value, + is_constant, ) from pyomo.core.base.disable_methods import disable_methods from pyomo.core.base.initializer import ( - InitializerBase, Initializer, - CountedCallInitializer, IndexedCallInitializer, + InitializerBase, + Initializer, + CountedCallInitializer, + IndexedCallInitializer, ) from pyomo.core.base.range import ( - NumericRange, NonNumericRange, AnyRange, RangeProduct, + NumericRange, + NonNumericRange, + AnyRange, + RangeProduct, RangeDifferenceError, ) from pyomo.core.base.component import ( - _ComponentBase, Component, ComponentData, ModelComponentFactory, + _ComponentBase, + Component, + ComponentData, + ModelComponentFactory, ) from pyomo.core.base.indexed_component import ( - IndexedComponent, UnindexedComponent_set, normalize_index, + IndexedComponent, + UnindexedComponent_set, + normalize_index, rule_wrapper, ) from pyomo.core.base.global_set import ( - GlobalSets, GlobalSetBase, UnindexedComponent_index + GlobalSets, + GlobalSetBase, + UnindexedComponent_index, ) from collections.abc import Sequence @@ -53,7 +68,7 @@ logger = logging.getLogger('pyomo.core') -_prePython37 = sys.version_info[:2] < (3,7) +_prePython37 = sys.version_info[:2] < (3, 7) _inf = float('inf') @@ -109,28 +124,35 @@ implemented) through Mixin classes. """ + def process_setarg(arg): if isinstance(arg, _SetDataBase): return arg elif isinstance(arg, _ComponentBase): if isinstance(arg, IndexedComponent) and arg.is_indexed(): - raise TypeError("Cannot apply a Set operator to an " - "indexed %s component (%s)" - % (arg.ctype.__name__, arg.name,)) + raise TypeError( + "Cannot apply a Set operator to an " + "indexed %s component (%s)" % (arg.ctype.__name__, arg.name) + ) if isinstance(arg, Component): - raise TypeError("Cannot apply a Set operator to a non-Set " - "%s component (%s)" - % (arg.__class__.__name__, arg.name,)) + raise TypeError( + "Cannot apply a Set operator to a non-Set " + "%s component (%s)" % (arg.__class__.__name__, arg.name) + ) if isinstance(arg, ComponentData): - raise TypeError("Cannot apply a Set operator to a non-Set " - "component data (%s)" % (arg.name,)) + raise TypeError( + "Cannot apply a Set operator to a non-Set " + "component data (%s)" % (arg.name,) + ) # DEPRECATED: This functionality has never been documented, # and I don't know of a use of it in the wild. if hasattr(arg, 'set_options'): - deprecation_warning("The set_options set attribute is deprecated. " - "Please explicitly construct complex sets", - version='5.7.3') + deprecation_warning( + "The set_options set attribute is deprecated. " + "Please explicitly construct complex sets", + version='5.7.3', + ) # If the argument has a set_options attribute, then use # it to initialize a set args = arg.set_options @@ -139,10 +161,14 @@ def process_setarg(arg): ans = Set(**args) _init = args['initialize'] - if not ( inspect.isgenerator(_init) - or inspect.isfunction(_init) - or ( isinstance(_init, ComponentData) - and not _init.parent_component().is_constructed() )): + if not ( + inspect.isgenerator(_init) + or inspect.isfunction(_init) + or ( + isinstance(_init, ComponentData) + and not _init.parent_component().is_constructed() + ) + ): ans.construct() return ans @@ -175,7 +201,8 @@ def process_setarg(arg): "Cannot create a Set from data that does not support " "__contains__. Expected set-like object supporting " "collections.abc.Collection interface, but received '%s'." - % (type(arg).__name__,)) + % (type(arg).__name__,) + ) elif arg.__class__ is type: # This catches the (deprecated) RealSet API. return process_setarg(arg()) @@ -199,10 +226,12 @@ def process_setarg(arg): return ans -@deprecated('The set_options decorator is deprecated; create Sets from ' - 'functions explicitly by passing the function to the Set ' - 'constructor using the "initialize=" keyword argument.', - version='5.7') +@deprecated( + 'The set_options decorator is deprecated; create Sets from ' + 'functions explicitly by passing the function to the Set ' + 'constructor using the "initialize=" keyword argument.', + version='5.7', +) def set_options(**kwds): """ This is a decorator for set initializer functions. This @@ -218,11 +247,14 @@ def B_index(model): def B_index(model): return range(10) """ + def decorator(func): func.set_options = kwds return func + return decorator + def simple_set_rule(rule): """ This is a decorator that translates None into Set.End. @@ -238,7 +270,9 @@ def A_rule(model, i, j): return rule_wrapper(rule, {None: Set.End}) -class UnknownSetDimen(object): pass +class UnknownSetDimen(object): + pass + class SetInitializer(InitializerBase): """An Initializer wrapper for returning Set objects @@ -250,7 +284,8 @@ class SetInitializer(InitializerBase): initializers. """ - __slots__ = ('_set','verified') + + __slots__ = ('_set', 'verified') def __init__(self, init, allow_generators=True): self.verified = False @@ -258,8 +293,10 @@ def __init__(self, init, allow_generators=True): self._set = None else: self._set = Initializer( - init, allow_generators=allow_generators, - treat_sequences_as_mappings=False) + init, + allow_generators=allow_generators, + treat_sequences_as_mappings=False, + ) def intersect(self, other): if self._set is None: @@ -295,6 +332,7 @@ def setdefault(self, val): if self._set is None: self._set = Initializer(val) + class SetIntersectInitializer(InitializerBase): """An Initializer that returns the intersection of two SetInitializers @@ -303,7 +341,9 @@ class SetIntersectInitializer(InitializerBase): contains a SetIntersectInitializer instance. """ - __slots__ = ('_A','_B',) + + __slots__ = ('_A', '_B') + def __init__(self, setA, setB): self._A = setA self._B = setB @@ -320,16 +360,18 @@ def contains_indices(self): def indices(self): if self._A.contains_indices(): if self._B.contains_indices(): - if set(self._A.indices()) != set (self._B.indices()): + if set(self._A.indices()) != set(self._B.indices()): raise ValueError( "SetIntersectInitializer contains two " - "sub-initializers with inconsistent external indices") + "sub-initializers with inconsistent external indices" + ) return self._A.indices() else: # It is OK (and desirable) for this to raise the exception # if B does not contain external indices return self._B.indices() + class BoundsInitializer(InitializerBase): """An Initializer wrapper that converts bounds information to a RangeSet @@ -343,7 +385,9 @@ class BoundsInitializer(InitializerBase): SetInitializer objects using the SetInitializer.intersect() method. """ - __slots__ = ('_init', 'default_step',) + + __slots__ = ('_init', 'default_step') + def __init__(self, init, default_step=0): self._init = Initializer(init, treat_sequences_as_mappings=False) self.default_step = default_step @@ -363,7 +407,7 @@ def __call__(self, parent, idx): ans = RangeSet(*val) # We don't need to construct here, as the RangeSet will # automatically construct itself if it can - #ans.construct() + # ans.construct() return ans def constant(self): @@ -373,17 +417,20 @@ def setdefault(self, val): # This is a real range set... there is no default to set pass + class TuplizeError(PyomoException): pass + class TuplizeValuesInitializer(InitializerBase): """An initializer wrapper that will "tuplize" a sequence This initializer takes the result of another initializer, and if it - is a sequence that does not already contain tuples, wil convert it + is a sequence that does not already contain tuples, will convert it to a sequence of tuples, each of length 'dimen' before returning it. """ + __slots__ = ('_init', '_dimen') def __new__(cls, *args): @@ -427,9 +474,10 @@ def _tuplize(self, _val, parent, index): if len(_val) % d: raise TuplizeError( "Cannot tuplize list data for set %%s%%s because its " - "length %s is not a multiple of dimen=%s" % (len(_val), d)) + "length %s is not a multiple of dimen=%s" % (len(_val), d) + ) - return list(tuple(_val[d*i:d*(i+1)]) for i in range(len(_val)//d)) + return list(tuple(_val[d * i : d * (i + 1)]) for i in range(len(_val) // d)) class _NotFound(object): @@ -440,8 +488,8 @@ class _NotFound(object): # A trivial class that we can use to test if an object is a "legitimate" # set (either ScalarSet, or a member of an IndexedSet) class _SetDataBase(ComponentData): - """The base for all objects that can be used as a component indexing set. - """ + """The base for all objects that can be used as a component indexing set.""" + __slots__ = () @@ -451,6 +499,7 @@ class _SetData(_SetDataBase): Derived versions of this class can be used as the Index for any IndexedComponent (including IndexedSet).""" + __slots__ = () def __contains__(self, value): @@ -467,15 +516,19 @@ def __contains__(self, value): if isinstance(value, _SetData): deprecation_warning( "Testing for set subsets with 'a in b' is deprecated. " - "Use 'a.issubset(b)'.", version='5.7') + "Use 'a.issubset(b)'.", + version='5.7', + ) return value.issubset(self) else: return False return True def get(self, value, default=None): - raise DeveloperError("Derived set class (%s) failed to " - "implement get()" % (type(self).__name__,)) + raise DeveloperError( + "Derived set class (%s) failed to " + "implement get()" % (type(self).__name__,) + ) def isdiscrete(self): """Returns True if this set admits only discrete members""" @@ -502,7 +555,8 @@ def __iter__(self): """ raise TypeError( "'%s' object is not iterable (non-finite Set '%s' " - "is not iterable)" % (self.__class__.__name__, self.name)) + "is not iterable)" % (self.__class__.__name__, self.name) + ) def __eq__(self, other): if self is other: @@ -552,27 +606,37 @@ def __ne__(self, other): return not self.__eq__(other) def __str__(self): - raise DeveloperError("Derived set class (%s) failed to " - "implement __str__" % (type(self).__name__,)) + raise DeveloperError( + "Derived set class (%s) failed to " + "implement __str__" % (type(self).__name__,) + ) @property def dimen(self): - raise DeveloperError("Derived set class (%s) failed to " - "implement dimen" % (type(self).__name__,)) + raise DeveloperError( + "Derived set class (%s) failed to " + "implement dimen" % (type(self).__name__,) + ) @property def domain(self): - raise DeveloperError("Derived set class (%s) failed to " - "implement domain" % (type(self).__name__,)) + raise DeveloperError( + "Derived set class (%s) failed to " + "implement domain" % (type(self).__name__,) + ) def ranges(self): - raise DeveloperError("Derived set class (%s) failed to " - "implement ranges" % (type(self).__name__,)) + raise DeveloperError( + "Derived set class (%s) failed to " + "implement ranges" % (type(self).__name__,) + ) def bounds(self): try: - _bnds = [(r.start, r.end) if r.step >= 0 else (r.end, r.start) - for r in self.ranges()] + _bnds = [ + (r.start, r.end) if r.step >= 0 else (r.end, r.start) + for r in self.ranges() + ] except AttributeError: return None, None @@ -621,7 +685,11 @@ def _get_discrete_interval(self): # but problemmatic for code coverage. ranges = list(self.ranges()) if len(ranges) == 1: - start, end, c = ranges[0].normalize_bounds() + try: + start, end, c = ranges[0].normalize_bounds() + except AttributeError: + # Catching Any, NonNumericRange, etc... + return self.bounds() + (None,) return ( None if start == -_inf else start, None if end == _inf else end, @@ -639,9 +707,9 @@ def _get_discrete_interval(self): vals = sorted(self) if len(vals) < 2: return (vals[0], vals[0], 0) - step = vals[1]-vals[0] + step = vals[1] - vals[0] for i in range(2, len(vals)): - if step != vals[i] - vals[i-1]: + if step != vals[i] - vals[i - 1]: return self.bounds() + (None,) return (vals[0], vals[-1], step) except AttributeError: @@ -660,7 +728,7 @@ def _get_discrete_interval(self): return self.bounds() + (None,) # Catch misaligned ranges for r in ranges: - if ( r.start - ref ) % step: + if (r.start - ref) % step: return self.bounds() + (None,) if r.step % step: return self.bounds() + (None,) @@ -669,13 +737,13 @@ def _get_discrete_interval(self): # remove any ranges from the ranges list. while nRanges > _rlen: nRanges = _rlen - for i,r in enumerate(ranges): + for i, r in enumerate(ranges): if r.step > 0: rstart, rend = r.start, r.end else: rend, rstart = r.start, r.end if not r.step or abs(r.step) == step: - if start <= rend+step and rstart <= end+step: + if start <= rend + step and rstart <= end + step: ranges[i] = None if start > rstart: start = rstart @@ -699,18 +767,13 @@ def _get_discrete_interval(self): return self.bounds() + (None,) # Note: while unbounded NumericRanges are -inf..inf, Pyomo # Sets are None..None - return ( - None if start == -_inf else start, - None if end == _inf else end, - step, - ) - + return (None if start == -_inf else start, None if end == _inf else end, step) def _get_continuous_interval(self): # Note: this method assumes that at least one range is continuous. # # Note: I'd like to use set() for ranges, since we will be - # randomly removing elelments from the list; however, since we + # randomly removing elements from the list; however, since we # do it by enumerating over ranges, using set() would make this # routine nondeterministic. Not a hoge issue for the result, # but problemmatic for code coverage. @@ -729,8 +792,7 @@ def _get_continuous_interval(self): if r.isdiscrete(): discrete.append(r) else: - ranges.append( - NumericRange(r.start, r.end, r.step, r.closed)) + ranges.append(NumericRange(r.start, r.end, r.step, r.closed)) if len(ranges) == 1 and not discrete: r = ranges[0] @@ -810,11 +872,15 @@ def virtual(self, value): if value != self.virtual: raise ValueError( "Attempting to set the (deprecated) 'virtual' attribute on %s " - "to an invalid value (%s)" % (self.name, value)) + "to an invalid value (%s)" % (self.name, value) + ) @property - @deprecated("The 'concrete' attribute is no longer supported. " - "Use isdiscrete() or isfinite()", version='5.7') + @deprecated( + "The 'concrete' attribute is no longer supported. " + "Use isdiscrete() or isfinite()", + version='5.7', + ) def concrete(self): return self.isfinite() @@ -823,22 +889,25 @@ def concrete(self, value): if value != self.concrete: raise ValueError( "Attempting to set the (deprecated) 'concrete' attribute on %s " - "to an invalid value (%s)" % (self.name, value)) + "to an invalid value (%s)" % (self.name, value) + ) @property - @deprecated("The 'ordered' attribute is no longer supported. " - "Use isordered()", version='5.7') + @deprecated( + "The 'ordered' attribute is no longer supported. Use isordered()", + version='5.7', + ) def ordered(self): return self.isordered() @property - @deprecated("'filter' is no longer a public attribute.", - version='5.7') + @deprecated("'filter' is no longer a public attribute.", version='5.7') def filter(self): return None - @deprecated("check_values() is deprecated: Sets only contain valid members", - version='5.7') + @deprecated( + "check_values() is deprecated: Sets only contain valid members", version='5.7' + ) def check_values(self): """ Verify that the values in this set are valid. @@ -871,8 +940,7 @@ def isdisjoint(self, other): pass else: # Raise an exception consistent with Python's set.isdisjoint() - raise TypeError( - "'%s' object is not iterable" % (type(other).__name__,)) + raise TypeError("'%s' object is not iterable" % (type(other).__name__,)) if self.isfinite(): for x in self: if x in other: @@ -921,8 +989,7 @@ def issubset(self, other): pass else: # Raise an exception consistent with Python's set.issubset() - raise TypeError( - "'%s' object is not iterable" % (type(other).__name__,)) + raise TypeError("'%s' object is not iterable" % (type(other).__name__,)) if not self.isfinite(): try: self = RangeSet(ranges=list(self.ranges())) @@ -982,8 +1049,7 @@ def issuperset(self, other): pass else: # Raise an exception consistent with Python's set.issuperset() - raise TypeError( - "'%s' object is not iterable" % (type(other).__name__,)) + raise TypeError("'%s' object is not iterable" % (type(other).__name__,)) if other_isfinite: for x in other: # Other may contain elements that are not representable @@ -1052,9 +1118,9 @@ def cross(self, *args): # ^ is equivalent to symmetric_difference # * is equivalent to cross - __le__ = issubset - __ge__ = issuperset - __or__ = union + __le__ = issubset + __ge__ = issuperset + __or__ = union __and__ = intersection __sub__ = difference __xor__ = symmetric_difference @@ -1090,13 +1156,13 @@ def __rmul__(self, other): # return SetOf(other) * self return process_setarg(other) * self - def __lt__(self,other): + def __lt__(self, other): """ Return True if the set is a strict subset of 'other' """ return self <= other and not self == other - def __gt__(self,other): + def __gt__(self, other): """ Return True if the set is a strict superset of 'other' """ @@ -1107,12 +1173,16 @@ class _FiniteSetMixin(object): __slots__ = () def __len__(self): - raise DeveloperError("Derived finite set class (%s) failed to " - "implement __len__" % (type(self).__name__,)) + raise DeveloperError( + "Derived finite set class (%s) failed to " + "implement __len__" % (type(self).__name__,) + ) def _iter_impl(self): - raise DeveloperError("Derived finite set class (%s) failed to " - "implement _iter_impl" % (type(self).__name__,)) + raise DeveloperError( + "Derived finite set class (%s) failed to " + "implement _iter_impl" % (type(self).__name__,) + ) def __iter__(self): """Iterate over the finite set @@ -1128,6 +1198,12 @@ def __iter__(self): def __reversed__(self): return reversed(self.data()) + def sorted_iter(self): + return iter(sorted_robust(self)) + + def ordered_iter(self): + return self.sorted_iter() + def isdiscrete(self): """Returns True if this set admits only discrete members""" return True @@ -1140,15 +1216,21 @@ def data(self): return tuple(self) @property - @deprecated("The 'value' attribute is deprecated. Use .data() to " - "retrieve the values in a finite set.", version='5.7') + @deprecated( + "The 'value' attribute is deprecated. Use .data() to " + "retrieve the values in a finite set.", + version='5.7', + ) def value(self): return set(self) @property - @deprecated("The 'value_list' attribute is deprecated. Use " - ".ordered_data() to retrieve the values from a finite set " - "in a deterministic order.", version='5.7') + @deprecated( + "The 'value_list' attribute is deprecated. Use " + ".ordered_data() to retrieve the values from a finite set " + "in a deterministic order.", + version='5.7', + ) def value_list(self): return list(self.ordered_data()) @@ -1169,18 +1251,18 @@ def bounds(self): # set is mixed non-numeric type, then we will report the bounds # as None. if type(lb) is not type(ub) and ( - type(lb) not in native_numeric_types - or type(ub) not in native_numeric_types): - return None,None + type(lb) not in native_numeric_types or type(ub) not in native_numeric_types + ): + return None, None else: - return lb,ub + return lb, ub def ranges(self): # This is way inefficient, but should always work: the ranges in a # Finite set is the list of scalars for i in self: if i.__class__ in native_numeric_types: - yield NumericRange(i,i,0) + yield NumericRange(i, i, 0) elif i.__class__ in native_types: yield NonNumericRange(i) else: @@ -1188,13 +1270,14 @@ def ranges(self): # we have never seen before. try: as_numeric(i) - yield NumericRange(i,i,0) + yield NumericRange(i, i, 0) except: yield NonNumericRange(i) class _FiniteSetData(_FiniteSetMixin, _SetData): """A general unordered iterable Set""" + __slots__ = ('_values', '_domain', '_validate', '_filter', '_dimen') def __init__(self, component): @@ -1208,18 +1291,6 @@ def __init__(self, component): self._filter = None self._dimen = UnknownSetDimen - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - state = super(_FiniteSetData, self).__getstate__() - for i in _FiniteSetData.__slots__: - state[i] = getattr(self, i) - return state - - # Note: because none of the slots on this class need to be edited, - # we don't need to implement a specialized __setstate__ method. - def get(self, value, default=None): """ Return True if the set contains a given value. @@ -1236,6 +1307,12 @@ def get(self, value, default=None): def _iter_impl(self): return iter(self._values) + def __reversed__(self): + try: + return reversed(self._values) + except: + return reversed(self.data()) + def __len__(self): """ Return the number of elements in the set. @@ -1264,8 +1341,7 @@ def domain(self): return self._domain @property - @deprecated("'filter' is no longer a public attribute.", - version='5.7') + @deprecated("'filter' is no longer a public attribute.", version='5.7') def filter(self): return self._filter @@ -1285,9 +1361,11 @@ def add(self, *values): _value = value _d = None if _value not in self._domain: - raise ValueError("Cannot add value %s to Set %s.\n" - "\tThe value is not in the domain %s" - % (value, self.name, self._domain)) + raise ValueError( + "Cannot add value %s to Set %s.\n" + "\tThe value is not in the domain %s" + % (value, self.name, self._domain) + ) # We wrap this check in a try-except because some values # (like lists) are not hashable and can raise exceptions. @@ -1295,12 +1373,15 @@ def add(self, *values): if _value in self: logger.warning( "Element %s already exists in Set %s; no action taken" - % (value, self.name)) + % (value, self.name) + ) continue except: exc = sys.exc_info() - raise TypeError("Unable to insert '%s' into Set %s:\n\t%s: %s" - % (value, self.name, exc[0].__name__, exc[1])) + raise TypeError( + "Unable to insert '%s' into Set %s:\n\t%s: %s" + % (value, self.name, exc[0].__name__, exc[1]) + ) if self._filter is not None: if not self._filter(_block, _value): @@ -1312,12 +1393,14 @@ def add(self, *values): except: logger.error( "Exception raised while validating element '%s' " - "for Set %s" % (value, self.name)) + "for Set %s" % (value, self.name) + ) raise if not flag: raise ValueError( "The value=%s violates the validation rule of Set %s" - % (value, self.name)) + % (value, self.name) + ) # If the Set has a fixed dimension, check that this element is # compatible. @@ -1331,7 +1414,8 @@ def add(self, *values): raise ValueError( "The value=%s has dimension %s and is not " "valid for Set %s which has dimen=%s" - % (value, _d, self.name, self._dimen)) + % (value, _d, self.name, self._dimen) + ) # Add the value to this object (this last redirection allows # derived classes to implement a different storage mechanism) @@ -1387,12 +1471,16 @@ class _OrderedSetMixin(object): _valid_getitem_keys = {None, (None,), Ellipsis} def at(self, index): - raise DeveloperError("Derived ordered set class (%s) failed to " - "implement at" % (type(self).__name__,)) + raise DeveloperError( + "Derived ordered set class (%s) failed to " + "implement at" % (type(self).__name__,) + ) def ord(self, val): - raise DeveloperError("Derived ordered set class (%s) failed to " - "implement ord" % (type(self).__name__,)) + raise DeveloperError( + "Derived ordered set class (%s) failed to " + "implement ord" % (type(self).__name__,) + ) def __getitem__(self, key): # If key looks like the valid key for UnindexedComponent_set, or @@ -1402,16 +1490,22 @@ def __getitem__(self, key): # In any other case, defer to the deprecated OrderedScalarSet # functionality if not self.is_indexed() and ( - key in self._valid_getitem_keys or type(key) is slice): + key in self._valid_getitem_keys or type(key) is slice + ): return super().__getitem__(key) deprecation_warning( "Using __getitem__ to return a set value from its (ordered) " "position is deprecated. Please use at()", - version='6.1', remove_in='7.0') + version='6.1', + remove_in='7.0', + ) return self.at(key) - @deprecated("card() was incorrectly added to the Set API. " - "Please use at()", version='6.1.2', remove_in='6.2') + @deprecated( + "card() was incorrectly added to the Set API. Please use at()", + version='6.1.2', + remove_in='6.2', + ) def card(self, index): return self.at(index) @@ -1422,6 +1516,9 @@ def isordered(self): def ordered_data(self): return self.data() + def ordered_iter(self): + return iter(self) + def first(self): return self.at(1) @@ -1438,7 +1535,7 @@ def next(self, item, step=1): If the search item is not in the Set, or the next element is beyond the end of the set, then an IndexError is raised. """ - position = self.ord(item)+step + position = self.ord(item) + step if position < 1: raise IndexError("Cannot advance before the beginning of the Set") if position > len(self): @@ -1457,7 +1554,7 @@ def nextw(self, item, step=1): If the search item is not in the Set an IndexError is raised. """ position = self.ord(item) - return self.at((position+step-1) % len(self) + 1) + return self.at((position + step - 1) % len(self) + 1) def prev(self, item, step=1): """Return the previous item in the set. @@ -1492,12 +1589,13 @@ def _to_0_based_index(self, item): if item != int(item): raise IndexError( "%s indices must be integers, not %s" - % (self.name, type(item).__name__,)) + % (self.name, type(item).__name__) + ) item = int(item) except: raise IndexError( - "%s indices must be integers, not %s" - % (self.name, type(item).__name__,)) + "%s indices must be integers, not %s" % (self.name, type(item).__name__) + ) if item >= 1: return item - 1 @@ -1509,7 +1607,8 @@ def _to_0_based_index(self, item): else: raise IndexError( "Pyomo Sets are 1-indexed: valid index values for Sets are " - "[1 .. len(Set)] or [-1 .. -len(Set)]") + "[1 .. len(Set)] or [-1 .. -len(Set)]" + ) class _OrderedSetData(_OrderedSetMixin, _FiniteSetData): @@ -1536,18 +1635,6 @@ def __init__(self, component): self._ordered_values = [] _FiniteSetData.__init__(self, component=component) - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - state = super(_OrderedSetData, self).__getstate__() - for i in _OrderedSetData.__slots__: - state[i] = getattr(self, i) - return state - - # Note: because none of the slots on this class need to be edited, - # we don't need to implement a specialized __setstate__ method. - def _iter_impl(self): """ Return an iterator for the set. @@ -1617,13 +1704,11 @@ def ord(self, item): return self._values[item] + 1 except KeyError: if item.__class__ is not tuple or len(item) > 1: - raise ValueError( - "%s.ord(x): x not in %s" % (self.name, self.name)) + raise ValueError("%s.ord(x): x not in %s" % (self.name, self.name)) try: return self._values[item[0]] + 1 except KeyError: - raise ValueError( - "%s.ord(x): x not in %s" % (self.name, self.name)) + raise ValueError("%s.ord(x): x not in %s" % (self.name, self.name)) class _InsertionOrderSetData(_OrderedSetData): @@ -1636,6 +1721,7 @@ class _InsertionOrderSetData(_OrderedSetData): Public Class Attributes: """ + __slots__ = () def set_value(self, val): @@ -1644,7 +1730,8 @@ def set_value(self, val): "Calling set_value() on an insertion order Set with " "a fundamentally unordered data source (type: %s). " "This WILL potentially lead to nondeterministic behavior " - "in Pyomo" % (type(val).__name__,)) + "in Pyomo" % (type(val).__name__,) + ) super(_InsertionOrderSetData, self).set_value(val) def update(self, values): @@ -1653,14 +1740,22 @@ def update(self, values): "Calling update() on an insertion order Set with " "a fundamentally unordered data source (type: %s). " "This WILL potentially lead to nondeterministic behavior " - "in Pyomo" % (type(values).__name__,)) + "in Pyomo" % (type(values).__name__,) + ) super(_InsertionOrderSetData, self).update(values) class _SortedSetMixin(object): - "" + """""" + __slots__ = () + def ordered_iter(self): + return iter(self) + + def sorted_iter(self): + return iter(self) + class _SortedSetData(_SortedSetMixin, _OrderedSetData): """ @@ -1679,18 +1774,6 @@ def __init__(self, component): self._is_sorted = True _OrderedSetData.__init__(self, component=component) - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - state = super(_SortedSetData, self).__getstate__() - for i in _SortedSetData.__slots__: - state[i] = getattr(self, i) - return state - - # Note: because none of the slots on this class need to be edited, - # we don't need to implement a specialized __setstate__ method. - def _iter_impl(self): """ Return an iterator for the set. @@ -1712,8 +1795,8 @@ def _add_impl(self, value): self._is_sorted = False # Note: removing data does not affect the sorted flag - #def remove(self, val): - #def discard(self, val): + # def remove(self, val): + # def discard(self, val): def clear(self): super(_SortedSetData, self).clear() @@ -1746,32 +1829,29 @@ def sorted_data(self): return self.data() def _sort(self): - self._ordered_values = list(self.parent_component()._sort_fcn( - self._ordered_values)) - self._values = {j:i for i, j in enumerate(self._ordered_values)} + self._ordered_values = list( + self.parent_component()._sort_fcn(self._ordered_values) + ) + self._values = {j: i for i, j in enumerate(self._ordered_values)} self._is_sorted = True ############################################################################ -_SET_API = ( - ('__contains__', 'test membership in'), - 'get', 'ranges', 'bounds', -) +_SET_API = (('__contains__', 'test membership in'), 'get', 'ranges', 'bounds') _FINITESET_API = _SET_API + ( ('__iter__', 'iterate over'), - '__reversed__', '__len__', 'data', 'sorted_data', 'ordered_data', -) -_ORDEREDSET_API = _FINITESET_API + ( - 'at', 'ord', -) -_SETDATA_API = ( - 'set_value', 'add', 'remove', 'discard', 'clear', 'update', 'pop', + '__reversed__', + '__len__', + 'data', + 'sorted_data', + 'ordered_data', ) +_ORDEREDSET_API = _FINITESET_API + ('at', 'ord') +_SETDATA_API = ('set_value', 'add', 'remove', 'discard', 'clear', 'update', 'pop') -@ModelComponentFactory.register( - "Set data that is used to define a model instance.") +@ModelComponentFactory.register("Set data that is used to define a model instance.") class Set(IndexedComponent): """A component used to index other Pyomo components. @@ -1782,7 +1862,7 @@ class Set(IndexedComponent): domains and provide callback functions to validate set members and to filter (ignore) potential members. 2. Set expressions. Operations on Set objects (&,|,*,-,^) - produce Set expressions taht preserve their references to the + produce Set expressions that preserve their references to the original Set objects so that updating the argument Sets implicitly updates the Set operator instance. 3. Support for set operations with RangeSet instances (both @@ -1856,10 +1936,18 @@ class Set(IndexedComponent): """ - class End(object): pass - class Skip(object): pass - class InsertionOrder(object): pass - class SortedOrder(object): pass + class End(object): + pass + + class Skip(object): + pass + + class InsertionOrder(object): + pass + + class SortedOrder(object): + pass + _ValidOrderedAuguments = {True, False, InsertionOrder, SortedOrder} _UnorderedInitializers = {set} if _prePython37: @@ -1897,12 +1985,19 @@ def __new__(cls, *args, **kwds): # SortedOrder would occasionally swap places. raise TypeError( "Set 'ordered' argument is not valid (must be one of {%s})" - % ( ', '.join(str(_) for _ in sorted_robust( - 'Set.'+x.__name__ if isinstance(x,type) else x - for x in Set._ValidOrderedAuguments.union( - {'',}) - )))) - if not args or (args[0] is UnindexedComponent_set and len(args)==1): + % ( + ', '.join( + str(_) + for _ in sorted_robust( + 'Set.' + x.__name__ if isinstance(x, type) else x + for x in Set._ValidOrderedAuguments.union( + {''} + ) + ) + ) + ) + ) + if not args or (args[0] is UnindexedComponent_set and len(args) == 1): if ordered is Set.InsertionOrder: return super(Set, cls).__new__(AbstractOrderedScalarSet) elif ordered is Set.SortedOrder: @@ -1920,18 +2015,29 @@ def __new__(cls, *args, **kwds): return newObj @overload - def __init__(self, *indexes, initialize=None, dimen=UnknownSetDimen, - ordered=InsertionOrder, within=None, domain=None, - bounds=None, filter=None, validate=None, name=None, doc=None): ... + def __init__( + self, + *indexes, + initialize=None, + dimen=UnknownSetDimen, + ordered=InsertionOrder, + within=None, + domain=None, + bounds=None, + filter=None, + validate=None, + name=None, + doc=None + ): + ... def __init__(self, *args, **kwds): kwds.setdefault('ctype', Set) # The ordered flag was processed by __new__, but if this is a # sorted set, then we need to set the sorting function - _ordered = kwds.pop('ordered',None) - if _ordered and _ordered is not Set.InsertionOrder \ - and _ordered is not True: + _ordered = kwds.pop('ordered', None) + if _ordered and _ordered is not Set.InsertionOrder and _ordered is not True: if inspect.isfunction(_ordered): self._sort_fcn = _ordered else: @@ -1953,18 +2059,24 @@ def __init__(self, *args, **kwds): self._init_domain.intersect(BoundsInitializer(_bounds)) self._init_dimen = Initializer( - kwds.pop('dimen', UnknownSetDimen), - arg_not_specified=NOTSET) - self._init_values = TuplizeValuesInitializer(Initializer( - kwds.pop('initialize', None), - treat_sequences_as_mappings=False, allow_generators=True)) + kwds.pop('dimen', UnknownSetDimen), arg_not_specified=NOTSET + ) + self._init_values = TuplizeValuesInitializer( + Initializer( + kwds.pop('initialize', None), + treat_sequences_as_mappings=False, + allow_generators=True, + ) + ) self._init_validate = Initializer(kwds.pop('validate', None)) self._init_filter = Initializer(kwds.pop('filter', None)) if 'virtual' in kwds: deprecation_warning( "Pyomo Sets ignore the 'virtual' keyword argument", - logger='pyomo.core.base', version='5.6.7') + logger='pyomo.core.base', + version='5.6.7', + ) kwds.pop('virtual') IndexedComponent.__init__(self, *args, **kwds) @@ -1972,10 +2084,13 @@ def __init__(self, *args, **kwds): # HACK to make the "counted call" syntax work. We wait until # after the base class is set up so that is_indexed() is # reliable. - if self._init_values is not None \ - and self._init_values._init.__class__ is IndexedCallInitializer: + if ( + self._init_values is not None + and self._init_values._init.__class__ is IndexedCallInitializer + ): self._init_values._init = CountedCallInitializer( - self, self._init_values._init) + self, self._init_values._init + ) # HACK: the DAT parser needs to know the domain of a set in # order to correctly parse the data stream. if not self.is_indexed(): @@ -1984,30 +2099,28 @@ def __init__(self, *args, **kwds): if self._init_dimen.constant(): self._dimen = self._init_dimen(self.parent_block(), None) - - @deprecated("check_values() is deprecated: Sets only contain valid members", - version='5.7') + @deprecated( + "check_values() is deprecated: Sets only contain valid members", version='5.7' + ) def check_values(self): """ Verify that the values in this set are valid. """ return True - def construct(self, data=None): if self._constructed: return timer = ConstructionTimer(self) if is_debug_set(logger): - logger.debug("Constructing Set, name=%s, from data=%r" - % (self.name, data)) + logger.debug("Constructing Set, name=%s, from data=%r" % (self.name, data)) self._constructed = True if data is not None: # Data supplied to construct() should override data provided # to the constructor - tmp_init, self._init_values \ - = self._init_values, TuplizeValuesInitializer( - Initializer(data, treat_sequences_as_mappings=False)) + tmp_init, self._init_values = self._init_values, TuplizeValuesInitializer( + Initializer(data, treat_sequences_as_mappings=False) + ) try: if self._init_values is None: if not self.is_indexed(): @@ -2045,21 +2158,19 @@ def _getitem_when_not_present(self, index): # will actually be constructed (and not Skipped). _block = self.parent_block() - #Note: _init_dimen and _init_domain are guaranteed to be non-None + # Note: _init_dimen and _init_domain are guaranteed to be non-None _d = self._init_dimen(_block, index) - if ( not normalize_index.flatten and _d is not UnknownSetDimen - and _d is not None ): + if not normalize_index.flatten and _d is not UnknownSetDimen and _d is not None: logger.warning( "Ignoring non-None dimen (%s) for set %s%s " "(normalize_index.flatten is False, so dimen " - "verification is not available)." % ( - _d, self.name, - ("[%s]" % (index,) if self.is_indexed() else "") )) + "verification is not available)." + % (_d, self.name, ("[%s]" % (index,) if self.is_indexed() else "")) + ) _d = None domain = self._init_domain(_block, index) - if _d is UnknownSetDimen and domain is not None \ - and domain.dimen is not None: + if _d is UnknownSetDimen and domain is not None and domain.dimen is not None: _d = domain.dimen if self._init_values is not None: @@ -2067,14 +2178,16 @@ def _getitem_when_not_present(self, index): try: _values = self._init_values(_block, index) except TuplizeError as e: - raise ValueError( str(e) % ( - self._name, "[%s]" % index if self.is_indexed() else "")) + raise ValueError( + str(e) % (self._name, "[%s]" % index if self.is_indexed() else "") + ) if _values is Set.Skip: return elif _values is None: raise ValueError( - "Set rule or initializer returned None instead of Set.Skip") + "Set rule or initializer returned None instead of Set.Skip" + ) if index is None and not self.is_indexed(): obj = self._data[index] = self else: @@ -2111,13 +2224,13 @@ def _getitem_when_not_present(self, index): _filter = None if self._init_values is not None: # _values was initialized above... - if obj.isordered() \ - and type(_values) in Set._UnorderedInitializers: + if obj.isordered() and type(_values) in Set._UnorderedInitializers: logger.warning( "Initializing ordered Set %s with a fundamentally " "unordered data source (type: %s). This WILL potentially " "lead to nondeterministic behavior in Pyomo" - % (self.name, type(_values).__name__,)) + % (self.name, type(_values).__name__) + ) # Special case: set operations that are not first attached # to the model must be constructed. if isinstance(_values, SetOperator): @@ -2127,11 +2240,15 @@ def _getitem_when_not_present(self, index): except TypeError: logger.error( "Initializer for Set %s%s returned non-iterable object " - "of type %s." % ( + "of type %s." + % ( self.name, ("[%s]" % (index,) if self.is_indexed() else ""), - _values if _values.__class__ is type - else type(_values).__name__ )) + _values + if _values.__class__ is type + else type(_values).__name__, + ) + ) raise for val in val_iter: if val is Set.End: @@ -2176,7 +2293,7 @@ def _pprint(self): """ # # Eventually, we might want to support a 'verbose' flag to - # pprint() that will suppress som of the very long (less + # pprint() that will suppress some of the very long (less # informative) output # # if verbose: @@ -2208,29 +2325,32 @@ def _pprint(self): # will infer it from the class hierarchy if issubclass(_refClass, _SortedSetMixin): if self.parent_component()._sort_fcn is sorted_robust: - _ordered = "Sorted" + _ordered = "Sorted" else: - _ordered = "{user}" + _ordered = "{user}" elif issubclass(_refClass, _InsertionOrderSetData): _ordered = "Insertion" return ( - [("Size", len(self._data)), - ("Index", self._index_set if self.is_indexed() else None), - ("Ordered", _ordered),], + [ + ("Size", len(self._data)), + ("Index", self._index_set if self.is_indexed() else None), + ("Ordered", _ordered), + ], self._data.items(), - ("Dimen","Domain","Size","Members",), + ("Dimen", "Domain", "Size", "Members"), lambda k, v: [ Set._pprint_dimen(v), Set._pprint_domain(v), len(v) if v.isfinite() else 'Inf', Set._pprint_members(v), - ]) + ], + ) class IndexedSet(Set): def data(self): "Return a dict containing the data() of each Set in this IndexedSet" - return {k: v.data() for k,v in self.items()} + return {k: v.data() for k, v in self.items()} class FiniteScalarSet(_FiniteSetData, Set): @@ -2240,7 +2360,6 @@ def __init__(self, **kwds): self._index = UnindexedComponent_index - class FiniteSimpleSet(metaclass=RenamedClass): __renamed__new_class__ = FiniteScalarSet __renamed__version__ = '6.0' @@ -2309,12 +2428,14 @@ class AbstractSortedSimpleSet(metaclass=RenamedClass): ############################################################################ + class SetOf(_SetData, Component): """""" + def __new__(cls, *args, **kwds): if cls is not SetOf: return super(SetOf, cls).__new__(cls) - reference, = args + (reference,) = args if isinstance(reference, (_SetData, GlobalSetBase)): if reference.isfinite(): if reference.isordered(): @@ -2344,8 +2465,9 @@ def construct(self, data=None): return timer = ConstructionTimer(self) if is_debug_set(logger): - logger.debug("Constructing SetOf, name=%s, from data=%r" - % (self.name, data)) + logger.debug( + "Constructing SetOf, name=%s, from data=%r" % (self.name, data) + ) self._constructed = True timer.report() @@ -2377,15 +2499,11 @@ def _pprint(self): Return data that will be printed for this component. """ return ( - [("Dimen", self.dimen), - ("Size", len(self)), - ("Bounds", self.bounds())], - {None: self}.items() , - ("Ordered", "Members",), - lambda k, v: [ - v.isordered(), - str(v._ref), - ]) + [("Dimen", self.dimen), ("Size", len(self)), ("Bounds", self.bounds())], + {None: self}.items(), + ("Ordered", "Members"), + lambda k, v: [v.isordered(), str(v._ref)], + ) class InfiniteSetOf(SetOf): @@ -2413,6 +2531,12 @@ def __len__(self): def _iter_impl(self): return iter(self._ref) + def __reversed__(self): + try: + return reversed(self._ref) + except: + return reversed(self.data()) + class UnorderedSetOf(metaclass=RenamedClass): __renamed__new_class__ = FiniteSetOf @@ -2459,18 +2583,6 @@ def __init__(self, component): _SetData.__init__(self, component=component) self._ranges = None - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - state = super(_InfiniteRangeSetData, self).__getstate__() - for i in _InfiniteRangeSetData.__slots__: - state[i] = getattr(self, i) - return state - - # Note: because none of the slots on this class need to be edited, - # we don't need to implement a specialized __setstate__ method. - def get(self, value, default=None): # The bulk of single-value set members were stored as scalars. # Check that first. @@ -2501,10 +2613,9 @@ def ranges(self): return iter(self._ranges) -class _FiniteRangeSetData( _SortedSetMixin, - _OrderedSetMixin, - _FiniteSetMixin, - _InfiniteRangeSetData ): +class _FiniteRangeSetData( + _SortedSetMixin, _OrderedSetMixin, _FiniteSetMixin, _InfiniteRangeSetData +): __slots__ = () @staticmethod @@ -2519,7 +2630,7 @@ def _range_gen(r): while n <= end: yield n i += 1 - n = start + i*step + n = start + i * step def _iter_impl(self): # If there is only a single underlying range, then we will @@ -2571,7 +2682,7 @@ def at(self, index): idx = self._to_0_based_index(index) if len(self._ranges) == 1: r = self._ranges[0] - ans = r.start + (idx)*r.step + ans = r.start + (idx) * r.step if ans <= r.end: return ans else: @@ -2585,9 +2696,12 @@ def ord(self, item): if len(self._ranges) == 1: r = self._ranges[0] i = float(item - r.start) / r.step - if item >= r.start and item <= r.end and \ - abs(i - math.floor(i+0.5)) < r._EPS: - return int(math.floor(i+0.5)) + 1 + if ( + item >= r.start + and item <= r.end + and abs(i - math.floor(i + 0.5)) < r._EPS + ): + return int(math.floor(i + 0.5)) + 1 else: ans = 1 for val in self: @@ -2596,7 +2710,8 @@ def ord(self, item): ans += 1 raise ValueError( "Cannot identify position of %s in Set %s: item not in Set" - % (item, self.name)) + % (item, self.name) + ) # We must redefine ranges(), bounds(), and domain so that we get the # _InfiniteRangeSetData version and not the one from @@ -2609,7 +2724,8 @@ def ord(self, item): @ModelComponentFactory.register( "A sequence of numeric values. RangeSet(start,end,step) is a sequence " "starting a value 'start', and increasing in values by 'step' until a " - "value greater than or equal to 'end' is reached.") + "value greater than or equal to 'end' is reached." +) class RangeSet(Component): """A set object that represents a set of numeric values @@ -2692,7 +2808,7 @@ class RangeSet(Component): Name for this component. doc: str, optional - Text describing this component. + Text describing this component. """ def __new__(cls, *args, **kwds): @@ -2704,13 +2820,15 @@ def __new__(cls, *args, **kwds): if 'ranges' in kwds: if any(not r.isfinite() for r in kwds['ranges']): finite = False - for i,_ in enumerate(args): + for i, _ in enumerate(args): if type(_) not in native_types: # Strange nosetest coverage issue: if the logic is # negated and the continue is in the "else", that # line is not caught as being covered. - if not isinstance(_, ComponentData) \ - or not _.parent_component().is_constructed(): + if ( + not isinstance(_, ComponentData) + or not _.parent_component().is_constructed() + ): continue else: # "Peek" at constructed components to try and @@ -2737,28 +2855,61 @@ def __new__(cls, *args, **kwds): # But positional-only params syntax are not supported before python 3.8. # To emphasize they are positional-only, an underscore is added before their name. @overload - def __init__(self, _end, *, finite=None, ranges=(), bounds=None, - filter=None, validate=None, name=None, doc=None): ... + def __init__( + self, + _end, + *, + finite=None, + ranges=(), + bounds=None, + filter=None, + validate=None, + name=None, + doc=None + ): + ... @overload - def __init__(self, _start, _end, _step=1, *, finite=None, ranges=(), bounds=None, - filter=None, validate=None, name=None, doc=None): ... + def __init__( + self, + _start, + _end, + _step=1, + *, + finite=None, + ranges=(), + bounds=None, + filter=None, + validate=None, + name=None, + doc=None + ): + ... @overload - def __init__(self, *, finite=None, ranges=(), bounds=None, - filter=None, validate=None, name=None, doc=None): ... + def __init__( + self, + *, + finite=None, + ranges=(), + bounds=None, + filter=None, + validate=None, + name=None, + doc=None + ): + ... def __init__(self, *args, **kwds): # Finite was processed by __new__ kwds.setdefault('ctype', RangeSet) if len(args) > 3: - raise ValueError("RangeSet expects 3 or fewer positional " - "arguments (received %s)" % (len(args),)) + raise ValueError( + "RangeSet expects 3 or fewer positional " + "arguments (received %s)" % (len(args),) + ) kwds.pop('finite', None) - self._init_data = ( - args, - kwds.pop('ranges', ()), - ) + self._init_data = (args, kwds.pop('ranges', ())) self._init_validate = Initializer(kwds.pop('validate', None)) self._init_filter = Initializer(kwds.pop('filter', None)) self._init_bounds = kwds.pop('bounds', None) @@ -2779,15 +2930,15 @@ def __init__(self, *args, **kwds): # meaningful warning message about a RangeSet defined by mutable # data. try: - if all( type(_) in native_types - or (_.parent_component().is_constructed() - and is_constant(_)) - for _ in args ): + if all( + type(_) in native_types + or (_.parent_component().is_constructed() and is_constant(_)) + for _ in args + ): self.construct() except AttributeError: pass - def __str__(self): if self.parent_block() is not None: return self.name @@ -2806,20 +2957,21 @@ def __str__(self): else: return "[]" - def construct(self, data=None): if self._constructed: return timer = ConstructionTimer(self) if is_debug_set(logger): - logger.debug("Constructing RangeSet, name=%s, from data=%r" - % (self.name, data)) + logger.debug( + "Constructing RangeSet, name=%s, from data=%r" % (self.name, data) + ) if data is not None: raise ValueError( "RangeSet.construct() does not support the data= argument.\n" "Initialization data (range endpoints) can only be supplied " "as numbers, constants, or Params to the RangeSet() " - "declaration") + "declaration" + ) self._constructed = True args, ranges = self._init_data @@ -2831,32 +2983,33 @@ def construct(self, data=None): "the data value in the future will not be reflected in this " "RangeSet. To suppress this warning, explicitly convert " "the source data to a constant type (e.g., float, int, or " - "immutable Param)" % (self.name,)) + "immutable Param)" % (self.name,) + ) args = tuple(value(arg) for arg in args) if type(ranges) is not tuple: ranges = tuple(ranges) if len(args) == 1: - # This is a bit of a hack for backwards compatability with + # This is a bit of a hack for backwards compatibility with # the old RangeSet implementation, where we did less # validation of the RangeSet arguments, and allowed the # creation of 0-length RangeSets if args[0] != 0: # No need to check for floating point - it will # automatically be truncated - ranges = ranges + (NumericRange(1,args[0],1),) + ranges = ranges + (NumericRange(1, args[0], 1),) elif len(args) == 2: - # This is a bit of a hack for backwards compatability with + # This is a bit of a hack for backwards compatibility with # the old RangeSet implementation, where we did less # validation of the RangeSet arguments, and allowed the # creation of 0-length RangeSets if None in args or args[1] - args[0] != -1: - args = (args[0],args[1],1) + args = (args[0], args[1], 1) if len(args) == 3: # Discrete ranges anchored by a floating point value or # incremented by a floating point value cannot be handled by # the NumericRange object. We will just discretize this - # range (mostly for backwards compatability) + # range (mostly for backwards compatibility) start, end, step = args if step: if start is None: @@ -2864,27 +3017,30 @@ def construct(self, data=None): step *= -1 if start is None: - # Backwards compatability: assume unbounded RangeSet + # Backwards compatibility: assume unbounded RangeSet # is grounded at 0 - ranges += ( NumericRange(0, None, step), - NumericRange(0, None, -step) ) + ranges += ( + NumericRange(0, None, step), + NumericRange(0, None, -step), + ) elif int(step) != step: if end is None: raise ValueError( "RangeSet does not support unbounded ranges " "with a non-integer step (got [%s:%s:%s])" - % (start, end, step)) + % (start, end, step) + ) if (end >= start) ^ (step > 0): raise ValueError( "RangeSet: start, end ordering incompatible with " - "step direction (got [%s:%s:%s])" - % (start, end, step)) + "step direction (got [%s:%s:%s])" % (start, end, step) + ) n = start i = 0 while (step > 0 and n <= end) or (step < 0 and n >= end): - ranges += (NumericRange(n,n,0),) + ranges += (NumericRange(n, n, 0),) i += 1 - n = start + step*i + n = start + step * i else: ranges += (NumericRange(start, end, step),) else: @@ -2894,13 +3050,14 @@ def construct(self, data=None): if not isinstance(r, NumericRange): raise TypeError( "RangeSet 'ranges' argument must be an " - "iterable of NumericRange objects") + "iterable of NumericRange objects" + ) if not r.isfinite() and self.isfinite(): raise ValueError( "Constructing a finite RangeSet over a non-finite " "range (%s). Either correct the range data or " - "specify 'finite=False' when declaring the RangeSet" - % (r,)) + "specify 'finite=False' when declaring the RangeSet" % (r,) + ) _block = self.parent_block() if self._init_bounds is not None: @@ -2916,7 +3073,8 @@ def construct(self, data=None): if not self.isfinite(): raise ValueError( "The 'filter' keyword argument is not valid for " - "non-finite RangeSet component (%s)" % (self.name,)) + "non-finite RangeSet component (%s)" % (self.name,) + ) try: _filter = Initializer(self._init_filter(_block, None)) @@ -2937,9 +3095,9 @@ def construct(self, data=None): old_ranges.reverse() while old_ranges: r = old_ranges.pop() - for i,val in enumerate(_FiniteRangeSetData._range_gen(r)): + for i, val in enumerate(_FiniteRangeSetData._range_gen(r)): if not _filter(_block, val): - split_r = r.range_difference((NumericRange(val,val,0),)) + split_r = r.range_difference((NumericRange(val, val, 0),)) if len(split_r) == 2: new_ranges.append(split_r[0]) old_ranges.append(split_r[1]) @@ -2958,7 +3116,8 @@ def construct(self, data=None): if not self.isfinite(): raise ValueError( "The 'validate' keyword argument is not valid for " - "non-finite RangeSet component (%s)" % (self.name,)) + "non-finite RangeSet component (%s)" % (self.name,) + ) try: _validate = Initializer(self._init_validate(_block, None)) @@ -2977,12 +3136,14 @@ def construct(self, data=None): except: logger.error( "Exception raised while validating element '%s' " - "for Set %s" % (val, self.name)) + "for Set %s" % (val, self.name) + ) raise if not flag: raise ValueError( "The value=%s violates the validation rule of " - "Set %s" % (val, self.name)) + "Set %s" % (val, self.name) + ) timer.report() @@ -2993,24 +3154,27 @@ def construct(self, data=None): # def dim(self): return 0 + def index_set(self): return UnindexedComponent_set - def _pprint(self): """ Return data that will be printed for this component. """ return ( - [("Dimen", self.dimen), - ("Size", len(self) if self.isfinite() else 'Inf'), - ("Bounds", self.bounds())], + [ + ("Dimen", self.dimen), + ("Size", len(self) if self.isfinite() else 'Inf'), + ("Bounds", self.bounds()), + ], {None: self}.items(), - ("Finite","Members",), + ("Finite", "Members"), lambda k, v: [ - v.isfinite(),#isinstance(v, _FiniteSetMixin), + v.isfinite(), # isinstance(v, _FiniteSetMixin), ', '.join(str(r) for r in self.ranges()) or '[]', - ]) + ], + ) class InfiniteScalarRangeSet(_InfiniteRangeSetData, RangeSet): @@ -3028,8 +3192,7 @@ class InfiniteSimpleRangeSet(metaclass=RenamedClass): __renamed__version__ = '6.0' -class FiniteScalarRangeSet(_ScalarOrderedSetMixin, - _FiniteRangeSetData, RangeSet): +class FiniteScalarRangeSet(_ScalarOrderedSetMixin, _FiniteRangeSetData, RangeSet): def __init__(self, *args, **kwds): _FiniteRangeSetData.__init__(self, component=self) RangeSet.__init__(self, *args, **kwds) @@ -3063,10 +3226,12 @@ class AbstractFiniteSimpleRangeSet(metaclass=RenamedClass): __renamed__new_class__ = AbstractFiniteScalarRangeSet __renamed__version__ = '6.0' + ############################################################################ # Set Operators ############################################################################ + class SetOperator(_SetData, Set): __slots__ = ('_sets',) @@ -3087,22 +3252,14 @@ def __init__(self, *args, **kwds): if all(_.parent_component()._constructed for _ in self._sets): self.construct() - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - state = super(SetOperator, self).__getstate__() - for i in SetOperator.__slots__: - state[i] = getattr(self, i) - return state - def construct(self, data=None): if self._constructed: return timer = ConstructionTimer(self) if is_debug_set(logger): - logger.debug("Constructing SetOperator, name=%s, from data=%r" - % (self.name, data)) + logger.debug( + "Constructing SetOperator, name=%s, from data=%r" % (self.name, data) + ) for s in self._sets: s.parent_component().construct() super(SetOperator, self).construct() @@ -3110,7 +3267,9 @@ def construct(self, data=None): deprecation_warning( "Providing construction data to SetOperator objects is " "deprecated. This data is ignored and in a future version " - "will not be allowed", version='5.7') + "will not be allowed", + version='5.7', + ) fail = len(data) > 1 or None not in data if not fail: _data = data[None] @@ -3124,12 +3283,10 @@ def construct(self, data=None): if fail: raise ValueError( "Constructing SetOperator %s with incompatible data " - "(data=%s}" % (self.name, data)) + "(data=%s}" % (self.name, data) + ) timer.report() - # Note: because none of the slots on this class need to be edited, - # we don't need to implement a specialized __setstate__ method. - def __len__(self): """Return the length of this Set @@ -3148,7 +3305,8 @@ def __len__(self): raise OverflowError( "The length of a non-finite Set is Inf; however, Python " "requires len() to return a non-negative integer value. Check " - "isfinite() before calling len() for possibly infinite Sets") + "isfinite() before calling len() for possibly infinite Sets" + ) def __str__(self): if self.parent_block() is not None: @@ -3205,27 +3363,31 @@ def isdiscrete(self): def subsets(self, expand_all_set_operators=None): if not isinstance(self, SetProduct): if expand_all_set_operators is None: - logger.warning(""" + logger.warning( + """ Extracting subsets for Set %s, which is a SetOperator other than a SetProduct. Returning this set and not descending into the set operands. To descend into this operator, specify 'subsets(expand_all_set_operators=True)' or to suppress this warning, specify - 'subsets(expand_all_set_operators=False)'""" % ( self.name, )) + 'subsets(expand_all_set_operators=False)'""" + % (self.name,) + ) yield self return elif not expand_all_set_operators: yield self return for s in self._sets: - yield from s.subsets( - expand_all_set_operators=expand_all_set_operators) + yield from s.subsets(expand_all_set_operators=expand_all_set_operators) @property - @deprecated("SetProduct.set_tuple is deprecated. " - "Use SetProduct.subsets() to get the operator arguments.", - version='5.7') + @deprecated( + "SetProduct.set_tuple is deprecated. " + "Use SetProduct.subsets() to get the operator arguments.", + version='5.7', + ) def set_tuple(self): # Despite its name, in the old SetProduct, set_tuple held a list return list(self.subsets()) @@ -3245,8 +3407,8 @@ def _domain(self): def _domain(self, val): if val is not Any: raise ValueError( - "Setting the domain of a Set Operator is not allowed: %s" % val) - + "Setting the domain of a Set Operator is not allowed: %s" % val + ) @staticmethod def _checkArgs(*sets): @@ -3260,8 +3422,10 @@ def _checkArgs(*sets): ans.append((False, True)) return ans + ############################################################################ + class SetUnion(SetOperator): __slots__ = tuple() @@ -3301,7 +3465,7 @@ class SetUnion_InfiniteSet(SetUnion): __slots__ = tuple() def get(self, val, default=None): - #return any(val in s for s in self._sets) + # return any(val in s for s in self._sets) for s in self._sets: v = s.get(val, default) if v is not default: @@ -3314,10 +3478,7 @@ class SetUnion_FiniteSet(_FiniteSetMixin, SetUnion_InfiniteSet): def _iter_impl(self): set0 = self._sets[0] - return itertools.chain( - set0, - (_ for _ in self._sets[1] if _ not in set0) - ) + return itertools.chain(set0, (_ for _ in self._sets[1] if _ not in set0)) def __len__(self): """ @@ -3331,15 +3492,14 @@ def __len__(self): return len(set0) + sum(1 for s in set1 if s not in set0) -class SetUnion_OrderedSet(_ScalarOrderedSetMixin, _OrderedSetMixin, - SetUnion_FiniteSet): +class SetUnion_OrderedSet(_ScalarOrderedSetMixin, _OrderedSetMixin, SetUnion_FiniteSet): __slots__ = tuple() def at(self, index): idx = self._to_0_based_index(index) set0_len = len(self._sets[0]) if idx < set0_len: - return self._sets[0].at(idx+1) + return self._sets[0].at(idx + 1) else: idx -= set0_len - 1 set1_iter = iter(self._sets[1]) @@ -3365,7 +3525,8 @@ def ord(self, item): if item not in self._sets[1]: raise IndexError( "Cannot identify position of %s in Set %s: item not in Set" - % (item, self.name)) + % (item, self.name) + ) idx = len(self._sets[0]) _iter = iter(self._sets[1]) while True: @@ -3379,6 +3540,7 @@ def ord(self, item): ############################################################################ + class SetIntersection(SetOperator): __slots__ = tuple() @@ -3432,7 +3594,7 @@ class SetIntersection_InfiniteSet(SetIntersection): __slots__ = tuple() def get(self, val, default=None): - #return all(val in s for s in self._sets) + # return all(val in s for s in self._sets) for s in self._sets: v = s.get(val, default) if v is default: @@ -3458,7 +3620,7 @@ def _iter_impl(self): for r0 in set0.ranges(): ranges.extend(r0.range_intersection(set1.ranges())) # Note that the RangeSet is automatically - # constucted, as it has no non-native positional + # constructed, as it has no non-native positional # parameters. return iter(RangeSet(ranges=ranges)) return (s for s in set0 if s in set1) @@ -3470,8 +3632,9 @@ def __len__(self): return sum(1 for _ in self) -class SetIntersection_OrderedSet(_ScalarOrderedSetMixin, _OrderedSetMixin, - SetIntersection_FiniteSet): +class SetIntersection_OrderedSet( + _ScalarOrderedSetMixin, _OrderedSetMixin, SetIntersection_FiniteSet +): __slots__ = tuple() def at(self, index): @@ -3496,15 +3659,18 @@ def ord(self, item): if item not in self._sets[0] or item not in self._sets[1]: raise IndexError( "Cannot identify position of %s in Set %s: item not in Set" - % (item, self.name)) + % (item, self.name) + ) idx = 0 _iter = iter(self) while next(_iter) != item: idx += 1 return idx + 1 + ############################################################################ + class SetDifference(SetOperator): __slots__ = tuple() @@ -3531,11 +3697,12 @@ def ranges(self): def dimen(self): return self._sets[0].dimen + class SetDifference_InfiniteSet(SetDifference): __slots__ = tuple() def get(self, val, default=None): - #return val in self._sets[0] and not val in self._sets[1] + # return val in self._sets[0] and not val in self._sets[1] v_l = self._sets[0].get(val, default) if v_l is default: return default @@ -3559,8 +3726,9 @@ def __len__(self): return sum(1 for _ in self) -class SetDifference_OrderedSet(_ScalarOrderedSetMixin, _OrderedSetMixin, - SetDifference_FiniteSet): +class SetDifference_OrderedSet( + _ScalarOrderedSetMixin, _OrderedSetMixin, SetDifference_FiniteSet +): __slots__ = tuple() def at(self, index): @@ -3585,7 +3753,8 @@ def ord(self, item): if item not in self: raise IndexError( "Cannot identify position of %s in Set %s: item not in Set" - % (item, self.name)) + % (item, self.name) + ) idx = 0 _iter = iter(self) while next(_iter) != item: @@ -3595,6 +3764,7 @@ def ord(self, item): ############################################################################ + class SetSymmetricDifference(SetOperator): __slots__ = tuple() @@ -3638,7 +3808,7 @@ class SetSymmetricDifference_InfiniteSet(SetSymmetricDifference): __slots__ = tuple() def get(self, val, default=None): - #return (val in self._sets[0]) ^ (val in self._sets[1]) + # return (val in self._sets[0]) ^ (val in self._sets[1]) v_l = self._sets[0].get(val, default) v_r = self._sets[1].get(val, default) if v_l is default: @@ -3648,15 +3818,15 @@ def get(self, val, default=None): return default -class SetSymmetricDifference_FiniteSet(_FiniteSetMixin, - SetSymmetricDifference_InfiniteSet): +class SetSymmetricDifference_FiniteSet( + _FiniteSetMixin, SetSymmetricDifference_InfiniteSet +): __slots__ = tuple() def _iter_impl(self): set0, set1 = self._sets return itertools.chain( - (_ for _ in set0 if _ not in set1), - (_ for _ in set1 if _ not in set0), + (_ for _ in set0 if _ not in set1), (_ for _ in set1 if _ not in set0) ) def __len__(self): @@ -3666,9 +3836,9 @@ def __len__(self): return sum(1 for _ in self) -class SetSymmetricDifference_OrderedSet(_ScalarOrderedSetMixin, - _OrderedSetMixin, - SetSymmetricDifference_FiniteSet): +class SetSymmetricDifference_OrderedSet( + _ScalarOrderedSetMixin, _OrderedSetMixin, SetSymmetricDifference_FiniteSet +): __slots__ = tuple() def at(self, index): @@ -3693,7 +3863,8 @@ def ord(self, item): if item not in self: raise IndexError( "Cannot identify position of %s in Set %s: item not in Set" - % (item, self.name)) + % (item, self.name) + ) idx = 0 _iter = iter(self) while next(_iter) != item: @@ -3703,6 +3874,7 @@ def ord(self, item): ############################################################################ + class SetProduct(SetOperator): __slots__ = tuple() @@ -3722,9 +3894,7 @@ def __new__(cls, *args): return cls.__new__(cls) def ranges(self): - yield RangeProduct(list( - list(_.ranges()) for _ in self.subsets(False) - )) + yield RangeProduct(list(list(_.ranges()) for _ in self.subsets(False))) def bounds(self): lb, ub = zip(*map(lambda x: x.bounds(), self.subsets(False))) @@ -3757,16 +3927,17 @@ def _flatten_product(self, val): nested tuples (so this only needs to check the top-level terms) """ - for i in range(len(val)-1, -1, -1): + for i in range(len(val) - 1, -1, -1): if val[i].__class__ is tuple: - val = val[:i] + val[i] + val[i+1:] + val = val[:i] + val[i] + val[i + 1 :] return val + class SetProduct_InfiniteSet(SetProduct): __slots__ = tuple() def get(self, val, default=None): - #return self._find_val(val) is not None + # return self._find_val(val) is not None v = self._find_val(val) if v is None: return default @@ -3795,7 +3966,7 @@ def _find_val(self, val): # against the corresponding subset. Failure is not sufficient # to determine the val is not in this set. if hasattr(val, '__len__') and len(val) == len(self._sets): - if all(v in self._sets[i] for i,v in enumerate(val)): + if all(v in self._sets[i] for i, v in enumerate(val)): return val, None # If we are not normalizing indices, then if the above did not @@ -3815,13 +3986,13 @@ def _find_val(self, val): # For this search, if a subset has an unknown dimension, assume # it is "None". - for i,d in enumerate(setDims): + for i, d in enumerate(setDims): if d is UnknownSetDimen: setDims[i] = None # Find the starting index for each subset (based on dimentionality) - index = [None]*len(setDims) + index = [None] * len(setDims) lastIndex = 0 - for i,dim in enumerate(setDims): + for i, dim in enumerate(setDims): index[i] = lastIndex if dim is None: firstNonDimSet = i @@ -3831,7 +4002,7 @@ def _find_val(self, val): # Non-membership is sufficient to return "not found" if lastIndex > v_len: return None - elif val[index[i]:lastIndex] not in self._sets[i]: + elif val[index[i] : lastIndex] not in self._sets[i]: return None # The end of the last subset is always the length of the val index.append(v_len) @@ -3848,8 +4019,8 @@ def _find_val(self, val): # of the forward loop early. Start at the end and work # backwards. lastIndex = index[-1] - for iEnd,dim in enumerate(reversed(setDims)): - i = len(setDims)-(iEnd+1) + for iEnd, dim in enumerate(reversed(setDims)): + i = len(setDims) - (iEnd + 1) if dim is None: lastNonDimSet = i break @@ -3857,15 +4028,17 @@ def _find_val(self, val): index[i] = lastIndex # We can also check for this subset member immediately. # Non-membership is sufficient to return "not found" - if val[index[i]:index[i+1]] not in self._sets[i]: + if val[index[i] : index[i + 1]] not in self._sets[i]: return None if firstNonDimSet == lastNonDimSet: # We have inferred the subpart of val that must be in the # (single) non-dimentioned subset. Check membership and # return the final verdict. - if ( val[index[firstNonDimSet]:index[firstNonDimSet+1]] - in self._sets[firstNonDimSet] ): + if ( + val[index[firstNonDimSet] : index[firstNonDimSet + 1]] + in self._sets[firstNonDimSet] + ): return val, index else: return None @@ -3874,17 +4047,16 @@ def _find_val(self, val): # we can do at this point is to search for any possible # combination that works - subsets = self._sets[firstNonDimSet:lastNonDimSet+1] - _val = val[index[firstNonDimSet]:index[lastNonDimSet+1]] + subsets = self._sets[firstNonDimSet : lastNonDimSet + 1] + _val = val[index[firstNonDimSet] : index[lastNonDimSet + 1]] for cuts in self._cutPointGenerator(subsets, len(_val)): - if all(_val[cuts[i]:cuts[i+1]] in s for i,s in enumerate(subsets)): + if all(_val[cuts[i] : cuts[i + 1]] in s for i, s in enumerate(subsets)): offset = index[firstNonDimSet] - for i in range(1,len(subsets)): - index[firstNonDimSet+i] = offset + cuts[i] + for i in range(1, len(subsets)): + index[firstNonDimSet + i] = offset + cuts[i] return val, index return None - @staticmethod def _cutPointGenerator(subsets, val_len): """Generate the sequence of cut points for a series of subsets. @@ -3900,19 +4072,19 @@ def _cutPointGenerator(subsets, val_len): """ setDims = list(_.dimen for _ in subsets) - cutIters = [None] * (len(subsets)+1) - cutPoints = [0] * (len(subsets)+1) + cutIters = [None] * (len(subsets) + 1) + cutPoints = [0] * (len(subsets) + 1) i = 1 - cutIters[i] = iter(range(val_len+1)) + cutIters[i] = iter(range(val_len + 1)) cutPoints[-1] = val_len while i > 0: try: cutPoints[i] = next(cutIters[i]) - if i < len(subsets)-1: + if i < len(subsets) - 1: if setDims[i] is not None: - cutIters[i+1] = iter((cutPoints[i]+setDims[i],)) + cutIters[i + 1] = iter((cutPoints[i] + setDims[i],)) else: - cutIters[i+1] = iter(range(cutPoints[i], val_len+1)) + cutIters[i + 1] = iter(range(cutPoints[i], val_len + 1)) i += 1 elif cutPoints[i] > val_len: i -= 1 @@ -3922,7 +4094,6 @@ def _cutPointGenerator(subsets, val_len): i -= 1 - class SetProduct_FiniteSet(_FiniteSetMixin, SetProduct_InfiniteSet): __slots__ = tuple() @@ -3930,8 +4101,11 @@ def _iter_impl(self): _iter = itertools.product(*self._sets) # Note: if all the member sets are simple 1-d sets, then there # is no need to call flatten_product. - if FLATTEN_CROSS_PRODUCT and normalize_index.flatten \ - and self.dimen != len(self._sets): + if ( + FLATTEN_CROSS_PRODUCT + and normalize_index.flatten + and self.dimen != len(self._sets) + ): return (self._flatten_product(_) for _ in _iter) return _iter @@ -3945,8 +4119,9 @@ def __len__(self): return ans -class SetProduct_OrderedSet(_ScalarOrderedSetMixin, _OrderedSetMixin, - SetProduct_FiniteSet): +class SetProduct_OrderedSet( + _ScalarOrderedSetMixin, _OrderedSetMixin, SetProduct_FiniteSet +): __slots__ = tuple() def at(self, index): @@ -3958,9 +4133,8 @@ def at(self, index): _ord[i], _idx = _idx % _ord[i], _idx // _ord[i] if _idx: raise IndexError("%s index out of range" % (self.name,)) - ans = tuple(s.at(i+1) for s,i in zip(self._sets, _ord)) - if FLATTEN_CROSS_PRODUCT and normalize_index.flatten \ - and self.dimen != len(ans): + ans = tuple(s.at(i + 1) for s, i in zip(self._sets, _ord)) + if FLATTEN_CROSS_PRODUCT and normalize_index.flatten and self.dimen != len(ans): return self._flatten_product(ans) return ans @@ -3976,22 +4150,26 @@ def ord(self, item): if found is None: raise IndexError( "Cannot identify position of %s in Set %s: item not in Set" - % (item, self.name)) + % (item, self.name) + ) val, cutPoints = found if cutPoints is not None: - val = tuple( val[cutPoints[i]:cutPoints[i+1]] - for i in range(len(self._sets)) ) - _idx = tuple(s.ord(val[i])-1 for i,s in enumerate(self._sets)) + val = tuple( + val[cutPoints[i] : cutPoints[i + 1]] for i in range(len(self._sets)) + ) + _idx = tuple(s.ord(val[i]) - 1 for i, s in enumerate(self._sets)) _len = list(len(_) for _ in self._sets) _len.append(1) ans = 0 for pos, n in zip(_idx, _len[1:]): ans += pos ans *= n - return ans+1 + return ans + 1 + ############################################################################ + class _AnySet(_SetData, Set): def __init__(self, **kwds): _SetData.__init__(self, component=self) @@ -4038,9 +4216,11 @@ def __str__(self): class _AnyWithNoneSet(_AnySet): # Note that we put the deprecation warning on contains() and not on # the class because we will always create a global instance for - # backwards compatability with the Book. - @deprecated("The AnyWithNone set is deprecated. " - "Use Any, which includes None", version='5.7') + # backwards compatibility with the Book. + @deprecated( + "The AnyWithNone set is deprecated. Use Any, which includes None", + version='5.7', + ) def get(self, val, default=None): return super(_AnyWithNoneSet, self).get(val, default) @@ -4080,6 +4260,7 @@ def __str__(self): ############################################################################ + def DeclareGlobalSet(obj, caller_globals=None): """Declare a copy of a set as a global set in the calling module @@ -4101,8 +4282,7 @@ def DeclareGlobalSet(obj, caller_globals=None): # run afoul of the logic in GlobalSet.__new__ _name = obj.local_name if _name in GlobalSets and obj is not GlobalSets[_name]: - raise RuntimeError("Duplicate Global Set declaration, %s" - % (_name,)) + raise RuntimeError("Duplicate Global Set declaration, %s" % (_name,)) # Push this object into the caller's module namespace # Stack: 0: DeclareGlobalSet() @@ -4110,8 +4290,7 @@ def DeclareGlobalSet(obj, caller_globals=None): if caller_globals is None: caller_globals = inspect.currentframe().f_back.f_globals if _name in caller_globals and obj is not caller_globals[_name]: - raise RuntimeError("Refusing to overwrite global object, %s" - % (_name,)) + raise RuntimeError("Refusing to overwrite global object, %s" % (_name,)) if _name in GlobalSets: _set = caller_globals[_name] = GlobalSets[_name] @@ -4126,7 +4305,9 @@ class GlobalSet(GlobalSetBase, obj.__class__): References to this object will not be duplicated by deepcopy and be maintained/restored by pickle. - """ % (obj.doc,) + """ % ( + obj.doc, + ) # Note: a simple docstring does not appear to be picked up (at # least in Python 2.7), so we will explicitly set the __doc__ # attribute. @@ -4146,15 +4327,19 @@ def __new__(cls, *args, **kwds): GlobalSet, we can mock up the old behavior through how we handle __new__(). """ - if cls is GlobalSet and GlobalSet.global_name \ - and issubclass(GlobalSet, RangeSet): + if ( + cls is GlobalSet + and GlobalSet.global_name + and issubclass(GlobalSet, RangeSet) + ): deprecation_warning( "The use of RealSet, IntegerSet, BinarySet and " "BooleanSet as Pyomo Set class generators is " "deprecated. Please either use one of the pre-declared " "global Sets (e.g., Reals, NonNegativeReals, Integers, " "PositiveIntegers, Binary), or create a custom RangeSet.", - version='5.7.1') + version='5.7.1', + ) # Note: we will completely ignore any positional # arguments. In this situation, these could be the # parent_block and any indices; e.g., @@ -4171,16 +4356,15 @@ def __new__(cls, *args, **kwds): name = base_set.name else: name = cls_name - ans = RangeSet( ranges=list(range_init(None, None).ranges()), - name=name ) - if name_kwd is None and ( - cls_name is not None or bounds is not None): + ans = RangeSet(ranges=list(range_init(None, None).ranges()), name=name) + if name_kwd is None and (cls_name is not None or bounds is not None): ans._name += str(ans.bounds()) else: ans = super(GlobalSet, cls).__new__(cls, *args, **kwds) if kwds: raise RuntimeError("Unexpected keyword arguments: %s" % (kwds,)) return ans + # # Global sets are assumed to be constant sets. For performance, # we will precompute and cache the Set bounds() and interval @@ -4205,95 +4389,136 @@ def get_interval(self): return _set -DeclareGlobalSet(_AnySet( - name='Any', - doc="A global Pyomo Set that admits any value", -), globals()) -DeclareGlobalSet(_AnyWithNoneSet( - name='AnyWithNone', - doc="A global Pyomo Set that admits any value", -), globals()) -DeclareGlobalSet(_EmptySet( - name='EmptySet', - doc="A global Pyomo Set that contains no members", -), globals()) - -DeclareGlobalSet(RangeSet( - name='Reals', - doc='A global Pyomo Set that admits any real (floating point) value', - ranges=(NumericRange(None,None,0),), -), globals()) -DeclareGlobalSet(RangeSet( - name='NonNegativeReals', - doc='A global Pyomo Set admitting any real value in [0, +inf]', - ranges=(NumericRange(0,None,0),), -), globals()) -DeclareGlobalSet(RangeSet( - name='NonPositiveReals', - doc='A global Pyomo Set admitting any real value in [-inf, 0]', - ranges=(NumericRange(None,0,0),), -), globals()) -DeclareGlobalSet(RangeSet( - name='NegativeReals', - doc='A global Pyomo Set admitting any real value in [-inf, 0)', - ranges=(NumericRange(None,0,0,(True,False)),), -), globals()) -DeclareGlobalSet(RangeSet( - name='PositiveReals', - doc='A global Pyomo Set admitting any real value in (0, +inf]', - ranges=(NumericRange(0,None,0,(False,True)),), -), globals()) - -DeclareGlobalSet(RangeSet( - name='Integers', - doc='A global Pyomo Set admitting any integer value', - ranges=(NumericRange(0,None,1), NumericRange(0,None,-1)), -), globals()) -DeclareGlobalSet(RangeSet( - name='NonNegativeIntegers', - doc='A global Pyomo Set admitting any integer value in [0, +inf]', - ranges=(NumericRange(0,None,1),), -), globals()) -DeclareGlobalSet(RangeSet( - name='NonPositiveIntegers', - doc='A global Pyomo Set admitting any integer value in [-inf, 0]', - ranges=(NumericRange(0,None,-1),), -), globals()) -DeclareGlobalSet(RangeSet( - name='NegativeIntegers', - doc='A global Pyomo Set admitting any integer value in [-inf, -1]', - ranges=(NumericRange(-1,None,-1),), -), globals()) -DeclareGlobalSet(RangeSet( - name='PositiveIntegers', - doc='A global Pyomo Set admitting any integer value in [1, +inf]', - ranges=(NumericRange(1,None,1),), -), globals()) - -DeclareGlobalSet(RangeSet( - name='Binary', - doc='A global Pyomo Set admitting the integers {0, 1}', - ranges=(NumericRange(0,1,1),), -), globals()) - -#TODO: Convert Boolean from an alias for Binary to a proper Boolean Set +DeclareGlobalSet( + _AnySet(name='Any', doc="A global Pyomo Set that admits any value"), globals() +) +DeclareGlobalSet( + _AnyWithNoneSet(name='AnyWithNone', doc="A global Pyomo Set that admits any value"), + globals(), +) +DeclareGlobalSet( + _EmptySet(name='EmptySet', doc="A global Pyomo Set that contains no members"), + globals(), +) + +DeclareGlobalSet( + RangeSet( + name='Reals', + doc='A global Pyomo Set that admits any real (floating point) value', + ranges=(NumericRange(None, None, 0),), + ), + globals(), +) +DeclareGlobalSet( + RangeSet( + name='NonNegativeReals', + doc='A global Pyomo Set admitting any real value in [0, +inf]', + ranges=(NumericRange(0, None, 0),), + ), + globals(), +) +DeclareGlobalSet( + RangeSet( + name='NonPositiveReals', + doc='A global Pyomo Set admitting any real value in [-inf, 0]', + ranges=(NumericRange(None, 0, 0),), + ), + globals(), +) +DeclareGlobalSet( + RangeSet( + name='NegativeReals', + doc='A global Pyomo Set admitting any real value in [-inf, 0)', + ranges=(NumericRange(None, 0, 0, (True, False)),), + ), + globals(), +) +DeclareGlobalSet( + RangeSet( + name='PositiveReals', + doc='A global Pyomo Set admitting any real value in (0, +inf]', + ranges=(NumericRange(0, None, 0, (False, True)),), + ), + globals(), +) + +DeclareGlobalSet( + RangeSet( + name='Integers', + doc='A global Pyomo Set admitting any integer value', + ranges=(NumericRange(0, None, 1), NumericRange(0, None, -1)), + ), + globals(), +) +DeclareGlobalSet( + RangeSet( + name='NonNegativeIntegers', + doc='A global Pyomo Set admitting any integer value in [0, +inf]', + ranges=(NumericRange(0, None, 1),), + ), + globals(), +) +DeclareGlobalSet( + RangeSet( + name='NonPositiveIntegers', + doc='A global Pyomo Set admitting any integer value in [-inf, 0]', + ranges=(NumericRange(0, None, -1),), + ), + globals(), +) +DeclareGlobalSet( + RangeSet( + name='NegativeIntegers', + doc='A global Pyomo Set admitting any integer value in [-inf, -1]', + ranges=(NumericRange(-1, None, -1),), + ), + globals(), +) +DeclareGlobalSet( + RangeSet( + name='PositiveIntegers', + doc='A global Pyomo Set admitting any integer value in [1, +inf]', + ranges=(NumericRange(1, None, 1),), + ), + globals(), +) + +DeclareGlobalSet( + RangeSet( + name='Binary', + doc='A global Pyomo Set admitting the integers {0, 1}', + ranges=(NumericRange(0, 1, 1),), + ), + globals(), +) + +# TODO: Convert Boolean from an alias for Binary to a proper Boolean Set # admitting {True, False}) -DeclareGlobalSet(RangeSet( - name='Boolean', - doc='A global Pyomo Set admitting the integers {0, 1}', - ranges=(NumericRange(0,1,1),), -), globals()) - -DeclareGlobalSet(RangeSet( - name='PercentFraction', - doc='A global Pyomo Set admitting any real value in [0, 1]', - ranges=(NumericRange(0,1,0),), -), globals()) -DeclareGlobalSet(RangeSet( - name='UnitInterval', - doc='A global Pyomo Set admitting any real value in [0, 1]', - ranges=(NumericRange(0,1,0),), -), globals()) +DeclareGlobalSet( + RangeSet( + name='Boolean', + doc='A global Pyomo Set admitting the integers {0, 1}', + ranges=(NumericRange(0, 1, 1),), + ), + globals(), +) + +DeclareGlobalSet( + RangeSet( + name='PercentFraction', + doc='A global Pyomo Set admitting any real value in [0, 1]', + ranges=(NumericRange(0, 1, 0),), + ), + globals(), +) +DeclareGlobalSet( + RangeSet( + name='UnitInterval', + doc='A global Pyomo Set admitting any real value in [0, 1]', + ranges=(NumericRange(0, 1, 0),), + ), + globals(), +) # DeclareGlobalSet(Set( # initialize=[None], @@ -4302,14 +4527,29 @@ def get_interval(self): # ), globals()) -real_global_set_ids = set(id(_) for _ in ( - Reals, NonNegativeReals, NonPositiveReals, NegativeReals, PositiveReals, - PercentFraction, UnitInterval, -)) -integer_global_set_ids = set(id(_) for _ in ( - Integers, NonNegativeIntegers, NonPositiveIntegers, NegativeIntegers, - PositiveIntegers, Binary, -)) +real_global_set_ids = set( + id(_) + for _ in ( + Reals, + NonNegativeReals, + NonPositiveReals, + NegativeReals, + PositiveReals, + PercentFraction, + UnitInterval, + ) +) +integer_global_set_ids = set( + id(_) + for _ in ( + Integers, + NonNegativeIntegers, + NonPositiveIntegers, + NegativeIntegers, + PositiveIntegers, + Binary, + ) +) RealSet = Reals.__class__ IntegerSet = Integers.__class__ @@ -4322,15 +4562,21 @@ def get_interval(self): # classes (leveraging the new global RangeSet objects) # -@deprecated("RealInterval has been deprecated. Please use " - "RangeSet(lower, upper, 0)", version='5.7') + +@deprecated( + "RealInterval has been deprecated. Please use RangeSet(lower, upper, 0)", + version='5.7', +) class RealInterval(RealSet): def __new__(cls, **kwds): kwds.setdefault('class_name', 'RealInterval') return super(RealInterval, cls).__new__(RealSet, **kwds) -@deprecated("IntegerInterval has been deprecated. Please use " - "RangeSet(lower, upper, 1)", version='5.7') + +@deprecated( + "IntegerInterval has been deprecated. Please use RangeSet(lower, upper, 1)", + version='5.7', +) class IntegerInterval(IntegerSet): def __new__(cls, **kwds): kwds.setdefault('class_name', 'IntegerInterval') diff --git a/pyomo/core/base/set_types.py b/pyomo/core/base/set_types.py index 4f175e001b5..db9fe0f796c 100644 --- a/pyomo/core/base/set_types.py +++ b/pyomo/core/base/set_types.py @@ -10,11 +10,23 @@ # ___________________________________________________________________________ from pyomo.core.base.set import ( - Reals, PositiveReals, NonPositiveReals, NegativeReals, NonNegativeReals, - Integers, PositiveIntegers, NonPositiveIntegers, - NegativeIntegers, NonNegativeIntegers, - Boolean, Binary, - Any, AnyWithNone, EmptySet, UnitInterval, PercentFraction, - RealInterval, IntegerInterval, + Reals, + PositiveReals, + NonPositiveReals, + NegativeReals, + NonNegativeReals, + Integers, + PositiveIntegers, + NonPositiveIntegers, + NegativeIntegers, + NonNegativeIntegers, + Boolean, + Binary, + Any, + AnyWithNone, + EmptySet, + UnitInterval, + PercentFraction, + RealInterval, + IntegerInterval, ) - diff --git a/pyomo/core/base/sets.py b/pyomo/core/base/sets.py index cc055fac635..cbaad33c0b8 100644 --- a/pyomo/core/base/sets.py +++ b/pyomo/core/base/sets.py @@ -16,12 +16,20 @@ __all__ = ['Set', 'set_options', 'simple_set_rule', 'SetOf'] from .set import ( - process_setarg, set_options, simple_set_rule, - _SetDataBase, _SetData, Set, SetOf, IndexedSet, + process_setarg, + set_options, + simple_set_rule, + _SetDataBase, + _SetData, + Set, + SetOf, + IndexedSet, ) from pyomo.common.deprecation import deprecation_warning + deprecation_warning( 'The pyomo.core.base.sets module is deprecated. ' 'Import Set objects from pyomo.core.base.set or pyomo.core.', - version='5.7') + version='5.7', +) diff --git a/pyomo/core/base/sos.py b/pyomo/core/base/sos.py index 4d0b451f3f8..98cc9d28c8f 100644 --- a/pyomo/core/base/sos.py +++ b/pyomo/core/base/sos.py @@ -17,11 +17,13 @@ from pyomo.common.deprecation import RenamedClass from pyomo.common.log import is_debug_set from pyomo.common.timing import ConstructionTimer + from pyomo.core.base.misc import apply_indexed_rule from pyomo.core.base.component import ActiveComponentData, ModelComponentFactory from pyomo.core.base.global_set import UnindexedComponent_index from pyomo.core.base.indexed_component import ( - ActiveIndexedComponent, UnindexedComponent_set + ActiveIndexedComponent, + UnindexedComponent_set, ) from pyomo.core.base.set_types import PositiveIntegers @@ -48,24 +50,12 @@ class _SOSConstraintData(ActiveComponentData): __slots__ = ('_variables', '_weights', '_level') def __init__(self, owner): - """ Constructor """ + """Constructor""" self._level = None self._variables = [] self._weights = [] ActiveComponentData.__init__(self, owner) - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - result = super(_SOSConstraintData, self).__getstate__() - for i in _SOSConstraintData.__slots__: - result[i] = getattr(self, i) - return result - - # Since this class requires no special processing of the state - # dictionary, it does not need to implement __setstate__() - def num_variables(self): return len(self._variables) @@ -82,8 +72,7 @@ def level(self): @level.setter def level(self, level): if level not in PositiveIntegers: - raise ValueError("SOS Constraint level must " - "be a positive integer") + raise ValueError("SOS Constraint level must be a positive integer") self._level = level @property @@ -108,64 +97,250 @@ def set_items(self, variables, weights): for v, w in zip(variables, weights): self._variables.append(v) if w < 0.0: - raise ValueError("Cannot set negative weight %f " - "for variable %s" % (w, v.name)) + raise ValueError( + "Cannot set negative weight %f for variable %s" % (w, v.name) + ) self._weights.append(w) @ModelComponentFactory.register("SOS constraint expressions.") class SOSConstraint(ActiveIndexedComponent): """ - Represents an SOS-n constraint. - - Usage: - model.C1 = SOSConstraint( - [...], - var=VAR, - [set=SET OR index=SET], - [sos=N OR level=N] - [weights=WEIGHTS] - ) - [...] Any number of sets used to index SET - VAR The set of variables making up the SOS. Indexed by SET. - SET The set used to index VAR. SET is optionally indexed by - the [...] sets. If SET is not specified, VAR is indexed - over the set(s) it was defined with. - N This constraint is an SOS-N constraint. Defaults to 1. - WEIGHTS A Param representing the variables weights in the SOS sets. - A simple counter is used to generate weights when this keyword - is not used. - - Example: - - model = AbstractModel() - model.A = Set() - model.B = Set(A) - model.X = Set(B) - - model.C1 = SOSConstraint(model.A, var=model.X, set=model.B, sos=1) - - This constraint actually creates one SOS-1 constraint for each - element of model.A (e.g., if |A| == N, there are N constraints). - In each constraint, model.X is indexed by the elements of - model.B[a], where 'a' is the current index of model.A. - - model = AbstractModel() - model.A = Set() - model.X = Var(model.A) - - model.C2 = SOSConstraint(var=model.X, sos=2) - - This produces exactly one SOS-2 constraint using all the variables - in model.X. + Implements constraints for special ordered sets (SOS). + + Parameters + ---------- + sos : int + The type of SOS. + var : pyomo.environ.Var + The group of variables from which the SOS(s) will be created. + index : pyomo.environ.Set, list or dict, optional + A data structure with the indexes for the variables that are to be + members of the SOS(s). The indexes can be provided as a pyomo Set: + either indexed, if the SOS is indexed; or non-indexed, otherwise. + Alternatively, the indexes can be provided as a list, for a non-indexed + SOS, or as a dict, for indexed SOS(s). + weights : pyomo.environ.Param or dict, optional + A data structure with the weights for each member of the SOS(s). These + can be provided as pyomo Param or as a dict. If not provided, the + weights will be determined automatically using the var index set. + rule : optional + A method returning a 2-tuple with lists of variables and the respective + weights in the same order, or a list of variables whose weights are + then determined from their position within the list or, alternatively, + pyomo.environ.Constraint.Skip if the constraint should be not be + included in the model/instance. This parameter cannot be used in + combination with var, index or weights. + + Examples + ------- + + 1 - An SOS of type **N** made up of all members of a pyomo Var component: + + >>> # import pyomo + >>> import pyomo.environ as pyo + >>> # declare the model + >>> model = pyo.AbstractModel() + >>> # define the SOS type + >>> N = 1 # 2, 3, ... + >>> # the set that indexes the variables + >>> model.A = pyo.Set() + >>> # the variables under consideration + >>> model.x = pyo.Var(model.A) + >>> # the sos constraint + >>> model.mysos = pyo.SOSConstraint(var=model.x, sos=N) + + 2 - An SOS of type **N** made up of all members of a pyomo Var component, + each with a specific weight: + + >>> # declare the model + >>> model = pyo.AbstractModel() + >>> # define the SOS type + >>> N = 1 # 2, 3, ... + >>> # the set that indexes the variables + >>> model.A = pyo.Set() + >>> # the variables under consideration + >>> model.x = pyo.Var(model.A) + >>> # the weights for each variable used in the sos constraints + >>> model.mysosweights = pyo.Param(model.A) + >>> # the sos constraint + >>> model.mysos = pyo.SOSConstraint( + ... var=model.x, + ... sos=N, + ... weights=model.mysosweights + ... ) + + 3 - An SOS of type **N** made up of selected members of a Var component: + + >>> # declare the model + >>> model = pyo.AbstractModel() + >>> # define the SOS type + >>> N = 1 # 2, 3, ... + >>> # the set that indexes the variables + >>> model.A = pyo.Set() + >>> # the variables under consideration + >>> model.x = pyo.Var(model.A) + >>> # the set that indexes the variables actually used in the constraint + >>> model.B = pyo.Set(within=model.A) + >>> # the sos constraint + >>> model.mysos = pyo.SOSConstraint(var=model.x, sos=N, index=model.B) + + 4 - An SOS of type **N** made up of selected members of a Var component, + each with a specific weight: + + >>> # declare the model + >>> model = pyo.AbstractModel() + >>> # define the SOS type + >>> N = 1 # 2, 3, ... + >>> # the set that indexes the variables + >>> model.A = pyo.Set() + >>> # the variables under consideration + >>> model.x = pyo.Var(model.A) + >>> # the set that indexes the variables actually used in the constraint + >>> model.B = pyo.Set(within=model.A) + >>> # the weights for each variable used in the sos constraints + >>> model.mysosweights = pyo.Param(model.B) + >>> # the sos constraint + >>> model.mysos = pyo.SOSConstraint( + ... var=model.x, + ... sos=N, + ... index=model.B, + ... weights=model.mysosweights + ... ) + + 5 - A set of SOS(s) of type **N** made up of members of a pyomo Var + component: + + >>> # declare the model + >>> model = pyo.AbstractModel() + >>> # define the SOS type + >>> N = 1 # 2, 3, ... + >>> # the set that indexes the variables + >>> model.A = pyo.Set() + >>> # the variables under consideration + >>> model.x = pyo.Var(model.A) + >>> # the set indexing the sos constraints + >>> model.B = pyo.Set() + >>> # the sets containing the variable indexes for each constraint + >>> model.mysosvarindexset = pyo.Set(model.B) + >>> # the sos constraints + >>> model.mysos = pyo.SOSConstraint( + ... model.B, + ... var=model.x, + ... sos=N, + ... index=model.mysosvarindexset + ... ) + + 6 - A set of SOS(s) of type **N** made up of members of a pyomo Var + component, each with a specific weight: + + >>> # declare the model + >>> model = pyo.AbstractModel() + >>> # define the SOS type + >>> N = 1 # 2, 3, ... + >>> # the set that indexes the variables + >>> model.A = pyo.Set() + >>> # the variables under consideration + >>> model.x = pyo.Var(model.A) + >>> # the set indexing the sos constraints + >>> model.B = pyo.Set() + >>> # the sets containing the variable indexes for each constraint + >>> model.mysosvarindexset = pyo.Set(model.B) + >>> # the set that indexes the variables used in the sos constraints + >>> model.C = pyo.Set(within=model.A) + >>> # the weights for each variable used in the sos constraints + >>> model.mysosweights = pyo.Param(model.C) + >>> # the sos constraints + >>> model.mysos = pyo.SOSConstraint( + ... model.B, + ... var=model.x, + ... sos=N, + ... index=model.mysosvarindexset, + ... weights=model.mysosweights, + ... ) + + 7 - A simple SOS of type **N** created using the rule parameter: + + >>> # declare the model + >>> model = pyo.AbstractModel() + >>> # define the SOS type + >>> N = 1 # 2, 3, ... + >>> # the set that indexes the variables + >>> model.A = pyo.Set() + >>> # the variables under consideration + >>> model.x = pyo.Var(model.A, domain=pyo.NonNegativeReals) + >>> # the rule method creating the constraint + >>> def rule_mysos(m): + ... var_list = [m.x[a] for a in m.x] + ... weight_list = [i+1 for i in range(len(var_list))] + ... return (var_list, weight_list) + >>> # the sos constraint(s) + >>> model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=N) + + 8 - A simple SOS of type **N** created using the rule parameter, in which + the weights are determined automatically: + + >>> # declare the model + >>> model = pyo.AbstractModel() + >>> # define the SOS type + >>> N = 1 # 2, 3, ... + >>> # the set that indexes the variables + >>> model.A = pyo.Set() + >>> # the variables under consideration + >>> model.x = pyo.Var(model.A, domain=pyo.NonNegativeReals) + >>> # the rule method creating the constraint + >>> def rule_mysos(m): + ... return [m.x[a] for a in m.x] + >>> # the sos constraint(s) + >>> model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=N) + + 9 - A set of SOS(s) of type **N** involving members of distinct pyomo Var + components, each with a specific weight. This requires the rule parameter: + + >>> # declare the model + >>> model = pyo.AbstractModel() + >>> # define the SOS type + >>> N = 1 # 2, 3, ... + >>> # the set that indexes the x variables + >>> model.A = pyo.Set() + >>> # the set that indexes the y variables + >>> model.B = pyo.Set() + >>> # the set that indexes the SOS constraints + >>> model.C = pyo.Set() + >>> # the x variables, which will be used in the constraints + >>> model.x = pyo.Var(model.A, domain=pyo.NonNegativeReals) + >>> # the y variables, which will be used in the constraints + >>> model.y = pyo.Var(model.B, domain=pyo.NonNegativeReals) + >>> # the x variable indices for each constraint + >>> model.mysosindex_x = pyo.Set(model.C) + >>> # the y variable indices for each constraint + >>> model.mysosindex_y = pyo.Set(model.C) + >>> # the weights for the x variable indices + >>> model.mysosweights_x = pyo.Param(model.A) + >>> # the weights for the y variable indices + >>> model.mysosweights_y = pyo.Param(model.B) + >>> # the rule method with which each constraint c is built + >>> def rule_mysos(m, c): + ... var_list = [m.x[a] for a in m.mysosindex_x[c]] + ... var_list.extend([m.y[b] for b in m.mysosindex_y[c]]) + ... weight_list = [m.mysosweights_x[a] for a in m.mysosindex_x[c]] + ... weight_list.extend([m.mysosweights_y[b] for b in m.mysosindex_y[c]]) + ... return (var_list, weight_list) + >>> # the sos constraint(s) + >>> model.mysos = pyo.SOSConstraint( + ... model.C, + ... rule=rule_mysos, + ... sos=N + ... ) + """ - Skip = (1000,) + Skip = (1000,) def __new__(cls, *args, **kwds): if cls != SOSConstraint: return super(SOSConstraint, cls).__new__(cls) - if not args or (args[0] is UnindexedComponent_set and len(args)==1): + if not args or (args[0] is UnindexedComponent_set and len(args) == 1): return ScalarSOSConstraint.__new__(ScalarSOSConstraint) else: return IndexedSOSConstraint.__new__(IndexedSOSConstraint) @@ -181,17 +356,25 @@ def __init__(self, *args, **kwargs): initialize = kwargs.pop('rule', initialize) if not initialize is None: if 'var' in kwargs: - raise TypeError("Cannot specify the 'var' argument with the 'rule' or 'initialize' argument") + raise TypeError( + "Cannot specify the 'var' argument with the 'rule' or 'initialize' argument" + ) if 'index' in kwargs: - raise TypeError("Cannot specify the 'index' argument with the 'rule' or 'initialize' argument") + raise TypeError( + "Cannot specify the 'index' argument with the 'rule' or 'initialize' argument" + ) if 'weights' in kwargs: - raise TypeError("Cannot specify the 'weights' argument with the 'rule' or 'initialize' argument") + raise TypeError( + "Cannot specify the 'weights' argument with the 'rule' or 'initialize' argument" + ) # # The 'var' argument # sosVars = kwargs.pop('var', None) if sosVars is None and initialize is None: - raise TypeError("SOSConstraint() requires either the 'var' or 'initialize' arguments") + raise TypeError( + "SOSConstraint() requires either the 'var' or 'initialize' arguments" + ) # # The 'weights' argument # @@ -204,14 +387,18 @@ def __init__(self, *args, **kwargs): # The 'sos' or 'level' argument # if 'sos' in kwargs and 'level' in kwargs: - raise TypeError("Specify only one of 'sos' and 'level' -- " \ - "they are equivalent keyword arguments") + raise TypeError( + "Specify only one of 'sos' and 'level' -- " + "they are equivalent keyword arguments" + ) sosLevel = kwargs.pop('sos', None) sosLevel = kwargs.pop('level', sosLevel) if sosLevel is None: - raise TypeError("SOSConstraint() requires that either the " \ - "'sos' or 'level' keyword arguments be set to indicate " \ - "the type of SOS.") + raise TypeError( + "SOSConstraint() requires that either the " + "'sos' or 'level' keyword arguments be set to indicate " + "the type of SOS." + ) # # Set attributes # @@ -230,24 +417,30 @@ def construct(self, data=None): """ Construct this component """ - assert data is None # because I don't know why it's an argument + assert data is None # because I don't know why it's an argument generate_debug_messages = is_debug_set(logger) - if self._constructed is True: #pragma:nocover + if self._constructed is True: # pragma:nocover return - if generate_debug_messages: #pragma:nocover - logger.debug("Constructing SOSConstraint %s",self.name) + if generate_debug_messages: # pragma:nocover + logger.debug("Constructing SOSConstraint %s", self.name) timer = ConstructionTimer(self) self._constructed = True if self._rule is None: if self._sosSet is None and self.is_indexed(): - if generate_debug_messages: #pragma:nocover - logger.debug(" Cannot construct "+self.name+". No rule is defined and no SOS sets are defined.") + if generate_debug_messages: # pragma:nocover + logger.debug( + " Cannot construct " + + self.name + + ". No rule is defined and no SOS sets are defined." + ) else: if not self.is_indexed(): if self._sosSet is None: - if getattr(self._sosVars.index_set(), 'isordered', lambda *x: False)(): + if getattr( + self._sosVars.index_set(), 'isordered', lambda *x: False + )(): _sosSet = {None: list(self._sosVars.index_set())} else: _sosSet = {None: set(self._sosVars.index_set())} @@ -257,20 +450,28 @@ def construct(self, data=None): _sosSet = self._sosSet for index, sosSet in _sosSet.items(): - if generate_debug_messages: #pragma:nocover - logger.debug(" Constructing "+self.name+" index "+str(index)) + if generate_debug_messages: # pragma:nocover + logger.debug( + " Constructing " + self.name + " index " + str(index) + ) if self._sosLevel == 2: # # Check that the sets are ordered. # - ordered=False - if type(sosSet) is list or sosSet is UnindexedComponent_set or len(sosSet) == 1: - ordered=True + ordered = False + if ( + type(sosSet) is list + or sosSet is UnindexedComponent_set + or len(sosSet) == 1 + ): + ordered = True if hasattr(sosSet, 'isordered') and sosSet.isordered(): - ordered=True + ordered = True if not ordered: - raise ValueError("Cannot define a SOS over an unordered index.") + raise ValueError( + "Cannot define a SOS over an unordered index." + ) variables = [self._sosVars[idx] for idx in sosSet] if self._sosWeights is not None: @@ -290,10 +491,14 @@ def construct(self, data=None): logger.error( "Rule failed when generating expression for " "sos constraint %s with index %s:\n%s: %s" - % ( self.name, str(index), type(err).__name__, err ) ) + % (self.name, str(index), type(err).__name__, err) + ) raise if tmp is None: - raise ValueError("SOSConstraint rule returned None instead of SOSConstraint.Skip for index %s" % str(index)) + raise ValueError( + "SOSConstraint rule returned None instead of SOSConstraint.Skip for index %s" + % str(index) + ) if type(tmp) is tuple: if tmp is SOSConstraint.Skip: continue @@ -319,7 +524,7 @@ def add(self, index, variables, weights=None): soscondata.level = self._sosLevel if weights is None: - soscondata.set_items(variables, list(range(1, len(variables)+1))) + soscondata.set_items(variables, list(range(1, len(variables) + 1))) else: soscondata.set_items(variables, weights) @@ -328,33 +533,25 @@ def pprint(self, ostream=None, verbose=False, prefix=""): """TODO""" if ostream is None: ostream = sys.stdout - ostream.write(" "+self.local_name+" : ") + ostream.write(" " + self.local_name + " : ") if not self.doc is None: - ostream.write(self.doc+'\n') + ostream.write(self.doc + '\n') ostream.write(" ") - ostream.write("\tSize="+str(len(self._data.keys()))+' ') + ostream.write("\tSize=" + str(len(self._data.keys())) + ' ') if self.is_indexed(): - ostream.write("\tIndex= "+self._index_set.name+'\n') + ostream.write("\tIndex= " + self._index_set.name + '\n') else: ostream.write("\n") for val in self._data: if not val is None: - ostream.write("\t"+str(val)+'\n') - ostream.write("\t\tType="+str(self._data[val].level)+'\n') + ostream.write("\t" + str(val) + '\n') + ostream.write("\t\tType=" + str(self._data[val].level) + '\n') ostream.write("\t\tWeight : Variable\n") for var, weight in self._data[val].get_items(): - ostream.write("\t\t"+str(weight)+' : '+var.name+'\n') - + ostream.write("\t\t" + str(weight) + ' : ' + var.name + '\n') -# Since this class derives from Component and Component.__getstate__ -# just packs up the entire __dict__ into the state dict, there s -# nothing special that we need to do here. We will just defer to the -# super() get/set state. Since all of our get/set state methods -# rely on super() to traverse the MRO, this will automatically pick -# up both the Component and Data base classes. class ScalarSOSConstraint(SOSConstraint, _SOSConstraintData): - def __init__(self, *args, **kwd): _SOSConstraintData.__init__(self, self) SOSConstraint.__init__(self, *args, **kwd) @@ -367,7 +564,5 @@ class SimpleSOSConstraint(metaclass=RenamedClass): class IndexedSOSConstraint(SOSConstraint): - def __init__(self, *args, **kwds): - super(IndexedSOSConstraint,self).__init__(*args, **kwds) - + super(IndexedSOSConstraint, self).__init__(*args, **kwds) diff --git a/pyomo/core/base/suffix.py b/pyomo/core/base/suffix.py index 0bd2f9ae675..b0c3c8fdb22 100644 --- a/pyomo/core/base/suffix.py +++ b/pyomo/core/base/suffix.py @@ -9,9 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ('Suffix', - 'active_export_suffix_generator', - 'active_import_suffix_generator') +__all__ = ('Suffix', 'active_export_suffix_generator', 'active_import_suffix_generator') import logging from pyomo.common.pyomo_typing import overload @@ -39,79 +37,85 @@ def active_export_suffix_generator(a_block, datatype=False): - if (datatype is False): + if datatype is False: for name, suffix in a_block.component_map(Suffix, active=True).items(): if suffix.export_enabled() is True: yield name, suffix else: for name, suffix in a_block.component_map(Suffix, active=True).items(): - if (suffix.export_enabled() is True) and \ - (suffix.get_datatype() is datatype): + if (suffix.export_enabled() is True) and ( + suffix.get_datatype() is datatype + ): yield name, suffix def export_suffix_generator(a_block, datatype=False): - if (datatype is False): + if datatype is False: for name, suffix in a_block.component_map(Suffix).items(): if suffix.export_enabled() is True: yield name, suffix else: for name, suffix in a_block.component_map(Suffix).items(): - if (suffix.export_enabled() is True) and \ - (suffix.get_datatype() is datatype): + if (suffix.export_enabled() is True) and ( + suffix.get_datatype() is datatype + ): yield name, suffix def active_import_suffix_generator(a_block, datatype=False): - if (datatype is False): + if datatype is False: for name, suffix in a_block.component_map(Suffix, active=True).items(): if suffix.import_enabled() is True: yield name, suffix else: for name, suffix in a_block.component_map(Suffix, active=True).items(): - if (suffix.import_enabled() is True) and \ - (suffix.get_datatype() is datatype): + if (suffix.import_enabled() is True) and ( + suffix.get_datatype() is datatype + ): yield name, suffix def import_suffix_generator(a_block, datatype=False): - if (datatype is False): + if datatype is False: for name, suffix in a_block.component_map(Suffix).items(): if suffix.import_enabled() is True: yield name, suffix else: for name, suffix in a_block.component_map(Suffix).items(): - if (suffix.import_enabled() is True) and \ - (suffix.get_datatype() is datatype): + if (suffix.import_enabled() is True) and ( + suffix.get_datatype() is datatype + ): yield name, suffix def active_local_suffix_generator(a_block, datatype=False): - if (datatype is False): + if datatype is False: for name, suffix in a_block.component_map(Suffix, active=True).items(): if suffix.get_direction() is Suffix.LOCAL: yield name, suffix else: for name, suffix in a_block.component_map(Suffix, active=True).items(): - if (suffix.get_direction() is Suffix.LOCAL) and \ - (suffix.get_datatype() is datatype): + if (suffix.get_direction() is Suffix.LOCAL) and ( + suffix.get_datatype() is datatype + ): yield name, suffix def local_suffix_generator(a_block, datatype=False): - if (datatype is False): + if datatype is False: for name, suffix in a_block.component_map(Suffix).items(): if suffix.get_direction() is Suffix.LOCAL: yield name, suffix else: for name, suffix in a_block.component_map(Suffix).items(): - if (suffix.get_direction() is Suffix.LOCAL) and \ - (suffix.get_datatype() is datatype): + if (suffix.get_direction() is Suffix.LOCAL) and ( + suffix.get_datatype() is datatype + ): yield name, suffix def active_suffix_generator(a_block, datatype=False): - if (datatype is False): + if datatype is False: for name, suffix in a_block.component_map(Suffix, active=True).items(): yield name, suffix else: @@ -121,7 +125,7 @@ def active_suffix_generator(a_block, datatype=False): def suffix_generator(a_block, datatype=False): - if (datatype is False): + if datatype is False: for name, suffix in a_block.component_map(Suffix).items(): yield name, suffix else: @@ -129,9 +133,6 @@ def suffix_generator(a_block, datatype=False): if suffix.get_datatype() is datatype: yield name, suffix -# Note: The order of inheritance here is important so that -# __setstate__ works correctly on the ActiveComponent base class. - @ModelComponentFactory.register("Declare a container for extraneous model data") class Suffix(ComponentMap, ActiveComponent): @@ -158,24 +159,32 @@ class Suffix(ComponentMap, ActiveComponent): IMPORT_EXPORT = 3 # both SuffixDirections = (LOCAL, EXPORT, IMPORT, IMPORT_EXPORT) - SuffixDirectionToStr = {LOCAL: 'Suffix.LOCAL', - EXPORT: 'Suffix.EXPORT', - IMPORT: 'Suffix.IMPORT', - IMPORT_EXPORT: 'Suffix.IMPORT_EXPORT'} + SuffixDirectionToStr = { + LOCAL: 'Suffix.LOCAL', + EXPORT: 'Suffix.EXPORT', + IMPORT: 'Suffix.IMPORT', + IMPORT_EXPORT: 'Suffix.IMPORT_EXPORT', + } # Suffix Datatypes FLOAT = 4 INT = 0 SuffixDatatypes = (FLOAT, INT, None) - SuffixDatatypeToStr = {FLOAT: 'Suffix.FLOAT', - INT: 'Suffix.INT', - None: str(None)} + SuffixDatatypeToStr = {FLOAT: 'Suffix.FLOAT', INT: 'Suffix.INT', None: str(None)} @overload - def __init__(self, *, direction=LOCAL, datatype=FLOAT, - initialize=None, rule=None, name=None, doc=None): ... + def __init__( + self, + *, + direction=LOCAL, + datatype=FLOAT, + initialize=None, + rule=None, + name=None, + doc=None + ): + ... def __init__(self, **kwds): - # Suffix type information self._direction = None self._datatype = None @@ -205,14 +214,6 @@ def __init__(self, **kwds): if self._rule is None: self.construct() - def __setstate__(self, state): - """ - This method must be defined for deepcopy/pickling because this - class relies on component ids. - """ - ActiveComponent.__setstate__(self, state) - ComponentMap.__setstate__(self, state) - def construct(self, data=None): """ Constructs this component, applying rule if it exists. @@ -230,26 +231,26 @@ def construct(self, data=None): self.update_values(self._rule(self._parent())) timer.report() - @property def datatype(self): """Return the suffix datatype.""" return self._datatype + @datatype.setter def datatype(self, datatype): """Set the suffix datatype.""" if datatype not in self.SuffixDatatypeToStr: raise ValueError( "Suffix datatype must be one of: %s. \n" - "Value given: %s" - % (list(self.SuffixDatatypeToStr.values()), - datatype)) + "Value given: %s" % (list(self.SuffixDatatypeToStr.values()), datatype) + ) self._datatype = datatype @property def direction(self): """Return the suffix direction.""" return self._direction + @direction.setter def direction(self, direction): """Set the suffix direction.""" @@ -257,12 +258,14 @@ def direction(self, direction): raise ValueError( "Suffix direction must be one of: %s. \n" "Value given: %s" - % (list(self.SuffixDirectionToStr.values()), - direction)) + % (list(self.SuffixDirectionToStr.values()), direction) + ) self._direction = direction - @deprecated('Suffix.exportEnabled is replaced with Suffix.export_enabled.', - version='4.1.10486') + @deprecated( + 'Suffix.exportEnabled is replaced with Suffix.export_enabled.', + version='4.1.10486', + ) def exportEnabled(self): return self.export_enabled() @@ -273,8 +276,10 @@ def export_enabled(self): """ return bool(self._direction & Suffix.EXPORT) - @deprecated('Suffix.importEnabled is replaced with Suffix.import_enabled.', - version='4.1.10486') + @deprecated( + 'Suffix.importEnabled is replaced with Suffix.import_enabled.', + version='4.1.10486', + ) def importEnabled(self): return self.import_enabled() @@ -285,8 +290,10 @@ def import_enabled(self): """ return bool(self._direction & Suffix.IMPORT) - @deprecated('Suffix.updateValues is replaced with Suffix.update_values.', - version='4.1.10486') + @deprecated( + 'Suffix.updateValues is replaced with Suffix.update_values.', + version='4.1.10486', + ) def updateValues(self, data, expand=True): return self.update_values(data, expand) @@ -297,7 +304,6 @@ def update_values(self, data, expand=True): set_value on every component. """ if expand: - try: items = data.items() except AttributeError: @@ -307,12 +313,12 @@ def update_values(self, data, expand=True): self.set_value(component, value, expand=expand) else: - # As implemented by MutableMapping self.update(data) - @deprecated('Suffix.setValue is replaced with Suffix.set_value.', - version='4.1.10486') + @deprecated( + 'Suffix.setValue is replaced with Suffix.set_value.', version='4.1.10486' + ) def setValue(self, component, value, expand=True): return self.set_value(component, value, expand) @@ -333,8 +339,10 @@ def set_value(self, component, value, expand=True): else: self[component] = value - @deprecated('Suffix.setAllValues is replaced with Suffix.set_all_values.', - version='4.1.10486') + @deprecated( + 'Suffix.setAllValues is replaced with Suffix.set_all_values.', + version='4.1.10486', + ) def setAllValues(self, value): return self.set_all_values(value) @@ -345,8 +353,9 @@ def set_all_values(self, value): for ndx in self: self[ndx] = value - @deprecated('Suffix.clearValue is replaced with Suffix.clear_value.', - version='4.1.10486') + @deprecated( + 'Suffix.clearValue is replaced with Suffix.clear_value.', version='4.1.10486' + ) def clearValue(self, component, expand=True): return self.clear_value(component, expand) @@ -366,9 +375,10 @@ def clear_value(self, component, expand=True): except KeyError: pass - @deprecated('Suffix.clearAllValues is replaced with ' - 'Suffix.clear_all_values.', - version='4.1.10486') + @deprecated( + 'Suffix.clearAllValues is replaced with Suffix.clear_all_values.', + version='4.1.10486', + ) def clearAllValues(self): return self.clear_all_values() @@ -378,8 +388,9 @@ def clear_all_values(self): """ self.clear() - @deprecated('Suffix.setDatatype is replaced with Suffix.set_datatype.', - version='4.1.10486') + @deprecated( + 'Suffix.setDatatype is replaced with Suffix.set_datatype.', version='4.1.10486' + ) def setDatatype(self, datatype): return self.set_datatype(datatype) @@ -388,14 +399,16 @@ def set_datatype(self, datatype): Set the suffix datatype. """ if datatype not in self.SuffixDatatypes: - raise ValueError("Suffix datatype must be one of: %s. \n" - "Value given: %s" - % (list(Suffix.SuffixDatatypeToStr.values()), - datatype)) + raise ValueError( + "Suffix datatype must be one of: %s. \n" + "Value given: %s" + % (list(Suffix.SuffixDatatypeToStr.values()), datatype) + ) self._datatype = datatype - @deprecated('Suffix.getDatatype is replaced with Suffix.get_datatype.', - version='4.1.10486') + @deprecated( + 'Suffix.getDatatype is replaced with Suffix.get_datatype.', version='4.1.10486' + ) def getDatatype(self): return self.get_datatype() @@ -405,8 +418,10 @@ def get_datatype(self): """ return self._datatype - @deprecated('Suffix.setDirection is replaced with Suffix.set_direction.', - version='4.1.10486') + @deprecated( + 'Suffix.setDirection is replaced with Suffix.set_direction.', + version='4.1.10486', + ) def setDirection(self, direction): return self.set_direction(direction) @@ -415,14 +430,17 @@ def set_direction(self, direction): Set the suffix direction. """ if direction not in self.SuffixDirections: - raise ValueError("Suffix direction must be one of: %s. \n" - "Value given: %s" - % (list(self.SuffixDirectionToStr.values()), - direction)) + raise ValueError( + "Suffix direction must be one of: %s. \n" + "Value given: %s" + % (list(self.SuffixDirectionToStr.values()), direction) + ) self._direction = direction - @deprecated('Suffix.getDirection is replaced with Suffix.get_direction.', - version='4.1.10486') + @deprecated( + 'Suffix.getDirection is replaced with Suffix.get_direction.', + version='4.1.10486', + ) def getDirection(self): return self.get_direction() @@ -444,18 +462,20 @@ def __str__(self): def _pprint(self): return ( - [('Direction', self.SuffixDirectionToStr[self._direction]), - ('Datatype', self.SuffixDatatypeToStr[self._datatype]), - ], + [ + ('Direction', self.SuffixDirectionToStr[self._direction]), + ('Datatype', self.SuffixDatatypeToStr[self._datatype]), + ], ((str(k), v) for k, v in self._dict.values()), ("Value",), - lambda k, v: [v] + lambda k, v: [v], ) # TODO: delete - @deprecated('Suffix.getValue is replaced with ' - 'the dict-interface method Suffix.get.', - version='4.1.10486') + @deprecated( + 'Suffix.getValue is replaced with the dict-interface method Suffix.get.', + version='4.1.10486', + ) def getValue(self, component, *args): """ Returns the current value of this suffix for the specified @@ -465,9 +485,11 @@ def getValue(self, component, *args): return self.get(component, *args) # TODO: delete - @deprecated('Suffix.extractValues() is replaced with ' - 'the dict-interface method Suffix.items().', - version='4.1.10486') + @deprecated( + 'Suffix.extractValues() is replaced with ' + 'the dict-interface method Suffix.items().', + version='4.1.10486', + ) def extractValues(self): """ Extract all data stored on this Suffix into a list of @@ -499,4 +521,3 @@ def __eq__(self, other): def __ne__(self, other): """Not implemented.""" raise NotImplementedError("Suffix components are not comparable") - diff --git a/pyomo/core/base/symbol_map.py b/pyomo/core/base/symbol_map.py index 8638f78a6a1..e4e7f9d781c 100644 --- a/pyomo/core/base/symbol_map.py +++ b/pyomo/core/base/symbol_map.py @@ -12,6 +12,8 @@ from pyomo.core.expr.symbol_map import SymbolMap from pyomo.core.base.label import TextLabeler + + def symbol_map_from_instance(instance): """ Create a symbol map from an instance using name-based labelers. @@ -24,7 +26,7 @@ def symbol_map_from_instance(instance): # Recursively iterate over all variables # for varvalue in instance.component_data_objects(Var, active=True): - symbol_map.getSymbol(varvalue, labeler) + symbol_map.getSymbol(varvalue, labeler) # # Recursively iterate over all constraints # diff --git a/pyomo/core/base/symbolic.py b/pyomo/core/base/symbolic.py index 5c8ea4ecce1..3fa5c168207 100644 --- a/pyomo/core/base/symbolic.py +++ b/pyomo/core/base/symbolic.py @@ -15,10 +15,14 @@ from pyomo.common.errors import NondifferentiableError -@deprecated(msg=('The differentiate function in pyomo.core.base.symbolic has been deprecated. Please use the ' + - 'differentiate function in pyomo.core.expr.'), - version='5.6.7', - remove_in='5.7') +@deprecated( + msg=( + 'The differentiate function in pyomo.core.base.symbolic has been deprecated. Please use the ' + + 'differentiate function in pyomo.core.expr.' + ), + version='5.6.7', + remove_in='5.7', +) def differentiate(expr, wrt=None, wrt_list=None): """Return derivative of expression. @@ -35,4 +39,6 @@ def differentiate(expr, wrt=None, wrt_list=None): Expression or list of Expression objects """ - return diff_core.differentiate(expr=expr, wrt=wrt, wrt_list=wrt_list, mode=diff_core.Modes.sympy) + return diff_core.differentiate( + expr=expr, wrt=wrt, wrt_list=wrt_list, mode=diff_core.Modes.sympy + ) diff --git a/pyomo/core/base/template_expr.py b/pyomo/core/base/template_expr.py index 115908b4fa7..f8ff345a1e5 100644 --- a/pyomo/core/base/template_expr.py +++ b/pyomo/core/base/template_expr.py @@ -10,11 +10,15 @@ # ___________________________________________________________________________ from pyomo.core.expr.template_expr import ( - IndexTemplate, _GetItemIndexer, TemplateExpressionError + IndexTemplate, + _GetItemIndexer, + TemplateExpressionError, ) from pyomo.common.deprecation import deprecation_warning + deprecation_warning( 'The pyomo.core.base.template_expr module is deprecated. ' 'Import expression template objects from pyomo.core.expr.template_expr.', - version='5.7') + version='5.7', +) diff --git a/pyomo/core/base/transformation.py b/pyomo/core/base/transformation.py index c2e852a76b3..ee240a07800 100644 --- a/pyomo/core/base/transformation.py +++ b/pyomo/core/base/transformation.py @@ -14,7 +14,10 @@ from pyomo.common.modeling import unique_component_name from pyomo.common.timing import TransformationTimer -class TransformationInfo(object): pass + +class TransformationInfo(object): + pass + class TransformationData(object): """ @@ -34,9 +37,10 @@ class Transformation(object): """ Base class for all model transformations. """ + def __init__(self, **kwds): kwds["name"] = kwds.get("name", "transformation") - #super(Transformation, self).__init__(**kwds) + # super(Transformation, self).__init__(**kwds) # # Support "with" statements. @@ -52,7 +56,8 @@ def __exit__(self, t, v, traceback): "Transformation.apply_to() for in-place transformations or " "Transformation.create_using() for transformations that create a " "new, independent transformed model instance.", - version='4.3.11323') + version='4.3.11323', + ) def apply(self, model, **kwds): inplace = kwds.pop('inplace', True) if inplace: @@ -82,8 +87,7 @@ def create_using(self, model, **kwds): return new_model def _apply_to(self, model, **kwds): - raise RuntimeError( - "The Transformation.apply_to method is not implemented.") + raise RuntimeError("The Transformation.apply_to method is not implemented.") def _create_using(self, model, **kwds): # Put all the kwds onto the model so that when we clone the @@ -103,6 +107,7 @@ def _create_using(self, model, **kwds): TransformationFactory = Factory('transformation type') + @deprecated(version='4.3.11323') def apply_transformation(*args, **kwds): if len(args) == 0: @@ -110,5 +115,5 @@ def apply_transformation(*args, **kwds): xfrm = TransformationFactory(args[0]) if len(args) == 1 or xfrm is None: return xfrm - tmp=(args[1],) + tmp = (args[1],) return xfrm.apply(*tmp, **kwds) diff --git a/pyomo/core/base/units_container.py b/pyomo/core/base/units_container.py index a94e5099d4d..79386547a43 100644 --- a/pyomo/core/base/units_container.py +++ b/pyomo/core/base/units_container.py @@ -114,23 +114,34 @@ from pyomo.common.dependencies import attempt_import from pyomo.common.modeling import NOTSET from pyomo.core.expr.numvalue import ( - NumericValue, nonpyomo_leaf_types, value, native_types, - native_numeric_types, pyomo_constant_types, + NumericValue, + nonpyomo_leaf_types, + value, + native_types, + native_numeric_types, + pyomo_constant_types, ) from pyomo.core.expr.template_expr import IndexTemplate from pyomo.core.expr.visitor import ExpressionValueVisitor -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR pint_module, pint_available = attempt_import( - 'pint', defer_check=True, error_message='The "pint" package failed ' - 'to import. This package is necessary to use Pyomo units.') + 'pint', + defer_check=True, + error_message=( + 'The "pint" package failed to import. ' + 'This package is necessary to use Pyomo units.' + ), +) logger = logging.getLogger(__name__) + class UnitsError(Exception): """ An exception class for all general errors/warnings associated with units """ + def __init__(self, msg): self.msg = msg @@ -144,11 +155,45 @@ class InconsistentUnitsError(UnitsError): E.g., x == y, where x is in units of kg and y is in units of meter """ + def __init__(self, exp1, exp2, msg): - msg = '{}: {} not compatible with {}.'.format(str(msg), str(exp1), str(exp2)) + msg = f'{msg}: {exp1} not compatible with {exp2}.' super(InconsistentUnitsError, self).__init__(msg) +def _pint_unit_mapper(encode, val): + if encode: + return str(val) + else: + return units._pint_registry(val).units + + +def _pint_registry_mapper(encode, val): + if encode: + if val is not units._pint_registry: + # FIXME: we currently will not correctly unpickle units + # associated with a unit manager other than the default + # singleton. If we wanted to support this, we would need to + # do something like create a global units manager registry + # that would associate each unit manager with a name. We + # could then pickle that name and then attempt to restore + # the association with the original units manager. As we + # expect all users to just use the global default, for the + # time being we will just issue a warning that things may + # break. + logger.warning( + "pickling a _PyomoUnit associated with a PyomoUnitsContainer " + "that is not the default singleton (%s.units). Restoring " + "this state will attempt to return a unit associated with " + "the default singleton." % (__name__,) + ) + return None + elif val is None: + return units._pint_registry + else: + return val + + class _PyomoUnit(NumericValue): """An object that represents a single unit in Pyomo (e.g., kg, meter) @@ -157,7 +202,12 @@ class _PyomoUnit(NumericValue): This module contains a global PyomoUnitsContainer object :py:data:`units`. See module documentation for more information. """ + __slots__ = ('_pint_unit', '_pint_registry') + __autoslot_mappers__ = { + '_pint_unit': _pint_unit_mapper, + '_pint_registry': _pint_registry_mapper, + } def __init__(self, pint_unit, pint_registry): super(_PyomoUnit, self).__init__() @@ -167,11 +217,11 @@ def __init__(self, pint_unit, pint_registry): self._pint_registry = pint_registry def _get_pint_unit(self): - """ Return the pint unit corresponding to this Pyomo unit. """ + """Return the pint unit corresponding to this Pyomo unit.""" return self._pint_unit def _get_pint_registry(self): - """ Return the pint registry (pint.UnitRegistry) object used to create this unit. """ + """Return the pint registry (pint.UnitRegistry) object used to create this unit.""" return self._pint_registry def getname(self, fully_qualified=False, name_buffer=None): @@ -225,11 +275,11 @@ def is_fixed(self): return True def is_parameter_type(self): - """ This is not a parameter type (overloaded from NumericValue) """ + """This is not a parameter type (overloaded from NumericValue)""" return False def is_variable_type(self): - """ This is not a variable type (overloaded from NumericValue) """ + """This is not a variable type (overloaded from NumericValue)""" return False def is_potentially_variable(self): @@ -240,57 +290,27 @@ def is_potentially_variable(self): return False def is_named_expression_type(self): - """ This is not a named expression (overloaded from NumericValue) """ + """This is not a named expression (overloaded from NumericValue)""" return False - def is_expression_type(self): - """ This is a leaf, not an expression (overloaded from NumericValue) """ + def is_expression_type(self, expression_system=None): + """This is a leaf, not an expression (overloaded from NumericValue)""" return False def is_component_type(self): - """ This is not a component type (overloaded from NumericValue) """ - return False - - def is_relational(self): - """ This is not relational (overloaded from NumericValue) """ + """This is not a component type (overloaded from NumericValue)""" return False def is_indexed(self): - """ This is not indexed (overloaded from NumericValue) """ + """This is not indexed (overloaded from NumericValue)""" return False def _compute_polynomial_degree(self, result): - """ Returns the polynomial degree - since units are constants, they have degree of zero. + """Returns the polynomial degree - since units are constants, they have degree of zero. Note that :py:meth:`NumericValue.polynomial_degree` calls this method. """ return 0 - def __getstate__(self): - state = super(_PyomoUnit, self).__getstate__() - state['_pint_unit'] = str(self._pint_unit) - if self._pint_registry is not units._pint_registry: - # FIXME: we currently will not correctly unpickle units - # associated with a unit manager other than the default - # singleton. If we wanted to support this, we would need to - # do something like create a global units manager registry - # that would associate each unit manager with a name. We - # could then pickle that name and then attempt to restore - # the association with the original units manager. As we - # expect all users to just use the global default, for the - # time being we will just issue a warning that things may - # break. - logger.warning( - "pickling a _PyomoUnit associated with a PyomoUnitsContainer " - "that is not the default singleton (%s.units). Restoring " - "this pickle will attempt to return a unit associated with " - "the default singleton." % (__name__,)) - return state - - def __setstate__(self, state): - self._pint_registry = units._pint_registry - self._pint_unit = self._pint_registry(state.pop('_pint_unit')).units - super(_PyomoUnit, self).__setstate__(state) - def __deepcopy__(self, memo): # Note that while it is possible to deepcopy the _pint_unit and # _pint_registry object (in pint>0.10), that version does not @@ -302,6 +322,14 @@ def __deepcopy__(self, memo): # as outside the model scope and DO NOT duplicate them. return self + def __eq__(self, other): + if other.__class__ is _PyomoUnit: + return ( + self._pint_registry is other._pint_registry + and self._pint_unit == other._pint_unit + ) + return super().__eq__(other) + # __bool__ uses NumericValue base class implementation # __float__ uses NumericValue base class implementation # __int__ uses NumericValue base class implementation @@ -333,7 +361,7 @@ def __deepcopy__(self, memo): # __add__ uses NumericValue base class implementation def __str__(self): - """ Returns a string representing the unit """ + """Returns a string representing the unit""" # The ~ returns the short form of the pint unit if the unit is # an instance of the unit 'dimensionless', then pint returns '' @@ -349,8 +377,7 @@ def __str__(self): retstr = 'dimensionless' return retstr - def to_string(self, verbose=None, labeler=None, smap=None, - compute_values=False): + def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): """ Return a string representation of the expression tree. @@ -378,9 +405,8 @@ def __call__(self, exception=True): return 1.0 def pprint(self, ostream=None, verbose=False): - """Display a user readable string description of this object. - """ - if ostream is None: #pragma:nocover + """Display a user readable string description of this object.""" + if ostream is None: # pragma:nocover ostream = sys.stdout ostream.write(str(self)) # There is also a long form, but the verbose flag is not really the correct indicator @@ -421,10 +447,11 @@ def __init__(self, pyomo_units_container, units_equivalence_tolerance=1e-12): """ super(PintUnitExtractionVisitor, self).__init__() self._pyomo_units_container = pyomo_units_container - self._pint_dimensionless = pyomo_units_container._pint_dimensionless - self._pint_radian = pyomo_units_container._pint_registry.radian + self._pint_dimensionless = None self._equivalent_pint_units = pyomo_units_container._equivalent_pint_units - self._equivalent_to_dimensionless = pyomo_units_container._equivalent_to_dimensionless + self._equivalent_to_dimensionless = ( + pyomo_units_container._equivalent_to_dimensionless + ) def _get_unit_for_equivalent_children(self, node, child_units): """ @@ -453,8 +480,10 @@ def _get_unit_for_equivalent_children(self, node, child_units): for pint_unit_i in child_units: if not self._equivalent_pint_units(pint_unit_0, pint_unit_i): raise InconsistentUnitsError( - pint_unit_0, pint_unit_i, - 'Error in units found in expression: %s' % (node,)) + pint_unit_0, + pint_unit_i, + 'Error in units found in expression: %s' % (node,), + ) # checks were OK, return the first one in the list return pint_unit_0 @@ -530,14 +559,14 @@ def _get_unit_for_pow(self, node, child_units): if not self._equivalent_to_dimensionless(child_units[1]): # todo: allow radians? raise UnitsError( - "Error in sub-expression: {}. " + f"Error in sub-expression: {node}. " "Exponents in a pow expression must be dimensionless." - "".format(node)) + ) # common case - exponent is a constant number exponent = node.args[1] if type(exponent) in nonpyomo_leaf_types: - return child_units[0]**value(exponent) + return child_units[0] ** value(exponent) # if base is dimensioness, exponent doesn't matter if self._equivalent_to_dimensionless(child_units[0]): @@ -546,11 +575,12 @@ def _get_unit_for_pow(self, node, child_units): # base is not dimensionless, exponent is dimensionless # ensure that the exponent is fixed if not exponent.is_fixed(): - raise UnitsError("The base of an exponent has units {}, but " - "the exponent is not a fixed numerical value." - "".format(child_units[0])) + raise UnitsError( + f"The base of an exponent has units {child_units[0]}, but " + "the exponent is not a fixed numerical value." + ) - return child_units[0]**value(exponent) + return child_units[0] ** value(exponent) def _get_unit_for_single_child(self, node, child_units): """ @@ -574,7 +604,7 @@ def _get_unit_for_single_child(self, node, child_units): def _get_units_ExternalFunction(self, node, child_units): """ - Check to make sure that any child arguments are consistent with + Check to make sure that any child arguments are consistent with arg_units return the value from node.get_units() This was written for ExternalFunctionExpression where the external function has units assigned to its return value and arguments @@ -596,19 +626,19 @@ def _get_units_ExternalFunction(self, node, child_units): dless = self._pint_dimensionless if arg_units is None: # they should all be dimensionless - arg_units = [dless]*len(child_units) + arg_units = [dless] * len(child_units) else: # copy arg_units so we don't overwrite the ones in the expression object arg_units = list(arg_units) - for i,a in enumerate(arg_units): + for i, a in enumerate(arg_units): arg_units[i] = self._pyomo_units_container._get_pint_units(a) - for (arg_unit, pint_unit) in zip(arg_units, child_units): + for arg_unit, pint_unit in zip(arg_units, child_units): assert arg_unit is not None if not self._equivalent_pint_units(arg_unit, pint_unit): raise InconsistentUnitsError( - arg_unit, pint_unit, - 'Inconsistent units found in ExternalFunction.') + arg_unit, pint_unit, 'Inconsistent units found in ExternalFunction.' + ) # now return the units in node.get_units return self._pyomo_units_container._get_pint_units(node.get_units()) @@ -633,8 +663,9 @@ def _get_dimensionless_with_dimensionless_children(self, node, child_units): for pint_unit in child_units: if not self._equivalent_to_dimensionless(pint_unit): raise UnitsError( - 'Expected no units or dimensionless units in {}, ' - 'but found {}.'.format(str(node), str(pint_unit))) + f'Expected no units or dimensionless units in {node}, ' + f'but found {pint_unit}.' + ) return self._pint_dimensionless @@ -682,8 +713,10 @@ def _get_unit_for_unary_function(self, node, child_units): func_name = node.getname() node_func = self.unary_function_method_map.get(func_name, None) if node_func is None: - raise TypeError('An unhandled unary function: {} was encountered while retrieving the' - ' units of expression {}'.format(func_name, str(node))) + raise TypeError( + f'An unhandled unary function: {func_name} was encountered ' + f'while retrieving the units of expression {node}' + ) return node_func(self, node, child_units) def _get_unit_for_expr_if(self, node, child_units): @@ -711,8 +744,10 @@ def _get_unit_for_expr_if(self, node, child_units): # already checked) if not self._equivalent_pint_units(child_units[1], child_units[2]): raise InconsistentUnitsError( - child_units[1], child_units[2], - 'Error in units found in expression: %s' % (node,)) + child_units[1], + child_units[2], + 'Error in units found in expression: %s' % (node,), + ) return child_units[1] @@ -739,13 +774,16 @@ def _get_dimensionless_with_radians_child(self, node, child_units): if self._equivalent_to_dimensionless(child_units[0]): return self._pint_dimensionless - if self._equivalent_pint_units(child_units[0], self._pint_radian): + if self._equivalent_pint_units( + child_units[0], self._pyomo_units_container._pint_registry.radian + ): return self._pint_dimensionless # units are not None, dimensionless, or radians raise UnitsError( 'Expected radians or dimensionless in argument to function ' - 'in expression %s, but found %s' % (node, child_units[0])) + 'in expression %s, but found %s' % (node, child_units[0]) + ) def _get_radians_with_dimensionless_child(self, node, child_units): """ @@ -768,11 +806,12 @@ def _get_radians_with_dimensionless_child(self, node, child_units): assert len(child_units) == 1 if self._equivalent_to_dimensionless(child_units[0]): - return self._pint_radian + return self._pyomo_units_container._pint_registry.radian raise UnitsError( - 'Expected dimensionless argument to function in expression {},' - ' but found {}'.format(str(node), str(child_units[0]))) + f'Expected dimensionless argument to function in expression {node},' + f' but found {child_units[0]}' + ) def _get_unit_sqrt(self, node, child_units): """ @@ -792,7 +831,7 @@ def _get_unit_sqrt(self, node, child_units): : pint unit """ assert len(child_units) == 1 - return child_units[0]**0.5 + return child_units[0] ** 0.5 node_type_method_map = { EXPR.EqualityExpression: _get_unit_for_equivalent_children, @@ -815,7 +854,12 @@ def _get_unit_sqrt(self, node, child_units): EXPR.NPV_UnaryFunctionExpression: _get_unit_for_unary_function, EXPR.Expr_ifExpression: _get_unit_for_expr_if, IndexTemplate: _get_dimensionless_no_children, - EXPR.GetItemExpression: _get_dimensionless_with_dimensionless_children, + EXPR.Numeric_GetItemExpression: ( + _get_dimensionless_with_dimensionless_children + ), + EXPR.NPV_Numeric_GetItemExpression: ( + _get_dimensionless_with_dimensionless_children + ), EXPR.ExternalFunctionExpression: _get_units_ExternalFunction, EXPR.NPV_ExternalFunctionExpression: _get_units_ExternalFunction, EXPR.LinearExpression: _get_unit_for_equivalent_children, @@ -823,7 +867,7 @@ def _get_unit_sqrt(self, node, child_units): unary_function_method_map = { 'log': _get_dimensionless_with_dimensionless_children, - 'log10':_get_dimensionless_with_dimensionless_children, + 'log10': _get_dimensionless_with_dimensionless_children, 'sin': _get_dimensionless_with_radians_child, 'cos': _get_dimensionless_with_radians_child, 'tan': _get_dimensionless_with_radians_child, @@ -839,42 +883,71 @@ def _get_unit_sqrt(self, node, child_units): 'acosh': _get_radians_with_dimensionless_child, 'atanh': _get_radians_with_dimensionless_child, 'ceil': _get_unit_for_single_child, - 'floor': _get_unit_for_single_child + 'floor': _get_unit_for_single_child, } + def initializeWalker(self, expr): + # Refresh the cached dimensionless (in case the underlying pint + # registry was either changed or had not been set when the + # PyomoUnitsContainer was originally created). + self._pint_dimensionless = self._pyomo_units_container._pint_dimensionless + walk, result = self.beforeChild(None, expr, 0) + if not walk: + result = self.finalizeResult(result) + return walk, result + + def beforeChild(self, node, child, child_idx): + ctype = child.__class__ + if ctype in native_types or ctype in pyomo_constant_types: + return False, self._pint_dimensionless + + if child.is_expression_type(): + return True, None + + # this is a leaf, but not a native type + if ctype is _PyomoUnit: + return False, child._get_pint_unit() + elif hasattr(child, 'get_units'): + # might want to add other common types here + pyomo_unit = child.get_units() + pint_unit = self._pyomo_units_container._get_pint_units(pyomo_unit) + return False, pint_unit + + return True, None + def exitNode(self, node, data): - """ Callback for :class:`pyomo.core.current.StreamBasedExpressionVisitor`. This - method is called when moving back up the tree in a depth first search.""" - - # first check if the node is a leaf - nodetype = type(node) + """Visitor callback when moving up the expression tree. - if nodetype in native_types or nodetype in pyomo_constant_types: - return self._pint_dimensionless + Callback for + :class:`pyomo.core.current.StreamBasedExpressionVisitor`. This + method is called when moving back up the tree in a depth first + search. - node_func = self.node_type_method_map.get(nodetype, None) + """ + node_func = self.node_type_method_map.get(node.__class__, None) if node_func is not None: return node_func(self, node, data) - elif not node.is_expression_type(): - # this is a leaf, but not a native type - if nodetype is _PyomoUnit: - return node._get_pint_unit() - elif hasattr(node, 'get_units'): - # might want to add other common types here - pyomo_unit = node.get_units() - pint_unit = self._pyomo_units_container._get_pint_units(pyomo_unit) - return pint_unit - # not a leaf - check if it is a named expression - if hasattr(node, 'is_named_expression_type') and node.is_named_expression_type(): + if ( + hasattr(node, 'is_named_expression_type') + and node.is_named_expression_type() + ): pint_unit = self._get_unit_for_single_child(node, data) return pint_unit - raise TypeError('An unhandled expression node type: {} was encountered while retrieving the' - ' units of expression'.format(str(nodetype), str(node))) + raise TypeError( + f'An unhandled expression node type: {type(node)} was encountered ' + f'while retrieving the units of expression {node}' + ) + + def finalizeResult(self, result): + if hasattr(result, 'units'): + # likely, we got a quantity object and not a units object + return result.units + return result + - class PyomoUnitsContainer(object): """Class that is used to create and contain units in Pyomo. @@ -900,6 +973,7 @@ class PyomoUnitsContainer(object): on the class until they are requested. """ + def __init__(self, pint_registry=NOTSET): """Create a PyomoUnitsContainer instance.""" if pint_registry is NOTSET: @@ -909,6 +983,7 @@ def __init__(self, pint_registry=NOTSET): self._pint_dimensionless = None else: self._pint_dimensionless = self._pint_registry.dimensionless + self._pintUnitExtractionVisitor = PintUnitExtractionVisitor(self) def load_definitions_from_file(self, definition_file): """Load new units definitions from a file @@ -987,20 +1062,19 @@ def load_definitions_from_strings(self, definition_string_list): self._pint_registry.load_definitions(definition_string_list) def __getattr__(self, item): - """ - Here, __getattr__ is implemented to automatically create the necessary unit if - the attribute does not already exist. + """Here, __getattr__ is implemented to automatically create the + necessary unit if the attribute does not already exist. Parameters ---------- item : str - the name of the new field requested -external + the name of the new field requested external + Returns ------- - : PyomoUnit - returns a PyomoUnit corresponding to the requested attribute, - or None if it cannot be created. + PyomoUnit + returns a PyomoUnit corresponding to the requested attribute, + or None if it cannot be created. """ # since __getattr__ was called, we must not have this field yet @@ -1012,12 +1086,16 @@ def __getattr__(self, item): if pint_unit is not None: # check if the unit is an offset unit and throw an exception if necessary # TODO: should we prevent delta versions: delta_degC and delta_degF as well? - pint_unit_container = pint_module.util.to_units_container(pint_unit, pint_registry) - for (u, e) in pint_unit_container.items(): + pint_unit_container = pint_module.util.to_units_container( + pint_unit, pint_registry + ) + for u, e in pint_unit_container.items(): if not pint_registry._units[u].is_multiplicative: - raise UnitsError('Pyomo units system does not support the offset units "{}".' - ' Use absolute units (e.g. kelvin instead of degC) instead.' - ''.format(item)) + raise UnitsError( + 'Pyomo units system does not support the offset ' + f'units "{item}". Use absolute units ' + '(e.g. kelvin instead of degC) instead.' + ) unit = _PyomoUnit(pint_unit, pint_registry) setattr(self, item, unit) @@ -1026,9 +1104,9 @@ def __getattr__(self, item): pint_unit = None if pint_unit is None: - raise AttributeError('Attribute {0} not found.'.format(str(item))) + raise AttributeError(f'Attribute {item} not found.') - # We added support to specify a units definition file instead of this programatic interface + # We added support to specify a units definition file instead of this programmatic interface # def create_new_base_dimension(self, dimension_name, base_unit_name): # """ # Use this method to create a new base dimension (e.g. a new dimension other than Length, Mass) for the unit manager. @@ -1084,8 +1162,8 @@ def __getattr__(self, item): def _rel_diff(self, a, b): scale = min(abs(a), abs(b)) - if scale < 1.: - scale = 1. + if scale < 1.0: + scale = 1.0 return abs(a - b) / scale def _equivalent_pint_units(self, a, b, TOL=1e-12): @@ -1107,7 +1185,7 @@ def _equivalent_to_dimensionless(self, a, TOL=1e-12): base_a = self._pint_registry.get_base_units(a) if not base_a[1].dimensionless: return False - return self._rel_diff(base_a[0], 1.) <= TOL + return self._rel_diff(base_a[0], 1.0) <= TOL def _get_pint_units(self, expr): """ @@ -1125,17 +1203,12 @@ def _get_pint_units(self, expr): """ if expr is None: return self._pint_dimensionless + return self._pintUnitExtractionVisitor.walk_expression(expr=expr) - pint_units = PintUnitExtractionVisitor(self).walk_expression(expr=expr) - if hasattr(pint_units, 'units'): - # likely, we got a quantity object and not a units object - return pint_units.units - return pint_units - def get_units(self, expr): - """ - Return the Pyomo units corresponding to this expression (also performs validation - and will raise an exception if units are not consistent). + """Return the Pyomo units corresponding to this expression (also + performs validation and will raise an exception if units are not + consistent). Parameters ---------- @@ -1152,39 +1225,44 @@ def get_units(self, expr): :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError` """ - pint_unit = self._get_pint_units(expr) - if pint_unit.dimensionless: - if pint_unit == self._pint_dimensionless: - return None - return _PyomoUnit(pint_unit, self._pint_registry) + return _PyomoUnit(self._get_pint_units(expr), self._pint_registry) - def _pint_convert_temp_from_to(self, numerical_value, pint_from_units, pint_to_units): + def _pint_convert_temp_from_to( + self, numerical_value, pint_from_units, pint_to_units + ): if type(numerical_value) not in native_numeric_types: - raise UnitsError('Conversion routines for absolute and relative temperatures require a numerical value only.' - ' Pyomo objects (Var, Param, expressions) are not supported. Please use value(x) to' - ' extract the numerical value if necessary.') - + raise UnitsError( + 'Conversion routines for absolute and relative temperatures ' + 'require a numerical value only. Pyomo objects (Var, Param, ' + 'expressions) are not supported. Please use value(x) to ' + 'extract the numerical value if necessary.' + ) + src_quantity = self._pint_registry.Quantity(numerical_value, pint_from_units) dest_quantity = src_quantity.to(pint_to_units) return dest_quantity.magnitude - + def convert_temp_K_to_C(self, value_in_K): """ - Convert a value in Kelvin to degrees Celcius. Note that this method + Convert a value in Kelvin to degrees Celsius. Note that this method converts a numerical value only. If you need temperature conversions in expressions, please work in absolute temperatures only. """ - return self._pint_convert_temp_from_to(value_in_K, self._pint_registry.K, self._pint_registry.degC) + return self._pint_convert_temp_from_to( + value_in_K, self._pint_registry.K, self._pint_registry.degC + ) def convert_temp_C_to_K(self, value_in_C): """ - Convert a value in degrees Celcius to Kelvin Note that this + Convert a value in degrees Celsius to Kelvin Note that this method converts a numerical value only. If you need temperature conversions in expressions, please work in absolute temperatures only. """ - return self._pint_convert_temp_from_to(value_in_C, self._pint_registry.degC, self._pint_registry.K) + return self._pint_convert_temp_from_to( + value_in_C, self._pint_registry.degC, self._pint_registry.K + ) def convert_temp_R_to_F(self, value_in_R): """ @@ -1193,7 +1271,9 @@ def convert_temp_R_to_F(self, value_in_R): temperature conversions in expressions, please work in absolute temperatures only. """ - return self._pint_convert_temp_from_to(value_in_R, self._pint_registry.rankine, self._pint_registry.degF) + return self._pint_convert_temp_from_to( + value_in_R, self._pint_registry.rankine, self._pint_registry.degF + ) def convert_temp_F_to_R(self, value_in_F): """ @@ -1202,7 +1282,9 @@ def convert_temp_F_to_R(self, value_in_F): temperature conversions in expressions, please work in absolute temperatures only. """ - return self._pint_convert_temp_from_to(value_in_F, self._pint_registry.degF, self._pint_registry.rankine) + return self._pint_convert_temp_from_to( + value_in_F, self._pint_registry.degF, self._pint_registry.rankine + ) def convert(self, src, to_units=None): """ @@ -1229,17 +1311,22 @@ def convert(self, src, to_units=None): # We disallow offset units, so we only need a factor to convert # between the two src_base_factor, base_units_src = self._pint_registry.get_base_units( - src_pint_unit, check_nonmult=True) + src_pint_unit, check_nonmult=True + ) to_base_factor, base_units_to = self._pint_registry.get_base_units( - to_pint_unit, check_nonmult=True) + to_pint_unit, check_nonmult=True + ) if base_units_src != base_units_to: raise InconsistentUnitsError( - src_pint_unit, to_pint_unit, - 'Error in convert: units not compatible.') + src_pint_unit, to_pint_unit, 'Error in convert: units not compatible.' + ) - return (src_base_factor/to_base_factor) * _PyomoUnit( - to_pint_unit/src_pint_unit, self._pint_registry) * src + return ( + (src_base_factor / to_base_factor) + * _PyomoUnit(to_pint_unit / src_pint_unit, self._pint_registry) + * src + ) def convert_value(self, num_value, from_units=None, to_units=None): """ @@ -1265,9 +1352,11 @@ def convert_value(self, num_value, from_units=None, to_units=None): """ if type(num_value) not in native_numeric_types: - raise UnitsError('The argument "num_value" in convert_value must be a native numeric type, but' - ' instead type {} was found.'.format(type(num_value))) - + raise UnitsError( + 'The argument "num_value" in convert_value must be a native ' + 'numeric type, but instead type {type(num_value)} was found.' + ) + from_pint_unit = self._get_pint_units(from_units) to_pint_unit = self._get_pint_units(to_units) if from_pint_unit == to_pint_unit: @@ -1282,13 +1371,16 @@ def convert_value(self, num_value, from_units=None, to_units=None): # TODO: This check may be overkill - pint will raise an error # that may be sufficient from_base_factor, from_base_units = self._pint_registry.get_base_units( - from_pint_unit, check_nonmult=True) + from_pint_unit, check_nonmult=True + ) to_base_factor, to_base_units = self._pint_registry.get_base_units( - to_pint_unit, check_nonmult=True) + to_pint_unit, check_nonmult=True + ) if from_base_units != to_base_units: raise UnitsError( 'Cannot convert %s to %s. Units are not compatible.' - % (from_units, to_units)) + % (from_units, to_units) + ) # convert the values from_quantity = num_value * from_pint_unit @@ -1303,7 +1395,8 @@ def set_pint_registry(self, pint_registry): "Changing the pint registry used by the Pyomo Units " "system after the PyomoUnitsContainer was constructed. " "Pint requires that all units and dimensioned quantities " - "are generated by a single pint registry.") + "are generated by a single pint registry." + ) self._pint_registry = pint_registry self._pint_dimensionless = self._pint_registry.dimensionless @@ -1313,16 +1406,13 @@ def pint_registry(self): class _QuantityVisitor(ExpressionValueVisitor): - def __init__(self): self.native_types = set(nonpyomo_leaf_types) self.native_types.add(units._pint_registry.Quantity) - self._unary_inverse_trig = { - 'asin', 'acos', 'atan', 'asinh', 'acosh', 'atanh', - } + self._unary_inverse_trig = {'asin', 'acos', 'atan', 'asinh', 'acosh', 'atanh'} def visit(self, node, values): - """ Visit nodes that have been expanded """ + """Visit nodes that have been expanded""" if node.__class__ in self.handlers: return self.handlers[node.__class__](self, node, values) return node._apply_operation(values) @@ -1359,7 +1449,7 @@ def finalize(self, val): if val.__class__ is units._pint_registry.Quantity: return val elif val.__class__ is units._pint_registry.Unit: - return 1. * val + return 1.0 * val # else try: return val * units._pint_dimensionless @@ -1374,14 +1464,18 @@ def _handle_unary_function(self, node, values): def _handle_external(self, node, values): # External functions are units-unaware - ans = node._apply_operation([ - val.magnitude if val.__class__ is units._pint_registry.Quantity - else val for val in values]) + ans = node._apply_operation( + [ + val.magnitude if val.__class__ is units._pint_registry.Quantity else val + for val in values + ] + ) unit = node.get_units() if unit is not None: ans = ans * unit._pint_unit return ans + _QuantityVisitor.handlers = { EXPR.UnaryFunctionExpression: _QuantityVisitor._handle_unary_function, EXPR.NPV_UnaryFunctionExpression: _QuantityVisitor._handle_unary_function, @@ -1389,6 +1483,7 @@ def _handle_external(self, node, values): EXPR.NPV_ExternalFunctionExpression: _QuantityVisitor._handle_external, } + def as_quantity(expr): return _QuantityVisitor().dfs_postorder_stack(expr) @@ -1411,7 +1506,7 @@ def __init__(self): pass def __getattribute__(self, attr): - # Note that this methos will only be called ONCE: either pint is + # Note that this method will only be called ONCE: either pint is # present, at which point this instance __class__ will fall back # to PyomoUnitsContainer (where this method is not declared, OR # pint is not available and an ImportError will be raised. @@ -1432,6 +1527,7 @@ def __getattribute__(self, attr): # Generate the ImportError return getattr(pint_module, attr) + # Define a module level instance of a PyomoUnitsContainer to use for # all units within a Pyomo model. If pint is not available, this will # cause an error at the first usage See module level documentation for diff --git a/pyomo/core/base/util.py b/pyomo/core/base/util.py index 0084ba2a9d3..867a303395b 100644 --- a/pyomo/core/base/util.py +++ b/pyomo/core/base/util.py @@ -18,31 +18,32 @@ from pyomo.core.base.indexed_component import normalize_index relocated_module_attribute( - 'disable_methods', 'pyomo.core.base.disable_methods.disable_methods', - version='6.1') + 'disable_methods', 'pyomo.core.base.disable_methods.disable_methods', version='6.1' +) relocated_module_attribute( - 'Initializer', 'pyomo.core.base.initializer.Initializer', - version='6.1') + 'Initializer', 'pyomo.core.base.initializer.Initializer', version='6.1' +) relocated_module_attribute( - 'IndexedCallInitializer', 'pyomo.core.base.initializer.Initializer', - version='6.1') + 'IndexedCallInitializer', 'pyomo.core.base.initializer.Initializer', version='6.1' +) relocated_module_attribute( - 'CountedCallInitializer', 'pyomo.core.base.initializer.Initializer', - version='6.1') + 'CountedCallInitializer', 'pyomo.core.base.initializer.Initializer', version='6.1' +) + def is_functor(obj): """ Returns true iff obj.__call__ is defined. """ - return inspect.isfunction(obj) or hasattr(obj,'__call__') + return inspect.isfunction(obj) or hasattr(obj, '__call__') def flatten_tuple(x): """ - This wraps around normalize_index. It flattens a nested sequence into + This wraps around normalize_index. It flattens a nested sequence into a single tuple and always returns a tuple, even for single element inputs. - + Returns ------- tuple diff --git a/pyomo/core/base/var.py b/pyomo/core/base/var.py index 19bff63cf1f..5dba9d12396 100644 --- a/pyomo/core/base/var.py +++ b/pyomo/core/base/var.py @@ -9,36 +9,49 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ['Var', '_VarData', '_GeneralVarData', 'VarList', 'SimpleVar', - 'ScalarVar'] +__all__ = ['Var', '_VarData', '_GeneralVarData', 'VarList', 'SimpleVar', 'ScalarVar'] import logging import sys from pyomo.common.pyomo_typing import overload from weakref import ref as weakref_ref -from pyomo.common.collections import Sequence from pyomo.common.deprecation import RenamedClass from pyomo.common.log import is_debug_set from pyomo.common.modeling import NOTSET from pyomo.common.timing import ConstructionTimer + from pyomo.core.staleflag import StaleFlagManager +from pyomo.core.expr import GetItemExpression from pyomo.core.expr.numeric_expr import NPV_MaxExpression, NPV_MinExpression from pyomo.core.expr.numvalue import ( - NumericValue, value, is_potentially_variable, native_numeric_types, + NumericValue, + value, + is_potentially_variable, + native_numeric_types, native_types, ) from pyomo.core.base.component import ComponentData, ModelComponentFactory from pyomo.core.base.global_set import UnindexedComponent_index from pyomo.core.base.disable_methods import disable_methods from pyomo.core.base.indexed_component import ( - IndexedComponent, UnindexedComponent_set, IndexedComponent_NDArrayMixin + IndexedComponent, + UnindexedComponent_set, + IndexedComponent_NDArrayMixin, +) +from pyomo.core.base.initializer import ( + Initializer, + DefaultInitializer, + BoundInitializer, ) -from pyomo.core.base.initializer import Initializer, DefaultInitializer from pyomo.core.base.misc import apply_indexed_rule from pyomo.core.base.set import ( - Reals, Binary, Set, SetInitializer, - real_global_set_ids, integer_global_set_ids, + Reals, + Binary, + Set, + SetInitializer, + real_global_set_ids, + integer_global_set_ids, ) from pyomo.core.base.units_container import units from pyomo.core.base.util import is_functor @@ -50,16 +63,33 @@ _no_lower_bound = {None, _ninf} _no_upper_bound = {None, _inf} _known_global_real_domains = dict( - [(_, True) for _ in real_global_set_ids] + - [(_, False) for _ in integer_global_set_ids] + [(_, True) for _ in real_global_set_ids] + + [(_, False) for _ in integer_global_set_ids] ) _VARDATA_API = ( # including 'domain' runs afoul of logic in Block._add_implicit_sets() # 'domain', - 'bounds', 'lower', 'upper', 'lb', 'ub', 'has_lb', 'has_ub', - 'setlb', 'setub', 'get_units', - 'is_integer', 'is_binary', 'is_continuous', 'is_fixed', - 'fix', 'unfix', 'free', 'set_value', 'value', 'stale', 'fixed', + 'bounds', + 'lower', + 'upper', + 'lb', + 'ub', + 'has_lb', + 'has_ub', + 'setlb', + 'setub', + 'get_units', + 'is_integer', + 'is_binary', + 'is_continuous', + 'is_fixed', + 'fix', + 'unfix', + 'free', + 'set_value', + 'value', + 'stale', + 'fixed', ) @@ -113,6 +143,7 @@ def bounds(self): """ return self.lb, self.ub + @bounds.setter def bounds(self, val): self.lower, self.upper = val @@ -122,6 +153,7 @@ def lb(self): """Return (or set) the numeric value of the variable lower bound.""" lb = value(self.lower) return None if lb == _ninf else lb + @lb.setter def lb(self, val): self.lower = val @@ -131,6 +163,7 @@ def ub(self): """Return (or set) the numeric value of the variable upper bound.""" ub = value(self.upper) return None if ub == _inf else ub + @ub.setter def ub(self, val): self.upper = val @@ -141,7 +174,16 @@ def is_integer(self): if _id in _known_global_real_domains: return not _known_global_real_domains[_id] _interval = self.domain.get_interval() - return _interval is not None and _interval[2] == 1 + if _interval is None: + return False + # Note: it is not sufficient to just check the step: the + # starting / ending points must be integers (or not specified) + start, stop, step = _interval + return ( + step == 1 + and (start is None or int(start) == start) + and (stop is None or int(stop) == stop) + ) def is_binary(self): """Returns True when the domain is restricted to Binary values.""" @@ -251,7 +293,7 @@ def stale(self): Updating a stale :class:`Var` value will not cause other variable values to be come stale. However, updating the first - non-stale :class:`Var` value adter a solve or solution load + non-stale :class:`Var` value after a solve or solution load *will* cause all other variables to be marked as stale """ @@ -283,11 +325,10 @@ def free(self): class _GeneralVarData(_VarData): - """This class defines the data for a single variable. - - """ + """This class defines the data for a single variable.""" __slots__ = ('_value', '_lb', '_ub', '_domain', '_fixed', '_stale') + __autoslot_mappers__ = {'_stale': StaleFlagManager.stale_mapper} def __init__(self, component=None): # @@ -296,8 +337,7 @@ def __init__(self, component=None): # - _VarData # - ComponentData # - NumericValue - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET self._value = None # @@ -310,7 +350,7 @@ def __init__(self, component=None): self._ub = None self._domain = None self._fixed = False - self._stale = 0 # True + self._stale = 0 # True @classmethod def copy(cls, src): @@ -325,20 +365,6 @@ def copy(cls, src): self._index = src._index return self - def __getstate__(self): - state = super(_GeneralVarData, self).__getstate__() - for i in _GeneralVarData.__slots__: - state[i] = getattr(self, i) - state['_stale'] = StaleFlagManager.is_stale(self._stale) - return state - - def __setstate__(self, state): - if state.pop('_stale', True): - state['_stale'] = 0 - else: - state['_stale'] = StaleFlagManager.get_flag(0) - super().__setstate__(state) - # # Abstract Interface # @@ -359,7 +385,7 @@ def set_value(self, val, skip_validation=False): # Special case: setting a variable to None "clears" the variable. if val is None: self._value = None - self._stale = 0 # True + self._stale = 0 # True return # TODO: generate a warning/error: # @@ -370,24 +396,27 @@ def set_value(self, val, skip_validation=False): _src_magnitude = value(val) _src_units = units.get_units(val) val = units.convert_value( - num_value=_src_magnitude, from_units=_src_units, - to_units=self.parent_component()._units) + num_value=_src_magnitude, + from_units=_src_units, + to_units=self.parent_component()._units, + ) else: val = value(val) if not skip_validation: if val not in self.domain: logger.warning( - "Setting Var '%s' to a value `%s` (%s) not in domain %s." % - (self.name, val, type(val).__name__, self.domain), - extra={'id':'W1001'}, + "Setting Var '%s' to a value `%s` (%s) not in domain %s." + % (self.name, val, type(val).__name__, self.domain), + extra={'id': 'W1001'}, ) elif (self._lb is not None and val < value(self._lb)) or ( - self._ub is not None and val > value(self._ub)): + self._ub is not None and val > value(self._ub) + ): logger.warning( "Setting Var '%s' to a numeric value `%s` " "outside the bounds %s." % (self.name, val, self.bounds), - extra={'id':'W1002'}, + extra={'id': 'W1002'}, ) self._value = val @@ -396,6 +425,7 @@ def set_value(self, val, skip_validation=False): @property def value(self): return self._value + @value.setter def value(self, val): self.set_value(val) @@ -403,6 +433,7 @@ def value(self, val): @property def domain(self): return self._domain + @domain.setter def domain(self, domain): try: @@ -410,9 +441,9 @@ def domain(self, domain): except: logger.error( "%s is not a valid domain. Variable domains must be an " - "instance of a Pyomo Set or convertable to a Pyomo Set." - % (domain,), - extra={'id': 'E2001'}) + "instance of a Pyomo Set or convertible to a Pyomo Set." % (domain,), + extra={'id': 'E2001'}, + ) raise @_VarData.bounds.getter @@ -488,6 +519,7 @@ def lower(self): return self._lb # _process_bound() guarantees _lb is not potentially variable return NPV_MaxExpression((self._lb, dlb)) + @lower.setter def lower(self, val): self._lb = self._process_bound(val, 'lower') @@ -512,6 +544,7 @@ def upper(self): return self._ub # _process_bound() guarantees _lb is not potentially variable return NPV_MinExpression((self._ub, dub)) + @upper.setter def upper(self, val): self._ub = self._process_bound(val, 'upper') @@ -525,6 +558,7 @@ def get_units(self): @property def fixed(self): return self._fixed + @fixed.setter def fixed(self, val): self._fixed = bool(val) @@ -532,10 +566,11 @@ def fixed(self, val): @property def stale(self): return StaleFlagManager.is_stale(self._stale) + @stale.setter def stale(self, val): if val: - self._stale = 0 # True + self._stale = 0 # True else: self._stale = StaleFlagManager.get_flag(0) @@ -554,7 +589,8 @@ def _process_bound(self, val, bound_type): "Potentially variable input of type '%s' supplied as " "%s bound for variable '%s' - legal types must be constants " "or non-potentially variable expressions." - % (type(val).__name__, bound_type, self.name)) + % (type(val).__name__, bound_type, self.name) + ) else: # We want to create an expression and not just convert the # current value so that things like mutable Params behave as @@ -595,24 +631,37 @@ class Var(IndexedComponent, IndexedComponent_NDArrayMixin): def __new__(cls, *args, **kwargs): if cls is not Var: return super(Var, cls).__new__(cls) - if not args or (args[0] is UnindexedComponent_set and len(args)==1): + if not args or (args[0] is UnindexedComponent_set and len(args) == 1): return super(Var, cls).__new__(AbstractScalarVar) else: return super(Var, cls).__new__(IndexedVar) @overload - def __init__(self, *indexes, domain=Reals, within=Reals, bounds=None, - initialize=None, rule=None, dense=True, units=None, - name=None, doc=None): ... - + def __init__( + self, + *indexes, + domain=Reals, + within=Reals, + bounds=None, + initialize=None, + rule=None, + dense=True, + units=None, + name=None, + doc=None + ): + ... + def __init__(self, *args, **kwargs): # # Default keyword values # - self._rule_init = Initializer(self._pop_from_kwargs( - 'Var', kwargs, ('rule', 'initialize'), None)) - self._rule_domain = SetInitializer(self._pop_from_kwargs( - 'Var', kwargs, ('domain', 'within'), Reals)) + self._rule_init = Initializer( + self._pop_from_kwargs('Var', kwargs, ('rule', 'initialize'), None) + ) + self._rule_domain = SetInitializer( + self._pop_from_kwargs('Var', kwargs, ('domain', 'within'), Reals) + ) _bounds_arg = kwargs.pop('bounds', None) self._dense = kwargs.pop('dense', True) self._units = kwargs.pop('units', None) @@ -626,24 +675,13 @@ def __init__(self, *args, **kwargs): # # Now that we can call is_indexed(), process bounds initializer # - if self.is_indexed(): - treat_bounds_sequences_as_mappings = not ( - isinstance(_bounds_arg, Sequence) - and len(_bounds_arg) == 2 - and not isinstance(_bounds_arg[0], Sequence) + if not self.is_indexed() and not self._dense: + logger.warning( + "ScalarVar object '%s': dense=False is not allowed " + "for scalar variables; converting to dense=True" % (self.name,) ) - else: - treat_bounds_sequences_as_mappings = False - if not self._dense: - logger.warning( - "ScalarVar object '%s': dense=False is not allowed " - "for scalar variables; converting to dense=True" - % (self.name,)) - self._dense = True - self._rule_bounds = Initializer( - _bounds_arg, - treat_sequences_as_mappings=treat_bounds_sequences_as_mappings - ) + self._dense = True + self._rule_bounds = BoundInitializer(_bounds_arg, self) def flag_as_stale(self): """ @@ -657,10 +695,12 @@ def get_values(self, include_fixed_values=True): Return a dictionary of index-value pairs. """ if include_fixed_values: - return {idx:vardata.value for idx,vardata in self._data.items()} - return {idx:vardata.value - for idx, vardata in self._data.items() - if not vardata.fixed} + return {idx: vardata.value for idx, vardata in self._data.items()} + return { + idx: vardata.value + for idx, vardata in self._data.items() + if not vardata.fixed + } extract_values = get_values @@ -689,7 +729,7 @@ def construct(self, data=None): """ if self._constructed: return - self._constructed=True + self._constructed = True timer = ConstructionTimer(self) if is_debug_set(logger): @@ -712,17 +752,16 @@ def construct(self, data=None): "with 'dense=True'. Reverting to 'dense=False' as " "it is not possible to make this variable dense. " "This warning can be suppressed by specifying " - "'dense=False'" % (self.name,)) + "'dense=False'" % (self.name,) + ) self._dense = False - if ( self._rule_init is not None and - self._rule_init.contains_indices() ): + if self._rule_init is not None and self._rule_init.contains_indices(): # Historically we have allowed Vars to be initialized by # a sparse map (i.e., a dict containing only some of the # keys). We will wrap the incoming initializer to map # KeyErrors to None - self._rule_init = DefaultInitializer( - self._rule_init, None, KeyError) + self._rule_init = DefaultInitializer(self._rule_init, None, KeyError) # The index is coming in externally; we need to validate it for index in self._rule_init.indices(): self[index] @@ -745,20 +784,21 @@ def construct(self, data=None): # (constant portions of) every VarData so as to not # repeat all the domain/bounds validation. try: - ref = self._getitem_when_not_present( - next(iter(self.index_set()))) + ref = self._getitem_when_not_present(next(iter(self.index_set()))) except StopIteration: # Empty index! return call_domain_rule = not self._rule_domain.constant() - call_bounds_rule = self._rule_bounds is not None and ( - not self._rule_bounds.constant()) + call_bounds_rule = ( + self._rule_bounds is not None and not self._rule_bounds.constant() + ) call_init_rule = self._rule_init is not None and ( not self._rule_init.constant() # If either the domain or bounds change, then we # need to re-verify the initial value, even if it is # constant: - or call_domain_rule or call_bounds_rule + or call_domain_rule + or call_bounds_rule ) # Initialize all the component datas with the common data for index in self.index_set(): @@ -792,10 +832,8 @@ def construct(self, data=None): logger.error( "Rule failed when initializing variable for " "Var %s with index %s:\n%s: %s" - % (self.name, - str(index), - type(err).__name__, - err)) + % (self.name, str(index), type(err).__name__, err) + ) raise finally: timer.report() @@ -845,17 +883,19 @@ def _pprint(self): ] if self._units is not None: headers.append(('Units', str(self._units))) - return ( headers, - self._data.items(), - ( "Lower","Value","Upper","Fixed","Stale","Domain"), - lambda k, v: [ value(v.lb), - v.value, - value(v.ub), - v.fixed, - v.stale, - v.domain - ] - ) + return ( + headers, + self._data.items(), + ("Lower", "Value", "Upper", "Fixed", "Stale", "Domain"), + lambda k, v: [ + value(v.lb), + v.value, + value(v.ub), + v.fixed, + v.stale, + v.domain, + ], + ) class ScalarVar(_GeneralVarData, Var): @@ -866,13 +906,6 @@ def __init__(self, *args, **kwd): Var.__init__(self, *args, **kwd) self._index = UnindexedComponent_index - # Since this class derives from Component and Component.__getstate__ - # just packs up the entire __dict__ into the state dict, we do not - # need to define the __getstate__ or __setstate__ methods. - # We just defer to the super() get/set state. Since all of our - # get/set state methods rely on super() to traverse the MRO, this - # will automatically pick up both the Component and Data base classes. - @disable_methods(_VARDATA_API) class AbstractScalarVar(ScalarVar): @@ -932,7 +965,9 @@ def domain(self): raise AttributeError( "The domain is not an attribute for IndexedVar. It " "can be set for all indices using this property setter, " - "but must be accessed for individual variables in this container.") + "but must be accessed for individual variables in this container." + ) + @domain.setter def domain(self, domain): """Sets the domain for all variables in this container.""" @@ -944,6 +979,29 @@ def domain(self, domain): for vardata in self.values(): vardata.domain = domain + # Because CP supports indirection [the ability to index objects by + # another (inter) Var] for certain types (including Var), we will + # catch the normal RuntimeError and return a (variable) + # GetItemExpression. + # + # FIXME: We should integrate this logic into the base implementation + # of `__getitem__()`, including the recognition / differentiation + # between potentially variable GetItemExpression objects and + # "constant" GetItemExpression objects. That will need to wait for + # the expression rework [JDS; Nov 22]. + def __getitem__(self, args): + try: + return super().__getitem__(args) + except RuntimeError: + tmp = args if args.__class__ is tuple else (args,) + if any( + hasattr(arg, 'is_potentially_variable') + and arg.is_potentially_variable() + for arg in tmp + ): + return GetItemExpression((self,) + tmp) + raise + @ModelComponentFactory.register("List of decision variables.") class VarList(IndexedVar): @@ -976,7 +1034,7 @@ def construct(self, data=None): if self._rule_init is not None and self._rule_init.contains_indices(): for i, idx in enumerate(self._rule_init.indices()): self._index_set.add(i + self._starting_index) - super(VarList,self).construct(data) + super(VarList, self).construct(data) def add(self): """Add a variable to this list.""" diff --git a/pyomo/core/beta/__init__.py b/pyomo/core/beta/__init__.py index 8f25d032737..d07668534c8 100644 --- a/pyomo/core/beta/__init__.py +++ b/pyomo/core/beta/__init__.py @@ -11,4 +11,3 @@ import pyomo.core.beta.dict_objects import pyomo.core.beta.list_objects - diff --git a/pyomo/core/beta/dict_objects.py b/pyomo/core/beta/dict_objects.py index d58311b7a5c..c987d0946a3 100644 --- a/pyomo/core/beta/dict_objects.py +++ b/pyomo/core/beta/dict_objects.py @@ -16,14 +16,10 @@ from pyomo.common.log import is_debug_set from pyomo.core.base.set_types import Any -from pyomo.core.base.var import (IndexedVar, - _VarData) -from pyomo.core.base.constraint import (IndexedConstraint, - _ConstraintData) -from pyomo.core.base.objective import (IndexedObjective, - _ObjectiveData) -from pyomo.core.base.expression import (IndexedExpression, - _ExpressionData) +from pyomo.core.base.var import IndexedVar, _VarData +from pyomo.core.base.constraint import IndexedConstraint, _ConstraintData +from pyomo.core.base.objective import IndexedObjective, _ObjectiveData +from pyomo.core.base.expression import IndexedExpression, _ExpressionData from collections.abc import MutableMapping from collections.abc import Mapping @@ -37,8 +33,8 @@ # be implemented on top of these classes. # -class ComponentDict(MutableMapping): +class ComponentDict(MutableMapping): def __init__(self, interface_datatype, *args): self._interface_datatype = interface_datatype self._data = {} @@ -46,15 +42,17 @@ def __init__(self, interface_datatype, *args): if len(args) > 1: raise TypeError( "ComponentDict expected at most 1 arguments, " - "got %s" % (len(args))) + "got %s" % (len(args)) + ) self.update(args[0]) def construct(self, data=None): if is_debug_set(logger): - logger.debug( #pragma:nocover + logger.debug( # pragma:nocover "Constructing ComponentDict object, name=%s, from data=%s" - % (self.name, str(data))) - if self._constructed: #pragma:nocover + % (self.name, str(data)) + ) + if self._constructed: # pragma:nocover return self._constructed = True @@ -126,16 +124,14 @@ def __setitem__(self, key, val): "Invalid component object assignment to ComponentDict " "%s at key %s. A parent component has already been " "assigned the object: %s" - % (self.name, - key, - val.parent_component().name)) + % (self.name, key, val.parent_component().name) + ) # see note about implicit assignment and update raise TypeError( "ComponentDict must be assigned objects " "of type %s. Invalid type for key %s: %s" - % (self._interface_datatype.__name__, - key, - type(val))) + % (self._interface_datatype.__name__, key, type(val)) + ) # Since we don't currently allow objects to be assigned when their # parent component is already set, it would make sense to reset @@ -150,9 +146,14 @@ def __delitem__(self, key): obj._component = None del self._data[key] - def __getitem__(self, key): return self._data[key] - def __iter__(self): return self._data.__iter__() - def __len__(self): return self._data.__len__() + def __getitem__(self, key): + return self._data[key] + + def __iter__(self): + return self._data.__iter__() + + def __len__(self): + return self._data.__len__() # # Override a few default implementations on MutableMapping @@ -165,62 +166,51 @@ def __len__(self): return self._data.__len__() def __eq__(self, other): if not isinstance(other, Mapping): return False - return dict((key, (type(val), id(val))) - for key,val in self.items()) == \ - dict((key, (type(val), id(val))) - for key,val in other.items()) + return dict((key, (type(val), id(val))) for key, val in self.items()) == dict( + (key, (type(val), id(val))) for key, val in other.items() + ) + def __ne__(self, other): return not (self == other) + # # ComponentDict needs to come before IndexedComponent # (or subclasses of) so we can override certain methods # -class VarDict(ComponentDict, IndexedVar): +class VarDict(ComponentDict, IndexedVar): def __init__(self, *args, **kwds): IndexedVar.__init__(self, Any, **kwds) # Constructor for ComponentDict needs to # go last in order to handle any initialization # iterable as an argument - ComponentDict.__init__(self, - _VarData, - *args, - **kwds) + ComponentDict.__init__(self, _VarData, *args, **kwds) -class ConstraintDict(ComponentDict, IndexedConstraint): +class ConstraintDict(ComponentDict, IndexedConstraint): def __init__(self, *args, **kwds): IndexedConstraint.__init__(self, Any, **kwds) # Constructor for ComponentDict needs to # go last in order to handle any initialization # iterable as an argument - ComponentDict.__init__(self, - _ConstraintData, - *args, - **kwds) + ComponentDict.__init__(self, _ConstraintData, *args, **kwds) -class ObjectiveDict(ComponentDict, IndexedObjective): +class ObjectiveDict(ComponentDict, IndexedObjective): def __init__(self, *args, **kwds): IndexedObjective.__init__(self, Any, **kwds) # Constructor for ComponentDict needs to # go last in order to handle any initialization # iterable as an argument - ComponentDict.__init__(self, - _ObjectiveData, - *args, - **kwds) + ComponentDict.__init__(self, _ObjectiveData, *args, **kwds) -class ExpressionDict(ComponentDict, IndexedExpression): +class ExpressionDict(ComponentDict, IndexedExpression): def __init__(self, *args, **kwds): IndexedExpression.__init__(self, Any, **kwds) # Constructor for ComponentDict needs to # go last in order to handle any initialization # iterable as an argument - ComponentDict.__init__(self, - _ExpressionData, - *args, - **kwds) + ComponentDict.__init__(self, _ExpressionData, *args, **kwds) diff --git a/pyomo/core/beta/list_objects.py b/pyomo/core/beta/list_objects.py index 2e3e90d2df2..2c42dfa57c8 100644 --- a/pyomo/core/beta/list_objects.py +++ b/pyomo/core/beta/list_objects.py @@ -16,14 +16,10 @@ from pyomo.common.log import is_debug_set from pyomo.core.base.set_types import Any -from pyomo.core.base.var import (IndexedVar, - _VarData) -from pyomo.core.base.constraint import (IndexedConstraint, - _ConstraintData) -from pyomo.core.base.objective import (IndexedObjective, - _ObjectiveData) -from pyomo.core.base.expression import (IndexedExpression, - _ExpressionData) +from pyomo.core.base.var import IndexedVar, _VarData +from pyomo.core.base.constraint import IndexedConstraint, _ConstraintData +from pyomo.core.base.objective import IndexedObjective, _ObjectiveData +from pyomo.core.base.expression import IndexedExpression, _ExpressionData from collections.abc import MutableSequence @@ -36,8 +32,8 @@ # be implemented on top of these classes. # -class ComponentList(MutableSequence): +class ComponentList(MutableSequence): def __init__(self, interface_datatype, *args): self._interface_datatype = interface_datatype self._data = [] @@ -45,16 +41,18 @@ def __init__(self, interface_datatype, *args): if len(args) > 1: raise TypeError( "ComponentList expected at most 1 arguments, " - "got %s" % (len(args))) + "got %s" % (len(args)) + ) for item in args[0]: self.append(item) def construct(self, data=None): if is_debug_set(logger): - logger.debug( #pragma:nocover + logger.debug( # pragma:nocover "Constructing ComponentList object, name=%s, from data=%s" - % (self.name, str(data))) - if self._constructed: #pragma:nocover + % (self.name, str(data)) + ) + if self._constructed: # pragma:nocover return self._constructed = True @@ -68,11 +66,19 @@ def construct(self, data=None): # iterating. I don't think that would be difficult to do. # - def keys(self): return range(len(self)) + def keys(self): + return range(len(self)) + iterkeys = keys - def values(self): return list(iter(self)) + + def values(self): + return list(iter(self)) + itervalues = values - def items(self): return zip(self.keys(), self.values()) + + def items(self): + return zip(self.keys(), self.values()) + iteritems = items # @@ -114,17 +120,14 @@ def __setitem__(self, i, item): raise ValueError( "Invalid component object assignment to ComponentList " "%s at index %s. A parent component has already been " - "assigned the object: %s" - % (self.name, - i, - item.parent_component().name)) + "assigned the object: %s" % (self.name, i, item.parent_component().name) + ) # see note about implicit assignment and update raise TypeError( "ComponentList must be assigned objects " "of type %s. Invalid type for key %s: %s" - % (self._interface_datatype.__name__, - i, - type(item))) + % (self._interface_datatype.__name__, i, type(item)) + ) # * Only supports explicit objects. See notes above __setitem__ # for more information @@ -142,17 +145,14 @@ def insert(self, i, item): raise ValueError( "Invalid component object assignment to ComponentList " "%s at index %s. A parent component has already been " - "assigned the object: %s" - % (self.name, - i, - item.parent_component().name)) + "assigned the object: %s" % (self.name, i, item.parent_component().name) + ) # see note about implicit assignment and update raise TypeError( "ComponentList must be assigned objects " "of type %s. Invalid type for key %s: %s" - % (self._interface_datatype.__name__, - i, - type(item))) + % (self._interface_datatype.__name__, i, type(item)) + ) # Since we don't currently allow objects to be assigned when their # parent component is already set, it would make sense to reset @@ -167,8 +167,11 @@ def __delitem__(self, i): obj._component = None del self._data[i] - def __getitem__(self, i): return self._data[i] - def __len__(self): return self._data.__len__() + def __getitem__(self, i): + return self._data[i] + + def __len__(self): + return self._data.__len__() # # Override a few default implementations on MutableSequence @@ -183,7 +186,7 @@ def __contains__(self, item): def index(self, item, start=0, stop=None): '''S.index(value, [start, [stop]]) -> integer -- return first index of value. - Raises ValueError if the value is not present. + Raises ValueError if the value is not present. ''' if start is not None and start < 0: start = max(len(self) + start, 0) @@ -215,58 +218,47 @@ def reverse(self): 'S.reverse() -- reverse *IN PLACE*' n = len(self) data = self._data - for i in range(n//2): - data[i], data[n-i-1] = data[n-i-1], data[i] + for i in range(n // 2): + data[i], data[n - i - 1] = data[n - i - 1], data[i] + # # ComponentList needs to come before IndexedComponent # (or subclasses of) so we can override certain methods # -class XVarList(ComponentList, IndexedVar): +class XVarList(ComponentList, IndexedVar): def __init__(self, *args, **kwds): IndexedVar.__init__(self, Any, **kwds) # Constructor for ComponentList needs to # go last in order to handle any initialization # iterable as an argument - ComponentList.__init__(self, - _VarData, - *args, - **kwds) + ComponentList.__init__(self, _VarData, *args, **kwds) -class XConstraintList(ComponentList, IndexedConstraint): +class XConstraintList(ComponentList, IndexedConstraint): def __init__(self, *args, **kwds): IndexedConstraint.__init__(self, Any, **kwds) # Constructor for ComponentList needs to # go last in order to handle any initialization # iterable as an argument - ComponentList.__init__(self, - _ConstraintData, - *args, - **kwds) + ComponentList.__init__(self, _ConstraintData, *args, **kwds) -class XObjectiveList(ComponentList, IndexedObjective): +class XObjectiveList(ComponentList, IndexedObjective): def __init__(self, *args, **kwds): IndexedObjective.__init__(self, Any, **kwds) # Constructor for ComponentList needs to # go last in order to handle any initialization # iterable as an argument - ComponentList.__init__(self, - _ObjectiveData, - *args, - **kwds) + ComponentList.__init__(self, _ObjectiveData, *args, **kwds) -class XExpressionList(ComponentList, IndexedExpression): +class XExpressionList(ComponentList, IndexedExpression): def __init__(self, *args, **kwds): IndexedExpression.__init__(self, Any, **kwds) # Constructor for ComponentList needs to # go last in order to handle any initialization # iterable as an argument - ComponentList.__init__(self, - _ExpressionData, - *args, - **kwds) + ComponentList.__init__(self, _ExpressionData, *args, **kwds) diff --git a/pyomo/core/expr/__init__.py b/pyomo/core/expr/__init__.py index e0033a1f38e..5e30fceeeaa 100644 --- a/pyomo/core/expr/__init__.py +++ b/pyomo/core/expr/__init__.py @@ -15,29 +15,193 @@ # pyomo.core.expr. The idea is that pyomo.core.expr provides symbols # that are used by general users, but pyomo.core.expr.current provides # symbols that are used by developers. -# +# + +from . import ( + numvalue, + visitor, + numeric_expr, + boolean_value, + logical_expr, + relational_expr, +) -from pyomo.core.expr import numvalue, numeric_expr, boolean_value, logical_expr, current +# +# FIXME: remove circular dependencies between relational_expr and numeric_expr +# -from pyomo.core.expr.numvalue import ( - value, is_constant, is_fixed, is_variable_type, - is_potentially_variable, NumericValue, ZeroConstant, - native_numeric_types, native_types, polynomial_degree, +# Initialize relational expression functions +numeric_expr._generate_relational_expression = ( + relational_expr._generate_relational_expression ) -from pyomo.core.expr.boolean_value import BooleanValue +# Initialize logicalvalue functions +boolean_value._generate_logical_proposition = logical_expr._generate_logical_proposition + -from pyomo.core.expr.numeric_expr import linear_expression, nonlinear_expression -from pyomo.core.expr.logical_expr import (land, lor, equivalent, exactly, - atleast, atmost, implies, lnot, - xor, inequality) +from pyomo.common.numeric_types import ( + value, + native_numeric_types, + native_types, + nonpyomo_leaf_types, +) +from pyomo.common.errors import TemplateExpressionError -from pyomo.core.expr.current import ( - log, log10, sin, cos, tan, cosh, sinh, tanh, - asin, acos, atan, exp, sqrt, asinh, acosh, - atanh, ceil, floor, +from .base import ExpressionBase +from .boolean_value import BooleanValue +from .expr_common import ExpressionType, Mode, OperatorAssociativity +from .logical_expr import ( + native_logical_types, + special_boolean_atom_types, + # + BooleanValue, + BooleanConstant, + BooleanExpressionBase, + # + UnaryBooleanExpression, + NotExpression, + BinaryBooleanExpression, + EquivalenceExpression, + XorExpression, + ImplicationExpression, + NaryBooleanExpression, + AndExpression, + OrExpression, + ExactlyExpression, + AtMostExpression, + AtLeastExpression, + # + land, + lnot, + lor, + xor, + equivalent, + exactly, + atleast, + atmost, + implies, +) +from .numeric_expr import ( + NumericValue, + NumericExpression, + # operators: + AbsExpression, + DivisionExpression, + Expr_ifExpression, + ExternalFunctionExpression, + LinearExpression, + MaxExpression, + MinExpression, + MonomialTermExpression, + NegationExpression, + PowExpression, + ProductExpression, + SumExpressionBase, # TODO: deprecate / remove + SumExpression, + UnaryFunctionExpression, + # TBD: remove export of NPV classes here? + NPV_AbsExpression, + NPV_DivisionExpression, + NPV_Expr_ifExpression, + NPV_ExternalFunctionExpression, + NPV_MaxExpression, + NPV_MinExpression, + NPV_NegationExpression, + NPV_PowExpression, + NPV_ProductExpression, + NPV_SumExpression, + NPV_UnaryFunctionExpression, + # functions to generate expressions Expr_if, + log, + log10, + sin, + cos, + tan, + cosh, + sinh, + tanh, + asin, + acos, + atan, + exp, + sqrt, + asinh, + acosh, + atanh, + ceil, + floor, + # Lgacy utilities + NPV_expression_types, # TODO: remove + LinearDecompositionError, # TODO: move to common.errors + decompose_term, + linear_expression, + nonlinear_expression, + mutable_expression, +) +from .numvalue import ( + as_numeric, + is_constant, + is_fixed, + is_variable_type, + is_potentially_variable, + ZeroConstant, + polynomial_degree, +) +from .relational_expr import ( + RelationalExpression, + RangedExpression, + InequalityExpression, + EqualityExpression, + NotEqualExpression, + inequality, +) +from .symbol_map import SymbolMap +from .template_expr import ( + GetItemExpression, + Numeric_GetItemExpression, + Boolean_GetItemExpression, + Structural_GetItemExpression, + GetAttrExpression, + Numeric_GetAttrExpression, + Boolean_GetAttrExpression, + Structural_GetAttrExpression, + CallExpression, + TemplateSumExpression, + # + NPV_Numeric_GetItemExpression, + NPV_Boolean_GetItemExpression, + NPV_Structural_GetItemExpression, + NPV_Numeric_GetAttrExpression, + NPV_Boolean_GetAttrExpression, + NPV_Structural_GetAttrExpression, + # + IndexTemplate, + resolve_template, + ReplaceTemplateExpression, + substitute_template_expression, + substitute_getitem_with_param, + substitute_template_with_value, + templatize_rule, + templatize_constraint, +) +from .visitor import ( + StreamBasedExpressionVisitor, + SimpleExpressionVisitor, + ExpressionValueVisitor, + ExpressionReplacementVisitor, + FixedExpressionError, + NonConstantExpressionError, + identify_components, + identify_variables, + identify_mutable_parameters, + clone_expression, + evaluate_expression, + expression_to_string, + polynomial_degree, + replace_expressions, + sizeof_expression, ) -from pyomo.core.expr.calculus.derivatives import differentiate -from pyomo.core.expr.taylor_series import taylor_series_expansion +from .calculus.derivatives import differentiate +from .taylor_series import taylor_series_expansion diff --git a/pyomo/core/expr/base.py b/pyomo/core/expr/base.py new file mode 100644 index 00000000000..b74bbff4e3c --- /dev/null +++ b/pyomo/core/expr/base.py @@ -0,0 +1,452 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import enum + +from pyomo.common.dependencies import attempt_import +from pyomo.common.numeric_types import native_types +from pyomo.core.pyomoobject import PyomoObject +from pyomo.core.expr.expr_common import OperatorAssociativity + +visitor, _ = attempt_import('pyomo.core.expr.visitor') + + +class ExpressionBase(PyomoObject): + """The base class for all Pyomo expression systems. + + This class is used to define nodes in a general expression tree. + Individual expression systems (numeric, logical, etc.) will mix this + class in with their fundamental base data type (NumericValue, + BooleanValue, etc) to form the base node of that expression system. + """ + + __slots__ = () + + PRECEDENCE = 0 + + # Most operators in Python are left-to-right associative + """Return the associativity of this operator. + + Returns 1 if this operator is left-to-right associative or -1 if + it is right-to-left associative. Any other return value will be + interpreted as "not associative" (implying any arguments that + are at this operator's PRECEDENCE will be enclosed in parens). + """ + ASSOCIATIVITY = OperatorAssociativity.LEFT_TO_RIGHT + + def nargs(self): + """Returns the number of child nodes. + + Note + ---- + + Individual expression nodes may use different internal storage + schemes, so it is imperative that developers use this method and + not assume the existence of a particular attribute! + + Returns + ------- + int: A nonnegative integer that is the number of child nodes. + + """ + raise NotImplementedError( + f"Derived expression ({self.__class__}) failed to implement nargs()" + ) + + def arg(self, i): + """Return the i-th child node. + + Parameters + ---------- + i: int + Index of the child argument to return + + Returns: The i-th child node. + + """ + if i < 0: + i += self.nargs() + if i < 0: + raise KeyError( + "Invalid index for expression argument: %d" % i - self.nargs() + ) + elif i >= self.nargs(): + raise KeyError("Invalid index for expression argument: %d" % i) + return self._args_[i] + + @property + def args(self): + """Return the child nodes + + Returns + ------- + list or tuple: + Sequence containing only the child nodes of this node. The + return type depends on the node storage model. Users are + not permitted to change the returned data (even for the case + of data returned as a list), as that breaks the promise of + tree immutability. + + """ + raise NotImplementedError( + f"Derived expression ({self.__class__}) failed to implement args()" + ) + + def __call__(self, exception=True): + """Evaluate the value of the expression tree. + + Parameters + ---------- + exception: bool + If :const:`False`, then an exception raised while evaluating + is captured, and the value returned is :const:`None`. + Default is :const:`True`. + + Returns + ------- + The value of the expression or :const:`None`. + + """ + return visitor.evaluate_expression(self, exception) + + def __str__(self): + """Returns a string description of the expression. + + Note: + + The value of ``pyomo.core.expr.expr_common.TO_STRING_VERBOSE`` + is used to configure the execution of this method. If this + value is :const:`True`, then the string representation is a + nested function description of the expression. The default is + :const:`False`, which returns an algebraic (infix notation) + description of the expression. + + Returns + ------- + str + """ + return visitor.expression_to_string(self) + + def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): + """Return a string representation of the expression tree. + + Parameters + ---------- + verbose: bool + If :const:`True`, then the string representation + consists of nested functions. Otherwise, the string + representation is an algebraic (infix notation) equation. + Defaults to :const:`False`. + + labeler: + An object that generates string labels for variables in the + expression tree. Defaults to :const:`None`. + + smap: + If specified, this + :class:`SymbolMap ` + is used to cache labels for variables. + + compute_values (bool): + If :const:`True`, then parameters and fixed variables are + evaluated before the expression string is generated. + Default is :const:`False`. + + Returns: + A string representation for the expression tree. + + """ + return visitor.expression_to_string( + self, + verbose=verbose, + labeler=labeler, + smap=smap, + compute_values=compute_values, + ) + + def _to_string(self, values, verbose, smap): + """ + Construct a string representation for this node, using the string + representations of its children. + + This method is called by the :class:`_ToStringVisitor + ` class. It must + must be defined in subclasses. + + Args: + values (list): The string representations of the children of this + node. + verbose (bool): If :const:`True`, then the string + representation consists of nested functions. Otherwise, + the string representation is an algebraic equation. + smap: If specified, this :class:`SymbolMap + ` is + used to cache labels for variables. + + Returns: + A string representation for this node. + """ + raise NotImplementedError( + f"Derived expression ({self.__class__}) failed to implement _to_string()" + ) + + def getname(self, *args, **kwds): + """Return the text name of a function associated with this expression + object. + + In general, no arguments are passed to this function. + + Args: + *arg: a variable length list of arguments + **kwds: keyword arguments + + Returns: + A string name for the function. + + """ + raise NotImplementedError( + f"Derived expression ({self.__class__}) failed to implement getname()" + ) + + def clone(self, substitute=None): + """ + Return a clone of the expression tree. + + Note: + This method does not clone the leaves of the + tree, which are numeric constants and variables. + It only clones the interior nodes, and + expression leaf nodes like + :class:`_MutableLinearExpression`. + However, named expressions are treated like + leaves, and they are not cloned. + + Args: + substitute (dict): a dictionary that maps object ids to clone + objects generated earlier during the cloning process. + + Returns: + A new expression tree. + """ + return visitor.clone_expression(self, substitute=substitute) + + def create_node_with_local_data(self, args, classtype=None): + """ + Construct a node using given arguments. + + This method provides a consistent interface for constructing a + node, which is used in tree visitor scripts. In the simplest + case, this returns:: + + self.__class__(args) + + But in general this creates an expression object using local + data as well as arguments that represent the child nodes. + + Args: + args (list): A list of child nodes for the new expression + object + + Returns: + A new expression object with the same type as the current + class. + """ + if classtype is None: + classtype = self.__class__ + return classtype(args) + + def is_constant(self): + """Return True if this expression is an atomic constant + + This method contrasts with the is_fixed() method. This method + returns True if the expression is an atomic constant, that is it + is composed exclusively of constants and immutable parameters. + NumericValue objects returning is_constant() == True may be + simplified to their numeric value at any point without warning. + + Note: This defaults to False, but gets redefined in sub-classes. + """ + return False + + def is_fixed(self): + """ + Return :const:`True` if this expression contains no free variables. + + Returns: + A boolean. + """ + return visitor._expression_is_fixed(self) + + def _is_fixed(self, values): + """ + Compute whether this expression is fixed given + the fixed values of its children. + + This method is called by the :class:`_IsFixedVisitor + ` class. It can + be over-written by expression classes to customize this + logic. + + Args: + values (list): A list of boolean values that indicate whether + the children of this expression are fixed + + Returns: + A boolean that is :const:`True` if the fixed values of the + children are all :const:`True`. + """ + return all(values) + + def is_potentially_variable(self): + """ + Return :const:`True` if this expression might represent + a variable expression. + + This method returns :const:`True` when (a) the expression + tree contains one or more variables, or (b) the expression + tree contains a named expression. In both cases, the + expression cannot be treated as constant since (a) the variables + may not be fixed, or (b) the named expressions may be changed + at a later time to include non-fixed variables. + + Returns: + A boolean. Defaults to :const:`True` for expressions. + """ + return True + + def is_named_expression_type(self): + """ + Return :const:`True` if this object is a named expression. + + This method returns :const:`False` for this class, and it + is included in other classes within Pyomo that are not named + expressions, which allows for a check for named expressions + without evaluating the class type. + + Returns: + A boolean. + """ + return False + + def is_expression_type(self, expression_system=None): + """ + Return :const:`True` if this object is an expression. + + This method obviously returns :const:`True` for this class, but it + is included in other classes within Pyomo that are not expressions, + which allows for a check for expressions without + evaluating the class type. + + Returns: + A boolean. + """ + return expression_system is None or expression_system == self.EXPRESSION_SYSTEM + + def size(self): + """ + Return the number of nodes in the expression tree. + + Returns: + A nonnegative integer that is the number of interior and leaf + nodes in the expression tree. + """ + return visitor.sizeof_expression(self) + + def _apply_operation(self, result): # pragma: no cover + """ + Compute the values of this node given the values of its children. + + This method is called by the :class:`_EvaluationVisitor + ` class. It must + be over-written by expression classes to customize this logic. + + Note: + This method applies the logical operation of the + operator to the arguments. It does *not* evaluate + the arguments in the process, but assumes that they + have been previously evaluated. But note that if + this class contains auxiliary data (e.g. like the + numeric coefficients in the :class:`LinearExpression + ` class) then + those values *must* be evaluated as part of this + function call. An uninitialized parameter value + encountered during the execution of this method is + considered an error. + + Args: + values (list): A list of values that indicate the value + of the children expressions. + + Returns: + A floating point value for this expression. + """ + raise NotImplementedError( + f"Derived expression ({self.__class__}) failed to " + "implement _apply_operation()" + ) + + +class NPV_Mixin(object): + __slots__ = () + + def is_potentially_variable(self): + return False + + def create_node_with_local_data(self, args, classtype=None): + assert classtype is None + try: + npv_args = all( + type(arg) in native_types or not arg.is_potentially_variable() + for arg in args + ) + except AttributeError: + # We can hit this during expression replacement when the new + # type is not a PyomoObject type, but is not in the + # native_types set. We will play it safe and clear the NPV flag + npv_args = False + if npv_args: + return super().create_node_with_local_data(args, None) + else: + return super().create_node_with_local_data( + args, self.potentially_variable_base_class() + ) + + def potentially_variable_base_class(self): + cls = list(self.__class__.__bases__) + cls.remove(NPV_Mixin) + assert len(cls) == 1 + return cls[0] + + +class ExpressionArgs_Mixin(object): + __slots__ = ('_args_',) + + def __init__(self, args): + self._args_ = args + + def nargs(self): + return len(self._args_) + + @property + def args(self): + """ + Return the child nodes + + Returns + ------- + list or tuple: + Sequence containing only the child nodes of this node. The + return type depends on the node storage model. Users are + not permitted to change the returned data (even for the case + of data returned as a list), as that breaks the promise of + tree immutability. + """ + return self._args_ diff --git a/pyomo/core/expr/boolean_value.py b/pyomo/core/expr/boolean_value.py index c7321e04c96..b9c8ece29c8 100644 --- a/pyomo/core/expr/boolean_value.py +++ b/pyomo/core/expr/boolean_value.py @@ -1,6 +1,18 @@ +# -*- coding: utf-8 -*- +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ import sys import logging +from pyomo.common.deprecation import deprecated from pyomo.core.expr.numvalue import native_types, native_logical_types from pyomo.core.expr.expr_common import _and, _or, _equiv, _inv, _xor, _impl from pyomo.core.pyomoobject import PyomoObject @@ -10,7 +22,9 @@ def _generate_logical_proposition(etype, _self, _other): - raise RuntimeError("Incomplete import of Pyomo expression system") #pragma: no cover + raise RuntimeError( + "Incomplete import of Pyomo expression system" + ) # pragma: no cover def as_boolean(obj): @@ -21,7 +35,7 @@ def as_boolean(obj): Args: obj: The logical value that may be wrapped. - Raises: TypeError if the object is in native_types and not in + Raises: TypeError if the object is in native_types and not in native_logical_types Returns: A true or false BooleanConstant or the original object @@ -40,45 +54,21 @@ def as_boolean(obj): # Generate errors # if obj.__class__ in native_types: - raise TypeError("Cannot treat the value '%s' as a logical constant" % str(obj)) + raise TypeError(f"Cannot treat the value '{obj}' as a logical constant") raise TypeError( - "Cannot treat the value '%s' as a logical constant because it has unknown " - "type '%s'" % (str(obj), type(obj).__name__)) + "Cannot treat the value '%s' as a logical constant because it has " + "unknown type '%s'" % (str(obj), type(obj).__name__) + ) class BooleanValue(PyomoObject): """ This is the base class for Boolean values used in Pyomo. """ + __slots__ = () __hash__ = None - def __getstate__(self): - _base = super(BooleanValue, self) - if hasattr(_base, '__getstate__'): - return _base.__getstate__() - else: - return {} - - def __setstate__(self, state): - """ - Restore a pickled state into this instance - Our model for setstate is for derived classes to modify - the state dictionary as control passes up the inheritance - hierarchy (using super() calls). All assignment of state -> - object attributes is handled at the last class before 'object', - which may -- or may not (thanks to MRO) -- be here. - """ - _base = super(BooleanValue, self) - if hasattr(_base, '__setstate__'): - return _base.__setstate__(state) - else: - for key, val in state.items(): - # Note: per the Python data model docs, we explicitly - # set the attribute using object.__setattr__() instead - # of setting self.__dict__[key] = val. - object.__setattr__(self, key, val) - def getname(self, fully_qualified=False, name_buffer=None): """ If this is a component, return the component's name on the owning @@ -106,6 +96,11 @@ def is_fixed(self): """Return True if this is a non-constant value that has been fixed""" return False + @deprecated( + "is_relational() is deprecated in favor of " + "is_expression_type(ExpressionType.RELATIONAL)", + version='6.4.3', + ) def is_relational(self): """ Return True if this Logical value represents a relational expression. @@ -123,33 +118,108 @@ def is_numeric_type(self): def is_logical_type(self): return True + def __invert__(self): + """ + Construct a NotExpression using operator '~' + """ + return _generate_logical_proposition(_inv, self, None) + def equivalent_to(self, other): """ Construct an EquivalenceExpression between this BooleanValue and its operand. """ - return _generate_logical_proposition(_equiv, self, other) + ans = _generate_logical_proposition(_equiv, self, other) + if ans is NotImplemented: + raise TypeError( + "unsupported operand type for equivalent_to(): " + f"'{type(other).__name__}'" + ) + return ans def land(self, other): """ - Construct an AndExpression (Logical And) between this BooleanValue and its operand. + Construct an AndExpression (Logical And) between this BooleanValue and `other`. + """ + ans = _generate_logical_proposition(_and, self, other) + if ans is NotImplemented: + raise TypeError( + f"unsupported operand type for land(): '{type(other).__name__}'" + ) + return ans + + def __and__(self, other): + """ + Construct an AndExpression using the '&' operator + """ + return _generate_logical_proposition(_and, self, other) + + def __rand__(self, other): + """ + Construct an AndExpression using the '&' operator + """ + return _generate_logical_proposition(_and, other, self) + + def __iand__(self, other): + """ + Construct an AndExpression using the '&' operator """ return _generate_logical_proposition(_and, self, other) def lor(self, other): """ - Construct an OrExpression (Logical OR) between this BooleanValue and its operand. + Construct an OrExpression (Logical OR) between this BooleanValue and `other`. + """ + ans = _generate_logical_proposition(_or, self, other) + if ans is NotImplemented: + raise TypeError( + f"unsupported operand type for lor(): '{type(other).__name__}'" + ) + return ans + + def __or__(self, other): + """ + Construct an OrExpression using the '|' operator """ return _generate_logical_proposition(_or, self, other) - def __invert__(self): + def __ror__(self, other): """ - Construct a NotExpression using operator '~' + Construct an OrExpression using the '|' operator """ - return _generate_logical_proposition(_inv, self, None) + return _generate_logical_proposition(_or, other, self) + + def __ior__(self, other): + """ + Construct an OrExpression using the '|' operator + """ + return _generate_logical_proposition(_or, self, other) def xor(self, other): """ - Construct an EquivalenceExpression using method "xor" + Construct an XorExpression using method "xor" + """ + ans = _generate_logical_proposition(_xor, self, other) + if ans is NotImplemented: + raise TypeError( + f"unsupported operand type for xor(): '{type(other).__name__}'" + ) + return ans + + def __xor__(self, other): + """ + Construct an XorExpression using the '^' operator + """ + return _generate_logical_proposition(_xor, self, other) + + def __rxor__(self, other): + """ + Construct an XorExpression using the '^' operator + """ + return _generate_logical_proposition(_xor, other, self) + + def __ixor__(self, other): + """ + Construct an XorExpression using the '^' operator """ return _generate_logical_proposition(_xor, self, other) @@ -157,35 +227,39 @@ def implies(self, other): """ Construct an ImplicationExpression using method "implies" """ - return _generate_logical_proposition(_impl, self, other) - - def to_string(self, verbose=None, labeler=None, smap=None, - compute_values=False): + ans = _generate_logical_proposition(_impl, self, other) + if ans is NotImplemented: + raise TypeError( + f"unsupported operand type for implies(): '{type(other).__name__}'" + ) + return ans + + def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): """ Return a string representation of the expression tree. Args: - verbose (bool): If :const:`True`, then the the string + verbose (bool): If :const:`True`, then the the string representation consists of nested functions. Otherwise, the string representation is an algebraic equation. Defaults to :const:`False`. - labeler: An object that generates string labels for + labeler: An object that generates string labels for variables in the expression tree. Defaults to :const:`None`. Returns: A string representation for the expression tree. """ - if compute_values and self.is_fixed(): + if (compute_values and self.is_fixed()) or self.is_constant(): try: return str(self()) except: - pass - if not self.is_constant(): - if smap: - return smap.getSymbol(self, labeler) - elif labeler is not None: - return labeler(self) - return str(self) + pass # return str(self) + if smap: + return smap.getSymbol(self, labeler) + elif labeler is not None: + return labeler(self) + else: + return str(self) class BooleanConstant(BooleanValue): @@ -199,15 +273,11 @@ class BooleanConstant(BooleanValue): def __init__(self, value): if value not in native_logical_values: - raise TypeError('Not a valid BooleanValue. Unable to create a logical constant') + raise TypeError( + 'Not a valid BooleanValue. Unable to create a logical constant' + ) self.value = value - def __getstate__(self): - state = super(BooleanConstant, self).__getstate__() - for i in BooleanConstant.__slots__: - state[i] = getattr(self, i) - return state - def is_constant(self): return True @@ -231,6 +301,6 @@ def __call__(self, exception=True): return self.value def pprint(self, ostream=None, verbose=False): - if ostream is None: #pragma:nocover + if ostream is None: # pragma:nocover ostream = sys.stdout ostream.write(str(self)) diff --git a/pyomo/core/expr/calculus/derivatives.py b/pyomo/core/expr/calculus/derivatives.py index b4d2863708a..c9787b0e309 100644 --- a/pyomo/core/expr/calculus/derivatives.py +++ b/pyomo/core/expr/calculus/derivatives.py @@ -37,7 +37,7 @@ def differentiate(expr, wrt=None, wrt_list=None, mode=Modes.reverse_numeric): Parameters ---------- - expr: pyomo.core.expr.numeric_expr.ExpressionBase + expr: pyomo.core.expr.numeric_expr.NumericExpression The expression to differentiate wrt: pyomo.core.base.var._GeneralVarData If specified, this function will return the derivative with @@ -77,7 +77,7 @@ def differentiate(expr, wrt=None, wrt_list=None, mode=Modes.reverse_numeric): Returns ------- - res: float, :py:class:`ExpressionBase`, :py:class:`ComponentMap`, or list + res: float, :py:class:`NumericExpression`, :py:class:`ComponentMap`, or list The value or expression of the derivative(s) """ @@ -98,7 +98,8 @@ def differentiate(expr, wrt=None, wrt_list=None, mode=Modes.reverse_numeric): if wrt is not None: if wrt_list is not None: raise ValueError( - 'differentiate(): Cannot specify both wrt and wrt_list.') + 'differentiate(): Cannot specify both wrt and wrt_list.' + ) if wrt in res: res = res[wrt] else: diff --git a/pyomo/core/expr/calculus/diff_with_pyomo.py b/pyomo/core/expr/calculus/diff_with_pyomo.py index d5812cbec9c..952e8ec6dd3 100644 --- a/pyomo/core/expr/calculus/diff_with_pyomo.py +++ b/pyomo/core/expr/calculus/diff_with_pyomo.py @@ -10,10 +10,10 @@ # ___________________________________________________________________________ from pyomo.common.collections import ComponentMap, ComponentSet -from pyomo.core.expr import current as _expr +import pyomo.core.expr as _expr from pyomo.core.expr.visitor import ExpressionValueVisitor, nonpyomo_leaf_types from pyomo.core.expr.numvalue import value, is_constant -from pyomo.core.expr.current import exp, log, sin, cos +from pyomo.core.expr import exp, log, sin, cos import math @@ -78,7 +78,7 @@ def _diff_PowExpression(node, val_dict, der_dict): der = der_dict[node] val1 = val_dict[arg1] val2 = val_dict[arg2] - der_dict[arg1] += der * val2 * val1**(val2 - 1) + der_dict[arg1] += der * val2 * val1 ** (val2 - 1) if arg2.__class__ not in nonpyomo_leaf_types: der_dict[arg2] += der * val1**val2 * log(val1) @@ -96,8 +96,8 @@ def _diff_DivisionExpression(node, val_dict, der_dict): num = node.args[0] den = node.args[1] der = der_dict[node] - der_dict[num] += der * (1/val_dict[den]) - der_dict[den] -= der * val_dict[num] / val_dict[den]**2 + der_dict[num] += der * (1 / val_dict[den]) + der_dict[den] -= der * val_dict[num] / val_dict[den] ** 2 def _diff_NegationExpression(node, val_dict, der_dict): @@ -202,7 +202,7 @@ def _diff_tan(node, val_dict, der_dict): assert len(node.args) == 1 arg = node.args[0] der = der_dict[node] - der_dict[arg] += der / (cos(val_dict[arg])**2) + der_dict[arg] += der / (cos(val_dict[arg]) ** 2) def _diff_asin(node, val_dict, der_dict): @@ -217,7 +217,7 @@ def _diff_asin(node, val_dict, der_dict): assert len(node.args) == 1 arg = node.args[0] der = der_dict[node] - der_dict[arg] += der / (1 - val_dict[arg]**2)**0.5 + der_dict[arg] += der / (1 - val_dict[arg] ** 2) ** 0.5 def _diff_acos(node, val_dict, der_dict): @@ -232,7 +232,7 @@ def _diff_acos(node, val_dict, der_dict): assert len(node.args) == 1 arg = node.args[0] der = der_dict[node] - der_dict[arg] -= der / (1 - val_dict[arg]**2)**0.5 + der_dict[arg] -= der / (1 - val_dict[arg] ** 2) ** 0.5 def _diff_atan(node, val_dict, der_dict): @@ -247,7 +247,7 @@ def _diff_atan(node, val_dict, der_dict): assert len(node.args) == 1 arg = node.args[0] der = der_dict[node] - der_dict[arg] += der / (1 + val_dict[arg]**2) + der_dict[arg] += der / (1 + val_dict[arg] ** 2) def _diff_sqrt(node, val_dict, der_dict): @@ -264,7 +264,7 @@ def _diff_sqrt(node, val_dict, der_dict): assert len(node.args) == 1 arg = node.args[0] der = der_dict[node] - der_dict[arg] += der * 0.5 * val_dict[arg]**(-0.5) + der_dict[arg] += der * 0.5 * val_dict[arg] ** (-0.5) def _diff_abs(node, val_dict, der_dict): @@ -313,7 +313,9 @@ def _diff_UnaryFunctionExpression(node, val_dict, der_dict): if node.getname() in _unary_map: _unary_map[node.getname()](node, val_dict, der_dict) else: - raise DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(node))) + raise DifferentiationException( + 'Unsupported expression type for differentiation: {0}'.format(type(node)) + ) def _diff_GeneralExpression(node, val_dict, der_dict): @@ -439,7 +441,9 @@ def _reverse_diff_helper(expr, numeric=True): elif e.is_named_expression_type(): _diff_GeneralExpression(e, val_dict, der_dict) else: - raise DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(e))) + raise DifferentiationException( + 'Unsupported expression type for differentiation: {0}'.format(type(e)) + ) return der_dict @@ -450,7 +454,7 @@ def reverse_ad(expr): Parameters ---------- - expr: pyomo.core.expr.numeric_expr.ExpressionBase + expr: pyomo.core.expr.numeric_expr.NumericExpression expression to differentiate Returns @@ -468,7 +472,7 @@ def reverse_sd(expr): Parameters ---------- - expr: pyomo.core.expr.numeric_expr.ExpressionBase + expr: pyomo.core.expr.numeric_expr.NumericExpression expression to differentiate Returns diff --git a/pyomo/core/expr/calculus/diff_with_sympy.py b/pyomo/core/expr/calculus/diff_with_sympy.py index 8c5f275af36..32cf60547ec 100644 --- a/pyomo/core/expr/calculus/diff_with_sympy.py +++ b/pyomo/core/expr/calculus/diff_with_sympy.py @@ -9,7 +9,11 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.expr.sympy_tools import sympy_available, sympyify_expression, sympy2pyomo_expression +from pyomo.core.expr.sympy_tools import ( + sympy_available, + sympyify_expression, + sympy2pyomo_expression, +) # A "public" attribute indicating that differentiate() can be called # ... this provides a bit of future-proofing for alternative approaches @@ -36,11 +40,14 @@ def differentiate(expr, wrt=None, wrt_list=None): if not sympy_available: raise RuntimeError( "The sympy module is not available.\n\t" - "Cannot perform automatic symbolic differentiation.") - if not (( wrt is None ) ^ ( wrt_list is None )): + "Cannot perform automatic symbolic differentiation." + ) + if not ((wrt is None) ^ (wrt_list is None)): raise ValueError( - "differentiate(): Must specify exactly one of wrt and wrt_list") + "differentiate(): Must specify exactly one of wrt and wrt_list" + ) import sympy + # # Convert the Pyomo expression to a sympy expression # @@ -51,26 +58,26 @@ def differentiate(expr, wrt=None, wrt_list=None): # appear in the expression (so that we can detect wrt combinations # that are, by definition, 0) # - partial_derivs = {x:None for x in objectMap.sympyVars()} + partial_derivs = {x: None for x in objectMap.sympyVars()} # # Setup the WRT list # if wrt is not None: - wrt_list = [ wrt ] + wrt_list = [wrt] else: # Copy the list because we will normalize things in place below wrt_list = list(wrt_list) # # Convert WRT vars into sympy vars # - ans = [None]*len(wrt_list) + ans = [None] * len(wrt_list) for i, target in enumerate(wrt_list): if target.__class__ is not tuple: target = (target,) wrt_list[i] = tuple(objectMap.getSympySymbol(x) for x in target) for x in wrt_list[i]: if x not in partial_derivs: - ans[i] = 0. + ans[i] = 0.0 break # # We assume that users will not request duplicate derivatives. We @@ -90,7 +97,7 @@ def differentiate(expr, wrt=None, wrt_list=None): if j == last_partial_idx: part = sympy.diff(part, wrt_var) else: - partial_target = target[:j+1] + partial_target = target[: j + 1] if partial_target in partial_derivs: part = partial_derivs[partial_target] else: diff --git a/pyomo/core/expr/cnf_walker.py b/pyomo/core/expr/cnf_walker.py index 87b93d4d31f..a7bf61bef5a 100644 --- a/pyomo/core/expr/cnf_walker.py +++ b/pyomo/core/expr/cnf_walker.py @@ -9,150 +9,43 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - from pyomo.common import DeveloperError from pyomo.common.collections import ComponentMap from pyomo.common.dependencies import attempt_import -from pyomo.core.expr.logical_expr import ( - AndExpression, EquivalenceExpression, equivalent, ImplicationExpression, - implies, land, lnot, lor, NotExpression, - OrExpression, special_boolean_atom_types, XorExpression, -) +from pyomo.core.expr.logical_expr import special_boolean_atom_types from pyomo.core.expr.numvalue import native_types, value -from pyomo.core.expr.visitor import StreamBasedExpressionVisitor - -_operatorMap = {} -_pyomo_operator_map = {} - -def _configure_sympy(sympy, available): - if not available: - return - - _operatorMap.update({ - sympy.Or: lor, - sympy.And: land, - sympy.Implies: implies, - sympy.Equivalent: equivalent, - sympy.Not: lnot, - }) - - _pyomo_operator_map.update({ - AndExpression: sympy.And, - OrExpression: sympy.Or, - ImplicationExpression: sympy.Implies, - EquivalenceExpression: sympy.Equivalent, - XorExpression: sympy.Xor, - NotExpression: sympy.Not, - }) - -sympy, _sympy_available = attempt_import('sympy', callback=_configure_sympy) - - -class _PyomoSympyLogicalBimap(object): - def __init__(self): - self.pyomo2sympy = ComponentMap() - self.sympy2pyomo = {} - self.i = 0 - - def getPyomoSymbol(self, sympy_object, default=None): - return self.sympy2pyomo.get(sympy_object, default) - - def getSympySymbol(self, pyomo_object): - if pyomo_object in self.pyomo2sympy: - return self.pyomo2sympy[pyomo_object] - # Pyomo currently ONLY supports Real variables (not complex - # variables). If that ever changes, then we will need to - # revisit hard-coding the symbol type here - sympy_obj = sympy.Symbol("x%d" % self.i, real=True) - self.i += 1 - self.pyomo2sympy[pyomo_object] = sympy_obj - self.sympy2pyomo[sympy_obj] = pyomo_object - return sympy_obj - - def sympyVars(self): - return self.sympy2pyomo.keys() - - -class _Pyomo2SympyVisitor(StreamBasedExpressionVisitor): +from pyomo.core.expr.sympy_tools import ( + Pyomo2SympyVisitor, + PyomoSympyBimap, + sympy, + sympy2pyomo_expression, +) + +class CNF_Pyomo2SympyVisitor(Pyomo2SympyVisitor): def __init__(self, object_map, bool_varlist): - sympy.Add # this ensures _configure_sympy gets run - super(_Pyomo2SympyVisitor, self).__init__() - self.object_map = object_map + super().__init__(object_map) self.boolean_variable_list = bool_varlist self.special_atom_map = ComponentMap() - def exitNode(self, node, values): - _op = _pyomo_operator_map.get(node.__class__, None) - if _op is None: - if node.__class__ in special_boolean_atom_types: - raise ValueError("Encountered special atom class '%s' in root node" % node.__class__) - return node._apply_operation(values) - else: - return _op(*tuple(values)) - def beforeChild(self, node, child, child_idx): - # - # Don't replace native or sympy types - # - if type(child) in native_types: - return False, child - # - # We will descend into all expressions... - # - if child.is_expression_type(): + descend, result = super().beforeChild(node, child, child_idx) + if descend: if child.__class__ in special_boolean_atom_types: indicator_var = self.boolean_variable_list.add() self.special_atom_map[indicator_var] = child return False, self.object_map.getSympySymbol(indicator_var) - else: - return True, None - # - # Replace pyomo variables with sympy variables - # - if child.is_potentially_variable(): - return False, self.object_map.getSympySymbol(child) - # - # Everything else is a constant... - # - return False, value(child) - - -class _Sympy2PyomoVisitor(StreamBasedExpressionVisitor): - - def __init__(self, object_map): - sympy.Add # this ensures _configure_sympy gets run - super(_Sympy2PyomoVisitor, self).__init__() - self.object_map = object_map - - def enterNode(self, node): - return (node.args, []) - - def exitNode(self, node, values): - """ Visit nodes that have been expanded """ - _sympyOp = node - _op = _operatorMap.get( type(_sympyOp), None ) - if _op is None: - raise DeveloperError( - "sympy expression type '%s' not found in the operator " - "map" % type(_sympyOp) ) - return _op(*tuple(values)) - - def beforeChild(self, node, child, child_idx): - if not child.args: - item = self.object_map.getPyomoSymbol(child, None) - if item is None: - item = float(child.evalf()) - return False, item - return True, None + return descend, result def to_cnf(expr, bool_varlist=None, bool_var_to_special_atoms=None): """Converts a Pyomo logical constraint to CNF form. - Note: the atoms AtMostExpression, AtLeastExpression, and ExactlyExpression - require special treatment if they are not the root node, or if their children are not atoms, - e.g. atmost(2, Y1, Y1 | Y2, Y2, Y3) + Note: the atoms AtMostExpression, AtLeastExpression, and + ExactlyExpression require special treatment if they are not the root + node, or if their children are not atoms, e.g. + + atmost(2, Y1, Y1 | Y2, Y2, Y3) As a result, the model may need to be augmented with additional boolean indicator variables and logical propositions. @@ -173,7 +66,9 @@ def to_cnf(expr, bool_varlist=None, bool_var_to_special_atoms=None): if type(expr) in special_boolean_atom_types: # If root node is one of the special atoms, recursively convert its # children nodes to CNF. - return _convert_children_to_literals(expr, bool_varlist, bool_var_to_special_atoms) + return _convert_children_to_literals( + expr, bool_varlist, bool_var_to_special_atoms + ) # If root node is not an expression, just return it. if type(expr) in native_types or not expr.is_expression_type(): @@ -181,25 +76,35 @@ def to_cnf(expr, bool_varlist=None, bool_var_to_special_atoms=None): # While performing conversion to sympy, substitute new boolean variables for # non-root special atoms. - pyomo_sympy_map = _PyomoSympyLogicalBimap() - bool_var_to_special_atoms = ComponentMap() if bool_var_to_special_atoms is None else bool_var_to_special_atoms - visitor = _Pyomo2SympyVisitor(pyomo_sympy_map, bool_varlist) + pyomo_sympy_map = PyomoSympyBimap() + bool_var_to_special_atoms = ( + ComponentMap() + if bool_var_to_special_atoms is None + else bool_var_to_special_atoms + ) + visitor = CNF_Pyomo2SympyVisitor(pyomo_sympy_map, bool_varlist) sympy_expr = visitor.walk_expression(expr) new_statements = [] - # If visitor encountered any special atoms in non-root node, ensure that their children are literals: + # If visitor encountered any special atoms in non-root node, ensure + # that their children are literals: for indicator_var, special_atom in visitor.special_atom_map.items(): atom_cnf = _convert_children_to_literals( - special_atom, bool_varlist, bool_var_to_special_atoms) + special_atom, bool_varlist, bool_var_to_special_atoms + ) bool_var_to_special_atoms[indicator_var] = atom_cnf[0] new_statements.extend(atom_cnf[1:]) - cnf_form = sympy.to_cnf(sympy_expr) - return [_sympy2pyomo_expression(cnf_form, pyomo_sympy_map)] + new_statements # additional statements + return [ + sympy2pyomo_expression(cnf_form, pyomo_sympy_map) + ] + new_statements # additional statements -def _convert_children_to_literals(special_atom, bool_varlist, bool_var_to_special_atoms): - """If the child logical constraints are not literals, substitute augmented boolean variables. +def _convert_children_to_literals( + special_atom, bool_varlist, bool_var_to_special_atoms +): + """If the child logical constraints are not literals, substitute + augmented boolean variables. Same return types as to_cnf() function. @@ -216,10 +121,16 @@ def _convert_children_to_literals(special_atom, bool_varlist, bool_var_to_specia need_new_expression = True new_indicator = bool_varlist.add() if type(child) in special_boolean_atom_types: - child_cnf = _convert_children_to_literals(child, bool_varlist, bool_var_to_special_atoms) + child_cnf = _convert_children_to_literals( + child, bool_varlist, bool_var_to_special_atoms + ) bool_var_to_special_atoms[new_indicator] = child_cnf[0] else: - child_cnf = to_cnf(new_indicator.equivalent_to(child), bool_varlist, bool_var_to_special_atoms) + child_cnf = to_cnf( + new_indicator.equivalent_to(child), + bool_varlist, + bool_var_to_special_atoms, + ) new_statements.append(child_cnf[0]) new_args.append(new_indicator) new_statements.extend(child_cnf[1:]) @@ -228,11 +139,3 @@ def _convert_children_to_literals(special_atom, bool_varlist, bool_var_to_specia return [new_atom_with_literals] + new_statements else: return [special_atom] - - -def _sympy2pyomo_expression(expr, object_map): - visitor = _Sympy2PyomoVisitor(object_map) - is_expr, ans = visitor.beforeChild(None, expr, None) - if not is_expr: - return ans - return visitor.walk_expression(expr) diff --git a/pyomo/core/expr/compare.py b/pyomo/core/expr/compare.py index 9ae85241440..ec8d56896b8 100644 --- a/pyomo/core/expr/compare.py +++ b/pyomo/core/expr/compare.py @@ -8,29 +8,41 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +import collections from .visitor import StreamBasedExpressionVisitor from .numvalue import nonpyomo_leaf_types -from .numeric_expr import ( - LinearExpression, MonomialTermExpression, SumExpression, ExpressionBase, - ProductExpression, DivisionExpression, PowExpression, - NegationExpression, UnaryFunctionExpression, ExternalFunctionExpression, - NPV_ProductExpression, NPV_DivisionExpression, NPV_PowExpression, - NPV_SumExpression, NPV_NegationExpression, NPV_UnaryFunctionExpression, - NPV_ExternalFunctionExpression, Expr_ifExpression, AbsExpression, - NPV_AbsExpression, NumericValue) -from pyomo.core.expr.logical_expr import ( - RangedExpression, InequalityExpression, EqualityExpression +from pyomo.core.expr import ( + LinearExpression, + MonomialTermExpression, + SumExpression, + ExpressionBase, + ProductExpression, + DivisionExpression, + PowExpression, + NegationExpression, + UnaryFunctionExpression, + ExternalFunctionExpression, + NPV_ProductExpression, + NPV_DivisionExpression, + NPV_PowExpression, + NPV_SumExpression, + NPV_NegationExpression, + NPV_UnaryFunctionExpression, + NPV_ExternalFunctionExpression, + Expr_ifExpression, + AbsExpression, + NPV_AbsExpression, + NumericValue, + RangedExpression, + InequalityExpression, + EqualityExpression, + GetItemExpression, ) from typing import List +from pyomo.common.collections import Sequence from pyomo.common.errors import PyomoException - - -def handle_linear_expression(node: LinearExpression, pn: List): - pn.append((type(node), 2*len(node.linear_vars) + 1)) - pn.append(node.constant) - pn.extend(node.linear_coefs) - pn.extend(node.linear_vars) - return tuple() +from pyomo.common.formatting import tostr +from pyomo.common.numeric_types import native_types def handle_expression(node: ExpressionBase, pn: List): @@ -41,7 +53,7 @@ def handle_expression(node: ExpressionBase, pn: List): def handle_named_expression(node, pn: List, include_named_exprs=True): if include_named_exprs: pn.append((type(node), 1)) - return (node.expr, ) + return (node.expr,) def handle_unary_expression(node: UnaryFunctionExpression, pn: List): @@ -54,29 +66,19 @@ def handle_external_function_expression(node: ExternalFunctionExpression, pn: Li return node.args -handler = dict() -handler[LinearExpression] = handle_linear_expression -handler[SumExpression] = handle_expression -handler[MonomialTermExpression] = handle_expression -handler[ProductExpression] = handle_expression -handler[DivisionExpression] = handle_expression -handler[PowExpression] = handle_expression -handler[NegationExpression] = handle_expression -handler[NPV_ProductExpression] = handle_expression -handler[NPV_DivisionExpression] = handle_expression -handler[NPV_PowExpression] = handle_expression -handler[NPV_SumExpression] = handle_expression -handler[NPV_NegationExpression] = handle_expression +def _generic_expression_handler(): + return handle_expression + + +handler = collections.defaultdict(_generic_expression_handler) + handler[UnaryFunctionExpression] = handle_unary_expression handler[NPV_UnaryFunctionExpression] = handle_unary_expression handler[ExternalFunctionExpression] = handle_external_function_expression handler[NPV_ExternalFunctionExpression] = handle_external_function_expression -handler[Expr_ifExpression] = handle_expression handler[AbsExpression] = handle_unary_expression handler[NPV_AbsExpression] = handle_unary_expression handler[RangedExpression] = handle_expression -handler[InequalityExpression] = handle_expression -handler[EqualityExpression] = handle_expression class PrefixVisitor(StreamBasedExpressionVisitor): @@ -97,7 +99,12 @@ def enterNode(self, node): if node.is_expression_type(): if node.is_named_expression_type(): - return handle_named_expression(node, self._result, self._include_named_exprs), None + return ( + handle_named_expression( + node, self._result, self._include_named_exprs + ), + None, + ) else: return handler[ntype](node, self._result), None else: @@ -154,13 +161,14 @@ def convert_expression_to_prefix_notation(expr, include_named_exprs=True): """ visitor = PrefixVisitor(include_named_exprs=include_named_exprs) - return visitor.walk_expression(expr) + if isinstance(expr, Sequence): + return expr.__class__(visitor.walk_expression(e) for e in expr) + else: + return visitor.walk_expression(expr) def compare_expressions(expr1, expr2, include_named_exprs=True): - """ - Returns True if 2 expression trees are identical. Returns False - otherwise. + """Returns True if 2 expression trees are identical, False otherwise. Parameters ---------- @@ -169,9 +177,10 @@ def compare_expressions(expr1, expr2, include_named_exprs=True): expr2: NumericValue A Pyomo Var, Param, or expression include_named_exprs: bool - If False, then named expressions will be ignored. In other words, this function - will return True if one expression has a named expression and the other does not - as long as the rest of the expression trees are identical. + If False, then named expressions will be ignored. In other + words, this function will return True if one expression has a + named expression and the other does not as long as the rest of + the expression trees are identical. Returns ------- @@ -179,10 +188,116 @@ def compare_expressions(expr1, expr2, include_named_exprs=True): A bool indicating whether or not the expressions are identical. """ - pn1 = convert_expression_to_prefix_notation(expr1, include_named_exprs=include_named_exprs) - pn2 = convert_expression_to_prefix_notation(expr2, include_named_exprs=include_named_exprs) + pn1 = convert_expression_to_prefix_notation( + expr1, include_named_exprs=include_named_exprs + ) + pn2 = convert_expression_to_prefix_notation( + expr2, include_named_exprs=include_named_exprs + ) try: res = pn1 == pn2 except PyomoException: res = False return res + + +def assertExpressionsEqual(test, a, b, include_named_exprs=True, places=None): + """unittest-based assertion for comparing expressions + + This converts the expressions `a` and `b` into prefix notation and + then compares the resulting lists. + + Parameters + ---------- + test: unittest.TestCase + The unittest `TestCase` class that is performing the test. + + a: ExpressionBase or native type + + b: ExpressionBase or native type + + include_named_exprs: bool + If True (the default), the comparison expands all named + expressions when generating the prefix notation + + places: Number of decimal places required for equality of floating + point numbers in the expression. If None (the default), the + expressions must be exactly equal. + """ + prefix_a = convert_expression_to_prefix_notation(a, include_named_exprs) + prefix_b = convert_expression_to_prefix_notation(b, include_named_exprs) + try: + test.assertEqual(len(prefix_a), len(prefix_b)) + for _a, _b in zip(prefix_a, prefix_b): + test.assertIs(_a.__class__, _b.__class__) + if places is None: + test.assertEqual(_a, _b) + else: + test.assertAlmostEqual(_a, _b, places=places) + except (PyomoException, AssertionError): + test.fail( + f"Expressions not equal:\n\t" + f"{tostr(prefix_a)}\n\t!=\n\t{tostr(prefix_b)}" + ) + + +def assertExpressionsStructurallyEqual( + test, a, b, include_named_exprs=True, places=None +): + """unittest-based assertion for comparing expressions + + This converts the expressions `a` and `b` into prefix notation and + then compares the resulting lists. Operators and (non-native type) + leaf nodes in the prefix representation are converted to strings + before comparing (so that things like variables can be compared + across clones or pickles) + + Parameters + ---------- + test: unittest.TestCase + The unittest `TestCase` class that is performing the test. + + a: ExpressionBase or native type + + b: ExpressionBase or native type + + include_named_exprs: bool + If True (the default), the comparison expands all named + expressions when generating the prefix notation + + """ + prefix_a = convert_expression_to_prefix_notation(a, include_named_exprs) + prefix_b = convert_expression_to_prefix_notation(b, include_named_exprs) + # Convert leaf nodes and operators to their string equivalents + for prefix in (prefix_a, prefix_b): + for i, v in enumerate(prefix): + if type(v) in native_types: + continue + if type(v) is tuple: + # This is an expression node. Most expression nodes are + # 2-tuples (node type, nargs), but some are 3-tuples + # with supplemental data. The biggest problem is + # external functions, where the third element is the + # external function. We need to convert that to a + # string to support "structural" comparisons. + if len(v) == 3: + prefix[i] = v[:2] + (str(v[2]),) + continue + # This should be a leaf node (Var, mutable Param, etc.). + # Convert to string to support "structural" comparison + # (e.g., across clones) + prefix[i] = str(v) + try: + test.assertEqual(len(prefix_a), len(prefix_b)) + for _a, _b in zip(prefix_a, prefix_b): + if _a.__class__ not in native_types and _b.__class__ not in native_types: + test.assertIs(_a.__class__, _b.__class__) + if places is None: + test.assertEqual(_a, _b) + else: + test.assertAlmostEqual(_a, _b, places=places) + except (PyomoException, AssertionError): + test.fail( + f"Expressions not structurally equal:\n\t" + f"{tostr(prefix_a)}\n\t!=\n\t{tostr(prefix_b)}" + ) diff --git a/pyomo/core/expr/current.py b/pyomo/core/expr/current.py index 34d984cddd6..3765a8ed871 100644 --- a/pyomo/core/expr/current.py +++ b/pyomo/core/expr/current.py @@ -9,221 +9,156 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from __future__ import division +import enum import math -# -# Common intrinsic functions -# -from pyomo.core.expr import expr_common as common +from pyomo.common.deprecation import deprecation_warning -# -# Provide a global value that indicates which expression system is being used -# -class Mode(object): - pyomo5_trees = (3,) -_mode = Mode.pyomo5_trees +deprecation_warning( + "pyomo.core.expr.current is deprecated. " + "Please import expression symbols from pyomo.core.expr", + version='6.6.2.dev0', +) # -# Pull symbols from the appropriate expression system -# -from pyomo.core.expr import numvalue as _numvalue -from pyomo.core.expr import boolean_value as _logicalvalue - -# Pyomo5 -if _mode == Mode.pyomo5_trees: - from pyomo.core.expr import numeric_expr as _numeric_expr - from pyomo.core.expr.numeric_expr import (_add, _sub, _mul, _div, _pow, - _neg, _abs, _inplace, _unary, - NumericValue, native_types, - nonpyomo_leaf_types, - native_numeric_types, - as_numeric, value, - evaluate_expression, - expression_to_string, - polynomial_degree, - clone_expression, - sizeof_expression, - _expression_is_fixed, - clone_counter, - nonlinear_expression, - linear_expression, ExpressionBase, - NegationExpression, - NPV_NegationExpression, - ExternalFunctionExpression, - NPV_ExternalFunctionExpression, - PowExpression, NPV_PowExpression, - ProductExpression, - NPV_ProductExpression, - MonomialTermExpression, - DivisionExpression, - NPV_DivisionExpression, - _LinearOperatorExpression, - SumExpressionBase, - NPV_SumExpression, SumExpression, - _MutableSumExpression, - Expr_ifExpression, - UnaryFunctionExpression, - NPV_UnaryFunctionExpression, - AbsExpression, NPV_AbsExpression, - LinearExpression, - _MutableLinearExpression, - decompose_term, - LinearDecompositionError, - _decompose_linear_terms, - _process_arg, - _generate_sum_expression, - _generate_mul_expression, - _generate_other_expression, - _generate_intrinsic_function_expression, - _balanced_parens, - NPV_expression_types) - from pyomo.core.expr import logical_expr as _logical_expr - from pyomo.core.expr.logical_expr import (native_logical_types, BooleanValue, - BooleanConstant, _lt, _le, _eq, - _and, _or, _equiv, _inv, _xor, - _impl, - RangedExpression, - InequalityExpression, inequality, - EqualityExpression, - _generate_relational_expression, - _generate_logical_proposition, - BooleanExpressionBase, lnot, - equivalent, xor, implies, - _flattened, land, lor, exactly, - atmost, atleast, - UnaryBooleanExpression, - NotExpression, - BinaryBooleanExpression, - EquivalenceExpression, - XorExpression, - ImplicationExpression, - NaryBooleanExpression, - _add_to_and_or_expression, - AndExpression, OrExpression, - ExactlyExpression, - AtMostExpression, - AtLeastExpression, - special_boolean_atom_types) - from pyomo.core.expr.template_expr import (TemplateExpressionError, - _NotSpecified, GetItemExpression, - GetAttrExpression, - _TemplateSumExpression_argList, - TemplateSumExpression, - IndexTemplate, resolve_template, - ReplaceTemplateExpression, - substitute_template_expression, - _GetItemIndexer, - substitute_getitem_with_param, - substitute_template_with_value, - _set_iterator_template_generator, - _template_iter_context, - templatize_rule, - templatize_constraint) - from pyomo.core.expr import visitor as _visitor - from pyomo.core.expr.visitor import (SymbolMap, StreamBasedExpressionVisitor, - SimpleExpressionVisitor, - ExpressionValueVisitor, - replace_expressions, - ExpressionReplacementVisitor, - _EvaluationVisitor, - FixedExpressionError, - NonConstantExpressionError, - _EvaluateConstantExpressionVisitor, - _ComponentVisitor, identify_components, - _VariableVisitor, identify_variables, - _MutableParamVisitor, - identify_mutable_parameters, - _PolynomialDegreeVisitor, - _IsFixedVisitor, _ToStringVisitor) - # FIXME: we shouldn't need circular dependencies between modules - _visitor.LinearExpression = _numeric_expr.LinearExpression - _visitor.MonomialTermExpression = _numeric_expr.MonomialTermExpression - _visitor.NPV_expression_types = _numeric_expr.NPV_expression_types - _visitor.clone_counter = _numeric_expr.clone_counter - - # Initialize numvalue functions - _numvalue._generate_sum_expression \ - = _numeric_expr._generate_sum_expression - _numvalue._generate_mul_expression \ - = _numeric_expr._generate_mul_expression - _numvalue._generate_other_expression \ - = _numeric_expr._generate_other_expression - _numvalue._generate_relational_expression \ - = _logical_expr._generate_relational_expression - - # Initialize logicalvalue functions - _logicalvalue._generate_logical_proposition = _logical_expr._generate_logical_proposition -else: - raise ValueError("No other expression systems are supported in Pyomo right now.") #pragma: no cover - - -def Expr_if(IF=None, THEN=None, ELSE=None): - """ - Function used to construct a logical conditional expression. - """ - return Expr_ifExpression(IF_=IF, THEN_=THEN, ELSE_=ELSE) - -# -# NOTE: abs() and pow() are not defined here, because they are -# Python operators. +# Common intrinsic functions # -def ceil(arg): - return _generate_intrinsic_function_expression(arg, 'ceil', math.ceil) - -def floor(arg): - return _generate_intrinsic_function_expression(arg, 'floor', math.floor) - -# e ** x -def exp(arg): - return _generate_intrinsic_function_expression(arg, 'exp', math.exp) - -def log(arg): - return _generate_intrinsic_function_expression(arg, 'log', math.log) - -def log10(arg): - return _generate_intrinsic_function_expression(arg, 'log10', math.log10) - -# FIXME: this is nominally the same as x ** 0.5, but follows a different -# path and produces a different NL file! -def sqrt(arg): - return _generate_intrinsic_function_expression(arg, 'sqrt', math.sqrt) -# return _generate_expression(common._pow, arg, 0.5) - - -def sin(arg): - return _generate_intrinsic_function_expression(arg, 'sin', math.sin) - -def cos(arg): - return _generate_intrinsic_function_expression(arg, 'cos', math.cos) - -def tan(arg): - return _generate_intrinsic_function_expression(arg, 'tan', math.tan) - -def sinh(arg): - return _generate_intrinsic_function_expression(arg, 'sinh', math.sinh) - -def cosh(arg): - return _generate_intrinsic_function_expression(arg, 'cosh', math.cosh) - -def tanh(arg): - return _generate_intrinsic_function_expression(arg, 'tanh', math.tanh) - - -def asin(arg): - return _generate_intrinsic_function_expression(arg, 'asin', math.asin) - -def acos(arg): - return _generate_intrinsic_function_expression(arg, 'acos', math.acos) - -def atan(arg): - return _generate_intrinsic_function_expression(arg, 'atan', math.atan) - -def asinh(arg): - return _generate_intrinsic_function_expression(arg, 'asinh', math.asinh) - -def acosh(arg): - return _generate_intrinsic_function_expression(arg, 'acosh', math.acosh) - -def atanh(arg): - return _generate_intrinsic_function_expression(arg, 'atanh', math.atanh) +import pyomo.core.expr.expr_common as common +from pyomo.core.expr.expr_common import clone_counter, _mode + +from pyomo.core.expr import ( + Mode, + # from pyomo.core.expr.base + ExpressionBase, + # pyomo.core.expr.visitor + evaluate_expression, + expression_to_string, + polynomial_degree, + clone_expression, + sizeof_expression, + # pyomo.core.expr.numeric_expr + NumericExpression, + NumericValue, + native_types, + nonpyomo_leaf_types, + native_numeric_types, + value, + nonlinear_expression, + linear_expression, + NegationExpression, + NPV_NegationExpression, + ExternalFunctionExpression, + NPV_ExternalFunctionExpression, + PowExpression, + NPV_PowExpression, + ProductExpression, + NPV_ProductExpression, + MonomialTermExpression, + DivisionExpression, + NPV_DivisionExpression, + SumExpressionBase, + NPV_SumExpression, + SumExpression, + Expr_ifExpression, + NPV_Expr_ifExpression, + UnaryFunctionExpression, + NPV_UnaryFunctionExpression, + AbsExpression, + NPV_AbsExpression, + LinearExpression, + decompose_term, + LinearDecompositionError, + NPV_expression_types, + Expr_if, + ceil, + floor, + exp, + log, + log10, + sqrt, + sin, + cos, + tan, + sinh, + cosh, + tanh, + asin, + acos, + atan, + asinh, + acosh, + atanh, + # pyomo.core.expr.numvalue + as_numeric, + # pyomo.core.expr.logical_expr + native_logical_types, + BooleanValue, + BooleanConstant, + BooleanExpressionBase, + lnot, + equivalent, + xor, + implies, + land, + lor, + exactly, + atmost, + atleast, + UnaryBooleanExpression, + NotExpression, + BinaryBooleanExpression, + EquivalenceExpression, + XorExpression, + ImplicationExpression, + NaryBooleanExpression, + AndExpression, + OrExpression, + ExactlyExpression, + AtMostExpression, + AtLeastExpression, + special_boolean_atom_types, + # pyomo.core.expr.relational_expr + RelationalExpression, + RangedExpression, + InequalityExpression, + EqualityExpression, + inequality, + # pyomo.core.expr.template_expr + TemplateExpressionError, + GetItemExpression, + Numeric_GetItemExpression, + Boolean_GetItemExpression, + Structural_GetItemExpression, + NPV_Numeric_GetItemExpression, + NPV_Boolean_GetItemExpression, + NPV_Structural_GetItemExpression, + GetAttrExpression, + Numeric_GetAttrExpression, + Boolean_GetAttrExpression, + Structural_GetAttrExpression, + NPV_Numeric_GetAttrExpression, + NPV_Boolean_GetAttrExpression, + NPV_Structural_GetAttrExpression, + CallExpression, + TemplateSumExpression, + IndexTemplate, + resolve_template, + ReplaceTemplateExpression, + substitute_template_expression, + substitute_getitem_with_param, + substitute_template_with_value, + templatize_rule, + templatize_constraint, + # pyomo.core.expr.visitor + SymbolMap, + StreamBasedExpressionVisitor, + SimpleExpressionVisitor, + ExpressionValueVisitor, + replace_expressions, + ExpressionReplacementVisitor, + FixedExpressionError, + NonConstantExpressionError, + identify_components, + identify_variables, + identify_mutable_parameters, +) diff --git a/pyomo/core/expr/expr_common.py b/pyomo/core/expr/expr_common.py index 29832d6b3cd..98c9a433994 100644 --- a/pyomo/core/expr/expr_common.py +++ b/pyomo/core/expr/expr_common.py @@ -9,48 +9,12 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -TO_STRING_VERBOSE=False - -_add = 1 -_sub = 2 -_mul = 3 -_div = 4 -_pow = 5 -_neg = 6 -_abs = 7 -_inplace = 10 -_unary = _neg - -_radd = -_add -_iadd = _inplace+_add -_rsub = -_sub -_isub = _inplace+_sub -_rmul = -_mul -_imul = _inplace+_mul -_rdiv = -_div -_idiv = _inplace+_div -_rpow = -_pow -_ipow = _inplace+_pow - -_old_etype_strings = { - 'add' : _add, - 'radd' : -_add, - 'iadd' : _inplace+_add, - 'sub' : _sub, - 'rsub' : -_sub, - 'isub' : _inplace+_sub, - 'mul' : _mul, - 'rmul' : -_mul, - 'imul' : _inplace+_mul, - 'div' : _div, - 'rdiv' : -_div, - 'idiv' : _inplace+_div, - 'pow' : _pow, - 'rpow' : -_pow, - 'ipow' : _inplace+_pow, - 'neg' : _neg, - 'abs' : _abs, - } +import enum + +from pyomo.common.backports import nullcontext +from pyomo.common.deprecation import deprecated + +TO_STRING_VERBOSE = False _eq = 0 _le = 1 @@ -63,3 +27,84 @@ _equiv = 3 _xor = 4 _impl = 5 + + +# +# Provide a global value that indicates which expression system is being used +# +class Mode(enum.IntEnum): + # coopr: Original Coopr/Pyomo expression system + coopr_trees = 1 + # coopr3: leverage reference counts to reduce the amount of required + # expression cloning to ensure independent expression trees. + coopr3_trees = 3 + # pyomo4: rework the expression system to remove reliance on + # reference counting. This enables pypy support (which doesn't have + # reference counting). This version never became the default. + pyomo4_trees = 4 + # pyomo5: refinement of pyomo4. Expressions are now immutable by + # contract, which tolerates "entangled" expression trees. Added + # specialized classes for NPV expressions and LinearExpressions. + pyomo5_trees = 5 + # pyomo6: refinement of pyomo5 expression generation to leverage + # multiple dispatch. Standardized expression storage and argument + # handling (significant rework of the LinearExpression structure). + pyomo6_trees = 6 + # + CURRENT = pyomo6_trees + + +_mode = Mode.CURRENT +# We no longer support concurrent expression systems. _mode is left +# primarily so we can support expression system-specific baselines +assert _mode == Mode.pyomo6_trees + + +class OperatorAssociativity(enum.IntEnum): + """Enum for indicating the associativity of an operator. + + LEFT_TO_RIGHT(1) if this operator is left-to-right associative or + RIGHT_TO_LEFT(-1) if it is right-to-left associative. Any other + values will be interpreted as "not associative" (implying any + arguments that are at this operator's PRECEDENCE will be enclosed + in parens). + + """ + + RIGHT_TO_LEFT = -1 + NON_ASSOCIATIVE = 0 + LEFT_TO_RIGHT = 1 + + +class ExpressionType(enum.Enum): + NUMERIC = 0 + RELATIONAL = 1 + LOGICAL = 2 + + +@deprecated( + """The clone counter has been removed and will always return 0. + +Beginning with Pyomo5 expressions, expression cloning (detangling) no +longer occurs automatically within expression generation. As a result, +the 'clone counter' has lost its utility and is no longer supported. +This context manager will always report 0.""", + version='6.4.3', +) +class clone_counter(nullcontext): + """Context manager for counting cloning events. + + This context manager counts the number of times that the + :func:`clone_expression ` + function is executed. + """ + + _count = 0 + + def __init__(self): + super().__init__(enter_result=self) + + @property + def count(self): + """A property that returns the clone count value.""" + return clone_counter._count diff --git a/pyomo/core/expr/expr_errors.py b/pyomo/core/expr/expr_errors.py index b5fd85b164d..cc540c69b47 100644 --- a/pyomo/core/expr/expr_errors.py +++ b/pyomo/core/expr/expr_errors.py @@ -9,8 +9,11 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -class TemplateExpressionError(ValueError): +from pyomo.common.deprecation import relocated_module_attribute - def __init__(self, template, *args, **kwds): - self.template = template - super(TemplateExpressionError, self).__init__(*args, **kwds) +relocated_module_attribute( + 'TemplateExpressionError', + 'pyomo.common.errors.TemplateExpressionError', + version='6.6.2.dev0', + f_globals=globals(), +) diff --git a/pyomo/core/expr/logical_expr.py b/pyomo/core/expr/logical_expr.py index 21070e2ecbc..e5a2f411a6e 100644 --- a/pyomo/core/expr/logical_expr.py +++ b/pyomo/core/expr/logical_expr.py @@ -10,7 +10,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - from __future__ import division import types @@ -20,8 +19,13 @@ import traceback logger = logging.getLogger('pyomo.core') + from pyomo.common.errors import PyomoException, DeveloperError -from pyomo.common.deprecation import deprecation_warning +from pyomo.common.deprecation import ( + deprecation_warning, + RenamedClass, + relocated_module_attribute, +) from .numvalue import ( native_types, native_numeric_types, @@ -30,388 +34,49 @@ value, is_potentially_variable, ) +from .base import ExpressionBase +from .boolean_value import BooleanValue, BooleanConstant +from .expr_common import _and, _or, _equiv, _inv, _xor, _impl, ExpressionType -from .boolean_value import ( - BooleanValue, - BooleanConstant, -) - -from .expr_common import ( - _lt, _le, - _eq, - _and, _or, _equiv, _inv, _xor, _impl) - -from .visitor import ( - evaluate_expression, expression_to_string, polynomial_degree, - clone_expression, sizeof_expression, _expression_is_fixed -) - -from .numeric_expr import _LinearOperatorExpression, _process_arg import operator - -#------------------------------------------------------- -# -# Expression classes -# -#------------------------------------------------------- - -class RangedExpression(_LinearOperatorExpression): - """ - Ranged expressions, which define relations with a lower and upper bound:: - - x < y < z - x <= y <= z - - args: - args (tuple): child nodes - strict (tuple): flags that indicates whether the inequalities are strict - """ - - __slots__ = ('_strict',) - PRECEDENCE = 9 - - # Shared tuples for the most common RangedExpression objects encountered - # in math programming. Creating a single (shared) tuple saves memory - STRICT = { - False: (False, False), - True: (True, True), - (True, True): (True, True), - (False, False): (False, False), - (True, False): (True, False), - (False, True): (False, True), - } - - def __init__(self, args, strict): - super(RangedExpression, self).__init__(args) - self._strict = RangedExpression.STRICT[strict] - - def nargs(self): - return 3 - - def create_node_with_local_data(self, args): - return self.__class__(args, self._strict) - - def __getstate__(self): - state = super(RangedExpression, self).__getstate__() - for i in RangedExpression.__slots__: - state[i] = getattr(self, i) - return state - - def __bool__(self): - if self.is_constant(): - return bool(self()) - raise PyomoException(""" -Cannot convert non-constant Pyomo expression (%s) to bool. -This error is usually caused by using a Var, unit, or mutable Param in a -Boolean context such as an "if" statement, or when checking container -membership or equality. For example, - >>> m.x = Var() - >>> if m.x >= 1: - ... pass -and - >>> m.y = Var() - >>> if m.y in [m.x, m.y]: - ... pass -would both cause this exception.""".strip() % (self,)) - - def is_relational(self): - return True - - def _precedence(self): - return RangedExpression.PRECEDENCE - - def _apply_operation(self, result): - _l, _b, _r = result - if not self._strict[0]: - if not self._strict[1]: - return _l <= _b and _b <= _r - else: - return _l <= _b and _b < _r - elif not self._strict[1]: - return _l < _b and _b <= _r - else: - return _l < _b and _b < _r - - def _to_string(self, values, verbose, smap, compute_values): - return "{0} {1} {2} {3} {4}".format(values[0], '<' if self._strict[0] else '<=', values[1], '<' if self._strict[1] else '<=', values[2]) - - def is_constant(self): - return all(arg is None - or arg.__class__ in native_numeric_types - or arg.is_constant() - for arg in self._args_) - - def is_potentially_variable(self): - return any(map(is_potentially_variable, self._args_)) - - @property - def strict(self): - return self._strict - - -class InequalityExpression(_LinearOperatorExpression): - """ - Inequality expressions, which define less-than or - less-than-or-equal relations:: - - x < y - x <= y - - args: - args (tuple): child nodes - strict (bool): a flag that indicates whether the inequality is strict - """ - - __slots__ = ('_strict',) - PRECEDENCE = 9 - - def __init__(self, args, strict): - super(InequalityExpression,self).__init__(args) - self._strict = strict - - def nargs(self): - return 2 - - def create_node_with_local_data(self, args): - return self.__class__(args, self._strict) - - def __getstate__(self): - state = super(InequalityExpression, self).__getstate__() - for i in InequalityExpression.__slots__: - state[i] = getattr(self, i) - return state - - def __bool__(self): - if self.is_constant(): - return bool(self()) - raise PyomoException(""" -Cannot convert non-constant Pyomo expression (%s) to bool. -This error is usually caused by using a Var, unit, or mutable Param in a -Boolean context such as an "if" statement, or when checking container -membership or equality. For example, - >>> m.x = Var() - >>> if m.x >= 1: - ... pass -and - >>> m.y = Var() - >>> if m.y in [m.x, m.y]: - ... pass -would both cause this exception.""".strip() % (self,)) - - def is_relational(self): - return True - - def _precedence(self): - return InequalityExpression.PRECEDENCE - - def _apply_operation(self, result): - _l, _r = result - if self._strict: - return _l < _r - return _l <= _r - - def _to_string(self, values, verbose, smap, compute_values): - if len(values) == 2: - return "{0} {1} {2}".format(values[0], '<' if self._strict else '<=', values[1]) - - def is_constant(self): - return all(arg is None - or arg.__class__ in native_numeric_types - or arg.is_constant() - for arg in self._args_) - - def is_potentially_variable(self): - return any(map(is_potentially_variable, self._args_)) - - @property - def strict(self): - return self._strict - - -def inequality(lower=None, body=None, upper=None, strict=False): - """ - A utility function that can be used to declare inequality and - ranged inequality expressions. The expression:: - - inequality(2, model.x) - - is equivalent to the expression:: - - 2 <= model.x - - The expression:: - - inequality(2, model.x, 3) - - is equivalent to the expression:: - - 2 <= model.x <= 3 - - .. note:: This ranged inequality syntax is deprecated in Pyomo. - This function provides a mechanism for expressing - ranged inequalities without chained inequalities. - - args: - lower: an expression defines a lower bound - body: an expression defines the body of a ranged constraint - upper: an expression defines an upper bound - strict (bool): A boolean value that indicates whether the inequality - is strict. Default is :const:`False`. - - Returns: - A relational expression. The expression is an inequality - if any of the values :attr:`lower`, :attr:`body` or - :attr:`upper` is :const:`None`. Otherwise, the expression - is a ranged inequality. - """ - if lower is None: - if body is None or upper is None: - raise ValueError("Invalid inequality expression.") - return InequalityExpression((body, upper), strict) - if body is None: - if lower is None or upper is None: - raise ValueError("Invalid inequality expression.") - return InequalityExpression((lower, upper), strict) - if upper is None: - return InequalityExpression((lower, body), strict) - return RangedExpression((lower, body, upper), strict) - - -class EqualityExpression(_LinearOperatorExpression): - """ - Equality expression:: - - x == y - """ - - __slots__ = () - PRECEDENCE = 9 - - def nargs(self): - return 2 - - def __bool__(self): - lhs, rhs = self.args - if lhs is rhs: - return True - if self.is_constant(): - return bool(self()) - raise PyomoException(""" -Cannot convert non-constant Pyomo expression (%s) to bool. -This error is usually caused by using a Var, unit, or mutable Param in a -Boolean context such as an "if" statement, or when checking container -membership or equality. For example, - >>> m.x = Var() - >>> if m.x >= 1: - ... pass -and - >>> m.y = Var() - >>> if m.y in [m.x, m.y]: - ... pass -would both cause this exception.""".strip() % (self,)) - - def is_relational(self): - return True - - def _precedence(self): - return EqualityExpression.PRECEDENCE - - def _apply_operation(self, result): - _l, _r = result - return _l == _r - - def _to_string(self, values, verbose, smap, compute_values): - return "{0} == {1}".format(values[0], values[1]) - - def is_constant(self): - return self._args_[0].is_constant() and self._args_[1].is_constant() - - def is_potentially_variable(self): - return any(map(is_potentially_variable, self._args_)) - - -def _generate_relational_expression(etype, lhs, rhs): - rhs_is_relational = False - lhs_is_relational = False - - constant_lhs = True - constant_rhs = True - - if lhs is not None and lhs.__class__ not in native_numeric_types: - lhs = _process_arg(lhs) - # Note: _process_arg can return a native type - if lhs is not None and lhs.__class__ not in native_numeric_types: - lhs_is_relational = lhs.is_relational() - constant_lhs = False - if rhs is not None and rhs.__class__ not in native_numeric_types: - rhs = _process_arg(rhs) - # Note: _process_arg can return a native type - if rhs is not None and rhs.__class__ not in native_numeric_types: - rhs_is_relational = rhs.is_relational() - constant_rhs = False - - if constant_lhs and constant_rhs: - if etype == _eq: - return lhs == rhs - elif etype == _le: - return lhs <= rhs - elif etype == _lt: - return lhs < rhs - else: - raise ValueError("Unknown relational expression type '%s'" % etype) - - if etype == _eq: - if lhs_is_relational or rhs_is_relational: - raise TypeError( - "Cannot create an EqualityExpression where one of the " - "sub-expressions is a relational expression:\n" - " %s\n {==}\n %s" % (lhs, rhs,) - ) - return EqualityExpression((lhs, rhs)) - else: - if etype == _le: - strict = False - elif etype == _lt: - strict = True - else: - raise DeveloperError( - "Unknown relational expression type '%s'" % (etype,)) - if lhs_is_relational: - if lhs.__class__ is InequalityExpression: - if rhs_is_relational: - raise TypeError( - "Cannot create an InequalityExpression where both " - "sub-expressions are relational expressions:\n" - " %s\n {%s}\n %s" - % (lhs, "<" if strict else "<=", rhs,)) - return RangedExpression( - lhs._args_ + (rhs,), (lhs._strict, strict)) - else: - raise TypeError( - "Cannot create an InequalityExpression where one of the " - "sub-expressions is an equality or ranged expression:\n" - " %s\n {%s}\n %s" - % (lhs, "<" if strict else "<=", rhs,)) - elif rhs_is_relational: - if rhs.__class__ is InequalityExpression: - return RangedExpression( - (lhs,) + rhs._args_, (strict, rhs._strict)) - else: - raise TypeError( - "Cannot create an InequalityExpression where one of the " - "sub-expressions is an equality or ranged expression:\n" - " %s\n {%s}\n %s" - % (lhs, "<" if strict else "<=", rhs,)) - else: - return InequalityExpression((lhs, rhs), strict) +relocated_module_attribute( + 'EqualityExpression', + 'pyomo.core.expr.relational_expr.EqualityExpression', + version='6.4.3', + f_globals=globals(), +) +relocated_module_attribute( + 'InequalityExpression', + 'pyomo.core.expr.relational_expr.InequalityExpression', + version='6.4.3', + f_globals=globals(), +) +relocated_module_attribute( + 'RangedExpression', + 'pyomo.core.expr.relational_expr.RangedExpression', + version='6.4.3', + f_globals=globals(), +) +relocated_module_attribute( + 'inequality', + 'pyomo.core.expr.relational_expr.inequality', + version='6.4.3', + f_globals=globals(), +) def _generate_logical_proposition(etype, lhs, rhs): - if lhs.__class__ in native_types and lhs.__class__ not in native_logical_types: - raise TypeError("Cannot create Logical expression with lhs of type '%s'" % lhs.__class__) - if rhs.__class__ in native_types and rhs.__class__ not in native_logical_types and rhs is not None: - raise TypeError("Cannot create Logical expression with rhs of type '%s'" % rhs.__class__) + if ( + lhs.__class__ in native_types and lhs.__class__ not in native_logical_types + ) and not isinstance(lhs, BooleanValue): + return NotImplemented + if ( + (rhs.__class__ in native_types and rhs.__class__ not in native_logical_types) + and not isinstance(rhs, BooleanValue) + and not (rhs is None and etype == _inv) + ): + return NotImplemented if etype == _equiv: return EquivalenceExpression((lhs, rhs)) @@ -427,12 +92,14 @@ def _generate_logical_proposition(etype, lhs, rhs): elif etype == _or: return lor(lhs, rhs) else: - raise ValueError("Unknown logical proposition type '%s'" % etype) # pragma: no cover + raise ValueError( + "Unknown logical proposition type '%s'" % etype + ) # pragma: no cover -class BooleanExpressionBase(BooleanValue): +class BooleanExpression(ExpressionBase, BooleanValue): """ - Logical expressions base expression. + Logical expression base class. This class is used to define nodes in an expression tree. @@ -444,35 +111,12 @@ class BooleanExpressionBase(BooleanValue): """ __slots__ = ('_args_',) + EXPRESSION_SYSTEM = ExpressionType.LOGICAL PRECEDENCE = 0 def __init__(self, args): self._args_ = args - def nargs(self): - """ - Returns the number of child nodes. - """ - raise NotImplementedError( - "Derived expression (%s) failed to " - "implement nargs()" % (str(self.__class__), )) - - def args(self, i): - """ - Return the i-th child node. - - args: - i (int): Nonnegative index of the child that is returned. - - Returns: - The i-th child node. - """ - if i >= self.nargs(): - raise KeyError("Invalid index for expression argsument: %d" % i) - if i < 0: - return self._args_[self.nargs()+i] - return self._args_[i] - @property def args(self): """ @@ -481,290 +125,13 @@ def args(self): Returns: Either a list or tuple (depending on the node storage model) containing only the child nodes of this node """ - return self._args_[:self.nargs()] + return self._args_[: self.nargs()] - def __getstate__(self): - """ - Pickle the expression object - - Returns: - The pickled state. - """ - state = super(BooleanExpressionBase, self).__getstate__() - for i in BooleanExpressionBase.__slots__: - state[i] = getattr(self,i) - return state - - def __call__(self, exception=True): - """ - Evaluate the value of the expression tree. - args: - exception (bool): If :const:`False`, then - an exception raised while evaluating - is captured, and the value returned is - :const:`None`. Default is :const:`True`. - - Returns: - The value of the expression or :const:`None`. - """ - return evaluate_expression(self, exception) - - def __str__(self): - """ - Returns a string description of the expression. - Note: - The value of ``pyomo.core.expr.expr_common.TO_STRING_VERBOSE`` - is used to configure the execution of this method. - If this value is :const:`True`, then the string - representation is a nested function description of the expression. - The default is :const:`False`, which is an algebraic - description of the expression. - - Returns: - A string. - """ - return expression_to_string(self) - - def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): - """ - Return a string representation of the expression tree. - args: - verbose (bool): If :const:`True`, then the the string - representation consists of nested functions. Otherwise, - the string representation is an algebraic equation. - Defaults to :const:`False`. - labeler: An object that generates string labels for - variables in the expression tree. Defaults to :const:`None`. - smap: If specified, this :class:`SymbolMap ` is - used to cache labels for variables. - compute_values (bool): If :const:`True`, then - parameters and fixed variables are evaluated before the - expression string is generated. Default is :const:`False`. - - Returns: - A string representation for the expression tree. - """ - return expression_to_string(self, verbose=verbose, labeler=labeler, smap=smap, compute_values=compute_values) - - def _precedence(self): - return BooleanExpressionBase.PRECEDENCE - - def _associativity(self): - """Return the associativity of this operator. - - Returns 1 if this operator is left-to-right associative or -1 if - it is right-to-left associative. Any other return value will be - interpreted as "not associative" (implying any arguments that - are at this operator's _precedence() will be enclosed in parens). - """ - return 1 - - def _to_string(self, values, verbose, smap, compute_values): #pragma: no cover - """ - Construct a string representation for this node, using the string - representations of its children. - - This method is called by the :class:`_ToStringVisitor - ` class. It must - must be defined in subclasses. - - args: - values (list): The string representations of the children of this - node. - verbose (bool): If :const:`True`, then the the string - representation consists of nested functions. Otherwise, - the string representation is an algebraic equation. - smap: If specified, this :class:`SymbolMap - ` is - used to cache labels for variables. - compute_values (bool): If :const:`True`, then - parameters and fixed variables are evaluated before the - expression string is generated. - - Returns: - A string representation for this node. - """ - raise NotImplementedError( - "Derived expression (%s) failed to " - "implement _to_string()" % (str(self.__class__), )) - - def getname(self, *args, **kwds): #pragma: no cover - """ - Return the text name of a function associated with this expression object. - - In general, no arguments are passed to this function. - - args: - *arg: a variable length list of arguments - **kwds: keyword arguments - - Returns: - A string name for the function. - """ - raise NotImplementedError( - "Derived expression (%s) failed to " - "implement getname()" % (str(self.__class__), )) - - def clone(self, substitute=None): - """ - Return a clone of the expression tree. - - Note: - This method does not clone the leaves of the - tree, which are numeric constants and variables. - It only clones the interior nodes, and - expression leaf nodes like - :class:`_MutableLinearExpression`. - However, named expressions are treated like - leaves, and they are not cloned. - - args: - substitute (dict): a dictionary that maps object ids to clone - objects generated earlier during the cloning process. - - Returns: - A new expression tree. - """ - return clone_expression(self, substitute=substitute) - - def create_node_with_local_data(self, args): - """ - Construct a node using given arguments. - - This method provides a consistent interface for constructing a - node, which is used in tree visitor scripts. In the simplest - case, this simply returns:: - - self.__class__(args) - - But in general this creates an expression object using local - data as well as arguments that represent the child nodes. - - args: - args (list): A list of child nodes for the new expression - object - memo (dict): A dictionary that maps object ids to clone - objects generated earlier during a cloning process. - This argsument is needed to clone objects that are - owned by a model, and it can be safely ignored for - most expression classes. - - Returns: - A new expression object with the same type as the current - class. - """ - return self.__class__(args) - - def is_constant(self): - """Return True if this expression is an atomic constant - - This method contrasts with the is_fixed() method. This method - returns True if the expression is an atomic constant, that is it - is composed exclusively of constants and immutable parameters. - NumericValue objects returning is_constant() == True may be - simplified to their numeric value at any point without warning. - - Note: This defaults to False, but gets redefined in sub-classes. - """ - return False - - def is_fixed(self): - """ - Return :const:`True` if this expression contains no free variables. - - Returns: - A boolean. - """ - return _expression_is_fixed(self) - - def _is_fixed(self, values): - """ - Compute whether this expression is fixed given - the fixed values of its children. - - This method is called by the :class:`_IsFixedVisitor - ` class. It can - be over-written by expression classes to customize this - logic. - args: - values (list): A list of boolean values that indicate whether - the children of this expression are fixed +class BooleanExpressionBase(metaclass=RenamedClass): + __renamed__new_class__ = BooleanExpression + __renamed__version__ = '6.4.3' - Returns: - A boolean that is :const:`True` if the fixed values of the - children are all :const:`True`. - """ - return all(values) - - def is_potentially_variable(self): - """ - Return :const:`True` if this expression might represent - a variable expression. - - This method returns :const:`True` when the expression - tree contains one or more variables - - Returns: - A boolean. Defaults to :const:`True` for expressions. - """ - return True - - def is_expression_type(self): - """ - Return :const:`True` if this object is an expression. - - This method obviously returns :const:`True` for this class, but it - is included in other classes within Pyomo that are not expressions, - which allows for a check for expressions without - evaluating the class type. - - Returns: - A boolean. - """ - return True - - def size(self): - """ - Return the number of nodes in the expression tree. - - Returns: - A nonnegative integer that is the number of interior and leaf - nodes in the expression tree. - """ - return sizeof_expression(self) - - def _apply_operation(self, result): #pragma: no cover - """ - Compute the values of this node given the values of its children. - - This method is called by the :class:`_EvaluationVisitor - ` class. It must - be over-written by expression classes to customize this logic. - - Note: - This method applies the logical operation of the - operator to the arguments. It does *not* evaluate - the arguments in the process, but assumes that they - have been previously evaluated. But noted that if - this class contains auxiliary data (e.g. like the - numeric coefficients in the :class:`LinearExpression - ` class, then - those values *must* be evaluated as part of this - function call. An uninitialized parameter value - encountered during the execution of this method is - considered an error. - - args: - values (list): A list of values that indicate the value - of the children expressions. - - Returns: - A floating point value for this expression. - """ - raise NotImplementedError( - "Derived expression (%s) failed to " - "implement _apply_operation()" % (str(self.__class__), )) """ ---------------------------******************-------------------- @@ -845,7 +212,7 @@ def exactly(n, *args): Usage: exactly(2, m.Y1, m.Y2, m.Y3, ...) """ - result = ExactlyExpression([n, ] + list(_flattened(args))) + result = ExactlyExpression([n] + list(_flattened(args))) return result @@ -857,7 +224,7 @@ def atmost(n, *args): Usage: atmost(2, m.Y1, m.Y2, m.Y3, ...) """ - result = AtMostExpression([n, ] + list(_flattened(args))) + result = AtMostExpression([n] + list(_flattened(args))) return result @@ -869,14 +236,15 @@ def atleast(n, *args): Usage: atleast(2, m.Y1, m.Y2, m.Y3, ...) """ - result = AtLeastExpression([n, ] + list(_flattened(args))) + result = AtLeastExpression([n] + list(_flattened(args))) return result -class UnaryBooleanExpression(BooleanExpressionBase): +class UnaryBooleanExpression(BooleanExpression): """ Abstract class for single-argument logical expressions. """ + def nargs(self): """ Returns number of arguments in expression @@ -888,25 +256,24 @@ class NotExpression(UnaryBooleanExpression): """ This is the node for a NotExpression, this node should have exactly one child """ + PRECEDENCE = 2 def getname(self, *arg, **kwd): return 'Logical Negation' - def _precedence(self): - return NotExpression.PRECEDENCE - - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): return "~%s" % values[0] def _apply_operation(self, result): return not result[0] -class BinaryBooleanExpression(BooleanExpressionBase): +class BinaryBooleanExpression(BooleanExpression): """ Abstract class for binary logical expressions. """ + def nargs(self): """ Return the number of argument the expression has @@ -919,6 +286,7 @@ class EquivalenceExpression(BinaryBooleanExpression): Logical equivalence statement: Y_1 iff Y_2. """ + __slots__ = () PRECEDENCE = 6 @@ -926,10 +294,7 @@ class EquivalenceExpression(BinaryBooleanExpression): def getname(self, *arg, **kwd): return 'iff' - def _precedence(self): - return EquivalenceExpression.PRECEDENCE - - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): return " iff ".join(values) def _apply_operation(self, result): @@ -940,17 +305,15 @@ class XorExpression(BinaryBooleanExpression): """ Logical Exclusive OR statement: Y_1 ⊻ Y_2 """ + __slots__ = () - PRECEDENCE = 5 + PRECEDENCE = 4 def getname(self, *arg, **kwd): return 'xor' - def _precedence(self): - return XorExpression.PRECEDENCE - - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): return " ⊻ ".join(values) def _apply_operation(self, result): @@ -961,6 +324,7 @@ class ImplicationExpression(BinaryBooleanExpression): """ Logical Implication statement: Y_1 --> Y_2. """ + __slots__ = () PRECEDENCE = 6 @@ -968,22 +332,20 @@ class ImplicationExpression(BinaryBooleanExpression): def getname(self, *arg, **kwd): return 'implies' - def _precedence(self): - return ImplicationExpression.PRECEDENCE - - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): return " --> ".join(values) def _apply_operation(self, result): return (not result[0]) or result[1] -class NaryBooleanExpression(BooleanExpressionBase): +class NaryBooleanExpression(BooleanExpression): """ The abstract class for NaryBooleanExpression. This class should never be initialized. """ + __slots__ = ('_nargs',) def __init__(self, args): @@ -999,18 +361,6 @@ def nargs(self): def getname(self, *arg, **kwd): return 'NaryBooleanExpression' - def __getstate__(self): - """ - Pickle the expression object - - Returns: - The pickled state. - """ - state = super().__getstate__() - for i in NaryBooleanExpression.__slots__: - state[i] = getattr(self, i) - return state - def _add_to_and_or_expression(orig_expr, new_arg): """ @@ -1037,17 +387,15 @@ class AndExpression(NaryBooleanExpression): """ This is the node for AndExpression. """ + __slots__ = () - PRECEDENCE = 4 + PRECEDENCE = 3 def getname(self, *arg, **kwd): return 'and' - def _precedence(self): - return AndExpression.PRECEDENCE - - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): return " ∧ ".join(values) def _apply_operation(self, result): @@ -1066,17 +414,15 @@ class OrExpression(NaryBooleanExpression): """ This is the node for OrExpression. """ + __slots__ = () - PRECEDENCE = 4 + PRECEDENCE = 5 def getname(self, *arg, **kwd): return 'or' - def _precedence(self): - return OrExpression.PRECEDENCE - - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): return " ∨ ".join(values) def _apply_operation(self, result): @@ -1101,6 +447,7 @@ class ExactlyExpression(NaryBooleanExpression): Usage: exactly(1, True, False, False) --> True """ + __slots__ = () PRECEDENCE = 9 @@ -1108,10 +455,7 @@ class ExactlyExpression(NaryBooleanExpression): def getname(self, *arg, **kwd): return 'exactly' - def _precedence(self): - return ExactlyExpression.PRECEDENCE - - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): return "exactly(%s: [%s])" % (values[0], ", ".join(values[1:])) def _apply_operation(self, result): @@ -1128,6 +472,7 @@ class AtMostExpression(NaryBooleanExpression): Usage: atmost(1, True, False, False) --> True """ + __slots__ = () PRECEDENCE = 9 @@ -1135,10 +480,7 @@ class AtMostExpression(NaryBooleanExpression): def getname(self, *arg, **kwd): return 'atmost' - def _precedence(self): - return AtMostExpression.PRECEDENCE - - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): return "atmost(%s: [%s])" % (values[0], ", ".join(values[1:])) def _apply_operation(self, result): @@ -1155,6 +497,7 @@ class AtLeastExpression(NaryBooleanExpression): Usage: atleast(1, True, False, False) --> True """ + __slots__ = () PRECEDENCE = 9 @@ -1162,10 +505,7 @@ class AtLeastExpression(NaryBooleanExpression): def getname(self, *arg, **kwd): return 'atleast' - def _precedence(self): - return AtLeastExpression.PRECEDENCE - - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): return "atleast(%s: [%s])" % (values[0], ", ".join(values[1:])) def _apply_operation(self, result): diff --git a/pyomo/core/expr/numeric_expr.py b/pyomo/core/expr/numeric_expr.py index 5667ac6c934..b0f2cf380ff 100644 --- a/pyomo/core/expr/numeric_expr.py +++ b/pyomo/core/expr/numeric_expr.py @@ -9,610 +9,884 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from __future__ import division - -import math +import collections +import enum import logging -from operator import attrgetter -from itertools import islice +import math +import operator logger = logging.getLogger('pyomo.core') from math import isclose -from pyomo.common.deprecation import deprecation_warning -from .expr_common import ( - _add, _sub, _mul, _div, - _pow, _neg, _abs, _inplace, - _unary +from pyomo.common.dependencies import numpy as np, numpy_available +from pyomo.common.deprecation import ( + deprecated, + deprecation_warning, + relocated_module_attribute, ) -from .numvalue import ( - NumericValue, +from pyomo.common.errors import PyomoException, DeveloperError +from pyomo.common.formatting import tostr +from pyomo.common.numeric_types import ( native_types, nonpyomo_leaf_types, native_numeric_types, - as_numeric, + check_if_numeric_type, value, - is_potentially_variable, - is_constant, ) -from .visitor import ( - evaluate_expression, expression_to_string, polynomial_degree, - clone_expression, sizeof_expression, _expression_is_fixed +from pyomo.core.pyomoobject import PyomoObject +from pyomo.core.expr.expr_common import ( + OperatorAssociativity, + ExpressionType, + _lt, + _le, + _eq, ) +# Note: pyggyback on expr.base's use of attempt_import(visitor) +from pyomo.core.expr.base import ExpressionBase, NPV_Mixin, visitor -class clone_counter(object): - """ Context manager for counting cloning events. +relocated_module_attribute( + 'is_potentially_variable', + 'pyomo.core.expr.numvalue.is_potentially_variable', + version='6.6.2.dev0', + f_globals=globals(), +) +relocated_module_attribute( + 'as_numeric', + 'pyomo.core.expr.numvalue.as_numeric', + version='6.6.2.dev0', + f_globals=globals(), +) +relocated_module_attribute( + 'clone_counter', + 'pyomo.core.expr.expr_common.clone_counter', + version='6.6.2.dev0', + f_globals=globals(), +) +relocated_module_attribute( + 'evaluate_expression', + 'pyomo.core.expr.visitor.evaluate_expression', + version='6.6.2.dev0', + f_globals=globals(), +) +relocated_module_attribute( + 'expression_to_string', + 'pyomo.core.expr.visitor.expression_to_string', + version='6.6.2.dev0', + f_globals=globals(), +) +relocated_module_attribute( + 'polynomial_degree', + 'pyomo.core.expr.visitor.polynomial_degree', + version='6.6.2.dev0', + f_globals=globals(), +) +relocated_module_attribute( + 'clone_expression', + 'pyomo.core.expr.visitor.clone_expression', + version='6.6.2.dev0', + f_globals=globals(), +) +relocated_module_attribute( + 'sizeof_expression', + 'pyomo.core.expr.visitor.sizeof_expression', + version='6.6.2.dev0', + f_globals=globals(), +) - This context manager counts the number of times that the - :func:`clone_expression ` - function is executed. - """ +_zero_one_optimizations = {1} - _count = 0 - def __enter__(self): - return self +# Stub in the dispatchers +def _generate_relational_expression(etype, lhs, rhs): + raise RuntimeError("incomplete import of Pyomo expression system") - def __exit__(self, *args): - pass - @property - def count(self): - """A property that returns the clone count value. - """ - return clone_counter._count +def enable_expression_optimizations(zero=None, one=None): + """Enable(disable) expression generation optimizations + There are currently two optimizations available during expression generation: -class nonlinear_expression(object): - """ Context manager for mutable sums. + - zero: aggressively resolve `0*f(.)` expressions to `0`, `0/f(.)` + expressions to `0`, and `f(.)**0` expressions to `1` + + - one: aggressively resolve identities: `1*f(.)` expressions to + `f(.)`, `f(.)/1` expressions to `f(.)`, and `f(.)**1` expressions + to `f(.)`. + + The default optimizations are `zero=False` and `one=True`. + + Notes + ----- + + Enabling the `zero` optimization can mask certain modeling errors. + In particular, the optimization will suppress `ZeroDivisionError`s + that should be raised if `f(.)` resolves to `0` (in the case of + `0/f(.)`), as well as any errors that would have otherwise been + raised during the evaluation of `f(.)`. In addition, optimizing + `f(.)**0 == 1` is only valid when `f(.)!=0`. **Users who enable + this optimization bear responsibility for ensuring that these + optimizations will be valid for the model.** + + The `one` optimizations should generally be safe. + + Parameters + ---------- + zero: bool, optional + + If `True` (`False`), enable (disable) the "zero" optimizations. + If None, leave the optimization state unchanged. + + one: bool, optional + + If `True` (`False`), enable (disable) the "one" optimizations. + If None, leave the optimization state unchanged. + + """ + for arg, key in ((zero, 0), (one, 1)): + if arg is None: + continue + if arg: + _zero_one_optimizations.add(key) + else: + _zero_one_optimizations.discard(key) + + +class mutable_expression(object): + """Context manager for mutable sums. + + This context manager is used to compute a sum while treating the + summation as a mutable object. - This context manager is used to compute a sum while - treating the summation as a mutable object. """ def __enter__(self): - self.e = _MutableSumExpression([]) + self.e = _MutableNPVSumExpression([]) return self.e def __exit__(self, *args): - if self.e.__class__ == _MutableSumExpression: - self.e.__class__ = SumExpression + if isinstance(self.e, _MutableSumExpression): + self.e.make_immutable() -class linear_expression(object): - """ Context manager for mutable linear sums. +class nonlinear_expression(mutable_expression): + """Context manager for mutable nonlinear sums. + + This context manager is used to compute a general nonlinear sum + while treating the summation as a mutable object. + + Note + ---- + + The preferred context manager is :py:class:`mutable_expression`, as + the return type will be the most specific of + :py:class:`SumExpression`, :py:class:`LinearExpression`, or + :py:class:`NPV_SumExpression`. This context manager will *always* + return a :py:class:`SumExpression`. - This context manager is used to compute a linear sum while - treating the summation as a mutable object. """ def __enter__(self): - """ - The :class:`_MutableLinearExpression ` - class is the context that is used to to - hold the mutable linear sum. - """ - self.e = _MutableLinearExpression() + self.e = _MutableSumExpression([]) return self.e - def __exit__(self, *args): - """ - The context is changed to the - :class:`LinearExpression ` - class to transform the context into a nonmutable - form. - """ - if self.e.__class__ == _MutableLinearExpression: - self.e.__class__ = LinearExpression +class linear_expression(mutable_expression): + """Context manager for mutable linear sums. -#------------------------------------------------------- -# -# Expression classes -# -#------------------------------------------------------- + This context manager is used to compute a linear sum while + treating the summation as a mutable object. + Note + ---- + + The preferred context manager is :py:class:`mutable_expression`. + :py:class:`linear_expression` is an alias to + :py:class:`mutable_expression` provided for backwards compatibility. -class ExpressionBase(NumericValue): """ - The base class for Pyomo expressions. - This class is used to define nodes in an expression - tree. - Args: - args (list or tuple): Children of this node. +class NumericValue(PyomoObject): + """ + This is the base class for numeric values used in Pyomo. """ - # Previously, we used _args to define expression class arguments. - # Here, we use _args_ to force errors for code that was referencing this - # data. There are now accessor methods, so in most cases users - # and developers should not directly access the _args_ data values. - __slots__ = ('_args_',) - PRECEDENCE = 0 + __slots__ = () - def __init__(self, args): - self._args_ = args + # This is required because we define __eq__ + __hash__ = None - def nargs(self): + def getname(self, fully_qualified=False, name_buffer=None): + """ + If this is a component, return the component's name on the owning + block; otherwise return the value converted to a string + """ + _base = super(NumericValue, self) + if hasattr(_base, 'getname'): + return _base.getname(fully_qualified, name_buffer) + else: + return str(type(self)) + + @property + def name(self): + return self.getname(fully_qualified=True) + + @property + def local_name(self): + return self.getname(fully_qualified=False) + + def is_numeric_type(self): + """Return True if this class is a Pyomo numeric object""" + return True + + def is_constant(self): + """Return True if this numeric value is a constant value""" + return False + + def is_fixed(self): + """Return True if this is a non-constant value that has been fixed""" + return False + + def is_potentially_variable(self): + """Return True if variables can appear in this expression""" + return False + + @deprecated( + "is_relational() is deprecated in favor of " + "is_expression_type(ExpressionType.RELATIONAL)", + version='6.4.3', + ) + def is_relational(self): + """ + Return True if this numeric value represents a relational expression. """ - Returns the number of child nodes. + return False - By default, Pyomo expressions represent binary operations - with two arguments. + def is_indexed(self): + """Return True if this numeric value is an indexed object""" + return False - Note: - This function does not simply compute the length of - :attr:`_args_` because some expression classes use - a subset of the :attr:`_args_` array. Thus, it - is imperative that developers use this method! + def polynomial_degree(self): + """ + Return the polynomial degree of the expression. Returns: - A nonnegative integer that is the number of child nodes. + :const:`None` """ - return 2 + return self._compute_polynomial_degree(None) - def arg(self, i): + def _compute_polynomial_degree(self, values): """ - Return the i-th child node. + Compute the polynomial degree of this expression given + the degree values of its children. Args: - i (int): Nonnegative index of the child that is returned. + values (list): A list of values that indicate the degree + of the children expression. Returns: - The i-th child node. + :const:`None` """ - if i >= self.nargs(): - raise KeyError("Invalid index for expression argument: %d" % i) - if i < 0: - return self._args_[self.nargs()+i] - return self._args_[i] + return None - @property - def args(self): - """ - Return the child nodes + def __bool__(self): + """Coerce the value to a bool + + Numeric values can be coerced to bool only if the value / + expression is constant. Fixed (but non-constant) or variable + values will raise an exception. + + Raises: + PyomoException - Returns: Either a list or tuple (depending on the node storage - model) containing only the child nodes of this node """ - return self._args_[:self.nargs()] + # Note that we want to implement __bool__, as scalar numeric + # components (e.g., Param, Var) implement __len__ (since they + # are implicit containers), and Python falls back on __len__ if + # __bool__ is not defined. + if self.is_constant(): + return bool(self()) + raise PyomoException( + """ +Cannot convert non-constant Pyomo numeric value (%s) to bool. +This error is usually caused by using a Var, unit, or mutable Param in a +Boolean context such as an "if" statement. For example, + >>> m.x = Var() + >>> if not m.x: + ... pass +would cause this exception.""".strip() + % (self,) + ) + + def __float__(self): + """Coerce the value to a floating point + Numeric values can be coerced to float only if the value / + expression is constant. Fixed (but non-constant) or variable + values will raise an exception. + + Raises: + TypeError - def __getstate__(self): """ - Pickle the expression object + if self.is_constant(): + return float(self()) + raise TypeError( + """ +Implicit conversion of Pyomo numeric value (%s) to float is disabled. +This error is often the result of using Pyomo components as arguments to +one of the Python built-in math module functions when defining +expressions. Avoid this error by using Pyomo-provided math functions or +explicitly resolving the numeric value using the Pyomo value() function. +""".strip() + % (self,) + ) + + def __int__(self): + """Coerce the value to an integer + + Numeric values can be coerced to int only if the value / + expression is constant. Fixed (but non-constant) or variable + values will raise an exception. + + Raises: + TypeError - Returns: - The pickled state. """ - state = super(ExpressionBase, self).__getstate__() - for i in ExpressionBase.__slots__: - state[i] = getattr(self,i) - return state + if self.is_constant(): + return int(self()) + raise TypeError( + """ +Implicit conversion of Pyomo numeric value (%s) to int is disabled. +This error is often the result of using Pyomo components as arguments to +one of the Python built-in math module functions when defining +expressions. Avoid this error by using Pyomo-provided math functions or +explicitly resolving the numeric value using the Pyomo value() function. +""".strip() + % (self,) + ) - def __call__(self, exception=True): + def __lt__(self, other): """ - Evaluate the value of the expression tree. + Less than operator - Args: - exception (bool): If :const:`False`, then - an exception raised while evaluating - is captured, and the value returned is - :const:`None`. Default is :const:`True`. + This method is called when Python processes statements of the form:: - Returns: - The value of the expression or :const:`None`. + self < other + other > self """ - return evaluate_expression(self, exception) + return _generate_relational_expression(_lt, self, other) - def __str__(self): + def __gt__(self, other): """ - Returns a string description of the expression. + Greater than operator - Note: - The value of ``pyomo.core.expr.expr_common.TO_STRING_VERBOSE`` - is used to configure the execution of this method. - If this value is :const:`True`, then the string - representation is a nested function description of the expression. - The default is :const:`False`, which is an algebraic - description of the expression. + This method is called when Python processes statements of the form:: - Returns: - A string. + self > other + other < self """ - return expression_to_string(self) + return _generate_relational_expression(_lt, other, self) - def to_string(self, verbose=None, labeler=None, smap=None, - compute_values=False): + def __le__(self, other): """ - Return a string representation of the expression tree. + Less than or equal operator - Args: - verbose (bool): If :const:`True`, then the the string - representation consists of nested functions. Otherwise, - the string representation is an algebraic equation. - Defaults to :const:`False`. - labeler: An object that generates string labels for - variables in the expression tree. Defaults to :const:`None`. - smap: If specified, this - :class:`SymbolMap ` - is used to cache labels for variables. - compute_values (bool): If :const:`True`, then - parameters and fixed variables are evaluated before the - expression string is generated. Default is :const:`False`. + This method is called when Python processes statements of the form:: - Returns: - A string representation for the expression tree. + self <= other + other >= self """ - return expression_to_string(self, verbose=verbose, labeler=labeler, - smap=smap, compute_values=compute_values) + return _generate_relational_expression(_le, self, other) - def _precedence(self): - return ExpressionBase.PRECEDENCE + def __ge__(self, other): + """ + Greater than or equal operator - def _associativity(self): - """Return the associativity of this operator. + This method is called when Python processes statements of the form:: - Returns 1 if this operator is left-to-right associative or -1 if - it is right-to-left associative. Any other return value will be - interpreted as "not associative" (implying any arguments that - are at this operator's _precedence() will be enclosed in parens). + self >= other + other <= self """ - # Most operators in Python are left-to-right associative - return 1 + return _generate_relational_expression(_le, other, self) - def _to_string(self, values, verbose, smap, compute_values): #pragma: no cover + def __eq__(self, other): """ - Construct a string representation for this node, using the string - representations of its children. + Equal to operator - This method is called by the :class:`_ToStringVisitor - ` class. It must - must be defined in subclasses. + This method is called when Python processes the statement:: - Args: - values (list): The string representations of the children of this - node. - verbose (bool): If :const:`True`, then the the string - representation consists of nested functions. Otherwise, - the string representation is an algebraic equation. - smap: If specified, this :class:`SymbolMap - ` is - used to cache labels for variables. - compute_values (bool): If :const:`True`, then - parameters and fixed variables are evaluated before the - expression string is generated. - - Returns: - A string representation for this node. + self == other """ - raise NotImplementedError( - "Derived expression (%s) failed to implement _to_string()" - % ( str(self.__class__), )) + return _generate_relational_expression(_eq, self, other) - def getname(self, *args, **kwds): #pragma: no cover + def __add__(self, other): """ - Return the text name of a function associated with this expression object. + Binary addition - In general, no arguments are passed to this function. + This method is called when Python processes the statement:: - Args: - *arg: a variable length list of arguments - **kwds: keyword arguments + self + other + """ + return _add_dispatcher[self.__class__, other.__class__](self, other) - Returns: - A string name for the function. + def __sub__(self, other): """ - raise NotImplementedError("Derived expression (%s) failed to "\ - "implement getname()" % ( str(self.__class__), )) + Binary subtraction + + This method is called when Python processes the statement:: - def clone(self, substitute=None): + self - other """ - Return a clone of the expression tree. + return self.__add__(-other) - Note: - This method does not clone the leaves of the - tree, which are numeric constants and variables. - It only clones the interior nodes, and - expression leaf nodes like - :class:`_MutableLinearExpression`. - However, named expressions are treated like - leaves, and they are not cloned. + def __mul__(self, other): + """ + Binary multiplication - Args: - substitute (dict): a dictionary that maps object ids to clone - objects generated earlier during the cloning process. + This method is called when Python processes the statement:: - Returns: - A new expression tree. + self * other """ - return clone_expression(self, substitute=substitute) + return _mul_dispatcher[self.__class__, other.__class__](self, other) - def create_node_with_local_data(self, args, classtype=None): + def __div__(self, other): """ - Construct a node using given arguments. + Binary division - This method provides a consistent interface for constructing a - node, which is used in tree visitor scripts. In the simplest - case, this simply returns:: + This method is called when Python processes the statement:: - self.__class__(args) + self / other + """ + return _div_dispatcher[self.__class__, other.__class__](self, other) - But in general this creates an expression object using local - data as well as arguments that represent the child nodes. + def __truediv__(self, other): + """ + Binary division (when __future__.division is in effect) - Args: - args (list): A list of child nodes for the new expression - object + This method is called when Python processes the statement:: - Returns: - A new expression object with the same type as the current - class. + self / other """ - if classtype is None: - classtype = self.__class__ - return classtype(args) + return _div_dispatcher[self.__class__, other.__class__](self, other) - def create_potentially_variable_object(self): + def __pow__(self, other): """ - Create a potentially variable version of this object. - - This method returns an object that is a potentially variable - version of the current object. In the simplest - case, this simply sets the value of `__class__`: + Binary power - self.__class__ = self.__class__.__mro__[1] + This method is called when Python processes the statement:: - Note that this method is allowed to modify the current object - and return it. But in some cases it may create a new - potentially variable object. + self ** other + """ + return _pow_dispatcher[self.__class__, other.__class__](self, other) - Returns: - An object that is potentially variable. + def __radd__(self, other): """ - self.__class__ = self.__class__.__mro__[1] - return self + Binary addition - def is_constant(self): - """Return True if this expression is an atomic constant + This method is called when Python processes the statement:: - This method contrasts with the is_fixed() method. This method - returns True if the expression is an atomic constant, that is it - is composed exclusively of constants and immutable parameters. - NumericValue objects returning is_constant() == True may be - simplified to their numeric value at any point without warning. + other + self + """ + return _add_dispatcher[other.__class__, self.__class__](other, self) - Note: This defaults to False, but gets redefined in sub-classes. + def __rsub__(self, other): """ - return False + Binary subtraction - def is_fixed(self): + This method is called when Python processes the statement:: + + other - self """ - Return :const:`True` if this expression contains no free variables. + return other + (-self) - Returns: - A boolean. + def __rmul__(self, other): """ - return _expression_is_fixed(self) + Binary multiplication + + This method is called when Python processes the statement:: - def _is_fixed(self, values): + other * self + + when other is not a :class:`NumericValue ` object. """ - Compute whether this expression is fixed given - the fixed values of its children. + return _mul_dispatcher[other.__class__, self.__class__](other, self) - This method is called by the :class:`_IsFixedVisitor - ` class. It can - be over-written by expression classes to customize this - logic. + def __rdiv__(self, other): + """Binary division - Args: - values (list): A list of boolean values that indicate whether - the children of this expression are fixed + This method is called when Python processes the statement:: - Returns: - A boolean that is :const:`True` if the fixed values of the - children are all :const:`True`. + other / self """ - return all(values) + return _div_dispatcher[other.__class__, self.__class__](other, self) - def is_potentially_variable(self): + def __rtruediv__(self, other): """ - Return :const:`True` if this expression might represent - a variable expression. + Binary division (when __future__.division is in effect) - This method returns :const:`True` when (a) the expression - tree contains one or more variables, or (b) the expression - tree contains a named expression. In both cases, the - expression cannot be treated as constant since (a) the variables - may not be fixed, or (b) the named expressions may be changed - at a later time to include non-fixed variables. + This method is called when Python processes the statement:: - Returns: - A boolean. Defaults to :const:`True` for expressions. + other / self """ - return True + return _div_dispatcher[other.__class__, self.__class__](other, self) - def is_named_expression_type(self): + def __rpow__(self, other): """ - Return :const:`True` if this object is a named expression. + Binary power - This method returns :const:`False` for this class, and it - is included in other classes within Pyomo that are not named - expressions, which allows for a check for named expressions - without evaluating the class type. + This method is called when Python processes the statement:: - Returns: - A boolean. + other ** self """ - return False + return _pow_dispatcher[other.__class__, self.__class__](other, self) - def is_expression_type(self): + def __iadd__(self, other): """ - Return :const:`True` if this object is an expression. + Binary addition - This method obviously returns :const:`True` for this class, but it - is included in other classes within Pyomo that are not expressions, - which allows for a check for expressions without - evaluating the class type. + This method is called when Python processes the statement:: - Returns: - A boolean. + self += other """ - return True + return _add_dispatcher[self.__class__, other.__class__](self, other) - def size(self): + def __isub__(self, other): """ - Return the number of nodes in the expression tree. + Binary subtraction - Returns: - A nonnegative integer that is the number of interior and leaf - nodes in the expression tree. + This method is called when Python processes the statement:: + + self -= other """ - return sizeof_expression(self) + return self.__iadd__(-other) - def polynomial_degree(self): + def __imul__(self, other): """ - Return the polynomial degree of the expression. + Binary multiplication - Returns: - A non-negative integer that is the polynomial - degree if the expression is polynomial, or :const:`None` otherwise. + This method is called when Python processes the statement:: + + self *= other """ - return polynomial_degree(self) + return _mul_dispatcher[self.__class__, other.__class__](self, other) - def _compute_polynomial_degree(self, values): + def __idiv__(self, other): """ - Compute the polynomial degree of this expression given - the degree values of its children. + Binary division - This method is called by the :class:`_PolynomialDegreeVisitor - ` class. It can - be over-written by expression classes to customize this - logic. + This method is called when Python processes the statement:: - Args: - values (list): A list of values that indicate the degree - of the children expression. + self /= other + """ + return _div_dispatcher[self.__class__, other.__class__](self, other) - Returns: - A nonnegative integer that is the polynomial degree of the - expression, or :const:`None`. Default is :const:`None`. + def __itruediv__(self, other): """ - return None + Binary division (when __future__.division is in effect) + + This method is called when Python processes the statement:: - def _apply_operation(self, result): #pragma: no cover + self /= other """ - Compute the values of this node given the values of its children. + return _div_dispatcher[self.__class__, other.__class__](self, other) - This method is called by the :class:`_EvaluationVisitor - ` class. It must - be over-written by expression classes to customize this logic. + def __ipow__(self, other): + """ + Binary power - Note: - This method applies the logical operation of the - operator to the arguments. It does *not* evaluate - the arguments in the process, but assumes that they - have been previously evaluated. But noted that if - this class contains auxilliary data (e.g. like the - numeric coefficients in the :class:`LinearExpression - ` class, then - those values *must* be evaluated as part of this - function call. An uninitialized parameter value - encountered during the execution of this method is - considered an error. + This method is called when Python processes the statement:: - Args: - values (list): A list of values that indicate the value - of the children expressions. + self **= other + """ + return _pow_dispatcher[self.__class__, other.__class__](self, other) - Returns: - A floating point value for this expression. + def __neg__(self): """ - raise NotImplementedError("Derived expression (%s) failed to "\ - "implement _apply_operation()" % ( str(self.__class__), )) + Negation + This method is called when Python processes the statement:: -class NPV_Mixin(object): - __slots__ = () + - self + """ + return _neg_dispatcher[self.__class__](self) - def is_potentially_variable(self): - return False + def __pos__(self): + """ + Positive expression - def create_node_with_local_data(self, args, classtype=None): - assert classtype is None - try: - npv_args = all( - type(arg) in native_types or not arg.is_potentially_variable() - for arg in args - ) - except AttributeError: - # We can hit this during expression replacement when the new - # type is not a PyomoObject type, but is not in the - # native_types set. We will play it safe and clear the NPV flag - npv_args = False - if npv_args: - return super().create_node_with_local_data(args, None) - else: - cls = list(self.__class__.__bases__) - cls.remove(NPV_Mixin) - assert len(cls) == 1 - return super().create_node_with_local_data(args, cls[0]) + This method is called when Python processes the statement:: + + self + """ + return self -class NegationExpression(ExpressionBase): - """ - Negation expressions:: + def __abs__(self): + """Absolute value - - x - """ + This method is called when Python processes the statement:: - __slots__ = () + abs(self) + """ + return _abs_dispatcher[self.__class__](self) - PRECEDENCE = 4 + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + return NumericNDArray.__array_ufunc__(None, ufunc, method, *inputs, **kwargs) - def nargs(self): - return 1 + def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): + """Return a string representation of the expression tree. - def getname(self, *args, **kwds): - return 'neg' + Args: + verbose (bool): If :const:`True`, then the string + representation consists of nested functions. Otherwise, + the string representation is an infix algebraic equation. + Defaults to :const:`False`. + labeler: An object that generates string labels for + non-constant in the expression tree. Defaults to + :const:`None`. + smap: A SymbolMap instance that stores string labels for + non-constant nodes in the expression tree. Defaults to + :const:`None`. + compute_values (bool): If :const:`True`, then fixed + expressions are evaluated and the string representation + of the resulting value is returned. - def _compute_polynomial_degree(self, result): - return result[0] + Returns: + A string representation for the expression tree. - def _precedence(self): - return NegationExpression.PRECEDENCE + """ + if compute_values and self.is_fixed(): + try: + return str(self()) + except: + pass + if not self.is_constant(): + if smap is not None: + return smap.getSymbol(self, labeler) + elif labeler is not None: + return labeler(self) + return str(self) - def _to_string(self, values, verbose, smap, compute_values): - if verbose: - return "{0}({1})".format(self.getname(), values[0]) - tmp = values[0] - if tmp[0] == '-': - i = 1 - while tmp[i] == ' ': - i += 1 - return tmp[i:] - return "- "+tmp - def _apply_operation(self, result): - return -result[0] +# +# Note: the "if numpy_available" in the class definition also ensures +# that the numpy types are registered if numpy is in fact available +# +# TODO: Move this to a separate module to support avoiding the numpy +# import if numpy is not actually used. +class NumericNDArray(np.ndarray if numpy_available else object): + """An ndarray subclass that stores Pyomo numeric expressions""" + + def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): + if method == '__call__': + # Convert all incoming types to ndarray (to prevent recursion) + args = [np.asarray(i) for i in inputs] + # Set the return type to be an 'object'. This prevents the + # logical operators from casting the result to a bool. This + # requires numpy >= 1.6 + kwargs['dtype'] = object + + # Delegate to the base ufunc, but return an instance of this + # class so that additional operators hit this method. + ans = getattr(ufunc, method)(*args, **kwargs) + if isinstance(ans, np.ndarray): + if ans.size == 1: + return ans[0] + return ans.view(NumericNDArray) + else: + return ans -class NPV_NegationExpression(NPV_Mixin, NegationExpression): - __slots__ = () +# ------------------------------------------------------- +# +# Expression classes +# +# ------------------------------------------------------- -class ExternalFunctionExpression(ExpressionBase): +class NumericExpression(ExpressionBase, NumericValue): """ - External function expressions + The base class for Pyomo expressions. - Example:: + This class is used to define nodes in a numeric expression + tree. - model = ConcreteModel() - model.a = Var() + Args: + args (list or tuple): Children of this node. + """ + + # Previously, we used _args to define expression class arguments. + # Here, we use _args_ to force errors for code that was referencing this + # data. There are now accessor methods, so in most cases users + # and developers should not directly access the _args_ data values. + __slots__ = ('_args_',) + EXPRESSION_SYSTEM = ExpressionType.NUMERIC + PRECEDENCE = 0 + + def __init__(self, args): + self._args_ = args + + def nargs(self): + # by default, Pyomo numeric operators are binary operators + return 2 + + @property + def args(self): + """ + Return the child nodes + + Returns + ------- + list or tuple: + Sequence containing only the child nodes of this node. The + return type depends on the node storage model. Users are + not permitted to change the returned data (even for the case + of data returned as a list), as that breaks the promise of + tree immutability. + """ + return self._args_ + + @deprecated( + 'The implicit recasting of a "not potentially variable" ' + 'expression node to a potentially variable one is no ' + 'longer supported (this violates that immutability ' + 'promise for Pyomo5 expression trees).', + version='6.4.3', + ) + def create_potentially_variable_object(self): + """ + Create a potentially variable version of this object. + + This method returns an object that is a potentially variable + version of the current object. In the simplest + case, this simply sets the value of `__class__`: + + self.__class__ = self.__class__.__mro__[1] + + Note that this method is allowed to modify the current object + and return it. But in some cases it may create a new + potentially variable object. + + Returns: + An object that is potentially variable. + """ + if not self.is_potentially_variable(): + logger.error( + 'recasting a non-potentially variable expression to a ' + 'potentially variable one violates the immutability ' + 'promise for Pyomo expression trees.' + ) + self.__class__ = self.potentially_variable_base_class() + return self + + def polynomial_degree(self): + """ + Return the polynomial degree of the expression. + + Returns: + A non-negative integer that is the polynomial + degree if the expression is polynomial, or :const:`None` otherwise. + """ + return visitor.polynomial_degree(self) + + def _compute_polynomial_degree(self, values): + """ + Compute the polynomial degree of this expression given + the degree values of its children. + + This method is called by the :class:`_PolynomialDegreeVisitor + ` class. It can + be over-written by expression classes to customize this + logic. + + Args: + values (list): A list of values that indicate the degree + of the children expression. + + Returns: + A nonnegative integer that is the polynomial degree of the + expression, or :const:`None`. Default is :const:`None`. + """ + if all(val == 0 for val in values): + return 0 + else: + return None + + +class Numeric_NPV_Mixin(NPV_Mixin): + __slots__ = () + + def potentially_variable_base_class(self): + cls = list(self.__class__.__bases__) + cls.remove(Numeric_NPV_Mixin) + assert len(cls) == 1 + return cls[0] + + # + # Special cases: unary operators on NPV expressions are NPV + # + def __neg__(self): + return NPV_NegationExpression((self,)) + + def __abs__(self): + return NPV_AbsExpression((self,)) + + +class NegationExpression(NumericExpression): + """ + Negation expressions:: + + - x + """ + + __slots__ = () + + PRECEDENCE = 4 + + def nargs(self): + return 1 + + def getname(self, *args, **kwds): + return 'neg' + + def _compute_polynomial_degree(self, result): + return result[0] + + def _to_string(self, values, verbose, smap): + if verbose: + return f"{self.getname()}({values[0]})" + tmp = values[0] + if tmp[0] == '-': + return tmp[1:].strip() + # TODO: remove space after negation + return "- " + tmp + + def _apply_operation(self, result): + return -result[0] + + def __neg__(self): + return self._args_[0] + + +class NPV_NegationExpression(Numeric_NPV_Mixin, NegationExpression): + __slots__ = () + + # Because NPV also defines __neg__ we need to override it here, too + def __neg__(self): + return self._args_[0] + + +class ExternalFunctionExpression(NumericExpression): + """ + External function expressions + + Example:: + + model = ConcreteModel() + model.a = Var() model.f = ExternalFunction(library='foo.so', function='bar') expr = model.f(model.a) @@ -620,8 +894,12 @@ class ExternalFunctionExpression(ExpressionBase): args (tuple): children of this node fcn: a class that defines this external function """ + __slots__ = ('_fcn',) + # This operator does not have an infix representation + PRECEDENCE = None + def __init__(self, args, fcn=None): self._args_ = args self._fcn = fcn @@ -634,37 +912,29 @@ def create_node_with_local_data(self, args, classtype=None): classtype = self.__class__ return classtype(args, self._fcn) - def __getstate__(self): - state = super(ExternalFunctionExpression, self).__getstate__() - for i in ExternalFunctionExpression.__slots__: - state[i] = getattr(self, i) - return state - - def getname(self, *args, **kwds): #pragma: no cover + def getname(self, *args, **kwds): return self._fcn.getname(*args, **kwds) - def _compute_polynomial_degree(self, result): - return 0 if all(arg == 0 for arg in result) else None - def _apply_operation(self, result): - return self._fcn.evaluate( result ) + return self._fcn.evaluate(result) - def _to_string(self, values, verbose, smap, compute_values): - return "{0}({1})".format(self.getname(), ", ".join(values)) + def _to_string(self, values, verbose, smap): + return f"{self.getname()}({', '.join(values)})" def get_arg_units(self): - """ Return the units for this external functions arguments """ + """Return the units for this external functions arguments""" return self._fcn.get_arg_units() def get_units(self): - """ Get the units of the return value for this external function """ + """Get the units of the return value for this external function""" return self._fcn.get_units() -class NPV_ExternalFunctionExpression(NPV_Mixin, ExternalFunctionExpression): + +class NPV_ExternalFunctionExpression(Numeric_NPV_Mixin, ExternalFunctionExpression): __slots__ = () -class PowExpression(ExpressionBase): +class PowExpression(NumericExpression): """ Power expressions:: @@ -674,6 +944,12 @@ class PowExpression(ExpressionBase): __slots__ = () PRECEDENCE = 2 + # "**" is right-to-left associative in Python (so this should + # return -1), however, as this rule is not widely known and can + # confuse novice users, we will make our "**" operator + # non-associative (forcing parens) + ASSOCIATIVITY = OperatorAssociativity.NON_ASSOCIATIVE + def _compute_polynomial_degree(self, result): # PowExpression is a tricky thing. In general, a**b is # nonpolynomial, however, if b == 0, it is a constant @@ -681,10 +957,8 @@ def _compute_polynomial_degree(self, result): # integer, it is also polynomial. While we would like to just # call this a non-polynomial expression, these exceptions occur # too frequently (and in particular, a**2) - l,r = result + l, r = result if r == 0: - if l == 0: - return 0 # NOTE: use value before int() so that we don't # run into the disabled __int__ method on # NumericValue @@ -692,46 +966,35 @@ def _compute_polynomial_degree(self, result): if exp is None: return None if exp == int(exp): - if l is not None and exp > 0: - return l * exp - elif exp == 0: + if not exp: return 0 + if l is not None and exp > 0: + return l * int(exp) return None def _is_fixed(self, args): - assert(len(args) == 2) if not args[1]: return False - return args[0] or value(self._args_[1]) == 0 - - def _precedence(self): - return PowExpression.PRECEDENCE - - def _associativity(self): - # "**" is right-to-left associative in Python (so this should - # return -1), however, as this rule is not widely known and can - # confuse novice users, we will make our "**" operator - # non-associative (forcing parens) - return 0 + return args[0] or value(self._args_[1], exception=False) == 0 def _apply_operation(self, result): _l, _r = result - return _l ** _r + return _l**_r def getname(self, *args, **kwds): return 'pow' - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): if verbose: - return "{0}({1}, {2})".format(self.getname(), values[0], values[1]) - return "{0}**{1}".format(values[0], values[1]) + return f"{self.getname()}({', '.join(values)})" + return f"{values[0]}**{values[1]}" -class NPV_PowExpression(NPV_Mixin, PowExpression): +class NPV_PowExpression(Numeric_NPV_Mixin, PowExpression): __slots__ = () -class MaxExpression(ExpressionBase): +class MaxExpression(NumericExpression): """ Maximum expressions:: @@ -740,6 +1003,9 @@ class MaxExpression(ExpressionBase): __slots__ = () + # This operator does not have an infix representation + PRECEDENCE = None + def nargs(self): return len(self._args_) @@ -749,19 +1015,15 @@ def _apply_operation(self, result): def getname(self, *args, **kwds): return 'max' - def _to_string(self, values, verbose, smap, compute_values): - return "%s(%s)" % (self.getname(), ', '.join( - arg[1:-1] - if (arg and arg[0] == '(' and arg[-1] == ')' - and _balanced_parens(arg[1:-1])) - else arg for arg in values)) + def _to_string(self, values, verbose, smap): + return f"{self.getname()}({', '.join(values)})" -class NPV_MaxExpression(NPV_Mixin, MaxExpression): +class NPV_MaxExpression(Numeric_NPV_Mixin, MaxExpression): __slots__ = () -class MinExpression(ExpressionBase): +class MinExpression(NumericExpression): """ Minimum expressions:: @@ -770,6 +1032,9 @@ class MinExpression(ExpressionBase): __slots__ = () + # This operator does not have an infix representation + PRECEDENCE = None + def nargs(self): return len(self._args_) @@ -779,19 +1044,15 @@ def _apply_operation(self, result): def getname(self, *args, **kwds): return 'min' - def _to_string(self, values, verbose, smap, compute_values): - return "%s(%s)" % (self.getname(), ', '.join( - arg[1:-1] - if (arg and arg[0] == '(' and arg[-1] == ')' - and _balanced_parens(arg[1:-1])) - else arg for arg in values)) + def _to_string(self, values, verbose, smap): + return f"{self.getname()}({', '.join(values)})" -class NPV_MinExpression(NPV_Mixin, MinExpression): +class NPV_MinExpression(Numeric_NPV_Mixin, MinExpression): __slots__ = () -class ProductExpression(ExpressionBase): +class ProductExpression(NumericExpression): """ Product expressions:: @@ -801,14 +1062,15 @@ class ProductExpression(ExpressionBase): __slots__ = () PRECEDENCE = 4 - def _precedence(self): - return ProductExpression.PRECEDENCE - def _compute_polynomial_degree(self, result): # NB: We can't use sum() here because None (non-polynomial) # overrides a numeric value (and sum() just ignores it - or # errors in py3k) a, b = result + if a == 0 and value(self._args_[0], exception=False) == 0: + return 0 + if b == 0 and value(self._args_[1], exception=False) == 0: + return 0 if a is None or b is None: return None else: @@ -820,11 +1082,10 @@ def getname(self, *args, **kwds): def _is_fixed(self, args): # Anything times 0 equals 0, so one of the children is # fixed and has a value of 0, then this expression is fixed - assert(len(args) == 2) if all(args): return True for i in (0, 1): - if args[i] and value(self._args_[i]) == 0: + if args[i] and value(self._args_[i], exception=False) == 0: return True return False @@ -832,20 +1093,22 @@ def _apply_operation(self, result): _l, _r = result return _l * _r - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): if verbose: - return "{0}({1}, {2})".format(self.getname(), values[0], values[1]) + return f"{self.getname()}({', '.join(values)})" if values[0] in self._to_string.one: return values[1] + # TODO: remove space after negation if values[0] in self._to_string.minus_one: - return "- {0}".format(values[1]) - return "{0}*{1}".format(values[0],values[1]) + return f"- {values[1]}" + return f"{values[0]}*{values[1]}" + # Store these reference sets on the function for quick lookup _to_string.one = {"1", "1.0", "(1)", "(1.0)"} _to_string.minus_one = {"-1", "-1.0", "(-1)", "(-1.0)"} -class NPV_ProductExpression(NPV_Mixin, ProductExpression): +class NPV_ProductExpression(Numeric_NPV_Mixin, ProductExpression): __slots__ = () @@ -857,37 +1120,23 @@ def getname(self, *args, **kwds): def create_node_with_local_data(self, args, classtype=None): if classtype is None: - # If this doesn't look like a MonomialTermExpression, then - # fall back on the expression generation system to sort out - # what the appropriate return type is. - try: - if not (args[0].__class__ in native_types - or not args[0].is_potentially_variable()): - return args[0] * args[1] - elif (args[1].__class__ in native_types - or not args[1].is_variable_type()): - return args[0] * args[1] - except AttributeError: - # Fall back on general expression generation - return args[0] * args[1] + # Because monomial terms place requirements on the argument + # types, the simplest / fastest thing to do is just defer to + # the operator dispatcher. + return operator.mul(*args) return self.__class__(args) -class DivisionExpression(ExpressionBase): +class DivisionExpression(NumericExpression): """ Division expressions:: x/y """ + __slots__ = () PRECEDENCE = 4 - def nargs(self): - return 2 - - def _precedence(self): - return DivisionExpression.PRECEDENCE - def _compute_polynomial_degree(self, result): if result[1] == 0: return result[0] @@ -896,26 +1145,56 @@ def _compute_polynomial_degree(self, result): def getname(self, *args, **kwds): return 'div' - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): if verbose: - return "{0}({1}, {2})".format(self.getname(), values[0], values[1]) - return "{0}/{1}".format(values[0], values[1]) + return f"{self.getname()}({', '.join(values)})" + return f"{values[0]}/{values[1]}" def _apply_operation(self, result): return result[0] / result[1] -class NPV_DivisionExpression(NPV_Mixin, DivisionExpression): +class NPV_DivisionExpression(Numeric_NPV_Mixin, DivisionExpression): __slots__ = () -class _LinearOperatorExpression(ExpressionBase): +class SumExpression(NumericExpression): """ - An 'abstract' class that defines the polynomial degree for a simple - linear operator + Sum expression:: + + x + y + ... + + This node represents an "n-ary" sum expression over at least 2 arguments. + + Args: + args (list): Children nodes + """ - __slots__ = () + __slots__ = ('_nargs',) + PRECEDENCE = 6 + + def __init__(self, args): + # unlike other expressions, we expect (require) args to be a list + if args.__class__ is not list: + args = list(args) + self._args_ = args + self._nargs = len(args) + + def nargs(self): + return self._nargs + + @property + def args(self): + if len(self._args_) != self._nargs: + self._args_ = self._args_[: self._nargs] + return self._args_ + + def getname(self, *args, **kwds): + return 'sum' + + def _apply_operation(self, result): + return sum(result) def _compute_polynomial_degree(self, result): # NB: We can't use max() here because None (non-polynomial) @@ -928,152 +1207,163 @@ def _compute_polynomial_degree(self, result): ans = x return ans + def _to_string(self, values, verbose, smap): + if not values: + values = ['0'] + if verbose: + return f"{self.getname()}({', '.join(values)})" + term = values[0] + # TODO: remove space after negation for first term + # if term[0] in '-+': + # values[0] = term[0] + term[1:].strip() + for i in range(1, len(values)): + term = values[i] + # TODO: remove unnecessary parenthetical grouping + # (addition is the lowest priority algebraic operator, so if + # a term is enclosed in parens, then it is a nested sum.) + # if term[0] == '(' and term[-1] == ')' and _balanced_parens(term[1:-1]): + # term = term[1:-1] + if term[0] in '-+': + values[i] = term[0] + ' ' + term[1:].strip() + else: + values[i] = '+ ' + term.strip() + return ' '.join(values) -class SumExpressionBase(_LinearOperatorExpression): - """ - A base class for simple summation of expressions - - The class hierarchy for summation is different than for other - expression types. For example, ProductExpression defines - the class for representing binary products, and sub-classes are - specializations of that class. - - By contrast, the SumExpressionBase is not directly used to - represent expressions. Rather, this base class provides - commonly used methods and data. The reason is that some - subclasses of SumExpressionBase are binary while others - are n-ary. - - Thus, developers will need to treat checks for summation - classes differently, depending on whether the binary/n-ary - operations are different. - """ - - __slots__ = () - PRECEDENCE = 6 - - def _precedence(self): - return SumExpressionBase.PRECEDENCE + @deprecated( + "SumExpression.add() is deprecated. Please use regular Python operators " + "(infix '+' or inplace '+='.)", + version='6.6.0', + ) + def add(self, new_arg): + self += new_arg + return self - def getname(self, *args, **kwds): - return 'sum' +# TODO: deprecate this class name +SumExpressionBase = SumExpression -class NPV_SumExpression(NPV_Mixin, SumExpressionBase): - __slots__ = () - def create_potentially_variable_object(self): - return SumExpression( self._args_ ) +class LinearExpression(SumExpression): + """An expression object for linear polynomials. - def _apply_operation(self, result): - l_, r_ = result - return l_ + r_ + This is a derived :py:class`SumExpression` that guarantees all + arguments are either not potentially variable (e.g., native types, + Params, or NPV expressions) OR :py:class:`MonomialTermExpression` + objects. - def _to_string(self, values, verbose, smap, compute_values): - if verbose: - return "{0}({1}, {2})".format(self.getname(), values[0], values[1]) - if values[1][0] == '-': - return "{0} {1}".format(values[0],values[1]) - return "{0} + {1}".format(values[0],values[1]) + Args: + args (tuple): Children nodes - def create_node_with_local_data(self, args, classtype=None): - assert classtype is None - try: - npv_args = all( - type(arg) in native_types or not arg.is_potentially_variable() - for arg in args - ) - except AttributeError: - # We can hit this during expression replacement when the new - # type is not a PyomoObject type, but is not in the - # native_types set. We will play it safe and clear the NPV flag - npv_args = False - if npv_args: - return NPV_SumExpression(args) - else: - return SumExpression(args) + """ + __slots__ = () -class SumExpression(SumExpressionBase): - """ - Sum expression:: + _allowable_linear_expr_arg_types = set([MonomialTermExpression]) + _cache = (None, None, None, None) - x + y + def __init__(self, args=None, constant=None, linear_coefs=None, linear_vars=None): + """A linear expression of the form `const + sum_i(c_i*x_i)`. - Args: - args (list): Children nodes - """ - __slots__ = ('_nargs','_shared_args') - PRECEDENCE = 6 + You can specify `args` OR (`constant`, `linear_coefs`, and + `linear_vars`). If `args` is provided, it should be a list that + contains only constants, NPV objects/expressions, or + :py:class:`MonomialTermExpression` objects. Alternatively, you + can specify the constant, the list of linear_coefs and the list + of linear_vars separately. Note that these lists are NOT + preserved. - def __init__(self, args): - self._args_ = args - self._shared_args = False + """ + # I am not sure why LinearExpression allows omitting args, but + # it does. If they are provided, they should be the (non-zero) + # constant followed by MonomialTermExpressions. + if args is not None: + if not (constant is None and linear_coefs is None and linear_vars is None): + raise ValueError( + "Cannot specify both args and any of " + "{constant, linear_coefs, or linear_vars}" + ) + # unlike other expressions, we expect (require) args to be a list + if args.__class__ is not list: + args = list(args) + self._args_ = args + else: + self._args_ = [] + if constant is not None: + # Filter 0, but only if it is a native type + if constant.__class__ not in native_types or constant: + self._args_.append(constant) + if linear_vars is not None: + if linear_coefs is None or len(linear_vars) != len(linear_coefs): + raise ValueError( + f"linear_vars ({tostr(linear_vars)}) is not compatible " + f"with linear_coefs ({tostr(linear_coefs)})" + ) + self._args_.extend( + map(MonomialTermExpression, zip(linear_coefs, linear_vars)) + ) self._nargs = len(self._args_) - def add(self, new_arg): - if new_arg.__class__ in native_numeric_types and new_arg == 0: - return self - # Clone 'self', because SumExpression are immutable - self._shared_args = True - self = self.__class__(self._args_) - # - if new_arg.__class__ is SumExpression or new_arg.__class__ is _MutableSumExpression: - self._args_.extend( islice(new_arg._args_, new_arg._nargs) ) - elif not new_arg is None: - self._args_.append(new_arg) - self._nargs = len(self._args_) - return self + def _build_cache(self): + const = 0 + coef = [] + var = [] + for arg in self.args: + if arg.__class__ is MonomialTermExpression: + coef.append(arg._args_[0]) + var.append(arg._args_[1]) + else: + const += arg + LinearExpression._cache = (self, const, coef, var) - def nargs(self): - return self._nargs + @property + def constant(self): + if LinearExpression._cache[0] is not self: + self._build_cache() + return LinearExpression._cache[1] - def _precedence(self): - return SumExpression.PRECEDENCE + @property + def linear_coefs(self): + if LinearExpression._cache[0] is not self: + self._build_cache() + return LinearExpression._cache[2] - def _apply_operation(self, result): - return sum(result) + @property + def linear_vars(self): + if LinearExpression._cache[0] is not self: + self._build_cache() + return LinearExpression._cache[3] def create_node_with_local_data(self, args, classtype=None): - return super().create_node_with_local_data(list(args), classtype) - - def __getstate__(self): - state = super(SumExpression, self).__getstate__() - for i in SumExpression.__slots__: - state[i] = getattr(self, i) - return state + if classtype is None: + classtype = self.__class__ + if type(args) is not list: + args = list(args) + for i, arg in enumerate(args): + if arg.__class__ in self._allowable_linear_expr_arg_types: + # 99% of the time, the arg type hasn't changed + continue + elif arg.__class__ in native_numeric_types: + # native numbers are OK (that's part of the constant) + pass + elif not arg.is_potentially_variable(): + # NPV expressions are OK + pass + elif arg.is_variable_type(): + # vars are OK, but need to be mapped to monomial terms + args[i] = MonomialTermExpression((1, arg)) + continue + else: + # For anything else, convert this to a general sum + classtype = SumExpression + break + # We get here for new types (likely NPV types) -- + # remember them for when they show up again + self._allowable_linear_expr_arg_types.add(arg.__class__) + return super().create_node_with_local_data(args, classtype) - def is_constant(self): - # - # In most normal contexts, a SumExpression is - # non-constant. When Forming expressions, constant - # parameters are turned into numbers, which are - # simply added. Mutable parameters, variables and - # expressions are not constant. - # - return False - def _to_string(self, values, verbose, smap, compute_values): - if verbose: - tmp = [values[0]] - for i in range(1,len(values)): - tmp.append(", ") - tmp.append(values[i]) - return "{0}({1})".format(self.getname(), "".join(tmp)) - - tmp = [values[0]] - for i in range(1,len(values)): - if values[i][0] == '-': - tmp.append(' - ') - tmp.append(values[i][1:].strip()) - elif len(values[i]) > 3 and values[i][:2] == '(-' \ - and values[i][-1] == ')' and _balanced_parens(values[i][1:-1]): - tmp.append(' - ') - tmp.append(values[i][2:-1].strip()) - else: - tmp.append(' + ') - tmp.append(values[i]) - return ''.join(tmp) +class NPV_SumExpression(Numeric_NPV_Mixin, LinearExpression): + __slots__ = () class _MutableSumExpression(SumExpression): @@ -1087,118 +1377,109 @@ class _MutableSumExpression(SumExpression): __slots__ = () - def add(self, new_arg): - if new_arg.__class__ in native_numeric_types and new_arg == 0: - return self - # Do not clone 'self', because _MutableSumExpression are mutable - #self._shared_args = True - #self = self.__class__(list(self.args)) - # - if new_arg.__class__ is SumExpression or new_arg.__class__ is _MutableSumExpression: - self._args_.extend( islice(new_arg._args_, new_arg._nargs) ) - elif not new_arg is None: - self._args_.append(new_arg) - self._nargs = len(self._args_) - return self + def make_immutable(self): + self.__class__ = SumExpression + def __iadd__(self, other): + return _iadd_mutablesum_dispatcher[other.__class__](self, other) -class Expr_ifExpression(ExpressionBase): - """ - A logical if-then-else expression:: - Expr_if(IF_=x, THEN_=y, ELSE_=z) +class _MutableLinearExpression(_MutableSumExpression): + __slots__ = () - Args: - IF_ (expression): A relational expression - THEN_ (expression): An expression that is used if :attr:`IF_` is true. - ELSE_ (expression): An expression that is used if :attr:`IF_` is false. - """ - __slots__ = ('_if','_then','_else') + def make_immutable(self): + self.__class__ = LinearExpression - # **NOTE**: This class evaluates the branching "_if" expression - # on a number of occasions. It is important that - # one uses __call__ for value() and NOT bool(). + def __iadd__(self, other): + return _iadd_mutablelinear_dispatcher[other.__class__](self, other) - def __init__(self, IF_=None, THEN_=None, ELSE_=None): - if type(IF_) is tuple and THEN_==None and ELSE_==None: - IF_, THEN_, ELSE_ = IF_ - self._args_ = (IF_, THEN_, ELSE_) - self._if = IF_ - self._then = THEN_ - self._else = ELSE_ - if self._if.__class__ in native_numeric_types: - self._if = as_numeric(self._if) - def nargs(self): - return 3 +class _MutableNPVSumExpression(_MutableLinearExpression): + __slots__ = () - def __getstate__(self): - state = super(Expr_ifExpression, self).__getstate__() - for i in Expr_ifExpression.__slots__: - state[i] = getattr(self, i) - return state + def make_immutable(self): + self.__class__ = NPV_SumExpression - def getname(self, *args, **kwds): - return "Expr_if" + def __iadd__(self, other): + return _iadd_mutablenpvsum_dispatcher[other.__class__](self, other) - def _is_fixed(self, args): - assert(len(args) == 3) - if args[0]: # self._if.is_fixed(): - if args[1] and args[2]: - return True - if value(self._if): - return args[1] # self._then.is_fixed() - else: - return args[2] # self._else.is_fixed() - else: - return False - def is_constant(self): - if is_constant(self._if): - if value(self._if): - return is_constant(self._then) +class Expr_ifExpression(NumericExpression): + """A numeric ternary (if-then-else) expression:: + + Expr_if(IF=x, THEN=y, ELSE=z) + + Note that this is a mixed expression: `IF` can be numeric or logical; + `THEN` and `ELSE` are numeric, and the result is a numeric expression. + + """ + + __slots__ = () + + # This operator does not have an infix representation + PRECEDENCE = None + + # **NOTE**: This class evaluates the branching "_if" expression + # on a number of occasions. It is important that + # one uses __call__ for value() and NOT bool(). + + def nargs(self): + return 3 + + def getname(self, *args, **kwds): + return "Expr_if" + + def _is_fixed(self, args): + if args[0]: # if.is_fixed(): + if value(self._args_[0]): + return args[1] # then.is_fixed() else: - return is_constant(self._else) + return args[2] # else.is_fixed() else: return False - def is_potentially_variable(self): - return any(map(is_potentially_variable, self._args_)) - def _compute_polynomial_degree(self, result): _if, _then, _else = result if _if == 0: if _then == _else: + # It doesn't matter which branch is active return _then - try: - return _then if value(self._if) else _else - except ValueError: - pass + val = value(self.arg(0), exception=False) + if val is not None: + return _then if val else _else return None - def _to_string(self, values, verbose, smap, compute_values): - return '{0}( ( {1} ), then=( {2} ), else=( {3} ) )'.\ - format(self.getname(), self._if, self._then, self._else) + def _to_string(self, values, verbose, smap): + return ( + f'{self.getname()}( ( {values[0]} ), then=( {values[1]} ), ' + f'else=( {values[2]} ) )' + ) def _apply_operation(self, result): _if, _then, _else = result return _then if _if else _else -class UnaryFunctionExpression(ExpressionBase): +class NPV_Expr_ifExpression(Numeric_NPV_Mixin, Expr_ifExpression): + __slots__ = () + + +class UnaryFunctionExpression(NumericExpression): """ - An expression object used to define intrinsic functions (e.g. sin, cos, tan). + An expression object for intrinsic (math) functions (e.g. sin, cos, tan). Args: args (tuple): Children nodes name (string): The function name fcn: The function that is used to evaluate this expression """ + __slots__ = ('_fcn', '_name') + # This operator does not have an infix representation + PRECEDENCE = None + def __init__(self, args, name=None, fcn=None): - if type(args) is not tuple: - args = (args,) self._args_ = args self._name = name self._fcn = fcn @@ -1211,35 +1492,17 @@ def create_node_with_local_data(self, args, classtype=None): classtype = self.__class__ return classtype(args, self._name, self._fcn) - def __getstate__(self): - state = super(UnaryFunctionExpression, self).__getstate__() - for i in UnaryFunctionExpression.__slots__: - state[i] = getattr(self, i) - return state - def getname(self, *args, **kwds): return self._name - def _to_string(self, values, verbose, smap, compute_values): - if verbose: - return "{0}({1})".format(self.getname(), values[0]) - if values[0] and values[0][0] == '(' and values[0][-1] == ')' \ - and _balanced_parens(values[0][1:-1]): - return '{0}{1}'.format(self._name, values[0]) - else: - return '{0}({1})'.format(self._name, values[0]) - - def _compute_polynomial_degree(self, result): - if result[0] == 0: - return 0 - else: - return None + def _to_string(self, values, verbose, smap): + return f"{self.getname()}({', '.join(values)})" def _apply_operation(self, result): return self._fcn(result[0]) -class NPV_UnaryFunctionExpression(NPV_Mixin, UnaryFunctionExpression): +class NPV_UnaryFunctionExpression(Numeric_NPV_Mixin, UnaryFunctionExpression): __slots__ = () @@ -1252,289 +1515,34 @@ class AbsExpression(UnaryFunctionExpression): Args: args (tuple): Children nodes """ + __slots__ = () def __init__(self, arg): super(AbsExpression, self).__init__(arg, 'abs', abs) def create_node_with_local_data(self, args, classtype=None): + # Because this class removes arguments from the __init__, we + # also need to reimplement create_node_with_local_data to not + # add those arguments here. if classtype is None: classtype = self.__class__ return classtype(args) -class NPV_AbsExpression(NPV_Mixin, AbsExpression): - __slots__ = () - - -class LinearExpression(ExpressionBase): - """ - An expression object linear polynomials. - - Args: - args (tuple): Children nodes - """ - __slots__ = ( - 'constant', # The constant term - 'linear_coefs', # Linear coefficients - 'linear_vars', # Linear variables - '_args_cache_', - ) - - PRECEDENCE = 6 - - def __init__(self, args=None, constant=None, linear_coefs=None, linear_vars=None): - """A linear expression of the form `const + sum_i(c_i*x_i). - - You can specify args OR (constant, linear_coefs, and - linear_vars). If args is provided, it should be a list that - contains the constant, followed by a series of - :py:class:`MonomialTermExpression` objects. Alternatively, you - can specify the constant, the list of linear_coeffs and the list - of linear_vars separately. Note that these lists are NOT copied. - - """ - # I am not sure why LinearExpression allows omitting args, but - # it does. If they are provided, they should be the (non-zero) - # constant followed by MonomialTermExpressions. - if args: - if any(arg is not None for arg in - (constant, linear_coefs, linear_vars)): - raise ValueError("Cannot specify both args and any of " - "{constant, linear_coeffs, or linear_vars}") - if len(args) > 1 and (args[1].__class__ in native_types - or not args[1].is_potentially_variable()): - deprecation_warning( - "LinearExpression has been updated to expect args= to " - "be a constant followed by MonomialTermExpressions. " - "The older format (`[const, coefficient_1, ..., " - "variable_1, ...]`) is deprecated.", version='6.2') - args = args[:1] + list(map( - MonomialTermExpression, - zip(args[1:1+len(args)//2], args[1+len(args)//2:]))) - self._args_ = args - else: - self.constant = constant if constant is not None else 0 - self.linear_coefs = linear_coefs if linear_coefs else [] - self.linear_vars = linear_vars if linear_vars else [] - self._args_cache_ = [] - - def nargs(self): - return len(self.linear_vars) + ( - 0 if (self.constant is None - or (self.constant.__class__ in native_numeric_types - and not self.constant)) else 1 - ) - - @property - def _args_(self): - nargs = self.nargs() - if len(self._args_cache_) != nargs: - if len(self.linear_vars) == nargs: - self._args_cache_ = [] - else: - self._args_cache_ = [self.constant] - self._args_cache_.extend( - map(MonomialTermExpression, - zip(self.linear_coefs, self.linear_vars))) - elif len(self.linear_vars) != nargs: - self._args_cache_[0] = self.constant - return self._args_cache_ - - @_args_.setter - def _args_(self, value): - self._args_cache_ = list(value) - if not self._args_cache_: - self.constant = 0 - self.linear_coefs = [] - self.linear_vars = [] - return - if self._args_cache_[0].__class__ is not MonomialTermExpression: - self.constant = value[0] - first_var = 1 - else: - self.constant = 0 - first_var = 0 - self.linear_coefs, self.linear_vars = zip( - *map(attrgetter('args'), value[first_var:])) - self.linear_coefs = list(self.linear_coefs) - self.linear_vars = list(self.linear_vars) - - def _precedence(self): - return LinearExpression.PRECEDENCE - - # __getstate__ is not needed, as while we are defining local slots, - # all the data in the slot is redundant to the information already - # being pickled through the base class _args_ attribute. - - def create_node_with_local_data(self, args, classtype=None): - if classtype is not None: - return classtype(args) - else: - const = 0 - new_args = [] - for arg in args: - if arg.__class__ is MonomialTermExpression: - new_args.append(arg) - elif arg.__class__ in native_types or arg.is_constant(): - const += arg - else: - return SumExpression(args) - if not new_args: - return const - if const: - new_args.insert(0, const) - return self.__class__(new_args) - - def getname(self, *args, **kwds): - return 'sum' - - def _compute_polynomial_degree(self, result): - return 1 if not self.is_fixed() else 0 - - def is_constant(self): - return len(self.linear_vars) == 0 - - def _is_fixed(self, values=None): - return all(v.fixed for v in self.linear_vars) - - def is_fixed(self): - return self._is_fixed() - - def _to_string(self, values, verbose, smap, compute_values): - if not values: - values = ['0'] - if verbose: - return "%s(%s)" % (self.getname(), ', '.join(values)) - - for i in range(1, len(values)): - term = values[i] - if term[0] not in '+-': - values[i] = '+ ' + term - elif term[1] != ' ': - values[i] = term[0] + ' ' + term[1:] - return ' '.join(values) - - def is_potentially_variable(self): - return len(self.linear_vars) > 0 - - def _apply_operation(self, result): - return sum(result) - - #@profile - def _combine_expr(self, etype, _other): - if etype == _add or etype == _sub or etype == -_add or etype == -_sub: - # - # if etype == _sub, then _MutableLinearExpression - VAL - # if etype == -_sub, then VAL - _MutableLinearExpression - # - if etype == _sub: - omult = -1 - else: - omult = 1 - if etype == -_sub: - self.constant *= -1 - for i,c in enumerate(self.linear_coefs): - self.linear_coefs[i] = -c - - if _other.__class__ in native_numeric_types or not _other.is_potentially_variable(): - self.constant = self.constant + omult * _other - # - # WEH - These seem like uncommon cases, so I think we should defer processing them - # until _decompose_linear_terms - # - #elif _other.__class__ is _MutableLinearExpression: - # self.constant = self.constant + omult * _other.constant - # for c,v in zip(_other.linear_coefs, _other.linear_vars): - # self.linear_coefs.append(omult*c) - # self.linear_vars.append(v) - #elif _other.__class__ is SumExpression or _other.__class__ is _MutableSumExpression: - # for e in _other._args_: - # for c,v in _decompose_linear_terms(e, multiplier=omult): - # if v is None: - # self.constant += c - # else: - # self.linear_coefs.append(c) - # self.linear_vars.append(v) - else: - for c,v in _decompose_linear_terms(_other, multiplier=omult): - if v is None: - self.constant += c - else: - self.linear_coefs.append(c) - self.linear_vars.append(v) - - elif etype == _mul or etype == -_mul: - if _other.__class__ in native_numeric_types: - multiplier = _other - elif _other.is_potentially_variable(): - if len(self.linear_vars) > 0: - raise ValueError("Cannot multiply a linear expression with a variable expression") - # - # The linear expression is a constant, so re-initialize it with - # a single term that multiplies the expression by the constant value. - # - c_ = self.constant - self.constant = 0 - for c,v in _decompose_linear_terms(_other): - if v is None: - self.constant = c*c_ - else: - self.linear_vars.append(v) - self.linear_coefs.append(c*c_) - return self - else: - multiplier = _other - - if multiplier.__class__ in native_numeric_types and multiplier == 0: - self.constant = 0 - self.linear_vars = [] - self.linear_coefs = [] - else: - self.constant *= multiplier - for i,c in enumerate(self.linear_coefs): - self.linear_coefs[i] = c*multiplier - - elif etype == _div: - if _other.__class__ in native_numeric_types: - divisor = _other - elif _other.is_potentially_variable(): - raise ValueError("Unallowed operation on linear expression: division with a variable RHS") - else: - divisor = _other - self.constant /= divisor - for i,c in enumerate(self.linear_coefs): - self.linear_coefs[i] = c/divisor - - elif etype == -_div: - if self.is_potentially_variable(): - raise ValueError("Unallowed operation on linear expression: division with a variable RHS") - return _other / self.constant - - elif etype == _neg: - self.constant *= -1 - for i,c in enumerate(self.linear_coefs): - self.linear_coefs[i] = - c - - else: - raise ValueError("Unallowed operation on mutable linear expression: %d" % etype) #pragma: no cover - - return self - - -class _MutableLinearExpression(LinearExpression): +class NPV_AbsExpression(Numeric_NPV_Mixin, AbsExpression): __slots__ = () -#------------------------------------------------------- +# ----------------------------------------------------------------- # -# Functions used to generate expressions +# Functions for decomposing a linear expression into linear terms # -#------------------------------------------------------- +# ----------------------------------------------------------------- + def decompose_term(expr): - """ - A function that returns a tuple consisting of (1) a flag indicated + """A function that returns a tuple consisting of (1) a flag indicating whether the expression is linear, and (2) a list of tuples that represents the terms in the linear expression. @@ -1542,18 +1550,19 @@ def decompose_term(expr): expr (expression): The root node of an expression tree Returns: - A tuple with the form ``(flag, list)``. If :attr:`flag` is :const:`False`, then - a nonlinear term has been found, and :const:`list` is :const:`None`. - Otherwise, :const:`list` is a list of tuples: ``(coef, value)``. - If :attr:`value` is :const:`None`, then this - represents a constant term with value :attr:`coef`. Otherwise, - :attr:`value` is a variable object, and :attr:`coef` is the - numeric coefficient. + A tuple with the form ``(flag, list)``. If :attr:`flag` is + :const:`False`, then a nonlinear term has been found, and + :const:`list` is :const:`None`. Otherwise, :const:`list` is a + list of tuples: ``(coef, value)``. If :attr:`value` is + :const:`None`, then this represents a constant term with value + :attr:`coef`. Otherwise, :attr:`value` is a variable object, + and :attr:`coef` is the numeric coefficient. + """ if expr.__class__ in nonpyomo_leaf_types or not expr.is_potentially_variable(): - return True, [(expr,None)] + return True, [(expr, None)] elif expr.is_variable_type(): - return True, [(1,expr)] + return True, [(1, expr)] else: try: terms = [t_ for t_ in _decompose_linear_terms(expr)] @@ -1561,10 +1570,9 @@ def decompose_term(expr): except LinearDecompositionError: return False, None -class LinearDecompositionError(Exception): - def __init__(self, message): - super(LinearDecompositionError, self).__init__(message) +class LinearDecompositionError(Exception): + pass def _decompose_linear_terms(expr, multiplier=1): @@ -1586,445 +1594,2551 @@ def _decompose_linear_terms(expr, multiplier=1): :class:`LinearDecompositionError` if a nonlinear term is encountered. """ if expr.__class__ in native_numeric_types or not expr.is_potentially_variable(): - yield (multiplier*expr,None) + yield (multiplier * expr, None) elif expr.is_variable_type(): - yield (multiplier,expr) + yield (multiplier, expr) elif expr.__class__ is MonomialTermExpression: - yield (multiplier*expr._args_[0], expr._args_[1]) + yield (multiplier * expr._args_[0], expr._args_[1]) elif expr.__class__ is ProductExpression: - if expr._args_[0].__class__ in native_numeric_types or not expr._args_[0].is_potentially_variable(): - yield from _decompose_linear_terms(expr._args_[1], multiplier*expr._args_[0]) - elif expr._args_[1].__class__ in native_numeric_types or not expr._args_[1].is_potentially_variable(): - yield from _decompose_linear_terms(expr._args_[0], multiplier*expr._args_[1]) + if ( + expr._args_[0].__class__ in native_numeric_types + or not expr._args_[0].is_potentially_variable() + ): + yield from _decompose_linear_terms( + expr._args_[1], multiplier * expr._args_[0] + ) + elif ( + expr._args_[1].__class__ in native_numeric_types + or not expr._args_[1].is_potentially_variable() + ): + yield from _decompose_linear_terms( + expr._args_[0], multiplier * expr._args_[1] + ) else: - raise LinearDecompositionError("Quadratic terms exist in a product expression.") + raise LinearDecompositionError( + "Quadratic terms exist in a product expression." + ) elif expr.__class__ is DivisionExpression: - if expr._args_[1].__class__ in native_numeric_types or not expr._args_[1].is_potentially_variable(): - yield from _decompose_linear_terms(expr._args_[0], multiplier/expr._args_[1]) + if ( + expr._args_[1].__class__ in native_numeric_types + or not expr._args_[1].is_potentially_variable() + ): + yield from _decompose_linear_terms( + expr._args_[0], multiplier / expr._args_[1] + ) else: raise LinearDecompositionError("Unexpected nonlinear term (division)") - elif expr.__class__ is SumExpression or expr.__class__ is _MutableSumExpression: + elif isinstance(expr, SumExpression): for arg in expr.args: yield from _decompose_linear_terms(arg, multiplier) elif expr.__class__ is NegationExpression: yield from _decompose_linear_terms(expr._args_[0], -multiplier) - elif expr.__class__ is LinearExpression or expr.__class__ is _MutableLinearExpression: - if not (expr.constant.__class__ in native_numeric_types and expr.constant == 0): - yield (multiplier*expr.constant,None) - if len(expr.linear_coefs) > 0: - for c,v in zip(expr.linear_coefs, expr.linear_vars): - yield (multiplier*c,v) else: - raise LinearDecompositionError("Unexpected nonlinear term") #pragma: no cover - - -def _process_arg(obj): - # Note: caller is responsible for filtering out native types and - # expressions. - if not obj.is_numeric_type(): - if hasattr(obj, 'as_binary'): - # We assume non-numeric types that have an as_binary method - # are instances of AutoLinkedBooleanVar. Calling as_binary - # will return a valid Binary Var (and issue the appropriate - # deprecation warning) - obj = obj.as_binary() - else: - # User assistance: provide a helpful exception when using an - # indexed object in an expression - if obj.is_component_type() and obj.is_indexed(): - raise TypeError( - "Argument for expression is an indexed numeric " - "value\nspecified without an index:\n\t%s\nIs this " - "value defined over an index that you did not specify?" - % (obj.name, ) ) + raise LinearDecompositionError("Unexpected nonlinear term") - raise TypeError( - "Attempting to use a non-numeric type (%s) in a " - "numeric context" % (obj,)) - if obj.is_constant(): - # Resolve constants (e.g., immutable scalar Params & NumericConstants) - return value(obj) - return obj +# ------------------------------------------------------- +# +# Functions used to generate expressions +# +# ------------------------------------------------------- -#@profile -def _generate_sum_expression(etype, _self, _other): +class ARG_TYPE(enum.IntEnum): + MUTABLE = -2 + ASNUMERIC = -1 + INVALID = 0 + NATIVE = 1 + NPV = 2 + PARAM = 3 + VAR = 4 + MONOMIAL = 5 + LINEAR = 6 + SUM = 7 + OTHER = 8 - if etype > _inplace: - etype -= _inplace - if _self.__class__ is _MutableLinearExpression: - try: - if etype >= _unary: - return _self._combine_expr(etype, None) - if _other.__class__ is not _MutableLinearExpression: - if not (_other.__class__ in native_types or _other.is_expression_type()): - _other = _process_arg(_other) - return _self._combine_expr(etype, _other) - except LinearDecompositionError: - pass - elif _other.__class__ is _MutableLinearExpression: - try: - if not (_self.__class__ in native_types or _self.is_expression_type()): - _self = _process_arg(_self) - return _other._combine_expr(-etype, _self) - except LinearDecompositionError: - pass +_known_arg_types = {} - # - # A mutable sum is used as a context manager, so we don't - # need to process it to see if it's entangled. - # - if not (_self.__class__ in native_types or _self.is_expression_type()): - _self = _process_arg(_self) - - if etype == _neg: - if _self.__class__ in native_numeric_types: - return - _self - elif _self.__class__ is MonomialTermExpression: - tmp = _self._args_[0] - if tmp.__class__ in native_numeric_types: - return MonomialTermExpression((-tmp, _self._args_[1])) - else: - return MonomialTermExpression((NPV_NegationExpression((tmp,)), _self._args_[1])) - elif _self.is_variable_type(): - return MonomialTermExpression((-1, _self)) - elif _self.is_potentially_variable(): - return NegationExpression((_self,)) - else: - if _self.__class__ is NPV_NegationExpression: - return _self._args_[0] - return NPV_NegationExpression((_self,)) - - if not (_other.__class__ in native_types or _other.is_expression_type()): - _other = _process_arg(_other) - - if etype < 0: - # - # This may seem obvious, but if we are performing an - # "R"-operation (i.e. reverse operation), then simply reverse - # self and other. This is legitimate as we are generating a - # completely new expression here. - # - etype *= -1 - _self, _other = _other, _self - - if etype == _add: - # - # x + y - # - if (_self.__class__ is SumExpression and not _self._shared_args) or \ - _self.__class__ is _MutableSumExpression: - return _self.add(_other) - elif (_other.__class__ is SumExpression and not _other._shared_args) or \ - _other.__class__ is _MutableSumExpression: - return _other.add(_self) - elif _other.__class__ in native_numeric_types: - if _self.__class__ in native_numeric_types: - return _self + _other - elif _other == 0: - return _self - if _self.is_potentially_variable(): - return SumExpression([_self, _other]) - return NPV_SumExpression((_self, _other)) - elif _self.__class__ in native_numeric_types: - if _self == 0: - return _other - if _other.is_potentially_variable(): - #return _LinearSumExpression((_self, _other)) - return SumExpression([_self, _other]) - return NPV_SumExpression((_self, _other)) - elif _other.is_potentially_variable(): - #return _LinearSumExpression((_self, _other)) - return SumExpression([_self, _other]) - elif _self.is_potentially_variable(): - #return _LinearSumExpression((_other, _self)) - #return SumExpression([_other, _self]) - return SumExpression([_self, _other]) - else: - return NPV_SumExpression((_self, _other)) - - elif etype == _sub: - # - # x - y - # - if (_self.__class__ is SumExpression and not _self._shared_args) or \ - _self.__class__ is _MutableSumExpression: - return _self.add(-_other) - elif _other.__class__ in native_numeric_types: - if _self.__class__ in native_numeric_types: - return _self - _other - elif _other == 0: - return _self - if _self.is_potentially_variable(): - return SumExpression([_self, -_other]) - return NPV_SumExpression((_self, -_other)) - elif _self.__class__ in native_numeric_types: - if _self == 0: - if _other.__class__ is MonomialTermExpression: - tmp = _other._args_[0] - if tmp.__class__ in native_numeric_types: - return MonomialTermExpression((-tmp, _other._args_[1])) - return MonomialTermExpression((NPV_NegationExpression((_other._args_[0],)), _other._args_[1])) - elif _other.is_variable_type(): - return MonomialTermExpression((-1, _other)) - elif _other.is_potentially_variable(): - return NegationExpression((_other,)) - return NPV_NegationExpression((_other,)) - elif _other.__class__ is MonomialTermExpression: - return SumExpression([_self, MonomialTermExpression((-_other._args_[0], _other._args_[1]))]) - elif _other.is_variable_type(): - return SumExpression([_self, MonomialTermExpression((-1,_other))]) - elif _other.is_potentially_variable(): - return SumExpression([_self, NegationExpression((_other,))]) - return NPV_SumExpression((_self, NPV_NegationExpression((_other,)))) - elif _other.__class__ is MonomialTermExpression: - return SumExpression([_self, MonomialTermExpression((-_other._args_[0], _other._args_[1]))]) - elif _other.is_variable_type(): - return SumExpression([_self, MonomialTermExpression((-1,_other))]) - elif _other.is_potentially_variable(): - return SumExpression([_self, NegationExpression((_other,))]) - elif _self.is_potentially_variable(): - return SumExpression([_self, NPV_NegationExpression((_other,))]) - else: - return NPV_SumExpression((_self, NPV_NegationExpression((_other,)))) - raise RuntimeError("Unknown expression type '%s'" % etype) #pragma: no cover +def register_arg_type(arg_class, etype): + _known_arg_types.setdefault(arg_class, ARG_TYPE(etype)) -#@profile -def _generate_mul_expression(etype, _self, _other): - if etype > _inplace: - etype -= _inplace +def _categorize_arg_type(arg): + if arg.__class__ in _known_arg_types: + return _known_arg_types[arg.__class__] - if _self.__class__ is _MutableLinearExpression: - try: - if _other.__class__ is not _MutableLinearExpression: - if not (_other.__class__ in native_types or _other.is_expression_type()): - _other = _process_arg(_other) - return _self._combine_expr(etype, _other) - except LinearDecompositionError: - pass - elif _other.__class__ is _MutableLinearExpression: + if arg.__class__ in native_numeric_types: + ans = ARG_TYPE.NATIVE + else: try: - if not (_self.__class__ in native_types or _self.is_expression_type()): - _self = _process_arg(_self) - return _other._combine_expr(-etype, _self) - except LinearDecompositionError: - pass - - # - # A mutable sum is used as a context manager, so we don't - # need to process it to see if it's entangled. - # - if not (_self.__class__ in native_types or _self.is_expression_type()): - _self = _process_arg(_self) - - if not (_other.__class__ in native_types or _other.is_expression_type()): - _other = _process_arg(_other) - - if etype < 0: - # - # This may seem obvious, but if we are performing an - # "R"-operation (i.e. reverse operation), then simply reverse - # self and other. This is legitimate as we are generating a - # completely new expression here. - # - etype *= -1 - _self, _other = _other, _self - - if etype == _mul: - # - # x * y - # - if _other.__class__ in native_numeric_types: - if _self.__class__ in native_numeric_types: - return _self * _other - elif _other == 0: - return 0 - elif _other == 1: - return _self - if _self.is_variable_type(): - return MonomialTermExpression((_other, _self)) - elif _self.__class__ is MonomialTermExpression: - tmp = _self._args_[0] - if tmp.__class__ in native_numeric_types: - return MonomialTermExpression((_other*tmp, _self._args_[1])) - else: - return MonomialTermExpression((NPV_ProductExpression((_other,tmp)), _self._args_[1])) - elif _self.is_potentially_variable(): - return ProductExpression((_self, _other)) - return NPV_ProductExpression((_self, _other)) - elif _self.__class__ in native_numeric_types: - if _self == 0: - return 0 - elif _self == 1: - return _other - if _other.is_variable_type(): - return MonomialTermExpression((_self, _other)) - elif _other.__class__ is MonomialTermExpression: - tmp = _other._args_[0] - if tmp.__class__ in native_numeric_types: - return MonomialTermExpression((_self*tmp, _other._args_[1])) - else: - return MonomialTermExpression((NPV_ProductExpression((_self,tmp)), _other._args_[1])) - elif _other.is_potentially_variable(): - return ProductExpression((_self, _other)) - return NPV_ProductExpression((_self, _other)) - elif _other.is_variable_type(): - if _self.is_potentially_variable(): - return ProductExpression((_self, _other)) - return MonomialTermExpression((_self, _other)) - elif _other.is_potentially_variable(): - return ProductExpression((_self, _other)) - elif _self.is_variable_type(): - return MonomialTermExpression((_other, _self)) - elif _self.is_potentially_variable(): - return ProductExpression((_self, _other)) + is_numeric = arg.is_numeric_type() + except AttributeError: + if check_if_numeric_type(arg): + ans = ARG_TYPE.NATIVE + else: + ans = ARG_TYPE.INVALID else: - return NPV_ProductExpression((_self, _other)) - - elif etype == _div: - # - # x / y - # - if _other.__class__ in native_numeric_types: - if _other == 1: - return _self - elif not _other: - raise ZeroDivisionError() - elif _self.__class__ in native_numeric_types: - return _self / _other - if _self.is_variable_type(): - return MonomialTermExpression((1/_other, _self)) - elif _self.__class__ is MonomialTermExpression: - return MonomialTermExpression((_self._args_[0]/_other, _self._args_[1])) - elif _self.is_potentially_variable(): - return DivisionExpression((_self, _other)) - return NPV_DivisionExpression((_self, _other)) - elif _self.__class__ in native_numeric_types: - if _self == 0: - return 0 - elif _other.is_potentially_variable(): - return DivisionExpression((_self, _other)) - return NPV_DivisionExpression((_self, _other)) - elif _other.is_potentially_variable(): - return DivisionExpression((_self, _other)) - elif _self.is_potentially_variable(): - if _self.is_variable_type(): - return MonomialTermExpression((NPV_DivisionExpression((1, _other)), _self)) - return DivisionExpression((_self, _other)) + if is_numeric: + ans = None + elif hasattr(arg, 'as_numeric'): + ans = ARG_TYPE.ASNUMERIC + else: + ans = ARG_TYPE.INVALID + + if ans is None: + if arg.is_expression_type(): + # Note: this makes a strong assumption that NPV is a class + # attribute and not determined by the current expression + # arguments / state. + if not arg.is_potentially_variable(): + ans = ARG_TYPE.NPV + # TODO: remove NPV_expression_types + NPV_expression_types.add(arg.__class__) + elif isinstance(arg, _MutableSumExpression): + ans = ARG_TYPE.MUTABLE + elif arg.__class__ is MonomialTermExpression: + ans = ARG_TYPE.MONOMIAL + elif isinstance(arg, LinearExpression): + ans = ARG_TYPE.LINEAR + elif isinstance(arg, SumExpression): + ans = ARG_TYPE.SUM + else: + ans = ARG_TYPE.OTHER else: - return NPV_DivisionExpression((_self, _other)) + if not arg.is_potentially_variable(): + ans = ARG_TYPE.PARAM + elif arg.is_variable_type(): + ans = ARG_TYPE.VAR + else: + ans = ARG_TYPE.OTHER + register_arg_type(arg.__class__, ans) + return ans + + +def _categorize_arg_types(*args): + return tuple(_categorize_arg_type(arg) for arg in args) + - raise RuntimeError("Unknown expression type '%s'" % etype) #pragma: no cover +def _invalid(*args): + return NotImplemented -#@profile -def _generate_other_expression(etype, _self, _other): +def _recast_mutable(expr): + expr.make_immutable() + if expr._nargs > 1: + return expr + elif not expr._nargs: + return 0 + else: + return expr._args_[0] - if etype > _inplace: - etype -= _inplace +def _unary_op_dispatcher_type_mapping(dispatcher, updates): # - # A mutable sum is used as a context manager, so we don't - # need to process it to see if it's entangled. + # Special case (wrapping) operators # - if not (_self.__class__ in native_types or _self.is_expression_type()): - _self = _process_arg(_self) + def _asnumeric(a): + a = a.as_numeric() + return dispatcher[a.__class__](a) + + def _mutable(a): + a = _recast_mutable(a) + return dispatcher[a.__class__](a) + + mapping = { + ARG_TYPE.ASNUMERIC: _asnumeric, + ARG_TYPE.MUTABLE: _mutable, + ARG_TYPE.INVALID: _invalid, + } + mapping.update(updates) + return mapping + + +def _binary_op_dispatcher_type_mapping(dispatcher, updates): # - # abs(x) + # Special case (wrapping) operators # - if etype == _abs: - if _self.__class__ in native_numeric_types: - return abs(_self) - elif _self.is_potentially_variable(): - return AbsExpression(_self) + def _any_asnumeric(a, b): + b = b.as_numeric() + return dispatcher[a.__class__, b.__class__](a, b) + + def _asnumeric_any(a, b): + a = a.as_numeric() + return dispatcher[a.__class__, b.__class__](a, b) + + def _asnumeric_asnumeric(a, b): + a = a.as_numeric() + b = b.as_numeric() + return dispatcher[a.__class__, b.__class__](a, b) + + def _any_mutable(a, b): + b = _recast_mutable(b) + return dispatcher[a.__class__, b.__class__](a, b) + + def _mutable_any(a, b): + a = _recast_mutable(a) + return dispatcher[a.__class__, b.__class__](a, b) + + def _mutable_mutable(a, b): + if a is b: + a = b = _recast_mutable(a) else: - return NPV_AbsExpression(_self) - - if not (_other.__class__ in native_types or _other.is_expression_type()): - _other = _process_arg(_other) - - if etype < 0: - # - # This may seem obvious, but if we are performing an - # "R"-operation (i.e. reverse operation), then simply reverse - # self and other. This is legitimate as we are generating a - # completely new expression here. - # - etype *= -1 - _self, _other = _other, _self - - if etype == _pow: - if _other.__class__ in native_numeric_types: - if _other == 1: - return _self - elif not _other: - return 1 - elif _self.__class__ in native_numeric_types: - return _self ** _other - elif _self.is_potentially_variable(): - return PowExpression((_self, _other)) - return NPV_PowExpression((_self, _other)) - elif _self.__class__ in native_numeric_types: - if _other.is_potentially_variable(): - return PowExpression((_self, _other)) - return NPV_PowExpression((_self, _other)) - elif _self.is_potentially_variable() or _other.is_potentially_variable(): - return PowExpression((_self, _other)) - else: - return NPV_PowExpression((_self, _other)) + a = _recast_mutable(a) + b = _recast_mutable(b) + return dispatcher[a.__class__, b.__class__](a, b) - raise RuntimeError("Unknown expression type '%s'" % etype) #pragma: no cover + mapping = {} + mapping.update({(i, ARG_TYPE.ASNUMERIC): _any_asnumeric for i in ARG_TYPE}) + mapping.update({(ARG_TYPE.ASNUMERIC, i): _asnumeric_any for i in ARG_TYPE}) + mapping[ARG_TYPE.ASNUMERIC, ARG_TYPE.ASNUMERIC] = _asnumeric_asnumeric -def _generate_intrinsic_function_expression(arg, name, fcn): - if not (arg.__class__ in native_types or arg.is_expression_type()): - arg = _process_arg(arg) + mapping.update({(i, ARG_TYPE.MUTABLE): _any_mutable for i in ARG_TYPE}) + mapping.update({(ARG_TYPE.MUTABLE, i): _mutable_any for i in ARG_TYPE}) + mapping[ARG_TYPE.MUTABLE, ARG_TYPE.MUTABLE] = _mutable_mutable - if arg.__class__ in native_types: - return fcn(arg) - elif arg.is_potentially_variable(): - return UnaryFunctionExpression(arg, name, fcn) - else: - return NPV_UnaryFunctionExpression(arg, name, fcn) + mapping.update({(i, ARG_TYPE.INVALID): _invalid for i in ARG_TYPE}) + mapping.update({(ARG_TYPE.INVALID, i): _invalid for i in ARG_TYPE}) -def _balanced_parens(arg): - """Verify the string argument contains balanced parentheses. + mapping.update(updates) + return mapping - This checks that every open paren is balanced by a closed paren. - That is, the infix string expression is likely to be valid. This is - primarily used to determine if a string that starts and ends with - parens can have those parens removed. - Examples: - >>> a = "(( x + y ) * ( z - w ))" - >>> _balanced_parens(a[1:-1]) - True - >>> a = "( x + y ) * ( z - w )" - >>> _balanced_parens(a[1:-1]) - False - """ - _parenCount = 0 - for c in arg: - if c == '(': - _parenCount += 1 - elif c == ')': - _parenCount -= 1 - if _parenCount < 0: - return False - return _parenCount == 0 +# +# ADD: NATIVE handlers +# -NPV_expression_types = set( - [NPV_NegationExpression, - NPV_ExternalFunctionExpression, - NPV_PowExpression, - NPV_ProductExpression, - NPV_DivisionExpression, - NPV_SumExpression, - NPV_UnaryFunctionExpression, - NPV_AbsExpression]) +def _add_native_native(a, b): + # This can be hit because of the asnumeric / mutable wrapper handlers. + return a + b + + +def _add_native_npv(a, b): + if not a: + return b + return NPV_SumExpression([a, b]) + + +def _add_native_param(a, b): + if b.is_constant(): + return a + b.value + if not a: + return b + return NPV_SumExpression([a, b]) + + +def _add_native_var(a, b): + if not a: + return b + return LinearExpression([a, MonomialTermExpression((1, b))]) + + +def _add_native_monomial(a, b): + if not a: + return b + return LinearExpression([a, b]) + + +def _add_native_linear(a, b): + if not a: + return b + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_native_sum(a, b): + if not a: + return b + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_native_other(a, b): + if not a: + return b + return SumExpression([a, b]) + + +# +# ADD: NPV handlers +# + + +def _add_npv_native(a, b): + if not b: + return a + return NPV_SumExpression([a, b]) + + +def _add_npv_npv(a, b): + return NPV_SumExpression([a, b]) + + +def _add_npv_param(a, b): + if b.is_constant(): + b = b.value + if not b: + return a + return NPV_SumExpression([a, b]) + + +def _add_npv_var(a, b): + return LinearExpression([a, MonomialTermExpression((1, b))]) + + +def _add_npv_monomial(a, b): + return LinearExpression([a, b]) + + +def _add_npv_linear(a, b): + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_npv_sum(a, b): + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_npv_other(a, b): + return SumExpression([a, b]) + + +# +# ADD: PARAM handlers +# + + +def _add_param_native(a, b): + if a.is_constant(): + return a.value + b + if not b: + return a + return NPV_SumExpression([a, b]) + + +def _add_param_npv(a, b): + if a.is_constant(): + a = a.value + return a + b + return NPV_SumExpression([a, b]) + + +def _add_param_param(a, b): + if a.is_constant(): + a = a.value + if b.is_constant(): + return a + b.value + elif not a: + return b + elif b.is_constant(): + b = b.value + if not b: + return a + return NPV_SumExpression([a, b]) + + +def _add_param_var(a, b): + if a.is_constant(): + a = a.value + if not a: + return b + return LinearExpression([a, MonomialTermExpression((1, b))]) + + +def _add_param_monomial(a, b): + if a.is_constant(): + a = a.value + if not a: + return b + return LinearExpression([a, b]) + + +def _add_param_linear(a, b): + if a.is_constant(): + a = a.value + if not a: + return b + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_param_sum(a, b): + if a.is_constant(): + a = value(a) + if not a: + return b + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_param_other(a, b): + if a.is_constant(): + a = a.value + if not a: + return b + return SumExpression([a, b]) + + +# +# ADD: VAR handlers +# + + +def _add_var_native(a, b): + if not b: + return a + return LinearExpression([MonomialTermExpression((1, a)), b]) + + +def _add_var_npv(a, b): + return LinearExpression([MonomialTermExpression((1, a)), b]) + + +def _add_var_param(a, b): + if b.is_constant(): + b = b.value + if not b: + return a + return LinearExpression([MonomialTermExpression((1, a)), b]) + + +def _add_var_var(a, b): + return LinearExpression( + [MonomialTermExpression((1, a)), MonomialTermExpression((1, b))] + ) + + +def _add_var_monomial(a, b): + return LinearExpression([MonomialTermExpression((1, a)), b]) + +def _add_var_linear(a, b): + args = b.args + args.append(MonomialTermExpression((1, a))) + return b.__class__(args) + + +def _add_var_sum(a, b): + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_var_other(a, b): + return SumExpression([a, b]) + + +# +# ADD: MONOMIAL handlers +# + + +def _add_monomial_native(a, b): + if not b: + return a + return LinearExpression([a, b]) + + +def _add_monomial_npv(a, b): + return LinearExpression([a, b]) + + +def _add_monomial_param(a, b): + if b.is_constant(): + b = b.value + if not b: + return a + return LinearExpression([a, b]) + + +def _add_monomial_var(a, b): + return LinearExpression([a, MonomialTermExpression((1, b))]) + + +def _add_monomial_monomial(a, b): + return LinearExpression([a, b]) + + +def _add_monomial_linear(a, b): + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_monomial_sum(a, b): + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_monomial_other(a, b): + return SumExpression([a, b]) + + +# +# ADD: LINEAR handlers +# + + +def _add_linear_native(a, b): + if not b: + return a + args = a.args + args.append(b) + return a.__class__(args) + + +def _add_linear_npv(a, b): + args = a.args + args.append(b) + return a.__class__(args) + + +def _add_linear_param(a, b): + if b.is_constant(): + b = b.value + if not b: + return a + args = a.args + args.append(b) + return a.__class__(args) + + +def _add_linear_var(a, b): + args = a.args + args.append(MonomialTermExpression((1, b))) + return a.__class__(args) + + +def _add_linear_monomial(a, b): + args = a.args + args.append(b) + return a.__class__(args) + + +def _add_linear_linear(a, b): + args = a.args + args.extend(b.args) + return a.__class__(args) + + +def _add_linear_sum(a, b): + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_linear_other(a, b): + return SumExpression([a, b]) + + +# +# ADD: SUM handlers +# + + +def _add_sum_native(a, b): + if not b: + return a + args = a.args + args.append(b) + return a.__class__(args) + + +def _add_sum_npv(a, b): + args = a.args + args.append(b) + return a.__class__(args) + + +def _add_sum_param(a, b): + if b.is_constant(): + b = b.value + if not b: + return a + args = a.args + args.append(b) + return a.__class__(args) + + +def _add_sum_var(a, b): + args = a.args + args.append(b) + return a.__class__(args) + + +def _add_sum_monomial(a, b): + args = a.args + args.append(b) + return a.__class__(args) + + +def _add_sum_linear(a, b): + args = a.args + args.append(b) + return a.__class__(args) + + +def _add_sum_sum(a, b): + args = a.args + args.extend(b.args) + return a.__class__(args) + + +def _add_sum_other(a, b): + args = a.args + args.append(b) + return a.__class__(args) + + +# +# ADD: OTHER handlers +# + + +def _add_other_native(a, b): + if not b: + return a + return SumExpression([a, b]) + + +def _add_other_npv(a, b): + return SumExpression([a, b]) + + +def _add_other_param(a, b): + if b.is_constant(): + b = b.value + if not b: + return a + return SumExpression([a, b]) + + +def _add_other_var(a, b): + return SumExpression([a, b]) + + +def _add_other_monomial(a, b): + return SumExpression([a, b]) + + +def _add_other_linear(a, b): + return SumExpression([a, b]) + + +def _add_other_sum(a, b): + args = b.args + args.append(a) + return b.__class__(args) + + +def _add_other_other(a, b): + return SumExpression([a, b]) + + +def _register_new_add_handler(a, b): + types = _categorize_arg_types(a, b) + # Retrieve the appropriate handler, record it in the main + # _add_dispatcher dict (so this method is not called a second time for + # these types) + _add_dispatcher[a.__class__, b.__class__] = handler = _add_type_handler_mapping[ + types + ] + # Call the appropriate handler + return handler(a, b) + + +_add_dispatcher = collections.defaultdict(lambda: _register_new_add_handler) + +_add_type_handler_mapping = _binary_op_dispatcher_type_mapping( + _add_dispatcher, + { + (ARG_TYPE.NATIVE, ARG_TYPE.NATIVE): _add_native_native, + (ARG_TYPE.NATIVE, ARG_TYPE.NPV): _add_native_npv, + (ARG_TYPE.NATIVE, ARG_TYPE.PARAM): _add_native_param, + (ARG_TYPE.NATIVE, ARG_TYPE.VAR): _add_native_var, + (ARG_TYPE.NATIVE, ARG_TYPE.MONOMIAL): _add_native_monomial, + (ARG_TYPE.NATIVE, ARG_TYPE.LINEAR): _add_native_linear, + (ARG_TYPE.NATIVE, ARG_TYPE.SUM): _add_native_sum, + (ARG_TYPE.NATIVE, ARG_TYPE.OTHER): _add_native_other, + (ARG_TYPE.NPV, ARG_TYPE.NATIVE): _add_npv_native, + (ARG_TYPE.NPV, ARG_TYPE.NPV): _add_npv_npv, + (ARG_TYPE.NPV, ARG_TYPE.PARAM): _add_npv_param, + (ARG_TYPE.NPV, ARG_TYPE.VAR): _add_npv_var, + (ARG_TYPE.NPV, ARG_TYPE.MONOMIAL): _add_npv_monomial, + (ARG_TYPE.NPV, ARG_TYPE.LINEAR): _add_npv_linear, + (ARG_TYPE.NPV, ARG_TYPE.SUM): _add_npv_sum, + (ARG_TYPE.NPV, ARG_TYPE.OTHER): _add_npv_other, + (ARG_TYPE.PARAM, ARG_TYPE.NATIVE): _add_param_native, + (ARG_TYPE.PARAM, ARG_TYPE.NPV): _add_param_npv, + (ARG_TYPE.PARAM, ARG_TYPE.PARAM): _add_param_param, + (ARG_TYPE.PARAM, ARG_TYPE.VAR): _add_param_var, + (ARG_TYPE.PARAM, ARG_TYPE.MONOMIAL): _add_param_monomial, + (ARG_TYPE.PARAM, ARG_TYPE.LINEAR): _add_param_linear, + (ARG_TYPE.PARAM, ARG_TYPE.SUM): _add_param_sum, + (ARG_TYPE.PARAM, ARG_TYPE.OTHER): _add_param_other, + (ARG_TYPE.VAR, ARG_TYPE.NATIVE): _add_var_native, + (ARG_TYPE.VAR, ARG_TYPE.NPV): _add_var_npv, + (ARG_TYPE.VAR, ARG_TYPE.PARAM): _add_var_param, + (ARG_TYPE.VAR, ARG_TYPE.VAR): _add_var_var, + (ARG_TYPE.VAR, ARG_TYPE.MONOMIAL): _add_var_monomial, + (ARG_TYPE.VAR, ARG_TYPE.LINEAR): _add_var_linear, + (ARG_TYPE.VAR, ARG_TYPE.SUM): _add_var_sum, + (ARG_TYPE.VAR, ARG_TYPE.OTHER): _add_var_other, + (ARG_TYPE.MONOMIAL, ARG_TYPE.NATIVE): _add_monomial_native, + (ARG_TYPE.MONOMIAL, ARG_TYPE.NPV): _add_monomial_npv, + (ARG_TYPE.MONOMIAL, ARG_TYPE.PARAM): _add_monomial_param, + (ARG_TYPE.MONOMIAL, ARG_TYPE.VAR): _add_monomial_var, + (ARG_TYPE.MONOMIAL, ARG_TYPE.MONOMIAL): _add_monomial_monomial, + (ARG_TYPE.MONOMIAL, ARG_TYPE.LINEAR): _add_monomial_linear, + (ARG_TYPE.MONOMIAL, ARG_TYPE.SUM): _add_monomial_sum, + (ARG_TYPE.MONOMIAL, ARG_TYPE.OTHER): _add_monomial_other, + (ARG_TYPE.LINEAR, ARG_TYPE.NATIVE): _add_linear_native, + (ARG_TYPE.LINEAR, ARG_TYPE.NPV): _add_linear_npv, + (ARG_TYPE.LINEAR, ARG_TYPE.PARAM): _add_linear_param, + (ARG_TYPE.LINEAR, ARG_TYPE.VAR): _add_linear_var, + (ARG_TYPE.LINEAR, ARG_TYPE.MONOMIAL): _add_linear_monomial, + (ARG_TYPE.LINEAR, ARG_TYPE.LINEAR): _add_linear_linear, + (ARG_TYPE.LINEAR, ARG_TYPE.SUM): _add_linear_sum, + (ARG_TYPE.LINEAR, ARG_TYPE.OTHER): _add_linear_other, + (ARG_TYPE.SUM, ARG_TYPE.NATIVE): _add_sum_native, + (ARG_TYPE.SUM, ARG_TYPE.NPV): _add_sum_npv, + (ARG_TYPE.SUM, ARG_TYPE.PARAM): _add_sum_param, + (ARG_TYPE.SUM, ARG_TYPE.VAR): _add_sum_var, + (ARG_TYPE.SUM, ARG_TYPE.MONOMIAL): _add_sum_monomial, + (ARG_TYPE.SUM, ARG_TYPE.LINEAR): _add_sum_linear, + (ARG_TYPE.SUM, ARG_TYPE.SUM): _add_sum_sum, + (ARG_TYPE.SUM, ARG_TYPE.OTHER): _add_sum_other, + (ARG_TYPE.OTHER, ARG_TYPE.NATIVE): _add_other_native, + (ARG_TYPE.OTHER, ARG_TYPE.NPV): _add_other_npv, + (ARG_TYPE.OTHER, ARG_TYPE.PARAM): _add_other_param, + (ARG_TYPE.OTHER, ARG_TYPE.VAR): _add_other_var, + (ARG_TYPE.OTHER, ARG_TYPE.MONOMIAL): _add_other_monomial, + (ARG_TYPE.OTHER, ARG_TYPE.LINEAR): _add_other_linear, + (ARG_TYPE.OTHER, ARG_TYPE.SUM): _add_other_sum, + (ARG_TYPE.OTHER, ARG_TYPE.OTHER): _add_other_other, + }, +) + +# +# MUTABLENPVSUM __iadd__ handlers +# + + +def _iadd_mutablenpvsum_asnumeric(a, b): + b = b.as_numeric() + return _iadd_mutablenpvsum_dispatcher[b.__class__](a, b) + + +def _iadd_mutablenpvsum_mutable(a, b): + b = _recast_mutable(b) + return _iadd_mutablenpvsum_dispatcher[b.__class__](a, b) + + +def _iadd_mutablenpvsum_native(a, b): + if not b: + return a + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablenpvsum_npv(a, b): + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablenpvsum_param(a, b): + if b.is_constant(): + b = b.value + if not b: + return a + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablenpvsum_var(a, b): + a.__class__ = _MutableLinearExpression + return _iadd_mutablelinear_var(a, b) + + +def _iadd_mutablenpvsum_monomial(a, b): + a.__class__ = _MutableLinearExpression + return _iadd_mutablelinear_monomial(a, b) + + +def _iadd_mutablenpvsum_linear(a, b): + a.__class__ = _MutableLinearExpression + return _iadd_mutablelinear_linear(a, b) + + +def _iadd_mutablenpvsum_sum(a, b): + a.__class__ = _MutableSumExpression + return _iadd_mutablesum_sum(a, b) + + +def _iadd_mutablenpvsum_other(a, b): + a.__class__ = _MutableSumExpression + return _iadd_mutablesum_other(a, b) + + +_iadd_mutablenpvsum_type_handler_mapping = { + ARG_TYPE.INVALID: _invalid, + ARG_TYPE.ASNUMERIC: _iadd_mutablenpvsum_asnumeric, + ARG_TYPE.MUTABLE: _iadd_mutablenpvsum_mutable, + ARG_TYPE.NATIVE: _iadd_mutablenpvsum_native, + ARG_TYPE.NPV: _iadd_mutablenpvsum_npv, + ARG_TYPE.PARAM: _iadd_mutablenpvsum_param, + ARG_TYPE.VAR: _iadd_mutablenpvsum_var, + ARG_TYPE.MONOMIAL: _iadd_mutablenpvsum_monomial, + ARG_TYPE.LINEAR: _iadd_mutablenpvsum_linear, + ARG_TYPE.SUM: _iadd_mutablenpvsum_sum, + ARG_TYPE.OTHER: _iadd_mutablenpvsum_other, +} + + +def _register_new_iadd_mutablenpvsum_handler(a, b): + types = _categorize_arg_types(b) + # Retrieve the appropriate handler, record it in the main + # _iadd_mutablenpvsum_dispatcher dict (so this method is not called a second time for + # these types) + _iadd_mutablenpvsum_dispatcher[ + b.__class__ + ] = handler = _iadd_mutablenpvsum_type_handler_mapping[types[0]] + # Call the appropriate handler + return handler(a, b) + + +_iadd_mutablenpvsum_dispatcher = collections.defaultdict( + lambda: _register_new_iadd_mutablenpvsum_handler +) + + +# +# MUTABLELINEAR __iadd__ handlers +# + + +def _iadd_mutablelinear_asnumeric(a, b): + b = b.as_numeric() + return _iadd_mutablelinear_dispatcher[b.__class__](a, b) + + +def _iadd_mutablelinear_mutable(a, b): + b = _recast_mutable(b) + return _iadd_mutablelinear_dispatcher[b.__class__](a, b) + + +def _iadd_mutablelinear_native(a, b): + if not b: + return a + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablelinear_npv(a, b): + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablelinear_param(a, b): + if b.is_constant(): + b = b.value + if not b: + return a + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablelinear_var(a, b): + a._args_.append(MonomialTermExpression((1, b))) + a._nargs += 1 + return a + + +def _iadd_mutablelinear_monomial(a, b): + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablelinear_linear(a, b): + a._args_.extend(b.args) + a._nargs += b.nargs() + return a + + +def _iadd_mutablelinear_sum(a, b): + a.__class__ = _MutableSumExpression + return _iadd_mutablesum_sum(a, b) + + +def _iadd_mutablelinear_other(a, b): + a.__class__ = _MutableSumExpression + return _iadd_mutablesum_other(a, b) + + +_iadd_mutablelinear_type_handler_mapping = { + ARG_TYPE.INVALID: _invalid, + ARG_TYPE.ASNUMERIC: _iadd_mutablelinear_asnumeric, + ARG_TYPE.MUTABLE: _iadd_mutablelinear_mutable, + ARG_TYPE.NATIVE: _iadd_mutablelinear_native, + ARG_TYPE.NPV: _iadd_mutablelinear_npv, + ARG_TYPE.PARAM: _iadd_mutablelinear_param, + ARG_TYPE.VAR: _iadd_mutablelinear_var, + ARG_TYPE.MONOMIAL: _iadd_mutablelinear_monomial, + ARG_TYPE.LINEAR: _iadd_mutablelinear_linear, + ARG_TYPE.SUM: _iadd_mutablelinear_sum, + ARG_TYPE.OTHER: _iadd_mutablelinear_other, +} + + +def _register_new_iadd_mutablelinear_handler(a, b): + types = _categorize_arg_types(b) + # Retrieve the appropriate handler, record it in the main + # _iadd_mutablelinear_dispatcher dict (so this method is not called a second time for + # these types) + _iadd_mutablelinear_dispatcher[ + b.__class__ + ] = handler = _iadd_mutablelinear_type_handler_mapping[types[0]] + # Call the appropriate handler + return handler(a, b) + + +_iadd_mutablelinear_dispatcher = collections.defaultdict( + lambda: _register_new_iadd_mutablelinear_handler +) + + +# +# MUTABLESUM __iadd__ handlers +# + + +def _iadd_mutablesum_asnumeric(a, b): + b = b.as_numeric() + return _iadd_mutablesum_dispatcher[b.__class__](a, b) + + +def _iadd_mutablesum_mutable(a, b): + b = _recast_mutable(b) + return _iadd_mutablesum_dispatcher[b.__class__](a, b) + + +def _iadd_mutablesum_native(a, b): + if not b: + return a + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablesum_npv(a, b): + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablesum_param(a, b): + if b.is_constant(): + b = b.value + if not b: + return a + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablesum_var(a, b): + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablesum_monomial(a, b): + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablesum_linear(a, b): + a._args_.append(b) + a._nargs += 1 + return a + + +def _iadd_mutablesum_sum(a, b): + a._args_.extend(b.args) + a._nargs += b.nargs() + return a + + +def _iadd_mutablesum_other(a, b): + a._args_.append(b) + a._nargs += 1 + return a + + +_iadd_mutablesum_type_handler_mapping = { + ARG_TYPE.INVALID: _invalid, + ARG_TYPE.ASNUMERIC: _iadd_mutablesum_asnumeric, + ARG_TYPE.MUTABLE: _iadd_mutablesum_mutable, + ARG_TYPE.NATIVE: _iadd_mutablesum_native, + ARG_TYPE.NPV: _iadd_mutablesum_npv, + ARG_TYPE.PARAM: _iadd_mutablesum_param, + ARG_TYPE.VAR: _iadd_mutablesum_var, + ARG_TYPE.MONOMIAL: _iadd_mutablesum_monomial, + ARG_TYPE.LINEAR: _iadd_mutablesum_linear, + ARG_TYPE.SUM: _iadd_mutablesum_sum, + ARG_TYPE.OTHER: _iadd_mutablesum_other, +} + + +def _register_new_iadd_mutablesum_handler(a, b): + types = _categorize_arg_types(b) + # Retrieve the appropriate handler, record it in the main + # _iadd_mutablesum_dispatcher dict (so this method is not called a + # second time for these types) + _iadd_mutablesum_dispatcher[ + b.__class__ + ] = handler = _iadd_mutablesum_type_handler_mapping[types[0]] + # Call the appropriate handler + return handler(a, b) + + +_iadd_mutablesum_dispatcher = collections.defaultdict( + lambda: _register_new_iadd_mutablesum_handler +) + + +# +# NEGATION handlers +# + + +def _neg_native(a): + # This can be hit because of the asnumeric / mutable wrapper handlers. + return -a + + +def _neg_npv(a): + # This can be hit because of the asnumeric / mutable wrapper handlers. + return NPV_NegationExpression((a,)) + + +def _neg_param(a): + if a.is_constant(): + return -(a.value) + return NPV_NegationExpression((a,)) + + +def _neg_var(a): + return MonomialTermExpression((-1, a)) + + +def _neg_monomial(a): + args = a.args + return MonomialTermExpression((-args[0], args[1])) + + +def _neg_sum(a): + if not a.nargs(): + return 0 + # return LinearExpression([-arg for arg in a.args]) + return NegationExpression((a,)) + + +def _neg_other(a): + return NegationExpression((a,)) + + +def _register_new_neg_handler(a): + types = _categorize_arg_types(a) + # Retrieve the appropriate handler, record it in the main + # _neg_dispatcher dict (so this method is not called a second time for + # these types) + _neg_dispatcher[a.__class__] = handler = _neg_type_handler_mapping[types[0]] + # Call the appropriate handler + return handler(a) + + +_neg_dispatcher = collections.defaultdict(lambda: _register_new_neg_handler) + +_neg_type_handler_mapping = _unary_op_dispatcher_type_mapping( + _neg_dispatcher, + { + ARG_TYPE.NATIVE: _neg_native, + ARG_TYPE.NPV: _neg_npv, + ARG_TYPE.PARAM: _neg_param, + ARG_TYPE.VAR: _neg_var, + ARG_TYPE.MONOMIAL: _neg_monomial, + ARG_TYPE.LINEAR: _neg_sum, + ARG_TYPE.SUM: _neg_sum, + ARG_TYPE.OTHER: _neg_other, + }, +) + + +# +# MUL: NATIVE handlers +# + + +def _mul_native_native(a, b): + # This can be hit because of the asnumeric / mutable wrapper handlers. + return a * b + + +def _mul_native_npv(a, b): + if a in _zero_one_optimizations: + return b if a else 0 + return NPV_ProductExpression((a, b)) + + +def _mul_native_param(a, b): + if a in _zero_one_optimizations: + return b if a else 0 + if b.is_constant(): + return a * b.value + return NPV_ProductExpression((a, b)) + + +def _mul_native_var(a, b): + if a in _zero_one_optimizations: + return b if a else 0 + return MonomialTermExpression((a, b)) + + +def _mul_native_monomial(a, b): + if a in _zero_one_optimizations: + return b if a else 0 + return MonomialTermExpression((a * b._args_[0], b._args_[1])) + + +def _mul_native_linear(a, b): + if a in _zero_one_optimizations: + return b if a else 0 + return ProductExpression((a, b)) + + +def _mul_native_sum(a, b): + if a in _zero_one_optimizations: + return b if a else 0 + return ProductExpression((a, b)) + + +def _mul_native_other(a, b): + if a in _zero_one_optimizations: + return b if a else 0 + return ProductExpression((a, b)) + + +# +# MUL: NPV handlers +# + + +def _mul_npv_native(a, b): + if b in _zero_one_optimizations: + return a if b else 0 + return NPV_ProductExpression((a, b)) + + +def _mul_npv_npv(a, b): + return NPV_ProductExpression((a, b)) + + +def _mul_npv_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations: + return a if b else 0 + return NPV_ProductExpression((a, b)) + + +def _mul_npv_var(a, b): + return MonomialTermExpression((a, b)) + + +def _mul_npv_monomial(a, b): + return MonomialTermExpression( + (NPV_ProductExpression((a, b._args_[0])), b._args_[1]) + ) + + +def _mul_npv_linear(a, b): + return ProductExpression((a, b)) + + +def _mul_npv_sum(a, b): + return ProductExpression((a, b)) + + +def _mul_npv_other(a, b): + return ProductExpression((a, b)) + + +# +# MUL: PARAM handlers +# + + +def _mul_param_native(a, b): + if a.is_constant(): + return a.value * b + if b in _zero_one_optimizations: + return a if b else 0 + return NPV_ProductExpression((a, b)) + + +def _mul_param_npv(a, b): + if a.is_constant(): + a = a.value + if a in _zero_one_optimizations: + return b if a else 0 + return NPV_ProductExpression((a, b)) + + +def _mul_param_param(a, b): + if a.is_constant(): + a = a.value + if a in _zero_one_optimizations: + return b if a else 0 + if b.is_constant(): + return a * b.value + elif b.is_constant(): + b = b.value + if b in _zero_one_optimizations: + return a if b else 0 + return NPV_ProductExpression((a, b)) + + +def _mul_param_var(a, b): + if a.is_constant(): + a = a.value + if a in _zero_one_optimizations: + return b if a else 0 + return MonomialTermExpression((a, b)) + + +def _mul_param_monomial(a, b): + if a.is_constant(): + a = a.value + if a in _zero_one_optimizations: + return b if a else 0 + return MonomialTermExpression((a * b._args_[0], b._args_[1])) + + +def _mul_param_linear(a, b): + if a.is_constant(): + a = a.value + if a in _zero_one_optimizations: + return b if a else 0 + return ProductExpression((a, b)) + + +def _mul_param_sum(a, b): + if a.is_constant(): + a = value(a) + if a in _zero_one_optimizations: + return b if a else 0 + return ProductExpression((a, b)) + + +def _mul_param_other(a, b): + if a.is_constant(): + a = a.value + if a in _zero_one_optimizations: + return b if a else 0 + return ProductExpression((a, b)) + + +# +# MUL: VAR handlers +# + + +def _mul_var_native(a, b): + if b in _zero_one_optimizations: + return a if b else 0 + return MonomialTermExpression((b, a)) + + +def _mul_var_npv(a, b): + return MonomialTermExpression((b, a)) + + +def _mul_var_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations: + return a if b else 0 + return MonomialTermExpression((b, a)) + + +def _mul_var_var(a, b): + return ProductExpression((a, b)) + + +def _mul_var_monomial(a, b): + return ProductExpression((a, b)) + + +def _mul_var_linear(a, b): + return ProductExpression((a, b)) + + +def _mul_var_sum(a, b): + return ProductExpression((a, b)) + + +def _mul_var_other(a, b): + return ProductExpression((a, b)) + + +# +# MUL: MONOMIAL handlers +# + + +def _mul_monomial_native(a, b): + if b in _zero_one_optimizations: + return a if b else 0 + return MonomialTermExpression((a._args_[0] * b, a._args_[1])) + + +def _mul_monomial_npv(a, b): + return MonomialTermExpression( + (NPV_ProductExpression((a._args_[0], b)), a._args_[1]) + ) + + +def _mul_monomial_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations: + return a if b else 0 + return MonomialTermExpression((a._args_[0] * b, a._args_[1])) + + +def _mul_monomial_var(a, b): + return ProductExpression((a, b)) + + +def _mul_monomial_monomial(a, b): + return ProductExpression((a, b)) + + +def _mul_monomial_linear(a, b): + return ProductExpression((a, b)) + + +def _mul_monomial_sum(a, b): + return ProductExpression((a, b)) + + +def _mul_monomial_other(a, b): + return ProductExpression((a, b)) + + +# +# MUL: LINEAR handlers +# + + +def _mul_linear_native(a, b): + if b in _zero_one_optimizations: + return a if b else 0 + return ProductExpression((a, b)) + + +def _mul_linear_npv(a, b): + return ProductExpression((a, b)) + + +def _mul_linear_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations: + return a if b else 0 + return ProductExpression((a, b)) + + +def _mul_linear_var(a, b): + return ProductExpression((a, b)) + + +def _mul_linear_monomial(a, b): + return ProductExpression((a, b)) + + +def _mul_linear_linear(a, b): + return ProductExpression((a, b)) + + +def _mul_linear_sum(a, b): + return ProductExpression((a, b)) + + +def _mul_linear_other(a, b): + return ProductExpression((a, b)) + + +# +# MUL: SUM handlers +# + + +def _mul_sum_native(a, b): + if b in _zero_one_optimizations: + return a if b else 0 + return ProductExpression((a, b)) + + +def _mul_sum_npv(a, b): + return ProductExpression((a, b)) + + +def _mul_sum_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations: + return a if b else 0 + return ProductExpression((a, b)) + + +def _mul_sum_var(a, b): + return ProductExpression((a, b)) + + +def _mul_sum_monomial(a, b): + return ProductExpression((a, b)) + + +def _mul_sum_linear(a, b): + return ProductExpression((a, b)) + + +def _mul_sum_sum(a, b): + return ProductExpression((a, b)) + + +def _mul_sum_other(a, b): + return ProductExpression((a, b)) + + +# +# MUL: OTHER handlers +# + + +def _mul_other_native(a, b): + if b in _zero_one_optimizations: + return a if b else 0 + return ProductExpression((a, b)) + + +def _mul_other_npv(a, b): + return ProductExpression((a, b)) + + +def _mul_other_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations: + return a if b else 0 + return ProductExpression((a, b)) + + +def _mul_other_var(a, b): + return ProductExpression((a, b)) + + +def _mul_other_monomial(a, b): + return ProductExpression((a, b)) + + +def _mul_other_linear(a, b): + return ProductExpression((a, b)) + + +def _mul_other_sum(a, b): + return ProductExpression((a, b)) + + +def _mul_other_other(a, b): + return ProductExpression((a, b)) + + +def _register_new_mul_handler(a, b): + types = _categorize_arg_types(a, b) + # Retrieve the appropriate handler, record it in the main + # _mul_dispatcher dict (so this method is not called a second time for + # these types) + _mul_dispatcher[a.__class__, b.__class__] = handler = _mul_type_handler_mapping[ + types + ] + # Call the appropriate handler + return handler(a, b) + + +_mul_dispatcher = collections.defaultdict(lambda: _register_new_mul_handler) + +_mul_type_handler_mapping = _binary_op_dispatcher_type_mapping( + _mul_dispatcher, + { + (ARG_TYPE.NATIVE, ARG_TYPE.NATIVE): _mul_native_native, + (ARG_TYPE.NATIVE, ARG_TYPE.NPV): _mul_native_npv, + (ARG_TYPE.NATIVE, ARG_TYPE.PARAM): _mul_native_param, + (ARG_TYPE.NATIVE, ARG_TYPE.VAR): _mul_native_var, + (ARG_TYPE.NATIVE, ARG_TYPE.MONOMIAL): _mul_native_monomial, + (ARG_TYPE.NATIVE, ARG_TYPE.LINEAR): _mul_native_linear, + (ARG_TYPE.NATIVE, ARG_TYPE.SUM): _mul_native_sum, + (ARG_TYPE.NATIVE, ARG_TYPE.OTHER): _mul_native_other, + (ARG_TYPE.NPV, ARG_TYPE.NATIVE): _mul_npv_native, + (ARG_TYPE.NPV, ARG_TYPE.NPV): _mul_npv_npv, + (ARG_TYPE.NPV, ARG_TYPE.PARAM): _mul_npv_param, + (ARG_TYPE.NPV, ARG_TYPE.VAR): _mul_npv_var, + (ARG_TYPE.NPV, ARG_TYPE.MONOMIAL): _mul_npv_monomial, + (ARG_TYPE.NPV, ARG_TYPE.LINEAR): _mul_npv_linear, + (ARG_TYPE.NPV, ARG_TYPE.SUM): _mul_npv_sum, + (ARG_TYPE.NPV, ARG_TYPE.OTHER): _mul_npv_other, + (ARG_TYPE.PARAM, ARG_TYPE.NATIVE): _mul_param_native, + (ARG_TYPE.PARAM, ARG_TYPE.NPV): _mul_param_npv, + (ARG_TYPE.PARAM, ARG_TYPE.PARAM): _mul_param_param, + (ARG_TYPE.PARAM, ARG_TYPE.VAR): _mul_param_var, + (ARG_TYPE.PARAM, ARG_TYPE.MONOMIAL): _mul_param_monomial, + (ARG_TYPE.PARAM, ARG_TYPE.LINEAR): _mul_param_linear, + (ARG_TYPE.PARAM, ARG_TYPE.SUM): _mul_param_sum, + (ARG_TYPE.PARAM, ARG_TYPE.OTHER): _mul_param_other, + (ARG_TYPE.VAR, ARG_TYPE.NATIVE): _mul_var_native, + (ARG_TYPE.VAR, ARG_TYPE.NPV): _mul_var_npv, + (ARG_TYPE.VAR, ARG_TYPE.PARAM): _mul_var_param, + (ARG_TYPE.VAR, ARG_TYPE.VAR): _mul_var_var, + (ARG_TYPE.VAR, ARG_TYPE.MONOMIAL): _mul_var_monomial, + (ARG_TYPE.VAR, ARG_TYPE.LINEAR): _mul_var_linear, + (ARG_TYPE.VAR, ARG_TYPE.SUM): _mul_var_sum, + (ARG_TYPE.VAR, ARG_TYPE.OTHER): _mul_var_other, + (ARG_TYPE.MONOMIAL, ARG_TYPE.NATIVE): _mul_monomial_native, + (ARG_TYPE.MONOMIAL, ARG_TYPE.NPV): _mul_monomial_npv, + (ARG_TYPE.MONOMIAL, ARG_TYPE.PARAM): _mul_monomial_param, + (ARG_TYPE.MONOMIAL, ARG_TYPE.VAR): _mul_monomial_var, + (ARG_TYPE.MONOMIAL, ARG_TYPE.MONOMIAL): _mul_monomial_monomial, + (ARG_TYPE.MONOMIAL, ARG_TYPE.LINEAR): _mul_monomial_linear, + (ARG_TYPE.MONOMIAL, ARG_TYPE.SUM): _mul_monomial_sum, + (ARG_TYPE.MONOMIAL, ARG_TYPE.OTHER): _mul_monomial_other, + (ARG_TYPE.LINEAR, ARG_TYPE.NATIVE): _mul_linear_native, + (ARG_TYPE.LINEAR, ARG_TYPE.NPV): _mul_linear_npv, + (ARG_TYPE.LINEAR, ARG_TYPE.PARAM): _mul_linear_param, + (ARG_TYPE.LINEAR, ARG_TYPE.VAR): _mul_linear_var, + (ARG_TYPE.LINEAR, ARG_TYPE.MONOMIAL): _mul_linear_monomial, + (ARG_TYPE.LINEAR, ARG_TYPE.LINEAR): _mul_linear_linear, + (ARG_TYPE.LINEAR, ARG_TYPE.SUM): _mul_linear_sum, + (ARG_TYPE.LINEAR, ARG_TYPE.OTHER): _mul_linear_other, + (ARG_TYPE.SUM, ARG_TYPE.NATIVE): _mul_sum_native, + (ARG_TYPE.SUM, ARG_TYPE.NPV): _mul_sum_npv, + (ARG_TYPE.SUM, ARG_TYPE.PARAM): _mul_sum_param, + (ARG_TYPE.SUM, ARG_TYPE.VAR): _mul_sum_var, + (ARG_TYPE.SUM, ARG_TYPE.MONOMIAL): _mul_sum_monomial, + (ARG_TYPE.SUM, ARG_TYPE.LINEAR): _mul_sum_linear, + (ARG_TYPE.SUM, ARG_TYPE.SUM): _mul_sum_sum, + (ARG_TYPE.SUM, ARG_TYPE.OTHER): _mul_sum_other, + (ARG_TYPE.OTHER, ARG_TYPE.NATIVE): _mul_other_native, + (ARG_TYPE.OTHER, ARG_TYPE.NPV): _mul_other_npv, + (ARG_TYPE.OTHER, ARG_TYPE.PARAM): _mul_other_param, + (ARG_TYPE.OTHER, ARG_TYPE.VAR): _mul_other_var, + (ARG_TYPE.OTHER, ARG_TYPE.MONOMIAL): _mul_other_monomial, + (ARG_TYPE.OTHER, ARG_TYPE.LINEAR): _mul_other_linear, + (ARG_TYPE.OTHER, ARG_TYPE.SUM): _mul_other_sum, + (ARG_TYPE.OTHER, ARG_TYPE.OTHER): _mul_other_other, + }, +) + + +# +# DIV: NATIVE handlers +# + + +def _div_native_native(a, b): + # This can be hit because of the asnumeric / mutable wrapper handlers. + return a / b + + +def _div_native_npv(a, b): + if not a and a in _zero_one_optimizations: + return 0 + return NPV_DivisionExpression((a, b)) + + +def _div_native_param(a, b): + if b.is_constant(): + return a / b.value + if not a and a in _zero_one_optimizations: + return 0 + return NPV_DivisionExpression((a, b)) + + +def _div_native_var(a, b): + if not a and a in _zero_one_optimizations: + return 0 + return DivisionExpression((a, b)) + + +def _div_native_monomial(a, b): + if not a and a in _zero_one_optimizations: + return 0 + return DivisionExpression((a, b)) + + +def _div_native_linear(a, b): + if not a and a in _zero_one_optimizations: + return 0 + return DivisionExpression((a, b)) + + +def _div_native_sum(a, b): + if not a and a in _zero_one_optimizations: + return 0 + return DivisionExpression((a, b)) + + +def _div_native_other(a, b): + if not a and a in _zero_one_optimizations: + return 0 + return DivisionExpression((a, b)) + + +# +# DIV: NPV handlers +# + + +def _div_npv_native(a, b): + if b in _zero_one_optimizations and b: + return a + if not b: + raise ZeroDivisionError() + return NPV_DivisionExpression((a, b)) + + +def _div_npv_npv(a, b): + return NPV_DivisionExpression((a, b)) + + +def _div_npv_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations and b: + return a + if not b: + raise ZeroDivisionError() + return NPV_DivisionExpression((a, b)) + + +def _div_npv_var(a, b): + return DivisionExpression((a, b)) + + +def _div_npv_monomial(a, b): + return DivisionExpression((a, b)) + + +def _div_npv_linear(a, b): + return DivisionExpression((a, b)) + + +def _div_npv_sum(a, b): + return DivisionExpression((a, b)) + + +def _div_npv_other(a, b): + return DivisionExpression((a, b)) + + +# +# DIV: PARAM handlers +# + + +def _div_param_native(a, b): + if a.is_constant(): + return a.value / b + if b in _zero_one_optimizations and b: + return a + if not b: + raise ZeroDivisionError() + return NPV_DivisionExpression((a, b)) + + +def _div_param_npv(a, b): + if a.is_constant(): + a = a.value + if not a and a in _zero_one_optimizations: + return 0 + return NPV_DivisionExpression((a, b)) + + +def _div_param_param(a, b): + if a.is_constant(): + a = a.value + if b.is_constant(): + return a / b.value + if not a and a in _zero_one_optimizations: + return 0 + elif b.is_constant(): + b = b.value + if b in _zero_one_optimizations and b: + return a + if not b: + raise ZeroDivisionError() + return NPV_DivisionExpression((a, b)) + + +def _div_param_var(a, b): + if a.is_constant(): + a = a.value + if not a and a in _zero_one_optimizations: + return 0 + return DivisionExpression((a, b)) + + +def _div_param_monomial(a, b): + if a.is_constant(): + a = a.value + if not a and a in _zero_one_optimizations: + return 0 + return DivisionExpression((a, b)) + + +def _div_param_linear(a, b): + if a.is_constant(): + a = a.value + if not a and a in _zero_one_optimizations: + return 0 + return DivisionExpression((a, b)) + + +def _div_param_sum(a, b): + if a.is_constant(): + a = value(a) + if not a and a in _zero_one_optimizations: + return 0 + return DivisionExpression((a, b)) + + +def _div_param_other(a, b): + if a.is_constant(): + a = a.value + if not a and a in _zero_one_optimizations: + return 0 + return DivisionExpression((a, b)) + + +# +# DIV: VAR handlers +# + + +def _div_var_native(a, b): + if b in _zero_one_optimizations and b: + return a + return MonomialTermExpression((1 / b, a)) + + +def _div_var_npv(a, b): + return MonomialTermExpression((NPV_DivisionExpression((1, b)), a)) + + +def _div_var_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations and b: + return a + return MonomialTermExpression((1 / b, a)) + return MonomialTermExpression((NPV_DivisionExpression((1, b)), a)) + + +def _div_var_var(a, b): + return DivisionExpression((a, b)) + + +def _div_var_monomial(a, b): + return DivisionExpression((a, b)) + + +def _div_var_linear(a, b): + return DivisionExpression((a, b)) + + +def _div_var_sum(a, b): + return DivisionExpression((a, b)) + + +def _div_var_other(a, b): + return DivisionExpression((a, b)) + + +# +# DIV: MONOMIAL handlers +# + + +def _div_monomial_native(a, b): + if b in _zero_one_optimizations and b: + return a + return MonomialTermExpression((a._args_[0] / b, a._args_[1])) + + +def _div_monomial_npv(a, b): + return MonomialTermExpression( + (NPV_DivisionExpression((a._args_[0], b)), a._args_[1]) + ) + + +def _div_monomial_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations and b: + return a + return MonomialTermExpression((a._args_[0] / b, a._args_[1])) + return MonomialTermExpression( + (NPV_DivisionExpression((a._args_[0], b)), a._args_[1]) + ) + + +def _div_monomial_var(a, b): + return DivisionExpression((a, b)) + + +def _div_monomial_monomial(a, b): + return DivisionExpression((a, b)) + + +def _div_monomial_linear(a, b): + return DivisionExpression((a, b)) + + +def _div_monomial_sum(a, b): + return DivisionExpression((a, b)) + + +def _div_monomial_other(a, b): + return DivisionExpression((a, b)) + + +# +# DIV: LINEAR handlers +# + + +def _div_linear_native(a, b): + if b in _zero_one_optimizations and b: + return a + if not b: + raise ZeroDivisionError() + return DivisionExpression((a, b)) + + +def _div_linear_npv(a, b): + return DivisionExpression((a, b)) + + +def _div_linear_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations and b: + return a + if not b: + raise ZeroDivisionError() + return DivisionExpression((a, b)) + + +def _div_linear_var(a, b): + return DivisionExpression((a, b)) + + +def _div_linear_monomial(a, b): + return DivisionExpression((a, b)) + + +def _div_linear_linear(a, b): + return DivisionExpression((a, b)) + + +def _div_linear_sum(a, b): + return DivisionExpression((a, b)) + + +def _div_linear_other(a, b): + return DivisionExpression((a, b)) + + +# +# DIV: SUM handlers +# + + +def _div_sum_native(a, b): + if b in _zero_one_optimizations and b: + return a + if not b: + raise ZeroDivisionError() + return DivisionExpression((a, b)) + + +def _div_sum_npv(a, b): + return DivisionExpression((a, b)) + + +def _div_sum_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations and b: + return a + if not b: + raise ZeroDivisionError() + return DivisionExpression((a, b)) + + +def _div_sum_var(a, b): + return DivisionExpression((a, b)) + + +def _div_sum_monomial(a, b): + return DivisionExpression((a, b)) + + +def _div_sum_linear(a, b): + return DivisionExpression((a, b)) + + +def _div_sum_sum(a, b): + return DivisionExpression((a, b)) + + +def _div_sum_other(a, b): + return DivisionExpression((a, b)) + + +# +# DIV: OTHER handlers +# + + +def _div_other_native(a, b): + if b in _zero_one_optimizations and b: + return a + if not b: + raise ZeroDivisionError() + return DivisionExpression((a, b)) + + +def _div_other_npv(a, b): + return DivisionExpression((a, b)) + + +def _div_other_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations and b: + return a + if not b: + raise ZeroDivisionError() + return DivisionExpression((a, b)) + + +def _div_other_var(a, b): + return DivisionExpression((a, b)) + + +def _div_other_monomial(a, b): + return DivisionExpression((a, b)) + + +def _div_other_linear(a, b): + return DivisionExpression((a, b)) + + +def _div_other_sum(a, b): + return DivisionExpression((a, b)) + + +def _div_other_other(a, b): + return DivisionExpression((a, b)) + + +def _register_new_div_handler(a, b): + types = _categorize_arg_types(a, b) + # Retrieve the appropriate handler, record it in the main + # _div_dispatcher dict (so this method is not called a second time for + # these types) + _div_dispatcher[a.__class__, b.__class__] = handler = _div_type_handler_mapping[ + types + ] + # Call the appropriate handler + return handler(a, b) + + +_div_dispatcher = collections.defaultdict(lambda: _register_new_div_handler) + +_div_type_handler_mapping = _binary_op_dispatcher_type_mapping( + _div_dispatcher, + { + (ARG_TYPE.NATIVE, ARG_TYPE.NATIVE): _div_native_native, + (ARG_TYPE.NATIVE, ARG_TYPE.NPV): _div_native_npv, + (ARG_TYPE.NATIVE, ARG_TYPE.PARAM): _div_native_param, + (ARG_TYPE.NATIVE, ARG_TYPE.VAR): _div_native_var, + (ARG_TYPE.NATIVE, ARG_TYPE.MONOMIAL): _div_native_monomial, + (ARG_TYPE.NATIVE, ARG_TYPE.LINEAR): _div_native_linear, + (ARG_TYPE.NATIVE, ARG_TYPE.SUM): _div_native_sum, + (ARG_TYPE.NATIVE, ARG_TYPE.OTHER): _div_native_other, + (ARG_TYPE.NPV, ARG_TYPE.NATIVE): _div_npv_native, + (ARG_TYPE.NPV, ARG_TYPE.NPV): _div_npv_npv, + (ARG_TYPE.NPV, ARG_TYPE.PARAM): _div_npv_param, + (ARG_TYPE.NPV, ARG_TYPE.VAR): _div_npv_var, + (ARG_TYPE.NPV, ARG_TYPE.MONOMIAL): _div_npv_monomial, + (ARG_TYPE.NPV, ARG_TYPE.LINEAR): _div_npv_linear, + (ARG_TYPE.NPV, ARG_TYPE.SUM): _div_npv_sum, + (ARG_TYPE.NPV, ARG_TYPE.OTHER): _div_npv_other, + (ARG_TYPE.PARAM, ARG_TYPE.NATIVE): _div_param_native, + (ARG_TYPE.PARAM, ARG_TYPE.NPV): _div_param_npv, + (ARG_TYPE.PARAM, ARG_TYPE.PARAM): _div_param_param, + (ARG_TYPE.PARAM, ARG_TYPE.VAR): _div_param_var, + (ARG_TYPE.PARAM, ARG_TYPE.MONOMIAL): _div_param_monomial, + (ARG_TYPE.PARAM, ARG_TYPE.LINEAR): _div_param_linear, + (ARG_TYPE.PARAM, ARG_TYPE.SUM): _div_param_sum, + (ARG_TYPE.PARAM, ARG_TYPE.OTHER): _div_param_other, + (ARG_TYPE.VAR, ARG_TYPE.NATIVE): _div_var_native, + (ARG_TYPE.VAR, ARG_TYPE.NPV): _div_var_npv, + (ARG_TYPE.VAR, ARG_TYPE.PARAM): _div_var_param, + (ARG_TYPE.VAR, ARG_TYPE.VAR): _div_var_var, + (ARG_TYPE.VAR, ARG_TYPE.MONOMIAL): _div_var_monomial, + (ARG_TYPE.VAR, ARG_TYPE.LINEAR): _div_var_linear, + (ARG_TYPE.VAR, ARG_TYPE.SUM): _div_var_sum, + (ARG_TYPE.VAR, ARG_TYPE.OTHER): _div_var_other, + (ARG_TYPE.MONOMIAL, ARG_TYPE.NATIVE): _div_monomial_native, + (ARG_TYPE.MONOMIAL, ARG_TYPE.NPV): _div_monomial_npv, + (ARG_TYPE.MONOMIAL, ARG_TYPE.PARAM): _div_monomial_param, + (ARG_TYPE.MONOMIAL, ARG_TYPE.VAR): _div_monomial_var, + (ARG_TYPE.MONOMIAL, ARG_TYPE.MONOMIAL): _div_monomial_monomial, + (ARG_TYPE.MONOMIAL, ARG_TYPE.LINEAR): _div_monomial_linear, + (ARG_TYPE.MONOMIAL, ARG_TYPE.SUM): _div_monomial_sum, + (ARG_TYPE.MONOMIAL, ARG_TYPE.OTHER): _div_monomial_other, + (ARG_TYPE.LINEAR, ARG_TYPE.NATIVE): _div_linear_native, + (ARG_TYPE.LINEAR, ARG_TYPE.NPV): _div_linear_npv, + (ARG_TYPE.LINEAR, ARG_TYPE.PARAM): _div_linear_param, + (ARG_TYPE.LINEAR, ARG_TYPE.VAR): _div_linear_var, + (ARG_TYPE.LINEAR, ARG_TYPE.MONOMIAL): _div_linear_monomial, + (ARG_TYPE.LINEAR, ARG_TYPE.LINEAR): _div_linear_linear, + (ARG_TYPE.LINEAR, ARG_TYPE.SUM): _div_linear_sum, + (ARG_TYPE.LINEAR, ARG_TYPE.OTHER): _div_linear_other, + (ARG_TYPE.SUM, ARG_TYPE.NATIVE): _div_sum_native, + (ARG_TYPE.SUM, ARG_TYPE.NPV): _div_sum_npv, + (ARG_TYPE.SUM, ARG_TYPE.PARAM): _div_sum_param, + (ARG_TYPE.SUM, ARG_TYPE.VAR): _div_sum_var, + (ARG_TYPE.SUM, ARG_TYPE.MONOMIAL): _div_sum_monomial, + (ARG_TYPE.SUM, ARG_TYPE.LINEAR): _div_sum_linear, + (ARG_TYPE.SUM, ARG_TYPE.SUM): _div_sum_sum, + (ARG_TYPE.SUM, ARG_TYPE.OTHER): _div_sum_other, + (ARG_TYPE.OTHER, ARG_TYPE.NATIVE): _div_other_native, + (ARG_TYPE.OTHER, ARG_TYPE.NPV): _div_other_npv, + (ARG_TYPE.OTHER, ARG_TYPE.PARAM): _div_other_param, + (ARG_TYPE.OTHER, ARG_TYPE.VAR): _div_other_var, + (ARG_TYPE.OTHER, ARG_TYPE.MONOMIAL): _div_other_monomial, + (ARG_TYPE.OTHER, ARG_TYPE.LINEAR): _div_other_linear, + (ARG_TYPE.OTHER, ARG_TYPE.SUM): _div_other_sum, + (ARG_TYPE.OTHER, ARG_TYPE.OTHER): _div_other_other, + }, +) + + +# +# POW handlers +# + + +def _pow_native_native(a, b): + # This can be hit because of the asnumeric / mutable wrapper handlers. + return a**b + + +def _pow_native_npv(a, b): + return NPV_PowExpression((a, b)) + + +def _pow_native_param(a, b): + if b.is_constant(): + return a ** (b.value) + return NPV_PowExpression((a, b)) + + +def _pow_native_other(a, b): + return PowExpression((a, b)) + + +def _pow_npv_native(a, b): + if b in _zero_one_optimizations: + return a if b else 1 + return NPV_PowExpression((a, b)) + + +def _pow_npv_npv(a, b): + return NPV_PowExpression((a, b)) + + +def _pow_npv_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations: + return a if b else 1 + return NPV_PowExpression((a, b)) + + +def _pow_npv_other(a, b): + return PowExpression((a, b)) + + +def _pow_param_native(a, b): + if a.is_constant(): + return a.value**b + if b in _zero_one_optimizations: + return a if b else 1 + return NPV_PowExpression((a, b)) + + +def _pow_param_npv(a, b): + if a.is_constant(): + a = a.value + return NPV_PowExpression((a, b)) + + +def _pow_param_param(a, b): + if a.is_constant(): + a = a.value + if b.is_constant(): + return a**b.value + elif b.is_constant(): + b = b.value + if b in _zero_one_optimizations: + return a if b else 1 + return NPV_PowExpression((a, b)) + + +def _pow_param_other(a, b): + if a.is_constant(): + a = a.value + return PowExpression((a, b)) + + +def _pow_other_native(a, b): + if b in _zero_one_optimizations: + return a if b else 1 + return PowExpression((a, b)) + + +def _pow_other_npv(a, b): + return PowExpression((a, b)) + + +def _pow_other_param(a, b): + if b.is_constant(): + b = b.value + if b in _zero_one_optimizations: + return a if b else 1 + return PowExpression((a, b)) + + +def _pow_other_other(a, b): + return PowExpression((a, b)) + + +def _register_new_pow_handler(a, b): + types = _categorize_arg_types(a, b) + # Retrieve the appropriate handler, record it in the main + # _pow_dispatcher dict (so this method is not called a second time for + # these types) + _pow_dispatcher[a.__class__, b.__class__] = handler = _pow_type_handler_mapping[ + types + ] + # Call the appropriate handler + return handler(a, b) + + +_pow_dispatcher = collections.defaultdict(lambda: _register_new_pow_handler) + +_pow_type_handler_mapping = _binary_op_dispatcher_type_mapping( + _pow_dispatcher, + { + (ARG_TYPE.NATIVE, ARG_TYPE.NATIVE): _pow_native_native, + (ARG_TYPE.NATIVE, ARG_TYPE.NPV): _pow_native_npv, + (ARG_TYPE.NATIVE, ARG_TYPE.PARAM): _pow_native_param, + (ARG_TYPE.NATIVE, ARG_TYPE.VAR): _pow_native_other, + (ARG_TYPE.NATIVE, ARG_TYPE.MONOMIAL): _pow_native_other, + (ARG_TYPE.NATIVE, ARG_TYPE.LINEAR): _pow_native_other, + (ARG_TYPE.NATIVE, ARG_TYPE.SUM): _pow_native_other, + (ARG_TYPE.NATIVE, ARG_TYPE.OTHER): _pow_native_other, + (ARG_TYPE.NPV, ARG_TYPE.NATIVE): _pow_npv_native, + (ARG_TYPE.NPV, ARG_TYPE.NPV): _pow_npv_npv, + (ARG_TYPE.NPV, ARG_TYPE.PARAM): _pow_npv_param, + (ARG_TYPE.NPV, ARG_TYPE.VAR): _pow_npv_other, + (ARG_TYPE.NPV, ARG_TYPE.MONOMIAL): _pow_npv_other, + (ARG_TYPE.NPV, ARG_TYPE.LINEAR): _pow_npv_other, + (ARG_TYPE.NPV, ARG_TYPE.SUM): _pow_npv_other, + (ARG_TYPE.NPV, ARG_TYPE.OTHER): _pow_npv_other, + (ARG_TYPE.PARAM, ARG_TYPE.NATIVE): _pow_param_native, + (ARG_TYPE.PARAM, ARG_TYPE.NPV): _pow_param_npv, + (ARG_TYPE.PARAM, ARG_TYPE.PARAM): _pow_param_param, + (ARG_TYPE.PARAM, ARG_TYPE.VAR): _pow_param_other, + (ARG_TYPE.PARAM, ARG_TYPE.MONOMIAL): _pow_param_other, + (ARG_TYPE.PARAM, ARG_TYPE.LINEAR): _pow_param_other, + (ARG_TYPE.PARAM, ARG_TYPE.SUM): _pow_param_other, + (ARG_TYPE.PARAM, ARG_TYPE.OTHER): _pow_param_other, + (ARG_TYPE.VAR, ARG_TYPE.NATIVE): _pow_other_native, + (ARG_TYPE.VAR, ARG_TYPE.NPV): _pow_other_npv, + (ARG_TYPE.VAR, ARG_TYPE.PARAM): _pow_other_param, + (ARG_TYPE.MONOMIAL, ARG_TYPE.NATIVE): _pow_other_native, + (ARG_TYPE.MONOMIAL, ARG_TYPE.NPV): _pow_other_npv, + (ARG_TYPE.MONOMIAL, ARG_TYPE.PARAM): _pow_other_param, + (ARG_TYPE.LINEAR, ARG_TYPE.NATIVE): _pow_other_native, + (ARG_TYPE.LINEAR, ARG_TYPE.NPV): _pow_other_npv, + (ARG_TYPE.LINEAR, ARG_TYPE.PARAM): _pow_other_param, + (ARG_TYPE.SUM, ARG_TYPE.NATIVE): _pow_other_native, + (ARG_TYPE.SUM, ARG_TYPE.NPV): _pow_other_npv, + (ARG_TYPE.SUM, ARG_TYPE.PARAM): _pow_other_param, + (ARG_TYPE.OTHER, ARG_TYPE.NATIVE): _pow_other_native, + (ARG_TYPE.OTHER, ARG_TYPE.NPV): _pow_other_npv, + (ARG_TYPE.OTHER, ARG_TYPE.PARAM): _pow_other_param, + }, +) +_pow_type_handler_mapping.update( + { + (i, j): _pow_other_other + for i in ARG_TYPE + for j in ARG_TYPE + if (i, j) not in _pow_type_handler_mapping + } +) + + +# +# ABS handlers +# + + +def _abs_native(a): + # This can be hit because of the asnumeric / mutable wrapper handlers. + return abs(a) + + +def _abs_npv(a): + # This can be hit because of the asnumeric / mutable wrapper handlers. + return NPV_AbsExpression((a,)) + + +def _abs_param(a): + if a.is_constant(): + return abs(a.value) + return NPV_AbsExpression((a,)) + + +def _abs_other(a): + return AbsExpression((a,)) + + +def _register_new_abs_handler(a): + types = _categorize_arg_types(a) + # Retrieve the appropriate handler, record it in the main + # _abs_dispatcher dict (so this method is not called a second time for + # these types) + _abs_dispatcher[a.__class__] = handler = _abs_type_handler_mapping[types[0]] + # Call the appropriate handler + return handler(a) + + +_abs_dispatcher = collections.defaultdict(lambda: _register_new_abs_handler) + +_abs_type_handler_mapping = _unary_op_dispatcher_type_mapping( + _abs_dispatcher, + { + ARG_TYPE.NATIVE: _abs_native, + ARG_TYPE.NPV: _abs_npv, + ARG_TYPE.PARAM: _abs_param, + ARG_TYPE.VAR: _abs_other, + ARG_TYPE.MONOMIAL: _abs_other, + ARG_TYPE.LINEAR: _abs_other, + ARG_TYPE.SUM: _abs_other, + ARG_TYPE.OTHER: _abs_other, + }, +) + + +# +# INTRINSIC FUNCTION handlers +# + + +def _fcn_asnumeric(a, name, fcn): + a = a.as_numeric() + return _fcn_dispatcher[a.__class__](a, name, fcn) + + +def _fcn_mutable(a, name, fcn): + a = _recast_mutable(a) + return _fcn_dispatcher[a.__class__](a, name, fcn) + + +def _fcn_invalid(a, name, fcn): + fcn(a) + # returns None + + +def _fcn_native(a, name, fcn): + # This can be hit because of the asnumeric / mutable wrapper handlers. + return fcn(a) + + +def _fcn_npv(a, name, fcn): + # This can be hit because of the asnumeric / mutable wrapper handlers. + return NPV_UnaryFunctionExpression((a,), name, fcn) + + +def _fcn_param(a, name, fcn): + if a.is_constant(): + return fcn(a.value) + return NPV_UnaryFunctionExpression((a,), name, fcn) + + +def _fcn_other(a, name, fcn): + return UnaryFunctionExpression((a,), name, fcn) + + +def _register_new_fcn_dispatcher(a, name, fcn): + types = _categorize_arg_types(a) + # Retrieve the appropriate handler, record it in the main + # _fcn_dispatcher dict (so this method is not called a second time for + # these types) + _fcn_dispatcher[a.__class__] = handler = _fcn_type_handler_mapping[types[0]] + # Call the appropriate handler + return handler(a, name, fcn) + + +_fcn_dispatcher = collections.defaultdict(lambda: _register_new_fcn_dispatcher) + +_fcn_type_handler_mapping = { + ARG_TYPE.ASNUMERIC: _fcn_asnumeric, + ARG_TYPE.MUTABLE: _fcn_mutable, + ARG_TYPE.INVALID: _fcn_invalid, + ARG_TYPE.NATIVE: _fcn_native, + ARG_TYPE.NPV: _fcn_npv, + ARG_TYPE.PARAM: _fcn_param, + ARG_TYPE.VAR: _fcn_other, + ARG_TYPE.MONOMIAL: _fcn_other, + ARG_TYPE.LINEAR: _fcn_other, + ARG_TYPE.SUM: _fcn_other, + ARG_TYPE.OTHER: _fcn_other, +} + + +# +# NOTE: abs() and pow() are not defined here, because they are +# Python operators. +# +def ceil(arg): + return _fcn_dispatcher[arg.__class__](arg, 'ceil', math.ceil) + + +def floor(arg): + return _fcn_dispatcher[arg.__class__](arg, 'floor', math.floor) + + +# e ** x +def exp(arg): + return _fcn_dispatcher[arg.__class__](arg, 'exp', math.exp) + + +def log(arg): + return _fcn_dispatcher[arg.__class__](arg, 'log', math.log) + + +def log10(arg): + return _fcn_dispatcher[arg.__class__](arg, 'log10', math.log10) + + +# FIXME: this is nominally the same as x ** 0.5, but follows a different +# path and produces a different NL file! +def sqrt(arg): + return _fcn_dispatcher[arg.__class__](arg, 'sqrt', math.sqrt) + # return _pow_dispatcher[arg.__class__, float](arg, 0.5) + + +def sin(arg): + return _fcn_dispatcher[arg.__class__](arg, 'sin', math.sin) + + +def cos(arg): + return _fcn_dispatcher[arg.__class__](arg, 'cos', math.cos) + + +def tan(arg): + return _fcn_dispatcher[arg.__class__](arg, 'tan', math.tan) + + +def sinh(arg): + return _fcn_dispatcher[arg.__class__](arg, 'sinh', math.sinh) + + +def cosh(arg): + return _fcn_dispatcher[arg.__class__](arg, 'cosh', math.cosh) + + +def tanh(arg): + return _fcn_dispatcher[arg.__class__](arg, 'tanh', math.tanh) + + +def asin(arg): + return _fcn_dispatcher[arg.__class__](arg, 'asin', math.asin) + + +def acos(arg): + return _fcn_dispatcher[arg.__class__](arg, 'acos', math.acos) + + +def atan(arg): + return _fcn_dispatcher[arg.__class__](arg, 'atan', math.atan) + + +def asinh(arg): + return _fcn_dispatcher[arg.__class__](arg, 'asinh', math.asinh) + + +def acosh(arg): + return _fcn_dispatcher[arg.__class__](arg, 'acosh', math.acosh) + + +def atanh(arg): + return _fcn_dispatcher[arg.__class__](arg, 'atanh', math.atanh) + + +# +# Function interface to Expr_ifExpression +# + + +def _process_expr_if_arg(arg, kwargs, name): + alt = kwargs.pop(name, None) + if alt is not None: + if arg is not None: + raise ValueError(f'Cannot specify both {name}_ and {name}') + arg = alt + _type = _categorize_arg_type(arg) + # Note that relational expressions get mapped to INVALID + while _type < ARG_TYPE.INVALID: + if _type is ARG_TYPE.MUTABLE: + arg = _recast_mutable(arg) + elif _type is ARG_TYPE.ASNUMERIC: + arg = arg.as_numeric() + else: + raise DeveloperError('_categorize_arg_type() returned unexpected ARG_TYPE') + _type = _categorize_arg_type(arg) + return arg, _type + + +def Expr_if(IF_=None, THEN_=None, ELSE_=None, **kwargs): + """ + Function used to construct a conditional numeric expression. + + This function accepts either of the following signatures: + + - Expr_if(IF={expr}, THEN={expr}, ELSE={expr}) + - Expr_if(IF_={expr}, THEN_={expr}, ELSE_={expr}) + + (the former is historical, and the latter is required to support Cythonization) + """ + _pv = False + ELSE_, _type = _process_expr_if_arg(ELSE_, kwargs, 'ELSE') + _pv |= _type >= ARG_TYPE.VAR or _type == ARG_TYPE.INVALID + THEN_, _type = _process_expr_if_arg(THEN_, kwargs, 'THEN') + _pv |= _type >= ARG_TYPE.VAR or _type == ARG_TYPE.INVALID + IF_, _type = _process_expr_if_arg(IF_, kwargs, 'IF') + _pv |= _type >= ARG_TYPE.VAR or _type == ARG_TYPE.INVALID + if kwargs: + raise ValueError('Unrecognized arguments: ' + ', '.join(kwargs)) + # Notes: + # - side effect: IF is the last iteration, so _type == _categorize_arg_type(IF) + # - we do NO error checking as to the actual arg types. That is + # left to the writer (and as of writing [Jul 2023], the NL writer + # is the only writer that recognized Expr_if) + if _type is ARG_TYPE.NATIVE: + return THEN_ if IF_ else ELSE_ + elif _type is ARG_TYPE.PARAM and IF_.is_constant(): + return THEN_ if IF_.value else ELSE_ + elif _pv: + return Expr_ifExpression((IF_, THEN_, ELSE_)) + else: + return NPV_Expr_ifExpression((IF_, THEN_, ELSE_)) + + +# +# Misc (legacy) functions +# + + +def _balanced_parens(arg): + """Verify the string argument contains balanced parentheses. + + This checks that every open paren is balanced by a closed paren. + That is, the infix string expression is likely to be valid. This is + primarily used to determine if a string that starts and ends with + parens can have those parens removed. + + Examples: + >>> a = "(( x + y ) * ( z - w ))" + >>> _balanced_parens(a[1:-1]) + True + >>> a = "( x + y ) * ( z - w )" + >>> _balanced_parens(a[1:-1]) + False + """ + _parenCount = 0 + for c in arg: + if c == '(': + _parenCount += 1 + elif c == ')': + _parenCount -= 1 + if _parenCount < 0: + return False + return _parenCount == 0 + + +# TODO: this is fragile (and not currently used anywhere). It should be +# deprecated / removed. +NPV_expression_types = set( + [ + NPV_NegationExpression, + NPV_ExternalFunctionExpression, + NPV_PowExpression, + NPV_MinExpression, + NPV_MaxExpression, + NPV_ProductExpression, + NPV_DivisionExpression, + NPV_SumExpression, + NPV_UnaryFunctionExpression, + NPV_AbsExpression, + ] +) diff --git a/pyomo/core/expr/numvalue.py b/pyomo/core/expr/numvalue.py index 88e1423f415..ba008475b86 100644 --- a/pyomo/core/expr/numvalue.py +++ b/pyomo/core/expr/numvalue.py @@ -9,44 +9,90 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ('value', 'is_constant', 'is_fixed', 'is_variable_type', - 'is_potentially_variable', 'NumericValue', 'ZeroConstant', - 'native_numeric_types', 'native_types', 'nonpyomo_leaf_types', - 'polynomial_degree') +__all__ = ( + 'value', + 'is_constant', + 'is_fixed', + 'is_variable_type', + 'is_potentially_variable', + 'NumericValue', + 'ZeroConstant', + 'native_numeric_types', + 'native_types', + 'nonpyomo_leaf_types', + 'polynomial_degree', +) +import collections import sys import logging -from pyomo.common.dependencies import numpy as np, numpy_available -from pyomo.common.errors import PyomoException -from pyomo.core.expr.expr_common import ( - _add, _sub, _mul, _div, _pow, - _neg, _abs, _radd, - _rsub, _rmul, _rdiv, _rpow, - _iadd, _isub, _imul, _idiv, - _ipow, _lt, _le, _eq +from pyomo.common.deprecation import ( + deprecated, + deprecation_warning, + relocated_module_attribute, ) -# TODO: update imports of these objects to pull from numeric_types +from pyomo.core.expr.expr_common import ExpressionType +from pyomo.core.expr.numeric_expr import NumericValue +import pyomo.common.numeric_types as _numeric_types + +# TODO: update Pyomo to import these objects from common.numeric_types +# (and not from here) from pyomo.common.numeric_types import ( - nonpyomo_leaf_types, native_types, native_numeric_types, - native_integer_types, native_boolean_types, native_logical_types, - RegisterNumericType, RegisterIntegerType, RegisterBooleanType, + nonpyomo_leaf_types, + native_types, + native_numeric_types, + native_integer_types, + native_logical_types, pyomo_constant_types, + check_if_numeric_type, + value, ) from pyomo.core.pyomoobject import PyomoObject -from pyomo.core.expr.expr_errors import TemplateExpressionError -logger = logging.getLogger('pyomo.core') +relocated_module_attribute( + 'native_boolean_types', + 'pyomo.common.numeric_types._native_boolean_types', + version='6.6.0', + f_globals=globals(), + msg="The native_boolean_types set will be removed in the future: the set " + "contains types that were convertible to bool, and not types that should " + "be treated as if they were bool (as was the case for the other " + "native_*_types sets). Users likely should use native_logical_types.", +) +relocated_module_attribute( + 'RegisterNumericType', + 'pyomo.common.numeric_types.RegisterNumericType', + version='6.6.0', + f_globals=globals(), +) +relocated_module_attribute( + 'RegisterIntegerType', + 'pyomo.common.numeric_types.RegisterIntegerType', + version='6.6.0', + f_globals=globals(), +) +relocated_module_attribute( + 'RegisterBooleanType', + 'pyomo.common.numeric_types.RegisterBooleanType', + version='6.6.0', + f_globals=globals(), +) +relocated_module_attribute( + 'NumericValue', + 'pyomo.core.expr.numeric_expr.NumericValue', + version='6.6.2.dev0', + f_globals=globals(), +) +relocated_module_attribute( + 'NumericNDArray', + 'pyomo.core.expr.numeric_expr.NumericNDArray', + version='6.6.2.dev0', + f_globals=globals(), +) +logger = logging.getLogger('pyomo.core') -def _generate_sum_expression(etype, _self, _other): - raise RuntimeError("incomplete import of Pyomo expression system") #pragma: no cover -def _generate_mul_expression(etype, _self, _other): - raise RuntimeError("incomplete import of Pyomo expression system") #pragma: no cover -def _generate_other_expression(etype, _self, _other): - raise RuntimeError("incomplete import of Pyomo expression system") #pragma: no cover -def _generate_relational_expression(etype, lhs, rhs): - raise RuntimeError("incomplete import of Pyomo expression system") #pragma: no cover ##------------------------------------------------------------------------ ## @@ -54,12 +100,14 @@ def _generate_relational_expression(etype, lhs, rhs): ## ##------------------------------------------------------------------------ + class NonNumericValue(object): """An object that contains a non-numeric value Constructor Arguments: value The initial value. """ + __slots__ = ('value',) def __init__(self, value): @@ -68,109 +116,15 @@ def __init__(self, value): def __str__(self): return str(self.value) - def __getstate__(self): - state = {} - state['value'] = getattr(self,'value') - return state - - def __setstate__(self, state): - setattr(self, 'value', state['value']) nonpyomo_leaf_types.add(NonNumericValue) -def value(obj, exception=True): - """ - A utility function that returns the value of a Pyomo object or - expression. - - Args: - obj: The argument to evaluate. If it is None, a - string, or any other primative numeric type, - then this function simply returns the argument. - Otherwise, if the argument is a NumericValue - then the __call__ method is executed. - exception (bool): If :const:`True`, then an exception should - be raised when instances of NumericValue fail to - evaluate due to one or more objects not being - initialized to a numeric value (e.g, one or more - variables in an algebraic expression having the - value None). If :const:`False`, then the function - returns :const:`None` when an exception occurs. - Default is True. - - Returns: A numeric value or None. - """ - if obj.__class__ in native_types: - return obj - if obj.__class__ in pyomo_constant_types: - # - # I'm commenting this out for now, but I think we should never expect - # to see a numeric constant with value None. - # - #if exception and obj.value is None: - # raise ValueError( - # "No value for uninitialized NumericConstant object %s" - # % (obj.name,)) - return obj.value - # - # Test if we have a duck types for Pyomo expressions - # - try: - obj.is_numeric_type() - except AttributeError: - # - # If not, then try to coerce this into a numeric constant. If that - # works, then return the object - # - try: - check_if_numeric_type_and_cache(obj) - return obj - except: - raise TypeError( - "Cannot evaluate object with unknown type: %s" % - (type(obj).__name__,)) - # - # Evaluate the expression object - # - if exception: - # - # Here, we try to catch the exception - # - try: - tmp = obj(exception=True) - if tmp is None: - raise ValueError( - "No value for uninitialized NumericValue object %s" - % (obj.name,)) - return tmp - except TemplateExpressionError: - # Template expressions work by catching this error type. So - # we should defer this error handling and not log an error - # message. - raise - except: - logger.error( - "evaluating object as numeric value: %s\n (object: %s)\n%s" - % (obj, type(obj), sys.exc_info()[1])) - raise - else: - # - # Here, we do not try to catch the exception - # - return obj(exception=False) - - def is_constant(obj): """ A utility function that returns a boolean that indicates whether the object is a constant. """ - # This method is rarely, if ever, called. Plus, since the - # expression generation (and constraint generation) system converts - # everything to NumericValues, it is better (i.e., faster) to assume - # that the obj is a NumericValue - # # JDS: NB: I am not sure why we allow str to be a constant, but # since we have historically done so, we check for type membership # in native_types and not in native_numeric_types. @@ -181,17 +135,19 @@ def is_constant(obj): return obj.is_constant() except AttributeError: pass - try: - # Now we need to confirm that we have an unknown numeric type - check_if_numeric_type_and_cache(obj) - # As this branch is only hit for previously unknown (to Pyomo) - # types that behave reasonably like numbers, we know they *must* - # be constant. + # Now we need to confirm that we have an unknown numeric type + # + # As this branch is only hit for previously unknown (to Pyomo) + # types that behave reasonably like numbers, we know they *must* + # be constant. + if check_if_numeric_type(obj): return True - except: + else: raise TypeError( "Cannot assess properties of object with unknown type: %s" - % (type(obj).__name__,)) + % (type(obj).__name__,) + ) + def is_fixed(obj): """ @@ -208,17 +164,19 @@ def is_fixed(obj): return obj.is_fixed() except AttributeError: pass - try: - # Now we need to confirm that we have an unknown numeric type - check_if_numeric_type_and_cache(obj) - # As this branch is only hit for previously unknown (to Pyomo) - # types that behave reasonably like numbers, we know they *must* - # be fixed. + # Now we need to confirm that we have an unknown numeric type + # + # As this branch is only hit for previously unknown (to Pyomo) + # types that behave reasonably like numbers, we know they *must* + # be fixed. + if check_if_numeric_type(obj): return True - except: + else: raise TypeError( "Cannot assess properties of object with unknown type: %s" - % (type(obj).__name__,)) + % (type(obj).__name__,) + ) + def is_variable_type(obj): """ @@ -232,6 +190,7 @@ def is_variable_type(obj): except AttributeError: return False + def is_potentially_variable(obj): """ A utility function that returns a boolean indicating @@ -244,6 +203,7 @@ def is_potentially_variable(obj): except AttributeError: return False + def is_numeric_data(obj): """ A utility function that returns a boolean indicating @@ -261,16 +221,13 @@ def is_numeric_data(obj): return not obj.is_potentially_variable() except AttributeError: pass - try: - # Now we need to confirm that we have an unknown numeric type - check_if_numeric_type_and_cache(obj) - # As this branch is only hit for previously unknown (to Pyomo) - # types that behave reasonably like numbers, we know they *must* - # be numeric data (unless an exception is raised). - return True - except: - pass - return False + # Now we need to confirm that we have an unknown numeric type + # + # As this branch is only hit for previously unknown (to Pyomo) + # types that behave reasonably like numbers, we know they *must* + # be numeric data (unless an exception is raised). + return check_if_numeric_type(obj) + def polynomial_degree(obj): """ @@ -283,22 +240,24 @@ def polynomial_degree(obj): elif obj.__class__ in native_types: raise TypeError( "Cannot evaluate the polynomial degree of a non-numeric type: %s" - % (type(obj).__name__,)) + % (type(obj).__name__,) + ) try: return obj.polynomial_degree() except AttributeError: pass - try: - # Now we need to confirm that we have an unknown numeric type - check_if_numeric_type_and_cache(obj) + # Now we need to confirm that we have an unknown numeric type + if check_if_numeric_type(obj): # As this branch is only hit for previously unknown (to Pyomo) # types that behave reasonably like numbers, we know they *must* # be a numeric constant. return 0 - except: + else: raise TypeError( "Cannot assess properties of object with unknown type: %s" - % (type(obj).__name__,)) + % (type(obj).__name__,) + ) + # # It is very common to have only a few constants in a model, but those @@ -316,6 +275,7 @@ def polynomial_degree(obj): # _KnownConstants = {} + def as_numeric(obj): """ A function that creates a NumericConstant object that @@ -361,8 +321,13 @@ def as_numeric(obj): # is worth the extra cost. # if len(_KnownConstants) < 1024: - _KnownConstants[obj] = retval - return retval + # obj may (or may not) be hashable, so we need this try + # block so that things proceed normally for non-hashable + # "numeric" types + try: + _KnownConstants[obj] = retval + except: + pass # return retval # @@ -371,6 +336,14 @@ def as_numeric(obj): try: if obj.is_numeric_type(): return obj + elif obj.is_expression_type(ExpressionType.RELATIONAL): + deprecation_warning( + "returning a relational expression from as_numeric(). " + "Relational expressions are no longer numeric types. " + "In the future this will raise a TypeError.", + version='6.4.3', + ) + return obj else: try: _name = obj.name @@ -378,581 +351,46 @@ def as_numeric(obj): _name = str(obj) raise TypeError( "The '%s' object '%s' is not a valid type for Pyomo " - "numeric expressions" % (type(obj).__name__, _name)) + "numeric expressions" % (type(obj).__name__, _name) + ) except AttributeError: pass # - # Test if the object looks like a number. If so, register that type with a - # warning. + # Test if the object looks like a number. If so, re-call as_numeric + # (this type will have been added to native_numeric_types). # - try: - return check_if_numeric_type_and_cache(obj) - except: - pass + if check_if_numeric_type(obj): + return as_numeric(obj) # # Generate errors # if obj.__class__ in native_types: - raise TypeError("%s values ('%s') are not allowed in Pyomo " - "numeric expressions" % (type(obj).__name__, str(obj))) + raise TypeError( + "%s values ('%s') are not allowed in Pyomo " + "numeric expressions" % (type(obj).__name__, str(obj)) + ) raise TypeError( "Cannot treat the value '%s' as a numeric value because it has " - "unknown type '%s'" % (str(obj), type(obj).__name__)) + "unknown type '%s'" % (str(obj), type(obj).__name__) + ) +@deprecated( + "check_if_numeric_type_and_cache() has been deprecated in " + "favor of just calling as_numeric()", + version='6.4.3', +) def check_if_numeric_type_and_cache(obj): """Test if the argument is a numeric type by checking if we can add zero to it. If that works, then we cache the value and return a NumericConstant object. """ - obj_class = obj.__class__ - if obj_class is (obj + 0).__class__: - # - # Coerce the value to a float, if possible - # - try: - obj = float(obj) - except: - pass - # - # obj may (or may not) be hashable, so we need this try - # block so that things proceed normally for non-hashable - # "numeric" types - # - retval = NumericConstant(obj) - try: - # - # Create the numeric constant and add to the - # list of known constants. - # - # Note: we don't worry about the size of the - # cache here, since we need to confirm that the - # object is hashable. - # - _KnownConstants[obj] = retval - # - # If we get here, this is a reasonably well-behaving - # numeric type: add it to the native numeric types - # so that future lookups will be faster. - # - native_numeric_types.add(obj_class) - native_types.add(obj_class) - nonpyomo_leaf_types.add(obj_class) - # - # Generate a warning, since Pyomo's management of third-party - # numeric types is more robust when registering explicitly. - # - logger.warning( - """Dynamically registering the following numeric type: - %s -Dynamic registration is supported for convenience, but there are known -limitations to this approach. We recommend explicitly registering -numeric types using the following functions: - RegisterNumericType(), RegisterIntegerType(), RegisterBooleanType().""" - % (obj_class.__name__,)) - except: - pass - return retval - - -class NumericValue(PyomoObject): - """ - This is the base class for numeric values used in Pyomo. - """ - - __slots__ = () - - # This is required because we define __eq__ - __hash__ = None - - def __getstate__(self): - """ - Prepare a picklable state of this instance for pickling. - - Nominally, __getstate__() should execute the following:: - - state = super(Class, self).__getstate__() - for i in Class.__slots__: - state[i] = getattr(self,i) - return state - - However, in this case, the (nominal) parent class is 'object', - and object does not implement __getstate__. So, we will - check to make sure that there is a base __getstate__() to - call. You might think that there is nothing to check, but - multiple inheritance could mean that another class got stuck - between this class and "object" in the MRO. - - Further, since there are actually no slots defined here, the - real question is to either return an empty dict or the - parent's dict. - """ - _base = super(NumericValue, self) - if hasattr(_base, '__getstate__'): - return _base.__getstate__() - else: - return {} - - def __setstate__(self, state): - """ - Restore a pickled state into this instance - - Our model for setstate is for derived classes to modify - the state dictionary as control passes up the inheritance - hierarchy (using super() calls). All assignment of state -> - object attributes is handled at the last class before 'object', - which may -- or may not (thanks to MRO) -- be here. - """ - _base = super(NumericValue, self) - if hasattr(_base, '__setstate__'): - return _base.__setstate__(state) - else: - for key, val in state.items(): - # Note: per the Python data model docs, we explicitly - # set the attribute using object.__setattr__() instead - # of setting self.__dict__[key] = val. - object.__setattr__(self, key, val) - - def getname(self, fully_qualified=False, name_buffer=None): - """ - If this is a component, return the component's name on the owning - block; otherwise return the value converted to a string - """ - _base = super(NumericValue, self) - if hasattr(_base,'getname'): - return _base.getname(fully_qualified, name_buffer) - else: - return str(type(self)) - - @property - def name(self): - return self.getname(fully_qualified=True) - - @property - def local_name(self): - return self.getname(fully_qualified=False) - - def is_numeric_type(self): - """Return True if this class is a Pyomo numeric object""" - return True - - def is_constant(self): - """Return True if this numeric value is a constant value""" - return False - - def is_fixed(self): - """Return True if this is a non-constant value that has been fixed""" - return False - - def is_potentially_variable(self): - """Return True if variables can appear in this expression""" - return False - - def is_relational(self): - """ - Return True if this numeric value represents a relational expression. - """ - return False - - def is_indexed(self): - """Return True if this numeric value is an indexed object""" - return False - - def polynomial_degree(self): - """ - Return the polynomial degree of the expression. - - Returns: - :const:`None` - """ - return self._compute_polynomial_degree(None) - - def _compute_polynomial_degree(self, values): - """ - Compute the polynomial degree of this expression given - the degree values of its children. - - Args: - values (list): A list of values that indicate the degree - of the children expression. - - Returns: - :const:`None` - """ - return None - - def __bool__(self): - """Coerce the value to a bool - - Numeric values can be coerced to bool only if the value / - expression is constant. Fixed (but non-constant) or variable - values will raise an exception. - - Raises: - PyomoException - - """ - # Note that we want to implement __bool__, as scalar numeric - # components (e.g., Param, Var) implement __len__ (since they - # are implicit containers), and Python falls back on __len__ if - # __bool__ is not defined. - if self.is_constant(): - return bool(self()) - raise PyomoException(""" -Cannot convert non-constant Pyomo numeric value (%s) to bool. -This error is usually caused by using a Var, unit, or mutable Param in a -Boolean context such as an "if" statement. For example, - >>> m.x = Var() - >>> if not m.x: - ... pass -would cause this exception.""".strip() % (self,)) - - def __float__(self): - """Coerce the value to a floating point - - Numeric values can be coerced to float only if the value / - expression is constant. Fixed (but non-constant) or variable - values will raise an exception. - - Raises: - TypeError - - """ - if self.is_constant(): - return float(self()) - raise TypeError(""" -Implicit conversion of Pyomo numeric value (%s) to float is disabled. -This error is often the result of using Pyomo components as arguments to -one of the Python built-in math module functions when defining -expressions. Avoid this error by using Pyomo-provided math functions or -explicitly resolving the numeric value using the Pyomo value() function. -""".strip() % (self,)) - - def __int__(self): - """Coerce the value to an integer - - Numeric values can be coerced to int only if the value / - expression is constant. Fixed (but non-constant) or variable - values will raise an exception. - - Raises: - TypeError - - """ - if self.is_constant(): - return int(self()) - raise TypeError(""" -Implicit conversion of Pyomo numeric value (%s) to int is disabled. -This error is often the result of using Pyomo components as arguments to -one of the Python built-in math module functions when defining -expressions. Avoid this error by using Pyomo-provided math functions or -explicitly resolving the numeric value using the Pyomo value() function. -""".strip() % (self,)) - - def __lt__(self,other): - """ - Less than operator - - This method is called when Python processes statements of the form:: - - self < other - other > self - """ - return _generate_relational_expression(_lt, self, other) - - def __gt__(self,other): - """ - Greater than operator - - This method is called when Python processes statements of the form:: - - self > other - other < self - """ - return _generate_relational_expression(_lt, other, self) - - def __le__(self,other): - """ - Less than or equal operator - - This method is called when Python processes statements of the form:: - - self <= other - other >= self - """ - return _generate_relational_expression(_le, self, other) - - def __ge__(self,other): - """ - Greater than or equal operator - - This method is called when Python processes statements of the form:: - - self >= other - other <= self - """ - return _generate_relational_expression(_le, other, self) - - def __eq__(self,other): - """ - Equal to operator - - This method is called when Python processes the statement:: - - self == other - """ - return _generate_relational_expression(_eq, self, other) - - def __add__(self,other): - """ - Binary addition - - This method is called when Python processes the statement:: - - self + other - """ - return _generate_sum_expression(_add,self,other) - - def __sub__(self,other): - """ - Binary subtraction - - This method is called when Python processes the statement:: - - self - other - """ - return _generate_sum_expression(_sub,self,other) - - def __mul__(self,other): - """ - Binary multiplication - - This method is called when Python processes the statement:: - - self * other - """ - return _generate_mul_expression(_mul,self,other) - - def __div__(self,other): - """ - Binary division - - This method is called when Python processes the statement:: - - self / other - """ - return _generate_mul_expression(_div,self,other) - - def __truediv__(self,other): - """ - Binary division (when __future__.division is in effect) - - This method is called when Python processes the statement:: - - self / other - """ - return _generate_mul_expression(_div,self,other) - - def __pow__(self,other): - """ - Binary power - - This method is called when Python processes the statement:: - - self ** other - """ - return _generate_other_expression(_pow,self,other) - - def __radd__(self,other): - """ - Binary addition - - This method is called when Python processes the statement:: - - other + self - """ - return _generate_sum_expression(_radd,self,other) - - def __rsub__(self,other): - """ - Binary subtraction - - This method is called when Python processes the statement:: - - other - self - """ - return _generate_sum_expression(_rsub,self,other) - - def __rmul__(self,other): - """ - Binary multiplication - - This method is called when Python processes the statement:: - - other * self - - when other is not a :class:`NumericValue ` object. - """ - return _generate_mul_expression(_rmul,self,other) - - def __rdiv__(self,other): - """Binary division - - This method is called when Python processes the statement:: - - other / self - """ - return _generate_mul_expression(_rdiv,self,other) - - def __rtruediv__(self,other): - """ - Binary division (when __future__.division is in effect) - - This method is called when Python processes the statement:: - - other / self - """ - return _generate_mul_expression(_rdiv,self,other) - - def __rpow__(self,other): - """ - Binary power - - This method is called when Python processes the statement:: - - other ** self - """ - return _generate_other_expression(_rpow,self,other) - - def __iadd__(self,other): - """ - Binary addition - - This method is called when Python processes the statement:: - - self += other - """ - return _generate_sum_expression(_iadd,self,other) - - def __isub__(self,other): - """ - Binary subtraction - - This method is called when Python processes the statement:: - - self -= other - """ - return _generate_sum_expression(_isub,self,other) - - def __imul__(self,other): - """ - Binary multiplication - - This method is called when Python processes the statement:: - - self *= other - """ - return _generate_mul_expression(_imul,self,other) - - def __idiv__(self,other): - """ - Binary division - - This method is called when Python processes the statement:: - - self /= other - """ - return _generate_mul_expression(_idiv,self,other) - - def __itruediv__(self,other): - """ - Binary division (when __future__.division is in effect) - - This method is called when Python processes the statement:: - - self /= other - """ - return _generate_mul_expression(_idiv,self,other) - - def __ipow__(self,other): - """ - Binary power - - This method is called when Python processes the statement:: - - self **= other - """ - return _generate_other_expression(_ipow,self,other) - - def __neg__(self): - """ - Negation - - This method is called when Python processes the statement:: - - - self - """ - return _generate_sum_expression(_neg, self, None) - - def __pos__(self): - """ - Positive expression - - This method is called when Python processes the statement:: - - + self - """ - return self - - def __abs__(self): - """ Absolute value - - This method is called when Python processes the statement:: - - abs(self) - """ - return _generate_other_expression(_abs,self, None) - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - return NumericNDArray.__array_ufunc__( - None, ufunc, method, *inputs, **kwargs) - - def to_string(self, verbose=None, labeler=None, smap=None, - compute_values=False): - """Return a string representation of the expression tree. - - Args: - verbose (bool): If :const:`True`, then the string - representation consists of nested functions. Otherwise, - the string representation is an infix algebraic equation. - Defaults to :const:`False`. - labeler: An object that generates string labels for - non-constant in the expression tree. Defaults to - :const:`None`. - smap: A SymbolMap instance that stores string labels for - non-constant nodes in the expression tree. Defaults to - :const:`None`. - compute_values (bool): If :const:`True`, then fixed - expressions are evaluated and the string representation - of the resulting value is returned. - - Returns: - A string representation for the expression tree. - - """ - if compute_values and self.is_fixed(): - try: - return str(self()) - except: - pass - if not self.is_constant(): - if smap is not None: - return smap.getSymbol(self, labeler) - elif labeler is not None: - return labeler(self) - return str(self) + if check_if_numeric_type(obj): + return as_numeric(obj) + else: + return obj class NumericConstant(NumericValue): @@ -967,12 +405,6 @@ class NumericConstant(NumericValue): def __init__(self, value): self.value = value - def __getstate__(self): - state = super(NumericConstant, self).__getstate__() - for i in NumericConstant.__slots__: - state[i] = getattr(self,i) - return state - def is_constant(self): return True @@ -990,7 +422,7 @@ def __call__(self, exception=True): return self.value def pprint(self, ostream=None, verbose=False): - if ostream is None: #pragma:nocover + if ostream is None: # pragma:nocover ostream = sys.stdout ostream.write(str(self)) @@ -999,29 +431,3 @@ def pprint(self, ostream=None, verbose=False): # We use as_numeric() so that the constant is also in the cache ZeroConstant = as_numeric(0) - -# -# Note: the "if numpy_available" in the class definition also ensures -# that the numpy types are registered if numpy is in fact available -# -class NumericNDArray(np.ndarray if numpy_available else object): - """An ndarray subclass that stores Pyomo numeric expressions""" - - def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): - if method == '__call__': - # Convert all incoming types to ndarray (to prevent recursion) - args = [np.asarray(i) for i in inputs] - # Set the return type to be an 'object'. This prevents the - # logical operators from casting the result to a bool. This - # requires numpy >= 1.6 - kwargs['dtype'] = object - - # Delegate to the base ufunc, but return an instance of this - # class so that additional operators hit this method. - ans = getattr(ufunc, method)(*args, **kwargs) - if isinstance(ans, np.ndarray): - if ans.size == 1: - return ans[0] - return ans.view(NumericNDArray) - else: - return ans diff --git a/pyomo/core/expr/relational_expr.py b/pyomo/core/expr/relational_expr.py new file mode 100644 index 00000000000..2909be95c5a --- /dev/null +++ b/pyomo/core/expr/relational_expr.py @@ -0,0 +1,460 @@ +# -*- coding: utf-8 -*- +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import operator + +from pyomo.common.deprecation import deprecated +from pyomo.common.errors import PyomoException, DeveloperError +from pyomo.common.numeric_types import ( + native_numeric_types, + check_if_numeric_type, + value, +) + +from .base import ExpressionBase +from .boolean_value import BooleanValue +from .expr_common import _lt, _le, _eq, ExpressionType +from .numvalue import is_potentially_variable, is_constant +from .visitor import polynomial_degree + +# ------------------------------------------------------- +# +# Expression classes +# +# ------------------------------------------------------- + + +class RelationalExpression(ExpressionBase, BooleanValue): + __slots__ = ('_args_',) + + EXPRESSION_SYSTEM = ExpressionType.RELATIONAL + + def __init__(self, args): + self._args_ = args + + def __bool__(self): + if self.is_constant(): + return bool(self()) + raise PyomoException( + """ +Cannot convert non-constant Pyomo expression (%s) to bool. +This error is usually caused by using a Var, unit, or mutable Param in a +Boolean context such as an "if" statement, or when checking container +membership or equality. For example, + >>> m.x = Var() + >>> if m.x >= 1: + ... pass +and + >>> m.y = Var() + >>> if m.y in [m.x, m.y]: + ... pass +would both cause this exception.""".strip() + % (self,) + ) + + @property + def args(self): + """ + Return the child nodes + + Returns: Either a list or tuple (depending on the node storage + model) containing only the child nodes of this node + """ + return self._args_[: self.nargs()] + + @deprecated( + "is_relational() is deprecated in favor of " + "is_expression_type(ExpressionType.RELATIONAL)", + version='6.4.3', + ) + def is_relational(self): + return self.is_expression_type(ExpressionType.RELATIONAL) + + def is_potentially_variable(self): + return any(is_potentially_variable(arg) for arg in self._args_) + + def polynomial_degree(self): + """ + Return the polynomial degree of the expression. + + Returns: + A non-negative integer that is the polynomial + degree if the expression is polynomial, or :const:`None` otherwise. + """ + return polynomial_degree(self) + + def _compute_polynomial_degree(self, result): + # NB: We can't use max() here because None (non-polynomial) + # overrides a numeric value (and max() just ignores it) + ans = 0 + for x in result: + if x is None: + return None + elif ans < x: + ans = x + return ans + + def __eq__(self, other): + """ + Equal to operator + + This method is called when Python processes statements of the form:: + + self == other + other == self + """ + return _generate_relational_expression(_eq, self, other) + + def __lt__(self, other): + """ + Less than operator + + This method is called when Python processes statements of the form:: + + self < other + other > self + """ + return _generate_relational_expression(_lt, self, other) + + def __gt__(self, other): + """ + Greater than operator + + This method is called when Python processes statements of the form:: + + self > other + other < self + """ + return _generate_relational_expression(_lt, other, self) + + def __le__(self, other): + """ + Less than or equal operator + + This method is called when Python processes statements of the form:: + + self <= other + other >= self + """ + return _generate_relational_expression(_le, self, other) + + def __ge__(self, other): + """ + Greater than or equal operator + + This method is called when Python processes statements of the form:: + + self >= other + other <= self + """ + return _generate_relational_expression(_le, other, self) + + +class RangedExpression(RelationalExpression): + """ + Ranged expressions, which define relations with a lower and upper bound:: + + x < y < z + x <= y <= z + + args: + args (tuple): child nodes + strict (tuple): flags that indicate whether the inequalities are strict + """ + + __slots__ = ('_strict',) + PRECEDENCE = 9 + + # Shared tuples for the most common RangedExpression objects encountered + # in math programming. Creating a single (shared) tuple saves memory + STRICT = { + False: (False, False), + True: (True, True), + (True, True): (True, True), + (False, False): (False, False), + (True, False): (True, False), + (False, True): (False, True), + } + + def __init__(self, args, strict): + super(RangedExpression, self).__init__(args) + self._strict = RangedExpression.STRICT[strict] + + def nargs(self): + return 3 + + def create_node_with_local_data(self, args): + return self.__class__(args, self._strict) + + def _apply_operation(self, result): + _l, _b, _r = result + if not self._strict[0]: + if not self._strict[1]: + return _l <= _b and _b <= _r + else: + return _l <= _b and _b < _r + elif not self._strict[1]: + return _l < _b and _b <= _r + else: + return _l < _b and _b < _r + + def _to_string(self, values, verbose, smap): + return "%s %s %s %s %s" % ( + values[0], + "<="[: 2 - self._strict[0]], + values[1], + "<="[: 2 - self._strict[1]], + values[2], + ) + + @property + def strict(self): + return self._strict + + +class InequalityExpression(RelationalExpression): + """ + Inequality expressions, which define less-than or + less-than-or-equal relations:: + + x < y + x <= y + + args: + args (tuple): child nodes + strict (bool): a flag that indicates whether the inequality is strict + """ + + __slots__ = ('_strict',) + PRECEDENCE = 9 + + def __init__(self, args, strict): + super().__init__(args) + self._strict = strict + + def nargs(self): + return 2 + + def create_node_with_local_data(self, args): + return self.__class__(args, self._strict) + + def _apply_operation(self, result): + _l, _r = result + if self._strict: + return _l < _r + return _l <= _r + + def _to_string(self, values, verbose, smap): + return "%s %s %s" % (values[0], "<="[: 2 - self._strict], values[1]) + + @property + def strict(self): + return self._strict + + +def inequality(lower=None, body=None, upper=None, strict=False): + """ + A utility function that can be used to declare inequality and + ranged inequality expressions. The expression:: + + inequality(2, model.x) + + is equivalent to the expression:: + + 2 <= model.x + + The expression:: + + inequality(2, model.x, 3) + + is equivalent to the expression:: + + 2 <= model.x <= 3 + + .. note:: This ranged inequality syntax is deprecated in Pyomo. + This function provides a mechanism for expressing + ranged inequalities without chained inequalities. + + args: + lower: an expression defines a lower bound + body: an expression defines the body of a ranged constraint + upper: an expression defines an upper bound + strict (bool): A boolean value that indicates whether the inequality + is strict. Default is :const:`False`. + + Returns: + A relational expression. The expression is an inequality + if any of the values :attr:`lower`, :attr:`body` or + :attr:`upper` is :const:`None`. Otherwise, the expression + is a ranged inequality. + """ + if lower is None: + if body is None or upper is None: + raise ValueError("Invalid inequality expression.") + return InequalityExpression((body, upper), strict) + if body is None: + if lower is None or upper is None: + raise ValueError("Invalid inequality expression.") + return InequalityExpression((lower, upper), strict) + if upper is None: + return InequalityExpression((lower, body), strict) + return RangedExpression((lower, body, upper), strict) + + +class EqualityExpression(RelationalExpression): + """ + Equality expression:: + + x == y + """ + + __slots__ = () + PRECEDENCE = 9 + + def nargs(self): + return 2 + + def __bool__(self): + lhs, rhs = self.args + if lhs is rhs: + return True + return super().__bool__() + + def _apply_operation(self, result): + _l, _r = result + return _l == _r + + def _to_string(self, values, verbose, smap): + return "%s == %s" % (values[0], values[1]) + + +class NotEqualExpression(RelationalExpression): + """ + Not-equal expression:: + + x != y + """ + + __slots__ = () + + def nargs(self): + return 2 + + def __bool__(self): + lhs, rhs = self.args + if lhs is not rhs: + return True + return super().__bool__() + + def _apply_operation(self, result): + _l, _r = result + return _l != _r + + def _to_string(self, values, verbose, smap): + return "%s != %s" % (values[0], values[1]) + + +_relational_op = { + _eq: (operator.eq, '==', None), + _le: (operator.le, '<=', False), + _lt: (operator.lt, '<', True), +} + + +def _process_nonnumeric_arg(obj): + if hasattr(obj, 'as_numeric'): + # We assume non-numeric types that have an as_numeric method + # are instances of AutoLinkedBooleanVar. Calling as_numeric + # will return a valid Binary Var (and issue the appropriate + # deprecation warning) + obj = obj.as_numeric() + elif check_if_numeric_type(obj): + return obj + else: + # User assistance: provide a helpful exception when using an + # indexed object in an expression + if obj.is_component_type() and obj.is_indexed(): + raise TypeError( + "Argument for expression is an indexed numeric " + "value\nspecified without an index:\n\t%s\nIs this " + "value defined over an index that you did not specify?" % (obj.name,) + ) + + raise TypeError( + "Attempting to use a non-numeric type (%s) in a " + "numeric expression context." % (obj.__class__.__name__,) + ) + + +def _process_relational_arg(arg, n): + try: + _numeric = arg.is_numeric_type() + except AttributeError: + _numeric = False + if _numeric: + if arg.is_constant(): + arg = value(arg) + else: + _process_relational_arg.constant = False + else: + if arg.__class__ is InequalityExpression: + _process_relational_arg.relational += n + _process_relational_arg.constant = False + else: + arg = _process_nonnumeric_arg(arg) + if arg.__class__ not in native_numeric_types: + _process_relational_arg.constant = False + return arg + + +def _generate_relational_expression(etype, lhs, rhs): + # Note that the use of "global" state flags is fast, but not + # thread-safe. This should not be an issue because the GIL + # effectively prevents parallel model construction. If we ever need + # to revisit this design, we can pass in a "state" to + # _process_relational_arg() - at the cost of creating/destroying the + # state and an extra function argument. + _process_relational_arg.relational = 0 + _process_relational_arg.constant = True + if lhs.__class__ not in native_numeric_types: + lhs = _process_relational_arg(lhs, 1) + if rhs.__class__ not in native_numeric_types: + rhs = _process_relational_arg(rhs, 2) + + if _process_relational_arg.constant: + return _relational_op[etype][0](value(lhs), value(rhs)) + + if etype == _eq: + if _process_relational_arg.relational: + raise TypeError( + "Cannot create an EqualityExpression where one of the " + "sub-expressions is a relational expression:\n" + " %s\n {==}\n %s" % (lhs, rhs) + ) + return EqualityExpression((lhs, rhs)) + elif _process_relational_arg.relational: + if _process_relational_arg.relational == 1: + return RangedExpression( + lhs._args_ + (rhs,), (lhs._strict, _relational_op[etype][2]) + ) + elif _process_relational_arg.relational == 2: + return RangedExpression( + (lhs,) + rhs._args_, (_relational_op[etype][2], rhs._strict) + ) + else: # _process_relational_arg.relational == 3 + raise TypeError( + "Cannot create an InequalityExpression where both " + "sub-expressions are relational expressions:\n" + " %s\n {%s}\n %s" % (lhs, _relational_op[etype][1], rhs) + ) + else: + return InequalityExpression((lhs, rhs), _relational_op[etype][2]) diff --git a/pyomo/core/expr/symbol_map.py b/pyomo/core/expr/symbol_map.py index 61af7f09eb3..ab497c217a8 100644 --- a/pyomo/core/expr/symbol_map.py +++ b/pyomo/core/expr/symbol_map.py @@ -20,8 +20,8 @@ class SymbolMap(object): input to an optimizer. Warning: - A symbol map should never be pickled. This class is - typically constructed by solvers and writers, and it may be + A symbol map should never be pickled. This class is + typically constructed by solvers and writers, and it may be owned by models. Note: @@ -29,8 +29,8 @@ class SymbolMap(object): Attributes: byObject (dict): maps (object id) to (string label) - bySymbol (dict): maps (string label) to (object weakref) - alias (dict): maps (string label) to (object weakref) + bySymbol (dict): maps (string label) to (object) + alias (dict): maps (string label) to (object) default_labeler: used to compute a string label from an object """ @@ -45,40 +45,62 @@ class UnknownSymbol: def __getstate__(self): # - # TODO: Why is this method defined given the previous - # comment that this object should not be pickled? + # While we should generally not pickle a SymbolMap, we still + # need to implement __getstate__ / __setstate__ so that the + # bi-map is correctly duplicated if the object is ever + # deepcopied (the id() keys need to be updated to point to the + # new model objects) # # Note: byObject and bySymbol constitute a bimap. We only need - # to pickle one of them, and bySymbol is easier. + # to save one of them. # - return { - 'bySymbol': tuple( - (key, obj()) for key, obj in self.bySymbol.items() ), - 'aliases': tuple( - (key, obj()) for key, obj in self.aliases.items() ), - } + return (self.bySymbol, self.aliases, self.default_labeler) def __setstate__(self, state): - self.byObject = {id(obj):key for key, obj in state['bySymbol']} - self.bySymbol = {key:weakref_ref(obj) for key,obj in state['bySymbol']} - self.aliases = {key:weakref_ref(obj) for key, obj in state['aliases']} + self.bySymbol, self.aliases, self.default_labeler = state + self.byObject = {id(v): k for k, v in self.bySymbol.items()} def addSymbol(self, obj, symb): """ Add a symbol for a given object + + This method assumes that objects and symbol names will not conflict. """ + nSymbols = len(self.byObject) + 1 self.byObject[id(obj)] = symb - self.bySymbol[symb] = weakref_ref(obj) + self.bySymbol[symb] = obj + if nSymbols != len(self.bySymbol): + raise RuntimeError( + "SymbolMap.addSymbol(): duplicate symbol. " + "SymbolMap likely in an inconsistent state" + ) + if len(self.byObject) != len(self.bySymbol): + raise RuntimeError( + "SymbolMap.addSymbol(): duplicate object. " + "SymbolMap likely in an inconsistent state" + ) def addSymbols(self, obj_symbol_tuples): """ Add (object, symbol) tuples from an iterable object. - This method assumes that symbol names will not conflict. + This method assumes that objects and symbol names will not conflict. """ + nSymbols = len(self.bySymbol) for obj, symbol in obj_symbol_tuples: self.byObject[id(obj)] = symbol - self.bySymbol[symbol] = weakref_ref(obj) + self.bySymbol[symbol] = obj + nSymbols += 1 + if nSymbols != len(self.bySymbol): + raise RuntimeError( + "SymbolMap.addSymbols(): duplicate symbol. " + "SymbolMap likely in an inconsistent state" + ) + if len(self.byObject) != len(self.bySymbol): + raise RuntimeError( + "SymbolMap.addSymbols(): duplicate object. " + "SymbolMap likely in an inconsistent state" + ) def createSymbol(self, obj, labeler=None, *args): """ @@ -86,31 +108,27 @@ def createSymbol(self, obj, labeler=None, *args): error checking is done to ensure that the generated symbol name is unique. """ - #if args: - # symb = labeler(obj, *args) - #else: - # symb = labeler(obj) - if labeler: - symb = labeler(obj) - elif self.default_labeler: - symb = self.default_labeler(obj) - else: - symb = str(obj) - self.byObject[id(obj)] = symb - self.bySymbol[symb] = weakref_ref(obj) - return symb + if labeler is None: + if self.default_labeler is not None: + labeler = self.default_labeler + else: + labeler = str + symbol = labeler(obj, *args) + self.addSymbol(obj, symbol) + return symbol - def createSymbols(self, objs, labeler, *args): + def createSymbols(self, objs, labeler=None, *args): """ Create a symbol for iterable objects with a given labeler. No error checking is done to ensure that the generated symbol names are unique. """ - #if args: - # self.addSymbols([(obj,labeler(obj, *args)) for obj in objs]) - #else: - # self.addSymbols([(obj,labeler(obj)) for obj in objs]) - self.addSymbols([(obj,labeler(obj)) for obj in objs]) + if labeler is None: + if self.default_labeler is not None: + labeler = self.default_labeler + else: + labeler = str + self.addSymbols((obj, labeler(obj, *args)) for obj in objs) def getSymbol(self, obj, labeler=None, *args): """ @@ -123,21 +141,20 @@ def getSymbol(self, obj, labeler=None, *args): # # Create a new symbol, performing an error check if it is a duplicate # - if labeler: - symb = labeler(obj) - elif self.default_labeler: - symb = self.default_labeler(obj) - else: - symb = str(obj) - if symb in self.bySymbol: - if self.bySymbol[symb]() is not obj: - raise RuntimeError( - "Duplicate symbol '%s' already associated with " - "component '%s' (conflicting component: '%s')" - % (symb, self.bySymbol[symb]().name, obj.name) ) - self.bySymbol[symb] = weakref_ref(obj) - self.byObject[obj_id] = symb - return symb + symbol = (labeler or self.default_labeler or str)(obj, *args) + if symbol in self.bySymbol: + # The labeler can have side-effects, including registering + # this symbol in the symbol map + if obj is self.bySymbol[symbol]: + return symbol + raise RuntimeError( + "Duplicate symbol '%s' already associated with " + "component '%s' (conflicting component: '%s')" + % (symbol, self.bySymbol[symbol].name, obj.name) + ) + self.bySymbol[symbol] = obj + self.byObject[obj_id] = symbol + return symbol def alias(self, obj, name): """ @@ -149,28 +166,33 @@ def alias(self, obj, name): # If the alias exists and the objects are the same, # then return. Otherwise, raise an exception. # - old_object = self.aliases[name]() + old_object = self.aliases[name] if old_object is obj: return else: raise RuntimeError( "Duplicate alias '%s' already associated with " "component '%s' (conflicting component: '%s')" - % (name, "UNKNOWN" if old_object is None else old_object.name, obj.name) ) + % ( + name, + "UNKNOWN" if old_object is None else old_object.name, + obj.name, + ) + ) else: # # Add the alias # - self.aliases[name] = weakref_ref(obj) + self.aliases[name] = obj def getObject(self, symbol): """ Return the object corresponding to a symbol """ if symbol in self.bySymbol: - return self.bySymbol[symbol]() + return self.bySymbol[symbol] elif symbol in self.aliases: - return self.aliases[symbol]() + return self.aliases[symbol] else: return SymbolMap.UnknownSymbol diff --git a/pyomo/core/expr/sympy_tools.py b/pyomo/core/expr/sympy_tools.py index 8c5288d8aac..7b494a610cd 100644 --- a/pyomo/core/expr/sympy_tools.py +++ b/pyomo/core/expr/sympy_tools.py @@ -8,13 +8,15 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +import operator +import sys from pyomo.common import DeveloperError from pyomo.common.collections import ComponentMap from pyomo.common.dependencies import attempt_import from pyomo.common.errors import NondifferentiableError -from pyomo.core.expr import current as EXPR, native_types -from pyomo.core.expr.numvalue import value +import pyomo.core.expr as EXPR +from pyomo.core.expr.numvalue import value, native_types # # Sympy takes a significant time to load; defer importing it unless @@ -25,77 +27,105 @@ _pyomo_operator_map = {} _functionMap = {} + def _configure_sympy(sympy, available): if not available: return - _operatorMap.update({ - sympy.Add: _sum, - sympy.Mul: _prod, - sympy.Pow: lambda x, y: x**y, - sympy.exp: lambda x: EXPR.exp(x), - sympy.log: lambda x: EXPR.log(x), - sympy.sin: lambda x: EXPR.sin(x), - sympy.asin: lambda x: EXPR.asin(x), - sympy.sinh: lambda x: EXPR.sinh(x), - sympy.asinh: lambda x: EXPR.asinh(x), - sympy.cos: lambda x: EXPR.cos(x), - sympy.acos: lambda x: EXPR.acos(x), - sympy.cosh: lambda x: EXPR.cosh(x), - sympy.acosh: lambda x: EXPR.acosh(x), - sympy.tan: lambda x: EXPR.tan(x), - sympy.atan: lambda x: EXPR.atan(x), - sympy.tanh: lambda x: EXPR.tanh(x), - sympy.atanh: lambda x: EXPR.atanh(x), - sympy.ceiling: lambda x: EXPR.ceil(x), - sympy.floor: lambda x: EXPR.floor(x), - sympy.sqrt: lambda x: EXPR.sqrt(x), - sympy.Abs: lambda x: abs(x), - sympy.Derivative: _nondifferentiable, - sympy.Tuple: lambda *x: x, - }) - - _pyomo_operator_map.update({ - EXPR.SumExpression: sympy.Add, - EXPR.ProductExpression: sympy.Mul, - EXPR.NPV_ProductExpression: sympy.Mul, - EXPR.MonomialTermExpression: sympy.Mul, - }) - - _functionMap.update({ - 'exp': sympy.exp, - 'log': sympy.log, - 'log10': lambda x: sympy.log(x)/sympy.log(10), - 'sin': sympy.sin, - 'asin': sympy.asin, - 'sinh': sympy.sinh, - 'asinh': sympy.asinh, - 'cos': sympy.cos, - 'acos': sympy.acos, - 'cosh': sympy.cosh, - 'acosh': sympy.acosh, - 'tan': sympy.tan, - 'atan': sympy.atan, - 'tanh': sympy.tanh, - 'atanh': sympy.atanh, - 'ceil': sympy.ceiling, - 'floor': sympy.floor, - 'sqrt': sympy.sqrt, - }) + _operatorMap.update( + { + sympy.Add: sum, + sympy.Mul: _prod, + sympy.Pow: lambda x: operator.pow(*x), + sympy.exp: lambda x: EXPR.exp(*x), + sympy.log: lambda x: EXPR.log(*x), + sympy.sin: lambda x: EXPR.sin(*x), + sympy.asin: lambda x: EXPR.asin(*x), + sympy.sinh: lambda x: EXPR.sinh(*x), + sympy.asinh: lambda x: EXPR.asinh(*x), + sympy.cos: lambda x: EXPR.cos(*x), + sympy.acos: lambda x: EXPR.acos(*x), + sympy.cosh: lambda x: EXPR.cosh(*x), + sympy.acosh: lambda x: EXPR.acosh(*x), + sympy.tan: lambda x: EXPR.tan(*x), + sympy.atan: lambda x: EXPR.atan(*x), + sympy.tanh: lambda x: EXPR.tanh(*x), + sympy.atanh: lambda x: EXPR.atanh(*x), + sympy.ceiling: lambda x: EXPR.ceil(*x), + sympy.floor: lambda x: EXPR.floor(*x), + sympy.sqrt: lambda x: EXPR.sqrt(*x), + sympy.Abs: lambda x: abs(*x), + sympy.Derivative: _nondifferentiable, + sympy.Tuple: lambda x: x, + sympy.Or: lambda x: EXPR.lor(*x), + sympy.And: lambda x: EXPR.land(*x), + sympy.Implies: lambda x: EXPR.implies(*x), + sympy.Equivalent: lambda x: EXPR.equivalents(*x), + sympy.Not: lambda x: EXPR.lnot(*x), + sympy.LessThan: lambda x: operator.le(*x), + sympy.StrictLessThan: lambda x: operator.lt(*x), + sympy.GreaterThan: lambda x: operator.ge(*x), + sympy.StrictGreaterThan: lambda x: operator.gt(*x), + sympy.Equality: lambda x: operator.eq(*x), + } + ) + + _pyomo_operator_map.update( + { + EXPR.SumExpression: sympy.Add, + EXPR.LinearExpression: sympy.Add, + EXPR.ProductExpression: sympy.Mul, + EXPR.MonomialTermExpression: sympy.Mul, + EXPR.ExternalFunctionExpression: _external_fcn, + EXPR.AndExpression: sympy.And, + EXPR.OrExpression: sympy.Or, + EXPR.ImplicationExpression: sympy.Implies, + EXPR.EquivalenceExpression: sympy.Equivalent, + EXPR.XorExpression: sympy.Xor, + EXPR.NotExpression: sympy.Not, + } + ) + + _functionMap.update( + { + 'exp': sympy.exp, + 'log': sympy.log, + 'log10': lambda x: sympy.log(x) / sympy.log(10), + 'sin': sympy.sin, + 'asin': sympy.asin, + 'sinh': sympy.sinh, + 'asinh': sympy.asinh, + 'cos': sympy.cos, + 'acos': sympy.acos, + 'cosh': sympy.cosh, + 'acosh': sympy.acosh, + 'tan': sympy.tan, + 'atan': sympy.atan, + 'tanh': sympy.tanh, + 'atanh': sympy.atanh, + 'ceil': sympy.ceiling, + 'floor': sympy.floor, + 'sqrt': sympy.sqrt, + } + ) + sympy, sympy_available = attempt_import('sympy', callback=_configure_sympy) -def _prod(*x): - ans = x[0] - for i in x[1:]: - ans *= i - return ans +if sys.version_info[:2] < (3, 8): + + def _prod(args): + ans = 1 + for arg in args: + ans *= arg + return ans -def _sum(*x): - return sum(x_ for x_ in x) +else: + from math import prod as _prod -def _nondifferentiable(*x): + +def _nondifferentiable(x): if type(x[1]) is tuple: # sympy >= 1.3 returns tuples (var, order) wrt = x[1][0] @@ -103,8 +133,16 @@ def _nondifferentiable(*x): # early versions of sympy returned the bare var wrt = x[1] raise NondifferentiableError( - "The sub-expression '%s' is not differentiable with respect to %s" - % (x[0], wrt) ) + "The sub-expression '%s' is not differentiable with respect to %s" % (x[0], wrt) + ) + + +def _external_fcn(*x): + raise TypeError( + "Expressions containing external functions are not convertible to " + f"sympy expressions (found 'f{x}')" + ) + class PyomoSympyBimap(object): def __init__(self): @@ -130,12 +168,13 @@ def getSympySymbol(self, pyomo_object): def sympyVars(self): return self.sympy2pyomo.keys() + # ===================================================== # sympyify_expression # ===================================================== -class Pyomo2SympyVisitor(EXPR.StreamBasedExpressionVisitor): +class Pyomo2SympyVisitor(EXPR.StreamBasedExpressionVisitor): def __init__(self, object_map): sympy.Add # this ensures _configure_sympy gets run super(Pyomo2SympyVisitor, self).__init__() @@ -160,22 +199,23 @@ def beforeChild(self, node, child, child_idx): if type(child) in native_types: return False, child # - # We will descend into all expressions... - # - if child.is_expression_type(): - return True, None - # # Replace pyomo variables with sympy variables # if child.is_potentially_variable(): - return False, self.object_map.getSympySymbol(child) + # + # We will descend into all expressions... + # + if child.is_expression_type(): + return True, None + else: + return False, self.object_map.getSympySymbol(child) # # Everything else is a constant... # return False, value(child) -class Sympy2PyomoVisitor(EXPR.StreamBasedExpressionVisitor): +class Sympy2PyomoVisitor(EXPR.StreamBasedExpressionVisitor): def __init__(self, object_map): sympy.Add # this ensures _configure_sympy gets run super(Sympy2PyomoVisitor, self).__init__() @@ -185,26 +225,26 @@ def initializeWalker(self, expr): return self.beforeChild(None, expr, None) def enterNode(self, node): - return (node._args, []) + return (node.args, []) def exitNode(self, node, values): - """ Visit nodes that have been expanded """ - _sympyOp = node - _op = _operatorMap.get( type(_sympyOp), None ) + """Visit nodes that have been expanded""" + _op = _operatorMap.get(node.func, None) if _op is None: raise DeveloperError( - "sympy expression type '%s' not found in the operator " - "map" % type(_sympyOp) ) - return _op(*tuple(values)) + f"sympy expression type {node.func} not found in the operator map" + ) + return _op(tuple(values)) def beforeChild(self, node, child, child_idx): - if not child._args: + if not child.args: item = self.object_map.getPyomoSymbol(child, None) if item is None: item = float(child.evalf()) return False, item return True, None + def sympyify_expression(expr): """Convert a Pyomo expression to a Sympy expression""" # diff --git a/pyomo/core/expr/taylor_series.py b/pyomo/core/expr/taylor_series.py index 68f503b4550..2c72f8bcfbc 100644 --- a/pyomo/core/expr/taylor_series.py +++ b/pyomo/core/expr/taylor_series.py @@ -1,5 +1,4 @@ -from pyomo.core.expr.current import identify_variables, value -from pyomo.core.expr.calculus.derivatives import differentiate +from pyomo.core.expr import identify_variables, value, differentiate import logging import math @@ -20,13 +19,15 @@ def _loop(derivs, e_vars, diff_mode, ndx_list): ndx_list.pop() -def taylor_series_expansion(expr, diff_mode=differentiate.Modes.reverse_numeric, order=1): +def taylor_series_expansion( + expr, diff_mode=differentiate.Modes.reverse_numeric, order=1 +): """ Generate a taylor series approximation for expr. Parameters ---------- - expr: pyomo.core.expr.numeric_expr.ExpressionBase + expr: pyomo.core.expr.numeric_expr.NumericExpression diff_mode: pyomo.core.expr.calculus.derivatives.Modes The method for differentiation. order: The order of the taylor series expansion @@ -36,19 +37,25 @@ def taylor_series_expansion(expr, diff_mode=differentiate.Modes.reverse_numeric, Returns ------- - res: pyomo.core.expr.numeric_expr.ExpressionBase + res: pyomo.core.expr.numeric_expr.NumericExpression """ if order < 0: - raise ValueError('Cannot compute taylor series expansion of order {0}'.format(str(order))) + raise ValueError( + 'Cannot compute taylor series expansion of order {0}'.format(str(order)) + ) if order != 1 and diff_mode is differentiate.Modes.reverse_numeric: - logger.warning('taylor_series_expansion can only use symbolic differentiation for orders larger than 1') + logger.warning( + 'taylor_series_expansion can only use symbolic differentiation for orders larger than 1' + ) diff_mode = differentiate.Modes.reverse_symbolic e_vars = list(identify_variables(expr=expr, include_fixed=False)) res = value(expr) if order >= 1: derivs = differentiate(expr=expr, wrt_list=e_vars, mode=diff_mode) - res += sum((e_vars[i] - e_vars[i].value) * value(derivs[i]) for i in range(len(e_vars))) + res += sum( + (e_vars[i] - e_vars[i].value) * value(derivs[i]) for i in range(len(e_vars)) + ) """ This last bit of code is just for higher order taylor series expansions. @@ -63,11 +70,14 @@ def taylor_series_expansion(expr, diff_mode=differentiate.Modes.reverse_numeric, """ if order >= 2: for n in range(2, order + 1): - coef = 1.0/math.factorial(n) + coef = 1.0 / math.factorial(n) for ndx_list, _derivs in _loop(derivs, e_vars, diff_mode, list()): tmp = coef for ndx in ndx_list: - tmp *= (e_vars[ndx] - e_vars[ndx].value) - res += tmp * sum((e_vars[i] - e_vars[i].value) * value(_derivs[i]) for i in range(len(e_vars))) + tmp *= e_vars[ndx] - e_vars[ndx].value + res += tmp * sum( + (e_vars[i] - e_vars[i].value) * value(_derivs[i]) + for i in range(len(e_vars)) + ) return res diff --git a/pyomo/core/expr/template_expr.py b/pyomo/core/expr/template_expr.py index 146b039025b..4682844fef0 100644 --- a/pyomo/core/expr/template_expr.py +++ b/pyomo/core/expr/template_expr.py @@ -15,35 +15,75 @@ import sys import builtins -from pyomo.core.expr.expr_errors import TemplateExpressionError +from pyomo.common.backports import nullcontext +from pyomo.common.errors import TemplateExpressionError +from pyomo.core.expr.base import ExpressionBase, ExpressionArgs_Mixin, NPV_Mixin +from pyomo.core.expr.logical_expr import BooleanExpression +from pyomo.core.expr.numeric_expr import ( + NumericExpression, + SumExpression, + Numeric_NPV_Mixin, + register_arg_type, + ARG_TYPE, +) from pyomo.core.expr.numvalue import ( - NumericValue, native_types, nonpyomo_leaf_types, - as_numeric, value, + NumericValue, + native_types, + nonpyomo_leaf_types, + as_numeric, + value, + is_constant, ) -from pyomo.core.expr.numeric_expr import ExpressionBase, SumExpression from pyomo.core.expr.visitor import ( - ExpressionReplacementVisitor, StreamBasedExpressionVisitor + ExpressionReplacementVisitor, + StreamBasedExpressionVisitor, ) logger = logging.getLogger(__name__) -class _NotSpecified(object): pass + +class _NotSpecified(object): + pass + class GetItemExpression(ExpressionBase): """ Expression to call :func:`__getitem__` on the base object. """ - PRECEDENCE = 1 - def _precedence(self): - return GetItemExpression.PRECEDENCE - - def __init__(self, args): - """Construct an expression with an operation and a set of arguments""" - self._args_ = args + __slots__ = () + PRECEDENCE = 1 - def nargs(self): - return len(self._args_) + def __new__(cls, args=()): + if cls is not GetItemExpression: + return super().__new__(cls) + npv_args = not any( + hasattr(arg, 'is_potentially_variable') and arg.is_potentially_variable() + for arg in args + ) + try: + component = _reduce_template_to_component(args[0]) + cdata = component._ComponentDataClass(component) + if cdata.is_numeric_type(): + if npv_args and not cdata.is_potentially_variable(): + return super().__new__(NPV_Numeric_GetItemExpression) + else: + return super().__new__(Numeric_GetItemExpression) + if cdata.is_logical_type(): + if npv_args and not cdata.is_potentially_variable(): + return super().__new__(NPV_Boolean_GetItemExpression) + else: + return super().__new__(Boolean_GetItemExpression) + except (AttributeError, TypeError): + # TypeError: error reducing to a component (usually due to + # unbounded domain on a Var used in a GetItemExpression) + # AttributeError: resolved component did not support the + # PyomoObject API + pass + if npv_args: + return super().__new__(NPV_Structural_GetItemExpression) + else: + return super().__new__(Structural_GetItemExpression) def __getattr__(self, attr): if attr.startswith('__') and attr.endswith('__'): @@ -59,46 +99,48 @@ def __len__(self): def getname(self, *args, **kwds): return self._args_[0].getname(*args, **kwds) - def is_potentially_variable(self): - _false = lambda: False - if any( getattr(arg, 'is_potentially_variable', _false)() - for arg in self._args_ ): - return True - base = self._args_[0] - if base.is_expression_type(): - base = value(base) - # TODO: fix value iteration when generating templates - # - # There is a nasty problem here: we want to iterate over all the - # members of the base and see if *any* of them are potentially - # variable. Unfortunately, this method is called during - # expression generation, and we *could* be generating a - # template. When that occurs, iterating over the base will - # yield a new IndexTemplate (which will in turn raise an - # exception because IndexTemplates are not constant). The real - # solution is probably to re-think how we define - # is_potentially_variable, but for now we will only handle - # members that are explicitly stored in the _data dict. Not - # general (because a Component could implement a non-standard - # storage scheme), but as of now [30 Apr 20], there are no known - # Components where this assumption will cause problems. - return any( getattr(x, 'is_potentially_variable', _false)() - for x in getattr(base, '_data', {}).values() ) + def nargs(self): + return len(self._args_) def _is_fixed(self, values): if not all(values[1:]): return False _true = lambda: True - return all( getattr(x, 'is_fixed', _true)() - for x in values[0].values() ) + return all(getattr(x, 'is_fixed', _true)() for x in values[0].values()) + + def _to_string(self, values, verbose, smap): + values = tuple(_[1:-1] if _[0] == '(' and _[-1] == ')' else _ for _ in values) + if verbose: + return "getitem(%s, %s)" % (values[0], ', '.join(values[1:])) + return "%s[%s]" % (values[0], ','.join(values[1:])) + + def _resolve_template(self, args): + return args[0].__getitem__(tuple(args[1:])) + + def _apply_operation(self, result): + args = tuple( + arg + if arg.__class__ in native_types or not arg.is_numeric_type() + else value(arg) + for arg in result[1:] + ) + return result[0].__getitem__(tuple(result[1:])) + + +class Numeric_GetItemExpression(GetItemExpression, NumericExpression): + __slots__ = () + + def nargs(self): + return len(self._args_) def _compute_polynomial_degree(self, result): if any(x != 0 for x in result[1:]): return None ans = 0 for x in result[0].values(): - if x.__class__ in nonpyomo_leaf_types \ - or not hasattr(x, 'polynomial_degree'): + if x.__class__ in nonpyomo_leaf_types or not hasattr( + x, 'polynomial_degree' + ): continue tmp = x.polynomial_degree() if tmp is None: @@ -107,40 +149,63 @@ def _compute_polynomial_degree(self, result): ans = tmp return ans - def _apply_operation(self, result): - obj = result[0].__getitem__( tuple(result[1:]) ) - if obj.__class__ in nonpyomo_leaf_types: - return obj - # Note that because it is possible (likely) that the result - # could be an IndexedComponent_slice object, must test "is - # True", as the slice will return a list of values. - if obj.is_numeric_type() is True: - obj = value(obj) - return obj - - def _to_string(self, values, verbose, smap, compute_values): - values = tuple(_[1:-1] if _[0]=='(' and _[-1]==')' else _ - for _ in values) - if verbose: - return "getitem(%s, %s)" % (values[0], ', '.join(values[1:])) - return "%s[%s]" % (values[0], ','.join(values[1:])) - def _resolve_template(self, args): - return args[0].__getitem__(tuple(args[1:])) +class NPV_Numeric_GetItemExpression(Numeric_NPV_Mixin, Numeric_GetItemExpression): + __slots__ = () + + +class Boolean_GetItemExpression(GetItemExpression, BooleanExpression): + __slots__ = () + + +class NPV_Boolean_GetItemExpression(NPV_Mixin, Boolean_GetItemExpression): + __slots__ = () + + +class Structural_GetItemExpression(ExpressionArgs_Mixin, GetItemExpression): + __slots__ = () + + +class NPV_Structural_GetItemExpression(NPV_Mixin, Structural_GetItemExpression): + __slots__ = () class GetAttrExpression(ExpressionBase): """ Expression to call :func:`__getattr__` on the base object. """ + __slots__ = () PRECEDENCE = 1 - def _precedence(self): - return GetAttrExpression.PRECEDENCE - - def nargs(self): - return len(self._args_) + def __new__(cls, args=()): + if cls is not GetAttrExpression: + return super().__new__(cls) + # Ironically, we need to actually create this object in order to + # determine what the class for this object should be. + if args[0].is_potentially_variable(): + self = Structural_GetAttrExpression(args) + else: + self = NPV_Structural_GetAttrExpression(args) + try: + attr = _reduce_template_to_component(self) + if attr.is_numeric_type(): + if attr.is_potentially_variable() or self.is_potentially_variable(): + return super().__new__(Numeric_GetAttrExpression) + else: + return super().__new__(NPV_Numeric_GetAttrExpression) + elif attr.is_logical_type(): + if attr.is_potentially_variable() or self.is_potentially_variable(): + return super().__new__(Boolean_GetAttrExpression) + else: + return super().__new__(NPV_Boolean_GetAttrExpression) + except (AttributeError, TypeError): + # TypeError: error reducing to a component (usually due to + # unbounded domain on a Var used in a GetItemExpression) + # AttributeError: resolved component did not support the + # PyomoObject API + pass + return self def __getattr__(self, attr): if attr.startswith('__') and attr.endswith('__'): @@ -156,27 +221,44 @@ def __iter__(self): def __len__(self): return len(value(self)) + def __call__(self, *args, **kwargs): + """ + Return the value of this object. + """ + # Backwards compatibility with __call__(exception): + # + # TODO: deprecate (then remove) evaluating expressions by + # "calling" them. + try: + if not args: + if not kwargs: + return super().__call__() + elif len(kwargs) == 1 and 'exception' in kwargs: + return super().__call__(**kwargs) + elif ( + not kwargs and len(args) == 1 and (args[0] is True or args[0] is False) + ): + return super().__call__(*args) + except TemplateExpressionError: + pass + # Note: the only time we will implicitly create a CallExpression + # node is directly after a GetAttrExpression: that is, someone + # got the attribute (method) and is now calling it. + # Implementing the auto-generation of CallExpression in other + # contexts is likely to be confounded with evaluating expressions. + return CallExpression((self,) + args, kwargs) + def getname(self, *args, **kwds): return 'getattr' - def _compute_polynomial_degree(self, result): - if result[1] != 0: - return None - return result[0] + def nargs(self): + return 2 def _apply_operation(self, result): assert len(result) == 2 - obj = getattr(result[0], result[1]) - if obj.__class__ in nonpyomo_leaf_types: - return obj - # Note that because it is possible (likely) that the result - # could be an IndexedComponent_slice object, must test "is - # True", as the slice will return a list of values. - if obj.is_numeric_type() is True: - obj = value(obj) - return obj - - def _to_string(self, values, verbose, smap, compute_values): + return getattr(result[0], result[1]) + + def _to_string(self, values, verbose, smap): assert len(values) == 2 if verbose: return "getattr(%s, %s)" % tuple(values) @@ -191,6 +273,91 @@ def _resolve_template(self, args): return getattr(*tuple(args)) +class Numeric_GetAttrExpression(GetAttrExpression, NumericExpression): + __slots__ = () + + def _compute_polynomial_degree(self, result): + if result[1] != 0: + return None + return result[0] + + +class NPV_Numeric_GetAttrExpression(Numeric_NPV_Mixin, Numeric_GetAttrExpression): + __slots__ = () + + +class Boolean_GetAttrExpression(GetAttrExpression, BooleanExpression): + __slots__ = () + + +class NPV_Boolean_GetAttrExpression(NPV_Mixin, Boolean_GetAttrExpression): + __slots__ = () + + +class Structural_GetAttrExpression(ExpressionArgs_Mixin, GetAttrExpression): + __slots__ = () + + +class NPV_Structural_GetAttrExpression(NPV_Mixin, Structural_GetAttrExpression): + __slots__ = () + + +class CallExpression(NumericExpression): + """ + Expression to call :func:`__call__` on the base object. + """ + + __slots__ = ('_kwds',) + PRECEDENCE = None + + def __init__(self, args, kwargs): + self._args_ = tuple(args) + tuple(kwargs.values()) + self._kwds = tuple(kwargs.keys()) + + def nargs(self): + return len(self._args_) + + def __getattr__(self, attr): + if attr.startswith('__') and attr.endswith('__'): + raise AttributeError() + return GetAttrExpression((self, attr)) + + def __getitem__(self, *idx): + return GetItemExpression((self,) + idx) + + def __iter__(self): + return iter(value(self)) + + def __len__(self): + return len(value(self)) + + def getname(self, *args, **kwds): + return 'call' + + def _compute_polynomial_degree(self, result): + return None + + def _apply_operation(self, result): + na = len(self._args_) - len(self._kwds) + return result[0](*result[1:na], **dict(zip(self._kwds, result[na:]))) + + def _to_string(self, values, verbose, smap): + na = len(self._args_) - len(self._kwds) + args = ', '.join(values[1:na]) + if self._kwds: + if na > 1: + args += ', ' + args += ', '.join( + f'{key}={val}' for key, val in zip(self._kwds, values[na:]) + ) + if verbose: + return f"call({values[0]}, {args})" + return f"{values[0]}({args})" + + def _resolve_template(self, args): + return self._apply_operation(args) + + class _TemplateSumExpression_argList(object): """A virtual list to represent the expanded SumExpression args @@ -208,6 +375,7 @@ class _TemplateSumExpression_argList(object): It is (intentionally) not iterable. """ + def __init__(self, TSE): self._tse = TSE self._i = 0 @@ -248,9 +416,9 @@ def _get_iter(self): def _lock_iters(self): self._init_vals = tuple( - tuple( - it.lock(self._lock) for it in iterGroup - ) for iterGroup in self._tse._iters ) + tuple(it.lock(self._lock) for it in iterGroup) + for iterGroup in self._tse._iters + ) def _unlock_iters(self): self._set_iter_vals(self._init_vals) @@ -267,16 +435,14 @@ def _set_iter_vals(self, val): iterGroup[j].set_value(v, self._lock) -class TemplateSumExpression(ExpressionBase): +class TemplateSumExpression(NumericExpression): """ Expression to represent an unexpanded sum over one or more sets. """ + __slots__ = ('_iters', '_local_args_') PRECEDENCE = 1 - def _precedence(self): - return TemplateSumExpression.PRECEDENCE - def __init__(self, args, _iters): assert len(args) == 1 self._args_ = args @@ -305,18 +471,15 @@ def _args_(self, args): def create_node_with_local_data(self, args): return self.__class__(args, self._iters) - def __getstate__(self): - state = super(TemplateSumExpression, self).__getstate__() - for i in TemplateSumExpression.__slots__: - state[i] = getattr(self, i) - return state - def getname(self, *args, **kwds): return "SUM" def is_potentially_variable(self): - if any(arg.is_potentially_variable() for arg in self._local_args_ - if arg.__class__ not in nonpyomo_leaf_types): + if any( + arg.is_potentially_variable() + for arg in self._local_args_ + if arg.__class__ not in nonpyomo_leaf_types + ): return True return False @@ -331,16 +494,20 @@ def _compute_polynomial_degree(self, result): def _apply_operation(self, result): return sum(result) - def _to_string(self, values, verbose, smap, compute_values): + def _to_string(self, values, verbose, smap): ans = '' val = values[0] - if val[0]=='(' and val[-1]==')' and _balanced_parens(val[1:-1]): + if val[0] == '(' and val[-1] == ')' and _balanced_parens(val[1:-1]): val = val[1:-1] iterStrGenerator = ( - ( ', '.join(str(i) for i in iterGroup), - ( iterGroup[0]._set.to_string(verbose=verbose) - if hasattr(iterGroup[0]._set, 'to_string') - else str(iterGroup[0]._set) ) ) + ( + ', '.join(str(i) for i in iterGroup), + ( + iterGroup[0]._set.to_string(verbose=verbose) + if hasattr(iterGroup[0]._set, 'to_string') + else str(iterGroup[0]._set) + ), + ) for iterGroup in self._iters ) if verbose: @@ -369,28 +536,20 @@ class IndexTemplate(NumericValue): _set: the Set from which this IndexTemplate can take values """ - __slots__ = ('_set', '_value', '_index', '_id', '_lock') + __slots__ = ('_set', '_value', '_index', '_id', '_group', '_lock') - def __init__(self, _set, index=0, _id=None): + def __init__(self, _set, index=0, _id=None, _group=None): self._set = _set self._value = _NotSpecified self._index = index self._id = _id + self._group = _group self._lock = None - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - state = super(IndexTemplate, self).__getstate__() - for i in IndexTemplate.__slots__: - state[i] = getattr(self, i) - return state - def __deepcopy__(self, memo): - # Because we leverage deepcopy for expression cloning, we need - # to see if this is a clone operation and *not* copy the - # template. + # Because we leverage deepcopy for expression/component cloning, + # we need to see if this is a Component.clone() operation and + # *not* copy the template. # # TODO: JDS: We should consider converting the IndexTemplate to # a proper Component: that way it could leverage the normal @@ -402,9 +561,7 @@ def __deepcopy__(self, memo): # # "Normal" deepcopying outside the context of pyomo. # - ans = memo[id(self)] = self.__class__.__new__(self.__class__) - ans.__setstate__(copy.deepcopy(self.__getstate__(), memo)) - return ans + return super().__deepcopy__(memo) # Note: because NONE of the slots on this class need to be edited, # we don't need to implement a specialized __setstate__ method. @@ -416,8 +573,8 @@ def __call__(self, exception=True): if self._value is _NotSpecified: if exception: raise TemplateExpressionError( - self, "Evaluating uninitialized IndexTemplate (%s)" - % (self,)) + self, "Evaluating uninitialized IndexTemplate (%s)" % (self,) + ) return None else: return self._value @@ -432,12 +589,6 @@ def is_fixed(self): """ return True - def is_constant(self): - """ - Returns False because this cannot immediately be simplified. - """ - return False - def is_potentially_variable(self): """Returns False because index values cannot be variables. @@ -457,18 +608,19 @@ def getname(self, fully_qualified=False, name_buffer=None, relative_to=None): _set_name = self._set.getname(fully_qualified, name_buffer, relative_to) if self._index is not None and self._set.dimen != 1: _set_name += "(%s)" % (self._index,) - return "{"+_set_name+"}" + return "{" + _set_name + "}" def set_value(self, values=_NotSpecified, lock=None): # It might be nice to check if the value is valid for the base - # set, but things are tricky when the base set is not dimention + # set, but things are tricky when the base set is not dimension # 1. So, for the time being, we will just "trust" the user. # After all, the actual Set will raise exceptions if the value # is not present. if lock is not self._lock: raise RuntimeError( "The TemplateIndex %s is currently locked by %s and " - "cannot be set through lock %s" % (self, self._lock, lock)) + "cannot be set through lock %s" % (self, self._lock, lock) + ) if values is _NotSpecified: self._value = _NotSpecified return @@ -478,8 +630,7 @@ def set_value(self, values=_NotSpecified, lock=None): if len(values) == 1: self._value = values[0] else: - raise ValueError("Passed multiple values %s to a scalar " - "IndexTemplate %s" % (values, self)) + self._value = values[self._index] else: self._value = values @@ -493,6 +644,11 @@ def unlock(self, lock): self._lock = None +# Instead of special-casing _categorize_arg_type for this class, we +# will directly register that it should be treated as an NPV arg +register_arg_type(IndexTemplate, ARG_TYPE.NPV) + + def resolve_template(expr): """Resolve a template into a concrete expression @@ -502,9 +658,15 @@ def resolve_template(expr): GetAttrExpression, and TemplateSumExpression expression nodes. """ + wildcards = [] + wildcard_groups = {} + level = -1 + def beforeChild(node, child, child_idx): - # Efficiency: do not decend into leaf nodes. - if type(child) in native_types or not child.is_expression_type(): + # Efficiency: do not descend into leaf nodes. + if type(child) in native_types: + return False, child + elif not child.is_expression_type(): if hasattr(child, '_resolve_template'): return False, child._resolve_template(()) return False, child @@ -514,20 +676,167 @@ def beforeChild(node, child, child_idx): def exitNode(node, args): if hasattr(node, '_resolve_template'): return node._resolve_template(args) - if len(args) == node.nargs() and all( - a is b for a,b in zip(node.args, args)): + if len(args) == node.nargs() and all(a is b for a, b in zip(node.args, args)): return node - return node.create_node_with_local_data(args) + if all(map(is_constant, args)): + return node._apply_operation(args) + else: + return node.create_node_with_local_data(args) - return StreamBasedExpressionVisitor( + walker = StreamBasedExpressionVisitor( initializeWalker=lambda x: beforeChild(None, x, None), beforeChild=beforeChild, exitNode=exitNode, - ).walk_expression(expr) + ) + return walker.walk_expression(expr) + + +class _wildcard_info(object): + __slots__ = ('iter', 'source', 'value', 'original_value', 'objects') + + def __init__(self, src, obj): + self.source = src + self.original_value = obj._value + self.objects = [obj] + self.reset() + if self.original_value in (None, _NotSpecified): + self.advance() + + def advance(self): + with _TemplateIterManager.pause(): + self.value = next(self.iter) + for obj in self.objects: + obj.set_value(self.value) + + def reset(self): + # Because we want to actually iterate over the underlying + # template expression, we will temporarily pause our overrides + # of sum() and the set iters + with _TemplateIterManager.pause(): + self.iter = iter(self.source) + + def restore(self): + for obj in self.objects: + obj.set_value(self.original_value) + + +def _reduce_template_to_component(expr): + """Resolve a template into a concrete component + + This takes a template expression and returns the concrete equivalent + by substituting the current values of all IndexTemplate objects and + resolving (evaluating and removing) all GetItemExpression, + GetAttrExpression, and TemplateSumExpression expression nodes. + + """ + import pyomo.core.base.set + + # wildcards holds lists of + # [iterator, source, value, orig_value, object0, ...] + # 'iterator' iterates over 'source' to provide 'value's for each of + # the 1 or more 'objects'. Objects can be IndexTemplate objects or + # (discrete) Variables + wildcards = [] + wildcard_groups = {} + level = -1 + + def beforeChild(node, child, child_idx): + # Efficiency: do not descend into leaf nodes. + if type(child) in native_types: + return False, child + elif not child.is_expression_type(): + if hasattr(child, '_resolve_template'): + try: + ans = child._resolve_template(()) + except TemplateExpressionError: + # We are attempting "loose" template resolution: for + # every unset IndexTemplate, search the underlying + # set to find *any* valid match. + if child._group not in wildcard_groups: + wildcard_groups[child._group] = len(wildcards) + info = _wildcard_info(child._set, child) + wildcards.append(info) + else: + info = wildcards[wildcard_groups[child._group]] + info.objects.append(child) + child.set_value(info.value) + ans = child._resolve_template(()) + return False, ans + if child.is_variable_type(): + from pyomo.core.base.set import RangeSet + + if child.domain.isdiscrete(): + domain = child.domain + bounds = child.bounds + if bounds != (None, None): + try: + bounds = pyomo.core.base.set.RangeSet(*bounds, 0) + domain = domain & bounds + except: + pass + info = _wildcard_info(domain, child) + wildcards.append(info) + return False, value(child) + return False, child + else: + return True, None + + def exitNode(node, args): + if hasattr(node, '_resolve_template'): + return node._resolve_template(args) + if len(args) == node.nargs() and all(a is b for a, b in zip(node.args, args)): + return node + if all(map(is_constant, args)): + return node._apply_operation(args) + else: + return node.create_node_with_local_data(args) + + walker = StreamBasedExpressionVisitor( + initializeWalker=lambda x: beforeChild(None, x, None), + beforeChild=beforeChild, + exitNode=exitNode, + ) + while 1: + try: + with _TemplateIterManager.pause(): + ans = walker.walk_expression(expr) + break + except (KeyError, AttributeError): + # We are attempting "loose" template resolution: for every + # unset IndexTemplate, search the underlying set to find + # *any* valid match. + level = len(wildcards) - 1 + while level >= 0: + info = wildcards[level] + try: + info.advance() + break + except StopIteration: + # Because we want to actually iterate over the + # underlying template expression, we will + # temporarily pause our overrides of sum() and the + # set iters + info.reset() + info.advance() + level -= 1 + if level < 0: + for info in wildcards: + info.restore() + raise + for info in wildcards: + info.restore() + return ans class ReplaceTemplateExpression(ExpressionReplacementVisitor): - template_types = {GetItemExpression, IndexTemplate} + template_types = { + IndexTemplate, + GetItemExpression, + Numeric_GetItemExpression, + NPV_Numeric_GetItemExpression, + Boolean_GetItemExpression, + NPV_Boolean_GetItemExpression, + } def __init__(self, substituter, *args, **kwargs): kwargs.setdefault('remove_named_expressions', True) @@ -545,8 +854,8 @@ def substitute_template_expression(expr, substituter, *args, **kwargs): """Substitute IndexTemplates in an expression tree. This is a general utility function for walking the expression tree - and subtituting all occurances of IndexTemplate and - _GetItemExpression nodes. + and substituting all occurrences of IndexTemplate and + GetItemExpression nodes. Args: substituter: method taking (expression, *args) and returning @@ -567,7 +876,7 @@ class _GetItemIndexer(object): def __init__(self, expr): self._base = expr.arg(0) self._args = [] - _hash = [ id(self._base) ] + _hash = [id(self._base)] for x in expr.args[1:]: try: logging.disable(logging.CRITICAL) @@ -579,8 +888,8 @@ def __init__(self, expr): raise TypeError( "Cannot use the param substituter with expression " "templates\nwhere the component index has the " - "IndexTemplate in an expression.\n\tFound in %s" - % ( expr, )) + "IndexTemplate in an expression.\n\tFound in %s" % (expr,) + ) self._args.append(e.template) _hash.append(id(e.template._set)) finally: @@ -612,18 +921,18 @@ def __eq__(self, other): return False def __str__(self): - return "%s[%s]" % ( - self._base.name, ','.join(str(x) for x in self._args) ) + return "%s[%s]" % (self._base.name, ','.join(str(x) for x in self._args)) def substitute_getitem_with_param(expr, _map): """A simple substituter to replace _GetItem nodes with mutable Params. - This substituter will replace all _GetItemExpression nodes with a + This substituter will replace all GetItemExpression nodes with a new Param. For example, this method will create expressions suitable for passing to DAE integrators """ import pyomo.core.base.param + if type(expr) is IndexTemplate: return expr @@ -631,15 +940,14 @@ def substitute_getitem_with_param(expr, _map): if _id not in _map: _map[_id] = pyomo.core.base.param.Param(mutable=True) _map[_id].construct() - _map[_id]._name = "%s[%s]" % ( - _id.base.name, ','.join(str(x) for x in _id.args) ) + _map[_id]._name = "%s[%s]" % (_id.base.name, ','.join(str(x) for x in _id.args)) return _map[_id] def substitute_template_with_value(expr): """A simple substituter to expand expression for current template - This substituter will replace all _GetItemExpression / IndexTemplate + This substituter will replace all GetItemExpression / IndexTemplate nodes with the actual _ComponentData based on the current value of the IndexTemplate(s) @@ -660,6 +968,7 @@ class _set_iterator_template_generator(object): object(s) instead of the actual Set items the first time next() is called. """ + def __init__(self, _set, context): self._set = _set self.context = context @@ -674,12 +983,16 @@ def __next__(self): context, self.context = self.context, None _set = self._set - d = _set.dimen + if _set.is_expression_type(): + d = _reduce_template_to_component(_set).dimen + else: + d = _set.dimen + grp = context.next_group() if d is None or type(d) is not int: - idx = (IndexTemplate(_set, None, context.next_id()),) + idx = (IndexTemplate(_set, None, context.next_id(), grp),) else: idx = tuple( - IndexTemplate(_set, i, context.next_id()) for i in range(d) + IndexTemplate(_set, i, context.next_id(), grp) for i in range(d) ) context.cache.append(idx) if len(idx) == 1: @@ -689,6 +1002,7 @@ def __next__(self): next = __next__ + class _template_iter_context(object): """Manage the iteration context when generating templatized rules @@ -699,9 +1013,11 @@ class _template_iter_context(object): unique identifiers for IndexTemplate objects and their groupings within `sum()` generators. """ + def __init__(self): self.cache = [] self._id = 0 + self._group = 0 def get_iter(self, _set): return _set_iterator_template_generator(_set, self) @@ -715,71 +1031,144 @@ def next_id(self): self._id += 1 return self._id + def next_group(self): + self._group += 1 + return self._group + def sum_template(self, generator): init_cache = len(self.cache) expr = next(generator) final_cache = len(self.cache) - return TemplateSumExpression( - (expr,), self.npop_cache(final_cache-init_cache) - ) + return TemplateSumExpression((expr,), self.npop_cache(final_cache - init_cache)) + + +class _template_iter_manager(object): + class _iter_wrapper(object): + __slots__ = ('_class', '_iter', '_old_iter') + + def __init__(self, cls, context): + def _iter_fcn(obj): + return context.get_iter(obj) + + self._class = cls + self._old_iter = cls.__iter__ + self._iter = _iter_fcn + + def acquire(self): + self._class.__iter__ = self._iter + + def release(self): + self._class.__iter__ = self._old_iter + + class _pause_template_iter_manager(object): + __slots__ = ('iter_manager',) + + def __init__(self, iter_manager): + self.iter_manager = iter_manager + + def __enter__(self): + self.iter_manager.release() + return self + + def __exit__(self, et, ev, tb): + self.iter_manager.acquire() + + def __init__(self): + self.paused = True + self.context = None + self.iters = None + self.builtin_sum = builtins.sum + + def init(self, context, *iter_fcns): + assert self.context is None + self.context = context + self.iters = [self._iter_wrapper(it, context) for it in iter_fcns] + return self + + def acquire(self): + assert self.paused + self.paused = False + builtins.sum = self.context.sum_template + for it in self.iters: + it.acquire() + + def release(self): + assert not self.paused + self.paused = True + builtins.sum = self.builtin_sum + for it in self.iters: + it.release() + + def __enter__(self): + assert self.context + self.acquire() + return self + + def __exit__(self, et, ev, tb): + self.release() + self.context = None + self.iters = None + + def pause(self): + if self.paused: + return nullcontext() + else: + return self._pause_template_iter_manager(self) + + +# Global manager for coordinating overriding set iteration +_TemplateIterManager = _template_iter_manager() def templatize_rule(block, rule, index_set): import pyomo.core.base.set + context = _template_iter_context() internal_error = None - _old_iters = ( - pyomo.core.base.set._FiniteSetMixin.__iter__, - GetItemExpression.__iter__, - GetAttrExpression.__iter__, - ) - _old_sum = builtins.sum try: # Override Set iteration to return IndexTemplates - pyomo.core.base.set._FiniteSetMixin.__iter__ \ - = GetItemExpression.__iter__ \ - = GetAttrExpression.__iter__ \ - = lambda x: context.get_iter(x).__iter__() - # Override sum with our sum - builtins.sum = context.sum_template - # Get the index templates needed for calling the rule - if index_set is not None: - # Note, do not rely on the __iter__ overload, as non-finite - # Sets don't have an __iter__. - indices = next(iter(context.get_iter(index_set))) - try: - context.cache.pop() - except IndexError: - assert indices is None + with _TemplateIterManager.init( + context, + pyomo.core.base.set._FiniteSetMixin, + GetItemExpression, + GetAttrExpression, + ): + # Get the index templates needed for calling the rule + if index_set is not None: + # Note, do not rely on the __iter__ overload, as non-finite + # Sets don't have an __iter__. + indices = next(iter(context.get_iter(index_set))) + try: + context.cache.pop() + except IndexError: + assert indices is None + indices = () + else: indices = () - else: - indices = () - if type(indices) is not tuple: - indices = (indices,) - # Call the rule, returning the template expression and the - # top-level IndexTemplate(s) generated when calling the rule. - # - # TBD: Should this just return a "FORALL()" expression node that - # behaves similarly to the GetItemExpression node? - return rule(block, indices), indices + if type(indices) is not tuple: + indices = (indices,) + # Call the rule, returning the template expression and the + # top-level IndexTemplate(s) generated when calling the rule. + # + # TBD: Should this just return a "FORALL()" expression node that + # behaves similarly to the GetItemExpression node? + return rule(block, indices), indices except: internal_error = sys.exc_info() raise finally: - pyomo.core.base.set._FiniteSetMixin.__iter__, \ - GetItemExpression.__iter__, \ - GetAttrExpression.__iter__ = _old_iters - builtins.sum = _old_sum if len(context.cache): if internal_error is not None: - logger.error("The following exception was raised when " - "templatizing the rule '%s':\n\t%s" - % (rule.__name__, internal_error[1])) + logger.error( + "The following exception was raised when " + "templatizing the rule '%s':\n\t%s" % (rule.name, internal_error[1]) + ) raise TemplateExpressionError( None, "Explicit iteration (for loops) over Sets is not supported " "by template expressions. Encountered loop over %s" - % (context.cache[-1][0]._set,)) + % (context.cache[-1][0]._set,), + ) return None, indices diff --git a/pyomo/core/expr/visitor.py b/pyomo/core/expr/visitor.py index a2f60a40b25..c8f22ba1d3a 100644 --- a/pyomo/core/expr/visitor.py +++ b/pyomo/core/expr/visitor.py @@ -19,16 +19,16 @@ logger = logging.getLogger('pyomo.core') -from .symbol_map import SymbolMap -from . import expr_common as common -from .expr_errors import TemplateExpressionError from pyomo.common.deprecation import deprecated, deprecation_warning -from pyomo.common.errors import DeveloperError -from pyomo.core.expr.numvalue import ( +from pyomo.common.errors import DeveloperError, TemplateExpressionError +from pyomo.common.numeric_types import ( nonpyomo_leaf_types, native_types, native_numeric_types, - value,) + value, +) +import pyomo.core.expr.expr_common as common +from pyomo.core.expr.symbol_map import SymbolMap try: # sys._getframe is slightly faster than inspect's currentframe, but @@ -37,14 +37,16 @@ except AttributeError: currentframe = inspect.currentframe + def get_stack_depth(): - n = -1 # skip *this* frame in the count + n = -1 # skip *this* frame in the count f = currentframe() while f is not None: n += 1 f = f.f_back return n + # For efficiency, we want to run recursively, but don't want to hit # Python's recursion limit (because that would be difficult to recover # from cleanly). However, there is a non-trivial cost to determine the @@ -58,9 +60,11 @@ def get_stack_depth(): # the vast majority of cases that could generate a recursion error. RECURSION_LIMIT = 50 + class RevertToNonrecursive(Exception): pass + # NOTE: This module also has dependencies on numeric_expr; however, to # avoid circular dependencies, we will NOT import them here. Instead, # until we can resolve the circular dependencies, they will be injected @@ -68,110 +72,111 @@ class RevertToNonrecursive(Exception): # *after* numeric_expr, logocal_expr, and this module. -#------------------------------------------------------- +# ------------------------------------------------------- # # Visitor Logic # -#------------------------------------------------------- +# ------------------------------------------------------- + class StreamBasedExpressionVisitor(object): """This class implements a generic stream-based expression walker. - This visitor walks an expression tree using a depth-first strategy - and generates a full event stream similar to other tree visitors - (e.g., the expat XML parser). The following events are triggered - through callback functions as the traversal enters and leaves nodes - in the tree: - - initializeWalker(expr) -> walk, result - enterNode(N1) -> args, data - {for N2 in args:} - beforeChild(N1, N2) -> descend, child_result - enterNode(N2) -> N2_args, N2_data - [...] - exitNode(N2, n2_data) -> child_result - acceptChildResult(N1, data, child_result) -> data - afterChild(N1, N2) -> None - exitNode(N1, data) -> N1_result - finalizeWalker(result) -> result - - Individual event callbacks match the following signatures: - - walk, result = initializeWalker(self, expr): - - initializeWalker() is called to set the walker up and perform - any preliminary processing on the root node. The method returns - a flag indicating if the tree should be walked and a result. If - `walk` is True, then result is ignored. If `walk` is False, - then `result` is returned as the final result from the walker, - bypassing all other callbacks (including finalizeResult). - - args, data = enterNode(self, node): - - enterNode() is called when the walker first enters a node (from - above), and is passed the node being entered. It is expected to - return a tuple of child `args` (as either a tuple or list) and a - user-specified data structure for collecting results. If None - is returned for args, the node's args attribute is used for - expression types and the empty tuple for leaf nodes. Returning - None is equivalent to returning (None,None). If the callback is - not defined, the default behavior is equivalent to returning - (None, []). - - node_result = exitNode(self, node, data): - - exitNode() is called after the node is completely processed (as - the walker returns up the tree to the parent node). It is - passed the node and the results data structure (defined by - enterNode() and possibly further modified by - acceptChildResult()), and is expected to return the "result" for - this node. If not specified, the default action is to return - the data object from enterNode(). - - descend, child_result = beforeChild(self, node, child, child_idx): - - beforeChild() is called by a node for every child before - entering the child node. The node, child node, and child index - (position in the args list from enterNode()) are passed as - arguments. beforeChild should return a tuple (descend, - child_result). If descend is False, the child node will not be - entered and the value returned to child_result will be passed to - the node's acceptChildResult callback. Returning None is - equivalent to (True, None). The default behavior if not - specified is equivalent to (True, None). - - data = acceptChildResult(self, node, data, child_result, child_idx): - - acceptChildResult() is called for each child result being - returned to a node. This callback is responsible for recording - the result for later processing or passing up the tree. It is - passed the node, result data structure (see enterNode()), child - result, and the child index (position in args from enterNode()). - The data structure (possibly modified or replaced) must be - returned. If acceptChildResult is not specified, it does - nothing if data is None, otherwise it calls data.append(result). - - afterChild(self, node, child, child_idx): - - afterChild() is called by a node for every child node - immediately after processing the node is complete before control - moves to the next child or up to the parent node. The node, - child node, an child index (position in args from enterNode()) - are passed, and nothing is returned. If afterChild is not - specified, no action takes place. - - finalizeResult(self, result): - - finalizeResult() is called once after the entire expression tree - has been walked. It is passed the result returned by the root - node exitNode() callback. If finalizeResult is not specified, - the walker returns the result obtained from the exitNode - callback on the root node. - - Clients interact with this class by either deriving from it and - implementing the necessary callbacks (see above), assigning callable - functions to an instance of this class, or passing the callback - functions as arguments to this class' constructor. + This visitor walks an expression tree using a depth-first strategy + and generates a full event stream similar to other tree visitors + (e.g., the expat XML parser). The following events are triggered + through callback functions as the traversal enters and leaves nodes + in the tree: + + initializeWalker(expr) -> walk, result + enterNode(N1) -> args, data + {for N2 in args:} + beforeChild(N1, N2) -> descend, child_result + enterNode(N2) -> N2_args, N2_data + [...] + exitNode(N2, n2_data) -> child_result + acceptChildResult(N1, data, child_result) -> data + afterChild(N1, N2) -> None + exitNode(N1, data) -> N1_result + finalizeWalker(result) -> result + + Individual event callbacks match the following signatures: + + walk, result = initializeWalker(self, expr): + + initializeWalker() is called to set the walker up and perform + any preliminary processing on the root node. The method returns + a flag indicating if the tree should be walked and a result. If + `walk` is True, then result is ignored. If `walk` is False, + then `result` is returned as the final result from the walker, + bypassing all other callbacks (including finalizeResult). + + args, data = enterNode(self, node): + + enterNode() is called when the walker first enters a node (from + above), and is passed the node being entered. It is expected to + return a tuple of child `args` (as either a tuple or list) and a + user-specified data structure for collecting results. If None + is returned for args, the node's args attribute is used for + expression types and the empty tuple for leaf nodes. Returning + None is equivalent to returning (None,None). If the callback is + not defined, the default behavior is equivalent to returning + (None, []). + + node_result = exitNode(self, node, data): + + exitNode() is called after the node is completely processed (as + the walker returns up the tree to the parent node). It is + passed the node and the results data structure (defined by + enterNode() and possibly further modified by + acceptChildResult()), and is expected to return the "result" for + this node. If not specified, the default action is to return + the data object from enterNode(). + + descend, child_result = beforeChild(self, node, child, child_idx): + + beforeChild() is called by a node for every child before + entering the child node. The node, child node, and child index + (position in the args list from enterNode()) are passed as + arguments. beforeChild should return a tuple (descend, + child_result). If descend is False, the child node will not be + entered and the value returned to child_result will be passed to + the node's acceptChildResult callback. Returning None is + equivalent to (True, None). The default behavior if not + specified is equivalent to (True, None). + + data = acceptChildResult(self, node, data, child_result, child_idx): + + acceptChildResult() is called for each child result being + returned to a node. This callback is responsible for recording + the result for later processing or passing up the tree. It is + passed the node, result data structure (see enterNode()), child + result, and the child index (position in args from enterNode()). + The data structure (possibly modified or replaced) must be + returned. If acceptChildResult is not specified, it does + nothing if data is None, otherwise it calls data.append(result). + + afterChild(self, node, child, child_idx): + + afterChild() is called by a node for every child node + immediately after processing the node is complete before control + moves to the next child or up to the parent node. The node, + child node, an child index (position in args from enterNode()) + are passed, and nothing is returned. If afterChild is not + specified, no action takes place. + + finalizeResult(self, result): + + finalizeResult() is called once after the entire expression tree + has been walked. It is passed the result returned by the root + node exitNode() callback. If finalizeResult is not specified, + the walker returns the result obtained from the exitNode + callback on the root node. + + Clients interact with this class by either deriving from it and + implementing the necessary callbacks (see above), assigning callable + functions to an instance of this class, or passing the callback + functions as arguments to this class' constructor. """ @@ -210,7 +215,7 @@ def __init__(self, **kwds): raise RuntimeError("Unrecognized keyword arguments: %s" % (kwds,)) # Handle deprecated APIs - _fcns = (('beforeChild',2), ('acceptChildResult',3), ('afterChild',2)) + _fcns = (('beforeChild', 2), ('acceptChildResult', 3), ('afterChild', 2)) for name, nargs in _fcns: fcn = getattr(self, name) if fcn is None: @@ -222,11 +227,15 @@ def __init__(self, **kwds): "Note that the API for the StreamBasedExpressionVisitor " "has changed to include the child index for the %s() " "method. Please update your walker callbacks." % (name,), - version='5.7.0') + version='5.7.0', + ) + def wrap(fcn, nargs): def wrapper(*args): return fcn(*args[:nargs]) + return wrapper + setattr(self, name, wrap(fcn, nargs)) self.recursion_stack = None @@ -234,15 +243,18 @@ def wrapper(*args): # Set up the custom recursive node handler function (customized # for the specific set of callbacks that are defined for this # class instance). - recursive_node_handler = '_process_node_' + ''.join(sorted( - '' if getattr(self, f[0]) is None else f[1] - for f in self.client_methods.items())) + recursive_node_handler = '_process_node_' + ''.join( + sorted( + '' if getattr(self, f[0]) is None else f[1] + for f in self.client_methods.items() + ) + ) self._process_node = getattr( - self, recursive_node_handler, self._process_node_general) + self, recursive_node_handler, self._process_node_general + ) def walk_expression(self, expr): - """Walk an expression, calling registered callbacks. - """ + """Walk an expression, calling registered callbacks.""" if self.initializeWalker is not None: walk, root = self.initializeWalker(expr) if not walk: @@ -254,17 +266,22 @@ def walk_expression(self, expr): try: result = self._process_node(root, RECURSION_LIMIT) + _nonrecursive = None except RevertToNonrecursive: ptr = (None,) + self.recursion_stack.pop() while self.recursion_stack: ptr = (ptr,) + self.recursion_stack.pop() self.recursion_stack = None - result = self._nonrecursive_walker_loop(ptr) + _nonrecursive = self._nonrecursive_walker_loop, ptr except RecursionError: logger.warning( 'Unexpected RecursionError walking an expression tree.', - extra={'id': 'W1003'}) - return self.walk_expression_nonrecursive(expr) + extra={'id': 'W1003'}, + ) + _nonrecursive = self.walk_expression_nonrecursive, expr + + if _nonrecursive is not None: + return _nonrecursive[0](_nonrecursive[1]) if self.finalizeResult is not None: return self.finalizeResult(result) @@ -272,8 +289,9 @@ def walk_expression(self, expr): return result def _compute_actual_recursion_limit(self): - recursion_limit \ - = sys.getrecursionlimit() - get_stack_depth() - 2*RECURSION_LIMIT + recursion_limit = ( + sys.getrecursionlimit() - get_stack_depth() - 2 * RECURSION_LIMIT + ) if recursion_limit <= RECURSION_LIMIT: self.recursion_stack = [] raise RevertToNonrecursive() @@ -302,8 +320,7 @@ def _process_node_general(self, node, recursion_limit): args = None data = [] if args is None: - if type(node) in nonpyomo_leaf_types \ - or not node.is_expression_type(): + if type(node) in nonpyomo_leaf_types or not node.is_expression_type(): args = () else: args = node.args @@ -334,8 +351,7 @@ def _process_node_general(self, node, recursion_limit): child_result = self._process_node(child, recursion_limit) if self.acceptChildResult is not None: - data = self.acceptChildResult( - node, data, child_result, child_idx) + data = self.acceptChildResult(node, data, child_result, child_idx) elif data is not None: data.append(child_result) @@ -376,8 +392,7 @@ def _process_node_bex(self, node, recursion_limit): else: args, data = tmp if args is None: - if type(node) in nonpyomo_leaf_types \ - or not node.is_expression_type(): + if type(node) in nonpyomo_leaf_types or not node.is_expression_type(): args = () else: args = node.args @@ -476,17 +491,12 @@ def _recursive_frame_to_nonrecursive_stack(self, local): # started processing it yet, we need to decrement # child_idx so that it is revisited child_idx -= 1 - self.recursion_stack.append(( - local['node'], - _arg_list, - len(_arg_list)-1, - local['data'], - child_idx, - )) + self.recursion_stack.append( + (local['node'], _arg_list, len(_arg_list) - 1, local['data'], child_idx) + ) def walk_expression_nonrecursive(self, expr): - """Walk an expression, calling registered callbacks. - """ + """Walk an expression, calling registered callbacks.""" # # This walker uses a linked list to store the stack (instead of # an array). The nodes of the linked list are 6-member tuples: @@ -518,8 +528,7 @@ def walk_expression_nonrecursive(self, expr): args = None data = [] if args is None: - if type(expr) in nonpyomo_leaf_types \ - or not expr.is_expression_type(): + if type(expr) in nonpyomo_leaf_types or not expr.is_expression_type(): args = () else: args = expr.args @@ -530,7 +539,8 @@ def walk_expression_nonrecursive(self, expr): # the child node, it must be initialized to -1, and ptr[3] must # always be *one less than* the number of arguments return self._nonrecursive_walker_loop( - (None, node, args, len(args)-1, data, -1)) + (None, node, args, len(args) - 1, data, -1) + ) def _nonrecursive_walker_loop(self, ptr): _, node, args, _, data, child_idx = ptr @@ -565,7 +575,8 @@ def _nonrecursive_walker_loop(self, ptr): # we will move along if self.acceptChildResult is not None: data = self.acceptChildResult( - node, data, child_result, child_idx) + node, data, child_result, child_idx + ) elif data is not None: data.append(child_result) # And let the node know that we are done with a @@ -579,7 +590,7 @@ def _nonrecursive_walker_loop(self, ptr): # Update the child argument counter in the stack. # Because we are using tuples, we need to recreate the # "ptr" object (linked list node) - ptr = ptr[:4] + (data, child_idx,) + ptr = ptr[:4] + (data, child_idx) # We are now going to actually enter this node. The # node will tell us the list of its child nodes that we @@ -594,8 +605,10 @@ def _nonrecursive_walker_loop(self, ptr): args = None data = [] if args is None: - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): + if ( + type(child) in nonpyomo_leaf_types + or not child.is_expression_type() + ): # Leaves (either non-pyomo types or # non-Expressions) have no child arguments, so # are just put on the stack @@ -606,9 +619,9 @@ def _nonrecursive_walker_loop(self, ptr): args.__enter__() node = child child_idx = -1 - ptr = (ptr, node, args, len(args)-1, data, child_idx) + ptr = (ptr, node, args, len(args) - 1, data, child_idx) - else: # child_idx == ptr[3]: + else: # child_idx == ptr[3]: # We are done with this node. Call exitNode to compute # any result if hasattr(ptr[2], '__exit__'): @@ -636,7 +649,8 @@ def _nonrecursive_walker_loop(self, ptr): # We need to alert the node to accept the child's result: if self.acceptChildResult is not None: data = self.acceptChildResult( - node, data, node_result, child_idx) + node, data, node_result, child_idx + ) elif data is not None: data.append(node_result) @@ -647,7 +661,7 @@ def _nonrecursive_walker_loop(self, ptr): finally: while ptr is not None: if hasattr(ptr[2], '__exit__'): - ptr[2].__exit__(None, None, None) + ptr[2].__exit__(None, None, None) ptr = ptr[0] @@ -663,7 +677,7 @@ class SimpleExpressionVisitor(object): are reimplemented. """ - def visit(self, node): #pragma: no cover + def visit(self, node): # pragma: no cover """ Visit a node in an expression tree and perform some operation on it. @@ -679,7 +693,7 @@ def visit(self, node): #pragma: no cover """ pass - def finalize(self): #pragma: no cover + def finalize(self): # pragma: no cover """ Return the "final value" of the search. @@ -713,10 +727,14 @@ def xbfs(self, node): while dq: current = dq.popleft() self.visit(current) - #for c in self.children(current): + # for c in self.children(current): for c in current.args: - #if self.is_leaf(c): - if c.__class__ in nonpyomo_leaf_types or not c.is_expression_type() or c.nargs() == 0: + # if self.is_leaf(c): + if ( + c.__class__ in nonpyomo_leaf_types + or not c.is_expression_type() + or c.nargs() == 0 + ): self.visit(c) else: dq.append(c) @@ -744,7 +762,11 @@ def xbfs_yield_leaves(self, node): # # If we start with a leaf, then yield it and stop iteration # - if node.__class__ in nonpyomo_leaf_types or not node.is_expression_type() or node.nargs() == 0: + if ( + node.__class__ in nonpyomo_leaf_types + or not node.is_expression_type() + or node.nargs() == 0 + ): ans = self.visit(node) if not ans is None: yield ans @@ -755,11 +777,15 @@ def xbfs_yield_leaves(self, node): dq = deque([node]) while dq: current = dq.popleft() - #self.visit(current) - #for c in self.children(current): + # self.visit(current) + # for c in self.children(current): for c in current.args: - #if self.is_leaf(c): - if c.__class__ in nonpyomo_leaf_types or not c.is_expression_type() or c.nargs() == 0: + # if self.is_leaf(c): + if ( + c.__class__ in nonpyomo_leaf_types + or not c.is_expression_type() + or c.nargs() == 0 + ): ans = self.visit(c) if not ans is None: yield ans @@ -778,7 +804,7 @@ class ExpressionValueVisitor(object): are reimplemented. """ - def visit(self, node, values): #pragma: no cover + def visit(self, node, values): # pragma: no cover """ Visit a node in a tree and compute its value using the values of its children. @@ -795,7 +821,7 @@ def visit(self, node, values): #pragma: no cover """ pass - def visiting_potential_leaf(self, node): #pragma: no cover + def visiting_potential_leaf(self, node): # pragma: no cover """ Visit a node and return its value if it is a leaf. @@ -813,7 +839,7 @@ def visiting_potential_leaf(self, node): #pragma: no cover """ raise RuntimeError("The visiting_potential_leaf method needs to be defined.") - def finalize(self, ans): #pragma: no cover + def finalize(self, ans): # pragma: no cover """ This method defines the return value for the search methods in this class. @@ -854,8 +880,8 @@ def dfs_postorder_stack(self, node): flag, value = self.visiting_potential_leaf(node) if flag: return self.finalize(value) - #_stack = [ (node, self.children(node), 0, len(self.children(node)), [])] - _stack = [ (node, node._args_, 0, node.nargs(), [])] + # _stack = [ (node, self.children(node), 0, len(self.children(node)), [])] + _stack = [(node, node._args_, 0, node.nargs(), [])] # # Iterate until the stack is empty # @@ -865,7 +891,7 @@ def dfs_postorder_stack(self, node): # # Get the top of the stack # _obj Current expression object - # _argList The arguments for this expression objet + # _argList The arguments for this expression object # _idx The current argument being considered # _len The number of arguments # _result The return values @@ -879,18 +905,18 @@ def dfs_postorder_stack(self, node): _idx += 1 flag, value = self.visiting_potential_leaf(_sub) if flag: - _result.append( value ) + _result.append(value) else: # # Push an expression onto the stack # - _stack.append( (_obj, _argList, _idx, _len, _result) ) - _obj = _sub - #_argList = self.children(_sub) - _argList = _sub._args_ - _idx = 0 - _len = _sub.nargs() - _result = [] + _stack.append((_obj, _argList, _idx, _len, _result)) + _obj = _sub + # _argList = self.children(_sub) + _argList = _sub._args_ + _idx = 0 + _len = _sub.nargs() + _result = [] # # Process the current node # @@ -899,15 +925,17 @@ def dfs_postorder_stack(self, node): # # "return" the recursion by putting the return value on the end of the results stack # - _stack[-1][-1].append( ans ) + _stack[-1][-1].append(ans) else: return self.finalize(ans) -def replace_expressions(expr, - substitution_map, - descend_into_named_expressions=True, - remove_named_expressions=True): +def replace_expressions( + expr, + substitution_map, + descend_into_named_expressions=True, + remove_named_expressions=True, +): """ Parameters @@ -935,10 +963,12 @@ def replace_expressions(expr, class ExpressionReplacementVisitor(StreamBasedExpressionVisitor): - def __init__(self, - substitute=None, - descend_into_named_expressions=True, - remove_named_expressions=True): + def __init__( + self, + substitute=None, + descend_into_named_expressions=True, + remove_named_expressions=True, + ): if substitute is None: substitute = {} # Note: preserving the attribute names from the previous @@ -954,10 +984,14 @@ def __init__(self, "to derive from StreamBasedExpressionVisitor. " "visiting_potential_leaf() has been replaced by beforeChild()" "(note to implementers: the sense of the bool return value " - "has been inverted).", version='6.2') + "has been inverted).", + version='6.2', + ) + def beforeChild(node, child, child_idx): is_leaf, ans = self.visiting_potential_leaf(child) return not is_leaf, ans + kwds['beforeChild'] = beforeChild if hasattr(self, 'visit'): @@ -965,7 +999,8 @@ def beforeChild(node, child, child_idx): "ExpressionReplacementVisitor: this walker has been ported " "to derive from StreamBasedExpressionVisitor. " "overriding visit() has no effect (and is likely to generate " - "invalid expression trees)") + "invalid expression trees)" + ) super().__init__(**kwds) def initializeWalker(self, expr): @@ -988,12 +1023,18 @@ def beforeChild(self, node, child, child_idx): def enterNode(self, node): args = list(node.args) - return args, [False, args] + # [bool:args_have_changed, list:original_args, bool:node_is_constant] + return args, [False, args, True] def acceptChildResult(self, node, data, child_result, child_idx): if data[1][child_idx] is not child_result: data[1][child_idx] = child_result data[0] = True + if ( + child_result.__class__ not in native_types + and not child_result.is_constant() + ): + data[2] = False return data def exitNode(self, node, data): @@ -1005,30 +1046,65 @@ def exitNode(self, node, data): node.set_value(data[1][0]) return node elif data[0]: - return node.create_node_with_local_data(tuple(data[1])) + if data[2]: + return node._apply_operation(data[1]) + else: + return node.create_node_with_local_data(data[1]) return node @deprecated( "ExpressionReplacementVisitor: this walker has been ported " "to derive from StreamBasedExpressionVisitor. " "dfs_postorder_stack() has been replaced with walk_expression()", - version='6.2') + version='6.2', + ) def dfs_postorder_stack(self, expr): return self.walk_expression(expr) +def evaluate_fixed_subexpressions( + expr, descend_into_named_expressions=True, remove_named_expressions=True +): + return EvaluateFixedSubexpressionVisitor( + descend_into_named_expressions=descend_into_named_expressions, + remove_named_expressions=remove_named_expressions, + ).walk_expression(expr) + + +class EvaluateFixedSubexpressionVisitor(ExpressionReplacementVisitor): + def __init__( + self, descend_into_named_expressions=False, remove_named_expressions=False + ): + super().__init__( + descend_into_named_expressions=descend_into_named_expressions, + remove_named_expressions=remove_named_expressions, + ) + + def beforeChild(self, node, child, child_idx): + if type(child) in native_types: + return False, child + elif not child.is_expression_type(): + if child.is_fixed(): + return False, child() + else: + return False, child + elif child.is_named_expression_type(): + if not self.enter_named_expr: + return False, child + return True, None -#------------------------------------------------------- +# ------------------------------------------------------- # # Functions used to process expression trees # -#------------------------------------------------------- +# ------------------------------------------------------- # ===================================================== # clone_expression # ===================================================== + def clone_expression(expr, substitute=None): """A function that is used to clone an expression. @@ -1048,7 +1124,7 @@ def clone_expression(expr, substitute=None): The cloned expression. """ - clone_counter._count += 1 + common.clone_counter._count += 1 memo = {'__block_scope__': {id(None): False}} if substitute: expr = replace_expressions(expr, substitute) @@ -1059,6 +1135,7 @@ def clone_expression(expr, substitute=None): # sizeof_expression # ===================================================== + def sizeof_expression(expr): """ Return the number of nodes in the expression tree. @@ -1070,26 +1147,29 @@ def sizeof_expression(expr): A non-negative integer that is the number of interior and leaf nodes in the expression tree. """ + def enter(node): return None, 1 + def accept(node, data, child_result, child_idx): return data + child_result + return StreamBasedExpressionVisitor( - enterNode=enter, - acceptChildResult=accept, + enterNode=enter, acceptChildResult=accept ).walk_expression(expr) + # ===================================================== # evaluate_expression # ===================================================== -class _EvaluationVisitor(ExpressionValueVisitor): +class _EvaluationVisitor(ExpressionValueVisitor): def __init__(self, exception): self.exception = exception def visit(self, node, values): - """ Visit nodes that have been expanded """ + """Visit nodes that have been expanded""" return node._apply_operation(values) def visiting_potential_leaf(self, node): @@ -1113,21 +1193,18 @@ def visiting_potential_leaf(self, node): class FixedExpressionError(Exception): - def __init__(self, *args, **kwds): super(FixedExpressionError, self).__init__(*args, **kwds) class NonConstantExpressionError(Exception): - def __init__(self, *args, **kwds): super(NonConstantExpressionError, self).__init__(*args, **kwds) class _EvaluateConstantExpressionVisitor(ExpressionValueVisitor): - def visit(self, node, values): - """ Visit nodes that have been expanded """ + """Visit nodes that have been expanded""" return node._apply_operation(values) def visiting_potential_leaf(self, node): @@ -1203,9 +1280,14 @@ def evaluate_expression(exp, exception=True, constant=False): clear_active = True try: - return visitor.dfs_postorder_stack(exp) - except ( TemplateExpressionError, ValueError, TypeError, - NonConstantExpressionError, FixedExpressionError ): + ans = visitor.dfs_postorder_stack(exp) + except ( + TemplateExpressionError, + ValueError, + TypeError, + NonConstantExpressionError, + FixedExpressionError, + ): # Errors that we want to be able to suppress: # # TemplateExpressionError: raised when generating expression @@ -1221,7 +1303,11 @@ def evaluate_expression(exp, exception=True, constant=False): return None finally: if clear_active: - evaluate_expression.visitor_active = False + evaluate_expression.visitor_active = False + if ans.__class__ not in native_types and ans.is_numeric_type() is True: + return value(ans) + return ans + evaluate_expression.visitor_cache = _EvaluationVisitor(True) evaluate_expression.visitor_active = False @@ -1230,8 +1316,8 @@ def evaluate_expression(exp, exception=True, constant=False): # identify_components # ===================================================== -class _ComponentVisitor(SimpleExpressionVisitor): +class _ComponentVisitor(SimpleExpressionVisitor): def __init__(self, types): self.seen = set() if types.__class__ is set: @@ -1273,8 +1359,8 @@ def identify_components(expr, component_types): # identify_variables # ===================================================== -class _VariableVisitor(SimpleExpressionVisitor): +class _VariableVisitor(SimpleExpressionVisitor): def __init__(self): self.seen = set() @@ -1288,19 +1374,6 @@ def visit(self, node): self.seen.add(id(node)) return node - if node.is_expression_type() and isinstance(node, LinearExpression): - if id(node) in self.seen: - return - self.seen.add(id(node)) - - def unique_vars_generator(): - for var in node.linear_vars: - if id(var) in self.seen: - continue - self.seen.add(id(var)) - yield var - return tuple(v for v in unique_vars_generator()) - def identify_variables(expr, include_fixed=True): """ @@ -1338,8 +1411,8 @@ def identify_variables(expr, include_fixed=True): # identify_mutable_parameters # ===================================================== -class _MutableParamVisitor(SimpleExpressionVisitor): +class _MutableParamVisitor(SimpleExpressionVisitor): def __init__(self): self.seen = set() @@ -1348,8 +1421,7 @@ def visit(self, node): return # TODO: Confirm that this has the right semantics - if (not node.is_variable_type() and node.is_fixed() - and not node.is_constant()): + if not node.is_variable_type() and node.is_fixed() and not node.is_constant(): if id(node) in self.seen: return self.seen.add(id(node)) @@ -1375,10 +1447,10 @@ def identify_mutable_parameters(expr): # polynomial_degree # ===================================================== -class _PolynomialDegreeVisitor(ExpressionValueVisitor): +class _PolynomialDegreeVisitor(ExpressionValueVisitor): def visit(self, node, values): - """ Visit nodes that have been expanded """ + """Visit nodes that have been expanded""" return node._compute_polynomial_degree(values) def visiting_potential_leaf(self, node): @@ -1418,6 +1490,7 @@ def polynomial_degree(node): # _expression_is_fixed # ===================================================== + class _IsFixedVisitor(ExpressionValueVisitor): """ NOTE: This doesn't check if combiner logic is @@ -1426,7 +1499,7 @@ class _IsFixedVisitor(ExpressionValueVisitor): """ def visit(self, node, values): - """ Visit nodes that have been expanded """ + """Visit nodes that have been expanded""" return node._is_fixed(values) def visiting_potential_leaf(self, node): @@ -1448,15 +1521,13 @@ def visiting_potential_leaf(self, node): def _expression_is_fixed(node): - """ - Return the polynomial degree of the expression. + """Return bool indicating if this expression is fixed (non-variable) Args: node: The root node of an expression tree. - Returns: - A non-negative integer that is the polynomial - degree if the expression is polynomial, or :const:`None` otherwise. + Returns: bool + """ visitor = _IsFixedVisitor() return visitor.dfs_postorder_stack(node) @@ -1466,44 +1537,54 @@ def _expression_is_fixed(node): # expression_to_string # ===================================================== +LEFT_TO_RIGHT = common.OperatorAssociativity.LEFT_TO_RIGHT +RIGHT_TO_LEFT = common.OperatorAssociativity.RIGHT_TO_LEFT + + class _ToStringVisitor(ExpressionValueVisitor): + _expression_handlers = None - def __init__(self, verbose, smap, compute_values): + def __init__(self, verbose, smap): super(_ToStringVisitor, self).__init__() self.verbose = verbose self.smap = smap - self.compute_values = compute_values def visit(self, node, values): - """ Visit nodes that have been expanded """ - tmp = [] - for i,val in enumerate(values): + """Visit nodes that have been expanded""" + for i, val in enumerate(values): arg = node._args_[i] if arg is None: - tmp.append('Undefined') # TODO: coverage + values[i] = 'Undefined' elif arg.__class__ in native_numeric_types: - tmp.append(val) + pass elif arg.__class__ in nonpyomo_leaf_types: - tmp.append("'{0}'".format(val)) + values[i] = f"'{val}'" else: parens = False - if not self.verbose and arg.is_expression_type(): - if node._precedence() < arg._precedence(): + if ( + not self.verbose + and arg.is_expression_type() + and node.PRECEDENCE is not None + ): + if arg.PRECEDENCE is None: + pass + elif node.PRECEDENCE < arg.PRECEDENCE: parens = True - elif node._precedence() == arg._precedence(): + elif node.PRECEDENCE == arg.PRECEDENCE: if i == 0: - parens = node._associativity() != 1 - elif i == len(node._args_)-1: - parens = node._associativity() != -1 + parens = node.ASSOCIATIVITY != LEFT_TO_RIGHT + elif i == len(node._args_) - 1: + parens = node.ASSOCIATIVITY != RIGHT_TO_LEFT else: parens = True if parens: - tmp.append("({0})".format(val)) - else: - tmp.append(val) + values[i] = f"({val})" + + if self._expression_handlers and node.__class__ in self._expression_handlers: + return self._expression_handlers[node.__class__](self, node, values) - return node._to_string(tmp, self.verbose, self.smap, self.compute_values) + return node._to_string(values, self.verbose, self.smap) def visiting_potential_leaf(self, node): """ @@ -1512,7 +1593,7 @@ def visiting_potential_leaf(self, node): Return True if the node is not expanded. """ if node is None: - return True, None # TODO: coverage + return True, None if node.__class__ in nonpyomo_leaf_types: return True, str(node) @@ -1521,34 +1602,43 @@ def visiting_potential_leaf(self, node): return False, None if hasattr(node, 'to_string'): - return True, node.to_string( - verbose=self.verbose, - smap=self.smap, - compute_values=self.compute_values - ) + return True, node.to_string(verbose=self.verbose, smap=self.smap) else: return True, str(node) -def expression_to_string(expr, verbose=None, labeler=None, smap=None, compute_values=False): - """ - Return a string representation of an expression. +def expression_to_string( + expr, verbose=None, labeler=None, smap=None, compute_values=False +): + """Return a string representation of an expression. - Args: - expr: The root node of an expression tree. - verbose (bool): If :const:`True`, then the output is - a nested functional form. Otherwise, the output - is an algebraic expression. Default is :const:`False`. - labeler: If specified, this labeler is used to label - variables in the expression. - smap: If specified, this :class:`SymbolMap ` is - used to cache labels. - compute_values (bool): If :const:`True`, then - parameters and fixed variables are evaluated before the - expression string is generated. Default is :const:`False`. + Parameters + ---------- + expr: ExpressionBase + The root node of an expression tree. + + verbose: bool + If :const:`True`, then the output is a nested functional form. + Otherwise, the output is an algebraic expression. Default is + retrieved from :py:attr:`common.TO_STRING_VERBOSE` + + labeler: Callable + If specified, this labeler is used to generate the string + representation for leaves (Var / Param objects) in the + expression. + + smap: SymbolMap + If specified, this :class:`SymbolMap + ` is used to cache labels. + + compute_values: bool + If :const:`True`, then parameters and fixed variables are + evaluated before the expression string is generated. Default is + :const:`False`. Returns: A string representation for the expression. + """ verbose = common.TO_STRING_VERBOSE if verbose is None else verbose # @@ -1559,7 +1649,12 @@ def expression_to_string(expr, verbose=None, labeler=None, smap=None, compute_va smap = SymbolMap() smap.default_labeler = labeler # + # TODO: should we deprecate the compute_values option? + # + if compute_values: + expr = evaluate_fixed_subexpressions(expr) + # # Create and execute the visitor pattern # - visitor = _ToStringVisitor(verbose, smap, compute_values) + visitor = _ToStringVisitor(verbose, smap) return visitor.dfs_postorder_stack(expr) diff --git a/pyomo/core/kernel/__init__.py b/pyomo/core/kernel/__init__.py index ee380e8567b..28a329109fc 100644 --- a/pyomo/core/kernel/__init__.py +++ b/pyomo/core/kernel/__init__.py @@ -9,22 +9,52 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.expr import numvalue, numeric_expr, boolean_value, logical_expr, current +from pyomo.core.expr import numvalue, numeric_expr, boolean_value, logical_expr from pyomo.core.expr.numvalue import ( - value, is_constant, is_fixed, is_variable_type, - is_potentially_variable, NumericValue, ZeroConstant, - native_numeric_types, native_types, polynomial_degree, + value, + is_constant, + is_fixed, + is_variable_type, + is_potentially_variable, + NumericValue, + ZeroConstant, + native_numeric_types, + native_types, + polynomial_degree, ) from pyomo.core.expr.boolean_value import BooleanValue -from pyomo.core.expr.numeric_expr import linear_expression, nonlinear_expression -from pyomo.core.expr.logical_expr import (land, lor, equivalent, exactly, - atleast, atmost, implies, lnot, - xor, inequality) -from pyomo.core.expr.current import ( - log, log10, sin, cos, tan, cosh, sinh, tanh, - asin, acos, atan, exp, sqrt, asinh, acosh, - atanh, ceil, floor, +from pyomo.core.expr import ( + log, + log10, + sin, + cos, + tan, + cosh, + sinh, + tanh, + asin, + acos, + atan, + exp, + sqrt, + asinh, + acosh, + atanh, + ceil, + floor, Expr_if, + inequality, + linear_expression, + nonlinear_expression, + land, + lor, + equivalent, + exactly, + atleast, + atmost, + implies, + lnot, + xor, ) from pyomo.core.expr.calculus.derivatives import differentiate from pyomo.core.expr.taylor_series import taylor_series_expansion @@ -47,5 +77,6 @@ # TODO: These are included for backwards compatibility. Accessing them # will result in a deprecation warning from pyomo.common.dependencies import attempt_import + component_map = attempt_import('pyomo.core.kernel.component_map')[0] component_set = attempt_import('pyomo.core.kernel.component_set')[0] diff --git a/pyomo/core/kernel/base.py b/pyomo/core/kernel/base.py index 4a27bd4c5e9..2c0af56bc10 100644 --- a/pyomo/core/kernel/base.py +++ b/pyomo/core/kernel/base.py @@ -11,31 +11,36 @@ import copy import weakref +from pyomo.common.autoslots import AutoSlots + def _not_implemented(*args, **kwds): - raise NotImplementedError("This property is abstract") #pragma:nocover + raise NotImplementedError("This property is abstract") # pragma:nocover + def _abstract_readwrite_property(**kwds): - p = property(fget=_not_implemented, - fset=_not_implemented, - **kwds) + p = property(fget=_not_implemented, fset=_not_implemented, **kwds) return p + def _abstract_readonly_property(**kwds): - p = property(fget=_not_implemented, - **kwds) + p = property(fget=_not_implemented, **kwds) return p + class _no_ctype(object): """The default argument for methods that accept a ctype.""" + pass + # This will be populated outside of core.kernel. It will map # AML classes (which are the ctypes used by all of the # solver interfaces) to Kernel classes _convert_ctype = {} _kernel_ctype_backmap = {} + def _convert_descend_into(value): """Converts the descend_into keyword to a function""" if hasattr(value, "__call__"): @@ -44,10 +49,13 @@ def _convert_descend_into(value): return _convert_descend_into._true else: return _convert_descend_into._false + + _convert_descend_into._true = lambda x: True _convert_descend_into._false = lambda x: False -class ICategorizedObject(object): + +class ICategorizedObject(AutoSlots.Mixin): """ Interface for objects that maintain a weak reference to a parent storage object and have a category type. @@ -67,7 +75,9 @@ class ICategorizedObject(object): _active (bool): Stores the active status of this object. """ + __slots__ = () + __autoslot_mappers__ = {'_parent': AutoSlots.weakref_mapper} # These flags can be used by implementations # to avoid isinstance calls. @@ -106,11 +116,10 @@ def storage_key(self): def active(self): """The active status of this object.""" return self._active + @active.setter def active(self, value): - raise AttributeError( - "Assignment not allowed. Use the " - "(de)activate method") + raise AttributeError("Assignment not allowed. Use the (de)activate method") ### The following group of methods use object.__setattr__ ### to update the _parent, _storage_key, and _active flags. @@ -132,13 +141,16 @@ def activate(self): def deactivate(self): """Deactivate this object.""" object.__setattr__(self, "_active", False) + ### - def getname(self, - fully_qualified=False, - name_buffer={}, # HACK: ignored (required to work with some solver interfaces, but that code should change soon) - convert=str, - relative_to=None): + def getname( + self, + fully_qualified=False, + name_buffer={}, # HACK: ignored (required to work with some solver interfaces, but that code should change soon) + convert=str, + relative_to=None, + ): """ Dynamically generates a name for this object. @@ -160,8 +172,7 @@ def getname(self, context of its parent; otherwise (if no parent exists), this method returns :const:`None`. """ - assert fully_qualified or \ - (relative_to is None) + assert fully_qualified or (relative_to is None) parent = self.parent if parent is None: return None @@ -169,14 +180,11 @@ def getname(self, key = self.storage_key name = parent._child_storage_entry_string % convert(key) if fully_qualified: - parent_name = parent.getname(fully_qualified=True, - relative_to=relative_to) - if (parent_name is not None) and \ - ((relative_to is None) or \ - (parent is not relative_to)): - return (parent_name + - parent._child_storage_delimiter_string + - name) + parent_name = parent.getname(fully_qualified=True, relative_to=relative_to) + if (parent_name is not None) and ( + (relative_to is None) or (parent is not relative_to) + ): + return parent_name + parent._child_storage_delimiter_string + name else: return name else: @@ -203,7 +211,7 @@ def __str__(self): name is returned.""" name = self.name if name is None: - return "<"+self.__class__.__name__+">" + return "<" + self.__class__.__name__ + ">" else: return name @@ -220,9 +228,9 @@ def clone(self): save_parent = self._parent object.__setattr__(self, "_parent", None) try: - new_block = copy.deepcopy(self, - {'__categorized_object_scope__': - {id(self): True, id(None): False}}) + new_block = copy.deepcopy( + self, {'__block_scope__': {id(self): True, id(None): False}} + ) finally: object.__setattr__(self, "_parent", save_parent) return new_block @@ -235,68 +243,39 @@ def clone(self): # def __deepcopy__(self, memo): - if '__categorized_object_scope__' in memo and \ - id(self) not in memo['__categorized_object_scope__']: - _known = memo['__categorized_object_scope__'] - _new = [] + if '__block_scope__' in memo: + _known = memo['__block_scope__'] + _new = None tmp = self.parent - tmpId = id(tmp) - while tmpId not in _known: - _new.append(tmpId) + _in_scope = tmp is None + while id(tmp) not in _known: + _new = (_new, id(tmp)) tmp = tmp.parent - tmpId = id(tmp) + _in_scope |= _known[id(tmp)] - for _id in _new: - _known[_id] = _known[tmpId] + while _new is not None: + _new, _id = _new + _known[_id] = _in_scope - if not _known[tmpId]: + if not _in_scope and id(self) not in _known: # component is out-of-scope. shallow copy only - ans = memo[id(self)] = self - return ans + memo[id(self)] = self + return self + + if id(self) in memo: + return memo[id(self)] ans = memo[id(self)] = self.__class__.__new__(self.__class__) ans.__setstate__(copy.deepcopy(self.__getstate__(), memo)) return ans - # - # The following two methods allow implementations to be - # pickled. These should work whether or not the - # implementation makes use of __slots__, and whether or - # not non-empty __slots__ declarations appear on - # multiple classes in the inheritance chain. - # - - def __getstate__(self): - state = getattr(self, "__dict__", {}).copy() - # Get all slots in the inheritance chain - for cls in self.__class__.__mro__: - for key in cls.__dict__.get("__slots__",()): - state[key] = getattr(self, key) - # make sure we don't store the __dict__ in - # duplicate (it can be declared as a slot) - state.pop('__dict__', None) - # make sure not to pickle the __weakref__ - # slot if it was declared - state.pop('__weakref__', None) - # make sure to dereference the parent weakref - state['_parent'] = self.parent - return state - - def __setstate__(self, state): - for key, value in state.items(): - # bypass a possibly overridden __setattr__ - object.__setattr__(self, key, value) - # make sure _parent is a weakref - # if it is not None - if self._parent is not None: - self._update_parent_and_storage_key(self._parent, - self._storage_key) class ICategorizedObjectContainer(ICategorizedObject): """ Interface for categorized containers of categorized objects. """ + _is_container = True _child_storage_delimiter_string = None _child_storage_entry_string = None @@ -329,13 +308,13 @@ def deactivate(self, shallow=True): def child(self, *args, **kwds): """Returns a child of this container given a storage key.""" - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover def children(self, *args, **kwds): """A generator over the children of this container.""" - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover def components(self, *args, **kwds): """A generator over the set of components stored under this container.""" - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover diff --git a/pyomo/core/kernel/block.py b/pyomo/core/kernel/block.py index 48d5206fefc..fd779578fc4 100644 --- a/pyomo/core/kernel/block.py +++ b/pyomo/core/kernel/block.py @@ -14,20 +14,18 @@ from pyomo.core.staleflag import StaleFlagManager from pyomo.core.expr.symbol_map import SymbolMap -from pyomo.core.kernel.base import \ - (_no_ctype, - _convert_ctype) -from pyomo.core.kernel.heterogeneous_container import \ - IHeterogeneousContainer -from pyomo.core.kernel.container_utils import \ - define_simple_containers +from pyomo.core.kernel.base import _no_ctype, _convert_ctype +from pyomo.core.kernel.heterogeneous_container import IHeterogeneousContainer +from pyomo.core.kernel.container_utils import define_simple_containers logger = logging.getLogger('pyomo.core') + class IBlock(IHeterogeneousContainer): """A generalized container that can store objects of any category type as attributes. """ + __slots__ = () _child_storage_delimiter_string = "." _child_storage_entry_string = "%s" @@ -36,7 +34,7 @@ class IBlock(IHeterogeneousContainer): # Define the IHeterogeneousContainer abstract methods # - #def child_ctypes(self, *args, **kwds): + # def child_ctypes(self, *args, **kwds): # ... not defined here # @@ -56,9 +54,10 @@ def child(self, key): except AttributeError: raise KeyError(str(key)) - #def children(self, *args, **kwds): + # def children(self, *args, **kwds): # ... not defined here + class block(IBlock): """A generalized container for defining hierarchical models by adding modeling components as attributes. @@ -95,8 +94,7 @@ def __init__(self): def _activate_large_storage_mode(self): if self.__byctype.__class__ is not dict: - self_byctype = \ - self.__dict__['_block__byctype'] = dict() + self_byctype = self.__dict__['_block__byctype'] = dict() for key, obj in self.__order.items(): ctype = obj.ctype if ctype not in self_byctype: @@ -180,8 +178,9 @@ def children(self, ctype=_no_ctype): def __setattr__(self, name, obj): if name in self._block_reserved_words: - raise ValueError("Attempting to modify a reserved " - "block attribute: %s" % (name,)) + raise ValueError( + "Attempting to modify a reserved block attribute: %s" % (name,) + ) needs_del = False same_obj = False self_order = self.__order @@ -198,9 +197,12 @@ def __setattr__(self, name, obj): "To avoid this warning, delete the original " "object from the block before assigning a new " "object." - % (name, - getattr(self, name).__class__.__name__, - obj.__class__.__name__)) + % ( + name, + getattr(self, name).__class__.__name__, + obj.__class__.__name__, + ) + ) else: same_obj = True assert obj.parent is self @@ -242,17 +244,16 @@ def __setattr__(self, name, obj): # if we have exceeded the threshold self._activate_large_storage_mode() else: - self.__dict__['_block__byctype'] = \ - hash(self_byctype) | hash(ctype) + self.__dict__['_block__byctype'] = hash( + self_byctype + ) | hash(ctype) else: raise ValueError( "Invalid assignment to %s type with name '%s' " "at entry %s. A parent container has already " "been assigned to the object being inserted: %s" - % (self.__class__.__name__, - self.name, - name, - obj.parent.name)) + % (self.__class__.__name__, self.name, name, obj.parent.name) + ) super(block, self).__setattr__(name, obj) def __delattr__(self, name): @@ -270,12 +271,14 @@ def __delattr__(self, name): del self_byctype[ctype] super(block, self).__delattr__(name) - def write(self, - filename, - format=None, - _solver_capability=None, - _called_by_solver=False, - **kwds): + def write( + self, + filename, + format=None, + _solver_capability=None, + _called_by_solver=False, + **kwds + ): """ Write the model to a file, with a given format. @@ -291,6 +294,7 @@ def write(self, a :class:`SymbolMap` """ import pyomo.opt + # # Guess the format if none is specified # @@ -301,15 +305,12 @@ def write(self, if problem_writer is None: raise ValueError( "Cannot write model in format '%s': no model " - "writer registered for that format" - % str(format)) + "writer registered for that format" % str(format) + ) if _solver_capability is None: _solver_capability = lambda x: True - (filename_, smap) = problem_writer(self, - filename, - _solver_capability, - kwds) + (filename_, smap) = problem_writer(self, filename, _solver_capability, kwds) assert filename_ == filename if _called_by_solver: @@ -322,10 +323,12 @@ def write(self, else: return smap - def load_solution(self, - solution, - allow_consistent_values_for_fixed_vars=False, - comparison_tolerance_for_fixed_vars=1e-5): + def load_solution( + self, + solution, + allow_consistent_values_for_fixed_vars=False, + comparison_tolerance_for_fixed_vars=1e-5, + ): """ Load a solution. @@ -344,19 +347,16 @@ def load_solution(self, value in the solution is consistent with the value of a fixed variable. """ - from pyomo.core.kernel.suffix import \ - import_suffix_generator + from pyomo.core.kernel.suffix import import_suffix_generator symbol_map = solution.symbol_map - default_variable_value = getattr(solution, - "default_variable_value", - None) + default_variable_value = getattr(solution, "default_variable_value", None) # Generate the list of active import suffixes on # this top level model - valid_import_suffixes = \ - {obj.storage_key:obj - for obj in import_suffix_generator(self)} + valid_import_suffixes = { + obj.storage_key: obj for obj in import_suffix_generator(self) + } # To ensure that import suffix data gets properly # overwritten (e.g., the case where nonzero dual @@ -380,16 +380,16 @@ def load_solution(self, # values and which ones don't. # StaleFlagManager.mark_all_as_stale() - # + # # Load variable data # from pyomo.core.kernel.variable import IVariable - var_skip_attrs = ['id','canonical_label'] + + var_skip_attrs = ['id', 'canonical_label'] seen_var_ids = set() for label, entry in solution.variable.items(): var = symbol_map.getObject(label) - if (var is None) or \ - (var is SymbolMap.UnknownSymbol): + if (var is None) or (var is SymbolMap.UnknownSymbol): # NOTE: the following is a hack, to handle # the ONE_VAR_CONSTANT variable that is # necessary for the objective @@ -400,33 +400,43 @@ def load_solution(self, if "ONE_VAR_CONST" in label: continue else: - raise KeyError("Variable associated with symbol '%s' " - "is not found on this block" - % (label)) + raise KeyError( + "Variable associated with symbol '%s' " + "is not found on this block" % (label) + ) seen_var_ids.add(id(var)) - if (not allow_consistent_values_for_fixed_vars) and \ - var.fixed: - raise ValueError("Variable '%s' is currently fixed. " - "A new value is not expected " - "in solution" % (var.name)) + if (not allow_consistent_values_for_fixed_vars) and var.fixed: + raise ValueError( + "Variable '%s' is currently fixed. " + "A new value is not expected " + "in solution" % (var.name) + ) for _attr_key, attr_value in entry.items(): attr_key = _attr_key[0].lower() + _attr_key[1:] if attr_key == 'value': - if allow_consistent_values_for_fixed_vars and \ - var.fixed and \ - (math.fabs(attr_value - var.value) > \ - comparison_tolerance_for_fixed_vars): + if ( + allow_consistent_values_for_fixed_vars + and var.fixed + and ( + math.fabs(attr_value - var.value) + > comparison_tolerance_for_fixed_vars + ) + ): raise ValueError( "Variable %s is currently fixed. " "A value of '%s' in solution is " "not within tolerance=%s of the current " "value of '%s'" - % (var.name, attr_value, - comparison_tolerance_for_fixed_vars, - var.value)) + % ( + var.name, + attr_value, + comparison_tolerance_for_fixed_vars, + var.value, + ) + ) var.set_value(attr_value, skip_validation=True) elif attr_key in valid_import_suffixes: valid_import_suffixes[attr_key][var] = attr_value @@ -440,22 +450,21 @@ def load_solution(self, # Load objective solution (should simply be suffixes if # they exist) # - objective_skip_attrs = ['id','canonical_label','value'] - for label,entry in solution.objective.items(): + objective_skip_attrs = ['id', 'canonical_label', 'value'] + for label, entry in solution.objective.items(): obj = symbol_map.getObject(label) - if (obj is None) or \ - (obj is SymbolMap.UnknownSymbol): - raise KeyError("Objective associated with symbol '%s' " - "is not found on this block" - % (label)) + if (obj is None) or (obj is SymbolMap.UnknownSymbol): + raise KeyError( + "Objective associated with symbol '%s' " + "is not found on this block" % (label) + ) # Because of __default_objective__, an objective might # appear twice in the objective dictionary. unseen_var_ids.discard(id(obj)) for _attr_key, attr_value in entry.items(): attr_key = _attr_key[0].lower() + _attr_key[1:] if attr_key in valid_import_suffixes: - valid_import_suffixes[attr_key][obj] = \ - attr_value + valid_import_suffixes[attr_key][obj] = attr_value # # Load constraint solution @@ -470,16 +479,15 @@ def load_solution(self, if "ONE_VAR_CONST" in label: continue else: - raise KeyError("Constraint associated with symbol '%s' " - "is not found on this block" - % (label)) + raise KeyError( + "Constraint associated with symbol '%s' " + "is not found on this block" % (label) + ) unseen_var_ids.discard(id(con)) for _attr_key, attr_value in entry.items(): attr_key = _attr_key[0].lower() + _attr_key[1:] if attr_key in valid_import_suffixes: - valid_import_suffixes[attr_key][con] = \ - attr_value - + valid_import_suffixes[attr_key][con] = attr_value # # Load sparse variable solution @@ -489,24 +497,33 @@ def load_solution(self, var = symbol_map.getObject(symbol_map.byObject[var_id]) if var.ctype is not IVariable: continue - if (not allow_consistent_values_for_fixed_vars) and \ - var.fixed: - raise ValueError("Variable '%s' is currently fixed. " - "A new value is not expected " - "in solution" % (var.name)) - - if allow_consistent_values_for_fixed_vars and \ - var.fixed and \ - (math.fabs(default_variable_value - var.value) > \ - comparison_tolerance_for_fixed_vars): + if (not allow_consistent_values_for_fixed_vars) and var.fixed: + raise ValueError( + "Variable '%s' is currently fixed. " + "A new value is not expected " + "in solution" % (var.name) + ) + + if ( + allow_consistent_values_for_fixed_vars + and var.fixed + and ( + math.fabs(default_variable_value - var.value) + > comparison_tolerance_for_fixed_vars + ) + ): raise ValueError( "Variable %s is currently fixed. " "A value of '%s' in solution is " "not within tolerance=%s of the current " "value of '%s'" - % (var.name, default_variable_value, - comparison_tolerance_for_fixed_vars, - var.value)) + % ( + var.name, + default_variable_value, + comparison_tolerance_for_fixed_vars, + var.value, + ) + ) var.set_value(default_variable_value, skip_validation=True) # Set the state flag to "delayed advance": it will auto-advance @@ -514,11 +531,10 @@ def load_solution(self, # variables to be marked as stale). StaleFlagManager.mark_all_as_stale(delayed=True) + # inserts class definitions for simple _tuple, _list, and # _dict containers into this module -define_simple_containers(globals(), - "block", - IBlock) +define_simple_containers(globals(), "block", IBlock) # populate the initial set of reserved block attributes so # that users can not overwrite them when building a model diff --git a/pyomo/core/kernel/component_map.py b/pyomo/core/kernel/component_map.py index d97bd36b83b..501854ad972 100644 --- a/pyomo/core/kernel/component_map.py +++ b/pyomo/core/kernel/component_map.py @@ -11,7 +11,9 @@ from pyomo.common.collections import ComponentMap from pyomo.common.deprecation import deprecation_warning + deprecation_warning( 'The pyomo.core.kernel.component_map module is deprecated. ' 'Import ComponentMap from pyomo.common.collections.', - version='5.7.1') + version='5.7.1', +) diff --git a/pyomo/core/kernel/component_set.py b/pyomo/core/kernel/component_set.py index b65945594ed..b0eb3507347 100644 --- a/pyomo/core/kernel/component_set.py +++ b/pyomo/core/kernel/component_set.py @@ -11,7 +11,9 @@ from pyomo.common.collections import ComponentSet from pyomo.common.deprecation import deprecation_warning + deprecation_warning( 'The pyomo.core.kernel.component_set module is deprecated. ' 'Import ComponentSet from pyomo.common.collections.', - version='5.7.1') + version='5.7.1', +) diff --git a/pyomo/core/kernel/conic.py b/pyomo/core/kernel/conic.py index 82d3cde0f6e..730c072d1b7 100644 --- a/pyomo/core/kernel/conic.py +++ b/pyomo/core/kernel/conic.py @@ -11,16 +11,16 @@ """Various conic constraint implementations.""" from pyomo.core.expr.numvalue import is_numeric_data -from pyomo.core.expr.current import (value, - exp) +from pyomo.core.expr import value, exp from pyomo.core.kernel.block import block -from pyomo.core.kernel.variable import (IVariable, - variable, - variable_tuple) -from pyomo.core.kernel.constraint import (IConstraint, - linear_constraint, - constraint, - constraint_tuple) +from pyomo.core.kernel.variable import IVariable, variable, variable_tuple +from pyomo.core.kernel.constraint import ( + IConstraint, + linear_constraint, + constraint, + constraint_tuple, +) + def _build_linking_constraints(v, v_aux): assert len(v) == len(v_aux) @@ -31,24 +31,22 @@ def _build_linking_constraints(v, v_aux): continue elif is_numeric_data(vi): c_aux.append( - linear_constraint(variables=(vi_aux,), - coefficients=(1,), - rhs=vi)) + linear_constraint(variables=(vi_aux,), coefficients=(1,), rhs=vi) + ) elif isinstance(vi, IVariable): c_aux.append( - linear_constraint(variables=(vi_aux, vi), - coefficients=(1, -1), - rhs=0)) + linear_constraint(variables=(vi_aux, vi), coefficients=(1, -1), rhs=0) + ) else: - c_aux.append( - constraint(body=vi_aux - vi, - rhs=0)) + c_aux.append(constraint(body=vi_aux - vi, rhs=0)) return constraint_tuple(c_aux) + class _ConicBase(IConstraint): """Base class for a few conic constraints that implements some shared functionality. Derived classes are expected to declare any necessary slots.""" + _ctype = IConstraint _linear_canonical_form = False __slots__ = () @@ -65,25 +63,25 @@ def __init__(self): @classmethod def as_domain(cls, *args, **kwds): """Builds a conic domain""" - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover def _body_function(self, *args): """A function that defines the body expression""" - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover def _body_function_variables(self, values=False): """Returns variables in the order they should be passed to the body function. If values is True, then return the current value of each variable in place of the variables themselves.""" - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover def check_convexity_conditions(self, relax=False): """Returns True if all convexity conditions for the conic constraint are satisfied. If relax is True, then variable domains are ignored and it is assumed that all variables are continuous.""" - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover # # Define the IConstraint abstract methods @@ -94,7 +92,8 @@ def body(self): """The body of the constraint""" if self._body is None: self._body = self._body_function( - *self._body_function_variables(values=False)) + *self._body_function_variables(values=False) + ) return self._body @property @@ -122,7 +121,8 @@ def rhs(self): """The right-hand side of the constraint""" raise ValueError( "The rhs property can not be read because this " - "is not an equality constraint") + "is not an equality constraint" + ) @property def equality(self): @@ -138,14 +138,15 @@ def __call__(self, exception=True): # we wrap the result with value(...) as the # alpha term used by some of the constraints # may be a parameter - return value(self._body_function( - *self._body_function_variables(values=True))) + return value( + self._body_function(*self._body_function_variables(values=True)) + ) except (ValueError, TypeError): if exception: - raise ValueError("one or more terms " - "could not be evaluated") + raise ValueError("one or more terms could not be evaluated") return None + class quadratic(_ConicBase): """A quadratic conic constraint of the form: @@ -160,20 +161,23 @@ class quadratic(_ConicBase): x : list[:class:`variable`] An iterable of variables. """ - __slots__ = ("_parent", - "_storage_key", - "_active", - "_body", - "_r", - "_x", - "__weakref__") + + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_body", + "_r", + "_x", + "__weakref__", + ) + def __init__(self, r, x): super(quadratic, self).__init__() self._r = r self._x = tuple(x) assert isinstance(self._r, IVariable) - assert all(isinstance(xi, IVariable) - for xi in self._x) + assert all(isinstance(xi, IVariable) for xi in self._x) @classmethod def as_domain(cls, r, x): @@ -192,10 +196,8 @@ def as_domain(cls, r, x): """ b = block() b.r = variable(lb=0) - b.x = variable_tuple( - [variable() for i in range(len(x))]) - b.c = _build_linking_constraints([r] + list(x), - [b.r] + list(b.x)) + b.x = variable_tuple([variable() for i in range(len(x))]) + b.c = _build_linking_constraints([r] + list(x), [b.r] + list(b.x)) b.q = cls(r=b.r, x=b.x) return b @@ -230,10 +232,11 @@ def check_convexity_conditions(self, relax=False): conic constraint are satisfied. If relax is True, then variable domains are ignored and it is assumed that all variables are continuous.""" - return (relax or \ - (self.r.is_continuous() and \ - all(xi.is_continuous() for xi in self.x))) and \ - (self.r.has_lb() and value(self.r.lb) >= 0) + return ( + relax + or (self.r.is_continuous() and all(xi.is_continuous() for xi in self.x)) + ) and (self.r.has_lb() and value(self.r.lb) >= 0) + class rotated_quadratic(_ConicBase): """A rotated quadratic conic constraint of the form: @@ -251,14 +254,17 @@ class rotated_quadratic(_ConicBase): x : list[:class:`variable`] An iterable of variables. """ - __slots__ = ("_parent", - "_storage_key", - "_active", - "_body", - "_r1", - "_r2", - "_x", - "__weakref__") + + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_body", + "_r1", + "_r2", + "_x", + "__weakref__", + ) def __init__(self, r1, r2, x): super(rotated_quadratic, self).__init__() @@ -267,8 +273,7 @@ def __init__(self, r1, r2, x): self._x = tuple(x) assert isinstance(self._r1, IVariable) assert isinstance(self._r2, IVariable) - assert all(isinstance(xi, IVariable) - for xi in self._x) + assert all(isinstance(xi, IVariable) for xi in self._x) @classmethod def as_domain(cls, r1, r2, x): @@ -289,10 +294,8 @@ def as_domain(cls, r1, r2, x): b = block() b.r1 = variable(lb=0) b.r2 = variable(lb=0) - b.x = variable_tuple( - [variable() for i in range(len(x))]) - b.c = _build_linking_constraints([r1,r2] + list(x), - [b.r1,b.r2] + list(b.x)) + b.x = variable_tuple([variable() for i in range(len(x))]) + b.c = _build_linking_constraints([r1, r2] + list(x), [b.r1, b.r2] + list(b.x)) b.q = cls(r1=b.r1, r2=b.r2, x=b.x) return b @@ -314,7 +317,7 @@ def x(self): def _body_function(self, r1, r2, x): """A function that defines the body expression""" - return sum(xi**2 for xi in x) - 2*r1*r2 + return sum(xi**2 for xi in x) - 2 * r1 * r2 def _body_function_variables(self, values=False): """Returns variables in the order they should be @@ -324,20 +327,26 @@ def _body_function_variables(self, values=False): if not values: return self.r1, self.r2, self.x else: - return self.r1.value, self.r2.value, \ - tuple(xi.value for xi in self.x) + return self.r1.value, self.r2.value, tuple(xi.value for xi in self.x) def check_convexity_conditions(self, relax=False): """Returns True if all convexity conditions for the conic constraint are satisfied. If relax is True, then variable domains are ignored and it is assumed that all variables are continuous.""" - return (relax or \ - (self.r1.is_continuous() and \ - self.r2.is_continuous() and \ - all(xi.is_continuous() for xi in self.x))) and \ - (self.r1.has_lb() and value(self.r1.lb) >= 0) and \ - (self.r2.has_lb() and value(self.r2.lb) >= 0) + return ( + ( + relax + or ( + self.r1.is_continuous() + and self.r2.is_continuous() + and all(xi.is_continuous() for xi in self.x) + ) + ) + and (self.r1.has_lb() and value(self.r1.lb) >= 0) + and (self.r2.has_lb() and value(self.r2.lb) >= 0) + ) + class primal_exponential(_ConicBase): """A primal exponential conic constraint of the form: @@ -355,14 +364,17 @@ class primal_exponential(_ConicBase): x2 : :class:`variable` A variable. """ - __slots__ = ("_parent", - "_storage_key", - "_active", - "_body", - "_r", - "_x1", - "_x2", - "__weakref__") + + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_body", + "_r", + "_x1", + "_x2", + "__weakref__", + ) def __init__(self, r, x1, x2): super(primal_exponential, self).__init__() @@ -393,8 +405,7 @@ def as_domain(cls, r, x1, x2): b.r = variable(lb=0) b.x1 = variable(lb=0) b.x2 = variable() - b.c = _build_linking_constraints([r,x1,x2], - [b.r,b.x1,b.x2]) + b.c = _build_linking_constraints([r, x1, x2], [b.r, b.x1, b.x2]) b.q = cls(r=b.r, x1=b.x1, x2=b.x2) return b @@ -416,7 +427,7 @@ def x2(self): def _body_function(self, r, x1, x2): """A function that defines the body expression""" - return x1*exp(x2/x1) - r + return x1 * exp(x2 / x1) - r def _body_function_variables(self, values=False): """Returns variables in the order they should be @@ -433,12 +444,19 @@ def check_convexity_conditions(self, relax=False): conic constraint are satisfied. If relax is True, then variable domains are ignored and it is assumed that all variables are continuous.""" - return (relax or \ - (self.x1.is_continuous() and \ - self.x2.is_continuous() and \ - self.r.is_continuous())) and \ - (self.x1.has_lb() and value(self.x1.lb) >= 0) and \ - (self.r.has_lb() and value(self.r.lb) >= 0) + return ( + ( + relax + or ( + self.x1.is_continuous() + and self.x2.is_continuous() + and self.r.is_continuous() + ) + ) + and (self.x1.has_lb() and value(self.x1.lb) >= 0) + and (self.r.has_lb() and value(self.r.lb) >= 0) + ) + class primal_power(_ConicBase): """A primal power conic constraint of the form: @@ -458,15 +476,18 @@ class primal_power(_ConicBase): alpha : float, :class:`parameter`, etc. A constant term. """ - __slots__ = ("_parent", - "_storage_key", - "_active", - "_body", - "_r1", - "_r2", - "_x", - "_alpha", - "__weakref__") + + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_body", + "_r1", + "_r2", + "_x", + "_alpha", + "__weakref__", + ) def __init__(self, r1, r2, x, alpha): super(primal_power, self).__init__() @@ -476,13 +497,13 @@ def __init__(self, r1, r2, x, alpha): self._alpha = alpha assert isinstance(self._r1, IVariable) assert isinstance(self._r2, IVariable) - assert all(isinstance(xi, IVariable) - for xi in self._x) + assert all(isinstance(xi, IVariable) for xi in self._x) if not is_numeric_data(self._alpha): raise TypeError( "The type of the alpha parameter of a conic " "constraint is restricted numeric data or " - "objects that store numeric data.") + "objects that store numeric data." + ) @classmethod def as_domain(cls, r1, r2, x, alpha): @@ -503,10 +524,8 @@ def as_domain(cls, r1, r2, x, alpha): b = block() b.r1 = variable(lb=0) b.r2 = variable(lb=0) - b.x = variable_tuple( - [variable() for i in range(len(x))]) - b.c = _build_linking_constraints([r1,r2] + list(x), - [b.r1,b.r2] + list(b.x)) + b.x = variable_tuple([variable() for i in range(len(x))]) + b.c = _build_linking_constraints([r1, r2] + list(x), [b.r1, b.r2] + list(b.x)) b.q = cls(r1=b.r1, r2=b.r2, x=b.x, alpha=alpha) return b @@ -533,9 +552,7 @@ def alpha(self): def _body_function(self, r1, r2, x): """A function that defines the body expression""" alpha = self.alpha - return (sum(xi**2 for xi in x)**0.5) - \ - (r1**alpha) * \ - (r2**(1-alpha)) + return (sum(xi**2 for xi in x) ** 0.5) - (r1**alpha) * (r2 ** (1 - alpha)) def _body_function_variables(self, values=False): """Returns variables in the order they should be @@ -545,8 +562,7 @@ def _body_function_variables(self, values=False): if not values: return self.r1, self.r2, self.x else: - return self.r1.value, self.r2.value, \ - tuple(xi.value for xi in self.x) + return self.r1.value, self.r2.value, tuple(xi.value for xi in self.x) def check_convexity_conditions(self, relax=False): """Returns True if all convexity conditions for the @@ -554,13 +570,80 @@ def check_convexity_conditions(self, relax=False): then variable domains are ignored and it is assumed that all variables are continuous.""" alpha = value(self.alpha, exception=False) - return (relax or \ - (self.r1.is_continuous() and \ - self.r2.is_continuous() and \ - all(xi.is_continuous() for xi in self.x))) and \ - (self.r1.has_lb() and value(self.r1.lb) >= 0) and \ - (self.r2.has_lb() and value(self.r2.lb) >= 0) and \ - ((alpha is not None) and (0 < alpha < 1)) + return ( + ( + relax + or ( + self.r1.is_continuous() + and self.r2.is_continuous() + and all(xi.is_continuous() for xi in self.x) + ) + ) + and (self.r1.has_lb() and value(self.r1.lb) >= 0) + and (self.r2.has_lb() and value(self.r2.lb) >= 0) + and ((alpha is not None) and (0 < alpha < 1)) + ) + + +class primal_geomean(_ConicBase): + """A primal geometric mean conic constraint of the form: + (r[0]*...*r[n-2])^(1/(n-1)) >= |x[n-1]| + + Parameters + ---------- + r : :class:`variable` + An iterable of variables. + x : :class:`variable` + A scalar variable. + + """ + + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_body", + "_r", + "_x", + "__weakref__", + ) + + def __init__(self, r, x): + super(primal_geomean, self).__init__() + self._r = tuple(r) + self._x = x + assert isinstance(self._x, IVariable) + assert all(isinstance(ri, IVariable) for ri in self._r) + + @classmethod + def as_domain(cls, r, x): + """Builds a conic domain. Input arguments take the + same form as those of the conic constraint, but in + place of each variable, one can optionally supply a + constant, linear expression, or None. + + Returns + ------- + block + A block object with the core conic constraint + (block.q) expressed using auxiliary variables + (block.r, block.x) linked to the input arguments + through auxiliary constraints (block.c).""" + b = block() + b.r = variable_tuple([variable(lb=0) for i in range(len(r))]) + b.x = variable() + b.c = _build_linking_constraints(list(r) + [x], list(b.r) + [x]) + b.q = cls(r=b.r, x=b.x) + return b + + @property + def r(self): + return self._r + + @property + def x(self): + return self._x + class dual_exponential(_ConicBase): """A dual exponential conic constraint of the form: @@ -578,14 +661,17 @@ class dual_exponential(_ConicBase): x2 : :class:`variable` A variable. """ - __slots__ = ("_parent", - "_storage_key", - "_active", - "_body", - "_r", - "_x1", - "_x2", - "__weakref__") + + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_body", + "_r", + "_x1", + "_x2", + "__weakref__", + ) def __init__(self, r, x1, x2): super(dual_exponential, self).__init__() @@ -616,8 +702,7 @@ def as_domain(cls, r, x1, x2): b.r = variable(lb=0) b.x1 = variable() b.x2 = variable(ub=0) - b.c = _build_linking_constraints([r,x1,x2], - [b.r,b.x1,b.x2]) + b.c = _build_linking_constraints([r, x1, x2], [b.r, b.x1, b.x2]) b.q = cls(r=b.r, x1=b.x1, x2=b.x2) return b @@ -639,7 +724,7 @@ def x2(self): def _body_function(self, r, x1, x2): """A function that defines the body expression""" - return -x2*exp((x1/x2) - 1) - r + return -x2 * exp((x1 / x2) - 1) - r def _body_function_variables(self, values=False): """Returns variables in the order they should be @@ -656,18 +741,26 @@ def check_convexity_conditions(self, relax=False): conic constraint are satisfied. If relax is True, then variable domains are ignored and it is assumed that all variables are continuous.""" - return (relax or \ - (self.x1.is_continuous() and \ - self.x2.is_continuous() and \ - self.r.is_continuous())) and \ - (self.x2.has_ub() and value(self.x2.ub) <= 0) and \ - (self.r.has_lb() and value(self.r.lb) >= 0) + return ( + ( + relax + or ( + self.x1.is_continuous() + and self.x2.is_continuous() + and self.r.is_continuous() + ) + ) + and (self.x2.has_ub() and value(self.x2.ub) <= 0) + and (self.r.has_lb() and value(self.r.lb) >= 0) + ) + class dual_power(_ConicBase): """A dual power conic constraint of the form: - sqrt(x[0]^2 + ... + x[n-1]^2) <= ((r1/alpha)^alpha) * \ - ((r2/(1-alpha))^(1-alpha)) + sqrt(x[0]^2 + ... + x[n-1]^2) + <= + ((r1/alpha)^alpha) * ((r2/(1-alpha))^(1-alpha)) which is recognized as convex for r1,r2 >= 0 and 0 < alpha < 1. @@ -683,15 +776,18 @@ class dual_power(_ConicBase): alpha : float, :class:`parameter`, etc. A constant term. """ - __slots__ = ("_parent", - "_storage_key", - "_active", - "_body", - "_r1", - "_r2", - "_x", - "_alpha", - "__weakref__") + + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_body", + "_r1", + "_r2", + "_x", + "_alpha", + "__weakref__", + ) def __init__(self, r1, r2, x, alpha): super(dual_power, self).__init__() @@ -701,13 +797,13 @@ def __init__(self, r1, r2, x, alpha): self._alpha = alpha assert isinstance(self._r1, IVariable) assert isinstance(self._r2, IVariable) - assert all(isinstance(xi, IVariable) - for xi in self._x) + assert all(isinstance(xi, IVariable) for xi in self._x) if not is_numeric_data(self._alpha): raise TypeError( "The type of the alpha parameter of a conic " "constraint is restricted numeric data or " - "objects that store numeric data.") + "objects that store numeric data." + ) @classmethod def as_domain(cls, r1, r2, x, alpha): @@ -728,10 +824,8 @@ def as_domain(cls, r1, r2, x, alpha): b = block() b.r1 = variable(lb=0) b.r2 = variable(lb=0) - b.x = variable_tuple( - [variable() for i in range(len(x))]) - b.c = _build_linking_constraints([r1,r2] + list(x), - [b.r1,b.r2] + list(b.x)) + b.x = variable_tuple([variable() for i in range(len(x))]) + b.c = _build_linking_constraints([r1, r2] + list(x), [b.r1, b.r2] + list(b.x)) b.q = cls(r1=b.r1, r2=b.r2, x=b.x, alpha=alpha) return b @@ -758,9 +852,9 @@ def alpha(self): def _body_function(self, r1, r2, x): """A function that defines the body expression""" alpha = self.alpha - return (sum(xi**2 for xi in x)**0.5) - \ - ((r1/alpha)**alpha) * \ - ((r2/(1-alpha))**(1-alpha)) + return (sum(xi**2 for xi in x) ** 0.5) - ((r1 / alpha) ** alpha) * ( + (r2 / (1 - alpha)) ** (1 - alpha) + ) def _body_function_variables(self, values=False): """Returns variables in the order they should be @@ -770,8 +864,7 @@ def _body_function_variables(self, values=False): if not values: return self.r1, self.r2, self.x else: - return self.r1.value, self.r2.value, \ - tuple(xi.value for xi in self.x) + return self.r1.value, self.r2.value, tuple(xi.value for xi in self.x) def check_convexity_conditions(self, relax=False): """Returns True if all convexity conditions for the @@ -779,10 +872,128 @@ def check_convexity_conditions(self, relax=False): then variable domains are ignored and it is assumed that all variables are continuous.""" alpha = value(self.alpha, exception=False) - return (relax or \ - (self.r1.is_continuous() and \ - self.r2.is_continuous() and \ - all(xi.is_continuous() for xi in self.x))) and \ - (self.r1.has_lb() and value(self.r1.lb) >= 0) and \ - (self.r2.has_lb() and value(self.r2.lb) >= 0) and \ - ((alpha is not None) and (0 < alpha < 1)) + return ( + ( + relax + or ( + self.r1.is_continuous() + and self.r2.is_continuous() + and all(xi.is_continuous() for xi in self.x) + ) + ) + and (self.r1.has_lb() and value(self.r1.lb) >= 0) + and (self.r2.has_lb() and value(self.r2.lb) >= 0) + and ((alpha is not None) and (0 < alpha < 1)) + ) + + +class dual_geomean(_ConicBase): + """A dual geometric mean conic constraint of the form: + (n-1)*(r[0]*...*r[n-2])^(1/(n-1)) >= |x[n-1]| + + Parameters + ---------- + r : :class:`variable` + An iterable of variables. + x : :class:`variable` + A scalar variable. + + """ + + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_body", + "_r", + "_x", + "__weakref__", + ) + + def __init__(self, r, x): + super(dual_geomean, self).__init__() + self._r = tuple(r) + self._x = x + assert isinstance(self._x, IVariable) + assert all(isinstance(ri, IVariable) for ri in self._r) + + @classmethod + def as_domain(cls, r, x): + """Builds a conic domain. Input arguments take the + same form as those of the conic constraint, but in + place of each variable, one can optionally supply a + constant, linear expression, or None. + + Returns + ------- + block + A block object with the core conic constraint + (block.q) expressed using auxiliary variables + (block.r, block.x) linked to the input arguments + through auxiliary constraints (block.c).""" + b = block() + b.r = variable_tuple([variable(lb=0) for i in range(len(r))]) + b.x = variable() + b.c = _build_linking_constraints(list(r) + [x], list(b.r) + [x]) + b.q = cls(r=b.r, x=b.x) + return b + + @property + def r(self): + return self._r + + @property + def x(self): + return self._x + + +class svec_psdcone(_ConicBase): + """A domain consisting of vectorizations of the lower-triangular + part of a positive semidefinite matrx, with the non-diagonal + elements additionally rescaled. In other words, if a vector 'x' + of length n = d*(d+1)/2 belongs to this cone, then the matrix: + + sMat(x) = [[ x[1], x[2]/sqrt(2), ..., x[d]/sqrt(2)], + [x[2]/sqrt(2), x[d+1], ..., x[2d-1]/sqrt(2)], + ... + [x[d]/sqrt(2), x[2d-1]/sqrt(2), ..., x[d*(d+1)/2]/sqrt(2)]] + + will be restricted to be a positive-semidefinite matrix. + + Parameters + ---------- + x : :class:`variable` + An iterable of variables with length d*(d+1)/2. + + """ + + __slots__ = ("_parent", "_storage_key", "_active", "_body", "_x", "__weakref__") + + def __init__(self, x): + super(svec_psdcone, self).__init__() + self._x = tuple(x) + assert all(isinstance(xi, IVariable) for xi in self._x) + + @classmethod + def as_domain(cls, x): + """Builds a conic domain. Input arguments take the + same form as those of the conic constraint, but in + place of each variable, one can optionally supply a + constant, linear expression, or None. + + Returns + ------- + block + A block object with the core conic constraint + (block.q) expressed using auxiliary variables + (block.r, block.x) linked to the input arguments + through auxiliary constraints (block.c).""" + b = block() + b.x = variable_tuple([variable() for i in range(len(x))]) + b.c = _build_linking_constraints(list(x), list(b.x)) + b.q = cls(x=b.x) + return b + + @property + def x(self): + return self._x diff --git a/pyomo/core/kernel/constraint.py b/pyomo/core/kernel/constraint.py index 6d27d5c7651..3c5b8164fe1 100644 --- a/pyomo/core/kernel/constraint.py +++ b/pyomo/core/kernel/constraint.py @@ -9,23 +9,30 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.expr.numvalue import (ZeroConstant, - as_numeric, - is_potentially_variable, - is_numeric_data, - value) -from pyomo.core.expr import logical_expr -from pyomo.core.kernel.base import \ - (ICategorizedObject, - _abstract_readonly_property) -from pyomo.core.kernel.container_utils import \ - define_simple_containers +from pyomo.core.expr.numvalue import ( + ZeroConstant, + as_numeric, + is_potentially_variable, + is_numeric_data, + value, +) +from pyomo.core.expr.expr_common import ExpressionType +from pyomo.core.expr.relational_expr import ( + EqualityExpression, + RangedExpression, + InequalityExpression, +) +from pyomo.core.kernel.base import ICategorizedObject, _abstract_readonly_property +from pyomo.core.kernel.container_utils import define_simple_containers _pos_inf = float('inf') _neg_inf = float('-inf') +_RELATIONAL = ExpressionType.RELATIONAL + class IConstraint(ICategorizedObject): """The interface for constraints""" + __slots__ = () # @@ -35,26 +42,33 @@ class IConstraint(ICategorizedObject): # body = _abstract_readonly_property( - doc="The expression for the body of the constraint") + doc="The expression for the body of the constraint" + ) lower = _abstract_readonly_property( - doc="The expression for the lower bound of the constraint") + doc="The expression for the lower bound of the constraint" + ) upper = _abstract_readonly_property( - doc="The expression for the upper bound of the constraint") + doc="The expression for the upper bound of the constraint" + ) lb = _abstract_readonly_property( - doc="The value of the lower bound of the constraint") + doc="The value of the lower bound of the constraint" + ) ub = _abstract_readonly_property( - doc="The value of the upper bound of the constraint") - rhs = _abstract_readonly_property( - doc="The right-hand side of the constraint") + doc="The value of the upper bound of the constraint" + ) + rhs = _abstract_readonly_property(doc="The right-hand side of the constraint") equality = _abstract_readonly_property( - doc=("A boolean indicating whether this " - "is an equality constraint")) + doc=("A boolean indicating whether this is an equality constraint") + ) _linear_canonical_form = _abstract_readonly_property( - doc=("Indicates whether or not the class or " - "instance provides the properties that " - "define the linear canonical form of a " - "constraint")) + doc=( + "Indicates whether or not the class or " + "instance provides the properties that " + "define the linear canonical form of a " + "constraint" + ) + ) # # Interface @@ -127,7 +141,7 @@ def expr(self): return body_expr <= self.ub elif self.ub is None: return self.lb <= body_expr - return logical_expr.RangedExpression((self.lb, body_expr, self.ub), (False, False)) + return RangedExpression((self.lb, body_expr, self.ub), (False, False)) @property def bounds(self): @@ -138,15 +152,14 @@ def has_lb(self): """Returns :const:`False` when the lower bound is :const:`None` or negative infinity""" lb = self.lb - return (lb is not None) and \ - (value(lb) != float('-inf')) + return (lb is not None) and (value(lb) != float('-inf')) def has_ub(self): """Returns :const:`False` when the upper bound is :const:`None` or positive infinity""" ub = self.ub - return (ub is not None) and \ - (value(ub) != float('inf')) + return (ub is not None) and (value(ub) != float('inf')) + class _MutableBoundsConstraintMixin(object): """ @@ -157,6 +170,7 @@ class _MutableBoundsConstraintMixin(object): Assumes the derived class has _lb, _ub, and _equality attributes that can be modified. """ + __slots__ = () # @@ -167,40 +181,45 @@ class _MutableBoundsConstraintMixin(object): def lower(self): """The expression for the lower bound of the constraint""" return self._lb + @lower.setter def lower(self, lb): if self.equality: raise ValueError( "The lower property can not be set " - "when the equality property is True.") - if (lb is not None) and \ - (not is_numeric_data(lb)): + "when the equality property is True." + ) + if (lb is not None) and (not is_numeric_data(lb)): raise TypeError( - "Constraint lower bounds must be " - "expressions restricted to numeric data.") + "Constraint lower bounds must be " + "expressions restricted to numeric data." + ) self._lb = lb @property def upper(self): """The expression for the upper bound of the constraint""" return self._ub + @upper.setter def upper(self, ub): if self.equality: raise ValueError( "The upper property can not be set " - "when the equality property is True.") - if (ub is not None) and \ - (not is_numeric_data(ub)): + "when the equality property is True." + ) + if (ub is not None) and (not is_numeric_data(ub)): raise TypeError( - "Constraint upper bounds must be " - "expressions restricted to numeric data.") + "Constraint upper bounds must be " + "expressions restricted to numeric data." + ) self._ub = ub @property def lb(self): """The value of the lower bound of the constraint""" return value(self._lb) + @lb.setter def lb(self, lb): self.lower = lb @@ -209,6 +228,7 @@ def lb(self, lb): def ub(self): """The value of the upper bound of the constraint""" return value(self._ub) + @ub.setter def ub(self, ub): self.upper = ub @@ -219,8 +239,10 @@ def rhs(self): if not self.equality: raise ValueError( "The rhs property can not be read " - "when the equality property is False.") + "when the equality property is False." + ) return self._lb + @rhs.setter def rhs(self, rhs): if rhs is None: @@ -228,12 +250,13 @@ def rhs(self, rhs): # context (lb or ub), so there is no way to # interpret this raise ValueError( - "Constraint right-hand side can not " - "be assigned a value of None.") + "Constraint right-hand side can not be assigned a value of None." + ) elif not is_numeric_data(rhs): raise TypeError( - "Constraint right-hand side must be numbers " - "or expressions restricted to data.") + "Constraint right-hand side must be numbers " + "or expressions restricted to data." + ) self._lb = rhs self._ub = rhs self._equality = True @@ -242,6 +265,7 @@ def rhs(self, rhs): def bounds(self): """The bounds of the constraint as a tuple (lb, ub)""" return super(_MutableBoundsConstraintMixin, self).bounds + @bounds.setter def bounds(self, bounds_tuple): self.lb, self.ub = bounds_tuple @@ -255,6 +279,7 @@ def equality(self): :const:`False`. Equality can only be activated by assigning a value to the .rhs property.""" return self._equality + @equality.setter def equality(self, equality): if equality: @@ -262,12 +287,13 @@ def equality(self, equality): "The constraint equality flag can " "only be set to True by assigning " "a value to the rhs property " - "(e.g., con.rhs = con.lb).") + "(e.g., con.rhs = con.lb)." + ) assert not equality self._equality = False -class constraint(_MutableBoundsConstraintMixin, - IConstraint): + +class constraint(_MutableBoundsConstraintMixin, IConstraint): """A general algebraic constraint Algebraic constraints store relational expressions @@ -324,23 +350,21 @@ class constraint(_MutableBoundsConstraintMixin, >>> # (equivalent form) >>> c = pmo.constraint(body=x**2, rhs=1) """ + _ctype = IConstraint _linear_canonical_form = False - __slots__ = ("_parent", - "_storage_key", - "_active", - "_body", - "_lb", - "_ub", - "_equality", - "__weakref__") - - def __init__(self, - expr=None, - body=None, - lb=None, - ub=None, - rhs=None): + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_body", + "_lb", + "_ub", + "_equality", + "__weakref__", + ) + + def __init__(self, expr=None, body=None, lb=None, ub=None, rhs=None): self._parent = None self._storage_key = None self._active = True @@ -351,21 +375,29 @@ def __init__(self, if expr is not None: if body is not None: - raise ValueError("Both the 'expr' and 'body' " - "keywords can not be used to " - "initialize a constraint.") + raise ValueError( + "Both the 'expr' and 'body' " + "keywords can not be used to " + "initialize a constraint." + ) if lb is not None: - raise ValueError("Both the 'expr' and 'lb' " - "keywords can not be used to " - "initialize a constraint.") + raise ValueError( + "Both the 'expr' and 'lb' " + "keywords can not be used to " + "initialize a constraint." + ) if ub is not None: - raise ValueError("Both the 'expr' and 'ub' " - "keywords can not be used to " - "initialize a constraint.") + raise ValueError( + "Both the 'expr' and 'ub' " + "keywords can not be used to " + "initialize a constraint." + ) if rhs is not None: - raise ValueError("Both the 'expr' and 'rhs' " - "keywords can not be used to " - "initialize a constraint.") + raise ValueError( + "Both the 'expr' and 'rhs' " + "keywords can not be used to " + "initialize a constraint." + ) # call the setter self.expr = expr else: @@ -374,12 +406,13 @@ def __init__(self, self.lb = lb self.ub = ub else: - if ((lb is not None) or \ - (ub is not None)): - raise ValueError("The 'rhs' keyword can not " - "be used with the 'lb' or " - "'ub' keywords to initialize" - " a constraint.") + if (lb is not None) or (ub is not None): + raise ValueError( + "The 'rhs' keyword can not " + "be used with the 'lb' or " + "'ub' keywords to initialize" + " a constraint." + ) self.rhs = rhs # @@ -390,6 +423,7 @@ def __init__(self, def body(self): """The body of the constraint""" return self._body + @body.setter def body(self, body): if body is not None: @@ -405,10 +439,10 @@ def body(self, body): @property def expr(self): """Get or set the expression on this constraint.""" - return super(constraint,self).expr + return super(constraint, self).expr + @expr.setter def expr(self, expr): - self._equality = False if expr is None: self.body = None @@ -449,8 +483,8 @@ def expr(self, expr): " expression, upper) but the lower " "value was not numeric data or an " "expression restricted to storage of " - "numeric data." - % (self.name)) + "numeric data." % (self.name) + ) arg1 = expr[1] if arg1 is not None: @@ -464,8 +498,8 @@ def expr(self, expr): " expression, upper) but the upper " "value was not numeric data or an " "expression restricted to storage of " - "numeric data." - % (self.name)) + "numeric data." % (self.name) + ) elif arg1 is not None and is_numeric_data(arg1): # Special case (reflect behavior of AML): if the # upper bound is None and the "body" is only data, @@ -474,7 +508,7 @@ def expr(self, expr): arg0, arg1, arg2 = arg2, arg0, arg1 self.lb = arg0 - self.body = arg1 + self.body = arg1 self.ub = arg2 else: raise ValueError( @@ -482,36 +516,39 @@ def expr(self, expr): "of length %d. Expecting a tuple of " "length 2 or 3:\n" "Equality: (body, rhs)\n" - "Inequality: (lb, body, ub)" - % (self.name, len(expr))) + "Inequality: (lb, body, ub)" % (self.name, len(expr)) + ) relational_expr = False else: try: - relational_expr = expr.is_relational() + relational_expr = expr.is_expression_type(_RELATIONAL) if not relational_expr: raise ValueError( "Constraint '%s' does not have a proper " "value. Found '%s'\nExpecting a tuple or " "equation. Examples:" "\n sum_product(model.costs) == model.income" - "\n (0, model.price[item], 50)" - % (self.name, str(expr))) + "\n (0, model.price[item], 50)" % (self.name, str(expr)) + ) except AttributeError: - msg = ("Constraint '%s' does not have a proper " - "value. Found '%s'\nExpecting a tuple or " - "equation. Examples:" - "\n sum_product(model.costs) == model.income" - "\n (0, model.price[item], 50)" - % (self.name, str(expr))) + msg = ( + "Constraint '%s' does not have a proper " + "value. Found '%s'\nExpecting a tuple or " + "equation. Examples:" + "\n sum_product(model.costs) == model.income" + "\n (0, model.price[item], 50)" % (self.name, str(expr)) + ) if type(expr) is bool: - msg += ("\nNote: constant Boolean expressions " - "are not valid constraint expressions. " - "Some apparently non-constant compound " - "inequalities (e.g. 'expr >= 0 <= 1') " - "can return boolean values; the proper " - "form for compound inequalities is " - "always 'lb <= expr <= ub'.") + msg += ( + "\nNote: constant Boolean expressions " + "are not valid constraint expressions. " + "Some apparently non-constant compound " + "inequalities (e.g. 'expr >= 0 <= 1') " + "can return boolean values; the proper " + "form for compound inequalities is " + "always 'lb <= expr <= ub'." + ) raise ValueError(msg) # @@ -519,7 +556,7 @@ def expr(self, expr): # (i.e. explicit '==', '<', and '<=') # if relational_expr: - if _expr_type is logical_expr.EqualityExpression: + if _expr_type is EqualityExpression: # assigning to the rhs property # will set the equality flag to True if not is_potentially_variable(expr.arg(1)): @@ -533,14 +570,14 @@ def expr(self, expr): self.body = expr.arg(0) self.body -= expr.arg(1) - elif _expr_type is logical_expr.InequalityExpression: + elif _expr_type is InequalityExpression: if expr._strict: raise ValueError( "Constraint '%s' encountered a strict " "inequality expression ('>' or '<'). All" " constraints must be formulated using " - "using '<=', '>=', or '=='." - % (self.name)) + "using '<=', '>=', or '=='." % (self.name) + ) if not is_potentially_variable(expr.arg(1)): self.lb = None self.body = expr.arg(0) @@ -551,18 +588,18 @@ def expr(self, expr): self.ub = None else: self.lb = None - self.body = expr.arg(0) + self.body = expr.arg(0) self.body -= expr.arg(1) self.ub = ZeroConstant - else: # RangedExpression + else: # RangedExpression if any(expr._strict): raise ValueError( "Constraint '%s' encountered a strict " "inequality expression ('>' or '<'). All" " constraints must be formulated using " - "using '<=', '>=', or '=='." - % (self.name)) + "using '<=', '>=', or '=='." % (self.name) + ) if not is_numeric_data(expr.arg(0)): raise ValueError( @@ -571,20 +608,20 @@ def expr(self, expr): "expression <= upper) but the lower " "bound was not numeric data or an " "expression restricted to storage of " - "numeric data." - % (self.name)) + "numeric data." % (self.name) + ) if not is_numeric_data(expr.arg(2)): raise ValueError( - "Constraint '%s' found a double-sided "\ + "Constraint '%s' found a double-sided " "inequality expression (lower <= " "expression <= upper) but the upper " "bound was not numeric data or an " "expression restricted to storage of " - "numeric data." - % (self.name)) + "numeric data." % (self.name) + ) self.lb = expr.arg(0) - self.body = expr.arg(1) + self.body = expr.arg(1) self.ub = expr.arg(2) # @@ -594,12 +631,12 @@ def expr(self, expr): assert not (self.equality and (self.lower is None)) assert (not self.equality) or (self.lower is self.upper) + # # Note: This class is experimental. The implementation may # change or it may go away. # -class linear_constraint(_MutableBoundsConstraintMixin, - IConstraint): +class linear_constraint(_MutableBoundsConstraintMixin, IConstraint): """A linear constraint A linear constraint stores a linear relational @@ -658,25 +695,24 @@ class linear_constraint(_MutableBoundsConstraintMixin, >>> # (equivalent form using a general constraint) >>> c = pmo.constraint(x + 2*y <= 1) """ + _ctype = IConstraint _linear_canonical_form = True - __slots__ = ("_parent", - "_storage_key", - "_active", - "_variables", - "_coefficients", - "_lb", - "_ub", - "_equality", - "__weakref__") - - def __init__(self, - variables=None, - coefficients=None, - terms=None, - lb=None, - ub=None, - rhs=None): + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_variables", + "_coefficients", + "_lb", + "_ub", + "_equality", + "__weakref__", + ) + + def __init__( + self, variables=None, coefficients=None, terms=None, lb=None, ub=None, rhs=None + ): self._parent = None self._storage_key = None self._active = True @@ -687,20 +723,21 @@ def __init__(self, self._equality = False if terms is not None: - if (variables is not None) or \ - (coefficients is not None): - raise ValueError("Both the 'variables' and 'coefficients' " - "keywords must be None when the 'terms' " - "keyword is not None") + if (variables is not None) or (coefficients is not None): + raise ValueError( + "Both the 'variables' and 'coefficients' " + "keywords must be None when the 'terms' " + "keyword is not None" + ) # use the setter method self.terms = terms - elif (variables is not None) or \ - (coefficients is not None): - if (variables is None) or \ - (coefficients is None): - raise ValueError("Both the 'variables' and 'coefficients' " - "keywords must be set when the 'terms' " - "keyword is None") + elif (variables is not None) or (coefficients is not None): + if (variables is None) or (coefficients is None): + raise ValueError( + "Both the 'variables' and 'coefficients' " + "keywords must be set when the 'terms' " + "keyword is None" + ) self._variables = tuple(variables) self._coefficients = tuple(coefficients) else: @@ -712,12 +749,13 @@ def __init__(self, self.lb = lb self.ub = ub else: - if ((lb is not None) or \ - (ub is not None)): - raise ValueError("The 'rhs' keyword can not " - "be used with the 'lb' or " - "'ub' keywords to initialize" - " a constraint.") + if (lb is not None) or (ub is not None): + raise ValueError( + "The 'rhs' keyword can not " + "be used with the 'lb' or " + "'ub' keywords to initialize" + " a constraint." + ) self.rhs = rhs @property @@ -725,6 +763,7 @@ def terms(self): """An iterator over the terms in the body of this constraint as (variable, coefficient) tuples""" return zip(self._variables, self._coefficients) + @terms.setter def terms(self, terms): """Set the terms in the body of this constraint @@ -744,12 +783,13 @@ def terms(self, terms): def __call__(self, exception=True): try: - return sum(value(c, exception=exception) * \ - v(exception=exception) for v,c in self.terms) + return sum( + value(c, exception=exception) * v(exception=exception) + for v, c in self.terms + ) except (ValueError, TypeError): if exception: - raise ValueError("one or more terms " - "could not be evaluated") + raise ValueError("one or more terms could not be evaluated") return None # @@ -769,8 +809,8 @@ def body(self): def canonical_form(self, compute_values=True): """Build a canonical representation of the body of this constraints""" - from pyomo.repn.standard_repn import \ - StandardRepn + from pyomo.repn.standard_repn import StandardRepn + variables = [] coefficients = [] constant = 0 @@ -794,8 +834,7 @@ def canonical_form(self, compute_values=True): repn.constant = constant return repn + # inserts class definitions for simple _tuple, _list, and # _dict containers into this module -define_simple_containers(globals(), - "constraint", - IConstraint) +define_simple_containers(globals(), "constraint", IConstraint) diff --git a/pyomo/core/kernel/container_utils.py b/pyomo/core/kernel/container_utils.py index ed8dde58c87..7f3329aadb3 100644 --- a/pyomo/core/kernel/container_utils.py +++ b/pyomo/core/kernel/container_utils.py @@ -13,12 +13,10 @@ from pyomo.core.kernel.tuple_container import TupleContainer from pyomo.core.kernel.list_container import ListContainer -def define_homogeneous_container_type(namespace, - name, - container_class, - ctype, - doc=None, - use_slots=True): + +def define_homogeneous_container_type( + namespace, name, container_class, ctype, doc=None, use_slots=True +): """ This function is designed to be called for the simple container implementations (DictContainer, TupleContainer, @@ -50,40 +48,42 @@ def __init__(self, *args, **kwds): cls_dict = {} cls_dict['_ctype'] = ctype if use_slots: - cls_dict['__slots__'] = ("_parent", - "_storage_key", - "_active", - "_data", - "__weakref__") + cls_dict['__slots__'] = ( + "_parent", + "_storage_key", + "_active", + "_data", + "__weakref__", + ) def _init(self, *args, **kwds): self._parent = None self._storage_key = None self._active = True container_class.__init__(self, *args, **kwds) + cls_dict['__init__'] = _init cls_dict['__module__'] = namespace['__name__'] if doc is not None: cls_dict['__doc__'] = doc - namespace[name] = type(name, - (container_class,), - cls_dict) + namespace[name] = type(name, (container_class,), cls_dict) + -def define_simple_containers(namespace, - prefix, - ctype, - use_slots=True): +def define_simple_containers(namespace, prefix, ctype, use_slots=True): """Use this function to define all three simple container definitions for a new object category type.""" - doc_ = ("A %s-style container for objects " - "with category type "+ctype.__name__) - for suffix, container_class in (('tuple', TupleContainer), - ('list', ListContainer), - ('dict', DictContainer)): + doc_ = "A %s-style container for objects with category type " + ctype.__name__ + for suffix, container_class in ( + ('tuple', TupleContainer), + ('list', ListContainer), + ('dict', DictContainer), + ): doc = doc_ % (suffix,) - define_homogeneous_container_type(namespace, - prefix+"_"+suffix, - container_class, - ctype, - doc=doc, - use_slots=use_slots) + define_homogeneous_container_type( + namespace, + prefix + "_" + suffix, + container_class, + ctype, + doc=doc, + use_slots=use_slots, + ) diff --git a/pyomo/core/kernel/dict_container.py b/pyomo/core/kernel/dict_container.py index 3b9e1b7a863..b86d9c5b8f2 100644 --- a/pyomo/core/kernel/dict_container.py +++ b/pyomo/core/kernel/dict_container.py @@ -12,14 +12,12 @@ import logging import collections.abc -from pyomo.core.kernel.homogeneous_container import \ - IHomogeneousContainer +from pyomo.core.kernel.homogeneous_container import IHomogeneousContainer logger = logging.getLogger('pyomo.core') -class DictContainer(IHomogeneousContainer, - collections.abc.MutableMapping): +class DictContainer(IHomogeneousContainer, collections.abc.MutableMapping): """ A partial implementation of the IHomogeneousContainer interface that provides dict-like storage functionality. @@ -33,6 +31,7 @@ class DictContainer(IHomogeneousContainer, other ICategorizedObjectContainer implementations that are defined with the same ctype. """ + __slots__ = () _child_storage_delimiter_string = "" _child_storage_entry_string = "[%s]" @@ -43,8 +42,8 @@ def __init__(self, *args, **kwds): if len(args) > 1: raise TypeError( "%s expected at most 1 arguments, " - "got %s" % (self.__class__.__name__, - len(args))) + "got %s" % (self.__class__.__name__, len(args)) + ) self.update(args[0]) if len(kwds): self.update(**kwds) @@ -85,9 +84,12 @@ def __setitem__(self, key, item): "indicative of a modeling error. To avoid this " "warning, delete the original object from the " "container before assigning a new object." - % (self[key].name, - self[key].__class__.__name__, - item.__class__.__name__)) + % ( + self[key].name, + self[key].__class__.__name__, + item.__class__.__name__, + ) + ) self._data[key]._clear_parent_and_storage_key() self._fast_insert(key, item) return @@ -105,18 +107,14 @@ def __setitem__(self, key, item): "Invalid assignment to %s type with name '%s' " "at key %s. A parent container has already been " "assigned to the object being inserted: %s" - % (self.__class__.__name__, - self.name, - key, - item.parent.name)) + % (self.__class__.__name__, self.name, key, item.parent.name) + ) else: raise TypeError( "Invalid assignment to type %s with index %s. " "The object being inserted has the wrong " - "category type: %s" - % (self.__class__.__name__, - key, - item.ctype)) + "category type: %s" % (self.__class__.__name__, key, item.ctype) + ) def __delitem__(self, key): self._data[key]._clear_parent_and_storage_key() @@ -145,10 +143,9 @@ def __contains__(self, key): def __eq__(self, other): if not isinstance(other, collections.abc.Mapping): return False - return {key:(type(val), id(val)) - for key, val in self.items()} == \ - {key:(type(val), id(val)) - for key, val in other.items()} + return {key: (type(val), id(val)) for key, val in self.items()} == { + key: (type(val), id(val)) for key, val in other.items() + } def __ne__(self, other): return not (self == other) diff --git a/pyomo/core/kernel/expression.py b/pyomo/core/kernel/expression.py index 3495023dedc..195d8f3189a 100644 --- a/pyomo/core/kernel/expression.py +++ b/pyomo/core/kernel/expression.py @@ -9,18 +9,20 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.expr import current as EXPR -from pyomo.core.kernel.base import \ - (ICategorizedObject, - _abstract_readwrite_property) -from pyomo.core.kernel.container_utils import \ - define_simple_containers -from pyomo.core.expr.numvalue import (NumericValue, - is_fixed, - is_constant, - is_potentially_variable, - is_numeric_data, - value) +from pyomo.common.deprecation import deprecated +from pyomo.common.modeling import NOTSET +import pyomo.core.expr as EXPR +from pyomo.core.kernel.base import ICategorizedObject, _abstract_readwrite_property +from pyomo.core.kernel.container_utils import define_simple_containers +from pyomo.core.expr.numvalue import ( + NumericValue, + is_fixed, + is_constant, + is_potentially_variable, + is_numeric_data, + value, +) + class IIdentityExpression(NumericValue): """The interface for classes that simply wrap another @@ -29,8 +31,13 @@ class IIdentityExpression(NumericValue): Derived classes should declare an _expr attribute or override all implemented methods. """ + __slots__ = () + PRECEDENCE = 0 + + ASSOCIATIVITY = EXPR.OperatorAssociativity.NON_ASSOCIATIVE + @property def expr(self): return self._expr @@ -73,7 +80,7 @@ def is_named_expression_type(self): """A boolean indicating whether this in a named expression.""" return True - def is_expression_type(self): + def is_expression_type(self, expression_system=None): """A boolean indicating whether this in an expression.""" return True @@ -107,9 +114,15 @@ def _compute_polynomial_degree(self, values): def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False): """Convert this expression into a string.""" - return EXPR.expression_to_string(self, verbose=verbose, labeler=labeler, smap=smap, compute_values=compute_values) - - def _to_string(self, values, verbose, smap, compute_values): + return EXPR.expression_to_string( + self, + verbose=verbose, + labeler=labeler, + smap=smap, + compute_values=compute_values, + ) + + def _to_string(self, values, verbose, smap): if verbose: name = self.getname() if name == None: @@ -122,12 +135,6 @@ def _to_string(self, values, verbose, smap, compute_values): return "%s{Undefined}" % str(self) return values[0] - def _precedence(self): - return 0 - - def _associativity(self): - return 0 - def _apply_operation(self, result): return result[0] @@ -145,69 +152,39 @@ def create_node_with_local_data(self, values): return self.__class__(expr=values[0]) def is_constant(self): - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover def is_potentially_variable(self): - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover def clone(self): - raise NotImplementedError #pragma:nocover - -class noclone(IIdentityExpression): - """ - A helper factory class for creating an expression with - cloning disabled. This allows the expression to be used - in two or more parent expressions without causing a copy - to be generated. If it is initialized with a value that - is not an instance of NumericValue, that value is simply - returned. - """ - __slots__ = ("_expr",) - - def __new__(cls, expr): - if isinstance(expr, NumericValue): - return super(noclone, cls).__new__(cls) - else: + raise NotImplementedError # pragma:nocover + + +@deprecated( + "noclone() is deprecated and can be omitted: " + "Pyomo expressions natively support shared subexpressions.", + version='6.6.2.dev0', +) +def noclone(expr): + try: + if expr.is_potentially_variable(): + return expression(expr) + except AttributeError: + pass + try: + if is_constant(expr): return expr + except: + return expr + return data_expression(expr) - def __init__(self, expr): - self._expr = expr - - def __getnewargs__(self): - return (self._expr,) - - def __getstate__(self): - return (self._expr,) - - def __setstate__(self, state): - assert len(state) == 1 - self._expr = state[0] - - def __str__(self): - return "{%s}" % EXPR.expression_to_string(self) - - # - # Override some of the NumericValue methods implemented - # by the base class - # - - def is_constant(self): - """A boolean indicating whether this expression is constant.""" - return is_constant(self._expr) - - def is_potentially_variable(self): - """A boolean indicating whether this expression can - reference variables.""" - return is_potentially_variable(self._expr) - - def clone(self): - """Return a clone of this expression (no-op).""" - return self class IExpression(ICategorizedObject, IIdentityExpression): """ The interface for mutable expressions. """ + __slots__ = () # @@ -216,8 +193,7 @@ class IExpression(ICategorizedObject, IIdentityExpression): # by overriding the @property method # - expr = _abstract_readwrite_property( - doc="The stored expression") + expr = _abstract_readwrite_property(doc="The stored expression") # # Override some of the NumericValue methods implemented @@ -237,14 +213,13 @@ def clone(self): """Return a clone of this expression (no-op).""" return self + class expression(IExpression): """A named, mutable expression.""" + _ctype = IExpression - __slots__ = ("_parent", - "_storage_key", - "_active", - "_expr", - "__weakref__") + __slots__ = ("_parent", "_storage_key", "_active", "_expr", "__weakref__") + def __init__(self, expr=None): self._parent = None self._storage_key = None @@ -261,15 +236,18 @@ def __init__(self, expr=None): @property def expr(self): return self._expr + @expr.setter def expr(self, expr): self._expr = expr + class data_expression(expression): """A named, mutable expression that is restricted to storage of data expressions. An exception will be raised if an expression is assigned that references (or is allowed to reference) variables.""" + __slots__ = () # @@ -290,16 +268,14 @@ def polynomial_degree(self): @property def expr(self): return self._expr + @expr.setter def expr(self, expr): - if (expr is not None) and \ - (not is_numeric_data(expr)): - raise ValueError("Expression is not restricted to " - "numeric data.") + if (expr is not None) and (not is_numeric_data(expr)): + raise ValueError("Expression is not restricted to numeric data.") self._expr = expr + # inserts class definitions for simple _tuple, _list, and # _dict containers into this module -define_simple_containers(globals(), - "expression", - IExpression) +define_simple_containers(globals(), "expression", IExpression) diff --git a/pyomo/core/kernel/heterogeneous_container.py b/pyomo/core/kernel/heterogeneous_container.py index 0dcdfea23a7..43846673838 100644 --- a/pyomo/core/kernel/heterogeneous_container.py +++ b/pyomo/core/kernel/heterogeneous_container.py @@ -9,16 +9,15 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.kernel.base import \ - (_no_ctype, - _convert_ctype, - _convert_descend_into, - ICategorizedObjectContainer) - -def heterogeneous_containers(node, - ctype=_no_ctype, - active=True, - descend_into=True): +from pyomo.core.kernel.base import ( + _no_ctype, + _convert_ctype, + _convert_descend_into, + ICategorizedObjectContainer, +) + + +def heterogeneous_containers(node, ctype=_no_ctype, active=True, descend_into=True): """ A generator that yields all heterogeneous containers included in an object storage tree, including the root @@ -49,8 +48,7 @@ def heterogeneous_containers(node, assert active in (None, True) # if not active, then nothing below is active - if (active is not None) and \ - (not node.active): + if (active is not None) and (not node.active): return if not node.ctype._is_heterogeneous_container: @@ -63,47 +61,39 @@ def heterogeneous_containers(node, for obj in node.components(active=active): assert obj._is_heterogeneous_container yield from heterogeneous_containers( - obj, - ctype=ctype, - active=active, - descend_into=descend_into) + obj, ctype=ctype, active=active, descend_into=descend_into + ) return # convert AML types into Kernel types (hack for the # solver interfaces) ctype = _convert_ctype.get(ctype, ctype) - assert (ctype is _no_ctype) or \ - ctype._is_heterogeneous_container + assert (ctype is _no_ctype) or ctype._is_heterogeneous_container # convert descend_into to a function if # it is not already one descend_into = _convert_descend_into(descend_into) # a heterogeneous container - if (ctype is _no_ctype) or \ - (node.ctype is ctype): + if (ctype is _no_ctype) or (node.ctype is ctype): yield node if not descend_into(node): return for child_ctype in node.child_ctypes(): - if not child_ctype._is_heterogeneous_container: continue for child in node.children(ctype=child_ctype): assert child._is_container - if (active is not None) and \ - (not child.active): + if (active is not None) and (not child.active): continue yield from heterogeneous_containers( - child, - ctype=ctype, - active=active, - descend_into=descend_into) + child, ctype=ctype, active=active, descend_into=descend_into + ) class IHeterogeneousContainer(ICategorizedObjectContainer): @@ -117,6 +107,7 @@ class IHeterogeneousContainer(ICategorizedObjectContainer): properties of the ICategorizedObjectContainer base class. """ + __slots__ = () _is_heterogeneous_container = True @@ -124,9 +115,7 @@ class IHeterogeneousContainer(ICategorizedObjectContainer): # Interface # - def collect_ctypes(self, - active=True, - descend_into=True): + def collect_ctypes(self, active=True, descend_into=True): """Returns the set of object category types that can be found under this container. @@ -151,8 +140,7 @@ def collect_ctypes(self, ctypes = set() # if not active, then nothing below is active - if (active is not None) and \ - (not self.active): + if (active is not None) and (not self.active): return ctypes # convert descend_into to a function if @@ -161,9 +149,10 @@ def collect_ctypes(self, for child_ctype in self.child_ctypes(): for obj in self.components( - ctype=child_ctype, - active=active, - descend_into=_convert_descend_into._false): + ctype=child_ctype, + active=active, + descend_into=_convert_descend_into._false, + ): ctypes.add(child_ctype) # just need 1 to appear in order to # count the child_ctype @@ -177,36 +166,34 @@ def collect_ctypes(self, for child_ctype in tuple(ctypes): if child_ctype._is_heterogeneous_container: for obj in self.components( - ctype=child_ctype, - active=active, - descend_into=_convert_descend_into._false): + ctype=child_ctype, + active=active, + descend_into=_convert_descend_into._false, + ): assert obj._is_heterogeneous_container if descend_into(obj): - ctypes.update(obj.collect_ctypes( - active=active, - descend_into=descend_into)) + ctypes.update( + obj.collect_ctypes(active=active, descend_into=descend_into) + ) return ctypes def child_ctypes(self, *args, **kwds): """Returns the set of child object category types stored in this container.""" - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover # # Define the ICategorizedObjectContainer abstract methods # - #def child(self, *args, **kwds): + # def child(self, *args, **kwds): # ... not defined here - #def children(self, *args, **kwds): + # def children(self, *args, **kwds): # ... not defined here - def components(self, - ctype=_no_ctype, - active=True, - descend_into=True): + def components(self, ctype=_no_ctype, active=True, descend_into=True): """ Generates an efficient traversal of all components stored under this container. Components are @@ -236,8 +223,7 @@ def components(self, assert active in (None, True) # if not active, then nothing below is active - if (active is not None) and \ - (not self.active): + if (active is not None) and (not self.active): return # convert AML types into Kernel types (hack for the @@ -249,11 +235,8 @@ def components(self, descend_into = _convert_descend_into(descend_into) if ctype is _no_ctype: - for child in self.children(): - - if (active is not None) and \ - (not child.active): + if (active is not None) and (not child.active): continue if not child._is_container: @@ -262,10 +245,11 @@ def components(self, yield child if descend_into(child): yield from child.components( - active=active, - descend_into=descend_into) - elif (descend_into is _convert_descend_into._false) or \ - (not child.ctype._is_heterogeneous_container): + active=active, descend_into=descend_into + ) + elif (descend_into is _convert_descend_into._false) or ( + not child.ctype._is_heterogeneous_container + ): assert child._is_container yield from child.components(active=active) else: @@ -275,19 +259,16 @@ def components(self, yield obj if descend_into(obj): yield from obj.components( - active=active, - descend_into=descend_into) + active=active, descend_into=descend_into + ) else: - - for item in heterogeneous_containers(self, - active=active, - descend_into=descend_into): + for item in heterogeneous_containers( + self, active=active, descend_into=descend_into + ): for child in item.children(ctype=ctype): - if (not child._is_container) or \ - child._is_heterogeneous_container: - if (active is None) or \ - child.active: + if (not child._is_container) or child._is_heterogeneous_container: + if (active is None) or child.active: yield child else: assert child._is_container diff --git a/pyomo/core/kernel/homogeneous_container.py b/pyomo/core/kernel/homogeneous_container.py index e6f8d0e1434..22a70e1edff 100644 --- a/pyomo/core/kernel/homogeneous_container.py +++ b/pyomo/core/kernel/homogeneous_container.py @@ -11,6 +11,7 @@ from pyomo.core.kernel.base import ICategorizedObjectContainer + class IHomogeneousContainer(ICategorizedObjectContainer): """ A partial implementation of the ICategorizedObjectContainer @@ -27,16 +28,17 @@ class IHomogeneousContainer(ICategorizedObjectContainer): other :class:`ICategorizedObjectContainer` implementations that are defined with the same ctype. """ + __slots__ = () # # Define the ICategorizedObjectContainer abstract methods # - #def child(self, *args, **kwds): + # def child(self, *args, **kwds): # ... not defined here - #def children(self, *args, **kwds): + # def children(self, *args, **kwds): # ... not defined here def components(self, active=True): @@ -60,16 +62,13 @@ def components(self, active=True): assert active in (None, True) # if not active, then no children can be active - if (active is not None) and \ - (not self.active): + if (active is not None) and (not self.active): return for child in self.children(): - if (active is None) or \ - child.active: - if (not child._is_container) or \ - child._is_heterogeneous_container: - yield child + if (active is None) or child.active: + if (not child._is_container) or child._is_heterogeneous_container: + yield child else: assert child._is_container for obj in child.components(active=active): diff --git a/pyomo/core/kernel/list_container.py b/pyomo/core/kernel/list_container.py index 82acbf755c0..05116797f3a 100644 --- a/pyomo/core/kernel/list_container.py +++ b/pyomo/core/kernel/list_container.py @@ -16,8 +16,8 @@ logger = logging.getLogger('pyomo.core') -class ListContainer(TupleContainer, - collections.abc.MutableSequence): + +class ListContainer(TupleContainer, collections.abc.MutableSequence): """ A partial implementation of the IHomogeneousContainer interface that provides list-like storage functionality. @@ -31,6 +31,7 @@ class ListContainer(TupleContainer, other ICategorizedObjectContainer implementations that are defined with the same ctype. """ + __slots__ = () def __init__(self, *args): @@ -52,9 +53,12 @@ def __setitem__(self, i, item): "indicative of a modeling error. To avoid this " "warning, delete the original object from the " "container before assigning a new object." - % (self[i].name, - self[i].__class__.__name__, - item.__class__.__name__)) + % ( + self[i].name, + self[i].__class__.__name__, + item.__class__.__name__, + ) + ) self._data[i]._clear_parent_and_storage_key() item._update_parent_and_storage_key(self, i) self._data[i] = item @@ -73,18 +77,14 @@ def __setitem__(self, i, item): "Invalid assignment to %s type with name '%s' " "at index %s. A parent container has already been " "assigned to the object being inserted: %s" - % (self.__class__.__name__, - self.name, - i, - item.parent.name)) + % (self.__class__.__name__, self.name, i, item.parent.name) + ) else: raise TypeError( "Invalid assignment to type %s with index %s. " "The object being inserted has the wrong " - "category type: %s" - % (self.__class__.__name__, - i, - item.ctype)) + "category type: %s" % (self.__class__.__name__, i, item.ctype) + ) def insert(self, i, item): """S.insert(index, object) -- insert object before index""" @@ -104,5 +104,5 @@ def reverse(self): """S.reverse() -- reverse *IN PLACE*""" n = len(self) data = self._data - for i in range(n//2): - data[i], data[n-i-1] = data[n-i-1], data[i] + for i in range(n // 2): + data[i], data[n - i - 1] = data[n - i - 1], data[i] diff --git a/pyomo/core/kernel/matrix_constraint.py b/pyomo/core/kernel/matrix_constraint.py index 5938ec6a862..ab278eb59c2 100644 --- a/pyomo/core/kernel/matrix_constraint.py +++ b/pyomo/core/kernel/matrix_constraint.py @@ -10,13 +10,13 @@ # ___________________________________________________________________________ from pyomo.common.dependencies import ( - numpy, numpy_available as has_numpy, - scipy, scipy_available as has_scipy, + numpy, + numpy_available as has_numpy, + scipy, + scipy_available as has_scipy, ) from pyomo.core.expr.numvalue import NumericValue, value -from pyomo.core.kernel.constraint import \ - (IConstraint, - constraint_tuple) +from pyomo.core.kernel.constraint import IConstraint, constraint_tuple _noarg = object() @@ -25,18 +25,17 @@ # change or it may go away. # + class _MatrixConstraintData(IConstraint): """ A placeholder object for linear constraints in a matrix_constraint container. A user should not directly instantiate this class. """ + _ctype = IConstraint _linear_canonical_form = True - __slots__ = ("_parent", - "_storage_key", - "_active", - "__weakref__") + __slots__ = ("_parent", "_storage_key", "_active", "__weakref__") def __init__(self, index): assert index >= 0 @@ -56,15 +55,15 @@ def terms(self): parent = self.parent x = parent.x if x is None: - raise ValueError( - "No variable order has been assigned") + raise ValueError("No variable order has been assigned") A = parent._A if parent._sparse: - for k in range(A.indptr[self._storage_key], - A.indptr[self._storage_key+1]): + for k in range( + A.indptr[self._storage_key], A.indptr[self._storage_key + 1] + ): yield x[A.indices[k]], A.data[k] else: - for item in zip(x, A[self._storage_key,:].tolist()): + for item in zip(x, A[self._storage_key, :].tolist()): yield item # @@ -76,10 +75,9 @@ def __call__(self, exception=True): # don't mask an exception in the terms # property method if self.parent.x is None: - raise ValueError( - "No variable order has been assigned") + raise ValueError("No variable order has been assigned") try: - return sum(c*v() for v,c in self.terms) + return sum(c * v() for v, c in self.terms) except (ValueError, TypeError): if exception: raise @@ -98,42 +96,43 @@ def body(self): def lower(self): """The expression for the lower bound of the constraint""" return self.parent.lb[self._storage_key] + @lower.setter def lower(self, lb): if self.equality: raise ValueError( "The lower property can not be set " - "when the equality property is True.") + "when the equality property is True." + ) if lb is None: lb = -numpy.inf elif isinstance(lb, NumericValue): - raise ValueError("lb must be set to " - "a simple numeric type " - "or None") + raise ValueError("lb must be set to a simple numeric type or None") self.parent.lb[self._storage_key] = lb @property def upper(self): """The expression for the upper bound of the constraint""" return self.parent.ub[self._storage_key] + @upper.setter def upper(self, ub): if self.equality: raise ValueError( "The upper property can not be set " - "when the equality property is True.") + "when the equality property is True." + ) if ub is None: ub = numpy.inf elif isinstance(ub, NumericValue): - raise ValueError("ub must be set to " - "a simple numeric type " - "or None") + raise ValueError("ub must be set to a simple numeric type or None") self.parent.ub[self._storage_key] = ub @property def lb(self): """The value of the lower bound of the constraint""" return value(self.lower) + @lb.setter def lb(self, lb): self.lower = lb @@ -142,6 +141,7 @@ def lb(self, lb): def ub(self): """The value of the upper bound of the constraint""" return value(self.upper) + @ub.setter def ub(self, ub): self.upper = ub @@ -156,8 +156,10 @@ def rhs(self): if not self.equality: raise ValueError( "The rhs property can not be read " - "when the equality property is False.") + "when the equality property is False." + ) return self.parent.lb[self._storage_key] + @rhs.setter def rhs(self, rhs): if rhs is None: @@ -165,12 +167,10 @@ def rhs(self, rhs): # context (lb or ub), so there is no way to # interpret this raise ValueError( - "Constraint right-hand side can not " - "be assigned a value of None.") + "Constraint right-hand side can not be assigned a value of None." + ) elif isinstance(rhs, NumericValue): - raise ValueError("rhs must be set to " - "a simple numeric type " - "or None") + raise ValueError("rhs must be set to a simple numeric type or None") self.parent.lb[self._storage_key] = rhs self.parent.ub[self._storage_key] = rhs self.parent.equality[self._storage_key] = True @@ -178,8 +178,8 @@ def rhs(self, rhs): @property def bounds(self): """The bounds of the constraint as a tuple (lb, ub)""" - return (self.parent.lb[self._storage_key], - self.parent.ub[self._storage_key]) + return (self.parent.lb[self._storage_key], self.parent.ub[self._storage_key]) + @bounds.setter def bounds(self, bounds_tuple): self.lb, self.ub = bounds_tuple @@ -193,6 +193,7 @@ def equality(self): :const:`False`. Equality can only be activated by assigning a value to the .rhs property.""" return self.parent.equality[self._storage_key] + @equality.setter def equality(self, equality): if equality: @@ -200,7 +201,8 @@ def equality(self, equality): "The constraint equality flag can " "only be set to True by assigning " "a value to the rhs property " - "(e.g., con.rhs = con.lb).") + "(e.g., con.rhs = con.lb)." + ) assert not equality self.parent.equality[self._storage_key] = False @@ -213,6 +215,7 @@ def canonical_form(self, compute_values=True): """Build a canonical representation of the body of this constraints""" from pyomo.repn.standard_repn import StandardRepn + variables = [] coefficients = [] constant = 0 @@ -233,6 +236,7 @@ def canonical_form(self, compute_values=True): repn.constant = constant return repn + class matrix_constraint(constraint_tuple): """ A container for constraints of the form lb <= Ax <= ub. @@ -255,42 +259,28 @@ class matrix_constraint(constraint_tuple): format) should be used to store A. Default is :const:`True`. """ - __slots__ = ("_A", - "_sparse", - "_lb", - "_ub", - "_equality", - "_x") - def __init__(self, - A, - lb=None, - ub=None, - rhs=None, - x=None, - sparse=True): - if (not has_numpy) or (not has_scipy): #pragma:nocover + + __slots__ = ("_A", "_sparse", "_lb", "_ub", "_equality", "_x") + + def __init__(self, A, lb=None, ub=None, rhs=None, x=None, sparse=True): + if (not has_numpy) or (not has_scipy): # pragma:nocover raise ValueError("This class requires numpy and scipy") m, n = A.shape assert m > 0 assert n > 0 - cons = (_MatrixConstraintData(i) - for i in range(m)) + cons = (_MatrixConstraintData(i) for i in range(m)) super(matrix_constraint, self).__init__(cons) if sparse: self._sparse = True - self._A = scipy.sparse.csr_matrix(A, - dtype=float, - copy=True) + self._A = scipy.sparse.csr_matrix(A, dtype=float, copy=True) self._A.data.setflags(write=False) self._A.indices.setflags(write=False) self._A.indptr.setflags(write=False) else: self._sparse = False - self._A = numpy.array(A, - dtype=float, - copy=True) + self._A = numpy.array(A, dtype=float, copy=True) self._A.setflags(write=False) self._lb = numpy.ndarray(m, dtype=float) self._ub = numpy.ndarray(m, dtype=float) @@ -303,12 +293,13 @@ def __init__(self, self.lb = lb self.ub = ub else: - if ((lb is not None) or \ - (ub is not None)): - raise ValueError("The 'rhs' keyword can not " - "be used with the 'lb' or " - "'ub' keywords to initialize" - " a constraint.") + if (lb is not None) or (ub is not None): + raise ValueError( + "The 'rhs' keyword can not " + "be used with the 'lb' or " + "'ub' keywords to initialize" + " a constraint." + ) self.rhs = rhs @property @@ -321,8 +312,7 @@ def sparse(self): def A(self): """A read-only view of the constraint matrix""" if self._sparse: - return scipy.sparse.csr_matrix(self._A, - copy=False) + return scipy.sparse.csr_matrix(self._A, copy=False) else: return self._A.view() @@ -331,38 +321,37 @@ def x(self): """The list of variables associated with the columns of the constraint matrix""" return self._x + @x.setter def x(self, x): if x is None: self._x = None else: x = tuple(x) - m,n = self._A.shape + m, n = self._A.shape if len(x) != n: - raise ValueError( - "Argument length must be %s " - "not %s" % (n, len(x))) + raise ValueError("Argument length must be %s not %s" % (n, len(x))) self._x = x @property def lb(self): """The array of constraint lower bounds""" return self._lb.view() + @lb.setter def lb(self, lb): if self.equality.any(): raise ValueError( "The lb array can not be set " "when there are indices of the " - "equality array that are True") + "equality array that are True" + ) if lb is None: lb = -numpy.inf if isinstance(lb, numpy.ndarray): numpy.copyto(self._lb, lb) elif isinstance(lb, NumericValue): - raise ValueError("lb must be set to " - "a simple numeric type " - "or a numpy array") + raise ValueError("lb must be set to a simple numeric type or a numpy array") else: self._lb.fill(lb) @@ -370,21 +359,21 @@ def lb(self, lb): def ub(self): """The array of constraint upper bounds""" return self._ub.view() + @ub.setter def ub(self, ub): if self.equality.any(): raise ValueError( "The ub array can not be set " "when there are indices of the " - "equality array that are True") + "equality array that are True" + ) if ub is None: ub = numpy.inf if isinstance(ub, numpy.ndarray): numpy.copyto(self._ub, ub) elif isinstance(ub, NumericValue): - raise ValueError("ub must be set to " - "a simple numeric type " - "or a numpy array") + raise ValueError("ub must be set to a simple numeric type or a numpy array") else: self._ub.fill(ub) @@ -401,8 +390,10 @@ def rhs(self): raise ValueError( "The rhs array can not be read when " "there are indices of the equality array " - "that are False.") + "that are False." + ) return self._lb.view() + @rhs.setter def rhs(self, rhs): if rhs is None: @@ -410,12 +401,12 @@ def rhs(self, rhs): # context (lb or ub), so there is no way to # interpret this raise ValueError( - "Constraint right-hand side can not " - "be assigned a value of None.") + "Constraint right-hand side can not be assigned a value of None." + ) elif isinstance(rhs, NumericValue): - raise ValueError("rhs must be set to " - "a simple numeric type " - "or a numpy array") + raise ValueError( + "rhs must be set to a simple numeric type or a numpy array" + ) elif isinstance(rhs, numpy.ndarray): numpy.copyto(self._lb, rhs) numpy.copyto(self._ub, rhs) @@ -429,6 +420,7 @@ def equality(self): """The array of boolean entries indicating the indices that are equality constraints""" return self._equality.view() + @equality.setter def equality(self, equality): if equality: @@ -436,21 +428,19 @@ def equality(self, equality): "The constraint equality flag can " "only be set to True by assigning " "an expression to the rhs property " - "(e.g., con.rhs = con.lb).") + "(e.g., con.rhs = con.lb)." + ) assert not equality self._equality.fill(False) def __call__(self, exception=True): """Compute the value of the body of this constraint""" if self.x is None: - raise ValueError( - "No variable order has been assigned") - values = numpy.array([v.value for v in self.x], - dtype=float) + raise ValueError("No variable order has been assigned") + values = numpy.array([v.value for v in self.x], dtype=float) if numpy.isnan(values).any(): if exception: - raise ValueError("One or more variables " - "do not have a value") + raise ValueError("One or more variables do not have a value") return None return self._A.dot(values) diff --git a/pyomo/core/kernel/objective.py b/pyomo/core/kernel/objective.py index 19f9186b792..c25c86d3c09 100644 --- a/pyomo/core/kernel/objective.py +++ b/pyomo/core/kernel/objective.py @@ -15,14 +15,15 @@ from pyomo.core.kernel.expression import IExpression # Constants used to define the optimization sense -minimize=1 -maximize=-1 +minimize = 1 +maximize = -1 class IObjective(IExpression): """ The interface for optimization objectives. """ + __slots__ = () # @@ -32,8 +33,8 @@ class IObjective(IExpression): # sense = _abstract_readwrite_property( - doc=("The optimization direction for the " - "objective (minimize or maximize)")) + doc=("The optimization direction for the objective (minimize or maximize)") + ) # # Interface @@ -45,13 +46,10 @@ def is_minimizing(self): class objective(IObjective): """An optimization objective.""" + _ctype = IObjective - __slots__ = ("_parent", - "_storage_key", - "_active", - "_expr", - "_sense", - "__weakref__") + __slots__ = ("_parent", "_storage_key", "_active", "_expr", "_sense", "__weakref__") + def __init__(self, expr=None, sense=minimize): self._parent = None self._storage_key = None @@ -86,18 +84,16 @@ def sense(self): @sense.setter def sense(self, sense): """Set the sense (direction) of this objective.""" - if (sense == minimize) or \ - (sense == maximize): + if (sense == minimize) or (sense == maximize): self._sense = sense else: raise ValueError( "Objective sense must be set to one of: " "[minimize (%s), maximize (%s)]. Invalid " - "value: %s'" % (minimize, maximize, sense)) + "value: %s'" % (minimize, maximize, sense) + ) # inserts class definitions for simple _tuple, _list, and # _dict containers into this module -define_simple_containers(globals(), - "objective", - IObjective) +define_simple_containers(globals(), "objective", IObjective) diff --git a/pyomo/core/kernel/parameter.py b/pyomo/core/kernel/parameter.py index a69c9338385..1d22072435d 100644 --- a/pyomo/core/kernel/parameter.py +++ b/pyomo/core/kernel/parameter.py @@ -9,19 +9,19 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.expr.numvalue import (is_numeric_data, - NumericValue) +from pyomo.core.expr.numvalue import is_numeric_data, NumericValue from pyomo.core.kernel.base import ICategorizedObject from pyomo.core.kernel.container_utils import define_simple_containers class IParameter(ICategorizedObject, NumericValue): """The interface for mutable numeric data.""" + __slots__ = () def __call__(self, exception=True): """Computes the numeric value of this object.""" - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover # # Implement the NumericValue abstract methods @@ -63,12 +63,10 @@ def polynomial_degree(self): class parameter(IParameter): """A object for storing a mutable, numeric value that can be used to build a symbolic expression.""" + _ctype = IParameter - __slots__ = ("_parent", - "_storage_key", - "_active", - "_value", - "__weakref__") + __slots__ = ("_parent", "_storage_key", "_active", "_value", "__weakref__") + def __init__(self, value=None): self._parent = None self._storage_key = None @@ -89,7 +87,7 @@ def __call__(self, exception=True): @property def value(self): - """The value of the paramater""" + """The value of the parameter""" return self._value @value.setter @@ -104,12 +102,10 @@ class functional_value(IParameter): Note that models making use of this object may require the dill module for serialization. """ + _ctype = IParameter - __slots__ = ("_parent", - "_storage_key", - "_active", - "_fn", - "__weakref__") + __slots__ = ("_parent", "_storage_key", "_active", "_fn", "__weakref__") + def __init__(self, fn=None): self._parent = None self._storage_key = None @@ -132,8 +128,7 @@ def __call__(self, exception=True): return None # this exception should never be masked if not is_numeric_data(val): - raise TypeError( - "Functional value is not numeric data") + raise TypeError("Functional value is not numeric data") return val # @@ -152,6 +147,4 @@ def fn(self, fn): # inserts class definitions for simple _tuple, _list, and # _dict containers into this module -define_simple_containers(globals(), - "parameter", - IParameter) +define_simple_containers(globals(), "parameter", IParameter) diff --git a/pyomo/core/kernel/piecewise_library/transforms.py b/pyomo/core/kernel/piecewise_library/transforms.py index e2f640d12ad..f00e57c199d 100644 --- a/pyomo/core/kernel/piecewise_library/transforms.py +++ b/pyomo/core/kernel/piecewise_library/transforms.py @@ -12,10 +12,10 @@ """ This module contains transformations for representing a single-variate piecewise linear function using a -mixed-interger problem formulation. Reference:: +mixed-integer problem formulation. Reference:: - Mixed-Integer Models for Non-separable Piecewise Linear \ -Optimization: Unifying framework and Extensions (Vielma, \ + Mixed-Integer Models for Non-separable Piecewise Linear +Optimization: Unifying framework and Extensions (Vielma, Nemhauser 2008) """ @@ -29,52 +29,63 @@ from pyomo.core.expr.numvalue import value as _value from pyomo.core.kernel.set_types import IntegerSet from pyomo.core.kernel.block import block -from pyomo.core.kernel.expression import (expression, - expression_tuple) -from pyomo.core.kernel.variable import (IVariable, - variable_list, - variable_tuple, - variable_dict, - variable) -from pyomo.core.kernel.constraint import (constraint_list, - constraint_tuple, - linear_constraint) +from pyomo.core.kernel.expression import expression, expression_tuple +from pyomo.core.kernel.variable import ( + IVariable, + variable_list, + variable_tuple, + variable_dict, + variable, +) +from pyomo.core.kernel.constraint import ( + constraint_list, + constraint_tuple, + linear_constraint, +) from pyomo.core.kernel.sos import sos2 -from pyomo.core.kernel.piecewise_library.util import \ - (characterize_function, - is_nondecreasing, - is_positive_power_of_two, - log2floor, - generate_gray_code, - PiecewiseValidationError) +from pyomo.core.kernel.piecewise_library.util import ( + characterize_function, + is_nondecreasing, + is_positive_power_of_two, + log2floor, + generate_gray_code, + PiecewiseValidationError, +) logger = logging.getLogger('pyomo.core') registered_transforms = {} + # wrapper that allows a list containing parameters to be # used with the bisect module class _shadow_list(object): __slots__ = ("_x",) + def __init__(self, x): self._x = x + def __len__(self): return self._x.__len__() + def __getitem__(self, i): return _value(self._x.__getitem__(i)) -def piecewise(breakpoints, - values, - input=None, - output=None, - bound='eq', - repn='sos2', - validate=True, - simplify=True, - equal_slopes_tolerance=1e-6, - require_bounded_input_variable=True, - require_variable_domain_coverage=True): + +def piecewise( + breakpoints, + values, + input=None, + output=None, + bound='eq', + repn='sos2', + validate=True, + simplify=True, + equal_slopes_tolerance=1e-6, + require_bounded_input_variable=True, + require_variable_domain_coverage=True, +): """ Models a single-variate piecewise linear function. @@ -160,8 +171,8 @@ def piecewise(breakpoints, :attr:`False`. Returns: - TransformedPiecewiseLinearFunction: a block that \ - stores any new variables, constraints, and other \ + TransformedPiecewiseLinearFunction: a block that + stores any new variables, constraints, and other modeling objects used by the piecewise representation """ transform = None @@ -170,47 +181,41 @@ def piecewise(breakpoints, except KeyError: raise ValueError( "Keyword assignment repn='%s' is not valid. " - "Must be one of: %s" - % (repn, - str(sorted(registered_transforms.keys())))) + "Must be one of: %s" % (repn, str(sorted(registered_transforms.keys()))) + ) assert transform is not None if not validate: # can not simplify if we do not validate simplify = False - func = PiecewiseLinearFunction(breakpoints, - values, - validate=False) + func = PiecewiseLinearFunction(breakpoints, values, validate=False) - if simplify and \ - (transform is not piecewise_convex): - ftype = func.validate( - equal_slopes_tolerance=equal_slopes_tolerance) + if simplify and (transform is not piecewise_convex): + ftype = func.validate(equal_slopes_tolerance=equal_slopes_tolerance) - if (bound == 'eq') and \ - (ftype == characterize_function.affine): + if (bound == 'eq') and (ftype == characterize_function.affine): transform = piecewise_convex - elif (bound == 'lb') and \ - (ftype in (characterize_function.affine, - characterize_function.convex)): + elif (bound == 'lb') and ( + ftype in (characterize_function.affine, characterize_function.convex) + ): transform = piecewise_convex - elif (bound == 'ub') and \ - (ftype in (characterize_function.affine, - characterize_function.concave)): + elif (bound == 'ub') and ( + ftype in (characterize_function.affine, characterize_function.concave) + ): transform = piecewise_convex - return transform(func, - input=input, - output=output, - bound=bound, - validate=validate, - equal_slopes_tolerance=\ - equal_slopes_tolerance, - require_bounded_input_variable=\ - require_bounded_input_variable, - require_variable_domain_coverage=\ - require_variable_domain_coverage) + return transform( + func, + input=input, + output=output, + bound=bound, + validate=validate, + equal_slopes_tolerance=equal_slopes_tolerance, + require_bounded_input_variable=require_bounded_input_variable, + require_variable_domain_coverage=require_variable_domain_coverage, + ) + class PiecewiseLinearFunction(object): """A piecewise linear function @@ -238,13 +243,10 @@ class PiecewiseLinearFunction(object): keyword is :const:`True`; otherwise, they are ignored. """ + __slots__ = ("_breakpoints", "_values") - def __init__(self, - breakpoints, - values, - validate=True, - **kwds): + def __init__(self, breakpoints, values, validate=True, **kwds): self._breakpoints = breakpoints self._values = values if type(self._breakpoints) is not tuple: @@ -255,14 +257,15 @@ def __init__(self, raise ValueError( "The number of breakpoints (%s) differs from " "the number of function values (%s)" - % (len(self._breakpoints), len(self._values))) + % (len(self._breakpoints), len(self._values)) + ) if validate: self.validate(**kwds) def __getstate__(self): """Required for older versions of the pickle protocol since this class uses __slots__""" - return {key:getattr(self, key) for key in self.__slots__} + return {key: getattr(self, key) for key in self.__slots__} def __setstate__(self, state): """Required for older versions of the pickle @@ -270,8 +273,7 @@ def __setstate__(self, state): for key in state: setattr(self, key, state[key]) - def validate(self, - equal_slopes_tolerance=1e-6): + def validate(self, equal_slopes_tolerance=1e-6): """ Validate this piecewise linear function by verifying various properties of the breakpoints and values @@ -286,7 +288,7 @@ def validate(self, Returns: int: - a function characterization code (see \ + a function characterization code (see :func:`util.characterize_function`) Raises: @@ -297,21 +299,24 @@ def validate(self, values = [_value(x) for x in self._values] if not is_nondecreasing(breakpoints): raise PiecewiseValidationError( - "The list of breakpoints is not nondecreasing: %s" - % (str(breakpoints))) + "The list of breakpoints is not nondecreasing: %s" % (str(breakpoints)) + ) ftype, slopes = characterize_function(breakpoints, values) for i in range(1, len(slopes)): - if (slopes[i-1] is not None) and \ - (slopes[i] is not None) and \ - (abs(slopes[i-1] - slopes[i]) <= equal_slopes_tolerance): + if ( + (slopes[i - 1] is not None) + and (slopes[i] is not None) + and (abs(slopes[i - 1] - slopes[i]) <= equal_slopes_tolerance) + ): raise PiecewiseValidationError( "Piecewise function validation detected slopes " "of consecutive line segments to be within %s " "of one another. This may cause numerical issues. " "To avoid this error, set the 'equal_slopes_tolerance' " "keyword to a smaller value or disable validation." - % (equal_slopes_tolerance)) + % (equal_slopes_tolerance) + ) return ftype @@ -335,18 +340,19 @@ def __call__(self, x): if xP == x: return float(_value(self.values[i])) elif i != len(self.breakpoints): - xL = _value(self.breakpoints[i-1]) + xL = _value(self.breakpoints[i - 1]) xU = _value(self.breakpoints[i]) assert xL <= xU if (xL <= x) and (x <= xU): - yL = _value(self.values[i-1]) + yL = _value(self.values[i - 1]) yU = _value(self.values[i]) - return yL + (float(yU-yL)/(xU-xL))*(x-xL) - raise ValueError("The point %s is outside of the " - "function domain: [%s,%s]." - % (x, - _value(self.breakpoints[0]), - _value(self.breakpoints[-1]))) + return yL + (float(yU - yL) / (xU - xL)) * (x - xL) + raise ValueError( + "The point %s is outside of the " + "function domain: [%s,%s]." + % (x, _value(self.breakpoints[0]), _value(self.breakpoints[-1])) + ) + class TransformedPiecewiseLinearFunction(block): """Base class for transformed piecewise linear functions @@ -384,23 +390,16 @@ class TransformedPiecewiseLinearFunction(block): ignored. """ - def __init__(self, - f, - input=None, - output=None, - bound='eq', - validate=True, - **kwds): + def __init__(self, f, input=None, output=None, bound='eq', validate=True, **kwds): super(TransformedPiecewiseLinearFunction, self).__init__() assert isinstance(f, PiecewiseLinearFunction) if bound not in ('lb', 'ub', 'eq'): - raise ValueError("Invalid bound type %r. Must be " - "one of: ['lb','ub','eq']" - % (bound)) + raise ValueError( + "Invalid bound type %r. Must be one of: ['lb','ub','eq']" % (bound) + ) self._bound = bound self._f = f - self._inout = expression_tuple([expression(input), - expression(output)]) + self._inout = expression_tuple([expression(input), expression(output)]) if validate: self.validate(**kwds) @@ -426,10 +425,12 @@ def bound(self): relationship ('lb','ub','eq').""" return self._bound - def validate(self, - equal_slopes_tolerance=1e-6, - require_bounded_input_variable=True, - require_variable_domain_coverage=True): + def validate( + self, + equal_slopes_tolerance=1e-6, + require_bounded_input_variable=True, + require_variable_domain_coverage=True, + ): """ Validate this piecewise linear function by verifying various properties of the breakpoints, values, and @@ -459,58 +460,51 @@ def validate(self, Returns: int: - a function characterization code (see \ + a function characterization code (see :func:`util.characterize_function`) Raises: PiecewiseValidationError: if validation fails """ - ftype = self._f.validate( - equal_slopes_tolerance=equal_slopes_tolerance) - assert ftype in (1,2,3,4,5) + ftype = self._f.validate(equal_slopes_tolerance=equal_slopes_tolerance) + assert ftype in (1, 2, 3, 4, 5) input_var = self.input.expr if not isinstance(input_var, IVariable): input_var = None - if require_bounded_input_variable and \ - ((input_var is None) or \ - (not input_var.has_lb()) or \ - (not input_var.has_ub())): - raise PiecewiseValidationError( - "Piecewise function input is not a " - "variable with finite upper and lower " - "bounds: %s. To avoid this error, set the " - "'require_bounded_input_variable' keyword " - "to False or disable validation." - % (str(input_var))) - - if require_variable_domain_coverage and \ - (input_var is not None): + if require_bounded_input_variable and ( + (input_var is None) or (not input_var.has_lb()) or (not input_var.has_ub()) + ): + raise PiecewiseValidationError( + "Piecewise function input is not a " + "variable with finite upper and lower " + "bounds: %s. To avoid this error, set the " + "'require_bounded_input_variable' keyword " + "to False or disable validation." % (str(input_var)) + ) + + if require_variable_domain_coverage and (input_var is not None): domain_lb = _value(self.breakpoints[0]) domain_ub = _value(self.breakpoints[-1]) - if input_var.has_lb() and \ - _value(input_var.lb) < domain_lb: + if input_var.has_lb() and _value(input_var.lb) < domain_lb: raise PiecewiseValidationError( "Piecewise function domain does not include " "the lower bound of the input variable: " "%s.ub = %s > %s. To avoid this error, set " "the 'require_variable_domain_coverage' " "keyword to False or disable validation." - % (input_var.name, - _value(input_var.lb), - domain_lb)) - if input_var.has_ub() and \ - _value(input_var.ub) > domain_ub: + % (input_var.name, _value(input_var.lb), domain_lb) + ) + if input_var.has_ub() and _value(input_var.ub) > domain_ub: raise PiecewiseValidationError( "Piecewise function domain does not include " "the upper bound of the input variable: " "%s.ub = %s > %s. To avoid this error, set " "the 'require_variable_domain_coverage' " "keyword to False or disable validation." - % (input_var.name, - _value(input_var.ub), - domain_ub)) + % (input_var.name, _value(input_var.ub), domain_ub) + ) return ftype @@ -529,6 +523,7 @@ def __call__(self, x): given point using interpolation""" return self._f(x) + class piecewise_convex(TransformedPiecewiseLinearFunction): """Simple convex piecewise representation @@ -543,15 +538,12 @@ def __init__(self, *args, **kwds): breakpoints = self.breakpoints values = self.values self.c = constraint_list() - for i in range(len(breakpoints)-1): + for i in range(len(breakpoints) - 1): X0 = breakpoints[i] F_AT_X0 = values[i] - dF_AT_X0 = (values[i+1] - F_AT_X0) / \ - (breakpoints[i+1] - X0) - const = F_AT_X0 - dF_AT_X0*X0 - con = linear_constraint( - (self.output, self.input), - (-1, dF_AT_X0)) + dF_AT_X0 = (values[i + 1] - F_AT_X0) / (breakpoints[i + 1] - X0) + const = F_AT_X0 - dF_AT_X0 * X0 + con = linear_constraint((self.output, self.input), (-1, dF_AT_X0)) if self.bound == 'ub': con.lb = -const elif self.bound == 'lb': @@ -568,10 +560,11 @@ def __init__(self, *args, **kwds): # variable, but its not always the case, and there's # no guarantee that the input "variable" is not a # more general linear expression. - self.c.append(linear_constraint( - terms=[(self.input, 1)], - lb=self.breakpoints[0], - ub=self.breakpoints[-1])) + self.c.append( + linear_constraint( + terms=[(self.input, 1)], lb=self.breakpoints[0], ub=self.breakpoints[-1] + ) + ) def validate(self, **kwds): """ @@ -584,33 +577,37 @@ def validate(self, **kwds): descriptions. """ ftype = super(piecewise_convex, self).validate(**kwds) - if (self.bound == 'eq') and \ - (ftype != characterize_function.affine): + if (self.bound == 'eq') and (ftype != characterize_function.affine): raise PiecewiseValidationError( "The bound type is 'eq' but the function " "was not characterized as affine (only two " "breakpoints). The 'convex' piecewise " - "representation does not support this function.") - elif (self.bound == 'lb') and \ - (ftype not in (characterize_function.affine, - characterize_function.convex)): + "representation does not support this function." + ) + elif (self.bound == 'lb') and ( + ftype not in (characterize_function.affine, characterize_function.convex) + ): raise PiecewiseValidationError( "The bound type is 'lb' but the function " "was not characterized as convex or affine. " "The 'convex' piecewise representation does " - "not support this function.") - elif (self.bound == 'ub') and \ - (ftype not in (characterize_function.affine, - characterize_function.concave)): + "not support this function." + ) + elif (self.bound == 'ub') and ( + ftype not in (characterize_function.affine, characterize_function.concave) + ): raise PiecewiseValidationError( "The bound type is 'ub' but the function " "was not characterized as concave or affine. " "The 'convex' piecewise representation does " - "not support this function.") + "not support this function." + ) return ftype + registered_transforms['convex'] = piecewise_convex + class piecewise_sos2(TransformedPiecewiseLinearFunction): """Discrete SOS2 piecewise representation @@ -622,21 +619,25 @@ def __init__(self, *args, **kwds): super(piecewise_sos2, self).__init__(*args, **kwds) # create vars - y_tuple = tuple(variable(lb=0) - for i in range(len(self.breakpoints))) + y_tuple = tuple(variable(lb=0) for i in range(len(self.breakpoints))) y = self.v = variable_tuple(y_tuple) # create piecewise constraints self.c = constraint_list() - self.c.append(linear_constraint( - variables=y_tuple + (self.input,), - coefficients=self.breakpoints + (-1,), - rhs=0)) - - self.c.append(linear_constraint( - variables=y_tuple + (self.output,), - coefficients=self.values + (-1,))) + self.c.append( + linear_constraint( + variables=y_tuple + (self.input,), + coefficients=self.breakpoints + (-1,), + rhs=0, + ) + ) + + self.c.append( + linear_constraint( + variables=y_tuple + (self.output,), coefficients=self.values + (-1,) + ) + ) if self.bound == 'ub': self.c[-1].lb = 0 elif self.bound == 'lb': @@ -645,9 +646,9 @@ def __init__(self, *args, **kwds): assert self.bound == 'eq' self.c[-1].rhs = 0 - self.c.append(linear_constraint(variables=y_tuple, - coefficients=(1,)*len(y), - rhs=1)) + self.c.append( + linear_constraint(variables=y_tuple, coefficients=(1,) * len(y), rhs=1) + ) self.s = sos2(y) @@ -663,8 +664,10 @@ def validate(self, **kwds): """ return super(piecewise_sos2, self).validate(**kwds) + registered_transforms['sos2'] = piecewise_sos2 + class piecewise_dcc(TransformedPiecewiseLinearFunction): """Discrete DCC piecewise representation @@ -676,43 +679,50 @@ def __init__(self, *args, **kwds): super(piecewise_dcc, self).__init__(*args, **kwds) # create index sets - polytopes = range(len(self.breakpoints)-1) + polytopes = range(len(self.breakpoints) - 1) vertices = range(len(self.breakpoints)) + def polytope_verts(p): - return range(p,p+2) + return range(p, p + 2) # create vars self.v = variable_dict() lmbda = self.v['lambda'] = variable_dict( - ((p,v), variable(lb=0)) - for p in polytopes - for v in vertices) + ((p, v), variable(lb=0)) for p in polytopes for v in vertices + ) y = self.v['y'] = variable_tuple( - variable(domain_type=IntegerSet, lb=0, ub=1) - for p in polytopes) + variable(domain_type=IntegerSet, lb=0, ub=1) for p in polytopes + ) # create piecewise constraints self.c = constraint_list() - self.c.append(linear_constraint( - variables=tuple(lmbda[p,v] - for p in polytopes - for v in polytope_verts(p)) + \ - (self.input,), - coefficients=tuple(self.breakpoints[v] - for p in polytopes - for v in polytope_verts(p)) + \ - (-1,), - rhs=0)) - - self.c.append(linear_constraint( - variables=tuple(lmbda[p,v] - for p in polytopes - for v in polytope_verts(p)) + \ - (self.output,), - coefficients=tuple(self.values[v] - for p in polytopes - for v in polytope_verts(p)) + (-1,))) + self.c.append( + linear_constraint( + variables=tuple( + lmbda[p, v] for p in polytopes for v in polytope_verts(p) + ) + + (self.input,), + coefficients=tuple( + self.breakpoints[v] for p in polytopes for v in polytope_verts(p) + ) + + (-1,), + rhs=0, + ) + ) + + self.c.append( + linear_constraint( + variables=tuple( + lmbda[p, v] for p in polytopes for v in polytope_verts(p) + ) + + (self.output,), + coefficients=tuple( + self.values[v] for p in polytopes for v in polytope_verts(p) + ) + + (-1,), + ) + ) if self.bound == 'ub': self.c[-1].lb = 0 elif self.bound == 'lb': @@ -723,18 +733,19 @@ def polytope_verts(p): clist = [] for p in polytopes: - variables = tuple(lmbda[p,v] for v in polytope_verts(p)) + variables = tuple(lmbda[p, v] for v in polytope_verts(p)) clist.append( linear_constraint( variables=variables + (y[p],), - coefficients=(1,)*len(variables) + (-1,), - rhs=0)) + coefficients=(1,) * len(variables) + (-1,), + rhs=0, + ) + ) self.c.append(constraint_tuple(clist)) - self.c.append(linear_constraint( - variables=tuple(y), - coefficients=(1,)*len(y), - rhs=1)) + self.c.append( + linear_constraint(variables=tuple(y), coefficients=(1,) * len(y), rhs=1) + ) def validate(self, **kwds): """ @@ -748,8 +759,10 @@ def validate(self, **kwds): """ return super(piecewise_dcc, self).validate(**kwds) + registered_transforms['dcc'] = piecewise_dcc + class piecewise_cc(TransformedPiecewiseLinearFunction): """Discrete CC piecewise representation @@ -761,37 +774,42 @@ def __init__(self, *args, **kwds): super(piecewise_cc, self).__init__(*args, **kwds) # create index sets - polytopes = range(len(self.breakpoints)-1) + polytopes = range(len(self.breakpoints) - 1) vertices = range(len(self.breakpoints)) + def vertex_polys(v): if v == 0: return [v] - if v == len(self.breakpoints)-1: - return [v-1] + if v == len(self.breakpoints) - 1: + return [v - 1] else: - return [v-1,v] + return [v - 1, v] # create vars self.v = variable_dict() - lmbda = self.v['lambda'] = variable_tuple( - variable(lb=0) for v in vertices) + lmbda = self.v['lambda'] = variable_tuple(variable(lb=0) for v in vertices) y = self.v['y'] = variable_tuple( - variable(domain_type=IntegerSet, lb=0, ub=1) - for p in polytopes) + variable(domain_type=IntegerSet, lb=0, ub=1) for p in polytopes + ) lmbda_tuple = tuple(lmbda) # create piecewise constraints self.c = constraint_list() - self.c.append(linear_constraint( - variables=lmbda_tuple + (self.input,), - coefficients=self.breakpoints + (-1,), - rhs=0)) - - self.c.append(linear_constraint( - variables=lmbda_tuple + (self.output,), - coefficients=self.values + (-1,))) + self.c.append( + linear_constraint( + variables=lmbda_tuple + (self.input,), + coefficients=self.breakpoints + (-1,), + rhs=0, + ) + ) + + self.c.append( + linear_constraint( + variables=lmbda_tuple + (self.output,), coefficients=self.values + (-1,) + ) + ) if self.bound == 'ub': self.c[-1].lb = 0 elif self.bound == 'lb': @@ -800,24 +818,27 @@ def vertex_polys(v): assert self.bound == 'eq' self.c[-1].rhs = 0 - self.c.append(linear_constraint( - variables=lmbda_tuple, - coefficients=(1,)*len(lmbda), - rhs=1)) + self.c.append( + linear_constraint( + variables=lmbda_tuple, coefficients=(1,) * len(lmbda), rhs=1 + ) + ) clist = [] for v in vertices: variables = tuple(y[p] for p in vertex_polys(v)) - clist.append(linear_constraint( - variables=variables + (lmbda[v],), - coefficients=(1,)*len(variables) + (-1,), - lb=0)) + clist.append( + linear_constraint( + variables=variables + (lmbda[v],), + coefficients=(1,) * len(variables) + (-1,), + lb=0, + ) + ) self.c.append(constraint_tuple(clist)) - self.c.append(linear_constraint( - variables=tuple(y), - coefficients=(1,)*len(y), - rhs=1)) + self.c.append( + linear_constraint(variables=tuple(y), coefficients=(1,) * len(y), rhs=1) + ) def validate(self, **kwds): """ @@ -831,8 +852,10 @@ def validate(self, **kwds): """ return super(piecewise_cc, self).validate(**kwds) + registered_transforms['cc'] = piecewise_cc + class piecewise_mc(TransformedPiecewiseLinearFunction): """Discrete MC piecewise representation @@ -844,38 +867,46 @@ def __init__(self, *args, **kwds): super(piecewise_mc, self).__init__(*args, **kwds) # create indexers - polytopes = range(len(self.breakpoints)-1) + polytopes = range(len(self.breakpoints) - 1) # create constants (using future division) # these might also be expressions if the breakpoints # or values lists contain mutable objects - slopes = tuple((self.values[p+1] - self.values[p]) / \ - (self.breakpoints[p+1] - self.breakpoints[p]) - for p in polytopes) - intercepts = tuple(self.values[p] - \ - (slopes[p] * self.breakpoints[p]) - for p in polytopes) + slopes = tuple( + (self.values[p + 1] - self.values[p]) + / (self.breakpoints[p + 1] - self.breakpoints[p]) + for p in polytopes + ) + intercepts = tuple( + self.values[p] - (slopes[p] * self.breakpoints[p]) for p in polytopes + ) # create vars self.v = variable_dict() - lmbda = self.v['lambda'] = variable_tuple( - variable() for p in polytopes) + lmbda = self.v['lambda'] = variable_tuple(variable() for p in polytopes) lmbda_tuple = tuple(lmbda) y = self.v['y'] = variable_tuple( - variable(domain_type=IntegerSet, lb=0, ub=1) for p in polytopes) + variable(domain_type=IntegerSet, lb=0, ub=1) for p in polytopes + ) y_tuple = tuple(y) # create piecewise constraints self.c = constraint_list() - self.c.append(linear_constraint( - variables=lmbda_tuple + (self.input,), - coefficients=(1,)*len(lmbda) + (-1,), - rhs=0)) - - self.c.append(linear_constraint( - variables=lmbda_tuple + y_tuple + (self.output,), - coefficients=slopes + intercepts + (-1,))) + self.c.append( + linear_constraint( + variables=lmbda_tuple + (self.input,), + coefficients=(1,) * len(lmbda) + (-1,), + rhs=0, + ) + ) + + self.c.append( + linear_constraint( + variables=lmbda_tuple + y_tuple + (self.output,), + coefficients=slopes + intercepts + (-1,), + ) + ) if self.bound == 'ub': self.c[-1].lb = 0 elif self.bound == 'lb': @@ -887,21 +918,26 @@ def __init__(self, *args, **kwds): clist1 = [] clist2 = [] for p in polytopes: - clist1.append(linear_constraint( - variables=(y[p], lmbda[p]), - coefficients=(self.breakpoints[p], -1), - ub=0)) - clist2.append(linear_constraint( - variables=(lmbda[p], y[p]), - coefficients=(1, -self.breakpoints[p+1]), - ub=0)) + clist1.append( + linear_constraint( + variables=(y[p], lmbda[p]), + coefficients=(self.breakpoints[p], -1), + ub=0, + ) + ) + clist2.append( + linear_constraint( + variables=(lmbda[p], y[p]), + coefficients=(1, -self.breakpoints[p + 1]), + ub=0, + ) + ) self.c.append(constraint_tuple(clist1)) self.c.append(constraint_tuple(clist2)) - self.c.append(linear_constraint( - variables=y_tuple, - coefficients=(1,)*len(y), - rhs=1)) + self.c.append( + linear_constraint(variables=y_tuple, coefficients=(1,) * len(y), rhs=1) + ) def validate(self, **kwds): """ @@ -917,12 +953,14 @@ def validate(self, **kwds): # this representation does not support step functions if ftype == characterize_function.step: raise PiecewiseValidationError( - "The 'mc' piecewise representation does " - "not support step functions.") + "The 'mc' piecewise representation does not support step functions." + ) return ftype + registered_transforms['mc'] = piecewise_mc + class piecewise_inc(TransformedPiecewiseLinearFunction): """Discrete INC piecewise representation @@ -934,34 +972,39 @@ def __init__(self, *args, **kwds): super(piecewise_inc, self).__init__(*args, **kwds) # create indexers - polytopes = range(len(self.breakpoints)-1) + polytopes = range(len(self.breakpoints) - 1) # create vars self.v = variable_dict() - delta = self.v['delta'] = variable_tuple( - variable() for p in polytopes) + delta = self.v['delta'] = variable_tuple(variable() for p in polytopes) delta[0].ub = 1 delta[-1].lb = 0 delta_tuple = tuple(delta) y = self.v['y'] = variable_tuple( - variable(domain_type=IntegerSet, lb=0, ub=1) - for p in polytopes[:-1]) + variable(domain_type=IntegerSet, lb=0, ub=1) for p in polytopes[:-1] + ) # create piecewise constraints self.c = constraint_list() - self.c.append(linear_constraint( - variables=(self.input,) + delta_tuple, - coefficients=(-1,) + tuple(self.breakpoints[p+1] - \ - self.breakpoints[p] - for p in polytopes), - rhs=-self.breakpoints[0])) - - self.c.append(linear_constraint( - variables=(self.output,) + delta_tuple, - coefficients=(-1,) + tuple(self.values[p+1] - \ - self.values[p] - for p in polytopes))) + self.c.append( + linear_constraint( + variables=(self.input,) + delta_tuple, + coefficients=(-1,) + + tuple( + self.breakpoints[p + 1] - self.breakpoints[p] for p in polytopes + ), + rhs=-self.breakpoints[0], + ) + ) + + self.c.append( + linear_constraint( + variables=(self.output,) + delta_tuple, + coefficients=(-1,) + + tuple(self.values[p + 1] - self.values[p] for p in polytopes), + ) + ) if self.bound == 'ub': self.c[-1].lb = -self.values[0] elif self.bound == 'lb': @@ -973,14 +1016,16 @@ def __init__(self, *args, **kwds): clist1 = [] clist2 = [] for p in polytopes[:-1]: - clist1.append(linear_constraint( - variables=(delta[p+1], y[p]), - coefficients=(1, -1), - ub=0)) - clist2.append(linear_constraint( - variables=(y[p], delta[p]), - coefficients=(1, -1), - ub=0)) + clist1.append( + linear_constraint( + variables=(delta[p + 1], y[p]), coefficients=(1, -1), ub=0 + ) + ) + clist2.append( + linear_constraint( + variables=(y[p], delta[p]), coefficients=(1, -1), ub=0 + ) + ) self.c.append(constraint_tuple(clist1)) self.c.append(constraint_tuple(clist2)) @@ -996,8 +1041,10 @@ def validate(self, **kwds): """ return super(piecewise_inc, self).validate(**kwds) + registered_transforms['inc'] = piecewise_inc + class piecewise_dlog(TransformedPiecewiseLinearFunction): """Discrete DLOG piecewise representation @@ -1012,51 +1059,55 @@ def __init__(self, *args, **kwds): breakpoints = self.breakpoints values = self.values - if not is_positive_power_of_two(len(breakpoints)-1): - raise ValueError("The list of breakpoints must be " - "of length (2^n)+1 for some positive " - "integer n. Invalid length: %s" - % (len(breakpoints))) + if not is_positive_power_of_two(len(breakpoints) - 1): + raise ValueError( + "The list of breakpoints must be " + "of length (2^n)+1 for some positive " + "integer n. Invalid length: %s" % (len(breakpoints)) + ) # create branching schemes - L = log2floor(len(breakpoints)-1) - assert 2**L == len(breakpoints)-1 + L = log2floor(len(breakpoints) - 1) + assert 2**L == len(breakpoints) - 1 B_LEFT, B_RIGHT = self._branching_scheme(L) # create indexers - polytopes = range(len(breakpoints)-1) + polytopes = range(len(breakpoints) - 1) vertices = range(len(breakpoints)) + def polytope_verts(p): - return range(p,p+2) + return range(p, p + 2) # create vars self.v = variable_dict() lmbda = self.v['lambda'] = variable_dict( - ((p,v), variable(lb=0)) - for p in polytopes - for v in polytope_verts(p)) + ((p, v), variable(lb=0)) for p in polytopes for v in polytope_verts(p) + ) y = self.v['y'] = variable_tuple( - variable(domain_type=IntegerSet, lb=0, ub=1) for i in range(L)) + variable(domain_type=IntegerSet, lb=0, ub=1) for i in range(L) + ) # create piecewise constraints self.c = constraint_list() - self.c.append(linear_constraint( - variables=(self.input,) + tuple(lmbda[p,v] - for p in polytopes - for v in polytope_verts(p)), - coefficients=(-1,) + tuple(breakpoints[v] - for p in polytopes - for v in polytope_verts(p)), - rhs=0)) - - self.c.append(linear_constraint( - variables=(self.output,) + tuple(lmbda[p,v] - for p in polytopes - for v in polytope_verts(p)), - coefficients=(-1,) + tuple(values[v] - for p in polytopes - for v in polytope_verts(p)))) + self.c.append( + linear_constraint( + variables=(self.input,) + + tuple(lmbda[p, v] for p in polytopes for v in polytope_verts(p)), + coefficients=(-1,) + + tuple(breakpoints[v] for p in polytopes for v in polytope_verts(p)), + rhs=0, + ) + ) + + self.c.append( + linear_constraint( + variables=(self.output,) + + tuple(lmbda[p, v] for p in polytopes for v in polytope_verts(p)), + coefficients=(-1,) + + tuple(values[v] for p in polytopes for v in polytope_verts(p)), + ) + ) if self.bound == 'ub': self.c[-1].lb = 0 elif self.bound == 'lb': @@ -1065,44 +1116,49 @@ def polytope_verts(p): assert self.bound == 'eq' self.c[-1].rhs = 0 - self.c.append(linear_constraint( - variables=tuple(lmbda.values()), - coefficients=(1,)*len(lmbda), - rhs=1)) + self.c.append( + linear_constraint( + variables=tuple(lmbda.values()), coefficients=(1,) * len(lmbda), rhs=1 + ) + ) clist = [] for i in range(L): - variables = tuple(lmbda[p,v] - for p in B_LEFT[i] - for v in polytope_verts(p)) - clist.append(linear_constraint( - variables=variables + (y[i],), - coefficients=(1,)*len(variables) + (-1,), - ub=0)) + variables = tuple(lmbda[p, v] for p in B_LEFT[i] for v in polytope_verts(p)) + clist.append( + linear_constraint( + variables=variables + (y[i],), + coefficients=(1,) * len(variables) + (-1,), + ub=0, + ) + ) self.c.append(constraint_tuple(clist)) del clist clist = [] for i in range(L): - variables = tuple(lmbda[p,v] - for p in B_RIGHT[i] - for v in polytope_verts(p)) - clist.append(linear_constraint( - variables=variables + (y[i],), - coefficients=(1,)*len(variables) + (1,), - ub=1)) + variables = tuple( + lmbda[p, v] for p in B_RIGHT[i] for v in polytope_verts(p) + ) + clist.append( + linear_constraint( + variables=variables + (y[i],), + coefficients=(1,) * len(variables) + (1,), + ub=1, + ) + ) self.c.append(constraint_tuple(clist)) def _branching_scheme(self, L): N = 2**L B_LEFT = [] - for i in range(1,L+1): + for i in range(1, L + 1): start = 1 - step = N//(2**i) + step = N // (2**i) tmp = [] while start < N: - tmp.extend(j-1 for j in range(start,start+step)) - start += 2*step + tmp.extend(j - 1 for j in range(start, start + step)) + start += 2 * step B_LEFT.append(tmp) biglist = range(N) @@ -1128,8 +1184,10 @@ def validate(self, **kwds): """ return super(piecewise_dlog, self).validate(**kwds) + registered_transforms['dlog'] = piecewise_dlog + class piecewise_log(TransformedPiecewiseLinearFunction): """Discrete LOG piecewise representation @@ -1144,15 +1202,16 @@ def __init__(self, *args, **kwds): breakpoints = self.breakpoints values = self.values - if not is_positive_power_of_two(len(breakpoints)-1): - raise ValueError("The list of breakpoints must be " - "of length (2^n)+1 for some positive " - "integer n. Invalid length: %s" - % (len(breakpoints))) + if not is_positive_power_of_two(len(breakpoints) - 1): + raise ValueError( + "The list of breakpoints must be " + "of length (2^n)+1 for some positive " + "integer n. Invalid length: %s" % (len(breakpoints)) + ) # create branching schemes - L = log2floor(len(breakpoints)-1) - S,B_LEFT,B_RIGHT = self._branching_scheme(L) + L = log2floor(len(breakpoints) - 1) + S, B_LEFT, B_RIGHT = self._branching_scheme(L) # create indexers polytopes = range(len(breakpoints) - 1) @@ -1160,22 +1219,27 @@ def __init__(self, *args, **kwds): # create vars self.v = variable_dict() - lmbda = self.v['lambda'] = variable_tuple( - variable(lb=0) for v in vertices) + lmbda = self.v['lambda'] = variable_tuple(variable(lb=0) for v in vertices) y = self.v['y'] = variable_list( - variable(domain_type=IntegerSet, lb=0, ub=1) for s in S) + variable(domain_type=IntegerSet, lb=0, ub=1) for s in S + ) # create piecewise constraints self.c = constraint_list() - self.c.append(linear_constraint( - variables=(self.input,) + tuple(lmbda), - coefficients=(-1,) + breakpoints, - rhs=0)) - - self.c.append(linear_constraint( - variables=(self.output,) + tuple(lmbda), - coefficients=(-1,) + values)) + self.c.append( + linear_constraint( + variables=(self.input,) + tuple(lmbda), + coefficients=(-1,) + breakpoints, + rhs=0, + ) + ) + + self.c.append( + linear_constraint( + variables=(self.output,) + tuple(lmbda), coefficients=(-1,) + values + ) + ) if self.bound == 'ub': self.c[-1].lb = 0 elif self.bound == 'lb': @@ -1184,40 +1248,57 @@ def __init__(self, *args, **kwds): assert self.bound == 'eq' self.c[-1].rhs = 0 - self.c.append(linear_constraint( - variables=tuple(lmbda), - coefficients=(1,)*len(lmbda), - rhs=1)) + self.c.append( + linear_constraint( + variables=tuple(lmbda), coefficients=(1,) * len(lmbda), rhs=1 + ) + ) clist = [] for s in S: - variables=tuple(lmbda[v] for v in B_LEFT[s]) - clist.append(linear_constraint( - variables=variables + (y[s],), - coefficients=(1,)*len(variables) + (-1,), - ub=0)) + variables = tuple(lmbda[v] for v in B_LEFT[s]) + clist.append( + linear_constraint( + variables=variables + (y[s],), + coefficients=(1,) * len(variables) + (-1,), + ub=0, + ) + ) self.c.append(constraint_tuple(clist)) del clist clist = [] for s in S: - variables=tuple(lmbda[v] for v in B_RIGHT[s]) - clist.append(linear_constraint( - variables=variables + (y[s],), - coefficients=(1,)*len(variables) + (1,), - ub=1)) + variables = tuple(lmbda[v] for v in B_RIGHT[s]) + clist.append( + linear_constraint( + variables=variables + (y[s],), + coefficients=(1,) * len(variables) + (1,), + ub=1, + ) + ) self.c.append(constraint_tuple(clist)) def _branching_scheme(self, n): N = 2**n S = range(n) G = generate_gray_code(n) - L = tuple([k for k in range(N+1) - if ((k == 0) or (G[k-1][s] == 1)) - and ((k == N) or (G[k][s] == 1))] for s in S) - R = tuple([k for k in range(N+1) - if ((k == 0) or (G[k-1][s] == 0)) - and ((k == N) or (G[k][s] == 0))] for s in S) + L = tuple( + [ + k + for k in range(N + 1) + if ((k == 0) or (G[k - 1][s] == 1)) and ((k == N) or (G[k][s] == 1)) + ] + for s in S + ) + R = tuple( + [ + k + for k in range(N + 1) + if ((k == 0) or (G[k - 1][s] == 0)) and ((k == N) or (G[k][s] == 0)) + ] + for s in S + ) return S, L, R def validate(self, **kwds): @@ -1232,4 +1313,5 @@ def validate(self, **kwds): """ return super(piecewise_log, self).validate(**kwds) + registered_transforms['log'] = piecewise_log diff --git a/pyomo/core/kernel/piecewise_library/transforms_nd.py b/pyomo/core/kernel/piecewise_library/transforms_nd.py index 150c6a88483..f1ea67e8d4b 100644 --- a/pyomo/core/kernel/piecewise_library/transforms_nd.py +++ b/pyomo/core/kernel/piecewise_library/transforms_nd.py @@ -12,10 +12,10 @@ """ This module contains transformations for representing a multi-variate piecewise linear function using a -mixed-interger problem formulation. Reference:: +mixed-integer problem formulation. Reference:: - Mixed-Integer Models for Non-separable Piecewise Linear \ -Optimization: Unifying framework and Extensions (Vielma, \ + Mixed-Integer Models for Non-separable Piecewise Linear +Optimization: Unifying framework and Extensions (Vielma, Nemhauser 2008) """ @@ -24,26 +24,21 @@ from pyomo.core.kernel.block import block from pyomo.core.kernel.set_types import IntegerSet -from pyomo.core.kernel.variable import (variable, - variable_dict, - variable_tuple) -from pyomo.core.kernel.constraint import (linear_constraint, - constraint_list, - constraint_tuple) -from pyomo.core.kernel.expression import (expression, - expression_tuple) +from pyomo.core.kernel.variable import variable, variable_dict, variable_tuple +from pyomo.core.kernel.constraint import ( + linear_constraint, + constraint_list, + constraint_tuple, +) +from pyomo.core.kernel.expression import expression, expression_tuple import pyomo.core.kernel.piecewise_library.util logger = logging.getLogger('pyomo.core') registered_transforms = {} -def piecewise_nd(tri, - values, - input=None, - output=None, - bound='eq', - repn='cc'): + +def piecewise_nd(tri, values, input=None, output=None, bound='eq', repn='cc'): """ Models a multi-variate piecewise linear function. @@ -89,9 +84,9 @@ def piecewise_nd(tri, - 'cc': convex combination Returns: - TransformedPiecewiseLinearFunctionND: a block \ - containing any new variables, constraints, and \ - other components used by the piecewise \ + TransformedPiecewiseLinearFunctionND: a block + containing any new variables, constraints, and + other components used by the piecewise representation """ transform = None @@ -100,18 +95,14 @@ def piecewise_nd(tri, except KeyError: raise ValueError( "Keyword assignment repn='%s' is not valid. " - "Must be one of: %s" - % (repn, - str(sorted(registered_transforms.keys())))) + "Must be one of: %s" % (repn, str(sorted(registered_transforms.keys()))) + ) assert transform is not None - func = PiecewiseLinearFunctionND(tri, - values) + func = PiecewiseLinearFunctionND(tri, values) + + return transform(func, input=input, output=output, bound=bound) - return transform(func, - input=input, - output=output, - bound=bound) class PiecewiseLinearFunctionND(object): """A multi-variate piecewise linear function @@ -140,21 +131,18 @@ class PiecewiseLinearFunctionND(object): the values of the piecewise function at each of coordinates in the triangulation points array. """ + __slots__ = ("_tri", "_values") - def __init__(self, - tri, - values, - validate=True, - **kwds): + def __init__(self, tri, values, validate=True, **kwds): assert pyomo.core.kernel.piecewise_library.util.numpy_available assert pyomo.core.kernel.piecewise_library.util.scipy_available - assert isinstance(tri, - pyomo.core.kernel.piecewise_library.\ - util.scipy.spatial.Delaunay) - assert isinstance(values, - pyomo.core.kernel.piecewise_library.\ - util.numpy.ndarray) + assert isinstance( + tri, pyomo.core.kernel.piecewise_library.util.scipy.spatial.Delaunay + ) + assert isinstance( + values, pyomo.core.kernel.piecewise_library.util.numpy.ndarray + ) npoints, ndim = tri.points.shape nsimplices, _ = tri.simplices.shape assert tri.simplices.shape[1] == ndim + 1 @@ -167,7 +155,7 @@ def __init__(self, def __getstate__(self): """Required for older versions of the pickle protocol since this class uses __slots__""" - return {key:getattr(self, key) for key in self.__slots__} + return {key: getattr(self, key) for key in self.__slots__} def __setstate__(self, state): """Required for older versions of the pickle @@ -201,12 +189,13 @@ def __call__(self, x): a (n,D)-shaped numpy array. """ assert isinstance(x, Sized) - if isinstance(x, pyomo.core.kernel.piecewise_library.\ - util.numpy.ndarray): + if isinstance(x, pyomo.core.kernel.piecewise_library.util.numpy.ndarray): if x.shape != self._tri.points.shape[1:]: multi = True - assert x.shape[1:] == self._tri.points[0].shape, \ - "%s[1] != %s" % (x.shape, self._tri.points[0].shape) + assert x.shape[1:] == self._tri.points[0].shape, "%s[1] != %s" % ( + x.shape, + self._tri.points[0].shape, + ) else: multi = False else: @@ -214,22 +203,24 @@ def __call__(self, x): _, ndim = self._tri.points.shape i = self._tri.find_simplex(x) if multi: - Tinv = self._tri.transform[i,:ndim] - r = self._tri.transform[i,ndim] - b = pyomo.core.kernel.piecewise_library.util.\ - numpy.einsum('ijk,ik->ij', Tinv, x-r) - b = pyomo.core.kernel.piecewise_library.util.\ - numpy.c_[b, 1 - b.sum(axis=1)] + Tinv = self._tri.transform[i, :ndim] + r = self._tri.transform[i, ndim] + b = pyomo.core.kernel.piecewise_library.util.numpy.einsum( + 'ijk,ik->ij', Tinv, x - r + ) + b = pyomo.core.kernel.piecewise_library.util.numpy.c_[b, 1 - b.sum(axis=1)] s = self._tri.simplices[i] - return (b*self._values[s]).sum(axis=1) + return (b * self._values[s]).sum(axis=1) else: - b = self._tri.transform[i,:ndim,:ndim].dot( - x - self._tri.transform[i,ndim,:]) + b = self._tri.transform[i, :ndim, :ndim].dot( + x - self._tri.transform[i, ndim, :] + ) s = self._tri.simplices[i] val = b.dot(self._values[s[:ndim]]) - val += (1-b.sum())*self._values[s[ndim]] + val += (1 - b.sum()) * self._values[s[ndim]] return val + class TransformedPiecewiseLinearFunctionND(block): """Base class for transformed multi-variate piecewise linear functions @@ -255,24 +246,19 @@ class TransformedPiecewiseLinearFunctionND(block): - 'ub': y >= f(x) """ - def __init__(self, - f, - input=None, - output=None, - bound='eq'): + def __init__(self, f, input=None, output=None, bound='eq'): super(TransformedPiecewiseLinearFunctionND, self).__init__() assert isinstance(f, PiecewiseLinearFunctionND) if bound not in ('lb', 'ub', 'eq'): - raise ValueError("Invalid bound type %r. Must be " - "one of: ['lb','ub','eq']" - % (bound)) + raise ValueError( + "Invalid bound type %r. Must be one of: ['lb','ub','eq']" % (bound) + ) self._bound = bound self._f = f - _,ndim = f._tri.points.shape + _, ndim = f._tri.points.shape if input is None: - input = [None]*ndim - self._input = expression_tuple( - expression(input[i]) for i in range(ndim)) + input = [None] * ndim + self._input = expression_tuple(expression(input[i]) for i in range(ndim)) self._output = expression(output) @property @@ -324,6 +310,7 @@ def __call__(self, x): """ return self._f(x) + class piecewise_nd_cc(TransformedPiecewiseLinearFunctionND): """Discrete CC multi-variate piecewise representation @@ -346,10 +333,10 @@ def __init__(self, *args, **kwds): # create vars self.v = variable_dict() - lmbda = self.v['lambda'] = variable_tuple( - variable(lb=0) for v in vertices) + lmbda = self.v['lambda'] = variable_tuple(variable(lb=0) for v in vertices) y = self.v['y'] = variable_tuple( - variable(domain_type=IntegerSet, lb=0, ub=1) for s in simplices) + variable(domain_type=IntegerSet, lb=0, ub=1) for s in simplices + ) lmbda_tuple = tuple(lmbda) # create constraints @@ -357,16 +344,22 @@ def __init__(self, *args, **kwds): clist = [] for d in dimensions: - clist.append(linear_constraint( - variables=lmbda_tuple + (self.input[d],), - coefficients=tuple(pointsT[d]) + (-1,), - rhs=0)) + clist.append( + linear_constraint( + variables=lmbda_tuple + (self.input[d],), + coefficients=tuple(pointsT[d]) + (-1,), + rhs=0, + ) + ) self.c.append(constraint_tuple(clist)) del clist - self.c.append(linear_constraint( - variables=lmbda_tuple + (self.output,), - coefficients=tuple(self.values) + (-1,))) + self.c.append( + linear_constraint( + variables=lmbda_tuple + (self.output,), + coefficients=tuple(self.values) + (-1,), + ) + ) if self.bound == 'ub': self.c[-1].lb = 0 elif self.bound == 'lb': @@ -375,10 +368,11 @@ def __init__(self, *args, **kwds): assert self.bound == 'eq' self.c[-1].rhs = 0 - self.c.append(linear_constraint( - variables=lmbda_tuple, - coefficients=(1,)*len(lmbda_tuple), - rhs=1)) + self.c.append( + linear_constraint( + variables=lmbda_tuple, coefficients=(1,) * len(lmbda_tuple), rhs=1 + ) + ) # generate a map from vertex index to simplex index, # which avoids an n^2 lookup when generating the @@ -391,16 +385,17 @@ def __init__(self, *args, **kwds): clist = [] for v in vertices: variables = tuple(y[s] for s in vertex_to_simplex[v]) - clist.append(linear_constraint( - variables=variables + (lmbda[v],), - coefficients=(1,)*len(variables) + (-1,), - lb=0)) + clist.append( + linear_constraint( + variables=variables + (lmbda[v],), + coefficients=(1,) * len(variables) + (-1,), + lb=0, + ) + ) self.c.append(constraint_tuple(clist)) del clist - self.c.append(linear_constraint( - variables=y, - coefficients=(1,)*len(y), - rhs=1)) + self.c.append(linear_constraint(variables=y, coefficients=(1,) * len(y), rhs=1)) + registered_transforms['cc'] = piecewise_nd_cc diff --git a/pyomo/core/kernel/piecewise_library/util.py b/pyomo/core/kernel/piecewise_library/util.py index 9a0a6056a82..e65502b1a12 100644 --- a/pyomo/core/kernel/piecewise_library/util.py +++ b/pyomo/core/kernel/piecewise_library/util.py @@ -12,14 +12,14 @@ import operator import itertools -from pyomo.common.dependencies import ( - numpy, numpy_available, scipy, scipy_available -) +from pyomo.common.dependencies import numpy, numpy_available, scipy, scipy_available + class PiecewiseValidationError(Exception): """An exception raised when validation of piecewise linear functions fail.""" + def is_constant(vals): """Checks if a list of points is constant""" if len(vals) <= 1: @@ -27,7 +27,8 @@ def is_constant(vals): it = iter(vals) next(it) op = operator.eq - return all(itertools.starmap(op, zip(it,vals))) + return all(itertools.starmap(op, zip(it, vals))) + def is_nondecreasing(vals): """Checks if a list of points is nondecreasing""" @@ -36,7 +37,8 @@ def is_nondecreasing(vals): it = iter(vals) next(it) op = operator.ge - return all(itertools.starmap(op, zip(it,vals))) + return all(itertools.starmap(op, zip(it, vals))) + def is_nonincreasing(vals): """Checks if a list of points is nonincreasing""" @@ -45,14 +47,16 @@ def is_nonincreasing(vals): it = iter(vals) next(it) op = operator.le - return all(itertools.starmap(op, zip(it,vals))) + return all(itertools.starmap(op, zip(it, vals))) + def is_positive_power_of_two(x): """Checks if a number is a nonzero and positive power of 2""" - if (x <= 0): + if x <= 0: return False else: - return ( (x & (x - 1)) == 0 ) + return (x & (x - 1)) == 0 + def log2floor(n): """Computes the exact value of floor(log2(n)) without @@ -61,25 +65,27 @@ def log2floor(n): assert n > 0 return n.bit_length() - 1 + def generate_gray_code(nbits): """Generates a Gray code of nbits as list of lists""" bitset = [0 for i in range(nbits)] # important that we copy bitset each time graycode = [list(bitset)] - for i in range(2,(1<= 1 @@ -89,11 +90,17 @@ def __init__(self, variables, weights=None, level=1): # @property - def variables(self): return self._variables + def variables(self): + return self._variables + @property - def weights(self): return self._weights + def weights(self): + return self._weights + @property - def level(self): return self._level + def level(self): + return self._level + def sos1(variables, weights=None): """A Special Ordered Set of type 1. @@ -101,6 +108,7 @@ def sos1(variables, weights=None): This is an alias for sos(..., level=1)""" return sos(variables, weights=weights, level=1) + def sos2(variables, weights=None): """A Special Ordered Set of type 2. @@ -108,8 +116,7 @@ def sos2(variables, weights=None): """ return sos(variables, weights=weights, level=2) + # inserts class definitions for simple _tuple, _list, and # _dict containers into this module -define_simple_containers(globals(), - "sos", - ISOS) +define_simple_containers(globals(), "sos", ISOS) diff --git a/pyomo/core/kernel/suffix.py b/pyomo/core/kernel/suffix.py index 9498dd5bf10..0416c1269f9 100644 --- a/pyomo/core/kernel/suffix.py +++ b/pyomo/core/kernel/suffix.py @@ -13,18 +13,15 @@ from pyomo.common.collections import ComponentMap from pyomo.common.deprecation import deprecated -from pyomo.core.kernel.base import ( - ICategorizedObject, _abstract_readonly_property -) +from pyomo.core.kernel.base import ICategorizedObject, _abstract_readonly_property from pyomo.core.kernel.dict_container import DictContainer -from pyomo.core.kernel.container_utils import ( - define_homogeneous_container_type -) +from pyomo.core.kernel.container_utils import define_homogeneous_container_type logger = logging.getLogger('pyomo.core') _noarg = object() + # Note: ComponentMap is first in the inheritance chain # because its __getstate__ / __setstate__ methods # contain some special hacks that allow it to be used @@ -32,9 +29,9 @@ # temporary). As a result, we need to override the # __str__ method on this class so that suffix behaves # like ICategorizedObject instead of ComponentMap -class ISuffix(ComponentMap, - ICategorizedObject): +class ISuffix(ComponentMap, ICategorizedObject): """The interface for suffixes.""" + __slots__ = () # @@ -43,10 +40,8 @@ class ISuffix(ComponentMap, # by overriding the @property method # - direction = _abstract_readonly_property( - doc="The suffix direction") - datatype = _abstract_readonly_property( - doc="The suffix datatype") + direction = _abstract_readonly_property(doc="The suffix direction") + datatype = _abstract_readonly_property(doc="The suffix datatype") # # Interface @@ -55,19 +50,23 @@ class ISuffix(ComponentMap, def __str__(self): return ICategorizedObject.__str__(self) + class suffix(ISuffix): """A container for storing extraneous model data that can be imported to or exported from a solver.""" + _ctype = ISuffix - __slots__ = ("_parent", - "_storage_key", - "_active", - "_direction", - "_datatype", - "__weakref__") + __slots__ = ( + "_parent", + "_storage_key", + "_active", + "_direction", + "_datatype", + "__weakref__", + ) # neither sent to solver or received from solver - LOCAL = 0 + LOCAL = 0 # sent to solver or other external location EXPORT = 1 # obtained from solver or other external source @@ -75,17 +74,17 @@ class suffix(ISuffix): # both IMPORT_EXPORT = 3 - _directions = {LOCAL: 'suffix.LOCAL', - EXPORT: 'suffix.EXPORT', - IMPORT: 'suffix.IMPORT', - IMPORT_EXPORT: 'suffix.IMPORT_EXPORT'} + _directions = { + LOCAL: 'suffix.LOCAL', + EXPORT: 'suffix.EXPORT', + IMPORT: 'suffix.IMPORT', + IMPORT_EXPORT: 'suffix.IMPORT_EXPORT', + } # datatypes (numbers are compatible with ASL bitcodes) FLOAT = 4 INT = 0 - _datatypes = {FLOAT: 'suffix.FLOAT', - INT: 'suffix.INT', - None: str(None)} + _datatypes = {FLOAT: 'suffix.FLOAT', INT: 'suffix.INT', None: str(None)} def __init__(self, *args, **kwds): self._parent = None @@ -119,75 +118,85 @@ def import_enabled(self): def datatype(self): """Return the suffix datatype.""" return self._datatype + @datatype.setter def datatype(self, datatype): """Set the suffix datatype.""" if datatype not in self._datatypes: raise ValueError( "Suffix datatype must be one of: %s. \n" - "Value given: %s" - % (list(self._datatypes.values()), - datatype)) + "Value given: %s" % (list(self._datatypes.values()), datatype) + ) self._datatype = datatype @property def direction(self): """Return the suffix direction.""" return self._direction + @direction.setter def direction(self, direction): """Set the suffix direction.""" if not direction in self._directions: raise ValueError( "Suffix direction must be one of: %s. \n" - "Value given: %s" - % (list(self._directions.values()), - direction)) + "Value given: %s" % (list(self._directions.values()), direction) + ) self._direction = direction # # Methods that are deprecated # - @deprecated("suffix.set_all_values will be removed in the future.", - version='5.3') + @deprecated("suffix.set_all_values will be removed in the future.", version='5.3') def set_all_values(self, value): for ndx in self: self[ndx] = value - @deprecated("suffix.clear_value will be removed in the future. " - "Use 'del suffix[key]' instead.", version='5.3') + @deprecated( + "suffix.clear_value will be removed in the future. " + "Use 'del suffix[key]' instead.", + version='5.3', + ) def clear_value(self, component): try: del self[component] except KeyError: pass - @deprecated("suffix.clear_all_values is replaced with suffix.clear", - version='5.3') + @deprecated("suffix.clear_all_values is replaced with suffix.clear", version='5.3') def clear_all_values(self): self.clear() - @deprecated("suffix.get_datatype is replaced with the property " - "suffix.datatype", version='5.3') + @deprecated( + "suffix.get_datatype is replaced with the property suffix.datatype", + version='5.3', + ) def get_datatype(self): return self.datatype - @deprecated("suffix.set_datatype is replaced with the property " - "setter suffix.datatype", version='5.3') + @deprecated( + "suffix.set_datatype is replaced with the property setter suffix.datatype", + version='5.3', + ) def set_datatype(self, datatype): self.datatype = datatype - @deprecated("suffix.get_direction is replaced with the property " - "suffix.direction", version='5.3') + @deprecated( + "suffix.get_direction is replaced with the property suffix.direction", + version='5.3', + ) def get_direction(self): return self.direction - @deprecated("suffix.set_direction is replaced with the property " - "setter suffix.direction", version='5.3') + @deprecated( + "suffix.set_direction is replaced with the property setter suffix.direction", + version='5.3', + ) def set_direction(self, direction): self.direction = direction + # A list of convenient suffix generators, including: # - export_suffix_generator # **(used by problem writers) @@ -196,10 +205,8 @@ def set_direction(self, direction): # - local_suffix_generator # - suffix_generator -def export_suffix_generator(blk, - datatype=_noarg, - active=True, - descend_into=True): + +def export_suffix_generator(blk, datatype=_noarg, active=True, descend_into=True): """ Generates an efficient traversal of all suffixes that have been declared for exporting data. @@ -224,18 +231,16 @@ def export_suffix_generator(blk, Returns: iterator of suffixes """ - for suf in filter(lambda x: (x.export_enabled and \ - ((datatype is _noarg) or \ - (x.datatype is datatype))), - blk.components(ctype=suffix._ctype, - active=active, - descend_into=descend_into)): + for suf in filter( + lambda x: ( + x.export_enabled and ((datatype is _noarg) or (x.datatype is datatype)) + ), + blk.components(ctype=suffix._ctype, active=active, descend_into=descend_into), + ): yield suf -def import_suffix_generator(blk, - datatype=_noarg, - active=True, - descend_into=True): + +def import_suffix_generator(blk, datatype=_noarg, active=True, descend_into=True): """ Generates an efficient traversal of all suffixes that have been declared for importing data. @@ -260,18 +265,16 @@ def import_suffix_generator(blk, Returns: iterator of suffixes """ - for suf in filter(lambda x: (x.import_enabled and \ - ((datatype is _noarg) or \ - (x.datatype is datatype))), - blk.components(ctype=suffix._ctype, - active=active, - descend_into=descend_into)): + for suf in filter( + lambda x: ( + x.import_enabled and ((datatype is _noarg) or (x.datatype is datatype)) + ), + blk.components(ctype=suffix._ctype, active=active, descend_into=descend_into), + ): yield suf -def local_suffix_generator(blk, - datatype=_noarg, - active=True, - descend_into=True): + +def local_suffix_generator(blk, datatype=_noarg, active=True, descend_into=True): """ Generates an efficient traversal of all suffixes that have been declared local data storage. @@ -296,18 +299,17 @@ def local_suffix_generator(blk, Returns: iterator of suffixes """ - for suf in filter(lambda x: (x.direction is suffix.LOCAL and \ - ((datatype is _noarg) or \ - (x.datatype is datatype))), - blk.components(ctype=suffix._ctype, - active=active, - descend_into=descend_into)): + for suf in filter( + lambda x: ( + x.direction is suffix.LOCAL + and ((datatype is _noarg) or (x.datatype is datatype)) + ), + blk.components(ctype=suffix._ctype, active=active, descend_into=descend_into), + ): yield suf -def suffix_generator(blk, - datatype=_noarg, - active=True, - descend_into=True): + +def suffix_generator(blk, datatype=_noarg, active=True, descend_into=True): """ Generates an efficient traversal of all suffixes that have been declared. @@ -332,13 +334,13 @@ def suffix_generator(blk, Returns: iterator of suffixes """ - for suf in filter(lambda x: ((datatype is _noarg) or \ - (x.datatype is datatype)), - blk.components(ctype=suffix._ctype, - active=active, - descend_into=descend_into)): + for suf in filter( + lambda x: ((datatype is _noarg) or (x.datatype is datatype)), + blk.components(ctype=suffix._ctype, active=active, descend_into=descend_into), + ): yield suf + # inserts class definition for simple a # simple suffix_dict into this module define_homogeneous_container_type( @@ -346,6 +348,6 @@ def suffix_generator(blk, "suffix_dict", DictContainer, ISuffix, - doc=("A dict-style container for objects " - "with category type "+ISuffix.__name__), - use_slots=True) + doc=("A dict-style container for objects with category type " + ISuffix.__name__), + use_slots=True, +) diff --git a/pyomo/core/kernel/tuple_container.py b/pyomo/core/kernel/tuple_container.py index b9a084d0e6d..f717fe0350a 100644 --- a/pyomo/core/kernel/tuple_container.py +++ b/pyomo/core/kernel/tuple_container.py @@ -11,12 +11,10 @@ import collections.abc -from pyomo.core.kernel.homogeneous_container import \ - IHomogeneousContainer +from pyomo.core.kernel.homogeneous_container import IHomogeneousContainer -class TupleContainer(IHomogeneousContainer, - collections.abc.Sequence): +class TupleContainer(IHomogeneousContainer, collections.abc.Sequence): """ A partial implementation of the IHomogeneousContainer interface that provides tuple-like storage functionality. @@ -30,6 +28,7 @@ class TupleContainer(IHomogeneousContainer, other ICategorizedObjectContainer implementations that are defined with the same ctype. """ + __slots__ = () _child_storage_delimiter_string = "" _child_storage_entry_string = "[%s]" @@ -44,8 +43,8 @@ def _init(self, args): if len(args) > 1: raise TypeError( "%s expected at most 1 arguments, " - "got %s" % (self.__class__.__name__, - len(args))) + "got %s" % (self.__class__.__name__, len(args)) + ) for item in args[0]: self._insert(len(self), item) @@ -64,17 +63,14 @@ def _insert(self, i, item): "Invalid assignment to type %s with index %s. " "A parent container has already been " "assigned to the object being inserted: %s" - % (self.__class__.__name__, - i, - item.parent.name)) + % (self.__class__.__name__, i, item.parent.name) + ) else: raise TypeError( "Invalid assignment to type %s with index %s. " "The object being inserted has the wrong " - "category type: %s" - % (self.__class__.__name__, - i, - item.ctype)) + "category type: %s" % (self.__class__.__name__, i, item.ctype) + ) # # Define the ICategorizedObjectContainer abstract methods @@ -116,13 +112,11 @@ def __len__(self): # Convert both objects to a plain tuple of (type(val), # id(val)) tuples and compare that instead. def __eq__(self, other): - if not isinstance(other, (collections.abc.Set, - collections.abc.Sequence)): + if not isinstance(other, (collections.abc.Set, collections.abc.Sequence)): return False - return tuple((type(val), id(val)) - for val in self) == \ - tuple((type(val), id(val)) - for val in other) + return tuple((type(val), id(val)) for val in self) == tuple( + (type(val), id(val)) for val in other + ) def __ne__(self, other): return not (self == other) @@ -143,7 +137,7 @@ def __contains__(self, item): def index(self, item, start=0, stop=None): """S.index(value, [start, [stop]]) -> integer -- return first index of value. - Raises ValueError if the value is not present. + Raises ValueError if the value is not present. """ if start is not None and start < 0: start = max(len(self) + start, 0) diff --git a/pyomo/core/kernel/variable.py b/pyomo/core/kernel/variable.py index 1edeb332ed4..75f3118620c 100644 --- a/pyomo/core/kernel/variable.py +++ b/pyomo/core/kernel/variable.py @@ -10,30 +10,24 @@ # ___________________________________________________________________________ from pyomo.common.modeling import NoArgumentGiven from pyomo.core.staleflag import StaleFlagManager -from pyomo.core.expr.numvalue import (NumericValue, - is_numeric_data, - value) -from pyomo.core.kernel.base import \ - (ICategorizedObject, - _abstract_readwrite_property) -from pyomo.core.kernel.container_utils import \ - define_simple_containers -from pyomo.core.kernel.set_types import (RealSet, - IntegerSet) +from pyomo.core.expr.numvalue import NumericValue, is_numeric_data, value +from pyomo.core.kernel.base import ICategorizedObject, _abstract_readwrite_property +from pyomo.core.kernel.container_utils import define_simple_containers +from pyomo.core.kernel.set_types import RealSet, IntegerSet _pos_inf = float('inf') _neg_inf = float('-inf') -def _extract_domain_type_and_bounds(domain_type, - domain, - lb, ub): + +def _extract_domain_type_and_bounds(domain_type, domain, lb, ub): if domain is not None: if domain_type is not None: raise ValueError( "At most one of the 'domain' and " "'domain_type' keywords can be changed " "from their default value when " - "initializing a variable.") + "initializing a variable." + ) domain_lb, domain_ub, domain_step = domain.get_interval() if domain_step == 0: domain_type = RealSet @@ -45,28 +39,31 @@ def _extract_domain_type_and_bounds(domain_type, raise ValueError( "The 'lb' keyword can not be used " "to initialize a variable when the " - "domain lower bound is finite.") + "domain lower bound is finite." + ) lb = domain_lb if domain_ub is not None: if ub is not None: raise ValueError( "The 'ub' keyword can not be used " "to initialize a variable when the " - "domain upper bound is finite.") + "domain upper bound is finite." + ) ub = domain_ub elif domain_type is None: domain_type = RealSet if domain_type not in IVariable._valid_domain_types: raise ValueError( "Domain type '%s' is not valid. Must be " - "one of: %s" % (domain_type, - IVariable._valid_domain_types)) + "one of: %s" % (domain_type, IVariable._valid_domain_types) + ) return domain_type, lb, ub class IVariable(ICategorizedObject, NumericValue): """The interface for decision variables""" + __slots__ = () _valid_domain_types = (RealSet, IntegerSet) @@ -78,18 +75,16 @@ class IVariable(ICategorizedObject, NumericValue): # domain_type = _abstract_readwrite_property( - doc=("The domain type of the variable " - "(:class:`RealSet` or :class:`IntegerSet`)")) - lb = _abstract_readwrite_property( - doc="The lower bound of the variable") - ub = _abstract_readwrite_property( - doc="The upper bound of the variable") - value = _abstract_readwrite_property( - doc="The value of the variable") - fixed = _abstract_readwrite_property( - doc="The fixed status of the variable") - stale = _abstract_readwrite_property( - doc="The stale status of the variable") + doc=( + "The domain type of the variable " + "(:class:`RealSet` or :class:`IntegerSet`)" + ) + ) + lb = _abstract_readwrite_property(doc="The lower bound of the variable") + ub = _abstract_readwrite_property(doc="The upper bound of the variable") + value = _abstract_readwrite_property(doc="The value of the variable") + fixed = _abstract_readwrite_property(doc="The fixed status of the variable") + stale = _abstract_readwrite_property(doc="The stale status of the variable") # # Interface @@ -99,6 +94,7 @@ class IVariable(ICategorizedObject, NumericValue): def bounds(self): """Get/Set the bounds as a tuple (lb, ub).""" return (self.lb, self.ub) + @bounds.setter def bounds(self, bounds_tuple): self.lower, self.upper = bounds_tuple @@ -107,6 +103,7 @@ def bounds(self, bounds_tuple): def lb(self): """Return the numeric value of the variable lower bound.""" return value(self.lower) + @lb.setter def lb(self, val): self.lower = val @@ -115,6 +112,7 @@ def lb(self, val): def ub(self): """Return the numeric value of the variable upper bound.""" return value(self.upper) + @ub.setter def ub(self, val): self.upper = val @@ -135,7 +133,7 @@ def unfix(self): :const:`False`.""" self.fixed = False - free=unfix + free = unfix def has_lb(self): """Returns :const:`False` when the lower bound is @@ -211,19 +209,22 @@ def is_binary(self): """Returns :const:`True` when the domain type is :class:`IntegerSet` and the bounds are within [0,1].""" - return self.domain_type.get_interval()[2] == 1 \ - and (self.lb, self.ub) in {(0,1), (0,0), (1,1)} - -# TODO? -# def is_semicontinuous(self): -# """Returns :const:`True` when the domain class is -# SemiContinuous.""" -# return issubclass(self.domain_type, SemiRealSet) - -# def is_semiinteger(self): -# """Returns :const:`True` when the domain class is -# SemiInteger.""" -# return issubclass(self.domain_type, SemiIntegerSet) + return self.domain_type.get_interval()[2] == 1 and (self.lb, self.ub) in { + (0, 1), + (0, 0), + (1, 1), + } + + # TODO? + # def is_semicontinuous(self): + # """Returns :const:`True` when the domain class is + # SemiContinuous.""" + # return issubclass(self.domain_type, SemiRealSet) + + # def is_semiinteger(self): + # """Returns :const:`True` when the domain class is + # SemiInteger.""" + # return issubclass(self.domain_type, SemiIntegerSet) # # Implement the NumericValue abstract methods @@ -269,6 +270,7 @@ def __call__(self, exception=True): raise ValueError("value is None") return self.value + class variable(IVariable): """A decision variable @@ -318,25 +320,24 @@ class variable(IVariable): >>> # Also a binary variable >>> x = pmo.variable(domain_type=pmo.IntegerSet, lb=0, ub=1) """ + _ctype = IVariable - __slots__ = ("_parent", - "_storage_key", - "_domain_type", - "_active", - "_lb", - "_ub", - "_value", - "_fixed", - "_stale", - "__weakref__") - - def __init__(self, - domain_type=None, - domain=None, - lb=None, - ub=None, - value=None, - fixed=False): + __slots__ = ( + "_parent", + "_storage_key", + "_domain_type", + "_active", + "_lb", + "_ub", + "_value", + "_fixed", + "_stale", + "__weakref__", + ) + + def __init__( + self, domain_type=None, domain=None, lb=None, ub=None, value=None, fixed=False + ): self._parent = None self._storage_key = None self._active = True @@ -345,44 +346,45 @@ def __init__(self, self._ub = ub self._value = value self._fixed = fixed - self._stale = 0 # True - if (domain_type is not None) or \ - (domain is not None): - self._domain_type, self._lb, self._ub = \ - _extract_domain_type_and_bounds(domain_type, - domain, - lb, ub) + self._stale = 0 # True + if (domain_type is not None) or (domain is not None): + self._domain_type, self._lb, self._ub = _extract_domain_type_and_bounds( + domain_type, domain, lb, ub + ) @property def lower(self): """The lower bound of the variable""" return self._lb + @lower.setter def lower(self, lb): - if (lb is not None) and \ - (not is_numeric_data(lb)): + if (lb is not None) and (not is_numeric_data(lb)): raise ValueError( - "Variable lower bounds must be numbers or " - "expressions restricted to numeric data.") + "Variable lower bounds must be numbers or " + "expressions restricted to numeric data." + ) self._lb = lb @property def upper(self): """The upper bound of the variable""" return self._ub + @upper.setter def upper(self, ub): - if (ub is not None) and \ - (not is_numeric_data(ub)): + if (ub is not None) and (not is_numeric_data(ub)): raise ValueError( - "Variable upper bounds must be numbers or " - "expressions restricted to numeric data.") + "Variable upper bounds must be numbers or " + "expressions restricted to numeric data." + ) self._ub = ub @property def value(self): """The value of the variable""" return self._value + @value.setter def value(self, value): self._value = value @@ -395,6 +397,7 @@ def set_value(self, value, skip_validation=True): def fixed(self): """The fixed status of the variable""" return self._fixed + @fixed.setter def fixed(self, fixed): self._fixed = fixed @@ -403,6 +406,7 @@ def fixed(self, fixed): def stale(self): """The stale status of the variable""" return StaleFlagManager.is_stale(self._stale) + @stale.setter def stale(self, stale): if stale: @@ -415,13 +419,14 @@ def domain_type(self): """The domain type of the variable (:class:`RealSet` or :class:`IntegerSet`)""" return self._domain_type + @domain_type.setter def domain_type(self, domain_type): if domain_type not in IVariable._valid_domain_types: raise ValueError( "Domain type '%s' is not valid. Must be " - "one of: %s" % (self.domain_type, - IVariable._valid_domain_types)) + "one of: %s" % (self.domain_type, IVariable._valid_domain_types) + ) self._domain_type = domain_type def _set_domain(self, domain): @@ -429,15 +434,13 @@ def _set_domain(self, domain): updates the :attr:`domain_type` property and overwrites the :attr:`lb` and :attr:`ub` properties with the domain bounds.""" - self.domain_type, self.lb, self.ub = \ - _extract_domain_type_and_bounds(None, - domain, - None, None) - domain = property(fset=_set_domain, - doc=_set_domain.__doc__) + self.domain_type, self.lb, self.ub = _extract_domain_type_and_bounds( + None, domain, None, None + ) + + domain = property(fset=_set_domain, doc=_set_domain.__doc__) + # inserts class definitions for simple _tuple, _list, and # _dict containers into this module -define_simple_containers(globals(), - "variable", - IVariable) +define_simple_containers(globals(), "variable", IVariable) diff --git a/pyomo/core/plugins/__init__.py b/pyomo/core/plugins/__init__.py index ff3e5e23ff0..f763881c50c 100644 --- a/pyomo/core/plugins/__init__.py +++ b/pyomo/core/plugins/__init__.py @@ -9,6 +9,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.core.plugins.transform - diff --git a/pyomo/core/plugins/transform/__init__.py b/pyomo/core/plugins/transform/__init__.py index 402acec534b..7d37c706542 100644 --- a/pyomo/core/plugins/transform/__init__.py +++ b/pyomo/core/plugins/transform/__init__.py @@ -10,13 +10,16 @@ # ___________________________________________________________________________ import pyomo.core.plugins.transform.relax_integrality + # import pyomo.core.plugins.transform.eliminate_fixed_vars # import pyomo.core.plugins.transform.standard_form import pyomo.core.plugins.transform.expand_connectors + # import pyomo.core.plugins.transform.equality_transform import pyomo.core.plugins.transform.nonnegative_transform import pyomo.core.plugins.transform.radix_linearization import pyomo.core.plugins.transform.discrete_vars + # import pyomo.core.plugins.transform.util import pyomo.core.plugins.transform.add_slack_vars import pyomo.core.plugins.transform.scaling diff --git a/pyomo/core/plugins/transform/add_slack_vars.py b/pyomo/core/plugins/transform/add_slack_vars.py index f42217fca74..6906b033aab 100644 --- a/pyomo/core/plugins/transform/add_slack_vars.py +++ b/pyomo/core/plugins/transform/add_slack_vars.py @@ -9,7 +9,15 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core import TransformationFactory, Var, NonNegativeReals, Constraint, Objective, Block, value +from pyomo.core import ( + TransformationFactory, + Var, + NonNegativeReals, + Constraint, + Objective, + Block, + value, +) from pyomo.common.modeling import unique_component_name from pyomo.core.plugins.transform.hierarchy import NonIsomorphicTransformation @@ -18,11 +26,14 @@ from pyomo.core.base.constraint import _ConstraintData from pyomo.common.deprecation import deprecation_warning + def target_list(x): - deprecation_msg = ("In future releases ComponentUID targets will no " - "longer be supported in the core.add_slack_variables " - "transformation. Specify targets as a Constraint or " - "list of Constraints.") + deprecation_msg = ( + "In future releases ComponentUID targets will no " + "longer be supported in the core.add_slack_variables " + "transformation. Specify targets as a Constraint or " + "list of Constraints." + ) if isinstance(x, ComponentUID): if deprecation_msg: deprecation_warning(deprecation_msg, version='5.7.1') @@ -30,9 +41,9 @@ def target_list(x): deprecation_msg = None # [ESJ 07/15/2020] We have to just pass it through because we need the # instance in order to be able to do anything about it... - return [ x ] + return [x] elif isinstance(x, (Constraint, _ConstraintData)): - return [ x ] + return [x] elif hasattr(x, '__iter__'): ans = [] for i in x: @@ -47,20 +58,25 @@ def target_list(x): else: raise ValueError( "Expected Constraint or list of Constraints." - "\n\tReceived %s" % (type(i),)) + "\n\tReceived %s" % (type(i),) + ) return ans else: raise ValueError( - "Expected Constraint or list of Constraints." - "\n\tReceived %s" % (type(x),)) + "Expected Constraint or list of Constraints.\n\tReceived %s" % (type(x),) + ) + import logging + logger = logging.getLogger('pyomo.core') -@TransformationFactory.register('core.add_slack_variables', \ - doc="Create a model where we add slack variables to every constraint " - "and add new objective penalizing the sum of the slacks") +@TransformationFactory.register( + 'core.add_slack_variables', + doc="Create a model where we add slack variables to every constraint " + "and add new objective penalizing the sum of the slacks", +) class AddSlackVariables(NonIsomorphicTransformation): """ This plugin adds slack variables to every constraint or to the constraints @@ -68,15 +84,15 @@ class AddSlackVariables(NonIsomorphicTransformation): """ CONFIG = ConfigBlock("core.add_slack_variables") - CONFIG.declare('targets', ConfigValue( - default=None, - domain=target_list, - description="target or list of targets to which slacks will be added", - doc=""" - - This specifies the list of Constraints to add slack variables to. - """ - )) + CONFIG.declare( + 'targets', + ConfigValue( + default=None, + domain=target_list, + description="target or list of targets to which slacks will be added", + doc="This specifies the list of Constraints to add slack variables to.", + ), + ) def __init__(self, **kwds): kwds['name'] = "add_slack_vars" @@ -92,7 +108,8 @@ def _apply_to_impl(self, instance, **kwds): if targets is None: constraintDatas = instance.component_data_objects( - Constraint, descend_into=True) + Constraint, descend_into=True + ) else: constraintDatas = [] for t in targets: @@ -123,13 +140,16 @@ def _apply_to_impl(self, instance, **kwds): obj_expr = 0 for cons in constraintDatas: - if (cons.lower is not None and cons.upper is not None) and \ - value(cons.lower) > value(cons.upper): + if (cons.lower is not None and cons.upper is not None) and value( + cons.lower + ) > value(cons.upper): # this is a structural infeasibility so slacks aren't going to # help: - raise RuntimeError("Lower bound exceeds upper bound in " - "constraint %s" % cons.name) - if not cons.active: continue + raise RuntimeError( + "Lower bound exceeds upper bound in constraint %s" % cons.name + ) + if not cons.active: + continue cons_name = cons.getname(fully_qualified=True) if cons.lower is not None: # we add positive slack variable to body: diff --git a/pyomo/core/plugins/transform/discrete_vars.py b/pyomo/core/plugins/transform/discrete_vars.py index ccb5a261213..cfb1c5e144f 100644 --- a/pyomo/core/plugins/transform/discrete_vars.py +++ b/pyomo/core/plugins/transform/discrete_vars.py @@ -10,26 +10,21 @@ # ___________________________________________________________________________ import logging + logger = logging.getLogger('pyomo.core') from pyomo.common import deprecated -from pyomo.core.base import ( - Transformation, - TransformationFactory, - Var, - Suffix, - Reals, -) +from pyomo.core.base import Transformation, TransformationFactory, Var, Suffix, Reals + # # This transformation relaxes integer ranges to their continuous # counterparts # @TransformationFactory.register( - 'core.relax_integer_vars', - doc="Relax integer variables to continuous counterparts" ) + 'core.relax_integer_vars', doc="Relax integer variables to continuous counterparts" +) class RelaxIntegerVars(Transformation): - def __init__(self): super(RelaxIntegerVars, self).__init__() @@ -44,14 +39,17 @@ def _apply_to(self, model, **kwds): model.del_component("_relaxed_integer_vars") return # True by default, you can specify False if you want - descend = kwds.get('transform_deactivated_blocks', - options.get('transform_deactivated_blocks', True)) + descend = kwds.get( + 'transform_deactivated_blocks', + options.get('transform_deactivated_blocks', True), + ) active = None if descend else True # Relax the model relaxed_vars = {} _base_model_vars = model.component_data_objects( - Var, active=active, descend_into=True ) + Var, active=active, descend_into=True + ) for var in _base_model_vars: if not var.is_integer(): continue @@ -81,10 +79,11 @@ def _apply_to(self, model, **kwds): @TransformationFactory.register( 'core.relax_discrete', - doc="[DEPRECATED] Relax integer variables to continuous counterparts" ) + doc="[DEPRECATED] Relax integer variables to continuous counterparts", +) @deprecated( - "core.relax_discrete is deprecated. Use core.relax_integer_vars", - version='5.7') + "core.relax_discrete is deprecated. Use core.relax_integer_vars", version='5.7' +) class RelaxDiscreteVars(RelaxIntegerVars): """ This plugin relaxes integrality in a Pyomo model. @@ -98,10 +97,9 @@ def __init__(self, **kwds): # This transformation fixes known discrete domains to their current values # @TransformationFactory.register( - 'core.fix_integer_vars', - doc="Fix all integer variables to their current values") + 'core.fix_integer_vars', doc="Fix all integer variables to their current values" +) class FixIntegerVars(Transformation): - def __init__(self): super(FixIntegerVars, self).__init__() @@ -115,7 +113,8 @@ def _apply_to(self, model, **kwds): fixed_vars = [] _base_model_vars = model.component_data_objects( - Var, active=True, descend_into=True) + Var, active=True, descend_into=True + ) for var in _base_model_vars: # Instead of checking against # `_integer_relaxation_map.keys()` we just check the item @@ -130,10 +129,11 @@ def _apply_to(self, model, **kwds): @TransformationFactory.register( 'core.fix_discrete', - doc="[DEPRECATED] Fix all integer variables to their current values") + doc="[DEPRECATED] Fix all integer variables to their current values", +) @deprecated( - "core.fix_discrete is deprecated. Use core.fix_integer_vars", - version='5.7') + "core.fix_discrete is deprecated. Use core.fix_integer_vars", version='5.7' +) class FixDiscreteVars(FixIntegerVars): def __init__(self, **kwds): super(FixDiscreteVars, self).__init__(**kwds) diff --git a/pyomo/core/plugins/transform/eliminate_fixed_vars.py b/pyomo/core/plugins/transform/eliminate_fixed_vars.py index 968b8e000c6..1048b957e08 100644 --- a/pyomo/core/plugins/transform/eliminate_fixed_vars.py +++ b/pyomo/core/plugins/transform/eliminate_fixed_vars.py @@ -9,15 +9,17 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.expr.current import ExpressionBase -from pyomo.core.expr.numvalue import as_numeric +from pyomo.core.expr import ExpressionBase, as_numeric from pyomo.core import Constraint, Objective, TransformationFactory from pyomo.core.base.var import Var, _VarData -from pyomo.core.base.util import sequence +from pyomo.core.util import sequence from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation -@TransformationFactory.register('core.remove_fixed_vars', doc="Create an equivalent model that omits all fixed variables.") +@TransformationFactory.register( + 'core.remove_fixed_vars', + doc="Create an equivalent model that omits all fixed variables.", +) class EliminateFixedVars(IsomorphicTransformation): """ Create an equivalent model that omits all fixed variables. @@ -47,7 +49,7 @@ def _create_using(self, model, **kwds): ctr = 0 for i in sequence(M.nvariables()): var = M.variable(i) - del M._var[ i-1 ] + del M._var[i - 1] if var.fixed: if var.is_binary(): M.statistics.number_of_binary_variables -= 1 @@ -56,28 +58,30 @@ def _create_using(self, model, **kwds): elif var.is_continuous(): M.statistics.number_of_continuous_variables -= 1 M.statistics.number_of_variables -= 1 - del M._label_var_map[ var.label ] - del var.component()._data[ var.index ] + del M._label_var_map[var.label] + del var.component()._data[var.index] else: - M._var[ ctr ] = var + M._var[ctr] = var var._old_id = var.id var.id = ctr ctr += 1 return M def _fix_vars(self, expr, model): - """ Walk through the S-expression, fixing variables. """ + """Walk through the S-expression, fixing variables.""" # TODO - Change this to use a visitor pattern! if expr._args is None: return expr _args = [] for i in range(len(expr._args)): - if isinstance(expr._args[i],ExpressionBase): - _args.append( self._fix_vars(expr._args[i], model) ) - elif (isinstance(expr._args[i],Var) or isinstance(expr._args[i],_VarData)) and expr._args[i].fixed: + if isinstance(expr._args[i], ExpressionBase): + _args.append(self._fix_vars(expr._args[i], model)) + elif ( + isinstance(expr._args[i], Var) or isinstance(expr._args[i], _VarData) + ) and expr._args[i].fixed: if expr._args[i].value != 0.0: - _args.append( as_numeric(expr._args[i].value) ) + _args.append(as_numeric(expr._args[i].value)) else: - _args.append( expr._args[i] ) + _args.append(expr._args[i]) expr._args = _args return expr diff --git a/pyomo/core/plugins/transform/equality_transform.py b/pyomo/core/plugins/transform/equality_transform.py index f910803eac9..e0cc463e238 100644 --- a/pyomo/core/plugins/transform/equality_transform.py +++ b/pyomo/core/plugins/transform/equality_transform.py @@ -16,7 +16,10 @@ from pyomo.core.plugins.transform.util import collectAbstractComponents -@TransformationFactory.register("core.add_slack_vars", doc="Create an equivalent model by introducing slack variables to eliminate inequality constraints.") +@TransformationFactory.register( + "core.add_slack_vars", + doc="Create an equivalent model by introducing slack variables to eliminate inequality constraints.", +) class EqualityTransform(IsomorphicTransformation): """ Creates a new, equivalent model by introducing slack and excess variables @@ -69,8 +72,7 @@ def _create_using(self, model, **kwds): # con._data on-the-fly. # indices = con._data.keys() - for (ndx, cdata) in [(ndx, con._data[ndx]) for ndx in indices]: - + for ndx, cdata in [(ndx, con._data[ndx]) for ndx in indices]: qualified_con_name = create_name(con_name, ndx) # Do nothing with equality constraints @@ -79,30 +81,26 @@ def _create_using(self, model, **kwds): # Add an excess variable if the lower bound exists if cdata.lower is not None: - # Make the excess variable excess_name = "%s_%s" % (qualified_con_name, excess_suffix) - equality.__setattr__(excess_name, - Var(within=NonNegativeReals)) + equality.__setattr__(excess_name, Var(within=NonNegativeReals)) # Make a new lower bound constraint lb_name = "%s_%s" % (create_name("", ndx), lb_suffix) excess = equality.__getattribute__(excess_name) - new_expr = (cdata.lower == cdata.body - excess) + new_expr = cdata.lower == cdata.body - excess con.add(lb_name, new_expr) # Add a slack variable if the lower bound exists if cdata.upper is not None: - # Make the excess variable slack_name = "%s_%s" % (qualified_con_name, slack_suffix) - equality.__setattr__(slack_name, - Var(within=NonNegativeReals)) + equality.__setattr__(slack_name, Var(within=NonNegativeReals)) # Make a new upper bound constraint ub_name = "%s_%s" % (create_name("", ndx), ub_suffix) slack = equality.__getattribute__(slack_name) - new_expr = (cdata.upper == cdata.body + slack) + new_expr = cdata.upper == cdata.body + slack con.add(ub_name, new_expr) # Since we explicitly `continue` for equality constraints, we diff --git a/pyomo/core/plugins/transform/expand_connectors.py b/pyomo/core/plugins/transform/expand_connectors.py index 632bb0de7a9..bf1b517c1b0 100644 --- a/pyomo/core/plugins/transform/expand_connectors.py +++ b/pyomo/core/plugins/transform/expand_connectors.py @@ -10,22 +10,31 @@ # ___________________________________________________________________________ import logging + logger = logging.getLogger('pyomo.core') from pyomo.common.collections import ComponentMap, ComponentSet from pyomo.common.log import is_debug_set -from pyomo.core.expr import current as EXPR -from pyomo.core.base import Transformation, TransformationFactory, Connector, Constraint, \ - ConstraintList, Var, SortComponents +import pyomo.core.expr as EXPR +from pyomo.core.base import ( + Transformation, + TransformationFactory, + Connector, + Constraint, + ConstraintList, + Var, + SortComponents, +) from pyomo.core.base.connector import _ConnectorData, ScalarConnector -@TransformationFactory.register('core.expand_connectors', - doc="Expand all connectors in the model to simple constraints") +@TransformationFactory.register( + 'core.expand_connectors', + doc="Expand all connectors in the model to simple constraints", +) class ExpandConnectors(Transformation): - def _apply_to(self, instance, **kwds): - if is_debug_set(logger): #pragma:nocover + if is_debug_set(logger): # pragma:nocover logger.debug("Calling ConnectorExpander") connectorsFound = False @@ -35,7 +44,7 @@ def _apply_to(self, instance, **kwds): if not connectorsFound: return - if is_debug_set(logger): #pragma:nocover + if is_debug_set(logger): # pragma:nocover logger.debug(" Connectors found!") # @@ -62,7 +71,8 @@ def _apply_to(self, instance, **kwds): connector_types = set([ScalarConnector, _ConnectorData]) for constraint in instance.component_data_objects( - Constraint, sort=SortComponents.deterministic): + Constraint, sort=SortComponents.deterministic + ): ref = None for c in EXPR.identify_components(constraint.body, connector_types): found.add(c) @@ -109,19 +119,19 @@ def _apply_to(self, instance, **kwds): # Validate all connector sets and expand the empty ones known_conn_sets = {} for groupID, conn_set in sorted(connector_groups.values()): - known_conn_sets[id(conn_set)] \ - = self._validate_and_expand_connector_set(conn_set) + known_conn_sets[id(conn_set)] = self._validate_and_expand_connector_set( + conn_set + ) # Expand each constraint for constraint, conn_set in constraint_list: cList = ConstraintList() constraint.parent_block().add_component( - '%s.expanded' % ( constraint.getname( - fully_qualified=False), ), - cList ) + '%s.expanded' % (constraint.getname(fully_qualified=False),), cList + ) connId = next(iter(conn_set)) ref = known_conn_sets[id(matched_connectors[connId])] - for k,v in sorted(ref.items()): + for k, v in sorted(ref.items()): if v[1] >= 0: _iter = v[0] else: @@ -136,10 +146,13 @@ def _apply_to(self, instance, **kwds): else: new_v = c.vars[k] substitution[id(c)] = new_v - cList.add(( - constraint.lower, - EXPR.clone_expression( constraint.body, substitution ), - constraint.upper )) + cList.add( + ( + constraint.lower, + EXPR.clone_expression(constraint.body, substitution), + constraint.upper, + ) + ) constraint.deactivate() # Now, go back and implement VarList aggregators @@ -148,17 +161,14 @@ def _apply_to(self, instance, **kwds): for var, aggregator in conn.aggregators.items(): c = Constraint(expr=aggregator(block, conn.vars[var])) block.add_component( - '%s.%s.aggregate' % ( - conn.getname( - fully_qualified=True), - var), c ) - + '%s.%s.aggregate' % (conn.getname(fully_qualified=True), var), c + ) def _validate_and_expand_connector_set(self, connectors): ref = {} # First, go through the connectors and get the superset of all fields for c in connectors: - for k,v in c.vars.items(): + for k, v in c.vars.items(): if k in ref: # We have already seen this var continue @@ -167,17 +177,21 @@ def _validate_and_expand_connector_set(self, connectors): continue # OK: New var, so add it to the reference list _len = ( - #-3 if v is None else - -2 if k in c.aggregators else - -1 if not hasattr(v, 'is_indexed') or not v.is_indexed() - else len(v) ) - ref[k] = ( v, _len, c ) + # -3 if v is None else + -2 + if k in c.aggregators + else -1 + if not hasattr(v, 'is_indexed') or not v.is_indexed() + else len(v) + ) + ref[k] = (v, _len, c) if not ref: logger.warning( "Cannot identify a reference connector: no connectors " "in the connector set have assigned variables:\n\t(%s)" - % ( ', '.join(sorted(c.name for c in connectors)), )) + % (', '.join(sorted(c.name for c in connectors)),) + ) return ref # Now make sure that connectors match @@ -190,12 +204,13 @@ def _validate_and_expand_connector_set(self, connectors): empty_or_partial.append(c) continue - for k,v in ref.items(): + for k, v in ref.items(): if k not in c.vars: raise ValueError( "Connector mismatch: Connector '%s' missing variable " - "'%s' (appearing in reference connector '%s')" % - ( c.name, k, v[2].name ) ) + "'%s' (appearing in reference connector '%s')" + % (c.name, k, v[2].name) + ) _v = c.vars[k] if _v is None: if not c_is_partial: @@ -203,28 +218,33 @@ def _validate_and_expand_connector_set(self, connectors): c_is_partial = True continue _len = ( - -3 if _v is None else - -2 if k in c.aggregators else - -1 if not hasattr(_v, 'is_indexed') or not _v.is_indexed() - else len(_v) ) + -3 + if _v is None + else -2 + if k in c.aggregators + else -1 + if not hasattr(_v, 'is_indexed') or not _v.is_indexed() + else len(_v) + ) if (_len >= 0) ^ (v[1] >= 0): raise ValueError( "Connector mismatch: Connector variable '%s' mixing " "indexed and non-indexed targets on connectors '%s' " - "and '%s'" % - ( k, v[2].name, c.name )) + "and '%s'" % (k, v[2].name, c.name) + ) if _len >= 0 and _len != v[1]: raise ValueError( "Connector mismatch: Connector variable '%s' index " "mismatch (%s elements in reference connector '%s', " - "but %s elements in connector '%s')" % - ( k, v[1], v[2].name, _len, c.name )) + "but %s elements in connector '%s')" + % (k, v[1], v[2].name, _len, c.name) + ) if v[1] >= 0 and len(v[0].index_set() ^ _v.index_set()): raise ValueError( "Connector mismatch: Connector variable '%s' has " - "mismatched indices on connectors '%s' and '%s'" % - ( k, v[2].name, c.name )) - + "mismatched indices on connectors '%s' and '%s'" + % (k, v[2].name, c.name) + ) # as we are adding things to the model, sort by key so that # the order things are added is deterministic @@ -232,8 +252,7 @@ def _validate_and_expand_connector_set(self, connectors): if len(empty_or_partial) > 1: # This is expensive (names aren't cheap), but does result in # a deterministic ordering - empty_or_partial.sort(key=lambda x: x.getname( - fully_qualified=True)) + empty_or_partial.sort(key=lambda x: x.getname(fully_qualified=True)) # Fill in any empty connectors for c in empty_or_partial: @@ -243,7 +262,7 @@ def _validate_and_expand_connector_set(self, connectors): continue if v[1] >= 0: - idx = ( v[0].index_set(), ) + idx = (v[0].index_set(),) else: idx = () var_args = {} @@ -255,16 +274,15 @@ def _validate_and_expand_connector_set(self, connectors): var_args['bounds'] = v[0].bounds except AttributeError: pass - new_var = Var( *idx, **var_args ) + new_var = Var(*idx, **var_args) block.add_component( - '%s.auto.%s' % ( - c.getname(fully_qualified=True), k ), - new_var) + '%s.auto.%s' % (c.getname(fully_qualified=True), k), new_var + ) if idx: for i in idx[0]: new_var[i].domain = v[0][i].domain - new_var[i].setlb( v[0][i].lb ) - new_var[i].setub( v[0][i].ub ) + new_var[i].setlb(v[0][i].lb) + new_var[i].setub(v[0][i].ub) c.vars[k] = new_var return ref diff --git a/pyomo/core/plugins/transform/hierarchy.py b/pyomo/core/plugins/transform/hierarchy.py index 6b71a5d68a9..a7667fc028a 100644 --- a/pyomo/core/plugins/transform/hierarchy.py +++ b/pyomo/core/plugins/transform/hierarchy.py @@ -47,7 +47,7 @@ def __init__(self, **kwds): class LinearTransformation(Transformation): - """ Base class for all linear model transformations. """ + """Base class for all linear model transformations.""" def __init__(self, **kwds): kwds["name"] = kwds.get("name", "linear_transform") @@ -67,9 +67,8 @@ def __init__(self, **kwds): class NonlinearTransformation(Transformation): - """ Base class for all nonlinear model transformations. """ + """Base class for all nonlinear model transformations.""" def __init__(self, **kwds): kwds["name"] = kwds.get("name", "nonlinear_transform") super(NonlinearTransformation, self).__init__(**kwds) - diff --git a/pyomo/core/plugins/transform/logical_to_linear.py b/pyomo/core/plugins/transform/logical_to_linear.py index df5bafb1302..e6554e0ed38 100644 --- a/pyomo/core/plugins/transform/logical_to_linear.py +++ b/pyomo/core/plugins/transform/logical_to_linear.py @@ -1,57 +1,77 @@ """Transformation from BooleanVar and LogicalConstraint to Binary and Constraints.""" from pyomo.common.collections import ComponentMap +from pyomo.common.errors import MouseTrap, DeveloperError from pyomo.common.modeling import unique_component_name from pyomo.common.config import ConfigBlock, ConfigValue from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr -from pyomo.core import (TransformationFactory, BooleanVar, VarList, Binary, - LogicalConstraint, Block, ConstraintList, native_types, - BooleanVarList) +from pyomo.core import ( + TransformationFactory, + BooleanVar, + VarList, + Binary, + LogicalConstraint, + Block, + ConstraintList, + native_types, + BooleanVarList, + SortComponents, +) from pyomo.core.base.block import _BlockData -from pyomo.core.base.boolean_var import ( - _DeprecatedImplicitAssociatedBinaryVariable) +from pyomo.core.base.boolean_var import _DeprecatedImplicitAssociatedBinaryVariable from pyomo.core.expr.cnf_walker import to_cnf -from pyomo.core.expr.logical_expr import (AndExpression, OrExpression, - NotExpression, AtLeastExpression, - AtMostExpression, ExactlyExpression, - special_boolean_atom_types, - EqualityExpression, - InequalityExpression, - RangedExpression) +from pyomo.core.expr import ( + AndExpression, + OrExpression, + NotExpression, + AtLeastExpression, + AtMostExpression, + ExactlyExpression, + special_boolean_atom_types, + EqualityExpression, + InequalityExpression, + RangedExpression, + identify_variables, +) from pyomo.core.expr.numvalue import native_logical_types, value from pyomo.core.expr.visitor import StreamBasedExpressionVisitor -from pyomo.core.expr.current import identify_variables from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation from pyomo.core.util import target_list -@TransformationFactory.register("core.logical_to_linear", - doc="Convert logic to linear constraints") + +@TransformationFactory.register( + "core.logical_to_linear", doc="Convert logic to linear constraints" +) class LogicalToLinear(IsomorphicTransformation): """ Re-encode logical constraints as linear constraints, converting Boolean variables to binary. """ + CONFIG = ConfigBlock('core.logical_to_linear') - CONFIG.declare('targets', ConfigValue( - default=None, - domain=target_list, - description="target or list of targets that will be relaxed", - doc=""" - This specifies the list of LogicalConstraints to transform, or the - list of Blocks or Disjuncts on which to transform all of the - LogicalConstraints. Note that if the transformation is done out - of place, the list of targets should be attached to the model before it - is cloned, and the list will specify the targets on the cloned - instance. - """ - )) + CONFIG.declare( + 'targets', + ConfigValue( + default=None, + domain=target_list, + description="target or list of targets that will be relaxed", + doc=""" + This specifies the list of LogicalConstraints to transform, or the + list of Blocks or Disjuncts on which to transform all of the + LogicalConstraints. Note that if the transformation is done out + of place, the list of targets should be attached to the model before it + is cloned, and the list will specify the targets on the cloned + instance. + """, + ), + ) def _apply_to(self, model, **kwds): config = self.CONFIG(kwds.pop('options', {})) config.set_value(kwds) targets = config.targets if targets is None: - targets = (model, ) + targets = (model,) new_var_lists = ComponentMap() transBlocks = {} @@ -74,13 +94,13 @@ def _apply_to(self, model, **kwds): if t.is_indexed(): self._transform_constraint(t, new_var_lists, transBlocks) else: - self._transform_constraintData(t, new_var_lists, - transBlocks) + self._transform_constraintData(t, new_var_lists, transBlocks) else: - raise RuntimeError("Target '%s' was not a Block, Disjunct, or" - " LogicalConstraint. It was of type %s " - "and can't be transformed." % (t.name, - type(t))) + raise RuntimeError( + "Target '%s' was not a Block, Disjunct, or" + " LogicalConstraint. It was of type %s " + "and can't be transformed." % (t.name, type(t)) + ) def _transform_boolean_varData(self, bool_vardata, new_varlists): # This transformation tries to group the binaries it creates for indexed @@ -92,13 +112,12 @@ def _transform_boolean_varData(self, bool_vardata, new_varlists): parent_component = bool_vardata.parent_component() new_varlist = new_varlists.get(parent_component) - if new_varlist is None and \ - bool_vardata.get_associated_binary() is None: + if new_varlist is None and bool_vardata.get_associated_binary() is None: # Case 2) we have neither the VarList nor an associated binary parent_block = bool_vardata.parent_block() new_var_list_name = unique_component_name( - parent_block, - parent_component.local_name + '_asbinary') + parent_block, parent_component.local_name + '_asbinary' + ) new_varlist = VarList(domain=Binary) setattr(parent_block, new_var_list_name, new_varlist) new_varlists[parent_component] = new_varlist @@ -114,33 +133,35 @@ def _transform_boolean_varData(self, bool_vardata, new_varlists): new_binary_vardata.fix() def _transform_constraint(self, constraint, new_varlists, transBlocks): - for i in constraint.keys(ordered=True): - self._transform_constraintData(constraint[i], new_varlists, - transBlocks) + for i in constraint.keys(sort=SortComponents.ORDERED_INDICES): + self._transform_constraintData(constraint[i], new_varlists, transBlocks) constraint.deactivate() def _transform_block(self, target_block, model, new_varlists, transBlocks): - _blocks = target_block.values() if target_block.is_indexed() else \ - (target_block,) + _blocks = ( + target_block.values() if target_block.is_indexed() else (target_block,) + ) for block in _blocks: for logical_constraint in block.component_objects( - ctype=LogicalConstraint, active=True, descend_into=Block): - self._transform_constraint(logical_constraint, new_varlists, - transBlocks) + ctype=LogicalConstraint, active=True, descend_into=Block + ): + self._transform_constraint( + logical_constraint, new_varlists, transBlocks + ) # This can go away when we deprecate this transformation # transforming BooleanVars. This just marks the BooleanVars as # "seen" so that if someone asks for their binary var later, we can # create it on the fly and complain. for bool_vardata in block.component_data_objects( - BooleanVar, descend_into=Block): + BooleanVar, descend_into=Block + ): if bool_vardata._associated_binary is None: - bool_vardata._associated_binary = \ - _DeprecatedImplicitAssociatedBinaryVariable( - bool_vardata) + bool_vardata._associated_binary = ( + _DeprecatedImplicitAssociatedBinaryVariable(bool_vardata) + ) - def _transform_constraintData(self, logical_constraint, new_varlists, - transBlocks): + def _transform_constraintData(self, logical_constraint, new_varlists, transBlocks): # first find all the relevant BooleanVars and associate a binary (if # they don't have one already) for bool_vardata in identify_variables(logical_constraint.expr): @@ -161,8 +182,7 @@ def _transform_constraintData(self, logical_constraint, new_varlists, old_boolvarlist_length = len(new_boolvarlist) indicator_map = ComponentMap() - cnf_statements = to_cnf(logical_constraint.body, new_boolvarlist, - indicator_map) + cnf_statements = to_cnf(logical_constraint.body, new_boolvarlist, indicator_map) logical_constraint.deactivate() # Associate new Boolean vars to new binary variables @@ -175,8 +195,7 @@ def _transform_constraintData(self, logical_constraint, new_varlists, # Add constraints associated with each CNF statement for cnf_statement in cnf_statements: - for linear_constraint in _cnf_to_linear_constraint_list( - cnf_statement): + for linear_constraint in _cnf_to_linear_constraint_list(cnf_statement): new_constrlist.add(expr=linear_constraint) # Add bigM associated with special atoms @@ -185,9 +204,8 @@ def _transform_constraintData(self, logical_constraint, new_varlists, old_varlist_length = len(new_varlist) for indicator_var, special_atom in indicator_map.items(): for linear_constraint in _cnf_to_linear_constraint_list( - special_atom, - indicator_var, - new_varlist): + special_atom, indicator_var, new_varlist + ): new_constrlist.add(expr=linear_constraint) # Previous step may have added auxiliary binaries. Associate augmented @@ -206,15 +224,15 @@ def _create_transformation_block(self, context): new_xfrm_block.transformed_constraints = ConstraintList() new_xfrm_block.augmented_vars = BooleanVarList() - new_xfrm_block.augmented_vars_asbinary = VarList( domain=Binary) + new_xfrm_block.augmented_vars_asbinary = VarList(domain=Binary) return new_xfrm_block + def update_boolean_vars_from_binary(model, integer_tolerance=1e-5): """Updates all Boolean variables based on the value of their linked binary variables.""" - for boolean_var in model.component_data_objects(BooleanVar, - descend_into=Block): + for boolean_var in model.component_data_objects(BooleanVar, descend_into=Block): binary_var = boolean_var.get_associated_binary() if binary_var is not None and binary_var.value is not None: if abs(binary_var.value - 1) <= integer_tolerance: @@ -222,13 +240,14 @@ def update_boolean_vars_from_binary(model, integer_tolerance=1e-5): elif abs(binary_var.value) <= integer_tolerance: boolean_var.value = False else: - raise ValueError("Binary variable has non-{0,1} value: " - "%s = %s" % (binary_var.name, - binary_var.value)) + raise ValueError( + "Binary variable has non-{0,1} value: " + "%s = %s" % (binary_var.name, binary_var.value) + ) boolean_var.stale = binary_var.stale -def _cnf_to_linear_constraint_list(cnf_expr, indicator_var=None, - binary_varlist=None): + +def _cnf_to_linear_constraint_list(cnf_expr, indicator_var=None, binary_varlist=None): # Screen for constants if type(cnf_expr) in native_types or cnf_expr.is_constant(): if value(cnf_expr) is True: @@ -236,17 +255,19 @@ def _cnf_to_linear_constraint_list(cnf_expr, indicator_var=None, else: raise ValueError( "Cannot build linear constraint for logical expression with " - "constant value False: %s" - % cnf_expr) + "constant value False: %s" % cnf_expr + ) if cnf_expr.is_expression_type(): - return CnfToLinearVisitor(indicator_var, binary_varlist).\ - walk_expression(cnf_expr) + return CnfToLinearVisitor(indicator_var, binary_varlist).walk_expression( + cnf_expr + ) else: return [cnf_expr.get_associated_binary() == 1] # Assume that cnf_expr - # is a BooleanVar + # is a BooleanVar + + +_numeric_relational_types = {InequalityExpression, EqualityExpression, RangedExpression} -_numeric_relational_types = {InequalityExpression, EqualityExpression, - RangedExpression} class CnfToLinearVisitor(StreamBasedExpressionVisitor): """Convert CNF logical constraint to linear constraints. @@ -255,6 +276,7 @@ class CnfToLinearVisitor(StreamBasedExpressionVisitor): AtLeastExpression, AtMostExpression, ExactlyExpression, _BooleanVarData """ + def __init__(self, indicator_var, binary_varlist): super(CnfToLinearVisitor, self).__init__() self._indicator = indicator_var @@ -262,8 +284,9 @@ def __init__(self, indicator_var, binary_varlist): def exitNode(self, node, values): if type(node) == AndExpression: - return list((v if type(v) in _numeric_relational_types else v == 1) - for v in values) + return list( + (v if type(v) in _numeric_relational_types else v == 1) for v in values + ) elif type(node) == OrExpression: return sum(values) >= 1 elif type(node) == NotExpression: @@ -282,38 +305,49 @@ def exitNode(self, node, values): else: rhs_lb, rhs_ub = compute_bounds_on_expr(values[0]) if rhs_lb == float('-inf') or rhs_ub == float('inf'): - raise ValueError( "Cannnot generate linear constraints for %s" - "([N, *logical_args]) with unbounded N. " - "Detected %s <= N <= %s." % - (type(node).__name__, rhs_lb, rhs_ub) ) + raise ValueError( + "Cannot generate linear constraints for %s" + "([N, *logical_args]) with unbounded N. " + "Detected %s <= N <= %s." % (type(node).__name__, rhs_lb, rhs_ub) + ) indicator_binary = self._indicator.get_associated_binary() if type(node) == AtLeastExpression: return [ sum_values >= values[0] - rhs_ub * (1 - indicator_binary), - sum_values <= values[0] - 1 + (-(rhs_lb - 1) + num_args) * \ - indicator_binary + sum_values + <= values[0] - 1 + (-(rhs_lb - 1) + num_args) * indicator_binary, ] elif type(node) == AtMostExpression: return [ - sum_values <= values[0] + (-rhs_lb + num_args) * \ - (1 - indicator_binary), - sum_values >= (values[0] + 1) - (rhs_ub + 1) * \ - indicator_binary + sum_values + <= values[0] + (-rhs_lb + num_args) * (1 - indicator_binary), + sum_values >= (values[0] + 1) - (rhs_ub + 1) * indicator_binary, ] elif type(node) == ExactlyExpression: less_than_binary = self._binary_varlist.add() more_than_binary = self._binary_varlist.add() return [ - sum_values <= values[0] + (-rhs_lb + num_args) * \ - (1 - indicator_binary), + sum_values + <= values[0] + (-rhs_lb + num_args) * (1 - indicator_binary), sum_values >= values[0] - rhs_ub * (1 - indicator_binary), indicator_binary + less_than_binary + more_than_binary >= 1, - sum_values <= values[0] - 1 + (-(rhs_lb - 1) + num_args) * \ - (1 - less_than_binary), - sum_values >= values[0] + 1 - (rhs_ub + 1) * \ - (1 - more_than_binary), + sum_values + <= values[0] + - 1 + + (-(rhs_lb - 1) + num_args) * (1 - less_than_binary), + sum_values >= values[0] + 1 - (rhs_ub + 1) * (1 - more_than_binary), ] - pass + if type(node) in _numeric_relational_types: + raise MouseTrap( + "core.logical_to_linear does not support transforming " + "LogicalConstraints with embedded relational expressions. " + f"Found '{node}'." + ) + else: + raise DeveloperError( + f"Unsupported node type {type(node)} encountered when " + f"transforming a CNF expression to its linear equivalent ({node})." + ) def beforeChild(self, node, child, child_idx): if type(node) in special_boolean_atom_types and child is node.args[0]: @@ -327,7 +361,13 @@ def beforeChild(self, node, child, child_idx): return True, None # Only thing left should be _BooleanVarData - return False, child.get_associated_binary() + # + # TODO: After the expr_multiple_dispatch is merged, this should + # be switched to using as_numeric. + if hasattr(child, 'get_associated_binary'): + return False, child.get_associated_binary() + else: + return False, child def finalizeResult(self, result): if type(result) is list: diff --git a/pyomo/core/plugins/transform/model.py b/pyomo/core/plugins/transform/model.py index 79ac9e29d77..99c1d21c9a0 100644 --- a/pyomo/core/plugins/transform/model.py +++ b/pyomo/core/plugins/transform/model.py @@ -19,6 +19,7 @@ from pyomo.core.base import Objective, Constraint import array + def to_standard_form(self): """ Produces a standard-form representation of the model. Returns @@ -35,7 +36,6 @@ def to_standard_form(self): from pyomo.repn import generate_standard_repn - # We first need to create an map of all variables to their column # number colID = {} @@ -52,7 +52,7 @@ def to_standard_form(self): # First we go through the constraints and introduce slack and excess # variables to eliminate inequality constraints # - # N.B. Structure heirarchy: + # N.B. Structure hierarchy: # # active_components: {class: {attr_name: object}} # object -> Constraint: ._data: {ndx: _ConstraintData} @@ -92,7 +92,6 @@ def to_standard_form(self): objectives = {} # For each registered component for c in self.component_map(active=True): - # Get all subclasses of Constraint if issubclass(c, Constraint): cons = self.component_map(c, active=True) @@ -107,13 +106,15 @@ def to_standard_form(self): # Process the body terms = self._process_canonical_repn( - generate_standard_repn(con.body, var_id_map)) + generate_standard_repn(con.body, var_id_map) + ) # Process the bounds of the constraint if con.equality: # Equality constraint, only check lower bound lb = self._process_canonical_repn( - generate_standard_repn(con.lower, var_id_map)) + generate_standard_repn(con.lower, var_id_map) + ) # Update terms for k in lb: @@ -126,14 +127,14 @@ def to_standard_form(self): # Add constraint to equality constraints eqConstraints[(con_set_name, ndx)] = terms else: - # Process upper bounds (<= constraints) if con.upper is not None: # Less than or equal to constraint tmp = dict(terms) ub = self._process_canonical_repn( - generate_standard_repn(con.upper, var_id_map)) + generate_standard_repn(con.upper, var_id_map) + ) # Update terms for k in ub: @@ -152,7 +153,8 @@ def to_standard_form(self): tmp = dict(terms) lb = self._process_canonical_repn( - generate_standard_repn(con.lower, var_id_map)) + generate_standard_repn(con.lower, var_id_map) + ) # Update terms for k in lb: @@ -177,25 +179,24 @@ def to_standard_form(self): obj = obj_set._data[ndx] # Process the objective terms = self._process_canonical_repn( - generate_standard_repn(obj.expr, var_id_map)) + generate_standard_repn(obj.expr, var_id_map) + ) objectives[(obj_set_name, ndx)] = terms - # We now have all the constraints. Add a slack variable for every # <= constraint and an excess variable for every >= constraint. nSlack = len(leConstraints) nExcess = len(geConstraints) - nConstraints = len(leConstraints) + len(geConstraints) + \ - len(eqConstraints) + nConstraints = len(leConstraints) + len(geConstraints) + len(eqConstraints) nVariables = len(colID) + nSlack + nExcess nRegVariables = len(colID) # Make the arrays - coefficients = array.array("d", [0]*nConstraints*nVariables) - constraints = array.array("d", [0]*nConstraints) - costs = array.array("d", [0]*nVariables) + coefficients = array.array("d", [0] * nConstraints * nVariables) + constraints = array.array("d", [0] * nConstraints) + costs = array.array("d", [0] * nVariables) # Populate the coefficient matrix constraintID = 0 @@ -212,11 +213,10 @@ def to_standard_form(self): else: # Variable coefficient col = colID[termKey] - coefficients[constraintID*nVariables + col] = coef + coefficients[constraintID * nVariables + col] = coef # Add the slack - coefficients[constraintID*nVariables + nRegVariables + \ - constraintID] = 1 + coefficients[constraintID * nVariables + nRegVariables + constraintID] = 1 constraintID += 1 # Add greater than or equal to constraints @@ -231,11 +231,10 @@ def to_standard_form(self): else: # Variable coefficient col = colID[termKey] - coefficients[constraintID*nVariables + col] = coef + coefficients[constraintID * nVariables + col] = coef # Add the slack - coefficients[constraintID*nVariables + nRegVariables + \ - constraintID] = -1 + coefficients[constraintID * nVariables + nRegVariables + constraintID] = -1 constraintID += 1 # Add equality constraints @@ -250,7 +249,7 @@ def to_standard_form(self): else: # Variable coefficient col = colID[termKey] - coefficients[constraintID*nVariables + col] = coef + coefficients[constraintID * nVariables + col] = coef constraintID += 1 @@ -302,9 +301,9 @@ def to_standard_form(self): conNames.append(strName) # Generate the variable names - varNames = [None]*len(colID) + varNames = [None] * len(colID) for name in colID: - tmp_name = " " + name + tmp_name = ' ' + name if len(tmp_name) > maxColWidth: maxColWidth = len(tmp_name) varNames[colID[name]] = tmp_name @@ -320,22 +319,28 @@ def to_standard_form(self): varNames.append(tmp_name) # Variable names - line = " "*maxConNameLen + (" "*constraintPadding) + " " + line = ' ' * maxConNameLen + (' ' * constraintPadding) + ' ' for col in range(0, nVariables): # Format entry token = varNames[col] # Pad with trailing whitespace - token += " "*(maxColWidth - len(token)) + token += ' ' * (maxColWidth - len(token)) # Add to line - line += " " + token + " " - print(line+'\n') + line += ' ' + token + ' ' + print(line + '\n') # Cost vector - print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ - " "*((maxColWidth+2)*nVariables - 4) + "--+" + '\n') - line = " "*maxConNameLen + (" "*constraintPadding) + "|" + print( + ' ' * maxConNameLen + + (' ' * constraintPadding) + + "+--" + + ' ' * ((maxColWidth + 2) * nVariables - 4) + + "--+" + + '\n' + ) + line = ' ' * maxConNameLen + (' ' * constraintPadding) + "|" for col in range(0, nVariables): # Format entry token = numFmt % costs[col] @@ -343,38 +348,57 @@ def to_standard_form(self): token = altFmt % costs[col] # Pad with trailing whitespace - token += " "*(maxColWidth - len(token)) + token += ' ' * (maxColWidth - len(token)) # Add to line - line += " " + token + " " + line += ' ' + token + ' ' line += "|" - print(line+'\n') - print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ - " "*((maxColWidth+2)*nVariables - 4) + "--+"+'\n') + print(line + '\n') + print( + ' ' * maxConNameLen + + (' ' * constraintPadding) + + "+--" + + ' ' * ((maxColWidth + 2) * nVariables - 4) + + "--+" + + '\n' + ) # Constraints - print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ - " "*((maxColWidth+2)*nVariables - 4) + "--+" + \ - (" "*constraintPadding) + "+--" + \ - (" "*(maxConstraintColWidth-1)) + "--+"+'\n') + print( + ' ' * maxConNameLen + + (' ' * constraintPadding) + + "+--" + + ' ' * ((maxColWidth + 2) * nVariables - 4) + + "--+" + + (' ' * constraintPadding) + + "+--" + + (' ' * (maxConstraintColWidth - 1)) + + "--+" + + '\n' + ) for row in range(0, nConstraints): # Print constraint name - line = conNames[row] + (" "*constraintPadding) + (" "*(maxConNameLen - len(conNames[row]))) + "|" + line = ( + conNames[row] + + (' ' * constraintPadding) + + (' ' * (maxConNameLen - len(conNames[row]))) + + "|" + ) # Print each coefficient for col in range(0, nVariables): # Format entry - token = numFmt % coefficients[nVariables*row + col] + token = numFmt % coefficients[nVariables * row + col] if len(token) > maxColWidth: - token = altFmt % coefficients[nVariables*row + col] + token = altFmt % coefficients[nVariables * row + col] # Pad with trailing whitespace - token += " "*(maxColWidth - len(token)) + token += ' ' * (maxColWidth - len(token)) # Add to line - line += " " + token + " " + line += ' ' + token + ' ' - line += "|" + (" "*constraintPadding) + "|" + line += "|" + (' ' * constraintPadding) + "|" # Add constraint vector token = numFmt % constraints[row] @@ -382,17 +406,26 @@ def to_standard_form(self): token = altFmt % constraints[row] # Pad with trailing whitespace - token += " "*(maxConstraintColWidth - len(token)) - - line += " " + token + " |" - print(line+'\n') - print(" "*maxConNameLen + (" "*constraintPadding) + "+--" + \ - " "*((maxColWidth+2)*nVariables - 4) + "--+" + \ - (" "*constraintPadding) + "+--" + (" "*(maxConstraintColWidth-1))\ - + "--+"+'\n') + token += ' ' * (maxConstraintColWidth - len(token)) + + line += ' ' + token + " |" + print(line + '\n') + print( + ' ' * maxConNameLen + + (' ' * constraintPadding) + + "+--" + + ' ' * ((maxColWidth + 2) * nVariables - 4) + + "--+" + + (' ' * constraintPadding) + + "+--" + + (' ' * (maxConstraintColWidth - 1)) + + "--+" + + '\n' + ) return (coefficients, costs, constraints) + def _process_canonical_repn(self, expr): """ Returns a dictionary of {var_name_or_None: coef} values @@ -421,4 +454,3 @@ def _process_canonical_repn(self, expr): raise TypeError("Nonlinear terms in expression") return terms - diff --git a/pyomo/core/plugins/transform/nonnegative_transform.py b/pyomo/core/plugins/transform/nonnegative_transform.py index 28ae8c99ffc..b32b7b1efc0 100644 --- a/pyomo/core/plugins/transform/nonnegative_transform.py +++ b/pyomo/core/plugins/transform/nonnegative_transform.py @@ -9,14 +9,31 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.expr import current as EXPR - -from pyomo.core import (nonpyomo_leaf_types, TransformationFactory, IntegerSet, - Integers, PositiveIntegers, NonPositiveIntegers, - NegativeIntegers, NonNegativeIntegers, Reals, PositiveReals, - NonNegativeReals, NegativeReals, NonPositiveReals, - PercentFraction, RealSet, Var, Set, value, Binary, - Constraint, Objective) +import pyomo.core.expr as EXPR + +from pyomo.core import ( + nonpyomo_leaf_types, + TransformationFactory, + IntegerSet, + Integers, + PositiveIntegers, + NonPositiveIntegers, + NegativeIntegers, + NonNegativeIntegers, + Reals, + PositiveReals, + NonNegativeReals, + NegativeReals, + NonPositiveReals, + PercentFraction, + RealSet, + Var, + Set, + value, + Binary, + Constraint, + Objective, +) from pyomo.core.base.misc import create_name from pyomo.core.plugins.transform.util import partial from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation @@ -24,10 +41,11 @@ import logging + logger = logging.getLogger('pyomo.core') -class VarmapVisitor(EXPR.ExpressionReplacementVisitor): +class VarmapVisitor(EXPR.ExpressionReplacementVisitor): def __init__(self, varmap): super(VarmapVisitor, self).__init__() self.varmap = varmap @@ -41,7 +59,7 @@ def visiting_potential_leaf(self, node): if node.is_variable_type(): if node.local_name in self.varmap: return True, self.varmap[node.local_name] - else: + else: return True, node if isinstance(node, EXPR.LinearExpression): @@ -64,7 +82,10 @@ def _walk_expr(expr, varMap): return visitor.dfs_postorder_stack(expr) -@TransformationFactory.register("core.nonnegative_vars", doc="Create an equivalent model in which all variables lie in the nonnegative orthant.") +@TransformationFactory.register( + "core.nonnegative_vars", + doc="Create an equivalent model in which all variables lie in the nonnegative orthant.", +) class NonNegativeTransformation(IsomorphicTransformation): """ Creates a new, equivalent model by forcing all variables to lie in @@ -75,14 +96,25 @@ def __init__(self, **kwds): kwds["name"] = kwds.pop("name", "vars") super(NonNegativeTransformation, self).__init__(**kwds) - self.realSets = (Reals, PositiveReals, NonNegativeReals, NegativeReals, - NonPositiveReals, PercentFraction, RealSet) + self.realSets = ( + Reals, + PositiveReals, + NonNegativeReals, + NegativeReals, + NonPositiveReals, + PercentFraction, + RealSet, + ) # Intentionally leave out Binary, Boolean, BinarySet, and BooleanSet; # we check for those explicitly - self.discreteSets = (IntegerSet, Integers, PositiveIntegers, - NonPositiveIntegers, NegativeIntegers, - NonNegativeIntegers) - + self.discreteSets = ( + IntegerSet, + Integers, + PositiveIntegers, + NonPositiveIntegers, + NegativeIntegers, + NonNegativeIntegers, + ) def _create_using(self, model, **kwds): """ @@ -122,7 +154,7 @@ def _create_using(self, model, **kwds): # Map from fully qualified variable names to replacement expressions. # For now, it is actually a map from a variable name to a closure that - # must later be evaulated with a model containing the replacement + # must later be evaluated with a model containing the replacement # variables. var_map = {} @@ -179,12 +211,16 @@ def _create_using(self, model, **kwds): # If both the bounds and domain allow for negative values, # replace the variable with the sum of nonnegative ones. - bounds_neg = (orig_bounds[ndx] == (None, None) or - orig_bounds[ndx][0] is None or - orig_bounds[ndx][0] < 0) - domain_neg = (orig_domain[ndx] is None or - orig_domain[ndx].bounds()[0] is None or - orig_domain[ndx].bounds()[0] < 0) + bounds_neg = ( + orig_bounds[ndx] == (None, None) + or orig_bounds[ndx][0] is None + or orig_bounds[ndx][0] < 0 + ) + domain_neg = ( + orig_domain[ndx] is None + or orig_domain[ndx].bounds()[0] is None + or orig_domain[ndx].bounds()[0] < 0 + ) if bounds_neg and domain_neg: # Make two new variables. posVarSuffix = "%s%s" % (v_ndx, pos_suffix) @@ -219,31 +255,23 @@ def _create_using(self, model, **kwds): indices.add(x) # Replace the original variable with an expression - var_map[vname] = partial(self.sumRule, - var_name, - expr_dict) + var_map[vname] = partial(self.sumRule, var_name, expr_dict) # Enforce bounds as constraints if orig_bounds[ndx] != (None, None): cname = "%s_%s" % (vname, "bounds") tmp = orig_bounds[ndx] constraints[cname] = partial( - self.boundsConstraintRule, - tmp[0], - tmp[1], - var_name, - expr_dict) + self.boundsConstraintRule, tmp[0], tmp[1], var_name, expr_dict + ) # Enforce the bounds of the domain as constraints if orig_domain[ndx] != None: cname = "%s_%s" % (vname, "domain_bounds") tmp = orig_domain[ndx].bounds() constraints[cname] = partial( - self.boundsConstraintRule, - tmp[0], - tmp[1], - var_name, - expr_dict) + self.boundsConstraintRule, tmp[0], tmp[1], var_name, expr_dict + ) # Domain will either be NonNegativeReals, NonNegativeIntegers, # or Binary. We consider Binary because some solvers may @@ -260,7 +288,8 @@ def _create_using(self, model, **kwds): else: logger.warning( "Warning: domain '%s' not recognized, " - "defaulting to 'NonNegativeReals'" % (var.domain,)) + "defaulting to 'NonNegativeReals'" % (var.domain,) + ) for x in new_indices: domains[x] = NonNegativeReals @@ -270,19 +299,24 @@ def _create_using(self, model, **kwds): # Remove all existing variables. toRemove = [] - for (attr_name, attr) in nonneg.__dict__.items(): + for attr_name, attr in nonneg.__dict__.items(): if isinstance(attr, Var): toRemove.append(attr_name) for attr_name in toRemove: nonneg.__delattr__(attr_name) # Add the sets defining the variables, then the variables - for (k, v) in var_indices.items(): + for k, v in var_indices.items(): sname = "%s_indices" % k nonneg.__setattr__(sname, Set(initialize=v)) - nonneg.__setattr__(k, Var(nonneg.__getattribute__(sname), - domain = domain_rules[k], - bounds = (0, None))) + nonneg.__setattr__( + k, + Var( + nonneg.__getattribute__(sname), + domain=domain_rules[k], + bounds=(0, None), + ), + ) # Construct the model to get the variables and their indices # recognized in the model @@ -305,9 +339,9 @@ def _create_using(self, model, **kwds): # Map from constraint indices to a corrected expression exprMap = {} - for (ndx, cdata) in con._data.items(): + for ndx, cdata in con._data.items(): lower = _walk_expr(cdata.lower, var_map) - body = _walk_expr(cdata.body, var_map) + body = _walk_expr(cdata.body, var_map) upper = _walk_expr(cdata.upper, var_map) # Lie if ndx is None. Pyomo treats 'None' indices specially. @@ -333,15 +367,14 @@ def _create_using(self, model, **kwds): # Map from objective indices to a corrected expression exprMap = {} - for (ndx, odata) in obj._data.items(): + for ndx, odata in obj._data.items(): exprMap[ndx] = _walk_expr(odata.expr, var_map) # Add to list of expression maps objectiveExprs[objName] = exprMap - # Make the modified original constraints - for (conName, ruleMap) in constraintExprs.items(): + for conName, ruleMap in constraintExprs.items(): # Make the set of indices sname = conName + "_indices" _set = Set(initialize=ruleMap.keys()) @@ -349,13 +382,14 @@ def _create_using(self, model, **kwds): _set.construct() # Define the constraint - _con = Constraint( nonneg.__getattribute__(sname), - rule=partial(self.exprMapRule, ruleMap) ) + _con = Constraint( + nonneg.__getattribute__(sname), rule=partial(self.exprMapRule, ruleMap) + ) nonneg.__setattr__(conName, _con) _con.construct() # Make the bounds constraints - for (varName, ruleMap) in constraint_rules.items(): + for varName, ruleMap in constraint_rules.items(): conName = varName + "_constraints" # Make the set of indices sname = conName + "_indices" @@ -364,13 +398,15 @@ def _create_using(self, model, **kwds): _set.construct() # Define the constraint - _con = Constraint(nonneg.__getattribute__(sname), - rule=partial(self.delayedExprMapRule, ruleMap)) + _con = Constraint( + nonneg.__getattribute__(sname), + rule=partial(self.delayedExprMapRule, ruleMap), + ) nonneg.__setattr__(conName, _con) _con.construct() # Make the objectives - for (objName, ruleMap) in objectiveExprs.items(): + for objName, ruleMap in objectiveExprs.items(): # Make the set of indices sname = objName + "_indices" _set = Set(initialize=ruleMap.keys()) @@ -378,8 +414,9 @@ def _create_using(self, model, **kwds): _set.construct() # Define the constraint - _obj = Objective(nonneg.__getattribute__(sname), - rule=partial(self.exprMapRule, ruleMap)) + _obj = Objective( + nonneg.__getattribute__(sname), rule=partial(self.exprMapRule, ruleMap) + ) nonneg.__setattr__(objName, _obj) _obj.construct() @@ -398,10 +435,11 @@ def boundsConstraintRule(lb, ub, attr, vars, model): and so attr='X', and 1 is a key of vars. """ - return (lb, - sum(c * model.__getattribute__(attr)[v] \ - for (v,c) in vars.items()), - ub) + return ( + lb, + sum(c * model.__getattribute__(attr)[v] for (v, c) in vars.items()), + ub, + ) @staticmethod def noConstraint(*args): @@ -412,11 +450,11 @@ def sumRule(attr, vars, model): """ Returns a sum expression. """ - return sum(c*model.__getattribute__(attr)[v] for (v, c) in vars.items()) + return sum(c * model.__getattribute__(attr)[v] for (v, c) in vars.items()) @staticmethod def exprMapRule(ruleMap, model, ndx=None): - """ Rule intended to return expressions from a lookup table """ + """Rule intended to return expressions from a lookup table""" return ruleMap[ndx] @staticmethod @@ -427,4 +465,3 @@ def delayedExprMapRule(ruleMap, model, ndx=None): returning. """ return ruleMap[ndx](model) - diff --git a/pyomo/core/plugins/transform/radix_linearization.py b/pyomo/core/plugins/transform/radix_linearization.py index 2f4d1f6ec6d..0d77a342147 100644 --- a/pyomo/core/plugins/transform/radix_linearization.py +++ b/pyomo/core/plugins/transform/radix_linearization.py @@ -9,19 +9,30 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.expr.current import ProductExpression, PowExpression +from pyomo.core.expr import ProductExpression, PowExpression +from pyomo.core.expr.numvalue import as_numeric from pyomo.core import Binary, value -from pyomo.core.base import Transformation, TransformationFactory, Var, Constraint, ConstraintList, Block, RangeSet -from pyomo.core.base.numvalue import as_numeric +from pyomo.core.base import ( + Transformation, + TransformationFactory, + Var, + Constraint, + ConstraintList, + Block, + RangeSet, +) from pyomo.core.base.var import _VarData import logging + logger = logging.getLogger(__name__) -@TransformationFactory.register("core.radix_linearization", - doc="Linearize bilinear and quadratic terms through " - "radix discretization (multiparametric disaggregation)" ) +@TransformationFactory.register( + "core.radix_linearization", + doc="Linearize bilinear and quadratic terms through " + "radix discretization (multiparametric disaggregation)", +) class RadixLinearization(Transformation): """ This plugin generates linear relaxations of bilinear problems using @@ -35,9 +46,9 @@ class RadixLinearization(Transformation): """ def _create_using(self, model, **kwds): - precision = kwds.pop('precision',8) - user_discretize = kwds.pop('discretize',set()) - verbose = kwds.pop('verbose',False) + precision = kwds.pop('precision', 8) + user_discretize = kwds.pop('discretize', set()) + verbose = kwds.pop('verbose', False) M = model.clone() @@ -77,13 +88,13 @@ def _create_using(self, model, **kwds): _counts[_id] = (q[1], set()) _counts[_id][1].add(_id) for bi in bilinear_terms: - for i in (0,1): - if not bi[i+1].is_continuous(): + for i in (0, 1): + if not bi[i + 1].is_continuous(): continue - _id = id(bi[i+1]) + _id = id(bi[i + 1]) if _id not in _counts: - _counts[_id] = (bi[i+1], set()) - _counts[_id][1].add(id(bi[2-i])) + _counts[_id] = (bi[i + 1], set()) + _counts[_id][1].add(id(bi[2 - i])) _tmp_counts = dict(_counts) # First, remove the variables that the user wants to have discretized @@ -94,7 +105,7 @@ def _create_using(self, model, **kwds): _tmp_counts[_i][1].remove(_id) del _tmp_counts[_id] # All quadratic terms must be discretized (?) - #for q in quadratic_terms: + # for q in quadratic_terms: # _id = id(q[1]) # if _id not in _tmp_counts: # continue @@ -107,7 +118,7 @@ def _create_using(self, model, **kwds): # Now pick a (minimal) subset of the terms in bilinear expressions while _tmp_counts: - _ct, _id = max( (len(_tmp_counts[i][1]), i) for i in _tmp_counts ) + _ct, _id = max((len(_tmp_counts[i][1]), i) for i in _tmp_counts) if not _ct: break _discretize.setdefault(_id, len(_discretize)) @@ -123,54 +134,57 @@ def _create_using(self, model, **kwds): # Define a block (namespace) for holding the disaggregated # variables and new constraints - if False: # Set to true when the LP writer is fixed + if False: # Set to true when the LP writer is fixed M._radix_linearization = Block() _block = M._radix_linearization else: _block = M _block.DISCRETIZATION = RangeSet(precision) - _block.DISCRETIZED_VARIABLES = RangeSet(0, len(_discretize)-1) - _block.z = Var( _block.DISCRETIZED_VARIABLES, _block.DISCRETIZATION, - within=Binary ) - _block.dv = Var( _block.DISCRETIZED_VARIABLES, - bounds=(0,2**-precision) ) + _block.DISCRETIZED_VARIABLES = RangeSet(0, len(_discretize) - 1) + _block.z = Var( + _block.DISCRETIZED_VARIABLES, _block.DISCRETIZATION, within=Binary + ) + _block.dv = Var(_block.DISCRETIZED_VARIABLES, bounds=(0, 2**-precision)) # Actually discretize the terms we have marked for discretization for _id, _idx in _discretize.items(): if verbose: - logger.info("Discretizing variable %s as %s" % - (_counts[_id][0].name, _idx)) + logger.info( + "Discretizing variable %s as %s" % (_counts[_id][0].name, _idx) + ) self._discretize_variable(_block, _counts[_id][0], _idx) _known_bilinear = {} # For each quadratic term, if it hasn't been discretized / # generated, do so, and remember the resulting W term for later # use... - #for _expr, _x1 in quadratic_terms: + # for _expr, _x1 in quadratic_terms: # self._discretize_term( _expr, _x1, _x1, # _block, _discretize, _known_bilinear ) # For each bilinear term, if it hasn't been discretized / # generated, do so, and remember the resulting W term for later # use... for _expr, _x1, _x2 in bilinear_terms: - self._discretize_term( _expr, _x1, _x2, - _block, _discretize, _known_bilinear ) + self._discretize_term(_expr, _x1, _x2, _block, _discretize, _known_bilinear) # Return the discretized instance! return M - def _discretize_variable(self, b, v, idx): _lb, _ub = v.bounds if _lb is None or _ub is None: - raise RuntimeError("Couldn't discretize variable %s: missing " - "finite lower/upper bounds." % (v.name)) + raise RuntimeError( + "Couldn't discretize variable %s: missing " + "finite lower/upper bounds." % (v.name) + ) _c = Constraint( - expr= v == _lb + (_ub-_lb) * ( b.dv[idx] + - sum(b.z[idx,k] * 2**-k for k in b.DISCRETIZATION) ) ) + expr=v + == _lb + + (_ub - _lb) + * (b.dv[idx] + sum(b.z[idx, k] * 2**-k for k in b.DISCRETIZATION)) + ) b.add_component("c_discr_v%s" % idx, _c) - def _discretize_term(self, _expr, _x1, _x2, _block, _discretize, _known_bilinear): if id(_x1) in _discretize: _v = _x1 @@ -179,61 +193,72 @@ def _discretize_term(self, _expr, _x1, _x2, _block, _discretize, _known_bilinear _u = _x1 _v = _x2 else: - raise RuntimeError("Couldn't identify discretized variable " - "for expression '%s'!" % _expr) + raise RuntimeError( + "Couldn't identify discretized variable for expression '%s'!" % _expr + ) _id = (id(_v), id(_u)) if _id not in _known_bilinear: _known_bilinear[_id] = self._discretize_bilinear( - _block, _v, _discretize[id(_v)], _u, len(_known_bilinear)) + _block, _v, _discretize[id(_v)], _u, len(_known_bilinear) + ) # _expr should be a "simple" product expression; substitute # in the bilinear "W" term for the raw bilinear terms - _expr._numerator = [ _known_bilinear[_id] ] - + _expr._numerator = [_known_bilinear[_id]] def _discretize_bilinear(self, b, v, v_idx, u, u_idx): _z = b.z _dv = b.dv[v_idx] _u = Var(b.DISCRETIZATION, within=u.domain, bounds=u.bounds) - logger.info("Discretizing (v=%s)*(u=%s) as u%s_v%s" - % (v.name, u.name, u_idx, v_idx )) - b.add_component( "u%s_v%s" % (u_idx, v_idx), _u) + logger.info( + "Discretizing (v=%s)*(u=%s) as u%s_v%s" % (v.name, u.name, u_idx, v_idx) + ) + b.add_component("u%s_v%s" % (u_idx, v_idx), _u) _lb, _ub = u.bounds if _lb is None or _ub is None: - raise RuntimeError("Couldn't relax variable %s: missing " - "finite lower/upper bounds." % (u.name)) + raise RuntimeError( + "Couldn't relax variable %s: missing " + "finite lower/upper bounds." % (u.name) + ) _c = ConstraintList() - b.add_component( "c_disaggregate_u%s_v%s" % (u_idx, v_idx), _c ) + b.add_component("c_disaggregate_u%s_v%s" % (u_idx, v_idx), _c) for k in b.DISCRETIZATION: # _lb * z[v_idx,k] <= _u[k] <= _ub * z[v_idx,k] - _c.add(expr= _lb*_z[v_idx,k] <= _u[k] ) - _c.add(expr= _u[k] <= _ub*_z[v_idx,k] ) + _c.add(expr=_lb * _z[v_idx, k] <= _u[k]) + _c.add(expr=_u[k] <= _ub * _z[v_idx, k]) # _lb * (1-z[v_idx,k]) <= u - _u[k] <= _ub * (1-z[v_idx,k]) - _c.add(expr= _lb * (1-_z[v_idx,k]) <= u - _u[k] ) - _c.add(expr= u - _u[k] <= _ub * (1-_z[v_idx,k])) + _c.add(expr=_lb * (1 - _z[v_idx, k]) <= u - _u[k]) + _c.add(expr=u - _u[k] <= _ub * (1 - _z[v_idx, k])) _v_lb, _v_ub = v.bounds - _bnd_rng = (_v_lb*_lb, _v_lb*_ub, _v_ub*_lb, _v_ub*_ub) + _bnd_rng = (_v_lb * _lb, _v_lb * _ub, _v_ub * _lb, _v_ub * _ub) _w = Var(bounds=(min(_bnd_rng), max(_bnd_rng))) - b.add_component( "w%s_v%s" % (u_idx, v_idx), _w) + b.add_component("w%s_v%s" % (u_idx, v_idx), _w) K = max(b.DISCRETIZATION) - _dw = Var(bounds=( min(0, _lb*2**-K, _ub*2**-K), - max(0, _lb*2**-K, _ub*2**-K) )) - b.add_component( "dw%s_v%s" % (u_idx, v_idx), _dw) + _dw = Var( + bounds=( + min(0, _lb * 2**-K, _ub * 2**-K), + max(0, _lb * 2**-K, _ub * 2**-K), + ) + ) + b.add_component("dw%s_v%s" % (u_idx, v_idx), _dw) - _c = Constraint(expr= _w == _v_lb*u + (_v_ub-_v_lb) * ( - sum(2**-k * _u[k] for k in b.DISCRETIZATION) + _dw ) ) - b.add_component( "c_bilinear_u%s_v%s" % (u_idx, v_idx), _c ) + _c = Constraint( + expr=_w + == _v_lb * u + + (_v_ub - _v_lb) * (sum(2**-k * _u[k] for k in b.DISCRETIZATION) + _dw) + ) + b.add_component("c_bilinear_u%s_v%s" % (u_idx, v_idx), _c) _c = ConstraintList() - b.add_component( "c_mccormick_u%s_v%s" % (u_idx, v_idx), _c ) + b.add_component("c_mccormick_u%s_v%s" % (u_idx, v_idx), _c) # u_lb * dv <= dw <= u_ub * dv - _c.add(expr= _lb*_dv <= _dw ) - _c.add(expr= _dw <= _ub*_dv ) + _c.add(expr=_lb * _dv <= _dw) + _c.add(expr=_dw <= _ub * _dv) # (u-u_ub)*2^-K + u_ub*dv <= dw <= (u-u_lb)*2^-K + u_lb*dv - _c.add(expr= (u - _ub)*2**-K + _ub*_dv <= _dw ) - _c.add(expr= _dw <= (u - _lb)*2**-K + _lb*_dv ) + _c.add(expr=(u - _ub) * 2**-K + _ub * _dv <= _dw) + _c.add(expr=_dw <= (u - _lb) * 2**-K + _lb * _dv) return _w @@ -246,26 +271,26 @@ def _collect_bilinear(self, expr, bilin, quad): self._collect_bilinear(e, bilin, quad) # No need to check denominator, as this is poly_degree==2 return - if not isinstance(expr._numerator[0], _VarData) or \ - not isinstance(expr._numerator[1], _VarData): + if not isinstance(expr._numerator[0], _VarData) or not isinstance( + expr._numerator[1], _VarData + ): raise RuntimeError("Cannot yet handle complex subexpressions") if expr._numerator[0] is expr._numerator[1]: - quad.append( (expr, expr._numerator[0]) ) + quad.append((expr, expr._numerator[0])) else: - bilin.append( (expr, expr._numerator[0], expr._numerator[1]) ) + bilin.append((expr, expr._numerator[0], expr._numerator[1])) return if type(expr) is PowExpression and value(expr._args[1]) == 2: # Note: directly testing the value of the exponent above is # safe: we have already verified that this expression is # polynominal, so the exponent must be constant. tmp = ProductExpression() - tmp._numerator = [ expr._args[0], expr._args[0] ] + tmp._numerator = [expr._args[0], expr._args[0]] tmp._denominator = [] - expr._args = (tmp, as_numeric(1)) # THIS CODE DOES NOT WORK - #quad.append( (tmp, tmp._args[0]) ) + expr._args = (tmp, as_numeric(1)) # THIS CODE DOES NOT WORK + # quad.append( (tmp, tmp._args[0]) ) self._collect_bilinear(tmp, bilin, quad) return # All other expression types for e in expr._args: self._collect_bilinear(e, bilin, quad) - diff --git a/pyomo/core/plugins/transform/relax_integrality.py b/pyomo/core/plugins/transform/relax_integrality.py index 38dcbb7bc20..06dd2faba77 100644 --- a/pyomo/core/plugins/transform/relax_integrality.py +++ b/pyomo/core/plugins/transform/relax_integrality.py @@ -17,10 +17,11 @@ @TransformationFactory.register( 'core.relax_integrality', doc="[DEPRECATED] Create a model where integer variables are replaced with " - "real variables.") + "real variables.", +) @deprecated( - "core.relax_integrality is deprecated. Use core.relax_integer_vars", - version='5.7') + "core.relax_integrality is deprecated. Use core.relax_integer_vars", version='5.7' +) class RelaxIntegrality(RelaxIntegerVars): """ This plugin relaxes integrality in a Pyomo model. diff --git a/pyomo/core/plugins/transform/scaling.py b/pyomo/core/plugins/transform/scaling.py index ab7965ee617..0596590e979 100644 --- a/pyomo/core/plugins/transform/scaling.py +++ b/pyomo/core/plugins/transform/scaling.py @@ -10,22 +10,32 @@ # ___________________________________________________________________________ from pyomo.common.collections import ComponentMap -from pyomo.core.base import Var, Constraint, Objective, _ConstraintData, _ObjectiveData, Suffix, value +from pyomo.core.base import ( + Block, + Var, + Constraint, + Objective, + _ConstraintData, + _ObjectiveData, + Suffix, + value, +) from pyomo.core.plugins.transform.hierarchy import Transformation from pyomo.core.base import TransformationFactory -from pyomo.core.expr.current import replace_expressions +from pyomo.core.expr import replace_expressions from pyomo.util.components import rename_components -@TransformationFactory.register('core.scale_model', - doc="Scale model variables, constraints, and objectives.") +@TransformationFactory.register( + 'core.scale_model', doc="Scale model variables, constraints, and objectives." +) class ScaleModel(Transformation): """ Transformation to scale a model. This plugin performs variable, constraint, and objective scaling on a model based on the scaling factors in the suffix 'scaling_parameter' - set for the variables, constraints, and/or objective. This is typically + set for the variables, constraints, and/or objective. This is typically done to scale the problem for improved numerical properties. Supported transformation methods: @@ -43,7 +53,7 @@ class ScaleModel(Transformation): >>> # create the model >>> model = ConcreteModel() >>> model.x = Var(bounds=(-5, 5), initialize=1.0) - >>> model.y = Var(bounds=(0, 1), initialize=1.0) + >>> model.y = Var(bounds=(0, 1), initialize=1.0) >>> model.obj = Objective(expr=1e8*model.x + 1e6*model.y) >>> model.con = Constraint(expr=model.x + model.y == 1.0) >>> # create the scaling factors @@ -51,7 +61,7 @@ class ScaleModel(Transformation): >>> model.scaling_factor[model.obj] = 1e-6 # scale the objective >>> model.scaling_factor[model.con] = 2.0 # scale the constraint >>> model.scaling_factor[model.x] = 0.2 # scale the x variable - >>> # transform the model + >>> # transform the model >>> scaled_model = TransformationFactory('core.scale_model').create_using(model) >>> # print the value of the objective function to show scaling has occurred >>> print(value(model.x)) @@ -65,9 +75,7 @@ class ScaleModel(Transformation): >>> print(value(scaled_model.scaled_obj)) 101.0 - ToDo - ==== - - implement an option to change the variables names or not + .. todo:: Implement an option to change the variables names or not """ @@ -81,23 +89,102 @@ def _create_using(self, original_model, **kwds): self._apply_to(scaled_model, **kwds) return scaled_model + def _suffix_finder(self, component_data, suffix_name, root=None): + """Find suffix value for a given component data object in model tree + + Suffixes are searched by traversing the model hierarchy in three passes: + + 1. Search for a Suffix matching the specific component_data, + starting at the `root` and descending down the tree to + the component_data. Return the first match found. + 2. Search for a Suffix matching the component_data's container, + starting at the `root` and descending down the tree to + the component_data. Return the first match found. + 3. Search for a Suffix with key `None`, starting from the + component_data and working up the tree to the `root`. + Return the first match found. + 4. Return None + + Parameters + ---------- + component_data: ComponentDataBase + + Component or component data object to find suffix value for. + + suffix_name: str + + Name of Suffix to search for. + + root: BlockData + + When searching up the block hierarchy, stop at this + BlockData instead of traversing all the way to the + `component_data.model()` Block. If the `component_data` is + not in the subtree defined by `root`, then the search will + proceed up to `component_data.model()`. + + Returns + ------- + The value for Suffix associated with component data if found, else None. + + """ + # Prototype for Suffix finder + + # We want to *include* the root (if it is not None), so if + # it is not None, we want to stop as soon as we get to its + # parent. + if root is not None: + if root.ctype is not Block and not issubclass(root.ctype, Block): + raise ValueError( + "_find_suffix: root must be a BlockData " + f"(found {root.ctype.__name__}: {root})" + ) + if root.is_indexed(): + raise ValueError( + "_find_suffix: root must be a BlockData " + f"(found {type(root).__name__}: {root})" + ) + root = root.parent_block() + # Walk parent tree and search for suffixes + parent = component_data.parent_block() + suffixes = [] + while parent is not root: + s = parent.component(suffix_name) + if s is not None and s.ctype is Suffix: + suffixes.append(s) + parent = parent.parent_block() + # Pass 1: look for the component_data, working root to leaf + for s in reversed(suffixes): + if component_data in s: + return s[component_data] + # Pass 2: look for the component container, working root to leaf + parent_comp = component_data.parent_component() + if parent_comp is not component_data: + for s in reversed(suffixes): + if parent_comp in s: + return s[parent_comp] + # Pass 3: look for None, working leaf to root + for s in suffixes: + if None in s: + return s[None] + return None + def _get_float_scaling_factor(self, instance, component_data): - scaling_factor = None - if component_data in instance.scaling_factor: - scaling_factor = instance.scaling_factor[component_data] - elif component_data.parent_component() in instance.scaling_factor: - scaling_factor = instance.scaling_factor[component_data.parent_component()] + scaling_factor = self._suffix_finder(component_data, "scaling_factor") + # If still no scaling factor, return 1.0 if scaling_factor is None: return 1.0 + # Make sure scaling factor is a float try: scaling_factor = float(scaling_factor) except ValueError: raise ValueError( "Suffix 'scaling_factor' has a value %s for component %s that cannot be converted to a float. " "Floating point values are required for this suffix in the ScaleModel transformation." - % (scaling_factor, component_data)) + % (scaling_factor, component_data) + ) return scaling_factor def _apply_to(self, model, rename=True): @@ -106,40 +193,49 @@ def _apply_to(self, model, rename=True): # if the scaling_method is 'user', get the scaling parameters from the suffixes if self._scaling_method == 'user': - # perform some checks to make sure we have the necessary suffixes - if type(model.component('scaling_factor')) is not Suffix: - raise ValueError("ScaleModel transformation called with scaling_method='user'" - ", but cannot find the suffix 'scaling_factor' on the model") - # get the scaling factors - for c in model.component_data_objects(ctype=(Var, Constraint, Objective), descend_into=True): - component_scaling_factor_map[c] = self._get_float_scaling_factor(model, c) + for c in model.component_data_objects( + ctype=(Var, Constraint, Objective), descend_into=True + ): + component_scaling_factor_map[c] = self._get_float_scaling_factor( + model, c + ) else: - raise ValueError("ScaleModel transformation: unknown scaling_method found" - "-- supported values: 'user' ") + raise ValueError( + "ScaleModel transformation: unknown scaling_method found" + "-- supported values: 'user' " + ) if rename: # rename all the Vars, Constraints, and Objectives # from foo to scaled_foo - component_list = list(model.component_objects( - ctype=[Var, Constraint, Objective])) + component_list = list( + model.component_objects(ctype=[Var, Constraint, Objective]) + ) scaled_component_to_original_name_map = rename_components( - model=model, - component_list=component_list, - prefix='scaled_', - ) + model=model, component_list=component_list, prefix='scaled_' + ) else: scaled_component_to_original_name_map = ComponentMap( - [(comp, comp.name) for comp in - model.component_objects( - ctype=[Var,Constraint, Objective])] + [ + (comp, comp.name) + for comp in model.component_objects( + ctype=[Var, Constraint, Objective] ) + ] + ) # scale the variable bounds and values and build the variable substitution map # for scaling vars in constraints variable_substitution_map = ComponentMap() already_scaled = set() - for variable in [var for var in model.component_objects(ctype=Var, descend_into=True)]: + for variable in [ + var for var in model.component_objects(ctype=Var, descend_into=True) + ]: + if variable.is_reference(): + # Skip any references - these should get picked up when handling the actual variable + continue + # set the bounds/value for the scaled variable for k in variable: v = variable[k] @@ -170,13 +266,20 @@ def _apply_to(self, model, rename=True): scale_constraint_dual = True # translate the variable_substitution_map (ComponentMap) - # to variable_substition_dict (key: id() of component) + # to variable_substitution_dict (key: id() of component) # ToDo: We should change replace_expressions to accept a ComponentMap as well - variable_substitution_dict = {id(k):variable_substitution_map[k] - for k in variable_substitution_map} + variable_substitution_dict = { + id(k): variable_substitution_map[k] for k in variable_substitution_map + } already_scaled = set() - for component in model.component_objects(ctype=(Constraint, Objective), descend_into=True): + for component in model.component_objects( + ctype=(Constraint, Objective), descend_into=True + ): + if component.is_reference(): + # Skip any references - these should get picked up when handling the actual component + continue + for k in component: c = component[k] if id(c) in already_scaled: @@ -185,11 +288,12 @@ def _apply_to(self, model, rename=True): # perform the constraint/objective scaling and variable sub scaling_factor = component_scaling_factor_map[c] if isinstance(c, _ConstraintData): - body = scaling_factor * \ - replace_expressions(expr=c.body, - substitution_map=variable_substitution_dict, - descend_into_named_expressions=True, - remove_named_expressions=True) + body = scaling_factor * replace_expressions( + expr=c.body, + substitution_map=variable_substitution_dict, + descend_into_named_expressions=True, + remove_named_expressions=True, + ) # scale the rhs lower = c.lower @@ -206,24 +310,28 @@ def _apply_to(self, model, rename=True): dual_value = model.dual[c] if dual_value is not None: model.dual[c] = dual_value / scaling_factor - + if c.equality: c.set_value((lower, body)) else: c.set_value((lower, body, upper)) elif isinstance(c, _ObjectiveData): - c.expr = scaling_factor * \ - replace_expressions(expr=c.expr, - substitution_map=variable_substitution_dict, - descend_into_named_expressions=True, - remove_named_expressions=True) + c.expr = scaling_factor * replace_expressions( + expr=c.expr, + substitution_map=variable_substitution_dict, + descend_into_named_expressions=True, + remove_named_expressions=True, + ) else: raise NotImplementedError( - 'Unknown object type found when applying scaling factors in ScaleModel transformation - Internal Error') + 'Unknown object type found when applying scaling factors in ScaleModel transformation - Internal Error' + ) model.component_scaling_factor_map = component_scaling_factor_map - model.scaled_component_to_original_name_map = scaled_component_to_original_name_map + model.scaled_component_to_original_name_map = ( + scaled_component_to_original_name_map + ) return model @@ -244,27 +352,47 @@ def propagate_solution(self, scaled_model, original_model): """ if not hasattr(scaled_model, 'component_scaling_factor_map'): - raise AttributeError('ScaleModel:propagate_solution called with scaled_model that does not ' - 'have a component_scaling_factor_map. It is possible this method was called ' - 'using a model that was not scaled with the ScaleModel transformation') + raise AttributeError( + 'ScaleModel:propagate_solution called with scaled_model that does not ' + 'have a component_scaling_factor_map. It is possible this method was called ' + 'using a model that was not scaled with the ScaleModel transformation' + ) if not hasattr(scaled_model, 'scaled_component_to_original_name_map'): - raise AttributeError('ScaleModel:propagate_solution called with scaled_model that does not ' - 'have a scaled_component_to_original_name_map. It is possible this method was called ' - 'using a model that was not scaled with the ScaleModel transformation') + raise AttributeError( + 'ScaleModel:propagate_solution called with scaled_model that does not ' + 'have a scaled_component_to_original_name_map. It is possible this method was called ' + 'using a model that was not scaled with the ScaleModel transformation' + ) component_scaling_factor_map = scaled_model.component_scaling_factor_map - scaled_component_to_original_name_map = scaled_model.scaled_component_to_original_name_map - - # get the objective scaling factor - scaled_objectives = list(scaled_model.component_data_objects(ctype=Objective, active=True, descend_into=True)) - if len(scaled_objectives) != 1: - raise NotImplementedError( - 'ScaleModel.propagate_solution requires a single active objective function, but %d objectives found.' % ( - len(objectives))) - objective_scaling_factor = component_scaling_factor_map[scaled_objectives[0]] + scaled_component_to_original_name_map = ( + scaled_model.scaled_component_to_original_name_map + ) # transfer the variable values and reduced costs check_reduced_costs = type(scaled_model.component('rc')) is Suffix + check_dual = ( + type(scaled_model.component('dual')) is Suffix + and type(original_model.component('dual')) is Suffix + ) + + if check_reduced_costs or check_dual: + # get the objective scaling factor + scaled_objectives = list( + scaled_model.component_data_objects( + ctype=Objective, active=True, descend_into=True + ) + ) + if len(scaled_objectives) != 1: + raise NotImplementedError( + 'ScaleModel.propagate_solution requires a single active objective function, but %d objectives found.' + % (len(scaled_objectives)) + ) + else: + objective_scaling_factor = component_scaling_factor_map[ + scaled_objectives[0] + ] + for scaled_v in scaled_model.component_objects(ctype=Var, descend_into=True): # get the unscaled_v from the original model original_v_path = scaled_component_to_original_name_map[scaled_v] @@ -277,14 +405,24 @@ def propagate_solution(self, scaled_model, original_model): skip_validation=True, ) if check_reduced_costs and scaled_v[k] in scaled_model.rc: - original_model.rc[original_v[k]] = scaled_model.rc[scaled_v[k]] * component_scaling_factor_map[ - scaled_v[k]] / objective_scaling_factor + original_model.rc[original_v[k]] = ( + scaled_model.rc[scaled_v[k]] + * component_scaling_factor_map[scaled_v[k]] + / objective_scaling_factor + ) # transfer the duals - if type(scaled_model.component('dual')) is Suffix and type(original_model.component('dual')) is Suffix: - for scaled_c in scaled_model.component_objects(ctype=Constraint, descend_into=True): - original_c = original_model.find_component(scaled_component_to_original_name_map[scaled_c]) + if check_dual: + for scaled_c in scaled_model.component_objects( + ctype=Constraint, descend_into=True + ): + original_c = original_model.find_component( + scaled_component_to_original_name_map[scaled_c] + ) for k in scaled_c: - original_model.dual[original_c[k]] = scaled_model.dual[scaled_c[k]] * component_scaling_factor_map[ - scaled_c[k]] / objective_scaling_factor + original_model.dual[original_c[k]] = ( + scaled_model.dual[scaled_c[k]] + * component_scaling_factor_map[scaled_c[k]] + / objective_scaling_factor + ) diff --git a/pyomo/core/plugins/transform/standard_form.py b/pyomo/core/plugins/transform/standard_form.py index bf5bd675022..54df13fc49d 100644 --- a/pyomo/core/plugins/transform/standard_form.py +++ b/pyomo/core/plugins/transform/standard_form.py @@ -15,7 +15,9 @@ from pyomo.core.plugins.transform.equality_transform import EqualityTransform -@TransformationFactory.register("core.standard_form", doc="Create an equivalent LP model in standard form.") +@TransformationFactory.register( + "core.standard_form", doc="Create an equivalent LP model in standard form." +) class StandardForm(IsomorphicTransformation): """ Produces a standard-form representation of the model. This form has @@ -41,7 +43,7 @@ def __init__(self, **kwds): def _create_using(self, model, **kwds): """ - Tranform a model to standard form + Transform a model to standard form """ # Optional naming schemes to pass to EqualityTransform @@ -63,7 +65,7 @@ def _create_using(self, model, **kwds): # (that aren't equality constraints) we call it first. # # EqualityTransform introduces new variables, but they are - # constrainted to be nonnegative. + # constrained to be nonnegative. sf = nonneg(model, **nn_kwds) sf = equality(sf, **eq_kwds) diff --git a/pyomo/core/plugins/transform/util.py b/pyomo/core/plugins/transform/util.py index efcdc96ad28..bba8adfbc0f 100644 --- a/pyomo/core/plugins/transform/util.py +++ b/pyomo/core/plugins/transform/util.py @@ -79,11 +79,10 @@ def collectAbstractComponents(model): # Iterate over all model components for comp in cp._ctypes: - # Collect all Constraint objects if issubclass(comp, Constraint): comps = cp.component_map(comp, active=True) - for (name, obj) in [(name, comps[name]) for name in comps]: + for name, obj in [(name, comps[name]) for name in comps]: # Query this constraint's attributes data = {} @@ -99,7 +98,7 @@ def collectAbstractComponents(model): # Collect all Objective objects if issubclass(comp, Objective): comps = cp.component_map(comp, active=True) - for (name, obj) in [(name, comps[name]) for name in comps]: + for name, obj in [(name, comps[name]) for name in comps]: # Query this objective's attributes data = {} @@ -115,7 +114,7 @@ def collectAbstractComponents(model): # Collect all Var objects if issubclass(comp, Var): comps = cp.component_map(comp, active=True) - for (name, obj) in [(name, comps[name]) for name in comps]: + for name, obj in [(name, comps[name]) for name in comps]: # Query this variable's attributes data = {} @@ -134,7 +133,7 @@ def collectAbstractComponents(model): # Collect all Set objects if issubclass(comp, Set): comps = cp.component_map(comp, active=True) - for (name, obj) in [(name, comps[name]) for name in comps]: + for name, obj in [(name, comps[name]) for name in comps]: # Query this variable's attributes data = {} @@ -150,7 +149,7 @@ def collectAbstractComponents(model): # Collect all Param objects if issubclass(comp, Param): comps = cp.component_map(comp, active=True) - for (name, obj) in [(name, comps[name]) for name in comps]: + for name, obj in [(name, comps[name]) for name in comps]: # Query this variable's attributes data = {} @@ -173,6 +172,7 @@ def collectAbstractComponents(model): return master + def _getAbstractIndices(comp): """ Returns the index or index set of this component @@ -184,27 +184,31 @@ def _getAbstractIndices(comp): # Unindexed constraint return {None: None} + def _getAbstractRule(comp): """ Returns the rule defining this component """ return comp.rule + def _getAbstractDomain(comp): """ Returns the domain of this component """ - return getattr(comp,'domain', None) + return getattr(comp, 'domain', None) + def _getAbstractBounds(comp): """ Returns the bounds of this component """ - if getattr(comp,'bounds',None) is None: + if getattr(comp, 'bounds', None) is None: return (None, None) else: return comp.bounds + def _getAbstractInitialize(comp): """ Returns the initialization rule. If initialize is a container; return None; @@ -215,6 +219,7 @@ def _getAbstractInitialize(comp): else: return None + try: from functools import partial as _partial except ImportError: @@ -225,6 +230,7 @@ def _partial(f, *args, **kwds): partially applied """ + def closure(*cargs, **ckwds): # Collect positional arguments tmp_args = list(args) @@ -235,8 +241,10 @@ def closure(*cargs, **ckwds): # Call the original function return f(*tmp_args, **tmp_kwds) + return closure + def partial(*args, **kwargs): """ copy.deepcopy balks at copying anonymous functions. This overrides @@ -251,6 +259,7 @@ def _partial_deepcopy(memo={}): func.__deepcopy__ = _partial_deepcopy return func + def process_canonical_repn(expr): """ Returns a dictionary of {var_name_or_None: coef} values. None diff --git a/pyomo/core/pyomoobject.py b/pyomo/core/pyomoobject.py index aea520be15d..692db444f84 100644 --- a/pyomo/core/pyomoobject.py +++ b/pyomo/core/pyomoobject.py @@ -9,8 +9,15 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +from pyomo.common.autoslots import AutoSlots -class PyomoObject(object): + +# Note: in an ideal world, PyomoObject would use the AutoSlots +# metaclass. However, declaring a custom (non-type) metaclass has +# measurable performance implications. It is faster to just look for +# the __auto_slots__ attribute and generate it if it is not present than +# to slow down the entire class hierarchy by declaring a metaclass. +class PyomoObject(AutoSlots.Mixin): __slots__ = () def is_component_type(self): @@ -29,7 +36,7 @@ def is_variable_type(self): """Return False unless this class is a variable object""" return False - def is_expression_type(self): + def is_expression_type(self, expression_system=None): """Return True if this numeric value is an expression""" return False @@ -38,7 +45,10 @@ def is_named_expression_type(self): return False def is_logical_type(self): - """Return True if this class is a Pyomo Boolean value, variable, or expression.""" + """Return True if this class is a Pyomo Boolean object. + + Boolean objects include constants, variables, or logical expressions. + """ return False def is_reference(self): diff --git a/pyomo/core/staleflag.py b/pyomo/core/staleflag.py index d926424c0ec..7d0dddef0dd 100644 --- a/pyomo/core/staleflag.py +++ b/pyomo/core/staleflag.py @@ -9,11 +9,21 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + class _StaleFlagManager(object): def __init__(self): self._current = 0 self.mark_all_as_stale() + def stale_mapper(self, encode, value): + if encode: + return self.is_stale(value) + else: + if value: + return 0 + else: + return self.get_flag(0) + def _get_flag(self, current_flag): """Return the current global stale flag value""" return self._current @@ -59,4 +69,5 @@ def mark_all_as_stale(self, delayed=False): setattr(self, 'get_flag', getattr(self, '_get_flag')) self._current += 1 + StaleFlagManager = _StaleFlagManager() diff --git a/pyomo/core/tests/data/test_odbc_ini.py b/pyomo/core/tests/data/test_odbc_ini.py index d8184aa5948..e7152181645 100644 --- a/pyomo/core/tests/data/test_odbc_ini.py +++ b/pyomo/core/tests/data/test_odbc_ini.py @@ -17,6 +17,7 @@ try: import pyodbc + pyodbc_available = True from pyomo.dataportal.plugins.db_table import ODBCConfig, ODBCError @@ -26,11 +27,9 @@ @unittest.skipIf(not pyodbc_available, "PyODBC is not installed.") class TestODBCIni(unittest.TestCase): - def __init__(self, *args, **kwargs): unittest.TestCase.__init__(self, *args, **kwargs) - self.ACCESS_CONFIGSTR = "Microsoft Access Driver (*.mdb)" self.EXCEL_CONFIGSTR = "Microsoft Excel Driver (*.xls)" @@ -71,20 +70,33 @@ def test_init_empty_data(self): def test_init_simple_data(self): config = ODBCConfig(data=self.simple_data) - self.assertEqual({'testdb' : self.ACCESS_CONFIGSTR}, config.sources) - self.assertEqual({'testdb' : {'Database' : "testdb.mdb"}}, config.source_specs) + self.assertEqual({'testdb': self.ACCESS_CONFIGSTR}, config.sources) + self.assertEqual({'testdb': {'Database': "testdb.mdb"}}, config.source_specs) self.assertEqual({}, config.odbc_info) def test_init_complex_data(self): config = ODBCConfig(data=self.complex_data) - self.assertEqual({'test1' : self.ACCESS_CONFIGSTR, 'test2' : self.EXCEL_CONFIGSTR}, config.sources) - self.assertEqual({'test1' : {'Database' : "test1.db", 'LogonID' : "Admin", 'pwd' : "secret_pass"}, 'test2' : {'Database' : "test2.xls"}}, config.source_specs) - self.assertEqual({'UNICODE' : "UTF-8"}, config.odbc_info) + self.assertEqual( + {'test1': self.ACCESS_CONFIGSTR, 'test2': self.EXCEL_CONFIGSTR}, + config.sources, + ) + self.assertEqual( + { + 'test1': { + 'Database': "test1.db", + 'LogonID': "Admin", + 'pwd': "secret_pass", + }, + 'test2': {'Database': "test2.xls"}, + }, + config.source_specs, + ) + self.assertEqual({'UNICODE': "UTF-8"}, config.odbc_info) def test_add_source(self): config = ODBCConfig() config.add_source("testdb", self.ACCESS_CONFIGSTR) - self.assertEqual({'testdb' : self.ACCESS_CONFIGSTR}, config.sources) + self.assertEqual({'testdb': self.ACCESS_CONFIGSTR}, config.sources) self.assertEqual({}, config.source_specs) self.assertEqual({}, config.odbc_info) @@ -103,18 +115,18 @@ def test_add_source_reserved(self): def test_add_source_spec(self): config = ODBCConfig() config.add_source("testdb", self.ACCESS_CONFIGSTR) - config.add_source_spec("testdb", {'Database' : "testdb.mdb"}) - self.assertEqual({'testdb' : {'Database' : "testdb.mdb"}}, config.source_specs) + config.add_source_spec("testdb", {'Database': "testdb.mdb"}) + self.assertEqual({'testdb': {'Database': "testdb.mdb"}}, config.source_specs) def test_add_spec_bad(self): config = ODBCConfig() with self.assertRaises(ODBCError): - config.add_source_spec("testdb", {'Database' : "testdb.mdb"}) + config.add_source_spec("testdb", {'Database': "testdb.mdb"}) def test_del_source_dependent(self): config = ODBCConfig() config.add_source("testdb", self.ACCESS_CONFIGSTR) - config.add_source_spec("testdb", {'Database' : "testdb.mdb"}) + config.add_source_spec("testdb", {'Database': "testdb.mdb"}) config.del_source("testdb") self.assertEqual({}, config.sources) self.assertEqual({}, config.source_specs) @@ -122,7 +134,7 @@ def test_del_source_dependent(self): def test_set_odbc_info(self): config = ODBCConfig() config.set_odbc_info("UNICODE", "UTF-8") - self.assertEqual({'UNICODE' : "UTF-8"}, config.odbc_info) + self.assertEqual({'UNICODE': "UTF-8"}, config.odbc_info) def test_odbc_repr(self): config = ODBCConfig(data=self.simple_data) @@ -138,7 +150,7 @@ def test_baselines(self): config = ODBCConfig(filename=iniPath) config.write(outPath) - written = ODBCConfig(filename = outPath) + written = ODBCConfig(filename=outPath) self.assertEqual(config, written) try: @@ -151,9 +163,10 @@ def test_eq(self): configA = ODBCConfig(data=self.simple_data) configB = ODBCConfig() - configB.sources = {'testdb' : self.ACCESS_CONFIGSTR} - configB.source_specs = {'testdb' : {'Database' : 'testdb.mdb'}} + configB.sources = {'testdb': self.ACCESS_CONFIGSTR} + configB.source_specs = {'testdb': {'Database': 'testdb.mdb'}} self.assertEqual(configA, configB) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/diet/baselines/diet1_pyomo_dat.jsn b/pyomo/core/tests/diet/baselines/diet1_pyomo_dat.jsn index 92c48620d5e..367f2e82e91 100644 --- a/pyomo/core/tests/diet/baselines/diet1_pyomo_dat.jsn +++ b/pyomo/core/tests/diet/baselines/diet1_pyomo_dat.jsn @@ -1,45 +1,45 @@ { "Problem": [ { - "Lower bound": 2.8100000000000001, - "Number of constraints": 4, - "Number of nonzeros": 10, - "Number of objectives": 1, - "Number of variables": 10, - "Sense": "minimize", + "Lower bound": 2.8100000000000001, + "Number of constraints": 3, + "Number of nonzeros": 9, + "Number of objectives": 1, + "Number of variables": 9, + "Sense": "minimize", "Upper bound": 2.8100000000000001 } - ], + ], "Solution": [ { - "number of solutions": 1, + "number of solutions": 1, "number of solutions displayed": 1 - }, + }, { - "Gap": 0.0, + "Gap": 0.0, "Objective": { "Total_Cost": { "Value": 2.8100000000000001 } - }, - "Status": "optimal", + }, + "Status": "optimal", "Variable": { "Buy[1% Lowfat Milk]": { "Value": 1.0 - }, + }, "Buy[Filet-O-Fish]": { "Value": 1.0 - }, + }, "Buy['Fries, small']": { "Value": 1.0 - } + } } } - ], + ], "Solver": [ { - "Error rc": 0, - "Status": "ok", + "Error rc": 0, + "Status": "ok", "Termination condition": "optimal" } ] diff --git a/pyomo/core/tests/diet/baselines/diet1_pyomo_sqlite3.jsn b/pyomo/core/tests/diet/baselines/diet1_pyomo_sqlite3.jsn index 92c48620d5e..367f2e82e91 100644 --- a/pyomo/core/tests/diet/baselines/diet1_pyomo_sqlite3.jsn +++ b/pyomo/core/tests/diet/baselines/diet1_pyomo_sqlite3.jsn @@ -1,45 +1,45 @@ { "Problem": [ { - "Lower bound": 2.8100000000000001, - "Number of constraints": 4, - "Number of nonzeros": 10, - "Number of objectives": 1, - "Number of variables": 10, - "Sense": "minimize", + "Lower bound": 2.8100000000000001, + "Number of constraints": 3, + "Number of nonzeros": 9, + "Number of objectives": 1, + "Number of variables": 9, + "Sense": "minimize", "Upper bound": 2.8100000000000001 } - ], + ], "Solution": [ { - "number of solutions": 1, + "number of solutions": 1, "number of solutions displayed": 1 - }, + }, { - "Gap": 0.0, + "Gap": 0.0, "Objective": { "Total_Cost": { "Value": 2.8100000000000001 } - }, - "Status": "optimal", + }, + "Status": "optimal", "Variable": { "Buy[1% Lowfat Milk]": { "Value": 1.0 - }, + }, "Buy[Filet-O-Fish]": { "Value": 1.0 - }, + }, "Buy['Fries, small']": { "Value": 1.0 - } + } } } - ], + ], "Solver": [ { - "Error rc": 0, - "Status": "ok", + "Error rc": 0, + "Status": "ok", "Termination condition": "optimal" } ] diff --git a/pyomo/core/tests/diet/test_diet.py b/pyomo/core/tests/diet/test_diet.py index 131acea00a4..d92f0a024ba 100644 --- a/pyomo/core/tests/diet/test_diet.py +++ b/pyomo/core/tests/diet/test_diet.py @@ -18,21 +18,24 @@ from pyomo.opt import check_available_solvers currdir = os.path.dirname(os.path.abspath(__file__)) -exdir = os.path.abspath(os.path.join( - currdir, '..', '..', '..', '..', 'examples', 'pyomo', 'diet')) +exdir = os.path.abspath( + os.path.join(currdir, '..', '..', '..', '..', 'examples', 'pyomo', 'diet') +) sqlite3_available = pyodbc_available = False try: import sqlite3 + sqlite3_available = True except ImportError: pass try: import pyodbc + pyodbc_available = True # # Temporarily deprecating pyodbc tests. - # These tests are not reliably executing with Python 2.6 and 2.7, + # These tests are not reliably executing with Python 2.6 and 2.7, # due to apparent issues with unicode representation. # pyodbc_available = False @@ -40,12 +43,14 @@ pass solvers = None -class Test(unittest.TestCase): + +class Test(unittest.TestCase): @classmethod def setUpClass(cls): global solvers import pyomo.environ + solvers = check_available_solvers('glpk') def run_pyomo(self, *args, **kwargs): @@ -61,8 +66,15 @@ def run_pyomo(self, *args, **kwargs): args = list(map(str, args)) outputpath = kwargs.pop('outputpath', os.path.join(exdir, 'results.jsn')) - args = ['solve', '--solver=glpk', '--results-format=json', - '-c', '--logging=quiet', '--save-results', outputpath] + args + args = [ + 'solve', + '--solver=glpk', + '--results-format=json', + '-c', + '--logging=quiet', + '--save-results', + outputpath, + ] + args old_path = os.getcwd() os.chdir(exdir) @@ -74,34 +86,43 @@ def compare_json(self, file1, file2): with open(file1, 'r') as f1, open(file2, 'r') as f2: file1_contents = json.load(f1) file2_contents = json.load(f2) - self.assertStructuredAlmostEqual(file2_contents, file1_contents, - allow_second_superset=True) + self.assertStructuredAlmostEqual( + file2_contents, file1_contents, allow_second_superset=True + ) def test_pyomo_dat(self): - results_file = self.run_pyomo(os.path.join(exdir, 'diet1.py'), - os.path.join(exdir, 'diet.dat'), - outputpath=os.path.join(currdir, 'pyomo_dat.jsn')) + results_file = self.run_pyomo( + os.path.join(exdir, 'diet1.py'), + os.path.join(exdir, 'diet.dat'), + outputpath=os.path.join(currdir, 'pyomo_dat.jsn'), + ) baseline_file = os.path.join(currdir, 'baselines', 'diet1_pyomo_dat.jsn') self.compare_json(results_file, baseline_file) @unittest.skipUnless(pyodbc_available, "Requires PyODBC") def test_pyomo_mdb(self): - results_file = self.run_pyomo(os.path.join(exdir, 'diet1.py'), - os.path.join(exdir, 'diet1.db.dat'), - outputpath=os.path.join(currdir, 'pyomo_mdb.jsn')) + results_file = self.run_pyomo( + os.path.join(exdir, 'diet1.py'), + os.path.join(exdir, 'diet1.db.dat'), + outputpath=os.path.join(currdir, 'pyomo_mdb.jsn'), + ) baseline_file = os.path.join(currdir, 'baselines', 'diet1_pyomo_mdb.jsn') self.compare_json(results_file, baseline_file) @unittest.skipUnless(pyodbc_available, "Requires PyODBC") def test_mdb_equality(self): dat_results_file = self.run_pyomo( - os.path.join(exdir, 'diet1.py'), os.path.join(exdir, 'diet.dat'), - outputpath=os.path.join(currdir, 'dat_results.jsn')) + os.path.join(exdir, 'diet1.py'), + os.path.join(exdir, 'diet.dat'), + outputpath=os.path.join(currdir, 'dat_results.jsn'), + ) with open(dat_results_file) as FILE: dat_results = json.load(FILE) db_results_file = self.run_pyomo( - os.path.join(exdir, 'diet1.py'), os.path.join(exdir, 'diet1.db.dat'), - outputpath=os.path.join(currdir, 'db_results.jsn')) + os.path.join(exdir, 'diet1.py'), + os.path.join(exdir, 'diet1.db.dat'), + outputpath=os.path.join(currdir, 'db_results.jsn'), + ) with open(db_results_file) as FILE: db_results = json.load(FILE) # Filter out the solver time @@ -114,22 +135,28 @@ def test_mdb_equality(self): @unittest.skipUnless(sqlite3_available, "Requires SQLite3") def test_pyomo_sqlite3(self): - results_file = self.run_pyomo(os.path.join(exdir, 'diet1.py'), - os.path.join(exdir, 'diet1.sqlite.dat'), - outputpath=os.path.join(currdir, 'pyomo_sqlite3.jsn')) + results_file = self.run_pyomo( + os.path.join(exdir, 'diet1.py'), + os.path.join(exdir, 'diet1.sqlite.dat'), + outputpath=os.path.join(currdir, 'pyomo_sqlite3.jsn'), + ) baseline_file = os.path.join(currdir, 'baselines', 'diet1_pyomo_sqlite3.jsn') self.compare_json(results_file, baseline_file) @unittest.skipUnless(sqlite3_available, "Requires SQLite3") def test_sqlite_equality(self): dat_results_file = self.run_pyomo( - os.path.join(exdir, 'diet1.py'), os.path.join(exdir, 'diet.dat'), - outputpath=os.path.join(currdir, 'dat_results.jsn')) + os.path.join(exdir, 'diet1.py'), + os.path.join(exdir, 'diet.dat'), + outputpath=os.path.join(currdir, 'dat_results.jsn'), + ) with open(dat_results_file) as FILE: dat_results = json.load(FILE) sqlite_results_file = self.run_pyomo( - os.path.join(exdir, 'diet1.py'), os.path.join(exdir, 'diet1.sqlite.dat'), - outputpath=os.path.join(currdir, 'sqlite_results.jsn')) + os.path.join(exdir, 'diet1.py'), + os.path.join(exdir, 'diet1.sqlite.dat'), + outputpath=os.path.join(currdir, 'sqlite_results.jsn'), + ) with open(sqlite_results_file) as FILE: sqlite_results = json.load(FILE) # Filter out the solver time @@ -140,6 +167,6 @@ def test_sqlite_equality(self): os.remove(dat_results_file) os.remove(sqlite_results_file) + if __name__ == "__main__": unittest.main() - diff --git a/pyomo/core/tests/examples/pmedian.py b/pyomo/core/tests/examples/pmedian.py index 22357c8144a..5176f8bad18 100644 --- a/pyomo/core/tests/examples/pmedian.py +++ b/pyomo/core/tests/examples/pmedian.py @@ -9,41 +9,72 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.environ import AbstractModel, Param, RangeSet, Var, Reals, Binary, PositiveIntegers, Constraint, Objective +from pyomo.environ import ( + AbstractModel, + Param, + RangeSet, + Var, + Reals, + Binary, + PositiveIntegers, + Constraint, + Objective, +) import math model = AbstractModel() model.N = Param(within=PositiveIntegers) -model.Locations = RangeSet(1,model.N) +model.Locations = RangeSet(1, model.N) -model.P = Param(within=RangeSet(1,model.N)) +model.P = Param(within=RangeSet(1, model.N)) model.M = Param(within=PositiveIntegers) -model.Customers = RangeSet(1,model.M) +model.Customers = RangeSet(1, model.M) + def d_rule(model, n, m): - return math.sin(n*2.33333+m*7.99999) + return math.sin(n * 2.33333 + m * 7.99999) + + model.d = Param(model.Locations, model.Customers, initialize=d_rule, within=Reals) -model.x = Var(model.Locations, model.Customers, bounds=(0.0,1.0)) +model.x = Var(model.Locations, model.Customers, bounds=(0.0, 1.0)) model.y = Var(model.Locations, within=Binary) + def rule(model): - return sum( [model.d[n,m]*model.x[n,m] for n in model.Locations for m in model.Customers] ) + return sum( + [ + model.d[n, m] * model.x[n, m] + for n in model.Locations + for m in model.Customers + ] + ) + + model.obj = Objective(rule=rule) + def rule(model, m): - return (sum( [model.x[n,m] for n in model.Locations]), 1.0) + return (sum([model.x[n, m] for n in model.Locations]), 1.0) + + model.single_x = Constraint(model.Customers, rule=rule) + def rule(model, n, m): - return (None, model.x[n,m] - model.y[n], 0.0) + return (None, model.x[n, m] - model.y[n], 0.0) + + model.bound_y = Constraint(model.Locations, model.Customers, rule=rule) + def rule(model): - return (sum( [model.y[n] for n in model.Locations] ) - model.P, 0.0) + return (sum([model.y[n] for n in model.Locations]) - model.P, 0.0) + + model.num_facilities = Constraint(rule=rule) diff --git a/pyomo/core/tests/examples/pmedian1.py b/pyomo/core/tests/examples/pmedian1.py index 9773a62261b..5aeec502f7c 100644 --- a/pyomo/core/tests/examples/pmedian1.py +++ b/pyomo/core/tests/examples/pmedian1.py @@ -11,4 +11,4 @@ from . import pmedian -MODEL=pmedian.model +MODEL = pmedian.model diff --git a/pyomo/core/tests/examples/pmedian2.py b/pyomo/core/tests/examples/pmedian2.py index e28963b018b..8a908f7d661 100644 --- a/pyomo/core/tests/examples/pmedian2.py +++ b/pyomo/core/tests/examples/pmedian2.py @@ -11,27 +11,35 @@ from . import pmedian + def pyomo_preprocess(**kwds): - print( "PREPROCESSING %s"%(sorted(list(kwds.keys()))) ) + print("PREPROCESSING %s" % (sorted(list(kwds.keys())))) + def pyomo_create_model(**kwds): - print( "CREATING MODEL %s"%(sorted(list(kwds.keys()))) ) + print("CREATING MODEL %s" % (sorted(list(kwds.keys())))) return pmedian.model + def pyomo_print_model(**kwds): - print( "PRINTING MODEL %s"%(sorted(list(kwds.keys()))) ) + print("PRINTING MODEL %s" % (sorted(list(kwds.keys())))) + def pyomo_print_instance(**kwds): - print( "PRINTING INSTANCE %s"%(sorted(list(kwds.keys()))) ) + print("PRINTING INSTANCE %s" % (sorted(list(kwds.keys())))) + def pyomo_save_instance(**kwds): - print( "SAVE INSTANCE %s"%(sorted(list(kwds.keys()))) ) + print("SAVE INSTANCE %s" % (sorted(list(kwds.keys())))) + def pyomo_print_results(**kwds): - print( "PRINTING RESULTS %s"%(sorted(list(kwds.keys()))) ) + print("PRINTING RESULTS %s" % (sorted(list(kwds.keys())))) + def pyomo_save_results(**kwds): - print( "SAVING RESULTS %s"%(sorted(list(kwds.keys()))) ) + print("SAVING RESULTS %s" % (sorted(list(kwds.keys())))) + def pyomo_postprocess(**kwds): - print( "POSTPROCESSING %s"%(sorted(list(kwds.keys()))) ) + print("POSTPROCESSING %s" % (sorted(list(kwds.keys())))) diff --git a/pyomo/core/tests/examples/pmedian4.py b/pyomo/core/tests/examples/pmedian4.py index 4365afc44f6..98dd90f3e8f 100644 --- a/pyomo/core/tests/examples/pmedian4.py +++ b/pyomo/core/tests/examples/pmedian4.py @@ -9,7 +9,17 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.environ import ConcreteModel, RangeSet, Param, Var, Reals, Binary, Objective, Constraint, ConstraintList +from pyomo.environ import ( + ConcreteModel, + RangeSet, + Param, + Var, + Reals, + Binary, + Objective, + Constraint, + ConstraintList, +) import math N = 5 @@ -18,29 +28,41 @@ model = ConcreteModel() -model.Locations = RangeSet(1,N) +model.Locations = RangeSet(1, N) + +model.Customers = RangeSet(1, M) -model.Customers = RangeSet(1,M) def d_rule(model, n, m): - return math.sin(n*2.33333+m*7.99999) + return math.sin(n * 2.33333 + m * 7.99999) + + model.d = Param(model.Locations, model.Customers, initialize=d_rule, within=Reals) -model.x = Var(model.Locations, model.Customers, bounds=(0.0,1.0)) +model.x = Var(model.Locations, model.Customers, bounds=(0.0, 1.0)) model.y = Var(model.Locations, within=Binary) + def rule(model): - return sum( [model.d[n,m]*model.x[n,m] for n in model.Locations for m in model.Customers] ) + return sum( + [ + model.d[n, m] * model.x[n, m] + for n in model.Locations + for m in model.Customers + ] + ) + + model.obj = Objective(rule=rule) model.single_x = ConstraintList() for m in model.Customers: - model.single_x.add( sum( [model.x[n,m] for n in model.Locations]) == 1.0 ) + model.single_x.add(sum([model.x[n, m] for n in model.Locations]) == 1.0) model.bound_y = ConstraintList() for n in model.Locations: for m in model.Customers: - model.bound_y.add( model.x[n,m] <= model.y[n] ) + model.bound_y.add(model.x[n, m] <= model.y[n]) -model.num_facilities = Constraint(expr=sum( [model.y[n] for n in model.Locations] ) == P) +model.num_facilities = Constraint(expr=sum([model.y[n] for n in model.Locations]) == P) diff --git a/pyomo/core/tests/examples/test1.txt b/pyomo/core/tests/examples/test1.txt index 3f2e3ea657e..48145130b30 100644 --- a/pyomo/core/tests/examples/test1.txt +++ b/pyomo/core/tests/examples/test1.txt @@ -1,63 +1,63 @@ { "Problem": [ { - "Lower bound": -5.0156090000000004, - "Number of constraints": 38, - "Number of nonzeros": 96, - "Number of objectives": 1, - "Number of variables": 36, - "Sense": "minimize", + "Lower bound": -5.0156090000000004, + "Number of constraints": 37, + "Number of nonzeros": 95, + "Number of objectives": 1, + "Number of variables": 35, + "Sense": "minimize", "Upper bound": -5.0156090000000004 } - ], + ], "Solution": [ { - "number of solutions": 1, + "number of solutions": 1, "number of solutions displayed": 1 - }, + }, { - "Gap": 0.0, + "Gap": 0.0, "Objective": { "obj": { "Value": -5.0156090000000004 } - }, - "Status": "optimal", + }, + "Status": "optimal", "Variable": { "x[3,2]": { "Value": 1.0 - }, + }, "x[3,6]": { "Value": 1.0 - }, + }, "x[4,1]": { "Value": 1.0 - }, + }, "x[4,4]": { "Value": 1.0 - }, + }, "x[4,5]": { "Value": 1.0 - }, + }, "x[5,3]": { "Value": 1.0 - }, + }, "y[3]": { "Value": 1.0 - }, + }, "y[4]": { "Value": 1.0 - }, + }, "y[5]": { "Value": 1.0 } } } - ], + ], "Solver": [ { - "Error rc": 0, - "Status": "ok", + "Error rc": 0, + "Status": "ok", "Termination condition": "optimal" } ] diff --git a/pyomo/core/tests/examples/test10.txt b/pyomo/core/tests/examples/test10.txt index 4d6b23171c4..ea7807db623 100644 --- a/pyomo/core/tests/examples/test10.txt +++ b/pyomo/core/tests/examples/test10.txt @@ -101,7 +101,7 @@ About to generate 'N' with data: {None: 5} 2 Var Declarations x : Size=0 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status y : Size=0 Domain=Binary Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status @@ -153,7 +153,7 @@ About to generate 'Locations' with data: None 2 Var Declarations x : Size=0 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status y : Size=0 Domain=Binary Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status @@ -203,7 +203,7 @@ About to generate 'P_domain' with data: None 2 Var Declarations x : Size=0 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status y : Size=0 Domain=Binary Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status @@ -253,7 +253,7 @@ About to generate 'P' with data: {None: 3} 2 Var Declarations x : Size=0 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status y : Size=0 Domain=Binary Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status @@ -305,7 +305,7 @@ About to generate 'M' with data: {None: 6} 2 Var Declarations x : Size=0 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status y : Size=0 Domain=Binary Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status @@ -357,7 +357,7 @@ About to generate 'Customers' with data: None 2 Var Declarations x : Size=0 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status y : Size=0 Domain=Binary Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status @@ -407,7 +407,7 @@ About to generate 'd_index' with data: None 2 Var Declarations x : Size=0 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status y : Size=0 Domain=Binary Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status @@ -459,7 +459,7 @@ About to generate 'd' with data: None 2 Var Declarations x : Size=0 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status y : Size=0 Domain=Binary Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status @@ -541,7 +541,7 @@ About to generate 'x_index' with data: None 2 Var Declarations x : Size=0 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status y : Size=0 Domain=Binary Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status @@ -623,7 +623,7 @@ About to generate 'x' with data: None 2 Var Declarations x : Size=0 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status y : Size=0 Domain=Binary Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status @@ -705,7 +705,7 @@ About to generate 'y' with data: None 2 Var Declarations x : Size=30 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status (1, 1) : None : 0.0 : 1.0 : None : False : undefined (1, 2) : None : 0.0 : 1.0 : None : False : undefined @@ -817,7 +817,7 @@ About to generate 'obj' with data: None 2 Var Declarations x : Size=30 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status (1, 1) : None : 0.0 : 1.0 : None : False : undefined (1, 2) : None : 0.0 : 1.0 : None : False : undefined @@ -934,7 +934,7 @@ About to generate 'single_x' with data: None 2 Var Declarations x : Size=30 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status (1, 1) : None : 0.0 : 1.0 : None : False : undefined (1, 2) : None : 0.0 : 1.0 : None : False : undefined @@ -1065,7 +1065,7 @@ About to generate 'bound_y_index' with data: None 2 Var Declarations x : Size=30 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status (1, 1) : None : 0.0 : 1.0 : None : False : undefined (1, 2) : None : 0.0 : 1.0 : None : False : undefined @@ -1220,7 +1220,7 @@ About to generate 'bound_y' with data: None 2 Var Declarations x : Size=30 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status (1, 1) : None : 0.0 : 1.0 : None : False : undefined (1, 2) : None : 0.0 : 1.0 : None : False : undefined @@ -1435,7 +1435,7 @@ About to generate 'num_facilities' with data: None 2 Var Declarations x : Size=30 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status (1, 1) : None : 0.0 : 1.0 : None : False : undefined (1, 2) : None : 0.0 : 1.0 : None : False : undefined @@ -1772,7 +1772,7 @@ MODEL INSTANCE 2 Var Declarations x : Size=30 Domain=Reals - Indicies: Locations, Customers, + Indices: Locations, Customers, Key : Initial Value : Lower Bound : Upper Bound : Current Value: Fixed: Status (1, 1) : None : 0.0 : 1.0 : None : False : used (1, 2) : None : 0.0 : 1.0 : None : False : used diff --git a/pyomo/core/tests/examples/test12.txt b/pyomo/core/tests/examples/test12.txt index 3f2e3ea657e..48145130b30 100644 --- a/pyomo/core/tests/examples/test12.txt +++ b/pyomo/core/tests/examples/test12.txt @@ -1,63 +1,63 @@ { "Problem": [ { - "Lower bound": -5.0156090000000004, - "Number of constraints": 38, - "Number of nonzeros": 96, - "Number of objectives": 1, - "Number of variables": 36, - "Sense": "minimize", + "Lower bound": -5.0156090000000004, + "Number of constraints": 37, + "Number of nonzeros": 95, + "Number of objectives": 1, + "Number of variables": 35, + "Sense": "minimize", "Upper bound": -5.0156090000000004 } - ], + ], "Solution": [ { - "number of solutions": 1, + "number of solutions": 1, "number of solutions displayed": 1 - }, + }, { - "Gap": 0.0, + "Gap": 0.0, "Objective": { "obj": { "Value": -5.0156090000000004 } - }, - "Status": "optimal", + }, + "Status": "optimal", "Variable": { "x[3,2]": { "Value": 1.0 - }, + }, "x[3,6]": { "Value": 1.0 - }, + }, "x[4,1]": { "Value": 1.0 - }, + }, "x[4,4]": { "Value": 1.0 - }, + }, "x[4,5]": { "Value": 1.0 - }, + }, "x[5,3]": { "Value": 1.0 - }, + }, "y[3]": { "Value": 1.0 - }, + }, "y[4]": { "Value": 1.0 - }, + }, "y[5]": { "Value": 1.0 } } } - ], + ], "Solver": [ { - "Error rc": 0, - "Status": "ok", + "Error rc": 0, + "Status": "ok", "Termination condition": "optimal" } ] diff --git a/pyomo/core/tests/examples/test14.txt b/pyomo/core/tests/examples/test14.txt index 3f2e3ea657e..48145130b30 100644 --- a/pyomo/core/tests/examples/test14.txt +++ b/pyomo/core/tests/examples/test14.txt @@ -1,63 +1,63 @@ { "Problem": [ { - "Lower bound": -5.0156090000000004, - "Number of constraints": 38, - "Number of nonzeros": 96, - "Number of objectives": 1, - "Number of variables": 36, - "Sense": "minimize", + "Lower bound": -5.0156090000000004, + "Number of constraints": 37, + "Number of nonzeros": 95, + "Number of objectives": 1, + "Number of variables": 35, + "Sense": "minimize", "Upper bound": -5.0156090000000004 } - ], + ], "Solution": [ { - "number of solutions": 1, + "number of solutions": 1, "number of solutions displayed": 1 - }, + }, { - "Gap": 0.0, + "Gap": 0.0, "Objective": { "obj": { "Value": -5.0156090000000004 } - }, - "Status": "optimal", + }, + "Status": "optimal", "Variable": { "x[3,2]": { "Value": 1.0 - }, + }, "x[3,6]": { "Value": 1.0 - }, + }, "x[4,1]": { "Value": 1.0 - }, + }, "x[4,4]": { "Value": 1.0 - }, + }, "x[4,5]": { "Value": 1.0 - }, + }, "x[5,3]": { "Value": 1.0 - }, + }, "y[3]": { "Value": 1.0 - }, + }, "y[4]": { "Value": 1.0 - }, + }, "y[5]": { "Value": 1.0 } } } - ], + ], "Solver": [ { - "Error rc": 0, - "Status": "ok", + "Error rc": 0, + "Status": "ok", "Termination condition": "optimal" } ] diff --git a/pyomo/core/tests/examples/test_amplbook2.py b/pyomo/core/tests/examples/test_amplbook2.py index 3a3069ab5cf..fdb9cc571bf 100644 --- a/pyomo/core/tests/examples/test_amplbook2.py +++ b/pyomo/core/tests/examples/test_amplbook2.py @@ -15,25 +15,28 @@ import os from os.path import abspath, dirname -topdir = dirname(dirname(abspath(__file__)))+os.sep+".."+os.sep+".." -currdir = dirname(abspath(__file__))+os.sep +topdir = dirname(dirname(abspath(__file__))) + os.sep + ".." + os.sep + ".." +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest from pyomo.environ import * -class TestAmplbook2(unittest.TestCase): pass + +class TestAmplbook2(unittest.TestCase): + pass + # # DISABLED FOR NOW ... Revisit these when the ipconvert utility is stable... # -#data_dir=topdir+os.sep+"examples"+os.sep+"pyomo"+os.sep+"amplbook2"+os.sep -#files = glob.glob(data_dir+"*.py") -#for file in files: +# data_dir=topdir+os.sep+"examples"+os.sep+"pyomo"+os.sep+"amplbook2"+os.sep +# files = glob.glob(data_dir+"*.py") +# for file in files: # bname=os.path.basename(file) # name=bname.split('.')[0] -# TestAmplbook2.add_commandline_test(cmd="cd "+data_dir+"; "+topdir+os.sep+"scripts/pyomo "+bname+" "+name+".dat", baseline=data_dir+name+".log", name=name) +# TestAmplbook2.add_commandline_test(cmd='cd '+data_dir+'; '+topdir+os.sep+'scripts/pyomo '+bname+' '+name+'.dat', baseline=data_dir+name+'.log', name=name) if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/examples/test_kernel_examples.py b/pyomo/core/tests/examples/test_kernel_examples.py index 28f2d5d0f58..7039f457f84 100644 --- a/pyomo/core/tests/examples/test_kernel_examples.py +++ b/pyomo/core/tests/examples/test_kernel_examples.py @@ -23,6 +23,7 @@ from pyomo.common.dependencies import numpy_available, scipy_available import platform + if platform.python_implementation() == "PyPy": # The scipy is importable into PyPy, but ODE integrators don't work. (2/ 18) scipy_available = False @@ -31,45 +32,55 @@ topdir = dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))) examplesdir = join(topdir, "examples", "kernel") -examples = glob.glob(join(examplesdir,"*.py")) -examples.extend(glob.glob(join(examplesdir,"mosek","*.py"))) +examples = glob.glob(join(examplesdir, "*.py")) +examples.extend(glob.glob(join(examplesdir, "mosek", "*.py"))) testing_solvers = {} testing_solvers['ipopt', 'nl'] = False testing_solvers['glpk', 'lp'] = False testing_solvers['mosek_direct', 'python'] = False + def setUpModule(): global testing_solvers import pyomo.environ from pyomo.solvers.tests.solvers import test_solver_cases + for _solver, _io in test_solver_cases(): - if (_solver, _io) in testing_solvers and \ - test_solver_cases(_solver, _io).available: + if (_solver, _io) in testing_solvers and test_solver_cases( + _solver, _io + ).available: testing_solvers[_solver, _io] = True def create_method(example): # It is important that this inner function has a name that - # starts with 'test' in order for nose to discover it + # starts with 'test' in order for pytest to discover it # after we assign it to the class. I have _no_ idea why - # this is the case since we are returing the function object + # this is the case since we are returning the function object # and placing it on the class with a different name. def testmethod(self): if basename(example) == "piecewise_nd_functions.py": - if (not numpy_available) or \ - (not scipy_available) or \ - (not testing_solvers['ipopt', 'nl']) or \ - (not testing_solvers['glpk', 'lp']): + if ( + (not numpy_available) + or (not scipy_available) + or (not testing_solvers['ipopt', 'nl']) + or (not testing_solvers['glpk', 'lp']) + ): self.skipTest("Numpy or Scipy or Ipopt or Glpk is not available") elif "mosek" in example: - if (not testing_solvers['ipopt', 'nl']) or \ - (not testing_solvers['mosek_direct', 'python']): + if (not testing_solvers['ipopt', 'nl']) or ( + not testing_solvers['mosek_direct', 'python'] + ): self.skipTest("Ipopt or Mosek is not available") - result = subprocess.run([sys.executable, example], - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - universal_newlines=True) + result = subprocess.run( + [sys.executable, example], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) self.assertEqual(result.returncode, 0, msg=result.stdout) + return testmethod @@ -80,10 +91,8 @@ class TestKernelExamples(unittest.TestCase): for filename in examples: testname = basename(filename) assert testname.endswith(".py") - testname = "test_"+testname[:-3]+"_example" - setattr(TestKernelExamples, - testname, - create_method(filename)) + testname = "test_" + testname[:-3] + "_example" + setattr(TestKernelExamples, testname, create_method(filename)) if __name__ == "__main__": diff --git a/pyomo/core/tests/examples/test_pyomo.py b/pyomo/core/tests/examples/test_pyomo.py index 301f93c0c85..64c195c0ab4 100644 --- a/pyomo/core/tests/examples/test_pyomo.py +++ b/pyomo/core/tests/examples/test_pyomo.py @@ -39,12 +39,14 @@ deleteFiles = True solvers = None -class BaseTester(unittest.TestCase): + +class BaseTester(unittest.TestCase): @classmethod def setUpClass(cls): global solvers import pyomo.environ + solvers = check_available_solvers('glpk') def pyomo(self, cmd, **kwds): @@ -61,8 +63,12 @@ def pyomo(self, cmd, **kwds): try: _dir = os.getcwd() os.chdir(currdir) - args = ['solve', '--solver=glpk', '--results-format=json', - '--save-results=%s' % results] + args = [ + 'solve', + '--solver=glpk', + '--results-format=json', + '--save-results=%s' % results, + ] if type(cmd) is list: args.extend(cmd) elif cmd.endswith('json') or cmd.endswith('yaml'): @@ -89,23 +95,27 @@ def run_pyomo(self, cmd, root): TempfileManager.add_tempfile(results, exists=False) output = root + '.out' TempfileManager.add_tempfile(output, exists=False) - cmd = ['pyomo', 'solve', '--solver=glpk', '--results-format=json', - '--save-results=%s' % results] + cmd + cmd = [ + 'pyomo', + 'solve', + '--solver=glpk', + '--results-format=json', + '--save-results=%s' % results, + ] + cmd with open(output, 'w') as f: result = subprocess.run(cmd, stdout=f, stderr=f) return result class TestJson(BaseTester): - def compare_json(self, file1, file2): with open(file1, 'r') as f1, open(file2, 'r') as f2: f1_contents = json.load(f1) f2_contents = json.load(f2) - self.assertStructuredAlmostEqual(f2_contents, - f1_contents, - abstol=_diff_tol, - allow_second_superset=True) + self.assertStructuredAlmostEqual( + f2_contents, f1_contents, abstol=_diff_tol, allow_second_superset=True + ) + def filter_items(self, items): filtered = [] for i in items: @@ -118,8 +128,9 @@ def filter_items(self, items): def compare_files(self, file1, file2): try: - self.assertTrue(cmp(file1, file2), - msg="Files %s and %s differ" % (file1, file2)) + self.assertTrue( + cmp(file1, file2), msg="Files %s and %s differ" % (file1, file2) + ) except: with open(file1, 'r') as f1, open(file2, 'r') as f2: f1_contents = f1.read().strip().split('\n') @@ -127,24 +138,34 @@ def compare_files(self, file1, file2): f1_filtered = [] f2_filtered = [] for item1, item2 in zip_longest(f1_contents, f2_contents): - if not item1.startswith('['): + if not item1: + f1_filtered.append(item1) + elif not item1.startswith('['): items1 = item1.strip().split() - items2 = item2.strip().split() f1_filtered.append(self.filter_items(items1)) + if not item2: + f2_filtered.append(item2) + elif not item2.startswith('['): + items2 = item2.strip().split() f2_filtered.append(self.filter_items(items2)) - self.assertStructuredAlmostEqual(f2_filtered, f1_filtered, - abstol=1e-6, - allow_second_superset=True) + self.assertStructuredAlmostEqual( + f2_filtered, f1_filtered, abstol=1e-6, allow_second_superset=True + ) def test1_simple_pyomo_execution(self): # Simple execution of 'pyomo' - self.pyomo([join(currdir, 'pmedian.py'),join(currdir, 'pmedian.dat')], root=join(currdir, 'test1')) + self.pyomo( + [join(currdir, 'pmedian.py'), join(currdir, 'pmedian.dat')], + root=join(currdir, 'test1'), + ) self.compare_json(join(currdir, 'test1.jsn'), join(currdir, 'test1.txt')) def test1a_simple_pyomo_execution(self): # Simple execution of 'pyomo' in a subprocess - files = [ os.path.join(currdir, 'pmedian.py'), - os.path.join(currdir, 'pmedian.dat') ] + files = [ + os.path.join(currdir, 'pmedian.py'), + os.path.join(currdir, 'pmedian.dat'), + ] self.run_pyomo(files, root=os.path.join(currdir, 'test1a')) self.compare_json(join(currdir, 'test1a.jsn'), join(currdir, 'test1.txt')) @@ -155,7 +176,9 @@ def test1b_simple_pyomo_execution(self): def test2_bad_model_name(self): # Run pyomo with bad --model-name option value - self.pyomo('--model-name=dummy pmedian.py pmedian.dat', root=join(currdir, 'test2')) + self.pyomo( + '--model-name=dummy pmedian.py pmedian.dat', root=join(currdir, 'test2') + ) self.compare_files(join(currdir, "test2.out"), join(currdir, "test2.txt")) def test2b_bad_model_name(self): @@ -170,7 +193,10 @@ def test3_missing_model_object(self): def test4_valid_modelname_option(self): # Run pyomo with good --model-name option value - self.pyomo('--model-name=MODEL '+join(currdir, 'pmedian1.py pmedian.dat'), root=join(currdir, 'test4')) + self.pyomo( + '--model-name=MODEL ' + join(currdir, 'pmedian1.py pmedian.dat'), + root=join(currdir, 'test4'), + ) self.compare_json(join(currdir, "test4.jsn"), join(currdir, "test1.txt")) def test4b_valid_modelname_option(self): @@ -179,7 +205,7 @@ def test4b_valid_modelname_option(self): self.compare_json(join(currdir, "test4b.jsn"), join(currdir, "test1.txt")) def test5_create_model_fcn(self): - #"""Run pyomo with create_model function""" + # """Run pyomo with create_model function""" self.pyomo('pmedian2.py pmedian.dat', root=join(currdir, 'test5')) self.compare_files(join(currdir, "test5.out"), join(currdir, "test5.txt")) @@ -189,8 +215,10 @@ def test5b_create_model_fcn(self): self.compare_files(join(currdir, "test5.out"), join(currdir, "test5.txt")) def test8_instanceonly_option(self): - #"""Run pyomo with --instance-only option""" - output = self.pyomo('--instance-only pmedian.py pmedian.dat', root=join(currdir, 'test8')) + # """Run pyomo with --instance-only option""" + output = self.pyomo( + '--instance-only pmedian.py pmedian.dat', root=join(currdir, 'test8') + ) self.assertEqual(type(output.retval.instance), pyomo.core.ConcreteModel) # Check that the results file was NOT created self.assertFalse(os.path.exists(join(currdir, 'test8.jsn'))) @@ -203,8 +231,10 @@ def test8b_instanceonly_option(self): self.assertFalse(os.path.exists(join(currdir, 'test8.jsn'))) def test9_disablegc_option(self): - #"""Run pyomo with --disable-gc option""" - output = self.pyomo('--disable-gc pmedian.py pmedian.dat', root=join(currdir, 'test9')) + # """Run pyomo with --disable-gc option""" + output = self.pyomo( + '--disable-gc pmedian.py pmedian.dat', root=join(currdir, 'test9') + ) self.assertEqual(type(output.retval.instance), pyomo.core.ConcreteModel) def test9b_disablegc_option(self): @@ -213,10 +243,12 @@ def test9b_disablegc_option(self): self.assertEqual(type(output.retval.instance), pyomo.core.ConcreteModel) def test12_output_option(self): - #"""Run pyomo with --output option""" + # """Run pyomo with --output option""" log = join(currdir, 'test12.log') TempfileManager.add_tempfile(log, exists=False) - self.pyomo('--logfile=%s pmedian.py pmedian.dat' % (log,), root=join(currdir, 'test12')) + self.pyomo( + '--logfile=%s pmedian.py pmedian.dat' % (log,), root=join(currdir, 'test12') + ) self.compare_json(join(currdir, "test12.jsn"), join(currdir, "test12.txt")) def test12b_output_option(self): @@ -238,7 +270,14 @@ def test14b_concrete_model_with_constraintlist(self): def test15_simple_pyomo_execution(self): # Simple execution of 'pyomo' with options - self.pyomo(['--solver-options="mipgap=0.02 cuts="', join(currdir, 'pmedian.py'), 'pmedian.dat'], root=join(currdir, 'test15')) + self.pyomo( + [ + '--solver-options="mipgap=0.02 cuts="', + join(currdir, 'pmedian.py'), + 'pmedian.dat', + ], + root=join(currdir, 'test15'), + ) self.compare_json(join(currdir, "test15.jsn"), join(currdir, "test1.txt")) def test15b_simple_pyomo_execution(self): @@ -254,15 +293,13 @@ def test15c_simple_pyomo_execution(self): @unittest.skipIf(not yaml_available, "YAML not available available") class TestWithYaml(BaseTester): - def compare_json(self, file1, file2): with open(file1, 'r') as f1, open(file2, 'r') as f2: f1_contents = json.load(f1) f2_contents = json.load(f2) - self.assertStructuredAlmostEqual(f2_contents, - f1_contents, - abstol=_diff_tol, - allow_second_superset=True) + self.assertStructuredAlmostEqual( + f2_contents, f1_contents, abstol=_diff_tol, allow_second_superset=True + ) def test15b_simple_pyomo_execution(self): # Simple execution of 'pyomo' with options diff --git a/pyomo/core/tests/examples/test_tutorials.py b/pyomo/core/tests/examples/test_tutorials.py index e56a92be47e..3a74c1ca142 100644 --- a/pyomo/core/tests/examples/test_tutorials.py +++ b/pyomo/core/tests/examples/test_tutorials.py @@ -16,22 +16,28 @@ import sys import os from os.path import abspath, dirname + topdir = dirname(dirname(dirname(dirname(dirname(abspath(__file__)))))) -currdir = dirname(abspath(__file__))+os.sep -tutorial_dir=topdir+os.sep+"examples"+os.sep+"pyomo"+os.sep+"tutorials"+os.sep +currdir = dirname(abspath(__file__)) + os.sep +tutorial_dir = ( + topdir + os.sep + "examples" + os.sep + "pyomo" + os.sep + "tutorials" + os.sep +) import pyomo.common.unittest as unittest try: from win32com.client.dynamic import Dispatch - _win32com=True + + _win32com = True except: - _win32com=False #pragma:nocover + _win32com = False # pragma:nocover from pyomo.common.dependencies import pyutilib, pyutilib_available + _excel_available = False if _win32com and pyutilib_available: from pyutilib.excel.spreadsheet_win32com import ExcelSpreadsheet_win32com + tmp = ExcelSpreadsheet_win32com() try: tmp._excel_dispatch() @@ -42,18 +48,19 @@ try: import xlrd - _xlrd=True + + _xlrd = True except: - _xlrd=False + _xlrd = False try: import openpyxl - _openpyxl=True + + _openpyxl = True except: - _openpyxl=False + _openpyxl = False class PyomoTutorials(unittest.TestCase): - def setUp(self): self.cwd = os.getcwd() self.tmp_path = list(sys.path) @@ -72,21 +79,25 @@ def tearDown(self): sys.stderr = self.save_stderr def driver(self, name): - OUTPUT = open(currdir+name+'.log', 'w') + OUTPUT = open(currdir + name + '.log', 'w') sys.stdout = OUTPUT sys.stderr = OUTPUT runpy.run_module(name, None, "__main__") OUTPUT.close() - self.assertIn(open(tutorial_dir+name+".out", 'r').read(), - open(currdir+name+".log", 'r').read()) - os.remove(currdir+name+".log") + self.assertIn( + open(tutorial_dir + name + ".out", 'r').read(), + open(currdir + name + ".log", 'r').read(), + ) + os.remove(currdir + name + ".log") def test_data(self): self.driver('data') @unittest.skipIf(not (_xlrd or _openpyxl), "Cannot read excel file.") - @unittest.skipIf(not (_win32com and _excel_available and pyutilib_available), - "Cannot read excel file.") + @unittest.skipIf( + not (_win32com and _excel_available and pyutilib_available), + "Cannot read excel file.", + ) def test_excel(self): self.driver('excel') @@ -99,5 +110,6 @@ def test_table(self): def test_param(self): self.driver('param') + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/transform/test_add_slacks.py b/pyomo/core/tests/transform/test_add_slacks.py index 2fe24151c35..a3698b7d529 100644 --- a/pyomo/core/tests/transform/test_add_slacks.py +++ b/pyomo/core/tests/transform/test_add_slacks.py @@ -11,7 +11,8 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep from io import StringIO from pyomo.common.log import LoggingIntercept @@ -20,18 +21,27 @@ import random from pyomo.opt import check_available_solvers -from pyomo.environ import (ConcreteModel, Set, Objective, - Constraint, Var, Block, Param, - NonNegativeReals, TransformationFactory, ComponentUID, - inequality) - -import pyomo.core.expr.current as EXPR +from pyomo.environ import ( + ConcreteModel, + Set, + Objective, + Constraint, + Var, + Block, + Param, + NonNegativeReals, + TransformationFactory, + ComponentUID, + inequality, +) + +import pyomo.core.expr as EXPR +from pyomo.core.expr.compare import assertExpressionsEqual solvers = check_available_solvers('glpk') class TestAddSlacks(unittest.TestCase): - def setUp(self): # set seed so we can test name collisions predictably random.seed(666) @@ -44,7 +54,7 @@ def makeModel(): model.rule1 = Constraint(expr=model.x <= 5) model.rule2 = Constraint(expr=inequality(1, model.y, 3)) model.rule3 = Constraint(expr=model.x >= 0.1) - model.obj = Objective(expr=-model.x-model.y) + model.obj = Objective(expr=-model.x - model.y) return model def test_add_trans_block(self): @@ -65,7 +75,7 @@ def test_slack_vars_added(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to(m) xblock = m.component("_core_add_slack_variables") - + # should have new variables on new block self.assertIsInstance(xblock.component("_slack_minus_rule1"), Var) self.assertFalse(hasattr(xblock, "_slack_plus_rule1")) @@ -85,16 +95,20 @@ def checkRule1(self, m): # check all original variables still there: cons = m.rule1 transBlock = m.component("_core_add_slack_variables") - + self.assertIsNone(cons.lower) self.assertEqual(cons.upper, 5) - self.assertEqual(cons.body.nargs(), 2) + assertExpressionsEqual( + self, + cons.body, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression((1, m.x)), + EXPR.MonomialTermExpression((-1, transBlock._slack_minus_rule1)), + ] + ), + ) - self.assertIs(cons.body.arg(0), m.x) - self.assertIs(cons.body.arg(1).__class__, EXPR.MonomialTermExpression) - self.assertEqual(cons.body.arg(1).arg(0), -1) - self.assertIs(cons.body.arg(1).arg(1), transBlock._slack_minus_rule1) - def checkRule3(self, m): # check all original variables still there: cons = m.rule3 @@ -102,11 +116,17 @@ def checkRule3(self, m): self.assertIsNone(cons.upper) self.assertEqual(cons.lower, 0.1) - - self.assertEqual(cons.body.nargs(), 2) - self.assertIs(cons.body.arg(0), m.x) - self.assertIs(cons.body.arg(1), transBlock._slack_plus_rule3) + assertExpressionsEqual( + self, + cons.body, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression((1, m.x)), + EXPR.MonomialTermExpression((1, transBlock._slack_plus_rule3)), + ] + ), + ) def test_ub_constraint_modified(self): m = self.makeModel() @@ -117,7 +137,7 @@ def test_lb_constraint_modified(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to(m) self.checkRule3(m) - + def test_both_bounds_constraint_modified(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to(m) @@ -125,17 +145,21 @@ def test_both_bounds_constraint_modified(self): # check all original variables still there: cons = m.rule2 transBlock = m.component("_core_add_slack_variables") - + self.assertEqual(cons.lower, 1) self.assertEqual(cons.upper, 3) - self.assertEqual(cons.body.nargs(), 3) - - self.assertIs(cons.body.arg(0), m.y) - self.assertIs(cons.body.arg(1), transBlock._slack_plus_rule2) - self.assertIs(cons.body.arg(2).__class__, EXPR.MonomialTermExpression) - self.assertEqual(cons.body.arg(2).arg(0), -1) - self.assertIs(cons.body.arg(2).arg(1), transBlock._slack_minus_rule2) + assertExpressionsEqual( + self, + cons.body, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression((1, m.y)), + EXPR.MonomialTermExpression((1, transBlock._slack_plus_rule2)), + EXPR.MonomialTermExpression((-1, transBlock._slack_minus_rule2)), + ] + ), + ) def test_obj_deactivated(self): m = self.makeModel() @@ -147,36 +171,43 @@ def test_obj_deactivated(self): def test_new_obj_created(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to(m) - + transBlock = m.component("_core_add_slack_variables") # active objective should minimize sum of slacks obj = transBlock.component("_slack_objective") self.assertIsInstance(obj, Objective) self.assertTrue(obj.active) - - self.assertEqual(obj.expr.nargs(), 4) - self.assertIs(obj.expr.arg(0), transBlock._slack_minus_rule1) - self.assertIs(obj.expr.arg(1), transBlock._slack_plus_rule2) - self.assertIs(obj.expr.arg(2), transBlock._slack_minus_rule2) - self.assertIs(obj.expr.arg(3), transBlock._slack_plus_rule3) + assertExpressionsEqual( + self, + obj.expr, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression((1, transBlock._slack_minus_rule1)), + EXPR.MonomialTermExpression((1, transBlock._slack_plus_rule2)), + EXPR.MonomialTermExpression((1, transBlock._slack_minus_rule2)), + EXPR.MonomialTermExpression((1, transBlock._slack_plus_rule3)), + ] + ), + ) def test_badModel_err(self): model = ConcreteModel() model.x = Var(within=NonNegativeReals) model.rule1 = Constraint(expr=inequality(6, model.x, 5)) self.assertRaisesRegex( - RuntimeError, - "Lower bound exceeds upper bound in constraint rule1*", - TransformationFactory('core.add_slack_variables').apply_to, - model) + RuntimeError, + "Lower bound exceeds upper bound in constraint rule1*", + TransformationFactory('core.add_slack_variables').apply_to, + model, + ) def test_leave_deactivated_constraints(self): m = self.makeModel() m.rule2.deactivate() TransformationFactory('core.add_slack_variables').apply_to(m) - + cons = m.rule2 self.assertFalse(cons.active) self.assertEqual(cons.lower, 1) @@ -201,9 +232,9 @@ def checkTargetSlackVars(self, transBlock): def test_only_targets_have_slack_vars(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1, m.rule3]) - + m, targets=[m.rule1, m.rule3] + ) + transBlock = m.component("_core_add_slack_variables") # check that we only made slack vars for targets self.checkTargetSlackVars(transBlock) @@ -211,13 +242,13 @@ def test_only_targets_have_slack_vars(self): def test_only_targets_have_slack_vars_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=[m.rule1, m.rule3]) + m, targets=[m.rule1, m.rule3] + ) transBlock = m2.component("_core_add_slack_variables") # check that we only made slack vars for targets self.checkTargetSlackVars(transBlock) - + def checkNonTargetCons(self, m): cons = m.rule2 self.assertEqual(cons.lower, 1) @@ -228,24 +259,24 @@ def checkNonTargetCons(self, m): def test_nontarget_constraint_same(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1, m.rule3]) - + m, targets=[m.rule1, m.rule3] + ) + self.checkNonTargetCons(m) def test_nontarget_constraint_same_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=[m.rule1, m.rule3]) - + m, targets=[m.rule1, m.rule3] + ) + self.checkNonTargetCons(m2) def test_target_constraints_transformed(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1, m.rule3]) + m, targets=[m.rule1, m.rule3] + ) self.checkRule1(m) self.checkRule3(m) @@ -253,8 +284,8 @@ def test_target_constraints_transformed(self): def test_target_constraints_transformed_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=[m.rule1, m.rule3]) + m, targets=[m.rule1, m.rule3] + ) self.checkRule1(m2) self.checkRule3(m2) @@ -267,15 +298,22 @@ def checkTargetObj(self, m): def checkTargetsObj(self, m): transBlock = m._core_add_slack_variables obj = transBlock.component("_slack_objective") - self.assertEqual(obj.expr.nargs(), 2) - self.assertIs(obj.expr.arg(0), transBlock._slack_minus_rule1) - self.assertIs(obj.expr.arg(1), transBlock._slack_plus_rule3) + assertExpressionsEqual( + self, + obj.expr, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression((1, transBlock._slack_minus_rule1)), + EXPR.MonomialTermExpression((1, transBlock._slack_plus_rule3)), + ] + ), + ) def test_target_objective(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1, m.rule3]) + m, targets=[m.rule1, m.rule3] + ) self.assertFalse(m.obj.active) self.checkTargetsObj(m) @@ -283,8 +321,8 @@ def test_target_objective(self): def test_target_objective_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=[m.rule1, m.rule3]) + m, targets=[m.rule1, m.rule3] + ) self.assertFalse(m2.obj.active) self.checkTargetsObj(m2) @@ -296,8 +334,8 @@ def test_err_for_bogus_kwds(self): "key 'notakwd' not defined for ConfigDict ''", TransformationFactory('core.add_slack_variables').apply_to, m, - notakwd="I want a feasible model" - ) + notakwd="I want a feasible model", + ) def test_error_for_non_constraint_noniterable_target(self): m = self.makeModel() @@ -308,8 +346,8 @@ def test_error_for_non_constraint_noniterable_target(self): "", TransformationFactory('core.add_slack_variables').apply_to, m, - targets=m.indexedVar[1] - ) + targets=m.indexedVar[1], + ) def test_error_for_non_constraint_target_in_list(self): m = self.makeModel() @@ -319,23 +357,25 @@ def test_error_for_non_constraint_target_in_list(self): "", TransformationFactory('core.add_slack_variables').apply_to, m, - targets=[m.rule1, m.x] - ) + targets=[m.rule1, m.x], + ) def test_deprecation_warning_for_cuid_target(self): m = self.makeModel() out = StringIO() with LoggingIntercept(out, 'pyomo.core'): TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=ComponentUID(m.rule3)) - self.assertRegex(out.getvalue(), - "DEPRECATED: In future releases ComponentUID " - "targets will no longer be\nsupported in the " - "core.add_slack_variables transformation. " - "Specify\ntargets as a Constraint or list of " - "Constraints.*") - + m, targets=ComponentUID(m.rule3) + ) + self.assertRegex( + out.getvalue(), + "DEPRECATED: In future releases ComponentUID " + "targets will no longer be\nsupported in the " + "core.add_slack_variables transformation. " + "Specify\ntargets as a Constraint or list of " + "Constraints.*", + ) + # make sure that it still worked though self.checkNonTargetCons(m) self.checkRule3(m) @@ -349,14 +389,16 @@ def test_deprecation_warning_for_cuid_targets(self): out = StringIO() with LoggingIntercept(out, 'pyomo.core'): TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[ComponentUID(m.rule1), ComponentUID(m.rule3)]) - self.assertRegex(out.getvalue(), - "DEPRECATED: In future releases ComponentUID " - "targets will no longer be\nsupported in the " - "core.add_slack_variables transformation. " - "Specify\ntargets as a Constraint or list of " - "Constraints.*") + m, targets=[ComponentUID(m.rule1), ComponentUID(m.rule3)] + ) + self.assertRegex( + out.getvalue(), + "DEPRECATED: In future releases ComponentUID " + "targets will no longer be\nsupported in the " + "core.add_slack_variables transformation. " + "Specify\ntargets as a Constraint or list of " + "Constraints.*", + ) # make sure that it still worked though self.checkNonTargetCons(m) self.checkRule1(m) @@ -368,34 +410,33 @@ def test_deprecation_warning_for_cuid_targets(self): def test_transformed_constraints_sumexpression_body(self): m = self.makeModel() - m.rule4 = Constraint(expr=inequality(5, m.x - 2*m.y, 9)) - TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=m.rule4) + m.rule4 = Constraint(expr=inequality(5, m.x - 2 * m.y, 9)) + TransformationFactory('core.add_slack_variables').apply_to(m, targets=m.rule4) transBlock = m._core_add_slack_variables c = m.rule4 self.assertEqual(c.lower, 5) self.assertEqual(c.upper, 9) - self.assertEqual(c.body.nargs(), 4) - - self.assertIs(c.body.arg(0), m.x) - self.assertIs(c.body.arg(1).arg(0), -2) - self.assertIs(c.body.arg(1).arg(1), m.y) - self.assertIs(c.body.arg(2), transBlock._slack_plus_rule4) - self.assertIs(c.body.arg(3).__class__, EXPR.MonomialTermExpression) - self.assertEqual(c.body.arg(3).arg(0), -1) - self.assertIs(c.body.arg(3).arg(1), transBlock._slack_minus_rule4) + assertExpressionsEqual( + self, + c.body, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression((1, m.x)), + EXPR.MonomialTermExpression((-2, m.y)), + EXPR.MonomialTermExpression((1, transBlock._slack_plus_rule4)), + EXPR.MonomialTermExpression((-1, transBlock._slack_minus_rule4)), + ] + ), + ) def test_transformed_constraint_scalar_body(self): m = self.makeModel() m.p = Param(initialize=6, mutable=True) m.rule4 = Constraint(expr=m.p <= 9) - TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule4]) - + TransformationFactory('core.add_slack_variables').apply_to(m, targets=[m.rule4]) + transBlock = m._core_add_slack_variables c = m.rule4 self.assertIsNone(c.lower) @@ -405,23 +446,24 @@ def test_transformed_constraint_scalar_body(self): self.assertIs(c.body.arg(1).__class__, EXPR.MonomialTermExpression) self.assertEqual(c.body.arg(1).arg(0), -1) self.assertIs(c.body.arg(1).arg(1), transBlock._slack_minus_rule4) - -class TestAddSlacks_IndexedConstraints(unittest.TestCase): +class TestAddSlacks_IndexedConstraints(unittest.TestCase): @staticmethod def makeModel(): m = ConcreteModel() - m.S = Set(initialize=[1,2,3]) + m.S = Set(initialize=[1, 2, 3]) m.x = Var(m.S) m.y = Var() + def rule1_rule(m, s): - return 2*m.x[s] >= 4 + return 2 * m.x[s] >= 4 + m.rule1 = Constraint(m.S, rule=rule1_rule) m.rule2 = Constraint(expr=m.y <= 6) m.obj = Objective(expr=sum(m.x[s] for s in m.S) - m.y) return m - + def checkSlackVars_indexedtarget(self, transBlock): self.assertIsInstance(transBlock.component("_slack_plus_rule1[1]"), Var) self.assertIsInstance(transBlock.component("_slack_plus_rule1[2]"), Var) @@ -430,44 +472,40 @@ def checkSlackVars_indexedtarget(self, transBlock): def test_indexedtarget_only_create_slackvars_for_targets(self): m = self.makeModel() - TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1]) + TransformationFactory('core.add_slack_variables').apply_to(m, targets=[m.rule1]) transBlock = m.component("_core_add_slack_variables") # TODO: So, right now indexed constraints don't result in indexed # slack variables. They could... But I don't know if it matters much? # They are named sensibly either way... Dunno. self.checkSlackVars_indexedtarget(transBlock) - + def test_indexedtarget_only_create_slackvars_for_targets_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=[m.rule1]) + m, targets=[m.rule1] + ) transBlock = m2.component("_core_add_slack_variables") self.checkSlackVars_indexedtarget(transBlock) - + def checkRule2(self, m): cons = m.rule2 self.assertEqual(cons.upper, 6) self.assertIsNone(cons.lower) self.assertIs(cons.body, m.y) - + def test_indexedtarget_nontarget_same(self): m = self.makeModel() - TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1]) + TransformationFactory('core.add_slack_variables').apply_to(m, targets=[m.rule1]) self.checkRule2(m) def test_indexedtarget_nontarget_same_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=[m.rule1]) + m, targets=[m.rule1] + ) self.checkRule2(m2) @@ -475,61 +513,77 @@ def checkTargetObj(self, m): transBlock = m._core_add_slack_variables obj = transBlock.component("_slack_objective") self.assertIsInstance(obj, Objective) - self.assertEqual(obj.expr.nargs(), 3) - self.assertIs(obj.expr.arg(0), - transBlock.component("_slack_plus_rule1[1]")) - self.assertIs(obj.expr.arg(1), - transBlock.component("_slack_plus_rule1[2]")) - self.assertIs(obj.expr.arg(2), - transBlock.component("_slack_plus_rule1[3]")) + assertExpressionsEqual( + self, + obj.expr, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression( + (1, transBlock.component("_slack_plus_rule1[1]")) + ), + EXPR.MonomialTermExpression( + (1, transBlock.component("_slack_plus_rule1[2]")) + ), + EXPR.MonomialTermExpression( + (1, transBlock.component("_slack_plus_rule1[3]")) + ), + ] + ), + ) def test_indexedtarget_objective(self): m = self.makeModel() - TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1]) - + TransformationFactory('core.add_slack_variables').apply_to(m, targets=[m.rule1]) + self.assertFalse(m.obj.active) self.checkTargetObj(m) def test_indexedtarget_objective_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=[m.rule1]) - + m, targets=[m.rule1] + ) + self.assertFalse(m2.obj.active) self.checkTargetObj(m2) - + def checkTransformedRule1(self, m, i): c = m.rule1[i] self.assertEqual(c.lower, 4) self.assertIsNone(c.upper) - self.assertEqual(c.body.nargs(), 2) - self.assertEqual(c.body.arg(0).arg(0), 2) - self.assertIs(c.body.arg(0).arg(1), m.x[i]) - self.assertIs( - c.body.arg(1), - m._core_add_slack_variables.component( - "_slack_plus_rule1[%s]" % i)) + assertExpressionsEqual( + self, + c.body, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression((2, m.x[i])), + EXPR.MonomialTermExpression( + ( + 1, + m._core_add_slack_variables.component( + "_slack_plus_rule1[%s]" % i + ), + ) + ), + ] + ), + ) def test_indexedtarget_targets_transformed(self): m = self.makeModel() - TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1]) - - for i in [1,2,3]: + TransformationFactory('core.add_slack_variables').apply_to(m, targets=[m.rule1]) + + for i in [1, 2, 3]: self.checkTransformedRule1(m, i) def test_indexedtarget_targets_transformed_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=m.rule1) - - for i in [1,2,3]: + m, targets=m.rule1 + ) + + for i in [1, 2, 3]: self.checkTransformedRule1(m2, i) def checkSlackVars_constraintDataTarget(self, transBlock): @@ -541,8 +595,8 @@ def checkSlackVars_constraintDataTarget(self, transBlock): def test_ConstraintDatatarget_only_add_slackvars_for_targets(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1[2]]) + m, targets=[m.rule1[2]] + ) transBlock = m._core_add_slack_variables self.checkSlackVars_constraintDataTarget(transBlock) @@ -550,8 +604,8 @@ def test_ConstraintDatatarget_only_add_slackvars_for_targets(self): def test_ConstraintDatatarget_only_add_slackvars_for_targets_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=m.rule1[2]) + m, targets=m.rule1[2] + ) transBlock = m2._core_add_slack_variables self.checkSlackVars_constraintDataTarget(transBlock) @@ -566,9 +620,9 @@ def checkUntransformedRule1(self, m, i): def test_ConstraintDatatarget_nontargets_same(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1[2]]) - + m, targets=[m.rule1[2]] + ) + self.checkUntransformedRule1(m, 1) self.checkUntransformedRule1(m, 3) self.checkRule2(m) @@ -576,9 +630,9 @@ def test_ConstraintDatatarget_nontargets_same(self): def test_ConstraintDatatarget_nontargets_same_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=[m.rule1[2]]) - + m, targets=[m.rule1[2]] + ) + self.checkUntransformedRule1(m2, 1) self.checkUntransformedRule1(m2, 3) self.checkRule2(m2) @@ -586,16 +640,16 @@ def test_ConstraintDatatarget_nontargets_same_create_using(self): def test_ConstraintDatatarget_target_transformed(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1[2]]) + m, targets=[m.rule1[2]] + ) self.checkTransformedRule1(m, 2) def test_ConstraintDatatarget_target_transformed_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=[m.rule1[2]]) + m, targets=[m.rule1[2]] + ) self.checkTransformedRule1(m2, 2) @@ -608,8 +662,8 @@ def checkConstraintDataObj(self, m): def test_ConstraintDatatarget_objective(self): m = self.makeModel() TransformationFactory('core.add_slack_variables').apply_to( - m, - targets=[m.rule1[2]]) + m, targets=[m.rule1[2]] + ) self.assertFalse(m.obj.active) self.checkConstraintDataObj(m) @@ -617,8 +671,8 @@ def test_ConstraintDatatarget_objective(self): def test_ConstraintDatatarget_objective_create_using(self): m = self.makeModel() m2 = TransformationFactory('core.add_slack_variables').create_using( - m, - targets=[m.rule1[2]]) + m, targets=[m.rule1[2]] + ) self.assertFalse(m2.obj.active) self.checkConstraintDataObj(m2) @@ -626,4 +680,3 @@ def test_ConstraintDatatarget_objective_create_using(self): if __name__ == '__main__': unittest.main() - diff --git a/pyomo/core/tests/transform/test_scaling.py b/pyomo/core/tests/transform/test_scaling.py index 574403b8e63..b7f34a4f4aa 100644 --- a/pyomo/core/tests/transform/test_scaling.py +++ b/pyomo/core/tests/transform/test_scaling.py @@ -13,10 +13,10 @@ import pyomo.common.unittest as unittest import pyomo.environ as pyo from pyomo.opt.base.solvers import UnknownSolver +from pyomo.core.plugins.transform.scaling import ScaleModel class TestScaleModelTransformation(unittest.TestCase): - def test_linear_scaling(self): model = pyo.ConcreteModel() model.x = pyo.Var([1, 2, 3], bounds=(-10, 10), initialize=5.0) @@ -29,12 +29,13 @@ def test_linear_scaling(self): def con_rule(m, i): if i == 1: - return m.x[1] + 2*m.x[2] + 1*m.x[3] == 4.0 + return m.x[1] + 2 * m.x[2] + 1 * m.x[3] == 4.0 if i == 2: - return m.x[1] + 2*m.x[2] + 2*m.x[3] == 5.0 + return m.x[1] + 2 * m.x[2] + 2 * m.x[3] == 5.0 if i == 3: - return m.x[1] + 3.0*m.x[2] + 1*m.x[3] == 5.0 - model.con = pyo.Constraint([1,2,3], rule=con_rule) + return m.x[1] + 3.0 * m.x[2] + 1 * m.x[3] == 5.0 + + model.con = pyo.Constraint([1, 2, 3], rule=con_rule) model.zcon = pyo.Constraint(expr=model.z >= model.x[2]) x_scale = 0.5 @@ -50,84 +51,183 @@ def con_rule(m, i): unscaled_model.scaling_factor[unscaled_model.obj] = obj_scale unscaled_model.scaling_factor[unscaled_model.x] = x_scale unscaled_model.scaling_factor[unscaled_model.z] = z_scale - unscaled_model.scaling_factor[unscaled_model.con[1]] = con_scale1 + unscaled_model.scaling_factor[unscaled_model.con[1]] = con_scale1 unscaled_model.scaling_factor[unscaled_model.con[2]] = con_scale2 unscaled_model.scaling_factor[unscaled_model.con[3]] = con_scale3 unscaled_model.scaling_factor[unscaled_model.zcon] = zcon_scale - scaled_model = pyo.TransformationFactory('core.scale_model').create_using(unscaled_model) + scaled_model = pyo.TransformationFactory('core.scale_model').create_using( + unscaled_model + ) # print('*** unscaled ***') # unscaled_model.pprint() # print('*** scaled ***') # scaled_model.pprint() - glpk_solver = pyo.SolverFactory('glpk') - if isinstance(glpk_solver, UnknownSolver) or \ - (not glpk_solver.available()): + glpk_solver = pyo.SolverFactory('glpk') + if isinstance(glpk_solver, UnknownSolver) or (not glpk_solver.available()): raise unittest.SkipTest("glpk solver not available") glpk_solver.solve(unscaled_model) glpk_solver.solve(scaled_model) # check vars - self.assertAlmostEqual(pyo.value(unscaled_model.x[1]), pyo.value(scaled_model.scaled_x[1])/x_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.x[2]), pyo.value(scaled_model.scaled_x[2])/x_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.x[3]), pyo.value(scaled_model.scaled_x[3])/x_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.z), pyo.value(scaled_model.scaled_z)/z_scale, 4) + self.assertAlmostEqual( + pyo.value(unscaled_model.x[1]), + pyo.value(scaled_model.scaled_x[1]) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.x[2]), + pyo.value(scaled_model.scaled_x[2]) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.x[3]), + pyo.value(scaled_model.scaled_x[3]) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.z), pyo.value(scaled_model.scaled_z) / z_scale, 4 + ) # check var lb - self.assertAlmostEqual(pyo.value(unscaled_model.x[1].lb), pyo.value(scaled_model.scaled_x[1].lb)/x_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.x[2].lb), pyo.value(scaled_model.scaled_x[2].lb)/x_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.x[3].lb), pyo.value(scaled_model.scaled_x[3].lb)/x_scale, 4) + self.assertAlmostEqual( + pyo.value(unscaled_model.x[1].lb), + pyo.value(scaled_model.scaled_x[1].lb) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.x[2].lb), + pyo.value(scaled_model.scaled_x[2].lb) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.x[3].lb), + pyo.value(scaled_model.scaled_x[3].lb) / x_scale, + 4, + ) # note: z_scale is negative, therefore, the inequality directions swap - self.assertAlmostEqual(pyo.value(unscaled_model.z.lb), pyo.value(scaled_model.scaled_z.ub)/z_scale, 4) + self.assertAlmostEqual( + pyo.value(unscaled_model.z.lb), + pyo.value(scaled_model.scaled_z.ub) / z_scale, + 4, + ) # check var ub - self.assertAlmostEqual(pyo.value(unscaled_model.x[1].ub), pyo.value(scaled_model.scaled_x[1].ub)/x_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.x[2].ub), pyo.value(scaled_model.scaled_x[2].ub)/x_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.x[3].ub), pyo.value(scaled_model.scaled_x[3].ub)/x_scale, 4) + self.assertAlmostEqual( + pyo.value(unscaled_model.x[1].ub), + pyo.value(scaled_model.scaled_x[1].ub) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.x[2].ub), + pyo.value(scaled_model.scaled_x[2].ub) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.x[3].ub), + pyo.value(scaled_model.scaled_x[3].ub) / x_scale, + 4, + ) # note: z_scale is negative, therefore, the inequality directions swap - self.assertAlmostEqual(pyo.value(unscaled_model.z.ub), pyo.value(scaled_model.scaled_z.lb)/z_scale, 4) + self.assertAlmostEqual( + pyo.value(unscaled_model.z.ub), + pyo.value(scaled_model.scaled_z.lb) / z_scale, + 4, + ) # check var multipliers (rc) - self.assertAlmostEqual(pyo.value(unscaled_model.rc[unscaled_model.x[1]]), pyo.value(scaled_model.rc[scaled_model.scaled_x[1]])*x_scale/obj_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.rc[unscaled_model.x[2]]), pyo.value(scaled_model.rc[scaled_model.scaled_x[2]])*x_scale/obj_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.rc[unscaled_model.x[3]]), pyo.value(scaled_model.rc[scaled_model.scaled_x[3]])*x_scale/obj_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.rc[unscaled_model.z]), pyo.value(scaled_model.rc[scaled_model.scaled_z])*z_scale/obj_scale, 4) + self.assertAlmostEqual( + pyo.value(unscaled_model.rc[unscaled_model.x[1]]), + pyo.value(scaled_model.rc[scaled_model.scaled_x[1]]) * x_scale / obj_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.rc[unscaled_model.x[2]]), + pyo.value(scaled_model.rc[scaled_model.scaled_x[2]]) * x_scale / obj_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.rc[unscaled_model.x[3]]), + pyo.value(scaled_model.rc[scaled_model.scaled_x[3]]) * x_scale / obj_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.rc[unscaled_model.z]), + pyo.value(scaled_model.rc[scaled_model.scaled_z]) * z_scale / obj_scale, + 4, + ) # check constraint multipliers - self.assertAlmostEqual(pyo.value(unscaled_model.dual[unscaled_model.con[1]]),pyo.value(scaled_model.dual[scaled_model.scaled_con[1]])*con_scale1/obj_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.dual[unscaled_model.con[2]]),pyo.value(scaled_model.dual[scaled_model.scaled_con[2]])*con_scale2/obj_scale, 4) - self.assertAlmostEqual(pyo.value(unscaled_model.dual[unscaled_model.con[3]]),pyo.value(scaled_model.dual[scaled_model.scaled_con[3]])*con_scale3/obj_scale, 4) + self.assertAlmostEqual( + pyo.value(unscaled_model.dual[unscaled_model.con[1]]), + pyo.value(scaled_model.dual[scaled_model.scaled_con[1]]) + * con_scale1 + / obj_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.dual[unscaled_model.con[2]]), + pyo.value(scaled_model.dual[scaled_model.scaled_con[2]]) + * con_scale2 + / obj_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(unscaled_model.dual[unscaled_model.con[3]]), + pyo.value(scaled_model.dual[scaled_model.scaled_con[3]]) + * con_scale3 + / obj_scale, + 4, + ) # put the solution from the scaled back into the original - pyo.TransformationFactory('core.scale_model').propagate_solution(scaled_model, model) + pyo.TransformationFactory('core.scale_model').propagate_solution( + scaled_model, model + ) # compare var values and rc with the unscaled soln for vm in model.component_objects(ctype=pyo.Var, descend_into=True): cuid = pyo.ComponentUID(vm) vum = cuid.find_component_on(unscaled_model) - self.assertEqual((vm in model.rc), (vum in unscaled_model.rc)) + self.assertEqual((vm in model.rc), (vum in unscaled_model.rc)) if vm in model.rc: - self.assertAlmostEqual(pyo.value(model.rc[vm]), pyo.value(unscaled_model.rc[vum]), 4) + self.assertAlmostEqual( + pyo.value(model.rc[vm]), pyo.value(unscaled_model.rc[vum]), 4 + ) for k in vm: vmk = vm[k] vumk = vum[k] self.assertAlmostEqual(pyo.value(vmk), pyo.value(vumk), 4) - self.assertEqual((vmk in model.rc), (vumk in unscaled_model.rc)) + self.assertEqual((vmk in model.rc), (vumk in unscaled_model.rc)) if vmk in model.rc: - self.assertAlmostEqual(pyo.value(model.rc[vmk]), pyo.value(unscaled_model.rc[vumk]), 4) + self.assertAlmostEqual( + pyo.value(model.rc[vmk]), pyo.value(unscaled_model.rc[vumk]), 4 + ) # compare constraint duals and value - for model_con in model.component_objects(ctype=pyo.Constraint, descend_into=True): + for model_con in model.component_objects( + ctype=pyo.Constraint, descend_into=True + ): cuid = pyo.ComponentUID(model_con) unscaled_model_con = cuid.find_component_on(unscaled_model) - self.assertEqual((model_con in model.rc), (unscaled_model_con in unscaled_model.rc)) + self.assertEqual( + (model_con in model.rc), (unscaled_model_con in unscaled_model.rc) + ) if model_con in model.dual: - self.assertAlmostEqual(pyo.value(model.dual[model_con]), pyo.value(unscaled_model.dual[unscaled_model_con]), 4) + self.assertAlmostEqual( + pyo.value(model.dual[model_con]), + pyo.value(unscaled_model.dual[unscaled_model_con]), + 4, + ) for k in model_con: mk = model_con[k] umk = unscaled_model_con[k] - self.assertEqual((mk in model.dual), (umk in unscaled_model.dual)) + self.assertEqual((mk in model.dual), (umk in unscaled_model.dual)) if mk in model.dual: - self.assertAlmostEqual(pyo.value(model.dual[mk]), pyo.value(unscaled_model.dual[umk]), 4) + self.assertAlmostEqual( + pyo.value(model.dual[mk]), + pyo.value(unscaled_model.dual[umk]), + 4, + ) def test_scaling_without_rename(self): m = pyo.ConcreteModel() @@ -138,9 +238,12 @@ def test_scaling_without_rename(self): def c1_rule(m): return m.v1 == 1e6 + m.c1 = pyo.Constraint(rule=c1_rule) + def c2_rule(m): return m.v2 == 1e-4 + m.c2 = pyo.Constraint(rule=c2_rule) m.scaling_factor[m.v1] = 1.0 @@ -168,45 +271,499 @@ def c2_rule(m): self.assertTrue(hasattr(m, 'c2')) orig_val, factor = values[id(m.v1)] - self.assertAlmostEqual( - m.v1.value, - orig_val*factor, - ) + self.assertAlmostEqual(m.v1.value, orig_val * factor) orig_val, factor = values[id(m.v2)] - self.assertAlmostEqual( - m.v2.value, - orig_val*factor, - ) + self.assertAlmostEqual(m.v2.value, orig_val * factor) orig_val, factor = values[id(m.c1)] - self.assertAlmostEqual( - pyo.value(m.c1.body), - orig_val*factor, - ) + self.assertAlmostEqual(pyo.value(m.c1.body), orig_val * factor) orig_val, factor = values[id(m.c2)] - self.assertAlmostEqual( - pyo.value(m.c2.body), - orig_val*factor, - ) + self.assertAlmostEqual(pyo.value(m.c2.body), orig_val * factor) orig_val, factor = values[id(m.v3)] - self.assertAlmostEqual( - m.v3_ref[None].value, - orig_val*factor, - ) + self.assertAlmostEqual(m.v3_ref[None].value, orig_val * factor) + # Note that because the model was not renamed, + # v3_ref is still intact. + + lhs = m.c2.body + monom_factor = lhs.arg(0) + scale_factor = m.scaling_factor[m.c2] / m.scaling_factor[m.v2] + self.assertAlmostEqual(monom_factor, scale_factor) + + def test_scaling_hierarchical(self): + m = pyo.ConcreteModel() + m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + m.v1 = pyo.Var(initialize=10) + m.v2 = pyo.Var(initialize=20) + m.v3 = pyo.Var(initialize=30) + + def c1_rule(m): + return m.v1 == 1e6 + + m.c1 = pyo.Constraint(rule=c1_rule) + + def c2_rule(m): + return m.v2 == 1e-4 + + m.c2 = pyo.Constraint(rule=c2_rule) + + m.scaling_factor[m.v1] = 1.0 + m.scaling_factor[m.v2] = 0.5 + m.scaling_factor[m.v3] = 0.25 + m.scaling_factor[m.c1] = 1e-5 + m.scaling_factor[m.c2] = 1e5 + + m.b = pyo.Block() + m.b.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + m.b.v4 = pyo.Var(initialize=10) + m.b.v5 = pyo.Var(initialize=20) + m.b.v6 = pyo.Var(initialize=30) + + def c3_rule(m): + return m.v4 == 1e6 + + m.b.c3 = pyo.Constraint(rule=c3_rule) + + def c4_rule(m): + return m.v5 == 1e-4 + + m.b.c4 = pyo.Constraint(rule=c4_rule) + + m.b.scaling_factor[m.b.v4] = 1.0 + m.b.scaling_factor[m.b.v5] = 0.5 + m.b.scaling_factor[m.b.v6] = 0.25 + m.b.scaling_factor[m.b.c3] = 1e-5 + m.b.scaling_factor[m.b.c4] = 1e5 + + values = {} + values[id(m.v1)] = (m.v1.value, m.scaling_factor[m.v1]) + values[id(m.v2)] = (m.v2.value, m.scaling_factor[m.v2]) + values[id(m.v3)] = (m.v3.value, m.scaling_factor[m.v3]) + values[id(m.c1)] = (pyo.value(m.c1.body), m.scaling_factor[m.c1]) + values[id(m.c2)] = (pyo.value(m.c2.body), m.scaling_factor[m.c2]) + values[id(m.b.v4)] = (m.b.v4.value, m.b.scaling_factor[m.b.v4]) + values[id(m.b.v5)] = (m.b.v5.value, m.b.scaling_factor[m.b.v5]) + values[id(m.b.v6)] = (m.b.v6.value, m.b.scaling_factor[m.b.v6]) + values[id(m.b.c3)] = (pyo.value(m.b.c3.body), m.b.scaling_factor[m.b.c3]) + values[id(m.b.c4)] = (pyo.value(m.b.c4.body), m.b.scaling_factor[m.b.c4]) + + m.c2_ref = pyo.Reference(m.c2) + m.v3_ref = pyo.Reference(m.v3) + + m.b.c4_ref = pyo.Reference(m.b.c4) + m.b.v6_ref = pyo.Reference(m.b.v6) + + scale = pyo.TransformationFactory('core.scale_model') + scale.apply_to(m, rename=False) + + self.assertTrue(hasattr(m, 'v1')) + self.assertTrue(hasattr(m, 'v2')) + self.assertTrue(hasattr(m, 'c1')) + self.assertTrue(hasattr(m, 'c2')) + self.assertTrue(hasattr(m.b, 'v4')) + self.assertTrue(hasattr(m.b, 'v5')) + self.assertTrue(hasattr(m.b, 'c3')) + self.assertTrue(hasattr(m.b, 'c4')) + + orig_val, factor = values[id(m.v1)] + self.assertAlmostEqual(m.v1.value, orig_val * factor) + + orig_val, factor = values[id(m.v2)] + self.assertAlmostEqual(m.v2.value, orig_val * factor) + + orig_val, factor = values[id(m.c1)] + self.assertAlmostEqual(pyo.value(m.c1.body), orig_val * factor) + + orig_val, factor = values[id(m.c2)] + self.assertAlmostEqual(pyo.value(m.c2.body), orig_val * factor) + + orig_val, factor = values[id(m.v3)] + self.assertAlmostEqual(m.v3_ref[None].value, orig_val * factor) # Note that because the model was not renamed, # v3_ref is still intact. + orig_val, factor = values[id(m.b.v4)] + self.assertAlmostEqual(m.b.v4.value, orig_val * factor) + + orig_val, factor = values[id(m.b.v5)] + self.assertAlmostEqual(m.b.v5.value, orig_val * factor) + + orig_val, factor = values[id(m.b.c3)] + self.assertAlmostEqual(pyo.value(m.b.c3.body), orig_val * factor) + + orig_val, factor = values[id(m.b.c4)] + self.assertAlmostEqual(pyo.value(m.b.c4.body), orig_val * factor) + + orig_val, factor = values[id(m.b.v6)] + self.assertAlmostEqual(m.b.v6_ref[None].value, orig_val * factor) + # Note that because the model was not renamed, + # v6_ref is still intact. + lhs = m.c2.body monom_factor = lhs.arg(0) - scale_factor = (m.scaling_factor[m.c2]/ - m.scaling_factor[m.v2]) + scale_factor = m.scaling_factor[m.c2] / m.scaling_factor[m.v2] + self.assertAlmostEqual(monom_factor, scale_factor) + + lhs = m.b.c4.body + monom_factor = lhs.arg(0) + scale_factor = m.b.scaling_factor[m.b.c4] / m.b.scaling_factor[m.b.v5] + self.assertAlmostEqual(monom_factor, scale_factor) + + def test_scaling_no_solve(self): + model = pyo.ConcreteModel() + model.x = pyo.Var([1, 2, 3], bounds=(-10, 10), initialize=5.0) + model.z = pyo.Var(bounds=(10, 20), initialize=15) + + def con_rule(m, i): + if i == 1: + return m.x[1] + 2 * m.x[2] + 1 * m.x[3] == 8.0 + if i == 2: + return m.x[1] + 2 * m.x[2] + 2 * m.x[3] == 11.0 + if i == 3: + return m.x[1] + 3.0 * m.x[2] + 1 * m.x[3] == 10.0 + + model.con = pyo.Constraint([1, 2, 3], rule=con_rule) + model.zcon = pyo.Constraint(expr=model.z >= model.x[2]) + + model.x_ref = pyo.Reference(model.x) + + x_scale = 0.5 + obj_scale = 2.0 + z_scale = -10.0 + con_scale1 = 0.5 + con_scale2 = 2.0 + con_scale3 = -5.0 + zcon_scale = -3.0 + + model.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + model.scaling_factor[model.x] = x_scale + model.scaling_factor[model.z] = z_scale + model.scaling_factor[model.con[1]] = con_scale1 + model.scaling_factor[model.con[2]] = con_scale2 + model.scaling_factor[model.con[3]] = con_scale3 + model.scaling_factor[model.zcon] = zcon_scale + + # Set scaling factors for References too, but these should be ignored by the transformation + model.scaling_factor[model.x_ref] = x_scale * 2 + + scaled_model = pyo.TransformationFactory('core.scale_model').create_using(model) + + # check vars self.assertAlmostEqual( - monom_factor, - scale_factor, - ) + pyo.value(model.x[1]), pyo.value(scaled_model.scaled_x[1]) / x_scale, 4 + ) + self.assertAlmostEqual( + pyo.value(model.x[2]), pyo.value(scaled_model.scaled_x[2]) / x_scale, 4 + ) + self.assertAlmostEqual( + pyo.value(model.x[3]), pyo.value(scaled_model.scaled_x[3]) / x_scale, 4 + ) + self.assertAlmostEqual( + pyo.value(model.z), pyo.value(scaled_model.scaled_z) / z_scale, 4 + ) + # check var lb + self.assertAlmostEqual( + pyo.value(model.x[1].lb), + pyo.value(scaled_model.scaled_x[1].lb) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.x[2].lb), + pyo.value(scaled_model.scaled_x[2].lb) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.x[3].lb), + pyo.value(scaled_model.scaled_x[3].lb) / x_scale, + 4, + ) + # note: z_scale is negative, therefore, the inequality directions swap + self.assertAlmostEqual( + pyo.value(model.z.lb), pyo.value(scaled_model.scaled_z.ub) / z_scale, 4 + ) + # check var ub + self.assertAlmostEqual( + pyo.value(model.x[1].ub), + pyo.value(scaled_model.scaled_x[1].ub) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.x[2].ub), + pyo.value(scaled_model.scaled_x[2].ub) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.x[3].ub), + pyo.value(scaled_model.scaled_x[3].ub) / x_scale, + 4, + ) + # note: z_scale is negative, therefore, the inequality directions swap + self.assertAlmostEqual( + pyo.value(model.z.ub), pyo.value(scaled_model.scaled_z.lb) / z_scale, 4 + ) + + # check references to vars + self.assertAlmostEqual( + pyo.value(model.x[1]), pyo.value(scaled_model.scaled_x_ref[1]) / x_scale, 4 + ) + self.assertAlmostEqual( + pyo.value(model.x[2]), pyo.value(scaled_model.scaled_x_ref[2]) / x_scale, 4 + ) + self.assertAlmostEqual( + pyo.value(model.x[3]), pyo.value(scaled_model.scaled_x_ref[3]) / x_scale, 4 + ) + # check var lb + self.assertAlmostEqual( + pyo.value(model.x[1].lb), + pyo.value(scaled_model.scaled_x_ref[1].lb) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.x[2].lb), + pyo.value(scaled_model.scaled_x_ref[2].lb) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.x[3].lb), + pyo.value(scaled_model.scaled_x_ref[3].lb) / x_scale, + 4, + ) + # note: z_scale is negative, therefore, the inequality directions swap + self.assertAlmostEqual( + pyo.value(model.z.lb), pyo.value(scaled_model.scaled_z.ub) / z_scale, 4 + ) + # check var ub + self.assertAlmostEqual( + pyo.value(model.x[1].ub), + pyo.value(scaled_model.scaled_x_ref[1].ub) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.x[2].ub), + pyo.value(scaled_model.scaled_x_ref[2].ub) / x_scale, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.x[3].ub), + pyo.value(scaled_model.scaled_x_ref[3].ub) / x_scale, + 4, + ) + + # check constraints + self.assertAlmostEqual( + pyo.value(model.con[1]), + pyo.value(scaled_model.scaled_con[1]) / con_scale1, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.con[2]), + pyo.value(scaled_model.scaled_con[2]) / con_scale2, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.con[3]), + pyo.value(scaled_model.scaled_con[3]) / con_scale3, + 4, + ) + self.assertAlmostEqual( + pyo.value(model.zcon), pyo.value(scaled_model.scaled_zcon) / zcon_scale, 4 + ) + + # Set values on scaled model and check that they map back to original + scaled_model.scaled_x[1].set_value(1 * x_scale) + scaled_model.scaled_x[2].set_value(2 * x_scale) + scaled_model.scaled_x[3].set_value(3 * x_scale) + scaled_model.scaled_z.set_value(10 * z_scale) + + # put the solution from the scaled back into the original + pyo.TransformationFactory('core.scale_model').propagate_solution( + scaled_model, model + ) + + # Check var values + self.assertAlmostEqual(pyo.value(model.x[1]), 1, 4) + self.assertAlmostEqual(pyo.value(model.x[2]), 2, 4) + self.assertAlmostEqual(pyo.value(model.x[3]), 3, 4) + self.assertAlmostEqual(pyo.value(model.z), 10, 4) + + # Check reference values + self.assertAlmostEqual(pyo.value(model.x_ref[1]), 1, 4) + self.assertAlmostEqual(pyo.value(model.x_ref[2]), 2, 4) + self.assertAlmostEqual(pyo.value(model.x_ref[3]), 3, 4) + + # check constraints + self.assertAlmostEqual(pyo.value(model.con[1]), 8, 4) + self.assertAlmostEqual(pyo.value(model.con[2]), 11, 4) + self.assertAlmostEqual(pyo.value(model.con[3]), 10, 4) + self.assertAlmostEqual(pyo.value(model.zcon), -8, 4) + + def test_get_float_scaling_factor_top_level(self): + m = pyo.ConcreteModel() + m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + + m.b1 = pyo.Block() + m.b1.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + + m.b1.b2 = pyo.Block() + m.b1.b2.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + + m.v1 = pyo.Var(initialize=10) + m.b1.v2 = pyo.Var(initialize=20) + m.b1.b2.v3 = pyo.Var(initialize=30) + + m.scaling_factor[m.v1] = 0.1 + m.scaling_factor[m.b1.v2] = 0.2 + + # SF should be 0.1 from top level + sf = ScaleModel()._get_float_scaling_factor(m, m.v1) + assert sf == float(0.1) + # SF should be 0.1 from top level, lower level ignored + sf = ScaleModel()._get_float_scaling_factor(m, m.b1.v2) + assert sf == float(0.2) + # No SF, should return 1 + sf = ScaleModel()._get_float_scaling_factor(m, m.b1.b2.v3) + assert sf == 1.0 + + def test_get_float_scaling_factor_local_level(self): + m = pyo.ConcreteModel() + m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + + m.b1 = pyo.Block() + m.b1.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + + m.b1.b2 = pyo.Block() + m.b1.b2.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + + m.v1 = pyo.Var(initialize=10) + m.b1.v2 = pyo.Var(initialize=20) + m.b1.b2.v3 = pyo.Var(initialize=30) + + m.scaling_factor[m.v1] = 0.1 + m.b1.scaling_factor[m.b1.v2] = 0.2 + m.b1.b2.scaling_factor[m.b1.b2.v3] = 0.3 + + # Add an intermediate scaling factor - this should take priority + m.b1.scaling_factor[m.b1.b2.v3] = 0.4 + + # Should get SF from local levels + sf = ScaleModel()._get_float_scaling_factor(m, m.v1) + assert sf == float(0.1) + sf = ScaleModel()._get_float_scaling_factor(m, m.b1.v2) + assert sf == float(0.2) + sf = ScaleModel()._get_float_scaling_factor(m, m.b1.b2.v3) + assert sf == float(0.4) + + def test_get_float_scaling_factor_intermediate_level(self): + m = pyo.ConcreteModel() + m.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + + m.b1 = pyo.Block() + m.b1.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + + m.b1.b2 = pyo.Block() + # No suffix at b2 level - this should not cause an issue + + m.b1.b2.b3 = pyo.Block() + m.b1.b2.b3.scaling_factor = pyo.Suffix(direction=pyo.Suffix.EXPORT) + + m.v1 = pyo.Var(initialize=10) + m.b1.b2.b3.v2 = pyo.Var(initialize=20) + m.b1.b2.b3.v3 = pyo.Var(initialize=30) + + # Scale v1 at lowest level - this should not get picked up + m.b1.b2.b3.scaling_factor[m.v1] = 0.1 + + m.b1.scaling_factor[m.b1.b2.b3.v2] = 0.2 + m.b1.scaling_factor[m.b1.b2.b3.v3] = 0.3 + + m.b1.b2.b3.scaling_factor[m.b1.b2.b3.v3] = 0.4 + + # v1 should be unscaled as SF set below variable level + sf = ScaleModel()._get_float_scaling_factor(m, m.v1) + assert sf == 1.0 + # v2 should get SF from b1 level + sf = ScaleModel()._get_float_scaling_factor(m, m.b1.b2.b3.v2) + assert sf == float(0.2) + # v2 should get SF from highest level, ignoring b3 level + sf = ScaleModel()._get_float_scaling_factor(m, m.b1.b2.b3.v3) + assert sf == float(0.3) + + def test_suffix_finder(self): + # Build a dummy model + m = pyo.ConcreteModel() + m.v1 = pyo.Var() + + m.b1 = pyo.Block() + m.b1.v2 = pyo.Var() + + m.b1.b2 = pyo.Block() + m.b1.b2.v3 = pyo.Var([0]) + + xfrm = ScaleModel() + _suffix_finder = xfrm._suffix_finder + + # Add Suffixes + m.suffix = pyo.Suffix(direction=pyo.Suffix.EXPORT) + # No suffix on b1 - make sure we can handle missing suffixes + m.b1.b2.suffix = pyo.Suffix(direction=pyo.Suffix.EXPORT) + + # Check for no suffix value + assert _suffix_finder(m.b1.b2.v3[0], "suffix") == None + assert _suffix_finder(m.b1.b2.v3[0], "suffix", root=m.b1) == None + + # Check finding default values + # Add a default at the top level + m.suffix[None] = 1 + assert _suffix_finder(m.b1.b2.v3[0], "suffix") == 1 + assert _suffix_finder(m.b1.b2.v3[0], "suffix", root=m.b1) == None + + # Add a default suffix at a lower level + m.b1.b2.suffix[None] = 2 + assert _suffix_finder(m.b1.b2.v3[0], "suffix") == 2 + assert _suffix_finder(m.b1.b2.v3[0], "suffix", root=m.b1) == 2 + + # Check for container at lowest level + m.b1.b2.suffix[m.b1.b2.v3] = 3 + assert _suffix_finder(m.b1.b2.v3[0], "suffix") == 3 + assert _suffix_finder(m.b1.b2.v3[0], "suffix", root=m.b1) == 3 + + # Check for container at top level + m.suffix[m.b1.b2.v3] = 4 + assert _suffix_finder(m.b1.b2.v3[0], "suffix") == 4 + assert _suffix_finder(m.b1.b2.v3[0], "suffix", root=m.b1) == 3 + + # Check for specific values at lowest level + m.b1.b2.suffix[m.b1.b2.v3[0]] = 5 + assert _suffix_finder(m.b1.b2.v3[0], "suffix") == 5 + assert _suffix_finder(m.b1.b2.v3[0], "suffix", root=m.b1) == 5 + + # Check for specific values at top level + m.suffix[m.b1.b2.v3[0]] = 6 + assert _suffix_finder(m.b1.b2.v3[0], "suffix") == 6 + assert _suffix_finder(m.b1.b2.v3[0], "suffix", root=m.b1) == 5 + + # Make sure we don't find default suffixes at lower levels + assert _suffix_finder(m.b1.v2, "suffix") == 1 + assert _suffix_finder(m.b1.v2, "suffix", root=m.b1) == None + + # Make sure we don't find specific suffixes at lower levels + m.b1.b2.suffix[m.v1] = 5 + assert _suffix_finder(m.v1, "suffix") == 1 + + with self.assertRaisesRegex( + ValueError, r"_find_suffix: root must be a BlockData \(found Var: v1\)" + ): + _suffix_finder(m.b1.v2, "suffix", root=m.v1) + + m.bn = pyo.Block([1, 2]) + with self.assertRaisesRegex( + ValueError, + r"_find_suffix: root must be a BlockData " r"\(found IndexedBlock: bn\)", + ): + _suffix_finder(m.b1.v2, "suffix", root=m.bn) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/transform/test_transform.py b/pyomo/core/tests/transform/test_transform.py index a4493eb730f..7c3f17fcfec 100644 --- a/pyomo/core/tests/transform/test_transform.py +++ b/pyomo/core/tests/transform/test_transform.py @@ -14,19 +14,38 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest from pyomo.common.tempfiles import TempfileManager from pyomo.opt import check_available_solvers -from pyomo.environ import (AbstractModel, Set, RangeSet, Objective, - Constraint, Var, Block, Integers, Boolean, - Binary, Reals, RealSet, NonNegativeIntegers, - NonNegativeReals, NegativeReals, NegativeIntegers, - PositiveReals, PositiveIntegers, NonPositiveIntegers, - NonPositiveReals, TransformationFactory, SolverFactory, - sum_product) +from pyomo.environ import ( + AbstractModel, + Set, + RangeSet, + Objective, + Constraint, + Var, + Block, + Integers, + Boolean, + Binary, + Reals, + RealSet, + NonNegativeIntegers, + NonNegativeReals, + NegativeReals, + NegativeIntegers, + PositiveReals, + PositiveIntegers, + NonPositiveIntegers, + NonPositiveReals, + TransformationFactory, + SolverFactory, + sum_product, +) from pyomo.core.plugins.transform.standard_form import StandardForm from pyomo.core.plugins.transform.nonnegative_transform import NonNegativeTransformation @@ -35,7 +54,6 @@ class Test(unittest.TestCase): - def setUp(self): self.model = AbstractModel() @@ -43,8 +61,8 @@ def tearDown(self): if os.path.exists("unknown.lp"): os.unlink("unknown.lp") TempfileManager.clear_tempfiles() - if os.path.exists(os.path.join(currdir,'result.yml')): - os.remove(os.path.join(currdir,'result.yml')) + if os.path.exists(os.path.join(currdir, 'result.yml')): + os.remove(os.path.join(currdir, 'result.yml')) self.model = None @staticmethod @@ -63,14 +81,14 @@ def test_transform_dir(self): def test_fix_discrete(self): # Coverage of the _clear_attribute method - self.model.A = RangeSet(1,4) + self.model.A = RangeSet(1, 4) self.model.a = Var() self.model.b = Var(within=self.model.A) self.model.c = Var(within=NonNegativeIntegers) - self.model.d = Var(within=Integers, bounds=(-2,3)) + self.model.d = Var(within=Integers, bounds=(-2, 3)) self.model.e = Var(within=Boolean) self.model.f = Var(domain=Boolean) - instance=self.model.create_instance() + instance = self.model.create_instance() xfrm = TransformationFactory('core.fix_discrete') rinst = xfrm.create_using(instance) self.assertFalse(rinst.a.is_fixed()) @@ -82,14 +100,14 @@ def test_fix_discrete(self): def test_fix_discrete_clone(self): # Coverage of the _clear_attribute method - self.model.A = RangeSet(1,4) + self.model.A = RangeSet(1, 4) self.model.a = Var() self.model.b = Var(within=self.model.A) self.model.c = Var(within=NonNegativeIntegers) - self.model.d = Var(within=Integers, bounds=(-2,3)) + self.model.d = Var(within=Integers, bounds=(-2, 3)) self.model.e = Var(within=Boolean) self.model.f = Var(domain=Boolean) - instance=self.model.create_instance() + instance = self.model.create_instance() instance_clone = instance.clone() xfrm = TransformationFactory('core.fix_discrete') rinst = xfrm.create_using(instance_clone) @@ -102,14 +120,14 @@ def test_fix_discrete_clone(self): def test_relax_integrality1(self): # Coverage of the _clear_attribute method - self.model.A = RangeSet(1,4) + self.model.A = RangeSet(1, 4) self.model.a = Var() self.model.b = Var(within=self.model.A) self.model.c = Var(within=NonNegativeIntegers) - self.model.d = Var(within=Integers, bounds=(-2,3)) + self.model.d = Var(within=Integers, bounds=(-2, 3)) self.model.e = Var(within=Boolean) self.model.f = Var(domain=Boolean) - instance=self.model.create_instance() + instance = self.model.create_instance() xfrm = TransformationFactory('core.relax_integer_vars') rinst = xfrm.create_using(instance) self.assertEqual(type(rinst.a.domain), RealSet) @@ -127,14 +145,14 @@ def test_relax_integrality1(self): def test_relax_integrality2(self): # Coverage of the _clear_attribute method - self.model.A = RangeSet(1,4) - self.model.a = Var([1,2,3], dense=True) - self.model.b = Var([1,2,3], within=self.model.A, dense=True) - self.model.c = Var([1,2,3], within=NonNegativeIntegers, dense=True) - self.model.d = Var([1,2,3], within=Integers, bounds=(-2,3), dense=True) - self.model.e = Var([1,2,3], within=Boolean, dense=True) - self.model.f = Var([1,2,3], domain=Boolean, dense=True) - instance=self.model.create_instance() + self.model.A = RangeSet(1, 4) + self.model.a = Var([1, 2, 3], dense=True) + self.model.b = Var([1, 2, 3], within=self.model.A, dense=True) + self.model.c = Var([1, 2, 3], within=NonNegativeIntegers, dense=True) + self.model.d = Var([1, 2, 3], within=Integers, bounds=(-2, 3), dense=True) + self.model.e = Var([1, 2, 3], within=Boolean, dense=True) + self.model.f = Var([1, 2, 3], domain=Boolean, dense=True) + instance = self.model.create_instance() xfrm = TransformationFactory('core.relax_integer_vars') rinst = xfrm.create_using(instance) self.assertEqual(type(rinst.a[1].domain), RealSet) @@ -152,14 +170,14 @@ def test_relax_integrality2(self): def test_relax_integrality_cloned(self): # Coverage of the _clear_attribute method - self.model.A = RangeSet(1,4) + self.model.A = RangeSet(1, 4) self.model.a = Var() self.model.b = Var(within=self.model.A) self.model.c = Var(within=NonNegativeIntegers) - self.model.d = Var(within=Integers, bounds=(-2,3)) + self.model.d = Var(within=Integers, bounds=(-2, 3)) self.model.e = Var(within=Boolean) self.model.f = Var(domain=Boolean) - instance=self.model.create_instance() + instance = self.model.create_instance() instance_cloned = instance.clone() xfrm = TransformationFactory('core.relax_integer_vars') rinst = xfrm.create_using(instance_cloned) @@ -178,24 +196,24 @@ def test_relax_integrality_cloned(self): def test_relax_integrality(self): # Coverage of the _clear_attribute method - self.model.d = Var(within=Integers, bounds=(-2,3)) - instance=self.model.create_instance() + self.model.d = Var(within=Integers, bounds=(-2, 3)) + instance = self.model.create_instance() instance_cloned = instance.clone() xfrm = TransformationFactory('core.relax_integer_vars') rinst = xfrm.create_using(instance_cloned) self.assertEqual(type(rinst.d.domain), RealSet) - self.assertEqual(rinst.d.bounds, (-2,3)) + self.assertEqual(rinst.d.bounds, (-2, 3)) self.assertIs(instance.d.domain, Integers) self.assertIs(instance_cloned.d.domain, Integers) def test_relax_integrality_simple_cloned(self): - self.model.x = Var(within=Integers, bounds=(-2,3)) + self.model.x = Var(within=Integers, bounds=(-2, 3)) instance = self.model.create_instance() instance_cloned = instance.clone() xfrm = TransformationFactory('core.relax_discrete') rinst = xfrm.create_using(instance_cloned) self.assertIs(rinst.x.domain, Reals) - self.assertEqual(rinst.x.bounds, (-2,3)) + self.assertEqual(rinst.x.bounds, (-2, 3)) self.assertIs(instance.x.domain, Integers) self.assertIs(instance_cloned.x.domain, Integers) @@ -203,7 +221,7 @@ def test_relax_integrality_on_deactivated_blocks(self): self.model.x = Var(domain=NonNegativeIntegers) self.model.b = Block() self.model.b.x = Var(domain=Binary) - self.model.b.y = Var(domain=Integers, bounds=(-3,2)) + self.model.b.y = Var(domain=Integers, bounds=(-3, 2)) instance = self.model.create_instance() instance.b.deactivate() relax_integrality = TransformationFactory('core.relax_integer_vars') @@ -222,7 +240,7 @@ def test_relax_integrality_only_active_blocks(self): self.model.x = Var(domain=NonNegativeIntegers) self.model.b = Block() self.model.b.x = Var(domain=Binary) - self.model.b.y = Var(domain=Integers, bounds=(-3,2)) + self.model.b.y = Var(domain=Integers, bounds=(-3, 2)) instance = self.model.create_instance() instance.b.deactivate() relax_integrality = TransformationFactory('core.relax_integer_vars') @@ -236,11 +254,11 @@ def test_relax_integrality_only_active_blocks(self): def test_nonnegativity_transformation_1(self): self.model.a = Var() self.model.b = Var(within=NonNegativeIntegers) - self.model.c = Var(within=Integers, bounds=(-2,3)) + self.model.c = Var(within=Integers, bounds=(-2, 3)) self.model.d = Var(within=Boolean) self.model.e = Var(domain=Boolean) - instance=self.model.create_instance() + instance = self.model.create_instance() xfrm = TransformationFactory('core.nonnegative_vars') transformed = xfrm.create_using(instance) @@ -264,7 +282,7 @@ def test_nonnegativity_transformation_1(self): self.assertIs(transformed.e[ndx].domain, Binary) def test_nonnegativity_transformation_2(self): - self.model.S = RangeSet(0,10) + self.model.S = RangeSet(0, 10) self.model.T = Set(initialize=["foo", "bar"]) # Unindexed, singly indexed, and doubly indexed variables with @@ -277,16 +295,16 @@ def test_nonnegativity_transformation_2(self): # rule-defined bounds def boundsRule(*args): return (-4, 4) + self.model.x2 = Var(bounds=boundsRule) self.model.y2 = Var(self.model.S, bounds=boundsRule) self.model.z2 = Var(self.model.S, self.model.T, bounds=boundsRule) - # Unindexed, singly indexed, and doubly indexed variables with # explicit domains self.model.x3 = Var(domain=NegativeReals) - self.model.y3 = Var(self.model.S, domain = NegativeIntegers) - self.model.z3 = Var(self.model.S, self.model.T, domain = Reals) + self.model.y3 = Var(self.model.S, domain=NegativeIntegers) + self.model.z3 = Var(self.model.S, self.model.T, domain=Reals) # Unindexed, singly indexed, and doubly indexed variables with # rule-defined domains @@ -327,14 +345,14 @@ def domainRule(*args): # Make sure everything is nonnegative for c in ('x', 'y', 'z'): for n in ('1', '2', '3', '4'): - var = transformed.__getattribute__(c+n) + var = transformed.__getattribute__(c + n) for ndx in var.index_set(): self.assertTrue(self.nonnegativeBounds(var[ndx])) @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") @unittest.expectedFailure def test_nonnegative_transform_3(self): - self.model.S = RangeSet(0,10) + self.model.S = RangeSet(0, 10) self.model.T = Set(initialize=["foo", "bar"]) # Unindexed, singly indexed, and doubly indexed variables with @@ -347,16 +365,16 @@ def test_nonnegative_transform_3(self): # rule-defined bounds def boundsRule(*args): return (-4, 4) + self.model.x2 = Var(bounds=boundsRule) self.model.y2 = Var(self.model.S, bounds=boundsRule) self.model.z2 = Var(self.model.S, self.model.T, bounds=boundsRule) - # Unindexed, singly indexed, and doubly indexed variables with # explicit domains self.model.x3 = Var(domain=NegativeReals, bounds=(-10, 10)) - self.model.y3 = Var(self.model.S, domain = NegativeIntegers, bounds=(-10, 10)) - self.model.z3 = Var(self.model.S, self.model.T, domain = Reals, bounds=(-10, 10)) + self.model.y3 = Var(self.model.S, domain=NegativeIntegers, bounds=(-10, 10)) + self.model.z3 = Var(self.model.S, self.model.T, domain=Reals, bounds=(-10, 10)) # Unindexed, singly indexed, and doubly indexed variables with # rule-defined domains @@ -393,16 +411,21 @@ def domainRule(*args): self.model.x4 = Var(domain=domainRule, bounds=(-10, 10)) self.model.y4 = Var(self.model.S, domain=domainRule, bounds=(-10, 10)) - self.model.z4 = Var(self.model.S, self.model.T, domain=domainRule, bounds=(-10, 10)) + self.model.z4 = Var( + self.model.S, self.model.T, domain=domainRule, bounds=(-10, 10) + ) def objRule(model): - return sum(5*sum_product(model.__getattribute__(c+n)) \ - for c in ('x', 'y', 'z') for n in ('1', '2', '3', '4')) + return sum( + 5 * sum_product(model.__getattribute__(c + n)) + for c in ('x', 'y', 'z') + for n in ('1', '2', '3', '4') + ) self.model.obj = Objective(rule=objRule) transform = TransformationFactory('core.nonnegative_vars') - instance=self.model.create_instance() + instance = self.model.create_instance() transformed = transform.create_using(instance) opt = SolverFactory("glpk") @@ -412,14 +435,14 @@ def objRule(model): self.assertEqual( instance_sol["Solution"][0]["Objective"]['obj']["value"], - transformed_sol["Solution"][0]["Objective"]['obj']["value"] - ) + transformed_sol["Solution"][0]["Objective"]['obj']["value"], + ) @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") @unittest.expectedFailure def test_nonnegative_transform_4(self): - """ Same as #3, but adds constraints """ - self.model.S = RangeSet(0,10) + """Same as #3, but adds constraints""" + self.model.S = RangeSet(0, 10) self.model.T = Set(initialize=["foo", "bar"]) # Unindexed, singly indexed, and doubly indexed variables with @@ -432,16 +455,16 @@ def test_nonnegative_transform_4(self): # rule-defined bounds def boundsRule(*args): return (-4, 4) + self.model.x2 = Var(bounds=boundsRule) self.model.y2 = Var(self.model.S, bounds=boundsRule) self.model.z2 = Var(self.model.S, self.model.T, bounds=boundsRule) - # Unindexed, singly indexed, and doubly indexed variables with # explicit domains self.model.x3 = Var(domain=NegativeReals, bounds=(-10, 10)) - self.model.y3 = Var(self.model.S, domain = NegativeIntegers, bounds=(-10, 10)) - self.model.z3 = Var(self.model.S, self.model.T, domain = Reals, bounds=(-10, 10)) + self.model.y3 = Var(self.model.S, domain=NegativeIntegers, bounds=(-10, 10)) + self.model.z3 = Var(self.model.S, self.model.T, domain=Reals, bounds=(-10, 10)) # Unindexed, singly indexed, and doubly indexed variables with # rule-defined domains @@ -478,7 +501,9 @@ def domainRule(*args): self.model.x4 = Var(domain=domainRule, bounds=(-10, 10)) self.model.y4 = Var(self.model.S, domain=domainRule, bounds=(-10, 10)) - self.model.z4 = Var(self.model.S, self.model.T, domain=domainRule, bounds=(-10, 10)) + self.model.z4 = Var( + self.model.S, self.model.T, domain=domainRule, bounds=(-10, 10) + ) # Add some constraints def makeXConRule(var): @@ -496,30 +521,30 @@ def zConRule(model, var, s, t): for n in ('1', '2', '3', '4'): self.model.__setattr__( "x" + n + "_constraint", - Constraint( - rule=makeXConRule( - self.model.__getattribute__("x"+n)))) + Constraint(rule=makeXConRule(self.model.__getattribute__("x" + n))), + ) self.model.__setattr__( "y" + n + "_constraint", - Constraint( - rule=makeYConRule( - self.model.__getattribute__("y"+n)))) + Constraint(rule=makeYConRule(self.model.__getattribute__("y" + n))), + ) self.model.__setattr__( "z" + n + "_constraint", - Constraint( - rule=makeZConRule( - self.model.__getattribute__("z"+n)))) + Constraint(rule=makeZConRule(self.model.__getattribute__("z" + n))), + ) def objRule(model): - return sum(5*sum_product(model.__getattribute__(c+n)) \ - for c in ('x', 'y', 'z') for n in ('1', '2', '3', '4')) + return sum( + 5 * sum_product(model.__getattribute__(c + n)) + for c in ('x', 'y', 'z') + for n in ('1', '2', '3', '4') + ) self.model.obj = Objective(rule=objRule) transform = NonNegativeTransformation() - instance=self.model.create_instance() + instance = self.model.create_instance() transformed = transform(instance) opt = SolverFactory("glpk") @@ -529,13 +554,13 @@ def objRule(model): self.assertEqual( instance_sol["Solution"][0]["Objective"]['obj']["value"], - transformed_sol["Solution"][0]["Objective"]['obj']["value"] - ) + transformed_sol["Solution"][0]["Objective"]['obj']["value"], + ) @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") @unittest.expectedFailure def test_standard_form_transform_1(self): - self.model.S = RangeSet(0,10) + self.model.S = RangeSet(0, 10) self.model.T = Set(initialize=["foo", "bar"]) # Unindexed, singly indexed, and doubly indexed variables with @@ -548,16 +573,16 @@ def test_standard_form_transform_1(self): # rule-defined bounds def boundsRule(*args): return (-4, 4) + self.model.x2 = Var(bounds=boundsRule) self.model.y2 = Var(self.model.S, bounds=boundsRule) self.model.z2 = Var(self.model.S, self.model.T, bounds=boundsRule) - # Unindexed, singly indexed, and doubly indexed variables with # explicit domains self.model.x3 = Var(domain=NegativeReals, bounds=(-10, 10)) - self.model.y3 = Var(self.model.S, domain = NegativeIntegers, bounds=(-10, 10)) - self.model.z3 = Var(self.model.S, self.model.T, domain = Reals, bounds=(-10, 10)) + self.model.y3 = Var(self.model.S, domain=NegativeIntegers, bounds=(-10, 10)) + self.model.z3 = Var(self.model.S, self.model.T, domain=Reals, bounds=(-10, 10)) # Unindexed, singly indexed, and doubly indexed variables with # rule-defined domains @@ -594,16 +619,21 @@ def domainRule(*args): self.model.x4 = Var(domain=domainRule, bounds=(-10, 10)) self.model.y4 = Var(self.model.S, domain=domainRule, bounds=(-10, 10)) - self.model.z4 = Var(self.model.S, self.model.T, domain=domainRule, bounds=(-10, 10)) + self.model.z4 = Var( + self.model.S, self.model.T, domain=domainRule, bounds=(-10, 10) + ) def objRule(model): - return sum(5*sum_product(model.__getattribute__(c+n)) \ - for c in ('x', 'y', 'z') for n in ('1', '2', '3', '4')) + return sum( + 5 * sum_product(model.__getattribute__(c + n)) + for c in ('x', 'y', 'z') + for n in ('1', '2', '3', '4') + ) self.model.obj = Objective(rule=objRule) transform = StandardForm() - instance=self.model.create_instance() + instance = self.model.create_instance() transformed = transform(instance) opt = SolverFactory("glpk") @@ -613,14 +643,14 @@ def objRule(model): self.assertEqual( instance_sol["Solution"][0]["Objective"]['obj']["value"], - transformed_sol["Solution"][0]["Objective"]['obj']["value"] - ) + transformed_sol["Solution"][0]["Objective"]['obj']["value"], + ) @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") @unittest.expectedFailure def test_standard_form_transform_2(self): - """ Same as #1, but adds constraints """ - self.model.S = RangeSet(0,10) + """Same as #1, but adds constraints""" + self.model.S = RangeSet(0, 10) self.model.T = Set(initialize=["foo", "bar"]) # Unindexed, singly indexed, and doubly indexed variables with @@ -633,16 +663,16 @@ def test_standard_form_transform_2(self): # rule-defined bounds def boundsRule(*args): return (-4, 4) + self.model.x2 = Var(bounds=boundsRule) self.model.y2 = Var(self.model.S, bounds=boundsRule) self.model.z2 = Var(self.model.S, self.model.T, bounds=boundsRule) - # Unindexed, singly indexed, and doubly indexed variables with # explicit domains self.model.x3 = Var(domain=NegativeReals, bounds=(-10, 10)) - self.model.y3 = Var(self.model.S, domain = NegativeIntegers, bounds=(-10, 10)) - self.model.z3 = Var(self.model.S, self.model.T, domain = Reals, bounds=(-10, 10)) + self.model.y3 = Var(self.model.S, domain=NegativeIntegers, bounds=(-10, 10)) + self.model.z3 = Var(self.model.S, self.model.T, domain=Reals, bounds=(-10, 10)) # Unindexed, singly indexed, and doubly indexed variables with # rule-defined domains @@ -679,7 +709,9 @@ def domainRule(*args): self.model.x4 = Var(domain=domainRule, bounds=(-10, 10)) self.model.y4 = Var(self.model.S, domain=domainRule, bounds=(-10, 10)) - self.model.z4 = Var(self.model.S, self.model.T, domain=domainRule, bounds=(-10, 10)) + self.model.z4 = Var( + self.model.S, self.model.T, domain=domainRule, bounds=(-10, 10) + ) # Add some constraints def makeXConRule(var): @@ -697,30 +729,30 @@ def zConRule(model, var, s, t): for n in ('1', '2', '3', '4'): self.model.__setattr__( "x" + n + "_constraint", - Constraint( - rule=makeXConRule( - self.model.__getattribute__("x"+n)))) + Constraint(rule=makeXConRule(self.model.__getattribute__("x" + n))), + ) self.model.__setattr__( "y" + n + "_constraint", - Constraint( - rule=makeYConRule( - self.model.__getattribute__("y"+n)))) + Constraint(rule=makeYConRule(self.model.__getattribute__("y" + n))), + ) self.model.__setattr__( "z" + n + "_constraint", - Constraint( - rule=makeZConRule( - self.model.__getattribute__("z"+n)))) + Constraint(rule=makeZConRule(self.model.__getattribute__("z" + n))), + ) def objRule(model): - return sum(5*sum_product(model.__getattribute__(c+n)) \ - for c in ('x', 'y', 'z') for n in ('1', '2', '3', '4')) + return sum( + 5 * sum_product(model.__getattribute__(c + n)) + for c in ('x', 'y', 'z') + for n in ('1', '2', '3', '4') + ) self.model.obj = Objective(rule=objRule) transform = StandardForm() - instance=self.model.create_instance() + instance = self.model.create_instance() transformed = transform(instance) opt = SolverFactory("glpk") @@ -730,8 +762,8 @@ def objRule(model): self.assertEqual( instance_sol["Solution"][0]["Objective"]['obj']["value"], - transformed_sol["Solution"][0]["Objective"]['obj']["value"] - ) + transformed_sol["Solution"][0]["Objective"]['obj']["value"], + ) if __name__ == "__main__": diff --git a/pyomo/core/tests/unit/blend.txt b/pyomo/core/tests/unit/blend.txt index 35c67183299..147200a82a9 100644 --- a/pyomo/core/tests/unit/blend.txt +++ b/pyomo/core/tests/unit/blend.txt @@ -1,41 +1,41 @@ { "Problem": [ { - "Lower bound": 0.96666666666666701, - "Number of constraints": 6, - "Number of nonzeros": 11, - "Number of objectives": 1, - "Number of variables": 3, - "Sense": "minimize", + "Lower bound": 0.96666666666666701, + "Number of constraints": 5, + "Number of nonzeros": 10, + "Number of objectives": 1, + "Number of variables": 2, + "Sense": "minimize", "Upper bound": 0.96666666666666701 } - ], + ], "Solution": [ { - "number of solutions": 1, + "number of solutions": 1, "number of solutions displayed": 1 - }, + }, { - "Gap": 0.0, + "Gap": 0.0, "Objective": { "obj": { "Value": 0.96666666666666701 } - }, + }, "Variable": { "x1": { "Value": 33.3333333333333 - }, + }, "x2": { "Value": 66.6666666666667 } } } - ], + ], "Solver": [ { - "Error rc": 0, - "Status": "ok", + "Error rc": 0, + "Status": "ok", "Termination condition": "optimal" } ] diff --git a/pyomo/core/tests/unit/kernel/test_block.py b/pyomo/core/tests/unit/kernel/test_block.py index 03a99d69e22..5d1ecc33f06 100644 --- a/pyomo/core/tests/unit/kernel/test_block.py +++ b/pyomo/core/tests/unit/kernel/test_block.py @@ -22,46 +22,42 @@ from pyomo.core.expr.symbol_map import SymbolMap import pyomo.kernel as pmo from pyomo.common.log import LoggingIntercept -from pyomo.core.tests.unit.kernel.test_dict_container import \ - _TestActiveDictContainerBase -from pyomo.core.tests.unit.kernel.test_tuple_container import \ - _TestActiveTupleContainerBase -from pyomo.core.tests.unit.kernel.test_list_container import \ - _TestActiveListContainerBase -from pyomo.core.kernel.base import \ - (ICategorizedObject, - ICategorizedObjectContainer) -from pyomo.core.kernel.heterogeneous_container import \ - (heterogeneous_containers, - IHeterogeneousContainer) +from pyomo.core.tests.unit.kernel.test_dict_container import ( + _TestActiveDictContainerBase, +) +from pyomo.core.tests.unit.kernel.test_tuple_container import ( + _TestActiveTupleContainerBase, +) +from pyomo.core.tests.unit.kernel.test_list_container import ( + _TestActiveListContainerBase, +) +from pyomo.core.kernel.base import ICategorizedObject, ICategorizedObjectContainer +from pyomo.core.kernel.heterogeneous_container import ( + heterogeneous_containers, + IHeterogeneousContainer, +) from pyomo.common.collections import ComponentMap from pyomo.core.kernel.suffix import suffix -from pyomo.core.kernel.constraint import (IConstraint, - constraint, - constraint_dict, - constraint_list) -from pyomo.core.kernel.parameter import (parameter, - parameter_dict, - parameter_list) -from pyomo.core.kernel.expression import (expression, - data_expression, - expression_dict, - expression_list) -from pyomo.core.kernel.objective import (objective, - objective_dict, - objective_list) -from pyomo.core.kernel.variable import (IVariable, - variable, - variable_dict, - variable_list) -from pyomo.core.kernel.block import (IBlock, - block, - block_dict, - block_tuple, - block_list) +from pyomo.core.kernel.constraint import ( + IConstraint, + constraint, + constraint_dict, + constraint_list, +) +from pyomo.core.kernel.parameter import parameter, parameter_dict, parameter_list +from pyomo.core.kernel.expression import ( + expression, + data_expression, + expression_dict, + expression_list, +) +from pyomo.core.kernel.objective import objective, objective_dict, objective_list +from pyomo.core.kernel.variable import IVariable, variable, variable_dict, variable_list +from pyomo.core.kernel.block import IBlock, block, block_dict, block_tuple, block_list from pyomo.core.kernel.sos import sos from pyomo.opt.results import Solution + def _path_to_object_exists(obj, descendent): if descendent is obj: return True @@ -72,6 +68,7 @@ def _path_to_object_exists(obj, descendent): else: return _path_to_object_exists(obj, parent) + def _active_path_to_object_exists(obj, descendent): if descendent is obj: return True @@ -85,6 +82,7 @@ def _active_path_to_object_exists(obj, descendent): else: return False + def _collect_expr_components(exp): ans = {} if isinstance(exp, ICategorizedObject): @@ -96,16 +94,21 @@ def _collect_expr_components(exp): ans.update(_collect_expr_components(subexp)) return ans + class IJunk(IBlock): __slots__ = () + + class junk(pmo.block): _ctype = IJunk + + class junk_list(pmo.block_list): __slots__ = () _ctype = IJunk -class TestHeterogeneousContainer(unittest.TestCase): +class TestHeterogeneousContainer(unittest.TestCase): model = pmo.block() model.v = pmo.variable() model.V = pmo.variable_list() @@ -141,211 +144,397 @@ def test_preorder_traversal(self): model = self.model.clone() order = list(str(obj) for obj in pmo.preorder_traversal(model)) - self.assertEqual(order, - ['', - 'v','V','V[0]','V[1]','V[1][0]', - 'c','C','C[0]','C[1]','C[1][0]', - 'b', - 'b.v','b.V','b.V[0]','b.V[1]','b.V[1][0]', - 'b.c','b.C','b.C[0]','b.C[1]','b.C[1][0]', - 'B', - 'B[0]', - 'B[0].v','B[0].V','B[0].V[0]','B[0].V[1]','B[0].V[1][0]', - 'B[0].c','B[0].C','B[0].C[0]','B[0].C[1]','B[0].C[1][0]', - 'B[1]', - 'B[1][0]', - 'B[1][0].v','B[1][0].V','B[1][0].V[0]','B[1][0].V[1]','B[1][0].V[1][0]', - 'B[1][0].c','B[1][0].C','B[1][0].C[0]','B[1][0].C[1]','B[1][0].C[1][0]', - 'j', - 'J', - 'J[0]', - 'J[1]', - 'J[1][0]', - 'J[1][0].b', - 'J[1][0].b.v', - 'k', - 'K', - 'K[0]', - 'K[0].v','K[0].V','K[0].V[0]','K[0].V[1]','K[0].V[1][0]', - 'K[0].c','K[0].C','K[0].C[0]','K[0].C[1]','K[0].C[1][0]', - 'K[0].b', - 'K[0].b.v','K[0].b.V','K[0].b.V[0]','K[0].b.V[1]','K[0].b.V[1][0]', - 'K[0].b.c','K[0].b.C','K[0].b.C[0]','K[0].b.C[1]','K[0].b.C[1][0]', - 'K[0].B', - 'K[0].B[0]', - 'K[0].B[0].v','K[0].B[0].V','K[0].B[0].V[0]','K[0].B[0].V[1]','K[0].B[0].V[1][0]', - 'K[0].B[0].c','K[0].B[0].C','K[0].B[0].C[0]','K[0].B[0].C[1]','K[0].B[0].C[1][0]', - 'K[0].B[1]', - 'K[0].B[1][0]', - 'K[0].B[1][0].v','K[0].B[1][0].V','K[0].B[1][0].V[0]','K[0].B[1][0].V[1]','K[0].B[1][0].V[1][0]', - 'K[0].B[1][0].c','K[0].B[1][0].C','K[0].B[1][0].C[0]','K[0].B[1][0].C[1]','K[0].B[1][0].C[1][0]', - 'K[0].j', - 'K[0].J', - 'K[0].J[0]', - 'K[0].J[1]', - 'K[0].J[1][0]', - 'K[0].J[1][0].b', - 'K[0].J[1][0].b.v']) - - order = list(str(obj) for obj in pmo.preorder_traversal( - model, - descend=lambda x: (x is not model.k) and (x is not model.K))) - self.assertEqual(order, - ['', - 'v','V','V[0]','V[1]','V[1][0]', - 'c','C','C[0]','C[1]','C[1][0]', - 'b', - 'b.v','b.V','b.V[0]','b.V[1]','b.V[1][0]', - 'b.c','b.C','b.C[0]','b.C[1]','b.C[1][0]', - 'B', - 'B[0]', - 'B[0].v','B[0].V','B[0].V[0]','B[0].V[1]','B[0].V[1][0]', - 'B[0].c','B[0].C','B[0].C[0]','B[0].C[1]','B[0].C[1][0]', - 'B[1]', - 'B[1][0]', - 'B[1][0].v','B[1][0].V','B[1][0].V[0]','B[1][0].V[1]','B[1][0].V[1][0]', - 'B[1][0].c','B[1][0].C','B[1][0].C[0]','B[1][0].C[1]','B[1][0].C[1][0]', - 'j', - 'J', - 'J[0]', - 'J[1]', - 'J[1][0]', - 'J[1][0].b', - 'J[1][0].b.v', - 'k', - 'K']) - - order = list(str(obj) for obj in pmo.preorder_traversal(model, - ctype=IBlock)) - self.assertEqual(order, - ['', - 'b', - 'B', - 'B[0]', - 'B[1]', - 'B[1][0]', - 'j', - 'J', - 'J[0]', - 'J[1]', - 'J[1][0]', - 'J[1][0].b', - 'k', - 'K', - 'K[0]', - 'K[0].b', - 'K[0].B', - 'K[0].B[0]', - 'K[0].B[1]', - 'K[0].B[1][0]', - 'K[0].j', - 'K[0].J', - 'K[0].J[0]', - 'K[0].J[1]', - 'K[0].J[1][0]', - 'K[0].J[1][0].b']) - - order = list(str(obj) for obj in pmo.preorder_traversal(model, - ctype=IVariable)) - self.assertEqual(order, - ['', - 'v','V','V[0]','V[1]','V[1][0]', - 'b', - 'b.v','b.V','b.V[0]','b.V[1]','b.V[1][0]', - 'B', - 'B[0]', - 'B[0].v','B[0].V','B[0].V[0]','B[0].V[1]','B[0].V[1][0]', - 'B[1]', - 'B[1][0]', - 'B[1][0].v','B[1][0].V','B[1][0].V[0]','B[1][0].V[1]','B[1][0].V[1][0]', - 'j', - 'J', - 'J[0]', - 'J[1]', - 'J[1][0]', - 'J[1][0].b', - 'J[1][0].b.v', - 'k', - 'K', - 'K[0]', - 'K[0].v','K[0].V','K[0].V[0]','K[0].V[1]','K[0].V[1][0]', - 'K[0].b', - 'K[0].b.v','K[0].b.V','K[0].b.V[0]','K[0].b.V[1]','K[0].b.V[1][0]', - 'K[0].B', - 'K[0].B[0]', - 'K[0].B[0].v','K[0].B[0].V','K[0].B[0].V[0]','K[0].B[0].V[1]','K[0].B[0].V[1][0]', - 'K[0].B[1]', - 'K[0].B[1][0]', - 'K[0].B[1][0].v','K[0].B[1][0].V','K[0].B[1][0].V[0]','K[0].B[1][0].V[1]','K[0].B[1][0].V[1][0]', - 'K[0].j', - 'K[0].J', - 'K[0].J[0]', - 'K[0].J[1]', - 'K[0].J[1][0]', - 'K[0].J[1][0].b', - 'K[0].J[1][0].b.v']) + self.assertEqual( + order, + [ + '', + 'v', + 'V', + 'V[0]', + 'V[1]', + 'V[1][0]', + 'c', + 'C', + 'C[0]', + 'C[1]', + 'C[1][0]', + 'b', + 'b.v', + 'b.V', + 'b.V[0]', + 'b.V[1]', + 'b.V[1][0]', + 'b.c', + 'b.C', + 'b.C[0]', + 'b.C[1]', + 'b.C[1][0]', + 'B', + 'B[0]', + 'B[0].v', + 'B[0].V', + 'B[0].V[0]', + 'B[0].V[1]', + 'B[0].V[1][0]', + 'B[0].c', + 'B[0].C', + 'B[0].C[0]', + 'B[0].C[1]', + 'B[0].C[1][0]', + 'B[1]', + 'B[1][0]', + 'B[1][0].v', + 'B[1][0].V', + 'B[1][0].V[0]', + 'B[1][0].V[1]', + 'B[1][0].V[1][0]', + 'B[1][0].c', + 'B[1][0].C', + 'B[1][0].C[0]', + 'B[1][0].C[1]', + 'B[1][0].C[1][0]', + 'j', + 'J', + 'J[0]', + 'J[1]', + 'J[1][0]', + 'J[1][0].b', + 'J[1][0].b.v', + 'k', + 'K', + 'K[0]', + 'K[0].v', + 'K[0].V', + 'K[0].V[0]', + 'K[0].V[1]', + 'K[0].V[1][0]', + 'K[0].c', + 'K[0].C', + 'K[0].C[0]', + 'K[0].C[1]', + 'K[0].C[1][0]', + 'K[0].b', + 'K[0].b.v', + 'K[0].b.V', + 'K[0].b.V[0]', + 'K[0].b.V[1]', + 'K[0].b.V[1][0]', + 'K[0].b.c', + 'K[0].b.C', + 'K[0].b.C[0]', + 'K[0].b.C[1]', + 'K[0].b.C[1][0]', + 'K[0].B', + 'K[0].B[0]', + 'K[0].B[0].v', + 'K[0].B[0].V', + 'K[0].B[0].V[0]', + 'K[0].B[0].V[1]', + 'K[0].B[0].V[1][0]', + 'K[0].B[0].c', + 'K[0].B[0].C', + 'K[0].B[0].C[0]', + 'K[0].B[0].C[1]', + 'K[0].B[0].C[1][0]', + 'K[0].B[1]', + 'K[0].B[1][0]', + 'K[0].B[1][0].v', + 'K[0].B[1][0].V', + 'K[0].B[1][0].V[0]', + 'K[0].B[1][0].V[1]', + 'K[0].B[1][0].V[1][0]', + 'K[0].B[1][0].c', + 'K[0].B[1][0].C', + 'K[0].B[1][0].C[0]', + 'K[0].B[1][0].C[1]', + 'K[0].B[1][0].C[1][0]', + 'K[0].j', + 'K[0].J', + 'K[0].J[0]', + 'K[0].J[1]', + 'K[0].J[1][0]', + 'K[0].J[1][0].b', + 'K[0].J[1][0].b.v', + ], + ) + + order = list( + str(obj) + for obj in pmo.preorder_traversal( + model, descend=lambda x: (x is not model.k) and (x is not model.K) + ) + ) + self.assertEqual( + order, + [ + '', + 'v', + 'V', + 'V[0]', + 'V[1]', + 'V[1][0]', + 'c', + 'C', + 'C[0]', + 'C[1]', + 'C[1][0]', + 'b', + 'b.v', + 'b.V', + 'b.V[0]', + 'b.V[1]', + 'b.V[1][0]', + 'b.c', + 'b.C', + 'b.C[0]', + 'b.C[1]', + 'b.C[1][0]', + 'B', + 'B[0]', + 'B[0].v', + 'B[0].V', + 'B[0].V[0]', + 'B[0].V[1]', + 'B[0].V[1][0]', + 'B[0].c', + 'B[0].C', + 'B[0].C[0]', + 'B[0].C[1]', + 'B[0].C[1][0]', + 'B[1]', + 'B[1][0]', + 'B[1][0].v', + 'B[1][0].V', + 'B[1][0].V[0]', + 'B[1][0].V[1]', + 'B[1][0].V[1][0]', + 'B[1][0].c', + 'B[1][0].C', + 'B[1][0].C[0]', + 'B[1][0].C[1]', + 'B[1][0].C[1][0]', + 'j', + 'J', + 'J[0]', + 'J[1]', + 'J[1][0]', + 'J[1][0].b', + 'J[1][0].b.v', + 'k', + 'K', + ], + ) + + order = list(str(obj) for obj in pmo.preorder_traversal(model, ctype=IBlock)) + self.assertEqual( + order, + [ + '', + 'b', + 'B', + 'B[0]', + 'B[1]', + 'B[1][0]', + 'j', + 'J', + 'J[0]', + 'J[1]', + 'J[1][0]', + 'J[1][0].b', + 'k', + 'K', + 'K[0]', + 'K[0].b', + 'K[0].B', + 'K[0].B[0]', + 'K[0].B[1]', + 'K[0].B[1][0]', + 'K[0].j', + 'K[0].J', + 'K[0].J[0]', + 'K[0].J[1]', + 'K[0].J[1][0]', + 'K[0].J[1][0].b', + ], + ) + + order = list(str(obj) for obj in pmo.preorder_traversal(model, ctype=IVariable)) + self.assertEqual( + order, + [ + '', + 'v', + 'V', + 'V[0]', + 'V[1]', + 'V[1][0]', + 'b', + 'b.v', + 'b.V', + 'b.V[0]', + 'b.V[1]', + 'b.V[1][0]', + 'B', + 'B[0]', + 'B[0].v', + 'B[0].V', + 'B[0].V[0]', + 'B[0].V[1]', + 'B[0].V[1][0]', + 'B[1]', + 'B[1][0]', + 'B[1][0].v', + 'B[1][0].V', + 'B[1][0].V[0]', + 'B[1][0].V[1]', + 'B[1][0].V[1][0]', + 'j', + 'J', + 'J[0]', + 'J[1]', + 'J[1][0]', + 'J[1][0].b', + 'J[1][0].b.v', + 'k', + 'K', + 'K[0]', + 'K[0].v', + 'K[0].V', + 'K[0].V[0]', + 'K[0].V[1]', + 'K[0].V[1][0]', + 'K[0].b', + 'K[0].b.v', + 'K[0].b.V', + 'K[0].b.V[0]', + 'K[0].b.V[1]', + 'K[0].b.V[1][0]', + 'K[0].B', + 'K[0].B[0]', + 'K[0].B[0].v', + 'K[0].B[0].V', + 'K[0].B[0].V[0]', + 'K[0].B[0].V[1]', + 'K[0].B[0].V[1][0]', + 'K[0].B[1]', + 'K[0].B[1][0]', + 'K[0].B[1][0].v', + 'K[0].B[1][0].V', + 'K[0].B[1][0].V[0]', + 'K[0].B[1][0].V[1]', + 'K[0].B[1][0].V[1][0]', + 'K[0].j', + 'K[0].J', + 'K[0].J[0]', + 'K[0].J[1]', + 'K[0].J[1][0]', + 'K[0].J[1][0].b', + 'K[0].J[1][0].b.v', + ], + ) def test_components(self): model = self.model.clone() checked = [] + def descend_into(x): self.assertTrue(x._is_heterogeneous_container) checked.append(x.name) return True - order = list(str(obj) for obj in model.components( - descend_into=descend_into)) - self.assertEqual(checked, - ['b', - 'B[0]', - 'B[1][0]', - 'j', - 'J[0]', - 'J[1][0]', - 'J[1][0].b', - 'k', - 'K[0]', - 'K[0].b', - 'K[0].B[0]', - 'K[0].B[1][0]', - 'K[0].j', - 'K[0].J[0]', - 'K[0].J[1][0]', - 'K[0].J[1][0].b']) - self.assertEqual(order, - ['v','V[0]','V[1][0]', - 'c','C[0]','C[1][0]', - 'b', - 'b.v','b.V[0]','b.V[1][0]', - 'b.c','b.C[0]','b.C[1][0]', - 'B[0]', - 'B[0].v','B[0].V[0]','B[0].V[1][0]', - 'B[0].c','B[0].C[0]','B[0].C[1][0]', - 'B[1][0]', - 'B[1][0].v','B[1][0].V[0]','B[1][0].V[1][0]', - 'B[1][0].c','B[1][0].C[0]','B[1][0].C[1][0]', - 'j', - 'J[0]', - 'J[1][0]', - 'J[1][0].b', - 'J[1][0].b.v', - 'k', - 'K[0]', - 'K[0].v','K[0].V[0]','K[0].V[1][0]', - 'K[0].c','K[0].C[0]','K[0].C[1][0]', - 'K[0].b', - 'K[0].b.v','K[0].b.V[0]','K[0].b.V[1][0]', - 'K[0].b.c','K[0].b.C[0]','K[0].b.C[1][0]', - 'K[0].B[0]', - 'K[0].B[0].v','K[0].B[0].V[0]','K[0].B[0].V[1][0]', - 'K[0].B[0].c','K[0].B[0].C[0]','K[0].B[0].C[1][0]', - 'K[0].B[1][0]', - 'K[0].B[1][0].v','K[0].B[1][0].V[0]','K[0].B[1][0].V[1][0]', - 'K[0].B[1][0].c','K[0].B[1][0].C[0]','K[0].B[1][0].C[1][0]', - 'K[0].j', - 'K[0].J[0]', - 'K[0].J[1][0]', - 'K[0].J[1][0].b', - 'K[0].J[1][0].b.v']) + + order = list(str(obj) for obj in model.components(descend_into=descend_into)) + self.assertEqual( + checked, + [ + 'b', + 'B[0]', + 'B[1][0]', + 'j', + 'J[0]', + 'J[1][0]', + 'J[1][0].b', + 'k', + 'K[0]', + 'K[0].b', + 'K[0].B[0]', + 'K[0].B[1][0]', + 'K[0].j', + 'K[0].J[0]', + 'K[0].J[1][0]', + 'K[0].J[1][0].b', + ], + ) + self.assertEqual( + order, + [ + 'v', + 'V[0]', + 'V[1][0]', + 'c', + 'C[0]', + 'C[1][0]', + 'b', + 'b.v', + 'b.V[0]', + 'b.V[1][0]', + 'b.c', + 'b.C[0]', + 'b.C[1][0]', + 'B[0]', + 'B[0].v', + 'B[0].V[0]', + 'B[0].V[1][0]', + 'B[0].c', + 'B[0].C[0]', + 'B[0].C[1][0]', + 'B[1][0]', + 'B[1][0].v', + 'B[1][0].V[0]', + 'B[1][0].V[1][0]', + 'B[1][0].c', + 'B[1][0].C[0]', + 'B[1][0].C[1][0]', + 'j', + 'J[0]', + 'J[1][0]', + 'J[1][0].b', + 'J[1][0].b.v', + 'k', + 'K[0]', + 'K[0].v', + 'K[0].V[0]', + 'K[0].V[1][0]', + 'K[0].c', + 'K[0].C[0]', + 'K[0].C[1][0]', + 'K[0].b', + 'K[0].b.v', + 'K[0].b.V[0]', + 'K[0].b.V[1][0]', + 'K[0].b.c', + 'K[0].b.C[0]', + 'K[0].b.C[1][0]', + 'K[0].B[0]', + 'K[0].B[0].v', + 'K[0].B[0].V[0]', + 'K[0].B[0].V[1][0]', + 'K[0].B[0].c', + 'K[0].B[0].C[0]', + 'K[0].B[0].C[1][0]', + 'K[0].B[1][0]', + 'K[0].B[1][0].v', + 'K[0].B[1][0].V[0]', + 'K[0].B[1][0].V[1][0]', + 'K[0].B[1][0].c', + 'K[0].B[1][0].C[0]', + 'K[0].B[1][0].C[1][0]', + 'K[0].j', + 'K[0].J[0]', + 'K[0].J[1][0]', + 'K[0].J[1][0].b', + 'K[0].J[1][0].b.v', + ], + ) vlist = [str(obj) for obj in model.components(ctype=IVariable)] self.assertEqual(len(vlist), len(set(vlist))) clist = [str(obj) for obj in model.components(ctype=IConstraint)] @@ -355,30 +544,31 @@ def descend_into(x): jlist = [str(obj) for obj in model.components(ctype=IJunk)] self.assertEqual(len(jlist), len(set(jlist))) - for l1, l2 in itertools.product([vlist, clist, blist, jlist], - repeat=2): + for l1, l2 in itertools.product([vlist, clist, blist, jlist], repeat=2): if l1 is l2: continue self.assertEqual(set(l1).intersection(set(l2)), set([])) - self.assertEqual(len(vlist)+len(clist)+len(blist)+len(jlist), - len(order)) + self.assertEqual(len(vlist) + len(clist) + len(blist) + len(jlist), len(order)) def test_getname(self): model = self.model.clone() - self.assertEqual(model.J[1][0].b.v.getname(fully_qualified=True), - 'J[1][0].b.v') - self.assertEqual(model.J[1][0].b.v.getname(fully_qualified=True, - relative_to=model.J[1][0]), - 'b.v') - self.assertEqual(model.J[1][0].b.v.getname(fully_qualified=True, - relative_to=model.J[1]), - '[0].b.v') - self.assertEqual(model.J[1][0].b.v.getname(fully_qualified=True, - relative_to=model.J), - '[1][0].b.v') - self.assertEqual(model.J[1][0].b.v.getname(fully_qualified=True, - relative_to=model), - 'J[1][0].b.v') + self.assertEqual(model.J[1][0].b.v.getname(fully_qualified=True), 'J[1][0].b.v') + self.assertEqual( + model.J[1][0].b.v.getname(fully_qualified=True, relative_to=model.J[1][0]), + 'b.v', + ) + self.assertEqual( + model.J[1][0].b.v.getname(fully_qualified=True, relative_to=model.J[1]), + '[0].b.v', + ) + self.assertEqual( + model.J[1][0].b.v.getname(fully_qualified=True, relative_to=model.J), + '[1][0].b.v', + ) + self.assertEqual( + model.J[1][0].b.v.getname(fully_qualified=True, relative_to=model), + 'J[1][0].b.v', + ) def test_heterogeneous_containers(self): order = list(str(obj) for obj in heterogeneous_containers(self.model.V)) @@ -387,24 +577,29 @@ def test_heterogeneous_containers(self): self.assertEqual(order, []) order = list(str(obj) for obj in heterogeneous_containers(self.model)) - self.assertEqual(order, - ['', - 'b', - 'B[0]', - 'B[1][0]', - 'k', - 'K[0]', - 'K[0].b', - 'K[0].B[0]', - 'K[0].B[1][0]', - 'K[0].j', - 'K[0].J[0]', - 'K[0].J[1][0]', - 'K[0].J[1][0].b', - 'j', - 'J[0]', - 'J[1][0]', - 'J[1][0].b']) + self.assertEqual( + order, + [ + '', + 'b', + 'B[0]', + 'B[1][0]', + 'k', + 'K[0]', + 'K[0].b', + 'K[0].B[0]', + 'K[0].B[1][0]', + 'K[0].j', + 'K[0].J[0]', + 'K[0].J[1][0]', + 'K[0].J[1][0].b', + 'j', + 'J[0]', + 'J[1][0]', + 'J[1][0].b', + ], + ) + def f(x): # do not descend below heterogeneous containers # stored on self.model @@ -415,75 +610,66 @@ def f(x): return False parent = parent.parent return True - order1 = list(str(obj) for obj in heterogeneous_containers( - self.model, - descend_into=f)) - order2 = list(str(obj) for obj in heterogeneous_containers( - self.model, - descend_into=lambda x: True if (x is self.model) else False)) + + order1 = list( + str(obj) for obj in heterogeneous_containers(self.model, descend_into=f) + ) + order2 = list( + str(obj) + for obj in heterogeneous_containers( + self.model, descend_into=lambda x: True if (x is self.model) else False + ) + ) self.assertEqual(order1, order2) - self.assertEqual(order1, - ['', - 'b', - 'B[0]', - 'B[1][0]', - 'k', - 'K[0]', - 'j', - 'J[0]', - 'J[1][0]']) - order = list(str(obj) for obj in heterogeneous_containers( - self.model, - ctype=IBlock)) - self.assertEqual(order, - ['', - 'b', - 'B[0]', - 'B[1][0]', - 'k', - 'K[0]', - 'K[0].b', - 'K[0].B[0]', - 'K[0].B[1][0]', - 'K[0].J[1][0].b', - 'J[1][0].b']) - order = list(str(obj) for obj in heterogeneous_containers( - self.model, - ctype=IJunk)) - self.assertEqual(order, - ['K[0].j', - 'K[0].J[0]', - 'K[0].J[1][0]', - 'j', - 'J[0]', - 'J[1][0]']) - order = list(str(obj) for obj in heterogeneous_containers( - self.model.K, - ctype=IJunk)) - self.assertEqual(order, - ['K[0].j', - 'K[0].J[0]', - 'K[0].J[1][0]']) - order = list(str(obj) for obj in heterogeneous_containers( - self.model.K[0], - ctype=IJunk)) - self.assertEqual(order, - ['K[0].j', - 'K[0].J[0]', - 'K[0].J[1][0]']) - order = list(str(obj) for obj in heterogeneous_containers( - self.model.K[0].j, - ctype=IJunk)) - self.assertEqual(order, - ['K[0].j']) - order = list(str(obj) for obj in heterogeneous_containers( - self.model.K[0].j, - ctype=IBlock)) - self.assertEqual(order, - []) + self.assertEqual( + order1, + ['', 'b', 'B[0]', 'B[1][0]', 'k', 'K[0]', 'j', 'J[0]', 'J[1][0]'], + ) + order = list( + str(obj) for obj in heterogeneous_containers(self.model, ctype=IBlock) + ) + self.assertEqual( + order, + [ + '', + 'b', + 'B[0]', + 'B[1][0]', + 'k', + 'K[0]', + 'K[0].b', + 'K[0].B[0]', + 'K[0].B[1][0]', + 'K[0].J[1][0].b', + 'J[1][0].b', + ], + ) + order = list( + str(obj) for obj in heterogeneous_containers(self.model, ctype=IJunk) + ) + self.assertEqual( + order, ['K[0].j', 'K[0].J[0]', 'K[0].J[1][0]', 'j', 'J[0]', 'J[1][0]'] + ) + order = list( + str(obj) for obj in heterogeneous_containers(self.model.K, ctype=IJunk) + ) + self.assertEqual(order, ['K[0].j', 'K[0].J[0]', 'K[0].J[1][0]']) + order = list( + str(obj) for obj in heterogeneous_containers(self.model.K[0], ctype=IJunk) + ) + self.assertEqual(order, ['K[0].j', 'K[0].J[0]', 'K[0].J[1][0]']) + order = list( + str(obj) for obj in heterogeneous_containers(self.model.K[0].j, ctype=IJunk) + ) + self.assertEqual(order, ['K[0].j']) + order = list( + str(obj) + for obj in heterogeneous_containers(self.model.K[0].j, ctype=IBlock) + ) + self.assertEqual(order, []) -class TestMisc(unittest.TestCase): +class TestMisc(unittest.TestCase): def test_reserved_attributes(self): b = block() self.assertTrue(len(block._block_reserved_words) > 0) @@ -588,8 +774,7 @@ def test_load_solution(self): soln = Solution() soln.symbol_map = sm - soln.variable['v'] = {"Value": 1.0, - "vsuffix": 'v'} + soln.variable['v'] = {"Value": 1.0, "vsuffix": 'v'} soln.variable['ONE_VAR_CONSTANT'] = None soln.constraint['c'] = {"csuffix": 'c'} soln.constraint['ONE_VAR_CONSTANT'] = None @@ -603,8 +788,7 @@ def test_load_solution(self): self.assertEqual(m.osuffix[m.o], 'o') self.assertEqual(m.msuffix[m], 'm') - soln.variable['vv'] = {"Value": 1.0, - "vsuffix": 'v'} + soln.variable['vv'] = {"Value": 1.0, "vsuffix": 'v'} with self.assertRaises(KeyError): m.load_solution(soln) del soln.variable['vv'] @@ -621,18 +805,21 @@ def test_load_solution(self): m.v.fix() with self.assertRaises(ValueError): - m.load_solution(soln, - allow_consistent_values_for_fixed_vars=False) + m.load_solution(soln, allow_consistent_values_for_fixed_vars=False) m.v.fix(1.1) - m.load_solution(soln, - allow_consistent_values_for_fixed_vars=True, - comparison_tolerance_for_fixed_vars=0.5) + m.load_solution( + soln, + allow_consistent_values_for_fixed_vars=True, + comparison_tolerance_for_fixed_vars=0.5, + ) m.v.fix(1.1) with self.assertRaises(ValueError): - m.load_solution(soln, - allow_consistent_values_for_fixed_vars=True, - comparison_tolerance_for_fixed_vars=0.05) + m.load_solution( + soln, + allow_consistent_values_for_fixed_vars=True, + comparison_tolerance_for_fixed_vars=0.05, + ) del soln.variable['v'] @@ -649,18 +836,21 @@ def test_load_solution(self): m.v.fix(1.0) with self.assertRaises(ValueError): - m.load_solution(soln, - allow_consistent_values_for_fixed_vars=False) + m.load_solution(soln, allow_consistent_values_for_fixed_vars=False) m.v.fix(1.1) - m.load_solution(soln, - allow_consistent_values_for_fixed_vars=True, - comparison_tolerance_for_fixed_vars=0.5) + m.load_solution( + soln, + allow_consistent_values_for_fixed_vars=True, + comparison_tolerance_for_fixed_vars=0.5, + ) m.v.fix(1.1) with self.assertRaises(ValueError): - m.load_solution(soln, - allow_consistent_values_for_fixed_vars=True, - comparison_tolerance_for_fixed_vars=0.05) + m.load_solution( + soln, + allow_consistent_values_for_fixed_vars=True, + comparison_tolerance_for_fixed_vars=0.05, + ) # a temporary test to make sure solve and load # functionality work (will be moved elsewhere in the @@ -685,40 +875,37 @@ def test_solve_load(self): from pyomo.opt import SolverStatus, TerminationCondition opt = SolverFactory("glpk") - if isinstance(opt, UnknownSolver) or \ - (not opt.available()): + if isinstance(opt, UnknownSolver) or (not opt.available()): raise unittest.SkipTest("glpk solver not available") status = opt.solve(b) - self.assertEqual(status.solver.status, - SolverStatus.ok) - self.assertEqual(status.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual(status.solver.status, SolverStatus.ok) + self.assertEqual( + status.solver.termination_condition, TerminationCondition.optimal + ) self.assertAlmostEqual(b.o(), -7, places=5) self.assertAlmostEqual(b.v(), -8, places=5) self.assertAlmostEqual(b.y(), 1.0, places=5) opt = SolverFactory("glpk") - if isinstance(opt, UnknownSolver) or \ - (not opt.available()): + if isinstance(opt, UnknownSolver) or (not opt.available()): raise unittest.SkipTest("glpk solver not available") status = opt.solve(b, symbolic_solver_labels=True) - self.assertEqual(status.solver.status, - SolverStatus.ok) - self.assertEqual(status.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual(status.solver.status, SolverStatus.ok) + self.assertEqual( + status.solver.termination_condition, TerminationCondition.optimal + ) self.assertAlmostEqual(b.o(), -7, places=5) self.assertAlmostEqual(b.v(), -8, places=5) self.assertAlmostEqual(b.y(), 1.0, places=5) opt = SolverFactory("ipopt") - if isinstance(opt, UnknownSolver) or \ - (not opt.available()): + if isinstance(opt, UnknownSolver) or (not opt.available()): raise unittest.SkipTest("ipopt solver not available") status = opt.solve(b) - self.assertEqual(status.solver.status, - SolverStatus.ok) - self.assertEqual(status.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual(status.solver.status, SolverStatus.ok) + self.assertEqual( + status.solver.termination_condition, TerminationCondition.optimal + ) self.assertAlmostEqual(b.o(), -7, places=5) self.assertAlmostEqual(b.v(), -8, places=5) self.assertAlmostEqual(b.y(), 1.0, places=5) @@ -727,16 +914,15 @@ def test_solve_load(self): if isinstance(opt, UnknownSolver): raise unittest.SkipTest("ipopt solver not available") status = opt.solve(b, symbolic_solver_labels=True) - self.assertEqual(status.solver.status, - SolverStatus.ok) - self.assertEqual(status.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual(status.solver.status, SolverStatus.ok) + self.assertEqual( + status.solver.termination_condition, TerminationCondition.optimal + ) self.assertAlmostEqual(b.o(), -7, places=5) self.assertAlmostEqual(b.v(), -8, places=5) self.assertAlmostEqual(b.y(), 1.0, places=5) def test_traversal(self): - b = block() b.v = variable() b.c1 = constraint() @@ -754,41 +940,72 @@ def test_traversal(self): def descend(obj): self.assertTrue(obj._is_container) return True + self.assertEqual( - [obj.name for obj in pmo.preorder_traversal(b, - active=None, - descend=descend)], - [None,'v','c1','c2','c2[0]','B','B[0]','B[0][0]','B[0][0].c','B[0][0].b']) - self.assertEqual( - [obj.name for obj in pmo.preorder_traversal(b, - descend=descend)], - [None,'v','c2','c2[0]','B']) - self.assertEqual( - [obj.name for obj in pmo.preorder_traversal(b, - active=True, - descend=descend)], - [None,'v','c2','c2[0]','B']) - - self.assertEqual( - [obj.name for obj in pmo.preorder_traversal( - b, - active=None, - ctype=IConstraint, - descend=descend)], - [None,'c1','c2','c2[0]','B','B[0]','B[0][0]','B[0][0].c','B[0][0].b']) - self.assertEqual( - [obj.name for obj in pmo.preorder_traversal( - b, - ctype=IConstraint, - descend=descend)], - [None, 'c2', 'c2[0]', 'B']) - self.assertEqual( - [obj.name for obj in pmo.preorder_traversal( - b, - ctype=IConstraint, - active=True, - descend=descend)], - [None, 'c2', 'c2[0]', 'B']) + [ + obj.name + for obj in pmo.preorder_traversal(b, active=None, descend=descend) + ], + [ + None, + 'v', + 'c1', + 'c2', + 'c2[0]', + 'B', + 'B[0]', + 'B[0][0]', + 'B[0][0].c', + 'B[0][0].b', + ], + ) + self.assertEqual( + [obj.name for obj in pmo.preorder_traversal(b, descend=descend)], + [None, 'v', 'c2', 'c2[0]', 'B'], + ) + self.assertEqual( + [ + obj.name + for obj in pmo.preorder_traversal(b, active=True, descend=descend) + ], + [None, 'v', 'c2', 'c2[0]', 'B'], + ) + + self.assertEqual( + [ + obj.name + for obj in pmo.preorder_traversal( + b, active=None, ctype=IConstraint, descend=descend + ) + ], + [ + None, + 'c1', + 'c2', + 'c2[0]', + 'B', + 'B[0]', + 'B[0][0]', + 'B[0][0].c', + 'B[0][0].b', + ], + ) + self.assertEqual( + [ + obj.name + for obj in pmo.preorder_traversal(b, ctype=IConstraint, descend=descend) + ], + [None, 'c2', 'c2[0]', 'B'], + ) + self.assertEqual( + [ + obj.name + for obj in pmo.preorder_traversal( + b, ctype=IConstraint, active=True, descend=descend + ) + ], + [None, 'c2', 'c2[0]', 'B'], + ) m = pmo.block() m.B = pmo.block_list() @@ -796,25 +1013,21 @@ def descend(obj): m.B[0].v = pmo.variable() self.assertEqual( - [obj.name for obj in pmo.preorder_traversal( - m, - ctype=IVariable)], - [None, 'B', 'B[0]', 'B[0].v']) + [obj.name for obj in pmo.preorder_traversal(m, ctype=IVariable)], + [None, 'B', 'B[0]', 'B[0].v'], + ) self.assertEqual( - [obj.name for obj in pmo.preorder_traversal( - m.B, - ctype=IVariable)], - ['B', 'B[0]', 'B[0].v']) + [obj.name for obj in pmo.preorder_traversal(m.B, ctype=IVariable)], + ['B', 'B[0]', 'B[0].v'], + ) self.assertEqual( - [obj.name for obj in pmo.preorder_traversal( - m.B[0], - ctype=IVariable)], - ['B[0]', 'B[0].v']) + [obj.name for obj in pmo.preorder_traversal(m.B[0], ctype=IVariable)], + ['B[0]', 'B[0].v'], + ) self.assertEqual( - [obj.name for obj in pmo.preorder_traversal( - m.B[0].v, - ctype=IVariable)], - ['B[0].v']) + [obj.name for obj in pmo.preorder_traversal(m.B[0].v, ctype=IVariable)], + ['B[0].v'], + ) # test how clone behaves when there are # references to components on a different block @@ -844,58 +1057,65 @@ def test_clone1(self): b_b = b.b.clone() self.assertIsNot(b_b.e, b.b.e) self.assertTrue(len(_collect_expr_components(b.b.e.expr)) == 1) - self.assertIs(list(_collect_expr_components(b.b.e.expr).values())[0], - b.v) + self.assertIs(list(_collect_expr_components(b.b.e.expr).values())[0], b.v) self.assertTrue(len(_collect_expr_components(b_b.e.expr)) == 1) - self.assertIs(list(_collect_expr_components(b_b.e.expr).values())[0], - b.v) + self.assertIs(list(_collect_expr_components(b_b.e.expr).values())[0], b.v) b_bdict0 = b.bdict[0].clone() self.assertIsNot(b_bdict0.e, b.bdict[0].e) self.assertTrue(len(_collect_expr_components(b.bdict[0].e.expr)) == 1) - self.assertIs(list(_collect_expr_components(b.bdict[0].e.expr).values())[0], - b.v) + self.assertIs( + list(_collect_expr_components(b.bdict[0].e.expr).values())[0], b.v + ) self.assertTrue(len(_collect_expr_components(b_bdict0.e.expr)) == 1) - self.assertIs(list(_collect_expr_components(b_bdict0.e.expr).values())[0], - b.v) + self.assertIs(list(_collect_expr_components(b_bdict0.e.expr).values())[0], b.v) b_blist0 = b.blist[0].clone() self.assertIsNot(b_blist0.e, b.blist[0].e) self.assertTrue(len(_collect_expr_components(b.blist[0].e.expr)) == 2) - self.assertEqual(sorted(list(id(v_) for v_ in _collect_expr_components(b.blist[0].e.expr).values())), - sorted(list(id(v_) for v_ in [b.v, b.b.v]))) + self.assertEqual( + sorted( + list( + id(v_) + for v_ in _collect_expr_components(b.blist[0].e.expr).values() + ) + ), + sorted(list(id(v_) for v_ in [b.v, b.b.v])), + ) self.assertTrue(len(_collect_expr_components(b_blist0.e.expr)) == 2) - self.assertEqual(sorted(list(id(v_) for v_ in _collect_expr_components(b_blist0.e.expr).values())), - sorted(list(id(v_) for v_ in [b.v, b.b.v]))) + self.assertEqual( + sorted( + list( + id(v_) for v_ in _collect_expr_components(b_blist0.e.expr).values() + ) + ), + sorted(list(id(v_) for v_ in [b.v, b.b.v])), + ) # test bulk clone behavior def test_clone2(self): b = block() b.v = variable() - b.vdict = variable_dict(((i, variable()) - for i in range(10))) - b.vlist = variable_list(variable() - for i in range(10)) + b.vdict = variable_dict(((i, variable()) for i in range(10))) + b.vlist = variable_list(variable() for i in range(10)) b.o = objective(b.v + b.vdict[0] + b.vlist[0]) - b.odict = objective_dict(((i, objective(b.v + b.vdict[i])) - for i in b.vdict)) - b.olist = objective_list(objective(b.v + v_) - for i,v_ in enumerate(b.vdict)) + b.odict = objective_dict(((i, objective(b.v + b.vdict[i])) for i in b.vdict)) + b.olist = objective_list(objective(b.v + v_) for i, v_ in enumerate(b.vdict)) b.c = constraint(b.v >= 1) - b.cdict = constraint_dict(((i, constraint(b.vdict[i] == i)) - for i in b.vdict)) - b.clist = constraint_list(constraint((0, v_, i)) - for i, v_ in enumerate(b.vlist)) + b.cdict = constraint_dict(((i, constraint(b.vdict[i] == i)) for i in b.vdict)) + b.clist = constraint_list( + constraint((0, v_, i)) for i, v_ in enumerate(b.vlist) + ) b.p = parameter() - b.pdict = parameter_dict(((i, parameter(i)) - for i in b.vdict)) - b.plist = parameter_list(parameter(i) - for i in range(len(b.vlist))) + b.pdict = parameter_dict(((i, parameter(i)) for i in b.vdict)) + b.plist = parameter_list(parameter(i) for i in range(len(b.vlist))) b.e = expression(b.v * b.p + 1) - b.edict = expression_dict(((i, expression(b.vdict[i] * b.pdict[i] + 1)) - for i in b.vdict)) - b.elist = expression_list(expression(v_ * b.plist[i] + 1) - for i,v_ in enumerate(b.vlist)) + b.edict = expression_dict( + ((i, expression(b.vdict[i] * b.pdict[i] + 1)) for i in b.vdict) + ) + b.elist = expression_list( + expression(v_ * b.plist[i] + 1) for i, v_ in enumerate(b.vlist) + ) self.assertIs(b.parent, None) @@ -906,26 +1126,28 @@ def test_clone2(self): self.assertIs(bc.parent, None) self.assertIsNot(b, bc) self.assertTrue(len(list(b.children())) > 0) - self.assertEqual(len(list(b.children())), - len(list(bc.children()))) + self.assertEqual(len(list(b.children())), len(list(bc.children()))) for c1, c2 in zip(b.children(), bc.children()): self.assertIs(c1.parent, b) self.assertIs(c2.parent, bc) self.assertIsNot(c1, c2) self.assertEqual(c1.name, c2.name) - self.assertEqual(len(list(b.components())), - len(list(bc.components()))) + self.assertEqual(len(list(b.components())), len(list(bc.components()))) for c1, c2 in zip(b.components(), bc.components()): self.assertIsNot(c1, c2) self.assertEqual(c1.name, c2.name) - if hasattr(c1,'expr'): + if hasattr(c1, 'expr'): self.assertIsNot(c1.expr, c2.expr) self.assertEqual(str(c1.expr), str(c2.expr)) - self.assertEqual(len(_collect_expr_components(c1.expr)), - len(_collect_expr_components(c2.expr))) - for subc1, subc2 in zip(_collect_expr_components(c1.expr).values(), - _collect_expr_components(c2.expr).values()): + self.assertEqual( + len(_collect_expr_components(c1.expr)), + len(_collect_expr_components(c2.expr)), + ) + for subc1, subc2 in zip( + _collect_expr_components(c1.expr).values(), + _collect_expr_components(c2.expr).values(), + ): self.assertIsNot(subc1, subc2) self.assertEqual(subc1.name, subc2.name) @@ -939,27 +1161,29 @@ def test_clone2(self): bcc = b.clone() self.assertIsNot(b, bcc) - self.assertEqual(len(list(b.children())), - len(list(bcc.children()))) + self.assertEqual(len(list(b.children())), len(list(bcc.children()))) for c1, c2 in zip(b.children(), bcc.children()): self.assertIs(c1.parent, b) self.assertIs(c2.parent, bcc) self.assertIsNot(c1, c2) self.assertEqual(c1.name, c2.name) - self.assertEqual(len(list(b.components())), - len(list(bcc.components()))) + self.assertEqual(len(list(b.components())), len(list(bcc.components()))) self.assertTrue(hasattr(bcc, 'bc')) for c1, c2 in zip(b.components(), bcc.components()): self.assertIsNot(c1, c2) self.assertEqual(c1.name, c2.name) - if hasattr(c1,'expr'): + if hasattr(c1, 'expr'): self.assertIsNot(c1.expr, c2.expr) self.assertEqual(str(c1.expr), str(c2.expr)) - self.assertEqual(len(_collect_expr_components(c1.expr)), - len(_collect_expr_components(c2.expr))) - for subc1, subc2 in zip(_collect_expr_components(c1.expr).values(), - _collect_expr_components(c2.expr).values()): + self.assertEqual( + len(_collect_expr_components(c1.expr)), + len(_collect_expr_components(c2.expr)), + ) + for subc1, subc2 in zip( + _collect_expr_components(c1.expr).values(), + _collect_expr_components(c2.expr).values(), + ): self.assertIsNot(subc1, subc2) self.assertEqual(subc1.name, subc2.name) @@ -974,26 +1198,30 @@ def test_clone2(self): self.assertIsNot(bc_init, sub_bc) self.assertIsNot(bc, sub_bc) - self.assertEqual(len(list(bc_init.children())), - len(list(sub_bc.children()))) + self.assertEqual(len(list(bc_init.children())), len(list(sub_bc.children()))) for c1, c2 in zip(bc_init.children(), sub_bc.children()): self.assertIs(c1.parent, bc_init) self.assertIs(c2.parent, sub_bc) self.assertIsNot(c1, c2) self.assertEqual(c1.name, c2.name) - self.assertEqual(len(list(bc_init.components())), - len(list(sub_bc.components()))) + self.assertEqual( + len(list(bc_init.components())), len(list(sub_bc.components())) + ) for c1, c2 in zip(bc_init.components(), sub_bc.components()): self.assertIsNot(c1, c2) self.assertEqual(c1.name, c2.name) - if hasattr(c1,'expr'): + if hasattr(c1, 'expr'): self.assertIsNot(c1.expr, c2.expr) self.assertEqual(str(c1.expr), str(c2.expr)) - self.assertEqual(len(_collect_expr_components(c1.expr)), - len(_collect_expr_components(c2.expr))) - for subc1, subc2 in zip(_collect_expr_components(c1.expr).values(), - _collect_expr_components(c2.expr).values()): + self.assertEqual( + len(_collect_expr_components(c1.expr)), + len(_collect_expr_components(c2.expr)), + ) + for subc1, subc2 in zip( + _collect_expr_components(c1.expr).values(), + _collect_expr_components(c2.expr).values(), + ): self.assertIsNot(subc1, subc2) self.assertEqual(subc1.name, subc2.name) @@ -1081,14 +1309,16 @@ def test_activate(self): # this is a randomized test def test_ordering(self): b = block() - attr_types = [variable, - constraint, - parameter, - expression, - data_expression, - objective, - variable, - block] + attr_types = [ + variable, + constraint, + parameter, + expression, + data_expression, + objective, + variable, + block, + ] key_types = [float, int] keys = collections.deque() objs = collections.deque() @@ -1110,8 +1340,8 @@ def test_ordering(self): keys.rotate(-1) objs.rotate(-1) -class _Test_block_base(object): +class _Test_block_base(object): _children = None _child_key = None _components_no_descend = None @@ -1123,151 +1353,158 @@ class _Test_block_base(object): def test_overwrite_warning(self): b = self._block.clone() name = "x" - while hasattr(b,name): + while hasattr(b, name): name += "x" out = StringIO() with LoggingIntercept(out, 'pyomo.core'): - setattr(b,name,variable()) - setattr(b,name,getattr(b,name)) + setattr(b, name, variable()) + setattr(b, name, getattr(b, name)) assert out.getvalue() == "", str(out.getvalue()) with LoggingIntercept(out, 'pyomo.core'): - setattr(b,name,variable()) - assert out.getvalue() == \ - ("Implicitly replacing attribute %s (type=variable) " - "on block with new object (type=variable). This " - "is usually indicative of a modeling error. " - "To avoid this warning, delete the original " - "object from the block before assigning a new " - "object.\n" % (name)) + setattr(b, name, variable()) + assert out.getvalue() == ( + "Implicitly replacing attribute %s (type=variable) " + "on block with new object (type=variable). This " + "is usually indicative of a modeling error. " + "To avoid this warning, delete the original " + "object from the block before assigning a new " + "object.\n" % (name) + ) out = StringIO() with LoggingIntercept(out, 'pyomo.core'): - setattr(b,name,1.0) - assert out.getvalue() == \ - ("Implicitly replacing attribute %s (type=variable) " - "on block with new object (type=float). This " - "is usually indicative of a modeling error. " - "To avoid this warning, delete the original " - "object from the block before assigning a new " - "object.\n" % (name)) + setattr(b, name, 1.0) + assert out.getvalue() == ( + "Implicitly replacing attribute %s (type=variable) " + "on block with new object (type=float). This " + "is usually indicative of a modeling error. " + "To avoid this warning, delete the original " + "object from the block before assigning a new " + "object.\n" % (name) + ) def test_clone(self): b = self._block bc = b.clone() self.assertIsNot(b, bc) - self.assertEqual(len(list(b.children())), - len(list(bc.children()))) + self.assertEqual(len(list(b.children())), len(list(bc.children()))) for c1, c2 in zip(b.children(), bc.children()): self.assertIs(c1.parent, b) self.assertIs(c2.parent, bc) self.assertIsNot(c1, c2) self.assertEqual(c1.name, c2.name) - self.assertEqual(len(list(b.components())), - len(list(bc.components()))) + self.assertEqual(len(list(b.components())), len(list(bc.components()))) for c1, c2 in zip(b.components(), bc.components()): self.assertIsNot(c1, c2) self.assertEqual(c1.name, c2.name) def test_pickle(self): - b = pickle.loads( - pickle.dumps(self._block)) - self.assertEqual(len(list(pmo.preorder_traversal(b, active=None))), - len(self._names)+1) + b = pickle.loads(pickle.dumps(self._block)) + self.assertEqual( + len(list(pmo.preorder_traversal(b, active=None))), len(self._names) + 1 + ) names = pmo.generate_names(b, active=None) - self.assertEqual(sorted(names.values()), - sorted(self._names.values())) + self.assertEqual(sorted(names.values()), sorted(self._names.values())) def test_preorder_traversal(self): # this first test makes failures a # little easier to debug self.assertEqual( - [str(obj) for obj in pmo.preorder_traversal(self._block, - active=None)], - [str(obj) for obj in self._preorder]) + [str(obj) for obj in pmo.preorder_traversal(self._block, active=None)], + [str(obj) for obj in self._preorder], + ) self.assertEqual( - [id(obj) for obj in pmo.preorder_traversal(self._block, - active=None)], - [id(obj) for obj in self._preorder]) + [id(obj) for obj in pmo.preorder_traversal(self._block, active=None)], + [id(obj) for obj in self._preorder], + ) # this first test makes failures a # little easier to debug self.assertEqual( - [str(obj) for obj in pmo.preorder_traversal( - self._block, - active=None, - ctype=IVariable)], - [str(obj) for obj in self._preorder - if obj.ctype in (IBlock, IVariable)]) + [ + str(obj) + for obj in pmo.preorder_traversal( + self._block, active=None, ctype=IVariable + ) + ], + [str(obj) for obj in self._preorder if obj.ctype in (IBlock, IVariable)], + ) self.assertEqual( - [id(obj) for obj in pmo.preorder_traversal( - self._block, - active=None, - ctype=IVariable)], - [id(obj) for obj in self._preorder - if obj.ctype in (IBlock, IVariable)]) + [ + id(obj) + for obj in pmo.preorder_traversal( + self._block, active=None, ctype=IVariable + ) + ], + [id(obj) for obj in self._preorder if obj.ctype in (IBlock, IVariable)], + ) def test_preorder_traversal_descend_check(self): def descend(x): self.assertTrue(x._is_container) return True - order = list(pmo.preorder_traversal(self._block, - active=None, - descend=descend)) + + order = list(pmo.preorder_traversal(self._block, active=None, descend=descend)) # this first test makes failures a # little easier to debug self.assertEqual( - [str(obj) for obj in order], - [str(obj) for obj in self._preorder]) + [str(obj) for obj in order], [str(obj) for obj in self._preorder] + ) self.assertEqual( - [id(obj) for obj in order], - [id(obj) for obj in self._preorder]) + [id(obj) for obj in order], [id(obj) for obj in self._preorder] + ) def descend(x): self.assertTrue(x._is_container) return True - order = list(pmo.preorder_traversal(self._block, - active=None, - ctype=IVariable, - descend=descend)) + + order = list( + pmo.preorder_traversal( + self._block, active=None, ctype=IVariable, descend=descend + ) + ) # this first test makes failures a # little easier to debug self.assertEqual( [str(obj) for obj in order], - [str(obj) for obj in self._preorder - if obj.ctype in (IBlock, IVariable)]) + [str(obj) for obj in self._preorder if obj.ctype in (IBlock, IVariable)], + ) self.assertEqual( [id(obj) for obj in order], - [id(obj) for obj in self._preorder - if obj.ctype in (IBlock, IVariable)]) + [id(obj) for obj in self._preorder if obj.ctype in (IBlock, IVariable)], + ) def descend(x): if x.parent is self._block: return False return True - order = list(pmo.preorder_traversal(self._block, - active=None, - descend=descend)) + + order = list(pmo.preorder_traversal(self._block, active=None, descend=descend)) # this first test makes failures a # little easier to debug self.assertEqual( [str(obj) for obj in order], - [str(obj) for obj in self._preorder - if (obj.parent is None) or \ - (obj.parent is self._block)]) + [ + str(obj) + for obj in self._preorder + if (obj.parent is None) or (obj.parent is self._block) + ], + ) self.assertEqual( [id(obj) for obj in order], - [id(obj) for obj in self._preorder - if (obj.parent is None) or \ - (obj.parent is self._block)]) + [ + id(obj) + for obj in self._preorder + if (obj.parent is None) or (obj.parent is self._block) + ], + ) def test_child(self): for child in self._child_key: parent = child.parent self.assertTrue(parent is not None) - self.assertTrue(id(child) in set( - id(_c) for _c in self._children[parent])) - self.assertIs(parent.child(self._child_key[child]), - child) + self.assertTrue(id(child) in set(id(_c) for _c in self._children[parent])) + self.assertIs(parent.child(self._child_key[child]), child) with self.assertRaises(KeyError): parent.child("_not_a_valid_child_key_") @@ -1280,43 +1517,56 @@ def test_children(self): # this first test makes failures a # little easier to debug self.assertEqual( - sorted(str(child) - for child in obj.children()), - sorted(str(child) - for child in self._children[obj])) + sorted(str(child) for child in obj.children()), + sorted(str(child) for child in self._children[obj]), + ) self.assertEqual( set(id(child) for child in obj.children()), - set(id(child) for child in self._children[obj])) + set(id(child) for child in self._children[obj]), + ) # this first test makes failures a # little easier to debug self.assertEqual( - sorted(str(child) - for child in obj.children(ctype=IBlock)), - sorted(str(child) - for child in self._children[obj] - if child.ctype is IBlock)) + sorted(str(child) for child in obj.children(ctype=IBlock)), + sorted( + str(child) + for child in self._children[obj] + if child.ctype is IBlock + ), + ) self.assertEqual( set(id(child) for child in obj.children(ctype=IBlock)), - set(id(child) for child in self._children[obj] - if child.ctype is IBlock)) + set( + id(child) + for child in self._children[obj] + if child.ctype is IBlock + ), + ) # this first test makes failures a # little easier to debug self.assertEqual( - sorted(str(child) - for child in obj.children(ctype=IVariable)), - sorted(str(child) - for child in self._children[obj] - if child.ctype is IVariable)) + sorted(str(child) for child in obj.children(ctype=IVariable)), + sorted( + str(child) + for child in self._children[obj] + if child.ctype is IVariable + ), + ) self.assertEqual( set(id(child) for child in obj.children(ctype=IVariable)), - set(id(child) for child in self._children[obj] - if child.ctype is IVariable)) + set( + id(child) + for child in self._children[obj] + if child.ctype is IVariable + ), + ) elif isinstance(obj, ICategorizedObjectContainer): for child in obj.children(): self.assertTrue(child.parent is obj) self.assertEqual( set(id(child) for child in obj.children()), - set(id(child) for child in self._children[obj])) + set(id(child) for child in self._children[obj]), + ) else: self.assertEqual(len(self._children[obj]), 0) @@ -1325,61 +1575,64 @@ def test_components_no_descend_active_None(self): self.assertTrue(isinstance(obj, ICategorizedObjectContainer)) self.assertTrue(isinstance(obj, IBlock)) for c in obj.components(descend_into=False): - self.assertTrue( - _path_to_object_exists(obj, c)) + self.assertTrue(_path_to_object_exists(obj, c)) # test ctype=IBlock self.assertEqual( - sorted(str(_b) - for _b in - obj.components(ctype=IBlock, - active=None, - descend_into=False)), - sorted(str(_b) - for _b in - self._components_no_descend[obj][IBlock])) + sorted( + str(_b) + for _b in obj.components( + ctype=IBlock, active=None, descend_into=False + ) + ), + sorted(str(_b) for _b in self._components_no_descend[obj][IBlock]), + ) self.assertEqual( - set(id(_b) for _b in - obj.components(ctype=IBlock, - active=None, - descend_into=False)), - set(id(_b) for _b in - self._components_no_descend[obj][IBlock])) + set( + id(_b) + for _b in obj.components( + ctype=IBlock, active=None, descend_into=False + ) + ), + set(id(_b) for _b in self._components_no_descend[obj][IBlock]), + ) # test ctype=IVariable self.assertEqual( - sorted(str(_v) - for _v in - obj.components(ctype=IVariable, - active=None, - descend_into=False)), - sorted(str(_v) - for _v in - self._components_no_descend[obj][IVariable])) + sorted( + str(_v) + for _v in obj.components( + ctype=IVariable, active=None, descend_into=False + ) + ), + sorted(str(_v) for _v in self._components_no_descend[obj][IVariable]), + ) self.assertEqual( - set(id(_v) for _v in - obj.components(ctype=IVariable, - active=None, - descend_into=False)), - set(id(_v) for _v in - self._components_no_descend[obj][IVariable])) + set( + id(_v) + for _v in obj.components( + ctype=IVariable, active=None, descend_into=False + ) + ), + set(id(_v) for _v in self._components_no_descend[obj][IVariable]), + ) # test no ctype self.assertEqual( - sorted(str(_c) - for _c in - obj.components(active=None, - descend_into=False)), - sorted(str(_c) - for ctype in - self._components_no_descend[obj] - for _c in - self._components_no_descend[obj][ctype])) + sorted( + str(_c) for _c in obj.components(active=None, descend_into=False) + ), + sorted( + str(_c) + for ctype in self._components_no_descend[obj] + for _c in self._components_no_descend[obj][ctype] + ), + ) self.assertEqual( - set(id(_c) for _c in - obj.components(active=None, - descend_into=False)), - set(id(_c) for ctype in - self._components_no_descend[obj] - for _c in - self._components_no_descend[obj][ctype])) + set(id(_c) for _c in obj.components(active=None, descend_into=False)), + set( + id(_c) + for ctype in self._components_no_descend[obj] + for _c in self._components_no_descend[obj][ctype] + ), + ) def test_components_no_descend_active_True(self): for obj in self._components_no_descend: @@ -1387,128 +1640,147 @@ def test_components_no_descend_active_True(self): self.assertTrue(isinstance(obj, IBlock)) # test ctype=IBlock self.assertEqual( - sorted(str(_b) - for _b in - obj.components(ctype=IBlock, - active=True, - descend_into=False)), - sorted(str(_b) - for _b in - self._components_no_descend[obj][IBlock] - if _b.active) - if getattr(obj, 'active', True) else []) + sorted( + str(_b) + for _b in obj.components( + ctype=IBlock, active=True, descend_into=False + ) + ), + sorted( + str(_b) + for _b in self._components_no_descend[obj][IBlock] + if _b.active + ) + if getattr(obj, 'active', True) + else [], + ) self.assertEqual( - set(id(_b) for _b in - obj.components(ctype=IBlock, - active=True, - descend_into=False)), - set(id(_b) for _b in - self._components_no_descend[obj][IBlock] - if _b.active) - if getattr(obj, 'active', True) else set()) + set( + id(_b) + for _b in obj.components( + ctype=IBlock, active=True, descend_into=False + ) + ), + set( + id(_b) + for _b in self._components_no_descend[obj][IBlock] + if _b.active + ) + if getattr(obj, 'active', True) + else set(), + ) # test ctype=IVariable self.assertEqual( - sorted(str(_v) - for _v in - obj.components(ctype=IVariable, - active=True, - descend_into=False)), - sorted(str(_v) - for _v in - self._components_no_descend[obj][IVariable]) - if getattr(obj, 'active', True) else []) + sorted( + str(_v) + for _v in obj.components( + ctype=IVariable, active=True, descend_into=False + ) + ), + sorted(str(_v) for _v in self._components_no_descend[obj][IVariable]) + if getattr(obj, 'active', True) + else [], + ) self.assertEqual( - set(id(_v) for _v in - obj.components(ctype=IVariable, - active=True, - descend_into=False)), - set(id(_v) for _v in - self._components_no_descend[obj][IVariable]) - if getattr(obj, 'active', True) else set()) + set( + id(_v) + for _v in obj.components( + ctype=IVariable, active=True, descend_into=False + ) + ), + set(id(_v) for _v in self._components_no_descend[obj][IVariable]) + if getattr(obj, 'active', True) + else set(), + ) # test no ctype self.assertEqual( - sorted(str(_c) - for _c in - obj.components(active=True, - descend_into=False)), - sorted(str(_c) - for ctype in - self._components_no_descend[obj] - for _c in - self._components_no_descend[obj][ctype] - if getattr(_c, "active", True)) - if getattr(obj, 'active', True) else []) + sorted( + str(_c) for _c in obj.components(active=True, descend_into=False) + ), + sorted( + str(_c) + for ctype in self._components_no_descend[obj] + for _c in self._components_no_descend[obj][ctype] + if getattr(_c, "active", True) + ) + if getattr(obj, 'active', True) + else [], + ) self.assertEqual( - set(id(_c) for _c in - obj.components(active=True, - descend_into=False)), - set(id(_c) for ctype in - self._components_no_descend[obj] - for _c in - self._components_no_descend[obj][ctype] - if getattr(_c, "active", True)) - if getattr(obj, 'active', True) else set()) + set(id(_c) for _c in obj.components(active=True, descend_into=False)), + set( + id(_c) + for ctype in self._components_no_descend[obj] + for _c in self._components_no_descend[obj][ctype] + if getattr(_c, "active", True) + ) + if getattr(obj, 'active', True) + else set(), + ) def test_components_active_None(self): for obj in self._components: self.assertTrue(isinstance(obj, ICategorizedObjectContainer)) self.assertTrue(isinstance(obj, IBlock)) for c in obj.components(descend_into=True): - self.assertTrue( - _path_to_object_exists(obj, c)) + self.assertTrue(_path_to_object_exists(obj, c)) # test ctype=IBlock self.assertEqual( - sorted(str(_b) - for _b in - obj.components(ctype=IBlock, - active=None, - descend_into=True)), - sorted(str(_b) - for _b in - self._components[obj][IBlock])) + sorted( + str(_b) + for _b in obj.components( + ctype=IBlock, active=None, descend_into=True + ) + ), + sorted(str(_b) for _b in self._components[obj][IBlock]), + ) self.assertEqual( - set(id(_b) for _b in - obj.components(ctype=IBlock, - active=None, - descend_into=True)), - set(id(_b) for _b in - self._components[obj][IBlock])) + set( + id(_b) + for _b in obj.components( + ctype=IBlock, active=None, descend_into=True + ) + ), + set(id(_b) for _b in self._components[obj][IBlock]), + ) # test ctype=IVariable self.assertEqual( - sorted(str(_v) - for _v in - obj.components(ctype=IVariable, - active=None, - descend_into=True)), - sorted(str(_v) - for _v in - self._components[obj][IVariable])) + sorted( + str(_v) + for _v in obj.components( + ctype=IVariable, active=None, descend_into=True + ) + ), + sorted(str(_v) for _v in self._components[obj][IVariable]), + ) self.assertEqual( - set(id(_v) for _v in - obj.components(ctype=IVariable, - active=None, - descend_into=True)), - set(id(_v) for _v in - self._components[obj][IVariable])) + set( + id(_v) + for _v in obj.components( + ctype=IVariable, active=None, descend_into=True + ) + ), + set(id(_v) for _v in self._components[obj][IVariable]), + ) # test no ctype self.assertEqual( - sorted(str(_c) - for _c in - obj.components(active=None, - descend_into=True)), - sorted(str(_c) - for ctype in - self._components[obj] - for _c in - self._components[obj][ctype])) + sorted( + str(_c) for _c in obj.components(active=None, descend_into=True) + ), + sorted( + str(_c) + for ctype in self._components[obj] + for _c in self._components[obj][ctype] + ), + ) self.assertEqual( - set(id(_c) for _c in - obj.components(active=None, - descend_into=True)), - set(id(_c) for ctype in - self._components[obj] - for _c in - self._components[obj][ctype])) + set(id(_c) for _c in obj.components(active=None, descend_into=True)), + set( + id(_c) + for ctype in self._components[obj] + for _c in self._components[obj][ctype] + ), + ) def test_components_active_True(self): for obj in self._components: @@ -1516,72 +1788,86 @@ def test_components_active_True(self): self.assertTrue(isinstance(obj, IBlock)) # test ctype=IBlock self.assertEqual( - sorted(str(_b) - for _b in - obj.components(ctype=IBlock, - active=True, - descend_into=True)), - sorted(str(_b) - for _b in - self._components[obj][IBlock] - if _b.active) - if getattr(obj, 'active', True) else []) + sorted( + str(_b) + for _b in obj.components( + ctype=IBlock, active=True, descend_into=True + ) + ), + sorted(str(_b) for _b in self._components[obj][IBlock] if _b.active) + if getattr(obj, 'active', True) + else [], + ) self.assertEqual( - set(id(_b) for _b in - obj.components(ctype=IBlock, - active=True, - descend_into=True)), - set(id(_b) for _b in - self._components[obj][IBlock] - if _b.active) - if getattr(obj, 'active', True) else set()) + set( + id(_b) + for _b in obj.components( + ctype=IBlock, active=True, descend_into=True + ) + ), + set(id(_b) for _b in self._components[obj][IBlock] if _b.active) + if getattr(obj, 'active', True) + else set(), + ) # test ctype=IVariable self.assertEqual( - sorted(str(_v) - for _v in - obj.components(ctype=IVariable, - active=True, - descend_into=True)), - sorted(str(_v) - for _v in - self._components[obj][IVariable] - if _active_path_to_object_exists(obj, _v)) - if getattr(obj, 'active', True) else []) + sorted( + str(_v) + for _v in obj.components( + ctype=IVariable, active=True, descend_into=True + ) + ), + sorted( + str(_v) + for _v in self._components[obj][IVariable] + if _active_path_to_object_exists(obj, _v) + ) + if getattr(obj, 'active', True) + else [], + ) self.assertEqual( - set(id(_v) for _v in - obj.components(ctype=IVariable, - active=True, - descend_into=True)), - set(id(_v) for _v in - self._components[obj][IVariable] - if _active_path_to_object_exists(obj, _v)) - if getattr(obj, 'active', True) else set()) + set( + id(_v) + for _v in obj.components( + ctype=IVariable, active=True, descend_into=True + ) + ), + set( + id(_v) + for _v in self._components[obj][IVariable] + if _active_path_to_object_exists(obj, _v) + ) + if getattr(obj, 'active', True) + else set(), + ) # test no ctype self.assertEqual( - sorted(str(_c) - for _c in - obj.components(active=True, - descend_into=True)), - sorted(str(_c) - for ctype in - self._components[obj] - for _c in - self._components[obj][ctype] - if _active_path_to_object_exists(obj, _c)) - if getattr(obj, 'active', True) else []) + sorted( + str(_c) for _c in obj.components(active=True, descend_into=True) + ), + sorted( + str(_c) + for ctype in self._components[obj] + for _c in self._components[obj][ctype] + if _active_path_to_object_exists(obj, _c) + ) + if getattr(obj, 'active', True) + else [], + ) self.assertEqual( - set(id(_c) for _c in - obj.components(active=True, - descend_into=True)), - set(id(_c) for ctype in - self._components[obj] - for _c in - self._components[obj][ctype] - if _active_path_to_object_exists(obj, _c)) - if getattr(obj, 'active', True) else set()) + set(id(_c) for _c in obj.components(active=True, descend_into=True)), + set( + id(_c) + for ctype in self._components[obj] + for _c in self._components[obj][ctype] + if _active_path_to_object_exists(obj, _c) + ) + if getattr(obj, 'active', True) + else set(), + ) -class _Test_block(_Test_block_base): +class _Test_block(_Test_block_base): _do_clone = None @classmethod @@ -1616,25 +1902,27 @@ def setUpClass(cls): # for tests in the base testing class # - cls._preorder = [model, - model.v_1, - model.vdict_1, - model.vdict_1[None], - model.vlist_1, - model.vlist_1[0], - model.vlist_1[1], - model.b_1, - model.b_1.v_2, - model.b_1.b_2, - model.b_1.b_2.b_3, - model.b_1.b_2.v_3, - model.b_1.b_2.vlist_3, - model.b_1.b_2.vlist_3[0], - model.bdict_1, - model.blist_1, - model.blist_1[0], - model.blist_1[0].v_2, - model.blist_1[0].b_2] + cls._preorder = [ + model, + model.v_1, + model.vdict_1, + model.vdict_1[None], + model.vlist_1, + model.vlist_1[0], + model.vlist_1[1], + model.b_1, + model.b_1.v_2, + model.b_1.b_2, + model.b_1.b_2.b_3, + model.b_1.b_2.v_3, + model.b_1.b_2.vlist_3, + model.b_1.b_2.vlist_3[0], + model.bdict_1, + model.blist_1, + model.blist_1[0], + model.blist_1[0].v_2, + model.blist_1[0].b_2, + ] cls._names = ComponentMap() cls._names[model.v_1] = "v_1" @@ -1657,27 +1945,27 @@ def setUpClass(cls): cls._names[model.blist_1[0].b_2] = "blist_1[0].b_2" cls._children = ComponentMap() - cls._children[model] = [model.v_1, - model.vdict_1, - model.vlist_1, - model.b_1, - model.bdict_1, - model.blist_1] + cls._children[model] = [ + model.v_1, + model.vdict_1, + model.vlist_1, + model.b_1, + model.bdict_1, + model.blist_1, + ] cls._children[model.vdict_1] = [model.vdict_1[None]] - cls._children[model.vlist_1] = [model.vlist_1[0], - model.vlist_1[1]] - cls._children[model.b_1] = [model.b_1.v_2, - model.b_1.b_2] - cls._children[model.b_1.b_2] = [model.b_1.b_2.v_3, - model.b_1.b_2.vlist_3, - model.b_1.b_2.b_3] + cls._children[model.vlist_1] = [model.vlist_1[0], model.vlist_1[1]] + cls._children[model.b_1] = [model.b_1.v_2, model.b_1.b_2] + cls._children[model.b_1.b_2] = [ + model.b_1.b_2.v_3, + model.b_1.b_2.vlist_3, + model.b_1.b_2.b_3, + ] cls._children[model.b_1.b_2.b_3] = [] - cls._children[model.b_1.b_2.vlist_3] = \ - [model.b_1.b_2.vlist_3[0]] + cls._children[model.b_1.b_2.vlist_3] = [model.b_1.b_2.vlist_3[0]] cls._children[model.bdict_1] = [] cls._children[model.blist_1] = [model.blist_1[0]] - cls._children[model.blist_1[0]] = [model.blist_1[0].v_2, - model.blist_1[0].b_2] + cls._children[model.blist_1[0]] = [model.blist_1[0].v_2, model.blist_1[0].b_2] cls._child_key = ComponentMap() cls._child_key[model.v_1] = "v_1" @@ -1701,89 +1989,83 @@ def setUpClass(cls): cls._components_no_descend = ComponentMap() cls._components_no_descend[model] = {} - cls._components_no_descend[model][IVariable] = \ - [model.v_1, - model.vdict_1[None], - model.vlist_1[0], - model.vlist_1[1]] - cls._components_no_descend[model][IBlock] = \ - [model.b_1, - model.blist_1[0]] + cls._components_no_descend[model][IVariable] = [ + model.v_1, + model.vdict_1[None], + model.vlist_1[0], + model.vlist_1[1], + ] + cls._components_no_descend[model][IBlock] = [model.b_1, model.blist_1[0]] cls._components_no_descend[model.b_1] = {} - cls._components_no_descend[model.b_1][IVariable] = \ - [model.b_1.v_2] - cls._components_no_descend[model.b_1][IBlock] = \ - [model.b_1.b_2] + cls._components_no_descend[model.b_1][IVariable] = [model.b_1.v_2] + cls._components_no_descend[model.b_1][IBlock] = [model.b_1.b_2] cls._components_no_descend[model.b_1.b_2] = {} - cls._components_no_descend[model.b_1.b_2][IVariable] = \ - [model.b_1.b_2.v_3, - model.b_1.b_2.vlist_3[0]] - cls._components_no_descend[model.b_1.b_2][IBlock] = \ - [model.b_1.b_2.b_3] + cls._components_no_descend[model.b_1.b_2][IVariable] = [ + model.b_1.b_2.v_3, + model.b_1.b_2.vlist_3[0], + ] + cls._components_no_descend[model.b_1.b_2][IBlock] = [model.b_1.b_2.b_3] cls._components_no_descend[model.b_1.b_2.b_3] = {} cls._components_no_descend[model.b_1.b_2.b_3][IVariable] = [] cls._components_no_descend[model.b_1.b_2.b_3][IBlock] = [] cls._components_no_descend[model.blist_1[0]] = {} - cls._components_no_descend[model.blist_1[0]][IVariable] = \ - [model.blist_1[0].v_2] - cls._components_no_descend[model.blist_1[0]][IBlock] = \ - [model.blist_1[0].b_2] + cls._components_no_descend[model.blist_1[0]][IVariable] = [model.blist_1[0].v_2] + cls._components_no_descend[model.blist_1[0]][IBlock] = [model.blist_1[0].b_2] cls._components_no_descend[model.blist_1[0].b_2] = {} cls._components_no_descend[model.blist_1[0].b_2][IVariable] = [] cls._components_no_descend[model.blist_1[0].b_2][IBlock] = [] cls._components = ComponentMap() cls._components[model] = {} - cls._components[model][IVariable] = \ - [model.v_1, - model.vdict_1[None], - model.vlist_1[0], - model.vlist_1[1], - model.b_1.v_2, - model.b_1.b_2.v_3, - model.b_1.b_2.vlist_3[0], - model.blist_1[0].v_2] - cls._components[model][IBlock] = \ - [model.b_1, - model.blist_1[0], - model.b_1.b_2, - model.b_1.b_2.b_3, - model.blist_1[0].b_2] + cls._components[model][IVariable] = [ + model.v_1, + model.vdict_1[None], + model.vlist_1[0], + model.vlist_1[1], + model.b_1.v_2, + model.b_1.b_2.v_3, + model.b_1.b_2.vlist_3[0], + model.blist_1[0].v_2, + ] + cls._components[model][IBlock] = [ + model.b_1, + model.blist_1[0], + model.b_1.b_2, + model.b_1.b_2.b_3, + model.blist_1[0].b_2, + ] cls._components[model.b_1] = {} - cls._components[model.b_1][IVariable] = \ - [model.b_1.v_2, - model.b_1.b_2.v_3, - model.b_1.b_2.vlist_3[0]] - cls._components[model.b_1][IBlock] = \ - [model.b_1.b_2, - model.b_1.b_2.b_3] + cls._components[model.b_1][IVariable] = [ + model.b_1.v_2, + model.b_1.b_2.v_3, + model.b_1.b_2.vlist_3[0], + ] + cls._components[model.b_1][IBlock] = [model.b_1.b_2, model.b_1.b_2.b_3] cls._components[model.b_1.b_2] = {} - cls._components[model.b_1.b_2][IVariable] = \ - [model.b_1.b_2.v_3, - model.b_1.b_2.vlist_3[0]] - cls._components[model.b_1.b_2][IBlock] = \ - [model.b_1.b_2.b_3] + cls._components[model.b_1.b_2][IVariable] = [ + model.b_1.b_2.v_3, + model.b_1.b_2.vlist_3[0], + ] + cls._components[model.b_1.b_2][IBlock] = [model.b_1.b_2.b_3] cls._components[model.b_1.b_2.b_3] = {} cls._components[model.b_1.b_2.b_3][IVariable] = [] cls._components[model.b_1.b_2.b_3][IBlock] = [] cls._components[model.blist_1[0]] = {} - cls._components[model.blist_1[0]][IVariable] = \ - [model.blist_1[0].v_2] - cls._components[model.blist_1[0]][IBlock] = \ - [model.blist_1[0].b_2] + cls._components[model.blist_1[0]][IVariable] = [model.blist_1[0].v_2] + cls._components[model.blist_1[0]][IBlock] = [model.blist_1[0].b_2] cls._components[model.blist_1[0].b_2] = {} cls._components[model.blist_1[0].b_2][IVariable] = [] cls._components[model.blist_1[0].b_2][IBlock] = [] cls._blocks_no_descend = ComponentMap() for obj in cls._components_no_descend: - cls._blocks_no_descend[obj] = \ - [obj] + cls._components_no_descend[obj][IBlock] + cls._blocks_no_descend[obj] = [obj] + cls._components_no_descend[obj][ + IBlock + ] cls._blocks = ComponentMap() for obj in cls._components: - cls._blocks[obj] = \ - [obj] + cls._components[obj][IBlock] + cls._blocks[obj] = [obj] + cls._components[obj][IBlock] def test_init(self): b = block() @@ -1865,93 +2147,67 @@ def test_delattr(self): def test_collect_ctypes_small_block_storage(self): b = block() - self.assertEqual(b.collect_ctypes(active=None), - set()) - self.assertEqual(b.collect_ctypes(), - set()) - self.assertEqual(b.collect_ctypes(active=True), - set()) + self.assertEqual(b.collect_ctypes(active=None), set()) + self.assertEqual(b.collect_ctypes(), set()) + self.assertEqual(b.collect_ctypes(active=True), set()) b.x = variable() - self.assertEqual(b.collect_ctypes(active=None), - set([IVariable])) - self.assertEqual(b.collect_ctypes(), - set([IVariable])) - self.assertEqual(b.collect_ctypes(active=True), - set([IVariable])) + self.assertEqual(b.collect_ctypes(active=None), set([IVariable])) + self.assertEqual(b.collect_ctypes(), set([IVariable])) + self.assertEqual(b.collect_ctypes(active=True), set([IVariable])) b.y = constraint() - self.assertEqual(b.collect_ctypes(active=None), - set([IVariable, IConstraint])) - self.assertEqual(b.collect_ctypes(), - set([IVariable, IConstraint])) - self.assertEqual(b.collect_ctypes(active=True), - set([IVariable, IConstraint])) + self.assertEqual(b.collect_ctypes(active=None), set([IVariable, IConstraint])) + self.assertEqual(b.collect_ctypes(), set([IVariable, IConstraint])) + self.assertEqual(b.collect_ctypes(active=True), set([IVariable, IConstraint])) b.y.deactivate() - self.assertEqual(b.collect_ctypes(active=None), - set([IVariable, IConstraint])) - self.assertEqual(b.collect_ctypes(), - set([IVariable])) - self.assertEqual(b.collect_ctypes(active=True), - set([IVariable])) + self.assertEqual(b.collect_ctypes(active=None), set([IVariable, IConstraint])) + self.assertEqual(b.collect_ctypes(), set([IVariable])) + self.assertEqual(b.collect_ctypes(active=True), set([IVariable])) B = block() B.b = b - self.assertEqual(B.collect_ctypes(descend_into=False, - active=None), - set([IBlock])) - self.assertEqual(B.collect_ctypes(descend_into=False), - set([IBlock])) - self.assertEqual(B.collect_ctypes(descend_into=False, - active=True), - set([IBlock])) - self.assertEqual(B.collect_ctypes(active=None), - set([IBlock, IVariable, IConstraint])) - self.assertEqual(B.collect_ctypes(), - set([IBlock, IVariable])) - self.assertEqual(B.collect_ctypes(active=True), - set([IBlock, IVariable])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=None), set([IBlock]) + ) + self.assertEqual(B.collect_ctypes(descend_into=False), set([IBlock])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=True), set([IBlock]) + ) + self.assertEqual( + B.collect_ctypes(active=None), set([IBlock, IVariable, IConstraint]) + ) + self.assertEqual(B.collect_ctypes(), set([IBlock, IVariable])) + self.assertEqual(B.collect_ctypes(active=True), set([IBlock, IVariable])) b.deactivate() - self.assertEqual(B.collect_ctypes(descend_into=False, - active=None), - set([IBlock])) - self.assertEqual(B.collect_ctypes(descend_into=False), - set([])) - self.assertEqual(B.collect_ctypes(descend_into=False, - active=True), - set([])) - self.assertEqual(B.collect_ctypes(active=None), - set([IBlock, IVariable, IConstraint])) - self.assertEqual(B.collect_ctypes(), - set([])) - self.assertEqual(B.collect_ctypes(active=True), - set([])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=None), set([IBlock]) + ) + self.assertEqual(B.collect_ctypes(descend_into=False), set([])) + self.assertEqual(B.collect_ctypes(descend_into=False, active=True), set([])) + self.assertEqual( + B.collect_ctypes(active=None), set([IBlock, IVariable, IConstraint]) + ) + self.assertEqual(B.collect_ctypes(), set([])) + self.assertEqual(B.collect_ctypes(active=True), set([])) B.x = variable() - self.assertEqual(B.collect_ctypes(descend_into=False, - active=None), - set([IBlock, IVariable])) - self.assertEqual(B.collect_ctypes(descend_into=False), - set([IVariable])) - self.assertEqual(B.collect_ctypes(descend_into=False, - active=True), - set([IVariable])) - self.assertEqual(B.collect_ctypes(active=None), - set([IBlock, IVariable, IConstraint])) - self.assertEqual(B.collect_ctypes(), - set([IVariable])) - self.assertEqual(B.collect_ctypes(active=True), - set([IVariable])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=None), set([IBlock, IVariable]) + ) + self.assertEqual(B.collect_ctypes(descend_into=False), set([IVariable])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=True), set([IVariable]) + ) + self.assertEqual( + B.collect_ctypes(active=None), set([IBlock, IVariable, IConstraint]) + ) + self.assertEqual(B.collect_ctypes(), set([IVariable])) + self.assertEqual(B.collect_ctypes(active=True), set([IVariable])) del b.y - self.assertEqual(b.collect_ctypes(active=None), - set([IVariable])) - self.assertEqual(b.collect_ctypes(), - set([])) - self.assertEqual(b.collect_ctypes(active=True), - set([])) + self.assertEqual(b.collect_ctypes(active=None), set([IVariable])) + self.assertEqual(b.collect_ctypes(), set([])) + self.assertEqual(b.collect_ctypes(active=True), set([])) b.activate() - self.assertEqual(b.collect_ctypes(active=None), - set([IVariable])) - self.assertEqual(b.collect_ctypes(), - set([IVariable])) - self.assertEqual(b.collect_ctypes(active=True), - set([IVariable])) + self.assertEqual(b.collect_ctypes(active=None), set([IVariable])) + self.assertEqual(b.collect_ctypes(), set([IVariable])) + self.assertEqual(b.collect_ctypes(active=True), set([IVariable])) del b.x self.assertEqual(b.collect_ctypes(), set()) @@ -1959,116 +2215,97 @@ def test_collect_ctypes_small_block_storage(self): def test_collect_ctypes_large_block_storage(self): b = block() b._activate_large_storage_mode() - self.assertEqual(b.collect_ctypes(active=None), - set()) - self.assertEqual(b.collect_ctypes(), - set()) - self.assertEqual(b.collect_ctypes(active=True), - set()) + self.assertEqual(b.collect_ctypes(active=None), set()) + self.assertEqual(b.collect_ctypes(), set()) + self.assertEqual(b.collect_ctypes(active=True), set()) b.x = variable() - self.assertEqual(b.collect_ctypes(active=None), - set([IVariable])) - self.assertEqual(b.collect_ctypes(), - set([IVariable])) - self.assertEqual(b.collect_ctypes(active=True), - set([IVariable])) + self.assertEqual(b.collect_ctypes(active=None), set([IVariable])) + self.assertEqual(b.collect_ctypes(), set([IVariable])) + self.assertEqual(b.collect_ctypes(active=True), set([IVariable])) b.y = constraint() - self.assertEqual(b.collect_ctypes(active=None), - set([IVariable, IConstraint])) - self.assertEqual(b.collect_ctypes(), - set([IVariable, IConstraint])) - self.assertEqual(b.collect_ctypes(active=True), - set([IVariable, IConstraint])) + self.assertEqual(b.collect_ctypes(active=None), set([IVariable, IConstraint])) + self.assertEqual(b.collect_ctypes(), set([IVariable, IConstraint])) + self.assertEqual(b.collect_ctypes(active=True), set([IVariable, IConstraint])) b.y.deactivate() - self.assertEqual(b.collect_ctypes(active=None), - set([IVariable, IConstraint])) - self.assertEqual(b.collect_ctypes(), - set([IVariable])) - self.assertEqual(b.collect_ctypes(active=True), - set([IVariable])) + self.assertEqual(b.collect_ctypes(active=None), set([IVariable, IConstraint])) + self.assertEqual(b.collect_ctypes(), set([IVariable])) + self.assertEqual(b.collect_ctypes(active=True), set([IVariable])) B = block() b._activate_large_storage_mode() B.b = b - self.assertEqual(B.collect_ctypes(descend_into=False, - active=None), - set([IBlock])) - self.assertEqual(B.collect_ctypes(descend_into=False), - set([IBlock])) - self.assertEqual(B.collect_ctypes(descend_into=False, - active=True), - set([IBlock])) - self.assertEqual(B.collect_ctypes(active=None), - set([IBlock, IVariable, IConstraint])) - self.assertEqual(B.collect_ctypes(), - set([IBlock, IVariable])) - self.assertEqual(B.collect_ctypes(active=True), - set([IBlock, IVariable])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=None), set([IBlock]) + ) + self.assertEqual(B.collect_ctypes(descend_into=False), set([IBlock])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=True), set([IBlock]) + ) + self.assertEqual( + B.collect_ctypes(active=None), set([IBlock, IVariable, IConstraint]) + ) + self.assertEqual(B.collect_ctypes(), set([IBlock, IVariable])) + self.assertEqual(B.collect_ctypes(active=True), set([IBlock, IVariable])) b.deactivate() - self.assertEqual(B.collect_ctypes(descend_into=False, - active=None), - set([IBlock])) - self.assertEqual(B.collect_ctypes(descend_into=False), - set([])) - self.assertEqual(B.collect_ctypes(descend_into=False, - active=True), - set([])) - self.assertEqual(B.collect_ctypes(active=None), - set([IBlock, IVariable, IConstraint])) - self.assertEqual(B.collect_ctypes(), - set([])) - self.assertEqual(B.collect_ctypes(active=True), - set([])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=None), set([IBlock]) + ) + self.assertEqual(B.collect_ctypes(descend_into=False), set([])) + self.assertEqual(B.collect_ctypes(descend_into=False, active=True), set([])) + self.assertEqual( + B.collect_ctypes(active=None), set([IBlock, IVariable, IConstraint]) + ) + self.assertEqual(B.collect_ctypes(), set([])) + self.assertEqual(B.collect_ctypes(active=True), set([])) B.x = variable() - self.assertEqual(B.collect_ctypes(descend_into=False, - active=None), - set([IBlock, IVariable])) - self.assertEqual(B.collect_ctypes(descend_into=False), - set([IVariable])) - self.assertEqual(B.collect_ctypes(descend_into=False, - active=True), - set([IVariable])) - self.assertEqual(B.collect_ctypes(active=None), - set([IBlock, IVariable, IConstraint])) - self.assertEqual(B.collect_ctypes(), - set([IVariable])) - self.assertEqual(B.collect_ctypes(active=True), - set([IVariable])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=None), set([IBlock, IVariable]) + ) + self.assertEqual(B.collect_ctypes(descend_into=False), set([IVariable])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=True), set([IVariable]) + ) + self.assertEqual( + B.collect_ctypes(active=None), set([IBlock, IVariable, IConstraint]) + ) + self.assertEqual(B.collect_ctypes(), set([IVariable])) + self.assertEqual(B.collect_ctypes(active=True), set([IVariable])) del b.y - self.assertEqual(b.collect_ctypes(active=None), - set([IVariable])) - self.assertEqual(b.collect_ctypes(), - set([])) - self.assertEqual(b.collect_ctypes(active=True), - set([])) + self.assertEqual(b.collect_ctypes(active=None), set([IVariable])) + self.assertEqual(b.collect_ctypes(), set([])) + self.assertEqual(b.collect_ctypes(active=True), set([])) b.activate() - self.assertEqual(b.collect_ctypes(active=None), - set([IVariable])) - self.assertEqual(b.collect_ctypes(), - set([IVariable])) - self.assertEqual(b.collect_ctypes(active=True), - set([IVariable])) + self.assertEqual(b.collect_ctypes(active=None), set([IVariable])) + self.assertEqual(b.collect_ctypes(), set([IVariable])) + self.assertEqual(b.collect_ctypes(active=True), set([IVariable])) del b.x self.assertEqual(b.collect_ctypes(), set()) + class Test_block_noclone(_Test_block, unittest.TestCase): _do_clone = False + class Test_block_clone(_Test_block, unittest.TestCase): _do_clone = True + class _MyBlockBaseBase(block): __slots__ = () + def __init__(self): super(_MyBlockBaseBase, self).__init__() + class _MyBlockBase(_MyBlockBaseBase): __slots__ = ("b",) + def __init__(self): super(_MyBlockBase, self).__init__() self.b = block() self.b.v = variable() + class _MyBlock(_MyBlockBase): # testing when a __dict__ might appear (no __slots__) def __init__(self): @@ -2078,8 +2315,8 @@ def __init__(self): self.v = variable() self.n = 2.0 -class _Test_small_block(_Test_block_base): +class _Test_small_block(_Test_block_base): _do_clone = None @classmethod @@ -2096,12 +2333,14 @@ def setUpClass(cls): # for tests in the base testing class # - cls._preorder = [model, - model.b, - model.b.v, - model.b.blist, - model.b.blist[0], - model.v] + cls._preorder = [ + model, + model.b, + model.b.v, + model.b.blist, + model.b.blist[0], + model.v, + ] cls._names = ComponentMap() cls._names[model.b] = "b" @@ -2111,10 +2350,8 @@ def setUpClass(cls): cls._names[model.v] = "v" cls._children = ComponentMap() - cls._children[model] = [model.b, - model.v] - cls._children[model.b] = [model.b.v, - model.b.blist] + cls._children[model] = [model.b, model.v] + cls._children[model.b] = [model.b.v, model.b.blist] cls._children[model.b.blist] = [model.b.blist[0]] cls._children[model.b.blist[0]] = [] @@ -2149,44 +2386,49 @@ def setUpClass(cls): cls._blocks_no_descend = ComponentMap() for obj in cls._components_no_descend: - cls._blocks_no_descend[obj] = \ - [obj] + cls._components_no_descend[obj][IBlock] + cls._blocks_no_descend[obj] = [obj] + cls._components_no_descend[obj][ + IBlock + ] cls._blocks = ComponentMap() for obj in cls._components: - cls._blocks[obj] = \ - [obj] + cls._components[obj][IBlock] + cls._blocks[obj] = [obj] + cls._components[obj][IBlock] # override this test method on the base class def test_collect_ctypes(self): - self.assertEqual(self._block.collect_ctypes(active=None), - set([IBlock, IVariable])) - self.assertEqual(self._block.collect_ctypes(), - set([IBlock, IVariable])) - self.assertEqual(self._block.collect_ctypes(active=True), - set([IBlock, IVariable])) - self.assertEqual(self._block.collect_ctypes(descend_into=False), - set([IBlock, IVariable])) - self.assertEqual(self._block.collect_ctypes(active=True, - descend_into=False), - set([IBlock, IVariable])) + self.assertEqual( + self._block.collect_ctypes(active=None), set([IBlock, IVariable]) + ) + self.assertEqual(self._block.collect_ctypes(), set([IBlock, IVariable])) + self.assertEqual( + self._block.collect_ctypes(active=True), set([IBlock, IVariable]) + ) + self.assertEqual( + self._block.collect_ctypes(descend_into=False), set([IBlock, IVariable]) + ) + self.assertEqual( + self._block.collect_ctypes(active=True, descend_into=False), + set([IBlock, IVariable]), + ) self._block.b.deactivate() try: - self.assertEqual(self._block.collect_ctypes(active=None), - set([IBlock, IVariable])) - self.assertEqual(self._block.collect_ctypes(), - set([IVariable])) - self.assertEqual(self._block.collect_ctypes(active=True), - set([IVariable])) - self.assertEqual(self._block.collect_ctypes(active=None, - descend_into=False), - set([IBlock, IVariable])) - self.assertEqual(self._block.collect_ctypes(descend_into=False), - set([IVariable])) - self.assertEqual(self._block.collect_ctypes(active=True, - descend_into=False), - set([IVariable])) + self.assertEqual( + self._block.collect_ctypes(active=None), set([IBlock, IVariable]) + ) + self.assertEqual(self._block.collect_ctypes(), set([IVariable])) + self.assertEqual(self._block.collect_ctypes(active=True), set([IVariable])) + self.assertEqual( + self._block.collect_ctypes(active=None, descend_into=False), + set([IBlock, IVariable]), + ) + self.assertEqual( + self._block.collect_ctypes(descend_into=False), set([IVariable]) + ) + self.assertEqual( + self._block.collect_ctypes(active=True, descend_into=False), + set([IVariable]), + ) finally: # use a finally block in case there is a failure above self._block.b.activate() @@ -2220,7 +2462,7 @@ def test_customblock_setattr(self): # test the edge case in setattr b.b = c self.assertIs(c.parent, b) - assert not hasattr(b,"g") + assert not hasattr(b, "g") with self.assertRaises(ValueError): b.g = b.b self.assertIs(b.b.parent, b) @@ -2257,37 +2499,29 @@ def test_customblock__with_dict_setattr(self): def test_inactive_behavior(self): b = _MyBlock() b.deactivate() - self.assertNotEqual(len(list(pmo.preorder_traversal(b, - active=None))), 0) + self.assertNotEqual(len(list(pmo.preorder_traversal(b, active=None))), 0) self.assertEqual(len(list(pmo.preorder_traversal(b))), 0) - self.assertEqual(len(list(pmo.preorder_traversal(b, - active=True))), 0) + self.assertEqual(len(list(pmo.preorder_traversal(b, active=True))), 0) def descend(x): return True + self.assertNotEqual( - len(list(pmo.preorder_traversal(b, - active=None, - descend=descend))), - 0) - self.assertEqual( - len(list(pmo.preorder_traversal(b, - descend=descend))), - 0) - self.assertEqual( - len(list(pmo.preorder_traversal(b, - active=True, - descend=descend))), - 0) + len(list(pmo.preorder_traversal(b, active=None, descend=descend))), 0 + ) + self.assertEqual(len(list(pmo.preorder_traversal(b, descend=descend))), 0) + self.assertEqual( + len(list(pmo.preorder_traversal(b, active=True, descend=descend))), 0 + ) + def descend(x): descend.seen.append(x) return x.active + descend.seen = [] self.assertEqual( - len(list(pmo.preorder_traversal(b, - active=None, - descend=descend))), - 1) + len(list(pmo.preorder_traversal(b, active=None, descend=descend))), 1 + ) self.assertEqual(len(descend.seen), 1) self.assertIs(descend.seen[0], b) @@ -2299,26 +2533,29 @@ def descend(x): self.assertEqual(len(list(pmo.generate_names(b))), 0) self.assertEqual(len(list(pmo.generate_names(b, active=True))), 0) + class Test_small_block_noclone(_Test_small_block, unittest.TestCase): _do_clone = False + class Test_small_block_clone(_Test_small_block, unittest.TestCase): _do_clone = True -class Test_block_dict(_TestActiveDictContainerBase, - unittest.TestCase): + +class Test_block_dict(_TestActiveDictContainerBase, unittest.TestCase): _container_type = block_dict _ctype_factory = lambda self: block() -class Test_block_tuple(_TestActiveTupleContainerBase, - unittest.TestCase): + +class Test_block_tuple(_TestActiveTupleContainerBase, unittest.TestCase): _container_type = block_tuple _ctype_factory = lambda self: block() -class Test_block_list(_TestActiveListContainerBase, - unittest.TestCase): + +class Test_block_list(_TestActiveListContainerBase, unittest.TestCase): _container_type = block_list _ctype_factory = lambda self: block() + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_component_map.py b/pyomo/core/tests/unit/kernel/test_component_map.py index e02e97a9793..64ba700895e 100644 --- a/pyomo/core/tests/unit/kernel/test_component_map.py +++ b/pyomo/core/tests/unit/kernel/test_component_map.py @@ -14,48 +14,38 @@ import pyomo.common.unittest as unittest from pyomo.common.collections import ComponentMap -from pyomo.core.kernel.variable import (variable, - variable_dict, - variable_list) -from pyomo.core.kernel.constraint import (constraint, - constraint_dict, - constraint_list) -from pyomo.core.kernel.objective import (objective, - objective_dict, - objective_list) -from pyomo.core.kernel.expression import (expression, - expression_dict, - expression_list) -from pyomo.core.kernel.block import (block, - block_dict, - block_list) +from pyomo.core.kernel.variable import variable, variable_dict, variable_list +from pyomo.core.kernel.constraint import constraint, constraint_dict, constraint_list +from pyomo.core.kernel.objective import objective, objective_dict, objective_list +from pyomo.core.kernel.expression import expression, expression_dict, expression_list +from pyomo.core.kernel.block import block, block_dict, block_list from pyomo.core.kernel.suffix import suffix class TestComponentMap(unittest.TestCase): - - _components = [(variable(), "v"), - (variable_dict(), "vdict"), - (variable_list(), "vlist"), - (constraint(), "c"), - (constraint_dict(), "cdict"), - (constraint_list(), "clist"), - (objective(), "o"), - (objective_dict(), "odict"), - (objective_list(), "olist"), - (expression(), "e"), - (expression_dict(), "edict"), - (expression_list(), "elist"), - (block(), "b"), - (block_dict(), "bdict"), - (block_list(), "blist"), - (suffix(), "s")] + _components = [ + (variable(), "v"), + (variable_dict(), "vdict"), + (variable_list(), "vlist"), + (constraint(), "c"), + (constraint_dict(), "cdict"), + (constraint_list(), "clist"), + (objective(), "o"), + (objective_dict(), "odict"), + (objective_list(), "olist"), + (expression(), "e"), + (expression_dict(), "edict"), + (expression_list(), "elist"), + (block(), "b"), + (block_dict(), "bdict"), + (block_list(), "blist"), + (suffix(), "s"), + ] def test_pickle(self): c = ComponentMap() self.assertEqual(len(c), 0) - cup = pickle.loads( - pickle.dumps(c)) + cup = pickle.loads(pickle.dumps(c)) self.assertIsNot(cup, c) self.assertEqual(len(cup), 0) @@ -63,8 +53,7 @@ def test_pickle(self): c[v] = 1.0 self.assertEqual(len(c), 1) self.assertEqual(c[v], 1.0) - cup = pickle.loads( - pickle.dumps(c)) + cup = pickle.loads(pickle.dumps(c)) vup = list(cup.keys())[0] self.assertIsNot(cup, c) self.assertIsNot(vup, v) @@ -81,8 +70,7 @@ def test_pickle(self): self.assertIs(v.parent, b.V) self.assertIs(V.parent, b) self.assertIs(b.parent, None) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) Vup = bup.V vup = Vup[0] cup = bup.c @@ -149,29 +137,30 @@ def test_iter(self): ids_seen = set() for c in cmap: ids_seen.add(id(c)) - self.assertEqual(ids_seen, - set(id(c) for c,val in self._components)) + self.assertEqual(ids_seen, set(id(c) for c, val in self._components)) def test_keys(self): cmap = ComponentMap(self._components) - self.assertEqual(sorted(cmap.keys(), key=id), - sorted(list(c for c,val in self._components), - key=id)) + self.assertEqual( + sorted(cmap.keys(), key=id), + sorted(list(c for c, val in self._components), key=id), + ) def test_values(self): cmap = ComponentMap(self._components) - self.assertEqual(sorted(cmap.values()), - sorted(list(val for c,val in self._components))) + self.assertEqual( + sorted(cmap.values()), sorted(list(val for c, val in self._components)) + ) def test_items(self): cmap = ComponentMap(self._components) for x in cmap.items(): self.assertEqual(type(x), tuple) self.assertEqual(len(x), 2) - self.assertEqual(sorted(cmap.items(), - key=lambda _x: (id(_x[0]), _x[1])), - sorted(self._components, - key=lambda _x: (id(_x[0]), _x[1]))) + self.assertEqual( + sorted(cmap.items(), key=lambda _x: (id(_x[0]), _x[1])), + sorted(self._components, key=lambda _x: (id(_x[0]), _x[1])), + ) def test_update(self): cmap = ComponentMap() @@ -191,7 +180,7 @@ def test_clear(self): def test_setdefault(self): cmap = ComponentMap() - for c,_ in self._components: + for c, _ in self._components: with self.assertRaises(KeyError): cmap[c] self.assertTrue(c not in cmap) diff --git a/pyomo/core/tests/unit/kernel/test_component_set.py b/pyomo/core/tests/unit/kernel/test_component_set.py index a5c09d65c84..10a7b27e59e 100644 --- a/pyomo/core/tests/unit/kernel/test_component_set.py +++ b/pyomo/core/tests/unit/kernel/test_component_set.py @@ -14,48 +14,38 @@ import pyomo.common.unittest as unittest from pyomo.common.collections import ComponentSet -from pyomo.core.kernel.variable import (variable, - variable_dict, - variable_list) -from pyomo.core.kernel.constraint import (constraint, - constraint_dict, - constraint_list) -from pyomo.core.kernel.objective import (objective, - objective_dict, - objective_list) -from pyomo.core.kernel.expression import (expression, - expression_dict, - expression_list) -from pyomo.core.kernel.block import (block, - block_dict, - block_list) +from pyomo.core.kernel.variable import variable, variable_dict, variable_list +from pyomo.core.kernel.constraint import constraint, constraint_dict, constraint_list +from pyomo.core.kernel.objective import objective, objective_dict, objective_list +from pyomo.core.kernel.expression import expression, expression_dict, expression_list +from pyomo.core.kernel.block import block, block_dict, block_list from pyomo.core.kernel.suffix import suffix class TestComponentSet(unittest.TestCase): - - _components = [variable(), - variable_dict(), - variable_list(), - constraint(), - constraint_dict(), - constraint_list(), - objective(), - objective_dict(), - objective_list(), - expression(), - expression_dict(), - expression_list(), - block(), - block_dict(), - block_list(), - suffix()] + _components = [ + variable(), + variable_dict(), + variable_list(), + constraint(), + constraint_dict(), + constraint_list(), + objective(), + objective_dict(), + objective_list(), + expression(), + expression_dict(), + expression_list(), + block(), + block_dict(), + block_list(), + suffix(), + ] def test_pickle(self): c = ComponentSet() self.assertEqual(len(c), 0) - cup = pickle.loads( - pickle.dumps(c)) + cup = pickle.loads(pickle.dumps(c)) self.assertIsNot(cup, c) self.assertEqual(len(cup), 0) @@ -63,8 +53,7 @@ def test_pickle(self): c.add(v) self.assertEqual(len(c), 1) self.assertTrue(v in c) - cup = pickle.loads( - pickle.dumps(c)) + cup = pickle.loads(pickle.dumps(c)) vup = cup.pop() cup.add(vup) self.assertIsNot(cup, c) @@ -82,8 +71,7 @@ def test_pickle(self): self.assertIs(v.parent, b.V) self.assertIs(V.parent, b) self.assertIs(b.parent, None) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) Vup = bup.V vup = Vup[0] cup = bup.c @@ -135,8 +123,7 @@ def test_iter(self): ids_seen = set() for c in cset: ids_seen.add(id(c)) - self.assertEqual(ids_seen, - set(id(c) for c in self._components)) + self.assertEqual(ids_seen, set(id(c) for c in self._components)) def set_add(self): cset = ComponentSet() @@ -145,7 +132,7 @@ def set_add(self): self.assertTrue(c not in cset) cset.add(c) self.assertTrue(c in cset) - self.assertEqual(len(cset), i+1) + self.assertEqual(len(cset), i + 1) self.assertEqual(len(cset), len(self._components)) for c in self._components: self.assertTrue(c in cset) @@ -190,7 +177,7 @@ def test_remove(self): self.assertEqual(len(cset), len(self._components)) for i, c in enumerate(self._components): cset.remove(c) - self.assertEqual(len(cset), len(self._components)-(i+1)) + self.assertEqual(len(cset), len(self._components) - (i + 1)) for c in self._components: self.assertTrue(c not in cset) with self.assertRaises(KeyError): @@ -203,7 +190,7 @@ def test_discard(self): self.assertEqual(len(cset), len(self._components)) for i, c in enumerate(self._components): cset.discard(c) - self.assertEqual(len(cset), len(self._components)-(i+1)) + self.assertEqual(len(cset), len(self._components) - (i + 1)) for c in self._components: self.assertTrue(c not in cset) cset.discard(c) @@ -226,7 +213,7 @@ def test_misc_set_ops(self): cset1 = ComponentSet([v1]) v2 = variable() cset2 = ComponentSet([v2]) - cset3 = ComponentSet([v1,v2]) + cset3 = ComponentSet([v1, v2]) empty = ComponentSet([]) self.assertEqual(cset1 | cset2, cset3) self.assertEqual((cset1 | cset2) - cset3, empty) diff --git a/pyomo/core/tests/unit/kernel/test_conic.py b/pyomo/core/tests/unit/kernel/test_conic.py index e7230f17142..e7416210b8a 100644 --- a/pyomo/core/tests/unit/kernel/test_conic.py +++ b/pyomo/core/tests/unit/kernel/test_conic.py @@ -15,28 +15,30 @@ import pyomo.common.unittest as unittest from pyomo.kernel import pprint, IntegerSet from pyomo.core.kernel.base import ICategorizedObject -from pyomo.core.kernel.constraint import (IConstraint, - linear_constraint, - constraint, - constraint_dict, - constraint_tuple, - constraint_list) -from pyomo.core.kernel.variable import (variable, - variable_tuple) +from pyomo.core.kernel.constraint import ( + IConstraint, + linear_constraint, + constraint, + constraint_dict, + constraint_tuple, + constraint_list, +) +from pyomo.core.kernel.variable import variable, variable_tuple from pyomo.core.kernel.block import block from pyomo.core.kernel.parameter import parameter -from pyomo.core.kernel.expression import (expression, - data_expression) -from pyomo.core.kernel.conic import (_build_linking_constraints, - quadratic, - rotated_quadratic, - primal_exponential, - primal_power, - dual_exponential, - dual_power) +from pyomo.core.kernel.expression import expression, data_expression +from pyomo.core.kernel.conic import ( + _build_linking_constraints, + quadratic, + rotated_quadratic, + primal_exponential, + primal_power, + dual_exponential, + dual_power, +) -class _conic_tester_base(object): +class _conic_tester_base(object): _object_factory = None def setUp(self): @@ -74,8 +76,7 @@ def test_pickle(self): self.assertEqual(c.ub, 0) self.assertIsNot(c.body, None) self.assertIs(c.parent, None) - cup = pickle.loads( - pickle.dumps(c)) + cup = pickle.loads(pickle.dumps(c)) self.assertIs(cup.lb, None) self.assertEqual(cup.ub, 0) self.assertIsNot(cup.body, None) @@ -83,8 +84,7 @@ def test_pickle(self): b = block() b.c = c self.assertIs(c.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) cup = bup.c self.assertIs(cup.lb, None) self.assertEqual(cup.ub, 0) @@ -100,12 +100,9 @@ def test_properties(self): self.assertEqual(c.has_ub(), True) self.assertEqual(c.ub, 0) self.assertEqual(c.equality, False) - self.assertEqual(c.check_convexity_conditions(), - True) - self.assertEqual(c.check_convexity_conditions(relax=False), - True) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), True) + self.assertEqual(c.check_convexity_conditions(relax=False), True) + self.assertEqual(c.check_convexity_conditions(relax=True), True) with self.assertRaises(AttributeError): c.lb = 1 @@ -125,12 +122,9 @@ def test_properties(self): self.assertEqual(c.has_ub(), True) self.assertEqual(c.ub, 0) self.assertEqual(c.equality, False) - self.assertEqual(c.check_convexity_conditions(), - True) - self.assertEqual(c.check_convexity_conditions(relax=False), - True) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), True) + self.assertEqual(c.check_convexity_conditions(relax=False), True) + self.assertEqual(c.check_convexity_conditions(relax=True), True) self.assertEqual(c.active, True) with self.assertRaises(AttributeError): @@ -157,13 +151,11 @@ def test_containers(self): ctuple = constraint_tuple((c,)) self.assertIs(c.parent, ctuple) -class Test_quadratic(_conic_tester_base, - unittest.TestCase): +class Test_quadratic(_conic_tester_base, unittest.TestCase): _object_factory = lambda self: quadratic( - r=variable(lb=0), - x=[variable(), - variable()]) + r=variable(lb=0), x=[variable(), variable()] + ) def test_expression(self): c = self._object_factory() @@ -197,36 +189,28 @@ def test_expression(self): def test_check_convexity_conditions(self): c = self._object_factory() - self.assertEqual(c.check_convexity_conditions(), - True) + self.assertEqual(c.check_convexity_conditions(), True) c = self._object_factory() c.r.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.r.lb = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r.lb = -1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.x[0].domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) def test_as_domain(self): - ret = quadratic.as_domain( - r=3,x=[1,2]) + ret = quadratic.as_domain(r=3, x=[1, 2]) self.assertIs(type(ret), block) - q,c,r,x = ret.q,ret.c,ret.r,ret.x + q, c, r, x = ret.q, ret.c, ret.r, ret.x self.assertEqual(q.check_convexity_conditions(), True) self.assertIs(type(q), quadratic) self.assertIs(type(x), variable_tuple) @@ -247,14 +231,11 @@ def test_as_domain(self): self.assertEqual(c[2].slack, 0) x[1].value = None -class Test_rotated_quadratic(_conic_tester_base, - unittest.TestCase): +class Test_rotated_quadratic(_conic_tester_base, unittest.TestCase): _object_factory = lambda self: rotated_quadratic( - r1=variable(lb=0), - r2=variable(lb=0), - x=[variable(), - variable()]) + r1=variable(lb=0), r2=variable(lb=0), x=[variable(), variable()] + ) def test_expression(self): c = self._object_factory() @@ -273,7 +254,7 @@ def test_expression(self): c.r2.value = 7 c.x[0].value = 2 c.x[1].value = 3 - val = 2**2 + 3**2 - 2*5*7 + val = 2**2 + 3**2 - 2 * 5 * 7 self.assertEqual(c(), val) self.assertEqual(c.slack, -val) self.assertEqual(c.lslack, float('inf')) @@ -289,51 +270,39 @@ def test_expression(self): def test_check_convexity_conditions(self): c = self._object_factory() - self.assertEqual(c.check_convexity_conditions(), - True) + self.assertEqual(c.check_convexity_conditions(), True) c = self._object_factory() c.r1.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.r1.lb = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r1.lb = -1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r2.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.r2.lb = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r2.lb = -1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.x[0].domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) def test_as_domain(self): - ret = rotated_quadratic.as_domain( - r1=3,r2=4,x=[1,2]) + ret = rotated_quadratic.as_domain(r1=3, r2=4, x=[1, 2]) self.assertIs(type(ret), block) - q,c,r1,r2,x = ret.q,ret.c,ret.r1,ret.r2,ret.x + q, c, r1, r2, x = ret.q, ret.c, ret.r1, ret.r2, ret.x self.assertEqual(q.check_convexity_conditions(), True) self.assertIs(type(q), rotated_quadratic) self.assertIs(type(x), variable_tuple) @@ -359,13 +328,11 @@ def test_as_domain(self): self.assertEqual(c[3].slack, 0) x[1].value = None -class Test_primal_exponential(_conic_tester_base, - unittest.TestCase): +class Test_primal_exponential(_conic_tester_base, unittest.TestCase): _object_factory = lambda self: primal_exponential( - r=variable(lb=0), - x1=variable(lb=0), - x2=variable()) + r=variable(lb=0), x1=variable(lb=0), x2=variable() + ) def test_expression(self): c = self._object_factory() @@ -383,67 +350,55 @@ def test_expression(self): c.r.value = 8 c.x1.value = 1.1 c.x2.value = 2.3 - val = round(1.1*math.exp(2.3/1.1) - 8, 9) - self.assertEqual(round(c(),9), val) - self.assertEqual(round(c.slack,9), -val) + val = round(1.1 * math.exp(2.3 / 1.1) - 8, 9) + self.assertEqual(round(c(), 9), val) + self.assertEqual(round(c.slack, 9), -val) self.assertEqual(c.lslack, float('inf')) - self.assertEqual(round(c.uslack,9), -val) + self.assertEqual(round(c.uslack, 9), -val) self.assertIs(c._body, None) # check body - self.assertEqual(round(c.body(),9), val) - self.assertEqual(round(c(),9), val) - self.assertEqual(round(c.slack,9), -val) + self.assertEqual(round(c.body(), 9), val) + self.assertEqual(round(c(), 9), val) + self.assertEqual(round(c.slack, 9), -val) self.assertEqual(c.lslack, float('inf')) - self.assertEqual(round(c.uslack,9), -val) + self.assertEqual(round(c.uslack, 9), -val) self.assertIsNot(c._body, None) def test_check_convexity_conditions(self): c = self._object_factory() - self.assertEqual(c.check_convexity_conditions(), - True) + self.assertEqual(c.check_convexity_conditions(), True) c = self._object_factory() c.r.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.r.lb = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r.lb = -1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.x1.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.x1.lb = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.x1.lb = -1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.x2.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) def test_as_domain(self): - ret = primal_exponential.as_domain( - r=3,x1=1,x2=2) + ret = primal_exponential.as_domain(r=3, x1=1, x2=2) self.assertIs(type(ret), block) - q,c,r,x1,x2 = ret.q,ret.c,ret.r,ret.x1,ret.x2 + q, c, r, x1, x2 = ret.q, ret.c, ret.r, ret.x1, ret.x2 self.assertEqual(q.check_convexity_conditions(), True) self.assertIs(type(q), primal_exponential) self.assertIs(type(r), variable) @@ -464,43 +419,42 @@ def test_as_domain(self): self.assertEqual(c[2].slack, 0) x2.value = None -class Test_primal_power(_conic_tester_base, - unittest.TestCase): +class Test_primal_power(_conic_tester_base, unittest.TestCase): _object_factory = lambda self: primal_power( r1=variable(lb=0), r2=variable(lb=0), - x=[variable(), - variable()], - alpha=parameter(value=0.4)) + x=[variable(), variable()], + alpha=parameter(value=0.4), + ) def test_bad_alpha_type(self): c = primal_power( r1=variable(lb=0), r2=variable(lb=0), - x=[variable(), - variable()], - alpha=parameter()) + x=[variable(), variable()], + alpha=parameter(), + ) c = primal_power( r1=variable(lb=0), r2=variable(lb=0), - x=[variable(), - variable()], - alpha=data_expression()) + x=[variable(), variable()], + alpha=data_expression(), + ) with self.assertRaises(TypeError): c = primal_power( r1=variable(lb=0), r2=variable(lb=0), - x=[variable(), - variable()], - alpha=variable()) + x=[variable(), variable()], + alpha=variable(), + ) with self.assertRaises(TypeError): c = primal_power( r1=variable(lb=0), r2=variable(lb=0), - x=[variable(), - variable()], - alpha=expression()) + x=[variable(), variable()], + alpha=expression(), + ) def test_expression(self): c = self._object_factory() @@ -519,77 +473,62 @@ def test_expression(self): c.r2.value = 3.4 c.x[0].value = 1.1 c.x[1].value = -2.3 - val = round((1.1**2 + (-2.3)**2)**0.5 - \ - (5.9**0.4)*(3.4**0.6), 9) - self.assertEqual(round(c(),9), val) - self.assertEqual(round(c.slack,9), -val) + val = round((1.1**2 + (-2.3) ** 2) ** 0.5 - (5.9**0.4) * (3.4**0.6), 9) + self.assertEqual(round(c(), 9), val) + self.assertEqual(round(c.slack, 9), -val) self.assertEqual(c.lslack, float('inf')) - self.assertEqual(round(c.uslack,9), -val) + self.assertEqual(round(c.uslack, 9), -val) self.assertIs(c._body, None) # check body - self.assertEqual(round(c.body(),9), val) - self.assertEqual(round(c(),9), val) - self.assertEqual(round(c.slack,9), -val) + self.assertEqual(round(c.body(), 9), val) + self.assertEqual(round(c(), 9), val) + self.assertEqual(round(c.slack, 9), -val) self.assertEqual(c.lslack, float('inf')) - self.assertEqual(round(c.uslack,9), -val) + self.assertEqual(round(c.uslack, 9), -val) self.assertIsNot(c._body, None) def test_check_convexity_conditions(self): c = self._object_factory() - self.assertEqual(c.check_convexity_conditions(), - True) + self.assertEqual(c.check_convexity_conditions(), True) c = self._object_factory() c.r1.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.r1.lb = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r1.lb = -1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r2.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.r2.lb = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r2.lb = -1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.x[0].domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.alpha.value = 0 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.alpha.value = 1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) def test_as_domain(self): - ret = primal_power.as_domain( - r1=3,r2=4,x=[1,2],alpha=0.5) + ret = primal_power.as_domain(r1=3, r2=4, x=[1, 2], alpha=0.5) self.assertIs(type(ret), block) - q,c,r1,r2,x = ret.q,ret.c,ret.r1,ret.r2,ret.x + q, c, r1, r2, x = ret.q, ret.c, ret.r1, ret.r2, ret.x self.assertEqual(q.check_convexity_conditions(), True) self.assertIs(type(q), primal_power) self.assertIs(type(r1), variable) @@ -615,13 +554,11 @@ def test_as_domain(self): self.assertEqual(c[3].slack, 0) x[1].value = None -class Test_dual_exponential(_conic_tester_base, - unittest.TestCase): +class Test_dual_exponential(_conic_tester_base, unittest.TestCase): _object_factory = lambda self: dual_exponential( - r=variable(lb=0), - x1=variable(), - x2=variable(ub=0)) + r=variable(lb=0), x1=variable(), x2=variable(ub=0) + ) def test_expression(self): c = self._object_factory() @@ -639,67 +576,55 @@ def test_expression(self): c.r.value = 2.7 c.x1.value = 1.2 c.x2.value = -5.3 - val = round(-(-5.3/math.e)*math.exp(1.2/-5.3) - 2.7, 9) - self.assertEqual(round(c(),9), val) - self.assertEqual(round(c.slack,9), -val) + val = round(-(-5.3 / math.e) * math.exp(1.2 / -5.3) - 2.7, 9) + self.assertEqual(round(c(), 9), val) + self.assertEqual(round(c.slack, 9), -val) self.assertEqual(c.lslack, float('inf')) - self.assertEqual(round(c.uslack,9), -val) + self.assertEqual(round(c.uslack, 9), -val) self.assertIs(c._body, None) # check body - self.assertEqual(round(c.body(),9), val) - self.assertEqual(round(c(),9), val) - self.assertEqual(round(c.slack,9), -val) + self.assertEqual(round(c.body(), 9), val) + self.assertEqual(round(c(), 9), val) + self.assertEqual(round(c.slack, 9), -val) self.assertEqual(c.lslack, float('inf')) - self.assertEqual(round(c.uslack,9), -val) + self.assertEqual(round(c.uslack, 9), -val) self.assertIsNot(c._body, None) def test_check_convexity_conditions(self): c = self._object_factory() - self.assertEqual(c.check_convexity_conditions(), - True) + self.assertEqual(c.check_convexity_conditions(), True) c = self._object_factory() c.r.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.r.lb = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r.lb = -1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.x1.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.x2.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.x2.ub = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.x2.ub = 1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) def test_as_domain(self): - ret = dual_exponential.as_domain( - r=3,x1=1,x2=2) + ret = dual_exponential.as_domain(r=3, x1=1, x2=2) self.assertIs(type(ret), block) - q,c,r,x1,x2 = ret.q,ret.c,ret.r,ret.x1,ret.x2 + q, c, r, x1, x2 = ret.q, ret.c, ret.r, ret.x1, ret.x2 self.assertEqual(q.check_convexity_conditions(), True) self.assertIs(type(q), dual_exponential) self.assertIs(type(x1), variable) @@ -720,43 +645,42 @@ def test_as_domain(self): self.assertEqual(c[2].slack, 0) x2.value = None -class Test_dual_power(_conic_tester_base, - unittest.TestCase): +class Test_dual_power(_conic_tester_base, unittest.TestCase): _object_factory = lambda self: dual_power( r1=variable(lb=0), r2=variable(lb=0), - x=[variable(), - variable()], - alpha=parameter(value=0.4)) + x=[variable(), variable()], + alpha=parameter(value=0.4), + ) def test_bad_alpha_type(self): c = dual_power( r1=variable(lb=0), r2=variable(lb=0), - x=[variable(), - variable()], - alpha=parameter()) + x=[variable(), variable()], + alpha=parameter(), + ) c = dual_power( r1=variable(lb=0), r2=variable(lb=0), - x=[variable(), - variable()], - alpha=data_expression()) + x=[variable(), variable()], + alpha=data_expression(), + ) with self.assertRaises(TypeError): c = dual_power( r1=variable(lb=0), r2=variable(lb=0), - x=[variable(), - variable()], - alpha=variable()) + x=[variable(), variable()], + alpha=variable(), + ) with self.assertRaises(TypeError): c = dual_power( r1=variable(lb=0), r2=variable(lb=0), - x=[variable(), - variable()], - alpha=expression()) + x=[variable(), variable()], + alpha=expression(), + ) def test_expression(self): c = self._object_factory() @@ -775,78 +699,66 @@ def test_expression(self): c.r2.value = 3.7 c.x[0].value = 1.2 c.x[1].value = -5.3 - val = round((1.2**2 + (-5.3)**2)**0.5 - \ - ((2.7/0.4)**0.4) * \ - ((3.7/0.6)**0.6), 9) - self.assertEqual(round(c(),9), val) - self.assertEqual(round(c.slack,9), -val) + val = round( + (1.2**2 + (-5.3) ** 2) ** 0.5 + - ((2.7 / 0.4) ** 0.4) * ((3.7 / 0.6) ** 0.6), + 9, + ) + self.assertEqual(round(c(), 9), val) + self.assertEqual(round(c.slack, 9), -val) self.assertEqual(c.lslack, float('inf')) - self.assertEqual(round(c.uslack,9), -val) + self.assertEqual(round(c.uslack, 9), -val) self.assertIs(c._body, None) # check body - self.assertEqual(round(c.body(),9), val) - self.assertEqual(round(c(),9), val) - self.assertEqual(round(c.slack,9), -val) + self.assertEqual(round(c.body(), 9), val) + self.assertEqual(round(c(), 9), val) + self.assertEqual(round(c.slack, 9), -val) self.assertEqual(c.lslack, float('inf')) - self.assertEqual(round(c.uslack,9), -val) + self.assertEqual(round(c.uslack, 9), -val) self.assertIsNot(c._body, None) def test_check_convexity_conditions(self): c = self._object_factory() - self.assertEqual(c.check_convexity_conditions(), - True) + self.assertEqual(c.check_convexity_conditions(), True) c = self._object_factory() c.r1.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.r1.lb = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r1.lb = -1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r2.domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.r2.lb = None - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.r2.lb = -1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.x[0].domain_type = IntegerSet - self.assertEqual(c.check_convexity_conditions(), - False) - self.assertEqual(c.check_convexity_conditions(relax=True), - True) + self.assertEqual(c.check_convexity_conditions(), False) + self.assertEqual(c.check_convexity_conditions(relax=True), True) c = self._object_factory() c.alpha.value = 0 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) c = self._object_factory() c.alpha.value = 1 - self.assertEqual(c.check_convexity_conditions(), - False) + self.assertEqual(c.check_convexity_conditions(), False) def test_as_domain(self): - ret = dual_power.as_domain( - r1=3,r2=4,x=[1,2],alpha=0.5) + ret = dual_power.as_domain(r1=3, r2=4, x=[1, 2], alpha=0.5) self.assertIs(type(ret), block) - q,c,r1,r2,x = ret.q,ret.c,ret.r1,ret.r2,ret.x + q, c, r1, r2, x = ret.q, ret.c, ret.r1, ret.r2, ret.x self.assertEqual(q.check_convexity_conditions(), True) self.assertIs(type(q), dual_power) self.assertIs(type(r1), variable) @@ -872,23 +784,17 @@ def test_as_domain(self): self.assertEqual(c[3].slack, 0) x[1].value = None -class TestMisc(unittest.TestCase): +class TestMisc(unittest.TestCase): def test_build_linking_constraints(self): - c = _build_linking_constraints([],[]) + c = _build_linking_constraints([], []) self.assertIs(type(c), constraint_tuple) self.assertEqual(len(c), 0) - c = _build_linking_constraints([None],[variable()]) + c = _build_linking_constraints([None], [variable()]) self.assertIs(type(c), constraint_tuple) self.assertEqual(len(c), 0) - v = [1, - data_expression(), - variable(), - expression(expr=1.0)] - vaux = [variable(), - variable(), - variable(), - variable()] + v = [1, data_expression(), variable(), expression(expr=1.0)] + vaux = [variable(), variable(), variable(), variable()] c = _build_linking_constraints(v, vaux) self.assertIs(type(c), constraint_tuple) self.assertEqual(len(c), 4) @@ -912,11 +818,13 @@ def test_build_linking_constraints(self): self.assertIs(type(c[3]), constraint) self.assertEqual(c[3].rhs, 0) from pyomo.repn import generate_standard_repn + repn = generate_standard_repn(c[3].body) self.assertEqual(len(repn.linear_vars), 1) self.assertIs(repn.linear_vars[0], vaux[3]) self.assertEqual(repn.linear_coefs[0], 1) self.assertEqual(repn.constant, -1) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_constraint.py b/pyomo/core/tests/unit/kernel/test_constraint.py index 2a2273cd3fa..f3ddfe50697 100644 --- a/pyomo/core/tests/unit/kernel/test_constraint.py +++ b/pyomo/core/tests/unit/kernel/test_constraint.py @@ -12,29 +12,33 @@ import pickle import pyomo.common.unittest as unittest -from pyomo.core.expr import logical_expr +from pyomo.core.expr import inequality, RangedExpression, EqualityExpression from pyomo.kernel import pprint -from pyomo.core.tests.unit.kernel.test_dict_container import \ - _TestActiveDictContainerBase -from pyomo.core.tests.unit.kernel.test_tuple_container import \ - _TestActiveTupleContainerBase -from pyomo.core.tests.unit.kernel.test_list_container import \ - _TestActiveListContainerBase +from pyomo.core.tests.unit.kernel.test_dict_container import ( + _TestActiveDictContainerBase, +) +from pyomo.core.tests.unit.kernel.test_tuple_container import ( + _TestActiveTupleContainerBase, +) +from pyomo.core.tests.unit.kernel.test_list_container import ( + _TestActiveListContainerBase, +) from pyomo.core.kernel.base import ICategorizedObject -from pyomo.core.kernel.constraint import (IConstraint, - constraint, - linear_constraint, - constraint_dict, - constraint_tuple, - constraint_list) +from pyomo.core.kernel.constraint import ( + IConstraint, + constraint, + linear_constraint, + constraint_dict, + constraint_tuple, + constraint_list, +) from pyomo.core.kernel.variable import variable from pyomo.core.kernel.parameter import parameter -from pyomo.core.kernel.expression import (expression, - data_expression) +from pyomo.core.kernel.expression import expression, data_expression from pyomo.core.kernel.block import block -class Test_constraint(unittest.TestCase): +class Test_constraint(unittest.TestCase): def test_pprint(self): # Not really testing what the output is, just that # an error does not occur. The pprint functionality @@ -64,8 +68,7 @@ def test_pickle(self): self.assertIs(c.body, None) self.assertIs(c.ub, None) self.assertEqual(c.parent, None) - cup = pickle.loads( - pickle.dumps(c)) + cup = pickle.loads(pickle.dumps(c)) self.assertEqual(cup.lb, None) self.assertEqual(cup.body, None) self.assertEqual(cup.ub, None) @@ -73,8 +76,7 @@ def test_pickle(self): b = block() b.c = c self.assertIs(c.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) cup = bup.c self.assertEqual(cup.lb, None) self.assertEqual(cup.body, None) @@ -240,31 +242,31 @@ def test_bounds_getter_setter(self): self.assertEqual(c.lb, None) self.assertEqual(c.ub, None) - c.bounds = (1,2) - self.assertEqual(c.bounds, (1,2)) + c.bounds = (1, 2) + self.assertEqual(c.bounds, (1, 2)) self.assertEqual(c.lb, 1) self.assertEqual(c.ub, 2) c.rhs = 3 - self.assertEqual(c.bounds, (3,3)) + self.assertEqual(c.bounds, (3, 3)) self.assertEqual(c.lb, 3) self.assertEqual(c.ub, 3) self.assertEqual(c.rhs, 3) with self.assertRaises(ValueError): - c.bounds = (3,3) - self.assertEqual(c.bounds, (3,3)) + c.bounds = (3, 3) + self.assertEqual(c.bounds, (3, 3)) self.assertEqual(c.lb, 3) self.assertEqual(c.ub, 3) self.assertEqual(c.rhs, 3) with self.assertRaises(ValueError): - c.bounds = (2,2) - self.assertEqual(c.bounds, (3,3)) + c.bounds = (2, 2) + self.assertEqual(c.bounds, (3, 3)) self.assertEqual(c.lb, 3) self.assertEqual(c.ub, 3) self.assertEqual(c.rhs, 3) with self.assertRaises(ValueError): - c.bounds = (1,2) - self.assertEqual(c.bounds, (3,3)) + c.bounds = (1, 2) + self.assertEqual(c.bounds, (3, 3)) self.assertEqual(c.lb, 3) self.assertEqual(c.ub, 3) self.assertEqual(c.rhs, 3) @@ -475,19 +477,19 @@ def test_nondata_bounds(self): vL.value = 2 vU.value = 1 - c.expr = (vL <= vU) + c.expr = vL <= vU self.assertEqual(c.lb, None) self.assertEqual(c.body(), 1) self.assertEqual(c.ub, 0) - c.expr = (vU >= vL) + c.expr = vU >= vL self.assertEqual(c.lb, None) self.assertEqual(c.body(), 1) self.assertEqual(c.ub, 0) - c.expr = (vU <= vL) + c.expr = vU <= vL self.assertEqual(c.lb, None) self.assertEqual(c.body(), -1) self.assertEqual(c.ub, 0) - c.expr = (vL >= vU) + c.expr = vL >= vU self.assertEqual(c.lb, None) self.assertEqual(c.body(), -1) self.assertEqual(c.ub, 0) @@ -518,12 +520,12 @@ def test_fixed_variable_stays_in_body(self): x.free() x.value = 1 - c.expr = (0 == x) + c.expr = 0 == x self.assertEqual(c.equality, True) self.assertEqual(c.lb, 0) self.assertEqual(c.body(), 1) self.assertEqual(c.ub, 0) - c.expr = (x == 0) + c.expr = x == 0 self.assertEqual(c.equality, True) self.assertEqual(c.lb, 0) self.assertEqual(c.body(), 1) @@ -532,12 +534,12 @@ def test_fixed_variable_stays_in_body(self): # ensure the variable is not moved into the upper or # lower bound expression (this used to be a bug) x.fix() - c.expr = (0 == x) + c.expr = 0 == x self.assertEqual(c.equality, True) self.assertEqual(c.lb, 0) self.assertEqual(c.body(), 1) self.assertEqual(c.ub, 0) - c.expr = (x == 0) + c.expr = x == 0 self.assertEqual(c.equality, True) self.assertEqual(c.lb, 0) self.assertEqual(c.body(), 1) @@ -546,14 +548,14 @@ def test_fixed_variable_stays_in_body(self): # ensure the variable is not moved into the upper or # lower bound expression (this used to be a bug) x.free() - c.expr = (0 == x) + c.expr = 0 == x x.fix() self.assertEqual(c.equality, True) self.assertEqual(c.lb, 0) self.assertEqual(c.body(), 1) self.assertEqual(c.ub, 0) x.free() - c.expr = (x == 0) + c.expr = x == 0 x.fix() self.assertEqual(c.equality, True) self.assertEqual(c.lb, 0) @@ -616,7 +618,7 @@ def test_mutable_novalue_param_lower_bound(self): c = constraint(expr=p + 1 <= x) self.assertEqual(c.equality, False) - c = constraint(expr=(p + 1)**2 <= x) + c = constraint(expr=(p + 1) ** 2 <= x) self.assertEqual(c.equality, False) c = constraint(expr=(p, x, p + 1)) @@ -635,7 +637,7 @@ def test_mutable_novalue_param_lower_bound(self): c = constraint(expr=x >= p + 1) self.assertEqual(c.equality, False) - c = constraint(expr=x >= (p + 1)**2) + c = constraint(expr=x >= (p + 1) ** 2) self.assertEqual(c.equality, False) c = constraint(expr=(p, x, None)) @@ -672,7 +674,7 @@ def test_mutable_novalue_param_upper_bound(self): c = constraint(expr=x <= p + 1) self.assertEqual(c.equality, False) - c = constraint(expr=x <= (p + 1)**2) + c = constraint(expr=x <= (p + 1) ** 2) self.assertEqual(c.equality, False) c = constraint(expr=(p + 1, x, p)) @@ -691,7 +693,7 @@ def test_mutable_novalue_param_upper_bound(self): c = constraint(expr=p + 1 >= x) self.assertEqual(c.equality, False) - c = constraint(expr=(p + 1)**2 >= x) + c = constraint(expr=(p + 1) ** 2 >= x) self.assertEqual(c.equality, False) c = constraint(expr=(None, x, p)) @@ -725,7 +727,7 @@ def test_mutable_novalue_param_equality(self): c = constraint(expr=x + 1 == p) self.assertEqual(c.equality, True) - c = constraint(expr=x + 1 == (p + 1)**2) + c = constraint(expr=x + 1 == (p + 1) ** 2) self.assertEqual(c.equality, True) c = constraint(expr=x == p + 1) @@ -745,14 +747,14 @@ def test_mutable_novalue_param_equality(self): self.assertIs(c.body, x) self.assertEqual(c.equality, True) - c = constraint(expr=logical_expr.EqualityExpression((p, x))) + c = constraint(expr=EqualityExpression((p, x))) self.assertIs(c.upper, p) self.assertIs(c.lower, p) self.assertIs(c.rhs, p) self.assertIs(c.body, x) self.assertEqual(c.equality, True) - c = constraint(expr=logical_expr.EqualityExpression((x, p))) + c = constraint(expr=EqualityExpression((x, p))) self.assertIs(c.upper, p) self.assertIs(c.lower, p) self.assertIs(c.rhs, p) @@ -809,7 +811,7 @@ def test_tuple_construct_1sided_inequality(self): c = constraint((0, y, None)) self.assertEqual(c.equality, False) self.assertEqual(c.lb, 0) - self.assertIs (c.body, y) + self.assertIs(c.body, y) self.assertIs(c.ub, None) def test_tuple_construct_1sided_inf_inequality(self): @@ -879,10 +881,10 @@ def test_tuple_construct_invalid_2sided_inequality(self): y = variable() z = variable() with self.assertRaises(ValueError): - constraint(logical_expr.RangedExpression((x, y, 1), (False, False))) + constraint(RangedExpression((x, y, 1), (False, False))) with self.assertRaises(ValueError): - constraint(logical_expr.RangedExpression((0, y, z), (False, False))) + constraint(RangedExpression((0, y, z), (False, False))) def test_expr_construct_equality(self): x = variable(value=1) @@ -908,13 +910,13 @@ def test_expr_construct_equality(self): self.assertEqual(c.ub, 0) c = constraint() - c.expr = (x == float('inf')) + c.expr = x == float('inf') self.assertEqual(c.equality, True) self.assertEqual(c.lb, float('inf')) self.assertEqual(c.ub, float('inf')) self.assertEqual(c.rhs, float('inf')) self.assertIs(c.body, x) - c.expr = (float('inf') == x) + c.expr = float('inf') == x self.assertEqual(c.equality, True) self.assertEqual(c.lb, float('inf')) self.assertEqual(c.ub, float('inf')) @@ -926,36 +928,36 @@ def test_strict_inequality_failure(self): y = variable() c = constraint() with self.assertRaises(ValueError): - c.expr = (x < 0) + c.expr = x < 0 with self.assertRaises(ValueError): - c.expr = logical_expr.inequality(body=x, upper=0, strict=True) - c.expr = (x <= 0) - c.expr = logical_expr.inequality(body=x, upper=0, strict=False) + c.expr = inequality(body=x, upper=0, strict=True) + c.expr = x <= 0 + c.expr = inequality(body=x, upper=0, strict=False) with self.assertRaises(ValueError): - c.expr = (x > 0) + c.expr = x > 0 with self.assertRaises(ValueError): - c.expr = logical_expr.inequality(body=x, lower=0, strict=True) - c.expr = (x >= 0) - c.expr = logical_expr.inequality(body=x, lower=0, strict=False) + c.expr = inequality(body=x, lower=0, strict=True) + c.expr = x >= 0 + c.expr = inequality(body=x, lower=0, strict=False) with self.assertRaises(ValueError): - c.expr = (x < y) + c.expr = x < y with self.assertRaises(ValueError): - c.expr = logical_expr.inequality(body=x, upper=y, strict=True) - c.expr = (x <= y) - c.expr = logical_expr.inequality(body=x, upper=y, strict=False) + c.expr = inequality(body=x, upper=y, strict=True) + c.expr = x <= y + c.expr = inequality(body=x, upper=y, strict=False) with self.assertRaises(ValueError): - c.expr = (x > y) + c.expr = x > y with self.assertRaises(ValueError): - c.expr = logical_expr.inequality(body=x, lower=y, strict=True) - c.expr = (x >= y) - c.expr = logical_expr.inequality(body=x, lower=y, strict=False) + c.expr = inequality(body=x, lower=y, strict=True) + c.expr = x >= y + c.expr = inequality(body=x, lower=y, strict=False) with self.assertRaises(ValueError): - c.expr = logical_expr.RangedExpression((0, x, 1), (True, True)) + c.expr = RangedExpression((0, x, 1), (True, True)) with self.assertRaises(ValueError): - c.expr = logical_expr.RangedExpression((0, x, 1), (False, True)) + c.expr = RangedExpression((0, x, 1), (False, True)) with self.assertRaises(ValueError): - c.expr = logical_expr.RangedExpression((0, x, 1), (True, False)) - c.expr = logical_expr.RangedExpression((0, x, 1), (False, False)) + c.expr = RangedExpression((0, x, 1), (True, False)) + c.expr = RangedExpression((0, x, 1), (False, False)) def test_expr_construct_inf_equality(self): x = variable() @@ -1071,18 +1073,18 @@ def test_expr_invalid_double_sided_inequality(self): self.assertEqual(c.ub, 1) self.assertEqual(c.equality, False) with self.assertRaises(ValueError): - c.expr = (y, x-y, 0) + c.expr = (y, x - y, 0) def test_equality_infinite(self): c = constraint() v = variable() - c.expr = (v == 1) + c.expr = v == 1 self.assertEqual(c.equality, True) self.assertEqual(c.lb, 1) self.assertEqual(c.ub, 1) self.assertEqual(c.rhs, 1) self.assertIs(c.body, v) - c.expr = (v == float('inf')) + c.expr = v == float('inf') self.assertEqual(c.equality, True) self.assertEqual(c.lb, float('inf')) self.assertEqual(c.ub, float('inf')) @@ -1094,7 +1096,7 @@ def test_equality_infinite(self): self.assertEqual(c.ub, float('inf')) self.assertEqual(c.rhs, float('inf')) self.assertIs(c.body, v) - c.expr = (float('inf') == v) + c.expr = float('inf') == v self.assertEqual(c.equality, True) self.assertEqual(c.lb, float('inf')) self.assertEqual(c.ub, float('inf')) @@ -1106,7 +1108,7 @@ def test_equality_infinite(self): self.assertEqual(c.ub, float('inf')) self.assertEqual(c.rhs, float('inf')) self.assertIs(c.body, v) - c.expr = (v == float('-inf')) + c.expr = v == float('-inf') self.assertEqual(c.equality, True) self.assertEqual(c.lb, float('-inf')) self.assertEqual(c.ub, float('-inf')) @@ -1118,7 +1120,7 @@ def test_equality_infinite(self): self.assertEqual(c.ub, float('-inf')) self.assertEqual(c.rhs, float('-inf')) self.assertIs(c.body, v) - c.expr = (float('-inf') == v) + c.expr = float('-inf') == v self.assertEqual(c.equality, True) self.assertEqual(c.lb, float('-inf')) self.assertEqual(c.ub, float('-inf')) @@ -1134,7 +1136,7 @@ def test_equality_infinite(self): def test_equality_nonnumeric(self): c = constraint() v = variable() - c.expr = (v == 1) + c.expr = v == 1 with self.assertRaises(TypeError): c.expr = (v, 'x') with self.assertRaises(TypeError): @@ -1371,9 +1373,7 @@ def test_slack_methods(self): self.assertEqual(cR.uslack, None) # range finite (parameter) - cR = constraint(body=x, - lb=parameter(L), - ub=parameter(U)) + cR = constraint(body=x, lb=parameter(L), ub=parameter(U)) x.value = 4 self.assertEqual(cR.body(), 4) self.assertEqual(cR.slack, 1) @@ -1398,9 +1398,7 @@ def test_slack_methods(self): self.assertEqual(cR.uslack, None) # range unbounded (parameter) - cR = constraint(body=x, - lb=parameter(float('-inf')), - ub=parameter(float('inf'))) + cR = constraint(body=x, lb=parameter(float('-inf')), ub=parameter(float('inf'))) x.value = 4 self.assertEqual(cR.body(), 4) self.assertEqual(cR.slack, float('inf')) @@ -1425,7 +1423,6 @@ def test_slack_methods(self): self.assertEqual(cR.uslack, None) def test_expr(self): - x = variable(value=1.0) c = constraint() c.expr = (0, x, 2) @@ -1486,23 +1483,23 @@ def test_expr_getter(self): def test_expr_wrong_type(self): c = constraint() with self.assertRaises(ValueError): - c.expr = (2) + c.expr = 2 with self.assertRaises(ValueError): - c.expr = (True) + c.expr = True def test_tuple_constraint_create(self): x = variable() y = variable() z = variable() - c = constraint((0.0,x)) + c = constraint((0.0, x)) with self.assertRaises(ValueError): - constraint((y,x,z)) + constraint((y, x, z)) with self.assertRaises(ValueError): - constraint((0,x,z)) + constraint((0, x, z)) with self.assertRaises(ValueError): - constraint((y,x,0)) + constraint((y, x, 0)) with self.assertRaises(ValueError): - constraint((x,0,0,0)) + constraint((x, 0, 0, 0)) c = constraint((x, y)) self.assertEqual(c.upper, 0) @@ -1570,19 +1567,18 @@ def test_expression_constructor_coverage(self): expr = U <= x c = constraint(expr) - x = variable() with self.assertRaises(ValueError): - constraint(x+x) + constraint(x + x) -class Test_linear_constraint(unittest.TestCase): +class Test_linear_constraint(unittest.TestCase): def test_pprint(self): # Not really testing what the output is, just that # an error does not occur. The pprint functionality # is still in the early stages. v = variable() - c = linear_constraint(lb=1, terms=[(v,1)], ub=1) + c = linear_constraint(lb=1, terms=[(v, 1)], ub=1) pprint(c) b = block() b.c = c @@ -1595,19 +1591,18 @@ def test_pprint(self): pprint(m) def test_ctype(self): - c = linear_constraint([],[]) + c = linear_constraint([], []) self.assertIs(c.ctype, IConstraint) self.assertIs(type(c), linear_constraint) self.assertIs(type(c)._ctype, IConstraint) def test_pickle(self): - c = linear_constraint([],[]) + c = linear_constraint([], []) self.assertIs(c.lb, None) self.assertEqual(c.body, 0) self.assertIs(c.ub, None) self.assertEqual(c.parent, None) - cup = pickle.loads( - pickle.dumps(c)) + cup = pickle.loads(pickle.dumps(c)) self.assertEqual(cup.lb, None) self.assertEqual(cup.body, 0) self.assertEqual(cup.ub, None) @@ -1615,8 +1610,7 @@ def test_pickle(self): b = block() b.c = c self.assertIs(c.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) cup = bup.c self.assertEqual(cup.lb, None) self.assertEqual(cup.body, 0) @@ -1624,7 +1618,7 @@ def test_pickle(self): self.assertIs(cup.parent, bup) def test_init(self): - c = linear_constraint([],[]) + c = linear_constraint([], []) self.assertTrue(c.parent is None) self.assertEqual(c.ctype, IConstraint) self.assertEqual(c.body, 0) @@ -1645,7 +1639,7 @@ def test_init_nonexpr(self): self.assertEqual(c.body, 0) self.assertEqual(c.ub, None) - c = linear_constraint([v],[1],lb=0,ub=1) + c = linear_constraint([v], [1], lb=0, ub=1) self.assertEqual(len(list(c.terms)), 1) self.assertEqual(c.lb, 0) self.assertEqual(c.body(), 3) @@ -1653,13 +1647,13 @@ def test_init_nonexpr(self): self.assertEqual(c.ub, 1) # can't use both terms and variables with self.assertRaises(ValueError): - linear_constraint(terms=(),variables=()) + linear_constraint(terms=(), variables=()) # can't use both terms and coefficients with self.assertRaises(ValueError): - linear_constraint(terms=(),coefficients=()) + linear_constraint(terms=(), coefficients=()) # can't use both all three with self.assertRaises(ValueError): - linear_constraint(terms=(),variables=(),coefficients=()) + linear_constraint(terms=(), variables=(), coefficients=()) # can't use only variables with self.assertRaises(ValueError): linear_constraint(variables=[v]) @@ -1668,19 +1662,19 @@ def test_init_nonexpr(self): linear_constraint(coefficients=[1]) # can't use both lb and rhs with self.assertRaises(ValueError): - linear_constraint([v],[1],lb=0,rhs=0) + linear_constraint([v], [1], lb=0, rhs=0) # can't use both ub and rhs with self.assertRaises(ValueError): - linear_constraint([v],[1],ub=0,rhs=0) + linear_constraint([v], [1], ub=0, rhs=0) - c = linear_constraint([v],[1],rhs=1) + c = linear_constraint([v], [1], rhs=1) self.assertEqual(c.lb, 1) self.assertEqual(c.ub, 1) self.assertEqual(c.rhs, 1) self.assertEqual(c.body(), 3) self.assertEqual(c(), 3) - c = linear_constraint([],[],rhs=1) + c = linear_constraint([], [], rhs=1) c.terms = ((v, 1),) self.assertEqual(c.lb, 1) self.assertEqual(c.ub, 1) @@ -1690,7 +1684,7 @@ def test_init_nonexpr(self): def test_init_terms(self): v = variable(value=3) - c = linear_constraint([],[],rhs=1) + c = linear_constraint([], [], rhs=1) c.terms = ((v, 2),) self.assertEqual(c.lb, 1) self.assertEqual(c.ub, 1) @@ -1698,15 +1692,15 @@ def test_init_terms(self): self.assertEqual(c.body(), 6) self.assertEqual(c(), 6) - c = linear_constraint(terms=[(v,2)],rhs=1) + c = linear_constraint(terms=[(v, 2)], rhs=1) self.assertEqual(c.lb, 1) self.assertEqual(c.ub, 1) self.assertEqual(c.rhs, 1) self.assertEqual(c.body(), 6) self.assertEqual(c(), 6) - terms = [(v,2)] - c = linear_constraint(terms=iter(terms),rhs=1) + terms = [(v, 2)] + c = linear_constraint(terms=iter(terms), rhs=1) self.assertEqual(c.lb, 1) self.assertEqual(c.ub, 1) self.assertEqual(c.rhs, 1) @@ -1722,12 +1716,12 @@ def test_init_terms(self): self.assertEqual(tuple(c.terms), ()) def test_type(self): - c = linear_constraint([],[]) + c = linear_constraint([], []) self.assertTrue(isinstance(c, ICategorizedObject)) self.assertTrue(isinstance(c, IConstraint)) def test_active(self): - c = linear_constraint([],[]) + c = linear_constraint([], []) self.assertEqual(c.active, True) c.deactivate() self.assertEqual(c.active, False) @@ -1756,13 +1750,13 @@ def test_active(self): def test_equality(self): v = variable() - c = linear_constraint([v],[1],rhs=1) + c = linear_constraint([v], [1], rhs=1) self.assertEqual(c.lb, 1) self.assertEqual(c.ub, 1) self.assertEqual(c.rhs, 1) self.assertEqual(c.equality, True) - c = linear_constraint([],[],rhs=1) + c = linear_constraint([], [], rhs=1) self.assertEqual(c.body, 0) self.assertEqual(c.lb, 1) self.assertEqual(c.ub, 1) @@ -1799,7 +1793,7 @@ def test_equality(self): self.assertEqual(c.equality, True) def test_nondata_bounds(self): - c = linear_constraint([],[]) + c = linear_constraint([], []) eL = expression() eU = expression() @@ -1841,10 +1835,7 @@ def test_nondata_bounds(self): vL.value = -1 vU.value = -1 - c = linear_constraint([vL, vU], - [1, 1], - lb=1.0, - ub=1.0) + c = linear_constraint([vL, vU], [1, 1], lb=1.0, ub=1.0) self.assertEqual(c(), -2.0) self.assertEqual(c.slack, -3.0) self.assertEqual(c.lslack, -3.0) @@ -1852,7 +1843,7 @@ def test_nondata_bounds(self): def test_fixed_variable_stays_in_body(self): x = variable(value=0.5) - c = linear_constraint([x],[1], lb=0, ub=1) + c = linear_constraint([x], [1], lb=0, ub=1) self.assertEqual(c.lb, 0) self.assertEqual(c.body(), 0.5) self.assertEqual(c(), 0.5) @@ -1874,7 +1865,7 @@ def test_fixed_variable_stays_in_body(self): self.assertEqual(repn.constant, 0) x.fix(0.5) - c = linear_constraint([x],[2], lb=0, ub=1) + c = linear_constraint([x], [2], lb=0, ub=1) self.assertEqual(c.lb, 0) self.assertEqual(c.body(), 1) self.assertEqual(c(), 1) @@ -1895,7 +1886,7 @@ def test_fixed_variable_stays_in_body(self): x.free() x.value = 1 - c = linear_constraint([x],[1], rhs=0) + c = linear_constraint([x], [1], rhs=0) self.assertEqual(c.equality, True) self.assertEqual(c.lb, 0) self.assertEqual(c.body(), 1) @@ -1908,7 +1899,7 @@ def test_fixed_variable_stays_in_body(self): self.assertEqual(repn.constant, 0) x.fix() - c = linear_constraint([x],[1], rhs=0) + c = linear_constraint([x], [1], rhs=0) self.assertEqual(c.equality, True) self.assertEqual(c.lb, 0) self.assertEqual(c.body(), 1) @@ -1920,7 +1911,7 @@ def test_fixed_variable_stays_in_body(self): self.assertEqual(repn.constant, 1) x.free() - c = linear_constraint([x],[1], rhs=0) + c = linear_constraint([x], [1], rhs=0) x.fix() self.assertEqual(c.equality, True) self.assertEqual(c.lb, 0) @@ -1933,7 +1924,7 @@ def test_fixed_variable_stays_in_body(self): self.assertEqual(repn.constant, 1) def test_data_bounds(self): - c = linear_constraint([],[]) + c = linear_constraint([], []) e = expression(expr=1.0) c.lb = 1.0 @@ -1954,11 +1945,11 @@ def test_data_bounds(self): self.assertIs(c.upper, eU) def test_call(self): - c = linear_constraint([],[]) + c = linear_constraint([], []) self.assertEqual(c(), 0) v = variable() - c = linear_constraint([v],[2]) + c = linear_constraint([v], [2]) with self.assertRaises(ValueError): c() with self.assertRaises(ValueError): @@ -1980,7 +1971,7 @@ def test_call(self): v.value = None e = expression(v) - c = linear_constraint([e],[1]) + c = linear_constraint([e], [1]) with self.assertRaises(ValueError): c() with self.assertRaises(ValueError): @@ -2012,7 +2003,7 @@ def test_canonical_form(self): # compute_values = True # - c.terms = [(v,p)] + c.terms = [(v, p)] repn = c.canonical_form() self.assertEqual(len(repn.linear_vars), 1) self.assertIs(repn.linear_vars[0], v) @@ -2027,7 +2018,7 @@ def test_canonical_form(self): v.free() e.expr = v - c.terms = [(e,p)] + c.terms = [(e, p)] repn = c.canonical_form() self.assertEqual(len(repn.linear_vars), 1) self.assertIs(repn.linear_vars[0], v) @@ -2045,7 +2036,7 @@ def test_canonical_form(self): # v.free() - c.terms = [(v,p)] + c.terms = [(v, p)] repn = c.canonical_form(compute_values=False) self.assertEqual(len(repn.linear_vars), 1) self.assertIs(repn.linear_vars[0], v) @@ -2062,7 +2053,7 @@ def test_canonical_form(self): v.free() e.expr = v - c.terms = [(e,p)] + c.terms = [(e, p)] repn = c.canonical_form(compute_values=False) self.assertEqual(len(repn.linear_vars), 1) self.assertIs(repn.linear_vars[0], v) @@ -2077,20 +2068,21 @@ def test_canonical_form(self): self.assertEqual(len(repn.linear_coefs), 0) self.assertEqual(repn.constant(), 2) -class Test_constraint_dict(_TestActiveDictContainerBase, - unittest.TestCase): + +class Test_constraint_dict(_TestActiveDictContainerBase, unittest.TestCase): _container_type = constraint_dict _ctype_factory = lambda self: constraint() -class Test_constraint_tuple(_TestActiveTupleContainerBase, - unittest.TestCase): + +class Test_constraint_tuple(_TestActiveTupleContainerBase, unittest.TestCase): _container_type = constraint_tuple _ctype_factory = lambda self: constraint() -class Test_constraint_list(_TestActiveListContainerBase, - unittest.TestCase): + +class Test_constraint_list(_TestActiveListContainerBase, unittest.TestCase): _container_type = constraint_list _ctype_factory = lambda self: constraint() + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_dict_container.py b/pyomo/core/tests/unit/kernel/test_dict_container.py index 7b28cbc18c9..e6b6f8d7aab 100644 --- a/pyomo/core/tests/unit/kernel/test_dict_container.py +++ b/pyomo/core/tests/unit/kernel/test_dict_container.py @@ -16,14 +16,11 @@ import pyomo.common.unittest as unittest import pyomo.kernel as pmo from pyomo.common.log import LoggingIntercept -from pyomo.core.kernel.base import \ - (ICategorizedObject, - ICategorizedObjectContainer) -from pyomo.core.kernel.homogeneous_container import \ - IHomogeneousContainer +from pyomo.common.tee import capture_output +from pyomo.core.kernel.base import ICategorizedObject, ICategorizedObjectContainer +from pyomo.core.kernel.homogeneous_container import IHomogeneousContainer from pyomo.core.kernel.dict_container import DictContainer -from pyomo.core.kernel.block import (block, - block_dict) +from pyomo.core.kernel.block import block, block_dict # # There are no fully implemented test suites in this @@ -44,7 +41,6 @@ class _bad_ctype(object): class _TestDictContainerBase(object): - # set by derived class _container_type = None _ctype_factory = None @@ -58,32 +54,37 @@ def test_overwrite_warning(self): assert out.getvalue() == "" with LoggingIntercept(out, 'pyomo.core'): c[0] = self._ctype_factory() - assert out.getvalue() == \ - ("Implicitly replacing the entry [0] " - "(type=%s) with a new object (type=%s). " - "This is usually indicative of a modeling " - "error. To avoid this warning, delete the " - "original object from the container before " - "assigning a new object.\n" - % (self._ctype_factory().__class__.__name__, - self._ctype_factory().__class__.__name__)) + assert out.getvalue() == ( + "Implicitly replacing the entry [0] " + "(type=%s) with a new object (type=%s). " + "This is usually indicative of a modeling " + "error. To avoid this warning, delete the " + "original object from the container before " + "assigning a new object.\n" + % ( + self._ctype_factory().__class__.__name__, + self._ctype_factory().__class__.__name__, + ) + ) def test_pprint(self): import pyomo.kernel + # Not really testing what the output is, just that # an error does not occur. The pprint functionality # is still in the early stages. - cdict = self._container_type({None: self._ctype_factory()}) - pyomo.kernel.pprint(cdict) - b = block() - b.cdict = cdict - pyomo.kernel.pprint(cdict) - pyomo.kernel.pprint(b) - m = block() - m.b = b - pyomo.kernel.pprint(cdict) - pyomo.kernel.pprint(b) - pyomo.kernel.pprint(m) + with capture_output() as OUT: + cdict = self._container_type({None: self._ctype_factory()}) + pyomo.kernel.pprint(cdict) + b = block() + b.cdict = cdict + pyomo.kernel.pprint(cdict) + pyomo.kernel.pprint(b) + m = block() + m.b = b + pyomo.kernel.pprint(cdict) + pyomo.kernel.pprint(b) + pyomo.kernel.pprint(m) def test_ctype(self): c = self._container_type() @@ -97,15 +98,11 @@ def test_init1(self): def test_init2(self): index = ['a', 1, None, (1,), (1, 2)] - self._container_type((i, self._ctype_factory()) - for i in index) - self._container_type(((i, self._ctype_factory()) - for i in index)) + self._container_type((i, self._ctype_factory()) for i in index) + self._container_type(((i, self._ctype_factory()) for i in index)) with self.assertRaises(TypeError): - self._container_type(*tuple((i, self._ctype_factory()) - for i in index)) - c = self._container_type(a=self._ctype_factory(), - b=self._ctype_factory()) + self._container_type(*tuple((i, self._ctype_factory()) for i in index)) + c = self._container_type(a=self._ctype_factory(), b=self._ctype_factory()) self.assertEqual(len(c), 2) self.assertTrue('a' in c) self.assertTrue('b' in c) @@ -123,8 +120,7 @@ def test_ordered_init(self): cdict[-1] = self._ctype_factory() cdict['bc'] = self._ctype_factory() cdict[3] = self._ctype_factory() - self.assertEqual(list(cdict.keys()), - [1,'a',2,None,-1,'bc',3]) + self.assertEqual(list(cdict.keys()), [1, 'a', 2, None, -1, 'bc', 3]) def test_type(self): cdict = self._container_type() @@ -142,9 +138,8 @@ def test_len1(self): self.assertEqual(len(cdict), 0) def test_len2(self): - index = ['a', 1, None, (1,), (1,2)] - cdict = self._container_type((i, self._ctype_factory()) - for i in index) + index = ['a', 1, None, (1,), (1, 2)] + cdict = self._container_type((i, self._ctype_factory()) for i in index) self.assertEqual(len(cdict), len(index)) def test_setitem(self): @@ -164,8 +159,7 @@ def test_setitem(self): def test_wrong_type_init(self): index = ['a', 1, None, (1,), (1, 2)] with self.assertRaises(TypeError): - c = self._container_type( - (i, _bad_ctype()) for i in index) + c = self._container_type((i, _bad_ctype()) for i in index) def test_wrong_type_update(self): index = ['a', 1, None, (1,), (1, 2)] @@ -216,8 +210,7 @@ def test_has_parent_setitem(self): # by a call to setitem and not simply updated. def test_setitem_exists_overwrite(self): index = ['a', 1, None, (1,), (1, 2)] - c = self._container_type((i, self._ctype_factory()) - for i in index) + c = self._container_type((i, self._ctype_factory()) for i in index) self.assertEqual(len(c), len(index)) for i in index: self.assertTrue(i in c) @@ -230,23 +223,20 @@ def test_setitem_exists_overwrite(self): def test_delitem(self): index = ['a', 1, None, (1,), (1, 2)] - c = self._container_type((i, self._ctype_factory()) - for i in index) + c = self._container_type((i, self._ctype_factory()) for i in index) self.assertEqual(len(c), len(index)) for cnt, i in enumerate(index, 1): self.assertTrue(i in c) cdata = c[i] - self.assertEqual(id(cdata.parent), - id(c)) + self.assertEqual(id(cdata.parent), id(c)) del c[i] - self.assertEqual(len(c), len(index)-cnt) + self.assertEqual(len(c), len(index) - cnt) self.assertTrue(i not in c) self.assertEqual(cdata.parent, None) def test_iter(self): index = ['a', 1, None, (1,), (1, 2)] - c = self._container_type((i, self._ctype_factory()) - for i in index) + c = self._container_type((i, self._ctype_factory()) for i in index) self.assertEqual(len(c), len(index)) comp_index = [i for i in c] self.assertEqual(len(comp_index), len(index)) @@ -255,15 +245,13 @@ def test_iter(self): def test_pickle(self): index = ['a', 1, None, (1,), (1, 2)] - cdict = self._container_type((i, self._ctype_factory()) - for i in index) + cdict = self._container_type((i, self._ctype_factory()) for i in index) cdict[0] = self._container_type() index.append(0) for i in index: self.assertTrue(cdict[i].parent is cdict) pickled_cdict = pickle.loads(pickle.dumps(cdict)) - self.assertTrue( - isinstance(pickled_cdict, self._container_type)) + self.assertTrue(isinstance(pickled_cdict, self._container_type)) self.assertTrue(pickled_cdict.parent is None) self.assertEqual(len(pickled_cdict), len(index)) self.assertNotEqual(id(pickled_cdict), id(cdict)) @@ -274,44 +262,42 @@ def test_pickle(self): def test_keys(self): index = ['a', 1, None, (1,), (1, 2)] - raw_constraint_dict = {i:self._ctype_factory() for i in index} + raw_constraint_dict = {i: self._ctype_factory() for i in index} c = self._container_type(raw_constraint_dict) - self.assertEqual(sorted(list(raw_constraint_dict.keys()), - key=str), - sorted(list(c.keys()), key=str)) + self.assertEqual( + sorted(list(raw_constraint_dict.keys()), key=str), + sorted(list(c.keys()), key=str), + ) def test_values(self): index = ['a', 1, None, (1,), (1, 2)] - raw_constraint_dict = {i:self._ctype_factory() for i in index} + raw_constraint_dict = {i: self._ctype_factory() for i in index} c = self._container_type(raw_constraint_dict) self.assertEqual( - sorted(list(id(_v) - for _v in raw_constraint_dict.values()), - key=str), - sorted(list(id(_v) - for _v in c.values()), - key=str)) + sorted(list(id(_v) for _v in raw_constraint_dict.values()), key=str), + sorted(list(id(_v) for _v in c.values()), key=str), + ) def test_items(self): index = ['a', 1, None, (1,), (1, 2)] - raw_constraint_dict = {i:self._ctype_factory() for i in index} + raw_constraint_dict = {i: self._ctype_factory() for i in index} c = self._container_type(raw_constraint_dict) self.assertEqual( - sorted(list((_i, id(_v)) - for _i,_v in raw_constraint_dict.items()), - key=str), - sorted(list((_i, id(_v)) - for _i,_v in c.items()), - key=str)) + sorted( + list((_i, id(_v)) for _i, _v in raw_constraint_dict.items()), key=str + ), + sorted(list((_i, id(_v)) for _i, _v in c.items()), key=str), + ) def test_update(self): index = ['a', 1, None, (1,), (1, 2)] - raw_constraint_dict = {i:self._ctype_factory() for i in index} + raw_constraint_dict = {i: self._ctype_factory() for i in index} c = self._container_type() c.update(raw_constraint_dict) - self.assertEqual(sorted(list(raw_constraint_dict.keys()), - key=str), - sorted(list(c.keys()), key=str)) + self.assertEqual( + sorted(list(raw_constraint_dict.keys()), key=str), + sorted(list(c.keys()), key=str), + ) def test_clear(self): c = self._container_type() @@ -380,7 +366,7 @@ def test_name(self): children[1] = self._ctype_factory() children[None] = self._ctype_factory() children[(1,)] = self._ctype_factory() - children[(1,2)] = self._ctype_factory() + children[(1, 2)] = self._ctype_factory() children['(1,2)'] = self._ctype_factory() children['x'] = self._container_type() children['x']['y'] = self._ctype_factory() @@ -398,14 +384,18 @@ def test_name(self): names = pmo.generate_names(cdict) for key, c in children.items(): self.assertTrue(c.parent is cdict) - self.assertEqual(c.getname(fully_qualified=False, convert=str), - "[%s]" % (str(key))) - self.assertEqual(c.getname(fully_qualified=False, convert=repr), - "[%s]" % (repr(key))) - self.assertEqual(c.getname(fully_qualified=True, convert=str), - "[%s]" % (str(key))) - self.assertEqual(c.getname(fully_qualified=True, convert=repr), - "[%s]" % (repr(key))) + self.assertEqual( + c.getname(fully_qualified=False, convert=str), "[%s]" % (str(key)) + ) + self.assertEqual( + c.getname(fully_qualified=False, convert=repr), "[%s]" % (repr(key)) + ) + self.assertEqual( + c.getname(fully_qualified=True, convert=str), "[%s]" % (str(key)) + ) + self.assertEqual( + c.getname(fully_qualified=True, convert=repr), "[%s]" % (repr(key)) + ) self.assertEqual(c.name, names[c]) for c in cdict.components(): self.assertNotEqual(c.name, None) @@ -420,14 +410,18 @@ def test_name(self): names = pmo.generate_names(model) for key, c in children.items(): self.assertTrue(c.parent is cdict) - self.assertEqual(c.getname(fully_qualified=False, convert=str), - "[%s]" % (str(key))) - self.assertEqual(c.getname(fully_qualified=False, convert=repr), - "[%s]" % (repr(key))) - self.assertEqual(c.getname(fully_qualified=True, convert=str), - "cdict[%s]" % (str(key))) - self.assertEqual(c.getname(fully_qualified=True, convert=repr), - "cdict[%s]" % (repr(key))) + self.assertEqual( + c.getname(fully_qualified=False, convert=str), "[%s]" % (str(key)) + ) + self.assertEqual( + c.getname(fully_qualified=False, convert=repr), "[%s]" % (repr(key)) + ) + self.assertEqual( + c.getname(fully_qualified=True, convert=str), "cdict[%s]" % (str(key)) + ) + self.assertEqual( + c.getname(fully_qualified=True, convert=repr), "cdict[%s]" % (repr(key)) + ) self.assertEqual(c.name, names[c]) for c in cdict.components(): self.assertNotEqual(c.name, None) @@ -443,14 +437,20 @@ def test_name(self): names = pmo.generate_names(b) for key, c in children.items(): self.assertTrue(c.parent is cdict) - self.assertEqual(c.getname(fully_qualified=False, convert=str), - "[%s]" % (str(key))) - self.assertEqual(c.getname(fully_qualified=False, convert=repr), - "[%s]" % (repr(key))) - self.assertEqual(c.getname(fully_qualified=True, convert=str), - "model.cdict[%s]" % (str(key))) - self.assertEqual(c.getname(fully_qualified=True, convert=repr), - "model.cdict[%s]" % (repr(key))) + self.assertEqual( + c.getname(fully_qualified=False, convert=str), "[%s]" % (str(key)) + ) + self.assertEqual( + c.getname(fully_qualified=False, convert=repr), "[%s]" % (repr(key)) + ) + self.assertEqual( + c.getname(fully_qualified=True, convert=str), + "model.cdict[%s]" % (str(key)), + ) + self.assertEqual( + c.getname(fully_qualified=True, convert=repr), + "model.cdict[%s]" % (repr(key)), + ) self.assertEqual(c.name, names[c]) for c in cdict.components(): self.assertNotEqual(c.name, None) @@ -466,14 +466,20 @@ def test_name(self): self.assertEqual(cdict.name, "[0].model.cdict") for key, c in children.items(): self.assertTrue(c.parent is cdict) - self.assertEqual(c.getname(fully_qualified=False, convert=str), - "[%s]" % (str(key))) - self.assertEqual(c.getname(fully_qualified=False, convert=repr), - "[%s]" % (repr(key))) - self.assertEqual(c.getname(fully_qualified=True, convert=str), - "[0].model.cdict[%s]" % (str(key))) - self.assertEqual(c.getname(fully_qualified=True, convert=repr), - "[0].model.cdict[%s]" % (repr(key))) + self.assertEqual( + c.getname(fully_qualified=False, convert=str), "[%s]" % (str(key)) + ) + self.assertEqual( + c.getname(fully_qualified=False, convert=repr), "[%s]" % (repr(key)) + ) + self.assertEqual( + c.getname(fully_qualified=True, convert=str), + "[0].model.cdict[%s]" % (str(key)), + ) + self.assertEqual( + c.getname(fully_qualified=True, convert=repr), + "[0].model.cdict[%s]" % (repr(key)), + ) m = block() m.bdict = bdict @@ -487,14 +493,20 @@ def test_name(self): names = pmo.generate_names(m) for key, c in children.items(): self.assertTrue(c.parent is cdict) - self.assertEqual(c.getname(fully_qualified=False, convert=str), - "[%s]" % (str(key))) - self.assertEqual(c.getname(fully_qualified=False, convert=repr), - "[%s]" % (repr(key))) - self.assertEqual(c.getname(fully_qualified=True, convert=str), - "bdict[0].model.cdict[%s]" % (str(key))) - self.assertEqual(c.getname(fully_qualified=True, convert=repr), - "bdict[0].model.cdict[%s]" % (repr(key))) + self.assertEqual( + c.getname(fully_qualified=False, convert=str), "[%s]" % (str(key)) + ) + self.assertEqual( + c.getname(fully_qualified=False, convert=repr), "[%s]" % (repr(key)) + ) + self.assertEqual( + c.getname(fully_qualified=True, convert=str), + "bdict[0].model.cdict[%s]" % (str(key)), + ) + self.assertEqual( + c.getname(fully_qualified=True, convert=repr), + "bdict[0].model.cdict[%s]" % (repr(key)), + ) self.assertEqual(c.name, names[c]) for c in cdict.components(): self.assertNotEqual(c.name, None) @@ -518,14 +530,14 @@ def test_preorder_traversal(self): descend = lambda x: not x._is_heterogeneous_container - self.assertEqual([c.name for c in traversal], - [c.name for c in pmo.preorder_traversal( - cdict, - descend=descend)]) - self.assertEqual([id(c) for c in traversal], - [id(c) for c in pmo.preorder_traversal( - cdict, - descend=descend)]) + self.assertEqual( + [c.name for c in traversal], + [c.name for c in pmo.preorder_traversal(cdict, descend=descend)], + ) + self.assertEqual( + [id(c) for c in traversal], + [id(c) for c in pmo.preorder_traversal(cdict, descend=descend)], + ) return cdict, traversal def test_preorder_traversal_descend_check(self): @@ -545,9 +557,9 @@ def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return False + descend.seen = [] - order = list(pmo.preorder_traversal(cdict, - descend=descend)) + order = list(pmo.preorder_traversal(cdict, descend=descend)) self.assertEqual(len(order), 1) self.assertIs(order[0], cdict) self.assertEqual(len(descend.seen), 1) @@ -557,42 +569,39 @@ def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return not x._is_heterogeneous_container + descend.seen = [] - order = list(pmo.preorder_traversal(cdict, - descend=descend)) - self.assertEqual([c.name for c in traversal], - [c.name for c in order]) - self.assertEqual([id(c) for c in traversal], - [id(c) for c in order]) - self.assertEqual([c.name for c in traversal - if c._is_container], - [c.name for c in descend.seen]) - self.assertEqual([id(c) for c in traversal - if c._is_container], - [id(c) for c in descend.seen]) + order = list(pmo.preorder_traversal(cdict, descend=descend)) + self.assertEqual([c.name for c in traversal], [c.name for c in order]) + self.assertEqual([id(c) for c in traversal], [id(c) for c in order]) + self.assertEqual( + [c.name for c in traversal if c._is_container], + [c.name for c in descend.seen], + ) + self.assertEqual( + [id(c) for c in traversal if c._is_container], [id(c) for c in descend.seen] + ) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return not x._is_heterogeneous_container + descend.seen = [] - order = list(pmo.preorder_traversal(cdict, - descend=descend)) - self.assertEqual([c.name for c in traversal], - [c.name for c in order]) - self.assertEqual([id(c) for c in traversal], - [id(c) for c in order]) - self.assertEqual([c.name for c in traversal - if c._is_container], - [c.name for c in descend.seen]) - self.assertEqual([id(c) for c in traversal - if c._is_container], - [id(c) for c in descend.seen]) + order = list(pmo.preorder_traversal(cdict, descend=descend)) + self.assertEqual([c.name for c in traversal], [c.name for c in order]) + self.assertEqual([id(c) for c in traversal], [id(c) for c in order]) + self.assertEqual( + [c.name for c in traversal if c._is_container], + [c.name for c in descend.seen], + ) + self.assertEqual( + [id(c) for c in traversal if c._is_container], [id(c) for c in descend.seen] + ) return cdict, traversal class _TestActiveDictContainerBase(_TestDictContainerBase): - def test_active_type(self): cdict = self._container_type() self.assertTrue(isinstance(cdict, ICategorizedObject)) @@ -642,8 +651,9 @@ def test_active(self): for c in cdict.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(cdict.components())), len(cdict)) - self.assertEqual(len(list(cdict.components())), - len(list(cdict.components(active=True)))) + self.assertEqual( + len(list(cdict.components())), len(list(cdict.components(active=True))) + ) m.deactivate(shallow=False) @@ -655,8 +665,9 @@ def test_active(self): self.assertEqual(cdict.active, False) for c in cdict.values(): self.assertEqual(c.active, False) - self.assertNotEqual(len(list(cdict.components())), - len(list(cdict.components(active=None)))) + self.assertNotEqual( + len(list(cdict.components())), len(list(cdict.components(active=None))) + ) self.assertEqual(len(list(cdict.components(active=True))), 0) test_key = list(children.keys())[0] @@ -671,8 +682,9 @@ def test_active(self): self.assertEqual(cdict.active, False) for c in cdict.values(): self.assertEqual(c.active, False) - self.assertNotEqual(len(list(cdict.components())), - len(list(cdict.components(active=None)))) + self.assertNotEqual( + len(list(cdict.components())), len(list(cdict.components(active=None))) + ) self.assertEqual(len(list(cdict.components(active=True))), 0) del cdict[test_key] @@ -705,11 +717,11 @@ def test_active(self): self.assertEqual(c.active, False) for c in cdict.components(active=True): self.assertEqual(c.active, True) - self.assertNotEqual(len(list(cdict.components())), - len(list(cdict.components(active=None)))) + self.assertNotEqual( + len(list(cdict.components())), len(list(cdict.components(active=None))) + ) self.assertEqual(len(list(cdict.components(active=True))), 1) - cdict.deactivate() m.activate(shallow=False) @@ -726,8 +738,9 @@ def test_active(self): for c in cdict.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(cdict.components())), len(cdict)) - self.assertEqual(len(list(cdict.components())), - len(list(cdict.components(active=True)))) + self.assertEqual( + len(list(cdict.components())), len(list(cdict.components(active=True))) + ) cdict.deactivate(shallow=False) @@ -739,8 +752,9 @@ def test_active(self): self.assertEqual(cdict.active, False) for c in cdict.values(): self.assertEqual(c.active, False) - self.assertNotEqual(len(list(cdict.components())), - len(list(cdict.components(active=None)))) + self.assertNotEqual( + len(list(cdict.components())), len(list(cdict.components(active=None))) + ) self.assertEqual(len(list(cdict.components(active=True))), 0) cdict.activate(shallow=False) @@ -758,8 +772,9 @@ def test_active(self): for c in cdict.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(cdict.components())), len(cdict)) - self.assertEqual(len(list(cdict.components())), - len(list(cdict.components(active=True)))) + self.assertEqual( + len(list(cdict.components())), len(list(cdict.components(active=True))) + ) cdict.deactivate(shallow=False) cdict[test_key].activate() @@ -789,165 +804,167 @@ def test_active(self): self.assertEqual(c.active, False) for c in cdict.components(active=True): self.assertEqual(c.active, True) - self.assertNotEqual(len(list(cdict.components())), - len(list(cdict.components(active=None)))) + self.assertNotEqual( + len(list(cdict.components())), len(list(cdict.components(active=None))) + ) self.assertEqual(len(list(cdict.components(active=True))), 1) def test_preorder_traversal(self): - cdict, traversal = \ - super(_TestActiveDictContainerBase, self).\ - test_preorder_traversal() + cdict, traversal = super( + _TestActiveDictContainerBase, self + ).test_preorder_traversal() descend = lambda x: not x._is_heterogeneous_container cdict[1].deactivate() - self.assertEqual([None, '[0]', '[2]'], - [c.name for c in pmo.preorder_traversal( - cdict, - active=True, - descend=descend)]) - self.assertEqual([id(cdict),id(cdict[0]),id(cdict[2])], - [id(c) for c in pmo.preorder_traversal( - cdict, - active=True, - descend=descend)]) + self.assertEqual( + [None, '[0]', '[2]'], + [ + c.name + for c in pmo.preorder_traversal(cdict, active=True, descend=descend) + ], + ) + self.assertEqual( + [id(cdict), id(cdict[0]), id(cdict[2])], + [ + id(c) + for c in pmo.preorder_traversal(cdict, active=True, descend=descend) + ], + ) cdict[1].deactivate(shallow=False) - self.assertEqual([c.name for c in traversal if c.active], - [c.name for c in pmo.preorder_traversal( - cdict, - active=True, - descend=descend)]) - self.assertEqual([id(c) for c in traversal if c.active], - [id(c) for c in pmo.preorder_traversal( - cdict, - active=True, - descend=descend)]) + self.assertEqual( + [c.name for c in traversal if c.active], + [ + c.name + for c in pmo.preorder_traversal(cdict, active=True, descend=descend) + ], + ) + self.assertEqual( + [id(c) for c in traversal if c.active], + [ + id(c) + for c in pmo.preorder_traversal(cdict, active=True, descend=descend) + ], + ) cdict.deactivate() - self.assertEqual(len(list(pmo.preorder_traversal(cdict, - active=True))), - 0) - self.assertEqual(len(list(pmo.generate_names(cdict, - active=True))), - 0) + self.assertEqual(len(list(pmo.preorder_traversal(cdict, active=True))), 0) + self.assertEqual(len(list(pmo.generate_names(cdict, active=True))), 0) def test_preorder_traversal_descend_check(self): - cdict, traversal = \ - super(_TestActiveDictContainerBase, self).\ - test_preorder_traversal_descend_check() + cdict, traversal = super( + _TestActiveDictContainerBase, self + ).test_preorder_traversal_descend_check() cdict[1].deactivate() + def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return not x._is_heterogeneous_container + descend.seen = [] - pmo.pprint(cdict) - order = list(pmo.preorder_traversal(cdict, - active=True, - descend=descend)) - self.assertEqual([None, '[0]', '[2]'], - [c.name for c in order]) - self.assertEqual([id(cdict),id(cdict[0]),id(cdict[2])], - [id(c) for c in order]) + # pmo.pprint(cdict) + order = list(pmo.preorder_traversal(cdict, active=True, descend=descend)) + self.assertEqual([None, '[0]', '[2]'], [c.name for c in order]) + self.assertEqual( + [id(cdict), id(cdict[0]), id(cdict[2])], [id(c) for c in order] + ) if cdict.ctype._is_heterogeneous_container: - self.assertEqual([None, '[0]', '[2]'], - [c.name for c in descend.seen]) - self.assertEqual([id(cdict),id(cdict[0]),id(cdict[2])], - [id(c) for c in descend.seen]) + self.assertEqual([None, '[0]', '[2]'], [c.name for c in descend.seen]) + self.assertEqual( + [id(cdict), id(cdict[0]), id(cdict[2])], [id(c) for c in descend.seen] + ) else: - self.assertEqual([None], - [c.name for c in descend.seen]) - self.assertEqual([id(cdict)], - [id(c) for c in descend.seen]) + self.assertEqual([None], [c.name for c in descend.seen]) + self.assertEqual([id(cdict)], [id(c) for c in descend.seen]) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return x.active and (not x._is_heterogeneous_container) + descend.seen = [] - order = list(pmo.preorder_traversal(cdict, - active=None, - descend=descend)) - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in order]) - self.assertEqual([id(cdict),id(cdict[0]),id(cdict[1]),id(cdict[2])], - [id(c) for c in order]) + order = list(pmo.preorder_traversal(cdict, active=None, descend=descend)) + self.assertEqual([None, '[0]', '[1]', '[2]'], [c.name for c in order]) + self.assertEqual( + [id(cdict), id(cdict[0]), id(cdict[1]), id(cdict[2])], + [id(c) for c in order], + ) if cdict.ctype._is_heterogeneous_container: - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in descend.seen]) - self.assertEqual([id(cdict),id(cdict[0]),id(cdict[1]),id(cdict[2])], - [id(c) for c in descend.seen]) + self.assertEqual( + [None, '[0]', '[1]', '[2]'], [c.name for c in descend.seen] + ) + self.assertEqual( + [id(cdict), id(cdict[0]), id(cdict[1]), id(cdict[2])], + [id(c) for c in descend.seen], + ) else: - self.assertEqual([None,'[1]'], - [c.name for c in descend.seen]) - self.assertEqual([id(cdict),id(cdict[1])], - [id(c) for c in descend.seen]) + self.assertEqual([None, '[1]'], [c.name for c in descend.seen]) + self.assertEqual([id(cdict), id(cdict[1])], [id(c) for c in descend.seen]) cdict[1].deactivate(shallow=False) + def descend(x): descend.seen.append(x) return not x._is_heterogeneous_container + descend.seen = [] - order = list(pmo.preorder_traversal(cdict, - active=True, - descend=descend)) - self.assertEqual([c.name for c in traversal if c.active], - [c.name for c in order]) - self.assertEqual([id(c) for c in traversal if c.active], - [id(c) for c in order]) - self.assertEqual([c.name for c in traversal - if c.active and \ - c._is_container], - [c.name for c in descend.seen]) - self.assertEqual([id(c) for c in traversal - if c.active and \ - c._is_container], - [id(c) for c in descend.seen]) + order = list(pmo.preorder_traversal(cdict, active=True, descend=descend)) + self.assertEqual( + [c.name for c in traversal if c.active], [c.name for c in order] + ) + self.assertEqual([id(c) for c in traversal if c.active], [id(c) for c in order]) + self.assertEqual( + [c.name for c in traversal if c.active and c._is_container], + [c.name for c in descend.seen], + ) + self.assertEqual( + [id(c) for c in traversal if c.active and c._is_container], + [id(c) for c in descend.seen], + ) def descend(x): descend.seen.append(x) return x.active and (not x._is_heterogeneous_container) + descend.seen = [] - order = list(pmo.preorder_traversal(cdict, - active=None, - descend=descend)) - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in order]) - self.assertEqual([id(cdict),id(cdict[0]),id(cdict[1]),id(cdict[2])], - [id(c) for c in order]) + order = list(pmo.preorder_traversal(cdict, active=None, descend=descend)) + self.assertEqual([None, '[0]', '[1]', '[2]'], [c.name for c in order]) + self.assertEqual( + [id(cdict), id(cdict[0]), id(cdict[1]), id(cdict[2])], + [id(c) for c in order], + ) if cdict.ctype._is_heterogeneous_container: - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in descend.seen]) - self.assertEqual([id(cdict),id(cdict[0]),id(cdict[1]),id(cdict[2])], - [id(c) for c in descend.seen]) + self.assertEqual( + [None, '[0]', '[1]', '[2]'], [c.name for c in descend.seen] + ) + self.assertEqual( + [id(cdict), id(cdict[0]), id(cdict[1]), id(cdict[2])], + [id(c) for c in descend.seen], + ) else: - self.assertEqual([None,'[1]'], - [c.name for c in descend.seen]) - self.assertEqual([id(cdict),id(cdict[1])], - [id(c) for c in descend.seen]) + self.assertEqual([None, '[1]'], [c.name for c in descend.seen]) + self.assertEqual([id(cdict), id(cdict[1])], [id(c) for c in descend.seen]) cdict.deactivate() + def descend(x): descend.seen.append(x) return True + descend.seen = [] - order = list(pmo.preorder_traversal(cdict, - active=True, - descend=descend)) + order = list(pmo.preorder_traversal(cdict, active=True, descend=descend)) self.assertEqual(len(descend.seen), 0) - self.assertEqual(len(list(pmo.generate_names(cdict, - active=True))), - 0) + self.assertEqual(len(list(pmo.generate_names(cdict, active=True))), 0) def descend(x): descend.seen.append(x) return x.active + descend.seen = [] - order = list(pmo.preorder_traversal(cdict, - active=None, - descend=descend)) + order = list(pmo.preorder_traversal(cdict, active=None, descend=descend)) self.assertEqual(len(descend.seen), 1) self.assertIs(descend.seen[0], cdict) diff --git a/pyomo/core/tests/unit/kernel/test_expression.py b/pyomo/core/tests/unit/kernel/test_expression.py index 0701bb02c0b..85f8c331a46 100644 --- a/pyomo/core/tests/unit/kernel/test_expression.py +++ b/pyomo/core/tests/unit/kernel/test_expression.py @@ -12,27 +12,34 @@ import pickle import pyomo.common.unittest as unittest -from pyomo.core.expr.numvalue import (NumericValue, - is_fixed, - is_constant, - is_potentially_variable, - value) +from pyomo.core.expr.numvalue import ( + NumericValue, + is_fixed, + is_constant, + is_potentially_variable, + value, +) import pyomo.kernel -from pyomo.core.tests.unit.kernel.test_dict_container import \ - _TestActiveDictContainerBase -from pyomo.core.tests.unit.kernel.test_tuple_container import \ - _TestActiveTupleContainerBase -from pyomo.core.tests.unit.kernel.test_list_container import \ - _TestActiveListContainerBase +from pyomo.core.tests.unit.kernel.test_dict_container import ( + _TestActiveDictContainerBase, +) +from pyomo.core.tests.unit.kernel.test_tuple_container import ( + _TestActiveTupleContainerBase, +) +from pyomo.core.tests.unit.kernel.test_list_container import ( + _TestActiveListContainerBase, +) from pyomo.core.kernel.base import ICategorizedObject -from pyomo.core.kernel.expression import (IIdentityExpression, - noclone, - IExpression, - expression, - data_expression, - expression_dict, - expression_tuple, - expression_list) +from pyomo.core.kernel.expression import ( + IIdentityExpression, + noclone, + IExpression, + expression, + data_expression, + expression_dict, + expression_tuple, + expression_list, +) from pyomo.core.kernel.variable import variable from pyomo.core.kernel.parameter import parameter from pyomo.core.kernel.objective import objective @@ -40,12 +47,13 @@ try: import numpy + numpy_available = True except: numpy_available = False -class Test_noclone(unittest.TestCase): +class Test_noclone(unittest.TestCase): def test_is_named_expression_type(self): e = expression() self.assertEqual(e.is_named_expression_type(), True) @@ -74,18 +82,30 @@ def test_init_NumericValue(self): e = expression() d = data_expression() o = objective() - for obj in (v, v+1, v**2, - p, p+1, p**2, - e, e+1, e**2, - d, d+1, d**2, - o, o+1, o**2): + for obj in ( + v, + v + 1, + v**2, + p, + p + 1, + p**2, + e, + e + 1, + e**2, + d, + d + 1, + d**2, + o, + o + 1, + o**2, + ): self.assertTrue(isinstance(noclone(obj), NumericValue)) self.assertTrue(isinstance(noclone(obj), IIdentityExpression)) - self.assertTrue(isinstance(noclone(obj), noclone)) self.assertIs(noclone(obj).expr, obj) def test_pprint(self): import pyomo.kernel + # Not really testing what the output is, just that # an error does not occur. The pprint functionality # is still in the early stages. @@ -103,21 +123,20 @@ def test_pprint(self): pyomo.kernel.pprint(b) pyomo.kernel.pprint(m) # tests compatibility with _ToStringVisitor - pyomo.kernel.pprint(noclone(v)+1) - pyomo.kernel.pprint(noclone(v+1)) + pyomo.kernel.pprint(noclone(v) + 1) + pyomo.kernel.pprint(noclone(v + 1)) x = variable() y = variable() - pyomo.kernel.pprint(y + x*noclone(noclone(x*y))) - pyomo.kernel.pprint(y + noclone(noclone(x*y))*x) + pyomo.kernel.pprint(y + x * noclone(noclone(x * y))) + pyomo.kernel.pprint(y + noclone(noclone(x * y)) * x) def test_pickle(self): v = variable() e = noclone(v) - self.assertEqual(type(e), noclone) + self.assertEqual(type(e), expression) self.assertIs(type(e.expr), variable) - eup = pickle.loads( - pickle.dumps(e)) - self.assertEqual(type(eup), noclone) + eup = pickle.loads(pickle.dumps(e)) + self.assertEqual(type(eup), expression) self.assertTrue(e is not eup) self.assertIs(type(eup.expr), variable) self.assertIs(type(e.expr), variable) @@ -131,8 +150,7 @@ def test_pickle(self): b.v = v eraw = b.v + 1 b.e = 1 + noclone(eraw) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) self.assertTrue(isinstance(bup.e, NumericValue)) self.assertEqual(value(bup.e), 3.0) b.v.value = 2 @@ -219,13 +237,23 @@ def test_polynomial_degree(self): self.assertEqual(e.polynomial_degree(), None) def test_is_expression_type(self): - for obj in (variable(), parameter(), objective(), - expression(), data_expression()): + for obj in ( + variable(), + parameter(), + objective(), + expression(), + data_expression(), + ): self.assertEqual(noclone(obj).is_expression_type(), True) def test_is_parameter_type(self): - for obj in (variable(), parameter(), objective(), - expression(), data_expression()): + for obj in ( + variable(), + parameter(), + objective(), + expression(), + data_expression(), + ): self.assertEqual(noclone(obj).is_parameter_type(), False) def test_args(self): @@ -233,13 +261,12 @@ def test_args(self): self.assertEqual(e.nargs(), 1) self.assertTrue(e.arg(0) is e.expr) - def test_aruments(self): + def test_arguments(self): e = noclone(parameter() + 1) self.assertEqual(len(tuple(e.args)), 1) self.assertTrue(tuple(e.args)[0] is e.expr) def test_clone(self): - p = parameter() e = noclone(p) self.assertTrue(e.clone() is e) @@ -254,33 +281,36 @@ def test_division_behavior(self): # use __future__ behavior e = noclone(parameter(value=2)) self.assertIs(type(e.expr), parameter) - self.assertEqual((1/e)(), 0.5) - self.assertEqual((parameter(1)/e)(), 0.5) - self.assertEqual((1/e.expr()), 0.5) + self.assertEqual((1 / e)(), 0.5) + self.assertEqual((parameter(1) / e)(), 0.5) + self.assertEqual((1 / e.expr()), 0.5) def test_to_string(self): b = block() p = parameter() e = noclone(p**2) self.assertEqual(str(e.expr), "**2") - self.assertEqual(str(e), "{(**2)}") + self.assertEqual(str(e), "") self.assertEqual(e.to_string(), "(**2)") self.assertEqual(e.to_string(verbose=False), "(**2)") - self.assertEqual(e.to_string(verbose=True), "{pow(, 2)}") + self.assertEqual( + e.to_string(verbose=True), "{pow(, 2)}" + ) b.e = e b.p = p self.assertNotEqual(p.name, None) - self.assertEqual(e.to_string(verbose=True), "{pow("+p.name+", 2)}") - self.assertEqual(e.to_string(verbose=True), "{pow(p, 2)}") + self.assertEqual(e.to_string(verbose=True), "e{pow(" + p.name + ", 2)}") + self.assertEqual(e.to_string(verbose=True), "e{pow(p, 2)}") del b.e del b.p -class _Test_expression_base(object): +class _Test_expression_base(object): _ctype_factory = None def test_pprint(self): import pyomo.kernel + # Not really testing what the output is, just that # an error does not occur. The pprint functionality # is still in the early stages. @@ -302,16 +332,14 @@ def test_pickle(self): self.assertEqual(type(e.expr), float) self.assertEqual(e.expr, 1.0) self.assertEqual(e.parent, None) - eup = pickle.loads( - pickle.dumps(e)) + eup = pickle.loads(pickle.dumps(e)) self.assertEqual(type(eup.expr), float) self.assertEqual(eup.expr, 1.0) self.assertEqual(eup.parent, None) b = block() b.e = e self.assertIs(e.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) eup = bup.e self.assertEqual(type(eup.expr), float) self.assertEqual(eup.expr, 1.0) @@ -376,7 +404,7 @@ def test_args(self): self.assertEqual(e.nargs(), 1) self.assertTrue(e.arg(0) is e.expr) - def test_aruments(self): + def test_arguments(self): e = self._ctype_factory() p = parameter() e.expr = p + 1 @@ -401,9 +429,9 @@ def test_division_behavior(self): e = self._ctype_factory() e.expr = 2 self.assertIs(type(e.expr), int) - self.assertEqual((1/e)(), 0.5) - self.assertEqual((parameter(1)/e)(), 0.5) - self.assertEqual((1/e.expr), 0.5) + self.assertEqual((1 / e)(), 0.5) + self.assertEqual((parameter(1) / e)(), 0.5) + self.assertEqual((1 / e.expr), 0.5) def test_to_string(self): b = block() @@ -414,9 +442,9 @@ def test_to_string(self): self.assertEqual(str(e.expr), "None") self.assertEqual(str(e), label) - self.assertEqual(e.to_string(), label+"{Undefined}") - self.assertEqual(e.to_string(verbose=False), label+"{Undefined}") - self.assertEqual(e.to_string(verbose=True), label+"{Undefined}") + self.assertEqual(e.to_string(), label + "{Undefined}") + self.assertEqual(e.to_string(verbose=False), label + "{Undefined}") + self.assertEqual(e.to_string(verbose=True), label + "{Undefined}") b.e = e self.assertNotEqual(e.name, None) self.assertEqual(e.to_string(verbose=True), "e{Undefined}") @@ -428,26 +456,25 @@ def test_to_string(self): self.assertEqual(str(e), label) self.assertEqual(e.to_string(), "1") self.assertEqual(e.to_string(verbose=False), "1") - self.assertEqual(e.to_string(verbose=True), label+"{1}") + self.assertEqual(e.to_string(verbose=True), label + "{1}") b.e = e self.assertNotEqual(e.name, None) self.assertEqual(e.to_string(verbose=True), "e{1}") del b.e self.assertEqual(e.name, None) - p = parameter() e.expr = p**2 self.assertEqual(str(e.expr), "**2") self.assertEqual(str(e), label) self.assertEqual(e.to_string(), "(**2)") self.assertEqual(e.to_string(verbose=False), "(**2)") - self.assertEqual(e.to_string(verbose=True), label+"{pow(, 2)}") + self.assertEqual(e.to_string(verbose=True), label + "{pow(, 2)}") b.e = e b.p = p self.assertNotEqual(e.name, None) self.assertNotEqual(p.name, None) - self.assertEqual(e.to_string(verbose=True), e.name+"{pow("+p.name+", 2)}") + self.assertEqual(e.to_string(verbose=True), e.name + "{pow(" + p.name + ", 2)}") self.assertEqual(e.to_string(verbose=True), "e{pow(p, 2)}") del b.e del b.p @@ -458,12 +485,12 @@ def test_iadd(self): # expression e = self._ctype_factory(1.0) expr = 0.0 - for v in [1.0,e]: + for v in [1.0, e]: expr += v self.assertEqual(e.expr, 1) self.assertEqual(expr(), 2) expr = 0.0 - for v in [e,1.0]: + for v in [e, 1.0]: expr += v self.assertEqual(e.expr, 1) self.assertEqual(expr(), 2) @@ -474,12 +501,12 @@ def test_isub(self): # expression e = self._ctype_factory(1.0) expr = 0.0 - for v in [1.0,e]: + for v in [1.0, e]: expr -= v self.assertEqual(e.expr, 1) self.assertEqual(expr(), -2) expr = 0.0 - for v in [e,1.0]: + for v in [e, 1.0]: expr -= v self.assertEqual(e.expr, 1) self.assertEqual(expr(), -2) @@ -490,12 +517,12 @@ def test_imul(self): # expression e = self._ctype_factory(3.0) expr = 1.0 - for v in [2.0,e]: + for v in [2.0, e]: expr *= v self.assertEqual(e.expr, 3) self.assertEqual(expr(), 6) expr = 1.0 - for v in [e,2.0]: + for v in [e, 2.0]: expr *= v self.assertEqual(e.expr, 3) self.assertEqual(expr(), 6) @@ -507,12 +534,12 @@ def test_idiv(self): # floating point division e = self._ctype_factory(3.0) expr = e - for v in [2.0,1.0]: + for v in [2.0, 1.0]: expr /= v self.assertEqual(e.expr, 3) self.assertEqual(expr(), 1.5) expr = e - for v in [1.0,2.0]: + for v in [1.0, 2.0]: expr /= v self.assertEqual(e.expr, 3) self.assertEqual(expr(), 1.5) @@ -520,12 +547,12 @@ def test_idiv(self): # Pyomo expressions e = self._ctype_factory(3) expr = e - for v in [2,1]: + for v in [2, 1]: expr /= v self.assertEqual(e.expr, 3) self.assertEqual(expr(), 1.5) expr = e - for v in [1,2]: + for v in [1, 2]: expr /= v self.assertEqual(e.expr, 3) self.assertEqual(expr(), 1.5) @@ -536,25 +563,25 @@ def test_ipow(self): # expression e = self._ctype_factory(3.0) expr = e - for v in [2.0,1.0]: + for v in [2.0, 1.0]: expr **= v self.assertEqual(e.expr, 3) self.assertEqual(expr(), 9) expr = e - for v in [1.0,2.0]: + for v in [1.0, 2.0]: expr **= v self.assertEqual(e.expr, 3) self.assertEqual(expr(), 9) -class Test_expression(_Test_expression_base, - unittest.TestCase): + +class Test_expression(_Test_expression_base, unittest.TestCase): _ctype_factory = expression def test_associativity(self): x = variable() y = variable() - pyomo.kernel.pprint(y + x*expression(expression(x*y))) - pyomo.kernel.pprint(y + expression(expression(x*y))*x) + pyomo.kernel.pprint(y + x * expression(expression(x * y))) + pyomo.kernel.pprint(y + expression(expression(x * y)) * x) def test_ctype(self): e = expression() @@ -614,18 +641,15 @@ def test_polynomial_degree(self): v.free() self.assertEqual(e.polynomial_degree(), None) -class Test_data_expression(_Test_expression_base, - unittest.TestCase): +class Test_data_expression(_Test_expression_base, unittest.TestCase): _ctype_factory = data_expression def test_associativity(self): x = parameter() y = parameter() - pyomo.kernel.pprint( - y + x*data_expression(data_expression(x*y))) - pyomo.kernel.pprint( - y + data_expression(data_expression(x*y))*x) + pyomo.kernel.pprint(y + x * data_expression(data_expression(x * y))) + pyomo.kernel.pprint(y + data_expression(data_expression(x * y)) * x) def test_ctype(self): e = data_expression() @@ -661,7 +685,7 @@ def test_is_fixed(self): self.assertEqual(e.is_fixed(), True) self.assertEqual(is_fixed(e), True) a = self._ctype_factory() - e.expr = (a*p)**2/(p + 5) + e.expr = (a * p) ** 2 / (p + 5) self.assertEqual(e.is_fixed(), True) self.assertEqual(is_fixed(e), True) a.expr = 2.0 @@ -686,7 +710,7 @@ def testis_potentially_variable(self): self.assertEqual(e.is_potentially_variable(), False) self.assertEqual(is_potentially_variable(e), False) a = self._ctype_factory() - e.expr = (a*p)**2/(p + 5) + e.expr = (a * p) ** 2 / (p + 5) self.assertEqual(e.is_potentially_variable(), False) self.assertEqual(is_potentially_variable(e), False) a.expr = 2.0 @@ -708,7 +732,7 @@ def test_polynomial_degree(self): e.expr = p**2 self.assertEqual(e.polynomial_degree(), 0) a = self._ctype_factory() - e.expr = (a*p)**2/(p + 5) + e.expr = (a * p) ** 2 / (p + 5) self.assertEqual(e.polynomial_degree(), 0) a.expr = 2.0 p.value = 5.0 @@ -719,20 +743,21 @@ def test_polynomial_degree(self): with self.assertRaises(ValueError): e.expr = v + 1 -class Test_expression_dict(_TestActiveDictContainerBase, - unittest.TestCase): + +class Test_expression_dict(_TestActiveDictContainerBase, unittest.TestCase): _container_type = expression_dict _ctype_factory = lambda self: expression() -class Test_expression_tuple(_TestActiveTupleContainerBase, - unittest.TestCase): + +class Test_expression_tuple(_TestActiveTupleContainerBase, unittest.TestCase): _container_type = expression_tuple _ctype_factory = lambda self: expression() -class Test_expression_list(_TestActiveListContainerBase, - unittest.TestCase): + +class Test_expression_list(_TestActiveListContainerBase, unittest.TestCase): _container_type = expression_list _ctype_factory = lambda self: expression() + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_kernel.py b/pyomo/core/tests/unit/kernel/test_kernel.py index dc38ed514fa..fbff295881a 100644 --- a/pyomo/core/tests/unit/kernel/test_kernel.py +++ b/pyomo/core/tests/unit/kernel/test_kernel.py @@ -18,12 +18,17 @@ class IJunk(IBlock): __slots__ = () + + class junk(pmo.block): _ctype = IJunk + + class junk_list(pmo.block_list): __slots__ = () _ctype = IJunk + _model = pmo.block() _model.v = pmo.variable() _model.V = pmo.variable_list() @@ -55,94 +60,139 @@ class junk_list(pmo.block_list): _model.K.append(model_clone.clone()) del model_clone -class Test_kernel(unittest.TestCase): +class Test_kernel(unittest.TestCase): def test_no_ctype_collisions(self): hash_set = set() hash_list = list() - for cls in [pmo.variable, - pmo.constraint, - pmo.objective, - pmo.expression, - pmo.parameter, - pmo.suffix, - pmo.sos, - pmo.block]: + for cls in [ + pmo.variable, + pmo.constraint, + pmo.objective, + pmo.expression, + pmo.parameter, + pmo.suffix, + pmo.sos, + pmo.block, + ]: ctype = cls._ctype hash_set.add(hash(ctype)) hash_list.append(hash(ctype)) - self.assertEqual(len(hash_set), - len(hash_list)) + self.assertEqual(len(hash_set), len(hash_list)) def test_component_data_objects_hack(self): model = _model.clone() self.assertEqual( [str(obj) for obj in model.component_data_objects()], - [str(obj) for obj in model.components()]) + [str(obj) for obj in model.components()], + ) self.assertEqual( [str(obj) for obj in model.component_data_objects(ctype=IVariable)], - [str(obj) for obj in model.components(ctype=IVariable)]) + [str(obj) for obj in model.components(ctype=IVariable)], + ) self.assertEqual( [str(obj) for obj in model.component_data_objects(ctype=IConstraint)], - [str(obj) for obj in model.components(ctype=IConstraint)]) + [str(obj) for obj in model.components(ctype=IConstraint)], + ) self.assertEqual( [str(obj) for obj in model.component_data_objects(ctype=IBlock)], - [str(obj) for obj in model.components(ctype=IBlock)]) + [str(obj) for obj in model.components(ctype=IBlock)], + ) self.assertEqual( [str(obj) for obj in model.component_data_objects(ctype=IJunk)], - [str(obj) for obj in model.components(ctype=IJunk)]) + [str(obj) for obj in model.components(ctype=IJunk)], + ) for item in pmo.preorder_traversal(model): item.deactivate() self.assertEqual( [str(obj) for obj in model.component_data_objects(active=True)], - [str(obj) for obj in model.components(active=True)]) + [str(obj) for obj in model.components(active=True)], + ) self.assertEqual( - [str(obj) for obj in model.component_data_objects(ctype=IVariable, active=True)], - [str(obj) for obj in model.components(ctype=IVariable, active=True)]) + [ + str(obj) + for obj in model.component_data_objects( + ctype=IVariable, active=True + ) + ], + [str(obj) for obj in model.components(ctype=IVariable, active=True)], + ) self.assertEqual( - [str(obj) for obj in model.component_data_objects(ctype=IConstraint, active=True)], - [str(obj) for obj in model.components(ctype=IConstraint, active=True)]) + [ + str(obj) + for obj in model.component_data_objects( + ctype=IConstraint, active=True + ) + ], + [str(obj) for obj in model.components(ctype=IConstraint, active=True)], + ) self.assertEqual( - [str(obj) for obj in model.component_data_objects(ctype=IBlock, active=True)], - [str(obj) for obj in model.components(ctype=IBlock, active=True)]) + [ + str(obj) + for obj in model.component_data_objects(ctype=IBlock, active=True) + ], + [str(obj) for obj in model.components(ctype=IBlock, active=True)], + ) self.assertEqual( - [str(obj) for obj in model.component_data_objects(ctype=IJunk, active=True)], - [str(obj) for obj in model.components(ctype=IJunk, active=True)]) + [ + str(obj) + for obj in model.component_data_objects(ctype=IJunk, active=True) + ], + [str(obj) for obj in model.components(ctype=IJunk, active=True)], + ) item.activate() def test_component_objects_hack(self): model = _model.clone() - objs = {key: [] for key in - [None, IVariable, IConstraint, IBlock, IJunk]} + objs = {key: [] for key in [None, IVariable, IConstraint, IBlock, IJunk]} for item in pmo.heterogeneous_containers(model): objs[None].extend(item.component_objects(descend_into=False)) self.assertEqual( [str(obj) for obj in item.component_objects(descend_into=False)], - [str(obj) for obj in item.children()]) - objs[IVariable].extend(item.component_objects(ctype=IVariable, - descend_into=False)) + [str(obj) for obj in item.children()], + ) + objs[IVariable].extend( + item.component_objects(ctype=IVariable, descend_into=False) + ) self.assertEqual( - [str(obj) for obj in item.component_objects(ctype=IVariable, - descend_into=False)], - [str(obj) for obj in item.children(ctype=IVariable)]) - objs[IConstraint].extend(item.component_objects(ctype=IConstraint, - descend_into=False)) + [ + str(obj) + for obj in item.component_objects( + ctype=IVariable, descend_into=False + ) + ], + [str(obj) for obj in item.children(ctype=IVariable)], + ) + objs[IConstraint].extend( + item.component_objects(ctype=IConstraint, descend_into=False) + ) self.assertEqual( - [str(obj) for obj in item.component_objects(ctype=IConstraint, - descend_into=False)], - [str(obj) for obj in item.children(ctype=IConstraint)]) - objs[IBlock].extend(item.component_objects(ctype=IBlock, - descend_into=False)) + [ + str(obj) + for obj in item.component_objects( + ctype=IConstraint, descend_into=False + ) + ], + [str(obj) for obj in item.children(ctype=IConstraint)], + ) + objs[IBlock].extend( + item.component_objects(ctype=IBlock, descend_into=False) + ) self.assertEqual( - [str(obj) for obj in item.component_objects(ctype=IBlock, - descend_into=False)], - [str(obj) for obj in item.children(ctype=IBlock)]) - objs[IJunk].extend(item.component_objects(ctype=IJunk, - descend_into=False)) + [ + str(obj) + for obj in item.component_objects(ctype=IBlock, descend_into=False) + ], + [str(obj) for obj in item.children(ctype=IBlock)], + ) + objs[IJunk].extend(item.component_objects(ctype=IJunk, descend_into=False)) self.assertEqual( - [str(obj) for obj in item.component_objects(ctype=IJunk, - descend_into=False)], - [str(obj) for obj in item.children(ctype=IJunk)]) + [ + str(obj) + for obj in item.component_objects(ctype=IJunk, descend_into=False) + ], + [str(obj) for obj in item.children(ctype=IJunk)], + ) all_ = [] for key in objs: if key is None: @@ -150,43 +200,52 @@ def test_component_objects_hack(self): names = [str(obj) for obj in objs[key]] self.assertEqual( sorted([str(obj) for obj in model.component_objects(ctype=key)]), - sorted(names)) + sorted(names), + ) all_.extend(names) self.assertEqual( - sorted([str(obj) for obj in model.component_objects()]), - sorted(all_)) - self.assertEqual( - sorted([str(obj) for obj in objs[None]]), - sorted(all_)) + sorted([str(obj) for obj in model.component_objects()]), sorted(all_) + ) + self.assertEqual(sorted([str(obj) for obj in objs[None]]), sorted(all_)) model.deactivate() self.assertEqual( - sorted([str(obj) for obj in model.component_objects()]), - sorted(all_)) + sorted([str(obj) for obj in model.component_objects()]), sorted(all_) + ) self.assertEqual( - [str(obj) for obj in model.component_objects(descend_into=False, - active=True)], - []) + [ + str(obj) + for obj in model.component_objects(descend_into=False, active=True) + ], + [], + ) self.assertEqual( - [str(obj) for obj in model.component_objects(descend_into=True, - active=True)], - []) + [ + str(obj) + for obj in model.component_objects(descend_into=True, active=True) + ], + [], + ) def test_block_data_objects_hack(self): model = _model.clone() model.deactivate() self.assertEqual( - [str(obj) for obj in model.block_data_objects(active=True)], - []) + [str(obj) for obj in model.block_data_objects(active=True)], [] + ) self.assertEqual( [str(obj) for obj in model.block_data_objects()], - [str(model)]+[str(obj) for obj in model.components(ctype=IBlock)]) + [str(model)] + [str(obj) for obj in model.components(ctype=IBlock)], + ) model.activate() self.assertEqual( [str(obj) for obj in model.block_data_objects(active=True)], - [str(model)]+[str(obj) for obj in model.components(ctype=IBlock)]) + [str(model)] + [str(obj) for obj in model.components(ctype=IBlock)], + ) self.assertEqual( [str(obj) for obj in model.block_data_objects()], - [str(model)]+[str(obj) for obj in model.components(ctype=IBlock)]) + [str(model)] + [str(obj) for obj in model.components(ctype=IBlock)], + ) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_list_container.py b/pyomo/core/tests/unit/kernel/test_list_container.py index 5668b565760..9e3ada739b2 100644 --- a/pyomo/core/tests/unit/kernel/test_list_container.py +++ b/pyomo/core/tests/unit/kernel/test_list_container.py @@ -16,14 +16,10 @@ import pyomo.common.unittest as unittest import pyomo.kernel as pmo from pyomo.common.log import LoggingIntercept -from pyomo.core.kernel.base import \ - (ICategorizedObject, - ICategorizedObjectContainer) -from pyomo.core.kernel.homogeneous_container import \ - IHomogeneousContainer +from pyomo.core.kernel.base import ICategorizedObject, ICategorizedObjectContainer +from pyomo.core.kernel.homogeneous_container import IHomogeneousContainer from pyomo.core.kernel.list_container import ListContainer -from pyomo.core.kernel.block import (block, - block_list) +from pyomo.core.kernel.block import block, block_list # # There are no fully implemented test suites in this @@ -38,11 +34,12 @@ # and weakref (bas _pickle_test_protocol = pickle.HIGHEST_PROTOCOL + class _bad_ctype(object): ctype = "_this_is_definitely_not_the_ctype_being_tested" -class _TestListContainerBase(object): +class _TestListContainerBase(object): # set by derived class _container_type = None _ctype_factory = None @@ -56,15 +53,18 @@ def test_overwrite_warning(self): assert out.getvalue() == "" with LoggingIntercept(out, 'pyomo.core'): c[0] = self._ctype_factory() - assert out.getvalue() == \ - ("Implicitly replacing the entry [0] " - "(type=%s) with a new object (type=%s). " - "This is usually indicative of a modeling " - "error. To avoid this warning, delete the " - "original object from the container before " - "assigning a new object.\n" - % (self._ctype_factory().__class__.__name__, - self._ctype_factory().__class__.__name__)) + assert out.getvalue() == ( + "Implicitly replacing the entry [0] " + "(type=%s) with a new object (type=%s). " + "This is usually indicative of a modeling " + "error. To avoid this warning, delete the " + "original object from the container before " + "assigning a new object.\n" + % ( + self._ctype_factory().__class__.__name__, + self._ctype_factory().__class__.__name__, + ) + ) def test_ctype(self): c = self._container_type() @@ -78,11 +78,9 @@ def test_init1(self): def test_init2(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) with self.assertRaises(TypeError): - d = self._container_type( - *tuple(self._ctype_factory() for i in index)) + d = self._container_type(*tuple(self._ctype_factory() for i in index)) def test_type(self): clist = self._container_type() @@ -101,8 +99,7 @@ def test_len1(self): def test_len2(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) self.assertEqual(len(c), len(index)) def test_append(self): @@ -113,7 +110,7 @@ def test_append(self): c_new = self._ctype_factory() c.append(c_new) self.assertEqual(id(c[-1]), id(c_new)) - self.assertEqual(len(c), i+1) + self.assertEqual(len(c), i + 1) def test_insert(self): c = self._container_type() @@ -123,7 +120,7 @@ def test_insert(self): c_new = self._ctype_factory() c.insert(0, c_new) self.assertEqual(id(c[0]), id(c_new)) - self.assertEqual(len(c), i+1) + self.assertEqual(len(c), i + 1) def test_setitem(self): c = self._container_type() @@ -140,8 +137,7 @@ def test_setitem(self): def test_wrong_type_init(self): index = range(5) with self.assertRaises(TypeError): - c = self._container_type( - _bad_ctype() for i in index) + c = self._container_type(_bad_ctype() for i in index) def test_wrong_type_append(self): c = self._container_type() @@ -205,13 +201,11 @@ def test_has_parent_setitem(self): def test_setitem_exists_overwrite(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) self.assertEqual(len(c), len(index)) for i in index: cdata = c[i] - self.assertEqual(id(cdata.parent), - id(c)) + self.assertEqual(id(cdata.parent), id(c)) c[i] = self._ctype_factory() self.assertEqual(len(c), len(index)) self.assertNotEqual(id(cdata), id(c[i])) @@ -219,21 +213,18 @@ def test_setitem_exists_overwrite(self): def test_delitem(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) self.assertEqual(len(c), len(index)) for i in index: cdata = c[0] - self.assertEqual(id(cdata.parent), - id(c)) + self.assertEqual(id(cdata.parent), id(c)) del c[0] - self.assertEqual(len(c), len(index)-(i+1)) + self.assertEqual(len(c), len(index) - (i + 1)) self.assertEqual(cdata.parent, None) def test_iter(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) self.assertEqual(len(c), len(index)) raw_list = c[:] self.assertEqual(type(raw_list), list) @@ -242,8 +233,7 @@ def test_iter(self): def test_reverse(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) raw_list = c[:] self.assertEqual(type(raw_list), list) for c1, c2 in zip(reversed(c), reversed(raw_list)): @@ -256,8 +246,7 @@ def test_reverse(self): def test_remove(self): model = block() index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) for i in index: cdata = c[0] self.assertEqual(cdata in c, True) @@ -266,8 +255,7 @@ def test_remove(self): def test_pop(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) for i in index: cdata = c[-1] self.assertEqual(cdata in c, True) @@ -277,49 +265,40 @@ def test_pop(self): def test_index(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) for i in index: cdata = c[i] self.assertEqual(c.index(cdata), i) self.assertEqual(c.index(cdata, start=i), i) with self.assertRaises(ValueError): - c.index(cdata, start=i+1) + c.index(cdata, start=i + 1) with self.assertRaises(ValueError): c.index(cdata, start=i, stop=i) with self.assertRaises(ValueError): c.index(cdata, stop=i) - self.assertEqual( - c.index(cdata, start=i, stop=i+1), i) + self.assertEqual(c.index(cdata, start=i, stop=i + 1), i) with self.assertRaises(ValueError): - c.index(cdata, start=i+1, stop=i+1) - self.assertEqual( - c.index(cdata, start=-len(index)+i), i) + c.index(cdata, start=i + 1, stop=i + 1) + self.assertEqual(c.index(cdata, start=-len(index) + i), i) if i == index[-1]: - self.assertEqual( - c.index(cdata, start=-len(index)+i+1), i) + self.assertEqual(c.index(cdata, start=-len(index) + i + 1), i) else: with self.assertRaises(ValueError): - self.assertEqual( - c.index(cdata, start=-len(index)+i+1), - i) + self.assertEqual(c.index(cdata, start=-len(index) + i + 1), i) if i == index[-1]: with self.assertRaises(ValueError): - self.assertEqual( - c.index(cdata, stop=-len(index)+i+1), i) + self.assertEqual(c.index(cdata, stop=-len(index) + i + 1), i) else: - self.assertEqual( - c.index(cdata, stop=-len(index)+i+1), i) + self.assertEqual(c.index(cdata, stop=-len(index) + i + 1), i) tmp = self._ctype_factory() with self.assertRaises(ValueError): c.index(tmp) with self.assertRaises(ValueError): - c.index(tmp, stop=len(c)+1) + c.index(tmp, stop=len(c) + 1) def test_extend(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) c_more_list = [self._ctype_factory() for i in index] self.assertEqual(len(c), len(index)) self.assertTrue(len(c_more_list) > 0) @@ -327,29 +306,26 @@ def test_extend(self): self.assertEqual(cdata.parent, None) c.extend(c_more_list) for cdata in c_more_list: - self.assertEqual(id(cdata.parent), - id(c)) + self.assertEqual(id(cdata.parent), id(c)) def test_count(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) for i in index: self.assertEqual(c.count(c[i]), 1) def test_pickle(self): index = range(5) - clist = self._container_type( - self._ctype_factory() for i in index) + clist = self._container_type(self._ctype_factory() for i in index) clist.append(self._container_type()) index = list(index) index = index + [len(index)] for i in index: self.assertTrue(clist[i].parent is clist) pickled_clist = pickle.loads( - pickle.dumps(clist, protocol=_pickle_test_protocol)) - self.assertTrue( - isinstance(pickled_clist, self._container_type)) + pickle.dumps(clist, protocol=_pickle_test_protocol) + ) + self.assertTrue(isinstance(pickled_clist, self._container_type)) self.assertTrue(pickled_clist.parent is None) self.assertEqual(len(pickled_clist), len(index)) self.assertNotEqual(id(pickled_clist), id(clist)) @@ -480,8 +456,7 @@ def test_name(self): for i, c in enumerate(children): self.assertTrue(c.parent is clist) self.assertEqual(c.local_name, "[%s]" % (i)) - self.assertEqual(c.name, - "[0].model.clist[%s]" % (i)) + self.assertEqual(c.name, "[0].model.clist[%s]" % (i)) m = block() m.blist = blist @@ -496,8 +471,7 @@ def test_name(self): for i, c in enumerate(children): self.assertTrue(c.parent is clist) self.assertEqual(c.local_name, "[%s]" % (i)) - self.assertEqual(c.name, - "blist[0].model.clist[%s]" % (i)) + self.assertEqual(c.name, "blist[0].model.clist[%s]" % (i)) self.assertEqual(c.name, names[c]) for c in clist.components(): self.assertNotEqual(c.name, None) @@ -515,8 +489,10 @@ def test_components(self): clist.append(clistflattened[-1]) clistflattened.append(self._ctype_factory()) clist.append(clistflattened[-1]) - self.assertEqual(list(id(_c) for _c in clist.components()), - list(id(_c) for _c in clistflattened)) + self.assertEqual( + list(id(_c) for _c in clist.components()), + list(id(_c) for _c in clistflattened), + ) csublist = self._container_type() self.assertEqual(list(csublist.components()), []) @@ -534,20 +510,26 @@ def test_components(self): clistflattened.append(csublistflattened[-1]) csublist.append(csublistflattened[-1]) - self.assertEqual(list(id(_c) for _c in csublist.components()), - list(id(_c) for _c in csublistflattened)) - self.assertEqual(len(set(id(_c) for _c in csublist.components())), - len(list(id(_c) for _c in csublist.components()))) - self.assertEqual(len(set(id(_c) for _c in csublist.components())), - 3) + self.assertEqual( + list(id(_c) for _c in csublist.components()), + list(id(_c) for _c in csublistflattened), + ) + self.assertEqual( + len(set(id(_c) for _c in csublist.components())), + len(list(id(_c) for _c in csublist.components())), + ) + self.assertEqual(len(set(id(_c) for _c in csublist.components())), 3) clist.append(csublist) - self.assertEqual(list(id(_c) for _c in clist.components()), - list(id(_c) for _c in clistflattened)) - self.assertEqual(len(set(id(_c) for _c in clist.components())), - len(list(id(_c) for _c in clist.components()))) - self.assertEqual(len(set(id(_c) for _c in clist.components())), - 5) + self.assertEqual( + list(id(_c) for _c in clist.components()), + list(id(_c) for _c in clistflattened), + ) + self.assertEqual( + len(set(id(_c) for _c in clist.components())), + len(list(id(_c) for _c in clist.components())), + ) + self.assertEqual(len(set(id(_c) for _c in clist.components())), 5) def test_preorder_traversal(self): traversal = [] @@ -564,14 +546,14 @@ def test_preorder_traversal(self): descend = lambda x: not x._is_heterogeneous_container - self.assertEqual([c.name for c in traversal], - [c.name for c in pmo.preorder_traversal( - clist, - descend=descend)]) - self.assertEqual([id(c) for c in traversal], - [id(c) for c in pmo.preorder_traversal( - clist, - descend=descend)]) + self.assertEqual( + [c.name for c in traversal], + [c.name for c in pmo.preorder_traversal(clist, descend=descend)], + ) + self.assertEqual( + [id(c) for c in traversal], + [id(c) for c in pmo.preorder_traversal(clist, descend=descend)], + ) return clist, traversal @@ -592,9 +574,9 @@ def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return False + descend.seen = [] - order = list(pmo.preorder_traversal(clist, - descend=descend)) + order = list(pmo.preorder_traversal(clist, descend=descend)) self.assertEqual(len(order), 1) self.assertIs(order[0], clist) self.assertEqual(len(descend.seen), 1) @@ -604,41 +586,39 @@ def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return not x._is_heterogeneous_container + descend.seen = [] - order = list(pmo.preorder_traversal(clist, - descend=descend)) - self.assertEqual([c.name for c in traversal], - [c.name for c in order]) - self.assertEqual([id(c) for c in traversal], - [id(c) for c in order]) - self.assertEqual([c.name for c in traversal - if c._is_container], - [c.name for c in descend.seen]) - self.assertEqual([id(c) for c in traversal - if c._is_container], - [id(c) for c in descend.seen]) + order = list(pmo.preorder_traversal(clist, descend=descend)) + self.assertEqual([c.name for c in traversal], [c.name for c in order]) + self.assertEqual([id(c) for c in traversal], [id(c) for c in order]) + self.assertEqual( + [c.name for c in traversal if c._is_container], + [c.name for c in descend.seen], + ) + self.assertEqual( + [id(c) for c in traversal if c._is_container], [id(c) for c in descend.seen] + ) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return not x._is_heterogeneous_container + descend.seen = [] - order = list(pmo.preorder_traversal(clist, - descend=descend)) - self.assertEqual([c.name for c in traversal], - [c.name for c in order]) - self.assertEqual([id(c) for c in traversal], - [id(c) for c in order]) - self.assertEqual([c.name for c in traversal - if c._is_container], - [c.name for c in descend.seen]) - self.assertEqual([id(c) for c in traversal - if c._is_container], - [id(c) for c in descend.seen]) + order = list(pmo.preorder_traversal(clist, descend=descend)) + self.assertEqual([c.name for c in traversal], [c.name for c in order]) + self.assertEqual([id(c) for c in traversal], [id(c) for c in order]) + self.assertEqual( + [c.name for c in traversal if c._is_container], + [c.name for c in descend.seen], + ) + self.assertEqual( + [id(c) for c in traversal if c._is_container], [id(c) for c in descend.seen] + ) return clist, traversal -class _TestActiveListContainerBase(_TestListContainerBase): +class _TestActiveListContainerBase(_TestListContainerBase): def test_active_type(self): clist = self._container_type() self.assertTrue(isinstance(clist, ICategorizedObject)) @@ -652,8 +632,7 @@ def test_active_type(self): def test_active(self): index = list(range(4)) - clist = self._container_type(self._ctype_factory() - for i in index) + clist = self._container_type(self._ctype_factory() for i in index) with self.assertRaises(AttributeError): clist.active = False for c in clist: @@ -683,8 +662,9 @@ def test_active(self): for c in clist.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(clist.components())), len(clist)) - self.assertEqual(len(list(clist.components())), - len(list(clist.components(active=True)))) + self.assertEqual( + len(list(clist.components())), len(list(clist.components(active=True))) + ) m.deactivate(shallow=False) @@ -696,8 +676,9 @@ def test_active(self): self.assertEqual(clist.active, False) for c in clist: self.assertEqual(c.active, False) - self.assertNotEqual(len(list(clist.components())), - len(list(clist.components(active=None)))) + self.assertNotEqual( + len(list(clist.components())), len(list(clist.components(active=None))) + ) self.assertEqual(len(list(clist.components(active=True))), 0) test_c = clist[0] @@ -712,8 +693,9 @@ def test_active(self): self.assertEqual(clist.active, False) for c in clist: self.assertEqual(c.active, False) - self.assertNotEqual(len(list(clist.components())), - len(list(clist.components(active=None)))) + self.assertNotEqual( + len(list(clist.components())), len(list(clist.components(active=None))) + ) self.assertEqual(len(list(clist.components(active=True))), 0) clist.remove(test_c) @@ -749,8 +731,9 @@ def test_active(self): self.assertEqual(c.active, True) for c in clist.components(active=True): self.assertEqual(c.active, True) - self.assertNotEqual(len(list(clist.components())), - len(list(clist.components(active=None)))) + self.assertNotEqual( + len(list(clist.components())), len(list(clist.components(active=None))) + ) self.assertEqual(len(list(clist.components(active=True))), 1) m.activate(shallow=False) @@ -768,8 +751,9 @@ def test_active(self): for c in clist.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(clist.components())), len(clist)) - self.assertEqual(len(list(clist.components())), - len(list(clist.components(active=True)))) + self.assertEqual( + len(list(clist.components())), len(list(clist.components(active=True))) + ) m.deactivate(shallow=False) @@ -781,11 +765,12 @@ def test_active(self): self.assertEqual(clist.active, False) for c in clist: self.assertEqual(c.active, False) - self.assertNotEqual(len(list(clist.components())), - len(list(clist.components(active=None)))) + self.assertNotEqual( + len(list(clist.components())), len(list(clist.components(active=None))) + ) self.assertEqual(len(list(clist.components(active=True))), 0) - clist[len(clist)-1] = self._ctype_factory() + clist[len(clist) - 1] = self._ctype_factory() self.assertEqual(m.active, False) self.assertEqual(blist.active, False) @@ -801,12 +786,12 @@ def test_active(self): self.assertEqual(model.active, False) self.assertEqual(clist.active, True) for i, c in enumerate(clist): - if i == len(clist)-1: + if i == len(clist) - 1: self.assertEqual(c.active, True) else: self.assertEqual(c.active, False) for i, c in enumerate(clist.components(active=None)): - if i == len(clist)-1: + if i == len(clist) - 1: self.assertEqual(c.active, True) else: self.assertEqual(c.active, False) @@ -814,8 +799,9 @@ def test_active(self): self.assertEqual(c.active, True) for c in clist.components(active=True): self.assertEqual(c.active, True) - self.assertNotEqual(len(list(clist.components())), - len(list(clist.components(active=None)))) + self.assertNotEqual( + len(list(clist.components())), len(list(clist.components(active=None))) + ) self.assertEqual(len(list(clist.components(active=True))), 1) clist.activate(shallow=False) @@ -833,8 +819,9 @@ def test_active(self): for c in clist.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(clist.components())), len(clist)) - self.assertEqual(len(list(clist.components())), - len(list(clist.components(active=True)))) + self.assertEqual( + len(list(clist.components())), len(list(clist.components(active=True))) + ) clist.deactivate(shallow=False) @@ -846,8 +833,9 @@ def test_active(self): self.assertEqual(clist.active, False) for i, c in enumerate(clist): self.assertEqual(c.active, False) - self.assertNotEqual(len(list(clist.components())), - len(list(clist.components(active=None)))) + self.assertNotEqual( + len(list(clist.components())), len(list(clist.components(active=None))) + ) self.assertEqual(len(list(clist.components(active=True))), 0) clist[-1].activate() @@ -866,12 +854,12 @@ def test_active(self): self.assertEqual(model.active, False) self.assertEqual(clist.active, True) for i, c in enumerate(clist): - if i == len(clist)-1: + if i == len(clist) - 1: self.assertEqual(c.active, True) else: self.assertEqual(c.active, False) for i, c in enumerate(clist.components(active=None)): - if i == len(clist)-1: + if i == len(clist) - 1: self.assertEqual(c.active, True) else: self.assertEqual(c.active, False) @@ -879,8 +867,9 @@ def test_active(self): self.assertEqual(c.active, True) for c in clist.components(active=True): self.assertEqual(c.active, True) - self.assertNotEqual(len(list(clist.components())), - len(list(clist.components(active=None)))) + self.assertNotEqual( + len(list(clist.components())), len(list(clist.components(active=None))) + ) self.assertEqual(len(list(clist.components(active=True))), 1) clist.deactivate(shallow=False) @@ -899,169 +888,172 @@ def test_active(self): for c in clist.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(clist.components())), len(clist)) - self.assertEqual(len(list(clist.components())), - len(list(clist.components(active=True)))) + self.assertEqual( + len(list(clist.components())), len(list(clist.components(active=True))) + ) def test_preorder_traversal(self): - clist, traversal = \ - super(_TestActiveListContainerBase, self).\ - test_preorder_traversal() + clist, traversal = super( + _TestActiveListContainerBase, self + ).test_preorder_traversal() descend = lambda x: not x._is_heterogeneous_container clist[1].deactivate() - self.assertEqual([None,'[0]','[2]'], - [c.name for c in pmo.preorder_traversal( - clist, - active=True, - descend=descend)]) - self.assertEqual([id(clist),id(clist[0]),id(clist[2])], - [id(c) for c in pmo.preorder_traversal( - clist, - active=True, - descend=descend)]) + self.assertEqual( + [None, '[0]', '[2]'], + [ + c.name + for c in pmo.preorder_traversal(clist, active=True, descend=descend) + ], + ) + self.assertEqual( + [id(clist), id(clist[0]), id(clist[2])], + [ + id(c) + for c in pmo.preorder_traversal(clist, active=True, descend=descend) + ], + ) clist[1].deactivate(shallow=False) - self.assertEqual([c.name for c in traversal if c.active], - [c.name for c in pmo.preorder_traversal( - clist, - active=True, - descend=descend)]) - self.assertEqual([id(c) for c in traversal if c.active], - [id(c) for c in pmo.preorder_traversal( - clist, - active=True, - descend=descend)]) + self.assertEqual( + [c.name for c in traversal if c.active], + [ + c.name + for c in pmo.preorder_traversal(clist, active=True, descend=descend) + ], + ) + self.assertEqual( + [id(c) for c in traversal if c.active], + [ + id(c) + for c in pmo.preorder_traversal(clist, active=True, descend=descend) + ], + ) clist.deactivate() - self.assertEqual(len(list(pmo.preorder_traversal(clist, - active=True))), - 0) - self.assertEqual(len(list(pmo.generate_names(clist, - active=True))), - 0) + self.assertEqual(len(list(pmo.preorder_traversal(clist, active=True))), 0) + self.assertEqual(len(list(pmo.generate_names(clist, active=True))), 0) def test_preorder_traversal_descend_check(self): - clist, traversal = \ - super(_TestActiveListContainerBase, self).\ - test_preorder_traversal_descend_check() + clist, traversal = super( + _TestActiveListContainerBase, self + ).test_preorder_traversal_descend_check() clist[1].deactivate() + def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return not x._is_heterogeneous_container + descend.seen = [] - order = list(pmo.preorder_traversal(clist, - active=True, - descend=descend)) - self.assertEqual([None,'[0]','[2]'], - [c.name for c in order]) - self.assertEqual([id(clist),id(clist[0]),id(clist[2])], - [id(c) for c in order]) + order = list(pmo.preorder_traversal(clist, active=True, descend=descend)) + self.assertEqual([None, '[0]', '[2]'], [c.name for c in order]) + self.assertEqual( + [id(clist), id(clist[0]), id(clist[2])], [id(c) for c in order] + ) if clist.ctype._is_heterogeneous_container: - self.assertEqual([None,'[0]','[2]'], - [c.name for c in descend.seen]) - self.assertEqual([id(clist),id(clist[0]),id(clist[2])], - [id(c) for c in descend.seen]) + self.assertEqual([None, '[0]', '[2]'], [c.name for c in descend.seen]) + self.assertEqual( + [id(clist), id(clist[0]), id(clist[2])], [id(c) for c in descend.seen] + ) else: - self.assertEqual([None], - [c.name for c in descend.seen]) - self.assertEqual([id(clist)], - [id(c) for c in descend.seen]) + self.assertEqual([None], [c.name for c in descend.seen]) + self.assertEqual([id(clist)], [id(c) for c in descend.seen]) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return x.active and (not x._is_heterogeneous_container) + descend.seen = [] - order = list(pmo.preorder_traversal(clist, - active=None, - descend=descend)) - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in order]) - self.assertEqual([id(clist),id(clist[0]),id(clist[1]),id(clist[2])], - [id(c) for c in order]) + order = list(pmo.preorder_traversal(clist, active=None, descend=descend)) + self.assertEqual([None, '[0]', '[1]', '[2]'], [c.name for c in order]) + self.assertEqual( + [id(clist), id(clist[0]), id(clist[1]), id(clist[2])], + [id(c) for c in order], + ) if clist.ctype._is_heterogeneous_container: - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in descend.seen]) - self.assertEqual([id(clist),id(clist[0]),id(clist[1]),id(clist[2])], - [id(c) for c in descend.seen]) + self.assertEqual( + [None, '[0]', '[1]', '[2]'], [c.name for c in descend.seen] + ) + self.assertEqual( + [id(clist), id(clist[0]), id(clist[1]), id(clist[2])], + [id(c) for c in descend.seen], + ) else: - self.assertEqual([None,'[1]'], - [c.name for c in descend.seen]) - self.assertEqual([id(clist),id(clist[1])], - [id(c) for c in descend.seen]) + self.assertEqual([None, '[1]'], [c.name for c in descend.seen]) + self.assertEqual([id(clist), id(clist[1])], [id(c) for c in descend.seen]) clist[1].deactivate(shallow=False) + def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return not x._is_heterogeneous_container + descend.seen = [] - order = list(pmo.preorder_traversal(clist, - active=True, - descend=descend)) - self.assertEqual([c.name for c in traversal if c.active], - [c.name for c in order]) - self.assertEqual([id(c) for c in traversal if c.active], - [id(c) for c in order]) - self.assertEqual([c.name for c in traversal - if c.active and \ - c._is_container], - [c.name for c in descend.seen]) - self.assertEqual([id(c) for c in traversal - if c.active and \ - c._is_container], - [id(c) for c in descend.seen]) + order = list(pmo.preorder_traversal(clist, active=True, descend=descend)) + self.assertEqual( + [c.name for c in traversal if c.active], [c.name for c in order] + ) + self.assertEqual([id(c) for c in traversal if c.active], [id(c) for c in order]) + self.assertEqual( + [c.name for c in traversal if c.active and c._is_container], + [c.name for c in descend.seen], + ) + self.assertEqual( + [id(c) for c in traversal if c.active and c._is_container], + [id(c) for c in descend.seen], + ) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return x.active and (not x._is_heterogeneous_container) + descend.seen = [] - order = list(pmo.preorder_traversal(clist, - active=None, - descend=descend)) - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in order]) - self.assertEqual([id(clist),id(clist[0]),id(clist[1]),id(clist[2])], - [id(c) for c in order]) + order = list(pmo.preorder_traversal(clist, active=None, descend=descend)) + self.assertEqual([None, '[0]', '[1]', '[2]'], [c.name for c in order]) + self.assertEqual( + [id(clist), id(clist[0]), id(clist[1]), id(clist[2])], + [id(c) for c in order], + ) if clist.ctype._is_heterogeneous_container: - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in descend.seen]) - self.assertEqual([id(clist),id(clist[0]),id(clist[1]),id(clist[2])], - [id(c) for c in descend.seen]) + self.assertEqual( + [None, '[0]', '[1]', '[2]'], [c.name for c in descend.seen] + ) + self.assertEqual( + [id(clist), id(clist[0]), id(clist[1]), id(clist[2])], + [id(c) for c in descend.seen], + ) else: - self.assertEqual([None,'[1]'], - [c.name for c in descend.seen]) - self.assertEqual([id(clist),id(clist[1])], - [id(c) for c in descend.seen]) + self.assertEqual([None, '[1]'], [c.name for c in descend.seen]) + self.assertEqual([id(clist), id(clist[1])], [id(c) for c in descend.seen]) clist.deactivate() + def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return True + descend.seen = [] - order = list(pmo.preorder_traversal(clist, - active=True, - descend=descend)) + order = list(pmo.preorder_traversal(clist, active=True, descend=descend)) self.assertEqual(len(descend.seen), 0) - self.assertEqual(len(list(pmo.generate_names(clist, - active=True))), - 0) + self.assertEqual(len(list(pmo.generate_names(clist, active=True))), 0) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return x.active + descend.seen = [] - order = list(pmo.preorder_traversal(clist, - active=None, - descend=descend)) + order = list(pmo.preorder_traversal(clist, active=None, descend=descend)) self.assertEqual(len(descend.seen), 1) self.assertIs(descend.seen[0], clist) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_matrix_constraint.py b/pyomo/core/tests/unit/kernel/test_matrix_constraint.py index 41fb7a23988..fcb29c9f09c 100644 --- a/pyomo/core/tests/unit/kernel/test_matrix_constraint.py +++ b/pyomo/core/tests/unit/kernel/test_matrix_constraint.py @@ -13,40 +13,37 @@ import pyomo.common.unittest as unittest import pyomo.kernel as pmo -from pyomo.core.kernel.base import \ - (ICategorizedObject, - ICategorizedObjectContainer) -from pyomo.core.kernel.homogeneous_container import \ - IHomogeneousContainer +from pyomo.core.kernel.base import ICategorizedObject, ICategorizedObjectContainer +from pyomo.core.kernel.homogeneous_container import IHomogeneousContainer from pyomo.core.kernel.tuple_container import TupleContainer -from pyomo.core.kernel.constraint import (IConstraint, - constraint, - constraint_dict, - constraint_tuple, - constraint_list) -from pyomo.core.kernel.matrix_constraint import \ - (matrix_constraint, - _MatrixConstraintData) -from pyomo.core.kernel.variable import (variable, - variable_list) +from pyomo.core.kernel.constraint import ( + IConstraint, + constraint, + constraint_dict, + constraint_tuple, + constraint_list, +) +from pyomo.core.kernel.matrix_constraint import matrix_constraint, _MatrixConstraintData +from pyomo.core.kernel.variable import variable, variable_list from pyomo.core.kernel.parameter import parameter from pyomo.core.kernel.expression import expression -from pyomo.core.kernel.block import (block, - block_list) +from pyomo.core.kernel.block import block, block_list try: import numpy + has_numpy = True except: has_numpy = False try: import scipy + has_scipy = True _scipy_ver = tuple(int(_) for _ in scipy.version.version.split('.')[:2]) except: has_scipy = False - _scipy_ver = (0,0) + _scipy_ver = (0, 0) def _create_variable_list(size, **kwds): @@ -57,21 +54,16 @@ def _create_variable_list(size, **kwds): return vlist -@unittest.skipUnless(has_numpy and has_scipy, - "NumPy or SciPy is not available") +@unittest.skipUnless(has_numpy and has_scipy, "NumPy or SciPy is not available") class Test_matrix_constraint(unittest.TestCase): - def test_pprint(self): # Not really testing what the output is, just that # an error does not occur. The pprint functionality # is still in the early stages. - m,n = 3,2 + m, n = 3, 2 vlist = _create_variable_list(2) - A = numpy.random.rand(m,n) - ctuple = matrix_constraint(A, - lb=1, - ub=2, - x=vlist) + A = numpy.random.rand(m, n) + ctuple = matrix_constraint(A, lb=1, ub=2, x=vlist) pmo.pprint(ctuple) b = block() b.c = ctuple @@ -84,7 +76,7 @@ def test_pprint(self): pmo.pprint(m) def test_ctype(self): - ctuple = matrix_constraint(numpy.random.rand(3,3)) + ctuple = matrix_constraint(numpy.random.rand(3, 3)) self.assertIs(ctuple.ctype, IConstraint) self.assertIs(type(ctuple), matrix_constraint) self.assertIs(type(ctuple)._ctype, IConstraint) @@ -93,15 +85,12 @@ def test_ctype(self): def test_pickle(self): vlist = _create_variable_list(3) - ctuple = matrix_constraint( - numpy.array([[0,0,0],[0,0,0]]), - x=vlist) + ctuple = matrix_constraint(numpy.array([[0, 0, 0], [0, 0, 0]]), x=vlist) self.assertTrue((ctuple.lb == -numpy.inf).all()) self.assertTrue((ctuple.ub == numpy.inf).all()) self.assertTrue((ctuple.equality == False).all()) self.assertEqual(ctuple.parent, None) - ctuple_up = pickle.loads( - pickle.dumps(ctuple)) + ctuple_up = pickle.loads(pickle.dumps(ctuple)) self.assertTrue((ctuple_up.lb == -numpy.inf).all()) self.assertTrue((ctuple_up.ub == numpy.inf).all()) self.assertTrue((ctuple_up.equality == False).all()) @@ -109,8 +98,7 @@ def test_pickle(self): b = block() b.ctuple = ctuple self.assertIs(ctuple.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) ctuple_up = bup.ctuple self.assertTrue((ctuple_up.lb == -numpy.inf).all()) self.assertTrue((ctuple_up.ub == numpy.inf).all()) @@ -119,8 +107,7 @@ def test_pickle(self): def test_init(self): vlist = _create_variable_list(3, value=1.0) - ctuple = matrix_constraint(numpy.zeros((3,3)), - x=vlist) + ctuple = matrix_constraint(numpy.zeros((3, 3)), x=vlist) self.assertEqual(len(ctuple), 3) self.assertTrue(ctuple.parent is None) self.assertEqual(ctuple.ctype, IConstraint) @@ -139,11 +126,8 @@ def test_init(self): self.assertEqual(c.has_ub(), False) vlist = _create_variable_list(3, value=3) - A = numpy.ones((2,3)) - ctuple = matrix_constraint(A, - lb=0, - ub=2, - x=vlist) + A = numpy.ones((2, 3)) + ctuple = matrix_constraint(A, lb=0, ub=2, x=vlist) self.assertEqual(len(ctuple), 2) self.assertTrue((ctuple.lb == 0).all()) self.assertTrue((ctuple.ub == 2).all()) @@ -155,10 +139,7 @@ def test_init(self): self.assertEqual(c(), 9) self.assertEqual(c.ub, 2) - - ctuple = matrix_constraint(A, - rhs=1, - x=vlist) + ctuple = matrix_constraint(A, rhs=1, x=vlist) self.assertEqual(len(ctuple), 2) self.assertTrue((ctuple.lb == 1).all()) self.assertTrue((ctuple.ub == 1).all()) @@ -174,19 +155,13 @@ def test_init(self): # can't use both lb and rhs with self.assertRaises(ValueError): - matrix_constraint(A, - lb=0, - rhs=0, - x=vlist) + matrix_constraint(A, lb=0, rhs=0, x=vlist) # can't use both ub and rhs with self.assertRaises(ValueError): - matrix_constraint(A, - ub=0, - rhs=0, - x=vlist) + matrix_constraint(A, ub=0, rhs=0, x=vlist) def test_type(self): - A = numpy.ones((2,3)) + A = numpy.ones((2, 3)) ctuple = matrix_constraint(A) self.assertTrue(isinstance(ctuple, ICategorizedObject)) self.assertTrue(isinstance(ctuple, ICategorizedObjectContainer)) @@ -199,7 +174,7 @@ def test_type(self): self.assertTrue(isinstance(ctuple[0], _MatrixConstraintData)) def test_active(self): - A = numpy.ones((2,2)) + A = numpy.ones((2, 2)) ctuple = matrix_constraint(A) self.assertEqual(ctuple.active, True) for c in ctuple: @@ -246,15 +221,16 @@ def test_active(self): self.assertEqual(b.active, False) def test_index(self): - A = numpy.ones((4,5)) + A = numpy.ones((4, 5)) ctuple = matrix_constraint(A) for i, c in enumerate(ctuple): self.assertEqual(c.index, i) - @unittest.skipIf(_scipy_ver < (1,1), - "csr_matrix.reshape only available in scipy >= 1.1") + @unittest.skipIf( + _scipy_ver < (1, 1), "csr_matrix.reshape only available in scipy >= 1.1" + ) def test_A(self): - A = numpy.ones((4,5)) + A = numpy.ones((4, 5)) # sparse c = matrix_constraint(A) @@ -268,9 +244,9 @@ def test_A(self): with self.assertRaises(ValueError): c.A.indptr[0] = 2 cA = c.A - cA.shape = (5,4) + cA.shape = (5, 4) # the shape of c.A should not be changed - self.assertEqual(c.A.shape, (4,5)) + self.assertEqual(c.A.shape, (4, 5)) # dense c = matrix_constraint(A, sparse=False) @@ -278,14 +254,14 @@ def test_A(self): self.assertTrue((c.A == A).all()) self.assertEqual(c.sparse, False) with self.assertRaises(ValueError): - c.A[0,0] = 2 + c.A[0, 0] = 2 cA = c.A - cA.shape = (5,4) + cA.shape = (5, 4) # the shape of c.A should not be changed - self.assertEqual(c.A.shape, (4,5)) + self.assertEqual(c.A.shape, (4, 5)) def test_x(self): - A = numpy.ones((4,5)) + A = numpy.ones((4, 5)) ctuple = matrix_constraint(A) self.assertEqual(ctuple.x, None) for c in ctuple: @@ -314,22 +290,22 @@ def test_x(self): ctuple.x = vlist def test_bad_shape(self): - A = numpy.array([[1,2,3],[1,2,3]]) + A = numpy.array([[1, 2, 3], [1, 2, 3]]) matrix_constraint(A) - A = scipy.sparse.csr_matrix(numpy.array([[1,2,3],[1,2,3]])) + A = scipy.sparse.csr_matrix(numpy.array([[1, 2, 3], [1, 2, 3]])) matrix_constraint(A) - A = numpy.array([1,2,3]) + A = numpy.array([1, 2, 3]) with self.assertRaises(ValueError): matrix_constraint(A) - A = numpy.ones((2,2,2)) + A = numpy.ones((2, 2, 2)) with self.assertRaises(ValueError): matrix_constraint(A) - A = [1,2,3] + A = [1, 2, 3] with self.assertRaises(AttributeError): matrix_constraint(A) def test_equality(self): - A = numpy.ones((5,4)) + A = numpy.ones((5, 4)) ctuple = matrix_constraint(A, rhs=1) self.assertTrue((ctuple.lb == 1).all()) self.assertTrue((ctuple.ub == 1).all()) @@ -437,9 +413,8 @@ def test_equality(self): self.assertEqual(c.ub, 4) self.assertEqual(c.equality, False) - def test_nondata_bounds(self): - A = numpy.ones((5,4)) + A = numpy.ones((5, 4)) ctuple = matrix_constraint(A, rhs=1) eL = expression() @@ -493,7 +468,6 @@ def test_nondata_bounds(self): self.assertTrue((ctuple.rhs == 1).all()) self.assertTrue((ctuple.equality == True).all()) - ctuple.equality = False self.assertTrue((ctuple.lb == 1).all()) self.assertTrue((ctuple.ub == 1).all()) @@ -586,7 +560,7 @@ def test_nondata_bounds(self): self.assertTrue((ctuple.equality == False).all()) def test_data_bounds(self): - A = numpy.ones((5,4)) + A = numpy.ones((5, 4)) ctuple = matrix_constraint(A) self.assertTrue((ctuple.lb == -numpy.inf).all()) self.assertTrue((ctuple.ub == numpy.inf).all()) @@ -761,14 +735,14 @@ def test_call(self): vlist[0].value = 1 vlist[1].value = 0 vlist[2].value = 3 - A = numpy.ones((3,3)) + A = numpy.ones((3, 3)) ctuple = matrix_constraint(A, x=vlist) self.assertTrue((ctuple() == 4).all()) self.assertEqual(ctuple[0](), 4) self.assertEqual(ctuple[1](), 4) self.assertEqual(ctuple[2](), 4) - A[:,0] = 0 - A[:,2] = 2 + A[:, 0] = 0 + A[:, 2] = 2 ctuple = matrix_constraint(A, x=vlist) vlist[2].value = 4 self.assertTrue((ctuple() == 8).all()) @@ -776,13 +750,10 @@ def test_call(self): self.assertEqual(ctuple[1](), 8) self.assertEqual(ctuple[2](), 8) - A = numpy.random.rand(4,3) + A = numpy.random.rand(4, 3) ctuple = matrix_constraint(A, x=vlist) vlist[1].value = 2 - cvals = numpy.array([ctuple[0](), - ctuple[1](), - ctuple[2](), - ctuple[3]()]) + cvals = numpy.array([ctuple[0](), ctuple[1](), ctuple[2](), ctuple[3]()]) self.assertTrue((ctuple() == cvals).all()) vlist[1].value = None @@ -818,14 +789,14 @@ def test_slack(self): vlist[0].value = 1 vlist[1].value = 0 vlist[2].value = 3 - A = numpy.ones((3,3)) + A = numpy.ones((3, 3)) ctuple = matrix_constraint(A, x=vlist) self.assertTrue((ctuple() == 4).all()) self.assertEqual(ctuple[0](), 4) self.assertEqual(ctuple[1](), 4) self.assertEqual(ctuple[2](), 4) - A[:,0] = 0 - A[:,2] = 2 + A[:, 0] = 0 + A[:, 2] = 2 ctuple = matrix_constraint(A, x=vlist) vlist[2].value = 4 self.assertTrue((ctuple() == 8).all()) @@ -833,13 +804,10 @@ def test_slack(self): self.assertEqual(ctuple[1](), 8) self.assertEqual(ctuple[2](), 8) - A = numpy.random.rand(4,3) + A = numpy.random.rand(4, 3) ctuple = matrix_constraint(A, x=vlist) vlist[1].value = 2 - cvals = numpy.array([ctuple[0](), - ctuple[1](), - ctuple[2](), - ctuple[3]()]) + cvals = numpy.array([ctuple[0](), ctuple[1](), ctuple[2](), ctuple[3]()]) self.assertTrue((ctuple() == cvals).all()) def test_slack_methods(self): @@ -848,8 +816,7 @@ def test_slack_methods(self): U = 5 A = numpy.array([[1]]) - cE = matrix_constraint(A, x=[x], - rhs=L) + cE = matrix_constraint(A, x=[x], rhs=L) x.value = 4 self.assertEqual(cE[0].body(), 4) self.assertEqual(cE[0].slack, -3) @@ -887,8 +854,7 @@ def test_slack_methods(self): self.assertEqual(cE.lslack, None) self.assertEqual(cE.uslack, None) - cE = matrix_constraint(A, x=[x], - rhs=U) + cE = matrix_constraint(A, x=[x], rhs=U) x.value = 4 self.assertEqual(cE[0].body(), 4) self.assertEqual(cE[0].slack, -1) @@ -914,8 +880,7 @@ def test_slack_methods(self): self.assertEqual(cE.lslack[0], -5) self.assertEqual(cE.uslack[0], 5) - cL = matrix_constraint(A, x=[x], - lb=L) + cL = matrix_constraint(A, x=[x], lb=L) x.value = 4 self.assertEqual(cL[0].body(), 4) self.assertEqual(cL[0].slack, 3) @@ -941,8 +906,7 @@ def test_slack_methods(self): self.assertEqual(cL.lslack[0], -1) self.assertEqual(cL.uslack[0], float('inf')) - cL = matrix_constraint(A, x=[x], - lb=float('-inf')) + cL = matrix_constraint(A, x=[x], lb=float('-inf')) x.value = 4 self.assertEqual(cL[0].body(), 4) self.assertEqual(cL[0].slack, float('inf')) @@ -968,8 +932,7 @@ def test_slack_methods(self): self.assertEqual(cL.lslack[0], float('inf')) self.assertEqual(cL.uslack[0], float('inf')) - cU = matrix_constraint(A, x=[x], - ub=U) + cU = matrix_constraint(A, x=[x], ub=U) x.value = 4 self.assertEqual(cU[0].body(), 4) self.assertEqual(cU[0].slack, 1) @@ -995,8 +958,7 @@ def test_slack_methods(self): self.assertEqual(cU.lslack[0], float('inf')) self.assertEqual(cU.uslack[0], 5) - cU = matrix_constraint(A, x=[x], - ub=float('inf')) + cU = matrix_constraint(A, x=[x], ub=float('inf')) x.value = 4 self.assertEqual(cU[0].body(), 4) self.assertEqual(cU[0].slack, float('inf')) @@ -1022,8 +984,7 @@ def test_slack_methods(self): self.assertEqual(cU.lslack[0], float('inf')) self.assertEqual(cU.uslack[0], float('inf')) - cR = matrix_constraint(A, x=[x], - lb=L, ub=U) + cR = matrix_constraint(A, x=[x], lb=L, ub=U) x.value = 4 self.assertEqual(cR[0].body(), 4) self.assertEqual(cR[0].slack, 1) @@ -1075,9 +1036,7 @@ def test_slack_methods(self): self.assertEqual(cR.lslack[0], float('inf')) self.assertEqual(cR.uslack[0], float('inf')) - cR = matrix_constraint(A, x=[x], - lb=float('-inf'), - ub=float('inf')) + cR = matrix_constraint(A, x=[x], lb=float('-inf'), ub=float('inf')) x.value = 4 self.assertEqual(cR[0].body(), 4) self.assertEqual(cR[0].slack, float('inf')) @@ -1111,7 +1070,7 @@ def test_canonical_form_sparse(self): for c in ctuple: self.assertEqual(c._linear_canonical_form, True) terms = list(ctuple[0].terms) - vs,cs = zip(*terms) + vs, cs = zip(*terms) self.assertEqual(len(terms), 1) self.assertIs(vs[0], vlist[1]) self.assertEqual(cs[0], 2) @@ -1139,13 +1098,12 @@ def test_canonical_form_sparse(self): def test_canonical_form_dense(self): A = numpy.array([[0, 2]]) vlist = _create_variable_list(2) - ctuple = matrix_constraint(A, x=vlist, - sparse=False) + ctuple = matrix_constraint(A, x=vlist, sparse=False) self.assertEqual(ctuple.sparse, False) for c in ctuple: self.assertEqual(c._linear_canonical_form, True) terms = list(ctuple[0].terms) - vs,cs = zip(*terms) + vs, cs = zip(*terms) self.assertEqual(len(terms), 2) self.assertIs(vs[0], vlist[0]) self.assertIs(vs[1], vlist[1]) @@ -1174,7 +1132,7 @@ def test_canonical_form_dense(self): self.assertEqual(repn.constant(), 4) def test_preorder_traversal(self): - A = numpy.ones((3,3)) + A = numpy.ones((3, 3)) m = block() m.c = matrix_constraint(A) @@ -1200,30 +1158,30 @@ def no_mc_descend(x): if isinstance(x, matrix_constraint): return False return True + cnt = 0 - for obj in pmo.preorder_traversal(m, - ctype=IConstraint, - descend=no_mc_descend): + for obj in pmo.preorder_traversal(m, ctype=IConstraint, descend=no_mc_descend): self.assertTrue(type(obj.parent) is not matrix_constraint) - self.assertTrue((obj.ctype is block._ctype) or \ - (obj.ctype is constraint._ctype)) + self.assertTrue( + (obj.ctype is block._ctype) or (obj.ctype is constraint._ctype) + ) cnt += 1 self.assertEqual(cnt, 11) cnt = 0 mc_child_cnt = 0 for obj in pmo.preorder_traversal(m, ctype=IConstraint): - self.assertTrue((obj.ctype is block._ctype) or \ - (obj.ctype is constraint._ctype)) + self.assertTrue( + (obj.ctype is block._ctype) or (obj.ctype is constraint._ctype) + ) if type(obj.parent) is matrix_constraint: mc_child_cnt += 1 cnt += 1 self.assertEqual(cnt, 23) self.assertEqual(mc_child_cnt, 12) - self.assertEqual( - len(list(m.components(ctype=IConstraint))), - 13) + self.assertEqual(len(list(m.components(ctype=IConstraint))), 13) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_objective.py b/pyomo/core/tests/unit/kernel/test_objective.py index 3495da781aa..f60ff9bdb49 100644 --- a/pyomo/core/tests/unit/kernel/test_objective.py +++ b/pyomo/core/tests/unit/kernel/test_objective.py @@ -14,25 +14,30 @@ import pyomo.common.unittest as unittest from pyomo.core.expr.numvalue import NumericValue from pyomo.kernel import pprint -from pyomo.core.tests.unit.kernel.test_dict_container import \ - _TestActiveDictContainerBase -from pyomo.core.tests.unit.kernel.test_tuple_container import \ - _TestActiveTupleContainerBase -from pyomo.core.tests.unit.kernel.test_list_container import \ - _TestActiveListContainerBase +from pyomo.core.tests.unit.kernel.test_dict_container import ( + _TestActiveDictContainerBase, +) +from pyomo.core.tests.unit.kernel.test_tuple_container import ( + _TestActiveTupleContainerBase, +) +from pyomo.core.tests.unit.kernel.test_list_container import ( + _TestActiveListContainerBase, +) from pyomo.core.kernel.base import ICategorizedObject -from pyomo.core.kernel.objective import (IObjective, - objective, - objective_dict, - objective_tuple, - objective_list, - minimize, - maximize) +from pyomo.core.kernel.objective import ( + IObjective, + objective, + objective_dict, + objective_tuple, + objective_list, + minimize, + maximize, +) from pyomo.core.kernel.variable import variable from pyomo.core.kernel.block import block -class Test_objective(unittest.TestCase): +class Test_objective(unittest.TestCase): def test_pprint(self): # Not really testing what the output is, just that # an error does not occur. The pprint functionality @@ -57,21 +62,18 @@ def test_ctype(self): self.assertIs(type(o)._ctype, IObjective) def test_pickle(self): - o = objective(sense=maximize, - expr=1.0) + o = objective(sense=maximize, expr=1.0) self.assertEqual(o.sense, maximize) self.assertEqual(o.expr, 1.0) self.assertEqual(o.parent, None) - oup = pickle.loads( - pickle.dumps(o)) + oup = pickle.loads(pickle.dumps(o)) self.assertEqual(oup.sense, maximize) self.assertEqual(oup.expr, 1.0) self.assertEqual(oup.parent, None) b = block() b.o = o self.assertIs(o.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) oup = bup.o self.assertEqual(oup.sense, maximize) self.assertEqual(oup.expr, 1.0) @@ -129,20 +131,21 @@ def test_active(self): self.assertEqual(o.active, False) self.assertEqual(b.active, False) -class Test_objective_dict(_TestActiveDictContainerBase, - unittest.TestCase): + +class Test_objective_dict(_TestActiveDictContainerBase, unittest.TestCase): _container_type = objective_dict _ctype_factory = lambda self: objective() -class Test_objective_tuple(_TestActiveTupleContainerBase, - unittest.TestCase): + +class Test_objective_tuple(_TestActiveTupleContainerBase, unittest.TestCase): _container_type = objective_tuple _ctype_factory = lambda self: objective() -class Test_objective_list(_TestActiveListContainerBase, - unittest.TestCase): + +class Test_objective_list(_TestActiveListContainerBase, unittest.TestCase): _container_type = objective_list _ctype_factory = lambda self: objective() + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_parameter.py b/pyomo/core/tests/unit/kernel/test_parameter.py index 226e2a115a7..04dc08f095f 100644 --- a/pyomo/core/tests/unit/kernel/test_parameter.py +++ b/pyomo/core/tests/unit/kernel/test_parameter.py @@ -14,29 +14,36 @@ import pyomo.common.unittest as unittest from pyomo.common.dependencies import dill, dill_available as has_dill -from pyomo.core.expr.numvalue import (NumericValue, - is_fixed, - is_constant, - is_potentially_variable) +from pyomo.core.expr.numvalue import ( + NumericValue, + is_fixed, + is_constant, + is_potentially_variable, +) from pyomo.kernel import pprint -from pyomo.core.tests.unit.kernel.test_dict_container import \ - _TestActiveDictContainerBase -from pyomo.core.tests.unit.kernel.test_tuple_container import \ - _TestActiveTupleContainerBase -from pyomo.core.tests.unit.kernel.test_list_container import \ - _TestActiveListContainerBase +from pyomo.core.tests.unit.kernel.test_dict_container import ( + _TestActiveDictContainerBase, +) +from pyomo.core.tests.unit.kernel.test_tuple_container import ( + _TestActiveTupleContainerBase, +) +from pyomo.core.tests.unit.kernel.test_list_container import ( + _TestActiveListContainerBase, +) from pyomo.core.kernel.base import ICategorizedObject -from pyomo.core.kernel.parameter import (IParameter, - parameter, - functional_value, - parameter_dict, - parameter_tuple, - parameter_list) +from pyomo.core.kernel.parameter import ( + IParameter, + parameter, + functional_value, + parameter_dict, + parameter_tuple, + parameter_list, +) from pyomo.core.kernel.variable import variable from pyomo.core.kernel.block import block -class Test_parameter(unittest.TestCase): +class Test_parameter(unittest.TestCase): def test_pprint(self): # Not really testing what the output is, just that # an error does not occur. The pprint functionality @@ -63,15 +70,13 @@ def test_pickle(self): p = parameter(value=1.0) self.assertEqual(p.value, 1.0) self.assertIs(p.parent, None) - pup = pickle.loads( - pickle.dumps(p)) + pup = pickle.loads(pickle.dumps(p)) self.assertEqual(pup.value, 1.0) self.assertIs(pup.parent, None) b = block() b.p = p self.assertIs(p.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) pup = bup.p self.assertEqual(pup.value, 1.0) self.assertIs(pup.parent, bup) @@ -139,8 +144,8 @@ def test_is_parameter_type(self): # to do with mutability... self.assertEqual(p.is_parameter_type(), False) -class Test_functional_value(unittest.TestCase): +class Test_functional_value(unittest.TestCase): def test_pprint(self): # Not really testing what the output is, just that # an error does not occur. The pprint functionality @@ -167,27 +172,23 @@ def test_pickle(self): f = functional_value() self.assertIs(f.fn, None) self.assertIs(f.parent, None) - fup = pickle.loads( - pickle.dumps(f)) + fup = pickle.loads(pickle.dumps(f)) self.assertIs(fup.fn, None) self.assertIs(fup.parent, None) b = block() b.f = f self.assertIs(f.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) fup = bup.f self.assertIs(fup.fn, None) self.assertIs(fup.parent, bup) - @unittest.skipIf(not has_dill, - "The dill module is not available") + @unittest.skipIf(not has_dill, "The dill module is not available") def test_dill(self): p = parameter(1) f = functional_value(lambda: p()) self.assertEqual(f(), 1) - fup = dill.loads( - dill.dumps(f)) + fup = dill.loads(dill.dumps(f)) p.value = 2 self.assertEqual(f(), 2) self.assertEqual(fup(), 1) @@ -195,8 +196,7 @@ def test_dill(self): b.p = p b.f = f self.assertEqual(b.f(), 2) - bup = dill.loads( - dill.dumps(b)) + bup = dill.loads(dill.dumps(b)) fup = bup.f b.p.value = 4 self.assertEqual(b.f(), 4) @@ -228,8 +228,10 @@ def test_call(self): f(exception=True) with self.assertRaises(TypeError): f() + def value_error(): raise ValueError() + f.fn = value_error self.assertIsNot(f.fn, None) self.assertEqual(f(exception=False), None) @@ -244,7 +246,7 @@ def test_init(self): self.assertEqual(f.ctype, IParameter) self.assertEqual(f.fn, None) self.assertEqual(f(), None) - x = [1,2] + x = [1, 2] f.fn = lambda: max(x) self.assertEqual(f(), 2) x[0] = 3 @@ -304,20 +306,20 @@ def test_is_parameter_type(self): self.assertEqual(f.is_parameter_type(), False) -class Test_parameter_dict(_TestActiveDictContainerBase, - unittest.TestCase): +class Test_parameter_dict(_TestActiveDictContainerBase, unittest.TestCase): _container_type = parameter_dict _ctype_factory = lambda self: parameter() -class Test_parameter_tuple(_TestActiveTupleContainerBase, - unittest.TestCase): + +class Test_parameter_tuple(_TestActiveTupleContainerBase, unittest.TestCase): _container_type = parameter_tuple _ctype_factory = lambda self: parameter() -class Test_parameter_list(_TestActiveListContainerBase, - unittest.TestCase): + +class Test_parameter_list(_TestActiveListContainerBase, unittest.TestCase): _container_type = parameter_list _ctype_factory = lambda self: parameter() + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_piecewise.py b/pyomo/core/tests/unit/kernel/test_piecewise.py index d991fb93f91..2c236c0dd12 100644 --- a/pyomo/core/tests/unit/kernel/test_piecewise.py +++ b/pyomo/core/tests/unit/kernel/test_piecewise.py @@ -13,95 +13,91 @@ import pyomo.common.unittest as unittest import pyomo.kernel as pmo -from pyomo.core.tests.unit.kernel.test_dict_container import \ - _TestActiveDictContainerBase -from pyomo.core.tests.unit.kernel.test_list_container import \ - _TestActiveListContainerBase -from pyomo.core.kernel.base import \ - (ICategorizedObject, - ICategorizedObjectContainer) -from pyomo.core.kernel.heterogeneous_container import \ - IHeterogeneousContainer -from pyomo.core.kernel.block import (IBlock, - block, - block_dict, - block_list) -from pyomo.core.kernel.variable import (variable, - variable_list) -from pyomo.core.kernel.piecewise_library.transforms import \ - (PiecewiseLinearFunction, - TransformedPiecewiseLinearFunction) -import pyomo.core.kernel.piecewise_library.transforms as \ - transforms -from pyomo.core.kernel.piecewise_library.transforms_nd import \ - (PiecewiseLinearFunctionND, - TransformedPiecewiseLinearFunctionND) -import pyomo.core.kernel.piecewise_library.transforms_nd as \ - transforms_nd +from pyomo.core.tests.unit.kernel.test_dict_container import ( + _TestActiveDictContainerBase, +) +from pyomo.core.tests.unit.kernel.test_list_container import ( + _TestActiveListContainerBase, +) +from pyomo.core.kernel.base import ICategorizedObject, ICategorizedObjectContainer +from pyomo.core.kernel.heterogeneous_container import IHeterogeneousContainer +from pyomo.core.kernel.block import IBlock, block, block_dict, block_list +from pyomo.core.kernel.variable import variable, variable_list +from pyomo.core.kernel.piecewise_library.transforms import ( + PiecewiseLinearFunction, + TransformedPiecewiseLinearFunction, +) +import pyomo.core.kernel.piecewise_library.transforms as transforms +from pyomo.core.kernel.piecewise_library.transforms_nd import ( + PiecewiseLinearFunctionND, + TransformedPiecewiseLinearFunctionND, +) +import pyomo.core.kernel.piecewise_library.transforms_nd as transforms_nd import pyomo.core.kernel.piecewise_library.util as util # for the multi-dimensional piecewise tests _test_v = None _test_tri = None _test_values = None + + def setUpModule(): global _test_v global _test_tri global _test_values if util.numpy_available and util.scipy_available: - _test_v = variable_list( - variable(lb=i, ub=i+1) for i in range(3)) + _test_v = variable_list(variable(lb=i, ub=i + 1) for i in range(3)) _test_tri = util.generate_delaunay(_test_v, num=4) _test_values = [] for _xi in _test_tri.points: _test_values.append(sum(_xi)) _test_values = util.numpy.array(_test_values) -class Test_util(unittest.TestCase): +class Test_util(unittest.TestCase): def test_is_constant(self): self.assertEqual(util.is_constant([]), True) self.assertEqual(util.is_constant([1]), True) - self.assertEqual(util.is_constant([1,2]), False) - self.assertEqual(util.is_constant([1,1]), True) - self.assertEqual(util.is_constant([1,2,3]), False) - self.assertEqual(util.is_constant([2.1,2.1,2.1]), True) - self.assertEqual(util.is_constant([1,1,3,4]), False) - self.assertEqual(util.is_constant([1,1,3,3]), False) - self.assertEqual(util.is_constant([1,1,1,4]), False) - self.assertEqual(util.is_constant([1,1,1,1]), True) - self.assertEqual(util.is_constant([-1,1,1,1]), False) - self.assertEqual(util.is_constant([1,-1,1,1]), False) - self.assertEqual(util.is_constant([1,1,-1,1]), False) - self.assertEqual(util.is_constant([1,1,1,-1]), False) + self.assertEqual(util.is_constant([1, 2]), False) + self.assertEqual(util.is_constant([1, 1]), True) + self.assertEqual(util.is_constant([1, 2, 3]), False) + self.assertEqual(util.is_constant([2.1, 2.1, 2.1]), True) + self.assertEqual(util.is_constant([1, 1, 3, 4]), False) + self.assertEqual(util.is_constant([1, 1, 3, 3]), False) + self.assertEqual(util.is_constant([1, 1, 1, 4]), False) + self.assertEqual(util.is_constant([1, 1, 1, 1]), True) + self.assertEqual(util.is_constant([-1, 1, 1, 1]), False) + self.assertEqual(util.is_constant([1, -1, 1, 1]), False) + self.assertEqual(util.is_constant([1, 1, -1, 1]), False) + self.assertEqual(util.is_constant([1, 1, 1, -1]), False) def test_is_nondecreasing(self): self.assertEqual(util.is_nondecreasing([]), True) self.assertEqual(util.is_nondecreasing([1]), True) - self.assertEqual(util.is_nondecreasing([1,2]), True) - self.assertEqual(util.is_nondecreasing([1,2,3]), True) - self.assertEqual(util.is_nondecreasing([1,1,3,4]), True) - self.assertEqual(util.is_nondecreasing([1,1,3,3]), True) - self.assertEqual(util.is_nondecreasing([1,1,1,4]), True) - self.assertEqual(util.is_nondecreasing([1,1,1,1]), True) - self.assertEqual(util.is_nondecreasing([-1,1,1,1]), True) - self.assertEqual(util.is_nondecreasing([1,-1,1,1]), False) - self.assertEqual(util.is_nondecreasing([1,1,-1,1]), False) - self.assertEqual(util.is_nondecreasing([1,1,1,-1]), False) + self.assertEqual(util.is_nondecreasing([1, 2]), True) + self.assertEqual(util.is_nondecreasing([1, 2, 3]), True) + self.assertEqual(util.is_nondecreasing([1, 1, 3, 4]), True) + self.assertEqual(util.is_nondecreasing([1, 1, 3, 3]), True) + self.assertEqual(util.is_nondecreasing([1, 1, 1, 4]), True) + self.assertEqual(util.is_nondecreasing([1, 1, 1, 1]), True) + self.assertEqual(util.is_nondecreasing([-1, 1, 1, 1]), True) + self.assertEqual(util.is_nondecreasing([1, -1, 1, 1]), False) + self.assertEqual(util.is_nondecreasing([1, 1, -1, 1]), False) + self.assertEqual(util.is_nondecreasing([1, 1, 1, -1]), False) def test_is_nonincreasing(self): self.assertEqual(util.is_nonincreasing([]), True) self.assertEqual(util.is_nonincreasing([1]), True) - self.assertEqual(util.is_nonincreasing([2,1]), True) - self.assertEqual(util.is_nonincreasing([3,2,1]), True) - self.assertEqual(util.is_nonincreasing([4,3,2,1]), True) - self.assertEqual(util.is_nonincreasing([3,3,1,1]), True) - self.assertEqual(util.is_nonincreasing([4,1,1,1]), True) - self.assertEqual(util.is_nonincreasing([1,1,1,1]), True) - self.assertEqual(util.is_nonincreasing([-1,1,1,1]), False) - self.assertEqual(util.is_nonincreasing([1,-1,1,1]), False) - self.assertEqual(util.is_nonincreasing([1,1,-1,1]), False) - self.assertEqual(util.is_nonincreasing([1,1,1,-1]), True) + self.assertEqual(util.is_nonincreasing([2, 1]), True) + self.assertEqual(util.is_nonincreasing([3, 2, 1]), True) + self.assertEqual(util.is_nonincreasing([4, 3, 2, 1]), True) + self.assertEqual(util.is_nonincreasing([3, 3, 1, 1]), True) + self.assertEqual(util.is_nonincreasing([4, 1, 1, 1]), True) + self.assertEqual(util.is_nonincreasing([1, 1, 1, 1]), True) + self.assertEqual(util.is_nonincreasing([-1, 1, 1, 1]), False) + self.assertEqual(util.is_nonincreasing([1, -1, 1, 1]), False) + self.assertEqual(util.is_nonincreasing([1, 1, -1, 1]), False) + self.assertEqual(util.is_nonincreasing([1, 1, 1, -1]), True) def test_is_positive_power_of_two(self): self.assertEqual(util.is_positive_power_of_two(-8), False) @@ -143,71 +139,71 @@ def test_log2floor(self): self.assertEqual(util.log2floor(2**40 + 1), 40) def test_generate_gray_code(self): - self.assertEqual(util.generate_gray_code(0), - [[]]) - self.assertEqual(util.generate_gray_code(1), - [[0],[1]]) - self.assertEqual(util.generate_gray_code(2), - [[0,0],[0,1],[1,1],[1,0]]) - self.assertEqual(util.generate_gray_code(3), - [[0,0,0], - [0,0,1], - [0,1,1], - [0,1,0], - [1,1,0], - [1,1,1], - [1,0,1], - [1,0,0]]) - self.assertEqual(util.generate_gray_code(4), - [[0, 0, 0, 0], - [0, 0, 0, 1], - [0, 0, 1, 1], - [0, 0, 1, 0], - [0, 1, 1, 0], - [0, 1, 1, 1], - [0, 1, 0, 1], - [0, 1, 0, 0], - [1, 1, 0, 0], - [1, 1, 0, 1], - [1, 1, 1, 1], - [1, 1, 1, 0], - [1, 0, 1, 0], - [1, 0, 1, 1], - [1, 0, 0, 1], - [1, 0, 0, 0]]) + self.assertEqual(util.generate_gray_code(0), [[]]) + self.assertEqual(util.generate_gray_code(1), [[0], [1]]) + self.assertEqual(util.generate_gray_code(2), [[0, 0], [0, 1], [1, 1], [1, 0]]) + self.assertEqual( + util.generate_gray_code(3), + [ + [0, 0, 0], + [0, 0, 1], + [0, 1, 1], + [0, 1, 0], + [1, 1, 0], + [1, 1, 1], + [1, 0, 1], + [1, 0, 0], + ], + ) + self.assertEqual( + util.generate_gray_code(4), + [ + [0, 0, 0, 0], + [0, 0, 0, 1], + [0, 0, 1, 1], + [0, 0, 1, 0], + [0, 1, 1, 0], + [0, 1, 1, 1], + [0, 1, 0, 1], + [0, 1, 0, 0], + [1, 1, 0, 0], + [1, 1, 0, 1], + [1, 1, 1, 1], + [1, 1, 1, 0], + [1, 0, 1, 0], + [1, 0, 1, 1], + [1, 0, 0, 1], + [1, 0, 0, 0], + ], + ) def test_characterize_function(self): with self.assertRaises(ValueError): - util.characterize_function([1,2,-1], - [1,1,1]) + util.characterize_function([1, 2, -1], [1, 1, 1]) - fc, slopes = util.characterize_function([1,2,3], - [1,1,1]) - self.assertEqual(fc, 1) # affine - self.assertEqual(slopes, [0,0]) + fc, slopes = util.characterize_function([1, 2, 3], [1, 1, 1]) + self.assertEqual(fc, 1) # affine + self.assertEqual(slopes, [0, 0]) - fc, slopes = util.characterize_function([1,2,3], - [1,0,1]) - self.assertEqual(fc, 2) # convex - self.assertEqual(slopes, [-1,1]) + fc, slopes = util.characterize_function([1, 2, 3], [1, 0, 1]) + self.assertEqual(fc, 2) # convex + self.assertEqual(slopes, [-1, 1]) - fc, slopes = util.characterize_function([1,2,3], - [1,2,1]) - self.assertEqual(fc, 3) # concave - self.assertEqual(slopes, [1,-1]) + fc, slopes = util.characterize_function([1, 2, 3], [1, 2, 1]) + self.assertEqual(fc, 3) # concave + self.assertEqual(slopes, [1, -1]) - fc, slopes = util.characterize_function([1,1,2], - [1,2,1]) - self.assertEqual(fc, 4) # step - self.assertEqual(slopes, [None,-1]) + fc, slopes = util.characterize_function([1, 1, 2], [1, 2, 1]) + self.assertEqual(fc, 4) # step + self.assertEqual(slopes, [None, -1]) - fc, slopes = util.characterize_function([1,2,3,4], - [1,2,1,2]) - self.assertEqual(fc, 5) # none of the above - self.assertEqual(slopes, [1,-1,1]) + fc, slopes = util.characterize_function([1, 2, 3, 4], [1, 2, 1, 2]) + self.assertEqual(fc, 5) # none of the above + self.assertEqual(slopes, [1, -1, 1]) - @unittest.skipUnless(util.numpy_available and util.scipy_available, - "Numpy or Scipy is not available") + @unittest.skipUnless( + util.numpy_available and util.scipy_available, "Numpy or Scipy is not available" + ) def test_generate_delaunay(self): vlist = variable_list() vlist.append(variable(lb=0, ub=1)) @@ -218,14 +214,12 @@ def test_generate_delaunay(self): util.generate_delaunay(vlist) else: tri = util.generate_delaunay(vlist, num=2) - self.assertTrue( - isinstance(tri, util.scipy.spatial.Delaunay)) + self.assertTrue(isinstance(tri, util.scipy.spatial.Delaunay)) self.assertEqual(len(tri.simplices), 6) self.assertEqual(len(tri.points), 8) tri = util.generate_delaunay(vlist, num=3) - self.assertTrue( - isinstance(tri, util.scipy.spatial.Delaunay)) + self.assertTrue(isinstance(tri, util.scipy.spatial.Delaunay)) self.assertEqual(len(tri.simplices), 62) self.assertEqual(len(tri.points), 27) @@ -242,21 +236,18 @@ def test_generate_delaunay(self): with self.assertRaises(ValueError): util.generate_delaunay(vlist) -class Test_piecewise(unittest.TestCase): +class Test_piecewise(unittest.TestCase): def test_pickle(self): for key in transforms.registered_transforms: - v = variable(lb=1,ub=3) - p = transforms.piecewise([1,2,3], - [1,2,1], - input=v, - validate=False, - repn=key) + v = variable(lb=1, ub=3) + p = transforms.piecewise( + [1, 2, 3], [1, 2, 1], input=v, validate=False, repn=key + ) self.assertEqual(p.parent, None) self.assertEqual(p.input.expr.parent, None) self.assertIs(p.input.expr, v) - pup = pickle.loads( - pickle.dumps(p)) + pup = pickle.loads(pickle.dumps(p)) self.assertEqual(pup.parent, None) self.assertEqual(pup.input.expr.parent, None) self.assertIsNot(pup.input.expr, v) @@ -265,8 +256,7 @@ def test_pickle(self): b.p = p self.assertIs(p.parent, b) self.assertEqual(p.input.expr.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) pup = bup.p self.assertIs(pup.parent, bup) self.assertEqual(pup.input.expr.parent, bup) @@ -274,11 +264,8 @@ def test_pickle(self): self.assertIsNot(pup.input.expr, b.v) def test_call(self): - - g = PiecewiseLinearFunction([1], - [0]) - f = TransformedPiecewiseLinearFunction( - g, require_bounded_input_variable=False) + g = PiecewiseLinearFunction([1], [0]) + f = TransformedPiecewiseLinearFunction(g, require_bounded_input_variable=False) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 0) @@ -288,10 +275,8 @@ def test_call(self): with self.assertRaises(ValueError): f(1.1) - g = PiecewiseLinearFunction([1,2], - [0,4]) - f = TransformedPiecewiseLinearFunction( - g, require_bounded_input_variable=False) + g = PiecewiseLinearFunction([1, 2], [0, 4]) + f = TransformedPiecewiseLinearFunction(g, require_bounded_input_variable=False) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 0) @@ -306,10 +291,8 @@ def test_call(self): f(2.1) # step function - g = PiecewiseLinearFunction([1,1], - [0,1]) - f = TransformedPiecewiseLinearFunction( - g, require_bounded_input_variable=False) + g = PiecewiseLinearFunction([1, 1], [0, 1]) + f = TransformedPiecewiseLinearFunction(g, require_bounded_input_variable=False) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 0) @@ -319,10 +302,8 @@ def test_call(self): with self.assertRaises(ValueError): f(1.1) - g = PiecewiseLinearFunction([1,2,3], - [1,2,1]) - f = TransformedPiecewiseLinearFunction( - g, require_bounded_input_variable=False) + g = PiecewiseLinearFunction([1, 2, 3], [1, 2, 1]) + f = TransformedPiecewiseLinearFunction(g, require_bounded_input_variable=False) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 1) @@ -341,17 +322,15 @@ def test_call(self): f(3.1) # step function - g = PiecewiseLinearFunction([1,2,2,3], - [1,2,3,4]) - f = TransformedPiecewiseLinearFunction( - g, require_bounded_input_variable=False) + g = PiecewiseLinearFunction([1, 2, 2, 3], [1, 2, 3, 4]) + f = TransformedPiecewiseLinearFunction(g, require_bounded_input_variable=False) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 1) self.assertIs(type(f(1)), float) self.assertEqual(f(1.5), 1.5) self.assertIs(type(f(1.5)), float) - self.assertEqual(f(2), 2) # lower semicontinuous + self.assertEqual(f(2), 2) # lower semicontinuous self.assertIs(type(f(2)), float) self.assertEqual(f(2.5), 3.5) self.assertIs(type(f(2.5)), float) @@ -363,18 +342,17 @@ def test_call(self): f(3.1) # another step function - g = PiecewiseLinearFunction([1,1,2,3], - [1,2,3,4], - equal_slopes_tolerance=-1) + g = PiecewiseLinearFunction( + [1, 1, 2, 3], [1, 2, 3, 4], equal_slopes_tolerance=-1 + ) f = TransformedPiecewiseLinearFunction( - g, - require_bounded_input_variable=False, - equal_slopes_tolerance=-1) + g, require_bounded_input_variable=False, equal_slopes_tolerance=-1 + ) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 1) self.assertEqual(f(1.5), 2.5) - self.assertEqual(f(2), 3) # lower semicontinuous + self.assertEqual(f(2), 3) # lower semicontinuous self.assertEqual(f(2.5), 3.5) self.assertEqual(f(3), 4) with self.assertRaises(ValueError): @@ -383,44 +361,38 @@ def test_call(self): f(3.1) # another step function - g = PiecewiseLinearFunction([1,2,3,3], - [1,2,3,4], - equal_slopes_tolerance=-1) + g = PiecewiseLinearFunction( + [1, 2, 3, 3], [1, 2, 3, 4], equal_slopes_tolerance=-1 + ) f = TransformedPiecewiseLinearFunction( - g, - require_bounded_input_variable=False, - equal_slopes_tolerance=-1) + g, require_bounded_input_variable=False, equal_slopes_tolerance=-1 + ) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 1) self.assertEqual(f(1.5), 1.5) self.assertEqual(f(2), 2) self.assertEqual(f(2.5), 2.5) - self.assertEqual(f(3), 3) # lower semicontinuous + self.assertEqual(f(3), 3) # lower semicontinuous with self.assertRaises(ValueError): f(0.9) with self.assertRaises(ValueError): f(3.1) # another step function using parameters - g = PiecewiseLinearFunction([pmo.parameter(1), - pmo.parameter(1), - pmo.parameter(2), - pmo.parameter(3)], - [pmo.parameter(1), - pmo.parameter(2), - pmo.parameter(3), - pmo.parameter(4)], - equal_slopes_tolerance=-1) + g = PiecewiseLinearFunction( + [pmo.parameter(1), pmo.parameter(1), pmo.parameter(2), pmo.parameter(3)], + [pmo.parameter(1), pmo.parameter(2), pmo.parameter(3), pmo.parameter(4)], + equal_slopes_tolerance=-1, + ) f = TransformedPiecewiseLinearFunction( - g, - require_bounded_input_variable=False, - equal_slopes_tolerance=-1) + g, require_bounded_input_variable=False, equal_slopes_tolerance=-1 + ) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 1) self.assertEqual(f(1.5), 2.5) - self.assertEqual(f(2), 3) # lower semicontinuous + self.assertEqual(f(2), 3) # lower semicontinuous self.assertEqual(f(2.5), 3.5) self.assertEqual(f(3), 4) with self.assertRaises(ValueError): @@ -429,16 +401,15 @@ def test_call(self): f(3.1) # another step function - g = PiecewiseLinearFunction([1,1,2,3,4], - [1,2,3,4,5], - equal_slopes_tolerance=-1) + g = PiecewiseLinearFunction( + [1, 1, 2, 3, 4], [1, 2, 3, 4, 5], equal_slopes_tolerance=-1 + ) f = TransformedPiecewiseLinearFunction( - g, - require_bounded_input_variable=False, - equal_slopes_tolerance=-1) + g, require_bounded_input_variable=False, equal_slopes_tolerance=-1 + ) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) - self.assertEqual(f(1), 1) # lower semicontinuous + self.assertEqual(f(1), 1) # lower semicontinuous self.assertEqual(f(1.5), 2.5) self.assertEqual(f(2), 3) self.assertEqual(f(2.5), 3.5) @@ -451,18 +422,17 @@ def test_call(self): f(4.1) # another step function - g = PiecewiseLinearFunction([1,2,2,3,4], - [1,2,3,4,5], - equal_slopes_tolerance=-1) + g = PiecewiseLinearFunction( + [1, 2, 2, 3, 4], [1, 2, 3, 4, 5], equal_slopes_tolerance=-1 + ) f = TransformedPiecewiseLinearFunction( - g, - require_bounded_input_variable=False, - equal_slopes_tolerance=-1) + g, require_bounded_input_variable=False, equal_slopes_tolerance=-1 + ) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 1) self.assertEqual(f(1.5), 1.5) - self.assertEqual(f(2), 2) # lower semicontinuous + self.assertEqual(f(2), 2) # lower semicontinuous self.assertEqual(f(2.5), 3.5) self.assertEqual(f(3), 4) self.assertEqual(f(3.5), 4.5) @@ -473,20 +443,19 @@ def test_call(self): f(4.1) # another step function - g = PiecewiseLinearFunction([1,2,3,3,4], - [1,2,3,4,5], - equal_slopes_tolerance=-1) + g = PiecewiseLinearFunction( + [1, 2, 3, 3, 4], [1, 2, 3, 4, 5], equal_slopes_tolerance=-1 + ) f = TransformedPiecewiseLinearFunction( - g, - require_bounded_input_variable=False, - equal_slopes_tolerance=-1) + g, require_bounded_input_variable=False, equal_slopes_tolerance=-1 + ) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 1) self.assertEqual(f(1.5), 1.5) self.assertEqual(f(2), 2) self.assertEqual(f(2.5), 2.5) - self.assertEqual(f(3), 3) # lower semicontinuous + self.assertEqual(f(3), 3) # lower semicontinuous self.assertEqual(f(3.5), 4.5) self.assertEqual(f(4), 5) with self.assertRaises(ValueError): @@ -495,13 +464,12 @@ def test_call(self): f(4.1) # another step function - g = PiecewiseLinearFunction([1,2,3,4,4], - [1,2,3,4,5], - equal_slopes_tolerance=-1) + g = PiecewiseLinearFunction( + [1, 2, 3, 4, 4], [1, 2, 3, 4, 5], equal_slopes_tolerance=-1 + ) f = TransformedPiecewiseLinearFunction( - g, - require_bounded_input_variable=False, - equal_slopes_tolerance=-1) + g, require_bounded_input_variable=False, equal_slopes_tolerance=-1 + ) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 1) @@ -510,20 +478,19 @@ def test_call(self): self.assertEqual(f(2.5), 2.5) self.assertEqual(f(3), 3) self.assertEqual(f(3.5), 3.5) - self.assertEqual(f(4), 4) # lower semicontinuous + self.assertEqual(f(4), 4) # lower semicontinuous with self.assertRaises(ValueError): f(0.9) with self.assertRaises(ValueError): f(4.1) # another step function - g = PiecewiseLinearFunction([1,2,3,4,5], - [1,2,3,4,5], - equal_slopes_tolerance=-1) + g = PiecewiseLinearFunction( + [1, 2, 3, 4, 5], [1, 2, 3, 4, 5], equal_slopes_tolerance=-1 + ) f = TransformedPiecewiseLinearFunction( - g, - require_bounded_input_variable=False, - equal_slopes_tolerance=-1) + g, require_bounded_input_variable=False, equal_slopes_tolerance=-1 + ) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertEqual(f(1), 1) @@ -542,10 +509,7 @@ def test_call(self): def test_type(self): for key in transforms.registered_transforms: - p = transforms.piecewise([1,2,3], - [1,2,1], - repn=key, - validate=False) + p = transforms.piecewise([1, 2, 3], [1, 2, 1], repn=key, validate=False) self.assertTrue(len(list(p.children())) <= 4) self.assertTrue(isinstance(p, TransformedPiecewiseLinearFunction)) self.assertTrue(isinstance(p, transforms.registered_transforms[key])) @@ -558,39 +522,39 @@ def test_type(self): def test_bad_repn(self): repn = list(transforms.registered_transforms.keys())[0] self.assertTrue(repn in transforms.registered_transforms) - transforms.piecewise([1,2,3], - [1,2,1], - validate=False, - repn=repn) + transforms.piecewise([1, 2, 3], [1, 2, 1], validate=False, repn=repn) repn = '_bad_repn_' self.assertFalse(repn in transforms.registered_transforms) with self.assertRaises(ValueError): - transforms.piecewise([1,2,3], - [1,2,1], - validate=False, - repn=repn) - with self.assertRaises(ValueError): - transforms.piecewise([1,2,3], - [1,2,1], - input=variable(lb=1,ub=3), - validate=True, - simplify=False, - repn=repn) - with self.assertRaises(ValueError): - transforms.piecewise([1,2,3], - [1,2,1], - input=variable(lb=1,ub=3), - validate=True, - simplify=True, - repn=repn) + transforms.piecewise([1, 2, 3], [1, 2, 1], validate=False, repn=repn) + with self.assertRaises(ValueError): + transforms.piecewise( + [1, 2, 3], + [1, 2, 1], + input=variable(lb=1, ub=3), + validate=True, + simplify=False, + repn=repn, + ) + with self.assertRaises(ValueError): + transforms.piecewise( + [1, 2, 3], + [1, 2, 1], + input=variable(lb=1, ub=3), + validate=True, + simplify=True, + repn=repn, + ) def test_init(self): for key in transforms.registered_transforms: - for bound in ['lb','ub','eq','bad']: - for args in [([1,2,3], [1,2,1]), - ([1,2,3,4,5],[1,2,1,2,1]), - ([1,2,3,4,5,6,7,8,9],[1,2,1,2,1,2,1,2,1])]: + for bound in ['lb', 'ub', 'eq', 'bad']: + for args in [ + ([1, 2, 3], [1, 2, 1]), + ([1, 2, 3, 4, 5], [1, 2, 1, 2, 1]), + ([1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 2, 1, 2, 1, 2, 1, 2, 1]), + ]: kwds = {'repn': key, 'bound': bound, 'validate': False} if bound == 'bad': with self.assertRaises(ValueError): @@ -604,330 +568,344 @@ def test_init(self): else: p = transforms.piecewise(*args, **kwds) self.assertTrue( - isinstance(p, transforms.registered_transforms[key])) + isinstance(p, transforms.registered_transforms[key]) + ) self.assertTrue( - isinstance(p, TransformedPiecewiseLinearFunction)) + isinstance(p, TransformedPiecewiseLinearFunction) + ) self.assertEqual(p.active, True) self.assertIs(p.parent, None) kwds['simplify'] = True p = transforms.piecewise(*args, **kwds) self.assertTrue( - isinstance(p, transforms.registered_transforms[key])) + isinstance(p, transforms.registered_transforms[key]) + ) self.assertTrue( - isinstance(p, TransformedPiecewiseLinearFunction)) + isinstance(p, TransformedPiecewiseLinearFunction) + ) self.assertEqual(p.active, True) self.assertIs(p.parent, None) kwds['simplify'] = False p = transforms.piecewise(*args, **kwds) self.assertTrue( - isinstance(p, transforms.registered_transforms[key])) + isinstance(p, transforms.registered_transforms[key]) + ) self.assertTrue( - isinstance(p, TransformedPiecewiseLinearFunction)) + isinstance(p, TransformedPiecewiseLinearFunction) + ) self.assertEqual(p.active, True) self.assertIs(p.parent, None) def test_bad_init(self): - # lists not the same length with self.assertRaises(ValueError): - PiecewiseLinearFunction([1,2,3], - [1,2,1,1], - validate=False) + PiecewiseLinearFunction([1, 2, 3], [1, 2, 1, 1], validate=False) # lists not the same length with self.assertRaises(ValueError): - PiecewiseLinearFunction([1,2,3,4], - [1,2,1], - validate=False) + PiecewiseLinearFunction([1, 2, 3, 4], [1, 2, 1], validate=False) # breakpoints list not nondecreasing with self.assertRaises(util.PiecewiseValidationError): - PiecewiseLinearFunction([1,3,2], - [1,2,1]) + PiecewiseLinearFunction([1, 3, 2], [1, 2, 1]) - PiecewiseLinearFunction([1,3,2], - [1,2,1], - validate=False) + PiecewiseLinearFunction([1, 3, 2], [1, 2, 1], validate=False) - PiecewiseLinearFunction([1,2,3], - [1,1,1+2e-6], - equal_slopes_tolerance=1e-6) + PiecewiseLinearFunction( + [1, 2, 3], [1, 1, 1 + 2e-6], equal_slopes_tolerance=1e-6 + ) # consecutive slopes are "equal" with self.assertRaises(util.PiecewiseValidationError): - PiecewiseLinearFunction([1,2,3], - [1,1,1+2e-6], - equal_slopes_tolerance=3e-6) + PiecewiseLinearFunction( + [1, 2, 3], [1, 1, 1 + 2e-6], equal_slopes_tolerance=3e-6 + ) - PiecewiseLinearFunction([1,2,3], - [1,1,1+2e-6], - validate=False) + PiecewiseLinearFunction([1, 2, 3], [1, 1, 1 + 2e-6], validate=False) - f = PiecewiseLinearFunction([1,2,3], - [1,2,1]) - TransformedPiecewiseLinearFunction(f, - input=variable(lb=1,ub=3), - require_bounded_input_variable=True) + f = PiecewiseLinearFunction([1, 2, 3], [1, 2, 1]) + TransformedPiecewiseLinearFunction( + f, input=variable(lb=1, ub=3), require_bounded_input_variable=True + ) - TransformedPiecewiseLinearFunction(f, - input=variable(lb=1,ub=3), - require_bounded_input_variable=False) + TransformedPiecewiseLinearFunction( + f, input=variable(lb=1, ub=3), require_bounded_input_variable=False + ) # variable is not bounded with self.assertRaises(util.PiecewiseValidationError): - TransformedPiecewiseLinearFunction(f, - input=variable(lb=1), - require_bounded_input_variable=True) - TransformedPiecewiseLinearFunction(f, - input=variable(lb=1), - require_bounded_input_variable=False) + TransformedPiecewiseLinearFunction( + f, input=variable(lb=1), require_bounded_input_variable=True + ) + TransformedPiecewiseLinearFunction( + f, input=variable(lb=1), require_bounded_input_variable=False + ) with self.assertRaises(util.PiecewiseValidationError): - TransformedPiecewiseLinearFunction(f, - input=variable(ub=3), - require_bounded_input_variable=True) - TransformedPiecewiseLinearFunction(f, - input=variable(ub=3), - require_bounded_input_variable=False) + TransformedPiecewiseLinearFunction( + f, input=variable(ub=3), require_bounded_input_variable=True + ) + TransformedPiecewiseLinearFunction( + f, input=variable(ub=3), require_bounded_input_variable=False + ) with self.assertRaises(util.PiecewiseValidationError): - TransformedPiecewiseLinearFunction(f, - require_bounded_input_variable=True) - TransformedPiecewiseLinearFunction(f, - require_bounded_input_variable=False) + TransformedPiecewiseLinearFunction(f, require_bounded_input_variable=True) + TransformedPiecewiseLinearFunction(f, require_bounded_input_variable=False) # variable domain is not fully covered with self.assertRaises(util.PiecewiseValidationError): - TransformedPiecewiseLinearFunction(f, - input=variable(lb=0), - require_bounded_input_variable=False, - require_variable_domain_coverage=True) - TransformedPiecewiseLinearFunction(f, - input=variable(lb=0), - require_bounded_input_variable=False, - require_variable_domain_coverage=False) + TransformedPiecewiseLinearFunction( + f, + input=variable(lb=0), + require_bounded_input_variable=False, + require_variable_domain_coverage=True, + ) + TransformedPiecewiseLinearFunction( + f, + input=variable(lb=0), + require_bounded_input_variable=False, + require_variable_domain_coverage=False, + ) with self.assertRaises(util.PiecewiseValidationError): - TransformedPiecewiseLinearFunction(f, - input=variable(ub=4), - require_bounded_input_variable=False, - require_variable_domain_coverage=True) - TransformedPiecewiseLinearFunction(f, - input=variable(ub=4), - require_bounded_input_variable=False, - require_variable_domain_coverage=False) + TransformedPiecewiseLinearFunction( + f, + input=variable(ub=4), + require_bounded_input_variable=False, + require_variable_domain_coverage=True, + ) + TransformedPiecewiseLinearFunction( + f, + input=variable(ub=4), + require_bounded_input_variable=False, + require_variable_domain_coverage=False, + ) def test_bad_init_log_types(self): # lists are not of length: (2^n) + 1 with self.assertRaises(ValueError): - transforms.piecewise([1,2,3,4],[1,2,3,4],repn='dlog',validate=False) + transforms.piecewise( + [1, 2, 3, 4], [1, 2, 3, 4], repn='dlog', validate=False + ) with self.assertRaises(ValueError): - transforms.piecewise([1,2,3,4],[1,2,3,4],repn='log',validate=False) + transforms.piecewise([1, 2, 3, 4], [1, 2, 3, 4], repn='log', validate=False) def test_step(self): - breakpoints = [1,2,2] - values = [1,0,1] + breakpoints = [1, 2, 2] + values = [1, 0, 1] v = variable() v.bounds = min(breakpoints), max(breakpoints) for key in transforms.registered_transforms: - if key in ('mc','convex'): + if key in ('mc', 'convex'): with self.assertRaises(util.PiecewiseValidationError): - transforms.piecewise(breakpoints, - values, - input=v, - repn=key) + transforms.piecewise(breakpoints, values, input=v, repn=key) else: - p = transforms.piecewise(breakpoints, - values, - input=v, - repn=key) + p = transforms.piecewise(breakpoints, values, input=v, repn=key) self.assertEqual(p.validate(), 4) def test_simplify(self): v = variable(lb=1, ub=3) - convex_breakpoints = [1,2,3] - convex_values = [1,0,1] + convex_breakpoints = [1, 2, 3] + convex_values = [1, 0, 1] for key in transforms.registered_transforms: - for bound in ('lb','ub','eq'): - if (key == 'convex') and \ - (bound != 'lb'): + for bound in ('lb', 'ub', 'eq'): + if (key == 'convex') and (bound != 'lb'): with self.assertRaises(util.PiecewiseValidationError): - transforms.piecewise(convex_breakpoints, - convex_values, - input=v, - repn=key, - bound=bound, - simplify=False) + transforms.piecewise( + convex_breakpoints, + convex_values, + input=v, + repn=key, + bound=bound, + simplify=False, + ) with self.assertRaises(util.PiecewiseValidationError): - transforms.piecewise(convex_breakpoints, - convex_values, - input=v, - repn=key, - bound=bound, - simplify=True) + transforms.piecewise( + convex_breakpoints, + convex_values, + input=v, + repn=key, + bound=bound, + simplify=True, + ) else: - p = transforms.piecewise(convex_breakpoints, - convex_values, - input=v, - repn=key, - bound=bound, - simplify=False) + p = transforms.piecewise( + convex_breakpoints, + convex_values, + input=v, + repn=key, + bound=bound, + simplify=False, + ) self.assertTrue( - isinstance(p, transforms.registered_transforms[key])) + isinstance(p, transforms.registered_transforms[key]) + ) self.assertEqual(p.validate(), util.characterize_function.convex) - p = transforms.piecewise(convex_breakpoints, - convex_values, - input=v, - repn=key, - bound=bound, - simplify=True) + p = transforms.piecewise( + convex_breakpoints, + convex_values, + input=v, + repn=key, + bound=bound, + simplify=True, + ) if bound == 'lb': self.assertTrue( - isinstance(p, transforms.registered_transforms['convex'])) + isinstance(p, transforms.registered_transforms['convex']) + ) else: self.assertTrue( - isinstance(p, transforms.registered_transforms[key])) + isinstance(p, transforms.registered_transforms[key]) + ) - concave_breakpoints = [1,2,3] - concave_values = [-1,0,-1] + concave_breakpoints = [1, 2, 3] + concave_values = [-1, 0, -1] for key in transforms.registered_transforms: - for bound in ('lb','ub','eq'): - if (key == 'convex') and \ - (bound != 'ub'): + for bound in ('lb', 'ub', 'eq'): + if (key == 'convex') and (bound != 'ub'): with self.assertRaises(util.PiecewiseValidationError): - transforms.piecewise(concave_breakpoints, - concave_values, - input=v, - repn=key, - bound=bound, - simplify=False) + transforms.piecewise( + concave_breakpoints, + concave_values, + input=v, + repn=key, + bound=bound, + simplify=False, + ) with self.assertRaises(util.PiecewiseValidationError): - transforms.piecewise(concave_breakpoints, - concave_values, - input=v, - repn=key, - bound=bound, - simplify=True) + transforms.piecewise( + concave_breakpoints, + concave_values, + input=v, + repn=key, + bound=bound, + simplify=True, + ) else: - p = transforms.piecewise(concave_breakpoints, - concave_values, - input=v, - repn=key, - bound=bound, - simplify=False) + p = transforms.piecewise( + concave_breakpoints, + concave_values, + input=v, + repn=key, + bound=bound, + simplify=False, + ) self.assertTrue( - isinstance(p, transforms.registered_transforms[key])) + isinstance(p, transforms.registered_transforms[key]) + ) self.assertEqual(p.validate(), util.characterize_function.concave) - p = transforms.piecewise(concave_breakpoints, - concave_values, - input=v, - repn=key, - bound=bound, - simplify=True) + p = transforms.piecewise( + concave_breakpoints, + concave_values, + input=v, + repn=key, + bound=bound, + simplify=True, + ) if bound == 'ub': self.assertTrue( - isinstance(p, transforms.registered_transforms['convex'])) + isinstance(p, transforms.registered_transforms['convex']) + ) else: self.assertTrue( - isinstance(p, transforms.registered_transforms[key])) + isinstance(p, transforms.registered_transforms[key]) + ) - affine_breakpoints = [1,3] - affine_values = [1,3] + affine_breakpoints = [1, 3] + affine_values = [1, 3] for key in transforms.registered_transforms: - for bound in ('lb','ub','eq'): - p = transforms.piecewise(affine_breakpoints, - affine_values, - input=v, - repn=key, - bound=bound, - simplify=False) - self.assertTrue( - isinstance(p, transforms.registered_transforms[key])) + for bound in ('lb', 'ub', 'eq'): + p = transforms.piecewise( + affine_breakpoints, + affine_values, + input=v, + repn=key, + bound=bound, + simplify=False, + ) + self.assertTrue(isinstance(p, transforms.registered_transforms[key])) self.assertEqual(p.validate(), util.characterize_function.affine) - p = transforms.piecewise(affine_breakpoints, - affine_values, - input=v, - repn=key, - bound=bound, - simplify=True) + p = transforms.piecewise( + affine_breakpoints, + affine_values, + input=v, + repn=key, + bound=bound, + simplify=True, + ) self.assertTrue( - isinstance(p, transforms.registered_transforms['convex'])) + isinstance(p, transforms.registered_transforms['convex']) + ) -class Test_piecewise_dict(_TestActiveDictContainerBase, - unittest.TestCase): + +class Test_piecewise_dict(_TestActiveDictContainerBase, unittest.TestCase): _container_type = block_dict - _ctype_factory = lambda self: transforms.piecewise([1,2,3], - [1,2,1], - validate=False) + _ctype_factory = lambda self: transforms.piecewise( + [1, 2, 3], [1, 2, 1], validate=False + ) + -class Test_piecewise_list(_TestActiveListContainerBase, - unittest.TestCase): +class Test_piecewise_list(_TestActiveListContainerBase, unittest.TestCase): _container_type = block_list - _ctype_factory = lambda self: transforms.piecewise([1,2,3], - [1,2,1], - validate=False) + _ctype_factory = lambda self: transforms.piecewise( + [1, 2, 3], [1, 2, 1], validate=False + ) -@unittest.skipUnless(util.numpy_available and util.scipy_available, - "Numpy or Scipy is not available") -class Test_piecewise_nd(unittest.TestCase): +@unittest.skipUnless( + util.numpy_available and util.scipy_available, "Numpy or Scipy is not available" +) +class Test_piecewise_nd(unittest.TestCase): def test_pickle(self): for key in transforms_nd.registered_transforms: - p = transforms_nd.piecewise_nd(_test_tri, - _test_values, - repn=key) + p = transforms_nd.piecewise_nd(_test_tri, _test_values, repn=key) self.assertEqual(p.parent, None) - pup = pickle.loads( - pickle.dumps(p)) + pup = pickle.loads(pickle.dumps(p)) self.assertEqual(pup.parent, None) b = block() b.p = p self.assertIs(p.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) pup = bup.p self.assertIs(pup.parent, bup) def test_call(self): - # # 2d points # - vlist = variable_list([variable(lb=0, ub=1), - variable(lb=0, ub=1)]) + vlist = variable_list([variable(lb=0, ub=1), variable(lb=0, ub=1)]) tri = util.generate_delaunay(vlist, num=3) x, y = tri.points.T - values = x*y + values = x * y g = PiecewiseLinearFunctionND(tri, values) f = TransformedPiecewiseLinearFunctionND(g) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertTrue(util.numpy.isclose(f(tri.points), values).all()) - self.assertAlmostEqual(f([0,0]), 0.0) - self.assertAlmostEqual(f(util.numpy.array([0,0])), 0.0) - self.assertAlmostEqual(f([1,1]), 1.0) - self.assertAlmostEqual(f(util.numpy.array([1,1])), 1.0) + self.assertAlmostEqual(f([0, 0]), 0.0) + self.assertAlmostEqual(f(util.numpy.array([0, 0])), 0.0) + self.assertAlmostEqual(f([1, 1]), 1.0) + self.assertAlmostEqual(f(util.numpy.array([1, 1])), 1.0) # # 3d points # - vlist = variable_list([variable(lb=0, ub=1), - variable(lb=0, ub=1), - variable(lb=0, ub=1)]) + vlist = variable_list( + [variable(lb=0, ub=1), variable(lb=0, ub=1), variable(lb=0, ub=1)] + ) tri = util.generate_delaunay(vlist, num=10) x, y, z = tri.points.T - values = x*y*z + values = x * y * z g = PiecewiseLinearFunctionND(tri, values) f = TransformedPiecewiseLinearFunctionND(g) self.assertTrue(f.parent is None) self.assertEqual(f.ctype, IBlock) self.assertTrue(util.numpy.isclose(f(tri.points), values).all()) - self.assertAlmostEqual(f([0,0,0]), 0.0) - self.assertAlmostEqual(f(util.numpy.array([0,0,0])), 0.0) - self.assertAlmostEqual(f([1,1,1]), 1.0) - self.assertAlmostEqual(f(util.numpy.array([1,1,1])), 1.0) + self.assertAlmostEqual(f([0, 0, 0]), 0.0) + self.assertAlmostEqual(f(util.numpy.array([0, 0, 0])), 0.0) + self.assertAlmostEqual(f([1, 1, 1]), 1.0) + self.assertAlmostEqual(f(util.numpy.array([1, 1, 1])), 1.0) def test_type(self): for key in transforms_nd.registered_transforms: - p = transforms_nd.piecewise_nd(_test_tri, - _test_values, - repn=key) + p = transforms_nd.piecewise_nd(_test_tri, _test_values, repn=key) # small block storage self.assertTrue(len(list(p.children())) <= 4) self.assertTrue(isinstance(p, TransformedPiecewiseLinearFunctionND)) @@ -941,20 +919,16 @@ def test_type(self): def test_bad_repn(self): repn = list(transforms_nd.registered_transforms.keys())[0] self.assertTrue(repn in transforms_nd.registered_transforms) - transforms_nd.piecewise_nd(_test_tri, - _test_values, - repn=repn) + transforms_nd.piecewise_nd(_test_tri, _test_values, repn=repn) repn = '_bad_repn_' self.assertFalse(repn in transforms_nd.registered_transforms) with self.assertRaises(ValueError): - transforms_nd.piecewise_nd(_test_tri, - _test_values, - repn=repn) + transforms_nd.piecewise_nd(_test_tri, _test_values, repn=repn) def test_init(self): for key in transforms_nd.registered_transforms: - for bound in ['lb','ub','eq','bad']: + for bound in ['lb', 'ub', 'eq', 'bad']: args = (_test_tri, _test_values) kwds = {'repn': key, 'bound': bound} if bound == 'bad': @@ -963,29 +937,28 @@ def test_init(self): else: p = transforms_nd.piecewise_nd(*args, **kwds) self.assertTrue( - isinstance(p, transforms_nd.registered_transforms[key])) - self.assertTrue( - isinstance(p, TransformedPiecewiseLinearFunctionND)) + isinstance(p, transforms_nd.registered_transforms[key]) + ) + self.assertTrue(isinstance(p, TransformedPiecewiseLinearFunctionND)) self.assertEqual(p.active, True) self.assertIs(p.parent, None) -@unittest.skipUnless(util.numpy_available and util.scipy_available, - "Numpy or Scipy is not available") -class Test_piecewise_nd_dict(_TestActiveDictContainerBase, - unittest.TestCase): + +@unittest.skipUnless( + util.numpy_available and util.scipy_available, "Numpy or Scipy is not available" +) +class Test_piecewise_nd_dict(_TestActiveDictContainerBase, unittest.TestCase): _container_type = block_dict - _ctype_factory = lambda self: \ - transforms_nd.piecewise_nd(_test_tri, - _test_values) - -@unittest.skipUnless(util.numpy_available and util.scipy_available, - "Numpy or Scipy is not available") -class Test_piecewise_nd_list(_TestActiveListContainerBase, - unittest.TestCase): + _ctype_factory = lambda self: transforms_nd.piecewise_nd(_test_tri, _test_values) + + +@unittest.skipUnless( + util.numpy_available and util.scipy_available, "Numpy or Scipy is not available" +) +class Test_piecewise_nd_list(_TestActiveListContainerBase, unittest.TestCase): _container_type = block_list - _ctype_factory = lambda self:\ - transforms_nd.piecewise_nd(_test_tri, - _test_values) + _ctype_factory = lambda self: transforms_nd.piecewise_nd(_test_tri, _test_values) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_sos.py b/pyomo/core/tests/unit/kernel/test_sos.py index 058bbbf87b2..9410425d405 100644 --- a/pyomo/core/tests/unit/kernel/test_sos.py +++ b/pyomo/core/tests/unit/kernel/test_sos.py @@ -12,31 +12,27 @@ import pickle import pyomo.common.unittest as unittest -from pyomo.core.tests.unit.kernel.test_dict_container import \ - _TestActiveDictContainerBase -from pyomo.core.tests.unit.kernel.test_tuple_container import \ - _TestActiveTupleContainerBase -from pyomo.core.tests.unit.kernel.test_list_container import \ - _TestActiveListContainerBase +from pyomo.core.tests.unit.kernel.test_dict_container import ( + _TestActiveDictContainerBase, +) +from pyomo.core.tests.unit.kernel.test_tuple_container import ( + _TestActiveTupleContainerBase, +) +from pyomo.core.tests.unit.kernel.test_list_container import ( + _TestActiveListContainerBase, +) from pyomo.core.kernel.base import ICategorizedObject -from pyomo.core.kernel.sos import (ISOS, - sos, - sos1, - sos2, - sos_dict, - sos_tuple, - sos_list) +from pyomo.core.kernel.sos import ISOS, sos, sos1, sos2, sos_dict, sos_tuple, sos_list from pyomo.core.kernel.block import block -from pyomo.core.kernel.variable import (variable, - variable_list) +from pyomo.core.kernel.variable import variable, variable_list from pyomo.core.kernel.parameter import parameter -from pyomo.core.kernel.expression import (expression, - data_expression) +from pyomo.core.kernel.expression import expression, data_expression -class Test_sos(unittest.TestCase): +class Test_sos(unittest.TestCase): def test_pprint(self): import pyomo.kernel + # Not really testing what the output is, just that # an error does not occur. The pprint functionality # is still in the early stages. @@ -61,15 +57,14 @@ def test_ctype(self): def test_pickle(self): v = variable() - s = sos([v],weights=[1]) + s = sos([v], weights=[1]) self.assertEqual(len(s), 1) self.assertIs(s.variables[0], v) self.assertTrue(v in s) self.assertEqual(s.weights[0], 1) self.assertEqual(s.level, 1) self.assertEqual(s.parent, None) - sup = pickle.loads( - pickle.dumps(s)) + sup = pickle.loads(pickle.dumps(s)) self.assertEqual(len(sup), 1) self.assertIsNot(sup.variables[0], v) self.assertFalse(v in sup) @@ -82,8 +77,7 @@ def test_pickle(self): self.assertIs(v.parent, b) b.s = s self.assertIs(s.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) sup = bup.s self.assertEqual(len(sup), 1) self.assertIs(sup.variables[0], bup.v) @@ -109,11 +103,11 @@ def test_init(self): self.assertEqual(len(s.variables), 2) for v in vlist: self.assertTrue(v in s) - self.assertEqual(s.weights, tuple([1,2])) + self.assertEqual(s.weights, tuple([1, 2])) self.assertEqual(s.level, 1) vlist = tuple([variable(), variable()]) - s = sos(vlist, weights=[3.5,4.5], level=2) + s = sos(vlist, weights=[3.5, 4.5], level=2) self.assertTrue(s.parent is None) self.assertEqual(s.ctype, ISOS) self.assertEqual(len(s), 2) @@ -122,7 +116,7 @@ def test_init(self): self.assertTrue(v in s) self.assertEqual(s.weights, tuple([3.5, 4.5])) self.assertEqual(s.level, 2) - for i, (v,w) in enumerate(s.items()): + for i, (v, w) in enumerate(s.items()): self.assertIs(v, vlist[i]) self.assertEqual(w, s.weights[i]) @@ -186,20 +180,21 @@ def test_active(self): self.assertEqual(s.active, False) self.assertEqual(b.active, False) -class Test_sos_dict(_TestActiveDictContainerBase, - unittest.TestCase): + +class Test_sos_dict(_TestActiveDictContainerBase, unittest.TestCase): _container_type = sos_dict _ctype_factory = lambda self: sos([variable()]) -class Test_sos_tuple(_TestActiveTupleContainerBase, - unittest.TestCase): + +class Test_sos_tuple(_TestActiveTupleContainerBase, unittest.TestCase): _container_type = sos_tuple _ctype_factory = lambda self: sos([variable()]) -class Test_sos_list(_TestActiveListContainerBase, - unittest.TestCase): + +class Test_sos_list(_TestActiveListContainerBase, unittest.TestCase): _container_type = sos_list _ctype_factory = lambda self: sos([variable()]) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_suffix.py b/pyomo/core/tests/unit/kernel/test_suffix.py index 89378f24a56..6565c6a920d 100644 --- a/pyomo/core/tests/unit/kernel/test_suffix.py +++ b/pyomo/core/tests/unit/kernel/test_suffix.py @@ -14,24 +14,25 @@ import pyomo.common.unittest as unittest from pyomo.kernel import pprint -from pyomo.core.tests.unit.kernel.test_dict_container import \ - _TestActiveDictContainerBase +from pyomo.core.tests.unit.kernel.test_dict_container import ( + _TestActiveDictContainerBase, +) from pyomo.core.kernel.base import ICategorizedObject -from pyomo.core.kernel.suffix import (ISuffix, - suffix, - suffix_dict, - export_suffix_generator, - import_suffix_generator, - local_suffix_generator, - suffix_generator) +from pyomo.core.kernel.suffix import ( + ISuffix, + suffix, + suffix_dict, + export_suffix_generator, + import_suffix_generator, + local_suffix_generator, + suffix_generator, +) from pyomo.core.kernel.variable import variable -from pyomo.core.kernel.constraint import (constraint, - constraint_list) -from pyomo.core.kernel.block import (block, - block_dict) +from pyomo.core.kernel.constraint import constraint, constraint_list +from pyomo.core.kernel.block import block, block_dict -class Test_suffix(unittest.TestCase): +class Test_suffix(unittest.TestCase): def test_pprint(self): # Not really testing what the output is, just that # an error does not occur. The pprint functionality @@ -68,29 +69,25 @@ def test_ctype(self): self.assertIs(type(s)._ctype, ISuffix) def test_pickle(self): - s = suffix(direction=suffix.EXPORT, - datatype=suffix.FLOAT) + s = suffix(direction=suffix.EXPORT, datatype=suffix.FLOAT) self.assertEqual(s.direction, suffix.EXPORT) self.assertEqual(s.datatype, suffix.FLOAT) self.assertEqual(s.parent, None) - sup = pickle.loads( - pickle.dumps(s)) + sup = pickle.loads(pickle.dumps(s)) self.assertEqual(sup.direction, suffix.EXPORT) self.assertEqual(sup.datatype, suffix.FLOAT) self.assertEqual(sup.parent, None) b = block() b.s = s self.assertIs(s.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) sup = bup.s self.assertEqual(sup.direction, suffix.EXPORT) self.assertEqual(sup.datatype, suffix.FLOAT) self.assertIs(sup.parent, bup) b.v = variable(lb=1) b.s[b.v] = 1.0 - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) sup = bup.s vup = bup.v self.assertEqual(sup[vup], 1.0) @@ -205,7 +202,6 @@ def test_name(self): self.assertEqual(s.name, "bdict[0].model.s") def test_active(self): - s = suffix() with self.assertRaises(AttributeError): s.active = False @@ -284,236 +280,222 @@ def test_active(self): def test_export_suffix_generator(self): m = block() m.s0 = suffix(direction=suffix.LOCAL) - m.s0i = suffix(direction=suffix.LOCAL, - datatype=suffix.INT) + m.s0i = suffix(direction=suffix.LOCAL, datatype=suffix.INT) m.s1 = suffix(direction=suffix.IMPORT_EXPORT) - m.s1i = suffix(direction=suffix.IMPORT_EXPORT, - datatype=suffix.INT) + m.s1i = suffix(direction=suffix.IMPORT_EXPORT, datatype=suffix.INT) m.s2 = suffix(direction=suffix.IMPORT) - m.s2i = suffix(direction=suffix.IMPORT, - datatype=suffix.INT) + m.s2i = suffix(direction=suffix.IMPORT, datatype=suffix.INT) m.s3 = suffix(direction=suffix.EXPORT) - m.s3i = suffix(direction=suffix.EXPORT, - datatype=suffix.INT) + m.s3i = suffix(direction=suffix.EXPORT, datatype=suffix.INT) m.b = block() m.b.s0 = suffix(direction=suffix.LOCAL) - m.b.s0i = suffix(direction=suffix.LOCAL, - datatype=suffix.INT) + m.b.s0i = suffix(direction=suffix.LOCAL, datatype=suffix.INT) m.b.s1 = suffix(direction=suffix.IMPORT_EXPORT) - m.b.s1i = suffix(direction=suffix.IMPORT_EXPORT, - datatype=suffix.INT) + m.b.s1i = suffix(direction=suffix.IMPORT_EXPORT, datatype=suffix.INT) m.b.s2 = suffix(direction=suffix.IMPORT) - m.b.s2i = suffix(direction=suffix.IMPORT, - datatype=suffix.INT) + m.b.s2i = suffix(direction=suffix.IMPORT, datatype=suffix.INT) m.b.s3 = suffix(direction=suffix.EXPORT) - m.b.s3i = suffix(direction=suffix.EXPORT, - datatype=suffix.INT) + m.b.s3i = suffix(direction=suffix.EXPORT, datatype=suffix.INT) # default - self.assertEqual([id(c_) for c_ - in export_suffix_generator(m)], - [id(m.s1), id(m.s1i), - id(m.s3), id(m.s3i), - id(m.b.s1), id(m.b.s1i), - id(m.b.s3), id(m.b.s3i)]) + self.assertEqual( + [id(c_) for c_ in export_suffix_generator(m)], + [ + id(m.s1), + id(m.s1i), + id(m.s3), + id(m.s3i), + id(m.b.s1), + id(m.b.s1i), + id(m.b.s3), + id(m.b.s3i), + ], + ) # descend_into=False - self.assertEqual([id(c_) for c_ - in export_suffix_generator(m, - descend_into=False)], - [id(m.s1), id(m.s1i), - id(m.s3), id(m.s3i)]) + self.assertEqual( + [id(c_) for c_ in export_suffix_generator(m, descend_into=False)], + [id(m.s1), id(m.s1i), id(m.s3), id(m.s3i)], + ) # datatype=INT - self.assertEqual([id(c_) for c_ - in export_suffix_generator(m, - datatype=suffix.INT)], - [id(m.s1i), - id(m.s3i), - id(m.b.s1i), - id(m.b.s3i)]) + self.assertEqual( + [id(c_) for c_ in export_suffix_generator(m, datatype=suffix.INT)], + [id(m.s1i), id(m.s3i), id(m.b.s1i), id(m.b.s3i)], + ) # active=True m.s1.deactivate() m.b.deactivate() - self.assertEqual([id(c_) for c_ in export_suffix_generator(m, - active=True)], - [id(m.s1i), id(m.s3), id(m.s3i)]) + self.assertEqual( + [id(c_) for c_ in export_suffix_generator(m, active=True)], + [id(m.s1i), id(m.s3), id(m.s3i)], + ) def test_import_suffix_generator(self): m = block() m.s0 = suffix(direction=suffix.LOCAL) - m.s0i = suffix(direction=suffix.LOCAL, - datatype=suffix.INT) + m.s0i = suffix(direction=suffix.LOCAL, datatype=suffix.INT) m.s1 = suffix(direction=suffix.IMPORT_EXPORT) - m.s1i = suffix(direction=suffix.IMPORT_EXPORT, - datatype=suffix.INT) + m.s1i = suffix(direction=suffix.IMPORT_EXPORT, datatype=suffix.INT) m.s2 = suffix(direction=suffix.IMPORT) - m.s2i = suffix(direction=suffix.IMPORT, - datatype=suffix.INT) + m.s2i = suffix(direction=suffix.IMPORT, datatype=suffix.INT) m.s3 = suffix(direction=suffix.EXPORT) - m.s3i = suffix(direction=suffix.EXPORT, - datatype=suffix.INT) + m.s3i = suffix(direction=suffix.EXPORT, datatype=suffix.INT) m.b = block() m.b.s0 = suffix(direction=suffix.LOCAL) - m.b.s0i = suffix(direction=suffix.LOCAL, - datatype=suffix.INT) + m.b.s0i = suffix(direction=suffix.LOCAL, datatype=suffix.INT) m.b.s1 = suffix(direction=suffix.IMPORT_EXPORT) - m.b.s1i = suffix(direction=suffix.IMPORT_EXPORT, - datatype=suffix.INT) + m.b.s1i = suffix(direction=suffix.IMPORT_EXPORT, datatype=suffix.INT) m.b.s2 = suffix(direction=suffix.IMPORT) - m.b.s2i = suffix(direction=suffix.IMPORT, - datatype=suffix.INT) + m.b.s2i = suffix(direction=suffix.IMPORT, datatype=suffix.INT) m.b.s3 = suffix(direction=suffix.EXPORT) - m.b.s3i = suffix(direction=suffix.EXPORT, - datatype=suffix.INT) + m.b.s3i = suffix(direction=suffix.EXPORT, datatype=suffix.INT) # default - self.assertEqual([id(c_) for c_ - in import_suffix_generator(m)], - [id(m.s1), id(m.s1i), - id(m.s2), id(m.s2i), - id(m.b.s1), id(m.b.s1i), - id(m.b.s2), id(m.b.s2i)]) + self.assertEqual( + [id(c_) for c_ in import_suffix_generator(m)], + [ + id(m.s1), + id(m.s1i), + id(m.s2), + id(m.s2i), + id(m.b.s1), + id(m.b.s1i), + id(m.b.s2), + id(m.b.s2i), + ], + ) # descend_into=False - self.assertEqual([id(c_) for c_ - in import_suffix_generator(m, - descend_into=False)], - [id(m.s1), id(m.s1i), - id(m.s2), id(m.s2i)]) + self.assertEqual( + [id(c_) for c_ in import_suffix_generator(m, descend_into=False)], + [id(m.s1), id(m.s1i), id(m.s2), id(m.s2i)], + ) # datatype=INT - self.assertEqual([id(c_) for c_ - in import_suffix_generator(m, - datatype=suffix.INT)], - [id(m.s1i), - id(m.s2i), - id(m.b.s1i), - id(m.b.s2i)]) + self.assertEqual( + [id(c_) for c_ in import_suffix_generator(m, datatype=suffix.INT)], + [id(m.s1i), id(m.s2i), id(m.b.s1i), id(m.b.s2i)], + ) # active=True m.s1.deactivate() m.b.deactivate() - self.assertEqual([id(c_) for c_ - in import_suffix_generator(m, - active=True)], - [id(m.s1i), id(m.s2), id(m.s2i)]) + self.assertEqual( + [id(c_) for c_ in import_suffix_generator(m, active=True)], + [id(m.s1i), id(m.s2), id(m.s2i)], + ) def test_local_suffix_generator(self): m = block() m.s0 = suffix(direction=suffix.LOCAL) - m.s0i = suffix(direction=suffix.LOCAL, - datatype=suffix.INT) + m.s0i = suffix(direction=suffix.LOCAL, datatype=suffix.INT) m.s1 = suffix(direction=suffix.IMPORT_EXPORT) - m.s1i = suffix(direction=suffix.IMPORT_EXPORT, - datatype=suffix.INT) + m.s1i = suffix(direction=suffix.IMPORT_EXPORT, datatype=suffix.INT) m.s2 = suffix(direction=suffix.IMPORT) - m.s2i = suffix(direction=suffix.IMPORT, - datatype=suffix.INT) + m.s2i = suffix(direction=suffix.IMPORT, datatype=suffix.INT) m.s3 = suffix(direction=suffix.EXPORT) - m.s3i = suffix(direction=suffix.EXPORT, - datatype=suffix.INT) + m.s3i = suffix(direction=suffix.EXPORT, datatype=suffix.INT) m.b = block() m.b.s0 = suffix(direction=suffix.LOCAL) - m.b.s0i = suffix(direction=suffix.LOCAL, - datatype=suffix.INT) + m.b.s0i = suffix(direction=suffix.LOCAL, datatype=suffix.INT) m.b.s1 = suffix(direction=suffix.IMPORT_EXPORT) - m.b.s1i = suffix(direction=suffix.IMPORT_EXPORT, - datatype=suffix.INT) + m.b.s1i = suffix(direction=suffix.IMPORT_EXPORT, datatype=suffix.INT) m.b.s2 = suffix(direction=suffix.IMPORT) - m.b.s2i = suffix(direction=suffix.IMPORT, - datatype=suffix.INT) + m.b.s2i = suffix(direction=suffix.IMPORT, datatype=suffix.INT) m.b.s3 = suffix(direction=suffix.EXPORT) - m.b.s3i = suffix(direction=suffix.EXPORT, - datatype=suffix.INT) + m.b.s3i = suffix(direction=suffix.EXPORT, datatype=suffix.INT) # default - self.assertEqual([id(c_) for c_ - in local_suffix_generator(m)], - [id(m.s0), id(m.s0i), - id(m.b.s0), id(m.b.s0i)]) + self.assertEqual( + [id(c_) for c_ in local_suffix_generator(m)], + [id(m.s0), id(m.s0i), id(m.b.s0), id(m.b.s0i)], + ) # descend_into=False - self.assertEqual([id(c_) for c_ - in local_suffix_generator(m, - descend_into=False)], - [id(m.s0), id(m.s0i)]) + self.assertEqual( + [id(c_) for c_ in local_suffix_generator(m, descend_into=False)], + [id(m.s0), id(m.s0i)], + ) # datatype=INT - self.assertEqual([id(c_) for c_ - in local_suffix_generator(m, - datatype=suffix.INT)], - [id(m.s0i), - id(m.b.s0i)]) + self.assertEqual( + [id(c_) for c_ in local_suffix_generator(m, datatype=suffix.INT)], + [id(m.s0i), id(m.b.s0i)], + ) # active=True m.s0.deactivate() m.b.deactivate() - self.assertEqual([id(c_) for c_ - in local_suffix_generator(m, - active=True)], - [id(m.s0i)]) + self.assertEqual( + [id(c_) for c_ in local_suffix_generator(m, active=True)], [id(m.s0i)] + ) def test_suffix_generator(self): m = block() m.s0 = suffix(direction=suffix.LOCAL) - m.s0i = suffix(direction=suffix.LOCAL, - datatype=suffix.INT) + m.s0i = suffix(direction=suffix.LOCAL, datatype=suffix.INT) m.s1 = suffix(direction=suffix.IMPORT_EXPORT) - m.s1i = suffix(direction=suffix.IMPORT_EXPORT, - datatype=suffix.INT) + m.s1i = suffix(direction=suffix.IMPORT_EXPORT, datatype=suffix.INT) m.s2 = suffix(direction=suffix.IMPORT) - m.s2i = suffix(direction=suffix.IMPORT, - datatype=suffix.INT) + m.s2i = suffix(direction=suffix.IMPORT, datatype=suffix.INT) m.s3 = suffix(direction=suffix.EXPORT) - m.s3i = suffix(direction=suffix.EXPORT, - datatype=suffix.INT) + m.s3i = suffix(direction=suffix.EXPORT, datatype=suffix.INT) m.b = block() m.b.s0 = suffix(direction=suffix.LOCAL) - m.b.s0i = suffix(direction=suffix.LOCAL, - datatype=suffix.INT) + m.b.s0i = suffix(direction=suffix.LOCAL, datatype=suffix.INT) m.b.s1 = suffix(direction=suffix.IMPORT_EXPORT) - m.b.s1i = suffix(direction=suffix.IMPORT_EXPORT, - datatype=suffix.INT) + m.b.s1i = suffix(direction=suffix.IMPORT_EXPORT, datatype=suffix.INT) m.b.s2 = suffix(direction=suffix.IMPORT) - m.b.s2i = suffix(direction=suffix.IMPORT, - datatype=suffix.INT) + m.b.s2i = suffix(direction=suffix.IMPORT, datatype=suffix.INT) m.b.s3 = suffix(direction=suffix.EXPORT) - m.b.s3i = suffix(direction=suffix.EXPORT, - datatype=suffix.INT) + m.b.s3i = suffix(direction=suffix.EXPORT, datatype=suffix.INT) # default - self.assertEqual([id(c_) for c_ - in suffix_generator(m)], - [id(m.s0), id(m.s0i), - id(m.s1), id(m.s1i), - id(m.s2), id(m.s2i), - id(m.s3), id(m.s3i), - id(m.b.s0), id(m.b.s0i), - id(m.b.s1), id(m.b.s1i), - id(m.b.s2), id(m.b.s2i), - id(m.b.s3), id(m.b.s3i)]) + self.assertEqual( + [id(c_) for c_ in suffix_generator(m)], + [ + id(m.s0), + id(m.s0i), + id(m.s1), + id(m.s1i), + id(m.s2), + id(m.s2i), + id(m.s3), + id(m.s3i), + id(m.b.s0), + id(m.b.s0i), + id(m.b.s1), + id(m.b.s1i), + id(m.b.s2), + id(m.b.s2i), + id(m.b.s3), + id(m.b.s3i), + ], + ) # descend_into=False - self.assertEqual([id(c_) for c_ - in suffix_generator(m, - descend_into=False)], - [id(m.s0), id(m.s0i), - id(m.s1), id(m.s1i), - id(m.s2), id(m.s2i), - id(m.s3), id(m.s3i)]) + self.assertEqual( + [id(c_) for c_ in suffix_generator(m, descend_into=False)], + [ + id(m.s0), + id(m.s0i), + id(m.s1), + id(m.s1i), + id(m.s2), + id(m.s2i), + id(m.s3), + id(m.s3i), + ], + ) # datatype=INT - self.assertEqual([id(c_) for c_ - in suffix_generator(m, - datatype=suffix.INT)], - [id(m.s0i), - id(m.s1i), - id(m.s2i), - id(m.s3i), - id(m.b.s0i), - id(m.b.s1i), - id(m.b.s2i), - id(m.b.s3i)]) + self.assertEqual( + [id(c_) for c_ in suffix_generator(m, datatype=suffix.INT)], + [ + id(m.s0i), + id(m.s1i), + id(m.s2i), + id(m.s3i), + id(m.b.s0i), + id(m.b.s1i), + id(m.b.s2i), + id(m.b.s3i), + ], + ) # active=True m.s1.deactivate() m.b.deactivate() - self.assertEqual([id(c_) for c_ - in suffix_generator(m, - active=True)], - [id(m.s0), - id(m.s0i), - id(m.s1i), - id(m.s2), - id(m.s2i), - id(m.s3), - id(m.s3i)]) + self.assertEqual( + [id(c_) for c_ in suffix_generator(m, active=True)], + [id(m.s0), id(m.s0i), id(m.s1i), id(m.s2), id(m.s2i), id(m.s3), id(m.s3i)], + ) # # These methods are deprecated @@ -579,10 +561,11 @@ def test_getset_datatype(self): with self.assertRaises(ValueError): s.set_datatype('something') -class Test_suffix_dict(_TestActiveDictContainerBase, - unittest.TestCase): + +class Test_suffix_dict(_TestActiveDictContainerBase, unittest.TestCase): _container_type = suffix_dict _ctype_factory = lambda self: suffix() + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_tuple_container.py b/pyomo/core/tests/unit/kernel/test_tuple_container.py index 5ffb5916853..0b45c36b299 100644 --- a/pyomo/core/tests/unit/kernel/test_tuple_container.py +++ b/pyomo/core/tests/unit/kernel/test_tuple_container.py @@ -14,14 +14,10 @@ import pyomo.common.unittest as unittest import pyomo.kernel as pmo -from pyomo.core.kernel.base import \ - (ICategorizedObject, - ICategorizedObjectContainer) -from pyomo.core.kernel.homogeneous_container import \ - IHomogeneousContainer +from pyomo.core.kernel.base import ICategorizedObject, ICategorizedObjectContainer +from pyomo.core.kernel.homogeneous_container import IHomogeneousContainer from pyomo.core.kernel.tuple_container import TupleContainer -from pyomo.core.kernel.block import (block, - block_list) +from pyomo.core.kernel.block import block, block_list # # There are no fully implemented test suites in this @@ -36,11 +32,12 @@ # and weakref (bas _pickle_test_protocol = pickle.HIGHEST_PROTOCOL + class _bad_ctype(object): ctype = "_this_is_definitely_not_the_ctype_being_tested" -class _TestTupleContainerBase(object): +class _TestTupleContainerBase(object): # set by derived class _container_type = None _ctype_factory = None @@ -57,11 +54,9 @@ def test_init1(self): def test_init2(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) with self.assertRaises(TypeError): - d = self._container_type( - *tuple(self._ctype_factory() for i in index)) + d = self._container_type(*tuple(self._ctype_factory() for i in index)) def test_type(self): ctuple = self._container_type() @@ -78,16 +73,13 @@ def test_len1(self): def test_len2(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) self.assertEqual(len(c), len(index)) - def test_wrong_type_init(self): index = range(5) with self.assertRaises(TypeError): - c = self._container_type( - _bad_ctype() for i in index) + c = self._container_type(_bad_ctype() for i in index) def test_has_parent_init(self): ctuple = self._container_type([self._ctype_factory()]) @@ -96,8 +88,7 @@ def test_has_parent_init(self): def test_iter(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) self.assertEqual(len(c), len(index)) raw_tuple = c[:] self.assertEqual(type(raw_tuple), tuple) @@ -106,8 +97,7 @@ def test_iter(self): def test_reverse(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) raw_tuple = c[:] self.assertEqual(type(raw_tuple), tuple) for c1, c2 in zip(reversed(c), reversed(raw_tuple)): @@ -115,65 +105,56 @@ def test_reverse(self): def test_index(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) for i in index: cdata = c[i] self.assertEqual(c.index(cdata), i) self.assertEqual(c.index(cdata, start=i), i) with self.assertRaises(ValueError): - c.index(cdata, start=i+1) + c.index(cdata, start=i + 1) with self.assertRaises(ValueError): c.index(cdata, start=i, stop=i) with self.assertRaises(ValueError): c.index(cdata, stop=i) - self.assertEqual( - c.index(cdata, start=i, stop=i+1), i) + self.assertEqual(c.index(cdata, start=i, stop=i + 1), i) with self.assertRaises(ValueError): - c.index(cdata, start=i+1, stop=i+1) - self.assertEqual( - c.index(cdata, start=-len(index)+i), i) + c.index(cdata, start=i + 1, stop=i + 1) + self.assertEqual(c.index(cdata, start=-len(index) + i), i) if i == index[-1]: - self.assertEqual( - c.index(cdata, start=-len(index)+i+1), i) + self.assertEqual(c.index(cdata, start=-len(index) + i + 1), i) else: with self.assertRaises(ValueError): - self.assertEqual( - c.index(cdata, start=-len(index)+i+1), - i) + self.assertEqual(c.index(cdata, start=-len(index) + i + 1), i) if i == index[-1]: with self.assertRaises(ValueError): - self.assertEqual( - c.index(cdata, stop=-len(index)+i+1), i) + self.assertEqual(c.index(cdata, stop=-len(index) + i + 1), i) else: - self.assertEqual( - c.index(cdata, stop=-len(index)+i+1), i) + self.assertEqual(c.index(cdata, stop=-len(index) + i + 1), i) tmp = self._ctype_factory() with self.assertRaises(ValueError): c.index(tmp) with self.assertRaises(ValueError): - c.index(tmp, stop=len(c)+1) + c.index(tmp, stop=len(c) + 1) def test_count(self): index = range(5) - c = self._container_type( - self._ctype_factory() for i in index) + c = self._container_type(self._ctype_factory() for i in index) for i in index: self.assertEqual(c.count(c[i]), 1) def test_pickle(self): index = range(5) ctuple = self._container_type( - [self._ctype_factory() for i in index] + \ - [self._container_type()]) + [self._ctype_factory() for i in index] + [self._container_type()] + ) index = list(index) index = index + [len(index)] for i in index: self.assertTrue(ctuple[i].parent is ctuple) pickled_ctuple = pickle.loads( - pickle.dumps(ctuple, protocol=_pickle_test_protocol)) - self.assertTrue( - isinstance(pickled_ctuple, self._container_type)) + pickle.dumps(ctuple, protocol=_pickle_test_protocol) + ) + self.assertTrue(isinstance(pickled_ctuple, self._container_type)) self.assertTrue(pickled_ctuple.parent is None) self.assertEqual(len(pickled_ctuple), len(index)) self.assertNotEqual(id(pickled_ctuple), id(ctuple)) @@ -183,10 +164,8 @@ def test_pickle(self): self.assertTrue(ctuple[i].parent is ctuple) def test_eq(self): - ctuple1 = self._container_type( - [self._ctype_factory()]) - ctuple2 = self._container_type( - [self._ctype_factory()]) + ctuple1 = self._container_type([self._ctype_factory()]) + ctuple2 = self._container_type([self._ctype_factory()]) self.assertNotEqual(ctuple1, set()) self.assertFalse(ctuple1 == set()) @@ -237,8 +216,7 @@ def test_child(self): def test_name(self): children = [self._ctype_factory() for i in range(5)] - children.append(self._container_type( - [self._ctype_factory()])) + children.append(self._container_type([self._ctype_factory()])) for c in children: self.assertTrue(c.parent is None) @@ -303,8 +281,7 @@ def test_name(self): for i, c in enumerate(children): self.assertTrue(c.parent is ctuple) self.assertEqual(c.local_name, "[%s]" % (i)) - self.assertEqual(c.name, - "[0].model.ctuple[%s]" % (i)) + self.assertEqual(c.name, "[0].model.ctuple[%s]" % (i)) m = block() m.blist = blist @@ -319,8 +296,7 @@ def test_name(self): for i, c in enumerate(children): self.assertTrue(c.parent is ctuple) self.assertEqual(c.local_name, "[%s]" % (i)) - self.assertEqual(c.name, - "blist[0].model.ctuple[%s]" % (i)) + self.assertEqual(c.name, "blist[0].model.ctuple[%s]" % (i)) self.assertEqual(c.name, names[c]) for c in ctuple.components(): self.assertNotEqual(c.name, None) @@ -348,31 +324,34 @@ def test_components(self): ctupleflattened.append(csubtupleflattened[-1]) csubtuple = self._container_type(csubtupleflattened) - self.assertEqual(list(id(_c) for _c in csubtuple.components()), - list(id(_c) for _c in csubtupleflattened)) - self.assertEqual(len(set(id(_c) for _c in csubtuple.components())), - len(list(id(_c) for _c in csubtuple.components()))) - self.assertEqual(len(set(id(_c) for _c in csubtuple.components())), - 3) - - ctuple = self._container_type([ctupleflattened[0], - ctupleflattened[1], - csubtuple]) - self.assertEqual(list(id(_c) for _c in ctuple.components()), - list(id(_c) for _c in ctupleflattened)) - self.assertEqual(len(set(id(_c) for _c in ctuple.components())), - len(list(id(_c) for _c in ctuple.components()))) - self.assertEqual(len(set(id(_c) for _c in ctuple.components())), - 5) + self.assertEqual( + list(id(_c) for _c in csubtuple.components()), + list(id(_c) for _c in csubtupleflattened), + ) + self.assertEqual( + len(set(id(_c) for _c in csubtuple.components())), + len(list(id(_c) for _c in csubtuple.components())), + ) + self.assertEqual(len(set(id(_c) for _c in csubtuple.components())), 3) - def test_preorder_traversal(self): + ctuple = self._container_type( + [ctupleflattened[0], ctupleflattened[1], csubtuple] + ) + self.assertEqual( + list(id(_c) for _c in ctuple.components()), + list(id(_c) for _c in ctupleflattened), + ) + self.assertEqual( + len(set(id(_c) for _c in ctuple.components())), + len(list(id(_c) for _c in ctuple.components())), + ) + self.assertEqual(len(set(id(_c) for _c in ctuple.components())), 5) - csubtuple = self._container_type( - [self._ctype_factory()]) + def test_preorder_traversal(self): + csubtuple = self._container_type([self._ctype_factory()]) ctuple = self._container_type( - [self._ctype_factory(), - csubtuple, - self._ctype_factory()]) + [self._ctype_factory(), csubtuple, self._ctype_factory()] + ) traversal = [] traversal.append(ctuple) @@ -381,21 +360,21 @@ def test_preorder_traversal(self): traversal.append(ctuple[1][0]) traversal.append(ctuple[2]) - self.assertEqual([c.name for c in traversal], - [c.name for c in pmo.preorder_traversal(ctuple)]) - self.assertEqual([id(c) for c in traversal], - [id(c) for c in pmo.preorder_traversal(ctuple)]) + self.assertEqual( + [c.name for c in traversal], + [c.name for c in pmo.preorder_traversal(ctuple)], + ) + self.assertEqual( + [id(c) for c in traversal], [id(c) for c in pmo.preorder_traversal(ctuple)] + ) return ctuple, traversal def test_preorder_traversal_descend_check(self): - - csubtuple = self._container_type( - [self._ctype_factory()]) + csubtuple = self._container_type([self._ctype_factory()]) ctuple = self._container_type( - [self._ctype_factory(), - csubtuple, - self._ctype_factory()]) + [self._ctype_factory(), csubtuple, self._ctype_factory()] + ) traversal = [] traversal.append(ctuple) @@ -408,9 +387,9 @@ def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return False + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - descend=descend)) + order = list(pmo.preorder_traversal(ctuple, descend=descend)) self.assertEqual(len(order), 1) self.assertIs(order[0], ctuple) self.assertEqual(len(descend.seen), 1) @@ -420,41 +399,39 @@ def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return True + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - descend=descend)) - self.assertEqual([c.name for c in traversal], - [c.name for c in order]) - self.assertEqual([id(c) for c in traversal], - [id(c) for c in order]) - self.assertEqual([c.name for c in traversal - if c._is_container], - [c.name for c in descend.seen]) - self.assertEqual([id(c) for c in traversal - if c._is_container], - [id(c) for c in descend.seen]) + order = list(pmo.preorder_traversal(ctuple, descend=descend)) + self.assertEqual([c.name for c in traversal], [c.name for c in order]) + self.assertEqual([id(c) for c in traversal], [id(c) for c in order]) + self.assertEqual( + [c.name for c in traversal if c._is_container], + [c.name for c in descend.seen], + ) + self.assertEqual( + [id(c) for c in traversal if c._is_container], [id(c) for c in descend.seen] + ) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return True + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - descend=descend)) - self.assertEqual([c.name for c in traversal], - [c.name for c in order]) - self.assertEqual([id(c) for c in traversal], - [id(c) for c in order]) - self.assertEqual([c.name for c in traversal - if c._is_container], - [c.name for c in descend.seen]) - self.assertEqual([id(c) for c in traversal - if c._is_container], - [id(c) for c in descend.seen]) + order = list(pmo.preorder_traversal(ctuple, descend=descend)) + self.assertEqual([c.name for c in traversal], [c.name for c in order]) + self.assertEqual([id(c) for c in traversal], [id(c) for c in order]) + self.assertEqual( + [c.name for c in traversal if c._is_container], + [c.name for c in descend.seen], + ) + self.assertEqual( + [id(c) for c in traversal if c._is_container], [id(c) for c in descend.seen] + ) return ctuple, traversal -class _TestActiveTupleContainerBase(_TestTupleContainerBase): +class _TestActiveTupleContainerBase(_TestTupleContainerBase): def test_active_type(self): ctuple = self._container_type() self.assertTrue(isinstance(ctuple, ICategorizedObject)) @@ -466,8 +443,7 @@ def test_active_type(self): def test_active(self): index = list(range(4)) - ctuple = self._container_type(self._ctype_factory() - for i in index) + ctuple = self._container_type(self._ctype_factory() for i in index) with self.assertRaises(AttributeError): ctuple.active = False for c in ctuple: @@ -497,8 +473,9 @@ def test_active(self): for c in ctuple.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(ctuple.components())), len(ctuple)) - self.assertEqual(len(list(ctuple.components())), - len(list(ctuple.components(active=True)))) + self.assertEqual( + len(list(ctuple.components())), len(list(ctuple.components(active=True))) + ) m.deactivate(shallow=False) @@ -510,8 +487,9 @@ def test_active(self): self.assertEqual(ctuple.active, False) for c in ctuple: self.assertEqual(c.active, False) - self.assertNotEqual(len(list(ctuple.components())), - len(list(ctuple.components(active=None)))) + self.assertNotEqual( + len(list(ctuple.components())), len(list(ctuple.components(active=None))) + ) self.assertEqual(len(list(ctuple.components(active=True))), 0) test_c = ctuple[0] @@ -541,8 +519,9 @@ def test_active(self): self.assertEqual(c.active, False) for c in ctuple.components(active=True): self.assertEqual(c.active, True) - self.assertNotEqual(len(list(ctuple.components())), - len(list(ctuple.components(active=None)))) + self.assertNotEqual( + len(list(ctuple.components())), len(list(ctuple.components(active=None))) + ) self.assertEqual(len(list(ctuple.components(active=True))), 1) m.activate(shallow=False) @@ -560,8 +539,9 @@ def test_active(self): for c in ctuple.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(ctuple.components())), len(ctuple)) - self.assertEqual(len(list(ctuple.components())), - len(list(ctuple.components(active=True)))) + self.assertEqual( + len(list(ctuple.components())), len(list(ctuple.components(active=True))) + ) m.deactivate(shallow=False) @@ -573,8 +553,9 @@ def test_active(self): self.assertEqual(ctuple.active, False) for c in ctuple: self.assertEqual(c.active, False) - self.assertNotEqual(len(list(ctuple.components())), - len(list(ctuple.components(active=None)))) + self.assertNotEqual( + len(list(ctuple.components())), len(list(ctuple.components(active=None))) + ) self.assertEqual(len(list(ctuple.components(active=True))), 0) ctuple.activate(shallow=False) @@ -592,8 +573,9 @@ def test_active(self): for c in ctuple.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(ctuple.components())), len(ctuple)) - self.assertEqual(len(list(ctuple.components())), - len(list(ctuple.components(active=True)))) + self.assertEqual( + len(list(ctuple.components())), len(list(ctuple.components(active=True))) + ) ctuple.deactivate(shallow=False) @@ -605,8 +587,9 @@ def test_active(self): self.assertEqual(ctuple.active, False) for i, c in enumerate(ctuple): self.assertEqual(c.active, False) - self.assertNotEqual(len(list(ctuple.components())), - len(list(ctuple.components(active=None)))) + self.assertNotEqual( + len(list(ctuple.components())), len(list(ctuple.components(active=None))) + ) self.assertEqual(len(list(ctuple.components(active=True))), 0) ctuple[-1].activate() @@ -625,12 +608,12 @@ def test_active(self): self.assertEqual(model.active, False) self.assertEqual(ctuple.active, True) for i, c in enumerate(ctuple): - if i == len(ctuple)-1: + if i == len(ctuple) - 1: self.assertEqual(c.active, True) else: self.assertEqual(c.active, False) for i, c in enumerate(ctuple.components(active=None)): - if i == len(ctuple)-1: + if i == len(ctuple) - 1: self.assertEqual(c.active, True) else: self.assertEqual(c.active, False) @@ -638,8 +621,9 @@ def test_active(self): self.assertEqual(c.active, True) for c in ctuple.components(active=True): self.assertEqual(c.active, True) - self.assertNotEqual(len(list(ctuple.components())), - len(list(ctuple.components(active=None)))) + self.assertNotEqual( + len(list(ctuple.components())), len(list(ctuple.components(active=None))) + ) self.assertEqual(len(list(ctuple.components(active=True))), 1) ctuple.deactivate(shallow=False) @@ -658,188 +642,181 @@ def test_active(self): for c in ctuple.components(active=True): self.assertEqual(c.active, True) self.assertEqual(len(list(ctuple.components())), len(ctuple)) - self.assertEqual(len(list(ctuple.components())), - len(list(ctuple.components(active=True)))) + self.assertEqual( + len(list(ctuple.components())), len(list(ctuple.components(active=True))) + ) def test_preorder_traversal(self): - ctuple, traversal = \ - super(_TestActiveTupleContainerBase, self).\ - test_preorder_traversal() + ctuple, traversal = super( + _TestActiveTupleContainerBase, self + ).test_preorder_traversal() ctuple[1].deactivate() - self.assertEqual([None, '[0]', '[2]'], - [c.name for c in pmo.preorder_traversal( - ctuple, - active=True)]) - self.assertEqual([id(ctuple),id(ctuple[0]),id(ctuple[2])], - [id(c) for c in pmo.preorder_traversal( - ctuple, - active=True)]) + self.assertEqual( + [None, '[0]', '[2]'], + [c.name for c in pmo.preorder_traversal(ctuple, active=True)], + ) + self.assertEqual( + [id(ctuple), id(ctuple[0]), id(ctuple[2])], + [id(c) for c in pmo.preorder_traversal(ctuple, active=True)], + ) ctuple[1].deactivate(shallow=False) - self.assertEqual([c.name for c in traversal if c.active], - [c.name for c in pmo.preorder_traversal( - ctuple, - active=True)]) - self.assertEqual([id(c) for c in traversal if c.active], - [id(c) for c in pmo.preorder_traversal( - ctuple, - active=True)]) + self.assertEqual( + [c.name for c in traversal if c.active], + [c.name for c in pmo.preorder_traversal(ctuple, active=True)], + ) + self.assertEqual( + [id(c) for c in traversal if c.active], + [id(c) for c in pmo.preorder_traversal(ctuple, active=True)], + ) ctuple.deactivate() - self.assertEqual(len(list(pmo.preorder_traversal(ctuple, - active=True))), - 0) - self.assertEqual(len(list(pmo.generate_names(ctuple, - active=True))), - 0) + self.assertEqual(len(list(pmo.preorder_traversal(ctuple, active=True))), 0) + self.assertEqual(len(list(pmo.generate_names(ctuple, active=True))), 0) def test_preorder_traversal_descend_check(self): - ctuple, traversal = \ - super(_TestActiveTupleContainerBase, self).\ - test_preorder_traversal_descend_check() + ctuple, traversal = super( + _TestActiveTupleContainerBase, self + ).test_preorder_traversal_descend_check() ctuple[1].deactivate() + def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return True + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - active=True, - descend=descend)) - self.assertEqual([None, '[0]', '[2]'], - [c.name for c in order]) - self.assertEqual([id(ctuple),id(ctuple[0]),id(ctuple[2])], - [id(c) for c in order]) + order = list(pmo.preorder_traversal(ctuple, active=True, descend=descend)) + self.assertEqual([None, '[0]', '[2]'], [c.name for c in order]) + self.assertEqual( + [id(ctuple), id(ctuple[0]), id(ctuple[2])], [id(c) for c in order] + ) if ctuple.ctype._is_heterogeneous_container: - self.assertEqual([None, '[0]', '[2]'], - [c.name for c in descend.seen]) - self.assertEqual([id(ctuple),id(ctuple[0]),id(ctuple[2])], - [id(c) for c in descend.seen]) + self.assertEqual([None, '[0]', '[2]'], [c.name for c in descend.seen]) + self.assertEqual( + [id(ctuple), id(ctuple[0]), id(ctuple[2])], + [id(c) for c in descend.seen], + ) else: - self.assertEqual([None], - [c.name for c in descend.seen]) - self.assertEqual([id(ctuple)], - [id(c) for c in descend.seen]) + self.assertEqual([None], [c.name for c in descend.seen]) + self.assertEqual([id(ctuple)], [id(c) for c in descend.seen]) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return x.active + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - active=None, - descend=descend)) - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in order]) - self.assertEqual([id(ctuple),id(ctuple[0]),id(ctuple[1]),id(ctuple[2])], - [id(c) for c in order]) + order = list(pmo.preorder_traversal(ctuple, active=None, descend=descend)) + self.assertEqual([None, '[0]', '[1]', '[2]'], [c.name for c in order]) + self.assertEqual( + [id(ctuple), id(ctuple[0]), id(ctuple[1]), id(ctuple[2])], + [id(c) for c in order], + ) if ctuple.ctype._is_heterogeneous_container: - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in descend.seen]) - self.assertEqual([id(ctuple),id(ctuple[0]),id(ctuple[1]),id(ctuple[2])], - [id(c) for c in descend.seen]) + self.assertEqual( + [None, '[0]', '[1]', '[2]'], [c.name for c in descend.seen] + ) + self.assertEqual( + [id(ctuple), id(ctuple[0]), id(ctuple[1]), id(ctuple[2])], + [id(c) for c in descend.seen], + ) else: - self.assertEqual([None,'[1]'], - [c.name for c in descend.seen]) - self.assertEqual([id(ctuple),id(ctuple[1])], - [id(c) for c in descend.seen]) + self.assertEqual([None, '[1]'], [c.name for c in descend.seen]) + self.assertEqual([id(ctuple), id(ctuple[1])], [id(c) for c in descend.seen]) ctuple[1].deactivate(shallow=False) + def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return True + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - active=True, - descend=descend)) - self.assertEqual([c.name for c in traversal if c.active], - [c.name for c in order]) - self.assertEqual([id(c) for c in traversal if c.active], - [id(c) for c in order]) - self.assertEqual([c.name for c in traversal - if c.active and \ - c._is_container], - [c.name for c in descend.seen]) - self.assertEqual([id(c) for c in traversal - if c.active and \ - c._is_container], - [id(c) for c in descend.seen]) + order = list(pmo.preorder_traversal(ctuple, active=True, descend=descend)) + self.assertEqual( + [c.name for c in traversal if c.active], [c.name for c in order] + ) + self.assertEqual([id(c) for c in traversal if c.active], [id(c) for c in order]) + self.assertEqual( + [c.name for c in traversal if c.active and c._is_container], + [c.name for c in descend.seen], + ) + self.assertEqual( + [id(c) for c in traversal if c.active and c._is_container], + [id(c) for c in descend.seen], + ) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return x.active + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - active=None, - descend=descend)) - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in order]) - self.assertEqual([id(ctuple),id(ctuple[0]),id(ctuple[1]),id(ctuple[2])], - [id(c) for c in order]) + order = list(pmo.preorder_traversal(ctuple, active=None, descend=descend)) + self.assertEqual([None, '[0]', '[1]', '[2]'], [c.name for c in order]) + self.assertEqual( + [id(ctuple), id(ctuple[0]), id(ctuple[1]), id(ctuple[2])], + [id(c) for c in order], + ) if ctuple.ctype._is_heterogeneous_container: - self.assertEqual([None,'[0]','[1]','[2]'], - [c.name for c in descend.seen]) - self.assertEqual([id(ctuple),id(ctuple[0]),id(ctuple[1]),id(ctuple[2])], - [id(c) for c in descend.seen]) + self.assertEqual( + [None, '[0]', '[1]', '[2]'], [c.name for c in descend.seen] + ) + self.assertEqual( + [id(ctuple), id(ctuple[0]), id(ctuple[1]), id(ctuple[2])], + [id(c) for c in descend.seen], + ) else: - self.assertEqual([None,'[1]'], - [c.name for c in descend.seen]) - self.assertEqual([id(ctuple),id(ctuple[1])], - [id(c) for c in descend.seen]) + self.assertEqual([None, '[1]'], [c.name for c in descend.seen]) + self.assertEqual([id(ctuple), id(ctuple[1])], [id(c) for c in descend.seen]) ctuple.deactivate() + def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return True + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - active=True, - descend=descend)) + order = list(pmo.preorder_traversal(ctuple, active=True, descend=descend)) self.assertEqual(len(descend.seen), 0) - self.assertEqual(len(list(pmo.generate_names(ctuple, - active=True))), - 0) + self.assertEqual(len(list(pmo.generate_names(ctuple, active=True))), 0) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return x.active + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - active=None, - descend=descend)) + order = list(pmo.preorder_traversal(ctuple, active=None, descend=descend)) self.assertEqual(len(descend.seen), 1) self.assertIs(descend.seen[0], ctuple) ctuple.deactivate(shallow=False) + def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return True + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - active=True, - descend=descend)) + order = list(pmo.preorder_traversal(ctuple, active=True, descend=descend)) self.assertEqual(len(descend.seen), 0) - self.assertEqual(len(list(pmo.generate_names(ctuple, - active=True))), - 0) + self.assertEqual(len(list(pmo.generate_names(ctuple, active=True))), 0) def descend(x): self.assertTrue(x._is_container) descend.seen.append(x) return x.active + descend.seen = [] - order = list(pmo.preorder_traversal(ctuple, - active=None, - descend=descend)) + order = list(pmo.preorder_traversal(ctuple, active=None, descend=descend)) self.assertEqual(len(descend.seen), 1) self.assertIs(descend.seen[0], ctuple) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/kernel/test_variable.py b/pyomo/core/tests/unit/kernel/test_variable.py index bedcffb706d..4127bd1adfe 100644 --- a/pyomo/core/tests/unit/kernel/test_variable.py +++ b/pyomo/core/tests/unit/kernel/test_variable.py @@ -12,42 +12,49 @@ import pickle import pyomo.common.unittest as unittest -from pyomo.core.expr.numvalue import (NumericValue, - is_fixed, - is_constant, - is_potentially_variable) -from pyomo.core.tests.unit.kernel.test_dict_container import \ - _TestActiveDictContainerBase -from pyomo.core.tests.unit.kernel.test_tuple_container import \ - _TestActiveTupleContainerBase -from pyomo.core.tests.unit.kernel.test_list_container import \ - _TestActiveListContainerBase +from pyomo.core.expr.numvalue import ( + NumericValue, + is_fixed, + is_constant, + is_potentially_variable, +) +from pyomo.core.tests.unit.kernel.test_dict_container import ( + _TestActiveDictContainerBase, +) +from pyomo.core.tests.unit.kernel.test_tuple_container import ( + _TestActiveTupleContainerBase, +) +from pyomo.core.tests.unit.kernel.test_list_container import ( + _TestActiveListContainerBase, +) from pyomo.core.kernel.base import ICategorizedObject from pyomo.core.kernel.parameter import parameter -from pyomo.core.kernel.variable import \ - (IVariable, - variable, - variable_dict, - variable_tuple, - variable_list, - _extract_domain_type_and_bounds) +from pyomo.core.kernel.variable import ( + IVariable, + variable, + variable_dict, + variable_tuple, + variable_list, + _extract_domain_type_and_bounds, +) from pyomo.core.kernel.block import block -from pyomo.core.kernel.set_types import (RealSet, - IntegerSet, - BooleanSet) -from pyomo.core.base.set import(Binary, - NonNegativeReals, - NegativeReals, - Reals, - NonNegativeIntegers, - NegativeIntegers, - RealInterval, - IntegerInterval) +from pyomo.core.kernel.set_types import RealSet, IntegerSet, BooleanSet +from pyomo.core.base.set import ( + Binary, + NonNegativeReals, + NegativeReals, + Reals, + NonNegativeIntegers, + NegativeIntegers, + RealInterval, + IntegerInterval, +) -class Test_variable(unittest.TestCase): +class Test_variable(unittest.TestCase): def test_pprint(self): import pyomo.kernel + # Not really testing what the output is, just that # an error does not occur. The pprint functionality # is still in the early stages. @@ -82,10 +89,7 @@ def test_bad_bounds(self): def test_extract_domain_type_and_bounds(self): # test an edge case - domain_type, lb, ub = _extract_domain_type_and_bounds(None, - None, - None, - None) + domain_type, lb, ub = _extract_domain_type_and_bounds(None, None, None, None) self.assertIs(domain_type, RealSet) self.assertIs(lb, None) self.assertIs(ub, None) @@ -105,10 +109,7 @@ def test_ctype(self): self.assertIs(type(v)._ctype, IVariable) def test_pickle(self): - v = variable(lb=1, - ub=2, - domain_type=IntegerSet, - fixed=True) + v = variable(lb=1, ub=2, domain_type=IntegerSet, fixed=True) self.assertEqual(v.lb, 1) self.assertEqual(type(v.lb), int) self.assertEqual(v.ub, 2) @@ -116,8 +117,7 @@ def test_pickle(self): self.assertEqual(v.domain_type, IntegerSet) self.assertEqual(v.fixed, True) self.assertEqual(v.parent, None) - vup = pickle.loads( - pickle.dumps(v)) + vup = pickle.loads(pickle.dumps(v)) self.assertEqual(vup.lb, 1) self.assertEqual(type(vup.lb), int) self.assertEqual(vup.ub, 2) @@ -128,8 +128,7 @@ def test_pickle(self): b = block() b.v = v self.assertIs(v.parent, b) - bup = pickle.loads( - pickle.dumps(b)) + bup = pickle.loads(pickle.dumps(b)) vup = bup.v self.assertEqual(vup.lb, 1) self.assertEqual(vup.ub, 2) @@ -153,11 +152,7 @@ def test_init(self): del b.v self.assertTrue(v.parent is None) - v = variable(domain_type=IntegerSet, - value=1, - lb=0, - ub=2, - fixed=True) + v = variable(domain_type=IntegerSet, value=1, lb=0, ub=2, fixed=True) self.assertTrue(v.parent is None) self.assertEqual(v.ctype, IVariable) self.assertEqual(v.domain_type, IntegerSet) @@ -288,8 +283,8 @@ def test_domain(self): self.assertIs(v.lower, lb) self.assertIs(v.upper, ub) with self.assertRaisesRegex( - ValueError, - 'No value for uninitialized NumericValue object None'): + ValueError, 'No value for uninitialized NumericValue object None' + ): v.bounds lb = lb**2 @@ -340,8 +335,8 @@ def test_domain(self): with self.assertRaises(ValueError): variable(domain=NegativeIntegers, ub=ub) - unit_interval = RealInterval(bounds=(0,1)) - self.assertEqual(unit_interval.bounds(), (0,1)) + unit_interval = RealInterval(bounds=(0, 1)) + self.assertEqual(unit_interval.bounds(), (0, 1)) v = variable(domain=unit_interval) self.assertEqual(v.domain_type, RealSet) self.assertEqual(v.is_continuous(), True) @@ -367,8 +362,8 @@ def test_domain(self): self.assertEqual(v.ub, 1) self.assertEqual(v.bounds, (0, 1)) - binary = IntegerInterval(bounds=(0,1)) - self.assertEqual(binary.bounds(), (0,1)) + binary = IntegerInterval(bounds=(0, 1)) + self.assertEqual(binary.bounds(), (0, 1)) v = variable(domain=binary) self.assertEqual(v.domain_type, IntegerSet) self.assertEqual(v.is_continuous(), False) @@ -397,8 +392,7 @@ def test_domain(self): variable(domain_type=RealSet) variable(domain=Reals) with self.assertRaises(ValueError): - variable(domain_type=RealSet, - domain=Reals) + variable(domain_type=RealSet, domain=Reals) with self.assertRaises(ValueError): variable(domain_type=BooleanSet) @@ -481,7 +475,7 @@ def test_binary_type(self): self.assertEqual(type(v.lb), int) self.assertEqual(v.ub, 1) self.assertEqual(type(v.ub), int) - self.assertEqual(v.bounds, (0,1)) + self.assertEqual(v.bounds, (0, 1)) v.lb = 0 v.ub = 0 @@ -492,7 +486,7 @@ def test_binary_type(self): self.assertEqual(v.is_integer(), True) self.assertEqual(v.lb, 0) self.assertEqual(v.ub, 0) - self.assertEqual(v.bounds, (0,0)) + self.assertEqual(v.bounds, (0, 0)) v.lb = 1 v.ub = 1 @@ -503,7 +497,7 @@ def test_binary_type(self): self.assertEqual(v.is_integer(), True) self.assertEqual(v.lb, 1) self.assertEqual(v.ub, 1) - self.assertEqual(v.bounds, (1,1)) + self.assertEqual(v.bounds, (1, 1)) v = variable(domain=Binary) self.assertEqual(v.domain_type, IntegerSet) @@ -513,7 +507,7 @@ def test_binary_type(self): self.assertEqual(v.is_integer(), True) self.assertEqual(v.lb, 0) self.assertEqual(v.ub, 1) - self.assertEqual(v.bounds, (0,1)) + self.assertEqual(v.bounds, (0, 1)) v.ub = 2 self.assertEqual(v.domain_type, IntegerSet) @@ -523,7 +517,7 @@ def test_binary_type(self): self.assertEqual(v.is_integer(), True) self.assertEqual(v.lb, 0) self.assertEqual(v.ub, 2) - self.assertEqual(v.bounds, (0,2)) + self.assertEqual(v.bounds, (0, 2)) v.lb = -1 self.assertEqual(v.domain_type, IntegerSet) @@ -533,7 +527,7 @@ def test_binary_type(self): self.assertEqual(v.is_integer(), True) self.assertEqual(v.lb, -1) self.assertEqual(v.ub, 2) - self.assertEqual(v.bounds, (-1,2)) + self.assertEqual(v.bounds, (-1, 2)) v.domain = Binary self.assertEqual(v.domain_type, IntegerSet) @@ -543,7 +537,7 @@ def test_binary_type(self): self.assertEqual(v.is_integer(), True) self.assertEqual(v.lb, 0) self.assertEqual(v.ub, 1) - self.assertEqual(v.bounds, (0,1)) + self.assertEqual(v.bounds, (0, 1)) v.domain_type = RealSet self.assertEqual(v.domain_type, RealSet) @@ -553,7 +547,7 @@ def test_binary_type(self): self.assertEqual(v.is_integer(), False) self.assertEqual(v.lb, 0) self.assertEqual(v.ub, 1) - self.assertEqual(v.bounds, (0,1)) + self.assertEqual(v.bounds, (0, 1)) v.domain_type = IntegerSet self.assertEqual(v.domain_type, IntegerSet) @@ -563,7 +557,7 @@ def test_binary_type(self): self.assertEqual(v.is_integer(), True) self.assertEqual(v.lb, 0) self.assertEqual(v.ub, 1) - self.assertEqual(v.bounds, (0,1)) + self.assertEqual(v.bounds, (0, 1)) v.domain = Reals self.assertEqual(v.domain_type, RealSet) @@ -585,7 +579,7 @@ def test_binary_type(self): self.assertEqual(v.is_integer(), False) self.assertEqual(v.lb, float('-inf')) self.assertEqual(v.ub, float('inf')) - self.assertEqual(v.bounds, (float('-inf'),float('inf'))) + self.assertEqual(v.bounds, (float('-inf'), float('inf'))) self.assertEqual(v.has_lb(), False) self.assertEqual(v.has_ub(), False) @@ -679,18 +673,18 @@ def test_bounds_setter(self): v.ub = 1 self.assertEqual(v.lb, 0) self.assertEqual(v.ub, 1) - self.assertEqual(v.bounds, (0,1)) + self.assertEqual(v.bounds, (0, 1)) v.bounds = (2, 3) self.assertEqual(v.lb, 2) self.assertEqual(v.ub, 3) - self.assertEqual(v.bounds, (2,3)) + self.assertEqual(v.bounds, (2, 3)) v.lb = -1 v.ub = 0 self.assertEqual(v.lb, -1) self.assertEqual(v.ub, 0) - self.assertEqual(v.bounds, (-1,0)) + self.assertEqual(v.bounds, (-1, 0)) def test_has_lb_ub(self): v = variable() @@ -761,13 +755,13 @@ def test_fix_free(self): self.assertEqual(v.fixed, True) with self.assertRaises(TypeError): - v.fix(1,2) + v.fix(1, 2) self.assertEqual(v.value, 0) self.assertEqual(v.fixed, True) v.free() with self.assertRaises(TypeError): - v.fix(1,2) + v.fix(1, 2) self.assertEqual(v.value, 0) self.assertEqual(v.fixed, False) @@ -803,7 +797,7 @@ def test_slack_methods(self): U = 5 # equality - x.bounds = L,L + x.bounds = L, L x.value = 4 self.assertEqual(x.value, 4) self.assertEqual(x.slack, -3) @@ -1055,23 +1049,25 @@ def test_slack_methods(self): self.assertEqual(x.lslack, None) self.assertEqual(x.uslack, None) + class _variable_subclass(variable): pass -class Test_variable_dict(_TestActiveDictContainerBase, - unittest.TestCase): + +class Test_variable_dict(_TestActiveDictContainerBase, unittest.TestCase): _container_type = variable_dict _ctype_factory = lambda self: variable() -class Test_variable_tuple(_TestActiveTupleContainerBase, - unittest.TestCase): + +class Test_variable_tuple(_TestActiveTupleContainerBase, unittest.TestCase): _container_type = variable_tuple _ctype_factory = lambda self: variable() -class Test_variable_list(_TestActiveListContainerBase, - unittest.TestCase): + +class Test_variable_list(_TestActiveListContainerBase, unittest.TestCase): _container_type = variable_list _ctype_factory = lambda self: variable() + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/solve1.txt b/pyomo/core/tests/unit/solve1.txt index 3b3ffd43f1a..c35a248fa79 100644 --- a/pyomo/core/tests/unit/solve1.txt +++ b/pyomo/core/tests/unit/solve1.txt @@ -2,10 +2,10 @@ "Problem": [ { "Lower bound": -1.33333333333333, - "Number of constraints": 2, - "Number of nonzeros": 5, + "Number of constraints": 1, + "Number of nonzeros": 4, "Number of objectives": 1, - "Number of variables": 5, + "Number of variables": 4, "Sense": "minimize", "Upper bound": -1.33333333333333 } diff --git a/pyomo/core/tests/unit/solve1a.txt b/pyomo/core/tests/unit/solve1a.txt index 572796f7a7d..2bddab5b7ab 100644 --- a/pyomo/core/tests/unit/solve1a.txt +++ b/pyomo/core/tests/unit/solve1a.txt @@ -2,10 +2,10 @@ "Problem": [ { "Lower bound": -0.666666666666667, - "Number of constraints": 3, - "Number of nonzeros": 6, + "Number of constraints": 2, + "Number of nonzeros": 5, "Number of objectives": 1, - "Number of variables": 5, + "Number of variables": 4, "Sense": "minimize", "Upper bound": -0.666666666666667 } diff --git a/pyomo/core/tests/unit/solve1b.txt b/pyomo/core/tests/unit/solve1b.txt index ff95a852bcb..5ac86e96ffe 100644 --- a/pyomo/core/tests/unit/solve1b.txt +++ b/pyomo/core/tests/unit/solve1b.txt @@ -1,33 +1,33 @@ { "Problem": [ { - "Lower bound": 0.0, - "Number of constraints": 6, - "Number of nonzeros": 9, - "Number of objectives": 1, - "Number of variables": 5, - "Sense": "minimize", + "Lower bound": 0.0, + "Number of constraints": 5, + "Number of nonzeros": 8, + "Number of objectives": 1, + "Number of variables": 4, + "Sense": "minimize", "Upper bound": 0.0 } - ], + ], "Solution": [ { - "number of solutions": 1, + "number of solutions": 1, "number of solutions displayed": 1 - }, + }, { - "Constraint": "No values", - "Gap": 0.0, - "Message": null, - "Objective": {}, - "Problem": {}, + "Constraint": "No values", + "Gap": 0.0, + "Message": null, + "Objective": {}, + "Problem": {}, "Variable": {} } - ], + ], "Solver": [ { - "Error rc": 0, - "Status": "ok", + "Error rc": 0, + "Status": "ok", "Termination condition": "optimal" } ] diff --git a/pyomo/core/tests/unit/solve6.txt b/pyomo/core/tests/unit/solve6.txt index 56e77f7ea49..13990effcc6 100644 --- a/pyomo/core/tests/unit/solve6.txt +++ b/pyomo/core/tests/unit/solve6.txt @@ -1,49 +1,49 @@ { "Problem": [ { - "Lower bound": -1.66666666666667, - "Number of constraints": 2, - "Number of nonzeros": 6, - "Number of objectives": 1, - "Number of variables": 6, - "Sense": "minimize", + "Lower bound": -1.66666666666667, + "Number of constraints": 1, + "Number of nonzeros": 5, + "Number of objectives": 1, + "Number of variables": 5, + "Sense": "minimize", "Upper bound": -1.66666666666667 } - ], + ], "Solution": [ { - "number of solutions": 1, + "number of solutions": 1, "number of solutions displayed": 1 - }, + }, { - "Constraint": "No values", + "Constraint": "No values", "Objective": { "b.obj": { "Value": -1.66666666666667 } - }, + }, "Variable": { "b.x[1]": { "Value": -1.0 - }, + }, "b.x[2]": { "Value": -1.0 - }, + }, "b.x[3]": { "Value": -0.666666666666667 - }, + }, "b.x[4]": { "Value": 1.0 - }, + }, "y": { "Value": 1.0 } } } - ], + ], "Solver": [ { - "Error rc": 0, + "Error rc": 0, "Status": "ok" } ] diff --git a/pyomo/core/tests/unit/solve7.txt b/pyomo/core/tests/unit/solve7.txt index 573b6ec638b..3edc4507ab7 100644 --- a/pyomo/core/tests/unit/solve7.txt +++ b/pyomo/core/tests/unit/solve7.txt @@ -1,75 +1,75 @@ { "Problem": [ { - "Lower bound": -4.33333333333333, - "Number of constraints": 2, - "Number of nonzeros": 14, - "Number of objectives": 1, - "Number of variables": 14, - "Sense": "minimize", + "Lower bound": -4.33333333333333, + "Number of constraints": 1, + "Number of nonzeros": 13, + "Number of objectives": 1, + "Number of variables": 13, + "Sense": "minimize", "Upper bound": -4.33333333333333 } - ], + ], "Solution": [ { - "number of solutions": 1, + "number of solutions": 1, "number of solutions displayed": 1 - }, + }, { - "Constraint": "No values", + "Constraint": "No values", "Objective": { "obj": { "Value": -4.33333333333333 } - }, + }, "Gap": 0.0, "Variable": { "x[1,'C,D']": { "Value": -1.0 - }, + }, "x[1,A B]": { "Value": -1.0 - }, + }, "x[1,E]": { "Value": -1.0 - }, + }, "x[2,'C,D']": { "Value": -1.0 - }, + }, "x[2,A B]": { "Value": -1.0 - }, + }, "x[2,E]": { "Value": -1.0 - }, + }, "x[3,'C,D']": { "Value": -1.0 - }, + }, "x[3,A B]": { "Value": 0.666666666666667 - }, + }, "x[3,E]": { "Value": -1.0 - }, + }, "x[4,'C,D']": { "Value": 1.0 - }, + }, "x[4,A B]": { "Value": 1.0 - }, + }, "x[4,E]": { "Value": 1.0 - }, + }, "y": { "Value": 1.0 } } } - ], + ], "Solver": [ { - "Error rc": 0, - "Status": "ok", + "Error rc": 0, + "Status": "ok", "Termination condition": "optimal" } ] diff --git a/pyomo/core/tests/unit/solve_with_store1.txt b/pyomo/core/tests/unit/solve_with_store1.txt index 68c9fc0675b..bc9a930f279 100644 --- a/pyomo/core/tests/unit/solve_with_store1.txt +++ b/pyomo/core/tests/unit/solve_with_store1.txt @@ -9,9 +9,9 @@ Problem: Lower bound: -3.0 Upper bound: -3.0 Number of objectives: 1 - Number of constraints: 2 - Number of variables: 5 - Number of nonzeros: 2 + Number of constraints: 1 + Number of variables: 4 + Number of nonzeros: 1 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/pyomo/core/tests/unit/solve_with_store2.txt b/pyomo/core/tests/unit/solve_with_store2.txt index fddec19fff6..73ceebc90e2 100644 --- a/pyomo/core/tests/unit/solve_with_store2.txt +++ b/pyomo/core/tests/unit/solve_with_store2.txt @@ -9,9 +9,9 @@ Problem: Lower bound: -3.0 Upper bound: -3.0 Number of objectives: 1 - Number of constraints: 2 - Number of variables: 5 - Number of nonzeros: 2 + Number of constraints: 1 + Number of variables: 4 + Number of nonzeros: 1 Sense: minimize # ---------------------------------------------------------- # Solver Information diff --git a/pyomo/core/tests/unit/solve_with_store3.txt b/pyomo/core/tests/unit/solve_with_store3.txt index d8a6846c280..9d016353e7a 100644 --- a/pyomo/core/tests/unit/solve_with_store3.txt +++ b/pyomo/core/tests/unit/solve_with_store3.txt @@ -2,10 +2,10 @@ "Problem": [ { "Lower bound": -3.0, - "Number of constraints": 2, - "Number of nonzeros": 2, + "Number of constraints": 1, + "Number of nonzeros": 1, "Number of objectives": 1, - "Number of variables": 5, + "Number of variables": 4, "Sense": "minimize", "Upper bound": -3.0 } diff --git a/pyomo/core/tests/unit/solve_with_store4.txt b/pyomo/core/tests/unit/solve_with_store4.txt index 7e5816003d2..892e72b7d76 100644 --- a/pyomo/core/tests/unit/solve_with_store4.txt +++ b/pyomo/core/tests/unit/solve_with_store4.txt @@ -2,10 +2,10 @@ "Problem": [ { "Lower bound": -3.0, - "Number of constraints": 2, - "Number of nonzeros": 2, + "Number of constraints": 1, + "Number of nonzeros": 1, "Number of objectives": 1, - "Number of variables": 5, + "Number of variables": 4 , "Sense": "minimize", "Upper bound": -3.0 } diff --git a/pyomo/core/tests/unit/test_action.py b/pyomo/core/tests/unit/test_action.py index 29c0cb515d3..5db6f165854 100644 --- a/pyomo/core/tests/unit/test_action.py +++ b/pyomo/core/tests/unit/test_action.py @@ -27,17 +27,18 @@ def action1_fn(model): model.A = 4.3 + def action2_fn(model, i): if i in model.A: - model.A[i] = value(model.A[i])+i + model.A[i] = value(model.A[i]) + i + def action3_fn(model, i): if i in model.A.sparse_keys(): - model.A[i] = value(model.A[i])+i + model.A[i] = value(model.A[i]) + i class Scalar(unittest.TestCase): - def setUp(self): # # Create model instance @@ -55,75 +56,76 @@ def tearDown(self): def test_value(self): """Check the value of the parameter""" tmp = value(self.instance.A.value) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 4.3 ) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 4.3) self.assertEqual(value(self.instance.A.value), value(self.instance.A)) def test_getattr(self): """Check the use of the __getattr__ method""" - self.assertEqual( self.instance.A.value, 4.3) + self.assertEqual(self.instance.A.value, 4.3) class Array_Param(unittest.TestCase): - def test_sparse_param_nodefault(self): # # Create model instance # model = AbstractModel() - model.Z = Set(initialize=[1,3]) - model.A = Param(model.Z, initialize={1:1.3}, mutable=True) + model.Z = Set(initialize=[1, 3]) + model.A = Param(model.Z, initialize={1: 1.3}, mutable=True) model.action2 = BuildAction(model.Z, rule=action2_fn) instance = model.create_instance() tmp = value(instance.A[1]) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 2.3 ) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 2.3) def test_sparse_param_nodefault_sparse_iter(self): # # Create model instance # model = AbstractModel() - model.Z = Set(initialize=[1,3]) - model.A = Param(model.Z, initialize={1:1.3}, mutable=True) + model.Z = Set(initialize=[1, 3]) + model.A = Param(model.Z, initialize={1: 1.3}, mutable=True) model.action2 = BuildAction(model.Z, rule=action3_fn) instance = model.create_instance() tmp = value(instance.A[1]) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 2.3 ) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 2.3) def test_sparse_param_default(self): # # Create model instance # model = AbstractModel() - model.Z = Set(initialize=[1,3]) - model.A = Param(model.Z, initialize={1:1.3}, default=0, mutable=True) + model.Z = Set(initialize=[1, 3]) + model.A = Param(model.Z, initialize={1: 1.3}, default=0, mutable=True) model.action2 = BuildAction(model.Z, rule=action2_fn) instance = model.create_instance() tmp = value(instance.A[1]) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 2.3 ) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 2.3) def test_dense_param(self): # # Create model instance # model = AbstractModel() - model.Z = Set(initialize=[1,3]) + model.Z = Set(initialize=[1, 3]) model.A = Param(model.Z, initialize=1.3, mutable=True) model.action2 = BuildAction(model.Z, rule=action2_fn) instance = model.create_instance() # - self.assertEqual( instance.A[1].value, 2.3) - self.assertEqual( value(instance.A[3]), 4.3) + self.assertEqual(instance.A[1].value, 2.3) + self.assertEqual(value(instance.A[3]), 4.3) # buf = StringIO() instance.pprint(ostream=buf) - self.assertEqual(buf.getvalue(),"""1 Set Declarations + self.assertEqual( + buf.getvalue(), + """1 Set Declarations Z : Size=1, Index=None, Ordered=Insertion Key : Dimen : Domain : Size : Members None : 1 : Any : 2 : {1, 3} @@ -138,11 +140,11 @@ def test_dense_param(self): action2 : Size=0, Index=Z, Active=True 3 Declarations: Z A action2 -""") +""", + ) class TestMisc(unittest.TestCase): - def test_error1(self): model = AbstractModel() try: @@ -150,7 +152,7 @@ def test_error1(self): self.fail("Expected ValueError") except ValueError: pass - + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_block.py b/pyomo/core/tests/unit/test_block.py index 65ba19d6f7e..f68850d9421 100644 --- a/pyomo/core/tests/unit/test_block.py +++ b/pyomo/core/tests/unit/test_block.py @@ -21,26 +21,50 @@ from copy import deepcopy from os.path import abspath, dirname, join -currdir = dirname( abspath(__file__) ) +currdir = dirname(abspath(__file__)) import pyomo.common.unittest as unittest -from pyomo.environ import (AbstractModel, ConcreteModel, Var, Set, - Param, Block, Suffix, Constraint, Component, - Objective, Expression, SOSConstraint, - SortComponents, NonNegativeIntegers, - TraversalStrategy, RangeSet, SolverFactory, - value, sum_product, ComponentUID, Any) +from pyomo.environ import ( + AbstractModel, + ConcreteModel, + Var, + Set, + Param, + Block, + Suffix, + Constraint, + Component, + Objective, + Expression, + Reference, + SOSConstraint, + SortComponents, + NonNegativeIntegers, + TraversalStrategy, + RangeSet, + SolverFactory, + value, + sum_product, + ComponentUID, + Any, +) from pyomo.common.log import LoggingIntercept from pyomo.common.tempfiles import TempfileManager -from pyomo.core.base.block import ScalarBlock, SubclassOf, _BlockData, declare_custom_block -from pyomo.core.expr import current as EXPR +from pyomo.core.base.block import ( + ScalarBlock, + SubclassOf, + _BlockData, + declare_custom_block, +) +import pyomo.core.expr as EXPR from pyomo.opt import check_available_solvers from pyomo.gdp import Disjunct solvers = check_available_solvers('glpk') + class DerivedBlock(ScalarBlock): def __init__(self, *args, **kwargs): """Constructor""" @@ -50,11 +74,11 @@ def __init__(self, *args, **kwargs): def foo(self): pass + DerivedBlock._Block_reserved_words = set(dir(DerivedBlock())) class TestGenerators(unittest.TestCase): - def generate_model(self): # # ** DO NOT modify the model below without updating the @@ -63,36 +87,36 @@ def generate_model(self): # model = ConcreteModel() - model.q = Set(initialize=[1,2]) - model.Q = Set(model.q,initialize=[1,2]) - model.qq = NonNegativeIntegers*model.q + model.q = Set(initialize=[1, 2]) + model.Q = Set(model.q, initialize=[1, 2]) + model.qq = NonNegativeIntegers * model.q model.x = Var(initialize=-1) - model.X = Var(model.q,initialize=-1) + model.X = Var(model.q, initialize=-1) model.e = Expression(initialize=-1) - model.E = Expression(model.q,initialize=-1) - model.p = Param(mutable=True,initialize=-1) - model.P = Param(model.q,mutable=True,initialize=-1) + model.E = Expression(model.q, initialize=-1) + model.p = Param(mutable=True, initialize=-1) + model.P = Param(model.q, mutable=True, initialize=-1) model.o = Objective(expr=-1) - model.O = Objective(model.q, rule=lambda model,i: -1) - model.c = Constraint(expr=model.x>=-1) - model.C = Constraint(model.q, rule=lambda model,i: model.X[i]>=-1) + model.O = Objective(model.q, rule=lambda model, i: -1) + model.c = Constraint(expr=model.x >= -1) + model.C = Constraint(model.q, rule=lambda model, i: model.X[i] >= -1) model.sos = SOSConstraint(var=model.X, index=model.q, sos=1) model.SOS = SOSConstraint(model.q, var=model.X, index=model.Q, sos=1) model.s = Suffix() model.b = Block() - model.b.q = Set(initialize=[1,2]) - model.b.Q = Set(model.b.q,initialize=[1,2]) + model.b.q = Set(initialize=[1, 2]) + model.b.Q = Set(model.b.q, initialize=[1, 2]) model.b.x = Var(initialize=0) - model.b.X = Var(model.b.q,initialize=0) + model.b.X = Var(model.b.q, initialize=0) model.b.e = Expression(initialize=0) - model.b.E = Expression(model.b.q,initialize=0) - model.b.p = Param(mutable=True,initialize=0) - model.b.P = Param(model.b.q,mutable=True,initialize=0) + model.b.E = Expression(model.b.q, initialize=0) + model.b.p = Param(mutable=True, initialize=0) + model.b.P = Param(model.b.q, mutable=True, initialize=0) model.b.o = Objective(expr=0) - model.b.O = Objective(model.b.q, rule=lambda b,i: 0) - model.b.c = Constraint(expr=model.b.x>=0) - model.b.C = Constraint(model.b.q, rule=lambda b,i: b.X[i]>=0) + model.b.O = Objective(model.b.q, rule=lambda b, i: 0) + model.b.c = Constraint(expr=model.b.x >= 0) + model.b.C = Constraint(model.b.q, rule=lambda b, i: b.X[i] >= 0) model.b.sos = SOSConstraint(var=model.b.X, index=model.b.q, sos=1) model.b.SOS = SOSConstraint(model.b.q, var=model.b.X, index=model.b.Q, sos=1) model.b.s = Suffix() @@ -103,33 +127,49 @@ def generate_model(self): model.b.component_lists[Var] = [model.b.x, model.b.X] model.b.component_data_lists[Var] = [model.b.x, model.b.X[1], model.b.X[2]] model.b.component_lists[Expression] = [model.b.e, model.b.E] - model.b.component_data_lists[Expression] = [model.b.e, model.b.E[1], model.b.E[2]] + model.b.component_data_lists[Expression] = [ + model.b.e, + model.b.E[1], + model.b.E[2], + ] model.b.component_lists[Param] = [model.b.p, model.b.P] model.b.component_data_lists[Param] = [model.b.p, model.b.P[1], model.b.P[2]] model.b.component_lists[Objective] = [model.b.o, model.b.O] - model.b.component_data_lists[Objective] = [model.b.o, model.b.O[1], model.b.O[2]] + model.b.component_data_lists[Objective] = [ + model.b.o, + model.b.O[1], + model.b.O[2], + ] model.b.component_lists[Constraint] = [model.b.c, model.b.C] - model.b.component_data_lists[Constraint] = [model.b.c, model.b.C[1], model.b.C[2]] + model.b.component_data_lists[Constraint] = [ + model.b.c, + model.b.C[1], + model.b.C[2], + ] model.b.component_lists[SOSConstraint] = [model.b.sos, model.b.SOS] - model.b.component_data_lists[SOSConstraint] = [model.b.sos, model.b.SOS[1], model.b.SOS[2]] + model.b.component_data_lists[SOSConstraint] = [ + model.b.sos, + model.b.SOS[1], + model.b.SOS[2], + ] model.b.component_lists[Suffix] = [model.b.s] model.b.component_data_lists[Suffix] = [model.b.s] model.b.component_lists[Block] = [] model.b.component_data_lists[Block] = [] - def B_rule(block,i): - block.q = Set(initialize=[1,2]) - block.Q = Set(block.q,initialize=[1,2]) + def B_rule(block, i): + block.q = Set(initialize=[1, 2]) + block.Q = Set(block.q, initialize=[1, 2]) block.x = Var(initialize=i) - block.X = Var(block.q,initialize=i) + block.X = Var(block.q, initialize=i) block.e = Expression(initialize=i) - block.E = Expression(block.q,initialize=i) - block.p = Param(mutable=True,initialize=i) - block.P = Param(block.q,mutable=True,initialize=i) + block.E = Expression(block.q, initialize=i) + block.p = Param(mutable=True, initialize=i) + block.P = Param(block.q, mutable=True, initialize=i) block.o = Objective(expr=i) - block.O = Objective(block.q, rule=lambda b,i: i) - block.c = Constraint(expr=block.x>=i) - block.C = Constraint(block.q, rule=lambda b,i: b.X[i]>=i) + block.O = Objective(block.q, rule=lambda b, i: i) + block.c = Constraint(expr=block.x >= i) + block.C = Constraint(block.q, rule=lambda b, i: b.X[i] >= i) block.sos = SOSConstraint(var=block.X, index=block.q, sos=1) block.SOS = SOSConstraint(block.q, var=block.X, index=block.Q, sos=1) block.s = Suffix() @@ -148,12 +188,17 @@ def B_rule(block,i): block.component_lists[Constraint] = [block.c, block.C] block.component_data_lists[Constraint] = [block.c, block.C[1], block.C[2]] block.component_lists[SOSConstraint] = [block.sos, block.SOS] - block.component_data_lists[SOSConstraint] = [block.sos, block.SOS[1], block.SOS[2]] + block.component_data_lists[SOSConstraint] = [ + block.sos, + block.SOS[1], + block.SOS[2], + ] block.component_lists[Suffix] = [block.s] block.component_data_lists[Suffix] = [block.s] block.component_lists[Block] = [] block.component_data_lists[Block] = [] - model.B = Block(model.q,rule=B_rule) + + model.B = Block(model.q, rule=B_rule) model.component_lists = {} model.component_data_lists = {} @@ -170,7 +215,11 @@ def B_rule(block,i): model.component_lists[Constraint] = [model.c, model.C] model.component_data_lists[Constraint] = [model.c, model.C[1], model.C[2]] model.component_lists[SOSConstraint] = [model.sos, model.SOS] - model.component_data_lists[SOSConstraint] = [model.sos, model.SOS[1], model.SOS[2]] + model.component_data_lists[SOSConstraint] = [ + model.sos, + model.SOS[1], + model.SOS[2], + ] model.component_lists[Suffix] = [model.s] model.component_data_lists[Suffix] = [model.s] model.component_lists[Block] = [model.b, model.B] @@ -179,30 +228,37 @@ def B_rule(block,i): return model def generator_runner(self, ctype): - model = self.generate_model() for block in model.block_data_objects(sort=SortComponents.indices): - # Non-nested components(active=True) generator = None try: - generator = list(block.component_objects(ctype, active=True, descend_into=False)) + generator = list( + block.component_objects(ctype, active=True, descend_into=False) + ) except: if issubclass(ctype, Component): print("component_objects(active=True) failed with ctype %s" % ctype) raise else: if not issubclass(ctype, Component): - self.fail("component_objects(active=True) should have failed with ctype %s" % ctype) + self.fail( + "component_objects(active=True) should have failed with ctype %s" + % ctype + ) # This first check is less safe but it gives a cleaner # failure message. I leave comparison of ids in the # second assertEqual to make sure the tests are working # as expected - self.assertEqual([comp.name for comp in generator], - [comp.name for comp in block.component_lists[ctype]]) - self.assertEqual([id(comp) for comp in generator], - [id(comp) for comp in block.component_lists[ctype]]) + self.assertEqual( + [comp.name for comp in generator], + [comp.name for comp in block.component_lists[ctype]], + ) + self.assertEqual( + [id(comp) for comp in generator], + [id(comp) for comp in block.component_lists[ctype]], + ) # Non-nested components generator = None @@ -219,90 +275,148 @@ def generator_runner(self, ctype): # failure message. I leave comparison of ids in the # second assertEqual to make sure the tests are working # as expected - self.assertEqual([comp.name for comp in generator], - [comp.name for comp in block.component_lists[ctype]]) - self.assertEqual([id(comp) for comp in generator], - [id(comp) for comp in block.component_lists[ctype]]) + self.assertEqual( + [comp.name for comp in generator], + [comp.name for comp in block.component_lists[ctype]], + ) + self.assertEqual( + [id(comp) for comp in generator], + [id(comp) for comp in block.component_lists[ctype]], + ) # Non-nested component_data_objects, active=True, sort_by_keys=False generator = None try: - generator = list(block.component_data_iterindex(ctype, active=True, sort=False, descend_into=False)) + generator = list( + block.component_data_iterindex( + ctype, active=True, sort=False, descend_into=False + ) + ) except: if issubclass(ctype, Component): - print("component_data_objects(active=True, sort_by_keys=False) failed with ctype %s" % ctype) + print( + "component_data_objects(active=True, sort_by_keys=False) failed with ctype %s" + % ctype + ) raise else: if not issubclass(ctype, Component): - self.fail("component_data_objects(active=True, sort_by_keys=False) should have failed with ctype %s" % ctype) + self.fail( + "component_data_objects(active=True, sort_by_keys=False) should have failed with ctype %s" + % ctype + ) # This first check is less safe but it gives a cleaner # failure message. I leave comparison of ids in the # second assertEqual to make sure the tests are working # as expected - self.assertEqual([comp.name for name, comp in generator], - [comp.name for comp in block.component_data_lists[ctype]]) - self.assertEqual([id(comp) for name, comp in generator], - [id(comp) for comp in block.component_data_lists[ctype]]) + self.assertEqual( + [comp.name for name, comp in generator], + [comp.name for comp in block.component_data_lists[ctype]], + ) + self.assertEqual( + [id(comp) for name, comp in generator], + [id(comp) for comp in block.component_data_lists[ctype]], + ) # Non-nested component_data_objects, active=True, sort=True generator = None try: - generator = list(block.component_data_iterindex(ctype, active=True, sort=True, descend_into=False)) + generator = list( + block.component_data_iterindex( + ctype, active=True, sort=True, descend_into=False + ) + ) except: if issubclass(ctype, Component): - print("component_data_objects(active=True, sort=True) failed with ctype %s" % ctype) + print( + "component_data_objects(active=True, sort=True) failed with ctype %s" + % ctype + ) raise else: if not issubclass(ctype, Component): - self.fail("component_data_objects(active=True, sort=True) should have failed with ctype %s" % ctype) + self.fail( + "component_data_objects(active=True, sort=True) should have failed with ctype %s" + % ctype + ) # This first check is less safe but it gives a cleaner # failure message. I leave comparison of ids in the # second assertEqual to make sure the tests are working # as expected - self.assertEqual(sorted([comp.name for name, comp in generator]), - sorted([comp.name for comp in block.component_data_lists[ctype]])) - self.assertEqual(sorted([id(comp) for name, comp in generator]), - sorted([id(comp) for comp in block.component_data_lists[ctype]])) + self.assertEqual( + sorted([comp.name for name, comp in generator]), + sorted([comp.name for comp in block.component_data_lists[ctype]]), + ) + self.assertEqual( + sorted([id(comp) for name, comp in generator]), + sorted([id(comp) for comp in block.component_data_lists[ctype]]), + ) # Non-nested components_data, sort_by_keys=True generator = None try: - generator = list(block.component_data_iterindex(ctype, sort=False, descend_into=False)) + generator = list( + block.component_data_iterindex( + ctype, sort=False, descend_into=False + ) + ) except: if issubclass(ctype, Component): - print("components_data(sort_by_keys=True) failed with ctype %s" % ctype) + print( + "components_data(sort_by_keys=True) failed with ctype %s" + % ctype + ) raise else: if not issubclass(ctype, Component): - self.fail("components_data(sort_by_keys=True) should have failed with ctype %s" % ctype) + self.fail( + "components_data(sort_by_keys=True) should have failed with ctype %s" + % ctype + ) # This first check is less safe but it gives a cleaner # failure message. I leave comparison of ids in the # second assertEqual to make sure the tests are working # as expected - self.assertEqual([comp.name for name, comp in generator], - [comp.name for comp in block.component_data_lists[ctype]]) - self.assertEqual([id(comp) for name, comp in generator], - [id(comp) for comp in block.component_data_lists[ctype]]) + self.assertEqual( + [comp.name for name, comp in generator], + [comp.name for comp in block.component_data_lists[ctype]], + ) + self.assertEqual( + [id(comp) for name, comp in generator], + [id(comp) for comp in block.component_data_lists[ctype]], + ) # Non-nested components_data, sort_by_keys=False generator = None try: - generator = list(block.component_data_iterindex(ctype, sort=True, descend_into=False)) + generator = list( + block.component_data_iterindex(ctype, sort=True, descend_into=False) + ) except: if issubclass(ctype, Component): - print("components_data(sort_by_keys=False) failed with ctype %s" % ctype) + print( + "components_data(sort_by_keys=False) failed with ctype %s" + % ctype + ) raise else: if not issubclass(ctype, Component): - self.fail("components_data(sort_by_keys=False) should have failed with ctype %s" % ctype) + self.fail( + "components_data(sort_by_keys=False) should have failed with ctype %s" + % ctype + ) # This first check is less safe but it gives a cleaner # failure message. I leave comparison of ids in the # second assertEqual to make sure the tests are working # as expected - self.assertEqual(sorted([comp.name for name, comp in generator]), - sorted([comp.name for comp in block.component_data_lists[ctype]])) - self.assertEqual(sorted([id(comp) for name, comp in generator]), - sorted([id(comp) for comp in block.component_data_lists[ctype]])) + self.assertEqual( + sorted([comp.name for name, comp in generator]), + sorted([comp.name for comp in block.component_data_lists[ctype]]), + ) + self.assertEqual( + sorted([id(comp) for name, comp in generator]), + sorted([id(comp) for comp in block.component_data_lists[ctype]]), + ) def test_Objective(self): self.generator_runner(Objective) @@ -329,38 +443,46 @@ def test_SOSConstraint(self): self.generator_runner(SOSConstraint) def test_Block(self): - self.generator_runner(Block) model = self.generate_model() # sorted all_blocks - self.assertEqual([id(comp) for comp in model.block_data_objects(sort=SortComponents.deterministic)], - [id(comp) for comp in [model,]+model.component_data_lists[Block]]) + self.assertEqual( + [ + id(comp) + for comp in model.block_data_objects(sort=SortComponents.deterministic) + ], + [id(comp) for comp in [model] + model.component_data_lists[Block]], + ) # unsorted all_blocks - self.assertEqual(sorted([id(comp) for comp in model.block_data_objects(sort=False)]), - sorted([id(comp) for comp in [model,]+model.component_data_lists[Block]])) + self.assertEqual( + sorted([id(comp) for comp in model.block_data_objects(sort=False)]), + sorted([id(comp) for comp in [model] + model.component_data_lists[Block]]), + ) def test_mixed_index_type(self): m = ConcreteModel() - m.I = Set(initialize=[1,'1',3.5,4]) + m.I = Set(initialize=[1, '1', 3.5, 4]) m.x = Var(m.I) v = list(m.component_data_objects(Var, sort=True)) self.assertEqual(len(v), 4) - for a,b in zip([m.x[1], m.x[3.5], m.x[4], m.x['1']], v): + for a, b in zip([m.x[1], m.x[3.5], m.x[4], m.x['1']], v): self.assertIs(a, b) class HierarchicalModel(object): def __init__(self): m = self.model = ConcreteModel() - m.a1_IDX = Set(initialize=[5,4], ordered=True) - m.a3_IDX = Set(initialize=[6,7], ordered=True) + m.a1_IDX = Set(initialize=[5, 4], ordered=True) + m.a3_IDX = Set(initialize=[6, 7], ordered=True) m.c = Block() + def x(b, i): pass + def a(b, i): if i == 1: b.d = Block() @@ -368,73 +490,121 @@ def a(b, i): elif i == 3: b.e = Block() b.f = Block(b.model().a3_IDX, rule=x) - m.a = Block([1,2,3], rule=a) + + m.a = Block([1, 2, 3], rule=a) m.b = Block() self.PrefixDFS = [ 'unknown', 'c', - 'a[1]', 'a[1].d', 'a[1].c[5]', 'a[1].c[4]', + 'a[1]', + 'a[1].d', + 'a[1].c[5]', + 'a[1].c[4]', 'a[2]', - 'a[3]', 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', + 'a[3]', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', 'b', ] self.PrefixDFS_sortIdx = [ 'unknown', 'c', - 'a[1]', 'a[1].d', 'a[1].c[4]', 'a[1].c[5]', + 'a[1]', + 'a[1].d', + 'a[1].c[4]', + 'a[1].c[5]', 'a[2]', - 'a[3]', 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', + 'a[3]', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', 'b', ] self.PrefixDFS_sortName = [ 'unknown', - 'a[1]', 'a[1].c[5]', 'a[1].c[4]', 'a[1].d', + 'a[1]', + 'a[1].c[5]', + 'a[1].c[4]', + 'a[1].d', 'a[2]', - 'a[3]', 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', + 'a[3]', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', 'b', 'c', ] self.PrefixDFS_sort = [ 'unknown', - 'a[1]', 'a[1].c[4]', 'a[1].c[5]', 'a[1].d', + 'a[1]', + 'a[1].c[4]', + 'a[1].c[5]', + 'a[1].d', 'a[2]', - 'a[3]', 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', + 'a[3]', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', 'b', 'c', ] - self.PostfixDFS = [ 'c', - 'a[1].d', 'a[1].c[5]', 'a[1].c[4]', 'a[1]', + 'a[1].d', + 'a[1].c[5]', + 'a[1].c[4]', + 'a[1]', 'a[2]', - 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', 'a[3]', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', + 'a[3]', 'b', 'unknown', ] self.PostfixDFS_sortIdx = [ 'c', - 'a[1].d', 'a[1].c[4]', 'a[1].c[5]', 'a[1]', + 'a[1].d', + 'a[1].c[4]', + 'a[1].c[5]', + 'a[1]', 'a[2]', - 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', 'a[3]', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', + 'a[3]', 'b', 'unknown', ] self.PostfixDFS_sortName = [ - 'a[1].c[5]', 'a[1].c[4]', 'a[1].d', 'a[1]', + 'a[1].c[5]', + 'a[1].c[4]', + 'a[1].d', + 'a[1]', 'a[2]', - 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', 'a[3]', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', + 'a[3]', 'b', 'c', 'unknown', ] self.PostfixDFS_sort = [ - 'a[1].c[4]', 'a[1].c[5]', 'a[1].d', 'a[1]', + 'a[1].c[4]', + 'a[1].c[5]', + 'a[1].d', + 'a[1]', 'a[2]', - 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', 'a[3]', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', + 'a[3]', 'b', 'c', 'unknown', @@ -443,37 +613,62 @@ def a(b, i): self.BFS = [ 'unknown', 'c', - 'a[1]', 'a[2]', 'a[3]', + 'a[1]', + 'a[2]', + 'a[3]', 'b', - 'a[1].d', 'a[1].c[5]', 'a[1].c[4]', - 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', + 'a[1].d', + 'a[1].c[5]', + 'a[1].c[4]', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', ] self.BFS_sortIdx = [ 'unknown', 'c', - 'a[1]', 'a[2]', 'a[3]', + 'a[1]', + 'a[2]', + 'a[3]', 'b', - 'a[1].d', 'a[1].c[4]', 'a[1].c[5]', - 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', + 'a[1].d', + 'a[1].c[4]', + 'a[1].c[5]', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', ] self.BFS_sortName = [ 'unknown', - 'a[1]', 'a[2]', 'a[3]', + 'a[1]', + 'a[2]', + 'a[3]', 'b', 'c', - 'a[1].c[5]', 'a[1].c[4]', 'a[1].d', - 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', + 'a[1].c[5]', + 'a[1].c[4]', + 'a[1].d', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', ] self.BFS_sort = [ 'unknown', - 'a[1]', 'a[2]', 'a[3]', + 'a[1]', + 'a[2]', + 'a[3]', 'b', 'c', - 'a[1].c[4]', 'a[1].c[5]', 'a[1].d', - 'a[3].e', 'a[3].f[6]', 'a[3].f[7]', + 'a[1].c[4]', + 'a[1].c[5]', + 'a[1].d', + 'a[3].e', + 'a[3].f[6]', + 'a[3].f[7]', ] + class MixedHierarchicalModel(object): def __init__(self): m = self.model = ConcreteModel() @@ -485,57 +680,42 @@ def __init__(self): m.b.e.f = DerivedBlock() m.b.e.f.g = Block() - self.PrefixDFS_block = [ - 'unknown', - 'a', - ] - self.PostfixDFS_block = [ - 'a', - 'unknown', - ] - self.BFS_block = [ - 'unknown', - 'a', - ] + self.PrefixDFS_block = ['unknown', 'a'] + self.PostfixDFS_block = ['a', 'unknown'] + self.BFS_block = ['unknown', 'a'] self.PrefixDFS_both = [ 'unknown', - 'a', 'a.c', - 'b', 'b.d', 'b.e', 'b.e.f', 'b.e.f.g', + 'a', + 'a.c', + 'b', + 'b.d', + 'b.e', + 'b.e.f', + 'b.e.f.g', ] self.PostfixDFS_both = [ - 'a.c', 'a', - 'b.d', 'b.e.f.g', 'b.e.f', 'b.e', 'b', - 'unknown', - ] - self.BFS_both = [ + 'a.c', + 'a', + 'b.d', + 'b.e.f.g', + 'b.e.f', + 'b.e', + 'b', 'unknown', - 'a', 'b', - 'a.c', 'b.d', 'b.e', 'b.e.f', 'b.e.f.g', ] + self.BFS_both = ['unknown', 'a', 'b', 'a.c', 'b.d', 'b.e', 'b.e.f', 'b.e.f.g'] # # References for component_objects tests (note: the model # doesn't appear) # - self.PrefixDFS_block_subclass = [ - 'a', - 'b.e', - 'b.e.f.g', - ] - self.PostfixDFS_block_subclass = [ - 'b.e.f.g', - 'b.e', - 'a', - ] - self.BFS_block_subclass = [ - 'a', - 'b.e', - 'b.e.f.g', - ] + self.PrefixDFS_block_subclass = ['a', 'b.e', 'b.e.f.g'] + self.PostfixDFS_block_subclass = ['b.e.f.g', 'b.e', 'a'] + self.BFS_block_subclass = ['a', 'b.e', 'b.e.f.g'] -class TestBlock(unittest.TestCase): +class TestBlock(unittest.TestCase): def setUp(self): # # Create block @@ -551,58 +731,38 @@ def tearDown(self): def test_collect_ctypes(self): b = Block(concrete=True) - self.assertEqual(b.collect_ctypes(), - set()) - self.assertEqual(b.collect_ctypes(active=True), - set()) + self.assertEqual(b.collect_ctypes(), set()) + self.assertEqual(b.collect_ctypes(active=True), set()) b.x = Var() - self.assertEqual(b.collect_ctypes(), - set([Var])) - self.assertEqual(b.collect_ctypes(active=True), - set([Var])) + self.assertEqual(b.collect_ctypes(), set([Var])) + self.assertEqual(b.collect_ctypes(active=True), set([Var])) b.y = Constraint(expr=b.x >= 1) - self.assertEqual(b.collect_ctypes(), - set([Var, Constraint])) - self.assertEqual(b.collect_ctypes(active=True), - set([Var, Constraint])) + self.assertEqual(b.collect_ctypes(), set([Var, Constraint])) + self.assertEqual(b.collect_ctypes(active=True), set([Var, Constraint])) b.y.deactivate() - self.assertEqual(b.collect_ctypes(), - set([Var, Constraint])) - self.assertEqual(b.collect_ctypes(active=True), - set([Var])) + self.assertEqual(b.collect_ctypes(), set([Var, Constraint])) + self.assertEqual(b.collect_ctypes(active=True), set([Var])) B = Block() B.b = b - self.assertEqual(B.collect_ctypes(descend_into=False), - set([Block])) - self.assertEqual(B.collect_ctypes(descend_into=False, - active=True), - set([Block])) - self.assertEqual(B.collect_ctypes(), - set([Block, Var, Constraint])) - self.assertEqual(B.collect_ctypes(active=True), - set([Block, Var])) + self.assertEqual(B.collect_ctypes(descend_into=False), set([Block])) + self.assertEqual( + B.collect_ctypes(descend_into=False, active=True), set([Block]) + ) + self.assertEqual(B.collect_ctypes(), set([Block, Var, Constraint])) + self.assertEqual(B.collect_ctypes(active=True), set([Block, Var])) b.deactivate() - self.assertEqual(B.collect_ctypes(descend_into=False), - set([Block])) - self.assertEqual(B.collect_ctypes(descend_into=False, - active=True), - set([])) - self.assertEqual(B.collect_ctypes(), - set([Block, Var, Constraint])) - self.assertEqual(B.collect_ctypes(active=True), - set([])) + self.assertEqual(B.collect_ctypes(descend_into=False), set([Block])) + self.assertEqual(B.collect_ctypes(descend_into=False, active=True), set([])) + self.assertEqual(B.collect_ctypes(), set([Block, Var, Constraint])) + self.assertEqual(B.collect_ctypes(active=True), set([])) del b.y # a block DOES check its own .active flag - self.assertEqual(b.collect_ctypes(), - set([Var])) - self.assertEqual(b.collect_ctypes(active=True), - set([])) + self.assertEqual(b.collect_ctypes(), set([Var])) + self.assertEqual(b.collect_ctypes(active=True), set([])) b.activate() - self.assertEqual(b.collect_ctypes(), - set([Var])) - self.assertEqual(b.collect_ctypes(active=True), - set([Var])) + self.assertEqual(b.collect_ctypes(), set([Var])) + self.assertEqual(b.collect_ctypes(active=True), set([Var])) del b.x self.assertEqual(b.collect_ctypes(), set()) @@ -614,7 +774,7 @@ def test_collect_ctypes(self): self.assertEqual(b.collect_ctypes(), set()) def test_clear_attribute(self): - """ Coverage of the _clear_attribute method """ + """Coverage of the _clear_attribute method""" obj = Set() self.block.A = obj self.assertEqual(self.block.A.local_name, "A") @@ -667,29 +827,32 @@ def test_set_attr(self): b = Block(concrete=True) b.c = Block() with self.assertRaisesRegex( - ValueError, "Cannot assign the top-level block as a subblock " - r"of one of its children \(c\): creates a circular hierarchy"): + ValueError, + "Cannot assign the top-level block as a subblock " + r"of one of its children \(c\): creates a circular hierarchy", + ): b.c.d = b def test_set_value(self): b = Block(concrete=True) with self.assertRaisesRegex( - RuntimeError, "Block components do not support assignment " - "or set_value"): + RuntimeError, "Block components do not support assignment or set_value" + ): b.set_value(None) b.b = Block() with self.assertRaisesRegex( - RuntimeError, "Block components do not support assignment " - "or set_value"): + RuntimeError, "Block components do not support assignment or set_value" + ): b.b = 5 def test_clear(self): class DerivedBlock(ScalarBlock): _Block_reserved_words = None - DerivedBlock._Block_reserved_words \ - = set(['a','b','c']) | _BlockData._Block_reserved_words + DerivedBlock._Block_reserved_words = ( + set(['a', 'b', 'c']) | _BlockData._Block_reserved_words + ) m = ConcreteModel() m.clear() @@ -716,8 +879,8 @@ class DerivedBlock(ScalarBlock): m.b.z = Param() m.b.c = c = Param() m.b.clear() - self.assertEqual(m.b._ctypes, {Var: [1, 1, 1], Param:[0,2,2]}) - self.assertEqual(m.b._decl, {'a':0, 'b':1, 'c':2}) + self.assertEqual(m.b._ctypes, {Var: [1, 1, 1], Param: [0, 2, 2]}) + self.assertEqual(m.b._decl, {'a': 0, 'b': 1, 'c': 2}) self.assertEqual(len(m.b._decl_order), 3) self.assertIs(m.b._decl_order[0][0], a) self.assertIs(m.b._decl_order[1][0], b) @@ -737,18 +900,20 @@ def test_transfer_attributes_from(self): b.clear() b.transfer_attributes_from(c) - self.assertEqual(list(b.component_map()), ['z','x']) + self.assertEqual(list(b.component_map()), ['z', 'x']) self.assertEqual(list(c.component_map()), []) self.assertIs(b.x, c_x) self.assertIs(b.y, c_y) class DerivedBlock(ScalarBlock): _Block_reserved_words = set() + def __init__(self, *args, **kwds): super(DerivedBlock, self).__init__(*args, **kwds) with self._declare_reserved_components(): self.x = Var() self.y = Var() + DerivedBlock._Block_reserved_words = set(dir(DerivedBlock())) b = DerivedBlock(concrete=True) @@ -761,7 +926,7 @@ def __init__(self, *args, **kwds): b.clear() b.transfer_attributes_from(c) - self.assertEqual(list(b.component_map()), ['y','z','x']) + self.assertEqual(list(b.component_map()), ['y', 'z', 'x']) self.assertEqual(list(c.component_map()), []) self.assertIs(b.x, c_x) self.assertIsNot(b.y, c_y) @@ -772,14 +937,12 @@ def __init__(self, *args, **kwds): b = DerivedBlock(concrete=True) b_x = b.x b_y = b.y - c = { 'z': Param(initialize=5), - 'x': Param(initialize=5), - 'y': 5 } + c = {'z': Param(initialize=5), 'x': Param(initialize=5), 'y': 5} b.clear() b.transfer_attributes_from(c) - self.assertEqual(list(b.component_map()), ['y','z','x']) - self.assertEqual(sorted(list(c.keys())), ['x','y','z']) + self.assertEqual(list(b.component_map()), ['y', 'z', 'x']) + self.assertEqual(sorted(list(c.keys())), ['x', 'y', 'z']) self.assertIs(b.x, c['x']) self.assertIsNot(b.y, c['y']) self.assertIs(b.y, b_y) @@ -791,7 +954,7 @@ def __init__(self, *args, **kwds): b.y = b_y = Var() b.transfer_attributes_from(b) - self.assertEqual(list(b.component_map()), ['x','y']) + self.assertEqual(list(b.component_map()), ['x', 'y']) self.assertIs(b.x, b_x) self.assertIs(b.y, b_y) @@ -801,33 +964,37 @@ def __init__(self, *args, **kwds): b.c.d = Block() b.c.d.e = Block() with self.assertRaisesRegex( - ValueError, r'_BlockData.transfer_attributes_from\(\): ' - r'Cannot set a sub-block \(c.d.e\) to a parent block \(c\):'): + ValueError, + r'_BlockData.transfer_attributes_from\(\): ' + r'Cannot set a sub-block \(c.d.e\) to a parent block \(c\):', + ): b.c.d.e.transfer_attributes_from(b.c) ### bad data type b = Block(concrete=True) with self.assertRaisesRegex( - ValueError, - r'_BlockData.transfer_attributes_from\(\): expected a Block ' - 'or dict; received str'): + ValueError, + r'_BlockData.transfer_attributes_from\(\): expected a Block ' + 'or dict; received str', + ): b.transfer_attributes_from('foo') def test_iterate_hierarchy_defaults(self): - self.assertIs( TraversalStrategy.BFS, - TraversalStrategy.BreadthFirstSearch ) + self.assertIs(TraversalStrategy.BFS, TraversalStrategy.BreadthFirstSearch) - self.assertIs( TraversalStrategy.DFS, - TraversalStrategy.PrefixDepthFirstSearch ) - self.assertIs( TraversalStrategy.DFS, - TraversalStrategy.PrefixDFS ) - self.assertIs( TraversalStrategy.DFS, - TraversalStrategy.ParentFirstDepthFirstSearch ) + self.assertIs(TraversalStrategy.DFS, TraversalStrategy.PrefixDepthFirstSearch) + self.assertIs(TraversalStrategy.DFS, TraversalStrategy.PrefixDFS) + self.assertIs( + TraversalStrategy.DFS, TraversalStrategy.ParentFirstDepthFirstSearch + ) - self.assertIs( TraversalStrategy.PostfixDepthFirstSearch, - TraversalStrategy.PostfixDFS ) - self.assertIs( TraversalStrategy.PostfixDepthFirstSearch, - TraversalStrategy.ParentLastDepthFirstSearch ) + self.assertIs( + TraversalStrategy.PostfixDepthFirstSearch, TraversalStrategy.PostfixDFS + ) + self.assertIs( + TraversalStrategy.PostfixDepthFirstSearch, + TraversalStrategy.ParentLastDepthFirstSearch, + ) HM = HierarchicalModel() m = HM.model @@ -837,214 +1004,300 @@ def test_iterate_hierarchy_defaults(self): def test_iterate_hierarchy_PrefixDFS(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PrefixDepthFirstSearch)] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PrefixDepthFirstSearch + ) + ] self.assertEqual(HM.PrefixDFS, result) def test_iterate_hierarchy_PrefixDFS_sortIndex(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PrefixDepthFirstSearch, - sort=SortComponents.indices, - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PrefixDepthFirstSearch, + sort=SortComponents.indices, + ) + ] self.assertEqual(HM.PrefixDFS_sortIdx, result) + def test_iterate_hierarchy_PrefixDFS_sortName(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PrefixDepthFirstSearch, - sort=SortComponents.alphaOrder, - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PrefixDepthFirstSearch, + sort=SortComponents.alphaOrder, + ) + ] self.assertEqual(HM.PrefixDFS_sortName, result) + def test_iterate_hierarchy_PrefixDFS_sort(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PrefixDepthFirstSearch, - sort=True - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PrefixDepthFirstSearch, sort=True + ) + ] self.assertEqual(HM.PrefixDFS_sort, result) - def test_iterate_hierarchy_PostfixDFS(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PostfixDepthFirstSearch)] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PostfixDepthFirstSearch + ) + ] self.assertEqual(HM.PostfixDFS, result) def test_iterate_hierarchy_PostfixDFS_sortIndex(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PostfixDepthFirstSearch, - sort=SortComponents.indices, - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PostfixDepthFirstSearch, + sort=SortComponents.indices, + ) + ] self.assertEqual(HM.PostfixDFS_sortIdx, result) + def test_iterate_hierarchy_PostfixDFS_sortName(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PostfixDepthFirstSearch, - sort=SortComponents.alphaOrder, - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PostfixDepthFirstSearch, + sort=SortComponents.alphaOrder, + ) + ] self.assertEqual(HM.PostfixDFS_sortName, result) + def test_iterate_hierarchy_PostfixDFS_sort(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PostfixDepthFirstSearch, - sort=True - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PostfixDepthFirstSearch, sort=True + ) + ] self.assertEqual(HM.PostfixDFS_sort, result) def test_iterate_hierarchy_BFS(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.BreadthFirstSearch)] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.BreadthFirstSearch + ) + ] self.assertEqual(HM.BFS, result) def test_iterate_hierarchy_BFS_sortIndex(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.BreadthFirstSearch, - sort=SortComponents.indices, - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.BreadthFirstSearch, + sort=SortComponents.indices, + ) + ] self.assertEqual(HM.BFS_sortIdx, result) def test_iterate_hierarchy_BFS_sortName(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.BreadthFirstSearch, - sort=SortComponents.alphaOrder, - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.BreadthFirstSearch, + sort=SortComponents.alphaOrder, + ) + ] self.assertEqual(HM.BFS_sortName, result) def test_iterate_hierarchy_BFS_sort(self): HM = HierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.BreadthFirstSearch, - sort=True - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.BreadthFirstSearch, sort=True + ) + ] self.assertEqual(HM.BFS_sort, result) def test_iterate_mixed_hierarchy_PrefixDFS_block(self): HM = MixedHierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PrefixDepthFirstSearch, - descend_into=Block, - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PrefixDepthFirstSearch, + descend_into=Block, + ) + ] self.assertEqual(HM.PrefixDFS_block, result) + def test_iterate_mixed_hierarchy_PrefixDFS_both(self): HM = MixedHierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PrefixDepthFirstSearch, - descend_into=(Block, DerivedBlock), - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PrefixDepthFirstSearch, + descend_into=(Block, DerivedBlock), + ) + ] self.assertEqual(HM.PrefixDFS_both, result) + def test_iterate_mixed_hierarchy_PrefixDFS_SubclassOf(self): HM = MixedHierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PrefixDepthFirstSearch, - descend_into=SubclassOf(Block), - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PrefixDepthFirstSearch, + descend_into=SubclassOf(Block), + ) + ] self.assertEqual(HM.PrefixDFS_both, result) - result = [x.name for x in m.component_objects( - ctype=Block, - descent_order=TraversalStrategy.PrefixDepthFirstSearch, - descend_into=SubclassOf(Block), - )] + result = [ + x.name + for x in m.component_objects( + ctype=Block, + descent_order=TraversalStrategy.PrefixDepthFirstSearch, + descend_into=SubclassOf(Block), + ) + ] self.assertEqual(HM.PrefixDFS_block_subclass, result) - result = [x.name for x in m.component_objects( - ctype=Block, - descent_order=TraversalStrategy.PrefixDepthFirstSearch, - descend_into=SubclassOf(Var, Block), - )] + result = [ + x.name + for x in m.component_objects( + ctype=Block, + descent_order=TraversalStrategy.PrefixDepthFirstSearch, + descend_into=SubclassOf(Var, Block), + ) + ] self.assertEqual(HM.PrefixDFS_block_subclass, result) def test_iterate_mixed_hierarchy_PostfixDFS_block(self): HM = MixedHierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PostfixDepthFirstSearch, - descend_into=Block, - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PostfixDepthFirstSearch, + descend_into=Block, + ) + ] self.assertEqual(HM.PostfixDFS_block, result) + def test_iterate_mixed_hierarchy_PostfixDFS_both(self): HM = MixedHierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PostfixDepthFirstSearch, - descend_into=(Block,DerivedBlock), - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PostfixDepthFirstSearch, + descend_into=(Block, DerivedBlock), + ) + ] self.assertEqual(HM.PostfixDFS_both, result) + def test_iterate_mixed_hierarchy_PostfixDFS_SubclassOf(self): HM = MixedHierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.PostfixDepthFirstSearch, - descend_into=SubclassOf(Block), - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.PostfixDepthFirstSearch, + descend_into=SubclassOf(Block), + ) + ] self.assertEqual(HM.PostfixDFS_both, result) - result = [x.name for x in m.component_objects( - ctype=Block, - descent_order=TraversalStrategy.PostfixDepthFirstSearch, - descend_into=SubclassOf(Block), - )] + result = [ + x.name + for x in m.component_objects( + ctype=Block, + descent_order=TraversalStrategy.PostfixDepthFirstSearch, + descend_into=SubclassOf(Block), + ) + ] self.assertEqual(HM.PostfixDFS_block_subclass, result) - result = [x.name for x in m.component_objects( - ctype=Block, - descent_order=TraversalStrategy.PostfixDepthFirstSearch, - descend_into=SubclassOf(Var, Block), - )] + result = [ + x.name + for x in m.component_objects( + ctype=Block, + descent_order=TraversalStrategy.PostfixDepthFirstSearch, + descend_into=SubclassOf(Var, Block), + ) + ] self.assertEqual(HM.PostfixDFS_block_subclass, result) def test_iterate_mixed_hierarchy_BFS_block(self): HM = MixedHierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.BFS, - descend_into=Block, - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.BFS, descend_into=Block + ) + ] self.assertEqual(HM.BFS_block, result) + def test_iterate_mixed_hierarchy_BFS_both(self): HM = MixedHierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.BFS, - descend_into=(Block, DerivedBlock), - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.BFS, descend_into=(Block, DerivedBlock) + ) + ] self.assertEqual(HM.BFS_both, result) + def test_iterate_mixed_hierarchy_BFS_SubclassOf(self): HM = MixedHierarchicalModel() m = HM.model - result = [x.name for x in m.block_data_objects( - descent_order=TraversalStrategy.BFS, - descend_into=SubclassOf(Block), - )] + result = [ + x.name + for x in m.block_data_objects( + descent_order=TraversalStrategy.BFS, descend_into=SubclassOf(Block) + ) + ] self.assertEqual(HM.BFS_both, result) - result = [x.name for x in m.component_objects( - ctype=Block, - descent_order=TraversalStrategy.BFS, - descend_into=SubclassOf(Block), - )] + result = [ + x.name + for x in m.component_objects( + ctype=Block, + descent_order=TraversalStrategy.BFS, + descend_into=SubclassOf(Block), + ) + ] self.assertEqual(HM.BFS_block_subclass, result) - result = [x.name for x in m.component_objects( - ctype=Block, - descent_order=TraversalStrategy.BFS, - descend_into=SubclassOf(Var,Block), - )] + result = [ + x.name + for x in m.component_objects( + ctype=Block, + descent_order=TraversalStrategy.BFS, + descend_into=SubclassOf(Var, Block), + ) + ] self.assertEqual(HM.BFS_block_subclass, result) - def test_add_remove_component_byname(self): m = Block() self.assertFalse(m.contains_component(Var)) @@ -1099,24 +1352,24 @@ def test_reclassify_component(self): self.assertEqual(len(m.component_map(Var)), 2) self.assertEqual(len(m.component_map(Param)), 1) - self.assertEqual( ['a', 'b'], list(m.component_map(Var)) ) - self.assertEqual( ['c'], list(m.component_map(Param)) ) + self.assertEqual(['a', 'b'], list(m.component_map(Var))) + self.assertEqual(['c'], list(m.component_map(Param))) # Test removing from the end of a list and appending to the beginning # of a list m.reclassify_component_type(m.b, Param) self.assertEqual(len(m.component_map(Var)), 1) self.assertEqual(len(m.component_map(Param)), 2) - self.assertEqual( ['a'], list(m.component_map(Var)) ) - self.assertEqual( ['b','c'], list(m.component_map(Param)) ) + self.assertEqual(['a'], list(m.component_map(Var))) + self.assertEqual(['b', 'c'], list(m.component_map(Param))) # Test removing from the beginning of a list and appending to # the end of a list m.reclassify_component_type(m.b, Var) self.assertEqual(len(m.component_map(Var)), 2) self.assertEqual(len(m.component_map(Param)), 1) - self.assertEqual( ['a','b'], list(m.component_map(Var)) ) - self.assertEqual( ['c'], list(m.component_map(Param)) ) + self.assertEqual(['a', 'b'], list(m.component_map(Var))) + self.assertEqual(['c'], list(m.component_map(Param))) # Test removing the last element of a list and creating a new list m.reclassify_component_type(m.c, Var) @@ -1125,8 +1378,8 @@ def test_reclassify_component(self): self.assertTrue(m.contains_component(Var)) self.assertFalse(m.contains_component(Param)) self.assertFalse(m.contains_component(Constraint)) - self.assertEqual( ['a','b','c'], list(m.component_map(Var)) ) - self.assertEqual( [], list(m.component_map(Param)) ) + self.assertEqual(['a', 'b', 'c'], list(m.component_map(Var))) + self.assertEqual([], list(m.component_map(Param))) # Test removing the last element of a list and creating a new list m.reclassify_component_type(m.c, Param) @@ -1136,8 +1389,8 @@ def test_reclassify_component(self): self.assertTrue(m.contains_component(Var)) self.assertTrue(m.contains_component(Param)) self.assertFalse(m.contains_component(Constraint)) - self.assertEqual( ['a','b'], list(m.component_map(Var)) ) - self.assertEqual( ['c'], list(m.component_map(Param)) ) + self.assertEqual(['a', 'b'], list(m.component_map(Var))) + self.assertEqual(['c'], list(m.component_map(Param))) # Test removing the first element of a list and creating a new list m.reclassify_component_type(m.a, Constraint) @@ -1147,9 +1400,9 @@ def test_reclassify_component(self): self.assertTrue(m.contains_component(Var)) self.assertTrue(m.contains_component(Param)) self.assertTrue(m.contains_component(Constraint)) - self.assertEqual( ['b'], list(m.component_map(Var)) ) - self.assertEqual( ['c'], list(m.component_map(Param)) ) - self.assertEqual( ['a'], list(m.component_map(Constraint)) ) + self.assertEqual(['b'], list(m.component_map(Var))) + self.assertEqual(['c'], list(m.component_map(Param))) + self.assertEqual(['a'], list(m.component_map(Constraint))) # Test removing the last element of a list and inserting it into # the middle of new list @@ -1161,53 +1414,48 @@ def test_reclassify_component(self): self.assertFalse(m.contains_component(Var)) self.assertTrue(m.contains_component(Param)) self.assertFalse(m.contains_component(Constraint)) - self.assertEqual( [], list(m.component_map(Var)) ) - self.assertEqual( ['a','b','c'], list(m.component_map(Param)) ) - self.assertEqual( [], list(m.component_map(Constraint)) ) + self.assertEqual([], list(m.component_map(Var))) + self.assertEqual(['a', 'b', 'c'], list(m.component_map(Param))) + self.assertEqual([], list(m.component_map(Constraint))) # Test idnoring decl order - m.reclassify_component_type( 'b', Var, - preserve_declaration_order=False ) - m.reclassify_component_type( 'c', Var, - preserve_declaration_order=False ) - m.reclassify_component_type( 'a', Var, - preserve_declaration_order=False ) + m.reclassify_component_type('b', Var, preserve_declaration_order=False) + m.reclassify_component_type('c', Var, preserve_declaration_order=False) + m.reclassify_component_type('a', Var, preserve_declaration_order=False) self.assertEqual(len(m.component_map(Var)), 3) self.assertEqual(len(m.component_map(Param)), 0) self.assertEqual(len(m.component_map(Constraint)), 0) self.assertTrue(m.contains_component(Var)) self.assertFalse(m.contains_component(Param)) self.assertFalse(m.contains_component(Constraint)) - self.assertEqual( ['b','c','a'], list(m.component_map(Var)) ) - self.assertEqual( [], list(m.component_map(Param)) ) - self.assertEqual( [], list(m.component_map(Constraint)) ) + self.assertEqual(['b', 'c', 'a'], list(m.component_map(Var))) + self.assertEqual([], list(m.component_map(Param))) + self.assertEqual([], list(m.component_map(Constraint))) def test_replace_attribute_with_component(self): OUTPUT = StringIO() with LoggingIntercept(OUTPUT, 'pyomo.core'): self.block.x = 5 self.block.x = Var() - self.assertIn('Reassigning the non-component attribute', - OUTPUT.getvalue()) + self.assertIn('Reassigning the non-component attribute', OUTPUT.getvalue()) def test_replace_component_with_component(self): OUTPUT = StringIO() with LoggingIntercept(OUTPUT, 'pyomo.core'): self.block.x = Var() self.block.x = Var() - self.assertIn('Implicitly replacing the Component attribute', - OUTPUT.getvalue()) + self.assertIn('Implicitly replacing the Component attribute', OUTPUT.getvalue()) def test_pseudomap_len(self): m = Block() m.a = Constraint() - m.b = Constraint() # active=False + m.b = Constraint() # active=False m.c = Constraint() - m.z = Objective() # active=False + m.z = Objective() # active=False m.x = Objective() m.v = Objective() m.y = Objective() - m.w = Objective() # active=False + m.w = Objective() # active=False m.b.deactivate() m.z.deactivate() @@ -1228,11 +1476,11 @@ def test_pseudomap_len(self): def test_pseudomap_contains(self): m = Block() m.a = Constraint() - m.b = Constraint() # active=False + m.b = Constraint() # active=False m.c = Constraint() m.s = Set() m.t = Suffix() - m.z = Objective() # active=False + m.z = Objective() # active=False m.x = Objective() m.b.deactivate() m.z.deactivate() @@ -1267,7 +1515,6 @@ def test_pseudomap_contains(self): self.assertTrue('x' not in pm) self.assertTrue('z' in pm) - pm = m.component_map(Constraint) self.assertTrue('a' in pm) self.assertTrue('b' in pm) @@ -1292,8 +1539,7 @@ def test_pseudomap_contains(self): self.assertTrue('x' not in pm) self.assertTrue('z' not in pm) - - pm = m.component_map([Constraint,Objective]) + pm = m.component_map([Constraint, Objective]) self.assertTrue('a' in pm) self.assertTrue('b' in pm) self.assertTrue('c' in pm) @@ -1303,7 +1549,7 @@ def test_pseudomap_contains(self): self.assertTrue('x' in pm) self.assertTrue('z' in pm) - pm = m.component_map([Constraint,Objective], active=True) + pm = m.component_map([Constraint, Objective], active=True) self.assertTrue('a' in pm) self.assertTrue('b' not in pm) self.assertTrue('c' in pm) @@ -1313,7 +1559,7 @@ def test_pseudomap_contains(self): self.assertTrue('x' in pm) self.assertTrue('z' not in pm) - pm = m.component_map([Constraint,Objective], active=False) + pm = m.component_map([Constraint, Objective], active=False) self.assertTrue('a' not in pm) self.assertTrue('b' in pm) self.assertTrue('c' not in pm) @@ -1323,9 +1569,8 @@ def test_pseudomap_contains(self): self.assertTrue('x' not in pm) self.assertTrue('z' in pm) - # You should be able to pass in a set as well as a list - pm = m.component_map(set([Constraint,Objective])) + pm = m.component_map(set([Constraint, Objective])) self.assertTrue('a' in pm) self.assertTrue('b' in pm) self.assertTrue('c' in pm) @@ -1335,7 +1580,7 @@ def test_pseudomap_contains(self): self.assertTrue('x' in pm) self.assertTrue('z' in pm) - pm = m.component_map(set([Constraint,Objective]), active=True) + pm = m.component_map(set([Constraint, Objective]), active=True) self.assertTrue('a' in pm) self.assertTrue('b' not in pm) self.assertTrue('c' in pm) @@ -1345,7 +1590,7 @@ def test_pseudomap_contains(self): self.assertTrue('x' in pm) self.assertTrue('z' not in pm) - pm = m.component_map(set([Constraint,Objective]), active=False) + pm = m.component_map(set([Constraint, Objective]), active=False) self.assertTrue('a' not in pm) self.assertTrue('b' in pm) self.assertTrue('c' not in pm) @@ -1355,21 +1600,21 @@ def test_pseudomap_contains(self): self.assertTrue('x' not in pm) self.assertTrue('z' in pm) - def test_pseudomap_getitem(self): m = Block() m.a = a = Constraint() - m.b = b = Constraint() # active=False + m.b = b = Constraint() # active=False m.c = c = Constraint() m.s = s = Set() m.t = t = Suffix() - m.z = z = Objective() # active=False + m.z = z = Objective() # active=False m.x = x = Objective() m.b.deactivate() m.z.deactivate() def assertWorks(self, key, pm): self.assertIs(pm[key.local_name], key) + def assertFails(self, key, pm): if not isinstance(key, str): key = key.local_name @@ -1405,7 +1650,6 @@ def assertFails(self, key, pm): assertFails(self, x, pm) assertWorks(self, z, pm) - pm = m.component_map(Constraint) assertWorks(self, a, pm) assertWorks(self, b, pm) @@ -1436,8 +1680,7 @@ def assertFails(self, key, pm): assertFails(self, x, pm) assertFails(self, z, pm) - - pm = m.component_map([Constraint,Objective]) + pm = m.component_map([Constraint, Objective]) assertWorks(self, a, pm) assertWorks(self, b, pm) assertWorks(self, c, pm) @@ -1447,7 +1690,7 @@ def assertFails(self, key, pm): assertWorks(self, x, pm) assertWorks(self, z, pm) - pm = m.component_map([Constraint,Objective], active=True) + pm = m.component_map([Constraint, Objective], active=True) assertWorks(self, a, pm) assertFails(self, b, pm) assertWorks(self, c, pm) @@ -1457,7 +1700,7 @@ def assertFails(self, key, pm): assertWorks(self, x, pm) assertFails(self, z, pm) - pm = m.component_map([Constraint,Objective], active=False) + pm = m.component_map([Constraint, Objective], active=False) assertFails(self, a, pm) assertWorks(self, b, pm) assertFails(self, c, pm) @@ -1467,8 +1710,7 @@ def assertFails(self, key, pm): assertFails(self, x, pm) assertWorks(self, z, pm) - - pm = m.component_map(set([Constraint,Objective])) + pm = m.component_map(set([Constraint, Objective])) assertWorks(self, a, pm) assertWorks(self, b, pm) assertWorks(self, c, pm) @@ -1478,7 +1720,7 @@ def assertFails(self, key, pm): assertWorks(self, x, pm) assertWorks(self, z, pm) - pm = m.component_map(set([Constraint,Objective]), active=True) + pm = m.component_map(set([Constraint, Objective]), active=True) assertWorks(self, a, pm) assertFails(self, b, pm) assertWorks(self, c, pm) @@ -1488,7 +1730,7 @@ def assertFails(self, key, pm): assertWorks(self, x, pm) assertFails(self, z, pm) - pm = m.component_map(set([Constraint,Objective]), active=False) + pm = m.component_map(set([Constraint, Objective]), active=False) assertFails(self, a, pm) assertWorks(self, b, pm) assertFails(self, c, pm) @@ -1508,198 +1750,262 @@ def tester(pm, _str): self.assertEqual(_str, err) m = Block(name='foo') - tester( m.component_map(), - "component 'a' not found in block foo" ) - tester( m.component_map(active=True), - "active component 'a' not found in block foo" ) - tester( m.component_map(active=False), - "inactive component 'a' not found in block foo" ) - - tester( m.component_map(Var), - "Var component 'a' not found in block foo" ) - tester( m.component_map(Var, active=True), - "active Var component 'a' not found in block foo" ) - tester( m.component_map(Var, active=False), - "inactive Var component 'a' not found in block foo" ) - - tester( m.component_map(SubclassOf(Var)), - "SubclassOf(Var) component 'a' not found in block foo" ) - tester( m.component_map(SubclassOf(Var), active=True), - "active SubclassOf(Var) component 'a' not found in block foo" ) - tester( m.component_map(SubclassOf(Var), active=False), - "inactive SubclassOf(Var) component " - "'a' not found in block foo" ) - - tester( m.component_map(SubclassOf(Var,Block)), - "SubclassOf(Var,Block) component 'a' not found in block foo" ) - tester( m.component_map(SubclassOf(Var,Block), active=True), - "active SubclassOf(Var,Block) component " - "'a' not found in block foo" ) - tester( m.component_map(SubclassOf(Var,Block), active=False), - "inactive SubclassOf(Var,Block) component " - "'a' not found in block foo" ) - - tester( m.component_map([Var,Param]), - "Param or Var component 'a' not found in block foo" ) - tester( m.component_map(set([Var,Param]), active=True), - "active Param or Var component 'a' not found in block foo" ) - tester( m.component_map(set([Var,Param]), active=False), - "inactive Param or Var component 'a' not found in block foo" ) + tester(m.component_map(), "component 'a' not found in block foo") + tester( + m.component_map(active=True), "active component 'a' not found in block foo" + ) + tester( + m.component_map(active=False), + "inactive component 'a' not found in block foo", + ) + + tester(m.component_map(Var), "Var component 'a' not found in block foo") + tester( + m.component_map(Var, active=True), + "active Var component 'a' not found in block foo", + ) + tester( + m.component_map(Var, active=False), + "inactive Var component 'a' not found in block foo", + ) + + tester( + m.component_map(SubclassOf(Var)), + "SubclassOf(Var) component 'a' not found in block foo", + ) + tester( + m.component_map(SubclassOf(Var), active=True), + "active SubclassOf(Var) component 'a' not found in block foo", + ) + tester( + m.component_map(SubclassOf(Var), active=False), + "inactive SubclassOf(Var) component 'a' not found in block foo", + ) + + tester( + m.component_map(SubclassOf(Var, Block)), + "SubclassOf(Var,Block) component 'a' not found in block foo", + ) + tester( + m.component_map(SubclassOf(Var, Block), active=True), + "active SubclassOf(Var,Block) component 'a' not found in block foo", + ) + tester( + m.component_map(SubclassOf(Var, Block), active=False), + "inactive SubclassOf(Var,Block) component 'a' not found in block foo", + ) + tester( + m.component_map([Var, Param]), + "Param or Var component 'a' not found in block foo", + ) + tester( + m.component_map(set([Var, Param]), active=True), + "active Param or Var component 'a' not found in block foo", + ) + tester( + m.component_map(set([Var, Param]), active=False), + "inactive Param or Var component 'a' not found in block foo", + ) tester( - m.component_map(set([Set,Var,Param])), - "Param, Set or Var component 'a' not found in block foo" ) + m.component_map(set([Set, Var, Param])), + "Param, Set or Var component 'a' not found in block foo", + ) tester( - m.component_map(set([Set,Var,Param]), active=True), - "active Param, Set or Var component 'a' not found in block foo" ) + m.component_map(set([Set, Var, Param]), active=True), + "active Param, Set or Var component 'a' not found in block foo", + ) tester( - m.component_map(set([Set,Var,Param]), active=False), - "inactive Param, Set or Var component 'a' not found in block foo" ) + m.component_map(set([Set, Var, Param]), active=False), + "inactive Param, Set or Var component 'a' not found in block foo", + ) def test_pseudomap_iteration(self): m = Block() m.a = Constraint() - m.z = Objective() # active=False + m.z = Objective() # active=False m.x = Objective() m.v = Objective() - m.b = Constraint() # active=False - m.t = Block() # active=False + m.b = Constraint() # active=False + m.t = Block() # active=False m.s = Block() m.c = Constraint() m.y = Objective() - m.w = Objective() # active=False + m.w = Objective() # active=False m.b.deactivate() m.z.deactivate() m.w.deactivate() m.t.deactivate() - self.assertEqual( ['a','z','x','v','b','t','s','c','y','w'], - list(m.component_map()) ) + self.assertEqual( + ['a', 'z', 'x', 'v', 'b', 't', 's', 'c', 'y', 'w'], list(m.component_map()) + ) - self.assertEqual( ['a','z','x','v','b','c','y','w'], - list(m.component_map( set([Constraint,Objective]) )) ) + self.assertEqual( + ['a', 'z', 'x', 'v', 'b', 'c', 'y', 'w'], + list(m.component_map(set([Constraint, Objective]))), + ) # test that the order of ctypes in the argument does not affect # the order in the resulting list - self.assertEqual( ['a','z','x','v','b','c','y','w'], - list(m.component_map( [Constraint,Objective] )) ) + self.assertEqual( + ['a', 'z', 'x', 'v', 'b', 'c', 'y', 'w'], + list(m.component_map([Constraint, Objective])), + ) - self.assertEqual( ['a','z','x','v','b','c','y','w'], - list(m.component_map( [Objective,Constraint] )) ) + self.assertEqual( + ['a', 'z', 'x', 'v', 'b', 'c', 'y', 'w'], + list(m.component_map([Objective, Constraint])), + ) - self.assertEqual( ['a','b','c'], - list(m.component_map( Constraint )) ) + self.assertEqual(['a', 'b', 'c'], list(m.component_map(Constraint))) - self.assertEqual( ['z','x','v','y','w'], - list(m.component_map( set([Objective]) )) ) + self.assertEqual( + ['z', 'x', 'v', 'y', 'w'], list(m.component_map(set([Objective]))) + ) - self.assertEqual( ['a','x','v','s','c','y'], - list(m.component_map( active=True )) ) + self.assertEqual( + ['a', 'x', 'v', 's', 'c', 'y'], list(m.component_map(active=True)) + ) - self.assertEqual( ['a','x','v','c','y'], - list(m.component_map( set([Constraint,Objective]), active=True )) ) + self.assertEqual( + ['a', 'x', 'v', 'c', 'y'], + list(m.component_map(set([Constraint, Objective]), active=True)), + ) - self.assertEqual( ['a','x','v','c','y'], - list(m.component_map( [Constraint,Objective], active=True )) ) + self.assertEqual( + ['a', 'x', 'v', 'c', 'y'], + list(m.component_map([Constraint, Objective], active=True)), + ) - self.assertEqual( ['a','x','v','c','y'], - list(m.component_map( [Objective,Constraint], active=True )) ) + self.assertEqual( + ['a', 'x', 'v', 'c', 'y'], + list(m.component_map([Objective, Constraint], active=True)), + ) - self.assertEqual( ['a','c'], - list(m.component_map( Constraint, active=True )) ) + self.assertEqual(['a', 'c'], list(m.component_map(Constraint, active=True))) - self.assertEqual( ['x','v','y'], - list(m.component_map( set([Objective]), active=True )) ) + self.assertEqual( + ['x', 'v', 'y'], list(m.component_map(set([Objective]), active=True)) + ) - self.assertEqual( ['z','b','t','w'], - list(m.component_map( active=False )) ) + self.assertEqual(['z', 'b', 't', 'w'], list(m.component_map(active=False))) - self.assertEqual( ['z','b','w'], - list(m.component_map( set([Constraint,Objective]), active=False )) ) + self.assertEqual( + ['z', 'b', 'w'], + list(m.component_map(set([Constraint, Objective]), active=False)), + ) - self.assertEqual( ['z','b','w'], - list(m.component_map( [Constraint,Objective], active=False )) ) + self.assertEqual( + ['z', 'b', 'w'], + list(m.component_map([Constraint, Objective], active=False)), + ) - self.assertEqual( ['z','b','w'], - list(m.component_map( [Objective,Constraint], active=False )) ) + self.assertEqual( + ['z', 'b', 'w'], + list(m.component_map([Objective, Constraint], active=False)), + ) - self.assertEqual( ['b'], - list(m.component_map( Constraint, active=False )) ) + self.assertEqual(['b'], list(m.component_map(Constraint, active=False))) - self.assertEqual( ['z','w'], - list(m.component_map( set([Objective]), active=False )) ) + self.assertEqual( + ['z', 'w'], list(m.component_map(set([Objective]), active=False)) + ) - self.assertEqual( ['a','b','c','s','t','v','w','x','y','z'], - list(m.component_map( sort=True )) ) + self.assertEqual( + ['a', 'b', 'c', 's', 't', 'v', 'w', 'x', 'y', 'z'], + list(m.component_map(sort=True)), + ) - self.assertEqual( ['a','b','c','v','w','x','y','z'], - list(m.component_map( set([Constraint,Objective]),sort=True )) ) + self.assertEqual( + ['a', 'b', 'c', 'v', 'w', 'x', 'y', 'z'], + list(m.component_map(set([Constraint, Objective]), sort=True)), + ) - self.assertEqual( ['a','b','c','v','w','x','y','z'], - list(m.component_map( [Constraint,Objective],sort=True )) ) + self.assertEqual( + ['a', 'b', 'c', 'v', 'w', 'x', 'y', 'z'], + list(m.component_map([Constraint, Objective], sort=True)), + ) - self.assertEqual( ['a','b','c','v','w','x','y','z'], - list(m.component_map( [Objective,Constraint],sort=True )) ) + self.assertEqual( + ['a', 'b', 'c', 'v', 'w', 'x', 'y', 'z'], + list(m.component_map([Objective, Constraint], sort=True)), + ) - self.assertEqual( ['a','b','c'], - list(m.component_map( Constraint,sort=True )) ) + self.assertEqual(['a', 'b', 'c'], list(m.component_map(Constraint, sort=True))) - self.assertEqual( ['v','w','x','y','z'], - list(m.component_map( set([Objective]),sort=True )) ) + self.assertEqual( + ['v', 'w', 'x', 'y', 'z'], + list(m.component_map(set([Objective]), sort=True)), + ) - self.assertEqual( ['a','c','s','v','x','y'], - list(m.component_map( active=True,sort=True )) ) + self.assertEqual( + ['a', 'c', 's', 'v', 'x', 'y'], + list(m.component_map(active=True, sort=True)), + ) - self.assertEqual( ['a','c','v','x','y'], - list(m.component_map( set([Constraint,Objective]), active=True, - sort=True )) ) + self.assertEqual( + ['a', 'c', 'v', 'x', 'y'], + list(m.component_map(set([Constraint, Objective]), active=True, sort=True)), + ) - self.assertEqual( ['a','c','v','x','y'], - list(m.component_map( [Constraint,Objective], active=True, - sort=True )) ) + self.assertEqual( + ['a', 'c', 'v', 'x', 'y'], + list(m.component_map([Constraint, Objective], active=True, sort=True)), + ) - self.assertEqual( ['a','c','v','x','y'], - list(m.component_map( [Objective,Constraint], active=True, - sort=True )) ) + self.assertEqual( + ['a', 'c', 'v', 'x', 'y'], + list(m.component_map([Objective, Constraint], active=True, sort=True)), + ) - self.assertEqual( ['a','c'], - list(m.component_map( Constraint, active=True, sort=True )) ) + self.assertEqual( + ['a', 'c'], list(m.component_map(Constraint, active=True, sort=True)) + ) - self.assertEqual( ['v','x','y'], - list(m.component_map( set([Objective]), active=True, - sort=True )) ) + self.assertEqual( + ['v', 'x', 'y'], + list(m.component_map(set([Objective]), active=True, sort=True)), + ) - self.assertEqual( ['b','t','w','z'], - list(m.component_map( active=False, sort=True )) ) + self.assertEqual( + ['b', 't', 'w', 'z'], list(m.component_map(active=False, sort=True)) + ) - self.assertEqual( ['b','w','z'], - list(m.component_map( set([Constraint,Objective]), active=False, - sort=True )) ) + self.assertEqual( + ['b', 'w', 'z'], + list( + m.component_map(set([Constraint, Objective]), active=False, sort=True) + ), + ) - self.assertEqual( ['b','w','z'], - list(m.component_map( [Constraint,Objective], active=False, - sort=True )) ) + self.assertEqual( + ['b', 'w', 'z'], + list(m.component_map([Constraint, Objective], active=False, sort=True)), + ) - self.assertEqual( ['b','w','z'], - list(m.component_map( [Objective,Constraint], active=False, - sort=True )) ) + self.assertEqual( + ['b', 'w', 'z'], + list(m.component_map([Objective, Constraint], active=False, sort=True)), + ) - self.assertEqual( ['b'], - list(m.component_map( Constraint, active=False, - sort=True )) ) + self.assertEqual( + ['b'], list(m.component_map(Constraint, active=False, sort=True)) + ) - self.assertEqual( ['w','z'], - list(m.component_map( set([Objective]), active=False, - sort=True )) ) + self.assertEqual( + ['w', 'z'], list(m.component_map(set([Objective]), active=False, sort=True)) + ) def test_iterate_hierarchical_blocks(self): def def_var(b, *args): b.x = Var() + def init_block(b): - b.c = Block([1,2], rule=def_var) - b.e = Disjunct([1,2], rule=def_var) + b.c = Block([1, 2], rule=def_var) + b.e = Disjunct([1, 2], rule=def_var) b.b = Block(rule=def_var) b.d = Disjunct(rule=def_var) @@ -1713,162 +2019,255 @@ def init_block(b): init_block(m.e[1]) init_block(m.e[2]) - ref = [x.name for x in ( - m, - m.c[1], m.c[1].c[1], m.c[1].c[2], m.c[1].b, - m.c[2], m.c[2].c[1], m.c[2].c[2], m.c[2].b, - m.b, m.b.c[1], m.b.c[2], m.b.b, - )] + ref = [ + x.name + for x in ( + m, + m.c[1], + m.c[1].c[1], + m.c[1].c[2], + m.c[1].b, + m.c[2], + m.c[2].c[1], + m.c[2].c[2], + m.c[2].b, + m.b, + m.b.c[1], + m.b.c[2], + m.b.b, + ) + ] test = list(x.name for x in m.block_data_objects()) self.assertEqual(test, ref) - test = list(x.name for x in m.block_data_objects( - descend_into=Block )) + test = list(x.name for x in m.block_data_objects(descend_into=Block)) self.assertEqual(test, ref) - test = list(x.name for x in m.block_data_objects( - descend_into=(Block,) )) + test = list(x.name for x in m.block_data_objects(descend_into=(Block,))) self.assertEqual(test, ref) - - ref = [x.name for x in ( - m, - m.e[1], m.e[1].e[1], m.e[1].e[2], m.e[1].d, - m.e[2], m.e[2].e[1], m.e[2].e[2], m.e[2].d, - m.d, m.d.e[1], m.d.e[2], m.d.d, - )] - test = list(x.name for x in m.block_data_objects( - descend_into=(Disjunct,) )) + ref = [ + x.name + for x in ( + m, + m.e[1], + m.e[1].e[1], + m.e[1].e[2], + m.e[1].d, + m.e[2], + m.e[2].e[1], + m.e[2].e[2], + m.e[2].d, + m.d, + m.d.e[1], + m.d.e[2], + m.d.d, + ) + ] + test = list(x.name for x in m.block_data_objects(descend_into=(Disjunct,))) self.assertEqual(test, ref) - ref = [x.name for x in ( - m.d, m.d.e[1], m.d.e[2], m.d.d, - )] - test = list(x.name for x in m.d.block_data_objects( - descend_into=(Disjunct,) )) + ref = [x.name for x in (m.d, m.d.e[1], m.d.e[2], m.d.d)] + test = list(x.name for x in m.d.block_data_objects(descend_into=(Disjunct,))) self.assertEqual(test, ref) - - ref = [x.name for x in ( - m, - m.c[1], - m.c[1].c[1], m.c[1].c[2], - m.c[1].e[1], m.c[1].e[2], - m.c[1].b, m.c[1].d, - m.c[2], - m.c[2].c[1], m.c[2].c[2], - m.c[2].e[1], m.c[2].e[2], - m.c[2].b, m.c[2].d, - m.e[1], - m.e[1].c[1], m.e[1].c[2], - m.e[1].e[1], m.e[1].e[2], - m.e[1].b, m.e[1].d, - m.e[2], - m.e[2].c[1], m.e[2].c[2], - m.e[2].e[1], m.e[2].e[2], - m.e[2].b, m.e[2].d, - m.b, - m.b.c[1], m.b.c[2], - m.b.e[1], m.b.e[2], - m.b.b, m.b.d, - m.d, - m.d.c[1], m.d.c[2], - m.d.e[1], m.d.e[2], - m.d.b, m.d.d, - )] - test = list(x.name for x in m.block_data_objects( - descend_into=(Block,Disjunct) )) + ref = [ + x.name + for x in ( + m, + m.c[1], + m.c[1].c[1], + m.c[1].c[2], + m.c[1].e[1], + m.c[1].e[2], + m.c[1].b, + m.c[1].d, + m.c[2], + m.c[2].c[1], + m.c[2].c[2], + m.c[2].e[1], + m.c[2].e[2], + m.c[2].b, + m.c[2].d, + m.e[1], + m.e[1].c[1], + m.e[1].c[2], + m.e[1].e[1], + m.e[1].e[2], + m.e[1].b, + m.e[1].d, + m.e[2], + m.e[2].c[1], + m.e[2].c[2], + m.e[2].e[1], + m.e[2].e[2], + m.e[2].b, + m.e[2].d, + m.b, + m.b.c[1], + m.b.c[2], + m.b.e[1], + m.b.e[2], + m.b.b, + m.b.d, + m.d, + m.d.c[1], + m.d.c[2], + m.d.e[1], + m.d.e[2], + m.d.b, + m.d.d, + ) + ] + test = list( + x.name for x in m.block_data_objects(descend_into=(Block, Disjunct)) + ) self.assertEqual(test, ref) - test = list(x.name for x in m.block_data_objects( - descend_into=(Disjunct,Block) )) + test = list( + x.name for x in m.block_data_objects(descend_into=(Disjunct, Block)) + ) self.assertEqual(test, ref) - - ref = [x.name for x in ( - m.x, - m.c[1].x, m.c[1].c[1].x, m.c[1].c[2].x, m.c[1].b.x, - m.c[2].x, m.c[2].c[1].x, m.c[2].c[2].x, m.c[2].b.x, - m.b.x, m.b.c[1].x, m.b.c[2].x, m.b.b.x, - )] - test = list(x.name for x in m.component_data_objects( - Var )) + ref = [ + x.name + for x in ( + m.x, + m.c[1].x, + m.c[1].c[1].x, + m.c[1].c[2].x, + m.c[1].b.x, + m.c[2].x, + m.c[2].c[1].x, + m.c[2].c[2].x, + m.c[2].b.x, + m.b.x, + m.b.c[1].x, + m.b.c[2].x, + m.b.b.x, + ) + ] + test = list(x.name for x in m.component_data_objects(Var)) self.assertEqual(test, ref) - test = list(x.name for x in m.component_data_objects( - Var, descend_into=Block )) + test = list(x.name for x in m.component_data_objects(Var, descend_into=Block)) self.assertEqual(test, ref) - test = list(x.name for x in m.component_data_objects( - Var, descend_into=(Block,) )) + test = list( + x.name for x in m.component_data_objects(Var, descend_into=(Block,)) + ) self.assertEqual(test, ref) - ref = [x.name for x in ( - m.x, - m.e[1].binary_indicator_var, m.e[1].x, - m.e[1].e[1].binary_indicator_var, m.e[1].e[1].x, - m.e[1].e[2].binary_indicator_var, m.e[1].e[2].x, - m.e[1].d.binary_indicator_var, m.e[1].d.x, - m.e[2].binary_indicator_var, m.e[2].x, - m.e[2].e[1].binary_indicator_var, m.e[2].e[1].x, - m.e[2].e[2].binary_indicator_var, m.e[2].e[2].x, - m.e[2].d.binary_indicator_var, m.e[2].d.x, - m.d.binary_indicator_var, m.d.x, - m.d.e[1].binary_indicator_var, m.d.e[1].x, - m.d.e[2].binary_indicator_var, m.d.e[2].x, - m.d.d.binary_indicator_var, m.d.d.x, - )] - test = list(x.name for x in m.component_data_objects( - Var, descend_into=Disjunct )) + ref = [ + x.name + for x in ( + m.x, + m.e[1].binary_indicator_var, + m.e[1].x, + m.e[1].e[1].binary_indicator_var, + m.e[1].e[1].x, + m.e[1].e[2].binary_indicator_var, + m.e[1].e[2].x, + m.e[1].d.binary_indicator_var, + m.e[1].d.x, + m.e[2].binary_indicator_var, + m.e[2].x, + m.e[2].e[1].binary_indicator_var, + m.e[2].e[1].x, + m.e[2].e[2].binary_indicator_var, + m.e[2].e[2].x, + m.e[2].d.binary_indicator_var, + m.e[2].d.x, + m.d.binary_indicator_var, + m.d.x, + m.d.e[1].binary_indicator_var, + m.d.e[1].x, + m.d.e[2].binary_indicator_var, + m.d.e[2].x, + m.d.d.binary_indicator_var, + m.d.d.x, + ) + ] + test = list( + x.name for x in m.component_data_objects(Var, descend_into=Disjunct) + ) self.assertEqual(test, ref) - ref = [x.name for x in ( - m.x, - m.c[1].x, - m.c[1].c[1].x, m.c[1].c[2].x, - m.c[1].e[1].binary_indicator_var, m.c[1].e[1].x, - m.c[1].e[2].binary_indicator_var, m.c[1].e[2].x, - m.c[1].b.x, - m.c[1].d.binary_indicator_var, m.c[1].d.x, - m.c[2].x, - m.c[2].c[1].x, m.c[2].c[2].x, - m.c[2].e[1].binary_indicator_var, m.c[2].e[1].x, - m.c[2].e[2].binary_indicator_var, m.c[2].e[2].x, - m.c[2].b.x, - m.c[2].d.binary_indicator_var, m.c[2].d.x, - - m.e[1].binary_indicator_var, m.e[1].x, - m.e[1].c[1].x, m.e[1].c[2].x, - m.e[1].e[1].binary_indicator_var, m.e[1].e[1].x, - m.e[1].e[2].binary_indicator_var, m.e[1].e[2].x, - m.e[1].b.x, - m.e[1].d.binary_indicator_var, m.e[1].d.x, - m.e[2].binary_indicator_var, m.e[2].x, - m.e[2].c[1].x, m.e[2].c[2].x, - m.e[2].e[1].binary_indicator_var, m.e[2].e[1].x, - m.e[2].e[2].binary_indicator_var, m.e[2].e[2].x, - m.e[2].b.x, - m.e[2].d.binary_indicator_var, m.e[2].d.x, - - m.b.x, - m.b.c[1].x, m.b.c[2].x, - m.b.e[1].binary_indicator_var, m.b.e[1].x, - m.b.e[2].binary_indicator_var, m.b.e[2].x, - m.b.b.x, - m.b.d.binary_indicator_var, m.b.d.x, - - m.d.binary_indicator_var, m.d.x, - m.d.c[1].x, m.d.c[2].x, - m.d.e[1].binary_indicator_var, m.d.e[1].x, - m.d.e[2].binary_indicator_var, m.d.e[2].x, - m.d.b.x, - m.d.d.binary_indicator_var, m.d.d.x, - )] - test = list(x.name for x in m.component_data_objects( - Var, descend_into=(Block,Disjunct) )) + ref = [ + x.name + for x in ( + m.x, + m.c[1].x, + m.c[1].c[1].x, + m.c[1].c[2].x, + m.c[1].e[1].binary_indicator_var, + m.c[1].e[1].x, + m.c[1].e[2].binary_indicator_var, + m.c[1].e[2].x, + m.c[1].b.x, + m.c[1].d.binary_indicator_var, + m.c[1].d.x, + m.c[2].x, + m.c[2].c[1].x, + m.c[2].c[2].x, + m.c[2].e[1].binary_indicator_var, + m.c[2].e[1].x, + m.c[2].e[2].binary_indicator_var, + m.c[2].e[2].x, + m.c[2].b.x, + m.c[2].d.binary_indicator_var, + m.c[2].d.x, + m.e[1].binary_indicator_var, + m.e[1].x, + m.e[1].c[1].x, + m.e[1].c[2].x, + m.e[1].e[1].binary_indicator_var, + m.e[1].e[1].x, + m.e[1].e[2].binary_indicator_var, + m.e[1].e[2].x, + m.e[1].b.x, + m.e[1].d.binary_indicator_var, + m.e[1].d.x, + m.e[2].binary_indicator_var, + m.e[2].x, + m.e[2].c[1].x, + m.e[2].c[2].x, + m.e[2].e[1].binary_indicator_var, + m.e[2].e[1].x, + m.e[2].e[2].binary_indicator_var, + m.e[2].e[2].x, + m.e[2].b.x, + m.e[2].d.binary_indicator_var, + m.e[2].d.x, + m.b.x, + m.b.c[1].x, + m.b.c[2].x, + m.b.e[1].binary_indicator_var, + m.b.e[1].x, + m.b.e[2].binary_indicator_var, + m.b.e[2].x, + m.b.b.x, + m.b.d.binary_indicator_var, + m.b.d.x, + m.d.binary_indicator_var, + m.d.x, + m.d.c[1].x, + m.d.c[2].x, + m.d.e[1].binary_indicator_var, + m.d.e[1].x, + m.d.e[2].binary_indicator_var, + m.d.e[2].x, + m.d.b.x, + m.d.d.binary_indicator_var, + m.d.d.x, + ) + ] + test = list( + x.name + for x in m.component_data_objects(Var, descend_into=(Block, Disjunct)) + ) self.assertEqual(test, ref) - def test_deepcopy(self): m = ConcreteModel() m.x = Var() @@ -1876,7 +2275,7 @@ def test_deepcopy(self): m.c = Constraint(expr=m.x**2 + m.y[1] <= 5) m.b = Block() m.b.x = Var() - m.b.y = Var([1,2]) + m.b.y = Var([1, 2]) m.b.c = Constraint(expr=m.x**2 + m.y[1] + m.b.x**2 + m.b.y[1] <= 10) n = deepcopy(m) @@ -1901,11 +2300,11 @@ def test_deepcopy(self): self.assertIs(n.c.parent_component(), n.c) self.assertEqual( sorted(id(x) for x in EXPR.identify_variables(m.c.body)), - sorted(id(x) for x in (m.x,m.y[1])), + sorted(id(x) for x in (m.x, m.y[1])), ) self.assertEqual( sorted(id(x) for x in EXPR.identify_variables(n.c.body)), - sorted(id(x) for x in (n.x,n.y[1])), + sorted(id(x) for x in (n.x, n.y[1])), ) self.assertNotEqual(id(m.b), id(n.b)) @@ -1947,7 +2346,7 @@ def test_clone_model(self): m.c = Constraint(expr=m.x**2 + m.y[1] <= 5) m.b = Block() m.b.x = Var() - m.b.y = Var([1,2]) + m.b.y = Var([1, 2]) m.b.c = Constraint(expr=m.x**2 + m.y[1] + m.b.x**2 + m.b.y[1] <= 10) n = m.clone() @@ -1972,11 +2371,11 @@ def test_clone_model(self): self.assertIs(n.c.parent_component(), n.c) self.assertEqual( sorted(id(x) for x in EXPR.identify_variables(m.c.body)), - sorted(id(x) for x in (m.x,m.y[1])), + sorted(id(x) for x in (m.x, m.y[1])), ) self.assertEqual( sorted(id(x) for x in EXPR.identify_variables(n.c.body)), - sorted(id(x) for x in (n.x,n.y[1])), + sorted(id(x) for x in (n.x, n.y[1])), ) self.assertNotEqual(id(m.b), id(n.b)) @@ -2018,7 +2417,7 @@ def test_clone_subblock(self): m.c = Constraint(expr=m.x**2 + m.y[1] <= 5) m.b = Block() m.b.x = Var() - m.b.y = Var([1,2]) + m.b.y = Var([1, 2]) m.b.c = Constraint(expr=m.x**2 + m.y[1] + m.b.x**2 + m.b.y[1] <= 10) nb = m.b.clone() @@ -2057,10 +2456,12 @@ def test_clone_subblock(self): def test_clone_indexed_subblock(self): m = ConcreteModel() - @m.Block([1,2,3]) + + @m.Block([1, 2, 3]) def blk(b, i): b.IDX = RangeSet(i) b.x = Var(b.IDX) + m.c = Block(rule=m.blk[2].clone()) self.assertEqual([1, 2], list(m.c.IDX)) @@ -2084,12 +2485,12 @@ def blk(b, i): self.assertIs(m.c1.parent_component(), m.c1) self.assertIs(m.c1.parent_block(), m) - @m.Block([1,2,3]) + @m.Block([1, 2, 3]) def d(b, i): return b.model().blk[i].clone() for i in [1, 2, 3]: - self.assertEqual(list(range(1, i+1)), list(m.d[i].IDX)) + self.assertEqual(list(range(1, i + 1)), list(m.d[i].IDX)) self.assertEqual(list(m.blk[i].IDX), list(m.d[i].IDX)) self.assertIsNot(m.blk[i].IDX, m.d[i].IDX) self.assertIsNot(m.blk[i].x, m.d[i].x) @@ -2110,7 +2511,7 @@ def __deepcopy__(bogus): m.c = Constraint(expr=m.x**2 + m.y[1] <= 5) m.b = Block() m.b.x = Var() - m.b.y = Var([1,2]) + m.b.y = Var([1, 2]) m.b.bad2 = foo() m.b.c = Constraint(expr=m.x**2 + m.y[1] + m.b.x**2 + m.b.y[1] <= 10) @@ -2119,39 +2520,47 @@ def __deepcopy__(bogus): with LoggingIntercept(OUTPUT, 'pyomo.core'): nb = deepcopy(m.b) # without the scope, the whole model is cloned! - self.assertIn("'unknown' contains an uncopyable field 'bad1'", - OUTPUT.getvalue()) - self.assertIn("'b' contains an uncopyable field 'bad2'", - OUTPUT.getvalue()) + self.assertIn( + "'unknown' contains an uncopyable field 'bad1'", OUTPUT.getvalue() + ) + self.assertIn("'b' contains an uncopyable field 'bad2'", OUTPUT.getvalue()) self.assertIn("'__paranoid__'", OUTPUT.getvalue()) self.assertTrue(hasattr(m.b, 'bad2')) - self.assertFalse(hasattr(nb, 'bad2')) + self.assertIsNotNone(m.b.bad2) + self.assertTrue(hasattr(nb, 'bad2')) + self.assertIsNone(nb.bad2) # Simple tests for the subblock OUTPUT = StringIO() with LoggingIntercept(OUTPUT, 'pyomo.core'): nb = m.b.clone() - self.assertNotIn("'unknown' contains an uncopyable field 'bad1'", - OUTPUT.getvalue()) - self.assertIn("'b' contains an uncopyable field 'bad2'", - OUTPUT.getvalue()) + self.assertNotIn( + "'unknown' contains an uncopyable field 'bad1'", OUTPUT.getvalue() + ) + self.assertIn("'b' contains an uncopyable field 'bad2'", OUTPUT.getvalue()) self.assertNotIn("'__paranoid__'", OUTPUT.getvalue()) self.assertTrue(hasattr(m.b, 'bad2')) - self.assertFalse(hasattr(nb, 'bad2')) + self.assertIsNotNone(m.b.bad2) + self.assertTrue(hasattr(nb, 'bad2')) + self.assertIsNone(nb.bad2) # more involved tests for the model OUTPUT = StringIO() with LoggingIntercept(OUTPUT, 'pyomo.core'): n = m.clone() - self.assertIn("'unknown' contains an uncopyable field 'bad1'", - OUTPUT.getvalue()) - self.assertIn("'b' contains an uncopyable field 'bad2'", - OUTPUT.getvalue()) + self.assertIn( + "'unknown' contains an uncopyable field 'bad1'", OUTPUT.getvalue() + ) + self.assertIn("'b' contains an uncopyable field 'bad2'", OUTPUT.getvalue()) self.assertNotIn("'__paranoid__'", OUTPUT.getvalue()) self.assertTrue(hasattr(m, 'bad1')) - self.assertFalse(hasattr(n, 'bad1')) + self.assertIsNotNone(m.bad1) + self.assertTrue(hasattr(n, 'bad1')) + self.assertIsNone(n.bad1) self.assertTrue(hasattr(m.b, 'bad2')) - self.assertFalse(hasattr(n.b, 'bad2')) + self.assertIsNotNone(m.b.bad2) + self.assertTrue(hasattr(n.b, 'bad2')) + self.assertIsNone(n.b.bad2) self.assertNotEqual(id(m), id(n)) @@ -2174,11 +2583,11 @@ def __deepcopy__(bogus): self.assertIs(n.c.parent_component(), n.c) self.assertEqual( sorted(id(x) for x in EXPR.identify_variables(m.c.body)), - sorted(id(x) for x in (m.x,m.y[1])), + sorted(id(x) for x in (m.x, m.y[1])), ) self.assertEqual( sorted(id(x) for x in EXPR.identify_variables(n.c.body)), - sorted(id(x) for x in (n.x,n.y[1])), + sorted(id(x) for x in (n.x, n.y[1])), ) self.assertNotEqual(id(m.b), id(n.b)) @@ -2267,98 +2676,116 @@ def test_pprint(self): @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") def test_solve1(self): model = Block(concrete=True) - model.A = RangeSet(1,4) - model.x = Var(model.A, bounds=(-1,1)) + model.A = RangeSet(1, 4) + model.x = Var(model.A, bounds=(-1, 1)) + def obj_rule(model): return sum_product(model.x) + model.obj = Objective(rule=obj_rule) + def c_rule(model): expr = 0 for i in model.A: - expr += i*model.x[i] + expr += i * model.x[i] return expr == 0 + model.c = Constraint(rule=c_rule) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=True) model.solutions.store_to(results) - results.write(filename=join(currdir,"solve1.out"), format='json') - with open(join(currdir,"solve1.out"), 'r') as out, \ - open(join(currdir,"solve1.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, "solve1.out"), format='json') + with open(join(currdir, "solve1.out"), 'r') as out, open( + join(currdir, "solve1.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) + # def d_rule(model): return model.x[1] >= 0 + model.d = Constraint(rule=d_rule) model.d.deactivate() results = opt.solve(model) model.solutions.store_to(results) - results.write(filename=join(currdir,"solve1x.out"), format='json') - with open(join(currdir,"solve1x.out"), 'r') as out, \ - open(join(currdir,"solve1.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, "solve1x.out"), format='json') + with open(join(currdir, "solve1x.out"), 'r') as out, open( + join(currdir, "solve1.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) # model.d.activate() results = opt.solve(model) model.solutions.store_to(results) - results.write(filename=join(currdir,"solve1a.out"), format='json') - with open(join(currdir,"solve1a.out"), 'r') as out, \ - open(join(currdir,"solve1a.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, "solve1a.out"), format='json') + with open(join(currdir, "solve1a.out"), 'r') as out, open( + join(currdir, "solve1a.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) # model.d.deactivate() + def e_rule(model, i): return model.x[i] >= 0 + model.e = Constraint(model.A, rule=e_rule) for i in model.A: model.e[i].deactivate() results = opt.solve(model) model.solutions.store_to(results) - results.write(filename=join(currdir,"solve1y.out"), format='json') - with open(join(currdir,"solve1y.out"), 'r') as out, \ - open(join(currdir,"solve1.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, "solve1y.out"), format='json') + with open(join(currdir, "solve1y.out"), 'r') as out, open( + join(currdir, "solve1.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) # model.e.activate() results = opt.solve(model) model.solutions.store_to(results) - results.write(filename=join(currdir,"solve1b.out"), format='json') - with open(join(currdir,"solve1b.out"), 'r') as out, \ - open(join(currdir,"solve1b.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, "solve1b.out"), format='json') + with open(join(currdir, "solve1b.out"), 'r') as out, open( + join(currdir, "solve1b.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") def test_solve4(self): model = Block(concrete=True) - model.A = RangeSet(1,4) - model.x = Var(model.A, bounds=(-1,1)) + model.A = RangeSet(1, 4) + model.x = Var(model.A, bounds=(-1, 1)) + def obj_rule(model): return sum_product(model.x) + model.obj = Objective(rule=obj_rule) + def c_rule(model): expr = 0 for i in model.A: - expr += i*model.x[i] + expr += i * model.x[i] return expr == 0 + model.c = Constraint(rule=c_rule) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=True) model.solutions.store_to(results) - results.write(filename=join(currdir,'solve4.out'), format='json') - with open(join(currdir,"solve4.out"), 'r') as out, \ - open(join(currdir,"solve1.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, 'solve4.out'), format='json') + with open(join(currdir, "solve4.out"), 'r') as out, open( + join(currdir, "solve1.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") def test_solve6(self): @@ -2368,61 +2795,70 @@ def test_solve6(self): # b.x # model = Block(concrete=True) - model.y = Var(bounds=(-1,1)) + model.y = Var(bounds=(-1, 1)) model.b = Block() - model.b.A = RangeSet(1,4) - model.b.x = Var(model.b.A, bounds=(-1,1)) + model.b.A = RangeSet(1, 4) + model.b.x = Var(model.b.A, bounds=(-1, 1)) + def obj_rule(block): return sum_product(block.x) + model.b.obj = Objective(rule=obj_rule) + def c_rule(model): expr = model.y for i in model.b.A: - expr += i*model.b.x[i] + expr += i * model.b.x[i] return expr == 0 + model.c = Constraint(rule=c_rule) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=True) model.solutions.store_to(results) - results.write(filename=join(currdir,'solve6.out'), format='json') - with open(join(currdir,"solve6.out"), 'r') as out, \ - open(join(currdir,"solve6.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, 'solve6.out'), format='json') + with open(join(currdir, "solve6.out"), 'r') as out, open( + join(currdir, "solve6.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") def test_solve7(self): # - # Test that solution values are writen with appropriate + # Test that solution values are written with appropriate # quotations in results # model = Block(concrete=True) - model.y = Var(bounds=(-1,1)) - model.A = RangeSet(1,4) + model.y = Var(bounds=(-1, 1)) + model.A = RangeSet(1, 4) model.B = Set(initialize=['A B', 'C,D', 'E']) - model.x = Var(model.A, model.B, bounds=(-1,1)) + model.x = Var(model.A, model.B, bounds=(-1, 1)) + def obj_rule(model): return sum_product(model.x) + model.obj = Objective(rule=obj_rule) + def c_rule(model): expr = model.y for i in model.A: for j in model.B: - expr += i*model.x[i,j] + expr += i * model.x[i, j] return expr == 0 + model.c = Constraint(rule=c_rule) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=True) - #model.display() + # model.display() model.solutions.store_to(results) - results.write(filename=join(currdir,'solve7.out'), format='json') - with open(join(currdir,"solve7.out"), 'r') as out, \ - open(join(currdir,"solve7.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) - + results.write(filename=join(currdir, 'solve7.out'), format='json') + with open(join(currdir, "solve7.out"), 'r') as out, open( + join(currdir, "solve7.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) def test_abstract_index(self): model = AbstractModel() @@ -2433,12 +2869,12 @@ def test_abstract_index(self): def test_decorated_definition(self): model = ConcreteModel() - model.I = Set(initialize=[1,2,3]) + model.I = Set(initialize=[1, 2, 3]) model.x = Var(model.I) @model.Constraint() def scalar_constraint(m): - return m.x[1]**2 <= 0 + return m.x[1] ** 2 <= 0 self.assertTrue(hasattr(model, 'scalar_constraint')) self.assertIs(model.scalar_constraint.ctype, Constraint) @@ -2447,7 +2883,7 @@ def scalar_constraint(m): @model.Constraint(model.I) def vector_constraint(m, i): - return m.x[i]**2 <= 0 + return m.x[i] ** 2 <= 0 self.assertTrue(hasattr(model, 'vector_constraint')) self.assertIs(model.vector_constraint.ctype, Constraint) @@ -2457,22 +2893,36 @@ def vector_constraint(m, i): def test_reserved_words(self): m = ConcreteModel() self.assertRaisesRegex( - ValueError, ".*using the name of a reserved attribute", - m.add_component, "add_component", Var()) + ValueError, + ".*using the name of a reserved attribute", + m.add_component, + "add_component", + Var(), + ) with self.assertRaisesRegex( - ValueError, ".*using the name of a reserved attribute"): + ValueError, ".*using the name of a reserved attribute" + ): m.add_component = Var() m.foo = Var() m.b = DerivedBlock() self.assertRaisesRegex( - ValueError, ".*using the name of a reserved attribute", - m.b.add_component, "add_component", Var()) + ValueError, + ".*using the name of a reserved attribute", + m.b.add_component, + "add_component", + Var(), + ) self.assertRaisesRegex( - ValueError, ".*using the name of a reserved attribute", - m.b.add_component, "foo", Var()) + ValueError, + ".*using the name of a reserved attribute", + m.b.add_component, + "foo", + Var(), + ) with self.assertRaisesRegex( - ValueError, ".*using the name of a reserved attribute"): + ValueError, ".*using the name of a reserved attribute" + ): m.b.foo = Var() class DerivedBlockReservedComp(DerivedBlock): @@ -2481,27 +2931,32 @@ def __init__(self, *args, **kwargs): super(DerivedBlock, self).__init__(*args, **kwargs) with self._declare_reserved_components(): self.x = Var() + DerivedBlockReservedComp._Block_reserved_words = set( - dir(DerivedBlockReservedComp())) + dir(DerivedBlockReservedComp()) + ) m.c = DerivedBlockReservedComp() with self.assertRaisesRegex( - ValueError, "Attempting to delete a reserved block component"): + ValueError, "Attempting to delete a reserved block component" + ): m.c.del_component('x') with self.assertRaisesRegex( - ValueError, "Attempting to delete a reserved block component"): + ValueError, "Attempting to delete a reserved block component" + ): m.c.x = Var() class RestrictedBlock(ScalarBlock): - _Block_reserved_words = Any - {'start', 'end',} + _Block_reserved_words = Any - {'start', 'end'} m.d = RestrictedBlock() m.d.start = v = Var() self.assertIs(m.d.start, v) with self.assertRaisesRegex( - ValueError, "using the name of a reserved attribute"): + ValueError, "using the name of a reserved attribute" + ): m.d.step = Var() # @@ -2517,11 +2972,11 @@ class RestrictedBlock(ScalarBlock): def test_write_exceptions(self): m = Block() with self.assertRaisesRegex( - ValueError, ".*Could not infer file format from file name"): + ValueError, ".*Could not infer file format from file name" + ): m.write(filename="foo.bogus") - with self.assertRaisesRegex( - ValueError, ".*Cannot write model in format"): + with self.assertRaisesRegex(ValueError, ".*Cannot write model in format"): m.write(format="bogus") def test_override_pprint(self): @@ -2540,13 +2995,15 @@ def test_block_rules(self): m = ConcreteModel() m.I = Set() _rule_ = [] - def _block_rule(b,i): + + def _block_rule(b, i): _rule_.append(i) b.x = Var(range(i)) + m.b = Block(m.I, rule=_block_rule) # I is empty: no rules called self.assertEqual(_rule_, []) - m.I.update([1,3,5]) + m.I.update([1, 3, 5]) # Fetching a new block will call the rule _b = m.b[3] self.assertEqual(len(m.b), 1) @@ -2560,7 +3017,7 @@ def _block_rule(b,i): _tmp.y = Var(range(3)) m.b[5].transfer_attributes_from(_tmp) self.assertEqual(len(m.b), 2) - self.assertEqual(_rule_, [3,5]) + self.assertEqual(_rule_, [3, 5]) self.assertIn('x', m.b[5].component_map()) self.assertIn('y', m.b[5].component_map()) @@ -2569,22 +3026,23 @@ def _block_rule(b,i): _tmp = Block() _tmp.y = Var(range(3)) with self.assertRaisesRegex( - RuntimeError, "Block components do not support " - "assignment or set_value"): + RuntimeError, "Block components do not support assignment or set_value" + ): m.b[1] = _tmp self.assertEqual(len(m.b), 2) - self.assertEqual(_rule_, [3,5]) + self.assertEqual(_rule_, [3, 5]) # Blocks with non-finite indexing sets cannot be automatically # populated (even if they have a rule!) def _bb_rule(b, i, j): - _rule_.append((i,j)) + _rule_.append((i, j)) b.x = Var(RangeSet(i)) b.y = Var(RangeSet(j)) + m.bb = Block(m.I, NonNegativeIntegers, rule=_bb_rule) - self.assertEqual(_rule_, [3,5]) - _b = m.bb[3,5] - self.assertEqual(_rule_, [3,5,(3,5)]) + self.assertEqual(_rule_, [3, 5]) + _b = m.bb[3, 5] + self.assertEqual(_rule_, [3, 5, (3, 5)]) self.assertEqual(len(m.bb), 1) self.assertEqual(len(_b.x), 3) self.assertEqual(len(_b.y), 5) @@ -2604,6 +3062,7 @@ def __init__(self, *args, **kwds): ConcreteBlock.__init__(self, *args, **kwds) _buf = [] + def _rule(b): _buf.append(1) @@ -2614,24 +3073,31 @@ def _rule(b): def test_abstract_construction(self): m = AbstractModel() m.I = Set() + def b_rule(b, i): b.p = Param(default=i) b.J = Set(initialize=range(i)) + m.b = Block(m.I, rule=b_rule) - i = m.create_instance({None: { - 'I': {None: [1,2,3,4]}, - 'b': {1: {'p': {None: 10}, 'J': {None: [7,8]}}, - 2: {'p': {None: 12}}, - 3: {'J': {None: [9]}}, - } - }}) - self.assertEqual(list(i.I), [1,2,3,4]) + i = m.create_instance( + { + None: { + 'I': {None: [1, 2, 3, 4]}, + 'b': { + 1: {'p': {None: 10}, 'J': {None: [7, 8]}}, + 2: {'p': {None: 12}}, + 3: {'J': {None: [9]}}, + }, + } + } + ) + self.assertEqual(list(i.I), [1, 2, 3, 4]) self.assertEqual(len(i.b), 4) - self.assertEqual(list(i.b[1].J), [7,8]) - self.assertEqual(list(i.b[2].J), [0,1]) + self.assertEqual(list(i.b[1].J), [7, 8]) + self.assertEqual(list(i.b[2].J), [0, 1]) self.assertEqual(list(i.b[3].J), [9]) - self.assertEqual(list(i.b[4].J), [0,1,2,3]) + self.assertEqual(list(i.b[4].J), [0, 1, 2, 3]) self.assertEqual(value(i.b[1].p), 10) self.assertEqual(value(i.b[2].p), 12) self.assertEqual(value(i.b[3].p), 3) @@ -2640,26 +3106,33 @@ def b_rule(b, i): def test_abstract_transfer_construction(self): m = AbstractModel() m.I = Set() + def b_rule(_b, i): b = Block() b.p = Param(default=i) b.J = Set(initialize=range(i)) return b + m.b = Block(m.I, rule=b_rule) - i = m.create_instance({None: { - 'I': {None: [1,2,3,4]}, - 'b': {1: {'p': {None: 10}, 'J': {None: [7,8]}}, - 2: {'p': {None: 12}}, - 3: {'J': {None: [9]}}, - } - }}) - self.assertEqual(list(i.I), [1,2,3,4]) + i = m.create_instance( + { + None: { + 'I': {None: [1, 2, 3, 4]}, + 'b': { + 1: {'p': {None: 10}, 'J': {None: [7, 8]}}, + 2: {'p': {None: 12}}, + 3: {'J': {None: [9]}}, + }, + } + } + ) + self.assertEqual(list(i.I), [1, 2, 3, 4]) self.assertEqual(len(i.b), 4) - self.assertEqual(list(i.b[1].J), [7,8]) - self.assertEqual(list(i.b[2].J), [0,1]) + self.assertEqual(list(i.b[1].J), [7, 8]) + self.assertEqual(list(i.b[2].J), [0, 1]) self.assertEqual(list(i.b[3].J), [9]) - self.assertEqual(list(i.b[4].J), [0,1,2,3]) + self.assertEqual(list(i.b[4].J), [0, 1, 2, 3]) self.assertEqual(value(i.b[1].p), 10) self.assertEqual(value(i.b[2].p), 12) self.assertEqual(value(i.b[3].p), 3) @@ -2667,23 +3140,25 @@ def b_rule(_b, i): def test_deprecated_options(self): m = ConcreteModel() + def b_rule(b, a=None): b.p = Param(initialize=a) + OUTPUT = StringIO() with LoggingIntercept(OUTPUT, 'pyomo.core'): m.b = Block(rule=b_rule, options={'a': 5}) - self.assertIn("The Block 'options=' keyword is deprecated.", - OUTPUT.getvalue()) + self.assertIn("The Block 'options=' keyword is deprecated.", OUTPUT.getvalue()) self.assertEqual(value(m.b.p), 5) m = ConcreteModel() + def b_rule(b, i, **kwds): b.p = Param(initialize=kwds.get('a', {}).get(i, 0)) + OUTPUT = StringIO() with LoggingIntercept(OUTPUT, 'pyomo.core'): - m.b = Block([1,2,3], rule=b_rule, options={'a': {1:5, 2:10}}) - self.assertIn("The Block 'options=' keyword is deprecated.", - OUTPUT.getvalue()) + m.b = Block([1, 2, 3], rule=b_rule, options={'a': {1: 5, 2: 10}}) + self.assertIn("The Block 'options=' keyword is deprecated.", OUTPUT.getvalue()) self.assertEqual(value(m.b[1].p), 5) self.assertEqual(value(m.b[2].p), 10) self.assertEqual(value(m.b[3].p), 0) @@ -2722,6 +3197,216 @@ def test_find_component_hierarchical_cuid(self): self.assertIs(b1.find_component(cuid1), b1.b2.v1) self.assertIs(b1.find_component(cuid2), b1.b2.v2[2]) + def test_deduplicate_component_data_objects(self): + m = ConcreteModel() + m.b = Block() + # Scalar, then reference + m.x = Var() + m.z_x = Reference(m.x) + # Indexed, then reference + m.I = Var([1, 3, 2]) + m.z_I = Reference(m.I) + + # Reference, then scalar + m.b.y = Var() + m.z_y = Reference(m.b.y) + # Reference, then indexed + m.b.J = Var([4, 6, 5]) + m.z_J = Reference(m.b.J) + + # Partial reference, then components + m.c = Block([2, 1]) + m.c[1].A = Var([(0, 2), (1, 1)]) + m.c[2].A = Var([(0, 3), (1, 1)]) + m.z_AA = Reference(m.c[:].A[1, :]) + # duplicate rederence + m.z_A = Reference(m.c[:].A[1, :]) + + ans = list(m.component_data_objects(Var)) + self.assertEqual( + ans, + [ + m.x, + m.I[1], + m.I[3], + m.I[2], + m.b.y, + m.b.J[4], + m.b.J[6], + m.b.J[5], + m.c[2].A[1, 1], + m.c[1].A[1, 1], + m.c[2].A[0, 3], + m.c[1].A[0, 2], + ], + ) + + ans = list(m.component_data_objects(Var, sort=SortComponents.SORTED_INDICES)) + self.assertEqual( + ans, + [ + m.x, + m.I[1], + m.I[2], + m.I[3], + m.b.y, + m.b.J[4], + m.b.J[5], + m.b.J[6], + m.c[1].A[1, 1], + m.c[2].A[1, 1], + m.c[1].A[0, 2], + m.c[2].A[0, 3], + ], + ) + + ans = list(m.component_data_objects(Var, sort=SortComponents.ALPHABETICAL)) + self.assertEqual( + ans, + [ + m.I[1], + m.I[3], + m.I[2], + m.x, + m.c[2].A[1, 1], + m.c[1].A[1, 1], + m.b.J[4], + m.b.J[6], + m.b.J[5], + m.b.y, + m.c[2].A[0, 3], + m.c[1].A[0, 2], + ], + ) + + ans = list( + m.component_data_objects( + Var, sort=SortComponents.ALPHABETICAL | SortComponents.SORTED_INDICES + ) + ) + self.assertEqual( + ans, + [ + m.I[1], + m.I[2], + m.I[3], + m.x, + m.c[1].A[1, 1], + m.c[2].A[1, 1], + m.b.J[4], + m.b.J[5], + m.b.J[6], + m.b.y, + m.c[1].A[0, 2], + m.c[2].A[0, 3], + ], + ) + + def test_deduplicate_component_data_iterindex(self): + m = ConcreteModel() + m.b = Block() + # Scalar, then reference + m.x = Var() + m.z_x = Reference(m.x) + # Indexed, then reference + m.I = Var([1, 3, 2]) + m.z_I = Reference(m.I) + + # Reference, then scalar + m.b.y = Var() + m.z_y = Reference(m.b.y) + # Reference, then indexed + m.b.J = Var([4, 6, 5]) + m.z_J = Reference(m.b.J) + + # Partial reference, then components + m.c = Block([2, 1]) + m.c[1].A = Var([(0, 2), (1, 1)]) + m.c[2].A = Var([(0, 3), (1, 1)]) + m.z_AA = Reference(m.c[:].A[1, :]) + # duplicate rederence + m.z_A = Reference(m.c[:].A[1, :]) + + ans = list(m.component_data_iterindex(Var)) + self.assertEqual( + ans, + [ + (('x', None), m.x), + (('I', 1), m.I[1]), + (('I', 3), m.I[3]), + (('I', 2), m.I[2]), + (('z_y', None), m.b.y), + (('z_J', 4), m.b.J[4]), + (('z_J', 6), m.b.J[6]), + (('z_J', 5), m.b.J[5]), + (('z_AA', (2, 1)), m.c[2].A[1, 1]), + (('z_AA', (1, 1)), m.c[1].A[1, 1]), + (('A', (0, 3)), m.c[2].A[0, 3]), + (('A', (0, 2)), m.c[1].A[0, 2]), + ], + ) + + ans = list(m.component_data_iterindex(Var, sort=SortComponents.SORTED_INDICES)) + self.assertEqual( + ans, + [ + (('x', None), m.x), + (('I', 1), m.I[1]), + (('I', 2), m.I[2]), + (('I', 3), m.I[3]), + (('z_y', None), m.b.y), + (('z_J', 4), m.b.J[4]), + (('z_J', 5), m.b.J[5]), + (('z_J', 6), m.b.J[6]), + (('z_AA', (1, 1)), m.c[1].A[1, 1]), + (('z_AA', (2, 1)), m.c[2].A[1, 1]), + (('A', (0, 2)), m.c[1].A[0, 2]), + (('A', (0, 3)), m.c[2].A[0, 3]), + ], + ) + + ans = list(m.component_data_iterindex(Var, sort=SortComponents.ALPHABETICAL)) + self.assertEqual( + ans, + [ + (('I', 1), m.I[1]), + (('I', 3), m.I[3]), + (('I', 2), m.I[2]), + (('x', None), m.x), + (('z_A', (2, 1)), m.c[2].A[1, 1]), + (('z_A', (1, 1)), m.c[1].A[1, 1]), + (('z_J', 4), m.b.J[4]), + (('z_J', 6), m.b.J[6]), + (('z_J', 5), m.b.J[5]), + (('z_y', None), m.b.y), + (('A', (0, 3)), m.c[2].A[0, 3]), + (('A', (0, 2)), m.c[1].A[0, 2]), + ], + ) + + ans = list( + m.component_data_iterindex( + Var, sort=SortComponents.ALPHABETICAL | SortComponents.SORTED_INDICES + ) + ) + self.assertEqual( + ans, + [ + (('I', 1), m.I[1]), + (('I', 2), m.I[2]), + (('I', 3), m.I[3]), + (('x', None), m.x), + (('z_A', (1, 1)), m.c[1].A[1, 1]), + (('z_A', (2, 1)), m.c[2].A[1, 1]), + (('z_J', 4), m.b.J[4]), + (('z_J', 5), m.b.J[5]), + (('z_J', 6), m.b.J[6]), + (('z_y', None), m.b.y), + (('A', (0, 2)), m.c[1].A[0, 2]), + (('A', (0, 3)), m.c[2].A[0, 3]), + ], + ) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_block_model.py b/pyomo/core/tests/unit/test_block_model.py index 389b469f57f..ed751e96fc5 100644 --- a/pyomo/core/tests/unit/test_block_model.py +++ b/pyomo/core/tests/unit/test_block_model.py @@ -17,18 +17,31 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest -from pyomo.environ import AbstractModel, Param, Block, Set, Var, RangeSet, Constraint, Connector, value +from pyomo.environ import ( + AbstractModel, + Param, + Block, + Set, + Var, + RangeSet, + Constraint, + Connector, + value, +) -class Test(unittest.TestCase): +class Test(unittest.TestCase): def test_nonindexed_block_immutable_param(self): model = AbstractModel() + def _b_rule(b): b.A = Param(initialize=2.0) + model.B = Block(rule=_b_rule) instance = model.create_instance() @@ -37,8 +50,10 @@ def _b_rule(b): def test_nonindexed_block_mutable_param(self): model = AbstractModel() + def _b_rule(b): b.A = Param(initialize=2.0, mutable=True) + model.B = Block(rule=_b_rule) instance = model.create_instance() @@ -50,74 +65,88 @@ def _b_rule(b): def test_indexed_block_immutable_param(self): model = AbstractModel() model.A = RangeSet(2) - def _b_rule(b,id): + + def _b_rule(b, id): b.A = Param(initialize=id) + model.B = Block(model.A, rule=_b_rule) instance = model.create_instance() - self.assertEqual(value(instance.B[1].A),1) - self.assertEqual(value(instance.B[2].A),2) + self.assertEqual(value(instance.B[1].A), 1) + self.assertEqual(value(instance.B[2].A), 2) def test_indexed_block_mutable_param(self): model = AbstractModel() model.A = RangeSet(2) - def _b_rule(b,id): + + def _b_rule(b, id): b.A = Param(initialize=id, mutable=True) + model.B = Block(model.A, rule=_b_rule) instance = model.create_instance() - self.assertEqual(value(instance.B[1].A),1) - self.assertEqual(value(instance.B[2].A),2) + self.assertEqual(value(instance.B[1].A), 1) + self.assertEqual(value(instance.B[2].A), 2) instance.B[1].A = 4.0 - self.assertEqual(value(instance.B[1].A),4.0) + self.assertEqual(value(instance.B[1].A), 4.0) def test_create_from_dict(self): model = AbstractModel() model.A = RangeSet(2) - def _b_rule(b,id): + + def _b_rule(b, id): b.S = Set() b.P = Param() b.Q = Param(b.S) + model.B = Block(model.A, rule=_b_rule) - instance = model.create_instance( {None:{'B': \ - {1:{'S':{None:['a','b','c']}, \ - 'P':{None:4}, \ - 'Q':{('a',):1,('b',):2,('c',):3}}, \ - 2:{'S':{None:[]}, \ - 'P':{None:3}} \ - } \ - }} ) - - self.assertEqual(set(instance.B[1].S),set(['a','b','c'])) - self.assertEqual(value(instance.B[1].P),4) - self.assertEqual(value(instance.B[1].Q['a']),1) - self.assertEqual(value(instance.B[1].Q['b']),2) - self.assertEqual(value(instance.B[1].Q['c']),3) - self.assertEqual(value(instance.B[2].P),3) + instance = model.create_instance( + { + None: { + 'B': { + 1: { + 'S': {None: ['a', 'b', 'c']}, + 'P': {None: 4}, + 'Q': {('a',): 1, ('b',): 2, ('c',): 3}, + }, + 2: {'S': {None: []}, 'P': {None: 3}}, + } + } + } + ) + + self.assertEqual(set(instance.B[1].S), set(['a', 'b', 'c'])) + self.assertEqual(value(instance.B[1].P), 4) + self.assertEqual(value(instance.B[1].Q['a']), 1) + self.assertEqual(value(instance.B[1].Q['b']), 2) + self.assertEqual(value(instance.B[1].Q['c']), 3) + self.assertEqual(value(instance.B[2].P), 3) def test_expand_connector(self): model = AbstractModel() model.A = Set() - def _b_rule(b,id): + + def _b_rule(b, id): b.X = Var() b.PORT = Connector() - b.PORT.add( b.X ) + b.PORT.add(b.X) + model.B = Block(model.A, rule=_b_rule) - def _c_rule(m,a): - return m.B[a].PORT == m.B[(a+1)%2].PORT - model.C = Constraint(model.A,rule=_c_rule) + def _c_rule(m, a): + return m.B[a].PORT == m.B[(a + 1) % 2].PORT - instance = model.create_instance( {None: {'A':{None:[0,1]}}} ) + model.C = Constraint(model.A, rule=_c_rule) - # FIXME: Not sure what to assert here, but at the moment this throws an error anyways. + instance = model.create_instance({None: {'A': {None: [0, 1]}}}) - def test_len(self): + # FIXME: Not sure what to assert here, but at the moment this throws an error anyways. + def test_len(self): model = AbstractModel() model.b = Block() @@ -128,7 +157,6 @@ def test_len(self): self.assertEqual(len(inst.b), 1) def test_none_key(self): - model = AbstractModel() model.b = Block() @@ -138,4 +166,3 @@ def test_none_key(self): if __name__ == "__main__": unittest.main() - diff --git a/pyomo/core/tests/unit/test_bounds.py b/pyomo/core/tests/unit/test_bounds.py index 63aae2c6658..c2c6a69bdd2 100644 --- a/pyomo/core/tests/unit/test_bounds.py +++ b/pyomo/core/tests/unit/test_bounds.py @@ -14,15 +14,16 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest from pyomo.environ import AbstractModel, Param, Var, Constraint -class Test(unittest.TestCase): - #Test constraint bounds +class Test(unittest.TestCase): + # Test constraint bounds def test_constr_lower(self): model = AbstractModel() model.A = Param(default=2.0, mutable=True) @@ -31,11 +32,12 @@ def test_constr_lower(self): model.X = Var() def constr_rule(model): - return (model.A*(model.B+model.C),model.X) + return (model.A * (model.B + model.C), model.X) + model.constr = Constraint(rule=constr_rule) instance = model.create_instance() - self.assertEqual(instance.constr.lower(),8.0) + self.assertEqual(instance.constr.lower(), 8.0) def test_constr_upper(self): model = AbstractModel() @@ -45,12 +47,13 @@ def test_constr_upper(self): model.X = Var() def constr_rule(model): - return (model.X,model.A*(model.B+model.C)) + return (model.X, model.A * (model.B + model.C)) + model.constr = Constraint(rule=constr_rule) instance = model.create_instance() - self.assertEqual(instance.constr.upper(),8.0) + self.assertEqual(instance.constr.upper(), 8.0) def test_constr_both(self): model = AbstractModel() @@ -60,17 +63,21 @@ def test_constr_both(self): model.X = Var() def constr_rule(model): - return (model.A*(model.B-model.C),model.X,model.A*(model.B+model.C)) + return ( + model.A * (model.B - model.C), + model.X, + model.A * (model.B + model.C), + ) + model.constr = Constraint(rule=constr_rule) instance = model.create_instance() - self.assertEqual(instance.constr.lower(),-2.0) - self.assertEqual(instance.constr.upper(),8.0) - + self.assertEqual(instance.constr.lower(), -2.0) + self.assertEqual(instance.constr.upper(), 8.0) - #Test variable bounds - #JPW: Disabled until we are convinced that we want to support complex parametric expressions for variable bounds. + # Test variable bounds + # JPW: Disabled until we are convinced that we want to support complex parametric expressions for variable bounds. def test_var_bounds(self): model = AbstractModel() model.A = Param(default=2.0, mutable=True) @@ -78,15 +85,15 @@ def test_var_bounds(self): model.C = Param(default=2.5) def X_bounds_rule(model): - return (model.A*(model.B-model.C),model.A*(model.B+model.C)) + return (model.A * (model.B - model.C), model.A * (model.B + model.C)) + model.X = Var(bounds=X_bounds_rule) instance = model.create_instance() - self.assertEqual(instance.X.lb,-2.0) - self.assertEqual(instance.X.ub,8.0) - + self.assertEqual(instance.X.lb, -2.0) + self.assertEqual(instance.X.ub, 8.0) + if __name__ == "__main__": unittest.main() - diff --git a/pyomo/core/tests/unit/test_check.py b/pyomo/core/tests/unit/test_check.py index eb0ad5afd5f..5b2d5408fd5 100644 --- a/pyomo/core/tests/unit/test_check.py +++ b/pyomo/core/tests/unit/test_check.py @@ -23,8 +23,8 @@ from pyomo.environ import AbstractModel, BuildCheck, Param, Set, value -class PyomoModel(unittest.TestCase): +class PyomoModel(unittest.TestCase): def setUp(self): self.model = AbstractModel() self.instance = None @@ -33,32 +33,34 @@ def tearDown(self): self.model = None self.instance = None - def construct(self,filename): + def construct(self, filename): self.instance = self.model.create_instance(filename) def action1a_fn(model): return value(model.A) == 3.3 + def action1b_fn(model): return value(model.A) != 3.3 + def action2a_fn(model, i): - ans=True + ans = True if i in model.A: - return (value(model.A[i]) == 1.3) + return value(model.A[i]) == 1.3 return True + def action2b_fn(model, i): if i in model.A: - ans = (value(model.A[i]) == 1.3) - #print "HERE",i,ans,not ans + ans = value(model.A[i]) == 1.3 + # print "HERE",i,ans,not ans return not ans return True class Scalar(PyomoModel): - def setUp(self): # # Create Model @@ -79,7 +81,7 @@ def test_true(self): self.model.action1 = BuildCheck(rule=action1a_fn) self.instance = self.model.create_instance() tmp = value(self.instance.A) - self.assertEqual( tmp, 3.3 ) + self.assertEqual(tmp, 3.3) def test_false(self): """Apply a build check that returns false""" @@ -92,7 +94,6 @@ def test_false(self): class Array1(PyomoModel): - def setUp(self): # # Create Model @@ -101,7 +102,7 @@ def setUp(self): # # Create model instance # - self.model.Z = Set(initialize=[1,3]) + self.model.Z = Set(initialize=[1, 3]) self.model.A = Param(self.model.Z, initialize=1.3) def tearDown(self): @@ -123,7 +124,6 @@ def test_false(self): class Array2(PyomoModel): - def setUp(self): # # Create Model @@ -132,7 +132,7 @@ def setUp(self): # # Create model instance # - self.model.Z = Set(initialize=[1,3]) + self.model.Z = Set(initialize=[1, 3]) self.model.A = Param(self.model.Z, initialize=1.3) def tearDown(self): @@ -154,7 +154,6 @@ def test_false(self): class TestMisc(unittest.TestCase): - def test_error1(self): model = AbstractModel() try: @@ -166,13 +165,15 @@ def test_error1(self): def test_io(self): model = AbstractModel() model.c1 = BuildCheck(rule=lambda M: True) - model.A = Set(initialize=[1,2,3]) - model.c2 = BuildCheck(model.A, rule=lambda M,i: True) + model.A = Set(initialize=[1, 2, 3]) + model.c2 = BuildCheck(model.A, rule=lambda M, i: True) instance = model.create_instance() # buf = StringIO() instance.pprint(ostream=buf) - self.assertEqual(buf.getvalue(),"""1 Set Declarations + self.assertEqual( + buf.getvalue(), + """1 Set Declarations A : Size=1, Index=None, Ordered=Insertion Key : Dimen : Domain : Size : Members None : 1 : Any : 3 : {1, 2, 3} @@ -182,8 +183,8 @@ def test_io(self): c2 : 3 Declarations: c1 A c2 -""") - +""", + ) if __name__ == "__main__": diff --git a/pyomo/core/tests/unit/test_compare.py b/pyomo/core/tests/unit/test_compare.py index b2292f80a5b..8b8538a8656 100644 --- a/pyomo/core/tests/unit/test_compare.py +++ b/pyomo/core/tests/unit/test_compare.py @@ -11,26 +11,55 @@ from pyomo.common import unittest import pyomo.environ as pe from pyomo.core.expr.numeric_expr import ( - LinearExpression, MonomialTermExpression, SumExpression, - ProductExpression, DivisionExpression, PowExpression, - NegationExpression, UnaryFunctionExpression, ExternalFunctionExpression, - Expr_ifExpression, AbsExpression + LinearExpression, + MonomialTermExpression, + SumExpression, + ProductExpression, + DivisionExpression, + PowExpression, + NegationExpression, + UnaryFunctionExpression, + ExternalFunctionExpression, + Expr_ifExpression, + AbsExpression, ) -from pyomo.core.expr.logical_expr import ( - InequalityExpression, EqualityExpression, RangedExpression +from pyomo.core.expr.relational_expr import ( + InequalityExpression, + EqualityExpression, + RangedExpression, ) from pyomo.core.expr.compare import ( - convert_expression_to_prefix_notation, compare_expressions + convert_expression_to_prefix_notation, + compare_expressions, + assertExpressionsEqual, + assertExpressionsStructurallyEqual, ) -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL class TestConvertToPrefixNotation(unittest.TestCase): def test_linear_expression(self): m = pe.ConcreteModel() m.x = pe.Var([1, 2, 3, 4]) - e = LinearExpression(constant=3, linear_coefs=list(m.x.keys()), linear_vars=list(m.x.values())) - expected = [(LinearExpression, 9), 3, 1, 2, 3, 4, m.x[1], m.x[2], m.x[3], m.x[4]] + e = LinearExpression( + constant=3, linear_coefs=list(m.x.keys()), linear_vars=list(m.x.values()) + ) + expected = [ + (LinearExpression, 5), + 3, + (MonomialTermExpression, 2), + 1, + m.x[1], + (MonomialTermExpression, 2), + 2, + m.x[2], + (MonomialTermExpression, 2), + 3, + m.x[3], + (MonomialTermExpression, 2), + 4, + m.x[4], + ] pn = convert_expression_to_prefix_notation(e) self.assertEqual(pn, expected) @@ -39,21 +68,23 @@ def test_multiple(self): m.x = pe.Var() m.y = pe.Var() - e = m.x**2 + m.x*m.y/3 + 4 - expected = [(SumExpression, 3), - (PowExpression, 2), - m.x, - 2, - (DivisionExpression, 2), - (ProductExpression, 2), - m.x, - m.y, - 3, - 4] + e = m.x**2 + m.x * m.y / 3 + 4 + expected = [ + (SumExpression, 3), + (PowExpression, 2), + m.x, + 2, + (DivisionExpression, 2), + (ProductExpression, 2), + m.x, + m.y, + 3, + 4, + ] pn = convert_expression_to_prefix_notation(e) self.assertEqual(pn, expected) - e2 = m.x**2 + m.x*m.y/3 + 4 - e3 = m.y**2 + m.x*m.y/3 + 4 + e2 = m.x**2 + m.x * m.y / 3 + 4 + e3 = m.y**2 + m.x * m.y / 3 + 4 self.assertTrue(compare_expressions(e, e2)) self.assertFalse(compare_expressions(e, e3)) @@ -74,14 +105,16 @@ def test_external_function(self): m.hypot = pe.ExternalFunction(library=DLL, function='gsl_hypot') m.x = pe.Var(initialize=0.5) m.y = pe.Var(initialize=1.5) - e = 2 * m.hypot(m.x, m.x*m.y) - expected = [(ProductExpression, 2), - 2, - (ExternalFunctionExpression, 2, m.hypot), - m.x, - (ProductExpression, 2), - m.x, - m.y] + e = 2 * m.hypot(m.x, m.x * m.y) + expected = [ + (ProductExpression, 2), + 2, + (ExternalFunctionExpression, 2, m.hypot), + m.x, + (ProductExpression, 2), + m.x, + m.y, + ] pn = convert_expression_to_prefix_notation(e) self.assertEqual(expected, pn) @@ -98,11 +131,9 @@ def test_float(self): def test_monomial(self): m = pe.ConcreteModel() m.x = pe.Var() - e = 2*m.x + e = 2 * m.x pn = convert_expression_to_prefix_notation(e) - expected = [(MonomialTermExpression, 2), - 2, - m.x] + expected = [(MonomialTermExpression, 2), 2, m.x] self.assertEqual(pn, expected) def test_negation(self): @@ -110,10 +141,7 @@ def test_negation(self): m.x = pe.Var() e = -m.x**2 pn = convert_expression_to_prefix_notation(e) - expected = [(NegationExpression, 1), - (PowExpression, 2), - m.x, - 2] + expected = [(NegationExpression, 1), (PowExpression, 2), m.x, 2] self.assertEqual(pn, expected) def test_abs(self): @@ -121,8 +149,7 @@ def test_abs(self): m.x = pe.Var() e = abs(m.x) pn = convert_expression_to_prefix_notation(e) - expected = [(AbsExpression, 1, 'abs'), - m.x] + expected = [(AbsExpression, 1, 'abs'), m.x] self.assertEqual(pn, expected) def test_expr_if(self): @@ -131,22 +158,30 @@ def test_expr_if(self): m.y = pe.Var() e = pe.Expr_if(m.x <= 0, m.y + m.x == 0, m.y - m.x == 0) pn = convert_expression_to_prefix_notation(e) - expected = [(Expr_ifExpression, 3), - (InequalityExpression, 2), - m.x, - 0, - (EqualityExpression, 2), - (SumExpression, 2), - m.y, - m.x, - 0, - (EqualityExpression, 2), - (SumExpression, 2), - m.y, - (MonomialTermExpression, 2), - -1, - m.x, - 0] + expected = [ + (Expr_ifExpression, 3), + (InequalityExpression, 2), + m.x, + 0, + (EqualityExpression, 2), + (LinearExpression, 2), + (MonomialTermExpression, 2), + 1, + m.y, + (MonomialTermExpression, 2), + 1, + m.x, + 0, + (EqualityExpression, 2), + (LinearExpression, 2), + (MonomialTermExpression, 2), + 1, + m.y, + (MonomialTermExpression, 2), + -1, + m.x, + 0, + ] self.assertEqual(pn, expected) def test_ranged_expression(self): @@ -154,8 +189,29 @@ def test_ranged_expression(self): m.x = pe.Var() e = pe.inequality(-1, m.x, 1) pn = convert_expression_to_prefix_notation(e) - expected = [(RangedExpression, 3), - -1, - m.x, - 1] + expected = [(RangedExpression, 3), -1, m.x, 1] self.assertEqual(pn, expected) + + def test_assertExpressionsEqual(self): + m = pe.ConcreteModel() + m.x = pe.Var() + m.e1 = pe.Expression(expr=m.x**2 + m.x - 1) + m.e2 = pe.Expression(expr=m.x**2 + m.x - 1) + m.f = pe.Expression(expr=m.x**2 + 2 * m.x - 1) + m.g = pe.Expression(expr=m.x**2 + m.x - 2) + + assertExpressionsEqual(self, m.e1.expr, m.e2.expr) + assertExpressionsStructurallyEqual(self, m.e1.expr, m.e2.expr) + with self.assertRaisesRegex(AssertionError, 'Expressions not equal:'): + assertExpressionsEqual(self, m.e1.expr, m.f.expr) + with self.assertRaisesRegex( + AssertionError, 'Expressions not structurally equal:' + ): + assertExpressionsStructurallyEqual(self, m.e1.expr, m.f.expr) + + # Structurally equal will compare across clones, whereas strict + # equality will not + i = m.clone() + with self.assertRaisesRegex(AssertionError, 'Expressions not equal:'): + assertExpressionsEqual(self, m.e1.expr, i.e1.expr) + assertExpressionsStructurallyEqual(self, m.e1.expr, i.e1.expr) diff --git a/pyomo/core/tests/unit/test_component.py b/pyomo/core/tests/unit/test_component.py index 1d4999a26fb..b4408fe8c54 100644 --- a/pyomo/core/tests/unit/test_component.py +++ b/pyomo/core/tests/unit/test_component.py @@ -16,74 +16,80 @@ from pyomo.common import DeveloperError from pyomo.environ import ( - ConcreteModel, Component, Block, Var, Set, ModelComponentFactory + ConcreteModel, + Component, + Block, + Var, + Set, + ModelComponentFactory, ) from pyomo.core.base.set import GlobalSets -class TestComponent(unittest.TestCase): +class TestComponent(unittest.TestCase): def test_construct_component_throws_exception(self): with self.assertRaisesRegex( - DeveloperError, - "Must specify a component type for class Component"): + DeveloperError, "Must specify a component type for class Component" + ): Component() def test_getname(self): m = ConcreteModel() - m.b = Block([1,2]) - m.b[2].c = Var([1,2],[3,4]) + m.b = Block([1, 2]) + m.b[2].c = Var([1, 2], [3, 4]) self.assertEqual(m.getname(fully_qualified=True), "unknown") self.assertEqual(m.b.getname(fully_qualified=True), "b") self.assertEqual(m.b[1].getname(fully_qualified=True), "b[1]") - self.assertEqual(m.b[2].c[2,4].getname(fully_qualified=True), - "b[2].c[2,4]") + self.assertEqual(m.b[2].c[2, 4].getname(fully_qualified=True), "b[2].c[2,4]") self.assertEqual(m.getname(fully_qualified=False), "unknown") self.assertEqual(m.b.getname(fully_qualified=False), "b") self.assertEqual(m.b[1].getname(fully_qualified=False), "b[1]") - self.assertEqual(m.b[2].c[2,4].getname(fully_qualified=False), - "c[2,4]") + self.assertEqual(m.b[2].c[2, 4].getname(fully_qualified=False), "c[2,4]") cache = {} self.assertEqual( - m.b[2].c[2,4].getname(fully_qualified=True, name_buffer=cache), - "b[2].c[2,4]") + m.b[2].c[2, 4].getname(fully_qualified=True, name_buffer=cache), + "b[2].c[2,4]", + ) self.assertEqual(len(cache), 8) - self.assertIn(id(m.b[2].c[2,4]), cache) - self.assertIn(id(m.b[2].c[1,3]), cache) + self.assertIn(id(m.b[2].c[2, 4]), cache) + self.assertIn(id(m.b[2].c[1, 3]), cache) self.assertIn(id(m.b[2].c), cache) self.assertIn(id(m.b[2]), cache) self.assertIn(id(m.b[1]), cache) self.assertIn(id(m.b), cache) self.assertNotIn(id(m), cache) self.assertEqual( - m.b[2].c[1,3].getname(fully_qualified=True, name_buffer=cache), - "b[2].c[1,3]") + m.b[2].c[1, 3].getname(fully_qualified=True, name_buffer=cache), + "b[2].c[1,3]", + ) m.b[2]._component = None - self.assertEqual(m.b[2].getname(fully_qualified=True), - "[Unattached _BlockData]") + self.assertEqual( + m.b[2].getname(fully_qualified=True), "[Unattached _BlockData]" + ) # I think that getname() should do this: - #self.assertEqual(m.b[2].c[2,4].getname(fully_qualified=True), + # self.assertEqual(m.b[2].c[2,4].getname(fully_qualified=True), # "[Unattached _BlockData].c[2,4]") # but it doesn't match current behavior. I will file a PEP to # propose changing the behavior later and proceed to test # current behavior. - self.assertEqual(m.b[2].c[2,4].getname(fully_qualified=True), - "c[2,4]") + self.assertEqual(m.b[2].c[2, 4].getname(fully_qualified=True), "c[2,4]") - self.assertEqual(m.b[2].getname(fully_qualified=False), - "[Unattached _BlockData]") - self.assertEqual(m.b[2].c[2,4].getname(fully_qualified=False), - "c[2,4]") + self.assertEqual( + m.b[2].getname(fully_qualified=False), "[Unattached _BlockData]" + ) + self.assertEqual(m.b[2].c[2, 4].getname(fully_qualified=False), "c[2,4]") # Cached names still work... self.assertEqual( - m.b[2].getname(fully_qualified=True, name_buffer=cache), - "b[2]") + m.b[2].getname(fully_qualified=True, name_buffer=cache), "b[2]" + ) self.assertEqual( - m.b[2].c[1,3].getname(fully_qualified=True, name_buffer=cache), - "b[2].c[1,3]") + m.b[2].c[1, 3].getname(fully_qualified=True, name_buffer=cache), + "b[2].c[1,3]", + ) def test_component_data_pprint(self): m = ConcreteModel() @@ -91,31 +97,33 @@ def test_component_data_pprint(self): m.x = Var(m.a) stream = StringIO() m.x[2].pprint(ostream=stream) - correct_s = '{Member of x} : Size=3, Index=a\n ' \ - 'Key : Lower : Value : Upper : Fixed : Stale : Domain\n ' \ - '2 : None : None : None : False : True : Reals\n' + correct_s = ( + '{Member of x} : Size=3, Index=a\n ' + 'Key : Lower : Value : Upper : Fixed : Stale : Domain\n ' + '2 : None : None : None : False : True : Reals\n' + ) self.assertEqual(correct_s, stream.getvalue()) def test_is_reference(self): m = ConcreteModel() + class _NotSpecified(object): pass + m.comp = Component(ctype=_NotSpecified) self.assertFalse(m.comp.is_reference()) -class TestEnviron(unittest.TestCase): +class TestEnviron(unittest.TestCase): def test_components(self): self.assertGreaterEqual( set(ModelComponentFactory), - set(['Set', 'Param', 'Var', 'Objective', 'Constraint']) + set(['Set', 'Param', 'Var', 'Objective', 'Constraint']), ) def test_sets(self): - self.assertGreaterEqual( - set(GlobalSets), - set(['Reals', 'Integers', 'Boolean']) - ) + self.assertGreaterEqual(set(GlobalSets), set(['Reals', 'Integers', 'Boolean'])) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_componentuid.py b/pyomo/core/tests/unit/test_componentuid.py index c518d0ffe65..1c9b3c444bf 100644 --- a/pyomo/core/tests/unit/test_componentuid.py +++ b/pyomo/core/tests/unit/test_componentuid.py @@ -18,7 +18,14 @@ import pyomo.common.unittest as unittest from pyomo.environ import ( - ConcreteModel, Block, Var, Set, Param, Constraint, Any, ComponentUID, + ConcreteModel, + Block, + Var, + Set, + Param, + Constraint, + Any, + ComponentUID, Reference, ) from pyomo.core.base.indexed_component import IndexedComponent @@ -27,175 +34,140 @@ _star = slice(None) -_Foo = namedtuple('_Foo', ['x','yy']) +_Foo = namedtuple('_Foo', ['x', 'yy']) -class TestComponentUID(unittest.TestCase): +class TestComponentUID(unittest.TestCase): def setUp(self): self.m = ConcreteModel() m = self.m m.a = Param() - m.s = Set(initialize=[1,'2',3]) + m.s = Set(initialize=[1, '2', 3]) m.b = Block(m.s, m.s) - m.b[1,1].c = Block() - m.b[1,'2'].c = Block() - m.b[1,'2'].c.a = Param(m.s, initialize=3, mutable=True) + m.b[1, 1].c = Block() + m.b[1, '2'].c = Block() + m.b[1, '2'].c.a = Param(m.s, initialize=3, mutable=True) def tearDown(self): self.m = None def test_genFromComponent_simple(self): cuid = ComponentUID(self.m.a) - self.assertEqual(cuid._cids, (('a',tuple()),)) + self.assertEqual(cuid._cids, (('a', tuple()),)) def test_genFromComponent_nested(self): - cuid = ComponentUID(self.m.b[1,'2'].c.a[3]) - self.assertEqual( - cuid._cids, - (('b',(1,'2')), ('c',tuple()), ('a',(3,))) ) + cuid = ComponentUID(self.m.b[1, '2'].c.a[3]) + self.assertEqual(cuid._cids, (('b', (1, '2')), ('c', tuple()), ('a', (3,)))) def test_genFromComponent_indexed(self): - cuid = ComponentUID(self.m.b[1,'2'].c.a) - self.assertEqual( - cuid._cids, - (('b',(1,'2')), ('c',tuple()), ('a',())) ) + cuid = ComponentUID(self.m.b[1, '2'].c.a) + self.assertEqual(cuid._cids, (('b', (1, '2')), ('c', tuple()), ('a', ()))) def test_genFromComponent_nameBuffer(self): buf = {} - cuid = ComponentUID(self.m.b[1,'2'].c.a, cuid_buffer=buf) - self.assertEqual( - cuid._cids, - (('b',(1,'2')), ('c',tuple()), ('a',())) ) + cuid = ComponentUID(self.m.b[1, '2'].c.a, cuid_buffer=buf) + self.assertEqual(cuid._cids, (('b', (1, '2')), ('c', tuple()), ('a', ()))) self.assertEqual(len(buf), 9) for s1 in self.m.s: for s2 in self.m.s: - _id = id(self.m.b[s1,s2]) + _id = id(self.m.b[s1, s2]) self.assertIn(_id, buf) - self.assertEqual(buf[_id], ('b',(s1,s2))) + self.assertEqual(buf[_id], ('b', (s1, s2))) def test_genFromComponent_context(self): - cuid = ComponentUID(self.m.b[1,'2'].c.a, context=self.m.b[1,'2']) - self.assertEqual( - cuid._cids, - (('c',tuple()), ('a',())) ) + cuid = ComponentUID(self.m.b[1, '2'].c.a, context=self.m.b[1, '2']) + self.assertEqual(cuid._cids, (('c', tuple()), ('a', ()))) with self.assertRaisesRegex( - ValueError, - r"Context 'b\[1,'2'\]' does not apply to component 's'"): - ComponentUID(self.m.s, context=self.m.b[1,'2']) + ValueError, r"Context 'b\[1,'2'\]' does not apply to component 's'" + ): + ComponentUID(self.m.s, context=self.m.b[1, '2']) with self.assertRaisesRegex( - ValueError, - "Context is not allowed when initializing a ComponentUID " - "object from a string type"): - ComponentUID("b[1,2].c.a[2]", context=self.m.b[1,'2']) + ValueError, + "Context is not allowed when initializing a ComponentUID " + "object from a string type", + ): + ComponentUID("b[1,2].c.a[2]", context=self.m.b[1, '2']) def test_parseFromString(self): cuid = ComponentUID('b[1,2].c.a[2]') - self.assertEqual( - cuid._cids, - (('b',(1,2)), ('c',tuple()), ('a',(2,))) ) + self.assertEqual(cuid._cids, (('b', (1, 2)), ('c', tuple()), ('a', (2,)))) def test_parseFromString_singleQuote(self): cuid = ComponentUID('b[1,\'2\'].c.a[2]') - self.assertEqual( - cuid._cids, - (('b',(1,'2')), ('c',tuple()), ('a',(2,))) ) + self.assertEqual(cuid._cids, (('b', (1, '2')), ('c', tuple()), ('a', (2,)))) def test_parseFromString_doubleQuote(self): cuid = ComponentUID('b[1,\"2\"].c.a[2]') - self.assertEqual( - cuid._cids, - (('b',(1,'2')), ('c',tuple()), ('a',(2,))) ) + self.assertEqual(cuid._cids, (('b', (1, '2')), ('c', tuple()), ('a', (2,)))) def test_parseFromString_typeID(self): cuid = ComponentUID('b[#1,$2].c.a[2]') - self.assertEqual( - cuid._cids, - (('b',(1,'2')), ('c',tuple()), ('a',(2,))) ) + self.assertEqual(cuid._cids, (('b', (1, '2')), ('c', tuple()), ('a', (2,)))) def test_parseFromString_wildcard_1(self): cuid = ComponentUID('b[**].c.a[*]') self.assertEqual( - cuid._cids, - (('b',(Ellipsis,)), ('c',tuple()), ('a',(_star,))) ) + cuid._cids, (('b', (Ellipsis,)), ('c', tuple()), ('a', (_star,))) + ) def test_parseFromString_wildcard_2(self): cuid = ComponentUID('b[*,*].c.a[*]') self.assertEqual( - cuid._cids, - (('b',(_star, _star)), ('c',tuple()), ('a',(_star,))) ) + cuid._cids, (('b', (_star, _star)), ('c', tuple()), ('a', (_star,))) + ) def test_parseFromString_spaces(self): cuid = ComponentUID('x[a b,c d]') - self.assertEqual( - cuid._cids, - (('x',('a b', 'c d')), )) + self.assertEqual(cuid._cids, (('x', ('a b', 'c d')),)) cuid = ComponentUID("x['a b',\"c d\"]") - self.assertEqual( - cuid._cids, - (('x',('a b', 'c d')), )) + self.assertEqual(cuid._cids, (('x', ('a b', 'c d')),)) cuid = ComponentUID('x[a b, c d]') - self.assertEqual( - cuid._cids, - (('x',('a b', 'c d')), )) + self.assertEqual(cuid._cids, (('x', ('a b', 'c d')),)) cuid = ComponentUID("x[ a b , 'c d' ]") - self.assertEqual( - cuid._cids, - (('x',('a b', 'c d')), )) + self.assertEqual(cuid._cids, (('x', ('a b', 'c d')),)) def test_parseFromRepr1(self): cuid = ComponentUID('b:1,2.c.a:2') - self.assertEqual( - cuid._cids, - (('b',(1,2)), ('c',tuple()), ('a',(2,))) ) + self.assertEqual(cuid._cids, (('b', (1, 2)), ('c', tuple()), ('a', (2,)))) def test_parseFromRepr1_singleQuote(self): cuid = ComponentUID('b:1,\'2\'.c.a:2') - self.assertEqual( - cuid._cids, - (('b',(1,'2')), ('c',tuple()), ('a',(2,))) ) + self.assertEqual(cuid._cids, (('b', (1, '2')), ('c', tuple()), ('a', (2,)))) def test_parseFromRepr1_doubleQuote(self): cuid = ComponentUID('b:1,\"2\".c.a:2') - self.assertEqual( - cuid._cids, - (('b',(1,'2')), ('c',tuple()), ('a',(2,))) ) + self.assertEqual(cuid._cids, (('b', (1, '2')), ('c', tuple()), ('a', (2,)))) def test_parseFromRepr1_typeID(self): cuid = ComponentUID('b:#1,$2.c.a:2') - self.assertEqual( - cuid._cids, - (('b',(1,'2')), ('c',tuple()), ('a',(2,))) ) + self.assertEqual(cuid._cids, (('b', (1, '2')), ('c', tuple()), ('a', (2,)))) def test_parseFromRepr1_wildcard_1(self): cuid = ComponentUID('b:**.c.a:*') self.assertEqual( - cuid._cids, - (('b',(Ellipsis,)), ('c',tuple()), ('a',(_star,))) ) + cuid._cids, (('b', (Ellipsis,)), ('c', tuple()), ('a', (_star,))) + ) def test_parseFromRepr1_wildcard_2(self): cuid = ComponentUID('b:*,*.c.a:*') self.assertEqual( - cuid._cids, - (('b',(_star, _star)), ('c',tuple()), ('a',(_star,))) ) + cuid._cids, (('b', (_star, _star)), ('c', tuple()), ('a', (_star,))) + ) def test_parseFromRepr2_lexError(self): - cuid = ComponentUID('') # Bogus instance to access parser - with self.assertRaisesRegex( - IOError, "ERROR: Token ':' Line 1 Column 1"): + cuid = ComponentUID('') # Bogus instance to access parser + with self.assertRaisesRegex(IOError, "ERROR: Token ':' Line 1 Column 1"): list(cuid._parse_cuid_v2(':')) - with self.assertRaisesRegex( - IOError, "ERROR: Token '\n].b:' Line 1 Column 3"): + with self.assertRaisesRegex(IOError, "ERROR: Token '\n].b:' Line 1 Column 3"): list(cuid._parse_cuid_v2('a[\n].b:')) def test_escapeChars(self): ref = r"b['a\n.b\\'].x" cuid = ComponentUID(ref) - self.assertEqual( - cuid._cids, - (('b',('a\n.b\\',)), ('x',tuple())) ) + self.assertEqual(cuid._cids, (('b', ('a\n.b\\',)), ('x', tuple()))) m = ConcreteModel() m.b = Block(['a\n.b\\']) @@ -213,18 +185,14 @@ def test_nonIntNumber(self): ref = r"b[inf].x" cuid = ComponentUID(x) - self.assertEqual( - cuid._cids, - (('b',(inf,)), ('x',tuple())) ) + self.assertEqual(cuid._cids, (('b', (inf,)), ('x', tuple()))) self.assertTrue(cuid.matches(x)) self.assertEqual(repr(ComponentUID(x)), ref) self.assertEqual(str(ComponentUID(x)), ref) cuid = ComponentUID(ref) - self.assertEqual( - cuid._cids, - (('b',(inf,)), ('x',tuple())) ) + self.assertEqual(cuid._cids, (('b', (inf,)), ('x', tuple()))) self.assertTrue(cuid.matches(x)) self.assertEqual(repr(ComponentUID(x)), ref) @@ -232,9 +200,7 @@ def test_nonIntNumber(self): ref = r"b:#inf.x" cuid = ComponentUID(ref) - self.assertEqual( - cuid._cids, - (('b',(inf,)), ('x',tuple())) ) + self.assertEqual(cuid._cids, (('b', (inf,)), ('x', tuple()))) self.assertTrue(cuid.matches(x)) self.assertEqual(ComponentUID(x).get_repr(1), ref) @@ -246,18 +212,14 @@ def test_nonIntNumber(self): # cuid = ComponentUID(x) - self.assertEqual( - cuid._cids, - (('b',('inf',)), ('x',tuple())) ) + self.assertEqual(cuid._cids, (('b', ('inf',)), ('x', tuple()))) self.assertTrue(cuid.matches(x)) self.assertEqual(repr(ComponentUID(x)), ref) self.assertEqual(str(ComponentUID(x)), ref) cuid = ComponentUID(ref) - self.assertEqual( - cuid._cids, - (('b',('inf',)), ('x',tuple())) ) + self.assertEqual(cuid._cids, (('b', ('inf',)), ('x', tuple()))) self.assertTrue(cuid.matches(x)) self.assertEqual(repr(ComponentUID(x)), ref) @@ -265,31 +227,29 @@ def test_nonIntNumber(self): ref = r"b:$inf.x" cuid = ComponentUID(ref) - self.assertEqual( - cuid._cids, - (('b',('inf',)), ('x',tuple())) ) + self.assertEqual(cuid._cids, (('b', ('inf',)), ('x', tuple()))) self.assertTrue(cuid.matches(x)) self.assertEqual(ComponentUID(x).get_repr(1), ref) self.assertEqual(str(ComponentUID(x)), r"b['inf'].x") - def test_find_component_deprecated(self): - ref = self.m.b[1,'2'].c.a[3] + ref = self.m.b[1, '2'].c.a[3] cuid = ComponentUID(ref) DEP_OUT = StringIO() with LoggingIntercept(DEP_OUT, 'pyomo.core'): self.assertTrue(cuid.find_component(self.m) is ref) - self.assertIn('ComponentUID.find_component() is deprecated.', - DEP_OUT.getvalue()) + self.assertIn( + 'ComponentUID.find_component() is deprecated.', DEP_OUT.getvalue() + ) def test_find_explicit_exists(self): - ref = self.m.b[1,'2'].c.a[3] + ref = self.m.b[1, '2'].c.a[3] cuid = ComponentUID(ref) self.assertTrue(cuid.find_component_on(self.m) is ref) def test_find_component_exists_1(self): - ref = self.m.b[1,'2'].c.a + ref = self.m.b[1, '2'].c.a cuid = ComponentUID(ref) self.assertTrue(cuid.find_component_on(self.m) is ref) @@ -299,7 +259,7 @@ def test_find_wildcard(self): self.assertIs(comp.ctype, Param) cList = list(comp.values()) self.assertEqual(len(cList), 3) - self.assertEqual(cList, list(self.m.b[1,'2'].c.a[:])) + self.assertEqual(cList, list(self.m.b[1, '2'].c.a[:])) cuid = ComponentUID('b[*,*]') comp = cuid.find_component_on(self.m) @@ -315,7 +275,7 @@ def test_find_wildcard_partial_exists(self): self.assertIs(comp.ctype, Param) cList = list(comp.values()) self.assertEqual(len(cList), 3) - self.assertEqual(cList, list(self.m.b[1,'2'].c.a[:])) + self.assertEqual(cList, list(self.m.b[1, '2'].c.a[:])) # improper Reference: to IndexedComponent cuid = ComponentUID('b[*,*].c.a') @@ -323,7 +283,7 @@ def test_find_wildcard_partial_exists(self): self.assertIs(comp.ctype, IndexedComponent) cList = list(comp.values()) self.assertEqual(len(cList), 1) - self.assertIs(cList[0], self.m.b[1,'2'].c.a) + self.assertIs(cList[0], self.m.b[1, '2'].c.a) def test_find_wildcard_not_exists(self): cuid = ComponentUID('b[*,*].c.x') @@ -351,7 +311,7 @@ def test_find_explicit_notExists_2(self): self.assertTrue(cuid.find_component_on(self.m) is None) def test_printers_1(self): - cuid = ComponentUID(self.m.b[1,'2'].c.a[3]) + cuid = ComponentUID(self.m.b[1, '2'].c.a[3]) s = "b[1,'2'].c.a[3]" r1 = "b:#1,$2.c.a:#3" r2 = "b[1,'2'].c.a[3]" @@ -360,7 +320,8 @@ def test_printers_1(self): self.assertEqual(cuid.get_repr(1), r1) self.assertEqual(cuid.get_repr(2), r2) with self.assertRaisesRegex( - ValueError, "Invalid repr version '3'; expected 1 or 2"): + ValueError, "Invalid repr version '3'; expected 1 or 2" + ): cuid.get_repr(3) def test_printers_2(self): @@ -394,9 +355,9 @@ def test_printers_4(self): self.assertEqual(cuid.get_repr(2), r2) def test_matches_explicit(self): - cuid = ComponentUID(self.m.b[1,'2'].c.a[3]) - self.assertTrue(cuid.matches(self.m.b[1,'2'].c.a[3])) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a['2'])) + cuid = ComponentUID(self.m.b[1, '2'].c.a[3]) + self.assertTrue(cuid.matches(self.m.b[1, '2'].c.a[3])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a['2'])) # def test_matches_implicit(self): # cuid = ComponentUID('b:1,2.c.a:3') @@ -405,114 +366,120 @@ def test_matches_explicit(self): def test_matches_explicit_1(self): cuid = ComponentUID('b:#1,$2.c.a:$3') - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a[3])) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a['2'])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a[3])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a['2'])) def test_matches_explicit_2(self): cuid = ComponentUID('b:#1,#2.c.a:#3') - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a[3])) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a['2'])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a[3])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a['2'])) def test_matches_wildcard_1(self): cuid = ComponentUID('b:**.c.a:*') - self.assertTrue(cuid.matches(self.m.b[1,'2'].c.a[3])) - self.assertTrue(cuid.matches(self.m.b[1,'2'].c.a['2'])) + self.assertTrue(cuid.matches(self.m.b[1, '2'].c.a[3])) + self.assertTrue(cuid.matches(self.m.b[1, '2'].c.a['2'])) def test_matches_wildcard_2(self): cuid = ComponentUID('b:*,*.c.a:**') - self.assertTrue(cuid.matches(self.m.b[1,'2'].c.a[3])) - self.assertTrue(cuid.matches(self.m.b[1,'2'].c.a['2'])) + self.assertTrue(cuid.matches(self.m.b[1, '2'].c.a[3])) + self.assertTrue(cuid.matches(self.m.b[1, '2'].c.a['2'])) def test_matches_wildcard_3(self): cuid = ComponentUID('b:*,*.c.a:*,*') - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a[3])) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a['2'])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a[3])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a['2'])) def test_matches_mismatch_name(self): cuid = ComponentUID('b:*,*.d') - self.assertFalse(cuid.matches(self.m.b[1,'2'].c)) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c)) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c)) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c)) def test_matches_mismatch_1(self): cuid = ComponentUID('b:*,*.c.a:*') - self.assertFalse(cuid.matches(self.m.b[1,'2'].c)) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c)) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c)) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c)) def test_matches_mismatch_2(self): cuid = ComponentUID('b:*,*.c') - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a[3])) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a['2'])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a[3])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a['2'])) def test_matches_mismatch_3(self): cuid = ComponentUID('b:*,*,*.c.a:*') - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a[3])) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c.a['2'])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a[3])) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c.a['2'])) def test_matches_ellipsis1(self): cuid = ComponentUID('b[**,1].c') - self.assertTrue(cuid.matches(self.m.b[1,1].c)) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c)) + self.assertTrue(cuid.matches(self.m.b[1, 1].c)) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c)) def test_matches_ellipsis2(self): cuid = ComponentUID('b[**,1,1].c') - self.assertTrue(cuid.matches(self.m.b[1,1].c)) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c)) + self.assertTrue(cuid.matches(self.m.b[1, 1].c)) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c)) def test_matches_ellipsis3(self): cuid = ComponentUID('b[**,1,1,3].c') - self.assertFalse(cuid.matches(self.m.b[1,1].c)) - self.assertFalse(cuid.matches(self.m.b[1,'2'].c)) + self.assertFalse(cuid.matches(self.m.b[1, 1].c)) + self.assertFalse(cuid.matches(self.m.b[1, '2'].c)) def test_matches_ellipsis4(self): cuid = ComponentUID('b[**,1,*].c') - self.assertTrue(cuid.matches(self.m.b[1,1].c)) - self.assertTrue(cuid.matches(self.m.b[1,'2'].c)) + self.assertTrue(cuid.matches(self.m.b[1, 1].c)) + self.assertTrue(cuid.matches(self.m.b[1, '2'].c)) - def test_list_components_dne_1(self): + def test_list_components_done_1(self): cuid = ComponentUID('b:*,*,*.c.a:*') ref = [] - cList = [ str(ComponentUID(x)) for x in cuid.list_components(self.m) ] + cList = [str(ComponentUID(x)) for x in cuid.list_components(self.m)] self.assertEqual(sorted(cList), sorted(ref)) - def test_list_components_dne_2(self): + def test_list_components_done_2(self): cuid = ComponentUID('b:*,*.c:#1.a:*') ref = [] - cList = [ str(ComponentUID(x)) for x in cuid.list_components(self.m) ] + cList = [str(ComponentUID(x)) for x in cuid.list_components(self.m)] self.assertEqual(sorted(cList), sorted(ref)) def test_list_components_scalar(self): cuid = ComponentUID('b:1,$2.c.a:3') - ref = [ str(ComponentUID(self.m.b[1,'2'].c.a[3])) ] - cList = [ str(ComponentUID(x)) for x in cuid.list_components(self.m) ] + ref = [str(ComponentUID(self.m.b[1, '2'].c.a[3]))] + cList = [str(ComponentUID(x)) for x in cuid.list_components(self.m)] self.assertEqual(sorted(cList), sorted(ref)) def test_list_components_wildcard_1(self): cuid = ComponentUID('b:**.c.a:3') - ref = [ str(ComponentUID(self.m.b[1,'2'].c.a[3])) ] - cList = [ str(ComponentUID(x)) for x in cuid.list_components(self.m) ] + ref = [str(ComponentUID(self.m.b[1, '2'].c.a[3]))] + cList = [str(ComponentUID(x)) for x in cuid.list_components(self.m)] self.assertEqual(sorted(cList), sorted(ref)) def test_list_components_wildcard_2(self): cuid = ComponentUID('b:*,*.c.a:*') - ref = [ str(ComponentUID(self.m.b[1,'2'].c.a[1])), - str(ComponentUID(self.m.b[1,'2'].c.a['2'])), - str(ComponentUID(self.m.b[1,'2'].c.a[3])) ] - cList = [ str(ComponentUID(x)) for x in cuid.list_components(self.m) ] + ref = [ + str(ComponentUID(self.m.b[1, '2'].c.a[1])), + str(ComponentUID(self.m.b[1, '2'].c.a['2'])), + str(ComponentUID(self.m.b[1, '2'].c.a[3])), + ] + cList = [str(ComponentUID(x)) for x in cuid.list_components(self.m)] self.assertEqual(sorted(cList), sorted(ref)) def test_list_components_wildcard_3(self): cuid = ComponentUID('b:1,*.c') - ref = [ str(ComponentUID(self.m.b[1,1].c)), - str(ComponentUID(self.m.b[1,'2'].c)) ] - cList = [ str(ComponentUID(x)) for x in cuid.list_components(self.m) ] + ref = [ + str(ComponentUID(self.m.b[1, 1].c)), + str(ComponentUID(self.m.b[1, '2'].c)), + ] + cList = [str(ComponentUID(x)) for x in cuid.list_components(self.m)] self.assertEqual(sorted(cList), sorted(ref)) def test_list_components_wildcard_4(self): cuid = ComponentUID('b:1,*') - ref = [ str(ComponentUID(self.m.b[1,1])), - str(ComponentUID(self.m.b[1,'2'])), - str(ComponentUID(self.m.b[1,3])) ] - cList = [ str(ComponentUID(x)) for x in cuid.list_components(self.m) ] + ref = [ + str(ComponentUID(self.m.b[1, 1])), + str(ComponentUID(self.m.b[1, '2'])), + str(ComponentUID(self.m.b[1, 3])), + ] + cList = [str(ComponentUID(x)) for x in cuid.list_components(self.m)] self.assertEqual(sorted(cList), sorted(ref)) def test_in_container(self): @@ -521,59 +488,59 @@ def test_in_container(self): c = ComponentUID('baz.bar') D = {a: 1, b: 2} - self.assertTrue( a in D ) - self.assertTrue( b in D ) - self.assertFalse( c in D ) + self.assertTrue(a in D) + self.assertTrue(b in D) + self.assertFalse(c in D) # Verify that hashing is not being done by id() - self.assertTrue( ComponentUID('foo.bar[*]') in D ) - self.assertTrue( ComponentUID('baz') in D ) + self.assertTrue(ComponentUID('foo.bar[*]') in D) + self.assertTrue(ComponentUID('baz') in D) def test_comparisons(self): a = ComponentUID('foo.x[*]') b = ComponentUID('baz') - self.assertFalse( a < b ) - self.assertFalse( a <= b ) - self.assertTrue ( a > b ) - self.assertTrue ( a >= b ) - self.assertFalse( a == b ) - self.assertTrue ( a != b ) - - self.assertTrue ( b < a ) - self.assertTrue ( b <= a ) - self.assertFalse( b > a ) - self.assertFalse( b >= a ) - self.assertFalse( b == a ) - self.assertTrue ( b != a ) - - self.assertFalse( a < ComponentUID('baz') ) - self.assertFalse( a <= ComponentUID('baz') ) - self.assertTrue ( a > ComponentUID('baz') ) - self.assertTrue ( a >= ComponentUID('baz') ) - self.assertFalse( a == ComponentUID('baz') ) - self.assertTrue ( a != ComponentUID('baz') ) - - self.assertTrue ( ComponentUID('baz') < a ) - self.assertTrue ( ComponentUID('baz') <= a ) - self.assertFalse( ComponentUID('baz') > a ) - self.assertFalse( ComponentUID('baz') >= a ) - self.assertFalse( ComponentUID('baz') == a ) - self.assertTrue ( ComponentUID('baz') != a ) - - self.assertFalse( b < b ) - self.assertTrue ( b <= b ) - self.assertFalse( b > b ) - self.assertTrue ( b >= b ) - self.assertTrue ( b == b ) - self.assertFalse( b != b ) - - self.assertFalse( ComponentUID('baz') < b ) - self.assertTrue ( ComponentUID('baz') <= b ) - self.assertFalse( ComponentUID('baz') > b ) - self.assertTrue ( ComponentUID('baz') >= b ) - self.assertTrue ( ComponentUID('baz') == b ) - self.assertFalse( ComponentUID('baz') != b ) + self.assertFalse(a < b) + self.assertFalse(a <= b) + self.assertTrue(a > b) + self.assertTrue(a >= b) + self.assertFalse(a == b) + self.assertTrue(a != b) + + self.assertTrue(b < a) + self.assertTrue(b <= a) + self.assertFalse(b > a) + self.assertFalse(b >= a) + self.assertFalse(b == a) + self.assertTrue(b != a) + + self.assertFalse(a < ComponentUID('baz')) + self.assertFalse(a <= ComponentUID('baz')) + self.assertTrue(a > ComponentUID('baz')) + self.assertTrue(a >= ComponentUID('baz')) + self.assertFalse(a == ComponentUID('baz')) + self.assertTrue(a != ComponentUID('baz')) + + self.assertTrue(ComponentUID('baz') < a) + self.assertTrue(ComponentUID('baz') <= a) + self.assertFalse(ComponentUID('baz') > a) + self.assertFalse(ComponentUID('baz') >= a) + self.assertFalse(ComponentUID('baz') == a) + self.assertTrue(ComponentUID('baz') != a) + + self.assertFalse(b < b) + self.assertTrue(b <= b) + self.assertFalse(b > b) + self.assertTrue(b >= b) + self.assertTrue(b == b) + self.assertFalse(b != b) + + self.assertFalse(ComponentUID('baz') < b) + self.assertTrue(ComponentUID('baz') <= b) + self.assertFalse(ComponentUID('baz') > b) + self.assertTrue(ComponentUID('baz') >= b) + self.assertTrue(ComponentUID('baz') == b) + self.assertFalse(ComponentUID('baz') != b) def test_comparisons_lt(self): a = ComponentUID('foo.x[*]') @@ -582,29 +549,29 @@ def test_comparisons_lt(self): aa = ComponentUID("foo.x['a']") a11 = ComponentUID('foo.x[1,1]') ae = ComponentUID('foo.x[**]') - self.assertTrue( a < ae ) - self.assertTrue( a1 < ae ) - self.assertTrue( a1 < a ) - self.assertTrue( a1 < a2 ) - self.assertTrue( a1 < aa ) - self.assertTrue( a1 < a11 ) - self.assertTrue( a11 < a2 ) - self.assertFalse( ae < a ) - self.assertFalse( ae < a1 ) - self.assertFalse( a < a1 ) - self.assertFalse( a2 < a1 ) - self.assertFalse( aa < a1 ) - self.assertFalse( a11 < a1 ) - self.assertFalse( a2 < a11 ) + self.assertTrue(a < ae) + self.assertTrue(a1 < ae) + self.assertTrue(a1 < a) + self.assertTrue(a1 < a2) + self.assertTrue(a1 < aa) + self.assertTrue(a1 < a11) + self.assertTrue(a11 < a2) + self.assertFalse(ae < a) + self.assertFalse(ae < a1) + self.assertFalse(a < a1) + self.assertFalse(a2 < a1) + self.assertFalse(aa < a1) + self.assertFalse(a11 < a1) + self.assertFalse(a2 < a11) x = ComponentUID('foo.x') xy = ComponentUID('foo.x.y') - self.assertTrue( x < xy ) - self.assertFalse( xy < x ) + self.assertTrue(x < xy) + self.assertFalse(xy < x) with self.assertRaisesRegex( - TypeError, "'<' not supported between instances of " - "'ComponentUID' and 'int'"): + TypeError, "'<' not supported between instances of 'ComponentUID' and 'int'" + ): a < 5 def test_comparisons_eq(self): @@ -615,17 +582,16 @@ def test_comparisons_eq(self): self.assertNotEqual(a, a1) self.assertNotEqual(a, 5) - def test_generate_cuid_string_map(self): model = Block(concrete=True) model.x = Var() - model.y = Var([1,2]) - model.V = Var([('a','b'),(1,'2'),(3,4)]) + model.y = Var([1, 2]) + model.V = Var([('a', 'b'), (1, '2'), (3, 4)]) model.b = Block(concrete=True) - model.b.z = Var([1,'2']) - setattr(model.b, '.H', Var(['a',2])) - model.B = Block(['a',2], concrete=True) - setattr(model.B['a'],'.k', Var()) + model.b.z = Var([1, '2']) + setattr(model.b, '.H', Var(['a', 2])) + model.B = Block(['a', 2], concrete=True) + setattr(model.B['a'], '.k', Var()) model.B[2].b = Block() model.B[2].b.x = Var() model.add_component('c tuple', Constraint(Any)) @@ -637,131 +603,139 @@ def test_generate_cuid_string_map(self): ) self.assertEqual(len(cuids[0]), 29) self.assertEqual(len(cuids[1]), 29) - for obj in [model, - model.x, - model.y, - model.y_index, - model.y[1], - model.y[2], - model.V, - model.V_index, - model.V['a','b'], - model.V[1,'2'], - model.V[3,4], - model.b, - model.b.z, - model.b.z_index, - model.b.z[1], - model.b.z['2'], - getattr(model.b, '.H'), - getattr(model.b, '.H_index'), - getattr(model.b, '.H')['a'], - getattr(model.b, '.H')[2], - model.B, - model.B_index, - model.B['a'], - getattr(model.B['a'],'.k'), - model.B[2], - model.B[2].b, - model.B[2].b.x, - model.component('c tuple')[(1,)]]: + for obj in [ + model, + model.x, + model.y, + model.y_index, + model.y[1], + model.y[2], + model.V, + model.V_index, + model.V['a', 'b'], + model.V[1, '2'], + model.V[3, 4], + model.b, + model.b.z, + model.b.z_index, + model.b.z[1], + model.b.z['2'], + getattr(model.b, '.H'), + getattr(model.b, '.H_index'), + getattr(model.b, '.H')['a'], + getattr(model.b, '.H')[2], + model.B, + model.B_index, + model.B['a'], + getattr(model.B['a'], '.k'), + model.B[2], + model.B[2].b, + model.B[2].b.x, + model.component('c tuple')[(1,)], + ]: self.assertEqual(ComponentUID(obj).get_repr(1), cuids[0][obj]) self.assertEqual(repr(ComponentUID(obj)), cuids[1][obj]) cuids = ( - ComponentUID.generate_cuid_string_map(model, descend_into=False, - repr_version=1), + ComponentUID.generate_cuid_string_map( + model, descend_into=False, repr_version=1 + ), ComponentUID.generate_cuid_string_map(model, descend_into=False), ) self.assertEqual(len(cuids[0]), 18) self.assertEqual(len(cuids[1]), 18) - for obj in [model, - model.x, - model.y, - model.y_index, - model.y[1], - model.y[2], - model.V, - model.V_index, - model.V['a','b'], - model.V[1,'2'], - model.V[3,4], - model.b, - model.B, - model.B_index, - model.B['a'], - model.B[2], - model.component('c tuple')[(1,)]]: + for obj in [ + model, + model.x, + model.y, + model.y_index, + model.y[1], + model.y[2], + model.V, + model.V_index, + model.V['a', 'b'], + model.V[1, '2'], + model.V[3, 4], + model.b, + model.B, + model.B_index, + model.B['a'], + model.B[2], + model.component('c tuple')[(1,)], + ]: self.assertEqual(ComponentUID(obj).get_repr(1), cuids[0][obj]) self.assertEqual(repr(ComponentUID(obj)), cuids[1][obj]) cuids = ( - ComponentUID.generate_cuid_string_map(model, ctype=Var, - repr_version=1), + ComponentUID.generate_cuid_string_map(model, ctype=Var, repr_version=1), ComponentUID.generate_cuid_string_map(model, ctype=Var), ) self.assertEqual(len(cuids[0]), 22) self.assertEqual(len(cuids[1]), 22) - for obj in [model, - model.x, - model.y, - model.y[1], - model.y[2], - model.V, - model.V['a','b'], - model.V[1,'2'], - model.V[3,4], - model.b, - model.b.z, - model.b.z[1], - model.b.z['2'], - getattr(model.b, '.H'), - getattr(model.b, '.H')['a'], - getattr(model.b, '.H')[2], - model.B, - model.B['a'], - getattr(model.B['a'],'.k'), - model.B[2], - model.B[2].b, - model.B[2].b.x]: + for obj in [ + model, + model.x, + model.y, + model.y[1], + model.y[2], + model.V, + model.V['a', 'b'], + model.V[1, '2'], + model.V[3, 4], + model.b, + model.b.z, + model.b.z[1], + model.b.z['2'], + getattr(model.b, '.H'), + getattr(model.b, '.H')['a'], + getattr(model.b, '.H')[2], + model.B, + model.B['a'], + getattr(model.B['a'], '.k'), + model.B[2], + model.B[2].b, + model.B[2].b.x, + ]: self.assertEqual(ComponentUID(obj).get_repr(1), cuids[0][obj]) self.assertEqual(repr(ComponentUID(obj)), cuids[1][obj]) cuids = ( ComponentUID.generate_cuid_string_map( - model, ctype=Var, descend_into=False, repr_version=1), - ComponentUID.generate_cuid_string_map( - model, ctype=Var, descend_into=False), + model, ctype=Var, descend_into=False, repr_version=1 + ), + ComponentUID.generate_cuid_string_map(model, ctype=Var, descend_into=False), ) self.assertEqual(len(cuids[0]), 9) self.assertEqual(len(cuids[1]), 9) - for obj in [model, - model.x, - model.y, - model.y[1], - model.y[2], - model.V, - model.V['a','b'], - model.V[1,'2'], - model.V[3,4]]: + for obj in [ + model, + model.x, + model.y, + model.y[1], + model.y[2], + model.V, + model.V['a', 'b'], + model.V[1, '2'], + model.V[3, 4], + ]: self.assertEqual(ComponentUID(obj).get_repr(1), cuids[0][obj]) self.assertEqual(repr(ComponentUID(obj)), cuids[1][obj]) def test_pickle(self): a = ComponentUID("b[1,'2'].c") b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) a = ComponentUID("b[1,*].c") b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) a = ComponentUID("b[**,*].c") b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) def test_findComponentOn_nestedTuples(self): # Tests for #1069 @@ -789,7 +763,7 @@ def test_findComponentOn_nestedTuples(self): m = ConcreteModel() m.x = Var() - m.c = Constraint([0,1]) + m.c = Constraint([0, 1]) m.c[0] = m.x >= 0 m.c[(1,)] = m.x >= 1 self.assertIs(ComponentUID(m.c[0]).find_component_on(m), m.c[0]) @@ -819,20 +793,16 @@ def test_findComponentOn_nestedTuples(self): buf = {} ref = m.b[0].c[0].x - self.assertIs( - ComponentUID(ref, cuid_buffer=buf).find_component_on(m), ref) + self.assertIs(ComponentUID(ref, cuid_buffer=buf).find_component_on(m), ref) self.assertEqual(len(buf), 3) ref = 'm.b[0].c[(0,)].x' - self.assertIsNone( - ComponentUID(ref, cuid_buffer=buf).find_component_on(m)) + self.assertIsNone(ComponentUID(ref, cuid_buffer=buf).find_component_on(m)) self.assertEqual(len(buf), 3) ref = m.b[(1,)].c[(1,)].x - self.assertIs( - ComponentUID(ref, cuid_buffer=buf).find_component_on(m), ref) + self.assertIs(ComponentUID(ref, cuid_buffer=buf).find_component_on(m), ref) self.assertEqual(len(buf), 4) ref = 'm.b[(1,)].c[1].x' - self.assertIsNone( - ComponentUID(ref, cuid_buffer=buf).find_component_on(m)) + self.assertIsNone(ComponentUID(ref, cuid_buffer=buf).find_component_on(m)) self.assertEqual(len(buf), 4) def test_pickle_index(self): @@ -853,7 +823,7 @@ def test_pickle_index(self): self.assertEqual(cuid, tmp) self.assertIs(tmp.find_component_on(m), m.b[idx].x) - idx = _Foo(1,'a') + idx = _Foo(1, 'a') m.b[idx].x = Var() cuid = ComponentUID(m.b[idx].x) # Note that the pickle string for namedtuple changes between @@ -871,7 +841,7 @@ def test_pickle_index(self): self.assertEqual(cuid, tmp) self.assertIs(tmp.find_component_on(m), m.b[idx].x) - idx = datetime(1,2,3) + idx = datetime(1, 2, 3) m.b[idx].x = Var() cuid = ComponentUID(m.b[idx].x) # Note that the pickle string for namedtuple changes between @@ -892,14 +862,16 @@ def test_pickle_index(self): def test_deprecated_ComponentUID_location(self): import pyomo.core.base.component as comp + self.assertNotIn('ComponentUID', dir(comp)) - warning = "DEPRECATED: the 'ComponentUID' class has been moved to " \ - "'pyomo.core.base.componentuid.ComponentUID'" + warning = ( + "DEPRECATED: the 'ComponentUID' class has been moved to " + "'pyomo.core.base.componentuid.ComponentUID'" + ) OUT = StringIO() with LoggingIntercept(OUT, 'pyomo.core'): - from pyomo.core.base.component import ComponentUID \ - as old_ComponentUID + from pyomo.core.base.component import ComponentUID as old_ComponentUID self.assertIn(warning, OUT.getvalue().replace('\n', ' ')) self.assertIs(old_ComponentUID, ComponentUID) @@ -912,65 +884,60 @@ def test_deprecated_ComponentUID_location(self): def _slice_model(self): m = ConcreteModel() - - m.d1_1 = Set(initialize=[1,2,3]) - m.d1_2 = Set(initialize=['a','b','c']) - m.d1_3 = Set(initialize=[1.1,1.2,1.3]) - m.d2 = Set(initialize=[('a',1), ('b',2)]) - m.dn = Set(initialize=[('c',3), ('d',4,5)], dimen=None) - + + m.d1_1 = Set(initialize=[1, 2, 3]) + m.d1_2 = Set(initialize=['a', 'b', 'c']) + m.d1_3 = Set(initialize=[1.1, 1.2, 1.3]) + m.d2 = Set(initialize=[('a', 1), ('b', 2)]) + m.dn = Set(initialize=[('c', 3), ('d', 4, 5)], dimen=None) + @m.Block() def b(b): - b.b = Block() - + @b.Block(m.d1_1) def b1(b1, i): b1.v = Var() b1.v1 = Var(m.d1_3) b1.v2 = Var(m.d1_1, m.d1_2) b1.vn = Var(m.dn, m.d1_2) - + @b.Block(m.d1_1, m.d1_2) def b2(b2, i, j): b2.v = Var() b2.v1 = Var(m.d1_3) b2.v2 = Var(m.d1_1, m.d1_2) b2.vn = Var(m.d1_1, m.dn, m.d1_2) - + @b.Block(m.d1_3, m.d2) def b3(b3, i, j, k): b3.v = Var() b3.v1 = Var(m.d1_3) b3.v2 = Var(m.d1_1, m.d1_2) b3.vn = Var(m.d1_1, m.dn, m.d2) - + # Don't think I can define a dim-None Block with # a rule unless normalize_index.flatten is False. b.bn = Block(m.d1_2, m.dn, m.d2) # NOTE: These blocks are only defined for 'a', ('a',1) # in the first and last "subsets" - b.bn['a','c',3,'a',1].v = Var() - b.bn['a','c',3,'a',1].v1 = Var(m.d1_3) - b.bn['a','c',3,'a',1].v2 = Var(m.d1_1, m.d1_2) - b.bn['a','c',3,'a',1].vn = Var(m.d1_1, m.dn, m.d2) - b.bn['a','d',4,5,'a',1].v = Var() - b.bn['a','d',4,5,'a',1].v1 = Var(m.d1_3) - b.bn['a','d',4,5,'a',1].v2 = Var(m.d1_1, m.d1_2) - b.bn['a','d',4,5,'a',1].vn = Var(m.d1_1, m.dn, m.d2) - + b.bn['a', 'c', 3, 'a', 1].v = Var() + b.bn['a', 'c', 3, 'a', 1].v1 = Var(m.d1_3) + b.bn['a', 'c', 3, 'a', 1].v2 = Var(m.d1_1, m.d1_2) + b.bn['a', 'c', 3, 'a', 1].vn = Var(m.d1_1, m.dn, m.d2) + b.bn['a', 'd', 4, 5, 'a', 1].v = Var() + b.bn['a', 'd', 4, 5, 'a', 1].v1 = Var(m.d1_3) + b.bn['a', 'd', 4, 5, 'a', 1].v2 = Var(m.d1_1, m.d1_2) + b.bn['a', 'd', 4, 5, 'a', 1].vn = Var(m.d1_1, m.dn, m.d2) + return m - + def assertListSameComponents(self, m, cuid1, cuid2): self.assertTrue(cuid1.list_components(m)) self.assertEqual( - len(list(cuid1.list_components(m))), - len(list(cuid2.list_components(m))) - ) - for c1, c2 in zip( - cuid1.list_components(m), - cuid2.list_components(m), - ): + len(list(cuid1.list_components(m))), len(list(cuid2.list_components(m))) + ) + for c1, c2 in zip(cuid1.list_components(m), cuid2.list_components(m)): self.assertIs(c1, c2) def test_cuid_from_slice_1(self): @@ -994,7 +961,7 @@ def test_cuid_from_slice_1(self): cuid = ComponentUID(_slice) self.assertEqual(cuid, cuid_str) - _slice = m.b.b2[:,'a'] + _slice = m.b.b2[:, 'a'] cuid_str = ComponentUID('b.b2[*,a]') cuid = ComponentUID(_slice) self.assertEqual(cuid, cuid_str) @@ -1006,21 +973,21 @@ def test_cuid_from_slice_1(self): cuid = ComponentUID(_slice) self.assertEqual(cuid, cuid_str) - _slice = m.b.b3[1.1,:,2] + _slice = m.b.b3[1.1, :, 2] cuid_str = ComponentUID('b.b3[1.1,*,2]') cuid = ComponentUID(_slice) self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b3[:,:,'b'] + _slice = m.b.b3[:, :, 'b'] cuid_str = ComponentUID('b.b3[*,*,b]') cuid = ComponentUID(_slice) self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b3[1.1,...] + _slice = m.b.b3[1.1, ...] cuid_str = ComponentUID('b.b3[1.1,**]') cuid = ComponentUID(_slice) self.assertEqual(cuid, cuid_str) @@ -1032,14 +999,14 @@ def test_cuid_from_slice_1(self): self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) - _slice = m.b.bn['a',:,:,'a',1] + _slice = m.b.bn['a', :, :, 'a', 1] cuid_str = ComponentUID('b.bn[a,*,*,a,1]') cuid = ComponentUID(_slice) self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.bn['a','c',3,:,:] + _slice = m.b.bn['a', 'c', 3, :, :] cuid_str = ComponentUID('b.bn[a,c,3,*,*]') cuid = ComponentUID(_slice) self.assertEqual(cuid, cuid_str) @@ -1068,89 +1035,89 @@ def test_cuid_from_slice_2(self): cuid_str = ComponentUID('b[*].b1[*].v') self.assertEqual(cuid, cuid_str) - _slice = m.b.b2[2,:].v + _slice = m.b.b2[2, :].v cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[2,*].v') self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b2[2,:].v1[:] + _slice = m.b.b2[2, :].v1[:] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[2,*].v1[*]') self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b2[2,:].v1[1.1] + _slice = m.b.b2[2, :].v1[1.1] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[2,*].v1[1.1]') self.assertEqual(str(cuid), str(cuid_str)) self.assertEqual(cuid, cuid_str) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b2[2,:].vn[1,...,:,'b'] + _slice = m.b.b2[2, :].vn[1, ..., :, 'b'] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[2,*].vn[1,**,*,b]') self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b2[2,:].vn[...,'b'] + _slice = m.b.b2[2, :].vn[..., 'b'] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[2,*].vn[**,b]') self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b2[2,:].vn[...,...] + _slice = m.b.b2[2, :].vn[..., ...] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[2,*].vn[**,**]') self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b2[2,:].vn[...] + _slice = m.b.b2[2, :].vn[...] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[2,*].vn[**]') self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b2[...].v2[:,'a'] + _slice = m.b.b2[...].v2[:, 'a'] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[**].v2[*,a]') self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b3[:,'a',:].v1 + _slice = m.b.b3[:, 'a', :].v1 cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b3[*,a,*].v1') self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b3[:,'a',:].v2[1,'a'] + _slice = m.b.b3[:, 'a', :].v2[1, 'a'] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b3[*,a,*].v2[1,a]') self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b3[:,'a',:].v2[1,:] + _slice = m.b.b3[:, 'a', :].v2[1, :] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b3[*,a,*].v2[1,*]') self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b3[:,'a',:].vn[1,:,:,'a',1] + _slice = m.b.b3[:, 'a', :].vn[1, :, :, 'a', 1] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b3[*,a,*].vn[1,*,*,a,1]') self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.bn['a','c',3,:,:].vn[1,:,3,'a',:] + _slice = m.b.bn['a', 'c', 3, :, :].vn[1, :, 3, 'a', :] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.bn[a,c,3,*,*].vn[1,*,3,a,*]') self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.bn[...].vn[1,:,3,'a',:] + _slice = m.b.bn[...].vn[1, :, 3, 'a', :] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.bn[**].vn[1,*,3,a,*]') self.assertEqual(str(cuid), str(cuid_str)) @@ -1176,13 +1143,13 @@ def test_cuid_from_slice_3(self): """ m = self._slice_model() - _slice = m.b[:].b3[:,'a',:].v2[1,:] + _slice = m.b[:].b3[:, 'a', :].v2[1, :] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b[*].b3[*,a,*].v2[1,*]') self.assertEqual(cuid, cuid_str) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b[:].b3[:,'a',:].v2 + _slice = m.b[:].b3[:, 'a', :].v2 cuid = ComponentUID(_slice) cuid_str = ComponentUID('b[*].b3[*,a,*].v2') self.assertEqual(cuid, cuid_str) @@ -1191,16 +1158,16 @@ def test_cuid_from_slice_3(self): def test_cuid_from_slice_with_call(self): m = self._slice_model() - _slice = m.b.component('b2')[:,'a'].v2[1,:] + _slice = m.b.component('b2')[:, 'a'].v2[1, :] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[*,a].v2[1,*]') self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - # This works as find_component is not in the + # This works as find_component is not in the # _call_stack of the slice. - _slice = m.b.find_component('b2')[:,'a'].v2[1,:] + _slice = m.b.find_component('b2')[:, 'a'].v2[1, :] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[*,a].v2[1,*]') self.assertEqual(cuid, cuid_str) @@ -1213,79 +1180,80 @@ def test_cuid_from_slice_with_call(self): self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b[:].component('b2','b1') - with self.assertRaisesRegex(ValueError, - '.*multiple arguments.*'): + _slice = m.b[:].component('b2', 'b1') + with self.assertRaisesRegex(ValueError, '.*multiple arguments.*'): cuid = ComponentUID(_slice) # call of something other than component - _slice = IndexedComponent_slice(m.b[:].fix, ( - IndexedComponent_slice.call, ('fix',), {} ) ) + _slice = IndexedComponent_slice( + m.b[:].fix, (IndexedComponent_slice.call, ('fix',), {}) + ) with self.assertRaisesRegex( - ValueError, - "Cannot create a CUID from a slice with a call to any " - r"method other than 'component': got 'fix'\."): + ValueError, + "Cannot create a CUID from a slice with a call to any " + r"method other than 'component': got 'fix'\.", + ): cuid = ComponentUID(_slice) - _slice = IndexedComponent_slice(m.b[:].component('v'), ( - IndexedComponent_slice.call, ('fix',), {} ) ) + _slice = IndexedComponent_slice( + m.b[:].component('v'), (IndexedComponent_slice.call, ('fix',), {}) + ) with self.assertRaisesRegex( - ValueError, - "Cannot create a CUID with a __call__ of anything " - "other than a 'component' attribute"): + ValueError, + "Cannot create a CUID with a __call__ of anything " + "other than a 'component' attribute", + ): cuid = ComponentUID(_slice) _slice = m.b[:].component('b2', kwd=None) - with self.assertRaisesRegex(ValueError, - '.*call that contains keywords.*'): + with self.assertRaisesRegex(ValueError, '.*call that contains keywords.*'): cuid = ComponentUID(_slice) - _slice = m.b.b2[:,'a'].component('vn')[:,'c',3,:,:] + _slice = m.b.b2[:, 'a'].component('vn')[:, 'c', 3, :, :] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[*,a].vn[*,c,3,*,*]') self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b2[1,'a'].component('vn')[:,'c',3,:,:] + _slice = m.b.b2[1, 'a'].component('vn')[:, 'c', 3, :, :] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[1,a].vn[*,c,3,*,*]') self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b2[...].component('vn')[:,'c',3,:,:] + _slice = m.b.b2[...].component('vn')[:, 'c', 3, :, :] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[**].vn[*,c,3,*,*]') self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - _slice = m.b.b2[:,'a'].component('vn')[...] + _slice = m.b.b2[:, 'a'].component('vn')[...] cuid = ComponentUID(_slice) cuid_str = ComponentUID('b.b2[*,a].vn[**]') self.assertEqual(cuid, cuid_str) self.assertEqual(str(cuid), str(cuid_str)) self.assertListSameComponents(m, cuid, cuid_str) - def test_cuid_from_slice_errors(self): # two getitem m = self._slice_model() m.b.comp = Reference(m.b.b1[:].v1) _slice = m.b[:].comp[1][1.1] - with self.assertRaisesRegex(ValueError, - r'.*Two `get_item` calls.*'): + with self.assertRaisesRegex(ValueError, r'.*Two `get_item` calls.*'): cuid = ComponentUID(_slice) - _slice = IndexedComponent_slice(m.b[:].component('v'), ( - IndexedComponent_slice.del_attribute, ('foo',)) ) + _slice = IndexedComponent_slice( + m.b[:].component('v'), (IndexedComponent_slice.del_attribute, ('foo',)) + ) with self.assertRaisesRegex( - ValueError, - "Cannot create a CUID from a slice that " - "contains `set` or `del` calls: got call %s " - r"with argument \('foo',\)" % ( - IndexedComponent_slice.del_attribute,)): + ValueError, + "Cannot create a CUID from a slice that " + "contains `set` or `del` calls: got call %s " + r"with argument \('foo',\)" % (IndexedComponent_slice.del_attribute,), + ): cuid = ComponentUID(_slice) diff --git a/pyomo/core/tests/unit/test_con.py b/pyomo/core/tests/unit/test_con.py index 69d04bb60de..bd90972fee2 100644 --- a/pyomo/core/tests/unit/test_con.py +++ b/pyomo/core/tests/unit/test_con.py @@ -18,24 +18,37 @@ import sys import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest -from pyomo.environ import ConcreteModel, AbstractModel, Var, Constraint, \ - ConstraintList, Param, RangeSet, Set, Expression, value, \ - simple_constraintlist_rule, simple_constraint_rule, inequality -from pyomo.core.expr.current import ( - SumExpression, EqualityExpression, InequalityExpression, +from pyomo.environ import ( + ConcreteModel, + AbstractModel, + Var, + Constraint, + ConstraintList, + Param, + RangeSet, + Set, + Expression, + value, + simple_constraintlist_rule, + simple_constraint_rule, + inequality, +) +from pyomo.core.expr import ( + SumExpression, + EqualityExpression, + InequalityExpression, RangedExpression, ) -from pyomo.core.expr import logical_expr from pyomo.core.base.constraint import _GeneralConstraintData class TestConstraintCreation(unittest.TestCase): - - def create_model(self,abstract=False): + def create_model(self, abstract=False): if abstract is True: model = AbstractModel() else: @@ -47,119 +60,143 @@ def create_model(self,abstract=False): def test_tuple_construct_equality(self): model = self.create_model() + def rule(model): return (0.0, model.x) + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, True) - self.assertEqual(model.c.lower, 0) - self.assertIs (model.c.body, model.x) - self.assertEqual(model.c.upper, 0) + self.assertEqual(model.c.equality, True) + self.assertEqual(model.c.lower, 0) + self.assertIs(model.c.body, model.x) + self.assertEqual(model.c.upper, 0) model = self.create_model() + def rule(model): return (model.x, 0.0) + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, True) - self.assertEqual(model.c.lower, 0) - self.assertIs (model.c.body, model.x) - self.assertEqual(model.c.upper, 0) + self.assertEqual(model.c.equality, True) + self.assertEqual(model.c.lower, 0) + self.assertIs(model.c.body, model.x) + self.assertEqual(model.c.upper, 0) def test_tuple_construct_inf_equality(self): model = self.create_model(abstract=True) + def rule(model): return (model.x, float('inf')) + model.c = Constraint(rule=rule) self.assertRaises(ValueError, model.create_instance) model = self.create_model(abstract=True) + def rule(model): return (float('inf'), model.x) + model.c = Constraint(rule=rule) self.assertRaises(ValueError, model.create_instance) def test_tuple_construct_1sided_inequality(self): model = self.create_model() + def rule(model): return (None, model.y, 1) + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, 1) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, None) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, 1) model = self.create_model() + def rule(model): return (0, model.y, None) + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, 0) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, 0) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, None) def test_tuple_construct_1sided_inf_inequality(self): model = self.create_model() + def rule(model): return (float('-inf'), model.y, 1) + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, 1) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, None) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, 1) model = self.create_model() + def rule(model): return (0, model.y, float('inf')) + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, 0) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, 0) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, None) def test_tuple_construct_unbounded_inequality(self): model = self.create_model() + def rule(model): return (None, model.y, None) + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, None) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, None) model = self.create_model() + def rule(model): return (float('-inf'), model.y, float('inf')) + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, None) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, None) def test_tuple_construct_invalid_1sided_inequality(self): model = self.create_model(abstract=True) + def rule(model): return (model.x, model.y, None) + model.c = Constraint(rule=rule) # We now recognize this as a valid inequality - #self.assertRaises(ValueError, model.create_instance) + # self.assertRaises(ValueError, model.create_instance) instance = model.create_instance() self.assertEqual(instance.c.lower, None) self.assertIsInstance(instance.c.body, SumExpression) self.assertEqual(instance.c.upper, 0) model = self.create_model(abstract=True) + def rule(model): return (None, model.y, model.z) + model.c = Constraint(rule=rule) # We now recognize this as a valid inequality - #self.assertRaises(ValueError, model.create_instance) + # self.assertRaises(ValueError, model.create_instance) instance = model.create_instance() self.assertEqual(instance.c.lower, None) self.assertIsInstance(instance.c.body, SumExpression) @@ -167,24 +204,29 @@ def rule(model): def test_tuple_construct_2sided_inequality(self): model = self.create_model() + def rule(model): return (0, model.y, 1) + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, 0) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, 1) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, 0) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, 1) def test_tuple_construct_invalid_2sided_inequality(self): model = self.create_model(abstract=True) + def rule(model): return (model.x, model.y, 1) + model.c = Constraint(rule=rule) instance = model.create_instance() with self.assertRaisesRegex( - ValueError, "Constraint 'c' is a Ranged Inequality " - "with a variable lower bound"): + ValueError, + "Constraint 'c' is a Ranged Inequality with a variable lower bound", + ): instance.c.lower self.assertIs(instance.c.body, instance.y) self.assertEqual(instance.c.upper, 1) @@ -192,157 +234,192 @@ def rule(model): self.assertEqual(value(instance.c.lower), 3) model = self.create_model(abstract=True) + def rule(model): return (0, model.y, model.z) + model.c = Constraint(rule=rule) instance = model.create_instance() self.assertEqual(instance.c.lower, 0) self.assertIs(instance.c.body, instance.y) with self.assertRaisesRegex( - ValueError, "Constraint 'c' is a Ranged Inequality " - "with a variable upper bound"): + ValueError, + "Constraint 'c' is a Ranged Inequality with a variable upper bound", + ): instance.c.upper instance.z.fix(3) self.assertEqual(value(instance.c.upper), 3) def test_expr_construct_equality(self): model = self.create_model() + def rule(model): return 0.0 == model.x + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, True) - self.assertEqual(model.c.lower, 0) - self.assertIs (model.c.body, model.x) - self.assertEqual(model.c.upper, 0) + self.assertEqual(model.c.equality, True) + self.assertEqual(model.c.lower, 0) + self.assertIs(model.c.body, model.x) + self.assertEqual(model.c.upper, 0) model = self.create_model() + def rule(model): return model.x == 0.0 + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, True) - self.assertEqual(model.c.lower, 0) - self.assertIs (model.c.body, model.x) - self.assertEqual(model.c.upper, 0) + self.assertEqual(model.c.equality, True) + self.assertEqual(model.c.lower, 0) + self.assertIs(model.c.body, model.x) + self.assertEqual(model.c.upper, 0) def test_expr_construct_inf_equality(self): model = self.create_model(abstract=True) + def rule(model): return model.x == float('inf') + model.c = Constraint(rule=rule) self.assertRaises(ValueError, model.create_instance) model = self.create_model(abstract=True) + def rule(model): return float('inf') == model.x + model.c = Constraint(rule=rule) self.assertRaises(ValueError, model.create_instance) def test_expr_construct_1sided_inequality(self): model = self.create_model() + def rule(model): return model.y <= 1 + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, 1) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, None) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, 1) model = self.create_model() + def rule(model): return 0 <= model.y + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, 0) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, 0) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, None) model = self.create_model() + def rule(model): return model.y >= 1 + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, 1) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, 1) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, None) model = self.create_model() + def rule(model): return 0 >= model.y + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, 0) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, None) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, 0) def test_expr_construct_unbounded_inequality(self): model = self.create_model() + def rule(model): return model.y <= float('inf') + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, None) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, None) model = self.create_model() + def rule(model): return float('-inf') <= model.y + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, None) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, None) model = self.create_model() + def rule(model): return model.y >= float('-inf') + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, None) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, None) model = self.create_model() + def rule(model): return float('inf') >= model.y + model.c = Constraint(rule=rule) - self.assertEqual(model.c.equality, False) - self.assertEqual(model.c.lower, None) - self.assertIs (model.c.body, model.y) - self.assertEqual(model.c.upper, None) + self.assertEqual(model.c.equality, False) + self.assertEqual(model.c.lower, None) + self.assertIs(model.c.body, model.y) + self.assertEqual(model.c.upper, None) def test_expr_construct_invalid_unbounded_inequality(self): model = self.create_model(abstract=True) + def rule(model): return model.y <= float('-inf') + model.c = Constraint(rule=rule) self.assertRaises(ValueError, model.create_instance) model = self.create_model(abstract=True) + def rule(model): return float('inf') <= model.y + model.c = Constraint(rule=rule) self.assertRaises(ValueError, model.create_instance) model = self.create_model(abstract=True) + def rule(model): return model.y >= float('inf') + model.c = Constraint(rule=rule) self.assertRaises(ValueError, model.create_instance) model = self.create_model(abstract=True) + def rule(model): return float('-inf') >= model.y + model.c = Constraint(rule=rule) self.assertRaises(ValueError, model.create_instance) @@ -350,49 +427,59 @@ def test_expr_construct_invalid(self): m = ConcreteModel() c = Constraint(rule=lambda m: None) self.assertRaisesRegex( - ValueError, ".*rule returned None", - m.add_component, 'c', c) + ValueError, ".*rule returned None", m.add_component, 'c', c + ) m = ConcreteModel() - c = Constraint([1], rule=lambda m,i: None) + c = Constraint([1], rule=lambda m, i: None) self.assertRaisesRegex( - ValueError, ".*rule returned None", - m.add_component, 'c', c) + ValueError, ".*rule returned None", m.add_component, 'c', c + ) m = ConcreteModel() c = Constraint(rule=lambda m: True) self.assertRaisesRegex( ValueError, r".*resolved to a trivial Boolean \(True\).*Constraint\.Feasible", - m.add_component, 'c', c) + m.add_component, + 'c', + c, + ) m = ConcreteModel() - c = Constraint([1], rule=lambda m,i: True) + c = Constraint([1], rule=lambda m, i: True) self.assertRaisesRegex( ValueError, r".*resolved to a trivial Boolean \(True\).*Constraint\.Feasible", - m.add_component, 'c', c) + m.add_component, + 'c', + c, + ) m = ConcreteModel() c = Constraint(rule=lambda m: False) self.assertRaisesRegex( ValueError, - r".*resolved to a trivial Boolean \(False\).*" - r"Constraint\.Infeasible", - m.add_component, 'c', c) + r".*resolved to a trivial Boolean \(False\).*" r"Constraint\.Infeasible", + m.add_component, + 'c', + c, + ) m = ConcreteModel() - c = Constraint([1], rule=lambda m,i: False) + c = Constraint([1], rule=lambda m, i: False) self.assertRaisesRegex( ValueError, - r".*resolved to a trivial Boolean \(False\).*" - r"Constraint\.Infeasible", - m.add_component, 'c', c) + r".*resolved to a trivial Boolean \(False\).*" r"Constraint\.Infeasible", + m.add_component, + 'c', + c, + ) def test_nondata_bounds(self): model = ConcreteModel() model.c = Constraint() - model.v = Var([1,2,3]) + model.v = Var([1, 2, 3]) model.e1 = Expression() model.e2 = Expression() model.e3 = Expression() @@ -413,13 +500,15 @@ def test_nondata_bounds(self): model.e2 = model.v[2] model.e3 = model.v[3] with self.assertRaisesRegex( - ValueError, "Constraint 'c' is a Ranged Inequality " - "with a variable lower bound"): + ValueError, + "Constraint 'c' is a Ranged Inequality with a variable lower bound", + ): model.c.lower self.assertIs(model.c.body.expr, model.v[2]) with self.assertRaisesRegex( - ValueError, "Constraint 'c' is a Ranged Inequality " - "with a variable upper bound"): + ValueError, + "Constraint 'c' is a Ranged Inequality with a variable upper bound", + ): model.c.upper # make sure we can use a mutable param that @@ -448,7 +537,7 @@ def test_mutable_novalue_param_lower_bound(self): self.assertEqual(model.c.equality, False) model.del_component(model.c) - model.c = Constraint(expr=(model.p + 1)**2 <= model.x) + model.c = Constraint(expr=(model.p + 1) ** 2 <= model.x) self.assertEqual(model.c.equality, False) model.del_component(model.c) @@ -473,7 +562,7 @@ def test_mutable_novalue_param_lower_bound(self): self.assertEqual(model.c.equality, False) model.del_component(model.c) - model.c = Constraint(expr=model.x >= (model.p + 1)**2) + model.c = Constraint(expr=model.x >= (model.p + 1) ** 2) self.assertEqual(model.c.equality, False) model.del_component(model.c) @@ -520,7 +609,7 @@ def test_mutable_novalue_param_upper_bound(self): self.assertEqual(model.c.equality, False) model.del_component(model.c) - model.c = Constraint(expr=model.x <= (model.p + 1)**2) + model.c = Constraint(expr=model.x <= (model.p + 1) ** 2) self.assertEqual(model.c.equality, False) model.del_component(model.c) @@ -545,7 +634,7 @@ def test_mutable_novalue_param_upper_bound(self): self.assertEqual(model.c.equality, False) model.del_component(model.c) - model.c = Constraint(expr=(model.p + 1)**2 >= model.x) + model.c = Constraint(expr=(model.p + 1) ** 2 >= model.x) self.assertEqual(model.c.equality, False) model.del_component(model.c) @@ -588,7 +677,7 @@ def test_mutable_novalue_param_equality(self): self.assertEqual(model.c.equality, True) model.del_component(model.c) - model.c = Constraint(expr=model.x + 1 == (model.p + 1)**2) + model.c = Constraint(expr=model.x + 1 == (model.p + 1) ** 2) self.assertEqual(model.c.equality, True) model.del_component(model.c) @@ -596,13 +685,13 @@ def test_mutable_novalue_param_equality(self): self.assertEqual(model.c.equality, True) model.del_component(model.c) - model.c = Constraint(expr=inequality(model.p, model.x, model.p)) + model.c = Constraint(expr=inequality(model.p, model.x, model.p)) self.assertTrue(model.c.upper is model.p) # GH: Not sure if we are supposed to detect equality # in this situation. I would rather us not, for # the sake of making the code less complicated. # Either way, I am not going to test for it here. - #self.assertEqual(model.c.equality, ) + # self.assertEqual(model.c.equality, ) model.del_component(model.c) model.c = Constraint(expr=(model.x, model.p)) @@ -637,13 +726,12 @@ def test_inequality(self): class TestSimpleCon(unittest.TestCase): - def test_set_expr_explicit_multivariate(self): """Test expr= option (multivariate expression)""" model = ConcreteModel() - model.A = RangeSet(1,4) + model.A = RangeSet(1, 4) model.x = Var(model.A, initialize=2) - ans=0 + ans = 0 for i in model.A: ans = ans + model.x[i] ans = ans >= 0 @@ -675,7 +763,8 @@ def test_set_expr_undefined_univariate(self): model.c = Constraint(expr=ans) with self.assertRaisesRegex( - ValueError, "No value for uninitialized NumericValue object x"): + ValueError, "No value for uninitialized NumericValue object x" + ): value(model.c) model.x = 2 self.assertEqual(model.c(), 2) @@ -684,8 +773,8 @@ def test_set_expr_undefined_univariate(self): def test_set_expr_inline(self): """Test expr= option (inline expression)""" model = ConcreteModel() - model.A = RangeSet(1,4) - model.x = Var(model.A,initialize=2) + model.A = RangeSet(1, 4) + model.x = Var(model.A, initialize=2) model.c = Constraint(expr=(0, sum(model.x[i] for i in model.A), 1)) self.assertEqual(model.c(), 8) @@ -694,14 +783,16 @@ def test_set_expr_inline(self): def test_rule1(self): """Test rule option""" model = ConcreteModel() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model): - ans=0 + ans = 0 for i in model.B: ans = ans + model.x[i] ans = ans >= 0 ans = ans <= 1 return ans + model.x = Var(model.B, initialize=2) model.c = Constraint(rule=f) @@ -711,12 +802,14 @@ def f(model): def test_rule2(self): """Test rule option""" model = ConcreteModel() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model): - ans=0 + ans = 0 for i in model.B: ans = ans + model.x[i] return (0, ans, 1) + model.x = Var(model.B, initialize=2) model.c = Constraint(rule=f) @@ -726,12 +819,14 @@ def f(model): def test_rule3(self): """Test rule option""" model = ConcreteModel() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model): - ans=0 + ans = 0 for i in model.B: ans = ans + model.x[i] return (0, ans, None) + model.x = Var(model.B, initialize=2) model.c = Constraint(rule=f) @@ -741,12 +836,14 @@ def f(model): def test_rule4(self): """Test rule option""" model = ConcreteModel() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model): - ans=0 + ans = 0 for i in model.B: ans = ans + model.x[i] return (None, ans, 1) + model.x = Var(model.B, initialize=2) model.c = Constraint(rule=f) @@ -756,12 +853,14 @@ def f(model): def test_rule5(self): """Test rule option""" model = ConcreteModel() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model): - ans=0 + ans = 0 for i in model.B: ans = ans + model.x[i] return (ans, 1) + model.x = Var(model.B, initialize=2) model.c = Constraint(rule=f) @@ -773,14 +872,14 @@ def test_dim(self): model = ConcreteModel() model.c = Constraint() - self.assertEqual(model.c.dim(),0) + self.assertEqual(model.c.dim(), 0) def test_keys_empty(self): """Test keys method""" model = ConcreteModel() model.c = Constraint() - self.assertEqual(list(model.c.keys()),[]) + self.assertEqual(list(model.c.keys()), []) def test_len_empty(self): """Test len method""" @@ -794,17 +893,17 @@ def test_None_key(self): model = ConcreteModel() model.x = Var() model.c = Constraint(expr=model.x == 1) - self.assertEqual(list(model.c.keys()),[None]) - self.assertEqual(id(model.c),id(model.c[None])) + self.assertEqual(list(model.c.keys()), [None]) + self.assertEqual(id(model.c), id(model.c[None])) def test_len(self): """Test len method""" model = AbstractModel() model.x = Var() model.c = Constraint(rule=lambda m: m.x == 1) - self.assertEqual(len(model.c),0) + self.assertEqual(len(model.c), 0) inst = model.create_instance() - self.assertEqual(len(inst.c),1) + self.assertEqual(len(inst.c), 1) def test_setitem(self): m = ConcreteModel() @@ -820,26 +919,28 @@ def test_setitem(self): m.c = Constraint.Skip self.assertEqual(len(m.c), 0) -class TestArrayCon(unittest.TestCase): +class TestArrayCon(unittest.TestCase): def create_model(self): model = ConcreteModel() - model.A = Set(initialize=[1,2,3,4]) + model.A = Set(initialize=[1, 2, 3, 4]) return model def test_rule_option1(self): model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i): - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i ans = ans <= 0 ans = ans >= 0 return ans + model.x = Var(model.B, initialize=2) - model.c = Constraint(model.A,rule=f) + model.c = Constraint(model.A, rule=f) self.assertEqual(model.c[1](), 8) self.assertEqual(model.c[2](), 16) @@ -847,78 +948,86 @@ def f(model, i): def test_rule_option2(self): model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i): - if i%2 == 0: + if i % 2 == 0: return Constraint.Skip - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i ans = ans <= 0 ans = ans >= 0 return ans + model.x = Var(model.B, initialize=2) - model.c = Constraint(model.A,rule=f) + model.c = Constraint(model.A, rule=f) self.assertEqual(model.c[1](), 8) self.assertEqual(len(model.c), 2) def test_rule_option3(self): model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i): - if i%2 == 0: + if i % 2 == 0: return Constraint.Skip - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i ans = ans <= 0 ans = ans >= 0 return ans + model.x = Var(model.B, initialize=2) - model.c = Constraint(model.A,rule=f) + model.c = Constraint(model.A, rule=f) self.assertEqual(model.c[1](), 8) self.assertEqual(len(model.c), 2) def test_rule_option2a(self): model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + @simple_constraint_rule def f(model, i): - if i%2 == 0: + if i % 2 == 0: return None - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i ans = ans <= 0 ans = ans >= 0 return ans + model.x = Var(model.B, initialize=2) - model.c = Constraint(model.A,rule=f) + model.c = Constraint(model.A, rule=f) self.assertEqual(model.c[1](), 8) self.assertEqual(len(model.c), 2) def test_rule_option3a(self): model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + @simple_constraint_rule def f(model, i): - if i%2 == 0: + if i % 2 == 0: return None - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i ans = ans <= 0 ans = ans >= 0 return ans + model.x = Var(model.B, initialize=2) - model.c = Constraint(model.A,rule=f) + model.c = Constraint(model.A, rule=f) self.assertEqual(model.c[1](), 8) self.assertEqual(len(model.c), 2) @@ -927,32 +1036,34 @@ def test_dim(self): model = self.create_model() model.c = Constraint(model.A) - self.assertEqual(model.c.dim(),1) + self.assertEqual(model.c.dim(), 1) def test_keys(self): model = self.create_model() model.c = Constraint(model.A) - self.assertEqual(len(list(model.c.keys())),0) + self.assertEqual(len(list(model.c.keys())), 0) def test_len(self): model = self.create_model() model.c = Constraint(model.A) - self.assertEqual(len(model.c),0) + self.assertEqual(len(model.c), 0) model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) """Test rule option""" + def f(model): - ans=0 + ans = 0 for i in model.B: ans = ans + model.x[i] - ans = ans==2 + ans = ans == 2 return ans + model.x = Var(model.B, initialize=2) model.c = Constraint(rule=f) - self.assertEqual(len(model.c),1) + self.assertEqual(len(model.c), 1) def test_setitem(self): m = ConcreteModel() @@ -968,20 +1079,21 @@ def test_setitem(self): m.c[3] = Constraint.Skip self.assertEqual(len(m.c), 1) - self.assertRaisesRegex( KeyError, "3", m.c.__getitem__, 3) + self.assertRaisesRegex(KeyError, "3", m.c.__getitem__, 3) - self.assertRaisesRegex( ValueError, r"'c\[3\]': rule returned None", - m.c.__setitem__, 3, None) + self.assertRaisesRegex( + ValueError, r"'c\[3\]': rule returned None", m.c.__setitem__, 3, None + ) self.assertEqual(len(m.c), 1) m.c[2] = Constraint.Skip self.assertEqual(len(m.c), 0) -class TestConList(unittest.TestCase): +class TestConList(unittest.TestCase): def create_model(self): model = ConcreteModel() - model.A = Set(initialize=[1,2,3,4]) + model.A = Set(initialize=[1, 2, 3, 4]) return model # @@ -1004,17 +1116,19 @@ def test_conlist_skip(self): def test_rule_option1(self): model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i): if i > 4: return ConstraintList.End - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i ans = ans <= 0 ans = ans >= 0 return ans + model.x = Var(model.B, initialize=2) model.c = ConstraintList(rule=f) @@ -1024,18 +1138,20 @@ def f(model, i): def test_rule_option2(self): model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i): if i > 2: return ConstraintList.End - i = 2*i - 1 - ans=0 + i = 2 * i - 1 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i ans = ans <= 0 ans = ans >= 0 return ans + model.x = Var(model.B, initialize=2) model.c = ConstraintList(rule=f) @@ -1044,18 +1160,20 @@ def f(model, i): def test_rule_option1a(self): model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + @simple_constraintlist_rule def f(model, i): if i > 4: return None - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i ans = ans <= 0 ans = ans >= 0 return ans + model.x = Var(model.B, initialize=2) model.c = ConstraintList(rule=f) @@ -1065,19 +1183,21 @@ def f(model, i): def test_rule_option2a(self): model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + @simple_constraintlist_rule def f(model, i): if i > 2: return None - i = 2*i - 1 - ans=0 + i = 2 * i - 1 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i ans = ans <= 0 ans = ans >= 0 return ans + model.x = Var(model.B, initialize=2) model.c = ConstraintList(rule=f) @@ -1087,11 +1207,13 @@ def f(model, i): def test_rule_option3(self): model = self.create_model() model.y = Var(initialize=2) + def f(model): yield model.y <= 0 - yield 2*model.y <= 0 - yield 2*model.y <= 0 + yield 2 * model.y <= 0 + yield 2 * model.y <= 0 yield ConstraintList.End + model.c = ConstraintList(rule=f) self.assertEqual(len(model.c), 3) self.assertEqual(model.c[1](), 2) @@ -1102,7 +1224,7 @@ def f(model): def test_rule_option4(self): model = self.create_model() model.y = Var(initialize=2) - model.c = ConstraintList(rule=((i+1)*model.y >= 0 for i in range(3))) + model.c = ConstraintList(rule=((i + 1) * model.y >= 0 for i in range(3))) self.assertEqual(len(model.c), 3) self.assertEqual(model.c[1](), 2) @@ -1110,19 +1232,19 @@ def test_dim(self): model = self.create_model() model.c = ConstraintList() - self.assertEqual(model.c.dim(),1) + self.assertEqual(model.c.dim(), 1) def test_keys(self): model = self.create_model() model.c = ConstraintList() - self.assertEqual(len(list(model.c.keys())),0) + self.assertEqual(len(list(model.c.keys())), 0) def test_len(self): model = self.create_model() model.c = ConstraintList() - self.assertEqual(len(model.c),0) + self.assertEqual(len(model.c), 0) def test_0based_add(self): m = ConcreteModel() @@ -1133,76 +1255,78 @@ def test_0based_add(self): m.c.add(m.x >= 0) self.assertEqual(list(m.c.keys()), [0, 1]) -class Test2DArrayCon(unittest.TestCase): +class Test2DArrayCon(unittest.TestCase): def create_model(self): model = ConcreteModel() - model.A = Set(initialize=[1,2]) + model.A = Set(initialize=[1, 2]) return model def test_rule_option(self): """Test rule option""" model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i, j): - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i ans = ans <= 0 ans = ans >= 0 return ans + model.x = Var(model.B, initialize=2) - model.c = Constraint(model.A,model.A,rule=f) + model.c = Constraint(model.A, model.A, rule=f) - self.assertEqual(model.c[1,1](), 8) - self.assertEqual(model.c[2,1](), 16) + self.assertEqual(model.c[1, 1](), 8) + self.assertEqual(model.c[2, 1](), 16) def test_dim(self): """Test dim method""" model = self.create_model() - model.c = Constraint(model.A,model.A) + model.c = Constraint(model.A, model.A) - self.assertEqual(model.c.dim(),2) + self.assertEqual(model.c.dim(), 2) def test_keys(self): """Test keys method""" model = self.create_model() - model.c = Constraint(model.A,model.A) + model.c = Constraint(model.A, model.A) - self.assertEqual(len(list(model.c.keys())),0) + self.assertEqual(len(list(model.c.keys())), 0) def test_len(self): """Test len method""" model = self.create_model() - model.c = Constraint(model.A,model.A) - self.assertEqual(len(model.c),0) + model.c = Constraint(model.A, model.A) + self.assertEqual(len(model.c), 0) model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) """Test rule option""" + def f(model): - ans=0 + ans = 0 for i in model.B: ans = ans + model.x[i] - ans = ans==2 + ans = ans == 2 return ans + model.x = Var(model.B, initialize=2) model.c = Constraint(rule=f) - self.assertEqual(len(model.c),1) + self.assertEqual(len(model.c), 1) + class MiscConTests(unittest.TestCase): - def test_infeasible(self): m = ConcreteModel() - with self.assertRaisesRegex( - ValueError, "Constraint 'c' is always infeasible"): + with self.assertRaisesRegex(ValueError, "Constraint 'c' is always infeasible"): m.c = Constraint(expr=Constraint.Infeasible) self.assertEqual(m.c._data, {}) - with self.assertRaisesRegex( - ValueError, "Constraint 'c' is always infeasible"): + with self.assertRaisesRegex(ValueError, "Constraint 'c' is always infeasible"): m.c = Constraint.Infeasible self.assertEqual(m.c._data, {}) self.assertIsNone(m.c.expr) @@ -1212,8 +1336,7 @@ def test_infeasible(self): self.assertEqual(m.c.lb, 0) self.assertEqual(m.c.ub, 2) - with self.assertRaisesRegex( - ValueError, "Constraint 'c' is always infeasible"): + with self.assertRaisesRegex(ValueError, "Constraint 'c' is always infeasible"): m.c = Constraint.Infeasible self.assertEqual(m.c._data, {}) self.assertIsNone(m.c.expr) @@ -1248,15 +1371,15 @@ def test_constructor(self): pass def test_contains(self): - model=ConcreteModel() - model.a=Set(initialize=[1,2,3]) - model.b=Constraint(model.a) + model = ConcreteModel() + model.a = Set(initialize=[1, 2, 3]) + model.b = Constraint(model.a) - self.assertEqual(2 in model.b,False) - tmp=[] + self.assertEqual(2 in model.b, False) + tmp = [] for i in model.b: tmp.append(i) - self.assertEqual(len(tmp),0) + self.assertEqual(len(tmp), 0) def test_empty_singleton(self): a = Constraint() @@ -1322,32 +1445,46 @@ def test_unconstructed_singleton(self): self.assertEqual(a._constructed, False) self.assertEqual(len(a), 0) with self.assertRaisesRegex( - RuntimeError, "Cannot access .* on AbstractScalarConstraint" - ".*before it has been constructed"): + RuntimeError, + "Cannot access .* on AbstractScalarConstraint" + ".*before it has been constructed", + ): a() with self.assertRaisesRegex( - RuntimeError, "Cannot access .* on AbstractScalarConstraint" - ".*before it has been constructed"): + RuntimeError, + "Cannot access .* on AbstractScalarConstraint" + ".*before it has been constructed", + ): a.body with self.assertRaisesRegex( - RuntimeError, "Cannot access .* on AbstractScalarConstraint" - ".*before it has been constructed"): + RuntimeError, + "Cannot access .* on AbstractScalarConstraint" + ".*before it has been constructed", + ): a.lower with self.assertRaisesRegex( - RuntimeError, "Cannot access .* on AbstractScalarConstraint" - ".*before it has been constructed"): + RuntimeError, + "Cannot access .* on AbstractScalarConstraint" + ".*before it has been constructed", + ): a.upper with self.assertRaisesRegex( - RuntimeError, "Cannot access .* on AbstractScalarConstraint" - ".*before it has been constructed"): + RuntimeError, + "Cannot access .* on AbstractScalarConstraint" + ".*before it has been constructed", + ): a.equality with self.assertRaisesRegex( - RuntimeError, "Cannot access .* on AbstractScalarConstraint" - ".*before it has been constructed"): + RuntimeError, + "Cannot access .* on AbstractScalarConstraint" + ".*before it has been constructed", + ): a.strict_lower with self.assertRaisesRegex( - RuntimeError, "Cannot access .* on AbstractScalarConstraint" - ".*before it has been constructed"): + RuntimeError, + "Cannot access .* on AbstractScalarConstraint" + ".*before it has been constructed", + ): a.strict_upper x = Var(initialize=1.0) @@ -1366,33 +1503,39 @@ def test_unconstructed_singleton(self): def test_rule(self): def rule1(model): return Constraint.Skip + model = ConcreteModel() try: model.o = Constraint(rule=rule1) except Exception: e = sys.exc_info()[1] self.fail("Failure to create empty constraint: %s" % str(e)) + # def rule1(model): - return (0.0,model.x,2.0) + return (0.0, model.x, 2.0) + model = ConcreteModel() model.x = Var(initialize=1.1) model.o = Constraint(rule=rule1) - self.assertEqual(model.o(),1.1) + self.assertEqual(model.o(), 1.1) + # def rule1(model, i): return Constraint.Skip + model = ConcreteModel() - model.a = Set(initialize=[1,2,3]) + model.a = Set(initialize=[1, 2, 3]) try: - model.o = Constraint(model.a,rule=rule1) + model.o = Constraint(model.a, rule=rule1) except Exception: self.fail("Error generating empty constraint") # def rule1(model): - return (0.0,1.1,2.0,None) + return (0.0, 1.1, 2.0, None) + model = ConcreteModel() try: model.o = Constraint(rule=rule1) @@ -1402,7 +1545,8 @@ def rule1(model): def test_tuple_constraint_create(self): def rule1(model): - return (0.0,model.x) + return (0.0, model.x) + model = ConcreteModel() model.x = Var() model.y = Var() @@ -1411,21 +1555,27 @@ def rule1(model): self.assertEqual(model.c.lower, 0) self.assertIs(model.c.body, model.x) self.assertEqual(model.c.upper, 0) + # def rule1(model): - return (model.y,model.x,model.z) + return (model.y, model.x, model.z) + model = AbstractModel() model.x = Var() model.y = Var() model.z = Var() model.c = Constraint(rule=rule1) instance = model.create_instance() - with self.assertRaisesRegex(ValueError, "Constraint 'c' is a Ranged " - "Inequality with a variable lower bound"): + with self.assertRaisesRegex( + ValueError, + "Constraint 'c' is a Ranged Inequality with a variable lower bound", + ): instance.c.lower self.assertIs(instance.c.body, instance.x) - with self.assertRaisesRegex(ValueError, "Constraint 'c' is a Ranged " - "Inequality with a variable upper bound"): + with self.assertRaisesRegex( + ValueError, + "Constraint 'c' is a Ranged Inequality with a variable upper bound", + ): instance.c.upper # @@ -1435,17 +1585,20 @@ def rule1(model): expr = expr == 0.0 expr = expr >= 1.0 return expr + model = AbstractModel() model.x = Var() model.y = Var() model.z = Var() model.o = Constraint(rule=rule1) self.assertRaises(TypeError, model.create_instance) + # def rule1(model): expr = model.U >= model.x expr = expr >= model.L return expr + model = ConcreteModel() model.x = Var() model.L = Param(initialize=0) @@ -1457,37 +1610,44 @@ def rule1(model): expr = model.x <= model.z expr = expr >= model.y return expr + model = AbstractModel() model.x = Var() model.y = Var() model.z = Var() model.o = Constraint(rule=rule1) - #self.assertRaises(ValueError, model.create_instance) + + # self.assertRaises(ValueError, model.create_instance) # def rule1(model): expr = model.x >= model.z expr = model.y >= expr return expr + model = AbstractModel() model.x = Var() model.y = Var() model.z = Var() model.o = Constraint(rule=rule1) - #self.assertRaises(ValueError, model.create_instance) + + # self.assertRaises(ValueError, model.create_instance) # def rule1(model): expr = model.y <= model.x expr = model.y >= expr return expr + model = AbstractModel() model.x = Var() model.y = Var() model.o = Constraint(rule=rule1) - #self.assertRaises(ValueError, model.create_instance) + + # self.assertRaises(ValueError, model.create_instance) # def rule1(model): expr = model.x >= model.L return expr + model = ConcreteModel() model.x = Var() model.L = Param(initialize=0) @@ -1497,6 +1657,7 @@ def rule1(model): def rule1(model): expr = model.U >= model.x return expr + model = ConcreteModel() model.x = Var() model.U = Param(initialize=0) @@ -1504,21 +1665,24 @@ def rule1(model): # def rule1(model): - expr=model.x + expr = model.x expr = expr == 0.0 expr = expr <= 1.0 return expr + model = AbstractModel() model.x = Var() model.y = Var() model.z = Var() model.o = Constraint(rule=rule1) self.assertRaises(TypeError, model.create_instance) + # def rule1(model): expr = model.U <= model.x expr = expr <= model.L return expr + model = ConcreteModel() model.x = Var() model.L = Param(initialize=0) @@ -1530,45 +1694,54 @@ def rule1(model): expr = model.x >= model.z expr = expr <= model.y return expr + model = AbstractModel() model.x = Var() model.y = Var() model.z = Var() model.o = Constraint(rule=rule1) - #self.assertRaises(ValueError, model.create_instance) + + # self.assertRaises(ValueError, model.create_instance) # def rule1(model): expr = model.x <= model.z expr = model.y <= expr return expr + model = AbstractModel() model.x = Var() model.y = Var() model.z = Var() model.o = Constraint(rule=rule1) - #self.assertRaises(ValueError, model.create_instance) + + # self.assertRaises(ValueError, model.create_instance) # def rule1(model): expr = model.x <= model.L return expr + model = ConcreteModel() model.x = Var() model.L = Param(initialize=0) model.o = Constraint(rule=rule1) + # def rule1(model): expr = model.y >= model.x expr = model.y <= expr return expr + model = AbstractModel() model.x = Var() model.y = Var() model.o = Constraint(rule=rule1) - #self.assertRaises(ValueError, model.create_instance) + + # self.assertRaises(ValueError, model.create_instance) # def rule1(model): expr = model.U <= model.x return expr + model = ConcreteModel() model.x = Var() model.U = Param(initialize=0) @@ -1576,7 +1749,8 @@ def rule1(model): # def rule1(model): - return model.x+model.x + return model.x + model.x + model = ConcreteModel() model.x = Var() try: @@ -1612,10 +1786,12 @@ def test_potentially_variable_bounds(self): self.assertIs(m.c.lower, m.l) self.assertIs(m.c.upper, m.u) with self.assertRaisesRegex( - ValueError, 'No value for uninitialized NumericValue object l'): + ValueError, 'No value for uninitialized NumericValue object l' + ): m.c.lb with self.assertRaisesRegex( - ValueError, 'No value for uninitialized NumericValue object u'): + ValueError, 'No value for uninitialized NumericValue object u' + ): m.c.ub m.l = 5 @@ -1627,13 +1803,15 @@ def test_potentially_variable_bounds(self): m.l.expr = m.x with self.assertRaisesRegex( - ValueError, r"Constraint 'c' is a Ranged Inequality " - "with a variable lower bound"): + ValueError, + r"Constraint 'c' is a Ranged Inequality with a variable lower bound", + ): m.c.lower self.assertIs(m.c.upper, m.u) with self.assertRaisesRegex( - ValueError, r"Constraint 'c' is a Ranged Inequality " - "with a variable lower bound"): + ValueError, + r"Constraint 'c' is a Ranged Inequality with a variable lower bound", + ): m.c.lb self.assertEqual(m.c.ub, 10) @@ -1641,13 +1819,15 @@ def test_potentially_variable_bounds(self): m.u.expr = m.x self.assertIs(m.c.lower, m.l) with self.assertRaisesRegex( - ValueError, r"Constraint 'c' is a Ranged Inequality " - "with a variable upper bound"): + ValueError, + r"Constraint 'c' is a Ranged Inequality with a variable upper bound", + ): m.c.upper self.assertEqual(m.c.lb, 15) with self.assertRaisesRegex( - ValueError, r"Constraint 'c' is a Ranged Inequality " - "with a variable upper bound"): + ValueError, + r"Constraint 'c' is a Ranged Inequality with a variable upper bound", + ): m.c.ub m.l = -float('inf') @@ -1662,12 +1842,16 @@ def test_potentially_variable_bounds(self): self.assertIs(m.c.lower, m.l) self.assertIs(m.c.upper, m.u) with self.assertRaisesRegex( - ValueError, r"Constraint 'c' created with an invalid " - r"non-finite lower bound \(inf\)"): + ValueError, + r"Constraint 'c' created with an invalid " + r"non-finite lower bound \(inf\)", + ): m.c.lb with self.assertRaisesRegex( - ValueError, r"Constraint 'c' created with an invalid " - r"non-finite upper bound \(-inf\)"): + ValueError, + r"Constraint 'c' created with an invalid " + r"non-finite upper bound \(-inf\)", + ): m.c.ub m.l = float('nan') @@ -1675,12 +1859,16 @@ def test_potentially_variable_bounds(self): self.assertIs(m.c.lower, m.l) self.assertIs(m.c.upper, m.u) with self.assertRaisesRegex( - ValueError, r"Constraint 'c' created with an invalid " - r"non-finite lower bound \(nan\)"): + ValueError, + r"Constraint 'c' created with an invalid " + r"non-finite lower bound \(nan\)", + ): m.c.lb with self.assertRaisesRegex( - ValueError, r"Constraint 'c' created with an invalid " - r"non-finite upper bound \(nan\)"): + ValueError, + r"Constraint 'c' created with an invalid " + r"non-finite upper bound \(nan\)", + ): m.c.ub def test_tuple_expression(self): @@ -1695,20 +1883,25 @@ def test_tuple_expression(self): self.assertIs(type(m.c.expr), EqualityExpression) with self.assertRaisesRegex( - ValueError, "Constraint 'c' does not have a proper value. " - "Equality Constraints expressed as 2-tuples cannot " - "contain None"): + ValueError, + "Constraint 'c' does not have a proper value. " + "Equality Constraints expressed as 2-tuples cannot " + "contain None", + ): m.c = (m.x, None) with self.assertRaisesRegex( - ValueError, r"Constraint 'c' created with an invalid " - r"non-finite lower bound \(inf\)"): + ValueError, + r"Constraint 'c' created with an invalid " + r"non-finite lower bound \(inf\)", + ): m.c = (m.x, float('inf')) with self.assertRaisesRegex( - ValueError, r"Equality constraint 'c' defined with " - "non-finite term"): + ValueError, r"Equality constraint 'c' defined with non-finite term" + ): m.c = EqualityExpression((m.x, None)) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_concrete.py b/pyomo/core/tests/unit/test_concrete.py index 22cd8e518e5..a9bd75f05c7 100644 --- a/pyomo/core/tests/unit/test_concrete.py +++ b/pyomo/core/tests/unit/test_concrete.py @@ -15,7 +15,8 @@ import json import os from os.path import abspath, dirname, join -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest @@ -24,32 +25,44 @@ solvers = check_available_solvers('glpk') + @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") class Test(unittest.TestCase): - def test_blending(self): - """ The blending example from the PuLP documentation """ + """The blending example from the PuLP documentation""" model = ConcreteModel() - model.x1 = Var(bounds=(0,None), doc="ChickenPercent") - model.x2 = Var(bounds=(0,None), doc="BeefPercent") + model.x1 = Var(bounds=(0, None), doc="ChickenPercent") + model.x2 = Var(bounds=(0, None), doc="BeefPercent") - model.obj = Objective(expr=0.013*model.x1 + 0.008*model.x2, doc="Total Cost of Ingredients per can") + model.obj = Objective( + expr=0.013 * model.x1 + 0.008 * model.x2, + doc="Total Cost of Ingredients per can", + ) - model.c0 = Constraint(expr=model.x1+model.x2 == 100.0, doc="Percentage Sum") - model.c1 = Constraint(expr=0.100*model.x1 + 0.200*model.x2 >= 8.0, doc="Protein Requirement") - model.c2 = Constraint(expr=0.080*model.x1 + 0.100*model.x2 >= 6.0, doc="Fat Requirement") - model.c3 = Constraint(expr=0.001*model.x1 + 0.005*model.x2 <= 2.0, doc="Fiber Requirement") - model.c4 = Constraint(expr=0.002*model.x1 + 0.005*model.x2 <= 0.4, doc="Salt Requirement") + model.c0 = Constraint(expr=model.x1 + model.x2 == 100.0, doc="Percentage Sum") + model.c1 = Constraint( + expr=0.100 * model.x1 + 0.200 * model.x2 >= 8.0, doc="Protein Requirement" + ) + model.c2 = Constraint( + expr=0.080 * model.x1 + 0.100 * model.x2 >= 6.0, doc="Fat Requirement" + ) + model.c3 = Constraint( + expr=0.001 * model.x1 + 0.005 * model.x2 <= 2.0, doc="Fiber Requirement" + ) + model.c4 = Constraint( + expr=0.002 * model.x1 + 0.005 * model.x2 <= 0.4, doc="Salt Requirement" + ) opt = SolverFactory('glpk') results = opt.solve(model) model.solutions.store_to(results) results.write(filename=join(currdir, "blend.out"), format='json') - with open(join(currdir,"blend.out"), 'r') as out, \ - open(join(currdir,"blend.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-2, - allow_second_superset=True) + with open(join(currdir, "blend.out"), 'r') as out, open( + join(currdir, "blend.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-2, allow_second_superset=True + ) if __name__ == "__main__": diff --git a/pyomo/core/tests/unit/test_connector.py b/pyomo/core/tests/unit/test_connector.py index bae6d3a1bad..1dde9f3af24 100644 --- a/pyomo/core/tests/unit/test_connector.py +++ b/pyomo/core/tests/unit/test_connector.py @@ -17,15 +17,28 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest from io import StringIO -from pyomo.environ import ConcreteModel, AbstractModel, Connector, Var, NonNegativeReals, Set, Constraint, TransformationFactory, Binary, Reals, VarList +from pyomo.environ import ( + ConcreteModel, + AbstractModel, + Connector, + Var, + NonNegativeReals, + Set, + Constraint, + TransformationFactory, + Binary, + Reals, + VarList, +) -class TestConnector(unittest.TestCase): +class TestConnector(unittest.TestCase): def test_default_scalar_constructor(self): model = ConcreteModel() model.c = Connector() @@ -37,7 +50,7 @@ def test_default_scalar_constructor(self): self.assertEqual(len(model.c), 0) # FIXME: Not sure I like this behavior: but since this is # (currently) an attribute, there is no way to check for - # construction withough converting it to a property. + # construction without converting it to a property. # # TODO: if we move away from multiple inheritance for # simplevars, then this can trigger an exception (cleanly) @@ -49,12 +62,12 @@ def test_default_scalar_constructor(self): def test_default_indexed_constructor(self): model = ConcreteModel() - model.c = Connector([1,2,3]) + model.c = Connector([1, 2, 3]) self.assertEqual(len(model.c), 3) self.assertEqual(len(model.c[1].vars), 0) model = AbstractModel() - model.c = Connector([1,2,3]) + model.c = Connector([1, 2, 3]) self.assertEqual(len(model.c), 0) self.assertRaises(ValueError, model.c.__getitem__, 1) @@ -65,9 +78,9 @@ def test_default_indexed_constructor(self): def test_add_scalar_vars(self): pipe = ConcreteModel() pipe.flow = Var() - pipe.pIn = Var( within=NonNegativeReals ) - pipe.pOut = Var( within=NonNegativeReals ) - + pipe.pIn = Var(within=NonNegativeReals) + pipe.pOut = Var(within=NonNegativeReals) + pipe.OUT = Connector() pipe.OUT.add(pipe.flow, "flow") pipe.OUT.add(pipe.pOut, "pressure") @@ -81,13 +94,13 @@ def test_add_scalar_vars(self): self.assertEqual(len(pipe.IN), 1) self.assertEqual(len(pipe.IN.vars), 2) self.assertTrue(pipe.IN.vars['flow'].is_expression_type()) - + def test_add_indexed_vars(self): pipe = ConcreteModel() - pipe.SPECIES = Set(initialize=['a','b','c']) + pipe.SPECIES = Set(initialize=['a', 'b', 'c']) pipe.flow = Var() pipe.composition = Var(pipe.SPECIES) - pipe.pIn = Var( within=NonNegativeReals ) + pipe.pIn = Var(within=NonNegativeReals) pipe.OUT = Connector() pipe.OUT.add(pipe.flow, "flow") @@ -97,90 +110,88 @@ def test_add_indexed_vars(self): self.assertEqual(len(pipe.OUT), 1) self.assertEqual(len(pipe.OUT.vars), 3) - def test_fixed(self): pipe = ConcreteModel() - pipe.SPECIES = Set(initialize=['a','b','c']) + pipe.SPECIES = Set(initialize=['a', 'b', 'c']) pipe.flow = Var() pipe.composition = Var(pipe.SPECIES) - pipe.pIn = Var( within=NonNegativeReals ) + pipe.pIn = Var(within=NonNegativeReals) pipe.OUT = Connector() - self.assertTrue( pipe.OUT.is_fixed()) + self.assertTrue(pipe.OUT.is_fixed()) pipe.OUT.add(pipe.flow, "flow") - self.assertFalse( pipe.OUT.is_fixed()) + self.assertFalse(pipe.OUT.is_fixed()) pipe.flow.fix(0) - self.assertTrue( pipe.OUT.is_fixed()) + self.assertTrue(pipe.OUT.is_fixed()) pipe.OUT.add(-pipe.pIn, "pressure") - self.assertFalse( pipe.OUT.is_fixed()) + self.assertFalse(pipe.OUT.is_fixed()) pipe.pIn.fix(1) - self.assertTrue( pipe.OUT.is_fixed()) + self.assertTrue(pipe.OUT.is_fixed()) pipe.OUT.add(pipe.composition, "composition") - self.assertFalse( pipe.OUT.is_fixed()) + self.assertFalse(pipe.OUT.is_fixed()) pipe.composition['a'].fix(1) - self.assertFalse( pipe.OUT.is_fixed()) + self.assertFalse(pipe.OUT.is_fixed()) pipe.composition['b'].fix(1) pipe.composition['c'].fix(1) - self.assertTrue( pipe.OUT.is_fixed()) - + self.assertTrue(pipe.OUT.is_fixed()) def test_polynomial_degree(self): pipe = ConcreteModel() - pipe.SPECIES = Set(initialize=['a','b','c']) + pipe.SPECIES = Set(initialize=['a', 'b', 'c']) pipe.flow = Var() pipe.composition = Var(pipe.SPECIES) - pipe.pIn = Var( within=NonNegativeReals ) + pipe.pIn = Var(within=NonNegativeReals) pipe.OUT = Connector() - self.assertEqual( pipe.OUT.polynomial_degree(), 0) + self.assertEqual(pipe.OUT.polynomial_degree(), 0) pipe.OUT.add(pipe.flow, "flow") - self.assertEqual( pipe.OUT.polynomial_degree(), 1) + self.assertEqual(pipe.OUT.polynomial_degree(), 1) pipe.flow.fix(0) - self.assertEqual( pipe.OUT.polynomial_degree(), 0) + self.assertEqual(pipe.OUT.polynomial_degree(), 0) pipe.OUT.add(-pipe.pIn, "pressure") - self.assertEqual( pipe.OUT.polynomial_degree(), 1) + self.assertEqual(pipe.OUT.polynomial_degree(), 1) pipe.pIn.fix(1) - self.assertEqual( pipe.OUT.polynomial_degree(), 0) + self.assertEqual(pipe.OUT.polynomial_degree(), 0) pipe.OUT.add(pipe.composition, "composition") - self.assertEqual( pipe.OUT.polynomial_degree(), 1) + self.assertEqual(pipe.OUT.polynomial_degree(), 1) pipe.composition['a'].fix(1) - self.assertEqual( pipe.OUT.polynomial_degree(), 1) + self.assertEqual(pipe.OUT.polynomial_degree(), 1) pipe.composition['b'].fix(1) pipe.composition['c'].fix(1) - self.assertEqual( pipe.OUT.polynomial_degree(), 0) + self.assertEqual(pipe.OUT.polynomial_degree(), 0) - pipe.OUT.add(pipe.flow*pipe.pIn, "quadratic") - self.assertEqual( pipe.OUT.polynomial_degree(), 0) + pipe.OUT.add(pipe.flow * pipe.pIn, "quadratic") + self.assertEqual(pipe.OUT.polynomial_degree(), 0) pipe.flow.unfix() - self.assertEqual( pipe.OUT.polynomial_degree(), 1) + self.assertEqual(pipe.OUT.polynomial_degree(), 1) pipe.pIn.unfix() - self.assertEqual( pipe.OUT.polynomial_degree(), 2) + self.assertEqual(pipe.OUT.polynomial_degree(), 2) - pipe.OUT.add(pipe.flow/pipe.pIn, "nonLin") - self.assertEqual( pipe.OUT.polynomial_degree(), None) + pipe.OUT.add(pipe.flow / pipe.pIn, "nonLin") + self.assertEqual(pipe.OUT.polynomial_degree(), None) def test_pprint(self): pipe = ConcreteModel() - pipe.SPECIES = Set(initialize=['a','b','c']) + pipe.SPECIES = Set(initialize=['a', 'b', 'c']) pipe.flow = Var() pipe.composition = Var(pipe.SPECIES) - pipe.pIn = Var( within=NonNegativeReals ) + pipe.pIn = Var(within=NonNegativeReals) pipe.OUT = Connector() pipe.OUT.add(-pipe.flow, "flow") @@ -190,24 +201,26 @@ def test_pprint(self): os = StringIO() pipe.OUT.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""OUT : Size=1, Index=None + self.assertEqual( + os.getvalue(), + """OUT : Size=1, Index=None Key : Name : Size : Variable None : comp_a : 1 : composition[a] : composition : 3 : composition : flow : 1 : - flow : pressure : 1 : pIn -""") +""", + ) def _IN(m, i): - return { 'pressure': pipe.pIn, - 'flow': pipe.composition[i] * pipe.flow } + return {'pressure': pipe.pIn, 'flow': pipe.composition[i] * pipe.flow} pipe.IN = Connector(pipe.SPECIES, rule=_IN) os = StringIO() pipe.IN.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""IN : Size=3, Index=SPECIES + self.assertEqual( + os.getvalue(), + """IN : Size=3, Index=SPECIES Key : Name : Size : Variable a : flow : 1 : composition[a]*flow : pressure : 1 : pIn @@ -215,15 +228,15 @@ def _IN(m, i): : pressure : 1 : pIn c : flow : 1 : composition[c]*flow : pressure : 1 : pIn -""") - +""", + ) + def test_display(self): pipe = ConcreteModel() - pipe.SPECIES = Set(initialize=['a','b','c']) + pipe.SPECIES = Set(initialize=['a', 'b', 'c']) pipe.flow = Var(initialize=10) - pipe.composition = Var( pipe.SPECIES, - initialize=lambda m,i: ord(i)-ord('a') ) - pipe.pIn = Var( within=NonNegativeReals, initialize=3.14 ) + pipe.composition = Var(pipe.SPECIES, initialize=lambda m, i: ord(i) - ord('a')) + pipe.pIn = Var(within=NonNegativeReals, initialize=3.14) pipe.OUT = Connector() pipe.OUT.add(-pipe.flow, "flow") @@ -232,23 +245,25 @@ def test_display(self): os = StringIO() pipe.OUT.display(ostream=os) - self.assertEqual(os.getvalue(), -"""OUT : Size=1 + self.assertEqual( + os.getvalue(), + """OUT : Size=1 Key : Name : Value None : composition : {'a': 0, 'b': 1, 'c': 2} : flow : -10 : pressure : 3.14 -""") +""", + ) def _IN(m, i): - return { 'pressure': pipe.pIn, - 'flow': pipe.composition[i] * pipe.flow } + return {'pressure': pipe.pIn, 'flow': pipe.composition[i] * pipe.flow} pipe.IN = Connector(pipe.SPECIES, rule=_IN) os = StringIO() pipe.IN.display(ostream=os) - self.assertEqual(os.getvalue(), -"""IN : Size=3 + self.assertEqual( + os.getvalue(), + """IN : Size=3 Key : Name : Value a : flow : 0 : pressure : 3.14 @@ -256,7 +271,8 @@ def _IN(m, i): : pressure : 3.14 c : flow : 20 : pressure : 3.14 -""") +""", + ) def test_expand_single_scalar(self): m = ConcreteModel() @@ -267,8 +283,8 @@ def test_expand_single_scalar(self): # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == 1) - m.nocon = Constraint(expr = m.x == 2) + m.c = Constraint(expr=m.CON == 1) + m.nocon = Constraint(expr=m.x == 2) self.assertEqual(len(list(m.component_objects(Constraint))), 2) self.assertEqual(len(list(m.component_data_objects(Constraint))), 2) @@ -283,12 +299,13 @@ def test_expand_single_scalar(self): os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=1, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=1, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 1.0 : x : 1.0 : True -""") - +""", + ) def test_expand_scalar(self): m = ConcreteModel() @@ -301,8 +318,8 @@ def test_expand_scalar(self): # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == 1) - m.nocon = Constraint(expr = m.x == 2) + m.c = Constraint(expr=m.CON == 1) + m.nocon = Constraint(expr=m.x == 2) self.assertEqual(len(list(m.component_objects(Constraint))), 2) self.assertEqual(len(list(m.component_data_objects(Constraint))), 2) @@ -317,13 +334,14 @@ def test_expand_scalar(self): os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=2, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=2, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 1.0 : x : 1.0 : True 2 : 1.0 : y : 1.0 : True -""") - +""", + ) def test_expand_expression(self): m = ConcreteModel() @@ -336,8 +354,8 @@ def test_expand_expression(self): # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == 1) - m.nocon = Constraint(expr = m.x == 2) + m.c = Constraint(expr=m.CON == 1) + m.nocon = Constraint(expr=m.x == 2) self.assertEqual(len(list(m.component_objects(Constraint))), 2) self.assertEqual(len(list(m.component_data_objects(Constraint))), 2) @@ -352,17 +370,18 @@ def test_expand_expression(self): os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=2, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=2, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 1.0 : - x : 1.0 : True 2 : 1.0 : 1 + y : 1.0 : True -""") - +""", + ) def test_expand_indexed(self): m = ConcreteModel() - m.x = Var([1,2]) + m.x = Var([1, 2]) m.y = Var() m.CON = Connector() m.CON.add(m.x) @@ -371,8 +390,8 @@ def test_expand_indexed(self): # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == 1) - m.nocon = Constraint(expr = m.x[1] == 2) + m.c = Constraint(expr=m.CON == 1) + m.nocon = Constraint(expr=m.x[1] == 2) self.assertEqual(len(list(m.component_objects(Constraint))), 2) self.assertEqual(len(list(m.component_data_objects(Constraint))), 2) @@ -387,18 +406,19 @@ def test_expand_indexed(self): os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=3, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=3, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 1.0 : x[1] : 1.0 : True 2 : 1.0 : x[2] : 1.0 : True 3 : 1.0 : y : 1.0 : True -""") - +""", + ) def test_expand_empty_scalar(self): m = ConcreteModel() - m.x = Var(bounds=(1,3)) + m.x = Var(bounds=(1, 3)) m.y = Var(domain=Binary) m.CON = Connector() m.CON.add(m.x) @@ -408,8 +428,8 @@ def test_expand_empty_scalar(self): # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == m.ECON) - m.nocon = Constraint(expr = m.x == 2) + m.c = Constraint(expr=m.CON == m.ECON) + m.nocon = Constraint(expr=m.x == 2) self.assertEqual(len(list(m.component_objects(Constraint))), 2) self.assertEqual(len(list(m.component_data_objects(Constraint))), 2) @@ -422,20 +442,21 @@ def test_expand_empty_scalar(self): self.assertFalse(m.c.active) self.assertTrue(m.component('c.expanded').active) - self.assertIs( m.x.domain, m.component('ECON.auto.x').domain ) - self.assertIs( m.y.domain, m.component('ECON.auto.y').domain ) - self.assertEqual( m.x.bounds, m.component('ECON.auto.x').bounds ) - self.assertEqual( m.y.bounds, m.component('ECON.auto.y').bounds ) + self.assertIs(m.x.domain, m.component('ECON.auto.x').domain) + self.assertIs(m.y.domain, m.component('ECON.auto.y').domain) + self.assertEqual(m.x.bounds, m.component('ECON.auto.x').bounds) + self.assertEqual(m.y.bounds, m.component('ECON.auto.y').bounds) os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=2, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=2, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : x - 'ECON.auto.x' : 0.0 : True 2 : 0.0 : y - 'ECON.auto.y' : 0.0 : True -""") - +""", + ) def test_expand_empty_expression(self): m = ConcreteModel() @@ -449,8 +470,8 @@ def test_expand_empty_expression(self): # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == m.ECON) - m.nocon = Constraint(expr = m.x == 2) + m.c = Constraint(expr=m.CON == m.ECON) + m.nocon = Constraint(expr=m.x == 2) self.assertEqual(len(list(m.component_objects(Constraint))), 2) self.assertEqual(len(list(m.component_data_objects(Constraint))), 2) @@ -465,18 +486,19 @@ def test_expand_empty_expression(self): os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=2, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=2, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : - x - 'ECON.auto.x' : 0.0 : True 2 : 0.0 : 1 + y - 'ECON.auto.y' : 0.0 : True -""") - +""", + ) def test_expand_empty_indexed(self): m = ConcreteModel() - m.x = Var([1,2], domain=Binary) - m.y = Var(bounds=(1,3)) + m.x = Var([1, 2], domain=Binary) + m.y = Var(bounds=(1, 3)) m.CON = Connector() m.CON.add(m.x) m.CON.add(m.y) @@ -485,14 +507,14 @@ def test_expand_empty_indexed(self): # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == m.ECON) - m.nocon = Constraint(expr = m.x[1] == 2) + m.c = Constraint(expr=m.CON == m.ECON) + m.nocon = Constraint(expr=m.x[1] == 2) self.assertEqual(len(list(m.component_objects(Constraint))), 2) self.assertEqual(len(list(m.component_data_objects(Constraint))), 2) TransformationFactory('core.expand_connectors').apply_to(m) - #m.pprint() + # m.pprint() self.assertEqual(len(list(m.component_objects(Constraint))), 3) self.assertEqual(len(list(m.component_data_objects(Constraint))), 5) @@ -500,27 +522,29 @@ def test_expand_empty_indexed(self): self.assertFalse(m.c.active) self.assertTrue(m.component('c.expanded').active) - self.assertIs( m.x[1].domain, m.component('ECON.auto.x')[1].domain ) - self.assertIs( m.x[2].domain, m.component('ECON.auto.x')[2].domain ) - self.assertIs( m.y.domain, m.component('ECON.auto.y').domain ) - self.assertEqual( m.x[1].bounds, m.component('ECON.auto.x')[1].bounds ) - self.assertEqual( m.x[2].bounds, m.component('ECON.auto.x')[2].bounds ) - self.assertEqual( m.y.bounds, m.component('ECON.auto.y').bounds ) + self.assertIs(m.x[1].domain, m.component('ECON.auto.x')[1].domain) + self.assertIs(m.x[2].domain, m.component('ECON.auto.x')[2].domain) + self.assertIs(m.y.domain, m.component('ECON.auto.y').domain) + self.assertEqual(m.x[1].bounds, m.component('ECON.auto.x')[1].bounds) + self.assertEqual(m.x[2].bounds, m.component('ECON.auto.x')[2].bounds) + self.assertEqual(m.y.bounds, m.component('ECON.auto.y').bounds) os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=3, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=3, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : x[1] - 'ECON.auto.x'[1] : 0.0 : True 2 : 0.0 : x[2] - 'ECON.auto.x'[2] : 0.0 : True 3 : 0.0 : y - 'ECON.auto.y' : 0.0 : True -""") +""", + ) def test_expand_multiple_empty_indexed(self): m = ConcreteModel() - m.x = Var([1,2], domain=Binary) - m.y = Var(bounds=(1,3)) + m.x = Var([1, 2], domain=Binary) + m.y = Var(bounds=(1, 3)) m.CON = Connector() m.CON.add(m.x) m.CON.add(m.y) @@ -530,15 +554,15 @@ def test_expand_multiple_empty_indexed(self): # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == m.ECON1) - m.d = Constraint(expr= m.ECON2 == m.ECON1) - m.nocon = Constraint(expr = m.x[1] == 2) + m.c = Constraint(expr=m.CON == m.ECON1) + m.d = Constraint(expr=m.ECON2 == m.ECON1) + m.nocon = Constraint(expr=m.x[1] == 2) self.assertEqual(len(list(m.component_objects(Constraint))), 3) self.assertEqual(len(list(m.component_data_objects(Constraint))), 3) TransformationFactory('core.expand_connectors').apply_to(m) - #m.pprint() + # m.pprint() self.assertEqual(len(list(m.component_objects(Constraint))), 5) self.assertEqual(len(list(m.component_data_objects(Constraint))), 9) @@ -548,71 +572,74 @@ def test_expand_multiple_empty_indexed(self): self.assertFalse(m.d.active) self.assertTrue(m.component('d.expanded').active) - self.assertIs( m.x[1].domain, m.component('ECON1.auto.x')[1].domain ) - self.assertIs( m.x[2].domain, m.component('ECON1.auto.x')[2].domain ) - self.assertIs( m.y.domain, m.component('ECON1.auto.y').domain ) - self.assertEqual( m.x[1].bounds, m.component('ECON1.auto.x')[1].bounds ) - self.assertEqual( m.x[2].bounds, m.component('ECON1.auto.x')[2].bounds ) - self.assertEqual( m.y.bounds, m.component('ECON1.auto.y').bounds ) + self.assertIs(m.x[1].domain, m.component('ECON1.auto.x')[1].domain) + self.assertIs(m.x[2].domain, m.component('ECON1.auto.x')[2].domain) + self.assertIs(m.y.domain, m.component('ECON1.auto.y').domain) + self.assertEqual(m.x[1].bounds, m.component('ECON1.auto.x')[1].bounds) + self.assertEqual(m.x[2].bounds, m.component('ECON1.auto.x')[2].bounds) + self.assertEqual(m.y.bounds, m.component('ECON1.auto.y').bounds) - self.assertIs( m.x[1].domain, m.component('ECON2.auto.x')[1].domain ) - self.assertIs( m.x[2].domain, m.component('ECON2.auto.x')[2].domain ) - self.assertIs( m.y.domain, m.component('ECON2.auto.y').domain ) - self.assertEqual( m.x[1].bounds, m.component('ECON2.auto.x')[1].bounds ) - self.assertEqual( m.x[2].bounds, m.component('ECON2.auto.x')[2].bounds ) - self.assertEqual( m.y.bounds, m.component('ECON2.auto.y').bounds ) + self.assertIs(m.x[1].domain, m.component('ECON2.auto.x')[1].domain) + self.assertIs(m.x[2].domain, m.component('ECON2.auto.x')[2].domain) + self.assertIs(m.y.domain, m.component('ECON2.auto.y').domain) + self.assertEqual(m.x[1].bounds, m.component('ECON2.auto.x')[1].bounds) + self.assertEqual(m.x[2].bounds, m.component('ECON2.auto.x')[2].bounds) + self.assertEqual(m.y.bounds, m.component('ECON2.auto.y').bounds) os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=3, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=3, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : x[1] - 'ECON1.auto.x'[1] : 0.0 : True 2 : 0.0 : x[2] - 'ECON1.auto.x'[2] : 0.0 : True 3 : 0.0 : y - 'ECON1.auto.y' : 0.0 : True -""") +""", + ) os = StringIO() m.component('d.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""d.expanded : Size=3, Index='d.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """d.expanded : Size=3, Index='d.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : 'ECON2.auto.x'[1] - 'ECON1.auto.x'[1] : 0.0 : True 2 : 0.0 : 'ECON2.auto.x'[2] - 'ECON1.auto.x'[2] : 0.0 : True 3 : 0.0 : 'ECON2.auto.y' - 'ECON1.auto.y' : 0.0 : True -""") - +""", + ) def test_expand_multiple_indexed(self): m = ConcreteModel() - m.x = Var([1,2], domain=Binary) - m.y = Var(bounds=(1,3)) + m.x = Var([1, 2], domain=Binary) + m.y = Var(bounds=(1, 3)) m.CON = Connector() m.CON.add(m.x) m.CON.add(m.y) - m.a1 = Var([1,2]) - m.a2 = Var([1,2]) + m.a1 = Var([1, 2]) + m.a2 = Var([1, 2]) m.b1 = Var() m.b2 = Var() m.ECON2 = Connector() - m.ECON2.add(m.a1,'x') - m.ECON2.add(m.b1,'y') + m.ECON2.add(m.a1, 'x') + m.ECON2.add(m.b1, 'y') m.ECON1 = Connector() - m.ECON1.add(m.a2,'x') - m.ECON1.add(m.b2,'y') + m.ECON1.add(m.a2, 'x') + m.ECON1.add(m.b2, 'y') # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == m.ECON1) - m.d = Constraint(expr= m.ECON2 == m.ECON1) - m.nocon = Constraint(expr = m.x[1] == 2) + m.c = Constraint(expr=m.CON == m.ECON1) + m.d = Constraint(expr=m.ECON2 == m.ECON1) + m.nocon = Constraint(expr=m.x[1] == 2) self.assertEqual(len(list(m.component_objects(Constraint))), 3) self.assertEqual(len(list(m.component_data_objects(Constraint))), 3) TransformationFactory('core.expand_connectors').apply_to(m) - #m.pprint() + # m.pprint() self.assertEqual(len(list(m.component_objects(Constraint))), 5) self.assertEqual(len(list(m.component_data_objects(Constraint))), 9) @@ -624,69 +651,76 @@ def test_expand_multiple_indexed(self): os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=3, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=3, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : x[1] - a2[1] : 0.0 : True 2 : 0.0 : x[2] - a2[2] : 0.0 : True 3 : 0.0 : y - b2 : 0.0 : True -""") +""", + ) os = StringIO() m.component('d.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""d.expanded : Size=3, Index='d.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """d.expanded : Size=3, Index='d.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : a1[1] - a2[1] : 0.0 : True 2 : 0.0 : a1[2] - a2[2] : 0.0 : True 3 : 0.0 : b1 - b2 : 0.0 : True -""") - +""", + ) def test_expand_implicit_indexed(self): m = ConcreteModel() - m.x = Var([1,2], domain=Binary) - m.y = Var(bounds=(1,3)) + m.x = Var([1, 2], domain=Binary) + m.y = Var(bounds=(1, 3)) m.CON = Connector() m.CON.add(m.x) m.CON.add(m.y) - m.a2 = Var([1,2]) + m.a2 = Var([1, 2]) m.b1 = Var() m.ECON2 = Connector(implicit=['x']) - m.ECON2.add(m.b1,'y') + m.ECON2.add(m.b1, 'y') m.ECON1 = Connector(implicit=['y']) - m.ECON1.add(m.a2,'x') + m.ECON1.add(m.a2, 'x') # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == m.ECON1) - m.d = Constraint(expr= m.ECON2 == m.CON) - m.nocon = Constraint(expr = m.x[1] == 2) + m.c = Constraint(expr=m.CON == m.ECON1) + m.d = Constraint(expr=m.ECON2 == m.CON) + m.nocon = Constraint(expr=m.x[1] == 2) self.assertEqual(len(list(m.component_objects(Constraint))), 3) self.assertEqual(len(list(m.component_data_objects(Constraint))), 3) os = StringIO() m.ECON1.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""ECON1 : Size=1, Index=None + self.assertEqual( + os.getvalue(), + """ECON1 : Size=1, Index=None Key : Name : Size : Variable None : x : 2 : a2 : y : - : None -""") +""", + ) TransformationFactory('core.expand_connectors').apply_to(m) - #m.pprint() + # m.pprint() os = StringIO() m.ECON1.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""ECON1 : Size=1, Index=None + self.assertEqual( + os.getvalue(), + """ECON1 : Size=1, Index=None Key : Name : Size : Variable None : x : 2 : a2 : y : 1 : 'ECON1.auto.y' -""") +""", + ) self.assertEqual(len(list(m.component_objects(Constraint))), 5) self.assertEqual(len(list(m.component_data_objects(Constraint))), 9) @@ -698,32 +732,34 @@ def test_expand_implicit_indexed(self): os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=3, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=3, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : x[1] - a2[1] : 0.0 : True 2 : 0.0 : x[2] - a2[2] : 0.0 : True 3 : 0.0 : y - 'ECON1.auto.y' : 0.0 : True -""") +""", + ) os = StringIO() m.component('d.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""d.expanded : Size=3, Index='d.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """d.expanded : Size=3, Index='d.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : 'ECON2.auto.x'[1] - x[1] : 0.0 : True 2 : 0.0 : 'ECON2.auto.x'[2] - x[2] : 0.0 : True 3 : 0.0 : b1 - y : 0.0 : True -""") - +""", + ) def test_varlist_aggregator(self): m = ConcreteModel() m.flow = VarList() - m.phase = Var(bounds=(1,3)) + m.phase = Var(bounds=(1, 3)) m.CON = Connector() - m.CON.add( m.flow, - aggregate=lambda m,v: sum(v[i] for i in v) == 0 ) + m.CON.add(m.flow, aggregate=lambda m, v: sum(v[i] for i in v) == 0) m.CON.add(m.phase) m.ECON2 = Connector() m.ECON1 = Connector() @@ -731,14 +767,14 @@ def test_varlist_aggregator(self): # 2 constraints: one has a connector, the other doesn't. The # former should be deactivated and expanded, the latter should # be left untouched. - m.c = Constraint(expr= m.CON == m.ECON1) - m.d = Constraint(expr= m.ECON2 == m.CON) + m.c = Constraint(expr=m.CON == m.ECON1) + m.d = Constraint(expr=m.ECON2 == m.CON) self.assertEqual(len(list(m.component_objects(Constraint))), 2) self.assertEqual(len(list(m.component_data_objects(Constraint))), 2) TransformationFactory('core.expand_connectors').apply_to(m) - #m.pprint() + # m.pprint() self.assertEqual(len(list(m.component_objects(Constraint))), 5) self.assertEqual(len(list(m.component_data_objects(Constraint))), 7) @@ -747,49 +783,56 @@ def test_varlist_aggregator(self): self.assertFalse(m.d.active) self.assertTrue(m.component('d.expanded').active) - self.assertEqual( len(m.flow), 2 ) + self.assertEqual(len(m.flow), 2) os = StringIO() m.component('c.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c.expanded : Size=2, Index='c.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """c.expanded : Size=2, Index='c.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : flow[1] - 'ECON1.auto.flow' : 0.0 : True 2 : 0.0 : phase - 'ECON1.auto.phase' : 0.0 : True -""") +""", + ) os = StringIO() m.component('d.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""d.expanded : Size=2, Index='d.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """d.expanded : Size=2, Index='d.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : 'ECON2.auto.flow' - flow[2] : 0.0 : True 2 : 0.0 : 'ECON2.auto.phase' - phase : 0.0 : True -""") +""", + ) os = StringIO() m.component('CON.flow.aggregate').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""CON.flow.aggregate : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """CON.flow.aggregate : Size=1, Index=None, Active=True Key : Lower : Body : Upper : Active None : 0.0 : flow[1] + flow[2] : 0.0 : True -""") +""", + ) os = StringIO() m.CON.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""CON : Size=1, Index=None + self.assertEqual( + os.getvalue(), + """CON : Size=1, Index=None Key : Name : Size : Variable None : flow : * : flow : phase : 1 : phase -""") - +""", + ) def test_indexed_connector(self): m = ConcreteModel() m.x = Var(initialize=1, domain=Reals) m.y = Var(initialize=2, domain=Reals) - m.c = Connector([1,2]) + m.c = Connector([1, 2]) m.c[1].add(m.x, name='v') m.c[2].add(m.y, name='v') @@ -799,11 +842,13 @@ def test_indexed_connector(self): os = StringIO() m.component('eq.expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""eq.expanded : Size=1, Index='eq.expanded_index', Active=True + self.assertEqual( + os.getvalue(), + """eq.expanded : Size=1, Index='eq.expanded_index', Active=True Key : Lower : Body : Upper : Active 1 : 0.0 : x - y : 0.0 : True -""") +""", + ) if __name__ == "__main__": diff --git a/pyomo/core/tests/unit/test_deprecation.py b/pyomo/core/tests/unit/test_deprecation.py index 1b629d20234..9adf2de26cd 100644 --- a/pyomo/core/tests/unit/test_deprecation.py +++ b/pyomo/core/tests/unit/test_deprecation.py @@ -17,11 +17,13 @@ from pyomo.common.log import LoggingIntercept + def force_load(module): if module in sys.modules: del sys.modules[module] return import_module(module) + class TestDeprecatedModules(unittest.TestCase): def test_rangeset(self): log = StringIO() @@ -32,15 +34,18 @@ def test_rangeset(self): log = StringIO() with LoggingIntercept(log, 'pyomo'): rs = force_load('pyomo.core.base.rangeset') - self.assertIn("The pyomo.core.base.rangeset module is deprecated.", - log.getvalue().strip().replace('\n',' ')) + self.assertIn( + "The pyomo.core.base.rangeset module is deprecated.", + log.getvalue().strip().replace('\n', ' '), + ) self.assertIs(RangeSet, rs.RangeSet) # Run this twice to implicitly test the force_load() implementation log = StringIO() with LoggingIntercept(log, 'pyomo'): rs = force_load('pyomo.core.base.rangeset') - self.assertIn("The pyomo.core.base.rangeset module is deprecated.", - log.getvalue().strip().replace('\n',' ')) + self.assertIn( + "The pyomo.core.base.rangeset module is deprecated.", + log.getvalue().strip().replace('\n', ' '), + ) self.assertIs(RangeSet, rs.RangeSet) - diff --git a/pyomo/core/tests/unit/test_derivs.py b/pyomo/core/tests/unit/test_derivs.py index 74d117068fd..9e89f2beac9 100644 --- a/pyomo/core/tests/unit/test_derivs.py +++ b/pyomo/core/tests/unit/test_derivs.py @@ -11,13 +11,15 @@ import pyomo.common.unittest as unittest import pyomo.environ as pyo -from pyomo.common.getGSL import find_GSL -from pyomo.core.expr.calculus.derivatives import differentiate, Modes +from pyomo.common.gsl import find_GSL +from pyomo.core.expr.calculus.derivatives import differentiate from pyomo.core.expr.calculus.diff_with_pyomo import ( - reverse_ad, reverse_sd, DifferentiationException, + reverse_ad, + reverse_sd, + DifferentiationException, ) from pyomo.core.expr.numeric_expr import LinearExpression -from pyomo.core.expr.compare import compare_expressions +from pyomo.core.expr.compare import compare_expressions, assertExpressionsEqual from pyomo.core.expr.sympy_tools import sympy_available tol = 6 @@ -25,16 +27,16 @@ def approx_deriv(expr, wrt, delta=0.001): numerator = 0 - wrt.value += 2*delta + wrt.value += 2 * delta numerator -= pyo.value(expr) wrt.value -= delta - numerator += 8*pyo.value(expr) - wrt.value -= 2*delta - numerator -= 8*pyo.value(expr) + numerator += 8 * pyo.value(expr) + wrt.value -= 2 * delta + numerator -= 8 * pyo.value(expr) wrt.value -= delta numerator += pyo.value(expr) - wrt.value += 2*delta - return numerator / (12*delta) + wrt.value += 2 * delta + return numerator / (12 * delta) class TestDerivs(unittest.TestCase): @@ -45,8 +47,8 @@ def test_prod(self): e = m.x * m.y derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) - self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) + self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) @@ -54,11 +56,11 @@ def test_sum(self): m = pyo.ConcreteModel() m.x = pyo.Var(initialize=2.0) m.y = pyo.Var(initialize=3.0) - e = 2.0*m.x + 3.0*m.y - m.x*m.y + e = 2.0 * m.x + 3.0 * m.y - m.x * m.y derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) - self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) + self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) @@ -69,8 +71,8 @@ def test_div(self): e = m.x / m.y derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) - self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) + self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) @@ -78,11 +80,11 @@ def test_pow(self): m = pyo.ConcreteModel() m.x = pyo.Var(initialize=2.0) m.y = pyo.Var(initialize=3.0) - e = m.x ** m.y + e = m.x**m.y derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) - self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) + self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) @@ -93,7 +95,7 @@ def test_sqrt(self): e = pyo.sqrt(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) def test_exp(self): @@ -102,7 +104,7 @@ def test_exp(self): e = pyo.exp(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) def test_log(self): @@ -111,7 +113,7 @@ def test_log(self): e = pyo.log(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) def test_log10(self): @@ -120,7 +122,7 @@ def test_log10(self): e = pyo.log10(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) def test_sin(self): @@ -129,7 +131,7 @@ def test_sin(self): e = pyo.sin(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) def test_cos(self): @@ -138,7 +140,7 @@ def test_cos(self): e = pyo.cos(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) def test_tan(self): @@ -147,7 +149,7 @@ def test_tan(self): e = pyo.tan(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) def test_asin(self): @@ -156,7 +158,7 @@ def test_asin(self): e = pyo.asin(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) def test_acos(self): @@ -165,7 +167,7 @@ def test_acos(self): e = pyo.acos(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) def test_atan(self): @@ -174,7 +176,7 @@ def test_atan(self): e = pyo.atan(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) def test_abs(self): @@ -183,17 +185,17 @@ def test_abs(self): e = 2 * abs(m.x) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) m.x.value = -2 derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) m.x.value = 0 with self.assertRaisesRegex( - DifferentiationException, - r'Cannot differentiate abs\(x\) at x=0'): + DifferentiationException, r'Cannot differentiate abs\(x\) at x=0' + ): reverse_ad(e) def test_nested(self): @@ -201,12 +203,12 @@ def test_nested(self): m.x = pyo.Var(initialize=2) m.y = pyo.Var(initialize=3) m.p = pyo.Param(initialize=0.5, mutable=True) - e = pyo.exp(m.x**m.p + 3.2*m.y - 12) + e = pyo.exp(m.x**m.p + 3.2 * m.y - 12) derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) - self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol+3) - self.assertAlmostEqual(derivs[m.p], pyo.value(symbolic[m.p]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) + self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol + 3) + self.assertAlmostEqual(derivs[m.p], pyo.value(symbolic[m.p]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) self.assertAlmostEqual(derivs[m.p], approx_deriv(e, m.p), tol) @@ -221,7 +223,8 @@ def e2(m, i): if i == 1: return m.x + 4 else: - return m.x ** 2 + return m.x**2 + m.o = pyo.Objective(expr=m.e + 1 + m.e2[1] + m.e2[2]) derivs = reverse_ad(m.o.expr) symbolic = reverse_sd(m.o.expr) @@ -233,7 +236,7 @@ def test_multiple_named_expressions(self): m.y = pyo.Var() m.x.value = 1 m.y.value = 1 - m.E = pyo.Expression(expr=m.x*m.y) + m.E = pyo.Expression(expr=m.x * m.y) e = m.E - m.E derivs = reverse_ad(e) self.assertAlmostEqual(derivs[m.x], 0) @@ -251,7 +254,7 @@ def test_external(self): m.hypot = pyo.ExternalFunction(library=DLL, function='gsl_hypot') m.x = pyo.Var(initialize=0.5) m.y = pyo.Var(initialize=1.5) - e = 2 * m.hypot(m.x, m.x*m.y) + e = 2 * m.hypot(m.x, m.x * m.y) derivs = reverse_ad(e) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) @@ -261,7 +264,9 @@ def test_linear_expression(self): m.x = pyo.Var(initialize=2.0) m.y = pyo.Var(initialize=3.0) m.p = pyo.Param(initialize=2.5, mutable=True) - e = LinearExpression(constant=m.p, linear_vars=[m.x, m.y], linear_coefs=[1.8, m.p]) + e = LinearExpression( + constant=m.p, linear_vars=[m.x, m.y], linear_coefs=[1.8, m.p] + ) e = pyo.log(e) derivs = reverse_ad(e) symbolic = reverse_sd(e) @@ -282,30 +287,31 @@ def test_duplicate_expressions(self): m = pyo.ConcreteModel() m.x = pyo.Var(initialize=0.23) m.y = pyo.Var(initialize=0.88) - a = (m.x + 1)**2 - b = 3*(a + m.y) - e = 2*a + 2*b + 2*b + 2*a + a = (m.x + 1) ** 2 + b = 3 * (a + m.y) + e = 2 * a + 2 * b + 2 * b + 2 * a derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) - self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol+3) + self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol + 3) self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) def test_nested_named_expressions(self): m = pyo.ConcreteModel() m.x = pyo.Var(initialize=0.23) m.y = pyo.Var(initialize=0.88) - m.a = pyo.Expression(expr=(m.x + 1)**2) - m.b = pyo.Expression(expr=3*(m.a + m.y)) - e = 2*m.a + 2*m.b + 2*m.b + 2*m.a + m.a = pyo.Expression(expr=(m.x + 1) ** 2) + m.b = pyo.Expression(expr=3 * (m.a + m.y)) + e = 2 * m.a + 2 * m.b + 2 * m.b + 2 * m.a derivs = reverse_ad(e) symbolic = reverse_sd(e) - self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol+3) + self.assertAlmostEqual(derivs[m.x], pyo.value(symbolic[m.x]), tol + 3) self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol) - self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol+3) + self.assertAlmostEqual(derivs[m.y], pyo.value(symbolic[m.y]), tol + 3) self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol) + class TestDifferentiate(unittest.TestCase): @unittest.skipUnless(sympy_available, "test requires sympy") def test_sympy(self): @@ -313,7 +319,7 @@ def test_sympy(self): m.x = pyo.Var(initialize=0.23) m.y = pyo.Var(initialize=0.88) ddx = differentiate(m.x**2, wrt=m.x, mode='sympy') - self.assertTrue(compare_expressions(ddx, 2*m.x)) + self.assertTrue(compare_expressions(ddx, 2 * m.x)) self.assertAlmostEqual(ddx(), 0.46) ddy = differentiate(m.x**2, wrt=m.y, mode='sympy') self.assertEqual(ddy, 0) @@ -321,7 +327,7 @@ def test_sympy(self): ddx = differentiate(m.x**2, wrt_list=[m.x, m.y], mode='sympy') self.assertIsInstance(ddx, list) self.assertEqual(len(ddx), 2) - self.assertTrue(compare_expressions(ddx[0], 2*m.x)) + self.assertTrue(compare_expressions(ddx[0], 2 * m.x)) self.assertAlmostEqual(ddx[0](), 0.46) self.assertEqual(ddx[1], 0) @@ -330,16 +336,15 @@ def test_reverse_symbolic(self): m.x = pyo.Var(initialize=0.23) m.y = pyo.Var(initialize=0.88) ddx = differentiate(m.x**2, wrt=m.x, mode='reverse_symbolic') - self.assertTrue(compare_expressions(ddx, 2*m.x)) + assertExpressionsEqual(self, ddx, 2 * m.x) self.assertAlmostEqual(ddx(), 0.46) ddy = differentiate(m.x**2, wrt=m.y, mode='reverse_symbolic') self.assertEqual(ddy, 0) - ddx = differentiate(m.x**2, wrt_list=[m.x, m.y], - mode='reverse_symbolic') + ddx = differentiate(m.x**2, wrt_list=[m.x, m.y], mode='reverse_symbolic') self.assertIsInstance(ddx, list) self.assertEqual(len(ddx), 2) - self.assertTrue(compare_expressions(ddx[0], 2*m.x)) + assertExpressionsEqual(self, ddx[0], 2 * m.x) self.assertAlmostEqual(ddx[0](), 0.46) self.assertEqual(ddx[1], 0) @@ -353,8 +358,7 @@ def test_reverse_numeric(self): ddy = differentiate(m.x**2, wrt=m.y, mode='reverse_numeric') self.assertEqual(ddy, 0) - ddx = differentiate(m.x**2, wrt_list=[m.x, m.y], - mode='reverse_numeric') + ddx = differentiate(m.x**2, wrt_list=[m.x, m.y], mode='reverse_numeric') self.assertIsInstance(ddx, list) self.assertEqual(len(ddx), 2) self.assertIsInstance(ddx[0], float) @@ -365,14 +369,17 @@ def test_bad_mode(self): m = pyo.ConcreteModel() m.x = pyo.Var(initialize=0.23) with self.assertRaisesRegex( - ValueError, r'Unrecognized differentiation mode: foo\n' - r"Expected one of \['sympy', 'reverse_symbolic', " - r"'reverse_numeric'\]"): + ValueError, + r'Unrecognized differentiation mode: foo\n' + r"Expected one of \['sympy', 'reverse_symbolic', " + r"'reverse_numeric'\]", + ): ddx = differentiate(m.x**2, m.x, mode='foo') def test_bad_wrt(self): m = pyo.ConcreteModel() m.x = pyo.Var(initialize=0.23) with self.assertRaisesRegex( - ValueError, r'Cannot specify both wrt and wrt_list'): + ValueError, r'Cannot specify both wrt and wrt_list' + ): ddx = differentiate(m.x**2, wrt=m.x, wrt_list=[m.x]) diff --git a/pyomo/core/tests/unit/test_dict_objects.py b/pyomo/core/tests/unit/test_dict_objects.py index 7c9047f6153..7d3244f4d86 100644 --- a/pyomo/core/tests/unit/test_dict_objects.py +++ b/pyomo/core/tests/unit/test_dict_objects.py @@ -10,18 +10,20 @@ # ___________________________________________________________________________ import pyomo.common.unittest as unittest -from pyomo.core.base import (ConcreteModel, Var, Reals) -from pyomo.core.beta.dict_objects import (VarDict, - ConstraintDict, - ObjectiveDict, - ExpressionDict) +from pyomo.core.base import ConcreteModel, Var, Reals +from pyomo.core.beta.dict_objects import ( + VarDict, + ConstraintDict, + ObjectiveDict, + ExpressionDict, +) from pyomo.core.base.var import _GeneralVarData from pyomo.core.base.constraint import _GeneralConstraintData from pyomo.core.base.objective import _GeneralObjectiveData from pyomo.core.base.expression import _GeneralExpressionData -class _TestComponentDictBase(object): +class _TestComponentDictBase(object): _ctype = None _cdatatype = None @@ -43,15 +45,14 @@ def test_init1(self): def test_init2(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - model.c = self._ctype((i, self._cdatatype(self._arg())) - for i in index) + index = ['a', 1, None, (1,), (1, 2)] + model.c = self._ctype((i, self._cdatatype(self._arg())) for i in index) self.assertEqual(model.c.is_indexed(), True) self.assertEqual(model.c.is_constructed(), True) with self.assertRaises(TypeError): - model.d = \ - self._ctype(*tuple((i, self._cdatatype(self._arg())) - for i in index)) + model.d = self._ctype( + *tuple((i, self._cdatatype(self._arg())) for i in index) + ) def test_len1(self): model = self.model @@ -61,16 +62,15 @@ def test_len1(self): def test_len2(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - model.c = self._ctype((i, self._cdatatype(self._arg())) - for i in index) + index = ['a', 1, None, (1,), (1, 2)] + model.c = self._ctype((i, self._cdatatype(self._arg())) for i in index) self.assertEqual(len(model.c), len(index)) def test_setitem(self): model = self.model model = ConcreteModel() model.c = self._ctype() - index = ['a', 1, None, (1,), (1,2)] + index = ['a', 1, None, (1,), (1, 2)] for i in index: self.assertTrue(i not in model.c) for cnt, i in enumerate(index, 1): @@ -84,13 +84,13 @@ def test_setitem(self): # For now just test that implicit assignment raises an exception def test_wrong_type_init(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] + index = ['a', 1, None, (1,), (1, 2)] with self.assertRaises(TypeError): model.c = self._ctype((i, self._arg()) for i in index) def test_wrong_type_update(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] + index = ['a', 1, None, (1,), (1, 2)] model.c = self._ctype() with self.assertRaises(TypeError): model.c.update((i, self._arg()) for i in index) @@ -159,9 +159,8 @@ def test_setitem_exists(self): # by a call to setitem and not simply updated. def test_setitem_exists_overwrite(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - model.c = self._ctype((i, self._cdatatype(self._arg())) - for i in index) + index = ['a', 1, None, (1,), (1, 2)] + model.c = self._ctype((i, self._cdatatype(self._arg())) for i in index) self.assertEqual(len(model.c), len(index)) for i in index: self.assertTrue(i in model.c) @@ -174,25 +173,22 @@ def test_setitem_exists_overwrite(self): def test_delitem(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - model.c = self._ctype((i, self._cdatatype(self._arg())) - for i in index) + index = ['a', 1, None, (1,), (1, 2)] + model.c = self._ctype((i, self._cdatatype(self._arg())) for i in index) self.assertEqual(len(model.c), len(index)) for cnt, i in enumerate(index, 1): self.assertTrue(i in model.c) cdata = model.c[i] - self.assertEqual(id(cdata.parent_component()), - id(model.c)) + self.assertEqual(id(cdata.parent_component()), id(model.c)) del model.c[i] - self.assertEqual(len(model.c), len(index)-cnt) + self.assertEqual(len(model.c), len(index) - cnt) self.assertTrue(i not in model.c) self.assertEqual(cdata.parent_component(), None) def test_iter(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - model.c = self._ctype((i, self._cdatatype(self._arg())) - for i in index) + index = ['a', 1, None, (1,), (1, 2)] + model.c = self._ctype((i, self._cdatatype(self._arg())) for i in index) self.assertEqual(len(model.c), len(index)) comp_index = [i for i in model.c] self.assertEqual(len(comp_index), len(index)) @@ -201,9 +197,8 @@ def test_iter(self): def test_model_clone(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - model.c = self._ctype((i, self._cdatatype(self._arg())) - for i in index) + index = ['a', 1, None, (1,), (1, 2)] + model.c = self._ctype((i, self._cdatatype(self._arg())) for i in index) inst = model.clone() self.assertNotEqual(id(inst.c), id(model.c)) for i in index: @@ -211,67 +206,64 @@ def test_model_clone(self): def test_keys(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - raw_constraint_dict = {i:self._cdatatype(self._arg()) for i in index} + index = ['a', 1, None, (1,), (1, 2)] + raw_constraint_dict = {i: self._cdatatype(self._arg()) for i in index} model.c = self._ctype(raw_constraint_dict) - self.assertEqual(sorted(list(raw_constraint_dict.keys()), key=str), - sorted(list(model.c.keys()), key=str)) + self.assertEqual( + sorted(list(raw_constraint_dict.keys()), key=str), + sorted(list(model.c.keys()), key=str), + ) def test_values(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - raw_constraint_dict = {i:self._cdatatype(self._arg()) for i in index} + index = ['a', 1, None, (1,), (1, 2)] + raw_constraint_dict = {i: self._cdatatype(self._arg()) for i in index} model.c = self._ctype(raw_constraint_dict) self.assertEqual( - sorted(list(id(_v) - for _v in raw_constraint_dict.values()), - key=str), - sorted(list(id(_v) - for _v in model.c.values()), - key=str)) + sorted(list(id(_v) for _v in raw_constraint_dict.values()), key=str), + sorted(list(id(_v) for _v in model.c.values()), key=str), + ) def test_items(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - raw_constraint_dict = {i:self._cdatatype(self._arg()) for i in index} + index = ['a', 1, None, (1,), (1, 2)] + raw_constraint_dict = {i: self._cdatatype(self._arg()) for i in index} model.c = self._ctype(raw_constraint_dict) self.assertEqual( - sorted(list((_i, id(_v)) - for _i,_v in raw_constraint_dict.items()), - key=str), - sorted(list((_i, id(_v)) - for _i,_v in model.c.items()), - key=str)) + sorted( + list((_i, id(_v)) for _i, _v in raw_constraint_dict.items()), key=str + ), + sorted(list((_i, id(_v)) for _i, _v in model.c.items()), key=str), + ) def test_update(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - raw_constraint_dict = {i:self._cdatatype(self._arg()) for i in index} + index = ['a', 1, None, (1,), (1, 2)] + raw_constraint_dict = {i: self._cdatatype(self._arg()) for i in index} model.c = self._ctype() model.c.update(raw_constraint_dict) - self.assertEqual(sorted(list(raw_constraint_dict.keys()), key=str), - sorted(list(model.c.keys()), key=str)) + self.assertEqual( + sorted(list(raw_constraint_dict.keys()), key=str), + sorted(list(model.c.keys()), key=str), + ) def test_name(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - model.c = self._ctype((i, self._cdatatype(self._arg())) - for i in index) + index = ['a', 1, None, (1,), (1, 2)] + model.c = self._ctype((i, self._cdatatype(self._arg())) for i in index) index_to_string = {} index_to_string['a'] = '[a]' index_to_string['a,b'] = "['a,b']" index_to_string[1] = '[1]' index_to_string[None] = '[None]' index_to_string[(1,)] = '[(1,)]' - index_to_string[(1,2)] = '[1,2]' + index_to_string[(1, 2)] = '[1,2]' prefix = "c" for i in index: cdata = model.c[i] - self.assertEqual(cdata.local_name, - cdata.name) + self.assertEqual(cdata.local_name, cdata.name) cname = prefix + index_to_string[i] - self.assertEqual(cdata.local_name, - cname) + self.assertEqual(cdata.local_name, cname) def test_clear(self): model = self.model @@ -311,13 +303,12 @@ def test_eq(self): self.assertTrue(model.c != model.d) self.assertNotEqual(model.c, model.d) -class _TestActiveComponentDictBase(_TestComponentDictBase): +class _TestActiveComponentDictBase(_TestComponentDictBase): def test_activate(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - model.c = self._ctype((i, self._cdatatype(self._arg())) - for i in index) + index = ['a', 1, None, (1,), (1, 2)] + model.c = self._ctype((i, self._cdatatype(self._arg())) for i in index) self.assertEqual(len(model.c), len(index)) self.assertEqual(model.c.active, True) model.c._active = False @@ -331,9 +322,8 @@ def test_activate(self): def test_activate(self): model = self.model - index = ['a', 1, None, (1,), (1,2)] - model.c = self._ctype((i, self._cdatatype(self._arg())) - for i in index) + index = ['a', 1, None, (1,), (1, 2)] + model.c = self._ctype((i, self._cdatatype(self._arg())) for i in index) self.assertEqual(len(model.c), len(index)) self.assertEqual(model.c.active, True) for i in index: @@ -356,44 +346,50 @@ def test_active(self): model.c[1] = self._cdatatype(self._arg()) self.assertEqual(model.c.active, True) -class TestVarDict(_TestComponentDictBase, - unittest.TestCase): + +class TestVarDict(_TestComponentDictBase, unittest.TestCase): # Note: the updated _GeneralVarData class only takes an optional # parent argument (you no longer pass the domain in) _ctype = VarDict _cdatatype = lambda self, arg: _GeneralVarData() + def setUp(self): _TestComponentDictBase.setUp(self) self._arg = lambda: Reals -class TestExpressionDict(_TestComponentDictBase, - unittest.TestCase): + +class TestExpressionDict(_TestComponentDictBase, unittest.TestCase): _ctype = ExpressionDict _cdatatype = _GeneralExpressionData + def setUp(self): _TestComponentDictBase.setUp(self) self._arg = lambda: self.model.x**3 + # # Test components that include activate/deactivate # functionality. # -class TestConstraintDict(_TestActiveComponentDictBase, - unittest.TestCase): + +class TestConstraintDict(_TestActiveComponentDictBase, unittest.TestCase): _ctype = ConstraintDict _cdatatype = _GeneralConstraintData + def setUp(self): _TestComponentDictBase.setUp(self) self._arg = lambda: self.model.x >= 1 -class TestObjectiveDict(_TestActiveComponentDictBase, - unittest.TestCase): + +class TestObjectiveDict(_TestActiveComponentDictBase, unittest.TestCase): _ctype = ObjectiveDict _cdatatype = _GeneralObjectiveData + def setUp(self): _TestComponentDictBase.setUp(self) self._arg = lambda: self.model.x**2 + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_disable_methods.py b/pyomo/core/tests/unit/test_disable_methods.py index 82d90695866..4d6595e5fe8 100644 --- a/pyomo/core/tests/unit/test_disable_methods.py +++ b/pyomo/core/tests/unit/test_disable_methods.py @@ -16,6 +16,7 @@ from pyomo.core.base.disable_methods import disable_methods from pyomo.common.modeling import NOTSET + class LocalClass(object): def __init__(self, name): self._name = name @@ -23,8 +24,10 @@ def __init__(self, name): def __str__(self): return self._name + local_instance = LocalClass('local') + class _simple(object): def __init__(self, name): self.name = name @@ -46,6 +49,7 @@ def c(self): @property def d(self): return self._d + @d.setter def d(self, value='d'): self._d = value @@ -53,6 +57,7 @@ def d(self, value='d'): @property def e(self): return self._e + @e.setter def e(self, value='e'): self._e = value @@ -69,40 +74,68 @@ def h(self): return 'h' -@disable_methods(('a',('b', 'custom_msg'),'d',('e', 'custom_pmsg'),'f', - 'g',('h', 'custom_pmsg'))) +@disable_methods( + ( + 'a', + ('b', 'custom_msg'), + 'd', + ('e', 'custom_pmsg'), + 'f', + 'g', + ('h', 'custom_pmsg'), + ) +) class _abstract_simple(_simple): pass + class TestDisableMethods(unittest.TestCase): def test_signature(self): # check that signatures are properly preserved - self.assertEqual(inspect.signature(_simple.construct), - inspect.signature(_abstract_simple.construct)) - self.assertEqual(inspect.signature(_simple.a), - inspect.signature(_abstract_simple.a)) - self.assertEqual(inspect.signature(_simple.b), - inspect.signature(_abstract_simple.b)) - self.assertEqual(inspect.signature(_simple.c), - inspect.signature(_abstract_simple.c)) - self.assertEqual(inspect.signature(_simple.d.fget), - inspect.signature(_abstract_simple.d.fget)) - self.assertEqual(inspect.signature(_simple.d.fset), - inspect.signature(_abstract_simple.d.fset)) - self.assertEqual(inspect.signature(_simple.e.fget), - inspect.signature(_abstract_simple.e.fget)) - self.assertEqual(inspect.signature(_simple.e.fset), - inspect.signature(_abstract_simple.e.fset)) - - self.assertEqual(inspect.signature(_simple.f), - inspect.signature(_abstract_simple.f)) - - self.assertEqual(inspect.signature(_simple.g.fget), - inspect.signature(_abstract_simple.g.fget)) + self.assertEqual( + inspect.signature(_simple.construct), + inspect.signature(_abstract_simple.construct), + ) + self.assertEqual( + inspect.signature(_simple.a), inspect.signature(_abstract_simple.a) + ) + self.assertEqual( + inspect.signature(_simple.b), inspect.signature(_abstract_simple.b) + ) + self.assertEqual( + inspect.signature(_simple.c), inspect.signature(_abstract_simple.c) + ) + self.assertEqual( + inspect.signature(_simple.d.fget), + inspect.signature(_abstract_simple.d.fget), + ) + self.assertEqual( + inspect.signature(_simple.d.fset), + inspect.signature(_abstract_simple.d.fset), + ) + self.assertEqual( + inspect.signature(_simple.e.fget), + inspect.signature(_abstract_simple.e.fget), + ) + self.assertEqual( + inspect.signature(_simple.e.fset), + inspect.signature(_abstract_simple.e.fset), + ) + + self.assertEqual( + inspect.signature(_simple.f), inspect.signature(_abstract_simple.f) + ) + + self.assertEqual( + inspect.signature(_simple.g.fget), + inspect.signature(_abstract_simple.g.fget), + ) self.assertIsNone(_simple.g.fset) self.assertIsNone(_abstract_simple.g.fset) - self.assertEqual(inspect.signature(_simple.h.fget), - inspect.signature(_abstract_simple.h.fget)) + self.assertEqual( + inspect.signature(_simple.h.fget), + inspect.signature(_abstract_simple.h.fget), + ) self.assertIsNone(_simple.h.fset) self.assertIsNone(_abstract_simple.h.fset) @@ -111,52 +144,73 @@ def test_disable(self): self.assertIs(type(x), _abstract_simple) self.assertIsInstance(x, _simple) with self.assertRaisesRegex( - RuntimeError, "Cannot access 'a' on _abstract_simple " - "'foo' before it has been constructed"): + RuntimeError, + "Cannot access 'a' on _abstract_simple " + "'foo' before it has been constructed", + ): x.a() with self.assertRaisesRegex( - RuntimeError, "Cannot custom_msg _abstract_simple " - "'foo' before it has been constructed"): + RuntimeError, + "Cannot custom_msg _abstract_simple " + "'foo' before it has been constructed", + ): x.b() self.assertEqual(x.c(), 'c') with self.assertRaisesRegex( - RuntimeError, "Cannot access property 'd' on _abstract_simple " - "'foo' before it has been constructed"): + RuntimeError, + "Cannot access property 'd' on _abstract_simple " + "'foo' before it has been constructed", + ): x.d with self.assertRaisesRegex( - RuntimeError, "Cannot set property 'd' on _abstract_simple " - "'foo' before it has been constructed"): + RuntimeError, + "Cannot set property 'd' on _abstract_simple " + "'foo' before it has been constructed", + ): x.d = 1 with self.assertRaisesRegex( - RuntimeError, "Cannot custom_pmsg _abstract_simple " - "'foo' before it has been constructed"): + RuntimeError, + "Cannot custom_pmsg _abstract_simple " + "'foo' before it has been constructed", + ): x.e with self.assertRaisesRegex( - RuntimeError, "Cannot custom_pmsg _abstract_simple " - "'foo' before it has been constructed"): + RuntimeError, + "Cannot custom_pmsg _abstract_simple " + "'foo' before it has been constructed", + ): x.e = 1 # Verify that the wrapper function enforces the same API as the # wrapped function + with self.assertRaisesRegex(TypeError, r"f\(\) takes "): + x.f(1, 2, 3, 4, 5) with self.assertRaisesRegex( - TypeError, r"f\(\) takes "): - x.f(1,2,3,4,5) - with self.assertRaisesRegex( - RuntimeError, "Cannot access 'f' on _abstract_simple " - "'foo' before it has been constructed"): - x.f(1,2) + RuntimeError, + "Cannot access 'f' on _abstract_simple " + "'foo' before it has been constructed", + ): + x.f(1, 2) with self.assertRaisesRegex( - RuntimeError, "Cannot access property 'g' on _abstract_simple " - "'foo' before it has been constructed"): + RuntimeError, + "Cannot access property 'g' on _abstract_simple " + "'foo' before it has been constructed", + ): x.g - with self.assertRaisesRegex(AttributeError, "can't set attribute"): + with self.assertRaisesRegex( + AttributeError, "(can't set attribute)|(object has no setter)" + ): x.g = 1 with self.assertRaisesRegex( - RuntimeError, "Cannot custom_pmsg _abstract_simple " - "'foo' before it has been constructed"): + RuntimeError, + "Cannot custom_pmsg _abstract_simple " + "'foo' before it has been constructed", + ): x.h - with self.assertRaisesRegex(AttributeError, "can't set attribute"): + with self.assertRaisesRegex( + AttributeError, "(can't set attribute)|(object has no setter)" + ): x.h = 1 self.assertEqual(x.construct(), 'construct') @@ -171,15 +225,17 @@ def test_disable(self): self.assertEqual(x.e, 'e') x.e = 2 self.assertEqual(x.e, 2) - self.assertEqual(x.f(1,2), 'f:1,2,NOTSET,local') + self.assertEqual(x.f(1, 2), 'f:1,2,NOTSET,local') self.assertEqual(x.g, 'g') self.assertEqual(x.h, 'h') def test_bad_api(self): with self.assertRaisesRegex( - DeveloperError, r"Cannot disable method not_there on " - r""): + DeveloperError, + r"Cannot disable method not_there on ", + normalize_whitespace=True, + ): - @disable_methods(('a','not_there')) + @disable_methods(('a', 'not_there')) class foo(_simple): pass diff --git a/pyomo/core/tests/unit/test_enums.py b/pyomo/core/tests/unit/test_enums.py new file mode 100644 index 00000000000..8f342e55188 --- /dev/null +++ b/pyomo/core/tests/unit/test_enums.py @@ -0,0 +1,62 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import pyomo.common.unittest as unittest + +from pyomo.core.base.enums import SortComponents + + +class TestSortComponents(unittest.TestCase): + def test_mappings(self): + self.assertEqual( + SortComponents(True), + SortComponents.SORTED_INDICES | SortComponents.ALPHABETICAL, + ) + self.assertEqual(SortComponents(False), SortComponents.UNSORTED) + self.assertEqual(SortComponents(None), SortComponents.UNSORTED) + with self.assertRaisesRegex( + ValueError, r'(999 is not a valid SortComponents)|(invalid value 999)' + ): + SortComponents(999) + # Note that enum maps False to 0 without hitting missing. We + # will explicitly test passing False to missing to cover a + # "future proofing" logic branch + self.assertEqual(SortComponents._missing_(False), SortComponents.UNSORTED) + + def test_sorter(self): + self.assertEqual(SortComponents.sorter(), SortComponents.UNSORTED) + self.assertEqual( + SortComponents.sorter(True, False), SortComponents.ALPHABETICAL + ) + self.assertEqual( + SortComponents.sorter(False, True), SortComponents.SORTED_INDICES + ) + self.assertEqual( + SortComponents.sorter(True, True), + SortComponents.ALPHABETICAL | SortComponents.SORTED_INDICES, + ) + + def test_methods(self): + self.assertEqual(SortComponents.default(), SortComponents.UNSORTED) + + self.assertTrue(SortComponents.sort_names(SortComponents.sorter(True, True))) + self.assertTrue(SortComponents.sort_names(SortComponents.sorter(True, False))) + self.assertFalse(SortComponents.sort_names(SortComponents.sorter(False, True))) + self.assertFalse(SortComponents.sort_names(SortComponents.sorter(False, False))) + + self.assertTrue(SortComponents.sort_indices(SortComponents.sorter(True, True))) + self.assertFalse( + SortComponents.sort_indices(SortComponents.sorter(True, False)) + ) + self.assertTrue(SortComponents.sort_indices(SortComponents.sorter(False, True))) + self.assertFalse( + SortComponents.sort_indices(SortComponents.sorter(False, False)) + ) diff --git a/pyomo/core/tests/unit/test_expr_misc.py b/pyomo/core/tests/unit/test_expr_misc.py index 82848f060ab..4ec53521d6b 100644 --- a/pyomo/core/tests/unit/test_expr_misc.py +++ b/pyomo/core/tests/unit/test_expr_misc.py @@ -14,140 +14,182 @@ import os from os.path import abspath, dirname, join -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep from filecmp import cmp import pyomo.common.unittest as unittest -from pyomo.environ import AbstractModel, ConcreteModel, ConstraintList, Set, Param, Var, Constraint, Objective, sum_product, quicksum, sequence, prod +from pyomo.environ import ( + AbstractModel, + ConcreteModel, + ConstraintList, + Set, + Param, + Var, + Constraint, + Objective, + sum_product, + quicksum, + sequence, + prod, +) +from pyomo.core.expr.compare import assertExpressionsEqual + def obj_rule(model): return sum(model.x[a] + model.y[a] for a in model.A) -def constr_rule(model,a): + + +def constr_rule(model, a): return model.x[a] >= model.y[a] class Test(unittest.TestCase): - def test_expr0(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}, mutable=True) - model.C = Param(model.A,initialize={1:100,2:200,3:300}, mutable=False) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) + model.C = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=False) model.x = Var(model.A) model.y = Var(model.A) - instance=model.create_instance() - expr = sum_product(instance.B,instance.y) + instance = model.create_instance() + expr = sum_product(instance.B, instance.y) baseline = "B[1]*y[1] + B[2]*y[2] + B[3]*y[3]" - self.assertEqual( str(expr), baseline ) - expr = sum_product(instance.C,instance.y) - self.assertEqual( str(expr), "100*y[1] + 200*y[2] + 300*y[3]" ) + self.assertEqual(str(expr), baseline) + expr = sum_product(instance.C, instance.y) + self.assertEqual(str(expr), "100*y[1] + 200*y[2] + 300*y[3]") def test_expr1(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}, mutable=True) - model.C = Param(model.A,initialize={1:100,2:200,3:300}, mutable=False) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) + model.C = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=False) model.x = Var(model.A) model.y = Var(model.A) - instance=model.create_instance() - expr = sum_product(instance.x,instance.B,instance.y) + instance = model.create_instance() + expr = sum_product(instance.x, instance.B, instance.y) baseline = "B[1]*x[1]*y[1] + B[2]*x[2]*y[2] + B[3]*x[3]*y[3]" - self.assertEqual( str(expr), baseline ) - expr = sum_product(instance.x,instance.C,instance.y) - self.assertEqual( str(expr), "100*x[1]*y[1] + 200*x[2]*y[2] + 300*x[3]*y[3]" ) + self.assertEqual(str(expr), baseline) + expr = sum_product(instance.x, instance.C, instance.y) + self.assertEqual(str(expr), "100*x[1]*y[1] + 200*x[2]*y[2] + 300*x[3]*y[3]") def test_expr2(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}, mutable=True) - model.C = Param(model.A,initialize={1:100,2:200,3:300}, mutable=False) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) + model.C = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=False) model.x = Var(model.A) model.y = Var(model.A) - instance=model.create_instance() - expr = sum_product(instance.x,instance.B,instance.y, index=[1,3]) + instance = model.create_instance() + expr = sum_product(instance.x, instance.B, instance.y, index=[1, 3]) baseline = "B[1]*x[1]*y[1] + B[3]*x[3]*y[3]" - self.assertEqual( str(expr), baseline ) - expr = sum_product(instance.x,instance.C,instance.y, index=[1,3]) - self.assertEqual( str(expr), "100*x[1]*y[1] + 300*x[3]*y[3]" ) + self.assertEqual(str(expr), baseline) + expr = sum_product(instance.x, instance.C, instance.y, index=[1, 3]) + self.assertEqual(str(expr), "100*x[1]*y[1] + 300*x[3]*y[3]") def test_expr3(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}, mutable=True) - model.C = Param(model.A,initialize={1:100,2:200,3:300}, mutable=False) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) + model.C = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=False) model.x = Var(model.A) model.y = Var(model.A) - instance=model.create_instance() - expr = sum_product(instance.x,instance.B,denom=instance.y, index=[1,3]) + instance = model.create_instance() + expr = sum_product(instance.x, instance.B, denom=instance.y, index=[1, 3]) baseline = "B[1]*x[1]/y[1] + B[3]*x[3]/y[3]" - self.assertEqual( str(expr), baseline ) - expr = sum_product(instance.x,instance.C,denom=instance.y, index=[1,3]) - self.assertEqual( str(expr), "100*x[1]/y[1] + 300*x[3]/y[3]" ) + self.assertEqual(str(expr), baseline) + expr = sum_product(instance.x, instance.C, denom=instance.y, index=[1, 3]) + self.assertEqual(str(expr), "100*x[1]/y[1] + 300*x[3]/y[3]") def test_expr4(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}, mutable=True) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) model.x = Var(model.A) model.y = Var(model.A) - instance=model.create_instance() - expr = sum_product(denom=[instance.y,instance.x]) + instance = model.create_instance() + expr = sum_product(denom=[instance.y, instance.x]) baseline = "1/(y[1]*x[1]) + 1/(y[2]*x[2]) + 1/(y[3]*x[3])" - self.assertEqual( str(expr), baseline ) + self.assertEqual(str(expr), baseline) + + def test_sum_product_ParamVarVar(self): + model = AbstractModel() + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) + model.x = Var(model.A) + model.y = Var(model.A) + instance = model.create_instance() + expr = sum_product(instance.B, instance.y, instance.x) + baseline = "B[1]*y[1]*x[1] + B[2]*y[2]*x[2] + B[3]*y[3]*x[3]" + self.assertEqual(str(expr), baseline) + + def test_sum_product_ParamParamVar(self): + model = AbstractModel() + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) + model.x = Var(model.A) + model.y = Param(model.A, mutable=True) + instance = model.create_instance() + expr = sum_product(instance.B, instance.y, instance.x) + baseline = "B[1]*y[1]*x[1] + B[2]*y[2]*x[2] + B[3]*y[3]*x[3]" + self.assertEqual(str(expr), baseline) def test_expr5(self): model = ConcreteModel() - model.A = Set(initialize=[1,2,3], doc='set A') - model.B = Param(model.A, initialize={1:100,2:200,3:300}, doc='param B', mutable=True) + model.A = Set(initialize=[1, 2, 3], doc='set A') + model.B = Param( + model.A, initialize={1: 100, 2: 200, 3: 300}, doc='param B', mutable=True + ) model.C = Param(initialize=3, doc='param C', mutable=True) model.x = Var(model.A, doc='var x') model.y = Var(doc='var y') model.o = Objective(expr=model.y, doc='obj o') model.c1 = Constraint(expr=model.x[1] >= 0, doc='con c1') + def c2_rule(model, a): return model.B[a] * model.x[a] <= 1 + model.c2 = Constraint(model.A, doc='con c2', rule=c2_rule) model.c3 = ConstraintList(doc='con c3') model.c3.add(model.y <= 0) # - OUTPUT=open(join(currdir, "test_expr5.out"), "w") + OUTPUT = open(join(currdir, "test_expr5.out"), "w") model.pprint(ostream=OUTPUT) OUTPUT.close() _out, _txt = join(currdir, "test_expr5.out"), join(currdir, "test_expr5.txt") - self.assertTrue(cmp(_out, _txt), - msg="Files %s and %s differ" % (_out, _txt)) + self.assertTrue(cmp(_out, _txt), msg="Files %s and %s differ" % (_out, _txt)) def test_prod1(self): - self.assertEqual(prod([1,2,3,5]),30) + self.assertEqual(prod([1, 2, 3, 5]), 30) def test_prod2(self): model = ConcreteModel() - model.A = Set(initialize=[1,2,3], doc='set A') + model.A = Set(initialize=[1, 2, 3], doc='set A') model.x = Var(model.A) expr = prod(model.x[i] for i in model.x) baseline = "x[1]*x[2]*x[3]" - self.assertEqual( str(expr), baseline ) + self.assertEqual(str(expr), baseline) expr = prod(model.x) - self.assertEqual( expr, 6) + self.assertEqual(expr, 6) def test_sum1(self): - self.assertEqual(quicksum([1,2,3,5]),11) + self.assertEqual(quicksum([1, 2, 3, 5]), 11) def test_sum2(self): model = ConcreteModel() - model.A = Set(initialize=[1,2,3], doc='set A') + model.A = Set(initialize=[1, 2, 3], doc='set A') model.x = Var(model.A) expr = quicksum(model.x[i] for i in model.x) baseline = "x[1] + x[2] + x[3]" - self.assertEqual( str(expr), baseline ) + assertExpressionsEqual(self, expr, model.x[1] + model.x[2] + model.x[3]) def test_sum3(self): model = ConcreteModel() - model.A = Set(initialize=[1,2,3], doc='set A') + model.A = Set(initialize=[1, 2, 3], doc='set A') model.x = Var(model.A) expr = quicksum(model.x) - self.assertEqual( expr, 6) + assertExpressionsEqual(self, expr, 6) def test_summation_error1(self): try: @@ -158,24 +200,24 @@ def test_summation_error1(self): def test_summation_error2(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}, mutable=True) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) model.x = Var(model.A) - instance=model.create_instance() + instance = model.create_instance() try: - expr = sum_product(instance.x,instance.B) + expr = sum_product(instance.x, instance.B) self.fail("Expected ValueError") except ValueError: pass def test_summation_error3(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}, mutable=True) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) model.x = Var(model.A) - instance=model.create_instance() + instance = model.create_instance() try: - expr = sum_product(denom=(instance.x,instance.B)) + expr = sum_product(denom=(instance.x, instance.B)) self.fail("Expected ValueError") except ValueError: pass @@ -187,15 +229,15 @@ def test_sequence_error1(self): except ValueError: pass try: - sequence(1,2,3,4) + sequence(1, 2, 3, 4) self.fail("Expected ValueError") except ValueError: pass def test_sequence(self): - self.assertEqual(list(sequence(10)), list(range(1,11))) - self.assertEqual(list(sequence(8,10)), [8,9,10]) - self.assertEqual(list(sequence(1,10,3)), [1,4,7,10]) + self.assertEqual(list(sequence(10)), list(range(1, 11))) + self.assertEqual(list(sequence(8, 10)), [8, 9, 10]) + self.assertEqual(list(sequence(1, 10, 3)), [1, 4, 7, 10]) if __name__ == "__main__": diff --git a/pyomo/core/tests/unit/test_expression.py b/pyomo/core/tests/unit/test_expression.py index 0ff00ac42af..8dca0062dd0 100644 --- a/pyomo/core/tests/unit/test_expression.py +++ b/pyomo/core/tests/unit/test_expression.py @@ -16,37 +16,45 @@ import pyomo.common.unittest as unittest +import pyomo.core.expr as EXPR from pyomo.environ import ( - ConcreteModel, AbstractModel, Expression, Var, Set, Param, Objective, - value, sum_product, + ConcreteModel, + AbstractModel, + Expression, + Var, + Set, + Param, + Objective, + value, + sum_product, ) from pyomo.core.base.expression import _GeneralExpressionData -from pyomo.core.expr.compare import compare_expressions +from pyomo.core.expr.compare import compare_expressions, assertExpressionsEqual from pyomo.common.tee import capture_output -class TestExpressionData(unittest.TestCase): +class TestExpressionData(unittest.TestCase): def test_exprdata_get_set(self): model = ConcreteModel() model.e = Expression([1]) self.assertEqual(len(model.e), 1) self.assertEqual(model.e[1].expr, None) - model.e.add(1,1) - self.assertEqual(model.e[1].expr(), 1) + model.e.add(1, 1) + self.assertEqual(model.e[1].expr, 1) model.e[1].expr += 2 - self.assertEqual(model.e[1].expr(), 3) + self.assertEqual(model.e[1].expr, 3) def test_exprdata_get_set_value(self): model = ConcreteModel() model.e = Expression([1]) self.assertEqual(len(model.e), 1) self.assertEqual(model.e[1].expr, None) - model.e.add(1,1) + model.e.add(1, 1) model.e[1].expr = 1 - self.assertEqual(model.e[1].expr(), 1) + self.assertEqual(model.e[1].expr, 1) model.e[1].expr += 2 - self.assertEqual(model.e[1].expr(), 3) + self.assertEqual(model.e[1].expr, 3) # The copy method must be invoked on expression container to obtain # a shallow copy of the class, the underlying expression remains @@ -60,21 +68,21 @@ def test_copy(self): # Do a shallow copy, the same underlying expression is still referenced expr2 = copy.copy(model.expr1) - self.assertEqual( model.expr1(), 5 ) - self.assertEqual( expr2(), 5 ) - self.assertEqual( id(model.expr1.expr), id(expr2.expr) ) + self.assertEqual(model.expr1(), 5) + self.assertEqual(expr2(), 5) + self.assertEqual(id(model.expr1.expr), id(expr2.expr)) # Do an in place modification the expression model.expr1.expr.set_value(1) - self.assertEqual( model.expr1(), 1 ) - self.assertEqual( expr2(), 1 ) - self.assertEqual( id(model.expr1.expr), id(expr2.expr) ) + self.assertEqual(model.expr1(), 1) + self.assertEqual(expr2(), 1) + self.assertEqual(id(model.expr1.expr), id(expr2.expr)) # Update the expression value on expr1 only model.expr1.set_value(model.b) - self.assertEqual( model.expr1(), 10 ) - self.assertEqual( expr2(), 1 ) - self.assertNotEqual( id(model.expr1.expr), id(expr2.expr) ) + self.assertEqual(model.expr1(), 10) + self.assertEqual(expr2(), 1) + self.assertNotEqual(id(model.expr1.expr), id(expr2.expr)) model.a.set_value(5) model.b.set_value(10) @@ -83,45 +91,39 @@ def test_copy(self): # Do a shallow copy, the same underlying expression is still referenced expr2 = copy.copy(model.expr1) - self.assertEqual( model.expr1(), 15 ) - self.assertEqual( expr2(), 15 ) - self.assertEqual( id(model.expr1.expr), id(expr2.expr) ) - self.assertEqual( id(model.expr1.expr.arg(0)), - id(expr2.expr.arg(0)) ) - self.assertEqual( id(model.expr1.expr.arg(1)), - id(expr2.expr.arg(1)) ) - + self.assertEqual(model.expr1(), 15) + self.assertEqual(expr2(), 15) + self.assertEqual(id(model.expr1.expr), id(expr2.expr)) + self.assertEqual(id(model.expr1.expr.arg(0)), id(expr2.expr.arg(0))) + self.assertEqual(id(model.expr1.expr.arg(1)), id(expr2.expr.arg(1))) # Do an in place modification the expression # This causes cloning due to reference counting model.a.set_value(0) - self.assertEqual( model.expr1(), 10 ) - self.assertEqual( expr2(), 10 ) - self.assertEqual( id(model.expr1.expr), id(expr2.expr) ) - self.assertEqual( id(model.expr1.expr.arg(0)), - id(expr2.expr.arg(0)) ) - self.assertEqual( id(model.expr1.expr.arg(1)), - id(expr2.expr.arg(1)) ) - + self.assertEqual(model.expr1(), 10) + self.assertEqual(expr2(), 10) + self.assertEqual(id(model.expr1.expr), id(expr2.expr)) + self.assertEqual(id(model.expr1.expr.arg(0)), id(expr2.expr.arg(0))) + self.assertEqual(id(model.expr1.expr.arg(1)), id(expr2.expr.arg(1))) # Do an in place modification the expression # This causes cloning due to reference counting model.expr1.expr += 1 - self.assertEqual( model.expr1(), 11 ) - self.assertEqual( expr2(), 10 ) - self.assertNotEqual( id(model.expr1.expr), id(expr2.expr) ) + self.assertEqual(model.expr1(), 11) + self.assertEqual(expr2(), 10) + self.assertNotEqual(id(model.expr1.expr), id(expr2.expr)) # test that an object is properly deepcopied when the model is cloned def test_model_clone(self): model = ConcreteModel() model.x = Var(initialize=2.0) model.y = Var(initialize=0.0) - model.ec = Expression(initialize=model.x**2+1) - model.obj = Objective(expr=model.y+model.ec) - self.assertEqual(model.obj.expr(),5.0) + model.ec = Expression(initialize=model.x**2 + 1) + model.obj = Objective(expr=model.y + model.ec) + self.assertEqual(model.obj.expr(), 5.0) self.assertTrue(id(model.ec) in [id(e) for e in model.obj.expr.args]) inst = model.clone() - self.assertEqual(inst.obj.expr(),5.0) + self.assertEqual(inst.obj.expr(), 5.0) if not id(inst.ec) in [id(e) for e in inst.obj.expr.args]: print("BUG?") print(id(inst.ec)) @@ -149,14 +151,15 @@ def test_polynomial_degree(self): model = ConcreteModel() model.x = Var(initialize=1.0) model.ec = Expression(initialize=model.x) - self.assertEqual( model.ec.polynomial_degree(), - model.ec.expr.polynomial_degree() ) + self.assertEqual( + model.ec.polynomial_degree(), model.ec.expr.polynomial_degree() + ) self.assertEqual(model.ec.polynomial_degree(), 1) model.ec.set_value(model.x**2) - self.assertEqual( model.ec.polynomial_degree(), - model.ec.expr.polynomial_degree()) - self.assertEqual( model.ec.polynomial_degree(), 2 ) - + self.assertEqual( + model.ec.polynomial_degree(), model.ec.expr.polynomial_degree() + ) + self.assertEqual(model.ec.polynomial_degree(), 2) def test_init_concrete(self): model = ConcreteModel() @@ -164,57 +167,57 @@ def test_init_concrete(self): model.x = Var(initialize=1.0) model.ec = Expression(expr=0) - model.obj = Objective(expr=1.0+model.ec) - self.assertEqual(model.obj.expr(),1.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + model.obj = Objective(expr=1.0 + model.ec) + self.assertEqual(model.obj.expr(), 1.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e = 1.0 model.ec.set_value(e) - self.assertEqual(model.obj.expr(),2.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 2.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x model.ec.set_value(e) - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) model.del_component('obj') model.del_component('ec') model.ec = Expression(initialize=model.y) - model.obj = Objective(expr=1.0+model.ec) - self.assertEqual(model.obj.expr(),1.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + model.obj = Objective(expr=1.0 + model.ec) + self.assertEqual(model.obj.expr(), 1.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e = 1.0 model.ec.set_value(e) - self.assertEqual(model.obj.expr(),2.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 2.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x model.ec.set_value(e) - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) model.del_component('obj') model.del_component('ec') model.y.set_value(-1) - model.ec = Expression(initialize=model.y+1.0) - model.obj = Objective(expr=1.0+model.ec) - self.assertEqual(model.obj.expr(),1.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + model.ec = Expression(initialize=model.y + 1.0) + model.obj = Objective(expr=1.0 + model.ec) + self.assertEqual(model.obj.expr(), 1.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e = 1.0 model.ec.set_value(e) - self.assertEqual(model.obj.expr(),2.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 2.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x model.ec.set_value(e) - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) def test_init_abstract(self): model = AbstractModel() @@ -223,73 +226,77 @@ def test_init_abstract(self): model.ec = Expression(initialize=0.0) def obj_rule(model): - return 1.0+model.ec + return 1.0 + model.ec + model.obj = Objective(rule=obj_rule) inst = model.create_instance() - self.assertEqual(inst.obj.expr(),1.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 1.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e = 1.0 inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),2.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 2.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) model.del_component('obj') model.del_component('ec') model.ec = Expression(initialize=0.0) + def obj_rule(model): - return 1.0+model.ec + return 1.0 + model.ec + model.obj = Objective(rule=obj_rule) inst = model.create_instance() - self.assertEqual(inst.obj.expr(),1.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 1.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e = 1.0 inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),2.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 2.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) model.del_component('obj') model.del_component('ec') model.ec = Expression(initialize=0.0) + def obj_rule(model): - return 1.0+model.ec + return 1.0 + model.ec + model.obj = Objective(rule=obj_rule) inst = model.create_instance() - self.assertEqual(inst.obj.expr(),1.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 1.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e = 1.0 inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),2.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 2.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) class TestExpression(unittest.TestCase): - def setUp(self): TestExpression._save = expr_common.TO_STRING_VERBOSE # Tests can choose what they want - this just makes sure that - #things are restored after the tests run. - #expr_common.TO_STRING_VERBOSE = True + # things are restored after the tests run. + # expr_common.TO_STRING_VERBOSE = True def tearDown(self): expr_common.TO_STRING_VERBOSE = TestExpression._save @@ -331,92 +338,104 @@ def test_unconstructed_singleton(self): a.set_value(5) self.assertEqual(len(a), 1) self.assertEqual(a(), 5) - self.assertEqual(a.expr(), 5) + self.assertEqual(a.expr, 5) self.assertEqual(a.is_constant(), False) self.assertEqual(a.is_fixed(), True) - def test_display(self): model = ConcreteModel() model.e = Expression() with capture_output() as out: model.e.display() - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ e : Size=1 Key : Value None : Undefined - """.strip()) + """.strip(), + ) model.e.set_value(1.0) with capture_output() as out: model.e.display() - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ e : Size=1 Key : Value None : 1.0 - """.strip()) + """.strip(), + ) out = StringIO() with capture_output() as no_out: model.e.display(ostream=out) self.assertEqual(no_out.getvalue(), "") - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ e : Size=1 Key : Value None : 1.0 - """.strip()) + """.strip(), + ) - model.E = Expression([1,2]) + model.E = Expression([1, 2]) with capture_output() as out: model.E.display() - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ E : Size=2 Key : Value 1 : Undefined 2 : Undefined - """.strip()) + """.strip(), + ) model.E[1].set_value(1.0) with capture_output() as out: model.E.display() - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ E : Size=2 Key : Value 1 : 1.0 2 : Undefined - """.strip()) + """.strip(), + ) out = StringIO() with capture_output() as no_out: model.E.display(ostream=out) self.assertEqual(no_out.getvalue(), "") - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ E : Size=2 Key : Value 1 : 1.0 2 : Undefined - """.strip()) + """.strip(), + ) def test_extract_values_store_values(self): model = ConcreteModel() model.e = Expression() - self.assertEqual(model.e.extract_values(), - {None: None}) + self.assertEqual(model.e.extract_values(), {None: None}) model.e.store_values({None: 1.0}) - self.assertEqual(model.e.extract_values(), - {None: 1.0}) + self.assertEqual(model.e.extract_values(), {None: 1.0}) with self.assertRaises(KeyError): model.e.store_values({1: 1.0}) - model.E = Expression([1,2]) - self.assertEqual(model.E.extract_values(), - {1: None, 2:None}) + model.E = Expression([1, 2]) + self.assertEqual(model.E.extract_values(), {1: None, 2: None}) model.E.store_values({1: 1.0}) - self.assertEqual(model.E.extract_values(), - {1: 1.0, 2: None}) + self.assertEqual(model.E.extract_values(), {1: 1.0, 2: None}) model.E.store_values({1: None, 2: 2.0}) - self.assertEqual(model.E.extract_values(), - {1: None, 2: 2.0}) + self.assertEqual(model.E.extract_values(), {1: None, 2: 2.0}) with self.assertRaises(KeyError): model.E.store_values({3: 3.0}) @@ -428,27 +447,31 @@ def test_setitem(self): with self.assertRaises(KeyError): model.E[2] = 1 model.del_component(model.E) - model.Index = Set(dimen=3, initialize=[(1,2,3)]) + model.Index = Set(dimen=3, initialize=[(1, 2, 3)]) model.E = Expression(model.Index) - model.E[(1,2,3)] = 1 - self.assertEqual(value(model.E[(1,2,3)]), 1) + model.E[(1, 2, 3)] = 1 + self.assertEqual(value(model.E[(1, 2, 3)]), 1) # GH: testing this ludicrous behavior simply for # coverage in expression.py. - model.E[(1,(2,3))] = 1 - self.assertEqual(value(model.E[(1,2,3)]), 1) + model.E[(1, (2, 3))] = 1 + self.assertEqual(value(model.E[(1, 2, 3)]), 1) with self.assertRaises(KeyError): model.E[2] = 1 def test_nonindexed_construct_rule(self): model = ConcreteModel() + def _some_rule(model): return 1.0 + model.e = Expression(rule=_some_rule) self.assertEqual(value(model.e), 1.0) model.del_component(model.e) del _some_rule + def _some_rule(model): return Expression.Skip + model.e = Expression(rule=_some_rule) self.assertEqual(len(model.e), 0) @@ -458,16 +481,13 @@ def test_nonindexed_construct_expr(self): self.assertEqual(len(model.e), 0) model.del_component(model.e) model.e = Expression() - self.assertEqual(model.e.extract_values(), - {None: None}) + self.assertEqual(model.e.extract_values(), {None: None}) model.del_component(model.e) model.e = Expression(expr=1.0) - self.assertEqual(model.e.extract_values(), - {None: 1.0}) + self.assertEqual(model.e.extract_values(), {None: 1.0}) model.del_component(model.e) model.e = Expression(expr={None: 1.0}) - self.assertEqual(model.e.extract_values(), - {None: 1.0}) + self.assertEqual(model.e.extract_values(), {None: 1.0}) # Even though add can be called with any # indexed on indexed Expressions, None must # always be used as the index for non-indexed @@ -477,21 +497,21 @@ def test_nonindexed_construct_expr(self): def test_indexed_construct_rule(self): model = ConcreteModel() - model.Index = Set(initialize=[1,2,3]) + model.Index = Set(initialize=[1, 2, 3]) + def _some_rule(model, i): if i == 1: return Expression.Skip else: return i - model.E = Expression(model.Index, - rule=_some_rule) - self.assertEqual(model.E.extract_values(), - {2:2, 3:3}) + + model.E = Expression(model.Index, rule=_some_rule) + self.assertEqual(model.E.extract_values(), {2: 2, 3: 3}) self.assertEqual(len(model.E), 2) def test_implicit_definition(self): model = ConcreteModel() - model.idx = Set(initialize=[1,2,3]) + model.idx = Set(initialize=[1, 2, 3]) model.E = Expression(model.idx) self.assertEqual(len(model.E), 3) expr = model.E[1] @@ -502,15 +522,15 @@ def test_implicit_definition(self): self.assertIs(expr.expr, None) model.E[1] = 5 self.assertIs(expr, model.E[1]) - self.assertEqual(model.E.extract_values(), {1:5, 2:None, 3:None}) + self.assertEqual(model.E.extract_values(), {1: 5, 2: None, 3: None}) model.E[2] = 6 self.assertIsNot(expr, model.E[2]) - self.assertEqual(model.E.extract_values(), {1:5, 2:6, 3:None}) + self.assertEqual(model.E.extract_values(), {1: 5, 2: 6, 3: None}) def test_explicit_skip_definition(self): model = ConcreteModel() - model.idx = Set(initialize=[1,2,3]) - model.E = Expression(model.idx, rule=lambda m,i: Expression.Skip) + model.idx = Set(initialize=[1, 2, 3]) + model.E = Expression(model.idx, rule=lambda m, i: Expression.Skip) self.assertEqual(len(model.E), 0) with self.assertRaises(KeyError): expr = model.E[1] @@ -521,58 +541,54 @@ def test_explicit_skip_definition(self): self.assertIs(expr.expr, None) model.E[1] = 5 self.assertIs(expr, model.E[1]) - self.assertEqual(model.E.extract_values(), {1:5}) + self.assertEqual(model.E.extract_values(), {1: 5}) model.E[2] = 6 self.assertIsNot(expr, model.E[2]) - self.assertEqual(model.E.extract_values(), {1:5, 2:6}) + self.assertEqual(model.E.extract_values(), {1: 5, 2: 6}) def test_indexed_construct_expr(self): model = ConcreteModel() - model.Index = Set(initialize=[1,2,3]) - model.E = Expression(model.Index, - expr=Expression.Skip) + model.Index = Set(initialize=[1, 2, 3]) + model.E = Expression(model.Index, expr=Expression.Skip) self.assertEqual(len(model.E), 0) model.E = Expression(model.Index) - self.assertEqual(model.E.extract_values(), - {1:None, 2:None, 3:None}) + self.assertEqual(model.E.extract_values(), {1: None, 2: None, 3: None}) model.del_component(model.E) model.E = Expression(model.Index, expr=1.0) - self.assertEqual(model.E.extract_values(), - {1:1.0, 2:1.0, 3:1.0}) + self.assertEqual(model.E.extract_values(), {1: 1.0, 2: 1.0, 3: 1.0}) model.del_component(model.E) - model.E = Expression(model.Index, - expr={1: Expression.Skip, - 2: Expression.Skip, - 3: 1.0}) - self.assertEqual(model.E.extract_values(), - {3: 1.0}) + model.E = Expression( + model.Index, expr={1: Expression.Skip, 2: Expression.Skip, 3: 1.0} + ) + self.assertEqual(model.E.extract_values(), {3: 1.0}) def test_bad_init_too_many_keywords(self): model = ConcreteModel() + def _some_rule(model): return 1.0 + with self.assertRaises(ValueError): - model.e = Expression(expr=1.0, - rule=_some_rule) + model.e = Expression(expr=1.0, rule=_some_rule) del _some_rule + def _some_indexed_rule(model, i): return 1.0 + with self.assertRaises(ValueError): - model.e = Expression([1], - expr=1.0, - rule=_some_indexed_rule) + model.e = Expression([1], expr=1.0, rule=_some_indexed_rule) del _some_indexed_rule def test_init_concrete_indexed(self): model = ConcreteModel() model.y = Var(initialize=0.0) - model.x = Var([1,2,3],initialize=1.0) + model.x = Var([1, 2, 3], initialize=1.0) - model.ec = Expression([1,2,3],initialize=1.0) - model.obj = Objective(expr=1.0+sum_product(model.ec, index=[1,2,3])) - self.assertEqual(model.obj.expr(),4.0) + model.ec = Expression([1, 2, 3], initialize=1.0) + model.obj = Objective(expr=1.0 + sum_product(model.ec, index=[1, 2, 3])) + self.assertEqual(model.obj.expr(), 4.0) model.ec[1].set_value(2.0) - self.assertEqual(model.obj.expr(),5.0) + self.assertEqual(model.obj.expr(), 5.0) def test_init_concrete_nonindexed(self): model = ConcreteModel() @@ -580,66 +596,66 @@ def test_init_concrete_nonindexed(self): model.x = Var(initialize=1.0) model.ec = Expression(initialize=0) - model.obj = Objective(expr=1.0+model.ec) - self.assertEqual(model.obj.expr(),1.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + model.obj = Objective(expr=1.0 + model.ec) + self.assertEqual(model.obj.expr(), 1.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e = 1.0 model.ec.set_value(e) - self.assertEqual(model.obj.expr(),2.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 2.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x model.ec.set_value(e) - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) model.del_component('obj') model.del_component('ec') model.ec = Expression(initialize=model.y) - model.obj = Objective(expr=1.0+model.ec) - self.assertEqual(model.obj.expr(),1.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + model.obj = Objective(expr=1.0 + model.ec) + self.assertEqual(model.obj.expr(), 1.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e = 1.0 model.ec.set_value(e) - self.assertEqual(model.obj.expr(),2.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 2.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x model.ec.set_value(e) - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) model.del_component('obj') model.del_component('ec') model.y.set_value(-1) - model.ec = Expression(initialize=model.y+1.0) - model.obj = Objective(expr=1.0+model.ec) - self.assertEqual(model.obj.expr(),1.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + model.ec = Expression(initialize=model.y + 1.0) + model.obj = Objective(expr=1.0 + model.ec) + self.assertEqual(model.obj.expr(), 1.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e = 1.0 model.ec.set_value(e) - self.assertEqual(model.obj.expr(),2.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 2.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x model.ec.set_value(e) - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) e += model.x - self.assertEqual(model.obj.expr(),3.0) - self.assertEqual(id(model.obj.expr.arg(1)),id(model.ec)) + self.assertEqual(model.obj.expr(), 3.0) + self.assertEqual(id(model.obj.expr.arg(1)), id(model.ec)) def test_init_abstract_indexed(self): model = AbstractModel() - model.ec = Expression([1,2,3],initialize=1.0) - model.obj = Objective(rule=lambda m: 1.0+sum_product(m.ec,index=[1,2,3])) + model.ec = Expression([1, 2, 3], initialize=1.0) + model.obj = Objective(rule=lambda m: 1.0 + sum_product(m.ec, index=[1, 2, 3])) inst = model.create_instance() - self.assertEqual(inst.obj.expr(),4.0) + self.assertEqual(inst.obj.expr(), 4.0) inst.ec[1].set_value(2.0) - self.assertEqual(inst.obj.expr(),5.0) + self.assertEqual(inst.obj.expr(), 5.0) def test_init_abstract_nonindexed(self): model = AbstractModel() @@ -648,96 +664,99 @@ def test_init_abstract_nonindexed(self): model.ec = Expression(initialize=0.0) def obj_rule(model): - return 1.0+model.ec + return 1.0 + model.ec + model.obj = Objective(rule=obj_rule) inst = model.create_instance() - self.assertEqual(inst.obj.expr(),1.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 1.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e = 1.0 inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),2.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 2.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) model.del_component('obj') model.del_component('ec') model.ec = Expression(initialize=0.0) + def obj_rule(model): - return 1.0+model.ec + return 1.0 + model.ec + model.obj = Objective(rule=obj_rule) inst = model.create_instance() - self.assertEqual(inst.obj.expr(),1.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 1.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e = 1.0 inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),2.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 2.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) model.del_component('obj') model.del_component('ec') model.ec = Expression(initialize=0.0) + def obj_rule(model): - return 1.0+model.ec + return 1.0 + model.ec + model.obj = Objective(rule=obj_rule) inst = model.create_instance() - self.assertEqual(inst.obj.expr(),1.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 1.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e = 1.0 inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),2.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 2.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x inst.ec.set_value(e) - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) e += inst.x - self.assertEqual(inst.obj.expr(),3.0) - self.assertEqual(id(inst.obj.expr.arg(1)),id(inst.ec)) + self.assertEqual(inst.obj.expr(), 3.0) + self.assertEqual(id(inst.obj.expr.arg(1)), id(inst.ec)) def test_pprint_oldStyle(self): expr_common.TO_STRING_VERBOSE = True model = ConcreteModel() model.x = Var() - model.e = Expression(initialize=model.x+2) - model.E = Expression([1,2],initialize=model.x**2+1) - expr = model.e*model.x**2 + model.E[1] + model.e = Expression(initialize=model.x + 2) + model.E = Expression([1, 2], initialize=model.x**2 + 1) + expr = model.e * model.x**2 + model.E[1] - output = \ -"""\ -sum(prod(e{sum(x, 2)}, pow(x, 2)), E[1]{sum(pow(x, 2), 1)}) + output = """\ +sum(prod(e{sum(mon(1, x), 2)}, pow(x, 2)), E[1]{sum(pow(x, 2), 1)}) e : Size=1, Index=None Key : Expression - None : sum(x, 2) + None : sum(mon(1, x), 2) E : Size=2, Index=E_index Key : Expression 1 : sum(pow(x, 2), 1) 2 : sum(pow(x, 2), 1) """ out = StringIO() - out.write(str(expr)+"\n") + out.write(str(expr) + "\n") model.e.pprint(ostream=out) - #model.E[1].pprint(ostream=out) + # model.E[1].pprint(ostream=out) model.E.pprint(ostream=out) self.assertEqual(output, out.getvalue()) model.e.set_value(1.0) model.E[1].set_value(2.0) - output = \ -"""\ + output = """\ sum(prod(e{1.0}, pow(x, 2)), E[1]{2.0}) e : Size=1, Index=None Key : Expression @@ -748,17 +767,15 @@ def test_pprint_oldStyle(self): 2 : sum(pow(x, 2), 1) """ out = StringIO() - out.write(str(expr)+"\n") + out.write(str(expr) + "\n") model.e.pprint(ostream=out) - #model.E[1].pprint(ostream=out) + # model.E[1].pprint(ostream=out) model.E.pprint(ostream=out) self.assertEqual(output, out.getvalue()) - model.e.set_value(None) model.E[1].set_value(None) - output = \ -"""\ + output = """\ sum(prod(e{Undefined}, pow(x, 2)), E[1]{Undefined}) e : Size=1, Index=None Key : Expression @@ -769,24 +786,22 @@ def test_pprint_oldStyle(self): 2 : sum(pow(x, 2), 1) """ out = StringIO() - out.write(str(expr)+"\n") + out.write(str(expr) + "\n") model.e.pprint(ostream=out) - #model.E[1].pprint(ostream=out) + # model.E[1].pprint(ostream=out) model.E.pprint(ostream=out) self.assertEqual(output, out.getvalue()) - def test_pprint_newStyle(self): expr_common.TO_STRING_VERBOSE = False model = ConcreteModel() model.x = Var() - model.e = Expression(initialize=model.x+2) - model.E = Expression([1,2],initialize=model.x**2+1) - expr = model.e*model.x**2 + model.E[1] + model.e = Expression(initialize=model.x + 2) + model.E = Expression([1, 2], initialize=model.x**2 + 1) + expr = model.e * model.x**2 + model.E[1] - output = \ -"""\ + output = """\ (x + 2)*x**2 + (x**2 + 1) e : Size=1, Index=None Key : Expression @@ -797,9 +812,9 @@ def test_pprint_newStyle(self): 2 : x**2 + 1 """ out = StringIO() - out.write(str(expr)+"\n") + out.write(str(expr) + "\n") model.e.pprint(ostream=out) - #model.E[1].pprint(ostream=out) + # model.E[1].pprint(ostream=out) model.E.pprint(ostream=out) self.assertEqual(output, out.getvalue()) @@ -810,8 +825,7 @@ def test_pprint_newStyle(self): # a fixed variable in a sub-expression. I can't decide if this # is the expected behavior or not. # - output = \ -"""\ + output = """\ x**2 + 2.0 e : Size=1, Index=None Key : Expression @@ -822,17 +836,15 @@ def test_pprint_newStyle(self): 2 : x**2 + 1 """ out = StringIO() - out.write(str(expr)+"\n") + out.write(str(expr) + "\n") model.e.pprint(ostream=out) - #model.E[1].pprint(ostream=out) + # model.E[1].pprint(ostream=out) model.E.pprint(ostream=out) self.assertEqual(output, out.getvalue()) - model.e.set_value(None) model.E[1].set_value(None) - output = \ -"""\ + output = """\ e{None}*x**2 + E[1]{None} e : Size=1, Index=None Key : Expression @@ -843,9 +855,9 @@ def test_pprint_newStyle(self): 2 : x**2 + 1 """ out = StringIO() - out.write(str(expr)+"\n") + out.write(str(expr) + "\n") model.e.pprint(ostream=out) - #model.E[1].pprint(ostream=out) + # model.E[1].pprint(ostream=out) model.E.pprint(ostream=out) self.assertEqual(output, out.getvalue()) @@ -869,9 +881,9 @@ def test_singleton_get_set(self): self.assertEqual(len(model.e), 1) self.assertEqual(model.e.expr, None) model.e.expr = 1 - self.assertEqual(model.e.expr(), 1) + self.assertEqual(model.e.expr, 1) model.e.expr += 2 - self.assertEqual(model.e.expr(), 3) + self.assertEqual(model.e.expr, 3) def test_singleton_get_set_value(self): model = ConcreteModel() @@ -879,9 +891,9 @@ def test_singleton_get_set_value(self): self.assertEqual(len(model.e), 1) self.assertEqual(model.e.expr, None) model.e.expr = 1 - self.assertEqual(model.e.expr(), 1) + self.assertEqual(model.e.expr, 1) model.e.expr += 2 - self.assertEqual(model.e.expr(), 3) + self.assertEqual(model.e.expr, 3) def test_abstract_index(self): model = AbstractModel() @@ -897,22 +909,22 @@ def test_iadd(self): m = ConcreteModel() e = m.e = Expression(expr=1.0) expr = 0.0 - for v in [1.0,e]: + for v in [1.0, e]: expr += v self.assertEqual(e.expr, 1) self.assertEqual(expr(), 2) expr = 0.0 - for v in [e,1.0]: + for v in [e, 1.0]: expr += v self.assertEqual(e.expr, 1) - self.assertEqual(expr(), 2) + self.assertEqual(expr, 2) # Make sure that using in-place operators on named expressions # do not create loops inthe expression tree (test #1890) m.x = Var() m.y = Var() m.e.expr = m.x m.e += m.y - self.assertTrue(compare_expressions(m.e.expr, m.x + m.y)) + assertExpressionsEqual(self, m.e.expr, m.x + m.y) def test_isub(self): # make sure simple for loops that look like they @@ -921,12 +933,12 @@ def test_isub(self): m = ConcreteModel() e = m.e = Expression(expr=1.0) expr = 0.0 - for v in [1.0,e]: + for v in [1.0, e]: expr -= v self.assertEqual(e.expr, 1) self.assertEqual(expr(), -2) expr = 0.0 - for v in [e,1.0]: + for v in [e, 1.0]: expr -= v self.assertEqual(e.expr, 1) self.assertEqual(expr(), -2) @@ -936,6 +948,16 @@ def test_isub(self): m.y = Var() m.e.expr = m.x m.e -= m.y + assertExpressionsEqual( + self, + m.e.expr, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression((1, m.x)), + EXPR.MonomialTermExpression((-1, m.y)), + ] + ), + ) self.assertTrue(compare_expressions(m.e.expr, m.x - m.y)) def test_imul(self): @@ -945,15 +967,15 @@ def test_imul(self): m = ConcreteModel() e = m.e = Expression(expr=3.0) expr = 1.0 - for v in [2.0,e]: + for v in [2.0, e]: expr *= v self.assertEqual(e.expr, 3) self.assertEqual(expr(), 6) expr = 1.0 - for v in [e,2.0]: + for v in [e, 2.0]: expr *= v self.assertEqual(e.expr, 3) - self.assertEqual(expr(), 6) + self.assertEqual(expr, 6) # Make sure that using in-place operators on named expressions # do not create loops inthe expression tree (test #1890) m.x = Var() @@ -970,29 +992,29 @@ def test_idiv(self): m = ConcreteModel() e = m.e = Expression(expr=3.0) expr = e - for v in [2.0,1.0]: + for v in [2.0, 1.0]: expr /= v self.assertEqual(e.expr, 3) - self.assertEqual(expr(), 1.5) + self.assertEqual(expr, 1.5) expr = e - for v in [1.0,2.0]: + for v in [1.0, 2.0]: expr /= v self.assertEqual(e.expr, 3) - self.assertEqual(expr(), 1.5) + self.assertEqual(expr, 1.5) # note that integer division does not occur within # Pyomo expressions m = ConcreteModel() e = m.e = Expression(expr=3.0) expr = e - for v in [2,1]: + for v in [2, 1]: expr /= v self.assertEqual(e.expr, 3) - self.assertEqual(expr(), 1.5) + self.assertEqual(expr, 1.5) expr = e - for v in [1,2]: + for v in [1, 2]: expr /= v self.assertEqual(e.expr, 3) - self.assertEqual(expr(), 1.5) + self.assertEqual(expr, 1.5) # Make sure that using in-place operators on named expressions # do not create loops inthe expression tree (test #1890) m.x = Var() @@ -1008,23 +1030,23 @@ def test_ipow(self): m = ConcreteModel() e = m.e = Expression(expr=3.0) expr = e - for v in [2.0,1.0]: + for v in [2.0, 1.0]: expr **= v self.assertEqual(e.expr, 3) - self.assertEqual(expr(), 9) + self.assertEqual(expr, 9) expr = e - for v in [1.0,2.0]: + for v in [1.0, 2.0]: expr **= v self.assertEqual(e.expr, 3) - self.assertEqual(expr(), 9) + self.assertEqual(expr, 9) # Make sure that using in-place operators on named expressions # do not create loops inthe expression tree (test #1890) m.x = Var() m.y = Var() m.e.expr = m.x m.e **= m.y - self.assertTrue(compare_expressions(m.e.expr, m.x ** m.y)) + self.assertTrue(compare_expressions(m.e.expr, m.x**m.y)) + if __name__ == "__main__": unittest.main() - diff --git a/pyomo/core/tests/unit/test_external.py b/pyomo/core/tests/unit/test_external.py index 1893a336ee8..96c05b6b0b8 100644 --- a/pyomo/core/tests/unit/test_external.py +++ b/pyomo/core/tests/unit/test_external.py @@ -16,81 +16,94 @@ import pyomo.common.unittest as unittest -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL from pyomo.common.log import LoggingIntercept from pyomo.common.tempfiles import TempfileManager from pyomo.environ import ( - ConcreteModel, Block, Var, Objective, Expression, SolverFactory, value, + ConcreteModel, + Block, + Var, + Objective, + Expression, + SolverFactory, + value, Param, ) from pyomo.core.base.external import ( - PythonCallbackFunction, ExternalFunction, AMPLExternalFunction, + PythonCallbackFunction, + ExternalFunction, + AMPLExternalFunction, ) from pyomo.core.base.units_container import pint_available, units from pyomo.core.expr.numeric_expr import ( - ExternalFunctionExpression, NPV_ExternalFunctionExpression, + ExternalFunctionExpression, + NPV_ExternalFunctionExpression, ) from pyomo.opt import check_available_solvers + def _count(*args): return len(args) + def _sum(*args): return 2 + sum(args) + def _f(x, y, z): - return x**2 + 3*x*y + x*y*z**2 + return x**2 + 3 * x * y + x * y * z**2 + def _g(args, fixed): x, y, z = args[:3] - return [ - 2*x + 3*y + y*z**2, - 3*x + x*z**2, - 2*x*y*z, - ] + return [2 * x + 3 * y + y * z**2, 3 * x + x * z**2, 2 * x * y * z] + def _h(args, fixed): x, y, z = args[:3] - return [ - 2, - 3+z**2, 0, - 2*y*z, 2*x*z, 2*x*y, - ] + return [2, 3 + z**2, 0, 2 * y * z, 2 * x * z, 2 * x * y] + def _g_bad(args, fixed): x, y, z = args[:3] - return [ - 2*x + 3*y + y*z**2, - 3*x + x*z**2, - 2*x*y*z, - 0, - ] + return [2 * x + 3 * y + y * z**2, 3 * x + x * z**2, 2 * x * y * z, 0] + def _h_bad(args, fixed): x, y, z = args[:3] return [ - #2, - 3+z**2, 0, - 2*y*z, 2*x*z, 2*x*y, + # 2, + 3 + z**2, + 0, + 2 * y * z, + 2 * x * z, + 2 * x * y, ] + def _fgh(args, fixed, fgh): return _f(*args), _g(args, fixed), _h(args, fixed) + class TestPythonCallbackFunction(unittest.TestCase): def test_constructor_errors(self): m = ConcreteModel() with self.assertRaisesRegex( - ValueError, "Duplicate definition of external function " - r"through positional and keyword \('function='\)"): + ValueError, + "Duplicate definition of external function " + r"through positional and keyword \('function='\)", + ): m.f = ExternalFunction(_count, function=_count) with self.assertRaisesRegex( - ValueError, "PythonCallbackFunction constructor only " - "supports 0 - 3 positional arguments"): + ValueError, + "PythonCallbackFunction constructor only " + "supports 0 - 3 positional arguments", + ): m.f = ExternalFunction(1, 2, 3, 4) with self.assertRaisesRegex( - ValueError, "Cannot specify 'fgh' with any of " - "{'function', 'gradient', hessian'}"): + ValueError, + "Cannot specify 'fgh' with any of {'function', 'gradient', hessian'}", + ): m.f = ExternalFunction(_count, fgh=_fgh) def test_call_countArgs(self): @@ -99,7 +112,7 @@ def test_call_countArgs(self): self.assertIsInstance(m.f, PythonCallbackFunction) self.assertEqual(value(m.f()), 0) self.assertEqual(value(m.f(2)), 1) - self.assertEqual(value(m.f(2,3)), 2) + self.assertEqual(value(m.f(2, 3)), 2) def test_call_sumfcn(self): m = ConcreteModel() @@ -107,78 +120,51 @@ def test_call_sumfcn(self): self.assertIsInstance(m.f, PythonCallbackFunction) self.assertEqual(value(m.f()), 2.0) self.assertEqual(value(m.f(1)), 3.0) - self.assertEqual(value(m.f(1,2)), 5.0) + self.assertEqual(value(m.f(1, 2)), 5.0) def test_evaluate_fgh_fgh(self): m = ConcreteModel() m.f = ExternalFunction(fgh=_fgh) f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id)) - self.assertEqual(f, 5**2 + 3*5*7 + 5*7*11**2) - self.assertEqual(g, [2*5 + 3*7 + 7*11**2, - 3*5 + 5*11**2, - 2*5*7*11, - 0]) - self.assertEqual(h, [ - 2, - 3+11**2, 0, - 2*7*11, 2*5*11, 2*5*7, - 0, 0, 0, 0, - ]) - - f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), - fixed=[0, 1, 0, 1]) - self.assertEqual(f, 5**2 + 3*5*7 + 5*7*11**2) - self.assertEqual(g, [2*5 + 3*7 + 7*11**2, - 0, - 2*5*7*11, - 0]) - self.assertEqual(h, [ - 2, - 0, 0, - 2*7*11, 0, 2*5*7, - 0, 0, 0, 0, - ]) + self.assertEqual(f, 5**2 + 3 * 5 * 7 + 5 * 7 * 11**2) + self.assertEqual( + g, [2 * 5 + 3 * 7 + 7 * 11**2, 3 * 5 + 5 * 11**2, 2 * 5 * 7 * 11, 0] + ) + self.assertEqual( + h, [2, 3 + 11**2, 0, 2 * 7 * 11, 2 * 5 * 11, 2 * 5 * 7, 0, 0, 0, 0] + ) + + f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), fixed=[0, 1, 0, 1]) + self.assertEqual(f, 5**2 + 3 * 5 * 7 + 5 * 7 * 11**2) + self.assertEqual(g, [2 * 5 + 3 * 7 + 7 * 11**2, 0, 2 * 5 * 7 * 11, 0]) + self.assertEqual(h, [2, 0, 0, 2 * 7 * 11, 0, 2 * 5 * 7, 0, 0, 0, 0]) def test_evaluate_fgh_f_g_h(self): m = ConcreteModel() m.f = ExternalFunction(_f, _g, _h) f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id)) - self.assertEqual(f, 5**2 + 3*5*7 + 5*7*11**2) - self.assertEqual(g, [2*5 + 3*7 + 7*11**2, - 3*5 + 5*11**2, - 2*5*7*11, - 0]) - self.assertEqual(h, [ - 2, - 3+11**2, 0, - 2*7*11, 2*5*11, 2*5*7, - 0, 0, 0, 0, - ]) - - f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), - fixed=[0, 1, 0, 1]) - self.assertEqual(f, 5**2 + 3*5*7 + 5*7*11**2) - self.assertEqual(g, [2*5 + 3*7 + 7*11**2, - 0, - 2*5*7*11, - 0]) - self.assertEqual(h, [ - 2, - 0, 0, - 2*7*11, 0, 2*5*7, - 0, 0, 0, 0, - ]) + self.assertEqual(f, 5**2 + 3 * 5 * 7 + 5 * 7 * 11**2) + self.assertEqual( + g, [2 * 5 + 3 * 7 + 7 * 11**2, 3 * 5 + 5 * 11**2, 2 * 5 * 7 * 11, 0] + ) + self.assertEqual( + h, [2, 3 + 11**2, 0, 2 * 7 * 11, 2 * 5 * 11, 2 * 5 * 7, 0, 0, 0, 0] + ) + + f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), fixed=[0, 1, 0, 1]) + self.assertEqual(f, 5**2 + 3 * 5 * 7 + 5 * 7 * 11**2) + self.assertEqual(g, [2 * 5 + 3 * 7 + 7 * 11**2, 0, 2 * 5 * 7 * 11, 0]) + self.assertEqual(h, [2, 0, 0, 2 * 7 * 11, 0, 2 * 5 * 7, 0, 0, 0, 0]) f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), fgh=1) - self.assertEqual(f, 5**2 + 3*5*7 + 5*7*11**2) - self.assertEqual(g, [2*5 + 3*7 + 7*11**2, - 3*5 + 5*11**2, - 2*5*7*11, - 0]) + self.assertEqual(f, 5**2 + 3 * 5 * 7 + 5 * 7 * 11**2) + self.assertEqual( + g, [2 * 5 + 3 * 7 + 7 * 11**2, 3 * 5 + 5 * 11**2, 2 * 5 * 7 * 11, 0] + ) self.assertIsNone(h) f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), fgh=0) - self.assertEqual(f, 5**2 + 3*5*7 + 5*7*11**2) + self.assertEqual(f, 5**2 + 3 * 5 * 7 + 5 * 7 * 11**2) self.assertIsNone(g) self.assertIsNone(h) @@ -186,20 +172,20 @@ def test_evaluate_fgh_f_g(self): m = ConcreteModel() m.f = ExternalFunction(_f, _g) with self.assertRaisesRegex( - RuntimeError, "ExternalFunction 'f' was not defined " - "with a Hessian callback."): + RuntimeError, + "ExternalFunction 'f' was not defined with a Hessian callback.", + ): f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id)) f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), fgh=1) - self.assertEqual(f, 5**2 + 3*5*7 + 5*7*11**2) - self.assertEqual(g, [2*5 + 3*7 + 7*11**2, - 3*5 + 5*11**2, - 2*5*7*11, - 0]) + self.assertEqual(f, 5**2 + 3 * 5 * 7 + 5 * 7 * 11**2) + self.assertEqual( + g, [2 * 5 + 3 * 7 + 7 * 11**2, 3 * 5 + 5 * 11**2, 2 * 5 * 7 * 11, 0] + ) self.assertIsNone(h) f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), fgh=0) - self.assertEqual(f, 5**2 + 3*5*7 + 5*7*11**2) + self.assertEqual(f, 5**2 + 3 * 5 * 7 + 5 * 7 * 11**2) self.assertIsNone(g) self.assertIsNone(h) @@ -207,17 +193,19 @@ def test_evaluate_fgh_f(self): m = ConcreteModel() m.f = ExternalFunction(_f) with self.assertRaisesRegex( - RuntimeError, "ExternalFunction 'f' was not defined " - "with a gradient callback."): + RuntimeError, + "ExternalFunction 'f' was not defined with a gradient callback.", + ): f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id)) with self.assertRaisesRegex( - RuntimeError, "ExternalFunction 'f' was not defined " - "with a gradient callback."): + RuntimeError, + "ExternalFunction 'f' was not defined with a gradient callback.", + ): f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), fgh=1) f, g, h = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), fgh=0) - self.assertEqual(f, 5**2 + 3*5*7 + 5*7*11**2) + self.assertEqual(f, 5**2 + 3 * 5 * 7 + 5 * 7 * 11**2) self.assertIsNone(g) self.assertIsNone(h) @@ -225,23 +213,25 @@ def test_evaluate_errors(self): m = ConcreteModel() m.f = ExternalFunction(_f, _g_bad, _h_bad) f = m.f.evaluate((5, 7, 11, m.f._fcn_id)) - self.assertEqual(f, 5**2 + 3*5*7 + 5*7*11**2) + self.assertEqual(f, 5**2 + 3 * 5 * 7 + 5 * 7 * 11**2) with self.assertRaisesRegex( - RuntimeError, - "PythonCallbackFunction called with invalid Global ID"): + RuntimeError, "PythonCallbackFunction called with invalid Global ID" + ): f = m.f.evaluate((5, 7, 11, -1)) with self.assertRaisesRegex( - RuntimeError, - "External function 'f' returned an invalid " - r"derivative vector \(expected 4, received 5\)"): + RuntimeError, + "External function 'f' returned an invalid " + r"derivative vector \(expected 4, received 5\)", + ): f = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), fgh=1) with self.assertRaisesRegex( - RuntimeError, - "External function 'f' returned an invalid " - r"Hessian matrix \(expected 10, received 9\)"): + RuntimeError, + "External function 'f' returned an invalid " + r"Hessian matrix \(expected 10, received 9\)", + ): f = m.f.evaluate_fgh((5, 7, 11, m.f._fcn_id), fgh=2) def test_getname(self): @@ -349,26 +339,33 @@ def test_pprint(self): out = StringIO() m.pprint(ostream=out) - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ 1 ExternalFunction Declarations h : function=_count, units=None, arg_units=None 1 Declarations: h - """.strip()) + """.strip(), + ) if not pint_available: return - m.i = ExternalFunction(function=_sum, - units=units.kg, arg_units=[units.m, units.s]) + m.i = ExternalFunction( + function=_sum, units=units.kg, arg_units=[units.m, units.s] + ) out = StringIO() m.pprint(ostream=out) - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ 2 ExternalFunction Declarations h : function=_count, units=None, arg_units=None i : function=_sum, units=kg, arg_units=['m', 's'] 2 Declarations: h i - """.strip()) + """.strip(), + ) def test_pprint(self): m = ConcreteModel() @@ -376,34 +373,40 @@ def test_pprint(self): out = StringIO() m.pprint() m.pprint(ostream=out) - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ 1 ExternalFunction Declarations h : function=_g, units=None, arg_units=None 1 Declarations: h - """.strip()) + """.strip(), + ) if not pint_available: return - m.i = ExternalFunction(function=_h, - units=units.kg, arg_units=[units.m, units.s]) + m.i = ExternalFunction( + function=_h, units=units.kg, arg_units=[units.m, units.s] + ) out = StringIO() m.pprint(ostream=out) - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ 2 ExternalFunction Declarations h : function=_g, units=None, arg_units=None i : function=_h, units=kg, arg_units=['m', 's'] 2 Declarations: h i - """.strip()) + """.strip(), + ) class TestAMPLExternalFunction(unittest.TestCase): def assertListsAlmostEqual(self, first, second, places=7, msg=None): self.assertEqual(len(first), len(second)) - msg = "lists %s and %s differ at item " % ( - first, second) - for i,a in enumerate(first): + msg = "lists %s and %s differ at item " % (first, second) + for i, a in enumerate(first): self.assertAlmostEqual(a, second[i], places, msg + str(i)) def test_getname(self): @@ -422,9 +425,11 @@ def test_getname(self): self.assertEqual(M.m.f.getname(), "f") self.assertEqual(M.m.f.getname(True), "m.f") - @unittest.skipIf(sys.platform.lower().startswith('win'), - "Cannot (easily) unload a DLL in Windows, so " - "cannot clean up the 'temporary' DLL") + @unittest.skipIf( + sys.platform.lower().startswith('win'), + "Cannot (easily) unload a DLL in Windows, so " + "cannot clean up the 'temporary' DLL", + ) def test_load_local_asl_library(self): DLL = find_GSL() if not DLL: @@ -433,9 +438,8 @@ def test_load_local_asl_library(self): LIB = 'test_pyomo_external_gsl.dll' model = ConcreteModel() - model.gamma = ExternalFunction( - library=LIB, function="gsl_sf_gamma") - model.x = Var(initialize=3, bounds=(1e-5,None)) + model.gamma = ExternalFunction(library=LIB, function="gsl_sf_gamma") + model.x = Var(initialize=3, bounds=(1e-5, None)) model.o = Objective(expr=model.gamma(model.x)) with TempfileManager.new_context() as tempfile: @@ -456,27 +460,26 @@ def test_unknown_library(self): m = ConcreteModel() with LoggingIntercept() as LOG: m.ef = ExternalFunction( - library='unknown_pyomo_external_testing_function', - function='f') + library='unknown_pyomo_external_testing_function', function='f' + ) self.assertEqual( LOG.getvalue(), 'Defining AMPL external function, but cannot locate ' - 'specified library "unknown_pyomo_external_testing_function"\n') + 'specified library "unknown_pyomo_external_testing_function"\n', + ) def test_eval_gsl_function(self): DLL = find_GSL() if not DLL: self.skipTest("Could not find the amplgsl.dll library") model = ConcreteModel() - model.gamma = ExternalFunction( - library=DLL, function="gsl_sf_gamma") - model.bessel = ExternalFunction( - library=DLL, function="gsl_sf_bessel_Jnu") - model.x = Var(initialize=3, bounds=(1e-5,None)) + model.gamma = ExternalFunction(library=DLL, function="gsl_sf_gamma") + model.bessel = ExternalFunction(library=DLL, function="gsl_sf_bessel_Jnu") + model.x = Var(initialize=3, bounds=(1e-5, None)) model.o = Objective(expr=model.gamma(model.x)) self.assertAlmostEqual(value(model.o), 2.0, 7) - f = model.bessel.evaluate((0.5, 2.0,)) + f = model.bessel.evaluate((0.5, 2.0)) self.assertAlmostEqual(f, 0.5130161365618272, 7) def test_eval_gsl_error(self): @@ -484,11 +487,12 @@ def test_eval_gsl_error(self): if not DLL: self.skipTest("Could not find the amplgsl.dll library") model = ConcreteModel() - model.bogus = ExternalFunction( - library=DLL, function="bogus_function") + model.bogus = ExternalFunction(library=DLL, function="bogus_function") with self.assertRaisesRegex( - RuntimeError, "Error: external function 'bogus_function' was " - "not registered within external library(?s:.*)gsl_sf_gamma"): + RuntimeError, + "Error: external function 'bogus_function' was " + "not registered within external library(?s:.*)gsl_sf_gamma", + ): f = model.bogus.evaluate((1,)) def test_eval_fgh_gsl_function(self): @@ -496,50 +500,43 @@ def test_eval_fgh_gsl_function(self): if not DLL: self.skipTest("Could not find the amplgsl.dll library") model = ConcreteModel() - model.gamma = ExternalFunction( - library=DLL, function="gsl_sf_gamma") - model.beta = ExternalFunction( - library=DLL, function="gsl_sf_beta") - model.bessel = ExternalFunction( - library=DLL, function="gsl_sf_bessel_Jnu") - - f,g,h = model.gamma.evaluate_fgh((2.0,)) + model.gamma = ExternalFunction(library=DLL, function="gsl_sf_gamma") + model.beta = ExternalFunction(library=DLL, function="gsl_sf_beta") + model.bessel = ExternalFunction(library=DLL, function="gsl_sf_bessel_Jnu") + + f, g, h = model.gamma.evaluate_fgh((2.0,)) self.assertAlmostEqual(f, 1.0, 7) self.assertListsAlmostEqual(g, [0.422784335098467], 7) self.assertListsAlmostEqual(h, [0.8236806608528794], 7) - f,g,h = model.beta.evaluate_fgh((2.5, 2.0,), fixed=[1,1]) + f, g, h = model.beta.evaluate_fgh((2.5, 2.0), fixed=[1, 1]) self.assertAlmostEqual(f, 0.11428571428571432, 7) self.assertListsAlmostEqual(g, [0.0, 0.0], 7) self.assertListsAlmostEqual(h, [0.0, 0.0, 0.0], 7) - f,g,h = model.beta.evaluate_fgh((2.5, 2.0,), fixed=[0,1]) + f, g, h = model.beta.evaluate_fgh((2.5, 2.0), fixed=[0, 1]) self.assertAlmostEqual(f, 0.11428571428571432, 7) - self.assertListsAlmostEqual( - g, [-0.07836734693877555, 0.0], 7) - self.assertListsAlmostEqual( - h, [0.08135276967930034, 0.0, 0.0], 7) + self.assertListsAlmostEqual(g, [-0.07836734693877555, 0.0], 7) + self.assertListsAlmostEqual(h, [0.08135276967930034, 0.0, 0.0], 7) - f,g,h = model.beta.evaluate_fgh((2.5, 2.0,)) + f, g, h = model.beta.evaluate_fgh((2.5, 2.0)) self.assertAlmostEqual(f, 0.11428571428571432, 7) + self.assertListsAlmostEqual(g, [-0.07836734693877555, -0.11040989614412142], 7) self.assertListsAlmostEqual( - g, [-0.07836734693877555, -0.11040989614412142], 7) - self.assertListsAlmostEqual( - h, [0.08135276967930034, 0.0472839170086535, 0.15194654464270113], - 7) + h, [0.08135276967930034, 0.0472839170086535, 0.15194654464270113], 7 + ) - f,g,h = model.beta.evaluate_fgh((2.5, 2.0,), fgh=1) + f, g, h = model.beta.evaluate_fgh((2.5, 2.0), fgh=1) self.assertAlmostEqual(f, 0.11428571428571432, 7) - self.assertListsAlmostEqual( - g, [-0.07836734693877555, -0.11040989614412142], 7) + self.assertListsAlmostEqual(g, [-0.07836734693877555, -0.11040989614412142], 7) self.assertIsNone(h) - f,g,h = model.beta.evaluate_fgh((2.5, 2.0,), fgh=0) + f, g, h = model.beta.evaluate_fgh((2.5, 2.0), fgh=0) self.assertAlmostEqual(f, 0.11428571428571432, 7) self.assertIsNone(g) self.assertIsNone(h) - f,g,h = model.bessel.evaluate_fgh((2.5, 2.0,), fixed=[1,0]) + f, g, h = model.bessel.evaluate_fgh((2.5, 2.0), fixed=[1, 0]) self.assertAlmostEqual(f, 0.223924531469, 7) self.assertListsAlmostEqual(g, [0.0, 0.21138811435101745], 7) self.assertListsAlmostEqual(h, [0.0, 0.0, 0.02026349177575621], 7) @@ -548,47 +545,49 @@ def test_eval_fgh_gsl_function(self): # (notably, gamma and bessel do not as of 12/2021). We will # test that our interface corrects that - f,g,h = model.gamma.evaluate_fgh((2.0,), fixed=[1]) + f, g, h = model.gamma.evaluate_fgh((2.0,), fixed=[1]) self.assertAlmostEqual(f, 1.0, 7) self.assertListsAlmostEqual(g, [0.0], 7) self.assertListsAlmostEqual(h, [0.0], 7) - f,g,h = model.bessel.evaluate_fgh((2.5, 2.0,), fixed=[1,1]) + f, g, h = model.bessel.evaluate_fgh((2.5, 2.0), fixed=[1, 1]) self.assertAlmostEqual(f, 0.223924531469, 7) self.assertListsAlmostEqual(g, [0.0, 0.0], 7) self.assertListsAlmostEqual(h, [0.0, 0.0, 0.0], 7) - - @unittest.skipIf(not check_available_solvers('ipopt'), - "The 'ipopt' solver is not available") + @unittest.skipIf( + not check_available_solvers('ipopt'), "The 'ipopt' solver is not available" + ) def test_solve_gsl_function(self): DLL = find_GSL() if not DLL: self.skipTest("Could not find the amplgsl.dll library") model = ConcreteModel() model.z_func = ExternalFunction(library=DLL, function="gsl_sf_gamma") - model.x = Var(initialize=3, bounds=(1e-5,None)) + model.x = Var(initialize=3, bounds=(1e-5, None)) model.o = Objective(expr=model.z_func(model.x)) opt = SolverFactory('ipopt') res = opt.solve(model, tee=True) self.assertAlmostEqual(value(model.o), 0.885603194411, 7) - @unittest.skipIf(not check_available_solvers('ipopt'), - "The 'ipopt' solver is not available") + @unittest.skipIf( + not check_available_solvers('ipopt'), "The 'ipopt' solver is not available" + ) def test_solve_gsl_function_const_arg(self): DLL = find_GSL() if not DLL: self.skipTest("Could not find the amplgsl.dll library") model = ConcreteModel() model.z_func = ExternalFunction(library=DLL, function="gsl_sf_beta") - model.x = Var(initialize=1, bounds=(0.1,None)) + model.x = Var(initialize=1, bounds=(0.1, None)) model.o = Objective(expr=-model.z_func(1, model.x)) opt = SolverFactory('ipopt') res = opt.solve(model, tee=True) self.assertAlmostEqual(value(model.x), 0.1, 5) - @unittest.skipIf(not check_available_solvers('ipopt'), - "The 'ipopt' solver is not available") + @unittest.skipIf( + not check_available_solvers('ipopt'), "The 'ipopt' solver is not available" + ) def test_clone_gsl_function(self): DLL = find_GSL() if not DLL: @@ -596,7 +595,7 @@ def test_clone_gsl_function(self): m = ConcreteModel() m.z_func = ExternalFunction(library=DLL, function="gsl_sf_gamma") self.assertIsInstance(m.z_func, AMPLExternalFunction) - m.x = Var(initialize=3, bounds=(1e-5,None)) + m.x = Var(initialize=3, bounds=(1e-5, None)) m.o = Objective(expr=m.z_func(m.x)) opt = SolverFactory('ipopt') @@ -620,26 +619,36 @@ def test_pprint(self): out = StringIO() m.pprint(ostream=out) - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ 1 ExternalFunction Declarations f : function=junk, library=junk.so, units=None, arg_units=None 1 Declarations: f - """.strip()) + """.strip(), + ) if not pint_available: return - m.g = ExternalFunction(library="junk.so", function="junk", - units=units.kg, arg_units=[units.m, units.s]) + m.g = ExternalFunction( + library="junk.so", + function="junk", + units=units.kg, + arg_units=[units.m, units.s], + ) out = StringIO() m.pprint(ostream=out) - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ 2 ExternalFunction Declarations f : function=junk, library=junk.so, units=None, arg_units=None g : function=junk, library=junk.so, units=kg, arg_units=['m', 's'] 2 Declarations: f g - """.strip()) + """.strip(), + ) if __name__ == "__main__": diff --git a/pyomo/core/tests/unit/test_indexed.py b/pyomo/core/tests/unit/test_indexed.py index cdf80be8e39..29bf22ceeb1 100644 --- a/pyomo/core/tests/unit/test_indexed.py +++ b/pyomo/core/tests/unit/test_indexed.py @@ -14,17 +14,21 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep from pyomo.common import DeveloperError import pyomo.common.unittest as unittest +from pyomo.common.log import LoggingIntercept -from pyomo.environ import ConcreteModel, Var, Param, Set, value +from pyomo.environ import ConcreteModel, Var, Param, Set, value, Integers +from pyomo.core.base.set import FiniteSetOf, OrderedSetOf from pyomo.core.base.indexed_component import normalize_index +from pyomo.core.expr import GetItemExpression +from pyomo.core import SortComponents class TestSimpleVar(unittest.TestCase): - def test0(self): # Test fixed attribute - 1D m = ConcreteModel() @@ -83,7 +87,22 @@ def test3a(self): names = set() for var in m.x[:, 1, :]: names.add(var.name) - self.assertEqual(names, set(['x[0,1,0]', 'x[0,1,1]', 'x[0,1,2]', 'x[1,1,0]', 'x[1,1,1]', 'x[1,1,2]', 'x[2,1,0]', 'x[2,1,1]', 'x[2,1,2]' ])) + self.assertEqual( + names, + set( + [ + 'x[0,1,0]', + 'x[0,1,1]', + 'x[0,1,2]', + 'x[1,1,0]', + 'x[1,1,1]', + 'x[1,1,2]', + 'x[2,1,0]', + 'x[2,1,1]', + 'x[2,1,2]', + ] + ), + ) def test3b(self): # Test fixed attribute - 3D @@ -109,10 +128,12 @@ def test_normalize_index(self): self.assertEqual((1, 2, 'abc'), normalize_index((1, 2, ('abc',)))) a = [0, 9, 8] self.assertEqual((1, 2, 0, 9, 8), normalize_index((1, 2, a))) - self.assertEqual((1, 2, 3, 4, 5), normalize_index( - [[], 1, [], 2, [[], 3, [[], 4, []], []], 5, []])) + self.assertEqual( + (1, 2, 3, 4, 5), + normalize_index([[], 1, [], 2, [[], 3, [[], 4, []], []], 5, []]), + ) self.assertEqual((), normalize_index([[[[], []], []], []])) - self.assertEqual((), normalize_index([[], [[], [[],]]])) + self.assertEqual((), normalize_index([[], [[], [[]]]])) # Test that normalize_index doesn't expand component-like things m = ConcreteModel() @@ -131,7 +152,7 @@ def test_normalize_index(self): def test_index_by_constant_simpleComponent(self): m = ConcreteModel() m.i = Param(initialize=2) - m.x = Var([1,2,3], initialize=lambda m,x: 2*x) + m.x = Var([1, 2, 3], initialize=lambda m, x: 2 * x) self.assertEqual(value(m.x[2]), 4) self.assertEqual(value(m.x[m.i]), 4) self.assertIs(m.x[2], m.x[m.i]) @@ -140,39 +161,106 @@ def test_index_by_multiple_constant_simpleComponent(self): m = ConcreteModel() m.i = Param(initialize=2) m.j = Param(initialize=3) - m.x = Var([1,2,3], [1,2,3], initialize=lambda m,x,y: 2*x*y) - self.assertEqual(value(m.x[2,3]), 12) - self.assertEqual(value(m.x[m.i,3]), 12) - self.assertEqual(value(m.x[m.i,m.j]), 12) - self.assertEqual(value(m.x[2,m.j]), 12) - self.assertIs(m.x[2,3], m.x[m.i,3]) - self.assertIs(m.x[2,3], m.x[m.i,m.j]) - self.assertIs(m.x[2,3], m.x[2,m.j]) + m.x = Var([1, 2, 3], [1, 2, 3], initialize=lambda m, x, y: 2 * x * y) + self.assertEqual(value(m.x[2, 3]), 12) + self.assertEqual(value(m.x[m.i, 3]), 12) + self.assertEqual(value(m.x[m.i, m.j]), 12) + self.assertEqual(value(m.x[2, m.j]), 12) + self.assertIs(m.x[2, 3], m.x[m.i, 3]) + self.assertIs(m.x[2, 3], m.x[m.i, m.j]) + self.assertIs(m.x[2, 3], m.x[2, m.j]) def test_index_by_fixed_simpleComponent(self): m = ConcreteModel() m.i = Param(initialize=2, mutable=True) - m.x = Var([1,2,3], initialize=lambda m,x: 2*x) + m.x = Var([1, 2, 3], initialize=lambda m, x: 2 * x) self.assertEqual(value(m.x[2]), 4) self.assertRaisesRegex( - RuntimeError, 'is a fixed but not constant value', - m.x.__getitem__, m.i) + RuntimeError, 'is a fixed but not constant value', m.x.__getitem__, m.i + ) def test_index_by_variable_simpleComponent(self): m = ConcreteModel() - m.i = Var(initialize=2) - m.x = Var([1,2,3], initialize=lambda m,x: 2*x) + m.i = Var(initialize=2, domain=Integers) + m.x = Var([1, 2, 3], initialize=lambda m, x: 2 * x) self.assertEqual(value(m.x[2]), 4) - self.assertRaisesRegex( - RuntimeError, 'is not a constant value', - m.x.__getitem__, m.i) + + # Test we can index by a variable + thing = m.x[m.i] + self.assertIsInstance(thing, GetItemExpression) + self.assertEqual(len(thing.args), 2) + self.assertIs(thing.args[0], m.x) + self.assertIs(thing.args[1], m.i) + + # Test we can index by an integer-valued expression + idx_expr = 2 * m.i + 1 + thing = m.x[idx_expr] + self.assertIsInstance(thing, GetItemExpression) + self.assertEqual(len(thing.args), 2) + self.assertIs(thing.args[0], m.x) + self.assertIs(thing.args[1], idx_expr) + + def test_index_param_by_variable(self): + m = ConcreteModel() + m.i = Var(initialize=2, domain=Integers) + m.p = Param([1, 2, 3], initialize=lambda m, x: 2 * x) + + # Test we can index by a variable + thing = m.p[m.i] + self.assertIsInstance(thing, GetItemExpression) + self.assertEqual(len(thing.args), 2) + self.assertIs(thing.args[0], m.p) + self.assertIs(thing.args[1], m.i) + + # Test we can index by an integer-valued expression + idx_expr = 2**m.i + 1 + thing = m.p[idx_expr] + self.assertIsInstance(thing, GetItemExpression) + self.assertEqual(len(thing.args), 2) + self.assertIs(thing.args[0], m.p) + self.assertIs(thing.args[1], idx_expr) + + def test_index_var_by_tuple_with_variables(self): + m = ConcreteModel() + m.x = Var([(1, 1), (2, 1), (1, 2), (2, 2)]) + m.i = Var([1, 2, 3], domain=Integers) + + thing = m.x[1, m.i[1]] + self.assertIsInstance(thing, GetItemExpression) + self.assertEqual(len(thing.args), 3) + self.assertIs(thing.args[0], m.x) + self.assertEqual(thing.args[1], 1) + self.assertIs(thing.args[2], m.i[1]) + + idx_expr = m.i[1] + m.i[2] * m.i[3] + thing = m.x[1, idx_expr] + self.assertIsInstance(thing, GetItemExpression) + self.assertEqual(len(thing.args), 3) + self.assertIs(thing.args[0], m.x) + self.assertEqual(thing.args[1], 1) + self.assertIs(thing.args[2], idx_expr) def test_index_by_unhashable_type(self): m = ConcreteModel() - m.x = Var([1,2,3], initialize=lambda m,x: 2*x) - self.assertRaisesRegex( - TypeError, '.*', - m.x.__getitem__, {}) + m.x = Var([1, 2, 3], initialize=lambda m, x: 2 * x) + # Indexing by a dict raises an error + self.assertRaisesRegex(TypeError, '.*', m.x.__getitem__, {}) + # Indexing by lists works... + # ... scalar + self.assertIs(m.x[[1]], m.x[1]) + # ... "tuple" + m.y = Var([(1, 1), (1, 2)]) + self.assertIs(m.y[[1, 1]], m.y[1, 1]) + m.y[[1, 2]] = 5 + y12 = m.y[[1, 2]] + self.assertEqual(y12.value, 5) + m.y[[1, 2]] = 15 + self.assertIs(y12, m.y[[1, 2]]) + self.assertEqual(y12.value, 15) + with self.assertRaisesRegex( + KeyError, r"Index '\(2, 2\)' is not valid for indexed component 'y'" + ): + m.y[[2, 2]] = 5 def test_ordered_keys(self): m = ConcreteModel() @@ -186,36 +274,154 @@ def test_ordered_keys(self): self.assertEqual(set(m.x.keys()), set(m.x.keys(True))) self.assertEqual(ordered_keys, list(m.x.keys(True))) - m.P = Param(m.I, initialize={k:v for v,k in enumerate(init_keys)}) + m.P = Param(m.I, initialize={k: v for v, k in enumerate(init_keys)}) self.assertNotEqual(list(m.P.keys()), list(m.P.keys(True))) self.assertEqual(set(m.P.keys()), set(m.P.keys(True))) self.assertEqual(ordered_keys, list(m.P.keys(True))) self.assertEqual([1, 0, 4, 2, 3], list(m.P.values(True))) - self.assertEqual(list(zip(ordered_keys, [1, 0, 4, 2, 3])), - list(m.P.items(True))) + self.assertEqual( + list(zip(ordered_keys, [1, 0, 4, 2, 3])), list(m.P.items(True)) + ) - m.P = Param(m.I, initialize={(1,2): 30, 1:10, 2:20}, default=1) + m.P = Param(m.I, initialize={(1, 2): 30, 1: 10, 2: 20}, default=1) self.assertNotEqual(list(m.P.keys()), list(m.P.keys(True))) self.assertEqual(set(m.P.keys()), set(m.P.keys(True))) self.assertEqual(ordered_keys, list(m.P.keys(True))) self.assertEqual([10, 20, 1, 30, 1], list(m.P.values(True))) - self.assertEqual(list(zip(ordered_keys, [10, 20, 1, 30, 1])), - list(m.P.items(True))) + self.assertEqual( + list(zip(ordered_keys, [10, 20, 1, 30, 1])), list(m.P.items(True)) + ) + + def test_ordered_keys_deprecation(self): + m = ConcreteModel() + unordered = [1, 3, 2] + ordered = [1, 2, 3] + m.I = FiniteSetOf(unordered) + m.x = Var(m.I) + self.assertEqual(list(m.x.keys()), unordered) + self.assertEqual(list(m.x.keys(SortComponents.ORDERED_INDICES)), ordered) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.keys(True)), ordered) + self.assertEqual(LOG.getvalue(), "") + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.keys(ordered=True)), ordered) + self.assertIn('keys(ordered=True) is deprecated', LOG.getvalue()) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.keys(ordered=False)), unordered) + self.assertIn('keys(ordered=False) is deprecated', LOG.getvalue()) + + m = ConcreteModel() + unordered = [1, 3, 2] + ordered = [1, 2, 3] + m.I = OrderedSetOf(unordered) + m.x = Var(m.I) + self.assertEqual(list(m.x.keys()), unordered) + self.assertEqual(list(m.x.keys(SortComponents.ORDERED_INDICES)), unordered) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.keys(True)), ordered) + self.assertEqual(LOG.getvalue(), "") + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.keys(ordered=True)), unordered) + self.assertIn('keys(ordered=True) is deprecated', LOG.getvalue()) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.keys(ordered=False)), unordered) + self.assertIn('keys(ordered=False) is deprecated', LOG.getvalue()) + + def test_ordered_values_deprecation(self): + m = ConcreteModel() + unordered = [1, 3, 2] + ordered = [1, 2, 3] + m.I = FiniteSetOf(unordered) + m.x = Var(m.I) + unordered = [m.x[i] for i in unordered] + ordered = [m.x[i] for i in ordered] + self.assertEqual(list(m.x.values()), unordered) + self.assertEqual(list(m.x.values(SortComponents.ORDERED_INDICES)), ordered) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.values(True)), ordered) + self.assertEqual(LOG.getvalue(), "") + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.values(ordered=True)), ordered) + self.assertIn('values(ordered=True) is deprecated', LOG.getvalue()) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.values(ordered=False)), unordered) + self.assertIn('values(ordered=False) is deprecated', LOG.getvalue()) + + m = ConcreteModel() + unordered = [1, 3, 2] + ordered = [1, 2, 3] + m.I = OrderedSetOf(unordered) + m.x = Var(m.I) + unordered = [m.x[i] for i in unordered] + ordered = [m.x[i] for i in ordered] + self.assertEqual(list(m.x.values()), unordered) + self.assertEqual(list(m.x.values(SortComponents.ORDERED_INDICES)), unordered) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.values(True)), ordered) + self.assertEqual(LOG.getvalue(), "") + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.values(ordered=True)), unordered) + self.assertIn('values(ordered=True) is deprecated', LOG.getvalue()) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.values(ordered=False)), unordered) + self.assertIn('values(ordered=False) is deprecated', LOG.getvalue()) + + def test_ordered_items_deprecation(self): + m = ConcreteModel() + unordered = [1, 3, 2] + ordered = [1, 2, 3] + m.I = FiniteSetOf(unordered) + m.x = Var(m.I) + unordered = [(i, m.x[i]) for i in unordered] + ordered = [(i, m.x[i]) for i in ordered] + self.assertEqual(list(m.x.items()), unordered) + self.assertEqual(list(m.x.items(SortComponents.ORDERED_INDICES)), ordered) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.items(True)), ordered) + self.assertEqual(LOG.getvalue(), "") + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.items(ordered=True)), ordered) + self.assertIn('items(ordered=True) is deprecated', LOG.getvalue()) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.items(ordered=False)), unordered) + self.assertIn('items(ordered=False) is deprecated', LOG.getvalue()) + + m = ConcreteModel() + unordered = [1, 3, 2] + ordered = [1, 2, 3] + m.I = OrderedSetOf(unordered) + m.x = Var(m.I) + unordered = [(i, m.x[i]) for i in unordered] + ordered = [(i, m.x[i]) for i in ordered] + self.assertEqual(list(m.x.items()), unordered) + self.assertEqual(list(m.x.items(SortComponents.ORDERED_INDICES)), unordered) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.items(True)), ordered) + self.assertEqual(LOG.getvalue(), "") + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.items(ordered=True)), unordered) + self.assertIn('items(ordered=True) is deprecated', LOG.getvalue()) + with LoggingIntercept() as LOG: + self.assertEqual(list(m.x.items(ordered=False)), unordered) + self.assertIn('items(ordered=False) is deprecated', LOG.getvalue()) def test_index_attribute_out_of_sync(self): m = ConcreteModel() - m.x = Var([1,2,3]) + m.x = Var([1, 2, 3]) # make sure everything is right to begin with for i in [1, 2, 3]: self.assertEqual(m.x[i].index(), i) # now mess it up m.x[3]._index = 2 with self.assertRaisesRegex( - DeveloperError, - ".*The '_data' dictionary and '_index' attribute are out of " - "sync for indexed Var 'x': The 2 entry in the '_data' " - "dictionary does not map back to this component data object."): + DeveloperError, + ".*The '_data' dictionary and '_index' attribute are out of " + "sync for indexed Var 'x': The 2 entry in the '_data' " + "dictionary does not map back to this component data object.", + normalize_whitespace=True, + ): m.x[3].index() + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_indexed_slice.py b/pyomo/core/tests/unit/test_indexed_slice.py index 52b49a07b51..e89c48a6061 100644 --- a/pyomo/core/tests/unit/test_indexed_slice.py +++ b/pyomo/core/tests/unit/test_indexed_slice.py @@ -21,33 +21,40 @@ from pyomo.core.base.indexed_component_slice import IndexedComponent_slice from pyomo.core.base.set import normalize_index + def _x_init(m, k): return k + def _y_init(m, i, j): - return i*10+j + return i * 10 + j + def _cx_init(b, k): i, j = b.index()[:2] - return i*100+j*10+k + return i * 100 + j * 10 + k + def _c(b, i, j): b.x = Var(b.model().K, initialize=_cx_init) + def _b(b, i, j): - _c(b,i,j) + _c(b, i, j) b.c = Block(b.model().I, b.model().J, rule=_c) + def _bb(b, i, j, k): - _c(b,i,j) + _c(b, i, j) b.c = Block(b.model().I, b.model().J, rule=_c) + class TestComponentSlices(unittest.TestCase): def setUp(self): self.m = m = ConcreteModel() - m.I = RangeSet(1,3) - m.J = RangeSet(4,6) - m.K = RangeSet(7,9) + m.I = RangeSet(1, 3) + m.J = RangeSet(4, 6) + m.K = RangeSet(7, 9) m.x = Var(m.K, initialize=_x_init) m.y = Var(m.I, m.J, initialize=_y_init) m.b = Block(m.I, m.J, rule=_b) @@ -57,180 +64,233 @@ def tearDown(self): self.m = None def test_simple_getitem(self): - self.assertIsInstance(self.m.b[1,4], _BlockData) + self.assertIsInstance(self.m.b[1, 4], _BlockData) def test_simple_getslice(self): - _slicer = self.m.b[:,4] + _slicer = self.m.b[:, 4] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] - self.assertEqual( - ans, ['b[1,4]', 'b[2,4]', 'b[3,4]'] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, ['b[1,4]', 'b[2,4]', 'b[3,4]']) - _slicer = self.m.b[1,4].c[:,4] + _slicer = self.m.b[1, 4].c[:, 4] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] - self.assertEqual( - ans, ['b[1,4].c[1,4]', 'b[1,4].c[2,4]', 'b[1,4].c[3,4]'] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, ['b[1,4].c[1,4]', 'b[1,4].c[2,4]', 'b[1,4].c[3,4]']) def test_wildcard_slice(self): with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): _slicer = self.m.b[:] _slicer = self.m.b[...] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] + ans = [str(x) for x in _slicer] self.assertEqual( - ans, [ 'b[1,4]', 'b[1,5]', 'b[1,6]', - 'b[2,4]', 'b[2,5]', 'b[2,6]', - 'b[3,4]', 'b[3,5]', 'b[3,6]', - ] ) + ans, + [ + 'b[1,4]', + 'b[1,5]', + 'b[1,6]', + 'b[2,4]', + 'b[2,5]', + 'b[2,6]', + 'b[3,4]', + 'b[3,5]', + 'b[3,6]', + ], + ) - _slicer = self.m.b[1,...] + _slicer = self.m.b[1, ...] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] - self.assertEqual( - ans, [ 'b[1,4]', 'b[1,5]', 'b[1,6]', - ] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, ['b[1,4]', 'b[1,5]', 'b[1,6]']) - _slicer = self.m.b[...,5] + _slicer = self.m.b[..., 5] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] - self.assertEqual( - ans, [ 'b[1,5]', - 'b[2,5]', - 'b[3,5]', - ] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, ['b[1,5]', 'b[2,5]', 'b[3,5]']) - _slicer = self.m.bb[2,...,8] + _slicer = self.m.bb[2, ..., 8] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] - self.assertEqual( - ans, [ 'bb[2,4,8]', 'bb[2,5,8]', 'bb[2,6,8]', - ] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, ['bb[2,4,8]', 'bb[2,5,8]', 'bb[2,6,8]']) - _slicer = self.m.bb[:,...,8] + _slicer = self.m.bb[:, ..., 8] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] + ans = [str(x) for x in _slicer] self.assertEqual( - ans, [ 'bb[1,4,8]', 'bb[1,5,8]', 'bb[1,6,8]', - 'bb[2,4,8]', 'bb[2,5,8]', 'bb[2,6,8]', - 'bb[3,4,8]', 'bb[3,5,8]', 'bb[3,6,8]', - ] ) + ans, + [ + 'bb[1,4,8]', + 'bb[1,5,8]', + 'bb[1,6,8]', + 'bb[2,4,8]', + 'bb[2,5,8]', + 'bb[2,6,8]', + 'bb[3,4,8]', + 'bb[3,5,8]', + 'bb[3,6,8]', + ], + ) - _slicer = self.m.bb[:,:,...,8] + _slicer = self.m.bb[:, :, ..., 8] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] + ans = [str(x) for x in _slicer] self.assertEqual( - ans, [ 'bb[1,4,8]', 'bb[1,5,8]', 'bb[1,6,8]', - 'bb[2,4,8]', 'bb[2,5,8]', 'bb[2,6,8]', - 'bb[3,4,8]', 'bb[3,5,8]', 'bb[3,6,8]', - ] ) + ans, + [ + 'bb[1,4,8]', + 'bb[1,5,8]', + 'bb[1,6,8]', + 'bb[2,4,8]', + 'bb[2,5,8]', + 'bb[2,6,8]', + 'bb[3,4,8]', + 'bb[3,5,8]', + 'bb[3,6,8]', + ], + ) - _slicer = self.m.bb[:,...,:,8] + _slicer = self.m.bb[:, ..., :, 8] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] + ans = [str(x) for x in _slicer] self.assertEqual( - ans, [ 'bb[1,4,8]', 'bb[1,5,8]', 'bb[1,6,8]', - 'bb[2,4,8]', 'bb[2,5,8]', 'bb[2,6,8]', - 'bb[3,4,8]', 'bb[3,5,8]', 'bb[3,6,8]', - ] ) + ans, + [ + 'bb[1,4,8]', + 'bb[1,5,8]', + 'bb[1,6,8]', + 'bb[2,4,8]', + 'bb[2,5,8]', + 'bb[2,6,8]', + 'bb[3,4,8]', + 'bb[3,5,8]', + 'bb[3,6,8]', + ], + ) - _slicer = self.m.b[1,4,...] + _slicer = self.m.b[1, 4, ...] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] - self.assertEqual( - ans, [ 'b[1,4]', - ] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, ['b[1,4]']) with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): - _slicer = self.m.b[1,2,3,...] + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): + _slicer = self.m.b[1, 2, 3, ...] with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): - _slicer = self.m.b[1,:,2] + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): + _slicer = self.m.b[1, :, 2] self.assertRaisesRegex( - IndexError, 'wildcard slice .* can only appear once', - self.m.b.__getitem__, (Ellipsis,Ellipsis) ) + IndexError, + 'wildcard slice .* can only appear once', + self.m.b.__getitem__, + (Ellipsis, Ellipsis), + ) def test_any_slice(self): m = ConcreteModel() m.x = Var(Any, dense=False) m.x[1] = 1 - m.x[1,1] = 2 + m.x[1, 1] = 2 m.x[2] = 3 self.assertEqual(list(str(_) for _ in m.x[:]), ['x[1]', 'x[2]']) - self.assertEqual(list(str(_) for _ in m.x[:,:]), ['x[1,1]']) + self.assertEqual(list(str(_) for _ in m.x[:, :]), ['x[1,1]']) self.assertEqual(list(str(_) for _ in m.x[...]), ['x[1]', 'x[1,1]', 'x[2]']) - def test_nonterminal_slice(self): - _slicer = self.m.b[:,4].x + _slicer = self.m.b[:, 4].x self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] - self.assertEqual( - ans, ['b[1,4].x', 'b[2,4].x', 'b[3,4].x'] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, ['b[1,4].x', 'b[2,4].x', 'b[3,4].x']) - _slicer = self.m.b[:,4].x[7] + _slicer = self.m.b[:, 4].x[7] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] - self.assertEqual( - ans, ['b[1,4].x[7]', 'b[2,4].x[7]', 'b[3,4].x[7]'] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, ['b[1,4].x[7]', 'b[2,4].x[7]', 'b[3,4].x[7]']) def test_nested_slices(self): - _slicer = self.m.b[1,:].c[:,4].x + _slicer = self.m.b[1, :].c[:, 4].x self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] + ans = [str(x) for x in _slicer] self.assertEqual( - ans, ['b[1,4].c[1,4].x', 'b[1,4].c[2,4].x', 'b[1,4].c[3,4].x', - 'b[1,5].c[1,4].x', 'b[1,5].c[2,4].x', 'b[1,5].c[3,4].x', - 'b[1,6].c[1,4].x', 'b[1,6].c[2,4].x', 'b[1,6].c[3,4].x', - ] ) + ans, + [ + 'b[1,4].c[1,4].x', + 'b[1,4].c[2,4].x', + 'b[1,4].c[3,4].x', + 'b[1,5].c[1,4].x', + 'b[1,5].c[2,4].x', + 'b[1,5].c[3,4].x', + 'b[1,6].c[1,4].x', + 'b[1,6].c[2,4].x', + 'b[1,6].c[3,4].x', + ], + ) - _slicer = self.m.b[1,:].c[:,4].x[8] + _slicer = self.m.b[1, :].c[:, 4].x[8] self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] + ans = [str(x) for x in _slicer] self.assertEqual( ans, - [ 'b[1,4].c[1,4].x[8]', 'b[1,4].c[2,4].x[8]', 'b[1,4].c[3,4].x[8]', - 'b[1,5].c[1,4].x[8]', 'b[1,5].c[2,4].x[8]', 'b[1,5].c[3,4].x[8]', - 'b[1,6].c[1,4].x[8]', 'b[1,6].c[2,4].x[8]', 'b[1,6].c[3,4].x[8]', - ] ) + [ + 'b[1,4].c[1,4].x[8]', + 'b[1,4].c[2,4].x[8]', + 'b[1,4].c[3,4].x[8]', + 'b[1,5].c[1,4].x[8]', + 'b[1,5].c[2,4].x[8]', + 'b[1,5].c[3,4].x[8]', + 'b[1,6].c[1,4].x[8]', + 'b[1,6].c[2,4].x[8]', + 'b[1,6].c[3,4].x[8]', + ], + ) def test_component_function_slices(self): - _slicer = self.m.component('b')[1,:].component('c')[:,4].component('x') + _slicer = self.m.component('b')[1, :].component('c')[:, 4].component('x') self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] + ans = [str(x) for x in _slicer] self.assertEqual( - ans, ['b[1,4].c[1,4].x', 'b[1,4].c[2,4].x', 'b[1,4].c[3,4].x', - 'b[1,5].c[1,4].x', 'b[1,5].c[2,4].x', 'b[1,5].c[3,4].x', - 'b[1,6].c[1,4].x', 'b[1,6].c[2,4].x', 'b[1,6].c[3,4].x', - ] ) + ans, + [ + 'b[1,4].c[1,4].x', + 'b[1,4].c[2,4].x', + 'b[1,4].c[3,4].x', + 'b[1,5].c[1,4].x', + 'b[1,5].c[2,4].x', + 'b[1,5].c[3,4].x', + 'b[1,6].c[1,4].x', + 'b[1,6].c[2,4].x', + 'b[1,6].c[3,4].x', + ], + ) def test_noncomponent_function_slices(self): - ans = self.m.component('b')[1,:].component('c')[:,4].x.fix(5) + ans = self.m.component('b')[1, :].component('c')[:, 4].x.fix(5) self.assertIsInstance(ans, list) - self.assertEqual( ans, [None]*9 ) + self.assertEqual(ans, [None] * 9) - ans = self.m.component('b')[1,:].component('c')[:,4].x[:].is_fixed() + ans = self.m.component('b')[1, :].component('c')[:, 4].x[:].is_fixed() self.assertIsInstance(ans, list) - self.assertEqual( ans, [True]*(9*3) ) + self.assertEqual(ans, [True] * (9 * 3)) - ans = self.m.component('b')[1,:].component('c')[:,5].x[:].is_fixed() + ans = self.m.component('b')[1, :].component('c')[:, 5].x[:].is_fixed() self.assertIsInstance(ans, list) - self.assertEqual( ans, [False]*(9*3) ) + self.assertEqual(ans, [False] * (9 * 3)) def test_setattr_slices(self): - init_sum = sum(self.m.b[:,:].c[:,:].x[:].value) - init_vals = list(self.m.b[1,:].c[:,4].x[:].value) - self.m.b[1,:].c[:,4].x[:].value = 0 - new_sum = sum(self.m.b[:,:].c[:,:].x[:].value) - new_vals = list(self.m.b[1,:].c[:,4].x[:].value) + init_sum = sum(self.m.b[:, :].c[:, :].x[:].value) + init_vals = list(self.m.b[1, :].c[:, 4].x[:].value) + self.m.b[1, :].c[:, 4].x[:].value = 0 + new_sum = sum(self.m.b[:, :].c[:, :].x[:].value) + new_vals = list(self.m.b[1, :].c[:, 4].x[:].value) # nothing got deleted self.assertEqual(len(init_vals), len(new_vals)) # the lists values were changes @@ -238,12 +298,13 @@ def test_setattr_slices(self): # the set values are all now zero self.assertEqual(sum(new_vals), 0) # nothing outside the set values changed - self.assertEqual(init_sum-sum(init_vals), new_sum) + self.assertEqual(init_sum - sum(init_vals), new_sum) # Test error on invalid attribute _slice = self.m.b[...].c[...].x[:] with self.assertRaisesRegex( - AttributeError, ".*VarData' object has no attribute 'bogus'"): + AttributeError, ".*VarData' object has no attribute 'bogus'" + ): _slice.bogus = 0 # but disabling the exception flag will run without error _slice.attribute_errors_generate_exceptions = False @@ -252,21 +313,26 @@ def test_setattr_slices(self): _slice.bogus = 0 def test_delattr_slices(self): - self.m.b[1,:].c[:,4].x.foo = 10 + self.m.b[1, :].c[:, 4].x.foo = 10 # check that the attribute was added - self.assertEqual(len(list(self.m.b[1,:].c[:,4].x)), 3*3) - self.assertEqual(sum(list(self.m.b[1,:].c[:,4].x.foo)), 10*3*3) - self.assertEqual(sum(list(1 if hasattr(x,'foo') else 0 - for x in self.m.b[:,:].c[:,:].x)), 3*3) + self.assertEqual(len(list(self.m.b[1, :].c[:, 4].x)), 3 * 3) + self.assertEqual(sum(list(self.m.b[1, :].c[:, 4].x.foo)), 10 * 3 * 3) + self.assertEqual( + sum(list(1 if hasattr(x, 'foo') else 0 for x in self.m.b[:, :].c[:, :].x)), + 3 * 3, + ) - _slice = self.m.b[1,:].c[:,4].x.foo + _slice = self.m.b[1, :].c[:, 4].x.foo _slice._call_stack[-1] = ( IndexedComponent_slice.del_attribute, - _slice._call_stack[-1][1] ) + _slice._call_stack[-1][1], + ) # call the iterator to delete the attributes list(_slice) - self.assertEqual(sum(list(1 if hasattr(x,'foo') else 0 - for x in self.m.b[:,:].c[:,:].x)), 0) + self.assertEqual( + sum(list(1 if hasattr(x, 'foo') else 0 for x in self.m.b[:, :].c[:, :].x)), + 0, + ) # calling the iterator again will raise an exception with self.assertRaisesRegex(AttributeError, 'foo'): list(_slice) @@ -277,11 +343,11 @@ def test_delattr_slices(self): list(_slice) def test_setitem_slices(self): - init_sum = sum(self.m.b[:,:].c[:,:].x[:].value) - init_vals = list(self.m.b[1,:].c[:,4].x[:].value) - self.m.b[1,:].c[:,4].x[:] = 0 - new_sum = sum(self.m.b[:,:].c[:,:].x[:].value) - new_vals = list(self.m.b[1,:].c[:,4].x[:].value) + init_sum = sum(self.m.b[:, :].c[:, :].x[:].value) + init_vals = list(self.m.b[1, :].c[:, 4].x[:].value) + self.m.b[1, :].c[:, 4].x[:] = 0 + new_sum = sum(self.m.b[:, :].c[:, :].x[:].value) + new_vals = list(self.m.b[1, :].c[:, 4].x[:].value) # nothing got deleted self.assertEqual(len(init_vals), len(new_vals)) # the lists values were changes @@ -289,12 +355,14 @@ def test_setitem_slices(self): # the set values are all now zero self.assertEqual(sum(new_vals), 0) # nothing outside the set values changed - self.assertEqual(init_sum-sum(init_vals), new_sum) + self.assertEqual(init_sum - sum(init_vals), new_sum) - _slice = self.m.b[1,:].c[:,4].x + _slice = self.m.b[1, :].c[:, 4].x with self.assertRaisesRegex( - KeyError, "Index 'bogus' is not valid for indexed " - r"component 'b\[1,4\]\.c\[1,4\]\.x'"): + KeyError, + "Index 'bogus' is not valid for indexed " + r"component 'b\[1,4\]\.c\[1,4\]\.x'", + ): _slice['bogus'] = 0 # but disabling the exception flag will run without error _slice.key_errors_generate_exceptions = False @@ -302,7 +370,6 @@ def test_setitem_slices(self): # is sufficient to verify the desired behavior _slice['bogus'] = 0 - def test_setitem_component(self): init_sum = sum(self.m.x[:].value) init_vals = list(self.m.x[:].value) @@ -316,13 +383,13 @@ def test_setitem_component(self): # the set values are all now zero self.assertEqual(sum(new_vals), 0) # nothing outside the set values changed - self.assertEqual(init_sum-sum(init_vals), new_sum) + self.assertEqual(init_sum - sum(init_vals), new_sum) - init_sum = sum(self.m.y[:,:].value) - init_vals = list(self.m.y[1,:].value) - self.m.y[1,:] = 0 - new_sum = sum(self.m.y[:,:].value) - new_vals = list(self.m.y[1,:].value) + init_sum = sum(self.m.y[:, :].value) + init_vals = list(self.m.y[1, :].value) + self.m.y[1, :] = 0 + new_sum = sum(self.m.y[:, :].value) + new_vals = list(self.m.y[1, :].value) # nothing got deleted self.assertEqual(len(init_vals), len(new_vals)) # the lists values were changes @@ -330,24 +397,26 @@ def test_setitem_component(self): # the set values are all now zero self.assertEqual(sum(new_vals), 0) # nothing outside the set values changed - self.assertEqual(init_sum-sum(init_vals), new_sum) + self.assertEqual(init_sum - sum(init_vals), new_sum) def test_delitem_slices(self): - init_all = list(self.m.b[:,:].c[:,:].x[:]) - init_tgt = list(self.m.b[1,:].c[:,4].x[:]) - del self.m.b[1,:].c[:,4].x[:] - new_all = list(self.m.b[:,:].c[:,:].x[:]) - new_tgt = list(self.m.b[1,:].c[:,4].x[:]) - - self.assertEqual(len(init_tgt), 3*3*3) - self.assertEqual(len(init_all), (3*3)*(3*3)*3) + init_all = list(self.m.b[:, :].c[:, :].x[:]) + init_tgt = list(self.m.b[1, :].c[:, 4].x[:]) + del self.m.b[1, :].c[:, 4].x[:] + new_all = list(self.m.b[:, :].c[:, :].x[:]) + new_tgt = list(self.m.b[1, :].c[:, 4].x[:]) + + self.assertEqual(len(init_tgt), 3 * 3 * 3) + self.assertEqual(len(init_all), (3 * 3) * (3 * 3) * 3) self.assertEqual(len(new_tgt), 0) - self.assertEqual(len(new_all), (3*3)*(3*3)*3 - 3*3*3) + self.assertEqual(len(new_all), (3 * 3) * (3 * 3) * 3 - 3 * 3 * 3) - _slice = self.m.b[2,:].c[:,4].x + _slice = self.m.b[2, :].c[:, 4].x with self.assertRaisesRegex( - KeyError, "Index 'bogus' is not valid for indexed " - r"component 'b\[2,4\]\.c\[1,4\]\.x'"): + KeyError, + "Index 'bogus' is not valid for indexed " + r"component 'b\[2,4\]\.c\[1,4\]\.x'", + ): del _slice['bogus'] # but disabling the exception flag will run without error _slice.key_errors_generate_exceptions = False @@ -355,134 +424,136 @@ def test_delitem_slices(self): # is sufficient to verify the desired behavior del _slice['bogus'] # Nothing additional should have been deleted - final_all = list(self.m.b[:,:].c[:,:].x[:]) + final_all = list(self.m.b[:, :].c[:, :].x[:]) self.assertEqual(len(new_all), len(final_all)) def test_delitem_component(self): - init_all = list(self.m.bb[:,:,:]) - del self.m.bb[:,:,:] - new_all = list(self.m.bb[:,:,:]) - self.assertEqual(len(init_all), 3*3*3) + init_all = list(self.m.bb[:, :, :]) + del self.m.bb[:, :, :] + new_all = list(self.m.bb[:, :, :]) + self.assertEqual(len(init_all), 3 * 3 * 3) self.assertEqual(len(new_all), 0) - init_all = list(self.m.b[:,:]) - init_tgt = list(self.m.b[1,:]) - del self.m.b[1,:] - new_all = list(self.m.b[:,:]) - new_tgt = list(self.m.b[1,:]) + init_all = list(self.m.b[:, :]) + init_tgt = list(self.m.b[1, :]) + del self.m.b[1, :] + new_all = list(self.m.b[:, :]) + new_tgt = list(self.m.b[1, :]) self.assertEqual(len(init_tgt), 3) - self.assertEqual(len(init_all), 3*3) + self.assertEqual(len(init_all), 3 * 3) self.assertEqual(len(new_tgt), 0) - self.assertEqual(len(new_all), 2*3) + self.assertEqual(len(new_all), 2 * 3) def test_empty_slices(self): - _slicer = self.m.b[1,:].c[:,1].x + _slicer = self.m.b[1, :].c[:, 1].x self.assertIsInstance(_slicer, IndexedComponent_slice) - ans = [ str(x) for x in _slicer ] - self.assertEqual( ans, [] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, []) - _slicer = self.m.b[1,:].c[:,4].x[1] + _slicer = self.m.b[1, :].c[:, 4].x[1] self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.key_errors_generate_exceptions = False - ans = [ str(x) for x in _slicer ] - self.assertEqual( ans, [] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, []) - _slicer = self.m.b[1,:].c[:,4].y + _slicer = self.m.b[1, :].c[:, 4].y self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.attribute_errors_generate_exceptions = False - ans = [ str(x) for x in _slicer ] - self.assertEqual( ans, [] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, []) - _slicer = self.m.b[1,:].c[:,4].component('y', False) + _slicer = self.m.b[1, :].c[:, 4].component('y', False) self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.call_errors_generate_exceptions = False - ans = [ str(x) for x in _slicer ] - self.assertEqual( ans, [] ) + ans = [str(x) for x in _slicer] + self.assertEqual(ans, []) - _slicer = self.m.b[1,:].c[:,4].x[1] + _slicer = self.m.b[1, :].c[:, 4].x[1] self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.key_errors_generate_exceptions = True - self.assertRaises( KeyError, _slicer.next ) + self.assertRaises(KeyError, _slicer.next) - _slicer = self.m.b[1,:].c[:,4].y + _slicer = self.m.b[1, :].c[:, 4].y self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.attribute_errors_generate_exceptions = True - self.assertRaises( AttributeError, _slicer.next ) + self.assertRaises(AttributeError, _slicer.next) - _slicer = self.m.b[1,:].c[:,4].component('y', False) + _slicer = self.m.b[1, :].c[:, 4].component('y', False) self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.call_errors_generate_exceptions = True - self.assertRaises( TypeError,_slicer.next ) + self.assertRaises(TypeError, _slicer.next) - _slicer = self.m.b[1,:].c[:,4].component() + _slicer = self.m.b[1, :].c[:, 4].component() self.assertIsInstance(_slicer, IndexedComponent_slice) _slicer.call_errors_generate_exceptions = True - self.assertRaises( TypeError, _slicer.next ) + self.assertRaises(TypeError, _slicer.next) def test_iterators(self): m = self.m _slice = self.m.x[...] + self.assertEqual(list(_slice.wildcard_keys()), [7, 8, 9]) self.assertEqual( - list(_slice.wildcard_keys()), - [7,8,9] + list(_slice.wildcard_items()), [(7, m.x[7]), (8, m.x[8]), (9, m.x[9])] ) + self.assertEqual(list(_slice.expanded_keys()), [7, 8, 9]) self.assertEqual( - list(_slice.wildcard_items()), - [(7, m.x[7]), (8, m.x[8]), (9, m.x[9])] - ) - self.assertEqual( - list(_slice.expanded_keys()), - [7,8,9] - ) - self.assertEqual( - list(_slice.expanded_items()), - [(7, m.x[7]), (8, m.x[8]), (9, m.x[9])] + list(_slice.expanded_items()), [(7, m.x[7]), (8, m.x[8]), (9, m.x[9])] ) _slice = self.m.b[...] self.assertEqual( list(_slice.wildcard_keys()), - [(1,4), (1,5), (1,6), (2,4), (2,5), (2,6), (3,4), (3,5), (3,6)] + [(1, 4), (1, 5), (1, 6), (2, 4), (2, 5), (2, 6), (3, 4), (3, 5), (3, 6)], ) self.assertEqual( list(_slice.wildcard_items()), - [((1,4), m.b[1,4]), ((1,5), m.b[1,5]), ((1,6), m.b[1,6]), - ((2,4), m.b[2,4]), ((2,5), m.b[2,5]), ((2,6), m.b[2,6]), - ((3,4), m.b[3,4]), ((3,5), m.b[3,5]), ((3,6), m.b[3,6]),] + [ + ((1, 4), m.b[1, 4]), + ((1, 5), m.b[1, 5]), + ((1, 6), m.b[1, 6]), + ((2, 4), m.b[2, 4]), + ((2, 5), m.b[2, 5]), + ((2, 6), m.b[2, 6]), + ((3, 4), m.b[3, 4]), + ((3, 5), m.b[3, 5]), + ((3, 6), m.b[3, 6]), + ], ) self.assertEqual( list(_slice.expanded_keys()), - [(1,4), (1,5), (1,6), (2,4), (2,5), (2,6), (3,4), (3,5), (3,6)] + [(1, 4), (1, 5), (1, 6), (2, 4), (2, 5), (2, 6), (3, 4), (3, 5), (3, 6)], ) self.assertEqual( list(_slice.expanded_items()), - [((1,4), m.b[1,4]), ((1,5), m.b[1,5]), ((1,6), m.b[1,6]), - ((2,4), m.b[2,4]), ((2,5), m.b[2,5]), ((2,6), m.b[2,6]), - ((3,4), m.b[3,4]), ((3,5), m.b[3,5]), ((3,6), m.b[3,6]),] + [ + ((1, 4), m.b[1, 4]), + ((1, 5), m.b[1, 5]), + ((1, 6), m.b[1, 6]), + ((2, 4), m.b[2, 4]), + ((2, 5), m.b[2, 5]), + ((2, 6), m.b[2, 6]), + ((3, 4), m.b[3, 4]), + ((3, 5), m.b[3, 5]), + ((3, 6), m.b[3, 6]), + ], ) - _slice = self.m.b[1,:] - self.assertEqual( - list(_slice.wildcard_keys()), - [4, 5, 6] - ) - self.assertEqual( - list(_slice.wildcard_items()), - [(4, m.b[1,4]), (5, m.b[1,5]), (6, m.b[1,6]),] - ) + _slice = self.m.b[1, :] + self.assertEqual(list(_slice.wildcard_keys(False)), [4, 5, 6]) self.assertEqual( - list(_slice.expanded_keys()), - [(1,4), (1,5), (1,6)] + list(_slice.wildcard_items(False)), + [(4, m.b[1, 4]), (5, m.b[1, 5]), (6, m.b[1, 6])], ) + self.assertEqual(list(_slice.expanded_keys()), [(1, 4), (1, 5), (1, 6)]) self.assertEqual( list(_slice.expanded_items()), - [((1,4), m.b[1,4]), ((1,5), m.b[1,5]), ((1,6), m.b[1,6])] + [((1, 4), m.b[1, 4]), ((1, 5), m.b[1, 5]), ((1, 6), m.b[1, 6])], ) def test_pickle_slices(self): m = self.m - _slicer = m.b[1,:].c[:,4].x + _slicer = m.b[1, :].c[:, 4].x _new_slicer = pickle.loads(pickle.dumps(_slicer)) self.assertIsNot(_slicer, _new_slicer) @@ -490,20 +561,27 @@ def test_pickle_slices(self): self.assertIs(type(_slicer._call_stack), type(_new_slicer._call_stack)) self.assertEqual(len(_slicer._call_stack), len(_new_slicer._call_stack)) - ref = ['b[1,4].c[1,4].x', 'b[1,4].c[2,4].x', 'b[1,4].c[3,4].x', - 'b[1,5].c[1,4].x', 'b[1,5].c[2,4].x', 'b[1,5].c[3,4].x', - 'b[1,6].c[1,4].x', 'b[1,6].c[2,4].x', 'b[1,6].c[3,4].x', - ] - self.assertEqual([str(x) for x in _slicer], ref ) - self.assertEqual([str(x) for x in _new_slicer], ref ) - for x,y in zip(iter(_slicer), iter(_new_slicer)): + ref = [ + 'b[1,4].c[1,4].x', + 'b[1,4].c[2,4].x', + 'b[1,4].c[3,4].x', + 'b[1,5].c[1,4].x', + 'b[1,5].c[2,4].x', + 'b[1,5].c[3,4].x', + 'b[1,6].c[1,4].x', + 'b[1,6].c[2,4].x', + 'b[1,6].c[3,4].x', + ] + self.assertEqual([str(x) for x in _slicer], ref) + self.assertEqual([str(x) for x in _new_slicer], ref) + for x, y in zip(iter(_slicer), iter(_new_slicer)): self.assertIs(type(x), type(y)) self.assertEqual(x.name, y.name) self.assertIsNot(x, y) def test_clone_on_model(self): m = self.m - m.slicer = m.b[1,:].c[:,4].x + m.slicer = m.b[1, :].c[:, 4].x n = m.clone() self.assertIsNot(m, n) @@ -512,13 +590,20 @@ def test_clone_on_model(self): self.assertIs(type(m.slicer._call_stack), type(n.slicer._call_stack)) self.assertEqual(len(m.slicer._call_stack), len(n.slicer._call_stack)) - ref = ['b[1,4].c[1,4].x', 'b[1,4].c[2,4].x', 'b[1,4].c[3,4].x', - 'b[1,5].c[1,4].x', 'b[1,5].c[2,4].x', 'b[1,5].c[3,4].x', - 'b[1,6].c[1,4].x', 'b[1,6].c[2,4].x', 'b[1,6].c[3,4].x', - ] - self.assertEqual([str(x) for x in m.slicer], ref ) - self.assertEqual([str(x) for x in n.slicer], ref ) - for x,y in zip(iter(m.slicer), iter(n.slicer)): + ref = [ + 'b[1,4].c[1,4].x', + 'b[1,4].c[2,4].x', + 'b[1,4].c[3,4].x', + 'b[1,5].c[1,4].x', + 'b[1,5].c[2,4].x', + 'b[1,5].c[3,4].x', + 'b[1,6].c[1,4].x', + 'b[1,6].c[2,4].x', + 'b[1,6].c[3,4].x', + ] + self.assertEqual([str(x) for x in m.slicer], ref) + self.assertEqual([str(x) for x in n.slicer], ref) + for x, y in zip(iter(m.slicer), iter(n.slicer)): self.assertIs(type(x), type(y)) self.assertEqual(x.name, y.name) self.assertIsNot(x, y) @@ -527,36 +612,36 @@ def test_clone_on_model(self): def test_hash_eqality(self): m = self.m - a = m.b[1,:].c[:,...,4].x - b = m.b[1,:].c[1,...,:].x + a = m.b[1, :].c[:, ..., 4].x + b = m.b[1, :].c[1, ..., :].x self.assertNotEqual(a, b) self.assertNotEqual(a, m) self.assertEqual(a, a) - self.assertEqual(a, m.b[1,:].c[:,...,4].x) + self.assertEqual(a, m.b[1, :].c[:, ..., 4].x) - _set = set([a,b]) + _set = set([a, b]) self.assertEqual(len(_set), 2) - _set.add(m.b[1,:].c[:,...,4].x) + _set.add(m.b[1, :].c[:, ..., 4].x) self.assertEqual(len(_set), 2) - _set.add(m.b[1,:].c[:,4].x) + _set.add(m.b[1, :].c[:, 4].x) self.assertEqual(len(_set), 3) def test_duplicate(self): m = self.m - a = m.b[1,:].c[:,...,4] + a = m.b[1, :].c[:, ..., 4] b = a.x self.assertIs(a._call_stack, b._call_stack) - self.assertEqual(a._len+1, b._len) + self.assertEqual(a._len + 1, b._len) c = a.y - self.assertEqual(a._len+1, c._len) + self.assertEqual(a._len + 1, c._len) self.assertIsNot(a._call_stack, c._call_stack) b1 = b.duplicate() self.assertIsNot(a._call_stack, b1._call_stack) - self.assertEqual(a._len+1, b1._len) + self.assertEqual(a._len + 1, b1._len) self.assertEqual(hash(b), hash(b1)) def test_invalid_slices(self): @@ -566,25 +651,29 @@ def test_invalid_slices(self): self.assertIs(var, m.x) with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): _slicer = m.b[:] with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): _slicer = m.b[:, :, :] with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): - _slicer = m.b[:,:,:,...] + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): + _slicer = m.b[:, :, :, ...] # valid slice for b, but not c - _slicer = m.b[:,:,...].c[:,:,:].x + _slicer = m.b[:, :, ...].c[:, :, :].x with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): # Error not raised immediately because accessing c is deferred # until iteration. list(_slicer) @@ -592,33 +681,34 @@ def test_invalid_slices(self): # valid slice for b, but not c _slicer = m.b[2, :].c[:].x with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): list(_slicer) def test_nondim_set(self): m = ConcreteModel() - m.I = Set(dimen=None, initialize=[1,(2,3)]) + m.I = Set(dimen=None, initialize=[1, (2, 3)]) m.x = Var(m.I) ref = list(m.x[:]) self.assertEqual(len(ref), 1) self.assertIs(ref[0], m.x[1]) - ref = list(m.x[:,...,:]) + ref = list(m.x[:, ..., :]) self.assertEqual(len(ref), 1) - self.assertIs(ref[0], m.x[2,3]) + self.assertIs(ref[0], m.x[2, 3]) - ref = list(m.x[2,...]) + ref = list(m.x[2, ...]) self.assertEqual(len(ref), 1) - self.assertIs(ref[0], m.x[2,3]) + self.assertIs(ref[0], m.x[2, 3]) _old_flatten = normalize_index.flatten try: normalize_index.flatten = False m = ConcreteModel() - m.I = Set(dimen=None, initialize=[1,(2,3)]) + m.I = Set(dimen=None, initialize=[1, (2, 3)]) m.x = Var(m.I) ref = list(m.x[:]) @@ -626,26 +716,27 @@ def test_nondim_set(self): self.assertIs(ref[0], m.x[1]) with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): # If we are not flattening the sets, then # non-dimensioned Sets *still expect a single "slice". - list(m.x[:,...,:]) + list(m.x[:, ..., :]) finally: normalize_index.flatten = _old_flatten def test_UnknownSetDimen(self): m = ConcreteModel() - m.I = Set(initialize=[1,2,3]) + m.I = Set(initialize=[1, 2, 3]) m.J = Set() m.x = Var(m.I, m.J) with self.assertRaisesRegex( - IndexError, - 'Slicing components relies on knowing the underlying ' - 'set dimensionality'): - ref = list(m.x[:,:]) + IndexError, + 'Slicing components relies on knowing the underlying set dimensionality', + ): + ref = list(m.x[:, :]) def test_flatten_false(self): _old_flatten = normalize_index.flatten @@ -654,45 +745,47 @@ def test_flatten_false(self): m = ConcreteModel() m.I = Set(initialize=range(2)) - m.J = Set(initialize=range(2,4)) - m.K = Set(initialize=['a','b','c']) - m.IJ = m.I*m.J + m.J = Set(initialize=range(2, 4)) + m.K = Set(initialize=['a', 'b', 'c']) + m.IJ = m.I * m.J m.a = Var(m.I, m.J, m.K) m.b = Var(m.IJ, m.K) m.c = Var() with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): - _slicer = m.a[(0,2),:] + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): + _slicer = m.a[(0, 2), :] - _slicer = m.a[0,2,:] - names = [ 'a[0,2,a]', 'a[0,2,b]', 'a[0,2,c]' ] + _slicer = m.a[0, 2, :] + names = ['a[0,2,a]', 'a[0,2,b]', 'a[0,2,c]'] self.assertEqual(names, [var.name for var in _slicer]) with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): - _slicer = m.b[0,2,:] + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): + _slicer = m.b[0, 2, :] - _slicer = m.b[(0,2),:] - names = [ 'b[(0,2),a]', 'b[(0,2),b]', 'b[(0,2),c]' ] + _slicer = m.b[(0, 2), :] + names = ['b[(0,2),a]', 'b[(0,2),b]', 'b[(0,2),c]'] self.assertEqual(names, [var.name for var in _slicer]) with self.assertRaisesRegex( - IndexError, 'Index .* contains an invalid number of ' - 'entries for component .*'): - _slicer = m.b[:,2,'b'] + IndexError, + 'Index .* contains an invalid number of entries for component .*', + ): + _slicer = m.b[:, 2, 'b'] - _slicer = m.b[:,'b'] - names = [ 'b[(0,2),b]', 'b[(0,3),b]', - 'b[(1,2),b]', 'b[(1,3),b]' ] + _slicer = m.b[:, 'b'] + names = ['b[(0,2),b]', 'b[(0,3),b]', 'b[(1,2),b]', 'b[(1,3),b]'] self.assertEqual(names, [var.name for var in _slicer]) - _slicer = m.b[...,'b'] + _slicer = m.b[..., 'b'] self.assertEqual(names, [var.name for var in _slicer]) - _slicer = m.b[0,...] + _slicer = m.b[0, ...] self.assertEqual([], [var.name for var in _slicer]) _slicer = m.c[:] @@ -704,15 +797,15 @@ def test_flatten_false(self): def test_compare_1dim_slice(self): m = ConcreteModel() m.I = Set(initialize=range(2)) - m.J = Set(initialize=range(2,4)) - m.K = Set(initialize=['a','b']) + m.J = Set(initialize=range(2, 4)) + m.K = Set(initialize=['a', 'b']) @m.Block(m.I, m.J) def b(b, i, j): b.v = Var(m.K) - self.assertEqual(m.b[0,:].v[:], m.b[0,:].v[:]) - self.assertNotEqual(m.b[0,:].v[:], m.b[0,:].v['a']) + self.assertEqual(m.b[0, :].v[:], m.b[0, :].v[:]) + self.assertNotEqual(m.b[0, :].v[:], m.b[0, :].v['a']) def test_str(self): m = ConcreteModel() @@ -721,26 +814,35 @@ def test_str(self): # not if the slice is valid s = m.b[...].x[:, 1:2, 1:5:2, ::1, 5, 'a'].component('foo', kwarg=1) self.assertEqual( - str(s), - "b[...].x[:, 1:2, 1:5:2, ::1, 5, 'a'].component('foo', kwarg=1)") + str(s), "b[...].x[:, 1:2, 1:5:2, ::1, 5, 'a'].component('foo', kwarg=1)" + ) # To test set / del, we want to form the IndexedComponent_slice # without evaluating it s = m.b[...] self.assertEqual( - str(IndexedComponent_slice( - s, (IndexedComponent_slice.del_attribute, 'bogus'))), - 'del b[...].bogus') + str( + IndexedComponent_slice( + s, (IndexedComponent_slice.del_attribute, 'bogus') + ) + ), + 'del b[...].bogus', + ) self.assertEqual( - str(IndexedComponent_slice( - s, (IndexedComponent_slice.set_attribute, 'bogus', 10))), - 'b[...].bogus = 10') + str( + IndexedComponent_slice( + s, (IndexedComponent_slice.set_attribute, 'bogus', 10) + ) + ), + 'b[...].bogus = 10', + ) def test_slice_to_componentdata(self): m = ConcreteModel() - m.x = Var([1,2]) + m.x = Var([1, 2]) i = IndexedComponent_slice(m.x, None, None, None)[2] self.assertEqual(list(i), [m.x[2]]) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_initializer.py b/pyomo/core/tests/unit/test_initializer.py index f482db92d41..5767406bdf7 100644 --- a/pyomo/core/tests/unit/test_initializer.py +++ b/pyomo/core/tests/unit/test_initializer.py @@ -16,28 +16,38 @@ import pyomo.common.unittest as unittest from pyomo.common.config import ConfigValue, ConfigList, ConfigDict from pyomo.common.dependencies import ( - pandas as pd, pandas_available, numpy as np, numpy_available + pandas as pd, + pandas_available, + numpy as np, + numpy_available, ) from pyomo.core.base.util import flatten_tuple from pyomo.core.base.initializer import ( - Initializer, ConstantInitializer, ItemInitializer, ScalarCallInitializer, - IndexedCallInitializer, CountedCallInitializer, CountedCallGenerator, - DataFrameInitializer, DefaultInitializer, -) -from pyomo.environ import ( - ConcreteModel, Var, + Initializer, + ConstantInitializer, + ItemInitializer, + ScalarCallInitializer, + IndexedCallInitializer, + CountedCallInitializer, + CountedCallGenerator, + DataFrameInitializer, + DefaultInitializer, ) +from pyomo.environ import ConcreteModel, Var + def _init_scalar(m): return 1 + def _init_indexed(m, *args): i = 1 for arg in args: - i *= (arg+1) + i *= arg + 1 return i + class Test_Initializer(unittest.TestCase): def test_flattener(self): tup = (1, 0, (0, 1), (2, 3)) @@ -54,13 +64,14 @@ def test_constant(self): self.assertFalse(a.verified) self.assertFalse(a.contains_indices()) with self.assertRaisesRegex( - RuntimeError, "Initializer ConstantInitializer does " - "not contain embedded indices"): + RuntimeError, + "Initializer ConstantInitializer does not contain embedded indices", + ): a.indices() self.assertEqual(a(None, 1), 5) def test_dict(self): - a = Initializer({1:5}) + a = Initializer({1: 5}) self.assertIs(type(a), ItemInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) @@ -69,24 +80,25 @@ def test_dict(self): self.assertEqual(a(None, 1), 5) def test_sequence(self): - a = Initializer([0,5]) + a = Initializer([0, 5]) self.assertIs(type(a), ItemInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) self.assertTrue(a.contains_indices()) - self.assertEqual(list(a.indices()), [0,1]) + self.assertEqual(list(a.indices()), [0, 1]) self.assertEqual(a(None, 1), 5) - a = Initializer([0,5], treat_sequences_as_mappings=False) + a = Initializer([0, 5], treat_sequences_as_mappings=False) self.assertIs(type(a), ConstantInitializer) self.assertTrue(a.constant()) self.assertFalse(a.verified) self.assertFalse(a.contains_indices()) - self.assertEqual(a(None, 1), [0,5]) + self.assertEqual(a(None, 1), [0, 5]) def test_function(self): def a_init(m): return 0 + a = Initializer(a_init) self.assertIs(type(a), ScalarCallInitializer) self.assertTrue(a.constant()) @@ -95,7 +107,8 @@ def a_init(m): self.assertEqual(a(None, 1), 0) def x_init(m, i): - return i+1 + return i + 1 + a = Initializer(x_init) self.assertIs(type(a), IndexedCallInitializer) self.assertFalse(a.constant()) @@ -105,6 +118,7 @@ def x_init(m, i): def x2_init(m): return 0 + a = Initializer(x2_init) self.assertIs(type(a), ScalarCallInitializer) self.assertTrue(a.constant()) @@ -113,7 +127,8 @@ def x2_init(m): self.assertEqual(a(None, 1), 0) def y_init(m, i, j): - return j*(i+1) + return j * (i + 1) + a = Initializer(y_init) self.assertIs(type(a), IndexedCallInitializer) self.assertFalse(a.constant()) @@ -123,19 +138,19 @@ def y_init(m, i, j): def test_counted_call(self): def x_init(m, i): - return i+1 + return i + 1 def y_init(m, i, j): - return j*(i+1) + return j * (i + 1) def z_init(m, i, j, k): - return i*100 + j*10 + k + return i * 100 + j * 10 + k def bogus(m, i, j): return None m = ConcreteModel() - m.x = Var([1,2,3]) + m.x = Var([1, 2, 3]) a = Initializer(x_init) b = CountedCallInitializer(m.x, a) self.assertIs(type(b), CountedCallInitializer) @@ -158,8 +173,7 @@ def bogus(m, i, j): self.assertIs(a._fcn, b._fcn) c = b(None, 1) self.assertIs(type(c), CountedCallGenerator) - with self.assertRaisesRegex( - ValueError, 'Counted Var rule returned None'): + with self.assertRaisesRegex(ValueError, 'Counted Var rule returned None'): next(c) a = Initializer(y_init) @@ -176,7 +190,7 @@ def bogus(m, i, j): self.assertEqual(next(c), 3) self.assertEqual(next(c), 4) - m.y = Var([(1,2), (3,5)]) + m.y = Var([(1, 2), (3, 5)]) a = Initializer(y_init) b = CountedCallInitializer(m.y, a) self.assertIs(type(b), CountedCallInitializer) @@ -202,20 +216,20 @@ def bogus(m, i, j): self.assertEqual(next(c), 135) self.assertEqual(next(c), 235) self.assertEqual(next(c), 335) - + def test_method(self): class Init(object): def a_init(self, m): return 0 def x_init(self, m, i): - return i+1 + return i + 1 def x2_init(self, m): return 0 def y_init(self, m, i, j): - return j*(i+1) + return j * (i + 1) init = Init() @@ -248,7 +262,7 @@ def y_init(self, m, i, j): self.assertEqual(a(None, (1, 4)), 8) m = ConcreteModel() - m.x = Var([1,2,3]) + m.x = Var([1, 2, 3]) a = Initializer(init.y_init) b = CountedCallInitializer(m.x, a) self.assertIs(type(b), CountedCallInitializer) @@ -271,7 +285,7 @@ def a_init(cls, m): @classmethod def x_init(cls, m, i): - return i+1 + return i + 1 @classmethod def x2_init(cls, m): @@ -279,7 +293,7 @@ def x2_init(cls, m): @classmethod def y_init(cls, m, i, j): - return j*(i+1) + return j * (i + 1) a = Initializer(Init.a_init) self.assertIs(type(a), ScalarCallInitializer) @@ -310,7 +324,7 @@ def y_init(cls, m, i, j): self.assertEqual(a(None, (1, 4)), 8) m = ConcreteModel() - m.x = Var([1,2,3]) + m.x = Var([1, 2, 3]) a = Initializer(Init.y_init) b = CountedCallInitializer(m.x, a) self.assertIs(type(b), CountedCallInitializer) @@ -333,7 +347,7 @@ def a_init(m): @staticmethod def x_init(m, i): - return i+1 + return i + 1 @staticmethod def x2_init(m): @@ -341,7 +355,7 @@ def x2_init(m): @staticmethod def y_init(m, i, j): - return j*(i+1) + return j * (i + 1) a = Initializer(Init.a_init) self.assertIs(type(a), ScalarCallInitializer) @@ -372,7 +386,7 @@ def y_init(m, i, j): self.assertEqual(a(None, (1, 4)), 8) m = ConcreteModel() - m.x = Var([1,2,3]) + m.x = Var([1, 2, 3]) a = Initializer(Init.y_init) b = CountedCallInitializer(m.x, a) self.assertIs(type(b), CountedCallInitializer) @@ -391,33 +405,35 @@ def test_generator_fcn(self): def a_init(m): yield 0 yield 3 - with self.assertRaisesRegex( - ValueError, "Generator functions are not allowed"): + + with self.assertRaisesRegex(ValueError, "Generator functions are not allowed"): a = Initializer(a_init) a = Initializer(a_init, allow_generators=True) self.assertIs(type(a), ScalarCallInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) - self.assertEqual(list(a(None, 1)), [0,3]) + self.assertEqual(list(a(None, 1)), [0, 3]) def x_init(m, i): yield i - yield i+1 + yield i + 1 + a = Initializer(x_init, allow_generators=True) self.assertIs(type(a), IndexedCallInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) - self.assertEqual(list(a(None, 1)), [1,2]) + self.assertEqual(list(a(None, 1)), [1, 2]) def y_init(m, i, j): yield j - yield i+1 + yield i + 1 + a = Initializer(y_init, allow_generators=True) self.assertIs(type(a), IndexedCallInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) - self.assertEqual(list(a(None, (1, 4))), [4,2]) + self.assertEqual(list(a(None, (1, 4))), [4, 2]) def test_generator_method(self): class Init(object): @@ -427,58 +443,57 @@ def a_init(self, m): def x_init(self, m, i): yield i - yield i+1 + yield i + 1 def y_init(self, m, i, j): yield j - yield i+1 + yield i + 1 + init = Init() - with self.assertRaisesRegex( - ValueError, "Generator functions are not allowed"): + with self.assertRaisesRegex(ValueError, "Generator functions are not allowed"): a = Initializer(init.a_init) a = Initializer(init.a_init, allow_generators=True) self.assertIs(type(a), ScalarCallInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) - self.assertEqual(list(a(None, 1)), [0,3]) + self.assertEqual(list(a(None, 1)), [0, 3]) a = Initializer(init.x_init, allow_generators=True) self.assertIs(type(a), IndexedCallInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) - self.assertEqual(list(a(None, 1)), [1,2]) + self.assertEqual(list(a(None, 1)), [1, 2]) a = Initializer(init.y_init, allow_generators=True) self.assertIs(type(a), IndexedCallInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) - self.assertEqual(list(a(None, (1, 4))), [4,2]) + self.assertEqual(list(a(None, (1, 4))), [4, 2]) def test_generators(self): - with self.assertRaisesRegex( - ValueError, "Generators are not allowed"): - a = Initializer(iter([0,3])) + with self.assertRaisesRegex(ValueError, "Generators are not allowed"): + a = Initializer(iter([0, 3])) - a = Initializer(iter([0,3]), allow_generators=True) + a = Initializer(iter([0, 3]), allow_generators=True) self.assertIs(type(a), ConstantInitializer) self.assertTrue(a.constant()) self.assertFalse(a.verified) - self.assertEqual(list(a(None, 1)), [0,3]) + self.assertEqual(list(a(None, 1)), [0, 3]) def x_init(): yield 0 yield 3 - with self.assertRaisesRegex( - ValueError, "Generators are not allowed"): + + with self.assertRaisesRegex(ValueError, "Generators are not allowed"): a = Initializer(x_init()) a = Initializer(x_init(), allow_generators=True) self.assertIs(type(a), ConstantInitializer) self.assertTrue(a.constant()) self.assertFalse(a.verified) - self.assertEqual(list(a(None, 1)), [0,3]) + self.assertEqual(list(a(None, 1)), [0, 3]) def test_functor(self): class InitScalar(object): @@ -510,6 +525,7 @@ def __call__(self, m, i): def test_derived_function(self): def _scalar(m): return 10 + dynf = types.FunctionType(_scalar.__code__, {}) a = Initializer(dynf) @@ -520,6 +536,7 @@ def _scalar(m): def _indexed(m, i): return 10 + i + dynf = types.FunctionType(_indexed.__code__, {}) a = Initializer(dynf) @@ -553,11 +570,12 @@ def test_no_argspec(self): self.assertFalse(a.verified) self.assertFalse(a.contains_indices()) # but this is not callable, as int won't accept the 'model' - #self.assertEqual(a(None, None), 5) + # self.assertEqual(a(None, None), 5) def test_partial(self): def fcn(k, m, i, j): - return i*100 + j*10 + k + return i * 100 + j * 10 + k + part = functools.partial(fcn, 2) a = Initializer(part) self.assertIs(type(a), IndexedCallInitializer) @@ -567,7 +585,8 @@ def fcn(k, m, i, j): self.assertEqual(a(None, (5, 7)), 572) def fcn(k, i, j, m): - return i*100 + j*10 + k + return i * 100 + j * 10 + k + part = functools.partial(fcn, 2, 5, 7) a = Initializer(part) self.assertIs(type(a), ScalarCallInitializer) @@ -576,6 +595,17 @@ def fcn(k, i, j, m): self.assertFalse(a.contains_indices()) self.assertEqual(a(None, None), 572) + def fcn(m, k, i, j): + return i * 100 + j * 10 + k + + part = functools.partial(fcn, i=2, j=5, k=7) + a = Initializer(part) + self.assertIs(type(a), ScalarCallInitializer) + self.assertTrue(a.constant()) + self.assertFalse(a.verified) + self.assertFalse(a.contains_indices()) + self.assertEqual(a(None, None), 257) + @unittest.skipUnless(pandas_available, "Pandas is not installed") def test_dataframe(self): d = {'col1': [1, 2, 4]} @@ -585,7 +615,7 @@ def test_dataframe(self): self.assertFalse(a.constant()) self.assertFalse(a.verified) self.assertTrue(a.contains_indices()) - self.assertEqual(list(a.indices()), [0,1,2]) + self.assertEqual(list(a.indices()), [0, 1, 2]) self.assertEqual(a(None, 0), 1) self.assertEqual(a(None, 1), 2) self.assertEqual(a(None, 2), 4) @@ -593,20 +623,20 @@ def test_dataframe(self): d = {'col1': [1, 2, 4], 'col2': [10, 20, 40]} df = pd.DataFrame(data=d) with self.assertRaisesRegex( - ValueError, - 'DataFrameInitializer for DataFrame with multiple columns'): + ValueError, 'DataFrameInitializer for DataFrame with multiple columns' + ): a = Initializer(df) a = DataFrameInitializer(df, 'col2') self.assertIs(type(a), DataFrameInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) self.assertTrue(a.contains_indices()) - self.assertEqual(list(a.indices()), [0,1,2]) + self.assertEqual(list(a.indices()), [0, 1, 2]) self.assertEqual(a(None, 0), 10) self.assertEqual(a(None, 1), 20) self.assertEqual(a(None, 2), 40) - df = pd.DataFrame([10, 20, 30, 40], index=[[0,0,1,1],[0,1,0,1]]) + df = pd.DataFrame([10, 20, 30, 40], index=[[0, 0, 1, 1], [0, 1, 0, 1]]) a = Initializer(df) self.assertIs(type(a), DataFrameInitializer) self.assertFalse(a.constant()) @@ -620,26 +650,26 @@ def test_dataframe(self): @unittest.skipUnless(pandas_available, "Pandas is not installed") def test_series(self): - d = pd.Series({0:1, 1:2, 2:4}) + d = pd.Series({0: 1, 1: 2, 2: 4}) a = Initializer(d) self.assertIs(type(a), ItemInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) self.assertTrue(a.contains_indices()) - self.assertEqual(list(a.indices()), [0,1,2]) + self.assertEqual(list(a.indices()), [0, 1, 2]) self.assertEqual(a(None, 0), 1) self.assertEqual(a(None, 1), 2) self.assertEqual(a(None, 2), 4) @unittest.skipUnless(numpy_available, "Numpy is not installed") def test_ndarray(self): - d = np.array([1,2,4]) + d = np.array([1, 2, 4]) a = Initializer(d) self.assertIs(type(a), ItemInitializer) self.assertFalse(a.constant()) self.assertFalse(a.verified) self.assertTrue(a.contains_indices()) - self.assertEqual(list(a.indices()), [0,1,2]) + self.assertEqual(list(a.indices()), [0, 1, 2]) self.assertEqual(a(None, 0), 1) self.assertEqual(a(None, 1), 2) self.assertEqual(a(None, 2), 4) @@ -673,7 +703,7 @@ def test_initializer_initializer(self): self.assertFalse(a.constant()) self.assertFalse(a.verified) self.assertTrue(a.contains_indices()) - self.assertEqual(list(a.indices()), [0,1,2]) + self.assertEqual(list(a.indices()), [0, 1, 2]) self.assertEqual(a(None, 0), 10) self.assertEqual(a(None, 1), 20) self.assertEqual(a(None, 2), 40) @@ -686,7 +716,7 @@ def test_pickle(self): self.assertEqual(a.val, b.val) self.assertEqual(a.verified, b.verified) - a = Initializer({1:5}) + a = Initializer({1: 5}) b = pickle.loads(pickle.dumps(a)) self.assertIsNot(a, b) self.assertEqual(a._dict, b._dict) @@ -710,7 +740,7 @@ def test_pickle(self): self.assertEqual(b(None, 2), 3) def test_default_initializer(self): - a = Initializer({1:5}) + a = Initializer({1: 5}) d = DefaultInitializer(a, None, KeyError) self.assertFalse(d.constant()) self.assertTrue(d.contains_indices()) @@ -727,6 +757,7 @@ def rule(m, i): raise TypeError("type") else: raise RuntimeError("runtime") + a = Initializer(rule) d = DefaultInitializer(a, 100, (KeyError, RuntimeError)) self.assertFalse(d.constant()) diff --git a/pyomo/core/tests/unit/test_kernel_register_numpy_types.py b/pyomo/core/tests/unit/test_kernel_register_numpy_types.py index 405771cf297..0a9e3ab08f9 100644 --- a/pyomo/core/tests/unit/test_kernel_register_numpy_types.py +++ b/pyomo/core/tests/unit/test_kernel_register_numpy_types.py @@ -38,7 +38,6 @@ numpy_float_names.append('float16') numpy_float_names.append('float32') numpy_float_names.append('float64') - numpy_float_names.append('ndarray') # Complex numpy_complex_names = [] if numpy_available: @@ -53,12 +52,9 @@ def test_deprecation(self): import pyomo.core.kernel.register_numpy_types as rnt self.assertRegex( LOG.getvalue(), - "DEPRECATED: pyomo.core.kernel.register_numpy_types is deprecated.") - self.assertEqual(sorted(rnt.numpy_bool_names), - sorted(numpy_bool_names)) - self.assertEqual(sorted(rnt.numpy_int_names), - sorted(numpy_int_names)) - self.assertEqual(sorted(rnt.numpy_float_names), - sorted(numpy_float_names)) - self.assertEqual(sorted(rnt.numpy_complex_names), - sorted(numpy_complex_names)) + "DEPRECATED: pyomo.core.kernel.register_numpy_types is deprecated.", + ) + self.assertEqual(sorted(rnt.numpy_bool_names), sorted(numpy_bool_names)) + self.assertEqual(sorted(rnt.numpy_int_names), sorted(numpy_int_names)) + self.assertEqual(sorted(rnt.numpy_float_names), sorted(numpy_float_names)) + self.assertEqual(sorted(rnt.numpy_complex_names), sorted(numpy_complex_names)) diff --git a/pyomo/core/tests/unit/test_labelers.py b/pyomo/core/tests/unit/test_labelers.py index 0fc87204dd0..15c56b5390d 100644 --- a/pyomo/core/tests/unit/test_labelers.py +++ b/pyomo/core/tests/unit/test_labelers.py @@ -11,11 +11,25 @@ import pyomo.common.unittest as unittest -from pyomo.environ import ConcreteModel, Var, RangeSet, Block, Constraint, CounterLabeler, NumericLabeler, TextLabeler, ComponentUID, ShortNameLabeler, CNameLabeler, CuidLabeler, AlphaNumericTextLabeler, NameLabeler +from pyomo.environ import ( + ConcreteModel, + Var, + RangeSet, + Block, + Constraint, + CounterLabeler, + NumericLabeler, + TextLabeler, + ComponentUID, + ShortNameLabeler, + CNameLabeler, + CuidLabeler, + AlphaNumericTextLabeler, + NameLabeler, +) class LabelerTests(unittest.TestCase): - def setUp(self): m = ConcreteModel() m.mycomp = Var() @@ -211,14 +225,13 @@ def test_case_shortnamelabeler_overflow(self): m = self.m lbl = ShortNameLabeler(4, '_', caseInsensitive=True) for i in range(9): - self.assertEqual(lbl(m.mycomp), 'p_%d_' % (i+1)) + self.assertEqual(lbl(m.mycomp), 'p_%d_' % (i + 1)) with self.assertRaisesRegex(RuntimeError, "Too many identifiers"): lbl(m.mycomp) def test_shortnamelabeler_legal_regex(self): m = ConcreteModel() - lbl = ShortNameLabeler( - 60, suffix='_', prefix='s_', legalRegex='^[a-zA-Z]') + lbl = ShortNameLabeler(60, suffix='_', prefix='s_', legalRegex='^[a-zA-Z]') m.legal_var = Var() self.assertEqual(lbl(m.legal_var), 'legal_var') diff --git a/pyomo/core/tests/unit/test_list_objects.py b/pyomo/core/tests/unit/test_list_objects.py index 1078f36872c..442fa97b6d1 100644 --- a/pyomo/core/tests/unit/test_list_objects.py +++ b/pyomo/core/tests/unit/test_list_objects.py @@ -10,18 +10,20 @@ # ___________________________________________________________________________ import pyomo.common.unittest as unittest -from pyomo.core.base import (ConcreteModel, Var, Reals) -from pyomo.core.beta.list_objects import (XVarList, - XConstraintList, - XObjectiveList, - XExpressionList) +from pyomo.core.base import ConcreteModel, Var, Reals +from pyomo.core.beta.list_objects import ( + XVarList, + XConstraintList, + XObjectiveList, + XExpressionList, +) from pyomo.core.base.var import _GeneralVarData from pyomo.core.base.constraint import _GeneralConstraintData from pyomo.core.base.objective import _GeneralObjectiveData from pyomo.core.base.expression import _GeneralExpressionData -class _TestComponentListBase(object): +class _TestComponentListBase(object): _ctype = None _cdatatype = None @@ -48,8 +50,7 @@ def test_init2(self): self.assertEqual(model.c.is_indexed(), True) self.assertEqual(model.c.is_constructed(), True) with self.assertRaises(TypeError): - model.d = self._ctype(*tuple(self._cdatatype(self._arg()) - for i in index)) + model.d = self._ctype(*tuple(self._cdatatype(self._arg()) for i in index)) def test_len1(self): model = self.model @@ -71,7 +72,7 @@ def test_append(self): c_new = self._cdatatype(self._arg()) model.c.append(c_new) self.assertEqual(id(model.c[-1]), id(c_new)) - self.assertEqual(len(model.c), i+1) + self.assertEqual(len(model.c), i + 1) def test_insert(self): model = self.model @@ -82,7 +83,7 @@ def test_insert(self): c_new = self._cdatatype(self._arg()) model.c.insert(0, c_new) self.assertEqual(id(model.c[0]), id(c_new)) - self.assertEqual(len(model.c), i+1) + self.assertEqual(len(model.c), i + 1) def test_setitem(self): model = self.model @@ -194,7 +195,7 @@ def test_delitem(self): cdata = model.c[0] self.assertEqual(id(cdata.parent_component()), id(model.c)) del model.c[0] - self.assertEqual(len(model.c), len(index)-(i+1)) + self.assertEqual(len(model.c), len(index) - (i + 1)) self.assertEqual(cdata.parent_component(), None) def test_iter(self): @@ -249,30 +250,30 @@ def test_index(self): self.assertEqual(model.c.index(cdata), i) self.assertEqual(model.c.index(cdata, start=i), i) with self.assertRaises(ValueError): - model.c.index(cdata, start=i+1) + model.c.index(cdata, start=i + 1) with self.assertRaises(ValueError): model.c.index(cdata, start=i, stop=i) with self.assertRaises(ValueError): model.c.index(cdata, stop=i) - self.assertEqual(model.c.index(cdata, start=i, stop=i+1), i) + self.assertEqual(model.c.index(cdata, start=i, stop=i + 1), i) with self.assertRaises(ValueError): - model.c.index(cdata, start=i+1, stop=i+1) - self.assertEqual(model.c.index(cdata, start=-len(index)+i), i) + model.c.index(cdata, start=i + 1, stop=i + 1) + self.assertEqual(model.c.index(cdata, start=-len(index) + i), i) if i == index[-1]: - self.assertEqual(model.c.index(cdata, start=-len(index)+i+1), i) + self.assertEqual(model.c.index(cdata, start=-len(index) + i + 1), i) else: with self.assertRaises(ValueError): - self.assertEqual(model.c.index(cdata, start=-len(index)+i+1), i) + self.assertEqual(model.c.index(cdata, start=-len(index) + i + 1), i) if i == index[-1]: with self.assertRaises(ValueError): - self.assertEqual(model.c.index(cdata, stop=-len(index)+i+1), i) + self.assertEqual(model.c.index(cdata, stop=-len(index) + i + 1), i) else: - self.assertEqual(model.c.index(cdata, stop=-len(index)+i+1), i) + self.assertEqual(model.c.index(cdata, stop=-len(index) + i + 1), i) tmp = self._cdatatype(self._arg()) with self.assertRaises(ValueError): model.c.index(tmp) with self.assertRaises(ValueError): - model.c.index(tmp, stop=len(model.c)+1) + model.c.index(tmp, stop=len(model.c) + 1) def test_extend(self): model = self.model @@ -285,8 +286,7 @@ def test_extend(self): self.assertEqual(cdata.parent_component(), None) model.c.extend(c_more_list) for cdata in c_more_list: - self.assertEqual(id(cdata.parent_component()), - id(model.c)) + self.assertEqual(id(cdata.parent_component()), id(model.c)) def test_count(self): model = self.model @@ -311,19 +311,16 @@ def test_name(self): prefix = "c" for i in index: cdata = model.c[i] - self.assertEqual(cdata.local_name, - cdata.name) - cname = prefix + "["+str(i)+"]" - self.assertEqual(cdata.local_name, - cname) + self.assertEqual(cdata.local_name, cdata.name) + cname = prefix + "[" + str(i) + "]" + self.assertEqual(cdata.local_name, cname) -class _TestActiveComponentListBase(_TestComponentListBase): +class _TestActiveComponentListBase(_TestComponentListBase): def test_activate(self): model = self.model index = list(range(4)) - model.c = self._ctype(self._cdatatype(self._arg()) - for i in index) + model.c = self._ctype(self._cdatatype(self._arg()) for i in index) self.assertEqual(len(model.c), len(index)) self.assertEqual(model.c.active, True) model.c._active = False @@ -338,8 +335,7 @@ def test_activate(self): def test_activate(self): model = self.model index = list(range(4)) - model.c = self._ctype(self._cdatatype(self._arg()) - for i in index) + model.c = self._ctype(self._cdatatype(self._arg()) for i in index) self.assertEqual(len(model.c), len(index)) self.assertEqual(model.c.active, True) for i in index: @@ -368,44 +364,49 @@ def test_active(self): self.assertEqual(model.c.active, True) -class TestVarList(_TestComponentListBase, - unittest.TestCase): +class TestVarList(_TestComponentListBase, unittest.TestCase): # Note: the updated _GeneralVarData class only takes an optional # parent argument (you no longer pass the domain in) _ctype = XVarList _cdatatype = lambda self, arg: _GeneralVarData() + def setUp(self): _TestComponentListBase.setUp(self) self._arg = lambda: Reals -class TestExpressionList(_TestComponentListBase, - unittest.TestCase): + +class TestExpressionList(_TestComponentListBase, unittest.TestCase): _ctype = XExpressionList _cdatatype = _GeneralExpressionData + def setUp(self): _TestComponentListBase.setUp(self) self._arg = lambda: self.model.x**3 + # # Test components that include activate/deactivate # functionality. # -class TestConstraintList(_TestActiveComponentListBase, - unittest.TestCase): + +class TestConstraintList(_TestActiveComponentListBase, unittest.TestCase): _ctype = XConstraintList _cdatatype = _GeneralConstraintData + def setUp(self): _TestComponentListBase.setUp(self) self._arg = lambda: self.model.x >= 1 -class TestObjectiveList(_TestActiveComponentListBase, - unittest.TestCase): + +class TestObjectiveList(_TestActiveComponentListBase, unittest.TestCase): _ctype = XObjectiveList _cdatatype = _GeneralObjectiveData + def setUp(self): _TestComponentListBase.setUp(self) self._arg = lambda: self.model.x**2 + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_logical_constraint.py b/pyomo/core/tests/unit/test_logical_constraint.py index cb9d7dd3be8..ed8120da935 100644 --- a/pyomo/core/tests/unit/test_logical_constraint.py +++ b/pyomo/core/tests/unit/test_logical_constraint.py @@ -1,11 +1,18 @@ import pyomo.common.unittest as unittest from pyomo.core.expr.sympy_tools import sympy_available -from pyomo.environ import (AbstractModel, BooleanVar, ConcreteModel, - LogicalConstraint, TransformationFactory, Constraint) +from pyomo.environ import ( + AbstractModel, + BooleanVar, + ConcreteModel, + LogicalConstraint, + TransformationFactory, + Constraint, +) from pyomo.repn import generate_standard_repn from pyomo.gdp import Disjunction + class TestLogicalConstraintCreation(unittest.TestCase): def create_model(self, abstract=False): if abstract is True: @@ -19,8 +26,10 @@ def create_model(self, abstract=False): def test_construct(self): model = self.create_model() + def rule(model): return model.x + model.p = LogicalConstraint(rule=rule) self.assertIs(model.p.body, model.x) @@ -43,25 +52,29 @@ def check_lor_on_disjunct(self, model, disjunct, x1, x2): self.assertEqual(repn.linear_coefs[1], 1) @unittest.skipUnless(sympy_available, "Sympy not available") - def test_statement_in_Disjunct(self): + def test_statement_in_Disjunct_with_logical_to_linear(self): + # This is an old test that originally tested that GDP's + # BigM/Hull correctly handled Disjuncts with LogicalConstraints + # (implicitly calling logical_to_linear to leave the transformed + # algebraic constraints on the Disjuncts). That is no longer + # the default behavior. However, we will preserve this (with an + # explicit call to logical_to_linear) for posterity model = self.create_model() - model.disj = Disjunction(expr=[ - [model.x.lor(model.y)], [model.y.lor(model.z)] - ]) + model.disj = Disjunction(expr=[[model.x.lor(model.y)], [model.y.lor(model.z)]]) + + TransformationFactory('core.logical_to_linear').apply_to( + model, targets=model.disj.disjuncts + ) bigmed = TransformationFactory('gdp.bigm').create_using(model) # check that the algebraic versions are living on the Disjuncts - self.check_lor_on_disjunct(bigmed, bigmed.disj.disjuncts[0], bigmed.x, - bigmed.y) - self.check_lor_on_disjunct(bigmed, bigmed.disj.disjuncts[1], bigmed.y, - bigmed.z) + self.check_lor_on_disjunct(bigmed, bigmed.disj.disjuncts[0], bigmed.x, bigmed.y) + self.check_lor_on_disjunct(bigmed, bigmed.disj.disjuncts[1], bigmed.y, bigmed.z) TransformationFactory('gdp.hull').apply_to(model) - self.check_lor_on_disjunct(model, model.disj.disjuncts[0], model.x, - model.y) - self.check_lor_on_disjunct(model, model.disj.disjuncts[1], model.y, - model.z) - + self.check_lor_on_disjunct(model, model.disj.disjuncts[0], model.x, model.y) + self.check_lor_on_disjunct(model, model.disj.disjuncts[1], model.y, model.z) + # TODO look to test_con.py for inspiration diff --git a/pyomo/core/tests/unit/test_logical_expr_expanded.py b/pyomo/core/tests/unit/test_logical_expr_expanded.py index 9db843c3798..f5b86d59cbd 100644 --- a/pyomo/core/tests/unit/test_logical_expr_expanded.py +++ b/pyomo/core/tests/unit/test_logical_expr_expanded.py @@ -23,10 +23,23 @@ from pyomo.core.expr.sympy_tools import sympy_available from pyomo.core.expr.visitor import identify_variables from pyomo.environ import ( - land, atleast, atmost, BooleanConstant, BooleanVarList, ComponentMap, equivalent, exactly, implies, lor, RangeSet, + land, + atleast, + atmost, + BooleanConstant, + BooleanVarList, + ComponentMap, + equivalent, + exactly, + implies, + lor, + RangeSet, value, - ConcreteModel, BooleanVar, - lnot, xor, ) + ConcreteModel, + BooleanVar, + lnot, + xor, +) def _generate_possible_truth_inputs(nargs): @@ -44,7 +57,6 @@ def _check_equivalent(assert_handle, expr_1, expr_2): class TestLogicalClasses(unittest.TestCase): - def test_BooleanVar(self): """ Simple construction and value setting @@ -90,13 +102,13 @@ def test_binary_xor(self): m.Y2 = BooleanVar() op_static = xor(m.Y1, m.Y2) op_class = m.Y1.xor(m.Y2) - # op_operator = m.Y1 ^ m.Y2 + op_operator = m.Y1 ^ m.Y2 for truth_combination in _generate_possible_truth_inputs(2): m.Y1.value, m.Y2.value = truth_combination[0], truth_combination[1] correct_value = operator.xor(*truth_combination) self.assertEqual(value(op_static), correct_value) self.assertEqual(value(op_class), correct_value) - # self.assertEqual(value(op_operator), correct_value) + self.assertEqual(value(op_operator), correct_value) def test_binary_implies(self): m = ConcreteModel() @@ -122,13 +134,13 @@ def test_binary_and(self): m.Y2 = BooleanVar() op_static = land(m.Y1, m.Y2) op_class = m.Y1.land(m.Y2) - # op_operator = m.Y1 & m.Y2 + op_operator = m.Y1 & m.Y2 for truth_combination in _generate_possible_truth_inputs(2): m.Y1.value, m.Y2.value = truth_combination[0], truth_combination[1] correct_value = all(truth_combination) self.assertEqual(value(op_static), correct_value) self.assertEqual(value(op_class), correct_value) - # self.assertEqual(value(op_operator), correct_value) + self.assertEqual(value(op_operator), correct_value) def test_binary_or(self): m = ConcreteModel() @@ -136,13 +148,13 @@ def test_binary_or(self): m.Y2 = BooleanVar() op_static = lor(m.Y1, m.Y2) op_class = m.Y1.lor(m.Y2) - # op_operator = m.Y1 | m.Y2 + op_operator = m.Y1 | m.Y2 for truth_combination in _generate_possible_truth_inputs(2): m.Y1.value, m.Y2.value = truth_combination[0], truth_combination[1] correct_value = any(truth_combination) self.assertEqual(value(op_static), correct_value) self.assertEqual(value(op_class), correct_value) - # self.assertEqual(value(op_operator), correct_value) + self.assertEqual(value(op_operator), correct_value) def test_nary_and(self): nargs = 3 @@ -189,7 +201,9 @@ def test_nary_exactly(self): for ntrue in range(nargs + 1): m.Y.set_values(dict(enumerate(truth_combination, 1))) correct_value = sum(truth_combination) == ntrue - self.assertEqual(value(exactly(ntrue, *(m.Y[i] for i in m.s))), correct_value) + self.assertEqual( + value(exactly(ntrue, *(m.Y[i] for i in m.s))), correct_value + ) self.assertEqual(value(exactly(ntrue, m.Y)), correct_value) def test_nary_atmost(self): @@ -201,7 +215,9 @@ def test_nary_atmost(self): for ntrue in range(nargs + 1): m.Y.set_values(dict(enumerate(truth_combination, 1))) correct_value = sum(truth_combination) <= ntrue - self.assertEqual(value(atmost(ntrue, *(m.Y[i] for i in m.s))), correct_value) + self.assertEqual( + value(atmost(ntrue, *(m.Y[i] for i in m.s))), correct_value + ) self.assertEqual(value(atmost(ntrue, m.Y)), correct_value) def test_nary_atleast(self): @@ -213,7 +229,9 @@ def test_nary_atleast(self): for ntrue in range(nargs + 1): m.Y.set_values(dict(enumerate(truth_combination, 1))) correct_value = sum(truth_combination) >= ntrue - self.assertEqual(value(atleast(ntrue, *(m.Y[i] for i in m.s))), correct_value) + self.assertEqual( + value(atleast(ntrue, *(m.Y[i] for i in m.s))), correct_value + ) self.assertEqual(value(atleast(ntrue, m.Y)), correct_value) def test_to_string(self): @@ -221,6 +239,7 @@ def test_to_string(self): m.Y1 = BooleanVar() m.Y2 = BooleanVar() m.Y3 = BooleanVar() + m.Y4 = BooleanVar() self.assertEqual(str(land(m.Y1, m.Y2, m.Y3)), "Y1 ∧ Y2 ∧ Y3") self.assertEqual(str(lor(m.Y1, m.Y2, m.Y3)), "Y1 ∨ Y2 ∨ Y3") @@ -231,8 +250,16 @@ def test_to_string(self): self.assertEqual(str(atmost(1, m.Y1, m.Y2)), "atmost(1: [Y1, Y2])") self.assertEqual(str(exactly(1, m.Y1, m.Y2)), "exactly(1: [Y1, Y2])") - # Precedence check + # Precedence checks self.assertEqual(str(m.Y1.implies(m.Y2).lor(m.Y3)), "(Y1 --> Y2) ∨ Y3") + self.assertEqual(str(m.Y1 & m.Y2 | m.Y3 ^ m.Y4), "Y1 ∧ Y2 ∨ Y3 ⊻ Y4") + self.assertEqual(str(m.Y1 & (m.Y2 | m.Y3) ^ m.Y4), "Y1 ∧ (Y2 ∨ Y3) ⊻ Y4") + self.assertEqual(str(m.Y1 & m.Y2 ^ m.Y3 | m.Y4), "Y1 ∧ Y2 ⊻ Y3 ∨ Y4") + self.assertEqual(str(m.Y1 & m.Y2 ^ (m.Y3 | m.Y4)), "Y1 ∧ Y2 ⊻ (Y3 ∨ Y4)") + self.assertEqual(str(m.Y1 & (m.Y2 ^ (m.Y3 | m.Y4))), "Y1 ∧ (Y2 ⊻ (Y3 ∨ Y4))") + self.assertEqual(str(m.Y1 | m.Y2 ^ m.Y3 & m.Y4), "Y1 ∨ Y2 ⊻ Y3 ∧ Y4") + self.assertEqual(str((m.Y1 | m.Y2) ^ m.Y3 & m.Y4), "(Y1 ∨ Y2) ⊻ Y3 ∧ Y4") + self.assertEqual(str(((m.Y1 | m.Y2) ^ m.Y3) & m.Y4), "((Y1 ∨ Y2) ⊻ Y3) ∧ Y4") def test_node_types(self): m = ConcreteModel() @@ -251,20 +278,70 @@ def test_numeric_invalid(self): m.Y2 = BooleanVar() m.Y3 = BooleanVar() + def iadd(): + m.Y3 += 2 + + def isub(): + m.Y3 -= 2 + + def imul(): + m.Y3 *= 2 + + def idiv(): + m.Y3 /= 2 + + def ipow(): + m.Y3 **= 2 + + def iand(): + m.Y3 &= 2 + + def ior(): + m.Y3 |= 2 + + def ixor(): + m.Y3 ^= 2 + def invalid_expression_generator(): yield lambda: m.Y1 + m.Y2 yield lambda: m.Y1 - m.Y2 yield lambda: m.Y1 * m.Y2 yield lambda: m.Y1 / m.Y2 yield lambda: m.Y1**m.Y2 + yield lambda: m.Y1.land(0) + yield lambda: m.Y1.lor(0) + yield lambda: m.Y1.xor(0) + yield lambda: m.Y1.equivalent_to(0) + yield lambda: m.Y1.implies(0) yield lambda: 0 + m.Y2 yield lambda: 0 - m.Y2 yield lambda: 0 * m.Y2 yield lambda: 0 / m.Y2 yield lambda: 0**m.Y2 - - numeric_error_msg = "(?:(?:unsupported operand type)|(?:operands do not support))" - for invalid_expr_fcn in invalid_expression_generator(): + yield lambda: 0 & m.Y2 + yield lambda: 0 | m.Y2 + yield lambda: 0 ^ m.Y2 + yield lambda: m.Y3 + 2 + yield lambda: m.Y3 - 2 + yield lambda: m.Y3 * 2 + yield lambda: m.Y3 / 2 + yield lambda: m.Y3**2 + yield lambda: m.Y3 & 2 + yield lambda: m.Y3 | 2 + yield lambda: m.Y3 ^ 2 + yield iadd + yield isub + yield imul + yield idiv + yield ipow + yield iand + yield ior + yield ixor + + numeric_error_msg = ( + "(?:(?:unsupported operand type)|(?:operands do not support))" + ) + for i, invalid_expr_fcn in enumerate(invalid_expression_generator()): with self.assertRaisesRegex(TypeError, numeric_error_msg): _ = invalid_expr_fcn() @@ -273,8 +350,11 @@ def invalid_unary_expression_generator(): yield lambda: +m.Y1 for invalid_expr_fcn in invalid_unary_expression_generator(): - with self.assertRaisesRegex(TypeError, "(?:(?:bad operand type for unary)" - "|(?:unsupported operand type for unary))"): + with self.assertRaisesRegex( + TypeError, + "(?:(?:bad operand type for unary)" + "|(?:unsupported operand type for unary))", + ): _ = invalid_expr_fcn() def invalid_comparison_generator(): @@ -284,7 +364,9 @@ def invalid_comparison_generator(): yield lambda: m.Y1 < 0 # These errors differ between python versions, regrettably - comparison_error_msg = "(?:(?:unorderable types)|(?:not supported between instances of))" + comparison_error_msg = ( + "(?:(?:unorderable types)|(?:not supported between instances of))" + ) for invalid_expr_fcn in invalid_comparison_generator(): with self.assertRaisesRegex(TypeError, comparison_error_msg): _ = invalid_expr_fcn() @@ -294,12 +376,14 @@ def test_invalid_conversion(self): m.Y1 = BooleanVar() with self.assertRaisesRegex( - TypeError, r"argument must be a string or a(.*) number"): + TypeError, r"argument must be a string or a(.*) number" + ): float(m.Y1) with self.assertRaisesRegex( - TypeError, r"argument must be a string" - r"(?:, a bytes-like object)? or a(.*) number"): + TypeError, + r"argument must be a string" r"(?:, a bytes-like object)? or a(.*) number", + ): int(m.Y1) diff --git a/pyomo/core/tests/unit/test_logical_to_linear.py b/pyomo/core/tests/unit/test_logical_to_linear.py index 5225d259b41..22133f22ba2 100644 --- a/pyomo/core/tests/unit/test_logical_to_linear.py +++ b/pyomo/core/tests/unit/test_logical_to_linear.py @@ -10,20 +10,40 @@ # ___________________________________________________________________________ import pyomo.common.unittest as unittest +from pyomo.common.errors import MouseTrap, DeveloperError from pyomo.common.log import LoggingIntercept import logging from pyomo.core.expr.sympy_tools import sympy_available -from pyomo.core.plugins.transform.logical_to_linear import \ - update_boolean_vars_from_binary +from pyomo.core.plugins.transform.logical_to_linear import ( + update_boolean_vars_from_binary, +) from pyomo.environ import ( - ConcreteModel, BooleanVar, LogicalConstraint, lor, TransformationFactory, - RangeSet, Var, Constraint, ComponentMap, value, BooleanSet, atleast, atmost, - exactly, Block, Binary, LogicalConstraintList) + ConcreteModel, + BooleanVar, + LogicalConstraint, + TransformationFactory, + RangeSet, + Var, + Constraint, + ExternalFunction, + ComponentMap, + value, + BooleanSet, + land, + lor, + atleast, + atmost, + exactly, + Block, + Binary, + LogicalConstraintList, +) from pyomo.gdp import Disjunct, Disjunction from pyomo.repn import generate_standard_repn from io import StringIO + def _generate_boolean_model(nvars): m = ConcreteModel() m.s = RangeSet(nvars) @@ -32,6 +52,7 @@ def _generate_boolean_model(nvars): m.constraint = LogicalConstraint(expr=exactly(2, m.Y)) return m + def _constrs_contained_within(test_case, test_constr_tuples, constraint_list): """Checks to see if constraints defined by test_constr_tuples are in the constraint list. @@ -43,6 +64,7 @@ def _constrs_contained_within(test_case, test_constr_tuples, constraint_list): test_case : unittest.TestCase """ + # Move const term from body def _move_const_from_body(lower, repn, upper): if repn.constant is not None and not repn.constant == 0: @@ -55,62 +77,79 @@ def _move_const_from_body(lower, repn, upper): def _repns_match(repn, test_repn): if not len(repn.linear_vars) == len(test_repn.linear_vars): return False - coef_map = ComponentMap((var, coef) for var, coef in - zip(repn.linear_vars, repn.linear_coefs)) + coef_map = ComponentMap( + (var, coef) for var, coef in zip(repn.linear_vars, repn.linear_coefs) + ) for var, coef in zip(test_repn.linear_vars, test_repn.linear_coefs): if not coef_map.get(var, 0) == coef: return False return True - constr_list_tuples = [ _move_const_from_body( - constr.lower, - generate_standard_repn(constr.body), - constr.upper) for constr in - constraint_list.values()] + constr_list_tuples = [ + _move_const_from_body( + constr.lower, generate_standard_repn(constr.body), constr.upper + ) + for constr in constraint_list.values() + ] for test_lower, test_body, test_upper in test_constr_tuples: test_repn = generate_standard_repn(test_body) - test_lower, test_repn, test_upper = _move_const_from_body(test_lower, - test_repn, - test_upper) + test_lower, test_repn, test_upper = _move_const_from_body( + test_lower, test_repn, test_upper + ) found_match = False # Make sure one of the list tuples matches for lower, repn, upper in constr_list_tuples: - if lower == test_lower and upper == test_upper and \ - _repns_match(repn, test_repn): + if ( + lower == test_lower + and upper == test_upper + and _repns_match(repn, test_repn) + ): found_match = True break test_case.assertTrue( found_match, "{} <= {} <= {} was not found in constraint list.".format( - test_lower, test_body, test_upper)) + test_lower, test_body, test_upper + ), + ) @unittest.skipUnless(sympy_available, "Sympy not available") class TestAtomicTransformations(unittest.TestCase): - def test_implies(self): m = ConcreteModel() m.x = BooleanVar() m.y = BooleanVar() m.p = LogicalConstraint(expr=m.x.implies(m.y)) TransformationFactory('core.logical_to_linear').apply_to(m) - _constrs_contained_within( self, [(1, (1 - m.x.get_associated_binary()) - + m.y.get_associated_binary(), - None)], - m.logic_to_linear.transformed_constraints) + _constrs_contained_within( + self, + [ + ( + 1, + (1 - m.x.get_associated_binary()) + m.y.get_associated_binary(), + None, + ) + ], + m.logic_to_linear.transformed_constraints, + ) def test_literal(self): m = ConcreteModel() m.Y = BooleanVar() m.p = LogicalConstraint(expr=m.Y) TransformationFactory('core.logical_to_linear').apply_to(m) - _constrs_contained_within( self, [(1, m.Y.get_associated_binary(), 1)], - m.logic_to_linear.transformed_constraints) + _constrs_contained_within( + self, + [(1, m.Y.get_associated_binary(), 1)], + m.logic_to_linear.transformed_constraints, + ) def test_constant_True(self): m = ConcreteModel() - with self.assertRaisesRegex(ValueError, - "LogicalConstraint 'p' is always True."): + with self.assertRaisesRegex( + ValueError, "LogicalConstraint 'p' is always True." + ): m.p = LogicalConstraint(expr=True) TransformationFactory('core.logical_to_linear').apply_to(m) self.assertIsNone(m.component('logic_to_linear')) @@ -129,6 +168,7 @@ def test_deactivate_empty_logical_constraint_container(self): self.assertIsNone(m.component('logic_to_linear')) self.assertFalse(m.propositions.active) + @unittest.skipUnless(sympy_available, "Sympy not available") class TestLogicalToLinearTransformation(unittest.TestCase): def test_longer_statement(self): @@ -138,12 +178,18 @@ def test_longer_statement(self): m.p = LogicalConstraint(expr=m.Y[1].implies(lor(m.Y[2], m.Y[3]))) TransformationFactory('core.logical_to_linear').apply_to(m) _constrs_contained_within( - self, [ - (1, - m.Y[2].get_associated_binary() + m.Y[3].get_associated_binary() - + (1 - m.Y[1].get_associated_binary()), - None) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + 1, + m.Y[2].get_associated_binary() + + m.Y[3].get_associated_binary() + + (1 - m.Y[1].get_associated_binary()), + None, + ) + ], + m.logic_to_linear.transformed_constraints, + ) def test_xfrm_atleast_statement(self): m = ConcreteModel() @@ -152,13 +198,18 @@ def test_xfrm_atleast_statement(self): m.p = LogicalConstraint(expr=atleast(2, m.Y[1], m.Y[2], m.Y[3])) TransformationFactory('core.logical_to_linear').apply_to(m) _constrs_contained_within( - self, [ - (2, - m.Y[1].get_associated_binary() + \ - m.Y[2].get_associated_binary() + \ - m.Y[3].get_associated_binary(), - None) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + 2, + m.Y[1].get_associated_binary() + + m.Y[2].get_associated_binary() + + m.Y[3].get_associated_binary(), + None, + ) + ], + m.logic_to_linear.transformed_constraints, + ) def test_xfrm_atmost_statement(self): m = ConcreteModel() @@ -167,13 +218,18 @@ def test_xfrm_atmost_statement(self): m.p = LogicalConstraint(expr=atmost(2, m.Y[1], m.Y[2], m.Y[3])) TransformationFactory('core.logical_to_linear').apply_to(m) _constrs_contained_within( - self, [ - (None, - m.Y[1].get_associated_binary() + \ - m.Y[2].get_associated_binary() + \ - m.Y[3].get_associated_binary(), - 2) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + None, + m.Y[1].get_associated_binary() + + m.Y[2].get_associated_binary() + + m.Y[3].get_associated_binary(), + 2, + ) + ], + m.logic_to_linear.transformed_constraints, + ) def test_xfrm_exactly_statement(self): m = ConcreteModel() @@ -182,145 +238,273 @@ def test_xfrm_exactly_statement(self): m.p = LogicalConstraint(expr=exactly(2, m.Y[1], m.Y[2], m.Y[3])) TransformationFactory('core.logical_to_linear').apply_to(m) _constrs_contained_within( - self, [ - (2, m.Y[1].get_associated_binary() + \ - m.Y[2].get_associated_binary() + \ - m.Y[3].get_associated_binary(), 2) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + 2, + m.Y[1].get_associated_binary() + + m.Y[2].get_associated_binary() + + m.Y[3].get_associated_binary(), + 2, + ) + ], + m.logic_to_linear.transformed_constraints, + ) def test_xfrm_special_atoms_nonroot(self): m = ConcreteModel() m.s = RangeSet(3) m.Y = BooleanVar(m.s) - m.p = LogicalConstraint(expr=m.Y[1].implies(atleast(2, m.Y[1], m.Y[2], - m.Y[3]))) + m.p = LogicalConstraint(expr=m.Y[1].implies(atleast(2, m.Y[1], m.Y[2], m.Y[3]))) TransformationFactory('core.logical_to_linear').apply_to(m) Y_aug = m.logic_to_linear.augmented_vars self.assertEqual(len(Y_aug), 1) self.assertEqual(Y_aug[1].domain, BooleanSet) _constrs_contained_within( - self, [ - (None, sum(m.Y[:].get_associated_binary()) - \ - (1 + 2 * Y_aug[1].get_associated_binary()), 0), - (1, (1 - m.Y[1].get_associated_binary()) + \ - Y_aug[1].get_associated_binary(), None), - (None, 2 - 2 * (1 - Y_aug[1].get_associated_binary()) - \ - sum(m.Y[:].get_associated_binary()), 0) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + None, + sum(m.Y[:].get_associated_binary()) + - (1 + 2 * Y_aug[1].get_associated_binary()), + 0, + ), + ( + 1, + (1 - m.Y[1].get_associated_binary()) + + Y_aug[1].get_associated_binary(), + None, + ), + ( + None, + 2 + - 2 * (1 - Y_aug[1].get_associated_binary()) + - sum(m.Y[:].get_associated_binary()), + 0, + ), + ], + m.logic_to_linear.transformed_constraints, + ) m = ConcreteModel() m.s = RangeSet(3) m.Y = BooleanVar(m.s) - m.p = LogicalConstraint(expr=m.Y[1].implies(atmost(2, m.Y[1], m.Y[2], - m.Y[3]))) + m.p = LogicalConstraint(expr=m.Y[1].implies(atmost(2, m.Y[1], m.Y[2], m.Y[3]))) TransformationFactory('core.logical_to_linear').apply_to(m) Y_aug = m.logic_to_linear.augmented_vars self.assertEqual(len(Y_aug), 1) self.assertEqual(Y_aug[1].domain, BooleanSet) _constrs_contained_within( - self, [ - (None, sum(m.Y[:].get_associated_binary()) - \ - (1 - Y_aug[1].get_associated_binary() + 2), 0), - (1, (1 - m.Y[1].get_associated_binary()) + \ - Y_aug[1].get_associated_binary(), None), - (None, 3 - 3 * Y_aug[1].get_associated_binary() - \ - sum(m.Y[:].get_associated_binary()), 0) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + None, + sum(m.Y[:].get_associated_binary()) + - (1 - Y_aug[1].get_associated_binary() + 2), + 0, + ), + ( + 1, + (1 - m.Y[1].get_associated_binary()) + + Y_aug[1].get_associated_binary(), + None, + ), + ( + None, + 3 + - 3 * Y_aug[1].get_associated_binary() + - sum(m.Y[:].get_associated_binary()), + 0, + ), + ], + m.logic_to_linear.transformed_constraints, + ) m = ConcreteModel() m.s = RangeSet(3) m.Y = BooleanVar(m.s) - m.p = LogicalConstraint(expr=m.Y[1].implies(exactly(2, m.Y[1], m.Y[2], - m.Y[3]))) + m.p = LogicalConstraint(expr=m.Y[1].implies(exactly(2, m.Y[1], m.Y[2], m.Y[3]))) TransformationFactory('core.logical_to_linear').apply_to(m) Y_aug = m.logic_to_linear.augmented_vars self.assertEqual(len(Y_aug), 3) self.assertEqual(Y_aug[1].domain, BooleanSet) _constrs_contained_within( - self, [ - (1, (1 - m.Y[1].get_associated_binary()) + \ - Y_aug[1].get_associated_binary(), None), - (None, sum(m.Y[:].get_associated_binary()) - \ - (1 - Y_aug[1].get_associated_binary() + 2), 0), - (None, 2 - 2 * (1 - Y_aug[1].get_associated_binary()) - \ - sum(m.Y[:].get_associated_binary()), 0), + self, + [ + ( + 1, + (1 - m.Y[1].get_associated_binary()) + + Y_aug[1].get_associated_binary(), + None, + ), + ( + None, + sum(m.Y[:].get_associated_binary()) + - (1 - Y_aug[1].get_associated_binary() + 2), + 0, + ), + ( + None, + 2 + - 2 * (1 - Y_aug[1].get_associated_binary()) + - sum(m.Y[:].get_associated_binary()), + 0, + ), (1, sum(Y_aug[:].get_associated_binary()), None), - (None, sum(m.Y[:].get_associated_binary()) - \ - (1 + 2 * (1 - Y_aug[2].get_associated_binary())), 0), - (None, 3 - 3 * (1 - Y_aug[3].get_associated_binary()) - \ - sum(m.Y[:].get_associated_binary()), 0), - ], m.logic_to_linear.transformed_constraints) + ( + None, + sum(m.Y[:].get_associated_binary()) + - (1 + 2 * (1 - Y_aug[2].get_associated_binary())), + 0, + ), + ( + None, + 3 + - 3 * (1 - Y_aug[3].get_associated_binary()) + - sum(m.Y[:].get_associated_binary()), + 0, + ), + ], + m.logic_to_linear.transformed_constraints, + ) # Note: x is now a variable m = ConcreteModel() m.s = RangeSet(3) m.Y = BooleanVar(m.s) m.x = Var(bounds=(1, 3)) - m.p = LogicalConstraint(expr=m.Y[1].implies(exactly(m.x, m.Y[1], m.Y[2], - m.Y[3]))) + m.p = LogicalConstraint( + expr=m.Y[1].implies(exactly(m.x, m.Y[1], m.Y[2], m.Y[3])) + ) TransformationFactory('core.logical_to_linear').apply_to(m) Y_aug = m.logic_to_linear.augmented_vars self.assertEqual(len(Y_aug), 3) self.assertEqual(Y_aug[1].domain, BooleanSet) _constrs_contained_within( - self, [ - (1, (1 - m.Y[1].get_associated_binary()) + \ - Y_aug[1].get_associated_binary(), None), - (None, sum(m.Y[:].get_associated_binary()) - \ - (m.x + 2 * (1 - Y_aug[1].get_associated_binary())), 0), - (None, m.x - 3 * (1 - Y_aug[1].get_associated_binary()) - \ - sum(m.Y[:].get_associated_binary()), 0), + self, + [ + ( + 1, + (1 - m.Y[1].get_associated_binary()) + + Y_aug[1].get_associated_binary(), + None, + ), + ( + None, + sum(m.Y[:].get_associated_binary()) + - (m.x + 2 * (1 - Y_aug[1].get_associated_binary())), + 0, + ), + ( + None, + m.x + - 3 * (1 - Y_aug[1].get_associated_binary()) + - sum(m.Y[:].get_associated_binary()), + 0, + ), (1, sum(Y_aug[:].get_associated_binary()), None), - (None, sum(m.Y[:].get_associated_binary()) - \ - (m.x - 1 + 3 * (1 - Y_aug[2].get_associated_binary())), 0), - (None, m.x + 1 - 4 * (1 - Y_aug[3].get_associated_binary()) - \ - sum(m.Y[:].get_associated_binary()), 0), - ], m.logic_to_linear.transformed_constraints) + ( + None, + sum(m.Y[:].get_associated_binary()) + - (m.x - 1 + 3 * (1 - Y_aug[2].get_associated_binary())), + 0, + ), + ( + None, + m.x + + 1 + - 4 * (1 - Y_aug[3].get_associated_binary()) + - sum(m.Y[:].get_associated_binary()), + 0, + ), + ], + m.logic_to_linear.transformed_constraints, + ) def test_xfrm_atleast_nested(self): m = _generate_boolean_model(4) - m.p = LogicalConstraint(expr=atleast(1, atleast(2, m.Y[1], - m.Y[1].lor(m.Y[2]), - m.Y[2]).lor(m.Y[3]), - m.Y[4])) + m.p = LogicalConstraint( + expr=atleast( + 1, atleast(2, m.Y[1], m.Y[1].lor(m.Y[2]), m.Y[2]).lor(m.Y[3]), m.Y[4] + ) + ) TransformationFactory('core.logical_to_linear').apply_to(m) Y_aug = m.logic_to_linear.augmented_vars self.assertEqual(len(Y_aug), 3) _constrs_contained_within( - self, [ - (1, Y_aug[1].get_associated_binary() + \ - m.Y[4].get_associated_binary(), None), - (1, 1 - Y_aug[2].get_associated_binary() + \ - Y_aug[1].get_associated_binary(), None), - (1, 1 - m.Y[3].get_associated_binary() + \ - Y_aug[1].get_associated_binary(), None), - (1, - Y_aug[2].get_associated_binary() + \ - m.Y[3].get_associated_binary() - + 1 - Y_aug[1].get_associated_binary(), - None), - (1, 1 - m.Y[1].get_associated_binary() + \ - Y_aug[3].get_associated_binary(), None), - (1, 1 - m.Y[2].get_associated_binary() + \ - Y_aug[3].get_associated_binary(), None), - (1, - m.Y[1].get_associated_binary() + \ - m.Y[2].get_associated_binary() + 1 - \ - Y_aug[3].get_associated_binary(), - None), - (None, - 2 - 2 * (1 - Y_aug[2].get_associated_binary()) - - (m.Y[1].get_associated_binary() + \ - Y_aug[3].get_associated_binary() + \ - m.Y[2].get_associated_binary()), - 0), - (None, - m.Y[1].get_associated_binary() + \ - Y_aug[3].get_associated_binary() + \ - m.Y[2].get_associated_binary() - - (1 + 2 * Y_aug[2].get_associated_binary()), - 0) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + 1, + Y_aug[1].get_associated_binary() + m.Y[4].get_associated_binary(), + None, + ), + ( + 1, + 1 + - Y_aug[2].get_associated_binary() + + Y_aug[1].get_associated_binary(), + None, + ), + ( + 1, + 1 + - m.Y[3].get_associated_binary() + + Y_aug[1].get_associated_binary(), + None, + ), + ( + 1, + Y_aug[2].get_associated_binary() + + m.Y[3].get_associated_binary() + + 1 + - Y_aug[1].get_associated_binary(), + None, + ), + ( + 1, + 1 + - m.Y[1].get_associated_binary() + + Y_aug[3].get_associated_binary(), + None, + ), + ( + 1, + 1 + - m.Y[2].get_associated_binary() + + Y_aug[3].get_associated_binary(), + None, + ), + ( + 1, + m.Y[1].get_associated_binary() + + m.Y[2].get_associated_binary() + + 1 + - Y_aug[3].get_associated_binary(), + None, + ), + ( + None, + 2 + - 2 * (1 - Y_aug[2].get_associated_binary()) + - ( + m.Y[1].get_associated_binary() + + Y_aug[3].get_associated_binary() + + m.Y[2].get_associated_binary() + ), + 0, + ), + ( + None, + m.Y[1].get_associated_binary() + + Y_aug[3].get_associated_binary() + + m.Y[2].get_associated_binary() + - (1 + 2 * Y_aug[2].get_associated_binary()), + 0, + ), + ], + m.logic_to_linear.transformed_constraints, + ) def test_link_with_gdp_indicators(self): m = _generate_boolean_model(4) @@ -339,41 +523,60 @@ def test_link_with_gdp_indicators(self): m.p = LogicalConstraint(expr=m.Y[1].implies(lor(m.Y[3], m.Y[4]))) m.p2 = LogicalConstraint(expr=atmost(2, *m.Y[:])) TransformationFactory('core.logical_to_linear').apply_to(m) - _constrs_contained_within( self, [ (1, m.dd[1].binary_indicator_var + - m.dd[2].binary_indicator_var + 1 - - m.d1.binary_indicator_var, None), - (None, m.d1.binary_indicator_var + - m.d2.binary_indicator_var + - m.dd[1].binary_indicator_var + - m.dd[2].binary_indicator_var, 2) ], - m.logic_to_linear.transformed_constraints) + _constrs_contained_within( + self, + [ + ( + 1, + m.dd[1].binary_indicator_var + + m.dd[2].binary_indicator_var + + 1 + - m.d1.binary_indicator_var, + None, + ), + ( + None, + m.d1.binary_indicator_var + + m.d2.binary_indicator_var + + m.dd[1].binary_indicator_var + + m.dd[2].binary_indicator_var, + 2, + ), + ], + m.logic_to_linear.transformed_constraints, + ) def test_gdp_nesting(self): m = _generate_boolean_model(2) - m.disj = Disjunction(expr=[ - [m.Y[1].implies(m.Y[2])], - [m.Y[2].equivalent_to(False)] - ]) + m.disj = Disjunction( + expr=[[m.Y[1].implies(m.Y[2])], [m.Y[2].equivalent_to(False)]] + ) TransformationFactory('core.logical_to_linear').apply_to( - m, - targets=[m.disj.disjuncts[0], m.disj.disjuncts[1]]) + m, targets=[m.disj.disjuncts[0], m.disj.disjuncts[1]] + ) _constrs_contained_within( - self, [ - (1, 1 - m.Y[1].get_associated_binary() + \ - m.Y[2].get_associated_binary(), None), - ], m.disj_disjuncts[0].logic_to_linear.transformed_constraints) + self, + [ + ( + 1, + 1 - m.Y[1].get_associated_binary() + m.Y[2].get_associated_binary(), + None, + ) + ], + m.disj_disjuncts[0].logic_to_linear.transformed_constraints, + ) _constrs_contained_within( - self, [ - (1, 1 - m.Y[2].get_associated_binary(), 1), - ], m.disj_disjuncts[1].logic_to_linear.transformed_constraints) + self, + [(1, 1 - m.Y[2].get_associated_binary(), 1)], + m.disj_disjuncts[1].logic_to_linear.transformed_constraints, + ) def test_transformed_components_on_parent_block(self): m = ConcreteModel() m.b = Block() m.b.s = RangeSet(3) m.b.Y = BooleanVar(m.b.s) - m.b.p = LogicalConstraint(expr=m.b.Y[1].implies(lor(m.b.Y[2], - m.b.Y[3]))) + m.b.p = LogicalConstraint(expr=m.b.Y[1].implies(lor(m.b.Y[2], m.b.Y[3]))) TransformationFactory('core.logical_to_linear').apply_to(m) boolean_var = m.b.component("Y_asbinary") @@ -388,20 +591,25 @@ def test_transformed_components_on_parent_block(self): # check the constraints on the transBlock _constrs_contained_within( - self, [ - (1, - m.b.Y[2].get_associated_binary() + \ - m.b.Y[3].get_associated_binary() - + (1 - m.b.Y[1].get_associated_binary()), - None) - ], m.b.logic_to_linear.transformed_constraints) + self, + [ + ( + 1, + m.b.Y[2].get_associated_binary() + + m.b.Y[3].get_associated_binary() + + (1 - m.b.Y[1].get_associated_binary()), + None, + ) + ], + m.b.logic_to_linear.transformed_constraints, + ) def make_nested_block_model(self): """For the next two tests: Has BooleanVar on model, but LogicalConstraints on a Block and a Block nested on that Block.""" m = ConcreteModel() m.b = Block() - m.Y = BooleanVar([1,2]) + m.Y = BooleanVar([1, 2]) m.b.logical = LogicalConstraint(expr=~m.Y[1]) m.b.b = Block() m.b.b.logical = LogicalConstraint(expr=m.Y[1].xor(m.Y[2])) @@ -411,71 +619,117 @@ def test_transform_block(self): m = self.make_nested_block_model() TransformationFactory('core.logical_to_linear').apply_to(m.b) - _constrs_contained_within( self, [(1, 1 - - m.Y[1].get_associated_binary(), 1)], - m.b.logic_to_linear.transformed_constraints) + _constrs_contained_within( + self, + [(1, 1 - m.Y[1].get_associated_binary(), 1)], + m.b.logic_to_linear.transformed_constraints, + ) # ESJ: This is kinda whacky looking... Why not Y[1] + Y[2] == 1? (It's # special case of an exactly(1, ...) constraint. - _constrs_contained_within(self, [(1, m.Y[1].get_associated_binary() + - m.Y[2].get_associated_binary(), None), - (1, 1 - m.Y[1].get_associated_binary() - + 1 - m.Y[2].get_associated_binary(), - None)], - m.b.b.logic_to_linear.transformed_constraints) + _constrs_contained_within( + self, + [ + ( + 1, + m.Y[1].get_associated_binary() + m.Y[2].get_associated_binary(), + None, + ), + ( + 1, + 1 + - m.Y[1].get_associated_binary() + + 1 + - m.Y[2].get_associated_binary(), + None, + ), + ], + m.b.b.logic_to_linear.transformed_constraints, + ) self.assertEqual(len(m.b.logic_to_linear.transformed_constraints), 1) self.assertEqual(len(m.b.b.logic_to_linear.transformed_constraints), 2) def test_transform_targets_on_block(self): m = self.make_nested_block_model() - TransformationFactory('core.logical_to_linear').apply_to(m.b, - targets=m.b.b) + TransformationFactory('core.logical_to_linear').apply_to(m.b, targets=m.b.b) # didn't transform anything on m.b self.assertIsNone(m.b.component("logic_to_linear")) # got what we expected on m.b.b - _constrs_contained_within(self, [(1, m.Y[1].get_associated_binary() + - m.Y[2].get_associated_binary(), None), - (1, 1 - m.Y[1].get_associated_binary() - + 1 - m.Y[2].get_associated_binary(), - None)], - m.b.b.logic_to_linear.transformed_constraints) + _constrs_contained_within( + self, + [ + ( + 1, + m.Y[1].get_associated_binary() + m.Y[2].get_associated_binary(), + None, + ), + ( + 1, + 1 + - m.Y[1].get_associated_binary() + + 1 + - m.Y[2].get_associated_binary(), + None, + ), + ], + m.b.b.logic_to_linear.transformed_constraints, + ) self.assertEqual(len(m.b.b.logic_to_linear.transformed_constraints), 2) def test_logical_constraint_target(self): m = _generate_boolean_model(3) TransformationFactory('core.logical_to_linear').apply_to( - m, targets=m.constraint) + m, targets=m.constraint + ) _constrs_contained_within( - self, [ - (2, m.Y[1].get_associated_binary() + \ - m.Y[2].get_associated_binary() + \ - m.Y[3].get_associated_binary(), 2) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + 2, + m.Y[1].get_associated_binary() + + m.Y[2].get_associated_binary() + + m.Y[3].get_associated_binary(), + 2, + ) + ], + m.logic_to_linear.transformed_constraints, + ) def make_indexed_logical_constraint_model(self): m = _generate_boolean_model(3) - m.cons = LogicalConstraint([1,2]) + m.cons = LogicalConstraint([1, 2]) m.cons[1] = exactly(2, m.Y) m.cons[2] = m.Y[1].implies(lor(m.Y[2], m.Y[3])) return m def test_indexed_logical_constraint_target(self): m = self.make_indexed_logical_constraint_model() - TransformationFactory('core.logical_to_linear').apply_to( - m, targets=m.cons) + TransformationFactory('core.logical_to_linear').apply_to(m, targets=m.cons) _constrs_contained_within( - self, [ - (2, m.Y[1].get_associated_binary() + \ - m.Y[2].get_associated_binary() + \ - m.Y[3].get_associated_binary(), 2) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + 2, + m.Y[1].get_associated_binary() + + m.Y[2].get_associated_binary() + + m.Y[3].get_associated_binary(), + 2, + ) + ], + m.logic_to_linear.transformed_constraints, + ) _constrs_contained_within( - self, [ - (1, - m.Y[2].get_associated_binary() + \ - m.Y[3].get_associated_binary() - + (1 - m.Y[1].get_associated_binary()), - None) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + 1, + m.Y[2].get_associated_binary() + + m.Y[3].get_associated_binary() + + (1 - m.Y[1].get_associated_binary()), + None, + ) + ], + m.logic_to_linear.transformed_constraints, + ) # and verify only the targets were transformed self.assertEqual(len(m.logic_to_linear.transformed_constraints), 2) @@ -483,73 +737,129 @@ def test_indexed_logical_constraint_target(self): def test_logical_constraintData_target(self): m = self.make_indexed_logical_constraint_model() - TransformationFactory('core.logical_to_linear').apply_to( - m, targets=m.cons[2]) + TransformationFactory('core.logical_to_linear').apply_to(m, targets=m.cons[2]) _constrs_contained_within( - self, [ - (1, - m.Y[2].get_associated_binary() + \ - m.Y[3].get_associated_binary() - + (1 - m.Y[1].get_associated_binary()), - None) - ], m.logic_to_linear.transformed_constraints) + self, + [ + ( + 1, + m.Y[2].get_associated_binary() + + m.Y[3].get_associated_binary() + + (1 - m.Y[1].get_associated_binary()), + None, + ) + ], + m.logic_to_linear.transformed_constraints, + ) # only transformed the second one. self.assertEqual(len(m.logic_to_linear.transformed_constraints), 1) def test_blockData_target(self): m = ConcreteModel() - m.b = Block([1,2]) - m.b[1].transfer_attributes_from( - self.make_indexed_logical_constraint_model()) - TransformationFactory('core.logical_to_linear').apply_to(m, - targets=m.b[1]) + m.b = Block([1, 2]) + m.b[1].transfer_attributes_from(self.make_indexed_logical_constraint_model()) + TransformationFactory('core.logical_to_linear').apply_to(m, targets=m.b[1]) _constrs_contained_within( - self, [ - (2, m.b[1].Y[1].get_associated_binary() + \ - m.b[1].Y[2].get_associated_binary() + \ - m.b[1].Y[3].get_associated_binary(), 2) - ], m.b[1].logic_to_linear.transformed_constraints) + self, + [ + ( + 2, + m.b[1].Y[1].get_associated_binary() + + m.b[1].Y[2].get_associated_binary() + + m.b[1].Y[3].get_associated_binary(), + 2, + ) + ], + m.b[1].logic_to_linear.transformed_constraints, + ) _constrs_contained_within( - self, [ - (1, - m.b[1].Y[2].get_associated_binary() + \ - m.b[1].Y[3].get_associated_binary() - + (1 - m.b[1].Y[1].get_associated_binary()), - None) - ], m.b[1].logic_to_linear.transformed_constraints) + self, + [ + ( + 1, + m.b[1].Y[2].get_associated_binary() + + m.b[1].Y[3].get_associated_binary() + + (1 - m.b[1].Y[1].get_associated_binary()), + None, + ) + ], + m.b[1].logic_to_linear.transformed_constraints, + ) def test_disjunctData_target(self): m = ConcreteModel() - m.d = Disjunct([1,2]) - m.d[1].transfer_attributes_from( - self.make_indexed_logical_constraint_model()) - TransformationFactory('core.logical_to_linear').apply_to(m, - targets=m.d[1]) + m.d = Disjunct([1, 2]) + m.d[1].transfer_attributes_from(self.make_indexed_logical_constraint_model()) + TransformationFactory('core.logical_to_linear').apply_to(m, targets=m.d[1]) _constrs_contained_within( - self, [ - (2, m.d[1].Y[1].get_associated_binary() + \ - m.d[1].Y[2].get_associated_binary() + \ - m.d[1].Y[3].get_associated_binary(), 2) - ], m.d[1].logic_to_linear.transformed_constraints) + self, + [ + ( + 2, + m.d[1].Y[1].get_associated_binary() + + m.d[1].Y[2].get_associated_binary() + + m.d[1].Y[3].get_associated_binary(), + 2, + ) + ], + m.d[1].logic_to_linear.transformed_constraints, + ) _constrs_contained_within( - self, [ - (1, - m.d[1].Y[2].get_associated_binary() + \ - m.d[1].Y[3].get_associated_binary() - + (1 - m.d[1].Y[1].get_associated_binary()), - None) - ], m.d[1].logic_to_linear.transformed_constraints) + self, + [ + ( + 1, + m.d[1].Y[2].get_associated_binary() + + m.d[1].Y[3].get_associated_binary() + + (1 - m.d[1].Y[1].get_associated_binary()), + None, + ) + ], + m.d[1].logic_to_linear.transformed_constraints, + ) def test_target_with_unrecognized_type(self): m = _generate_boolean_model(2) - with self.assertRaisesRegex(ValueError, - r"invalid value for configuration " - r"'targets':\n\tFailed casting 1\n\tto " - r"target_list\n\tError: " - r"Expected Component or list of Components." - r"\n\tReceived "): - TransformationFactory('core.logical_to_linear').apply_to( - m, targets=1) + with self.assertRaisesRegex( + ValueError, + r"invalid value for configuration " + r"'targets':\n\tFailed casting 1\n\tto " + r"target_list\n\tError: " + r"Expected Component or list of Components." + r"\n\tReceived ", + ): + TransformationFactory('core.logical_to_linear').apply_to(m, targets=1) + + def test_mixed_logical_relational_expressions(self): + m = ConcreteModel() + m.x = Var() + m.y = BooleanVar([1, 2]) + m.c = LogicalConstraint(expr=(land(m.y[1], m.y[2]).implies(m.x >= 0))) + with self.assertRaisesRegex( + MouseTrap, + "core.logical_to_linear does not support transforming " + "LogicalConstraints with embedded relational expressions. " + "Found '0.0 <= x'.", + normalize_whitespace=True, + ): + TransformationFactory('core.logical_to_linear').apply_to(m) + + def test_external_function(self): + def _fcn(*args): + raise RuntimeError('unreachable') + + m = ConcreteModel() + m.x = Var() + m.f = ExternalFunction(_fcn) + m.y = BooleanVar() + m.c = LogicalConstraint(expr=(m.y.implies(m.f(m.x)))) + with self.assertRaisesRegex( + TypeError, + "Expressions containing external functions are not convertible " + r"to sympy expressions \(found 'f\(x1", + ): + TransformationFactory('core.logical_to_linear').apply_to(m) + @unittest.skipUnless(sympy_available, "Sympy not available") class TestLogicalToLinearBackmap(unittest.TestCase): @@ -559,25 +869,27 @@ def test_backmap_deprecated(self): m.Y = BooleanVar(m.s) TransformationFactory('core.logical_to_linear').apply_to(m) output = StringIO() - with LoggingIntercept(output, 'pyomo.core.base', - logging.WARNING): + with LoggingIntercept(output, 'pyomo.core.base', logging.WARNING): y1 = m.Y[1].get_associated_binary() - self.assertIn("DEPRECATED: Relying on core.logical_to_linear to " - "transform BooleanVars that do not appear in " - "LogicalConstraints is deprecated. Please " - "associate your own binaries if you have BooleanVars " - "not used in logical expressions.", - output.getvalue().replace('\n', ' ')) + self.assertIn( + "DEPRECATED: Relying on core.logical_to_linear to " + "transform BooleanVars that do not appear in " + "LogicalConstraints is deprecated. Please " + "associate your own binaries if you have BooleanVars " + "not used in logical expressions.", + output.getvalue().replace('\n', ' '), + ) output = StringIO() - with LoggingIntercept(output, 'pyomo.core.base', - logging.WARNING): + with LoggingIntercept(output, 'pyomo.core.base', logging.WARNING): y2 = m.Y[2].get_associated_binary() - self.assertIn("DEPRECATED: Relying on core.logical_to_linear to " - "transform BooleanVars that do not appear in " - "LogicalConstraints is deprecated. Please " - "associate your own binaries if you have BooleanVars " - "not used in logical expressions.", - output.getvalue().replace('\n', ' ')) + self.assertIn( + "DEPRECATED: Relying on core.logical_to_linear to " + "transform BooleanVars that do not appear in " + "LogicalConstraints is deprecated. Please " + "associate your own binaries if you have BooleanVars " + "not used in logical expressions.", + output.getvalue().replace('\n', ' '), + ) y1.value = 1 y2.value = 0 update_boolean_vars_from_binary(m) @@ -591,8 +903,7 @@ def test_can_associate_unused_boolean_after_transformation(self): TransformationFactory('core.logical_to_linear').apply_to(m) m.y = Var(domain=Binary) output = StringIO() - with LoggingIntercept(output, 'pyomo.core.base', - logging.WARNING): + with LoggingIntercept(output, 'pyomo.core.base', logging.WARNING): m.Y.associate_binary_var(m.y) y = m.Y.get_associated_binary() self.assertIs(y, m.y) @@ -606,10 +917,11 @@ def test_cannot_reassociate_boolean_error(self): # allowed to change now. m.y = Var(domain=Binary) with self.assertRaisesRegex( - RuntimeError, - r"Reassociating BooleanVar 'Y\[1\]' " - r"\(currently associated with 'Y_asbinary\[1\]'\)" - r" with 'y' is not allowed"): + RuntimeError, + r"Reassociating BooleanVar 'Y\[1\]' " + r"\(currently associated with 'Y_asbinary\[1\]'\)" + r" with 'y' is not allowed", + ): m.Y[1].associate_binary_var(m.y) def test_backmap(self): @@ -644,8 +956,9 @@ def test_backmap_noninteger(self): update_boolean_vars_from_binary(m, integer_tolerance=0.1) self.assertTrue(m.Y[1].value) # Now try it without the tolerance set - with self.assertRaisesRegex(ValueError, - r"Binary variable has non-\{0,1\} value"): + with self.assertRaisesRegex( + ValueError, r"Binary variable has non-\{0,1\} value" + ): update_boolean_vars_from_binary(m) diff --git a/pyomo/core/tests/unit/test_matrix_constraint.py b/pyomo/core/tests/unit/test_matrix_constraint.py index f0317e552fe..d9b51de7bf6 100644 --- a/pyomo/core/tests/unit/test_matrix_constraint.py +++ b/pyomo/core/tests/unit/test_matrix_constraint.py @@ -17,7 +17,8 @@ def _create_variable_list(size, **kwds): assert size > 0 - return pyo.Var(pyo.RangeSet(0,size-1), **kwds) + return pyo.Var(pyo.RangeSet(0, size - 1), **kwds) + def _get_csr(m, n, value): data = [value] * (m * n) @@ -27,17 +28,15 @@ def _get_csr(m, n, value): indptr.append(indptr[-1] + n) return data, indices, indptr -class TestMatrixConstraint(unittest.TestCase): +class TestMatrixConstraint(unittest.TestCase): def test_init(self): m = pyo.ConcreteModel() m.v = _create_variable_list(3, initialize=1.0) - data, indices, indptr = _get_csr(3,3,0.0) + data, indices, indptr = _get_csr(3, 3, 0.0) lb = [None] * 3 ub = [None] * 3 - m.c = MatrixConstraint(data, indices, indptr, - lb, ub, - x=list(m.v.values())) + m.c = MatrixConstraint(data, indices, indptr, lb, ub, x=list(m.v.values())) self.assertEqual(len(m.c), 3) for k, c in m.c.items(): with self.assertRaises(NotImplementedError): @@ -58,11 +57,10 @@ def test_init(self): m = pyo.ConcreteModel() m.v = _create_variable_list(3, initialize=3) - data, indices, indptr = _get_csr(2,3,1.0) - m.c = MatrixConstraint(data, indices, indptr, - lb=[0]*2, - ub=[2]*2, - x=list(m.v.values())) + data, indices, indptr = _get_csr(2, 3, 1.0) + m.c = MatrixConstraint( + data, indices, indptr, lb=[0] * 2, ub=[2] * 2, x=list(m.v.values()) + ) self.assertEqual(len(m.c), 2) for k, c in m.c.items(): with self.assertRaises(NotImplementedError): @@ -76,14 +74,12 @@ def test_init(self): self.assertEqual(c.upper, 2) self.assertEqual(c.equality, False) - m = pyo.ConcreteModel() m.v = _create_variable_list(3, initialize=3) - data, indices, indptr = _get_csr(2,3,1.0) - m.c = MatrixConstraint(data, indices, indptr, - lb=[1]*2, - ub=[1]*2, - x=list(m.v.values())) + data, indices, indptr = _get_csr(2, 3, 1.0) + m.c = MatrixConstraint( + data, indices, indptr, lb=[1] * 2, ub=[1] * 2, x=list(m.v.values()) + ) self.assertEqual(len(m.c), 2) for k, c in m.c.items(): with self.assertRaises(NotImplementedError): @@ -97,5 +93,6 @@ def test_init(self): self.assertEqual(c.upper, 1) self.assertEqual(c.equality, True) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_misc.py b/pyomo/core/tests/unit/test_misc.py index 1fb2645031a..261c94d96bd 100644 --- a/pyomo/core/tests/unit/test_misc.py +++ b/pyomo/core/tests/unit/test_misc.py @@ -13,57 +13,73 @@ import re import os from os.path import abspath, dirname, join -currdir= dirname(abspath(__file__)) + +currdir = dirname(abspath(__file__)) from filecmp import cmp import pyomo.common.unittest as unittest from pyomo.opt import check_available_solvers from pyomo.scripting.pyomo_main import main -from pyomo.core import (AbstractModel, ConcreteModel, Block, Set, Param, Var, - Objective, Constraint, Reals, display) +from pyomo.core import ( + AbstractModel, + ConcreteModel, + Block, + Set, + Param, + Var, + Objective, + Constraint, + Reals, + display, +) from pyomo.common.tee import capture_output from io import StringIO + def rule1(model): - return (1,model.x+model.y[1],2) -def rule2(model,i): - return (1,model.x+model.y[1]+i,2) + return (1, model.x + model.y[1], 2) + + +def rule2(model, i): + return (1, model.x + model.y[1] + i, 2) solvers = None -class PyomoModel(unittest.TestCase): + +class PyomoModel(unittest.TestCase): @classmethod def setUpClass(cls): global solvers import pyomo.environ + solvers = check_available_solvers('glpk', 'cplex') def test_construct(self): model = AbstractModel() - model.a = Set(initialize=[1,2,3]) + model.a = Set(initialize=[1, 2, 3]) model.A = Param(initialize=1) model.B = Param(model.a) model.x = Var(initialize=1, within=Reals, dense=False) model.y = Var(model.a, initialize=1, within=Reals, dense=False) - model.obj = Objective(rule=lambda model: model.x+model.y[1]) - model.obj2 = Objective(model.a,rule=lambda model, i: i+model.x+model.y[1]) + model.obj = Objective(rule=lambda model: model.x + model.y[1]) + model.obj2 = Objective(model.a, rule=lambda model, i: i + model.x + model.y[1]) model.con = Constraint(rule=rule1) model.con2 = Constraint(model.a, rule=rule2) instance = model.create_instance() expr = instance.x + 1 OUTPUT = open(join(currdir, "display.out"), "w") - display(instance,ostream=OUTPUT) - display(instance.obj,ostream=OUTPUT) - display(instance.x,ostream=OUTPUT) - display(instance.con,ostream=OUTPUT) + display(instance, ostream=OUTPUT) + display(instance.obj, ostream=OUTPUT) + display(instance.x, ostream=OUTPUT) + display(instance.con, ostream=OUTPUT) OUTPUT.write(expr.to_string()) model = AbstractModel() instance = model.create_instance() - display(instance,ostream=OUTPUT) + display(instance, ostream=OUTPUT) OUTPUT.close() try: display(None) @@ -71,32 +87,31 @@ def test_construct(self): except TypeError: pass _out, _txt = join(currdir, "display.out"), join(currdir, "display.txt") - self.assertTrue(cmp(_out, _txt), - msg="Files %s and %s differ" % (_out, _txt)) + self.assertTrue(cmp(_out, _txt), msg="Files %s and %s differ" % (_out, _txt)) def test_construct2(self): model = AbstractModel() - model.a = Set(initialize=[1,2,3]) + model.a = Set(initialize=[1, 2, 3]) model.A = Param(initialize=1) model.B = Param(model.a) model.x = Var(initialize=1, within=Reals, dense=True) model.y = Var(model.a, initialize=1, within=Reals, dense=True) - model.obj = Objective(rule=lambda model: model.x+model.y[1]) - model.obj2 = Objective(model.a,rule=lambda model, i: i+model.x+model.y[1]) + model.obj = Objective(rule=lambda model: model.x + model.y[1]) + model.obj2 = Objective(model.a, rule=lambda model, i: i + model.x + model.y[1]) model.con = Constraint(rule=rule1) model.con2 = Constraint(model.a, rule=rule2) instance = model.create_instance() expr = instance.x + 1 OUTPUT = open(join(currdir, "display2.out"), "w") - display(instance,ostream=OUTPUT) - display(instance.obj,ostream=OUTPUT) - display(instance.x,ostream=OUTPUT) - display(instance.con,ostream=OUTPUT) + display(instance, ostream=OUTPUT) + display(instance.obj, ostream=OUTPUT) + display(instance.x, ostream=OUTPUT) + display(instance.con, ostream=OUTPUT) OUTPUT.write(expr.to_string()) model = AbstractModel() instance = model.create_instance() - display(instance,ostream=OUTPUT) + display(instance, ostream=OUTPUT) OUTPUT.close() try: display(None) @@ -104,64 +119,63 @@ def test_construct2(self): except TypeError: pass _out, _txt = join(currdir, "display2.out"), join(currdir, "display2.txt") - self.assertTrue(cmp(_out, _txt), - msg="Files %s and %s differ" % (_out, _txt)) + self.assertTrue(cmp(_out, _txt), msg="Files %s and %s differ" % (_out, _txt)) -class PyomoBadModels ( unittest.TestCase ): - +class PyomoBadModels(unittest.TestCase): @classmethod def setUpClass(cls): global solvers import pyomo.environ + solvers = check_available_solvers('glpk', 'cplex') - def pyomo ( self, cmd, **kwargs): - args = ['solve'] + re.split('[ ]+', cmd ) - out = kwargs.get( 'file', None ) + def pyomo(self, cmd, **kwargs): + args = ['solve'] + re.split('[ ]+', cmd) + out = kwargs.get('file', None) if out is None: out = StringIO() with capture_output(out): - os.chdir( currdir ) - output = main( args ) + os.chdir(currdir) + output = main(args) if not 'file' in kwargs: return output.getvalue() return output - def test_uninstantiated_model_linear ( self ): + def test_uninstantiated_model_linear(self): """Run pyomo with "bad" model file. Should fail gracefully, with a perhaps useful-to-the-user message.""" if not 'glpk' in solvers: self.skipTest("glpk solver is not available") - return # ignore for now + return # ignore for now base = '%s/test_uninstantiated_model' % currdir fout, fbase = join(base, '_linear.out'), join(base, '.txt') - self.pyomo('uninstantiated_model_linear.py', file=fout ) - self.assertTrue(cmp(fout, fbase), - msg="Files %s and %s differ" % (fout, fbase)) + self.pyomo('uninstantiated_model_linear.py', file=fout) + self.assertTrue(cmp(fout, fbase), msg="Files %s and %s differ" % (fout, fbase)) - def test_uninstantiated_model_quadratic ( self ): + def test_uninstantiated_model_quadratic(self): """Run pyomo with "bad" model file. Should fail gracefully, with a perhaps useful-to-the-user message.""" if not 'cplex' in solvers: self.skipTest("The 'cplex' executable is not available") - return # ignore for now + return # ignore for now base = '%s/test_uninstantiated_model' % currdir fout, fbase = join(base, '_quadratic.out'), join(base, '.txt') - self.pyomo('uninstantiated_model_quadratic.py --solver=cplex', file=fout ) - self.assertTrue(cmp(fout, fbase), - msg="Files %s and %s differ" % (fout, fbase)) + self.pyomo('uninstantiated_model_quadratic.py --solver=cplex', file=fout) + self.assertTrue(cmp(fout, fbase), msg="Files %s and %s differ" % (fout, fbase)) class TestApplyIndexedRule(unittest.TestCase): - def test_rules_with_None_in_set(self): def noarg_rule(b): b.args = () + def onearg_rule(b, i): b.args = (i,) + def twoarg_rule(b, i, j): - b.args = (i,j) + b.args = (i, j) + m = ConcreteModel() m.b1 = Block(rule=noarg_rule) self.assertEqual(m.b1.args, ()) @@ -169,12 +183,11 @@ def twoarg_rule(b, i, j): m.b2 = Block([None], rule=onearg_rule) self.assertEqual(m.b2[None].args, (None,)) - m.b3 = Block([(None,1)], rule=twoarg_rule) - self.assertEqual(m.b3[None,1].args, ((None,1))) + m.b3 = Block([(None, 1)], rule=twoarg_rule) + self.assertEqual(m.b3[None, 1].args, ((None, 1))) class TestComponent(unittest.TestCase): - def test_getname(self): m = ConcreteModel() m.b = Block() @@ -186,7 +199,9 @@ def test_getname_error(self): m.b = Block() m.b.v = Var() m.c = Block() - self.assertRaises(RuntimeError, m.b.v.getname, fully_qualified=True, relative_to=m.c) + self.assertRaises( + RuntimeError, m.b.v.getname, fully_qualified=True, relative_to=m.c + ) if __name__ == "__main__": diff --git a/pyomo/core/tests/unit/test_model.py b/pyomo/core/tests/unit/test_model.py index ae30b661c78..95ad17e97f4 100644 --- a/pyomo/core/tests/unit/test_model.py +++ b/pyomo/core/tests/unit/test_model.py @@ -17,6 +17,7 @@ import json import os from os.path import abspath, dirname, join + currdir = dirname(abspath(__file__)) import pickle @@ -25,8 +26,25 @@ from pyomo.common.dependencies import yaml_available from pyomo.common.tempfiles import TempfileManager -from pyomo.core.expr import current as EXPR -from pyomo.environ import RangeSet, ConcreteModel, Var, Param, Block, AbstractModel, Set, Constraint, Objective, value, sum_product, SolverFactory, VarList, ObjectiveList, ConstraintList, Model +import pyomo.core.expr as EXPR +from pyomo.environ import ( + RangeSet, + ConcreteModel, + Var, + Param, + Block, + AbstractModel, + Set, + Constraint, + Objective, + value, + sum_product, + SolverFactory, + VarList, + ObjectiveList, + ConstraintList, + Model, +) from pyomo.opt import check_available_solvers from pyomo.opt.parallel.local import SolverManager_Serial @@ -35,17 +53,16 @@ solvers = check_available_solvers('glpk') -class Test(unittest.TestCase): +class Test(unittest.TestCase): def tearDown(self): if os.path.exists("unknown.lp"): os.unlink("unknown.lp") TempfileManager.clear_tempfiles() - def test_clone_concrete_model(self): def _populate(b, *args): - b.A = RangeSet(1,3) + b.A = RangeSet(1, 3) b.v = Var() b.vv = Var(m.A) b.p = Param() @@ -73,7 +90,7 @@ def _populate(b, *args): self.assertEqual(id(n), id(n.bb[2].parent_block())) self.assertEqual(id(n), id(n.bb[3].parent_block())) - for x,y in ((m, n), (m.b, n.b), (m.b.c, n.b.c), (m.bb[2], n.bb[2])): + for x, y in ((m, n), (m.b, n.b), (m.b.c, n.b.c), (m.bb[2], n.bb[2])): self.assertNotEqual(id(x), id(y)) self.assertNotEqual(id(x.parent_block()), id(x)) self.assertNotEqual(id(y.parent_block()), id(y)) @@ -92,7 +109,7 @@ def _populate(b, *args): def test_clone_abstract_model(self): def _populate(b, *args): - b.A = RangeSet(1,3) + b.A = RangeSet(1, 3) b.v = Var() b.vv = Var(m.A) b.p = Param() @@ -114,7 +131,7 @@ def _populate(b, *args): self.assertEqual(id(n), id(n.b.parent_block())) self.assertEqual(id(n), id(n.bb.parent_block())) - for x,y in ((m, n), (m.b, n.b), (m.b.c, n.b.c)): + for x, y in ((m, n), (m.b, n.b), (m.b.c, n.b.c)): self.assertNotEqual(id(x), id(y)) self.assertNotEqual(id(x.parent_block()), id(x)) self.assertNotEqual(id(y.parent_block()), id(y)) @@ -180,176 +197,210 @@ def test_set_attr(self): def test_write(self): model = ConcreteModel() - model.A = RangeSet(1,4) - model.x = Var(model.A, bounds=(-1,1)) + model.A = RangeSet(1, 4) + model.x = Var(model.A, bounds=(-1, 1)) + def obj_rule(model): return sum_product(model.x) + model.obj = Objective(rule=obj_rule) model.write() def test_write2(self): model = ConcreteModel() - model.A = RangeSet(1,4) - model.x = Var(model.A, bounds=(-1,1)) + model.A = RangeSet(1, 4) + model.x = Var(model.A, bounds=(-1, 1)) + def obj_rule(model): return sum_product(model.x) + model.obj = Objective(rule=obj_rule) + def c_rule(model): - return (1, model.x[1]+model.x[2], 2) + return (1, model.x[1] + model.x[2], 2) + model.c = Constraint(rule=c_rule) model.write() def test_write3(self): # Test that the summation works correctly, even though param 'w' has a default value model = ConcreteModel() - model.J = RangeSet(1,4) - model.w=Param(model.J, default=4) - model.x=Var(model.J, initialize=3) + model.J = RangeSet(1, 4) + model.w = Param(model.J, default=4) + model.x = Var(model.J, initialize=3) + def obj_rule(instance): return sum_product(instance.w, instance.x) + model.obj = Objective(rule=obj_rule) - self.assertEqual( value(model.obj), 48 ) + self.assertEqual(value(model.obj), 48) @unittest.skipIf('glpk' not in solvers, "glpk solver is not available") def test_solve1(self): model = ConcreteModel() - model.A = RangeSet(1,4) - model.x = Var(model.A, bounds=(-1,1)) + model.A = RangeSet(1, 4) + model.x = Var(model.A, bounds=(-1, 1)) + def obj_rule(model): return sum_product(model.x) + model.obj = Objective(rule=obj_rule) + def c_rule(model): expr = 0 for i in model.A: - expr += i*model.x[i] + expr += i * model.x[i] return expr == 0 + model.c = Constraint(rule=c_rule) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=True) model.solutions.store_to(results) - results.write(filename=join(currdir,"solve1.out"), format='json') - with open(join(currdir,"solve1.out"), 'r') as out, \ - open(join(currdir,"solve1.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, "solve1.out"), format='json') + with open(join(currdir, "solve1.out"), 'r') as out, open( + join(currdir, "solve1.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) + # def d_rule(model): return model.x[1] >= 0 + model.d = Constraint(rule=d_rule) model.d.deactivate() results = opt.solve(model) model.solutions.store_to(results) - results.write(filename=join(currdir,"solve1x.out"), format='json') - with open(join(currdir,"solve1x.out"), 'r') as out, \ - open(join(currdir,"solve1.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, "solve1x.out"), format='json') + with open(join(currdir, "solve1x.out"), 'r') as out, open( + join(currdir, "solve1.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) # model.d.activate() results = opt.solve(model) model.solutions.store_to(results) - results.write(filename=join(currdir,"solve1a.out"), format='json') - with open(join(currdir,"solve1a.out"), 'r') as out, \ - open(join(currdir,"solve1a.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, "solve1a.out"), format='json') + with open(join(currdir, "solve1a.out"), 'r') as out, open( + join(currdir, "solve1a.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) # model.d.deactivate() + def e_rule(model, i): return model.x[i] >= 0 + model.e = Constraint(model.A, rule=e_rule) for i in model.A: model.e[i].deactivate() results = opt.solve(model) model.solutions.store_to(results) - results.write(filename=join(currdir,"solve1y.out"), format='json') - with open(join(currdir,"solve1y.out"), 'r') as out, \ - open(join(currdir,"solve1.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, "solve1y.out"), format='json') + with open(join(currdir, "solve1y.out"), 'r') as out, open( + join(currdir, "solve1.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) # model.e.activate() results = opt.solve(model) model.solutions.store_to(results) - results.write(filename=join(currdir,"solve1b.out"), format='json') - with open(join(currdir,"solve1b.out"), 'r') as out, \ - open(join(currdir,"solve1b.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) - + results.write(filename=join(currdir, "solve1b.out"), format='json') + with open(join(currdir, "solve1b.out"), 'r') as out, open( + join(currdir, "solve1b.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) + @unittest.skipIf('glpk' not in solvers, "glpk solver is not available") def test_store_to_skip_stale_vars(self): # test store_to() function with skip_stale_vars=True model = ConcreteModel() - model.A = RangeSet(1,4) - model.x = Var(model.A, bounds=(-1,1)) + model.A = RangeSet(1, 4) + model.x = Var(model.A, bounds=(-1, 1)) + def obj_rule(model): return sum_product(model.x) + model.obj = Objective(rule=obj_rule) + def c_rule(model): expr = 0 for i in model.A: - expr += i*model.x[i] + expr += i * model.x[i] return expr == 0 + model.c = Constraint(rule=c_rule) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=True) model.x[1].fix() results = opt.solve(model, symbolic_solver_labels=True) - model.solutions.store_to(results,skip_stale_vars=False) + model.solutions.store_to(results, skip_stale_vars=False) for index in model.A: self.assertIn(model.x[index].getname(), results.solution.variable.keys()) - model.solutions.store_to(results,skip_stale_vars=True) + model.solutions.store_to(results, skip_stale_vars=True) for index in model.A: if index == 1: - self.assertNotIn(model.x[index].getname(), results.solution.variable.keys()) + self.assertNotIn( + model.x[index].getname(), results.solution.variable.keys() + ) else: - self.assertIn(model.x[index].getname(), results.solution.variable.keys()) - + self.assertIn( + model.x[index].getname(), results.solution.variable.keys() + ) def test_display(self): model = ConcreteModel() - model.A = RangeSet(1,4) - model.x = Var(model.A, bounds=(-1,1)) + model.A = RangeSet(1, 4) + model.x = Var(model.A, bounds=(-1, 1)) + def obj_rule(model): expr = 0 for i in model.A: expr += model.x[i] return expr + model.obj = Objective(rule=obj_rule) - model.display(join(currdir,"solve3.out")) - _out, _txt = join(currdir,"solve3.out"), join(currdir,"solve3.txt") - self.assertTrue(cmp(_out, _txt), - msg="Files %s and %s differ" % (_txt, _out)) + model.display(join(currdir, "solve3.out")) + _out, _txt = join(currdir, "solve3.out"), join(currdir, "solve3.txt") + self.assertTrue(cmp(_out, _txt), msg="Files %s and %s differ" % (_txt, _out)) @unittest.skipIf('glpk' not in solvers, "glpk solver is not available") def test_solve4(self): model = ConcreteModel() - model.A = RangeSet(1,4) - model.x = Var(model.A, bounds=(-1,1)) + model.A = RangeSet(1, 4) + model.x = Var(model.A, bounds=(-1, 1)) + def obj_rule(model): return sum_product(model.x) + model.obj = Objective(rule=obj_rule) + def c_rule(model): expr = 0 for i in model.A: - expr += i*model.x[i] + expr += i * model.x[i] return expr == 0 + model.c = Constraint(rule=c_rule) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=True) model.solutions.store_to(results) - results.write(filename=join(currdir,'solve4.out'), format='json') - with open(join(currdir,"solve4.out"), 'r') as out, \ - open(join(currdir,"solve1.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, 'solve4.out'), format='json') + with open(join(currdir, "solve4.out"), 'r') as out, open( + join(currdir, "solve1.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) @unittest.skipIf('glpk' not in solvers, "glpk solver is not available") def test_solve6(self): @@ -359,73 +410,87 @@ def test_solve6(self): # b.x # model = ConcreteModel() - model.y = Var(bounds=(-1,1)) + model.y = Var(bounds=(-1, 1)) model.b = Block() - model.b.A = RangeSet(1,4) - model.b.x = Var(model.b.A, bounds=(-1,1)) + model.b.A = RangeSet(1, 4) + model.b.x = Var(model.b.A, bounds=(-1, 1)) + def obj_rule(block): return sum_product(block.x) + model.b.obj = Objective(rule=obj_rule) + def c_rule(model): expr = model.y for i in model.b.A: - expr += i*model.b.x[i] + expr += i * model.b.x[i] return expr == 0 + model.c = Constraint(rule=c_rule) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=True) model.solutions.store_to(results) - results.write(filename=join(currdir,'solve6.out'), format='json') - with open(join(currdir,"solve6.out"), 'r') as out, \ - open(join(currdir,"solve6.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, 'solve6.out'), format='json') + with open(join(currdir, "solve6.out"), 'r') as out, open( + join(currdir, "solve6.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) @unittest.skipIf('glpk' not in solvers, "glpk solver is not available") def test_solve7(self): # - # Test that solution values are writen with appropriate + # Test that solution values are written with appropriate # quotations in results # model = ConcreteModel() - model.y = Var(bounds=(-1,1)) - model.A = RangeSet(1,4) + model.y = Var(bounds=(-1, 1)) + model.A = RangeSet(1, 4) model.B = Set(initialize=['A B', 'C,D', 'E']) - model.x = Var(model.A, model.B, bounds=(-1,1)) + model.x = Var(model.A, model.B, bounds=(-1, 1)) + def obj_rule(model): return sum_product(model.x) + model.obj = Objective(rule=obj_rule) + def c_rule(model): expr = model.y for i in model.A: for j in model.B: - expr += i*model.x[i,j] + expr += i * model.x[i, j] return expr == 0 + model.c = Constraint(rule=c_rule) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=True) - #model.display() + # model.display() model.solutions.store_to(results) - results.write(filename=join(currdir,'solve7.out'), format='json') - with open(join(currdir,"solve7.out"), 'r') as out, \ - open(join(currdir,"solve7.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + results.write(filename=join(currdir, 'solve7.out'), format='json') + with open(join(currdir, "solve7.out"), 'r') as out, open( + join(currdir, "solve7.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) def test_stats1(self): model = ConcreteModel() - model.x = Var([1,2]) + model.x = Var([1, 2]) + def obj_rule(model, i): return sum_product(model.x) - model.obj = Objective([1,2], rule=obj_rule) + + model.obj = Objective([1, 2], rule=obj_rule) + def c_rule(model, i): expr = 0 - for j in [1,2]: - expr += j*model.x[j] + for j in [1, 2]: + expr += j * model.x[j] return expr == 0 - model.c = Constraint([1,2], rule=c_rule) + + model.c = Constraint([1, 2], rule=c_rule) self.assertEqual(model.nvariables(), 2) self.assertEqual(model.nobjectives(), 2) self.assertEqual(model.nconstraints(), 2) @@ -433,24 +498,28 @@ def c_rule(model, i): def test_stats2(self): model = ConcreteModel() # - model.x = Var([1,2]) + model.x = Var([1, 2]) + def obj_rule(model, i): return sum_product(model.x) + model.y = VarList() model.y.add() model.y.add() # - model.obj = Objective([1,2], rule=obj_rule) + model.obj = Objective([1, 2], rule=obj_rule) model.o = ObjectiveList() model.o.add(model.y[1]) model.o.add(model.y[2]) + # def c_rule(model, i): expr = 0 - for j in [1,2]: - expr += j*model.x[j] + for j in [1, 2]: + expr += j * model.x[j] return expr == 0 - model.c = Constraint([1,2], rule=c_rule) + + model.c = Constraint([1, 2], rule=c_rule) model.C = ConstraintList() model.C.add(model.y[1] == 0) model.C.add(model.y[2] == 0) @@ -461,19 +530,23 @@ def c_rule(model, i): def test_stats3(self): model = ConcreteModel() - model.x = Var([1,2]) + model.x = Var([1, 2]) + def obj_rule(model, i): return sum_product(model.x) - model.obj = Objective([1,2], rule=obj_rule) + + model.obj = Objective([1, 2], rule=obj_rule) + def c_rule(model, i): expr = 0 - for j in [1,2]: - expr += j*model.x[j] + for j in [1, 2]: + expr += j * model.x[j] return expr == 0 - model.c = Constraint([1,2], rule=c_rule) + + model.c = Constraint([1, 2], rule=c_rule) # model.B = Block() - model.B.x = Var([1,2]) + model.B.x = Var([1, 2]) model.B.o = ObjectiveList() model.B.o.add(model.B.x[1]) model.B.o.add(model.B.x[2]) @@ -507,9 +580,9 @@ def test_stats4(self): @unittest.skipIf('glpk' not in solvers, "glpk solver is not available") def test_solve_with_pickle(self): model = ConcreteModel() - model.A = RangeSet(1,4) + model.A = RangeSet(1, 4) model.b = Block() - model.b.x = Var(model.A, bounds=(-1,1)) + model.b.x = Var(model.A, bounds=(-1, 1)) model.b.obj = Objective(expr=sum_product(model.b.x)) model.c = Constraint(expr=model.b.x[1] >= 0) opt = SolverFactory('glpk') @@ -518,23 +591,23 @@ def test_solve_with_pickle(self): self.assertEqual(len(model.solutions), 1) # self.assertEqual(model.solutions[0].gap, 0.0) - #self.assertEqual(model.solutions[0].status, SolutionStatus.feasible) + # self.assertEqual(model.solutions[0].status, SolutionStatus.feasible) self.assertEqual(model.solutions[0].message, None) # buf = pickle.dumps(model) tmodel = pickle.loads(buf) self.assertEqual(len(tmodel.solutions), 1) self.assertEqual(tmodel.solutions[0].gap, 0.0) - #self.assertEqual(tmodel.solutions[0].status, SolutionStatus.feasible) + # self.assertEqual(tmodel.solutions[0].status, SolutionStatus.feasible) self.assertEqual(tmodel.solutions[0].message, None) @unittest.skipIf('glpk' not in solvers, "glpk solver is not available") def test_solve_with_pickle_then_clone(self): # This tests github issue Pyomo-#65 model = ConcreteModel() - model.A = RangeSet(1,4) + model.A = RangeSet(1, 4) model.b = Block() - model.b.x = Var(model.A, bounds=(-1,1)) + model.b.x = Var(model.A, bounds=(-1, 1)) model.b.obj = Objective(expr=sum_product(model.b.x)) model.c = Constraint(expr=model.b.x[1] >= 0) opt = SolverFactory('glpk') @@ -543,28 +616,28 @@ def test_solve_with_pickle_then_clone(self): self.assertEqual(len(model.solutions), 1) # self.assertEqual(model.solutions[0].gap, 0.0) - #self.assertEqual(model.solutions[0].status, SolutionStatus.feasible) + # self.assertEqual(model.solutions[0].status, SolutionStatus.feasible) self.assertEqual(model.solutions[0].message, None) # buf = pickle.dumps(model) tmodel = pickle.loads(buf) self.assertEqual(len(tmodel.solutions), 1) self.assertEqual(tmodel.solutions[0].gap, 0.0) - #self.assertEqual(tmodel.solutions[0].status, SolutionStatus.feasible) + # self.assertEqual(tmodel.solutions[0].status, SolutionStatus.feasible) self.assertEqual(tmodel.solutions[0].message, None) self.assertIn(id(tmodel.b.obj), tmodel.solutions[0]._entry['objective']) self.assertIs( - tmodel.b.obj, - tmodel.solutions[0]._entry['objective'][id(tmodel.b.obj)][0]() ) + tmodel.b.obj, tmodel.solutions[0]._entry['objective'][id(tmodel.b.obj)][0] + ) inst = tmodel.clone() # make sure the clone has all the attributes - self.assertTrue(hasattr(inst,'A')) - self.assertTrue(hasattr(inst,'b')) - self.assertTrue(hasattr(inst.b,'x')) - self.assertTrue(hasattr(inst.b,'obj')) - self.assertTrue(hasattr(inst,'c')) + self.assertTrue(hasattr(inst, 'A')) + self.assertTrue(hasattr(inst, 'b')) + self.assertTrue(hasattr(inst.b, 'x')) + self.assertTrue(hasattr(inst.b, 'obj')) + self.assertTrue(hasattr(inst, 'c')) # and that they were all copied self.assertIsNot(inst.A, tmodel.A) self.assertIsNot(inst.b, tmodel.b) @@ -573,59 +646,59 @@ def test_solve_with_pickle_then_clone(self): self.assertIsNot(inst.c, tmodel.c) # Make sure the solution is on the new model - self.assertTrue(hasattr(inst,'solutions')) + self.assertTrue(hasattr(inst, 'solutions')) self.assertEqual(len(inst.solutions), 1) self.assertEqual(inst.solutions[0].gap, 0.0) - #self.assertEqual(inst.solutions[0].status, SolutionStatus.feasible) + # self.assertEqual(inst.solutions[0].status, SolutionStatus.feasible) self.assertEqual(inst.solutions[0].message, None) # Spot-check some components and make sure all the weakrefs in # the ModelSOlution got updated self.assertIn(id(inst.b.obj), inst.solutions[0]._entry['objective']) _obj = inst.solutions[0]._entry['objective'][id(inst.b.obj)] - self.assertIs(_obj[0](), inst.b.obj) + self.assertIs(_obj[0], inst.b.obj) - for v in [1,2,3,4]: + for v in [1, 2, 3, 4]: self.assertIn(id(inst.b.x[v]), inst.solutions[0]._entry['variable']) _v = inst.solutions[0]._entry['variable'][id(inst.b.x[v])] - self.assertIs(_v[0](), inst.b.x[v]) + self.assertIs(_v[0], inst.b.x[v]) @unittest.skipIf('glpk' not in solvers, "glpk solver is not available") @unittest.skipIf(not yaml_available, "YAML not available available") def test_solve_with_store1(self): # With symbolic solver labels model = ConcreteModel() - model.A = RangeSet(1,4) + model.A = RangeSet(1, 4) model.b = Block() - model.b.x = Var(model.A, bounds=(-1,1)) + model.b.x = Var(model.A, bounds=(-1, 1)) model.b.obj = Objective(expr=sum_product(model.b.x)) model.c = Constraint(expr=model.b.x[1] >= 0) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=True) # - results.write(filename=join(currdir,'solve_with_store1.out'), - format='yaml') - with open(join(currdir,"solve_with_store1.out"), 'r') as out, \ - open(join(currdir,"solve_with_store1.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results.write(filename=join(currdir, 'solve_with_store1.out'), format='yaml') + with open(join(currdir, "solve_with_store1.out"), 'r') as out, open( + join(currdir, "solve_with_store1.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) model.solutions.store_to(results) # - results.write(filename=join(currdir,'solve_with_store2.out'), - format='yaml') - with open(join(currdir,"solve_with_store2.out"), 'r') as out, \ - open(join(currdir,"solve_with_store2.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results.write(filename=join(currdir, 'solve_with_store2.out'), format='yaml') + with open(join(currdir, "solve_with_store2.out"), 'r') as out, open( + join(currdir, "solve_with_store2.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) # # Load results with string indices # tmodel = ConcreteModel() - tmodel.A = RangeSet(1,4) + tmodel.A = RangeSet(1, 4) tmodel.b = Block() - tmodel.b.x = Var(tmodel.A, bounds=(-1,1)) + tmodel.b.x = Var(tmodel.A, bounds=(-1, 1)) tmodel.b.obj = Objective(expr=sum_product(tmodel.b.x)) tmodel.c = Constraint(expr=tmodel.b.x[1] >= 0) self.assertEqual(len(tmodel.solutions), 0) @@ -637,37 +710,37 @@ def test_solve_with_store1(self): def test_solve_with_store2(self): # Without symbolic solver labels model = ConcreteModel() - model.A = RangeSet(1,4) + model.A = RangeSet(1, 4) model.b = Block() - model.b.x = Var(model.A, bounds=(-1,1)) + model.b.x = Var(model.A, bounds=(-1, 1)) model.b.obj = Objective(expr=sum_product(model.b.x)) model.c = Constraint(expr=model.b.x[1] >= 0) opt = SolverFactory('glpk') results = opt.solve(model, symbolic_solver_labels=False) # - results.write(filename=join(currdir,'solve_with_store1.out'), - format='yaml') - with open(join(currdir,"solve_with_store1.out"), 'r') as out, \ - open(join(currdir,"solve_with_store1.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results.write(filename=join(currdir, 'solve_with_store1.out'), format='yaml') + with open(join(currdir, "solve_with_store1.out"), 'r') as out, open( + join(currdir, "solve_with_store1.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) model.solutions.store_to(results) # - results.write(filename=join(currdir,'solve_with_store2.out'), - format='yaml') - with open(join(currdir,"solve_with_store2.out"), 'r') as out, \ - open(join(currdir,"solve_with_store2.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results.write(filename=join(currdir, 'solve_with_store2.out'), format='yaml') + with open(join(currdir, "solve_with_store2.out"), 'r') as out, open( + join(currdir, "solve_with_store2.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) # # Load results with string indices # tmodel = ConcreteModel() - tmodel.A = RangeSet(1,4) + tmodel.A = RangeSet(1, 4) tmodel.b = Block() - tmodel.b.x = Var(tmodel.A, bounds=(-1,1)) + tmodel.b.x = Var(tmodel.A, bounds=(-1, 1)) tmodel.b.obj = Objective(expr=sum_product(tmodel.b.x)) tmodel.c = Constraint(expr=tmodel.b.x[1] >= 0) self.assertEqual(len(tmodel.solutions), 0) @@ -678,49 +751,49 @@ def test_solve_with_store2(self): @unittest.skipIf(not yaml_available, "YAML not available available") def test_solve_with_store2(self): model = ConcreteModel() - model.A = RangeSet(1,4) + model.A = RangeSet(1, 4) model.b = Block() - model.b.x = Var(model.A, bounds=(-1,1)) + model.b.x = Var(model.A, bounds=(-1, 1)) model.b.obj = Objective(expr=sum_product(model.b.x)) model.c = Constraint(expr=model.b.x[1] >= 0) opt = SolverFactory('glpk') results = opt.solve(model) # - results.write(filename=join(currdir,'solve_with_store3.out'), - format='json') - with open(join(currdir,"solve_with_store3.out"), 'r') as out, \ - open(join(currdir,"solve_with_store3.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results.write(filename=join(currdir, 'solve_with_store3.out'), format='json') + with open(join(currdir, "solve_with_store3.out"), 'r') as out, open( + join(currdir, "solve_with_store3.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) # model.solutions.store_to(results) - results.write(filename=join(currdir,'solve_with_store4.out'), - format='json') - with open(join(currdir,"solve_with_store4.out"), 'r') as out, \ - open(join(currdir,"solve_with_store4.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results.write(filename=join(currdir, 'solve_with_store4.out'), format='json') + with open(join(currdir, "solve_with_store4.out"), 'r') as out, open( + join(currdir, "solve_with_store4.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) # # Test that we can pickle the results object # buf = pickle.dumps(results) results_ = pickle.loads(buf) - results.write(filename=join(currdir,'solve_with_store4.out'), - format='json') - with open(join(currdir,"solve_with_store4.out"), 'r') as out, \ - open(join(currdir,"solve_with_store4.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results.write(filename=join(currdir, 'solve_with_store4.out'), format='json') + with open(join(currdir, "solve_with_store4.out"), 'r') as out, open( + join(currdir, "solve_with_store4.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) # # Load results with string indices # tmodel = ConcreteModel() - tmodel.A = RangeSet(1,3) + tmodel.A = RangeSet(1, 3) tmodel.b = Block() - tmodel.b.x = Var(tmodel.A, bounds=(-1,1)) + tmodel.b.x = Var(tmodel.A, bounds=(-1, 1)) tmodel.b.obj = Objective(expr=sum_product(tmodel.b.x)) tmodel.c = Constraint(expr=tmodel.b.x[1] >= 0) self.assertEqual(len(tmodel.solutions), 0) @@ -731,63 +804,63 @@ def test_solve_with_store2(self): @unittest.skipIf(not yaml_available, "YAML not available available") def test_solve_with_store3(self): model = ConcreteModel() - model.A = RangeSet(1,4) + model.A = RangeSet(1, 4) model.b = Block() - model.b.x = Var(model.A, bounds=(-1,1)) + model.b.x = Var(model.A, bounds=(-1, 1)) model.b.obj = Objective(expr=sum_product(model.b.x)) model.c = Constraint(expr=model.b.x[1] >= 0) opt = SolverFactory('glpk') results = opt.solve(model) # model.solutions.store_to(results) - results.write(filename=join(currdir,'solve_with_store5.out'), - format='json') - with open(join(currdir,"solve_with_store5.out"), 'r') as out, \ - open(join(currdir,"solve_with_store4.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results.write(filename=join(currdir, 'solve_with_store5.out'), format='json') + with open(join(currdir, "solve_with_store5.out"), 'r') as out, open( + join(currdir, "solve_with_store4.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) # model.solutions.store_to(results, cuid=True) buf = pickle.dumps(results) results_ = pickle.loads(buf) model.solutions.load_from(results_) model.solutions.store_to(results_) - results_.write(filename=join(currdir,'solve_with_store6.out'), - format='json') - with open(join(currdir,"solve_with_store6.out"), 'r') as out, \ - open(join(currdir,"solve_with_store4.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results_.write(filename=join(currdir, 'solve_with_store6.out'), format='json') + with open(join(currdir, "solve_with_store6.out"), 'r') as out, open( + join(currdir, "solve_with_store4.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) # # Load results with string indices # tmodel = ConcreteModel() - tmodel.A = RangeSet(1,4) + tmodel.A = RangeSet(1, 4) tmodel.b = Block() - tmodel.b.x = Var(tmodel.A, bounds=(-1,1)) + tmodel.b.x = Var(tmodel.A, bounds=(-1, 1)) tmodel.b.obj = Objective(expr=sum_product(tmodel.b.x)) tmodel.c = Constraint(expr=tmodel.b.x[1] >= 0) self.assertEqual(len(tmodel.solutions), 0) tmodel.solutions.load_from(results) self.assertEqual(len(tmodel.solutions), 1) tmodel.solutions.store_to(results) - results.write(filename=join(currdir,'solve_with_store7.out'), - format='json') - with open(join(currdir,"solve_with_store7.out"), 'r') as out, \ - open(join(currdir,"solve_with_store4.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results.write(filename=join(currdir, 'solve_with_store7.out'), format='json') + with open(join(currdir, "solve_with_store7.out"), 'r') as out, open( + join(currdir, "solve_with_store4.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) @unittest.skipIf('glpk' not in solvers, "glpk solver is not available") @unittest.skipIf(not yaml_available, "YAML not available available") def test_solve_with_store4(self): model = ConcreteModel() - model.A = RangeSet(1,4) + model.A = RangeSet(1, 4) model.b = Block() - model.b.x = Var(model.A, bounds=(-1,1)) + model.b.x = Var(model.A, bounds=(-1, 1)) model.b.obj = Objective(expr=sum_product(model.b.x)) model.c = Constraint(expr=model.b.x[1] >= 0) opt = SolverFactory('glpk') @@ -799,21 +872,21 @@ def test_solve_with_store4(self): self.assertEqual(len(results.solution), 1) # model.solutions.store_to(results) - results.write(filename=join(currdir,'solve_with_store8.out'), - format='json') - with open(join(currdir,"solve_with_store8.out"), 'r') as out, \ - open(join(currdir,"solve_with_store4.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + results.write(filename=join(currdir, 'solve_with_store8.out'), format='json') + with open(join(currdir, "solve_with_store8.out"), 'r') as out, open( + join(currdir, "solve_with_store4.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) @unittest.skipIf('glpk' not in solvers, "glpk solver is not available") @unittest.skipIf(not yaml_available, "YAML not available available") def test_solve_with_store5(self): model = ConcreteModel() - model.A = RangeSet(1,4) + model.A = RangeSet(1, 4) model.b = Block() - model.b.x = Var(model.A, bounds=(-1,1)) + model.b.x = Var(model.A, bounds=(-1, 1)) model.b.obj = Objective(expr=sum_product(model.b.x)) model.c = Constraint(expr=model.b.x[1] >= 0) @@ -827,64 +900,70 @@ def test_solve_with_store5(self): self.assertEqual(len(results.solution), 1) # model.solutions.store_to(results) - results.write(filename=join(currdir,'solve_with_store8.out'), - format='json') - with open(join(currdir,"solve_with_store8.out"), 'r') as out, \ - open(join(currdir,"solve_with_store4.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) - + results.write(filename=join(currdir, 'solve_with_store8.out'), format='json') + with open(join(currdir, "solve_with_store8.out"), 'r') as out, open( + join(currdir, "solve_with_store4.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) def test_create_concrete_from_rule(self): def make(m): m.I = RangeSet(3) m.x = Var(m.I) - m.c = Constraint( expr=sum(m.x[i] for i in m.I) >= 0 ) - model = ConcreteModel(rule=make) - self.assertEqual( [x.local_name for x in model.component_objects()], - ['I','x','c'] ) - self.assertEqual( len(list(EXPR.identify_variables(model.c.body))), 3 ) + m.c = Constraint(expr=sum(m.x[i] for i in m.I) >= 0) + model = ConcreteModel(rule=make) + self.assertEqual( + [x.local_name for x in model.component_objects()], ['I', 'x', 'c'] + ) + self.assertEqual(len(list(EXPR.identify_variables(model.c.body))), 3) def test_create_abstract_from_rule(self): def make_invalid(m): m.I = RangeSet(3) m.x = Var(m.I) - m.c = Constraint( expr=sum(m.x[i] for i in m.I) >= 0 ) + m.c = Constraint(expr=sum(m.x[i] for i in m.I) >= 0) def make(m): m.I = RangeSet(3) m.x = Var(m.I) + def c(b): return sum(m.x[i] for i in m.I) >= 0 - m.c = Constraint( rule=c ) + + m.c = Constraint(rule=c) with self.assertRaisesRegex( - ValueError, r'x\[1\]: The component has not been constructed.'): + ValueError, r'x\[1\]: The component has not been constructed.' + ): model = AbstractModel(rule=make_invalid) instance = model.create_instance() model = AbstractModel(rule=make) instance = model.create_instance() - self.assertEqual( [x.local_name for x in model.component_objects()], - [] ) - self.assertEqual( [x.local_name for x in instance.component_objects()], - ['I','x','c'] ) - self.assertEqual( len(list(EXPR.identify_variables(instance.c.body))), 3 ) + self.assertEqual([x.local_name for x in model.component_objects()], []) + self.assertEqual( + [x.local_name for x in instance.component_objects()], ['I', 'x', 'c'] + ) + self.assertEqual(len(list(EXPR.identify_variables(instance.c.body))), 3) model = AbstractModel(rule=make) model.y = Var() instance = model.create_instance() - self.assertEqual( [x.local_name for x in instance.component_objects()], - ['y','I','x','c'] ) - self.assertEqual( len(list(EXPR.identify_variables(instance.c.body))), 3 ) + self.assertEqual( + [x.local_name for x in instance.component_objects()], ['y', 'I', 'x', 'c'] + ) + self.assertEqual(len(list(EXPR.identify_variables(instance.c.body))), 3) def test_error_creating_model_baseclass(self): with self.assertRaisesRegex( - TypeError, "Directly creating the 'Model' class is not allowed. Please use the AbstractModel or ConcreteModel class instead."): + TypeError, + "Directly creating the 'Model' class is not allowed. Please use the AbstractModel or ConcreteModel class instead.", + ): m = Model() + if __name__ == "__main__": unittest.main() - diff --git a/pyomo/core/tests/unit/test_mutable.py b/pyomo/core/tests/unit/test_mutable.py index 8b1ea3bf655..933ef1fe3dc 100644 --- a/pyomo/core/tests/unit/test_mutable.py +++ b/pyomo/core/tests/unit/test_mutable.py @@ -17,12 +17,14 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest from pyomo.environ import AbstractModel, Param, Var, Constraint, value + class TestMutable(unittest.TestCase): def test_mutable_constraint_upper(self): model = AbstractModel() @@ -31,6 +33,7 @@ def test_mutable_constraint_upper(self): def constraint_rule(m): return m.X <= m.P + model.C = Constraint(rule=constraint_rule) instance = model.create_instance() @@ -41,7 +44,6 @@ def constraint_rule(m): self.assertEqual(value(instance.C.upper), 4.0) - def test_mutable_constraint_lower(self): model = AbstractModel() model.Q = Param(initialize=2.0, mutable=True) @@ -49,6 +51,7 @@ def test_mutable_constraint_lower(self): def constraint_rule(m): return m.X >= m.Q + model.C = Constraint(rule=constraint_rule) instance = model.create_instance() @@ -59,7 +62,6 @@ def constraint_rule(m): self.assertEqual(value(instance.C.lower), 4.0) - def test_mutable_constraint_both(self): model = AbstractModel() model.P = Param(initialize=4.0, mutable=True) @@ -68,6 +70,7 @@ def test_mutable_constraint_both(self): def constraint_rule(m): return (m.Q, m.X, m.P) + model.C = Constraint(rule=constraint_rule) instance = model.create_instance() @@ -81,12 +84,10 @@ def constraint_rule(m): self.assertEqual(value(instance.C.lower), 1.0) self.assertEqual(value(instance.C.upper), 8.0) - - def test_mutable_var_bounds_lower(self): model = AbstractModel() model.P = Param(initialize=2.0, mutable=True) - model.X = Var(bounds=(model.P,None)) + model.X = Var(bounds=(model.P, None)) instance = model.create_instance() @@ -96,11 +97,10 @@ def test_mutable_var_bounds_lower(self): self.assertEqual(instance.X.bounds, (4.0, None)) - def test_mutable_var_bounds_upper(self): model = AbstractModel() model.Q = Param(initialize=2.0, mutable=True) - model.X = Var(bounds=(model.Q,None)) + model.X = Var(bounds=(model.Q, None)) instance = model.create_instance() @@ -110,12 +110,11 @@ def test_mutable_var_bounds_upper(self): self.assertEqual(instance.X.bounds, (4.0, None)) - def test_mutable_var_bounds_both(self): model = AbstractModel() model.P = Param(initialize=4.0, mutable=True) model.Q = Param(initialize=2.0, mutable=True) - model.X = Var(bounds=(model.P,model.Q)) + model.X = Var(bounds=(model.P, model.Q)) instance = model.create_instance() @@ -128,6 +127,6 @@ def test_mutable_var_bounds_both(self): self.assertEqual(value(instance.X.lb), 8.0) self.assertEqual(value(instance.X.ub), 1.0) + if __name__ == "__main__": unittest.main() - diff --git a/pyomo/core/tests/unit/test_numeric_expr.py b/pyomo/core/tests/unit/test_numeric_expr.py index ae8947c3d6c..13af5adc9bb 100644 --- a/pyomo/core/tests/unit/test_numeric_expr.py +++ b/pyomo/core/tests/unit/test_numeric_expr.py @@ -19,7 +19,8 @@ from collections import defaultdict from os.path import abspath, dirname, join -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep from filecmp import cmp import pyomo.common.unittest as unittest @@ -27,33 +28,91 @@ from io import StringIO from pyomo.environ import ( - ConcreteModel, AbstractModel, RangeSet, Var, Param, Set, Constraint, - ConstraintList, Expression, Objective, Reals, ExternalFunction, - PositiveReals, log10, exp, floor, ceil, log, cos, sin, tan, acos, - asin, atan, sinh, cosh, tanh, acosh, asinh, atanh, sqrt, value, - quicksum, sum_product, is_fixed, is_constant + ConcreteModel, + AbstractModel, + RangeSet, + Var, + Param, + Set, + Constraint, + ConstraintList, + Expression, + Objective, + Reals, + ExternalFunction, + PositiveReals, + log10, + exp, + floor, + ceil, + log, + cos, + sin, + tan, + acos, + asin, + atan, + sinh, + cosh, + tanh, + acosh, + asinh, + atanh, + sqrt, + value, + quicksum, + sum_product, + is_fixed, + is_constant, ) from pyomo.kernel import variable, expression, objective +from pyomo.core.expr.expr_common import ExpressionType, clone_counter from pyomo.core.expr.numvalue import ( - NumericConstant, as_numeric, native_numeric_types, - is_potentially_variable, polynomial_degree + NumericConstant, + as_numeric, + native_numeric_types, + native_types, + is_potentially_variable, + polynomial_degree, ) +from pyomo.core.expr.base import ExpressionBase from pyomo.core.expr.numeric_expr import ( - ExpressionBase, UnaryFunctionExpression, SumExpression, PowExpression, - ProductExpression, NegationExpression, linear_expression, - MonomialTermExpression, LinearExpression, DivisionExpression, - NPV_NegationExpression, NPV_ProductExpression, - NPV_PowExpression, NPV_DivisionExpression, - decompose_term, clone_counter, nonlinear_expression, - _MutableLinearExpression, _MutableSumExpression, _decompose_linear_terms, - LinearDecompositionError, MaxExpression, MinExpression, + NumericExpression, + UnaryFunctionExpression, + SumExpression, + PowExpression, + ProductExpression, + NegationExpression, + linear_expression, + MonomialTermExpression, + LinearExpression, + DivisionExpression, + NPV_NegationExpression, + NPV_ProductExpression, + NPV_PowExpression, + NPV_DivisionExpression, + NPV_SumExpression, + decompose_term, + nonlinear_expression, + _MutableLinearExpression, + _MutableSumExpression, + _MutableNPVSumExpression, + _decompose_linear_terms, + LinearDecompositionError, + MaxExpression, + MinExpression, + _balanced_parens, +) +from pyomo.core.expr.compare import ( + assertExpressionsEqual, + assertExpressionsStructurallyEqual, ) -import pyomo.core.expr.logical_expr as logical_expr +from pyomo.core.expr.relational_expr import RelationalExpression, EqualityExpression +from pyomo.core.expr.relational_expr import RelationalExpression, EqualityExpression from pyomo.common.errors import PyomoException -from pyomo.core.expr.visitor import (expression_to_string, - clone_expression) -from pyomo.core.expr.current import Expr_if +from pyomo.core.expr.visitor import expression_to_string, clone_expression +from pyomo.core.expr import Expr_if from pyomo.core.base.label import NumericLabeler from pyomo.core.expr.template_expr import IndexTemplate from pyomo.core.expr import expr_common @@ -97,13 +156,12 @@ def __eq__(self, other): class TestExpression_EvaluateNumericConstant(unittest.TestCase): - def create(self, val, domain): # Create the type of expression term that we are testing return NumericConstant(val) def value_check(self, exp, val): - """ Check the value of the expression. """ + """Check the value of the expression.""" # # Confirm whether 'exp' is an expression # @@ -121,74 +179,74 @@ def test_lt(self): # # Test the 'less than' operator # - a=self.create(1.3, Reals) - b=self.create(2.0, Reals) - self.relation_check(ab, False) - self.relation_check(a>a, False) - self.relation_check(b>a, True) - self.relation_check(a>2.0, False) - self.relation_check(a>1.3, False) - self.relation_check(b>1.3, True) - self.relation_check(1.3>b, False) - self.relation_check(1.3>a, False) - self.relation_check(2.0>a, True) + a = self.create(1.3, Reals) + b = self.create(2.0, Reals) + self.relation_check(a > b, False) + self.relation_check(a > a, False) + self.relation_check(b > a, True) + self.relation_check(a > 2.0, False) + self.relation_check(a > 1.3, False) + self.relation_check(b > 1.3, True) + self.relation_check(1.3 > b, False) + self.relation_check(1.3 > a, False) + self.relation_check(2.0 > a, True) def test_eq(self): # # Test the 'equals' operator # - a=self.create(1.3, Reals) - b=self.create(2.0, Reals) - self.relation_check(a==b, False) - self.relation_check(a==a, True) - self.relation_check(b==a, False) - self.relation_check(a==2.0, False) - self.relation_check(a==1.3, True) - self.relation_check(b==1.3, False) - self.relation_check(1.3==b, False) - self.relation_check(1.3==a, True) - self.relation_check(2.0==a, False) + a = self.create(1.3, Reals) + b = self.create(2.0, Reals) + self.relation_check(a == b, False) + self.relation_check(a == a, True) + self.relation_check(b == a, False) + self.relation_check(a == 2.0, False) + self.relation_check(a == 1.3, True) + self.relation_check(b == 1.3, False) + self.relation_check(1.3 == b, False) + self.relation_check(1.3 == a, True) + self.relation_check(2.0 == a, False) def test_arithmetic(self): # # # Test binary arithmetic operators # - a=self.create(-0.5, Reals) - b=self.create(2.0, Reals) - self.value_check(a-b, -2.5) - self.value_check(a+b, 1.5) - self.value_check(a*b, -1.0) - self.value_check(b/a, -4.0) + a = self.create(-0.5, Reals) + b = self.create(2.0, Reals) + self.value_check(a - b, -2.5) + self.value_check(a + b, 1.5) + self.value_check(a * b, -1.0) + self.value_check(b / a, -4.0) self.value_check(a**b, 0.25) - self.value_check(a-2.0, -2.5) - self.value_check(a+2.0, 1.5) - self.value_check(a*2.0, -1.0) - self.value_check(b/(0.5), 4.0) + self.value_check(a - 2.0, -2.5) + self.value_check(a + 2.0, 1.5) + self.value_check(a * 2.0, -1.0) + self.value_check(b / (0.5), 4.0) self.value_check(a**2.0, 0.25) - self.value_check(0.5-b, -1.5) - self.value_check(0.5+b, 2.5) - self.value_check(0.5*b, 1.0) - self.value_check(2.0/a, -4.0) - self.value_check((0.5)**b, 0.25) + self.value_check(0.5 - b, -1.5) + self.value_check(0.5 + b, 2.5) + self.value_check(0.5 * b, 1.0) + self.value_check(2.0 / a, -4.0) + self.value_check((0.5) ** b, 0.25) self.value_check(-a, 0.5) self.assertIs(+a, a) @@ -196,19 +254,18 @@ def test_arithmetic(self): class TestExpression_EvaluateNumericValue(TestExpression_EvaluateNumericConstant): - def create(self, val, domain): tmp = Var(name='unknown', initialize=val, domain=domain) tmp.construct() return tmp def relation_check(self, exp, val): - """ Check a relationship expression. """ + """Check a relationship expression.""" # # Confirm that this is a relational expression # - self.assertTrue(isinstance(exp, ExpressionBase)) - self.assertTrue(exp.is_relational()) + self.assertTrue(isinstance(exp, RelationalExpression)) + self.assertTrue(exp.is_expression_type(ExpressionType.RELATIONAL)) # # Check that the expression evaluates correctly # @@ -221,14 +278,14 @@ def relation_check(self, exp, val): # # Check that the expression evaluates to 'val' # - if isinstance(exp, logical_expr.EqualityExpression) and exp.args[0] is exp.args[1]: + if isinstance(exp, EqualityExpression) and exp.args[0] is exp.args[1]: self.assertEqual(bool(exp), val) else: with self.assertRaises(PyomoException): bool(exp) def value_check(self, exp, val): - """ Check the value of the expression. """ + """Check the value of the expression.""" # # Confirm whether 'exp' is an expression # @@ -240,51 +297,45 @@ def value_check(self, exp, val): class TestExpression_EvaluateVarData(TestExpression_EvaluateNumericValue): - def create(self, val, domain): - tmp=_GeneralVarData() + tmp = _GeneralVarData() tmp.domain = domain - tmp.value=val + tmp.value = val return tmp class TestExpression_EvaluateVar(TestExpression_EvaluateNumericValue): - def create(self, val, domain): - tmp=Var(name="unknown",domain=domain) + tmp = Var(name="unknown", domain=domain) tmp.construct() - tmp.value=val + tmp.value = val return tmp class TestExpression_EvaluateFixedVar(TestExpression_EvaluateNumericValue): - def create(self, val, domain): - tmp=Var(name="unknown", domain=domain) + tmp = Var(name="unknown", domain=domain) tmp.construct() - tmp.fixed=True - tmp.value=val + tmp.fixed = True + tmp.value = val return tmp class TestExpression_EvaluateImmutableParam(TestExpression_EvaluateNumericConstant): - def create(self, val, domain): - tmp=Param(default=val, mutable=False, within=domain) + tmp = Param(default=val, mutable=False, within=domain) tmp.construct() return tmp class TestExpression_Evaluate_MutableParam(TestExpression_EvaluateNumericValue): - def create(self, val, domain): - tmp=Param(default=val, mutable=True, within=domain) + tmp = Param(default=val, mutable=True, within=domain) tmp.construct() return tmp class TestExpression_Intrinsic(unittest.TestCase): - def test_abs_numval(self): e = abs(1.5) self.assertAlmostEqual(value(e), 1.5) @@ -392,7 +443,7 @@ def test_pow(self): m = ConcreteModel() m.v = Var() m.p = Param(mutable=True) - e = pow(m.v,m.p) + e = pow(m.v, m.p) self.assertEqual(e.__class__, PowExpression) m.v.value = 2 m.p.value = 0 @@ -418,7 +469,7 @@ def test_sin(self): self.assertEqual(e.__class__, UnaryFunctionExpression) m.v.value = 0 self.assertAlmostEqual(value(e), 0.0) - m.v.value = math.pi/2.0 + m.v.value = math.pi / 2.0 self.assertAlmostEqual(value(e), 1.0) def test_cos(self): @@ -428,7 +479,7 @@ def test_cos(self): self.assertEqual(e.__class__, UnaryFunctionExpression) m.v.value = 0 self.assertAlmostEqual(value(e), 1.0) - m.v.value = math.pi/2.0 + m.v.value = math.pi / 2.0 self.assertAlmostEqual(value(e), 0.0) def test_tan(self): @@ -438,7 +489,7 @@ def test_tan(self): self.assertEqual(e.__class__, UnaryFunctionExpression) m.v.value = 0 self.assertAlmostEqual(value(e), 0.0) - m.v.value = math.pi/4.0 + m.v.value = math.pi / 4.0 self.assertAlmostEqual(value(e), 1.0) def test_asin(self): @@ -449,7 +500,7 @@ def test_asin(self): m.v.value = 0 self.assertAlmostEqual(value(e), 0.0) m.v.value = 1.0 - self.assertAlmostEqual(value(e), math.pi/2.0) + self.assertAlmostEqual(value(e), math.pi / 2.0) def test_acos(self): m = ConcreteModel() @@ -458,8 +509,8 @@ def test_acos(self): self.assertEqual(e.__class__, UnaryFunctionExpression) m.v.value = 1.0 self.assertAlmostEqual(value(e), 0.0) - m.v.value = 0.0 - self.assertAlmostEqual(value(e), math.pi/2.0) + m.v.value = 0.0 + self.assertAlmostEqual(value(e), math.pi / 2.0) def test_atan(self): m = ConcreteModel() @@ -469,7 +520,7 @@ def test_atan(self): m.v.value = 0 self.assertAlmostEqual(value(e), 0.0) m.v.value = 1.0 - self.assertAlmostEqual(value(e), math.pi/4.0) + self.assertAlmostEqual(value(e), math.pi / 4.0) def test_sinh(self): m = ConcreteModel() @@ -479,7 +530,7 @@ def test_sinh(self): m.v.value = 0.0 self.assertAlmostEqual(value(e), 0.0) m.v.value = 1.0 - self.assertAlmostEqual(value(e), (math.e-1.0/math.e)/2.0) + self.assertAlmostEqual(value(e), (math.e - 1.0 / math.e) / 2.0) def test_cosh(self): m = ConcreteModel() @@ -489,7 +540,7 @@ def test_cosh(self): m.v.value = 0.0 self.assertAlmostEqual(value(e), 1.0) m.v.value = 1.0 - self.assertAlmostEqual(value(e), (math.e+1.0/math.e)/2.0) + self.assertAlmostEqual(value(e), (math.e + 1.0 / math.e) / 2.0) def test_tanh(self): m = ConcreteModel() @@ -499,7 +550,9 @@ def test_tanh(self): m.v.value = 0.0 self.assertAlmostEqual(value(e), 0.0) m.v.value = 1.0 - self.assertAlmostEqual(value(e), (math.e-1.0/math.e)/(math.e+1.0/math.e)) + self.assertAlmostEqual( + value(e), (math.e - 1.0 / math.e) / (math.e + 1.0 / math.e) + ) def test_asinh(self): m = ConcreteModel() @@ -508,7 +561,7 @@ def test_asinh(self): self.assertEqual(e.__class__, UnaryFunctionExpression) m.v.value = 0.0 self.assertAlmostEqual(value(e), 0.0) - m.v.value = (math.e-1.0/math.e)/2.0 + m.v.value = (math.e - 1.0 / math.e) / 2.0 self.assertAlmostEqual(value(e), 1.0) def test_acosh(self): @@ -518,7 +571,7 @@ def test_acosh(self): self.assertEqual(e.__class__, UnaryFunctionExpression) m.v.value = 1.0 self.assertAlmostEqual(value(e), 0.0) - m.v.value = (math.e+1.0/math.e)/2.0 + m.v.value = (math.e + 1.0 / math.e) / 2.0 self.assertAlmostEqual(value(e), 1.0) def test_atanh(self): @@ -528,12 +581,11 @@ def test_atanh(self): self.assertEqual(e.__class__, UnaryFunctionExpression) m.v.value = 0.0 self.assertAlmostEqual(value(e), 0.0) - m.v.value = (math.e-1.0/math.e)/(math.e+1.0/math.e) + m.v.value = (math.e - 1.0 / math.e) / (math.e + 1.0 / math.e) self.assertAlmostEqual(value(e), 1.0) class TestNumericValue(unittest.TestCase): - def test_asnum(self): try: as_numeric(None) @@ -547,9 +599,9 @@ def test_vals(self): # a = NumericConstant(1.1) b = float(value(a)) - self.assertEqual(b,1.1) + self.assertEqual(b, 1.1) b = int(value(a)) - self.assertEqual(b,1) + self.assertEqual(b, 1) def test_ops(self): # @@ -558,15 +610,15 @@ def test_ops(self): a = NumericConstant(1.1) b = NumericConstant(2.2) c = NumericConstant(-2.2) - #a <= b + # a <= b self.assertEqual(a() <= b(), True) self.assertEqual(a() >= b(), False) self.assertEqual(a() == b(), False) - self.assertEqual(abs(a() + b()-3.3) <= 1e-7, True) - self.assertEqual(abs(b() - a()-1.1) <= 1e-7, True) - self.assertEqual(abs(b() * 3-6.6) <= 1e-7, True) - self.assertEqual(abs(b() / 2-1.1) <= 1e-7, True) - self.assertEqual(abs(abs(-b())-2.2) <= 1e-7, True) + self.assertEqual(abs(a() + b() - 3.3) <= 1e-7, True) + self.assertEqual(abs(b() - a() - 1.1) <= 1e-7, True) + self.assertEqual(abs(b() * 3 - 6.6) <= 1e-7, True) + self.assertEqual(abs(b() / 2 - 1.1) <= 1e-7, True) + self.assertEqual(abs(abs(-b()) - 2.2) <= 1e-7, True) self.assertEqual(abs(c()), 2.2) # # Check that we can get the string representation for a numeric @@ -583,7 +635,6 @@ def test_var(self): class TestGenerate_SumExpression(unittest.TestCase): - def test_simpleSum(self): # a + b m = AbstractModel() @@ -591,46 +642,46 @@ def test_simpleSum(self): m.b = Var() e = m.a + m.b # - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1), m.b) - self.assertEqual(e.size(), 3) + assertExpressionsEqual( + self, + e, + LinearExpression( + [MonomialTermExpression((1, m.a)), MonomialTermExpression((1, m.b))] + ), + ) self.assertRaises(KeyError, e.arg, 3) - self.assertIs(e.arg(-1), m.b) def test_simpleSum_API(self): m = ConcreteModel() m.a = Var() m.b = Var() e = m.a + m.b - e += (2*m.a) - self.assertIs(e.nargs(), 3) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1), m.b) - self.assertIs(type(e.arg(2)), MonomialTermExpression) - self.assertEqual(id(e.arg(-1)), id(e.arg(2))) + e += 2 * m.a + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((1, m.b)), + MonomialTermExpression((2, m.a)), + ] + ), + ) def test_constSum(self): # a + 5 m = AbstractModel() m.a = Var() - e = m.a + 5 # - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1), 5) - self.assertEqual(e.size(), 3) + assertExpressionsEqual( + self, m.a + 5, LinearExpression([MonomialTermExpression((1, m.a)), 5]) + ) - e = 5 + m.a - # - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), 5) - self.assertIs(e.arg(1), m.a) - self.assertEqual(e.size(), 3) + assertExpressionsEqual( + self, 5 + m.a, LinearExpression([5, MonomialTermExpression((1, m.a))]) + ) def test_nestedSum(self): # @@ -651,28 +702,28 @@ def test_nestedSum(self): # a b e1 = m.a + m.b e = e1 + 5 - # - self.assertIs(type(e), expectedType) - self.assertEqual(e.nargs(), 3) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1), m.b) - self.assertIs(e.arg(2), 5) - self.assertEqual(e.size(), 4) + assertExpressionsEqual( + self, + e, + LinearExpression( + [MonomialTermExpression((1, m.a)), MonomialTermExpression((1, m.b)), 5] + ), + ) - # + - # / \ + # + + # / \ # 5 + # / \ # a b e1 = m.a + m.b e = 5 + e1 - # - self.assertIs(type(e), expectedType) - self.assertEqual(e.nargs(), 3) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1), m.b) - self.assertIs(e.arg(2), 5) - self.assertEqual(e.size(), 4) + assertExpressionsEqual( + self, + e, + LinearExpression( + [MonomialTermExpression((1, m.a)), MonomialTermExpression((1, m.b)), 5] + ), + ) # + # / \ @@ -681,28 +732,36 @@ def test_nestedSum(self): # a b e1 = m.a + m.b e = e1 + m.c - # - self.assertIs(type(e), expectedType) - self.assertEqual(e.nargs(), 3) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1), m.b) - self.assertIs(e.arg(2), m.c) - self.assertEqual(e.size(), 4) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((1, m.b)), + MonomialTermExpression((1, m.c)), + ] + ), + ) - # + - # / \ + # + + # / \ # c + # / \ # a b e1 = m.a + m.b e = m.c + e1 - # - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 3) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1), m.b) - self.assertIs(e.arg(2), m.c) - self.assertEqual(e.size(), 4) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((1, m.b)), + MonomialTermExpression((1, m.c)), + ] + ), + ) # + # / \ @@ -713,13 +772,18 @@ def test_nestedSum(self): e2 = m.c + m.d e = e1 + e2 # - self.assertIs(type(e), expectedType) - self.assertEqual(e.nargs(), 4) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1), m.b) - self.assertIs(e.arg(2), m.c) - self.assertIs(e.arg(3), m.d) - self.assertEqual(e.size(), 5) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((1, m.b)), + MonomialTermExpression((1, m.c)), + MonomialTermExpression((1, m.d)), + ] + ), + ) def test_nestedSum2(self): # @@ -739,15 +803,30 @@ def test_nestedSum2(self): # / \ # 2 + # / \ - # a b + # a b e1 = m.a + m.b - e = 2*e1 + m.c - - self.assertIs(type(e), expectedType) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0).arg(1), e1) - self.assertIs(e.arg(1), m.c) - self.assertEqual(e.size(), 7) + e = 2 * e1 + m.c + + assertExpressionsEqual( + self, + e, + SumExpression( + [ + ProductExpression( + ( + 2, + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((1, m.b)), + ] + ), + ) + ), + m.c, + ] + ), + ) # * # / \ @@ -757,15 +836,35 @@ def test_nestedSum2(self): # / \ # 2 + # / \ - # a b + # a b e1 = m.a + m.b - e = 3*(2*e1 + m.c) - - self.assertIs(type(e.arg(1)), expectedType) - self.assertEqual(e.arg(1).nargs(), 2) - self.assertIs(e.arg(1).arg(0).arg(1), e1) - self.assertIs(e.arg(1).arg(1), m.c) - self.assertEqual(e.size(), 9) + e = 3 * (2 * e1 + m.c) + + assertExpressionsEqual( + self, + e, + ProductExpression( + ( + 3, + SumExpression( + [ + ProductExpression( + ( + 2, + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((1, m.b)), + ] + ), + ) + ), + m.c, + ] + ), + ) + ), + ) def test_trivialSum(self): # @@ -804,12 +903,13 @@ def test_sumOf_nestedTrivialProduct(self): e1 = m.a * 5 e = e1 + m.b # - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0).arg(0), 5) - self.assertIs(e.arg(0).arg(1), m.a) - self.assertIs(e.arg(1), m.b) - self.assertEqual(e.size(), 5) + assertExpressionsEqual( + self, + e, + LinearExpression( + [MonomialTermExpression((5, m.a)), MonomialTermExpression((1, m.b))] + ), + ) # + # / \ @@ -818,12 +918,13 @@ def test_sumOf_nestedTrivialProduct(self): # a 5 e = m.b + e1 # - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), m.b) - self.assertIs(e.arg(1).arg(0), 5) - self.assertIs(e.arg(1).arg(1), m.a) - self.assertEqual(e.size(), 5) + assertExpressionsEqual( + self, + e, + LinearExpression( + [MonomialTermExpression((1, m.b)), MonomialTermExpression((5, m.a))] + ), + ) # + # / \ @@ -833,13 +934,17 @@ def test_sumOf_nestedTrivialProduct(self): e2 = m.b + m.c e = e1 + e2 # - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 3) - self.assertIs(e.arg(0), m.b) - self.assertIs(e.arg(1), m.c) - self.assertIs(e.arg(2).arg(0), 5) - self.assertIs(e.arg(2).arg(1), m.a) - self.assertEqual(e.size(), 6) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, m.b)), + MonomialTermExpression((1, m.c)), + MonomialTermExpression((5, m.a)), + ] + ), + ) # + # / \ @@ -848,14 +953,18 @@ def test_sumOf_nestedTrivialProduct(self): # b c a 5 e2 = m.b + m.c e = e2 + e1 - - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 3) - self.assertIs(e.arg(0), m.b) - self.assertIs(e.arg(1), m.c) - self.assertIs(e.arg(2).arg(0), 5) - self.assertIs(e.arg(2).arg(1), m.a) - self.assertEqual(e.size(), 6) + # + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, m.b)), + MonomialTermExpression((1, m.c)), + MonomialTermExpression((5, m.a)), + ] + ), + ) def test_simpleDiff(self): # @@ -869,12 +978,13 @@ def test_simpleDiff(self): # / \ # a b e = m.a - m.b - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), m.a) - self.assertIs(type(e.arg(1)), MonomialTermExpression) - self.assertEqual(e.arg(1).arg(0), -1) - self.assertIs(e.arg(1).arg(1), m.b) + assertExpressionsEqual( + self, + e, + LinearExpression( + [MonomialTermExpression((1, m.a)), MonomialTermExpression((-1, m.b))] + ), + ) def test_constDiff(self): # @@ -886,24 +996,16 @@ def test_constDiff(self): # - # / \ # a 5 - e = m.a - 5 - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), m.a) - self.assertEqual(e.arg(1), -5) - self.assertEqual(e.size(), 3) + assertExpressionsEqual( + self, m.a - 5, LinearExpression([MonomialTermExpression((1, m.a)), -5]) + ) # - # / \ # 5 a - e = 5 - m.a - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), 5) - self.assertIs(type(e.arg(1)), MonomialTermExpression) - self.assertIs(e.arg(1).arg(0), -1) - self.assertIs(e.arg(1).arg(1), m.a) - self.assertEqual(e.size(), 5) + assertExpressionsEqual( + self, 5 - m.a, LinearExpression([5, MonomialTermExpression((-1, m.a))]) + ) def test_paramDiff(self): # @@ -917,24 +1019,21 @@ def test_paramDiff(self): # / \ # a p e = m.a - m.p - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), m.a) - self.assertIs(type(e.arg(1)), NPV_NegationExpression) - self.assertIs(e.arg(1).arg(0), m.p) - self.assertEqual(e.size(), 4) + assertExpressionsEqual( + self, + e, + LinearExpression( + [MonomialTermExpression((1, m.a)), NPV_NegationExpression((m.p,))] + ), + ) # - # / \ # m.p a e = m.p - m.a - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), m.p) - self.assertIs(type(e.arg(1)), MonomialTermExpression) - self.assertEqual(e.arg(1).arg(0), -1) - self.assertIs(e.arg(1).arg(1), m.a) - self.assertEqual(e.size(), 5) + assertExpressionsEqual( + self, e, LinearExpression([m.p, MonomialTermExpression((-1, m.a))]) + ) def test_constparamDiff(self): # @@ -975,13 +1074,11 @@ def test_termDiff(self): # 2 a # - e = 5 - 2*m.a + e = 5 - 2 * m.a - self.assertIs(type(e), SumExpression) - self.assertEqual(e.arg(0), 5) - self.assertIs(type(e.arg(1)), MonomialTermExpression) - self.assertEqual(e.arg(1).arg(0), -2) - self.assertIs(e.arg(1).arg(1), m.a) + assertExpressionsEqual( + self, e, LinearExpression([5, MonomialTermExpression((-2, m.a))]) + ) def test_nestedDiff(self): # @@ -1000,13 +1097,17 @@ def test_nestedDiff(self): # a b e1 = m.a - m.b e = e1 - 5 - self.assertIs(type(e), SumExpression) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1).__class__, MonomialTermExpression) - self.assertIs(e.arg(1).arg(0), -1) - self.assertIs(e.arg(1).arg(1), m.b) - self.assertIs(e.arg(2), -5) - self.assertEqual(e.size(), 6) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((-1, m.b)), + -5, + ] + ), + ) # - # / \ @@ -1015,11 +1116,25 @@ def test_nestedDiff(self): # a b e1 = m.a - m.b e = 5 - e1 - self.assertIs(type(e), SumExpression) - self.assertIs(e.arg(0), 5) - self.assertIs(type(e.arg(1)), NegationExpression) - self.assertIs(e.arg(1).arg(0), e1) - self.assertEqual(e.size(), 8) + assertExpressionsEqual( + self, + e, + SumExpression( + [ + 5, + NegationExpression( + ( + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((-1, m.b)), + ] + ), + ) + ), + ] + ), + ) # - # / \ @@ -1028,15 +1143,17 @@ def test_nestedDiff(self): # a b e1 = m.a - m.b e = e1 - m.c - self.assertIs(type(e), SumExpression) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1).__class__, MonomialTermExpression) - self.assertEqual(e.arg(1).arg(0), -1) - self.assertIs(e.arg(1).arg(1), m.b) - self.assertIs(type(e.arg(2)), MonomialTermExpression) - self.assertEqual(e.arg(2).arg(0), -1) - self.assertIs(e.arg(2).arg(1), m.c) - self.assertEqual(e.size(), 8) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((-1, m.b)), + MonomialTermExpression((-1, m.c)), + ] + ), + ) # - # / \ @@ -1045,11 +1162,25 @@ def test_nestedDiff(self): # a b e1 = m.a - m.b e = m.c - e1 - self.assertIs(type(e), SumExpression) - self.assertIs(e.arg(0), m.c) - self.assertIs(type(e.arg(1)), NegationExpression) - self.assertIs(e.arg(1).arg(0), e1) - self.assertEqual(e.size(), 8) + assertExpressionsEqual( + self, + e, + SumExpression( + [ + m.c, + NegationExpression( + ( + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((-1, m.b)), + ] + ), + ) + ), + ] + ), + ) # - # / \ @@ -1059,11 +1190,30 @@ def test_nestedDiff(self): e1 = m.a - m.b e2 = m.c - m.d e = e1 - e2 - self.assertIs(type(e), SumExpression) - self.assertIs(e.arg(0), m.a) - self.assertIs(e.arg(1).arg(1), m.b) - self.assertIs(e.arg(2).arg(0), e2) - self.assertEqual(e.size(), 11) + assertExpressionsEqual( + self, + e, + SumExpression( + [ + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((-1, m.b)), + ] + ), + NegationExpression( + ( + LinearExpression( + [ + MonomialTermExpression((1, m.c)), + MonomialTermExpression((-1, m.d)), + ] + ), + ) + ), + ] + ), + ) def test_negation_param(self): # @@ -1071,9 +1221,9 @@ def test_negation_param(self): # m = AbstractModel() m.p = Param() - e = - m.p + e = -m.p self.assertIs(type(e), NPV_NegationExpression) - e = - e + e = -e self.assertTrue(isinstance(e, Param)) def test_negation_mutableparam(self): @@ -1082,10 +1232,9 @@ def test_negation_mutableparam(self): # m = AbstractModel() m.p = Param(mutable=True, initialize=1.0) - e = - m.p - self.assertIs(type(e), NPV_NegationExpression) - e = - e - self.assertTrue(isinstance(e, Param)) + e = -m.p + assertExpressionsEqual(self, e, NPV_NegationExpression((m.p,))) + assertExpressionsEqual(self, -e, m.p) def test_negation_terms(self): # @@ -1094,19 +1243,16 @@ def test_negation_terms(self): m = AbstractModel() m.v = Var() m.p = Param(mutable=True, initialize=1.0) - e = - m.p*m.v - self.assertIs(type(e), MonomialTermExpression) - self.assertIs(type(e.arg(0)), NPV_NegationExpression) - e = - e - self.assertIs(type(e), MonomialTermExpression) - self.assertIs(type(e.arg(0)), NPV_NegationExpression) + e = -m.p * m.v + assertExpressionsEqual( + self, e, MonomialTermExpression((NPV_NegationExpression((m.p,)), m.v)) + ) + assertExpressionsEqual(self, -e, MonomialTermExpression((m.p, m.v))) + # - e = - 5*m.v - self.assertIs(type(e), MonomialTermExpression) - self.assertEqual(e.arg(0), -5) - e = - e - self.assertIs(type(e), MonomialTermExpression) - self.assertEqual(e.arg(0), 5) + e = -5 * m.v + assertExpressionsEqual(self, e, MonomialTermExpression((-5, m.v))) + assertExpressionsEqual(self, -e, MonomialTermExpression((5, m.v))) def test_trivialDiff(self): # @@ -1140,20 +1286,20 @@ def test_trivialDiff(self): self.assertIs(e.arg(0), m.p) # 0 - 5*a - e = 0 - 5*m.a + e = 0 - 5 * m.a self.assertIs(type(e), MonomialTermExpression) self.assertEqual(e.nargs(), 2) self.assertEqual(e.arg(0), -5) # 0 - p*a - e = 0 - m.p*m.a + e = 0 - m.p * m.a self.assertIs(type(e), MonomialTermExpression) self.assertEqual(e.nargs(), 2) self.assertIs(type(e.arg(0)), NPV_NegationExpression) self.assertIs(e.arg(0).arg(0), m.p) # 0 - a*a - e = 0 - m.a*m.a + e = 0 - m.a * m.a self.assertIs(type(e), NegationExpression) self.assertEqual(e.nargs(), 1) self.assertIs(type(e.arg(0)), ProductExpression) @@ -1243,12 +1389,13 @@ def test_sumOf_nestedTrivialProduct2(self): # a 5 e1 = m.a * m.p e = e1 - m.b - self.assertIs(type(e), SumExpression) - self.assertIs(e.arg(0), e1) - self.assertIs(type(e.arg(1)), MonomialTermExpression) - self.assertEqual(e.arg(1).arg(0), -1) - self.assertIs(e.arg(1).arg(1), m.b) - self.assertEqual(e.size(), 7) + assertExpressionsEqual( + self, + e, + LinearExpression( + [MonomialTermExpression((m.p, m.a)), MonomialTermExpression((-1, m.b))] + ), + ) # - # / \ @@ -1257,14 +1404,16 @@ def test_sumOf_nestedTrivialProduct2(self): # a 5 e1 = m.a * m.p e = m.b - e1 - self.assertIs(type(e), SumExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), m.b) - self.assertIs(type(e.arg(1)), MonomialTermExpression) - self.assertIs(type(e.arg(1).arg(0)), NPV_NegationExpression) - self.assertIs(e.arg(1).arg(0).arg(0), m.p) - self.assertIs(e.arg(1).arg(1), m.a) - self.assertEqual(e.size(), 6) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, m.b)), + MonomialTermExpression((NPV_NegationExpression((m.p,)), m.a)), + ] + ), + ) # - # / \ @@ -1274,11 +1423,25 @@ def test_sumOf_nestedTrivialProduct2(self): e1 = m.a * m.p e2 = m.b - m.c e = e1 - e2 - self.assertIs(type(e), SumExpression) - self.assertIs(e.arg(0), e1) - self.assertIs(type(e.arg(1)), NegationExpression) - self.assertIs(e.arg(1).arg(0), e2) - self.assertEqual(e.size(), 10) + assertExpressionsEqual( + self, + e, + SumExpression( + [ + MonomialTermExpression((m.p, m.a)), + NegationExpression( + ( + LinearExpression( + [ + MonomialTermExpression((1, m.b)), + MonomialTermExpression((-1, m.c)), + ] + ), + ) + ), + ] + ), + ) # - # / \ @@ -1288,20 +1451,21 @@ def test_sumOf_nestedTrivialProduct2(self): e1 = m.a * m.p e2 = m.b - m.c e = e2 - e1 - self.assertIs(type(e), SumExpression) - self.assertIs(e.arg(0), m.b) - self.assertIs(type(e.arg(1)), MonomialTermExpression) - self.assertEqual(e.arg(1).arg(0), -1) - self.assertIs(e.arg(1).arg(1), m.c) - self.assertIs(type(e.arg(2)), MonomialTermExpression) - self.assertIs(type(e.arg(2).arg(0)), NPV_NegationExpression) - self.assertIs(e.arg(2).arg(0).arg(0), m.p) - self.assertIs(e.arg(2).arg(1), m.a) - self.assertEqual(e.size(), 9) + self.maxDiff = None + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, m.b)), + MonomialTermExpression((-1, m.c)), + MonomialTermExpression((NPV_NegationExpression((m.p,)), m.a)), + ] + ), + ) class TestGenerate_ProductExpression(unittest.TestCase): - def test_simpleProduct(self): # # Check the structure of a simple product of variables @@ -1446,7 +1610,7 @@ def test_nestedProduct2(self): m.d = Var() # - # Check the structure of nested products + # Check the structure of nested sums # # * # / \ @@ -1460,19 +1624,34 @@ def test_nestedProduct2(self): e3 = e1 + m.d e = e2 * e3 - self.assertIs(type(e), ProductExpression) - self.assertEqual(e.nargs(), 2) - - self.assertIs(type(e.arg(0)), SumExpression) - self.assertIs(e.arg(0).nargs(), 3) - self.assertIs(e.arg(0).arg(0), m.a) - self.assertIs(e.arg(0).arg(1), m.b) - self.assertIs(e.arg(0).arg(2), m.c) - - self.assertIs(type(e.arg(1)), SumExpression) - self.assertIs(e.arg(1).nargs(), 2) - self.assertIs(e.arg(1).arg(1), m.d) - self.assertEqual(e.size(), 10) + assertExpressionsEqual( + self, + e, + ProductExpression( + ( + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((1, m.b)), + MonomialTermExpression((1, m.c)), + ] + ), + LinearExpression( + [ + MonomialTermExpression((1, m.a)), + MonomialTermExpression((1, m.b)), + MonomialTermExpression((1, m.d)), + ] + ), + ) + ), + ) + # Verify shared args... + self.assertIsNot(e1._args_, e2._args_) + self.assertIs(e1._args_, e3._args_) + self.assertIs(e1._args_, e.arg(1)._args_) + self.assertIs(e.arg(0).arg(0), e.arg(1).arg(0)) + self.assertIs(e.arg(0).arg(1), e.arg(1).arg(1)) # # Check the structure of nested products @@ -1489,29 +1668,18 @@ def test_nestedProduct2(self): e3 = e1 * m.d e = e2 * e3 # - self.assertEqual(e.size(), 11) - # - self.assertIs(type(e), ProductExpression) - self.assertEqual(e.nargs(), 2) - - self.assertIs(type(e.arg(0)), ProductExpression) - self.assertIs(e.arg(0).nargs(), 2) - self.assertIs(e.arg(0).arg(0), m.c) - - self.assertIs(type(e.arg(0).arg(1)), SumExpression) - self.assertIs(e.arg(0).arg(1).nargs(), 2) - self.assertIs(e.arg(0).arg(1).arg(0), m.a) - self.assertIs(e.arg(0).arg(1).arg(1), m.b) - - self.assertIs(type(e.arg(1)), ProductExpression) - self.assertIs(e.arg(1).nargs(), 2) - self.assertIs(e.arg(1).arg(1), m.d) - - self.assertIs(type(e.arg(1).arg(0)), SumExpression) - self.assertIs(e.arg(1).arg(0).nargs(), 2) - self.assertIs(e.arg(1).arg(0).arg(0), m.a) - self.assertIs(e.arg(1).arg(0).arg(1), m.b) - self.assertEqual(e.size(), 11) + inner = LinearExpression( + [MonomialTermExpression((1, m.a)), MonomialTermExpression((1, m.b))] + ) + assertExpressionsEqual( + self, + e, + ProductExpression( + (ProductExpression((m.c, inner)), ProductExpression((inner, m.d))) + ), + ) + # Verify shared args... + self.assertIs(e.arg(0).arg(1), e.arg(1).arg(0)) def test_nestedProduct3(self): # @@ -1543,13 +1711,9 @@ def test_nestedProduct3(self): # a b e1 = m.a * m.b e = e1 * 5 - self.assertIs(type(e), MonomialTermExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(type(e.arg(0)), NPV_ProductExpression) - self.assertEqual(e.arg(0).arg(0), 5) - self.assertIs(e.arg(0).arg(1), m.a) - self.assertIs(e.arg(1), m.b) - self.assertEqual(e.size(), 5) + assertExpressionsEqual( + self, e, MonomialTermExpression((NPV_ProductExpression((m.a, 5)), m.b)) + ) # * # / \ @@ -1627,7 +1791,6 @@ def test_nestedProduct3(self): self.assertIs(e.arg(1).arg(1), m.d) self.assertEqual(e.size(), 7) - def test_trivialProduct(self): # # Check that multiplying by zero gives zero @@ -1638,44 +1801,37 @@ def test_trivialProduct(self): m.q = Param(initialize=1) e = m.a * 0 - self.assertIs(type(e), int) - self.assertEqual(e, 0) + assertExpressionsEqual(self, e, MonomialTermExpression((0, m.a))) e = 0 * m.a - self.assertIs(type(e), int) - self.assertEqual(e, 0) + assertExpressionsEqual(self, e, MonomialTermExpression((0, m.a))) e = m.a * m.p - self.assertIs(type(e), int) - self.assertEqual(e, 0) + assertExpressionsEqual(self, e, MonomialTermExpression((0, m.a))) e = m.p * m.a - self.assertIs(type(e), int) - self.assertEqual(e, 0) + assertExpressionsEqual(self, e, MonomialTermExpression((0, m.a))) # # Check that multiplying by one gives the original expression # e = m.a * 1 - self.assertIs(type(e), type(m.a)) - self.assertIs(e, m.a) + assertExpressionsEqual(self, e, m.a) e = 1 * m.a - self.assertIs(type(e), type(m.a)) - self.assertIs(e, m.a) + assertExpressionsEqual(self, e, m.a) e = m.a * m.q - self.assertIs(type(e), type(m.a)) - self.assertIs(e, m.a) + assertExpressionsEqual(self, e, m.a) e = m.q * m.a - self.assertIs(type(e), type(m.a)) - self.assertIs(e, m.a) + assertExpressionsEqual(self, e, m.a) # # Check that numeric constants are simply muliplied out # e = NumericConstant(3) * NumericConstant(2) + assertExpressionsEqual(self, e, 6) self.assertIs(type(e), int) self.assertEqual(e, 6) @@ -1743,7 +1899,7 @@ def test_nestedDivision(self): e = e1 / 5 self.assertIs(type(e), MonomialTermExpression) self.assertEqual(e.nargs(), 2) - self.assertEqual(e.arg(0), 3./5) + self.assertEqual(e.arg(0), 3.0 / 5) self.assertIs(e.arg(1), m.b) self.assertEqual(e.size(), 3) @@ -1840,24 +1996,19 @@ def test_trivialDivision(self): # Check that dividing zero by anything non-zero gives zero # e = 0 / m.a - self.assertIs(type(e), int) - self.assertAlmostEqual(e, 0.0) + assertExpressionsEqual(self, e, DivisionExpression((0, m.a))) # # Check that dividing by one 1 gives the original expression # e = m.a / 1 - self.assertIs(type(e), type(m.a)) - self.assertIs(e, m.a) + assertExpressionsEqual(self, e, m.a) # # Check the structure dividing 1 by an expression # e = 1 / m.a - self.assertIs(type(e), DivisionExpression) - self.assertEqual(e.nargs(), 2) - self.assertIs(e.arg(0), 1) - self.assertIs(e.arg(1), m.a) + assertExpressionsEqual(self, e, DivisionExpression((1, m.a))) # # Check the structure dividing 1 by an expression @@ -1895,7 +2046,6 @@ def test_trivialDivision(self): class TestPrettyPrinter_oldStyle(unittest.TestCase): - _save = None def setUp(self): @@ -1915,10 +2065,10 @@ def test_sum(self): model.p = Param(mutable=True) expr = 5 + model.a + model.a - self.assertEqual("sum(5, a, a)", str(expr)) + self.assertEqual("sum(5, mon(1, a), mon(1, a))", str(expr)) expr += 5 - self.assertEqual("sum(5, a, a, 5)", str(expr)) + self.assertEqual("sum(5, mon(1, a), mon(1, a), 5)", str(expr)) expr = 2 + model.p self.assertEqual("sum(2, p)", str(expr)) @@ -1932,20 +2082,39 @@ def test_linearsum(self): model.a = Var(A) model.p = Param(A, initialize=2, mutable=True) - expr = quicksum(i*model.a[i] for i in A) - self.assertEqual("sum(mon(1, a[1]), mon(2, a[2]), mon(3, a[3]), mon(4, a[4]))", str(expr)) + expr = quicksum(i * model.a[i] for i in A) + self.assertEqual( + "sum(mon(0, a[0]), mon(1, a[1]), mon(2, a[2]), mon(3, a[3]), " + "mon(4, a[4]))", + str(expr), + ) - expr = quicksum((i-2)*model.a[i] for i in A) - self.assertEqual("sum(mon(-2, a[0]), mon(-1, a[1]), mon(1, a[3]), mon(2, a[4]))", str(expr)) + expr = quicksum((i - 2) * model.a[i] for i in A) + self.assertEqual( + "sum(mon(-2, a[0]), mon(-1, a[1]), mon(0, a[2]), mon(1, a[3]), " + "mon(2, a[4]))", + str(expr), + ) expr = quicksum(model.a[i] for i in A) - self.assertEqual("sum(mon(1, a[0]), mon(1, a[1]), mon(1, a[2]), mon(1, a[3]), mon(1, a[4]))", str(expr)) + self.assertEqual( + "sum(mon(1, a[0]), mon(1, a[1]), mon(1, a[2]), mon(1, a[3]), " + "mon(1, a[4]))", + str(expr), + ) model.p[1].value = 0 model.p[3].value = 3 - expr = quicksum(model.p[i]*model.a[i] if i != 3 else model.p[i] for i in A) - self.assertEqual("sum(3, mon(2, a[0]), mon(0, a[1]), mon(2, a[2]), mon(2, a[4]))", expression_to_string(expr, compute_values=True)) - self.assertEqual("sum(p[3], mon(p[0], a[0]), mon(p[1], a[1]), mon(p[2], a[2]), mon(p[4], a[4]))", expression_to_string(expr, compute_values=False)) + expr = quicksum(model.p[i] * model.a[i] if i != 3 else model.p[i] for i in A) + self.assertEqual( + "sum(mon(2, a[0]), mon(0, a[1]), mon(2, a[2]), 3, mon(2, a[4]))", + expression_to_string(expr, compute_values=True), + ) + self.assertEqual( + "sum(mon(p[0], a[0]), mon(p[1], a[1]), mon(p[2], a[2]), " + "p[3], mon(p[4], a[4]))", + expression_to_string(expr, compute_values=False), + ) def test_expr(self): # @@ -1958,22 +2127,19 @@ def test_expr(self): self.assertEqual("prod(mon(5, a), a)", str(expr)) # This returns an integer, which has no pprint(). - #expr = expr*0 - #buf = StringIO() - #EXPR.pprint(ostream=buf) - #self.assertEqual("0.0", buf.getvalue()) + # expr = expr*0 + # buf = StringIO() + # EXPR.pprint(ostream=buf) + # self.assertEqual("0.0", buf.getvalue()) expr = 5 * model.a / model.a - self.assertEqual( "div(mon(5, a), a)", - str(expr) ) + self.assertEqual("div(mon(5, a), a)", str(expr)) expr = expr / model.a - self.assertEqual( "div(div(mon(5, a), a), a)", - str(expr) ) + self.assertEqual("div(div(mon(5, a), a), a)", str(expr)) expr = 5 * model.a / model.a / 2 - self.assertEqual( "div(div(mon(5, a), a), 2)", - str(expr) ) + self.assertEqual("div(div(mon(5, a), a), 2)", str(expr)) def test_other(self): # @@ -1994,19 +2160,19 @@ def test_inequality(self): model.a = Var() expr = 5 < model.a - self.assertEqual( "5 < a", str(expr) ) + self.assertEqual("5 < a", str(expr)) expr = model.a >= 5 - self.assertEqual( "5 <= a", str(expr) ) + self.assertEqual("5 <= a", str(expr)) expr = expr < 10 - self.assertEqual( "5 <= a < 10", str(expr) ) + self.assertEqual("5 <= a < 10", str(expr)) expr = 5 <= model.a + 5 - self.assertEqual( "5 <= sum(a, 5)", str(expr) ) + self.assertEqual("5 <= sum(mon(1, a), 5)", str(expr)) expr = expr < 10 - self.assertEqual( "5 <= sum(a, 5) < 10", str(expr) ) + self.assertEqual("5 <= sum(mon(1, a), 5) < 10", str(expr)) def test_equality(self): # @@ -2017,34 +2183,36 @@ def test_equality(self): model.b = Param(initialize=5, mutable=True) expr = model.a == model.b - self.assertEqual( "a == b", str(expr) ) + self.assertEqual("a == b", str(expr)) expr = model.b == model.a - self.assertEqual( "b == a", str(expr) ) + self.assertEqual("b == a", str(expr)) # NB: since there is no "reverse equality" operator, explicit # constants will always show up second. expr = 5 == model.a - self.assertEqual( "a == 5", str(expr) ) + self.assertEqual("a == 5", str(expr)) expr = model.a == 10 - self.assertEqual( "a == 10", str(expr) ) + self.assertEqual("a == 10", str(expr)) expr = 5 == model.a + 5 - self.assertEqual( "sum(a, 5) == 5", str(expr) ) + self.assertEqual("sum(mon(1, a), 5) == 5", str(expr)) expr = model.a + 5 == 5 - self.assertEqual( "sum(a, 5) == 5", str(expr) ) + self.assertEqual("sum(mon(1, a), 5) == 5", str(expr)) def test_getitem(self): m = ConcreteModel() - m.I = RangeSet(1,9) - m.x = Var(m.I, initialize=lambda m,i: i+1) - m.P = Param(m.I, initialize=lambda m,i: 10-i, mutable=True) + m.I = RangeSet(1, 9) + m.x = Var(m.I, initialize=lambda m, i: i + 1) + m.P = Param(m.I, initialize=lambda m, i: 10 - i, mutable=True) t = IndexTemplate(m.I) - e = m.x[t+m.P[t+1]] + 3 - self.assertEqual("sum(getitem(x, sum({I}, getitem(P, sum({I}, 1)))), 3)", str(e)) + e = m.x[t + m.P[t + 1]] + 3 + self.assertEqual( + "sum(getitem(x, sum({I}, getitem(P, sum({I}, 1)))), 3)", str(e) + ) def test_small_expression(self): # @@ -2053,27 +2221,28 @@ def test_small_expression(self): model = AbstractModel() model.a = Var() model.b = Param(initialize=2, mutable=True) - instance=model.create_instance() - expr = instance.a+1 - expr = expr-1 - expr = expr*instance.a - expr = expr/instance.a + instance = model.create_instance() + expr = instance.a + 1 + expr = expr - 1 + expr = expr * instance.a + expr = expr / instance.a expr = expr**instance.b - expr = 1-expr - expr = 1+expr - expr = 2*expr - expr = 2/expr + expr = 1 - expr + expr = 1 + expr + expr = 2 * expr + expr = 2 / expr expr = 2**expr - expr = - expr - expr = + expr + expr = -expr + expr = +expr expr = abs(expr) self.assertEqual( - "abs(neg(pow(2, div(2, prod(2, sum(1, neg(pow(div(prod(sum(a, 1, -1), a), a), b)), 1))))))", - str(expr) ) + "abs(neg(pow(2, div(2, prod(2, sum(1, neg(pow(div(prod(sum(" + "mon(1, a), 1, -1), a), a), b)), 1))))))", + str(expr), + ) class TestPrettyPrinter_newStyle(unittest.TestCase): - _save = None def setUp(self): @@ -2093,11 +2262,11 @@ def test_sum(self): model.p = Param(mutable=True) expr = 5 + model.a + model.a - self.assertIs(type(expr), SumExpression) + self.assertIs(type(expr), LinearExpression) self.assertEqual("5 + a + a", str(expr)) expr += 5 - self.assertIs(type(expr), SumExpression) + self.assertIs(type(expr), LinearExpression) self.assertEqual("5 + a + a + 5", str(expr)) expr = 2 + model.p @@ -2115,36 +2284,56 @@ def test_linearsum(self): model.a = Var(A) model.p = Param(A, initialize=2, mutable=True) - expr = quicksum(i*model.a[i] for i in A) + 3 - self.assertEqual("a[1] + 2*a[2] + 3*a[3] + 4*a[4] + 3", str(expr)) - self.assertEqual("a[1] + 2*a[2] + 3*a[3] + 4*a[4] + 3", expression_to_string(expr, compute_values=True)) + expr = quicksum(i * model.a[i] for i in A) + 3 + self.assertEqual("0*a[0] + a[1] + 2*a[2] + 3*a[3] + 4*a[4] + 3", str(expr)) + self.assertEqual( + "0*a[0] + a[1] + 2*a[2] + 3*a[3] + 4*a[4] + 3", + expression_to_string(expr, compute_values=True), + ) - expr = quicksum((i-2)*model.a[i] for i in A) + 3 - self.assertEqual("-2*a[0] - a[1] + a[3] + 2*a[4] + 3", str(expr)) - self.assertEqual("-2*a[0] - a[1] + a[3] + 2*a[4] + 3", expression_to_string(expr, compute_values=True)) + expr = quicksum((i - 2) * model.a[i] for i in A) + 3 + self.assertEqual("-2*a[0] - a[1] + 0*a[2] + a[3] + 2*a[4] + 3", str(expr)) + self.assertEqual( + "-2*a[0] - a[1] + 0*a[2] + a[3] + 2*a[4] + 3", + expression_to_string(expr, compute_values=True), + ) expr = quicksum(model.a[i] for i in A) + 3 self.assertEqual("a[0] + a[1] + a[2] + a[3] + a[4] + 3", str(expr)) - expr = quicksum(model.p[i]*model.a[i] for i in A) - self.assertEqual("2*a[0] + 2*a[1] + 2*a[2] + 2*a[3] + 2*a[4]", expression_to_string(expr, compute_values=True)) - self.assertEqual("p[0]*a[0] + p[1]*a[1] + p[2]*a[2] + p[3]*a[3] + p[4]*a[4]", expression_to_string(expr, compute_values=False)) - self.assertEqual("p[0]*a[0] + p[1]*a[1] + p[2]*a[2] + p[3]*a[3] + p[4]*a[4]", str(expr)) + expr = quicksum(model.p[i] * model.a[i] for i in A) + self.assertEqual( + "2*a[0] + 2*a[1] + 2*a[2] + 2*a[3] + 2*a[4]", + expression_to_string(expr, compute_values=True), + ) + self.assertEqual( + "p[0]*a[0] + p[1]*a[1] + p[2]*a[2] + p[3]*a[3] + p[4]*a[4]", + expression_to_string(expr, compute_values=False), + ) + self.assertEqual( + "p[0]*a[0] + p[1]*a[1] + p[2]*a[2] + p[3]*a[3] + p[4]*a[4]", str(expr) + ) model.p[1].value = 0 model.p[3].value = 3 - expr = quicksum(model.p[i]*model.a[i] if i != 3 else model.p[i] for i in A) - self.assertEqual("3 + 2*a[0] + 0*a[1] + 2*a[2] + 2*a[4]", expression_to_string(expr, compute_values=True)) - expr = quicksum(model.p[i]*model.a[i] if i != 3 else -3 for i in A) - self.assertEqual("-3 + p[0]*a[0] + p[1]*a[1] + p[2]*a[2] + p[4]*a[4]", expression_to_string(expr, compute_values=False)) - + expr = quicksum(model.p[i] * model.a[i] if i != 3 else model.p[i] for i in A) + self.assertEqual( + "2*a[0] + 0*a[1] + 2*a[2] + 3 + 2*a[4]", + expression_to_string(expr, compute_values=True), + ) + expr = quicksum(model.p[i] * model.a[i] if i != 3 else -3 for i in A) + self.assertEqual( + "p[0]*a[0] + p[1]*a[1] + p[2]*a[2] - 3 + p[4]*a[4]", + expression_to_string(expr, compute_values=False), + ) + def test_negation(self): M = ConcreteModel() M.x = Var() M.y = Var() - e = M.x*(1 + M.y) - e = - e + e = M.x * (1 + M.y) + e = -e self.assertEqual("- x*(1 + y)", expression_to_string(e)) M.x = -1 @@ -2163,31 +2352,27 @@ def test_prod(self): self.assertEqual("5*a*a", str(expr)) # This returns an integer, which has no pprint(). - #expr = expr*0 - #buf = StringIO() - #EXPR.pprint(ostream=buf) - #self.assertEqual("0.0", buf.getvalue()) + # expr = expr*0 + # buf = StringIO() + # EXPR.pprint(ostream=buf) + # self.assertEqual("0.0", buf.getvalue()) expr = 5 * model.a / model.a - self.assertEqual( "5*a/a", - str(expr) ) + self.assertEqual("5*a/a", str(expr)) expr = expr / model.a - self.assertEqual( "5*a/a/a", - str(expr) ) + self.assertEqual("5*a/a/a", str(expr)) expr = 5 * model.a / (model.a * model.a) - self.assertEqual( "5*a/(a*a)", - str(expr) ) + self.assertEqual("5*a/(a*a)", str(expr)) expr = 5 * model.a / model.a / 2 - self.assertEqual( "5*a/a/2", - str(expr) ) + self.assertEqual("5*a/a/2", str(expr)) expr = model.a * model.b model.a = 1 model.a.fixed = True - self.assertEqual( "b", expression_to_string(expr, compute_values=True)) + self.assertEqual("b", expression_to_string(expr, compute_values=True)) def test_inequality(self): # @@ -2197,19 +2382,19 @@ def test_inequality(self): model.a = Var() expr = 5 < model.a - self.assertEqual( "5 < a", str(expr) ) + self.assertEqual("5 < a", str(expr)) expr = model.a >= 5 - self.assertEqual( "5 <= a", str(expr) ) + self.assertEqual("5 <= a", str(expr)) expr = expr < 10 - self.assertEqual( "5 <= a < 10", str(expr) ) + self.assertEqual("5 <= a < 10", str(expr)) expr = 5 <= model.a + 5 - self.assertEqual( "5 <= a + 5", str(expr) ) + self.assertEqual("5 <= a + 5", str(expr)) expr = expr < 10 - self.assertEqual( "5 <= a + 5 < 10", str(expr) ) + self.assertEqual("5 <= a + 5 < 10", str(expr)) def test_equality(self): # @@ -2220,25 +2405,24 @@ def test_equality(self): model.b = Param(initialize=5, mutable=True) expr = model.a == model.b - self.assertEqual( "a == b", str(expr) ) + self.assertEqual("a == b", str(expr)) expr = model.b == model.a - self.assertEqual( "b == a", str(expr) ) + self.assertEqual("b == a", str(expr)) # NB: since there is no "reverse equality" operator, explicit # constants will always show up second. expr = 5 == model.a - self.assertEqual( "a == 5", str(expr) ) + self.assertEqual("a == 5", str(expr)) expr = model.a == 10 - self.assertEqual( "a == 10", str(expr) ) + self.assertEqual("a == 10", str(expr)) expr = 5 == model.a + 5 - self.assertEqual( "a + 5 == 5", str(expr) ) + self.assertEqual("a + 5 == 5", str(expr)) expr = model.a + 5 == 5 - self.assertEqual( "a + 5 == 5", str(expr) ) - + self.assertEqual("a + 5 == 5", str(expr)) def test_linear(self): # @@ -2249,38 +2433,46 @@ def test_linear(self): m.y = Var() m.p = Param(initialize=2, mutable=True) - expr = m.x - m.p*m.y - self.assertEqual( "x - p*y", str(expr) ) + expr = m.x - m.p * m.y + self.assertEqual("x - p*y", str(expr)) - expr = m.x - m.p*m.y + 5 - self.assertIs(type(expr), SumExpression) - self.assertEqual( "x - p*y + 5", str(expr) ) + expr = m.x - m.p * m.y + 5 + self.assertIs(type(expr), LinearExpression) + self.assertEqual("x - p*y + 5", str(expr)) - expr = m.x - m.p*m.y - 5 - self.assertIs(type(expr), SumExpression) - self.assertEqual( "x - p*y - 5", str(expr) ) + expr = m.x - m.p * m.y - 5 + self.assertIs(type(expr), LinearExpression) + self.assertEqual("x - p*y - 5", str(expr)) - expr = m.x - m.p*m.y - 5 + m.p - self.assertIs(type(expr), SumExpression) - self.assertEqual( "x - p*y - 5 + p", str(expr) ) + expr = m.x - m.p * m.y - 5 + m.p + self.assertIs(type(expr), LinearExpression) + self.assertEqual("x - p*y - 5 + p", str(expr)) def test_expr_if(self): m = ConcreteModel() m.a = Var() m.b = Var() - expr = Expr_if(IF=m.a + m.b < 20, THEN=m.a, ELSE=m.b) - self.assertEqual("Expr_if( ( a + b < 20 ), then=( a ), else=( b ) )", str(expr)) + expr = Expr_if(IF_=m.a + m.b < 20, THEN_=m.a, ELSE_=m.b) + self.assertEqual( + "Expr_if( ( a + b < 20 ), then=( a ), else=( b ) )", str(expr) + ) expr = Expr_if(IF=m.a + m.b < 20, THEN=1, ELSE=m.b) - self.assertEqual("Expr_if( ( a + b < 20 ), then=( 1 ), else=( b ) )", str(expr)) + self.assertEqual( + "Expr_if( ( a + b < 20 ), then=( 1 ), else=( b ) )", str(expr) + ) + with self.assertRaisesRegex(ValueError, "Cannot specify both THEN_ and THEN"): + Expr_if(IF_=m.a + m.b < 20, THEN_=1, ELSE_=m.b, THEN=2) + with self.assertRaisesRegex(ValueError, "Unrecognized arguments: _THEN_"): + Expr_if(IF_=m.a + m.b < 20, _THEN_=1, ELSE_=m.b) def test_getitem(self): m = ConcreteModel() - m.I = RangeSet(1,9) - m.x = Var(m.I, initialize=lambda m,i: i+1) - m.P = Param(m.I, initialize=lambda m,i: 10-i, mutable=True) + m.I = RangeSet(1, 9) + m.x = Var(m.I, initialize=lambda m, i: i + 1) + m.P = Param(m.I, initialize=lambda m, i: 10 - i, mutable=True) t = IndexTemplate(m.I) - e = m.x[t+m.P[t+1]] + 3 + e = m.x[t + m.P[t + 1]] + 3 self.assertEqual("x[{I} + P[{I} + 1]] + 3", str(e)) def test_associativity_rules(self): @@ -2289,30 +2481,29 @@ def test_associativity_rules(self): m.x = Var() m.y = Var() m.z = Var() - self.assertEqual(str( m.z+m.x+m.y ), "z + x + y") - self.assertEqual(str( (m.z+m.x)+m.y ), "z + x + y") + self.assertEqual(str(m.z + m.x + m.y), "z + x + y") + self.assertEqual(str((m.z + m.x) + m.y), "z + x + y") # FIXME: Pyomo currently returns "z + y + x" # self.assertEqual(str( m.z+(m.x+m.y) ), "z + x + y") - self.assertEqual(str( (m.w+m.z)+(m.x+m.y) ), "w + z + x + y") - - self.assertEqual(str( (m.z/m.x)/(m.y/m.w) ), "z/x/(y/w)") + self.assertEqual(str((m.w + m.z) + (m.x + m.y)), "w + z + x + y") - self.assertEqual(str( m.z/m.x/m.y ), "z/x/y") - self.assertEqual(str( (m.z/m.x)/m.y ), "z/x/y") - self.assertEqual(str( m.z/(m.x/m.y) ), "z/(x/y)") + self.assertEqual(str((m.z / m.x) / (m.y / m.w)), "z/x/(y/w)") - self.assertEqual(str( m.z*m.x/m.y ), "z*x/y") - self.assertEqual(str( (m.z*m.x)/m.y ), "z*x/y") - self.assertEqual(str( m.z*(m.x/m.y) ), "z*(x/y)") + self.assertEqual(str(m.z / m.x / m.y), "z/x/y") + self.assertEqual(str((m.z / m.x) / m.y), "z/x/y") + self.assertEqual(str(m.z / (m.x / m.y)), "z/(x/y)") - self.assertEqual(str( m.z/m.x*m.y ), "z/x*y") - self.assertEqual(str( (m.z/m.x)*m.y ), "z/x*y") - self.assertEqual(str( m.z/(m.x*m.y) ), "z/(x*y)") + self.assertEqual(str(m.z * m.x / m.y), "z*x/y") + self.assertEqual(str((m.z * m.x) / m.y), "z*x/y") + self.assertEqual(str(m.z * (m.x / m.y)), "z*(x/y)") - self.assertEqual(str( m.x**m.y**m.z ), "x**(y**z)") - self.assertEqual(str( (m.x**m.y)**m.z ), "(x**y)**z") - self.assertEqual(str( m.x**(m.y**m.z) ), "x**(y**z)") + self.assertEqual(str(m.z / m.x * m.y), "z/x*y") + self.assertEqual(str((m.z / m.x) * m.y), "z/x*y") + self.assertEqual(str(m.z / (m.x * m.y)), "z/(x*y)") + self.assertEqual(str(m.x**m.y**m.z), "x**(y**z)") + self.assertEqual(str((m.x**m.y) ** m.z), "(x**y)**z") + self.assertEqual(str(m.x ** (m.y**m.z)), "x**(y**z)") def test_small_expression(self): # @@ -2321,90 +2512,104 @@ def test_small_expression(self): model = AbstractModel() model.a = Var() model.b = Param(initialize=2, mutable=True) - instance=model.create_instance() - expr = instance.a+1 - expr = expr-1 - expr = expr*instance.a - expr = expr/instance.a + instance = model.create_instance() + expr = instance.a + 1 + expr = expr - 1 + expr = expr * instance.a + expr = expr / instance.a expr = expr**instance.b - expr = 1-expr - expr = 1+expr - expr = 2*expr - expr = 2/expr + expr = 1 - expr + expr = 1 + expr + expr = 2 * expr + expr = 2 / expr expr = 2**expr - expr = - expr - expr = + expr + expr = -expr + expr = +expr expr = abs(expr) - self.assertEqual( - "abs(- 2**(2/(2*(1 - ((a + 1 - 1)*a/a)**b + 1))))", - str(expr) ) + self.assertEqual("abs(- 2**(2/(2*(1 - ((a + 1 - 1)*a/a)**b + 1))))", str(expr)) def test_large_expression(self): # # Diff against a large model # def c1_rule(model): - return (1.0,model.b[1],None) + return (1.0, model.b[1], None) + def c2_rule(model): - return (None,model.b[1],0.0) + return (None, model.b[1], 0.0) + def c3_rule(model): - return (0.0,model.b[1],1.0) + return (0.0, model.b[1], 1.0) + def c4_rule(model): - return (3.0,model.b[1]) + return (3.0, model.b[1]) + def c5_rule(model, i): - return (model.b[i],0.0) + return (model.b[i], 0.0) def c6a_rule(model): return 0.0 <= model.c + def c7a_rule(model): return model.c <= 1.0 + def c7b_rule(model): return model.c >= 1.0 + def c8_rule(model): return model.c == 2.0 + def c9a_rule(model): - return model.A+model.A <= model.c + return model.A + model.A <= model.c + def c9b_rule(model): - return model.A+model.A >= model.c + return model.A + model.A >= model.c + def c10a_rule(model): - return model.c <= model.B+model.B + return model.c <= model.B + model.B + def c11_rule(model): - return model.c == model.A+model.B + return model.c == model.A + model.B + def c15a_rule(model): - return model.A <= model.A*model.d + return model.A <= model.A * model.d + def c16a_rule(model): - return model.A*model.d <= model.B + return model.A * model.d <= model.B def c12_rule(model): return model.c == model.d + def c13a_rule(model): return model.c <= model.d + def c14a_rule(model): return model.c >= model.d def cl_rule(model, i): if i > 10: return ConstraintList.End - return i* model.c >= model.d + return i * model.c >= model.d def o2_rule(model, i): return model.b[i] - model=AbstractModel() - model.a = Set(initialize=[1,2,3]) - model.b = Var(model.a,initialize=1.1,within=PositiveReals) + + model = AbstractModel() + model.a = Set(initialize=[1, 2, 3]) + model.b = Var(model.a, initialize=1.1, within=PositiveReals) model.c = Var(initialize=2.1, within=PositiveReals) model.d = Var(initialize=3.1, within=PositiveReals) model.e = Var(initialize=4.1, within=PositiveReals) model.A = Param(default=-1, mutable=True) model.B = Param(default=-2, mutable=True) - #model.o1 = Objective() - model.o2 = Objective(model.a,rule=o2_rule) - model.o3 = Objective(model.a,model.a) + # model.o1 = Objective() + model.o2 = Objective(model.a, rule=o2_rule) + model.o3 = Objective(model.a, model.a) model.c1 = Constraint(rule=c1_rule) model.c2 = Constraint(rule=c2_rule) model.c3 = Constraint(rule=c3_rule) model.c4 = Constraint(rule=c4_rule) - model.c5 = Constraint(model.a,rule=c5_rule) + model.c5 = Constraint(model.a, rule=c5_rule) model.c6a = Constraint(rule=c6a_rule) model.c7a = Constraint(rule=c7a_rule) @@ -2423,13 +2628,12 @@ def o2_rule(model, i): model.cl = ConstraintList(rule=cl_rule) - instance=model.create_instance() - OUTPUT=open(join(currdir, "varpprint.out"), "w") + instance = model.create_instance() + OUTPUT = open(join(currdir, "varpprint.out"), "w") instance.pprint(ostream=OUTPUT) OUTPUT.close() _out, _txt = join(currdir, "varpprint.out"), join(currdir, "varpprint.txt") - self.assertTrue(cmp(_out, _txt), - msg="Files %s and %s differ" % (_txt, _out)) + self.assertTrue(cmp(_out, _txt), msg="Files %s and %s differ" % (_txt, _out)) def test_labeler(self): M = ConcreteModel() @@ -2440,37 +2644,56 @@ def test_labeler(self): M.p = Param(range(3), initialize=2) M.q = Param(range(3), initialize=3, mutable=True) - e = M.x*M.y + sum_product(M.p, M.a) + quicksum(M.q[i]*M.a[i] for i in M.a) / M.x + e = ( + M.x * M.y + + sum_product(M.p, M.a) + + quicksum(M.q[i] * M.a[i] for i in M.a) / M.x + ) self.assertEqual( str(e), - "x*y + (2*a[0] + 2*a[1] + 2*a[2]) + (q[0]*a[0] + q[1]*a[1] + q[2]*a[2])/x") + "x*y + (2*a[0] + 2*a[1] + 2*a[2]) + (q[0]*a[0] + q[1]*a[1] + q[2]*a[2])/x", + ) self.assertEqual( e.to_string(), - "x*y + (2*a[0] + 2*a[1] + 2*a[2]) + (q[0]*a[0] + q[1]*a[1] + q[2]*a[2])/x") + "x*y + (2*a[0] + 2*a[1] + 2*a[2]) + (q[0]*a[0] + q[1]*a[1] + q[2]*a[2])/x", + ) self.assertEqual( e.to_string(compute_values=True), - "x*y + (2*a[0] + 2*a[1] + 2*a[2]) + (3*a[0] + 3*a[1] + 3*a[2])/x") + "x*y + (2*a[0] + 2*a[1] + 2*a[2]) + (3*a[0] + 3*a[1] + 3*a[2])/x", + ) labeler = NumericLabeler('x') self.assertEqual( expression_to_string(e, labeler=labeler), - "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (x6*x3 + x7*x4 + x8*x5)/x1") + "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (x6*x3 + x7*x4 + x8*x5)/x1", + ) from pyomo.core.expr.symbol_map import SymbolMap + labeler = NumericLabeler('x') smap = SymbolMap(labeler) self.assertEqual( expression_to_string(e, smap=smap), - "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (x6*x3 + x7*x4 + x8*x5)/x1") + "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (x6*x3 + x7*x4 + x8*x5)/x1", + ) self.assertEqual( expression_to_string(e, smap=smap, compute_values=True), - "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (3*x3 + 3*x4 + 3*x5)/x1") + "x1*x2 + (2*x3 + 2*x4 + 2*x5) + (3*x3 + 3*x4 + 3*x5)/x1", + ) + + def test_balanced_parens(self): + self.assertTrue(_balanced_parens('(1+5)+((x - 1)*(5+x))')) + self.assertFalse(_balanced_parens('1+5)+((x - 1)*(5+x)')) + self.assertFalse(_balanced_parens('(((1+5)+((x - 1)*(5+x))')) + self.assertFalse(_balanced_parens('1+5)+((x - 1)*(5+x))')) + self.assertFalse(_balanced_parens('(1+5)+((x - 1)*(5+x)')) + self.assertFalse(_balanced_parens('(1+5)+((x - 1))*(5+x))')) + # # TODO:What is this checking? # class TestInplaceExpressionGeneration(unittest.TestCase): - def setUp(self): # This class tests the Pyomo 5.x expression trees @@ -2490,14 +2713,13 @@ def test_iadd(self): self.assertIs(type(x), type(m.a)) x += m.a - self.assertIs(type(x), SumExpression) + self.assertIs(type(x), LinearExpression) self.assertEqual(x.nargs(), 2) x += m.b - self.assertIs(type(x), SumExpression) + self.assertIs(type(x), LinearExpression) self.assertEqual(x.nargs(), 3) - def test_isub(self): m = self.m @@ -2511,15 +2733,15 @@ def test_isub(self): self.assertEqual(x.nargs(), 2) x -= m.a - self.assertIs(type(x), SumExpression) + self.assertIs(type(x), LinearExpression) self.assertEqual(x.nargs(), 2) x -= m.a - self.assertIs(type(x), SumExpression) + self.assertIs(type(x), LinearExpression) self.assertEqual(x.nargs(), 3) x -= m.b - self.assertIs(type(x), SumExpression) + self.assertIs(type(x), LinearExpression) self.assertEqual(x.nargs(), 4) def test_imul(self): @@ -2573,7 +2795,7 @@ def test_ipow(self): # If someone else holds a reference to the expression, we still # need to clone it: - x = 1 ** m.a + x = 1**m.a y = x x **= m.b self.assertIs(type(y), PowExpression) @@ -2591,7 +2813,6 @@ def test_ipow(self): class TestGeneralExpressionGeneration(unittest.TestCase): - def test_invalidIndexing(self): # # Check for errors when generating expressions with invalid indices @@ -2658,7 +2879,7 @@ def test_negation(self): e = -e1 self.assertIs(type(e), NegationExpression) self.assertIs(e.arg(0), e1) - self.assertIs(type(e.arg(0)), SumExpression) + self.assertIs(type(e.arg(0)), LinearExpression) e1 = m.a * m.b e = -e1 @@ -2673,20 +2894,22 @@ def test_negation(self): class TestExprConditionalContext(unittest.TestCase): - - def checkCondition(self, expr, expectedValue, use_value=False): if use_value: expr = value(expr) try: if expr: if expectedValue != True: - self.fail("__bool__ returned the wrong condition value" - " (expected %s)" % expectedValue) + self.fail( + "__bool__ returned the wrong condition value" + " (expected %s)" % expectedValue + ) else: if expectedValue != False: - self.fail("__bool__ returned the wrong condition value" - " (expected %s)" % expectedValue) + self.fail( + "__bool__ returned the wrong condition value" + " (expected %s)" % expectedValue + ) if expectedValue is None: self.fail("Expected ValueError because component was undefined") except (ValueError, PyomoException): @@ -2699,28 +2922,28 @@ def test_immutable_paramConditional(self): # Immutable Params appear mutable (non-constant) before they are # constructed with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(0 < p\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(0 < p\) to bool.", + ): self.checkCondition(model.p > 0, True) - #self.checkCondition(model.p >= 0, True) - #self.checkCondition(model.p < 1, True) - #self.checkCondition(model.p <= 1, True) - #self.checkCondition(model.p == 0, None) + # self.checkCondition(model.p >= 0, True) + # self.checkCondition(model.p < 1, True) + # self.checkCondition(model.p <= 1, True) + # self.checkCondition(model.p == 0, None) instance = model.create_instance() # # Inequalities evaluate normally when the parameter is initialized # with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(0 < p\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(0 < p\) to bool.", + ): self.checkCondition(model.p > 0, True) - #self.checkCondition(model.p >= 0, True) - #self.checkCondition(model.p < 1, True) - #self.checkCondition(model.p <= 1, True) - #self.checkCondition(model.p == 0, None) + # self.checkCondition(model.p >= 0, True) + # self.checkCondition(model.p < 1, True) + # self.checkCondition(model.p <= 1, True) + # self.checkCondition(model.p == 0, None) instance = model.create_instance() self.checkCondition(instance.p > 0, True) @@ -2740,29 +2963,29 @@ def test_immutable_paramConditional_reversed(self): # Immutable Params appear mutable (non-constant) before they are # constructed with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(0 < p\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(0 < p\) to bool.", + ): self.checkCondition(0 < model.p, True) with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(0 <= p\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(0 <= p\) to bool.", + ): self.checkCondition(0 <= model.p, True) with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(p < 1\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(p < 1\) to bool.", + ): self.checkCondition(1 > model.p, True) with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(p <= 1\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(p <= 1\) to bool.", + ): self.checkCondition(1 >= model.p, True) with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(0 == p\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(0 == p\) to bool.", + ): self.checkCondition(0 == model.p, None) self.checkCondition(0 < model.p, True, use_value=True) self.checkCondition(0 <= model.p, True, use_value=True) @@ -2794,14 +3017,14 @@ def test_immutable_paramConditional_reversed(self): # Immutable Params appear mutable (non-constant) before they are # constructed with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(0 < p\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(0 < p\) to bool.", + ): self.checkCondition(0 < model.p, True) - #self.checkCondition(0 <= model.p, True) - #self.checkCondition(1 > model.p, True) - #self.checkCondition(1 >= model.p, True) - #self.checkCondition(0 == model.p, None) + # self.checkCondition(0 <= model.p, True) + # self.checkCondition(1 > model.p, True) + # self.checkCondition(1 >= model.p, True) + # self.checkCondition(0 == model.p, None) instance = model.create_instance() # @@ -2823,14 +3046,14 @@ def test_mutable_paramConditional(self): model.p = Param(initialize=1.0, mutable=True) # with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(0 < p\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(0 < p\) to bool.", + ): self.checkCondition(model.p > 0, True) - #self.checkCondition(model.p >= 0, True) - #self.checkCondition(model.p < 1, True) - #self.checkCondition(model.p <= 1, True) - #self.checkCondition(model.p == 0, None) + # self.checkCondition(model.p >= 0, True) + # self.checkCondition(model.p < 1, True) + # self.checkCondition(model.p <= 1, True) + # self.checkCondition(model.p == 0, None) instance = model.create_instance() with self.assertRaises(PyomoException): @@ -2869,14 +3092,14 @@ def test_mutable_paramConditional_reversed(self): model.p = Param(initialize=1.0, mutable=True) # with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(0 < p\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(0 < p\) to bool.", + ): self.checkCondition(0 < model.p, True) - #self.checkCondition(0 <= model.p, True) - #self.checkCondition(1 > model.p, True) - #self.checkCondition(1 >= model.p, True) - #self.checkCondition(0 == model.p, None) + # self.checkCondition(0 <= model.p, True) + # self.checkCondition(1 > model.p, True) + # self.checkCondition(1 >= model.p, True) + # self.checkCondition(0 == model.p, None) instance = model.create_instance() with self.assertRaises(PyomoException): @@ -2915,14 +3138,14 @@ def test_varConditional(self): model.v = Var(initialize=1.0) # with self.assertRaisesRegex( - PyomoException, - r"Cannot convert non-constant Pyomo expression " - r"\(0 < v\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo expression " r"\(0 < v\) to bool.", + ): self.checkCondition(model.v > 0, True) - #self.checkCondition(model.v >= 0, True) - #self.checkCondition(model.v < 1, True) - #self.checkCondition(model.v <= 1, True) - #self.checkCondition(model.v == 0, None) + # self.checkCondition(model.v >= 0, True) + # self.checkCondition(model.v < 1, True) + # self.checkCondition(model.v <= 1, True) + # self.checkCondition(model.v == 0, None) instance = model.create_instance() # @@ -2964,13 +3187,14 @@ def test_varConditional_reversed(self): model.v = Var(initialize=1.0) # with self.assertRaisesRegex( - PyomoException, r"Cannot convert non-constant Pyomo " - r"expression \(0 < v\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo " r"expression \(0 < v\) to bool.", + ): self.checkCondition(0 < model.v, True) - #self.checkCondition(0 <= model.v, True) - #self.checkCondition(1 > model.v, True) - #self.checkCondition(1 >= model.v, True) - #self.checkCondition(0 == model.v, None) + # self.checkCondition(0 <= model.v, True) + # self.checkCondition(1 > model.v, True) + # self.checkCondition(1 >= model.v, True) + # self.checkCondition(0 == model.v, None) instance = model.create_instance() # @@ -3015,24 +3239,34 @@ def test_eval_sub_varConditional(self): # is unconstructed! # with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(model.v) > 0, None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(model.v) >= 0, None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(model.v) < 1, None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(model.v) <= 1, None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(model.v) == 0, None) instance = model.create_instance() @@ -3058,24 +3292,34 @@ def test_eval_sub_varConditional_reversed(self): # is unconstructed! # with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(0 < value(model.v), None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(0 <= value(model.v), None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(1 > value(model.v), None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(1 >= value(model.v), None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(0 == value(model.v), None) instance = model.create_instance() @@ -3101,16 +3345,22 @@ def test_eval_varConditional(self): # is unconstructed! # with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(model.v > 0), None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(model.v >= 0), None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(model.v == 0), None) instance = model.create_instance() @@ -3128,16 +3378,22 @@ def test_eval_varConditional_reversed(self): # The value() function generates an exception when the variable is unconstructed! # with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(0 < model.v), None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(0 <= model.v), None) with self.assertRaisesRegex( - RuntimeError, r"Cannot access property 'value' on " - r"AbstractScalarVar 'v' before it has been constructed"): + RuntimeError, + r"Cannot access property 'value' on " + r"AbstractScalarVar 'v' before it has been constructed", + ): self.checkCondition(value(0 == model.v), None) instance = model.create_instance() @@ -3153,15 +3409,15 @@ def test_eval_varConditional_reversed(self): class TestPolynomialDegree(unittest.TestCase): - def setUp(self): # This class tests the Pyomo 5.x expression trees def d_fn(model): - return model.c+model.c + return model.c + model.c + self.model = ConcreteModel() self.model.a = Var(initialize=1.0) self.model.b = Var(initialize=2.0) - self.model.c = Param(initialize=0, mutable=True) + self.model.c = Param(initialize=3.0, mutable=True) self.model.d = Param(initialize=d_fn, mutable=True) self.model.e = Param(mutable=True) self.instance = self.model @@ -3216,16 +3472,16 @@ def test_linearsum(self): m.v = Var(A) e = quicksum(m.v[i] for i in A) - self.assertEqual(e.polynomial_degree(), 1) + self.assertIs(e.__class__, LinearExpression) + self.assertEqual(e.polynomial_degree(), 1) - e = quicksum(i*m.v[i] for i in A) - self.assertEqual(e.polynomial_degree(), 1) + e = quicksum(i * m.v[i] for i in A) + self.assertIs(e.__class__, LinearExpression) + self.assertEqual(e.polynomial_degree(), 1) e = quicksum(1 for i in A) - self.assertEqual(polynomial_degree(e), 0) - - e = quicksum((1 for i in A), linear=True) - self.assertTrue(e.__class__ in native_numeric_types) + self.assertIs(e.__class__, int) + self.assertEqual(polynomial_degree(e), 0) def test_relational_ops(self): # @@ -3338,7 +3594,7 @@ def test_nonpolynomial_abs(self): expr2 = self.model.a + self.model.b * abs(self.model.b) self.assertEqual(expr2.polynomial_degree(), None) - expr3 = self.model.a * ( self.model.b + abs(self.model.b) ) + expr3 = self.model.a * (self.model.b + abs(self.model.b)) self.assertEqual(expr3.polynomial_degree(), None) # # Fixing variables should turn intrinsic functions into constants @@ -3372,7 +3628,7 @@ def test_nonpolynomial_pow(self): expr = pow(m.a, m.b) self.assertEqual(expr.polynomial_degree(), None) # - # A power with a constant exponent is not a polynomial + # A power with a constant exponent # m.b.fixed = True self.assertEqual(expr.polynomial_degree(), 2) @@ -3399,17 +3655,17 @@ def test_nonpolynomial_pow(self): expr = pow(m.a, 2) self.assertEqual(expr.polynomial_degree(), 2) - expr = pow(m.a*m.a, 2) + expr = pow(m.a * m.a, 2) self.assertEqual(expr.polynomial_degree(), 4) # # A non-integer exponent is not a polynomial # - expr = pow(m.a*m.a, 2.1) + expr = pow(m.a * m.a, 2.1) self.assertEqual(expr.polynomial_degree(), None) # # A negative exponent is not a polynomial # - expr = pow(m.a*m.a, -1) + expr = pow(m.a * m.a, -1) self.assertEqual(expr.polynomial_degree(), None) # # A nonpolynomial base is not a polynomial if the exponent is nonzero @@ -3418,8 +3674,7 @@ def test_nonpolynomial_pow(self): self.assertEqual(expr.polynomial_degree(), None) expr = pow(2**m.a, 0) - self.assertEqual(expr, 1) - self.assertEqual(as_numeric(expr).polynomial_degree(), 0) + self.assertEqual(expr.polynomial_degree(), 0) # # With an undefined exponent, the polynomial degree is None # @@ -3431,13 +3686,13 @@ def test_Expr_if(self): # # When IF conditional is constant, then polynomial degree is propigated # - expr = Expr_if(1,m.a**3,m.a**2) + expr = Expr_if(1, m.a**3, m.a**2) self.assertEqual(expr.polynomial_degree(), 3) m.a.fixed = True self.assertEqual(expr.polynomial_degree(), 0) m.a.fixed = False - expr = Expr_if(0,m.a**3,m.a**2) + expr = Expr_if(0, m.a**3, m.a**2) self.assertEqual(expr.polynomial_degree(), 2) m.a.fixed = True self.assertEqual(expr.polynomial_degree(), 0) @@ -3445,7 +3700,7 @@ def test_Expr_if(self): # # When IF conditional is variable, then polynomial degree is propagated # - expr = Expr_if(m.a,m.b,m.b**2) + expr = Expr_if(m.a, m.b, m.b**2) self.assertEqual(expr.polynomial_degree(), None) m.a.fixed = True m.a.value = 1 @@ -3455,22 +3710,22 @@ def test_Expr_if(self): # # A constant expression has degree 0 # - expr = Expr_if(m.e,1,0) + expr = Expr_if(m.e, 1, 0) self.assertEqual(expr.polynomial_degree(), 0) # # A non-constant expression has degree if both arguments have the # same degree, as long as the IF is fixed (even if it is not # defined) # - expr = Expr_if(m.e,m.a,0) + expr = Expr_if(m.e, m.a, 0) self.assertEqual(expr.polynomial_degree(), 0) - expr = Expr_if(m.e,5*m.b,1+m.b) + expr = Expr_if(m.e, 5 * m.b, 1 + m.b) self.assertEqual(expr.polynomial_degree(), 1) # # A non-constant expression has degree None because # m.e is an uninitialized parameter # - expr = Expr_if(m.e,m.b,0) + expr = Expr_if(m.e, m.b, 0) self.assertEqual(expr.polynomial_degree(), None) @@ -3478,10 +3733,9 @@ def test_Expr_if(self): # TODO: Confirm that this checks for entangled expressions. # class EntangledExpressionErrors(unittest.TestCase): - def test_sumexpr_add_entangled(self): x = Var() - e = x*2 + 1 + e = x * 2 + 1 e + 1 def test_entangled_test1(self): @@ -3493,27 +3747,26 @@ def test_entangled_test1(self): e1 = self.m.a + self.m.b - #print(e1) - #print(e1_) - #print("--") + # print(e1) + # print(e1_) + # print("--") e2 = self.m.c + e1 - #print(e1) - #print(e1_) - #print(e2) - #print(e2_) - #print("--") + # print(e1) + # print(e1_) + # print(e2) + # print(e2_) + # print("--") e3 = self.m.d + e1 - self.assertEqual( e1.nargs(), 2) - self.assertEqual( e2.nargs(), 3) - self.assertEqual( e3.nargs(), 2) + self.assertEqual(e1.nargs(), 2) + self.assertEqual(e2.nargs(), 3) + self.assertEqual(e3.nargs(), 3) - self.assertNotEqual( id(e2.arg(2)), id(e3.arg(1).arg(1))) + self.assertNotEqual(id(e2.arg(2)), id(e3.arg(2))) class TestSummationExpression(unittest.TestCase): - def setUp(self): # This class tests the Pyomo 5.x expression trees @@ -3529,69 +3782,151 @@ def tearDown(self): def test_summation1(self): e = sum_product(self.m.a) - self.assertEqual( e(), 25 ) - self.assertIs(type(e), LinearExpression) - self.assertEqual( id(self.m.a[1]), id(e.linear_vars[0]) ) - self.assertEqual( id(self.m.a[2]), id(e.linear_vars[1]) ) - self.assertEqual(e.size(), 16) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, self.m.a[1])), + MonomialTermExpression((1, self.m.a[2])), + MonomialTermExpression((1, self.m.a[3])), + MonomialTermExpression((1, self.m.a[4])), + MonomialTermExpression((1, self.m.a[5])), + ] + ), + ) def test_summation2(self): e = sum_product(self.m.p, self.m.a) - self.assertEqual( e(), 25 ) - self.assertIs(type(e), LinearExpression) - self.assertEqual( id(self.m.a[1]), id(e.linear_vars[0]) ) - self.assertEqual( id(self.m.a[2]), id(e.linear_vars[1]) ) - self.assertEqual(e.size(), 16) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((self.m.p[1], self.m.a[1])), + MonomialTermExpression((self.m.p[2], self.m.a[2])), + MonomialTermExpression((self.m.p[3], self.m.a[3])), + MonomialTermExpression((self.m.p[4], self.m.a[4])), + MonomialTermExpression((self.m.p[5], self.m.a[5])), + ] + ), + ) def test_summation3(self): e = sum_product(self.m.q, self.m.a) - self.assertEqual( e(), 75 ) - self.assertIs(type(e), LinearExpression) - self.assertEqual( id(self.m.a[1]), id(e.linear_vars[0]) ) - self.assertEqual( id(self.m.a[2]), id(e.linear_vars[1]) ) - self.assertEqual(e.size(), 16) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((3, self.m.a[1])), + MonomialTermExpression((3, self.m.a[2])), + MonomialTermExpression((3, self.m.a[3])), + MonomialTermExpression((3, self.m.a[4])), + MonomialTermExpression((3, self.m.a[5])), + ] + ), + ) def test_summation4(self): e = sum_product(self.m.a, self.m.b) - self.assertEqual( e(), 250 ) - self.assertIs(type(e), SumExpression) - self.assertEqual( id(self.m.a[1]), id(e.arg(0).arg(0)) ) - self.assertEqual( id(self.m.a[2]), id(e.arg(1).arg(0)) ) - self.assertEqual(e.size(), 16) + assertExpressionsEqual( + self, + e, + SumExpression( + [ + ProductExpression((self.m.a[1], self.m.b[1])), + ProductExpression((self.m.a[2], self.m.b[2])), + ProductExpression((self.m.a[3], self.m.b[3])), + ProductExpression((self.m.a[4], self.m.b[4])), + ProductExpression((self.m.a[5], self.m.b[5])), + ] + ), + ) def test_summation5(self): e = sum_product(self.m.b, denom=self.m.a) - self.assertEqual( e(), 10 ) - self.assertIs(type(e), SumExpression) - self.assertEqual(e.size(), 16) + assertExpressionsEqual( + self, + e, + SumExpression( + [ + DivisionExpression((self.m.b[1], self.m.a[1])), + DivisionExpression((self.m.b[2], self.m.a[2])), + DivisionExpression((self.m.b[3], self.m.a[3])), + DivisionExpression((self.m.b[4], self.m.a[4])), + DivisionExpression((self.m.b[5], self.m.a[5])), + ] + ), + ) def test_summation6(self): e = sum_product(self.m.a, denom=self.m.p) - self.assertEqual( e(), 25 ) - self.assertIs(type(e), LinearExpression) - self.assertEqual( id(self.m.a[1]), id(e.linear_vars[0]) ) - self.assertEqual( id(self.m.a[2]), id(e.linear_vars[1]) ) - self.assertEqual(e.size(), 26) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression( + (NPV_DivisionExpression((1, self.m.p[1])), self.m.a[1]) + ), + MonomialTermExpression( + (NPV_DivisionExpression((1, self.m.p[2])), self.m.a[2]) + ), + MonomialTermExpression( + (NPV_DivisionExpression((1, self.m.p[3])), self.m.a[3]) + ), + MonomialTermExpression( + (NPV_DivisionExpression((1, self.m.p[4])), self.m.a[4]) + ), + MonomialTermExpression( + (NPV_DivisionExpression((1, self.m.p[5])), self.m.a[5]) + ), + ] + ), + ) def test_summation7(self): e = sum_product(self.m.p, self.m.q, index=self.m.I) - self.assertEqual( e(), 15 ) - self.assertIs(type(e), SumExpression) - self.assertEqual( e.nargs(), 5) - self.assertEqual(e.size(), 16) + assertExpressionsEqual( + self, + e, + NPV_SumExpression( + [ + NPV_ProductExpression((self.m.p[1], 3)), + NPV_ProductExpression((self.m.p[2], 3)), + NPV_ProductExpression((self.m.p[3], 3)), + NPV_ProductExpression((self.m.p[4], 3)), + NPV_ProductExpression((self.m.p[5], 3)), + ] + ), + ) def test_summation_compression(self): e1 = sum_product(self.m.a) e2 = sum_product(self.m.b) - e = e1+e2 - self.assertEqual( e(), 75 ) - self.assertIs(type(e), SumExpression) - self.assertEqual( e.nargs(), 2) - self.assertEqual(e.size(), 33) + e = e1 + e2 + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, self.m.a[1])), + MonomialTermExpression((1, self.m.a[2])), + MonomialTermExpression((1, self.m.a[3])), + MonomialTermExpression((1, self.m.a[4])), + MonomialTermExpression((1, self.m.a[5])), + MonomialTermExpression((1, self.m.b[1])), + MonomialTermExpression((1, self.m.b[2])), + MonomialTermExpression((1, self.m.b[3])), + MonomialTermExpression((1, self.m.b[4])), + MonomialTermExpression((1, self.m.b[5])), + ] + ), + ) class TestSumExpression(unittest.TestCase): - def setUp(self): # This class tests the Pyomo 5.x expression trees @@ -3605,88 +3940,170 @@ def setUp(self): def tearDown(self): self.m = None + def test_deprecation(self): + with LoggingIntercept() as LOG: + e = quicksum((self.m.a[i] for i in self.m.a), linear=False) + self.assertRegex( + LOG.getvalue().replace('\n', ' '), + r"DEPRECATED: The quicksum\(linear=...\) argument is deprecated " + r"and ignored.", + ) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, self.m.a[1])), + MonomialTermExpression((1, self.m.a[2])), + MonomialTermExpression((1, self.m.a[3])), + MonomialTermExpression((1, self.m.a[4])), + MonomialTermExpression((1, self.m.a[5])), + ] + ), + ) + def test_summation1(self): - e = quicksum((self.m.a[i] for i in self.m.a), linear=False) - self.assertEqual( e(), 25 ) - self.assertIs(type(e), SumExpression) - self.assertEqual( id(self.m.a[1]), id(e.arg(0)) ) - self.assertEqual( id(self.m.a[2]), id(e.arg(1)) ) - self.assertEqual(e.size(), 6) - # - e = quicksum(self.m.a[i] for i in self.m.a) - self.assertEqual( e(), 25 ) - self.assertIs(type(e), LinearExpression) + e = quicksum((self.m.a[i] for i in self.m.a)) + self.assertEqual(e(), 25) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((1, self.m.a[1])), + MonomialTermExpression((1, self.m.a[2])), + MonomialTermExpression((1, self.m.a[3])), + MonomialTermExpression((1, self.m.a[4])), + MonomialTermExpression((1, self.m.a[5])), + ] + ), + ) def test_summation2(self): - e = quicksum((self.m.p[i]*self.m.a[i] for i in self.m.a), linear=False) - self.assertEqual( e(), 25 ) - self.assertIs(type(e), SumExpression) - self.assertEqual( id(self.m.a[1]), id(e.arg(0).arg(1)) ) - self.assertEqual( id(self.m.a[2]), id(e.arg(1).arg(1)) ) - self.assertEqual(e.size(), 16) - # - e = quicksum(self.m.p[i]*self.m.a[i] for i in self.m.a) - self.assertEqual( e(), 25 ) - self.assertIs(type(e), LinearExpression) + e = quicksum(self.m.p[i] * self.m.a[i] for i in self.m.a) + self.assertEqual(e(), 25) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((self.m.p[1], self.m.a[1])), + MonomialTermExpression((self.m.p[2], self.m.a[2])), + MonomialTermExpression((self.m.p[3], self.m.a[3])), + MonomialTermExpression((self.m.p[4], self.m.a[4])), + MonomialTermExpression((self.m.p[5], self.m.a[5])), + ] + ), + ) def test_summation3(self): - e = quicksum((self.m.q[i]*self.m.a[i] for i in self.m.a), linear=False) - self.assertEqual( e(), 75 ) - self.assertIs(type(e), SumExpression) - self.assertEqual( id(self.m.a[1]), id(e.arg(0).arg(1)) ) - self.assertEqual( id(self.m.a[2]), id(e.arg(1).arg(1)) ) - self.assertEqual(e.size(), 16) - # - e = quicksum(self.m.q[i]*self.m.a[i] for i in self.m.a) - self.assertEqual( e(), 75 ) - self.assertIs(type(e), LinearExpression) + e = quicksum(self.m.q[i] * self.m.a[i] for i in self.m.a) + self.assertEqual(e(), 75) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression((3, self.m.a[1])), + MonomialTermExpression((3, self.m.a[2])), + MonomialTermExpression((3, self.m.a[3])), + MonomialTermExpression((3, self.m.a[4])), + MonomialTermExpression((3, self.m.a[5])), + ] + ), + ) def test_summation4(self): - e = quicksum(self.m.a[i]*self.m.b[i] for i in self.m.a) - self.assertEqual( e(), 250 ) - self.assertIs(type(e), SumExpression) - self.assertEqual( id(self.m.a[1]), id(e.arg(0).arg(0)) ) - self.assertEqual( id(self.m.a[2]), id(e.arg(1).arg(0)) ) - self.assertEqual(e.size(), 16) + e = quicksum(self.m.a[i] * self.m.b[i] for i in self.m.a) + self.assertEqual(e(), 250) + assertExpressionsEqual( + self, + e, + SumExpression( + [ + ProductExpression((self.m.a[1], self.m.b[1])), + ProductExpression((self.m.a[2], self.m.b[2])), + ProductExpression((self.m.a[3], self.m.b[3])), + ProductExpression((self.m.a[4], self.m.b[4])), + ProductExpression((self.m.a[5], self.m.b[5])), + ] + ), + ) def test_summation5(self): - e = quicksum(self.m.b[i]/self.m.a[i] for i in self.m.a) - self.assertEqual( e(), 10 ) - self.assertIs(type(e), SumExpression) - self.assertEqual(e.size(), 16) + e = quicksum(self.m.b[i] / self.m.a[i] for i in self.m.a) + self.assertEqual(e(), 10) + assertExpressionsEqual( + self, + e, + SumExpression( + [ + DivisionExpression((self.m.b[1], self.m.a[1])), + DivisionExpression((self.m.b[2], self.m.a[2])), + DivisionExpression((self.m.b[3], self.m.a[3])), + DivisionExpression((self.m.b[4], self.m.a[4])), + DivisionExpression((self.m.b[5], self.m.a[5])), + ] + ), + ) def test_summation6(self): - e = quicksum((self.m.a[i]/self.m.p[i] for i in self.m.a), linear=False) - self.assertEqual( e(), 25 ) - self.assertIs(type(e), SumExpression) - self.assertEqual( id(self.m.a[1]), id(e.arg(0).arg(1)) ) - self.assertEqual( id(self.m.a[2]), id(e.arg(1).arg(1)) ) - self.assertEqual(e.size(), 26) - # - e = quicksum(self.m.a[i]/self.m.p[i] for i in self.m.a) - self.assertEqual( e(), 25 ) - self.assertIs(type(e), LinearExpression) + e = quicksum(self.m.a[i] / self.m.p[i] for i in self.m.a) + self.assertEqual(e(), 25) + assertExpressionsEqual( + self, + e, + LinearExpression( + [ + MonomialTermExpression( + (NPV_DivisionExpression((1, self.m.p[1])), self.m.a[1]) + ), + MonomialTermExpression( + (NPV_DivisionExpression((1, self.m.p[2])), self.m.a[2]) + ), + MonomialTermExpression( + (NPV_DivisionExpression((1, self.m.p[3])), self.m.a[3]) + ), + MonomialTermExpression( + (NPV_DivisionExpression((1, self.m.p[4])), self.m.a[4]) + ), + MonomialTermExpression( + (NPV_DivisionExpression((1, self.m.p[5])), self.m.a[5]) + ), + ] + ), + ) def test_summation7(self): - e = quicksum((self.m.p[i]*self.m.q[i] for i in self.m.I), linear=False) - self.assertEqual( e(), 15 ) - self.assertIs(type(e), SumExpression) - self.assertEqual( e.nargs(), 5) - self.assertEqual(e.size(), 16) - # - e = quicksum(self.m.p[i]*self.m.q[i] for i in self.m.I) - self.assertEqual( e(), 15 ) - self.assertIs(type(e), SumExpression) - + e = quicksum((self.m.p[i] * self.m.q[i] for i in self.m.I), linear=False) + self.assertEqual(e(), 15) + assertExpressionsEqual( + self, + e, + NPV_SumExpression( + [ + NPV_ProductExpression((self.m.p[1], 3)), + NPV_ProductExpression((self.m.p[2], 3)), + NPV_ProductExpression((self.m.p[3], 3)), + NPV_ProductExpression((self.m.p[4], 3)), + NPV_ProductExpression((self.m.p[5], 3)), + ] + ), + ) + def test_quicksum_reject_noniterable(self): with LoggingIntercept() as LOG: with self.assertRaisesRegex(TypeError, "'int' object is not iterable"): quicksum(1) - self.assertEqual(LOG.getvalue(), 'The argument `args` to quicksum() is not iterable!\n') + self.assertEqual( + LOG.getvalue(), 'The argument `args` to quicksum() is not iterable!\n' + ) def test_quicksum_exception_exposure(self): ex0 = Exception() - def f(): raise ex0 + + def f(): + raise ex0 with self.assertRaises(Exception) as cm: quicksum((f() for i in [1, 2, 3]), linear=None) @@ -3706,7 +4123,6 @@ def f(): raise ex0 class TestCloneExpression(unittest.TestCase): - def setUp(self): # This class tests the Pyomo 5.x expression trees @@ -3729,7 +4145,7 @@ def test_numeric(self): # total = counter.count - start self.assertEqual(total, 2) - + def test_Expression(self): # # Identify variables when there are duplicates @@ -3737,19 +4153,19 @@ def test_Expression(self): m = ConcreteModel() m.a = Var(initialize=1) m.b = Var(initialize=2) - m.e = Expression(expr=3*m.a) - m.E = Expression([0,1], initialize={0:3*m.a, 1:4*m.b}) + m.e = Expression(expr=3 * m.a) + m.E = Expression([0, 1], initialize={0: 3 * m.a, 1: 4 * m.b}) with clone_counter() as counter: start = counter.count - expr1 = m.e + m.E[1] + expr1 = m.e + m.E[1] expr2 = expr1.clone() - self.assertEqual( expr1(), 11 ) - self.assertEqual( expr2(), 11 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(0)), id(expr2.arg(0)) ) - self.assertEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) + self.assertEqual(expr1(), 11) + self.assertEqual(expr2(), 11) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertEqual(id(expr1.arg(0)), id(expr2.arg(0))) + self.assertEqual(id(expr1.arg(1)), id(expr2.arg(1))) # total = counter.count - start self.assertEqual(total, 1) @@ -3761,19 +4177,19 @@ def test_ExpressionX(self): m = ConcreteModel() m.a = Var(initialize=1) m.b = Var(initialize=2) - m.e = Expression(expr=3*m.a) - m.E = Expression([0,1], initialize={0:3*m.a, 1:4*m.b}) + m.e = Expression(expr=3 * m.a) + m.E = Expression([0, 1], initialize={0: 3 * m.a, 1: 4 * m.b}) with clone_counter() as counter: start = counter.count - expr1 = m.e + m.E[1] + expr1 = m.e + m.E[1] expr2 = copy.deepcopy(expr1) - self.assertEqual( expr1(), 11 ) - self.assertEqual( expr2(), 11 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertNotEqual( id(expr1.arg(0)), id(expr2.arg(0)) ) - self.assertNotEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) + self.assertEqual(expr1(), 11) + self.assertEqual(expr2(), 11) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertNotEqual(id(expr1.arg(0)), id(expr2.arg(0))) + self.assertNotEqual(id(expr1.arg(1)), id(expr2.arg(1))) # total = counter.count - start self.assertEqual(total, 0) @@ -3783,45 +4199,45 @@ def test_SumExpression(self): start = counter.count expr1 = self.m.a + self.m.b expr2 = expr1.clone() - self.assertEqual( expr1(), 15 ) - self.assertEqual( expr2(), 15 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(0)), id(expr2.arg(0)) ) - self.assertEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) + self.assertEqual(expr1(), 15) + self.assertEqual(expr2(), 15) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertIs(expr1.arg(0).arg(1), expr2.arg(0).arg(1)) + self.assertIs(expr1.arg(1).arg(1), expr2.arg(1).arg(1)) expr1 += self.m.b - self.assertEqual( expr1(), 25 ) - self.assertEqual( expr2(), 15 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) - self.assertEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) + self.assertEqual(expr1(), 25) + self.assertEqual(expr2(), 15) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertIs(expr1.arg(0).arg(1), expr2.arg(0).arg(1)) + self.assertIs(expr1.arg(1).arg(1), expr2.arg(1).arg(1)) # total = counter.count - start self.assertEqual(total, 1) - + def test_SumExpressionX(self): with clone_counter() as counter: start = counter.count expr1 = self.m.a + self.m.b expr2 = copy.deepcopy(expr1) - self.assertEqual( expr1(), 15 ) - self.assertEqual( expr2(), 15 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertNotEqual( id(expr1.arg(0)), id(expr2.arg(0)) ) - self.assertNotEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) + self.assertEqual(expr1(), 15) + self.assertEqual(expr2(), 15) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertNotEqual(id(expr1.arg(0)), id(expr2.arg(0))) + self.assertNotEqual(id(expr1.arg(1)), id(expr2.arg(1))) expr1 += self.m.b - self.assertEqual( expr1(), 25 ) - self.assertEqual( expr2(), 15 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertNotEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) - self.assertNotEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) + self.assertEqual(expr1(), 25) + self.assertEqual(expr2(), 15) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertNotEqual(id(expr1.arg(1)), id(expr2.arg(1))) + self.assertNotEqual(id(expr1.arg(1)), id(expr2.arg(1))) # total = counter.count - start self.assertEqual(total, 0) - + def test_SumExpressionY(self): self.m = ConcreteModel() A = range(5) @@ -3832,55 +4248,55 @@ def test_SumExpressionY(self): start = counter.count expr1 = quicksum(self.m.a[i] for i in self.m.a) expr2 = copy.deepcopy(expr1) - self.assertEqual( expr1(), 25 ) - self.assertEqual( expr2(), 25 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertNotEqual( id(expr1.linear_vars[0]), id(expr2.linear_vars[0]) ) - self.assertNotEqual( id(expr1.linear_vars[1]), id(expr2.linear_vars[1]) ) + self.assertEqual(expr1(), 25) + self.assertEqual(expr2(), 25) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertNotEqual(id(expr1.linear_vars[0]), id(expr2.linear_vars[0])) + self.assertNotEqual(id(expr1.linear_vars[1]), id(expr2.linear_vars[1])) expr1 += self.m.b - self.assertEqual( expr1(), 35 ) - self.assertEqual( expr2(), 25 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) + self.assertEqual(expr1(), 35) + self.assertEqual(expr2(), 25) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) # total = counter.count - start self.assertEqual(total, 0) - + def test_ProductExpression_mult(self): with clone_counter() as counter: start = counter.count # expr1 = self.m.a * self.m.b expr2 = expr1.clone() - self.assertEqual( expr1(), 50 ) - self.assertEqual( expr2(), 50 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertEqual( id(expr1._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(0)), id(expr2.arg(0)) ) - self.assertEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) + self.assertEqual(expr1(), 50) + self.assertEqual(expr2(), 50) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertEqual(id(expr1._args_), id(expr2._args_)) + self.assertEqual(id(expr1.arg(0)), id(expr2.arg(0))) + self.assertEqual(id(expr1.arg(1)), id(expr2.arg(1))) expr1 *= self.m.b - self.assertEqual( expr1(), 500 ) - self.assertEqual( expr2(), 50 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(0)._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) - self.assertEqual( id(expr1.arg(0).arg(0)), id(expr2.arg(0)) ) - self.assertEqual( id(expr1.arg(0).arg(1)), id(expr2.arg(1)) ) + self.assertEqual(expr1(), 500) + self.assertEqual(expr2(), 50) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertEqual(id(expr1.arg(0)._args_), id(expr2._args_)) + self.assertEqual(id(expr1.arg(1)), id(expr2.arg(1))) + self.assertEqual(id(expr1.arg(0).arg(0)), id(expr2.arg(0))) + self.assertEqual(id(expr1.arg(0).arg(1)), id(expr2.arg(1))) expr1 = self.m.a * (self.m.b + self.m.a) expr2 = expr1.clone() - self.assertEqual( expr1(), 75 ) - self.assertEqual( expr2(), 75 ) + self.assertEqual(expr1(), 75) + self.assertEqual(expr2(), 75) # Note that since one of the args is a sum expression, the _args_ # in the sum is a *list*, which will be duplicated by deepcopy. # This will cause the two args in the Product to be different. - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(0)), id(expr2.arg(0)) ) - self.assertNotEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertEqual(id(expr1.arg(0)), id(expr2.arg(0))) + self.assertNotEqual(id(expr1.arg(1)), id(expr2.arg(1))) # total = counter.count - start self.assertEqual(total, 2) @@ -3891,34 +4307,34 @@ def test_ProductExpression_div(self): # expr1 = self.m.a / self.m.b expr2 = expr1.clone() - self.assertEqual( expr1(), 0.5 ) - self.assertEqual( expr2(), 0.5 ) - self.assertNotEqual( id(expr1), id(expr2) ) + self.assertEqual(expr1(), 0.5) + self.assertEqual(expr2(), 0.5) + self.assertNotEqual(id(expr1), id(expr2)) # Note: _args_ are the same because tuples are not copied - self.assertEqual( id(expr1._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(0)), id(expr2.arg(0)) ) - self.assertEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) + self.assertEqual(id(expr1._args_), id(expr2._args_)) + self.assertEqual(id(expr1.arg(0)), id(expr2.arg(0))) + self.assertEqual(id(expr1.arg(1)), id(expr2.arg(1))) expr1 /= self.m.b - self.assertEqual( expr1(), 0.05 ) - self.assertEqual( expr2(), 0.5 ) - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(0)._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(0).arg(0)), id(expr2.arg(0)) ) - self.assertEqual( id(expr1.arg(0).arg(1)), id(expr2.arg(1)) ) + self.assertEqual(expr1(), 0.05) + self.assertEqual(expr2(), 0.5) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertEqual(id(expr1.arg(0)._args_), id(expr2._args_)) + self.assertEqual(id(expr1.arg(0).arg(0)), id(expr2.arg(0))) + self.assertEqual(id(expr1.arg(0).arg(1)), id(expr2.arg(1))) expr1 = self.m.a / (self.m.b + self.m.a) expr2 = expr1.clone() - self.assertEqual( expr1(), 1/3. ) - self.assertEqual( expr2(), 1/3. ) + self.assertEqual(expr1(), 1 / 3.0) + self.assertEqual(expr2(), 1 / 3.0) # Note that since one of the args is a sum expression, the _args_ # in the sum is a *list*, which will be duplicated by deepcopy. # This will cause the two args in the Product to be different. - self.assertNotEqual( id(expr1), id(expr2) ) - self.assertNotEqual( id(expr1._args_), id(expr2._args_) ) - self.assertEqual( id(expr1.arg(0)), id(expr2.arg(0)) ) - self.assertNotEqual( id(expr1.arg(1)), id(expr2.arg(1)) ) + self.assertNotEqual(id(expr1), id(expr2)) + self.assertNotEqual(id(expr1._args_), id(expr2._args_)) + self.assertEqual(id(expr1.arg(0)), id(expr2.arg(0))) + self.assertNotEqual(id(expr1.arg(1)), id(expr2.arg(1))) # total = counter.count - start self.assertEqual(total, 2) @@ -3972,12 +4388,9 @@ def test_productOfExpressions(self): self.assertEqual(expr1.arg(1).nargs(), 2) self.assertEqual(expr2.arg(1).nargs(), 2) - self.assertIs( expr1.arg(0).arg(0), - expr2.arg(0).arg(0) ) - self.assertIs( expr1.arg(0).arg(1), - expr2.arg(0).arg(1) ) - self.assertIs( expr1.arg(1).arg(0), - expr2.arg(1).arg(0) ) + self.assertIs(expr1.arg(0).arg(0).arg(1), expr2.arg(0).arg(0).arg(1)) + self.assertIs(expr1.arg(0).arg(1).arg(1), expr2.arg(0).arg(1).arg(1)) + self.assertIs(expr1.arg(1).arg(0).arg(1), expr2.arg(1).arg(0).arg(1)) expr1 *= self.m.b self.assertEqual(expr1(), 1500) @@ -4016,11 +4429,11 @@ def test_productOfExpressions_div(self): self.assertEqual(expr1.arg(1).nargs(), 2) self.assertEqual(expr2.arg(1).nargs(), 2) - self.assertIs( expr1.arg(0).arg(0), expr2.arg(0).arg(0) ) - self.assertIs( expr1.arg(0).arg(1), expr2.arg(0).arg(1) ) + self.assertIs(expr1.arg(0).arg(0).arg(1), expr2.arg(0).arg(0).arg(1)) + self.assertIs(expr1.arg(0).arg(1).arg(1), expr2.arg(0).arg(1).arg(1)) expr1 /= self.m.b - self.assertAlmostEqual(expr1(), .15) + self.assertAlmostEqual(expr1(), 0.15) self.assertAlmostEqual(expr2(), 1.5) self.assertNotEqual(id(expr1.arg(0)), id(expr2.arg(0))) self.assertNotEqual(id(expr1.arg(1)), id(expr2.arg(1))) @@ -4040,23 +4453,14 @@ def test_Expr_if(self): # expr1 = Expr_if(IF=self.m.a + self.m.b < 20, THEN=self.m.a, ELSE=self.m.b) expr2 = expr1.clone() - self.assertNotEqual(id(expr1), id(expr2)) - self.assertEqual(expr1(), value(self.m.a)) - self.assertEqual(expr2(), value(self.m.a)) - self.assertNotEqual(id(expr1._if), id(expr2._if)) - self.assertEqual(id(expr1._then), id(expr2._then)) - self.assertEqual(id(expr1._else), id(expr2._else)) - self.assertEqual(expr1._if(), expr2._if()) - self.assertEqual(expr1._then(), expr2._then()) - self.assertEqual(expr1._else(), expr2._else()) - # - total = counter.count - start - self.assertEqual(total, 1) + assertExpressionsStructurallyEqual(self, expr1, expr2) + self.assertIsNot(expr1, expr2) + self.assertIsNot(expr1.arg(0), expr2.arg(0)) def test_LinearExpression(self): m = ConcreteModel() m.x = Var() - m.y = Var([1,2]) + m.y = Var([1, 2]) e = LinearExpression() f = e.clone() self.assertIsNot(e, f) @@ -4070,7 +4474,8 @@ def test_LinearExpression(self): self.assertEqual(f.linear_vars, []) e = LinearExpression( - constant=5, linear_vars=[m.x, m.y[1]], linear_coefs=[10, 20]) + constant=5, linear_vars=[m.x, m.y[1]], linear_coefs=[10, 20] + ) f = e.clone() self.assertIsNot(e, f) self.assertIsNot(e.linear_coefs, f.linear_coefs) @@ -4088,12 +4493,12 @@ def test_getitem(self): start = counter.count # m = ConcreteModel() - m.I = RangeSet(1,9) - m.x = Var(m.I, initialize=lambda m,i: i+1) - m.P = Param(m.I, initialize=lambda m,i: 10-i, mutable=True) + m.I = RangeSet(1, 9) + m.x = Var(m.I, initialize=lambda m, i: i + 1) + m.P = Param(m.I, initialize=lambda m, i: 10 - i, mutable=True) t = IndexTemplate(m.I) - e = m.x[t+m.P[t+1]] + 3 + e = m.x[t + m.P[t + 1]] + 3 e_ = e.clone() self.assertEqual("x[{I} + P[{I} + 1]] + 3", str(e_)) # @@ -4108,7 +4513,7 @@ def test_other(self): model = ConcreteModel() model.a = Var() model.x = ExternalFunction(library='foo.so', function='bar') - e = model.x(2*model.a, 1, "foo", []) + e = model.x(2 * model.a, 1, "foo", []) e_ = e.clone() self.assertEqual(type(e_), type(e)) self.assertEqual(type(e_.arg(0)), type(e.arg(0))) @@ -4156,12 +4561,12 @@ def test_sin(self): # PotentiallyVariable - Expr contains one or more variables # class TestIsFixedIsConstant(unittest.TestCase): - def setUp(self): # This class tests the Pyomo 5.x expression trees def d_fn(model): - return model.c+model.c + return model.c + model.c + self.model = AbstractModel() self.model.a = Var(initialize=1.0) self.model.b = Var(initialize=2.0) @@ -4233,7 +4638,7 @@ def test_linear_sum(self): with linear_expression() as e: e += 1 - self.assertEqual(len(e.linear_vars), 0) + self.assertIs(e.__class__, NPV_SumExpression) self.assertEqual(e.is_fixed(), True) def test_simple_product(self): @@ -4335,17 +4740,17 @@ def test_polynomial_external_func(self): model.p = Param(initialize=1, mutable=True) model.x = ExternalFunction(library='foo.so', function='bar') - expr = model.x(2*model.a, 1, "foo", []) + expr = model.x(2 * model.a, 1, "foo", []) self.assertEqual(expr.polynomial_degree(), None) - expr = model.x(2*model.p, 1, "foo", []) + expr = model.x(2 * model.p, 1, "foo", []) self.assertEqual(expr.polynomial_degree(), 0) def test_getitem(self): m = ConcreteModel() - m.I = RangeSet(1,9) - m.x = Var(m.I, initialize=lambda m,i: i+1) - m.P = Param(m.I, initialize=lambda m,i: 10-i, mutable=True) + m.I = RangeSet(1, 9) + m.x = Var(m.I, initialize=lambda m, i: i + 1) + m.P = Param(m.I, initialize=lambda m, i: 10 - i, mutable=True) t = IndexTemplate(m.I) e = m.x[t] @@ -4353,7 +4758,7 @@ def test_getitem(self): self.assertEqual(e.is_potentially_variable(), True) self.assertEqual(e.is_fixed(), False) - e = m.x[t+m.P[t+1]] + 3 + e = m.x[t + m.P[t + 1]] + 3 self.assertEqual(e.is_constant(), False) self.assertEqual(e.is_potentially_variable(), True) self.assertEqual(e.is_fixed(), False) @@ -4365,14 +4770,14 @@ def test_getitem(self): self.assertEqual(e.is_potentially_variable(), True) self.assertEqual(e.is_fixed(), True) - e = m.x[t+m.P[t+1]] + 3 + e = m.x[t + m.P[t + 1]] + 3 self.assertEqual(e.is_constant(), False) self.assertEqual(e.is_potentially_variable(), True) self.assertEqual(e.is_fixed(), True) - - e = m.P[t+1] + 3 + e = m.P[t + 1] + 3 self.assertEqual(e.is_constant(), False) + self.assertEqual(m.P[t + 1].is_potentially_variable(), False) self.assertEqual(e.is_potentially_variable(), False) self.assertEqual(e.is_fixed(), True) @@ -4390,7 +4795,7 @@ def test_nonpolynomial_abs(self): self.assertEqual(expr2.is_constant(), False) self.assertEqual(expr2.is_potentially_variable(), True) - expr3 = self.instance.a * ( self.instance.b + abs(self.instance.b) ) + expr3 = self.instance.a * (self.instance.b + abs(self.instance.b)) self.assertEqual(expr3.is_fixed(), False) self.assertEqual(expr3.is_constant(), False) self.assertEqual(expr3.is_potentially_variable(), True) @@ -4477,17 +4882,17 @@ def test_nonpolynomial_pow(self): self.assertEqual(expr.is_constant(), False) self.assertEqual(expr.is_potentially_variable(), True) - expr = pow(m.a*m.a, 2) + expr = pow(m.a * m.a, 2) self.assertEqual(expr.is_fixed(), False) self.assertEqual(expr.is_constant(), False) self.assertEqual(expr.is_potentially_variable(), True) - expr = pow(m.a*m.a, 2.1) + expr = pow(m.a * m.a, 2.1) self.assertEqual(expr.is_fixed(), False) self.assertEqual(expr.is_constant(), False) self.assertEqual(expr.is_potentially_variable(), True) - expr = pow(m.a*m.a, -1) + expr = pow(m.a * m.a, -1) self.assertEqual(expr.is_fixed(), False) self.assertEqual(expr.is_constant(), False) self.assertEqual(expr.is_potentially_variable(), True) @@ -4499,14 +4904,13 @@ def test_nonpolynomial_pow(self): expr = pow(2**m.a, 0) self.assertEqual(is_fixed(expr), True) - self.assertEqual(is_constant(expr), True) - self.assertEqual(expr, 1) - self.assertEqual(as_numeric(expr).polynomial_degree(), 0) + self.assertEqual(is_constant(expr), False) + self.assertEqual(is_potentially_variable(expr), True) def test_Expr_if(self): m = self.instance - expr = Expr_if(1,m.a,m.e) + expr = Expr_if(1, m.a, m.e) self.assertEqual(expr.is_fixed(), False) self.assertEqual(expr.is_constant(), False) self.assertEqual(expr.is_potentially_variable(), True) @@ -4516,19 +4920,19 @@ def test_Expr_if(self): self.assertEqual(expr.is_potentially_variable(), True) m.a.fixed = False - expr = Expr_if(0,m.a,m.e) + expr = Expr_if(0, m.a, m.e) self.assertEqual(expr.is_fixed(), True) self.assertEqual(expr.is_constant(), True) # BUG - #self.assertEqual(expr.is_potentially_variable(), False) + # self.assertEqual(expr.is_potentially_variable(), False) m.a.fixed = True self.assertEqual(expr.is_fixed(), True) self.assertEqual(expr.is_constant(), True) # BUG - #self.assertEqual(expr.is_potentially_variable(), False) + # self.assertEqual(expr.is_potentially_variable(), False) m.a.fixed = False - expr = Expr_if(m.a,m.b,m.b) + expr = Expr_if(m.a, m.b, m.b) self.assertEqual(expr.is_fixed(), False) self.assertEqual(expr.is_constant(), False) self.assertEqual(expr.is_potentially_variable(), True) @@ -4542,7 +4946,7 @@ def test_LinearExpr(self): m = self.instance expr = m.a + m.b - self.assertIs(type(expr), SumExpression) + self.assertIs(type(expr), LinearExpression) self.assertEqual(expr.is_fixed(), False) self.assertEqual(expr.is_constant(), False) self.assertEqual(expr.is_potentially_variable(), True) @@ -4565,7 +4969,9 @@ def test_LinearExpr(self): m.a.unfix() expr -= m.a - self.assertEqual(expr.is_fixed(), False) # With a simple tree, the terms do not cancel + self.assertEqual( + expr.is_fixed(), False + ) # With a simple tree, the terms do not cancel self.assertEqual(expr.is_constant(), False) self.assertEqual(expr.is_potentially_variable(), True) @@ -4589,7 +4995,7 @@ def test_expression(self): self.assertEqual(e.is_potentially_variable(), True) self.assertEqual(is_potentially_variable(e), True) - e = m.x**2/(m.x + 1) + e = m.x**2 / (m.x + 1) self.assertEqual(e.is_potentially_variable(), True) self.assertEqual(is_potentially_variable(e), True) @@ -4605,123 +5011,112 @@ def test_external_func(self): self.assertEqual(e.is_potentially_variable(), False) -# NOTE: These are fairly weak coverage tests. +# NOTE: These are fairly weak coverage tests. # It's probably worth confirming the final linear expression that is generated. class TestLinearExpression(unittest.TestCase): - def test_init(self): m = ConcreteModel() m.x = Var() m.y = Var() - e = LinearExpression( - constant=5, linear_vars=[m.x, m.y], linear_coefs=[2,3]) - self.assertEqual(e._args_cache_, []) + + e = LinearExpression(constant=5, linear_vars=[m.x, m.y], linear_coefs=[2, 3]) self.assertEqual(e.constant, 5) self.assertEqual(e.linear_vars, [m.x, m.y]) self.assertEqual(e.linear_coefs, [2, 3]) - args = [10, - MonomialTermExpression((4, m.y)), - MonomialTermExpression((5, m.x))] + f = LinearExpression([5, 2 * m.x, 3 * m.y]) + self.assertEqual(e.constant, 5) + self.assertEqual(e.linear_vars, [m.x, m.y]) + self.assertEqual(e.linear_coefs, [2, 3]) + + assertExpressionsEqual(self, e, f) + + args = [10, MonomialTermExpression((4, m.y)), MonomialTermExpression((5, m.x))] with LoggingIntercept() as OUT: e = LinearExpression(args) self.assertEqual(OUT.getvalue(), "") - self.assertEqual(e._args_cache_, args) + self.assertIs(e._args_, args) self.assertEqual(e.constant, 10) self.assertEqual(e.linear_vars, [m.y, m.x]) self.assertEqual(e.linear_coefs, [4, 5]) - with LoggingIntercept() as OUT: - e = LinearExpression([20, 6, 7, m.x, m.y]) - self.assertIn("LinearExpression has been updated to expect args= " - "to be a constant followed by MonomialTermExpressions", - OUT.getvalue().replace("\n", " ")) - self.assertIsNotNone(e._args_cache_) - self.assertEqual(len(e._args_cache_), 3) - self.assertEqual(e._args_cache_[0], 20) - self.assertIs(e._args_cache_[1].__class__, MonomialTermExpression) - self.assertEqual(e._args_cache_[1].args, (6, m.x)) - self.assertEqual(e._args_cache_[2].args, (7, m.y)) - self.assertEqual(e.constant, 20) - self.assertEqual(e.linear_vars, [m.x, m.y]) - self.assertEqual(e.linear_coefs, [6, 7]) - - with LoggingIntercept() as OUT: - e = LinearExpression([20, 6, 7, 8, m.x, m.y, m.x]) - self.assertIn("LinearExpression has been updated to expect args= " - "to be a constant followed by MonomialTermExpressions", - OUT.getvalue().replace("\n", " ")) - self.assertIsNotNone(e._args_cache_) - self.assertEqual(len(e._args_cache_), 4) - self.assertEqual(e._args_cache_[0], 20) - self.assertIs(e._args_cache_[1].__class__, MonomialTermExpression) - self.assertEqual(e._args_cache_[1].args, (6, m.x)) - self.assertEqual(e._args_cache_[2].args, (7, m.y)) - self.assertEqual(e._args_cache_[3].args, (8, m.x)) - self.assertEqual(e.constant, 20) - self.assertEqual(e.linear_vars, [m.x, m.y, m.x]) - self.assertEqual(e.linear_coefs, [6, 7, 8]) - def test_to_string(self): m = ConcreteModel() m.x = Var() m.y = Var() e = LinearExpression() self.assertEqual(e.to_string(), "0") - e = LinearExpression(constant=0, - linear_coefs=[-1, 1, -2, 2], - linear_vars=[m.x, m.y, m.x, m.y]) + e = LinearExpression( + constant=0, linear_coefs=[-1, 1, -2, 2], linear_vars=[m.x, m.y, m.x, m.y] + ) self.assertEqual(e.to_string(), "- x + y - 2*x + 2*y") - e = LinearExpression(constant=10, - linear_coefs=[-1, 1, -2, 2], - linear_vars=[m.x, m.y, m.x, m.y]) + e = LinearExpression( + constant=10, linear_coefs=[-1, 1, -2, 2], linear_vars=[m.x, m.y, m.x, m.y] + ) self.assertEqual(e.to_string(), "10 - x + y - 2*x + 2*y") def test_sum_other(self): m = ConcreteModel() m.v = Var(range(5)) - m.p = Param(mutable=True, initialize=2) - + m.p = Param(mutable=True, initialize=4) + + for arg in (2, m.p): + with linear_expression() as e: + e += arg + self.assertIs(e.__class__, _MutableNPVSumExpression) + e -= arg + self.assertIs(e.__class__, _MutableNPVSumExpression) + + for arg in (m.v[0], m.p * m.v[0]): + with linear_expression() as e: + e += arg + self.assertIs(e.__class__, _MutableLinearExpression) + e -= arg + self.assertIs(e.__class__, _MutableLinearExpression) + + arg = 1 + m.v[0] with linear_expression() as e: - e = e + 2 - self.assertIs(e.__class__, _MutableLinearExpression) - e = e + m.p*(1+m.v[0]) - self.assertIs(e.__class__, _MutableLinearExpression) - e = e + m.v[0] - self.assertIs(e.__class__, _MutableLinearExpression) - - e = 2 + e - self.assertIs(e.__class__, _MutableLinearExpression) - e = m.p*(1+m.v[0]) + e - self.assertIs(e.__class__, _MutableLinearExpression) - e = m.v[0] + e - self.assertIs(e.__class__, _MutableLinearExpression) - - e = e - 2 + e += arg self.assertIs(e.__class__, _MutableLinearExpression) - e = e - m.p(1+m.v[0]) - self.assertIs(e.__class__, _MutableLinearExpression) - e = e - m.v[0] - self.assertIs(e.__class__, _MutableLinearExpression) - - e = 2 - e - self.assertIs(e.__class__, _MutableLinearExpression) - e = m.p*(1+m.v[0]) - e - self.assertIs(e.__class__, _MutableLinearExpression) - e = m.v[0] - e - self.assertIs(e.__class__, _MutableLinearExpression) - - with linear_expression() as e: - e += m.v[0]*m.v[1] - self.assertIs(e.__class__, SumExpression) + e -= arg + self.assertIs(e.__class__, _MutableSumExpression) - with linear_expression() as e: - e = e + m.v[0]*m.v[1] - self.assertIs(e.__class__, SumExpression) - - with linear_expression() as e: - e = m.v[0]*m.v[1] + e - self.assertIs(e.__class__, SumExpression) + for arg in (m.p * (1 + m.v[0]), m.v[0] * m.v[1]): + with linear_expression() as e: + e += arg + self.assertIs(e.__class__, _MutableSumExpression) + self.assertIs(e.args[-1], arg) + + with linear_expression() as e: + e -= arg + self.assertIs(e.__class__, _MutableSumExpression) + self.assertIs(e.args[-1].__class__, NegationExpression) + self.assertIs(e.args[-1].arg(0), arg) + + for arg in ( + 2, + m.p, + m.v[0], + m.p * m.v[0], + 1 + m.v[0], + m.p * (1 + m.v[0]), + m.v[0] * m.v[1], + ): + with linear_expression() as e: + e = e + arg + self.assertIs(e, arg) + + with linear_expression() as e: + e = arg + e + self.assertIs(e, arg) + + with linear_expression() as e: + e = arg - e + self.assertIs(e, arg) + + with linear_expression() as e: + e = e - arg + assertExpressionsEqual(self, e, -arg) def test_mul_other(self): m = ConcreteModel() @@ -4730,50 +5125,63 @@ def test_mul_other(self): with linear_expression() as e: e += 1 - e = 2 * e - self.assertEqual("2", str(e)) - self.assertIs(e.__class__, _MutableLinearExpression) - e = (1+m.v[0]) * e - self.assertEqual("2 + 2*v[0]", str(e)) + self.assertIs(e.__class__, _MutableNPVSumExpression) + self.assertEqual("1", str(e)) + f = 2 * e + self.assertEqual(f, 2) + self.assertIs(e.__class__, NPV_SumExpression) + self.assertIs(f.__class__, int) + + with linear_expression() as e: + e += 1 + m.v[0] self.assertIs(e.__class__, _MutableLinearExpression) - try: - e = m.v[0] * e - self.fail("Expecting ValueError") - except ValueError: - pass + f = e * 2 + self.assertIs(e.__class__, LinearExpression) + self.assertIs(f.__class__, ProductExpression) with linear_expression() as e: e += 1 - e = e * m.p - self.assertEqual("p", str(e)) - self.assertIs(e.__class__, _MutableLinearExpression) + self.assertIs(e.__class__, _MutableNPVSumExpression) + f = e * m.p + self.assertEqual("p", str(f)) + self.assertIs(e.__class__, NPV_SumExpression) + self.assertIs(f, m.p) with linear_expression() as e: e += 1 - e = e * 0 - self.assertEqual(e.constant, 0) - self.assertIs(e.__class__, _MutableLinearExpression) + self.assertIs(e.__class__, _MutableNPVSumExpression) + f = e * 0 + self.assertIs(e.__class__, NPV_SumExpression) + self.assertEqual(f, 0) with linear_expression() as e: e += m.v[0] - e = e * 2 - self.assertEqual("2*v[0]", str(e)) self.assertIs(e.__class__, _MutableLinearExpression) + f = e * 2 + self.assertEqual("v[0]", str(e)) + self.assertEqual("2*v[0]", str(f)) + self.assertIs(e.__class__, LinearExpression) + self.assertIs(f.__class__, MonomialTermExpression) with linear_expression() as e: e += 1 - e *= m.v[0]*m.v[1] + self.assertIs(e.__class__, _MutableNPVSumExpression) + e *= m.v[0] * m.v[1] self.assertIs(e.__class__, ProductExpression) with linear_expression() as e: e += 1 - e = e * (m.v[0]*m.v[1]) - self.assertIs(e.__class__, ProductExpression) + self.assertIs(e.__class__, _MutableNPVSumExpression) + f = e * (m.v[0] * m.v[1]) + self.assertIs(e.__class__, NPV_SumExpression) + self.assertIs(f.__class__, ProductExpression) with linear_expression() as e: e += 1 - e = (m.v[0]*m.v[1]) * e - self.assertIs(e.__class__, ProductExpression) + self.assertIs(e.__class__, _MutableNPVSumExpression) + f = (m.v[0] * m.v[1]) * e + self.assertIs(e.__class__, NPV_SumExpression) + self.assertIs(f.__class__, ProductExpression) def test_div(self): m = ConcreteModel() @@ -4782,23 +5190,23 @@ def test_div(self): with linear_expression() as e: e += m.v[0] + self.assertIs(e.__class__, _MutableLinearExpression) e /= 2 self.assertEqual("0.5*v[0]", str(e)) - self.assertIs(e.__class__, _MutableLinearExpression) + self.assertIs(e.__class__, MonomialTermExpression) with linear_expression() as e: e += m.v[0] + self.assertIs(e.__class__, _MutableLinearExpression) e /= m.p self.assertEqual("1/p*v[0]", str(e)) - self.assertIs(e.__class__, _MutableLinearExpression) + self.assertIs(e.__class__, MonomialTermExpression) with linear_expression() as e: e += 1 - try: - e /= m.v[0] - self.fail("Expected ValueError") - except: - pass + self.assertIs(e.__class__, _MutableNPVSumExpression) + e /= m.v[0] + self.assertIs(e.__class__, DivisionExpression) def test_div_other(self): m = ConcreteModel() @@ -4816,42 +5224,71 @@ def test_div_other(self): with linear_expression() as e: e += 1 e = 1 / e - self.assertEqual("1.0",str(e)) + self.assertEqual("1.0", str(e)) def test_negation_other(self): m = ConcreteModel() m.v = Var(range(5)) with linear_expression() as e: - e = 2 - e - self.assertIs(e.__class__, _MutableLinearExpression) - e = - e + e += 2 + e += m.v[1] self.assertIs(e.__class__, _MutableLinearExpression) + e = -e + self.assertIs(e.__class__, NegationExpression) + self.assertIs(e.arg(0).__class__, LinearExpression) def test_pow_other(self): m = ConcreteModel() m.v = Var(range(5)) + m.p = Param(initialize=5, mutable=True) with linear_expression() as e: e = 2**e - self.assertIs(e.__class__, NPV_PowExpression) - e = m.v[0] + m.v[1] - e = m.v[0]**e - self.assertIs(e.__class__, PowExpression) + self.assertIs(e, 1) + with linear_expression() as e: + e += 2 + e = 2**e + self.assertIs(e, 4) -class TestNonlinearExpression(unittest.TestCase): + with linear_expression() as e: + e += m.p + e = 2**e + assertExpressionsEqual(self, e, NPV_PowExpression((2, m.p))) + + with linear_expression() as e: + e += m.v[0] + m.v[1] + e = m.v[0] ** e + assertExpressionsEqual( + self, + e, + PowExpression( + ( + m.v[0], + LinearExpression( + [ + MonomialTermExpression((1, m.v[0])), + MonomialTermExpression((1, m.v[1])), + ] + ), + ) + ), + ) + +class TestNonlinearExpression(unittest.TestCase): def test_sum_other(self): m = ConcreteModel() m.v = Var(range(5)) with nonlinear_expression() as e: e_ = 2 + m.v[0] - self.assertIs(e_.__class__, SumExpression) + self.assertIs(e_.__class__, LinearExpression) e += e_ self.assertIs(e.__class__, _MutableSumExpression) - self.assertEqual(e.nargs(), 2) + self.assertEqual(e.nargs(), 1) + class TestMinMaxExpression(unittest.TestCase): def test_max_expression(self): @@ -4895,156 +5332,188 @@ def test_min_expression(self): # Test the logic of _decompose_linear_terms # class TestLinearDecomp(unittest.TestCase): - def setUp(self): # # A hack to setup the _LinearExpression.vtypes data # - #try: + # try: # l = LinearExpression() # l._combine_expr(None,None) - #except: + # except: # pass pass def test_numeric(self): - self.assertEqual(list(_decompose_linear_terms(2.0)), [(2.0,None)]) + self.assertEqual(list(_decompose_linear_terms(2.0)), [(2.0, None)]) def test_NPV(self): M = ConcreteModel() M.q = Param(initialize=2) - self.assertEqual(list(_decompose_linear_terms(M.q)), [(M.q,None)]) + self.assertEqual(list(_decompose_linear_terms(M.q)), [(M.q, None)]) def test_var(self): M = ConcreteModel() M.v = Var() - self.assertEqual(list(_decompose_linear_terms(M.v)), [(1,M.v)]) + self.assertEqual(list(_decompose_linear_terms(M.v)), [(1, M.v)]) def test_simple(self): M = ConcreteModel() M.v = Var() - self.assertEqual(list(_decompose_linear_terms(2*M.v)), [(2,M.v)]) + self.assertEqual(list(_decompose_linear_terms(2 * M.v)), [(2, M.v)]) def test_sum(self): M = ConcreteModel() M.v = Var() M.w = Var() M.q = Param(initialize=2) - self.assertEqual(list(_decompose_linear_terms(2+M.v)), [(2,None), (1,M.v)]) - self.assertEqual(list(_decompose_linear_terms(M.q+M.v)), [(2,None), (1,M.v)]) - self.assertEqual(list(_decompose_linear_terms(M.v+M.q)), [(1,M.v), (2,None)]) - self.assertEqual(list(_decompose_linear_terms(M.w+M.v)), [(1,M.w), (1,M.v)]) + self.assertEqual(list(_decompose_linear_terms(2 + M.v)), [(2, None), (1, M.v)]) + self.assertEqual( + list(_decompose_linear_terms(M.q + M.v)), [(2, None), (1, M.v)] + ) + self.assertEqual( + list(_decompose_linear_terms(M.v + M.q)), [(1, M.v), (2, None)] + ) + self.assertEqual(list(_decompose_linear_terms(M.w + M.v)), [(1, M.w), (1, M.v)]) def test_prod(self): M = ConcreteModel() M.v = Var() M.w = Var() M.q = Param(initialize=2) - self.assertEqual(list(_decompose_linear_terms(2*M.v)), [(2,M.v)]) - self.assertEqual(list(_decompose_linear_terms(M.q*M.v)), [(2,M.v)]) - self.assertEqual(list(_decompose_linear_terms(M.v*M.q)), [(2,M.v)]) - self.assertRaises(LinearDecompositionError, list, _decompose_linear_terms(M.w*M.v)) + self.assertEqual(list(_decompose_linear_terms(2 * M.v)), [(2, M.v)]) + self.assertEqual(list(_decompose_linear_terms(M.q * M.v)), [(2, M.v)]) + self.assertEqual(list(_decompose_linear_terms(M.v * M.q)), [(2, M.v)]) + self.assertRaises( + LinearDecompositionError, list, _decompose_linear_terms(M.w * M.v) + ) def test_negation(self): M = ConcreteModel() M.v = Var() - self.assertEqual(list(_decompose_linear_terms(-M.v)), [(-1,M.v)]) - self.assertEqual(list(_decompose_linear_terms(-(2+M.v))), [(-2,None), (-1,M.v)]) + self.assertEqual(list(_decompose_linear_terms(-M.v)), [(-1, M.v)]) + self.assertEqual( + list(_decompose_linear_terms(-(2 + M.v))), [(-2, None), (-1, M.v)] + ) def test_reciprocal(self): M = ConcreteModel() M.v = Var() M.q = Param(initialize=2) - self.assertRaises(LinearDecompositionError, list, _decompose_linear_terms(1/M.v)) - self.assertEqual(list(_decompose_linear_terms(1/M.q)), [(0.5,None)]) - + self.assertRaises( + LinearDecompositionError, list, _decompose_linear_terms(1 / M.v) + ) + self.assertEqual(list(_decompose_linear_terms(1 / M.q)), [(0.5, None)]) + def test_multisum(self): M = ConcreteModel() M.v = Var() M.w = Var() M.q = Param(initialize=2) e = SumExpression([2]) - self.assertEqual(decompose_linear_term_wrapper(list(_decompose_linear_terms(e))), decompose_linear_term_wrapper([(2,None)])) - e = SumExpression([2,M.v]) - self.assertEqual(decompose_linear_term_wrapper(list(_decompose_linear_terms(e))), decompose_linear_term_wrapper([(2,None), (1,M.v)])) - e = SumExpression([2,M.q+M.v]) - self.assertEqual(decompose_linear_term_wrapper(list(_decompose_linear_terms(e))), decompose_linear_term_wrapper([(2,None), (2,None), (1,M.v)])) - e = SumExpression([2,M.q+M.v,M.w]) - self.assertEqual(decompose_linear_term_wrapper(list(_decompose_linear_terms(e))), decompose_linear_term_wrapper([(2,None), (2,None), (1,M.v), (1,M.w)])) - + self.assertEqual( + decompose_linear_term_wrapper(list(_decompose_linear_terms(e))), + decompose_linear_term_wrapper([(2, None)]), + ) + e = SumExpression([2, M.v]) + self.assertEqual( + decompose_linear_term_wrapper(list(_decompose_linear_terms(e))), + decompose_linear_term_wrapper([(2, None), (1, M.v)]), + ) + e = SumExpression([2, M.q + M.v]) + self.assertEqual( + decompose_linear_term_wrapper(list(_decompose_linear_terms(e))), + decompose_linear_term_wrapper([(2, None), (2, None), (1, M.v)]), + ) + e = SumExpression([2, M.q + M.v, M.w]) + self.assertEqual( + decompose_linear_term_wrapper(list(_decompose_linear_terms(e))), + decompose_linear_term_wrapper([(2, None), (2, None), (1, M.v), (1, M.w)]), + ) + # # Test the logic of decompose_term() # class Test_decompose_linear_terms(unittest.TestCase): - def test_numeric(self): - self.assertEqual(decompose_term(2.0), (True,[(2.0,None)])) + self.assertEqual(decompose_term(2.0), (True, [(2.0, None)])) def test_NPV(self): M = ConcreteModel() M.q = Param(initialize=2) - self.assertEqual(decompose_term(M.q), (True, [(M.q,None)])) + self.assertEqual(decompose_term(M.q), (True, [(M.q, None)])) def test_var(self): M = ConcreteModel() M.v = Var() - self.assertEqual(decompose_term(M.v), (True, [(1,M.v)])) + self.assertEqual(decompose_term(M.v), (True, [(1, M.v)])) def test_simple(self): M = ConcreteModel() M.v = Var() - self.assertEqual(decompose_term(2*M.v), (True, [(2,M.v)])) + self.assertEqual(decompose_term(2 * M.v), (True, [(2, M.v)])) def test_sum(self): M = ConcreteModel() M.v = Var() M.w = Var() M.q = Param(initialize=2) - self.assertEqual(decompose_term(2+M.v), (True, [(2,None), (1,M.v)])) - self.assertEqual(decompose_term(M.q+M.v), (True, [(2,None), (1,M.v)])) - self.assertEqual(decompose_term(M.v+M.q), (True, [(1,M.v), (2,None)])) - self.assertEqual(decompose_term(M.v+M.w), (True, [(1,M.v), (1,M.w)])) + self.assertEqual(decompose_term(2 + M.v), (True, [(2, None), (1, M.v)])) + self.assertEqual(decompose_term(M.q + M.v), (True, [(2, None), (1, M.v)])) + self.assertEqual(decompose_term(M.v + M.q), (True, [(1, M.v), (2, None)])) + self.assertEqual(decompose_term(M.v + M.w), (True, [(1, M.v), (1, M.w)])) def test_prod(self): M = ConcreteModel() M.v = Var() M.w = Var() M.q = Param(initialize=2) - self.assertEqual(decompose_term(2*M.v), (True, [(2,M.v)])) - self.assertEqual(decompose_term(M.q*M.v), (True, [(2,M.v)])) - self.assertEqual(decompose_term(M.v*M.q), (True, [(2,M.v)])) - self.assertEqual(decompose_term(M.w*M.v), (False, None)) + self.assertEqual(decompose_term(2 * M.v), (True, [(2, M.v)])) + self.assertEqual(decompose_term(M.q * M.v), (True, [(2, M.v)])) + self.assertEqual(decompose_term(M.v * M.q), (True, [(2, M.v)])) + self.assertEqual(decompose_term(M.w * M.v), (False, None)) def test_negation(self): M = ConcreteModel() M.v = Var() - self.assertEqual(decompose_term(-M.v), (True, [(-1,M.v)])) - self.assertEqual(decompose_term(-(2+M.v)), (True, [(-2,None), (-1,M.v)])) + self.assertEqual(decompose_term(-M.v), (True, [(-1, M.v)])) + self.assertEqual(decompose_term(-(2 + M.v)), (True, [(-2, None), (-1, M.v)])) def test_reciprocal(self): M = ConcreteModel() M.v = Var() M.q = Param(initialize=2) M.p = Param(initialize=2, mutable=True) - self.assertEqual(decompose_term(1/M.v), (False, None)) - self.assertEqual(decompose_term(1/M.q), (True, [(0.5,None)])) - e = 1/M.p - self.assertEqual(decompose_term(e), (True, [(e,None)])) - + self.assertEqual(decompose_term(1 / M.v), (False, None)) + self.assertEqual(decompose_term(1 / M.q), (True, [(0.5, None)])) + e = 1 / M.p + self.assertEqual(decompose_term(e), (True, [(e, None)])) + def test_multisum(self): M = ConcreteModel() M.v = Var() M.w = Var() M.q = Param(initialize=3) e = SumExpression([2]) - self.assertEqual(decompose_term_wrapper(decompose_term(e)), decompose_term_wrapper((True, [(2,None)]))) - e = SumExpression([2,M.v]) - self.assertEqual(decompose_term_wrapper(decompose_term(e)), decompose_term_wrapper((True, [(2,None), (1,M.v)]))) - e = SumExpression([2,M.q+M.v]) - self.assertEqual(decompose_term_wrapper(decompose_term(e)), decompose_term_wrapper((True, [(2,None), (3,None), (1,M.v)]))) - e = SumExpression([2,M.q+M.v,M.w]) - self.assertEqual(decompose_term_wrapper(decompose_term(e)), decompose_term_wrapper((True, [(2,None), (3,None), (1,M.v), (1,M.w)]))) + self.assertEqual( + decompose_term_wrapper(decompose_term(e)), + decompose_term_wrapper((True, [(2, None)])), + ) + e = SumExpression([2, M.v]) + self.assertEqual( + decompose_term_wrapper(decompose_term(e)), + decompose_term_wrapper((True, [(2, None), (1, M.v)])), + ) + e = SumExpression([2, M.q + M.v]) + self.assertEqual( + decompose_term_wrapper(decompose_term(e)), + decompose_term_wrapper((True, [(2, None), (3, None), (1, M.v)])), + ) + e = SumExpression([2, M.q + M.v, M.w]) + self.assertEqual( + decompose_term_wrapper(decompose_term(e)), + decompose_term_wrapper((True, [(2, None), (3, None), (1, M.v), (1, M.w)])), + ) def test_linear(self): M = ConcreteModel() @@ -5052,50 +5521,42 @@ def test_linear(self): M.w = Var() with linear_expression() as e: e += 2 - # - # When the linear expression is constant, then it will be - # identified as not potentially variable, and the expression returned - # will be itself. - # - self.assertEqual(decompose_term(e), (True, [(e,None)])) + self.assertEqual(decompose_term(e), (True, [(2, None)])) e += M.v - self.assertEqual(decompose_term(-e), (True, [(-2,None), (-1,M.v)])) - -def x_(m,i): - return i+1 -def P_(m,i): - return 10-i + self.assertEqual(decompose_term(-e), (True, [(-2, None), (-1, M.v)])) + + +def x_(m, i): + return i + 1 + + +def P_(m, i): + return 10 - i + # # Test pickle logic # class Test_pickle(unittest.TestCase): - def test_simple(self): M = ConcreteModel() M.v = Var() - e = 2*M.v + e = 2 * M.v s = pickle.dumps(e) e_ = pickle.loads(s) - flag, terms = decompose_term(e_) - self.assertTrue(flag) - self.assertEqual(terms[0][0], 2) - self.assertEqual(str(terms[0][1]), str(M.v)) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) def test_sum(self): M = ConcreteModel() M.v = Var() M.w = Var() M.q = Param(initialize=2) - e = M.v+M.q + e = M.v + M.q s = pickle.dumps(e) e_ = pickle.loads(s) - flag, terms = decompose_term(e_) - self.assertTrue(flag) - self.assertEqual(terms[0][0], 1) - self.assertEqual(str(terms[0][1]), str(M.v)) - self.assertEqual(terms[1][0], 2) - self.assertEqual(terms[1][1], None) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) def Xtest_Sum(self): M = ConcreteModel() @@ -5104,112 +5565,98 @@ def Xtest_Sum(self): e = quicksum(M.v[i] for i in M.v) s = pickle.dumps(e) e_ = pickle.loads(s) - flag, terms = decompose_term(e_) - self.assertTrue(flag) - self.assertEqual(terms[0][0], 1) - self.assertEqual(str(terms[0][1]), str(M.v[0])) - self.assertEqual(terms[1][0], 1) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) def test_prod(self): M = ConcreteModel() M.v = Var() M.w = Var() M.q = Param(initialize=2) - e = M.v*M.q + e = M.v * M.q s = pickle.dumps(e) e_ = pickle.loads(s) - flag, terms = decompose_term(e_) - self.assertTrue(flag) - self.assertEqual(terms[0][0], 2) - self.assertEqual(str(terms[0][1]), str(M.v)) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) def test_negation(self): M = ConcreteModel() M.v = Var() - e = -(2+M.v) + e = -(2 + M.v) s = pickle.dumps(e) e_ = pickle.loads(s) - flag, terms = decompose_term(e_) - self.assertTrue(flag) - self.assertEqual(terms[0][0], -2) - self.assertEqual(terms[0][1], None) - self.assertEqual(terms[1][0], -1) - self.assertEqual(str(terms[1][1]), str(M.v)) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) def test_reciprocal(self): M = ConcreteModel() M.v = Var() M.q = Param(initialize=2) M.p = Param(initialize=2, mutable=True) - e = 1/M.p + e = 1 / M.p s = pickle.dumps(e) e_ = pickle.loads(s) - flag, terms = decompose_term(e_) - self.assertTrue(flag) - self.assertEqual(value(terms[0][0]), 0.5) - self.assertEqual(value(terms[0][1]), None) - + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) + def test_multisum(self): M = ConcreteModel() M.v = Var() M.w = Var() M.q = Param(initialize=3) - e = SumExpression([2,M.q+M.v,M.w]) + e = SumExpression([2, M.q + M.v, M.w]) s = pickle.dumps(e) e_ = pickle.loads(s) - flag, terms = decompose_term(e_) - self.assertTrue(flag) - self.assertEqual(terms[0][0], 2) - self.assertEqual(terms[0][1], None) - self.assertEqual(terms[1][0], 3) - self.assertEqual(terms[1][1], None) - self.assertEqual(terms[2][0], 1) - self.assertEqual(str(terms[2][1]), str(M.v)) - self.assertEqual(terms[3][0], 1) - self.assertEqual(str(terms[3][1]), str(M.w)) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) def test_linear(self): + M = ConcreteModel() + M.v = Var() + M.w = Var() + e = LinearExpression() + e += 2 + e += M.v + e = -e + s = pickle.dumps(e) + e_ = pickle.loads(s) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) + + def test_linear_context(self): M = ConcreteModel() M.v = Var() M.w = Var() with linear_expression() as e: e += 2 - # - # When the linear expression is constant, then it will be - # identified as not potentially variable, and the expression returned - # will be itself. - # - self.assertEqual(decompose_term(e), (True, [(e,None)])) e += M.v - s = pickle.dumps(-e) - e_ = pickle.loads(s) - flag, terms = decompose_term(e_) - self.assertTrue(flag) - self.assertEqual(terms[0][0], -2) - self.assertEqual(terms[0][1], None) - self.assertEqual(terms[1][0], -1) - self.assertEqual(str(terms[1][1]), str(M.v)) - + e = -e + s = pickle.dumps(e) + e_ = pickle.loads(s) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) + def test_ExprIf(self): M = ConcreteModel() M.v = Var() e = Expr_if(M.v, 1, 0) s = pickle.dumps(e) e_ = pickle.loads(s) - self.assertEqual(type(e.arg(0)), type(e_.arg(0))) - self.assertEqual(e.arg(1), e_.arg(1)) - self.assertEqual(e.arg(2), e_.arg(2)) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) def test_getitem(self): m = ConcreteModel() - m.I = RangeSet(1,9) + m.I = RangeSet(1, 9) m.x = Var(m.I, initialize=x_) m.P = Param(m.I, initialize=P_, mutable=True) t = IndexTemplate(m.I) - e = m.x[t+m.P[t+1]] + 3 + e = m.x[t + m.P[t + 1]] + 3 s = pickle.dumps(e) e_ = pickle.loads(s) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) self.assertEqual("x[{I} + P[{I} + 1]] + 3", str(e)) def test_abs(self): @@ -5218,6 +5665,8 @@ def test_abs(self): e = abs(M.v) s = pickle.dumps(e) e_ = pickle.loads(s) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) self.assertEqual(str(e), str(e_)) def test_sin(self): @@ -5226,6 +5675,8 @@ def test_sin(self): e = sin(M.v) s = pickle.dumps(e) e_ = pickle.loads(s) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) self.assertEqual(str(e), str(e_)) def test_external_fcn(self): @@ -5235,24 +5686,22 @@ def test_external_fcn(self): e = model.x(model.a, 1, "foo", []) s = pickle.dumps(e) e_ = pickle.loads(s) - self.assertEqual(type(e_), type(e)) - self.assertEqual(type(e_.arg(0)), type(e.arg(0))) - self.assertEqual(type(e_.arg(1)), type(e.arg(1))) - self.assertEqual(type(e_.arg(2)), type(e.arg(2))) + self.assertIsNot(e, e_) + assertExpressionsStructurallyEqual(self, e, e_) + # # Every class that is duck typed to be a named expression # should be tested here. # class TestNamedExpressionDuckTyping(unittest.TestCase): - def check_api(self, obj): self.assertTrue(hasattr(obj, 'nargs')) self.assertTrue(hasattr(obj, 'arg')) self.assertTrue(hasattr(obj, 'args')) self.assertTrue(hasattr(obj, '__call__')) self.assertTrue(hasattr(obj, 'to_string')) - self.assertTrue(hasattr(obj, '_precedence')) + self.assertTrue(hasattr(obj, 'PRECEDENCE')) self.assertTrue(hasattr(obj, '_to_string')) self.assertTrue(hasattr(obj, 'clone')) self.assertTrue(hasattr(obj, 'create_node_with_local_data')) @@ -5299,7 +5748,6 @@ def test_objective(self): class TestNumValueDuckTyping(unittest.TestCase): - def check_api(self, obj): self.assertTrue(hasattr(obj, 'is_fixed')) self.assertTrue(hasattr(obj, 'is_constant')) @@ -5341,18 +5789,24 @@ def test_variable(self): x = variable() self.check_api(x) -class TestDirect_LinearExpression(unittest.TestCase): +class TestDirect_LinearExpression(unittest.TestCase): def test_LinearExpression_Param(self): m = ConcreteModel() N = 10 - S = list(range(1,N+1)) - m.x = Var(S, initialize=lambda m,i: 1.0/i) - m.P = Param(S, initialize=lambda m,i: i) - m.obj = Objective(expr=LinearExpression(constant=1.0, linear_coefs=[m.P[i] for i in S], linear_vars=[m.x[i] for i in S])) + S = list(range(1, N + 1)) + m.x = Var(S, initialize=lambda m, i: 1.0 / i) + m.P = Param(S, initialize=lambda m, i: i) + m.obj = Objective( + expr=LinearExpression( + constant=1.0, + linear_coefs=[m.P[i] for i in S], + linear_vars=[m.x[i] for i in S], + ) + ) # test that the expression evaluates correctly - self.assertAlmostEqual(value(m.obj), N+1) + self.assertAlmostEqual(value(m.obj), N + 1) # test that the standard repn can be constructed repn = generate_standard_repn(m.obj.expr) @@ -5363,12 +5817,18 @@ def test_LinearExpression_Param(self): def test_LinearExpression_Number(self): m = ConcreteModel() N = 10 - S = list(range(1,N+1)) - m.x = Var(S, initialize=lambda m,i: 1.0/i) - m.obj = Objective(expr=LinearExpression(constant=1.0, linear_coefs=[i for i in S], linear_vars=[m.x[i] for i in S])) + S = list(range(1, N + 1)) + m.x = Var(S, initialize=lambda m, i: 1.0 / i) + m.obj = Objective( + expr=LinearExpression( + constant=1.0, + linear_coefs=[i for i in S], + linear_vars=[m.x[i] for i in S], + ) + ) # test that the expression evaluates correctly - self.assertAlmostEqual(value(m.obj), N+1) + self.assertAlmostEqual(value(m.obj), N + 1) # test that the standard repn can be constructed repn = generate_standard_repn(m.obj.expr) @@ -5379,13 +5839,19 @@ def test_LinearExpression_Number(self): def test_LinearExpression_MutableParam(self): m = ConcreteModel() N = 10 - S = list(range(1,N+1)) - m.x = Var(S, initialize=lambda m,i: 1.0/i) - m.P = Param(S, initialize=lambda m,i: i, mutable=True) - m.obj = Objective(expr=LinearExpression(constant=1.0, linear_coefs=[m.P[i] for i in S], linear_vars=[m.x[i] for i in S])) + S = list(range(1, N + 1)) + m.x = Var(S, initialize=lambda m, i: 1.0 / i) + m.P = Param(S, initialize=lambda m, i: i, mutable=True) + m.obj = Objective( + expr=LinearExpression( + constant=1.0, + linear_coefs=[m.P[i] for i in S], + linear_vars=[m.x[i] for i in S], + ) + ) # test that the expression evaluates correctly - self.assertAlmostEqual(value(m.obj), N+1) + self.assertAlmostEqual(value(m.obj), N + 1) # test that the standard repn can be constructed repn = generate_standard_repn(m.obj.expr) @@ -5396,13 +5862,19 @@ def test_LinearExpression_MutableParam(self): def test_LinearExpression_expression(self): m = ConcreteModel() N = 10 - S = list(range(1,N+1)) - m.x = Var(S, initialize=lambda m,i: 1.0/i) - m.P = Param(S, initialize=lambda m,i: i, mutable=True) - m.obj = Objective(expr=LinearExpression(constant=1.0, linear_coefs=[i*m.P[i] for i in S], linear_vars=[m.x[i] for i in S])) + S = list(range(1, N + 1)) + m.x = Var(S, initialize=lambda m, i: 1.0 / i) + m.P = Param(S, initialize=lambda m, i: i, mutable=True) + m.obj = Objective( + expr=LinearExpression( + constant=1.0, + linear_coefs=[i * m.P[i] for i in S], + linear_vars=[m.x[i] for i in S], + ) + ) # test that the expression evaluates correctly - self.assertAlmostEqual(value(m.obj), sum(i for i in S)+1) + self.assertAlmostEqual(value(m.obj), sum(i for i in S) + 1) # test that the standard repn can be constructed repn = generate_standard_repn(m.obj.expr) @@ -5418,7 +5890,11 @@ def test_LinearExpression_polynomial_degree(self): m.var_3 = Var(m.S, initialize=0) def con_rule(model): - return model.var_1 - (model.var_2 + sum_product(defaultdict(lambda: 6), model.var_3)) <= 0 + return ( + model.var_1 + - (model.var_2 + sum_product(defaultdict(lambda: 6), model.var_3)) + <= 0 + ) m.c1 = Constraint(rule=con_rule) @@ -5437,7 +5913,11 @@ def test_LinearExpression_is_fixed(self): m.var_3 = Var(m.S, initialize=0) def con_rule(model): - return model.var_1 - (model.var_2 + sum_product(defaultdict(lambda: 6), model.var_3)) <= 0 + return ( + model.var_1 + - (model.var_2 + sum_product(defaultdict(lambda: 6), model.var_3)) + <= 0 + ) m.c1 = Constraint(rule=con_rule) diff --git a/pyomo/core/tests/unit/test_numeric_expr_api.py b/pyomo/core/tests/unit/test_numeric_expr_api.py new file mode 100644 index 00000000000..0d85e959fa0 --- /dev/null +++ b/pyomo/core/tests/unit/test_numeric_expr_api.py @@ -0,0 +1,1113 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +# +# Unit Tests for expression generation +# +import logging +import math + +import pyomo.common.unittest as unittest + +from pyomo.common.log import LoggingIntercept +from pyomo.core.expr.numvalue import is_fixed +from pyomo.core.expr.compare import assertExpressionsStructurallyEqual +from pyomo.core.expr import ( + value, + sin, + Expr_if, + SumExpression, + LinearExpression, + NPV_SumExpression, + AbsExpression, + DivisionExpression, + NPV_DivisionExpression, + ExternalFunctionExpression, + NPV_ExternalFunctionExpression, + MonomialTermExpression, + PowExpression, + NPV_PowExpression, + ProductExpression, + NPV_ProductExpression, + NegationExpression, + NPV_NegationExpression, + UnaryFunctionExpression, + NPV_UnaryFunctionExpression, +) +from pyomo.core.expr.numeric_expr import ( + mutable_expression, + nonlinear_expression, + linear_expression, + _MutableSumExpression, + _MutableLinearExpression, + _MutableNPVSumExpression, + Expr_ifExpression, + NPV_Expr_ifExpression, + MaxExpression, + NPV_MaxExpression, + MinExpression, + NPV_MinExpression, +) +from pyomo.environ import ConcreteModel, Param, Var, ExternalFunction + + +class MockExternalFunction(object): + def evaluate(self, args): + (x,) = args + return (math.log(x) / math.log(2)) ** 2 + + def getname(self): + return 'mock_fcn' + + +class TestExpressionAPI(unittest.TestCase): + def test_deprecated_functions(self): + m = ConcreteModel() + m.x = Var() + e = m.x**10 + self.assertIs(type(e), PowExpression) + with LoggingIntercept() as LOG: + f = e.create_potentially_variable_object() + self.assertIs(e, f) + self.assertIs(type(e), PowExpression) + self.assertIn( + 'DEPRECATED: The implicit recasting of a "not potentially variable" ' + 'expression node to a potentially variable one is no longer supported', + LOG.getvalue().replace('\n', ' '), + ) + self.assertNotIn( + 'recasting a non-potentially variable expression to a potentially variable ' + 'one violates the immutability promise for Pyomo expression trees.', + LOG.getvalue().replace('\n', ' '), + ) + + m.p = Param(mutable=True) + e = m.p**10 + self.assertIs(type(e), NPV_PowExpression) + with LoggingIntercept() as LOG: + f = e.create_potentially_variable_object() + self.assertIs(e, f) + self.assertIs(type(e), PowExpression) + self.assertIn( + 'DEPRECATED: The implicit recasting of a "not potentially variable" ' + 'expression node to a potentially variable one is no longer supported', + LOG.getvalue().replace('\n', ' '), + ) + self.assertIn( + 'recasting a non-potentially variable expression to a potentially variable ' + 'one violates the immutability promise for Pyomo expression trees.', + LOG.getvalue().replace('\n', ' '), + ) + + e = m.x + m.x + with LoggingIntercept() as LOG: + f = e.add(5) + self.assertIn( + 'DEPRECATED: SumExpression.add() is deprecated. Please use regular ' + 'Python operators', + LOG.getvalue().replace('\n', ' '), + ) + self.assertEqual(str(e), 'x + x') + self.assertEqual(str(f), 'x + x + 5') + + def test_mutable_expression(self): + m = ConcreteModel() + m.x = Var(range(3)) + with mutable_expression() as e: + f = e + self.assertIs(type(e), _MutableNPVSumExpression) + e += 1 + self.assertIs(e, f) + self.assertIs(type(e), _MutableNPVSumExpression) + e += m.x[0] + self.assertIs(e, f) + self.assertIs(type(e), _MutableLinearExpression) + e += 100 * m.x[1] + self.assertIs(e, f) + self.assertIs(type(e), _MutableLinearExpression) + e += m.x[0] ** 2 + self.assertIs(e, f) + self.assertIs(type(e), _MutableSumExpression) + self.assertIs(e, f) + self.assertIs(type(e), SumExpression) + + def test_linear_expression(self): + m = ConcreteModel() + m.x = Var(range(3)) + with linear_expression() as e: + f = e + self.assertIs(type(e), _MutableNPVSumExpression) + e += 1 + self.assertIs(e, f) + self.assertIs(type(e), _MutableNPVSumExpression) + e += m.x[0] + self.assertIs(e, f) + self.assertIs(type(e), _MutableLinearExpression) + e += 100 * m.x[1] + self.assertIs(e, f) + self.assertIs(type(e), _MutableLinearExpression) + e += m.x[0] ** 2 + self.assertIs(e, f) + self.assertIs(type(e), _MutableSumExpression) + self.assertIs(e, f) + self.assertIs(type(e), SumExpression) + + def test_nonlinear_expression(self): + m = ConcreteModel() + m.x = Var(range(3)) + with nonlinear_expression() as e: + f = e + self.assertIs(type(e), _MutableSumExpression) + e += 1 + self.assertIs(e, f) + self.assertIs(type(e), _MutableSumExpression) + e += m.x[0] + self.assertIs(e, f) + self.assertIs(type(e), _MutableSumExpression) + e += 100 * m.x[1] + self.assertIs(e, f) + self.assertIs(type(e), _MutableSumExpression) + e += m.x[0] ** 2 + self.assertIs(e, f) + self.assertIs(type(e), _MutableSumExpression) + self.assertIs(e, f) + self.assertIs(type(e), SumExpression) + + def test_negation(self): + m = ConcreteModel() + m.x = Var(initialize=5) + + e = NegationExpression((5,)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), -5) + self.assertEqual(str(e), "- 5") + self.assertEqual(e.to_string(verbose=True), "neg(5)") + + e = NegationExpression((-5,)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 5) + self.assertEqual(str(e), "5") + self.assertEqual(e.to_string(verbose=True), "neg(-5)") + + e = NegationExpression((m.x,)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), -5) + self.assertEqual(str(e), "- x") + self.assertEqual(e.to_string(verbose=True), "neg(x)") + + m.p = Param(initialize=10, mutable=True) + e = NPV_NegationExpression((m.p,)) + self.assertFalse(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), -10) + self.assertEqual(str(e), "- p") + self.assertEqual(e.to_string(verbose=True), "neg(p)") + + e = -(m.x + 2 * m.x) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), -15) + self.assertEqual(str(e), "- (x + 2*x)") + self.assertEqual(e.to_string(verbose=True), "neg(sum(mon(1, x), mon(2, x)))") + + # This can't occur through operator overloading, but could + # through expression substitution + e = NegationExpression((NegationExpression((m.x,)),)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 5) + self.assertEqual(str(e), "x") + self.assertEqual(e.to_string(verbose=True), "neg(neg(x))") + + def test_pow(self): + m = ConcreteModel() + m.x = Var(initialize=5) + e = PowExpression((m.x, 2)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 2) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 25) + self.assertEqual(str(e), "x**2") + self.assertEqual(e.to_string(verbose=True), "pow(x, 2)") + + e = PowExpression((2, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), None) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 32) + self.assertEqual(str(e), "2**x") + self.assertEqual(e.to_string(verbose=True), "pow(2, x)") + + m.p = Param(initialize=3, mutable=True) + e = NPV_PowExpression((m.p, 2)) + self.assertFalse(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 9) + self.assertEqual(str(e), "p**2") + self.assertEqual(e.to_string(verbose=True), "pow(p, 2)") + + e = NPV_PowExpression((2, m.p)) + self.assertFalse(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 8) + self.assertEqual(str(e), "2**p") + self.assertEqual(e.to_string(verbose=True), "pow(2, p)") + + e = PowExpression((m.x, m.p)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 3) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 125) + self.assertEqual(str(e), "x**p") + self.assertEqual(e.to_string(verbose=True), "pow(x, p)") + + m.p = 0 + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 1) + self.assertEqual(str(e), "x**p") + self.assertEqual(e.to_string(verbose=True), "pow(x, p)") + + m.x.fix(2) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 1) + self.assertEqual(str(e), "x**p") + self.assertEqual(e.to_string(verbose=True), "pow(x, p)") + + m.p = 3 + e = PowExpression((m.p, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 9) + self.assertEqual(str(e), "p**x") + self.assertEqual(e.to_string(verbose=True), "pow(p, x)") + + m.y = Var() + m.x.fix(None) + e = PowExpression((m.y, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), None) + self.assertEqual(is_fixed(e), False) + with self.assertRaisesRegex( + ValueError, 'No value for uninitialized NumericValue object y' + ): + self.assertEqual(value(e), None) + self.assertEqual(str(e), "y**x") + self.assertEqual(e.to_string(verbose=True), "pow(y, x)") + + def test_min(self): + m = ConcreteModel() + m.x = Var(initialize=5) + e = MinExpression((m.x, 2)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), None) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 2) + self.assertEqual(str(e), "min(x, 2)") + self.assertEqual(e.to_string(verbose=True), "min(x, 2)") + + m.x.fix(1) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 1) + self.assertEqual(str(e), "min(x, 2)") + self.assertEqual(e.to_string(verbose=True), "min(x, 2)") + + def test_max(self): + m = ConcreteModel() + m.x = Var(initialize=5) + e = MaxExpression((m.x, 2)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), None) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 5) + self.assertEqual(str(e), "max(x, 2)") + self.assertEqual(e.to_string(verbose=True), "max(x, 2)") + + m.x.fix(10) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 10) + self.assertEqual(str(e), "max(x, 2)") + self.assertEqual(e.to_string(verbose=True), "max(x, 2)") + + def test_prod(self): + m = ConcreteModel() + m.x = Var(initialize=5) + e = ProductExpression((m.x, 2)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 10) + self.assertEqual(str(e), "x*2") + self.assertEqual(e.to_string(verbose=True), "prod(x, 2)") + + e = ProductExpression((2, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 10) + self.assertEqual(str(e), "2*x") + self.assertEqual(e.to_string(verbose=True), "prod(2, x)") + + m.p = Param(initialize=3, mutable=True) + e = NPV_ProductExpression((m.p, 2)) + self.assertFalse(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 6) + self.assertEqual(str(e), "p*2") + self.assertEqual(e.to_string(verbose=True), "prod(p, 2)") + + e = NPV_ProductExpression((2, m.p)) + self.assertFalse(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 6) + self.assertEqual(str(e), "2*p") + self.assertEqual(e.to_string(verbose=True), "prod(2, p)") + + e = ProductExpression((m.x, m.p)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 15) + self.assertEqual(str(e), "x*p") + self.assertEqual(e.to_string(verbose=True), "prod(x, p)") + + m.p = 0 + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 0) + self.assertEqual(str(e), "x*p") + self.assertEqual(e.to_string(verbose=True), "prod(x, p)") + + e = ProductExpression((m.p, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 0) + self.assertEqual(str(e), "p*x") + self.assertEqual(e.to_string(verbose=True), "prod(p, x)") + + e = ProductExpression((-1, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), -5) + self.assertEqual(str(e), "- x") + self.assertEqual(e.to_string(verbose=True), "prod(-1, x)") + + m.y = Var() + e = ProductExpression((m.y, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 2) + self.assertEqual(is_fixed(e), False) + with self.assertRaisesRegex( + ValueError, 'No value for uninitialized NumericValue object y' + ): + self.assertEqual(value(e), None) + self.assertEqual(str(e), "y*x") + self.assertEqual(e.to_string(verbose=True), "prod(y, x)") + + m.x.fix(0) + e = ProductExpression((m.y, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + with self.assertRaisesRegex( + ValueError, 'No value for uninitialized NumericValue object y' + ): + self.assertEqual(value(e), None) + self.assertEqual(str(e), "y*x") + self.assertEqual(e.to_string(verbose=True), "prod(y, x)") + + m.x.fix(None) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + with self.assertRaisesRegex( + ValueError, 'No value for uninitialized NumericValue object y' + ): + self.assertEqual(value(e), None) + self.assertEqual(str(e), "y*x") + self.assertEqual(e.to_string(verbose=True), "prod(y, x)") + + m.y = 5 + e = ProductExpression((1 / m.y, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), None) + self.assertEqual(is_fixed(e), False) + with self.assertRaisesRegex( + ValueError, 'No value for uninitialized NumericValue object x' + ): + self.assertEqual(value(e), None) + self.assertEqual(str(e), "1/y*x") + self.assertEqual(e.to_string(verbose=True), "prod(div(1, y), x)") + + def test_monomial(self): + m = ConcreteModel() + m.x = Var(initialize=5) + e = MonomialTermExpression((2, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 10) + self.assertEqual(str(e), "2*x") + self.assertEqual(e.to_string(verbose=True), "mon(2, x)") + + e = MonomialTermExpression((-2, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), -10) + self.assertEqual(str(e), "-2*x") + self.assertEqual(e.to_string(verbose=True), "mon(-2, x)") + + m.x.fix(2) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), -4) + self.assertEqual(str(e), "-2*x") + self.assertEqual(e.to_string(verbose=True), "mon(-2, x)") + + def test_division(self): + m = ConcreteModel() + m.x = Var(initialize=5) + + e = DivisionExpression((m.x, 2)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 2.5) + self.assertEqual(str(e), "x/2") + self.assertEqual(e.to_string(verbose=True), "div(x, 2)") + + e = DivisionExpression((2, m.x)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), None) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 0.4) + self.assertEqual(str(e), "2/x") + self.assertEqual(e.to_string(verbose=True), "div(2, x)") + + m.x.fix(2) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 1) + self.assertEqual(str(e), "2/x") + self.assertEqual(e.to_string(verbose=True), "div(2, x)") + + def test_sum(self): + m = ConcreteModel() + m.x = Var(initialize=5) + + e = SumExpression(()) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 0) + self.assertEqual(str(e), "0") + self.assertEqual(e.to_string(verbose=True), "sum(0)") + + e = SumExpression((m.x, 2)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 7) + self.assertEqual(str(e), "x + 2") + self.assertEqual(e.to_string(verbose=True), "sum(x, 2)") + + e = SumExpression((m.x, -2)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 3) + self.assertEqual(str(e), "x - 2") + self.assertEqual(e.to_string(verbose=True), "sum(x, -2)") + + e = SumExpression((-2, m.x, -2)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 1) + self.assertEqual(str(e), "-2 + x - 2") + self.assertEqual(e.to_string(verbose=True), "sum(-2, x, -2)") + + e = SumExpression([-2, m.x, AbsExpression((m.x,)), m.x]) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), None) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 13) + self.assertEqual(str(e), "-2 + x + abs(x) + x") + self.assertEqual(e.to_string(verbose=True), "sum(-2, x, abs(x), x)") + + e = SumExpression([-2, m.x, SumExpression([-2])]) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 1) + self.assertEqual(str(e), "-2 + x + (-2)") + self.assertEqual(e.to_string(verbose=True), "sum(-2, x, sum(-2))") + + e = SumExpression([-2, m.x, SumExpression([-2, 3])]) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 4) + self.assertEqual(str(e), "-2 + x + (-2 + 3)") + self.assertEqual(e.to_string(verbose=True), "sum(-2, x, sum(-2, 3))") + + e = SumExpression([-2, m.x, SumExpression([2, 3])]) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 8) + self.assertEqual(str(e), "-2 + x + (2 + 3)") + self.assertEqual(e.to_string(verbose=True), "sum(-2, x, sum(2, 3))") + + e = SumExpression([-2, m.x, NegationExpression((SumExpression([-2]),))]) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 5) + self.assertEqual(str(e), "-2 + x - (-2)") + self.assertEqual(e.to_string(verbose=True), "sum(-2, x, neg(sum(-2)))") + + e = SumExpression((NegationExpression((m.x,)), -2)) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), -7) + self.assertEqual(str(e), "- x - 2") + self.assertEqual(e.to_string(verbose=True), "sum(neg(x), -2)") + + m.x.fix(2) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), -4) + self.assertEqual(str(e), "- x - 2") + self.assertEqual(e.to_string(verbose=True), "sum(neg(x), -2)") + + def test_linear(self): + m = ConcreteModel() + m.x = Var(range(3), initialize=range(3)) + m.y = Var(initialize=5) + with mutable_expression() as e: + for i in range(3): + e += i * m.x[i] + e += 5 + e += m.y + e -= 3 + + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 1 + 4 + 5 + 2) + self.assertEqual(str(e), "0*x[0] + x[1] + 2*x[2] + 5 + y - 3") + self.assertEqual( + e.to_string(verbose=True), + "sum(mon(0, x[0]), mon(1, x[1]), mon(2, x[2]), 5, mon(1, y), -3)", + ) + + self.assertIs(type(e), LinearExpression) + self.assertEqual(e.constant, 2) + cache = e._cache + self.assertEqual(e.linear_coefs, [0, 1, 2, 1]) + self.assertIs(cache, e._cache) + self.assertEqual(e.linear_vars, [m.x[0], m.x[1], m.x[2], m.y]) + self.assertIs(cache, e._cache) + + e = LinearExpression() + self.assertEqual(e.linear_coefs, []) + self.assertIsNot(cache, e._cache) + cache = e._cache + e = LinearExpression() + self.assertEqual(e.constant, 0) + self.assertIsNot(cache, e._cache) + cache = e._cache + e = LinearExpression() + self.assertEqual(e.linear_vars, []) + self.assertIsNot(cache, e._cache) + + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 0) + self.assertEqual(str(e), "0") + self.assertEqual(e.to_string(verbose=True), "sum(0)") + + e = LinearExpression(constant=5, linear_vars=[m.y, m.x[1]], linear_coefs=[3, 5]) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 25) + self.assertEqual(str(e), "5 + 3*y + 5*x[1]") + self.assertEqual(e.to_string(verbose=True), "sum(5, mon(3, y), mon(5, x[1]))") + + with self.assertRaisesRegex( + ValueError, + "Cannot specify both args and any of " + "{constant, linear_coefs, or linear_vars}", + ): + LinearExpression(5, constant=5) + + with self.assertRaisesRegex( + ValueError, + r"linear_vars \(\[y\]\) is not compatible with linear_coefs \(\[3, 5\]\)", + ): + LinearExpression(constant=5, linear_vars=[m.y], linear_coefs=[3, 5]) + + def test_expr_if(self): + m = ConcreteModel() + m.x = Var(range(3), initialize=range(3)) + m.y = Var(initialize=5) + e = Expr_if(IF=m.y >= 5, THEN=m.x[0] + 5, ELSE=m.x[1] ** 2) + + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), None) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 5) + self.assertEqual( + str(e), "Expr_if( ( 5 <= y ), then=( x[0] + 5 ), else=( x[1]**2 ) )" + ) + self.assertEqual( + e.to_string(verbose=True), + "Expr_if( ( 5 <= y ), then=( sum(mon(1, x[0]), 5) ), else=( pow(x[1], 2) ) )", + ) + + m.y.fix() + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 5) + + m.y.fix(4) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 2) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 1) + + e = Expr_if(IF=m.y >= 5, THEN=m.x[0] + 5, ELSE=m.x[1] + 10) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 11) + + m.y.fix(5) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 1) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 5) + + def test_unary(self): + m = ConcreteModel() + m.x = Var(initialize=5) + e = sin(2 * m.x) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), None) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), math.sin(10)) + self.assertEqual(str(e), "sin(2*x)") + self.assertEqual(e.to_string(verbose=True), "sin(mon(2, x))") + + m.x.fix(1) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), math.sin(2)) + self.assertEqual(str(e), "sin(2*x)") + self.assertEqual(e.to_string(verbose=True), "sin(mon(2, x))") + + def test_external(self): + m = ConcreteModel() + m.x = Var(initialize=16) + fcn = MockExternalFunction() + e = ExternalFunctionExpression((2 * m.x,), fcn) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), None) + self.assertEqual(is_fixed(e), False) + self.assertEqual(value(e), 25) + self.assertEqual(str(e), "mock_fcn(2*x)") + self.assertEqual(e.to_string(verbose=True), "mock_fcn(mon(2, x))") + + m.x.fix(1) + self.assertTrue(e.is_potentially_variable()) + self.assertEqual(e.polynomial_degree(), 0) + self.assertEqual(is_fixed(e), True) + self.assertEqual(value(e), 1) + self.assertEqual(str(e), "mock_fcn(2*x)") + self.assertEqual(e.to_string(verbose=True), "mock_fcn(mon(2, x))") + + +class TestExpressionDuplicateAPI(unittest.TestCase): + def test_negation(self): + m = ConcreteModel() + m.x = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = NegationExpression((m.x,)) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + + f = e.create_node_with_local_data((2,)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, (2,)) + + e = NPV_NegationExpression((m.p,)) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + + f = e.create_node_with_local_data((m.x,)) + self.assertIsNot(f, e) + self.assertIs(type(f), NegationExpression) + + def test_pow(self): + m = ConcreteModel() + m.x = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = PowExpression((m.x, 2)) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + + f = e.create_node_with_local_data((2, 3)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, (2, 3)) + + e = NPV_PowExpression((m.p, 2)) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + + f = e.create_node_with_local_data((m.p, m.x)) + self.assertIsNot(f, e) + self.assertIs(type(f), PowExpression) + + def test_min(self): + m = ConcreteModel() + m.x = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = MinExpression((m.x, 2)) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + + f = e.create_node_with_local_data((2, 3, 4)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, (2, 3, 4)) + + e = NPV_MinExpression((m.p, 2)) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + + f = e.create_node_with_local_data((m.p, m.x)) + self.assertIsNot(f, e) + self.assertIs(type(f), MinExpression) + + def test_max(self): + m = ConcreteModel() + m.x = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = MaxExpression((m.x, 2)) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + + f = e.create_node_with_local_data((2, 3, 4)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, (2, 3, 4)) + + e = NPV_MaxExpression((m.p, 2)) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + + f = e.create_node_with_local_data((m.p, m.x)) + self.assertIsNot(f, e) + self.assertIs(type(f), MaxExpression) + + def test_prod(self): + m = ConcreteModel() + m.x = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = ProductExpression((m.x, 2)) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + + f = e.create_node_with_local_data((m.p, 3)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, (m.p, 3)) + + e = NPV_ProductExpression((m.p, 2)) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + + f = e.create_node_with_local_data((m.p, m.x)) + self.assertIsNot(f, e) + self.assertIs(type(f), ProductExpression) + + def test_monomial(self): + m = ConcreteModel() + m.x = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = MonomialTermExpression((2, m.x)) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + # Note that MonomialTermExpression recreates the args tuple + self.assertEqual(f.args, e.args) + + f = e.create_node_with_local_data((m.p, 3)) + self.assertIsNot(f, e) + self.assertIs(type(f), NPV_ProductExpression) + self.assertEqual(f.args, (m.p, 3)) + + f = e.create_node_with_local_data((m.x, m.x)) + self.assertIsNot(f, e) + self.assertIs(type(f), ProductExpression) + self.assertEqual(f.args, (m.x, m.x)) + + def test_division(self): + m = ConcreteModel() + m.x = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = DivisionExpression((m.x, 2)) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + + f = e.create_node_with_local_data((2, 3)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, (2, 3)) + + e = NPV_DivisionExpression((m.p, 2)) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + + f = e.create_node_with_local_data((m.p, m.x)) + self.assertIsNot(f, e) + self.assertIs(type(f), DivisionExpression) + + def test_sum(self): + m = ConcreteModel() + m.x = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = SumExpression((m.x, 2)) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + + f = e.create_node_with_local_data((m.x, 2, 3)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, [m.x, 2, 3]) + + e = NPV_SumExpression((m.p, 2)) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + + f = e.create_node_with_local_data((m.p, m.x)) + self.assertIsNot(f, e) + self.assertIs(type(f), LinearExpression) + assertExpressionsStructurallyEqual( + self, f.args, [m.p, MonomialTermExpression((1, m.x))] + ) + + f = e.create_node_with_local_data((m.p, m.x**2)) + self.assertIsNot(f, e) + self.assertIs(type(f), SumExpression) + assertExpressionsStructurallyEqual(self, f.args, [m.p, PowExpression((m.x, 2))]) + + def test_linear(self): + m = ConcreteModel() + m.x = Var(range(3), initialize=range(3)) + m.y = Var(initialize=5) + with mutable_expression() as e: + for i in range(3): + e += i * m.x[i] + e += 5 + e += m.y + e -= 3 + + def test_expr_if(self): + m = ConcreteModel() + m.x = Var(range(3), initialize=range(3)) + m.y = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = Expr_if(IF=m.y >= 5, THEN=m.x[0] + 5, ELSE=m.x[1] ** 2) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + + f = e.create_node_with_local_data((2, 3, 4)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, (2, 3, 4)) + + e = NPV_Expr_ifExpression((m.p <= 5, 2, m.p)) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + + f = e.create_node_with_local_data((m.p <= 5, m.x, m.p)) + self.assertIsNot(f, e) + self.assertIs(type(f), Expr_ifExpression) + + def test_unary_fcn(self): + m = ConcreteModel() + m.x = Var(range(3), initialize=range(3)) + m.y = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = sin(2 * m.y) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + self.assertIs(e._fcn, f._fcn) + self.assertIs(e._name, f._name) + + f = e.create_node_with_local_data((m.x[1],)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, (m.x[1],)) + self.assertIs(e._fcn, f._fcn) + self.assertIs(e._name, f._name) + + e = sin((2 * m.p)) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(e._fcn, f._fcn) + self.assertIs(e._name, f._name) + + f = e.create_node_with_local_data((m.x[1],)) + self.assertIsNot(f, e) + self.assertIs(type(f), UnaryFunctionExpression) + self.assertEqual(f.args, (m.x[1],)) + self.assertIs(e._fcn, f._fcn) + self.assertIs(e._name, f._name) + + def test_abs(self): + m = ConcreteModel() + m.x = Var(range(3), initialize=range(3)) + m.y = Var(initialize=5) + m.p = Param(initialize=3, mutable=True) + e = abs(2 * m.y) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + self.assertIs(e._fcn, f._fcn) + self.assertIs(e._name, f._name) + + f = e.create_node_with_local_data((m.x[1],)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, (m.x[1],)) + self.assertIs(e._fcn, f._fcn) + self.assertIs(e._name, f._name) + + e = abs((2 * m.p)) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(e._fcn, f._fcn) + self.assertIs(e._name, f._name) + + f = e.create_node_with_local_data((m.x[1],)) + self.assertIsNot(f, e) + self.assertIs(type(f), AbsExpression) + self.assertEqual(f.args, (m.x[1],)) + self.assertIs(e._fcn, f._fcn) + self.assertIs(e._name, f._name) + + def test_external(self): + m = ConcreteModel() + m.x = Var(initialize=16) + m.p = Param(initialize=32, mutable=True) + fcn = MockExternalFunction() + e = ExternalFunctionExpression((2 * m.x,), fcn) + + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(f.args, e.args) + self.assertIs(e._fcn, f._fcn) + + f = e.create_node_with_local_data((m.x,)) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertEqual(f.args, (m.x,)) + self.assertIs(e._fcn, f._fcn) + + e = NPV_ExternalFunctionExpression((2 * m.p,), fcn) + f = e.create_node_with_local_data(e.args) + self.assertIsNot(f, e) + self.assertIs(type(f), type(e)) + self.assertIs(e._fcn, f._fcn) + + f = e.create_node_with_local_data((m.x,)) + self.assertIsNot(f, e) + self.assertIs(type(f), ExternalFunctionExpression) + self.assertEqual(f.args, (m.x,)) + self.assertIs(e._fcn, f._fcn) diff --git a/pyomo/core/tests/unit/test_numeric_expr_dispatcher.py b/pyomo/core/tests/unit/test_numeric_expr_dispatcher.py new file mode 100644 index 00000000000..3e9e160b1b1 --- /dev/null +++ b/pyomo/core/tests/unit/test_numeric_expr_dispatcher.py @@ -0,0 +1,6923 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +# +# Unit Tests for expression generation +# +import logging +import math +import operator +import sys + +import pyomo.common.unittest as unittest + +from pyomo.common.log import LoggingIntercept +from pyomo.core.expr.compare import ( + assertExpressionsEqual, + assertExpressionsStructurallyEqual, +) +import pyomo.core.expr as EXPR +from pyomo.core.expr import ( + DivisionExpression, + NPV_DivisionExpression, + SumExpression, + NPV_SumExpression, + LinearExpression, + MonomialTermExpression, + NegationExpression, + NPV_NegationExpression, + ProductExpression, + NPV_ProductExpression, + PowExpression, + NPV_PowExpression, + AbsExpression, + NPV_AbsExpression, + UnaryFunctionExpression, + NPV_UnaryFunctionExpression, +) +from pyomo.core.expr.numeric_expr import ( + ARG_TYPE, + _categorize_arg_type, + _known_arg_types, + _MutableSumExpression, + _MutableLinearExpression, + _MutableNPVSumExpression, + _zero_one_optimizations, + enable_expression_optimizations, +) +from pyomo.core.expr.numvalue import NumericValue, native_types, native_numeric_types +from pyomo.core.expr.visitor import clone_expression +from pyomo.environ import ConcreteModel, Param, Var, BooleanVar +from pyomo.gdp import Disjunct + +logger = logging.getLogger(__name__) + + +class Base(object): + class SKIP: + pass + + NUM_TESTS = 21 + + def tearDown(self): + # Restore the state of 0 and 1 optimizations + _zero_one_optimizations.clear() + _zero_one_optimizations.update(self.zero_one_optimizations) + + def setUp(self): + # Save (and standardize) the state of 0 and 1 optimizations + self.zero_one_optimizations = set(_zero_one_optimizations) + + # Note there are 11 basic argument "types" that determine how + # expressions are generated (defined by the _EXPR_TYPE enum): + # + # class _EXPR_TYPE(enum.Enum): + # MUTABLE = -2 + # ASBINARY = -1 + # INVALID = 0 + # NATIVE = 1 + # NPV = 2 + # PARAM = 3 + # VAR = 4 + # MONOMIAL = 5 + # LINEAR = 6 + # SUM = 7 + # OTHER = 8 + self.m = ConcreteModel() + self.m.p0 = Param(initialize=0, mutable=False) + self.m.p1 = Param(initialize=1, mutable=False) + self.m.p = Param(initialize=6, mutable=False) + self.m.q = Param(initialize=7, mutable=True) + self.m.x = Var() + self.m.d = Disjunct() + self.bin = self.m.d.indicator_var.as_numeric() + + self.invalid = 'str' + self.asbinary = self.m.d.indicator_var + self.zero = 0 + self.one = 1 + self.native = 5 + self.npv = NPV_PowExpression((self.m.q, 2)) + self.param = self.m.p + self.param0 = self.m.p0 + self.param1 = self.m.p1 + self.param_mut = self.m.q + self.var = self.m.x + self.mon_native = MonomialTermExpression((3, self.m.x)) + self.mon_param = MonomialTermExpression((self.m.q, self.m.x)) + self.mon_npv = MonomialTermExpression((self.npv, self.m.x)) + self.linear = LinearExpression([4, self.mon_native]) + self.sum = SumExpression([4, self.mon_native, self.m.x**2]) + self.other = PowExpression((self.m.x, 2)) + + self.mutable_l0 = _MutableSumExpression([]) + self.mutable_l1 = _MutableLinearExpression([self.mon_npv]) + self.mutable_l2 = _MutableSumExpression([self.mon_npv, self.other]) + self.mutable_l3 = _MutableNPVSumExpression([self.npv]) + + # often repeated reference expressions + self.mon_bin = MonomialTermExpression((1, self.bin)) + self.mon_var = MonomialTermExpression((1, self.var)) + self.minus_bin = MonomialTermExpression((-1, self.bin)) + self.minus_npv = NPV_NegationExpression((self.npv,)) + self.minus_param_mut = NPV_NegationExpression((self.param_mut,)) + self.minus_var = MonomialTermExpression((-1, self.var)) + self.minus_mon_native = MonomialTermExpression((-3, self.mon_native.arg(1))) + self.minus_mon_param = MonomialTermExpression( + (NPV_NegationExpression((self.mon_param.arg(0),)), self.mon_param.arg(1)) + ) + self.minus_mon_npv = MonomialTermExpression( + (NPV_NegationExpression((self.mon_npv.arg(0),)), self.mon_npv.arg(1)) + ) + self.minus_linear = NegationExpression((self.linear,)) + self.minus_sum = NegationExpression((self.sum,)) + self.minus_other = NegationExpression((self.other,)) + self.minus_mutable_l2 = NegationExpression((self.mutable_l2,)) + + self.TEMPLATE = [ + self.invalid, + self.asbinary, + self.zero, + self.one, + # 4: + self.native, + self.npv, + self.param, + self.param_mut, + # 8: + self.var, + self.mon_native, + self.mon_param, + self.mon_npv, + # 12: + self.linear, + self.sum, + self.other, + self.mutable_l0, + # 16: + self.mutable_l1, + self.mutable_l2, + self.param0, + self.param1, + # 20: + self.mutable_l3, + ] + + def _print_error(self, test_num, test, ans): + msg = f"Failed test {test_num}:\n\t" + for arg in test: + try: + msg += str(arg) + except: + msg += '[ERROR]' + msg += f' ({arg.__class__.__name__}' + try: + msg += f': {arg.nargs()}' + except AttributeError: + pass + msg += ')\n\t' + msg += f'{ans} (result: {ans.__class__.__name__}' + try: + msg += f': {ans.nargs()}' + except AttributeError: + pass + msg += ')' + logger.error(msg) + + def _run_cases(self, tests, op): + self.assertEqual(len(tests), self.NUM_TESTS) + # Check that this test is checking the correct arg type (catch + # copy/paste errors) + ref = tests[0][:-2] + for test_num, test in enumerate(tests): + self.assertIs(test[-2], self.TEMPLATE[test_num]) + for i, j in zip(test[:-2], ref): + self.assertIs(i, j) + try: + for test_num, test in enumerate(tests): + ans = None + args = test[:-1] + result = test[-1] + if result is self.SKIP: + continue + orig_args = list(args) + orig_args_clone = [clone_expression(arg) for arg in args] + try: + mutable = [isinstance(arg, _MutableSumExpression) for arg in args] + classes = [arg.__class__ for arg in args] + with LoggingIntercept() as LOG: + ans = op(*args) + if not any(arg is self.asbinary for arg in args): + self.assertEqual(LOG.getvalue(), "") + assertExpressionsEqual(self, result, ans) + for i, arg in enumerate(args): + self.assertFalse(isinstance(arg, _MutableSumExpression)) + self.assertIs(arg, orig_args[i]) + if mutable[i]: + self.assertIsNot(arg.__class__, classes[i]) + assertExpressionsEqual( + self, + _MutableSumExpression(arg.args), + _MutableSumExpression(orig_args_clone[i].args), + ) + else: + self.assertIs(arg.__class__, classes[i]) + assertExpressionsEqual(self, arg, orig_args_clone[i]) + except TypeError: + if result is not NotImplemented: + raise + except ZeroDivisionError: + if result is not ZeroDivisionError: + raise + except ValueError: + if result is not ValueError: + raise + finally: + for i, arg in enumerate(args): + if mutable[i]: + arg.__class__ = classes[i] + except: + self._print_error(test_num, test, ans) + raise + + def _run_iadd_cases(self, tests, op): + self.assertEqual(len(tests), self.NUM_TESTS) + # Check that this test is checking the correct arg type (catch + # copy/paste errors) + ref = tests[0][:-2] + for test_num, test in enumerate(tests): + self.assertIs(test[-2], self.TEMPLATE[test_num]) + for i, j in zip(test[:-2], ref): + self.assertIs(i, j) + try: + for test_num, test in enumerate(tests): + ans = None + args = test[:-1] + result = test[-1] + if result is self.SKIP: + continue + orig_args = list(args) + orig_args_clone = [clone_expression(arg) for arg in args] + try: + mutable = [isinstance(arg, _MutableSumExpression) for arg in args] + classes = [arg.__class__ for arg in args] + with LoggingIntercept() as LOG: + ans = op(*args) + if not any(arg is self.asbinary for arg in args): + self.assertEqual(LOG.getvalue(), "") + assertExpressionsEqual(self, result, ans) + for i, arg in enumerate(args): + self.assertIs(arg, orig_args[i]) + if mutable[i]: + if i: + self.assertFalse(isinstance(arg, _MutableSumExpression)) + self.assertIsNot(arg.__class__, classes[i]) + assertExpressionsEqual( + self, + _MutableSumExpression(arg.args), + _MutableSumExpression(orig_args_clone[i].args), + ) + else: + self.assertIsInstance(arg, _MutableSumExpression) + self.assertIs(arg, ans) + else: + self.assertIs(arg.__class__, classes[i]) + assertExpressionsEqual(self, arg, orig_args_clone[i]) + except TypeError: + if result is not NotImplemented: + raise + except ZeroDivisionError: + if result is not ZeroDivisionError: + raise + except ValueError: + if result is not ValueError: + raise + else: + # Don't reset mutable args yet: we need to print out the error! + for i, arg in enumerate(args): + if mutable[i]: + arg.__class__ = classes[i] + arg._args_ = orig_args_clone[i]._args_ + arg._nargs = orig_args_clone[i]._nargs + except: + self._print_error(test_num, orig_args_clone + [result], ans) + for i, arg in enumerate(args): + if mutable[i]: + arg.__class__ = classes[i] + arg._args_ = orig_args_clone[i]._args_ + arg._nargs = orig_args_clone[i]._nargs + raise + + +class TestExpressionGeneration(Base, unittest.TestCase): + def setUp(self): + super().setUp() + enable_expression_optimizations(zero=False, one=True) + + # + # + # ADDITION + # + # + + def test_add_invalid(self): + tests = [ + # "invalid(str) + invalid(str)" is a legitimate Python + # operation and should never hit the Pyomo expression + # system + (self.invalid, self.invalid, self.SKIP), + (self.invalid, self.asbinary, NotImplemented), + (self.invalid, self.zero, NotImplemented), + (self.invalid, self.one, NotImplemented), + # 4: + (self.invalid, self.native, NotImplemented), + (self.invalid, self.npv, NotImplemented), + (self.invalid, self.param, NotImplemented), + (self.invalid, self.param_mut, NotImplemented), + # 8: + (self.invalid, self.var, NotImplemented), + (self.invalid, self.mon_native, NotImplemented), + (self.invalid, self.mon_param, NotImplemented), + (self.invalid, self.mon_npv, NotImplemented), + # 12: + (self.invalid, self.linear, NotImplemented), + (self.invalid, self.sum, NotImplemented), + (self.invalid, self.other, NotImplemented), + (self.invalid, self.mutable_l0, NotImplemented), + # 16: + (self.invalid, self.mutable_l1, NotImplemented), + (self.invalid, self.mutable_l2, NotImplemented), + (self.invalid, self.param0, NotImplemented), + (self.invalid, self.param1, NotImplemented), + # 20: + (self.invalid, self.mutable_l3, NotImplemented), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_asbinary(self): + tests = [ + (self.asbinary, self.invalid, NotImplemented), + # BooleanVar objects do not support addition + (self.asbinary, self.asbinary, NotImplemented), + (self.asbinary, self.zero, self.bin), + (self.asbinary, self.one, LinearExpression([self.mon_bin, 1])), + # 4: + (self.asbinary, self.native, LinearExpression([self.mon_bin, 5])), + (self.asbinary, self.npv, LinearExpression([self.mon_bin, self.npv])), + (self.asbinary, self.param, LinearExpression([self.mon_bin, 6])), + ( + self.asbinary, + self.param_mut, + LinearExpression([self.mon_bin, self.param_mut]), + ), + # 8: + (self.asbinary, self.var, LinearExpression([self.mon_bin, self.mon_var])), + ( + self.asbinary, + self.mon_native, + LinearExpression([self.mon_bin, self.mon_native]), + ), + ( + self.asbinary, + self.mon_param, + LinearExpression([self.mon_bin, self.mon_param]), + ), + ( + self.asbinary, + self.mon_npv, + LinearExpression([self.mon_bin, self.mon_npv]), + ), + # 12: + ( + self.asbinary, + self.linear, + LinearExpression(self.linear.args + [self.mon_bin]), + ), + (self.asbinary, self.sum, SumExpression(self.sum.args + [self.bin])), + (self.asbinary, self.other, SumExpression([self.bin, self.other])), + (self.asbinary, self.mutable_l0, self.bin), + # 16: + ( + self.asbinary, + self.mutable_l1, + LinearExpression([self.mon_bin, self.mon_npv]), + ), + ( + self.asbinary, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.bin]), + ), + (self.asbinary, self.param0, self.bin), + (self.asbinary, self.param1, LinearExpression([self.mon_bin, 1])), + # 20: + ( + self.asbinary, + self.mutable_l3, + LinearExpression([self.mon_bin, self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_zero(self): + tests = [ + (self.zero, self.invalid, NotImplemented), + (self.zero, self.asbinary, self.bin), + (self.zero, self.zero, 0), + (self.zero, self.one, 1), + # 4: + (self.zero, self.native, 5), + (self.zero, self.npv, self.npv), + (self.zero, self.param, 6), + (self.zero, self.param_mut, self.param_mut), + # 8: + (self.zero, self.var, self.var), + (self.zero, self.mon_native, self.mon_native), + (self.zero, self.mon_param, self.mon_param), + (self.zero, self.mon_npv, self.mon_npv), + # 12: + (self.zero, self.linear, self.linear), + (self.zero, self.sum, self.sum), + (self.zero, self.other, self.other), + (self.zero, self.mutable_l0, 0), + # 16: + (self.zero, self.mutable_l1, self.mon_npv), + (self.zero, self.mutable_l2, self.mutable_l2), + (self.zero, self.param0, 0), + (self.zero, self.param1, 1), + # 20: + (self.zero, self.mutable_l3, self.npv), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_one(self): + tests = [ + (self.one, self.invalid, NotImplemented), + (self.one, self.asbinary, LinearExpression([1, self.mon_bin])), + (self.one, self.zero, 1), + (self.one, self.one, 2), + # 4: + (self.one, self.native, 6), + (self.one, self.npv, NPV_SumExpression([1, self.npv])), + (self.one, self.param, 7), + (self.one, self.param_mut, NPV_SumExpression([1, self.param_mut])), + # 8: + (self.one, self.var, LinearExpression([1, self.mon_var])), + (self.one, self.mon_native, LinearExpression([1, self.mon_native])), + (self.one, self.mon_param, LinearExpression([1, self.mon_param])), + (self.one, self.mon_npv, LinearExpression([1, self.mon_npv])), + # 12: + (self.one, self.linear, LinearExpression(self.linear.args + [1])), + (self.one, self.sum, SumExpression(self.sum.args + [1])), + (self.one, self.other, SumExpression([1, self.other])), + (self.one, self.mutable_l0, 1), + # 16: + (self.one, self.mutable_l1, LinearExpression([1] + self.mutable_l1.args)), + (self.one, self.mutable_l2, SumExpression(self.mutable_l2.args + [1])), + (self.one, self.param0, 1), + (self.one, self.param1, 2), + # 20: + (self.one, self.mutable_l3, NPV_SumExpression([1, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_native(self): + tests = [ + (self.native, self.invalid, NotImplemented), + (self.native, self.asbinary, LinearExpression([5, self.mon_bin])), + (self.native, self.zero, 5), + (self.native, self.one, 6), + # 4: + (self.native, self.native, 10), + (self.native, self.npv, NPV_SumExpression([5, self.npv])), + (self.native, self.param, 11), + (self.native, self.param_mut, NPV_SumExpression([5, self.param_mut])), + # 8: + (self.native, self.var, LinearExpression([5, self.mon_var])), + (self.native, self.mon_native, LinearExpression([5, self.mon_native])), + (self.native, self.mon_param, LinearExpression([5, self.mon_param])), + (self.native, self.mon_npv, LinearExpression([5, self.mon_npv])), + # 12: + (self.native, self.linear, LinearExpression(self.linear.args + [5])), + (self.native, self.sum, SumExpression(self.sum.args + [5])), + (self.native, self.other, SumExpression([5, self.other])), + (self.native, self.mutable_l0, 5), + # 16: + ( + self.native, + self.mutable_l1, + LinearExpression([5] + self.mutable_l1.args), + ), + (self.native, self.mutable_l2, SumExpression(self.mutable_l2.args + [5])), + (self.native, self.param0, 5), + (self.native, self.param1, 6), + # 20: + (self.native, self.mutable_l3, NPV_SumExpression([5, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_npv(self): + tests = [ + (self.npv, self.invalid, NotImplemented), + (self.npv, self.asbinary, LinearExpression([self.npv, self.mon_bin])), + (self.npv, self.zero, self.npv), + (self.npv, self.one, NPV_SumExpression([self.npv, 1])), + # 4: + (self.npv, self.native, NPV_SumExpression([self.npv, 5])), + (self.npv, self.npv, NPV_SumExpression([self.npv, self.npv])), + (self.npv, self.param, NPV_SumExpression([self.npv, 6])), + (self.npv, self.param_mut, NPV_SumExpression([self.npv, self.param_mut])), + # 8: + (self.npv, self.var, LinearExpression([self.npv, self.mon_var])), + (self.npv, self.mon_native, LinearExpression([self.npv, self.mon_native])), + (self.npv, self.mon_param, LinearExpression([self.npv, self.mon_param])), + (self.npv, self.mon_npv, LinearExpression([self.npv, self.mon_npv])), + # 12: + (self.npv, self.linear, LinearExpression(self.linear.args + [self.npv])), + (self.npv, self.sum, SumExpression(self.sum.args + [self.npv])), + (self.npv, self.other, SumExpression([self.npv, self.other])), + (self.npv, self.mutable_l0, self.npv), + # 16: + ( + self.npv, + self.mutable_l1, + LinearExpression([self.npv] + self.mutable_l1.args), + ), + ( + self.npv, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.npv]), + ), + (self.npv, self.param0, self.npv), + (self.npv, self.param1, NPV_SumExpression([self.npv, 1])), + # 20: + (self.npv, self.mutable_l3, NPV_SumExpression([self.npv, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_param(self): + tests = [ + (self.param, self.invalid, NotImplemented), + (self.param, self.asbinary, LinearExpression([6, self.mon_bin])), + (self.param, self.zero, 6), + (self.param, self.one, 7), + # 4: + (self.param, self.native, 11), + (self.param, self.npv, NPV_SumExpression([6, self.npv])), + (self.param, self.param, 12), + (self.param, self.param_mut, NPV_SumExpression([6, self.param_mut])), + # 8: + (self.param, self.var, LinearExpression([6, self.mon_var])), + (self.param, self.mon_native, LinearExpression([6, self.mon_native])), + (self.param, self.mon_param, LinearExpression([6, self.mon_param])), + (self.param, self.mon_npv, LinearExpression([6, self.mon_npv])), + # 12: + (self.param, self.linear, LinearExpression(self.linear.args + [6])), + (self.param, self.sum, SumExpression(self.sum.args + [6])), + (self.param, self.other, SumExpression([6, self.other])), + (self.param, self.mutable_l0, 6), + # 16: + (self.param, self.mutable_l1, LinearExpression([6] + self.mutable_l1.args)), + (self.param, self.mutable_l2, SumExpression(self.mutable_l2.args + [6])), + (self.param, self.param0, 6), + (self.param, self.param1, 7), + # 20: + (self.param, self.mutable_l3, NPV_SumExpression([6, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_param_mut(self): + tests = [ + (self.param_mut, self.invalid, NotImplemented), + ( + self.param_mut, + self.asbinary, + LinearExpression([self.param_mut, self.mon_bin]), + ), + (self.param_mut, self.zero, self.param_mut), + (self.param_mut, self.one, NPV_SumExpression([self.param_mut, 1])), + # 4: + (self.param_mut, self.native, NPV_SumExpression([self.param_mut, 5])), + (self.param_mut, self.npv, NPV_SumExpression([self.param_mut, self.npv])), + (self.param_mut, self.param, NPV_SumExpression([self.param_mut, 6])), + ( + self.param_mut, + self.param_mut, + NPV_SumExpression([self.param_mut, self.param_mut]), + ), + # 8: + ( + self.param_mut, + self.var, + LinearExpression([self.param_mut, self.mon_var]), + ), + ( + self.param_mut, + self.mon_native, + LinearExpression([self.param_mut, self.mon_native]), + ), + ( + self.param_mut, + self.mon_param, + LinearExpression([self.param_mut, self.mon_param]), + ), + ( + self.param_mut, + self.mon_npv, + LinearExpression([self.param_mut, self.mon_npv]), + ), + # 12: + ( + self.param_mut, + self.linear, + LinearExpression(self.linear.args + [self.param_mut]), + ), + (self.param_mut, self.sum, SumExpression(self.sum.args + [self.param_mut])), + (self.param_mut, self.other, SumExpression([self.param_mut, self.other])), + (self.param_mut, self.mutable_l0, self.param_mut), + # 16: + ( + self.param_mut, + self.mutable_l1, + LinearExpression([self.param_mut] + self.mutable_l1.args), + ), + ( + self.param_mut, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.param_mut]), + ), + (self.param_mut, self.param0, self.param_mut), + (self.param_mut, self.param1, NPV_SumExpression([self.param_mut, 1])), + # 20: + ( + self.param_mut, + self.mutable_l3, + NPV_SumExpression([self.param_mut, self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_var(self): + tests = [ + (self.var, self.invalid, NotImplemented), + (self.var, self.asbinary, LinearExpression([self.mon_var, self.mon_bin])), + (self.var, self.zero, self.var), + (self.var, self.one, LinearExpression([self.mon_var, 1])), + # 4: + (self.var, self.native, LinearExpression([self.mon_var, 5])), + (self.var, self.npv, LinearExpression([self.mon_var, self.npv])), + (self.var, self.param, LinearExpression([self.mon_var, 6])), + ( + self.var, + self.param_mut, + LinearExpression([self.mon_var, self.param_mut]), + ), + # 8: + (self.var, self.var, LinearExpression([self.mon_var, self.mon_var])), + ( + self.var, + self.mon_native, + LinearExpression([self.mon_var, self.mon_native]), + ), + ( + self.var, + self.mon_param, + LinearExpression([self.mon_var, self.mon_param]), + ), + (self.var, self.mon_npv, LinearExpression([self.mon_var, self.mon_npv])), + # 12: + ( + self.var, + self.linear, + LinearExpression(self.linear.args + [self.mon_var]), + ), + (self.var, self.sum, SumExpression(self.sum.args + [self.var])), + (self.var, self.other, SumExpression([self.var, self.other])), + (self.var, self.mutable_l0, self.var), + # 16: + ( + self.var, + self.mutable_l1, + LinearExpression([self.mon_var] + self.mutable_l1.args), + ), + ( + self.var, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.var]), + ), + (self.var, self.param0, self.var), + (self.var, self.param1, LinearExpression([self.mon_var, 1])), + # 20: + ( + self.var, + self.mutable_l3, + LinearExpression([MonomialTermExpression((1, self.var)), self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_mon_native(self): + tests = [ + (self.mon_native, self.invalid, NotImplemented), + ( + self.mon_native, + self.asbinary, + LinearExpression([self.mon_native, self.mon_bin]), + ), + (self.mon_native, self.zero, self.mon_native), + (self.mon_native, self.one, LinearExpression([self.mon_native, 1])), + # 4: + (self.mon_native, self.native, LinearExpression([self.mon_native, 5])), + (self.mon_native, self.npv, LinearExpression([self.mon_native, self.npv])), + (self.mon_native, self.param, LinearExpression([self.mon_native, 6])), + ( + self.mon_native, + self.param_mut, + LinearExpression([self.mon_native, self.param_mut]), + ), + # 8: + ( + self.mon_native, + self.var, + LinearExpression([self.mon_native, self.mon_var]), + ), + ( + self.mon_native, + self.mon_native, + LinearExpression([self.mon_native, self.mon_native]), + ), + ( + self.mon_native, + self.mon_param, + LinearExpression([self.mon_native, self.mon_param]), + ), + ( + self.mon_native, + self.mon_npv, + LinearExpression([self.mon_native, self.mon_npv]), + ), + # 12: + ( + self.mon_native, + self.linear, + LinearExpression(self.linear.args + [self.mon_native]), + ), + ( + self.mon_native, + self.sum, + SumExpression(self.sum.args + [self.mon_native]), + ), + (self.mon_native, self.other, SumExpression([self.mon_native, self.other])), + (self.mon_native, self.mutable_l0, self.mon_native), + # 16: + ( + self.mon_native, + self.mutable_l1, + LinearExpression([self.mon_native] + self.mutable_l1.args), + ), + ( + self.mon_native, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.mon_native]), + ), + (self.mon_native, self.param0, self.mon_native), + (self.mon_native, self.param1, LinearExpression([self.mon_native, 1])), + # 20: + ( + self.mon_native, + self.mutable_l3, + LinearExpression([self.mon_native, self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_mon_param(self): + tests = [ + (self.mon_param, self.invalid, NotImplemented), + ( + self.mon_param, + self.asbinary, + LinearExpression([self.mon_param, self.mon_bin]), + ), + (self.mon_param, self.zero, self.mon_param), + (self.mon_param, self.one, LinearExpression([self.mon_param, 1])), + # 4: + (self.mon_param, self.native, LinearExpression([self.mon_param, 5])), + (self.mon_param, self.npv, LinearExpression([self.mon_param, self.npv])), + (self.mon_param, self.param, LinearExpression([self.mon_param, 6])), + ( + self.mon_param, + self.param_mut, + LinearExpression([self.mon_param, self.param_mut]), + ), + # 8: + ( + self.mon_param, + self.var, + LinearExpression([self.mon_param, self.mon_var]), + ), + ( + self.mon_param, + self.mon_native, + LinearExpression([self.mon_param, self.mon_native]), + ), + ( + self.mon_param, + self.mon_param, + LinearExpression([self.mon_param, self.mon_param]), + ), + ( + self.mon_param, + self.mon_npv, + LinearExpression([self.mon_param, self.mon_npv]), + ), + # 12: + ( + self.mon_param, + self.linear, + LinearExpression(self.linear.args + [self.mon_param]), + ), + (self.mon_param, self.sum, SumExpression(self.sum.args + [self.mon_param])), + (self.mon_param, self.other, SumExpression([self.mon_param, self.other])), + (self.mon_param, self.mutable_l0, self.mon_param), + # 16: + ( + self.mon_param, + self.mutable_l1, + LinearExpression([self.mon_param] + self.mutable_l1.args), + ), + ( + self.mon_param, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.mon_param]), + ), + (self.mon_param, self.param0, self.mon_param), + (self.mon_param, self.param1, LinearExpression([self.mon_param, 1])), + # 20: + ( + self.mon_param, + self.mutable_l3, + LinearExpression([self.mon_param, self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_mon_npv(self): + tests = [ + (self.mon_npv, self.invalid, NotImplemented), + ( + self.mon_npv, + self.asbinary, + LinearExpression([self.mon_npv, self.mon_bin]), + ), + (self.mon_npv, self.zero, self.mon_npv), + (self.mon_npv, self.one, LinearExpression([self.mon_npv, 1])), + # 4: + (self.mon_npv, self.native, LinearExpression([self.mon_npv, 5])), + (self.mon_npv, self.npv, LinearExpression([self.mon_npv, self.npv])), + (self.mon_npv, self.param, LinearExpression([self.mon_npv, 6])), + ( + self.mon_npv, + self.param_mut, + LinearExpression([self.mon_npv, self.param_mut]), + ), + # 8: + (self.mon_npv, self.var, LinearExpression([self.mon_npv, self.mon_var])), + ( + self.mon_npv, + self.mon_native, + LinearExpression([self.mon_npv, self.mon_native]), + ), + ( + self.mon_npv, + self.mon_param, + LinearExpression([self.mon_npv, self.mon_param]), + ), + ( + self.mon_npv, + self.mon_npv, + LinearExpression([self.mon_npv, self.mon_npv]), + ), + # 12: + ( + self.mon_npv, + self.linear, + LinearExpression(self.linear.args + [self.mon_npv]), + ), + (self.mon_npv, self.sum, SumExpression(self.sum.args + [self.mon_npv])), + (self.mon_npv, self.other, SumExpression([self.mon_npv, self.other])), + (self.mon_npv, self.mutable_l0, self.mon_npv), + # 16: + ( + self.mon_npv, + self.mutable_l1, + LinearExpression([self.mon_npv] + self.mutable_l1.args), + ), + ( + self.mon_npv, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.mon_npv]), + ), + (self.mon_npv, self.param0, self.mon_npv), + (self.mon_npv, self.param1, LinearExpression([self.mon_npv, 1])), + # 20: + (self.mon_npv, self.mutable_l3, LinearExpression([self.mon_npv, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_linear(self): + tests = [ + (self.linear, self.invalid, NotImplemented), + ( + self.linear, + self.asbinary, + LinearExpression(self.linear.args + [self.mon_bin]), + ), + (self.linear, self.zero, self.linear), + (self.linear, self.one, LinearExpression(self.linear.args + [1])), + # 4: + (self.linear, self.native, LinearExpression(self.linear.args + [5])), + (self.linear, self.npv, LinearExpression(self.linear.args + [self.npv])), + (self.linear, self.param, LinearExpression(self.linear.args + [6])), + ( + self.linear, + self.param_mut, + LinearExpression(self.linear.args + [self.param_mut]), + ), + # 8: + ( + self.linear, + self.var, + LinearExpression(self.linear.args + [self.mon_var]), + ), + ( + self.linear, + self.mon_native, + LinearExpression(self.linear.args + [self.mon_native]), + ), + ( + self.linear, + self.mon_param, + LinearExpression(self.linear.args + [self.mon_param]), + ), + ( + self.linear, + self.mon_npv, + LinearExpression(self.linear.args + [self.mon_npv]), + ), + # 12: + ( + self.linear, + self.linear, + LinearExpression(self.linear.args + self.linear.args), + ), + (self.linear, self.sum, SumExpression(self.sum.args + [self.linear])), + (self.linear, self.other, SumExpression([self.linear, self.other])), + (self.linear, self.mutable_l0, self.linear), + # 16: + ( + self.linear, + self.mutable_l1, + LinearExpression(self.linear.args + self.mutable_l1.args), + ), + ( + self.linear, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.linear]), + ), + (self.linear, self.param0, self.linear), + (self.linear, self.param1, LinearExpression(self.linear.args + [1])), + # 20: + ( + self.linear, + self.mutable_l3, + LinearExpression(self.linear.args + [self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_sum(self): + tests = [ + (self.sum, self.invalid, NotImplemented), + (self.sum, self.asbinary, SumExpression(self.sum.args + [self.bin])), + (self.sum, self.zero, self.sum), + (self.sum, self.one, SumExpression(self.sum.args + [1])), + # 4: + (self.sum, self.native, SumExpression(self.sum.args + [5])), + (self.sum, self.npv, SumExpression(self.sum.args + [self.npv])), + (self.sum, self.param, SumExpression(self.sum.args + [6])), + (self.sum, self.param_mut, SumExpression(self.sum.args + [self.param_mut])), + # 8: + (self.sum, self.var, SumExpression(self.sum.args + [self.var])), + ( + self.sum, + self.mon_native, + SumExpression(self.sum.args + [self.mon_native]), + ), + (self.sum, self.mon_param, SumExpression(self.sum.args + [self.mon_param])), + (self.sum, self.mon_npv, SumExpression(self.sum.args + [self.mon_npv])), + # 12: + (self.sum, self.linear, SumExpression(self.sum.args + [self.linear])), + (self.sum, self.sum, SumExpression(self.sum.args + self.sum.args)), + (self.sum, self.other, SumExpression(self.sum.args + [self.other])), + (self.sum, self.mutable_l0, self.sum), + # 16: + ( + self.sum, + self.mutable_l1, + SumExpression(self.sum.args + self.mutable_l1.args), + ), + ( + self.sum, + self.mutable_l2, + SumExpression(self.sum.args + self.mutable_l2.args), + ), + (self.sum, self.param0, self.sum), + (self.sum, self.param1, SumExpression(self.sum.args + [1])), + # 20: + (self.sum, self.mutable_l3, SumExpression(self.sum.args + [self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_other(self): + tests = [ + (self.other, self.invalid, NotImplemented), + (self.other, self.asbinary, SumExpression([self.other, self.bin])), + (self.other, self.zero, self.other), + (self.other, self.one, SumExpression([self.other, 1])), + # 4: + (self.other, self.native, SumExpression([self.other, 5])), + (self.other, self.npv, SumExpression([self.other, self.npv])), + (self.other, self.param, SumExpression([self.other, 6])), + (self.other, self.param_mut, SumExpression([self.other, self.param_mut])), + # 8: + (self.other, self.var, SumExpression([self.other, self.var])), + (self.other, self.mon_native, SumExpression([self.other, self.mon_native])), + (self.other, self.mon_param, SumExpression([self.other, self.mon_param])), + (self.other, self.mon_npv, SumExpression([self.other, self.mon_npv])), + # 12: + (self.other, self.linear, SumExpression([self.other, self.linear])), + (self.other, self.sum, SumExpression(self.sum.args + [self.other])), + (self.other, self.other, SumExpression([self.other, self.other])), + (self.other, self.mutable_l0, self.other), + # 16: + (self.other, self.mutable_l1, SumExpression([self.other, self.mon_npv])), + ( + self.other, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.other]), + ), + (self.other, self.param0, self.other), + (self.other, self.param1, SumExpression([self.other, 1])), + # 20: + (self.other, self.mutable_l3, SumExpression([self.other, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_mutable_l0(self): + tests = [ + (self.mutable_l0, self.invalid, NotImplemented), + (self.mutable_l0, self.asbinary, self.bin), + (self.mutable_l0, self.zero, 0), + (self.mutable_l0, self.one, 1), + # 4: + (self.mutable_l0, self.native, 5), + (self.mutable_l0, self.npv, self.npv), + (self.mutable_l0, self.param, 6), + (self.mutable_l0, self.param_mut, self.param_mut), + # 8: + (self.mutable_l0, self.var, self.var), + (self.mutable_l0, self.mon_native, self.mon_native), + (self.mutable_l0, self.mon_param, self.mon_param), + (self.mutable_l0, self.mon_npv, self.mon_npv), + # 12: + (self.mutable_l0, self.linear, self.linear), + (self.mutable_l0, self.sum, self.sum), + (self.mutable_l0, self.other, self.other), + (self.mutable_l0, self.mutable_l0, 0), + # 16: + (self.mutable_l0, self.mutable_l1, self.mon_npv), + (self.mutable_l0, self.mutable_l2, self.mutable_l2), + (self.mutable_l0, self.param0, 0), + (self.mutable_l0, self.param1, 1), + # 20: + (self.mutable_l0, self.mutable_l3, self.npv), + ] + self._run_cases(tests, operator.add) + # Mutable iadd handled by separate tests + # self._run_cases(tests, operator.iadd) + + def test_add_mutable_l1(self): + tests = [ + (self.mutable_l1, self.invalid, NotImplemented), + ( + self.mutable_l1, + self.asbinary, + LinearExpression(self.mutable_l1.args + [self.mon_bin]), + ), + (self.mutable_l1, self.zero, self.mon_npv), + (self.mutable_l1, self.one, LinearExpression(self.mutable_l1.args + [1])), + # 4: + ( + self.mutable_l1, + self.native, + LinearExpression(self.mutable_l1.args + [5]), + ), + ( + self.mutable_l1, + self.npv, + LinearExpression(self.mutable_l1.args + [self.npv]), + ), + (self.mutable_l1, self.param, LinearExpression(self.mutable_l1.args + [6])), + ( + self.mutable_l1, + self.param_mut, + LinearExpression(self.mutable_l1.args + [self.param_mut]), + ), + # 8: + ( + self.mutable_l1, + self.var, + LinearExpression(self.mutable_l1.args + [self.mon_var]), + ), + ( + self.mutable_l1, + self.mon_native, + LinearExpression(self.mutable_l1.args + [self.mon_native]), + ), + ( + self.mutable_l1, + self.mon_param, + LinearExpression(self.mutable_l1.args + [self.mon_param]), + ), + ( + self.mutable_l1, + self.mon_npv, + LinearExpression(self.mutable_l1.args + [self.mon_npv]), + ), + # 12: + ( + self.mutable_l1, + self.linear, + LinearExpression(self.linear.args + self.mutable_l1.args), + ), + ( + self.mutable_l1, + self.sum, + SumExpression(self.sum.args + self.mutable_l1.args), + ), + ( + self.mutable_l1, + self.other, + SumExpression(self.mutable_l1.args + [self.other]), + ), + (self.mutable_l1, self.mutable_l0, self.mon_npv), + # 16: + ( + self.mutable_l1, + self.mutable_l1, + LinearExpression(self.mutable_l1.args + self.mutable_l1.args), + ), + ( + self.mutable_l1, + self.mutable_l2, + SumExpression(self.mutable_l2.args + self.mutable_l1.args), + ), + (self.mutable_l1, self.param0, self.mon_npv), + ( + self.mutable_l1, + self.param1, + LinearExpression(self.mutable_l1.args + [1]), + ), + # 20: + ( + self.mutable_l1, + self.mutable_l3, + LinearExpression(self.mutable_l1.args + [self.npv]), + ), + ] + self._run_cases(tests, operator.add) + # Mutable iadd handled by separate tests + # self._run_cases(tests, operator.iadd) + + def test_add_mutable_l2(self): + tests = [ + (self.mutable_l2, self.invalid, NotImplemented), + ( + self.mutable_l2, + self.asbinary, + SumExpression(self.mutable_l2.args + [self.bin]), + ), + (self.mutable_l2, self.zero, self.mutable_l2), + (self.mutable_l2, self.one, SumExpression(self.mutable_l2.args + [1])), + # 4: + (self.mutable_l2, self.native, SumExpression(self.mutable_l2.args + [5])), + ( + self.mutable_l2, + self.npv, + SumExpression(self.mutable_l2.args + [self.npv]), + ), + (self.mutable_l2, self.param, SumExpression(self.mutable_l2.args + [6])), + ( + self.mutable_l2, + self.param_mut, + SumExpression(self.mutable_l2.args + [self.param_mut]), + ), + # 8: + ( + self.mutable_l2, + self.var, + SumExpression(self.mutable_l2.args + [self.var]), + ), + ( + self.mutable_l2, + self.mon_native, + SumExpression(self.mutable_l2.args + [self.mon_native]), + ), + ( + self.mutable_l2, + self.mon_param, + SumExpression(self.mutable_l2.args + [self.mon_param]), + ), + ( + self.mutable_l2, + self.mon_npv, + SumExpression(self.mutable_l2.args + [self.mon_npv]), + ), + # 12: + ( + self.mutable_l2, + self.linear, + SumExpression(self.mutable_l2.args + [self.linear]), + ), + ( + self.mutable_l2, + self.sum, + SumExpression(self.mutable_l2.args + self.sum.args), + ), + ( + self.mutable_l2, + self.other, + SumExpression(self.mutable_l2.args + [self.other]), + ), + (self.mutable_l2, self.mutable_l0, self.mutable_l2), + # 16: + ( + self.mutable_l2, + self.mutable_l1, + SumExpression(self.mutable_l2.args + self.mutable_l1.args), + ), + ( + self.mutable_l2, + self.mutable_l2, + SumExpression(self.mutable_l2.args + self.mutable_l2.args), + ), + (self.mutable_l2, self.param0, self.mutable_l2), + (self.mutable_l2, self.param1, SumExpression(self.mutable_l2.args + [1])), + # 20: + ( + self.mutable_l2, + self.mutable_l3, + SumExpression(self.mutable_l2.args + [self.npv]), + ), + ] + self._run_cases(tests, operator.add) + # Mutable iadd handled by separate tests + # self._run_cases(tests, operator.iadd) + + def test_add_param0(self): + tests = [ + (self.param0, self.invalid, NotImplemented), + (self.param0, self.asbinary, self.bin), + (self.param0, self.zero, 0), + (self.param0, self.one, 1), + # 4: + (self.param0, self.native, 5), + (self.param0, self.npv, self.npv), + (self.param0, self.param, 6), + (self.param0, self.param_mut, self.param_mut), + # 8: + (self.param0, self.var, self.var), + (self.param0, self.mon_native, self.mon_native), + (self.param0, self.mon_param, self.mon_param), + (self.param0, self.mon_npv, self.mon_npv), + # 12: + (self.param0, self.linear, self.linear), + (self.param0, self.sum, self.sum), + (self.param0, self.other, self.other), + (self.param0, self.mutable_l0, 0), + # 16: + (self.param0, self.mutable_l1, self.mon_npv), + (self.param0, self.mutable_l2, self.mutable_l2), + (self.param0, self.param0, 0), + (self.param0, self.param1, 1), + # 20: + (self.param0, self.mutable_l3, self.npv), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_param1(self): + tests = [ + (self.param1, self.invalid, NotImplemented), + (self.param1, self.asbinary, LinearExpression([1, self.mon_bin])), + (self.param1, self.zero, 1), + (self.param1, self.one, 2), + # 4: + (self.param1, self.native, 6), + (self.param1, self.npv, NPV_SumExpression([1, self.npv])), + (self.param1, self.param, 7), + (self.param1, self.param_mut, NPV_SumExpression([1, self.param_mut])), + # 8: + (self.param1, self.var, LinearExpression([1, self.mon_var])), + (self.param1, self.mon_native, LinearExpression([1, self.mon_native])), + (self.param1, self.mon_param, LinearExpression([1, self.mon_param])), + (self.param1, self.mon_npv, LinearExpression([1, self.mon_npv])), + # 12: + (self.param1, self.linear, LinearExpression(self.linear.args + [1])), + (self.param1, self.sum, SumExpression(self.sum.args + [1])), + (self.param1, self.other, SumExpression([1, self.other])), + (self.param1, self.mutable_l0, 1), + # 16: + ( + self.param1, + self.mutable_l1, + LinearExpression([1] + self.mutable_l1.args), + ), + (self.param1, self.mutable_l2, SumExpression(self.mutable_l2.args + [1])), + (self.param1, self.param0, 1), + (self.param1, self.param1, 2), + # 20: + (self.param1, self.mutable_l3, NPV_SumExpression([1, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_mutable_l3(self): + tests = [ + (self.mutable_l3, self.invalid, NotImplemented), + ( + self.mutable_l3, + self.asbinary, + LinearExpression(self.mutable_l3.args + [self.mon_bin]), + ), + (self.mutable_l3, self.zero, self.npv), + (self.mutable_l3, self.one, NPV_SumExpression(self.mutable_l3.args + [1])), + # 4: + ( + self.mutable_l3, + self.native, + NPV_SumExpression(self.mutable_l3.args + [5]), + ), + ( + self.mutable_l3, + self.npv, + NPV_SumExpression(self.mutable_l3.args + [self.npv]), + ), + ( + self.mutable_l3, + self.param, + NPV_SumExpression(self.mutable_l3.args + [6]), + ), + ( + self.mutable_l3, + self.param_mut, + NPV_SumExpression(self.mutable_l3.args + [self.param_mut]), + ), + # 8: + ( + self.mutable_l3, + self.var, + LinearExpression(self.mutable_l3.args + [self.mon_var]), + ), + ( + self.mutable_l3, + self.mon_native, + LinearExpression(self.mutable_l3.args + [self.mon_native]), + ), + ( + self.mutable_l3, + self.mon_param, + LinearExpression(self.mutable_l3.args + [self.mon_param]), + ), + ( + self.mutable_l3, + self.mon_npv, + LinearExpression(self.mutable_l3.args + [self.mon_npv]), + ), + # 12: + ( + self.mutable_l3, + self.linear, + LinearExpression(self.linear.args + self.mutable_l3.args), + ), + ( + self.mutable_l3, + self.sum, + SumExpression(self.sum.args + self.mutable_l3.args), + ), + ( + self.mutable_l3, + self.other, + SumExpression(self.mutable_l3.args + [self.other]), + ), + (self.mutable_l3, self.mutable_l0, self.npv), + # 16: + ( + self.mutable_l3, + self.mutable_l1, + LinearExpression(self.mutable_l3.args + self.mutable_l1.args), + ), + ( + self.mutable_l3, + self.mutable_l2, + SumExpression(self.mutable_l2.args + self.mutable_l3.args), + ), + (self.mutable_l3, self.param0, self.npv), + ( + self.mutable_l3, + self.param1, + NPV_SumExpression(self.mutable_l3.args + [1]), + ), + # 20: + ( + self.mutable_l3, + self.mutable_l3, + NPV_SumExpression(self.mutable_l3.args + [self.npv]), + ), + ] + self._run_cases(tests, operator.add) + # Mutable iadd handled by separate tests + # self._run_cases(tests, operator.iadd) + + # + # + # SUBTRACTION + # + # + + def test_sub_invalid(self): + tests = [ + (self.invalid, self.invalid, NotImplemented), + (self.invalid, self.asbinary, NotImplemented), + (self.invalid, self.zero, NotImplemented), + (self.invalid, self.one, NotImplemented), + # 4: + (self.invalid, self.native, NotImplemented), + (self.invalid, self.npv, NotImplemented), + (self.invalid, self.param, NotImplemented), + (self.invalid, self.param_mut, NotImplemented), + # 8: + (self.invalid, self.var, NotImplemented), + (self.invalid, self.mon_native, NotImplemented), + (self.invalid, self.mon_param, NotImplemented), + (self.invalid, self.mon_npv, NotImplemented), + # 12: + (self.invalid, self.linear, NotImplemented), + (self.invalid, self.sum, NotImplemented), + (self.invalid, self.other, NotImplemented), + (self.invalid, self.mutable_l0, NotImplemented), + # 16: + (self.invalid, self.mutable_l1, NotImplemented), + (self.invalid, self.mutable_l2, NotImplemented), + (self.invalid, self.param0, NotImplemented), + (self.invalid, self.param1, NotImplemented), + # 20: + (self.invalid, self.mutable_l3, NotImplemented), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_asbinary(self): + tests = [ + (self.asbinary, self.invalid, NotImplemented), + # BooleanVar objects do not support addition + (self.asbinary, self.asbinary, NotImplemented), + (self.asbinary, self.zero, self.bin), + (self.asbinary, self.one, LinearExpression([self.mon_bin, -1])), + # 4: + (self.asbinary, self.native, LinearExpression([self.mon_bin, -5])), + (self.asbinary, self.npv, LinearExpression([self.mon_bin, self.minus_npv])), + (self.asbinary, self.param, LinearExpression([self.mon_bin, -6])), + ( + self.asbinary, + self.param_mut, + LinearExpression([self.mon_bin, self.minus_param_mut]), + ), + # 8: + (self.asbinary, self.var, LinearExpression([self.mon_bin, self.minus_var])), + ( + self.asbinary, + self.mon_native, + LinearExpression([self.mon_bin, self.minus_mon_native]), + ), + ( + self.asbinary, + self.mon_param, + LinearExpression([self.mon_bin, self.minus_mon_param]), + ), + ( + self.asbinary, + self.mon_npv, + LinearExpression([self.mon_bin, self.minus_mon_npv]), + ), + # 12: + (self.asbinary, self.linear, SumExpression([self.bin, self.minus_linear])), + (self.asbinary, self.sum, SumExpression([self.bin, self.minus_sum])), + (self.asbinary, self.other, SumExpression([self.bin, self.minus_other])), + (self.asbinary, self.mutable_l0, self.bin), + # 16: + ( + self.asbinary, + self.mutable_l1, + LinearExpression([self.mon_bin, self.minus_mon_npv]), + ), + ( + self.asbinary, + self.mutable_l2, + SumExpression([self.bin, self.minus_mutable_l2]), + ), + (self.asbinary, self.param0, self.bin), + (self.asbinary, self.param1, LinearExpression([self.mon_bin, -1])), + # 20: + ( + self.asbinary, + self.mutable_l3, + LinearExpression([self.mon_bin, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_zero(self): + tests = [ + (self.zero, self.invalid, NotImplemented), + (self.zero, self.asbinary, self.minus_bin), + (self.zero, self.zero, 0), + (self.zero, self.one, -1), + # 4: + (self.zero, self.native, -5), + (self.zero, self.npv, self.minus_npv), + (self.zero, self.param, -6), + (self.zero, self.param_mut, self.minus_param_mut), + # 8: + (self.zero, self.var, self.minus_var), + (self.zero, self.mon_native, self.minus_mon_native), + (self.zero, self.mon_param, self.minus_mon_param), + (self.zero, self.mon_npv, self.minus_mon_npv), + # 12: + (self.zero, self.linear, self.minus_linear), + (self.zero, self.sum, self.minus_sum), + (self.zero, self.other, self.minus_other), + (self.zero, self.mutable_l0, 0), + # 16: + (self.zero, self.mutable_l1, self.minus_mon_npv), + (self.zero, self.mutable_l2, self.minus_mutable_l2), + (self.zero, self.param0, 0), + (self.zero, self.param1, -1), + # 20: + (self.zero, self.mutable_l3, self.minus_npv), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_one(self): + tests = [ + (self.one, self.invalid, NotImplemented), + (self.one, self.asbinary, LinearExpression([1, self.minus_bin])), + (self.one, self.zero, 1), + (self.one, self.one, 0), + # 4: + (self.one, self.native, -4), + (self.one, self.npv, NPV_SumExpression([1, self.minus_npv])), + (self.one, self.param, -5), + (self.one, self.param_mut, NPV_SumExpression([1, self.minus_param_mut])), + # 8: + (self.one, self.var, LinearExpression([1, self.minus_var])), + (self.one, self.mon_native, LinearExpression([1, self.minus_mon_native])), + (self.one, self.mon_param, LinearExpression([1, self.minus_mon_param])), + (self.one, self.mon_npv, LinearExpression([1, self.minus_mon_npv])), + # 12: + (self.one, self.linear, SumExpression([1, self.minus_linear])), + (self.one, self.sum, SumExpression([1, self.minus_sum])), + (self.one, self.other, SumExpression([1, self.minus_other])), + (self.one, self.mutable_l0, 1), + # 16: + (self.one, self.mutable_l1, LinearExpression([1, self.minus_mon_npv])), + (self.one, self.mutable_l2, SumExpression([1, self.minus_mutable_l2])), + (self.one, self.param0, 1), + (self.one, self.param1, 0), + # 20: + (self.one, self.mutable_l3, NPV_SumExpression([1, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_native(self): + tests = [ + (self.native, self.invalid, NotImplemented), + (self.native, self.asbinary, LinearExpression([5, self.minus_bin])), + (self.native, self.zero, 5), + (self.native, self.one, 4), + # 4: + (self.native, self.native, 0), + (self.native, self.npv, NPV_SumExpression([5, self.minus_npv])), + (self.native, self.param, -1), + (self.native, self.param_mut, NPV_SumExpression([5, self.minus_param_mut])), + # 8: + (self.native, self.var, LinearExpression([5, self.minus_var])), + ( + self.native, + self.mon_native, + LinearExpression([5, self.minus_mon_native]), + ), + (self.native, self.mon_param, LinearExpression([5, self.minus_mon_param])), + (self.native, self.mon_npv, LinearExpression([5, self.minus_mon_npv])), + # 12: + (self.native, self.linear, SumExpression([5, self.minus_linear])), + (self.native, self.sum, SumExpression([5, self.minus_sum])), + (self.native, self.other, SumExpression([5, self.minus_other])), + (self.native, self.mutable_l0, 5), + # 16: + (self.native, self.mutable_l1, LinearExpression([5, self.minus_mon_npv])), + (self.native, self.mutable_l2, SumExpression([5, self.minus_mutable_l2])), + (self.native, self.param0, 5), + (self.native, self.param1, 4), + # 20: + (self.native, self.mutable_l3, NPV_SumExpression([5, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_npv(self): + tests = [ + (self.npv, self.invalid, NotImplemented), + (self.npv, self.asbinary, LinearExpression([self.npv, self.minus_bin])), + (self.npv, self.zero, self.npv), + (self.npv, self.one, NPV_SumExpression([self.npv, -1])), + # 4: + (self.npv, self.native, NPV_SumExpression([self.npv, -5])), + (self.npv, self.npv, NPV_SumExpression([self.npv, self.minus_npv])), + (self.npv, self.param, NPV_SumExpression([self.npv, -6])), + ( + self.npv, + self.param_mut, + NPV_SumExpression([self.npv, self.minus_param_mut]), + ), + # 8: + (self.npv, self.var, LinearExpression([self.npv, self.minus_var])), + ( + self.npv, + self.mon_native, + LinearExpression([self.npv, self.minus_mon_native]), + ), + ( + self.npv, + self.mon_param, + LinearExpression([self.npv, self.minus_mon_param]), + ), + (self.npv, self.mon_npv, LinearExpression([self.npv, self.minus_mon_npv])), + # 12: + (self.npv, self.linear, SumExpression([self.npv, self.minus_linear])), + (self.npv, self.sum, SumExpression([self.npv, self.minus_sum])), + (self.npv, self.other, SumExpression([self.npv, self.minus_other])), + (self.npv, self.mutable_l0, self.npv), + # 16: + ( + self.npv, + self.mutable_l1, + LinearExpression([self.npv, self.minus_mon_npv]), + ), + ( + self.npv, + self.mutable_l2, + SumExpression([self.npv, self.minus_mutable_l2]), + ), + (self.npv, self.param0, self.npv), + (self.npv, self.param1, NPV_SumExpression([self.npv, -1])), + # 20: + (self.npv, self.mutable_l3, NPV_SumExpression([self.npv, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_param(self): + tests = [ + (self.param, self.invalid, NotImplemented), + (self.param, self.asbinary, LinearExpression([6, self.minus_bin])), + (self.param, self.zero, 6), + (self.param, self.one, 5), + # 4: + (self.param, self.native, 1), + (self.param, self.npv, NPV_SumExpression([6, self.minus_npv])), + (self.param, self.param, 0), + (self.param, self.param_mut, NPV_SumExpression([6, self.minus_param_mut])), + # 8: + (self.param, self.var, LinearExpression([6, self.minus_var])), + (self.param, self.mon_native, LinearExpression([6, self.minus_mon_native])), + (self.param, self.mon_param, LinearExpression([6, self.minus_mon_param])), + (self.param, self.mon_npv, LinearExpression([6, self.minus_mon_npv])), + # 12: + (self.param, self.linear, SumExpression([6, self.minus_linear])), + (self.param, self.sum, SumExpression([6, self.minus_sum])), + (self.param, self.other, SumExpression([6, self.minus_other])), + (self.param, self.mutable_l0, 6), + # 16: + (self.param, self.mutable_l1, LinearExpression([6, self.minus_mon_npv])), + (self.param, self.mutable_l2, SumExpression([6, self.minus_mutable_l2])), + (self.param, self.param0, 6), + (self.param, self.param1, 5), + # 20: + (self.param, self.mutable_l3, NPV_SumExpression([6, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_param_mut(self): + tests = [ + (self.param_mut, self.invalid, NotImplemented), + ( + self.param_mut, + self.asbinary, + LinearExpression([self.param_mut, self.minus_bin]), + ), + (self.param_mut, self.zero, self.param_mut), + (self.param_mut, self.one, NPV_SumExpression([self.param_mut, -1])), + # 4: + (self.param_mut, self.native, NPV_SumExpression([self.param_mut, -5])), + ( + self.param_mut, + self.npv, + NPV_SumExpression([self.param_mut, self.minus_npv]), + ), + (self.param_mut, self.param, NPV_SumExpression([self.param_mut, -6])), + ( + self.param_mut, + self.param_mut, + NPV_SumExpression([self.param_mut, self.minus_param_mut]), + ), + # 8: + ( + self.param_mut, + self.var, + LinearExpression([self.param_mut, self.minus_var]), + ), + ( + self.param_mut, + self.mon_native, + LinearExpression([self.param_mut, self.minus_mon_native]), + ), + ( + self.param_mut, + self.mon_param, + LinearExpression([self.param_mut, self.minus_mon_param]), + ), + ( + self.param_mut, + self.mon_npv, + LinearExpression([self.param_mut, self.minus_mon_npv]), + ), + # 12: + ( + self.param_mut, + self.linear, + SumExpression([self.param_mut, self.minus_linear]), + ), + (self.param_mut, self.sum, SumExpression([self.param_mut, self.minus_sum])), + ( + self.param_mut, + self.other, + SumExpression([self.param_mut, self.minus_other]), + ), + (self.param_mut, self.mutable_l0, self.param_mut), + # 16: + ( + self.param_mut, + self.mutable_l1, + LinearExpression([self.param_mut, self.minus_mon_npv]), + ), + ( + self.param_mut, + self.mutable_l2, + SumExpression([self.param_mut, self.minus_mutable_l2]), + ), + (self.param_mut, self.param0, self.param_mut), + (self.param_mut, self.param1, NPV_SumExpression([self.param_mut, -1])), + # 20: + ( + self.param_mut, + self.mutable_l3, + NPV_SumExpression([self.param_mut, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_var(self): + tests = [ + (self.var, self.invalid, NotImplemented), + (self.var, self.asbinary, LinearExpression([self.mon_var, self.minus_bin])), + (self.var, self.zero, self.var), + (self.var, self.one, LinearExpression([self.mon_var, -1])), + # 4: + (self.var, self.native, LinearExpression([self.mon_var, -5])), + (self.var, self.npv, LinearExpression([self.mon_var, self.minus_npv])), + (self.var, self.param, LinearExpression([self.mon_var, -6])), + ( + self.var, + self.param_mut, + LinearExpression([self.mon_var, self.minus_param_mut]), + ), + # 8: + (self.var, self.var, LinearExpression([self.mon_var, self.minus_var])), + ( + self.var, + self.mon_native, + LinearExpression([self.mon_var, self.minus_mon_native]), + ), + ( + self.var, + self.mon_param, + LinearExpression([self.mon_var, self.minus_mon_param]), + ), + ( + self.var, + self.mon_npv, + LinearExpression([self.mon_var, self.minus_mon_npv]), + ), + # 12: + ( + self.var, + self.linear, + SumExpression([self.var, NegationExpression((self.linear,))]), + ), + (self.var, self.sum, SumExpression([self.var, self.minus_sum])), + (self.var, self.other, SumExpression([self.var, self.minus_other])), + (self.var, self.mutable_l0, self.var), + # 16: + ( + self.var, + self.mutable_l1, + LinearExpression([self.mon_var, self.minus_mon_npv]), + ), + ( + self.var, + self.mutable_l2, + SumExpression([self.var, self.minus_mutable_l2]), + ), + (self.var, self.param0, self.var), + (self.var, self.param1, LinearExpression([self.mon_var, -1])), + # 20: + ( + self.var, + self.mutable_l3, + LinearExpression([self.mon_var, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_mon_native(self): + tests = [ + (self.mon_native, self.invalid, NotImplemented), + ( + self.mon_native, + self.asbinary, + LinearExpression([self.mon_native, self.minus_bin]), + ), + (self.mon_native, self.zero, self.mon_native), + (self.mon_native, self.one, LinearExpression([self.mon_native, -1])), + # 4: + (self.mon_native, self.native, LinearExpression([self.mon_native, -5])), + ( + self.mon_native, + self.npv, + LinearExpression([self.mon_native, self.minus_npv]), + ), + (self.mon_native, self.param, LinearExpression([self.mon_native, -6])), + ( + self.mon_native, + self.param_mut, + LinearExpression([self.mon_native, self.minus_param_mut]), + ), + # 8: + ( + self.mon_native, + self.var, + LinearExpression([self.mon_native, self.minus_var]), + ), + ( + self.mon_native, + self.mon_native, + LinearExpression([self.mon_native, self.minus_mon_native]), + ), + ( + self.mon_native, + self.mon_param, + LinearExpression([self.mon_native, self.minus_mon_param]), + ), + ( + self.mon_native, + self.mon_npv, + LinearExpression([self.mon_native, self.minus_mon_npv]), + ), + # 12: + ( + self.mon_native, + self.linear, + SumExpression([self.mon_native, self.minus_linear]), + ), + ( + self.mon_native, + self.sum, + SumExpression([self.mon_native, self.minus_sum]), + ), + ( + self.mon_native, + self.other, + SumExpression([self.mon_native, self.minus_other]), + ), + (self.mon_native, self.mutable_l0, self.mon_native), + # 16: + ( + self.mon_native, + self.mutable_l1, + LinearExpression([self.mon_native, self.minus_mon_npv]), + ), + ( + self.mon_native, + self.mutable_l2, + SumExpression([self.mon_native, self.minus_mutable_l2]), + ), + (self.mon_native, self.param0, self.mon_native), + (self.mon_native, self.param1, LinearExpression([self.mon_native, -1])), + # 20: + ( + self.mon_native, + self.mutable_l3, + LinearExpression([self.mon_native, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_mon_param(self): + tests = [ + (self.mon_param, self.invalid, NotImplemented), + ( + self.mon_param, + self.asbinary, + LinearExpression([self.mon_param, self.minus_bin]), + ), + (self.mon_param, self.zero, self.mon_param), + (self.mon_param, self.one, LinearExpression([self.mon_param, -1])), + # 4: + (self.mon_param, self.native, LinearExpression([self.mon_param, -5])), + ( + self.mon_param, + self.npv, + LinearExpression([self.mon_param, self.minus_npv]), + ), + (self.mon_param, self.param, LinearExpression([self.mon_param, -6])), + ( + self.mon_param, + self.param_mut, + LinearExpression([self.mon_param, self.minus_param_mut]), + ), + # 8: + ( + self.mon_param, + self.var, + LinearExpression([self.mon_param, self.minus_var]), + ), + ( + self.mon_param, + self.mon_native, + LinearExpression([self.mon_param, self.minus_mon_native]), + ), + ( + self.mon_param, + self.mon_param, + LinearExpression([self.mon_param, self.minus_mon_param]), + ), + ( + self.mon_param, + self.mon_npv, + LinearExpression([self.mon_param, self.minus_mon_npv]), + ), + # 12: + ( + self.mon_param, + self.linear, + SumExpression([self.mon_param, self.minus_linear]), + ), + (self.mon_param, self.sum, SumExpression([self.mon_param, self.minus_sum])), + ( + self.mon_param, + self.other, + SumExpression([self.mon_param, self.minus_other]), + ), + (self.mon_param, self.mutable_l0, self.mon_param), + # 16: + ( + self.mon_param, + self.mutable_l1, + LinearExpression([self.mon_param, self.minus_mon_npv]), + ), + ( + self.mon_param, + self.mutable_l2, + SumExpression([self.mon_param, self.minus_mutable_l2]), + ), + (self.mon_param, self.param0, self.mon_param), + (self.mon_param, self.param1, LinearExpression([self.mon_param, -1])), + # 20: + ( + self.mon_param, + self.mutable_l3, + LinearExpression([self.mon_param, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_mon_npv(self): + tests = [ + (self.mon_npv, self.invalid, NotImplemented), + ( + self.mon_npv, + self.asbinary, + LinearExpression([self.mon_npv, self.minus_bin]), + ), + (self.mon_npv, self.zero, self.mon_npv), + (self.mon_npv, self.one, LinearExpression([self.mon_npv, -1])), + # 4: + (self.mon_npv, self.native, LinearExpression([self.mon_npv, -5])), + (self.mon_npv, self.npv, LinearExpression([self.mon_npv, self.minus_npv])), + (self.mon_npv, self.param, LinearExpression([self.mon_npv, -6])), + ( + self.mon_npv, + self.param_mut, + LinearExpression([self.mon_npv, self.minus_param_mut]), + ), + # 8: + (self.mon_npv, self.var, LinearExpression([self.mon_npv, self.minus_var])), + ( + self.mon_npv, + self.mon_native, + LinearExpression([self.mon_npv, self.minus_mon_native]), + ), + ( + self.mon_npv, + self.mon_param, + LinearExpression([self.mon_npv, self.minus_mon_param]), + ), + ( + self.mon_npv, + self.mon_npv, + LinearExpression([self.mon_npv, self.minus_mon_npv]), + ), + # 12: + ( + self.mon_npv, + self.linear, + SumExpression([self.mon_npv, self.minus_linear]), + ), + (self.mon_npv, self.sum, SumExpression([self.mon_npv, self.minus_sum])), + (self.mon_npv, self.other, SumExpression([self.mon_npv, self.minus_other])), + (self.mon_npv, self.mutable_l0, self.mon_npv), + # 16: + ( + self.mon_npv, + self.mutable_l1, + LinearExpression([self.mon_npv, self.minus_mon_npv]), + ), + ( + self.mon_npv, + self.mutable_l2, + SumExpression([self.mon_npv, self.minus_mutable_l2]), + ), + (self.mon_npv, self.param0, self.mon_npv), + (self.mon_npv, self.param1, LinearExpression([self.mon_npv, -1])), + # 20: + ( + self.mon_npv, + self.mutable_l3, + LinearExpression([self.mon_npv, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_linear(self): + tests = [ + (self.linear, self.invalid, NotImplemented), + ( + self.linear, + self.asbinary, + LinearExpression(self.linear.args + [self.minus_bin]), + ), + (self.linear, self.zero, self.linear), + (self.linear, self.one, LinearExpression(self.linear.args + [-1])), + # 4: + (self.linear, self.native, LinearExpression(self.linear.args + [-5])), + ( + self.linear, + self.npv, + LinearExpression(self.linear.args + [self.minus_npv]), + ), + (self.linear, self.param, LinearExpression(self.linear.args + [-6])), + ( + self.linear, + self.param_mut, + LinearExpression(self.linear.args + [self.minus_param_mut]), + ), + # 8: + ( + self.linear, + self.var, + LinearExpression(self.linear.args + [self.minus_var]), + ), + ( + self.linear, + self.mon_native, + LinearExpression(self.linear.args + [self.minus_mon_native]), + ), + ( + self.linear, + self.mon_param, + LinearExpression(self.linear.args + [self.minus_mon_param]), + ), + ( + self.linear, + self.mon_npv, + LinearExpression(self.linear.args + [self.minus_mon_npv]), + ), + # 12: + (self.linear, self.linear, SumExpression([self.linear, self.minus_linear])), + (self.linear, self.sum, SumExpression([self.linear, self.minus_sum])), + (self.linear, self.other, SumExpression([self.linear, self.minus_other])), + (self.linear, self.mutable_l0, self.linear), + # 16: + ( + self.linear, + self.mutable_l1, + LinearExpression(self.linear.args + [self.minus_mon_npv]), + ), + ( + self.linear, + self.mutable_l2, + SumExpression([self.linear, self.minus_mutable_l2]), + ), + (self.linear, self.param0, self.linear), + (self.linear, self.param1, LinearExpression(self.linear.args + [-1])), + # 20: + ( + self.linear, + self.mutable_l3, + LinearExpression(self.linear.args + [self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_sum(self): + tests = [ + (self.sum, self.invalid, NotImplemented), + (self.sum, self.asbinary, SumExpression(self.sum.args + [self.minus_bin])), + (self.sum, self.zero, self.sum), + (self.sum, self.one, SumExpression(self.sum.args + [-1])), + # 4: + (self.sum, self.native, SumExpression(self.sum.args + [-5])), + (self.sum, self.npv, SumExpression(self.sum.args + [self.minus_npv])), + (self.sum, self.param, SumExpression(self.sum.args + [-6])), + ( + self.sum, + self.param_mut, + SumExpression(self.sum.args + [self.minus_param_mut]), + ), + # 8: + (self.sum, self.var, SumExpression(self.sum.args + [self.minus_var])), + ( + self.sum, + self.mon_native, + SumExpression(self.sum.args + [self.minus_mon_native]), + ), + ( + self.sum, + self.mon_param, + SumExpression(self.sum.args + [self.minus_mon_param]), + ), + ( + self.sum, + self.mon_npv, + SumExpression(self.sum.args + [self.minus_mon_npv]), + ), + # 12: + (self.sum, self.linear, SumExpression(self.sum.args + [self.minus_linear])), + (self.sum, self.sum, SumExpression(self.sum.args + [self.minus_sum])), + (self.sum, self.other, SumExpression(self.sum.args + [self.minus_other])), + (self.sum, self.mutable_l0, self.sum), + # 16: + ( + self.sum, + self.mutable_l1, + SumExpression(self.sum.args + [self.minus_mon_npv]), + ), + ( + self.sum, + self.mutable_l2, + SumExpression(self.sum.args + [self.minus_mutable_l2]), + ), + (self.sum, self.param0, self.sum), + (self.sum, self.param1, SumExpression(self.sum.args + [-1])), + # 20: + ( + self.sum, + self.mutable_l3, + SumExpression(self.sum.args + [self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_other(self): + tests = [ + (self.other, self.invalid, NotImplemented), + (self.other, self.asbinary, SumExpression([self.other, self.minus_bin])), + (self.other, self.zero, self.other), + (self.other, self.one, SumExpression([self.other, -1])), + # 4: + (self.other, self.native, SumExpression([self.other, -5])), + (self.other, self.npv, SumExpression([self.other, self.minus_npv])), + (self.other, self.param, SumExpression([self.other, -6])), + ( + self.other, + self.param_mut, + SumExpression([self.other, self.minus_param_mut]), + ), + # 8: + (self.other, self.var, SumExpression([self.other, self.minus_var])), + ( + self.other, + self.mon_native, + SumExpression([self.other, self.minus_mon_native]), + ), + ( + self.other, + self.mon_param, + SumExpression([self.other, self.minus_mon_param]), + ), + (self.other, self.mon_npv, SumExpression([self.other, self.minus_mon_npv])), + # 12: + (self.other, self.linear, SumExpression([self.other, self.minus_linear])), + (self.other, self.sum, SumExpression([self.other, self.minus_sum])), + (self.other, self.other, SumExpression([self.other, self.minus_other])), + (self.other, self.mutable_l0, self.other), + # 16: + ( + self.other, + self.mutable_l1, + SumExpression([self.other, self.minus_mon_npv]), + ), + ( + self.other, + self.mutable_l2, + SumExpression([self.other, self.minus_mutable_l2]), + ), + (self.other, self.param0, self.other), + (self.other, self.param1, SumExpression([self.other, -1])), + # 20: + (self.other, self.mutable_l3, SumExpression([self.other, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_mutable_l0(self): + tests = [ + (self.mutable_l0, self.invalid, NotImplemented), + (self.mutable_l0, self.asbinary, self.minus_bin), + (self.mutable_l0, self.zero, 0), + (self.mutable_l0, self.one, -1), + # 4: + (self.mutable_l0, self.native, -5), + (self.mutable_l0, self.npv, self.minus_npv), + (self.mutable_l0, self.param, -6), + (self.mutable_l0, self.param_mut, self.minus_param_mut), + # 8: + (self.mutable_l0, self.var, self.minus_var), + (self.mutable_l0, self.mon_native, self.minus_mon_native), + (self.mutable_l0, self.mon_param, self.minus_mon_param), + (self.mutable_l0, self.mon_npv, self.minus_mon_npv), + # 12: + (self.mutable_l0, self.linear, self.minus_linear), + (self.mutable_l0, self.sum, self.minus_sum), + (self.mutable_l0, self.other, self.minus_other), + (self.mutable_l0, self.mutable_l0, self.mutable_l0), + # 16: + (self.mutable_l0, self.mutable_l1, self.minus_mon_npv), + (self.mutable_l0, self.mutable_l2, self.minus_mutable_l2), + (self.mutable_l0, self.param0, 0), + (self.mutable_l0, self.param1, -1), + # 20: + (self.mutable_l0, self.mutable_l3, self.minus_npv), + ] + self._run_cases(tests, operator.sub) + # Mutable isub handled by separate tests + # self._run_cases(tests, operator.isub) + + def test_sub_mutable_l1(self): + tests = [ + (self.mutable_l1, self.invalid, NotImplemented), + ( + self.mutable_l1, + self.asbinary, + LinearExpression(self.mutable_l1.args + [self.minus_bin]), + ), + (self.mutable_l1, self.zero, self.mon_npv), + (self.mutable_l1, self.one, LinearExpression(self.mutable_l1.args + [-1])), + # 4: + ( + self.mutable_l1, + self.native, + LinearExpression(self.mutable_l1.args + [-5]), + ), + ( + self.mutable_l1, + self.npv, + LinearExpression(self.mutable_l1.args + [self.minus_npv]), + ), + ( + self.mutable_l1, + self.param, + LinearExpression(self.mutable_l1.args + [-6]), + ), + ( + self.mutable_l1, + self.param_mut, + LinearExpression(self.mutable_l1.args + [self.minus_param_mut]), + ), + # 8: + ( + self.mutable_l1, + self.var, + LinearExpression(self.mutable_l1.args + [self.minus_var]), + ), + ( + self.mutable_l1, + self.mon_native, + LinearExpression(self.mutable_l1.args + [self.minus_mon_native]), + ), + ( + self.mutable_l1, + self.mon_param, + LinearExpression(self.mutable_l1.args + [self.minus_mon_param]), + ), + ( + self.mutable_l1, + self.mon_npv, + LinearExpression(self.mutable_l1.args + [self.minus_mon_npv]), + ), + # 12: + ( + self.mutable_l1, + self.linear, + SumExpression(self.mutable_l1.args + [self.minus_linear]), + ), + ( + self.mutable_l1, + self.sum, + SumExpression(self.mutable_l1.args + [self.minus_sum]), + ), + ( + self.mutable_l1, + self.other, + SumExpression(self.mutable_l1.args + [self.minus_other]), + ), + (self.mutable_l1, self.mutable_l0, self.mon_npv), + # 16: + ( + self.mutable_l1, + self.mutable_l1, + LinearExpression(self.mutable_l1.args + [self.minus_mon_npv]), + ), + ( + self.mutable_l1, + self.mutable_l2, + SumExpression(self.mutable_l1.args + [self.minus_mutable_l2]), + ), + (self.mutable_l1, self.param0, self.mon_npv), + ( + self.mutable_l1, + self.param1, + LinearExpression(self.mutable_l1.args + [-1]), + ), + # 20: + ( + self.mutable_l1, + self.mutable_l3, + LinearExpression(self.mutable_l1.args + [self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + # Mutable isub handled by separate tests + # self._run_cases(tests, operator.isub) + + def test_sub_mutable_l2(self): + tests = [ + (self.mutable_l2, self.invalid, NotImplemented), + ( + self.mutable_l2, + self.asbinary, + SumExpression(self.mutable_l2.args + [self.minus_bin]), + ), + (self.mutable_l2, self.zero, self.mutable_l2), + (self.mutable_l2, self.one, SumExpression(self.mutable_l2.args + [-1])), + # 4: + (self.mutable_l2, self.native, SumExpression(self.mutable_l2.args + [-5])), + ( + self.mutable_l2, + self.npv, + SumExpression(self.mutable_l2.args + [self.minus_npv]), + ), + (self.mutable_l2, self.param, SumExpression(self.mutable_l2.args + [-6])), + ( + self.mutable_l2, + self.param_mut, + SumExpression(self.mutable_l2.args + [self.minus_param_mut]), + ), + # 8: + ( + self.mutable_l2, + self.var, + SumExpression(self.mutable_l2.args + [self.minus_var]), + ), + ( + self.mutable_l2, + self.mon_native, + SumExpression(self.mutable_l2.args + [self.minus_mon_native]), + ), + ( + self.mutable_l2, + self.mon_param, + SumExpression(self.mutable_l2.args + [self.minus_mon_param]), + ), + ( + self.mutable_l2, + self.mon_npv, + SumExpression(self.mutable_l2.args + [self.minus_mon_npv]), + ), + # 12: + ( + self.mutable_l2, + self.linear, + SumExpression(self.mutable_l2.args + [self.minus_linear]), + ), + ( + self.mutable_l2, + self.sum, + SumExpression(self.mutable_l2.args + [self.minus_sum]), + ), + ( + self.mutable_l2, + self.other, + SumExpression(self.mutable_l2.args + [self.minus_other]), + ), + (self.mutable_l2, self.mutable_l0, self.mutable_l2), + # 16: + ( + self.mutable_l2, + self.mutable_l1, + SumExpression(self.mutable_l2.args + [self.minus_mon_npv]), + ), + ( + self.mutable_l2, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.minus_mutable_l2]), + ), + (self.mutable_l2, self.param0, self.mutable_l2), + (self.mutable_l2, self.param1, SumExpression(self.mutable_l2.args + [-1])), + # 20: + ( + self.mutable_l2, + self.mutable_l3, + SumExpression(self.mutable_l2.args + [self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + # Mutable isub handled by separate tests + # self._run_cases(tests, operator.isub) + + def test_sub_param0(self): + tests = [ + (self.param0, self.invalid, NotImplemented), + (self.param0, self.asbinary, self.minus_bin), + (self.param0, self.zero, 0), + (self.param0, self.one, -1), + # 4: + (self.param0, self.native, -5), + (self.param0, self.npv, self.minus_npv), + (self.param0, self.param, -6), + (self.param0, self.param_mut, self.minus_param_mut), + # 8: + (self.param0, self.var, self.minus_var), + (self.param0, self.mon_native, self.minus_mon_native), + (self.param0, self.mon_param, self.minus_mon_param), + (self.param0, self.mon_npv, self.minus_mon_npv), + # 12: + (self.param0, self.linear, self.minus_linear), + (self.param0, self.sum, self.minus_sum), + (self.param0, self.other, self.minus_other), + (self.param0, self.mutable_l0, 0), + # 16: + (self.param0, self.mutable_l1, self.minus_mon_npv), + (self.param0, self.mutable_l2, self.minus_mutable_l2), + (self.param0, self.param0, 0), + (self.param0, self.param1, -1), + # 20: + (self.param0, self.mutable_l3, self.minus_npv), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_param1(self): + tests = [ + (self.param1, self.invalid, NotImplemented), + (self.param1, self.asbinary, LinearExpression([1, self.minus_bin])), + (self.param1, self.zero, 1), + (self.param1, self.one, 0), + # 4: + (self.param1, self.native, -4), + (self.param1, self.npv, NPV_SumExpression([1, self.minus_npv])), + (self.param1, self.param, -5), + (self.param1, self.param_mut, NPV_SumExpression([1, self.minus_param_mut])), + # 8: + (self.param1, self.var, LinearExpression([1, self.minus_var])), + ( + self.param1, + self.mon_native, + LinearExpression([1, self.minus_mon_native]), + ), + (self.param1, self.mon_param, LinearExpression([1, self.minus_mon_param])), + (self.param1, self.mon_npv, LinearExpression([1, self.minus_mon_npv])), + # 12: + (self.param1, self.linear, SumExpression([1, self.minus_linear])), + (self.param1, self.sum, SumExpression([1, self.minus_sum])), + (self.param1, self.other, SumExpression([1, self.minus_other])), + (self.param1, self.mutable_l0, 1), + # 16: + (self.param1, self.mutable_l1, LinearExpression([1, self.minus_mon_npv])), + (self.param1, self.mutable_l2, SumExpression([1, self.minus_mutable_l2])), + (self.param1, self.param0, 1), + (self.param1, self.param1, 0), + # 20: + (self.param1, self.mutable_l3, NPV_SumExpression([1, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_mutable_l3(self): + tests = [ + (self.mutable_l3, self.invalid, NotImplemented), + ( + self.mutable_l3, + self.asbinary, + LinearExpression(self.mutable_l3.args + [self.minus_bin]), + ), + (self.mutable_l3, self.zero, self.npv), + (self.mutable_l3, self.one, NPV_SumExpression(self.mutable_l3.args + [-1])), + # 4: + ( + self.mutable_l3, + self.native, + NPV_SumExpression(self.mutable_l3.args + [-5]), + ), + ( + self.mutable_l3, + self.npv, + NPV_SumExpression(self.mutable_l3.args + [self.minus_npv]), + ), + ( + self.mutable_l3, + self.param, + NPV_SumExpression(self.mutable_l3.args + [-6]), + ), + ( + self.mutable_l3, + self.param_mut, + NPV_SumExpression(self.mutable_l3.args + [self.minus_param_mut]), + ), + # 8: + ( + self.mutable_l3, + self.var, + LinearExpression(self.mutable_l3.args + [self.minus_var]), + ), + ( + self.mutable_l3, + self.mon_native, + LinearExpression(self.mutable_l3.args + [self.minus_mon_native]), + ), + ( + self.mutable_l3, + self.mon_param, + LinearExpression(self.mutable_l3.args + [self.minus_mon_param]), + ), + ( + self.mutable_l3, + self.mon_npv, + LinearExpression(self.mutable_l3.args + [self.minus_mon_npv]), + ), + # 12: + ( + self.mutable_l3, + self.linear, + SumExpression(self.mutable_l3.args + [self.minus_linear]), + ), + ( + self.mutable_l3, + self.sum, + SumExpression(self.mutable_l3.args + [self.minus_sum]), + ), + ( + self.mutable_l3, + self.other, + SumExpression(self.mutable_l3.args + [self.minus_other]), + ), + (self.mutable_l3, self.mutable_l0, self.npv), + # 16: + ( + self.mutable_l3, + self.mutable_l1, + LinearExpression(self.mutable_l3.args + [self.minus_mon_npv]), + ), + ( + self.mutable_l3, + self.mutable_l2, + SumExpression(self.mutable_l3.args + [self.minus_mutable_l2]), + ), + (self.mutable_l3, self.param0, self.npv), + ( + self.mutable_l3, + self.param1, + NPV_SumExpression(self.mutable_l3.args + [-1]), + ), + # 20: + # Note that because the mutable is resolved to a NPV_Sum in + # the negation, the 1-term summation for the first arg is + # not resolved to a bare term + ( + self.mutable_l3, + self.mutable_l3, + NPV_SumExpression( + [NPV_SumExpression(self.mutable_l3.args), self.minus_npv] + ), + ), + ] + self._run_cases(tests, operator.sub) + # Mutable isub handled by separate tests + # self._run_cases(tests, operator.isub) + + # + # + # MULTIPLICATION + # + # + + def test_mul_invalid(self): + tests = [ + (self.invalid, self.invalid, NotImplemented), + (self.invalid, self.asbinary, NotImplemented), + # "invalid(str) * {0, 1, native}" are legitimate Python + # operations and should never hit the Pyomo expression + # system + (self.invalid, self.zero, self.SKIP), + (self.invalid, self.one, self.SKIP), + # 4: + (self.invalid, self.native, self.SKIP), + (self.invalid, self.npv, NotImplemented), + (self.invalid, self.param, NotImplemented), + (self.invalid, self.param_mut, NotImplemented), + # 8: + (self.invalid, self.var, NotImplemented), + (self.invalid, self.mon_native, NotImplemented), + (self.invalid, self.mon_param, NotImplemented), + (self.invalid, self.mon_npv, NotImplemented), + # 12: + (self.invalid, self.linear, NotImplemented), + (self.invalid, self.sum, NotImplemented), + (self.invalid, self.other, NotImplemented), + (self.invalid, self.mutable_l0, NotImplemented), + # 16: + (self.invalid, self.mutable_l1, NotImplemented), + (self.invalid, self.mutable_l2, NotImplemented), + (self.invalid, self.param0, NotImplemented), + (self.invalid, self.param1, NotImplemented), + # 20: + (self.invalid, self.mutable_l3, NotImplemented), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_asbinary(self): + tests = [ + (self.asbinary, self.invalid, NotImplemented), + # BooleanVar objects do not support multiplication + (self.asbinary, self.asbinary, NotImplemented), + (self.asbinary, self.zero, MonomialTermExpression((0, self.bin))), + (self.asbinary, self.one, self.bin), + # 4: + (self.asbinary, self.native, MonomialTermExpression((5, self.bin))), + (self.asbinary, self.npv, MonomialTermExpression((self.npv, self.bin))), + (self.asbinary, self.param, MonomialTermExpression((6, self.bin))), + ( + self.asbinary, + self.param_mut, + MonomialTermExpression((self.param_mut, self.bin)), + ), + # 8: + (self.asbinary, self.var, ProductExpression((self.bin, self.var))), + ( + self.asbinary, + self.mon_native, + ProductExpression((self.bin, self.mon_native)), + ), + ( + self.asbinary, + self.mon_param, + ProductExpression((self.bin, self.mon_param)), + ), + (self.asbinary, self.mon_npv, ProductExpression((self.bin, self.mon_npv))), + # 12: + (self.asbinary, self.linear, ProductExpression((self.bin, self.linear))), + (self.asbinary, self.sum, ProductExpression((self.bin, self.sum))), + (self.asbinary, self.other, ProductExpression((self.bin, self.other))), + (self.asbinary, self.mutable_l0, MonomialTermExpression((0, self.bin))), + # 16: + ( + self.asbinary, + self.mutable_l1, + ProductExpression((self.bin, self.mon_npv)), + ), + ( + self.asbinary, + self.mutable_l2, + ProductExpression((self.bin, self.mutable_l2)), + ), + (self.asbinary, self.param0, MonomialTermExpression((0, self.bin))), + (self.asbinary, self.param1, self.bin), + # 20: + ( + self.asbinary, + self.mutable_l3, + MonomialTermExpression((self.npv, self.bin)), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_zero(self): + tests = [ + # "Zero * invalid(str)" is a legitimate Python operation and + # should never hit the Pyomo expression system + (self.zero, self.invalid, self.SKIP), + (self.zero, self.asbinary, MonomialTermExpression((0, self.bin))), + (self.zero, self.zero, 0), + (self.zero, self.one, 0), + # 4: + (self.zero, self.native, 0), + (self.zero, self.npv, NPV_ProductExpression((0, self.npv))), + (self.zero, self.param, 0), + (self.zero, self.param_mut, NPV_ProductExpression((0, self.param_mut))), + # 8: + (self.zero, self.var, MonomialTermExpression((0, self.var))), + ( + self.zero, + self.mon_native, + MonomialTermExpression((0, self.mon_native.arg(1))), + ), + ( + self.zero, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((0, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.zero, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((0, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.zero, self.linear, ProductExpression((0, self.linear))), + (self.zero, self.sum, ProductExpression((0, self.sum))), + (self.zero, self.other, ProductExpression((0, self.other))), + (self.zero, self.mutable_l0, 0), + # 16: + ( + self.zero, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((0, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + (self.zero, self.mutable_l2, ProductExpression((0, self.mutable_l2))), + (self.zero, self.param0, 0), + (self.zero, self.param1, 0), + # 20: + (self.zero, self.mutable_l3, NPV_ProductExpression((0, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_one(self): + tests = [ + # "One * invalid(str)" is a legitimate Python operation and + # should never hit the Pyomo expression system + (self.one, self.invalid, self.SKIP), + (self.one, self.asbinary, self.bin), + (self.one, self.zero, 0), + (self.one, self.one, 1), + # 4: + (self.one, self.native, 5), + (self.one, self.npv, self.npv), + (self.one, self.param, self.param), + (self.one, self.param_mut, self.param_mut), + # 8: + (self.one, self.var, self.var), + (self.one, self.mon_native, self.mon_native), + (self.one, self.mon_param, self.mon_param), + (self.one, self.mon_npv, self.mon_npv), + # 12: + (self.one, self.linear, self.linear), + (self.one, self.sum, self.sum), + (self.one, self.other, self.other), + (self.one, self.mutable_l0, 0), + # 16: + (self.one, self.mutable_l1, self.mon_npv), + (self.one, self.mutable_l2, self.mutable_l2), + (self.one, self.param0, self.param0), + (self.one, self.param1, self.param1), + # 20: + (self.one, self.mutable_l3, self.npv), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_native(self): + tests = [ + # "Native * invalid(str) is a legitimate Python operation and + # should never hit the Pyomo expression system + (self.native, self.invalid, self.SKIP), + (self.native, self.asbinary, MonomialTermExpression((5, self.bin))), + (self.native, self.zero, 0), + (self.native, self.one, 5), + # 4: + (self.native, self.native, 25), + (self.native, self.npv, NPV_ProductExpression((5, self.npv))), + (self.native, self.param, 30), + (self.native, self.param_mut, NPV_ProductExpression((5, self.param_mut))), + # 8: + (self.native, self.var, MonomialTermExpression((5, self.var))), + ( + self.native, + self.mon_native, + MonomialTermExpression((15, self.mon_native.arg(1))), + ), + ( + self.native, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((5, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.native, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((5, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.native, self.linear, ProductExpression((5, self.linear))), + (self.native, self.sum, ProductExpression((5, self.sum))), + (self.native, self.other, ProductExpression((5, self.other))), + (self.native, self.mutable_l0, 0), + # 16: + ( + self.native, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((5, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + (self.native, self.mutable_l2, ProductExpression((5, self.mutable_l2))), + (self.native, self.param0, 0), + (self.native, self.param1, 5), + # 20: + (self.native, self.mutable_l3, NPV_ProductExpression((5, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_npv(self): + tests = [ + (self.npv, self.invalid, NotImplemented), + (self.npv, self.asbinary, MonomialTermExpression((self.npv, self.bin))), + (self.npv, self.zero, NPV_ProductExpression((self.npv, 0))), + (self.npv, self.one, self.npv), + # 4: + (self.npv, self.native, NPV_ProductExpression((self.npv, 5))), + (self.npv, self.npv, NPV_ProductExpression((self.npv, self.npv))), + (self.npv, self.param, NPV_ProductExpression((self.npv, 6))), + ( + self.npv, + self.param_mut, + NPV_ProductExpression((self.npv, self.param_mut)), + ), + # 8: + (self.npv, self.var, MonomialTermExpression((self.npv, self.var))), + ( + self.npv, + self.mon_native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_native.arg(0))), + self.mon_native.arg(1), + ) + ), + ), + ( + self.npv, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.npv, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.npv, self.linear, ProductExpression((self.npv, self.linear))), + (self.npv, self.sum, ProductExpression((self.npv, self.sum))), + (self.npv, self.other, ProductExpression((self.npv, self.other))), + (self.npv, self.mutable_l0, NPV_ProductExpression((self.npv, 0))), + # 16: + ( + self.npv, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + (self.npv, self.mutable_l2, ProductExpression((self.npv, self.mutable_l2))), + (self.npv, self.param0, NPV_ProductExpression((self.npv, 0))), + (self.npv, self.param1, self.npv), + # 20: + (self.npv, self.mutable_l3, NPV_ProductExpression((self.npv, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_param(self): + tests = [ + (self.param, self.invalid, NotImplemented), + (self.param, self.asbinary, MonomialTermExpression((6, self.bin))), + (self.param, self.zero, 0), + (self.param, self.one, 6), + # 4: + (self.param, self.native, 30), + (self.param, self.npv, NPV_ProductExpression((6, self.npv))), + (self.param, self.param, 36), + (self.param, self.param_mut, NPV_ProductExpression((6, self.param_mut))), + # 8: + (self.param, self.var, MonomialTermExpression((6, self.var))), + ( + self.param, + self.mon_native, + MonomialTermExpression((18, self.mon_native.arg(1))), + ), + ( + self.param, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((6, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.param, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((6, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.param, self.linear, ProductExpression((6, self.linear))), + (self.param, self.sum, ProductExpression((6, self.sum))), + (self.param, self.other, ProductExpression((6, self.other))), + (self.param, self.mutable_l0, 0), + # 16: + ( + self.param, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((6, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + (self.param, self.mutable_l2, ProductExpression((6, self.mutable_l2))), + (self.param, self.param0, 0), + (self.param, self.param1, 6), + # 20: + (self.param, self.mutable_l3, NPV_ProductExpression((6, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_param_mut(self): + tests = [ + (self.param_mut, self.invalid, NotImplemented), + ( + self.param_mut, + self.asbinary, + MonomialTermExpression((self.param_mut, self.bin)), + ), + (self.param_mut, self.zero, NPV_ProductExpression((self.param_mut, 0))), + (self.param_mut, self.one, self.param_mut), + # 4: + (self.param_mut, self.native, NPV_ProductExpression((self.param_mut, 5))), + ( + self.param_mut, + self.npv, + NPV_ProductExpression((self.param_mut, self.npv)), + ), + (self.param_mut, self.param, NPV_ProductExpression((self.param_mut, 6))), + ( + self.param_mut, + self.param_mut, + NPV_ProductExpression((self.param_mut, self.param_mut)), + ), + # 8: + ( + self.param_mut, + self.var, + MonomialTermExpression((self.param_mut, self.var)), + ), + ( + self.param_mut, + self.mon_native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.param_mut, self.mon_native.arg(0))), + self.mon_native.arg(1), + ) + ), + ), + ( + self.param_mut, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.param_mut, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.param_mut, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.param_mut, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + ( + self.param_mut, + self.linear, + ProductExpression((self.param_mut, self.linear)), + ), + (self.param_mut, self.sum, ProductExpression((self.param_mut, self.sum))), + ( + self.param_mut, + self.other, + ProductExpression((self.param_mut, self.other)), + ), + ( + self.param_mut, + self.mutable_l0, + NPV_ProductExpression((self.param_mut, 0)), + ), + # 16: + ( + self.param_mut, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((self.param_mut, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.param_mut, + self.mutable_l2, + ProductExpression((self.param_mut, self.mutable_l2)), + ), + (self.param_mut, self.param0, NPV_ProductExpression((self.param_mut, 0))), + (self.param_mut, self.param1, self.param_mut), + # 20: + ( + self.param_mut, + self.mutable_l3, + NPV_ProductExpression((self.param_mut, self.npv)), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_var(self): + tests = [ + (self.var, self.invalid, NotImplemented), + (self.var, self.asbinary, ProductExpression((self.var, self.bin))), + (self.var, self.zero, MonomialTermExpression((0, self.var))), + (self.var, self.one, self.var), + # 4: + (self.var, self.native, MonomialTermExpression((5, self.var))), + (self.var, self.npv, MonomialTermExpression((self.npv, self.var))), + (self.var, self.param, MonomialTermExpression((6, self.var))), + ( + self.var, + self.param_mut, + MonomialTermExpression((self.param_mut, self.var)), + ), + # 8: + (self.var, self.var, ProductExpression((self.var, self.var))), + (self.var, self.mon_native, ProductExpression((self.var, self.mon_native))), + (self.var, self.mon_param, ProductExpression((self.var, self.mon_param))), + (self.var, self.mon_npv, ProductExpression((self.var, self.mon_npv))), + # 12: + (self.var, self.linear, ProductExpression((self.var, self.linear))), + (self.var, self.sum, ProductExpression((self.var, self.sum))), + (self.var, self.other, ProductExpression((self.var, self.other))), + (self.var, self.mutable_l0, MonomialTermExpression((0, self.var))), + # 16: + (self.var, self.mutable_l1, ProductExpression((self.var, self.mon_npv))), + (self.var, self.mutable_l2, ProductExpression((self.var, self.mutable_l2))), + (self.var, self.param0, MonomialTermExpression((0, self.var))), + (self.var, self.param1, self.var), + # 20: + (self.var, self.mutable_l3, MonomialTermExpression((self.npv, self.var))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mon_native(self): + tests = [ + (self.mon_native, self.invalid, NotImplemented), + ( + self.mon_native, + self.asbinary, + ProductExpression((self.mon_native, self.bin)), + ), + ( + self.mon_native, + self.zero, + MonomialTermExpression((0, self.mon_native.arg(1))), + ), + (self.mon_native, self.one, self.mon_native), + # 4: + ( + self.mon_native, + self.native, + MonomialTermExpression((15, self.mon_native.arg(1))), + ), + ( + self.mon_native, + self.npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_native.arg(0), self.npv)), + self.mon_native.arg(1), + ) + ), + ), + ( + self.mon_native, + self.param, + MonomialTermExpression((18, self.mon_native.arg(1))), + ), + ( + self.mon_native, + self.param_mut, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_native.arg(0), self.param_mut)), + self.mon_native.arg(1), + ) + ), + ), + # 8: + (self.mon_native, self.var, ProductExpression((self.mon_native, self.var))), + ( + self.mon_native, + self.mon_native, + ProductExpression((self.mon_native, self.mon_native)), + ), + ( + self.mon_native, + self.mon_param, + ProductExpression((self.mon_native, self.mon_param)), + ), + ( + self.mon_native, + self.mon_npv, + ProductExpression((self.mon_native, self.mon_npv)), + ), + # 12: + ( + self.mon_native, + self.linear, + ProductExpression((self.mon_native, self.linear)), + ), + (self.mon_native, self.sum, ProductExpression((self.mon_native, self.sum))), + ( + self.mon_native, + self.other, + ProductExpression((self.mon_native, self.other)), + ), + ( + self.mon_native, + self.mutable_l0, + MonomialTermExpression((0, self.mon_native.arg(1))), + ), + # 16: + ( + self.mon_native, + self.mutable_l1, + ProductExpression((self.mon_native, self.mon_npv)), + ), + ( + self.mon_native, + self.mutable_l2, + ProductExpression((self.mon_native, self.mutable_l2)), + ), + ( + self.mon_native, + self.param0, + MonomialTermExpression((0, self.mon_native.arg(1))), + ), + (self.mon_native, self.param1, self.mon_native), + # 20: + ( + self.mon_native, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_native.arg(0), self.npv)), + self.mon_native.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mon_param(self): + tests = [ + (self.mon_param, self.invalid, NotImplemented), + ( + self.mon_param, + self.asbinary, + ProductExpression((self.mon_param, self.bin)), + ), + ( + self.mon_param, + self.zero, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), 0)), + self.mon_param.arg(1), + ) + ), + ), + (self.mon_param, self.one, self.mon_param), + # 4: + ( + self.mon_param, + self.native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), 5)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), self.npv)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), 6)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.param_mut, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), self.param_mut)), + self.mon_param.arg(1), + ) + ), + ), + # 8: + (self.mon_param, self.var, ProductExpression((self.mon_param, self.var))), + ( + self.mon_param, + self.mon_native, + ProductExpression((self.mon_param, self.mon_native)), + ), + ( + self.mon_param, + self.mon_param, + ProductExpression((self.mon_param, self.mon_param)), + ), + ( + self.mon_param, + self.mon_npv, + ProductExpression((self.mon_param, self.mon_npv)), + ), + # 12: + ( + self.mon_param, + self.linear, + ProductExpression((self.mon_param, self.linear)), + ), + (self.mon_param, self.sum, ProductExpression((self.mon_param, self.sum))), + ( + self.mon_param, + self.other, + ProductExpression((self.mon_param, self.other)), + ), + ( + self.mon_param, + self.mutable_l0, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), 0)), + self.mon_param.arg(1), + ) + ), + ), + # 16: + ( + self.mon_param, + self.mutable_l1, + ProductExpression((self.mon_param, self.mon_npv)), + ), + ( + self.mon_param, + self.mutable_l2, + ProductExpression((self.mon_param, self.mutable_l2)), + ), + ( + self.mon_param, + self.param0, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), 0)), + self.mon_param.arg(1), + ) + ), + ), + (self.mon_param, self.param1, self.mon_param), + # 20: + ( + self.mon_param, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), self.npv)), + self.mon_param.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mon_npv(self): + tests = [ + (self.mon_npv, self.invalid, NotImplemented), + (self.mon_npv, self.asbinary, ProductExpression((self.mon_npv, self.bin))), + ( + self.mon_npv, + self.zero, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 0)), + self.mon_npv.arg(1), + ) + ), + ), + (self.mon_npv, self.one, self.mon_npv), + # 4: + ( + self.mon_npv, + self.native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 5)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 6)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.param_mut, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.param_mut)), + self.mon_npv.arg(1), + ) + ), + ), + # 8: + (self.mon_npv, self.var, ProductExpression((self.mon_npv, self.var))), + ( + self.mon_npv, + self.mon_native, + ProductExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mon_npv, + self.mon_param, + ProductExpression((self.mon_npv, self.mon_param)), + ), + ( + self.mon_npv, + self.mon_npv, + ProductExpression((self.mon_npv, self.mon_npv)), + ), + # 12: + (self.mon_npv, self.linear, ProductExpression((self.mon_npv, self.linear))), + (self.mon_npv, self.sum, ProductExpression((self.mon_npv, self.sum))), + (self.mon_npv, self.other, ProductExpression((self.mon_npv, self.other))), + ( + self.mon_npv, + self.mutable_l0, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 0)), + self.mon_npv.arg(1), + ) + ), + ), + # 16: + ( + self.mon_npv, + self.mutable_l1, + ProductExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mon_npv, + self.mutable_l2, + ProductExpression((self.mon_npv, self.mutable_l2)), + ), + ( + self.mon_npv, + self.param0, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 0)), + self.mon_npv.arg(1), + ) + ), + ), + (self.mon_npv, self.param1, self.mon_npv), + # 20: + ( + self.mon_npv, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_linear(self): + tests = [ + (self.linear, self.invalid, NotImplemented), + (self.linear, self.asbinary, ProductExpression((self.linear, self.bin))), + (self.linear, self.zero, ProductExpression((self.linear, 0))), + (self.linear, self.one, self.linear), + # 4: + (self.linear, self.native, ProductExpression((self.linear, 5))), + (self.linear, self.npv, ProductExpression((self.linear, self.npv))), + (self.linear, self.param, ProductExpression((self.linear, 6))), + ( + self.linear, + self.param_mut, + ProductExpression((self.linear, self.param_mut)), + ), + # 8: + (self.linear, self.var, ProductExpression((self.linear, self.var))), + ( + self.linear, + self.mon_native, + ProductExpression((self.linear, self.mon_native)), + ), + ( + self.linear, + self.mon_param, + ProductExpression((self.linear, self.mon_param)), + ), + (self.linear, self.mon_npv, ProductExpression((self.linear, self.mon_npv))), + # 12: + (self.linear, self.linear, ProductExpression((self.linear, self.linear))), + (self.linear, self.sum, ProductExpression((self.linear, self.sum))), + (self.linear, self.other, ProductExpression((self.linear, self.other))), + (self.linear, self.mutable_l0, ProductExpression((self.linear, 0))), + # 16: + ( + self.linear, + self.mutable_l1, + ProductExpression((self.linear, self.mon_npv)), + ), + ( + self.linear, + self.mutable_l2, + ProductExpression((self.linear, self.mutable_l2)), + ), + (self.linear, self.param0, ProductExpression((self.linear, 0))), + (self.linear, self.param1, self.linear), + # 20: + (self.linear, self.mutable_l3, ProductExpression((self.linear, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_sum(self): + tests = [ + (self.sum, self.invalid, NotImplemented), + (self.sum, self.asbinary, ProductExpression((self.sum, self.bin))), + (self.sum, self.zero, ProductExpression((self.sum, 0))), + (self.sum, self.one, self.sum), + # 4: + (self.sum, self.native, ProductExpression((self.sum, 5))), + (self.sum, self.npv, ProductExpression((self.sum, self.npv))), + (self.sum, self.param, ProductExpression((self.sum, 6))), + (self.sum, self.param_mut, ProductExpression((self.sum, self.param_mut))), + # 8: + (self.sum, self.var, ProductExpression((self.sum, self.var))), + (self.sum, self.mon_native, ProductExpression((self.sum, self.mon_native))), + (self.sum, self.mon_param, ProductExpression((self.sum, self.mon_param))), + (self.sum, self.mon_npv, ProductExpression((self.sum, self.mon_npv))), + # 12: + (self.sum, self.linear, ProductExpression((self.sum, self.linear))), + (self.sum, self.sum, ProductExpression((self.sum, self.sum))), + (self.sum, self.other, ProductExpression((self.sum, self.other))), + (self.sum, self.mutable_l0, ProductExpression((self.sum, 0))), + # 16: + (self.sum, self.mutable_l1, ProductExpression((self.sum, self.mon_npv))), + (self.sum, self.mutable_l2, ProductExpression((self.sum, self.mutable_l2))), + (self.sum, self.param0, ProductExpression((self.sum, 0))), + (self.sum, self.param1, self.sum), + # 20: + (self.sum, self.mutable_l3, ProductExpression((self.sum, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_other(self): + tests = [ + (self.other, self.invalid, NotImplemented), + (self.other, self.asbinary, ProductExpression((self.other, self.bin))), + (self.other, self.zero, ProductExpression((self.other, 0))), + (self.other, self.one, self.other), + # 4: + (self.other, self.native, ProductExpression((self.other, 5))), + (self.other, self.npv, ProductExpression((self.other, self.npv))), + (self.other, self.param, ProductExpression((self.other, 6))), + ( + self.other, + self.param_mut, + ProductExpression((self.other, self.param_mut)), + ), + # 8: + (self.other, self.var, ProductExpression((self.other, self.var))), + ( + self.other, + self.mon_native, + ProductExpression((self.other, self.mon_native)), + ), + ( + self.other, + self.mon_param, + ProductExpression((self.other, self.mon_param)), + ), + (self.other, self.mon_npv, ProductExpression((self.other, self.mon_npv))), + # 12: + (self.other, self.linear, ProductExpression((self.other, self.linear))), + (self.other, self.sum, ProductExpression((self.other, self.sum))), + (self.other, self.other, ProductExpression((self.other, self.other))), + (self.other, self.mutable_l0, ProductExpression((self.other, 0))), + # 16: + ( + self.other, + self.mutable_l1, + ProductExpression((self.other, self.mon_npv)), + ), + ( + self.other, + self.mutable_l2, + ProductExpression((self.other, self.mutable_l2)), + ), + (self.other, self.param0, ProductExpression((self.other, 0))), + (self.other, self.param1, self.other), + # 20: + (self.other, self.mutable_l3, ProductExpression((self.other, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mutable_l0(self): + tests = [ + (self.mutable_l0, self.invalid, NotImplemented), + (self.mutable_l0, self.asbinary, MonomialTermExpression((0, self.bin))), + (self.mutable_l0, self.zero, 0), + (self.mutable_l0, self.one, 0), + # 4: + (self.mutable_l0, self.native, 0), + (self.mutable_l0, self.npv, NPV_ProductExpression((0, self.npv))), + (self.mutable_l0, self.param, 0), + ( + self.mutable_l0, + self.param_mut, + NPV_ProductExpression((0, self.param_mut)), + ), + # 8: + (self.mutable_l0, self.var, MonomialTermExpression((0, self.var))), + ( + self.mutable_l0, + self.mon_native, + MonomialTermExpression((0, self.mon_native.arg(1))), + ), + ( + self.mutable_l0, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((0, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mutable_l0, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((0, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.mutable_l0, self.linear, ProductExpression((0, self.linear))), + (self.mutable_l0, self.sum, ProductExpression((0, self.sum))), + (self.mutable_l0, self.other, ProductExpression((0, self.other))), + (self.mutable_l0, self.mutable_l0, 0), + # 16: + ( + self.mutable_l0, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((0, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + (self.mutable_l0, self.mutable_l2, ProductExpression((0, self.mutable_l2))), + (self.mutable_l0, self.param0, 0), + (self.mutable_l0, self.param1, 0), + # 20: + (self.mutable_l0, self.mutable_l3, NPV_ProductExpression((0, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mutable_l1(self): + tests = [ + (self.mutable_l1, self.invalid, NotImplemented), + ( + self.mutable_l1, + self.asbinary, + ProductExpression((self.mon_npv, self.bin)), + ), + ( + self.mutable_l1, + self.zero, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 0)), + self.mon_npv.arg(1), + ) + ), + ), + (self.mutable_l1, self.one, self.mon_npv), + # 4: + ( + self.mutable_l1, + self.native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 5)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 6)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.param_mut, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.param_mut)), + self.mon_npv.arg(1), + ) + ), + ), + # 8: + (self.mutable_l1, self.var, ProductExpression((self.mon_npv, self.var))), + ( + self.mutable_l1, + self.mon_native, + ProductExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mutable_l1, + self.mon_param, + ProductExpression((self.mon_npv, self.mon_param)), + ), + ( + self.mutable_l1, + self.mon_npv, + ProductExpression((self.mon_npv, self.mon_npv)), + ), + # 12: + ( + self.mutable_l1, + self.linear, + ProductExpression((self.mon_npv, self.linear)), + ), + (self.mutable_l1, self.sum, ProductExpression((self.mon_npv, self.sum))), + ( + self.mutable_l1, + self.other, + ProductExpression((self.mon_npv, self.other)), + ), + ( + self.mutable_l1, + self.mutable_l0, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 0)), + self.mon_npv.arg(1), + ) + ), + ), + # 16: + ( + self.mutable_l1, + self.mutable_l1, + ProductExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mutable_l1, + self.mutable_l2, + ProductExpression((self.mon_npv, self.mutable_l2)), + ), + ( + self.mutable_l1, + self.param0, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 0)), + self.mon_npv.arg(1), + ) + ), + ), + (self.mutable_l1, self.param1, self.mon_npv), + # 20: + ( + self.mutable_l1, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mutable_l2(self): + tests = [ + (self.mutable_l2, self.invalid, NotImplemented), + ( + self.mutable_l2, + self.asbinary, + ProductExpression((self.mutable_l2, self.bin)), + ), + (self.mutable_l2, self.zero, ProductExpression((self.mutable_l2, 0))), + (self.mutable_l2, self.one, self.mutable_l2), + # 4: + (self.mutable_l2, self.native, ProductExpression((self.mutable_l2, 5))), + (self.mutable_l2, self.npv, ProductExpression((self.mutable_l2, self.npv))), + (self.mutable_l2, self.param, ProductExpression((self.mutable_l2, 6))), + ( + self.mutable_l2, + self.param_mut, + ProductExpression((self.mutable_l2, self.param_mut)), + ), + # 8: + (self.mutable_l2, self.var, ProductExpression((self.mutable_l2, self.var))), + ( + self.mutable_l2, + self.mon_native, + ProductExpression((self.mutable_l2, self.mon_native)), + ), + ( + self.mutable_l2, + self.mon_param, + ProductExpression((self.mutable_l2, self.mon_param)), + ), + ( + self.mutable_l2, + self.mon_npv, + ProductExpression((self.mutable_l2, self.mon_npv)), + ), + # 12: + ( + self.mutable_l2, + self.linear, + ProductExpression((self.mutable_l2, self.linear)), + ), + (self.mutable_l2, self.sum, ProductExpression((self.mutable_l2, self.sum))), + ( + self.mutable_l2, + self.other, + ProductExpression((self.mutable_l2, self.other)), + ), + (self.mutable_l2, self.mutable_l0, ProductExpression((self.mutable_l2, 0))), + # 16: + ( + self.mutable_l2, + self.mutable_l1, + ProductExpression((self.mutable_l2, self.mon_npv)), + ), + ( + self.mutable_l2, + self.mutable_l2, + ProductExpression((self.mutable_l2, self.mutable_l2)), + ), + (self.mutable_l2, self.param0, ProductExpression((self.mutable_l2, 0))), + (self.mutable_l2, self.param1, self.mutable_l2), + # 20: + ( + self.mutable_l2, + self.mutable_l3, + ProductExpression((self.mutable_l2, self.npv)), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_param0(self): + tests = [ + # "Param0 * invalid(str)" is a legitimate Python operation and + # should never hit the Pyomo expression system + (self.param0, self.invalid, self.SKIP), + (self.param0, self.asbinary, MonomialTermExpression((0, self.bin))), + (self.param0, self.zero, 0), + (self.param0, self.one, 0), + # 4: + (self.param0, self.native, 0), + (self.param0, self.npv, NPV_ProductExpression((0, self.npv))), + (self.param0, self.param, 0), + (self.param0, self.param_mut, NPV_ProductExpression((0, self.param_mut))), + # 8: + (self.param0, self.var, MonomialTermExpression((0, self.var))), + ( + self.param0, + self.mon_native, + MonomialTermExpression((0, self.mon_native.arg(1))), + ), + ( + self.param0, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((0, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.param0, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((0, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.param0, self.linear, ProductExpression((0, self.linear))), + (self.param0, self.sum, ProductExpression((0, self.sum))), + (self.param0, self.other, ProductExpression((0, self.other))), + (self.param0, self.mutable_l0, 0), + # 16: + ( + self.param0, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((0, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + (self.param0, self.mutable_l2, ProductExpression((0, self.mutable_l2))), + (self.param0, self.param0, 0), + (self.param0, self.param1, 0), + # 20: + (self.param0, self.mutable_l3, NPV_ProductExpression((0, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_param1(self): + tests = [ + # "One * invalid(str)" is a legitimate Python operation and + # should never hit the Pyomo expression system + (self.param1, self.invalid, self.SKIP), + (self.param1, self.asbinary, self.bin), + (self.param1, self.zero, 0), + (self.param1, self.one, 1), + # 4: + (self.param1, self.native, 5), + (self.param1, self.npv, self.npv), + (self.param1, self.param, self.param), + (self.param1, self.param_mut, self.param_mut), + # 8: + (self.param1, self.var, self.var), + (self.param1, self.mon_native, self.mon_native), + (self.param1, self.mon_param, self.mon_param), + (self.param1, self.mon_npv, self.mon_npv), + # 12: + (self.param1, self.linear, self.linear), + (self.param1, self.sum, self.sum), + (self.param1, self.other, self.other), + (self.param1, self.mutable_l0, 0), + # 16: + (self.param1, self.mutable_l1, self.mon_npv), + (self.param1, self.mutable_l2, self.mutable_l2), + (self.param1, self.param0, self.param0), + (self.param1, self.param1, self.param1), + # 20: + (self.param1, self.mutable_l3, self.npv), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mutable_l3(self): + tests = [ + (self.mutable_l3, self.invalid, NotImplemented), + ( + self.mutable_l3, + self.asbinary, + MonomialTermExpression((self.npv, self.bin)), + ), + (self.mutable_l3, self.zero, NPV_ProductExpression((self.npv, 0))), + (self.mutable_l3, self.one, self.npv), + # 4: + (self.mutable_l3, self.native, NPV_ProductExpression((self.npv, 5))), + (self.mutable_l3, self.npv, NPV_ProductExpression((self.npv, self.npv))), + (self.mutable_l3, self.param, NPV_ProductExpression((self.npv, 6))), + ( + self.mutable_l3, + self.param_mut, + NPV_ProductExpression((self.npv, self.param_mut)), + ), + # 8: + (self.mutable_l3, self.var, MonomialTermExpression((self.npv, self.var))), + ( + self.mutable_l3, + self.mon_native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_native.arg(0))), + self.mon_native.arg(1), + ) + ), + ), + ( + self.mutable_l3, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mutable_l3, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.mutable_l3, self.linear, ProductExpression((self.npv, self.linear))), + (self.mutable_l3, self.sum, ProductExpression((self.npv, self.sum))), + (self.mutable_l3, self.other, ProductExpression((self.npv, self.other))), + (self.mutable_l3, self.mutable_l0, NPV_ProductExpression((self.npv, 0))), + # 16: + ( + self.mutable_l3, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l3, + self.mutable_l2, + ProductExpression((self.npv, self.mutable_l2)), + ), + (self.mutable_l3, self.param0, NPV_ProductExpression((self.npv, 0))), + (self.mutable_l3, self.param1, self.npv), + # 20: + ( + self.mutable_l3, + self.mutable_l3, + NPV_ProductExpression((self.npv, self.npv)), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + # + # + # DIVISION + # + # + + def test_div_invalid(self): + tests = [ + (self.invalid, self.invalid, NotImplemented), + (self.invalid, self.asbinary, NotImplemented), + (self.invalid, self.zero, NotImplemented), + (self.invalid, self.one, NotImplemented), + # 4: + (self.invalid, self.native, NotImplemented), + (self.invalid, self.npv, NotImplemented), + (self.invalid, self.param, NotImplemented), + (self.invalid, self.param_mut, NotImplemented), + # 8: + (self.invalid, self.var, NotImplemented), + (self.invalid, self.mon_native, NotImplemented), + (self.invalid, self.mon_param, NotImplemented), + (self.invalid, self.mon_npv, NotImplemented), + # 12: + (self.invalid, self.linear, NotImplemented), + (self.invalid, self.sum, NotImplemented), + (self.invalid, self.other, NotImplemented), + (self.invalid, self.mutable_l0, NotImplemented), + # 16: + (self.invalid, self.mutable_l1, NotImplemented), + (self.invalid, self.mutable_l2, NotImplemented), + (self.invalid, self.param0, NotImplemented), + (self.invalid, self.param1, NotImplemented), + # 20: + (self.invalid, self.mutable_l3, NotImplemented), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_asbinary(self): + tests = [ + (self.asbinary, self.invalid, NotImplemented), + # BooleanVar objects do not support division + (self.asbinary, self.asbinary, NotImplemented), + (self.asbinary, self.zero, ZeroDivisionError), + (self.asbinary, self.one, self.bin), + # 4: + (self.asbinary, self.native, MonomialTermExpression((0.2, self.bin))), + ( + self.asbinary, + self.npv, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.npv)), self.bin) + ), + ), + (self.asbinary, self.param, MonomialTermExpression((1 / 6, self.bin))), + ( + self.asbinary, + self.param_mut, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.param_mut)), self.bin) + ), + ), + # 8: + (self.asbinary, self.var, DivisionExpression((self.bin, self.var))), + ( + self.asbinary, + self.mon_native, + DivisionExpression((self.bin, self.mon_native)), + ), + ( + self.asbinary, + self.mon_param, + DivisionExpression((self.bin, self.mon_param)), + ), + (self.asbinary, self.mon_npv, DivisionExpression((self.bin, self.mon_npv))), + # 12: + (self.asbinary, self.linear, DivisionExpression((self.bin, self.linear))), + (self.asbinary, self.sum, DivisionExpression((self.bin, self.sum))), + (self.asbinary, self.other, DivisionExpression((self.bin, self.other))), + (self.asbinary, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.asbinary, + self.mutable_l1, + DivisionExpression((self.bin, self.mon_npv)), + ), + ( + self.asbinary, + self.mutable_l2, + DivisionExpression((self.bin, self.mutable_l2)), + ), + (self.asbinary, self.param0, ZeroDivisionError), + (self.asbinary, self.param1, self.bin), + # 20: + ( + self.asbinary, + self.mutable_l3, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.npv)), self.bin) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_zero(self): + tests = [ + (self.zero, self.invalid, NotImplemented), + (self.zero, self.asbinary, DivisionExpression((0, self.bin))), + (self.zero, self.zero, ZeroDivisionError), + (self.zero, self.one, 0.0), + # 4: + (self.zero, self.native, 0.0), + (self.zero, self.npv, NPV_DivisionExpression((0, self.npv))), + (self.zero, self.param, 0.0), + (self.zero, self.param_mut, NPV_DivisionExpression((0, self.param_mut))), + # 8: + (self.zero, self.var, DivisionExpression((0, self.var))), + (self.zero, self.mon_native, DivisionExpression((0, self.mon_native))), + (self.zero, self.mon_param, DivisionExpression((0, self.mon_param))), + (self.zero, self.mon_npv, DivisionExpression((0, self.mon_npv))), + # 12: + (self.zero, self.linear, DivisionExpression((0, self.linear))), + (self.zero, self.sum, DivisionExpression((0, self.sum))), + (self.zero, self.other, DivisionExpression((0, self.other))), + (self.zero, self.mutable_l0, ZeroDivisionError), + # 16: + (self.zero, self.mutable_l1, DivisionExpression((0, self.mon_npv))), + (self.zero, self.mutable_l2, DivisionExpression((0, self.mutable_l2))), + (self.zero, self.param0, ZeroDivisionError), + (self.zero, self.param1, 0.0), + # 20: + (self.zero, self.mutable_l3, NPV_DivisionExpression((0, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_one(self): + tests = [ + (self.one, self.invalid, NotImplemented), + (self.one, self.asbinary, DivisionExpression((1, self.bin))), + (self.one, self.zero, ZeroDivisionError), + (self.one, self.one, 1.0), + # 4: + (self.one, self.native, 0.2), + (self.one, self.npv, NPV_DivisionExpression((1, self.npv))), + (self.one, self.param, 1 / 6), + (self.one, self.param_mut, NPV_DivisionExpression((1, self.param_mut))), + # 8: + (self.one, self.var, DivisionExpression((1, self.var))), + (self.one, self.mon_native, DivisionExpression((1, self.mon_native))), + (self.one, self.mon_param, DivisionExpression((1, self.mon_param))), + (self.one, self.mon_npv, DivisionExpression((1, self.mon_npv))), + # 12: + (self.one, self.linear, DivisionExpression((1, self.linear))), + (self.one, self.sum, DivisionExpression((1, self.sum))), + (self.one, self.other, DivisionExpression((1, self.other))), + (self.one, self.mutable_l0, ZeroDivisionError), + # 16: + (self.one, self.mutable_l1, DivisionExpression((1, self.mon_npv))), + (self.one, self.mutable_l2, DivisionExpression((1, self.mutable_l2))), + (self.one, self.param0, ZeroDivisionError), + (self.one, self.param1, 1.0), + # 20: + (self.one, self.mutable_l3, NPV_DivisionExpression((1, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_native(self): + tests = [ + (self.native, self.invalid, NotImplemented), + (self.native, self.asbinary, DivisionExpression((5, self.bin))), + (self.native, self.zero, ZeroDivisionError), + (self.native, self.one, 5.0), + # 4: + (self.native, self.native, 1.0), + (self.native, self.npv, NPV_DivisionExpression((5, self.npv))), + (self.native, self.param, 5 / 6), + (self.native, self.param_mut, NPV_DivisionExpression((5, self.param_mut))), + # 8: + (self.native, self.var, DivisionExpression((5, self.var))), + (self.native, self.mon_native, DivisionExpression((5, self.mon_native))), + (self.native, self.mon_param, DivisionExpression((5, self.mon_param))), + (self.native, self.mon_npv, DivisionExpression((5, self.mon_npv))), + # 12: + (self.native, self.linear, DivisionExpression((5, self.linear))), + (self.native, self.sum, DivisionExpression((5, self.sum))), + (self.native, self.other, DivisionExpression((5, self.other))), + (self.native, self.mutable_l0, ZeroDivisionError), + # 16: + (self.native, self.mutable_l1, DivisionExpression((5, self.mon_npv))), + (self.native, self.mutable_l2, DivisionExpression((5, self.mutable_l2))), + (self.native, self.param0, ZeroDivisionError), + (self.native, self.param1, 5.0), + # 20: + (self.native, self.mutable_l3, NPV_DivisionExpression((5, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_npv(self): + tests = [ + (self.npv, self.invalid, NotImplemented), + (self.npv, self.asbinary, DivisionExpression((self.npv, self.bin))), + (self.npv, self.zero, ZeroDivisionError), + (self.npv, self.one, self.npv), + # 4: + (self.npv, self.native, NPV_DivisionExpression((self.npv, 5))), + (self.npv, self.npv, NPV_DivisionExpression((self.npv, self.npv))), + (self.npv, self.param, NPV_DivisionExpression((self.npv, 6))), + ( + self.npv, + self.param_mut, + NPV_DivisionExpression((self.npv, self.param_mut)), + ), + # 8: + (self.npv, self.var, DivisionExpression((self.npv, self.var))), + ( + self.npv, + self.mon_native, + DivisionExpression((self.npv, self.mon_native)), + ), + (self.npv, self.mon_param, DivisionExpression((self.npv, self.mon_param))), + (self.npv, self.mon_npv, DivisionExpression((self.npv, self.mon_npv))), + # 12: + (self.npv, self.linear, DivisionExpression((self.npv, self.linear))), + (self.npv, self.sum, DivisionExpression((self.npv, self.sum))), + (self.npv, self.other, DivisionExpression((self.npv, self.other))), + (self.npv, self.mutable_l0, ZeroDivisionError), + # 16: + (self.npv, self.mutable_l1, DivisionExpression((self.npv, self.mon_npv))), + ( + self.npv, + self.mutable_l2, + DivisionExpression((self.npv, self.mutable_l2)), + ), + (self.npv, self.param0, ZeroDivisionError), + (self.npv, self.param1, self.npv), + # 20: + (self.npv, self.mutable_l3, NPV_DivisionExpression((self.npv, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_param(self): + tests = [ + (self.param, self.invalid, NotImplemented), + (self.param, self.asbinary, DivisionExpression((6, self.bin))), + (self.param, self.zero, ZeroDivisionError), + (self.param, self.one, 6.0), + # 4: + (self.param, self.native, 1.2), + (self.param, self.npv, NPV_DivisionExpression((6, self.npv))), + (self.param, self.param, 1.0), + (self.param, self.param_mut, NPV_DivisionExpression((6, self.param_mut))), + # 8: + (self.param, self.var, DivisionExpression((6, self.var))), + (self.param, self.mon_native, DivisionExpression((6, self.mon_native))), + (self.param, self.mon_param, DivisionExpression((6, self.mon_param))), + (self.param, self.mon_npv, DivisionExpression((6, self.mon_npv))), + # 12: + (self.param, self.linear, DivisionExpression((6, self.linear))), + (self.param, self.sum, DivisionExpression((6, self.sum))), + (self.param, self.other, DivisionExpression((6, self.other))), + (self.param, self.mutable_l0, ZeroDivisionError), + # 16: + (self.param, self.mutable_l1, DivisionExpression((6, self.mon_npv))), + (self.param, self.mutable_l2, DivisionExpression((6, self.mutable_l2))), + (self.param, self.param0, ZeroDivisionError), + (self.param, self.param1, 6.0), + # 20: + (self.param, self.mutable_l3, NPV_DivisionExpression((6, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_param_mut(self): + tests = [ + (self.param_mut, self.invalid, NotImplemented), + ( + self.param_mut, + self.asbinary, + DivisionExpression((self.param_mut, self.bin)), + ), + (self.param_mut, self.zero, ZeroDivisionError), + (self.param_mut, self.one, self.param_mut), + # 4: + (self.param_mut, self.native, NPV_DivisionExpression((self.param_mut, 5))), + ( + self.param_mut, + self.npv, + NPV_DivisionExpression((self.param_mut, self.npv)), + ), + (self.param_mut, self.param, NPV_DivisionExpression((self.param_mut, 6))), + ( + self.param_mut, + self.param_mut, + NPV_DivisionExpression((self.param_mut, self.param_mut)), + ), + # 8: + (self.param_mut, self.var, DivisionExpression((self.param_mut, self.var))), + ( + self.param_mut, + self.mon_native, + DivisionExpression((self.param_mut, self.mon_native)), + ), + ( + self.param_mut, + self.mon_param, + DivisionExpression((self.param_mut, self.mon_param)), + ), + ( + self.param_mut, + self.mon_npv, + DivisionExpression((self.param_mut, self.mon_npv)), + ), + # 12: + ( + self.param_mut, + self.linear, + DivisionExpression((self.param_mut, self.linear)), + ), + (self.param_mut, self.sum, DivisionExpression((self.param_mut, self.sum))), + ( + self.param_mut, + self.other, + DivisionExpression((self.param_mut, self.other)), + ), + (self.param_mut, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.param_mut, + self.mutable_l1, + DivisionExpression((self.param_mut, self.mon_npv)), + ), + ( + self.param_mut, + self.mutable_l2, + DivisionExpression((self.param_mut, self.mutable_l2)), + ), + (self.param_mut, self.param0, ZeroDivisionError), + (self.param_mut, self.param1, self.param_mut), + # 20: + ( + self.param_mut, + self.mutable_l3, + NPV_DivisionExpression((self.param_mut, self.npv)), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_var(self): + tests = [ + (self.var, self.invalid, NotImplemented), + (self.var, self.asbinary, DivisionExpression((self.var, self.bin))), + (self.var, self.zero, ZeroDivisionError), + (self.var, self.one, self.var), + # 4: + (self.var, self.native, MonomialTermExpression((0.2, self.var))), + ( + self.var, + self.npv, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.npv)), self.var) + ), + ), + (self.var, self.param, MonomialTermExpression((1 / 6.0, self.var))), + ( + self.var, + self.param_mut, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.param_mut)), self.var) + ), + ), + # 8: + (self.var, self.var, DivisionExpression((self.var, self.var))), + ( + self.var, + self.mon_native, + DivisionExpression((self.var, self.mon_native)), + ), + (self.var, self.mon_param, DivisionExpression((self.var, self.mon_param))), + (self.var, self.mon_npv, DivisionExpression((self.var, self.mon_npv))), + # 12: + (self.var, self.linear, DivisionExpression((self.var, self.linear))), + (self.var, self.sum, DivisionExpression((self.var, self.sum))), + (self.var, self.other, DivisionExpression((self.var, self.other))), + (self.var, self.mutable_l0, ZeroDivisionError), + # 16: + (self.var, self.mutable_l1, DivisionExpression((self.var, self.mon_npv))), + ( + self.var, + self.mutable_l2, + DivisionExpression((self.var, self.mutable_l2)), + ), + (self.var, self.param0, ZeroDivisionError), + (self.var, self.param1, self.var), + # 20: + ( + self.var, + self.mutable_l3, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.npv)), self.var) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mon_native(self): + tests = [ + (self.mon_native, self.invalid, NotImplemented), + ( + self.mon_native, + self.asbinary, + DivisionExpression((self.mon_native, self.bin)), + ), + (self.mon_native, self.zero, ZeroDivisionError), + (self.mon_native, self.one, self.mon_native), + # 4: + ( + self.mon_native, + self.native, + MonomialTermExpression((0.6, self.mon_native.arg(1))), + ), + ( + self.mon_native, + self.npv, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_native.arg(0), self.npv)), + self.mon_native.arg(1), + ) + ), + ), + ( + self.mon_native, + self.param, + MonomialTermExpression((0.5, self.mon_native.arg(1))), + ), + ( + self.mon_native, + self.param_mut, + MonomialTermExpression( + ( + NPV_DivisionExpression( + (self.mon_native.arg(0), self.param_mut) + ), + self.mon_native.arg(1), + ) + ), + ), + # 8: + ( + self.mon_native, + self.var, + DivisionExpression((self.mon_native, self.var)), + ), + ( + self.mon_native, + self.mon_native, + DivisionExpression((self.mon_native, self.mon_native)), + ), + ( + self.mon_native, + self.mon_param, + DivisionExpression((self.mon_native, self.mon_param)), + ), + ( + self.mon_native, + self.mon_npv, + DivisionExpression((self.mon_native, self.mon_npv)), + ), + # 12: + ( + self.mon_native, + self.linear, + DivisionExpression((self.mon_native, self.linear)), + ), + ( + self.mon_native, + self.sum, + DivisionExpression((self.mon_native, self.sum)), + ), + ( + self.mon_native, + self.other, + DivisionExpression((self.mon_native, self.other)), + ), + (self.mon_native, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mon_native, + self.mutable_l1, + DivisionExpression((self.mon_native, self.mon_npv)), + ), + ( + self.mon_native, + self.mutable_l2, + DivisionExpression((self.mon_native, self.mutable_l2)), + ), + (self.mon_native, self.param0, ZeroDivisionError), + (self.mon_native, self.param1, self.mon_native), + # 20: + ( + self.mon_native, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_native.arg(0), self.npv)), + self.mon_native.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mon_param(self): + tests = [ + (self.mon_param, self.invalid, NotImplemented), + ( + self.mon_param, + self.asbinary, + DivisionExpression((self.mon_param, self.bin)), + ), + (self.mon_param, self.zero, ZeroDivisionError), + (self.mon_param, self.one, self.mon_param), + # 4: + ( + self.mon_param, + self.native, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_param.arg(0), 5)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.npv, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_param.arg(0), self.npv)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.param, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_param.arg(0), 6)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.param_mut, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_param.arg(0), self.param_mut)), + self.mon_param.arg(1), + ) + ), + ), + # 8: + (self.mon_param, self.var, DivisionExpression((self.mon_param, self.var))), + ( + self.mon_param, + self.mon_native, + DivisionExpression((self.mon_param, self.mon_native)), + ), + ( + self.mon_param, + self.mon_param, + DivisionExpression((self.mon_param, self.mon_param)), + ), + ( + self.mon_param, + self.mon_npv, + DivisionExpression((self.mon_param, self.mon_npv)), + ), + # 12: + ( + self.mon_param, + self.linear, + DivisionExpression((self.mon_param, self.linear)), + ), + (self.mon_param, self.sum, DivisionExpression((self.mon_param, self.sum))), + ( + self.mon_param, + self.other, + DivisionExpression((self.mon_param, self.other)), + ), + (self.mon_param, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mon_param, + self.mutable_l1, + DivisionExpression((self.mon_param, self.mon_npv)), + ), + ( + self.mon_param, + self.mutable_l2, + DivisionExpression((self.mon_param, self.mutable_l2)), + ), + (self.mon_param, self.param0, ZeroDivisionError), + (self.mon_param, self.param1, self.mon_param), + # 20: + ( + self.mon_param, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_param.arg(0), self.npv)), + self.mon_param.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mon_npv(self): + tests = [ + (self.mon_npv, self.invalid, NotImplemented), + (self.mon_npv, self.asbinary, DivisionExpression((self.mon_npv, self.bin))), + (self.mon_npv, self.zero, ZeroDivisionError), + (self.mon_npv, self.one, self.mon_npv), + # 4: + ( + self.mon_npv, + self.native, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), 5)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.npv, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.param, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), 6)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.param_mut, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.param_mut)), + self.mon_npv.arg(1), + ) + ), + ), + # 8: + (self.mon_npv, self.var, DivisionExpression((self.mon_npv, self.var))), + ( + self.mon_npv, + self.mon_native, + DivisionExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mon_npv, + self.mon_param, + DivisionExpression((self.mon_npv, self.mon_param)), + ), + ( + self.mon_npv, + self.mon_npv, + DivisionExpression((self.mon_npv, self.mon_npv)), + ), + # 12: + ( + self.mon_npv, + self.linear, + DivisionExpression((self.mon_npv, self.linear)), + ), + (self.mon_npv, self.sum, DivisionExpression((self.mon_npv, self.sum))), + (self.mon_npv, self.other, DivisionExpression((self.mon_npv, self.other))), + (self.mon_npv, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mon_npv, + self.mutable_l1, + DivisionExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mon_npv, + self.mutable_l2, + DivisionExpression((self.mon_npv, self.mutable_l2)), + ), + (self.mon_npv, self.param0, ZeroDivisionError), + (self.mon_npv, self.param1, self.mon_npv), + # 20: + ( + self.mon_npv, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_linear(self): + tests = [ + (self.linear, self.invalid, NotImplemented), + (self.linear, self.asbinary, DivisionExpression((self.linear, self.bin))), + (self.linear, self.zero, ZeroDivisionError), + (self.linear, self.one, self.linear), + # 4: + (self.linear, self.native, DivisionExpression((self.linear, 5))), + (self.linear, self.npv, DivisionExpression((self.linear, self.npv))), + (self.linear, self.param, DivisionExpression((self.linear, 6))), + ( + self.linear, + self.param_mut, + DivisionExpression((self.linear, self.param_mut)), + ), + # 8: + (self.linear, self.var, DivisionExpression((self.linear, self.var))), + ( + self.linear, + self.mon_native, + DivisionExpression((self.linear, self.mon_native)), + ), + ( + self.linear, + self.mon_param, + DivisionExpression((self.linear, self.mon_param)), + ), + ( + self.linear, + self.mon_npv, + DivisionExpression((self.linear, self.mon_npv)), + ), + # 12: + (self.linear, self.linear, DivisionExpression((self.linear, self.linear))), + (self.linear, self.sum, DivisionExpression((self.linear, self.sum))), + (self.linear, self.other, DivisionExpression((self.linear, self.other))), + (self.linear, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.linear, + self.mutable_l1, + DivisionExpression((self.linear, self.mon_npv)), + ), + ( + self.linear, + self.mutable_l2, + DivisionExpression((self.linear, self.mutable_l2)), + ), + (self.linear, self.param0, ZeroDivisionError), + (self.linear, self.param1, self.linear), + # 20: + (self.linear, self.mutable_l3, DivisionExpression((self.linear, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_sum(self): + tests = [ + (self.sum, self.invalid, NotImplemented), + (self.sum, self.asbinary, DivisionExpression((self.sum, self.bin))), + (self.sum, self.zero, ZeroDivisionError), + (self.sum, self.one, self.sum), + # 4: + (self.sum, self.native, DivisionExpression((self.sum, 5))), + (self.sum, self.npv, DivisionExpression((self.sum, self.npv))), + (self.sum, self.param, DivisionExpression((self.sum, 6))), + (self.sum, self.param_mut, DivisionExpression((self.sum, self.param_mut))), + # 8: + (self.sum, self.var, DivisionExpression((self.sum, self.var))), + ( + self.sum, + self.mon_native, + DivisionExpression((self.sum, self.mon_native)), + ), + (self.sum, self.mon_param, DivisionExpression((self.sum, self.mon_param))), + (self.sum, self.mon_npv, DivisionExpression((self.sum, self.mon_npv))), + # 12: + (self.sum, self.linear, DivisionExpression((self.sum, self.linear))), + (self.sum, self.sum, DivisionExpression((self.sum, self.sum))), + (self.sum, self.other, DivisionExpression((self.sum, self.other))), + (self.sum, self.mutable_l0, ZeroDivisionError), + # 16: + (self.sum, self.mutable_l1, DivisionExpression((self.sum, self.mon_npv))), + ( + self.sum, + self.mutable_l2, + DivisionExpression((self.sum, self.mutable_l2)), + ), + (self.sum, self.param0, ZeroDivisionError), + (self.sum, self.param1, self.sum), + # 20: + (self.sum, self.mutable_l3, DivisionExpression((self.sum, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_other(self): + tests = [ + (self.other, self.invalid, NotImplemented), + (self.other, self.asbinary, DivisionExpression((self.other, self.bin))), + (self.other, self.zero, ZeroDivisionError), + (self.other, self.one, self.other), + # 4: + (self.other, self.native, DivisionExpression((self.other, 5))), + (self.other, self.npv, DivisionExpression((self.other, self.npv))), + (self.other, self.param, DivisionExpression((self.other, 6))), + ( + self.other, + self.param_mut, + DivisionExpression((self.other, self.param_mut)), + ), + # 8: + (self.other, self.var, DivisionExpression((self.other, self.var))), + ( + self.other, + self.mon_native, + DivisionExpression((self.other, self.mon_native)), + ), + ( + self.other, + self.mon_param, + DivisionExpression((self.other, self.mon_param)), + ), + (self.other, self.mon_npv, DivisionExpression((self.other, self.mon_npv))), + # 12: + (self.other, self.linear, DivisionExpression((self.other, self.linear))), + (self.other, self.sum, DivisionExpression((self.other, self.sum))), + (self.other, self.other, DivisionExpression((self.other, self.other))), + (self.other, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.other, + self.mutable_l1, + DivisionExpression((self.other, self.mon_npv)), + ), + ( + self.other, + self.mutable_l2, + DivisionExpression((self.other, self.mutable_l2)), + ), + (self.other, self.param0, ZeroDivisionError), + (self.other, self.param1, self.other), + # 20: + (self.other, self.mutable_l3, DivisionExpression((self.other, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mutable_l0(self): + tests = [ + (self.mutable_l0, self.invalid, NotImplemented), + (self.mutable_l0, self.asbinary, DivisionExpression((0, self.bin))), + (self.mutable_l0, self.zero, ZeroDivisionError), + (self.mutable_l0, self.one, 0.0), + # 4: + (self.mutable_l0, self.native, 0.0), + (self.mutable_l0, self.npv, NPV_DivisionExpression((0, self.npv))), + (self.mutable_l0, self.param, 0.0), + ( + self.mutable_l0, + self.param_mut, + NPV_DivisionExpression((0, self.param_mut)), + ), + # 8: + (self.mutable_l0, self.var, DivisionExpression((0, self.var))), + ( + self.mutable_l0, + self.mon_native, + DivisionExpression((0, self.mon_native)), + ), + (self.mutable_l0, self.mon_param, DivisionExpression((0, self.mon_param))), + (self.mutable_l0, self.mon_npv, DivisionExpression((0, self.mon_npv))), + # 12: + (self.mutable_l0, self.linear, DivisionExpression((0, self.linear))), + (self.mutable_l0, self.sum, DivisionExpression((0, self.sum))), + (self.mutable_l0, self.other, DivisionExpression((0, self.other))), + (self.mutable_l0, self.mutable_l0, ZeroDivisionError), + # 16: + (self.mutable_l0, self.mutable_l1, DivisionExpression((0, self.mon_npv))), + ( + self.mutable_l0, + self.mutable_l2, + DivisionExpression((0, self.mutable_l2)), + ), + (self.mutable_l0, self.param0, ZeroDivisionError), + (self.mutable_l0, self.param1, 0.0), + # 20: + (self.mutable_l0, self.mutable_l3, NPV_DivisionExpression((0, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mutable_l1(self): + tests = [ + (self.mutable_l1, self.invalid, NotImplemented), + ( + self.mutable_l1, + self.asbinary, + DivisionExpression((self.mon_npv, self.bin)), + ), + (self.mutable_l1, self.zero, ZeroDivisionError), + (self.mutable_l1, self.one, self.mon_npv), + # 4: + ( + self.mutable_l1, + self.native, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.native)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.npv, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.param, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), 6)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.param_mut, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.param_mut)), + self.mon_npv.arg(1), + ) + ), + ), + # 8: + (self.mutable_l1, self.var, DivisionExpression((self.mon_npv, self.var))), + ( + self.mutable_l1, + self.mon_native, + DivisionExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mutable_l1, + self.mon_param, + DivisionExpression((self.mon_npv, self.mon_param)), + ), + ( + self.mutable_l1, + self.mon_npv, + DivisionExpression((self.mon_npv, self.mon_npv)), + ), + # 12: + ( + self.mutable_l1, + self.linear, + DivisionExpression((self.mon_npv, self.linear)), + ), + (self.mutable_l1, self.sum, DivisionExpression((self.mon_npv, self.sum))), + ( + self.mutable_l1, + self.other, + DivisionExpression((self.mon_npv, self.other)), + ), + (self.mutable_l1, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mutable_l1, + self.mutable_l1, + DivisionExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mutable_l1, + self.mutable_l2, + DivisionExpression((self.mon_npv, self.mutable_l2)), + ), + (self.mutable_l1, self.param0, ZeroDivisionError), + (self.mutable_l1, self.param1, self.mon_npv), + # 20: + ( + self.mutable_l1, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mutable_l2(self): + tests = [ + (self.mutable_l2, self.invalid, NotImplemented), + ( + self.mutable_l2, + self.asbinary, + DivisionExpression((self.mutable_l2, self.bin)), + ), + (self.mutable_l2, self.zero, ZeroDivisionError), + (self.mutable_l2, self.one, self.mutable_l2), + # 4: + (self.mutable_l2, self.native, DivisionExpression((self.mutable_l2, 5))), + ( + self.mutable_l2, + self.npv, + DivisionExpression((self.mutable_l2, self.npv)), + ), + (self.mutable_l2, self.param, DivisionExpression((self.mutable_l2, 6))), + ( + self.mutable_l2, + self.param_mut, + DivisionExpression((self.mutable_l2, self.param_mut)), + ), + # 8: + ( + self.mutable_l2, + self.var, + DivisionExpression((self.mutable_l2, self.var)), + ), + ( + self.mutable_l2, + self.mon_native, + DivisionExpression((self.mutable_l2, self.mon_native)), + ), + ( + self.mutable_l2, + self.mon_param, + DivisionExpression((self.mutable_l2, self.mon_param)), + ), + ( + self.mutable_l2, + self.mon_npv, + DivisionExpression((self.mutable_l2, self.mon_npv)), + ), + # 12: + ( + self.mutable_l2, + self.linear, + DivisionExpression((self.mutable_l2, self.linear)), + ), + ( + self.mutable_l2, + self.sum, + DivisionExpression((self.mutable_l2, self.sum)), + ), + ( + self.mutable_l2, + self.other, + DivisionExpression((self.mutable_l2, self.other)), + ), + (self.mutable_l2, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mutable_l2, + self.mutable_l1, + DivisionExpression((self.mutable_l2, self.mon_npv)), + ), + ( + self.mutable_l2, + self.mutable_l2, + DivisionExpression((self.mutable_l2, self.mutable_l2)), + ), + (self.mutable_l2, self.param0, ZeroDivisionError), + (self.mutable_l2, self.param1, self.mutable_l2), + # 20: + ( + self.mutable_l2, + self.mutable_l3, + DivisionExpression((self.mutable_l2, self.npv)), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_param0(self): + tests = [ + (self.param0, self.invalid, NotImplemented), + (self.param0, self.asbinary, DivisionExpression((0, self.bin))), + (self.param0, self.zero, ZeroDivisionError), + (self.param0, self.one, 0.0), + # 4: + (self.param0, self.native, 0.0), + (self.param0, self.npv, NPV_DivisionExpression((0, self.npv))), + (self.param0, self.param, 0.0), + (self.param0, self.param_mut, NPV_DivisionExpression((0, self.param_mut))), + # 8: + (self.param0, self.var, DivisionExpression((0, self.var))), + (self.param0, self.mon_native, DivisionExpression((0, self.mon_native))), + (self.param0, self.mon_param, DivisionExpression((0, self.mon_param))), + (self.param0, self.mon_npv, DivisionExpression((0, self.mon_npv))), + # 12: + (self.param0, self.linear, DivisionExpression((0, self.linear))), + (self.param0, self.sum, DivisionExpression((0, self.sum))), + (self.param0, self.other, DivisionExpression((0, self.other))), + (self.param0, self.mutable_l0, ZeroDivisionError), + # 16: + (self.param0, self.mutable_l1, DivisionExpression((0, self.mon_npv))), + (self.param0, self.mutable_l2, DivisionExpression((0, self.mutable_l2))), + (self.param0, self.param0, ZeroDivisionError), + (self.param0, self.param1, 0.0), + # 20: + (self.param0, self.mutable_l3, NPV_DivisionExpression((0, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_param1(self): + tests = [ + (self.param1, self.invalid, NotImplemented), + (self.param1, self.asbinary, DivisionExpression((1, self.bin))), + (self.param1, self.zero, ZeroDivisionError), + (self.param1, self.one, 1.0), + # 4: + (self.param1, self.native, 0.2), + (self.param1, self.npv, NPV_DivisionExpression((1, self.npv))), + (self.param1, self.param, 1 / 6), + (self.param1, self.param_mut, NPV_DivisionExpression((1, self.param_mut))), + # 8: + (self.param1, self.var, DivisionExpression((1, self.var))), + (self.param1, self.mon_native, DivisionExpression((1, self.mon_native))), + (self.param1, self.mon_param, DivisionExpression((1, self.mon_param))), + (self.param1, self.mon_npv, DivisionExpression((1, self.mon_npv))), + # 12: + (self.param1, self.linear, DivisionExpression((1, self.linear))), + (self.param1, self.sum, DivisionExpression((1, self.sum))), + (self.param1, self.other, DivisionExpression((1, self.other))), + (self.param1, self.mutable_l0, ZeroDivisionError), + # 16: + (self.param1, self.mutable_l1, DivisionExpression((1, self.mon_npv))), + (self.param1, self.mutable_l2, DivisionExpression((1, self.mutable_l2))), + (self.param1, self.param0, ZeroDivisionError), + (self.param1, self.param1, 1.0), + # 20: + (self.param1, self.mutable_l3, NPV_DivisionExpression((1, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mutable_l3(self): + tests = [ + (self.mutable_l3, self.invalid, NotImplemented), + (self.mutable_l3, self.asbinary, DivisionExpression((self.npv, self.bin))), + (self.mutable_l3, self.zero, ZeroDivisionError), + (self.mutable_l3, self.one, self.npv), + # 4: + (self.mutable_l3, self.native, NPV_DivisionExpression((self.npv, 5))), + (self.mutable_l3, self.npv, NPV_DivisionExpression((self.npv, self.npv))), + (self.mutable_l3, self.param, NPV_DivisionExpression((self.npv, 6))), + ( + self.mutable_l3, + self.param_mut, + NPV_DivisionExpression((self.npv, self.param_mut)), + ), + # 8: + (self.mutable_l3, self.var, DivisionExpression((self.npv, self.var))), + ( + self.mutable_l3, + self.mon_native, + DivisionExpression((self.npv, self.mon_native)), + ), + ( + self.mutable_l3, + self.mon_param, + DivisionExpression((self.npv, self.mon_param)), + ), + ( + self.mutable_l3, + self.mon_npv, + DivisionExpression((self.npv, self.mon_npv)), + ), + # 12: + (self.mutable_l3, self.linear, DivisionExpression((self.npv, self.linear))), + (self.mutable_l3, self.sum, DivisionExpression((self.npv, self.sum))), + (self.mutable_l3, self.other, DivisionExpression((self.npv, self.other))), + (self.mutable_l3, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mutable_l3, + self.mutable_l1, + DivisionExpression((self.npv, self.mon_npv)), + ), + ( + self.mutable_l3, + self.mutable_l2, + DivisionExpression((self.npv, self.mutable_l2)), + ), + (self.mutable_l3, self.param0, ZeroDivisionError), + (self.mutable_l3, self.param1, self.npv), + # 20: + ( + self.mutable_l3, + self.mutable_l3, + NPV_DivisionExpression((self.npv, self.npv)), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + # + # + # EXPONENTIATION + # + # + + def test_pow_invalid(self): + tests = [ + (self.invalid, self.invalid, NotImplemented), + (self.invalid, self.asbinary, NotImplemented), + (self.invalid, self.zero, NotImplemented), + (self.invalid, self.one, NotImplemented), + # 4: + (self.invalid, self.native, NotImplemented), + (self.invalid, self.npv, NotImplemented), + (self.invalid, self.param, NotImplemented), + (self.invalid, self.param_mut, NotImplemented), + # 8: + (self.invalid, self.var, NotImplemented), + (self.invalid, self.mon_native, NotImplemented), + (self.invalid, self.mon_param, NotImplemented), + (self.invalid, self.mon_npv, NotImplemented), + # 12: + (self.invalid, self.linear, NotImplemented), + (self.invalid, self.sum, NotImplemented), + (self.invalid, self.other, NotImplemented), + (self.invalid, self.mutable_l0, NotImplemented), + # 16: + (self.invalid, self.mutable_l1, NotImplemented), + (self.invalid, self.mutable_l2, NotImplemented), + (self.invalid, self.param0, NotImplemented), + (self.invalid, self.param1, NotImplemented), + # 20: + (self.invalid, self.mutable_l3, NotImplemented), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_asbinary(self): + tests = [ + (self.asbinary, self.invalid, NotImplemented), + # BooleanVar objects do not support exponentiation + (self.asbinary, self.asbinary, NotImplemented), + (self.asbinary, self.zero, PowExpression((self.bin, 0))), + (self.asbinary, self.one, self.bin), + # 4: + (self.asbinary, self.native, PowExpression((self.bin, 5))), + (self.asbinary, self.npv, PowExpression((self.bin, self.npv))), + (self.asbinary, self.param, PowExpression((self.bin, 6))), + (self.asbinary, self.param_mut, PowExpression((self.bin, self.param_mut))), + # 8: + (self.asbinary, self.var, PowExpression((self.bin, self.var))), + ( + self.asbinary, + self.mon_native, + PowExpression((self.bin, self.mon_native)), + ), + (self.asbinary, self.mon_param, PowExpression((self.bin, self.mon_param))), + (self.asbinary, self.mon_npv, PowExpression((self.bin, self.mon_npv))), + # 12: + (self.asbinary, self.linear, PowExpression((self.bin, self.linear))), + (self.asbinary, self.sum, PowExpression((self.bin, self.sum))), + (self.asbinary, self.other, PowExpression((self.bin, self.other))), + (self.asbinary, self.mutable_l0, PowExpression((self.bin, 0))), + # 16: + (self.asbinary, self.mutable_l1, PowExpression((self.bin, self.mon_npv))), + ( + self.asbinary, + self.mutable_l2, + PowExpression((self.bin, self.mutable_l2)), + ), + (self.asbinary, self.param0, PowExpression((self.bin, 0))), + (self.asbinary, self.param1, self.bin), + # 20: + (self.asbinary, self.mutable_l3, PowExpression((self.bin, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_zero(self): + tests = [ + (self.zero, self.invalid, NotImplemented), + (self.zero, self.asbinary, PowExpression((0, self.bin))), + (self.zero, self.zero, 1), + (self.zero, self.one, 0), + # 4: + (self.zero, self.native, 0), + (self.zero, self.npv, NPV_PowExpression((0, self.npv))), + (self.zero, self.param, 0), + (self.zero, self.param_mut, NPV_PowExpression((0, self.param_mut))), + # 8: + (self.zero, self.var, PowExpression((0, self.var))), + (self.zero, self.mon_native, PowExpression((0, self.mon_native))), + (self.zero, self.mon_param, PowExpression((0, self.mon_param))), + (self.zero, self.mon_npv, PowExpression((0, self.mon_npv))), + # 12: + (self.zero, self.linear, PowExpression((0, self.linear))), + (self.zero, self.sum, PowExpression((0, self.sum))), + (self.zero, self.other, PowExpression((0, self.other))), + (self.zero, self.mutable_l0, 1), + # 16: + (self.zero, self.mutable_l1, PowExpression((0, self.mon_npv))), + (self.zero, self.mutable_l2, PowExpression((0, self.mutable_l2))), + (self.zero, self.param0, 1), + (self.zero, self.param1, 0), + # 20: + (self.zero, self.mutable_l3, NPV_PowExpression((0, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_one(self): + tests = [ + (self.one, self.invalid, NotImplemented), + (self.one, self.asbinary, PowExpression((1, self.bin))), + (self.one, self.zero, 1), + (self.one, self.one, 1), + # 4: + (self.one, self.native, 1), + (self.one, self.npv, NPV_PowExpression((1, self.npv))), + (self.one, self.param, 1), + (self.one, self.param_mut, NPV_PowExpression((1, self.param_mut))), + # 8: + (self.one, self.var, PowExpression((1, self.var))), + (self.one, self.mon_native, PowExpression((1, self.mon_native))), + (self.one, self.mon_param, PowExpression((1, self.mon_param))), + (self.one, self.mon_npv, PowExpression((1, self.mon_npv))), + # 12: + (self.one, self.linear, PowExpression((1, self.linear))), + (self.one, self.sum, PowExpression((1, self.sum))), + (self.one, self.other, PowExpression((1, self.other))), + (self.one, self.mutable_l0, 1), + # 16: + (self.one, self.mutable_l1, PowExpression((1, self.mon_npv))), + (self.one, self.mutable_l2, PowExpression((1, self.mutable_l2))), + (self.one, self.param0, 1), + (self.one, self.param1, 1), + # 20: + (self.one, self.mutable_l3, NPV_PowExpression((1, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_native(self): + tests = [ + (self.native, self.invalid, NotImplemented), + (self.native, self.asbinary, PowExpression((5, self.bin))), + (self.native, self.zero, 1), + (self.native, self.one, 5), + # 4: + (self.native, self.native, 3125), + (self.native, self.npv, NPV_PowExpression((5, self.npv))), + (self.native, self.param, 15625), + (self.native, self.param_mut, NPV_PowExpression((5, self.param_mut))), + # 8: + (self.native, self.var, PowExpression((5, self.var))), + (self.native, self.mon_native, PowExpression((5, self.mon_native))), + (self.native, self.mon_param, PowExpression((5, self.mon_param))), + (self.native, self.mon_npv, PowExpression((5, self.mon_npv))), + # 12: + (self.native, self.linear, PowExpression((5, self.linear))), + (self.native, self.sum, PowExpression((5, self.sum))), + (self.native, self.other, PowExpression((5, self.other))), + (self.native, self.mutable_l0, 1), + # 16: + (self.native, self.mutable_l1, PowExpression((5, self.mon_npv))), + (self.native, self.mutable_l2, PowExpression((5, self.mutable_l2))), + (self.native, self.param0, 1), + (self.native, self.param1, 5), + # 20: + (self.native, self.mutable_l3, NPV_PowExpression((5, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_npv(self): + tests = [ + (self.npv, self.invalid, NotImplemented), + (self.npv, self.asbinary, PowExpression((self.npv, self.bin))), + (self.npv, self.zero, NPV_PowExpression((self.npv, 0))), + (self.npv, self.one, self.npv), + # 4: + (self.npv, self.native, NPV_PowExpression((self.npv, 5))), + (self.npv, self.npv, NPV_PowExpression((self.npv, self.npv))), + (self.npv, self.param, NPV_PowExpression((self.npv, 6))), + (self.npv, self.param_mut, NPV_PowExpression((self.npv, self.param_mut))), + # 8: + (self.npv, self.var, PowExpression((self.npv, self.var))), + (self.npv, self.mon_native, PowExpression((self.npv, self.mon_native))), + (self.npv, self.mon_param, PowExpression((self.npv, self.mon_param))), + (self.npv, self.mon_npv, PowExpression((self.npv, self.mon_npv))), + # 12: + (self.npv, self.linear, PowExpression((self.npv, self.linear))), + (self.npv, self.sum, PowExpression((self.npv, self.sum))), + (self.npv, self.other, PowExpression((self.npv, self.other))), + (self.npv, self.mutable_l0, NPV_PowExpression((self.npv, 0))), + # 16: + (self.npv, self.mutable_l1, PowExpression((self.npv, self.mon_npv))), + (self.npv, self.mutable_l2, PowExpression((self.npv, self.mutable_l2))), + (self.npv, self.param0, NPV_PowExpression((self.npv, 0))), + (self.npv, self.param1, self.npv), + # 20: + (self.npv, self.mutable_l3, NPV_PowExpression((self.npv, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_param(self): + tests = [ + (self.param, self.invalid, NotImplemented), + (self.param, self.asbinary, PowExpression((6, self.bin))), + (self.param, self.zero, 1), + (self.param, self.one, 6), + # 4: + (self.param, self.native, 7776), + (self.param, self.npv, NPV_PowExpression((6, self.npv))), + (self.param, self.param, 46656), + (self.param, self.param_mut, NPV_PowExpression((6, self.param_mut))), + # 8: + (self.param, self.var, PowExpression((6, self.var))), + (self.param, self.mon_native, PowExpression((6, self.mon_native))), + (self.param, self.mon_param, PowExpression((6, self.mon_param))), + (self.param, self.mon_npv, PowExpression((6, self.mon_npv))), + # 12: + (self.param, self.linear, PowExpression((6, self.linear))), + (self.param, self.sum, PowExpression((6, self.sum))), + (self.param, self.other, PowExpression((6, self.other))), + (self.param, self.mutable_l0, 1), + # 16: + (self.param, self.mutable_l1, PowExpression((6, self.mon_npv))), + (self.param, self.mutable_l2, PowExpression((6, self.mutable_l2))), + (self.param, self.param0, 1), + (self.param, self.param1, 6), + # 20: + (self.param, self.mutable_l3, NPV_PowExpression((6, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_param_mut(self): + tests = [ + (self.param_mut, self.invalid, NotImplemented), + (self.param_mut, self.asbinary, PowExpression((self.param_mut, self.bin))), + (self.param_mut, self.zero, NPV_PowExpression((self.param_mut, 0))), + (self.param_mut, self.one, self.param_mut), + # 4: + (self.param_mut, self.native, NPV_PowExpression((self.param_mut, 5))), + (self.param_mut, self.npv, NPV_PowExpression((self.param_mut, self.npv))), + (self.param_mut, self.param, NPV_PowExpression((self.param_mut, 6))), + ( + self.param_mut, + self.param_mut, + NPV_PowExpression((self.param_mut, self.param_mut)), + ), + # 8: + (self.param_mut, self.var, PowExpression((self.param_mut, self.var))), + ( + self.param_mut, + self.mon_native, + PowExpression((self.param_mut, self.mon_native)), + ), + ( + self.param_mut, + self.mon_param, + PowExpression((self.param_mut, self.mon_param)), + ), + ( + self.param_mut, + self.mon_npv, + PowExpression((self.param_mut, self.mon_npv)), + ), + # 12: + (self.param_mut, self.linear, PowExpression((self.param_mut, self.linear))), + (self.param_mut, self.sum, PowExpression((self.param_mut, self.sum))), + (self.param_mut, self.other, PowExpression((self.param_mut, self.other))), + (self.param_mut, self.mutable_l0, NPV_PowExpression((self.param_mut, 0))), + # 16: + ( + self.param_mut, + self.mutable_l1, + PowExpression((self.param_mut, self.mon_npv)), + ), + ( + self.param_mut, + self.mutable_l2, + PowExpression((self.param_mut, self.mutable_l2)), + ), + (self.param_mut, self.param0, NPV_PowExpression((self.param_mut, 0))), + (self.param_mut, self.param1, self.param_mut), + # 20: + ( + self.param_mut, + self.mutable_l3, + NPV_PowExpression((self.param_mut, self.npv)), + ), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_var(self): + tests = [ + (self.var, self.invalid, NotImplemented), + (self.var, self.asbinary, PowExpression((self.var, self.bin))), + (self.var, self.zero, PowExpression((self.var, 0))), + (self.var, self.one, self.var), + # 4: + (self.var, self.native, PowExpression((self.var, 5))), + (self.var, self.npv, PowExpression((self.var, self.npv))), + (self.var, self.param, PowExpression((self.var, 6))), + (self.var, self.param_mut, PowExpression((self.var, self.param_mut))), + # 8: + (self.var, self.var, PowExpression((self.var, self.var))), + (self.var, self.mon_native, PowExpression((self.var, self.mon_native))), + (self.var, self.mon_param, PowExpression((self.var, self.mon_param))), + (self.var, self.mon_npv, PowExpression((self.var, self.mon_npv))), + # 12: + (self.var, self.linear, PowExpression((self.var, self.linear))), + (self.var, self.sum, PowExpression((self.var, self.sum))), + (self.var, self.other, PowExpression((self.var, self.other))), + (self.var, self.mutable_l0, PowExpression((self.var, 0))), + # 16: + (self.var, self.mutable_l1, PowExpression((self.var, self.mon_npv))), + (self.var, self.mutable_l2, PowExpression((self.var, self.mutable_l2))), + (self.var, self.param0, PowExpression((self.var, 0))), + (self.var, self.param1, self.var), + # 20: + (self.var, self.mutable_l3, PowExpression((self.var, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mon_native(self): + tests = [ + (self.mon_native, self.invalid, NotImplemented), + ( + self.mon_native, + self.asbinary, + PowExpression((self.mon_native, self.bin)), + ), + (self.mon_native, self.zero, PowExpression((self.mon_native, 0))), + (self.mon_native, self.one, self.mon_native), + # 4: + (self.mon_native, self.native, PowExpression((self.mon_native, 5))), + (self.mon_native, self.npv, PowExpression((self.mon_native, self.npv))), + (self.mon_native, self.param, PowExpression((self.mon_native, 6))), + ( + self.mon_native, + self.param_mut, + PowExpression((self.mon_native, self.param_mut)), + ), + # 8: + (self.mon_native, self.var, PowExpression((self.mon_native, self.var))), + ( + self.mon_native, + self.mon_native, + PowExpression((self.mon_native, self.mon_native)), + ), + ( + self.mon_native, + self.mon_param, + PowExpression((self.mon_native, self.mon_param)), + ), + ( + self.mon_native, + self.mon_npv, + PowExpression((self.mon_native, self.mon_npv)), + ), + # 12: + ( + self.mon_native, + self.linear, + PowExpression((self.mon_native, self.linear)), + ), + (self.mon_native, self.sum, PowExpression((self.mon_native, self.sum))), + (self.mon_native, self.other, PowExpression((self.mon_native, self.other))), + (self.mon_native, self.mutable_l0, PowExpression((self.mon_native, 0))), + # 16: + ( + self.mon_native, + self.mutable_l1, + PowExpression((self.mon_native, self.mon_npv)), + ), + ( + self.mon_native, + self.mutable_l2, + PowExpression((self.mon_native, self.mutable_l2)), + ), + (self.mon_native, self.param0, PowExpression((self.mon_native, 0))), + (self.mon_native, self.param1, self.mon_native), + # 20: + ( + self.mon_native, + self.mutable_l3, + PowExpression((self.mon_native, self.npv)), + ), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mon_param(self): + tests = [ + (self.mon_param, self.invalid, NotImplemented), + (self.mon_param, self.asbinary, PowExpression((self.mon_param, self.bin))), + (self.mon_param, self.zero, PowExpression((self.mon_param, 0))), + (self.mon_param, self.one, self.mon_param), + # 4: + (self.mon_param, self.native, PowExpression((self.mon_param, 5))), + (self.mon_param, self.npv, PowExpression((self.mon_param, self.npv))), + (self.mon_param, self.param, PowExpression((self.mon_param, 6))), + ( + self.mon_param, + self.param_mut, + PowExpression((self.mon_param, self.param_mut)), + ), + # 8: + (self.mon_param, self.var, PowExpression((self.mon_param, self.var))), + ( + self.mon_param, + self.mon_native, + PowExpression((self.mon_param, self.mon_native)), + ), + ( + self.mon_param, + self.mon_param, + PowExpression((self.mon_param, self.mon_param)), + ), + ( + self.mon_param, + self.mon_npv, + PowExpression((self.mon_param, self.mon_npv)), + ), + # 12: + (self.mon_param, self.linear, PowExpression((self.mon_param, self.linear))), + (self.mon_param, self.sum, PowExpression((self.mon_param, self.sum))), + (self.mon_param, self.other, PowExpression((self.mon_param, self.other))), + (self.mon_param, self.mutable_l0, PowExpression((self.mon_param, 0))), + # 16: + ( + self.mon_param, + self.mutable_l1, + PowExpression((self.mon_param, self.mon_npv)), + ), + ( + self.mon_param, + self.mutable_l2, + PowExpression((self.mon_param, self.mutable_l2)), + ), + (self.mon_param, self.param0, PowExpression((self.mon_param, 0))), + (self.mon_param, self.param1, self.mon_param), + # 20: + ( + self.mon_param, + self.mutable_l3, + PowExpression((self.mon_param, self.npv)), + ), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mon_npv(self): + tests = [ + (self.mon_npv, self.invalid, NotImplemented), + (self.mon_npv, self.asbinary, PowExpression((self.mon_npv, self.bin))), + (self.mon_npv, self.zero, PowExpression((self.mon_npv, 0))), + (self.mon_npv, self.one, self.mon_npv), + # 4: + (self.mon_npv, self.native, PowExpression((self.mon_npv, 5))), + (self.mon_npv, self.npv, PowExpression((self.mon_npv, self.npv))), + (self.mon_npv, self.param, PowExpression((self.mon_npv, 6))), + ( + self.mon_npv, + self.param_mut, + PowExpression((self.mon_npv, self.param_mut)), + ), + # 8: + (self.mon_npv, self.var, PowExpression((self.mon_npv, self.var))), + ( + self.mon_npv, + self.mon_native, + PowExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mon_npv, + self.mon_param, + PowExpression((self.mon_npv, self.mon_param)), + ), + (self.mon_npv, self.mon_npv, PowExpression((self.mon_npv, self.mon_npv))), + # 12: + (self.mon_npv, self.linear, PowExpression((self.mon_npv, self.linear))), + (self.mon_npv, self.sum, PowExpression((self.mon_npv, self.sum))), + (self.mon_npv, self.other, PowExpression((self.mon_npv, self.other))), + (self.mon_npv, self.mutable_l0, PowExpression((self.mon_npv, 0))), + # 16: + ( + self.mon_npv, + self.mutable_l1, + PowExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mon_npv, + self.mutable_l2, + PowExpression((self.mon_npv, self.mutable_l2)), + ), + (self.mon_npv, self.param0, PowExpression((self.mon_npv, 0))), + (self.mon_npv, self.param1, self.mon_npv), + # 20: + (self.mon_npv, self.mutable_l3, PowExpression((self.mon_npv, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_linear(self): + tests = [ + (self.linear, self.invalid, NotImplemented), + (self.linear, self.asbinary, PowExpression((self.linear, self.bin))), + (self.linear, self.zero, PowExpression((self.linear, 0))), + (self.linear, self.one, self.linear), + # 4: + (self.linear, self.native, PowExpression((self.linear, 5))), + (self.linear, self.npv, PowExpression((self.linear, self.npv))), + (self.linear, self.param, PowExpression((self.linear, 6))), + (self.linear, self.param_mut, PowExpression((self.linear, self.param_mut))), + # 8: + (self.linear, self.var, PowExpression((self.linear, self.var))), + ( + self.linear, + self.mon_native, + PowExpression((self.linear, self.mon_native)), + ), + (self.linear, self.mon_param, PowExpression((self.linear, self.mon_param))), + (self.linear, self.mon_npv, PowExpression((self.linear, self.mon_npv))), + # 12: + (self.linear, self.linear, PowExpression((self.linear, self.linear))), + (self.linear, self.sum, PowExpression((self.linear, self.sum))), + (self.linear, self.other, PowExpression((self.linear, self.other))), + (self.linear, self.mutable_l0, PowExpression((self.linear, 0))), + # 16: + (self.linear, self.mutable_l1, PowExpression((self.linear, self.mon_npv))), + ( + self.linear, + self.mutable_l2, + PowExpression((self.linear, self.mutable_l2)), + ), + (self.linear, self.param0, PowExpression((self.linear, 0))), + (self.linear, self.param1, self.linear), + # 20: + (self.linear, self.mutable_l3, PowExpression((self.linear, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_sum(self): + tests = [ + (self.sum, self.invalid, NotImplemented), + (self.sum, self.asbinary, PowExpression((self.sum, self.bin))), + (self.sum, self.zero, PowExpression((self.sum, 0))), + (self.sum, self.one, self.sum), + # 4: + (self.sum, self.native, PowExpression((self.sum, 5))), + (self.sum, self.npv, PowExpression((self.sum, self.npv))), + (self.sum, self.param, PowExpression((self.sum, 6))), + (self.sum, self.param_mut, PowExpression((self.sum, self.param_mut))), + # 8: + (self.sum, self.var, PowExpression((self.sum, self.var))), + (self.sum, self.mon_native, PowExpression((self.sum, self.mon_native))), + (self.sum, self.mon_param, PowExpression((self.sum, self.mon_param))), + (self.sum, self.mon_npv, PowExpression((self.sum, self.mon_npv))), + # 12: + (self.sum, self.linear, PowExpression((self.sum, self.linear))), + (self.sum, self.sum, PowExpression((self.sum, self.sum))), + (self.sum, self.other, PowExpression((self.sum, self.other))), + (self.sum, self.mutable_l0, PowExpression((self.sum, 0))), + # 16: + (self.sum, self.mutable_l1, PowExpression((self.sum, self.mon_npv))), + (self.sum, self.mutable_l2, PowExpression((self.sum, self.mutable_l2))), + (self.sum, self.param0, PowExpression((self.sum, 0))), + (self.sum, self.param1, self.sum), + # 20: + (self.sum, self.mutable_l3, PowExpression((self.sum, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_other(self): + tests = [ + (self.other, self.invalid, NotImplemented), + (self.other, self.asbinary, PowExpression((self.other, self.bin))), + (self.other, self.zero, PowExpression((self.other, 0))), + (self.other, self.one, self.other), + # 4: + (self.other, self.native, PowExpression((self.other, 5))), + (self.other, self.npv, PowExpression((self.other, self.npv))), + (self.other, self.param, PowExpression((self.other, 6))), + (self.other, self.param_mut, PowExpression((self.other, self.param_mut))), + # 8: + (self.other, self.var, PowExpression((self.other, self.var))), + (self.other, self.mon_native, PowExpression((self.other, self.mon_native))), + (self.other, self.mon_param, PowExpression((self.other, self.mon_param))), + (self.other, self.mon_npv, PowExpression((self.other, self.mon_npv))), + # 12: + (self.other, self.linear, PowExpression((self.other, self.linear))), + (self.other, self.sum, PowExpression((self.other, self.sum))), + (self.other, self.other, PowExpression((self.other, self.other))), + (self.other, self.mutable_l0, PowExpression((self.other, 0))), + # 16: + (self.other, self.mutable_l1, PowExpression((self.other, self.mon_npv))), + (self.other, self.mutable_l2, PowExpression((self.other, self.mutable_l2))), + (self.other, self.param0, PowExpression((self.other, 0))), + (self.other, self.param1, self.other), + # 20: + (self.other, self.mutable_l3, PowExpression((self.other, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mutable_l0(self): + tests = [ + (self.mutable_l0, self.invalid, NotImplemented), + (self.mutable_l0, self.asbinary, PowExpression((0, self.bin))), + (self.mutable_l0, self.zero, 1), + (self.mutable_l0, self.one, 0), + # 4: + (self.mutable_l0, self.native, 0), + (self.mutable_l0, self.npv, NPV_PowExpression((0, self.npv))), + (self.mutable_l0, self.param, 0), + (self.mutable_l0, self.param_mut, NPV_PowExpression((0, self.param_mut))), + # 8: + (self.mutable_l0, self.var, PowExpression((0, self.var))), + (self.mutable_l0, self.mon_native, PowExpression((0, self.mon_native))), + (self.mutable_l0, self.mon_param, PowExpression((0, self.mon_param))), + (self.mutable_l0, self.mon_npv, PowExpression((0, self.mon_npv))), + # 12: + (self.mutable_l0, self.linear, PowExpression((0, self.linear))), + (self.mutable_l0, self.sum, PowExpression((0, self.sum))), + (self.mutable_l0, self.other, PowExpression((0, self.other))), + (self.mutable_l0, self.mutable_l0, 1), + # 16: + (self.mutable_l0, self.mutable_l1, PowExpression((0, self.mon_npv))), + (self.mutable_l0, self.mutable_l2, PowExpression((0, self.mutable_l2))), + (self.mutable_l0, self.param0, 1), + (self.mutable_l0, self.param1, 0), + # 20: + (self.mutable_l0, self.mutable_l3, NPV_PowExpression((0, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mutable_l1(self): + tests = [ + (self.mutable_l1, self.invalid, NotImplemented), + (self.mutable_l1, self.asbinary, PowExpression((self.mon_npv, self.bin))), + (self.mutable_l1, self.zero, PowExpression((self.mon_npv, 0))), + (self.mutable_l1, self.one, self.mon_npv), + # 4: + (self.mutable_l1, self.native, PowExpression((self.mon_npv, 5))), + (self.mutable_l1, self.npv, PowExpression((self.mon_npv, self.npv))), + (self.mutable_l1, self.param, PowExpression((self.mon_npv, 6))), + ( + self.mutable_l1, + self.param_mut, + PowExpression((self.mon_npv, self.param_mut)), + ), + # 8: + (self.mutable_l1, self.var, PowExpression((self.mon_npv, self.var))), + ( + self.mutable_l1, + self.mon_native, + PowExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mutable_l1, + self.mon_param, + PowExpression((self.mon_npv, self.mon_param)), + ), + ( + self.mutable_l1, + self.mon_npv, + PowExpression((self.mon_npv, self.mon_npv)), + ), + # 12: + (self.mutable_l1, self.linear, PowExpression((self.mon_npv, self.linear))), + (self.mutable_l1, self.sum, PowExpression((self.mon_npv, self.sum))), + (self.mutable_l1, self.other, PowExpression((self.mon_npv, self.other))), + (self.mutable_l1, self.mutable_l0, PowExpression((self.mon_npv, 0))), + # 16: + ( + self.mutable_l1, + self.mutable_l1, + PowExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mutable_l1, + self.mutable_l2, + PowExpression((self.mon_npv, self.mutable_l2)), + ), + (self.mutable_l1, self.param0, PowExpression((self.mon_npv, 0))), + (self.mutable_l1, self.param1, self.mon_npv), + # 20: + (self.mutable_l1, self.mutable_l3, PowExpression((self.mon_npv, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mutable_l2(self): + tests = [ + (self.mutable_l2, self.invalid, NotImplemented), + ( + self.mutable_l2, + self.asbinary, + PowExpression((self.mutable_l2, self.bin)), + ), + (self.mutable_l2, self.zero, PowExpression((self.mutable_l2, 0))), + (self.mutable_l2, self.one, self.mutable_l2), + # 4: + (self.mutable_l2, self.native, PowExpression((self.mutable_l2, 5))), + (self.mutable_l2, self.npv, PowExpression((self.mutable_l2, self.npv))), + (self.mutable_l2, self.param, PowExpression((self.mutable_l2, 6))), + ( + self.mutable_l2, + self.param_mut, + PowExpression((self.mutable_l2, self.param_mut)), + ), + # 8: + (self.mutable_l2, self.var, PowExpression((self.mutable_l2, self.var))), + ( + self.mutable_l2, + self.mon_native, + PowExpression((self.mutable_l2, self.mon_native)), + ), + ( + self.mutable_l2, + self.mon_param, + PowExpression((self.mutable_l2, self.mon_param)), + ), + ( + self.mutable_l2, + self.mon_npv, + PowExpression((self.mutable_l2, self.mon_npv)), + ), + # 12: + ( + self.mutable_l2, + self.linear, + PowExpression((self.mutable_l2, self.linear)), + ), + (self.mutable_l2, self.sum, PowExpression((self.mutable_l2, self.sum))), + (self.mutable_l2, self.other, PowExpression((self.mutable_l2, self.other))), + (self.mutable_l2, self.mutable_l0, PowExpression((self.mutable_l2, 0))), + # 16: + ( + self.mutable_l2, + self.mutable_l1, + PowExpression((self.mutable_l2, self.mon_npv)), + ), + ( + self.mutable_l2, + self.mutable_l2, + PowExpression((self.mutable_l2, self.mutable_l2)), + ), + (self.mutable_l2, self.param0, PowExpression((self.mutable_l2, 0))), + (self.mutable_l2, self.param1, self.mutable_l2), + # 20: + ( + self.mutable_l2, + self.mutable_l3, + PowExpression((self.mutable_l2, self.npv)), + ), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_param0(self): + tests = [ + (self.param0, self.invalid, NotImplemented), + (self.param0, self.asbinary, PowExpression((0, self.bin))), + (self.param0, self.zero, 1), + (self.param0, self.one, 0), + # 4: + (self.param0, self.native, 0), + (self.param0, self.npv, NPV_PowExpression((0, self.npv))), + (self.param0, self.param, 0), + (self.param0, self.param_mut, NPV_PowExpression((0, self.param_mut))), + # 8: + (self.param0, self.var, PowExpression((0, self.var))), + (self.param0, self.mon_native, PowExpression((0, self.mon_native))), + (self.param0, self.mon_param, PowExpression((0, self.mon_param))), + (self.param0, self.mon_npv, PowExpression((0, self.mon_npv))), + # 12: + (self.param0, self.linear, PowExpression((0, self.linear))), + (self.param0, self.sum, PowExpression((0, self.sum))), + (self.param0, self.other, PowExpression((0, self.other))), + (self.param0, self.mutable_l0, 1), + # 16: + (self.param0, self.mutable_l1, PowExpression((0, self.mon_npv))), + (self.param0, self.mutable_l2, PowExpression((0, self.mutable_l2))), + (self.param0, self.param0, 1), + (self.param0, self.param1, 0), + # 20: + (self.param0, self.mutable_l3, NPV_PowExpression((0, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_param1(self): + tests = [ + (self.param1, self.invalid, NotImplemented), + (self.param1, self.asbinary, PowExpression((1, self.bin))), + (self.param1, self.zero, 1), + (self.param1, self.one, 1), + # 4: + (self.param1, self.native, 1), + (self.param1, self.npv, NPV_PowExpression((1, self.npv))), + (self.param1, self.param, 1), + (self.param1, self.param_mut, NPV_PowExpression((1, self.param_mut))), + # 8: + (self.param1, self.var, PowExpression((1, self.var))), + (self.param1, self.mon_native, PowExpression((1, self.mon_native))), + (self.param1, self.mon_param, PowExpression((1, self.mon_param))), + (self.param1, self.mon_npv, PowExpression((1, self.mon_npv))), + # 12: + (self.param1, self.linear, PowExpression((1, self.linear))), + (self.param1, self.sum, PowExpression((1, self.sum))), + (self.param1, self.other, PowExpression((1, self.other))), + (self.param1, self.mutable_l0, 1), + # 16: + (self.param1, self.mutable_l1, PowExpression((1, self.mon_npv))), + (self.param1, self.mutable_l2, PowExpression((1, self.mutable_l2))), + (self.param1, self.param0, 1), + (self.param1, self.param1, 1), + # 20: + (self.param1, self.mutable_l3, NPV_PowExpression((1, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mutable_l3(self): + tests = [ + (self.mutable_l3, self.invalid, NotImplemented), + (self.mutable_l3, self.asbinary, PowExpression((self.npv, self.bin))), + (self.mutable_l3, self.zero, NPV_PowExpression((self.npv, 0))), + (self.mutable_l3, self.one, self.npv), + # 4: + (self.mutable_l3, self.native, NPV_PowExpression((self.npv, 5))), + (self.mutable_l3, self.npv, NPV_PowExpression((self.npv, self.npv))), + (self.mutable_l3, self.param, NPV_PowExpression((self.npv, 6))), + ( + self.mutable_l3, + self.param_mut, + NPV_PowExpression((self.npv, self.param_mut)), + ), + # 8: + (self.mutable_l3, self.var, PowExpression((self.npv, self.var))), + ( + self.mutable_l3, + self.mon_native, + PowExpression((self.npv, self.mon_native)), + ), + ( + self.mutable_l3, + self.mon_param, + PowExpression((self.npv, self.mon_param)), + ), + (self.mutable_l3, self.mon_npv, PowExpression((self.npv, self.mon_npv))), + # 12: + (self.mutable_l3, self.linear, PowExpression((self.npv, self.linear))), + (self.mutable_l3, self.sum, PowExpression((self.npv, self.sum))), + (self.mutable_l3, self.other, PowExpression((self.npv, self.other))), + (self.mutable_l3, self.mutable_l0, NPV_PowExpression((self.npv, 0))), + # 16: + (self.mutable_l3, self.mutable_l1, PowExpression((self.npv, self.mon_npv))), + ( + self.mutable_l3, + self.mutable_l2, + PowExpression((self.npv, self.mutable_l2)), + ), + (self.mutable_l3, self.param0, NPV_PowExpression((self.npv, 0))), + (self.mutable_l3, self.param1, self.npv), + # 20: + (self.mutable_l3, self.mutable_l3, NPV_PowExpression((self.npv, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + # + # + # NEGATION + # + # + + def test_neg(self): + tests = [ + (self.invalid, NotImplemented), + (self.asbinary, MonomialTermExpression((-1, self.bin))), + (self.zero, 0), + (self.one, -1), + # 4: + (self.native, -5), + (self.npv, NPV_NegationExpression((self.npv,))), + (self.param, -6), + (self.param_mut, NPV_NegationExpression((self.param_mut,))), + # 8: + (self.var, MonomialTermExpression((-1, self.var))), + (self.mon_native, self.minus_mon_native), + (self.mon_param, self.minus_mon_param), + (self.mon_npv, self.minus_mon_npv), + # 12: + (self.linear, NegationExpression((self.linear,))), + (self.sum, NegationExpression((self.sum,))), + (self.other, NegationExpression((self.other,))), + (self.mutable_l0, 0), + # 16: + (self.mutable_l1, self.minus_mon_npv), + (self.mutable_l2, NegationExpression((self.mutable_l2,))), + (self.param0, 0), + (self.param1, -1), + # 20: + (self.mutable_l3, self.minus_npv), + ] + self._run_cases(tests, operator.neg) + + def test_neg_neg(self): + def _neg_neg(x): + return operator.neg(operator.neg(x)) + + tests = [ + (self.invalid, NotImplemented), + (self.asbinary, MonomialTermExpression((1, self.bin))), + (self.zero, 0), + (self.one, 1), + # 4: + (self.native, 5), + (self.npv, self.npv), + (self.param, 6), + (self.param_mut, self.param_mut), + # 8: + (self.var, MonomialTermExpression((1, self.var))), + (self.mon_native, self.mon_native), + (self.mon_param, self.mon_param), + (self.mon_npv, self.mon_npv), + # 12: + (self.linear, self.linear), + (self.sum, self.sum), + (self.other, self.other), + (self.mutable_l0, 0), + # 16: + (self.mutable_l1, self.mon_npv), + (self.mutable_l2, self.mutable_l2), + (self.param0, 0), + (self.param1, 1), + # 20: + (self.mutable_l3, self.npv), + ] + self._run_cases(tests, _neg_neg) + + # + # + # ABSOLUTE VALUE + # + # + + def test_abs(self): + tests = [ + (self.invalid, NotImplemented), + (self.asbinary, AbsExpression((self.bin,))), + (self.zero, 0), + (self.one, 1), + # 4: + (self.native, 5), + (self.npv, NPV_AbsExpression((self.npv,))), + (self.param, 6), + (self.param_mut, NPV_AbsExpression((self.param_mut,))), + # 8: + (self.var, AbsExpression((self.var,))), + (self.mon_native, AbsExpression((self.mon_native,))), + (self.mon_param, AbsExpression((self.mon_param,))), + (self.mon_npv, AbsExpression((self.mon_npv,))), + # 12: + (self.linear, AbsExpression((self.linear,))), + (self.sum, AbsExpression((self.sum,))), + (self.other, AbsExpression((self.other,))), + (self.mutable_l0, 0), + # 16: + (self.mutable_l1, AbsExpression((self.mon_npv,))), + (self.mutable_l2, AbsExpression((self.mutable_l2,))), + (self.param0, 0), + (self.param1, 1), + # 20: + (self.mutable_l3, NPV_AbsExpression((self.npv,))), + ] + self._run_cases(tests, operator.abs) + + # + # + # UNARY FUNCTION + # + # + + def test_unary(self): + SKIP_0 = {'log', 'log10', 'acosh'} + SKIP_1 = {'atanh'} + SKIP_5 = {'asin', 'acos', 'atanh'} + SKIP_6 = SKIP_5 + for op, name, fcn in [ + (EXPR.ceil, 'ceil', math.ceil), + (EXPR.floor, 'floor', math.floor), + (EXPR.exp, 'exp', math.exp), + (EXPR.log, 'log', math.log), + (EXPR.log10, 'log10', math.log10), + (EXPR.sqrt, 'sqrt', math.sqrt), + (EXPR.sin, 'sin', math.sin), + (EXPR.cos, 'cos', math.cos), + (EXPR.tan, 'tan', math.tan), + (EXPR.asin, 'asin', math.asin), + (EXPR.acos, 'acos', math.acos), + (EXPR.atan, 'atan', math.atan), + (EXPR.sinh, 'sinh', math.sinh), + (EXPR.cosh, 'cosh', math.cosh), + (EXPR.tanh, 'tanh', math.tanh), + (EXPR.asinh, 'asinh', math.asinh), + (EXPR.acosh, 'acosh', math.acosh), + (EXPR.atanh, 'atanh', math.atanh), + ]: + tests = [ + (self.invalid, NotImplemented), + (self.asbinary, UnaryFunctionExpression((self.bin,), name, fcn)), + (self.zero, ValueError if name in SKIP_0 else fcn(0)), + (self.one, ValueError if name in SKIP_1 else fcn(1)), + # 4: + (self.native, ValueError if name in SKIP_5 else fcn(5)), + (self.npv, NPV_UnaryFunctionExpression((self.npv,), name, fcn)), + (self.param, ValueError if name in SKIP_6 else fcn(6)), + ( + self.param_mut, + NPV_UnaryFunctionExpression((self.param_mut,), name, fcn), + ), + # 8: + (self.var, UnaryFunctionExpression((self.var,), name, fcn)), + ( + self.mon_native, + UnaryFunctionExpression((self.mon_native,), name, fcn), + ), + (self.mon_param, UnaryFunctionExpression((self.mon_param,), name, fcn)), + (self.mon_npv, UnaryFunctionExpression((self.mon_npv,), name, fcn)), + # 12: + (self.linear, UnaryFunctionExpression((self.linear,), name, fcn)), + (self.sum, UnaryFunctionExpression((self.sum,), name, fcn)), + (self.other, UnaryFunctionExpression((self.other,), name, fcn)), + (self.mutable_l0, ValueError if name in SKIP_0 else fcn(0)), + # 16: + (self.mutable_l1, UnaryFunctionExpression((self.mon_npv,), name, fcn)), + ( + self.mutable_l2, + UnaryFunctionExpression((self.mutable_l2,), name, fcn), + ), + (self.param0, ValueError if name in SKIP_0 else fcn(0)), + (self.param1, ValueError if name in SKIP_1 else fcn(1)), + # 20: + (self.mutable_l3, NPV_UnaryFunctionExpression((self.npv,), name, fcn)), + ] + self._run_cases(tests, op) + + # + # + # MUTABLE SUM IADD EXPRESSIONS + # + # + + def test_mutable_nvp_iadd(self): + mutable_npv = _MutableNPVSumExpression([]) + tests = [ + (mutable_npv, self.invalid, NotImplemented), + (mutable_npv, self.asbinary, _MutableLinearExpression([self.mon_bin])), + (mutable_npv, self.zero, _MutableNPVSumExpression([])), + (mutable_npv, self.one, _MutableNPVSumExpression([1])), + # 4: + (mutable_npv, self.native, _MutableNPVSumExpression([5])), + (mutable_npv, self.npv, _MutableNPVSumExpression([self.npv])), + (mutable_npv, self.param, _MutableNPVSumExpression([6])), + (mutable_npv, self.param_mut, _MutableNPVSumExpression([self.param_mut])), + # 8: + (mutable_npv, self.var, _MutableLinearExpression([self.mon_var])), + (mutable_npv, self.mon_native, _MutableLinearExpression([self.mon_native])), + (mutable_npv, self.mon_param, _MutableLinearExpression([self.mon_param])), + (mutable_npv, self.mon_npv, _MutableLinearExpression([self.mon_npv])), + # 12: + (mutable_npv, self.linear, _MutableLinearExpression(self.linear.args)), + (mutable_npv, self.sum, _MutableSumExpression(self.sum.args)), + (mutable_npv, self.other, _MutableSumExpression([self.other])), + (mutable_npv, self.mutable_l0, _MutableNPVSumExpression([])), + # 16: + ( + mutable_npv, + self.mutable_l1, + _MutableLinearExpression(self.mutable_l1.args), + ), + (mutable_npv, self.mutable_l2, _MutableSumExpression(self.mutable_l2.args)), + (mutable_npv, self.param0, _MutableNPVSumExpression([])), + (mutable_npv, self.param1, _MutableNPVSumExpression([1])), + # 20: + (mutable_npv, self.mutable_l3, _MutableNPVSumExpression([self.npv])), + ] + self._run_iadd_cases(tests, operator.iadd) + + mutable_npv = _MutableNPVSumExpression([10]) + tests = [ + (mutable_npv, self.invalid, NotImplemented), + (mutable_npv, self.asbinary, _MutableLinearExpression([10, self.mon_bin])), + (mutable_npv, self.zero, _MutableNPVSumExpression([10])), + (mutable_npv, self.one, _MutableNPVSumExpression([10, 1])), + # 4: + (mutable_npv, self.native, _MutableNPVSumExpression([10, 5])), + (mutable_npv, self.npv, _MutableNPVSumExpression([10, self.npv])), + (mutable_npv, self.param, _MutableNPVSumExpression([10, 6])), + ( + mutable_npv, + self.param_mut, + _MutableNPVSumExpression([10, self.param_mut]), + ), + # 8: + (mutable_npv, self.var, _MutableLinearExpression([10, self.mon_var])), + ( + mutable_npv, + self.mon_native, + _MutableLinearExpression([10, self.mon_native]), + ), + ( + mutable_npv, + self.mon_param, + _MutableLinearExpression([10, self.mon_param]), + ), + (mutable_npv, self.mon_npv, _MutableLinearExpression([10, self.mon_npv])), + # 12: + ( + mutable_npv, + self.linear, + _MutableLinearExpression([10] + self.linear.args), + ), + (mutable_npv, self.sum, _MutableSumExpression([10] + self.sum.args)), + (mutable_npv, self.other, _MutableSumExpression([10, self.other])), + (mutable_npv, self.mutable_l0, _MutableNPVSumExpression([10])), + # 16: + ( + mutable_npv, + self.mutable_l1, + _MutableLinearExpression([10] + self.mutable_l1.args), + ), + ( + mutable_npv, + self.mutable_l2, + _MutableSumExpression([10] + self.mutable_l2.args), + ), + (mutable_npv, self.param0, _MutableNPVSumExpression([10])), + (mutable_npv, self.param1, _MutableNPVSumExpression([10, 1])), + # 20: + (mutable_npv, self.mutable_l3, _MutableNPVSumExpression([10, self.npv])), + ] + self._run_iadd_cases(tests, operator.iadd) + + def test_mutable_lin_iadd(self): + mutable_lin = _MutableLinearExpression([]) + tests = [ + (mutable_lin, self.invalid, NotImplemented), + (mutable_lin, self.asbinary, _MutableLinearExpression([self.mon_bin])), + (mutable_lin, self.zero, _MutableLinearExpression([])), + (mutable_lin, self.one, _MutableLinearExpression([1])), + # 4: + (mutable_lin, self.native, _MutableLinearExpression([5])), + (mutable_lin, self.npv, _MutableLinearExpression([self.npv])), + (mutable_lin, self.param, _MutableLinearExpression([6])), + (mutable_lin, self.param_mut, _MutableLinearExpression([self.param_mut])), + # 8: + (mutable_lin, self.var, _MutableLinearExpression([self.mon_var])), + (mutable_lin, self.mon_native, _MutableLinearExpression([self.mon_native])), + (mutable_lin, self.mon_param, _MutableLinearExpression([self.mon_param])), + (mutable_lin, self.mon_npv, _MutableLinearExpression([self.mon_npv])), + # 12: + (mutable_lin, self.linear, _MutableLinearExpression(self.linear.args)), + (mutable_lin, self.sum, _MutableSumExpression(self.sum.args)), + (mutable_lin, self.other, _MutableSumExpression([self.other])), + (mutable_lin, self.mutable_l0, _MutableLinearExpression([])), + # 16: + ( + mutable_lin, + self.mutable_l1, + _MutableLinearExpression(self.mutable_l1.args), + ), + (mutable_lin, self.mutable_l2, _MutableSumExpression(self.mutable_l2.args)), + (mutable_lin, self.param0, _MutableLinearExpression([])), + (mutable_lin, self.param1, _MutableLinearExpression([1])), + # 20: + (mutable_lin, self.mutable_l3, _MutableLinearExpression([self.npv])), + ] + self._run_iadd_cases(tests, operator.iadd) + + mutable_lin = _MutableLinearExpression([self.mon_bin]) + tests = [ + (mutable_lin, self.invalid, NotImplemented), + ( + mutable_lin, + self.asbinary, + _MutableLinearExpression([self.mon_bin, self.mon_bin]), + ), + (mutable_lin, self.zero, _MutableLinearExpression([self.mon_bin])), + (mutable_lin, self.one, _MutableLinearExpression([self.mon_bin, 1])), + # 4: + (mutable_lin, self.native, _MutableLinearExpression([self.mon_bin, 5])), + (mutable_lin, self.npv, _MutableLinearExpression([self.mon_bin, self.npv])), + (mutable_lin, self.param, _MutableLinearExpression([self.mon_bin, 6])), + ( + mutable_lin, + self.param_mut, + _MutableLinearExpression([self.mon_bin, self.param_mut]), + ), + # 8: + ( + mutable_lin, + self.var, + _MutableLinearExpression([self.mon_bin, self.mon_var]), + ), + ( + mutable_lin, + self.mon_native, + _MutableLinearExpression([self.mon_bin, self.mon_native]), + ), + ( + mutable_lin, + self.mon_param, + _MutableLinearExpression([self.mon_bin, self.mon_param]), + ), + ( + mutable_lin, + self.mon_npv, + _MutableLinearExpression([self.mon_bin, self.mon_npv]), + ), + # 12: + ( + mutable_lin, + self.linear, + _MutableLinearExpression([self.mon_bin] + self.linear.args), + ), + ( + mutable_lin, + self.sum, + _MutableSumExpression([self.mon_bin] + self.sum.args), + ), + ( + mutable_lin, + self.other, + _MutableSumExpression([self.mon_bin, self.other]), + ), + (mutable_lin, self.mutable_l0, _MutableLinearExpression([self.mon_bin])), + # 16: + ( + mutable_lin, + self.mutable_l1, + _MutableLinearExpression([self.mon_bin] + self.mutable_l1.args), + ), + ( + mutable_lin, + self.mutable_l2, + _MutableSumExpression([self.mon_bin] + self.mutable_l2.args), + ), + (mutable_lin, self.param0, _MutableLinearExpression([self.mon_bin])), + (mutable_lin, self.param1, _MutableLinearExpression([self.mon_bin, 1])), + # 20: + ( + mutable_lin, + self.mutable_l3, + _MutableLinearExpression([self.mon_bin, self.npv]), + ), + ] + self._run_iadd_cases(tests, operator.iadd) + + def test_mutable_sum_iadd(self): + mutable_sum = _MutableSumExpression([]) + tests = [ + (mutable_sum, self.invalid, NotImplemented), + (mutable_sum, self.asbinary, _MutableSumExpression([self.bin])), + (mutable_sum, self.zero, _MutableSumExpression([])), + (mutable_sum, self.one, _MutableSumExpression([1])), + # 4: + (mutable_sum, self.native, _MutableSumExpression([5])), + (mutable_sum, self.npv, _MutableSumExpression([self.npv])), + (mutable_sum, self.param, _MutableSumExpression([6])), + (mutable_sum, self.param_mut, _MutableSumExpression([self.param_mut])), + # 8: + (mutable_sum, self.var, _MutableSumExpression([self.var])), + (mutable_sum, self.mon_native, _MutableSumExpression([self.mon_native])), + (mutable_sum, self.mon_param, _MutableSumExpression([self.mon_param])), + (mutable_sum, self.mon_npv, _MutableSumExpression([self.mon_npv])), + # 12: + (mutable_sum, self.linear, _MutableSumExpression([self.linear])), + (mutable_sum, self.sum, _MutableSumExpression(self.sum.args)), + (mutable_sum, self.other, _MutableSumExpression([self.other])), + (mutable_sum, self.mutable_l0, _MutableSumExpression([])), + # 16: + (mutable_sum, self.mutable_l1, _MutableSumExpression(self.mutable_l1.args)), + (mutable_sum, self.mutable_l2, _MutableSumExpression(self.mutable_l2.args)), + (mutable_sum, self.param0, _MutableSumExpression([])), + (mutable_sum, self.param1, _MutableSumExpression([1])), + # 20: + (mutable_sum, self.mutable_l3, _MutableSumExpression([self.npv])), + ] + self._run_iadd_cases(tests, operator.iadd) + + mutable_sum = _MutableSumExpression([self.other]) + tests = [ + (mutable_sum, self.invalid, NotImplemented), + (mutable_sum, self.asbinary, _MutableSumExpression([self.other, self.bin])), + (mutable_sum, self.zero, _MutableSumExpression([self.other])), + (mutable_sum, self.one, _MutableSumExpression([self.other, 1])), + # 4: + (mutable_sum, self.native, _MutableSumExpression([self.other, 5])), + (mutable_sum, self.npv, _MutableSumExpression([self.other, self.npv])), + (mutable_sum, self.param, _MutableSumExpression([self.other, 6])), + ( + mutable_sum, + self.param_mut, + _MutableSumExpression([self.other, self.param_mut]), + ), + # 8: + (mutable_sum, self.var, _MutableSumExpression([self.other, self.var])), + ( + mutable_sum, + self.mon_native, + _MutableSumExpression([self.other, self.mon_native]), + ), + ( + mutable_sum, + self.mon_param, + _MutableSumExpression([self.other, self.mon_param]), + ), + ( + mutable_sum, + self.mon_npv, + _MutableSumExpression([self.other, self.mon_npv]), + ), + # 12: + ( + mutable_sum, + self.linear, + _MutableSumExpression([self.other, self.linear]), + ), + ( + mutable_sum, + self.sum, + _MutableSumExpression([self.other] + self.sum.args), + ), + (mutable_sum, self.other, _MutableSumExpression([self.other, self.other])), + (mutable_sum, self.mutable_l0, _MutableSumExpression([self.other])), + # 16: + ( + mutable_sum, + self.mutable_l1, + _MutableSumExpression([self.other] + self.mutable_l1.args), + ), + ( + mutable_sum, + self.mutable_l2, + _MutableSumExpression([self.other] + self.mutable_l2.args), + ), + (mutable_sum, self.param0, _MutableSumExpression([self.other])), + (mutable_sum, self.param1, _MutableSumExpression([self.other, 1])), + # 20: + ( + mutable_sum, + self.mutable_l3, + _MutableSumExpression([self.other, self.npv]), + ), + ] + self._run_iadd_cases(tests, operator.iadd) + + +class TestExpressionGeneration_Misc(Base, unittest.TestCase): + def test_enable_optimizations(self): + enable_expression_optimizations(zero=False, one=False) + self.assertEqual(_zero_one_optimizations, set()) + + enable_expression_optimizations(zero=True, one=False) + self.assertEqual(_zero_one_optimizations, {0}) + + enable_expression_optimizations(zero=True, one=True) + self.assertEqual(_zero_one_optimizations, {0, 1}) + + enable_expression_optimizations(zero=False, one=True) + self.assertEqual(_zero_one_optimizations, {1}) + + enable_expression_optimizations() + self.assertEqual(_zero_one_optimizations, {1}) + + def test_unreachable_dispatchers(self): + # The following dispatchers cannot be triggered through the + # normal NumericValue operator overloads with regular Pyomo + # classes. We will declare a "fake" type that as all the + # algebraic operator overloads, but reports itself as a + # "non-numeric type" + class CustomAsNumeric(NumericValue): + def __init__(self, val): + self._value = val + + def is_numeric_type(self): + return False + + def as_numeric(self): + return self._value + + obj = CustomAsNumeric(self.var) + e = abs(obj) + assertExpressionsEqual(self, AbsExpression((self.var,)), e) + + e = obj**2 + assertExpressionsEqual(self, PowExpression((self.var, 2)), e) + + e = obj + obj + assertExpressionsEqual(self, LinearExpression((self.mon_var, self.mon_var)), e) + + def test_categorize_arg_type(self): + class CustomAsNumeric(NumericValue): + def __init__(self, val): + self._value = val + + def is_numeric_type(self): + return False + + def as_numeric(self): + return self._value + + class CustomNumeric(NumericValue): + def is_potentially_variable(self): + return True + + saved_known_arg_types = dict(_known_arg_types) + saved_native_numeric_types = set(native_numeric_types) + saved_native_types = set(native_types) + try: + _known_arg_types.clear() + native_types.discard(int) + native_numeric_types.discard(int) + self.assertIs(_categorize_arg_type(10), ARG_TYPE.NATIVE) + self.assertIn(int, _known_arg_types) + self.assertIn(int, native_numeric_types) + # This checks against the native_numeric_types cache... + _known_arg_types.clear() + self.assertIs(_categorize_arg_type(10), ARG_TYPE.NATIVE) + # This checks against the _known_arg_types cache... + self.assertIs(_categorize_arg_type(10), ARG_TYPE.NATIVE) + + self.assertIs(_categorize_arg_type(self.invalid), ARG_TYPE.INVALID) + self.assertIs(_categorize_arg_type(self.asbinary), ARG_TYPE.ASNUMERIC) + self.assertIs(_categorize_arg_type(self.zero), ARG_TYPE.NATIVE) + self.assertIs(_categorize_arg_type(self.one), ARG_TYPE.NATIVE) + self.assertIs(_categorize_arg_type(self.native), ARG_TYPE.NATIVE) + self.assertIs(_categorize_arg_type(self.npv), ARG_TYPE.NPV) + self.assertIs(_categorize_arg_type(self.param), ARG_TYPE.PARAM) + self.assertIs(_categorize_arg_type(self.param0), ARG_TYPE.PARAM) + self.assertIs(_categorize_arg_type(self.param1), ARG_TYPE.PARAM) + self.assertIs(_categorize_arg_type(self.param_mut), ARG_TYPE.PARAM) + self.assertIs(_categorize_arg_type(self.var), ARG_TYPE.VAR) + self.assertIs(_categorize_arg_type(self.mon_native), ARG_TYPE.MONOMIAL) + self.assertIs(_categorize_arg_type(self.mon_param), ARG_TYPE.MONOMIAL) + self.assertIs(_categorize_arg_type(self.mon_npv), ARG_TYPE.MONOMIAL) + self.assertIs(_categorize_arg_type(self.linear), ARG_TYPE.LINEAR) + self.assertIs(_categorize_arg_type(self.sum), ARG_TYPE.SUM) + self.assertIs(_categorize_arg_type(self.other), ARG_TYPE.OTHER) + self.assertIs(_categorize_arg_type(self.mutable_l0), ARG_TYPE.MUTABLE) + self.assertIs(_categorize_arg_type(self.mutable_l1), ARG_TYPE.MUTABLE) + self.assertIs(_categorize_arg_type(self.mutable_l2), ARG_TYPE.MUTABLE) + self.assertIs(_categorize_arg_type(self.mutable_l3), ARG_TYPE.MUTABLE) + + # Other interesting tests (partially for coverage) + self.assertIs(_categorize_arg_type(self.m), ARG_TYPE.INVALID) + self.assertIs(_categorize_arg_type(CustomAsNumeric(10)), ARG_TYPE.ASNUMERIC) + self.assertIs(_categorize_arg_type(CustomNumeric()), ARG_TYPE.OTHER) + + finally: + _known_arg_types.clear() + _known_arg_types.update(saved_known_arg_types) + native_numeric_types.clear() + native_numeric_types.update(saved_native_numeric_types) + native_types.clear() + native_types.update(saved_native_types) diff --git a/pyomo/core/tests/unit/test_numeric_expr_zerofilter.py b/pyomo/core/tests/unit/test_numeric_expr_zerofilter.py new file mode 100644 index 00000000000..3000f644e80 --- /dev/null +++ b/pyomo/core/tests/unit/test_numeric_expr_zerofilter.py @@ -0,0 +1,6341 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +# +# Unit Tests for expression generation +# +import math +import operator + +import pyomo.common.unittest as unittest + +from pyomo.core.expr.compare import assertExpressionsEqual +import pyomo.core.expr as EXPR +from pyomo.core.expr import ( + DivisionExpression, + NPV_DivisionExpression, + SumExpression, + NPV_SumExpression, + LinearExpression, + MonomialTermExpression, + NegationExpression, + NPV_NegationExpression, + ProductExpression, + NPV_ProductExpression, + PowExpression, + NPV_PowExpression, + AbsExpression, + NPV_AbsExpression, + UnaryFunctionExpression, + NPV_UnaryFunctionExpression, +) +from pyomo.core.expr.numeric_expr import ( + ARG_TYPE, + _categorize_arg_type, + _known_arg_types, + _MutableSumExpression, + _MutableLinearExpression, + _MutableNPVSumExpression, + enable_expression_optimizations, +) +from pyomo.core.expr.numvalue import NumericValue, native_types, native_numeric_types + +from .test_numeric_expr_dispatcher import Base + + +class TestExpressionGeneration_ZeroFilter(Base, unittest.TestCase): + def setUp(self): + super().setUp() + enable_expression_optimizations(zero=True, one=True) + + # + # + # ADDITION + # + # + + def test_add_invalid(self): + tests = [ + # "invalid(str) + invalid(str)" is a legitimate Python + # operation and should never hit the Pyomo expression + # system + (self.invalid, self.invalid, self.SKIP), + (self.invalid, self.asbinary, NotImplemented), + (self.invalid, self.zero, NotImplemented), + (self.invalid, self.one, NotImplemented), + # 4: + (self.invalid, self.native, NotImplemented), + (self.invalid, self.npv, NotImplemented), + (self.invalid, self.param, NotImplemented), + (self.invalid, self.param_mut, NotImplemented), + # 8: + (self.invalid, self.var, NotImplemented), + (self.invalid, self.mon_native, NotImplemented), + (self.invalid, self.mon_param, NotImplemented), + (self.invalid, self.mon_npv, NotImplemented), + # 12: + (self.invalid, self.linear, NotImplemented), + (self.invalid, self.sum, NotImplemented), + (self.invalid, self.other, NotImplemented), + (self.invalid, self.mutable_l0, NotImplemented), + # 16: + (self.invalid, self.mutable_l1, NotImplemented), + (self.invalid, self.mutable_l2, NotImplemented), + (self.invalid, self.param0, NotImplemented), + (self.invalid, self.param1, NotImplemented), + # 20: + (self.invalid, self.mutable_l3, NotImplemented), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_asbinary(self): + tests = [ + (self.asbinary, self.invalid, NotImplemented), + # BooleanVar objects do not support addition + (self.asbinary, self.asbinary, NotImplemented), + (self.asbinary, self.zero, self.bin), + (self.asbinary, self.one, LinearExpression([self.mon_bin, 1])), + # 4: + (self.asbinary, self.native, LinearExpression([self.mon_bin, 5])), + (self.asbinary, self.npv, LinearExpression([self.mon_bin, self.npv])), + (self.asbinary, self.param, LinearExpression([self.mon_bin, 6])), + ( + self.asbinary, + self.param_mut, + LinearExpression([self.mon_bin, self.param_mut]), + ), + # 8: + (self.asbinary, self.var, LinearExpression([self.mon_bin, self.mon_var])), + ( + self.asbinary, + self.mon_native, + LinearExpression([self.mon_bin, self.mon_native]), + ), + ( + self.asbinary, + self.mon_param, + LinearExpression([self.mon_bin, self.mon_param]), + ), + ( + self.asbinary, + self.mon_npv, + LinearExpression([self.mon_bin, self.mon_npv]), + ), + # 12: + ( + self.asbinary, + self.linear, + LinearExpression(self.linear.args + [self.mon_bin]), + ), + (self.asbinary, self.sum, SumExpression(self.sum.args + [self.bin])), + (self.asbinary, self.other, SumExpression([self.bin, self.other])), + (self.asbinary, self.mutable_l0, self.bin), + # 16: + ( + self.asbinary, + self.mutable_l1, + LinearExpression([self.mon_bin, self.mon_npv]), + ), + ( + self.asbinary, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.bin]), + ), + (self.asbinary, self.param0, self.bin), + (self.asbinary, self.param1, LinearExpression([self.mon_bin, 1])), + # 20: + ( + self.asbinary, + self.mutable_l3, + LinearExpression([self.mon_bin, self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_zero(self): + tests = [ + (self.zero, self.invalid, NotImplemented), + (self.zero, self.asbinary, self.bin), + (self.zero, self.zero, 0), + (self.zero, self.one, 1), + # 4: + (self.zero, self.native, 5), + (self.zero, self.npv, self.npv), + (self.zero, self.param, 6), + (self.zero, self.param_mut, self.param_mut), + # 8: + (self.zero, self.var, self.var), + (self.zero, self.mon_native, self.mon_native), + (self.zero, self.mon_param, self.mon_param), + (self.zero, self.mon_npv, self.mon_npv), + # 12: + (self.zero, self.linear, self.linear), + (self.zero, self.sum, self.sum), + (self.zero, self.other, self.other), + (self.zero, self.mutable_l0, 0), + # 16: + (self.zero, self.mutable_l1, self.mon_npv), + (self.zero, self.mutable_l2, self.mutable_l2), + (self.zero, self.param0, 0), + (self.zero, self.param1, 1), + # 20: + (self.zero, self.mutable_l3, self.npv), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_one(self): + tests = [ + (self.one, self.invalid, NotImplemented), + (self.one, self.asbinary, LinearExpression([1, self.mon_bin])), + (self.one, self.zero, 1), + (self.one, self.one, 2), + # 4: + (self.one, self.native, 6), + (self.one, self.npv, NPV_SumExpression([1, self.npv])), + (self.one, self.param, 7), + (self.one, self.param_mut, NPV_SumExpression([1, self.param_mut])), + # 8: + (self.one, self.var, LinearExpression([1, self.mon_var])), + (self.one, self.mon_native, LinearExpression([1, self.mon_native])), + (self.one, self.mon_param, LinearExpression([1, self.mon_param])), + (self.one, self.mon_npv, LinearExpression([1, self.mon_npv])), + # 12: + (self.one, self.linear, LinearExpression(self.linear.args + [1])), + (self.one, self.sum, SumExpression(self.sum.args + [1])), + (self.one, self.other, SumExpression([1, self.other])), + (self.one, self.mutable_l0, 1), + # 16: + (self.one, self.mutable_l1, LinearExpression([1] + self.mutable_l1.args)), + (self.one, self.mutable_l2, SumExpression(self.mutable_l2.args + [1])), + (self.one, self.param0, 1), + (self.one, self.param1, 2), + # 20: + (self.one, self.mutable_l3, NPV_SumExpression([1, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_native(self): + tests = [ + (self.native, self.invalid, NotImplemented), + (self.native, self.asbinary, LinearExpression([5, self.mon_bin])), + (self.native, self.zero, 5), + (self.native, self.one, 6), + # 4: + (self.native, self.native, 10), + (self.native, self.npv, NPV_SumExpression([5, self.npv])), + (self.native, self.param, 11), + (self.native, self.param_mut, NPV_SumExpression([5, self.param_mut])), + # 8: + (self.native, self.var, LinearExpression([5, self.mon_var])), + (self.native, self.mon_native, LinearExpression([5, self.mon_native])), + (self.native, self.mon_param, LinearExpression([5, self.mon_param])), + (self.native, self.mon_npv, LinearExpression([5, self.mon_npv])), + # 12: + (self.native, self.linear, LinearExpression(self.linear.args + [5])), + (self.native, self.sum, SumExpression(self.sum.args + [5])), + (self.native, self.other, SumExpression([5, self.other])), + (self.native, self.mutable_l0, 5), + # 16: + ( + self.native, + self.mutable_l1, + LinearExpression([5] + self.mutable_l1.args), + ), + (self.native, self.mutable_l2, SumExpression(self.mutable_l2.args + [5])), + (self.native, self.param0, 5), + (self.native, self.param1, 6), + # 20: + (self.native, self.mutable_l3, NPV_SumExpression([5, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_npv(self): + tests = [ + (self.npv, self.invalid, NotImplemented), + (self.npv, self.asbinary, LinearExpression([self.npv, self.mon_bin])), + (self.npv, self.zero, self.npv), + (self.npv, self.one, NPV_SumExpression([self.npv, 1])), + # 4: + (self.npv, self.native, NPV_SumExpression([self.npv, 5])), + (self.npv, self.npv, NPV_SumExpression([self.npv, self.npv])), + (self.npv, self.param, NPV_SumExpression([self.npv, 6])), + (self.npv, self.param_mut, NPV_SumExpression([self.npv, self.param_mut])), + # 8: + (self.npv, self.var, LinearExpression([self.npv, self.mon_var])), + (self.npv, self.mon_native, LinearExpression([self.npv, self.mon_native])), + (self.npv, self.mon_param, LinearExpression([self.npv, self.mon_param])), + (self.npv, self.mon_npv, LinearExpression([self.npv, self.mon_npv])), + # 12: + (self.npv, self.linear, LinearExpression(self.linear.args + [self.npv])), + (self.npv, self.sum, SumExpression(self.sum.args + [self.npv])), + (self.npv, self.other, SumExpression([self.npv, self.other])), + (self.npv, self.mutable_l0, self.npv), + # 16: + ( + self.npv, + self.mutable_l1, + LinearExpression([self.npv] + self.mutable_l1.args), + ), + ( + self.npv, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.npv]), + ), + (self.npv, self.param0, self.npv), + (self.npv, self.param1, NPV_SumExpression([self.npv, 1])), + # 20: + (self.npv, self.mutable_l3, NPV_SumExpression([self.npv, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_param(self): + tests = [ + (self.param, self.invalid, NotImplemented), + (self.param, self.asbinary, LinearExpression([6, self.mon_bin])), + (self.param, self.zero, 6), + (self.param, self.one, 7), + # 4: + (self.param, self.native, 11), + (self.param, self.npv, NPV_SumExpression([6, self.npv])), + (self.param, self.param, 12), + (self.param, self.param_mut, NPV_SumExpression([6, self.param_mut])), + # 8: + (self.param, self.var, LinearExpression([6, self.mon_var])), + (self.param, self.mon_native, LinearExpression([6, self.mon_native])), + (self.param, self.mon_param, LinearExpression([6, self.mon_param])), + (self.param, self.mon_npv, LinearExpression([6, self.mon_npv])), + # 12: + (self.param, self.linear, LinearExpression(self.linear.args + [6])), + (self.param, self.sum, SumExpression(self.sum.args + [6])), + (self.param, self.other, SumExpression([6, self.other])), + (self.param, self.mutable_l0, 6), + # 16: + (self.param, self.mutable_l1, LinearExpression([6] + self.mutable_l1.args)), + (self.param, self.mutable_l2, SumExpression(self.mutable_l2.args + [6])), + (self.param, self.param0, 6), + (self.param, self.param1, 7), + # 20: + (self.param, self.mutable_l3, NPV_SumExpression([6, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_param_mut(self): + tests = [ + (self.param_mut, self.invalid, NotImplemented), + ( + self.param_mut, + self.asbinary, + LinearExpression([self.param_mut, self.mon_bin]), + ), + (self.param_mut, self.zero, self.param_mut), + (self.param_mut, self.one, NPV_SumExpression([self.param_mut, 1])), + # 4: + (self.param_mut, self.native, NPV_SumExpression([self.param_mut, 5])), + (self.param_mut, self.npv, NPV_SumExpression([self.param_mut, self.npv])), + (self.param_mut, self.param, NPV_SumExpression([self.param_mut, 6])), + ( + self.param_mut, + self.param_mut, + NPV_SumExpression([self.param_mut, self.param_mut]), + ), + # 8: + ( + self.param_mut, + self.var, + LinearExpression([self.param_mut, self.mon_var]), + ), + ( + self.param_mut, + self.mon_native, + LinearExpression([self.param_mut, self.mon_native]), + ), + ( + self.param_mut, + self.mon_param, + LinearExpression([self.param_mut, self.mon_param]), + ), + ( + self.param_mut, + self.mon_npv, + LinearExpression([self.param_mut, self.mon_npv]), + ), + # 12: + ( + self.param_mut, + self.linear, + LinearExpression(self.linear.args + [self.param_mut]), + ), + (self.param_mut, self.sum, SumExpression(self.sum.args + [self.param_mut])), + (self.param_mut, self.other, SumExpression([self.param_mut, self.other])), + (self.param_mut, self.mutable_l0, self.param_mut), + # 16: + ( + self.param_mut, + self.mutable_l1, + LinearExpression([self.param_mut] + self.mutable_l1.args), + ), + ( + self.param_mut, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.param_mut]), + ), + (self.param_mut, self.param0, self.param_mut), + (self.param_mut, self.param1, NPV_SumExpression([self.param_mut, 1])), + # 20: + ( + self.param_mut, + self.mutable_l3, + NPV_SumExpression([self.param_mut, self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_var(self): + tests = [ + (self.var, self.invalid, NotImplemented), + (self.var, self.asbinary, LinearExpression([self.mon_var, self.mon_bin])), + (self.var, self.zero, self.var), + (self.var, self.one, LinearExpression([self.mon_var, 1])), + # 4: + (self.var, self.native, LinearExpression([self.mon_var, 5])), + (self.var, self.npv, LinearExpression([self.mon_var, self.npv])), + (self.var, self.param, LinearExpression([self.mon_var, 6])), + ( + self.var, + self.param_mut, + LinearExpression([self.mon_var, self.param_mut]), + ), + # 8: + (self.var, self.var, LinearExpression([self.mon_var, self.mon_var])), + ( + self.var, + self.mon_native, + LinearExpression([self.mon_var, self.mon_native]), + ), + ( + self.var, + self.mon_param, + LinearExpression([self.mon_var, self.mon_param]), + ), + (self.var, self.mon_npv, LinearExpression([self.mon_var, self.mon_npv])), + # 12: + ( + self.var, + self.linear, + LinearExpression(self.linear.args + [self.mon_var]), + ), + (self.var, self.sum, SumExpression(self.sum.args + [self.var])), + (self.var, self.other, SumExpression([self.var, self.other])), + (self.var, self.mutable_l0, self.var), + # 16: + ( + self.var, + self.mutable_l1, + LinearExpression([self.mon_var] + self.mutable_l1.args), + ), + ( + self.var, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.var]), + ), + (self.var, self.param0, self.var), + (self.var, self.param1, LinearExpression([self.mon_var, 1])), + # 20: + ( + self.var, + self.mutable_l3, + LinearExpression([MonomialTermExpression((1, self.var)), self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_mon_native(self): + tests = [ + (self.mon_native, self.invalid, NotImplemented), + ( + self.mon_native, + self.asbinary, + LinearExpression([self.mon_native, self.mon_bin]), + ), + (self.mon_native, self.zero, self.mon_native), + (self.mon_native, self.one, LinearExpression([self.mon_native, 1])), + # 4: + (self.mon_native, self.native, LinearExpression([self.mon_native, 5])), + (self.mon_native, self.npv, LinearExpression([self.mon_native, self.npv])), + (self.mon_native, self.param, LinearExpression([self.mon_native, 6])), + ( + self.mon_native, + self.param_mut, + LinearExpression([self.mon_native, self.param_mut]), + ), + # 8: + ( + self.mon_native, + self.var, + LinearExpression([self.mon_native, self.mon_var]), + ), + ( + self.mon_native, + self.mon_native, + LinearExpression([self.mon_native, self.mon_native]), + ), + ( + self.mon_native, + self.mon_param, + LinearExpression([self.mon_native, self.mon_param]), + ), + ( + self.mon_native, + self.mon_npv, + LinearExpression([self.mon_native, self.mon_npv]), + ), + # 12: + ( + self.mon_native, + self.linear, + LinearExpression(self.linear.args + [self.mon_native]), + ), + ( + self.mon_native, + self.sum, + SumExpression(self.sum.args + [self.mon_native]), + ), + (self.mon_native, self.other, SumExpression([self.mon_native, self.other])), + (self.mon_native, self.mutable_l0, self.mon_native), + # 16: + ( + self.mon_native, + self.mutable_l1, + LinearExpression([self.mon_native] + self.mutable_l1.args), + ), + ( + self.mon_native, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.mon_native]), + ), + (self.mon_native, self.param0, self.mon_native), + (self.mon_native, self.param1, LinearExpression([self.mon_native, 1])), + # 20: + ( + self.mon_native, + self.mutable_l3, + LinearExpression([self.mon_native, self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_mon_param(self): + tests = [ + (self.mon_param, self.invalid, NotImplemented), + ( + self.mon_param, + self.asbinary, + LinearExpression([self.mon_param, self.mon_bin]), + ), + (self.mon_param, self.zero, self.mon_param), + (self.mon_param, self.one, LinearExpression([self.mon_param, 1])), + # 4: + (self.mon_param, self.native, LinearExpression([self.mon_param, 5])), + (self.mon_param, self.npv, LinearExpression([self.mon_param, self.npv])), + (self.mon_param, self.param, LinearExpression([self.mon_param, 6])), + ( + self.mon_param, + self.param_mut, + LinearExpression([self.mon_param, self.param_mut]), + ), + # 8: + ( + self.mon_param, + self.var, + LinearExpression([self.mon_param, self.mon_var]), + ), + ( + self.mon_param, + self.mon_native, + LinearExpression([self.mon_param, self.mon_native]), + ), + ( + self.mon_param, + self.mon_param, + LinearExpression([self.mon_param, self.mon_param]), + ), + ( + self.mon_param, + self.mon_npv, + LinearExpression([self.mon_param, self.mon_npv]), + ), + # 12: + ( + self.mon_param, + self.linear, + LinearExpression(self.linear.args + [self.mon_param]), + ), + (self.mon_param, self.sum, SumExpression(self.sum.args + [self.mon_param])), + (self.mon_param, self.other, SumExpression([self.mon_param, self.other])), + (self.mon_param, self.mutable_l0, self.mon_param), + # 16: + ( + self.mon_param, + self.mutable_l1, + LinearExpression([self.mon_param] + self.mutable_l1.args), + ), + ( + self.mon_param, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.mon_param]), + ), + (self.mon_param, self.param0, self.mon_param), + (self.mon_param, self.param1, LinearExpression([self.mon_param, 1])), + # 20: + ( + self.mon_param, + self.mutable_l3, + LinearExpression([self.mon_param, self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_mon_npv(self): + tests = [ + (self.mon_npv, self.invalid, NotImplemented), + ( + self.mon_npv, + self.asbinary, + LinearExpression([self.mon_npv, self.mon_bin]), + ), + (self.mon_npv, self.zero, self.mon_npv), + (self.mon_npv, self.one, LinearExpression([self.mon_npv, 1])), + # 4: + (self.mon_npv, self.native, LinearExpression([self.mon_npv, 5])), + (self.mon_npv, self.npv, LinearExpression([self.mon_npv, self.npv])), + (self.mon_npv, self.param, LinearExpression([self.mon_npv, 6])), + ( + self.mon_npv, + self.param_mut, + LinearExpression([self.mon_npv, self.param_mut]), + ), + # 8: + (self.mon_npv, self.var, LinearExpression([self.mon_npv, self.mon_var])), + ( + self.mon_npv, + self.mon_native, + LinearExpression([self.mon_npv, self.mon_native]), + ), + ( + self.mon_npv, + self.mon_param, + LinearExpression([self.mon_npv, self.mon_param]), + ), + ( + self.mon_npv, + self.mon_npv, + LinearExpression([self.mon_npv, self.mon_npv]), + ), + # 12: + ( + self.mon_npv, + self.linear, + LinearExpression(self.linear.args + [self.mon_npv]), + ), + (self.mon_npv, self.sum, SumExpression(self.sum.args + [self.mon_npv])), + (self.mon_npv, self.other, SumExpression([self.mon_npv, self.other])), + (self.mon_npv, self.mutable_l0, self.mon_npv), + # 16: + ( + self.mon_npv, + self.mutable_l1, + LinearExpression([self.mon_npv] + self.mutable_l1.args), + ), + ( + self.mon_npv, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.mon_npv]), + ), + (self.mon_npv, self.param0, self.mon_npv), + (self.mon_npv, self.param1, LinearExpression([self.mon_npv, 1])), + # 20: + (self.mon_npv, self.mutable_l3, LinearExpression([self.mon_npv, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_linear(self): + tests = [ + (self.linear, self.invalid, NotImplemented), + ( + self.linear, + self.asbinary, + LinearExpression(self.linear.args + [self.mon_bin]), + ), + (self.linear, self.zero, self.linear), + (self.linear, self.one, LinearExpression(self.linear.args + [1])), + # 4: + (self.linear, self.native, LinearExpression(self.linear.args + [5])), + (self.linear, self.npv, LinearExpression(self.linear.args + [self.npv])), + (self.linear, self.param, LinearExpression(self.linear.args + [6])), + ( + self.linear, + self.param_mut, + LinearExpression(self.linear.args + [self.param_mut]), + ), + # 8: + ( + self.linear, + self.var, + LinearExpression(self.linear.args + [self.mon_var]), + ), + ( + self.linear, + self.mon_native, + LinearExpression(self.linear.args + [self.mon_native]), + ), + ( + self.linear, + self.mon_param, + LinearExpression(self.linear.args + [self.mon_param]), + ), + ( + self.linear, + self.mon_npv, + LinearExpression(self.linear.args + [self.mon_npv]), + ), + # 12: + ( + self.linear, + self.linear, + LinearExpression(self.linear.args + self.linear.args), + ), + (self.linear, self.sum, SumExpression(self.sum.args + [self.linear])), + (self.linear, self.other, SumExpression([self.linear, self.other])), + (self.linear, self.mutable_l0, self.linear), + # 16: + ( + self.linear, + self.mutable_l1, + LinearExpression(self.linear.args + self.mutable_l1.args), + ), + ( + self.linear, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.linear]), + ), + (self.linear, self.param0, self.linear), + (self.linear, self.param1, LinearExpression(self.linear.args + [1])), + # 20: + ( + self.linear, + self.mutable_l3, + LinearExpression(self.linear.args + [self.npv]), + ), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_sum(self): + tests = [ + (self.sum, self.invalid, NotImplemented), + (self.sum, self.asbinary, SumExpression(self.sum.args + [self.bin])), + (self.sum, self.zero, self.sum), + (self.sum, self.one, SumExpression(self.sum.args + [1])), + # 4: + (self.sum, self.native, SumExpression(self.sum.args + [5])), + (self.sum, self.npv, SumExpression(self.sum.args + [self.npv])), + (self.sum, self.param, SumExpression(self.sum.args + [6])), + (self.sum, self.param_mut, SumExpression(self.sum.args + [self.param_mut])), + # 8: + (self.sum, self.var, SumExpression(self.sum.args + [self.var])), + ( + self.sum, + self.mon_native, + SumExpression(self.sum.args + [self.mon_native]), + ), + (self.sum, self.mon_param, SumExpression(self.sum.args + [self.mon_param])), + (self.sum, self.mon_npv, SumExpression(self.sum.args + [self.mon_npv])), + # 12: + (self.sum, self.linear, SumExpression(self.sum.args + [self.linear])), + (self.sum, self.sum, SumExpression(self.sum.args + self.sum.args)), + (self.sum, self.other, SumExpression(self.sum.args + [self.other])), + (self.sum, self.mutable_l0, self.sum), + # 16: + ( + self.sum, + self.mutable_l1, + SumExpression(self.sum.args + self.mutable_l1.args), + ), + ( + self.sum, + self.mutable_l2, + SumExpression(self.sum.args + self.mutable_l2.args), + ), + (self.sum, self.param0, self.sum), + (self.sum, self.param1, SumExpression(self.sum.args + [1])), + # 20: + (self.sum, self.mutable_l3, SumExpression(self.sum.args + [self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_other(self): + tests = [ + (self.other, self.invalid, NotImplemented), + (self.other, self.asbinary, SumExpression([self.other, self.bin])), + (self.other, self.zero, self.other), + (self.other, self.one, SumExpression([self.other, 1])), + # 4: + (self.other, self.native, SumExpression([self.other, 5])), + (self.other, self.npv, SumExpression([self.other, self.npv])), + (self.other, self.param, SumExpression([self.other, 6])), + (self.other, self.param_mut, SumExpression([self.other, self.param_mut])), + # 8: + (self.other, self.var, SumExpression([self.other, self.var])), + (self.other, self.mon_native, SumExpression([self.other, self.mon_native])), + (self.other, self.mon_param, SumExpression([self.other, self.mon_param])), + (self.other, self.mon_npv, SumExpression([self.other, self.mon_npv])), + # 12: + (self.other, self.linear, SumExpression([self.other, self.linear])), + (self.other, self.sum, SumExpression(self.sum.args + [self.other])), + (self.other, self.other, SumExpression([self.other, self.other])), + (self.other, self.mutable_l0, self.other), + # 16: + (self.other, self.mutable_l1, SumExpression([self.other, self.mon_npv])), + ( + self.other, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.other]), + ), + (self.other, self.param0, self.other), + (self.other, self.param1, SumExpression([self.other, 1])), + # 20: + (self.other, self.mutable_l3, SumExpression([self.other, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_mutable_l0(self): + tests = [ + (self.mutable_l0, self.invalid, NotImplemented), + (self.mutable_l0, self.asbinary, self.bin), + (self.mutable_l0, self.zero, 0), + (self.mutable_l0, self.one, 1), + # 4: + (self.mutable_l0, self.native, 5), + (self.mutable_l0, self.npv, self.npv), + (self.mutable_l0, self.param, 6), + (self.mutable_l0, self.param_mut, self.param_mut), + # 8: + (self.mutable_l0, self.var, self.var), + (self.mutable_l0, self.mon_native, self.mon_native), + (self.mutable_l0, self.mon_param, self.mon_param), + (self.mutable_l0, self.mon_npv, self.mon_npv), + # 12: + (self.mutable_l0, self.linear, self.linear), + (self.mutable_l0, self.sum, self.sum), + (self.mutable_l0, self.other, self.other), + (self.mutable_l0, self.mutable_l0, 0), + # 16: + (self.mutable_l0, self.mutable_l1, self.mon_npv), + (self.mutable_l0, self.mutable_l2, self.mutable_l2), + (self.mutable_l0, self.param0, 0), + (self.mutable_l0, self.param1, 1), + # 20: + (self.mutable_l0, self.mutable_l3, self.npv), + ] + self._run_cases(tests, operator.add) + # Mutable iadd handled by separate tests + # self._run_cases(tests, operator.iadd) + + def test_add_mutable_l1(self): + tests = [ + (self.mutable_l1, self.invalid, NotImplemented), + ( + self.mutable_l1, + self.asbinary, + LinearExpression(self.mutable_l1.args + [self.mon_bin]), + ), + (self.mutable_l1, self.zero, self.mon_npv), + (self.mutable_l1, self.one, LinearExpression(self.mutable_l1.args + [1])), + # 4: + ( + self.mutable_l1, + self.native, + LinearExpression(self.mutable_l1.args + [5]), + ), + ( + self.mutable_l1, + self.npv, + LinearExpression(self.mutable_l1.args + [self.npv]), + ), + (self.mutable_l1, self.param, LinearExpression(self.mutable_l1.args + [6])), + ( + self.mutable_l1, + self.param_mut, + LinearExpression(self.mutable_l1.args + [self.param_mut]), + ), + # 8: + ( + self.mutable_l1, + self.var, + LinearExpression(self.mutable_l1.args + [self.mon_var]), + ), + ( + self.mutable_l1, + self.mon_native, + LinearExpression(self.mutable_l1.args + [self.mon_native]), + ), + ( + self.mutable_l1, + self.mon_param, + LinearExpression(self.mutable_l1.args + [self.mon_param]), + ), + ( + self.mutable_l1, + self.mon_npv, + LinearExpression(self.mutable_l1.args + [self.mon_npv]), + ), + # 12: + ( + self.mutable_l1, + self.linear, + LinearExpression(self.linear.args + self.mutable_l1.args), + ), + ( + self.mutable_l1, + self.sum, + SumExpression(self.sum.args + self.mutable_l1.args), + ), + ( + self.mutable_l1, + self.other, + SumExpression(self.mutable_l1.args + [self.other]), + ), + (self.mutable_l1, self.mutable_l0, self.mon_npv), + # 16: + ( + self.mutable_l1, + self.mutable_l1, + LinearExpression(self.mutable_l1.args + self.mutable_l1.args), + ), + ( + self.mutable_l1, + self.mutable_l2, + SumExpression(self.mutable_l2.args + self.mutable_l1.args), + ), + (self.mutable_l1, self.param0, self.mon_npv), + ( + self.mutable_l1, + self.param1, + LinearExpression(self.mutable_l1.args + [1]), + ), + # 20: + ( + self.mutable_l1, + self.mutable_l3, + LinearExpression(self.mutable_l1.args + [self.npv]), + ), + ] + self._run_cases(tests, operator.add) + # Mutable iadd handled by separate tests + # self._run_cases(tests, operator.iadd) + + def test_add_mutable_l2(self): + tests = [ + (self.mutable_l2, self.invalid, NotImplemented), + ( + self.mutable_l2, + self.asbinary, + SumExpression(self.mutable_l2.args + [self.bin]), + ), + (self.mutable_l2, self.zero, self.mutable_l2), + (self.mutable_l2, self.one, SumExpression(self.mutable_l2.args + [1])), + # 4: + (self.mutable_l2, self.native, SumExpression(self.mutable_l2.args + [5])), + ( + self.mutable_l2, + self.npv, + SumExpression(self.mutable_l2.args + [self.npv]), + ), + (self.mutable_l2, self.param, SumExpression(self.mutable_l2.args + [6])), + ( + self.mutable_l2, + self.param_mut, + SumExpression(self.mutable_l2.args + [self.param_mut]), + ), + # 8: + ( + self.mutable_l2, + self.var, + SumExpression(self.mutable_l2.args + [self.var]), + ), + ( + self.mutable_l2, + self.mon_native, + SumExpression(self.mutable_l2.args + [self.mon_native]), + ), + ( + self.mutable_l2, + self.mon_param, + SumExpression(self.mutable_l2.args + [self.mon_param]), + ), + ( + self.mutable_l2, + self.mon_npv, + SumExpression(self.mutable_l2.args + [self.mon_npv]), + ), + # 12: + ( + self.mutable_l2, + self.linear, + SumExpression(self.mutable_l2.args + [self.linear]), + ), + ( + self.mutable_l2, + self.sum, + SumExpression(self.mutable_l2.args + self.sum.args), + ), + ( + self.mutable_l2, + self.other, + SumExpression(self.mutable_l2.args + [self.other]), + ), + (self.mutable_l2, self.mutable_l0, self.mutable_l2), + # 16: + ( + self.mutable_l2, + self.mutable_l1, + SumExpression(self.mutable_l2.args + self.mutable_l1.args), + ), + ( + self.mutable_l2, + self.mutable_l2, + SumExpression(self.mutable_l2.args + self.mutable_l2.args), + ), + (self.mutable_l2, self.param0, self.mutable_l2), + (self.mutable_l2, self.param1, SumExpression(self.mutable_l2.args + [1])), + # 20: + ( + self.mutable_l2, + self.mutable_l3, + SumExpression(self.mutable_l2.args + [self.npv]), + ), + ] + self._run_cases(tests, operator.add) + # Mutable iadd handled by separate tests + # self._run_cases(tests, operator.iadd) + + def test_add_param0(self): + tests = [ + (self.param0, self.invalid, NotImplemented), + (self.param0, self.asbinary, self.bin), + (self.param0, self.zero, 0), + (self.param0, self.one, 1), + # 4: + (self.param0, self.native, 5), + (self.param0, self.npv, self.npv), + (self.param0, self.param, 6), + (self.param0, self.param_mut, self.param_mut), + # 8: + (self.param0, self.var, self.var), + (self.param0, self.mon_native, self.mon_native), + (self.param0, self.mon_param, self.mon_param), + (self.param0, self.mon_npv, self.mon_npv), + # 12: + (self.param0, self.linear, self.linear), + (self.param0, self.sum, self.sum), + (self.param0, self.other, self.other), + (self.param0, self.mutable_l0, 0), + # 16: + (self.param0, self.mutable_l1, self.mon_npv), + (self.param0, self.mutable_l2, self.mutable_l2), + (self.param0, self.param0, 0), + (self.param0, self.param1, 1), + # 20: + (self.param0, self.mutable_l3, self.npv), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_param1(self): + tests = [ + (self.param1, self.invalid, NotImplemented), + (self.param1, self.asbinary, LinearExpression([1, self.mon_bin])), + (self.param1, self.zero, 1), + (self.param1, self.one, 2), + # 4: + (self.param1, self.native, 6), + (self.param1, self.npv, NPV_SumExpression([1, self.npv])), + (self.param1, self.param, 7), + (self.param1, self.param_mut, NPV_SumExpression([1, self.param_mut])), + # 8: + (self.param1, self.var, LinearExpression([1, self.mon_var])), + (self.param1, self.mon_native, LinearExpression([1, self.mon_native])), + (self.param1, self.mon_param, LinearExpression([1, self.mon_param])), + (self.param1, self.mon_npv, LinearExpression([1, self.mon_npv])), + # 12: + (self.param1, self.linear, LinearExpression(self.linear.args + [1])), + (self.param1, self.sum, SumExpression(self.sum.args + [1])), + (self.param1, self.other, SumExpression([1, self.other])), + (self.param1, self.mutable_l0, 1), + # 16: + ( + self.param1, + self.mutable_l1, + LinearExpression([1] + self.mutable_l1.args), + ), + (self.param1, self.mutable_l2, SumExpression(self.mutable_l2.args + [1])), + (self.param1, self.param0, 1), + (self.param1, self.param1, 2), + # 20: + (self.param1, self.mutable_l3, NPV_SumExpression([1, self.npv])), + ] + self._run_cases(tests, operator.add) + self._run_cases(tests, operator.iadd) + + def test_add_mutable_l3(self): + tests = [ + (self.mutable_l3, self.invalid, NotImplemented), + ( + self.mutable_l3, + self.asbinary, + LinearExpression(self.mutable_l3.args + [self.mon_bin]), + ), + (self.mutable_l3, self.zero, self.npv), + (self.mutable_l3, self.one, NPV_SumExpression(self.mutable_l3.args + [1])), + # 4: + ( + self.mutable_l3, + self.native, + NPV_SumExpression(self.mutable_l3.args + [5]), + ), + ( + self.mutable_l3, + self.npv, + NPV_SumExpression(self.mutable_l3.args + [self.npv]), + ), + ( + self.mutable_l3, + self.param, + NPV_SumExpression(self.mutable_l3.args + [6]), + ), + ( + self.mutable_l3, + self.param_mut, + NPV_SumExpression(self.mutable_l3.args + [self.param_mut]), + ), + # 8: + ( + self.mutable_l3, + self.var, + LinearExpression(self.mutable_l3.args + [self.mon_var]), + ), + ( + self.mutable_l3, + self.mon_native, + LinearExpression(self.mutable_l3.args + [self.mon_native]), + ), + ( + self.mutable_l3, + self.mon_param, + LinearExpression(self.mutable_l3.args + [self.mon_param]), + ), + ( + self.mutable_l3, + self.mon_npv, + LinearExpression(self.mutable_l3.args + [self.mon_npv]), + ), + # 12: + ( + self.mutable_l3, + self.linear, + LinearExpression(self.linear.args + self.mutable_l3.args), + ), + ( + self.mutable_l3, + self.sum, + SumExpression(self.sum.args + self.mutable_l3.args), + ), + ( + self.mutable_l3, + self.other, + SumExpression(self.mutable_l3.args + [self.other]), + ), + (self.mutable_l3, self.mutable_l0, self.npv), + # 16: + ( + self.mutable_l3, + self.mutable_l1, + LinearExpression(self.mutable_l3.args + self.mutable_l1.args), + ), + ( + self.mutable_l3, + self.mutable_l2, + SumExpression(self.mutable_l2.args + self.mutable_l3.args), + ), + (self.mutable_l3, self.param0, self.npv), + ( + self.mutable_l3, + self.param1, + NPV_SumExpression(self.mutable_l3.args + [1]), + ), + # 20: + ( + self.mutable_l3, + self.mutable_l3, + NPV_SumExpression(self.mutable_l3.args + [self.npv]), + ), + ] + self._run_cases(tests, operator.add) + # Mutable iadd handled by separate tests + # self._run_cases(tests, operator.iadd) + + # + # + # SUBTRACTION + # + # + + def test_sub_invalid(self): + tests = [ + (self.invalid, self.invalid, NotImplemented), + (self.invalid, self.asbinary, NotImplemented), + (self.invalid, self.zero, NotImplemented), + (self.invalid, self.one, NotImplemented), + # 4: + (self.invalid, self.native, NotImplemented), + (self.invalid, self.npv, NotImplemented), + (self.invalid, self.param, NotImplemented), + (self.invalid, self.param_mut, NotImplemented), + # 8: + (self.invalid, self.var, NotImplemented), + (self.invalid, self.mon_native, NotImplemented), + (self.invalid, self.mon_param, NotImplemented), + (self.invalid, self.mon_npv, NotImplemented), + # 12: + (self.invalid, self.linear, NotImplemented), + (self.invalid, self.sum, NotImplemented), + (self.invalid, self.other, NotImplemented), + (self.invalid, self.mutable_l0, NotImplemented), + # 16: + (self.invalid, self.mutable_l1, NotImplemented), + (self.invalid, self.mutable_l2, NotImplemented), + (self.invalid, self.param0, NotImplemented), + (self.invalid, self.param1, NotImplemented), + # 20: + (self.invalid, self.mutable_l3, NotImplemented), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_asbinary(self): + tests = [ + (self.asbinary, self.invalid, NotImplemented), + # BooleanVar objects do not support addition + (self.asbinary, self.asbinary, NotImplemented), + (self.asbinary, self.zero, self.bin), + (self.asbinary, self.one, LinearExpression([self.mon_bin, -1])), + # 4: + (self.asbinary, self.native, LinearExpression([self.mon_bin, -5])), + (self.asbinary, self.npv, LinearExpression([self.mon_bin, self.minus_npv])), + (self.asbinary, self.param, LinearExpression([self.mon_bin, -6])), + ( + self.asbinary, + self.param_mut, + LinearExpression([self.mon_bin, self.minus_param_mut]), + ), + # 8: + (self.asbinary, self.var, LinearExpression([self.mon_bin, self.minus_var])), + ( + self.asbinary, + self.mon_native, + LinearExpression([self.mon_bin, self.minus_mon_native]), + ), + ( + self.asbinary, + self.mon_param, + LinearExpression([self.mon_bin, self.minus_mon_param]), + ), + ( + self.asbinary, + self.mon_npv, + LinearExpression([self.mon_bin, self.minus_mon_npv]), + ), + # 12: + (self.asbinary, self.linear, SumExpression([self.bin, self.minus_linear])), + (self.asbinary, self.sum, SumExpression([self.bin, self.minus_sum])), + (self.asbinary, self.other, SumExpression([self.bin, self.minus_other])), + (self.asbinary, self.mutable_l0, self.bin), + # 16: + ( + self.asbinary, + self.mutable_l1, + LinearExpression([self.mon_bin, self.minus_mon_npv]), + ), + ( + self.asbinary, + self.mutable_l2, + SumExpression([self.bin, self.minus_mutable_l2]), + ), + (self.asbinary, self.param0, self.bin), + (self.asbinary, self.param1, LinearExpression([self.mon_bin, -1])), + # 20: + ( + self.asbinary, + self.mutable_l3, + LinearExpression([self.mon_bin, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_zero(self): + tests = [ + (self.zero, self.invalid, NotImplemented), + (self.zero, self.asbinary, self.minus_bin), + (self.zero, self.zero, 0), + (self.zero, self.one, -1), + # 4: + (self.zero, self.native, -5), + (self.zero, self.npv, self.minus_npv), + (self.zero, self.param, -6), + (self.zero, self.param_mut, self.minus_param_mut), + # 8: + (self.zero, self.var, self.minus_var), + (self.zero, self.mon_native, self.minus_mon_native), + (self.zero, self.mon_param, self.minus_mon_param), + (self.zero, self.mon_npv, self.minus_mon_npv), + # 12: + (self.zero, self.linear, self.minus_linear), + (self.zero, self.sum, self.minus_sum), + (self.zero, self.other, self.minus_other), + (self.zero, self.mutable_l0, 0), + # 16: + (self.zero, self.mutable_l1, self.minus_mon_npv), + (self.zero, self.mutable_l2, self.minus_mutable_l2), + (self.zero, self.param0, 0), + (self.zero, self.param1, -1), + # 20: + (self.zero, self.mutable_l3, self.minus_npv), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_one(self): + tests = [ + (self.one, self.invalid, NotImplemented), + (self.one, self.asbinary, LinearExpression([1, self.minus_bin])), + (self.one, self.zero, 1), + (self.one, self.one, 0), + # 4: + (self.one, self.native, -4), + (self.one, self.npv, NPV_SumExpression([1, self.minus_npv])), + (self.one, self.param, -5), + (self.one, self.param_mut, NPV_SumExpression([1, self.minus_param_mut])), + # 8: + (self.one, self.var, LinearExpression([1, self.minus_var])), + (self.one, self.mon_native, LinearExpression([1, self.minus_mon_native])), + (self.one, self.mon_param, LinearExpression([1, self.minus_mon_param])), + (self.one, self.mon_npv, LinearExpression([1, self.minus_mon_npv])), + # 12: + (self.one, self.linear, SumExpression([1, self.minus_linear])), + (self.one, self.sum, SumExpression([1, self.minus_sum])), + (self.one, self.other, SumExpression([1, self.minus_other])), + (self.one, self.mutable_l0, 1), + # 16: + (self.one, self.mutable_l1, LinearExpression([1, self.minus_mon_npv])), + (self.one, self.mutable_l2, SumExpression([1, self.minus_mutable_l2])), + (self.one, self.param0, 1), + (self.one, self.param1, 0), + # 20: + (self.one, self.mutable_l3, NPV_SumExpression([1, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_native(self): + tests = [ + (self.native, self.invalid, NotImplemented), + (self.native, self.asbinary, LinearExpression([5, self.minus_bin])), + (self.native, self.zero, 5), + (self.native, self.one, 4), + # 4: + (self.native, self.native, 0), + (self.native, self.npv, NPV_SumExpression([5, self.minus_npv])), + (self.native, self.param, -1), + (self.native, self.param_mut, NPV_SumExpression([5, self.minus_param_mut])), + # 8: + (self.native, self.var, LinearExpression([5, self.minus_var])), + ( + self.native, + self.mon_native, + LinearExpression([5, self.minus_mon_native]), + ), + (self.native, self.mon_param, LinearExpression([5, self.minus_mon_param])), + (self.native, self.mon_npv, LinearExpression([5, self.minus_mon_npv])), + # 12: + (self.native, self.linear, SumExpression([5, self.minus_linear])), + (self.native, self.sum, SumExpression([5, self.minus_sum])), + (self.native, self.other, SumExpression([5, self.minus_other])), + (self.native, self.mutable_l0, 5), + # 16: + (self.native, self.mutable_l1, LinearExpression([5, self.minus_mon_npv])), + (self.native, self.mutable_l2, SumExpression([5, self.minus_mutable_l2])), + (self.native, self.param0, 5), + (self.native, self.param1, 4), + # 20: + (self.native, self.mutable_l3, NPV_SumExpression([5, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_npv(self): + tests = [ + (self.npv, self.invalid, NotImplemented), + (self.npv, self.asbinary, LinearExpression([self.npv, self.minus_bin])), + (self.npv, self.zero, self.npv), + (self.npv, self.one, NPV_SumExpression([self.npv, -1])), + # 4: + (self.npv, self.native, NPV_SumExpression([self.npv, -5])), + (self.npv, self.npv, NPV_SumExpression([self.npv, self.minus_npv])), + (self.npv, self.param, NPV_SumExpression([self.npv, -6])), + ( + self.npv, + self.param_mut, + NPV_SumExpression([self.npv, self.minus_param_mut]), + ), + # 8: + (self.npv, self.var, LinearExpression([self.npv, self.minus_var])), + ( + self.npv, + self.mon_native, + LinearExpression([self.npv, self.minus_mon_native]), + ), + ( + self.npv, + self.mon_param, + LinearExpression([self.npv, self.minus_mon_param]), + ), + (self.npv, self.mon_npv, LinearExpression([self.npv, self.minus_mon_npv])), + # 12: + (self.npv, self.linear, SumExpression([self.npv, self.minus_linear])), + (self.npv, self.sum, SumExpression([self.npv, self.minus_sum])), + (self.npv, self.other, SumExpression([self.npv, self.minus_other])), + (self.npv, self.mutable_l0, self.npv), + # 16: + ( + self.npv, + self.mutable_l1, + LinearExpression([self.npv, self.minus_mon_npv]), + ), + ( + self.npv, + self.mutable_l2, + SumExpression([self.npv, self.minus_mutable_l2]), + ), + (self.npv, self.param0, self.npv), + (self.npv, self.param1, NPV_SumExpression([self.npv, -1])), + # 20: + (self.npv, self.mutable_l3, NPV_SumExpression([self.npv, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_param(self): + tests = [ + (self.param, self.invalid, NotImplemented), + (self.param, self.asbinary, LinearExpression([6, self.minus_bin])), + (self.param, self.zero, 6), + (self.param, self.one, 5), + # 4: + (self.param, self.native, 1), + (self.param, self.npv, NPV_SumExpression([6, self.minus_npv])), + (self.param, self.param, 0), + (self.param, self.param_mut, NPV_SumExpression([6, self.minus_param_mut])), + # 8: + (self.param, self.var, LinearExpression([6, self.minus_var])), + (self.param, self.mon_native, LinearExpression([6, self.minus_mon_native])), + (self.param, self.mon_param, LinearExpression([6, self.minus_mon_param])), + (self.param, self.mon_npv, LinearExpression([6, self.minus_mon_npv])), + # 12: + (self.param, self.linear, SumExpression([6, self.minus_linear])), + (self.param, self.sum, SumExpression([6, self.minus_sum])), + (self.param, self.other, SumExpression([6, self.minus_other])), + (self.param, self.mutable_l0, 6), + # 16: + (self.param, self.mutable_l1, LinearExpression([6, self.minus_mon_npv])), + (self.param, self.mutable_l2, SumExpression([6, self.minus_mutable_l2])), + (self.param, self.param0, 6), + (self.param, self.param1, 5), + # 20: + (self.param, self.mutable_l3, NPV_SumExpression([6, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_param_mut(self): + tests = [ + (self.param_mut, self.invalid, NotImplemented), + ( + self.param_mut, + self.asbinary, + LinearExpression([self.param_mut, self.minus_bin]), + ), + (self.param_mut, self.zero, self.param_mut), + (self.param_mut, self.one, NPV_SumExpression([self.param_mut, -1])), + # 4: + (self.param_mut, self.native, NPV_SumExpression([self.param_mut, -5])), + ( + self.param_mut, + self.npv, + NPV_SumExpression([self.param_mut, self.minus_npv]), + ), + (self.param_mut, self.param, NPV_SumExpression([self.param_mut, -6])), + ( + self.param_mut, + self.param_mut, + NPV_SumExpression([self.param_mut, self.minus_param_mut]), + ), + # 8: + ( + self.param_mut, + self.var, + LinearExpression([self.param_mut, self.minus_var]), + ), + ( + self.param_mut, + self.mon_native, + LinearExpression([self.param_mut, self.minus_mon_native]), + ), + ( + self.param_mut, + self.mon_param, + LinearExpression([self.param_mut, self.minus_mon_param]), + ), + ( + self.param_mut, + self.mon_npv, + LinearExpression([self.param_mut, self.minus_mon_npv]), + ), + # 12: + ( + self.param_mut, + self.linear, + SumExpression([self.param_mut, self.minus_linear]), + ), + (self.param_mut, self.sum, SumExpression([self.param_mut, self.minus_sum])), + ( + self.param_mut, + self.other, + SumExpression([self.param_mut, self.minus_other]), + ), + (self.param_mut, self.mutable_l0, self.param_mut), + # 16: + ( + self.param_mut, + self.mutable_l1, + LinearExpression([self.param_mut, self.minus_mon_npv]), + ), + ( + self.param_mut, + self.mutable_l2, + SumExpression([self.param_mut, self.minus_mutable_l2]), + ), + (self.param_mut, self.param0, self.param_mut), + (self.param_mut, self.param1, NPV_SumExpression([self.param_mut, -1])), + # 20: + ( + self.param_mut, + self.mutable_l3, + NPV_SumExpression([self.param_mut, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_var(self): + tests = [ + (self.var, self.invalid, NotImplemented), + (self.var, self.asbinary, LinearExpression([self.mon_var, self.minus_bin])), + (self.var, self.zero, self.var), + (self.var, self.one, LinearExpression([self.mon_var, -1])), + # 4: + (self.var, self.native, LinearExpression([self.mon_var, -5])), + (self.var, self.npv, LinearExpression([self.mon_var, self.minus_npv])), + (self.var, self.param, LinearExpression([self.mon_var, -6])), + ( + self.var, + self.param_mut, + LinearExpression([self.mon_var, self.minus_param_mut]), + ), + # 8: + (self.var, self.var, LinearExpression([self.mon_var, self.minus_var])), + ( + self.var, + self.mon_native, + LinearExpression([self.mon_var, self.minus_mon_native]), + ), + ( + self.var, + self.mon_param, + LinearExpression([self.mon_var, self.minus_mon_param]), + ), + ( + self.var, + self.mon_npv, + LinearExpression([self.mon_var, self.minus_mon_npv]), + ), + # 12: + ( + self.var, + self.linear, + SumExpression([self.var, NegationExpression((self.linear,))]), + ), + (self.var, self.sum, SumExpression([self.var, self.minus_sum])), + (self.var, self.other, SumExpression([self.var, self.minus_other])), + (self.var, self.mutable_l0, self.var), + # 16: + ( + self.var, + self.mutable_l1, + LinearExpression([self.mon_var, self.minus_mon_npv]), + ), + ( + self.var, + self.mutable_l2, + SumExpression([self.var, self.minus_mutable_l2]), + ), + (self.var, self.param0, self.var), + (self.var, self.param1, LinearExpression([self.mon_var, -1])), + # 20: + ( + self.var, + self.mutable_l3, + LinearExpression([self.mon_var, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_mon_native(self): + tests = [ + (self.mon_native, self.invalid, NotImplemented), + ( + self.mon_native, + self.asbinary, + LinearExpression([self.mon_native, self.minus_bin]), + ), + (self.mon_native, self.zero, self.mon_native), + (self.mon_native, self.one, LinearExpression([self.mon_native, -1])), + # 4: + (self.mon_native, self.native, LinearExpression([self.mon_native, -5])), + ( + self.mon_native, + self.npv, + LinearExpression([self.mon_native, self.minus_npv]), + ), + (self.mon_native, self.param, LinearExpression([self.mon_native, -6])), + ( + self.mon_native, + self.param_mut, + LinearExpression([self.mon_native, self.minus_param_mut]), + ), + # 8: + ( + self.mon_native, + self.var, + LinearExpression([self.mon_native, self.minus_var]), + ), + ( + self.mon_native, + self.mon_native, + LinearExpression([self.mon_native, self.minus_mon_native]), + ), + ( + self.mon_native, + self.mon_param, + LinearExpression([self.mon_native, self.minus_mon_param]), + ), + ( + self.mon_native, + self.mon_npv, + LinearExpression([self.mon_native, self.minus_mon_npv]), + ), + # 12: + ( + self.mon_native, + self.linear, + SumExpression([self.mon_native, self.minus_linear]), + ), + ( + self.mon_native, + self.sum, + SumExpression([self.mon_native, self.minus_sum]), + ), + ( + self.mon_native, + self.other, + SumExpression([self.mon_native, self.minus_other]), + ), + (self.mon_native, self.mutable_l0, self.mon_native), + # 16: + ( + self.mon_native, + self.mutable_l1, + LinearExpression([self.mon_native, self.minus_mon_npv]), + ), + ( + self.mon_native, + self.mutable_l2, + SumExpression([self.mon_native, self.minus_mutable_l2]), + ), + (self.mon_native, self.param0, self.mon_native), + (self.mon_native, self.param1, LinearExpression([self.mon_native, -1])), + # 20: + ( + self.mon_native, + self.mutable_l3, + LinearExpression([self.mon_native, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_mon_param(self): + tests = [ + (self.mon_param, self.invalid, NotImplemented), + ( + self.mon_param, + self.asbinary, + LinearExpression([self.mon_param, self.minus_bin]), + ), + (self.mon_param, self.zero, self.mon_param), + (self.mon_param, self.one, LinearExpression([self.mon_param, -1])), + # 4: + (self.mon_param, self.native, LinearExpression([self.mon_param, -5])), + ( + self.mon_param, + self.npv, + LinearExpression([self.mon_param, self.minus_npv]), + ), + (self.mon_param, self.param, LinearExpression([self.mon_param, -6])), + ( + self.mon_param, + self.param_mut, + LinearExpression([self.mon_param, self.minus_param_mut]), + ), + # 8: + ( + self.mon_param, + self.var, + LinearExpression([self.mon_param, self.minus_var]), + ), + ( + self.mon_param, + self.mon_native, + LinearExpression([self.mon_param, self.minus_mon_native]), + ), + ( + self.mon_param, + self.mon_param, + LinearExpression([self.mon_param, self.minus_mon_param]), + ), + ( + self.mon_param, + self.mon_npv, + LinearExpression([self.mon_param, self.minus_mon_npv]), + ), + # 12: + ( + self.mon_param, + self.linear, + SumExpression([self.mon_param, self.minus_linear]), + ), + (self.mon_param, self.sum, SumExpression([self.mon_param, self.minus_sum])), + ( + self.mon_param, + self.other, + SumExpression([self.mon_param, self.minus_other]), + ), + (self.mon_param, self.mutable_l0, self.mon_param), + # 16: + ( + self.mon_param, + self.mutable_l1, + LinearExpression([self.mon_param, self.minus_mon_npv]), + ), + ( + self.mon_param, + self.mutable_l2, + SumExpression([self.mon_param, self.minus_mutable_l2]), + ), + (self.mon_param, self.param0, self.mon_param), + (self.mon_param, self.param1, LinearExpression([self.mon_param, -1])), + # 20: + ( + self.mon_param, + self.mutable_l3, + LinearExpression([self.mon_param, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_mon_npv(self): + tests = [ + (self.mon_npv, self.invalid, NotImplemented), + ( + self.mon_npv, + self.asbinary, + LinearExpression([self.mon_npv, self.minus_bin]), + ), + (self.mon_npv, self.zero, self.mon_npv), + (self.mon_npv, self.one, LinearExpression([self.mon_npv, -1])), + # 4: + (self.mon_npv, self.native, LinearExpression([self.mon_npv, -5])), + (self.mon_npv, self.npv, LinearExpression([self.mon_npv, self.minus_npv])), + (self.mon_npv, self.param, LinearExpression([self.mon_npv, -6])), + ( + self.mon_npv, + self.param_mut, + LinearExpression([self.mon_npv, self.minus_param_mut]), + ), + # 8: + (self.mon_npv, self.var, LinearExpression([self.mon_npv, self.minus_var])), + ( + self.mon_npv, + self.mon_native, + LinearExpression([self.mon_npv, self.minus_mon_native]), + ), + ( + self.mon_npv, + self.mon_param, + LinearExpression([self.mon_npv, self.minus_mon_param]), + ), + ( + self.mon_npv, + self.mon_npv, + LinearExpression([self.mon_npv, self.minus_mon_npv]), + ), + # 12: + ( + self.mon_npv, + self.linear, + SumExpression([self.mon_npv, self.minus_linear]), + ), + (self.mon_npv, self.sum, SumExpression([self.mon_npv, self.minus_sum])), + (self.mon_npv, self.other, SumExpression([self.mon_npv, self.minus_other])), + (self.mon_npv, self.mutable_l0, self.mon_npv), + # 16: + ( + self.mon_npv, + self.mutable_l1, + LinearExpression([self.mon_npv, self.minus_mon_npv]), + ), + ( + self.mon_npv, + self.mutable_l2, + SumExpression([self.mon_npv, self.minus_mutable_l2]), + ), + (self.mon_npv, self.param0, self.mon_npv), + (self.mon_npv, self.param1, LinearExpression([self.mon_npv, -1])), + # 20: + ( + self.mon_npv, + self.mutable_l3, + LinearExpression([self.mon_npv, self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_linear(self): + tests = [ + (self.linear, self.invalid, NotImplemented), + ( + self.linear, + self.asbinary, + LinearExpression(self.linear.args + [self.minus_bin]), + ), + (self.linear, self.zero, self.linear), + (self.linear, self.one, LinearExpression(self.linear.args + [-1])), + # 4: + (self.linear, self.native, LinearExpression(self.linear.args + [-5])), + ( + self.linear, + self.npv, + LinearExpression(self.linear.args + [self.minus_npv]), + ), + (self.linear, self.param, LinearExpression(self.linear.args + [-6])), + ( + self.linear, + self.param_mut, + LinearExpression(self.linear.args + [self.minus_param_mut]), + ), + # 8: + ( + self.linear, + self.var, + LinearExpression(self.linear.args + [self.minus_var]), + ), + ( + self.linear, + self.mon_native, + LinearExpression(self.linear.args + [self.minus_mon_native]), + ), + ( + self.linear, + self.mon_param, + LinearExpression(self.linear.args + [self.minus_mon_param]), + ), + ( + self.linear, + self.mon_npv, + LinearExpression(self.linear.args + [self.minus_mon_npv]), + ), + # 12: + (self.linear, self.linear, SumExpression([self.linear, self.minus_linear])), + (self.linear, self.sum, SumExpression([self.linear, self.minus_sum])), + (self.linear, self.other, SumExpression([self.linear, self.minus_other])), + (self.linear, self.mutable_l0, self.linear), + # 16: + ( + self.linear, + self.mutable_l1, + LinearExpression(self.linear.args + [self.minus_mon_npv]), + ), + ( + self.linear, + self.mutable_l2, + SumExpression([self.linear, self.minus_mutable_l2]), + ), + (self.linear, self.param0, self.linear), + (self.linear, self.param1, LinearExpression(self.linear.args + [-1])), + # 20: + ( + self.linear, + self.mutable_l3, + LinearExpression(self.linear.args + [self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_sum(self): + tests = [ + (self.sum, self.invalid, NotImplemented), + (self.sum, self.asbinary, SumExpression(self.sum.args + [self.minus_bin])), + (self.sum, self.zero, self.sum), + (self.sum, self.one, SumExpression(self.sum.args + [-1])), + # 4: + (self.sum, self.native, SumExpression(self.sum.args + [-5])), + (self.sum, self.npv, SumExpression(self.sum.args + [self.minus_npv])), + (self.sum, self.param, SumExpression(self.sum.args + [-6])), + ( + self.sum, + self.param_mut, + SumExpression(self.sum.args + [self.minus_param_mut]), + ), + # 8: + (self.sum, self.var, SumExpression(self.sum.args + [self.minus_var])), + ( + self.sum, + self.mon_native, + SumExpression(self.sum.args + [self.minus_mon_native]), + ), + ( + self.sum, + self.mon_param, + SumExpression(self.sum.args + [self.minus_mon_param]), + ), + ( + self.sum, + self.mon_npv, + SumExpression(self.sum.args + [self.minus_mon_npv]), + ), + # 12: + (self.sum, self.linear, SumExpression(self.sum.args + [self.minus_linear])), + (self.sum, self.sum, SumExpression(self.sum.args + [self.minus_sum])), + (self.sum, self.other, SumExpression(self.sum.args + [self.minus_other])), + (self.sum, self.mutable_l0, self.sum), + # 16: + ( + self.sum, + self.mutable_l1, + SumExpression(self.sum.args + [self.minus_mon_npv]), + ), + ( + self.sum, + self.mutable_l2, + SumExpression(self.sum.args + [self.minus_mutable_l2]), + ), + (self.sum, self.param0, self.sum), + (self.sum, self.param1, SumExpression(self.sum.args + [-1])), + # 20: + ( + self.sum, + self.mutable_l3, + SumExpression(self.sum.args + [self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_other(self): + tests = [ + (self.other, self.invalid, NotImplemented), + (self.other, self.asbinary, SumExpression([self.other, self.minus_bin])), + (self.other, self.zero, self.other), + (self.other, self.one, SumExpression([self.other, -1])), + # 4: + (self.other, self.native, SumExpression([self.other, -5])), + (self.other, self.npv, SumExpression([self.other, self.minus_npv])), + (self.other, self.param, SumExpression([self.other, -6])), + ( + self.other, + self.param_mut, + SumExpression([self.other, self.minus_param_mut]), + ), + # 8: + (self.other, self.var, SumExpression([self.other, self.minus_var])), + ( + self.other, + self.mon_native, + SumExpression([self.other, self.minus_mon_native]), + ), + ( + self.other, + self.mon_param, + SumExpression([self.other, self.minus_mon_param]), + ), + (self.other, self.mon_npv, SumExpression([self.other, self.minus_mon_npv])), + # 12: + (self.other, self.linear, SumExpression([self.other, self.minus_linear])), + (self.other, self.sum, SumExpression([self.other, self.minus_sum])), + (self.other, self.other, SumExpression([self.other, self.minus_other])), + (self.other, self.mutable_l0, self.other), + # 16: + ( + self.other, + self.mutable_l1, + SumExpression([self.other, self.minus_mon_npv]), + ), + ( + self.other, + self.mutable_l2, + SumExpression([self.other, self.minus_mutable_l2]), + ), + (self.other, self.param0, self.other), + (self.other, self.param1, SumExpression([self.other, -1])), + # 20: + (self.other, self.mutable_l3, SumExpression([self.other, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_mutable_l0(self): + tests = [ + (self.mutable_l0, self.invalid, NotImplemented), + (self.mutable_l0, self.asbinary, self.minus_bin), + (self.mutable_l0, self.zero, 0), + (self.mutable_l0, self.one, -1), + # 4: + (self.mutable_l0, self.native, -5), + (self.mutable_l0, self.npv, self.minus_npv), + (self.mutable_l0, self.param, -6), + (self.mutable_l0, self.param_mut, self.minus_param_mut), + # 8: + (self.mutable_l0, self.var, self.minus_var), + (self.mutable_l0, self.mon_native, self.minus_mon_native), + (self.mutable_l0, self.mon_param, self.minus_mon_param), + (self.mutable_l0, self.mon_npv, self.minus_mon_npv), + # 12: + (self.mutable_l0, self.linear, self.minus_linear), + (self.mutable_l0, self.sum, self.minus_sum), + (self.mutable_l0, self.other, self.minus_other), + (self.mutable_l0, self.mutable_l0, self.mutable_l0), + # 16: + (self.mutable_l0, self.mutable_l1, self.minus_mon_npv), + (self.mutable_l0, self.mutable_l2, self.minus_mutable_l2), + (self.mutable_l0, self.param0, 0), + (self.mutable_l0, self.param1, -1), + # 20: + (self.mutable_l0, self.mutable_l3, self.minus_npv), + ] + self._run_cases(tests, operator.sub) + # Mutable isub handled by separate tests + # self._run_cases(tests, operator.isub) + + def test_sub_mutable_l1(self): + tests = [ + (self.mutable_l1, self.invalid, NotImplemented), + ( + self.mutable_l1, + self.asbinary, + LinearExpression(self.mutable_l1.args + [self.minus_bin]), + ), + (self.mutable_l1, self.zero, self.mon_npv), + (self.mutable_l1, self.one, LinearExpression(self.mutable_l1.args + [-1])), + # 4: + ( + self.mutable_l1, + self.native, + LinearExpression(self.mutable_l1.args + [-5]), + ), + ( + self.mutable_l1, + self.npv, + LinearExpression(self.mutable_l1.args + [self.minus_npv]), + ), + ( + self.mutable_l1, + self.param, + LinearExpression(self.mutable_l1.args + [-6]), + ), + ( + self.mutable_l1, + self.param_mut, + LinearExpression(self.mutable_l1.args + [self.minus_param_mut]), + ), + # 8: + ( + self.mutable_l1, + self.var, + LinearExpression(self.mutable_l1.args + [self.minus_var]), + ), + ( + self.mutable_l1, + self.mon_native, + LinearExpression(self.mutable_l1.args + [self.minus_mon_native]), + ), + ( + self.mutable_l1, + self.mon_param, + LinearExpression(self.mutable_l1.args + [self.minus_mon_param]), + ), + ( + self.mutable_l1, + self.mon_npv, + LinearExpression(self.mutable_l1.args + [self.minus_mon_npv]), + ), + # 12: + ( + self.mutable_l1, + self.linear, + SumExpression(self.mutable_l1.args + [self.minus_linear]), + ), + ( + self.mutable_l1, + self.sum, + SumExpression(self.mutable_l1.args + [self.minus_sum]), + ), + ( + self.mutable_l1, + self.other, + SumExpression(self.mutable_l1.args + [self.minus_other]), + ), + (self.mutable_l1, self.mutable_l0, self.mon_npv), + # 16: + ( + self.mutable_l1, + self.mutable_l1, + LinearExpression(self.mutable_l1.args + [self.minus_mon_npv]), + ), + ( + self.mutable_l1, + self.mutable_l2, + SumExpression(self.mutable_l1.args + [self.minus_mutable_l2]), + ), + (self.mutable_l1, self.param0, self.mon_npv), + ( + self.mutable_l1, + self.param1, + LinearExpression(self.mutable_l1.args + [-1]), + ), + # 20: + ( + self.mutable_l1, + self.mutable_l3, + LinearExpression(self.mutable_l1.args + [self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + # Mutable isub handled by separate tests + # self._run_cases(tests, operator.isub) + + def test_sub_mutable_l2(self): + tests = [ + (self.mutable_l2, self.invalid, NotImplemented), + ( + self.mutable_l2, + self.asbinary, + SumExpression(self.mutable_l2.args + [self.minus_bin]), + ), + (self.mutable_l2, self.zero, self.mutable_l2), + (self.mutable_l2, self.one, SumExpression(self.mutable_l2.args + [-1])), + # 4: + (self.mutable_l2, self.native, SumExpression(self.mutable_l2.args + [-5])), + ( + self.mutable_l2, + self.npv, + SumExpression(self.mutable_l2.args + [self.minus_npv]), + ), + (self.mutable_l2, self.param, SumExpression(self.mutable_l2.args + [-6])), + ( + self.mutable_l2, + self.param_mut, + SumExpression(self.mutable_l2.args + [self.minus_param_mut]), + ), + # 8: + ( + self.mutable_l2, + self.var, + SumExpression(self.mutable_l2.args + [self.minus_var]), + ), + ( + self.mutable_l2, + self.mon_native, + SumExpression(self.mutable_l2.args + [self.minus_mon_native]), + ), + ( + self.mutable_l2, + self.mon_param, + SumExpression(self.mutable_l2.args + [self.minus_mon_param]), + ), + ( + self.mutable_l2, + self.mon_npv, + SumExpression(self.mutable_l2.args + [self.minus_mon_npv]), + ), + # 12: + ( + self.mutable_l2, + self.linear, + SumExpression(self.mutable_l2.args + [self.minus_linear]), + ), + ( + self.mutable_l2, + self.sum, + SumExpression(self.mutable_l2.args + [self.minus_sum]), + ), + ( + self.mutable_l2, + self.other, + SumExpression(self.mutable_l2.args + [self.minus_other]), + ), + (self.mutable_l2, self.mutable_l0, self.mutable_l2), + # 16: + ( + self.mutable_l2, + self.mutable_l1, + SumExpression(self.mutable_l2.args + [self.minus_mon_npv]), + ), + ( + self.mutable_l2, + self.mutable_l2, + SumExpression(self.mutable_l2.args + [self.minus_mutable_l2]), + ), + (self.mutable_l2, self.param0, self.mutable_l2), + (self.mutable_l2, self.param1, SumExpression(self.mutable_l2.args + [-1])), + # 20: + ( + self.mutable_l2, + self.mutable_l3, + SumExpression(self.mutable_l2.args + [self.minus_npv]), + ), + ] + self._run_cases(tests, operator.sub) + # Mutable isub handled by separate tests + # self._run_cases(tests, operator.isub) + + def test_sub_param0(self): + tests = [ + (self.param0, self.invalid, NotImplemented), + (self.param0, self.asbinary, self.minus_bin), + (self.param0, self.zero, 0), + (self.param0, self.one, -1), + # 4: + (self.param0, self.native, -5), + (self.param0, self.npv, self.minus_npv), + (self.param0, self.param, -6), + (self.param0, self.param_mut, self.minus_param_mut), + # 8: + (self.param0, self.var, self.minus_var), + (self.param0, self.mon_native, self.minus_mon_native), + (self.param0, self.mon_param, self.minus_mon_param), + (self.param0, self.mon_npv, self.minus_mon_npv), + # 12: + (self.param0, self.linear, self.minus_linear), + (self.param0, self.sum, self.minus_sum), + (self.param0, self.other, self.minus_other), + (self.param0, self.mutable_l0, 0), + # 16: + (self.param0, self.mutable_l1, self.minus_mon_npv), + (self.param0, self.mutable_l2, self.minus_mutable_l2), + (self.param0, self.param0, 0), + (self.param0, self.param1, -1), + # 20: + (self.param0, self.mutable_l3, self.minus_npv), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_param1(self): + tests = [ + (self.param1, self.invalid, NotImplemented), + (self.param1, self.asbinary, LinearExpression([1, self.minus_bin])), + (self.param1, self.zero, 1), + (self.param1, self.one, 0), + # 4: + (self.param1, self.native, -4), + (self.param1, self.npv, NPV_SumExpression([1, self.minus_npv])), + (self.param1, self.param, -5), + (self.param1, self.param_mut, NPV_SumExpression([1, self.minus_param_mut])), + # 8: + (self.param1, self.var, LinearExpression([1, self.minus_var])), + ( + self.param1, + self.mon_native, + LinearExpression([1, self.minus_mon_native]), + ), + (self.param1, self.mon_param, LinearExpression([1, self.minus_mon_param])), + (self.param1, self.mon_npv, LinearExpression([1, self.minus_mon_npv])), + # 12: + (self.param1, self.linear, SumExpression([1, self.minus_linear])), + (self.param1, self.sum, SumExpression([1, self.minus_sum])), + (self.param1, self.other, SumExpression([1, self.minus_other])), + (self.param1, self.mutable_l0, 1), + # 16: + (self.param1, self.mutable_l1, LinearExpression([1, self.minus_mon_npv])), + (self.param1, self.mutable_l2, SumExpression([1, self.minus_mutable_l2])), + (self.param1, self.param0, 1), + (self.param1, self.param1, 0), + # 20: + (self.param1, self.mutable_l3, NPV_SumExpression([1, self.minus_npv])), + ] + self._run_cases(tests, operator.sub) + self._run_cases(tests, operator.isub) + + def test_sub_mutable_l3(self): + tests = [ + (self.mutable_l3, self.invalid, NotImplemented), + ( + self.mutable_l3, + self.asbinary, + LinearExpression(self.mutable_l3.args + [self.minus_bin]), + ), + (self.mutable_l3, self.zero, self.npv), + (self.mutable_l3, self.one, NPV_SumExpression(self.mutable_l3.args + [-1])), + # 4: + ( + self.mutable_l3, + self.native, + NPV_SumExpression(self.mutable_l3.args + [-5]), + ), + ( + self.mutable_l3, + self.npv, + NPV_SumExpression(self.mutable_l3.args + [self.minus_npv]), + ), + ( + self.mutable_l3, + self.param, + NPV_SumExpression(self.mutable_l3.args + [-6]), + ), + ( + self.mutable_l3, + self.param_mut, + NPV_SumExpression(self.mutable_l3.args + [self.minus_param_mut]), + ), + # 8: + ( + self.mutable_l3, + self.var, + LinearExpression(self.mutable_l3.args + [self.minus_var]), + ), + ( + self.mutable_l3, + self.mon_native, + LinearExpression(self.mutable_l3.args + [self.minus_mon_native]), + ), + ( + self.mutable_l3, + self.mon_param, + LinearExpression(self.mutable_l3.args + [self.minus_mon_param]), + ), + ( + self.mutable_l3, + self.mon_npv, + LinearExpression(self.mutable_l3.args + [self.minus_mon_npv]), + ), + # 12: + ( + self.mutable_l3, + self.linear, + SumExpression(self.mutable_l3.args + [self.minus_linear]), + ), + ( + self.mutable_l3, + self.sum, + SumExpression(self.mutable_l3.args + [self.minus_sum]), + ), + ( + self.mutable_l3, + self.other, + SumExpression(self.mutable_l3.args + [self.minus_other]), + ), + (self.mutable_l3, self.mutable_l0, self.npv), + # 16: + ( + self.mutable_l3, + self.mutable_l1, + LinearExpression(self.mutable_l3.args + [self.minus_mon_npv]), + ), + ( + self.mutable_l3, + self.mutable_l2, + SumExpression(self.mutable_l3.args + [self.minus_mutable_l2]), + ), + (self.mutable_l3, self.param0, self.npv), + ( + self.mutable_l3, + self.param1, + NPV_SumExpression(self.mutable_l3.args + [-1]), + ), + # 20: + # Note that because the mutable is resolved to a NPV_Sum in + # the negation, the 1-term summation for the first arg is + # not resolved to a bare term + ( + self.mutable_l3, + self.mutable_l3, + NPV_SumExpression( + [NPV_SumExpression(self.mutable_l3.args), self.minus_npv] + ), + ), + ] + self._run_cases(tests, operator.sub) + # Mutable isub handled by separate tests + # self._run_cases(tests, operator.isub) + + # + # + # MULTIPLICATION + # + # + + def test_mul_invalid(self): + tests = [ + (self.invalid, self.invalid, NotImplemented), + (self.invalid, self.asbinary, NotImplemented), + # "invalid(str) * {0, 1, native}" are legitimate Python + # operations and should never hit the Pyomo expression + # system + (self.invalid, self.zero, self.SKIP), + (self.invalid, self.one, self.SKIP), + # 4: + (self.invalid, self.native, self.SKIP), + (self.invalid, self.npv, NotImplemented), + (self.invalid, self.param, NotImplemented), + (self.invalid, self.param_mut, NotImplemented), + # 8: + (self.invalid, self.var, NotImplemented), + (self.invalid, self.mon_native, NotImplemented), + (self.invalid, self.mon_param, NotImplemented), + (self.invalid, self.mon_npv, NotImplemented), + # 12: + (self.invalid, self.linear, NotImplemented), + (self.invalid, self.sum, NotImplemented), + (self.invalid, self.other, NotImplemented), + (self.invalid, self.mutable_l0, NotImplemented), + # 16: + (self.invalid, self.mutable_l1, NotImplemented), + (self.invalid, self.mutable_l2, NotImplemented), + (self.invalid, self.param0, NotImplemented), + (self.invalid, self.param1, NotImplemented), + # 20: + (self.invalid, self.mutable_l3, NotImplemented), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_asbinary(self): + tests = [ + (self.asbinary, self.invalid, NotImplemented), + # BooleanVar objects do not support multiplication + (self.asbinary, self.asbinary, NotImplemented), + (self.asbinary, self.zero, 0), + (self.asbinary, self.one, self.bin), + # 4: + (self.asbinary, self.native, MonomialTermExpression((5, self.bin))), + (self.asbinary, self.npv, MonomialTermExpression((self.npv, self.bin))), + (self.asbinary, self.param, MonomialTermExpression((6, self.bin))), + ( + self.asbinary, + self.param_mut, + MonomialTermExpression((self.param_mut, self.bin)), + ), + # 8: + (self.asbinary, self.var, ProductExpression((self.bin, self.var))), + ( + self.asbinary, + self.mon_native, + ProductExpression((self.bin, self.mon_native)), + ), + ( + self.asbinary, + self.mon_param, + ProductExpression((self.bin, self.mon_param)), + ), + (self.asbinary, self.mon_npv, ProductExpression((self.bin, self.mon_npv))), + # 12: + (self.asbinary, self.linear, ProductExpression((self.bin, self.linear))), + (self.asbinary, self.sum, ProductExpression((self.bin, self.sum))), + (self.asbinary, self.other, ProductExpression((self.bin, self.other))), + (self.asbinary, self.mutable_l0, 0), + # 16: + ( + self.asbinary, + self.mutable_l1, + ProductExpression((self.bin, self.mon_npv)), + ), + ( + self.asbinary, + self.mutable_l2, + ProductExpression((self.bin, self.mutable_l2)), + ), + (self.asbinary, self.param0, 0), + (self.asbinary, self.param1, self.bin), + # 20: + ( + self.asbinary, + self.mutable_l3, + MonomialTermExpression((self.npv, self.bin)), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_zero(self): + tests = [ + # "Zero * invalid(str)" is a legitimate Python operation and + # should never hit the Pyomo expression system + (self.zero, self.invalid, self.SKIP), + (self.zero, self.asbinary, 0), + (self.zero, self.zero, 0), + (self.zero, self.one, 0), + # 4: + (self.zero, self.native, 0), + (self.zero, self.npv, 0), + (self.zero, self.param, 0), + (self.zero, self.param_mut, 0), + # 8: + (self.zero, self.var, 0), + (self.zero, self.mon_native, 0), + (self.zero, self.mon_param, 0), + (self.zero, self.mon_npv, 0), + # 12: + (self.zero, self.linear, 0), + (self.zero, self.sum, 0), + (self.zero, self.other, 0), + (self.zero, self.mutable_l0, 0), + # 16: + (self.zero, self.mutable_l1, 0), + (self.zero, self.mutable_l2, 0), + (self.zero, self.param0, 0), + (self.zero, self.param1, 0), + # 20: + (self.zero, self.mutable_l3, 0), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_one(self): + tests = [ + # "One * invalid(str)" is a legitimate Python operation and + # should never hit the Pyomo expression system + (self.one, self.invalid, self.SKIP), + (self.one, self.asbinary, self.bin), + (self.one, self.zero, 0), + (self.one, self.one, 1), + # 4: + (self.one, self.native, 5), + (self.one, self.npv, self.npv), + (self.one, self.param, self.param), + (self.one, self.param_mut, self.param_mut), + # 8: + (self.one, self.var, self.var), + (self.one, self.mon_native, self.mon_native), + (self.one, self.mon_param, self.mon_param), + (self.one, self.mon_npv, self.mon_npv), + # 12: + (self.one, self.linear, self.linear), + (self.one, self.sum, self.sum), + (self.one, self.other, self.other), + (self.one, self.mutable_l0, 0), + # 16: + (self.one, self.mutable_l1, self.mon_npv), + (self.one, self.mutable_l2, self.mutable_l2), + (self.one, self.param0, self.param0), + (self.one, self.param1, self.param1), + # 20: + (self.one, self.mutable_l3, self.npv), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_native(self): + tests = [ + # "Native * invalid(str) is a legitimate Python operation and + # should never hit the Pyomo expression system + (self.native, self.invalid, self.SKIP), + (self.native, self.asbinary, MonomialTermExpression((5, self.bin))), + (self.native, self.zero, 0), + (self.native, self.one, 5), + # 4: + (self.native, self.native, 25), + (self.native, self.npv, NPV_ProductExpression((5, self.npv))), + (self.native, self.param, 30), + (self.native, self.param_mut, NPV_ProductExpression((5, self.param_mut))), + # 8: + (self.native, self.var, MonomialTermExpression((5, self.var))), + ( + self.native, + self.mon_native, + MonomialTermExpression((15, self.mon_native.arg(1))), + ), + ( + self.native, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((5, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.native, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((5, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.native, self.linear, ProductExpression((5, self.linear))), + (self.native, self.sum, ProductExpression((5, self.sum))), + (self.native, self.other, ProductExpression((5, self.other))), + (self.native, self.mutable_l0, 0), + # 16: + ( + self.native, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((5, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + (self.native, self.mutable_l2, ProductExpression((5, self.mutable_l2))), + (self.native, self.param0, 0), + (self.native, self.param1, 5), + # 20: + (self.native, self.mutable_l3, NPV_ProductExpression((5, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_npv(self): + tests = [ + (self.npv, self.invalid, NotImplemented), + (self.npv, self.asbinary, MonomialTermExpression((self.npv, self.bin))), + (self.npv, self.zero, 0), + (self.npv, self.one, self.npv), + # 4: + (self.npv, self.native, NPV_ProductExpression((self.npv, 5))), + (self.npv, self.npv, NPV_ProductExpression((self.npv, self.npv))), + (self.npv, self.param, NPV_ProductExpression((self.npv, 6))), + ( + self.npv, + self.param_mut, + NPV_ProductExpression((self.npv, self.param_mut)), + ), + # 8: + (self.npv, self.var, MonomialTermExpression((self.npv, self.var))), + ( + self.npv, + self.mon_native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_native.arg(0))), + self.mon_native.arg(1), + ) + ), + ), + ( + self.npv, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.npv, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.npv, self.linear, ProductExpression((self.npv, self.linear))), + (self.npv, self.sum, ProductExpression((self.npv, self.sum))), + (self.npv, self.other, ProductExpression((self.npv, self.other))), + (self.npv, self.mutable_l0, 0), + # 16: + ( + self.npv, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + (self.npv, self.mutable_l2, ProductExpression((self.npv, self.mutable_l2))), + (self.npv, self.param0, 0), + (self.npv, self.param1, self.npv), + # 20: + (self.npv, self.mutable_l3, NPV_ProductExpression((self.npv, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_param(self): + tests = [ + (self.param, self.invalid, NotImplemented), + (self.param, self.asbinary, MonomialTermExpression((6, self.bin))), + (self.param, self.zero, 0), + (self.param, self.one, 6), + # 4: + (self.param, self.native, 30), + (self.param, self.npv, NPV_ProductExpression((6, self.npv))), + (self.param, self.param, 36), + (self.param, self.param_mut, NPV_ProductExpression((6, self.param_mut))), + # 8: + (self.param, self.var, MonomialTermExpression((6, self.var))), + ( + self.param, + self.mon_native, + MonomialTermExpression((18, self.mon_native.arg(1))), + ), + ( + self.param, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((6, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.param, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((6, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.param, self.linear, ProductExpression((6, self.linear))), + (self.param, self.sum, ProductExpression((6, self.sum))), + (self.param, self.other, ProductExpression((6, self.other))), + (self.param, self.mutable_l0, 0), + # 16: + ( + self.param, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((6, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + (self.param, self.mutable_l2, ProductExpression((6, self.mutable_l2))), + (self.param, self.param0, 0), + (self.param, self.param1, 6), + # 20: + (self.param, self.mutable_l3, NPV_ProductExpression((6, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_param_mut(self): + tests = [ + (self.param_mut, self.invalid, NotImplemented), + ( + self.param_mut, + self.asbinary, + MonomialTermExpression((self.param_mut, self.bin)), + ), + (self.param_mut, self.zero, 0), + (self.param_mut, self.one, self.param_mut), + # 4: + (self.param_mut, self.native, NPV_ProductExpression((self.param_mut, 5))), + ( + self.param_mut, + self.npv, + NPV_ProductExpression((self.param_mut, self.npv)), + ), + (self.param_mut, self.param, NPV_ProductExpression((self.param_mut, 6))), + ( + self.param_mut, + self.param_mut, + NPV_ProductExpression((self.param_mut, self.param_mut)), + ), + # 8: + ( + self.param_mut, + self.var, + MonomialTermExpression((self.param_mut, self.var)), + ), + ( + self.param_mut, + self.mon_native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.param_mut, self.mon_native.arg(0))), + self.mon_native.arg(1), + ) + ), + ), + ( + self.param_mut, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.param_mut, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.param_mut, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.param_mut, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + ( + self.param_mut, + self.linear, + ProductExpression((self.param_mut, self.linear)), + ), + (self.param_mut, self.sum, ProductExpression((self.param_mut, self.sum))), + ( + self.param_mut, + self.other, + ProductExpression((self.param_mut, self.other)), + ), + (self.param_mut, self.mutable_l0, 0), + # 16: + ( + self.param_mut, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((self.param_mut, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.param_mut, + self.mutable_l2, + ProductExpression((self.param_mut, self.mutable_l2)), + ), + (self.param_mut, self.param0, 0), + (self.param_mut, self.param1, self.param_mut), + # 20: + ( + self.param_mut, + self.mutable_l3, + NPV_ProductExpression((self.param_mut, self.npv)), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_var(self): + tests = [ + (self.var, self.invalid, NotImplemented), + (self.var, self.asbinary, ProductExpression((self.var, self.bin))), + (self.var, self.zero, 0), + (self.var, self.one, self.var), + # 4: + (self.var, self.native, MonomialTermExpression((5, self.var))), + (self.var, self.npv, MonomialTermExpression((self.npv, self.var))), + (self.var, self.param, MonomialTermExpression((6, self.var))), + ( + self.var, + self.param_mut, + MonomialTermExpression((self.param_mut, self.var)), + ), + # 8: + (self.var, self.var, ProductExpression((self.var, self.var))), + (self.var, self.mon_native, ProductExpression((self.var, self.mon_native))), + (self.var, self.mon_param, ProductExpression((self.var, self.mon_param))), + (self.var, self.mon_npv, ProductExpression((self.var, self.mon_npv))), + # 12: + (self.var, self.linear, ProductExpression((self.var, self.linear))), + (self.var, self.sum, ProductExpression((self.var, self.sum))), + (self.var, self.other, ProductExpression((self.var, self.other))), + (self.var, self.mutable_l0, 0), + # 16: + (self.var, self.mutable_l1, ProductExpression((self.var, self.mon_npv))), + (self.var, self.mutable_l2, ProductExpression((self.var, self.mutable_l2))), + (self.var, self.param0, 0), + (self.var, self.param1, self.var), + # 20: + (self.var, self.mutable_l3, MonomialTermExpression((self.npv, self.var))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mon_native(self): + tests = [ + (self.mon_native, self.invalid, NotImplemented), + ( + self.mon_native, + self.asbinary, + ProductExpression((self.mon_native, self.bin)), + ), + (self.mon_native, self.zero, 0), + (self.mon_native, self.one, self.mon_native), + # 4: + ( + self.mon_native, + self.native, + MonomialTermExpression((15, self.mon_native.arg(1))), + ), + ( + self.mon_native, + self.npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_native.arg(0), self.npv)), + self.mon_native.arg(1), + ) + ), + ), + ( + self.mon_native, + self.param, + MonomialTermExpression((18, self.mon_native.arg(1))), + ), + ( + self.mon_native, + self.param_mut, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_native.arg(0), self.param_mut)), + self.mon_native.arg(1), + ) + ), + ), + # 8: + (self.mon_native, self.var, ProductExpression((self.mon_native, self.var))), + ( + self.mon_native, + self.mon_native, + ProductExpression((self.mon_native, self.mon_native)), + ), + ( + self.mon_native, + self.mon_param, + ProductExpression((self.mon_native, self.mon_param)), + ), + ( + self.mon_native, + self.mon_npv, + ProductExpression((self.mon_native, self.mon_npv)), + ), + # 12: + ( + self.mon_native, + self.linear, + ProductExpression((self.mon_native, self.linear)), + ), + (self.mon_native, self.sum, ProductExpression((self.mon_native, self.sum))), + ( + self.mon_native, + self.other, + ProductExpression((self.mon_native, self.other)), + ), + (self.mon_native, self.mutable_l0, 0), + # 16: + ( + self.mon_native, + self.mutable_l1, + ProductExpression((self.mon_native, self.mon_npv)), + ), + ( + self.mon_native, + self.mutable_l2, + ProductExpression((self.mon_native, self.mutable_l2)), + ), + (self.mon_native, self.param0, 0), + (self.mon_native, self.param1, self.mon_native), + # 20: + ( + self.mon_native, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_native.arg(0), self.npv)), + self.mon_native.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mon_param(self): + tests = [ + (self.mon_param, self.invalid, NotImplemented), + ( + self.mon_param, + self.asbinary, + ProductExpression((self.mon_param, self.bin)), + ), + (self.mon_param, self.zero, 0), + (self.mon_param, self.one, self.mon_param), + # 4: + ( + self.mon_param, + self.native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), 5)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), self.npv)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), 6)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.param_mut, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), self.param_mut)), + self.mon_param.arg(1), + ) + ), + ), + # 8: + (self.mon_param, self.var, ProductExpression((self.mon_param, self.var))), + ( + self.mon_param, + self.mon_native, + ProductExpression((self.mon_param, self.mon_native)), + ), + ( + self.mon_param, + self.mon_param, + ProductExpression((self.mon_param, self.mon_param)), + ), + ( + self.mon_param, + self.mon_npv, + ProductExpression((self.mon_param, self.mon_npv)), + ), + # 12: + ( + self.mon_param, + self.linear, + ProductExpression((self.mon_param, self.linear)), + ), + (self.mon_param, self.sum, ProductExpression((self.mon_param, self.sum))), + ( + self.mon_param, + self.other, + ProductExpression((self.mon_param, self.other)), + ), + (self.mon_param, self.mutable_l0, 0), + # 16: + ( + self.mon_param, + self.mutable_l1, + ProductExpression((self.mon_param, self.mon_npv)), + ), + ( + self.mon_param, + self.mutable_l2, + ProductExpression((self.mon_param, self.mutable_l2)), + ), + (self.mon_param, self.param0, 0), + (self.mon_param, self.param1, self.mon_param), + # 20: + ( + self.mon_param, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_param.arg(0), self.npv)), + self.mon_param.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mon_npv(self): + tests = [ + (self.mon_npv, self.invalid, NotImplemented), + (self.mon_npv, self.asbinary, ProductExpression((self.mon_npv, self.bin))), + (self.mon_npv, self.zero, 0), + (self.mon_npv, self.one, self.mon_npv), + # 4: + ( + self.mon_npv, + self.native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 5)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 6)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.param_mut, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.param_mut)), + self.mon_npv.arg(1), + ) + ), + ), + # 8: + (self.mon_npv, self.var, ProductExpression((self.mon_npv, self.var))), + ( + self.mon_npv, + self.mon_native, + ProductExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mon_npv, + self.mon_param, + ProductExpression((self.mon_npv, self.mon_param)), + ), + ( + self.mon_npv, + self.mon_npv, + ProductExpression((self.mon_npv, self.mon_npv)), + ), + # 12: + (self.mon_npv, self.linear, ProductExpression((self.mon_npv, self.linear))), + (self.mon_npv, self.sum, ProductExpression((self.mon_npv, self.sum))), + (self.mon_npv, self.other, ProductExpression((self.mon_npv, self.other))), + (self.mon_npv, self.mutable_l0, 0), + # 16: + ( + self.mon_npv, + self.mutable_l1, + ProductExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mon_npv, + self.mutable_l2, + ProductExpression((self.mon_npv, self.mutable_l2)), + ), + (self.mon_npv, self.param0, 0), + (self.mon_npv, self.param1, self.mon_npv), + # 20: + ( + self.mon_npv, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_linear(self): + tests = [ + (self.linear, self.invalid, NotImplemented), + (self.linear, self.asbinary, ProductExpression((self.linear, self.bin))), + (self.linear, self.zero, 0), + (self.linear, self.one, self.linear), + # 4: + (self.linear, self.native, ProductExpression((self.linear, 5))), + (self.linear, self.npv, ProductExpression((self.linear, self.npv))), + (self.linear, self.param, ProductExpression((self.linear, 6))), + ( + self.linear, + self.param_mut, + ProductExpression((self.linear, self.param_mut)), + ), + # 8: + (self.linear, self.var, ProductExpression((self.linear, self.var))), + ( + self.linear, + self.mon_native, + ProductExpression((self.linear, self.mon_native)), + ), + ( + self.linear, + self.mon_param, + ProductExpression((self.linear, self.mon_param)), + ), + (self.linear, self.mon_npv, ProductExpression((self.linear, self.mon_npv))), + # 12: + (self.linear, self.linear, ProductExpression((self.linear, self.linear))), + (self.linear, self.sum, ProductExpression((self.linear, self.sum))), + (self.linear, self.other, ProductExpression((self.linear, self.other))), + (self.linear, self.mutable_l0, 0), + # 16: + ( + self.linear, + self.mutable_l1, + ProductExpression((self.linear, self.mon_npv)), + ), + ( + self.linear, + self.mutable_l2, + ProductExpression((self.linear, self.mutable_l2)), + ), + (self.linear, self.param0, 0), + (self.linear, self.param1, self.linear), + # 20: + (self.linear, self.mutable_l3, ProductExpression((self.linear, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_sum(self): + tests = [ + (self.sum, self.invalid, NotImplemented), + (self.sum, self.asbinary, ProductExpression((self.sum, self.bin))), + (self.sum, self.zero, 0), + (self.sum, self.one, self.sum), + # 4: + (self.sum, self.native, ProductExpression((self.sum, 5))), + (self.sum, self.npv, ProductExpression((self.sum, self.npv))), + (self.sum, self.param, ProductExpression((self.sum, 6))), + (self.sum, self.param_mut, ProductExpression((self.sum, self.param_mut))), + # 8: + (self.sum, self.var, ProductExpression((self.sum, self.var))), + (self.sum, self.mon_native, ProductExpression((self.sum, self.mon_native))), + (self.sum, self.mon_param, ProductExpression((self.sum, self.mon_param))), + (self.sum, self.mon_npv, ProductExpression((self.sum, self.mon_npv))), + # 12: + (self.sum, self.linear, ProductExpression((self.sum, self.linear))), + (self.sum, self.sum, ProductExpression((self.sum, self.sum))), + (self.sum, self.other, ProductExpression((self.sum, self.other))), + (self.sum, self.mutable_l0, 0), + # 16: + (self.sum, self.mutable_l1, ProductExpression((self.sum, self.mon_npv))), + (self.sum, self.mutable_l2, ProductExpression((self.sum, self.mutable_l2))), + (self.sum, self.param0, 0), + (self.sum, self.param1, self.sum), + # 20: + (self.sum, self.mutable_l3, ProductExpression((self.sum, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_other(self): + tests = [ + (self.other, self.invalid, NotImplemented), + (self.other, self.asbinary, ProductExpression((self.other, self.bin))), + (self.other, self.zero, 0), + (self.other, self.one, self.other), + # 4: + (self.other, self.native, ProductExpression((self.other, 5))), + (self.other, self.npv, ProductExpression((self.other, self.npv))), + (self.other, self.param, ProductExpression((self.other, 6))), + ( + self.other, + self.param_mut, + ProductExpression((self.other, self.param_mut)), + ), + # 8: + (self.other, self.var, ProductExpression((self.other, self.var))), + ( + self.other, + self.mon_native, + ProductExpression((self.other, self.mon_native)), + ), + ( + self.other, + self.mon_param, + ProductExpression((self.other, self.mon_param)), + ), + (self.other, self.mon_npv, ProductExpression((self.other, self.mon_npv))), + # 12: + (self.other, self.linear, ProductExpression((self.other, self.linear))), + (self.other, self.sum, ProductExpression((self.other, self.sum))), + (self.other, self.other, ProductExpression((self.other, self.other))), + (self.other, self.mutable_l0, 0), + # 16: + ( + self.other, + self.mutable_l1, + ProductExpression((self.other, self.mon_npv)), + ), + ( + self.other, + self.mutable_l2, + ProductExpression((self.other, self.mutable_l2)), + ), + (self.other, self.param0, 0), + (self.other, self.param1, self.other), + # 20: + (self.other, self.mutable_l3, ProductExpression((self.other, self.npv))), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mutable_l0(self): + tests = [ + (self.mutable_l0, self.invalid, NotImplemented), + (self.mutable_l0, self.asbinary, 0), + (self.mutable_l0, self.zero, 0), + (self.mutable_l0, self.one, 0), + # 4: + (self.mutable_l0, self.native, 0), + (self.mutable_l0, self.npv, 0), + (self.mutable_l0, self.param, 0), + (self.mutable_l0, self.param_mut, 0), + # 8: + (self.mutable_l0, self.var, 0), + (self.mutable_l0, self.mon_native, 0), + (self.mutable_l0, self.mon_param, 0), + (self.mutable_l0, self.mon_npv, 0), + # 12: + (self.mutable_l0, self.linear, 0), + (self.mutable_l0, self.sum, 0), + (self.mutable_l0, self.other, 0), + (self.mutable_l0, self.mutable_l0, 0), + # 16: + (self.mutable_l0, self.mutable_l1, 0), + (self.mutable_l0, self.mutable_l2, 0), + (self.mutable_l0, self.param0, 0), + (self.mutable_l0, self.param1, 0), + # 20: + (self.mutable_l0, self.mutable_l3, 0), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mutable_l1(self): + tests = [ + (self.mutable_l1, self.invalid, NotImplemented), + ( + self.mutable_l1, + self.asbinary, + ProductExpression((self.mon_npv, self.bin)), + ), + (self.mutable_l1, self.zero, 0), + (self.mutable_l1, self.one, self.mon_npv), + # 4: + ( + self.mutable_l1, + self.native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 5)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), 6)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.param_mut, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.param_mut)), + self.mon_npv.arg(1), + ) + ), + ), + # 8: + (self.mutable_l1, self.var, ProductExpression((self.mon_npv, self.var))), + ( + self.mutable_l1, + self.mon_native, + ProductExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mutable_l1, + self.mon_param, + ProductExpression((self.mon_npv, self.mon_param)), + ), + ( + self.mutable_l1, + self.mon_npv, + ProductExpression((self.mon_npv, self.mon_npv)), + ), + # 12: + ( + self.mutable_l1, + self.linear, + ProductExpression((self.mon_npv, self.linear)), + ), + (self.mutable_l1, self.sum, ProductExpression((self.mon_npv, self.sum))), + ( + self.mutable_l1, + self.other, + ProductExpression((self.mon_npv, self.other)), + ), + (self.mutable_l1, self.mutable_l0, 0), + # 16: + ( + self.mutable_l1, + self.mutable_l1, + ProductExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mutable_l1, + self.mutable_l2, + ProductExpression((self.mon_npv, self.mutable_l2)), + ), + (self.mutable_l1, self.param0, 0), + (self.mutable_l1, self.param1, self.mon_npv), + # 20: + ( + self.mutable_l1, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_ProductExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mutable_l2(self): + tests = [ + (self.mutable_l2, self.invalid, NotImplemented), + ( + self.mutable_l2, + self.asbinary, + ProductExpression((self.mutable_l2, self.bin)), + ), + (self.mutable_l2, self.zero, 0), + (self.mutable_l2, self.one, self.mutable_l2), + # 4: + (self.mutable_l2, self.native, ProductExpression((self.mutable_l2, 5))), + (self.mutable_l2, self.npv, ProductExpression((self.mutable_l2, self.npv))), + (self.mutable_l2, self.param, ProductExpression((self.mutable_l2, 6))), + ( + self.mutable_l2, + self.param_mut, + ProductExpression((self.mutable_l2, self.param_mut)), + ), + # 8: + (self.mutable_l2, self.var, ProductExpression((self.mutable_l2, self.var))), + ( + self.mutable_l2, + self.mon_native, + ProductExpression((self.mutable_l2, self.mon_native)), + ), + ( + self.mutable_l2, + self.mon_param, + ProductExpression((self.mutable_l2, self.mon_param)), + ), + ( + self.mutable_l2, + self.mon_npv, + ProductExpression((self.mutable_l2, self.mon_npv)), + ), + # 12: + ( + self.mutable_l2, + self.linear, + ProductExpression((self.mutable_l2, self.linear)), + ), + (self.mutable_l2, self.sum, ProductExpression((self.mutable_l2, self.sum))), + ( + self.mutable_l2, + self.other, + ProductExpression((self.mutable_l2, self.other)), + ), + (self.mutable_l2, self.mutable_l0, 0), + # 16: + ( + self.mutable_l2, + self.mutable_l1, + ProductExpression((self.mutable_l2, self.mon_npv)), + ), + ( + self.mutable_l2, + self.mutable_l2, + ProductExpression((self.mutable_l2, self.mutable_l2)), + ), + (self.mutable_l2, self.param0, 0), + (self.mutable_l2, self.param1, self.mutable_l2), + # 20: + ( + self.mutable_l2, + self.mutable_l3, + ProductExpression((self.mutable_l2, self.npv)), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_param0(self): + tests = [ + # "Param0 * invalid(str)" is a legitimate Python operation and + # should never hit the Pyomo expression system + (self.param0, self.invalid, self.SKIP), + (self.param0, self.asbinary, 0), + (self.param0, self.zero, 0), + (self.param0, self.one, 0), + # 4: + (self.param0, self.native, 0), + (self.param0, self.npv, 0), + (self.param0, self.param, 0), + (self.param0, self.param_mut, 0), + # 8: + (self.param0, self.var, 0), + (self.param0, self.mon_native, 0), + (self.param0, self.mon_param, 0), + (self.param0, self.mon_npv, 0), + # 12: + (self.param0, self.linear, 0), + (self.param0, self.sum, 0), + (self.param0, self.other, 0), + (self.param0, self.mutable_l0, 0), + # 16: + (self.param0, self.mutable_l1, 0), + (self.param0, self.mutable_l2, 0), + (self.param0, self.param0, 0), + (self.param0, self.param1, 0), + # 20: + (self.param0, self.mutable_l3, 0), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_param1(self): + tests = [ + # "One * invalid(str)" is a legitimate Python operation and + # should never hit the Pyomo expression system + (self.param1, self.invalid, self.SKIP), + (self.param1, self.asbinary, self.bin), + (self.param1, self.zero, 0), + (self.param1, self.one, 1), + # 4: + (self.param1, self.native, 5), + (self.param1, self.npv, self.npv), + (self.param1, self.param, self.param), + (self.param1, self.param_mut, self.param_mut), + # 8: + (self.param1, self.var, self.var), + (self.param1, self.mon_native, self.mon_native), + (self.param1, self.mon_param, self.mon_param), + (self.param1, self.mon_npv, self.mon_npv), + # 12: + (self.param1, self.linear, self.linear), + (self.param1, self.sum, self.sum), + (self.param1, self.other, self.other), + (self.param1, self.mutable_l0, 0), + # 16: + (self.param1, self.mutable_l1, self.mon_npv), + (self.param1, self.mutable_l2, self.mutable_l2), + (self.param1, self.param0, self.param0), + (self.param1, self.param1, self.param1), + # 20: + (self.param1, self.mutable_l3, self.npv), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + def test_mul_mutable_l3(self): + tests = [ + (self.mutable_l3, self.invalid, NotImplemented), + ( + self.mutable_l3, + self.asbinary, + MonomialTermExpression((self.npv, self.bin)), + ), + (self.mutable_l3, self.zero, 0), + (self.mutable_l3, self.one, self.npv), + # 4: + (self.mutable_l3, self.native, NPV_ProductExpression((self.npv, 5))), + (self.mutable_l3, self.npv, NPV_ProductExpression((self.npv, self.npv))), + (self.mutable_l3, self.param, NPV_ProductExpression((self.npv, 6))), + ( + self.mutable_l3, + self.param_mut, + NPV_ProductExpression((self.npv, self.param_mut)), + ), + # 8: + (self.mutable_l3, self.var, MonomialTermExpression((self.npv, self.var))), + ( + self.mutable_l3, + self.mon_native, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_native.arg(0))), + self.mon_native.arg(1), + ) + ), + ), + ( + self.mutable_l3, + self.mon_param, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_param.arg(0))), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mutable_l3, + self.mon_npv, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + # 12: + (self.mutable_l3, self.linear, ProductExpression((self.npv, self.linear))), + (self.mutable_l3, self.sum, ProductExpression((self.npv, self.sum))), + (self.mutable_l3, self.other, ProductExpression((self.npv, self.other))), + (self.mutable_l3, self.mutable_l0, 0), + # 16: + ( + self.mutable_l3, + self.mutable_l1, + MonomialTermExpression( + ( + NPV_ProductExpression((self.npv, self.mon_npv.arg(0))), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l3, + self.mutable_l2, + ProductExpression((self.npv, self.mutable_l2)), + ), + (self.mutable_l3, self.param0, 0), + (self.mutable_l3, self.param1, self.npv), + # 20: + ( + self.mutable_l3, + self.mutable_l3, + NPV_ProductExpression((self.npv, self.npv)), + ), + ] + self._run_cases(tests, operator.mul) + self._run_cases(tests, operator.imul) + + # + # + # DIVISION + # + # + + def test_div_invalid(self): + tests = [ + (self.invalid, self.invalid, NotImplemented), + (self.invalid, self.asbinary, NotImplemented), + (self.invalid, self.zero, NotImplemented), + (self.invalid, self.one, NotImplemented), + # 4: + (self.invalid, self.native, NotImplemented), + (self.invalid, self.npv, NotImplemented), + (self.invalid, self.param, NotImplemented), + (self.invalid, self.param_mut, NotImplemented), + # 8: + (self.invalid, self.var, NotImplemented), + (self.invalid, self.mon_native, NotImplemented), + (self.invalid, self.mon_param, NotImplemented), + (self.invalid, self.mon_npv, NotImplemented), + # 12: + (self.invalid, self.linear, NotImplemented), + (self.invalid, self.sum, NotImplemented), + (self.invalid, self.other, NotImplemented), + (self.invalid, self.mutable_l0, NotImplemented), + # 16: + (self.invalid, self.mutable_l1, NotImplemented), + (self.invalid, self.mutable_l2, NotImplemented), + (self.invalid, self.param0, NotImplemented), + (self.invalid, self.param1, NotImplemented), + # 20: + (self.invalid, self.mutable_l3, NotImplemented), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_asbinary(self): + tests = [ + (self.asbinary, self.invalid, NotImplemented), + # BooleanVar objects do not support division + (self.asbinary, self.asbinary, NotImplemented), + (self.asbinary, self.zero, ZeroDivisionError), + (self.asbinary, self.one, self.bin), + # 4: + (self.asbinary, self.native, MonomialTermExpression((0.2, self.bin))), + ( + self.asbinary, + self.npv, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.npv)), self.bin) + ), + ), + (self.asbinary, self.param, MonomialTermExpression((1 / 6, self.bin))), + ( + self.asbinary, + self.param_mut, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.param_mut)), self.bin) + ), + ), + # 8: + (self.asbinary, self.var, DivisionExpression((self.bin, self.var))), + ( + self.asbinary, + self.mon_native, + DivisionExpression((self.bin, self.mon_native)), + ), + ( + self.asbinary, + self.mon_param, + DivisionExpression((self.bin, self.mon_param)), + ), + (self.asbinary, self.mon_npv, DivisionExpression((self.bin, self.mon_npv))), + # 12: + (self.asbinary, self.linear, DivisionExpression((self.bin, self.linear))), + (self.asbinary, self.sum, DivisionExpression((self.bin, self.sum))), + (self.asbinary, self.other, DivisionExpression((self.bin, self.other))), + (self.asbinary, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.asbinary, + self.mutable_l1, + DivisionExpression((self.bin, self.mon_npv)), + ), + ( + self.asbinary, + self.mutable_l2, + DivisionExpression((self.bin, self.mutable_l2)), + ), + (self.asbinary, self.param0, ZeroDivisionError), + (self.asbinary, self.param1, self.bin), + # 20: + ( + self.asbinary, + self.mutable_l3, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.npv)), self.bin) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_zero(self): + tests = [ + (self.zero, self.invalid, NotImplemented), + (self.zero, self.asbinary, 0), + (self.zero, self.zero, ZeroDivisionError), + (self.zero, self.one, 0.0), + # 4: + (self.zero, self.native, 0.0), + (self.zero, self.npv, 0), + (self.zero, self.param, 0.0), + (self.zero, self.param_mut, 0), + # 8: + (self.zero, self.var, 0), + (self.zero, self.mon_native, 0), + (self.zero, self.mon_param, 0), + (self.zero, self.mon_npv, 0), + # 12: + (self.zero, self.linear, 0), + (self.zero, self.sum, 0), + (self.zero, self.other, 0), + (self.zero, self.mutable_l0, ZeroDivisionError), + # 16: + (self.zero, self.mutable_l1, 0), + (self.zero, self.mutable_l2, 0), + (self.zero, self.param0, ZeroDivisionError), + (self.zero, self.param1, 0.0), + # 20: + (self.zero, self.mutable_l3, 0), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_one(self): + tests = [ + (self.one, self.invalid, NotImplemented), + (self.one, self.asbinary, DivisionExpression((1, self.bin))), + (self.one, self.zero, ZeroDivisionError), + (self.one, self.one, 1.0), + # 4: + (self.one, self.native, 0.2), + (self.one, self.npv, NPV_DivisionExpression((1, self.npv))), + (self.one, self.param, 1 / 6), + (self.one, self.param_mut, NPV_DivisionExpression((1, self.param_mut))), + # 8: + (self.one, self.var, DivisionExpression((1, self.var))), + (self.one, self.mon_native, DivisionExpression((1, self.mon_native))), + (self.one, self.mon_param, DivisionExpression((1, self.mon_param))), + (self.one, self.mon_npv, DivisionExpression((1, self.mon_npv))), + # 12: + (self.one, self.linear, DivisionExpression((1, self.linear))), + (self.one, self.sum, DivisionExpression((1, self.sum))), + (self.one, self.other, DivisionExpression((1, self.other))), + (self.one, self.mutable_l0, ZeroDivisionError), + # 16: + (self.one, self.mutable_l1, DivisionExpression((1, self.mon_npv))), + (self.one, self.mutable_l2, DivisionExpression((1, self.mutable_l2))), + (self.one, self.param0, ZeroDivisionError), + (self.one, self.param1, 1.0), + # 20: + (self.one, self.mutable_l3, NPV_DivisionExpression((1, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_native(self): + tests = [ + (self.native, self.invalid, NotImplemented), + (self.native, self.asbinary, DivisionExpression((5, self.bin))), + (self.native, self.zero, ZeroDivisionError), + (self.native, self.one, 5.0), + # 4: + (self.native, self.native, 1.0), + (self.native, self.npv, NPV_DivisionExpression((5, self.npv))), + (self.native, self.param, 5 / 6), + (self.native, self.param_mut, NPV_DivisionExpression((5, self.param_mut))), + # 8: + (self.native, self.var, DivisionExpression((5, self.var))), + (self.native, self.mon_native, DivisionExpression((5, self.mon_native))), + (self.native, self.mon_param, DivisionExpression((5, self.mon_param))), + (self.native, self.mon_npv, DivisionExpression((5, self.mon_npv))), + # 12: + (self.native, self.linear, DivisionExpression((5, self.linear))), + (self.native, self.sum, DivisionExpression((5, self.sum))), + (self.native, self.other, DivisionExpression((5, self.other))), + (self.native, self.mutable_l0, ZeroDivisionError), + # 16: + (self.native, self.mutable_l1, DivisionExpression((5, self.mon_npv))), + (self.native, self.mutable_l2, DivisionExpression((5, self.mutable_l2))), + (self.native, self.param0, ZeroDivisionError), + (self.native, self.param1, 5.0), + # 20: + (self.native, self.mutable_l3, NPV_DivisionExpression((5, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_npv(self): + tests = [ + (self.npv, self.invalid, NotImplemented), + (self.npv, self.asbinary, DivisionExpression((self.npv, self.bin))), + (self.npv, self.zero, ZeroDivisionError), + (self.npv, self.one, self.npv), + # 4: + (self.npv, self.native, NPV_DivisionExpression((self.npv, 5))), + (self.npv, self.npv, NPV_DivisionExpression((self.npv, self.npv))), + (self.npv, self.param, NPV_DivisionExpression((self.npv, 6))), + ( + self.npv, + self.param_mut, + NPV_DivisionExpression((self.npv, self.param_mut)), + ), + # 8: + (self.npv, self.var, DivisionExpression((self.npv, self.var))), + ( + self.npv, + self.mon_native, + DivisionExpression((self.npv, self.mon_native)), + ), + (self.npv, self.mon_param, DivisionExpression((self.npv, self.mon_param))), + (self.npv, self.mon_npv, DivisionExpression((self.npv, self.mon_npv))), + # 12: + (self.npv, self.linear, DivisionExpression((self.npv, self.linear))), + (self.npv, self.sum, DivisionExpression((self.npv, self.sum))), + (self.npv, self.other, DivisionExpression((self.npv, self.other))), + (self.npv, self.mutable_l0, ZeroDivisionError), + # 16: + (self.npv, self.mutable_l1, DivisionExpression((self.npv, self.mon_npv))), + ( + self.npv, + self.mutable_l2, + DivisionExpression((self.npv, self.mutable_l2)), + ), + (self.npv, self.param0, ZeroDivisionError), + (self.npv, self.param1, self.npv), + # 20: + (self.npv, self.mutable_l3, NPV_DivisionExpression((self.npv, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_param(self): + tests = [ + (self.param, self.invalid, NotImplemented), + (self.param, self.asbinary, DivisionExpression((6, self.bin))), + (self.param, self.zero, ZeroDivisionError), + (self.param, self.one, 6.0), + # 4: + (self.param, self.native, 1.2), + (self.param, self.npv, NPV_DivisionExpression((6, self.npv))), + (self.param, self.param, 1.0), + (self.param, self.param_mut, NPV_DivisionExpression((6, self.param_mut))), + # 8: + (self.param, self.var, DivisionExpression((6, self.var))), + (self.param, self.mon_native, DivisionExpression((6, self.mon_native))), + (self.param, self.mon_param, DivisionExpression((6, self.mon_param))), + (self.param, self.mon_npv, DivisionExpression((6, self.mon_npv))), + # 12: + (self.param, self.linear, DivisionExpression((6, self.linear))), + (self.param, self.sum, DivisionExpression((6, self.sum))), + (self.param, self.other, DivisionExpression((6, self.other))), + (self.param, self.mutable_l0, ZeroDivisionError), + # 16: + (self.param, self.mutable_l1, DivisionExpression((6, self.mon_npv))), + (self.param, self.mutable_l2, DivisionExpression((6, self.mutable_l2))), + (self.param, self.param0, ZeroDivisionError), + (self.param, self.param1, 6.0), + # 20: + (self.param, self.mutable_l3, NPV_DivisionExpression((6, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_param_mut(self): + tests = [ + (self.param_mut, self.invalid, NotImplemented), + ( + self.param_mut, + self.asbinary, + DivisionExpression((self.param_mut, self.bin)), + ), + (self.param_mut, self.zero, ZeroDivisionError), + (self.param_mut, self.one, self.param_mut), + # 4: + (self.param_mut, self.native, NPV_DivisionExpression((self.param_mut, 5))), + ( + self.param_mut, + self.npv, + NPV_DivisionExpression((self.param_mut, self.npv)), + ), + (self.param_mut, self.param, NPV_DivisionExpression((self.param_mut, 6))), + ( + self.param_mut, + self.param_mut, + NPV_DivisionExpression((self.param_mut, self.param_mut)), + ), + # 8: + (self.param_mut, self.var, DivisionExpression((self.param_mut, self.var))), + ( + self.param_mut, + self.mon_native, + DivisionExpression((self.param_mut, self.mon_native)), + ), + ( + self.param_mut, + self.mon_param, + DivisionExpression((self.param_mut, self.mon_param)), + ), + ( + self.param_mut, + self.mon_npv, + DivisionExpression((self.param_mut, self.mon_npv)), + ), + # 12: + ( + self.param_mut, + self.linear, + DivisionExpression((self.param_mut, self.linear)), + ), + (self.param_mut, self.sum, DivisionExpression((self.param_mut, self.sum))), + ( + self.param_mut, + self.other, + DivisionExpression((self.param_mut, self.other)), + ), + (self.param_mut, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.param_mut, + self.mutable_l1, + DivisionExpression((self.param_mut, self.mon_npv)), + ), + ( + self.param_mut, + self.mutable_l2, + DivisionExpression((self.param_mut, self.mutable_l2)), + ), + (self.param_mut, self.param0, ZeroDivisionError), + (self.param_mut, self.param1, self.param_mut), + # 20: + ( + self.param_mut, + self.mutable_l3, + NPV_DivisionExpression((self.param_mut, self.npv)), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_var(self): + tests = [ + (self.var, self.invalid, NotImplemented), + (self.var, self.asbinary, DivisionExpression((self.var, self.bin))), + (self.var, self.zero, ZeroDivisionError), + (self.var, self.one, self.var), + # 4: + (self.var, self.native, MonomialTermExpression((0.2, self.var))), + ( + self.var, + self.npv, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.npv)), self.var) + ), + ), + (self.var, self.param, MonomialTermExpression((1 / 6.0, self.var))), + ( + self.var, + self.param_mut, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.param_mut)), self.var) + ), + ), + # 8: + (self.var, self.var, DivisionExpression((self.var, self.var))), + ( + self.var, + self.mon_native, + DivisionExpression((self.var, self.mon_native)), + ), + (self.var, self.mon_param, DivisionExpression((self.var, self.mon_param))), + (self.var, self.mon_npv, DivisionExpression((self.var, self.mon_npv))), + # 12: + (self.var, self.linear, DivisionExpression((self.var, self.linear))), + (self.var, self.sum, DivisionExpression((self.var, self.sum))), + (self.var, self.other, DivisionExpression((self.var, self.other))), + (self.var, self.mutable_l0, ZeroDivisionError), + # 16: + (self.var, self.mutable_l1, DivisionExpression((self.var, self.mon_npv))), + ( + self.var, + self.mutable_l2, + DivisionExpression((self.var, self.mutable_l2)), + ), + (self.var, self.param0, ZeroDivisionError), + (self.var, self.param1, self.var), + # 20: + ( + self.var, + self.mutable_l3, + MonomialTermExpression( + (NPV_DivisionExpression((1, self.npv)), self.var) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mon_native(self): + tests = [ + (self.mon_native, self.invalid, NotImplemented), + ( + self.mon_native, + self.asbinary, + DivisionExpression((self.mon_native, self.bin)), + ), + (self.mon_native, self.zero, ZeroDivisionError), + (self.mon_native, self.one, self.mon_native), + # 4: + ( + self.mon_native, + self.native, + MonomialTermExpression((0.6, self.mon_native.arg(1))), + ), + ( + self.mon_native, + self.npv, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_native.arg(0), self.npv)), + self.mon_native.arg(1), + ) + ), + ), + ( + self.mon_native, + self.param, + MonomialTermExpression((0.5, self.mon_native.arg(1))), + ), + ( + self.mon_native, + self.param_mut, + MonomialTermExpression( + ( + NPV_DivisionExpression( + (self.mon_native.arg(0), self.param_mut) + ), + self.mon_native.arg(1), + ) + ), + ), + # 8: + ( + self.mon_native, + self.var, + DivisionExpression((self.mon_native, self.var)), + ), + ( + self.mon_native, + self.mon_native, + DivisionExpression((self.mon_native, self.mon_native)), + ), + ( + self.mon_native, + self.mon_param, + DivisionExpression((self.mon_native, self.mon_param)), + ), + ( + self.mon_native, + self.mon_npv, + DivisionExpression((self.mon_native, self.mon_npv)), + ), + # 12: + ( + self.mon_native, + self.linear, + DivisionExpression((self.mon_native, self.linear)), + ), + ( + self.mon_native, + self.sum, + DivisionExpression((self.mon_native, self.sum)), + ), + ( + self.mon_native, + self.other, + DivisionExpression((self.mon_native, self.other)), + ), + (self.mon_native, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mon_native, + self.mutable_l1, + DivisionExpression((self.mon_native, self.mon_npv)), + ), + ( + self.mon_native, + self.mutable_l2, + DivisionExpression((self.mon_native, self.mutable_l2)), + ), + (self.mon_native, self.param0, ZeroDivisionError), + (self.mon_native, self.param1, self.mon_native), + # 20: + ( + self.mon_native, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_native.arg(0), self.npv)), + self.mon_native.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mon_param(self): + tests = [ + (self.mon_param, self.invalid, NotImplemented), + ( + self.mon_param, + self.asbinary, + DivisionExpression((self.mon_param, self.bin)), + ), + (self.mon_param, self.zero, ZeroDivisionError), + (self.mon_param, self.one, self.mon_param), + # 4: + ( + self.mon_param, + self.native, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_param.arg(0), 5)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.npv, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_param.arg(0), self.npv)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.param, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_param.arg(0), 6)), + self.mon_param.arg(1), + ) + ), + ), + ( + self.mon_param, + self.param_mut, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_param.arg(0), self.param_mut)), + self.mon_param.arg(1), + ) + ), + ), + # 8: + (self.mon_param, self.var, DivisionExpression((self.mon_param, self.var))), + ( + self.mon_param, + self.mon_native, + DivisionExpression((self.mon_param, self.mon_native)), + ), + ( + self.mon_param, + self.mon_param, + DivisionExpression((self.mon_param, self.mon_param)), + ), + ( + self.mon_param, + self.mon_npv, + DivisionExpression((self.mon_param, self.mon_npv)), + ), + # 12: + ( + self.mon_param, + self.linear, + DivisionExpression((self.mon_param, self.linear)), + ), + (self.mon_param, self.sum, DivisionExpression((self.mon_param, self.sum))), + ( + self.mon_param, + self.other, + DivisionExpression((self.mon_param, self.other)), + ), + (self.mon_param, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mon_param, + self.mutable_l1, + DivisionExpression((self.mon_param, self.mon_npv)), + ), + ( + self.mon_param, + self.mutable_l2, + DivisionExpression((self.mon_param, self.mutable_l2)), + ), + (self.mon_param, self.param0, ZeroDivisionError), + (self.mon_param, self.param1, self.mon_param), + # 20: + ( + self.mon_param, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_param.arg(0), self.npv)), + self.mon_param.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mon_npv(self): + tests = [ + (self.mon_npv, self.invalid, NotImplemented), + (self.mon_npv, self.asbinary, DivisionExpression((self.mon_npv, self.bin))), + (self.mon_npv, self.zero, ZeroDivisionError), + (self.mon_npv, self.one, self.mon_npv), + # 4: + ( + self.mon_npv, + self.native, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), 5)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.npv, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.param, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), 6)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mon_npv, + self.param_mut, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.param_mut)), + self.mon_npv.arg(1), + ) + ), + ), + # 8: + (self.mon_npv, self.var, DivisionExpression((self.mon_npv, self.var))), + ( + self.mon_npv, + self.mon_native, + DivisionExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mon_npv, + self.mon_param, + DivisionExpression((self.mon_npv, self.mon_param)), + ), + ( + self.mon_npv, + self.mon_npv, + DivisionExpression((self.mon_npv, self.mon_npv)), + ), + # 12: + ( + self.mon_npv, + self.linear, + DivisionExpression((self.mon_npv, self.linear)), + ), + (self.mon_npv, self.sum, DivisionExpression((self.mon_npv, self.sum))), + (self.mon_npv, self.other, DivisionExpression((self.mon_npv, self.other))), + (self.mon_npv, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mon_npv, + self.mutable_l1, + DivisionExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mon_npv, + self.mutable_l2, + DivisionExpression((self.mon_npv, self.mutable_l2)), + ), + (self.mon_npv, self.param0, ZeroDivisionError), + (self.mon_npv, self.param1, self.mon_npv), + # 20: + ( + self.mon_npv, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_linear(self): + tests = [ + (self.linear, self.invalid, NotImplemented), + (self.linear, self.asbinary, DivisionExpression((self.linear, self.bin))), + (self.linear, self.zero, ZeroDivisionError), + (self.linear, self.one, self.linear), + # 4: + (self.linear, self.native, DivisionExpression((self.linear, 5))), + (self.linear, self.npv, DivisionExpression((self.linear, self.npv))), + (self.linear, self.param, DivisionExpression((self.linear, 6))), + ( + self.linear, + self.param_mut, + DivisionExpression((self.linear, self.param_mut)), + ), + # 8: + (self.linear, self.var, DivisionExpression((self.linear, self.var))), + ( + self.linear, + self.mon_native, + DivisionExpression((self.linear, self.mon_native)), + ), + ( + self.linear, + self.mon_param, + DivisionExpression((self.linear, self.mon_param)), + ), + ( + self.linear, + self.mon_npv, + DivisionExpression((self.linear, self.mon_npv)), + ), + # 12: + (self.linear, self.linear, DivisionExpression((self.linear, self.linear))), + (self.linear, self.sum, DivisionExpression((self.linear, self.sum))), + (self.linear, self.other, DivisionExpression((self.linear, self.other))), + (self.linear, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.linear, + self.mutable_l1, + DivisionExpression((self.linear, self.mon_npv)), + ), + ( + self.linear, + self.mutable_l2, + DivisionExpression((self.linear, self.mutable_l2)), + ), + (self.linear, self.param0, ZeroDivisionError), + (self.linear, self.param1, self.linear), + # 20: + (self.linear, self.mutable_l3, DivisionExpression((self.linear, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_sum(self): + tests = [ + (self.sum, self.invalid, NotImplemented), + (self.sum, self.asbinary, DivisionExpression((self.sum, self.bin))), + (self.sum, self.zero, ZeroDivisionError), + (self.sum, self.one, self.sum), + # 4: + (self.sum, self.native, DivisionExpression((self.sum, 5))), + (self.sum, self.npv, DivisionExpression((self.sum, self.npv))), + (self.sum, self.param, DivisionExpression((self.sum, 6))), + (self.sum, self.param_mut, DivisionExpression((self.sum, self.param_mut))), + # 8: + (self.sum, self.var, DivisionExpression((self.sum, self.var))), + ( + self.sum, + self.mon_native, + DivisionExpression((self.sum, self.mon_native)), + ), + (self.sum, self.mon_param, DivisionExpression((self.sum, self.mon_param))), + (self.sum, self.mon_npv, DivisionExpression((self.sum, self.mon_npv))), + # 12: + (self.sum, self.linear, DivisionExpression((self.sum, self.linear))), + (self.sum, self.sum, DivisionExpression((self.sum, self.sum))), + (self.sum, self.other, DivisionExpression((self.sum, self.other))), + (self.sum, self.mutable_l0, ZeroDivisionError), + # 16: + (self.sum, self.mutable_l1, DivisionExpression((self.sum, self.mon_npv))), + ( + self.sum, + self.mutable_l2, + DivisionExpression((self.sum, self.mutable_l2)), + ), + (self.sum, self.param0, ZeroDivisionError), + (self.sum, self.param1, self.sum), + # 20: + (self.sum, self.mutable_l3, DivisionExpression((self.sum, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_other(self): + tests = [ + (self.other, self.invalid, NotImplemented), + (self.other, self.asbinary, DivisionExpression((self.other, self.bin))), + (self.other, self.zero, ZeroDivisionError), + (self.other, self.one, self.other), + # 4: + (self.other, self.native, DivisionExpression((self.other, 5))), + (self.other, self.npv, DivisionExpression((self.other, self.npv))), + (self.other, self.param, DivisionExpression((self.other, 6))), + ( + self.other, + self.param_mut, + DivisionExpression((self.other, self.param_mut)), + ), + # 8: + (self.other, self.var, DivisionExpression((self.other, self.var))), + ( + self.other, + self.mon_native, + DivisionExpression((self.other, self.mon_native)), + ), + ( + self.other, + self.mon_param, + DivisionExpression((self.other, self.mon_param)), + ), + (self.other, self.mon_npv, DivisionExpression((self.other, self.mon_npv))), + # 12: + (self.other, self.linear, DivisionExpression((self.other, self.linear))), + (self.other, self.sum, DivisionExpression((self.other, self.sum))), + (self.other, self.other, DivisionExpression((self.other, self.other))), + (self.other, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.other, + self.mutable_l1, + DivisionExpression((self.other, self.mon_npv)), + ), + ( + self.other, + self.mutable_l2, + DivisionExpression((self.other, self.mutable_l2)), + ), + (self.other, self.param0, ZeroDivisionError), + (self.other, self.param1, self.other), + # 20: + (self.other, self.mutable_l3, DivisionExpression((self.other, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mutable_l0(self): + tests = [ + (self.mutable_l0, self.invalid, NotImplemented), + (self.mutable_l0, self.asbinary, 0), + (self.mutable_l0, self.zero, ZeroDivisionError), + (self.mutable_l0, self.one, 0.0), + # 4: + (self.mutable_l0, self.native, 0.0), + (self.mutable_l0, self.npv, 0), + (self.mutable_l0, self.param, 0.0), + (self.mutable_l0, self.param_mut, 0), + # 8: + (self.mutable_l0, self.var, 0), + (self.mutable_l0, self.mon_native, 0), + (self.mutable_l0, self.mon_param, 0), + (self.mutable_l0, self.mon_npv, 0), + # 12: + (self.mutable_l0, self.linear, 0), + (self.mutable_l0, self.sum, 0), + (self.mutable_l0, self.other, 0), + (self.mutable_l0, self.mutable_l0, ZeroDivisionError), + # 16: + (self.mutable_l0, self.mutable_l1, 0), + (self.mutable_l0, self.mutable_l2, 0), + (self.mutable_l0, self.param0, ZeroDivisionError), + (self.mutable_l0, self.param1, 0.0), + # 20: + (self.mutable_l0, self.mutable_l3, 0), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mutable_l1(self): + tests = [ + (self.mutable_l1, self.invalid, NotImplemented), + ( + self.mutable_l1, + self.asbinary, + DivisionExpression((self.mon_npv, self.bin)), + ), + (self.mutable_l1, self.zero, ZeroDivisionError), + (self.mutable_l1, self.one, self.mon_npv), + # 4: + ( + self.mutable_l1, + self.native, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.native)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.npv, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.param, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), 6)), + self.mon_npv.arg(1), + ) + ), + ), + ( + self.mutable_l1, + self.param_mut, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.param_mut)), + self.mon_npv.arg(1), + ) + ), + ), + # 8: + (self.mutable_l1, self.var, DivisionExpression((self.mon_npv, self.var))), + ( + self.mutable_l1, + self.mon_native, + DivisionExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mutable_l1, + self.mon_param, + DivisionExpression((self.mon_npv, self.mon_param)), + ), + ( + self.mutable_l1, + self.mon_npv, + DivisionExpression((self.mon_npv, self.mon_npv)), + ), + # 12: + ( + self.mutable_l1, + self.linear, + DivisionExpression((self.mon_npv, self.linear)), + ), + (self.mutable_l1, self.sum, DivisionExpression((self.mon_npv, self.sum))), + ( + self.mutable_l1, + self.other, + DivisionExpression((self.mon_npv, self.other)), + ), + (self.mutable_l1, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mutable_l1, + self.mutable_l1, + DivisionExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mutable_l1, + self.mutable_l2, + DivisionExpression((self.mon_npv, self.mutable_l2)), + ), + (self.mutable_l1, self.param0, ZeroDivisionError), + (self.mutable_l1, self.param1, self.mon_npv), + # 20: + ( + self.mutable_l1, + self.mutable_l3, + MonomialTermExpression( + ( + NPV_DivisionExpression((self.mon_npv.arg(0), self.npv)), + self.mon_npv.arg(1), + ) + ), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mutable_l2(self): + tests = [ + (self.mutable_l2, self.invalid, NotImplemented), + ( + self.mutable_l2, + self.asbinary, + DivisionExpression((self.mutable_l2, self.bin)), + ), + (self.mutable_l2, self.zero, ZeroDivisionError), + (self.mutable_l2, self.one, self.mutable_l2), + # 4: + (self.mutable_l2, self.native, DivisionExpression((self.mutable_l2, 5))), + ( + self.mutable_l2, + self.npv, + DivisionExpression((self.mutable_l2, self.npv)), + ), + (self.mutable_l2, self.param, DivisionExpression((self.mutable_l2, 6))), + ( + self.mutable_l2, + self.param_mut, + DivisionExpression((self.mutable_l2, self.param_mut)), + ), + # 8: + ( + self.mutable_l2, + self.var, + DivisionExpression((self.mutable_l2, self.var)), + ), + ( + self.mutable_l2, + self.mon_native, + DivisionExpression((self.mutable_l2, self.mon_native)), + ), + ( + self.mutable_l2, + self.mon_param, + DivisionExpression((self.mutable_l2, self.mon_param)), + ), + ( + self.mutable_l2, + self.mon_npv, + DivisionExpression((self.mutable_l2, self.mon_npv)), + ), + # 12: + ( + self.mutable_l2, + self.linear, + DivisionExpression((self.mutable_l2, self.linear)), + ), + ( + self.mutable_l2, + self.sum, + DivisionExpression((self.mutable_l2, self.sum)), + ), + ( + self.mutable_l2, + self.other, + DivisionExpression((self.mutable_l2, self.other)), + ), + (self.mutable_l2, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mutable_l2, + self.mutable_l1, + DivisionExpression((self.mutable_l2, self.mon_npv)), + ), + ( + self.mutable_l2, + self.mutable_l2, + DivisionExpression((self.mutable_l2, self.mutable_l2)), + ), + (self.mutable_l2, self.param0, ZeroDivisionError), + (self.mutable_l2, self.param1, self.mutable_l2), + # 20: + ( + self.mutable_l2, + self.mutable_l3, + DivisionExpression((self.mutable_l2, self.npv)), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_param0(self): + tests = [ + (self.param0, self.invalid, NotImplemented), + (self.param0, self.asbinary, 0), + (self.param0, self.zero, ZeroDivisionError), + (self.param0, self.one, 0.0), + # 4: + (self.param0, self.native, 0.0), + (self.param0, self.npv, 0), + (self.param0, self.param, 0.0), + (self.param0, self.param_mut, 0), + # 8: + (self.param0, self.var, 0), + (self.param0, self.mon_native, 0), + (self.param0, self.mon_param, 0), + (self.param0, self.mon_npv, 0), + # 12: + (self.param0, self.linear, 0), + (self.param0, self.sum, 0), + (self.param0, self.other, 0), + (self.param0, self.mutable_l0, ZeroDivisionError), + # 16: + (self.param0, self.mutable_l1, 0), + (self.param0, self.mutable_l2, 0), + (self.param0, self.param0, ZeroDivisionError), + (self.param0, self.param1, 0.0), + # 20: + (self.param0, self.mutable_l3, 0), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_param1(self): + tests = [ + (self.param1, self.invalid, NotImplemented), + (self.param1, self.asbinary, DivisionExpression((1, self.bin))), + (self.param1, self.zero, ZeroDivisionError), + (self.param1, self.one, 1.0), + # 4: + (self.param1, self.native, 0.2), + (self.param1, self.npv, NPV_DivisionExpression((1, self.npv))), + (self.param1, self.param, 1 / 6), + (self.param1, self.param_mut, NPV_DivisionExpression((1, self.param_mut))), + # 8: + (self.param1, self.var, DivisionExpression((1, self.var))), + (self.param1, self.mon_native, DivisionExpression((1, self.mon_native))), + (self.param1, self.mon_param, DivisionExpression((1, self.mon_param))), + (self.param1, self.mon_npv, DivisionExpression((1, self.mon_npv))), + # 12: + (self.param1, self.linear, DivisionExpression((1, self.linear))), + (self.param1, self.sum, DivisionExpression((1, self.sum))), + (self.param1, self.other, DivisionExpression((1, self.other))), + (self.param1, self.mutable_l0, ZeroDivisionError), + # 16: + (self.param1, self.mutable_l1, DivisionExpression((1, self.mon_npv))), + (self.param1, self.mutable_l2, DivisionExpression((1, self.mutable_l2))), + (self.param1, self.param0, ZeroDivisionError), + (self.param1, self.param1, 1.0), + # 20: + (self.param1, self.mutable_l3, NPV_DivisionExpression((1, self.npv))), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + def test_div_mutable_l3(self): + tests = [ + (self.mutable_l3, self.invalid, NotImplemented), + (self.mutable_l3, self.asbinary, DivisionExpression((self.npv, self.bin))), + (self.mutable_l3, self.zero, ZeroDivisionError), + (self.mutable_l3, self.one, self.npv), + # 4: + (self.mutable_l3, self.native, NPV_DivisionExpression((self.npv, 5))), + (self.mutable_l3, self.npv, NPV_DivisionExpression((self.npv, self.npv))), + (self.mutable_l3, self.param, NPV_DivisionExpression((self.npv, 6))), + ( + self.mutable_l3, + self.param_mut, + NPV_DivisionExpression((self.npv, self.param_mut)), + ), + # 8: + (self.mutable_l3, self.var, DivisionExpression((self.npv, self.var))), + ( + self.mutable_l3, + self.mon_native, + DivisionExpression((self.npv, self.mon_native)), + ), + ( + self.mutable_l3, + self.mon_param, + DivisionExpression((self.npv, self.mon_param)), + ), + ( + self.mutable_l3, + self.mon_npv, + DivisionExpression((self.npv, self.mon_npv)), + ), + # 12: + (self.mutable_l3, self.linear, DivisionExpression((self.npv, self.linear))), + (self.mutable_l3, self.sum, DivisionExpression((self.npv, self.sum))), + (self.mutable_l3, self.other, DivisionExpression((self.npv, self.other))), + (self.mutable_l3, self.mutable_l0, ZeroDivisionError), + # 16: + ( + self.mutable_l3, + self.mutable_l1, + DivisionExpression((self.npv, self.mon_npv)), + ), + ( + self.mutable_l3, + self.mutable_l2, + DivisionExpression((self.npv, self.mutable_l2)), + ), + (self.mutable_l3, self.param0, ZeroDivisionError), + (self.mutable_l3, self.param1, self.npv), + # 20: + ( + self.mutable_l3, + self.mutable_l3, + NPV_DivisionExpression((self.npv, self.npv)), + ), + ] + self._run_cases(tests, operator.truediv) + self._run_cases(tests, operator.itruediv) + + # + # + # EXPONENTIATION + # + # + + def test_pow_invalid(self): + tests = [ + (self.invalid, self.invalid, NotImplemented), + (self.invalid, self.asbinary, NotImplemented), + (self.invalid, self.zero, NotImplemented), + (self.invalid, self.one, NotImplemented), + # 4: + (self.invalid, self.native, NotImplemented), + (self.invalid, self.npv, NotImplemented), + (self.invalid, self.param, NotImplemented), + (self.invalid, self.param_mut, NotImplemented), + # 8: + (self.invalid, self.var, NotImplemented), + (self.invalid, self.mon_native, NotImplemented), + (self.invalid, self.mon_param, NotImplemented), + (self.invalid, self.mon_npv, NotImplemented), + # 12: + (self.invalid, self.linear, NotImplemented), + (self.invalid, self.sum, NotImplemented), + (self.invalid, self.other, NotImplemented), + (self.invalid, self.mutable_l0, NotImplemented), + # 16: + (self.invalid, self.mutable_l1, NotImplemented), + (self.invalid, self.mutable_l2, NotImplemented), + (self.invalid, self.param0, NotImplemented), + (self.invalid, self.param1, NotImplemented), + # 20: + (self.invalid, self.mutable_l3, NotImplemented), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_asbinary(self): + tests = [ + (self.asbinary, self.invalid, NotImplemented), + # BooleanVar objects do not support exponentiation + (self.asbinary, self.asbinary, NotImplemented), + (self.asbinary, self.zero, 1), + (self.asbinary, self.one, self.bin), + # 4: + (self.asbinary, self.native, PowExpression((self.bin, 5))), + (self.asbinary, self.npv, PowExpression((self.bin, self.npv))), + (self.asbinary, self.param, PowExpression((self.bin, 6))), + (self.asbinary, self.param_mut, PowExpression((self.bin, self.param_mut))), + # 8: + (self.asbinary, self.var, PowExpression((self.bin, self.var))), + ( + self.asbinary, + self.mon_native, + PowExpression((self.bin, self.mon_native)), + ), + (self.asbinary, self.mon_param, PowExpression((self.bin, self.mon_param))), + (self.asbinary, self.mon_npv, PowExpression((self.bin, self.mon_npv))), + # 12: + (self.asbinary, self.linear, PowExpression((self.bin, self.linear))), + (self.asbinary, self.sum, PowExpression((self.bin, self.sum))), + (self.asbinary, self.other, PowExpression((self.bin, self.other))), + (self.asbinary, self.mutable_l0, 1), + # 16: + (self.asbinary, self.mutable_l1, PowExpression((self.bin, self.mon_npv))), + ( + self.asbinary, + self.mutable_l2, + PowExpression((self.bin, self.mutable_l2)), + ), + (self.asbinary, self.param0, 1), + (self.asbinary, self.param1, self.bin), + # 20: + (self.asbinary, self.mutable_l3, PowExpression((self.bin, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_zero(self): + tests = [ + (self.zero, self.invalid, NotImplemented), + (self.zero, self.asbinary, PowExpression((0, self.bin))), + (self.zero, self.zero, 1), + (self.zero, self.one, 0), + # 4: + (self.zero, self.native, 0), + (self.zero, self.npv, NPV_PowExpression((0, self.npv))), + (self.zero, self.param, 0), + (self.zero, self.param_mut, NPV_PowExpression((0, self.param_mut))), + # 8: + (self.zero, self.var, PowExpression((0, self.var))), + (self.zero, self.mon_native, PowExpression((0, self.mon_native))), + (self.zero, self.mon_param, PowExpression((0, self.mon_param))), + (self.zero, self.mon_npv, PowExpression((0, self.mon_npv))), + # 12: + (self.zero, self.linear, PowExpression((0, self.linear))), + (self.zero, self.sum, PowExpression((0, self.sum))), + (self.zero, self.other, PowExpression((0, self.other))), + (self.zero, self.mutable_l0, 1), + # 16: + (self.zero, self.mutable_l1, PowExpression((0, self.mon_npv))), + (self.zero, self.mutable_l2, PowExpression((0, self.mutable_l2))), + (self.zero, self.param0, 1), + (self.zero, self.param1, 0), + # 20: + (self.zero, self.mutable_l3, NPV_PowExpression((0, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_one(self): + tests = [ + (self.one, self.invalid, NotImplemented), + (self.one, self.asbinary, PowExpression((1, self.bin))), + (self.one, self.zero, 1), + (self.one, self.one, 1), + # 4: + (self.one, self.native, 1), + (self.one, self.npv, NPV_PowExpression((1, self.npv))), + (self.one, self.param, 1), + (self.one, self.param_mut, NPV_PowExpression((1, self.param_mut))), + # 8: + (self.one, self.var, PowExpression((1, self.var))), + (self.one, self.mon_native, PowExpression((1, self.mon_native))), + (self.one, self.mon_param, PowExpression((1, self.mon_param))), + (self.one, self.mon_npv, PowExpression((1, self.mon_npv))), + # 12: + (self.one, self.linear, PowExpression((1, self.linear))), + (self.one, self.sum, PowExpression((1, self.sum))), + (self.one, self.other, PowExpression((1, self.other))), + (self.one, self.mutable_l0, 1), + # 16: + (self.one, self.mutable_l1, PowExpression((1, self.mon_npv))), + (self.one, self.mutable_l2, PowExpression((1, self.mutable_l2))), + (self.one, self.param0, 1), + (self.one, self.param1, 1), + # 20: + (self.one, self.mutable_l3, NPV_PowExpression((1, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_native(self): + tests = [ + (self.native, self.invalid, NotImplemented), + (self.native, self.asbinary, PowExpression((5, self.bin))), + (self.native, self.zero, 1), + (self.native, self.one, 5), + # 4: + (self.native, self.native, 3125), + (self.native, self.npv, NPV_PowExpression((5, self.npv))), + (self.native, self.param, 15625), + (self.native, self.param_mut, NPV_PowExpression((5, self.param_mut))), + # 8: + (self.native, self.var, PowExpression((5, self.var))), + (self.native, self.mon_native, PowExpression((5, self.mon_native))), + (self.native, self.mon_param, PowExpression((5, self.mon_param))), + (self.native, self.mon_npv, PowExpression((5, self.mon_npv))), + # 12: + (self.native, self.linear, PowExpression((5, self.linear))), + (self.native, self.sum, PowExpression((5, self.sum))), + (self.native, self.other, PowExpression((5, self.other))), + (self.native, self.mutable_l0, 1), + # 16: + (self.native, self.mutable_l1, PowExpression((5, self.mon_npv))), + (self.native, self.mutable_l2, PowExpression((5, self.mutable_l2))), + (self.native, self.param0, 1), + (self.native, self.param1, 5), + # 20: + (self.native, self.mutable_l3, NPV_PowExpression((5, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_npv(self): + tests = [ + (self.npv, self.invalid, NotImplemented), + (self.npv, self.asbinary, PowExpression((self.npv, self.bin))), + (self.npv, self.zero, 1), + (self.npv, self.one, self.npv), + # 4: + (self.npv, self.native, NPV_PowExpression((self.npv, 5))), + (self.npv, self.npv, NPV_PowExpression((self.npv, self.npv))), + (self.npv, self.param, NPV_PowExpression((self.npv, 6))), + (self.npv, self.param_mut, NPV_PowExpression((self.npv, self.param_mut))), + # 8: + (self.npv, self.var, PowExpression((self.npv, self.var))), + (self.npv, self.mon_native, PowExpression((self.npv, self.mon_native))), + (self.npv, self.mon_param, PowExpression((self.npv, self.mon_param))), + (self.npv, self.mon_npv, PowExpression((self.npv, self.mon_npv))), + # 12: + (self.npv, self.linear, PowExpression((self.npv, self.linear))), + (self.npv, self.sum, PowExpression((self.npv, self.sum))), + (self.npv, self.other, PowExpression((self.npv, self.other))), + (self.npv, self.mutable_l0, 1), + # 16: + (self.npv, self.mutable_l1, PowExpression((self.npv, self.mon_npv))), + (self.npv, self.mutable_l2, PowExpression((self.npv, self.mutable_l2))), + (self.npv, self.param0, 1), + (self.npv, self.param1, self.npv), + # 20: + (self.npv, self.mutable_l3, NPV_PowExpression((self.npv, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_param(self): + tests = [ + (self.param, self.invalid, NotImplemented), + (self.param, self.asbinary, PowExpression((6, self.bin))), + (self.param, self.zero, 1), + (self.param, self.one, 6), + # 4: + (self.param, self.native, 7776), + (self.param, self.npv, NPV_PowExpression((6, self.npv))), + (self.param, self.param, 46656), + (self.param, self.param_mut, NPV_PowExpression((6, self.param_mut))), + # 8: + (self.param, self.var, PowExpression((6, self.var))), + (self.param, self.mon_native, PowExpression((6, self.mon_native))), + (self.param, self.mon_param, PowExpression((6, self.mon_param))), + (self.param, self.mon_npv, PowExpression((6, self.mon_npv))), + # 12: + (self.param, self.linear, PowExpression((6, self.linear))), + (self.param, self.sum, PowExpression((6, self.sum))), + (self.param, self.other, PowExpression((6, self.other))), + (self.param, self.mutable_l0, 1), + # 16: + (self.param, self.mutable_l1, PowExpression((6, self.mon_npv))), + (self.param, self.mutable_l2, PowExpression((6, self.mutable_l2))), + (self.param, self.param0, 1), + (self.param, self.param1, 6), + # 20: + (self.param, self.mutable_l3, NPV_PowExpression((6, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_param_mut(self): + tests = [ + (self.param_mut, self.invalid, NotImplemented), + (self.param_mut, self.asbinary, PowExpression((self.param_mut, self.bin))), + (self.param_mut, self.zero, 1), + (self.param_mut, self.one, self.param_mut), + # 4: + (self.param_mut, self.native, NPV_PowExpression((self.param_mut, 5))), + (self.param_mut, self.npv, NPV_PowExpression((self.param_mut, self.npv))), + (self.param_mut, self.param, NPV_PowExpression((self.param_mut, 6))), + ( + self.param_mut, + self.param_mut, + NPV_PowExpression((self.param_mut, self.param_mut)), + ), + # 8: + (self.param_mut, self.var, PowExpression((self.param_mut, self.var))), + ( + self.param_mut, + self.mon_native, + PowExpression((self.param_mut, self.mon_native)), + ), + ( + self.param_mut, + self.mon_param, + PowExpression((self.param_mut, self.mon_param)), + ), + ( + self.param_mut, + self.mon_npv, + PowExpression((self.param_mut, self.mon_npv)), + ), + # 12: + (self.param_mut, self.linear, PowExpression((self.param_mut, self.linear))), + (self.param_mut, self.sum, PowExpression((self.param_mut, self.sum))), + (self.param_mut, self.other, PowExpression((self.param_mut, self.other))), + (self.param_mut, self.mutable_l0, 1), + # 16: + ( + self.param_mut, + self.mutable_l1, + PowExpression((self.param_mut, self.mon_npv)), + ), + ( + self.param_mut, + self.mutable_l2, + PowExpression((self.param_mut, self.mutable_l2)), + ), + (self.param_mut, self.param0, 1), + (self.param_mut, self.param1, self.param_mut), + # 20: + ( + self.param_mut, + self.mutable_l3, + NPV_PowExpression((self.param_mut, self.npv)), + ), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_var(self): + tests = [ + (self.var, self.invalid, NotImplemented), + (self.var, self.asbinary, PowExpression((self.var, self.bin))), + (self.var, self.zero, 1), + (self.var, self.one, self.var), + # 4: + (self.var, self.native, PowExpression((self.var, 5))), + (self.var, self.npv, PowExpression((self.var, self.npv))), + (self.var, self.param, PowExpression((self.var, 6))), + (self.var, self.param_mut, PowExpression((self.var, self.param_mut))), + # 8: + (self.var, self.var, PowExpression((self.var, self.var))), + (self.var, self.mon_native, PowExpression((self.var, self.mon_native))), + (self.var, self.mon_param, PowExpression((self.var, self.mon_param))), + (self.var, self.mon_npv, PowExpression((self.var, self.mon_npv))), + # 12: + (self.var, self.linear, PowExpression((self.var, self.linear))), + (self.var, self.sum, PowExpression((self.var, self.sum))), + (self.var, self.other, PowExpression((self.var, self.other))), + (self.var, self.mutable_l0, 1), + # 16: + (self.var, self.mutable_l1, PowExpression((self.var, self.mon_npv))), + (self.var, self.mutable_l2, PowExpression((self.var, self.mutable_l2))), + (self.var, self.param0, 1), + (self.var, self.param1, self.var), + # 20: + (self.var, self.mutable_l3, PowExpression((self.var, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mon_native(self): + tests = [ + (self.mon_native, self.invalid, NotImplemented), + ( + self.mon_native, + self.asbinary, + PowExpression((self.mon_native, self.bin)), + ), + (self.mon_native, self.zero, 1), + (self.mon_native, self.one, self.mon_native), + # 4: + (self.mon_native, self.native, PowExpression((self.mon_native, 5))), + (self.mon_native, self.npv, PowExpression((self.mon_native, self.npv))), + (self.mon_native, self.param, PowExpression((self.mon_native, 6))), + ( + self.mon_native, + self.param_mut, + PowExpression((self.mon_native, self.param_mut)), + ), + # 8: + (self.mon_native, self.var, PowExpression((self.mon_native, self.var))), + ( + self.mon_native, + self.mon_native, + PowExpression((self.mon_native, self.mon_native)), + ), + ( + self.mon_native, + self.mon_param, + PowExpression((self.mon_native, self.mon_param)), + ), + ( + self.mon_native, + self.mon_npv, + PowExpression((self.mon_native, self.mon_npv)), + ), + # 12: + ( + self.mon_native, + self.linear, + PowExpression((self.mon_native, self.linear)), + ), + (self.mon_native, self.sum, PowExpression((self.mon_native, self.sum))), + (self.mon_native, self.other, PowExpression((self.mon_native, self.other))), + (self.mon_native, self.mutable_l0, 1), + # 16: + ( + self.mon_native, + self.mutable_l1, + PowExpression((self.mon_native, self.mon_npv)), + ), + ( + self.mon_native, + self.mutable_l2, + PowExpression((self.mon_native, self.mutable_l2)), + ), + (self.mon_native, self.param0, 1), + (self.mon_native, self.param1, self.mon_native), + # 20: + ( + self.mon_native, + self.mutable_l3, + PowExpression((self.mon_native, self.npv)), + ), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mon_param(self): + tests = [ + (self.mon_param, self.invalid, NotImplemented), + (self.mon_param, self.asbinary, PowExpression((self.mon_param, self.bin))), + (self.mon_param, self.zero, 1), + (self.mon_param, self.one, self.mon_param), + # 4: + (self.mon_param, self.native, PowExpression((self.mon_param, 5))), + (self.mon_param, self.npv, PowExpression((self.mon_param, self.npv))), + (self.mon_param, self.param, PowExpression((self.mon_param, 6))), + ( + self.mon_param, + self.param_mut, + PowExpression((self.mon_param, self.param_mut)), + ), + # 8: + (self.mon_param, self.var, PowExpression((self.mon_param, self.var))), + ( + self.mon_param, + self.mon_native, + PowExpression((self.mon_param, self.mon_native)), + ), + ( + self.mon_param, + self.mon_param, + PowExpression((self.mon_param, self.mon_param)), + ), + ( + self.mon_param, + self.mon_npv, + PowExpression((self.mon_param, self.mon_npv)), + ), + # 12: + (self.mon_param, self.linear, PowExpression((self.mon_param, self.linear))), + (self.mon_param, self.sum, PowExpression((self.mon_param, self.sum))), + (self.mon_param, self.other, PowExpression((self.mon_param, self.other))), + (self.mon_param, self.mutable_l0, 1), + # 16: + ( + self.mon_param, + self.mutable_l1, + PowExpression((self.mon_param, self.mon_npv)), + ), + ( + self.mon_param, + self.mutable_l2, + PowExpression((self.mon_param, self.mutable_l2)), + ), + (self.mon_param, self.param0, 1), + (self.mon_param, self.param1, self.mon_param), + # 20: + ( + self.mon_param, + self.mutable_l3, + PowExpression((self.mon_param, self.npv)), + ), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mon_npv(self): + tests = [ + (self.mon_npv, self.invalid, NotImplemented), + (self.mon_npv, self.asbinary, PowExpression((self.mon_npv, self.bin))), + (self.mon_npv, self.zero, 1), + (self.mon_npv, self.one, self.mon_npv), + # 4: + (self.mon_npv, self.native, PowExpression((self.mon_npv, 5))), + (self.mon_npv, self.npv, PowExpression((self.mon_npv, self.npv))), + (self.mon_npv, self.param, PowExpression((self.mon_npv, 6))), + ( + self.mon_npv, + self.param_mut, + PowExpression((self.mon_npv, self.param_mut)), + ), + # 8: + (self.mon_npv, self.var, PowExpression((self.mon_npv, self.var))), + ( + self.mon_npv, + self.mon_native, + PowExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mon_npv, + self.mon_param, + PowExpression((self.mon_npv, self.mon_param)), + ), + (self.mon_npv, self.mon_npv, PowExpression((self.mon_npv, self.mon_npv))), + # 12: + (self.mon_npv, self.linear, PowExpression((self.mon_npv, self.linear))), + (self.mon_npv, self.sum, PowExpression((self.mon_npv, self.sum))), + (self.mon_npv, self.other, PowExpression((self.mon_npv, self.other))), + (self.mon_npv, self.mutable_l0, 1), + # 16: + ( + self.mon_npv, + self.mutable_l1, + PowExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mon_npv, + self.mutable_l2, + PowExpression((self.mon_npv, self.mutable_l2)), + ), + (self.mon_npv, self.param0, 1), + (self.mon_npv, self.param1, self.mon_npv), + # 20: + (self.mon_npv, self.mutable_l3, PowExpression((self.mon_npv, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_linear(self): + tests = [ + (self.linear, self.invalid, NotImplemented), + (self.linear, self.asbinary, PowExpression((self.linear, self.bin))), + (self.linear, self.zero, 1), + (self.linear, self.one, self.linear), + # 4: + (self.linear, self.native, PowExpression((self.linear, 5))), + (self.linear, self.npv, PowExpression((self.linear, self.npv))), + (self.linear, self.param, PowExpression((self.linear, 6))), + (self.linear, self.param_mut, PowExpression((self.linear, self.param_mut))), + # 8: + (self.linear, self.var, PowExpression((self.linear, self.var))), + ( + self.linear, + self.mon_native, + PowExpression((self.linear, self.mon_native)), + ), + (self.linear, self.mon_param, PowExpression((self.linear, self.mon_param))), + (self.linear, self.mon_npv, PowExpression((self.linear, self.mon_npv))), + # 12: + (self.linear, self.linear, PowExpression((self.linear, self.linear))), + (self.linear, self.sum, PowExpression((self.linear, self.sum))), + (self.linear, self.other, PowExpression((self.linear, self.other))), + (self.linear, self.mutable_l0, 1), + # 16: + (self.linear, self.mutable_l1, PowExpression((self.linear, self.mon_npv))), + ( + self.linear, + self.mutable_l2, + PowExpression((self.linear, self.mutable_l2)), + ), + (self.linear, self.param0, 1), + (self.linear, self.param1, self.linear), + # 20: + (self.linear, self.mutable_l3, PowExpression((self.linear, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_sum(self): + tests = [ + (self.sum, self.invalid, NotImplemented), + (self.sum, self.asbinary, PowExpression((self.sum, self.bin))), + (self.sum, self.zero, 1), + (self.sum, self.one, self.sum), + # 4: + (self.sum, self.native, PowExpression((self.sum, 5))), + (self.sum, self.npv, PowExpression((self.sum, self.npv))), + (self.sum, self.param, PowExpression((self.sum, 6))), + (self.sum, self.param_mut, PowExpression((self.sum, self.param_mut))), + # 8: + (self.sum, self.var, PowExpression((self.sum, self.var))), + (self.sum, self.mon_native, PowExpression((self.sum, self.mon_native))), + (self.sum, self.mon_param, PowExpression((self.sum, self.mon_param))), + (self.sum, self.mon_npv, PowExpression((self.sum, self.mon_npv))), + # 12: + (self.sum, self.linear, PowExpression((self.sum, self.linear))), + (self.sum, self.sum, PowExpression((self.sum, self.sum))), + (self.sum, self.other, PowExpression((self.sum, self.other))), + (self.sum, self.mutable_l0, 1), + # 16: + (self.sum, self.mutable_l1, PowExpression((self.sum, self.mon_npv))), + (self.sum, self.mutable_l2, PowExpression((self.sum, self.mutable_l2))), + (self.sum, self.param0, 1), + (self.sum, self.param1, self.sum), + # 20: + (self.sum, self.mutable_l3, PowExpression((self.sum, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_other(self): + tests = [ + (self.other, self.invalid, NotImplemented), + (self.other, self.asbinary, PowExpression((self.other, self.bin))), + (self.other, self.zero, 1), + (self.other, self.one, self.other), + # 4: + (self.other, self.native, PowExpression((self.other, 5))), + (self.other, self.npv, PowExpression((self.other, self.npv))), + (self.other, self.param, PowExpression((self.other, 6))), + (self.other, self.param_mut, PowExpression((self.other, self.param_mut))), + # 8: + (self.other, self.var, PowExpression((self.other, self.var))), + (self.other, self.mon_native, PowExpression((self.other, self.mon_native))), + (self.other, self.mon_param, PowExpression((self.other, self.mon_param))), + (self.other, self.mon_npv, PowExpression((self.other, self.mon_npv))), + # 12: + (self.other, self.linear, PowExpression((self.other, self.linear))), + (self.other, self.sum, PowExpression((self.other, self.sum))), + (self.other, self.other, PowExpression((self.other, self.other))), + (self.other, self.mutable_l0, 1), + # 16: + (self.other, self.mutable_l1, PowExpression((self.other, self.mon_npv))), + (self.other, self.mutable_l2, PowExpression((self.other, self.mutable_l2))), + (self.other, self.param0, 1), + (self.other, self.param1, self.other), + # 20: + (self.other, self.mutable_l3, PowExpression((self.other, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mutable_l0(self): + tests = [ + (self.mutable_l0, self.invalid, NotImplemented), + (self.mutable_l0, self.asbinary, PowExpression((0, self.bin))), + (self.mutable_l0, self.zero, 1), + (self.mutable_l0, self.one, 0), + # 4: + (self.mutable_l0, self.native, 0), + (self.mutable_l0, self.npv, NPV_PowExpression((0, self.npv))), + (self.mutable_l0, self.param, 0), + (self.mutable_l0, self.param_mut, NPV_PowExpression((0, self.param_mut))), + # 8: + (self.mutable_l0, self.var, PowExpression((0, self.var))), + (self.mutable_l0, self.mon_native, PowExpression((0, self.mon_native))), + (self.mutable_l0, self.mon_param, PowExpression((0, self.mon_param))), + (self.mutable_l0, self.mon_npv, PowExpression((0, self.mon_npv))), + # 12: + (self.mutable_l0, self.linear, PowExpression((0, self.linear))), + (self.mutable_l0, self.sum, PowExpression((0, self.sum))), + (self.mutable_l0, self.other, PowExpression((0, self.other))), + (self.mutable_l0, self.mutable_l0, 1), + # 16: + (self.mutable_l0, self.mutable_l1, PowExpression((0, self.mon_npv))), + (self.mutable_l0, self.mutable_l2, PowExpression((0, self.mutable_l2))), + (self.mutable_l0, self.param0, 1), + (self.mutable_l0, self.param1, 0), + # 20: + (self.mutable_l0, self.mutable_l3, NPV_PowExpression((0, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mutable_l1(self): + tests = [ + (self.mutable_l1, self.invalid, NotImplemented), + (self.mutable_l1, self.asbinary, PowExpression((self.mon_npv, self.bin))), + (self.mutable_l1, self.zero, 1), + (self.mutable_l1, self.one, self.mon_npv), + # 4: + (self.mutable_l1, self.native, PowExpression((self.mon_npv, 5))), + (self.mutable_l1, self.npv, PowExpression((self.mon_npv, self.npv))), + (self.mutable_l1, self.param, PowExpression((self.mon_npv, 6))), + ( + self.mutable_l1, + self.param_mut, + PowExpression((self.mon_npv, self.param_mut)), + ), + # 8: + (self.mutable_l1, self.var, PowExpression((self.mon_npv, self.var))), + ( + self.mutable_l1, + self.mon_native, + PowExpression((self.mon_npv, self.mon_native)), + ), + ( + self.mutable_l1, + self.mon_param, + PowExpression((self.mon_npv, self.mon_param)), + ), + ( + self.mutable_l1, + self.mon_npv, + PowExpression((self.mon_npv, self.mon_npv)), + ), + # 12: + (self.mutable_l1, self.linear, PowExpression((self.mon_npv, self.linear))), + (self.mutable_l1, self.sum, PowExpression((self.mon_npv, self.sum))), + (self.mutable_l1, self.other, PowExpression((self.mon_npv, self.other))), + (self.mutable_l1, self.mutable_l0, 1), + # 16: + ( + self.mutable_l1, + self.mutable_l1, + PowExpression((self.mon_npv, self.mon_npv)), + ), + ( + self.mutable_l1, + self.mutable_l2, + PowExpression((self.mon_npv, self.mutable_l2)), + ), + (self.mutable_l1, self.param0, 1), + (self.mutable_l1, self.param1, self.mon_npv), + # 20: + (self.mutable_l1, self.mutable_l3, PowExpression((self.mon_npv, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mutable_l2(self): + tests = [ + (self.mutable_l2, self.invalid, NotImplemented), + ( + self.mutable_l2, + self.asbinary, + PowExpression((self.mutable_l2, self.bin)), + ), + (self.mutable_l2, self.zero, 1), + (self.mutable_l2, self.one, self.mutable_l2), + # 4: + (self.mutable_l2, self.native, PowExpression((self.mutable_l2, 5))), + (self.mutable_l2, self.npv, PowExpression((self.mutable_l2, self.npv))), + (self.mutable_l2, self.param, PowExpression((self.mutable_l2, 6))), + ( + self.mutable_l2, + self.param_mut, + PowExpression((self.mutable_l2, self.param_mut)), + ), + # 8: + (self.mutable_l2, self.var, PowExpression((self.mutable_l2, self.var))), + ( + self.mutable_l2, + self.mon_native, + PowExpression((self.mutable_l2, self.mon_native)), + ), + ( + self.mutable_l2, + self.mon_param, + PowExpression((self.mutable_l2, self.mon_param)), + ), + ( + self.mutable_l2, + self.mon_npv, + PowExpression((self.mutable_l2, self.mon_npv)), + ), + # 12: + ( + self.mutable_l2, + self.linear, + PowExpression((self.mutable_l2, self.linear)), + ), + (self.mutable_l2, self.sum, PowExpression((self.mutable_l2, self.sum))), + (self.mutable_l2, self.other, PowExpression((self.mutable_l2, self.other))), + (self.mutable_l2, self.mutable_l0, 1), + # 16: + ( + self.mutable_l2, + self.mutable_l1, + PowExpression((self.mutable_l2, self.mon_npv)), + ), + ( + self.mutable_l2, + self.mutable_l2, + PowExpression((self.mutable_l2, self.mutable_l2)), + ), + (self.mutable_l2, self.param0, 1), + (self.mutable_l2, self.param1, self.mutable_l2), + # 20: + ( + self.mutable_l2, + self.mutable_l3, + PowExpression((self.mutable_l2, self.npv)), + ), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_param0(self): + tests = [ + (self.param0, self.invalid, NotImplemented), + (self.param0, self.asbinary, PowExpression((0, self.bin))), + (self.param0, self.zero, 1), + (self.param0, self.one, 0), + # 4: + (self.param0, self.native, 0), + (self.param0, self.npv, NPV_PowExpression((0, self.npv))), + (self.param0, self.param, 0), + (self.param0, self.param_mut, NPV_PowExpression((0, self.param_mut))), + # 8: + (self.param0, self.var, PowExpression((0, self.var))), + (self.param0, self.mon_native, PowExpression((0, self.mon_native))), + (self.param0, self.mon_param, PowExpression((0, self.mon_param))), + (self.param0, self.mon_npv, PowExpression((0, self.mon_npv))), + # 12: + (self.param0, self.linear, PowExpression((0, self.linear))), + (self.param0, self.sum, PowExpression((0, self.sum))), + (self.param0, self.other, PowExpression((0, self.other))), + (self.param0, self.mutable_l0, 1), + # 16: + (self.param0, self.mutable_l1, PowExpression((0, self.mon_npv))), + (self.param0, self.mutable_l2, PowExpression((0, self.mutable_l2))), + (self.param0, self.param0, 1), + (self.param0, self.param1, 0), + # 20: + (self.param0, self.mutable_l3, NPV_PowExpression((0, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_param1(self): + tests = [ + (self.param1, self.invalid, NotImplemented), + (self.param1, self.asbinary, PowExpression((1, self.bin))), + (self.param1, self.zero, 1), + (self.param1, self.one, 1), + # 4: + (self.param1, self.native, 1), + (self.param1, self.npv, NPV_PowExpression((1, self.npv))), + (self.param1, self.param, 1), + (self.param1, self.param_mut, NPV_PowExpression((1, self.param_mut))), + # 8: + (self.param1, self.var, PowExpression((1, self.var))), + (self.param1, self.mon_native, PowExpression((1, self.mon_native))), + (self.param1, self.mon_param, PowExpression((1, self.mon_param))), + (self.param1, self.mon_npv, PowExpression((1, self.mon_npv))), + # 12: + (self.param1, self.linear, PowExpression((1, self.linear))), + (self.param1, self.sum, PowExpression((1, self.sum))), + (self.param1, self.other, PowExpression((1, self.other))), + (self.param1, self.mutable_l0, 1), + # 16: + (self.param1, self.mutable_l1, PowExpression((1, self.mon_npv))), + (self.param1, self.mutable_l2, PowExpression((1, self.mutable_l2))), + (self.param1, self.param0, 1), + (self.param1, self.param1, 1), + # 20: + (self.param1, self.mutable_l3, NPV_PowExpression((1, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + def test_pow_mutable_l3(self): + tests = [ + (self.mutable_l3, self.invalid, NotImplemented), + (self.mutable_l3, self.asbinary, PowExpression((self.npv, self.bin))), + (self.mutable_l3, self.zero, 1), + (self.mutable_l3, self.one, self.npv), + # 4: + (self.mutable_l3, self.native, NPV_PowExpression((self.npv, 5))), + (self.mutable_l3, self.npv, NPV_PowExpression((self.npv, self.npv))), + (self.mutable_l3, self.param, NPV_PowExpression((self.npv, 6))), + ( + self.mutable_l3, + self.param_mut, + NPV_PowExpression((self.npv, self.param_mut)), + ), + # 8: + (self.mutable_l3, self.var, PowExpression((self.npv, self.var))), + ( + self.mutable_l3, + self.mon_native, + PowExpression((self.npv, self.mon_native)), + ), + ( + self.mutable_l3, + self.mon_param, + PowExpression((self.npv, self.mon_param)), + ), + (self.mutable_l3, self.mon_npv, PowExpression((self.npv, self.mon_npv))), + # 12: + (self.mutable_l3, self.linear, PowExpression((self.npv, self.linear))), + (self.mutable_l3, self.sum, PowExpression((self.npv, self.sum))), + (self.mutable_l3, self.other, PowExpression((self.npv, self.other))), + (self.mutable_l3, self.mutable_l0, 1), + # 16: + (self.mutable_l3, self.mutable_l1, PowExpression((self.npv, self.mon_npv))), + ( + self.mutable_l3, + self.mutable_l2, + PowExpression((self.npv, self.mutable_l2)), + ), + (self.mutable_l3, self.param0, 1), + (self.mutable_l3, self.param1, self.npv), + # 20: + (self.mutable_l3, self.mutable_l3, NPV_PowExpression((self.npv, self.npv))), + ] + self._run_cases(tests, operator.pow) + self._run_cases(tests, operator.ipow) + + # + # + # NEGATION + # + # + + def test_neg(self): + tests = [ + (self.invalid, NotImplemented), + (self.asbinary, MonomialTermExpression((-1, self.bin))), + (self.zero, 0), + (self.one, -1), + # 4: + (self.native, -5), + (self.npv, NPV_NegationExpression((self.npv,))), + (self.param, -6), + (self.param_mut, NPV_NegationExpression((self.param_mut,))), + # 8: + (self.var, MonomialTermExpression((-1, self.var))), + (self.mon_native, self.minus_mon_native), + (self.mon_param, self.minus_mon_param), + (self.mon_npv, self.minus_mon_npv), + # 12: + (self.linear, NegationExpression((self.linear,))), + (self.sum, NegationExpression((self.sum,))), + (self.other, NegationExpression((self.other,))), + (self.mutable_l0, 0), + # 16: + (self.mutable_l1, self.minus_mon_npv), + (self.mutable_l2, NegationExpression((self.mutable_l2,))), + (self.param0, 0), + (self.param1, -1), + # 20: + (self.mutable_l3, self.minus_npv), + ] + self._run_cases(tests, operator.neg) + + def test_neg_neg(self): + def _neg_neg(x): + return operator.neg(operator.neg(x)) + + tests = [ + (self.invalid, NotImplemented), + (self.asbinary, MonomialTermExpression((1, self.bin))), + (self.zero, 0), + (self.one, 1), + # 4: + (self.native, 5), + (self.npv, self.npv), + (self.param, 6), + (self.param_mut, self.param_mut), + # 8: + (self.var, MonomialTermExpression((1, self.var))), + (self.mon_native, self.mon_native), + (self.mon_param, self.mon_param), + (self.mon_npv, self.mon_npv), + # 12: + (self.linear, self.linear), + (self.sum, self.sum), + (self.other, self.other), + (self.mutable_l0, 0), + # 16: + (self.mutable_l1, self.mon_npv), + (self.mutable_l2, self.mutable_l2), + (self.param0, 0), + (self.param1, 1), + # 20: + (self.mutable_l3, self.npv), + ] + self._run_cases(tests, _neg_neg) + + # + # + # ABSOLUTE VALUE + # + # + + def test_abs(self): + tests = [ + (self.invalid, NotImplemented), + (self.asbinary, AbsExpression((self.bin,))), + (self.zero, 0), + (self.one, 1), + # 4: + (self.native, 5), + (self.npv, NPV_AbsExpression((self.npv,))), + (self.param, 6), + (self.param_mut, NPV_AbsExpression((self.param_mut,))), + # 8: + (self.var, AbsExpression((self.var,))), + (self.mon_native, AbsExpression((self.mon_native,))), + (self.mon_param, AbsExpression((self.mon_param,))), + (self.mon_npv, AbsExpression((self.mon_npv,))), + # 12: + (self.linear, AbsExpression((self.linear,))), + (self.sum, AbsExpression((self.sum,))), + (self.other, AbsExpression((self.other,))), + (self.mutable_l0, 0), + # 16: + (self.mutable_l1, AbsExpression((self.mon_npv,))), + (self.mutable_l2, AbsExpression((self.mutable_l2,))), + (self.param0, 0), + (self.param1, 1), + # 20: + (self.mutable_l3, NPV_AbsExpression((self.npv,))), + ] + self._run_cases(tests, operator.abs) + + # + # + # UNARY FUNCTION + # + # + + def test_unary(self): + SKIP_0 = {'log', 'log10', 'acosh'} + SKIP_1 = {'atanh'} + SKIP_5 = {'asin', 'acos', 'atanh'} + SKIP_6 = SKIP_5 + for op, name, fcn in [ + (EXPR.ceil, 'ceil', math.ceil), + (EXPR.floor, 'floor', math.floor), + (EXPR.exp, 'exp', math.exp), + (EXPR.log, 'log', math.log), + (EXPR.log10, 'log10', math.log10), + (EXPR.sqrt, 'sqrt', math.sqrt), + (EXPR.sin, 'sin', math.sin), + (EXPR.cos, 'cos', math.cos), + (EXPR.tan, 'tan', math.tan), + (EXPR.asin, 'asin', math.asin), + (EXPR.acos, 'acos', math.acos), + (EXPR.atan, 'atan', math.atan), + (EXPR.sinh, 'sinh', math.sinh), + (EXPR.cosh, 'cosh', math.cosh), + (EXPR.tanh, 'tanh', math.tanh), + (EXPR.asinh, 'asinh', math.asinh), + (EXPR.acosh, 'acosh', math.acosh), + (EXPR.atanh, 'atanh', math.atanh), + ]: + tests = [ + (self.invalid, NotImplemented), + (self.asbinary, UnaryFunctionExpression((self.bin,), name, fcn)), + (self.zero, ValueError if name in SKIP_0 else fcn(0)), + (self.one, ValueError if name in SKIP_1 else fcn(1)), + # 4: + (self.native, ValueError if name in SKIP_5 else fcn(5)), + (self.npv, NPV_UnaryFunctionExpression((self.npv,), name, fcn)), + (self.param, ValueError if name in SKIP_6 else fcn(6)), + ( + self.param_mut, + NPV_UnaryFunctionExpression((self.param_mut,), name, fcn), + ), + # 8: + (self.var, UnaryFunctionExpression((self.var,), name, fcn)), + ( + self.mon_native, + UnaryFunctionExpression((self.mon_native,), name, fcn), + ), + (self.mon_param, UnaryFunctionExpression((self.mon_param,), name, fcn)), + (self.mon_npv, UnaryFunctionExpression((self.mon_npv,), name, fcn)), + # 12: + (self.linear, UnaryFunctionExpression((self.linear,), name, fcn)), + (self.sum, UnaryFunctionExpression((self.sum,), name, fcn)), + (self.other, UnaryFunctionExpression((self.other,), name, fcn)), + (self.mutable_l0, ValueError if name in SKIP_0 else fcn(0)), + # 16: + (self.mutable_l1, UnaryFunctionExpression((self.mon_npv,), name, fcn)), + ( + self.mutable_l2, + UnaryFunctionExpression((self.mutable_l2,), name, fcn), + ), + (self.param0, ValueError if name in SKIP_0 else fcn(0)), + (self.param1, ValueError if name in SKIP_1 else fcn(1)), + # 20: + (self.mutable_l3, NPV_UnaryFunctionExpression((self.npv,), name, fcn)), + ] + self._run_cases(tests, op) + + # + # + # MUTABLE SUM IADD EXPRESSIONS + # + # + + def test_mutable_nvp_iadd(self): + mutable_npv = _MutableNPVSumExpression([]) + tests = [ + (mutable_npv, self.invalid, NotImplemented), + (mutable_npv, self.asbinary, _MutableLinearExpression([self.mon_bin])), + (mutable_npv, self.zero, _MutableNPVSumExpression([])), + (mutable_npv, self.one, _MutableNPVSumExpression([1])), + # 4: + (mutable_npv, self.native, _MutableNPVSumExpression([5])), + (mutable_npv, self.npv, _MutableNPVSumExpression([self.npv])), + (mutable_npv, self.param, _MutableNPVSumExpression([6])), + (mutable_npv, self.param_mut, _MutableNPVSumExpression([self.param_mut])), + # 8: + (mutable_npv, self.var, _MutableLinearExpression([self.mon_var])), + (mutable_npv, self.mon_native, _MutableLinearExpression([self.mon_native])), + (mutable_npv, self.mon_param, _MutableLinearExpression([self.mon_param])), + (mutable_npv, self.mon_npv, _MutableLinearExpression([self.mon_npv])), + # 12: + (mutable_npv, self.linear, _MutableLinearExpression(self.linear.args)), + (mutable_npv, self.sum, _MutableSumExpression(self.sum.args)), + (mutable_npv, self.other, _MutableSumExpression([self.other])), + (mutable_npv, self.mutable_l0, _MutableNPVSumExpression([])), + # 16: + ( + mutable_npv, + self.mutable_l1, + _MutableLinearExpression(self.mutable_l1.args), + ), + (mutable_npv, self.mutable_l2, _MutableSumExpression(self.mutable_l2.args)), + (mutable_npv, self.param0, _MutableNPVSumExpression([])), + (mutable_npv, self.param1, _MutableNPVSumExpression([1])), + # 20: + (mutable_npv, self.mutable_l3, _MutableNPVSumExpression([self.npv])), + ] + self._run_iadd_cases(tests, operator.iadd) + + mutable_npv = _MutableNPVSumExpression([10]) + tests = [ + (mutable_npv, self.invalid, NotImplemented), + (mutable_npv, self.asbinary, _MutableLinearExpression([10, self.mon_bin])), + (mutable_npv, self.zero, _MutableNPVSumExpression([10])), + (mutable_npv, self.one, _MutableNPVSumExpression([10, 1])), + # 4: + (mutable_npv, self.native, _MutableNPVSumExpression([10, 5])), + (mutable_npv, self.npv, _MutableNPVSumExpression([10, self.npv])), + (mutable_npv, self.param, _MutableNPVSumExpression([10, 6])), + ( + mutable_npv, + self.param_mut, + _MutableNPVSumExpression([10, self.param_mut]), + ), + # 8: + (mutable_npv, self.var, _MutableLinearExpression([10, self.mon_var])), + ( + mutable_npv, + self.mon_native, + _MutableLinearExpression([10, self.mon_native]), + ), + ( + mutable_npv, + self.mon_param, + _MutableLinearExpression([10, self.mon_param]), + ), + (mutable_npv, self.mon_npv, _MutableLinearExpression([10, self.mon_npv])), + # 12: + ( + mutable_npv, + self.linear, + _MutableLinearExpression([10] + self.linear.args), + ), + (mutable_npv, self.sum, _MutableSumExpression([10] + self.sum.args)), + (mutable_npv, self.other, _MutableSumExpression([10, self.other])), + (mutable_npv, self.mutable_l0, _MutableNPVSumExpression([10])), + # 16: + ( + mutable_npv, + self.mutable_l1, + _MutableLinearExpression([10] + self.mutable_l1.args), + ), + ( + mutable_npv, + self.mutable_l2, + _MutableSumExpression([10] + self.mutable_l2.args), + ), + (mutable_npv, self.param0, _MutableNPVSumExpression([10])), + (mutable_npv, self.param1, _MutableNPVSumExpression([10, 1])), + # 20: + (mutable_npv, self.mutable_l3, _MutableNPVSumExpression([10, self.npv])), + ] + self._run_iadd_cases(tests, operator.iadd) + + def test_mutable_lin_iadd(self): + mutable_lin = _MutableLinearExpression([]) + tests = [ + (mutable_lin, self.invalid, NotImplemented), + (mutable_lin, self.asbinary, _MutableLinearExpression([self.mon_bin])), + (mutable_lin, self.zero, _MutableLinearExpression([])), + (mutable_lin, self.one, _MutableLinearExpression([1])), + # 4: + (mutable_lin, self.native, _MutableLinearExpression([5])), + (mutable_lin, self.npv, _MutableLinearExpression([self.npv])), + (mutable_lin, self.param, _MutableLinearExpression([6])), + (mutable_lin, self.param_mut, _MutableLinearExpression([self.param_mut])), + # 8: + (mutable_lin, self.var, _MutableLinearExpression([self.mon_var])), + (mutable_lin, self.mon_native, _MutableLinearExpression([self.mon_native])), + (mutable_lin, self.mon_param, _MutableLinearExpression([self.mon_param])), + (mutable_lin, self.mon_npv, _MutableLinearExpression([self.mon_npv])), + # 12: + (mutable_lin, self.linear, _MutableLinearExpression(self.linear.args)), + (mutable_lin, self.sum, _MutableSumExpression(self.sum.args)), + (mutable_lin, self.other, _MutableSumExpression([self.other])), + (mutable_lin, self.mutable_l0, _MutableLinearExpression([])), + # 16: + ( + mutable_lin, + self.mutable_l1, + _MutableLinearExpression(self.mutable_l1.args), + ), + (mutable_lin, self.mutable_l2, _MutableSumExpression(self.mutable_l2.args)), + (mutable_lin, self.param0, _MutableLinearExpression([])), + (mutable_lin, self.param1, _MutableLinearExpression([1])), + # 20: + (mutable_lin, self.mutable_l3, _MutableLinearExpression([self.npv])), + ] + self._run_iadd_cases(tests, operator.iadd) + + mutable_lin = _MutableLinearExpression([self.mon_bin]) + tests = [ + (mutable_lin, self.invalid, NotImplemented), + ( + mutable_lin, + self.asbinary, + _MutableLinearExpression([self.mon_bin, self.mon_bin]), + ), + (mutable_lin, self.zero, _MutableLinearExpression([self.mon_bin])), + (mutable_lin, self.one, _MutableLinearExpression([self.mon_bin, 1])), + # 4: + (mutable_lin, self.native, _MutableLinearExpression([self.mon_bin, 5])), + (mutable_lin, self.npv, _MutableLinearExpression([self.mon_bin, self.npv])), + (mutable_lin, self.param, _MutableLinearExpression([self.mon_bin, 6])), + ( + mutable_lin, + self.param_mut, + _MutableLinearExpression([self.mon_bin, self.param_mut]), + ), + # 8: + ( + mutable_lin, + self.var, + _MutableLinearExpression([self.mon_bin, self.mon_var]), + ), + ( + mutable_lin, + self.mon_native, + _MutableLinearExpression([self.mon_bin, self.mon_native]), + ), + ( + mutable_lin, + self.mon_param, + _MutableLinearExpression([self.mon_bin, self.mon_param]), + ), + ( + mutable_lin, + self.mon_npv, + _MutableLinearExpression([self.mon_bin, self.mon_npv]), + ), + # 12: + ( + mutable_lin, + self.linear, + _MutableLinearExpression([self.mon_bin] + self.linear.args), + ), + ( + mutable_lin, + self.sum, + _MutableSumExpression([self.mon_bin] + self.sum.args), + ), + ( + mutable_lin, + self.other, + _MutableSumExpression([self.mon_bin, self.other]), + ), + (mutable_lin, self.mutable_l0, _MutableLinearExpression([self.mon_bin])), + # 16: + ( + mutable_lin, + self.mutable_l1, + _MutableLinearExpression([self.mon_bin] + self.mutable_l1.args), + ), + ( + mutable_lin, + self.mutable_l2, + _MutableSumExpression([self.mon_bin] + self.mutable_l2.args), + ), + (mutable_lin, self.param0, _MutableLinearExpression([self.mon_bin])), + (mutable_lin, self.param1, _MutableLinearExpression([self.mon_bin, 1])), + # 20: + ( + mutable_lin, + self.mutable_l3, + _MutableLinearExpression([self.mon_bin, self.npv]), + ), + ] + self._run_iadd_cases(tests, operator.iadd) + + def test_mutable_sum_iadd(self): + mutable_sum = _MutableSumExpression([]) + tests = [ + (mutable_sum, self.invalid, NotImplemented), + (mutable_sum, self.asbinary, _MutableSumExpression([self.bin])), + (mutable_sum, self.zero, _MutableSumExpression([])), + (mutable_sum, self.one, _MutableSumExpression([1])), + # 4: + (mutable_sum, self.native, _MutableSumExpression([5])), + (mutable_sum, self.npv, _MutableSumExpression([self.npv])), + (mutable_sum, self.param, _MutableSumExpression([6])), + (mutable_sum, self.param_mut, _MutableSumExpression([self.param_mut])), + # 8: + (mutable_sum, self.var, _MutableSumExpression([self.var])), + (mutable_sum, self.mon_native, _MutableSumExpression([self.mon_native])), + (mutable_sum, self.mon_param, _MutableSumExpression([self.mon_param])), + (mutable_sum, self.mon_npv, _MutableSumExpression([self.mon_npv])), + # 12: + (mutable_sum, self.linear, _MutableSumExpression([self.linear])), + (mutable_sum, self.sum, _MutableSumExpression(self.sum.args)), + (mutable_sum, self.other, _MutableSumExpression([self.other])), + (mutable_sum, self.mutable_l0, _MutableSumExpression([])), + # 16: + (mutable_sum, self.mutable_l1, _MutableSumExpression(self.mutable_l1.args)), + (mutable_sum, self.mutable_l2, _MutableSumExpression(self.mutable_l2.args)), + (mutable_sum, self.param0, _MutableSumExpression([])), + (mutable_sum, self.param1, _MutableSumExpression([1])), + # 20: + (mutable_sum, self.mutable_l3, _MutableSumExpression([self.npv])), + ] + self._run_iadd_cases(tests, operator.iadd) + + mutable_sum = _MutableSumExpression([self.other]) + tests = [ + (mutable_sum, self.invalid, NotImplemented), + (mutable_sum, self.asbinary, _MutableSumExpression([self.other, self.bin])), + (mutable_sum, self.zero, _MutableSumExpression([self.other])), + (mutable_sum, self.one, _MutableSumExpression([self.other, 1])), + # 4: + (mutable_sum, self.native, _MutableSumExpression([self.other, 5])), + (mutable_sum, self.npv, _MutableSumExpression([self.other, self.npv])), + (mutable_sum, self.param, _MutableSumExpression([self.other, 6])), + ( + mutable_sum, + self.param_mut, + _MutableSumExpression([self.other, self.param_mut]), + ), + # 8: + (mutable_sum, self.var, _MutableSumExpression([self.other, self.var])), + ( + mutable_sum, + self.mon_native, + _MutableSumExpression([self.other, self.mon_native]), + ), + ( + mutable_sum, + self.mon_param, + _MutableSumExpression([self.other, self.mon_param]), + ), + ( + mutable_sum, + self.mon_npv, + _MutableSumExpression([self.other, self.mon_npv]), + ), + # 12: + ( + mutable_sum, + self.linear, + _MutableSumExpression([self.other, self.linear]), + ), + ( + mutable_sum, + self.sum, + _MutableSumExpression([self.other] + self.sum.args), + ), + (mutable_sum, self.other, _MutableSumExpression([self.other, self.other])), + (mutable_sum, self.mutable_l0, _MutableSumExpression([self.other])), + # 16: + ( + mutable_sum, + self.mutable_l1, + _MutableSumExpression([self.other] + self.mutable_l1.args), + ), + ( + mutable_sum, + self.mutable_l2, + _MutableSumExpression([self.other] + self.mutable_l2.args), + ), + (mutable_sum, self.param0, _MutableSumExpression([self.other])), + (mutable_sum, self.param1, _MutableSumExpression([self.other, 1])), + # 20: + ( + mutable_sum, + self.mutable_l3, + _MutableSumExpression([self.other, self.npv]), + ), + ] + self._run_iadd_cases(tests, operator.iadd) diff --git a/pyomo/core/tests/unit/test_numpy_expr.py b/pyomo/core/tests/unit/test_numpy_expr.py index 931a11378fb..df20f30f9b4 100644 --- a/pyomo/core/tests/unit/test_numpy_expr.py +++ b/pyomo/core/tests/unit/test_numpy_expr.py @@ -12,20 +12,31 @@ import pyomo.common.unittest as unittest from pyomo.common.dependencies import ( - numpy as np, numpy_available, - pandas as pd, pandas_available, + numpy as np, + numpy_available, + pandas as pd, + pandas_available, ) from pyomo.environ import ( - ConcreteModel, Var, RangeSet, Param, Objective, Set, Constraint, Reals + ConcreteModel, + Var, + RangeSet, + Param, + Objective, + Set, + Constraint, + Reals, ) -from pyomo.core.expr.current import MonomialTermExpression -from pyomo.core.expr.numvalue import NumericNDArray, as_numeric +from pyomo.core.expr import MonomialTermExpression +from pyomo.core.expr.numeric_expr import NumericNDArray +from pyomo.core.expr.numvalue import as_numeric from pyomo.core.expr.compare import compare_expressions -from pyomo.core.expr.logical_expr import InequalityExpression +from pyomo.core.expr.relational_expr import InequalityExpression from pyomo.repn import generate_standard_repn + @unittest.skipUnless(numpy_available, 'numpy is not available') class TestNumPy(unittest.TestCase): def test_numpy_scalar_times_scalar_var(self): @@ -34,17 +45,17 @@ def test_numpy_scalar_times_scalar_var(self): m.x = Var() e = np.float64(5) * m.x self.assertIs(type(e), MonomialTermExpression) - self.assertTrue(compare_expressions(e, 5.0*m.x)) + self.assertTrue(compare_expressions(e, 5.0 * m.x)) e = m.x * np.float64(5) self.assertIs(type(e), MonomialTermExpression) - self.assertTrue(compare_expressions(e, 5.0*m.x)) + self.assertTrue(compare_expressions(e, 5.0 * m.x)) def test_initialize_param_from_ndarray(self): # Test issue #611 samples = 10 - c1 = .5 - c2 = .5 + c1 = 0.5 + c2 = 0.5 model = ConcreteModel() model.i = RangeSet(samples) @@ -53,7 +64,7 @@ def init_x(model, i): return np.random.rand(1) def init_y(model, i): - return c1 * (model.x[i]**2) + c2 * model.x[i] + return c1 * (model.x[i] ** 2) + c2 * model.x[i] model.x = Param(model.i, initialize=init_x) model.y = Param(model.i, initialize=init_y, domain=Reals) @@ -61,11 +72,12 @@ def init_y(model, i): model.c_2 = Var(initialize=1) model.error = Objective( # Sum squared error of quadratic fit - expr=sum(( - model.c_1 * model.x[i]**2 + model.c_2 * model.x[i] - model.y[i] - )**2 for i in model.i) + expr=sum( + (model.c_1 * model.x[i] ** 2 + model.c_2 * model.x[i] - model.y[i]) ** 2 + for i in model.i + ) ) - #model.pprint() + # model.pprint() repn = generate_standard_repn(model.error.expr, compute_values=True) self.assertIsNone(repn.nonlinear_expr) @@ -84,56 +96,80 @@ def test_create_objective_from_numpy(self): nsample = 3 nvariables = 2 X0 = np.array(range(nsample)).reshape([nsample, 1]) - model.X = 1 + np.array(range(nsample*nvariables)).reshape( - (nsample, nvariables)) + model.X = 1 + np.array(range(nsample * nvariables)).reshape( + (nsample, nvariables) + ) X = np.concatenate([X0, model.X], axis=1) model.I = RangeSet(1, nsample) model.J = RangeSet(1, nvariables) - error = np.ones((nsample, 1)) - beta = np.ones((nvariables+1, 1)) - model.Y = np.dot(X,beta) + error + beta = np.ones((nvariables + 1, 1)) + model.Y = np.dot(X, beta) + error model.beta = Var(model.J) model.beta0 = Var() def obj_fun(model): - return sum(abs(model.Y[i-1]-(model.beta0 + sum( - model.X[i-1,j-1]*model.beta[j] for j in model.J) )) - for i in model.I) - model.OBJ = Objective(rule = obj_fun) + return sum( + abs( + model.Y[i - 1] + - ( + model.beta0 + + sum(model.X[i - 1, j - 1] * model.beta[j] for j in model.J) + ) + ) + for i in model.I + ) + + model.OBJ = Objective(rule=obj_fun) def obj_fun_quad(model): - return sum((model.Y[i-1]-(model.beta0 + sum( - model.X[i-1,j-1]*model.beta[j] for j in model.J) ))**2 - for i in model.I) - model.OBJ_QUAD = Objective(rule = obj_fun_quad) + return sum( + ( + model.Y[i - 1] + - ( + model.beta0 + + sum(model.X[i - 1, j - 1] * model.beta[j] for j in model.J) + ) + ) + ** 2 + for i in model.I + ) + + model.OBJ_QUAD = Objective(rule=obj_fun_quad) self.assertEqual( str(model.OBJ.expr), "abs(4.0 - (beta[1] + 2*beta[2] + beta0)) + " "abs(9.0 - (3*beta[1] + 4*beta[2] + beta0)) + " - "abs(14.0 - (5*beta[1] + 6*beta[2] + beta0))") + "abs(14.0 - (5*beta[1] + 6*beta[2] + beta0))", + ) self.assertEqual(model.OBJ.expr.polynomial_degree(), None) self.assertEqual(model.OBJ_QUAD.expr.polynomial_degree(), 2) - @unittest.skipUnless(pandas_available, "pandas is not available") def test_param_from_pandas(self): # Test issue #68 model = ConcreteModel() model.I = Set(initialize=range(6)) - model.P0 = Param(model.I, initialize={ - 0: 400.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 240.0}) - model.P1 = Param(model.I, initialize=pd.Series( - {0: 400.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 240.0}).to_dict()) - model.P2 = Param(model.I, initialize=pd.Series( - {0: 400.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 240.0})) + model.P0 = Param( + model.I, initialize={0: 400.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 240.0} + ) + model.P1 = Param( + model.I, + initialize=pd.Series( + {0: 400.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 240.0} + ).to_dict(), + ) + model.P2 = Param( + model.I, + initialize=pd.Series({0: 400.0, 1: 0.0, 2: 0.0, 3: 0.0, 4: 0.0, 5: 240.0}), + ) - #model.pprint() + # model.pprint() self.assertEqual(list(model.P0.values()), list(model.P1.values())) self.assertEqual(list(model.P0.values()), list(model.P2.values())) @@ -141,20 +177,23 @@ def test_param_from_pandas(self): def rule(m, l): return -m.P0[l] <= m.V[l] + model.Constraint0 = Constraint(model.I, rule=rule) def rule(m, l): return -m.P1[l] <= m.V[l] + model.Constraint1 = Constraint(model.I, rule=rule) def rule(m, l): return -m.P2[l] <= m.V[l] + model.Constraint2 = Constraint(model.I, rule=rule) # TODO: support vector operations between Indexed objects - #model.Constraint0a = Constraint(model.I, rule=model.P0 <= model.V) - #model.Constraint1a = Constraint(model.I, rule=model.P1 <= model.V) - #model.Constraint2a = Constraint(model.I, rule=model.P2 <= model.V) + # model.Constraint0a = Constraint(model.I, rule=model.P0 <= model.V) + # model.Constraint1a = Constraint(model.I, rule=model.P1 <= model.V) + # model.Constraint2a = Constraint(model.I, rule=model.P2 <= model.V) @unittest.skipUnless(pandas_available, "pandas is not available") def test_param_from_pandas_series_index(self): @@ -164,12 +203,13 @@ def test_param_from_pandas_series_index(self): # Params treat Series as maps (so the Series index matters) m.I = Set(initialize=s.index) m.p1 = Param(m.I, initialize=s) - self.assertEqual(m.p1.extract_values(), {'T1':1, 'T2':3, 'T3':5}) + self.assertEqual(m.p1.extract_values(), {'T1': 1, 'T2': 3, 'T3': 5}) m.p2 = Param(s.index, initialize=s) - self.assertEqual(m.p2.extract_values(), {'T1':1, 'T2':3, 'T3':5}) + self.assertEqual(m.p2.extract_values(), {'T1': 1, 'T2': 3, 'T3': 5}) with self.assertRaisesRegex( - KeyError, "Index 'T1' is not valid for indexed component 'p3'"): - m.p3 = Param([0,1,2], initialize=s) + KeyError, "Index 'T1' is not valid for indexed component 'p3'" + ): + m.p3 = Param([0, 1, 2], initialize=s) # Sets treat Series as lists m.J = Set(initialize=s) @@ -180,23 +220,26 @@ def test_numpy_float(self): m = ConcreteModel() m.T = Set(initialize=range(3)) - m.v = Var(initialize=1, bounds=(0,None)) + m.v = Var(initialize=1, bounds=(0, None)) m.c = Var(m.T, initialize=20) h = [np.float32(1.0), 1.0, 1] def rule(m, t): return m.c[0] == h[t] * m.c[0] + m.x = Constraint(m.T, rule=rule) def rule(m, t): return m.c[0] == h[t] * m.c[0] * m.v + m.y = Constraint(m.T, rule=rule) def rule(m, t): return m.c[0] == h[t] * m.v + m.z = Constraint(m.T, rule=rule) - #m.pprint() + # m.pprint() for t in m.T: self.assertTrue(compare_expressions(m.x[0].expr, m.x[t].expr)) self.assertTrue(compare_expressions(m.y[0].expr, m.y[t].expr)) @@ -204,16 +247,20 @@ def rule(m, t): def test_indexed_constraint(self): m = ConcreteModel() - m.x = Var([0,1,2,3]) + m.x = Var([0, 1, 2, 3]) A = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) b = np.array([10, 20]) - m.c = Constraint([0,1], expr=A @ m.x <= b) - self.assertTrue(compare_expressions( - m.c[0].expr, - m.x[0] + 2*m.x[1] + 3*m.x[2] + 4*m.x[3] <= 10)) - self.assertTrue(compare_expressions( - m.c[1].expr, - 5*m.x[0] + 6*m.x[1] + 7*m.x[2] + 8*m.x[3] <= 20)) + m.c = Constraint([0, 1], expr=A @ m.x <= b) + self.assertTrue( + compare_expressions( + m.c[0].expr, m.x[0] + 2 * m.x[1] + 3 * m.x[2] + 4 * m.x[3] <= 10 + ) + ) + self.assertTrue( + compare_expressions( + m.c[1].expr, 5 * m.x[0] + 6 * m.x[1] + 7 * m.x[2] + 8 * m.x[3] <= 20 + ) + ) def test_init_param_from_ndarray(self): # Test issue #2033 @@ -221,6 +268,7 @@ def test_init_param_from_ndarray(self): m.ix_set = RangeSet(2) p_init = np.array([0, 5]) + def init_workaround(model, i): return p_init[i - 1] diff --git a/pyomo/core/tests/unit/test_numvalue.py b/pyomo/core/tests/unit/test_numvalue.py index 7ff9151db32..0f9e42f552a 100644 --- a/pyomo/core/tests/unit/test_numvalue.py +++ b/pyomo/core/tests/unit/test_numvalue.py @@ -16,35 +16,53 @@ import pyomo.common.unittest as unittest from pyomo.environ import ( - value, ConcreteModel, Param, Var, - polynomial_degree, is_constant, is_fixed, - is_potentially_variable, is_variable_type + value, + ConcreteModel, + Param, + Var, + polynomial_degree, + is_constant, + is_fixed, + is_potentially_variable, + is_variable_type, ) from pyomo.core.pyomoobject import PyomoObject from pyomo.core.expr.numvalue import ( - NumericConstant, as_numeric, is_numeric_data, - native_types, native_numeric_types, native_integer_types, - native_boolean_types, + NumericConstant, + as_numeric, + is_numeric_data, + native_types, + native_numeric_types, + native_integer_types, ) +from pyomo.common.numeric_types import _native_boolean_types try: import numpy - numpy_available=True + + numpy_available = True except: - numpy_available=False + numpy_available = False + class MyBogusType(object): def __init__(self, val=0): self.val = float(val) + class MyBogusNumericType(MyBogusType): def __add__(self, other): return MyBogusNumericType(self.val + float(other)) + def __lt__(self, other): + return self.val < float(other) -class Test_is_numeric_data(unittest.TestCase): + def __ge__(self, other): + return self.val >= float(other) + +class Test_is_numeric_data(unittest.TestCase): def test_string(self): self.assertEqual(is_numeric_data("a"), False) self.assertEqual(is_numeric_data(b"a"), False) @@ -59,7 +77,9 @@ def test_NumericValue(self): self.assertEqual(is_numeric_data(NumericConstant(1.0)), True) def test_error(self): - class A(object): pass + class A(object): + pass + val = A() self.assertEqual(False, is_numeric_data(val)) @@ -73,7 +93,6 @@ def test_unknownNumericType(self): class Test_value(unittest.TestCase): - def test_none(self): val = None self.assertEqual(val, value(val)) @@ -144,24 +163,27 @@ def test_var2(self): self.assertEqual(None, value(m.x, exception=False)) def test_error1(self): - class A(object): pass + class A(object): + pass + val = A() with self.assertRaisesRegex( - TypeError, "Cannot evaluate object with unknown type: A"): + TypeError, "Cannot evaluate object with unknown type: A" + ): value(val) def test_unknownType(self): ref = MyBogusType(42) with self.assertRaisesRegex( - TypeError, - "Cannot evaluate object with unknown type: MyBogusType"): + TypeError, "Cannot evaluate object with unknown type: MyBogusType" + ): value(ref) def test_unknownNumericType(self): ref = MyBogusNumericType(42) val = value(ref) self.assertEqual(val.val, 42.0) - #self.assertEqual(val().val, 42) + # self.assertEqual(val().val, 42) self.assertIn(MyBogusNumericType, native_numeric_types) self.assertIn(MyBogusNumericType, native_types) native_numeric_types.remove(MyBogusNumericType) @@ -169,14 +191,17 @@ def test_unknownNumericType(self): class Test_polydegree(unittest.TestCase): - def test_none(self): val = None self.assertRaises(TypeError, polynomial_degree, val) def test_bool(self): val = False - self.assertEqual(0, polynomial_degree(val)) + with self.assertRaisesRegex( + TypeError, + "Cannot evaluate the polynomial degree of a non-numeric type: bool", + ): + polynomial_degree(val) def test_float(self): val = 1.1 @@ -230,18 +255,20 @@ def test_var1(self): self.assertTrue(1, polynomial_degree(m.x)) def test_error1(self): - class A(object): pass + class A(object): + pass + val = A() with self.assertRaisesRegex( - TypeError, "Cannot assess properties of object " - "with unknown type: A"): + TypeError, "Cannot assess properties of object with unknown type: A" + ): polynomial_degree(val) def test_unknownNumericType(self): ref = MyBogusNumericType(42) val = polynomial_degree(ref) self.assertEqual(val, 0) - #self.assertEqual(val().val, 42) + # self.assertEqual(val().val, 42) self.assertIn(MyBogusNumericType, native_numeric_types) self.assertIn(MyBogusNumericType, native_types) native_numeric_types.remove(MyBogusNumericType) @@ -249,7 +276,6 @@ def test_unknownNumericType(self): class Test_is_constant(unittest.TestCase): - def test_none(self): self.assertTrue(is_constant(None)) @@ -278,11 +304,13 @@ def test_const2(self): self.assertTrue(is_constant(val)) def test_error(self): - class A(object): pass + class A(object): + pass + val = A() with self.assertRaisesRegex( - TypeError, "Cannot assess properties of object " - "with unknown type: A"): + TypeError, "Cannot assess properties of object with unknown type: A" + ): is_constant(val) def test_unknownNumericType(self): @@ -295,7 +323,6 @@ def test_unknownNumericType(self): class Test_is_fixed(unittest.TestCase): - def test_none(self): self.assertTrue(is_fixed(None)) @@ -320,11 +347,13 @@ def test_const1(self): self.assertTrue(is_fixed(val)) def test_error(self): - class A(object): pass + class A(object): + pass + val = A() with self.assertRaisesRegex( - TypeError, "Cannot assess properties of object " - "with unknown type: A"): + TypeError, "Cannot assess properties of object with unknown type: A" + ): is_fixed(val) def test_unknownNumericType(self): @@ -337,7 +366,6 @@ def test_unknownNumericType(self): class Test_is_variable_type(unittest.TestCase): - def test_none(self): self.assertFalse(is_variable_type(None)) @@ -362,7 +390,9 @@ def test_const1(self): self.assertFalse(is_variable_type(val)) def test_error(self): - class A(object): pass + class A(object): + pass + val = A() self.assertFalse(is_variable_type(val)) @@ -372,7 +402,6 @@ def test_unknownNumericType(self): class Test_is_potentially_variable(unittest.TestCase): - def test_none(self): self.assertFalse(is_potentially_variable(None)) @@ -397,7 +426,9 @@ def test_const1(self): self.assertFalse(is_potentially_variable(val)) def test_error(self): - class A(object): pass + class A(object): + pass + val = A() self.assertFalse(is_potentially_variable(val)) @@ -407,43 +438,53 @@ def test_unknownNumericType(self): class Test_as_numeric(unittest.TestCase): - def test_none(self): val = None with self.assertRaisesRegex( - TypeError, r"NoneType values \('None'\) are not allowed " - "in Pyomo numeric expressions"): + TypeError, + r"NoneType values \('None'\) are not allowed " + "in Pyomo numeric expressions", + ): as_numeric(val) def test_bool(self): - self.assertEqual(as_numeric(False), 0) - self.assertEqual(as_numeric(True), 1) + with self.assertRaisesRegex( + TypeError, + r"bool values \('False'\) are not allowed in Pyomo numeric expressions", + ): + as_numeric(False) + with self.assertRaisesRegex( + TypeError, + r"bool values \('True'\) are not allowed in Pyomo numeric expressions", + ): + as_numeric(True) def test_float(self): val = 1.1 nval = as_numeric(val) self.assertEqual(val, nval) - self.assertEqual(nval/2, 0.55) + self.assertEqual(nval / 2, 0.55) def test_int(self): val = 1 nval = as_numeric(val) self.assertEqual(1.0, nval) - #self.assertEqual(val, nval) - self.assertEqual(nval/2, 0.5) + # self.assertEqual(val, nval) + self.assertEqual(nval / 2, 0.5) def test_long(self): val = int(1e10) nval = as_numeric(val) self.assertEqual(1.0e10, nval) - #self.assertEqual(val, as_numeric(val)) - self.assertEqual(nval/2, 5.0e9) + # self.assertEqual(val, as_numeric(val)) + self.assertEqual(nval / 2, 5.0e9) def test_string(self): val = 'foo' with self.assertRaisesRegex( - TypeError, r"str values \('foo'\) are not allowed " - "in Pyomo numeric expressions"): + TypeError, + r"str values \('foo'\) are not allowed in Pyomo numeric expressions", + ): as_numeric(val) def test_const1(self): @@ -451,32 +492,42 @@ def test_const1(self): self.assertEqual(1.0, as_numeric(val)) def test_error1(self): - class A(object): pass + class A(object): + pass + val = A() with self.assertRaisesRegex( - TypeError, r"Cannot treat the value '.*' as a " - "numeric value because it has unknown type 'A'"): + TypeError, + r"Cannot treat the value '.*' as a " + "numeric value because it has unknown type 'A'", + ): as_numeric(val) def test_unknownType(self): ref = MyBogusType(42) with self.assertRaisesRegex( - TypeError, r"Cannot treat the value '.*' as a " - "numeric value because it has unknown type 'MyBogusType'"): + TypeError, + r"Cannot treat the value '.*' as a " + "numeric value because it has unknown type 'MyBogusType'", + ): as_numeric(ref) def test_non_numeric_component(self): m = ConcreteModel() - m.v = Var([1,2]) + m.v = Var([1, 2]) with self.assertRaisesRegex( - TypeError, "The 'IndexedVar' object 'v' is not a valid " - "type for Pyomo numeric expressions"): + TypeError, + "The 'IndexedVar' object 'v' is not a valid " + "type for Pyomo numeric expressions", + ): as_numeric(m.v) obj = PyomoObject() with self.assertRaisesRegex( - TypeError, "The 'PyomoObject' object '.*' is not a valid " - "type for Pyomo numeric expressions"): + TypeError, + "The 'PyomoObject' object '.*' is not a valid " + "type for Pyomo numeric expressions", + ): as_numeric(obj) def test_unknownNumericType(self): @@ -495,7 +546,7 @@ def test_numpy_basic_float_registration(self): self.skipTest("This test requires NumPy") self.assertIn(numpy.float_, native_numeric_types) self.assertNotIn(numpy.float_, native_integer_types) - self.assertIn(numpy.float_, native_boolean_types) + self.assertIn(numpy.float_, _native_boolean_types) self.assertIn(numpy.float_, native_types) def test_numpy_basic_int_registration(self): @@ -503,7 +554,7 @@ def test_numpy_basic_int_registration(self): self.skipTest("This test requires NumPy") self.assertIn(numpy.int_, native_numeric_types) self.assertIn(numpy.int_, native_integer_types) - self.assertIn(numpy.int_, native_boolean_types) + self.assertIn(numpy.int_, _native_boolean_types) self.assertIn(numpy.int_, native_types) def test_numpy_basic_bool_registration(self): @@ -511,10 +562,9 @@ def test_numpy_basic_bool_registration(self): self.skipTest("This test requires NumPy") self.assertNotIn(numpy.bool_, native_numeric_types) self.assertNotIn(numpy.bool_, native_integer_types) - self.assertIn(numpy.bool_, native_boolean_types) + self.assertIn(numpy.bool_, _native_boolean_types) self.assertIn(numpy.bool_, native_types) if __name__ == "__main__": unittest.main() - diff --git a/pyomo/core/tests/unit/test_obj.py b/pyomo/core/tests/unit/test_obj.py index 3d3c3099a02..d73bf7d6dfd 100644 --- a/pyomo/core/tests/unit/test_obj.py +++ b/pyomo/core/tests/unit/test_obj.py @@ -17,23 +17,38 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest -from pyomo.environ import ConcreteModel, AbstractModel, Objective, ObjectiveList, Var, Param, Set, RangeSet, value, maximize, minimize, simple_objective_rule, simple_objectivelist_rule +from pyomo.environ import ( + ConcreteModel, + AbstractModel, + Objective, + ObjectiveList, + Var, + Param, + Set, + RangeSet, + value, + maximize, + minimize, + simple_objective_rule, + simple_objectivelist_rule, +) -class TestScalarObj(unittest.TestCase): +class TestScalarObj(unittest.TestCase): def test_singleton_get_set(self): model = ConcreteModel() model.o = Objective(expr=1) self.assertEqual(len(model.o), 1) self.assertEqual(model.o.expr, 1) model.o.expr = 2 - self.assertEqual(model.o.expr(), 2) + self.assertEqual(model.o.expr, 2) model.o.expr += 2 - self.assertEqual(model.o.expr(), 4) + self.assertEqual(model.o.expr, 4) def test_singleton_get_set_value(self): model = ConcreteModel() @@ -41,9 +56,20 @@ def test_singleton_get_set_value(self): self.assertEqual(len(model.o), 1) self.assertEqual(model.o.expr, 1) model.o.expr = 2 - self.assertEqual(model.o.expr(), 2) + self.assertEqual(model.o.expr, 2) model.o.expr += 2 - self.assertEqual(model.o.expr(), 4) + self.assertEqual(model.o.expr, 4) + + def test_scalar_invalid_expr(self): + m = ConcreteModel() + m.x = Var() + with self.assertRaisesRegex( + ValueError, + "Cannot assign InequalityExpression to 'obj': " + "ScalarObjective components only allow numeric expression " + "types.", + ): + m.obj = Objective(expr=m.x <= 0) def test_empty_singleton(self): a = Objective() @@ -122,7 +148,7 @@ def test_numeric_expr(self): def test_mutable_param_expr(self): """Test expr option with a single mutable param""" model = ConcreteModel() - model.p = Param(initialize=1.0,mutable=True) + model.p = Param(initialize=1.0, mutable=True) model.obj = Objective(expr=model.p) self.assertEqual(model.obj(), 1.0) @@ -132,7 +158,7 @@ def test_mutable_param_expr(self): def test_immutable_param_expr(self): """Test expr option a single immutable param""" model = ConcreteModel() - model.p = Param(initialize=1.0,mutable=False) + model.p = Param(initialize=1.0, mutable=False) model.obj = Objective(expr=model.p) self.assertEqual(model.obj(), 1.0) @@ -152,9 +178,9 @@ def test_var_expr(self): def test_expr1_option(self): """Test expr option""" model = ConcreteModel() - model.B = RangeSet(1,4) - model.x = Var(model.B,initialize=2) - ans=0 + model.B = RangeSet(1, 4) + model.x = Var(model.B, initialize=2) + ans = 0 for i in model.B: ans = ans + model.x[i] model.obj = Objective(expr=ans) @@ -176,12 +202,14 @@ def test_expr2_option(self): def test_rule_option(self): """Test rule option""" model = ConcreteModel() + def f(model): - ans=0 - for i in [1,2,3,4]: + ans = 0 + for i in [1, 2, 3, 4]: ans = ans + model.x[i] return ans - model.x = Var(RangeSet(1,4),initialize=2) + + model.x = Var(RangeSet(1, 4), initialize=2) model.obj = Objective(rule=f) self.assertEqual(model.obj(), 8) @@ -191,8 +219,10 @@ def f(model): def test_arguments(self): """Test that arguments notare of type ScalarSet""" model = ConcreteModel() + def rule(model): return 1 + try: model.obj = Objective(model, rule=rule) except TypeError: @@ -203,8 +233,10 @@ def rule(model): def test_sense_option(self): """Test sense option""" model = ConcreteModel() + def rule(model): return 1.0 + model.obj = Objective(sense=maximize, rule=rule) self.assertEqual(model.obj.sense, maximize) @@ -213,53 +245,61 @@ def rule(model): def test_dim(self): """Test dim method""" model = ConcreteModel() + def rule(model): return 1 + model.obj = Objective(rule=rule) - self.assertEqual(model.obj.dim(),0) + self.assertEqual(model.obj.dim(), 0) def test_keys(self): """Test keys method""" model = ConcreteModel() + def rule(model): return 1 + model.obj = Objective(rule=rule) - self.assertEqual(list(model.obj.keys()),[None]) + self.assertEqual(list(model.obj.keys()), [None]) self.assertEqual(id(model.obj), id(model.obj[None])) def test_len(self): """Test len method""" model = AbstractModel() + def rule(model): return 1.0 + model.obj = Objective(rule=rule) - self.assertEqual(len(model.obj),0) + self.assertEqual(len(model.obj), 0) inst = model.create_instance() - self.assertEqual(len(inst.obj),1) + self.assertEqual(len(inst.obj), 1) model = AbstractModel() """Test rule option""" + def f(model): - ans=0 + ans = 0 for i in model.x.keys(): ans = ans + model.x[i] return ans + model = AbstractModel() - model.x = Var(RangeSet(1,4),initialize=2) + model.x = Var(RangeSet(1, 4), initialize=2) model.obj = Objective(rule=f) - self.assertEqual(len(model.obj),0) + self.assertEqual(len(model.obj), 0) inst = model.create_instance() - self.assertEqual(len(inst.obj),1) + self.assertEqual(len(inst.obj), 1) def test_keys_empty(self): """Test keys method""" model = ConcreteModel() model.o = Objective() - self.assertEqual(list(model.o.keys()),[]) + self.assertEqual(list(model.o.keys()), []) def test_len_empty(self): """Test len method""" @@ -269,40 +309,37 @@ def test_len_empty(self): class TestArrayObj(unittest.TestCase): - def create_model(self): # # Create Model # model = ConcreteModel() - model.A = Set(initialize=[1,2]) + model.A = Set(initialize=[1, 2]) return model def test_objdata_get_set(self): model = ConcreteModel() - model.o = Objective([1], rule=lambda m,i: 1) + model.o = Objective([1], rule=lambda m, i: 1) self.assertEqual(len(model.o), 1) self.assertEqual(model.o[1].expr, 1) model.o[1].expr = 2 - self.assertEqual(model.o[1].expr(), 2) + self.assertEqual(model.o[1].expr, 2) model.o[1].expr += 2 - self.assertEqual(model.o[1].expr(), 4) + self.assertEqual(model.o[1].expr, 4) def test_objdata_get_set_value(self): model = ConcreteModel() - model.o = Objective([1], rule=lambda m,i: 1) + model.o = Objective([1], rule=lambda m, i: 1) self.assertEqual(len(model.o), 1) self.assertEqual(model.o[1].expr, 1) model.o[1].expr = 2 - self.assertEqual(model.o[1].expr(), 2) + self.assertEqual(model.o[1].expr, 2) model.o[1].expr += 2 - self.assertEqual(model.o[1].expr(), 4) + self.assertEqual(model.o[1].expr, 4) def test_objdata_get_set_sense(self): model = ConcreteModel() - model.o = Objective([1], - rule=lambda m,i: 1, - sense=maximize) + model.o = Objective([1], rule=lambda m, i: 1, sense=maximize) self.assertEqual(len(model.o), 1) self.assertEqual(model.o[1].expr, 1) self.assertEqual(model.o[1].sense, maximize) @@ -314,15 +351,17 @@ def test_objdata_get_set_sense(self): def test_rule_option1(self): """Test rule option""" model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i): - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i return ans - model.x = Var(model.B,initialize=2) - model.obj = Objective(model.A,rule=f) + + model.x = Var(model.B, initialize=2) + model.obj = Objective(model.A, rule=f) self.assertEqual(model.obj[1](), 8) self.assertEqual(model.obj[2](), 16) @@ -332,17 +371,19 @@ def f(model, i): def test_rule_option2(self): """Test rule option""" model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i): if i == 1: return Objective.Skip - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i return ans + model.x = Var(model.B, initialize=2) - model.obj = Objective(model.A,rule=f) + model.obj = Objective(model.A, rule=f) self.assertEqual(model.obj[2](), 16) self.assertEqual(value(model.obj[2]), 16) @@ -350,18 +391,20 @@ def f(model, i): def test_rule_option3(self): """Test rule option""" model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + @simple_objective_rule def f(model, i): if i == 1: return None - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i return ans + model.x = Var(model.B, initialize=2) - model.obj = Objective(model.A,rule=f) + model.obj = Objective(model.A, rule=f) self.assertEqual(model.obj[2](), 16) self.assertEqual(value(model.obj[2]), 16) @@ -369,9 +412,11 @@ def f(model, i): def test_rule_numeric_expr(self): """Test rule option with returns a single numeric constant for the expression""" model = self.create_model() + def f(model, i): return 1.0 - model.obj = Objective(model.A,rule=f) + + model.obj = Objective(model.A, rule=f) self.assertEqual(model.obj[2](), 1.0) self.assertEqual(value(model.obj[2]), 1.0) @@ -379,11 +424,13 @@ def f(model, i): def test_rule_immutable_param_expr(self): """Test rule option that returns a single immutable param for the expression""" model = self.create_model() + def f(model, i): return model.p[i] - model.p = Param(RangeSet(1,4),initialize=1.0,mutable=False) + + model.p = Param(RangeSet(1, 4), initialize=1.0, mutable=False) model.x = Var() - model.obj = Objective(model.A,rule=f) + model.obj = Objective(model.A, rule=f) self.assertEqual(model.obj[2](), 1.0) self.assertEqual(value(model.obj[2]), 1.0) @@ -391,12 +438,14 @@ def f(model, i): def test_rule_mutable_param_expr(self): """Test rule option that returns a single mutable param for the expression""" model = self.create_model() + def f(model, i): return model.p[i] - model.r = RangeSet(1,4) - model.p = Param(model.r,initialize=1.0,mutable=True) + + model.r = RangeSet(1, 4) + model.p = Param(model.r, initialize=1.0, mutable=True) model.x = Var() - model.obj = Objective(model.A,rule=f) + model.obj = Objective(model.A, rule=f) self.assertEqual(model.obj[2](), 1.0) self.assertEqual(value(model.obj[2]), 1.0) @@ -404,11 +453,13 @@ def f(model, i): def test_rule_var_expr(self): """Test rule option that returns a single var for the expression""" model = self.create_model() + def f(model, i): return model.x[i] - model.r = RangeSet(1,4) - model.x = Var(model.r,initialize=1.0) - model.obj = Objective(model.A,rule=f) + + model.r = RangeSet(1, 4) + model.x = Var(model.r, initialize=1.0) + model.obj = Objective(model.A, rule=f) self.assertEqual(model.obj[2](), 1.0) self.assertEqual(value(model.obj[2]), 1.0) @@ -416,14 +467,9 @@ def f(model, i): def test_sense_option(self): """Test sense option""" model = self.create_model() - model.obj1 = Objective(model.A, - rule=lambda m, i: 1.0, - sense=maximize) - model.obj2 = Objective(model.A, - rule=lambda m, i: 1.0, - sense=minimize) - model.obj3 = Objective(model.A, - rule=lambda m, i: 1.0) + model.obj1 = Objective(model.A, rule=lambda m, i: 1.0, sense=maximize) + model.obj2 = Objective(model.A, rule=lambda m, i: 1.0, sense=minimize) + model.obj3 = Objective(model.A, rule=lambda m, i: 1.0) self.assertTrue(len(model.A) > 0) self.assertEqual(len(model.obj1), len(model.A)) self.assertEqual(len(model.obj2), len(model.A)) @@ -441,133 +487,140 @@ def test_dim(self): model = self.create_model() model.obj = Objective(model.A) - self.assertEqual(model.obj.dim(),1) + self.assertEqual(model.obj.dim(), 1) def test_keys(self): """Test keys method""" model = self.create_model() + def A_rule(model, i): return model.x + model.x = Var() model.obj = Objective(model.A, rule=A_rule) - self.assertEqual(len(list(model.obj.keys())),2) + self.assertEqual(len(list(model.obj.keys())), 2) def test_len(self): """Test len method""" model = self.create_model() model.obj = Objective(model.A) - self.assertEqual(len(model.obj),0) + self.assertEqual(len(model.obj), 0) model = self.create_model() """Test rule option""" + def f(model): - ans=0 + ans = 0 for i in model.x.keys(): ans = ans + model.x[i] return ans - model.x = Var(RangeSet(1,4),initialize=2) + + model.x = Var(RangeSet(1, 4), initialize=2) model.obj = Objective(rule=f) - self.assertEqual(len(model.obj),1) + self.assertEqual(len(model.obj), 1) class Test2DArrayObj(unittest.TestCase): - def create_model(self): model = ConcreteModel() - model.A = Set(initialize=[1,2]) + model.A = Set(initialize=[1, 2]) return model def test_rule_option1(self): """Test rule option""" model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i, k): - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i return ans + model.x = Var(model.B, initialize=2) - model.obj = Objective(model.A,model.A, rule=f) + model.obj = Objective(model.A, model.A, rule=f) try: - self.assertEqual(model.obj(),None) + self.assertEqual(model.obj(), None) self.fail("Expected TypeError") except TypeError: pass - self.assertEqual(model.obj[1,1](), 8) - self.assertEqual(model.obj[2,1](), 16) - self.assertEqual(value(model.obj[1,1]), 8) - self.assertEqual(value(model.obj[2,1]), 16) + self.assertEqual(model.obj[1, 1](), 8) + self.assertEqual(model.obj[2, 1](), 16) + self.assertEqual(value(model.obj[1, 1]), 8) + self.assertEqual(value(model.obj[2, 1]), 16) def test_sense_option(self): """Test sense option""" model = self.create_model() - model.obj1 = Objective(model.A, model.A, - rule=lambda m, i, j: 1.0, - sense=maximize) - model.obj2 = Objective(model.A, model.A, - rule=lambda m, i, j: 1.0, - sense=minimize) - model.obj3 = Objective(model.A, model.A, - rule=lambda m, i, j: 1.0) + model.obj1 = Objective( + model.A, model.A, rule=lambda m, i, j: 1.0, sense=maximize + ) + model.obj2 = Objective( + model.A, model.A, rule=lambda m, i, j: 1.0, sense=minimize + ) + model.obj3 = Objective(model.A, model.A, rule=lambda m, i, j: 1.0) self.assertTrue(len(model.A) > 0) - self.assertEqual(len(model.obj1), len(model.A)*len(model.A)) - self.assertEqual(len(model.obj2), len(model.A)*len(model.A)) - self.assertEqual(len(model.obj3), len(model.A)*len(model.A)) + self.assertEqual(len(model.obj1), len(model.A) * len(model.A)) + self.assertEqual(len(model.obj2), len(model.A) * len(model.A)) + self.assertEqual(len(model.obj3), len(model.A) * len(model.A)) for i in model.A: for j in model.A: - self.assertEqual(model.obj1[i,j].sense, maximize) - self.assertEqual(model.obj1[i,j].is_minimizing(), False) - self.assertEqual(model.obj2[i,j].sense, minimize) - self.assertEqual(model.obj2[i,j].is_minimizing(), True) - self.assertEqual(model.obj3[i,j].sense, minimize) - self.assertEqual(model.obj3[i,j].is_minimizing(), True) + self.assertEqual(model.obj1[i, j].sense, maximize) + self.assertEqual(model.obj1[i, j].is_minimizing(), False) + self.assertEqual(model.obj2[i, j].sense, minimize) + self.assertEqual(model.obj2[i, j].is_minimizing(), True) + self.assertEqual(model.obj3[i, j].sense, minimize) + self.assertEqual(model.obj3[i, j].is_minimizing(), True) def test_dim(self): """Test dim method""" model = self.create_model() - model.obj = Objective(model.A,model.A) + model.obj = Objective(model.A, model.A) - self.assertEqual(model.obj.dim(),2) + self.assertEqual(model.obj.dim(), 2) def test_keys(self): """Test keys method""" model = self.create_model() + def A_rule(model, i, j): return model.x + model.x = Var() - model.obj = Objective(model.A,model.A, rule=A_rule) + model.obj = Objective(model.A, model.A, rule=A_rule) - self.assertEqual(len(list(model.obj.keys())),4) + self.assertEqual(len(list(model.obj.keys())), 4) def test_len(self): """Test len method""" model = self.create_model() - model.obj = Objective(model.A,model.A) - self.assertEqual(len(model.obj),0) + model.obj = Objective(model.A, model.A) + self.assertEqual(len(model.obj), 0) model = self.create_model() """Test rule option""" + def f(model): - ans=0 + ans = 0 for i in model.x.keys(): ans = ans + model.x[i] return ans - model.x = Var(RangeSet(1,4),initialize=2) + + model.x = Var(RangeSet(1, 4), initialize=2) model.obj = Objective(rule=f) - self.assertEqual(len(model.obj),1) + self.assertEqual(len(model.obj), 1) class TestObjList(unittest.TestCase): - def create_model(self): model = ConcreteModel() - model.A = Set(initialize=[1,2,3,4]) + model.A = Set(initialize=[1, 2, 3, 4]) return model # @@ -591,16 +644,18 @@ def test_conlist_skip(self): def test_rule_option1(self): """Test rule option""" model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i): if i > 4: return ObjectiveList.End - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i return ans - model.x = Var(model.B,initialize=2) + + model.x = Var(model.B, initialize=2) model.o = ObjectiveList(rule=f) self.assertEqual(model.o[1](), 8) @@ -610,16 +665,18 @@ def f(model, i): def test_rule_option2(self): """Test rule option""" model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + def f(model, i): if i > 2: return ObjectiveList.End - i = 2*i - 1 - ans=0 + i = 2 * i - 1 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i return ans + model.x = Var(model.B, initialize=2) model.o = ObjectiveList(rule=f) @@ -629,16 +686,18 @@ def f(model, i): def test_rule_option1a(self): """Test rule option""" model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + @simple_objectivelist_rule def f(model, i): if i > 4: return None - ans=0 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i return ans + model.x = Var(model.B, initialize=2) model.o = ObjectiveList(rule=f) @@ -649,18 +708,20 @@ def f(model, i): def test_rule_option2a(self): """Test rule option""" model = self.create_model() - model.B = RangeSet(1,4) + model.B = RangeSet(1, 4) + @simple_objectivelist_rule def f(model, i): if i > 2: return None - i = 2*i - 1 - ans=0 + i = 2 * i - 1 + ans = 0 for j in model.B: ans = ans + model.x[j] ans *= i return ans - model.x = Var(model.B,initialize=2) + + model.x = Var(model.B, initialize=2) model.o = ObjectiveList(rule=f) self.assertEqual(model.o[1](), 8) @@ -670,11 +731,13 @@ def test_rule_option3(self): """Test rule option""" model = self.create_model() model.y = Var(initialize=2) + def f(model): yield model.y - yield 2*model.y - yield 2*model.y + yield 2 * model.y + yield 2 * model.y yield ObjectiveList.End + model.c = ObjectiveList(rule=f) self.assertEqual(len(model.c), 3) self.assertEqual(model.c[1](), 2) @@ -686,7 +749,7 @@ def test_rule_option4(self): """Test rule option""" model = self.create_model() model.y = Var(initialize=2) - model.c = ObjectiveList(rule=((i+1)*model.y for i in range(3))) + model.c = ObjectiveList(rule=((i + 1) * model.y for i in range(3))) self.assertEqual(len(model.c), 3) self.assertEqual(model.c[1](), 2) @@ -695,25 +758,24 @@ def test_dim(self): model = self.create_model() model.o = ObjectiveList() - self.assertEqual(model.o.dim(),1) + self.assertEqual(model.o.dim(), 1) def test_keys(self): """Test keys method""" model = self.create_model() model.o = ObjectiveList() - self.assertEqual(len(list(model.o.keys())),0) + self.assertEqual(len(list(model.o.keys())), 0) def test_len(self): """Test len method""" model = self.create_model() model.o = ObjectiveList() - self.assertEqual(len(model.o),0) + self.assertEqual(len(model.o), 0) class MiscObjTests(unittest.TestCase): - def test_constructor(self): a = Objective(name="b") self.assertEqual(a.local_name, "b") @@ -726,6 +788,7 @@ def test_constructor(self): def test_rule(self): def rule1(model): return [] + model = ConcreteModel() try: model.o = Objective(rule=rule1) @@ -734,19 +797,23 @@ def rule1(model): pass # model = ConcreteModel() + def rule1(model): return 1.1 + model = ConcreteModel() model.o = Objective(rule=rule1) - self.assertEqual(model.o(),1.1) + self.assertEqual(model.o(), 1.1) # model = ConcreteModel() + def rule1(model, i): return 1.1 + model = ConcreteModel() - model.a = Set(initialize=[1,2,3]) + model.a = Set(initialize=[1, 2, 3]) try: - model.o = Objective(model.a,rule=rule1) + model.o = Objective(model.a, rule=rule1) except Exception: self.fail("Error generating objective") @@ -758,6 +825,5 @@ def test_abstract_index(self): model.x = Objective(model.C) - if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_param.py b/pyomo/core/tests/unit/test_param.py index 708fb8d7ef7..7334a8a6e29 100644 --- a/pyomo/core/tests/unit/test_param.py +++ b/pyomo/core/tests/unit/test_param.py @@ -26,29 +26,59 @@ import pyomo.common.unittest as unittest from pyomo.environ import ( - Set, RangeSet, Param, ConcreteModel, AbstractModel, Block, Constraint, Var, - NonNegativeIntegers, Integers, NonNegativeReals, Boolean, Reals, Any, - display, value, set_options, - sin, cos, tan, log, log10, exp, sqrt, ceil, floor, asin, acos, atan, - sinh, cosh, tanh, asinh, acosh, atanh, + Set, + RangeSet, + Param, + ConcreteModel, + AbstractModel, + Block, + Constraint, + Var, + NonNegativeIntegers, + Integers, + NonNegativeReals, + Boolean, + Reals, + Any, + display, + value, + set_options, + sin, + cos, + tan, + log, + log10, + exp, + sqrt, + ceil, + floor, + asin, + acos, + atan, + sinh, + cosh, + tanh, + asinh, + acosh, + atanh, ) from pyomo.common.errors import PyomoException from pyomo.common.log import LoggingIntercept from pyomo.common.tempfiles import TempfileManager -from pyomo.core.base.param import _ParamData +from pyomo.core.base.param import _ParamData from pyomo.core.base.units_container import units, pint_available, UnitsError from io import StringIO NoValue = Param.NoValue -class ParamTester(object): +class ParamTester(object): def setUp(self, **kwds): # # Sparse single-index Param, no default # - self.model.Z = Set(initialize=[1,3]) + self.model.Z = Set(initialize=[1, 3]) self.model.A = Param(self.model.Z, **kwds) self.instance = self.model.create_instance() @@ -62,13 +92,13 @@ def tearDown(self): def validateDict(self, ref, test): test = dict(test) ref = dict(ref) - self.assertEqual( len(test), len(ref) ) + self.assertEqual(len(test), len(ref)) for key in test.keys(): - self.assertTrue( key in ref ) + self.assertTrue(key in ref) if ref[key] is None: - self.assertTrue( test[key] is None or test[key].value is None ) + self.assertTrue(test[key] is None or test[key].value is None) else: - self.assertEqual( ref[key], value( test[key] ) ) + self.assertEqual(ref[key], value(test[key])) def test_value(self): if self.instance.A.is_indexed(): @@ -92,7 +122,7 @@ def test_value(self): self.assertRaises(TypeError, int, self.instance.A) def test_call(self): - #"""Check the use of the __call__ method""" + # """Check the use of the __call__ method""" self.assertRaises(TypeError, self.instance.A) def test_get_valueattr(self): @@ -106,7 +136,7 @@ def test_get_valueattr(self): # prevent a user from adding new attributes to the (indexed) Param # instance. # - #def test_set_valueattr(self): + # def test_set_valueattr(self): # try: # self.instance.A.value = 4.3 # self.fail("Array Parameters should not contain a value") @@ -124,7 +154,7 @@ def test_getitem(self): for key, val in self.data.items(): try: test = self.instance.A[key] - self.assertEqual( value(test), val ) + self.assertEqual(value(test), val) except ValueError: if val is not NoValue: raise @@ -133,8 +163,7 @@ def test_setitem_index_error(self): try: self.instance.A[2] = 4.3 if not self.instance.A.mutable: - self.fail("Expected setitem[%s] to fail for immutable Params" - % (idx,)) + self.fail("Expected setitem[%s] to fail for immutable Params" % (idx,)) self.fail("Expected KeyError because 2 is not a valid key") except KeyError: pass @@ -151,19 +180,16 @@ def test_setitem_preexisting(self): idx = sorted(keys)[0] self.assertEqual(value(self.instance.A[idx]), self.data[idx]) if self.instance.A.mutable: - self.assertTrue( isinstance( self.instance.A[idx], - _ParamData ) ) + self.assertTrue(isinstance(self.instance.A[idx], _ParamData)) else: self.assertEqual(type(self.instance.A[idx]), float) try: self.instance.A[idx] = 4.3 if not self.instance.A.mutable: - self.fail("Expected setitem[%s] to fail for immutable Params" - % (idx,)) - self.assertEqual( value(self.instance.A[idx]), 4.3) - self.assertTrue( isinstance(self.instance.A[idx], - _ParamData ) ) + self.fail("Expected setitem[%s] to fail for immutable Params" % (idx,)) + self.assertEqual(value(self.instance.A[idx]), 4.3) + self.assertTrue(isinstance(self.instance.A[idx], _ParamData)) except TypeError: # immutable Params should raise a TypeError exception if self.instance.A.mutable: @@ -172,17 +198,16 @@ def test_setitem_preexisting(self): try: self.instance.A[idx] = -4.3 if not self.instance.A.mutable: - self.fail("Expected setitem[%s] to fail for immutable Params" - % (idx,)) + self.fail("Expected setitem[%s] to fail for immutable Params" % (idx,)) if self.expectNegativeDomainError: - self.fail("Expected setitem[%s] to fail with negative data" - % (idx,)) - self.assertEqual( value(self.instance.A[idx]), -4.3 ) + self.fail("Expected setitem[%s] to fail with negative data" % (idx,)) + self.assertEqual(value(self.instance.A[idx]), -4.3) except ValueError: if not self.expectNegativeDomainError: self.fail( "Unexpected exception (%s) for setitem[%s] = negative data" - % ( str(sys.exc_info()[1]), idx ) ) + % (str(sys.exc_info()[1]), idx) + ) except TypeError: # immutable Params should raise a TypeError exception if self.instance.A.mutable: @@ -191,17 +216,16 @@ def test_setitem_preexisting(self): try: self.instance.A[idx] = 'x' if not self.instance.A.mutable: - self.fail("Expected setitem[%s] to fail for immutable Params" - % (idx,)) + self.fail("Expected setitem[%s] to fail for immutable Params" % (idx,)) if self.expectTextDomainError: - self.fail("Expected setitem[%s] to fail with text data", - (idx,)) - self.assertEqual( value(self.instance.A[idx]), 'x' ) + self.fail("Expected setitem[%s] to fail with text data", (idx,)) + self.assertEqual(value(self.instance.A[idx]), 'x') except ValueError: if not self.expectTextDomainError: self.fail( "Unexpected exception (%s) for setitem[%s] with text data" - % ( str(sys.exc_info()[1]), idx ) ) + % (str(sys.exc_info()[1]), idx) + ) except TypeError: # immutable Params should raise a TypeError exception if self.instance.A.mutable: @@ -222,23 +246,20 @@ def test_setitem_default_override(self): if not idx in sparse_keys: break - self.assertEqual( value(self.instance.A[idx]), - self.instance.A._default_val ) + self.assertEqual(value(self.instance.A[idx]), self.instance.A._default_val) if self.instance.A.mutable: - self.assertIsInstance( self.instance.A[idx], - _ParamData ) + self.assertIsInstance(self.instance.A[idx], _ParamData) else: - self.assertEqual(type(self.instance.A[idx]), - type(value(self.instance.A._default_val))) + self.assertEqual( + type(self.instance.A[idx]), type(value(self.instance.A._default_val)) + ) try: self.instance.A[idx] = 4.3 if not self.instance.A.mutable: - self.fail("Expected setitem[%s] to fail for immutable Params" - % (idx,)) - self.assertEqual( self.instance.A[idx].value, 4.3) - self.assertIsInstance( self.instance.A[idx], - _ParamData ) + self.fail("Expected setitem[%s] to fail for immutable Params" % (idx,)) + self.assertEqual(self.instance.A[idx].value, 4.3) + self.assertIsInstance(self.instance.A[idx], _ParamData) except TypeError: # immutable Params should raise a TypeError exception if self.instance.A.mutable: @@ -247,17 +268,16 @@ def test_setitem_default_override(self): try: self.instance.A[idx] = -4.3 if not self.instance.A.mutable: - self.fail("Expected setitem[%s] to fail for immutable Params" - % (idx,)) + self.fail("Expected setitem[%s] to fail for immutable Params" % (idx,)) if self.expectNegativeDomainError: - self.fail("Expected setitem[%s] to fail with negative data" - % (idx,)) - self.assertEqual( self.instance.A[idx].value, -4.3 ) + self.fail("Expected setitem[%s] to fail with negative data" % (idx,)) + self.assertEqual(self.instance.A[idx].value, -4.3) except ValueError: if not self.expectNegativeDomainError: self.fail( "Unexpected exception (%s) for setitem[%s] = negative data" - % ( str(sys.exc_info()[1]), idx ) ) + % (str(sys.exc_info()[1]), idx) + ) except TypeError: # immutable Params should raise a TypeError exception if self.instance.A.mutable: @@ -266,17 +286,16 @@ def test_setitem_default_override(self): try: self.instance.A[idx] = 'x' if not self.instance.A.mutable: - self.fail("Expected setitem[%s] to fail for immutable Params" - % (idx,)) + self.fail("Expected setitem[%s] to fail for immutable Params" % (idx,)) if self.expectTextDomainError: - self.fail("Expected setitem[%s] to fail with text data" - % (idx,)) - self.assertEqual( value(self.instance.A[idx]), 'x' ) + self.fail("Expected setitem[%s] to fail with text data" % (idx,)) + self.assertEqual(value(self.instance.A[idx]), 'x') except ValueError: if not self.expectTextDomainError: self.fail( "Unexpected exception (%s) for setitem[%s] with text data" - % ( str(sys.exc_info()[1]), idx) ) + % (str(sys.exc_info()[1]), idx) + ) except TypeError: # immutable Params should raise a TypeError exception if self.instance.A.mutable: @@ -288,18 +307,18 @@ def test_dim(self): key = tuple(key) except TypeError: key = (key,) - self.assertEqual( self.instance.A.dim(), len(key)) + self.assertEqual(self.instance.A.dim(), len(key)) def test_is_indexed(self): self.assertTrue(self.instance.A.is_indexed()) def test_keys(self): test = self.instance.A.keys() - #self.assertEqual( type(test), list ) + # self.assertEqual( type(test), list ) if self.instance.A._default_val is NoValue: - self.assertEqual( sorted(test), sorted(self.sparse_data.keys()) ) + self.assertEqual(sorted(test), sorted(self.sparse_data.keys())) else: - self.assertEqual( sorted(test), sorted(self.data.keys()) ) + self.assertEqual(sorted(test), sorted(self.data.keys())) def test_values(self): expectException = False @@ -307,7 +326,7 @@ def test_values(self): # not self.instance.A.mutable try: test = self.instance.A.values() - #self.assertEqual( type(test), list ) + # self.assertEqual( type(test), list ) test = zip(self.instance.A.keys(), test) if self.instance.A._default_val is NoValue: self.validateDict(self.sparse_data.items(), test) @@ -325,7 +344,7 @@ def test_items(self): # not self.instance.A.mutable try: test = self.instance.A.items() - #self.assertEqual( type(test), list ) + # self.assertEqual( type(test), list ) if self.instance.A._default_val is NoValue: self.validateDict(self.sparse_data.items(), test) else: @@ -337,7 +356,7 @@ def test_items(self): def test_iterkeys(self): test = self.instance.A.iterkeys() - self.assertEqual( sorted(test), sorted(self.instance.A.keys()) ) + self.assertEqual(sorted(test), sorted(self.instance.A.keys())) def test_itervalues(self): expectException = False @@ -372,30 +391,28 @@ def test_iteritems(self): if not expectException: raise - def test_sparse_keys(self): test = self.instance.A.sparse_keys() - self.assertEqual( type(test), list ) - self.assertEqual( sorted(test), sorted(self.sparse_data.keys()) ) + self.assertEqual(type(test), list) + self.assertEqual(sorted(test), sorted(self.sparse_data.keys())) def test_sparse_values(self): - #self.instance.pprint() + # self.instance.pprint() test = self.instance.A.sparse_values() - self.assertEqual( type(test), list ) - #print test - #print self.sparse_data.items() + self.assertEqual(type(test), list) + # print test + # print self.sparse_data.items() test = zip(self.instance.A.keys(), test) self.validateDict(self.sparse_data.items(), test) def test_sparse_items(self): test = self.instance.A.sparse_items() - self.assertEqual( type(test), list ) + self.assertEqual(type(test), list) self.validateDict(self.sparse_data.items(), test) - def test_sparse_iterkeys(self): test = self.instance.A.sparse_iterkeys() - self.assertEqual( sorted(test), sorted(self.sparse_data.keys()) ) + self.assertEqual(sorted(test), sorted(self.sparse_data.keys())) def test_sparse_itervalues(self): test = self.instance.A.sparse_itervalues() @@ -406,28 +423,30 @@ def test_sparse_iteritems(self): test = self.instance.A.sparse_iteritems() self.validateDict(self.sparse_data.items(), test) - def test_len(self): - #"""Check the use of len""" + # """Check the use of len""" if self.instance.A._default_val is NoValue: - self.assertEqual( len(self.instance.A), len(self.sparse_data) ) - self.assertEqual( len(list(self.instance.A.keys())), len(self.sparse_data) ) + self.assertEqual(len(self.instance.A), len(self.sparse_data)) + self.assertEqual(len(list(self.instance.A.keys())), len(self.sparse_data)) else: - self.assertEqual( len(self.instance.A), len(self.data) ) - self.assertEqual( len(list(self.instance.A.keys())), len(self.data) ) - self.assertEqual( len(list(self.instance.A.sparse_keys())), len(self.sparse_data) ) + self.assertEqual(len(self.instance.A), len(self.data)) + self.assertEqual(len(list(self.instance.A.keys())), len(self.data)) + self.assertEqual( + len(list(self.instance.A.sparse_keys())), len(self.sparse_data) + ) def test_index(self): - #"""Check the use of index""" - self.assertEqual( len(self.instance.A.index_set()), len(list(self.data.keys())) ) + # """Check the use of index""" + self.assertEqual(len(self.instance.A.index_set()), len(list(self.data.keys()))) def test_get_default(self): if len(self.sparse_data) == len(self.data): # nothing to test return idx = list(set(self.data) - set(self.sparse_data))[0] - expectException = self.instance.A._default_val is NoValue \ - and not self.instance.A.mutable + expectException = ( + self.instance.A._default_val is NoValue and not self.instance.A.mutable + ) try: test = self.instance.A[idx] if expectException: @@ -446,50 +465,43 @@ def test_get_default(self): raise -class ArrayParam_mutable_sparse_noDefault\ - (ParamTester, unittest.TestCase): - +class ArrayParam_mutable_sparse_noDefault(ParamTester, unittest.TestCase): def setUp(self, **kwds): # # Sparse single-index Param, no default # self.model = AbstractModel() - ParamTester.setUp(self, mutable=True, initialize={1:1.3}, **kwds) + ParamTester.setUp(self, mutable=True, initialize={1: 1.3}, **kwds) - self.sparse_data = {1:1.3} - self.data = {1:1.3, 3:NoValue} + self.sparse_data = {1: 1.3} + self.data = {1: 1.3, 3: NoValue} -class ArrayParam_mutable_sparse_intDefault\ - (ParamTester, unittest.TestCase): +class ArrayParam_mutable_sparse_intDefault(ParamTester, unittest.TestCase): def setUp(self, **kwds): # # Sparse single-index Param, int default # self.model = AbstractModel() - ParamTester.setUp(self, mutable=True, initialize={1:1.3}, default=0, **kwds) - - self.sparse_data = {1:1.3} - self.data = {1:1.3, 3:0} + ParamTester.setUp(self, mutable=True, initialize={1: 1.3}, default=0, **kwds) + self.sparse_data = {1: 1.3} + self.data = {1: 1.3, 3: 0} -class ArrayParam_mutable_sparse_floatDefault\ - (ParamTester, unittest.TestCase): +class ArrayParam_mutable_sparse_floatDefault(ParamTester, unittest.TestCase): def setUp(self, **kwds): # # Sparse single-index Param, float default # self.model = AbstractModel() - ParamTester.setUp(self, mutable=True, initialize={1:1.3}, default=99.5, **kwds) - - self.sparse_data = {1:1.3} - self.data = {1:1.3, 3:99.5} + ParamTester.setUp(self, mutable=True, initialize={1: 1.3}, default=99.5, **kwds) + self.sparse_data = {1: 1.3} + self.data = {1: 1.3, 3: 99.5} -class ArrayParam_mutable_dense_intDefault_scalarInit\ - (ParamTester, unittest.TestCase): +class ArrayParam_mutable_dense_intDefault_scalarInit(ParamTester, unittest.TestCase): def setUp(self, **kwds): # # Dense single-index Param, float default, init with scalar @@ -497,129 +509,130 @@ def setUp(self, **kwds): self.model = AbstractModel() ParamTester.setUp(self, mutable=True, initialize=1.3, default=99.5, **kwds) - self.sparse_data = {1:1.3, 3:1.3} + self.sparse_data = {1: 1.3, 3: 1.3} self.data = self.sparse_data -class ArrayParam_mutable_dense_intDefault_scalarParamInit\ - (ParamTester, unittest.TestCase): +class ArrayParam_mutable_dense_intDefault_scalarParamInit( + ParamTester, unittest.TestCase +): def setUp(self, **kwds): # # Dense single-index Param, float default, init with scalar # self.model = AbstractModel() self.model.p = Param(initialize=1.3) - ParamTester.setUp(self, mutable=True, initialize=self.model.p, default=99.5, **kwds) + ParamTester.setUp( + self, mutable=True, initialize=self.model.p, default=99.5, **kwds + ) - self.sparse_data = {1:1.3, 3:1.3} + self.sparse_data = {1: 1.3, 3: 1.3} self.data = self.sparse_data -class ArrayParam_mutable_dense_intDefault_sparseParamInit\ - (ParamTester, unittest.TestCase): +class ArrayParam_mutable_dense_intDefault_sparseParamInit( + ParamTester, unittest.TestCase +): def setUp(self, **kwds): # # Dense single-index Param, float default, init with scalar # self.model = AbstractModel() - self.model.p = Param([1,3], initialize={1:1.3}, default=9.5) - ParamTester.setUp(self, mutable=True, initialize=self.model.p, default=99.5, **kwds) + self.model.p = Param([1, 3], initialize={1: 1.3}, default=9.5) + ParamTester.setUp( + self, mutable=True, initialize=self.model.p, default=99.5, **kwds + ) - self.sparse_data = {1:1.3, 3:9.5} + self.sparse_data = {1: 1.3, 3: 9.5} self.data = self.sparse_data -class ArrayParam_mutable_dense_intDefault_denseParamInit\ - (ParamTester, unittest.TestCase): +class ArrayParam_mutable_dense_intDefault_denseParamInit( + ParamTester, unittest.TestCase +): def setUp(self, **kwds): # # Dense single-index Param, float default, init with scalar # self.model = AbstractModel() - self.model.p = Param([1,3], initialize={1:1.3, 3:2.3}) - ParamTester.setUp(self, mutable=True, initialize=self.model.p, default=99.5, **kwds) + self.model.p = Param([1, 3], initialize={1: 1.3, 3: 2.3}) + ParamTester.setUp( + self, mutable=True, initialize=self.model.p, default=99.5, **kwds + ) - self.sparse_data = {1:1.3, 3:2.3} + self.sparse_data = {1: 1.3, 3: 2.3} self.data = self.sparse_data -class ArrayParam_mutable_dense_intDefault_dictInit\ - (ParamTester, unittest.TestCase): - +class ArrayParam_mutable_dense_intDefault_dictInit(ParamTester, unittest.TestCase): def setUp(self, **kwds): def A_init(model, i): - return 1.5+i + return 1.5 + i + # # Dense single-index Param, no default, init with rule # self.model = AbstractModel() ParamTester.setUp(self, mutable=True, initialize=A_init, **kwds) - self.sparse_data = {1:2.5, 3:4.5} + self.sparse_data = {1: 2.5, 3: 4.5} self.data = self.sparse_data -class ArrayParam_mutable_dense_intDefault_ruleInit\ - (ParamTester, unittest.TestCase): - +class ArrayParam_mutable_dense_intDefault_ruleInit(ParamTester, unittest.TestCase): def setUp(self, **kwds): def A_init(model): - return {1:2.5, 3:4.5} + return {1: 2.5, 3: 4.5} + # # Dense single-index Param, no default, init with rule # self.model = AbstractModel() ParamTester.setUp(self, mutable=True, initialize=A_init, **kwds) - self.sparse_data = {1:2.5, 3:4.5} + self.sparse_data = {1: 2.5, 3: 4.5} self.data = self.sparse_data -class ArrayParam_immutable_sparse_noDefault\ - (ParamTester, unittest.TestCase): - +class ArrayParam_immutable_sparse_noDefault(ParamTester, unittest.TestCase): def setUp(self, **kwds): # # Sparse single-index Param, no default # self.model = AbstractModel() - ParamTester.setUp(self, mutable=False, initialize={1:1.3}, **kwds) + ParamTester.setUp(self, mutable=False, initialize={1: 1.3}, **kwds) - self.sparse_data = {1:1.3} - self.data = {1:1.3, 3:NoValue} + self.sparse_data = {1: 1.3} + self.data = {1: 1.3, 3: NoValue} -class ArrayParam_immutable_sparse_intDefault\ - (ParamTester, unittest.TestCase): - +class ArrayParam_immutable_sparse_intDefault(ParamTester, unittest.TestCase): def setUp(self, **kwds): # # Sparse single-index Param, int default # self.model = AbstractModel() - ParamTester.setUp(self, mutable=False, initialize={1:1.3}, default=0, **kwds) - - self.sparse_data = {1:1.3} - self.data = {1:1.3, 3:0} + ParamTester.setUp(self, mutable=False, initialize={1: 1.3}, default=0, **kwds) + self.sparse_data = {1: 1.3} + self.data = {1: 1.3, 3: 0} -class ArrayParam_immutable_sparse_floatDefault\ - (ParamTester, unittest.TestCase): +class ArrayParam_immutable_sparse_floatDefault(ParamTester, unittest.TestCase): def setUp(self, **kwds): # # Sparse single-index Param, float default # self.model = AbstractModel() - ParamTester.setUp(self, mutable=False, initialize={1:1.3}, default=99.5, **kwds) - - self.sparse_data = {1:1.3} - self.data = {1:1.3, 3:99.5} + ParamTester.setUp( + self, mutable=False, initialize={1: 1.3}, default=99.5, **kwds + ) + self.sparse_data = {1: 1.3} + self.data = {1: 1.3, 3: 99.5} -class ArrayParam_immutable_dense_intDefault_scalarInit\ - (ParamTester, unittest.TestCase): +class ArrayParam_immutable_dense_intDefault_scalarInit(ParamTester, unittest.TestCase): def setUp(self, **kwds): # # Dense single-index Param, float default, init with scalar @@ -627,60 +640,58 @@ def setUp(self, **kwds): self.model = AbstractModel() ParamTester.setUp(self, mutable=False, initialize=1.3, default=99.5, **kwds) - self.sparse_data = {1:1.3, 3:1.3} + self.sparse_data = {1: 1.3, 3: 1.3} self.data = self.sparse_data - -class ArrayParam_immutable_dense_intDefault_scalarParamInit\ - (ParamTester, unittest.TestCase): - +class ArrayParam_immutable_dense_intDefault_scalarParamInit( + ParamTester, unittest.TestCase +): def setUp(self, **kwds): # # Dense single-index Param, float default, init with scalar # self.model = AbstractModel() self.model.p = Param(initialize=1.3) - ParamTester.setUp(self, mutable=False, initialize=self.model.p, default=99.5, **kwds) + ParamTester.setUp( + self, mutable=False, initialize=self.model.p, default=99.5, **kwds + ) - self.sparse_data = {1:1.3, 3:1.3} + self.sparse_data = {1: 1.3, 3: 1.3} self.data = self.sparse_data -class ArrayParam_immutable_dense_intDefault_dictInit\ - (ParamTester, unittest.TestCase): - +class ArrayParam_immutable_dense_intDefault_dictInit(ParamTester, unittest.TestCase): def setUp(self, **kwds): def A_init(model, i): - return 1.5+i + return 1.5 + i + # # Dense single-index Param, no default, init with rule # self.model = AbstractModel() ParamTester.setUp(self, mutable=False, initialize=A_init, **kwds) - self.sparse_data = {1:2.5, 3:4.5} + self.sparse_data = {1: 2.5, 3: 4.5} self.data = self.sparse_data -class ArrayParam_immutable_dense_intDefault_ruleInit\ - (ParamTester, unittest.TestCase): - +class ArrayParam_immutable_dense_intDefault_ruleInit(ParamTester, unittest.TestCase): def setUp(self, **kwds): def A_init(model): - return {1:2.5, 3:4.5} + return {1: 2.5, 3: 4.5} + # # Dense single-index Param, no default, init with rule # self.model = AbstractModel() ParamTester.setUp(self, mutable=False, initialize=A_init, **kwds) - self.sparse_data = {1:2.5, 3:4.5} + self.sparse_data = {1: 2.5, 3: 4.5} self.data = self.sparse_data class ArrayParam6(unittest.TestCase): - def setUp(self, **kwds): # # Create Model @@ -694,53 +705,66 @@ def tearDown(self): self.instance = None def test_index1(self): - self.model.A = Set(initialize=range(0,4)) + self.model.A = Set(initialize=range(0, 4)) + def B_index(model): for i in model.A: - if i%2 == 0: + if i % 2 == 0: yield i + def B_init(model, i, j): if j: - return 2+i - return -(2+i) - self.model.B = Param( B_index, [True,False], - initialize=B_init) + return 2 + i + return -(2 + i) + + self.model.B = Param(B_index, [True, False], initialize=B_init) self.instance = self.model.create_instance() - #self.instance.pprint() - self.assertEqual(set(self.instance.B.keys()), - set([(0,True),(2,True),(0, False),(2,False)])) - self.assertEqual(self.instance.B[0,True],2) - self.assertEqual(self.instance.B[0,False],-2) - self.assertEqual(self.instance.B[2,True],4) - self.assertEqual(self.instance.B[2,False],-4) + # self.instance.pprint() + self.assertEqual( + set(self.instance.B.keys()), + set([(0, True), (2, True), (0, False), (2, False)]), + ) + self.assertEqual(self.instance.B[0, True], 2) + self.assertEqual(self.instance.B[0, False], -2) + self.assertEqual(self.instance.B[2, True], 4) + self.assertEqual(self.instance.B[2, False], -4) def test_index2(self): - self.model.A = Set(initialize=range(0,4)) + self.model.A = Set(initialize=range(0, 4)) + @set_options(dimen=3) def B_index(model): - return [(i,2*i,i*i) for i in model.A if i%2 == 0] + return [(i, 2 * i, i * i) for i in model.A if i % 2 == 0] + def B_init(model, i, ii, iii, j): if j: - return 2+i - return -(2+i) - self.model.B = Param(B_index, [True,False], initialize=B_init) + return 2 + i + return -(2 + i) + + self.model.B = Param(B_index, [True, False], initialize=B_init) self.instance = self.model.create_instance() - #self.instance.pprint() - self.assertEqual(set(self.instance.B.keys()),set([(0,0,0,True),(2,4,4,True),(0,0,0,False),(2,4,4,False)])) - self.assertEqual(self.instance.B[0,0,0,True],2) - self.assertEqual(self.instance.B[0,0,0,False],-2) - self.assertEqual(self.instance.B[2,4,4,True],4) - self.assertEqual(self.instance.B[2,4,4,False],-4) + # self.instance.pprint() + self.assertEqual( + set(self.instance.B.keys()), + set([(0, 0, 0, True), (2, 4, 4, True), (0, 0, 0, False), (2, 4, 4, False)]), + ) + self.assertEqual(self.instance.B[0, 0, 0, True], 2) + self.assertEqual(self.instance.B[0, 0, 0, False], -2) + self.assertEqual(self.instance.B[2, 4, 4, True], 4) + self.assertEqual(self.instance.B[2, 4, 4, False], -4) def test_index3(self): - self.model.A = Set(initialize=range(0,4)) + self.model.A = Set(initialize=range(0, 4)) + def B_index(model): - return [(i,2*i,i*i) for i in model.A if i%2 == 0] + return [(i, 2 * i, i * i) for i in model.A if i % 2 == 0] + def B_init(model, i, ii, iii, j): if j: - return 2+i - return -(2+i) - self.model.B = Param(B_index, [True,False], initialize=B_init) + return 2 + i + return -(2 + i) + + self.model.B = Param(B_index, [True, False], initialize=B_init) # In the set rewrite, the following now works! # try: # self.instance = self.model.create_instance() @@ -748,107 +772,118 @@ def B_init(model, i, ii, iii, j): # except ValueError: # pass self.instance = self.model.create_instance() - self.assertEqual(set(self.instance.B.keys()),set([(0,0,0,True),(2,4,4,True),(0,0,0,False),(2,4,4,False)])) - self.assertEqual(self.instance.B[0,0,0,True],2) - self.assertEqual(self.instance.B[0,0,0,False],-2) - self.assertEqual(self.instance.B[2,4,4,True],4) - self.assertEqual(self.instance.B[2,4,4,False],-4) + self.assertEqual( + set(self.instance.B.keys()), + set([(0, 0, 0, True), (2, 4, 4, True), (0, 0, 0, False), (2, 4, 4, False)]), + ) + self.assertEqual(self.instance.B[0, 0, 0, True], 2) + self.assertEqual(self.instance.B[0, 0, 0, False], -2) + self.assertEqual(self.instance.B[2, 4, 4, True], 4) + self.assertEqual(self.instance.B[2, 4, 4, False], -4) def test_index4(self): - self.model.A = Set(initialize=range(0,4)) + self.model.A = Set(initialize=range(0, 4)) + @set_options(within=Integers) def B_index(model): - return [i/2.0 for i in model.A] + return [i / 2.0 for i in model.A] + def B_init(model, i, j): if j: - return 2+i - return -(2+i) - self.model.B = Param(B_index, [True,False], initialize=B_init) + return 2 + i + return -(2 + i) + + self.model.B = Param(B_index, [True, False], initialize=B_init) try: self.instance = self.model.create_instance() - self.fail("Expected ValueError because B_index returns invalid index values") + self.fail( + "Expected ValueError because B_index returns invalid index values" + ) except ValueError: pass def test_dimen1(self): - model=AbstractModel() - model.A = Set(dimen=2, initialize=[(1,2),(3,4)]) - model.B = Set(dimen=3, initialize=[(1,1,1),(2,2,2),(3,3,3)]) - model.C = Set(dimen=1, initialize=[9,8,7,6,5]) + model = AbstractModel() + model.A = Set(dimen=2, initialize=[(1, 2), (3, 4)]) + model.B = Set(dimen=3, initialize=[(1, 1, 1), (2, 2, 2), (3, 3, 3)]) + model.C = Set(dimen=1, initialize=[9, 8, 7, 6, 5]) model.x = Param(model.A, model.B, model.C, initialize=-1) - #model.y = Param(model.B, initialize=(1,1)) + # model.y = Param(model.B, initialize=(1,1)) model.y = Param(model.B, initialize=1) - instance=model.create_instance() - self.assertEqual( instance.x.dim(), 6) - self.assertEqual( instance.y.dim(), 3) + instance = model.create_instance() + self.assertEqual(instance.x.dim(), 6) + self.assertEqual(instance.y.dim(), 3) def test_setitem(self): model = ConcreteModel() - model.a = Set(initialize=[1,2,3]) - model.b = Set(initialize=['a','b','c']) + model.a = Set(initialize=[1, 2, 3]) + model.b = Set(initialize=['a', 'b', 'c']) model.c = model.b * model.b - model.p = Param(model.a, model.c, within=NonNegativeIntegers, default=0, mutable=True) - model.p[1,'a','b'] = 1 - model.p[1,('a','b')] = 1 - model.p[(1,'b'),'b'] = 1 + model.p = Param( + model.a, model.c, within=NonNegativeIntegers, default=0, mutable=True + ) + model.p[1, 'a', 'b'] = 1 + model.p[1, ('a', 'b')] = 1 + model.p[(1, 'b'), 'b'] = 1 try: - model.p[1,5,7] = 1 + model.p[1, 5, 7] = 1 self.fail("Expected KeyError") except KeyError: pass class ScalarTester(ParamTester): - def setUp(self, **kwds): # # "Sparse" scalar Param, no default # - self.model.Z = Set(initialize=[1,3]) + self.model.Z = Set(initialize=[1, 3]) self.model.A = Param(**kwds) self.instance = self.model.create_instance() - #self.instance.pprint() + # self.instance.pprint() self.expectTextDomainError = False self.expectNegativeDomainError = False - def test_value_scalar(self): - #"""Check the value of the parameter""" - if self.data.get(None,NoValue) is NoValue: + # """Check the value of the parameter""" + if self.data.get(None, NoValue) is NoValue: self.assertRaises(ValueError, value, self.instance.A) self.assertRaises(TypeError, float, self.instance.A) self.assertRaises(TypeError, int, self.instance.A) else: val = self.data[None] tmp = value(self.instance.A) - self.assertEqual( type(tmp), type(val)) - self.assertEqual( tmp, val ) + self.assertEqual(type(tmp), type(val)) + self.assertEqual(tmp, val) self.assertRaises(TypeError, float, self.instance.A) self.assertRaises(TypeError, int, self.instance.A) def test_call(self): - #"""Check the use of the __call__ method""" - if self.sparse_data.get(None,0) is NoValue or \ - self.data.get(None,NoValue) is NoValue: #not self.sparse_data: + # """Check the use of the __call__ method""" + if ( + self.sparse_data.get(None, 0) is NoValue + or self.data.get(None, NoValue) is NoValue + ): # not self.sparse_data: self.assertRaisesRegex( - ValueError, ".*currently set to an invalid value", - self.instance.A.__call__ ) + ValueError, + ".*currently set to an invalid value", + self.instance.A.__call__, + ) else: self.assertEqual(self.instance.A(), self.data[None]) def test_get_valueattr(self): - self.assertEqual( self.instance.A._value, - self.sparse_data.get(None,NoValue) ) - if self.data.get(None,0) is NoValue: #not self.sparse_data: + self.assertEqual(self.instance.A._value, self.sparse_data.get(None, NoValue)) + if self.data.get(None, 0) is NoValue: # not self.sparse_data: try: value(self.instance.A) self.fail("Expected value error") except ValueError: pass else: - self.assertEqual( self.instance.A.value, self.data[None]) + self.assertEqual(self.instance.A.value, self.data[None]) def test_set_valueattr(self): self.instance.A.value = 4.3 @@ -856,32 +891,32 @@ def test_set_valueattr(self): self.assertEqual(self.instance.A(), 4.3) def test_get_value(self): - if self.sparse_data.get(None,0) is NoValue or \ - self.data.get(None,NoValue) is NoValue: #not self.sparse_data: + if ( + self.sparse_data.get(None, 0) is NoValue + or self.data.get(None, NoValue) is NoValue + ): # not self.sparse_data: try: value(self.instance.A) self.fail("Expected value error") except ValueError: pass else: - self.assertEqual( self.instance.A.value, self.data[None]) + self.assertEqual(self.instance.A.value, self.data[None]) def test_set_value(self): self.instance.A = 4.3 self.assertEqual(self.instance.A.value, 4.3) self.assertEqual(self.instance.A(), 4.3) - def test_is_indexed(self): self.assertFalse(self.instance.A.is_indexed()) def test_dim(self): - #"""Check the use of dim""" - self.assertEqual( self.instance.A.dim(), 0) + # """Check the use of dim""" + self.assertEqual(self.instance.A.dim(), 0) class ScalarParam_mutable_noDefault(ScalarTester, unittest.TestCase): - def setUp(self, **kwds): # # Sparse single-index Param, no default @@ -890,11 +925,10 @@ def setUp(self, **kwds): ScalarTester.setUp(self, mutable=True, **kwds) self.sparse_data = {} - self.data = {None:NoValue} + self.data = {None: NoValue} class ScalarParam_mutable_init(ScalarTester, unittest.TestCase): - def setUp(self, **kwds): # # Sparse single-index Param, no default @@ -902,12 +936,11 @@ def setUp(self, **kwds): self.model = AbstractModel() ScalarTester.setUp(self, mutable=True, initialize=1.3, **kwds) - self.sparse_data = {None:1.3} - self.data = {None:1.3} + self.sparse_data = {None: 1.3} + self.data = {None: 1.3} class ScalarParam_mutable_floatDefault(ScalarTester, unittest.TestCase): - def setUp(self, **kwds): # # Sparse single-index Param, no default @@ -916,11 +949,10 @@ def setUp(self, **kwds): ScalarTester.setUp(self, mutable=True, default=1.3, **kwds) self.sparse_data = {} - self.data = {None:1.3} + self.data = {None: 1.3} class TestIO(unittest.TestCase): - def setUp(self): # # Create Model @@ -935,194 +967,204 @@ def tearDown(self): self.instance = None def test_io1(self): - OUTPUT=open("param.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "param A := 3.3;\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open("param.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("param A := 3.3;\n") + OUTPUT.write("end;\n") OUTPUT.close() - self.model.A=Param() + self.model.A = Param() self.instance = self.model.create_instance("param.dat") - self.assertEqual( value(self.instance.A), 3.3 ) + self.assertEqual(value(self.instance.A), 3.3) def test_io2(self): - OUTPUT=open("param.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "set Z := 1 3 5;\n" ) - OUTPUT.write( "param A :=\n" ) - OUTPUT.write( "1 2.2\n" ) - OUTPUT.write( "3 2.3\n" ) - OUTPUT.write( "5 2.5;\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open("param.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set Z := 1 3 5;\n") + OUTPUT.write("param A :=\n") + OUTPUT.write("1 2.2\n") + OUTPUT.write("3 2.3\n") + OUTPUT.write("5 2.5;\n") + OUTPUT.write("end;\n") OUTPUT.close() - self.model.Z=Set() - self.model.A=Param(self.model.Z) + self.model.Z = Set() + self.model.A = Param(self.model.Z) self.instance = self.model.create_instance("param.dat") - self.assertEqual( len(self.instance.A), 3 ) + self.assertEqual(len(self.instance.A), 3) def test_io3(self): - OUTPUT=open("param.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "set Z := 1 3 5;\n" ) - OUTPUT.write( "param : A B :=\n" ) - OUTPUT.write( "1 2.2 3.3\n" ) - OUTPUT.write( "3 2.3 3.4\n" ) - OUTPUT.write( "5 2.5 3.5;\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open("param.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set Z := 1 3 5;\n") + OUTPUT.write("param : A B :=\n") + OUTPUT.write("1 2.2 3.3\n") + OUTPUT.write("3 2.3 3.4\n") + OUTPUT.write("5 2.5 3.5;\n") + OUTPUT.write("end;\n") OUTPUT.close() - self.model.Z=Set() - self.model.A=Param(self.model.Z) - self.model.B=Param(self.model.Z) + self.model.Z = Set() + self.model.A = Param(self.model.Z) + self.model.B = Param(self.model.Z) self.instance = self.model.create_instance("param.dat") - self.assertEqual( len(self.instance.A), 3 ) - self.assertEqual( len(self.instance.B), 3 ) - self.assertEqual( self.instance.B[5], 3.5 ) + self.assertEqual(len(self.instance.A), 3) + self.assertEqual(len(self.instance.B), 3) + self.assertEqual(self.instance.B[5], 3.5) def test_io4(self): - OUTPUT=open("param.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "set Z := A1 A2 A3;\n" ) - OUTPUT.write( "set Y := 1 2 3;\n" ) - OUTPUT.write( "param A: A1 A2 A3 :=\n" ) - OUTPUT.write( "1 1.3 2.3 3.3\n" ) - OUTPUT.write( "2 1.4 2.4 3.4\n" ) - OUTPUT.write( "3 1.5 2.5 3.5\n" ) - OUTPUT.write( ";\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open("param.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set Z := A1 A2 A3;\n") + OUTPUT.write("set Y := 1 2 3;\n") + OUTPUT.write("param A: A1 A2 A3 :=\n") + OUTPUT.write("1 1.3 2.3 3.3\n") + OUTPUT.write("2 1.4 2.4 3.4\n") + OUTPUT.write("3 1.5 2.5 3.5\n") + OUTPUT.write(";\n") + OUTPUT.write("end;\n") OUTPUT.close() - self.model.Z=Set() - self.model.Y=Set() - self.model.A=Param(self.model.Y,self.model.Z) + self.model.Z = Set() + self.model.Y = Set() + self.model.A = Param(self.model.Y, self.model.Z) self.instance = self.model.create_instance("param.dat") - self.assertEqual( len(self.instance.Y), 3 ) - self.assertEqual( len(self.instance.Z), 3 ) - self.assertEqual( len(self.instance.A), 9 ) - self.assertEqual( self.instance.A[1, 'A2'], 2.3 ) + self.assertEqual(len(self.instance.Y), 3) + self.assertEqual(len(self.instance.Z), 3) + self.assertEqual(len(self.instance.A), 9) + self.assertEqual(self.instance.A[1, 'A2'], 2.3) def test_io5(self): - OUTPUT=open("param.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "set Z := A1 A2 A3;\n" ) - OUTPUT.write( "set Y := 1 2 3;\n" ) - OUTPUT.write( "param A (tr): A1 A2 A3 :=\n" ) - OUTPUT.write( "1 1.3 2.3 3.3\n" ) - OUTPUT.write( "2 1.4 2.4 3.4\n" ) - OUTPUT.write( "3 1.5 2.5 3.5\n" ) - OUTPUT.write( ";\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open("param.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set Z := A1 A2 A3;\n") + OUTPUT.write("set Y := 1 2 3;\n") + OUTPUT.write("param A (tr): A1 A2 A3 :=\n") + OUTPUT.write("1 1.3 2.3 3.3\n") + OUTPUT.write("2 1.4 2.4 3.4\n") + OUTPUT.write("3 1.5 2.5 3.5\n") + OUTPUT.write(";\n") + OUTPUT.write("end;\n") OUTPUT.close() - self.model.Z=Set() - self.model.Y=Set() - self.model.A=Param(self.model.Z,self.model.Y) + self.model.Z = Set() + self.model.Y = Set() + self.model.A = Param(self.model.Z, self.model.Y) self.instance = self.model.create_instance("param.dat") - self.assertEqual( len(self.instance.Y), 3 ) - self.assertEqual( len(self.instance.Z), 3 ) - self.assertEqual( len(self.instance.A), 9 ) - self.assertEqual( self.instance.A['A2',1], 2.3 ) + self.assertEqual(len(self.instance.Y), 3) + self.assertEqual(len(self.instance.Z), 3) + self.assertEqual(len(self.instance.A), 9) + self.assertEqual(self.instance.A['A2', 1], 2.3) def test_io6(self): - OUTPUT=open("param.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "set Z := 1 3 5;\n" ) - OUTPUT.write( "param A default 0.0 :=\n" ) - OUTPUT.write( "1 2.2\n" ) - OUTPUT.write( "3 .\n" ) - OUTPUT.write( "5 2.5;\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open("param.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set Z := 1 3 5;\n") + OUTPUT.write("param A default 0.0 :=\n") + OUTPUT.write("1 2.2\n") + OUTPUT.write("3 .\n") + OUTPUT.write("5 2.5;\n") + OUTPUT.write("end;\n") OUTPUT.close() - self.model.Z=Set() - self.model.A=Param(self.model.Z) + self.model.Z = Set() + self.model.A = Param(self.model.Z) self.instance = self.model.create_instance("param.dat") - #self.instance.pprint() - self.assertEqual( len(self.instance.A), 3 ) - self.assertEqual( self.instance.A[3], 0.0 ) + # self.instance.pprint() + self.assertEqual(len(self.instance.A), 3) + self.assertEqual(self.instance.A[3], 0.0) def test_io7(self): - OUTPUT=open("param.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "param A := True;\n" ) - OUTPUT.write( "param B := False;\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open("param.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("param A := True;\n") + OUTPUT.write("param B := False;\n") + OUTPUT.write("end;\n") OUTPUT.close() - self.model.A=Param(within=Boolean) - self.model.B=Param(within=Boolean) + self.model.A = Param(within=Boolean) + self.model.B = Param(within=Boolean) self.instance = self.model.create_instance("param.dat") - self.assertEqual( value(self.instance.A), True ) - self.assertEqual( value(self.instance.B), False ) + self.assertEqual(value(self.instance.A), True) + self.assertEqual(value(self.instance.B), False) def test_io8(self): - OUTPUT=open("param.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "param : A : B :=\n" ) - OUTPUT.write( "\"A\" 3.3\n" ) - OUTPUT.write( "\"B\" 3.4\n" ) - OUTPUT.write( "\"C\" 3.5;\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open("param.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("param : A : B :=\n") + OUTPUT.write("\"A\" 3.3\n") + OUTPUT.write("\"B\" 3.4\n") + OUTPUT.write("\"C\" 3.5;\n") + OUTPUT.write("end;\n") OUTPUT.close() - self.model.A=Set() - self.model.B=Param(self.model.A) + self.model.A = Set() + self.model.B = Param(self.model.A) self.instance = self.model.create_instance("param.dat") - self.assertEqual( set(self.instance.A.data()), set(['A','B','C']) ) + self.assertEqual(set(self.instance.A.data()), set(['A', 'B', 'C'])) def test_io9(self): - OUTPUT=open("param.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "param : A : B :=\n" ) - OUTPUT.write( "\"A\" 0.1\n" ) - OUTPUT.write( "\"B\" 1e-1\n" ) - OUTPUT.write( "\"b\" 1.4e-1\n" ) - OUTPUT.write( "\"C\" 1E-1\n" ) - OUTPUT.write( "\"c\" 1.4E-1\n" ) - OUTPUT.write( "\"D\" 1E+1\n" ) - OUTPUT.write( "\"d\" 1.4E+1\n" ) - OUTPUT.write( "\"AA\" -0.1\n" ) - OUTPUT.write( "\"BB\" -1e-1\n" ) - OUTPUT.write( "\"bb\" -1.4e-1\n" ) - OUTPUT.write( "\"CC\" -1E-1\n" ) - OUTPUT.write( "\"cc\" -1.4E-1\n" ) - OUTPUT.write( "\"DD\" -1E+1\n" ) - OUTPUT.write( "\"dd\" -1.4E+1;\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open("param.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("param : A : B :=\n") + OUTPUT.write("\"A\" 0.1\n") + OUTPUT.write("\"B\" 1e-1\n") + OUTPUT.write("\"b\" 1.4e-1\n") + OUTPUT.write("\"C\" 1E-1\n") + OUTPUT.write("\"c\" 1.4E-1\n") + OUTPUT.write("\"D\" 1E+1\n") + OUTPUT.write("\"d\" 1.4E+1\n") + OUTPUT.write("\"AA\" -0.1\n") + OUTPUT.write("\"BB\" -1e-1\n") + OUTPUT.write("\"bb\" -1.4e-1\n") + OUTPUT.write("\"CC\" -1E-1\n") + OUTPUT.write("\"cc\" -1.4E-1\n") + OUTPUT.write("\"DD\" -1E+1\n") + OUTPUT.write("\"dd\" -1.4E+1;\n") + OUTPUT.write("end;\n") OUTPUT.close() - self.model.A=Set() - self.model.B=Param(self.model.A) + self.model.A = Set() + self.model.B = Param(self.model.A) self.instance = self.model.create_instance("param.dat") - self.assertEqual( self.instance.B['A'], 0.1) - self.assertEqual( self.instance.B['B'], 0.1) - self.assertEqual( self.instance.B['b'], 0.14) - self.assertEqual( self.instance.B['C'], 0.1) - self.assertEqual( self.instance.B['c'], 0.14) - self.assertEqual( self.instance.B['D'], 10) - self.assertEqual( self.instance.B['d'], 14) - self.assertEqual( self.instance.B['AA'], -0.1) - self.assertEqual( self.instance.B['BB'], -0.1) - self.assertEqual( self.instance.B['bb'], -0.14) - self.assertEqual( self.instance.B['CC'], -0.1) - self.assertEqual( self.instance.B['cc'], -0.14) - self.assertEqual( self.instance.B['DD'], -10) - self.assertEqual( self.instance.B['dd'], -14) + self.assertEqual(self.instance.B['A'], 0.1) + self.assertEqual(self.instance.B['B'], 0.1) + self.assertEqual(self.instance.B['b'], 0.14) + self.assertEqual(self.instance.B['C'], 0.1) + self.assertEqual(self.instance.B['c'], 0.14) + self.assertEqual(self.instance.B['D'], 10) + self.assertEqual(self.instance.B['d'], 14) + self.assertEqual(self.instance.B['AA'], -0.1) + self.assertEqual(self.instance.B['BB'], -0.1) + self.assertEqual(self.instance.B['bb'], -0.14) + self.assertEqual(self.instance.B['CC'], -0.1) + self.assertEqual(self.instance.B['cc'], -0.14) + self.assertEqual(self.instance.B['DD'], -10) + self.assertEqual(self.instance.B['dd'], -14) def test_io10(self): - OUTPUT=open("param.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "set A1 := a b c d e f g h i j k l ;\n" ) - OUTPUT.write( "set A2 := 2 4 6 ;\n" ) - OUTPUT.write( "param B :=\n" ) - OUTPUT.write( " [*,2,*] a b 1 c d 2 e f 3\n" ) - OUTPUT.write( " [*,4,*] g h 4 i j 5\n" ) - OUTPUT.write( " [*,6,*] k l 6\n" ) - OUTPUT.write( ";\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open("param.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set A1 := a b c d e f g h i j k l ;\n") + OUTPUT.write("set A2 := 2 4 6 ;\n") + OUTPUT.write("param B :=\n") + OUTPUT.write(" [*,2,*] a b 1 c d 2 e f 3\n") + OUTPUT.write(" [*,4,*] g h 4 i j 5\n") + OUTPUT.write(" [*,6,*] k l 6\n") + OUTPUT.write(";\n") + OUTPUT.write("end;\n") OUTPUT.close() - self.model.A1=Set() - self.model.A2=Set() - self.model.B=Param(self.model.A1,self.model.A2,self.model.A1) + self.model.A1 = Set() + self.model.A2 = Set() + self.model.B = Param(self.model.A1, self.model.A2, self.model.A1) self.instance = self.model.create_instance("param.dat") - self.assertEqual( set(self.instance.B.sparse_keys()), set([('e', 2, 'f'), ('c', 2, 'd'), ('a', 2, 'b'), ('i', 4, 'j'), ('g', 4, 'h'), ('k', 6, 'l')])) - + self.assertEqual( + set(self.instance.B.sparse_keys()), + set( + [ + ('e', 2, 'f'), + ('c', 2, 'd'), + ('a', 2, 'b'), + ('i', 4, 'j'), + ('g', 4, 'h'), + ('k', 6, 'l'), + ] + ), + ) class TestParamConditional(unittest.TestCase): - def setUp(self): self.model = AbstractModel() @@ -1132,8 +1174,9 @@ def tearDown(self): def test_if_const_param_1value(self): self.model.p = Param(initialize=1.0) with self.assertRaisesRegex( - PyomoException, r"Cannot convert non-constant Pyomo " - r"numeric value \(p\) to bool"): + PyomoException, + r"Cannot convert non-constant Pyomo " r"numeric value \(p\) to bool", + ): if self.model.p: pass instance = self.model.create_instance() @@ -1145,8 +1188,9 @@ def test_if_const_param_1value(self): def test_if_const_param_0value(self): self.model.p = Param(initialize=0.0) with self.assertRaisesRegex( - PyomoException, r"Cannot convert non-constant Pyomo " - r"numeric value \(p\) to bool"): + PyomoException, + r"Cannot convert non-constant Pyomo " r"numeric value \(p\) to bool", + ): if self.model.p: pass instance = self.model.create_instance() @@ -1157,7 +1201,6 @@ def test_if_const_param_0value(self): class MiscParamTests(unittest.TestCase): - def test_constructor(self): a = Param(name="a") try: @@ -1165,8 +1208,8 @@ def test_constructor(self): self.fail("Cannot pass in 'foo' as an option to Param") except ValueError: pass - model=AbstractModel() - model.b = Param(initialize=[1,2,3]) + model = AbstractModel() + model.b = Param(initialize=[1, 2, 3]) try: model.c = Param(model.b) self.fail("Can't index a parameter with a parameter") @@ -1174,23 +1217,27 @@ def test_constructor(self): pass # model = AbstractModel() - model.a = Param(initialize={None:3.3}) + model.a = Param(initialize={None: 3.3}) instance = model.create_instance() def test_empty_index(self): # Verify that we can initialize a parameter with an empty set. model = ConcreteModel() model.A = Set() + def rule(model, i): return 0.0 + model.p = Param(model.A, initialize=rule) def test_invalid_default(self): # Verify that we can initialize a parameter with an empty set. model = ConcreteModel() with self.assertRaisesRegex( - ValueError, r'Default value \(-1\) is not valid for ' - r'Param p domain NonNegativeIntegers'): + ValueError, + r'Default value \(-1\) is not valid for ' + r'Param p domain NonNegativeIntegers', + ): model.p = Param(default=-1, within=NonNegativeIntegers) def test_invalid_data(self): @@ -1198,8 +1245,8 @@ def test_invalid_data(self): model = AbstractModel() model.p = Param() with self.assertRaisesRegex( - ValueError, - r'Attempting to initialize parameter=p with data=\[\]'): + ValueError, r'Attempting to initialize parameter=p with data=\[\]' + ): model.create_instance(data={None: {'p': []}}) def test_param_validate(self): @@ -1215,19 +1262,18 @@ def test_param_validate(self): 5. Mutable Param (indexed) 6. Mutable Param (arbitrary validation rule) """ + def validation_rule(model, value): """Arbitrary validation rule that always returns False.""" return False # 1. Immutable Param (unindexed) - with self.assertRaisesRegex( - ValueError, "Value not in parameter domain"): + with self.assertRaisesRegex(ValueError, "Value not in parameter domain"): m = ConcreteModel() m.p1 = Param(initialize=-3, within=NonNegativeReals) # 2. Immutable Param (indexed) - with self.assertRaisesRegex( - ValueError, "Value not in parameter domain"): + with self.assertRaisesRegex(ValueError, "Value not in parameter domain"): m = ConcreteModel() m.A = RangeSet(1, 2) m.p2 = Param(m.A, initialize=-3, within=NonNegativeReals) @@ -1238,15 +1284,13 @@ def validation_rule(model, value): m.p5 = Param(initialize=1, validate=validation_rule) # 4. Mutable Param (unindexed) - with self.assertRaisesRegex( - ValueError, "Value not in parameter domain"): + with self.assertRaisesRegex(ValueError, "Value not in parameter domain"): m = ConcreteModel() m.p3 = Param(within=NonNegativeReals, mutable=True) m.p3 = -3 # 5. Mutable Param (indexed) - with self.assertRaisesRegex( - ValueError, "Value not in parameter domain"): + with self.assertRaisesRegex(ValueError, "Value not in parameter domain"): m = ConcreteModel() m.A = RangeSet(1, 2) m.p4 = Param(m.A, within=NonNegativeReals, mutable=True) @@ -1264,22 +1308,20 @@ def validation_rule(model, value): a = AbstractModel() a.p = Param(within=NonNegativeReals) a.p = 1 - with self.assertRaisesRegex( - ValueError, "Value not in parameter domain"): + with self.assertRaisesRegex(ValueError, "Value not in parameter domain"): a.p = -2 - with self.assertRaisesRegex( - RuntimeError, "Value not in parameter domain"): + with self.assertRaisesRegex(RuntimeError, "Value not in parameter domain"): m = a.create_instance({None: {'p': {None: -1}}}) m = a.create_instance() self.assertEqual(value(m.p), 1) def test_get_uninitialized(self): - model=AbstractModel() + model = AbstractModel() model.a = Param() - model.b = Set(initialize=[1,2,3]) + model.b = Set(initialize=[1, 2, 3]) model.c = Param(model.b, initialize=2, within=Reals) - instance=model.create_instance() + instance = model.create_instance() # Test that value(instance.a) throws ValueError self.assertRaises(ValueError, value, instance.a) # @@ -1287,42 +1329,40 @@ def test_get_uninitialized(self): # (like Var) do not raise a ValueError # # Test that instance.a() throws ValueError - #self.assertRaises(ValueError, instance.a) + # self.assertRaises(ValueError, instance.a) def test_indexOverRange_abstract(self): model = AbstractModel() - model.p = Param(range(1,3), range(2), initialize=1.0) + model.p = Param(range(1, 3), range(2), initialize=1.0) inst = model.create_instance() - self.assertEqual( sorted(inst.p.keys()), - [(1,0), (1,1), (2,0), (2,1)] ) - self.assertEqual( inst.p[1,0], 1.0 ) - self.assertRaises( KeyError, inst.p.__getitem__, (0, 0) ) + self.assertEqual(sorted(inst.p.keys()), [(1, 0), (1, 1), (2, 0), (2, 1)]) + self.assertEqual(inst.p[1, 0], 1.0) + self.assertRaises(KeyError, inst.p.__getitem__, (0, 0)) def test_indexOverRange_concrete(self): inst = ConcreteModel() - inst.p = Param(range(1,3), range(2), initialize=1.0) - self.assertEqual( sorted(inst.p.keys()), - [(1,0), (1,1), (2,0), (2,1)] ) - self.assertEqual( inst.p[1,0], 1.0 ) - self.assertRaises( KeyError, inst.p.__getitem__, (0, 0) ) + inst.p = Param(range(1, 3), range(2), initialize=1.0) + self.assertEqual(sorted(inst.p.keys()), [(1, 0), (1, 1), (2, 0), (2, 1)]) + self.assertEqual(inst.p[1, 0], 1.0) + self.assertRaises(KeyError, inst.p.__getitem__, (0, 0)) def test_get_set(self): - model=AbstractModel() + model = AbstractModel() model.a = Param(initialize=2, mutable=True) - model.b = Set(initialize=[1,2,3]) + model.b = Set(initialize=[1, 2, 3]) model.c = Param(model.b, initialize=2, within=Reals, mutable=True) - #try: - #model.a.value = 3 - #self.fail("can't set the value of an unitialized parameter") - #except AttributeError: - #pass + # try: + # model.a.value = 3 + # self.fail("can't set the value of an uninitialized parameter") + # except AttributeError: + # pass instance = model.create_instance() instance.a.value = 3 - #try: - #instance.a.default='2' - #self.fail("can't set a bad default value") - #except ValueError: - #pass + # try: + # instance.a.default='2' + # self.fail("can't set a bad default value") + # except ValueError: + # pass self.assertEqual(2 in instance.c, True) try: @@ -1344,9 +1384,9 @@ def test_get_set(self): self.assertEqual(value(instance.c[3]), 2) def test_iter(self): - model=AbstractModel() - model.b = Set(initialize=[1,2,3]) - model.c = Param(model.b,initialize=2) + model = AbstractModel() + model.b = Set(initialize=[1, 2, 3]) + model.c = Param(model.b, initialize=2) instance = model.create_instance() for i in instance.c: self.assertEqual(i in instance.c, True) @@ -1354,20 +1394,22 @@ def test_iter(self): def test_valid(self): def d_valid(model, a): return True + def e_valid(model, a, i, j): return True - model=AbstractModel() - model.b = Set(initialize=[1,3,5]) + + model = AbstractModel() + model.b = Set(initialize=[1, 3, 5]) model.c = Param(initialize=2, within=None) - model.d = Param(initialize=(2,3), validate=d_valid) - model.e = Param(model.b,model.b,initialize={(1,1):(2,3)}, validate=e_valid) + model.d = Param(initialize=(2, 3), validate=d_valid) + model.e = Param(model.b, model.b, initialize={(1, 1): (2, 3)}, validate=e_valid) instance = model.create_instance() - #instance.e.check_values() - #try: - #instance.c.value = 'b' - #self.fail("can't have a non-numerical parameter") - #except ValueError: - #pass + # instance.e.check_values() + # try: + # instance.c.value = 'b' + # self.fail("can't have a non-numerical parameter") + # except ValueError: + # pass def test_nonnumeric(self): m = ConcreteModel() @@ -1381,7 +1423,8 @@ def test_nonnumeric(self): p : Size=1, Index=None, Domain=Any, Default=None, Mutable=True Key : Value None : hi - """.strip()) + """.strip(), + ) m.q = Param(Any, mutable=True) m.q[1] = None m.q[2] @@ -1397,7 +1440,8 @@ def test_nonnumeric(self): 1 : None 2 : a : b - """.strip()) + """.strip(), + ) def test_domain_deprecation(self): m = ConcreteModel() @@ -1412,10 +1456,12 @@ def test_domain_deprecation(self): m.p = 'a' self.assertIn( "The default domain for Param objects is 'Any'", - log.getvalue().replace('\n', ' ')) + log.getvalue().replace('\n', ' '), + ) self.assertIn( "DEPRECATED: Param 'p' declared with an implicit domain of 'Any'", - log.getvalue().replace('\n', ' ')) + log.getvalue().replace('\n', ' '), + ) self.assertEqual(value(m.p), 'a') m.b = Block() @@ -1427,7 +1473,8 @@ def test_domain_deprecation(self): """ q : Size=0, Index=None, Domain=Any, Default=None, Mutable=False Key : Value - """.strip()) + """.strip(), + ) i = m.clone() self.assertIsNot(m.p.domain, i.p.domain) @@ -1440,23 +1487,26 @@ def test_set_value_units(self): m.p = Param(units=units.g) m.p = 5 self.assertEqual(value(m.p), 5) - m.p = 6*units.g + m.p = 6 * units.g self.assertEqual(value(m.p), 6) - m.p = 7*units.kg + m.p = 7 * units.kg self.assertEqual(value(m.p), 7000) with self.assertRaises(UnitsError): - m.p = 1*units.s + m.p = 1 * units.s out = StringIO() m.pprint(ostream=out) - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ 1 Param Declarations p : Size=1, Index=None, Domain=Any, Default=None, Mutable=True, Units=g Key : Value None : 7000.0 1 Declarations: p - """.strip()) + """.strip(), + ) def test_scalar_get_mutable_when_not_present(self): m = ConcreteModel() @@ -1486,12 +1536,11 @@ def test_scalar_set_mutable_when_not_present(self): def createNonIndexedParamMethod(func, init_xy, new_xy, tol=1e-10): - def testMethod(self): model = ConcreteModel() model.Q1 = Param(initialize=init_xy[0], mutable=True) model.x = Var() - model.CON = Constraint(expr=func(model.Q1)<=model.x) + model.CON = Constraint(expr=func(model.Q1) <= model.x) self.assertAlmostEqual(init_xy[1], value(model.CON[None].lower), delta=1e-10) @@ -1501,19 +1550,19 @@ def testMethod(self): return testMethod -def createIndexedParamMethod(func, init_xy, new_xy, tol=1e-10): +def createIndexedParamMethod(func, init_xy, new_xy, tol=1e-10): def testMethod(self): model = ConcreteModel() - model.P = Param([1,2],initialize=init_xy[0], mutable=True) - model.Q = Param([1,2],default=init_xy[0], mutable=True) - model.R = Param([1,2], mutable=True) + model.P = Param([1, 2], initialize=init_xy[0], mutable=True) + model.Q = Param([1, 2], default=init_xy[0], mutable=True) + model.R = Param([1, 2], mutable=True) model.R[1] = init_xy[0] model.R[2] = init_xy[0] model.x = Var() - model.CON1 = Constraint(expr=func(model.P[1])<=model.x) - model.CON2 = Constraint(expr=func(model.Q[1])<=model.x) - model.CON3 = Constraint(expr=func(model.R[1])<=model.x) + model.CON1 = Constraint(expr=func(model.P[1]) <= model.x) + model.CON2 = Constraint(expr=func(model.Q[1]) <= model.x) + model.CON3 = Constraint(expr=func(model.R[1]) <= model.x) self.assertAlmostEqual(init_xy[1], value(model.CON1[None].lower), delta=tol) self.assertAlmostEqual(init_xy[1], value(model.CON2[None].lower), delta=tol) @@ -1529,45 +1578,50 @@ def testMethod(self): return testMethod + def assignTestsNonIndexedParamTests(cls, problem_list): for val in problem_list: - attrName = 'test_mutable_'+val[0]+'_expr' - setattr(cls,attrName,createNonIndexedParamMethod(eval(val[0]),val[1],val[2])) + attrName = 'test_mutable_' + val[0] + '_expr' + setattr( + cls, attrName, createNonIndexedParamMethod(eval(val[0]), val[1], val[2]) + ) + def assignTestsIndexedParamTests(cls, problem_list): for val in problem_list: - attrName = 'test_mutable_'+val[0]+'_expr' - setattr(cls,attrName,createIndexedParamMethod(eval(val[0]),val[1],val[2])) - -instrinsic_test_list = [('sin', (0.0,0.0), (math.pi/2.0,1.0)), \ - ('cos', (0.0,1.0), (math.pi/2.0,0.0)), \ - ('log', (1.0,0.0), (math.e,1.0)), \ - ('log10', (1.0,0.0), (10.0,1.0)),\ - ('tan', (0.0,0.0), (math.pi/4.0,1.0)),\ - ('cosh', (0.0,1.0), (math.acosh(1.5),1.5)),\ - ('sinh', (0.0,0.0), (math.asinh(0.5),0.5)),\ - ('tanh', (0.0,0.0), (math.atanh(0.8),0.8)),\ - ('asin', (0.0,0.0), (math.sin(1.0),1.0)),\ - ('acos', (1.0,0.0), (math.cos(1.0),1.0)),\ - ('atan', (0.0,0.0), (math.tan(1.0),1.0)),\ - ('exp', (0.0,1.0), (math.log(2),2.0)),\ - ('sqrt', (1.0,1.0), (4.0,2.0)),\ - ('asinh', (0.0,0.0), (math.sinh(2.0),2.0)),\ - ('acosh', (1.0,0.0), (math.cosh(2.0),2.0)),\ - ('atanh', (0.0,0.0), (math.tanh(2.0),2.0)),\ - ('ceil', (0.5,1.0), (1.5,2.0)),\ - ('floor', (0.5,0.0), (1.5, 1.0))\ - ] + attrName = 'test_mutable_' + val[0] + '_expr' + setattr(cls, attrName, createIndexedParamMethod(eval(val[0]), val[1], val[2])) + + +intrinsic_test_list = [ + ('sin', (0.0, 0.0), (math.pi / 2.0, 1.0)), + ('cos', (0.0, 1.0), (math.pi / 2.0, 0.0)), + ('log', (1.0, 0.0), (math.e, 1.0)), + ('log10', (1.0, 0.0), (10.0, 1.0)), + ('tan', (0.0, 0.0), (math.pi / 4.0, 1.0)), + ('cosh', (0.0, 1.0), (math.acosh(1.5), 1.5)), + ('sinh', (0.0, 0.0), (math.asinh(0.5), 0.5)), + ('tanh', (0.0, 0.0), (math.atanh(0.8), 0.8)), + ('asin', (0.0, 0.0), (math.sin(1.0), 1.0)), + ('acos', (1.0, 0.0), (math.cos(1.0), 1.0)), + ('atan', (0.0, 0.0), (math.tan(1.0), 1.0)), + ('exp', (0.0, 1.0), (math.log(2), 2.0)), + ('sqrt', (1.0, 1.0), (4.0, 2.0)), + ('asinh', (0.0, 0.0), (math.sinh(2.0), 2.0)), + ('acosh', (1.0, 0.0), (math.cosh(2.0), 2.0)), + ('atanh', (0.0, 0.0), (math.tanh(2.0), 2.0)), + ('ceil', (0.5, 1.0), (1.5, 2.0)), + ('floor', (0.5, 0.0), (1.5, 1.0)), +] class MiscNonIndexedParamBehaviorTests(unittest.TestCase): - # Test that non-indexed params are mutable def test_mutable_self(self): model = ConcreteModel() model.Q = Param(initialize=0.0, mutable=True) model.x = Var() - model.CON = Constraint(expr=model.Q<=model.x) + model.CON = Constraint(expr=model.Q <= model.x) self.assertEqual(0.0, value(model.CON[None].lower)) @@ -1580,8 +1634,8 @@ def test_mutable_display(self): model = ConcreteModel() model.Q = Param(initialize=0.0, mutable=True) self.assertEqual(model.Q.value, 0.0) - #print model.Q._data - #print value(model.Q) + # print model.Q._data + # print value(model.Q) f = StringIO() display(model.Q, f) tmp = f.getvalue().splitlines() @@ -1589,9 +1643,9 @@ def test_mutable_display(self): self.assertEqual(model.Q.value, val) model.Q = 1.0 - self.assertEqual(model.Q.value,1.0) + self.assertEqual(model.Q.value, 1.0) f = StringIO() - display(model.Q,f) + display(model.Q, f) tmp = f.getvalue().splitlines() val = float(tmp[-1].split(':')[-1].strip()) self.assertEqual(model.Q.value, val) @@ -1608,7 +1662,7 @@ def test_mutable_pprint(self): buf.buf = '' model.Q = 1.0 - self.assertEqual(model.Q.value,1.0) + self.assertEqual(model.Q.value, 1.0) model.Q.pprint(ostream=buf) val = float(buf.getvalue().splitlines()[-1].split(':')[-1].strip()) self.assertEqual(model.Q.value, val) @@ -1620,7 +1674,7 @@ def test_mutable_sum_expr(self): model.Q1 = Param(initialize=0.0, mutable=True) model.Q2 = Param(initialize=0.0, mutable=True) model.x = Var() - model.CON = Constraint(expr=model.Q1+model.Q2<=model.x) + model.CON = Constraint(expr=model.Q1 + model.Q2 <= model.x) self.assertEqual(0.0, value(model.CON[None].lower)) @@ -1636,7 +1690,7 @@ def test_mutable_prod_expr(self): model.Q1 = Param(initialize=0.0, mutable=True) model.Q2 = Param(initialize=0.0, mutable=True) model.x = Var() - model.CON = Constraint(expr=model.Q1*model.Q2<=model.x) + model.CON = Constraint(expr=model.Q1 * model.Q2 <= model.x) self.assertEqual(0.0, value(model.CON[None].lower)) @@ -1652,7 +1706,7 @@ def test_mutable_pow_expr(self): model.Q1 = Param(initialize=1.0, mutable=True) model.Q2 = Param(initialize=1.0, mutable=True) model.x = Var() - model.CON = Constraint(expr=model.Q1**model.Q2<=model.x) + model.CON = Constraint(expr=model.Q1**model.Q2 <= model.x) self.assertEqual(1.0, value(model.CON[None].lower)) @@ -1667,7 +1721,7 @@ def test_mutable_abs_expr(self): model = ConcreteModel() model.Q1 = Param(initialize=-1.0, mutable=True) model.x = Var() - model.CON = Constraint(expr=abs(model.Q1)<=model.x) + model.CON = Constraint(expr=abs(model.Q1) <= model.x) self.assertEqual(1.0, value(model.CON[None].lower)) @@ -1677,18 +1731,17 @@ def test_mutable_abs_expr(self): # Add test methods for all intrinsic functions -assignTestsNonIndexedParamTests(MiscNonIndexedParamBehaviorTests,instrinsic_test_list) +assignTestsNonIndexedParamTests(MiscNonIndexedParamBehaviorTests, intrinsic_test_list) class MiscIndexedParamBehaviorTests(unittest.TestCase): - # Test that indexed params are mutable def test_mutable_self1(self): model = ConcreteModel() model.P = Param([1], mutable=True) model.P[1] = 1.0 model.x = Var() - model.CON = Constraint(expr=model.P[1]<=model.x) + model.CON = Constraint(expr=model.P[1] <= model.x) self.assertEqual(1.0, value(model.CON[None].lower)) @@ -1700,9 +1753,9 @@ def test_mutable_self1(self): # when initialized with 'initialize' def test_mutable_self2(self): model = ConcreteModel() - model.P = Param([1],initialize=1.0, mutable=True) + model.P = Param([1], initialize=1.0, mutable=True) model.x = Var() - model.CON = Constraint(expr=model.P[1]<=model.x) + model.CON = Constraint(expr=model.P[1] <= model.x) self.assertEqual(1.0, value(model.CON[None].lower)) @@ -1714,9 +1767,9 @@ def test_mutable_self2(self): # when initialized with 'default' def test_mutable_self3(self): model = ConcreteModel() - model.P = Param([1],default=1.0, mutable=True) + model.P = Param([1], default=1.0, mutable=True) model.x = Var() - model.CON = Constraint(expr=model.P[1]<=model.x) + model.CON = Constraint(expr=model.P[1] <= model.x) self.assertEqual(1.0, value(model.CON[None].lower)) @@ -1728,27 +1781,27 @@ def test_mutable_self3(self): # in param initialization def test_mutable_self4(self): model = ConcreteModel() - model.P = Param([1,2],default=1.0, mutable=True) + model.P = Param([1, 2], default=1.0, mutable=True) - self.assertEqual(model.P[1].value,1.0) - self.assertEqual(model.P[2].value,1.0) + self.assertEqual(model.P[1].value, 1.0) + self.assertEqual(model.P[2].value, 1.0) model.P[1].value = 0.0 - self.assertEqual(model.P[1].value,0.0) - self.assertEqual(model.P[2].value,1.0) + self.assertEqual(model.P[1].value, 0.0) + self.assertEqual(model.P[2].value, 1.0) - model.Q = Param([1,2],default=1.0, mutable=True) - self.assertEqual(model.Q[1].value,1.0) - self.assertEqual(model.Q[2].value,1.0) + model.Q = Param([1, 2], default=1.0, mutable=True) + self.assertEqual(model.Q[1].value, 1.0) + self.assertEqual(model.Q[2].value, 1.0) model.Q[1] = 0.0 - self.assertEqual(model.Q[1].value,0.0) - self.assertEqual(model.Q[2].value,1.0) + self.assertEqual(model.Q[1].value, 0.0) + self.assertEqual(model.Q[2].value, 1.0) # Test that display actually displays the correct param value def test_mutable_display(self): model = ConcreteModel() - model.P = Param([1,2],default=0.0, mutable=True) - model.Q = Param([1,2],initialize=0.0, mutable=True) - model.R = Param([1,2], mutable=True) + model.P = Param([1, 2], default=0.0, mutable=True) + model.Q = Param([1, 2], initialize=0.0, mutable=True) + model.R = Param([1, 2], mutable=True) model.R[1] = 0.0 model.R[2] = 0.0 # check initial values are correct @@ -1757,31 +1810,31 @@ def test_mutable_display(self): # Treat the param using default a little differently for Item in [model.P]: f = StringIO() - display(Item,f) + display(Item, f) tmp = f.getvalue().splitlines() self.assertEqual(len(tmp), 2) for Item in [model.Q, model.R]: f = StringIO() - display(Item,f) + display(Item, f) tmp = f.getvalue().splitlines() for tmp_ in tmp[2:]: val = float(tmp_.split(':')[-1].strip()) self.assertEqual(0, val) - #**** NOTE: Accessing the + # **** NOTE: Accessing the # value of indexed params which utilize # the default keyword actually causes the internal # rep to become dense for that index, which # changes display output for Item in [model.P, model.Q, model.R]: - for i in [1,2]: - self.assertEqual(Item[i].value,0.0) + for i in [1, 2]: + self.assertEqual(Item[i].value, 0.0) # check that the correct value is printed # Treat the param using default a little differently for Item in [model.P, model.Q, model.R]: f = StringIO() - display(Item,f) + display(Item, f) tmp = f.getvalue().splitlines() for tmp_ in tmp[2:]: val = float(tmp_.split(':')[-1].strip()) @@ -1797,7 +1850,7 @@ def test_mutable_display(self): # check that the correct value is printed for Item in [model.P, model.Q, model.R]: f = StringIO() - display(Item,f) + display(Item, f) tmp = f.getvalue().splitlines() i = 0 for tmp_ in tmp[2:]: @@ -1808,9 +1861,9 @@ def test_mutable_display(self): # Test that pprint actually displays the correct param value def test_mutable_pprint(self): model = ConcreteModel() - model.P = Param([1,2],default=0.0, mutable=True) - model.Q = Param([1,2],initialize=0.0, mutable=True) - model.R = Param([1,2], mutable=True) + model.P = Param([1, 2], default=0.0, mutable=True) + model.Q = Param([1, 2], initialize=0.0, mutable=True) + model.R = Param([1, 2], mutable=True) model.R[1] = 0.0 model.R[2] = 0.0 # check initial values are correct @@ -1819,32 +1872,32 @@ def test_mutable_pprint(self): # Treat the param using default a little differently for Item in [model.P]: f = StringIO() - display(Item,f) + display(Item, f) tmp = f.getvalue().splitlines() self.assertEqual(len(tmp), 2) for Item in [model.Q, model.R]: f = StringIO() - display(Item,f) + display(Item, f) tmp = f.getvalue().splitlines() for tmp_ in tmp[2:]: val = float(tmp_.split(':')[-1].strip()) self.assertEqual(0, val) - #**** NOTE: Accessing the + # **** NOTE: Accessing the # value of indexed params which utilize # the default keyword actually causes the internal # rep to become dense for that index, which # changes pprint output for Item in [model.P, model.Q, model.R]: - for i in [1,2]: - self.assertEqual(Item[i].value,0.0) + for i in [1, 2]: + self.assertEqual(Item[i].value, 0.0) for Item in [model.P, model.Q, model.R]: f = StringIO() Item.pprint(ostream=f) tmp = f.getvalue().splitlines() - for i in [1,2]: - val = float(tmp[i+1].split(':')[-1].strip()) + for i in [1, 2]: + val = float(tmp[i + 1].split(':')[-1].strip()) self.assertEqual(0, val) model.P[1] = 1.0 @@ -1859,8 +1912,8 @@ def test_mutable_pprint(self): f = StringIO() Item.pprint(ostream=f) tmp = f.getvalue().splitlines() - for i in [1,2]: - val = float(tmp[i+1].split(':')[-1].strip()) + for i in [1, 2]: + val = float(tmp[i + 1].split(':')[-1].strip()) self.assertEqual(i, val) # Test mutability of indexed @@ -1869,15 +1922,15 @@ def test_mutable_pprint(self): # different ways def test_mutable_sum_expr(self): model = ConcreteModel() - model.P = Param([1,2],default=0.0, mutable=True) - model.Q = Param([1,2],initialize=0.0, mutable=True) - model.R = Param([1,2], mutable=True) + model.P = Param([1, 2], default=0.0, mutable=True) + model.Q = Param([1, 2], initialize=0.0, mutable=True) + model.R = Param([1, 2], mutable=True) model.R[1] = 0.0 model.R[2] = 0.0 model.x = Var() - model.CON1 = Constraint(expr=model.P[1]+model.P[2]<=model.x) - model.CON2 = Constraint(expr=model.Q[1]+model.Q[2]<=model.x) - model.CON3 = Constraint(expr=model.R[1]+model.R[2]<=model.x) + model.CON1 = Constraint(expr=model.P[1] + model.P[2] <= model.x) + model.CON2 = Constraint(expr=model.Q[1] + model.Q[2] <= model.x) + model.CON3 = Constraint(expr=model.R[1] + model.R[2] <= model.x) self.assertEqual(0.0, value(model.CON1[None].lower)) self.assertEqual(0.0, value(model.CON2[None].lower)) @@ -1900,15 +1953,15 @@ def test_mutable_sum_expr(self): # different ways def test_mutable_prod_expr(self): model = ConcreteModel() - model.P = Param([1,2],initialize=0.0, mutable=True) - model.Q = Param([1,2],default=0.0, mutable=True) - model.R = Param([1,2], mutable=True) + model.P = Param([1, 2], initialize=0.0, mutable=True) + model.Q = Param([1, 2], default=0.0, mutable=True) + model.R = Param([1, 2], mutable=True) model.R[1] = 0.0 model.R[2] = 0.0 model.x = Var() - model.CON1 = Constraint(expr=model.P[1]*model.P[2]<=model.x) - model.CON2 = Constraint(expr=model.Q[1]*model.Q[2]<=model.x) - model.CON3 = Constraint(expr=model.R[1]*model.R[2]<=model.x) + model.CON1 = Constraint(expr=model.P[1] * model.P[2] <= model.x) + model.CON2 = Constraint(expr=model.Q[1] * model.Q[2] <= model.x) + model.CON3 = Constraint(expr=model.R[1] * model.R[2] <= model.x) self.assertEqual(0.0, value(model.CON1[None].lower)) self.assertEqual(0.0, value(model.CON2[None].lower)) @@ -1931,15 +1984,15 @@ def test_mutable_prod_expr(self): # different ways def test_mutable_pow_expr(self): model = ConcreteModel() - model.P = Param([1,2],initialize=0.0, mutable=True) - model.Q = Param([1,2],default=0.0, mutable=True) - model.R = Param([1,2], mutable=True) + model.P = Param([1, 2], initialize=0.0, mutable=True) + model.Q = Param([1, 2], default=0.0, mutable=True) + model.R = Param([1, 2], mutable=True) model.R[1] = 0.0 model.R[2] = 0.0 model.x = Var() - model.CON1 = Constraint(expr=model.P[1]**model.P[2]<=model.x) - model.CON2 = Constraint(expr=model.Q[1]**model.Q[2]<=model.x) - model.CON3 = Constraint(expr=model.R[1]**model.R[2]<=model.x) + model.CON1 = Constraint(expr=model.P[1] ** model.P[2] <= model.x) + model.CON2 = Constraint(expr=model.Q[1] ** model.Q[2] <= model.x) + model.CON3 = Constraint(expr=model.R[1] ** model.R[2] <= model.x) self.assertEqual(1.0, value(model.CON1[None].lower)) self.assertEqual(1.0, value(model.CON2[None].lower)) @@ -1962,15 +2015,15 @@ def test_mutable_pow_expr(self): # different ways def test_mutable_abs_expr(self): model = ConcreteModel() - model.P = Param([1,2],initialize=-1.0, mutable=True) - model.Q = Param([1,2],default=-1.0, mutable=True) - model.R = Param([1,2], mutable=True) + model.P = Param([1, 2], initialize=-1.0, mutable=True) + model.Q = Param([1, 2], default=-1.0, mutable=True) + model.R = Param([1, 2], mutable=True) model.R[1] = -1.0 model.R[2] = -1.0 model.x = Var() - model.CON1 = Constraint(expr=abs(model.P[1])<=model.x) - model.CON2 = Constraint(expr=abs(model.Q[1])<=model.x) - model.CON3 = Constraint(expr=abs(model.R[1])<=model.x) + model.CON1 = Constraint(expr=abs(model.P[1]) <= model.x) + model.CON2 = Constraint(expr=abs(model.Q[1]) <= model.x) + model.CON3 = Constraint(expr=abs(model.R[1]) <= model.x) self.assertEqual(1.0, value(model.CON1[None].lower)) self.assertEqual(1.0, value(model.CON2[None].lower)) @@ -2072,7 +2125,7 @@ def test_using_None_in_params(self): # Indexed # m = ConcreteModel() - m.p = Param([1,2], mutable=True) + m.p = Param([1, 2], mutable=True) self.assertEqual(len(m.p), 0) self.assertEqual(len(m.p._data), 0) m.p[1] = None @@ -2085,7 +2138,7 @@ def test_using_None_in_params(self): self.assertEqual(m.p[1].value, 1) m = ConcreteModel() - m.p = Param([1,2], mutable=True, initialize={1:None}) + m.p = Param([1, 2], mutable=True, initialize={1: None}) self.assertEqual(len(m.p), 1) self.assertEqual(len(m.p._data), 1) self.assertIs(m.p[1].value, None) @@ -2096,7 +2149,7 @@ def test_using_None_in_params(self): self.assertEqual(m.p[2].value, 1) m = ConcreteModel() - m.p = Param([1,2], mutable=True, default=None) + m.p = Param([1, 2], mutable=True, default=None) self.assertEqual(len(m.p), 2) self.assertEqual(len(m.p._data), 0) self.assertIs(m.p[1].value, None) @@ -2109,13 +2162,13 @@ def test_using_None_in_params(self): self.assertEqual(m.p[2].value, 1) m = ConcreteModel() - m.p = Param([1,2], mutable=False, initialize={1:None}) + m.p = Param([1, 2], mutable=False, initialize={1: None}) self.assertEqual(len(m.p), 1) self.assertEqual(len(m.p._data), 1) self.assertIs(m.p[1], None) m = ConcreteModel() - m.p = Param([1,2], mutable=False, default=None) + m.p = Param([1, 2], mutable=False, default=None) self.assertEqual(len(m.p), 2) self.assertEqual(len(m.p._data), 0) self.assertIs(m.p[1], None) @@ -2124,7 +2177,7 @@ def test_using_None_in_params(self): # Add test methods for all intrinsic functions -assignTestsIndexedParamTests(MiscIndexedParamBehaviorTests,instrinsic_test_list) +assignTestsIndexedParamTests(MiscIndexedParamBehaviorTests, intrinsic_test_list) if __name__ == "__main__": diff --git a/pyomo/core/tests/unit/test_pickle.py b/pyomo/core/tests/unit/test_pickle.py index f5444328fed..808db2e45f3 100644 --- a/pyomo/core/tests/unit/test_pickle.py +++ b/pyomo/core/tests/unit/test_pickle.py @@ -15,12 +15,24 @@ import pickle import os from os.path import abspath, dirname, join -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import platform from filecmp import cmp import pyomo.common.unittest as unittest -from pyomo.environ import AbstractModel, ConcreteModel, Set, Param, Var, Constraint, Objective, Reals, NonNegativeReals, sum_product +from pyomo.environ import ( + AbstractModel, + ConcreteModel, + Set, + Param, + Var, + Constraint, + Objective, + Reals, + NonNegativeReals, + sum_product, +) using_pypy = platform.python_implementation() == "PyPy" @@ -28,22 +40,26 @@ def obj_rule(model): return sum(model.x[a] + model.y[a] for a in model.A) -def constr_rule(model,a): + + +def constr_rule(model, a): return model.x[a] >= model.y[a] + + def simple_con_rule(model, i): return model.x <= i -class Test(unittest.TestCase): +class Test(unittest.TestCase): def verifyModel(self, ref, new): # Verify the block indices self.assertEqual(sorted(ref._data.keys()), sorted(new._data.keys())) for idx in ref._data.keys(): - self.assertEqual(type(ref._data[idx]), type(new._data[idx])) + self.assertEqual(type(ref._data[idx]), type(new._data[idx])) if idx is not None: - self.assertNotEqual(id(ref._data[idx]), id(new._data[idx])) - self.assertEqual( id(ref.solutions._instance()), id(ref) ) - self.assertEqual( id(new.solutions._instance()), id(new) ) + self.assertNotEqual(id(ref._data[idx]), id(new._data[idx])) + self.assertEqual(id(ref.solutions._instance()), id(ref)) + self.assertEqual(id(new.solutions._instance()), id(new)) # Verify the block attributes for idx in ref._data.keys(): @@ -53,10 +69,10 @@ def verifyModel(self, ref, new): # exception. ref_c = ref._data[idx].component_map() new_c = new._data[idx].component_map() - self.assertEqual( sorted(ref_c.keys()), sorted(new_c.keys()) ) + self.assertEqual(sorted(ref_c.keys()), sorted(new_c.keys())) for a in ref_c.keys(): - self.assertEqual(type(ref_c[a]), type(new_c[a])) - self.assertNotEqual(id(ref_c[a]), id(new_c[a])) + self.assertEqual(type(ref_c[a]), type(new_c[a])) + self.assertNotEqual(id(ref_c[a]), id(new_c[a])) def test_pickle_empty_abstract_model(self): model = AbstractModel() @@ -66,14 +82,14 @@ def test_pickle_empty_abstract_model(self): def test_pickle_abstract_model_set(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) + model.A = Set(initialize=[1, 2, 3]) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) def test_pickle_abstract_model_virtual_set(self): model = AbstractModel() - model._a = Set(initialize=[1,2,3]) + model._a = Set(initialize=[1, 2, 3]) model.A = model._a * model._a str = pickle.dumps(model) tmodel = pickle.loads(str) @@ -88,7 +104,7 @@ def test_pickle_abstract_model_param(self): def test_pickle_abstract_model_indexed_param(self): model = AbstractModel() - model.A = Param([1,2,3], initialize={1:100,2:200,3:300}) + model.A = Param([1, 2, 3], initialize={1: 100, 2: 200, 3: 300}) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) @@ -102,7 +118,7 @@ def test_pickle_abstract_model_mutable_param(self): def test_pickle_abstract_model_mutable_indexed_param(self): model = AbstractModel() - model.A = Param([1,2,3], initialize={1:100,3:300}, mutable=True) + model.A = Param([1, 2, 3], initialize={1: 100, 3: 300}, mutable=True) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) @@ -116,7 +132,7 @@ def test_pickle_abstract_model_var(self): def test_pickle_abstract_model_indexed_var(self): model = AbstractModel() - model.A = Var([1,2,3], initialize={1:100,2:200,3:300}) + model.A = Var([1, 2, 3], initialize={1: 100, 2: 200, 3: 300}) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) @@ -147,7 +163,7 @@ def test_pickle_abstract_model_constraint(self): def test_pickle_abstract_model_indexed_constraint(self): model = AbstractModel() model.x = Var() - model.A = Constraint([1,2,3], rule=simple_con_rule) + model.A = Constraint([1, 2, 3], rule=simple_con_rule) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) @@ -162,14 +178,14 @@ def test_pickle_empty_concrete_model(self): def test_pickle_concrete_model_set(self): model = ConcreteModel() - model.A = Set(initialize=[1,2,3]) + model.A = Set(initialize=[1, 2, 3]) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) def test_pickle_concrete_model_virtual_set(self): model = ConcreteModel() - model._a = Set(initialize=[1,2,3]) + model._a = Set(initialize=[1, 2, 3]) model.A = model._a * model._a str = pickle.dumps(model) tmodel = pickle.loads(str) @@ -184,7 +200,7 @@ def test_pickle_concrete_model_param(self): def test_pickle_concrete_model_indexed_param(self): model = ConcreteModel() - model.A = Param([1,2,3], initialize={1:100,2:200,3:300}) + model.A = Param([1, 2, 3], initialize={1: 100, 2: 200, 3: 300}) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) @@ -198,7 +214,7 @@ def test_pickle_concrete_model_mutable_param(self): def test_pickle_concrete_model_mutable_indexed_param(self): model = ConcreteModel() - model.A = Param([1,2,3], initialize={1:100,3:300}, mutable=True) + model.A = Param([1, 2, 3], initialize={1: 100, 3: 300}, mutable=True) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) @@ -212,7 +228,7 @@ def test_pickle_concrete_model_var(self): def test_pickle_concrete_model_indexed_var(self): model = ConcreteModel() - model.A = Var([1,2,3], initialize={1:100,2:200,3:300}) + model.A = Var([1, 2, 3], initialize={1: 100, 2: 200, 3: 300}) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) @@ -227,7 +243,7 @@ def test_pickle_concrete_model_constant_objective(self): def test_pickle_concrete_model_objective(self): model = ConcreteModel() model.x = Var() - model.A = Objective(expr=model.x <= 0) + model.A = Objective(expr=model.x**2) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) @@ -243,66 +259,66 @@ def test_pickle_concrete_model_constraint(self): def test_pickle_concrete_model_indexed_constraint(self): model = ConcreteModel() model.x = Var() - model.A = Constraint([1,2,3], rule=simple_con_rule) + model.A = Constraint([1, 2, 3], rule=simple_con_rule) str = pickle.dumps(model) tmodel = pickle.loads(str) self.verifyModel(model, tmodel) ########## - # tests the ability to pickle an abstract model prior to construction, # read it back it, and create an instance from it. validation is relatively # weak, in that it only tests the validity of an expression constructed # using the resulting model. def test_pickle1(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}, mutable=True) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) model.x = Var(model.A) model.y = Var(model.A) model.obj = Objective(rule=obj_rule) - model.constr = Constraint(model.A,rule=constr_rule) + model.constr = Constraint(model.A, rule=constr_rule) pickle_str = pickle.dumps(model) tmodel = pickle.loads(pickle_str) - instance=tmodel.create_instance() - expr = sum_product(instance.x,instance.B,instance.y) + instance = tmodel.create_instance() + expr = sum_product(instance.x, instance.B, instance.y) baseline = "B[1]*x[1]*y[1] + B[2]*x[2]*y[2] + B[3]*x[3]*y[3]" - self.assertEqual( str(expr), baseline ) + self.assertEqual(str(expr), baseline) # same as above, but pickles the constructed AbstractModel and # then operates on the unpickled instance. def test_pickle2(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}, mutable=True) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}, mutable=True) model.x = Var(model.A) model.y = Var(model.A) model.obj = Objective(rule=obj_rule) - model.constr = Constraint(model.A,rule=constr_rule) - tmp=model.create_instance() + model.constr = Constraint(model.A, rule=constr_rule) + tmp = model.create_instance() pickle_str = pickle.dumps(tmp) instance = pickle.loads(pickle_str) - expr = sum_product(instance.x,instance.B,instance.y) + expr = sum_product(instance.x, instance.B, instance.y) baseline = "B[1]*x[1]*y[1] + B[2]*x[2]*y[2] + B[3]*x[3]*y[3]" - self.assertEqual( str(expr), baseline ) + self.assertEqual(str(expr), baseline) # verifies that the use of lambda expressions as rules yields model instances # that are not pickle'able. def test_pickle3(self): def rule1(model): - return (1,model.x+model.y[1],2) + return (1, model.x + model.y[1], 2) + def rule2(model, i): - return (1,model.x+model.y[1]+i,2) + return (1, model.x + model.y[1] + i, 2) model = AbstractModel() - model.a = Set(initialize=[1,2,3]) + model.a = Set(initialize=[1, 2, 3]) model.A = Param(initialize=1, mutable=True) model.B = Param(model.a, mutable=True) - model.x = Var(initialize=1,within=Reals) - model.y = Var(model.a, initialize=1,within=Reals) - model.obj = Objective(rule=lambda model: model.x+model.y[1]) - model.obj2 = Objective(model.a,rule=lambda model,i: i+model.x+model.y[1]) + model.x = Var(initialize=1, within=Reals) + model.y = Var(model.a, initialize=1, within=Reals) + model.obj = Objective(rule=lambda model: model.x + model.y[1]) + model.obj2 = Objective(model.a, rule=lambda model, i: i + model.x + model.y[1]) model.con = Constraint(rule=rule1) model.con2 = Constraint(model.a, rule=rule2) instance = model.create_instance() @@ -310,9 +326,7 @@ def rule2(model, i): str_ = pickle.dumps(instance) tmp_ = pickle.loads(str_) else: - with self.assertRaises((pickle.PicklingError, - TypeError, - AttributeError)): + with self.assertRaises((pickle.PicklingError, TypeError, AttributeError)): pickle.dumps(instance) # verifies that we can print a constructed model and @@ -322,30 +336,32 @@ def rule2(model, i): # components was incorrectly and unexpectedly modifying # object state. def test_pickle4(self): - model = ConcreteModel() - model.s = Set(initialize=[1,2]) + model.s = Set(initialize=[1, 2]) model.x = Var(within=NonNegativeReals) model.x_indexed = Var(model.s, within=NonNegativeReals) model.obj = Objective(expr=model.x + model.x_indexed[1] + model.x_indexed[2]) model.con = Constraint(expr=model.x >= 1) model.con2 = Constraint(expr=model.x_indexed[1] + model.x_indexed[2] >= 4) - OUTPUT=open(join(currdir, "test_pickle4_baseline.out"), "w") + OUTPUT = open(join(currdir, "test_pickle4_baseline.out"), "w") model.pprint(ostream=OUTPUT) OUTPUT.close() - _out, _txt = join(currdir, "test_pickle4_baseline.out"), join(currdir, "test_pickle4_baseline.txt") - self.assertTrue(cmp(_out, _txt), - msg="Files %s and %s differ" % (_out, _txt)) + _out, _txt = join(currdir, "test_pickle4_baseline.out"), join( + currdir, "test_pickle4_baseline.txt" + ) + self.assertTrue(cmp(_out, _txt), msg="Files %s and %s differ" % (_out, _txt)) str = pickle.dumps(model) - OUTPUT=open(join(currdir, "test_pickle4_after.out"), "w") + OUTPUT = open(join(currdir, "test_pickle4_after.out"), "w") model.pprint(ostream=OUTPUT) OUTPUT.close() - _out, _txt = join(currdir, "test_pickle4_after.out"), join(currdir, "test_pickle4_baseline.txt") - self.assertTrue(cmp(_out, _txt), - msg="Files %s and %s differ" % (_out, _txt)) + _out, _txt = join(currdir, "test_pickle4_after.out"), join( + currdir, "test_pickle4_baseline.txt" + ) + self.assertTrue(cmp(_out, _txt), msg="Files %s and %s differ" % (_out, _txt)) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_piecewise.py b/pyomo/core/tests/unit/test_piecewise.py index 9be9503843b..aeb02b82624 100644 --- a/pyomo/core/tests/unit/test_piecewise.py +++ b/pyomo/core/tests/unit/test_piecewise.py @@ -14,25 +14,28 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest -from pyomo.environ import AbstractModel, ConcreteModel, Set, Var, Piecewise, Constraint +from pyomo.environ import AbstractModel, ConcreteModel, Set, Var, Piecewise, Constraint -class TestMiscPiecewise(unittest.TestCase): +class TestMiscPiecewise(unittest.TestCase): # test that activate and deactivate work def test_activate_deactivate_indexed(self): model = ConcreteModel() model.s = Set(initialize=[1]) model.y = Var(model.s) - model.x = Var(model.s,bounds=(-1,1)) - args = ([1],model.y,model.x) - keywords = {'pw_pts':{1:[-1,0,1]},\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,i,x: x**2} - model.c = Piecewise(*args,**keywords) + model.x = Var(model.s, bounds=(-1, 1)) + args = ([1], model.y, model.x) + keywords = { + 'pw_pts': {1: [-1, 0, 1]}, + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, i, x: x**2, + } + model.c = Piecewise(*args, **keywords) self.assertTrue(len(model.c[1].component_map(Constraint)) > 0) self.assertTrue(len(model.c[1].component_map(Constraint, active=True)) > 0) self.assertEqual(model.c.active, True) @@ -57,12 +60,14 @@ def test_activate_deactivate_indexed(self): def test_activate_deactivate_nonindexed(self): model = ConcreteModel() model.y = Var() - model.x = Var(bounds=(-1,1)) - args = (model.y,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.c = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.y, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.c = Piecewise(*args, **keywords) self.assertTrue(len(model.c.component_map(Constraint)) > 0) self.assertTrue(len(model.c.component_map(Constraint, active=True)) > 0) self.assertEqual(model.c.active, True) @@ -81,20 +86,22 @@ def test_indexed_with_nonindexed_vars(self): model = ConcreteModel() model.range1 = Var() - model.x = Var(bounds=(-1,1)) - args = ([1],model.range1,model.x) - keywords = {'pw_pts':{1:[-1,0,1]},\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,i,x: x**2} - model.con1 = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = ([1], model.range1, model.x) + keywords = { + 'pw_pts': {1: [-1, 0, 1]}, + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, i, x: x**2, + } + model.con1 = Piecewise(*args, **keywords) model.range2 = Var([1]) - model.y = Var([1],bounds=(-1,1)) - args = ([1],model.range2,model.y) - model.con2 = Piecewise(*args,**keywords) + model.y = Var([1], bounds=(-1, 1)) + args = ([1], model.range2, model.y) + model.con2 = Piecewise(*args, **keywords) - args = ([1],model.range2,model.y[1]) - model.con3 = Piecewise(*args,**keywords) + args = ([1], model.range2, model.y[1]) + model.con3 = Piecewise(*args, **keywords) # test that nonindexed Piecewise can handle # _VarData (e.g model.x[1] @@ -102,12 +109,14 @@ def test_nonindexed_with_indexed_vars(self): model = ConcreteModel() model.range = Var([1]) - model.x = Var([1],bounds=(-1,1)) - args = (model.range[1],model.x[1]) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con1 = Piecewise(*args,**keywords) + model.x = Var([1], bounds=(-1, 1)) + args = (model.range[1], model.x[1]) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con1 = Piecewise(*args, **keywords) # test that Piecewise can be initialized on # an abstract model @@ -115,12 +124,14 @@ def test_abstract_piecewise(self): model = AbstractModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) instance = model.create_instance() # test that Piecewise can be initialized on @@ -129,17 +140,17 @@ def test_concrete_piecewise(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) class TestInvalidPiecewise(unittest.TestCase): - - # test the that Piecewise component raises # an exception when the LOG/DLOG reps # are requested without a (2^n)+1 length @@ -148,23 +159,27 @@ def test_dlog_bad_length(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'pw_repn':'DLOG',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'pw_repn': 'DLOG', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) try: - keywords['pw_pts'] = [-1,0,0.5,1] - model.con3 = Piecewise(*args,**keywords) + keywords['pw_pts'] = [-1, 0, 0.5, 1] + model.con3 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with DLOG an pw_pts list with length not "\ - "equal to (2^n)+1.") + self.fail( + "Piecewise should fail when initialized " + "with DLOG an pw_pts list with length not " + "equal to (2^n)+1." + ) # test the that Piecewise component raises # an exception when the LOG/DLOG reps @@ -174,23 +189,27 @@ def test_log_bad_length(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'pw_repn':'LOG',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'pw_repn': 'LOG', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) try: - keywords['pw_pts'] = [-1,0,0.5,1] - model.con3 = Piecewise(*args,**keywords) + keywords['pw_pts'] = [-1, 0, 0.5, 1] + model.con3 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with LOG an pw_pts list with length not "\ - "equal to (2^n)+1.") + self.fail( + "Piecewise should fail when initialized " + "with LOG an pw_pts list with length not " + "equal to (2^n)+1." + ) # test the that Piecewise component raises # an exception with an unsorted list of @@ -199,21 +218,24 @@ def test_unsorted_pw_pts(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) try: - keywords['pw_pts'] = [0,-1,1] - model.con3 = Piecewise(*args,**keywords) + keywords['pw_pts'] = [0, -1, 1] + model.con3 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with unsorted domain points.") + self.fail( + "Piecewise should fail when initialized with unsorted domain points." + ) # test the that Piecewise component raises # an exception when initialized without @@ -222,39 +244,47 @@ def test_bad_f_rules(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) try: del keywords['f_rule'] - model.con1 = Piecewise(*args,**keywords) + model.con1 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "without a proper f_rule keyword.") + self.fail( + "Piecewise should fail when initialized " + "without a proper f_rule keyword." + ) try: keywords['f_rule'] = None - model.con2 = Piecewise(*args,**keywords) + model.con2 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "without a proper f_rule keyword.") + self.fail( + "Piecewise should fail when initialized " + "without a proper f_rule keyword." + ) try: keywords['f_rule'] = model.x - model.con3 = Piecewise(*args,**keywords) + model.con3 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "without a proper f_rule keyword.") + self.fail( + "Piecewise should fail when initialized " + "without a proper f_rule keyword." + ) # test the that Piecewise component raises # an exception if the domain variable arguments @@ -263,29 +293,35 @@ def test_bad_var_args(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) try: - args = (None,model.x) - model.con1 = Piecewise(*args,**keywords) + args = (None, model.x) + model.con1 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "without Pyomo vars as variable args.") + self.fail( + "Piecewise should fail when initialized " + "without Pyomo vars as variable args." + ) try: - args = (model.range,None) - model.con2 = Piecewise(*args,**keywords) + args = (model.range, None) + model.con2 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "without Pyomo vars as variable args.") + self.fail( + "Piecewise should fail when initialized " + "without Pyomo vars as variable args." + ) # test the that Piecewise component raises # an exception if the piecewise bound type is @@ -294,28 +330,28 @@ def test_bad_bound_type(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) try: keywords['pw_constr_type'] = 1.0 - model.con1 = Piecewise(*args,**keywords) + model.con1 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with invalid bound type.") + self.fail("Piecewise should fail when initialized with invalid bound type.") try: del keywords['pw_constr_type'] - model.con1 = Piecewise(*args,**keywords) + model.con1 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with invalid bound type.") + self.fail("Piecewise should fail when initialized with invalid bound type.") # test the that Piecewise component raises # an exception if the piecewise representation @@ -324,20 +360,24 @@ def test_bad_repn(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) try: keywords['pw_repn'] = 1.0 - model.con1 = Piecewise(*args,**keywords) + model.con1 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with invalid piecewise representation.") + self.fail( + "Piecewise should fail when initialized " + "with invalid piecewise representation." + ) # test the that Piecewise component raises # an exception if the warning_tol @@ -346,20 +386,23 @@ def test_bad_warning_tol(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) try: keywords['warning_tol'] = None - model.con1 = Piecewise(*args,**keywords) + model.con1 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with invalid warning_tol.") + self.fail( + "Piecewise should fail when initialized with invalid warning_tol." + ) # test the that Piecewise component raises # an exception if initialized with an invalid @@ -368,20 +411,24 @@ def test_bad_args_count(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) try: args = (model.range,) - model.con1 = Piecewise(*args,**keywords) + model.con1 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with less than two arguments.") + self.fail( + "Piecewise should fail when initialized " + "with less than two arguments." + ) # test the that Piecewise component raises # an exception if initialized with an unbounded @@ -390,55 +437,65 @@ def test_unbounded_var(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) try: model.x.setlb(None) model.x.setub(None) - model.con1 = Piecewise(*args,**keywords) + model.con1 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with unbounded domain variable.") + self.fail( + "Piecewise should fail when initialized " + "with unbounded domain variable." + ) # Check that the unbounded_domain_var keyword works - model.con1 = Piecewise(unbounded_domain_var=True,*args,**keywords) - - model.y = Var(bounds=(0,None)) + model.con1 = Piecewise(unbounded_domain_var=True, *args, **keywords) + + model.y = Var(bounds=(0, None)) try: - args = (model.range,model.y) - model.con2 = Piecewise(*args,**keywords) + args = (model.range, model.y) + model.con2 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with unbounded domain variable.") - - model.z = Var(bounds=(None,10)) + self.fail( + "Piecewise should fail when initialized " + "with unbounded domain variable." + ) + + model.z = Var(bounds=(None, 10)) try: - args = (model.range,model.z) - model.con3 = Piecewise(*args,**keywords) + args = (model.range, model.z) + model.con3 = Piecewise(*args, **keywords) except Exception: pass else: - self.fail("Piecewise should fail when initialized "\ - "with unbounded domain variable.") + self.fail( + "Piecewise should fail when initialized " + "with unbounded domain variable." + ) def test_len(self): model = AbstractModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) self.assertEqual(len(model.con), 0) instance = model.create_instance() self.assertEqual(len(instance.con), 1) @@ -447,17 +504,16 @@ def test_None_key(self): model = ConcreteModel() model.range = Var() - model.x = Var(bounds=(-1,1)) - args = (model.range,model.x) - keywords = {'pw_pts':[-1,0,1],\ - 'pw_constr_type':'EQ',\ - 'f_rule':lambda model,x: x**2} - model.con = Piecewise(*args,**keywords) + model.x = Var(bounds=(-1, 1)) + args = (model.range, model.x) + keywords = { + 'pw_pts': [-1, 0, 1], + 'pw_constr_type': 'EQ', + 'f_rule': lambda model, x: x**2, + } + model.con = Piecewise(*args, **keywords) self.assertEqual(id(model.con), id(model.con[None])) - if __name__ == "__main__": unittest.main() - - diff --git a/pyomo/core/tests/unit/test_preprocess.py b/pyomo/core/tests/unit/test_preprocess.py index 6428178b711..d4c5ae75bb0 100644 --- a/pyomo/core/tests/unit/test_preprocess.py +++ b/pyomo/core/tests/unit/test_preprocess.py @@ -14,37 +14,39 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest from pyomo.environ import AbstractModel, Set, Param, Var, Objective -class TestPreprocess(unittest.TestCase): +class TestPreprocess(unittest.TestCase): def Xtest_label1(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}) model.x = Var(model.A) model.y = Var(model.A) - instance=model.create_instance() + instance = model.create_instance() instance.preprocess() - self.assertEqual(instance.num_used_variables(),0) + self.assertEqual(instance.num_used_variables(), 0) def Xtest_label2(self): model = AbstractModel() - model.A = Set(initialize=[1,2,3]) - model.B = Param(model.A,initialize={1:100,2:200,3:300}) + model.A = Set(initialize=[1, 2, 3]) + model.B = Param(model.A, initialize={1: 100, 2: 200, 3: 300}) model.x = Var(model.A) model.y = Var(model.A) model.obj = Objective(rule=lambda inst: inst.x[1]) - instance=model.create_instance() + instance = model.create_instance() instance.preprocess() - self.assertEqual(instance.num_used_variables(),1) - self.assertEqual(instance.x[1].label,"x(1)") - self.assertEqual(instance.x[2].label,"x(2)") - self.assertEqual(instance.y[1].label,"y(1)") + self.assertEqual(instance.num_used_variables(), 1) + self.assertEqual(instance.x[1].label, "x(1)") + self.assertEqual(instance.x[2].label, "x(2)") + self.assertEqual(instance.y[1].label, "y(1)") + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_range.py b/pyomo/core/tests/unit/test_range.py index 2bb919f9cf0..8cd1e7ce46c 100644 --- a/pyomo/core/tests/unit/test_range.py +++ b/pyomo/core/tests/unit/test_range.py @@ -14,15 +14,17 @@ import pyomo.common.unittest as unittest from pyomo.core.base.range import ( - NumericRange as NR, NonNumericRange as NNR, RangeProduct as RP, - AnyRange, RangeDifferenceError -) -from pyomo.core.base.set import ( - Any + NumericRange as NR, + NonNumericRange as NNR, + RangeProduct as RP, + AnyRange, + RangeDifferenceError, ) +from pyomo.core.base.set import Any _inf = float('inf') + class TestNumericRange(unittest.TestCase): def test_init(self): a = NR(None, None, 0) @@ -46,10 +48,10 @@ def test_init(self): self.assertEqual(a.step, 0) with self.assertRaisesRegex( - ValueError, '.*start must be <= end for continuous ranges'): + ValueError, '.*start must be <= end for continuous ranges' + ): NR(0, -1, 0) - with self.assertRaisesRegex(ValueError, '.*start must not be None'): NR(None, None, 1) @@ -57,18 +59,18 @@ def test_init(self): NR(None, None, 1.5) with self.assertRaisesRegex( - ValueError, - '.*start, end ordering incompatible with step direction'): + ValueError, '.*start, end ordering incompatible with step direction' + ): NR(0, 1, -1) with self.assertRaisesRegex( - ValueError, - '.*start, end ordering incompatible with step direction'): + ValueError, '.*start, end ordering incompatible with step direction' + ): NR(1, 0, 1) with self.assertRaisesRegex( - ValueError, - r'\[0:1\] is discrete, but passed closed=\(False, True\)'): + ValueError, r'\[0:1\] is discrete, but passed closed=\(False, True\)' + ): NR(0, 1, 1, "(]") a = NR(0, None, 1) @@ -107,11 +109,13 @@ def test_init(self): self.assertEqual(a.step, 1) with self.assertRaisesRegex( - ValueError, '.*start, end ordering incompatible with step'): + ValueError, '.*start, end ordering incompatible with step' + ): NR(0, -1, 1) with self.assertRaisesRegex( - ValueError, '.*start, end ordering incompatible with step'): + ValueError, '.*start, end ordering incompatible with step' + ): NR(0, 1, -2) def test_str(self): @@ -227,52 +231,67 @@ def test_contains(self): self.assertNotIn(['z'], NR(0, None, 0)) self.assertIn([1], NR(0, None, 0)) self.assertNotIn([-1], NR(0, None, 0)) - self.assertIn({0:1}, NR(0, None, 0)) - self.assertNotIn({0:-1}, NR(0, None, 0)) - self.assertNotIn({1:1}, NR(0, None, 0)) + self.assertIn({0: 1}, NR(0, None, 0)) + self.assertNotIn({0: -1}, NR(0, None, 0)) + self.assertNotIn({1: 1}, NR(0, None, 0)) class _Unrelated(object): pass + self.assertNotIn(_Unrelated(), NR(0, None, 0)) self.assertNotIn(_Unrelated, NR._types_comparable_to_int) class _NonComparable(_Unrelated): def __init__(self, val): self.val = val + def __sub__(self, other): return self + def __gt__(self, other): return True + def __le__(self, other): return True + self.assertNotIn(_NonComparable(1), NR(0, None, 0)) self.assertNotIn(_NonComparable, NR._types_comparable_to_int) class _NotCastable(_NonComparable): def __lt__(self, other): return True + def __eq__(self, other): return True + def __ne__(self, other): return True + self.assertNotIn(_NotCastable(1), NR(0, None, 0)) self.assertNotIn(_NotCastable, NR._types_comparable_to_int) class _Custom(object): def __init__(self, val): self.val = val + def __lt__(self, other): return self.val < other + def __gt__(self, other): return self.val > other + def __le__(self, other): return self.val <= other + def __ge__(self, other): return self.val >= other + def __eq__(self, other): return self.val == other + def __sub__(self, other): return self.val - other + self.assertIn(_Custom(1), NR(0, None, 0)) self.assertIn(_Custom, NR._types_comparable_to_int) NR._types_comparable_to_int.discard(_Custom) @@ -297,8 +316,8 @@ def _isdisjoint(expected_result, a, b): _isdisjoint(False, NR(0, 1, 0), NR(-1, 1, 0)) _isdisjoint(False, NR(0, 1, 0), NR(-1, 2, 0)) - _isdisjoint(True, NR(0, 1, 0, (True,False)), NR(1, 2, 0)) - _isdisjoint(True, NR(0, 1, 0, (False,True)), NR(-1, 0, 0)) + _isdisjoint(True, NR(0, 1, 0, (True, False)), NR(1, 2, 0)) + _isdisjoint(True, NR(0, 1, 0, (False, True)), NR(-1, 0, 0)) # # Continuous to discrete ranges (positive step) @@ -323,18 +342,18 @@ def _isdisjoint(expected_result, a, b): _isdisjoint(True, NR(0.1, 0.9, 0), NR(-1, 1, 1)) _isdisjoint(True, NR(0.1, 0.9, 0), NR(-1, 2, 1)) - _isdisjoint(False, NR(-.1, 1.1, 0), NR(-1, 2, 1)) - _isdisjoint(False, NR(-.1, 1.1, 0), NR(-2, 0, 2)) - _isdisjoint(True, NR(-.1, 1.1, 0), NR(-1, -1, 1)) - _isdisjoint(True, NR(-.1, 1.1, 0), NR(-2, -1, 1)) + _isdisjoint(False, NR(-0.1, 1.1, 0), NR(-1, 2, 1)) + _isdisjoint(False, NR(-0.1, 1.1, 0), NR(-2, 0, 2)) + _isdisjoint(True, NR(-0.1, 1.1, 0), NR(-1, -1, 1)) + _isdisjoint(True, NR(-0.1, 1.1, 0), NR(-2, -1, 1)) # (additional edge cases) - _isdisjoint(False, NR(0, 1, 0, closed=(True,True)), NR(-1, 2, 1)) - _isdisjoint(False, NR(0, 1, 0, closed=(True,False)), NR(-1, 2, 1)) - _isdisjoint(False, NR(0, 1, 0, closed=(False,True)), NR(-1, 2, 1)) - _isdisjoint(True, NR(0, 1, 0, closed=(False,False)), NR(-1, 2, 1)) - _isdisjoint(True, NR(0.1, 1, 0, closed=(True,False)), NR(-1, 2, 1)) - _isdisjoint(True, NR(0, 0.9, 0, closed=(False,True)), NR(-1, 2, 1)) + _isdisjoint(False, NR(0, 1, 0, closed=(True, True)), NR(-1, 2, 1)) + _isdisjoint(False, NR(0, 1, 0, closed=(True, False)), NR(-1, 2, 1)) + _isdisjoint(False, NR(0, 1, 0, closed=(False, True)), NR(-1, 2, 1)) + _isdisjoint(True, NR(0, 1, 0, closed=(False, False)), NR(-1, 2, 1)) + _isdisjoint(True, NR(0.1, 1, 0, closed=(True, False)), NR(-1, 2, 1)) + _isdisjoint(True, NR(0, 0.9, 0, closed=(False, True)), NR(-1, 2, 1)) _isdisjoint(False, NR(0, 0.99, 0), NR(-1, 1, 1)) _isdisjoint(True, NR(0.001, 0.99, 0), NR(-1, 1, 1)) @@ -363,15 +382,15 @@ def _isdisjoint(expected_result, a, b): # # Discrete to discrete sets # - _isdisjoint(False, NR(0,10,2), NR(2,10,2)) - _isdisjoint(True, NR(0,10,2), NR(1,10,2)) + _isdisjoint(False, NR(0, 10, 2), NR(2, 10, 2)) + _isdisjoint(True, NR(0, 10, 2), NR(1, 10, 2)) - _isdisjoint(False, NR(0,50,5), NR(0,50,7)) - _isdisjoint(False, NR(0,34,5), NR(0,34,7)) - _isdisjoint(False, NR(5,50,5), NR(7,50,7)) - _isdisjoint(True, NR(5,34,5), NR(7,34,7)) - _isdisjoint(False, NR(5,50,5), NR(49,7,-7)) - _isdisjoint(True, NR(5,34,5), NR(28,7,-7)) + _isdisjoint(False, NR(0, 50, 5), NR(0, 50, 7)) + _isdisjoint(False, NR(0, 34, 5), NR(0, 34, 7)) + _isdisjoint(False, NR(5, 50, 5), NR(7, 50, 7)) + _isdisjoint(True, NR(5, 34, 5), NR(7, 34, 7)) + _isdisjoint(False, NR(5, 50, 5), NR(49, 7, -7)) + _isdisjoint(True, NR(5, 34, 5), NR(28, 7, -7)) _isdisjoint(True, NR(0.25, 10, 1), NR(0.5, 20, 1)) _isdisjoint(True, NR(0.25, 10, 1), NR(0.5, 20, 2)) @@ -416,29 +435,29 @@ def test_issubset(self): self.assertFalse(NR(None, 0, 0).issubset(NR(None, -1, 0))) self.assertFalse(NR(None, 0, 0).issubset(NR(0, None, 0))) - B = True,True - self.assertTrue(NR(0,1,0,(True,True)).issubset(NR(0,1,0,B))) - self.assertTrue(NR(0,1,0,(True,False)).issubset(NR(0,1,0,B))) - self.assertTrue(NR(0,1,0,(False,True)).issubset(NR(0,1,0,B))) - self.assertTrue(NR(0,1,0,(False,False)).issubset(NR(0,1,0,B))) - - B = True,False - self.assertFalse(NR(0,1,0,(True,True)).issubset(NR(0,1,0,B))) - self.assertTrue(NR(0,1,0,(True,False)).issubset(NR(0,1,0,B))) - self.assertFalse(NR(0,1,0,(False,True)).issubset(NR(0,1,0,B))) - self.assertTrue(NR(0,1,0,(False,False)).issubset(NR(0,1,0,B))) - - B = False,True - self.assertFalse(NR(0,1,0,(True,True)).issubset(NR(0,1,0,B))) - self.assertFalse(NR(0,1,0,(True,False)).issubset(NR(0,1,0,B))) - self.assertTrue(NR(0,1,0,(False,True)).issubset(NR(0,1,0,B))) - self.assertTrue(NR(0,1,0,(False,False)).issubset(NR(0,1,0,B))) - - B = False,False - self.assertFalse(NR(0,1,0,(True,True)).issubset(NR(0,1,0,B))) - self.assertFalse(NR(0,1,0,(True,False)).issubset(NR(0,1,0,B))) - self.assertFalse(NR(0,1,0,(False,True)).issubset(NR(0,1,0,B))) - self.assertTrue(NR(0,1,0,(False,False)).issubset(NR(0,1,0,B))) + B = True, True + self.assertTrue(NR(0, 1, 0, (True, True)).issubset(NR(0, 1, 0, B))) + self.assertTrue(NR(0, 1, 0, (True, False)).issubset(NR(0, 1, 0, B))) + self.assertTrue(NR(0, 1, 0, (False, True)).issubset(NR(0, 1, 0, B))) + self.assertTrue(NR(0, 1, 0, (False, False)).issubset(NR(0, 1, 0, B))) + + B = True, False + self.assertFalse(NR(0, 1, 0, (True, True)).issubset(NR(0, 1, 0, B))) + self.assertTrue(NR(0, 1, 0, (True, False)).issubset(NR(0, 1, 0, B))) + self.assertFalse(NR(0, 1, 0, (False, True)).issubset(NR(0, 1, 0, B))) + self.assertTrue(NR(0, 1, 0, (False, False)).issubset(NR(0, 1, 0, B))) + + B = False, True + self.assertFalse(NR(0, 1, 0, (True, True)).issubset(NR(0, 1, 0, B))) + self.assertFalse(NR(0, 1, 0, (True, False)).issubset(NR(0, 1, 0, B))) + self.assertTrue(NR(0, 1, 0, (False, True)).issubset(NR(0, 1, 0, B))) + self.assertTrue(NR(0, 1, 0, (False, False)).issubset(NR(0, 1, 0, B))) + + B = False, False + self.assertFalse(NR(0, 1, 0, (True, True)).issubset(NR(0, 1, 0, B))) + self.assertFalse(NR(0, 1, 0, (True, False)).issubset(NR(0, 1, 0, B))) + self.assertFalse(NR(0, 1, 0, (False, True)).issubset(NR(0, 1, 0, B))) + self.assertTrue(NR(0, 1, 0, (False, False)).issubset(NR(0, 1, 0, B))) # Continuous - discrete self.assertTrue(NR(0, None, 1).issubset(NR(None, None, 0))) @@ -481,149 +500,115 @@ def test_issubset(self): self.assertFalse(NR(15, 15, 0).issubset(NR(0, 10, 1))) def test_lcm(self): + self.assertEqual(NR(None, None, 0)._step_lcm((NR(0, 1, 0),)), 0) + self.assertEqual(NR(None, None, 0)._step_lcm((NR(0, 0, 0),)), 1) + self.assertEqual(NR(0, None, 3)._step_lcm((NR(0, None, 1),)), 3) + self.assertEqual(NR(0, None, 3)._step_lcm((NR(0, None, 0),)), 3) + self.assertEqual(NR(0, None, 0)._step_lcm((NR(0, None, 1),)), 1) + self.assertEqual(NR(0, None, 3)._step_lcm((NR(0, None, 2),)), 6) + self.assertEqual(NR(0, None, 18)._step_lcm((NR(0, None, 12),)), 36) + self.assertEqual(NR(0, None, 3)._step_lcm((NR(0, None, 2), NR(0, None, 5))), 30) self.assertEqual( - NR(None,None,0)._step_lcm((NR(0,1,0),)), - 0 - ) - self.assertEqual( - NR(None,None,0)._step_lcm((NR(0,0,0),)), - 1 - ) - self.assertEqual( - NR(0,None,3)._step_lcm((NR(0,None,1),)), - 3 - ) - self.assertEqual( - NR(0,None,3)._step_lcm((NR(0,None,0),)), - 3 - ) - self.assertEqual( - NR(0,None,0)._step_lcm((NR(0,None,1),)), - 1 - ) - self.assertEqual( - NR(0,None,3)._step_lcm((NR(0,None,2),)), - 6 - ) - self.assertEqual( - NR(0,None,18)._step_lcm((NR(0,None,12),)), - 36 - ) - self.assertEqual( - NR(0,None,3)._step_lcm((NR(0,None,2),NR(0,None,5))), - 30 - ) - self.assertEqual( - NR(0,None,3)._step_lcm((NR(0,None,2),NR(0,None,10))), - 30 + NR(0, None, 3)._step_lcm((NR(0, None, 2), NR(0, None, 10))), 30 ) def test_range_difference(self): self.assertEqual( - NR(0,None,1).range_difference([NR(1,None,0)]), - [NR(0,0,0)], + NR(0, None, 1).range_difference([NR(1, None, 0)]), [NR(0, 0, 0)] ) self.assertEqual( - NR(0,None,1).range_difference([NR(0,0,0)]), - [NR(1,None,1)], + NR(0, None, 1).range_difference([NR(0, 0, 0)]), [NR(1, None, 1)] ) self.assertEqual( - NR(0,None,2).range_difference([NR(10,None,3)]), - [NR(0,None,6), NR(2,None,6), NR(4,4,0)], + NR(0, None, 2).range_difference([NR(10, None, 3)]), + [NR(0, None, 6), NR(2, None, 6), NR(4, 4, 0)], ) with self.assertRaisesRegex(ValueError, "Unknown range type, list"): - NR(0,None,0).range_difference([[0]]) + NR(0, None, 0).range_difference([[0]]) # test relatively prime ranges that don't expand to all offsets self.assertEqual( - NR(0,7,2).range_difference([NR(6,None,10)]), - [NR(0,0,0), NR(2,2,0), NR(4,4,0)], + NR(0, 7, 2).range_difference([NR(6, None, 10)]), + [NR(0, 0, 0), NR(2, 2, 0), NR(4, 4, 0)], ) # test ranges running in the other direction self.assertEqual( - NR(10,0,-1).range_difference([NR(7,4,-2)]), - [NR(10,0,-2), NR(1,3,2), NR(9,9,0)], + NR(10, 0, -1).range_difference([NR(7, 4, -2)]), + [NR(10, 0, -2), NR(1, 3, 2), NR(9, 9, 0)], ) self.assertEqual( - NR(0,None,-1).range_difference([NR(-10,10,0)]), - [NR(-11,None,-1)], + NR(0, None, -1).range_difference([NR(-10, 10, 0)]), [NR(-11, None, -1)] ) # Test non-overlapping ranges - self.assertEqual( - NR(0,4,0).range_difference([NR(5,10,0)]), - [NR(0,4,0)], - ) - self.assertEqual( - NR(5,10,0).range_difference([NR(0,4,0)]), - [NR(5,10,0)], - ) + self.assertEqual(NR(0, 4, 0).range_difference([NR(5, 10, 0)]), [NR(0, 4, 0)]) + self.assertEqual(NR(5, 10, 0).range_difference([NR(0, 4, 0)]), [NR(5, 10, 0)]) # Test continuous ranges # Subtracting a closed range from a closed range should # result in an open range. self.assertEqual( - NR(0,None,0).range_difference([NR(5,None,0)]), - [NR(0,5,0,'[)')], + NR(0, None, 0).range_difference([NR(5, None, 0)]), [NR(0, 5, 0, '[)')] ) self.assertEqual( - NR(0,None,0).range_difference([NR(5,10,0)]), - [NR(0,5,0,'[)'), NR(10,None,0,'(]')], + NR(0, None, 0).range_difference([NR(5, 10, 0)]), + [NR(0, 5, 0, '[)'), NR(10, None, 0, '(]')], ) self.assertEqual( - NR(None,0,0).range_difference([NR(-5,None,0)]), - [NR(None,-5,0,'[)')], + NR(None, 0, 0).range_difference([NR(-5, None, 0)]), [NR(None, -5, 0, '[)')] ) self.assertEqual( - NR(None,0,0).range_difference([NR(-5,0,0,'[]')]), - [NR(None,-5,0,'[)')], + NR(None, 0, 0).range_difference([NR(-5, 0, 0, '[]')]), + [NR(None, -5, 0, '[)')], ) self.assertEqual( - NR(None,0,0).range_difference([NR(-5,0,0,'[)')]), - [NR(None,-5,0,'[)'), NR(0,0,0)], + NR(None, 0, 0).range_difference([NR(-5, 0, 0, '[)')]), + [NR(None, -5, 0, '[)'), NR(0, 0, 0)], ) self.assertEqual( - NR(0,10,0).range_difference([NR(None,5,0,'[)')]), - [NR(5,10,0,'[]')], + NR(0, 10, 0).range_difference([NR(None, 5, 0, '[)')]), [NR(5, 10, 0, '[]')] ) # Subtracting an open range from a closed range gives a closed # range self.assertEqual( - NR(0,None,0).range_difference([NR(5,10,0,'()')]), - [NR(0,5,0,'[]'), NR(10,None,0,'[]')], + NR(0, None, 0).range_difference([NR(5, 10, 0, '()')]), + [NR(0, 5, 0, '[]'), NR(10, None, 0, '[]')], ) # Subtracting a discrete range from a continuous range gives a # set of open continuous ranges self.assertEqual( - NR(None,None,0).range_difference([NR(5,10,5)]), - [NR(None,5,0,'[)'), NR(5,10,0,'()'), NR(10,None,0,'(]')], + NR(None, None, 0).range_difference([NR(5, 10, 5)]), + [NR(None, 5, 0, '[)'), NR(5, 10, 0, '()'), NR(10, None, 0, '(]')], ) self.assertEqual( - NR(-10,20,0).range_difference([NR(5,10,5)]), - [NR(-10,5,0,'[)'), NR(5,10,0,'()'), NR(10,20,0,'(]')], + NR(-10, 20, 0).range_difference([NR(5, 10, 5)]), + [NR(-10, 5, 0, '[)'), NR(5, 10, 0, '()'), NR(10, 20, 0, '(]')], ) self.assertEqual( - NR(-10,20,0,"()").range_difference([NR(5,10,5)]), - [NR(-10,5,0,'()'), NR(5,10,0,'()'), NR(10,20,0,'()')], + NR(-10, 20, 0, "()").range_difference([NR(5, 10, 5)]), + [NR(-10, 5, 0, '()'), NR(5, 10, 0, '()'), NR(10, 20, 0, '()')], ) self.assertEqual( - NR(-3,3,0).range_difference([NR(0,None,5),NR(0,None,-5)]), - [NR(-3,0,0,'[)'), NR(0,3,0,'(]')], + NR(-3, 3, 0).range_difference([NR(0, None, 5), NR(0, None, -5)]), + [NR(-3, 0, 0, '[)'), NR(0, 3, 0, '(]')], ) # Disjoint ranges... a = NR(0.25, 10, 1) self.assertEqual(a.range_difference([NR(0.5, 20, 1)]), [a]) - self.assertEqual(a.range_difference([NR(0.5, 20, 2)]), - [NR(0.25, 8.25, 2), NR(1.25, 9.25, 2)]) + self.assertEqual( + a.range_difference([NR(0.5, 20, 2)]), [NR(0.25, 8.25, 2), NR(1.25, 9.25, 2)] + ) a = NR(0, 100, 2) - self.assertEqual(a.range_difference([NR(1, 100, 4)]), - [NR(0, 100, 4), NR(2, 98, 4)]) + self.assertEqual( + a.range_difference([NR(1, 100, 4)]), [NR(0, 100, 4), NR(2, 98, 4)] + ) a = NR(0, None, 2) - self.assertEqual(a.range_difference([NR(1, None, 4)]), - [NR(0, None, 4), NR(2, None, 4)]) + self.assertEqual( + a.range_difference([NR(1, None, 4)]), [NR(0, None, 4), NR(2, None, 4)] + ) a = NR(0.25, None, 1) self.assertEqual(a.range_difference([NR(0.5, None, 1)]), [a]) @@ -631,75 +616,58 @@ def test_range_difference(self): a = NR(None, None, 0) self.assertEqual( a.range_difference([NR(None, None, 0, "()")]), - [NR(-_inf, -_inf, 0), NR(_inf, _inf, 0)]) + [NR(-_inf, -_inf, 0), NR(_inf, _inf, 0)], + ) self.assertEqual( - a.range_difference([NR(None, None, 0, "()"), - NR(None, None, 0, "[)")]), - [NR(_inf, _inf, 0)]) + a.range_difference([NR(None, None, 0, "()"), NR(None, None, 0, "[)")]), + [NR(_inf, _inf, 0)], + ) # And the one thing we don't support: with self.assertRaisesRegex( - RangeDifferenceError, 'We do not support subtracting an ' - r'infinite discrete range \[0:inf\] from an infinite ' - r'continuous range \[-inf..inf\]'): - NR(None,None,0).range_difference([NR(0,None,1)]) + RangeDifferenceError, + 'We do not support subtracting an ' + r'infinite discrete range \[0:inf\] from an infinite ' + r'continuous range \[-inf..inf\]', + ): + NR(None, None, 0).range_difference([NR(0, None, 1)]) def test_range_intersection(self): self.assertEqual( - NR(0,None,1).range_intersection([NR(1,None,0)]), - [NR(1,None,1)], + NR(0, None, 1).range_intersection([NR(1, None, 0)]), [NR(1, None, 1)] ) self.assertEqual( - NR(0,None,1).range_intersection([NR(0,0,0)]), - [NR(0,0,0)], + NR(0, None, 1).range_intersection([NR(0, 0, 0)]), [NR(0, 0, 0)] ) self.assertEqual( - NR(0,None,1).range_intersection([NR(0.5,1.5,0)]), - [NR(1,1,0)], + NR(0, None, 1).range_intersection([NR(0.5, 1.5, 0)]), [NR(1, 1, 0)] ) self.assertEqual( - NR(0,None,2).range_intersection([NR(1,None,3)]), - [NR(4,None,6)], + NR(0, None, 2).range_intersection([NR(1, None, 3)]), [NR(4, None, 6)] ) with self.assertRaisesRegex(ValueError, "Unknown range type, list"): - NR(0,None,0).range_intersection([[0]]) + NR(0, None, 0).range_intersection([[0]]) # Test non-overlapping ranges - self.assertEqual( - NR(0,4,0).range_intersection([NR(5,10,0)]), - [], - ) - self.assertEqual( - NR(5,10,0).range_intersection([NR(0,4,0)]), - [], - ) - self.assertEqual( - NR(0,4,0).range_intersection([NNR('a')]), - [], - ) + self.assertEqual(NR(0, 4, 0).range_intersection([NR(5, 10, 0)]), []) + self.assertEqual(NR(5, 10, 0).range_intersection([NR(0, 4, 0)]), []) + self.assertEqual(NR(0, 4, 0).range_intersection([NNR('a')]), []) # test ranges running in the other direction self.assertEqual( - NR(10,0,-1).range_intersection([NR(7,4,-2)]), - [NR(5,7,2)], + NR(10, 0, -1).range_intersection([NR(7, 4, -2)]), [NR(5, 7, 2)] ) self.assertEqual( - NR(10,0,-1).range_intersection([NR(7,None,-2)]), - [NR(1,7,2)], + NR(10, 0, -1).range_intersection([NR(7, None, -2)]), [NR(1, 7, 2)] ) self.assertEqual( - NR(0,None,-1).range_intersection([NR(None,-10,0)]), - [NR(-10,None,-1)], + NR(0, None, -1).range_intersection([NR(None, -10, 0)]), [NR(-10, None, -1)] ) # Test continuous ranges + self.assertEqual(NR(0, 5, 0).range_intersection([NR(5, 10, 0)]), [NR(5, 5, 0)]) self.assertEqual( - NR(0,5,0).range_intersection([NR(5,10,0)]), - [NR(5,5,0)], - ) - self.assertEqual( - NR(0,None,0).range_intersection([NR(5,None,0)]), - [NR(5,None,0)], + NR(0, None, 0).range_intersection([NR(5, None, 0)]), [NR(5, None, 0)] ) # Disjoint ranges... @@ -714,10 +682,10 @@ def test_range_intersection(self): self.assertEqual(a.range_intersection([NR(0.5, None, 1)]), []) def test_pickle(self): - a = NR(0,100,5) + a = NR(0, 100, 5) b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) class TestAnyRange(unittest.TestCase): @@ -744,31 +712,19 @@ def test_contains(self): self.assertIn('a', a) def test_range_difference(self): - self.assertEqual( - AnyRange().range_difference([NR(0,None,1)]), - [AnyRange()] - ) - self.assertEqual( - NR(0,None,1).range_difference([AnyRange()]), - [] - ) - self.assertEqual( - AnyRange().range_difference([AnyRange()]), - [] - ) + self.assertEqual(AnyRange().range_difference([NR(0, None, 1)]), [AnyRange()]) + self.assertEqual(NR(0, None, 1).range_difference([AnyRange()]), []) + self.assertEqual(AnyRange().range_difference([AnyRange()]), []) def test_range_intersection(self): self.assertEqual( - AnyRange().range_intersection([NR(0,None,1)]), - [NR(0,None,1)] + AnyRange().range_intersection([NR(0, None, 1)]), [NR(0, None, 1)] ) self.assertEqual( - NR(0,None,1).range_intersection([AnyRange()]), - [NR(0,None,1)] + NR(0, None, 1).range_intersection([AnyRange()]), [NR(0, None, 1)] ) self.assertEqual( - NR(0,None,-1).range_intersection([AnyRange()]), - [NR(0,None,-1)] + NR(0, None, -1).range_intersection([AnyRange()]), [NR(0, None, -1)] ) def test_info_methods(self): @@ -779,8 +735,8 @@ def test_info_methods(self): def test_pickle(self): a = AnyRange() b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) class TestNonNumericRange(unittest.TestCase): @@ -821,26 +777,14 @@ def test_contains(self): def test_range_difference(self): a = NNR('a') b = NNR(None) - self.assertEqual( - a.range_difference([NNR('a')]), - [] - ) - self.assertEqual( - a.range_difference([b]), - [NNR('a')] - ) + self.assertEqual(a.range_difference([NNR('a')]), []) + self.assertEqual(a.range_difference([b]), [NNR('a')]) def test_range_intersection(self): a = NNR('a') b = NNR(None) - self.assertEqual( - a.range_intersection([b]), - [] - ) - self.assertEqual( - a.range_intersection([NNR('a')]), - [NNR('a')] - ) + self.assertEqual(a.range_intersection([b]), []) + self.assertEqual(a.range_intersection([NNR('a')]), [NNR('a')]) def test_info_methods(self): a = NNR('a') @@ -850,22 +794,22 @@ def test_info_methods(self): def test_pickle(self): a = NNR('a') b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) class TestRangeProduct(unittest.TestCase): def test_str(self): - a = RP([[NR(0,10,1)],[NR(0,10,0),NNR('a')]]) + a = RP([[NR(0, 10, 1)], [NR(0, 10, 0), NNR('a')]]) self.assertEqual(str(a), '<[0:10], ([0..10], {a})>') def test_range_relational(self): - a = RP([[NR(0,10,1)],[NR(0,10,0),NNR('a')]]) - aa = RP([[NR(0,10,1)],[NR(0,10,0),NNR('a')]]) - b = RP([[NR(0,10,1)],[NR(0,10,0),NNR('a'),NNR('b')]]) - c = RP([[NR(0,10,1)],[NR(0,10,0),NNR('b')]]) - d = RP([[NR(0,10,0)],[NR(0,10,0),NNR('a')]]) - d = RP([[NR(0,10,0)],[AnyRange()]]) + a = RP([[NR(0, 10, 1)], [NR(0, 10, 0), NNR('a')]]) + aa = RP([[NR(0, 10, 1)], [NR(0, 10, 0), NNR('a')]]) + b = RP([[NR(0, 10, 1)], [NR(0, 10, 0), NNR('a'), NNR('b')]]) + c = RP([[NR(0, 10, 1)], [NR(0, 10, 0), NNR('b')]]) + d = RP([[NR(0, 10, 0)], [NR(0, 10, 0), NNR('a')]]) + d = RP([[NR(0, 10, 0)], [AnyRange()]]) self.assertTrue(a.issubset(aa)) self.assertTrue(a.issubset(b)) @@ -873,37 +817,37 @@ def test_range_relational(self): self.assertTrue(a.issubset(d)) self.assertFalse(a.issubset(NNR('a'))) - self.assertFalse(a.issubset(NR(None,None,0))) + self.assertFalse(a.issubset(NR(None, None, 0))) self.assertTrue(a.issubset(AnyRange())) def test_contains(self): a = NNR('a') - b = NR(0,5,0) - c = NR(5,10,1) - x = RP([[a],[b,c]]) + b = NR(0, 5, 0) + c = NR(5, 10, 1) + x = RP([[a], [b, c]]) self.assertNotIn('a', x) self.assertNotIn(0, x) self.assertNotIn(None, x) - self.assertIn(('a',0), x) - self.assertIn(('a',6), x) - self.assertNotIn(('a',6.5), x) + self.assertIn(('a', 0), x) + self.assertIn(('a', 6), x) + self.assertNotIn(('a', 6.5), x) def test_equality(self): a = NNR('a') - b = NR(0,5,0) - c = NR(5,10,1) - x = RP([[a],[b,c]]) - y = RP([[a],[c]]) - self.assertEqual(x,x) - self.assertNotEqual(x,y) + b = NR(0, 5, 0) + c = NR(5, 10, 1) + x = RP([[a], [b, c]]) + y = RP([[a], [c]]) + self.assertEqual(x, x) + self.assertNotEqual(x, y) def test_isdisjoint(self): a = NNR('a') - b = NR(0,5,0) - c = NR(5,10,1) - x = RP([[a],[b,c]]) - y = RP([[a],[c]]) - z = RP([[a],[b],[c]]) + b = NR(0, 5, 0) + c = NR(5, 10, 1) + x = RP([[a], [b, c]]) + y = RP([[a], [c]]) + z = RP([[a], [b], [c]]) w = RP([[AnyRange()], [b]]) self.assertFalse(x.isdisjoint(x)) self.assertFalse(x.isdisjoint(y)) @@ -912,49 +856,49 @@ def test_isdisjoint(self): self.assertTrue(x.isdisjoint(a)) self.assertFalse(y.isdisjoint(w)) self.assertFalse(x.isdisjoint(AnyRange())) - v = RP([[AnyRange()],[NR(0,5,0,(False,False))]]) + v = RP([[AnyRange()], [NR(0, 5, 0, (False, False))]]) self.assertTrue(y.isdisjoint(v)) def test_range_difference(self): a = NNR('a') - b = NR(0,5,0) - b1 = NR(0,5,0,'[)') # Note: b & c overlap, so [b,c]-c != b - c = NR(5,10,1) - x = RP([[a],[b,c]]) - y = RP([[a],[c]]) - z = RP([[a],[b],[c]]) + b = NR(0, 5, 0) + b1 = NR(0, 5, 0, '[)') # Note: b & c overlap, so [b,c]-c != b + c = NR(5, 10, 1) + x = RP([[a], [b, c]]) + y = RP([[a], [c]]) + z = RP([[a], [b], [c]]) w = RP([list(Any.ranges()), [b]]) self.assertEqual(x.range_difference([x]), []) - self.assertEqual(x.range_difference([y]), [RP([[a],[b1]])]) + self.assertEqual(x.range_difference([y]), [RP([[a], [b1]])]) self.assertEqual(x.range_difference([z]), [x]) self.assertEqual(x.range_difference(Any.ranges()), []) - self.assertEqual(x.range_difference([w]), [RP([[a],[NR(6,10,1)]])]) - v = RP([[AnyRange()],[NR(0,5,0,(False,False))]]) + self.assertEqual(x.range_difference([w]), [RP([[a], [NR(6, 10, 1)]])]) + v = RP([[AnyRange()], [NR(0, 5, 0, (False, False))]]) self.assertEqual(y.range_difference([v]), [y]) def test_range_intersection(self): a = NNR('a') - b = NR(0,5,0) - c = NR(5,10,1) - x = RP([[a],[b,c]]) - y = RP([[a],[c]]) - z = RP([[a],[b],[c]]) + b = NR(0, 5, 0) + c = NR(5, 10, 1) + x = RP([[a], [b, c]]) + y = RP([[a], [c]]) + z = RP([[a], [b], [c]]) w = RP([list(Any.ranges()), [b]]) self.assertEqual(x.range_intersection([x]), [x]) self.assertEqual(x.range_intersection([y]), [y]) self.assertEqual(x.range_intersection([z]), []) self.assertEqual(x.range_intersection(Any.ranges()), [x]) - self.assertEqual(x.range_intersection([w]), [RP([[a],[b]])]) - self.assertEqual(y.range_intersection([w]), [RP([[a],[NR(5,5,0)]])]) - v = RP([[AnyRange()],[NR(0,5,0,(False,False))]]) + self.assertEqual(x.range_intersection([w]), [RP([[a], [b]])]) + self.assertEqual(y.range_intersection([w]), [RP([[a], [NR(5, 5, 0)]])]) + v = RP([[AnyRange()], [NR(0, 5, 0, (False, False))]]) self.assertEqual(y.range_intersection([v]), []) def test_info_methods(self): a = NNR('a') - b = NR(0,5,0) - c = NR(5,10,1) - x = RP([[a],[b,c]]) - y = RP([[a],[c]]) + b = NR(0, 5, 0) + c = NR(5, 10, 1) + x = RP([[a], [b, c]]) + y = RP([[a], [c]]) self.assertFalse(x.isdiscrete()) self.assertFalse(x.isfinite()) self.assertTrue(y.isdiscrete()) @@ -962,17 +906,15 @@ def test_info_methods(self): def test_pickle(self): a = NNR('a') - b = NR(0,5,0) - c = NR(5,10,1) - x = RP([[a],[b,c]]) - y = RP([[a],[c]]) + b = NR(0, 5, 0) + c = NR(5, 10, 1) + x = RP([[a], [b, c]]) + y = RP([[a], [c]]) xx = pickle.loads(pickle.dumps(x)) - self.assertIsNot(x,xx) - self.assertEqual(x,xx) + self.assertIsNot(x, xx) + self.assertEqual(x, xx) yy = pickle.loads(pickle.dumps(y)) - self.assertIsNot(y,yy) - self.assertEqual(y,yy) - - + self.assertIsNot(y, yy) + self.assertEqual(y, yy) diff --git a/pyomo/core/tests/unit/test_reference.py b/pyomo/core/tests/unit/test_reference.py index 961166f108f..a7a470b1a3b 100644 --- a/pyomo/core/tests/unit/test_reference.py +++ b/pyomo/core/tests/unit/test_reference.py @@ -14,42 +14,56 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest from io import StringIO from pyomo.environ import ( - ConcreteModel, Block, Var, Set, RangeSet, Param, value, + ConcreteModel, + Block, + Var, + Set, + RangeSet, + Param, + value, NonNegativeIntegers, ) from pyomo.common.collections import ComponentSet +from pyomo.common.log import LoggingIntercept from pyomo.core.base.var import IndexedVar from pyomo.core.base.set import ( - SetProduct, FiniteSetOf, OrderedSetOf, UnknownSetDimen, normalize_index, -) -from pyomo.core.base.indexed_component import ( - UnindexedComponent_set, IndexedComponent + SetProduct, + FiniteSetOf, + OrderedSetOf, + UnknownSetDimen, + normalize_index, ) +from pyomo.core.base.indexed_component import UnindexedComponent_set, IndexedComponent from pyomo.core.base.indexed_component_slice import IndexedComponent_slice from pyomo.core.base.reference import ( - _ReferenceDict, _ReferenceSet, Reference + _ReferenceDict, + _ReferenceSet, + Reference, + UnindexedComponent_ReferenceSet, ) class TestReferenceDict(unittest.TestCase): def setUp(self): self.m = m = ConcreteModel() - @m.Block([1,2], [4,5]) - def b(b,i,j): - b.x = Var([7,8],[10,11], initialize=0) - b.y = Var([7,8], initialize=0) + + @m.Block([1, 2], [4, 5]) + def b(b, i, j): + b.x = Var([7, 8], [10, 11], initialize=0) + b.y = Var([7, 8], initialize=0) b.z = Var() - @m.Block([1,2]) - def c(b,i): - b.x = Var([7,8],[10,11], initialize=0) - b.y = Var([7,8], initialize=0) + @m.Block([1, 2]) + def c(b, i): + b.x = Var([7, 8], [10, 11], initialize=0) + b.y = Var([7, 8], initialize=0) b.z = Var() def _lookupTester(self, _slice, key, ans): @@ -66,7 +80,7 @@ def _lookupTester(self, _slice, key, ans): rd[None] for i in range(len(key)): - _ = tuple([0]*i) + _ = tuple([0] * i) self.assertNotIn(_, rd) with self.assertRaises(KeyError): rd[_] @@ -74,284 +88,429 @@ def _lookupTester(self, _slice, key, ans): def test_simple_lookup(self): m = self.m - self._lookupTester(m.b[:,:].x[:,:], (1,5,7,10), m.b[1,5].x[7,10]) - self._lookupTester(m.b[:,4].x[8,:], (1,10), m.b[1,4].x[8,10]) - self._lookupTester(m.b[:,4].x[8,10], (1,), m.b[1,4].x[8,10]) - self._lookupTester(m.b[1,4].x[8,:], (10,), m.b[1,4].x[8,10]) + self._lookupTester(m.b[:, :].x[:, :], (1, 5, 7, 10), m.b[1, 5].x[7, 10]) + self._lookupTester(m.b[:, 4].x[8, :], (1, 10), m.b[1, 4].x[8, 10]) + self._lookupTester(m.b[:, 4].x[8, 10], (1,), m.b[1, 4].x[8, 10]) + self._lookupTester(m.b[1, 4].x[8, :], (10,), m.b[1, 4].x[8, 10]) - self._lookupTester(m.b[:,:].y[:], (1,5,7), m.b[1,5].y[7]) - self._lookupTester(m.b[:,4].y[:], (1,7), m.b[1,4].y[7]) - self._lookupTester(m.b[:,4].y[8], (1,), m.b[1,4].y[8]) + self._lookupTester(m.b[:, :].y[:], (1, 5, 7), m.b[1, 5].y[7]) + self._lookupTester(m.b[:, 4].y[:], (1, 7), m.b[1, 4].y[7]) + self._lookupTester(m.b[:, 4].y[8], (1,), m.b[1, 4].y[8]) - self._lookupTester(m.b[:,:].z, (1,5), m.b[1,5].z) - self._lookupTester(m.b[:,4].z, (1,), m.b[1,4].z) + self._lookupTester(m.b[:, :].z, (1, 5), m.b[1, 5].z) + self._lookupTester(m.b[:, 4].z, (1,), m.b[1, 4].z) + self._lookupTester(m.c[:].x[:, :], (1, 7, 10), m.c[1].x[7, 10]) + self._lookupTester(m.c[:].x[8, :], (1, 10), m.c[1].x[8, 10]) + self._lookupTester(m.c[:].x[8, 10], (1,), m.c[1].x[8, 10]) + self._lookupTester(m.c[1].x[:, :], (8, 10), m.c[1].x[8, 10]) + self._lookupTester(m.c[1].x[8, :], (10,), m.c[1].x[8, 10]) - self._lookupTester(m.c[:].x[:,:], (1,7,10), m.c[1].x[7,10]) - self._lookupTester(m.c[:].x[8,:], (1,10), m.c[1].x[8,10]) - self._lookupTester(m.c[:].x[8,10], (1,), m.c[1].x[8,10]) - self._lookupTester(m.c[1].x[:,:], (8,10), m.c[1].x[8,10]) - self._lookupTester(m.c[1].x[8,:], (10,), m.c[1].x[8,10]) - - self._lookupTester(m.c[:].y[:], (1,7), m.c[1].y[7]) - self._lookupTester(m.c[:].y[8], (1,), m.c[1].y[8]) - self._lookupTester(m.c[1].y[:], (8,), m.c[1].y[8]) + self._lookupTester(m.c[:].y[:], (1, 7), m.c[1].y[7]) + self._lookupTester(m.c[:].y[8], (1,), m.c[1].y[8]) + self._lookupTester(m.c[1].y[:], (8,), m.c[1].y[8]) self._lookupTester(m.c[:].z, (1,), m.c[1].z) - m.jagged_set = Set(initialize=[1,(2,3)], dimen=None) + m.jagged_set = Set(initialize=[1, (2, 3)], dimen=None) m.jb = Block(m.jagged_set) - m.jb[1].x = Var([1,2,3]) - m.jb[2,3].x = Var([1,2,3]) + m.jb[1].x = Var([1, 2, 3]) + m.jb[2, 3].x = Var([1, 2, 3]) self._lookupTester(m.jb[...], (1,), m.jb[1]) - self._lookupTester(m.jb[...].x[:], (1,2), m.jb[1].x[2]) - self._lookupTester(m.jb[...].x[:], (2,3,2), m.jb[2,3].x[2]) + self._lookupTester(m.jb[...].x[:], (1, 2), m.jb[1].x[2]) + self._lookupTester(m.jb[...].x[:], (2, 3, 2), m.jb[2, 3].x[2]) - rd = _ReferenceDict(m.jb[:,:,:].x[:]) + rd = _ReferenceDict(m.jb[:, :, :].x[:]) with self.assertRaises(KeyError): - rd[2,3,4,2] - rd = _ReferenceDict(m.b[:,4].x[:]) + rd[2, 3, 4, 2] + rd = _ReferenceDict(m.b[:, 4].x[:]) with self.assertRaises(KeyError): - rd[1,0] + rd[1, 0] def test_len(self): m = self.m - rd = _ReferenceDict(m.b[:,:].x[:,:]) - self.assertEqual(len(rd), 2*2*2*2) + rd = _ReferenceDict(m.b[:, :].x[:, :]) + self.assertEqual(len(rd), 2 * 2 * 2 * 2) - rd = _ReferenceDict(m.b[:,4].x[8,:]) - self.assertEqual(len(rd), 2*2) + rd = _ReferenceDict(m.b[:, 4].x[8, :]) + self.assertEqual(len(rd), 2 * 2) def test_iterators(self): m = self.m - rd = _ReferenceDict(m.b[:,4].x[8,:]) + rd = _ReferenceDict(m.b[:, 4].x[8, :]) - self.assertEqual( - list(rd.keys()), - [(1,10), (1,11), (2,10), (2,11)] - ) + self.assertEqual(list(rd), [(1, 10), (1, 11), (2, 10), (2, 11)]) + self.assertEqual(list(rd.keys()), [(1, 10), (1, 11), (2, 10), (2, 11)]) self.assertEqual( list(rd.values()), - [m.b[1,4].x[8,10], m.b[1,4].x[8,11], - m.b[2,4].x[8,10], m.b[2,4].x[8,11]] + [ + m.b[1, 4].x[8, 10], + m.b[1, 4].x[8, 11], + m.b[2, 4].x[8, 10], + m.b[2, 4].x[8, 11], + ], ) self.assertEqual( list(rd.items()), - [((1,10), m.b[1,4].x[8,10]), - ((1,11), m.b[1,4].x[8,11]), - ((2,10), m.b[2,4].x[8,10]), - ((2,11), m.b[2,4].x[8,11])] + [ + ((1, 10), m.b[1, 4].x[8, 10]), + ((1, 11), m.b[1, 4].x[8, 11]), + ((2, 10), m.b[2, 4].x[8, 10]), + ((2, 11), m.b[2, 4].x[8, 11]), + ], + ) + + def test_ordered_iterators(self): + # Test slice; common indexing set + m = ConcreteModel() + m.I = Set(initialize=[3, 2]) + m.b = Block([1, 0]) + m.b[1].x = Var(m.I) + m.b[0].x = Var(m.I) + m.y = Reference(m.b[:].x[:]) + self.assertEqual(list(m.y.index_set().subsets()), [m.b.index_set(), m.I]) + self.assertEqual(list(m.y), [(1, 3), (1, 2), (0, 3), (0, 2)]) + self.assertEqual(list(m.y.keys()), [(1, 3), (1, 2), (0, 3), (0, 2)]) + self.assertEqual( + list(m.y.values()), [m.b[1].x[3], m.b[1].x[2], m.b[0].x[3], m.b[0].x[2]] + ) + self.assertEqual( + list(m.y.items()), + [ + ((1, 3), m.b[1].x[3]), + ((1, 2), m.b[1].x[2]), + ((0, 3), m.b[0].x[3]), + ((0, 2), m.b[0].x[2]), + ], + ) + self.assertEqual(list(m.y.keys(True)), [(0, 2), (0, 3), (1, 2), (1, 3)]) + self.assertEqual( + list(m.y.values(True)), [m.b[0].x[2], m.b[0].x[3], m.b[1].x[2], m.b[1].x[3]] + ) + self.assertEqual( + list(m.y.items(True)), + [ + ((0, 2), m.b[0].x[2]), + ((0, 3), m.b[0].x[3]), + ((1, 2), m.b[1].x[2]), + ((1, 3), m.b[1].x[3]), + ], + ) + + # Test slice; ReferenceSet indexing set + m = ConcreteModel() + m.b = Block([1, 0]) + m.b[1].x = Var([3, 2]) + m.b[0].x = Var([5, 4]) + m.y = Reference(m.b[:].x[:]) + self.assertIs(type(m.y.index_set()), FiniteSetOf) + self.assertEqual(list(m.y), [(1, 3), (1, 2), (0, 5), (0, 4)]) + self.assertEqual(list(m.y.keys()), [(1, 3), (1, 2), (0, 5), (0, 4)]) + self.assertEqual( + list(m.y.values()), [m.b[1].x[3], m.b[1].x[2], m.b[0].x[5], m.b[0].x[4]] + ) + self.assertEqual( + list(m.y.items()), + [ + ((1, 3), m.b[1].x[3]), + ((1, 2), m.b[1].x[2]), + ((0, 5), m.b[0].x[5]), + ((0, 4), m.b[0].x[4]), + ], + ) + self.assertEqual(list(m.y.keys(True)), [(0, 4), (0, 5), (1, 2), (1, 3)]) + self.assertEqual( + list(m.y.values(True)), [m.b[0].x[4], m.b[0].x[5], m.b[1].x[2], m.b[1].x[3]] + ) + self.assertEqual( + list(m.y.items(True)), + [ + ((0, 4), m.b[0].x[4]), + ((0, 5), m.b[0].x[5]), + ((1, 2), m.b[1].x[2]), + ((1, 3), m.b[1].x[3]), + ], + ) + + # Test dict, ReferenceSet indexing set + m = ConcreteModel() + m.b = Block([1, 0]) + m.b[1].x = Var([3, 2]) + m.b[0].x = Var([5, 4]) + m.y = Reference( + { + (1, 3): m.b[1].x[3], + (0, 5): m.b[0].x[5], + (1, 2): m.b[1].x[2], + (0, 4): m.b[0].x[4], + } + ) + self.assertIs(type(m.y.index_set()), FiniteSetOf) + self.assertEqual(list(m.y), [(1, 3), (0, 5), (1, 2), (0, 4)]) + self.assertEqual(list(m.y.keys()), [(1, 3), (0, 5), (1, 2), (0, 4)]) + self.assertEqual( + list(m.y.values()), [m.b[1].x[3], m.b[0].x[5], m.b[1].x[2], m.b[0].x[4]] + ) + self.assertEqual( + list(m.y.items()), + [ + ((1, 3), m.b[1].x[3]), + ((0, 5), m.b[0].x[5]), + ((1, 2), m.b[1].x[2]), + ((0, 4), m.b[0].x[4]), + ], + ) + self.assertEqual(list(m.y.keys(True)), [(0, 4), (0, 5), (1, 2), (1, 3)]) + self.assertEqual( + list(m.y.values(True)), [m.b[0].x[4], m.b[0].x[5], m.b[1].x[2], m.b[1].x[3]] + ) + self.assertEqual( + list(m.y.items(True)), + [ + ((0, 4), m.b[0].x[4]), + ((0, 5), m.b[0].x[5]), + ((1, 2), m.b[1].x[2]), + ((1, 3), m.b[1].x[3]), + ], ) def test_nested_assignment(self): m = self.m - rd = _ReferenceDict(m.b[:,:].x[:,:]) - self.assertEqual( sum(x.value for x in rd.values()), 0 ) - rd[1,5,7,10] = 10 - self.assertEqual( m.b[1,5].x[7,10].value, 10 ) - self.assertEqual( sum(x.value for x in rd.values()), 10 ) + rd = _ReferenceDict(m.b[:, :].x[:, :]) + self.assertEqual(sum(x.value for x in rd.values()), 0) + rd[1, 5, 7, 10] = 10 + self.assertEqual(m.b[1, 5].x[7, 10].value, 10) + self.assertEqual(sum(x.value for x in rd.values()), 10) - rd = _ReferenceDict(m.b[:,4].x[8,:]) - self.assertEqual( sum(x.value for x in rd.values()), 0 ) - rd[1,10] = 20 - self.assertEqual( m.b[1,4].x[8,10].value, 20 ) - self.assertEqual( sum(x.value for x in rd.values()), 20 ) + rd = _ReferenceDict(m.b[:, 4].x[8, :]) + self.assertEqual(sum(x.value for x in rd.values()), 0) + rd[1, 10] = 20 + self.assertEqual(m.b[1, 4].x[8, 10].value, 20) + self.assertEqual(sum(x.value for x in rd.values()), 20) def test_attribute_assignment(self): m = self.m - rd = _ReferenceDict(m.b[:,:].x[:,:].value) - self.assertEqual( sum(x for x in rd.values()), 0 ) - rd[1,5,7,10] = 10 - self.assertEqual( m.b[1,5].x[7,10].value, 10 ) - self.assertEqual( sum(x for x in rd.values()), 10 ) + rd = _ReferenceDict(m.b[:, :].x[:, :].value) + self.assertEqual(sum(x for x in rd.values()), 0) + rd[1, 5, 7, 10] = 10 + self.assertEqual(m.b[1, 5].x[7, 10].value, 10) + self.assertEqual(sum(x for x in rd.values()), 10) - rd = _ReferenceDict(m.b[:,4].x[8,:].value) - self.assertEqual( sum(x for x in rd.values()), 0 ) - rd[1,10] = 20 - self.assertEqual( m.b[1,4].x[8,10].value, 20 ) - self.assertEqual( sum(x for x in rd.values()), 20 ) + rd = _ReferenceDict(m.b[:, 4].x[8, :].value) + self.assertEqual(sum(x for x in rd.values()), 0) + rd[1, 10] = 20 + self.assertEqual(m.b[1, 4].x[8, 10].value, 20) + self.assertEqual(sum(x for x in rd.values()), 20) - m.x = Var([1,2], initialize=0) + m.x = Var([1, 2], initialize=0) rd = _ReferenceDict(m.x[:]) - self.assertEqual( sum(x.value for x in rd.values()), 0 ) + self.assertEqual(sum(x.value for x in rd.values()), 0) rd[2] = 10 - self.assertEqual( m.x[1].value, 0 ) - self.assertEqual( m.x[2].value, 10 ) - self.assertEqual( sum(x.value for x in rd.values()), 10 ) + self.assertEqual(m.x[1].value, 0) + self.assertEqual(m.x[2].value, 10) + self.assertEqual(sum(x.value for x in rd.values()), 10) def test_single_attribute_assignment(self): m = self.m - rd = _ReferenceDict(m.b[1,5].x[:,:]) - self.assertEqual( sum(x.value for x in rd.values()), 0 ) - rd[7,10].value = 10 - self.assertEqual( m.b[1,5].x[7,10].value, 10 ) - self.assertEqual( sum(x.value for x in rd.values()), 10 ) + rd = _ReferenceDict(m.b[1, 5].x[:, :]) + self.assertEqual(sum(x.value for x in rd.values()), 0) + rd[7, 10].value = 10 + self.assertEqual(m.b[1, 5].x[7, 10].value, 10) + self.assertEqual(sum(x.value for x in rd.values()), 10) - rd = _ReferenceDict(m.b[1,4].x[8,:]) - self.assertEqual( sum(x.value for x in rd.values()), 0 ) + rd = _ReferenceDict(m.b[1, 4].x[8, :]) + self.assertEqual(sum(x.value for x in rd.values()), 0) rd[10].value = 20 - self.assertEqual( m.b[1,4].x[8,10].value, 20 ) - self.assertEqual( sum(x.value for x in rd.values()), 20 ) + self.assertEqual(m.b[1, 4].x[8, 10].value, 20) + self.assertEqual(sum(x.value for x in rd.values()), 20) def test_nested_attribute_assignment(self): m = self.m - rd = _ReferenceDict(m.b[:,:].x[:,:]) - self.assertEqual( sum(x.value for x in rd.values()), 0 ) - rd[1,5,7,10].value = 10 - self.assertEqual( m.b[1,5].x[7,10].value, 10 ) - self.assertEqual( sum(x.value for x in rd.values()), 10 ) + rd = _ReferenceDict(m.b[:, :].x[:, :]) + self.assertEqual(sum(x.value for x in rd.values()), 0) + rd[1, 5, 7, 10].value = 10 + self.assertEqual(m.b[1, 5].x[7, 10].value, 10) + self.assertEqual(sum(x.value for x in rd.values()), 10) - rd = _ReferenceDict(m.b[:,4].x[8,:]) - self.assertEqual( sum(x.value for x in rd.values()), 0 ) - rd[1,10].value = 20 - self.assertEqual( m.b[1,4].x[8,10].value, 20 ) - self.assertEqual( sum(x.value for x in rd.values()), 20 ) + rd = _ReferenceDict(m.b[:, 4].x[8, :]) + self.assertEqual(sum(x.value for x in rd.values()), 0) + rd[1, 10].value = 20 + self.assertEqual(m.b[1, 4].x[8, 10].value, 20) + self.assertEqual(sum(x.value for x in rd.values()), 20) def test_single_deletion(self): m = self.m - rd = _ReferenceDict(m.b[1,5].x[:,:]) - self.assertEqual(len(list(x.value for x in rd.values())), 2*2) - self.assertTrue((7,10) in rd) - del rd[7,10] - self.assertFalse((7,10) in rd) + rd = _ReferenceDict(m.b[1, 5].x[:, :]) + self.assertEqual(len(list(x.value for x in rd.values())), 2 * 2) + self.assertTrue((7, 10) in rd) + del rd[7, 10] + self.assertFalse((7, 10) in rd) self.assertEqual(len(list(x.value for x in rd.values())), 3) - rd = _ReferenceDict(m.b[1,4].x[8,:]) + rd = _ReferenceDict(m.b[1, 4].x[8, :]) self.assertEqual(len(list(x.value for x in rd.values())), 2) self.assertTrue((10) in rd) del rd[10] self.assertFalse(10 in rd) - self.assertEqual(len(list(x.value for x in rd.values())), 2-1) + self.assertEqual(len(list(x.value for x in rd.values())), 2 - 1) with self.assertRaisesRegex( - KeyError, - r"\(8, 10\) is not valid for indexed component 'b\[1,4\].x'"): + KeyError, r"\(8, 10\) is not valid for indexed component 'b\[1,4\].x'" + ): del rd[10] - rd = _ReferenceDict(m.b[1,:].x[8,0]) + rd = _ReferenceDict(m.b[1, :].x[8, 0]) with self.assertRaisesRegex( - KeyError, - r"'\(8, 0\)' is not valid for indexed component 'b\[1,4\].x'"): + KeyError, r"'\(8, 0\)' is not valid for indexed component 'b\[1,4\].x'" + ): del rd[4] - def test_nested_deletion(self): m = self.m - rd = _ReferenceDict(m.b[:,:].x[:,:]) - self.assertEqual(len(list(x.value for x in rd.values())), 2*2*2*2) - self.assertTrue((1,5,7,10) in rd) - del rd[1,5,7,10] - self.assertFalse((1,5,7,10) in rd) - self.assertEqual(len(list(x.value for x in rd.values())), 2*2*2*2-1) + rd = _ReferenceDict(m.b[:, :].x[:, :]) + self.assertEqual(len(list(x.value for x in rd.values())), 2 * 2 * 2 * 2) + self.assertTrue((1, 5, 7, 10) in rd) + del rd[1, 5, 7, 10] + self.assertFalse((1, 5, 7, 10) in rd) + self.assertEqual(len(list(x.value for x in rd.values())), 2 * 2 * 2 * 2 - 1) - rd = _ReferenceDict(m.b[:,4].x[8,:]) - self.assertEqual(len(list(x.value for x in rd.values())), 2*2) - self.assertTrue((1,10) in rd) - del rd[1,10] - self.assertFalse((1,10) in rd) - self.assertEqual(len(list(x.value for x in rd.values())), 2*2-1) + rd = _ReferenceDict(m.b[:, 4].x[8, :]) + self.assertEqual(len(list(x.value for x in rd.values())), 2 * 2) + self.assertTrue((1, 10) in rd) + del rd[1, 10] + self.assertFalse((1, 10) in rd) + self.assertEqual(len(list(x.value for x in rd.values())), 2 * 2 - 1) def test_attribute_deletion(self): m = self.m - rd = _ReferenceDict(m.b[:,:].z) + rd = _ReferenceDict(m.b[:, :].z) rd._slice.attribute_errors_generate_exceptions = False - self.assertEqual(len(list(x.value for x in rd.values())), 2*2) - self.assertTrue((1,5) in rd) - self.assertTrue( hasattr(m.b[1,5], 'z') ) - self.assertTrue( hasattr(m.b[2,5], 'z') ) - del rd[1,5] - self.assertFalse((1,5) in rd) - self.assertFalse( hasattr(m.b[1,5], 'z') ) - self.assertTrue( hasattr(m.b[2,5], 'z') ) + self.assertEqual(len(list(x.value for x in rd.values())), 2 * 2) + self.assertTrue((1, 5) in rd) + self.assertTrue(hasattr(m.b[1, 5], 'z')) + self.assertTrue(hasattr(m.b[2, 5], 'z')) + del rd[1, 5] + self.assertFalse((1, 5) in rd) + self.assertFalse(hasattr(m.b[1, 5], 'z')) + self.assertTrue(hasattr(m.b[2, 5], 'z')) self.assertEqual(len(list(x.value for x in rd.values())), 3) - rd = _ReferenceDict(m.b[2,:].z) + rd = _ReferenceDict(m.b[2, :].z) rd._slice.attribute_errors_generate_exceptions = False self.assertEqual(len(list(x.value for x in rd.values())), 2) self.assertTrue(5 in rd) - self.assertTrue( hasattr(m.b[2,4], 'z') ) - self.assertTrue( hasattr(m.b[2,5], 'z') ) + self.assertTrue(hasattr(m.b[2, 4], 'z')) + self.assertTrue(hasattr(m.b[2, 5], 'z')) del rd[5] self.assertFalse(5 in rd) - self.assertTrue( hasattr(m.b[2,4], 'z') ) - self.assertFalse( hasattr(m.b[2,5], 'z') ) - self.assertEqual(len(list(x.value for x in rd.values())), 2-1) + self.assertTrue(hasattr(m.b[2, 4], 'z')) + self.assertFalse(hasattr(m.b[2, 5], 'z')) + self.assertEqual(len(list(x.value for x in rd.values())), 2 - 1) + + def test_deprecations(self): + m = self.m + rd = _ReferenceDict(m.b[:, :].z) + + items = rd.items() + with LoggingIntercept() as LOG: + iteritems = rd.iteritems() + self.assertIs(type(items), type(iteritems)) + self.assertEqual(list(items), list(iteritems)) + self.assertIn( + "DEPRECATED: The iteritems method is deprecated. Use dict.items", + LOG.getvalue(), + ) + + values = rd.values() + with LoggingIntercept() as LOG: + itervalues = rd.itervalues() + self.assertIs(type(values), type(itervalues)) + self.assertEqual(list(values), list(itervalues)) + self.assertIn( + "DEPRECATED: The itervalues method is deprecated. Use dict.values", + LOG.getvalue(), + ) + class TestReferenceSet(unittest.TestCase): def test_str(self): m = ConcreteModel() - @m.Block([1,2], [4,5]) - def b(b,i,j): - b.x = Var([7,8],[10,11], initialize=0) - b.y = Var([7,8], initialize=0) + + @m.Block([1, 2], [4, 5]) + def b(b, i, j): + b.x = Var([7, 8], [10, 11], initialize=0) + b.y = Var([7, 8], initialize=0) b.z = Var() - rs = _ReferenceSet(m.b[:,5].z) + rs = _ReferenceSet(m.b[:, 5].z) self.assertEqual(str(rs), 'ReferenceSet(b[:, 5].z)') def test_lookup_and_iter_dense_data(self): m = ConcreteModel() - @m.Block([1,2], [4,5]) - def b(b,i,j): - b.x = Var([7,8],[10,11], initialize=0) - b.y = Var([7,8], initialize=0) + + @m.Block([1, 2], [4, 5]) + def b(b, i, j): + b.x = Var([7, 8], [10, 11], initialize=0) + b.y = Var([7, 8], initialize=0) b.z = Var() - rs = _ReferenceSet(m.b[:,5].z) + rs = _ReferenceSet(m.b[:, 5].z) self.assertNotIn((0,), rs) self.assertIn(1, rs) self.assertIn((1,), rs) self.assertEqual(len(rs), 2) - self.assertEqual(list(rs), [1,2]) + self.assertEqual(list(rs), [1, 2]) - rs = _ReferenceSet(m.b[:,5].bad) + rs = _ReferenceSet(m.b[:, 5].bad) self.assertNotIn((0,), rs) self.assertNotIn((1,), rs) self.assertEqual(len(rs), 0) self.assertEqual(list(rs), []) - @m.Block([1,2,3]) + @m.Block([1, 2, 3]) def d(b, i): if i % 2: b.x = Var(range(i)) rs = _ReferenceSet(m.d[:].x[:]) - self.assertIn((1,0), rs) - self.assertIn((3,0), rs) - self.assertNotIn((2,0), rs) + self.assertIn((1, 0), rs) + self.assertIn((3, 0), rs) + self.assertNotIn((2, 0), rs) self.assertEqual(len(rs), 4) - self.assertEqual(list(rs), [(1,0), (3,0), (3,1), (3,2)]) + self.assertEqual(list(rs), [(1, 0), (3, 0), (3, 1), (3, 2)]) rs = _ReferenceSet(m.d[...].x[...]) - self.assertIn((1,0), rs) - self.assertIn((3,0), rs) - self.assertNotIn((2,0), rs) + self.assertIn((1, 0), rs) + self.assertIn((3, 0), rs) + self.assertNotIn((2, 0), rs) self.assertEqual(len(rs), 4) - self.assertEqual(list(rs), [(1,0), (3,0), (3,1), (3,2)]) + self.assertEqual(list(rs), [(1, 0), (3, 0), (3, 1), (3, 2)]) # Test the SliceEllipsisError case (lookup into a jagged set # with an ellipsis) - m.e_index = Set(initialize=[2,(2,3)], dimen=None) + m.e_index = Set(initialize=[2, (2, 3)], dimen=None) + @m.Block(m.e_index) def e(b, *args): - b.x_index = Set(initialize=[1,(3,4)], dimen=None) + b.x_index = Set(initialize=[1, (3, 4)], dimen=None) b.x = Var(b.x_index) + rs = _ReferenceSet(m.e[...].x[...]) - self.assertIn((2,1), rs) - self.assertIn((2,3,1), rs) - self.assertIn((2,3,4), rs) - self.assertNotIn((2,3,5), rs) + self.assertIn((2, 1), rs) + self.assertIn((2, 3, 1), rs) + self.assertIn((2, 3, 4), rs) + self.assertNotIn((2, 3, 5), rs) self.assertEqual(len(rs), 4) - self.assertEqual(list(rs), [(2,1), (2,3,4), (2,3,1), (2,3,3,4)]) + self.assertEqual(list(rs), [(2, 1), (2, 3, 4), (2, 3, 1), (2, 3, 3, 4)]) # Make sure scalars and tuples work for jagged sets rs = _ReferenceSet(m.e[...]) @@ -375,35 +534,148 @@ def test_lookup_and_iter_sparse_data(self): self.assertEqual(len(rs), 9) self.assertEqual(len(rd), 0) - self.assertIn((1,1), rs) + self.assertIn((1, 1), rs) self.assertEqual(len(rd), 0) self.assertEqual(len(rs), 9) + def test_otdered_sorted_iter(self): + # Test ordered reference + m = ConcreteModel() + + @m.Block([2, 1], [4, 5]) + def b(b, i, j): + b.x = Var([8, 7], initialize=0) + + rs = _ReferenceSet(m.b[...].x[:]) + self.assertEqual( + list(rs), + [ + (2, 4, 8), + (2, 4, 7), + (2, 5, 8), + (2, 5, 7), + (1, 4, 8), + (1, 4, 7), + (1, 5, 8), + (1, 5, 7), + ], + ) + + rs = _ReferenceSet(m.b[...].x[:]) + self.assertEqual( + list(rs.ordered_iter()), + [ + (2, 4, 8), + (2, 4, 7), + (2, 5, 8), + (2, 5, 7), + (1, 4, 8), + (1, 4, 7), + (1, 5, 8), + (1, 5, 7), + ], + ) + + rs = _ReferenceSet(m.b[...].x[:]) + self.assertEqual( + list(rs.sorted_iter()), + [ + (1, 4, 7), + (1, 4, 8), + (1, 5, 7), + (1, 5, 8), + (2, 4, 7), + (2, 4, 8), + (2, 5, 7), + (2, 5, 8), + ], + ) + + # Test unordered reference + m = ConcreteModel() + m.I = FiniteSetOf([2, 1]) + m.J = FiniteSetOf([4, 5]) + m.K = FiniteSetOf([8, 7]) + + @m.Block(m.I, m.J) + def b(b, i, j): + b.x = Var(m.K, initialize=0) + + rs = _ReferenceSet(m.b[...].x[:]) + self.assertEqual( + list(rs), + [ + (2, 4, 8), + (2, 4, 7), + (2, 5, 8), + (2, 5, 7), + (1, 4, 8), + (1, 4, 7), + (1, 5, 8), + (1, 5, 7), + ], + ) + + rs = _ReferenceSet(m.b[...].x[:]) + self.assertEqual( + list(rs.ordered_iter()), + [ + (1, 4, 7), + (1, 4, 8), + (1, 5, 7), + (1, 5, 8), + (2, 4, 7), + (2, 4, 8), + (2, 5, 7), + (2, 5, 8), + ], + ) + + rs = _ReferenceSet(m.b[...].x[:]) + self.assertEqual( + list(rs.sorted_iter()), + [ + (1, 4, 7), + (1, 4, 8), + (1, 5, 7), + (1, 5, 8), + (2, 4, 7), + (2, 4, 8), + (2, 5, 7), + (2, 5, 8), + ], + ) class TestReference(unittest.TestCase): def test_constructor_error(self): m = ConcreteModel() - m.x = Var([1,2]) - class Foo(object): pass + m.x = Var([1, 2]) + + class Foo(object): + pass + self.assertRaisesRegex( TypeError, "First argument to Reference constructors must be a " r"component, component slice, Sequence, or Mapping \(received Foo", - Reference, Foo() - ) + Reference, + Foo(), + ) self.assertRaisesRegex( TypeError, "First argument to Reference constructors must be a " r"component, component slice, Sequence, or Mapping \(received int", - Reference, 5 - ) + Reference, + 5, + ) self.assertRaisesRegex( TypeError, "First argument to Reference constructors must be a " r"component, component slice, Sequence, or Mapping \(received None", - Reference, None - ) + Reference, + None, + ) def test_component_reference(self): m = ConcreteModel() @@ -413,7 +685,7 @@ def test_component_reference(self): self.assertIs(m.r.ctype, Var) self.assertIsNot(m.r.index_set(), m.x.index_set()) self.assertIs(m.x.index_set(), UnindexedComponent_set) - self.assertIs(type(m.r.index_set()), OrderedSetOf) + self.assertIs(m.r.index_set(), UnindexedComponent_ReferenceSet) self.assertEqual(len(m.r), 1) self.assertTrue(m.r.is_indexed()) self.assertIn(None, m.r) @@ -436,7 +708,7 @@ def test_component_reference(self): with self.assertRaises(KeyError): m.s[1] - m.y = Var([1,2]) + m.y = Var([1, 2]) m.t = Reference(m.y) self.assertIs(m.t.ctype, Var) @@ -452,13 +724,13 @@ def test_component_reference(self): def test_component_data_reference(self): m = ConcreteModel() - m.y = Var([1,2]) + m.y = Var([1, 2]) m.r = Reference(m.y[2]) self.assertIs(m.r.ctype, Var) self.assertIsNot(m.r.index_set(), m.y.index_set()) self.assertIs(m.y.index_set(), m.y_index) - self.assertIs(type(m.r.index_set()), OrderedSetOf) + self.assertIs(m.r.index_set(), UnindexedComponent_ReferenceSet) self.assertEqual(len(m.r), 1) self.assertTrue(m.r.is_reference()) self.assertTrue(m.r.is_indexed()) @@ -471,7 +743,7 @@ def test_component_data_reference(self): def test_component_data_reference_clone(self): m = ConcreteModel() m.b = Block() - m.b.x = Var([1,2]) + m.b.x = Var([1, 2]) m.c = Block() m.c.r1 = Reference(m.b.x[2]) m.c.r2 = Reference(m.b.x) @@ -493,56 +765,63 @@ def test_component_data_reference_clone(self): self.assertIs(i.d.r2[1], i.b.x[1]) self.assertIs(i.d.r2[2], i.b.x[2]) - def test_reference_var_pprint(self): m = ConcreteModel() - m.x = Var([1,2], initialize={1:4,2:8}) + m.x = Var([1, 2], initialize={1: 4, 2: 8}) m.r = Reference(m.x) buf = StringIO() m.r.pprint(ostream=buf) - self.assertEqual(buf.getvalue(), -"""r : Size=2, Index=x_index, ReferenceTo=x + self.assertEqual( + buf.getvalue(), + """r : Size=2, Index=x_index, ReferenceTo=x Key : Lower : Value : Upper : Fixed : Stale : Domain 1 : None : 4 : None : False : False : Reals 2 : None : 8 : None : False : False : Reals -""") - m.s = Reference(m.x[:,...]) +""", + ) + m.s = Reference(m.x[:, ...]) buf = StringIO() m.s.pprint(ostream=buf) - self.assertEqual(buf.getvalue(), -"""s : Size=2, Index=x_index, ReferenceTo=x[:, ...] + self.assertEqual( + buf.getvalue(), + """s : Size=2, Index=x_index, ReferenceTo=x[:, ...] Key : Lower : Value : Upper : Fixed : Stale : Domain 1 : None : 4 : None : False : False : Reals 2 : None : 8 : None : False : False : Reals -""") +""", + ) def test_reference_indexedcomponent_pprint(self): m = ConcreteModel() - m.x = Var([1,2], initialize={1:4,2:8}) + m.x = Var([1, 2], initialize={1: 4, 2: 8}) m.r = Reference(m.x, ctype=IndexedComponent) buf = StringIO() m.r.pprint(ostream=buf) - self.assertEqual(buf.getvalue(), -"""r : Size=2, Index=x_index, ReferenceTo=x + self.assertEqual( + buf.getvalue(), + """r : Size=2, Index=x_index, ReferenceTo=x Key : Object 1 : 2 : -""") - m.s = Reference(m.x[:,...], ctype=IndexedComponent) +""", + ) + m.s = Reference(m.x[:, ...], ctype=IndexedComponent) buf = StringIO() m.s.pprint(ostream=buf) - self.assertEqual(buf.getvalue(), -"""s : Size=2, Index=x_index, ReferenceTo=x[:, ...] + self.assertEqual( + buf.getvalue(), + """s : Size=2, Index=x_index, ReferenceTo=x[:, ...] Key : Object 1 : 2 : -""") +""", + ) def test_single_reference(self): m = ConcreteModel() - m.b = Block([1,2]) - m.b[1].x = Var(bounds=(1,None)) - m.b[2].x = Var(bounds=(2,None)) + m.b = Block([1, 2]) + m.b[1].x = Var(bounds=(1, None)) + m.b[2].x = Var(bounds=(2, None)) m.r = Reference(m.b[:].x) self.assertIs(m.r.ctype, Var) @@ -558,11 +837,12 @@ def test_single_reference(self): def test_nested_reference(self): m = ConcreteModel() - m.I = Set(initialize=[1,2]) - m.J = Set(initialize=[3,4]) + m.I = Set(initialize=[1, 2]) + m.J = Set(initialize=[3, 4]) + @m.Block(m.I) - def b(b,i): - b.x = Var(b.model().J, bounds=(i,None)) + def b(b, i): + b.x = Var(b.model().J, bounds=(i, None)) m.r = Reference(m.b[:].x[:]) @@ -570,133 +850,137 @@ def b(b,i): self.assertIsInstance(m.r.index_set(), SetProduct) self.assertIs(m.r.index_set().set_tuple[0], m.I) self.assertIs(m.r.index_set().set_tuple[1], m.J) - self.assertEqual(len(m.r), 2*2) - self.assertEqual(m.r[1,3].lb, 1) - self.assertEqual(m.r[2,4].lb, 2) - self.assertIn((1,3), m.r) - self.assertIn((2,4), m.r) + self.assertEqual(len(m.r), 2 * 2) + self.assertEqual(m.r[1, 3].lb, 1) + self.assertEqual(m.r[2, 4].lb, 2) + self.assertIn((1, 3), m.r) + self.assertIn((2, 4), m.r) self.assertNotIn(0, m.r) - self.assertNotIn((1,0), m.r) - self.assertNotIn((1,3,0), m.r) + self.assertNotIn((1, 0), m.r) + self.assertNotIn((1, 3, 0), m.r) with self.assertRaises(KeyError): m.r[0] def test_nested_reference_multidim_set(self): m = ConcreteModel() - m.I = Set(initialize=[1,2]) - m.J = Set(initialize=[(3,3),(4,4)]) + m.I = Set(initialize=[1, 2]) + m.J = Set(initialize=[(3, 3), (4, 4)]) + @m.Block(m.I) - def b(b,i): - b.x = Var(b.model().J, bounds=(i,None)) + def b(b, i): + b.x = Var(b.model().J, bounds=(i, None)) - m.r = Reference(m.b[:].x[:,:]) + m.r = Reference(m.b[:].x[:, :]) self.assertIs(m.r.ctype, Var) self.assertIsInstance(m.r.index_set(), SetProduct) self.assertIs(m.r.index_set().set_tuple[0], m.I) self.assertIs(m.r.index_set().set_tuple[1], m.J) - self.assertEqual(len(m.r), 2*2) - self.assertEqual(m.r[1,3,3].lb, 1) - self.assertEqual(m.r[2,4,4].lb, 2) - self.assertIn((1,3,3), m.r) - self.assertIn((2,4,4), m.r) + self.assertEqual(len(m.r), 2 * 2) + self.assertEqual(m.r[1, 3, 3].lb, 1) + self.assertEqual(m.r[2, 4, 4].lb, 2) + self.assertIn((1, 3, 3), m.r) + self.assertIn((2, 4, 4), m.r) self.assertNotIn(0, m.r) - self.assertNotIn((1,0), m.r) - self.assertNotIn((1,3,0), m.r) - self.assertNotIn((1,3,3,0), m.r) + self.assertNotIn((1, 0), m.r) + self.assertNotIn((1, 3, 0), m.r) + self.assertNotIn((1, 3, 3, 0), m.r) with self.assertRaises(KeyError): m.r[0] def test_nested_reference_partial_multidim_set(self): m = ConcreteModel() - m.I = Set(initialize=[1,2]) - m.J = Set(initialize=[(3,3),(4,4)]) + m.I = Set(initialize=[1, 2]) + m.J = Set(initialize=[(3, 3), (4, 4)]) + @m.Block(m.I) - def b(b,i): - b.x = Var(b.model().J, bounds=(i,None)) + def b(b, i): + b.x = Var(b.model().J, bounds=(i, None)) - m.r = Reference(m.b[:].x[3,:]) + m.r = Reference(m.b[:].x[3, :]) self.assertIs(m.r.ctype, Var) self.assertIs(type(m.r.index_set()), FiniteSetOf) - self.assertEqual(len(m.r), 2*1) - self.assertEqual(m.r[1,3].lb, 1) - self.assertEqual(m.r[2,3].lb, 2) - self.assertIn((1,3), m.r) - self.assertNotIn((2,4), m.r) + self.assertEqual(len(m.r), 2 * 1) + self.assertEqual(m.r[1, 3].lb, 1) + self.assertEqual(m.r[2, 3].lb, 2) + self.assertIn((1, 3), m.r) + self.assertNotIn((2, 4), m.r) self.assertNotIn(0, m.r) - self.assertNotIn((1,0), m.r) - self.assertNotIn((1,3,0), m.r) + self.assertNotIn((1, 0), m.r) + self.assertNotIn((1, 3, 0), m.r) with self.assertRaises(KeyError): m.r[0] def test_nested_reference_nonuniform_indexes(self): m = ConcreteModel() - m.I = Set(initialize=[1,2]) - m.J = Set(initialize=[3,4]) + m.I = Set(initialize=[1, 2]) + m.J = Set(initialize=[3, 4]) + @m.Block(m.I) - def b(b,i): - b.x = Var([3,4], bounds=(i,None)) + def b(b, i): + b.x = Var([3, 4], bounds=(i, None)) m.r = Reference(m.b[:].x[:]) self.assertIs(m.r.ctype, Var) self.assertIs(type(m.r.index_set()), FiniteSetOf) - self.assertEqual(len(m.r), 2*2) - self.assertEqual(m.r[1,3].lb, 1) - self.assertEqual(m.r[2,4].lb, 2) - self.assertIn((1,3), m.r) - self.assertIn((2,4), m.r) + self.assertEqual(len(m.r), 2 * 2) + self.assertEqual(m.r[1, 3].lb, 1) + self.assertEqual(m.r[2, 4].lb, 2) + self.assertIn((1, 3), m.r) + self.assertIn((2, 4), m.r) self.assertNotIn(0, m.r) - self.assertNotIn((1,0), m.r) - self.assertNotIn((1,3,0), m.r) + self.assertNotIn((1, 0), m.r) + self.assertNotIn((1, 3, 0), m.r) with self.assertRaises(KeyError): m.r[0] def test_nested_reference_nondimen_set(self): m = ConcreteModel() - m.I = Set(initialize=[1,2]) - m.J = Set(initialize=[3,4], dimen=None) + m.I = Set(initialize=[1, 2]) + m.J = Set(initialize=[3, 4], dimen=None) + @m.Block(m.I) - def b(b,i): - b.x = Var(b.model().J, bounds=(i,None)) + def b(b, i): + b.x = Var(b.model().J, bounds=(i, None)) m.r = Reference(m.b[:].x[:]) self.assertIs(m.r.ctype, Var) self.assertIs(type(m.r.index_set()), FiniteSetOf) - self.assertEqual(len(m.r), 2*2) - self.assertEqual(m.r[1,3].lb, 1) - self.assertEqual(m.r[2,4].lb, 2) - self.assertIn((1,3), m.r) - self.assertIn((2,4), m.r) + self.assertEqual(len(m.r), 2 * 2) + self.assertEqual(m.r[1, 3].lb, 1) + self.assertEqual(m.r[2, 4].lb, 2) + self.assertIn((1, 3), m.r) + self.assertIn((2, 4), m.r) self.assertNotIn(0, m.r) - self.assertNotIn((1,0), m.r) - self.assertNotIn((1,3,0), m.r) + self.assertNotIn((1, 0), m.r) + self.assertNotIn((1, 3, 0), m.r) with self.assertRaises(KeyError): m.r[0] def test_nested_reference_nonuniform_index_size(self): m = ConcreteModel() - m.I = Set(initialize=[1,2]) - m.J = Set(initialize=[3,4]) + m.I = Set(initialize=[1, 2]) + m.J = Set(initialize=[3, 4]) m.b = Block(m.I) - m.b[1].x = Var([(3,3),(3,4),(4,3),(4,4)], bounds=(1,None)) - m.b[2].x = Var(m.J, m.J, bounds=(2,None)) + m.b[1].x = Var([(3, 3), (3, 4), (4, 3), (4, 4)], bounds=(1, None)) + m.b[2].x = Var(m.J, m.J, bounds=(2, None)) - m.r = Reference(m.b[:].x[:,:]) + m.r = Reference(m.b[:].x[:, :]) self.assertIs(m.r.ctype, Var) self.assertIs(type(m.r.index_set()), FiniteSetOf) - self.assertEqual(len(m.r), 2*2*2) - self.assertEqual(m.r[1,3,3].lb, 1) - self.assertEqual(m.r[2,4,3].lb, 2) - self.assertIn((1,3,3), m.r) - self.assertIn((2,4,4), m.r) + self.assertEqual(len(m.r), 2 * 2 * 2) + self.assertEqual(m.r[1, 3, 3].lb, 1) + self.assertEqual(m.r[2, 4, 3].lb, 2) + self.assertIn((1, 3, 3), m.r) + self.assertIn((2, 4, 4), m.r) self.assertNotIn(0, m.r) - self.assertNotIn((1,0), m.r) - self.assertNotIn((1,3,0), m.r) - self.assertNotIn((1,3,3,0), m.r) + self.assertNotIn((1, 0), m.r) + self.assertNotIn((1, 3, 0), m.r) + self.assertNotIn((1, 3, 3, 0), m.r) with self.assertRaises(KeyError): m.r[0] @@ -714,14 +998,14 @@ def test_nested_scalars(self): def test_ctype_detection(self): m = ConcreteModel() - m.js = Set(initialize=[1, (2,3)], dimen=None) - m.b = Block([1,2]) + m.js = Set(initialize=[1, (2, 3)], dimen=None) + m.b = Block([1, 2]) m.b[1].x = Var(m.js) m.b[1].y = Var() - m.b[1].z = Var([1,2]) + m.b[1].z = Var([1, 2]) m.b[2].x = Param(initialize=0) m.b[2].y = Var() - m.b[2].z = Var([1,2]) + m.b[2].z = Var([1, 2]) m.x = Reference(m.b[:].x[...]) self.assertIs(type(m.x), IndexedComponent) @@ -744,34 +1028,36 @@ def test_reference_to_sparse(self): m.xx = Reference(m.x[...], ctype=Var) self.assertEqual(len(m.x), 0) - self.assertNotIn((1,1), m.x) - self.assertNotIn((1,1), m.xx) - self.assertIn((1,1), m.x.index_set()) - self.assertIn((1,1), m.xx.index_set()) + self.assertNotIn((1, 1), m.x) + self.assertNotIn((1, 1), m.xx) + self.assertIn((1, 1), m.x.index_set()) + self.assertIn((1, 1), m.xx.index_set()) self.assertEqual(len(m.x), 0) - m.xx[1,2] + m.xx[1, 2] self.assertEqual(len(m.x), 1) - self.assertIs(m.xx[1,2], m.x[1,2]) + self.assertIs(m.xx[1, 2], m.x[1, 2]) self.assertEqual(len(m.x), 1) - m.xx[1,3] = 5 + m.xx[1, 3] = 5 self.assertEqual(len(m.x), 2) - self.assertIs(m.xx[1,3], m.x[1,3]) + self.assertIs(m.xx[1, 3], m.x[1, 3]) self.assertEqual(len(m.x), 2) - self.assertEqual(value(m.x[1,3]), 5) + self.assertEqual(value(m.x[1, 3]), 5) - m.xx.add((1,1)) + m.xx.add((1, 1)) self.assertEqual(len(m.x), 3) - self.assertIs(m.xx[1,1], m.x[1,1]) + self.assertIs(m.xx[1, 1], m.x[1, 1]) self.assertEqual(len(m.x), 3) def test_nested_reference_to_sparse(self): m = ConcreteModel() m.I = Set(initialize=[1]) + @m.Block(m.I) def b(b, i): b.x = Var(b.model().I, dense=False) + m.xx = Reference(m.b[:].x[:], ctype=Var) m.I.add(2) m.I.add(3) @@ -779,31 +1065,31 @@ def b(b, i): self.assertEqual(len(m.b), 1) self.assertEqual(len(m.b[1].x), 0) self.assertIn(1, m.b) - self.assertNotIn((1,1), m.xx) + self.assertNotIn((1, 1), m.xx) self.assertIn(1, m.b[1].x.index_set()) - self.assertIn((1,1), m.xx.index_set()) + self.assertIn((1, 1), m.xx.index_set()) self.assertEqual(len(m.b), 1) self.assertEqual(len(m.b[1].x), 0) - m.xx[1,2] + m.xx[1, 2] self.assertEqual(len(m.b), 1) self.assertEqual(len(m.b[1].x), 1) - self.assertIs(m.xx[1,2], m.b[1].x[2]) + self.assertIs(m.xx[1, 2], m.b[1].x[2]) self.assertEqual(len(m.b), 1) self.assertEqual(len(m.b[1].x), 1) - m.xx[1,3] = 5 + m.xx[1, 3] = 5 self.assertEqual(len(m.b), 1) self.assertEqual(len(m.b[1].x), 2) - self.assertIs(m.xx[1,3], m.b[1].x[3]) + self.assertIs(m.xx[1, 3], m.b[1].x[3]) self.assertEqual(value(m.b[1].x[3]), 5) self.assertEqual(len(m.b), 1) self.assertEqual(len(m.b[1].x), 2) - m.xx.add((1,1)) + m.xx.add((1, 1)) self.assertEqual(len(m.b), 1) self.assertEqual(len(m.b[1].x), 3) - self.assertIs(m.xx[1,1], m.b[1].x[1]) + self.assertIs(m.xx[1, 1], m.b[1].x[1]) self.assertEqual(len(m.b), 1) self.assertEqual(len(m.b[1].x), 3) @@ -813,20 +1099,22 @@ def b(b, i): # b[2] block data, fire the rule, and then add the new value to # the Var x. self.assertEqual(len(m.xx), 3) - m.xx[2,2] = 10 + m.xx[2, 2] = 10 self.assertEqual(len(m.b), 2) self.assertEqual(len(list(m.b[2].component_objects())), 1) self.assertEqual(len(m.xx), 4) - self.assertIs(m.xx[2,2], m.b[2].x[2]) + self.assertIs(m.xx[2, 2], m.b[2].x[2]) self.assertEqual(value(m.b[2].x[2]), 10) def test_insert_var(self): m = ConcreteModel() - m.T = Set(initialize=[1,5]) - m.x = Var(m.T, initialize=lambda m,i: i) + m.T = Set(initialize=[1, 5]) + m.x = Var(m.T, initialize=lambda m, i: i) + @m.Block(m.T) def b(b, i): - b.y = Var(initialize=lambda b: 10*b.index()) + b.y = Var(initialize=lambda b: 10 * b.index()) + ref_x = Reference(m.x[:]) ref_y = Reference(m.b[:].y) @@ -857,46 +1145,48 @@ def b(b, i): def test_reference_to_dict(self): m = ConcreteModel() m.x = Var() - m.y = Var([1,2,3]) + m.y = Var([1, 2, 3]) m.r = Reference({1: m.x, 'a': m.y[2], 3: m.y[1]}) self.assertFalse(m.r.index_set().isordered()) self.assertEqual(len(m.r), 3) - self.assertEqual(set(m.r.keys()), {1,3,'a'}) - self.assertEqual( ComponentSet(m.r.values()), - ComponentSet([m.x, m.y[2], m.y[1]]) ) + self.assertEqual(set(m.r.keys()), {1, 3, 'a'}) + self.assertEqual( + ComponentSet(m.r.values()), ComponentSet([m.x, m.y[2], m.y[1]]) + ) # You can delete something from the reference del m.r[1] self.assertEqual(len(m.r), 2) - self.assertEqual(set(m.r.keys()), {3,'a'}) - self.assertEqual( ComponentSet(m.r.values()), - ComponentSet([m.y[2], m.y[1]]) ) + self.assertEqual(set(m.r.keys()), {3, 'a'}) + self.assertEqual(ComponentSet(m.r.values()), ComponentSet([m.y[2], m.y[1]])) # But not add it back with self.assertRaisesRegex( - KeyError, "Index '1' is not valid for indexed component 'r'"): + KeyError, "Index '1' is not valid for indexed component 'r'" + ): m.r[1] = m.x def test_reference_to_list(self): m = ConcreteModel() m.x = Var() - m.y = Var([1,2,3]) + m.y = Var([1, 2, 3]) m.r = Reference([m.x, m.y[2], m.y[1]]) self.assertTrue(m.r.index_set().isordered()) self.assertEqual(len(m.r), 3) - self.assertEqual(list(m.r.keys()), [0,1,2]) + self.assertEqual(list(m.r.keys()), [0, 1, 2]) self.assertEqual(list(m.r.values()), [m.x, m.y[2], m.y[1]]) # You can delete something from the reference del m.r[1] self.assertEqual(len(m.r), 2) - self.assertEqual(list(m.r.keys()), [0,2]) + self.assertEqual(list(m.r.keys()), [0, 2]) self.assertEqual(list(m.r.values()), [m.x, m.y[1]]) # But not add it back with self.assertRaisesRegex( - KeyError, "Index '1' is not valid for indexed component 'r'"): + KeyError, "Index '1' is not valid for indexed component 'r'" + ): m.r[1] = m.x def test_reference_to_set(self): m = ConcreteModel() - m.I = Set(initialize=[1,3,5]) + m.I = Set(initialize=[1, 3, 5]) m.r = Reference(m.I) self.assertEqual(len(m.r), 1) self.assertEqual(list(m.r.keys()), [None]) @@ -906,7 +1196,7 @@ def test_reference_to_set(self): # Test that a referent Set containing None doesn't break the # None index m = ConcreteModel() - m.I = Set(initialize=[1,3,None,5]) + m.I = Set(initialize=[1, 3, None, 5]) m.r = Reference(m.I) self.assertEqual(len(m.r), 1) self.assertEqual(list(m.r.keys()), [None]) @@ -916,7 +1206,7 @@ def test_reference_to_set(self): def test_is_reference(self): m = ConcreteModel() m.v0 = Var() - m.v1 = Var([1,2,3]) + m.v1 = Var([1, 2, 3]) m.ref0 = Reference(m.v0) m.ref1 = Reference(m.v1) @@ -930,37 +1220,31 @@ def test_is_reference(self): self.assertTrue(m.ref1.is_reference()) self.assertTrue(m.ref2.is_reference()) - unique_vars = list( - v for v in m.component_objects(Var) if not v.is_reference()) + unique_vars = list(v for v in m.component_objects(Var) if not v.is_reference()) self.assertEqual(len(unique_vars), 2) def test_referent(self): m = ConcreteModel() m.v0 = Var() - m.v2 = Var([1, 2, 3],['a', 'b']) + m.v2 = Var([1, 2, 3], ['a', 'b']) varlist = [m.v2[1, 'a'], m.v2[1, 'b']] - vardict = { - 0: m.v0, - 1: m.v2[1, 'a'], - 2: m.v2[2, 'a'], - 3: m.v2[3, 'a'], - } + vardict = {0: m.v0, 1: m.v2[1, 'a'], 2: m.v2[2, 'a'], 3: m.v2[3, 'a']} scalar_ref = Reference(m.v0) self.assertIs(scalar_ref.referent, m.v0) - sliced_ref = Reference(m.v2[:,'a']) + sliced_ref = Reference(m.v2[:, 'a']) referent = sliced_ref.referent self.assertIs(type(referent), IndexedComponent_slice) self.assertEqual(len(referent._call_stack), 1) call, info = referent._call_stack[0] self.assertEqual(call, IndexedComponent_slice.slice_info) self.assertIs(info[0], m.v2) - self.assertEqual(info[1], {1: 'a'}) # Fixed - self.assertEqual(info[2], {0: slice(None)}) # Sliced - self.assertIs(info[3], None) # Ellipsis + self.assertEqual(info[1], {1: 'a'}) # Fixed + self.assertEqual(info[2], {0: slice(None)}) # Sliced + self.assertIs(info[3], None) # Ellipsis list_ref = Reference(varlist) self.assertIs(list_ref.referent, varlist) @@ -976,9 +1260,9 @@ def test_UnknownSetDimen(self): m.v = Var(m.thinga | m.thingb) self.assertIs(m.v.dim(), UnknownSetDimen) with self.assertRaisesRegex( - IndexError, - 'Slicing components relies on knowing the underlying ' - 'set dimensionality'): + IndexError, + 'Slicing components relies on knowing the underlying set dimensionality', + ): Reference(m.v) def test_contains_with_nonflattened(self): @@ -987,45 +1271,86 @@ def test_contains_with_nonflattened(self): try: normalize_index.flatten = False m = ConcreteModel() - m.d1 = Set(initialize=[1,2]) + m.d1 = Set(initialize=[1, 2]) m.d2 = Set(initialize=[('a', 1), ('b', 2)]) m.v = Var(m.d2, m.d1) - m.ref = Reference(m.v[:,1]) + m.ref = Reference(m.v[:, 1]) self.assertIn(('a', 1), m.ref) self.assertNotIn(('a', 10), m.ref) finally: normalize_index.flatten = _old_flatten def test_pprint_nonfinite_sets(self): - # test issue #2039 self.maxDiff = None m = ConcreteModel() m.v = Var(NonNegativeIntegers, dense=False) m.ref = Reference(m.v) buf = StringIO() m.pprint(ostream=buf) - self.assertEqual(buf.getvalue().strip(), """ + self.assertEqual( + buf.getvalue().strip(), + """ +2 Var Declarations + ref : Size=0, Index=NonNegativeIntegers, ReferenceTo=v + Key : Lower : Value : Upper : Fixed : Stale : Domain + v : Size=0, Index=NonNegativeIntegers + Key : Lower : Value : Upper : Fixed : Stale : Domain + +2 Declarations: v ref +""".strip(), + ) + + m.v[3] + m.ref[5] + buf = StringIO() + m.pprint(ostream=buf) + self.assertEqual( + buf.getvalue().strip(), + """ +2 Var Declarations + ref : Size=2, Index=NonNegativeIntegers, ReferenceTo=v + Key : Lower : Value : Upper : Fixed : Stale : Domain + 3 : None : None : None : False : True : Reals + 5 : None : None : None : False : True : Reals + v : Size=2, Index=NonNegativeIntegers + Key : Lower : Value : Upper : Fixed : Stale : Domain + 3 : None : None : None : False : True : Reals + 5 : None : None : None : False : True : Reals + +2 Declarations: v ref +""".strip(), + ) + + def test_pprint_nonfinite_sets_ctypeNone(self): + # test issue #2039 + self.maxDiff = None + m = ConcreteModel() + m.v = Var(NonNegativeIntegers, dense=False) + m.ref = Reference(m.v, ctype=None) + buf = StringIO() + m.pprint(ostream=buf) + self.assertEqual( + buf.getvalue().strip(), + """ 1 Var Declarations v : Size=0, Index=NonNegativeIntegers Key : Lower : Value : Upper : Fixed : Stale : Domain 1 IndexedComponent Declarations - ref : Size=0, Index=ref_index, ReferenceTo=v + ref : Size=0, Index=NonNegativeIntegers, ReferenceTo=v Key : Object -1 SetOf Declarations - ref_index : Dimen=0, Size=0, Bounds=(None, None) - Key : Ordered : Members - None : False : ReferenceSet(v[...]) - -3 Declarations: v ref_index ref -""".strip()) +2 Declarations: v ref +""".strip(), + ) m.v[3] m.ref[5] buf = StringIO() m.pprint(ostream=buf) - self.assertEqual(buf.getvalue().strip(), """ + self.assertEqual( + buf.getvalue().strip(), + """ 1 Var Declarations v : Size=2, Index=NonNegativeIntegers Key : Lower : Value : Upper : Fixed : Stale : Domain @@ -1033,35 +1358,37 @@ def test_pprint_nonfinite_sets(self): 5 : None : None : None : False : True : Reals 1 IndexedComponent Declarations - ref : Size=2, Index=ref_index, ReferenceTo=v + ref : Size=2, Index=NonNegativeIntegers, ReferenceTo=v Key : Object 3 : 5 : -1 SetOf Declarations - ref_index : Dimen=1, Size=2, Bounds=(3, 5) - Key : Ordered : Members - None : False : ReferenceSet(v[...]) - -3 Declarations: v ref_index ref -""".strip()) +2 Declarations: v ref +""".strip(), + ) def test_pprint_nested(self): m = ConcreteModel() - @m.Block([1,2]) - def b(b,i): - b.x = Var([3,4], bounds=(i,None)) + + @m.Block([1, 2]) + def b(b, i): + b.x = Var([3, 4], bounds=(i, None)) + m.r = Reference(m.b[:].x[:]) buf = StringIO() m.r.pprint(ostream=buf) - self.assertEqual(buf.getvalue().strip(), """ + self.assertEqual( + buf.getvalue().strip(), + """ r : Size=4, Index=r_index, ReferenceTo=b[:].x[:] Key : Lower : Value : Upper : Fixed : Stale : Domain (1, 3) : 1 : None : None : False : True : Reals (1, 4) : 1 : None : None : False : True : Reals (2, 3) : 2 : None : None : False : True : Reals (2, 4) : 2 : None : None : False : True : Reals -""".strip()) +""".strip(), + ) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_logical_expr.py b/pyomo/core/tests/unit/test_relational_expr.py similarity index 82% rename from pyomo/core/tests/unit/test_logical_expr.py rename to pyomo/core/tests/unit/test_relational_expr.py index bec2668bd4d..f55bfff108c 100644 --- a/pyomo/core/tests/unit/test_logical_expr.py +++ b/pyomo/core/tests/unit/test_relational_expr.py @@ -17,19 +17,30 @@ import io import sys from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest -from pyomo.environ import AbstractModel, ConcreteModel, Set, Var, Param, Constraint, inequality, display -import pyomo.core.expr.logical_expr as logical_expr +from pyomo.environ import ( + AbstractModel, + ConcreteModel, + Set, + Var, + Param, + Constraint, + inequality, + display, +) from pyomo.core.expr.numvalue import value -from pyomo.core.expr.logical_expr import ( - InequalityExpression, EqualityExpression, RangedExpression, +from pyomo.core.expr.relational_expr import ( + InequalityExpression, + EqualityExpression, + RangedExpression, ) -class TestGenerate_RelationalExpression(unittest.TestCase): +class TestGenerate_RelationalExpression(unittest.TestCase): def setUp(self): m = AbstractModel() m.I = Set() @@ -64,51 +75,30 @@ def test_equalityErrors(self): # = 5 # / \ # a b - # Python 2.7 supports better testing of exceptions - if sys.hexversion >= 0x02070000: - self.assertRaisesRegex(TypeError, "EqualityExpression .*" - "sub-expressions is a relational", - e.__eq__, m.a) - self.assertRaisesRegex(TypeError, "EqualityExpression .*" - "sub-expressions is a relational", - m.a.__eq__, e) - - # NB: cannot test the reverse here: _VarArray (correctly) - # does not define __eq__ - self.assertRaisesRegex(TypeError, "Argument .*" - "is an indexed numeric value", - m.a.__eq__, m.x) - else: - self.assertRaises(TypeError, e.__eq__, m.a) - self.assertRaises(TypeError, m.a.__eq__, e) - self.assertRaises(TypeError, m.a.__eq__, m.x) - - try: + with self.assertRaisesRegex( + TypeError, + "Attempting to use a non-numeric type " + r"\(EqualityExpression\) in a numeric expression context.", + ): e == m.a - self.fail("expected nested equality expression to raise TypeError") - except TypeError: - pass - - try: + with self.assertRaisesRegex( + TypeError, + "Attempting to use a non-numeric type " + r"\(EqualityExpression\) in a numeric expression context.", + ): m.a == e - self.fail("expected nested equality expression to raise TypeError") - except TypeError: - pass # # Test expression with an indexed variable # - try: + with self.assertRaisesRegex( + TypeError, "Argument .* is an indexed numeric value" + ): m.x == m.a - self.fail("expected use of indexed variable to raise TypeError") - except TypeError: - pass - - try: + with self.assertRaisesRegex( + TypeError, "Argument .* is an indexed numeric value" + ): m.a == m.x - self.fail("expected use of indexed variable to raise TypeError") - except TypeError: - pass def test_simpleInequality1(self): # @@ -123,7 +113,7 @@ def test_simpleInequality1(self): self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.a) self.assertIs(e.arg(1), m.b) - #self.assertEqual(len(e._strict), 1) + # self.assertEqual(len(e._strict), 1) self.assertEqual(e._strict, True) # <= @@ -134,7 +124,7 @@ def test_simpleInequality1(self): self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.a) self.assertIs(e.arg(1), m.b) - #self.assertEqual(len(e._strict), 1) + # self.assertEqual(len(e._strict), 1) self.assertEqual(e._strict, False) # > @@ -145,7 +135,7 @@ def test_simpleInequality1(self): self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.b) self.assertIs(e.arg(1), m.a) - #self.assertEqual(len(e._strict), 1) + # self.assertEqual(len(e._strict), 1) self.assertEqual(e._strict, True) # >= @@ -156,7 +146,7 @@ def test_simpleInequality1(self): self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.b) self.assertIs(e.arg(1), m.a) - #self.assertEqual(len(e._strict), 1) + # self.assertEqual(len(e._strict), 1) self.assertEqual(e._strict, False) def test_simpleInequality2(self): @@ -172,7 +162,7 @@ def test_simpleInequality2(self): self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.a) self.assertIs(e.arg(1), m.b) - #self.assertEqual(len(e._strict), 1) + # self.assertEqual(len(e._strict), 1) self.assertEqual(e._strict, True) # <= @@ -183,7 +173,7 @@ def test_simpleInequality2(self): self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.a) self.assertIs(e.arg(1), m.b) - #self.assertEqual(len(e._strict), 1) + # self.assertEqual(len(e._strict), 1) self.assertEqual(e._strict, False) # > @@ -194,7 +184,7 @@ def test_simpleInequality2(self): self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.b) self.assertIs(e.arg(1), m.a) - #self.assertEqual(len(e._strict), 1) + # self.assertEqual(len(e._strict), 1) self.assertEqual(e._strict, True) # >= @@ -206,7 +196,7 @@ def test_simpleInequality2(self): self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.b) self.assertIs(e.arg(1), m.a) - #self.assertEqual(len(e._strict), 1) + # self.assertEqual(len(e._strict), 1) self.assertEqual(e._strict, False) try: @@ -223,7 +213,6 @@ def test_simpleInequality2(self): class TestGenerate_RangedExpression(unittest.TestCase): - def setUp(self): m = AbstractModel() m.I = Set() @@ -249,7 +238,7 @@ def test_compoundInequality(self): self.assertIs(e.arg(0), m.a) self.assertIs(e.arg(1), m.b) self.assertIs(e.arg(2), m.c) - #self.assertEqual(len(e._strict), 2) + # self.assertEqual(len(e._strict), 2) self.assertEqual(e._strict[0], True) self.assertEqual(e._strict[1], True) @@ -264,7 +253,7 @@ def test_compoundInequality(self): self.assertIs(e.arg(0), m.a) self.assertIs(e.arg(1), m.b) self.assertIs(e.arg(2), m.c) - #self.assertEqual(len(e._strict), 2) + # self.assertEqual(len(e._strict), 2) self.assertEqual(e._strict[0], False) self.assertEqual(e._strict[1], False) @@ -279,7 +268,7 @@ def test_compoundInequality(self): self.assertIs(e.arg(2), m.c) self.assertIs(e.arg(1), m.b) self.assertIs(e.arg(0), m.a) - #self.assertEqual(len(e._strict), 2) + # self.assertEqual(len(e._strict), 2) self.assertEqual(e._strict[0], True) self.assertEqual(e._strict[1], True) @@ -294,7 +283,7 @@ def test_compoundInequality(self): self.assertIs(e.arg(2), m.c) self.assertIs(e.arg(1), m.b) self.assertIs(e.arg(0), m.a) - #self.assertEqual(len(e._strict), 2) + # self.assertEqual(len(e._strict), 2) self.assertEqual(e._strict[0], False) self.assertEqual(e._strict[1], False) @@ -309,7 +298,7 @@ def test_compoundInequality(self): self.assertIs(e.arg(2), 0) self.assertIs(e.arg(1), m.a) self.assertIs(e.arg(0), 0) - #self.assertEqual(len(e._strict), 2) + # self.assertEqual(len(e._strict), 2) self.assertEqual(e._strict[0], False) self.assertEqual(e._strict[1], False) @@ -324,7 +313,7 @@ def test_compoundInequality(self): self.assertIs(e.arg(2), 0) self.assertIs(e.arg(1), m.a) self.assertIs(e.arg(0), 0) - #self.assertEqual(len(e._strict), 2) + # self.assertEqual(len(e._strict), 2) self.assertEqual(e._strict[0], True) self.assertEqual(e._strict[1], True) @@ -357,12 +346,12 @@ def test_val2(self): # PotentiallyVariable - Expr contains one or more variables # class TestIsFixedIsConstant(unittest.TestCase): - def setUp(self): # This class tests the Pyomo 5.x expression trees def d_fn(model): - return model.c+model.c + return model.c + model.c + self.model = AbstractModel() self.model.a = Var(initialize=1.0) self.model.b = Var(initialize=2.0) @@ -440,27 +429,27 @@ def test_relational_ops(self): class TestMultiArgumentExpressions(unittest.TestCase): - def test_double_sided_ineq(self): m = ConcreteModel() - m.s = Set(initialize=[1.0,2.0,3.0,4.0,5.0]) + m.s = Set(initialize=[1.0, 2.0, 3.0, 4.0, 5.0]) - m.vmin = Param(m.s, initialize=lambda m,i: i) - m.vmax = Param(m.s, initialize=lambda m,i: i**2) + m.vmin = Param(m.s, initialize=lambda m, i: i) + m.vmax = Param(m.s, initialize=lambda m, i: i**2) m.v = Var(m.s) def _con(m, i): - return inequality(m.vmin[i]**2, m.v[i], m.vmax[i]**2) + return inequality(m.vmin[i] ** 2, m.v[i], m.vmax[i] ** 2) + m.con = Constraint(m.s, rule=_con) OUT = io.StringIO() for i in m.s: - OUT.write(str(_con(m,i))) + OUT.write(str(_con(m, i))) OUT.write("\n") display(m.con, ostream=OUT) - reference="""1.0 <= v[1.0] <= 1.0 + reference = """1.0 <= v[1.0] <= 1.0 4.0 <= v[2.0] <= 16.0 9.0 <= v[3.0] <= 81.0 16.0 <= v[4.0] <= 256.0 @@ -480,8 +469,6 @@ def _con(m, i): # Test pickle logic # class Test_pickle(unittest.TestCase): - - def test_ineq(self): M = ConcreteModel() M.v = Var() diff --git a/pyomo/core/tests/unit/test_set.py b/pyomo/core/tests/unit/test_set.py index e20ee06390d..4263bdef153 100644 --- a/pyomo/core/tests/unit/test_set.py +++ b/pyomo/core/tests/unit/test_set.py @@ -31,37 +31,81 @@ import pyomo.core.base.set as SetModule from pyomo.core.base.indexed_component import normalize_index from pyomo.core.base.initializer import ( - ConstantInitializer, ItemInitializer, IndexedCallInitializer, + ConstantInitializer, + ItemInitializer, + IndexedCallInitializer, ) from pyomo.core.base.set import ( - NumericRange as NR, NonNumericRange as NNR, - AnyRange, _AnySet, Any, AnyWithNone, _EmptySet, EmptySet, Binary, - Reals, NonNegativeReals, PositiveReals, NonPositiveReals, NegativeReals, - Integers, PositiveIntegers, NegativeIntegers, + NumericRange as NR, + NonNumericRange as NNR, + AnyRange, + _AnySet, + Any, + AnyWithNone, + _EmptySet, + EmptySet, + Binary, + Reals, + NonNegativeReals, + PositiveReals, + NonPositiveReals, + NegativeReals, + Integers, + PositiveIntegers, + NegativeIntegers, NonNegativeIntegers, Set, - SetOf, OrderedSetOf, FiniteSetOf, InfiniteSetOf, - RangeSet, _FiniteRangeSetData, _InfiniteRangeSetData, - FiniteScalarRangeSet, InfiniteScalarRangeSet, + SetOf, + OrderedSetOf, + FiniteSetOf, + InfiniteSetOf, + RangeSet, + _FiniteRangeSetData, + _InfiniteRangeSetData, + FiniteScalarRangeSet, + InfiniteScalarRangeSet, AbstractFiniteScalarRangeSet, - SetUnion_InfiniteSet, SetUnion_FiniteSet, SetUnion_OrderedSet, - SetIntersection_InfiniteSet, SetIntersection_FiniteSet, + SetUnion_InfiniteSet, + SetUnion_FiniteSet, + SetUnion_OrderedSet, + SetIntersection_InfiniteSet, + SetIntersection_FiniteSet, SetIntersection_OrderedSet, - SetDifference_InfiniteSet, SetDifference_FiniteSet, + SetDifference_InfiniteSet, + SetDifference_FiniteSet, SetDifference_OrderedSet, - SetSymmetricDifference_InfiniteSet, SetSymmetricDifference_FiniteSet, + SetSymmetricDifference_InfiniteSet, + SetSymmetricDifference_FiniteSet, SetSymmetricDifference_OrderedSet, - SetProduct, SetProduct_InfiniteSet, SetProduct_FiniteSet, + SetProduct, + SetProduct_InfiniteSet, + SetProduct_FiniteSet, SetProduct_OrderedSet, - _SetData, _FiniteSetData, _InsertionOrderSetData, _SortedSetData, - _FiniteSetMixin, _OrderedSetMixin, - SetInitializer, SetIntersectInitializer, BoundsInitializer, - UnknownSetDimen, UnindexedComponent_set, - DeclareGlobalSet, IntegerSet, RealSet, - simple_set_rule, set_options, - ) + _SetData, + _FiniteSetData, + _InsertionOrderSetData, + _SortedSetData, + _FiniteSetMixin, + _OrderedSetMixin, + SetInitializer, + SetIntersectInitializer, + BoundsInitializer, + UnknownSetDimen, + UnindexedComponent_set, + DeclareGlobalSet, + IntegerSet, + RealSet, + simple_set_rule, + set_options, +) from pyomo.environ import ( - AbstractModel, ConcreteModel, Block, Var, Param, Suffix, Constraint, + AbstractModel, + ConcreteModel, + Block, + Var, + Param, + Suffix, + Constraint, Objective, ) @@ -71,19 +115,19 @@ def test_single_set(self): a = SetInitializer(None) self.assertIs(type(a), SetInitializer) self.assertIsNone(a._set) - self.assertIs(a(None,None), Any) + self.assertIs(a(None, None), Any) self.assertTrue(a.constant()) self.assertFalse(a.verified) a = SetInitializer(Reals) self.assertIs(type(a), SetInitializer) self.assertIs(type(a._set), ConstantInitializer) - self.assertIs(a(None,None), Reals) + self.assertIs(a(None, None), Reals) self.assertIs(a._set.val, Reals) self.assertTrue(a.constant()) self.assertFalse(a.verified) - a = SetInitializer({1:Reals}) + a = SetInitializer({1: Reals}) self.assertIs(type(a), SetInitializer) self.assertIs(type(a._set), ItemInitializer) self.assertIs(a(None, 1), Reals) @@ -97,7 +141,7 @@ def test_intersect(self): self.assertIsNone(a._set) self.assertTrue(a.constant()) self.assertFalse(a.verified) - self.assertIs(a(None,None), Any) + self.assertIs(a(None, None), Any) a = SetInitializer(None) a.intersect(SetInitializer(Reals)) @@ -106,7 +150,7 @@ def test_intersect(self): self.assertIs(a._set.val, Reals) self.assertTrue(a.constant()) self.assertFalse(a.verified) - self.assertIs(a(None,None), Reals) + self.assertIs(a(None, None), Reals) a = SetInitializer(None) a.intersect(BoundsInitializer(5, default_step=1)) @@ -114,7 +158,7 @@ def test_intersect(self): self.assertIs(type(a._set), BoundsInitializer) self.assertTrue(a.constant()) self.assertFalse(a.verified) - self.assertEqual(a(None,None), RangeSet(5)) + self.assertEqual(a(None, None), RangeSet(5)) a = SetInitializer(Reals) a.intersect(SetInitializer(None)) @@ -123,7 +167,7 @@ def test_intersect(self): self.assertIs(a._set.val, Reals) self.assertTrue(a.constant()) self.assertFalse(a.verified) - self.assertIs(a(None,None), Reals) + self.assertIs(a(None, None), Reals) a = SetInitializer(Reals) a.intersect(SetInitializer(Integers)) @@ -135,7 +179,7 @@ def test_intersect(self): self.assertIs(a._set._B.val, Integers) self.assertTrue(a.constant()) self.assertFalse(a.verified) - s = a(None,None) + s = a(None, None) self.assertIs(type(s), SetIntersection_InfiniteSet) self.assertIs(s._sets[0], Reals) self.assertIs(s._sets[1], Integers) @@ -151,7 +195,7 @@ def test_intersect(self): self.assertIs(a._set._A._B.val, Integers) self.assertTrue(a.constant()) self.assertFalse(a.verified) - s = a(None,None) + s = a(None, None) self.assertIs(type(s), SetIntersection_OrderedSet) self.assertIs(type(s._sets[0]), SetIntersection_InfiniteSet) self.assertIsInstance(s._sets[1], RangeSet) @@ -168,7 +212,7 @@ def test_intersect(self): self.assertIs(a._set._A._B.val, Integers) self.assertTrue(a.constant()) self.assertFalse(a.verified) - s = a(None,None) + s = a(None, None) self.assertIs(type(s), SetIntersection_InfiniteSet) p.construct() s.construct() @@ -181,7 +225,7 @@ def test_intersect(self): p = Param(initialize=3) a = SetInitializer(Reals) - a.intersect(SetInitializer({1:Integers})) + a.intersect(SetInitializer({1: Integers})) a.intersect(BoundsInitializer(p, default_step=0)) self.assertIs(type(a), SetInitializer) self.assertIs(type(a._set), SetIntersectInitializer) @@ -192,8 +236,8 @@ def test_intersect(self): self.assertFalse(a.constant()) self.assertFalse(a.verified) with self.assertRaises(KeyError): - a(None,None) - s = a(None,1) + a(None, None) + s = a(None, 1) self.assertIs(type(s), SetIntersection_InfiniteSet) p.construct() s.construct() @@ -208,119 +252,117 @@ def test_boundsinit(self): a = BoundsInitializer(5, default_step=1) self.assertTrue(a.constant()) self.assertFalse(a.verified) - s = a(None,None) + s = a(None, None) self.assertEqual(s, RangeSet(5)) - a = BoundsInitializer((0,5), default_step=1) + a = BoundsInitializer((0, 5), default_step=1) self.assertTrue(a.constant()) self.assertFalse(a.verified) - s = a(None,None) - self.assertEqual(s, RangeSet(0,5)) + s = a(None, None) + self.assertEqual(s, RangeSet(0, 5)) - a = BoundsInitializer((0,5,2)) + a = BoundsInitializer((0, 5, 2)) self.assertTrue(a.constant()) self.assertFalse(a.verified) - s = a(None,None) - self.assertEqual(s, RangeSet(0,5,2)) + s = a(None, None) + self.assertEqual(s, RangeSet(0, 5, 2)) a = BoundsInitializer(()) self.assertTrue(a.constant()) self.assertFalse(a.verified) - s = a(None,None) - self.assertEqual(s, RangeSet(None,None,0)) + s = a(None, None) + self.assertEqual(s, RangeSet(None, None, 0)) a = BoundsInitializer(5) self.assertTrue(a.constant()) self.assertFalse(a.verified) - s = a(None,None) - self.assertEqual(s, RangeSet(1,5,0)) + s = a(None, None) + self.assertEqual(s, RangeSet(1, 5, 0)) - a = BoundsInitializer((0,5)) + a = BoundsInitializer((0, 5)) self.assertTrue(a.constant()) self.assertFalse(a.verified) - s = a(None,None) - self.assertEqual(s, RangeSet(0,5,0)) + s = a(None, None) + self.assertEqual(s, RangeSet(0, 5, 0)) - a = BoundsInitializer((0,5,2)) + a = BoundsInitializer((0, 5, 2)) self.assertTrue(a.constant()) self.assertFalse(a.verified) - s = a(None,None) - self.assertEqual(s, RangeSet(0,5,2)) + s = a(None, None) + self.assertEqual(s, RangeSet(0, 5, 2)) - a = BoundsInitializer({1:5}, default_step=1) + a = BoundsInitializer({1: 5}, default_step=1) self.assertFalse(a.constant()) self.assertFalse(a.verified) - s = a(None,1) + s = a(None, 1) self.assertEqual(s, RangeSet(5)) - a = BoundsInitializer({1:(0,5)}, default_step=1) + a = BoundsInitializer({1: (0, 5)}, default_step=1) self.assertFalse(a.constant()) self.assertFalse(a.verified) - s = a(None,1) - self.assertEqual(s, RangeSet(0,5)) + s = a(None, 1) + self.assertEqual(s, RangeSet(0, 5)) def test_setdefault(self): a = SetInitializer(None) - self.assertIs(a(None,None), Any) + self.assertIs(a(None, None), Any) a.setdefault(Reals) - self.assertIs(a(None,None), Reals) + self.assertIs(a(None, None), Reals) a = SetInitializer(Integers) - self.assertIs(a(None,None), Integers) + self.assertIs(a(None, None), Integers) a.setdefault(Reals) - self.assertIs(a(None,None), Integers) + self.assertIs(a(None, None), Integers) a = BoundsInitializer(5, default_step=1) - self.assertEqual(a(None,None), RangeSet(5)) + self.assertEqual(a(None, None), RangeSet(5)) a.setdefault(Reals) - self.assertEqual(a(None,None), RangeSet(5)) + self.assertEqual(a(None, None), RangeSet(5)) a = SetInitializer(Reals) a.intersect(SetInitializer(Integers)) - self.assertIs(type(a(None,None)), SetIntersection_InfiniteSet) + self.assertIs(type(a(None, None)), SetIntersection_InfiniteSet) a.setdefault(RangeSet(5)) - self.assertIs(type(a(None,None)), SetIntersection_InfiniteSet) + self.assertIs(type(a(None, None)), SetIntersection_InfiniteSet) def test_indices(self): a = SetInitializer(None) self.assertFalse(a.contains_indices()) - with self.assertRaisesRegex( - RuntimeError, 'does not contain embedded indices'): + with self.assertRaisesRegex(RuntimeError, 'does not contain embedded indices'): a.indices() - a = SetInitializer([1,2,3]) + a = SetInitializer([1, 2, 3]) self.assertFalse(a.contains_indices()) - with self.assertRaisesRegex( - RuntimeError, 'does not contain embedded indices'): + with self.assertRaisesRegex(RuntimeError, 'does not contain embedded indices'): a.indices() # intersection initializers - a = SetInitializer({1: [1,2,3], 2: [4]}) + a = SetInitializer({1: [1, 2, 3], 2: [4]}) self.assertTrue(a.contains_indices()) - self.assertEqual(list(a.indices()), [1,2]) + self.assertEqual(list(a.indices()), [1, 2]) - a.intersect(SetInitializer({1: [4], 2: [1,2]})) + a.intersect(SetInitializer({1: [4], 2: [1, 2]})) self.assertTrue(a.contains_indices()) - self.assertEqual(list(a.indices()), [1,2]) + self.assertEqual(list(a.indices()), [1, 2]) # intersection initializer mismatch - a = SetInitializer({1: [1,2,3], 2: [4]}) + a = SetInitializer({1: [1, 2, 3], 2: [4]}) self.assertTrue(a.contains_indices()) - self.assertEqual(list(a.indices()), [1,2]) + self.assertEqual(list(a.indices()), [1, 2]) - a.intersect(SetInitializer({1: [4], 3: [1,2]})) + a.intersect(SetInitializer({1: [4], 3: [1, 2]})) self.assertTrue(a.contains_indices()) with self.assertRaisesRegex( - ValueError, 'contains two sub-initializers with inconsistent'): + ValueError, 'contains two sub-initializers with inconsistent' + ): a.indices() # intersection initializer mismatch (unindexed) - a = SetInitializer([1,2]) + a = SetInitializer([1, 2]) self.assertFalse(a.contains_indices()) - a.intersect(SetInitializer([1,2])) + a.intersect(SetInitializer([1, 2])) self.assertFalse(a.contains_indices()) - with self.assertRaisesRegex( - RuntimeError, 'does not contain embedded indices'): + with self.assertRaisesRegex(RuntimeError, 'does not contain embedded indices'): a.indices() @@ -338,15 +380,16 @@ def test_Reals(self): self.assertEqual(Reals.dim(), 0) self.assertIs(Reals.index_set(), UnindexedComponent_set) - with self.assertRaisesRegex( - TypeError, ".*'GlobalSet' has no len"): + with self.assertRaisesRegex(TypeError, ".*'GlobalSet' has no len"): len(Reals) with self.assertRaisesRegex( - TypeError, "'GlobalSet' object is not iterable " - r"\(non-finite Set 'Reals' is not iterable\)"): + TypeError, + "'GlobalSet' object is not iterable " + r"\(non-finite Set 'Reals' is not iterable\)", + ): list(Reals) - self.assertEqual(list(Reals.ranges()), [NR(None,None,0)]) - self.assertEqual(Reals.bounds(), (None,None)) + self.assertEqual(list(Reals.ranges()), [NR(None, None, 0)]) + self.assertEqual(Reals.bounds(), (None, None)) self.assertEqual(Reals.dimen, 1) tmp = RealSet() @@ -378,15 +421,16 @@ def test_Integers(self): self.assertEqual(Integers.dim(), 0) self.assertIs(Integers.index_set(), UnindexedComponent_set) - with self.assertRaisesRegex( - TypeError, ".*'GlobalSet' has no len"): + with self.assertRaisesRegex(TypeError, ".*'GlobalSet' has no len"): len(Integers) with self.assertRaisesRegex( - TypeError, "'GlobalSet' object is not iterable " - r"\(non-finite Set 'Integers' is not iterable\)"): + TypeError, + "'GlobalSet' object is not iterable " + r"\(non-finite Set 'Integers' is not iterable\)", + ): list(Integers) - self.assertEqual(list(Integers.ranges()), [NR(0,None,1),NR(0,None,-1)]) - self.assertEqual(Integers.bounds(), (None,None)) + self.assertEqual(list(Integers.ranges()), [NR(0, None, 1), NR(0, None, -1)]) + self.assertEqual(Integers.bounds(), (None, None)) self.assertEqual(Integers.dimen, 1) tmp = IntegerSet() @@ -418,15 +462,16 @@ def test_Any(self): self.assertEqual(Any.dim(), 0) self.assertIs(Any.index_set(), UnindexedComponent_set) - with self.assertRaisesRegex( - TypeError, ".*'Any' has no len"): + with self.assertRaisesRegex(TypeError, ".*'Any' has no len"): len(Any) with self.assertRaisesRegex( - TypeError, "'GlobalSet' object is not iterable " - r"\(non-finite Set 'Any' is not iterable\)"): + TypeError, + "'GlobalSet' object is not iterable " + r"\(non-finite Set 'Any' is not iterable\)", + ): list(Any) self.assertEqual(list(Any.ranges()), [AnyRange()]) - self.assertEqual(Any.bounds(), (None,None)) + self.assertEqual(Any.bounds(), (None, None)) self.assertEqual(Any.dimen, None) tmp = _AnySet() @@ -449,8 +494,8 @@ def test_AnyWithNone(self): self.assertIn(None, AnyWithNone) self.assertIn(1, AnyWithNone) self.assertRegex( - os.getvalue(), - "^DEPRECATED: The AnyWithNone set is deprecated") + os.getvalue(), "^DEPRECATED: The AnyWithNone set is deprecated" + ) self.assertEqual(Any, AnyWithNone) self.assertEqual(AnyWithNone, Any) @@ -471,7 +516,7 @@ def test_EmptySet(self): self.assertEqual(len(EmptySet), 0) self.assertEqual(list(EmptySet), []) self.assertEqual(list(EmptySet.ranges()), []) - self.assertEqual(EmptySet.bounds(), (None,None)) + self.assertEqual(EmptySet.bounds(), (None, None)) self.assertEqual(EmptySet.dimen, 0) tmp = _EmptySet() @@ -504,12 +549,12 @@ def test_relational_operators(self): self.assertTrue(Any.issuperset(Any2)) self.assertFalse(Any.isdisjoint(Any2)) - Reals2 = RangeSet(ranges=(NR(None,None,0),)) + Reals2 = RangeSet(ranges=(NR(None, None, 0),)) self.assertTrue(Reals.issubset(Reals2)) self.assertTrue(Reals.issuperset(Reals2)) self.assertFalse(Reals.isdisjoint(Reals2)) - Integers2 = RangeSet(ranges=(NR(0,None,-1), NR(0,None,1))) + Integers2 = RangeSet(ranges=(NR(0, None, -1), NR(0, None, 1))) self.assertTrue(Integers.issubset(Integers2)) self.assertTrue(Integers.issuperset(Integers2)) self.assertFalse(Integers.isdisjoint(Integers2)) @@ -552,21 +597,14 @@ def test_relational_operators(self): self.assertTrue(EmptySet.issuperset(tmp)) self.assertTrue(EmptySet.isdisjoint(tmp)) - def test_equality(self): self.assertEqual(Any, Any) self.assertEqual(Reals, Reals) self.assertEqual(PositiveIntegers, PositiveIntegers) self.assertEqual(Any, _AnySet()) - self.assertEqual( - Reals, - RangeSet(ranges=(NR(None,None,0),)) - ) - self.assertEqual( - Integers, - RangeSet(ranges=(NR(0,None,-1), NR(0,None,1))) - ) + self.assertEqual(Reals, RangeSet(ranges=(NR(None, None, 0),))) + self.assertEqual(Integers, RangeSet(ranges=(NR(0, None, -1), NR(0, None, 1)))) self.assertNotEqual(Integers, Reals) self.assertNotEqual(Reals, Integers) @@ -575,73 +613,66 @@ def test_equality(self): # For equality, ensure that the ranges can be in any order self.assertEqual( - RangeSet(ranges=(NR(0,None,-1), NR(0,None,1))), - RangeSet(ranges=(NR(0,None,1), NR(0,None,-1))) + RangeSet(ranges=(NR(0, None, -1), NR(0, None, 1))), + RangeSet(ranges=(NR(0, None, 1), NR(0, None, -1))), ) # And integer ranges can be grounded at different points self.assertEqual( - RangeSet(ranges=(NR(10,None,-1), NR(10,None,1))), - RangeSet(ranges=(NR(0,None,1), NR(0,None,-1))) + RangeSet(ranges=(NR(10, None, -1), NR(10, None, 1))), + RangeSet(ranges=(NR(0, None, 1), NR(0, None, -1))), ) self.assertEqual( - RangeSet(ranges=(NR(0,None,-1), NR(0,None,1))), - RangeSet(ranges=(NR(10,None,1), NR(10,None,-1))) + RangeSet(ranges=(NR(0, None, -1), NR(0, None, 1))), + RangeSet(ranges=(NR(10, None, 1), NR(10, None, -1))), ) # Odd positive integers and even positive integers are positive # integers self.assertEqual( - PositiveIntegers, - RangeSet(ranges=(NR(1,None,2), NR(2,None,2))) + PositiveIntegers, RangeSet(ranges=(NR(1, None, 2), NR(2, None, 2))) ) # Nututally prime sets of ranges self.assertEqual( - RangeSet(ranges=(NR(1,None,2), NR(2,None,2))), - RangeSet(ranges=( - NR(1,None,3), NR(2,None,3), NR(3,None,3) - )) + RangeSet(ranges=(NR(1, None, 2), NR(2, None, 2))), + RangeSet(ranges=(NR(1, None, 3), NR(2, None, 3), NR(3, None, 3))), ) # Nututally prime sets of ranges # ...omitting one of the subranges breaks equality self.assertNotEqual( - RangeSet(ranges=(NR(1,None,2), NR(2,None,2))), - RangeSet(ranges=( - NR(1,None,3), NR(2,None,3) - )) + RangeSet(ranges=(NR(1, None, 2), NR(2, None, 2))), + RangeSet(ranges=(NR(1, None, 3), NR(2, None, 3))), ) # Mututally prime sets of ranges # ...changing a reference point (so redundant NR) breaks equality self.assertNotEqual( - RangeSet(ranges=(NR(0,None,2), NR(0,None,2))), - RangeSet(ranges=( - NR(1,None,3), NR(2,None,3), NR(3,None,3) - )) + RangeSet(ranges=(NR(0, None, 2), NR(0, None, 2))), + RangeSet(ranges=(NR(1, None, 3), NR(2, None, 3), NR(3, None, 3))), ) def test_bounds(self): - self.assertEqual(Any.bounds(), (None,None)) - self.assertEqual(Reals.bounds(), (None,None)) - self.assertEqual(PositiveReals.bounds(), (0,None)) - self.assertEqual(NegativeIntegers.bounds(), (None,-1)) + self.assertEqual(Any.bounds(), (None, None)) + self.assertEqual(Reals.bounds(), (None, None)) + self.assertEqual(PositiveReals.bounds(), (0, None)) + self.assertEqual(NegativeIntegers.bounds(), (None, -1)) class TestRangeOperations(unittest.TestCase): def test_mixed_ranges_isdisjoint(self): - i = RangeSet(0,10,2) - j = SetOf([0,1,2,'a']) + i = RangeSet(0, 10, 2) + j = SetOf([0, 1, 2, 'a']) k = Any ir = list(i.ranges()) - self.assertEqual(ir, [NR(0,10,2)]) + self.assertEqual(ir, [NR(0, 10, 2)]) self.assertEqual(str(ir), "[[0:10:2]]") ir = ir[0] jr = list(j.ranges()) - self.assertEqual(jr, [NR(0,0,0), NR(1,1,0), NR(2,2,0), NNR('a')]) + self.assertEqual(jr, [NR(0, 0, 0), NR(1, 1, 0), NR(2, 2, 0), NNR('a')]) self.assertEqual(str(jr), "[[0], [1], [2], {a}]") jr0, jr1, jr2, jr3 = jr @@ -688,7 +719,7 @@ def test_mixed_ranges_issubset(self): # Note that these ranges are verified in the test above (ir,) = list(i.ranges()) jr0, jr1, jr2, jr3 = list(j.ranges()) - kr, = list(k.ranges()) + (kr,) = list(k.ranges()) self.assertTrue(ir.issubset(ir)) self.assertFalse(ir.issubset(jr0)) @@ -726,16 +757,16 @@ def test_mixed_ranges_range_difference(self): k = Any # Note that these ranges are verified in the test above - ir, = list(i.ranges()) + (ir,) = list(i.ranges()) jr0, jr1, jr2, jr3 = list(j.ranges()) - kr, = list(k.ranges()) + (kr,) = list(k.ranges()) self.assertEqual(ir.range_difference(i.ranges()), []) - self.assertEqual(ir.range_difference([jr0]), [NR(2,10,2)]) - self.assertEqual(ir.range_difference([jr1]), [NR(0,10,2)]) - self.assertEqual(ir.range_difference([jr2]), [NR(0,0,0), NR(4,10,2)]) - self.assertEqual(ir.range_difference([jr3]), [NR(0,10,2)]) - self.assertEqual(ir.range_difference(j.ranges()), [NR(4,10,2)]) + self.assertEqual(ir.range_difference([jr0]), [NR(2, 10, 2)]) + self.assertEqual(ir.range_difference([jr1]), [NR(0, 10, 2)]) + self.assertEqual(ir.range_difference([jr2]), [NR(0, 0, 0), NR(4, 10, 2)]) + self.assertEqual(ir.range_difference([jr3]), [NR(0, 10, 2)]) + self.assertEqual(ir.range_difference(j.ranges()), [NR(4, 10, 2)]) self.assertEqual(ir.range_difference(k.ranges()), []) self.assertEqual(jr0.range_difference(i.ranges()), []) @@ -776,9 +807,9 @@ def test_mixed_ranges_range_intersection(self): k = Any # Note that these ranges are verified in the test above - ir, = list(i.ranges()) + (ir,) = list(i.ranges()) jr0, jr1, jr2, jr3 = list(j.ranges()) - kr, = list(k.ranges()) + (kr,) = list(k.ranges()) self.assertEqual(ir.range_intersection(i.ranges()), [ir]) self.assertEqual(ir.range_intersection([jr0]), [jr0]) @@ -817,21 +848,21 @@ def test_mixed_ranges_range_intersection(self): self.assertEqual(kr.range_intersection([jr1]), [jr1]) self.assertEqual(kr.range_intersection([jr2]), [jr2]) self.assertEqual(kr.range_intersection([jr3]), [jr3]) - self.assertEqual(kr.range_intersection(j.ranges()), [jr0,jr1,jr2,jr3]) + self.assertEqual(kr.range_intersection(j.ranges()), [jr0, jr1, jr2, jr3]) self.assertEqual(kr.range_intersection(k.ranges()), [kr]) class Test_SetOf_and_RangeSet(unittest.TestCase): def test_constructor(self): - i = SetOf([1,2,3]) + i = SetOf([1, 2, 3]) self.assertIs(type(i), OrderedSetOf) - j = OrderedSetOf([1,2,3]) + j = OrderedSetOf([1, 2, 3]) self.assertIs(type(i), OrderedSetOf) self.assertEqual(i, j) - i = SetOf({1,2,3}) + i = SetOf({1, 2, 3}) self.assertIs(type(i), FiniteSetOf) - j = FiniteSetOf([1,2,3]) + j = FiniteSetOf([1, 2, 3]) self.assertIs(type(i), FiniteSetOf) self.assertEqual(i, j) @@ -847,7 +878,7 @@ def test_constructor(self): self.assertIs(type(i), OrderedSetOf) self.assertEqual(i, j) - I = Set(initialize={1,3,2}, ordered=False) + I = Set(initialize={1, 3, 2}, ordered=False) I.construct() i = SetOf(I) self.assertIs(type(i), FiniteSetOf) @@ -860,71 +891,81 @@ def test_constructor(self): self.assertEqual(len(i), 3) self.assertEqual(len(list(i.ranges())), 1) - i = RangeSet(1,3) + i = RangeSet(1, 3) self.assertTrue(i.is_constructed()) self.assertEqual(len(i), 3) self.assertEqual(len(list(i.ranges())), 1) - i = RangeSet(ranges=[NR(1,3,1)]) + i = RangeSet(ranges=[NR(1, 3, 1)]) self.assertTrue(i.is_constructed()) self.assertEqual(len(i), 3) - self.assertEqual(list(i.ranges()), [NR(1,3,1)]) + self.assertEqual(list(i.ranges()), [NR(1, 3, 1)]) - i = RangeSet(1,3,0) - with self.assertRaisesRegex( - TypeError, ".*'InfiniteScalarRangeSet' has no len"): + i = RangeSet(1, 3, 0) + with self.assertRaisesRegex(TypeError, ".*'InfiniteScalarRangeSet' has no len"): len(i) self.assertEqual(len(list(i.ranges())), 1) - with self.assertRaisesRegex( - TypeError, ".*'GlobalSet' has no len"): + with self.assertRaisesRegex(TypeError, ".*'GlobalSet' has no len"): len(Integers) self.assertEqual(len(list(Integers.ranges())), 2) with self.assertRaisesRegex( - ValueError, "RangeSet expects 3 or fewer positional " - r"arguments \(received 4\)"): - RangeSet(1,2,3,4) + ValueError, + "RangeSet expects 3 or fewer positional " r"arguments \(received 4\)", + ): + RangeSet(1, 2, 3, 4) with self.assertRaisesRegex( - TypeError, "'ranges' argument must be an iterable of " - "NumericRange objects"): - RangeSet(ranges=(NR(1,5,1), NNR('a'))) + TypeError, "'ranges' argument must be an iterable of NumericRange objects" + ): + RangeSet(ranges=(NR(1, 5, 1), NNR('a'))) with self.assertRaisesRegex( - ValueError, "Constructing a finite RangeSet over a " - "non-finite range "): - RangeSet(finite=True, ranges=(NR(1,5,0),)) + ValueError, "Constructing a finite RangeSet over a non-finite range " + ): + RangeSet(finite=True, ranges=(NR(1, 5, 0),)) with self.assertRaisesRegex( - ValueError, "RangeSet does not support unbounded ranges " - "with a non-integer step"): - RangeSet(0,None,0.5) + ValueError, + "RangeSet does not support unbounded ranges with a non-integer step", + ): + RangeSet(0, None, 0.5) with LoggingIntercept() as LOG: m = ConcreteModel() m.p = Param(initialize=5, mutable=False) m.I = RangeSet(0, m.p) self.assertEqual(LOG.getvalue(), "") - self.assertEqual(RangeSet(0,5,1), m.I) + self.assertEqual(RangeSet(0, 5, 1), m.I) with LoggingIntercept() as LOG: m = ConcreteModel() m.p = Param(initialize=5, mutable=True) m.I = RangeSet(0, m.p) - self.assertIn("Constructing RangeSet 'I' from non-constant data", - LOG.getvalue()) - self.assertEqual(RangeSet(0,5,1), m.I) + self.assertIn( + "Constructing RangeSet 'I' from non-constant data", LOG.getvalue() + ) + self.assertEqual(RangeSet(0, 5, 1), m.I) class _AlmostNumeric(object): def __init__(self, val): self.val = val + def __float__(self): return self.val + def __add__(self, other): - return self.val+other + return self.val + other + def __sub__(self, other): - return self.val-other + return self.val - other + + def __lt__(self, other): + return self.val < other + + def __ge__(self, other): + return self.val >= other i = RangeSet(_AlmostNumeric(1)) self.assertFalse(i.is_constructed()) @@ -940,8 +981,7 @@ def __sub__(self, other): with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertEqual(output.getvalue(), "") i.construct() - ref = 'Constructing RangeSet, ' \ - 'name=FiniteScalarRangeSet, from data=None\n' + ref = 'Constructing RangeSet, name=FiniteScalarRangeSet, from data=None\n' self.assertEqual(output.getvalue(), ref) self.assertTrue(i.is_constructed()) self.assertIs(type(i), FiniteScalarRangeSet) @@ -952,11 +992,10 @@ def __sub__(self, other): output = StringIO() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): - i = SetOf([1,2,3]) + i = SetOf([1, 2, 3]) self.assertEqual(output.getvalue(), "") i.construct() - ref = 'Constructing SetOf, name=OrderedSetOf, ' \ - 'from data=None\n' + ref = 'Constructing SetOf, name=OrderedSetOf, from data=None\n' self.assertEqual(output.getvalue(), ref) # Calling construct() twice bypasses construction the second # time around @@ -969,40 +1008,40 @@ def __sub__(self, other): # Special case: we do not error when the constructing a 0-length # RangeSetwith bounds (i, i-1) - i = RangeSet(0,-1) + i = RangeSet(0, -1) self.assertEqual(len(i), 0) self.assertEqual(len(list(i.ranges())), 0) # Test non-finite RangeSets - i = RangeSet(1,10) + i = RangeSet(1, 10) self.assertIs(type(i), FiniteScalarRangeSet) - i = RangeSet(1,10,0) + i = RangeSet(1, 10, 0) self.assertIs(type(i), InfiniteScalarRangeSet) - i = RangeSet(1,1,0) + i = RangeSet(1, 1, 0) self.assertIs(type(i), FiniteScalarRangeSet) j = RangeSet(1, float('inf')) self.assertIs(type(j), InfiniteScalarRangeSet) - i = RangeSet(1,None) + i = RangeSet(1, None) self.assertIs(type(i), InfiniteScalarRangeSet) - self.assertEqual(i,j) + self.assertEqual(i, j) self.assertIn(1, i) self.assertIn(100, i) self.assertNotIn(0, i) self.assertNotIn(1.5, i) - i = RangeSet(None,1) + i = RangeSet(None, 1) self.assertIs(type(i), InfiniteScalarRangeSet) self.assertIn(1, i) self.assertNotIn(100, i) self.assertIn(0, i) self.assertNotIn(0.5, i) - i = RangeSet(None,None) + i = RangeSet(None, None) self.assertIs(type(i), InfiniteScalarRangeSet) self.assertIn(1, i) self.assertIn(100, i) self.assertIn(0, i) self.assertNotIn(0.5, i) - i = RangeSet(None,None,bounds=(-5,10)) + i = RangeSet(None, None, bounds=(-5, 10)) self.assertIs(type(i), InfiniteScalarRangeSet) self.assertIn(10, i) self.assertNotIn(11, i) @@ -1017,7 +1056,6 @@ def __sub__(self, other): i = RangeSet(1, p, 1) self.assertIs(type(i), InfiniteScalarRangeSet) - # Test abstract RangeSets m = AbstractModel() m.p = Param() @@ -1026,92 +1064,111 @@ def __sub__(self, other): m.i = RangeSet(m.p, m.q, m.s, finite=True) self.assertIs(type(m.i), AbstractFiniteScalarRangeSet) i = m.create_instance( - data={None: {'p': {None: 1}, 'q': {None: 5}, 's': {None: 2}}}) + data={None: {'p': {None: 1}, 'q': {None: 5}, 's': {None: 2}}} + ) self.assertIs(type(i.i), FiniteScalarRangeSet) - self.assertEqual(list(i.i), [1,3,5]) + self.assertEqual(list(i.i), [1, 3, 5]) with self.assertRaisesRegex( - ValueError, - r"finite RangeSet over a non-finite range \(\[1..5\]\)"): + ValueError, r"finite RangeSet over a non-finite range \(\[1..5\]\)" + ): i = m.create_instance( - data={None: {'p': {None: 1}, 'q': {None: 5}, 's': {None: 0}}}) + data={None: {'p': {None: 1}, 'q': {None: 5}, 's': {None: 0}}} + ) with self.assertRaisesRegex( - ValueError, - r"RangeSet.construct\(\) does not support the data= argument."): + ValueError, r"RangeSet.construct\(\) does not support the data= argument." + ): i = m.create_instance( - data={None: {'p': {None: 1}, 'q': {None: 5}, 's': {None: 1}, - 'i': {None: [1,2,3]} }}) + data={ + None: { + 'p': {None: 1}, + 'q': {None: 5}, + 's': {None: 1}, + 'i': {None: [1, 2, 3]}, + } + } + ) def test_filter(self): def rFilter(m, i): return i % 2 + # Simple filter (beginning with the *first* element) r = RangeSet(10, filter=rFilter) - self.assertEqual(r, [1,3,5,7,9]) + self.assertEqual(r, [1, 3, 5, 7, 9]) # Nothing to remove r = RangeSet(1, filter=rFilter) self.assertEqual(r, [1]) # Remove the only element in the range - r = RangeSet(2,2, filter=rFilter) + r = RangeSet(2, 2, filter=rFilter) self.assertEqual(r, []) # remove the *second* element in the range - r = RangeSet(2,3, filter=rFilter) + r = RangeSet(2, 3, filter=rFilter) self.assertEqual(r, [3]) # Test a filter that doesn't raise an exception for "None" def rFilter(m, i): return i is None or i % 2 + r = RangeSet(10, filter=rFilter) - self.assertEqual(r, [1,3,5,7,9]) + self.assertEqual(r, [1, 3, 5, 7, 9]) with self.assertRaisesRegex( - ValueError, "The 'filter' keyword argument is not " - "valid for non-finite RangeSet component"): - r = RangeSet(1,10,0, filter=rFilter) + ValueError, + "The 'filter' keyword argument is not " + "valid for non-finite RangeSet component", + ): + r = RangeSet(1, 10, 0, filter=rFilter) def test_validate(self): def rFilter(m, i): self.assertIs(m, None) return i % 2 + # Simple validation - r = RangeSet(1,10,2, validate=rFilter) - self.assertEqual(r, [1,3,5,7,9]) + r = RangeSet(1, 10, 2, validate=rFilter) + self.assertEqual(r, [1, 3, 5, 7, 9]) # Failed validation with self.assertRaisesRegex( - ValueError, "The value=2 violates the validation rule"): + ValueError, "The value=2 violates the validation rule" + ): r = RangeSet(10, validate=rFilter) # Test a validation that doesn't raise an exception for "None" def rFilter(m, i): return i is None or i % 2 - r = RangeSet(1,10,2, validate=rFilter) - self.assertEqual(r, [1,3,5,7,9]) + + r = RangeSet(1, 10, 2, validate=rFilter) + self.assertEqual(r, [1, 3, 5, 7, 9]) with self.assertRaisesRegex( - ValueError, "The 'validate' keyword argument is not " - "valid for non-finite RangeSet component"): - r = RangeSet(1,10,0, validate=rFilter) + ValueError, + "The 'validate' keyword argument is not " + "valid for non-finite RangeSet component", + ): + r = RangeSet(1, 10, 0, validate=rFilter) def badRule(m, i): raise RuntimeError("ERROR: %s" % i) + output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - with self.assertRaisesRegex( - RuntimeError, "ERROR: 1"): + with self.assertRaisesRegex(RuntimeError, "ERROR: 1"): r = RangeSet(10, validate=badRule) self.assertEqual( output.getvalue(), "Exception raised while validating element " - "'1' for Set FiniteScalarRangeSet\n") + "'1' for Set FiniteScalarRangeSet\n", + ) def test_bounds(self): r = RangeSet(100, bounds=(2.5, 5.5)) - self.assertEqual(r, [3,4,5]) + self.assertEqual(r, [3, 4, 5]) def test_contains(self): r = RangeSet(5) @@ -1131,8 +1188,8 @@ def test_equality(self): m.I = RangeSet(3) m.NotI = RangeSet(4) - m.J = SetOf([1,2,3]) - m.NotJ = SetOf([1,2,3,4]) + m.J = SetOf([1, 2, 3]) + m.NotJ = SetOf([1, 2, 3, 4]) # Sets are equal to themselves self.assertEqual(m.I, m.I) @@ -1143,8 +1200,8 @@ def test_equality(self): self.assertEqual(m.J, m.I) # ordering shouldn't matter - self.assertEqual(SetOf([1,3,4,2]), SetOf({1,2,3,4})) - self.assertEqual(SetOf({1,2,3,4}), SetOf([1,3,4,2])) + self.assertEqual(SetOf([1, 3, 4, 2]), SetOf({1, 2, 3, 4})) + self.assertEqual(SetOf({1, 2, 3, 4}), SetOf([1, 3, 4, 2])) # Inequality... self.assertNotEqual(m.I, m.NotI) @@ -1153,72 +1210,65 @@ def test_equality(self): self.assertNotEqual(m.NotJ, m.I) self.assertNotEqual(m.J, m.NotJ) self.assertNotEqual(m.NotJ, m.J) - self.assertNotEqual(m.I, RangeSet(1,3,0)) - self.assertNotEqual(RangeSet(1,3,0), m.I) + self.assertNotEqual(m.I, RangeSet(1, 3, 0)) + self.assertNotEqual(RangeSet(1, 3, 0), m.I) - self.assertNotEqual(SetOf([1,3,5,2]), SetOf({1,2,3,4})) - self.assertNotEqual(SetOf({1,2,3,4}), SetOf([1,3,5,2])) + self.assertNotEqual(SetOf([1, 3, 5, 2]), SetOf({1, 2, 3, 4})) + self.assertNotEqual(SetOf({1, 2, 3, 4}), SetOf([1, 3, 5, 2])) # Sets can be compared against non-set objects - self.assertEqual( - RangeSet(0,4,1), - [0,1,2,3,4] - ) - self.assertEqual( - RangeSet(0,4), - [0,1,2,3,4] - ) - self.assertEqual( - RangeSet(4), - [1,2,3,4] - ) + self.assertEqual(RangeSet(0, 4, 1), [0, 1, 2, 3, 4]) + self.assertEqual(RangeSet(0, 4), [0, 1, 2, 3, 4]) + self.assertEqual(RangeSet(4), [1, 2, 3, 4]) # It can even work for non-iterable objects (that can't be cast # to set()) class _NonIterable(object): def __init__(self): - self.data = set({1,3,5}) + self.data = set({1, 3, 5}) + def __contains__(self, val): return val in self.data + def __len__(self): return len(self.data) - self.assertEqual(SetOf({1,3,5}), _NonIterable()) + + self.assertEqual(SetOf({1, 3, 5}), _NonIterable()) # Test types that cannot be case to set - self.assertNotEqual(SetOf({3,}), 3) + self.assertNotEqual(SetOf({3}), 3) def test_inequality(self): - self.assertTrue(SetOf([1,2,3]) <= SetOf({1,2,3})) - self.assertFalse(SetOf([1,2,3]) < SetOf({1,2,3})) + self.assertTrue(SetOf([1, 2, 3]) <= SetOf({1, 2, 3})) + self.assertFalse(SetOf([1, 2, 3]) < SetOf({1, 2, 3})) - self.assertTrue(SetOf([1,2,3]) <= SetOf({1,2,3,4})) - self.assertTrue(SetOf([1,2,3]) < SetOf({1,2,3,4})) + self.assertTrue(SetOf([1, 2, 3]) <= SetOf({1, 2, 3, 4})) + self.assertTrue(SetOf([1, 2, 3]) < SetOf({1, 2, 3, 4})) - self.assertFalse(SetOf([1,2,3]) <= SetOf({1,2})) - self.assertFalse(SetOf([1,2,3]) < SetOf({1,2})) + self.assertFalse(SetOf([1, 2, 3]) <= SetOf({1, 2})) + self.assertFalse(SetOf([1, 2, 3]) < SetOf({1, 2})) - self.assertTrue(SetOf([1,2,3]) >= SetOf({1,2,3})) - self.assertFalse(SetOf([1,2,3]) > SetOf({1,2,3})) + self.assertTrue(SetOf([1, 2, 3]) >= SetOf({1, 2, 3})) + self.assertFalse(SetOf([1, 2, 3]) > SetOf({1, 2, 3})) - self.assertFalse(SetOf([1,2,3]) >= SetOf({1,2,3,4})) - self.assertFalse(SetOf([1,2,3]) > SetOf({1,2,3,4})) - - self.assertTrue(SetOf([1,2,3]) >= SetOf({1,2})) - self.assertTrue(SetOf([1,2,3]) > SetOf({1,2})) + self.assertFalse(SetOf([1, 2, 3]) >= SetOf({1, 2, 3, 4})) + self.assertFalse(SetOf([1, 2, 3]) > SetOf({1, 2, 3, 4})) + self.assertTrue(SetOf([1, 2, 3]) >= SetOf({1, 2})) + self.assertTrue(SetOf([1, 2, 3]) > SetOf({1, 2})) def test_is_functions(self): - i = SetOf({1,2,3}) + i = SetOf({1, 2, 3}) self.assertTrue(i.isdiscrete()) self.assertTrue(i.isfinite()) self.assertFalse(i.isordered()) - i = SetOf([1,2,3]) + i = SetOf([1, 2, 3]) self.assertTrue(i.isdiscrete()) self.assertTrue(i.isfinite()) self.assertTrue(i.isordered()) - i = SetOf((1,2,3)) + i = SetOf((1, 2, 3)) self.assertTrue(i.isdiscrete()) self.assertTrue(i.isfinite()) self.assertTrue(i.isordered()) @@ -1229,13 +1279,13 @@ def test_is_functions(self): self.assertTrue(i.isordered()) self.assertIsInstance(i, _FiniteRangeSetData) - i = RangeSet(1,3) + i = RangeSet(1, 3) self.assertTrue(i.isdiscrete()) self.assertTrue(i.isfinite()) self.assertTrue(i.isordered()) self.assertIsInstance(i, _FiniteRangeSetData) - i = RangeSet(1,3,0) + i = RangeSet(1, 3, 0) self.assertFalse(i.isdiscrete()) self.assertFalse(i.isfinite()) self.assertFalse(i.isordered()) @@ -1246,12 +1296,14 @@ def test_pprint(self): m.I = RangeSet(3) m.K1 = RangeSet(0) m.K2 = RangeSet(10, 9) - m.NotI = RangeSet(1,3,0) - m.J = SetOf([1,2,3]) + m.NotI = RangeSet(1, 3, 0) + m.J = SetOf([1, 2, 3]) buf = StringIO() m.pprint(ostream=buf) - self.assertEqual(buf.getvalue().strip(), """ + self.assertEqual( + buf.getvalue().strip(), + """ 4 RangeSet Declarations I : Dimen=1, Size=3, Bounds=(1, 3) Key : Finite : Members @@ -1271,7 +1323,8 @@ def test_pprint(self): Key : Ordered : Members None : True : [1, 2, 3] -5 Declarations: I K1 K2 NotI J""".strip()) +5 Declarations: I K1 K2 NotI J""".strip(), + ) def test_naming(self): m = ConcreteModel() @@ -1281,17 +1334,17 @@ def test_naming(self): m.I = i self.assertEqual(str(i), "I") - j = RangeSet(ranges=(NR(1,3,0), NR(4,7,1))) + j = RangeSet(ranges=(NR(1, 3, 0), NR(4, 7, 1))) self.assertEqual(str(j), "([1..3] | [4:7])") m.J = j self.assertEqual(str(j), "J") - k = SetOf((1,3,5)) + k = SetOf((1, 3, 5)) self.assertEqual(str(k), "(1, 3, 5)") m.K = k self.assertEqual(str(k), "K") - l = SetOf([1,3,5]) + l = SetOf([1, 3, 5]) self.assertEqual(str(l), "[1, 3, 5]") m.L = l self.assertEqual(str(l), "L") @@ -1325,18 +1378,18 @@ def test_naming(self): self.assertEqual(str(pp), "PP") def test_isdisjoint(self): - i = SetOf({1,2,3}) - self.assertTrue(i.isdisjoint({4,5,6})) - self.assertFalse(i.isdisjoint({3,4,5,6})) + i = SetOf({1, 2, 3}) + self.assertTrue(i.isdisjoint({4, 5, 6})) + self.assertFalse(i.isdisjoint({3, 4, 5, 6})) - self.assertTrue(i.isdisjoint(SetOf({4,5,6}))) - self.assertFalse(i.isdisjoint(SetOf({3,4,5,6}))) + self.assertTrue(i.isdisjoint(SetOf({4, 5, 6}))) + self.assertFalse(i.isdisjoint(SetOf({3, 4, 5, 6}))) - self.assertTrue(i.isdisjoint(RangeSet(4,6,0))) - self.assertFalse(i.isdisjoint(RangeSet(3,6,0))) + self.assertTrue(i.isdisjoint(RangeSet(4, 6, 0))) + self.assertFalse(i.isdisjoint(RangeSet(3, 6, 0))) - self.assertTrue(RangeSet(4,6,0).isdisjoint(i)) - self.assertFalse(RangeSet(3,6,0).isdisjoint(i)) + self.assertTrue(RangeSet(4, 6, 0).isdisjoint(i)) + self.assertFalse(RangeSet(3, 6, 0).isdisjoint(i)) # It can even work for non-hashable objects (that can't be cast # to set()) @@ -1350,33 +1403,35 @@ def test_isdisjoint(self): # to set()) class _NonIterable(object): def __init__(self): - self.data = set({1,3,5}) + self.data = set({1, 3, 5}) + def __contains__(self, val): return val in self.data + def __len__(self): return len(self.data) - self.assertTrue(SetOf({2,4}).isdisjoint(_NonIterable())) - self.assertFalse(SetOf({2,3,4}).isdisjoint(_NonIterable())) + + self.assertTrue(SetOf({2, 4}).isdisjoint(_NonIterable())) + self.assertFalse(SetOf({2, 3, 4}).isdisjoint(_NonIterable())) # test bad type - with self.assertRaisesRegex( - TypeError, "'int' object is not iterable"): + with self.assertRaisesRegex(TypeError, "'int' object is not iterable"): i.isdisjoint(1) def test_issubset(self): - i = SetOf({1,2,3}) - self.assertTrue(i.issubset({1,2,3,4})) - self.assertFalse(i.issubset({3,4,5,6})) + i = SetOf({1, 2, 3}) + self.assertTrue(i.issubset({1, 2, 3, 4})) + self.assertFalse(i.issubset({3, 4, 5, 6})) - self.assertTrue(i.issubset(SetOf({1,2,3,4}))) - self.assertFalse(i.issubset(SetOf({3,4,5,6}))) + self.assertTrue(i.issubset(SetOf({1, 2, 3, 4}))) + self.assertFalse(i.issubset(SetOf({3, 4, 5, 6}))) - self.assertTrue(i.issubset(RangeSet(1,4,0))) - self.assertFalse(i.issubset(RangeSet(3,6,0))) + self.assertTrue(i.issubset(RangeSet(1, 4, 0))) + self.assertFalse(i.issubset(RangeSet(3, 6, 0))) - self.assertTrue(RangeSet(1,3,0).issubset(RangeSet(0,100,0))) - self.assertFalse(RangeSet(1,3,0).issubset(i)) - self.assertFalse(RangeSet(3,6,0).issubset(i)) + self.assertTrue(RangeSet(1, 3, 0).issubset(RangeSet(0, 100, 0))) + self.assertFalse(RangeSet(1, 3, 0).issubset(i)) + self.assertFalse(RangeSet(3, 6, 0).issubset(i)) # It can even work for non-hashable objects (that can't be cast # to set()) @@ -1391,33 +1446,35 @@ def test_issubset(self): # to set()) class _NonIterable(object): def __init__(self): - self.data = set({1,3,5}) + self.data = set({1, 3, 5}) + def __contains__(self, val): return val in self.data + def __len__(self): return len(self.data) - self.assertTrue(SetOf({1,5}).issubset(_NonIterable())) - self.assertFalse(SetOf({1,3,4}).issubset(_NonIterable())) + + self.assertTrue(SetOf({1, 5}).issubset(_NonIterable())) + self.assertFalse(SetOf({1, 3, 4}).issubset(_NonIterable())) # test bad type - with self.assertRaisesRegex( - TypeError, "'int' object is not iterable"): + with self.assertRaisesRegex(TypeError, "'int' object is not iterable"): i.issubset(1) def test_issuperset(self): - i = SetOf({1,2,3}) - self.assertTrue(i.issuperset({1,2})) - self.assertFalse(i.issuperset({3,4,5,6})) + i = SetOf({1, 2, 3}) + self.assertTrue(i.issuperset({1, 2})) + self.assertFalse(i.issuperset({3, 4, 5, 6})) - self.assertTrue(i.issuperset(SetOf({1,2}))) - self.assertFalse(i.issuperset(SetOf({3,4,5,6}))) + self.assertTrue(i.issuperset(SetOf({1, 2}))) + self.assertFalse(i.issuperset(SetOf({3, 4, 5, 6}))) - self.assertFalse(i.issuperset(RangeSet(1,3,0))) - self.assertFalse(i.issuperset(RangeSet(3,6,0))) + self.assertFalse(i.issuperset(RangeSet(1, 3, 0))) + self.assertFalse(i.issuperset(RangeSet(3, 6, 0))) - self.assertTrue(RangeSet(1,3,0).issuperset(RangeSet(1,2,0))) - self.assertTrue(RangeSet(1,3,0).issuperset(i)) - self.assertFalse(RangeSet(3,6,0).issuperset(i)) + self.assertTrue(RangeSet(1, 3, 0).issuperset(RangeSet(1, 2, 0))) + self.assertTrue(RangeSet(1, 3, 0).issuperset(i)) + self.assertFalse(RangeSet(3, 6, 0).issuperset(i)) # It can even work for non-hashable objects (that can't be cast # to set()) @@ -1432,53 +1489,54 @@ def test_issuperset(self): # does not implement isfinite() is a discrete set. class _NonIterable(object): def __init__(self): - self.data = set({1,3,5}) + self.data = set({1, 3, 5}) + def __contains__(self, val): return val in self.data + def __len__(self): return len(self.data) + with self.assertRaisesRegex(TypeError, 'not iterable'): - SetOf({1,5}).issuperset(_NonIterable()) + SetOf({1, 5}).issuperset(_NonIterable()) with self.assertRaisesRegex(TypeError, 'not iterable'): - SetOf({1,3,4,5}).issuperset(_NonIterable()) + SetOf({1, 3, 4, 5}).issuperset(_NonIterable()) # test bad type - with self.assertRaisesRegex( - TypeError, "'int' object is not iterable"): + with self.assertRaisesRegex(TypeError, "'int' object is not iterable"): i.issuperset(1) def test_unordered_setof(self): - i = SetOf({1,3,2,0}) + i = SetOf({1, 3, 2, 0}) self.assertTrue(i.isfinite()) self.assertFalse(i.isordered()) - self.assertEqual(i.ordered_data(), (0,1,2,3)) - self.assertEqual(i.sorted_data(), (0,1,2,3)) - self.assertEqual( tuple(reversed(i)), - tuple(reversed(list(i))) ) + self.assertEqual(i.ordered_data(), (0, 1, 2, 3)) + self.assertEqual(i.sorted_data(), (0, 1, 2, 3)) + self.assertEqual(tuple(reversed(i)), tuple(reversed(list(i)))) def test_ordered_setof(self): - i = SetOf([1,3,2,0]) + i = SetOf([1, 3, 2, 0]) self.assertTrue(i.isfinite()) self.assertTrue(i.isordered()) - self.assertEqual(i.ordered_data(), (1,3,2,0)) - self.assertEqual(i.sorted_data(), (0,1,2,3)) - self.assertEqual(tuple(reversed(i)), (0,2,3,1)) + self.assertEqual(i.ordered_data(), (1, 3, 2, 0)) + self.assertEqual(i.sorted_data(), (0, 1, 2, 3)) + self.assertEqual(tuple(reversed(i)), (0, 2, 3, 1)) self.assertEqual(i[2], 3) self.assertEqual(i[-1], 0) with self.assertRaisesRegex( - IndexError, "valid index values for Sets are " - r"\[1 .. len\(Set\)\] or \[-1 .. -len\(Set\)\]"): + IndexError, + "valid index values for Sets are " + r"\[1 .. len\(Set\)\] or \[-1 .. -len\(Set\)\]", + ): i[0] - with self.assertRaisesRegex( - IndexError, "OrderedSetOf index out of range"): + with self.assertRaisesRegex(IndexError, "OrderedSetOf index out of range"): i[5] - with self.assertRaisesRegex( - IndexError, "OrderedSetOf index out of range"): + with self.assertRaisesRegex(IndexError, "OrderedSetOf index out of range"): i[-5] self.assertEqual(i.ord(3), 2) @@ -1492,48 +1550,52 @@ def test_ordered_setof(self): self.assertEqual(i.prev(2), 3) self.assertEqual(i.nextw(3), 2) self.assertEqual(i.prevw(2), 3) - self.assertEqual(i.next(3,2), 0) - self.assertEqual(i.prev(2,2), 1) - self.assertEqual(i.nextw(3,2), 0) - self.assertEqual(i.prevw(2,2), 1) + self.assertEqual(i.next(3, 2), 0) + self.assertEqual(i.prev(2, 2), 1) + self.assertEqual(i.nextw(3, 2), 0) + self.assertEqual(i.prevw(2, 2), 1) with self.assertRaisesRegex( - IndexError, "Cannot advance past the end of the Set"): + IndexError, "Cannot advance past the end of the Set" + ): i.next(0) with self.assertRaisesRegex( - IndexError, "Cannot advance before the beginning of the Set"): + IndexError, "Cannot advance before the beginning of the Set" + ): i.prev(1) self.assertEqual(i.nextw(0), 1) self.assertEqual(i.prevw(1), 0) with self.assertRaisesRegex( - IndexError, "Cannot advance past the end of the Set"): - i.next(0,2) + IndexError, "Cannot advance past the end of the Set" + ): + i.next(0, 2) with self.assertRaisesRegex( - IndexError, "Cannot advance before the beginning of the Set"): - i.prev(1,2) - self.assertEqual(i.nextw(0,2), 3) - self.assertEqual(i.prevw(1,2), 2) + IndexError, "Cannot advance before the beginning of the Set" + ): + i.prev(1, 2) + self.assertEqual(i.nextw(0, 2), 3) + self.assertEqual(i.prevw(1, 2), 2) - i = SetOf((1,3,2,0)) + i = SetOf((1, 3, 2, 0)) self.assertTrue(i.isfinite()) self.assertTrue(i.isordered()) - self.assertEqual(i.ordered_data(), (1,3,2,0)) - self.assertEqual(i.sorted_data(), (0,1,2,3)) - self.assertEqual(tuple(reversed(i)), (0,2,3,1)) + self.assertEqual(i.ordered_data(), (1, 3, 2, 0)) + self.assertEqual(i.sorted_data(), (0, 1, 2, 3)) + self.assertEqual(tuple(reversed(i)), (0, 2, 3, 1)) self.assertEqual(i[2], 3) self.assertEqual(i[-1], 0) with self.assertRaisesRegex( - IndexError, "valid index values for Sets are " - r"\[1 .. len\(Set\)\] or \[-1 .. -len\(Set\)\]"): + IndexError, + "valid index values for Sets are " + r"\[1 .. len\(Set\)\] or \[-1 .. -len\(Set\)\]", + ): i[0] - with self.assertRaisesRegex( - IndexError, "OrderedSetOf index out of range"): + with self.assertRaisesRegex(IndexError, "OrderedSetOf index out of range"): i[5] - with self.assertRaisesRegex( - IndexError, "OrderedSetOf index out of range"): + with self.assertRaisesRegex(IndexError, "OrderedSetOf index out of range"): i[-5] self.assertEqual(i.ord(3), 2) @@ -1545,21 +1607,20 @@ def test_ordered_setof(self): self.assertTrue(i.isfinite()) self.assertTrue(i.isordered()) - self.assertEqual(i.ordered_data(), (1,None,'a')) - self.assertEqual(i.sorted_data(), (None,1,'a')) - self.assertEqual(tuple(reversed(i)), ('a',None,1)) - + self.assertEqual(i.ordered_data(), (1, None, 'a')) + self.assertEqual(i.sorted_data(), (None, 1, 'a')) + self.assertEqual(tuple(reversed(i)), ('a', None, 1)) def test_ranges(self): - i_data = [1,3,2,0] + i_data = [1, 3, 2, 0] i = SetOf(i_data) r = list(i.ranges()) self.assertEqual(len(r), 4) for idx, x in enumerate(r): self.assertIsInstance(x, NR) self.assertTrue(x.isfinite()) - self.assertEqual(x.start, i[idx+1]) - self.assertEqual(x.end, i[idx+1]) + self.assertEqual(x.start, i[idx + 1]) + self.assertEqual(x.end, i[idx + 1]) self.assertEqual(x.step, 0) # Test that apparent numeric types that are not in native_types @@ -1576,8 +1637,8 @@ def test_ranges(self): for idx, x in enumerate(r): self.assertIsInstance(x, NR) self.assertTrue(x.isfinite()) - self.assertEqual(x.start, i[idx+1]) - self.assertEqual(x.end, i[idx+1]) + self.assertEqual(x.start, i[idx + 1]) + self.assertEqual(x.end, i[idx + 1]) self.assertEqual(x.step, 0) self.assertIn(int, native_types) @@ -1603,122 +1664,120 @@ def test_ranges(self): for idx, x in enumerate(r[:-1]): self.assertIsInstance(x, NR) self.assertTrue(x.isfinite()) - self.assertEqual(x.start, i[idx+1]) - self.assertEqual(x.end, i[idx+1]) + self.assertEqual(x.start, i[idx + 1]) + self.assertEqual(x.end, i[idx + 1]) self.assertEqual(x.step, 0) self.assertIs(type(r[-1]), NNR) finally: native_types.add(str) def test_bounds(self): - self.assertEqual(SetOf([1,3,2,0]).bounds(), (0,3)) - self.assertEqual(SetOf([1,3.0,2,0]).bounds(), (0,3.0)) - self.assertEqual(SetOf([None,1,'a']).bounds(), (None,None)) - self.assertEqual(SetOf(['apple','cat','bear']).bounds(), - ('apple','cat')) + self.assertEqual(SetOf([1, 3, 2, 0]).bounds(), (0, 3)) + self.assertEqual(SetOf([1, 3.0, 2, 0]).bounds(), (0, 3.0)) + self.assertEqual(SetOf([None, 1, 'a']).bounds(), (None, None)) + self.assertEqual(SetOf(['apple', 'cat', 'bear']).bounds(), ('apple', 'cat')) self.assertEqual( - RangeSet(ranges=(NR(0,10,2),NR(3,20,2))).bounds(), - (0,19) + RangeSet(ranges=(NR(0, 10, 2), NR(3, 20, 2))).bounds(), (0, 19) ) self.assertEqual( - RangeSet(ranges=(NR(None,None,0),NR(0,10,2))).bounds(), - (None,None) + RangeSet(ranges=(NR(None, None, 0), NR(0, 10, 2))).bounds(), (None, None) ) self.assertEqual( - RangeSet(ranges=(NR(100,None,-2),NR(0,10,2))).bounds(), - (None,100) + RangeSet(ranges=(NR(100, None, -2), NR(0, 10, 2))).bounds(), (None, 100) ) self.assertEqual( - RangeSet(ranges=(NR(-10,None,2),NR(0,10,2))).bounds(), - (-10,None) + RangeSet(ranges=(NR(-10, None, 2), NR(0, 10, 2))).bounds(), (-10, None) ) self.assertEqual( - RangeSet(ranges=(NR(0,10,2),NR(None,None,0))).bounds(), - (None,None) + RangeSet(ranges=(NR(0, 10, 2), NR(None, None, 0))).bounds(), (None, None) ) self.assertEqual( - RangeSet(ranges=(NR(0,10,2),NR(100,None,-2))).bounds(), - (None,100) + RangeSet(ranges=(NR(0, 10, 2), NR(100, None, -2))).bounds(), (None, 100) ) self.assertEqual( - RangeSet(ranges=(NR(0,10,2),NR(-10,None,2))).bounds(), - (-10,None) + RangeSet(ranges=(NR(0, 10, 2), NR(-10, None, 2))).bounds(), (-10, None) ) def test_dimen(self): self.assertEqual(SetOf([]).dimen, 0) - self.assertEqual(SetOf([1,2,3]).dimen, 1) - self.assertEqual(SetOf([(1,2),(2,3),(4,5)]).dimen, 2) - self.assertEqual(SetOf([1,(2,3)]).dimen, None) + self.assertEqual(SetOf([1, 2, 3]).dimen, 1) + self.assertEqual(SetOf([(1, 2), (2, 3), (4, 5)]).dimen, 2) + self.assertEqual(SetOf([1, (2, 3)]).dimen, None) self.assertEqual(SetOf(Integers).dimen, 1) self.assertEqual(SetOf(Binary).dimen, 1) m = ConcreteModel() - m.I = Set(initialize=[(1,2), (3,4)]) + m.I = Set(initialize=[(1, 2), (3, 4)]) self.assertEqual(SetOf(m.I).dimen, 2) - a = [1,2,3,'abc'] + a = [1, 2, 3, 'abc'] SetOf_a = SetOf(a) self.assertEqual(SetOf_a.dimen, 1) - a.append((1,2)) + a.append((1, 2)) self.assertEqual(SetOf_a.dimen, None) def test_rangeset_iter(self): - i = RangeSet(0,10,2) - self.assertEqual(tuple(i), (0,2,4,6,8,10)) + i = RangeSet(0, 10, 2) + self.assertEqual(tuple(i), (0, 2, 4, 6, 8, 10)) + self.assertEqual(tuple(i.ordered_iter()), (0, 2, 4, 6, 8, 10)) + self.assertEqual(tuple(i.sorted_iter()), (0, 2, 4, 6, 8, 10)) - i = RangeSet(ranges=(NR(0,5,2),NR(6,10,2))) - self.assertEqual(tuple(i), (0,2,4,6,8,10)) + i = RangeSet(ranges=(NR(0, 5, 2), NR(6, 10, 2))) + self.assertEqual(tuple(i), (0, 2, 4, 6, 8, 10)) - i = RangeSet(ranges=(NR(0,10,2),NR(0,10,2))) - self.assertEqual(tuple(i), (0,2,4,6,8,10)) + i = RangeSet(ranges=(NR(0, 10, 2), NR(0, 10, 2))) + self.assertEqual(tuple(i), (0, 2, 4, 6, 8, 10)) - i = RangeSet(ranges=(NR(0,10,2),NR(10,0,-2))) - self.assertEqual(tuple(i), (0,2,4,6,8,10)) + i = RangeSet(ranges=(NR(0, 10, 2), NR(10, 0, -2))) + self.assertEqual(tuple(i), (0, 2, 4, 6, 8, 10)) - i = RangeSet(ranges=(NR(0,10,2),NR(9,0,-2))) - self.assertEqual(tuple(i), (0,1,2,3,4,5,6,7,8,9,10)) + i = RangeSet(ranges=(NR(0, 10, 2), NR(9, 0, -2))) + self.assertEqual(tuple(i), (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)) - i = RangeSet(ranges=(NR(0,10,2),NR(1,10,2))) + i = RangeSet(ranges=(NR(0, 10, 2), NR(1, 10, 2))) self.assertEqual(tuple(i), tuple(range(11))) - i = RangeSet(ranges=(NR(0,30,10),NR(12,14,1))) - self.assertEqual(tuple(i), (0,10,12,13,14,20,30)) + i = RangeSet(ranges=(NR(0, 30, 10), NR(12, 14, 1))) + self.assertEqual(tuple(i), (0, 10, 12, 13, 14, 20, 30)) - i = RangeSet(ranges=(NR(0,0,0),NR(3,3,0),NR(2,2,0))) - self.assertEqual(tuple(i), (0,2,3)) + i = RangeSet(ranges=(NR(0, 0, 0), NR(3, 3, 0), NR(2, 2, 0))) + self.assertEqual(tuple(i), (0, 2, 3)) def test_ord_index(self): - r = RangeSet(2,10,2) - for i,v in enumerate([2,4,6,8,10]): - self.assertEqual(r.ord(v), i+1) - self.assertEqual(r[i+1], v) + r = RangeSet(2, 10, 2) + for i, v in enumerate([2, 4, 6, 8, 10]): + self.assertEqual(r.ord(v), i + 1) + self.assertEqual(r[i + 1], v) with self.assertRaisesRegex( - IndexError, "valid index values for Sets are " - r"\[1 .. len\(Set\)\] or \[-1 .. -len\(Set\)\]"): + IndexError, + "valid index values for Sets are " + r"\[1 .. len\(Set\)\] or \[-1 .. -len\(Set\)\]", + ): r[0] with self.assertRaisesRegex( - IndexError, "FiniteScalarRangeSet index out of range"): + IndexError, "FiniteScalarRangeSet index out of range" + ): r[10] - with self.assertRaisesRegex( - ValueError, "Cannot identify position of 5 in Set"): + with self.assertRaisesRegex(ValueError, "Cannot identify position of 5 in Set"): r.ord(5) - r = RangeSet(ranges=(NR(2,10,2), NR(6,12,3))) - for i,v in enumerate([2,4,6,8,9,10,12]): - self.assertEqual(r.ord(v), i+1) - self.assertEqual(r[i+1], v) + r = RangeSet(ranges=(NR(2, 10, 2), NR(6, 12, 3))) + for i, v in enumerate([2, 4, 6, 8, 9, 10, 12]): + self.assertEqual(r.ord(v), i + 1) + self.assertEqual(r[i + 1], v) with self.assertRaisesRegex( - IndexError, "valid index values for Sets are " - r"\[1 .. len\(Set\)\] or \[-1 .. -len\(Set\)\]"): + IndexError, + "valid index values for Sets are " + r"\[1 .. len\(Set\)\] or \[-1 .. -len\(Set\)\]", + ): r[0] with self.assertRaisesRegex( - IndexError, "FiniteScalarRangeSet index out of range"): + IndexError, "FiniteScalarRangeSet index out of range" + ): r[10] - with self.assertRaisesRegex( - ValueError, "Cannot identify position of 5 in Set"): + with self.assertRaisesRegex(ValueError, "Cannot identify position of 5 in Set"): r.ord(5) so = SetOf([0, (1,), 1]) @@ -1726,14 +1785,16 @@ def test_ord_index(self): self.assertEqual(so.ord(1), 3) def test_float_steps(self): - a = RangeSet(0, 4, .5) + a = RangeSet(0, 4, 0.5) self.assertEqual(len(a), 9) - self.assertEqual(list(a - RangeSet(0,4,1)), [0.5, 1.5, 2.5, 3.5]) + self.assertEqual(list(a - RangeSet(0, 4, 1)), [0.5, 1.5, 2.5, 3.5]) with self.assertRaisesRegex( - ValueError, "RangeSet: start, end ordering incompatible with " - r"step direction \(got \[0:4:-0.5\]\)"): - RangeSet(0,4,-.5) + ValueError, + "RangeSet: start, end ordering incompatible with " + r"step direction \(got \[0:4:-0.5\]\)", + ): + RangeSet(0, 4, -0.5) def test_check_values(self): m = ConcreteModel() @@ -1743,8 +1804,8 @@ def test_check_values(self): with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertTrue(m.I.check_values()) self.assertRegex( - output.getvalue(), - r"^DEPRECATED: check_values\(\) is deprecated:") + output.getvalue(), r"^DEPRECATED: check_values\(\) is deprecated:" + ) class Test_SetOperator(unittest.TestCase): @@ -1758,12 +1819,14 @@ def test_construct(self): p.construct() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): i.construct() - ref = ('Constructing SetOperator, name=SetProduct_OrderedSet, ' - 'from data=None\n' - 'Constructing RangeSet, name=FiniteScalarRangeSet, ' - 'from data=None\n' - 'Constructing Set, name=SetProduct_OrderedSet, ' - 'from data=None\n') + ref = ( + 'Constructing SetOperator, name=SetProduct_OrderedSet, ' + 'from data=None\n' + 'Constructing RangeSet, name=FiniteScalarRangeSet, ' + 'from data=None\n' + 'Constructing Set, name=SetProduct_OrderedSet, ' + 'from data=None\n' + ) self.assertEqual(output.getvalue(), ref) # Calling construct() twice bypasses construction the second # time around @@ -1774,23 +1837,23 @@ def test_deepcopy(self): # This tests the example in Set.__deepcopy__() # This also tests that returning Set.Skip from a rule works... a = AbstractModel() - a.A = Set(initialize=[1,2]) - a.B = Set(initialize=[3,4]) - def x_init(m,i): + a.A = Set(initialize=[1, 2]) + a.B = Set(initialize=[3, 4]) + + def x_init(m, i): if i == 2: return Set.Skip else: return [] - a.x = Set( [1,2], - domain={1: a.A*a.B, 2: a.A*a.A}, - initialize=x_init ) + + a.x = Set([1, 2], domain={1: a.A * a.B, 2: a.A * a.A}, initialize=x_init) i = a.create_instance() self.assertEqual(len(i.x), 1) self.assertIn(1, i.x) self.assertNotIn(2, i.x) self.assertEqual(i.x[1].dimen, 2) - self.assertEqual(i.x[1].domain, i.A*i.B) + self.assertEqual(i.x[1].domain, i.A * i.B) self.assertEqual(i.x[1], []) @unittest.skipIf(not pandas_available, "pandas is not available") @@ -1800,14 +1863,10 @@ def test_pandas_multiindex_set_init(self): # ValueError: The truth value of a MultiIndex is ambiguous. # Use a.empty, a.bool(), a.item(), a.any() or a.all(). iterables = [['bar', 'baz', 'foo', 'qux'], ['one', 'two']] - pandas_index = pd.MultiIndex.from_product( - iterables, - names=['first', 'second'] - ) + pandas_index = pd.MultiIndex.from_product(iterables, names=['first', 'second']) model = ConcreteModel() - model.a = Set(initialize=pandas_index, - dimen=pandas_index.nlevels) + model.a = Set(initialize=pandas_index, dimen=pandas_index.nlevels) # we will confirm that dimension is inferred correctly model.b = Set(initialize=pandas_index) @@ -1823,21 +1882,22 @@ def test_pandas_multiindex_set_init(self): class TestSetUnion(unittest.TestCase): def test_pickle(self): - a = SetOf([1,3,5]) | SetOf([2,3,4]) + a = SetOf([1, 3, 5]) | SetOf([2, 3, 4]) b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) def test_len(self): - a = SetOf([1,2,3]) + a = SetOf([1, 2, 3]) self.assertEqual(len(a), 3) b = a | Reals with self.assertRaisesRegex( - OverflowError, 'The length of a non-finite Set is Inf'): + OverflowError, 'The length of a non-finite Set is Inf' + ): len(b) def test_bounds(self): - a = SetOf([-2,-1,0,1]) + a = SetOf([-2, -1, 0, 1]) b = a | NonNegativeReals self.assertEqual(b.bounds(), (-2, None)) b = NonNegativeReals | a @@ -1850,9 +1910,9 @@ def test_bounds(self): def test_naming(self): m = ConcreteModel() - m.I = SetOf([1,2]) - a = m.I | [3,4] - b = [-1,1] | a + m.I = SetOf([1, 2]) + a = m.I | [3, 4] + b = [-1, 1] | a self.assertEqual(str(a), "I | {3, 4}") self.assertEqual(str(b), "{-1, 1} | (I | {3, 4})") m.A = a @@ -1861,33 +1921,32 @@ def test_naming(self): def test_domain_and_pprint(self): m = ConcreteModel() - m.I = SetOf([1,2]) - m.A = m.I | [3,4] + m.I = SetOf([1, 2]) + m.A = m.I | [3, 4] self.assertIs(m.A._domain, m.A) # You can always set the domain to "Any" (we will just ignore it) m.A._domain = Any self.assertIs(m.A._domain, m.A) with self.assertRaisesRegex( - ValueError, - "Setting the domain of a Set Operator is not allowed"): + ValueError, "Setting the domain of a Set Operator is not allowed" + ): m.A._domain = None output = StringIO() m.A.pprint(ostream=output) - ref=""" + ref = """ A : Size=1, Index=None, Ordered=True Key : Dimen : Domain : Size : Members None : 1 : I | A_index_0 : 4 : {1, 2, 3, 4} """.strip() self.assertEqual(output.getvalue().strip(), ref) - def test_dimen(self): m = ConcreteModel() - m.I1 = SetOf([1,2,3,4]) - m.I2 = SetOf([(1,2), (3,4)]) - m.IN = SetOf([(1,2), (3,4), 1, 2]) + m.I1 = SetOf([1, 2, 3, 4]) + m.I2 = SetOf([(1, 2), (3, 4)]) + m.IN = SetOf([(1, 2), (3, 4), 1, 2]) m.J = Set() self.assertEqual((m.I1 | m.I1).dimen, 1) self.assertEqual((m.I2 | m.I2).dimen, 2) @@ -1924,9 +1983,9 @@ def _verify_ordered_union(self, a, b): self.assertTrue(x.isfinite()) self.assertTrue(x.isordered()) self.assertEqual(len(x), 5) - self.assertEqual(list(x), [1,3,2,5,4]) - self.assertEqual(x.ordered_data(), (1,3,2,5,4)) - self.assertEqual(x.sorted_data(), (1,2,3,4,5)) + self.assertEqual(list(x), [1, 3, 2, 5, 4]) + self.assertEqual(x.ordered_data(), (1, 3, 2, 5, 4)) + self.assertEqual(x.sorted_data(), (1, 2, 3, 4, 5)) self.assertIn(1, x) self.assertIn(2, x) @@ -1941,8 +2000,8 @@ def _verify_ordered_union(self, a, b): self.assertEqual(x.ord(4), 5) self.assertEqual(x.ord(5), 4) with self.assertRaisesRegex( - IndexError, - "Cannot identify position of 6 in Set SetUnion_OrderedSet"): + IndexError, "Cannot identify position of 6 in Set SetUnion_OrderedSet" + ): x.ord(6) self.assertEqual(x[1], 1) @@ -1951,8 +2010,8 @@ def _verify_ordered_union(self, a, b): self.assertEqual(x[4], 5) self.assertEqual(x[5], 4) with self.assertRaisesRegex( - IndexError, - "SetUnion_OrderedSet index out of range"): + IndexError, "SetUnion_OrderedSet index out of range" + ): x[6] self.assertEqual(x[-1], 4) @@ -1961,15 +2020,14 @@ def _verify_ordered_union(self, a, b): self.assertEqual(x[-4], 3) self.assertEqual(x[-5], 1) with self.assertRaisesRegex( - IndexError, - "SetUnion_OrderedSet index out of range"): + IndexError, "SetUnion_OrderedSet index out of range" + ): x[-6] def test_ordered_setunion(self): - self._verify_ordered_union(SetOf([1,3,2]), SetOf([5,3,4])) - self._verify_ordered_union([1,3,2], SetOf([5,3,4])) - self._verify_ordered_union(SetOf([1,3,2]), [5,3,4]) - + self._verify_ordered_union(SetOf([1, 3, 2]), SetOf([5, 3, 4])) + self._verify_ordered_union([1, 3, 2], SetOf([5, 3, 4])) + self._verify_ordered_union(SetOf([1, 3, 2]), [5, 3, 4]) def _verify_finite_union(self, a, b): # Note the placement of the second "3" in the middle of the set. @@ -1998,12 +2056,12 @@ def _verify_finite_union(self, a, b): self.assertFalse(x.isordered()) self.assertEqual(len(x), 5) if x._sets[0].isordered(): - self.assertEqual(list(x)[:3], [1,3,2]) + self.assertEqual(list(x)[:3], [1, 3, 2]) if x._sets[1].isordered(): - self.assertEqual(list(x)[-2:], [5,4]) - self.assertEqual(sorted(list(x)), [1,2,3,4,5]) - self.assertEqual(x.ordered_data(), (1,2,3,4,5)) - self.assertEqual(x.sorted_data(), (1,2,3,4,5)) + self.assertEqual(list(x)[-2:], [5, 4]) + self.assertEqual(sorted(list(x)), [1, 2, 3, 4, 5]) + self.assertEqual(x.ordered_data(), (1, 2, 3, 4, 5)) + self.assertEqual(x.sorted_data(), (1, 2, 3, 4, 5)) self.assertIn(1, x) self.assertIn(2, x) @@ -2013,17 +2071,15 @@ def _verify_finite_union(self, a, b): self.assertNotIn(6, x) # THe ranges should at least filter out the duplicates - self.assertEqual( - len(list(x._sets[0].ranges()) + list(x._sets[1].ranges())), 6) + self.assertEqual(len(list(x._sets[0].ranges()) + list(x._sets[1].ranges())), 6) self.assertEqual(len(list(x.ranges())), 5) def test_finite_setunion(self): - self._verify_finite_union(SetOf({1,3,2}), SetOf({5,3,4})) - self._verify_finite_union([1,3,2], SetOf({5,3,4})) - self._verify_finite_union(SetOf({1,3,2}), [5,3,4]) - self._verify_finite_union({1,3,2}, SetOf([5,3,4])) - self._verify_finite_union(SetOf([1,3,2]), {5,3,4}) - + self._verify_finite_union(SetOf({1, 3, 2}), SetOf({5, 3, 4})) + self._verify_finite_union([1, 3, 2], SetOf({5, 3, 4})) + self._verify_finite_union(SetOf({1, 3, 2}), [5, 3, 4]) + self._verify_finite_union({1, 3, 2}, SetOf([5, 3, 4])) + self._verify_finite_union(SetOf([1, 3, 2]), {5, 3, 4}) def _verify_infinite_union(self, a, b): # Note the placement of the second "3" in the middle of the set. @@ -2052,64 +2108,74 @@ def _verify_infinite_union(self, a, b): self.assertIn(5, x) self.assertNotIn(6, x) - self.assertEqual(list(x.ranges()), - list(x._sets[0].ranges()) + list(x._sets[1].ranges())) + self.assertEqual( + list(x.ranges()), list(x._sets[0].ranges()) + list(x._sets[1].ranges()) + ) def test_infinite_setunion(self): - self._verify_infinite_union(RangeSet(1,3,0), RangeSet(3,5,0)) - self._verify_infinite_union([1,3,2], RangeSet(3,5,0)) - self._verify_infinite_union(RangeSet(1,3,0), [5,3,4]) - self._verify_infinite_union({1,3,2}, RangeSet(3,5,0)) - self._verify_infinite_union(RangeSet(1,3,0), {5,3,4}) + self._verify_infinite_union(RangeSet(1, 3, 0), RangeSet(3, 5, 0)) + self._verify_infinite_union([1, 3, 2], RangeSet(3, 5, 0)) + self._verify_infinite_union(RangeSet(1, 3, 0), [5, 3, 4]) + self._verify_infinite_union({1, 3, 2}, RangeSet(3, 5, 0)) + self._verify_infinite_union(RangeSet(1, 3, 0), {5, 3, 4}) def test_invalid_operators(self): m = ConcreteModel() m.I = RangeSet(5) - m.J = Set([1,2]) + m.J = Set([1, 2]) with self.assertRaisesRegex( - TypeError, "Cannot apply a Set operator to an " - r"indexed Set component \(J\)"): + TypeError, + "Cannot apply a Set operator to an " r"indexed Set component \(J\)", + ): m.I | m.J with self.assertRaisesRegex( - TypeError, "Cannot apply a Set operator to an " - r"indexed Set component \(J\)"): + TypeError, + "Cannot apply a Set operator to an " r"indexed Set component \(J\)", + ): m.J | m.I m.x = Suffix() with self.assertRaisesRegex( - TypeError, "Cannot apply a Set operator to a " - r"non-Set Suffix component \(x\)"): + TypeError, + "Cannot apply a Set operator to a " r"non-Set Suffix component \(x\)", + ): m.I | m.x with self.assertRaisesRegex( - TypeError, "Cannot apply a Set operator to a " - r"non-Set Suffix component \(x\)"): + TypeError, + "Cannot apply a Set operator to a " r"non-Set Suffix component \(x\)", + ): m.x | m.I - m.y = Var([1,2]) + m.y = Var([1, 2]) with self.assertRaisesRegex( - TypeError, "Cannot apply a Set operator to an " - r"indexed Var component \(y\)"): + TypeError, + "Cannot apply a Set operator to an " r"indexed Var component \(y\)", + ): m.I | m.y with self.assertRaisesRegex( - TypeError, "Cannot apply a Set operator to a " - r"non-Set component data \(y\[1\]\)"): + TypeError, + "Cannot apply a Set operator to a " r"non-Set component data \(y\[1\]\)", + ): m.I | m.y[1] with self.assertRaisesRegex( - TypeError, "Cannot apply a Set operator to an " - r"indexed Var component \(y\)"): + TypeError, + "Cannot apply a Set operator to an " r"indexed Var component \(y\)", + ): m.y | m.I with self.assertRaisesRegex( - TypeError, "Cannot apply a Set operator to a " - r"non-Set component data \(y\[1\]\)"): + TypeError, + "Cannot apply a Set operator to a " r"non-Set component data \(y\[1\]\)", + ): m.y[1] | m.I + class TestSetIntersection(unittest.TestCase): def test_pickle(self): - a = SetOf([1,3,5]) & SetOf([2,3,4]) + a = SetOf([1, 3, 5]) & SetOf([2, 3, 4]) b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) def test_bounds(self): - a = SetOf([-2,-1,0,1]) + a = SetOf([-2, -1, 0, 1]) b = a & NonNegativeReals self.assertEqual(b.bounds(), (0, 1)) b = NonNegativeReals & a @@ -2120,9 +2186,9 @@ def test_bounds(self): def test_naming(self): m = ConcreteModel() - m.I = SetOf([1,2]) - a = m.I & [3,4] - b = [-1,1] & a + m.I = SetOf([1, 2]) + a = m.I & [3, 4] + b = [-1, 1] & a self.assertEqual(str(a), "I & {3, 4}") self.assertEqual(str(b), "{-1, 1} & (I & {3, 4})") m.A = a @@ -2131,21 +2197,21 @@ def test_naming(self): def test_domain_and_pprint(self): m = ConcreteModel() - m.I = SetOf([1,2]) - m.A = m.I & [3,4] + m.I = SetOf([1, 2]) + m.A = m.I & [3, 4] self.assertIs(m.A._domain, m.A) # You can always set the domain to "Any" (we will just ignore it) m.A._domain = Any self.assertIs(m.A._domain, m.A) with self.assertRaisesRegex( - ValueError, - "Setting the domain of a Set Operator is not allowed"): + ValueError, "Setting the domain of a Set Operator is not allowed" + ): m.A._domain = None output = StringIO() m.A.pprint(ostream=output) - ref=""" + ref = """ A : Size=1, Index=None, Ordered=True Key : Dimen : Domain : Size : Members None : 1 : I & A_index_0 : 0 : {} @@ -2154,9 +2220,9 @@ def test_domain_and_pprint(self): def test_dimen(self): m = ConcreteModel() - m.I1 = SetOf([1,2,3,4]) - m.I2 = SetOf([(1,2), (3,4)]) - m.IN = SetOf([(1,2), (3,4), 1, 2]) + m.I1 = SetOf([1, 2, 3, 4]) + m.I2 = SetOf([(1, 2), (3, 4)]) + m.IN = SetOf([(1, 2), (3, 4), 1, 2]) m.J = Set() self.assertEqual((m.I1 & m.I1).dimen, 1) self.assertEqual((m.I2 & m.I2).dimen, 2) @@ -2185,9 +2251,9 @@ def _verify_ordered_intersection(self, a, b): self.assertTrue(a_ordered or b_ordered) if a_ordered: - ref = (3,2,5) + ref = (3, 2, 5) else: - ref = (2,3,5) + ref = (2, 3, 5) x = a & b self.assertIs(type(x), SetIntersection_OrderedSet) @@ -2196,7 +2262,7 @@ def _verify_ordered_intersection(self, a, b): self.assertEqual(len(x), 3) self.assertEqual(list(x), list(ref)) self.assertEqual(x.ordered_data(), tuple(ref)) - self.assertEqual(x.sorted_data(), (2,3,5)) + self.assertEqual(x.sorted_data(), (2, 3, 5)) self.assertNotIn(1, x) self.assertIn(2, x) @@ -2205,39 +2271,39 @@ def _verify_ordered_intersection(self, a, b): self.assertIn(5, x) self.assertNotIn(6, x) - self.assertEqual(x.ord(2), ref.index(2)+1) - self.assertEqual(x.ord(3), ref.index(3)+1) + self.assertEqual(x.ord(2), ref.index(2) + 1) + self.assertEqual(x.ord(3), ref.index(3) + 1) self.assertEqual(x.ord(5), 3) with self.assertRaisesRegex( - IndexError, "Cannot identify position of 6 in Set " - "SetIntersection_OrderedSet"): + IndexError, + "Cannot identify position of 6 in Set SetIntersection_OrderedSet", + ): x.ord(6) self.assertEqual(x[1], ref[0]) self.assertEqual(x[2], ref[1]) self.assertEqual(x[3], 5) with self.assertRaisesRegex( - IndexError, - "SetIntersection_OrderedSet index out of range"): + IndexError, "SetIntersection_OrderedSet index out of range" + ): x[4] self.assertEqual(x[-1], 5) self.assertEqual(x[-2], ref[-2]) self.assertEqual(x[-3], ref[-3]) with self.assertRaisesRegex( - IndexError, - "SetIntersection_OrderedSet index out of range"): + IndexError, "SetIntersection_OrderedSet index out of range" + ): x[-4] def test_ordered_setintersection(self): - self._verify_ordered_intersection(SetOf([1,3,2,5]), SetOf([0,2,3,4,5])) - self._verify_ordered_intersection(SetOf([1,3,2,5]), SetOf({0,2,3,4,5})) - self._verify_ordered_intersection(SetOf({1,3,2,5}), SetOf([0,2,3,4,5])) - self._verify_ordered_intersection(SetOf([1,3,2,5]), [0,2,3,4,5]) - self._verify_ordered_intersection(SetOf([1,3,2,5]), {0,2,3,4,5}) - self._verify_ordered_intersection([1,3,2,5], SetOf([0,2,3,4,5])) - self._verify_ordered_intersection({1,3,2,5}, SetOf([0,2,3,4,5])) - + self._verify_ordered_intersection(SetOf([1, 3, 2, 5]), SetOf([0, 2, 3, 4, 5])) + self._verify_ordered_intersection(SetOf([1, 3, 2, 5]), SetOf({0, 2, 3, 4, 5})) + self._verify_ordered_intersection(SetOf({1, 3, 2, 5}), SetOf([0, 2, 3, 4, 5])) + self._verify_ordered_intersection(SetOf([1, 3, 2, 5]), [0, 2, 3, 4, 5]) + self._verify_ordered_intersection(SetOf([1, 3, 2, 5]), {0, 2, 3, 4, 5}) + self._verify_ordered_intersection([1, 3, 2, 5], SetOf([0, 2, 3, 4, 5])) + self._verify_ordered_intersection({1, 3, 2, 5}, SetOf([0, 2, 3, 4, 5])) def _verify_finite_intersection(self, a, b): # Note the placement of the second "3" in the middle of the set. @@ -2259,10 +2325,10 @@ def _verify_finite_intersection(self, a, b): self.assertFalse(x.isordered()) self.assertEqual(len(x), 3) if x._sets[0].isordered(): - self.assertEqual(list(x)[:3], [3,2,5]) - self.assertEqual(sorted(list(x)), [2,3,5]) - self.assertEqual(x.ordered_data(), (2,3,5)) - self.assertEqual(x.sorted_data(), (2,3,5)) + self.assertEqual(list(x)[:3], [3, 2, 5]) + self.assertEqual(sorted(list(x)), [2, 3, 5]) + self.assertEqual(x.ordered_data(), (2, 3, 5)) + self.assertEqual(x.sorted_data(), (2, 3, 5)) self.assertNotIn(1, x) self.assertIn(2, x) @@ -2272,23 +2338,23 @@ def _verify_finite_intersection(self, a, b): self.assertNotIn(6, x) # The ranges should at least filter out the duplicates - self.assertEqual( - len(list(x._sets[0].ranges()) + list(x._sets[1].ranges())), 9) + self.assertEqual(len(list(x._sets[0].ranges()) + list(x._sets[1].ranges())), 9) self.assertEqual(len(list(x.ranges())), 3) - def test_finite_setintersection(self): - self._verify_finite_intersection(SetOf({1,3,2,5}), SetOf({0,2,3,4,5})) - self._verify_finite_intersection({1,3,2,5}, SetOf({0,2,3,4,5})) - self._verify_finite_intersection(SetOf({1,3,2,5}), {0,2,3,4,5}) + self._verify_finite_intersection(SetOf({1, 3, 2, 5}), SetOf({0, 2, 3, 4, 5})) + self._verify_finite_intersection({1, 3, 2, 5}, SetOf({0, 2, 3, 4, 5})) + self._verify_finite_intersection(SetOf({1, 3, 2, 5}), {0, 2, 3, 4, 5}) self._verify_finite_intersection( - RangeSet(ranges=(NR(-5,-1,0), NR(2,3,0), NR(5,5,0), NR(10,20,0))), - SetOf({0,2,3,4,5})) + RangeSet(ranges=(NR(-5, -1, 0), NR(2, 3, 0), NR(5, 5, 0), NR(10, 20, 0))), + SetOf({0, 2, 3, 4, 5}), + ) self._verify_finite_intersection( - SetOf({1,3,2,5}), - RangeSet(ranges=(NR(2,5,0), NR(2,5,0), NR(6,6,0), NR(6,6,0), - NR(6,6,0)))) - + SetOf({1, 3, 2, 5}), + RangeSet( + ranges=(NR(2, 5, 0), NR(2, 5, 0), NR(6, 6, 0), NR(6, 6, 0), NR(6, 6, 0)) + ), + ) def _verify_infinite_intersection(self, a, b): if isinstance(a, (Set, SetOf, RangeSet)): @@ -2299,7 +2365,7 @@ def _verify_infinite_intersection(self, a, b): b_finite = b.isfinite() else: b_finite = True - self.assertEqual([a_finite, b_finite], [False,False]) + self.assertEqual([a_finite, b_finite], [False, False]) x = a & b self.assertIs(type(x), SetIntersection_InfiniteSet) @@ -2313,11 +2379,10 @@ def _verify_infinite_intersection(self, a, b): self.assertNotIn(5, x) self.assertNotIn(6, x) - self.assertEqual(list(x.ranges()), - list(RangeSet(2,4,0).ranges())) + self.assertEqual(list(x.ranges()), list(RangeSet(2, 4, 0).ranges())) def test_infinite_setintersection(self): - self._verify_infinite_intersection(RangeSet(0,4,0), RangeSet(2,6,0)) + self._verify_infinite_intersection(RangeSet(0, 4, 0), RangeSet(2, 6, 0)) def test_odd_intersections(self): # Test the intersection of an infinite discrete range with a @@ -2325,7 +2390,7 @@ def test_odd_intersections(self): m = AbstractModel() m.p = Param(initialize=0) m.a = RangeSet(0, None, 2) - m.b = RangeSet(5,10,m.p, finite=False) + m.b = RangeSet(5, 10, m.p, finite=False) m.x = m.a & m.b self.assertTrue(m.a._constructed) self.assertFalse(m.b._constructed) @@ -2333,7 +2398,7 @@ def test_odd_intersections(self): self.assertIs(type(m.x), SetIntersection_InfiniteSet) i = m.create_instance() self.assertIs(type(i.x), SetIntersection_OrderedSet) - self.assertEqual(list(i.x), [6,8,10]) + self.assertEqual(list(i.x), [6, 8, 10]) self.assertEqual(i.x.ord(6), 1) self.assertEqual(i.x.ord(8), 2) @@ -2342,17 +2407,13 @@ def test_odd_intersections(self): self.assertEqual(i.x[1], 6) self.assertEqual(i.x[2], 8) self.assertEqual(i.x[3], 10) - with self.assertRaisesRegex( - IndexError, - "x index out of range"): + with self.assertRaisesRegex(IndexError, "x index out of range"): i.x[4] self.assertEqual(i.x[-3], 6) self.assertEqual(i.x[-2], 8) self.assertEqual(i.x[-1], 10) - with self.assertRaisesRegex( - IndexError, - "x index out of range"): + with self.assertRaisesRegex(IndexError, "x index out of range"): i.x[-4] def test_subsets(self): @@ -2365,36 +2426,36 @@ def test_subsets(self): self.assertEqual(len(x._sets), 2) self.assertEqual(list(x.subsets()), [x]) self.assertEqual(list(x.subsets(False)), [x]) - self.assertEqual(list(x.subsets(True)), [a,b]) + self.assertEqual(list(x.subsets(True)), [a, b]) x = a & b & c self.assertEqual(len(x._sets), 2) self.assertEqual(list(x.subsets()), [x]) self.assertEqual(list(x.subsets(False)), [x]) - self.assertEqual(list(x.subsets(True)), [a,b,c]) + self.assertEqual(list(x.subsets(True)), [a, b, c]) x = (a & b) & (c & d) self.assertEqual(len(x._sets), 2) self.assertEqual(list(x.subsets()), [x]) self.assertEqual(list(x.subsets(False)), [x]) - self.assertEqual(list(x.subsets(True)), [a,b,c,d]) + self.assertEqual(list(x.subsets(True)), [a, b, c, d]) x = (a & b) * (c & d) self.assertEqual(len(x._sets), 2) self.assertEqual(len(list(x.subsets())), 2) - self.assertEqual(list(x.subsets()), [a&b, c&d]) - self.assertEqual(list(x.subsets(False)), [a&b, c&d]) + self.assertEqual(list(x.subsets()), [a & b, c & d]) + self.assertEqual(list(x.subsets(False)), [a & b, c & d]) self.assertEqual(len(list(x.subsets(True))), 4) - self.assertEqual(list(x.subsets(True)), [a,b,c,d]) + self.assertEqual(list(x.subsets(True)), [a, b, c, d]) class TestSetDifference(unittest.TestCase): def test_pickle(self): - a = SetOf([1,3,5]) - SetOf([2,3,4]) + a = SetOf([1, 3, 5]) - SetOf([2, 3, 4]) b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) def test_bounds(self): - a = SetOf([-2,-1,0,1]) + a = SetOf([-2, -1, 0, 1]) b = a - NonNegativeReals self.assertEqual(b.bounds(), (-2, -1)) b = a - RangeSet(3) @@ -2403,9 +2464,9 @@ def test_bounds(self): def test_naming(self): m = ConcreteModel() - m.I = SetOf([1,2]) - a = m.I - [3,4] - b = [-1,1] - a + m.I = SetOf([1, 2]) + a = m.I - [3, 4] + b = [-1, 1] - a self.assertEqual(str(a), "I - {3, 4}") self.assertEqual(str(b), "{-1, 1} - (I - {3, 4})") m.A = a @@ -2414,21 +2475,21 @@ def test_naming(self): def test_domain_and_pprint(self): m = ConcreteModel() - m.I = SetOf([1,2]) - m.A = m.I - [3,4] + m.I = SetOf([1, 2]) + m.A = m.I - [3, 4] self.assertIs(m.A._domain, m.A) # You can always set the domain to "Any" (we will just ignore it) m.A._domain = Any self.assertIs(m.A._domain, m.A) with self.assertRaisesRegex( - ValueError, - "Setting the domain of a Set Operator is not allowed"): + ValueError, "Setting the domain of a Set Operator is not allowed" + ): m.A._domain = None output = StringIO() m.A.pprint(ostream=output) - ref=""" + ref = """ A : Size=1, Index=None, Ordered=True Key : Dimen : Domain : Size : Members None : 1 : I - A_index_0 : 2 : {1, 2} @@ -2437,9 +2498,9 @@ def test_domain_and_pprint(self): def test_dimen(self): m = ConcreteModel() - m.I1 = SetOf([1,2,3,4]) - m.I2 = SetOf([(1,2), (3,4)]) - m.IN = SetOf([(1,2), (3,4), 1, 2]) + m.I1 = SetOf([1, 2, 3, 4]) + m.I2 = SetOf([(1, 2), (3, 4)]) + m.IN = SetOf([(1, 2), (3, 4), 1, 2]) m.J = Set() self.assertEqual((m.I1 - m.I1).dimen, 1) self.assertEqual((m.I2 - m.I2).dimen, 2) @@ -2473,9 +2534,9 @@ def _verify_ordered_difference(self, a, b): self.assertTrue(x.isfinite()) self.assertTrue(x.isordered()) self.assertEqual(len(x), 3) - self.assertEqual(list(x), [3,2,5]) - self.assertEqual(x.ordered_data(), (3,2,5)) - self.assertEqual(x.sorted_data(), (2,3,5)) + self.assertEqual(list(x), [3, 2, 5]) + self.assertEqual(x.ordered_data(), (3, 2, 5)) + self.assertEqual(x.sorted_data(), (2, 3, 5)) self.assertNotIn(0, x) self.assertNotIn(1, x) @@ -2489,36 +2550,36 @@ def _verify_ordered_difference(self, a, b): self.assertEqual(x.ord(3), 1) self.assertEqual(x.ord(5), 3) with self.assertRaisesRegex( - IndexError, "Cannot identify position of 6 in Set " - "SetDifference_OrderedSet"): + IndexError, "Cannot identify position of 6 in Set SetDifference_OrderedSet" + ): x.ord(6) self.assertEqual(x[1], 3) self.assertEqual(x[2], 2) self.assertEqual(x[3], 5) with self.assertRaisesRegex( - IndexError, - "SetDifference_OrderedSet index out of range"): + IndexError, "SetDifference_OrderedSet index out of range" + ): x[4] self.assertEqual(x[-1], 5) self.assertEqual(x[-2], 2) self.assertEqual(x[-3], 3) with self.assertRaisesRegex( - IndexError, - "SetDifference_OrderedSet index out of range"): + IndexError, "SetDifference_OrderedSet index out of range" + ): x[-4] def test_ordered_setdifference(self): - self._verify_ordered_difference(SetOf([0,3,2,1,5,4]), SetOf([0,1,4])) - self._verify_ordered_difference(SetOf([0,3,2,1,5,4]), SetOf({0,1,4})) - self._verify_ordered_difference(SetOf([0,3,2,1,5,4]), [0,1,4]) - self._verify_ordered_difference(SetOf([0,3,2,1,5,4]), {0,1,4}) - self._verify_ordered_difference(SetOf([0,3,2,1,5,4]), - RangeSet(ranges=(NR(0,1,0),NR(4,4,0)))) - self._verify_ordered_difference([0,3,2,1,5,4], SetOf([0,1,4])) - self._verify_ordered_difference([0,3,2,1,5,4], SetOf({0,1,4})) - + self._verify_ordered_difference(SetOf([0, 3, 2, 1, 5, 4]), SetOf([0, 1, 4])) + self._verify_ordered_difference(SetOf([0, 3, 2, 1, 5, 4]), SetOf({0, 1, 4})) + self._verify_ordered_difference(SetOf([0, 3, 2, 1, 5, 4]), [0, 1, 4]) + self._verify_ordered_difference(SetOf([0, 3, 2, 1, 5, 4]), {0, 1, 4}) + self._verify_ordered_difference( + SetOf([0, 3, 2, 1, 5, 4]), RangeSet(ranges=(NR(0, 1, 0), NR(4, 4, 0))) + ) + self._verify_ordered_difference([0, 3, 2, 1, 5, 4], SetOf([0, 1, 4])) + self._verify_ordered_difference([0, 3, 2, 1, 5, 4], SetOf({0, 1, 4})) def _verify_finite_difference(self, a, b): # Note the placement of the second "3" in the middle of the set. @@ -2539,9 +2600,9 @@ def _verify_finite_difference(self, a, b): self.assertTrue(x.isfinite()) self.assertFalse(x.isordered()) self.assertEqual(len(x), 3) - self.assertEqual(sorted(list(x)), [2,3,5]) - self.assertEqual(x.ordered_data(), (2,3,5)) - self.assertEqual(x.sorted_data(), (2,3,5)) + self.assertEqual(sorted(list(x)), [2, 3, 5]) + self.assertEqual(x.ordered_data(), (2, 3, 5)) + self.assertEqual(x.sorted_data(), (2, 3, 5)) self.assertNotIn(0, x) self.assertNotIn(1, x) @@ -2552,25 +2613,23 @@ def _verify_finite_difference(self, a, b): self.assertNotIn(6, x) # The ranges should at least filter out the duplicates - self.assertEqual( - len(list(x._sets[0].ranges()) + list(x._sets[1].ranges())), 9) + self.assertEqual(len(list(x._sets[0].ranges()) + list(x._sets[1].ranges())), 9) self.assertEqual(len(list(x.ranges())), 3) - def test_finite_setdifference(self): - self._verify_finite_difference(SetOf({0,3,2,1,5,4}), SetOf({0,1,4})) - self._verify_finite_difference(SetOf({0,3,2,1,5,4}), SetOf([0,1,4])) - self._verify_finite_difference(SetOf({0,3,2,1,5,4}), [0,1,4]) - self._verify_finite_difference(SetOf({0,3,2,1,5,4}), {0,1,4}) + self._verify_finite_difference(SetOf({0, 3, 2, 1, 5, 4}), SetOf({0, 1, 4})) + self._verify_finite_difference(SetOf({0, 3, 2, 1, 5, 4}), SetOf([0, 1, 4])) + self._verify_finite_difference(SetOf({0, 3, 2, 1, 5, 4}), [0, 1, 4]) + self._verify_finite_difference(SetOf({0, 3, 2, 1, 5, 4}), {0, 1, 4}) self._verify_finite_difference( - SetOf({0,3,2,1,5,4}), - RangeSet(ranges=(NR(0,1,0),NR(4,4,0),NR(6,10,0)))) - self._verify_finite_difference({0,3,2,1,5,4}, SetOf([0,1,4])) - self._verify_finite_difference({0,3,2,1,5,4}, SetOf({0,1,4})) - + SetOf({0, 3, 2, 1, 5, 4}), + RangeSet(ranges=(NR(0, 1, 0), NR(4, 4, 0), NR(6, 10, 0))), + ) + self._verify_finite_difference({0, 3, 2, 1, 5, 4}, SetOf([0, 1, 4])) + self._verify_finite_difference({0, 3, 2, 1, 5, 4}, SetOf({0, 1, 4})) def test_infinite_setdifference(self): - x = RangeSet(0,4,0) - RangeSet(2,6,0) + x = RangeSet(0, 4, 0) - RangeSet(2, 6, 0) self.assertIs(type(x), SetDifference_InfiniteSet) self.assertFalse(x.isfinite()) self.assertFalse(x.isordered()) @@ -2585,18 +2644,19 @@ def test_infinite_setdifference(self): self.assertEqual( list(x.ranges()), - list(RangeSet(ranges=[NR(0,2,0,(True,False))]).ranges())) + list(RangeSet(ranges=[NR(0, 2, 0, (True, False))]).ranges()), + ) class TestSetSymmetricDifference(unittest.TestCase): def test_pickle(self): - a = SetOf([1,3,5]) ^ SetOf([2,3,4]) + a = SetOf([1, 3, 5]) ^ SetOf([2, 3, 4]) b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) def test_bounds(self): - a = SetOf([-2,-1,0,1]) + a = SetOf([-2, -1, 0, 1]) b = a ^ NonNegativeReals self.assertEqual(b.bounds(), (-2, None)) c = a ^ RangeSet(3) @@ -2605,9 +2665,9 @@ def test_bounds(self): def test_naming(self): m = ConcreteModel() - m.I = SetOf([1,2]) - a = m.I ^ [3,4] - b = [-1,1] ^ a + m.I = SetOf([1, 2]) + a = m.I ^ [3, 4] + b = [-1, 1] ^ a self.assertEqual(str(a), "I ^ {3, 4}") self.assertEqual(str(b), "{-1, 1} ^ (I ^ {3, 4})") m.A = a @@ -2616,21 +2676,21 @@ def test_naming(self): def test_domain_and_pprint(self): m = ConcreteModel() - m.I = SetOf([1,2]) - m.A = m.I ^ [3,4] + m.I = SetOf([1, 2]) + m.A = m.I ^ [3, 4] self.assertIs(m.A._domain, m.A) # You can always set the domain to "Any" (we will just ignore it) m.A._domain = Any self.assertIs(m.A._domain, m.A) with self.assertRaisesRegex( - ValueError, - "Setting the domain of a Set Operator is not allowed"): + ValueError, "Setting the domain of a Set Operator is not allowed" + ): m.A._domain = None output = StringIO() m.A.pprint(ostream=output) - ref=""" + ref = """ A : Size=1, Index=None, Ordered=True Key : Dimen : Domain : Size : Members None : 1 : I ^ A_index_0 : 4 : {1, 2, 3, 4} @@ -2639,9 +2699,9 @@ def test_domain_and_pprint(self): def test_dimen(self): m = ConcreteModel() - m.I1 = SetOf([1,2,3,4]) - m.I2 = SetOf([(1,2), (3,4)]) - m.IN = SetOf([(1,2), (3,4), 1, 2]) + m.I1 = SetOf([1, 2, 3, 4]) + m.I2 = SetOf([(1, 2), (3, 4)]) + m.IN = SetOf([(1, 2), (3, 4), 1, 2]) m.J = Set() self.assertEqual((m.I1 ^ m.I1).dimen, 1) self.assertEqual((m.I2 ^ m.I2).dimen, 2) @@ -2675,9 +2735,9 @@ def _verify_ordered_symdifference(self, a, b): self.assertTrue(x.isfinite()) self.assertTrue(x.isordered()) self.assertEqual(len(x), 4) - self.assertEqual(list(x), [3,2,5,0]) - self.assertEqual(x.ordered_data(), (3,2,5,0)) - self.assertEqual(x.sorted_data(), (0,2,3,5)) + self.assertEqual(list(x), [3, 2, 5, 0]) + self.assertEqual(x.ordered_data(), (3, 2, 5, 0)) + self.assertEqual(x.sorted_data(), (0, 2, 3, 5)) self.assertIn(0, x) self.assertNotIn(1, x) @@ -2692,8 +2752,9 @@ def _verify_ordered_symdifference(self, a, b): self.assertEqual(x.ord(3), 1) self.assertEqual(x.ord(5), 3) with self.assertRaisesRegex( - IndexError, "Cannot identify position of 6 in Set " - "SetSymmetricDifference_OrderedSet"): + IndexError, + "Cannot identify position of 6 in Set SetSymmetricDifference_OrderedSet", + ): x.ord(6) self.assertEqual(x[1], 3) @@ -2701,8 +2762,8 @@ def _verify_ordered_symdifference(self, a, b): self.assertEqual(x[3], 5) self.assertEqual(x[4], 0) with self.assertRaisesRegex( - IndexError, - "SetSymmetricDifference_OrderedSet index out of range"): + IndexError, "SetSymmetricDifference_OrderedSet index out of range" + ): x[5] self.assertEqual(x[-1], 0) @@ -2710,14 +2771,14 @@ def _verify_ordered_symdifference(self, a, b): self.assertEqual(x[-3], 2) self.assertEqual(x[-4], 3) with self.assertRaisesRegex( - IndexError, - "SetSymmetricDifference_OrderedSet index out of range"): + IndexError, "SetSymmetricDifference_OrderedSet index out of range" + ): x[-5] def test_ordered_setsymmetricdifference(self): - self._verify_ordered_symdifference(SetOf([3,2,1,5,4]), SetOf([0,1,4])) - self._verify_ordered_symdifference(SetOf([3,2,1,5,4]), [0,1,4]) - self._verify_ordered_symdifference([3,2,1,5,4], SetOf([0,1,4])) + self._verify_ordered_symdifference(SetOf([3, 2, 1, 5, 4]), SetOf([0, 1, 4])) + self._verify_ordered_symdifference(SetOf([3, 2, 1, 5, 4]), [0, 1, 4]) + self._verify_ordered_symdifference([3, 2, 1, 5, 4], SetOf([0, 1, 4])) def _verify_finite_symdifference(self, a, b): # Note the placement of the second "3" in the middle of the set. @@ -2738,9 +2799,9 @@ def _verify_finite_symdifference(self, a, b): self.assertTrue(x.isfinite()) self.assertFalse(x.isordered()) self.assertEqual(len(x), 4) - self.assertEqual(sorted(list(x)), [0,2,3,5]) - self.assertEqual(x.ordered_data(), (0,2,3,5)) - self.assertEqual(x.sorted_data(), (0,2,3,5)) + self.assertEqual(sorted(list(x)), [0, 2, 3, 5]) + self.assertEqual(x.ordered_data(), (0, 2, 3, 5)) + self.assertEqual(x.sorted_data(), (0, 2, 3, 5)) self.assertIn(0, x) self.assertNotIn(1, x) @@ -2751,25 +2812,22 @@ def _verify_finite_symdifference(self, a, b): self.assertNotIn(6, x) # The ranges should at least filter out the duplicates - self.assertEqual( - len(list(x._sets[0].ranges()) + list(x._sets[1].ranges())), 8) + self.assertEqual(len(list(x._sets[0].ranges()) + list(x._sets[1].ranges())), 8) self.assertEqual(len(list(x.ranges())), 4) - def test_finite_setsymmetricdifference(self): - self._verify_finite_symdifference(SetOf([3,2,1,5,4]), SetOf({0,1,4})) - self._verify_finite_symdifference(SetOf([3,2,1,5,4]), {0,1,4}) - self._verify_finite_symdifference([3,2,1,5,4], SetOf({0,1,4})) - self._verify_finite_symdifference(SetOf({3,2,1,5,4}), SetOf({0,1,4})) - self._verify_finite_symdifference(SetOf({3,2,1,5,4}), SetOf([0,1,4])) - self._verify_finite_symdifference(SetOf({3,2,1,5,4}), [0,1,4]) - self._verify_finite_symdifference(SetOf({3,2,1,5,4}), {0,1,4}) - self._verify_finite_symdifference({3,2,1,5,4}, SetOf([0,1,4])) - self._verify_finite_symdifference({3,2,1,5,4}, SetOf({0,1,4})) - + self._verify_finite_symdifference(SetOf([3, 2, 1, 5, 4]), SetOf({0, 1, 4})) + self._verify_finite_symdifference(SetOf([3, 2, 1, 5, 4]), {0, 1, 4}) + self._verify_finite_symdifference([3, 2, 1, 5, 4], SetOf({0, 1, 4})) + self._verify_finite_symdifference(SetOf({3, 2, 1, 5, 4}), SetOf({0, 1, 4})) + self._verify_finite_symdifference(SetOf({3, 2, 1, 5, 4}), SetOf([0, 1, 4])) + self._verify_finite_symdifference(SetOf({3, 2, 1, 5, 4}), [0, 1, 4]) + self._verify_finite_symdifference(SetOf({3, 2, 1, 5, 4}), {0, 1, 4}) + self._verify_finite_symdifference({3, 2, 1, 5, 4}, SetOf([0, 1, 4])) + self._verify_finite_symdifference({3, 2, 1, 5, 4}, SetOf({0, 1, 4})) def test_infinite_setdifference(self): - x = RangeSet(0,4,0) ^ RangeSet(2,6,0) + x = RangeSet(0, 4, 0) ^ RangeSet(2, 6, 0) self.assertIs(type(x), SetSymmetricDifference_InfiniteSet) self.assertFalse(x.isfinite()) self.assertFalse(x.isordered()) @@ -2785,11 +2843,12 @@ def test_infinite_setdifference(self): self.assertEqual( sorted(str(_) for _ in x.ranges()), - sorted(str(_) for _ in [ - NR(0,2,0,(True,False)), NR(4,6,0,(False, True)) - ])) + sorted( + str(_) for _ in [NR(0, 2, 0, (True, False)), NR(4, 6, 0, (False, True))] + ), + ) - x = SetOf([3,2,1,5,4]) ^ RangeSet(3,6,0) + x = SetOf([3, 2, 1, 5, 4]) ^ RangeSet(3, 6, 0) self.assertIs(type(x), SetSymmetricDifference_InfiniteSet) self.assertFalse(x.isfinite()) self.assertFalse(x.isordered()) @@ -2806,15 +2865,19 @@ def test_infinite_setdifference(self): self.assertEqual( sorted(str(_) for _ in x.ranges()), - sorted(str(_) for _ in [ - NR(1,1,0), - NR(2,2,0), - NR(3,4,0,(False,False)), - NR(4,5,0,(False,False)), - NR(5,6,0,(False, True)) - ])) - - x = RangeSet(3,6,0) ^ SetOf([3,2,1,5,4]) + sorted( + str(_) + for _ in [ + NR(1, 1, 0), + NR(2, 2, 0), + NR(3, 4, 0, (False, False)), + NR(4, 5, 0, (False, False)), + NR(5, 6, 0, (False, True)), + ] + ), + ) + + x = RangeSet(3, 6, 0) ^ SetOf([3, 2, 1, 5, 4]) self.assertIs(type(x), SetSymmetricDifference_InfiniteSet) self.assertFalse(x.isfinite()) self.assertFalse(x.isordered()) @@ -2831,24 +2894,28 @@ def test_infinite_setdifference(self): self.assertEqual( sorted(str(_) for _ in x.ranges()), - sorted(str(_) for _ in [ - NR(1,1,0), - NR(2,2,0), - NR(3,4,0,(False,False)), - NR(4,5,0,(False,False)), - NR(5,6,0,(False, True)) - ])) + sorted( + str(_) + for _ in [ + NR(1, 1, 0), + NR(2, 2, 0), + NR(3, 4, 0, (False, False)), + NR(4, 5, 0, (False, False)), + NR(5, 6, 0, (False, True)), + ] + ), + ) class TestSetProduct(unittest.TestCase): def test_pickle(self): - a = SetOf([1,3,5]) * SetOf([2,3,4]) + a = SetOf([1, 3, 5]) * SetOf([2, 3, 4]) b = pickle.loads(pickle.dumps(a)) - self.assertIsNot(a,b) - self.assertEqual(a,b) + self.assertIsNot(a, b) + self.assertEqual(a, b) def test_bounds(self): - a = SetOf([-2,-1,0,1]) + a = SetOf([-2, -1, 0, 1]) b = a * NonNegativeReals self.assertEqual(b.bounds(), ((-2, 0), (1, None))) c = a * RangeSet(3) @@ -2857,35 +2924,35 @@ def test_bounds(self): def test_naming(self): m = ConcreteModel() - m.I = SetOf([1,2]) - a = m.I * [3,4] - b = [-1,1] * a + m.I = SetOf([1, 2]) + a = m.I * [3, 4] + b = [-1, 1] * a self.assertEqual(str(a), "I*{3, 4}") self.assertEqual(str(b), "{-1, 1}*(I*{3, 4})") m.A = a self.assertEqual(str(a), "A") self.assertEqual(str(b), "{-1, 1}*A") - c = SetProduct(m.I, [1,2], m.I) + c = SetProduct(m.I, [1, 2], m.I) self.assertEqual(str(c), "I*{1, 2}*I") def test_domain_and_pprint(self): m = ConcreteModel() - m.I = SetOf([1,2]) - m.A = m.I * [3,4] + m.I = SetOf([1, 2]) + m.A = m.I * [3, 4] self.assertIs(m.A._domain, m.A) # You can always set the domain to "Any" (we will just ignore it) m.A._domain = Any self.assertIs(m.A._domain, m.A) with self.assertRaisesRegex( - ValueError, - "Setting the domain of a Set Operator is not allowed"): + ValueError, "Setting the domain of a Set Operator is not allowed" + ): m.A._domain = None output = StringIO() m.A.pprint(ostream=output) - ref=""" + ref = """ A : Size=1, Index=None, Ordered=True Key : Dimen : Domain : Size : Members None : 2 : I*A_index_0 : 4 : {(1, 3), (1, 4), (2, 3), (2, 4)} @@ -2893,11 +2960,11 @@ def test_domain_and_pprint(self): self.assertEqual(output.getvalue().strip(), ref) m = ConcreteModel() - m.I = Set(initialize=[1,2,3]) + m.I = Set(initialize=[1, 2, 3]) m.J = Reals * m.I output = StringIO() m.J.pprint(ostream=output) - ref=""" + ref = """ J : Size=1, Index=None, Ordered=False Key : Dimen : Domain : Size : Members None : 2 : Reals*I : Inf : <[-inf..inf], ([1], [2], [3])> @@ -2906,9 +2973,9 @@ def test_domain_and_pprint(self): def test_dimen(self): m = ConcreteModel() - m.I1 = SetOf([1,2,3,4]) - m.I2 = SetOf([(1,2), (3,4)]) - m.IN = SetOf([(1,2), (3,4), 1, 2]) + m.I1 = SetOf([1, 2, 3, 4]) + m.I2 = SetOf([(1, 2), (3, 4)]) + m.IN = SetOf([(1, 2), (3, 4), 1, 2]) m.J = Set() self.assertEqual((m.I1 * m.I1).dimen, 2) self.assertEqual((m.I2 * m.I2).dimen, 4) @@ -2929,25 +2996,29 @@ def test_dimen(self): def test_cutPointGenerator(self): CG = SetProduct_InfiniteSet._cutPointGenerator i = Any - j = SetOf([(1,1),(1,2),(2,1),(2,2)]) + j = SetOf([(1, 1), (1, 2), (2, 1), (2, 2)]) - test = list(tuple(_) for _ in CG((i,i), 3)) - ref = [(0,0,3),(0,1,3),(0,2,3),(0,3,3)] + test = list(tuple(_) for _ in CG((i, i), 3)) + ref = [(0, 0, 3), (0, 1, 3), (0, 2, 3), (0, 3, 3)] self.assertEqual(test, ref) - test = list(tuple(_) for _ in CG((i,i,i), 3)) - ref = [ - (0,0,0,3),(0,0,1,3),(0,0,2,3),(0,0,3,3), - (0,1,1,3),(0,1,2,3),(0,1,3,3), - (0,2,2,3),(0,2,3,3), - (0,3,3,3) + test = list(tuple(_) for _ in CG((i, i, i), 3)) + ref = [ + (0, 0, 0, 3), + (0, 0, 1, 3), + (0, 0, 2, 3), + (0, 0, 3, 3), + (0, 1, 1, 3), + (0, 1, 2, 3), + (0, 1, 3, 3), + (0, 2, 2, 3), + (0, 2, 3, 3), + (0, 3, 3, 3), ] self.assertEqual(test, ref) - test = list(tuple(_) for _ in CG((i,j,i), 5)) - ref = [ - (0,0,2,5),(0,1,3,5),(0,2,4,5),(0,3,5,5), - ] + test = list(tuple(_) for _ in CG((i, j, i), 5)) + ref = [(0, 0, 2, 5), (0, 1, 3, 5), (0, 2, 4, 5), (0, 3, 5, 5)] self.assertEqual(test, ref) def test_subsets(self): @@ -2958,27 +3029,27 @@ def test_subsets(self): x = a * b self.assertEqual(len(x._sets), 2) - self.assertEqual(list(x.subsets()), [a,b]) - self.assertEqual(list(x.subsets(True)), [a,b]) - self.assertEqual(list(x.subsets(False)), [a,b]) + self.assertEqual(list(x.subsets()), [a, b]) + self.assertEqual(list(x.subsets(True)), [a, b]) + self.assertEqual(list(x.subsets(False)), [a, b]) x = a * b * c self.assertEqual(len(x._sets), 2) - self.assertEqual(list(x.subsets()), [a,b,c]) - self.assertEqual(list(x.subsets(True)), [a,b,c]) - self.assertEqual(list(x.subsets(False)), [a,b,c]) + self.assertEqual(list(x.subsets()), [a, b, c]) + self.assertEqual(list(x.subsets(True)), [a, b, c]) + self.assertEqual(list(x.subsets(False)), [a, b, c]) x = (a * b) * (c * d) self.assertEqual(len(x._sets), 2) - self.assertEqual(list(x.subsets()), [a,b,c,d]) - self.assertEqual(list(x.subsets(True)), [a,b,c,d]) - self.assertEqual(list(x.subsets(False)), [a,b,c,d]) + self.assertEqual(list(x.subsets()), [a, b, c, d]) + self.assertEqual(list(x.subsets(True)), [a, b, c, d]) + self.assertEqual(list(x.subsets(False)), [a, b, c, d]) x = (a - b) * (c * d) self.assertEqual(len(x._sets), 2) self.assertEqual(len(list(x.subsets())), 3) self.assertEqual(len(list(x.subsets(False))), 3) - self.assertEqual(list(x.subsets()), [(a-b),c,d]) + self.assertEqual(list(x.subsets()), [(a - b), c, d]) self.assertEqual(len(list(x.subsets(True))), 4) - self.assertEqual(list(x.subsets(True)), [a,b,c,d]) + self.assertEqual(list(x.subsets(True)), [a, b, c, d]) def test_set_tuple(self): a = SetOf([1]) @@ -2986,61 +3057,61 @@ def test_set_tuple(self): x = a * b os = StringIO() with LoggingIntercept(os, 'pyomo'): - self.assertEqual(x.set_tuple, [a,b]) + self.assertEqual(x.set_tuple, [a, b]) self.assertRegex( - os.getvalue(), - '^DEPRECATED: SetProduct.set_tuple is deprecated.') + os.getvalue(), '^DEPRECATED: SetProduct.set_tuple is deprecated.' + ) def test_no_normalize_index(self): try: _oldFlatten = normalize_index.flatten - I = SetOf([1, (1,2)]) - J = SetOf([3, (2,3)]) + I = SetOf([1, (1, 2)]) + J = SetOf([3, (2, 3)]) x = I * J normalize_index.flatten = False self.assertIs(x.dimen, None) - self.assertIn(((1,2),3), x) - self.assertIn((1,(2,3)), x) + self.assertIn(((1, 2), 3), x) + self.assertIn((1, (2, 3)), x) # if we are not flattening, then lookup must match the # subsets exactly. - self.assertNotIn((1,2,3), x) + self.assertNotIn((1, 2, 3), x) normalize_index.flatten = True self.assertIs(x.dimen, None) - self.assertIn(((1,2),3), x) - self.assertIn((1,(2,3)), x) - self.assertIn((1,2,3), x) + self.assertIn(((1, 2), 3), x) + self.assertIn((1, (2, 3)), x) + self.assertIn((1, 2, 3), x) finally: normalize_index.flatten = _oldFlatten def test_infinite_setproduct(self): - x = PositiveIntegers * SetOf([2,3,5,7]) + x = PositiveIntegers * SetOf([2, 3, 5, 7]) self.assertFalse(x.isfinite()) self.assertFalse(x.isordered()) - self.assertIn((1,2), x) - self.assertNotIn((0,2), x) - self.assertNotIn((1,1), x) - self.assertNotIn(('a',2), x) - self.assertNotIn((2,'a'), x) + self.assertIn((1, 2), x) + self.assertNotIn((0, 2), x) + self.assertNotIn((1, 1), x) + self.assertNotIn(('a', 2), x) + self.assertNotIn((2, 'a'), x) - x = SetOf([2,3,5,7]) * PositiveIntegers + x = SetOf([2, 3, 5, 7]) * PositiveIntegers self.assertFalse(x.isfinite()) self.assertFalse(x.isordered()) - self.assertIn((3,2), x) - self.assertNotIn((1,2), x) - self.assertNotIn((2,0), x) - self.assertNotIn(('a',2), x) - self.assertNotIn((2,'a'), x) + self.assertIn((3, 2), x) + self.assertNotIn((1, 2), x) + self.assertNotIn((2, 0), x) + self.assertNotIn(('a', 2), x) + self.assertNotIn((2, 'a'), x) x = PositiveIntegers * PositiveIntegers self.assertFalse(x.isfinite()) self.assertFalse(x.isordered()) - self.assertIn((3,2), x) - self.assertNotIn((0,2), x) - self.assertNotIn((2,0), x) - self.assertNotIn(('a',2), x) - self.assertNotIn((2,'a'), x) + self.assertIn((3, 2), x) + self.assertNotIn((0, 2), x) + self.assertNotIn((2, 0), x) + self.assertNotIn(('a', 2), x) + self.assertNotIn((2, 'a'), x) def _verify_finite_product(self, a, b): if isinstance(a, (Set, SetOf, RangeSet)): @@ -3060,26 +3131,29 @@ def _verify_finite_product(self, a, b): self.assertFalse(x.isordered()) self.assertEqual(len(x), 6) self.assertEqual( - sorted(list(x)), [(1,5),(1,6),(2,5),(2,6),(3,5),(3,6)]) + sorted(list(x)), [(1, 5), (1, 6), (2, 5), (2, 6), (3, 5), (3, 6)] + ) self.assertEqual( - x.ordered_data(), ((1,5),(1,6),(2,5),(2,6),(3,5),(3,6))) + x.ordered_data(), ((1, 5), (1, 6), (2, 5), (2, 6), (3, 5), (3, 6)) + ) self.assertEqual( - x.sorted_data(), ((1,5),(1,6),(2,5),(2,6),(3,5),(3,6))) + x.sorted_data(), ((1, 5), (1, 6), (2, 5), (2, 6), (3, 5), (3, 6)) + ) self.assertNotIn(1, x) - self.assertIn((1,5), x) - self.assertIn(((1,),5), x) - self.assertNotIn((1,2,3), x) - self.assertNotIn((2,4), x) + self.assertIn((1, 5), x) + self.assertIn(((1,), 5), x) + self.assertNotIn((1, 2, 3), x) + self.assertNotIn((2, 4), x) def test_finite_setproduct(self): - self._verify_finite_product(SetOf({3,1,2}), SetOf({6,5})) - self._verify_finite_product(SetOf({3,1,2}), SetOf([6,5])) - self._verify_finite_product(SetOf([3,1,2]), SetOf({6,5})) - self._verify_finite_product(SetOf([3,1,2]), {6,5}) - self._verify_finite_product({3,1,2}, SetOf([6,5])) - self._verify_finite_product(SetOf({3,1,2}), [6,5]) - self._verify_finite_product([3,1,2], SetOf({6,5})) + self._verify_finite_product(SetOf({3, 1, 2}), SetOf({6, 5})) + self._verify_finite_product(SetOf({3, 1, 2}), SetOf([6, 5])) + self._verify_finite_product(SetOf([3, 1, 2]), SetOf({6, 5})) + self._verify_finite_product(SetOf([3, 1, 2]), {6, 5}) + self._verify_finite_product({3, 1, 2}, SetOf([6, 5])) + self._verify_finite_product(SetOf({3, 1, 2}), [6, 5]) + self._verify_finite_product([3, 1, 2], SetOf({6, 5})) def _verify_ordered_product(self, a, b): if isinstance(a, (Set, SetOf, RangeSet)): @@ -3099,111 +3173,131 @@ def _verify_ordered_product(self, a, b): self.assertTrue(x.isfinite()) self.assertTrue(x.isordered()) self.assertEqual(len(x), 6) - self.assertEqual(list(x), [(3,6),(3,5),(1,6),(1,5),(2,6),(2,5)]) + self.assertEqual(list(x), [(3, 6), (3, 5), (1, 6), (1, 5), (2, 6), (2, 5)]) self.assertEqual( - x.ordered_data(), ((3,6),(3,5),(1,6),(1,5),(2,6),(2,5))) + x.ordered_data(), ((3, 6), (3, 5), (1, 6), (1, 5), (2, 6), (2, 5)) + ) self.assertEqual( - x.sorted_data(), ((1,5),(1,6),(2,5),(2,6),(3,5),(3,6))) + x.sorted_data(), ((1, 5), (1, 6), (2, 5), (2, 6), (3, 5), (3, 6)) + ) self.assertNotIn(1, x) - self.assertIn((1,5), x) - self.assertIn(((1,),5), x) - self.assertNotIn((1,2,3), x) - self.assertNotIn((2,4), x) - - self.assertEqual(x.ord((3,6)), 1) - self.assertEqual(x.ord((3,5)), 2) - self.assertEqual(x.ord((1,6)), 3) - self.assertEqual(x.ord((1,5)), 4) - self.assertEqual(x.ord((2,6)), 5) - self.assertEqual(x.ord((2,5)), 6) + self.assertIn((1, 5), x) + self.assertIn(((1,), 5), x) + self.assertNotIn((1, 2, 3), x) + self.assertNotIn((2, 4), x) + + self.assertEqual(x.ord((3, 6)), 1) + self.assertEqual(x.ord((3, 5)), 2) + self.assertEqual(x.ord((1, 6)), 3) + self.assertEqual(x.ord((1, 5)), 4) + self.assertEqual(x.ord((2, 6)), 5) + self.assertEqual(x.ord((2, 5)), 6) with self.assertRaisesRegex( - IndexError, r"Cannot identify position of \(3, 4\) in Set " - "SetProduct_OrderedSet"): - x.ord((3,4)) - - self.assertEqual(x[1], (3,6)) - self.assertEqual(x[2], (3,5)) - self.assertEqual(x[3], (1,6)) - self.assertEqual(x[4], (1,5)) - self.assertEqual(x[5], (2,6)) - self.assertEqual(x[6], (2,5)) + IndexError, + r"Cannot identify position of \(3, 4\) in Set SetProduct_OrderedSet", + ): + x.ord((3, 4)) + + self.assertEqual(x[1], (3, 6)) + self.assertEqual(x[2], (3, 5)) + self.assertEqual(x[3], (1, 6)) + self.assertEqual(x[4], (1, 5)) + self.assertEqual(x[5], (2, 6)) + self.assertEqual(x[6], (2, 5)) with self.assertRaisesRegex( - IndexError, - "SetProduct_OrderedSet index out of range"): + IndexError, "SetProduct_OrderedSet index out of range" + ): x[7] - self.assertEqual(x[-6], (3,6)) - self.assertEqual(x[-5], (3,5)) - self.assertEqual(x[-4], (1,6)) - self.assertEqual(x[-3], (1,5)) - self.assertEqual(x[-2], (2,6)) - self.assertEqual(x[-1], (2,5)) + self.assertEqual(x[-6], (3, 6)) + self.assertEqual(x[-5], (3, 5)) + self.assertEqual(x[-4], (1, 6)) + self.assertEqual(x[-3], (1, 5)) + self.assertEqual(x[-2], (2, 6)) + self.assertEqual(x[-1], (2, 5)) with self.assertRaisesRegex( - IndexError, - "SetProduct_OrderedSet index out of range"): + IndexError, "SetProduct_OrderedSet index out of range" + ): x[-7] def test_ordered_setproduct(self): - self._verify_ordered_product(SetOf([3,1,2]), SetOf([6,5])) - self._verify_ordered_product(SetOf([3,1,2]), [6,5]) - self._verify_ordered_product([3,1,2], SetOf([6,5])) + self._verify_ordered_product(SetOf([3, 1, 2]), SetOf([6, 5])) + self._verify_ordered_product(SetOf([3, 1, 2]), [6, 5]) + self._verify_ordered_product([3, 1, 2], SetOf([6, 5])) def test_ordered_multidim_setproduct(self): - x = SetOf([(1,2),(3,4)]) * SetOf([(5,6),(7,8)]) + x = SetOf([(1, 2), (3, 4)]) * SetOf([(5, 6), (7, 8)]) self.assertEqual(x.dimen, 4) try: origFlattenCross = SetModule.FLATTEN_CROSS_PRODUCT SetModule.FLATTEN_CROSS_PRODUCT = True - ref = [(1,2,5,6), (1,2,7,8), (3,4,5,6), (3,4,7,8)] + ref = [(1, 2, 5, 6), (1, 2, 7, 8), (3, 4, 5, 6), (3, 4, 7, 8)] self.assertEqual(list(x), ref) self.assertEqual(x.dimen, 4) SetModule.FLATTEN_CROSS_PRODUCT = False - ref = [((1,2),(5,6)), ((1,2),(7,8)), ((3,4),(5,6)), ((3,4),(7,8))] + ref = [ + ((1, 2), (5, 6)), + ((1, 2), (7, 8)), + ((3, 4), (5, 6)), + ((3, 4), (7, 8)), + ] self.assertEqual(list(x), ref) self.assertEqual(x.dimen, None) finally: SetModule.FLATTEN_CROSS_PRODUCT = origFlattenCross - self.assertIn(((1,2),(5,6)), x) - self.assertIn((1,(2,5),6), x) - self.assertIn((1,2,5,6), x) - self.assertNotIn((5,6,1,2), x) + self.assertIn(((1, 2), (5, 6)), x) + self.assertIn((1, (2, 5), 6), x) + self.assertIn((1, 2, 5, 6), x) + self.assertNotIn((5, 6, 1, 2), x) def test_ordered_nondim_setproduct(self): - NonDim = Set(initialize=[2, (2,3)], dimen=None) + NonDim = Set(initialize=[2, (2, 3)], dimen=None) NonDim.construct() - NonDim2 = Set(initialize=[4, (3,4)], dimen=None) + NonDim2 = Set(initialize=[4, (3, 4)], dimen=None) NonDim2.construct() - x = SetOf([1]).cross(NonDim, SetOf([3,4,5])) + x = SetOf([1]).cross(NonDim, SetOf([3, 4, 5])) self.assertEqual(len(x), 6) try: origFlattenCross = SetModule.FLATTEN_CROSS_PRODUCT SetModule.FLATTEN_CROSS_PRODUCT = True - ref = [(1,2,3), (1,2,4), (1,2,5), - (1,2,3,3), (1,2,3,4), (1,2,3,5)] + ref = [ + (1, 2, 3), + (1, 2, 4), + (1, 2, 5), + (1, 2, 3, 3), + (1, 2, 3, 4), + (1, 2, 3, 5), + ] self.assertEqual(list(x), ref) self.assertEqual(x.dimen, None) SetModule.FLATTEN_CROSS_PRODUCT = False - ref = [(1,2,3), (1,2,4), (1,2,5), - (1,(2,3),3), (1,(2,3),4), (1,(2,3),5)] + ref = [ + (1, 2, 3), + (1, 2, 4), + (1, 2, 5), + (1, (2, 3), 3), + (1, (2, 3), 4), + (1, (2, 3), 5), + ] self.assertEqual(list(x), ref) self.assertEqual(x.dimen, None) finally: SetModule.FLATTEN_CROSS_PRODUCT = origFlattenCross - self.assertIn((1,2,3), x) - self.assertNotIn((1,2,6), x) - self.assertIn((1,(2,3),3), x) - self.assertIn((1,2,3,3), x) - self.assertNotIn((1,(2,4),3), x) + self.assertIn((1, 2, 3), x) + self.assertNotIn((1, 2, 6), x) + self.assertIn((1, (2, 3), 3), x) + self.assertIn((1, 2, 3, 3), x) + self.assertNotIn((1, (2, 4), 3), x) self.assertEqual(x.ord((1, 2, 3)), 1) self.assertEqual(x.ord((1, (2, 3), 3)), 4) @@ -3211,38 +3305,52 @@ def test_ordered_nondim_setproduct(self): self.assertEqual(x.ord((1, 2, 3, 3)), 4) self.assertEqual(x.ord((1, 2, 3, 5)), 6) - x = SetOf([1]).cross(NonDim, NonDim2, SetOf([0,1])) + x = SetOf([1]).cross(NonDim, NonDim2, SetOf([0, 1])) self.assertEqual(len(x), 8) try: origFlattenCross = SetModule.FLATTEN_CROSS_PRODUCT SetModule.FLATTEN_CROSS_PRODUCT = True - ref = [(1,2,4,0), (1,2,4,1), (1,2,3,4,0), (1,2,3,4,1), - (1,2,3,4,0), (1,2,3,4,1), (1,2,3,3,4,0), (1,2,3,3,4,1)] + ref = [ + (1, 2, 4, 0), + (1, 2, 4, 1), + (1, 2, 3, 4, 0), + (1, 2, 3, 4, 1), + (1, 2, 3, 4, 0), + (1, 2, 3, 4, 1), + (1, 2, 3, 3, 4, 0), + (1, 2, 3, 3, 4, 1), + ] self.assertEqual(list(x), ref) - for i,v in enumerate(ref): - self.assertEqual(x[i+1], v) + for i, v in enumerate(ref): + self.assertEqual(x[i + 1], v) self.assertEqual(x.dimen, None) SetModule.FLATTEN_CROSS_PRODUCT = False - ref = [(1,2,4,0), (1,2,4,1), - (1,2,(3,4),0), (1,2,(3,4),1), - (1,(2,3),4,0), (1,(2,3),4,1), - (1,(2,3),(3,4),0), (1,(2,3),(3,4),1)] + ref = [ + (1, 2, 4, 0), + (1, 2, 4, 1), + (1, 2, (3, 4), 0), + (1, 2, (3, 4), 1), + (1, (2, 3), 4, 0), + (1, (2, 3), 4, 1), + (1, (2, 3), (3, 4), 0), + (1, (2, 3), (3, 4), 1), + ] self.assertEqual(list(x), ref) - for i,v in enumerate(ref): - self.assertEqual(x[i+1], v) + for i, v in enumerate(ref): + self.assertEqual(x[i + 1], v) self.assertEqual(x.dimen, None) finally: SetModule.FLATTEN_CROSS_PRODUCT = origFlattenCross - self.assertIn((1,2,4,0), x) - self.assertNotIn((1,2,6), x) - self.assertIn((1,(2,3),4,0), x) - self.assertIn((1,2,(3,4),0), x) - self.assertIn((1,2,3,4,0), x) - self.assertNotIn((1,2,5,4,0), x) + self.assertIn((1, 2, 4, 0), x) + self.assertNotIn((1, 2, 6), x) + self.assertIn((1, (2, 3), 4, 0), x) + self.assertIn((1, 2, (3, 4), 0), x) + self.assertIn((1, 2, 3, 4, 0), x) + self.assertNotIn((1, 2, 5, 4, 0), x) self.assertEqual(x.ord((1, 2, 4, 0)), 1) self.assertEqual(x.ord((1, (2, 3), 4, 0)), 5) @@ -3251,70 +3359,78 @@ def test_ordered_nondim_setproduct(self): def test_setproduct_construct_data(self): m = AbstractModel() - m.I = Set(initialize=[1,2]) + m.I = Set(initialize=[1, 2]) m.J = m.I * m.I output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.create_instance( - data={None:{'J': {None: [(1,1),(1,2),(2,1),(2,2)]}}}) + data={None: {'J': {None: [(1, 1), (1, 2), (2, 1), (2, 2)]}}} + ) self.assertRegex( - output.getvalue().replace('\n',' '), + output.getvalue().replace('\n', ' '), "^DEPRECATED: Providing construction data to SetOperator objects " - "is deprecated") + "is deprecated", + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): with self.assertRaisesRegex( - ValueError, "Constructing SetOperator J with " - r"incompatible data \(data=\{None: \[\(1, 1\), \(1, 2\), " - r"\(2, 1\)\]\}"): - m.create_instance( - data={None:{'J': {None: [(1,1),(1,2),(2,1)]}}}) + ValueError, + "Constructing SetOperator J with " + r"incompatible data \(data=\{None: \[\(1, 1\), \(1, 2\), " + r"\(2, 1\)\]\}", + ): + m.create_instance(data={None: {'J': {None: [(1, 1), (1, 2), (2, 1)]}}}) self.assertRegex( - output.getvalue().replace('\n',' '), + output.getvalue().replace('\n', ' '), "^DEPRECATED: Providing construction data to SetOperator objects " - "is deprecated") + "is deprecated", + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): with self.assertRaisesRegex( - ValueError, "Constructing SetOperator J with " - r"incompatible data \(data=\{None: \[\(1, 3\), \(1, 2\), " - r"\(2, 1\), \(2, 2\)\]\}"): + ValueError, + "Constructing SetOperator J with " + r"incompatible data \(data=\{None: \[\(1, 3\), \(1, 2\), " + r"\(2, 1\), \(2, 2\)\]\}", + ): m.create_instance( - data={None:{'J': {None: [(1,3),(1,2),(2,1),(2,2)]}}}) + data={None: {'J': {None: [(1, 3), (1, 2), (2, 1), (2, 2)]}}} + ) self.assertRegex( - output.getvalue().replace('\n',' '), + output.getvalue().replace('\n', ' '), "^DEPRECATED: Providing construction data to SetOperator objects " - "is deprecated") + "is deprecated", + ) def test_setproduct_nondim_set(self): m = ConcreteModel() - m.I = Set(initialize=[1,2,3]) + m.I = Set(initialize=[1, 2, 3]) m.J = Set() - m.K = Set(initialize=[4,5,6]) + m.K = Set(initialize=[4, 5, 6]) m.Z = m.I * m.J * m.K self.assertEqual(len(m.Z), 0) - self.assertNotIn((2,5), m.Z) + self.assertNotIn((2, 5), m.Z) m.J.add(0) self.assertEqual(len(m.Z), 9) - self.assertIn((2,0,5), m.Z) + self.assertIn((2, 0, 5), m.Z) def test_setproduct_toolong_val(self): m = ConcreteModel() - m.I = Set(initialize=[1,2,3]) - m.J = Set(initialize=[4,5,6]) + m.I = Set(initialize=[1, 2, 3]) + m.J = Set(initialize=[4, 5, 6]) m.Z = m.I * m.J - self.assertIn((2,5), m.Z) - self.assertNotIn((2,5,3), m.Z) + self.assertIn((2, 5), m.Z) + self.assertNotIn((2, 5, 3), m.Z) m = ConcreteModel() - m.I = Set(initialize=[1,2,3]) - m.J = Set(initialize=[4,5,6], dimen=None) + m.I = Set(initialize=[1, 2, 3]) + m.J = Set(initialize=[4, 5, 6], dimen=None) m.Z = m.I * m.J - self.assertIn((2,5), m.Z) - self.assertNotIn((2,5,3), m.Z) + self.assertIn((2, 5), m.Z) + self.assertNotIn((2, 5, 3), m.Z) class TestGlobalSets(unittest.TestCase): @@ -3337,62 +3453,62 @@ def test_name(self): def test_block_independent(self): m = ConcreteModel() with self.assertRaisesRegex( - RuntimeError, - "Cannot assign a GlobalSet 'Reals' to model 'unknown'"): + RuntimeError, "Cannot assign a GlobalSet 'Reals' to model 'unknown'" + ): m.a_set = Reals self.assertEqual(str(Reals), 'Reals') self.assertIsNone(Reals._parent) m.blk = Block() with self.assertRaisesRegex( - RuntimeError, - "Cannot assign a GlobalSet 'Reals' to block 'blk'"): + RuntimeError, "Cannot assign a GlobalSet 'Reals' to block 'blk'" + ): m.blk.a_set = Reals self.assertEqual(str(Reals), 'Reals') self.assertIsNone(Reals._parent) def test_iteration(self): with self.assertRaisesRegex( - TypeError, "'GlobalSet' object is not iterable " - r"\(non-finite Set 'Reals' is not iterable\)"): + TypeError, + "'GlobalSet' object is not iterable " + r"\(non-finite Set 'Reals' is not iterable\)", + ): iter(Reals) with self.assertRaisesRegex( - TypeError, "'GlobalSet' object is not iterable " - r"\(non-finite Set 'Integers' is not iterable\)"): + TypeError, + "'GlobalSet' object is not iterable " + r"\(non-finite Set 'Integers' is not iterable\)", + ): iter(Integers) - self.assertEqual(list(iter(Binary)), [0,1]) + self.assertEqual(list(iter(Binary)), [0, 1]) def test_declare(self): NS = {} - DeclareGlobalSet(RangeSet( name='TrinarySet', - ranges=(NR(0,2,1),) ), - NS) - self.assertEqual(list(NS['TrinarySet']), [0,1,2]) + DeclareGlobalSet(RangeSet(name='TrinarySet', ranges=(NR(0, 2, 1),)), NS) + self.assertEqual(list(NS['TrinarySet']), [0, 1, 2]) a = pickle.loads(pickle.dumps(NS['TrinarySet'])) self.assertIs(a, NS['TrinarySet']) - with self.assertRaisesRegex( - NameError, "name 'TrinarySet' is not defined"): + with self.assertRaisesRegex(NameError, "name 'TrinarySet' is not defined"): TrinarySet del SetModule.GlobalSets['TrinarySet'] del NS['TrinarySet'] # Now test the automatic identification of the globals() scope - DeclareGlobalSet(RangeSet( name='TrinarySet', - ranges=(NR(0,2,1),) )) - self.assertEqual(list(TrinarySet), [0,1,2]) + DeclareGlobalSet(RangeSet(name='TrinarySet', ranges=(NR(0, 2, 1),))) + self.assertEqual(list(TrinarySet), [0, 1, 2]) a = pickle.loads(pickle.dumps(TrinarySet)) self.assertIs(a, TrinarySet) del SetModule.GlobalSets['TrinarySet'] del globals()['TrinarySet'] - with self.assertRaisesRegex( - NameError, "name 'TrinarySet' is not defined"): + with self.assertRaisesRegex(NameError, "name 'TrinarySet' is not defined"): TrinarySet def test_exceptions(self): with self.assertRaisesRegex( - RuntimeError, "Duplicate Global Set declaration, Reals"): - DeclareGlobalSet(RangeSet( name='Reals', ranges=(NR(0,2,1),) )) + RuntimeError, "Duplicate Global Set declaration, Reals" + ): + DeclareGlobalSet(RangeSet(name='Reals', ranges=(NR(0, 2, 1),))) # But repeat declarations are OK a = Reals @@ -3402,8 +3518,7 @@ def test_exceptions(self): self.assertIs(a, SetModule.GlobalSets['Reals']) NS = {} - ts = DeclareGlobalSet( - RangeSet(name='TrinarySet', ranges=(NR(0,2,1),)), NS) + ts = DeclareGlobalSet(RangeSet(name='TrinarySet', ranges=(NR(0, 2, 1),)), NS) self.assertIs(NS['TrinarySet'], ts) # Repeat declaration is OK @@ -3413,9 +3528,9 @@ def test_exceptions(self): # but conflicting one raises exception NS['foo'] = None with self.assertRaisesRegex( - RuntimeError, "Refusing to overwrite global object, foo"): - DeclareGlobalSet( - RangeSet( name='foo', ranges=(NR(0,2,1),) ), NS) + RuntimeError, "Refusing to overwrite global object, foo" + ): + DeclareGlobalSet(RangeSet(name='foo', ranges=(NR(0, 2, 1),)), NS) def test_RealSet_IntegerSet(self): output = StringIO() @@ -3427,9 +3542,9 @@ def test_RealSet_IntegerSet(self): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - a = SetModule.RealSet(bounds=(1,3)) + a = SetModule.RealSet(bounds=(1, 3)) self.assertIn('DEPRECATED: The use of RealSet,', output.getvalue()) - self.assertEqual(a.bounds(), (1,3)) + self.assertEqual(a.bounds(), (1, 3)) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): @@ -3440,10 +3555,10 @@ def test_RealSet_IntegerSet(self): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - a = SetModule.IntegerSet(bounds=(1,3)) + a = SetModule.IntegerSet(bounds=(1, 3)) self.assertIn('DEPRECATED: The use of RealSet,', output.getvalue()) - self.assertEqual(a.bounds(), (1,3)) - self.assertEqual(list(a), [1,2,3]) + self.assertEqual(a.bounds(), (1, 3)) + self.assertEqual(list(a), [1, 2, 3]) m = ConcreteModel() @@ -3459,11 +3574,12 @@ def test_RealSet_IntegerSet(self): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - m.z = Var(within=SetModule.RealSet(bounds=(0,None))) + m.z = Var(within=SetModule.RealSet(bounds=(0, None))) self.assertIn('DEPRECATED: The use of RealSet,', output.getvalue()) with self.assertRaisesRegex( - RuntimeError, r"Unexpected keyword arguments: \{'foo': 5\}"): + RuntimeError, r"Unexpected keyword arguments: \{'foo': 5\}" + ): IntegerSet(foo=5) def test_intervals(self): @@ -3475,7 +3591,7 @@ def test_intervals(self): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - a = SetModule.RealInterval(bounds=(0,None)) + a = SetModule.RealInterval(bounds=(0, None)) self.assertIn("RealInterval has been deprecated.", output.getvalue()) self.assertEqual(a, NonNegativeReals) @@ -3483,14 +3599,13 @@ def test_intervals(self): with LoggingIntercept(output, 'pyomo.core'): a = SetModule.RealInterval(bounds=5) self.assertIn("RealInterval has been deprecated.", output.getvalue()) - self.assertEqual(a, RangeSet(1,5,0)) - + self.assertEqual(a, RangeSet(1, 5, 0)) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): a = SetModule.RealInterval(bounds=(5,)) self.assertIn("RealInterval has been deprecated.", output.getvalue()) - self.assertEqual(a, RangeSet(1,5,0)) + self.assertEqual(a, RangeSet(1, 5, 0)) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): @@ -3500,43 +3615,43 @@ def test_intervals(self): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - a = SetModule.IntegerInterval(bounds=(0,None)) + a = SetModule.IntegerInterval(bounds=(0, None)) self.assertIn("IntegerInterval has been deprecated.", output.getvalue()) self.assertEqual(a, NonNegativeIntegers) self.assertFalse(a.isfinite()) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - a = SetModule.IntegerInterval(bounds=(None,-1)) + a = SetModule.IntegerInterval(bounds=(None, -1)) self.assertIn("IntegerInterval has been deprecated.", output.getvalue()) self.assertEqual(a, NegativeIntegers) self.assertFalse(a.isfinite()) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - a = SetModule.IntegerInterval(bounds=(-float('inf'),-1)) + a = SetModule.IntegerInterval(bounds=(-float('inf'), -1)) self.assertIn("IntegerInterval has been deprecated.", output.getvalue()) self.assertEqual(a, NegativeIntegers) self.assertFalse(a.isfinite()) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - a = SetModule.IntegerInterval(bounds=(0,3)) + a = SetModule.IntegerInterval(bounds=(0, 3)) self.assertIn("IntegerInterval has been deprecated.", output.getvalue()) - self.assertEqual(list(a), [0,1,2,3]) + self.assertEqual(list(a), [0, 1, 2, 3]) self.assertTrue(a.isfinite()) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): a = SetModule.IntegerInterval(bounds=5) self.assertIn("IntegerInterval has been deprecated.", output.getvalue()) - self.assertEqual(list(a), [1,2,3,4,5]) + self.assertEqual(list(a), [1, 2, 3, 4, 5]) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): a = SetModule.IntegerInterval(bounds=(5,)) self.assertIn("IntegerInterval has been deprecated.", output.getvalue()) - self.assertEqual(list(a), [1,2,3,4,5]) + self.assertEqual(list(a), [1, 2, 3, 4, 5]) def _init_set(m, *args): @@ -3555,7 +3670,8 @@ def test_deprecated_args(self): self.assertEqual(len(m.I), 0) self.assertRegex( output.getvalue(), - "^DEPRECATED: Pyomo Sets ignore the 'virtual' keyword argument") + "^DEPRECATED: Pyomo Sets ignore the 'virtual' keyword argument", + ) def test_scalar_set_initialize_and_iterate(self): m = ConcreteModel() @@ -3567,21 +3683,22 @@ def test_scalar_set_initialize_and_iterate(self): m = ConcreteModel() with self.assertRaisesRegex( - KeyError, "Cannot treat the scalar component 'I' " - "as an indexed component"): - m.I = Set(initialize={1:(1,3,2,4)}) + KeyError, "Cannot treat the scalar component 'I' as an indexed component" + ): + m.I = Set(initialize={1: (1, 3, 2, 4)}) m = ConcreteModel() - m.I = Set(initialize=(1,3,2,4)) + m.I = Set(initialize=(1, 3, 2, 4)) self.assertTrue(m.I._init_values.constant()) - self.assertEqual(list(m.I), [1,3,2,4]) - self.assertEqual(list(reversed(m.I)), [4,2,3,1]) - self.assertEqual(m.I.data(), (1,3,2,4)) + self.assertEqual(list(m.I), [1, 3, 2, 4]) + self.assertEqual(list(reversed(m.I)), [4, 2, 3, 1]) + self.assertEqual(m.I.data(), (1, 3, 2, 4)) self.assertEqual(m.I.dimen, 1) m = ConcreteModel() with self.assertRaisesRegex( - ValueError, 'Set rule or initializer returned None'): + ValueError, 'Set rule or initializer returned None' + ): m.I = Set(initialize=lambda m: None, dimen=2) self.assertTrue(m.I._init_values.constant()) self.assertEqual(list(m.I), []) @@ -3594,28 +3711,31 @@ def I_init(m): yield 3 yield 2 yield 4 + m = ConcreteModel() m.I = Set(initialize=I_init) - self.assertEqual(list(m.I), [1,3,2,4]) - self.assertEqual(list(reversed(m.I)), [4,2,3,1]) - self.assertEqual(m.I.data(), (1,3,2,4)) + self.assertEqual(list(m.I), [1, 3, 2, 4]) + self.assertEqual(list(reversed(m.I)), [4, 2, 3, 1]) + self.assertEqual(m.I.data(), (1, 3, 2, 4)) self.assertEqual(m.I.dimen, 1) m = ConcreteModel() - m.I = Set(initialize={None: (1,3,2,4)}) - self.assertEqual(list(m.I), [1,3,2,4]) - self.assertEqual(list(reversed(m.I)), [4,2,3,1]) - self.assertEqual(m.I.data(), (1,3,2,4)) + m.I = Set(initialize={None: (1, 3, 2, 4)}) + self.assertEqual(list(m.I), [1, 3, 2, 4]) + self.assertEqual(list(reversed(m.I)), [4, 2, 3, 1]) + self.assertEqual(m.I.data(), (1, 3, 2, 4)) self.assertEqual(m.I.dimen, 1) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m = ConcreteModel() - m.I = Set(initialize={1,3,2,4}) - ref = ("Initializing ordered Set I with a " - "fundamentally unordered data source (type: set).") + m.I = Set(initialize={1, 3, 2, 4}) + ref = ( + "Initializing ordered Set I with a " + "fundamentally unordered data source (type: set)." + ) self.assertIn(ref, output.getvalue()) - self.assertEqual(m.I.sorted_data(), (1,2,3,4)) + self.assertEqual(m.I.sorted_data(), (1, 2, 3, 4)) # We can't directly compare the reversed to a reference list # (because this is populated from an unordered set!) but we can # compare it with the forward list. @@ -3626,9 +3746,9 @@ def I_init(m): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m = ConcreteModel() - m.I = Set(initialize={1,3,2,4}, ordered=False) + m.I = Set(initialize={1, 3, 2, 4}, ordered=False) self.assertEqual(output.getvalue(), "") - self.assertEqual(sorted(list(m.I)), [1,2,3,4]) + self.assertEqual(sorted(list(m.I)), [1, 2, 3, 4]) # We can't directly compare the reversed to a reference list # (because this is an unordered set!) but we can compare it with # the forward list. @@ -3637,34 +3757,34 @@ def I_init(m): self.assertEqual(m.I.dimen, 1) m = ConcreteModel() - m.I = Set(initialize=[1,3,2,4], ordered=Set.SortedOrder) - self.assertEqual(list(m.I), [1,2,3,4]) - self.assertEqual(list(reversed(m.I)), [4,3,2,1]) - self.assertEqual(m.I.data(), (1,2,3,4)) + m.I = Set(initialize=[1, 3, 2, 4], ordered=Set.SortedOrder) + self.assertEqual(list(m.I), [1, 2, 3, 4]) + self.assertEqual(list(reversed(m.I)), [4, 3, 2, 1]) + self.assertEqual(m.I.data(), (1, 2, 3, 4)) self.assertEqual(m.I.dimen, 1) with self.assertRaisesRegex( - TypeError, r"Set 'ordered' argument is not valid \(must " - r"be one of {False, True, , Set.InsertionOrder, " - r"Set.SortedOrder}\)"): + TypeError, + r"Set 'ordered' argument is not valid \(must " + r"be one of {False, True, , Set.InsertionOrder, " + r"Set.SortedOrder}\)", + ): m = ConcreteModel() - m.I = Set(initialize=[1,3,2,4], ordered=Set) + m.I = Set(initialize=[1, 3, 2, 4], ordered=Set) m = ConcreteModel() - m.I = Set(initialize=[1,3,2,4], ordered=lambda x: reversed(sorted(x))) - self.assertEqual(list(m.I), [4,3,2,1]) - self.assertEqual(list(reversed(m.I)), [1,2,3,4]) - self.assertEqual(m.I.data(), (4,3,2,1)) + m.I = Set(initialize=[1, 3, 2, 4], ordered=lambda x: reversed(sorted(x))) + self.assertEqual(list(m.I), [4, 3, 2, 1]) + self.assertEqual(list(reversed(m.I)), [1, 2, 3, 4]) + self.assertEqual(m.I.data(), (4, 3, 2, 1)) self.assertEqual(m.I.dimen, 1) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - with self.assertRaisesRegex( - TypeError, "'int' object is not iterable"): + with self.assertRaisesRegex(TypeError, "'int' object is not iterable"): m = ConcreteModel() m.I = Set(initialize=5) - ref = ("Initializer for Set I returned non-iterable object " - "of type int.") + ref = "Initializer for Set I returned non-iterable object of type int." self.assertIn(ref, output.getvalue()) def test_scalar_indexed_api(self): @@ -3678,23 +3798,26 @@ def test_insertion_deletion(self): def _verify(_s, _l): self.assertTrue(_s.isordered()) self.assertTrue(_s.isfinite()) - for i,v in enumerate(_l): - self.assertEqual(_s.at(i+1), v) + self.assertEqual(list(_s), list(reversed(list(reversed(_s))))) + self.assertEqual(list(_s.ordered_iter()), _l) + self.assertEqual(list(_s.sorted_iter()), sorted(_l)) + for i, v in enumerate(_l): + self.assertEqual(_s.at(i + 1), v) with self.assertRaisesRegex(IndexError, "I index out of range"): - _s.at(len(_l)+1) + _s.at(len(_l) + 1) with self.assertRaisesRegex(IndexError, "I index out of range"): - _s.at(len(_l)+2) + _s.at(len(_l) + 2) - for i,v in enumerate(reversed(_l)): - self.assertEqual(_s.at(-(i+1)), v) + for i, v in enumerate(reversed(_l)): + self.assertEqual(_s.at(-(i + 1)), v) with self.assertRaisesRegex(IndexError, "I index out of range"): - _s.at(-len(_l)-1) + _s.at(-len(_l) - 1) with self.assertRaisesRegex(IndexError, "I index out of range"): - _s.at(-len(_l)-2) + _s.at(-len(_l) - 2) - for i,v in enumerate(_l): - self.assertEqual(_s.ord(v), i+1) - self.assertEqual(_s.ord((v,)), i+1) + for i, v in enumerate(_l): + self.assertEqual(_s.ord(v), i + 1) + self.assertEqual(_s.ord((v,)), i + 1) if _l: _max = max(_l) @@ -3703,11 +3826,11 @@ def _verify(_s, _l): _max = 0 _min = 0 with self.assertRaisesRegex(ValueError, r"I.ord\(x\): x not in I"): - m.I.ord(_max+1) + m.I.ord(_max + 1) with self.assertRaisesRegex(ValueError, r"I.ord\(x\): x not in I"): - m.I.ord(_min-1) + m.I.ord(_min - 1) with self.assertRaisesRegex(ValueError, r"I.ord\(x\): x not in I"): - m.I.ord((_max+1,)) + m.I.ord((_max + 1,)) # Testing insertion order sets m = ConcreteModel() @@ -3716,48 +3839,48 @@ def _verify(_s, _l): m.I.add(1) _verify(m.I, [1]) m.I.add(3) - _verify(m.I, [1,3]) + _verify(m.I, [1, 3]) m.I.add(2) - _verify(m.I, [1,3,2]) + _verify(m.I, [1, 3, 2]) m.I.add(4) - _verify(m.I, [1,3,2,4]) + _verify(m.I, [1, 3, 2, 4]) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.I.add(3) self.assertEqual( - output.getvalue(), - "Element 3 already exists in Set I; no action taken\n") - _verify(m.I, [1,3,2,4]) + output.getvalue(), "Element 3 already exists in Set I; no action taken\n" + ) + _verify(m.I, [1, 3, 2, 4]) m.I.remove(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) with self.assertRaisesRegex(KeyError, "^3$"): m.I.remove(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) m.I.add(3) - _verify(m.I, [1,2,4,3]) + _verify(m.I, [1, 2, 4, 3]) m.I.discard(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) m.I.discard(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) m.I.clear() _verify(m.I, []) m.I.add(6) m.I.add(5) - _verify(m.I, [6,5]) + _verify(m.I, [6, 5]) tmp = set() tmp.add(m.I.pop()) tmp.add(m.I.pop()) _verify(m.I, []) - self.assertEqual(tmp, {5,6}) + self.assertEqual(tmp, {5, 6}) with self.assertRaisesRegex(KeyError, 'pop from an empty set'): m.I.pop() @@ -3765,31 +3888,31 @@ def _verify(_s, _l): with LoggingIntercept(output, 'pyomo.core'): m.I.update([6]) _verify(m.I, [6]) - m.I.update([6,5,6]) - _verify(m.I, [6,5]) + m.I.update([6, 5, 6]) + _verify(m.I, [6, 5]) - m.I = [0,-1,1] - _verify(m.I, [0,-1,1]) + m.I = [0, -1, 1] + _verify(m.I, [0, -1, 1]) self.assertEqual(output.getvalue(), "") - # Assing unsorted data should generate warnings - m.I.update({3,4}) + # Assign unsorted data should generate warnings + m.I.update({3, 4}) self.assertIn( "Calling update() on an insertion order Set with a " "fundamentally unordered data source (type: set)", - output.getvalue() + output.getvalue(), ) self.assertEqual(set(m.I), {0, -1, 1, 3, 4}) output.truncate(0) - m.I = {5,6} + m.I = {5, 6} self.assertIn( "Calling set_value() on an insertion order Set with a " "fundamentally unordered data source (type: set)", - output.getvalue() + output.getvalue(), ) - self.assertEqual(set(m.I), {5,6}) + self.assertEqual(set(m.I), {5, 6}) # Testing sorted sets m = ConcreteModel() @@ -3798,48 +3921,48 @@ def _verify(_s, _l): m.I.add(1) _verify(m.I, [1]) m.I.add(3) - _verify(m.I, [1,3]) + _verify(m.I, [1, 3]) m.I.add(2) - _verify(m.I, [1,2,3]) + _verify(m.I, [1, 2, 3]) m.I.add(4) - _verify(m.I, [1,2,3,4]) + _verify(m.I, [1, 2, 3, 4]) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.I.add(3) self.assertEqual( - output.getvalue(), - "Element 3 already exists in Set I; no action taken\n") - _verify(m.I, [1,2,3,4]) + output.getvalue(), "Element 3 already exists in Set I; no action taken\n" + ) + _verify(m.I, [1, 2, 3, 4]) m.I.remove(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) with self.assertRaisesRegex(KeyError, "^3$"): m.I.remove(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) m.I.add(3) - _verify(m.I, [1,2,3,4]) + _verify(m.I, [1, 2, 3, 4]) m.I.discard(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) m.I.discard(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) m.I.clear() _verify(m.I, []) m.I.add(6) m.I.add(5) - _verify(m.I, [5,6]) + _verify(m.I, [5, 6]) tmp = set() tmp.add(m.I.pop()) tmp.add(m.I.pop()) _verify(m.I, []) - self.assertEqual(tmp, {5,6}) + self.assertEqual(tmp, {5, 6}) with self.assertRaisesRegex(KeyError, 'pop from an empty set'): m.I.pop() @@ -3847,23 +3970,23 @@ def _verify(_s, _l): with LoggingIntercept(output, 'pyomo.core'): m.I.update([6]) _verify(m.I, [6]) - m.I.update([6,5,6]) - _verify(m.I, [5,6]) + m.I.update([6, 5, 6]) + _verify(m.I, [5, 6]) - m.I = [0,-1,1] - _verify(m.I, [-1,0,1]) + m.I = [0, -1, 1] + _verify(m.I, [-1, 0, 1]) self.assertEqual(output.getvalue(), "") - # Assing unsorted data should not generate warnings (since + # Assign unsorted data should not generate warnings (since # we are sorting the Set!) - m.I.update({3,4}) + m.I.update({3, 4}) self.assertEqual(output.getvalue(), "") - _verify(m.I, [-1,0,1,3,4]) + _verify(m.I, [-1, 0, 1, 3, 4]) - m.I = {5,6} + m.I = {5, 6} self.assertEqual(output.getvalue(), "") - _verify(m.I, [5,6]) + _verify(m.I, [5, 6]) def test_unordered_insertion_deletion(self): def _verify(_s, _l): @@ -3881,8 +4004,8 @@ def _verify(_s, _l): else: _max = 0 _min = 0 - self.assertNotIn(_max+1, _s) - self.assertNotIn(_min-1, _s) + self.assertNotIn(_max + 1, _s) + self.assertNotIn(_min - 1, _s) # Testing unordered sets m = ConcreteModel() @@ -3891,84 +4014,83 @@ def _verify(_s, _l): m.I.add(1) _verify(m.I, [1]) m.I.add(3) - _verify(m.I, [1,3]) + _verify(m.I, [1, 3]) m.I.add(2) - _verify(m.I, [1,2,3]) + _verify(m.I, [1, 2, 3]) m.I.add(4) - _verify(m.I, [1,2,3,4]) + _verify(m.I, [1, 2, 3, 4]) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.I.add(3) self.assertEqual( - output.getvalue(), - "Element 3 already exists in Set I; no action taken\n") - _verify(m.I, [1,2,3,4]) + output.getvalue(), "Element 3 already exists in Set I; no action taken\n" + ) + _verify(m.I, [1, 2, 3, 4]) m.I.remove(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) with self.assertRaisesRegex(KeyError, "^3$"): m.I.remove(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) m.I.add(3) - _verify(m.I, [1,2,3,4]) + _verify(m.I, [1, 2, 3, 4]) m.I.discard(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) m.I.discard(3) - _verify(m.I, [1,2,4]) + _verify(m.I, [1, 2, 4]) m.I.clear() _verify(m.I, []) m.I.add(6) m.I.add(5) - _verify(m.I, [5,6]) + _verify(m.I, [5, 6]) tmp = set() tmp.add(m.I.pop()) tmp.add(m.I.pop()) _verify(m.I, []) - self.assertEqual(tmp, {5,6}) + self.assertEqual(tmp, {5, 6}) with self.assertRaisesRegex(KeyError, 'pop from an empty set'): m.I.pop() m.I.update([5]) _verify(m.I, [5]) - m.I.update([6,5]) - _verify(m.I, [5,6]) + m.I.update([6, 5]) + _verify(m.I, [5, 6]) - m.I = [0,-1,1] - _verify(m.I, [-1,0,1]) + m.I = [0, -1, 1] + _verify(m.I, [-1, 0, 1]) def test_multiple_insertion(self): m = ConcreteModel() m.I = Set(ordered=True, initialize=[1]) - self.assertEqual(m.I.add(3,2,4), 3) - self.assertEqual(tuple(m.I.data()), (1,3,2,4)) - - self.assertEqual(m.I.add(1,5,4), 1) - self.assertEqual(tuple(m.I.data()), (1,3,2,4,5)) + self.assertEqual(m.I.add(3, 2, 4), 3) + self.assertEqual(tuple(m.I.data()), (1, 3, 2, 4)) + self.assertEqual(m.I.add(1, 5, 4), 1) + self.assertEqual(tuple(m.I.data()), (1, 3, 2, 4, 5)) def test_indexed_set(self): # Implicit construction m = ConcreteModel() - m.I = Set([1,2,3], ordered=False) + m.I = Set([1, 2, 3], ordered=False) self.assertEqual(len(m.I), 0) self.assertEqual(m.I.data(), {}) m.I[1] self.assertEqual(len(m.I), 1) self.assertEqual(m.I[1], []) - self.assertEqual(m.I.data(), {1:()}) + self.assertEqual(m.I.data(), {1: ()}) self.assertEqual(m.I[2], []) self.assertEqual(len(m.I), 2) - self.assertEqual(m.I.data(), {1:(), 2:()}) + self.assertEqual(m.I.data(), {1: (), 2: ()}) m.I[1].add(1) m.I[2].add(2) @@ -3986,15 +4108,15 @@ def test_indexed_set(self): self.assertIs(type(m.I[1]), _FiniteSetData) self.assertIs(type(m.I[2]), _FiniteSetData) self.assertIs(type(m.I[3]), _FiniteSetData) - self.assertEqual(m.I.data(), {1:(1,), 2:(2,), 3:(4,)}) + self.assertEqual(m.I.data(), {1: (1,), 2: (2,), 3: (4,)}) # Explicit (constant) construction m = ConcreteModel() - m.I = Set([1,2,3], initialize=(4,2,5)) + m.I = Set([1, 2, 3], initialize=(4, 2, 5)) self.assertEqual(len(m.I), 3) - self.assertEqual(list(m.I[1]), [4,2,5]) - self.assertEqual(list(m.I[2]), [4,2,5]) - self.assertEqual(list(m.I[3]), [4,2,5]) + self.assertEqual(list(m.I[1]), [4, 2, 5]) + self.assertEqual(list(m.I[2]), [4, 2, 5]) + self.assertEqual(list(m.I[3]), [4, 2, 5]) self.assertIsNot(m.I[1], m.I[2]) self.assertIsNot(m.I[1], m.I[3]) self.assertIsNot(m.I[2], m.I[3]) @@ -4004,15 +4126,15 @@ def test_indexed_set(self): self.assertIs(type(m.I[1]), _InsertionOrderSetData) self.assertIs(type(m.I[2]), _InsertionOrderSetData) self.assertIs(type(m.I[3]), _InsertionOrderSetData) - self.assertEqual(m.I.data(), {1:(4,2,5), 2:(4,2,5), 3:(4,2,5)}) + self.assertEqual(m.I.data(), {1: (4, 2, 5), 2: (4, 2, 5), 3: (4, 2, 5)}) # Explicit (constant) construction m = ConcreteModel() - m.I = Set([1,2,3], initialize=(4,2,5), ordered=Set.SortedOrder) + m.I = Set([1, 2, 3], initialize=(4, 2, 5), ordered=Set.SortedOrder) self.assertEqual(len(m.I), 3) - self.assertEqual(list(m.I[1]), [2,4,5]) - self.assertEqual(list(m.I[2]), [2,4,5]) - self.assertEqual(list(m.I[3]), [2,4,5]) + self.assertEqual(list(m.I[1]), [2, 4, 5]) + self.assertEqual(list(m.I[2]), [2, 4, 5]) + self.assertEqual(list(m.I[3]), [2, 4, 5]) self.assertIsNot(m.I[1], m.I[2]) self.assertIsNot(m.I[1], m.I[3]) self.assertIsNot(m.I[2], m.I[3]) @@ -4022,20 +4144,19 @@ def test_indexed_set(self): self.assertIs(type(m.I[1]), _SortedSetData) self.assertIs(type(m.I[2]), _SortedSetData) self.assertIs(type(m.I[3]), _SortedSetData) - self.assertEqual(m.I.data(), {1:(2,4,5), 2:(2,4,5), 3:(2,4,5)}) + self.assertEqual(m.I.data(), {1: (2, 4, 5), 2: (2, 4, 5), 3: (2, 4, 5)}) # Explicit (procedural) construction m = ConcreteModel() - m.I = Set([1,2,3], ordered=True) + m.I = Set([1, 2, 3], ordered=True) self.assertEqual(len(m.I), 0) - m.I[1] = [1,2,3] - m.I[(2,)] = [4,5,6] + m.I[1] = [1, 2, 3] + m.I[(2,)] = [4, 5, 6] # test index mapping - self.assertEqual(sorted(m.I._data.keys()), [1,2]) - self.assertEqual(list(m.I[1]), [1,2,3]) - self.assertEqual(list(m.I[2]), [4,5,6]) - self.assertEqual(m.I.data(), {1:(1,2,3), 2:(4,5,6)}) - + self.assertEqual(sorted(m.I._data.keys()), [1, 2]) + self.assertEqual(list(m.I[1]), [1, 2, 3]) + self.assertEqual(list(m.I[2]), [4, 5, 6]) + self.assertEqual(m.I.data(), {1: (1, 2, 3), 2: (4, 5, 6)}) def test_naming(self): m = ConcreteModel() @@ -4047,17 +4168,18 @@ def test_naming(self): m.I = i self.assertEqual(str(i), "I") - j = Set(initialize=[1,2,3]) + j = Set(initialize=[1, 2, 3]) self.assertEqual(str(j), "AbstractOrderedScalarSet") j.construct() self.assertEqual(str(j), "{1, 2, 3}") m.J = j self.assertEqual(str(j), "J") - k = Set([1,2,3]) + k = Set([1, 2, 3]) self.assertEqual(str(k), "IndexedSet") with self.assertRaisesRegex( - ValueError, 'The component has not been constructed.'): + ValueError, 'The component has not been constructed.' + ): str(k[1]) m.K = k self.assertEqual(str(k), "K") @@ -4069,10 +4191,10 @@ def test_indexing(self): m.I = [1, 3, 2] self.assertEqual(m.I[2], 3) with self.assertRaisesRegex( - IndexError, "I indices must be integers, not float"): + IndexError, "I indices must be integers, not float" + ): m.I[2.5] - with self.assertRaisesRegex( - IndexError, "I indices must be integers, not str"): + with self.assertRaisesRegex(IndexError, "I indices must be integers, not str"): m.I['a'] def test_add_filter_validate(self): @@ -4080,63 +4202,66 @@ def test_add_filter_validate(self): m.I = Set(domain=Integers) self.assertIs(m.I.filter, None) with self.assertRaisesRegex( - ValueError, - r"Cannot add value 1.5 to Set I.\n" - r"\tThe value is not in the domain Integers"): + ValueError, + r"Cannot add value 1.5 to Set I.\n" + r"\tThe value is not in the domain Integers", + ): m.I.add(1.5) # Open question: should we cast the added value into the domain # (if we do, how?) - self.assertTrue( m.I.add(1.0) ) + self.assertTrue(m.I.add(1.0)) self.assertIn(1, m.I) - self.assertIn(1., m.I) + self.assertIn(1.0, m.I) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - self.assertFalse( m.I.add(1) ) + self.assertFalse(m.I.add(1)) self.assertEqual( - output.getvalue(), - "Element 1 already exists in Set I; no action taken\n") + output.getvalue(), "Element 1 already exists in Set I; no action taken\n" + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - self.assertFalse( m.I.add((1,)) ) + self.assertFalse(m.I.add((1,))) self.assertEqual( - output.getvalue(), - "Element (1,) already exists in Set I; no action taken\n") + output.getvalue(), "Element (1,) already exists in Set I; no action taken\n" + ) m.J = Set() # Note that pypy raises a different exception from cpython - err = (r"Unable to insert '{}' into Set J:\n\tTypeError: " - r"((unhashable type: 'dict')|('dict' objects are unhashable))") + err = ( + r"Unable to insert '{}' into Set J:\n\tTypeError: " + r"((unhashable type: 'dict')|('dict' objects are unhashable))" + ) with self.assertRaisesRegex(TypeError, err): m.J.add({}) - self.assertTrue( m.J.add((1,)) ) + self.assertTrue(m.J.add((1,))) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - self.assertFalse( m.J.add(1) ) + self.assertFalse(m.J.add(1)) self.assertEqual( - output.getvalue(), - "Element 1 already exists in Set J; no action taken\n") - + output.getvalue(), "Element 1 already exists in Set J; no action taken\n" + ) def _l_tri(model, i, j): self.assertIs(model, m) return i >= j - m.K = Set(initialize=RangeSet(3)*RangeSet(3), filter=_l_tri) + + m.K = Set(initialize=RangeSet(3) * RangeSet(3), filter=_l_tri) self.assertIsInstance(m.K.filter, IndexedCallInitializer) self.assertIs(m.K.filter._fcn, _l_tri) - self.assertEqual( - list(m.K), [(1,1), (2,1), (2,2), (3,1), (3,2), (3,3)]) + self.assertEqual(list(m.K), [(1, 1), (2, 1), (2, 2), (3, 1), (3, 2), (3, 3)]) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - self.assertTrue( m.K.add((0,0)) ) - self.assertFalse( m.K.add((0,1)) ) + self.assertTrue(m.K.add((0, 0))) + self.assertFalse(m.K.add((0, 1))) self.assertEqual(output.getvalue(), "") self.assertEqual( - list(m.K), [(1,1), (2,1), (2,2), (3,1), (3,2), (3,3), (0,0)]) + list(m.K), [(1, 1), (2, 1), (2, 2), (3, 1), (3, 2), (3, 3), (0, 0)] + ) # This tests a filter that matches the dimentionality of the # component. construct() needs to recognize that the filter is @@ -4145,63 +4270,66 @@ def _l_tri(model, i, j): def _lt_3(model, i): self.assertIs(model, m) return i < 3 - m.L = Set([1,2,3,4,5], initialize=RangeSet(10), filter=_lt_3) + + m.L = Set([1, 2, 3, 4, 5], initialize=RangeSet(10), filter=_lt_3) self.assertEqual(len(m.L), 5) self.assertEqual(list(m.L[1]), [1, 2]) self.assertEqual(list(m.L[5]), [1, 2]) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - self.assertTrue( m.L[2].add(0) ) - self.assertFalse( m.L[2].add((100)) ) + self.assertTrue(m.L[2].add(0)) + self.assertFalse(m.L[2].add((100))) self.assertEqual(output.getvalue(), "") - self.assertEqual(list(m.L[2]), [1,2,0]) - + self.assertEqual(list(m.L[2]), [1, 2, 0]) m = ConcreteModel() - def _validate(model,i,j): + + def _validate(model, i, j): self.assertIs(model, m) if i + j < 2: return True if i - j > 2: return False raise RuntimeError("Bogus value") + m.I = Set(validate=_validate) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - self.assertTrue( m.I.add((0,1)) ) + self.assertTrue(m.I.add((0, 1))) self.assertEqual(output.getvalue(), "") with self.assertRaisesRegex( - ValueError, - r"The value=\(4, 1\) violates the validation rule of " - r"Set I"): - m.I.add((4,1)) + ValueError, + r"The value=\(4, 1\) violates the validation rule of " r"Set I", + ): + m.I.add((4, 1)) self.assertEqual(output.getvalue(), "") with self.assertRaisesRegex(RuntimeError, "Bogus value"): - m.I.add((2,2)) + m.I.add((2, 2)) self.assertEqual( output.getvalue(), - "Exception raised while validating element '(2, 2)' for Set I\n") + "Exception raised while validating element '(2, 2)' for Set I\n", + ) # Note: one of these indices will trigger the exception in the # validot when it is called for the index. - m.J = Set([(0,0), (2,2)], validate=_validate) + m.J = Set([(0, 0), (2, 2)], validate=_validate) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): - self.assertTrue( m.J[2,2].add((0,1)) ) + self.assertTrue(m.J[2, 2].add((0, 1))) self.assertEqual(output.getvalue(), "") with self.assertRaisesRegex( - ValueError, - r"The value=\(4, 1\) violates the validation rule of " - r"Set J\[0,0\]"): - m.J[0,0].add((4,1)) + ValueError, + r"The value=\(4, 1\) violates the validation rule of " r"Set J\[0,0\]", + ): + m.J[0, 0].add((4, 1)) self.assertEqual(output.getvalue(), "") with self.assertRaisesRegex(RuntimeError, "Bogus value"): - m.J[2,2].add((2,2)) + m.J[2, 2].add((2, 2)) self.assertEqual( output.getvalue(), - "Exception raised while validating element '(2, 2)' for " - "Set J[2,2]\n") + "Exception raised while validating element '(2, 2)' for Set J[2,2]\n", + ) def test_domain(self): m = ConcreteModel() @@ -4212,68 +4340,74 @@ def test_domain(self): m.I = Set(domain=Integers) self.assertIs(m.I.domain, Integers) m.I.add(1) - m.I.add(2.) - self.assertEqual(list(m.I), [1, 2.]) + m.I.add(2.0) + self.assertEqual(list(m.I), [1, 2.0]) with self.assertRaisesRegex( - ValueError, 'The value is not in the domain Integers'): + ValueError, 'The value is not in the domain Integers' + ): m.I.add(1.5) m = ConcreteModel() m.I = Set(within=Integers) self.assertIs(m.I.domain, Integers) m.I.add(1) - m.I.add(2.) - self.assertEqual(list(m.I), [1, 2.]) + m.I.add(2.0) + self.assertEqual(list(m.I), [1, 2.0]) with self.assertRaisesRegex( - ValueError, 'The value is not in the domain Integers'): + ValueError, 'The value is not in the domain Integers' + ): m.I.add(1.5) m = ConcreteModel() - m.I = Set(bounds=(1,5)) - self.assertEqual(m.I.domain, RangeSet(1,5,0)) + m.I = Set(bounds=(1, 5)) + self.assertEqual(m.I.domain, RangeSet(1, 5, 0)) m.I.add(1) - m.I.add(2.) - self.assertEqual(list(m.I), [1, 2.]) + m.I.add(2.0) + self.assertEqual(list(m.I), [1, 2.0]) with self.assertRaisesRegex( - ValueError, r'The value is not in the domain \[1..5\]'): + ValueError, r'The value is not in the domain \[1..5\]' + ): m.I.add(5.5) m = ConcreteModel() - m.I = Set(domain=Integers, within=RangeSet(0, None, 2), bounds=(0,9)) - self.assertEqual(m.I.domain, RangeSet(0,9,2)) - m.I = [0,2.,4] - self.assertEqual(list(m.I), [0,2.,4]) + m.I = Set(domain=Integers, within=RangeSet(0, None, 2), bounds=(0, 9)) + self.assertEqual(m.I.domain, RangeSet(0, 9, 2)) + m.I = [0, 2.0, 4] + self.assertEqual(list(m.I), [0, 2.0, 4]) with self.assertRaisesRegex( - ValueError, 'The value is not in the domain ' - r'\(Integers & I_domain_index_0_index_1'): + ValueError, + 'The value is not in the domain ' r'\(Integers & I_domain_index_0_index_1', + ): m.I.add(1.5) with self.assertRaisesRegex( - ValueError, 'The value is not in the domain ' - r'\(Integers & I_domain_index_0_index_1'): + ValueError, + 'The value is not in the domain ' r'\(Integers & I_domain_index_0_index_1', + ): m.I.add(1) with self.assertRaisesRegex( - ValueError, 'The value is not in the domain ' - r'\(Integers & I_domain_index_0_index_1'): + ValueError, + 'The value is not in the domain ' r'\(Integers & I_domain_index_0_index_1', + ): m.I.add(10) - def test_pprint(self): def myFcn(x): return reversed(sorted(x)) m = ConcreteModel() m.I_index = RangeSet(3) - m.I = Set(m.I_index, initialize=lambda m,i: range(i+1), - domain=Integers) + m.I = Set(m.I_index, initialize=lambda m, i: range(i + 1), domain=Integers) m.J = Set(ordered=False) - m.K = Set(initialize=[(1,2), (3,4)], ordered=Set.SortedOrder) - m.L = Set(initialize=[(1,2), (3,4)], ordered=myFcn) + m.K = Set(initialize=[(1, 2), (3, 4)], ordered=Set.SortedOrder) + m.L = Set(initialize=[(1, 2), (3, 4)], ordered=myFcn) m.M = Reals - SetOf([0]) m.N = Integers - Reals buf = StringIO() m.pprint(ostream=buf) - self.assertEqual(buf.getvalue().strip(), """ + self.assertEqual( + buf.getvalue().strip(), + """ 6 Set Declarations I : Size=3, Index=I_index, Ordered=Insertion Key : Dimen : Domain : Size : Members @@ -4306,14 +4440,15 @@ def myFcn(x): Key : Ordered : Members None : True : [0] -8 Declarations: I_index I J K L M_index_1 M N""".strip()) +8 Declarations: I_index I J K L M_index_1 M N""".strip(), + ) def test_pickle(self): m = ConcreteModel() m.I = Set(initialize={1, 2, 'a'}, ordered=False) - m.J = Set(initialize=(2,4,1)) - m.K = Set(initialize=(2,4,1), ordered=Set.SortedOrder) - m.II = Set([1,2,3], m.J, initialize=_init_set) + m.J = Set(initialize=(2, 4, 1)) + m.K = Set(initialize=(2, 4, 1), ordered=Set.SortedOrder) + m.II = Set([1, 2, 3], m.J, initialize=_init_set) buf = StringIO() m.pprint(ostream=buf) @@ -4341,46 +4476,47 @@ def test_dimen(self): m = ConcreteModel() m.I = Set() self.assertEqual(m.I.dimen, UnknownSetDimen) - m.I.add((1,2)) + m.I.add((1, 2)) self.assertEqual(m.I.dimen, 2) - m.J = Set(initialize=[1,2,3]) + m.J = Set(initialize=[1, 2, 3]) self.assertEqual(m.J.dimen, 1) - m.K = Set(initialize=[(1,2,3)]) + m.K = Set(initialize=[(1, 2, 3)]) self.assertEqual(m.K.dimen, 3) with self.assertRaisesRegex( - ValueError, - "The value=1 has dimension 1 and is not valid for Set K " - "which has dimen=3"): + ValueError, + "The value=1 has dimension 1 and is not valid for Set K " + "which has dimen=3", + ): m.K.add(1) m.L = Set(dimen=None) self.assertIsNone(m.L.dimen) m.L.add(1) self.assertIsNone(m.L.dimen) - m.L.add((2,3)) + m.L.add((2, 3)) self.assertIsNone(m.L.dimen) - self.assertEqual(list(m.L), [1, (2,3)]) + self.assertEqual(list(m.L), [1, (2, 3)]) a = AbstractModel() - a.I = Set(initialize=[1,2,3]) + a.I = Set(initialize=[1, 2, 3]) self.assertEqual(a.I.dimen, UnknownSetDimen) - a.J = Set(initialize=[1,2,3], dimen=1) + a.J = Set(initialize=[1, 2, 3], dimen=1) self.assertEqual(a.J.dimen, 1) - m = a.create_instance(data={None:{'I': {None:[(1,2), (3,4)]}}}) + m = a.create_instance(data={None: {'I': {None: [(1, 2), (3, 4)]}}}) self.assertEqual(m.I.dimen, 2) self.assertEqual(m.J.dimen, 1) def test_construction(self): m = AbstractModel() - m.I = Set(initialize=[1,2,3]) - m.J = Set(initialize=[4,5,6]) - m.K = Set(initialize=[(1,4),(2,6),(3,5)], within=m.I*m.J) - m.II = Set([1,2,3], initialize={1:[0], 2:[1,2], 3: range(3)}) - m.JJ = Set([1,2,3], initialize={1:[0], 2:[1,2], 3: range(3)}) - m.KK = Set([1,2], initialize=[], dimen=lambda m,i: i) + m.I = Set(initialize=[1, 2, 3]) + m.J = Set(initialize=[4, 5, 6]) + m.K = Set(initialize=[(1, 4), (2, 6), (3, 5)], within=m.I * m.J) + m.II = Set([1, 2, 3], initialize={1: [0], 2: [1, 2], 3: range(3)}) + m.JJ = Set([1, 2, 3], initialize={1: [0], 2: [1, 2], 3: range(3)}) + m.KK = Set([1, 2], initialize=[], dimen=lambda m, i: i) output = StringIO() m.I.pprint(ostream=output) @@ -4398,50 +4534,50 @@ def test_construction(self): Not constructed""".strip() self.assertEqual(output.getvalue().strip(), ref) - i = m.create_instance(data={ - None: {'I': [-1,0], 'II': {1: [10,11], 3:[30]}, - 'K': [-1, 4, -1, 6, 0, 5]} - }) + i = m.create_instance( + data={ + None: { + 'I': [-1, 0], + 'II': {1: [10, 11], 3: [30]}, + 'K': [-1, 4, -1, 6, 0, 5], + } + } + ) - self.assertEqual(list(i.I), [-1,0]) - self.assertEqual(list(i.J), [4,5,6]) - self.assertEqual(list(i.K), [(-1,4),(-1,6),(0,5)]) - self.assertEqual(list(i.II[1]), [10,11]) + self.assertEqual(list(i.I), [-1, 0]) + self.assertEqual(list(i.J), [4, 5, 6]) + self.assertEqual(list(i.K), [(-1, 4), (-1, 6), (0, 5)]) + self.assertEqual(list(i.II[1]), [10, 11]) self.assertEqual(list(i.II[3]), [30]) self.assertEqual(list(i.JJ[1]), [0]) - self.assertEqual(list(i.JJ[2]), [1,2]) - self.assertEqual(list(i.JJ[3]), [0,1,2]) + self.assertEqual(list(i.JJ[2]), [1, 2]) + self.assertEqual(list(i.JJ[3]), [0, 1, 2]) self.assertEqual(list(i.KK[1]), []) self.assertEqual(list(i.KK[2]), []) # Implicitly-constructed set should fall back on initialize! - self.assertEqual(list(i.II[2]), [1,2]) + self.assertEqual(list(i.II[2]), [1, 2]) # Additional tests for tuplize: - i = m.create_instance(data={ - None: {'K': [(1,4),(2,6)], - 'KK': [1,4,2,6]} - }) - self.assertEqual(list(i.K), [(1,4),(2,6)]) - self.assertEqual(list(i.KK), [1,2]) - self.assertEqual(list(i.KK[1]), [1,4,2,6]) - self.assertEqual(list(i.KK[2]), [(1,4),(2,6)]) - i = m.create_instance(data={ - None: {'K': []} - }) + i = m.create_instance(data={None: {'K': [(1, 4), (2, 6)], 'KK': [1, 4, 2, 6]}}) + self.assertEqual(list(i.K), [(1, 4), (2, 6)]) + self.assertEqual(list(i.KK), [1, 2]) + self.assertEqual(list(i.KK[1]), [1, 4, 2, 6]) + self.assertEqual(list(i.KK[2]), [(1, 4), (2, 6)]) + i = m.create_instance(data={None: {'K': []}}) self.assertEqual(list(i.K), []) with self.assertRaisesRegex( - ValueError, "Cannot tuplize list data for set K because " - "its length 3 is not a multiple of dimen=2"): - i = m.create_instance(data={ - None: {'K': [1,2,3]} - }) + ValueError, + "Cannot tuplize list data for set K because " + "its length 3 is not a multiple of dimen=2", + ): + i = m.create_instance(data={None: {'K': [1, 2, 3]}}) with self.assertRaisesRegex( - ValueError, r"Cannot tuplize list data for set KK\[2\] " - "because its length 3 is not a multiple of dimen=2"): - i = m.create_instance(data={ - None: {'KK': {2: [1,2,3]}} - }) + ValueError, + r"Cannot tuplize list data for set KK\[2\] " + "because its length 3 is not a multiple of dimen=2", + ): + i = m.create_instance(data={None: {'KK': {2: [1, 2, 3]}}}) ref = """ Constructing AbstractOrderedScalarSet 'I' on [Model] from data=None @@ -4462,25 +4598,29 @@ def test_construction(self): # Test generators m = ConcreteModel() + def _i_init(m): yield 1 yield 3 yield 2 + m.I = Set(initialize=_i_init) - self.assertEqual(list(m.I), [1,3,2]) + self.assertEqual(list(m.I), [1, 3, 2]) m = ConcreteModel() + def _i_init(m): yield 1 yield 3 yield Set.End yield 2 + m.I = Set(initialize=_i_init) - self.assertEqual(list(m.I), [1,3]) + self.assertEqual(list(m.I), [1, 3]) m = ConcreteModel() - m.I = Set(initialize=[1,3,Set.End,2]) - self.assertEqual(list(m.I), [1,3]) + m.I = Set(initialize=[1, 3, Set.End, 2]) + self.assertEqual(list(m.I), [1, 3]) def test_unconstructed_api(self): m = AbstractModel() @@ -4489,100 +4629,118 @@ def test_unconstructed_api(self): m.K = Set(ordered=Set.SortedOrder) with self.assertRaisesRegex( - RuntimeError, - r"Cannot iterate over AbstractFiniteScalarSet 'I'" - r" before it has been constructed \(initialized\)"): + RuntimeError, + r"Cannot iterate over AbstractFiniteScalarSet 'I'" + r" before it has been constructed \(initialized\)", + ): for i in m.I: pass with self.assertRaisesRegex( - RuntimeError, - r"Cannot iterate over AbstractOrderedScalarSet 'J'" - r" before it has been constructed \(initialized\)"): + RuntimeError, + r"Cannot iterate over AbstractOrderedScalarSet 'J'" + r" before it has been constructed \(initialized\)", + ): for i in m.J: pass with self.assertRaisesRegex( - RuntimeError, - r"Cannot iterate over AbstractSortedScalarSet 'K'" - r" before it has been constructed \(initialized\)"): + RuntimeError, + r"Cannot iterate over AbstractSortedScalarSet 'K'" + r" before it has been constructed \(initialized\)", + ): for i in m.K: pass with self.assertRaisesRegex( - RuntimeError, - r"Cannot test membership in AbstractFiniteScalarSet 'I'" - r" before it has been constructed \(initialized\)"): + RuntimeError, + r"Cannot test membership in AbstractFiniteScalarSet 'I'" + r" before it has been constructed \(initialized\)", + ): 1 in m.I with self.assertRaisesRegex( - RuntimeError, - r"Cannot test membership in AbstractOrderedScalarSet 'J'" - r" before it has been constructed \(initialized\)"): + RuntimeError, + r"Cannot test membership in AbstractOrderedScalarSet 'J'" + r" before it has been constructed \(initialized\)", + ): 1 in m.J with self.assertRaisesRegex( - RuntimeError, - r"Cannot test membership in AbstractSortedScalarSet 'K'" - r" before it has been constructed \(initialized\)"): + RuntimeError, + r"Cannot test membership in AbstractSortedScalarSet 'K'" + r" before it has been constructed \(initialized\)", + ): 1 in m.K with self.assertRaisesRegex( - RuntimeError, - r"Cannot access '__len__' on AbstractFiniteScalarSet 'I'" - r" before it has been constructed \(initialized\)"): + RuntimeError, + r"Cannot access '__len__' on AbstractFiniteScalarSet 'I'" + r" before it has been constructed \(initialized\)", + ): len(m.I) with self.assertRaisesRegex( - RuntimeError, - r"Cannot access '__len__' on AbstractOrderedScalarSet 'J'" - r" before it has been constructed \(initialized\)"): + RuntimeError, + r"Cannot access '__len__' on AbstractOrderedScalarSet 'J'" + r" before it has been constructed \(initialized\)", + ): len(m.J) with self.assertRaisesRegex( - RuntimeError, - r"Cannot access '__len__' on AbstractSortedScalarSet 'K'" - r" before it has been constructed \(initialized\)"): + RuntimeError, + r"Cannot access '__len__' on AbstractSortedScalarSet 'K'" + r" before it has been constructed \(initialized\)", + ): len(m.K) def test_set_end(self): # Tested counted initialization m = ConcreteModel() + def _i_init(m, i): if i < 5: - return 2*i + return 2 * i return Set.End + m.I = Set(initialize=_i_init) - self.assertEqual(list(m.I), [2,4,6,8]) + self.assertEqual(list(m.I), [2, 4, 6, 8]) m = ConcreteModel() + def _i_init(m, i, j): if i < j: - return 2*i + return 2 * i return Set.End - m.I = Set([1,2,3], initialize=_i_init) + + m.I = Set([1, 2, 3], initialize=_i_init) self.assertEqual(list(m.I[1]), []) self.assertEqual(list(m.I[2]), [2]) - self.assertEqual(list(m.I[3]), [2,4]) + self.assertEqual(list(m.I[3]), [2, 4]) m = ConcreteModel() + def _i_init(m, i, j, k): - if i < j+k: - return 2*i + if i < j + k: + return 2 * i return Set.End - m.I = Set([1,2], [2,3], initialize=_i_init) - self.assertEqual(list(m.I[1,2]), [2,4]) - self.assertEqual(list(m.I[1,3]), [2,4,6]) - self.assertEqual(list(m.I[2,2]), [2,4,6]) - self.assertEqual(list(m.I[2,3]), [2,4,6,8]) + + m.I = Set([1, 2], [2, 3], initialize=_i_init) + self.assertEqual(list(m.I[1, 2]), [2, 4]) + self.assertEqual(list(m.I[1, 3]), [2, 4, 6]) + self.assertEqual(list(m.I[2, 2]), [2, 4, 6]) + self.assertEqual(list(m.I[2, 3]), [2, 4, 6, 8]) m = ConcreteModel() + def _i_init(m, i): if i > 3: return None return i + with self.assertRaisesRegex( - ValueError, "Set rule returned None instead of Set.End"): + ValueError, "Set rule returned None instead of Set.End" + ): m.I1 = Set(initialize=_i_init) @simple_set_rule @@ -4590,46 +4748,49 @@ def _j_init(m, i): if i > 3: return None return i + m.J = Set(initialize=_j_init) - self.assertEqual(list(m.J), [1,2,3]) + self.assertEqual(list(m.J), [1, 2, 3]) - # Backwards compatability: Test rule for indexed component that + # Backwards compatibility: Test rule for indexed component that # does not take the index @simple_set_rule def _k_init(m): - return [1,2,3] - m.K = Set([1], initialize=_k_init) - self.assertEqual(list(m.K[1]), [1,2,3]) + return [1, 2, 3] + m.K = Set([1], initialize=_k_init) + self.assertEqual(list(m.K[1]), [1, 2, 3]) @simple_set_rule def _l_init(m, l): if l > 3: return None return tuple(range(l)) + m.L = Set(initialize=_l_init, dimen=None) - self.assertEqual(list(m.L), [0, (0,1), (0,1,2)]) + self.assertEqual(list(m.L), [0, (0, 1), (0, 1, 2)]) - m.M = Set([1,2,3], initialize=_l_init) - self.assertEqual(list(m.M), [1,2,3]) + m.M = Set([1, 2, 3], initialize=_l_init) + self.assertEqual(list(m.M), [1, 2, 3]) self.assertEqual(list(m.M[1]), [0]) - self.assertEqual(list(m.M[2]), [0,1]) - self.assertEqual(list(m.M[3]), [0,1,2]) - + self.assertEqual(list(m.M[2]), [0, 1]) + self.assertEqual(list(m.M[3]), [0, 1, 2]) def test_set_skip(self): # Test Set.Skip m = ConcreteModel() - def _i_init(m,i): + + def _i_init(m, i): if i % 2: return Set.Skip return range(i) - m.I = Set([1,2,3,4,5], initialize=_i_init) + + m.I = Set([1, 2, 3, 4, 5], initialize=_i_init) self.assertEqual(len(m.I), 2) self.assertIn(2, m.I) - self.assertEqual(list(m.I[2]), [0,1]) + self.assertEqual(list(m.I[2]), [0, 1]) self.assertIn(4, m.I) - self.assertEqual(list(m.I[4]), [0,1,2,3]) + self.assertEqual(list(m.I[4]), [0, 1, 2, 3]) self.assertNotIn(1, m.I) self.assertNotIn(3, m.I) self.assertNotIn(5, m.I) @@ -4644,20 +4805,23 @@ def _i_init(m,i): self.assertEqual(output.getvalue().strip(), ref.strip()) m = ConcreteModel() - def _i_init(m,i): + + def _i_init(m, i): if i % 2: return None return range(i) + with self.assertRaisesRegex( - ValueError, - "Set rule or initializer returned None instead of Set.Skip"): - m.I = Set([1,2,3,4,5], initialize=_i_init) + ValueError, "Set rule or initializer returned None instead of Set.Skip" + ): + m.I = Set([1, 2, 3, 4, 5], initialize=_i_init) def _j_init(m): return None + with self.assertRaisesRegex( - ValueError, - "Set rule or initializer returned None instead of Set.Skip"): + ValueError, "Set rule or initializer returned None instead of Set.Skip" + ): m.J = Set(initialize=_j_init) def test_sorted_operations(self): @@ -4683,8 +4847,7 @@ def test_sorted_operations(self): i += 1 I.update((i, -i)) self.assertFalse(I._is_sorted) - self.assertEqual( - str(I), "{%s}" % ', '.join(str(_) for _ in range(-i,i+1))) + self.assertEqual(str(I), "{%s}" % ', '.join(str(_) for _ in range(-i, i + 1))) self.assertTrue(I._is_sorted) # ranges() @@ -4693,7 +4856,7 @@ def test_sorted_operations(self): self.assertFalse(I._is_sorted) self.assertEqual( ','.join(str(_) for _ in I.ranges()), - ','.join('[%s]' % _ for _ in range(-i,i+1)) + ','.join('[%s]' % _ for _ in range(-i, i + 1)), ) self.assertTrue(I._is_sorted) @@ -4702,8 +4865,7 @@ def test_sorted_operations(self): I.update((i, -i)) self.assertFalse(I._is_sorted) self.assertEqual( - ','.join(str(_) for _ in I), - ','.join(str(_) for _ in range(-i,i+1)) + ','.join(str(_) for _ in I), ','.join(str(_) for _ in range(-i, i + 1)) ) self.assertTrue(I._is_sorted) @@ -4713,7 +4875,7 @@ def test_sorted_operations(self): self.assertFalse(I._is_sorted) self.assertEqual( ','.join(str(_) for _ in reversed(I)), - ','.join(str(_) for _ in reversed(range(-i,i+1))) + ','.join(str(_) for _ in reversed(range(-i, i + 1))), ) self.assertTrue(I._is_sorted) @@ -4723,7 +4885,7 @@ def test_sorted_operations(self): self.assertFalse(I._is_sorted) self.assertEqual( ','.join(str(_) for _ in I.data()), - ','.join(str(_) for _ in range(-i,i+1)) + ','.join(str(_) for _ in range(-i, i + 1)), ) self.assertTrue(I._is_sorted) @@ -4733,7 +4895,7 @@ def test_sorted_operations(self): self.assertFalse(I._is_sorted) self.assertEqual( ','.join(str(_) for _ in I.ordered_data()), - ','.join(str(_) for _ in range(-i,i+1)) + ','.join(str(_) for _ in range(-i, i + 1)), ) self.assertTrue(I._is_sorted) @@ -4743,7 +4905,27 @@ def test_sorted_operations(self): self.assertFalse(I._is_sorted) self.assertEqual( ','.join(str(_) for _ in I.sorted_data()), - ','.join(str(_) for _ in range(-i,i+1)) + ','.join(str(_) for _ in range(-i, i + 1)), + ) + self.assertTrue(I._is_sorted) + + # ordered_iter()() + i += 1 + I.update((i, -i)) + self.assertFalse(I._is_sorted) + self.assertEqual( + ','.join(str(_) for _ in I.ordered_iter()), + ','.join(str(_) for _ in range(-i, i + 1)), + ) + self.assertTrue(I._is_sorted) + + # sorted_iter() + i += 1 + I.update((i, -i)) + self.assertFalse(I._is_sorted) + self.assertEqual( + ','.join(str(_) for _ in I.sorted_iter()), + ','.join(str(_) for _ in range(-i, i + 1)), ) self.assertTrue(I._is_sorted) @@ -4751,7 +4933,7 @@ def test_sorted_operations(self): i += 1 I.update((i, -i)) self.assertFalse(I._is_sorted) - self.assertEqual(I.bounds(), (-i,i)) + self.assertEqual(I.bounds(), (-i, i)) self.assertTrue(I._is_sorted) # remove() @@ -4759,7 +4941,7 @@ def test_sorted_operations(self): self.assertTrue(I._is_sorted) self.assertEqual( ','.join(str(_) for _ in I), - ','.join(str(_) for _ in range(-i,i+1) if _ != 0) + ','.join(str(_) for _ in range(-i, i + 1) if _ != 0), ) self.assertTrue(I._is_sorted) @@ -4767,8 +4949,7 @@ def test_sorted_operations(self): I.add(0) self.assertFalse(I._is_sorted) self.assertEqual( - ','.join(str(_) for _ in I), - ','.join(str(_) for _ in range(-i,i+1)) + ','.join(str(_) for _ in I), ','.join(str(_) for _ in range(-i, i + 1)) ) self.assertTrue(I._is_sorted) @@ -4777,7 +4958,7 @@ def test_sorted_operations(self): self.assertTrue(I._is_sorted) self.assertEqual( ','.join(str(_) for _ in I), - ','.join(str(_) for _ in range(-i,i+1) if _ != 0) + ','.join(str(_) for _ in range(-i, i + 1) if _ != 0), ) self.assertTrue(I._is_sorted) @@ -4789,11 +4970,10 @@ def test_sorted_operations(self): # set_value() i = 1 - I.set_value({-i,0,i}) + I.set_value({-i, 0, i}) self.assertFalse(I._is_sorted) self.assertEqual( - ','.join(str(_) for _ in I), - ','.join(str(_) for _ in range(-i,i+1)) + ','.join(str(_) for _ in I), ','.join(str(_) for _ in range(-i, i + 1)) ) self.assertTrue(I._is_sorted) @@ -4822,7 +5002,7 @@ def test_sorted_operations(self): i += 1 I.update((i, -i)) self.assertFalse(I._is_sorted) - self.assertEqual(I.next(-i), -i+1) + self.assertEqual(I.next(-i), -i + 1) self.assertTrue(I._is_sorted) # nextw() @@ -4836,7 +5016,7 @@ def test_sorted_operations(self): i += 1 I.update((i, -i)) self.assertFalse(I._is_sorted) - self.assertEqual(I.prev(i), i-1) + self.assertEqual(I.prev(i), i - 1) self.assertTrue(I._is_sorted) # prevw() @@ -4850,37 +5030,134 @@ def test_sorted_operations(self): i += 1 I.update((i, -i)) self.assertFalse(I._is_sorted) - self.assertEqual(I[i+1], 0) + self.assertEqual(I[i + 1], 0) self.assertTrue(I._is_sorted) # ord() i += 1 I.update((i, -i)) self.assertFalse(I._is_sorted) - self.assertEqual(I.ord(0), i+1) + self.assertEqual(I.ord(0), i + 1) self.assertTrue(I._is_sorted) + def test_sorted_operations(self): + I = UnindexedComponent_set + self.assertEqual(len(I), 1) + self.assertEqual(I.dimen, 0) + + self.assertTrue(I.isdiscrete()) + self.assertTrue(I.isfinite()) + self.assertTrue(I.isordered()) + + with self.assertRaisesRegex(AttributeError, "has no attribute 'add'"): + I.add(1) + with self.assertRaisesRegex(AttributeError, "has no attribute 'set_value'"): + I.set_value(1) + with self.assertRaisesRegex(AttributeError, "has no attribute 'remove'"): + I.remove(val[0]) + with self.assertRaisesRegex(AttributeError, "has no attribute 'discard'"): + I.discard(val[0]) + with self.assertRaisesRegex(AttributeError, "has no attribute 'pop'"): + I.pop() + with self.assertRaisesRegex(AttributeError, "has no attribute 'clear'"): + I.clear() + with self.assertRaisesRegex(AttributeError, "has no attribute 'update'"): + I.update() + + self.assertEqual(str(I), "UnindexedComponent_set") + self.assertEqual(','.join(str(_) for _ in I.ranges()), "{None}") + + self.assertIsNone(I.construct()) + + val = I.data() + self.assertIs(type(val), tuple) + self.assertEqual(len(val), 1) + self.assertEqual(I.ordered_data(), val) + self.assertEqual(I.sorted_data(), val) + self.assertEqual(I.get(val[0], 100), val[0]) + self.assertEqual(I.get(999, 100), 100) + + self.assertEqual(tuple(I), val) + self.assertEqual(tuple(reversed(I)), val) + self.assertEqual(tuple(I.sorted_iter()), val) + self.assertEqual(tuple(I.ordered_iter()), val) + + self.assertEqual(I.bounds(), (None, None)) + self.assertEqual(I.get_interval(), (None, None, None)) + self.assertEqual(I.subsets(), [I]) + + self.assertEqual(I.first(), val[0]) + self.assertEqual(I.last(), val[0]) + self.assertEqual(I.at(1), val[0]) + with self.assertRaisesRegex( + IndexError, 'UnindexedComponent_set index out of range' + ): + I.at(999) + self.assertEqual(I.ord(val[0]), 1) + with self.assertRaisesRegex( + IndexError, + "Cannot identify position of 999 in Set UnindexedComponent_set: " + "item not in Set", + ): + I.ord(999) + + with self.assertRaisesRegex( + IndexError, 'Cannot advance past the end of the Set' + ): + I.next(val[0]) + with self.assertRaisesRegex( + IndexError, + "Cannot identify position of 999 in Set UnindexedComponent_set: " + "item not in Set", + ): + I.next(999) + self.assertEqual(I.nextw(val[0]), val[0]) + with self.assertRaisesRegex( + IndexError, + "Cannot identify position of 999 in Set UnindexedComponent_set: " + "item not in Set", + ): + I.nextw(999) + + with self.assertRaisesRegex( + IndexError, 'Cannot advance before the beginning of the Set' + ): + I.prev(val[0]) + with self.assertRaisesRegex( + IndexError, + "Cannot identify position of 999 in Set UnindexedComponent_set: " + "item not in Set", + ): + I.prev(999) + self.assertEqual(I.prevw(val[0]), val[0]) + with self.assertRaisesRegex( + IndexError, + "Cannot identify position of 999 in Set UnindexedComponent_set: " + "item not in Set", + ): + I.prevw(999) + def test_process_setarg(self): m = AbstractModel() - m.I = Set([1,2,3]) + m.I = Set([1, 2, 3]) self.assertTrue(m.I.index_set().is_constructed()) self.assertTrue(m.I.index_set().isordered()) i = m.create_instance() - self.assertEqual(i.I.index_set(), [1,2,3]) + self.assertEqual(i.I.index_set(), [1, 2, 3]) m = AbstractModel() - m.I = Set({1,2,3}) + m.I = Set({1, 2, 3}) self.assertTrue(m.I.index_set().is_constructed()) self.assertFalse(m.I.index_set().isordered()) i = m.create_instance() - self.assertEqual(i.I.index_set(), [1,2,3]) + self.assertEqual(i.I.index_set(), [1, 2, 3]) m = AbstractModel() m.I = Set(RangeSet(3)) self.assertTrue(m.I.index_set().is_constructed()) self.assertTrue(m.I.index_set().isordered()) i = m.create_instance() - self.assertEqual(i.I.index_set(), [1,2,3]) + self.assertEqual(i.I.index_set(), [1, 2, 3]) m = AbstractModel() m.p = Param(initialize=3) @@ -4888,23 +5165,24 @@ def test_process_setarg(self): self.assertFalse(m.I.index_set().is_constructed()) self.assertTrue(m.I.index_set().isordered()) i = m.create_instance() - self.assertEqual(i.I.index_set(), [1,2,3]) + self.assertEqual(i.I.index_set(), [1, 2, 3]) m = AbstractModel() - m.I = Set(lambda m: [1,2,3]) + m.I = Set(lambda m: [1, 2, 3]) self.assertFalse(m.I.index_set().is_constructed()) self.assertTrue(m.I.index_set().isordered()) i = m.create_instance() - self.assertEqual(i.I.index_set(), [1,2,3]) + self.assertEqual(i.I.index_set(), [1, 2, 3]) def _i_idx(m): - return [1,2,3] + return [1, 2, 3] + m = AbstractModel() m.I = Set(_i_idx) self.assertFalse(m.I.index_set().is_constructed()) self.assertTrue(m.I.index_set().isordered()) i = m.create_instance() - self.assertEqual(i.I.index_set(), [1,2,3]) + self.assertEqual(i.I.index_set(), [1, 2, 3]) # Note: generators are uncopyable, so we will mock up the same # behavior as above using an unconstructed block @@ -4912,32 +5190,33 @@ def _i_idx(): yield 1 yield 2 yield 3 + m = Block() m.I = Set(_i_idx()) self.assertFalse(m.I.index_set().is_constructed()) self.assertTrue(m.I.index_set().isordered()) i = ConcreteModel() i.m = m - self.assertEqual(i.m.I.index_set(), [1,2,3]) + self.assertEqual(i.m.I.index_set(), [1, 2, 3]) def test_set_options(self): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): + @set_options(domain=Integers) def Bindex(m): return range(5) - self.assertIn( - "The set_options decorator is deprecated", - output.getvalue()) + + self.assertIn("The set_options decorator is deprecated", output.getvalue()) m = ConcreteModel() - m.I = Set(initialize=[8,9]) + m.I = Set(initialize=[8, 9]) m.J = m.I.cross(Bindex) self.assertIs(m.J._sets[1]._domain, Integers) m.K = Set(Bindex) self.assertIs(m.K.index_set()._domain, Integers) - self.assertEqual(m.K.index_set(), [0,1,2,3,4]) + self.assertEqual(m.K.index_set(), [0, 1, 2, 3, 4]) def test_no_normalize_index(self): try: @@ -4947,11 +5226,11 @@ def test_no_normalize_index(self): m = ConcreteModel() m.I = Set() self.assertIs(m.I._dimen, UnknownSetDimen) - self.assertTrue(m.I.add((1,(2,3)))) + self.assertTrue(m.I.add((1, (2, 3)))) self.assertIs(m.I._dimen, None) - self.assertNotIn(((1,2),3), m.I) - self.assertIn((1,(2,3)), m.I) - self.assertNotIn((1,2,3), m.I) + self.assertNotIn(((1, 2), 3), m.I) + self.assertIn((1, (2, 3)), m.I) + self.assertNotIn((1, 2, 3), m.I) m.J = Set() self.assertTrue(m.J.add(1)) @@ -4968,17 +5247,17 @@ def test_no_normalize_index(self): m = ConcreteModel() m.I = Set() self.assertIs(m.I._dimen, UnknownSetDimen) - m.I.add((1,(2,3))) + m.I.add((1, (2, 3))) self.assertIs(m.I._dimen, 3) - self.assertIn(((1,2),3), m.I) - self.assertIn((1,(2,3)), m.I) - self.assertIn((1,2,3), m.I) + self.assertIn(((1, 2), 3), m.I) + self.assertIn((1, (2, 3)), m.I) + self.assertIn((1, 2, 3), m.I) m.J = Set() self.assertTrue(m.J.add(1)) self.assertIn(1, m.J) self.assertIn((1,), m.J) - self.assertFalse(m.J.add((1,))) # Not added! + self.assertFalse(m.J.add((1,))) # Not added! self.assertIn(1, m.J) self.assertIn((1,), m.J) self.assertTrue(m.J.add((2,))) @@ -5050,10 +5329,12 @@ def test_SetData(self): self.assertIs(type(s.difference(m.I)), SetDifference_InfiniteSet) self.assertIs(type(m.I.difference(s)), SetDifference_OrderedSet) - self.assertIs(type(s.symmetric_difference(m.I)), - SetSymmetricDifference_InfiniteSet) - self.assertIs(type(m.I.symmetric_difference(s)), - SetSymmetricDifference_InfiniteSet) + self.assertIs( + type(s.symmetric_difference(m.I)), SetSymmetricDifference_InfiniteSet + ) + self.assertIs( + type(m.I.symmetric_difference(s)), SetSymmetricDifference_InfiniteSet + ) self.assertIs(type(s.cross(m.I)), SetProduct_InfiniteSet) self.assertIs(type(m.I.cross(s)), SetProduct_InfiniteSet) @@ -5147,10 +5428,12 @@ class FiniteMixin(_FiniteSetMixin, _SetData): self.assertIs(type(s.difference(m.I)), SetDifference_FiniteSet) self.assertIs(type(m.I.difference(s)), SetDifference_OrderedSet) - self.assertIs(type(s.symmetric_difference(m.I)), - SetSymmetricDifference_FiniteSet) - self.assertIs(type(m.I.symmetric_difference(s)), - SetSymmetricDifference_FiniteSet) + self.assertIs( + type(s.symmetric_difference(m.I)), SetSymmetricDifference_FiniteSet + ) + self.assertIs( + type(m.I.symmetric_difference(s)), SetSymmetricDifference_FiniteSet + ) self.assertIs(type(s.cross(m.I)), SetProduct_FiniteSet) self.assertIs(type(m.I.cross(s)), SetProduct_FiniteSet) @@ -5170,7 +5453,6 @@ class FiniteMixin(_FiniteSetMixin, _SetData): self.assertIs(type(s * m.I), SetProduct_FiniteSet) self.assertIs(type(m.I * s), SetProduct_FiniteSet) - with self.assertRaises(DeveloperError): self.assertFalse(s < m.I) with self.assertRaises(DeveloperError): @@ -5205,7 +5487,7 @@ class FiniteMixin(_FiniteSetMixin, _SetData): with self.assertRaises(DeveloperError): s.sorted_data() - self.assertEqual(s.bounds(), (None,None)) + self.assertEqual(s.bounds(), (None, None)) def test_OrderedMixin(self): # This tests an anstract ordered set API @@ -5271,10 +5553,12 @@ class OrderedMixin(_OrderedSetMixin, _FiniteSetMixin, _SetData): self.assertIs(type(s.difference(m.I)), SetDifference_OrderedSet) self.assertIs(type(m.I.difference(s)), SetDifference_OrderedSet) - self.assertIs(type(s.symmetric_difference(m.I)), - SetSymmetricDifference_OrderedSet) - self.assertIs(type(m.I.symmetric_difference(s)), - SetSymmetricDifference_OrderedSet) + self.assertIs( + type(s.symmetric_difference(m.I)), SetSymmetricDifference_OrderedSet + ) + self.assertIs( + type(m.I.symmetric_difference(s)), SetSymmetricDifference_OrderedSet + ) self.assertIs(type(s.cross(m.I)), SetProduct_OrderedSet) self.assertIs(type(m.I.cross(s)), SetProduct_OrderedSet) @@ -5294,7 +5578,6 @@ class OrderedMixin(_OrderedSetMixin, _FiniteSetMixin, _SetData): self.assertIs(type(s * m.I), SetProduct_OrderedSet) self.assertIs(type(m.I * s), SetProduct_OrderedSet) - with self.assertRaises(DeveloperError): self.assertFalse(s < m.I) with self.assertRaises(DeveloperError): @@ -5329,7 +5612,7 @@ class OrderedMixin(_OrderedSetMixin, _FiniteSetMixin, _SetData): with self.assertRaises(DeveloperError): s.sorted_data() - self.assertEqual(s.bounds(), (None,None)) + self.assertEqual(s.bounds(), (None, None)) # # _OrderedSetMixin API @@ -5362,11 +5645,11 @@ class OrderedMixin(_OrderedSetMixin, _FiniteSetMixin, _SetData): class TestSetUtils(unittest.TestCase): def test_get_continuous_interval(self): - self.assertEqual(Reals.get_interval(), (None,None,0)) - self.assertEqual(PositiveReals.get_interval(), (0,None,0)) - self.assertEqual(NonNegativeReals.get_interval(), (0,None,0)) - self.assertEqual(NonPositiveReals.get_interval(), (None,0,0)) - self.assertEqual(NegativeReals.get_interval(), (None,0,0)) + self.assertEqual(Reals.get_interval(), (None, None, 0)) + self.assertEqual(PositiveReals.get_interval(), (0, None, 0)) + self.assertEqual(NonNegativeReals.get_interval(), (0, None, 0)) + self.assertEqual(NonPositiveReals.get_interval(), (None, 0, 0)) + self.assertEqual(NegativeReals.get_interval(), (None, 0, 0)) a = NonNegativeReals | NonPositiveReals self.assertEqual(a.get_interval(), (None, None, 0)) @@ -5377,7 +5660,7 @@ def test_get_continuous_interval(self): self.assertEqual(a.get_interval(), (None, None, None)) a = NegativeReals | PositiveReals | [0] self.assertEqual(a.get_interval(), (None, None, 0)) - a = NegativeReals | PositiveReals | RangeSet(0,5) + a = NegativeReals | PositiveReals | RangeSet(0, 5) self.assertEqual(a.get_interval(), (None, None, 0)) a = NegativeReals | RangeSet(-3, 3) @@ -5387,14 +5670,14 @@ def test_get_continuous_interval(self): a = PositiveReals | Binary self.assertEqual(a.get_interval(), (0, None, 0)) - a = RangeSet(1,10,0) | RangeSet(5,15,0) - self.assertEqual(a.get_interval(), (1,15,0)) - a = RangeSet(5,15,0) | RangeSet(1,10,0) - self.assertEqual(a.get_interval(), (1,15,0)) + a = RangeSet(1, 10, 0) | RangeSet(5, 15, 0) + self.assertEqual(a.get_interval(), (1, 15, 0)) + a = RangeSet(5, 15, 0) | RangeSet(1, 10, 0) + self.assertEqual(a.get_interval(), (1, 15, 0)) - a = RangeSet(5,15,0) | RangeSet(1,4,0) + a = RangeSet(5, 15, 0) | RangeSet(1, 4, 0) self.assertEqual(a.get_interval(), (1, 15, None)) - a = RangeSet(1,4,0) | RangeSet(5,15,0) + a = RangeSet(1, 4, 0) | RangeSet(5, 15, 0) self.assertEqual(a.get_interval(), (1, 15, None)) a = NegativeReals | Any @@ -5407,163 +5690,183 @@ def test_get_continuous_interval(self): self.assertEqual(a.get_interval(), (None, None, None)) def test_get_discrete_interval(self): - self.assertEqual(Integers.get_interval(), (None,None,1)) - self.assertEqual(PositiveIntegers.get_interval(), (1,None,1)) - self.assertEqual(NegativeIntegers.get_interval(), (None,-1,1)) - self.assertEqual(Binary.get_interval(), (0,1,1)) + self.assertEqual(Integers.get_interval(), (None, None, 1)) + self.assertEqual(PositiveIntegers.get_interval(), (1, None, 1)) + self.assertEqual(NegativeIntegers.get_interval(), (None, -1, 1)) + self.assertEqual(Binary.get_interval(), (0, 1, 1)) a = PositiveIntegers | NegativeIntegers self.assertEqual(a.get_interval(), (None, None, None)) a = NegativeIntegers | NonNegativeIntegers self.assertEqual(a.get_interval(), (None, None, 1)) - a = SetOf([1,3,5,6,4,2]) + a = SetOf([1, 3, 5, 6, 4, 2]) self.assertEqual(a.get_interval(), (1, 6, 1)) - a = SetOf([1,3,5,6,2]) + a = SetOf([1, 3, 5, 6, 2]) self.assertEqual(a.get_interval(), (1, 6, None)) - a = SetOf([1,3,5,6,4,2,'a']) + a = SetOf([1, 3, 5, 6, 4, 2, 'a']) self.assertEqual(a.get_interval(), (None, None, None)) a = SetOf([3]) - self.assertEqual(a.get_interval(), (3,3,0)) + self.assertEqual(a.get_interval(), (3, 3, 0)) - a = RangeSet(ranges=(NR(0,5,1), NR(5,10,1))) + a = RangeSet(ranges=(NR(0, 5, 1), NR(5, 10, 1))) self.assertEqual(a.get_interval(), (0, 10, 1)) - a = RangeSet(ranges=(NR(5,10,1), NR(0,5,1))) + a = RangeSet(ranges=(NR(5, 10, 1), NR(0, 5, 1))) self.assertEqual(a.get_interval(), (0, 10, 1)) - a = RangeSet(ranges=(NR(0,4,1), NR(5,10,1))) + a = RangeSet(ranges=(NR(0, 4, 1), NR(5, 10, 1))) self.assertEqual(a.get_interval(), (0, 10, 1)) - a = RangeSet(ranges=(NR(5,10,1), NR(0,4,1))) + a = RangeSet(ranges=(NR(5, 10, 1), NR(0, 4, 1))) self.assertEqual(a.get_interval(), (0, 10, 1)) - a = RangeSet(ranges=(NR(0,3,1), NR(5,10,1))) + a = RangeSet(ranges=(NR(0, 3, 1), NR(5, 10, 1))) self.assertEqual(a.get_interval(), (0, 10, None)) - a = RangeSet(ranges=(NR(5,10,1), NR(0,3,1))) + a = RangeSet(ranges=(NR(5, 10, 1), NR(0, 3, 1))) self.assertEqual(a.get_interval(), (0, 10, None)) - a = RangeSet(ranges=(NR(0,4,2), NR(6,10,2))) + a = RangeSet(ranges=(NR(0, 4, 2), NR(6, 10, 2))) self.assertEqual(a.get_interval(), (0, 10, 2)) - a = RangeSet(ranges=(NR(6,10,2), NR(0,4,2))) + a = RangeSet(ranges=(NR(6, 10, 2), NR(0, 4, 2))) self.assertEqual(a.get_interval(), (0, 10, 2)) - a = RangeSet(ranges=(NR(0,4,2), NR(5,10,2))) + a = RangeSet(ranges=(NR(0, 4, 2), NR(5, 10, 2))) self.assertEqual(a.get_interval(), (0, 9, None)) - a = RangeSet(ranges=(NR(5,10,2), NR(0,4,2))) + a = RangeSet(ranges=(NR(5, 10, 2), NR(0, 4, 2))) self.assertEqual(a.get_interval(), (0, 9, None)) - a = RangeSet(ranges=(NR(0,10,2), NR(0,10,3))) + a = RangeSet(ranges=(NR(0, 10, 2), NR(0, 10, 3))) self.assertEqual(a.get_interval(), (0, 10, None)) - a = RangeSet(ranges=(NR(0,10,3), NR(0,10,2))) + a = RangeSet(ranges=(NR(0, 10, 3), NR(0, 10, 2))) self.assertEqual(a.get_interval(), (0, 10, None)) - a = RangeSet(ranges=(NR(2,10,2), NR(0,12,4))) - self.assertEqual(a.get_interval(), (0,12,2)) - a = RangeSet(ranges=(NR(0,12,4), NR(2,10,2))) - self.assertEqual(a.get_interval(), (0,12,2)) + a = RangeSet(ranges=(NR(2, 10, 2), NR(0, 12, 4))) + self.assertEqual(a.get_interval(), (0, 12, 2)) + a = RangeSet(ranges=(NR(0, 12, 4), NR(2, 10, 2))) + self.assertEqual(a.get_interval(), (0, 12, 2)) # Even though the following are reasonable intervals, we # currently don't support resolving it: - a = RangeSet(ranges=(NR(0,10,2), NR(1,10,2))) + a = RangeSet(ranges=(NR(0, 10, 2), NR(1, 10, 2))) self.assertEqual(a.get_interval(), (0, 10, None)) - a = RangeSet(ranges=(NR(0,10,3), NR(1,10,3), NR(2,10,3))) + a = RangeSet(ranges=(NR(0, 10, 3), NR(1, 10, 3), NR(2, 10, 3))) self.assertEqual(a.get_interval(), (0, 10, None)) + def test_get_interval(self): + self.assertEqual(Any.get_interval(), (None, None, None)) + a = UnindexedComponent_set + self.assertEqual(a.get_interval(), (None, None, None)) + a = Set(initialize=['a']) + a.construct() + self.assertEqual(a.get_interval(), ('a', 'a', None)) + a = Set(initialize=[1]) + a.construct() + self.assertEqual(a.get_interval(), (1, 1, 0)) + class TestDeprecation(unittest.TestCase): def test_filter(self): m = ConcreteModel() - m.I = Set(initialize=[1,2,3]) - m.J = m.I*m.I - m.K = Set(initialize=[1,2,3], filter=lambda m,i: i%2) + m.I = Set(initialize=[1, 2, 3]) + m.J = m.I * m.I + m.K = Set(initialize=[1, 2, 3], filter=lambda m, i: i % 2) output = StringIO() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertIsNone(m.I.filter) self.assertRegex( - output.getvalue(), - "^DEPRECATED: 'filter' is no longer a public attribute") + output.getvalue(), "^DEPRECATED: 'filter' is no longer a public attribute" + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertIsNone(m.J.filter) self.assertRegex( - output.getvalue(), - "^DEPRECATED: 'filter' is no longer a public attribute") + output.getvalue(), "^DEPRECATED: 'filter' is no longer a public attribute" + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertIsInstance(m.K.filter, IndexedCallInitializer) self.assertRegex( - output.getvalue(), - "^DEPRECATED: 'filter' is no longer a public attribute") + output.getvalue(), "^DEPRECATED: 'filter' is no longer a public attribute" + ) def test_virtual(self): m = ConcreteModel() - m.I = Set(initialize=[1,2,3]) - m.J = m.I*m.I + m.I = Set(initialize=[1, 2, 3]) + m.J = m.I * m.I output = StringIO() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertFalse(m.I.virtual) self.assertRegex( output.getvalue(), - "^DEPRECATED: The 'virtual' attribute is no longer supported") + "^DEPRECATED: The 'virtual' attribute is no longer supported", + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertTrue(m.J.virtual) self.assertRegex( output.getvalue(), - "^DEPRECATED: The 'virtual' attribute is no longer supported") + "^DEPRECATED: The 'virtual' attribute is no longer supported", + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.J.virtual = True self.assertRegex( output.getvalue(), - "^DEPRECATED: The 'virtual' attribute is no longer supported") + "^DEPRECATED: The 'virtual' attribute is no longer supported", + ) with self.assertRaisesRegex( - ValueError, - r"Attempting to set the \(deprecated\) 'virtual' attribute " - r"on J to an invalid value \(False\)"): + ValueError, + r"Attempting to set the \(deprecated\) 'virtual' attribute " + r"on J to an invalid value \(False\)", + ): m.J.virtual = False def test_concrete(self): m = ConcreteModel() - m.I = Set(initialize=[1,2,3]) - m.J = m.I*m.I + m.I = Set(initialize=[1, 2, 3]) + m.J = m.I * m.I output = StringIO() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertTrue(m.I.concrete) self.assertRegex( output.getvalue(), - "^DEPRECATED: The 'concrete' attribute is no longer supported") + "^DEPRECATED: The 'concrete' attribute is no longer supported", + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertTrue(m.J.concrete) self.assertRegex( output.getvalue(), - "^DEPRECATED: The 'concrete' attribute is no longer supported") + "^DEPRECATED: The 'concrete' attribute is no longer supported", + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertFalse(Reals.concrete) self.assertRegex( output.getvalue(), - "^DEPRECATED: The 'concrete' attribute is no longer supported") + "^DEPRECATED: The 'concrete' attribute is no longer supported", + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.J.concrete = True self.assertRegex( output.getvalue(), - "^DEPRECATED: The 'concrete' attribute is no longer supported.") + "^DEPRECATED: The 'concrete' attribute is no longer supported.", + ) with self.assertRaisesRegex( - ValueError, - r"Attempting to set the \(deprecated\) 'concrete' " - r"attribute on J to an invalid value \(False\)"): + ValueError, + r"Attempting to set the \(deprecated\) 'concrete' " + r"attribute on J to an invalid value \(False\)", + ): m.J.concrete = False def test_ordered_attr(self): @@ -5575,71 +5878,75 @@ def test_ordered_attr(self): self.assertTrue(m.J.ordered) self.assertRegex( output.getvalue(), - "^DEPRECATED: The 'ordered' attribute is no longer supported.") + "^DEPRECATED: The 'ordered' attribute is no longer supported.", + ) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): self.assertFalse(m.K.ordered) self.assertRegex( output.getvalue(), - "^DEPRECATED: The 'ordered' attribute is no longer supported.") + "^DEPRECATED: The 'ordered' attribute is no longer supported.", + ) def test_value_attr(self): m = ConcreteModel() - m.J = Set(ordered=True, initialize=[1,3,2]) + m.J = Set(ordered=True, initialize=[1, 3, 2]) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): tmp = m.J.value self.assertIs(type(tmp), set) - self.assertEqual(tmp, set([1,3,2])) + self.assertEqual(tmp, set([1, 3, 2])) self.assertRegex( output.getvalue(), - r"^DEPRECATED: The 'value' attribute is deprecated. Use .data\(\)") + r"^DEPRECATED: The 'value' attribute is deprecated. Use .data\(\)", + ) def test_value_list_attr(self): m = ConcreteModel() - m.J = Set(ordered=True, initialize=[1,3,2]) + m.J = Set(ordered=True, initialize=[1, 3, 2]) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): tmp = m.J.value_list self.assertIs(type(tmp), list) - self.assertEqual(tmp, list([1,3,2])) + self.assertEqual(tmp, list([1, 3, 2])) self.assertRegex( - output.getvalue().replace('\n',' '), + output.getvalue().replace('\n', ' '), r"^DEPRECATED: The 'value_list' attribute is deprecated. " - r"Use .ordered_data\(\)") + r"Use .ordered_data\(\)", + ) def test_check_values(self): m = ConcreteModel() - m.I = Set(ordered=True, initialize=[1,3,2]) + m.I = Set(ordered=True, initialize=[1, 3, 2]) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): self.assertTrue(m.I.check_values()) self.assertRegex( output.getvalue(), - r"^DEPRECATED: check_values\(\) is deprecated: Sets only " - r"contain valid") + r"^DEPRECATED: check_values\(\) is deprecated: Sets only " r"contain valid", + ) - m.J = m.I*m.I + m.J = m.I * m.I output = StringIO() with LoggingIntercept(output, 'pyomo.core', logging.DEBUG): self.assertTrue(m.J.check_values()) self.assertRegex( - output.getvalue(), - r"^DEPRECATED: check_values\(\) is deprecated:") + output.getvalue(), r"^DEPRECATED: check_values\(\) is deprecated:" + ) # We historically supported check_values on indexed sets - m.K = Set([1,2], ordered=True, initialize=[1,3,2]) + m.K = Set([1, 2], ordered=True, initialize=[1, 3, 2]) output = StringIO() with LoggingIntercept(output, 'pyomo.core'): self.assertTrue(m.K.check_values()) self.assertRegex( output.getvalue(), - r"^DEPRECATED: check_values\(\) is deprecated: Sets only " - r"contain valid") + r"^DEPRECATED: check_values\(\) is deprecated: Sets only " r"contain valid", + ) def test_getitem(self): m = ConcreteModel() - m.I = Set(initialize=['a','b']) + m.I = Set(initialize=['a', 'b']) with LoggingIntercept() as OUT: self.assertIs(m.I[None], m.I) self.assertEqual(OUT.getvalue(), "") @@ -5649,24 +5956,26 @@ def test_getitem(self): self.assertRegex( OUT.getvalue().replace('\n', ' '), r"^DEPRECATED: Using __getitem__ to return a set value from " - r"its \(ordered\) position is deprecated. Please use at\(\)") + r"its \(ordered\) position is deprecated. Please use at\(\)", + ) with LoggingIntercept() as OUT: self.assertEqual(m.I.card(2), 'b') self.assertRegex( OUT.getvalue().replace('\n', ' '), r"^DEPRECATED: card\(\) was incorrectly added to the Set API. " - r"Please use at\(\)") - + r"Please use at\(\)", + ) class TestIssues(unittest.TestCase): def test_issue_43(self): model = ConcreteModel() - model.Jobs = Set(initialize=[0,1,2,3]) - model.Dummy = Set(model.Jobs, within=model.Jobs, - initialize=lambda m,i: range(i)) - model.Cars = Set(initialize=['a','b']) + model.Jobs = Set(initialize=[0, 1, 2, 3]) + model.Dummy = Set( + model.Jobs, within=model.Jobs, initialize=lambda m, i: range(i) + ) + model.Cars = Set(initialize=['a', 'b']) a = model.Cars * model.Dummy[1] self.assertEqual(len(a), 2) @@ -5690,12 +5999,13 @@ def test_issue_116(self): with LoggingIntercept(output, 'pyomo.core'): self.assertTrue(m.s in m.s) self.assertIn( - "Testing for set subsets with 'a in b' is deprecated.", - output.getvalue() + "Testing for set subsets with 'a in b' is deprecated.", output.getvalue() ) # Note that pypy raises a different exception from cpython - err = ("((unhashable type: 'OrderedScalarSet')" - "|('OrderedScalarSet' objects are unhashable))") + err = ( + "((unhashable type: 'OrderedScalarSet')" + "|('OrderedScalarSet' objects are unhashable))" + ) with self.assertRaisesRegex(TypeError, err): self.assertFalse(m.s in m.t) with self.assertRaisesRegex(TypeError, err): @@ -5705,15 +6015,15 @@ def test_issue_116(self): def test_issue_121(self): model = ConcreteModel() - model.s = Set(initialize=[1,2,3]) - self.assertEqual(list(model.s), [1,2,3]) - model.s = [3,9] - self.assertEqual(list(model.s), [3,9]) + model.s = Set(initialize=[1, 2, 3]) + self.assertEqual(list(model.s), [1, 2, 3]) + model.s = [3, 9] + self.assertEqual(list(model.s), [3, 9]) def test_issue_134(self): m = ConcreteModel() - m.I = Set(initialize=[1,2]) - m.J = Set(initialize=[4,5]) + m.I = Set(initialize=[1, 2]) + m.J = Set(initialize=[4, 5]) m.IJ = m.I * m.J self.assertEqual(len(m.IJ), 4) self.assertEqual(m.IJ.dimen, 2) @@ -5724,7 +6034,7 @@ def test_issue_134(self): self.assertEqual(m.IJ.dimen, 2) def test_issue_142(self): - CHOICES = [((1,2,3), 4,3), ((1,2,2), 4,3), ((1,3,3), 4,3)] + CHOICES = [((1, 2, 3), 4, 3), ((1, 2, 2), 4, 3), ((1, 3, 3), 4, 3)] try: _oldFlatten = normalize_index.flatten @@ -5734,19 +6044,22 @@ def test_issue_142(self): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.CHOICES = Set(initialize=CHOICES, dimen=3) - self.assertIn('Ignoring non-None dimen (3) for set CHOICES', - output.getvalue()) + self.assertIn( + 'Ignoring non-None dimen (3) for set CHOICES', output.getvalue() + ) self.assertEqual(m.CHOICES.dimen, None) m.x = Var(m.CHOICES) + def c_rule(m, a, b, c): - return m.x[a,b,c] == 0 + return m.x[a, b, c] == 0 + m.c = Constraint(m.CHOICES, rule=c_rule) output = StringIO() m.CHOICES.pprint(ostream=output) m.x.pprint(ostream=output) m.c.pprint(ostream=output) - ref=""" + ref = """ CHOICES : Size=1, Index=None, Ordered=Insertion Key : Dimen : Domain : Size : Members None : None : Any : 3 : {((1, 2, 3), 4, 3), ((1, 2, 2), 4, 3), ((1, 3, 3), 4, 3)} @@ -5768,18 +6081,20 @@ def c_rule(m, a, b, c): output = StringIO() with LoggingIntercept(output, 'pyomo.core'): m.CHOICES = Set(initialize=CHOICES) - self.assertEqual('',output.getvalue()) + self.assertEqual('', output.getvalue()) self.assertEqual(m.CHOICES.dimen, 5) m.x = Var(m.CHOICES) + def c_rule(m, a1, a2, a3, b, c): - return m.x[a1,a2,a3,b,c] == 0 + return m.x[a1, a2, a3, b, c] == 0 + m.c = Constraint(m.CHOICES, rule=c_rule) output = StringIO() m.CHOICES.pprint(ostream=output) m.x.pprint(ostream=output) m.c.pprint(ostream=output) - ref=""" + ref = """ CHOICES : Size=1, Index=None, Ordered=Insertion Key : Dimen : Domain : Size : Members None : 5 : Any : 3 : {(1, 2, 3, 4, 3), (1, 2, 2, 4, 3), (1, 3, 3, 4, 3)} @@ -5799,10 +6114,10 @@ def c_rule(m, a1, a2, a3, b, c): normalize_index.flatten = _oldFlatten def test_issue_148(self): - legal = set(['a','b','c']) + legal = set(['a', 'b', 'c']) m = ConcreteModel() - m.s = Set(initialize=['a','b'], within=legal) - self.assertEqual(set(m.s), {'a','b'}) + m.s = Set(initialize=['a', 'b'], within=legal) + self.assertEqual(set(m.s), {'a', 'b'}) with self.assertRaisesRegex(ValueError, 'Cannot add value d to Set s'): m.s.add('d') @@ -5811,8 +6126,8 @@ def test_issue_165(self): m.x = Var() m.y = Var(domain=Binary) m_binaries = [ - v for v in m.component_data_objects( - ctype=Var, descend_into=True) + v + for v in m.component_data_objects(ctype=Var, descend_into=True) if v.domain is Binary and not v.fixed ] self.assertEqual(len(m_binaries), 1) @@ -5820,8 +6135,8 @@ def test_issue_165(self): m2 = m.clone() m2_binaries = [ - v for v in m2.component_data_objects( - ctype=Var, descend_into=True) + v + for v in m2.component_data_objects(ctype=Var, descend_into=True) if v.domain is Binary and not v.fixed ] self.assertEqual(len(m2_binaries), 1) @@ -5829,8 +6144,8 @@ def test_issue_165(self): def test_issue_191(self): m = ConcreteModel() - m.s = Set(['s1','s2'], initialize=[1,2,3]) - m.s2 = Set(initialize=['a','b','c']) + m.s = Set(['s1', 's2'], initialize=[1, 2, 3]) + m.s2 = Set(initialize=['a', 'b', 'c']) m.p = Param(m.s['s1'], initialize=10) temp = m.s['s1'] * m.s2 @@ -5863,23 +6178,20 @@ def test_issue_358(self): def _test(b, x, y, z): print(x, y, z) + m.test = Block(m.set_mult, m.s3, rule=_test) self.assertEqual(len(m.test), 1) m.test2 = Block(m.set_mult, m.s3, rule=_test) self.assertEqual(len(m.test2), 1) def test_issue_637(self): - constraints = { - c for c in itertools.product(['constrA', 'constrB'], range(5)) - } - vars = { - v for v in itertools.product(['var1', 'var2', 'var3'], range(5)) - } + constraints = {c for c in itertools.product(['constrA', 'constrB'], range(5))} + vars = {v for v in itertools.product(['var1', 'var2', 'var3'], range(5))} matrix_coefficients = {m for m in itertools.product(constraints, vars)} m = ConcreteModel() m.IDX = Set(initialize=matrix_coefficients) m.Matrix = Param(m.IDX, default=0) - self.assertEqual(len(m.Matrix), 2*5*3*5) + self.assertEqual(len(m.Matrix), 2 * 5 * 3 * 5) def test_issue_758(self): m = ConcreteModel() @@ -5888,13 +6200,15 @@ def test_issue_758(self): self.assertEqual(m.I.next(1), 2) self.assertEqual(m.I.next(4), 5) with self.assertRaisesRegex( - IndexError, "Cannot advance past the end of the Set"): + IndexError, "Cannot advance past the end of the Set" + ): m.I.next(5) self.assertEqual(m.I.prev(2), 1) self.assertEqual(m.I.prev(5), 4) with self.assertRaisesRegex( - IndexError, "Cannot advance before the beginning of the Set"): + IndexError, "Cannot advance before the beginning of the Set" + ): m.I.prev(1) self.assertEqual(m.I.nextw(1), 2) @@ -5920,32 +6234,32 @@ def test_issue_835(self): out2 = StringIO() model.OS.pprint(ostream=out2) - self.assertEqual( - out1.getvalue().strip(), - out2.getvalue().strip()[1:], - ) + self.assertEqual(out1.getvalue().strip(), out2.getvalue().strip()[1:]) @unittest.skipIf(NamedTuple is None, "typing module not available") def test_issue_938(self): self.maxDiff = None NodeKey = NamedTuple('NodeKey', [('id', int)]) - ArcKey = NamedTuple('ArcKey', - [('node_from', NodeKey), ('node_to', NodeKey)]) + ArcKey = NamedTuple('ArcKey', [('node_from', NodeKey), ('node_to', NodeKey)]) + def build_model(): model = ConcreteModel() - model.node_keys = Set(doc='Set of nodes', - initialize=[NodeKey(0), NodeKey(1)]) - model.arc_keys = Set(doc='Set of arcs', - within=model.node_keys * model.node_keys, - initialize=[ - ArcKey(NodeKey(0), NodeKey(0)), - ArcKey(NodeKey(0), NodeKey(1)), - ]) - model.arc_variables = Var(model.arc_keys, - within=Binary) + model.node_keys = Set( + doc='Set of nodes', initialize=[NodeKey(0), NodeKey(1)] + ) + model.arc_keys = Set( + doc='Set of arcs', + within=model.node_keys * model.node_keys, + initialize=[ + ArcKey(NodeKey(0), NodeKey(0)), + ArcKey(NodeKey(0), NodeKey(1)), + ], + ) + model.arc_variables = Var(model.arc_keys, within=Binary) def objective_rule(model_arg): return sum(var for var in model_arg.arc_variables.values()) + model.obj = Objective(rule=objective_rule) return model @@ -6039,7 +6353,7 @@ def b_rule(m): def test_issue_1112(self): m = ConcreteModel() - m.a = Set(initialize=[1,2,3]) + m.a = Set(initialize=[1, 2, 3]) # vals = list(m.a.values()) self.assertEqual(len(vals), 1) diff --git a/pyomo/core/tests/unit/test_sets.py b/pyomo/core/tests/unit/test_sets.py index 11d2d773b77..47cc14ce181 100644 --- a/pyomo/core/tests/unit/test_sets.py +++ b/pyomo/core/tests/unit/test_sets.py @@ -33,33 +33,57 @@ from os.path import abspath, dirname from io import StringIO -currdir = dirname(abspath(__file__))+os.sep +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest import pyomo.core.base from pyomo.core.base.util import flatten_tuple -from pyomo.environ import (Set, SetOf, RangeSet, Param, ConcreteModel, - AbstractModel, Expression, EmptySet, - NonPositiveIntegers, - NonPositiveReals, PositiveReals, NegativeReals, - IntegerSet, NegativeIntegers, - PositiveIntegers, RealSet, BooleanSet, - IntegerInterval, RealInterval, Binary, - PercentFraction, UnitInterval, NonNegativeIntegers, - Integers, NonNegativeReals, Boolean, Reals, - Any, value, set_options, simple_set_rule) +from pyomo.environ import ( + Set, + SetOf, + RangeSet, + Param, + ConcreteModel, + AbstractModel, + Expression, + EmptySet, + NonPositiveIntegers, + NonPositiveReals, + PositiveReals, + NegativeReals, + IntegerSet, + NegativeIntegers, + PositiveIntegers, + RealSet, + BooleanSet, + IntegerInterval, + RealInterval, + Binary, + PercentFraction, + UnitInterval, + NonNegativeIntegers, + Integers, + NonNegativeReals, + Boolean, + Reals, + Any, + value, + set_options, + simple_set_rule, +) from pyomo.core.base.set import _AnySet, RangeDifferenceError _has_numpy = False try: import numpy + _has_numpy = True except: pass -class PyomoModel(unittest.TestCase): +class PyomoModel(unittest.TestCase): def setUp(self): self.model = AbstractModel() @@ -67,12 +91,11 @@ def tearDown(self): self.model = None self.instance = None - def construct(self,filename): + def construct(self, filename): self.instance = self.model.create_instance(filename) class SimpleSetA(PyomoModel): - def setUp(self): # # Create Model @@ -81,8 +104,8 @@ def setUp(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") - OUTPUT.write( "data; set A := 1 3 5 7; end;\n" ) + OUTPUT = open(currdir + "setA.dat", "w") + OUTPUT.write("data; set A := 1 3 5 7; end;\n") OUTPUT.close() # # Create model instance @@ -91,60 +114,80 @@ def setUp(self): # # Misc datasets # - self.model.tmpset1 = Set(initialize=[1,3,5,7]) - self.model.tmpset2 = Set(initialize=[1,2,3,5,7]) - self.model.tmpset3 = Set(initialize=[2,3,5,7,9]) + self.model.tmpset1 = Set(initialize=[1, 3, 5, 7]) + self.model.tmpset2 = Set(initialize=[1, 2, 3, 5, 7]) + self.model.tmpset3 = Set(initialize=[2, 3, 5, 7, 9]) - self.model.setunion = Set(initialize=[1,2,3,5,7,9]) - self.model.setintersection = Set(initialize=[3,5,7]) - self.model.setxor = Set(initialize=[1,2,9]) + self.model.setunion = Set(initialize=[1, 2, 3, 5, 7, 9]) + self.model.setintersection = Set(initialize=[3, 5, 7]) + self.model.setxor = Set(initialize=[1, 2, 9]) self.model.setdiff = Set(initialize=[1]) - self.model.setmul = Set(initialize=[(1,2), (1,3), (1,5), (1,7), (1,9), - (3,2), (3,3), (3,5), (3,7), (3,9), - (5,2), (5,3), (5,5), (5,7), (5,9), - (7,2), (7,3), (7,5), (7,7), (7,9)]) - - self.instance = self.model.create_instance(currdir+"setA.dat") - - self.e1=1 - self.e2=2 - self.e3=3 - self.e4=4 - self.e5=5 - self.e6=6 + self.model.setmul = Set( + initialize=[ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (1, 9), + (3, 2), + (3, 3), + (3, 5), + (3, 7), + (3, 9), + (5, 2), + (5, 3), + (5, 5), + (5, 7), + (5, 9), + (7, 2), + (7, 3), + (7, 5), + (7, 7), + (7, 9), + ] + ) + + self.instance = self.model.create_instance(currdir + "setA.dat") + + self.e1 = 1 + self.e2 = 2 + self.e3 = 3 + self.e4 = 4 + self.e5 = 5 + self.e6 = 6 def tearDown(self): # # Remove Set 'A' data file # - if os.path.exists(currdir+"setA.dat"): - os.remove(currdir+"setA.dat") + if os.path.exists(currdir + "setA.dat"): + os.remove(currdir + "setA.dat") PyomoModel.tearDown(self) def test_len(self): """Check that a simple set of numeric elements has the right size""" - self.assertEqual( len(self.instance.A), 4) + self.assertEqual(len(self.instance.A), 4) def test_data(self): """Check that we can access the underlying set data""" - self.assertEqual( len(self.instance.A.data()), 4) + self.assertEqual(len(self.instance.A.data()), 4) def test_dim(self): """Check that a simple set has dimension zero for its indexing""" - self.assertEqual( self.instance.A.dim(), 0) + self.assertEqual(self.instance.A.dim(), 0) def test_clear(self): """Check the clear() method empties the set""" self.instance.A.clear() - self.assertEqual( len(self.instance.A), 0) + self.assertEqual(len(self.instance.A), 0) def test_virtual(self): """Check if this is not a virtual set""" - self.assertEqual( self.instance.A.virtual, False) + self.assertEqual(self.instance.A.virtual, False) def test_bounds(self): """Verify the bounds on this set""" - self.assertEqual( self.instance.A.bounds(), (1,7)) + self.assertEqual(self.instance.A.bounds(), (1, 7)) def test_check_values(self): """Check if the values added to this set are valid""" @@ -155,10 +198,10 @@ def test_check_values(self): def test_addValid(self): """Check that we can add valid set elements""" - self.instance.A.add(self.e2,self.e4) - self.assertEqual( len(self.instance.A), 6) - self.assertFalse( self.e2 not in self.instance.A, "Cannot find new element in A") - self.assertFalse( self.e4 not in self.instance.A, "Cannot find new element in A") + self.instance.A.add(self.e2, self.e4) + self.assertEqual(len(self.instance.A), 6) + self.assertFalse(self.e2 not in self.instance.A, "Cannot find new element in A") + self.assertFalse(self.e4 not in self.instance.A, "Cannot find new element in A") def test_addInvalid(self): """Check that we get an error when adding invalid set elements""" @@ -166,31 +209,31 @@ def test_addInvalid(self): # This verifies that by default, all set elements are valid. That # is, the default within is None # - self.assertEqual( self.instance.A.domain, Any) - self.instance.A.add('2','3','4') - self.assertFalse( '2' not in self.instance.A, "Found invalid new element in A") + self.assertEqual(self.instance.A.domain, Any) + self.instance.A.add('2', '3', '4') + self.assertFalse('2' not in self.instance.A, "Found invalid new element in A") def test_removeValid(self): """Check that we can remove a valid set element""" self.instance.A.remove(self.e3) - self.assertEqual( len(self.instance.A), 3) - self.assertFalse( 3 in self.instance.A, "Found element in A that we removed") + self.assertEqual(len(self.instance.A), 3) + self.assertFalse(3 in self.instance.A, "Found element in A that we removed") def test_removeInvalid(self): """Check that we fail to remove an invalid set element""" self.assertRaises(KeyError, self.instance.A.remove, 2) - self.assertEqual( len(self.instance.A), 4) + self.assertEqual(len(self.instance.A), 4) def test_discardValid(self): """Check that we can discard a valid set element""" self.instance.A.discard(self.e3) - self.assertEqual( len(self.instance.A), 3) - self.assertFalse( 3 in self.instance.A, "Found element in A that we removed") + self.assertEqual(len(self.instance.A), 3) + self.assertFalse(3 in self.instance.A, "Found element in A that we removed") def test_discardInvalid(self): """Check that we fail to remove an invalid set element without an exception""" self.instance.A.discard(self.e2) - self.assertEqual( len(self.instance.A), 4) + self.assertEqual(len(self.instance.A), 4) def test_iterator(self): """Check that we can iterate through the set""" @@ -200,115 +243,120 @@ def test_iterator(self): self.assertTrue( self.tmp == set(self.instance.A.data()), "Set values found by the iterator appear to be different from " - "the underlying set (%s) (%s)" % ( - str(self.tmp), str(self.instance.A.data()))) + "the underlying set (%s) (%s)" + % (str(self.tmp), str(self.instance.A.data())), + ) def test_eq1(self): """Various checks for set equality and inequality (1)""" - self.assertEqual( self.instance.A == self.instance.tmpset1, True) - self.assertEqual( self.instance.tmpset1 == self.instance.A, True) - self.assertEqual( self.instance.A != self.instance.tmpset1, False) - self.assertEqual( self.instance.tmpset1 != self.instance.A, False) + self.assertEqual(self.instance.A == self.instance.tmpset1, True) + self.assertEqual(self.instance.tmpset1 == self.instance.A, True) + self.assertEqual(self.instance.A != self.instance.tmpset1, False) + self.assertEqual(self.instance.tmpset1 != self.instance.A, False) def test_eq2(self): """Various checks for set equality and inequality (2)""" - self.assertEqual( self.instance.A == self.instance.tmpset2, False) - self.assertEqual( self.instance.tmpset2 == self.instance.A, False) - self.assertEqual( self.instance.A != self.instance.tmpset2, True) - self.assertEqual( self.instance.tmpset2 != self.instance.A, True) + self.assertEqual(self.instance.A == self.instance.tmpset2, False) + self.assertEqual(self.instance.tmpset2 == self.instance.A, False) + self.assertEqual(self.instance.A != self.instance.tmpset2, True) + self.assertEqual(self.instance.tmpset2 != self.instance.A, True) def test_le1(self): """Various checks for set subset (1)""" - self.assertEqual( self.instance.A < self.instance.tmpset1, False) - self.assertEqual( self.instance.A <= self.instance.tmpset1, True) - self.assertEqual( self.instance.A > self.instance.tmpset1, False) - self.assertEqual( self.instance.A >= self.instance.tmpset1, True) - self.assertEqual( self.instance.tmpset1 < self.instance.A, False) - self.assertEqual( self.instance.tmpset1 <= self.instance.A, True) - self.assertEqual( self.instance.tmpset1 > self.instance.A, False) - self.assertEqual( self.instance.tmpset1 >= self.instance.A, True) + self.assertEqual(self.instance.A < self.instance.tmpset1, False) + self.assertEqual(self.instance.A <= self.instance.tmpset1, True) + self.assertEqual(self.instance.A > self.instance.tmpset1, False) + self.assertEqual(self.instance.A >= self.instance.tmpset1, True) + self.assertEqual(self.instance.tmpset1 < self.instance.A, False) + self.assertEqual(self.instance.tmpset1 <= self.instance.A, True) + self.assertEqual(self.instance.tmpset1 > self.instance.A, False) + self.assertEqual(self.instance.tmpset1 >= self.instance.A, True) def test_le2(self): """Various checks for set subset (2)""" - self.assertEqual( self.instance.A < self.instance.tmpset2, True) - self.assertEqual( self.instance.A <= self.instance.tmpset2, True) - self.assertEqual( self.instance.A > self.instance.tmpset2, False) - self.assertEqual( self.instance.A >= self.instance.tmpset2, False) - self.assertEqual( self.instance.tmpset2 < self.instance.A, False) - self.assertEqual( self.instance.tmpset2 <= self.instance.A, False) - self.assertEqual( self.instance.tmpset2 > self.instance.A, True) - self.assertEqual( self.instance.tmpset2 >= self.instance.A, True) + self.assertEqual(self.instance.A < self.instance.tmpset2, True) + self.assertEqual(self.instance.A <= self.instance.tmpset2, True) + self.assertEqual(self.instance.A > self.instance.tmpset2, False) + self.assertEqual(self.instance.A >= self.instance.tmpset2, False) + self.assertEqual(self.instance.tmpset2 < self.instance.A, False) + self.assertEqual(self.instance.tmpset2 <= self.instance.A, False) + self.assertEqual(self.instance.tmpset2 > self.instance.A, True) + self.assertEqual(self.instance.tmpset2 >= self.instance.A, True) def test_le3(self): """Various checks for set subset (3)""" - self.assertEqual( self.instance.A < self.instance.tmpset3, False) - self.assertEqual( self.instance.A <= self.instance.tmpset3, False) - self.assertEqual( self.instance.A > self.instance.tmpset3, False) - self.assertEqual( self.instance.A >= self.instance.tmpset3, False) - self.assertEqual( self.instance.tmpset3 < self.instance.A, False) - self.assertEqual( self.instance.tmpset3 <= self.instance.A, False) - self.assertEqual( self.instance.tmpset3 > self.instance.A, False) - self.assertEqual( self.instance.tmpset3 >= self.instance.A, False) + self.assertEqual(self.instance.A < self.instance.tmpset3, False) + self.assertEqual(self.instance.A <= self.instance.tmpset3, False) + self.assertEqual(self.instance.A > self.instance.tmpset3, False) + self.assertEqual(self.instance.A >= self.instance.tmpset3, False) + self.assertEqual(self.instance.tmpset3 < self.instance.A, False) + self.assertEqual(self.instance.tmpset3 <= self.instance.A, False) + self.assertEqual(self.instance.tmpset3 > self.instance.A, False) + self.assertEqual(self.instance.tmpset3 >= self.instance.A, False) def test_contains(self): """Various checks for contains() method""" - self.assertEqual( self.e1 in self.instance.A, True) - self.assertEqual( self.e2 in self.instance.A, False) - self.assertEqual( '2' in self.instance.A, False) + self.assertEqual(self.e1 in self.instance.A, True) + self.assertEqual(self.e2 in self.instance.A, False) + self.assertEqual('2' in self.instance.A, False) def test_or(self): """Check that set union works""" self.instance.tmp = self.instance.A | self.instance.tmpset3 self.instance.tmp.construct() - self.assertEqual( self.instance.tmp == self.instance.setunion, True) + self.assertEqual(self.instance.tmp == self.instance.setunion, True) def test_and(self): """Check that set intersection works""" self.instance.tmp = self.instance.A & self.instance.tmpset3 self.instance.tmp.construct() - self.assertEqual( self.instance.tmp == self.instance.setintersection, True) + self.assertEqual(self.instance.tmp == self.instance.setintersection, True) def test_xor(self): """Check that set exclusive or works""" self.instance.tmp = self.instance.A ^ self.instance.tmpset3 self.instance.tmp.construct() - self.assertEqual( self.instance.tmp == self.instance.setxor, True) + self.assertEqual(self.instance.tmp == self.instance.setxor, True) def test_diff(self): """Check that set difference works""" self.instance.tmp = self.instance.A - self.instance.tmpset3 self.instance.tmp.construct() - self.assertEqual( self.instance.tmp == self.instance.setdiff, True) + self.assertEqual(self.instance.tmp == self.instance.setdiff, True) def test_mul(self): """Check that set cross-product works""" self.instance.tmp = self.instance.A * self.instance.tmpset3 self.instance.tmp.construct() - self.assertEqual( self.instance.tmp == self.instance.setmul, True) + self.assertEqual(self.instance.tmp == self.instance.setmul, True) def test_filter_constructor(self): - """ Check that sets can filter out unwanted elements """ + """Check that sets can filter out unwanted elements""" + def evenFilter(model, el): return el % 2 == 0 - self.instance.tmp = Set(initialize=range(0,10), filter=evenFilter) - #self.instance.tmp.construct() - self.assertEqual(sorted([x for x in self.instance.tmp]), [0,2,4,6,8]) + + self.instance.tmp = Set(initialize=range(0, 10), filter=evenFilter) + # self.instance.tmp.construct() + self.assertEqual(sorted([x for x in self.instance.tmp]), [0, 2, 4, 6, 8]) def test_filter_attribute(self): - """ Check that sets can filter out unwanted elements """ + """Check that sets can filter out unwanted elements""" + def evenFilter(model, el): return el % 2 == 0 + # Note: we cannot use the (concrete) instance here: the set # would be immediately constructed and would never see the # filter m = AbstractModel() - m.tmp = Set(initialize=range(0,10), filter=evenFilter) - #m.tmp.filter = evenFilter + m.tmp = Set(initialize=range(0, 10), filter=evenFilter) + # m.tmp.filter = evenFilter m.tmp.construct() - self.assertEqual(sorted([x for x in m.tmp]), [0,2,4,6,8]) + self.assertEqual(sorted([x for x in m.tmp]), [0, 2, 4, 6, 8]) -class SimpleSetAordered(SimpleSetA): +class SimpleSetAordered(SimpleSetA): def setUp(self): # # Create Model @@ -317,7 +365,7 @@ def setUp(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set A := 1 3 5 7; end;\n") OUTPUT.close() # @@ -327,56 +375,75 @@ def setUp(self): # # Misc datasets # - self.model.tmpset1 = Set(initialize=[1,3,5,7]) - self.model.tmpset2 = Set(initialize=[1,2,3,5,7]) - self.model.tmpset3 = Set(initialize=[2,3,5,7,9]) + self.model.tmpset1 = Set(initialize=[1, 3, 5, 7]) + self.model.tmpset2 = Set(initialize=[1, 2, 3, 5, 7]) + self.model.tmpset3 = Set(initialize=[2, 3, 5, 7, 9]) - self.model.setunion = Set(initialize=[1,2,3,5,7,9]) - self.model.setintersection = Set(initialize=[3,5,7]) - self.model.setxor = Set(initialize=[1,2,9]) + self.model.setunion = Set(initialize=[1, 2, 3, 5, 7, 9]) + self.model.setintersection = Set(initialize=[3, 5, 7]) + self.model.setxor = Set(initialize=[1, 2, 9]) self.model.setdiff = Set(initialize=[1]) - self.model.setmul = Set(initialize=[(1,2), (1,3), (1,5), (1,7), (1,9), - (3,2), (3,3), (3,5), (3,7), (3,9), - (5,2), (5,3), (5,5), (5,7), (5,9), - (7,2), (7,3), (7,5), (7,7), (7,9)]) - - self.instance = self.model.create_instance(currdir+"setA.dat") - - self.e1=1 - self.e2=2 - self.e3=3 - self.e4=4 - self.e5=5 - self.e6=6 + self.model.setmul = Set( + initialize=[ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (1, 9), + (3, 2), + (3, 3), + (3, 5), + (3, 7), + (3, 9), + (5, 2), + (5, 3), + (5, 5), + (5, 7), + (5, 9), + (7, 2), + (7, 3), + (7, 5), + (7, 7), + (7, 9), + ] + ) + + self.instance = self.model.create_instance(currdir + "setA.dat") + + self.e1 = 1 + self.e2 = 2 + self.e3 = 3 + self.e4 = 4 + self.e5 = 5 + self.e6 = 6 def test_first(self): """Check that we can get the 'first' value in the set""" self.tmp = self.instance.A.first() - self.assertNotEqual( self.tmp, None ) - self.assertEqual( self.tmp, 1 ) + self.assertNotEqual(self.tmp, None) + self.assertEqual(self.tmp, 1) def test_ordered(self): - tmp=[] + tmp = [] for val in self.instance.A: tmp.append(val) - self.assertEqual( tmp, [1,3,5,7] ) + self.assertEqual(tmp, [1, 3, 5, 7]) def test_getitem(self): - self.assertEqual( self.instance.A[1], 1 ) - self.assertEqual( self.instance.A[2], 3 ) - self.assertEqual( self.instance.A[3], 5 ) - self.assertEqual( self.instance.A[4], 7 ) - self.assertEqual( self.instance.A[-1], 7 ) - self.assertEqual( self.instance.A[-2], 5 ) - self.assertEqual( self.instance.A[-3], 3 ) - self.assertEqual( self.instance.A[-4], 1 ) - self.assertRaises( IndexError, self.instance.A.__getitem__, 5) - self.assertRaises( IndexError, self.instance.A.__getitem__, 0) - self.assertRaises( IndexError, self.instance.A.__getitem__, -5) + self.assertEqual(self.instance.A[1], 1) + self.assertEqual(self.instance.A[2], 3) + self.assertEqual(self.instance.A[3], 5) + self.assertEqual(self.instance.A[4], 7) + self.assertEqual(self.instance.A[-1], 7) + self.assertEqual(self.instance.A[-2], 5) + self.assertEqual(self.instance.A[-3], 3) + self.assertEqual(self.instance.A[-4], 1) + self.assertRaises(IndexError, self.instance.A.__getitem__, 5) + self.assertRaises(IndexError, self.instance.A.__getitem__, 0) + self.assertRaises(IndexError, self.instance.A.__getitem__, -5) class TestRangeSet(SimpleSetA): - def setUp(self): # # Create Model @@ -385,32 +452,56 @@ def setUp(self): # # Create model instance # - self.model.A = RangeSet(1,5) + self.model.A = RangeSet(1, 5) # # Misc datasets # - self.model.tmpset1 = Set(initialize=[1,2,3,4,5]) - self.model.tmpset2 = Set(initialize=[1,2,3,4,5,7]) - self.model.tmpset3 = Set(initialize=[2,3,5,7,9]) - - self.model.setunion = Set(initialize=[1,2,3,4,5,7,9]) - self.model.setintersection = Set(initialize=[2,3,5]) - self.model.setxor = Set(initialize=[1,4,7,9]) - self.model.setdiff = Set(initialize=[1,4]) - self.model.setmul = Set(initialize=[(1,2), (1,3), (1,5), (1,7), (1,9), - (2,2), (2,3), (2,5), (2,7), (2,9), - (3,2), (3,3), (3,5), (3,7), (3,9), - (4,2), (4,3), (4,5), (4,7), (4,9), - (5,2), (5,3), (5,5), (5,7), (5,9)]) + self.model.tmpset1 = Set(initialize=[1, 2, 3, 4, 5]) + self.model.tmpset2 = Set(initialize=[1, 2, 3, 4, 5, 7]) + self.model.tmpset3 = Set(initialize=[2, 3, 5, 7, 9]) + + self.model.setunion = Set(initialize=[1, 2, 3, 4, 5, 7, 9]) + self.model.setintersection = Set(initialize=[2, 3, 5]) + self.model.setxor = Set(initialize=[1, 4, 7, 9]) + self.model.setdiff = Set(initialize=[1, 4]) + self.model.setmul = Set( + initialize=[ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (1, 9), + (2, 2), + (2, 3), + (2, 5), + (2, 7), + (2, 9), + (3, 2), + (3, 3), + (3, 5), + (3, 7), + (3, 9), + (4, 2), + (4, 3), + (4, 5), + (4, 7), + (4, 9), + (5, 2), + (5, 3), + (5, 5), + (5, 7), + (5, 9), + ] + ) self.instance = self.model.create_instance() - self.e1=1 - self.e2=2 - self.e3=3 - self.e4=4 - self.e5=5 - self.e6=6 + self.e1 = 1 + self.e2 = 2 + self.e3 = 3 + self.e4 = 4 + self.e5 = 5 + self.e6 = 6 def test_clear(self): """Check the clear() method empties the set""" @@ -425,27 +516,27 @@ def test_clear(self): def test_virtual(self): """Check if this is a virtual set""" - self.assertEqual( self.instance.A.virtual, True) + self.assertEqual(self.instance.A.virtual, True) def test_ordered_getitem(self): """Check if this is a virtual set""" - self.assertEqual( self.instance.A[1], 1) - self.assertEqual( self.instance.A[2], 2) - self.assertEqual( self.instance.A[3], 3) - self.assertEqual( self.instance.A[4], 4) - self.assertEqual( self.instance.A[5], 5) - self.assertEqual( self.instance.A[-1], 5) - self.assertEqual( self.instance.A[-2], 4) - self.assertEqual( self.instance.A[-3], 3) - self.assertEqual( self.instance.A[-4], 2) - self.assertEqual( self.instance.A[-5], 1) - self.assertRaises( IndexError, self.instance.A.__getitem__, 6) - self.assertRaises( IndexError, self.instance.A.__getitem__, 0) - self.assertRaises( IndexError, self.instance.A.__getitem__, -6) + self.assertEqual(self.instance.A[1], 1) + self.assertEqual(self.instance.A[2], 2) + self.assertEqual(self.instance.A[3], 3) + self.assertEqual(self.instance.A[4], 4) + self.assertEqual(self.instance.A[5], 5) + self.assertEqual(self.instance.A[-1], 5) + self.assertEqual(self.instance.A[-2], 4) + self.assertEqual(self.instance.A[-3], 3) + self.assertEqual(self.instance.A[-4], 2) + self.assertEqual(self.instance.A[-5], 1) + self.assertRaises(IndexError, self.instance.A.__getitem__, 6) + self.assertRaises(IndexError, self.instance.A.__getitem__, 0) + self.assertRaises(IndexError, self.instance.A.__getitem__, -6) def test_bounds(self): """Verify the bounds on this set""" - self.assertEqual( self.instance.A.bounds(), (1,5)) + self.assertEqual(self.instance.A.bounds(), (1, 5)) def test_addValid(self): """Check that we can add valid set elements""" @@ -459,34 +550,36 @@ def test_addInvalid(self): # is, the default within is None # with self.assertRaises(AttributeError): - self.instance.A.add('2','3','4') - self.assertFalse( '2' in self.instance.A, - "Value we attempted to add is not in A") + self.instance.A.add('2', '3', '4') + self.assertFalse( + '2' in self.instance.A, "Value we attempted to add is not in A" + ) def test_removeValid(self): """Check that we can remove a valid set element""" with self.assertRaises(AttributeError): self.instance.A.remove(self.e3) - self.assertEqual( len(self.instance.A), 5) - self.assertTrue( self.e3 in self.instance.A, "Element is still in A") + self.assertEqual(len(self.instance.A), 5) + self.assertTrue(self.e3 in self.instance.A, "Element is still in A") def test_removeInvalid(self): """Check that we fail to remove an invalid set element""" with self.assertRaises(AttributeError): self.instance.A.remove(6) - self.assertEqual( len(self.instance.A), 5) + self.assertEqual(len(self.instance.A), 5) def test_remove(self): - """ Check that the elements are properly removed by .remove """ + """Check that the elements are properly removed by .remove""" pass def test_discardValid(self): """Check that we can discard a valid set element""" with self.assertRaises(AttributeError): self.instance.A.discard(self.e3) - self.assertEqual( len(self.instance.A), 5) - self.assertTrue( self.e3 in self.instance.A, - "Found element in A that attemped to discard") + self.assertEqual(len(self.instance.A), 5) + self.assertTrue( + self.e3 in self.instance.A, "Found element in A that attempted to discard" + ) def test_discardInvalid(self): """Check that we fail to remove an invalid set element without an exception""" @@ -494,79 +587,106 @@ def test_discardInvalid(self): def test_contains(self): """Various checks for contains() method""" - self.assertEqual( self.e1 in self.instance.A, True) - self.assertEqual( self.e2 in self.instance.A, True) - self.assertEqual( '2' in self.instance.A, False) + self.assertEqual(self.e1 in self.instance.A, True) + self.assertEqual(self.e2 in self.instance.A, True) + self.assertEqual('2' in self.instance.A, False) def test_len(self): """Check that a simple set of numeric elements has the right size""" - self.assertEqual( len(self.instance.A), 5) + self.assertEqual(len(self.instance.A), 5) def test_data(self): """Check that we can access the underlying set data""" - self.assertEqual( len(self.instance.A.data()), 5) + self.assertEqual(len(self.instance.A.data()), 5) def test_filter_constructor(self): - """ Check that RangeSets can filter out unwanted elements """ + """Check that RangeSets can filter out unwanted elements""" + def evenFilter(model, el): return el % 2 == 0 - self.instance.tmp = RangeSet(0,10, filter=evenFilter) - #self.instance.tmp.construct() - self.assertEqual(sorted([x for x in self.instance.tmp]), [0,2,4,6,8,10]) + + self.instance.tmp = RangeSet(0, 10, filter=evenFilter) + # self.instance.tmp.construct() + self.assertEqual(sorted([x for x in self.instance.tmp]), [0, 2, 4, 6, 8, 10]) def test_filter_attribute(self): - """ Check that RangeSets can filter out unwanted elements """ + """Check that RangeSets can filter out unwanted elements""" + def evenFilter(model, el): return el % 2 == 0 - self.instance.tmp = RangeSet(0,10, filter=evenFilter) - #self.instance.tmp.filter = evenFilter + + self.instance.tmp = RangeSet(0, 10, filter=evenFilter) + # self.instance.tmp.filter = evenFilter self.instance.tmp.construct() - self.assertEqual(sorted([x for x in self.instance.tmp]), [0,2,4,6,8,10]) + self.assertEqual(sorted([x for x in self.instance.tmp]), [0, 2, 4, 6, 8, 10]) class TestRangeSet2(TestRangeSet): - def setUp(self): # # Create Model # PyomoModel.setUp(self) + # # Create model instance # def filter_fn(model, val): return (val >= 1) and (val <= 5) - self.model.A = RangeSet(1,10, filter=filter_fn) + self.model.A = RangeSet(1, 10, filter=filter_fn) # # Misc datasets # - self.model.tmpset1 = Set(initialize=[1,2,3,4,5]) - self.model.tmpset2 = Set(initialize=[1,2,3,4,5,7]) - self.model.tmpset3 = Set(initialize=[2,3,5,7,9]) - - self.model.setunion = Set(initialize=[1,2,3,4,5,7,9]) - self.model.setintersection = Set(initialize=[2,3,5]) - self.model.setxor = Set(initialize=[1,4,7,9]) - self.model.setdiff = Set(initialize=[1,4]) - self.model.setmul = Set(initialize=[(1,2), (1,3), (1,5), (1,7), (1,9), - (2,2), (2,3), (2,5), (2,7), (2,9), - (3,2), (3,3), (3,5), (3,7), (3,9), - (4,2), (4,3), (4,5), (4,7), (4,9), - (5,2), (5,3), (5,5), (5,7), (5,9)]) + self.model.tmpset1 = Set(initialize=[1, 2, 3, 4, 5]) + self.model.tmpset2 = Set(initialize=[1, 2, 3, 4, 5, 7]) + self.model.tmpset3 = Set(initialize=[2, 3, 5, 7, 9]) + + self.model.setunion = Set(initialize=[1, 2, 3, 4, 5, 7, 9]) + self.model.setintersection = Set(initialize=[2, 3, 5]) + self.model.setxor = Set(initialize=[1, 4, 7, 9]) + self.model.setdiff = Set(initialize=[1, 4]) + self.model.setmul = Set( + initialize=[ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (1, 9), + (2, 2), + (2, 3), + (2, 5), + (2, 7), + (2, 9), + (3, 2), + (3, 3), + (3, 5), + (3, 7), + (3, 9), + (4, 2), + (4, 3), + (4, 5), + (4, 7), + (4, 9), + (5, 2), + (5, 3), + (5, 5), + (5, 7), + (5, 9), + ] + ) self.instance = self.model.create_instance() - self.e1=1 - self.e2=2 - self.e3=3 - self.e4=4 - self.e5=5 - self.e6=6 + self.e1 = 1 + self.e2 = 2 + self.e3 = 3 + self.e4 = 4 + self.e5 = 5 + self.e6 = 6 class TestRangeSet3(PyomoModel): - def setUp(self): # # Create Model @@ -575,115 +695,135 @@ def setUp(self): # # Create model instance # - self.model.A = RangeSet(1.0,5.0,0.8) + self.model.A = RangeSet(1.0, 5.0, 0.8) # # Misc datasets # - self.model.tmpset1 = Set(initialize=[1,2,3,4,5]) - self.model.tmpset2 = Set(initialize=[1,2,3,4,5,7]) - self.model.tmpset3 = Set(initialize=[2,3,5,7,9]) - - self.model.setunion = Set(initialize=[1,2,3,4,5,7,9]) - self.model.setintersection = Set(initialize=[2,3,5]) - self.model.setxor = Set(initialize=[1,4,7,9]) - self.model.setdiff = Set(initialize=[1,4]) - self.model.setmul = Set(initialize=[(1,2), (1,3), (1,5), (1,7), (1,9), - (2,2), (2,3), (2,5), (2,7), (2,9), - (3,2), (3,3), (3,5), (3,7), (3,9), - (4,2), (4,3), (4,5), (4,7), (4,9), - (5,2), (5,3), (5,5), (5,7), (5,9)]) + self.model.tmpset1 = Set(initialize=[1, 2, 3, 4, 5]) + self.model.tmpset2 = Set(initialize=[1, 2, 3, 4, 5, 7]) + self.model.tmpset3 = Set(initialize=[2, 3, 5, 7, 9]) + + self.model.setunion = Set(initialize=[1, 2, 3, 4, 5, 7, 9]) + self.model.setintersection = Set(initialize=[2, 3, 5]) + self.model.setxor = Set(initialize=[1, 4, 7, 9]) + self.model.setdiff = Set(initialize=[1, 4]) + self.model.setmul = Set( + initialize=[ + (1, 2), + (1, 3), + (1, 5), + (1, 7), + (1, 9), + (2, 2), + (2, 3), + (2, 5), + (2, 7), + (2, 9), + (3, 2), + (3, 3), + (3, 5), + (3, 7), + (3, 9), + (4, 2), + (4, 3), + (4, 5), + (4, 7), + (4, 9), + (5, 2), + (5, 3), + (5, 5), + (5, 7), + (5, 9), + ] + ) self.instance = self.model.create_instance() - self.e1=1 - self.e2=2 - self.e3=3 - self.e4=4 - self.e5=5 - self.e6=6 + self.e1 = 1 + self.e2 = 2 + self.e3 = 3 + self.e4 = 4 + self.e5 = 5 + self.e6 = 6 def test_bounds(self): """Verify the bounds on this set""" - self.assertEqual( self.instance.A.bounds(), (1,5)) + self.assertEqual(self.instance.A.bounds(), (1, 5)) class TestRangeSet_AltArgs(PyomoModel): - def test_ImmutableParams(self): model = ConcreteModel() model.lb = Param(initialize=1) model.ub = Param(initialize=5) model.A = RangeSet(model.lb, model.ub) - self.assertEqual( set(model.A.data()), set([1,2,3,4,5]) ) + self.assertEqual(set(model.A.data()), set([1, 2, 3, 4, 5])) def test_MutableParams(self): model = ConcreteModel() model.lb = Param(initialize=1, mutable=True) model.ub = Param(initialize=5, mutable=True) model.A = RangeSet(model.lb, model.ub) - self.assertEqual( set(model.A.data()), set([1,2,3,4,5]) ) + self.assertEqual(set(model.A.data()), set([1, 2, 3, 4, 5])) model.lb = 2 model.ub = 4 model.B = RangeSet(model.lb, model.ub) # Note: rangesets are constant -- even if the mutable param # under the hood changes - self.assertEqual( set(model.A.data()), set([1,2,3,4,5]) ) - self.assertEqual( set(model.B.data()), set([2,3,4]) ) + self.assertEqual(set(model.A.data()), set([1, 2, 3, 4, 5])) + self.assertEqual(set(model.B.data()), set([2, 3, 4])) def test_Expressions(self): model = ConcreteModel() model.p = Param(initialize=1, mutable=True) - model.lb = Expression(expr=model.p*2-1) - model.ub = Expression(expr=model.p*5) + model.lb = Expression(expr=model.p * 2 - 1) + model.ub = Expression(expr=model.p * 5) model.A = RangeSet(model.lb, model.ub) - self.assertEqual( set(model.A.data()), set([1,2,3,4,5]) ) + self.assertEqual(set(model.A.data()), set([1, 2, 3, 4, 5])) model.p = 2 model.B = RangeSet(model.lb, model.ub) # Note: rangesets are constant -- even if the mutable param # under the hood changes - self.assertEqual( set(model.A.data()), set([1,2,3,4,5]) ) - self.assertEqual( set(model.B.data()), set([3,4,5,6,7,8,9,10]) ) - + self.assertEqual(set(model.A.data()), set([1, 2, 3, 4, 5])) + self.assertEqual(set(model.B.data()), set([3, 4, 5, 6, 7, 8, 9, 10])) class TestRangeSetMisc(unittest.TestCase): - def test_constructor1(self): - a=RangeSet(10) + a = RangeSet(10) a.construct() - tmp=[] + tmp = [] for i in a: tmp.append(i) - self.assertEqual(tmp, list(range(1,11))) - self.assertEqual( a.bounds(), (1,10)) - + self.assertEqual(tmp, list(range(1, 11))) + self.assertEqual(a.bounds(), (1, 10)) def test_constructor2(self): - a=RangeSet(1,10,2) + a = RangeSet(1, 10, 2) a.construct() - tmp=[] + tmp = [] for i in a: tmp.append(i) - self.assertEqual(tmp, list(range(1,11,2))) - self.assertEqual( a.bounds(), (1,9)) + self.assertEqual(tmp, list(range(1, 11, 2))) + self.assertEqual(a.bounds(), (1, 9)) def test_constructor3(self): - model=AbstractModel() - model.a=Param(initialize=1) - model.b=Param(initialize=2) - model.c=Param(initialize=10) - model.d=RangeSet( model.a*model.a, model.c*model.a, model.a*model.b) - instance=model.create_instance() - tmp=[] + model = AbstractModel() + model.a = Param(initialize=1) + model.b = Param(initialize=2) + model.c = Param(initialize=10) + model.d = RangeSet(model.a * model.a, model.c * model.a, model.a * model.b) + instance = model.create_instance() + tmp = [] for i in instance.d: tmp.append(i) - self.assertEqual(tmp, list(range(1,11,2))) - self.assertEqual( instance.d.bounds(), (1,9)) + self.assertEqual(tmp, list(range(1, 11, 2))) + self.assertEqual(instance.d.bounds(), (1, 9)) -class SimpleSetB(SimpleSetA): +class SimpleSetB(SimpleSetA): def setUp(self): # # Create Model @@ -692,7 +832,7 @@ def setUp(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set A := A1 A3 A5 A7; end;\n") OUTPUT.close() # @@ -704,32 +844,52 @@ def setUp(self): # # Misc datasets # - self.model.tmpset1 = Set(initialize=['A1','A3','A5','A7']) - self.model.tmpset2 = Set(initialize=['A1','A2','A3','A5','A7']) - self.model.tmpset3 = Set(initialize=['A2','A3','A5','A7','A9']) + self.model.tmpset1 = Set(initialize=['A1', 'A3', 'A5', 'A7']) + self.model.tmpset2 = Set(initialize=['A1', 'A2', 'A3', 'A5', 'A7']) + self.model.tmpset3 = Set(initialize=['A2', 'A3', 'A5', 'A7', 'A9']) - self.model.setunion = Set(initialize=['A1','A2','A3','A5','A7','A9']) - self.model.setintersection = Set(initialize=['A3','A5','A7']) - self.model.setxor = Set(initialize=['A1','A2','A9']) + self.model.setunion = Set(initialize=['A1', 'A2', 'A3', 'A5', 'A7', 'A9']) + self.model.setintersection = Set(initialize=['A3', 'A5', 'A7']) + self.model.setxor = Set(initialize=['A1', 'A2', 'A9']) self.model.setdiff = Set(initialize=['A1']) - self.model.setmul = Set(initialize=[('A1','A2'), ('A1','A3'), ('A1','A5'), ('A1','A7'), ('A1','A9'), - ('A3','A2'), ('A3','A3'), ('A3','A5'), ('A3','A7'), ('A3','A9'), - ('A5','A2'), ('A5','A3'), ('A5','A5'), ('A5','A7'), ('A5','A9'), - ('A7','A2'), ('A7','A3'), ('A7','A5'), ('A7','A7'), ('A7','A9')]) - - self.instance = self.model.create_instance(currdir+"setA.dat") - self.e1='A1' - self.e2='A2' - self.e3='A3' - self.e4='A4' - self.e5='A5' - self.e6='A6' + self.model.setmul = Set( + initialize=[ + ('A1', 'A2'), + ('A1', 'A3'), + ('A1', 'A5'), + ('A1', 'A7'), + ('A1', 'A9'), + ('A3', 'A2'), + ('A3', 'A3'), + ('A3', 'A5'), + ('A3', 'A7'), + ('A3', 'A9'), + ('A5', 'A2'), + ('A5', 'A3'), + ('A5', 'A5'), + ('A5', 'A7'), + ('A5', 'A9'), + ('A7', 'A2'), + ('A7', 'A3'), + ('A7', 'A5'), + ('A7', 'A7'), + ('A7', 'A9'), + ] + ) + + self.instance = self.model.create_instance(currdir + "setA.dat") + self.e1 = 'A1' + self.e2 = 'A2' + self.e3 = 'A3' + self.e4 = 'A4' + self.e5 = 'A5' + self.e6 = 'A6' def test_bounds(self): - self.assertEqual( self.instance.A.bounds(), ('A1','A7')) + self.assertEqual(self.instance.A.bounds(), ('A1', 'A7')) -class SimpleSetC(SimpleSetA): +class SimpleSetC(SimpleSetA): def setUp(self): # # Create Model @@ -738,7 +898,7 @@ def setUp(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set A := (A1,1) (A3,1) (A5,1) (A7,1); end;\n") OUTPUT.close() # @@ -750,36 +910,71 @@ def setUp(self): # # Misc datasets # - self.model.tmpset1 = Set(initialize=[('A1',1), ('A3',1), ('A5',1), ('A7',1)]) - self.model.tmpset2 = Set(initialize=[('A1',1),('A2',1),('A3',1),('A5',1),('A7',1)]) - self.model.tmpset3 = Set(initialize=[('A2',1),('A3',1),('A5',1),('A7',1),('A9',1)]) - - self.model.setunion = Set(initialize=[('A1',1),('A2',1),('A3',1),('A5',1),('A7',1),('A9',1)]) - self.model.setintersection = Set(initialize=[('A3',1),('A5',1),('A7',1)]) - self.model.setxor = Set(initialize=[('A1',1),('A2',1),('A9',1)]) - self.model.setdiff = Set(initialize=[('A1',1)]) - self.model.setmul = Set(initialize=[(('A1',1,'A2',1)), (('A1',1,'A3',1)), (('A1',1,'A5',1)), (('A1',1,'A7',1)), (('A1',1,'A9',1)), - (('A3',1,'A2',1)), (('A3',1,'A3',1)), (('A3',1,'A5',1)), (('A3',1,'A7',1)), (('A3',1,'A9',1)), - (('A5',1,'A2',1)), (('A5',1,'A3',1)), (('A5',1,'A5',1)), (('A5',1,'A7',1)), (('A5',1,'A9',1)), - (('A7',1,'A2',1)), (('A7',1,'A3',1)), (('A7',1,'A5',1)), (('A7',1,'A7',1)), (('A7',1,'A9',1))]) - - self.instance = self.model.create_instance(currdir+"setA.dat") - self.e1=('A1',1) - self.e2=('A2',1) - self.e3=('A3',1) - self.e4=('A4',1) - self.e5=('A5',1) - self.e6=('A6',1) + self.model.tmpset1 = Set( + initialize=[('A1', 1), ('A3', 1), ('A5', 1), ('A7', 1)] + ) + self.model.tmpset2 = Set( + initialize=[('A1', 1), ('A2', 1), ('A3', 1), ('A5', 1), ('A7', 1)] + ) + self.model.tmpset3 = Set( + initialize=[('A2', 1), ('A3', 1), ('A5', 1), ('A7', 1), ('A9', 1)] + ) + + self.model.setunion = Set( + initialize=[ + ('A1', 1), + ('A2', 1), + ('A3', 1), + ('A5', 1), + ('A7', 1), + ('A9', 1), + ] + ) + self.model.setintersection = Set(initialize=[('A3', 1), ('A5', 1), ('A7', 1)]) + self.model.setxor = Set(initialize=[('A1', 1), ('A2', 1), ('A9', 1)]) + self.model.setdiff = Set(initialize=[('A1', 1)]) + self.model.setmul = Set( + initialize=[ + (('A1', 1, 'A2', 1)), + (('A1', 1, 'A3', 1)), + (('A1', 1, 'A5', 1)), + (('A1', 1, 'A7', 1)), + (('A1', 1, 'A9', 1)), + (('A3', 1, 'A2', 1)), + (('A3', 1, 'A3', 1)), + (('A3', 1, 'A5', 1)), + (('A3', 1, 'A7', 1)), + (('A3', 1, 'A9', 1)), + (('A5', 1, 'A2', 1)), + (('A5', 1, 'A3', 1)), + (('A5', 1, 'A5', 1)), + (('A5', 1, 'A7', 1)), + (('A5', 1, 'A9', 1)), + (('A7', 1, 'A2', 1)), + (('A7', 1, 'A3', 1)), + (('A7', 1, 'A5', 1)), + (('A7', 1, 'A7', 1)), + (('A7', 1, 'A9', 1)), + ] + ) + + self.instance = self.model.create_instance(currdir + "setA.dat") + self.e1 = ('A1', 1) + self.e2 = ('A2', 1) + self.e3 = ('A3', 1) + self.e4 = ('A4', 1) + self.e5 = ('A5', 1) + self.e6 = ('A6', 1) def tearDown(self): # # Remove Set 'A' data file # - os.remove(currdir+"setA.dat") + os.remove(currdir + "setA.dat") PyomoModel.tearDown(self) def test_bounds(self): - self.assertEqual( self.instance.A.bounds(), (('A1',1), ('A7',1))) + self.assertEqual(self.instance.A.bounds(), (('A1', 1), ('A7', 1))) def test_addInvalid(self): """Check that we get an error when adding invalid set elements""" @@ -787,19 +982,19 @@ def test_addInvalid(self): # This verifies that by default, all set elements are valid. That # is, the default within is None # - self.assertEqual( self.instance.A.domain, Any) + self.assertEqual(self.instance.A.domain, Any) try: - self.instance.A.add('2','3','4') + self.instance.A.add('2', '3', '4') except ValueError: pass else: self.fail("fail test_addInvalid") - self.assertFalse( '2' in self.instance.A, "Found invalid new element in A") - self.instance.A.add(('2','3')) + self.assertFalse('2' in self.instance.A, "Found invalid new element in A") + self.instance.A.add(('2', '3')) + @unittest.skipIf(not _has_numpy, "Numpy is not installed") class SimpleSetNumpy(SimpleSetA): - def setUp(self): # # Create Model @@ -808,7 +1003,7 @@ def setUp(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set A := 1.0 3 5.0 7.0; end;\n") OUTPUT.close() # @@ -820,45 +1015,64 @@ def setUp(self): # # Misc datasets # - self.model.tmpset1 = Set(initialize=[1.0,3.0,5,7]) - self.model.tmpset2 = Set(initialize=[1.0,2,3.0,5,7]) - self.model.tmpset3 = Set(initialize=[2,3.0,5,7,9.1]) + self.model.tmpset1 = Set(initialize=[1.0, 3.0, 5, 7]) + self.model.tmpset2 = Set(initialize=[1.0, 2, 3.0, 5, 7]) + self.model.tmpset3 = Set(initialize=[2, 3.0, 5, 7, 9.1]) - self.model.setunion = Set(initialize=[1.0,2,3.0,5,7,9.1]) - self.model.setintersection = Set(initialize=[3.0,5,7]) - self.model.setxor = Set(initialize=[1.0,2,9.1]) + self.model.setunion = Set(initialize=[1.0, 2, 3.0, 5, 7, 9.1]) + self.model.setintersection = Set(initialize=[3.0, 5, 7]) + self.model.setxor = Set(initialize=[1.0, 2, 9.1]) self.model.setdiff = Set(initialize=[1.0]) - self.model.setmul = Set(initialize=[(1.0,2), (1.0,3.0), (1.0,5), (1.0,7), (1.0,9.1), - (3.0,2), (3.0,3.0), (3.0,5), (3.0,7), (3.0,9.1), - (5,2), (5,3.0), (5,5), (5,7), (5,9.1), - (7,2), (7,3.0), (7,5), (7,7), (7,9.1)]) - - self.instance = self.model.create_instance(currdir+"setA.dat") - self.e1=numpy.bool_(1) - self.e2=numpy.int_(2) - self.e3=numpy.float_(3.0) - self.e4=numpy.int_(4) - self.e5=numpy.int_(5) - self.e6=numpy.int_(6) + self.model.setmul = Set( + initialize=[ + (1.0, 2), + (1.0, 3.0), + (1.0, 5), + (1.0, 7), + (1.0, 9.1), + (3.0, 2), + (3.0, 3.0), + (3.0, 5), + (3.0, 7), + (3.0, 9.1), + (5, 2), + (5, 3.0), + (5, 5), + (5, 7), + (5, 9.1), + (7, 2), + (7, 3.0), + (7, 5), + (7, 7), + (7, 9.1), + ] + ) + + self.instance = self.model.create_instance(currdir + "setA.dat") + self.e1 = numpy.bool_(1) + self.e2 = numpy.int_(2) + self.e3 = numpy.float_(3.0) + self.e4 = numpy.int_(4) + self.e5 = numpy.int_(5) + self.e6 = numpy.int_(6) def test_numpy_bool(self): model = ConcreteModel() model.A = Set(initialize=[numpy.bool_(False), numpy.bool_(True)]) - self.assertEqual( model.A.bounds(), (0,1)) + self.assertEqual(model.A.bounds(), (0, 1)) def test_numpy_int(self): model = ConcreteModel() model.A = Set(initialize=[numpy.int_(1.0), numpy.int_(0.0)]) - self.assertEqual( model.A.bounds(), (0,1)) + self.assertEqual(model.A.bounds(), (0, 1)) def test_numpy_float(self): model = ConcreteModel() model.A = Set(initialize=[numpy.float_(1.0), numpy.float_(0.0)]) - self.assertEqual( model.A.bounds(), (0,1)) + self.assertEqual(model.A.bounds(), (0, 1)) class ArraySet(PyomoModel): - def setUp(self): # # Create Model @@ -867,8 +1081,10 @@ def setUp(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") - OUTPUT.write("data; set Z := A C; set A[A] := 1 3 5 7; set A[C] := 3 5 7 9; end;\n") + OUTPUT = open(currdir + "setA.dat", "w") + OUTPUT.write( + "data; set Z := A C; set A[A] := 1 3 5 7; set A[C] := 3 5 7 9; end;\n" + ) OUTPUT.close() # # Create model instance @@ -884,28 +1100,28 @@ def setUp(self): self.model.tmpset2 = Set() self.model.tmpset3 = Set() - self.model.S = RangeSet(0,5) - self.model.T = RangeSet(0,5) - self.model.R = RangeSet(0,3) - self.model.Q_a = Set(initialize=[1,3,5,7]) - self.model.Q_c = Set(initialize=[3,5,7,9]) + self.model.S = RangeSet(0, 5) + self.model.T = RangeSet(0, 5) + self.model.R = RangeSet(0, 3) + self.model.Q_a = Set(initialize=[1, 3, 5, 7]) + self.model.Q_c = Set(initialize=[3, 5, 7, 9]) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.e1=('A1',1) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.e1 = ('A1', 1) def Xtest_bounds(self): - self.assertEqual( self.instance.A.bounds(), None) + self.assertEqual(self.instance.A.bounds(), None) def test_getitem(self): """Check the access to items""" try: - tmp=[] + tmp = [] for val in self.instance.A['A']: tmp.append(val) tmp.sort() except: self.fail("Problems getting a valid set from a set array") - self.assertEqual( tmp, [1,3,5,7]) + self.assertEqual(tmp, [1, 3, 5, 7]) try: tmp = self.instance.A['D'] except KeyError: @@ -915,10 +1131,10 @@ def test_getitem(self): def test_setitem(self): """Check the access to items""" - self.model.Z = Set(initialize=['A','C']) - self.model.A = Set(self.model.Z,initialize={'A':[1]}) + self.model.Z = Set(initialize=['A', 'C']) + self.model.A = Set(self.model.Z, initialize={'A': [1]}) self.instance = self.model.create_instance() - tmp=[1,6,9] + tmp = [1, 6, 9] self.instance.A['A'] = tmp self.instance.A['C'] = tmp @@ -931,7 +1147,7 @@ def test_setitem(self): def test_keys(self): """Check the keys for the array""" - tmp=list(self.instance.A.keys()) + tmp = list(self.instance.A.keys()) tmp.sort() self.assertEqual(tmp, ['A', 'C']) @@ -955,13 +1171,13 @@ def test_data(self): def test_dim(self): """Check that a simple set has dimension zero for its indexing""" - self.assertEqual( self.instance.A.dim(), 1) + self.assertEqual(self.instance.A.dim(), 1) def test_clear(self): """Check the clear() method empties the set""" self.instance.A.clear() for key in self.instance.A: - self.assertEqual( len(self.instance.A[key]), 0) + self.assertEqual(len(self.instance.A[key]), 0) def test_virtual(self): """Check if this is not a virtual set""" @@ -971,8 +1187,7 @@ def test_virtual(self): # pass # else: # self.fail("Set arrays do not have a virtual data element") - with self.assertRaisesRegex( - AttributeError, ".*no attribute 'virtual'"): + with self.assertRaisesRegex(AttributeError, ".*no attribute 'virtual'"): self.instance.A.virtual def test_check_values(self): @@ -980,7 +1195,7 @@ def test_check_values(self): # # This should not throw an exception here # - self.assertTrue( self.instance.A.check_values() ) + self.assertTrue(self.instance.A.check_values()) def test_first(self): """Check that we can get the 'first' value in the set""" @@ -1007,24 +1222,24 @@ def test_iterator(self): tmp = 0 for key in self.instance.A: tmp += len(self.instance.A[key]) - self.assertEqual( tmp, 8) + self.assertEqual(tmp, 8) def test_eq1(self): - """ Various checks for set equality and inequality (1) """ + """Various checks for set equality and inequality (1)""" self.assertEqual(self.instance.A != self.instance.tmpset1, True) self.assertEqual(self.instance.tmpset1 != self.instance.A, True) self.assertEqual(self.instance.A == self.instance.tmpset1, False) self.assertEqual(self.instance.tmpset1 == self.instance.A, False) def test_eq2(self): - """ Various checks for set equality and inequality (2) """ + """Various checks for set equality and inequality (2)""" self.assertEqual(self.instance.A == self.instance.tmpset2, False) self.assertEqual(self.instance.tmpset2 == self.instance.A, False) self.assertEqual(self.instance.A != self.instance.tmpset2, True) self.assertEqual(self.instance.tmpset2 != self.instance.A, True) def test_eq3(self): - """ Various checks for set equality and inequality (3) """ + """Various checks for set equality and inequality (3)""" # Each test should be done with the arguments on each side to check # for commutativity @@ -1060,60 +1275,64 @@ def test_eq3(self): def test_contains(self): """Various checks for contains() method""" tmp = self.e1 in self.instance.A - self.assertEqual( tmp, False ) + self.assertEqual(tmp, False) def test_or(self): """Check that set union works""" with self.assertRaisesRegex( - TypeError, r'Cannot apply a Set operator to an indexed Set ' - r'component \(A\)'): + TypeError, + r'Cannot apply a Set operator to an indexed Set ' r'component \(A\)', + ): self.instance.A | self.instance.tmpset3 def test_and(self): """Check that set intersection works""" with self.assertRaisesRegex( - TypeError, r'Cannot apply a Set operator to an indexed Set ' - r'component \(A\)'): + TypeError, + r'Cannot apply a Set operator to an indexed Set ' r'component \(A\)', + ): self.instance.A & self.instance.tmpset3 def test_xor(self): """Check that set exclusive or works""" with self.assertRaisesRegex( - TypeError, r'Cannot apply a Set operator to an indexed Set ' - r'component \(A\)'): + TypeError, + r'Cannot apply a Set operator to an indexed Set ' r'component \(A\)', + ): self.instance.A ^ self.instance.tmpset3 def test_diff(self): """Check that set difference works""" with self.assertRaisesRegex( - TypeError, r'Cannot apply a Set operator to an indexed Set ' - r'component \(A\)'): + TypeError, + r'Cannot apply a Set operator to an indexed Set ' r'component \(A\)', + ): self.instance.A - self.instance.tmpset3 def test_mul(self): """Check that set cross-product works""" with self.assertRaisesRegex( - TypeError, r'Cannot apply a Set operator to an indexed Set ' - r'component \(A\)'): + TypeError, + r'Cannot apply a Set operator to an indexed Set ' r'component \(A\)', + ): self.instance.A * self.instance.tmpset3 def test_override_values(self): m = ConcreteModel() - m.I = Set([1,2,3]) - m.I[1] = [1,2,3] - self.assertEqual(sorted(m.I[1]), [1,2,3]) - m.I[1] = [4,5,6] - self.assertEqual(sorted(m.I[1]), [4,5,6]) + m.I = Set([1, 2, 3]) + m.I[1] = [1, 2, 3] + self.assertEqual(sorted(m.I[1]), [1, 2, 3]) + m.I[1] = [4, 5, 6] + self.assertEqual(sorted(m.I[1]), [4, 5, 6]) - m.J = Set([1,2,3], ordered=True) - m.J[1] = [1,3,2] - self.assertEqual(list(m.J[1]), [1,3,2]) - m.J[1] = [5,4,6] - self.assertEqual(list(m.J[1]), [5,4,6]) + m.J = Set([1, 2, 3], ordered=True) + m.J[1] = [1, 3, 2] + self.assertEqual(list(m.J[1]), [1, 3, 2]) + m.J[1] = [5, 4, 6] + self.assertEqual(list(m.J[1]), [5, 4, 6]) class ArraySet2(PyomoModel): - def setUp(self): # # Create Model @@ -1122,15 +1341,17 @@ def setUp(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") - OUTPUT.write("data; set Z := A C; set Y := 1 2 ; set A[A,1] := 1 3 5 7; set A[C,2] := 3 5 7 9; end;") + OUTPUT = open(currdir + "setA.dat", "w") + OUTPUT.write( + "data; set Z := A C; set Y := 1 2 ; set A[A,1] := 1 3 5 7; set A[C,2] := 3 5 7 9; end;" + ) OUTPUT.close() # # Create model instance # self.model.Z = Set() self.model.Y = Set() - self.model.A = Set(self.model.Z,self.model.Y) + self.model.A = Set(self.model.Z, self.model.Y) # # Debugging # @@ -1140,31 +1361,30 @@ def setUp(self): self.model.tmpset2 = Set() self.model.tmpset3 = Set() - self.instance = self.model.create_instance(currdir+"setA.dat") - self.e1=('A1',1) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.e1 = ('A1', 1) def test_bounds(self): - self.assertEqual( self.instance.A['A',1].bounds(), (1,7)) + self.assertEqual(self.instance.A['A', 1].bounds(), (1, 7)) def test_getitem(self): """Check the access to items""" try: - tmp=[] - for val in self.instance.A['A',1]: + tmp = [] + for val in self.instance.A['A', 1]: tmp.append(val) tmp.sort() except: self.fail("Problems getting a valid subsetset from a set array") - self.assertEqual( tmp, [1,3,5,7]) + self.assertEqual(tmp, [1, 3, 5, 7]) try: - tmp = self.instance.A['A',2] + tmp = self.instance.A['A', 2] except: - self.fail( "Problems getting a valid uninitialized subset " - "from a set array" ) + self.fail("Problems getting a valid uninitialized subset from a set array") try: - tmp = self.instance.A['A',3] + tmp = self.instance.A['A', 3] except KeyError: pass else: @@ -1173,11 +1393,11 @@ def test_getitem(self): def Xtest_setitem(self): """Check the access to items""" try: - self.model.Y = Set(initialize=[1,2]) - self.model.Z = Set(initialize=['A','C']) - self.model.A = Set(self.model.Z,self.model.Y,initialize={'A':[1]}) + self.model.Y = Set(initialize=[1, 2]) + self.model.Z = Set(initialize=['A', 'C']) + self.model.A = Set(self.model.Z, self.model.Y, initialize={'A': [1]}) self.instance = self.model.create_instance() - tmp=[1,6,9] + tmp = [1, 6, 9] self.instance.A['A'] = tmp self.instance.A['C'] = tmp except: @@ -1191,7 +1411,7 @@ def Xtest_setitem(self): def Xtest_keys(self): """Check the keys for the array""" - tmp=self.instance.A.keys() + tmp = self.instance.A.keys() tmp.sort() self.assertEqual(tmp, ['A', 'C']) @@ -1215,17 +1435,17 @@ def Xtest_data(self): def Xtest_dim(self): """Check that a simple set has dimension zero for its indexing""" - self.assertEqual( self.instance.A.dim(), 1) + self.assertEqual(self.instance.A.dim(), 1) def Xtest_clear(self): """Check the clear() method empties the set""" self.instance.A.clear() for key in self.instance.A: - self.assertEqual( len(self.instance.A[key]), 0) + self.assertEqual(len(self.instance.A[key]), 0) def Xtest_virtual(self): """Check if this is not a virtual set""" - self.assertEqual( self.instance.A.virtual, False) + self.assertEqual(self.instance.A.virtual, False) def Xtest_check_values(self): """Check if the values added to this set are valid""" @@ -1259,15 +1479,15 @@ def Xtest_iterator(self): tmp = 0 for key in self.instance.A: tmp += len(self.instance.A[key]) - self.assertEqual( tmp, 8) + self.assertEqual(tmp, 8) def Xtest_eq1(self): """Various checks for set equality and inequality (1)""" try: - self.assertEqual( self.instance.A == self.instance.tmpset1, True) - self.assertEqual( self.instance.tmpset1 == self.instance.A, True) - self.assertEqual( self.instance.A != self.instance.tmpset1, False) - self.assertEqual( self.instance.tmpset1 != self.instance.A, False) + self.assertEqual(self.instance.A == self.instance.tmpset1, True) + self.assertEqual(self.instance.tmpset1 == self.instance.A, True) + self.assertEqual(self.instance.A != self.instance.tmpset1, False) + self.assertEqual(self.instance.tmpset1 != self.instance.A, False) except TypeError: pass else: @@ -1276,10 +1496,10 @@ def Xtest_eq1(self): def Xtest_eq2(self): """Various checks for set equality and inequality (2)""" try: - self.assertEqual( self.instance.A == self.instance.tmpset2, False) - self.assertEqual( self.instance.tmpset2 == self.instance.A, False) - self.assertEqual( self.instance.A != self.instance.tmpset2, True) - self.assertEqual( self.instance.tmpset2 != self.instance.A, True) + self.assertEqual(self.instance.A == self.instance.tmpset2, False) + self.assertEqual(self.instance.tmpset2 == self.instance.A, False) + self.assertEqual(self.instance.A != self.instance.tmpset2, True) + self.assertEqual(self.instance.tmpset2 != self.instance.A, True) except TypeError: pass else: @@ -1288,7 +1508,7 @@ def Xtest_eq2(self): def Xtest_contains(self): """Various checks for contains() method""" tmp = self.e1 in self.instance.A - self.assertEqual( tmp, False ) + self.assertEqual(tmp, False) def Xtest_or(self): """Check that set union works""" @@ -1337,11 +1557,10 @@ def Xtest_mul(self): class TestRealSet(unittest.TestCase): - def test_bounds(self): x = RealSet() self.assertEqual(x.bounds(), (None, None)) - x = RealSet(bounds=(1,2)) + x = RealSet(bounds=(1, 2)) self.assertEqual(x.bounds(), (1, 2)) def test_inequality_comparison_fails(self): @@ -1366,8 +1585,8 @@ def test_name(self): # After the set rewrite, RealSet is implemented on top of the # Reals global set # - #self.assertEqual(x.name, None) - #self.assertTrue('RealSet' in str(x)) + # self.assertEqual(x.name, None) + # self.assertTrue('RealSet' in str(x)) self.assertEqual(x.name, 'Reals') self.assertEqual('Reals', str(x)) x = RealSet(name="x") @@ -1503,7 +1722,7 @@ def test_RealInterval(self): self.assertTrue(-2.2 in x) self.assertTrue(-10 in x) - x = RealInterval(bounds=(-1,1)) + x = RealInterval(bounds=(-1, 1)) self.assertEqual(x.name, "'RealInterval(-1, 1)'") self.assertEqual(x.local_name, "RealInterval(-1, 1)") self.assertFalse(10 in x) @@ -1516,7 +1735,7 @@ def test_RealInterval(self): self.assertFalse(-2.2 in x) self.assertFalse(-10 in x) - x = RealInterval(bounds=(-1,1), name="JUNK") + x = RealInterval(bounds=(-1, 1), name="JUNK") self.assertEqual(x.name, "JUNK") self.assertFalse(10 in x) self.assertFalse(1.1 in x) @@ -1528,12 +1747,12 @@ def test_RealInterval(self): self.assertFalse(-2.2 in x) self.assertFalse(-10 in x) -class TestIntegerSet(unittest.TestCase): +class TestIntegerSet(unittest.TestCase): def test_bounds(self): x = IntegerSet() self.assertEqual(x.bounds(), (None, None)) - x = IntegerSet(bounds=(1,2)) + x = IntegerSet(bounds=(1, 2)) self.assertEqual(x.bounds(), (1, 2)) def test_inequality_comparison_fails(self): @@ -1548,10 +1767,10 @@ def test_inequality_comparison_fails(self): # x > y # with self.assertRaises(TypeError): # x >= y - self.assertFalse( x < y ) - self.assertTrue( x <= y ) - self.assertFalse( x > y ) - self.assertTrue( x >= y ) + self.assertFalse(x < y) + self.assertTrue(x <= y) + self.assertFalse(x > y) + self.assertTrue(x >= y) def test_name(self): x = IntegerSet() @@ -1657,7 +1876,7 @@ def test_IntegerInterval(self): self.assertFalse(-2.2 in x) self.assertTrue(-10 in x) - x = IntegerInterval(bounds=(-1,1)) + x = IntegerInterval(bounds=(-1, 1)) self.assertFalse(None in x) self.assertEqual(x.name, "'IntegerInterval(-1, 1)'") self.assertEqual(x.local_name, "IntegerInterval(-1, 1)") @@ -1671,7 +1890,7 @@ def test_IntegerInterval(self): self.assertFalse(-2.2 in x) self.assertFalse(-10 in x) - x = IntegerInterval(bounds=(-1,1), name="JUNK") + x = IntegerInterval(bounds=(-1, 1), name="JUNK") self.assertFalse(None in x) self.assertEqual(x.name, "JUNK") self.assertFalse(10 in x) @@ -1684,8 +1903,8 @@ def test_IntegerInterval(self): self.assertFalse(-2.2 in x) self.assertFalse(-10 in x) -class TestBooleanSet(unittest.TestCase): +class TestBooleanSet(unittest.TestCase): def test_bounds(self): x = BooleanSet() self.assertEqual(x.bounds(), (0, 1)) @@ -1772,8 +1991,8 @@ def test_Binary(self): self.assertFalse(-2.2 in x) self.assertFalse(-10 in x) -class TestAnySet(SimpleSetA): +class TestAnySet(SimpleSetA): def setUp(self): # # Create Model @@ -1783,45 +2002,45 @@ def setUp(self): # Create model instance # x = _AnySet() - #x.concrete=True + # x.concrete=True self.model.A = x - x.concrete=False + x.concrete = False # # Misc datasets # - self.model.tmpset1 = Set(initialize=[1,'3',5,7]) - self.model.tmpset2 = Set(initialize=[1,2,'3',5,7]) - self.model.tmpset3 = Set(initialize=[2,'3',5,7,9]) + self.model.tmpset1 = Set(initialize=[1, '3', 5, 7]) + self.model.tmpset2 = Set(initialize=[1, 2, '3', 5, 7]) + self.model.tmpset3 = Set(initialize=[2, '3', 5, 7, 9]) y = _AnySet() - #y.concrete=True + # y.concrete=True self.model.setunion = y - y.concrete=False - self.model.setintersection = Set(initialize=[1,'3',5,7]) + y.concrete = False + self.model.setintersection = Set(initialize=[1, '3', 5, 7]) self.model.setxor = Set(initialize=[]) self.model.setdiff = Set(initialize=[]) self.model.setmul = None self.instance = self.model.create_instance() - self.e1=1 - self.e2=2 - self.e3='3' - self.e4=4 - self.e5=5 - self.e6=6 + self.e1 = 1 + self.e2 = 2 + self.e3 = '3' + self.e4 = 4 + self.e5 = 5 + self.e6 = 6 def test_bounds(self): # In the set rewrite, bounds() always returns a tuple - self.assertEqual( self.instance.A.bounds(), (None, None)) + self.assertEqual(self.instance.A.bounds(), (None, None)) def test_contains(self): """Various checks for contains() method""" - self.assertEqual( self.e1 in self.instance.A, True) - self.assertEqual( self.e2 in self.instance.A, True) - self.assertEqual( '2' in self.instance.A, True) + self.assertEqual(self.e1 in self.instance.A, True) + self.assertEqual(self.e2 in self.instance.A, True) + self.assertEqual('2' in self.instance.A, True) def test_None1(self): - self.assertEqual( None in Any, True) + self.assertEqual(None in Any, True) def test_len(self): """Check that the set has the right size""" @@ -1833,8 +2052,7 @@ def test_len(self): # pass # else: # self.fail("test_len failure") - with self.assertRaisesRegex( - TypeError, "object of type 'Any' has no len()"): + with self.assertRaisesRegex(TypeError, "object of type 'Any' has no len()"): len(self.instance.A) def test_data(self): @@ -1848,7 +2066,7 @@ def test_clear(self): def test_virtual(self): """Check if this is not a virtual set""" - self.assertEqual( self.instance.A.virtual, True) + self.assertEqual(self.instance.A.virtual, True) def test_discardValid(self): """Check that we fail to remove an invalid set element without an exception""" @@ -1874,7 +2092,7 @@ def test_addInvalid(self): def test_addValid(self): """Check that we can add valid set elements""" - self.assertIs( self.instance.A.domain, Any) + self.assertIs(self.instance.A.domain, Any) with self.assertRaises(AttributeError): self.instance.A.add(2) @@ -1886,16 +2104,15 @@ def test_iterator(self): def test_eq1(self): """Various checks for set equality and inequality (1)""" - self.assertTrue(not(self.instance.A == self.instance.tmpset1)) - self.assertTrue(not(self.instance.tmpset1 == self.instance.A)) + self.assertTrue(not (self.instance.A == self.instance.tmpset1)) + self.assertTrue(not (self.instance.tmpset1 == self.instance.A)) self.assertTrue(self.instance.A != self.instance.tmpset1) self.assertTrue(self.instance.tmpset1 != self.instance.A) - def test_eq2(self): """Various checks for set equality and inequality (2)""" - self.assertTrue(not(self.instance.A == self.instance.tmpset2)) - self.assertTrue(not(self.instance.tmpset2 == self.instance.A)) + self.assertTrue(not (self.instance.A == self.instance.tmpset2)) + self.assertTrue(not (self.instance.tmpset2 == self.instance.A)) self.assertTrue(self.instance.A != self.instance.tmpset2) self.assertTrue(self.instance.tmpset2 != self.instance.A) @@ -1952,8 +2169,7 @@ def test_and(self): # pass # else: # self.fail("Operator __and__ should have failed.") - self.assertEqual(self.instance.A & self.instance.tmpset3, - self.instance.tmpset3) + self.assertEqual(self.instance.A & self.instance.tmpset3, self.instance.tmpset3) def test_xor(self): """Check that set exclusive or works""" @@ -1988,12 +2204,10 @@ def test_mul(self): # self.fail("Operator __mul__ should have failed.") x = self.instance.A * self.instance.tmpset3 self.assertIsNone(x.dimen) - self.assertEqual(list(x.subsets()), - [self.instance.A, self.instance.tmpset3]) + self.assertEqual(list(x.subsets()), [self.instance.A, self.instance.tmpset3]) class TestSetArgs1(PyomoModel): - def setUp(self): # # Create Model @@ -2004,103 +2218,143 @@ def tearDown(self): # # Remove Set 'A' data file # - if os.path.exists(currdir+"setA.dat"): - os.remove(currdir+"setA.dat") + if os.path.exists(currdir + "setA.dat"): + os.remove(currdir + "setA.dat") PyomoModel.tearDown(self) def test_initialize1_list(self): - self.model.A = Set(initialize=[1,2,3,'A']) + self.model.A = Set(initialize=[1, 2, 3, 'A']) self.instance = self.model.create_instance() - self.assertEqual(len(self.instance.A),4) + self.assertEqual(len(self.instance.A), 4) def test_initialize2_listcomp(self): - self.model.A = Set(initialize=[(i,j) for i in range(0,3) for j in range(1,4) if (i+j)%2 == 0]) + self.model.A = Set( + initialize=[ + (i, j) for i in range(0, 3) for j in range(1, 4) if (i + j) % 2 == 0 + ] + ) self.instance = self.model.create_instance() - self.assertEqual(len(self.instance.A),4) + self.assertEqual(len(self.instance.A), 4) def test_initialize3_generator(self): - self.model.A = Set(initialize=lambda m: ( - (i,j) for i in range(0,3) for j in range(1,4) if (i+j)%2 == 0)) + self.model.A = Set( + initialize=lambda m: ( + (i, j) for i in range(0, 3) for j in range(1, 4) if (i + j) % 2 == 0 + ) + ) self.instance = self.model.create_instance() - self.assertEqual(len(self.instance.A),4) + self.assertEqual(len(self.instance.A), 4) m = ConcreteModel() - m.A = Set(initialize=( - (i,j) for i in range(0,3) for j in range(1,4) if (i+j)%2 == 0)) - self.assertEqual(len(m.A),4) + m.A = Set( + initialize=( + (i, j) for i in range(0, 3) for j in range(1, 4) if (i + j) % 2 == 0 + ) + ) + self.assertEqual(len(m.A), 4) def test_initialize4(self): - self.model.A = Set(initialize=range(0,4)) + self.model.A = Set(initialize=range(0, 4)) + def B_index(model): - return (i for i in model.A if i%2 == 0) + return (i for i in model.A if i % 2 == 0) + def B_init(model, i): - return range(i,2+i) + return range(i, 2 + i) + self.model.B = Set(B_index, initialize=B_init) self.instance = self.model.create_instance() - #self.instance.pprint() - self.assertEqual(self.instance.B[0].value,set([0,1])) - self.assertEqual(self.instance.B[2].value,set([2,3])) - self.assertEqual(list(sorted(self.instance.B.keys())),[0,2]) + # self.instance.pprint() + self.assertEqual(self.instance.B[0].value, set([0, 1])) + self.assertEqual(self.instance.B[2].value, set([2, 3])) + self.assertEqual(list(sorted(self.instance.B.keys())), [0, 2]) def test_initialize5(self): - self.model.A = Set(initialize=range(0,4)) + self.model.A = Set(initialize=range(0, 4)) + def B_index(model): for i in model.A: - if i%2 == 0: + if i % 2 == 0: yield i + def B_init(model, i): - return range(i,2+i) + return range(i, 2 + i) + self.model.B = Set(B_index, initialize=B_init) self.instance = self.model.create_instance() - #self.instance.pprint() - self.assertEqual(self.instance.B[0].value,set([0,1])) - self.assertEqual(self.instance.B[2].value,set([2,3])) - self.assertEqual(list(sorted(self.instance.B.keys())),[0,2]) + # self.instance.pprint() + self.assertEqual(self.instance.B[0].value, set([0, 1])) + self.assertEqual(self.instance.B[2].value, set([2, 3])) + self.assertEqual(list(sorted(self.instance.B.keys())), [0, 2]) def test_initialize6(self): - self.model.A = Set(initialize=range(0,4)) + self.model.A = Set(initialize=range(0, 4)) + def B_index(model): for i in model.A: - if i%2 == 0: + if i % 2 == 0: yield i + def B_init(model, i, j): - k=i+j # A dummy calculation + k = i + j # A dummy calculation if j: - return range(i,2+i) + return range(i, 2 + i) return [] - self.model.B = Set(B_index, [True,False], initialize=B_init) + + self.model.B = Set(B_index, [True, False], initialize=B_init) self.instance = self.model.create_instance() - #self.instance.pprint() - self.assertEqual(set(self.instance.B.keys()),set([(0,True),(2,True),(0,False),(2,False)])) - self.assertEqual(self.instance.B[0,True].value,set([0,1])) - self.assertEqual(self.instance.B[2,True].value,set([2,3])) + # self.instance.pprint() + self.assertEqual( + set(self.instance.B.keys()), + set([(0, True), (2, True), (0, False), (2, False)]), + ) + self.assertEqual(self.instance.B[0, True].value, set([0, 1])) + self.assertEqual(self.instance.B[2, True].value, set([2, 3])) def test_initialize7(self): - self.model.A = Set(initialize=range(0,3)) + self.model.A = Set(initialize=range(0, 3)) + @set_options(dimen=3) def B_index(model): - return [(i,i+1,i*i) for i in model.A] + return [(i, i + 1, i * i) for i in model.A] + def B_init(model, i, ii, iii, j): - k=i+j # A dummy calculation + k = i + j # A dummy calculation if j: - return range(i,2+i) + return range(i, 2 + i) return [] - self.model.B = Set(B_index, [True,False], initialize=B_init) + + self.model.B = Set(B_index, [True, False], initialize=B_init) self.instance = self.model.create_instance() - #self.instance.pprint() - self.assertEqual(set(self.instance.B.keys()),set([(0,1,0,True),(1,2,1,True),(2,3,4,True),(0,1,0,False),(1,2,1,False),(2,3,4,False)])) - self.assertEqual(self.instance.B[0,1,0,True].value,set([0,1])) - self.assertEqual(self.instance.B[2,3,4,True].value,set([2,3])) + # self.instance.pprint() + self.assertEqual( + set(self.instance.B.keys()), + set( + [ + (0, 1, 0, True), + (1, 2, 1, True), + (2, 3, 4, True), + (0, 1, 0, False), + (1, 2, 1, False), + (2, 3, 4, False), + ] + ), + ) + self.assertEqual(self.instance.B[0, 1, 0, True].value, set([0, 1])) + self.assertEqual(self.instance.B[2, 3, 4, True].value, set([2, 3])) def test_initialize8(self): - self.model.A = Set(initialize=range(0,3)) + self.model.A = Set(initialize=range(0, 3)) + def B_index(model): - return [(i,i+1,i*i) for i in model.A] + return [(i, i + 1, i * i) for i in model.A] + def B_init(model, i, ii, iii, j): if j: - return range(i,2+i) + return range(i, 2 + i) return [] - self.model.B = Set(B_index, [True,False], initialize=B_init) + + self.model.B = Set(B_index, [True, False], initialize=B_init) # In the set rewrite, the following now works! # try: # self.instance = self.model.create_instance() @@ -2109,23 +2363,26 @@ def B_init(model, i, ii, iii, j): # pass instance = self.model.create_instance() self.assertEqual(len(instance.B), 6) - self.assertEqual(instance.B[0,1,0,False], []) - self.assertEqual(instance.B[0,1,0,True], [0,1]) - self.assertEqual(instance.B[1,2,1,False], []) - self.assertEqual(instance.B[1,2,1,True], [1,2]) - self.assertEqual(instance.B[2,3,4,False], []) - self.assertEqual(instance.B[2,3,4,True], [2,3]) + self.assertEqual(instance.B[0, 1, 0, False], []) + self.assertEqual(instance.B[0, 1, 0, True], [0, 1]) + self.assertEqual(instance.B[1, 2, 1, False], []) + self.assertEqual(instance.B[1, 2, 1, True], [1, 2]) + self.assertEqual(instance.B[2, 3, 4, False], []) + self.assertEqual(instance.B[2, 3, 4, True], [2, 3]) def test_initialize9(self): - self.model.A = Set(initialize=range(0,3)) + self.model.A = Set(initialize=range(0, 3)) + @set_options(domain=Integers) def B_index(model): - return [i/2.0 for i in model.A] + return [i / 2.0 for i in model.A] + def B_init(model, i, j): if j: - return range(int(i),int(2+i)) + return range(int(i), int(2 + i)) return [] - self.model.B = Set(B_index, [True,False], initialize=B_init) + + self.model.B = Set(B_index, [True, False], initialize=B_init) try: self.instance = self.model.create_instance() self.fail("Expected ValueError because B_index returns invalid set values") @@ -2136,30 +2393,32 @@ def test_dimen1(self): # # Create model instance # - self.model.A = Set(initialize=[1,2,3], dimen=1) + self.model.A = Set(initialize=[1, 2, 3], dimen=1) self.instance = self.model.create_instance() # try: - self.model.A = Set(initialize=[4,5,6], dimen=2) + self.model.A = Set(initialize=[4, 5, 6], dimen=2) self.instance = self.model.create_instance() except ValueError: pass else: self.fail("test_dimen") # - self.model.A = Set(initialize=[(1,2), (2,3), (3,4)], dimen=2) + self.model.A = Set(initialize=[(1, 2), (2, 3), (3, 4)], dimen=2) self.instance = self.model.create_instance() # try: - self.model.A = Set(initialize=[(1,2), (2,3), (3,4)], dimen=1) + self.model.A = Set(initialize=[(1, 2), (2, 3), (3, 4)], dimen=1) self.instance = self.model.create_instance() except ValueError: pass else: self.fail("test_dimen") + # def f(model): - return [(1,1), (2,2), (3,3)] + return [(1, 1), (2, 2), (3, 3)] + self.model.A = Set(initialize=f, dimen=2) self.instance = self.model.create_instance() # @@ -2173,70 +2432,75 @@ def f(model): def test_dimen2(self): try: - self.model.A = Set(initialize=[1,2,(3,4)]) + self.model.A = Set(initialize=[1, 2, (3, 4)]) self.instance = self.model.create_instance() except ValueError: pass else: self.fail("test_dimen2") - self.model.A = Set(dimen=None, initialize=[1,2,(3,4)]) + self.model.A = Set(dimen=None, initialize=[1, 2, (3, 4)]) self.instance = self.model.create_instance() - def test_rule(self): # # Create model instance # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; param n := 5; end;") OUTPUT.close() + def tmp_init(model): ##model.n.pprint() ##print "HERE",model.n,value(model.n) - return range(0,value(model.n)) + return range(0, value(model.n)) + self.model.n = Param() self.model.A = Set(initialize=tmp_init) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual(len(self.instance.A),5) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.A), 5) def test_rule2(self): # # Create model instance # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; param n := 5; end;") OUTPUT.close() + @simple_set_rule def tmp_init(model, z): - if z>value(model.n) or z == 11: + if z > value(model.n) or z == 11: return None return z + self.model.n = Param() self.model.A = Set(initialize=tmp_init) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual(len(self.instance.A),5) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.A), 5) def test_rule3(self): # # Create model instance # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; param n := 5; end;") OUTPUT.close() + def tmp_init(model, z): - if z>value(model.n) or z == 11: + if z > value(model.n) or z == 11: return Set.End return z + self.model.n = Param() self.model.A = Set(initialize=tmp_init) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual(len(self.instance.A),5) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.A), 5) def test_within1(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set A := 1 3 5 7.5; end;") OUTPUT.close() # @@ -2244,7 +2508,7 @@ def test_within1(self): # self.model.A = Set(within=Integers) try: - self.instance = self.model.create_instance(currdir+"setA.dat") + self.instance = self.model.create_instance(currdir + "setA.dat") except ValueError: pass else: @@ -2254,7 +2518,7 @@ def test_within2(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set A := 1 3 5 7.5; end;") OUTPUT.close() # @@ -2262,7 +2526,7 @@ def test_within2(self): # self.model.A = Set(within=Reals) try: - self.instance = self.model.create_instance(currdir+"setA.dat") + self.instance = self.model.create_instance(currdir + "setA.dat") except ValueError: self.fail("fail test_within2") else: @@ -2272,15 +2536,15 @@ def test_validation1(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set A := 1 3 5 7.5; end;") OUTPUT.close() # # Create A with an error # - self.model.A = Set(validate=lambda model, x:x<6) + self.model.A = Set(validate=lambda model, x: x < 6) try: - self.instance = self.model.create_instance(currdir+"setA.dat") + self.instance = self.model.create_instance(currdir + "setA.dat") except ValueError: pass else: @@ -2290,22 +2554,24 @@ def test_validation2(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set A := 1 3 5 5.5; end;") OUTPUT.close() # # Create A with an error # - self.model.A = Set(validate=lambda model, x:x<6) + self.model.A = Set(validate=lambda model, x: x < 6) try: - self.instance = self.model.create_instance(currdir+"setA.dat") + self.instance = self.model.create_instance(currdir + "setA.dat") except ValueError: self.fail("fail test_validation2") else: pass def test_other1(self): - self.model.A = Set(initialize=[1,2,3,'A'], validate=lambda model, x:x in Integers) + self.model.A = Set( + initialize=[1, 2, 3, 'A'], validate=lambda model, x: x in Integers + ) try: self.instance = self.model.create_instance() except ValueError: @@ -2314,7 +2580,7 @@ def test_other1(self): self.fail("fail test_other1") def test_other2(self): - self.model.A = Set(initialize=[1,2,3,'A'], within=Integers) + self.model.A = Set(initialize=[1, 2, 3, 'A'], within=Integers) try: self.instance = self.model.create_instance() except ValueError: @@ -2323,43 +2589,47 @@ def test_other2(self): self.fail("fail test_other1") def test_other3(self): - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; param n := 5; end;") OUTPUT.close() + def tmp_init(model): - tmp=[] - for i in range(0,value(model.n)): - tmp.append(i/2.0) + tmp = [] + for i in range(0, value(model.n)): + tmp.append(i / 2.0) return tmp + self.model.n = Param() - self.model.A = Set(initialize=tmp_init, validate=lambda model, x:x in Integers) + self.model.A = Set(initialize=tmp_init, validate=lambda model, x: x in Integers) try: - self.instance = self.model.create_instance(currdir+"setA.dat") + self.instance = self.model.create_instance(currdir + "setA.dat") except ValueError: pass else: self.fail("fail test_other1") def test_other4(self): - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; param n := 5; end;") OUTPUT.close() + def tmp_init(model): - tmp=[] - for i in range(0,value(model.n)): - tmp.append(i/2.0) + tmp = [] + for i in range(0, value(model.n)): + tmp.append(i / 2.0) return tmp + self.model.n = Param() self.model.A = Set(initialize=tmp_init, within=Integers) try: - self.instance = self.model.create_instance(currdir+"setA.dat") + self.instance = self.model.create_instance(currdir + "setA.dat") except ValueError: pass else: self.fail("fail test_other1") -class TestSetArgs2(PyomoModel): +class TestSetArgs2(PyomoModel): def setUp(self): # # Create Model @@ -2370,43 +2640,45 @@ def tearDown(self): # # Remove Set 'A' data file # - if os.path.exists(currdir+"setA.dat"): - os.remove(currdir+"setA.dat") + if os.path.exists(currdir + "setA.dat"): + os.remove(currdir + "setA.dat") PyomoModel.tearDown(self) def test_initialize(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set Z := A C; set A[A] := 1 3 5 7; end;") OUTPUT.close() # # Create model instance # self.model.Z = Set() - self.model.A = Set(self.model.Z, initialize={'A':[1,2,3,'A']}) - self.instance = self.model.create_instance(currdir+'setA.dat') - self.assertEqual(len(self.instance.A['A']),4) + self.model.A = Set(self.model.Z, initialize={'A': [1, 2, 3, 'A']}) + self.instance = self.model.create_instance(currdir + 'setA.dat') + self.assertEqual(len(self.instance.A['A']), 4) def test_dimen(self): # # Create model instance # - self.model.Z = Set(initialize=[1,2]) - self.model.A = Set(self.model.Z, initialize=[1,2,3], dimen=1) + self.model.Z = Set(initialize=[1, 2]) + self.model.A = Set(self.model.Z, initialize=[1, 2, 3], dimen=1) self.instance = self.model.create_instance() try: - self.model.A = Set(self.model.Z, initialize=[4,5,6], dimen=2) + self.model.A = Set(self.model.Z, initialize=[4, 5, 6], dimen=2) self.instance = self.model.create_instance() except ValueError: pass else: self.fail("test_dimen") - self.model.A = Set(self.model.Z, initialize=[(1,2), (2,3), (3,4)], dimen=2) + self.model.A = Set(self.model.Z, initialize=[(1, 2), (2, 3), (3, 4)], dimen=2) self.instance = self.model.create_instance() try: - self.model.A = Set(self.model.Z, initialize=[(1,2), (2,3), (3,4)], dimen=1) + self.model.A = Set( + self.model.Z, initialize=[(1, 2), (2, 3), (3, 4)], dimen=1 + ) self.instance = self.model.create_instance() except ValueError: pass @@ -2417,57 +2689,63 @@ def test_rule(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; param n := 5; set Z := A C; end;") OUTPUT.close() + def tmp_init(model, i): - return range(0,value(model.n)) + return range(0, value(model.n)) + self.model.n = Param() self.model.Z = Set() self.model.A = Set(self.model.Z, initialize=tmp_init) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual(len(self.instance.A['A']),5) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.A['A']), 5) def test_rule2(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; param n := 5; set Z := A C; end;") OUTPUT.close() + @simple_set_rule def tmp_rule2(model, z, i): - if z>value(model.n): + if z > value(model.n): return None return z + self.model.n = Param() self.model.Z = Set() self.model.A = Set(self.model.Z, initialize=tmp_rule2) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual(len(self.instance.A['A']),5) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.A['A']), 5) def test_rule3(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; param n := 5; set Z := A C; end;") OUTPUT.close() + def tmp_rule2(model, z, i): - if z>value(model.n): + if z > value(model.n): return Set.End return z + self.model.n = Param() self.model.Z = Set() self.model.A = Set(self.model.Z, initialize=tmp_rule2) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual(len(self.instance.A['A']),5) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.A['A']), 5) def test_within1(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set Z := A C; set A[A] := 1 3 5 7.5; end;") OUTPUT.close() # @@ -2476,7 +2754,7 @@ def test_within1(self): self.model.Z = Set() self.model.A = Set(self.model.Z, within=Integers) try: - self.instance = self.model.create_instance(currdir+"setA.dat") + self.instance = self.model.create_instance(currdir + "setA.dat") except ValueError: pass else: @@ -2486,7 +2764,7 @@ def test_within2(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set Z := A C; set A[A] := 1 3 5 7.5; end;") OUTPUT.close() # @@ -2495,7 +2773,7 @@ def test_within2(self): self.model.Z = Set() self.model.A = Set(self.model.Z, within=Reals) try: - self.instance = self.model.create_instance(currdir+"setA.dat") + self.instance = self.model.create_instance(currdir + "setA.dat") except ValueError: self.fail("fail test_within2") else: @@ -2505,16 +2783,16 @@ def test_validation1(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set Z := A C; set A[A] := 1 3 5 7.5; end;") OUTPUT.close() # # Create A with an error # self.model.Z = Set() - self.model.A = Set(self.model.Z, validate=lambda model, x:x<6) + self.model.A = Set(self.model.Z, validate=lambda model, x: x < 6) try: - self.instance = self.model.create_instance(currdir+"setA.dat") + self.instance = self.model.create_instance(currdir + "setA.dat") except ValueError: pass else: @@ -2524,16 +2802,16 @@ def test_validation2(self): # # Create Set 'A' data file # - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set Z := A C; set A[A] := 1 3 5 5.5; end;") OUTPUT.close() # # Create A with an error # self.model.Z = Set() - self.model.A = Set(self.model.Z, validate=lambda model, x:x<6) + self.model.A = Set(self.model.Z, validate=lambda model, x: x < 6) try: - self.instance = self.model.create_instance(currdir+"setA.dat") + self.instance = self.model.create_instance(currdir + "setA.dat") except ValueError: self.fail("fail test_within2") else: @@ -2541,7 +2819,11 @@ def test_validation2(self): def test_other1(self): self.model.Z = Set(initialize=['A']) - self.model.A = Set(self.model.Z, initialize={'A':[1,2,3,'A']}, validate=lambda model, x:x in Integers) + self.model.A = Set( + self.model.Z, + initialize={'A': [1, 2, 3, 'A']}, + validate=lambda model, x: x in Integers, + ) try: self.instance = self.model.create_instance() except ValueError: @@ -2551,7 +2833,9 @@ def test_other1(self): def test_other2(self): self.model.Z = Set(initialize=['A']) - self.model.A = Set(self.model.Z, initialize={'A':[1,2,3,'A']}, within=Integers) + self.model.A = Set( + self.model.Z, initialize={'A': [1, 2, 3, 'A']}, within=Integers + ) try: self.instance = self.model.create_instance() except ValueError: @@ -2561,13 +2845,16 @@ def test_other2(self): def test_other3(self): def tmp_init(model, i): - tmp=[] - for i in range(0,value(model.n)): - tmp.append(i/2.0) + tmp = [] + for i in range(0, value(model.n)): + tmp.append(i / 2.0) return tmp + self.model.n = Param(initialize=5) self.model.Z = Set(initialize=['A']) - self.model.A = Set(self.model.Z,initialize=tmp_init, validate=lambda model, x:x in Integers) + self.model.A = Set( + self.model.Z, initialize=tmp_init, validate=lambda model, x: x in Integers + ) try: self.instance = self.model.create_instance() except ValueError: @@ -2577,10 +2864,11 @@ def tmp_init(model, i): def test_other4(self): def tmp_init(model, i): - tmp=[] - for i in range(0,value(model.n)): - tmp.append(i/2.0) + tmp = [] + for i in range(0, value(model.n)): + tmp.append(i / 2.0) return tmp + self.model.n = Param(initialize=5) self.model.Z = Set(initialize=['A']) self.model.A = Set(self.model.Z, initialize=tmp_init, within=Integers) @@ -2592,8 +2880,8 @@ def tmp_init(model, i): else: self.fail("fail test_other1") -class TestMisc(PyomoModel): +class TestMisc(PyomoModel): def setUp(self): # # Create Model @@ -2602,39 +2890,39 @@ def setUp(self): # # # - self.model.A = Set(initialize=[1,2,3]) - self.model.B = Set(initialize=['a','b','c']) - self.model.C = Set(initialize=[4,5,6]) + self.model.A = Set(initialize=[1, 2, 3]) + self.model.B = Set(initialize=['a', 'b', 'c']) + self.model.C = Set(initialize=[4, 5, 6]) def tearDown(self): # # Remove Set 'A' data file # - if os.path.exists(currdir+"setA.dat"): - os.remove(currdir+"setA.dat") + if os.path.exists(currdir + "setA.dat"): + os.remove(currdir + "setA.dat") PyomoModel.tearDown(self) def test_cross_set(self): self.model.C = self.model.A * self.model.B self.instance = self.model.create_instance() - self.assertEqual(len(self.instance.C),9) + self.assertEqual(len(self.instance.C), 9) def test_tricross_set(self): self.model.D = self.model.A * self.model.B * self.model.C self.instance = self.model.create_instance() - self.assertEqual(len(self.instance.D),27) + self.assertEqual(len(self.instance.D), 27) def test_virtual_cross_set(self): self.model.C = self.model.A * self.model.B self.model.C.virtual = True self.instance = self.model.create_instance() - self.assertEqual(len(self.instance.C),9) + self.assertEqual(len(self.instance.C), 9) if self.instance.C.value is not None: - self.assertEqual(len(self.instance.C.value),9) - tmp=[] + self.assertEqual(len(self.instance.C.value), 9) + tmp = [] for item in self.instance.C: tmp.append(item) - self.assertEqual(len(tmp),9) + self.assertEqual(len(tmp), 9) class TestSetsInPython3(unittest.TestCase): @@ -2642,11 +2930,11 @@ def test_pprint_mixed(self): # In Python3, sorting a mixed string fails. We have added a # fallback more "robust" sorter, and this exercises that code m = ConcreteModel() - m.Z = Set(initialize=['A','C']) - m.A = Set(m.Z, initialize={'A':[1,2,3,'A']}) + m.Z = Set(initialize=['A', 'C']) + m.A = Set(m.Z, initialize={'A': [1, 2, 3, 'A']}) buf = StringIO() m.pprint(ostream=buf) - ref="""2 Set Declarations + ref = """2 Set Declarations A : Size=1, Index=Z, Ordered=Insertion Key : Dimen : Domain : Size : Members A : 1 : Any : 4 : {1, 2, 3, 'A'} @@ -2684,7 +2972,7 @@ def test_initialize_and_clone_from_dict_keys(self): # keys() # m = ConcreteModel() - v = {1:2,3:4,5:6} + v = {1: 2, 3: 4, 5: 6} m.INDEX = Set(initialize=v.keys()) m.p = Param(m.INDEX, initialize=v) buf = StringIO() @@ -2702,7 +2990,7 @@ def test_initialize_and_clone_from_dict_keys(self): self.assertEqual(ref, buf.getvalue()) m = ConcreteModel() - v = {1:2,3:4,5:6} + v = {1: 2, 3: 4, 5: 6} m.INDEX = Set(initialize=v.keys()) m.p = Param(m.INDEX, initialize=v) buf = StringIO() @@ -2721,7 +3009,6 @@ def test_initialize_and_clone_from_dict_keys(self): class TestSetIO(PyomoModel): - def setUp(self): # # Create Model @@ -2732,42 +3019,42 @@ def tearDown(self): # # Remove Set 'A' data file # - if os.path.exists(currdir+"setA.dat"): - os.remove(currdir+"setA.dat") + if os.path.exists(currdir + "setA.dat"): + os.remove(currdir + "setA.dat") PyomoModel.tearDown(self) def test_io1(self): - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set A := A1 A2 A3; end;") OUTPUT.close() self.model.A = Set() - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual( len(self.instance.A), 3) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.A), 3) def test_io2(self): - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data; set B := 1 2 3 4; end;") OUTPUT.close() self.model.B = Set() - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual( len(self.instance.B), 4) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.B), 4) def test_io3(self): - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data;\n") OUTPUT.write("set A := A1 A2 A3;\n") OUTPUT.write("set B := 1 2 3 4;\n") - #OUTPUT.write("set C := (A1,1) (A2,2) (A3,3);\n") + # OUTPUT.write("set C := (A1,1) (A2,2) (A3,3);\n") OUTPUT.write("end;\n") OUTPUT.close() self.model.A = Set() self.model.B = Set() self.model.C = self.model.A * self.model.B - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual( len(self.instance.C), 12) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.C), 12) def test_io3a(self): - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data;\n") OUTPUT.write("set A := A1 A2 A3;\n") OUTPUT.write("set B := 1 2 3 4;\n") @@ -2777,12 +3064,11 @@ def test_io3a(self): self.model.A = Set() self.model.B = Set() self.model.C = self.model.A * self.model.B - with self.assertRaisesRegex( - ValueError, "SetOperator C with incompatible data"): - self.instance = self.model.create_instance(currdir+"setA.dat") + with self.assertRaisesRegex(ValueError, "SetOperator C with incompatible data"): + self.instance = self.model.create_instance(currdir + "setA.dat") def test_io4(self): - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data;\n") OUTPUT.write("set A := A1 A2 A3;\n") OUTPUT.write("set B := 1 2 3 4;\n") @@ -2791,12 +3077,12 @@ def test_io4(self): OUTPUT.close() self.model.A = Set() self.model.B = Set() - self.model.D = Set(within=self.model.A*self.model.B) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual( len(self.instance.D), 3) + self.model.D = Set(within=self.model.A * self.model.B) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.D), 3) def test_io5(self): - OUTPUT=open(currdir+"setA.dat","w") + OUTPUT = open(currdir + "setA.dat", "w") OUTPUT.write("data;\n") OUTPUT.write("set A := A1 A2 A3;\n") OUTPUT.write("set B := 1 2 3 4;\n") @@ -2809,158 +3095,156 @@ def test_io5(self): OUTPUT.close() self.model.A = Set() self.model.B = Set() - self.model.D = Set(within=self.model.A*self.model.B) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual( len(self.instance.D), 8) + self.model.D = Set(within=self.model.A * self.model.B) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.D), 8) def test_io6(self): - OUTPUT=open(currdir+"setA.dat","w") - OUTPUT.write( "data;\n") - OUTPUT.write( "set A := A1 A2 A3;\n") - OUTPUT.write( "set B := 1 2 3 4;\n") - OUTPUT.write( "set E :=\n") - OUTPUT.write( "(A1,1,*) A1 A2\n") - OUTPUT.write( "(A2,2,*) A2 A3\n") - OUTPUT.write( "(A3,3,*) A1 A3 ;\n") - OUTPUT.write( "end;\n") + OUTPUT = open(currdir + "setA.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set A := A1 A2 A3;\n") + OUTPUT.write("set B := 1 2 3 4;\n") + OUTPUT.write("set E :=\n") + OUTPUT.write("(A1,1,*) A1 A2\n") + OUTPUT.write("(A2,2,*) A2 A3\n") + OUTPUT.write("(A3,3,*) A1 A3 ;\n") + OUTPUT.write("end;\n") OUTPUT.close() self.model.A = Set() self.model.B = Set() - self.model.E = Set(within=self.model.A*self.model.B*self.model.A) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual( len(self.instance.E), 6) + self.model.E = Set(within=self.model.A * self.model.B * self.model.A) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.E), 6) def test_io7(self): - OUTPUT=open(currdir+"setA.dat","w") - OUTPUT.write( "data;\n") - OUTPUT.write( "set A := A1 A2 A3;\n") - OUTPUT.write( "set B := 1 2 3 4;\n") - OUTPUT.write( "set F[A1] := 1 3 5;\n") - OUTPUT.write( "set F[A2] := 2 4 6;\n") - OUTPUT.write( "set F[A3] := 3 5 7;\n") - OUTPUT.write( "end;\n") + OUTPUT = open(currdir + "setA.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set A := A1 A2 A3;\n") + OUTPUT.write("set B := 1 2 3 4;\n") + OUTPUT.write("set F[A1] := 1 3 5;\n") + OUTPUT.write("set F[A2] := 2 4 6;\n") + OUTPUT.write("set F[A3] := 3 5 7;\n") + OUTPUT.write("end;\n") OUTPUT.close() self.model.A = Set() self.model.B = Set() self.model.F = Set(self.model.A) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual( self.instance.F.dim(), 1) - self.assertEqual( len(list(self.instance.F.keys())), 3) - self.assertEqual( len(self.instance.F['A1']), 3) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(self.instance.F.dim(), 1) + self.assertEqual(len(list(self.instance.F.keys())), 3) + self.assertEqual(len(self.instance.F['A1']), 3) def test_io8(self): - OUTPUT=open(currdir+"setA.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "set E :=\n" ) - OUTPUT.write( "(A1,1,*) A1 A2\n" ) - OUTPUT.write( "(*,2,*) A2 A3\n" ) - OUTPUT.write( "(A3,3,*) A1 A3 ;\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open(currdir + "setA.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set E :=\n") + OUTPUT.write("(A1,1,*) A1 A2\n") + OUTPUT.write("(*,2,*) A2 A3\n") + OUTPUT.write("(A3,3,*) A1 A3 ;\n") + OUTPUT.write("end;\n") OUTPUT.close() self.model.E = Set(dimen=3) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual( len(self.instance.E), 5) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.E), 5) def test_io9(self): - OUTPUT=open(currdir+"setA.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "set E :=\n" ) - OUTPUT.write( "(A1,1,A1) (A1,1,A2)\n" ) - OUTPUT.write( "(A2,2,A3)\n" ) - OUTPUT.write( "(A3,3,A1) (A3,3,A3) ;\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open(currdir + "setA.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set E :=\n") + OUTPUT.write("(A1,1,A1) (A1,1,A2)\n") + OUTPUT.write("(A2,2,A3)\n") + OUTPUT.write("(A3,3,A1) (A3,3,A3) ;\n") + OUTPUT.write("end;\n") OUTPUT.close() self.model.E = Set(dimen=3) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual( len(self.instance.E), 5) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(len(self.instance.E), 5) def test_io10(self): - OUTPUT=open(currdir+"setA.dat","w") - OUTPUT.write( "data;\n" ) - OUTPUT.write( "set A := 'A1 x' ' A2' \"A3\";\n" ) - OUTPUT.write( "set F['A1 x'] := 1 3 5;\n" ) - OUTPUT.write( "set F[\" A2\"] := 2 4 6;\n" ) - OUTPUT.write( "set F['A3'] := 3 5 7;\n" ) - OUTPUT.write( "end;\n" ) + OUTPUT = open(currdir + "setA.dat", "w") + OUTPUT.write("data;\n") + OUTPUT.write("set A := 'A1 x' ' A2' \"A3\";\n") + OUTPUT.write("set F['A1 x'] := 1 3 5;\n") + OUTPUT.write("set F[\" A2\"] := 2 4 6;\n") + OUTPUT.write("set F['A3'] := 3 5 7;\n") + OUTPUT.write("end;\n") OUTPUT.close() self.model.A = Set() self.model.F = Set(self.model.A) - self.instance = self.model.create_instance(currdir+"setA.dat") - self.assertEqual( self.instance.F.dim(), 1) - self.assertEqual( len(list(self.instance.F.keys())), 3) - self.assertEqual( len(self.instance.F['A1 x']), 3) + self.instance = self.model.create_instance(currdir + "setA.dat") + self.assertEqual(self.instance.F.dim(), 1) + self.assertEqual(len(list(self.instance.F.keys())), 3) + self.assertEqual(len(self.instance.F['A1 x']), 3) class TestSetErrors(PyomoModel): - def test_membership(self): - self.assertEqual( 0 in Boolean, True) - self.assertEqual( 1 in Boolean, True) - self.assertEqual( True in Boolean, True) - self.assertEqual( False in Boolean, True) - self.assertEqual( 1.1 in Boolean, False) - self.assertEqual( 2 in Boolean, False) - - self.assertEqual( 0 in Integers, True) - self.assertEqual( 1 in Integers, True) - self.assertEqual( True in Integers, True) - self.assertEqual( False in Integers, True) - self.assertEqual( 1.1 in Integers, False) - self.assertEqual( 2 in Integers, True) - - self.assertEqual( 0 in Reals, True) - self.assertEqual( 1 in Reals, True) - self.assertEqual( True in Reals, True) - self.assertEqual( False in Reals, True) - self.assertEqual( 1.1 in Reals, True) - self.assertEqual( 2 in Reals, True) - - self.assertEqual( 0 in Any, True) - self.assertEqual( 1 in Any, True) - self.assertEqual( True in Any, True) - self.assertEqual( False in Any, True) - self.assertEqual( 1.1 in Any, True) - self.assertEqual( 2 in Any, True) + self.assertEqual(0 in Boolean, True) + self.assertEqual(1 in Boolean, True) + self.assertEqual(True in Boolean, True) + self.assertEqual(False in Boolean, True) + self.assertEqual(1.1 in Boolean, False) + self.assertEqual(2 in Boolean, False) + + self.assertEqual(0 in Integers, True) + self.assertEqual(1 in Integers, True) + self.assertEqual(True in Integers, True) + self.assertEqual(False in Integers, True) + self.assertEqual(1.1 in Integers, False) + self.assertEqual(2 in Integers, True) + + self.assertEqual(0 in Reals, True) + self.assertEqual(1 in Reals, True) + self.assertEqual(True in Reals, True) + self.assertEqual(False in Reals, True) + self.assertEqual(1.1 in Reals, True) + self.assertEqual(2 in Reals, True) + + self.assertEqual(0 in Any, True) + self.assertEqual(1 in Any, True) + self.assertEqual(True in Any, True) + self.assertEqual(False in Any, True) + self.assertEqual(1.1 in Any, True) + self.assertEqual(2 in Any, True) @unittest.skipIf(not _has_numpy, "Numpy is not installed") def test_numpy_membership(self): - - self.assertEqual( numpy.int_(0) in Boolean, True) - self.assertEqual( numpy.int_(1) in Boolean, True) - self.assertEqual( numpy.bool_(True) in Boolean, True) - self.assertEqual( numpy.bool_(False) in Boolean, True) - self.assertEqual( numpy.float_(1.1) in Boolean, False) - self.assertEqual( numpy.int_(2) in Boolean, False) - - self.assertEqual( numpy.int_(0) in Integers, True) - self.assertEqual( numpy.int_(1) in Integers, True) + self.assertEqual(numpy.int_(0) in Boolean, True) + self.assertEqual(numpy.int_(1) in Boolean, True) + self.assertEqual(numpy.bool_(True) in Boolean, True) + self.assertEqual(numpy.bool_(False) in Boolean, True) + self.assertEqual(numpy.float_(1.1) in Boolean, False) + self.assertEqual(numpy.int_(2) in Boolean, False) + + self.assertEqual(numpy.int_(0) in Integers, True) + self.assertEqual(numpy.int_(1) in Integers, True) # Numpy.bool_(True) is NOT a numeric type, but it behaves # identically to 1 - self.assertEqual( numpy.bool_(True) in Integers, True) - self.assertEqual( numpy.bool_(False) in Integers, True) - self.assertEqual( numpy.float_(1.1) in Integers, False) - self.assertEqual( numpy.int_(2) in Integers, True) + self.assertEqual(numpy.bool_(True) in Integers, True) + self.assertEqual(numpy.bool_(False) in Integers, True) + self.assertEqual(numpy.float_(1.1) in Integers, False) + self.assertEqual(numpy.int_(2) in Integers, True) - self.assertEqual( numpy.int_(0) in Reals, True) - self.assertEqual( numpy.int_(1) in Reals, True) + self.assertEqual(numpy.int_(0) in Reals, True) + self.assertEqual(numpy.int_(1) in Reals, True) # Numpy.bool_(True) is NOT a numeric type, but it behaves # identically to 1 - self.assertEqual( numpy.bool_(True) in Reals, True) - self.assertEqual( numpy.bool_(False) in Reals, True) - self.assertEqual( numpy.float_(1.1) in Reals, True) - self.assertEqual( numpy.int_(2) in Reals, True) - - self.assertEqual( numpy.int_(0) in Any, True) - self.assertEqual( numpy.int_(1) in Any, True) - self.assertEqual( numpy.bool_(True) in Any, True) - self.assertEqual( numpy.bool_(False) in Any, True) - self.assertEqual( numpy.float_(1.1) in Any, True) - self.assertEqual( numpy.int_(2) in Any, True) + self.assertEqual(numpy.bool_(True) in Reals, True) + self.assertEqual(numpy.bool_(False) in Reals, True) + self.assertEqual(numpy.float_(1.1) in Reals, True) + self.assertEqual(numpy.int_(2) in Reals, True) + + self.assertEqual(numpy.int_(0) in Any, True) + self.assertEqual(numpy.int_(1) in Any, True) + self.assertEqual(numpy.bool_(True) in Any, True) + self.assertEqual(numpy.bool_(False) in Any, True) + self.assertEqual(numpy.float_(1.1) in Any, True) + self.assertEqual(numpy.int_(2) in Any, True) def test_setargs1(self): try: - a=Set() - c=Set(a,foo=None) + a = Set() + c = Set(a, foo=None) self.fail("test_setargs1 - expected error because of bad argument") except ValueError: pass @@ -2975,41 +3259,47 @@ def test_setargs2(self): # self.fail("test_setargs1 - expected error because of bad argument") # except ValueError: # pass - a=Set() - b=Set(a) + a = Set() + b = Set(a) with self.assertRaisesRegex( - #TypeError, "Cannot apply a Set operator to an indexed"): - ValueError, r"Error retrieving component IndexedSet\[None\]: " - r"The component has not been constructed."): - c=Set(within=b, dimen=2) + # TypeError, "Cannot apply a Set operator to an indexed"): + ValueError, + r"Error retrieving component IndexedSet\[None\]: " + r"The component has not been constructed.", + ): + c = Set(within=b, dimen=2) c.construct() - a=Set() - b=Set() - c=Set(within=b, dimen=1) + a = Set() + b = Set() + c = Set(within=b, dimen=1) c.construct() - self.assertEqual(c.domain,b) + self.assertEqual(c.domain, b) # After the set rewrite, we disallow setting the domain after # declaration - #c.domain = a - #self.assertEqual(c.domain,a) + # c.domain = a + # self.assertEqual(c.domain,a) def test_setargs3(self): model = ConcreteModel() - model.a=Set(dimen=1, initialize=(1,2,3)) + model.a = Set(dimen=1, initialize=(1, 2, 3)) try: - model.b=Set(dimen=2, initialize=(1,2,3)) - self.fail("test_setargs3 - expected error because dimen does not match set values") + model.b = Set(dimen=2, initialize=(1, 2, 3)) + self.fail( + "test_setargs3 - expected error because dimen does not match set values" + ) except ValueError: pass def test_setargs4(self): model = ConcreteModel() model.A = Set(initialize=[1]) - model.B = Set(model.A, initialize={1:[1]}) + model.B = Set(model.A, initialize={1: [1]}) try: model.C = Set(model.B) - self.fail("test_setargs4 - expected error when passing in a set that is indexed") + self.fail( + "test_setargs4 - expected error when passing in a set that is indexed" + ) except TypeError: pass @@ -3018,37 +3308,37 @@ def test_setargs5(self): model = AbstractModel() model.A = Set() model.B = Set() - model.C = model.A|model.B + model.C = model.A | model.B model.Z = Set(model.C) model.Y = RangeSet(model.C) model.X = Param(model.C, default=0.0) @unittest.skip("_verify was removed during the set rewrite") def test_verify(self): - a=Set(initialize=[1,2,3]) - b=Set(within=a) + a = Set(initialize=[1, 2, 3]) + b = Set(within=a) try: b._verify(4) self.fail("test_verify - bad value was expected") except ValueError: pass # - c=Set() + c = Set() try: - c._verify( (1,2) ) + c._verify((1, 2)) self.fail("test_verify - bad value was expected") except ValueError: pass # - c=Set(dimen=2) + c = Set(dimen=2) try: - c._verify( (1,2,3) ) + c._verify((1, 2, 3)) self.fail("test_verify - bad value was expected") except ValueError: pass def test_construct(self): - a = Set(initialize={1:2,3:4}) + a = Set(initialize={1: 2, 3: 4}) # After the set rewrite, this still fails, but with a different # exception: # try: @@ -3057,17 +3347,20 @@ def test_construct(self): # except ValueError: # pass with self.assertRaisesRegex( - KeyError, "Cannot treat the scalar component '[^']*' " - "as an indexed component"): + KeyError, + "Cannot treat the scalar component '[^']*' as an indexed component", + ): a.construct() # After the set rewrite, empty dictionaries are acceptable a = Set(initialize={}) a.construct() self.assertEqual(a, EmptySet) + # def init_fn(model): return [] + # After the set rewrite, model()==None is acceptable a = Set(initialize=init_fn) # try: @@ -3078,9 +3371,8 @@ def init_fn(model): a.construct() self.assertEqual(a, EmptySet) - def test_add(self): - a=Set() + a = Set() a.construct() a.add(1) a.add("a") @@ -3091,7 +3383,7 @@ def test_add(self): pass def test_getitem(self): - a=Set(initialize=[2,3]) + a = Set(initialize=[2, 3]) # With the set rewrite, sets are ordered by default # try: # a[0] @@ -3100,38 +3392,35 @@ def test_getitem(self): # pass # except IndexError: # pass - with self.assertRaisesRegex( - RuntimeError, ".*before it has been constructed"): + with self.assertRaisesRegex(RuntimeError, ".*before it has been constructed"): a[0] a.construct() - with self.assertRaisesRegex( - IndexError, "Pyomo Sets are 1-indexed"): + with self.assertRaisesRegex(IndexError, "Pyomo Sets are 1-indexed"): a[0] self.assertEqual(a[1], 2) - def test_eq(self): - a=Set(dimen=1,name="a",initialize=[1,2]) + a = Set(dimen=1, name="a", initialize=[1, 2]) a.construct() - b=Set(dimen=2) + b = Set(dimen=2) b.construct() - self.assertEqual(a==b,False) + self.assertEqual(a == b, False) self.assertTrue(not a.__eq__(Boolean)) self.assertTrue(not Boolean == a) def test_neq(self): - a=Set(dimen=1,initialize=[1,2]) + a = Set(dimen=1, initialize=[1, 2]) a.construct() - b=Set(dimen=2) + b = Set(dimen=2) b.construct() - self.assertEqual(a!=b,True) + self.assertEqual(a != b, True) self.assertTrue(a.__ne__(Boolean)) self.assertTrue(Boolean != a) def test_contains(self): - a=Set(initialize=[1,3,5,7]) + a = Set(initialize=[1, 3, 5, 7]) a.construct() - b=Set(initialize=[1,3]) + b = Set(initialize=[1, 3]) b.construct() self.assertEqual(b in a, True) self.assertEqual(a in b, False) @@ -3140,15 +3429,15 @@ def test_contains(self): def test_subset(self): # In the set rewrite, the following now works! - #try: + # try: # Integers in Reals # self.fail("test_subset - expected TypeError") - #except TypeError: + # except TypeError: # pass - #try: + # try: # Integers.issubset(Reals) # self.fail("test_subset - expected TypeError") - #except TypeError: + # except TypeError: # pass self.assertTrue(Integers.issubset(Reals)) # Prior to the set rewrite, SetOperators (like issubset) between @@ -3172,20 +3461,20 @@ def test_superset(self): # self.fail("test_subset - expected TypeError") # except TypeError: # pass - #try: + # try: # Integers.issubset(Reals) # self.fail("test_subset - expected TypeError") - #except TypeError: + # except TypeError: # pass self.assertTrue(Reals > Integers) self.assertTrue(Integers.issubset(Reals)) - a=Set(initialize=[1,3,5,7]) + a = Set(initialize=[1, 3, 5, 7]) a.construct() - b=Set(initialize=[1,3]) + b = Set(initialize=[1, 3]) b.construct() - #self.assertEqual(Reals >= b, True) - #self.assertEqual(Reals >= [1,3,7], True) - #self.assertEqual(Reals >= [1,3,7,"a"], False) + # self.assertEqual(Reals >= b, True) + # self.assertEqual(Reals >= [1,3,7], True) + # self.assertEqual(Reals >= [1,3,7,"a"], False) self.assertEqual(a >= b, True) def test_lt(self): @@ -3197,14 +3486,14 @@ def test_lt(self): # pass self.assertTrue(Integers < Reals) - a=Set(initialize=[1,3,5,7]) + a = Set(initialize=[1, 3, 5, 7]) a.construct() a < Reals - b=Set(initialize=[1,3,5]) + b = Set(initialize=[1, 3, 5]) b.construct() - self.assertEqual(a c) def test_or(self): - a=Set(initialize=[1,2,3]) - c=Set(initialize=[(1,2)]) + a = Set(initialize=[1, 2, 3]) + c = Set(initialize=[(1, 2)]) a.construct() c.construct() # In the set rewrite, the following now works! @@ -3250,11 +3539,11 @@ def test_or(self): # pass self.assertEqual(Reals | Integers, Reals) self.assertEqual(a | Integers, Integers) - self.assertEqual(a | c, [1,2,3,(1,2)]) + self.assertEqual(a | c, [1, 2, 3, (1, 2)]) def test_and(self): - a=Set(initialize=[1,2,3]) - c=Set(initialize=[(1,2)]) + a = Set(initialize=[1, 2, 3]) + c = Set(initialize=[(1, 2)]) a.construct() c.construct() # In the set rewrite, the following now works! @@ -3278,9 +3567,9 @@ def test_and(self): self.assertEqual(a & c, EmptySet) def test_xor(self): - a=Set(initialize=[1,2,3]) + a = Set(initialize=[1, 2, 3]) a.construct() - c=Set(initialize=[(1,2)]) + c = Set(initialize=[(1, 2)]) c.construct() # In the set rewrite, the following "mostly works" # try: @@ -3292,9 +3581,11 @@ def test_xor(self): self.assertIn(0.5, X) self.assertNotIn(1, X) with self.assertRaisesRegex( - RangeDifferenceError, r"We do not support subtracting an " - r"infinite discrete range \[0:inf\] from an infinite " - r"continuous range \[-inf..inf\]"): + RangeDifferenceError, + r"We do not support subtracting an " + r"infinite discrete range \[0:inf\] from an infinite " + r"continuous range \[-inf..inf\]", + ): X < Reals # In the set rewrite, the following now works! # try: @@ -3308,12 +3599,12 @@ def test_xor(self): # except ValueError: # pass self.assertEqual(a ^ Integers, Integers - a) - self.assertEqual(a ^ c, SetOf([1,2,3,(1,2)])) + self.assertEqual(a ^ c, SetOf([1, 2, 3, (1, 2)])) def test_sub(self): - a=Set(initialize=[1,2,3]) + a = Set(initialize=[1, 2, 3]) a.construct() - c=Set(initialize=[(1,2)]) + c = Set(initialize=[(1, 2)]) c.construct() # In the set rewrite, the following "mostly works" # try: @@ -3325,9 +3616,11 @@ def test_sub(self): self.assertIn(0.5, X) self.assertNotIn(1, X) with self.assertRaisesRegex( - RangeDifferenceError, r"We do not support subtracting an " - r"infinite discrete range \[0:inf\] from an infinite " - r"continuous range \[-inf..inf\]"): + RangeDifferenceError, + r"We do not support subtracting an " + r"infinite discrete range \[0:inf\] from an infinite " + r"continuous range \[-inf..inf\]", + ): X < Reals # In the set rewrite, the following now works! # try: @@ -3344,8 +3637,8 @@ def test_sub(self): self.assertEqual(a - c, a) def test_mul(self): - a=Set(initialize=[1,2,3]) - c=Set(initialize=[(1,2)]) + a = Set(initialize=[1, 2, 3]) + c = Set(initialize=[(1, 2)]) a.construct() c.construct() # In the set rewrite, the following now works! @@ -3376,15 +3669,15 @@ def tmp_constructor(model, ctr, index): else: return ctr - a=Set(initialize=[1,2,3]) + a = Set(initialize=[1, 2, 3]) a.construct() - b=Set(a, initialize=tmp_constructor) + b = Set(a, initialize=tmp_constructor) try: - b.construct({4:None}) + b.construct({4: None}) self.fail("test_arrayset_construct - expected KeyError") except KeyError: pass - b._constructed=False + b._constructed = False # In the set rewrite, the following now works! # try: # b.construct() @@ -3395,11 +3688,11 @@ def tmp_constructor(model, ctr, index): self.assertEqual(len(b), 3) for i in b: self.assertEqual(i in a, True) - self.assertEqual(b[1], [1,2,3,4,5,6,7,8,9]) - self.assertEqual(b[2], [1,2,3,4,5,6,7,8,9]) - self.assertEqual(b[3], [1,2,3,4,5,6,7,8,9]) + self.assertEqual(b[1], [1, 2, 3, 4, 5, 6, 7, 8, 9]) + self.assertEqual(b[2], [1, 2, 3, 4, 5, 6, 7, 8, 9]) + self.assertEqual(b[3], [1, 2, 3, 4, 5, 6, 7, 8, 9]) - b=Set(a,a, initialize=tmp_constructor) + b = Set(a, a, initialize=tmp_constructor) # In the set rewrite, the following still fails, but with a # different exception: # try: @@ -3407,25 +3700,24 @@ def tmp_constructor(model, ctr, index): # self.fail("test_arrayset_construct - expected ValueError") # except ValueError: # pass - with self.assertRaisesRegex( - TypeError, "'int' object is not iterable"): + with self.assertRaisesRegex(TypeError, "'int' object is not iterable"): b.construct() def test_prodset(self): - a=Set(initialize=[1,2]) + a = Set(initialize=[1, 2]) a.construct() - b=Set(initialize=[6,7]) + b = Set(initialize=[6, 7]) b.construct() - c=a*b + c = a * b c.construct() - self.assertEqual((6,2) in c, False) - c=pyomo.core.base.set.SetProduct(a,b) - c.virtual=True - self.assertEqual((6,2) in c, False) - self.assertEqual((1,7) in c, True) - #c=pyomo.core.base.set.SetProduct() - #c.virtual=True - #c.construct() + self.assertEqual((6, 2) in c, False) + c = pyomo.core.base.set.SetProduct(a, b) + c.virtual = True + self.assertEqual((6, 2) in c, False) + self.assertEqual((1, 7) in c, True) + # c=pyomo.core.base.set.SetProduct() + # c.virtual=True + # c.construct() # the set rewrite removed ALL support for 'initialize=' in # SetOperators (without deprecation). This "feature" is vaguely @@ -3437,196 +3729,166 @@ def test_prodset(self): def virt_constructor(model, y): - return RealSet(validate=lambda model, x: x>y) + return RealSet(validate=lambda model, x: x > y) class TestArraySetVirtual(unittest.TestCase): - def test_construct(self): - a=Set(initialize=[1,2,3]) + a = Set(initialize=[1, 2, 3]) a.construct() - b=Set(a, initialize=virt_constructor) - #b.construct() + b = Set(a, initialize=virt_constructor) + # b.construct() -class TestNestedSetOperations(unittest.TestCase): +class TestNestedSetOperations(unittest.TestCase): def test_union(self): - model = AbstractModel() - s1 = set([1,2]) + s1 = set([1, 2]) model.s1 = Set(initialize=s1) - s2 = set(['a','b']) + s2 = set(['a', 'b']) model.s2 = Set(initialize=s2) - s3 = set([None,True]) + s3 = set([None, True]) model.s3 = Set(initialize=s3) - model.union1 = (model.s1 | (model.s2 | (model.s3 | (model.s3 | model.s2)))) - model.union2 = model.s1 | (model.s2 | (model.s3 | (model.s3 | model.s2))) - model.union3 = ((((model.s1 | model.s2) | model.s3) | model.s3) | model.s2) + model.union1 = model.s1 | (model.s2 | (model.s3 | (model.s3 | model.s2))) + model.union2 = model.s1 | (model.s2 | (model.s3 | (model.s3 | model.s2))) + model.union3 = (((model.s1 | model.s2) | model.s3) | model.s3) | model.s2 inst = model.create_instance() union = s1 | s2 | s3 | s3 | s2 - self.assertTrue(isinstance(inst.union1, - pyomo.core.base.set.SetUnion)) - self.assertEqual(inst.union1, - (s1 | (s2 | (s3 | (s3 | s2))))) - self.assertTrue(isinstance(inst.union2, - pyomo.core.base.set.SetUnion)) - self.assertEqual(inst.union2, - s1 | (s2 | (s3 | (s3 | s2)))) - self.assertTrue(isinstance(inst.union3, - pyomo.core.base.set.SetUnion)) - self.assertEqual(inst.union3, - ((((s1 | s2) | s3) | s3) | s2)) + self.assertTrue(isinstance(inst.union1, pyomo.core.base.set.SetUnion)) + self.assertEqual(inst.union1, (s1 | (s2 | (s3 | (s3 | s2))))) + self.assertTrue(isinstance(inst.union2, pyomo.core.base.set.SetUnion)) + self.assertEqual(inst.union2, s1 | (s2 | (s3 | (s3 | s2)))) + self.assertTrue(isinstance(inst.union3, pyomo.core.base.set.SetUnion)) + self.assertEqual(inst.union3, ((((s1 | s2) | s3) | s3) | s2)) def test_intersection(self): - model = AbstractModel() - s1 = set([1,2]) + s1 = set([1, 2]) model.s1 = Set(initialize=s1) - s2 = set(['a','b']) + s2 = set(['a', 'b']) model.s2 = Set(initialize=s2) - s3 = set([None,True]) + s3 = set([None, True]) model.s3 = Set(initialize=s3) - model.intersection1 = \ - (model.s1 & (model.s2 & (model.s3 & (model.s3 & model.s2)))) - model.intersection2 = \ - model.s1 & (model.s2 & (model.s3 & (model.s3 & model.s2))) - model.intersection3 = \ - ((((model.s1 & model.s2) & model.s3) & model.s3) & model.s2) - model.intersection4 = \ - model.s3 & model.s1 & model.s3 + model.intersection1 = model.s1 & (model.s2 & (model.s3 & (model.s3 & model.s2))) + model.intersection2 = model.s1 & (model.s2 & (model.s3 & (model.s3 & model.s2))) + model.intersection3 = (((model.s1 & model.s2) & model.s3) & model.s3) & model.s2 + model.intersection4 = model.s3 & model.s1 & model.s3 inst = model.create_instance() - self.assertTrue(isinstance(inst.intersection1, - pyomo.core.base.set.SetIntersection)) - self.assertEqual(sorted(inst.intersection1), - sorted((s1 & (s2 & (s3 & (s3 & s2)))))) - self.assertTrue(isinstance(inst.intersection2, - pyomo.core.base.set.SetIntersection)) - self.assertEqual(sorted(inst.intersection2), - sorted(s1 & (s2 & (s3 & (s3 & s2))))) - self.assertTrue(isinstance(inst.intersection3, - pyomo.core.base.set.SetIntersection)) - self.assertEqual(sorted(inst.intersection3), - sorted(((((s1 & s2) & s3) & s3) & s2))) - self.assertTrue(isinstance(inst.intersection4, - pyomo.core.base.set.SetIntersection)) - self.assertEqual(sorted(inst.intersection4), - sorted(s3 & s1 & s3)) + self.assertTrue( + isinstance(inst.intersection1, pyomo.core.base.set.SetIntersection) + ) + self.assertEqual( + sorted(inst.intersection1), sorted((s1 & (s2 & (s3 & (s3 & s2))))) + ) + self.assertTrue( + isinstance(inst.intersection2, pyomo.core.base.set.SetIntersection) + ) + self.assertEqual( + sorted(inst.intersection2), sorted(s1 & (s2 & (s3 & (s3 & s2)))) + ) + self.assertTrue( + isinstance(inst.intersection3, pyomo.core.base.set.SetIntersection) + ) + self.assertEqual( + sorted(inst.intersection3), sorted(((((s1 & s2) & s3) & s3) & s2)) + ) + self.assertTrue( + isinstance(inst.intersection4, pyomo.core.base.set.SetIntersection) + ) + self.assertEqual(sorted(inst.intersection4), sorted(s3 & s1 & s3)) def test_difference(self): - model = AbstractModel() - s1 = set([1,2]) + s1 = set([1, 2]) model.s1 = Set(initialize=s1) - s2 = set(['a','b']) + s2 = set(['a', 'b']) model.s2 = Set(initialize=s2) - s3 = set([None,True]) + s3 = set([None, True]) model.s3 = Set(initialize=s3) - model.difference1 = \ - (model.s1 - (model.s2 - (model.s3 - (model.s3 - model.s2)))) - model.difference2 = \ - model.s1 - (model.s2 - (model.s3 - (model.s3 - model.s2))) - model.difference3 = \ - ((((model.s1 - model.s2) - model.s3) - model.s3) - model.s2) + model.difference1 = model.s1 - (model.s2 - (model.s3 - (model.s3 - model.s2))) + model.difference2 = model.s1 - (model.s2 - (model.s3 - (model.s3 - model.s2))) + model.difference3 = (((model.s1 - model.s2) - model.s3) - model.s3) - model.s2 inst = model.create_instance() - self.assertTrue(isinstance(inst.difference1, - pyomo.core.base.set.SetDifference)) - self.assertEqual(sorted(inst.difference1), - sorted((s1 - (s2 - (s3 - (s3 - s2)))))) - self.assertTrue(isinstance(inst.difference2, - pyomo.core.base.set.SetDifference)) - self.assertEqual(sorted(inst.difference2), - sorted(s1 - (s2 - (s3 - (s3 - s2))))) - self.assertTrue(isinstance(inst.difference3, - pyomo.core.base.set.SetDifference)) - self.assertEqual(sorted(inst.difference3), - sorted(((((s1 - s2) - s3) - s3) - s2))) + self.assertTrue(isinstance(inst.difference1, pyomo.core.base.set.SetDifference)) + self.assertEqual( + sorted(inst.difference1), sorted((s1 - (s2 - (s3 - (s3 - s2))))) + ) + self.assertTrue(isinstance(inst.difference2, pyomo.core.base.set.SetDifference)) + self.assertEqual(sorted(inst.difference2), sorted(s1 - (s2 - (s3 - (s3 - s2))))) + self.assertTrue(isinstance(inst.difference3, pyomo.core.base.set.SetDifference)) + self.assertEqual( + sorted(inst.difference3), sorted(((((s1 - s2) - s3) - s3) - s2)) + ) def test_symmetric_difference(self): - model = AbstractModel() - s1 = set([1,2]) + s1 = set([1, 2]) model.s1 = Set(initialize=s1) - s2 = set([4,5]) + s2 = set([4, 5]) model.s2 = Set(initialize=s2) - s3 = set([0,True]) + s3 = set([0, True]) model.s3 = Set(initialize=s3) - model.symdiff1 = \ - (model.s1 ^ (model.s2 ^ (model.s3 ^ (model.s3 ^ model.s2)))) - model.symdiff2 = \ - model.s1 ^ (model.s2 ^ (model.s3 ^ (model.s3 ^ model.s2))) - model.symdiff3 = \ - ((((model.s1 ^ model.s2) ^ model.s3) ^ model.s3) ^ model.s2) - model.symdiff4 = \ - model.s1 ^ model.s2 ^ model.s3 + model.symdiff1 = model.s1 ^ (model.s2 ^ (model.s3 ^ (model.s3 ^ model.s2))) + model.symdiff2 = model.s1 ^ (model.s2 ^ (model.s3 ^ (model.s3 ^ model.s2))) + model.symdiff3 = (((model.s1 ^ model.s2) ^ model.s3) ^ model.s3) ^ model.s2 + model.symdiff4 = model.s1 ^ model.s2 ^ model.s3 inst = model.create_instance() - self.assertTrue(isinstance(inst.symdiff1, - pyomo.core.base.set.SetSymmetricDifference)) - self.assertEqual(sorted(inst.symdiff1), - sorted((s1 ^ (s2 ^ (s3 ^ (s3 ^ s2)))))) - self.assertTrue(isinstance(inst.symdiff2, - pyomo.core.base.set.SetSymmetricDifference)) - self.assertEqual(sorted(inst.symdiff2), - sorted(s1 ^ (s2 ^ (s3 ^ (s3 ^ s2))))) - self.assertTrue(isinstance(inst.symdiff3, - pyomo.core.base.set.SetSymmetricDifference)) - self.assertEqual(sorted(inst.symdiff3), - sorted(((((s1 ^ s2) ^ s3) ^ s3) ^ s2))) - self.assertTrue(isinstance(inst.symdiff4, - pyomo.core.base.set.SetSymmetricDifference)) - self.assertEqual(sorted(inst.symdiff4), - sorted(s1 ^ s2 ^ s3)) + self.assertTrue( + isinstance(inst.symdiff1, pyomo.core.base.set.SetSymmetricDifference) + ) + self.assertEqual(sorted(inst.symdiff1), sorted((s1 ^ (s2 ^ (s3 ^ (s3 ^ s2)))))) + self.assertTrue( + isinstance(inst.symdiff2, pyomo.core.base.set.SetSymmetricDifference) + ) + self.assertEqual(sorted(inst.symdiff2), sorted(s1 ^ (s2 ^ (s3 ^ (s3 ^ s2))))) + self.assertTrue( + isinstance(inst.symdiff3, pyomo.core.base.set.SetSymmetricDifference) + ) + self.assertEqual(sorted(inst.symdiff3), sorted(((((s1 ^ s2) ^ s3) ^ s3) ^ s2))) + self.assertTrue( + isinstance(inst.symdiff4, pyomo.core.base.set.SetSymmetricDifference) + ) + self.assertEqual(sorted(inst.symdiff4), sorted(s1 ^ s2 ^ s3)) def test_product(self): - model = AbstractModel() - s1 = set([1,2]) + s1 = set([1, 2]) model.s1 = Set(initialize=s1) - s2 = set([4,5]) + s2 = set([4, 5]) model.s2 = Set(initialize=s2) - s3 = set([0,True]) + s3 = set([0, True]) model.s3 = Set(initialize=s3) - model.product1 = \ - (model.s1 * (model.s2 * (model.s3 * (model.s3 * model.s2)))) - model.product2 = \ - model.s1 * (model.s2 * (model.s3 * (model.s3 * model.s2))) - model.product3 = \ - ((((model.s1 * model.s2) * model.s3) * model.s3) * model.s2) + model.product1 = model.s1 * (model.s2 * (model.s3 * (model.s3 * model.s2))) + model.product2 = model.s1 * (model.s2 * (model.s3 * (model.s3 * model.s2))) + model.product3 = (((model.s1 * model.s2) * model.s3) * model.s3) * model.s2 inst = model.create_instance() p = itertools.product - self.assertTrue(isinstance(inst.product1, - pyomo.core.base.set.SetProduct)) - prod1 = set([flatten_tuple(i) \ - for i in set( p(s1,p(s2,p(s3,p(s3,s2)))) )]) - self.assertEqual(sorted(inst.product1), - sorted(prod1)) - self.assertTrue(isinstance(inst.product2, - pyomo.core.base.set.SetProduct)) - prod2 = set([flatten_tuple(i) \ - for i in set( p(s1,p(s2,p(s3,p(s3,s2)))) )]) - self.assertEqual(sorted(inst.product2), - sorted(prod2)) - self.assertTrue(isinstance(inst.product3, - pyomo.core.base.set.SetProduct)) - prod3 = set([flatten_tuple(i) \ - for i in set( p(p(p(p(s1,s2),s3),s3),s2) )]) - self.assertEqual(sorted(inst.product3), - sorted(prod3)) + self.assertTrue(isinstance(inst.product1, pyomo.core.base.set.SetProduct)) + prod1 = set([flatten_tuple(i) for i in set(p(s1, p(s2, p(s3, p(s3, s2)))))]) + self.assertEqual(sorted(inst.product1), sorted(prod1)) + self.assertTrue(isinstance(inst.product2, pyomo.core.base.set.SetProduct)) + prod2 = set([flatten_tuple(i) for i in set(p(s1, p(s2, p(s3, p(s3, s2)))))]) + self.assertEqual(sorted(inst.product2), sorted(prod2)) + self.assertTrue(isinstance(inst.product3, pyomo.core.base.set.SetProduct)) + prod3 = set([flatten_tuple(i) for i in set(p(p(p(p(s1, s2), s3), s3), s2))]) + self.assertEqual(sorted(inst.product3), sorted(prod3)) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_smap.py b/pyomo/core/tests/unit/test_smap.py index bfd0c27dd78..2b9d2f192c0 100644 --- a/pyomo/core/tests/unit/test_smap.py +++ b/pyomo/core/tests/unit/test_smap.py @@ -13,25 +13,36 @@ # import pyomo.common.unittest as unittest -from pyomo.environ import ConcreteModel, Set, Var, Objective, Constraint, Block, SymbolMap, TextLabeler +from pyomo.environ import ( + ConcreteModel, + Set, + Var, + Objective, + Constraint, + Block, + SymbolMap, + TextLabeler, +) from pyomo.core.base.symbol_map import symbol_map_from_instance class Test(unittest.TestCase): - def setUp(self): # # Create model instance # model = ConcreteModel() - model.A = Set(initialize=[1,2,3]) + model.A = Set(initialize=[1, 2, 3]) model.x = Var() model.y = Var(model.A, dense=True) model.o1 = Objective(expr=model.x) + def o2_(model, i): return model.x + model.o2 = Objective(model.A, rule=o2_) model.c1 = Constraint(expr=model.x >= 1) + def c2_(model, i): if i == 1: return model.x <= 2 @@ -39,6 +50,7 @@ def c2_(model, i): return (3, model.x, 4) else: return model.x == 5 + model.c2 = Constraint(model.A, rule=c2_) model.b = Block() model.b.x = Var() @@ -49,64 +61,103 @@ def tearDown(self): self.instance = None def test_add(self): - smap = SymbolMap() + smap = SymbolMap() smap.addSymbol(self.instance.x, "x") smap.addSymbol(self.instance.y[1], "y[1]") - self.assertEqual( set(smap.bySymbol.keys()), set(['x','y[1]'])) + self.assertEqual(set(smap.bySymbol.keys()), set(['x', 'y[1]'])) def test_adds(self): - smap = SymbolMap() + smap = SymbolMap() labeler = TextLabeler() - smap.addSymbols((obj,labeler(obj)) for obj in self.instance.component_data_objects(Var)) - self.assertEqual( set(smap.bySymbol.keys()), set(['x','y(1)','y(2)','y(3)','b_x','b_y(1)','b_y(2)','b_y(3)'])) + smap.addSymbols( + (obj, labeler(obj)) for obj in self.instance.component_data_objects(Var) + ) + self.assertEqual( + set(smap.bySymbol.keys()), + set(['x', 'y(1)', 'y(2)', 'y(3)', 'b_x', 'b_y(1)', 'b_y(2)', 'b_y(3)']), + ) def test_create(self): - smap = SymbolMap() + smap = SymbolMap() labeler = TextLabeler() smap.createSymbol(self.instance.x, labeler) smap.createSymbol(self.instance.y[1], labeler) - self.assertEqual( set(smap.bySymbol.keys()), set(['x','y(1)'])) + self.assertEqual(set(smap.bySymbol.keys()), set(['x', 'y(1)'])) def test_creates(self): - smap = SymbolMap() + smap = SymbolMap() labeler = TextLabeler() smap.createSymbols(self.instance.component_data_objects(Var), labeler) - self.assertEqual( set(smap.bySymbol.keys()), set(['x','y(1)','y(2)','y(3)','b_x','b_y(1)','b_y(2)','b_y(3)'])) + self.assertEqual( + set(smap.bySymbol.keys()), + set(['x', 'y(1)', 'y(2)', 'y(3)', 'b_x', 'b_y(1)', 'b_y(2)', 'b_y(3)']), + ) def test_get(self): - smap = SymbolMap() + smap = SymbolMap() labeler = TextLabeler() self.assertEqual('x', smap.getSymbol(self.instance.x, labeler)) self.assertEqual('y(1)', smap.getSymbol(self.instance.y[1], labeler)) - self.assertEqual( set(smap.bySymbol.keys()), set(['x','y(1)'])) + self.assertEqual(set(smap.bySymbol.keys()), set(['x', 'y(1)'])) self.assertEqual('x', smap.getSymbol(self.instance.x, labeler)) def test_alias_and_getObject(self): - smap = SymbolMap() + smap = SymbolMap() smap.addSymbol(self.instance.x, 'x') smap.alias(self.instance.x, 'X') - self.assertEqual( set(smap.bySymbol.keys()), set(['x'])) - self.assertEqual( set(smap.aliases.keys()), set(['X'])) - self.assertEqual( id(smap.getObject('x')), id(self.instance.x) ) - self.assertEqual( id(smap.getObject('X')), id(self.instance.x) ) + self.assertEqual(set(smap.bySymbol.keys()), set(['x'])) + self.assertEqual(set(smap.aliases.keys()), set(['X'])) + self.assertEqual(id(smap.getObject('x')), id(self.instance.x)) + self.assertEqual(id(smap.getObject('X')), id(self.instance.x)) def test_from_instance(self): smap = symbol_map_from_instance(self.instance) - self.assertEqual( set(smap.bySymbol.keys()), set(['x','y(1)','y(2)','y(3)','b_x','b_y(1)','b_y(2)','b_y(3)','o1','o2(1)','o2(2)','o2(3)','c1','c2(1)','c2(2)','c2(3)'])) - self.assertEqual( set(smap.aliases.keys()), set(['c_e_c2(3)_', - '__default_objective__', - 'c_u_c2(1)_', - 'c_l_c1_', - 'r_u_c2(2)_', - 'r_l_c2(2)_']) ) + self.assertEqual( + set(smap.bySymbol.keys()), + set( + [ + 'x', + 'y(1)', + 'y(2)', + 'y(3)', + 'b_x', + 'b_y(1)', + 'b_y(2)', + 'b_y(3)', + 'o1', + 'o2(1)', + 'o2(2)', + 'o2(3)', + 'c1', + 'c2(1)', + 'c2(2)', + 'c2(3)', + ] + ), + ) + self.assertEqual( + set(smap.aliases.keys()), + set( + [ + 'c_e_c2(3)_', + '__default_objective__', + 'c_u_c2(1)_', + 'c_l_c1_', + 'r_u_c2(2)_', + 'r_l_c2(2)_', + ] + ), + ) def test_error1(self): - smap = SymbolMap() + smap = SymbolMap() labeler = TextLabeler() self.assertEqual('x', smap.getSymbol(self.instance.x, labeler)) + class FOO(object): def __call__(self, *args): return 'x' + labeler = FOO() try: self.assertEqual('x', smap.getSymbol(self.instance.y[1], labeler)) @@ -115,12 +166,13 @@ def __call__(self, *args): pass def test_error2(self): - smap = SymbolMap() + smap = SymbolMap() smap.addSymbol(self.instance.x, 'x') smap.alias(self.instance.x, 'X') - self.assertEqual( id(smap.getObject('x')), id(self.instance.x) ) - self.assertEqual( id(smap.getObject('X')), id(self.instance.x) ) - self.assertEqual( id(smap.getObject('y')), id(SymbolMap.UnknownSymbol) ) + self.assertEqual(id(smap.getObject('x')), id(self.instance.x)) + self.assertEqual(id(smap.getObject('X')), id(self.instance.x)) + self.assertEqual(id(smap.getObject('y')), id(SymbolMap.UnknownSymbol)) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_sos.py b/pyomo/core/tests/unit/test_sos.py index b908c0a9ed0..92a8a5eabaa 100644 --- a/pyomo/core/tests/unit/test_sos.py +++ b/pyomo/core/tests/unit/test_sos.py @@ -14,13 +14,13 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest from pyomo.environ import ConcreteModel, AbstractModel, SOSConstraint, Var, Set class TestErrors(unittest.TestCase): - def test_arg1(self): M = ConcreteModel() try: @@ -51,27 +51,26 @@ def test_negative_weights(self): M = ConcreteModel() M.v = Var() try: - M.c = SOSConstraint(var=M.v, weights={None:-1}, sos=1) + M.c = SOSConstraint(var=M.v, weights={None: -1}, sos=1) self.fail("Expected ValueError") except ValueError: pass def test_ordered(self): M = ConcreteModel() - M.v = Var({1,2,3}) + M.v = Var({1, 2, 3}) try: M.c = SOSConstraint(var=M.v, sos=2) self.fail("Expected ValueError") except ValueError: pass M = ConcreteModel() - M.s = Set(initialize=[1,2,3], ordered=True) + M.s = Set(initialize=[1, 2, 3], ordered=True) M.v = Var(M.s) M.c = SOSConstraint(var=M.v, sos=2) class TestSimple(unittest.TestCase): - def setUp(self): # # Create Model @@ -83,13 +82,13 @@ def tearDown(self): def test_num_vars(self): # Test the number of variables - self.M.x = Var([1,2,3]) + self.M.x = Var([1, 2, 3]) self.M.c = SOSConstraint(var=self.M.x, sos=1) self.assertEqual(self.M.c.num_variables(), 3) def test_level(self): # Test level property - self.M.x = Var([1,2,3]) + self.M.x = Var([1, 2, 3]) self.M.c = SOSConstraint(var=self.M.x, sos=1) self.assertEqual(self.M.c.level, 1) self.M.c.level = 2 @@ -102,67 +101,88 @@ def test_level(self): def test_get_variables(self): # Test that you get the correct variables - self.M.x = Var([1,2,3]) + self.M.x = Var([1, 2, 3]) self.M.c = SOSConstraint(var=self.M.x, sos=1) - self.assertEqual(set(id(v) for v in self.M.c.get_variables()), - set(id(v) for v in self.M.x.values())) + self.assertEqual( + set(id(v) for v in self.M.c.get_variables()), + set(id(v) for v in self.M.x.values()), + ) class TestExamples(unittest.TestCase): - def test1(self): M = ConcreteModel() M.x = Var(range(20)) M.c = SOSConstraint(var=M.x, sos=1) - self.assertEqual(set((v.name,w) for v,w in M.c.get_items()), - set((M.x[i].name, i+1) for i in range(20))) + self.assertEqual( + set((v.name, w) for v, w in M.c.get_items()), + set((M.x[i].name, i + 1) for i in range(20)), + ) def test2(self): # Use an index set, which is a subset of M.x.index_set() M = ConcreteModel() M.x = Var(range(20)) M.c = SOSConstraint(var=M.x, index=list(range(10)), sos=1) - self.assertEqual(set((v.name,w) for v,w in M.c.get_items()), - set((M.x[i].name, i+1) for i in range(10))) + self.assertEqual( + set((v.name, w) for v, w in M.c.get_items()), + set((M.x[i].name, i + 1) for i in range(10)), + ) def test3(self): # User-specified weights - w = {1:10, 2:2, 3:30} + w = {1: 10, 2: 2, 3: 30} M = ConcreteModel() - M.x = Var([1,2,3]) + M.x = Var([1, 2, 3]) M.c = SOSConstraint(var=M.x, weights=w, sos=1) - self.assertEqual(set((v.name,w) for v,w in M.c.get_items()), - set((M.x[i].name, w[i]) for i in [1,2,3])) + self.assertEqual( + set((v.name, w) for v, w in M.c.get_items()), + set((M.x[i].name, w[i]) for i in [1, 2, 3]), + ) def test4(self): # User-specified weights - w = {1:10, 2:2, 3:30} + w = {1: 10, 2: 2, 3: 30} + def rule(model): return list(M.x[i] for i in M.x), [10, 2, 30] + M = ConcreteModel() - M.x = Var([1,2,3], dense=True) + M.x = Var([1, 2, 3], dense=True) M.c = SOSConstraint(rule=rule, sos=1) - self.assertEqual(set((v.name,w) for v,w in M.c.get_items()), - set((M.x[i].name, w[i]) for i in [1,2,3])) + self.assertEqual( + set((v.name, w) for v, w in M.c.get_items()), + set((M.x[i].name, w[i]) for i in [1, 2, 3]), + ) def test10(self): M = ConcreteModel() - M.x = Var([1,2,3]) - M.c = SOSConstraint([0,1], var=M.x, sos=1, index={0:[1,2], 1:[2,3]}) - self.assertEqual(set((v.name,w) for v,w in M.c[0].get_items()), - set((M.x[i].name, i) for i in [1,2])) - self.assertEqual(set((v.name,w) for v,w in M.c[1].get_items()), - set((M.x[i].name, i-1) for i in [2,3])) + M.x = Var([1, 2, 3]) + M.c = SOSConstraint([0, 1], var=M.x, sos=1, index={0: [1, 2], 1: [2, 3]}) + self.assertEqual( + set((v.name, w) for v, w in M.c[0].get_items()), + set((M.x[i].name, i) for i in [1, 2]), + ) + self.assertEqual( + set((v.name, w) for v, w in M.c[1].get_items()), + set((M.x[i].name, i - 1) for i in [2, 3]), + ) def test11(self): - w = {1:10, 2:2, 3:30} - M = ConcreteModel() - M.x = Var([1,2,3], dense=True) - M.c = SOSConstraint([0,1], var=M.x, weights=w, sos=1, index={0:[1,2], 1:[2,3]}) - self.assertEqual(set((v.name,w) for v,w in M.c[0].get_items()), - set((M.x[i].name, w[i]) for i in [1,2])) - self.assertEqual(set((v.name,w) for v,w in M.c[1].get_items()), - set((M.x[i].name, w[i]) for i in [2,3])) + w = {1: 10, 2: 2, 3: 30} + M = ConcreteModel() + M.x = Var([1, 2, 3], dense=True) + M.c = SOSConstraint( + [0, 1], var=M.x, weights=w, sos=1, index={0: [1, 2], 1: [2, 3]} + ) + self.assertEqual( + set((v.name, w) for v, w in M.c[0].get_items()), + set((M.x[i].name, w[i]) for i in [1, 2]), + ) + self.assertEqual( + set((v.name, w) for v, w in M.c[1].get_items()), + set((M.x[i].name, w[i]) for i in [2, 3]), + ) def test12(self): def rule(model, i): @@ -170,24 +190,33 @@ def rule(model, i): return list(M.x[i] for i in M.x), [10, 2, 30] else: return list(M.x[i] for i in M.x), [1, 20, 3] - w = {0:{1:10, 2:2, 3:30}, 1:{1:1, 2:20, 3:3}} + + w = {0: {1: 10, 2: 2, 3: 30}, 1: {1: 1, 2: 20, 3: 3}} M = ConcreteModel() - M.x = Var([1,2,3], dense=True) - M.c = SOSConstraint([0,1], rule=rule, sos=1) - self.assertEqual(set((v.name,w) for v,w in M.c[0].get_items()), - set((M.x[i].name, w[0][i]) for i in [1,2,3])) - self.assertEqual(set((v.name,w) for v,w in M.c[1].get_items()), - set((M.x[i].name, w[1][i]) for i in [1,2,3])) + M.x = Var([1, 2, 3], dense=True) + M.c = SOSConstraint([0, 1], rule=rule, sos=1) + self.assertEqual( + set((v.name, w) for v, w in M.c[0].get_items()), + set((M.x[i].name, w[0][i]) for i in [1, 2, 3]), + ) + self.assertEqual( + set((v.name, w) for v, w in M.c[1].get_items()), + set((M.x[i].name, w[1][i]) for i in [1, 2, 3]), + ) def test13(self): - I = {0:[1,2], 1:[2,3]} - M = ConcreteModel() - M.x = Var([1,2,3], dense=True) - M.c = SOSConstraint([0,1], var=M.x, index=I, sos=1) - self.assertEqual(set((v.name,w) for v,w in M.c[0].get_items()), - set((M.x[i].name, i) for i in I[0])) - self.assertEqual(set((v.name,w) for v,w in M.c[1].get_items()), - set((M.x[i].name, i-1) for i in I[1])) + I = {0: [1, 2], 1: [2, 3]} + M = ConcreteModel() + M.x = Var([1, 2, 3], dense=True) + M.c = SOSConstraint([0, 1], var=M.x, index=I, sos=1) + self.assertEqual( + set((v.name, w) for v, w in M.c[0].get_items()), + set((M.x[i].name, i) for i in I[0]), + ) + self.assertEqual( + set((v.name, w) for v, w in M.c[1].get_items()), + set((M.x[i].name, i - 1) for i in I[1]), + ) def test14(self): def rule(model, i): @@ -195,13 +224,16 @@ def rule(model, i): return SOSConstraint.Skip else: return list(M.x[i] for i in M.x), [1, 20, 3] - w = {0:{1:10, 2:2, 3:30}, 1:{1:1, 2:20, 3:3}} + + w = {0: {1: 10, 2: 2, 3: 30}, 1: {1: 1, 2: 20, 3: 3}} M = ConcreteModel() - M.x = Var([1,2,3], dense=True) - M.c = SOSConstraint([0,1], rule=rule, sos=1) + M.x = Var([1, 2, 3], dense=True) + M.c = SOSConstraint([0, 1], rule=rule, sos=1) self.assertEqual(list(M.c.keys()), [1]) - self.assertEqual(set((v.name,w) for v,w in M.c[1].get_items()), - set((M.x[i].name, w[1][i]) for i in [1,2,3])) + self.assertEqual( + set((v.name, w) for v, w in M.c[1].get_items()), + set((M.x[i].name, w[1][i]) for i in [1, 2, 3]), + ) def test_abstract_index(self): model = AbstractModel() @@ -209,8 +241,8 @@ def test_abstract_index(self): model.B = Set(initialize=[1]) model.C = model.A | model.B M = ConcreteModel() - M.x = Var([1,2,3]) - M.c = SOSConstraint(model.C, var=M.x, sos=1, index={0:[1,2], 1:[2,3]}) + M.x = Var([1, 2, 3]) + M.c = SOSConstraint(model.C, var=M.x, sos=1, index={0: [1, 2], 1: [2, 3]}) if __name__ == "__main__": diff --git a/pyomo/core/tests/unit/test_sos_v2.py b/pyomo/core/tests/unit/test_sos_v2.py new file mode 100644 index 00000000000..8b6fab549a2 --- /dev/null +++ b/pyomo/core/tests/unit/test_sos_v2.py @@ -0,0 +1,1101 @@ +# ***************************************************************************** +# ***************************************************************************** + +import math +import pyomo.environ as pyo +import pyomo.common.unittest as unittest +from pyomo.opt import check_available_solvers + +solver_name = "scip" + +solver_available = bool(check_available_solvers(solver_name)) + +# ***************************************************************************** +# ***************************************************************************** +# ***************************************************************************** +# ***************************************************************************** + + +@unittest.skipIf(not solver_available, "The solver is not available.") +class SOSProblem_nonindexed(object): + "Test non-indexed SOS using a single pyomo Var component." + + def verify( + self, model, sos, exp_res, abs_tol, use_rule, case, show_output: bool = False + ): + "Make sure the outcome is as expected." + + opt = pyo.SolverFactory(solver_name) + + opt.solve(model, tee=show_output) + + assert len(model.mysos) != 0 + + assert math.isclose(pyo.value(model.OBJ), exp_res, abs_tol=abs_tol) + + # ************************************************************************* + + def do_it(self, test_number): + # sos, expect. result, absolute tolerance, use rule parameter, case + (sos, exp_res, abs_tol, use_rule, case) = self.test_vectors[test_number] + + model = self.set_problem_up(case=case, n=sos, use_rule=use_rule) + + self.verify( + model=model, + sos=sos, + exp_res=exp_res, + abs_tol=abs_tol, + use_rule=use_rule, + case=case, + ) + + # ************************************************************************* + + test_vectors = [ + # sos, expect. result, absolute tolerance, use rule parameter, case + (1, 0.04999999999999999, 1e-3, True, 0), # 1 + (1, 0.04999999999999999, 1e-3, False, 0), # 2 + (2, -0.07500000000000001, 1e-3, True, 0), # 3 + (2, -0.07500000000000001, 1e-3, False, 0), # 4 + (1, 0.04999999999999999, 1e-3, True, 1), # 5 + (1, 0.04999999999999999, 1e-3, False, 1), # 6 + (2, -0.07500000000000001, 1e-3, True, 1), # 7 + (2, -0.07500000000000001, 1e-3, False, 1), # 8 + (1, 0.04999999999999999, 1e-3, True, 2), # 9 + (1, 0.04999999999999999, 1e-3, False, 2), # 10 + (2, -0.07500000000000001, 1e-3, True, 2), # 11 + (2, -0.07500000000000001, 1e-3, False, 2), # 12 + (1, 0.04999999999999999, 1e-3, True, 3), # 13 + (1, 0.04999999999999999, 1e-3, False, 3), # 14 + (2, -0.07500000000000001, 1e-3, True, 3), # 15 + (2, -0.07500000000000001, 1e-3, False, 3), # 16 + (1, 0.04999999999999999, 1e-3, True, 4), # 17 + (1, 0.04999999999999999, 1e-3, False, 4), # 18 + (2, -0.07500000000000001, 1e-3, True, 4), # 19 + (2, -0.07500000000000001, 1e-3, False, 4), # 20 + (1, 0.04999999999999999, 1e-3, True, 5), # 21 + (1, 0.04999999999999999, 1e-3, False, 5), # 22 + (2, -0.07500000000000001, 1e-3, True, 5), # 23 + (2, -0.07500000000000001, 1e-3, False, 5), # 24 + (1, 0.04999999999999999, 1e-3, True, 6), # 25 + (1, 0.04999999999999999, 1e-3, False, 6), # 26 + (2, -0.07500000000000001, 1e-3, True, 6), # 27 + (2, -0.07500000000000001, 1e-3, False, 6), # 28 + # trigger the error + (1, 0.04999999999999999, 1e-3, True, 7), + (1, 0.04999999999999999, 1e-3, False, 7), + (2, -0.07500000000000001, 1e-3, True, 7), + (2, -0.07500000000000001, 1e-3, False, 7), + ] + + def set_problem_up(self, case: int == 0, n: int = 1, use_rule: bool = False): + "Create the problem." + # concrete model + + model = pyo.ConcreteModel() + model.x = pyo.Var([1], domain=pyo.NonNegativeReals, bounds=(0, 40)) + + model.A = pyo.Set(initialize=[1, 2, 4, 6]) + model.y = pyo.Var(model.A, domain=pyo.NonNegativeReals, bounds=(0, 2)) + + model.OBJ = pyo.Objective( + expr=( + 1 * model.x[1] + + 2 * model.y[1] + + 3 * model.y[2] + + -0.1 * model.y[4] + + 0.5 * model.y[6] + ) + ) + + model.ConstraintYmin = pyo.Constraint( + expr=(model.x[1] + model.y[1] + model.y[2] + model.y[6] >= 0.25) + ) + + if case == 0: + if use_rule: + + def rule_mysos(m): + return [m.y[a] for a in m.A] + + model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=n) + + else: + # index is not provided, nor are the weights + model.mysos = pyo.SOSConstraint(var=model.y, sos=n) + + elif case == 1: + # no weights, but use a list + + index = [2, 4, 6] + + if use_rule: + + def rule_mysos(m): + return ( + [m.y[a] for a in index], + [i + 1 for i, _ in enumerate(index)], + ) + + model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=n) + + else: + model.mysos = pyo.SOSConstraint(var=model.y, index=index, sos=n) + + elif case == 2: + # no weights, but use pyo.Set component (has to be part of the model) + + model.mysosindex = pyo.Set(initialize=[2, 4, 6], within=model.A) + + if use_rule: + + def rule_mysos(m): + return ( + [m.y[a] for a in m.mysosindex], + [i + 1 for i, _ in enumerate(m.mysosindex)], + ) + + model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=n) + + else: + model.mysos = pyo.SOSConstraint( + var=model.y, index=model.mysosindex, sos=n + ) + + elif case == 3: + # with weights, using a list and a dict + + index = [2, 4, 6] + weights = {2: 25.0, 4: 18.0, 6: 22} + + if use_rule: + + def rule_mysos(m): + return [m.y[a] for a in index], [weights[a] for a in index] + + model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=n) + + else: + model.mysos = pyo.SOSConstraint( + var=model.y, index=index, weights=weights, sos=n + ) + + elif case == 4: + # with weights, using a set and a param + + model.mysosindex = pyo.Set(initialize=[2, 4, 6], within=model.A) + model.mysosweights = pyo.Param( + model.mysosindex, initialize={2: 25.0, 4: 18.0, 6: 22} + ) + + if use_rule: + + def rule_mysos(m): + return ( + [m.y[a] for a in m.mysosindex], + [m.mysosweights[a] for a in m.mysosindex], + ) + + model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=n) + + else: + model.mysos = pyo.SOSConstraint( + var=model.y, + index=model.mysosindex, + weights=model.mysosweights, + sos=n, + ) + + elif case == 5: + # index is not provided, but the weights are provided as a dict + + weights = {1: 3, 2: 25.0, 4: 18.0, 6: 22} + + if use_rule: + + def rule_mysos(m): + return ([m.y[a] for a in m.y], [weights[a] for a in m.y]) + + model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=n) + + else: + model.mysos = pyo.SOSConstraint(var=model.y, sos=n, weights=weights) + + elif case == 6: + # index is not provided, but the weights are provided as a dict + + model.mysosweights = pyo.Param( + [1, 2, 4, 6], initialize={1: 3, 2: 25.0, 4: 18.0, 6: 22} + ) + + if use_rule: + + def rule_mysos(m): + return ([m.y[a] for a in m.y], [m.mysosweights[a] for a in m.y]) + + model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=n) + + else: + model.mysos = pyo.SOSConstraint( + var=model.y, sos=n, weights=model.mysosweights + ) + + else: + raise NotImplementedError + + return model + + # ************************************************************************* + # ************************************************************************* + + +# ***************************************************************************** + + +class TestSOS_noindex_000(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(0) + + +# ***************************************************************************** + + +class TestSOS_noindex_001(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(1) + + +# ***************************************************************************** + + +class TestSOS_noindex_002(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(2) + + +# ***************************************************************************** + + +class TestSOS_noindex_003(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(3) + + +# ***************************************************************************** + + +class TestSOS_noindex_004(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(4) + + +# ***************************************************************************** + + +class TestSOS_noindex_005(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(5) + + +# ***************************************************************************** + + +class TestSOS_noindex_006(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(6) + + +# ***************************************************************************** + + +class TestSOS_noindex_007(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(7) + + +# ***************************************************************************** + + +class TestSOS_noindex_008(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(8) + + +# ***************************************************************************** + + +class TestSOS_noindex_009(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(9) + + +# ***************************************************************************** + + +class TestSOS_noindex_010(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(10) + + +# ***************************************************************************** + + +class TestSOS_noindex_011(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(11) + + +# ***************************************************************************** + + +class TestSOS_noindex_012(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(12) + + +# ***************************************************************************** + + +class TestSOS_noindex_013(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(13) + + +# ***************************************************************************** + + +class TestSOS_noindex_014(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(14) + + +# ***************************************************************************** + + +class TestSOS_noindex_015(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(15) + + +# ***************************************************************************** + + +class TestSOS_noindex_016(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(16) + + +# ***************************************************************************** + + +class TestSOS_noindex_017(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(17) + + +# ***************************************************************************** + + +class TestSOS_noindex_018(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(18) + + +# ***************************************************************************** + + +class TestSOS_noindex_019(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(19) + + +# ***************************************************************************** + + +class TestSOS_noindex_020(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(20) + + +# ***************************************************************************** + + +class TestSOS_noindex_021(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(21) + + +# ***************************************************************************** + + +class TestSOS_noindex_022(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(22) + + +# ***************************************************************************** + + +class TestSOS_noindex_023(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(23) + + +# ***************************************************************************** + + +class TestSOS_noindex_024(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(24) + + +# ***************************************************************************** + + +class TestSOS_noindex_025(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(25) + + +# ***************************************************************************** + + +class TestSOS_noindex_026(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(26) + + +# ***************************************************************************** + + +class TestSOS_noindex_027(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + self.do_it(27) + + +# ***************************************************************************** + + +class TestSOS_noindex_028(SOSProblem_nonindexed, unittest.TestCase): + def test(self): + error_triggered = False + try: + self.do_it(28) + except NotImplementedError: + error_triggered = True + assert error_triggered + + +# ***************************************************************************** +# ***************************************************************************** +# ***************************************************************************** +# ***************************************************************************** + + +@unittest.skipIf(not solver_available, "The solver is not available.") +class SOSProblem_nonindexed_multivar(object): + "Test non-indexed SOS made up of different Var components." + + def verify(self, model, sos, exp_res, abs_tol, show_output: bool = False): + "Make sure the outcome is as expected." + + opt = pyo.SolverFactory(solver_name) + + problem = model.create_instance() + + opt.solve(problem, tee=show_output) + + assert len(problem.mysos) != 0 + + assert math.isclose(pyo.value(problem.OBJ), exp_res, abs_tol=abs_tol) + + # ************************************************************************* + + def do_it(self, test_number): + # sos, expect. result, absolute tolerance, use rule parameter, case + (sos, exp_res, abs_tol) = self.test_vectors[test_number] + + model = self.set_problem_up(n=sos) + + self.verify(model=model, sos=sos, exp_res=exp_res, abs_tol=abs_tol) + + # ************************************************************************* + + test_vectors = [ + # sos, expected result, absolute tolerance + (1, 0.125, 1e-3), + (2, -0.07500000000000001, 1e-3), + ] + + def set_problem_up(self, n: int = 1): + "Create the problem." + # concrete model + + model = pyo.ConcreteModel() + model.x = pyo.Var([1], domain=pyo.NonNegativeReals, bounds=(0, 40)) + + model.A = pyo.Set(initialize=[1, 2, 4, 6]) + model.y = pyo.Var(model.A, domain=pyo.NonNegativeReals, bounds=(0, 2)) + + model.OBJ = pyo.Objective( + expr=( + 1 * model.x[1] + + 2 * model.y[1] + + 3 * model.y[2] + + -0.1 * model.y[4] + + 0.5 * model.y[6] + ) + ) + + model.ConstraintYmin = pyo.Constraint( + expr=(model.x[1] + model.y[1] + model.y[2] + model.y[6] >= 0.25) + ) + + def rule_mysos(m): + var_list = [m.x[a] for a in m.x] + var_list.extend([m.y[a] for a in m.A]) + weight_list = [i + 1 for i in range(len(var_list))] + return (var_list, weight_list) + + model.mysos = pyo.SOSConstraint(rule=rule_mysos, sos=n) + + return model + + # ************************************************************************* + + +# ***************************************************************************** + + +class TestSOS_noindexmulti_000(SOSProblem_nonindexed_multivar, unittest.TestCase): + def test(self): + self.do_it(0) + + +# ***************************************************************************** + + +class TestSOS_noindexmulti_001(SOSProblem_nonindexed_multivar, unittest.TestCase): + def test(self): + self.do_it(1) + + +# ***************************************************************************** +# ***************************************************************************** +# ***************************************************************************** +# ***************************************************************************** + + +@unittest.skipIf(not solver_available, "The solver is not available.") +class SOSProblem_indexed(object): + "Test indexed SOS using a single pyomo Var component." + + def verify( + self, model, sos, exp_res, abs_tol, use_rule, case, show_output: bool = False + ): + "Make sure the outcome is as expected." + + opt = pyo.SolverFactory(solver_name) + + problem = model.create_instance() + + opt.solve(problem, tee=show_output) + + assert len(problem.mysos) != 0 + + assert math.isclose(pyo.value(problem.OBJ), exp_res, abs_tol=abs_tol) + + # ************************************************************************* + + def do_it(self, test_number): + # sos, expect. result, absolute tolerance, use rule parameter, case + (sos, exp_res, abs_tol, use_rule, case) = self.test_vectors[test_number] + + model = self.set_problem_up(case=case, n=sos, use_rule=use_rule) + + self.verify( + model=model, + sos=sos, + exp_res=exp_res, + abs_tol=abs_tol, + use_rule=use_rule, + case=case, + ) + + # ************************************************************************* + + test_vectors = [ + # sos, expect. result, absolute tolerance, use rule parameter, case + (1, -7.5000000000e-02, 1e-3, True, 0), + (1, -7.5000000000e-02, 1e-3, False, 0), + (2, 1.1, 1e-3, True, 0), + (2, 1.1, 1e-3, False, 0), + (1, -7.5000000000e-02, 1e-3, True, 1), + (1, -7.5000000000e-02, 1e-3, False, 1), + (2, 1.1, 1e-3, True, 1), + (2, 1.1, 1e-3, False, 1), + (1, -7.5000000000e-02, 1e-3, True, 2), + (1, -7.5000000000e-02, 1e-3, False, 2), + (2, 1.1, 1e-3, True, 2), + (2, 1.1, 1e-3, False, 2), + (1, -7.5000000000e-02, 1e-3, True, 3), + (1, -7.5000000000e-02, 1e-3, False, 3), + (2, 1.1, 1e-3, True, 3), + (2, 1.1, 1e-3, False, 3), + # trigger the error + (1, -7.5000000000e-02, 1e-3, True, 4), + (1, -7.5000000000e-02, 1e-3, False, 4), + (2, 1.1, 1e-3, True, 4), + (2, 1.1, 1e-3, False, 4), + ] + + def set_problem_up(self, case: int == 0, n: int = 1, use_rule: bool = False): + "Create the problem." + # abstract model + + model = pyo.AbstractModel() + model.E = pyo.Set(initialize=[1]) + + model.A = pyo.Set(initialize=[1, 2, 3, 5, 6]) + model.B = pyo.Set(initialize=[2, 4]) + model.x = pyo.Var(model.E, domain=pyo.NonNegativeReals, bounds=(0, 40)) + model.y = pyo.Var(model.A, domain=pyo.NonNegativeReals) + + model.param_cx = pyo.Param(model.E, initialize={1: 1}) + model.param_cy = pyo.Param( + model.A, initialize={1: 2, 2: 3, 3: -0.1, 5: 0.5, 6: 4} + ) + + def obj_f(m): + return sum(m.param_cx[e] * m.x[e] for e in m.E) + sum( + m.param_cy[a] * m.y[a] for a in m.A + ) + + model.OBJ = pyo.Objective(rule=obj_f) + + def constr_ya_lb(m, a): + return m.y[a] <= 2 + + model.ConstraintYa_lb = pyo.Constraint(model.A, rule=constr_ya_lb) + + def constr_y_lb(m): + return m.x[1] + m.y[1] + m.y[2] + m.y[5] + m.y[6] >= 0.25 + + model.ConstraintY_lb = pyo.Constraint(rule=constr_y_lb) + + if n == 2: + # force the second SOS2 to have two non-zero variables + def constr_y2_lb(m): + return ( + # m.x[1]+ + # m.y[1]+ + m.y[2] + m.y[5] + m.y[6] + >= 2.1 + ) + + model.ConstraintY2_lb = pyo.Constraint(rule=constr_y2_lb) + + if case == 0: + # with index, no weights, using a dict + + index = {2: [1, 3], 4: [2, 5, 6]} + + if use_rule: + + def rule_mysos(m, b): + return ( + [m.y[a] for a in index[b]], + [i + 1 for i, _ in enumerate(index[b])], + ) + + model.mysos = pyo.SOSConstraint(model.B, rule=rule_mysos, sos=n) + + else: + model.mysos = pyo.SOSConstraint( + model.B, var=model.y, sos=n, index=index + ) + + elif case == 1: + # with index, no weights, using a Set object + + model.mysosindex = pyo.Set(model.B, initialize={2: [1, 3], 4: [2, 5, 6]}) + + if use_rule: + + def rule_mysos(m, b): + return ( + [m.y[a] for a in m.mysosindex[b]], + [i + 1 for i, _ in enumerate(m.mysosindex[b])], + ) + + model.mysos = pyo.SOSConstraint(model.B, rule=rule_mysos, sos=n) + + else: + model.mysos = pyo.SOSConstraint( + model.B, var=model.y, sos=n, index=model.mysosindex + ) + + elif case == 2: + # with weights, provided using a set and a dict + + index = {2: [1, 3], 4: [2, 5, 6]} + # the weights define adjacency + weights = {1: 25.0, 3: 18.0, 2: 3, 5: 7, 6: 10} + + if use_rule: + + def rule_mysos(m, b): + return ([m.y[a] for a in index[b]], [weights[a] for a in index[b]]) + + model.mysos = pyo.SOSConstraint(model.B, rule=rule_mysos, sos=n) + + else: + model.mysos = pyo.SOSConstraint( + model.B, var=model.y, sos=n, index=index, weights=weights + ) + + elif case == 3: + # with weights, using a Set and a Param + + model.mysosindex = pyo.Set(model.B, initialize={2: [1, 3], 4: [2, 5, 6]}) + model.mysosweights = pyo.Param( + model.A, # model.A or a subset that covers all relevant members + initialize={ + 1: 25.0, + 3: 18.0, + 2: 3, + 5: 7, + 6: 10, # weights define adjacency + }, + ) + + if use_rule: + + def rule_mysos(m, b): + return ( + [m.y[a] for a in m.mysosindex[b]], + [m.mysosweights[a] for a in m.mysosindex[b]], + ) + + model.mysos = pyo.SOSConstraint(model.B, rule=rule_mysos, sos=n) + + else: + model.mysos = pyo.SOSConstraint( + model.B, + var=model.y, + sos=n, + index=model.mysosindex, + weights=model.mysosweights, + ) + + else: + raise NotImplementedError + + return model + + # ************************************************************************* + # ************************************************************************* + + +# ***************************************************************************** + + +class TestSOS_indexed_000(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(0) + + +# ***************************************************************************** + + +class TestSOS_indexed_001(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(1) + + +# ***************************************************************************** + + +class TestSOS_indexed_002(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(2) + + +# ***************************************************************************** + + +class TestSOS_indexed_003(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(3) + + +# ***************************************************************************** + + +class TestSOS_indexed_004(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(4) + + +# ***************************************************************************** + + +class TestSOS_indexed_005(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(5) + + +# ***************************************************************************** + + +class TestSOS_indexed_006(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(6) + + +# ***************************************************************************** + + +class TestSOS_indexed_007(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(7) + + +# ***************************************************************************** + + +class TestSOS_indexed_008(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(8) + + +# ***************************************************************************** + + +class TestSOS_indexed_009(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(9) + + +# ***************************************************************************** + + +class TestSOS_indexed_010(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(10) + + +# ***************************************************************************** + + +class TestSOS_indexed_011(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(11) + + +# ***************************************************************************** + + +class TestSOS_indexed_012(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(12) + + +# ***************************************************************************** + + +class TestSOS_indexed_013(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(13) + + +# ***************************************************************************** + + +class TestSOS_indexed_014(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(14) + + +# ***************************************************************************** + + +class TestSOS_indexed_015(SOSProblem_indexed, unittest.TestCase): + def test(self): + self.do_it(15) + + +# ***************************************************************************** + + +class TestSOS_indexed_016(SOSProblem_indexed, unittest.TestCase): + def test(self): + error_triggered = False + try: + self.do_it(16) + except NotImplementedError: + error_triggered = True + assert error_triggered + + +# ***************************************************************************** + + +class TestSOS_indexed_017(SOSProblem_indexed, unittest.TestCase): + def test(self): + error_triggered = False + try: + self.do_it(17) + except NotImplementedError: + error_triggered = True + assert error_triggered + + +# ***************************************************************************** +# ***************************************************************************** +# ***************************************************************************** +# ***************************************************************************** + + +@unittest.skipIf(not solver_available, "The solver is not available.") +class SOSProblem_indexedmultivar(object): + "Test indexed SOS made up of different Var components." + + def verify(self, model, sos, exp_res, abs_tol, show_output: bool = False): + "Make sure the outcome is as expected." + + opt = pyo.SolverFactory(solver_name) + + problem = model.create_instance() + + opt.solve(problem, tee=show_output) + + assert len(problem.mysos) != 0 + + assert math.isclose(pyo.value(problem.OBJ), exp_res, abs_tol=abs_tol) + + # ************************************************************************* + + def do_it(self, test_number): + # sos, expect. result, absolute tolerance, use rule parameter, case + (sos, exp_res, abs_tol) = self.test_vectors[test_number] + + model = self.set_problem_up(n=sos) + + self.verify(model=model, sos=sos, exp_res=exp_res, abs_tol=abs_tol) + + # ************************************************************************* + + test_vectors = [ + # sos, expected result, absolute tolerance + (1, -7.5000000000e-02, 1e-3), + (2, 1.1, 1e-3), + ] + + def set_problem_up(self, n: int = 1): + "Create the problem." + # abstract model + + model = pyo.AbstractModel() + model.E = pyo.Set(initialize=[1, 2]) + + model.A = pyo.Set(initialize=[1, 2, 3, 5, 6]) + model.B = pyo.Set(initialize=[2, 4]) + model.x = pyo.Var(model.E, domain=pyo.NonNegativeReals, bounds=(0, 40)) + model.y = pyo.Var(model.A, domain=pyo.NonNegativeReals) + + model.param_cx = pyo.Param(model.E, initialize={1: 1, 2: 1.5}) + model.param_cy = pyo.Param( + model.A, initialize={1: 2, 2: 3, 3: -0.1, 5: 0.5, 6: 4} + ) + + def obj_f(m): + return sum(m.param_cx[e] * m.x[e] for e in m.E) + sum( + m.param_cy[a] * m.y[a] for a in m.A + ) + + model.OBJ = pyo.Objective(rule=obj_f) + + def constr_ya_lb(m, a): + return m.y[a] <= 2 + + model.ConstraintYa_lb = pyo.Constraint(model.A, rule=constr_ya_lb) + + def constr_y_lb(m): + return m.x[1] + m.x[2] + m.y[1] + m.y[2] + m.y[5] + m.y[6] >= 0.25 + + model.ConstraintY_lb = pyo.Constraint(rule=constr_y_lb) + + if n == 2: + # force the second SOS2 to have two non-zero variables + def constr_y2_lb(m): + return ( + # m.x[1]+ + # m.y[1]+ + m.y[2] + m.y[5] + m.y[6] + >= 2.1 + ) + + model.ConstraintY2_lb = pyo.Constraint(rule=constr_y2_lb) + + # with weights, using a Set and a Param + + model.mysosindex_x = pyo.Set(model.B, initialize={2: [1], 4: [2]}) + model.mysosindex_y = pyo.Set(model.B, initialize={2: [1, 3], 4: [2, 5, 6]}) + model.mysosweights_x = pyo.Param( + model.E, # model.A or a subset that covers all relevant members + initialize={1: 4, 2: 8}, # weights define adjacency + ) + model.mysosweights_y = pyo.Param( + model.A, # model.A or a subset that covers all relevant members + initialize={ + 1: 25.0, + 3: 18.0, + 2: 3, + 5: 7, + 6: 10, + }, # weights define adjacency + ) + + def rule_mysos(m, b): + var_list = [m.x[e] for e in m.mysosindex_x[b]] + var_list.extend([m.y[a] for a in m.mysosindex_y[b]]) + + weight_list = [m.mysosweights_x[e] for e in m.mysosindex_x[b]] + weight_list.extend([m.mysosweights_y[a] for a in m.mysosindex_y[b]]) + + return (var_list, weight_list) + + model.mysos = pyo.SOSConstraint(model.B, rule=rule_mysos, sos=n) + + return model + + # ************************************************************************* + # ************************************************************************* + + +# ***************************************************************************** + + +class TestSOS_indexedmulti_000(SOSProblem_indexedmultivar, unittest.TestCase): + def test(self): + self.do_it(0) + + +# ***************************************************************************** + + +class TestSOS_indexedmulti_001(SOSProblem_indexedmultivar, unittest.TestCase): + def test(self): + self.do_it(1) + + +# ***************************************************************************** +# ***************************************************************************** + +if __name__ == "__main__": + unittest.main() + +# ***************************************************************************** +# ***************************************************************************** diff --git a/pyomo/core/tests/unit/test_suffix.py b/pyomo/core/tests/unit/test_suffix.py index 72edd63c3da..55e3e9dbc3f 100644 --- a/pyomo/core/tests/unit/test_suffix.py +++ b/pyomo/core/tests/unit/test_suffix.py @@ -16,30 +16,44 @@ import itertools import pickle from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest -from pyomo.core.base.suffix import \ - (active_export_suffix_generator, - export_suffix_generator, - active_import_suffix_generator, - import_suffix_generator, - active_local_suffix_generator, - local_suffix_generator, - active_suffix_generator, - suffix_generator) -from pyomo.environ import ConcreteModel, Suffix, Var, Param, Set, Objective, Constraint, Block, sum_product +from pyomo.core.base.suffix import ( + active_export_suffix_generator, + export_suffix_generator, + active_import_suffix_generator, + import_suffix_generator, + active_local_suffix_generator, + local_suffix_generator, + active_suffix_generator, + suffix_generator, +) +from pyomo.environ import ( + ConcreteModel, + Suffix, + Var, + Param, + Set, + Objective, + Constraint, + Block, + sum_product, +) from io import StringIO -def simple_con_rule(model,i): + +def simple_con_rule(model, i): return model.x[i] == 1 -def simple_obj_rule(model,i): + + +def simple_obj_rule(model, i): return model.x[i] class TestSuffixMethods(unittest.TestCase): - # test __init__ def test_init(self): model = ConcreteModel() @@ -47,8 +61,10 @@ def test_init(self): model.junk = Suffix() model.del_component('junk') - for direction,datatype in itertools.product(Suffix.SuffixDirections,Suffix.SuffixDatatypes): - model.junk = Suffix(direction=direction,datatype=datatype) + for direction, datatype in itertools.product( + Suffix.SuffixDirections, Suffix.SuffixDatatypes + ): + model.junk = Suffix(direction=direction, datatype=datatype) model.del_component('junk') # test import_enabled @@ -88,25 +104,25 @@ def test_set_value_getValue_Var1(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3], dense=True) + model.X = Var([1, 2, 3], dense=True) - model.junk.set_value(model.X,1.0) - model.junk.set_value(model.X[1],2.0) + model.junk.set_value(model.X, 1.0) + model.junk.set_value(model.X[1], 2.0) self.assertEqual(model.junk.get(model.X), None) self.assertEqual(model.junk.get(model.X[1]), 2.0) self.assertEqual(model.junk.get(model.X[2]), 1.0) self.assertEqual(model.junk.get(model.x), None) - model.junk.set_value(model.x,3.0) - model.junk.set_value(model.X[2],3.0) + model.junk.set_value(model.x, 3.0) + model.junk.set_value(model.X[2], 3.0) self.assertEqual(model.junk.get(model.X), None) self.assertEqual(model.junk.get(model.X[1]), 2.0) self.assertEqual(model.junk.get(model.X[2]), 3.0) self.assertEqual(model.junk.get(model.x), 3.0) - model.junk.set_value(model.X,1.0,expand=False) + model.junk.set_value(model.X, 1.0, expand=False) self.assertEqual(model.junk.get(model.X), 1.0) @@ -116,7 +132,7 @@ def test_set_value_getValue_Var2(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3], dense=True) + model.X = Var([1, 2, 3], dense=True) model.X.set_suffix_value('junk', 1.0) model.X[1].set_suffix_value('junk', 2.0) @@ -144,7 +160,7 @@ def test_set_value_getValue_Var3(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3], dense=True) + model.X = Var([1, 2, 3], dense=True) model.X.set_suffix_value(model.junk, 1.0) model.X[1].set_suffix_value(model.junk, 2.0) @@ -172,12 +188,12 @@ def test_set_value_getValue_Constraint1(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.c = Constraint(expr=model.x>=1) - model.C = Constraint([1,2,3], rule=lambda model,i: model.X[i]>=1) + model.X = Var([1, 2, 3]) + model.c = Constraint(expr=model.x >= 1) + model.C = Constraint([1, 2, 3], rule=lambda model, i: model.X[i] >= 1) - model.junk.set_value(model.C,1.0) - model.junk.set_value(model.C[1],2.0) + model.junk.set_value(model.C, 1.0) + model.junk.set_value(model.C[1], 2.0) self.assertEqual(model.junk.get(model.C), None) self.assertEqual(model.junk.get(model.C[1]), 2.0) @@ -185,15 +201,15 @@ def test_set_value_getValue_Constraint1(self): self.assertEqual(model.junk.get(model.c), None) - model.junk.set_value(model.c,3.0) - model.junk.set_value(model.C[2],3.0) + model.junk.set_value(model.c, 3.0) + model.junk.set_value(model.C[2], 3.0) self.assertEqual(model.junk.get(model.C), None) self.assertEqual(model.junk.get(model.C[1]), 2.0) self.assertEqual(model.junk.get(model.C[2]), 3.0) self.assertEqual(model.junk.get(model.c), 3.0) - model.junk.set_value(model.C,1.0,expand=False) + model.junk.set_value(model.C, 1.0, expand=False) self.assertEqual(model.junk.get(model.C), 1.0) @@ -203,9 +219,9 @@ def test_set_value_getValue_Constraint2(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.c = Constraint(expr=model.x>=1) - model.C = Constraint([1,2,3], rule=lambda model,i: model.X[i]>=1) + model.X = Var([1, 2, 3]) + model.c = Constraint(expr=model.x >= 1) + model.C = Constraint([1, 2, 3], rule=lambda model, i: model.X[i] >= 1) model.C.set_suffix_value('junk', 1.0) model.C[1].set_suffix_value('junk', 2.0) @@ -233,9 +249,9 @@ def test_set_value_getValue_Constraint3(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.c = Constraint(expr=model.x>=1) - model.C = Constraint([1,2,3], rule=lambda model,i: model.X[i]>=1) + model.X = Var([1, 2, 3]) + model.c = Constraint(expr=model.x >= 1) + model.C = Constraint([1, 2, 3], rule=lambda model, i: model.X[i] >= 1) model.C.set_suffix_value(model.junk, 1.0) model.C[1].set_suffix_value(model.junk, 2.0) @@ -263,27 +279,27 @@ def test_set_value_getValue_Objective1(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.obj = Objective(expr=sum_product(model.X)+model.x) - model.OBJ = Objective([1,2,3], rule=lambda model,i: model.X[i]) + model.X = Var([1, 2, 3]) + model.obj = Objective(expr=sum_product(model.X) + model.x) + model.OBJ = Objective([1, 2, 3], rule=lambda model, i: model.X[i]) - model.junk.set_value(model.OBJ,1.0) - model.junk.set_value(model.OBJ[1],2.0) + model.junk.set_value(model.OBJ, 1.0) + model.junk.set_value(model.OBJ[1], 2.0) self.assertEqual(model.junk.get(model.OBJ), None) self.assertEqual(model.junk.get(model.OBJ[1]), 2.0) self.assertEqual(model.junk.get(model.OBJ[2]), 1.0) self.assertEqual(model.junk.get(model.obj), None) - model.junk.set_value(model.obj,3.0) - model.junk.set_value(model.OBJ[2],3.0) + model.junk.set_value(model.obj, 3.0) + model.junk.set_value(model.OBJ[2], 3.0) self.assertEqual(model.junk.get(model.OBJ), None) self.assertEqual(model.junk.get(model.OBJ[1]), 2.0) self.assertEqual(model.junk.get(model.OBJ[2]), 3.0) self.assertEqual(model.junk.get(model.obj), 3.0) - model.junk.set_value(model.OBJ,1.0,expand=False) + model.junk.set_value(model.OBJ, 1.0, expand=False) self.assertEqual(model.junk.get(model.OBJ), 1.0) @@ -293,9 +309,9 @@ def test_set_value_getValue_Objective2(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.obj = Objective(expr=sum_product(model.X)+model.x) - model.OBJ = Objective([1,2,3], rule=lambda model,i: model.X[i]) + model.X = Var([1, 2, 3]) + model.obj = Objective(expr=sum_product(model.X) + model.x) + model.OBJ = Objective([1, 2, 3], rule=lambda model, i: model.X[i]) model.OBJ.set_suffix_value('junk', 1.0) model.OBJ[1].set_suffix_value('junk', 2.0) @@ -323,9 +339,9 @@ def test_set_value_getValue_Objective3(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.obj = Objective(expr=sum_product(model.X)+model.x) - model.OBJ = Objective([1,2,3], rule=lambda model,i: model.X[i]) + model.X = Var([1, 2, 3]) + model.obj = Objective(expr=sum_product(model.X) + model.x) + model.OBJ = Objective([1, 2, 3], rule=lambda model, i: model.X[i]) model.OBJ.set_suffix_value(model.junk, 1.0) model.OBJ[1].set_suffix_value(model.junk, 2.0) @@ -353,27 +369,27 @@ def test_set_value_getValue_mutableParam1(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.p = Param(initialize=1.0,mutable=True) - model.P = Param([1,2,3],initialize=1.0,mutable=True) + model.X = Var([1, 2, 3]) + model.p = Param(initialize=1.0, mutable=True) + model.P = Param([1, 2, 3], initialize=1.0, mutable=True) - model.junk.set_value(model.P,1.0) - model.junk.set_value(model.P[1],2.0) + model.junk.set_value(model.P, 1.0) + model.junk.set_value(model.P[1], 2.0) self.assertEqual(model.junk.get(model.P), None) self.assertEqual(model.junk.get(model.P[1]), 2.0) self.assertEqual(model.junk.get(model.P[2]), 1.0) self.assertEqual(model.junk.get(model.p), None) - model.junk.set_value(model.p,3.0) - model.junk.set_value(model.P[2],3.0) + model.junk.set_value(model.p, 3.0) + model.junk.set_value(model.P[2], 3.0) self.assertEqual(model.junk.get(model.P), None) self.assertEqual(model.junk.get(model.P[1]), 2.0) self.assertEqual(model.junk.get(model.P[2]), 3.0) self.assertEqual(model.junk.get(model.p), 3.0) - model.junk.set_value(model.P,1.0,expand=False) + model.junk.set_value(model.P, 1.0, expand=False) self.assertEqual(model.junk.get(model.P), 1.0) @@ -383,9 +399,9 @@ def test_set_value_getValue_mutableParam2(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.p = Param(initialize=1.0,mutable=True) - model.P = Param([1,2,3],initialize=1.0,mutable=True) + model.X = Var([1, 2, 3]) + model.p = Param(initialize=1.0, mutable=True) + model.P = Param([1, 2, 3], initialize=1.0, mutable=True) model.P.set_suffix_value('junk', 1.0) model.P[1].set_suffix_value('junk', 2.0) @@ -413,9 +429,9 @@ def test_set_value_getValue_mutableParam3(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.p = Param(initialize=1.0,mutable=True) - model.P = Param([1,2,3],initialize=1.0,mutable=True) + model.X = Var([1, 2, 3]) + model.p = Param(initialize=1.0, mutable=True) + model.P = Param([1, 2, 3], initialize=1.0, mutable=True) model.P.set_suffix_value(model.junk, 1.0) model.P[1].set_suffix_value(model.junk, 2.0) @@ -443,13 +459,13 @@ def test_set_value_getValue_immutableParam1(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.p = Param(initialize=1.0,mutable=False) - model.P = Param([1,2,3],initialize=1.0,mutable=False) + model.X = Var([1, 2, 3]) + model.p = Param(initialize=1.0, mutable=False) + model.P = Param([1, 2, 3], initialize=1.0, mutable=False) self.assertEqual(model.junk.get(model.P), None) - model.junk.set_value(model.P,1.0,expand=False) + model.junk.set_value(model.P, 1.0, expand=False) self.assertEqual(model.junk.get(model.P), 1.0) @@ -459,9 +475,9 @@ def test_set_value_getValue_immutableParam2(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.p = Param(initialize=1.0,mutable=False) - model.P = Param([1,2,3],initialize=1.0,mutable=False) + model.X = Var([1, 2, 3]) + model.p = Param(initialize=1.0, mutable=False) + model.P = Param([1, 2, 3], initialize=1.0, mutable=False) self.assertEqual(model.P.get_suffix_value('junk'), None) @@ -475,9 +491,9 @@ def test_set_value_getValue_immutableParam3(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.p = Param(initialize=1.0,mutable=False) - model.P = Param([1,2,3],initialize=1.0,mutable=False) + model.X = Var([1, 2, 3]) + model.p = Param(initialize=1.0, mutable=False) + model.P = Param([1, 2, 3], initialize=1.0, mutable=False) self.assertEqual(model.P.get_suffix_value(model.junk), None) @@ -491,27 +507,27 @@ def test_set_value_getValue_Set1(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.s = Set(initialize=[1,2,3]) - model.S = Set([1,2,3],initialize={1:[1,2,3],2:[1,2,3],3:[1,2,3]}) + model.X = Var([1, 2, 3]) + model.s = Set(initialize=[1, 2, 3]) + model.S = Set([1, 2, 3], initialize={1: [1, 2, 3], 2: [1, 2, 3], 3: [1, 2, 3]}) - model.junk.set_value(model.S,1.0) - model.junk.set_value(model.S[1],2.0) + model.junk.set_value(model.S, 1.0) + model.junk.set_value(model.S[1], 2.0) self.assertEqual(model.junk.get(model.S), None) self.assertEqual(model.junk.get(model.S[1]), 2.0) self.assertEqual(model.junk.get(model.S[2]), 1.0) self.assertEqual(model.junk.get(model.s), None) - model.junk.set_value(model.s,3.0) - model.junk.set_value(model.S[2],3.0) + model.junk.set_value(model.s, 3.0) + model.junk.set_value(model.S[2], 3.0) self.assertEqual(model.junk.get(model.S), None) self.assertEqual(model.junk.get(model.S[1]), 2.0) self.assertEqual(model.junk.get(model.S[2]), 3.0) self.assertEqual(model.junk.get(model.s), 3.0) - model.junk.set_value(model.S,1.0,expand=False) + model.junk.set_value(model.S, 1.0, expand=False) self.assertEqual(model.junk.get(model.S), 1.0) @@ -521,9 +537,9 @@ def test_set_value_getValue_Set2(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.s = Set(initialize=[1,2,3]) - model.S = Set([1,2,3],initialize={1:[1,2,3],2:[1,2,3],3:[1,2,3]}) + model.X = Var([1, 2, 3]) + model.s = Set(initialize=[1, 2, 3]) + model.S = Set([1, 2, 3], initialize={1: [1, 2, 3], 2: [1, 2, 3], 3: [1, 2, 3]}) model.S.set_suffix_value('junk', 1.0) model.S[1].set_suffix_value('junk', 2.0) @@ -551,9 +567,9 @@ def test_set_value_getValue_Set3(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.X = Var([1,2,3]) - model.s = Set(initialize=[1,2,3]) - model.S = Set([1,2,3],initialize={1:[1,2,3],2:[1,2,3],3:[1,2,3]}) + model.X = Var([1, 2, 3]) + model.s = Set(initialize=[1, 2, 3]) + model.S = Set([1, 2, 3], initialize={1: [1, 2, 3], 2: [1, 2, 3], 3: [1, 2, 3]}) model.S.set_suffix_value(model.junk, 1.0) model.S[1].set_suffix_value(model.junk, 2.0) @@ -581,30 +597,30 @@ def test_set_value_getValue_Block1(self): model = ConcreteModel() model.junk = Suffix() model.b = Block() - model.B = Block([1,2,3]) + model.B = Block([1, 2, 3]) - # make sure each BlockData gets construced + # make sure each BlockData gets constructed model.B[1].x = 1 model.B[2].x = 2 model.B[3].x = 3 - model.junk.set_value(model.B,1.0) - model.junk.set_value(model.B[1],2.0) + model.junk.set_value(model.B, 1.0) + model.junk.set_value(model.B[1], 2.0) self.assertEqual(model.junk.get(model.B), None) self.assertEqual(model.junk.get(model.B[1]), 2.0) self.assertEqual(model.junk.get(model.B[2]), 1.0) self.assertEqual(model.junk.get(model.b), None) - model.junk.set_value(model.b,3.0) - model.junk.set_value(model.B[2],3.0) + model.junk.set_value(model.b, 3.0) + model.junk.set_value(model.B[2], 3.0) self.assertEqual(model.junk.get(model.B), None) self.assertEqual(model.junk.get(model.B[1]), 2.0) self.assertEqual(model.junk.get(model.B[2]), 3.0) self.assertEqual(model.junk.get(model.b), 3.0) - model.junk.set_value(model.B,1.0,expand=False) + model.junk.set_value(model.B, 1.0, expand=False) self.assertEqual(model.junk.get(model.B), 1.0) @@ -614,9 +630,9 @@ def test_set_value_getValue_Block2(self): model = ConcreteModel() model.junk = Suffix() model.b = Block() - model.B = Block([1,2,3]) + model.B = Block([1, 2, 3]) - # make sure each BlockData gets construced + # make sure each BlockData gets constructed model.B[1].x = 1 model.B[2].x = 2 model.B[3].x = 3 @@ -647,9 +663,9 @@ def test_set_value_getValue_Block3(self): model = ConcreteModel() model.junk = Suffix() model.b = Block() - model.B = Block([1,2,3]) + model.B = Block([1, 2, 3]) - # make sure each BlockData gets construced + # make sure each BlockData gets constructed model.B[1].x = 1 model.B[2].x = 2 model.B[3].x = 3 @@ -679,11 +695,11 @@ def test_set_all_values1(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.y = Var([1,2,3], dense=True) - model.z = Var([1,2,3], dense=True) + model.y = Var([1, 2, 3], dense=True) + model.z = Var([1, 2, 3], dense=True) - model.junk.set_value(model.y[2],1.0) - model.junk.set_value(model.z,2.0) + model.junk.set_value(model.y[2], 1.0) + model.junk.set_value(model.z, 2.0) self.assertTrue(model.junk.get(model.x) is None) self.assertTrue(model.junk.get(model.y) is None) @@ -706,8 +722,8 @@ def test_set_all_values2(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.y = Var([1,2,3], dense=True) - model.z = Var([1,2,3], dense=True) + model.y = Var([1, 2, 3], dense=True) + model.z = Var([1, 2, 3], dense=True) model.y[2].set_suffix_value('junk', 1.0) model.z.set_suffix_value('junk', 2.0) @@ -733,8 +749,8 @@ def test_set_all_values3(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.y = Var([1,2,3], dense=True) - model.z = Var([1,2,3], dense=True) + model.y = Var([1, 2, 3], dense=True) + model.z = Var([1, 2, 3], dense=True) model.y[2].set_suffix_value(model.junk, 1.0) model.z.set_suffix_value(model.junk, 2.0) @@ -762,29 +778,28 @@ def test_update_values1(self): model.x = Var() model.y = Var() model.z = Var() - model.junk.set_value(model.x,0.0) - self.assertEqual(model.junk.get(model.x),0.0) - self.assertEqual(model.junk.get(model.y),None) - self.assertEqual(model.junk.get(model.z),None) - model.junk.update_values([(model.x,1.0),(model.y,2.0),(model.z,3.0)]) - self.assertEqual(model.junk.get(model.x),1.0) - self.assertEqual(model.junk.get(model.y),2.0) - self.assertEqual(model.junk.get(model.z),3.0) - + model.junk.set_value(model.x, 0.0) + self.assertEqual(model.junk.get(model.x), 0.0) + self.assertEqual(model.junk.get(model.y), None) + self.assertEqual(model.junk.get(model.z), None) + model.junk.update_values([(model.x, 1.0), (model.y, 2.0), (model.z, 3.0)]) + self.assertEqual(model.junk.get(model.x), 1.0) + self.assertEqual(model.junk.get(model.y), 2.0) + self.assertEqual(model.junk.get(model.z), 3.0) # test clear_value def test_clear_value(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.y = Var([1,2,3], dense=True) - model.z = Var([1,2,3], dense=True) + model.y = Var([1, 2, 3], dense=True) + model.z = Var([1, 2, 3], dense=True) - model.junk.set_value(model.x,-1.0) - model.junk.set_value(model.y,-2.0) - model.junk.set_value(model.y[2],1.0) - model.junk.set_value(model.z,2.0) - model.junk.set_value(model.z[1],4.0) + model.junk.set_value(model.x, -1.0) + model.junk.set_value(model.y, -2.0) + model.junk.set_value(model.y[2], 1.0) + model.junk.set_value(model.z, 2.0) + model.junk.set_value(model.z[1], 4.0) self.assertTrue(model.junk.get(model.x) == -1.0) self.assertTrue(model.junk.get(model.y) == None) @@ -811,11 +826,11 @@ def test_clear_all_values(self): model = ConcreteModel() model.junk = Suffix() model.x = Var() - model.y = Var([1,2,3], dense=True) - model.z = Var([1,2,3], dense=True) + model.y = Var([1, 2, 3], dense=True) + model.z = Var([1, 2, 3], dense=True) - model.junk.set_value(model.y[2],1.0) - model.junk.set_value(model.z,2.0) + model.junk.set_value(model.y[2], 1.0) + model.junk.set_value(model.z, 2.0) self.assertTrue(model.junk.get(model.x) is None) self.assertTrue(model.junk.get(model.y) is None) @@ -903,25 +918,27 @@ def test_pprint_verbose(self): model.junk = Suffix() model.s = Block() model.s.b = Block() - model.s.B = Block([1,2,3]) + model.s.B = Block([1, 2, 3]) - model.junk.set_value(model.s.B,1.0) - model.junk.set_value(model.s.B[1],2.0) + model.junk.set_value(model.s.B, 1.0) + model.junk.set_value(model.s.B[1], 2.0) - model.junk.set_value(model.s.b,3.0) - model.junk.set_value(model.s.B[2],3.0) + model.junk.set_value(model.s.b, 3.0) + model.junk.set_value(model.s.B[2], 3.0) output = StringIO() - model.junk.pprint(ostream=output,verbose=True) - model.pprint(ostream=output,verbose=True) + model.junk.pprint(ostream=output, verbose=True) + model.pprint(ostream=output, verbose=True) def test_active_export_suffix_generator(self): model = ConcreteModel() - model.junk_EXPORT_int = Suffix(direction=Suffix.EXPORT,datatype=Suffix.INT) - model.junk_EXPORT_float = Suffix(direction=Suffix.EXPORT,datatype=Suffix.FLOAT) - model.junk_IMPORT_EXPORT_float = Suffix(direction=Suffix.IMPORT_EXPORT,datatype=Suffix.FLOAT) - model.junk_IMPORT = Suffix(direction=Suffix.IMPORT,datatype=None) - model.junk_LOCAL = Suffix(direction=Suffix.LOCAL,datatype=None) + model.junk_EXPORT_int = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT) + model.junk_EXPORT_float = Suffix(direction=Suffix.EXPORT, datatype=Suffix.FLOAT) + model.junk_IMPORT_EXPORT_float = Suffix( + direction=Suffix.IMPORT_EXPORT, datatype=Suffix.FLOAT + ) + model.junk_IMPORT = Suffix(direction=Suffix.IMPORT, datatype=None) + model.junk_LOCAL = Suffix(direction=Suffix.LOCAL, datatype=None) suffixes = dict(active_export_suffix_generator(model)) self.assertTrue('junk_EXPORT_int' in suffixes) @@ -939,7 +956,7 @@ def test_active_export_suffix_generator(self): self.assertTrue('junk_LOCAL' not in suffixes) model.junk_EXPORT_float.activate() - suffixes = dict(active_export_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(active_export_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_EXPORT_int' not in suffixes) self.assertTrue('junk_EXPORT_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT_float' in suffixes) @@ -947,7 +964,7 @@ def test_active_export_suffix_generator(self): self.assertTrue('junk_LOCAL' not in suffixes) model.junk_EXPORT_float.deactivate() - suffixes = dict(active_export_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(active_export_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_EXPORT_int' not in suffixes) self.assertTrue('junk_EXPORT_float' not in suffixes) self.assertTrue('junk_IMPORT_EXPORT_float' in suffixes) @@ -956,11 +973,13 @@ def test_active_export_suffix_generator(self): def test_export_suffix_generator(self): model = ConcreteModel() - model.junk_EXPORT_int = Suffix(direction=Suffix.EXPORT,datatype=Suffix.INT) - model.junk_EXPORT_float = Suffix(direction=Suffix.EXPORT,datatype=Suffix.FLOAT) - model.junk_IMPORT_EXPORT_float = Suffix(direction=Suffix.IMPORT_EXPORT,datatype=Suffix.FLOAT) - model.junk_IMPORT = Suffix(direction=Suffix.IMPORT,datatype=None) - model.junk_LOCAL = Suffix(direction=Suffix.LOCAL,datatype=None) + model.junk_EXPORT_int = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT) + model.junk_EXPORT_float = Suffix(direction=Suffix.EXPORT, datatype=Suffix.FLOAT) + model.junk_IMPORT_EXPORT_float = Suffix( + direction=Suffix.IMPORT_EXPORT, datatype=Suffix.FLOAT + ) + model.junk_IMPORT = Suffix(direction=Suffix.IMPORT, datatype=None) + model.junk_LOCAL = Suffix(direction=Suffix.LOCAL, datatype=None) suffixes = dict(export_suffix_generator(model)) self.assertTrue('junk_EXPORT_int' in suffixes) @@ -978,7 +997,7 @@ def test_export_suffix_generator(self): self.assertTrue('junk_LOCAL' not in suffixes) model.junk_EXPORT_float.activate() - suffixes = dict(export_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(export_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_EXPORT_int' not in suffixes) self.assertTrue('junk_EXPORT_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT_float' in suffixes) @@ -986,7 +1005,7 @@ def test_export_suffix_generator(self): self.assertTrue('junk_LOCAL' not in suffixes) model.junk_EXPORT_float.deactivate() - suffixes = dict(export_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(export_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_EXPORT_int' not in suffixes) self.assertTrue('junk_EXPORT_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT_float' in suffixes) @@ -995,11 +1014,13 @@ def test_export_suffix_generator(self): def test_active_import_suffix_generator(self): model = ConcreteModel() - model.junk_IMPORT_int = Suffix(direction=Suffix.IMPORT,datatype=Suffix.INT) - model.junk_IMPORT_float = Suffix(direction=Suffix.IMPORT,datatype=Suffix.FLOAT) - model.junk_IMPORT_EXPORT_float = Suffix(direction=Suffix.IMPORT_EXPORT,datatype=Suffix.FLOAT) - model.junk_EXPORT = Suffix(direction=Suffix.EXPORT,datatype=None) - model.junk_LOCAL = Suffix(direction=Suffix.LOCAL,datatype=None) + model.junk_IMPORT_int = Suffix(direction=Suffix.IMPORT, datatype=Suffix.INT) + model.junk_IMPORT_float = Suffix(direction=Suffix.IMPORT, datatype=Suffix.FLOAT) + model.junk_IMPORT_EXPORT_float = Suffix( + direction=Suffix.IMPORT_EXPORT, datatype=Suffix.FLOAT + ) + model.junk_EXPORT = Suffix(direction=Suffix.EXPORT, datatype=None) + model.junk_LOCAL = Suffix(direction=Suffix.LOCAL, datatype=None) suffixes = dict(active_import_suffix_generator(model)) self.assertTrue('junk_IMPORT_int' in suffixes) @@ -1017,7 +1038,7 @@ def test_active_import_suffix_generator(self): self.assertTrue('junk_LOCAL' not in suffixes) model.junk_IMPORT_float.activate() - suffixes = dict(active_import_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(active_import_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_IMPORT_int' not in suffixes) self.assertTrue('junk_IMPORT_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT_float' in suffixes) @@ -1025,7 +1046,7 @@ def test_active_import_suffix_generator(self): self.assertTrue('junk_LOCAL' not in suffixes) model.junk_IMPORT_float.deactivate() - suffixes = dict(active_import_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(active_import_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_IMPORT_int' not in suffixes) self.assertTrue('junk_IMPORT_float' not in suffixes) self.assertTrue('junk_IMPORT_EXPORT_float' in suffixes) @@ -1034,11 +1055,13 @@ def test_active_import_suffix_generator(self): def test_import_suffix_generator(self): model = ConcreteModel() - model.junk_IMPORT_int = Suffix(direction=Suffix.IMPORT,datatype=Suffix.INT) - model.junk_IMPORT_float = Suffix(direction=Suffix.IMPORT,datatype=Suffix.FLOAT) - model.junk_IMPORT_EXPORT_float = Suffix(direction=Suffix.IMPORT_EXPORT,datatype=Suffix.FLOAT) - model.junk_EXPORT = Suffix(direction=Suffix.EXPORT,datatype=None) - model.junk_LOCAL = Suffix(direction=Suffix.LOCAL,datatype=None) + model.junk_IMPORT_int = Suffix(direction=Suffix.IMPORT, datatype=Suffix.INT) + model.junk_IMPORT_float = Suffix(direction=Suffix.IMPORT, datatype=Suffix.FLOAT) + model.junk_IMPORT_EXPORT_float = Suffix( + direction=Suffix.IMPORT_EXPORT, datatype=Suffix.FLOAT + ) + model.junk_EXPORT = Suffix(direction=Suffix.EXPORT, datatype=None) + model.junk_LOCAL = Suffix(direction=Suffix.LOCAL, datatype=None) suffixes = dict(import_suffix_generator(model)) self.assertTrue('junk_IMPORT_int' in suffixes) @@ -1056,7 +1079,7 @@ def test_import_suffix_generator(self): self.assertTrue('junk_LOCAL' not in suffixes) model.junk_IMPORT_float.activate() - suffixes = dict(import_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(import_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_IMPORT_int' not in suffixes) self.assertTrue('junk_IMPORT_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT_float' in suffixes) @@ -1064,7 +1087,7 @@ def test_import_suffix_generator(self): self.assertTrue('junk_LOCAL' not in suffixes) model.junk_IMPORT_float.deactivate() - suffixes = dict(import_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(import_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_IMPORT_int' not in suffixes) self.assertTrue('junk_IMPORT_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT_float' in suffixes) @@ -1073,11 +1096,11 @@ def test_import_suffix_generator(self): def test_active_local_suffix_generator(self): model = ConcreteModel() - model.junk_LOCAL_int = Suffix(direction=Suffix.LOCAL,datatype=Suffix.INT) - model.junk_LOCAL_float = Suffix(direction=Suffix.LOCAL,datatype=Suffix.FLOAT) - model.junk_IMPORT_EXPORT = Suffix(direction=Suffix.IMPORT_EXPORT,datatype=None) - model.junk_EXPORT = Suffix(direction=Suffix.EXPORT,datatype=None) - model.junk_IMPORT = Suffix(direction=Suffix.IMPORT,datatype=None) + model.junk_LOCAL_int = Suffix(direction=Suffix.LOCAL, datatype=Suffix.INT) + model.junk_LOCAL_float = Suffix(direction=Suffix.LOCAL, datatype=Suffix.FLOAT) + model.junk_IMPORT_EXPORT = Suffix(direction=Suffix.IMPORT_EXPORT, datatype=None) + model.junk_EXPORT = Suffix(direction=Suffix.EXPORT, datatype=None) + model.junk_IMPORT = Suffix(direction=Suffix.IMPORT, datatype=None) suffixes = dict(active_local_suffix_generator(model)) self.assertTrue('junk_LOCAL_int' in suffixes) @@ -1095,7 +1118,7 @@ def test_active_local_suffix_generator(self): self.assertTrue('junk_IMPORT' not in suffixes) model.junk_LOCAL_float.activate() - suffixes = dict(active_local_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(active_local_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_LOCAL_int' not in suffixes) self.assertTrue('junk_LOCAL_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT' not in suffixes) @@ -1103,7 +1126,7 @@ def test_active_local_suffix_generator(self): self.assertTrue('junk_IMPORT' not in suffixes) model.junk_LOCAL_float.deactivate() - suffixes = dict(active_local_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(active_local_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_LOCAL_int' not in suffixes) self.assertTrue('junk_LOCAL_float' not in suffixes) self.assertTrue('junk_IMPORT_EXPORT' not in suffixes) @@ -1112,11 +1135,11 @@ def test_active_local_suffix_generator(self): def test_local_suffix_generator(self): model = ConcreteModel() - model.junk_LOCAL_int = Suffix(direction=Suffix.LOCAL,datatype=Suffix.INT) - model.junk_LOCAL_float = Suffix(direction=Suffix.LOCAL,datatype=Suffix.FLOAT) - model.junk_IMPORT_EXPORT = Suffix(direction=Suffix.IMPORT_EXPORT,datatype=None) - model.junk_EXPORT = Suffix(direction=Suffix.EXPORT,datatype=None) - model.junk_IMPORT = Suffix(direction=Suffix.IMPORT,datatype=None) + model.junk_LOCAL_int = Suffix(direction=Suffix.LOCAL, datatype=Suffix.INT) + model.junk_LOCAL_float = Suffix(direction=Suffix.LOCAL, datatype=Suffix.FLOAT) + model.junk_IMPORT_EXPORT = Suffix(direction=Suffix.IMPORT_EXPORT, datatype=None) + model.junk_EXPORT = Suffix(direction=Suffix.EXPORT, datatype=None) + model.junk_IMPORT = Suffix(direction=Suffix.IMPORT, datatype=None) suffixes = dict(local_suffix_generator(model)) self.assertTrue('junk_LOCAL_int' in suffixes) @@ -1134,7 +1157,7 @@ def test_local_suffix_generator(self): self.assertTrue('junk_IMPORT' not in suffixes) model.junk_LOCAL_float.activate() - suffixes = dict(local_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(local_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_LOCAL_int' not in suffixes) self.assertTrue('junk_LOCAL_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT' not in suffixes) @@ -1142,7 +1165,7 @@ def test_local_suffix_generator(self): self.assertTrue('junk_IMPORT' not in suffixes) model.junk_LOCAL_float.deactivate() - suffixes = dict(local_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(local_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_LOCAL_int' not in suffixes) self.assertTrue('junk_LOCAL_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT' not in suffixes) @@ -1151,11 +1174,11 @@ def test_local_suffix_generator(self): def test_active_suffix_generator(self): model = ConcreteModel() - model.junk_LOCAL_int = Suffix(direction=Suffix.LOCAL,datatype=Suffix.INT) - model.junk_LOCAL_float = Suffix(direction=Suffix.LOCAL,datatype=Suffix.FLOAT) - model.junk_IMPORT_EXPORT = Suffix(direction=Suffix.IMPORT_EXPORT,datatype=None) - model.junk_EXPORT = Suffix(direction=Suffix.EXPORT,datatype=None) - model.junk_IMPORT = Suffix(direction=Suffix.IMPORT,datatype=None) + model.junk_LOCAL_int = Suffix(direction=Suffix.LOCAL, datatype=Suffix.INT) + model.junk_LOCAL_float = Suffix(direction=Suffix.LOCAL, datatype=Suffix.FLOAT) + model.junk_IMPORT_EXPORT = Suffix(direction=Suffix.IMPORT_EXPORT, datatype=None) + model.junk_EXPORT = Suffix(direction=Suffix.EXPORT, datatype=None) + model.junk_IMPORT = Suffix(direction=Suffix.IMPORT, datatype=None) suffixes = dict(active_suffix_generator(model)) self.assertTrue('junk_LOCAL_int' in suffixes) @@ -1173,7 +1196,7 @@ def test_active_suffix_generator(self): self.assertTrue('junk_IMPORT' in suffixes) model.junk_LOCAL_float.activate() - suffixes = dict(active_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(active_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_LOCAL_int' not in suffixes) self.assertTrue('junk_LOCAL_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT' not in suffixes) @@ -1181,7 +1204,7 @@ def test_active_suffix_generator(self): self.assertTrue('junk_IMPORT' not in suffixes) model.junk_LOCAL_float.deactivate() - suffixes = dict(active_suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(active_suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_LOCAL_int' not in suffixes) self.assertTrue('junk_LOCAL_float' not in suffixes) self.assertTrue('junk_IMPORT_EXPORT' not in suffixes) @@ -1190,11 +1213,11 @@ def test_active_suffix_generator(self): def test_suffix_generator(self): model = ConcreteModel() - model.junk_LOCAL_int = Suffix(direction=Suffix.LOCAL,datatype=Suffix.INT) - model.junk_LOCAL_float = Suffix(direction=Suffix.LOCAL,datatype=Suffix.FLOAT) - model.junk_IMPORT_EXPORT = Suffix(direction=Suffix.IMPORT_EXPORT,datatype=None) - model.junk_EXPORT = Suffix(direction=Suffix.EXPORT,datatype=None) - model.junk_IMPORT = Suffix(direction=Suffix.IMPORT,datatype=None) + model.junk_LOCAL_int = Suffix(direction=Suffix.LOCAL, datatype=Suffix.INT) + model.junk_LOCAL_float = Suffix(direction=Suffix.LOCAL, datatype=Suffix.FLOAT) + model.junk_IMPORT_EXPORT = Suffix(direction=Suffix.IMPORT_EXPORT, datatype=None) + model.junk_EXPORT = Suffix(direction=Suffix.EXPORT, datatype=None) + model.junk_IMPORT = Suffix(direction=Suffix.IMPORT, datatype=None) suffixes = dict(suffix_generator(model)) self.assertTrue('junk_LOCAL_int' in suffixes) @@ -1212,7 +1235,7 @@ def test_suffix_generator(self): self.assertTrue('junk_IMPORT' in suffixes) model.junk_LOCAL_float.activate() - suffixes = dict(suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_LOCAL_int' not in suffixes) self.assertTrue('junk_LOCAL_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT' not in suffixes) @@ -1220,329 +1243,329 @@ def test_suffix_generator(self): self.assertTrue('junk_IMPORT' not in suffixes) model.junk_LOCAL_float.deactivate() - suffixes = dict(suffix_generator(model,datatype=Suffix.FLOAT)) + suffixes = dict(suffix_generator(model, datatype=Suffix.FLOAT)) self.assertTrue('junk_LOCAL_int' not in suffixes) self.assertTrue('junk_LOCAL_float' in suffixes) self.assertTrue('junk_IMPORT_EXPORT' not in suffixes) self.assertTrue('junk_EXPORT' not in suffixes) self.assertTrue('junk_IMPORT' not in suffixes) -class TestSuffixCloneUsage(unittest.TestCase): +class TestSuffixCloneUsage(unittest.TestCase): def test_clone_VarElement(self): model = ConcreteModel() model.x = Var() model.junk = Suffix() - self.assertEqual(model.junk.get(model.x),None) - model.junk.set_value(model.x,1.0) - self.assertEqual(model.junk.get(model.x),1.0) + self.assertEqual(model.junk.get(model.x), None) + model.junk.set_value(model.x, 1.0) + self.assertEqual(model.junk.get(model.x), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.x),None) - self.assertEqual(inst.junk.get(inst.x),1.0) + self.assertEqual(inst.junk.get(model.x), None) + self.assertEqual(inst.junk.get(inst.x), 1.0) def test_clone_VarArray(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) + model.x = Var([1, 2, 3], dense=True) model.junk = Suffix() - self.assertEqual(model.junk.get(model.x),None) - self.assertEqual(model.junk.get(model.x[1]),None) - model.junk.set_value(model.x,1.0) - self.assertEqual(model.junk.get(model.x),None) - self.assertEqual(model.junk.get(model.x[1]),1.0) + self.assertEqual(model.junk.get(model.x), None) + self.assertEqual(model.junk.get(model.x[1]), None) + model.junk.set_value(model.x, 1.0) + self.assertEqual(model.junk.get(model.x), None) + self.assertEqual(model.junk.get(model.x[1]), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.x[1]),None) - self.assertEqual(inst.junk.get(inst.x[1]),1.0) + self.assertEqual(inst.junk.get(model.x[1]), None) + self.assertEqual(inst.junk.get(inst.x[1]), 1.0) def test_clone_VarData(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) + model.x = Var([1, 2, 3], dense=True) model.junk = Suffix() - self.assertEqual(model.junk.get(model.x[1]),None) - model.junk.set_value(model.x[1],1.0) - self.assertEqual(model.junk.get(model.x[1]),1.0) + self.assertEqual(model.junk.get(model.x[1]), None) + model.junk.set_value(model.x[1], 1.0) + self.assertEqual(model.junk.get(model.x[1]), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.x[1]),None) - self.assertEqual(inst.junk.get(inst.x[1]),1.0) + self.assertEqual(inst.junk.get(model.x[1]), None) + self.assertEqual(inst.junk.get(inst.x[1]), 1.0) def test_clone_ConstraintElement(self): model = ConcreteModel() model.x = Var() model.c = Constraint(expr=model.x == 1.0) model.junk = Suffix() - self.assertEqual(model.junk.get(model.c),None) - model.junk.set_value(model.c,1.0) - self.assertEqual(model.junk.get(model.c),1.0) + self.assertEqual(model.junk.get(model.c), None) + model.junk.set_value(model.c, 1.0) + self.assertEqual(model.junk.get(model.c), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.c),None) - self.assertEqual(inst.junk.get(inst.c),1.0) + self.assertEqual(inst.junk.get(model.c), None) + self.assertEqual(inst.junk.get(inst.c), 1.0) def test_clone_ConstraintArray(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) - model.c = Constraint([1,2,3],rule=lambda model,i: model.x[i] == 1.0) + model.x = Var([1, 2, 3], dense=True) + model.c = Constraint([1, 2, 3], rule=lambda model, i: model.x[i] == 1.0) model.junk = Suffix() - self.assertEqual(model.junk.get(model.c),None) - self.assertEqual(model.junk.get(model.c[1]),None) - model.junk.set_value(model.c,1.0) - self.assertEqual(model.junk.get(model.c),None) - self.assertEqual(model.junk.get(model.c[1]),1.0) + self.assertEqual(model.junk.get(model.c), None) + self.assertEqual(model.junk.get(model.c[1]), None) + model.junk.set_value(model.c, 1.0) + self.assertEqual(model.junk.get(model.c), None) + self.assertEqual(model.junk.get(model.c[1]), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.c[1]),None) - self.assertEqual(inst.junk.get(inst.c[1]),1.0) + self.assertEqual(inst.junk.get(model.c[1]), None) + self.assertEqual(inst.junk.get(inst.c[1]), 1.0) def test_clone_ConstraintData(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) - model.c = Constraint([1,2,3],rule=lambda model,i: model.x[i] == 1.0) + model.x = Var([1, 2, 3], dense=True) + model.c = Constraint([1, 2, 3], rule=lambda model, i: model.x[i] == 1.0) model.junk = Suffix() - self.assertEqual(model.junk.get(model.c[1]),None) - model.junk.set_value(model.c[1],1.0) - self.assertEqual(model.junk.get(model.c[1]),1.0) + self.assertEqual(model.junk.get(model.c[1]), None) + model.junk.set_value(model.c[1], 1.0) + self.assertEqual(model.junk.get(model.c[1]), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.c[1]),None) - self.assertEqual(inst.junk.get(inst.c[1]),1.0) + self.assertEqual(inst.junk.get(model.c[1]), None) + self.assertEqual(inst.junk.get(inst.c[1]), 1.0) def test_clone_ObjectiveElement(self): model = ConcreteModel() model.x = Var() model.obj = Objective(expr=model.x) model.junk = Suffix() - self.assertEqual(model.junk.get(model.obj),None) - model.junk.set_value(model.obj,1.0) - self.assertEqual(model.junk.get(model.obj),1.0) + self.assertEqual(model.junk.get(model.obj), None) + model.junk.set_value(model.obj, 1.0) + self.assertEqual(model.junk.get(model.obj), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.obj),None) - self.assertEqual(inst.junk.get(inst.obj),1.0) + self.assertEqual(inst.junk.get(model.obj), None) + self.assertEqual(inst.junk.get(inst.obj), 1.0) def test_clone_ObjectiveArray(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) - model.obj = Objective([1,2,3], rule=lambda model,i: model.x[i]) + model.x = Var([1, 2, 3], dense=True) + model.obj = Objective([1, 2, 3], rule=lambda model, i: model.x[i]) model.junk = Suffix() - self.assertEqual(model.junk.get(model.obj),None) - self.assertEqual(model.junk.get(model.obj[1]),None) - model.junk.set_value(model.obj,1.0) - self.assertEqual(model.junk.get(model.obj),None) - self.assertEqual(model.junk.get(model.obj[1]),1.0) + self.assertEqual(model.junk.get(model.obj), None) + self.assertEqual(model.junk.get(model.obj[1]), None) + model.junk.set_value(model.obj, 1.0) + self.assertEqual(model.junk.get(model.obj), None) + self.assertEqual(model.junk.get(model.obj[1]), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.obj[1]),None) - self.assertEqual(inst.junk.get(inst.obj[1]),1.0) + self.assertEqual(inst.junk.get(model.obj[1]), None) + self.assertEqual(inst.junk.get(inst.obj[1]), 1.0) def test_clone_ObjectiveData(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) - model.obj = Objective([1,2,3], rule=lambda model,i: model.x[i]) + model.x = Var([1, 2, 3], dense=True) + model.obj = Objective([1, 2, 3], rule=lambda model, i: model.x[i]) model.junk = Suffix() - self.assertEqual(model.junk.get(model.obj[1]),None) - model.junk.set_value(model.obj[1],1.0) - self.assertEqual(model.junk.get(model.obj[1]),1.0) + self.assertEqual(model.junk.get(model.obj[1]), None) + model.junk.set_value(model.obj[1], 1.0) + self.assertEqual(model.junk.get(model.obj[1]), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.obj[1]),None) - self.assertEqual(inst.junk.get(inst.obj[1]),1.0) + self.assertEqual(inst.junk.get(model.obj[1]), None) + self.assertEqual(inst.junk.get(inst.obj[1]), 1.0) def test_clone_SimpleBlock(self): model = ConcreteModel() model.b = Block() model.junk = Suffix() - self.assertEqual(model.junk.get(model.b),None) - model.junk.set_value(model.b,1.0) - self.assertEqual(model.junk.get(model.b),1.0) + self.assertEqual(model.junk.get(model.b), None) + model.junk.set_value(model.b, 1.0) + self.assertEqual(model.junk.get(model.b), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.b),None) - self.assertEqual(inst.junk.get(inst.b),1.0) + self.assertEqual(inst.junk.get(model.b), None) + self.assertEqual(inst.junk.get(inst.b), 1.0) def test_clone_IndexedBlock(self): model = ConcreteModel() - model.b = Block([1,2,3]) + model.b = Block([1, 2, 3]) model.junk = Suffix() - self.assertEqual(model.junk.get(model.b),None) - self.assertEqual(model.junk.get(model.b[1]),None) - model.junk.set_value(model.b,1.0) - self.assertEqual(model.junk.get(model.b),None) - self.assertEqual(model.junk.get(model.b[1]),1.0) + self.assertEqual(model.junk.get(model.b), None) + self.assertEqual(model.junk.get(model.b[1]), None) + model.junk.set_value(model.b, 1.0) + self.assertEqual(model.junk.get(model.b), None) + self.assertEqual(model.junk.get(model.b[1]), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.b[1]),None) - self.assertEqual(inst.junk.get(inst.b[1]),1.0) + self.assertEqual(inst.junk.get(model.b[1]), None) + self.assertEqual(inst.junk.get(inst.b[1]), 1.0) def test_clone_BlockData(self): model = ConcreteModel() - model.b = Block([1,2,3]) + model.b = Block([1, 2, 3]) model.junk = Suffix() - self.assertEqual(model.junk.get(model.b[1]),None) - model.junk.set_value(model.b[1],1.0) - self.assertEqual(model.junk.get(model.b[1]),1.0) + self.assertEqual(model.junk.get(model.b[1]), None) + model.junk.set_value(model.b[1], 1.0) + self.assertEqual(model.junk.get(model.b[1]), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model.b[1]),None) - self.assertEqual(inst.junk.get(inst.b[1]),1.0) + self.assertEqual(inst.junk.get(model.b[1]), None) + self.assertEqual(inst.junk.get(inst.b[1]), 1.0) def test_clone_model(self): model = ConcreteModel() model.junk = Suffix() - self.assertEqual(model.junk.get(model),None) - model.junk.set_value(model,1.0) - self.assertEqual(model.junk.get(model),1.0) + self.assertEqual(model.junk.get(model), None) + model.junk.set_value(model, 1.0) + self.assertEqual(model.junk.get(model), 1.0) inst = model.clone() - self.assertEqual(inst.junk.get(model),None) - self.assertEqual(inst.junk.get(inst),1.0) + self.assertEqual(inst.junk.get(model), None) + self.assertEqual(inst.junk.get(inst), 1.0) class TestSuffixPickleUsage(unittest.TestCase): - def test_pickle_VarElement(self): model = ConcreteModel() model.x = Var() model.junk = Suffix() - self.assertEqual(model.junk.get(model.x),None) - model.junk.set_value(model.x,1.0) - self.assertEqual(model.junk.get(model.x),1.0) + self.assertEqual(model.junk.get(model.x), None) + model.junk.set_value(model.x, 1.0) + self.assertEqual(model.junk.get(model.x), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.x),None) - self.assertEqual(inst.junk.get(inst.x),1.0) + self.assertEqual(inst.junk.get(model.x), None) + self.assertEqual(inst.junk.get(inst.x), 1.0) def test_pickle_VarArray(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) + model.x = Var([1, 2, 3], dense=True) model.junk = Suffix() - self.assertEqual(model.junk.get(model.x),None) - self.assertEqual(model.junk.get(model.x[1]),None) - model.junk.set_value(model.x,1.0) - self.assertEqual(model.junk.get(model.x),None) - self.assertEqual(model.junk.get(model.x[1]),1.0) + self.assertEqual(model.junk.get(model.x), None) + self.assertEqual(model.junk.get(model.x[1]), None) + model.junk.set_value(model.x, 1.0) + self.assertEqual(model.junk.get(model.x), None) + self.assertEqual(model.junk.get(model.x[1]), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.x[1]),None) - self.assertEqual(inst.junk.get(inst.x[1]),1.0) + self.assertEqual(inst.junk.get(model.x[1]), None) + self.assertEqual(inst.junk.get(inst.x[1]), 1.0) def test_pickle_VarData(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) + model.x = Var([1, 2, 3], dense=True) model.junk = Suffix() - self.assertEqual(model.junk.get(model.x[1]),None) - model.junk.set_value(model.x[1],1.0) - self.assertEqual(model.junk.get(model.x[1]),1.0) + self.assertEqual(model.junk.get(model.x[1]), None) + model.junk.set_value(model.x[1], 1.0) + self.assertEqual(model.junk.get(model.x[1]), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.x[1]),None) - self.assertEqual(inst.junk.get(inst.x[1]),1.0) + self.assertEqual(inst.junk.get(model.x[1]), None) + self.assertEqual(inst.junk.get(inst.x[1]), 1.0) def test_pickle_ConstraintElement(self): model = ConcreteModel() model.x = Var() model.c = Constraint(expr=model.x == 1.0) model.junk = Suffix() - self.assertEqual(model.junk.get(model.c),None) - model.junk.set_value(model.c,1.0) - self.assertEqual(model.junk.get(model.c),1.0) + self.assertEqual(model.junk.get(model.c), None) + model.junk.set_value(model.c, 1.0) + self.assertEqual(model.junk.get(model.c), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.c),None) - self.assertEqual(inst.junk.get(inst.c),1.0) + self.assertEqual(inst.junk.get(model.c), None) + self.assertEqual(inst.junk.get(inst.c), 1.0) def test_pickle_ConstraintArray(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) - model.c = Constraint([1,2,3],rule=simple_con_rule) + model.x = Var([1, 2, 3], dense=True) + model.c = Constraint([1, 2, 3], rule=simple_con_rule) model.junk = Suffix() - self.assertEqual(model.junk.get(model.c),None) - self.assertEqual(model.junk.get(model.c[1]),None) - model.junk.set_value(model.c,1.0) - self.assertEqual(model.junk.get(model.c),None) - self.assertEqual(model.junk.get(model.c[1]),1.0) + self.assertEqual(model.junk.get(model.c), None) + self.assertEqual(model.junk.get(model.c[1]), None) + model.junk.set_value(model.c, 1.0) + self.assertEqual(model.junk.get(model.c), None) + self.assertEqual(model.junk.get(model.c[1]), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.c[1]),None) - self.assertEqual(inst.junk.get(inst.c[1]),1.0) + self.assertEqual(inst.junk.get(model.c[1]), None) + self.assertEqual(inst.junk.get(inst.c[1]), 1.0) def test_pickle_ConstraintData(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) - model.c = Constraint([1,2,3],rule=simple_con_rule) + model.x = Var([1, 2, 3], dense=True) + model.c = Constraint([1, 2, 3], rule=simple_con_rule) model.junk = Suffix() - self.assertEqual(model.junk.get(model.c[1]),None) - model.junk.set_value(model.c[1],1.0) - self.assertEqual(model.junk.get(model.c[1]),1.0) + self.assertEqual(model.junk.get(model.c[1]), None) + model.junk.set_value(model.c[1], 1.0) + self.assertEqual(model.junk.get(model.c[1]), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.c[1]),None) - self.assertEqual(inst.junk.get(inst.c[1]),1.0) + self.assertEqual(inst.junk.get(model.c[1]), None) + self.assertEqual(inst.junk.get(inst.c[1]), 1.0) def test_pickle_ObjectiveElement(self): model = ConcreteModel() model.x = Var() model.obj = Objective(expr=model.x) model.junk = Suffix() - self.assertEqual(model.junk.get(model.obj),None) - model.junk.set_value(model.obj,1.0) - self.assertEqual(model.junk.get(model.obj),1.0) + self.assertEqual(model.junk.get(model.obj), None) + model.junk.set_value(model.obj, 1.0) + self.assertEqual(model.junk.get(model.obj), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.obj),None) - self.assertEqual(inst.junk.get(inst.obj),1.0) + self.assertEqual(inst.junk.get(model.obj), None) + self.assertEqual(inst.junk.get(inst.obj), 1.0) def test_pickle_ObjectiveArray(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) - model.obj = Objective([1,2,3],rule=simple_obj_rule) + model.x = Var([1, 2, 3], dense=True) + model.obj = Objective([1, 2, 3], rule=simple_obj_rule) model.junk = Suffix() - self.assertEqual(model.junk.get(model.obj),None) - self.assertEqual(model.junk.get(model.obj[1]),None) - model.junk.set_value(model.obj,1.0) - self.assertEqual(model.junk.get(model.obj),None) - self.assertEqual(model.junk.get(model.obj[1]),1.0) + self.assertEqual(model.junk.get(model.obj), None) + self.assertEqual(model.junk.get(model.obj[1]), None) + model.junk.set_value(model.obj, 1.0) + self.assertEqual(model.junk.get(model.obj), None) + self.assertEqual(model.junk.get(model.obj[1]), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.obj[1]),None) - self.assertEqual(inst.junk.get(inst.obj[1]),1.0) + self.assertEqual(inst.junk.get(model.obj[1]), None) + self.assertEqual(inst.junk.get(inst.obj[1]), 1.0) def test_pickle_ObjectiveData(self): model = ConcreteModel() - model.x = Var([1,2,3], dense=True) - model.obj = Objective([1,2,3],rule=simple_obj_rule) + model.x = Var([1, 2, 3], dense=True) + model.obj = Objective([1, 2, 3], rule=simple_obj_rule) model.junk = Suffix() - self.assertEqual(model.junk.get(model.obj[1]),None) - model.junk.set_value(model.obj[1],1.0) - self.assertEqual(model.junk.get(model.obj[1]),1.0) + self.assertEqual(model.junk.get(model.obj[1]), None) + model.junk.set_value(model.obj[1], 1.0) + self.assertEqual(model.junk.get(model.obj[1]), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.obj[1]),None) - self.assertEqual(inst.junk.get(inst.obj[1]),1.0) + self.assertEqual(inst.junk.get(model.obj[1]), None) + self.assertEqual(inst.junk.get(inst.obj[1]), 1.0) def test_pickle_SimpleBlock(self): model = ConcreteModel() model.b = Block() model.junk = Suffix() - self.assertEqual(model.junk.get(model.b),None) - model.junk.set_value(model.b,1.0) - self.assertEqual(model.junk.get(model.b),1.0) + self.assertEqual(model.junk.get(model.b), None) + model.junk.set_value(model.b, 1.0) + self.assertEqual(model.junk.get(model.b), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.b),None) - self.assertEqual(inst.junk.get(inst.b),1.0) + self.assertEqual(inst.junk.get(model.b), None) + self.assertEqual(inst.junk.get(inst.b), 1.0) def test_pickle_IndexedBlock(self): model = ConcreteModel() - model.b = Block([1,2,3]) + model.b = Block([1, 2, 3]) model.junk = Suffix() - self.assertEqual(model.junk.get(model.b),None) - self.assertEqual(model.junk.get(model.b[1]),None) - model.junk.set_value(model.b,1.0) - self.assertEqual(model.junk.get(model.b),None) - self.assertEqual(model.junk.get(model.b[1]),1.0) + self.assertEqual(model.junk.get(model.b), None) + self.assertEqual(model.junk.get(model.b[1]), None) + model.junk.set_value(model.b, 1.0) + self.assertEqual(model.junk.get(model.b), None) + self.assertEqual(model.junk.get(model.b[1]), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.b[1]),None) - self.assertEqual(inst.junk.get(inst.b[1]),1.0) + self.assertEqual(inst.junk.get(model.b[1]), None) + self.assertEqual(inst.junk.get(inst.b[1]), 1.0) def test_pickle_BlockData(self): model = ConcreteModel() - model.b = Block([1,2,3]) + model.b = Block([1, 2, 3]) model.junk = Suffix() - self.assertEqual(model.junk.get(model.b[1]),None) - model.junk.set_value(model.b[1],1.0) - self.assertEqual(model.junk.get(model.b[1]),1.0) + self.assertEqual(model.junk.get(model.b[1]), None) + model.junk.set_value(model.b[1], 1.0) + self.assertEqual(model.junk.get(model.b[1]), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model.b[1]),None) - self.assertEqual(inst.junk.get(inst.b[1]),1.0) + self.assertEqual(inst.junk.get(model.b[1]), None) + self.assertEqual(inst.junk.get(inst.b[1]), 1.0) def test_pickle_model(self): model = ConcreteModel() model.junk = Suffix() - self.assertEqual(model.junk.get(model),None) - model.junk.set_value(model,1.0) - self.assertEqual(model.junk.get(model),1.0) + self.assertEqual(model.junk.get(model), None) + model.junk.set_value(model, 1.0) + self.assertEqual(model.junk.get(model), 1.0) inst = pickle.loads(pickle.dumps(model)) - self.assertEqual(inst.junk.get(model),None) - self.assertEqual(inst.junk.get(inst),1.0) + self.assertEqual(inst.junk.get(model), None) + self.assertEqual(inst.junk.get(inst), 1.0) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_symbol_map.py b/pyomo/core/tests/unit/test_symbol_map.py index dc7302f9641..5f6416e2c8d 100644 --- a/pyomo/core/tests/unit/test_symbol_map.py +++ b/pyomo/core/tests/unit/test_symbol_map.py @@ -12,28 +12,128 @@ import pyomo.common.unittest as unittest from pyomo.core.expr.symbol_map import SymbolMap from pyomo.core.kernel.variable import variable +from pyomo.environ import ConcreteModel, Var -class TestSymbolMap(unittest.TestCase): +class TestSymbolMap(unittest.TestCase): def test_no_labeler(self): s = SymbolMap() v = variable() self.assertEqual(str(v), s.getSymbol(v)) + s = SymbolMap() + m = ConcreteModel() + m.x = Var() + self.assertEqual('x', s.createSymbol(m.x)) + + s = SymbolMap() + m.y = Var([1, 2, 3]) + s.createSymbols(m.y.values()) + self.assertEqual(s.bySymbol, {'y[1]': m.y[1], 'y[2]': m.y[2], 'y[3]': m.y[3]}) + self.assertEqual( + s.byObject, {id(m.y[1]): 'y[1]', id(m.y[2]): 'y[2]', id(m.y[3]): 'y[3]'} + ) + + def test_default_labeler(self): + s = SymbolMap(lambda x: "_" + str(x)) + v = variable() + self.assertEqual("_" + str(v), s.getSymbol(v)) + + s = SymbolMap(lambda x: "_" + str(x)) + m = ConcreteModel() + m.x = Var() + self.assertEqual('_x', s.createSymbol(m.x)) + + s = SymbolMap(lambda x: "_" + str(x)) + m.y = Var([1, 2, 3]) + s.createSymbols(m.y.values()) + self.assertEqual( + s.bySymbol, {'_y[1]': m.y[1], '_y[2]': m.y[2], '_y[3]': m.y[3]} + ) + self.assertEqual( + s.byObject, {id(m.y[1]): '_y[1]', id(m.y[2]): '_y[2]', id(m.y[3]): '_y[3]'} + ) + + def test_custom_labeler(self): + labeler = lambda x, y: "^" + str(x) + y + + s = SymbolMap(lambda x: "_" + str(x)) + v = variable() + self.assertEqual("^" + str(v) + "~", s.getSymbol(v, labeler, "~")) + + s = SymbolMap(lambda x: "_" + str(x)) + m = ConcreteModel() + m.x = Var() + self.assertEqual('^x~', s.createSymbol(m.x, labeler, "~")) + + s = SymbolMap(lambda x: "_" + str(x)) + m.y = Var([1, 2, 3]) + s.createSymbols(m.y.values(), labeler, "~") + self.assertEqual( + s.bySymbol, {'^y[1]~': m.y[1], '^y[2]~': m.y[2], '^y[3]~': m.y[3]} + ) + self.assertEqual( + s.byObject, + {id(m.y[1]): '^y[1]~', id(m.y[2]): '^y[2]~', id(m.y[3]): '^y[3]~'}, + ) + def test_existing_alias(self): s = SymbolMap() v1 = variable() s.alias(v1, "v") - self.assertIs(s.aliases["v"](), v1) + self.assertIs(s.aliases["v"], v1) v2 = variable() with self.assertRaises(RuntimeError): s.alias(v2, "v") s.alias(v1, "A") - self.assertIs(s.aliases["v"](), v1) - self.assertIs(s.aliases["A"](), v1) + self.assertIs(s.aliases["v"], v1) + self.assertIs(s.aliases["A"], v1) s.alias(v1, "A") - self.assertIs(s.aliases["v"](), v1) - self.assertIs(s.aliases["A"](), v1) + self.assertIs(s.aliases["v"], v1) + self.assertIs(s.aliases["A"], v1) + + def test_add_symbol(self): + s = SymbolMap() + m = ConcreteModel() + m.x = Var() + m.y = Var([1, 2, 3]) + s.addSymbol(m.x, 'x') + self.assertEqual(s.bySymbol, {'x': m.x}) + self.assertEqual(s.byObject, {id(m.x): 'x'}) + with self.assertRaisesRegex( + RuntimeError, r'SymbolMap.addSymbol\(\): duplicate symbol.' + ): + s.addSymbol(m.y, 'x') + s = SymbolMap() + s.addSymbol(m.x, 'x') + with self.assertRaisesRegex( + RuntimeError, r'SymbolMap.addSymbol\(\): duplicate object.' + ): + s.addSymbol(m.x, 'y') + + def test_add_symbols(self): + m = ConcreteModel() + m.x = Var() + m.y = Var([1, 2, 3]) + + s = SymbolMap() + s.addSymbols((m.y[i], str(i)) for i in (1, 2, 3)) + self.assertEqual(s.bySymbol, {'1': m.y[1], '2': m.y[2], '3': m.y[3]}) + self.assertEqual( + s.byObject, {id(m.y[1]): '1', id(m.y[2]): '2', id(m.y[3]): '3'} + ) + with self.assertRaisesRegex( + RuntimeError, r'SymbolMap.addSymbols\(\): duplicate symbol.' + ): + s.addSymbols([(m.y, '1')]) + + s = SymbolMap() + s.addSymbols((m.y[i], str(i)) for i in (1, 2, 3)) + with self.assertRaisesRegex( + RuntimeError, r'SymbolMap.addSymbols\(\): duplicate object.' + ): + s.addSymbols([(m.y[2], 'x')]) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_symbolic.py b/pyomo/core/tests/unit/test_symbolic.py index b37ab5fd9c6..bbac4599363 100644 --- a/pyomo/core/tests/unit/test_symbolic.py +++ b/pyomo/core/tests/unit/test_symbolic.py @@ -12,28 +12,54 @@ import pyomo.common.unittest as unittest from pyomo.common.errors import DeveloperError, NondifferentiableError -from pyomo.environ import (ConcreteModel, Var, Param, Set, NonNegativeReals, - Expression, RangeSet, sin, cos, tan, sinh, cosh, - tanh, asin, acos, atan, asinh, acosh, atanh, - log, log10, exp, sqrt, ceil, floor) +from pyomo.environ import ( + ConcreteModel, + Var, + Param, + Set, + NonNegativeReals, + Expression, + RangeSet, + sin, + cos, + tan, + sinh, + cosh, + tanh, + asin, + acos, + atan, + asinh, + acosh, + atanh, + log, + log10, + exp, + sqrt, + ceil, + floor, +) from pyomo.core.expr.calculus.diff_with_sympy import differentiate -from pyomo.core.expr.sympy_tools import PyomoSympyBimap, sympy_available, sympy2pyomo_expression +from pyomo.core.expr.sympy_tools import ( + PyomoSympyBimap, + sympy_available, + sympy2pyomo_expression, +) def s(e): - return str(e).replace(' ','').replace('1.0','1').replace('2.0','2') + return str(e).replace(' ', '').replace('1.0', '1').replace('2.0', '2') -@unittest.skipIf( not sympy_available, - "Symbolic derivatives require the sympy package" ) -class SymbolicDerivatives(unittest.TestCase): +@unittest.skipIf(not sympy_available, "Symbolic derivatives require the sympy package") +class SymbolicDerivatives(unittest.TestCase): def test_single_derivatives1(self): m = ConcreteModel() m.x = Var() m.y = Var() e = differentiate(1, wrt=m.x) - self.assertIn(type(e), (int,float)) + self.assertIn(type(e), (int, float)) self.assertEqual(e, 0) def test_single_derivatives2(self): @@ -42,7 +68,7 @@ def test_single_derivatives2(self): m.y = Var() e = differentiate(m.x, wrt=m.x) - self.assertIn(type(e), (int,float)) + self.assertIn(type(e), (int, float)) self.assertEqual(e, 1) def test_single_derivatives3(self): @@ -52,7 +78,7 @@ def test_single_derivatives3(self): e = differentiate(m.x**2, wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(2.*m.x)) + self.assertEqual(s(e), s(2.0 * m.x)) def test_single_derivatives4(self): m = ConcreteModel() @@ -60,7 +86,7 @@ def test_single_derivatives4(self): m.y = Var() e = differentiate(m.y, wrt=m.x) - self.assertIn(type(e), (int,float)) + self.assertIn(type(e), (int, float)) self.assertEqual(e, 0) def test_single_derivatives5(self): @@ -68,7 +94,7 @@ def test_single_derivatives5(self): m.x = Var() m.y = Var() - e = differentiate(m.x*m.y, wrt=m.x) + e = differentiate(m.x * m.y, wrt=m.x) self.assertIs(e, m.y) self.assertEqual(s(e), s(m.y)) @@ -77,18 +103,18 @@ def test_single_derivatives6(self): m.x = Var() m.y = Var() - e = differentiate(m.x**2*m.y, wrt=m.x) + e = differentiate(m.x**2 * m.y, wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(2.*m.x*m.y)) + self.assertEqual(s(e), s(2.0 * m.x * m.y)) def test_single_derivatives7(self): m = ConcreteModel() m.x = Var() m.y = Var() - e = differentiate(m.x**2/m.y, wrt=m.x) + e = differentiate(m.x**2 / m.y, wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(2.*m.x*m.y**-1.)) + self.assertEqual(s(e), s(2.0 * m.x * m.y**-1.0)) def test_single_derivative_list(self): m = ConcreteModel() @@ -99,14 +125,14 @@ def test_single_derivative_list(self): self.assertIs(type(e), list) self.assertEqual(len(e), 1) e = e[0] - self.assertIn(type(e), (int,float)) + self.assertIn(type(e), (int, float)) self.assertEqual(e, 0) e = differentiate(m.x, wrt_list=[m.x]) self.assertIs(type(e), list) self.assertEqual(len(e), 1) e = e[0] - self.assertIn(type(e), (int,float)) + self.assertIn(type(e), (int, float)) self.assertEqual(e, 1) e = differentiate(m.x**2, wrt_list=[m.x]) @@ -114,38 +140,37 @@ def test_single_derivative_list(self): self.assertEqual(len(e), 1) e = e[0] self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(2.*m.x)) + self.assertEqual(s(e), s(2.0 * m.x)) e = differentiate(m.y, wrt_list=[m.x]) self.assertIs(type(e), list) self.assertEqual(len(e), 1) e = e[0] - self.assertIn(type(e), (int,float)) + self.assertIn(type(e), (int, float)) self.assertEqual(e, 0) - e = differentiate(m.x*m.y, wrt_list=[m.x]) + e = differentiate(m.x * m.y, wrt_list=[m.x]) self.assertIs(type(e), list) self.assertEqual(len(e), 1) e = e[0] self.assertIs(e, m.y) self.assertEqual(s(e), s(m.y)) - e = differentiate(m.x**2*m.y, wrt_list=[m.x]) + e = differentiate(m.x**2 * m.y, wrt_list=[m.x]) self.assertIs(type(e), list) self.assertEqual(len(e), 1) e = e[0] self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(2.*m.x*m.y)) + self.assertEqual(s(e), s(2.0 * m.x * m.y)) - e = differentiate(m.x**2/m.y, wrt_list=[m.x]) + e = differentiate(m.x**2 / m.y, wrt_list=[m.x]) self.assertIs(type(e), list) self.assertEqual(len(e), 1) e = e[0] self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(2.*m.x*m.y**-1.)) - + self.assertEqual(s(e), s(2.0 * m.x * m.y**-1.0)) - def test_trig_fuctions(self): + def test_trig_functions(self): m = ConcreteModel() m.x = Var() @@ -155,11 +180,11 @@ def test_trig_fuctions(self): e = differentiate(cos(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(-1.0*sin(m.x))) + self.assertEqual(s(e), s(-1.0 * sin(m.x))) e = differentiate(tan(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(1.+tan(m.x)**2.)) + self.assertEqual(s(e), s(1.0 + tan(m.x) ** 2.0)) e = differentiate(sinh(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) @@ -171,37 +196,35 @@ def test_trig_fuctions(self): e = differentiate(tanh(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(1.0-tanh(m.x)**2.0)) - + self.assertEqual(s(e), s(1.0 - tanh(m.x) ** 2.0)) e = differentiate(asin(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s((1.0 + (-1.0)*m.x**2.)**-0.5)) + self.assertEqual(s(e), s((1.0 + (-1.0) * m.x**2.0) ** -0.5)) e = differentiate(acos(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(-1.*(1.+ (-1.0)*m.x**2.)**-0.5)) + self.assertEqual(s(e), s(-1.0 * (1.0 + (-1.0) * m.x**2.0) ** -0.5)) e = differentiate(atan(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s((1.+m.x**2.)**-1.)) + self.assertEqual(s(e), s((1.0 + m.x**2.0) ** -1.0)) e = differentiate(asinh(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s((1.+m.x**2)**-.5)) + self.assertEqual(s(e), s((1.0 + m.x**2) ** -0.5)) e = differentiate(acosh(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) # Older versions of sympy: - if s(e) == s((-1.+m.x**2.)**-.5): + if s(e) == s((-1.0 + m.x**2.0) ** -0.5): pass else: - self.assertEqual(s(e), s((1.+m.x)**-.5*(-1.+m.x)**-.5)) + self.assertEqual(s(e), s((1.0 + m.x) ** -0.5 * (-1.0 + m.x) ** -0.5)) e = differentiate(atanh(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s((1.+(-1.0)*m.x**2.)**-1.)) - + self.assertEqual(s(e), s((1.0 + (-1.0) * m.x**2.0) ** -1.0)) def test_intrinsic_functions1(self): m = ConcreteModel() @@ -209,7 +232,7 @@ def test_intrinsic_functions1(self): e = differentiate(log(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(m.x**-1.)) + self.assertEqual(s(e), s(m.x**-1.0)) def test_intrinsic_functions2(self): m = ConcreteModel() @@ -224,7 +247,7 @@ def test_intrinsic_functions3(self): m.x = Var() e = differentiate(exp(2 * m.x), wrt=m.x) - self.assertEqual(s(e), s(2. * exp(2. * m.x))) + self.assertEqual(s(e), s(2.0 * exp(2.0 * m.x))) def test_intrinsic_functions4(self): m = ConcreteModel() @@ -232,7 +255,7 @@ def test_intrinsic_functions4(self): e = differentiate(log10(m.x), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(m.x**-1.0 * (1.0/log(10)))) + self.assertEqual(s(e), s(m.x**-1.0 * (1.0 / log(10)))) def test_intrinsic_functions5(self): m = ConcreteModel() @@ -240,7 +263,7 @@ def test_intrinsic_functions5(self): e = differentiate(log10(log10(m.x)), wrt=m.x) self.assertTrue(e.is_expression_type()) - self.assertEqual(s(e), s(m.x**-1.0 * (1.0/log(10)) * log(m.x)**-1.0)) + self.assertEqual(s(e), s(m.x**-1.0 * (1.0 / log(10)) * log(m.x) ** -1.0)) def test_sqrt_function(self): m = ConcreteModel() @@ -265,7 +288,7 @@ def test_param(self): m.x = Var() m.p = Param(mutable=True, initialize=5) - e = differentiate(m.p*m.x, wrt=m.x) + e = differentiate(m.p * m.x, wrt=m.x) self.assertIs(type(e), float) self.assertEqual(e, 5.0) @@ -276,6 +299,7 @@ def test_Expression_component(self): def y_rule(m, s): return m.x[s] * 2 + m.y = Expression(m.s, rule=y_rule) expr = 1 - m.y['A'] ** 2 @@ -299,7 +323,7 @@ def test_jacobian(self): idxMap[i] = len(jacs) jacs.append(m.x[i]) - expr = m.x[1]+m.x[2]*m.x[3]**2 + expr = m.x[1] + m.x[2] * m.x[3] ** 2 ans = differentiate(expr, wrt_list=jacs) self.assertEqual(len(ans), len(m.I)) @@ -318,23 +342,23 @@ def test_hessian(self): hessian = [] for i in m.I: for j in m.I: - idxMap[i,j] = len(hessian) + idxMap[i, j] = len(hessian) hessian.append((m.x[i], m.x[j])) - expr = m.x[1]+m.x[2]*m.x[3]**2 + expr = m.x[1] + m.x[2] * m.x[3] ** 2 ans = differentiate(expr, wrt_list=hessian) - self.assertEqual(len(ans), len(m.I)**2) + self.assertEqual(len(ans), len(m.I) ** 2) for i in m.I: for j in m.I: - self.assertEqual(str(ans[idxMap[i,j]]), str(ans[idxMap[j,i]])) + self.assertEqual(str(ans[idxMap[i, j]]), str(ans[idxMap[j, i]])) # 0 calculated by sympy - self.assertEqual(str(ans[idxMap[1,1]]), "0.0") - self.assertEqual(str(ans[idxMap[2,2]]), "0.0") - self.assertEqual(str(ans[idxMap[3,3]]), "2.0*x[2]") + self.assertEqual(str(ans[idxMap[1, 1]]), "0.0") + self.assertEqual(str(ans[idxMap[2, 2]]), "0.0") + self.assertEqual(str(ans[idxMap[3, 3]]), "2.0*x[2]") # 0 calculated by bypassing sympy - self.assertEqual(str(ans[idxMap[4,4]]), "0.0") - self.assertEqual(str(ans[idxMap[2,3]]), "2.0*x[3]") + self.assertEqual(str(ans[idxMap[4, 4]]), "0.0") + self.assertEqual(str(ans[idxMap[2, 3]]), "2.0*x[3]") def test_nondifferentiable(self): m = ConcreteModel() @@ -342,15 +366,19 @@ def test_nondifferentiable(self): self.assertRaisesRegex( NondifferentiableError, - "The sub-expression '.*' is not differentiable " - "with respect to .*foo", - differentiate, ceil(m.foo), wrt=m.foo) + "The sub-expression '.*' is not differentiable with respect to .*foo", + differentiate, + ceil(m.foo), + wrt=m.foo, + ) self.assertRaisesRegex( NondifferentiableError, - "The sub-expression '.*' is not differentiable " - "with respect to .*foo", - differentiate, floor(m.foo), wrt=m.foo) + "The sub-expression '.*' is not differentiable with respect to .*foo", + differentiate, + floor(m.foo), + wrt=m.foo, + ) def test_errors(self): m = ConcreteModel() @@ -359,16 +387,27 @@ def test_errors(self): self.assertRaisesRegex( ValueError, "Must specify exactly one of wrt and wrt_list", - differentiate, m.x, wrt=m.x, wrt_list=[m.x]) + differentiate, + m.x, + wrt=m.x, + wrt_list=[m.x], + ) obj_map = PyomoSympyBimap() + class bogus(object): def __init__(self): - self._args = (obj_map.getSympySymbol(m.x),) + self.args = (obj_map.getSympySymbol(m.x),) + self.func = type(self) + self.assertRaisesRegex( DeveloperError, - "sympy expression .* not found in the operator map", - sympy2pyomo_expression, bogus(), obj_map) + "sympy expression .*bogus'> not found in the operator map", + sympy2pyomo_expression, + bogus(), + obj_map, + normalize_whitespace=True, + ) class SymbolicDerivatives_importTest(unittest.TestCase): @@ -379,5 +418,6 @@ def test_sympy_avail_flag(self): with self.assertRaises(ImportError): import sympy + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_taylor_series.py b/pyomo/core/tests/unit/test_taylor_series.py index 02ecddea2fb..d4fe5291b2d 100644 --- a/pyomo/core/tests/unit/test_taylor_series.py +++ b/pyomo/core/tests/unit/test_taylor_series.py @@ -12,8 +12,7 @@ import pyomo.common.unittest as unittest import pyomo.environ as pyo from pyomo.core.expr.taylor_series import taylor_series_expansion -from pyomo.core.expr.current import polynomial_degree -from pyomo.core.expr.calculus.derivatives import differentiate +from pyomo.core.expr import polynomial_degree, differentiate class TestTaylorSeries(unittest.TestCase): @@ -21,10 +20,12 @@ def test_first_order_taylor_series(self): m = pyo.ConcreteModel() m.x = pyo.Var() m.x.value = 1 - exprs_to_test = [m.x**2, pyo.exp(m.x), (m.x + 2)**2] + exprs_to_test = [m.x**2, pyo.exp(m.x), (m.x + 2) ** 2] for e in exprs_to_test: tsa = taylor_series_expansion(e) - self.assertAlmostEqual(pyo.differentiate(e, wrt=m.x), pyo.differentiate(tsa, wrt=m.x)) + self.assertAlmostEqual( + pyo.differentiate(e, wrt=m.x), pyo.differentiate(tsa, wrt=m.x) + ) self.assertAlmostEqual(pyo.value(e), pyo.value(tsa)) self.assertEqual(polynomial_degree(tsa), 1) @@ -34,7 +35,9 @@ def test_higher_order_taylor_series(self): m.y = pyo.Var(initialize=1.5) e = m.x * m.y - tse = taylor_series_expansion(e, diff_mode=differentiate.Modes.reverse_symbolic, order=2) + tse = taylor_series_expansion( + e, diff_mode=differentiate.Modes.reverse_symbolic, order=2 + ) for _x in [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]: for _y in [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]: m.x.value = _x @@ -42,15 +45,19 @@ def test_higher_order_taylor_series(self): self.assertAlmostEqual(pyo.value(e), pyo.value(tse)) e = m.x**3 + m.y**3 - tse = taylor_series_expansion(e, diff_mode=differentiate.Modes.reverse_symbolic, order=3) + tse = taylor_series_expansion( + e, diff_mode=differentiate.Modes.reverse_symbolic, order=3 + ) for _x in [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]: for _y in [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]: m.x.value = _x m.y.value = _y self.assertAlmostEqual(pyo.value(e), pyo.value(tse)) - e = (m.x*m.y)**2 - tse = taylor_series_expansion(e, diff_mode=differentiate.Modes.reverse_symbolic, order=4) + e = (m.x * m.y) ** 2 + tse = taylor_series_expansion( + e, diff_mode=differentiate.Modes.reverse_symbolic, order=4 + ) for _x in [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]: for _y in [-2, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, 2]: m.x.value = _x diff --git a/pyomo/core/tests/unit/test_template_expr.py b/pyomo/core/tests/unit/test_template_expr.py index 6113cf2fe7a..069acc907cc 100644 --- a/pyomo/core/tests/unit/test_template_expr.py +++ b/pyomo/core/tests/unit/test_template_expr.py @@ -13,13 +13,21 @@ import pyomo.common.unittest as unittest from pyomo.environ import ( - ConcreteModel, AbstractModel, RangeSet, Param, Var, Set, value, + ConcreteModel, + AbstractModel, + RangeSet, + Param, + Var, + Set, + value, Integers, ) -import pyomo.core.expr.current as EXPR +import pyomo.core.expr as EXPR from pyomo.core.expr.template_expr import ( IndexTemplate, TemplateExpressionError, + CallExpression, + GetAttrExpression, _GetItemIndexer, resolve_template, templatize_constraint, @@ -32,12 +40,12 @@ class TestTemplateExpressions(unittest.TestCase): def setUp(self): self.m = m = ConcreteModel() - m.I = RangeSet(1,9) - m.J = RangeSet(10,19) - m.x = Var(m.I, initialize=lambda m,i: i+1) - m.P = Param(m.I, initialize=lambda m,i: 10-i, mutable=True) - m.p = Param(m.I, m.J, initialize=lambda m,i,j: 100*i+j) - m.s = Set(m.I, initialize=lambda m,i:range(i)) + m.I = RangeSet(1, 9) + m.J = RangeSet(10, 19) + m.x = Var(m.I, initialize=lambda m, i: i + 1) + m.P = Param(m.I, initialize=lambda m, i: 10 - i, mutable=True) + m.p = Param(m.I, m.J, initialize=lambda m, i, j: 100 * i + j) + m.s = Set(m.I, initialize=lambda m, i: range(i)) def test_nonTemplates(self): m = self.m @@ -49,8 +57,8 @@ def test_IndexTemplate(self): m = self.m i = IndexTemplate(m.I) with self.assertRaisesRegex( - TemplateExpressionError, - "Evaluating uninitialized IndexTemplate"): + TemplateExpressionError, "Evaluating uninitialized IndexTemplate" + ): value(i) self.assertEqual(str(i), "{I}") @@ -63,7 +71,7 @@ def test_template_scalar(self): m = self.m t = IndexTemplate(m.I) e = m.x[t] - self.assertIs(type(e), EXPR.GetItemExpression) + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertEqual(e.args, (m.x, t)) self.assertFalse(e.is_constant()) self.assertFalse(e.is_fixed()) @@ -76,9 +84,9 @@ def test_template_scalar(self): self.assertIs(resolve_template(e), m.x[5]) t.set_value() - e = m.p[t,10] - self.assertIs(type(e), EXPR.GetItemExpression) - self.assertEqual(e.args, (m.p,t,10)) + e = m.p[t, 10] + self.assertIs(type(e), EXPR.NPV_Numeric_GetItemExpression) + self.assertEqual(e.args, (m.p, t, 10)) self.assertFalse(e.is_constant()) self.assertTrue(e.is_fixed()) self.assertEqual(e.polynomial_degree(), 0) @@ -87,12 +95,12 @@ def test_template_scalar(self): v = e() self.assertIn(type(v), (int, float)) self.assertEqual(v, 510) - self.assertIs(resolve_template(e), m.p[5,10]) + self.assertIs(resolve_template(e), m.p[5, 10]) t.set_value() - e = m.p[5,t] - self.assertIs(type(e), EXPR.GetItemExpression) - self.assertEqual(e.args, (m.p,5,t)) + e = m.p[5, t] + self.assertIs(type(e), EXPR.NPV_Numeric_GetItemExpression) + self.assertEqual(e.args, (m.p, 5, t)) self.assertFalse(e.is_constant()) self.assertTrue(e.is_fixed()) self.assertEqual(e.polynomial_degree(), 0) @@ -101,18 +109,27 @@ def test_template_scalar(self): v = e() self.assertIn(type(v), (int, float)) self.assertEqual(v, 510) - self.assertIs(resolve_template(e), m.p[5,10]) + self.assertIs(resolve_template(e), m.p[5, 10]) t.set_value() def test_template_scalar_with_set(self): m = self.m t = IndexTemplate(m.I) e = m.s[t] - self.assertIs(type(e), EXPR.GetItemExpression) - self.assertEqual(e.args, (m.s,t)) + self.assertIs(type(e), EXPR.NPV_Structural_GetItemExpression) + self.assertEqual(e.args, (m.s, t)) self.assertFalse(e.is_constant()) self.assertTrue(e.is_fixed()) - self.assertEqual(e.polynomial_degree(), 0) + # Generic templates generate CallExpression objects + ee = e.polynomial_degree() + self.assertIs(type(ee), EXPR.CallExpression) + t.set_value(1) + # Note that structural expressions do not implement polynomial_degree + with self.assertRaisesRegex( + AttributeError, + "'_InsertionOrderSetData' object has " "no attribute 'polynomial_degree'", + ): + e.polynomial_degree() self.assertEqual(str(e), "s[{I}]") t.set_value(5) v = e() @@ -123,8 +140,8 @@ def test_template_scalar_with_set(self): def test_template_operation(self): m = self.m t = IndexTemplate(m.I) - e = m.x[t+m.P[5]] - self.assertIs(type(e), EXPR.GetItemExpression) + e = m.x[t + m.P[5]] + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.x) self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) @@ -135,13 +152,13 @@ def test_template_operation(self): def test_nested_template_operation(self): m = self.m t = IndexTemplate(m.I) - e = m.x[t+m.P[t+1]] - self.assertIs(type(e), EXPR.GetItemExpression) + e = m.x[t + m.P[t + 1]] + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.x) self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(0), t) - self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), EXPR.NPV_Numeric_GetItemExpression) self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) self.assertEqual(str(e), "x[{I} + P[{I} + 1]]") @@ -149,19 +166,21 @@ def test_nested_template_operation(self): def test_block_templates(self): m = ConcreteModel() m.T = RangeSet(3) + @m.Block(m.T) def b(b, i): b.x = Var(initialize=i) @b.Block(m.T) def bb(bb, j): - bb.I =RangeSet(i*j) - bb.y = Var(bb.I, initialize=lambda m,i:i) + bb.I = RangeSet(i * j) + bb.y = Var(bb.I, initialize=lambda m, i: i) + t = IndexTemplate(m.T) e = m.b[t].x - self.assertIs(type(e), EXPR.GetAttrExpression) + self.assertIs(type(e), EXPR.Numeric_GetAttrExpression) self.assertEqual(e.nargs(), 2) - self.assertIs(type(e.arg(0)), EXPR.GetItemExpression) + self.assertIs(type(e.arg(0)), EXPR.NPV_Structural_GetItemExpression) self.assertIs(e.arg(0).arg(0), m.b) self.assertEqual(e.arg(0).nargs(), 2) self.assertIs(e.arg(0).arg(1), t) @@ -174,7 +193,7 @@ def bb(bb, j): t.set_value() e = m.b[t].bb[t].y[1] - self.assertIs(type(e), EXPR.GetItemExpression) + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertEqual(e.nargs(), 2) self.assertEqual(str(e), "b[{T}].bb[{T}].y[1]") t.set_value(2) @@ -187,61 +206,61 @@ def test_template_name(self): m = self.m t = IndexTemplate(m.I) - E = m.x[t+m.P[1+t]] + m.P[1] - self.assertEqual( str(E), "x[{I} + P[1 + {I}]] + P[1]") + E = m.x[t + m.P[1 + t]] + m.P[1] + self.assertEqual(str(E), "x[{I} + P[1 + {I}]] + P[1]") - E = m.x[t+m.P[1+t]**2.]**2. + m.P[1] - self.assertEqual( str(E), "x[{I} + P[1 + {I}]**2.0]**2.0 + P[1]") + E = m.x[t + m.P[1 + t] ** 2.0] ** 2.0 + m.P[1] + self.assertEqual(str(E), "x[{I} + P[1 + {I}]**2.0]**2.0 + P[1]") def test_template_in_expression(self): m = self.m t = IndexTemplate(m.I) - E = m.x[t+m.P[t+1]] + m.P[1] + E = m.x[t + m.P[t + 1]] + m.P[1] self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(0) - self.assertIs(type(e), EXPR.GetItemExpression) + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.x) self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(0), t) - self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), EXPR.NPV_Numeric_GetItemExpression) self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) - E = m.P[1] + m.x[t+m.P[t+1]] + E = m.P[1] + m.x[t + m.P[t + 1]] self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(1) - self.assertIs(type(e), EXPR.GetItemExpression) + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.x) self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(0), t) - self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), EXPR.NPV_Numeric_GetItemExpression) self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) - E = m.x[t+m.P[t+1]] + 1 + E = m.x[t + m.P[t + 1]] + 1 self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(0) - self.assertIs(type(e), EXPR.GetItemExpression) + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.x) self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(0), t) - self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), EXPR.NPV_Numeric_GetItemExpression) self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) - E = 1 + m.x[t+m.P[t+1]] + E = 1 + m.x[t + m.P[t + 1]] self.assertIsInstance(E, EXPR.SumExpressionBase) - e = E.arg(E.nargs()-1) - self.assertIs(type(e), EXPR.GetItemExpression) + e = E.arg(E.nargs() - 1) + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.x) self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(0), t) - self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), EXPR.NPV_Numeric_GetItemExpression) self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) @@ -249,75 +268,67 @@ def test_clone(self): m = self.m t = IndexTemplate(m.I) - E_base = m.x[t+m.P[t+1]] + m.P[1] + E_base = m.x[t + m.P[t + 1]] + m.P[1] E = E_base.clone() self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(0) - self.assertIs(type(e), EXPR.GetItemExpression) + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertIsNot(e, E_base.arg(0)) self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.x) self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(0), t) - self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) - self.assertIs(type(e.arg(1).arg(1)), - type(E_base.arg(0).arg(1).arg(1))) - self.assertIsNot(e.arg(1).arg(1), - E_base.arg(0).arg(1).arg(1)) + self.assertIs(type(e.arg(1).arg(1)), EXPR.NPV_Numeric_GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), type(E_base.arg(0).arg(1).arg(1))) + self.assertIsNot(e.arg(1).arg(1), E_base.arg(0).arg(1).arg(1)) self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) - E_base = m.P[1] + m.x[t+m.P[t+1]] + E_base = m.P[1] + m.x[t + m.P[t + 1]] E = E_base.clone() self.assertTrue(isinstance(E, EXPR.SumExpressionBase)) e = E.arg(1) - self.assertIs(type(e), EXPR.GetItemExpression) + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertIsNot(e, E_base.arg(0)) self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.x) self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(0), t) - self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) - self.assertIs(type(e.arg(1).arg(1)), - type(E_base.arg(1).arg(1).arg(1))) - self.assertIsNot(e.arg(1).arg(1), - E_base.arg(1).arg(1).arg(1)) + self.assertIs(type(e.arg(1).arg(1)), EXPR.NPV_Numeric_GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), type(E_base.arg(1).arg(1).arg(1))) + self.assertIsNot(e.arg(1).arg(1), E_base.arg(1).arg(1).arg(1)) self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) - E_base = m.x[t+m.P[t+1]] + 1 + E_base = m.x[t + m.P[t + 1]] + 1 E = E_base.clone() self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(0) - self.assertIs(type(e), EXPR.GetItemExpression) + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertIsNot(e, E_base.arg(0)) self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.x) self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(0), t) - self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) - self.assertIs(type(e.arg(1).arg(1)), - type(E_base.arg(0).arg(1).arg(1))) - self.assertIsNot(e.arg(1).arg(1), - E_base.arg(0).arg(1).arg(1)) + self.assertIs(type(e.arg(1).arg(1)), EXPR.NPV_Numeric_GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), type(E_base.arg(0).arg(1).arg(1))) + self.assertIsNot(e.arg(1).arg(1), E_base.arg(0).arg(1).arg(1)) self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) - E_base = 1 + m.x[t+m.P[t+1]] + E_base = 1 + m.x[t + m.P[t + 1]] E = E_base.clone() self.assertIsInstance(E, EXPR.SumExpressionBase) e = E.arg(-1) - self.assertIs(type(e), EXPR.GetItemExpression) + self.assertIs(type(e), EXPR.Numeric_GetItemExpression) self.assertIsNot(e, E_base.arg(0)) self.assertEqual(e.nargs(), 2) self.assertIs(e.arg(0), m.x) self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(0), t) - self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression) - self.assertIs(type(e.arg(1).arg(1)), - type(E_base.arg(-1).arg(1).arg(1))) - self.assertIsNot(e.arg(1).arg(1), - E_base.arg(-1).arg(1).arg(1)) + self.assertIs(type(e.arg(1).arg(1)), EXPR.NPV_Numeric_GetItemExpression) + self.assertIs(type(e.arg(1).arg(1)), type(E_base.arg(-1).arg(1).arg(1))) + self.assertIsNot(e.arg(1).arg(1), E_base.arg(-1).arg(1).arg(1)) self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase) self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t) @@ -327,6 +338,7 @@ def test_simple_rule(self): m = ConcreteModel() m.I = RangeSet(3) m.x = Var(m.I) + @m.Constraint(m.I) def c(m, i): return m.x[i] <= 0 @@ -336,7 +348,7 @@ def c(m, i): self.assertIs(indices[0]._set, m.I) self.assertEqual(str(template), "x[_1] <= 0") # Test that the RangeSet iterator was put back - self.assertEqual(list(m.I), list(range(1,4))) + self.assertEqual(list(m.I), list(range(1, 4))) # Evaluate the template indices[0].set_value(2) self.assertEqual(str(resolve_template(template)), 'x[2] <= 0') @@ -344,6 +356,7 @@ def c(m, i): def test_simple_rule_nonfinite_set(self): m = ConcreteModel() m.x = Var(Integers, dense=False) + @m.Constraint(Integers) def c(m, i): return m.x[i] <= 0 @@ -360,6 +373,7 @@ def test_simple_abstract_rule(self): m = AbstractModel() m.I = RangeSet(3) m.x = Var(m.I) + @m.Constraint(m.I) def c(m, i): return m.x[i] <= 0 @@ -368,8 +382,7 @@ def c(m, i): # have been constructed (otherwise accessing the Set raises an # exception) - with self.assertRaisesRegex( - ValueError, ".*has not been constructed"): + with self.assertRaisesRegex(ValueError, ".*has not been constructed"): template, indices = templatize_constraint(m.c) m.I.construct() @@ -383,39 +396,36 @@ def test_simple_sum_rule(self): m = ConcreteModel() m.I = RangeSet(3) m.J = RangeSet(3) - m.x = Var(m.I,m.J) + m.x = Var(m.I, m.J) + @m.Constraint(m.I) def c(m, i): - return sum(m.x[i,j] for j in m.J) <= 0 + return sum(m.x[i, j] for j in m.J) <= 0 template, indices = templatize_constraint(m.c) self.assertEqual(len(indices), 1) self.assertIs(indices[0]._set, m.I) self.assertEqual( template.to_string(verbose=True), - "templatesum(getitem(x, _1, _2), iter(_2, J)) <= 0" - ) - self.assertEqual( - str(template), - "SUM(x[_1,_2] for _2 in J) <= 0" + "templatesum(getitem(x, _1, _2), iter(_2, J)) <= 0", ) + self.assertEqual(str(template), "SUM(x[_1,_2] for _2 in J) <= 0") # Evaluate the template indices[0].set_value(2) self.assertEqual( - str(resolve_template(template)), - 'x[2,1] + x[2,2] + x[2,3] <= 0' + str(resolve_template(template)), 'x[2,1] + x[2,2] + x[2,3] <= 0' ) def test_nested_sum_rule(self): m = ConcreteModel() m.I = RangeSet(3) m.J = RangeSet(3) - m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]}) - m.x = Var(m.I,m.J,[10,20,30]) + m.K = Set(m.I, initialize={1: [10], 2: [10, 20], 3: [10, 20, 30]}) + m.x = Var(m.I, m.J, [10, 20, 30]) + @m.Constraint() def c(m): - return sum( sum(m.x[i,j,k] for k in m.K[i]) - for j in m.J for i in m.I) <= 0 + return sum(sum(m.x[i, j, k] for k in m.K[i]) for j in m.J for i in m.I) <= 0 template, indices = templatize_constraint(m.c) self.assertEqual(len(indices), 0) @@ -423,12 +433,11 @@ def c(m): template.to_string(verbose=True), "templatesum(" "templatesum(getitem(x, _2, _1, _3), iter(_3, getitem(K, _2))), " - "iter(_1, J), iter(_2, I)) <= 0" + "iter(_1, J), iter(_2, I)) <= 0", ) self.assertEqual( str(template), - "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) " - "for _1 in J for _2 in I) <= 0" + "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) for _1 in J for _2 in I) <= 0", ) # Evaluate the template self.assertEqual( @@ -441,20 +450,20 @@ def c(m): '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + ' '(x[1,3,10]) + ' '(x[2,3,10] + x[2,3,20]) + ' - '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0' + '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0', ) def test_multidim_nested_sum_rule(self): m = ConcreteModel() m.I = RangeSet(3) m.J = RangeSet(3) - m.JI = m.J*m.I - m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]}) - m.x = Var(m.I,m.J,[10,20,30]) + m.JI = m.J * m.I + m.K = Set(m.I, initialize={1: [10], 2: [10, 20], 3: [10, 20, 30]}) + m.x = Var(m.I, m.J, [10, 20, 30]) + @m.Constraint() def c(m): - return sum( sum(m.x[i,j,k] for k in m.K[i]) - for j,i in m.JI) <= 0 + return sum(sum(m.x[i, j, k] for k in m.K[i]) for j, i in m.JI) <= 0 template, indices = templatize_constraint(m.c) self.assertEqual(len(indices), 0) @@ -462,12 +471,11 @@ def c(m): template.to_string(verbose=True), "templatesum(" "templatesum(getitem(x, _2, _1, _3), iter(_3, getitem(K, _2))), " - "iter(_1, _2, JI)) <= 0" + "iter(_1, _2, JI)) <= 0", ) self.assertEqual( str(template), - "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) " - "for _1, _2 in JI) <= 0" + "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) for _1, _2 in JI) <= 0", ) # Evaluate the template self.assertEqual( @@ -480,20 +488,20 @@ def c(m): '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + ' '(x[1,3,10]) + ' '(x[2,3,10] + x[2,3,20]) + ' - '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0' + '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0', ) def test_multidim_nested_sum_rule(self): m = ConcreteModel() m.I = RangeSet(3) m.J = RangeSet(3) - m.JI = m.J*m.I - m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]}) - m.x = Var(m.I,m.J,[10,20,30]) + m.JI = m.J * m.I + m.K = Set(m.I, initialize={1: [10], 2: [10, 20], 3: [10, 20, 30]}) + m.x = Var(m.I, m.J, [10, 20, 30]) + @m.Constraint() def c(m): - return sum( sum(m.x[i,j,k] for k in m.K[i]) - for j,i in m.JI) <= 0 + return sum(sum(m.x[i, j, k] for k in m.K[i]) for j, i in m.JI) <= 0 template, indices = templatize_constraint(m.c) self.assertEqual(len(indices), 0) @@ -501,12 +509,11 @@ def c(m): template.to_string(verbose=True), "templatesum(" "templatesum(getitem(x, _2, _1, _3), iter(_3, getitem(K, _2))), " - "iter(_1, _2, JI)) <= 0" + "iter(_1, _2, JI)) <= 0", ) self.assertEqual( str(template), - "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) " - "for _1, _2 in JI) <= 0" + "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) for _1, _2 in JI) <= 0", ) # Evaluate the template self.assertEqual( @@ -519,23 +526,24 @@ def c(m): '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + ' '(x[1,3,10]) + ' '(x[2,3,10] + x[2,3,20]) + ' - '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0' + '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0', ) def test_multidim_nested_getattr_sum_rule(self): m = ConcreteModel() m.I = RangeSet(3) m.J = RangeSet(3) - m.JI = m.J*m.I - m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]}) - m.x = Var(m.I,m.J,[10,20,30]) + m.JI = m.J * m.I + m.K = Set(m.I, initialize={1: [10], 2: [10, 20], 3: [10, 20, 30]}) + m.x = Var(m.I, m.J, [10, 20, 30]) + @m.Block(m.I) def b(b, i): - b.K = RangeSet(10, 10*i, 10) + b.K = RangeSet(10, 10 * i, 10) + @m.Constraint() def c(m): - return sum( sum(m.x[i,j,k] for k in m.b[i].K) - for j,i in m.JI) <= 0 + return sum(sum(m.x[i, j, k] for k in m.b[i].K) for j, i in m.JI) <= 0 template, indices = templatize_constraint(m.c) self.assertEqual(len(indices), 0) @@ -544,12 +552,11 @@ def c(m): "templatesum(" "templatesum(getitem(x, _2, _1, _3), " "iter(_3, getattr(getitem(b, _2), 'K'))), " - "iter(_1, _2, JI)) <= 0" + "iter(_1, _2, JI)) <= 0", ) self.assertEqual( str(template), - "SUM(SUM(x[_2,_1,_3] for _3 in b[_2].K) " - "for _1, _2 in JI) <= 0" + "SUM(SUM(x[_2,_1,_3] for _3 in b[_2].K) for _1, _2 in JI) <= 0", ) # Evaluate the template self.assertEqual( @@ -562,102 +569,157 @@ def c(m): '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + ' '(x[1,3,10]) + ' '(x[2,3,10] + x[2,3,20]) + ' - '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0' + '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0', ) + def test_eval_getattr(self): + m = ConcreteModel() + m.T = RangeSet(3) -class TestTemplateSubstitution(unittest.TestCase): + @m.Block(m.T) + def b(b, i): + b.x = Var(initialize=i) + @b.Block(m.T) + def bb(bb, j): + bb.I = RangeSet(i * j) + bb.y = Var(bb.I, initialize=lambda m, i: i) + + t = IndexTemplate(m.T) + e = m.b[t].x + + with self.assertRaisesRegex( + ValueError, r'Evaluating uninitialized IndexTemplate \({T}\)' + ): + value(e()) + with self.assertRaisesRegex( + KeyError, r"Index 'None' is not valid for indexed component 'b'" + ): + self.assertIsNone(e(exception=False)) + with self.assertRaisesRegex( + KeyError, r"Index 'None' is not valid for indexed component 'b'" + ): + self.assertIsNone(e(False)) + + t.set_value(2) + self.assertEqual(e(), 2) + + f = e.set_value(5) + self.assertIs(f.__class__, CallExpression) + self.assertEqual(f._kwds, ()) + self.assertEqual(len(f._args_), 2) + self.assertIs(f._args_[0].__class__, EXPR.Structural_GetAttrExpression) + self.assertIs(f._args_[0]._args_[0], e) + self.assertEqual(f._args_[1], 5) + + self.assertEqual(value(m.b[2].x), 2) + f() + self.assertEqual(value(m.b[2].x), 5) + + f = e.set_value('a', skip_validation=True) + self.assertIs(f.__class__, CallExpression) + self.assertEqual(f._kwds, ('skip_validation',)) + self.assertEqual(len(f._args_), 3) + self.assertIs(f._args_[0].__class__, EXPR.Structural_GetAttrExpression) + self.assertIs(f._args_[0]._args_[0], e) + self.assertEqual(f._args_[1], 'a') + self.assertEqual(f._args_[2], True) + + f() + self.assertEqual(value(m.b[2].x), 'a') + + +class TestTemplateSubstitution(unittest.TestCase): def setUp(self): self.m = m = ConcreteModel() m.TRAY = Set(initialize=range(5)) - m.TIME = Set(bounds=(0,10), initialize=range(10)) + m.TIME = Set(bounds=(0, 10), initialize=range(10)) m.y = Var(initialize=1) - m.x = Var(m.TIME, m.TRAY, initialize=lambda _m,i,j: i) - m.dxdt = Var(m.TIME, m.TRAY, initialize=lambda _m,i,j: 2*i) + m.x = Var(m.TIME, m.TRAY, initialize=lambda _m, i, j: i) + m.dxdt = Var(m.TIME, m.TRAY, initialize=lambda _m, i, j: 2 * i) def test_simple_substitute_param(self): - def diffeq(m,t, i): - return m.dxdt[t, i] == t*m.x[t, i-1]**2 + m.y**2 + \ - m.x[t, i+1] + m.x[t, i-1] + def diffeq(m, t, i): + return ( + m.dxdt[t, i] + == t * m.x[t, i - 1] ** 2 + m.y**2 + m.x[t, i + 1] + m.x[t, i - 1] + ) m = self.m t = IndexTemplate(m.TIME) e = diffeq(m, t, 2) - self.assertTrue( isinstance(e, EXPR.ExpressionBase) ) + self.assertTrue(isinstance(e, EXPR.RelationalExpression)) _map = {} - E = substitute_template_expression( - e, substitute_getitem_with_param, _map ) - self.assertIsNot(e,E) - - self.assertEqual( len(_map), 3 ) - - idx1 = _GetItemIndexer( m.x[t,1] ) - self.assertEqual( idx1.nargs(), 2 ) - self.assertIs( idx1.base, m.x ) - self.assertIs( idx1.arg(0), t ) - self.assertEqual( idx1.arg(1), 1 ) - self.assertIn( idx1, _map ) - - idx2 = _GetItemIndexer( m.dxdt[t,2] ) - self.assertEqual( idx2.nargs(), 2 ) - self.assertIs( idx2.base, m.dxdt ) - self.assertIs( idx2.arg(0), t ) - self.assertEqual( idx2.arg(1), 2 ) - self.assertIn( idx2, _map ) - - idx3 = _GetItemIndexer( m.x[t,3] ) - self.assertEqual( idx3.nargs(), 2 ) - self.assertIs( idx3.base, m.x ) - self.assertIs( idx3.arg(0), t ) - self.assertEqual( idx3.arg(1), 3 ) - self.assertIn( idx3, _map ) - - self.assertFalse( idx1 == idx2 ) - self.assertFalse( idx1 == idx3 ) - self.assertFalse( idx2 == idx3 ) - - idx4 = _GetItemIndexer( m.x[t,2] ) - self.assertNotIn( idx4, _map ) + E = substitute_template_expression(e, substitute_getitem_with_param, _map) + self.assertIsNot(e, E) + + self.assertEqual(len(_map), 3) + + idx1 = _GetItemIndexer(m.x[t, 1]) + self.assertEqual(idx1.nargs(), 2) + self.assertIs(idx1.base, m.x) + self.assertIs(idx1.arg(0), t) + self.assertEqual(idx1.arg(1), 1) + self.assertIn(idx1, _map) + + idx2 = _GetItemIndexer(m.dxdt[t, 2]) + self.assertEqual(idx2.nargs(), 2) + self.assertIs(idx2.base, m.dxdt) + self.assertIs(idx2.arg(0), t) + self.assertEqual(idx2.arg(1), 2) + self.assertIn(idx2, _map) + + idx3 = _GetItemIndexer(m.x[t, 3]) + self.assertEqual(idx3.nargs(), 2) + self.assertIs(idx3.base, m.x) + self.assertIs(idx3.arg(0), t) + self.assertEqual(idx3.arg(1), 3) + self.assertIn(idx3, _map) + + self.assertFalse(idx1 == idx2) + self.assertFalse(idx1 == idx3) + self.assertFalse(idx2 == idx3) + + idx4 = _GetItemIndexer(m.x[t, 2]) + self.assertNotIn(idx4, _map) t.set_value(5) - self.assertEqual((e.arg(0)(), e.arg(1)()), (10,136)) + self.assertEqual((e.arg(0)(), e.arg(1)()), (10, 136)) self.assertEqual( str(E), "'dxdt[{TIME},2]' == " - "{TIME}*'x[{TIME},1]'**2 + y**2 + 'x[{TIME},3]' + 'x[{TIME},1]'" ) - - _map[idx1].set_value( value(m.x[value(t), 1]) ) - _map[idx2].set_value( value(m.dxdt[value(t), 2]) ) - _map[idx3].set_value( value(m.x[value(t), 3]) ) - self.assertEqual((E.arg(0)(), E.arg(1)()), (10,136)) + "{TIME}*'x[{TIME},1]'**2 + y**2 + 'x[{TIME},3]' + 'x[{TIME},1]'", + ) - _map[idx1].set_value( 12 ) - _map[idx2].set_value( 34 ) - self.assertEqual((E.arg(0)(), E.arg(1)()), (34,738)) + _map[idx1].set_value(value(m.x[value(t), 1])) + _map[idx2].set_value(value(m.dxdt[value(t), 2])) + _map[idx3].set_value(value(m.x[value(t), 3])) + self.assertEqual((E.arg(0)(), E.arg(1)()), (10, 136)) + _map[idx1].set_value(12) + _map[idx2].set_value(34) + self.assertEqual((E.arg(0)(), E.arg(1)()), (34, 738)) def test_simple_substitute_index(self): - def diffeq(m,t, i): + def diffeq(m, t, i): return m.dxdt[t, i] == t * m.x[t, i] ** 2 + m.y**2 m = self.m t = IndexTemplate(m.TIME) - e = diffeq(m,t, 2) + e = diffeq(m, t, 2) t.set_value(5) - self.assertTrue( isinstance(e, EXPR.ExpressionBase) ) - self.assertEqual((e.arg(0)(), e.arg(1)()), (10,126)) + self.assertTrue(isinstance(e, EXPR.RelationalExpression)) + self.assertEqual((e.arg(0)(), e.arg(1)()), (10, 126)) E = substitute_template_expression(e, substitute_template_with_value) - self.assertIsNot(e,E) + self.assertIsNot(e, E) + + self.assertEqual(str(E), 'dxdt[5,2] == 5.0*x[5,2]**2 + y**2') - self.assertEqual( - str(E), - 'dxdt[5,2] == 5.0*x[5,2]**2 + y**2' ) if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_units.py b/pyomo/core/tests/unit/test_units.py index 3da42927405..8ec83fe1a73 100644 --- a/pyomo/core/tests/unit/test_units.py +++ b/pyomo/core/tests/unit/test_units.py @@ -17,31 +17,62 @@ from pyomo.common.errors import PyomoException import pyomo.common.unittest as unittest from pyomo.environ import ( - ConcreteModel, Var, Param, Set, Constraint, Objective, Expression, - ExternalFunction, value, sum_product, maximize, units, - log, log10, exp, sqrt, cos, sin, tan, asin, acos, atan, cosh, sinh, - tanh, asinh, acosh, atanh, ceil, floor, - BooleanVar + ConcreteModel, + Var, + Param, + Set, + Constraint, + Objective, + Expression, + ExternalFunction, + value, + sum_product, + maximize, + units, + log, + log10, + exp, + sqrt, + cos, + sin, + tan, + asin, + acos, + atan, + cosh, + sinh, + tanh, + asinh, + acosh, + atanh, + ceil, + floor, + BooleanVar, ) from pyomo.common.log import LoggingIntercept -from pyomo.util.check_units import ( - assert_units_consistent, check_units_equivalent, -) +from pyomo.util.check_units import assert_units_consistent, check_units_equivalent from pyomo.core.expr import inequality from pyomo.core.expr.numvalue import NumericConstant -import pyomo.core.expr.current as EXPR +import pyomo.core.expr as EXPR from pyomo.core.base.units_container import ( - pint_available, pint_module, _DeferredUnitsSingleton, - InconsistentUnitsError, UnitsError, PyomoUnitsContainer, as_quantity + pint_available, + pint_module, + _DeferredUnitsSingleton, + InconsistentUnitsError, + UnitsError, + PyomoUnitsContainer, + as_quantity, + _PyomoUnit, ) from io import StringIO + def python_callback_function(arg1, arg2): return 42.0 + @unittest.skipIf(not pint_available, 'Testing units requires pint') class TestPyomoUnit(unittest.TestCase): - def test_container_constructor(self): # Custom pint registry: um0 = PyomoUnitsContainer(None) @@ -51,16 +82,16 @@ def test_container_constructor(self): self.assertIsNotNone(um1.pint_registry) self.assertIsNotNone(um1._pint_dimensionless) with self.assertRaisesRegex( - ValueError, - 'Cannot operate with Unit and Unit of different registries'): + ValueError, 'Cannot operate with Unit and Unit of different registries' + ): self.assertEqual(um1._pint_dimensionless, units._pint_dimensionless) self.assertIsNot(um1.pint_registry, units.pint_registry) um2 = PyomoUnitsContainer(pint_module.UnitRegistry()) self.assertIsNotNone(um2.pint_registry) self.assertIsNotNone(um2._pint_dimensionless) with self.assertRaisesRegex( - ValueError, - 'Cannot operate with Unit and Unit of different registries'): + ValueError, 'Cannot operate with Unit and Unit of different registries' + ): self.assertEqual(um2._pint_dimensionless, units._pint_dimensionless) self.assertIsNot(um2.pint_registry, units.pint_registry) self.assertIsNot(um2.pint_registry, um1.pint_registry) @@ -91,7 +122,7 @@ def test_PyomoUnit_NumericValueMethods(self): self.assertEqual(kg.is_named_expression_type(), False) self.assertEqual(kg.is_expression_type(), False) self.assertEqual(kg.is_component_type(), False) - self.assertEqual(kg.is_relational(), False) + self.assertEqual(kg.is_expression_type(EXPR.ExpressionType.RELATIONAL), False) self.assertEqual(kg.is_indexed(), False) self.assertEqual(kg._compute_polynomial_degree(None), 0) @@ -122,18 +153,18 @@ def test_PyomoUnit_NumericValueMethods(self): # should not assert # check __mul__ - self.assertEqual(str(uc.get_units(kg*3)), 'kg') + self.assertEqual(str(uc.get_units(kg * 3)), 'kg') # check __rmul__ - self.assertEqual(str(uc.get_units(3*kg)), 'kg') + self.assertEqual(str(uc.get_units(3 * kg)), 'kg') # check div / truediv - self.assertEqual(str(uc.get_units(kg/3.0)), 'kg') + self.assertEqual(str(uc.get_units(kg / 3.0)), 'kg') # check rdiv / rtruediv - self.assertEqual(str(uc.get_units(3.0/kg)), '1/kg') + self.assertEqual(str(uc.get_units(3.0 / kg)), '1/kg') # check pow self.assertEqual(str(uc.get_units(kg**2)), 'kg**2') # check rpow - x = 2 ** kg # creation is allowed, only fails when units are "checked" + x = 2**kg # creation is allowed, only fails when units are "checked" with self.assertRaises(UnitsError): assert_units_consistent(x) @@ -142,7 +173,7 @@ def test_PyomoUnit_NumericValueMethods(self): self.assertEqual(str(uc.get_units(x)), 'kg') x = kg - x -= 2.0*kg + x -= 2.0 * kg self.assertEqual(str(uc.get_units(x)), 'kg') x = kg @@ -161,17 +192,18 @@ def test_PyomoUnit_NumericValueMethods(self): self.assertEqual(kg.to_string(), 'kg') # ToDo: is this really the correct behavior for verbose? self.assertEqual(kg.to_string(verbose=True), 'kg') - self.assertEqual((kg/uc.s).to_string(), 'kg/s') - self.assertEqual((kg*uc.m**2/uc.s).to_string(), 'kg*m**2/s') + self.assertEqual((kg / uc.s).to_string(), 'kg/s') + self.assertEqual((kg * uc.m**2 / uc.s).to_string(), 'kg*m**2/s') m.v = Var(initialize=3, units=uc.J) - e = uc.convert(m.v, uc.g*uc.m**2/uc.s**2) + e = uc.convert(m.v, uc.g * uc.m**2 / uc.s**2) self.assertEqual(e.to_string(), '1000.0*(g*m**2/J/s**2)*v') # check __nonzero__ / __bool__ with self.assertRaisesRegex( - PyomoException, r"Cannot convert non-constant Pyomo " - r"numeric value \(kg\) to bool."): + PyomoException, + r"Cannot convert non-constant Pyomo " r"numeric value \(kg\) to bool.", + ): bool(kg) # __call__ returns 1.0 @@ -187,9 +219,9 @@ def test_PyomoUnit_NumericValueMethods(self): dless = uc.dimensionless self.assertEqual('dimensionless', str(dless)) - - def _get_check_units_ok(self, x, pyomo_units_container, str_check=None, - expected_type=None): + def _get_check_units_ok( + self, x, pyomo_units_container, str_check=None, expected_type=None + ): if expected_type is not None: self.assertEqual(expected_type, type(x)) @@ -200,9 +232,13 @@ def _get_check_units_ok(self, x, pyomo_units_container, str_check=None, # if str_check is None, then we expect the units to be None self.assertIsNone(pyomo_units_container.get_units(x)) - def _get_check_units_fail(self, x, pyomo_units_container, - expected_type=None, - expected_error=InconsistentUnitsError): + def _get_check_units_fail( + self, + x, + pyomo_units_container, + expected_type=None, + expected_error=InconsistentUnitsError, + ): if expected_type is not None: self.assertEqual(expected_type, type(x)) @@ -232,218 +268,549 @@ def test_get_check_units_on_all_expressions(self): model.ym = Var(units=m) # test equality - self._get_check_units_ok(3.0*kg == 1.0*kg, uc, 'kg', EXPR.EqualityExpression) - self._get_check_units_fail(3.0*kg == 2.0*m, uc, EXPR.EqualityExpression) + self._get_check_units_ok( + 3.0 * kg == 1.0 * kg, uc, 'kg', EXPR.EqualityExpression + ) + self._get_check_units_fail(3.0 * kg == 2.0 * m, uc, EXPR.EqualityExpression) # test inequality - self._get_check_units_ok(3.0*kg <= 1.0*kg, uc, 'kg', EXPR.InequalityExpression) - self._get_check_units_fail(3.0*kg <= 2.0*m, uc, EXPR.InequalityExpression) - self._get_check_units_ok(3.0*kg >= 1.0*kg, uc, 'kg', EXPR.InequalityExpression) - self._get_check_units_fail(3.0*kg >= 2.0*m, uc, EXPR.InequalityExpression) + self._get_check_units_ok( + 3.0 * kg <= 1.0 * kg, uc, 'kg', EXPR.InequalityExpression + ) + self._get_check_units_fail(3.0 * kg <= 2.0 * m, uc, EXPR.InequalityExpression) + self._get_check_units_ok( + 3.0 * kg >= 1.0 * kg, uc, 'kg', EXPR.InequalityExpression + ) + self._get_check_units_fail(3.0 * kg >= 2.0 * m, uc, EXPR.InequalityExpression) # test RangedExpression - self._get_check_units_ok(inequality(3.0*kg, 4.0*kg, 5.0*kg), uc, 'kg', EXPR.RangedExpression) - self._get_check_units_fail(inequality(3.0*m, 4.0*kg, 5.0*kg), uc, EXPR.RangedExpression) - self._get_check_units_fail(inequality(3.0*kg, 4.0*m, 5.0*kg), uc, EXPR.RangedExpression) - self._get_check_units_fail(inequality(3.0*kg, 4.0*kg, 5.0*m), uc, EXPR.RangedExpression) + self._get_check_units_ok( + inequality(3.0 * kg, 4.0 * kg, 5.0 * kg), uc, 'kg', EXPR.RangedExpression + ) + self._get_check_units_fail( + inequality(3.0 * m, 4.0 * kg, 5.0 * kg), uc, EXPR.RangedExpression + ) + self._get_check_units_fail( + inequality(3.0 * kg, 4.0 * m, 5.0 * kg), uc, EXPR.RangedExpression + ) + self._get_check_units_fail( + inequality(3.0 * kg, 4.0 * kg, 5.0 * m), uc, EXPR.RangedExpression + ) # test SumExpression, NPV_SumExpression - self._get_check_units_ok(3.0*model.x*kg + 1.0*model.y*kg + 3.65*model.z*kg, uc, 'kg', EXPR.SumExpression) - self._get_check_units_fail(3.0*model.x*kg + 1.0*model.y*m + 3.65*model.z*kg, uc, EXPR.SumExpression) + self._get_check_units_ok( + 3.0 * model.x * kg + 1.0 * model.y * kg + 3.65 * model.z * kg, + uc, + 'kg', + EXPR.LinearExpression, + ) + self._get_check_units_fail( + 3.0 * model.x * kg + 1.0 * model.y * m + 3.65 * model.z * kg, + uc, + EXPR.LinearExpression, + ) - self._get_check_units_ok(3.0*kg + 1.0*kg + 2.0*kg, uc, 'kg', EXPR.NPV_SumExpression) - self._get_check_units_fail(3.0*kg + 1.0*kg + 2.0*m, uc, EXPR.NPV_SumExpression) + self._get_check_units_ok( + 3.0 * kg + 1.0 * kg + 2.0 * kg, uc, 'kg', EXPR.NPV_SumExpression + ) + self._get_check_units_fail( + 3.0 * kg + 1.0 * kg + 2.0 * m, uc, EXPR.NPV_SumExpression + ) # test ProductExpression, NPV_ProductExpression - self._get_check_units_ok(model.x*kg * model.y*m, uc, 'kg*m', EXPR.ProductExpression) - self._get_check_units_ok(3.0*kg * 1.0*m, uc, 'kg*m', EXPR.NPV_ProductExpression) - self._get_check_units_ok(3.0*kg*m, uc, 'kg*m', EXPR.NPV_ProductExpression) + self._get_check_units_ok( + model.x * kg * model.y * m, uc, 'kg*m', EXPR.ProductExpression + ) + self._get_check_units_ok( + 3.0 * kg * 1.0 * m, uc, 'kg*m', EXPR.NPV_ProductExpression + ) + self._get_check_units_ok(3.0 * kg * m, uc, 'kg*m', EXPR.NPV_ProductExpression) # I don't think that there are combinations that can "fail" for products # test MonomialTermExpression - self._get_check_units_ok(model.x*kg, uc, 'kg', EXPR.MonomialTermExpression) + self._get_check_units_ok(model.x * kg, uc, 'kg', EXPR.MonomialTermExpression) # test DivisionExpression, NPV_DivisionExpression - self._get_check_units_ok(1.0/(model.x*kg), uc, '1/kg', EXPR.DivisionExpression) - self._get_check_units_ok(2.0/kg, uc, '1/kg', EXPR.NPV_DivisionExpression) - self._get_check_units_ok((model.x*kg)/1.0, uc, 'kg', EXPR.MonomialTermExpression) - self._get_check_units_ok(kg/2.0, uc, 'kg', EXPR.NPV_DivisionExpression) - self._get_check_units_ok(model.y*m/(model.x*kg), uc, 'm/kg', EXPR.DivisionExpression) - self._get_check_units_ok(m/kg, uc, 'm/kg', EXPR.NPV_DivisionExpression) + self._get_check_units_ok( + 1.0 / (model.x * kg), uc, '1/kg', EXPR.DivisionExpression + ) + self._get_check_units_ok(2.0 / kg, uc, '1/kg', EXPR.NPV_DivisionExpression) + self._get_check_units_ok( + (model.x * kg) / 1.0, uc, 'kg', EXPR.MonomialTermExpression + ) + self._get_check_units_ok(kg / 2.0, uc, 'kg', EXPR.NPV_DivisionExpression) + self._get_check_units_ok( + model.y * m / (model.x * kg), uc, 'm/kg', EXPR.DivisionExpression + ) + self._get_check_units_ok(m / kg, uc, 'm/kg', EXPR.NPV_DivisionExpression) # I don't think that there are combinations that can "fail" for products # test PowExpression, NPV_PowExpression self._get_check_units_ok(kg**2, uc, 'kg**2', EXPR.NPV_PowExpression) - self._get_check_units_ok(model.p**model.p, uc, 'None', EXPR.NPV_PowExpression) + self._get_check_units_ok( + model.p**model.p, uc, 'dimensionless', EXPR.NPV_PowExpression + ) self._get_check_units_ok(kg**model.p, uc, 'kg**42', EXPR.NPV_PowExpression) - self._get_check_units_ok((model.x*kg**2)**3, uc, 'kg**6', EXPR.PowExpression) + self._get_check_units_ok( + (model.x * kg**2) ** 3, uc, 'kg**6', EXPR.PowExpression + ) self._get_check_units_fail(kg**model.x, uc, EXPR.PowExpression, UnitsError) self._get_check_units_fail(model.x**kg, uc, EXPR.PowExpression, UnitsError) self._get_check_units_ok(kg**2, uc, 'kg**2', EXPR.NPV_PowExpression) self._get_check_units_fail(3.0**kg, uc, EXPR.NPV_PowExpression, UnitsError) # test NegationExpression, NPV_NegationExpression - self._get_check_units_ok(-(kg*model.x*model.y), uc, 'kg', EXPR.NegationExpression) + self._get_check_units_ok( + -(kg * model.x * model.y), uc, 'kg', EXPR.NegationExpression + ) self._get_check_units_ok(-kg, uc, 'kg', EXPR.NPV_NegationExpression) # don't think there are combinations that fan "fail" for negation # test AbsExpression, NPV_AbsExpression - self._get_check_units_ok(abs(kg*model.x), uc, 'kg', EXPR.AbsExpression) + self._get_check_units_ok(abs(kg * model.x), uc, 'kg', EXPR.AbsExpression) self._get_check_units_ok(abs(kg), uc, 'kg', EXPR.NPV_AbsExpression) # don't think there are combinations that fan "fail" for abs # test the different UnaryFunctionExpression / NPV_UnaryFunctionExpression types # log - self._get_check_units_ok(log(3.0*model.x), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_fail(log(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(log(3.0*model.p), uc, None, EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(log(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + log(3.0 * model.x), uc, 'dimensionless', EXPR.UnaryFunctionExpression + ) + self._get_check_units_fail( + log(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_ok( + log(3.0 * model.p), uc, 'dimensionless', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_fail( + log(3.0 * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # log10 - self._get_check_units_ok(log10(3.0*model.x), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_fail(log10(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(log10(3.0*model.p), uc, None, EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(log10(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + log10(3.0 * model.x), uc, 'dimensionless', EXPR.UnaryFunctionExpression + ) + self._get_check_units_fail( + log10(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_ok( + log10(3.0 * model.p), uc, 'dimensionless', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_fail( + log10(3.0 * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # sin - self._get_check_units_ok(sin(3.0*model.x), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_ok(sin(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_fail(sin(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_fail(sin(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(sin(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(sin(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + sin(3.0 * model.x), uc, 'dimensionless', EXPR.UnaryFunctionExpression + ) + self._get_check_units_ok( + sin(3.0 * model.x * uc.radians), + uc, + 'dimensionless', + EXPR.UnaryFunctionExpression, + ) + self._get_check_units_fail( + sin(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_fail( + sin(3.0 * kg * model.x * uc.kg), + uc, + EXPR.UnaryFunctionExpression, + UnitsError, + ) + self._get_check_units_ok( + sin(3.0 * model.p * uc.radians), + uc, + 'dimensionless', + EXPR.NPV_UnaryFunctionExpression, + ) + self._get_check_units_fail( + sin(3.0 * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # cos - self._get_check_units_ok(cos(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_fail(cos(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_fail(cos(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(cos(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(cos(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + cos(3.0 * model.x * uc.radians), + uc, + 'dimensionless', + EXPR.UnaryFunctionExpression, + ) + self._get_check_units_fail( + cos(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_fail( + cos(3.0 * kg * model.x * uc.kg), + uc, + EXPR.UnaryFunctionExpression, + UnitsError, + ) + self._get_check_units_ok( + cos(3.0 * model.p * uc.radians), + uc, + 'dimensionless', + EXPR.NPV_UnaryFunctionExpression, + ) + self._get_check_units_fail( + cos(3.0 * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # tan - self._get_check_units_ok(tan(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_fail(tan(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_fail(tan(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(tan(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(tan(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + tan(3.0 * model.x * uc.radians), + uc, + 'dimensionless', + EXPR.UnaryFunctionExpression, + ) + self._get_check_units_fail( + tan(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_fail( + tan(3.0 * kg * model.x * uc.kg), + uc, + EXPR.UnaryFunctionExpression, + UnitsError, + ) + self._get_check_units_ok( + tan(3.0 * model.p * uc.radians), + uc, + 'dimensionless', + EXPR.NPV_UnaryFunctionExpression, + ) + self._get_check_units_fail( + tan(3.0 * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # sin - self._get_check_units_ok(sinh(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_fail(sinh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_fail(sinh(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(sinh(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(sinh(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + sinh(3.0 * model.x * uc.radians), + uc, + 'dimensionless', + EXPR.UnaryFunctionExpression, + ) + self._get_check_units_fail( + sinh(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_fail( + sinh(3.0 * kg * model.x * uc.kg), + uc, + EXPR.UnaryFunctionExpression, + UnitsError, + ) + self._get_check_units_ok( + sinh(3.0 * model.p * uc.radians), + uc, + 'dimensionless', + EXPR.NPV_UnaryFunctionExpression, + ) + self._get_check_units_fail( + sinh(3.0 * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # cos - self._get_check_units_ok(cosh(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_fail(cosh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_fail(cosh(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(cosh(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(cosh(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + cosh(3.0 * model.x * uc.radians), + uc, + 'dimensionless', + EXPR.UnaryFunctionExpression, + ) + self._get_check_units_fail( + cosh(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_fail( + cosh(3.0 * kg * model.x * uc.kg), + uc, + EXPR.UnaryFunctionExpression, + UnitsError, + ) + self._get_check_units_ok( + cosh(3.0 * model.p * uc.radians), + uc, + 'dimensionless', + EXPR.NPV_UnaryFunctionExpression, + ) + self._get_check_units_fail( + cosh(3.0 * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # tan - self._get_check_units_ok(tanh(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_fail(tanh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_fail(tanh(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(tanh(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(tanh(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + tanh(3.0 * model.x * uc.radians), + uc, + 'dimensionless', + EXPR.UnaryFunctionExpression, + ) + self._get_check_units_fail( + tanh(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_fail( + tanh(3.0 * kg * model.x * uc.kg), + uc, + EXPR.UnaryFunctionExpression, + UnitsError, + ) + self._get_check_units_ok( + tanh(3.0 * model.p * uc.radians), + uc, + 'dimensionless', + EXPR.NPV_UnaryFunctionExpression, + ) + self._get_check_units_fail( + tanh(3.0 * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # asin - self._get_check_units_ok(asin(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression) - self._get_check_units_fail(asin(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(asin(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(asin(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + asin(3.0 * model.x), uc, 'rad', EXPR.UnaryFunctionExpression + ) + self._get_check_units_fail( + asin(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_ok( + asin(3.0 * model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_fail( + asin(3.0 * model.p * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # acos - self._get_check_units_ok(acos(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression) - self._get_check_units_fail(acos(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(acos(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(acos(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + acos(3.0 * model.x), uc, 'rad', EXPR.UnaryFunctionExpression + ) + self._get_check_units_fail( + acos(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_ok( + acos(3.0 * model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_fail( + acos(3.0 * model.p * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # atan - self._get_check_units_ok(atan(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression) - self._get_check_units_fail(atan(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(atan(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(atan(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + atan(3.0 * model.x), uc, 'rad', EXPR.UnaryFunctionExpression + ) + self._get_check_units_fail( + atan(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_ok( + atan(3.0 * model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_fail( + atan(3.0 * model.p * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # exp - self._get_check_units_ok(exp(3.0*model.x), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_fail(exp(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(exp(3.0*model.p), uc, None, EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(exp(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + exp(3.0 * model.x), uc, 'dimensionless', EXPR.UnaryFunctionExpression + ) + self._get_check_units_fail( + exp(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_ok( + exp(3.0 * model.p), uc, 'dimensionless', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_fail( + exp(3.0 * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # sqrt - self._get_check_units_ok(sqrt(3.0*model.x), uc, None, EXPR.UnaryFunctionExpression) - self._get_check_units_ok(sqrt(3.0*model.x*kg**2), uc, 'kg', EXPR.UnaryFunctionExpression) - self._get_check_units_ok(sqrt(3.0*model.x*kg), uc, 'kg**0.5', EXPR.UnaryFunctionExpression) - self._get_check_units_ok(sqrt(3.0*model.p), uc, None, EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_ok(sqrt(3.0*model.p*kg**2), uc, 'kg', EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_ok(sqrt(3.0*model.p*kg), uc, 'kg**0.5', EXPR.NPV_UnaryFunctionExpression) + self._get_check_units_ok( + sqrt(3.0 * model.x), uc, 'dimensionless', EXPR.UnaryFunctionExpression + ) + self._get_check_units_ok( + sqrt(3.0 * model.x * kg**2), uc, 'kg', EXPR.UnaryFunctionExpression + ) + self._get_check_units_ok( + sqrt(3.0 * model.x * kg), uc, 'kg**0.5', EXPR.UnaryFunctionExpression + ) + self._get_check_units_ok( + sqrt(3.0 * model.p), uc, 'dimensionless', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_ok( + sqrt(3.0 * model.p * kg**2), uc, 'kg', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_ok( + sqrt(3.0 * model.p * kg), uc, 'kg**0.5', EXPR.NPV_UnaryFunctionExpression + ) # asinh - self._get_check_units_ok(asinh(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression) - self._get_check_units_fail(asinh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(asinh(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(asinh(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + asinh(3.0 * model.x), uc, 'rad', EXPR.UnaryFunctionExpression + ) + self._get_check_units_fail( + asinh(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_ok( + asinh(3.0 * model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_fail( + asinh(3.0 * model.p * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # acosh - self._get_check_units_ok(acosh(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression) - self._get_check_units_fail(acosh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(acosh(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(acosh(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + acosh(3.0 * model.x), uc, 'rad', EXPR.UnaryFunctionExpression + ) + self._get_check_units_fail( + acosh(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_ok( + acosh(3.0 * model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_fail( + acosh(3.0 * model.p * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # atanh - self._get_check_units_ok(atanh(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression) - self._get_check_units_fail(atanh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError) - self._get_check_units_ok(atanh(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression) - self._get_check_units_fail(atanh(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError) + self._get_check_units_ok( + atanh(3.0 * model.x), uc, 'rad', EXPR.UnaryFunctionExpression + ) + self._get_check_units_fail( + atanh(3.0 * kg * model.x), uc, EXPR.UnaryFunctionExpression, UnitsError + ) + self._get_check_units_ok( + atanh(3.0 * model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression + ) + self._get_check_units_fail( + atanh(3.0 * model.p * kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError + ) # ceil - self._get_check_units_ok(ceil(kg*model.x), uc, 'kg', EXPR.UnaryFunctionExpression) + self._get_check_units_ok( + ceil(kg * model.x), uc, 'kg', EXPR.UnaryFunctionExpression + ) self._get_check_units_ok(ceil(kg), uc, 'kg', EXPR.NPV_UnaryFunctionExpression) # don't think there are combinations that fan "fail" for ceil # floor - self._get_check_units_ok(floor(kg*model.x), uc, 'kg', EXPR.UnaryFunctionExpression) + self._get_check_units_ok( + floor(kg * model.x), uc, 'kg', EXPR.UnaryFunctionExpression + ) self._get_check_units_ok(floor(kg), uc, 'kg', EXPR.NPV_UnaryFunctionExpression) # don't think there are combinations that fan "fail" for floor # test Expr_ifExpression # consistent if, consistent then/else - self._get_check_units_ok(EXPR.Expr_if(IF=model.x*kg + kg >= 2.0*kg, THEN=model.x*kg, ELSE=model.y*kg), - uc, 'kg', EXPR.Expr_ifExpression) + self._get_check_units_ok( + EXPR.Expr_if( + IF=model.x * kg + kg >= 2.0 * kg, THEN=model.x * kg, ELSE=model.y * kg + ), + uc, + 'kg', + EXPR.Expr_ifExpression, + ) # unitless if, consistent then/else - self._get_check_units_ok(EXPR.Expr_if(IF=model.x >= 2.0, THEN=model.x*kg, ELSE=model.y*kg), - uc, 'kg', EXPR.Expr_ifExpression) + self._get_check_units_ok( + EXPR.Expr_if(IF=model.x >= 2.0, THEN=model.x * kg, ELSE=model.y * kg), + uc, + 'kg', + EXPR.Expr_ifExpression, + ) # consistent if, unitless then/else - self._get_check_units_ok(EXPR.Expr_if(IF=model.x*kg + kg >= 2.0*kg, THEN=model.x, ELSE=model.x), - uc, None, EXPR.Expr_ifExpression) + self._get_check_units_ok( + EXPR.Expr_if(IF=model.x * kg + kg >= 2.0 * kg, THEN=model.x, ELSE=model.x), + uc, + 'dimensionless', + EXPR.Expr_ifExpression, + ) # inconsistent then/else - self._get_check_units_fail(EXPR.Expr_if(IF=model.x >= 2.0, THEN=model.x*m, ELSE=model.y*kg), - uc, EXPR.Expr_ifExpression) + self._get_check_units_fail( + EXPR.Expr_if(IF=model.x >= 2.0, THEN=model.x * m, ELSE=model.y * kg), + uc, + EXPR.Expr_ifExpression, + ) # inconsistent then/else NPV - self._get_check_units_fail(EXPR.Expr_if(IF=model.x >= 2.0, THEN=model.p*m, ELSE=model.p*kg), - uc, EXPR.Expr_ifExpression) + self._get_check_units_fail( + EXPR.Expr_if(IF=model.x >= 2.0, THEN=model.p * m, ELSE=model.p * kg), + uc, + EXPR.Expr_ifExpression, + ) # inconsistent then/else NPV units only - self._get_check_units_fail(EXPR.Expr_if(IF=model.x >= 2.0, THEN=m, ELSE=kg), - uc, EXPR.Expr_ifExpression) + self._get_check_units_fail( + EXPR.Expr_if(IF=model.x >= 2.0, THEN=m, ELSE=kg), uc, EXPR.Expr_ifExpression + ) # test EXPR.IndexTemplate and GetItemExpression model.S = Set() i = EXPR.IndexTemplate(model.S) j = EXPR.IndexTemplate(model.S) - self._get_check_units_ok(i, uc, None, EXPR.IndexTemplate) + self._get_check_units_ok(i, uc, 'dimensionless', EXPR.IndexTemplate) model.mat = Var(model.S, model.S) - self._get_check_units_ok(model.mat[i,j+1], uc, None, EXPR.GetItemExpression) + self._get_check_units_ok( + model.mat[i, j + 1], uc, 'dimensionless', EXPR.Numeric_GetItemExpression + ) # test ExternalFunctionExpression, NPV_ExternalFunctionExpression model.ef = ExternalFunction(python_callback_function) - self._get_check_units_ok(model.ef(model.x, model.y), uc, None, EXPR.ExternalFunctionExpression) - self._get_check_units_ok(model.ef(1.0, 2.0), uc, None, EXPR.NPV_ExternalFunctionExpression) - self._get_check_units_fail(model.ef(model.x*kg, model.y), uc, EXPR.ExternalFunctionExpression, UnitsError) - self._get_check_units_fail(model.ef(2.0*kg, 1.0), uc, EXPR.NPV_ExternalFunctionExpression, UnitsError) + self._get_check_units_ok( + model.ef(model.x, model.y), + uc, + 'dimensionless', + EXPR.ExternalFunctionExpression, + ) + self._get_check_units_ok( + model.ef(1.0, 2.0), uc, 'dimensionless', EXPR.NPV_ExternalFunctionExpression + ) + self._get_check_units_fail( + model.ef(model.x * kg, model.y), + uc, + EXPR.ExternalFunctionExpression, + UnitsError, + ) + self._get_check_units_fail( + model.ef(2.0 * kg, 1.0), uc, EXPR.NPV_ExternalFunctionExpression, UnitsError + ) # test ExternalFunctionExpression, NPV_ExternalFunctionExpression model.ef2 = ExternalFunction(python_callback_function, units=uc.kg) - self._get_check_units_ok(model.ef2(model.x, model.y), uc, 'kg', EXPR.ExternalFunctionExpression) - self._get_check_units_ok(model.ef2(1.0, 2.0), uc, 'kg', EXPR.NPV_ExternalFunctionExpression) - self._get_check_units_fail(model.ef2(model.x*kg, model.y), uc, EXPR.ExternalFunctionExpression, UnitsError) - self._get_check_units_fail(model.ef2(2.0*kg, 1.0), uc, EXPR.NPV_ExternalFunctionExpression, UnitsError) + self._get_check_units_ok( + model.ef2(model.x, model.y), uc, 'kg', EXPR.ExternalFunctionExpression + ) + self._get_check_units_ok( + model.ef2(1.0, 2.0), uc, 'kg', EXPR.NPV_ExternalFunctionExpression + ) + self._get_check_units_fail( + model.ef2(model.x * kg, model.y), + uc, + EXPR.ExternalFunctionExpression, + UnitsError, + ) + self._get_check_units_fail( + model.ef2(2.0 * kg, 1.0), + uc, + EXPR.NPV_ExternalFunctionExpression, + UnitsError, + ) # test ExternalFunctionExpression, NPV_ExternalFunctionExpression - model.ef3 = ExternalFunction(python_callback_function, units=uc.kg, arg_units=[uc.kg, uc.m]) - self._get_check_units_fail(model.ef3(model.x, model.y), uc, EXPR.ExternalFunctionExpression) - self._get_check_units_fail(model.ef3(1.0, 2.0), uc, EXPR.NPV_ExternalFunctionExpression) - self._get_check_units_fail(model.ef3(model.x*kg, model.y), uc, EXPR.ExternalFunctionExpression, UnitsError) - self._get_check_units_fail(model.ef3(2.0*kg, 1.0), uc, EXPR.NPV_ExternalFunctionExpression, UnitsError) - self._get_check_units_ok(model.ef3(2.0*kg, 1.0*uc.m), uc, 'kg', EXPR.NPV_ExternalFunctionExpression) - self._get_check_units_ok(model.ef3(model.x*kg, model.y*m), uc, 'kg', EXPR.ExternalFunctionExpression) - self._get_check_units_ok(model.ef3(model.xkg, model.ym), uc, 'kg', EXPR.ExternalFunctionExpression) - self._get_check_units_fail(model.ef3(model.ym, model.xkg), uc, EXPR.ExternalFunctionExpression, InconsistentUnitsError) + model.ef3 = ExternalFunction( + python_callback_function, units=uc.kg, arg_units=[uc.kg, uc.m] + ) + self._get_check_units_fail( + model.ef3(model.x, model.y), uc, EXPR.ExternalFunctionExpression + ) + self._get_check_units_fail( + model.ef3(1.0, 2.0), uc, EXPR.NPV_ExternalFunctionExpression + ) + self._get_check_units_fail( + model.ef3(model.x * kg, model.y), + uc, + EXPR.ExternalFunctionExpression, + UnitsError, + ) + self._get_check_units_fail( + model.ef3(2.0 * kg, 1.0), + uc, + EXPR.NPV_ExternalFunctionExpression, + UnitsError, + ) + self._get_check_units_ok( + model.ef3(2.0 * kg, 1.0 * uc.m), + uc, + 'kg', + EXPR.NPV_ExternalFunctionExpression, + ) + self._get_check_units_ok( + model.ef3(model.x * kg, model.y * m), + uc, + 'kg', + EXPR.ExternalFunctionExpression, + ) + self._get_check_units_ok( + model.ef3(model.xkg, model.ym), uc, 'kg', EXPR.ExternalFunctionExpression + ) + self._get_check_units_fail( + model.ef3(model.ym, model.xkg), + uc, + EXPR.ExternalFunctionExpression, + InconsistentUnitsError, + ) # @unittest.skip('Skipped testing LinearExpression since StreamBasedExpressionVisitor does not handle LinearExpressions') def test_linear_expression(self): @@ -455,18 +822,25 @@ def test_linear_expression(self): # test LinearExpression # ToDo: Once this test is working correctly, this code should be moved to the test above model.vv = Var(['A', 'B', 'C']) - self._get_check_units_ok(sum_product(model.vv), uc, None, EXPR.LinearExpression) + self._get_check_units_ok( + sum_product(model.vv), uc, 'dimensionless', EXPR.LinearExpression + ) - linex1 = sum_product(model.vv, {'A': kg, 'B': kg, 'C':kg}, index=['A', 'B', 'C']) + linex1 = sum_product( + model.vv, {'A': kg, 'B': kg, 'C': kg}, index=['A', 'B', 'C'] + ) self._get_check_units_ok(linex1, uc, 'kg', EXPR.LinearExpression) - linex2 = sum_product(model.vv, {'A': kg, 'B': m, 'C':kg}, index=['A', 'B', 'C']) + linex2 = sum_product( + model.vv, {'A': kg, 'B': m, 'C': kg}, index=['A', 'B', 'C'] + ) self._get_check_units_fail(linex2, uc, EXPR.LinearExpression) def test_bad_units(self): uc = units with self.assertRaisesRegex( - AttributeError, "Attribute unknown_bogus_unit not found."): + AttributeError, "Attribute unknown_bogus_unit not found." + ): uc.unknown_bogus_unit def test_named_expression(self): @@ -474,17 +848,25 @@ def test_named_expression(self): m = ConcreteModel() m.x = Var(units=uc.kg) m.y = Var(units=uc.m) - m.e = Expression(expr=m.x/m.y) + m.e = Expression(expr=m.x / m.y) self.assertEqual(str(uc.get_units(m.e)), 'kg/m') def test_dimensionless(self): uc = units kg = uc.kg dless = uc.dimensionless - self._get_check_units_ok(2.0 == 2.0*dless, uc, None, EXPR.EqualityExpression) - self.assertEqual(uc.get_units(2.0*dless), uc.get_units(2.0)) - self.assertIsNone(uc.get_units(2.0*dless)) - self.assertIsNone(uc.get_units(kg/kg)) + self._get_check_units_ok( + 2.0 == 2.0 * dless, uc, 'dimensionless', EXPR.EqualityExpression + ) + x = uc.get_units(2.0) + self.assertIs(type(x), _PyomoUnit) + self.assertEqual(x, dless) + x = uc.get_units(2.0 * dless) + self.assertIs(type(x), _PyomoUnit) + self.assertEqual(x, dless) + x = uc.get_units(kg / kg) + self.assertIs(type(x), _PyomoUnit) + self.assertEqual(x, dless) def test_temperatures(self): uc = units @@ -506,21 +888,23 @@ def test_temperatures(self): # 'rankine' or '°R' (note UTF-8 encoding, which requires the # "coding: utf-8" comment flag at the top of this file). R_str = R.getname() - #self.assertIn(R_str, ['rankine', '°R']) + # self.assertIn(R_str, ['rankine', '°R']) - self._get_check_units_ok(2.0*R + 3.0*R, uc, R_str, EXPR.NPV_SumExpression) - self._get_check_units_ok(2.0*K + 3.0*K, uc, 'K', EXPR.NPV_SumExpression) + self._get_check_units_ok(2.0 * R + 3.0 * R, uc, R_str, EXPR.NPV_SumExpression) + self._get_check_units_ok(2.0 * K + 3.0 * K, uc, 'K', EXPR.NPV_SumExpression) - ex = 2.0*delta_degC + 3.0*delta_degC + 1.0*delta_degC + ex = 2.0 * delta_degC + 3.0 * delta_degC + 1.0 * delta_degC self.assertEqual(type(ex), EXPR.NPV_SumExpression) assert_units_consistent(ex) - ex = 2.0*delta_degF + 3.0*delta_degF + ex = 2.0 * delta_degF + 3.0 * delta_degF self.assertEqual(type(ex), EXPR.NPV_SumExpression) assert_units_consistent(ex) - self._get_check_units_fail(2.0*K + 3.0*R, uc, EXPR.NPV_SumExpression) - self._get_check_units_fail(2.0*delta_degC + 3.0*delta_degF, uc, EXPR.NPV_SumExpression) + self._get_check_units_fail(2.0 * K + 3.0 * R, uc, EXPR.NPV_SumExpression) + self._get_check_units_fail( + 2.0 * delta_degC + 3.0 * delta_degF, uc, EXPR.NPV_SumExpression + ) self.assertAlmostEqual(uc.convert_temp_K_to_C(323.15), 50.0, places=5) self.assertAlmostEqual(uc.convert_temp_C_to_K(50.0), 323.15, places=5) @@ -532,52 +916,67 @@ def test_temperatures(self): def test_module_example(self): from pyomo.environ import ConcreteModel, Var, Objective, units + model = ConcreteModel() model.acc = Var() - model.obj = Objective(expr=(model.acc*units.m/units.s**2 - 9.81*units.m/units.s**2)**2) + model.obj = Objective( + expr=(model.acc * units.m / units.s**2 - 9.81 * units.m / units.s**2) + ** 2 + ) self.assertEqual('m**2/s**4', str(units.get_units(model.obj.expr))) def test_convert_value(self): u = units x = 0.4535923 expected_lb_value = 1.0 - actual_lb_value = u.convert_value( - num_value=x, from_units=u.kg, to_units=u.lb) + actual_lb_value = u.convert_value(num_value=x, from_units=u.kg, to_units=u.lb) self.assertAlmostEqual(expected_lb_value, actual_lb_value, places=5) actual_lb_value = u.convert_value( - num_value=value(x*u.kg), from_units=u.kg, to_units=u.lb) + num_value=value(x * u.kg), from_units=u.kg, to_units=u.lb + ) self.assertAlmostEqual(expected_lb_value, actual_lb_value, places=5) src = 5 - ans = u.convert_value(src, u.m/u.s, u.m/u.s) + ans = u.convert_value(src, u.m / u.s, u.m / u.s) self.assertIs(src, ans) with self.assertRaises(UnitsError): # cannot convert from meters to pounds - actual_lb_value = u.convert_value(num_value=x, from_units=u.meters, to_units=u.lb) + actual_lb_value = u.convert_value( + num_value=x, from_units=u.meters, to_units=u.lb + ) with self.assertRaises(UnitsError): # num_value must be a native numerical type - actual_lb_value = u.convert_value(num_value=x*u.kg, from_units=u.kg, to_units=u.lb) + actual_lb_value = u.convert_value( + num_value=x * u.kg, from_units=u.kg, to_units=u.lb + ) def test_convert(self): u = units m = ConcreteModel() m.dx = Var(units=u.m, initialize=0.10188943773836046) m.dy = Var(units=u.m, initialize=0.0) - m.vx = Var(units=u.m/u.s, initialize=0.7071067769802851) - m.vy = Var(units=u.m/u.s, initialize=0.7071067769802851) - m.t = Var(units=u.min, bounds=(1e-5,10.0), initialize=0.0024015570927624456) - m.theta = Var(bounds=(0, 0.49*3.14), initialize=0.7853981693583533, units=u.radians) - m.a = Param(initialize=-32.2, units=u.ft/u.s**2) - - m.obj = Objective(expr = m.dx, sense=maximize) - m.vx_con = Constraint(expr = m.vx == 1.0*u.m/u.s*cos(m.theta)) - m.vy_con = Constraint(expr = m.vy == 1.0*u.m/u.s*sin(m.theta)) - m.dx_con = Constraint(expr = m.dx == m.vx*u.convert(m.t, to_units=u.s)) - m.dy_con = Constraint(expr = m.dy == m.vy*u.convert(m.t, to_units=u.s) - + 0.5*(u.convert(m.a, to_units=u.m/u.s**2))*(u.convert(m.t, to_units=u.s))**2) - m.ground = Constraint(expr = m.dy == 0) + m.vx = Var(units=u.m / u.s, initialize=0.7071067769802851) + m.vy = Var(units=u.m / u.s, initialize=0.7071067769802851) + m.t = Var(units=u.min, bounds=(1e-5, 10.0), initialize=0.0024015570927624456) + m.theta = Var( + bounds=(0, 0.49 * 3.14), initialize=0.7853981693583533, units=u.radians + ) + m.a = Param(initialize=-32.2, units=u.ft / u.s**2) + + m.obj = Objective(expr=m.dx, sense=maximize) + m.vx_con = Constraint(expr=m.vx == 1.0 * u.m / u.s * cos(m.theta)) + m.vy_con = Constraint(expr=m.vy == 1.0 * u.m / u.s * sin(m.theta)) + m.dx_con = Constraint(expr=m.dx == m.vx * u.convert(m.t, to_units=u.s)) + m.dy_con = Constraint( + expr=m.dy + == m.vy * u.convert(m.t, to_units=u.s) + + 0.5 + * (u.convert(m.a, to_units=u.m / u.s**2)) + * (u.convert(m.t, to_units=u.s)) ** 2 + ) + m.ground = Constraint(expr=m.dy == 0) with self.assertRaises(UnitsError): u.convert(m.a, to_units=u.kg) @@ -609,13 +1008,13 @@ def test_convert_dimensionless(self): def test_usd(self): u = units u.load_definitions_from_strings(["USD = [currency]"]) - expr = 3.0*u.USD + expr = 3.0 * u.USD self._get_check_units_ok(expr, u, 'USD') def test_clone(self): m = ConcreteModel() m.x = Var(units=units.kg) - m.c = Constraint(expr=m.x**2 <= 10*units.kg**2) + m.c = Constraint(expr=m.x**2 <= 10 * units.kg**2) i = m.clone() self.assertIs(m.x._units, i.x._units) self.assertEqual(str(m.c.upper), str(i.c.upper)) @@ -628,7 +1027,7 @@ def test_clone(self): def test_pickle(self): m = ConcreteModel() m.x = Var(units=units.kg) - m.c = Constraint(expr=m.x**2 <= 10*units.kg**2) + m.c = Constraint(expr=m.x**2 <= 10 * units.kg**2) log = StringIO() with LoggingIntercept(log, 'pyomo.core.base'): i = pickle.loads(pickle.dumps(m)) @@ -647,14 +1046,16 @@ def test_pickle(self): um = PyomoUnitsContainer(pint_module.UnitRegistry()) m = ConcreteModel() m.x = Var(units=um.kg) - m.c = Constraint(expr=m.x**2 <= 10*um.kg**2) + m.c = Constraint(expr=m.x**2 <= 10 * um.kg**2) log = StringIO() with LoggingIntercept(log, 'pyomo.core.base'): i = pickle.loads(pickle.dumps(m)) self.assertIn( "pickling a _PyomoUnit associated with a PyomoUnitsContainer " "that is not the default singleton " - "(pyomo.core.base.units_container.units)", log.getvalue()) + "(pyomo.core.base.units_container.units)", + log.getvalue(), + ) self.assertIsNot(m.x, i.x) self.assertIsNot(m.x._units, i.x._units) # Note that pint is inconsistent when comparing standard units @@ -689,7 +1090,7 @@ def test_set_pint_registry(self): self.assertIn( "Changing the pint registry used by the Pyomo Units " "system after the PyomoUnitsContainer was constructed", - LOG.getvalue() + LOG.getvalue(), ) def test_as_quantity_scalar(self): @@ -699,7 +1100,7 @@ def test_as_quantity_scalar(self): m.x = Var(initialize=1) m.y = Var(initialize=2, units=units.g) m.p = Param(initialize=3) - m.q = Param(initialize=4, units=1/units.s) + m.q = Param(initialize=4, units=1 / units.s) m.b = BooleanVar(initialize=True) q = as_quantity(0) @@ -747,7 +1148,7 @@ def test_as_quantity_scalar(self): self.assertEqual(q, True) class UnknownPyomoType(object): - def is_expression_type(self): + def is_expression_type(self, expression_system=None): return False def is_numeric_type(self): @@ -768,7 +1169,7 @@ def test_as_quantity_expression(self): m.x = Var(initialize=1) m.y = Var(initialize=2, units=units.g) m.p = Param(initialize=3) - m.q = Param(initialize=4, units=1/units.s) + m.q = Param(initialize=4, units=1 / units.s) q = as_quantity(m.x * m.p) self.assertIs(q.__class__, Quantity) @@ -786,30 +1187,30 @@ def test_as_quantity_expression(self): self.assertIs(q.__class__, Quantity) self.assertEqual(q, 8 * _pint.g / _pint.s) - q = as_quantity(m.y <= 2*m.y) + q = as_quantity(m.y <= 2 * m.y) self.assertIs(q.__class__, bool) self.assertEqual(q, True) - q = as_quantity(m.y >= 2*m.y) + q = as_quantity(m.y >= 2 * m.y) self.assertIs(q.__class__, bool) self.assertEqual(q, False) - q = as_quantity(EXPR.Expr_if(IF=m.y <= 2*m.y, THEN=m.x, ELSE=m.p)) + q = as_quantity(EXPR.Expr_if(IF=m.y <= 2 * m.y, THEN=m.x, ELSE=m.p)) self.assertIs(q.__class__, Quantity) self.assertEqual(q, 1 * _pint.dimensionless) - q = as_quantity(EXPR.Expr_if(IF=m.y >= 2*m.y, THEN=m.x, ELSE=m.p)) + q = as_quantity(EXPR.Expr_if(IF=m.y >= 2 * m.y, THEN=m.x, ELSE=m.p)) self.assertIs(q.__class__, Quantity) self.assertEqual(q, 3 * _pint.dimensionless) # NOTE: The following two tests are not unit consistent (but can # be evaluated) - q = as_quantity(EXPR.Expr_if(IF=m.x <= 2*m.x, THEN=m.y, ELSE=m.q)) + q = as_quantity(EXPR.Expr_if(IF=m.x <= 2 * m.x, THEN=m.y, ELSE=m.q)) self.assertIs(q.__class__, Quantity) self.assertEqual(q, 2 * _pint.g) - q = as_quantity(EXPR.Expr_if(IF=m.x >= 2*m.x, THEN=m.y, ELSE=m.q)) + q = as_quantity(EXPR.Expr_if(IF=m.x >= 2 * m.x, THEN=m.y, ELSE=m.q)) self.assertIs(q.__class__, Quantity) self.assertEqual(q, 4 / _pint.s) @@ -820,19 +1221,56 @@ def test_as_quantity_expression(self): self.assertEqual(q.units, _pint.radian) self.assertEqual(q, 0 * _pint.radian) - q = as_quantity(cos(m.x*math.pi)) + q = as_quantity(cos(m.x * math.pi)) self.assertIs(q.__class__, Quantity) self.assertEqual(q.units, _pint.dimensionless) self.assertAlmostEqual(q, -1 * _pint.dimensionless) def MyAdder(x, y): return x + y + m.EF = ExternalFunction(MyAdder, units=units.kg) ef = m.EF(m.x, m.y) q = as_quantity(ef) self.assertIs(q.__class__, Quantity) self.assertAlmostEqual(q, 3 * _pint.kg) + def test_var_set_value(self): + # Tests for #1570 + m = ConcreteModel() + + # Un-united variables just strip the units + m.x = Var() + m.x.value = 10 + self.assertEqual(m.x.value, 10) + m.x.value = 20 * units.kg + self.assertEqual(m.x.value, 20) + m.x.value = 30 * units.dimensionless + self.assertEqual(m.x.value, 30) + del m.x + + # Dimensionless variables require dimensionless args + m.x = Var(units=units.dimensionless) + m.x.value = 10 + self.assertEqual(m.x.value, 10) + with self.assertRaisesRegex(UnitsError, 'Cannot convert kg to dimensionless'): + m.x.value = 20 * units.kg + self.assertEqual(m.x.value, 10) + m.x.value = 30 * units.dimensionless + self.assertEqual(m.x.value, 30) + del m.x + + # Dimensioned variables require bare or dimensioned args + m.x = Var(units=units.gram) + m.x.value = 10 + self.assertEqual(m.x.value, 10) + m.x.value = 20 * units.kg + self.assertEqual(m.x.value, 20000) + with self.assertRaisesRegex(UnitsError, 'Cannot convert dimensionless to g'): + m.x.value = 30 * units.dimensionless + self.assertEqual(m.x.value, 20000) + del m.x + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_var.py b/pyomo/core/tests/unit/test_var.py index 65081809c74..bbd4bba030f 100644 --- a/pyomo/core/tests/unit/test_var.py +++ b/pyomo/core/tests/unit/test_var.py @@ -17,7 +17,8 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep from io import StringIO @@ -26,13 +27,29 @@ from pyomo.core.base import IntegerSet from pyomo.core.expr.numeric_expr import ( - NPV_ProductExpression, NPV_MaxExpression, NPV_MinExpression, + NPV_ProductExpression, + NPV_MaxExpression, + NPV_MinExpression, ) from pyomo.core.staleflag import StaleFlagManager from pyomo.environ import ( - AbstractModel, ConcreteModel, Set, Param, Var, VarList, RangeSet, - Suffix, Expression, NonPositiveReals, PositiveReals, Reals, RealSet, - NonNegativeReals, Integers, Binary, value + AbstractModel, + ConcreteModel, + Set, + Param, + Var, + VarList, + RangeSet, + Suffix, + Expression, + NonPositiveReals, + PositiveReals, + Reals, + RealSet, + NonNegativeReals, + Integers, + Binary, + value, ) from pyomo.core.base.units_container import units, pint_available, UnitsError @@ -47,15 +64,17 @@ def test_lower_bound(self): self.assertIs(type(m.x.lower), int) self.assertEqual(value(m.x.lower), 0) m.x.domain = Reals - m.x.setlb(5*m.p) + m.x.setlb(5 * m.p) self.assertIs(type(m.x.lower), NPV_ProductExpression) self.assertEqual(value(m.x.lower), 10) m.x.domain = NonNegativeReals self.assertIs(type(m.x.lower), NPV_MaxExpression) self.assertEqual(value(m.x.lower), 10) with self.assertRaisesRegex( - ValueError, "Potentially variable input of type 'ScalarVar' " - "supplied as lower bound for variable 'x'"): + ValueError, + "Potentially variable input of type 'ScalarVar' " + "supplied as lower bound for variable 'x'", + ): m.x.setlb(m.x) def test_lower_bound_setter(self): @@ -87,15 +106,17 @@ def test_upper_bound(self): self.assertIs(type(m.x.upper), int) self.assertEqual(value(m.x.upper), 0) m.x.domain = Reals - m.x.setub(-5*m.p) + m.x.setub(-5 * m.p) self.assertIs(type(m.x.upper), NPV_ProductExpression) self.assertEqual(value(m.x.upper), -10) m.x.domain = NonPositiveReals self.assertIs(type(m.x.upper), NPV_MinExpression) self.assertEqual(value(m.x.upper), -10) with self.assertRaisesRegex( - ValueError, "Potentially variable input of type 'ScalarVar' " - "supplied as upper bound for variable 'x'"): + ValueError, + "Potentially variable input of type 'ScalarVar' " + "supplied as upper bound for variable 'x'", + ): m.x.setub(m.x) def test_upper_bound_setter(self): @@ -120,7 +141,6 @@ def test_upper_bound_setter(self): class PyomoModel(unittest.TestCase): - def setUp(self): self.model = AbstractModel() self.instance = None @@ -129,7 +149,7 @@ def tearDown(self): self.model = None self.instance = None - def construct(self,filename=None): + def construct(self, filename=None): if filename is not None: self.instance = self.model.create_instance(filename) else: @@ -137,7 +157,6 @@ def construct(self,filename=None): class TestSimpleVar(PyomoModel): - def setUp(self): # # Create Model @@ -408,7 +427,7 @@ def test_lb_attr1(self): def test_lb_attr2(self): """Test lb attribute""" - self.model.x = Var(within=NonNegativeReals, bounds=(-1,2)) + self.model.x = Var(within=NonNegativeReals, bounds=(-1, 2)) self.instance = self.model.create_instance() self.assertEqual(value(self.instance.x.lb), 0.0) self.assertEqual(value(self.instance.x.ub), 2.0) @@ -416,7 +435,7 @@ def test_lb_attr2(self): def test_lb_attr3(self): """Test lb attribute""" self.model.p = Param(mutable=True, initialize=1) - self.model.x = Var(within=NonNegativeReals, bounds=(self.model.p,None)) + self.model.x = Var(within=NonNegativeReals, bounds=(self.model.p, None)) self.instance = self.model.create_instance() self.assertEqual(value(self.instance.x.lb), 1.0) self.instance.p = 2 @@ -431,7 +450,7 @@ def test_ub_attr1(self): def test_ub_attr2(self): """Test ub attribute""" - self.model.x = Var(within=NonPositiveReals, bounds=(-2,1)) + self.model.x = Var(within=NonPositiveReals, bounds=(-2, 1)) self.instance = self.model.create_instance() self.assertEqual(value(self.instance.x.lb), -2.0) self.assertEqual(value(self.instance.x.ub), 0.0) @@ -447,8 +466,10 @@ def test_within_option(self): def test_bounds_option1(self): """Test bounds option""" + def x_bounds(model): - return (-1.0,1.0) + return (-1.0, 1.0) + self.model.x = Var(bounds=x_bounds) self.instance = self.model.create_instance() self.assertEqual(value(self.instance.x.lb), -1.0) @@ -456,23 +477,27 @@ def x_bounds(model): def test_bounds_option2(self): """Test bounds option""" - self.model.x = Var(bounds=(-1.0,1.0)) + self.model.x = Var(bounds=(-1.0, 1.0)) self.instance = self.model.create_instance() self.assertEqual(value(self.instance.x.lb), -1.0) self.assertEqual(value(self.instance.x.ub), 1.0) def test_rule_option(self): """Test rule option""" + def x_init(model): return 1.3 + self.model.x = Var(initialize=x_init) self.instance = self.model.create_instance() self.assertEqual(self.instance.x.value, 1.3) def test_initialize_with_function(self): """Test initialize option with an initialization rule""" + def init_rule(model): return 1.3 + self.model.x = Var(initialize=init_rule) self.instance = self.model.create_instance() self.assertEqual(self.instance.x.value, 1.3) @@ -481,7 +506,7 @@ def init_rule(model): def test_initialize_with_dict(self): """Test initialize option with a dictionary""" - self.model.x = Var(initialize={None:1.3}) + self.model.x = Var(initialize={None: 1.3}) self.instance = self.model.create_instance() self.assertEqual(self.instance.x.value, 1.3) self.instance.x = 1 @@ -499,53 +524,52 @@ def test_without_initial_value(self): """Test default initial value""" self.model.x = Var() self.instance = self.model.create_instance() - self.assertEqual(self.instance.x.value,None) + self.assertEqual(self.instance.x.value, None) self.instance.x = 6 - self.assertEqual(self.instance.x.value,6) + self.assertEqual(self.instance.x.value, 6) def test_dim(self): """Test dim method""" self.model.x = Var() self.instance = self.model.create_instance() - self.assertEqual(self.instance.x.dim(),0) + self.assertEqual(self.instance.x.dim(), 0) def test_keys(self): """Test keys method""" self.model.x = Var() self.instance = self.model.create_instance() - self.assertEqual(list(self.instance.x.keys()),[None]) - self.assertEqual(id(self.instance.x),id(self.instance.x[None])) + self.assertEqual(list(self.instance.x.keys()), [None]) + self.assertEqual(id(self.instance.x), id(self.instance.x[None])) def test_len(self): """Test len method""" self.model.x = Var() - self.assertEqual(len(self.model.x),0) + self.assertEqual(len(self.model.x), 0) self.instance = self.model.create_instance() - self.assertEqual(len(self.instance.x),1) + self.assertEqual(len(self.instance.x), 1) def test_value(self): """Check the value of the variable""" self.model.x = Var(initialize=3.3) self.instance = self.model.create_instance() tmp = value(self.instance.x.value) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 3.3 ) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 3.3) tmp = float(self.instance.x.value) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 3.3 ) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 3.3) tmp = int(self.instance.x.value) - self.assertEqual( type(tmp), int) - self.assertEqual( tmp, 3 ) + self.assertEqual(type(tmp), int) + self.assertEqual(tmp, 3) class TestArrayVar(TestSimpleVar): - def setUp(self): # # Create Model # PyomoModel.setUp(self) - self.model.A = Set(initialize=[1,2]) + self.model.A = Set(initialize=[1, 2]) def test_fixed_attr(self): """Test fixed attribute""" @@ -554,7 +578,7 @@ def test_fixed_attr(self): self.instance = self.model.create_instance() self.instance.x.fixed = True self.assertEqual(self.instance.x[1].fixed, False) - self.instance.y[1].fixed=True + self.instance.y[1].fixed = True self.assertEqual(self.instance.y[1].fixed, True) def test_value_attr(self): @@ -570,26 +594,28 @@ def test_value_attr(self): self.instance.y[1] = 3.5 self.assertEqual(self.instance.y[1].value, 3.5) - #def test_lb_attr(self): - #"""Test lb attribute""" - #self.model.x = Var(self.model.A) - #self.instance = self.model.create_instance() - #self.instance.x.setlb(-1.0) - #self.assertEqual(value(self.instance.x[1].lb), -1.0) + # def test_lb_attr(self): + # """Test lb attribute""" + # self.model.x = Var(self.model.A) + # self.instance = self.model.create_instance() + # self.instance.x.setlb(-1.0) + # self.assertEqual(value(self.instance.x[1].lb), -1.0) - #def test_ub_attr(self): - #"""Test ub attribute""" - #self.model.x = Var(self.model.A) - #self.instance = self.model.create_instance() - #self.instance.x.setub(1.0) - #self.assertEqual(value(self.instance.x[1].ub), 1.0) + # def test_ub_attr(self): + # """Test ub attribute""" + # self.model.x = Var(self.model.A) + # self.instance = self.model.create_instance() + # self.instance.x.setub(1.0) + # self.assertEqual(value(self.instance.x[1].ub), 1.0) def test_initialize_with_function(self): """Test initialize option with an initialization rule""" + def init_rule(model, key): - i = key+11 + i = key + 11 return key == 1 and 1.3 or 2.3 - self.model.x = Var(self.model.A,initialize=init_rule) + + self.model.x = Var(self.model.A, initialize=init_rule) self.instance = self.model.create_instance() self.assertEqual(self.instance.x[1].value, 1.3) self.assertEqual(self.instance.x[2].value, 2.3) @@ -600,7 +626,7 @@ def init_rule(model, key): def test_initialize_with_dict(self): """Test initialize option with a dictionary""" - self.model.x = Var(self.model.A,initialize={1:1.3,2:2.3}) + self.model.x = Var(self.model.A, initialize={1: 1.3, 2: 2.3}) self.instance = self.model.create_instance() self.assertEqual(self.instance.x[1].value, 1.3) self.assertEqual(self.instance.x[2].value, 2.3) @@ -611,7 +637,7 @@ def test_initialize_with_dict(self): def test_initialize_with_subdict(self): """Test initialize option method with a dictionary of subkeys""" - self.model.x = Var(self.model.A,initialize={1:1.3}) + self.model.x = Var(self.model.A, initialize={1: 1.3}) self.instance = self.model.create_instance() self.assertEqual(self.instance.x[1].value, 1.3) self.assertEqual(self.instance.x[2].value, None) @@ -622,7 +648,7 @@ def test_initialize_with_subdict(self): def test_initialize_with_const(self): """Test initialize option with a constant""" - self.model.x = Var(self.model.A,initialize=3) + self.model.x = Var(self.model.A, initialize=3) self.instance = self.model.create_instance() self.assertEqual(self.instance.x[1].value, 3) self.assertEqual(self.instance.x[2].value, 3) @@ -635,17 +661,19 @@ def test_without_initial_value(self): """Test default initial value""" self.model.x = Var(self.model.A) self.instance = self.model.create_instance() - self.assertEqual(self.instance.x[1].value,None) - self.assertEqual(self.instance.x[2].value,None) + self.assertEqual(self.instance.x[1].value, None) + self.assertEqual(self.instance.x[2].value, None) self.instance.x[1] = 5 self.instance.x[2] = 6 - self.assertEqual(self.instance.x[1].value,5) - self.assertEqual(self.instance.x[2].value,6) + self.assertEqual(self.instance.x[1].value, 5) + self.assertEqual(self.instance.x[2].value, 6) def test_bounds_option1(self): """Test bounds option""" + def x_bounds(model, i): - return (-1.0,1.0) + return (-1.0, 1.0) + self.model.x = Var(self.model.A, bounds=x_bounds) self.instance = self.model.create_instance() self.assertEqual(value(self.instance.x[1].lb), -1.0) @@ -653,15 +681,17 @@ def x_bounds(model, i): def test_bounds_option2(self): """Test bounds option""" - self.model.x = Var(self.model.A, bounds=(-1.0,1.0)) + self.model.x = Var(self.model.A, bounds=(-1.0, 1.0)) self.instance = self.model.create_instance() self.assertEqual(value(self.instance.x[1].lb), -1.0) self.assertEqual(value(self.instance.x[1].ub), 1.0) def test_rule_option(self): """Test rule option""" + def x_init(model, i): return 1.3 + self.model.x = Var(self.model.A, initialize=x_init) self.instance = self.model.create_instance() self.assertEqual(self.instance.x[1].value, 1.3) @@ -670,7 +700,7 @@ def test_dim(self): """Test dim method""" self.model.x = Var(self.model.A) self.instance = self.model.create_instance() - self.assertEqual(self.instance.x.dim(),1) + self.assertEqual(self.instance.x.dim(), 1) def test_keys(self): """Test keys method""" @@ -678,9 +708,9 @@ def test_keys(self): self.model.y = Var(self.model.A, dense=True) self.model.z = Var(self.model.A) self.instance = self.model.create_instance() - self.assertEqual(set(self.instance.x.keys()),set()) - self.assertEqual(set(self.instance.y.keys()),set([1,2])) - self.assertEqual(set(self.instance.z.keys()),set([1,2])) + self.assertEqual(set(self.instance.x.keys()), set()) + self.assertEqual(set(self.instance.y.keys()), set([1, 2])) + self.assertEqual(set(self.instance.z.keys()), set([1, 2])) def test_len(self): """Test len method""" @@ -688,27 +718,26 @@ def test_len(self): self.model.y = Var(self.model.A, dense=True) self.model.z = Var(self.model.A) self.instance = self.model.create_instance() - self.assertEqual(len(self.instance.x),0) - self.assertEqual(len(self.instance.y),2) - self.assertEqual(len(self.instance.z),2) + self.assertEqual(len(self.instance.x), 0) + self.assertEqual(len(self.instance.y), 2) + self.assertEqual(len(self.instance.z), 2) def test_value(self): """Check the value of the variable""" - self.model.x = Var(self.model.A,initialize=3.3) + self.model.x = Var(self.model.A, initialize=3.3) self.instance = self.model.create_instance() tmp = value(self.instance.x[1].value) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 3.3 ) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 3.3) tmp = float(self.instance.x[1].value) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 3.3 ) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 3.3) tmp = int(self.instance.x[1].value) - self.assertEqual( type(tmp), int) - self.assertEqual( tmp, 3 ) + self.assertEqual(type(tmp), int) + self.assertEqual(tmp, 3) class TestVarList(PyomoModel): - def setUp(self): # # Create Model @@ -728,7 +757,7 @@ def test_fixed_attr(self): self.instance.y.add() self.instance.x.fixed = True self.assertEqual(self.instance.x[1].fixed, False) - self.instance.y[1].fixed=True + self.instance.y[1].fixed = True self.assertEqual(self.instance.y[1].fixed, True) def test_value_attr(self): @@ -752,9 +781,11 @@ def test_value_attr(self): def test_initialize_with_function(self): """Test initialize option with an initialization rule""" + def init_rule(model, key): - i = key+11 + i = key + 11 return key == 1 and 1.3 or 2.3 + self.model.x = VarList(initialize=init_rule) self.instance = self.model.create_instance() self.instance.x.add() @@ -769,7 +800,7 @@ def init_rule(model, key): def test_initialize_with_dict(self): """Test initialize option with a dictionary""" - self.model.x = VarList(initialize={1:1.3,2:2.3}) + self.model.x = VarList(initialize={1: 1.3, 2: 2.3}) self.instance = self.model.create_instance() self.assertEqual(self.instance.x[1].value, 1.3) self.assertEqual(self.instance.x[2].value, 2.3) @@ -780,10 +811,12 @@ def test_initialize_with_dict(self): def test_initialize_with_bad_dict(self): """Test initialize option with a dictionary of subkeys""" - self.model.x = VarList(initialize={0:1.3}) + self.model.x = VarList(initialize={0: 1.3}) self.assertRaisesRegex( - KeyError, ".*Index '0' is not valid for indexed component 'x'", - self.model.create_instance ) + KeyError, + ".*Index '0' is not valid for indexed component 'x'", + self.model.create_instance, + ) def test_initialize_with_const(self): """Test initialize option with a constant""" @@ -806,17 +839,19 @@ def test_without_initial_value(self): self.instance.x.add() self.instance.x.add() self.instance.x.add() - self.assertEqual(self.instance.x[1].value,None) - self.assertEqual(self.instance.x[2].value,None) + self.assertEqual(self.instance.x[1].value, None) + self.assertEqual(self.instance.x[2].value, None) self.instance.x[1] = 5 self.instance.x[2] = 6 - self.assertEqual(self.instance.x[1].value,5) - self.assertEqual(self.instance.x[2].value,6) + self.assertEqual(self.instance.x[1].value, 5) + self.assertEqual(self.instance.x[2].value, 6) def test_bounds_option1(self): """Test bounds option""" + def x_bounds(model, i): - return (-1.0,1.0) + return (-1.0, 1.0) + self.model.x = VarList(bounds=x_bounds) self.instance = self.model.create_instance() self.instance.x.add() @@ -826,7 +861,7 @@ def x_bounds(model, i): def test_bounds_option2(self): """Test bounds option""" - self.model.x = VarList(bounds=(-1.0,1.0)) + self.model.x = VarList(bounds=(-1.0, 1.0)) self.instance = self.model.create_instance() self.instance.x.add() self.instance.x.add() @@ -835,8 +870,10 @@ def test_bounds_option2(self): def test_rule_option(self): """Test rule option""" + def x_init(model, i): return 1.3 + self.model.x = VarList(initialize=x_init) self.instance = self.model.create_instance() self.instance.x.add() @@ -861,6 +898,7 @@ def x_domain(model, i): return Reals elif i == 3: return Integers + self.model.x = VarList(domain=x_domain) self.instance = self.model.create_instance() self.instance.x.add() @@ -886,6 +924,7 @@ def x_domain(model): yield NonNegativeReals yield Reals yield Integers + self.model.x = VarList(domain=x_domain) self.instance = self.model.create_instance() self.instance.x.add() @@ -906,7 +945,7 @@ def test_dim(self): self.model.x = VarList() self.instance = self.model.create_instance() self.instance.x.add() - self.assertEqual(self.instance.x.dim(),1) + self.assertEqual(self.instance.x.dim(), 1) def test_keys(self): """Test keys method""" @@ -914,7 +953,7 @@ def test_keys(self): self.instance = self.model.create_instance() self.instance.x.add() self.instance.x.add() - self.assertEqual(set(self.instance.x.keys()),set([1,2])) + self.assertEqual(set(self.instance.x.keys()), set([1, 2])) def test_len(self): """Test len method""" @@ -922,7 +961,7 @@ def test_len(self): self.instance = self.model.create_instance() self.instance.x.add() self.instance.x.add() - self.assertEqual(len(self.instance.x),2) + self.assertEqual(len(self.instance.x), 2) def test_value(self): """Check the value of the variable""" @@ -931,14 +970,14 @@ def test_value(self): self.instance.x.add() self.instance.x.add() tmp = value(self.instance.x[1].value) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 3.3 ) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 3.3) tmp = float(self.instance.x[1].value) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 3.3 ) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 3.3) tmp = int(self.instance.x[1].value) - self.assertEqual( type(tmp), int) - self.assertEqual( tmp, 3 ) + self.assertEqual(type(tmp), int) + self.assertEqual(tmp, 3) def test_0based_add(self): m = ConcreteModel() @@ -952,15 +991,16 @@ def test_0based_add(self): def test_0based_initialize_with_dict(self): """Test initialize option with a dictionary""" - self.model.x = VarList(initialize={1:1.3,2:2.3}, starting_index=0) + self.model.x = VarList(initialize={1: 1.3, 2: 2.3}, starting_index=0) self.assertRaisesRegex( - KeyError, ".*Index '2' is not valid for indexed component 'x'", - self.model.create_instance + KeyError, + ".*Index '2' is not valid for indexed component 'x'", + self.model.create_instance, ) def test_0based_initialize_with_bad_dict(self): """Test initialize option with a dictionary of subkeys""" - self.model.x = VarList(initialize={0:1.3, 1:2.3}, starting_index=0) + self.model.x = VarList(initialize={0: 1.3, 1: 2.3}, starting_index=0) self.instance = self.model.create_instance() self.assertEqual(self.instance.x[0].value, 1.3) self.assertEqual(self.instance.x[1].value, 2.3) @@ -973,212 +1013,231 @@ def test_0based_initialize_with_bad_dict(self): class Test2DArrayVar(TestSimpleVar): - def setUp(self): # # Create Model # PyomoModel.setUp(self) - self.model.A = Set(initialize=[1,2]) + self.model.A = Set(initialize=[1, 2]) def test_fixed_attr(self): """Test fixed attribute""" - self.model.x = Var(self.model.A,self.model.A) - self.model.y = Var(self.model.A,self.model.A) + self.model.x = Var(self.model.A, self.model.A) + self.model.y = Var(self.model.A, self.model.A) self.instance = self.model.create_instance() self.instance.x.fixed = True - self.assertEqual(self.instance.x[1,2].fixed, False) - self.instance.y[1,2].fixed=True - self.assertEqual(self.instance.y[1,2].fixed, True) + self.assertEqual(self.instance.x[1, 2].fixed, False) + self.instance.y[1, 2].fixed = True + self.assertEqual(self.instance.y[1, 2].fixed, True) def test_value_attr(self): """Test value attribute""" - self.model.x = Var(self.model.A,self.model.A, dense=True) - self.model.y = Var(self.model.A,self.model.A, dense=True) + self.model.x = Var(self.model.A, self.model.A, dense=True) + self.model.y = Var(self.model.A, self.model.A, dense=True) self.instance = self.model.create_instance() try: self.instance.x = 3.5 self.fail("Expected ValueError") except ValueError: pass - self.instance.y[1,2] = 3.5 - self.assertEqual(self.instance.y[1,2].value, 3.5) - - #def test_lb_attr(self): - #"""Test lb attribute""" - #self.model.x = Var(self.model.A,self.model.A) - #self.instance = self.model.create_instance() - #self.instance.x.setlb(-1.0) - #self.assertEqual(value(self.instance.x[2,1].lb), -1.0) - - #def test_ub_attr(self): - #"""Test ub attribute""" - #self.model.x = Var(self.model.A,self.model.A) - #self.instance = self.model.create_instance() - #self.instance.x.setub(1.0) - #self.assertEqual(value(self.instance.x[2,1].ub), 1.0) + self.instance.y[1, 2] = 3.5 + self.assertEqual(self.instance.y[1, 2].value, 3.5) + + # def test_lb_attr(self): + # """Test lb attribute""" + # self.model.x = Var(self.model.A,self.model.A) + # self.instance = self.model.create_instance() + # self.instance.x.setlb(-1.0) + # self.assertEqual(value(self.instance.x[2,1].lb), -1.0) + + # def test_ub_attr(self): + # """Test ub attribute""" + # self.model.x = Var(self.model.A,self.model.A) + # self.instance = self.model.create_instance() + # self.instance.x.setub(1.0) + # self.assertEqual(value(self.instance.x[2,1].ub), 1.0) def test_initialize_with_function(self): """Test initialize option with an initialization rule""" + def init_rule(model, key1, key2): - i = key1+1 + i = key1 + 1 return key1 == 1 and 1.3 or 2.3 - self.model.x = Var(self.model.A,self.model.A,initialize=init_rule) + + self.model.x = Var(self.model.A, self.model.A, initialize=init_rule) self.instance = self.model.create_instance() - self.assertEqual(self.instance.x[1,1].value, 1.3) - self.assertEqual(self.instance.x[2,2].value, 2.3) - self.instance.x[1,1] = 1 - self.instance.x[2,2] = 2 - self.assertEqual(self.instance.x[1,1].value, 1) - self.assertEqual(self.instance.x[2,2].value, 2) + self.assertEqual(self.instance.x[1, 1].value, 1.3) + self.assertEqual(self.instance.x[2, 2].value, 2.3) + self.instance.x[1, 1] = 1 + self.instance.x[2, 2] = 2 + self.assertEqual(self.instance.x[1, 1].value, 1) + self.assertEqual(self.instance.x[2, 2].value, 2) def test_initialize_with_dict(self): """Test initialize option with a dictionary""" - self.model.x = Var(self.model.A,self.model.A, - initialize={(1,1):1.3,(2,2):2.3}) + self.model.x = Var( + self.model.A, self.model.A, initialize={(1, 1): 1.3, (2, 2): 2.3} + ) self.instance = self.model.create_instance() - self.assertEqual(self.instance.x[1,1].value, 1.3) - self.assertEqual(self.instance.x[2,2].value, 2.3) - self.instance.x[1,1] = 1 - self.instance.x[2,2] = 2 - self.assertEqual(self.instance.x[1,1].value, 1) - self.assertEqual(self.instance.x[2,2].value, 2) + self.assertEqual(self.instance.x[1, 1].value, 1.3) + self.assertEqual(self.instance.x[2, 2].value, 2.3) + self.instance.x[1, 1] = 1 + self.instance.x[2, 2] = 2 + self.assertEqual(self.instance.x[1, 1].value, 1) + self.assertEqual(self.instance.x[2, 2].value, 2) def test_initialize_with_const(self): """Test initialize option with a constant""" - self.model.x = Var(self.model.A,self.model.A,initialize=3) + self.model.x = Var(self.model.A, self.model.A, initialize=3) self.instance = self.model.create_instance() - self.assertEqual(self.instance.x[1,1].value, 3) - self.assertEqual(self.instance.x[2,2].value, 3) - self.instance.x[1,1] = 1 - self.instance.x[2,2] = 2 - self.assertEqual(self.instance.x[1,1].value, 1) - self.assertEqual(self.instance.x[2,2].value, 2) + self.assertEqual(self.instance.x[1, 1].value, 3) + self.assertEqual(self.instance.x[2, 2].value, 3) + self.instance.x[1, 1] = 1 + self.instance.x[2, 2] = 2 + self.assertEqual(self.instance.x[1, 1].value, 1) + self.assertEqual(self.instance.x[2, 2].value, 2) def test_without_initial_value(self): """Test default initialization""" - self.model.x = Var(self.model.A,self.model.A) + self.model.x = Var(self.model.A, self.model.A) self.instance = self.model.create_instance() - self.assertEqual(self.instance.x[1,1].value,None) - self.assertEqual(self.instance.x[2,2].value,None) - self.instance.x[1,1] = 5 - self.instance.x[2,2] = 6 - self.assertEqual(self.instance.x[1,1].value,5) - self.assertEqual(self.instance.x[2,2].value,6) + self.assertEqual(self.instance.x[1, 1].value, None) + self.assertEqual(self.instance.x[2, 2].value, None) + self.instance.x[1, 1] = 5 + self.instance.x[2, 2] = 6 + self.assertEqual(self.instance.x[1, 1].value, 5) + self.assertEqual(self.instance.x[2, 2].value, 6) def test_initialize_option(self): """Test initialize option""" - self.model.x = Var(self.model.A,self.model.A,initialize={(1,1):1.3,(2,2):2.3}) + self.model.x = Var( + self.model.A, self.model.A, initialize={(1, 1): 1.3, (2, 2): 2.3} + ) self.instance = self.model.create_instance() - self.assertEqual(self.instance.x[1,1].value, 1.3) - self.assertEqual(self.instance.x[2,2].value, 2.3) + self.assertEqual(self.instance.x[1, 1].value, 1.3) + self.assertEqual(self.instance.x[2, 2].value, 2.3) try: - value(self.instance.x[1,2]) + value(self.instance.x[1, 2]) self.fail("Expected ValueError") except ValueError: pass def test_bounds_option1(self): """Test bounds option""" + def x_bounds(model, i, j): - return (-1.0*(i+j),1.0*(i+j)) + return (-1.0 * (i + j), 1.0 * (i + j)) + self.model.x = Var(self.model.A, self.model.A, bounds=x_bounds) self.instance = self.model.create_instance() - self.assertEqual(value(self.instance.x[1,1].lb), -2.0) - self.assertEqual(value(self.instance.x[1,2].ub), 3.0) + self.assertEqual(value(self.instance.x[1, 1].lb), -2.0) + self.assertEqual(value(self.instance.x[1, 2].ub), 3.0) def test_bounds_option2(self): """Test bounds option""" - self.model.x = Var(self.model.A, self.model.A, bounds=(-1.0,1.0)) + self.model.x = Var(self.model.A, self.model.A, bounds=(-1.0, 1.0)) self.instance = self.model.create_instance() - self.assertEqual(value(self.instance.x[1,1].lb), -1.0) - self.assertEqual(value(self.instance.x[1,1].ub), 1.0) + self.assertEqual(value(self.instance.x[1, 1].lb), -1.0) + self.assertEqual(value(self.instance.x[1, 1].ub), 1.0) def test_rule_option(self): """Test rule option""" + def x_init(model, i, j): return 1.3 + self.model.x = Var(self.model.A, self.model.A, initialize=x_init) self.instance = self.model.create_instance() - self.assertEqual(self.instance.x[1,2].value, 1.3) + self.assertEqual(self.instance.x[1, 2].value, 1.3) def test_dim(self): """Test dim method""" - self.model.x = Var(self.model.A,self.model.A) + self.model.x = Var(self.model.A, self.model.A) self.instance = self.model.create_instance() - self.assertEqual(self.instance.x.dim(),2) + self.assertEqual(self.instance.x.dim(), 2) def test_keys(self): """Test keys method""" - self.model.x = Var(self.model.A,self.model.A, dense=True) + self.model.x = Var(self.model.A, self.model.A, dense=True) self.instance = self.model.create_instance() - ans = [(1,1),(1,2),(2,1),(2,2)] - self.assertEqual(list(sorted(self.instance.x.keys())),ans) + ans = [(1, 1), (1, 2), (2, 1), (2, 2)] + self.assertEqual(list(sorted(self.instance.x.keys())), ans) def test_len(self): """Test len method""" - self.model.x = Var(self.model.A,self.model.A, dense=True) + self.model.x = Var(self.model.A, self.model.A, dense=True) self.instance = self.model.create_instance() - self.assertEqual(len(self.instance.x),4) + self.assertEqual(len(self.instance.x), 4) def test_value(self): """Check the value of the variable""" - self.model.x = Var(self.model.A,self.model.A,initialize=3.3) + self.model.x = Var(self.model.A, self.model.A, initialize=3.3) self.instance = self.model.create_instance() - tmp = value(self.instance.x[1,1].value) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 3.3 ) - tmp = float(self.instance.x[1,1].value) - self.assertEqual( type(tmp), float) - self.assertEqual( tmp, 3.3 ) - tmp = int(self.instance.x[1,1].value) - self.assertEqual( type(tmp), int) - self.assertEqual( tmp, 3 ) + tmp = value(self.instance.x[1, 1].value) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 3.3) + tmp = float(self.instance.x[1, 1].value) + self.assertEqual(type(tmp), float) + self.assertEqual(tmp, 3.3) + tmp = int(self.instance.x[1, 1].value) + self.assertEqual(type(tmp), int) + self.assertEqual(tmp, 3) class TestVarComplexArray(PyomoModel): - def test_index1(self): - self.model.A = Set(initialize=range(0,4)) + self.model.A = Set(initialize=range(0, 4)) + def B_index(model): for i in model.A: - if i%2 == 0: + if i % 2 == 0: yield i + def B_init(model, i, j): if j: - return 2+i - return -(2+i) - self.model.B = Var(B_index, [True,False], initialize=B_init, dense=True) + return 2 + i + return -(2 + i) + + self.model.B = Var(B_index, [True, False], initialize=B_init, dense=True) self.instance = self.model.create_instance() - self.assertEqual(set(self.instance.B.keys()),set([(0,True),(2,True),(0,False),(2,False)])) - self.assertEqual(self.instance.B[0,True].value,2) - self.assertEqual(self.instance.B[0,False].value,-2) - self.assertEqual(self.instance.B[2,True].value,4) - self.assertEqual(self.instance.B[2,False].value,-4) + self.assertEqual( + set(self.instance.B.keys()), + set([(0, True), (2, True), (0, False), (2, False)]), + ) + self.assertEqual(self.instance.B[0, True].value, 2) + self.assertEqual(self.instance.B[0, False].value, -2) + self.assertEqual(self.instance.B[2, True].value, 4) + self.assertEqual(self.instance.B[2, False].value, -4) def test_index2(self): - self.model.A = Set(initialize=range(0,4)) + self.model.A = Set(initialize=range(0, 4)) + def B_index(model): for i in model.A: - if i%2 == 0: - yield i-1, i - B_index.dimen=2 + if i % 2 == 0: + yield i - 1, i + + B_index.dimen = 2 + def B_init(model, k, i, j): if j: - return (2+i)*k - return -(2+i)*k - self.model.B = Var(B_index, [True,False], initialize=B_init, dense=True) + return (2 + i) * k + return -(2 + i) * k + + self.model.B = Var(B_index, [True, False], initialize=B_init, dense=True) self.instance = self.model.create_instance() - #self.instance.pprint() - self.assertEqual(set(self.instance.B.keys()),set([(-1,0,True),(1,2,True),(-1,0,False),(1,2,False)])) - self.assertEqual(self.instance.B[-1,0,True].value,-2) - self.assertEqual(self.instance.B[-1,0,False].value,2) - self.assertEqual(self.instance.B[1,2,True].value,4) - self.assertEqual(self.instance.B[1,2,False].value,-4) + # self.instance.pprint() + self.assertEqual( + set(self.instance.B.keys()), + set([(-1, 0, True), (1, 2, True), (-1, 0, False), (1, 2, False)]), + ) + self.assertEqual(self.instance.B[-1, 0, True].value, -2) + self.assertEqual(self.instance.B[-1, 0, False].value, 2) + self.assertEqual(self.instance.B[1, 2, True].value, 4) + self.assertEqual(self.instance.B[1, 2, False].value, -4) class MiscVarTests(unittest.TestCase): - def test_error1(self): a = Var(name="a") try: @@ -1195,55 +1254,55 @@ def test_getattr1(self): model.a = Var() model.suffix = Suffix(datatype=Suffix.INT) instance = model.create_instance() - self.assertEqual(instance.suffix.get(instance.a),None) - instance.suffix.set_value(instance.a,True) - self.assertEqual(instance.suffix.get(instance.a),True) + self.assertEqual(instance.suffix.get(instance.a), None) + instance.suffix.set_value(instance.a, True) + self.assertEqual(instance.suffix.get(instance.a), True) def test_getattr2(self): """ Verify the behavior of non-standard suffixes with an array of variables """ model = AbstractModel() - model.X = Set(initialize=[1,3,5]) + model.X = Set(initialize=[1, 3, 5]) model.a = Var(model.X) model.suffix = Suffix(datatype=Suffix.INT) try: - self.assertEqual(model.a.suffix,None) + self.assertEqual(model.a.suffix, None) self.fail("Expected AttributeError") except AttributeError: pass instance = model.create_instance() - self.assertEqual(instance.suffix.get(instance.a[1]),None) + self.assertEqual(instance.suffix.get(instance.a[1]), None) instance.suffix.set_value(instance.a[1], True) - self.assertEqual(instance.suffix.get(instance.a[1]),True) + self.assertEqual(instance.suffix.get(instance.a[1]), True) def test_error2(self): try: - model=AbstractModel() - model.a = Var(initialize=[1,2,3]) + model = AbstractModel() + model.a = Var(initialize=[1, 2, 3]) model.b = Var(model.a) self.fail("test_error2") except TypeError: pass def test_contains(self): - model=AbstractModel() - model.a = Set(initialize=[1,2,3]) + model = AbstractModel() + model.a = Set(initialize=[1, 2, 3]) model.b = Var(model.a, dense=True) instance = model.create_instance() - self.assertEqual(1 in instance.b,True) + self.assertEqual(1 in instance.b, True) def test_float_int(self): - model=AbstractModel() - model.a = Set(initialize=[1,2,3]) - model.b = Var(model.a,initialize=1.1) + model = AbstractModel() + model.a = Set(initialize=[1, 2, 3]) + model.b = Var(model.a, initialize=1.1) model.c = Var(initialize=2.1) model.d = Var() instance = model.create_instance() - self.assertEqual(float(value(instance.b[1])),1.1) - self.assertEqual(int(value(instance.b[1])),1) - self.assertEqual(float(value(instance.c)),2.1) - self.assertEqual(int(value(instance.c)),2) + self.assertEqual(float(value(instance.b[1])), 1.1) + self.assertEqual(int(value(instance.b[1])), 1) + self.assertEqual(float(value(instance.c)), 2.1) + self.assertEqual(int(value(instance.c)), 2) try: float(instance.d) self.fail("expected TypeError") @@ -1266,25 +1325,28 @@ def test_float_int(self): pass def test_set_get(self): - model=AbstractModel() - model.a = Set(initialize=[1,2,3]) + model = AbstractModel() + model.a = Set(initialize=[1, 2, 3]) model.b = Var(model.a, initialize=1.1, within=PositiveReals) model.c = Var(initialize=2.1, within=PositiveReals, bounds=(1, 10)) with self.assertRaisesRegex( - ValueError, "Cannot set the value for the indexed " - "component 'b' without specifying an index value"): + ValueError, + "Cannot set the value for the indexed " + "component 'b' without specifying an index value", + ): model.b = 2.2 instance = model.create_instance() with self.assertRaisesRegex( - KeyError, "Cannot treat the scalar component 'c' " - "as an indexed component"): - instance.c[1]=2.2 + KeyError, "Cannot treat the scalar component 'c' as an indexed component" + ): + instance.c[1] = 2.2 - instance.b[1]=2.2 + instance.b[1] = 2.2 with self.assertRaisesRegex( - KeyError, "Index '4' is not valid for indexed component 'b'"): - instance.b[4]=2.2 + KeyError, "Index '4' is not valid for indexed component 'b'" + ): + instance.b[4] = 2.2 with LoggingIntercept() as LOG: instance.b[3] = -2.2 @@ -1295,29 +1357,26 @@ def test_set_get(self): ) with self.assertRaisesRegex( - KeyError, "Cannot treat the scalar component 'c' " - "as an indexed component"): + KeyError, "Cannot treat the scalar component 'c' as an indexed component" + ): tmp = instance.c[3] with LoggingIntercept() as LOG: instance.c = 'a' self.assertEqual( LOG.getvalue().strip(), - "Setting Var 'c' to a value `a` (str) " - "not in domain PositiveReals.", + "Setting Var 'c' to a value `a` (str) not in domain PositiveReals.", ) with LoggingIntercept() as LOG: instance.c = -2.2 self.assertEqual( LOG.getvalue().strip(), - "Setting Var 'c' to a value `-2.2` (float) " - "not in domain PositiveReals.", + "Setting Var 'c' to a value `-2.2` (float) not in domain PositiveReals.", ) with LoggingIntercept() as LOG: instance.c = 11 self.assertEqual( LOG.getvalue().strip(), - "Setting Var 'c' to a numeric value `11` " - "outside the bounds (1, 10).", + "Setting Var 'c' to a numeric value `11` outside the bounds (1, 10).", ) with LoggingIntercept() as LOG: @@ -1329,40 +1388,39 @@ def test_set_get(self): self.assertEqual(LOG.getvalue(), "") self.assertEqual(instance.c.value, -1) - #try: - #instance.c.ub = 'a' - #self.fail("can't set a bad ub for variable c") - #except ValueError: - #pass - #try: - #instance.c.ub = -1.0 - #self.fail("can't set a bad ub for variable c") - #except ValueError: - #pass - - #try: - #instance.c.fixed = 'a' - #self.fail("can't fix a variable with a non-boolean") - #except ValueError: - #pass + # try: + # instance.c.ub = 'a' + # self.fail("can't set a bad ub for variable c") + # except ValueError: + # pass + # try: + # instance.c.ub = -1.0 + # self.fail("can't set a bad ub for variable c") + # except ValueError: + # pass + + # try: + # instance.c.fixed = 'a' + # self.fail("can't fix a variable with a non-boolean") + # except ValueError: + # pass def test_set_index(self): - model = ConcreteModel() - model.s = Set(initialize=[1,2,3]) - model.x = Var(model.s,initialize=0, dense=True) + model.s = Set(initialize=[1, 2, 3]) + model.x = Var(model.s, initialize=0, dense=True) # test proper instantiation - self.assertEqual(len(model.x),3) + self.assertEqual(len(model.x), 3) for i in model.s: - self.assertEqual(value(model.x[i]),0) + self.assertEqual(value(model.x[i]), 0) # test mutability of index set model.s.add(4) - self.assertEqual(len(model.x),3) + self.assertEqual(len(model.x), 3) for i in model.s: - self.assertEqual(value(model.x[i]),0) - self.assertEqual(len(model.x),4) + self.assertEqual(value(model.x[i]), 0) + self.assertEqual(len(model.x), 4) def test_simple_default_domain(self): model = ConcreteModel() @@ -1450,62 +1508,60 @@ def test_list_bad_nondefault_domain_rule(self): model.x.add() def test_setdata_index(self): - model = ConcreteModel() model.sindex = Set(initialize=[1]) - model.s = Set(model.sindex,initialize=[1,2,3]) - model.x = Var(model.s[1],initialize=0, dense=True) + model.s = Set(model.sindex, initialize=[1, 2, 3]) + model.x = Var(model.s[1], initialize=0, dense=True) # test proper instantiation - self.assertEqual(len(model.x),3) + self.assertEqual(len(model.x), 3) for i in model.s[1]: - self.assertEqual(value(model.x[i]),0) + self.assertEqual(value(model.x[i]), 0) # test mutability of index set newIdx = 4 - self.assertFalse( newIdx in model.s[1] ) - self.assertFalse( newIdx in model.x ) + self.assertFalse(newIdx in model.s[1]) + self.assertFalse(newIdx in model.x) model.s[1].add(newIdx) - self.assertTrue( newIdx in model.s[1] ) - self.assertFalse( newIdx in model.x ) + self.assertTrue(newIdx in model.s[1]) + self.assertFalse(newIdx in model.x) - self.assertEqual(len(model.x),3) + self.assertEqual(len(model.x), 3) for i in model.s[1]: - self.assertEqual(value(model.x[i]),0) - self.assertEqual(len(model.x),4) + self.assertEqual(value(model.x[i]), 0) + self.assertEqual(len(model.x), 4) - self.assertTrue( newIdx in model.s[1] ) - self.assertTrue( newIdx in model.x ) + self.assertTrue(newIdx in model.s[1]) + self.assertTrue(newIdx in model.x) def test_setdata_multidimen_index(self): - model = ConcreteModel() model.sindex = Set(initialize=[1]) - model.s = Set(model.sindex,dimen=2,initialize=[(1,1),(1,2),(1,3)]) - model.x = Var(model.s[1],initialize=0, dense=True) + model.s = Set(model.sindex, dimen=2, initialize=[(1, 1), (1, 2), (1, 3)]) + model.x = Var(model.s[1], initialize=0, dense=True) # test proper instantiation - self.assertEqual(len(model.x),3) + self.assertEqual(len(model.x), 3) for i in model.s[1]: - self.assertEqual(value(model.x[i]),0) + self.assertEqual(value(model.x[i]), 0) # test mutability of index set - newIdx = (1,4) - self.assertFalse( newIdx in model.s[1] ) - self.assertFalse( newIdx in model.x ) + newIdx = (1, 4) + self.assertFalse(newIdx in model.s[1]) + self.assertFalse(newIdx in model.x) model.s[1].add(newIdx) - self.assertTrue( newIdx in model.s[1] ) - self.assertFalse( newIdx in model.x ) + self.assertTrue(newIdx in model.s[1]) + self.assertFalse(newIdx in model.x) - self.assertEqual(len(model.x),3) + self.assertEqual(len(model.x), 3) for i in model.s[1]: - self.assertEqual(value(model.x[i]),0) - self.assertEqual(len(model.x),4) + self.assertEqual(value(model.x[i]), 0) + self.assertEqual(len(model.x), 4) - self.assertTrue( newIdx in model.s[1] ) - self.assertTrue( newIdx in model.x ) + self.assertTrue(newIdx in model.s[1]) + self.assertTrue(newIdx in model.x) def test_abstract_index(self): model = AbstractModel() @@ -1520,25 +1576,28 @@ def test_set_value_units(self): m.x = Var(units=units.g) m.x = 5 self.assertEqual(value(m.x), 5) - m.x = 6*units.g + m.x = 6 * units.g self.assertEqual(value(m.x), 6) m.x = None self.assertIsNone(m.x.value, None) - m.x = 7*units.kg + m.x = 7 * units.kg self.assertEqual(value(m.x), 7000) with self.assertRaises(UnitsError): - m.x = 1*units.s + m.x = 1 * units.s out = StringIO() m.pprint(ostream=out) - self.assertEqual(out.getvalue().strip(), """ + self.assertEqual( + out.getvalue().strip(), + """ 1 Var Declarations x : Size=1, Index=None, Units=g Key : Lower : Value : Upper : Fixed : Stale : Domain None : None : 7000.0 : None : False : False : Reals 1 Declarations: x - """.strip()) + """.strip(), + ) @unittest.skipUnless(pint_available, "units test requires pint module") def test_set_bounds_units(self): @@ -1547,12 +1606,12 @@ def test_set_bounds_units(self): m.p = Param(mutable=True, initialize=1, units=units.kg) m.x.setlb(5) self.assertEqual(m.x.lb, 5) - m.x.setlb(6*units.g) + m.x.setlb(6 * units.g) self.assertEqual(m.x.lb, 6) - m.x.setlb(7*units.kg) + m.x.setlb(7 * units.kg) self.assertEqual(m.x.lb, 7000) with self.assertRaises(UnitsError): - m.x.setlb(1*units.s) + m.x.setlb(1 * units.s) m.x.setlb(m.p) self.assertEqual(m.x.lb, 1000) m.p = 2 * units.kg @@ -1560,12 +1619,12 @@ def test_set_bounds_units(self): m.x.setub(2) self.assertEqual(m.x.ub, 2) - m.x.setub(3*units.g) + m.x.setub(3 * units.g) self.assertEqual(m.x.ub, 3) - m.x.setub(4*units.kg) + m.x.setub(4 * units.kg) self.assertEqual(m.x.ub, 4000) with self.assertRaises(UnitsError): - m.x.setub(1*units.s) + m.x.setub(1 * units.s) m.x.setub(m.p) self.assertEqual(m.x.ub, 2000) m.p = 3 * units.kg @@ -1598,5 +1657,66 @@ def test_stale(self): self.assertFalse(m.x.stale) self.assertFalse(m.y.stale) + def test_stale_clone(self): + m = ConcreteModel() + m.x = Var(initialize=0) + self.assertFalse(m.x.stale) + m.y = Var() + self.assertTrue(m.y.stale) + m.z = Var(initialize=0) + self.assertFalse(m.z.stale) + + i = m.clone() + self.assertFalse(i.x.stale) + self.assertTrue(i.y.stale) + self.assertFalse(i.z.stale) + + StaleFlagManager.mark_all_as_stale(delayed=True) + m.z = 5 + i = m.clone() + self.assertTrue(i.x.stale) + self.assertTrue(i.y.stale) + self.assertFalse(i.z.stale) + + def test_domain_categories(self): + """Test domain attribute""" + x = Var() + x.construct() + self.assertEqual(x.is_integer(), False) + self.assertEqual(x.is_binary(), False) + self.assertEqual(x.is_continuous(), True) + self.assertEqual(x.bounds, (None, None)) + x.domain = Integers + self.assertEqual(x.is_integer(), True) + self.assertEqual(x.is_binary(), False) + self.assertEqual(x.is_continuous(), False) + self.assertEqual(x.bounds, (None, None)) + x.domain = Binary + self.assertEqual(x.is_integer(), True) + self.assertEqual(x.is_binary(), True) + self.assertEqual(x.is_continuous(), False) + self.assertEqual(x.bounds, (0, 1)) + x.domain = RangeSet(0, 10, 0) + self.assertEqual(x.is_integer(), False) + self.assertEqual(x.is_binary(), False) + self.assertEqual(x.is_continuous(), True) + self.assertEqual(x.bounds, (0, 10)) + x.domain = RangeSet(0, 10, 1) + self.assertEqual(x.is_integer(), True) + self.assertEqual(x.is_binary(), False) + self.assertEqual(x.is_continuous(), False) + self.assertEqual(x.bounds, (0, 10)) + x.domain = RangeSet(0.5, 10, 1) + self.assertEqual(x.is_integer(), False) + self.assertEqual(x.is_binary(), False) + self.assertEqual(x.is_continuous(), False) + self.assertEqual(x.bounds, (0.5, 9.5)) + x.domain = RangeSet(0, 1, 1) + self.assertEqual(x.is_integer(), True) + self.assertEqual(x.is_binary(), True) + self.assertEqual(x.is_continuous(), False) + self.assertEqual(x.bounds, (0, 1)) + + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_var_set_bounds.py b/pyomo/core/tests/unit/test_var_set_bounds.py index d88b30a398a..eb969c2ca73 100644 --- a/pyomo/core/tests/unit/test_var_set_bounds.py +++ b/pyomo/core/tests/unit/test_var_set_bounds.py @@ -14,12 +14,22 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest from pyomo.opt import check_available_solvers -from pyomo.environ import ConcreteModel, RangeSet, Var, Set, Objective, Constraint, SolverFactory, AbstractModel +from pyomo.environ import ( + ConcreteModel, + RangeSet, + Var, + Set, + Objective, + Constraint, + SolverFactory, + AbstractModel, +) solvers = check_available_solvers('glpk') @@ -27,130 +37,134 @@ # to validate its domain at the time of construction. It only needs to # ensure that whatever object is passed as its domain is suitable for # interacting with the _VarData interface (e.g., has a bounds method) -# The plan is to start adding functionality to the solver interfaces +# The plan is to start adding functionality to the solver interfaces # that will support custom domains. -class TestVarSetBounds(unittest.TestCase): +class TestVarSetBounds(unittest.TestCase): @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") def Xtest_rangeset_domain(self): self.model = ConcreteModel() - self.model.s = RangeSet(3) #Set(initialize=[1,2,3]) - self.model.y = Var([1,2], within=self.model.s) - - self.model.obj = Objective(expr=self.model.y[1]-self.model.y[2]) + self.model.s = RangeSet(3) # Set(initialize=[1,2,3]) + self.model.y = Var([1, 2], within=self.model.s) + + self.model.obj = Objective(expr=self.model.y[1] - self.model.y[2]) self.model.con1 = Constraint(expr=self.model.y[1] >= 1.1) self.model.con2 = Constraint(expr=self.model.y[2] <= 2.9) - + self.instance = self.model.create_instance() self.opt = SolverFactory("glpk") self.results = self.opt.solve(self.instance) self.instance.load(self.results) - self.assertEqual(self.instance.y[1],2) - self.assertEqual(self.instance.y[2],2) - + self.assertEqual(self.instance.y[1], 2) + self.assertEqual(self.instance.y[2], 2) @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") def Xtest_pyomo_Set_domain(self): self.model = ConcreteModel() - self.model.s = Set(initialize=[1,2,3]) - self.model.y = Var([1,2], within=self.model.s) - - self.model.obj = Objective(expr=self.model.y[1]-self.model.y[2]) + self.model.s = Set(initialize=[1, 2, 3]) + self.model.y = Var([1, 2], within=self.model.s) + + self.model.obj = Objective(expr=self.model.y[1] - self.model.y[2]) self.model.con1 = Constraint(expr=self.model.y[1] >= 1.1) self.model.con2 = Constraint(expr=self.model.y[2] <= 2.9) - + self.instance = self.model.create_instance() self.opt = SolverFactory("glpk") self.results = self.opt.solve(self.instance) self.instance.load(self.results) - self.assertEqual(self.instance.y[1],2) - self.assertEqual(self.instance.y[2],2) - + self.assertEqual(self.instance.y[1], 2) + self.assertEqual(self.instance.y[2], 2) - #Bad pyomo Set for variable domain -- empty pyomo Set + # Bad pyomo Set for variable domain -- empty pyomo Set def Xtest_pyomo_Set_domain_empty(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() self.model.s = Set(initialize=[]) - self.model.y = Var([1,2], within=self.model.s) + self.model.y = Var([1, 2], within=self.model.s) - - #Bad pyomo Set for variable domain -- missing elements + # Bad pyomo Set for variable domain -- missing elements def Xtest_pyomo_Set_domain_missing(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() - self.model.s = Set(initialize=[1,4,5]) - self.model.y = Var([1,2], within=self.model.s) - + self.model.s = Set(initialize=[1, 4, 5]) + self.model.y = Var([1, 2], within=self.model.s) - #Bad pyomo Set for variable domain -- noninteger elements + # Bad pyomo Set for variable domain -- noninteger elements def Xtest_pyomo_Set_domain_duplicates(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() - self.model.s = Set(initialize=[1.7,2,3]) - self.model.y = Var([1,2], within=self.model.s) - + self.model.s = Set(initialize=[1.7, 2, 3]) + self.model.y = Var([1, 2], within=self.model.s) @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") def Xtest_pyomo_Set_dat_file_domain(self): self.model = AbstractModel() self.model.s = Set() - self.model.y = Var([1,2], within=self.model.s) - + self.model.y = Var([1, 2], within=self.model.s) + def obj_rule(model): - return sum(model.y[i]*(-1)**(i-1) for i in model.y) - self.model.obj = Objective(rule=obj_rule) #sum(self.model.y[i]*(-1)**(i-1) for i in self.model.y)) - self.model.con = Constraint([1,2],rule=lambda model, i : model.y[i]*(-1)**(i-1) >= (1.1)**(2-i) * (-2.9)**(i-1)) - - self.instance = self.model.create_instance(currdir+"vars_dat_file.dat") + return sum(model.y[i] * (-1) ** (i - 1) for i in model.y) + + self.model.obj = Objective( + rule=obj_rule + ) # sum(self.model.y[i]*(-1)**(i-1) for i in self.model.y)) + self.model.con = Constraint( + [1, 2], + rule=lambda model, i: model.y[i] * (-1) ** (i - 1) + >= (1.1) ** (2 - i) * (-2.9) ** (i - 1), + ) + + self.instance = self.model.create_instance(currdir + "vars_dat_file.dat") self.opt = SolverFactory("glpk") self.results = self.opt.solve(self.instance) self.instance.load(self.results) - self.assertEqual(self.instance.y[1],2) - self.assertEqual(self.instance.y[2],2) - + self.assertEqual(self.instance.y[1], 2) + self.assertEqual(self.instance.y[2], 2) - #Bad pyomo Set for variable domain -- empty pyomo Set + # Bad pyomo Set for variable domain -- empty pyomo Set def Xtest_pyomo_Set_dat_file_domain_empty(self): with self.assertRaises(ValueError) as cm: self.model = AbstractModel() self.model.s = Set() - self.model.y = Var([1,2], within=self.model.s) - self.instance = self.model.create_instance(currdir+"vars_dat_file_empty.dat") - + self.model.y = Var([1, 2], within=self.model.s) + self.instance = self.model.create_instance( + currdir + "vars_dat_file_empty.dat" + ) - #Bad pyomo Set for variable domain -- missing elements + # Bad pyomo Set for variable domain -- missing elements def Xtest_pyomo_Set_dat_file_domain_missing(self): with self.assertRaises(ValueError) as cm: self.model = AbstractModel() self.model.s = Set() - self.model.y = Var([1,2], within=self.model.s) - self.instance = self.model.create_instance(currdir+"vars_dat_file_missing.dat") + self.model.y = Var([1, 2], within=self.model.s) + self.instance = self.model.create_instance( + currdir + "vars_dat_file_missing.dat" + ) - - #Bad pyomo Set for variable domain -- noninteger elements + # Bad pyomo Set for variable domain -- noninteger elements def Xtest_pyomo_Set_dat_file_domain_duplicates(self): with self.assertRaises(ValueError) as cm: self.model = AbstractModel() self.model.s = Set() - self.model.y = Var([1,2], within=self.model.s) - self.instance = self.model.create_instance(currdir+"vars_dat_file_nonint.dat") - + self.model.y = Var([1, 2], within=self.model.s) + self.instance = self.model.create_instance( + currdir + "vars_dat_file_nonint.dat" + ) - #Test within=list -- this works for range() since range() returns a list + # Test within=list -- this works for range() since range() returns a list @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") def Xtest_list_domain(self): self.model = ConcreteModel() - self.model.y = Var([1,2], within=[1,2,3]) - - self.model.obj = Objective(expr=self.model.y[1]-self.model.y[2]) + self.model.y = Var([1, 2], within=[1, 2, 3]) + + self.model.obj = Objective(expr=self.model.y[1] - self.model.y[2]) self.model.con1 = Constraint(expr=self.model.y[1] >= 1.1) self.model.con2 = Constraint(expr=self.model.y[2] <= 2.9) - + self.instance = self.model.create_instance() # !! THIS SEEMS LIKE A BUG!! - mrmundt # I think it's supposed to be SolverFactory @@ -158,104 +172,93 @@ def Xtest_list_domain(self): self.results = self.opt.solve(self.instance) self.instance.load(self.results) - self.assertEqual(self.instance.y[1],2) - self.assertEqual(self.instance.y[2],2) + self.assertEqual(self.instance.y[1], 2) + self.assertEqual(self.instance.y[2], 2) - - #Bad list for variable domain -- empty list + # Bad list for variable domain -- empty list def Xtest_list_domain_empty(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() - self.model.y = Var([1,2], within=[]) - + self.model.y = Var([1, 2], within=[]) - #Bad list for variable domain -- missing elements + # Bad list for variable domain -- missing elements def Xtest_list_domain_bad_missing(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() - self.model.y = Var([1,2], within=[1,4,5]) + self.model.y = Var([1, 2], within=[1, 4, 5]) - - #Bad list for variable domain -- duplicate elements + # Bad list for variable domain -- duplicate elements def Xtest_list_domain_bad_duplicates(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() - self.model.y = Var([1,2], within=[1,1,2,3]) - + self.model.y = Var([1, 2], within=[1, 1, 2, 3]) - #Bad list for variable domain -- noninteger elements + # Bad list for variable domain -- noninteger elements def Xtest_list_domain_bad_duplicates(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() - self.model.y = Var([1,2], within=[1.7,2,3]) - + self.model.y = Var([1, 2], within=[1.7, 2, 3]) - #Test within=set() -- python native set, not pyomo Set object + # Test within=set() -- python native set, not pyomo Set object @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") def Xtest_set_domain(self): self.model = ConcreteModel() - self.model.y = Var([1,2], within=set([1,2,3])) - - self.model.obj = Objective(expr=self.model.y[1]-self.model.y[2]) + self.model.y = Var([1, 2], within=set([1, 2, 3])) + + self.model.obj = Objective(expr=self.model.y[1] - self.model.y[2]) self.model.con1 = Constraint(expr=self.model.y[1] >= 1.1) self.model.con2 = Constraint(expr=self.model.y[2] <= 2.9) - + self.instance = self.model.create_instance() self.opt = solver["glpk"] self.results = self.opt.solve(self.instance) self.instance.load(self.results) - self.assertEqual(self.instance.y[1],2) - self.assertEqual(self.instance.y[2],2) - + self.assertEqual(self.instance.y[1], 2) + self.assertEqual(self.instance.y[2], 2) - #Bad set for variable domain -- empty set + # Bad set for variable domain -- empty set def Xtest_set_domain_empty(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() - self.model.y = Var([2,2], within=set([])) + self.model.y = Var([2, 2], within=set([])) - - #Bad set for variable domain -- missing elements + # Bad set for variable domain -- missing elements def Xtest_set_domain_bad_missing(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() - self.model.y = Var([1,2], within=set([1,4,5])) - + self.model.y = Var([1, 2], within=set([1, 4, 5])) - #Bad set for variable domain -- duplicate elements + # Bad set for variable domain -- duplicate elements def Xtest_set_domain_bad_duplicates(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() - self.model.y = Var([1,2], within=set([1,1,2,3])) - + self.model.y = Var([1, 2], within=set([1, 1, 2, 3])) - #Bad set for variable domain -- noninteger elements + # Bad set for variable domain -- noninteger elements def Xtest_set_domain_bad_duplicates(self): with self.assertRaises(ValueError) as cm: self.model = ConcreteModel() - self.model.y = Var([1,2], within=set([1.7,2,3])) - + self.model.y = Var([1, 2], within=set([1.7, 2, 3])) - #Test within=xrange() + # Test within=xrange() @unittest.skipIf(not 'glpk' in solvers, "glpk solver is not available") def Xtest_rangeset_domain(self): self.model = ConcreteModel() - self.model.y = Var([1,2], within=range(4)) - - self.model.obj = Objective(expr=self.model.y[1]-self.model.y[2]) + self.model.y = Var([1, 2], within=range(4)) + + self.model.obj = Objective(expr=self.model.y[1] - self.model.y[2]) self.model.con1 = Constraint(expr=self.model.y[1] >= 1.1) self.model.con2 = Constraint(expr=self.model.y[2] <= 2.9) - + self.instance = self.model.create_instance() self.opt = solver["glpk"] self.results = self.opt.solve(self.instance) self.instance.load(self.results) - self.assertEqual(self.instance.y[1],2) - self.assertEqual(self.instance.y[2],2) + self.assertEqual(self.instance.y[1], 2) + self.assertEqual(self.instance.y[2], 2) if __name__ == "__main__": unittest.main() - diff --git a/pyomo/core/tests/unit/test_visitor.py b/pyomo/core/tests/unit/test_visitor.py index e27301ba164..b70996a13dc 100644 --- a/pyomo/core/tests/unit/test_visitor.py +++ b/pyomo/core/tests/unit/test_visitor.py @@ -19,44 +19,74 @@ import pyomo.common.unittest as unittest from pyomo.environ import ( - ConcreteModel, RangeSet, Set, Param, Var, Expression, ExternalFunction, - VarList, sum_product, inequality, quicksum, sin, tanh, value, -) -from pyomo.core.expr.numvalue import ( - native_types, nonpyomo_leaf_types, NumericConstant, + ConcreteModel, + RangeSet, + Set, + Param, + Var, + Expression, + ExternalFunction, + VarList, + sum_product, + inequality, + quicksum, + sin, + tanh, + value, ) +from pyomo.core.expr.numvalue import native_types, nonpyomo_leaf_types, NumericConstant from pyomo.core.expr.numeric_expr import ( - SumExpression, ProductExpression, - MonomialTermExpression, LinearExpression, - NPV_SumExpression, NPV_ProductExpression, NegationExpression, - NPV_NegationExpression, PowExpression, NPV_PowExpression, - MaxExpression, NPV_MaxExpression, MinExpression, NPV_MinExpression, - DivisionExpression, NPV_DivisionExpression, UnaryFunctionExpression, - NPV_UnaryFunctionExpression, AbsExpression, NPV_AbsExpression) + SumExpression, + ProductExpression, + MonomialTermExpression, + LinearExpression, + NPV_SumExpression, + NPV_ProductExpression, + NegationExpression, + NPV_NegationExpression, + PowExpression, + NPV_PowExpression, + MaxExpression, + NPV_MaxExpression, + MinExpression, + NPV_MinExpression, + DivisionExpression, + NPV_DivisionExpression, + UnaryFunctionExpression, + NPV_UnaryFunctionExpression, + AbsExpression, + NPV_AbsExpression, +) from pyomo.core.expr.visitor import ( - FixedExpressionError, NonConstantExpressionError, - StreamBasedExpressionVisitor, ExpressionReplacementVisitor, - evaluate_expression, expression_to_string, replace_expressions, + FixedExpressionError, + NonConstantExpressionError, + StreamBasedExpressionVisitor, + ExpressionReplacementVisitor, + evaluate_expression, + expression_to_string, + replace_expressions, sizeof_expression, - identify_variables, identify_components, identify_mutable_parameters, - RECURSION_LIMIT, get_stack_depth, + identify_variables, + identify_components, + identify_mutable_parameters, + RECURSION_LIMIT, + get_stack_depth, ) from pyomo.core.base.param import _ParamData, ScalarParam from pyomo.core.expr.template_expr import IndexTemplate -from pyomo.core.expr.expr_errors import TemplateExpressionError from pyomo.common.collections import ComponentSet +from pyomo.common.errors import TemplateExpressionError from pyomo.common.log import LoggingIntercept from io import StringIO -from pyomo.core.expr.compare import compare_expressions +from pyomo.core.expr.compare import assertExpressionsEqual class TestExpressionUtilities(unittest.TestCase): - def test_identify_vars_numeric(self): # # There are no variables in a constant expression # - self.assertEqual( list(identify_variables(5)), [] ) + self.assertEqual(list(identify_variables(5)), []) def test_identify_vars_params(self): m = ConcreteModel() @@ -66,14 +96,14 @@ def test_identify_vars_params(self): # # There are no variables in expressions with only parameters # - self.assertEqual( list(identify_variables(m.a)), [] ) - self.assertEqual( list(identify_variables(m.b[1])), [] ) - self.assertEqual( list(identify_variables(m.a+m.b[1])), [] ) - self.assertEqual( list(identify_variables(m.a**m.b[1])), [] ) - self.assertEqual( list(identify_variables( - m.a**m.b[1] + m.b[2])), [] ) - self.assertEqual( list(identify_variables( - m.a**m.b[1] + m.b[2]*m.b[3]*m.b[2])), [] ) + self.assertEqual(list(identify_variables(m.a)), []) + self.assertEqual(list(identify_variables(m.b[1])), []) + self.assertEqual(list(identify_variables(m.a + m.b[1])), []) + self.assertEqual(list(identify_variables(m.a ** m.b[1])), []) + self.assertEqual(list(identify_variables(m.a ** m.b[1] + m.b[2])), []) + self.assertEqual( + list(identify_variables(m.a ** m.b[1] + m.b[2] * m.b[3] * m.b[2])), [] + ) def test_identify_duplicate_vars(self): # @@ -82,10 +112,9 @@ def test_identify_duplicate_vars(self): m = ConcreteModel() m.a = Var(initialize=1) - #self.assertEqual( list(identify_variables(2*m.a+2*m.a, allow_duplicates=True)), + # self.assertEqual( list(identify_variables(2*m.a+2*m.a, allow_duplicates=True)), # [ m.a, m.a ] ) - self.assertEqual( list(identify_variables(2*m.a+2*m.a)), - [ m.a ] ) + self.assertEqual(list(identify_variables(2 * m.a + 2 * m.a)), [m.a]) def test_identify_vars_expr(self): # @@ -94,12 +123,12 @@ def test_identify_vars_expr(self): m = ConcreteModel() m.a = Var(initialize=1) m.b = Var(initialize=2) - m.e = Expression(expr=3*m.a) - m.E = Expression([0,1], initialize={0:3*m.a, 1:4*m.b}) + m.e = Expression(expr=3 * m.a) + m.E = Expression([0, 1], initialize={0: 3 * m.a, 1: 4 * m.b}) - self.assertEqual( list(identify_variables(m.b+m.e)), [ m.b, m.a ] ) - self.assertEqual( list(identify_variables(m.E[0])), [ m.a ] ) - self.assertEqual( list(identify_variables(m.E[1])), [ m.b ] ) + self.assertEqual(list(identify_variables(m.b + m.e)), [m.b, m.a]) + self.assertEqual(list(identify_variables(m.E[0])), [m.a]) + self.assertEqual(list(identify_variables(m.E[1])), [m.b]) def test_identify_vars_vars(self): m = ConcreteModel() @@ -111,57 +140,53 @@ def test_identify_vars_vars(self): # # Identify variables in various algebraic expressions # - self.assertEqual( list(identify_variables(m.a)), [m.a] ) - self.assertEqual( list(identify_variables(m.b[1])), [m.b[1]] ) - self.assertEqual( list(identify_variables(m.a+m.b[1])), - [ m.a, m.b[1] ] ) - self.assertEqual( list(identify_variables(m.a**m.b[1])), - [ m.a, m.b[1] ] ) - self.assertEqual( list(identify_variables(m.a**m.b[1] + m.b[2])), - [ m.b[2], m.a, m.b[1]] ) - self.assertEqual( list(identify_variables( - m.a**m.b[1] + m.b[2]*m.b[3]*m.b[2])), - [ m.a, m.b[1], m.b[2], m.b[3] ] ) - self.assertEqual( list(identify_variables( - m.a**m.b[1] + m.b[2]/m.b[3]*m.b[2])), - [ m.a, m.b[1], m.b[2], m.b[3] ] ) + self.assertEqual(list(identify_variables(m.a)), [m.a]) + self.assertEqual(list(identify_variables(m.b[1])), [m.b[1]]) + self.assertEqual(list(identify_variables(m.a + m.b[1])), [m.a, m.b[1]]) + self.assertEqual(list(identify_variables(m.a ** m.b[1])), [m.a, m.b[1]]) + self.assertEqual( + list(identify_variables(m.a ** m.b[1] + m.b[2])), [m.b[2], m.a, m.b[1]] + ) + self.assertEqual( + list(identify_variables(m.a ** m.b[1] + m.b[2] * m.b[3] * m.b[2])), + [m.a, m.b[1], m.b[2], m.b[3]], + ) + self.assertEqual( + list(identify_variables(m.a ** m.b[1] + m.b[2] / m.b[3] * m.b[2])), + [m.a, m.b[1], m.b[2], m.b[3]], + ) # # Identify variables in the arguments to functions # - self.assertEqual( list(identify_variables( - m.x(m.a, 'string_param', 1, []) * m.b[1] )), - [ m.b[1], m.a ] ) - self.assertEqual( list(identify_variables( - m.x(m.p, 'string_param', 1, [])*m.b[1] )), - [ m.b[1] ] ) - self.assertEqual( list(identify_variables( - tanh(m.a)*m.b[1] )), [ m.b[1], m.a ] ) - self.assertEqual( list(identify_variables( - abs(m.a) * m.b[1] )), [ m.b[1], m.a ] ) + self.assertEqual( + list(identify_variables(m.x(m.a, 'string_param', 1, []) * m.b[1])), + [m.b[1], m.a], + ) + self.assertEqual( + list(identify_variables(m.x(m.p, 'string_param', 1, []) * m.b[1])), [m.b[1]] + ) + self.assertEqual(list(identify_variables(tanh(m.a) * m.b[1])), [m.b[1], m.a]) + self.assertEqual(list(identify_variables(abs(m.a) * m.b[1])), [m.b[1], m.a]) # # Check logic for allowing duplicates # - self.assertEqual( list(identify_variables(m.a**m.a + m.a)), - [ m.a ] ) - #self.assertEqual( list(identify_variables(m.a**m.a + m.a, allow_duplicates=True)), + self.assertEqual(list(identify_variables(m.a**m.a + m.a)), [m.a]) + # self.assertEqual( list(identify_variables(m.a**m.a + m.a, allow_duplicates=True)), # [ m.a, m.a, m.a, ] ) def test_identify_vars_linear_expression(self): m = ConcreteModel() m.x = Var() expr = quicksum([m.x, m.x], linear=True) - self.assertEqual(list(identify_variables( - expr, include_fixed=False)), [m.x]) - + self.assertEqual(list(identify_variables(expr, include_fixed=False)), [m.x]) class TestIdentifyParams(unittest.TestCase): - def test_identify_params_numeric(self): # # There are no parameters in a constant expression # - self.assertEqual( list(identify_mutable_parameters(5)), [] ) + self.assertEqual(list(identify_mutable_parameters(5)), []) def test_identify_mutable_parameters(self): m = ConcreteModel() @@ -171,14 +196,15 @@ def test_identify_mutable_parameters(self): # # There are no parameters in expressions with only vars # - self.assertEqual( list(identify_mutable_parameters(m.a)), [] ) - self.assertEqual( list(identify_mutable_parameters(m.b[1])), [] ) - self.assertEqual( list(identify_mutable_parameters(m.a+m.b[1])), [] ) - self.assertEqual( list(identify_mutable_parameters(m.a**m.b[1])), [] ) - self.assertEqual( list(identify_mutable_parameters( - m.a**m.b[1] + m.b[2])), [] ) - self.assertEqual( list(identify_mutable_parameters( - m.a**m.b[1] + m.b[2]*m.b[3]*m.b[2])), [] ) + self.assertEqual(list(identify_mutable_parameters(m.a)), []) + self.assertEqual(list(identify_mutable_parameters(m.b[1])), []) + self.assertEqual(list(identify_mutable_parameters(m.a + m.b[1])), []) + self.assertEqual(list(identify_mutable_parameters(m.a ** m.b[1])), []) + self.assertEqual(list(identify_mutable_parameters(m.a ** m.b[1] + m.b[2])), []) + self.assertEqual( + list(identify_mutable_parameters(m.a ** m.b[1] + m.b[2] * m.b[3] * m.b[2])), + [], + ) def test_identify_mutable_parameters_constants(self): # @@ -189,10 +215,10 @@ def test_identify_mutable_parameters_constants(self): m.x.fix() m.p = Param(initialize=2, mutable=False) m.p_m = Param(initialize=3, mutable=True) - e1 = (m.x + m.p + NumericConstant(5)) + e1 = m.x + m.p + NumericConstant(5) self.assertEqual(list(identify_mutable_parameters(e1)), []) - e2 = (5*m.x + NumericConstant(3)*m.p_m + m.p == 0) + e2 = 5 * m.x + NumericConstant(3) * m.p_m + m.p == 0 mut_params = list(identify_mutable_parameters(e2)) self.assertEqual(len(mut_params), 1) self.assertIs(mut_params[0], m.p_m) @@ -204,8 +230,7 @@ def test_identify_duplicate_params(self): m = ConcreteModel() m.a = Param(initialize=1, mutable=True) - self.assertEqual( list(identify_mutable_parameters(2*m.a+2*m.a)), - [ m.a ] ) + self.assertEqual(list(identify_mutable_parameters(2 * m.a + 2 * m.a)), [m.a]) def test_identify_mutable_parameters_expr(self): # @@ -214,12 +239,12 @@ def test_identify_mutable_parameters_expr(self): m = ConcreteModel() m.a = Param(initialize=1, mutable=True) m.b = Param(initialize=2, mutable=True) - m.e = Expression(expr=3*m.a) - m.E = Expression([0,1], initialize={0:3*m.a, 1:4*m.b}) + m.e = Expression(expr=3 * m.a) + m.E = Expression([0, 1], initialize={0: 3 * m.a, 1: 4 * m.b}) - self.assertEqual( list(identify_mutable_parameters(m.b+m.e)), [ m.b, m.a ] ) - self.assertEqual( list(identify_mutable_parameters(m.E[0])), [ m.a ] ) - self.assertEqual( list(identify_mutable_parameters(m.E[1])), [ m.b ] ) + self.assertEqual(list(identify_mutable_parameters(m.b + m.e)), [m.b, m.a]) + self.assertEqual(list(identify_mutable_parameters(m.E[0])), [m.a]) + self.assertEqual(list(identify_mutable_parameters(m.E[1])), [m.b]) def test_identify_mutable_parameters_logical_expr(self): # @@ -227,7 +252,7 @@ def test_identify_mutable_parameters_logical_expr(self): # m = ConcreteModel() m.a = Param(initialize=0, mutable=True) - expr = m.a+1 == 0 + expr = m.a + 1 == 0 param_set = ComponentSet(identify_mutable_parameters(expr)) self.assertEqual(len(param_set), 1) self.assertIn(m.a, param_set) @@ -242,52 +267,57 @@ def test_identify_mutable_parameters_params(self): # # Identify variables in various algebraic expressions # - self.assertEqual( list(identify_mutable_parameters(m.a)), [m.a] ) - self.assertEqual( list(identify_mutable_parameters(m.b[1])), [m.b[1]] ) - self.assertEqual( list(identify_mutable_parameters(m.a+m.b[1])), - [ m.a, m.b[1] ] ) - self.assertEqual( list(identify_mutable_parameters(m.a**m.b[1])), - [ m.a, m.b[1] ] ) - self.assertEqual( list(identify_mutable_parameters(m.a**m.b[1] + m.b[2])), - [ m.b[2], m.a, m.b[1] ] ) - self.assertEqual( list(identify_mutable_parameters( - m.a**m.b[1] + m.b[2]*m.b[3]*m.b[2])), - [ m.a, m.b[1], m.b[2], m.b[3] ] ) - self.assertEqual( list(identify_mutable_parameters( - m.a**m.b[1] + m.b[2]/m.b[3]*m.b[2])), - [ m.a, m.b[1], m.b[2], m.b[3] ] ) + self.assertEqual(list(identify_mutable_parameters(m.a)), [m.a]) + self.assertEqual(list(identify_mutable_parameters(m.b[1])), [m.b[1]]) + self.assertEqual(list(identify_mutable_parameters(m.a + m.b[1])), [m.a, m.b[1]]) + self.assertEqual( + list(identify_mutable_parameters(m.a ** m.b[1])), [m.a, m.b[1]] + ) + self.assertEqual( + list(identify_mutable_parameters(m.a ** m.b[1] + m.b[2])), + [m.b[2], m.a, m.b[1]], + ) + self.assertEqual( + list(identify_mutable_parameters(m.a ** m.b[1] + m.b[2] * m.b[3] * m.b[2])), + [m.a, m.b[1], m.b[2], m.b[3]], + ) + self.assertEqual( + list(identify_mutable_parameters(m.a ** m.b[1] + m.b[2] / m.b[3] * m.b[2])), + [m.a, m.b[1], m.b[2], m.b[3]], + ) # # Identify variables in the arguments to functions # - self.assertEqual( list(identify_mutable_parameters( - m.x(m.a, 'string_param', 1, [])*m.b[1] )), - [ m.b[1], m.a ] ) - self.assertEqual( list(identify_mutable_parameters( - m.x(m.p, 'string_param', 1, [])*m.b[1] )), - [ m.b[1] ] ) - self.assertEqual( list(identify_mutable_parameters( - tanh(m.a)*m.b[1] )), [ m.b[1], m.a ] ) - self.assertEqual( list(identify_mutable_parameters( - abs(m.a)*m.b[1] )), [ m.b[1], m.a ] ) + self.assertEqual( + list(identify_mutable_parameters(m.x(m.a, 'string_param', 1, []) * m.b[1])), + [m.b[1], m.a], + ) + self.assertEqual( + list(identify_mutable_parameters(m.x(m.p, 'string_param', 1, []) * m.b[1])), + [m.b[1]], + ) + self.assertEqual( + list(identify_mutable_parameters(tanh(m.a) * m.b[1])), [m.b[1], m.a] + ) + self.assertEqual( + list(identify_mutable_parameters(abs(m.a) * m.b[1])), [m.b[1], m.a] + ) # # Check logic for allowing duplicates # - self.assertEqual( list(identify_mutable_parameters(m.a**m.a + m.a)), - [ m.a ] ) + self.assertEqual(list(identify_mutable_parameters(m.a**m.a + m.a)), [m.a]) # # Replace all variables with new variables from a varlist # class ReplacementWalkerTest1(ExpressionReplacementVisitor): - def __init__(self, model): ExpressionReplacementVisitor.__init__(self) self.model = model def visiting_potential_leaf(self, node): - if node.__class__ in nonpyomo_leaf_types or\ - not node.is_potentially_variable(): + if node.__class__ in nonpyomo_leaf_types or not node.is_potentially_variable(): return True, node if node.is_variable_type(): if id(node) in self.substitute: @@ -298,18 +328,17 @@ def visiting_potential_leaf(self, node): class WalkerTests(unittest.TestCase): - def test_replacement_walker1(self): M = ConcreteModel() M.x = Var() M.y = Var() M.w = VarList() - e = sin(M.x) + M.x*M.y + 3 + e = sin(M.x) + M.x * M.y + 3 walker = ReplacementWalkerTest1(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(sin(M.x) + M.x*M.y + 3, e)) - self.assertTrue(compare_expressions(sin(M.w[1]) + M.w[1]*M.w[2] + 3, f)) + assertExpressionsEqual(self, sin(M.x) + M.x * M.y + 3, e) + assertExpressionsEqual(self, sin(M.w[1]) + M.w[1] * M.w[2] + 3, f) def test_replacement_walker2(self): M = ConcreteModel() @@ -319,8 +348,8 @@ def test_replacement_walker2(self): e = M.x walker = ReplacementWalkerTest1(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(M.x, e)) - self.assertTrue(compare_expressions(M.w[1], f)) + assertExpressionsEqual(self, M.x, e) + assertExpressionsEqual(self, M.w[1], f) def test_replacement_walker3(self): M = ConcreteModel() @@ -328,11 +357,11 @@ def test_replacement_walker3(self): M.y = Var() M.w = VarList() - e = sin(M.x) + M.x*M.y + 3 <= 0 + e = sin(M.x) + M.x * M.y + 3 <= 0 walker = ReplacementWalkerTest1(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(sin(M.x) + M.x*M.y + 3 <= 0, e)) - self.assertTrue(compare_expressions(sin(M.w[1]) + M.w[1]*M.w[2] + 3 <= 0, f)) + assertExpressionsEqual(self, sin(M.x) + M.x * M.y + 3 <= 0, e) + assertExpressionsEqual(self, sin(M.w[1]) + M.w[1] * M.w[2] + 3 <= 0, f) def test_replacement_walker4(self): M = ConcreteModel() @@ -340,11 +369,13 @@ def test_replacement_walker4(self): M.y = Var() M.w = VarList() - e = inequality(0, sin(M.x) + M.x*M.y + 3, 1) + e = inequality(0, sin(M.x) + M.x * M.y + 3, 1) walker = ReplacementWalkerTest1(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(inequality(0, sin(M.x) + M.x*M.y + 3, 1), e)) - self.assertTrue(compare_expressions(inequality(0, sin(M.w[1]) + M.w[1]*M.w[2] + 3, 1), f)) + assertExpressionsEqual(self, inequality(0, sin(M.x) + M.x * M.y + 3, 1), e) + assertExpressionsEqual( + self, inequality(0, sin(M.w[1]) + M.w[1] * M.w[2] + 3, 1), f + ) def test_replacement_walker0(self): M = ConcreteModel() @@ -356,21 +387,47 @@ def test_replacement_walker0(self): self.assertIs(type(e), LinearExpression) walker = ReplacementWalkerTest1(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(LinearExpression(linear_coefs=[i for i in M.z.values()], - linear_vars=[i for i in M.x.values()]), e)) - self.assertTrue(compare_expressions(LinearExpression(linear_coefs=[i for i in M.z.values()], - linear_vars=[i for i in M.w.values()]), f)) + assertExpressionsEqual( + self, + LinearExpression( + linear_coefs=[i for i in M.z.values()], + linear_vars=[i for i in M.x.values()], + ), + e, + ) + assertExpressionsEqual( + self, + LinearExpression( + linear_coefs=[i for i in M.z.values()], + linear_vars=[i for i in M.w.values()], + ), + f, + ) del M.w del M.w_index M.w = VarList() - e = 2*sum_product(M.z, M.x) + e = 2 * sum_product(M.z, M.x) walker = ReplacementWalkerTest1(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(2*LinearExpression(linear_coefs=[i for i in M.z.values()], - linear_vars=[i for i in M.x.values()]), e)) - self.assertTrue(compare_expressions(2*LinearExpression(linear_coefs=[i for i in M.z.values()], - linear_vars=[i for i in M.w.values()]), f)) + assertExpressionsEqual( + self, + 2 + * LinearExpression( + linear_coefs=[i for i in M.z.values()], + linear_vars=[i for i in M.x.values()], + ), + e, + ) + assertExpressionsEqual( + self, + 2 + * LinearExpression( + linear_coefs=[i for i in M.z.values()], + linear_vars=[i for i in M.w.values()], + ), + f, + ) def test_replacement_linear_expression_with_constant(self): m = ConcreteModel() @@ -381,16 +438,17 @@ def test_replacement_linear_expression_with_constant(self): sub_map = dict() sub_map[id(m.x)] = 5 e2 = replace_expressions(e, sub_map) - self.assertTrue(compare_expressions(e2, SumExpression([10, m.y]))) + assertExpressionsEqual( + self, e2, LinearExpression([10, MonomialTermExpression((1, m.y))]) + ) e = LinearExpression(linear_coefs=[2, 3], linear_vars=[m.x, m.y]) sub_map = dict() sub_map[id(m.x)] = 5 e2 = replace_expressions(e, sub_map) - self.assertTrue(compare_expressions( - e2, - LinearExpression(constant=10, linear_coefs=[3], linear_vars=[m.y]) - )) + assertExpressionsEqual( + self, e2, LinearExpression([10, MonomialTermExpression((3, m.y))]) + ) def test_replacement_linear_expression_with_nonlinear(self): m = ConcreteModel() @@ -400,16 +458,13 @@ def test_replacement_linear_expression_with_nonlinear(self): sub_map = dict() sub_map[id(m.x)] = m.x**2 e2 = replace_expressions(e, sub_map) - self.assertTrue(compare_expressions( - e2, - SumExpression([2*m.x**2, 3*m.y]) - )) + assertExpressionsEqual(self, e2, SumExpression([2 * m.x**2, 3 * m.y])) def test_replace_expressions_with_monomial_term(self): M = ConcreteModel() M.x = Var() - e = 2.0*M.x - substitution_map = {id(M.x): 3.0*M.x} + e = 2.0 * M.x + substitution_map = {id(M.x): 3.0 * M.x} new_e = replace_expressions(e, substitution_map=substitution_map) self.assertEqual('6.0*x', str(new_e)) # See comment about this test in ExpressionReplacementVisitor @@ -421,7 +476,7 @@ def test_identify_components(self): M.y = Var() M.w = Var() - e = sin(M.x) + M.x*M.w + 3 + e = sin(M.x) + M.x * M.w + 3 v = list(str(v) for v in identify_components(e, set([M.x.__class__]))) self.assertEqual(v, ['x', 'w']) v = list(str(v) for v in identify_components(e, [M.x.__class__])) @@ -435,7 +490,7 @@ def test_identify_variables(self): M.w = 2 M.w.fixed = True - e = sin(M.x) + M.x*M.w + 3 + e = sin(M.x) + M.x * M.w + 3 v = list(str(v) for v in identify_variables(e)) self.assertEqual(v, ['x', 'w']) v = list(str(v) for v in identify_variables(e, include_fixed=False)) @@ -446,34 +501,36 @@ def test_expression_to_string(self): M.x = Var() M.w = Var() - e = sin(M.x) + M.x*M.w + 3 + e = sin(M.x) + M.x * M.w + 3 self.assertEqual("sin(x) + x*w + 3", expression_to_string(e)) M.w = 2 M.w.fixed = True - self.assertEqual("sin(x) + x*2 + 3", expression_to_string(e, compute_values=True)) + self.assertEqual( + "sin(x) + x*2 + 3", expression_to_string(e, compute_values=True) + ) def test_expression_component_to_string(self): m = ConcreteModel() m.x = Var() m.y = Var() - m.e = Expression(expr=m.x*m.y) + m.e = Expression(expr=m.x * m.y) m.f = Expression(expr=m.e) - e = m.x + m.f*m.y + e = m.x + m.f * m.y self.assertEqual("x + ((x*y))*y", str(e)) self.assertEqual("x + ((x*y))*y", expression_to_string(e)) + + # # Replace all variables with a product expression # class ReplacementWalkerTest2(ExpressionReplacementVisitor): - def __init__(self, model): ExpressionReplacementVisitor.__init__(self) self.model = model def visiting_potential_leaf(self, node): - if node.__class__ in nonpyomo_leaf_types or\ - not node.is_potentially_variable(): + if node.__class__ in nonpyomo_leaf_types or not node.is_potentially_variable(): return True, node if node.is_variable_type(): @@ -485,18 +542,17 @@ def visiting_potential_leaf(self, node): class WalkerTests2(unittest.TestCase): - def test_replacement_walker1(self): M = ConcreteModel() M.x = Var() M.y = Var() M.w = VarList() - e = sin(M.x) + M.x*M.y + 3 + e = sin(M.x) + M.x * M.y + 3 walker = ReplacementWalkerTest2(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(sin(M.x) + M.x*M.y + 3, e)) - self.assertTrue(compare_expressions(sin(2*M.w[1]) + 2*M.w[1]*(2*M.w[2]) + 3, f)) + assertExpressionsEqual(self, sin(M.x) + M.x * M.y + 3, e) + assertExpressionsEqual(self, sin(2 * M.w[1]) + 2 * M.w[1] * (2 * M.w[2]) + 3, f) def test_replacement_walker2(self): M = ConcreteModel() @@ -506,8 +562,8 @@ def test_replacement_walker2(self): e = M.x walker = ReplacementWalkerTest2(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(M.x, e)) - self.assertTrue(compare_expressions(2*M.w[1], f)) + assertExpressionsEqual(self, M.x, e) + assertExpressionsEqual(self, 2 * M.w[1], f) def test_replacement_walker3(self): M = ConcreteModel() @@ -515,11 +571,13 @@ def test_replacement_walker3(self): M.y = Var() M.w = VarList() - e = sin(M.x) + M.x*M.y + 3 <= 0 + e = sin(M.x) + M.x * M.y + 3 <= 0 walker = ReplacementWalkerTest2(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(sin(M.x) + M.x*M.y + 3 <= 0, e)) - self.assertTrue(compare_expressions(sin(2*M.w[1]) + 2*M.w[1]*(2*M.w[2]) + 3 <= 0, f)) + assertExpressionsEqual(self, sin(M.x) + M.x * M.y + 3 <= 0, e) + assertExpressionsEqual( + self, sin(2 * M.w[1]) + 2 * M.w[1] * (2 * M.w[2]) + 3 <= 0, f + ) def test_replacement_walker4(self): M = ConcreteModel() @@ -527,11 +585,13 @@ def test_replacement_walker4(self): M.y = Var() M.w = VarList() - e = inequality(0, sin(M.x) + M.x*M.y + 3, 1) + e = inequality(0, sin(M.x) + M.x * M.y + 3, 1) walker = ReplacementWalkerTest2(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(inequality(0 , sin(M.x) + M.x*M.y + 3 , 1), e)) - self.assertTrue(compare_expressions(inequality(0 , sin(2*M.w[1]) + 2*M.w[1]*(2*M.w[2]) + 3 , 1), f)) + assertExpressionsEqual(self, inequality(0, sin(M.x) + M.x * M.y + 3, 1), e) + assertExpressionsEqual( + self, inequality(0, sin(2 * M.w[1]) + 2 * M.w[1] * (2 * M.w[2]) + 3, 1), f + ) def test_replacement_walker5(self): M = ConcreteModel() @@ -539,13 +599,13 @@ def test_replacement_walker5(self): M.w = VarList() M.z = Param(mutable=True) - e = M.z*M.x + e = M.z * M.x walker = ReplacementWalkerTest2(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(e.__class__ is MonomialTermExpression) - self.assertTrue(f.__class__ is ProductExpression) - self.assertTrue(compare_expressions(M.z*M.x, e)) - self.assertTrue(compare_expressions(M.z*(2*M.w[1]), f)) + assertExpressionsEqual(self, e, MonomialTermExpression((M.z, M.x))) + assertExpressionsEqual( + self, f, MonomialTermExpression((NPV_ProductExpression((M.z, 2)), M.w[1])) + ) def test_replacement_walker0(self): M = ConcreteModel() @@ -557,22 +617,72 @@ def test_replacement_walker0(self): self.assertIs(type(e), LinearExpression) walker = ReplacementWalkerTest2(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(LinearExpression(linear_coefs=[i for i in M.z.values()], - linear_vars=[i for i in M.x.values()]), e)) - self.assertTrue(compare_expressions(M.z[0]*(2*M.w[1]) + M.z[1]*(2*M.w[2]) + M.z[2]*(2*M.w[3]), f)) + assertExpressionsEqual( + self, + e, + LinearExpression( + linear_coefs=[i for i in M.z.values()], + linear_vars=[i for i in M.x.values()], + ), + ) + assertExpressionsEqual( + self, + f, + LinearExpression( + [ + MonomialTermExpression( + (NPV_ProductExpression((M.z[0], 2)), M.w[1]) + ), + MonomialTermExpression( + (NPV_ProductExpression((M.z[1], 2)), M.w[2]) + ), + MonomialTermExpression( + (NPV_ProductExpression((M.z[2], 2)), M.w[3]) + ), + ] + ), + ) - e = 2*sum_product(M.z, M.x) + e = 2 * sum_product(M.z, M.x) walker = ReplacementWalkerTest2(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(2*LinearExpression(linear_coefs=[i for i in M.z.values()], - linear_vars=[i for i in M.x.values()]), e)) - self.assertTrue(compare_expressions(2*(M.z[0]*(2*M.w[4]) + M.z[1]*(2*M.w[5]) + M.z[2]*(2*M.w[6])), f)) + assertExpressionsEqual( + self, + 2 + * LinearExpression( + linear_coefs=[i for i in M.z.values()], + linear_vars=[i for i in M.x.values()], + ), + e, + ) + assertExpressionsEqual( + self, + f, + ProductExpression( + ( + 2, + LinearExpression( + [ + MonomialTermExpression( + (NPV_ProductExpression((M.z[0], 2)), M.w[4]) + ), + MonomialTermExpression( + (NPV_ProductExpression((M.z[1], 2)), M.w[5]) + ), + MonomialTermExpression( + (NPV_ProductExpression((M.z[2], 2)), M.w[6]) + ), + ] + ), + ) + ), + ) + # # Replace all mutable parameters with variables # class ReplacementWalkerTest3(ExpressionReplacementVisitor): - def __init__(self, model): super().__init__(remove_named_expressions=False) self.model = model @@ -581,30 +691,31 @@ def visiting_potential_leaf(self, node): if node.__class__ in (_ParamData, ScalarParam): if id(node) in self.substitute: return True, self.substitute[id(node)] - self.substitute[id(node)] = 2*self.model.w.add() + self.substitute[id(node)] = 2 * self.model.w.add() return True, self.substitute[id(node)] - if node.__class__ in nonpyomo_leaf_types or \ - node.is_constant() or \ - node.is_variable_type(): + if ( + node.__class__ in nonpyomo_leaf_types + or node.is_constant() + or node.is_variable_type() + ): return True, node return False, None class WalkerTests3(unittest.TestCase): - def test_replacement_walker1(self): M = ConcreteModel() M.x = Param(mutable=True) M.y = Var() M.w = VarList() - e = sin(M.x) + M.x*M.y + 3 + e = sin(M.x) + M.x * M.y + 3 walker = ReplacementWalkerTest3(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(sin(M.x) + M.x*M.y + 3, e)) - self.assertTrue(compare_expressions(sin(2*M.w[1]) + 2*M.w[1]*M.y + 3, f)) + assertExpressionsEqual(self, sin(M.x) + M.x * M.y + 3, e) + assertExpressionsEqual(self, sin(2 * M.w[1]) + 2 * M.w[1] * M.y + 3, f) def test_replacement_walker2(self): M = ConcreteModel() @@ -614,8 +725,8 @@ def test_replacement_walker2(self): e = M.x walker = ReplacementWalkerTest3(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(M.x, e)) - self.assertTrue(compare_expressions(2*M.w[1], f)) + assertExpressionsEqual(self, M.x, e) + assertExpressionsEqual(self, 2 * M.w[1], f) def test_replacement_walker3(self): M = ConcreteModel() @@ -623,11 +734,11 @@ def test_replacement_walker3(self): M.y = Var() M.w = VarList() - e = sin(M.x) + M.x*M.y + 3 <= 0 + e = sin(M.x) + M.x * M.y + 3 <= 0 walker = ReplacementWalkerTest3(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(sin(M.x) + M.x*M.y + 3 <= 0, e)) - self.assertTrue(compare_expressions(sin(2*M.w[1]) + 2*M.w[1]*M.y + 3 <= 0, f)) + assertExpressionsEqual(self, sin(M.x) + M.x * M.y + 3 <= 0, e) + assertExpressionsEqual(self, sin(2 * M.w[1]) + 2 * M.w[1] * M.y + 3 <= 0, f) def test_replacement_walker4(self): M = ConcreteModel() @@ -635,11 +746,13 @@ def test_replacement_walker4(self): M.y = Var() M.w = VarList() - e = inequality(0, sin(M.x) + M.x*M.y + 3, 1) + e = inequality(0, sin(M.x) + M.x * M.y + 3, 1) walker = ReplacementWalkerTest3(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(inequality(0, sin(M.x) + M.x*M.y + 3, 1), e)) - self.assertTrue(compare_expressions(inequality(0, sin(2*M.w[1]) + 2*M.w[1]*M.y + 3, 1), f)) + assertExpressionsEqual(self, inequality(0, sin(M.x) + M.x * M.y + 3, 1), e) + assertExpressionsEqual( + self, inequality(0, sin(2 * M.w[1]) + 2 * M.w[1] * M.y + 3, 1), f + ) def test_replacement_walker5(self): M = ConcreteModel() @@ -647,14 +760,14 @@ def test_replacement_walker5(self): M.w = VarList() M.z = Param(mutable=True) - e = M.z*M.x + e = M.z * M.x walker = ReplacementWalkerTest3(M) f = walker.dfs_postorder_stack(e) self.assertIs(e.__class__, MonomialTermExpression) self.assertIs(f.__class__, ProductExpression) self.assertTrue(f.arg(0).is_potentially_variable()) - self.assertTrue(compare_expressions(M.z*M.x, e)) - self.assertTrue(compare_expressions(2*M.w[1]*M.x, f)) + assertExpressionsEqual(self, M.z * M.x, e) + assertExpressionsEqual(self, 2 * M.w[1] * M.x, f) def test_replacement_walker6(self): M = ConcreteModel() @@ -662,25 +775,32 @@ def test_replacement_walker6(self): M.w = VarList() M.z = Param(mutable=True) - e = (M.z*2)*3 + e = (M.z * 2) * 3 walker = ReplacementWalkerTest3(M) f = walker.dfs_postorder_stack(e) self.assertTrue(not e.is_potentially_variable()) self.assertTrue(f.is_potentially_variable()) - self.assertTrue(compare_expressions(M.z*2*3, e)) - self.assertTrue(compare_expressions(ProductExpression([ProductExpression([2*M.w[1], 2]), 3]), f)) + assertExpressionsEqual(self, M.z * 2 * 3, e) + assertExpressionsEqual( + self, ProductExpression([ProductExpression([2 * M.w[1], 2]), 3]), f + ) def test_replacement_walker7(self): M = ConcreteModel() M.x = Var() M.w = VarList() M.z = Param(mutable=True) - M.e = Expression(expr=M.z*2) + M.e = Expression(expr=M.z * 2) - e = M.x*M.e + e = M.x * M.e self.assertTrue(e.arg(1).is_potentially_variable()) self.assertTrue(not e.arg(1).arg(0).is_potentially_variable()) - self.assertTrue(compare_expressions(ProductExpression([M.x, (NPV_ProductExpression([M.z, 2]))]), e, include_named_exprs=False)) + assertExpressionsEqual( + self, + ProductExpression([M.x, (NPV_ProductExpression([M.z, 2]))]), + e, + include_named_exprs=False, + ) walker = ReplacementWalkerTest3(M) f = walker.dfs_postorder_stack(e) self.assertTrue(e.__class__ is ProductExpression) @@ -688,7 +808,9 @@ def test_replacement_walker7(self): self.assertEqual(id(e), id(f)) self.assertTrue(f.arg(1).is_potentially_variable()) self.assertTrue(f.arg(1).arg(0).is_potentially_variable()) - self.assertTrue(compare_expressions(M.x*ProductExpression([2*M.w[1], 2]), f, include_named_exprs=False)) + assertExpressionsEqual( + self, M.x * ProductExpression([2 * M.w[1], 2]), f, include_named_exprs=False + ) def test_replacement_walker0(self): M = ConcreteModel() @@ -700,23 +822,41 @@ def test_replacement_walker0(self): self.assertIs(type(e), LinearExpression) walker = ReplacementWalkerTest3(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(LinearExpression(linear_coefs=[i for i in M.z.values()], - linear_vars=[i for i in M.x.values()]), e)) - self.assertTrue(compare_expressions(2*M.w[1]*M.x[0] + 2*M.w[2]*M.x[1] + 2*M.w[3]*M.x[2], f)) + assertExpressionsEqual( + self, + LinearExpression( + linear_coefs=[i for i in M.z.values()], + linear_vars=[i for i in M.x.values()], + ), + e, + ) + assertExpressionsEqual( + self, 2 * M.w[1] * M.x[0] + 2 * M.w[2] * M.x[1] + 2 * M.w[3] * M.x[2], f + ) - e = 2*sum_product(M.z, M.x) + e = 2 * sum_product(M.z, M.x) walker = ReplacementWalkerTest3(M) f = walker.dfs_postorder_stack(e) - self.assertTrue(compare_expressions(2*LinearExpression(linear_coefs=[i for i in M.z.values()], - linear_vars=[i for i in M.x.values()]), e)) - self.assertTrue(compare_expressions(2*(2*M.w[4]*M.x[0] + 2*M.w[5]*M.x[1] + 2*M.w[6]*M.x[2]), f)) + assertExpressionsEqual( + self, + 2 + * LinearExpression( + linear_coefs=[i for i in M.z.values()], + linear_vars=[i for i in M.x.values()], + ), + e, + ) + assertExpressionsEqual( + self, + 2 * (2 * M.w[4] * M.x[0] + 2 * M.w[5] * M.x[1] + 2 * M.w[6] * M.x[2]), + f, + ) # # Replace all mutable parameters with variables # class ReplacementWalker_ReplaceInternal(ExpressionReplacementVisitor): - def exitNode(self, node, data): if type(node) == ProductExpression: return sum(data[1]) @@ -725,49 +865,61 @@ def exitNode(self, node, data): class WalkerTests_ReplaceInternal(unittest.TestCase): - def test_no_replacement(self): m = ConcreteModel() m.x = Param(mutable=True) - m.y = Var([1,2,3]) + m.y = Var([1, 2, 3]) e = sum(m.y[i] for i in m.y) == 0 f = ReplacementWalker_ReplaceInternal().dfs_postorder_stack(e) - self.assertTrue(compare_expressions(m.y[1] + m.y[2] + m.y[3] == 0, e)) - self.assertTrue(compare_expressions(m.y[1] + m.y[2] + m.y[3] == 0, f)) + assertExpressionsEqual(self, m.y[1] + m.y[2] + m.y[3] == 0, e) + assertExpressionsEqual(self, m.y[1] + m.y[2] + m.y[3] == 0, f) self.assertIs(e, f) def test_replace(self): m = ConcreteModel() m.x = Param(mutable=True) - m.y = Var([1,2,3]) + m.y = Var([1, 2, 3]) - e = m.y[1]*m.y[2] + m.y[2]*m.y[3] == 0 + e = m.y[1] * m.y[2] + m.y[2] * m.y[3] == 0 f = ReplacementWalker_ReplaceInternal().dfs_postorder_stack(e) - self.assertTrue(compare_expressions(m.y[1]*m.y[2] + m.y[2]*m.y[3] == 0, e)) - self.assertTrue(compare_expressions(SumExpression([SumExpression([m.y[1], m.y[2]]), SumExpression([m.y[2], m.y[3]])]) == 0, f)) - self.assertIs(type(f.arg(0)), SumExpression) - self.assertEqual(f.arg(0).nargs(), 2) - self.assertIs(type(f.arg(0).arg(0)), SumExpression) - self.assertEqual(f.arg(0).arg(0).nargs(), 2) - self.assertIs(type(f.arg(0).arg(1)), SumExpression) - self.assertEqual(f.arg(0).arg(1).nargs(), 2) + assertExpressionsEqual(self, m.y[1] * m.y[2] + m.y[2] * m.y[3] == 0, e) + assertExpressionsEqual( + self, + SumExpression( + [ + LinearExpression( + [ + MonomialTermExpression((1, m.y[1])), + MonomialTermExpression((1, m.y[2])), + ] + ), + LinearExpression( + [ + MonomialTermExpression((1, m.y[2])), + MonomialTermExpression((1, m.y[3])), + ] + ), + ] + ) + == 0, + f, + ) def test_replace_nested(self): m = ConcreteModel() m.x = Param(mutable=True) - m.y = Var([1,2,3]) + m.y = Var([1, 2, 3]) - e = m.y[1]*m.y[2]*m.y[2]*m.y[3] == 0 + e = m.y[1] * m.y[2] * m.y[2] * m.y[3] == 0 f = ReplacementWalker_ReplaceInternal().dfs_postorder_stack(e) - self.assertTrue(compare_expressions(m.y[1]*m.y[2]*m.y[2]*m.y[3] == 0, e)) - self.assertTrue(compare_expressions(m.y[1] + m.y[2] + m.y[2] + m.y[3] == 0, f)) - self.assertIs(type(f.arg(0)), SumExpression) + assertExpressionsEqual(self, m.y[1] * m.y[2] * m.y[2] * m.y[3] == 0, e) + assertExpressionsEqual(self, m.y[1] + m.y[2] + m.y[2] + m.y[3] == 0, f) + self.assertIs(type(f.arg(0)), LinearExpression) self.assertEqual(f.arg(0).nargs(), 4) class TestReplacementWithNPV(unittest.TestCase): - def test_npv_sum(self): m = ConcreteModel() m.p1 = Param(mutable=True) @@ -778,8 +930,10 @@ def test_npv_sum(self): e2 = replace_expressions(e1, {id(m.p1): m.p2}) e3 = replace_expressions(e1, {id(m.p1): m.x}) - self.assertTrue(compare_expressions(e2, m.p2 + 2)) - self.assertTrue(compare_expressions(e3, m.x + 2)) + assertExpressionsEqual(self, e2, m.p2 + 2) + assertExpressionsEqual( + self, e3, LinearExpression([MonomialTermExpression((1, m.x)), 2]) + ) def test_npv_negation(self): m = ConcreteModel() @@ -791,8 +945,8 @@ def test_npv_negation(self): e2 = replace_expressions(e1, {id(m.p1): m.p2}) e3 = replace_expressions(e1, {id(m.p1): m.x}) - self.assertTrue(compare_expressions(e2, -m.p2)) - self.assertTrue(compare_expressions(e3, NegationExpression([m.x]))) + assertExpressionsEqual(self, e2, -m.p2) + assertExpressionsEqual(self, e3, NegationExpression([m.x])) def test_npv_pow(self): m = ConcreteModel() @@ -804,8 +958,8 @@ def test_npv_pow(self): e2 = replace_expressions(e1, {id(m.p1): m.p2}) e3 = replace_expressions(e1, {id(m.p1): m.x}) - self.assertTrue(compare_expressions(e2, m.p2**3)) - self.assertTrue(compare_expressions(e3, m.x**3)) + assertExpressionsEqual(self, e2, m.p2**3) + assertExpressionsEqual(self, e3, m.x**3) def test_npv_product(self): m = ConcreteModel() @@ -813,12 +967,12 @@ def test_npv_product(self): m.p2 = Param(mutable=True) m.x = Var() - e1 = m.p1*3 + e1 = m.p1 * 3 e2 = replace_expressions(e1, {id(m.p1): m.p2}) e3 = replace_expressions(e1, {id(m.p1): m.x}) - self.assertTrue(compare_expressions(e2, m.p2*3)) - self.assertTrue(compare_expressions(e3, ProductExpression([m.x, 3]))) + assertExpressionsEqual(self, e2, m.p2 * 3) + assertExpressionsEqual(self, e3, ProductExpression([m.x, 3])) def test_npv_div(self): m = ConcreteModel() @@ -826,12 +980,12 @@ def test_npv_div(self): m.p2 = Param(mutable=True) m.x = Var() - e1 = m.p1/3 + e1 = m.p1 / 3 e2 = replace_expressions(e1, {id(m.p1): m.p2}) e3 = replace_expressions(e1, {id(m.p1): m.x}) - self.assertTrue(compare_expressions(e2, m.p2/3)) - self.assertTrue(compare_expressions(e3, DivisionExpression([m.x, 3]))) + assertExpressionsEqual(self, e2, m.p2 / 3) + assertExpressionsEqual(self, e3, DivisionExpression((m.x, 3))) def test_npv_unary(self): m = ConcreteModel() @@ -843,8 +997,8 @@ def test_npv_unary(self): e2 = replace_expressions(e1, {id(m.p1): m.p2}) e3 = replace_expressions(e1, {id(m.p1): m.x}) - self.assertTrue(compare_expressions(e2, sin(m.p2))) - self.assertTrue(compare_expressions(e3, sin(m.x))) + assertExpressionsEqual(self, e2, sin(m.p2)) + assertExpressionsEqual(self, e3, sin(m.x)) def test_npv_abs(self): m = ConcreteModel() @@ -856,8 +1010,8 @@ def test_npv_abs(self): e2 = replace_expressions(e1, {id(m.p1): m.p2}) e3 = replace_expressions(e1, {id(m.p1): m.x}) - self.assertTrue(compare_expressions(e2, abs(m.p2))) - self.assertTrue(compare_expressions(e3, abs(m.x))) + assertExpressionsEqual(self, e2, abs(m.p2)) + assertExpressionsEqual(self, e3, abs(m.x)) class BaseStreamBasedVisitorTests(object): @@ -866,36 +1020,38 @@ def setUp(self): m.x = Var() m.y = Var() m.z = Var() - self.e = m.x**2 + m.y + m.z*(m.x+m.y) + # Note: we do not use the operator overloading to generate the + # expression so that the structure is constant even when we make + # adjustments to the expression generators + self.e = SumExpression( + [ + PowExpression((m.x, 2)), + m.y, + ProductExpression((m.z, SumExpression([m.x, m.y]))), + ] + ) def test_bad_args(self): with self.assertRaisesRegex( - RuntimeError, "Unrecognized keyword arguments: {'foo': None}"): + RuntimeError, "Unrecognized keyword arguments: {'foo': None}" + ): StreamBasedExpressionVisitor(foo=None) def test_default(self): walker = StreamBasedExpressionVisitor() ans = self.walk(walker, self.e) - ref = [ - [[],[]], - [], - [[],[[],[]],] - ] + ref = [[[], []], [], [[], [[], []]]] self.assertEqual(ans, ref) def test_beforeChild(self): def before(node, child, child_idx): - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): + if type(child) in nonpyomo_leaf_types or not child.is_expression_type(): return False, [child] + walker = StreamBasedExpressionVisitor(beforeChild=before) ans = self.walk(walker, self.e) m = self.m - ref = [ - [[m.x], [2]], - [m.y], - [[m.z], [[m.x], [m.y]]] - ] + ref = [[[m.x], [2]], [m.y], [[m.z], [[m.x], [m.y]]]] self.assertEqual(str(ans), str(ref)) ans = self.walk(walker, m.x) @@ -908,24 +1064,22 @@ def before(node, child, child_idx): def test_initializeWalker_beforeChild(self): def before(node, child, child_idx): - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): + if type(child) in nonpyomo_leaf_types or not child.is_expression_type(): return False, child + def initialize(expr): ans = before(None, expr, 0) if ans is None: return True, expr else: return ans + walker = StreamBasedExpressionVisitor( - beforeChild=before, initializeWalker=initialize) + beforeChild=before, initializeWalker=initialize + ) ans = self.walk(walker, self.e) m = self.m - ref = [ - [m.x, 2], - m.y, - [m.z, [m.x, m.y]] - ] + ref = [[m.x, 2], m.y, [m.z, [m.x, m.y]]] self.assertEqual(str(ans), str(ref)) ans = self.walk(walker, m.x) @@ -938,23 +1092,24 @@ def initialize(expr): def test_beforeChild_exitNode(self): def before(node, child, child_idx): - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): + if type(child) in nonpyomo_leaf_types or not child.is_expression_type(): return False, [child] + def exit(node, data): if hasattr(node, 'getname'): data.insert(0, node.getname()) else: data.insert(0, str(node)) return data - walker = StreamBasedExpressionVisitor( - beforeChild=before, exitNode=exit) + + walker = StreamBasedExpressionVisitor(beforeChild=before, exitNode=exit) ans = self.walk(walker, self.e) m = self.m - ref = ['sum', - ['pow', [m.x], [2]], - [m.y], - ['prod', [m.z], ['sum', [m.x], [m.y]]] + ref = [ + 'sum', + ['pow', [m.x], [2]], + [m.y], + ['prod', [m.z], ['sum', [m.x], [m.y]]], ] self.assertEqual(str(ans), str(ref)) @@ -968,27 +1123,33 @@ def exit(node, data): def test_beforeChild_enterNode_exitNode(self): i = [0] + def before(node, child, child_idx): - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): + if type(child) in nonpyomo_leaf_types or not child.is_expression_type(): return False, [child] + def enter(node): i[0] += 1 return None, [i[0]] + def exit(node, data): if hasattr(node, 'getname'): data.insert(0, node.getname()) else: data.insert(0, str(node)) return data + walker = StreamBasedExpressionVisitor( - beforeChild=before, enterNode=enter, exitNode=exit) + beforeChild=before, enterNode=enter, exitNode=exit + ) ans = self.walk(walker, self.e) m = self.m - ref = ['sum', 1, - ['pow', 2, [m.x], [2]], - [m.y], - ['prod', 3, [m.z], ['sum', 4, [m.x], [m.y]]] + ref = [ + 'sum', + 1, + ['pow', 2, [m.x], [2]], + [m.y], + ['prod', 3, [m.z], ['sum', 4, [m.x], [m.y]]], ] self.assertEqual(str(ans), str(ref)) @@ -1002,24 +1163,22 @@ def exit(node, data): def test_old_beforeChild(self): def before(node, child): - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): + if type(child) in nonpyomo_leaf_types or not child.is_expression_type(): return False, [child] + os = StringIO() with LoggingIntercept(os, 'pyomo'): walker = StreamBasedExpressionVisitor(beforeChild=before) self.assertIn( "Note that the API for the StreamBasedExpressionVisitor " "has changed to include the child index for the beforeChild() " - "method", os.getvalue().replace('\n',' ')) + "method", + os.getvalue().replace('\n', ' '), + ) ans = self.walk(walker, self.e) m = self.m - ref = [ - [[m.x], [2]], - [m.y], - [[m.z], [[m.x], [m.y]]] - ] + ref = [[[m.x], [2]], [m.y], [[m.z], [[m.x], [m.y]]]] self.assertEqual(str(ans), str(ref)) ans = self.walk(walker, m.x) @@ -1033,10 +1192,11 @@ def before(node, child): def test_reduce_in_accept(self): def enter(node): return None, 1 + def accept(node, data, child_result, child_idx): return data + child_result - walker = StreamBasedExpressionVisitor( - enterNode=enter, acceptChildResult=accept) + + walker = StreamBasedExpressionVisitor(enterNode=enter, acceptChildResult=accept) # 4 operators, 6 leaf nodes self.assertEqual(self.walk(walker, self.e), 10) @@ -1046,20 +1206,15 @@ def test_sizeof_expression(self): def test_enterNode(self): # This is an alternative way to implement the beforeChild test: def enter(node): - if type(node) in nonpyomo_leaf_types \ - or not node.is_expression_type(): + if type(node) in nonpyomo_leaf_types or not node.is_expression_type(): return (), [node] return node.args, [] - walker = StreamBasedExpressionVisitor( - enterNode=enter) + + walker = StreamBasedExpressionVisitor(enterNode=enter) m = self.m ans = self.walk(walker, self.e) - ref = [ - [[m.x], [2]], - [m.y], - [[m.z], [[m.x], [m.y]]] - ] + ref = [[[m.x], [2]], [m.y], [[m.z], [[m.x], [m.y]]]] self.assertEqual(str(ans), str(ref)) ans = self.walk(walker, m.x) @@ -1073,20 +1228,15 @@ def enter(node): def test_enterNode_noLeafList(self): # This is an alternative way to implement the beforeChild test: def enter(node): - if type(node) in nonpyomo_leaf_types \ - or not node.is_expression_type(): + if type(node) in nonpyomo_leaf_types or not node.is_expression_type(): return (), node return node.args, [] - walker = StreamBasedExpressionVisitor( - enterNode=enter) + + walker = StreamBasedExpressionVisitor(enterNode=enter) m = self.m ans = self.walk(walker, self.e) - ref = [ - [m.x, 2], - m.y, - [m.z, [m.x, m.y]] - ] + ref = [[m.x, 2], m.y, [m.z, [m.x, m.y]]] self.assertEqual(str(ans), str(ref)) ans = self.walk(walker, m.x) @@ -1100,25 +1250,21 @@ def enter(node): def test_enterNode_withFinalize(self): # This is an alternative way to implement the beforeChild test: def enter(node): - if type(node) in nonpyomo_leaf_types \ - or not node.is_expression_type(): + if type(node) in nonpyomo_leaf_types or not node.is_expression_type(): return (), node return node.args, [] + def finalize(result): if type(result) is list: return result else: - return[result] - walker = StreamBasedExpressionVisitor( - enterNode=enter, finalizeResult=finalize) + return [result] + + walker = StreamBasedExpressionVisitor(enterNode=enter, finalizeResult=finalize) m = self.m ans = self.walk(walker, self.e) - ref = [ - [m.x, 2], - m.y, - [m.z, [m.x, m.y]] - ] + ref = [[m.x, 2], m.y, [m.z, [m.x, m.y]]] self.assertEqual(str(ans), str(ref)) ans = self.walk(walker, m.x) @@ -1136,15 +1282,12 @@ def exit(node, data): return data else: return [node] + walker = StreamBasedExpressionVisitor(exitNode=exit) m = self.m ans = self.walk(walker, self.e) - ref = [ - [[m.x], [2]], - [m.y], - [[m.z], [[m.x], [m.y]]] - ] + ref = [[[m.x], [2]], [m.y], [[m.z], [[m.x], [m.y]]]] self.assertEqual(str(ans), str(ref)) ans = self.walk(walker, m.x) @@ -1156,71 +1299,88 @@ def exit(node, data): self.assertEqual(str(ans), str(ref)) def test_beforeChild_acceptChildResult_afterChild(self): - counts = [0,0,0] + counts = [0, 0, 0] + def before(node, child, child_idx): counts[0] += 1 - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): + if type(child) in nonpyomo_leaf_types or not child.is_expression_type(): return False, None + def accept(node, data, child_result, child_idx): counts[1] += 1 + def after(node, child, child_idx): counts[2] += 1 + walker = StreamBasedExpressionVisitor( - beforeChild=before, acceptChildResult=accept, afterChild=after) + beforeChild=before, acceptChildResult=accept, afterChild=after + ) ans = self.walk(walker, self.e) m = self.m self.assertEqual(ans, None) - self.assertEqual(counts, [9,9,9]) + self.assertEqual(counts, [9, 9, 9]) def test_OLD_beforeChild_acceptChildResult_afterChild(self): - counts = [0,0,0] + counts = [0, 0, 0] + def before(node, child): counts[0] += 1 - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): + if type(child) in nonpyomo_leaf_types or not child.is_expression_type(): return False, None + def accept(node, data, child_result): counts[1] += 1 + def after(node, child): counts[2] += 1 os = StringIO() with LoggingIntercept(os, 'pyomo'): walker = StreamBasedExpressionVisitor( - beforeChild=before, acceptChildResult=accept, afterChild=after) + beforeChild=before, acceptChildResult=accept, afterChild=after + ) self.assertIn( "Note that the API for the StreamBasedExpressionVisitor " "has changed to include the child index for the " - "beforeChild() method", os.getvalue().replace('\n',' ')) + "beforeChild() method", + os.getvalue().replace('\n', ' '), + ) self.assertIn( "Note that the API for the StreamBasedExpressionVisitor " "has changed to include the child index for the " - "acceptChildResult() method", os.getvalue().replace('\n',' ')) + "acceptChildResult() method", + os.getvalue().replace('\n', ' '), + ) self.assertIn( "Note that the API for the StreamBasedExpressionVisitor " "has changed to include the child index for the " - "afterChild() method", os.getvalue().replace('\n',' ')) + "afterChild() method", + os.getvalue().replace('\n', ' '), + ) ans = self.walk(walker, self.e) m = self.m self.assertEqual(ans, None) - self.assertEqual(counts, [9,9,9]) + self.assertEqual(counts, [9, 9, 9]) def test_enterNode_acceptChildResult_beforeChild(self): ans = [] + def before(node, child, child_idx): - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): + if type(child) in nonpyomo_leaf_types or not child.is_expression_type(): return False, child + def accept(node, data, child_result, child_idx): if data is not child_result: data.append(child_result) return data + def enter(node): return node.args, ans + walker = StreamBasedExpressionVisitor( - enterNode=enter, beforeChild=before, acceptChildResult=accept) + enterNode=enter, beforeChild=before, acceptChildResult=accept + ) ans = self.walk(walker, self.e) m = self.m ref = [m.x, 2, m.y, m.z, m.x, m.y] @@ -1228,52 +1388,75 @@ def enter(node): def test_finalize(self): ans = [] + def before(node, child, child_idx): - if type(child) in nonpyomo_leaf_types \ - or not child.is_expression_type(): + if type(child) in nonpyomo_leaf_types or not child.is_expression_type(): return False, child + def accept(node, data, child_result, child_idx): if data is not child_result: data.append(child_result) return data + def enter(node): return node.args, ans + def finalize(result): return len(result) + walker = StreamBasedExpressionVisitor( - enterNode=enter, beforeChild=before, acceptChildResult=accept, - finalizeResult=finalize) + enterNode=enter, + beforeChild=before, + acceptChildResult=accept, + finalizeResult=finalize, + ) ans = self.walk(walker, self.e) self.assertEqual(ans, 6) def test_all_function_pointers(self): ans = [] + def name(x): if type(x) in nonpyomo_leaf_types: return str(x) else: return x.name + def initialize(expr): ans.append("Initialize") return True, None + def enter(node): ans.append("Enter %s" % (name(node))) + def exit(node, data): ans.append("Exit %s" % (name(node))) + def before(node, child, child_idx): ans.append("Before %s (from %s)" % (name(child), name(node))) + def accept(node, data, child_result, child_idx): ans.append("Accept into %s" % (name(node))) + def after(node, child, child_idx): ans.append("After %s (from %s)" % (name(child), name(node))) + def finalize(result): ans.append("Finalize") + walker = StreamBasedExpressionVisitor( initializeWalker=initialize, - enterNode=enter, exitNode=exit, beforeChild=before, - acceptChildResult=accept, afterChild=after, finalizeResult=finalize) - self.assertIsNone( self.walk(walker, self.e) ) - self.assertEqual("\n".join(ans),"""Initialize + enterNode=enter, + exitNode=exit, + beforeChild=before, + acceptChildResult=accept, + afterChild=after, + finalizeResult=finalize, + ) + self.assertIsNone(self.walk(walker, self.e)) + self.assertEqual( + "\n".join(ans), + """Initialize Enter sum Before pow (from sum) Enter pow @@ -1321,7 +1504,8 @@ def finalize(result): Accept into sum After prod (from sum) Exit sum -Finalize""") +Finalize""", + ) def test_all_derived_class(self): def name(x): @@ -1329,30 +1513,39 @@ def name(x): return str(x) else: return x.name + class all_callbacks(StreamBasedExpressionVisitor): def __init__(self): self.ans = [] super(all_callbacks, self).__init__() + def initializeWalker(self, expr): self.ans.append("Initialize") return True, None + def enterNode(self, node): self.ans.append("Enter %s" % (name(node))) + def exitNode(self, node, data): self.ans.append("Exit %s" % (name(node))) + def beforeChild(self, node, child, child_idx): - self.ans.append("Before %s (from %s)" - % (name(child), name(node))) + self.ans.append("Before %s (from %s)" % (name(child), name(node))) + def acceptChildResult(self, node, data, child_result, child_idx): self.ans.append("Accept into %s" % (name(node))) + def afterChild(self, node, child, child_idx): - self.ans.append("After %s (from %s)" - % (name(child), name(node))) + self.ans.append("After %s (from %s)" % (name(child), name(node))) + def finalizeResult(self, result): self.ans.append("Finalize") + walker = all_callbacks() - self.assertIsNone( self.walk(walker, self.e) ) - self.assertEqual("\n".join(walker.ans),"""Initialize + self.assertIsNone(self.walk(walker, self.e)) + self.assertEqual( + "\n".join(walker.ans), + """Initialize Enter sum Before pow (from sum) Enter pow @@ -1400,7 +1593,8 @@ def finalizeResult(self, result): Accept into sum After prod (from sum) Exit sum -Finalize""") +Finalize""", + ) def test_all_derived_class_oldAPI(self): def name(x): @@ -1408,42 +1602,56 @@ def name(x): return str(x) else: return x.name + class all_callbacks(StreamBasedExpressionVisitor): def __init__(self): self.ans = [] super(all_callbacks, self).__init__() + def enterNode(self, node): self.ans.append("Enter %s" % (name(node))) + def exitNode(self, node, data): self.ans.append("Exit %s" % (name(node))) + def beforeChild(self, node, child): - self.ans.append("Before %s (from %s)" - % (name(child), name(node))) + self.ans.append("Before %s (from %s)" % (name(child), name(node))) + def acceptChildResult(self, node, data, child_result): self.ans.append("Accept into %s" % (name(node))) + def afterChild(self, node, child): - self.ans.append("After %s (from %s)" - % (name(child), name(node))) + self.ans.append("After %s (from %s)" % (name(child), name(node))) + def finalizeResult(self, result): self.ans.append("Finalize") + os = StringIO() with LoggingIntercept(os, 'pyomo'): walker = all_callbacks() self.assertIn( "Note that the API for the StreamBasedExpressionVisitor " "has changed to include the child index for the " - "beforeChild() method", os.getvalue().replace('\n',' ')) + "beforeChild() method", + os.getvalue().replace('\n', ' '), + ) self.assertIn( "Note that the API for the StreamBasedExpressionVisitor " "has changed to include the child index for the " - "acceptChildResult() method", os.getvalue().replace('\n',' ')) + "acceptChildResult() method", + os.getvalue().replace('\n', ' '), + ) self.assertIn( "Note that the API for the StreamBasedExpressionVisitor " "has changed to include the child index for the " - "afterChild() method", os.getvalue().replace('\n',' ')) + "afterChild() method", + os.getvalue().replace('\n', ' '), + ) - self.assertIsNone( self.walk(walker, self.e) ) - self.assertEqual("\n".join(walker.ans),"""Enter sum + self.assertIsNone(self.walk(walker, self.e)) + self.assertEqual( + "\n".join(walker.ans), + """Enter sum Before pow (from sum) Enter pow Before x (from pow) @@ -1490,26 +1698,27 @@ def finalizeResult(self, result): Accept into sum After prod (from sum) Exit sum -Finalize""") +Finalize""", + ) class TestStreamBasedExpressionVisitor_Recursive( - BaseStreamBasedVisitorTests, unittest.TestCase): - + BaseStreamBasedVisitorTests, unittest.TestCase +): def walk(self, walker, expr): return walker.walk_expression(expr) class TestStreamBasedExpressionVisitor_NonRecursive( - BaseStreamBasedVisitorTests, unittest.TestCase): - + BaseStreamBasedVisitorTests, unittest.TestCase +): def walk(self, walker, expr): return walker.walk_expression_nonrecursive(expr) def fill_stack(n, fcn, *args): if n: - return fill_stack(n-1, fcn, *args) + return fill_stack(n - 1, fcn, *args) else: return fcn(*args) @@ -1518,12 +1727,14 @@ class TestStreamBasedExpressionVisitor_Deep(unittest.TestCase): def setUp(self): self.m = m = ConcreteModel() m.x = Var() - m.I = Set(initialize=range(2*RECURSION_LIMIT)) + m.I = Set(initialize=range(2 * RECURSION_LIMIT)) + def _rule(m, i): if i: - return m.e[i-1] + return m.e[i - 1] else: return m.x + m.e = Expression(m.I, rule=_rule) def evaluate_bx(self): @@ -1531,48 +1742,57 @@ def before(node, child, idx): if type(child) in native_types or not child.is_expression_type(): return False, value(child) return True, None + def exit(node, data): return data[0] + 1 - return StreamBasedExpressionVisitor( - beforeChild=before, exitNode=exit) + + return StreamBasedExpressionVisitor(beforeChild=before, exitNode=exit) def evaluate_bex(self): def before(node, child, idx): if type(child) in native_types or not child.is_expression_type(): return False, value(child) return True, None + def enter(node): return None, [] + def exit(node, data): return data[0] + 1 + return StreamBasedExpressionVisitor( - beforeChild=before, enterNode=enter, exitNode=exit) + beforeChild=before, enterNode=enter, exitNode=exit + ) def evaluate_abex(self): def before(node, child, idx): if type(child) in native_types or not child.is_expression_type(): return False, value(child) return True, None + def enter(node): return None, 0 + def accept(node, data, child_result, child_idx): return data + child_result + def exit(node, data): return data + 1 + return StreamBasedExpressionVisitor( - beforeChild=before, acceptChildResult=accept, - enterNode=enter, exitNode=exit) + beforeChild=before, acceptChildResult=accept, enterNode=enter, exitNode=exit + ) def run_walker(self, walker): m = self.m m.x = 10 self.assertEqual( - 2*RECURSION_LIMIT + 10, - walker.walk_expression(m.e[2*RECURSION_LIMIT-1]), + 2 * RECURSION_LIMIT + 10, + walker.walk_expression(m.e[2 * RECURSION_LIMIT - 1]), ) self.assertEqual( - 2*RECURSION_LIMIT + 10, - walker.walk_expression_nonrecursive(m.e[2*RECURSION_LIMIT-1]), + 2 * RECURSION_LIMIT + 10, + walker.walk_expression_nonrecursive(m.e[2 * RECURSION_LIMIT - 1]), ) # This is a "magic parameter" that quantifies the overhead @@ -1593,8 +1813,7 @@ def run_walker(self, walker): # We have not yet determined how to trigger the # RecursionError on PyPy cases = [(0, "")] - elif (os.environ.get('GITHUB_ACTIONS', '') - and sys.platform.startswith('win')): + elif os.environ.get('GITHUB_ACTIONS', '') and sys.platform.startswith('win'): # The test for handling RecursionError appears to fail # inexplicably on GHA/Windows under pytest: the # RecursionError that is supposed to be raised is not @@ -1602,17 +1821,20 @@ def run_walker(self, walker): # overflow error cases = [] else: - cases = [(0, ""), (3, warn_msg)] + # 3 sufficed through Python 3.10, but appeared to need to be + # raised to 5 for recent 3.11 builds (3.11.2) + cases = [(0, ""), (5, warn_msg)] head_room = sys.getrecursionlimit() - get_stack_depth() for n, msg in cases: with LoggingIntercept() as LOG: self.assertEqual( - 2*RECURSION_LIMIT + 10, + 2 * RECURSION_LIMIT + 10, fill_stack( head_room - RECURSION_LIMIT - TESTING_OVERHEAD + n, walker.walk_expression, - m.e[2*RECURSION_LIMIT-1]), + m.e[2 * RECURSION_LIMIT - 1], + ), ) self.assertEqual(msg, LOG.getvalue()) @@ -1627,7 +1849,6 @@ def test_evaluate_abex(self): class TestEvaluateExpression(unittest.TestCase): - def test_constant(self): m = ConcreteModel() m.p = Param(initialize=1) @@ -1650,7 +1871,9 @@ def test_variable(self): e = 1 + m.p self.assertRaises(ValueError, evaluate_expression, e) - self.assertRaises(NonConstantExpressionError, evaluate_expression, e, constant=True) + self.assertRaises( + NonConstantExpressionError, evaluate_expression, e, constant=True + ) def test_initialized_variable(self): m = ConcreteModel() @@ -1658,7 +1881,9 @@ def test_initialized_variable(self): e = 1 + m.p self.assertTrue(2, evaluate_expression(e)) - self.assertRaises(NonConstantExpressionError, evaluate_expression, e, constant=True) + self.assertRaises( + NonConstantExpressionError, evaluate_expression, e, constant=True + ) def test_fixed_variable(self): m = ConcreteModel() @@ -1671,14 +1896,17 @@ def test_fixed_variable(self): def test_template_expr(self): m = ConcreteModel() - m.I = RangeSet(1,9) - m.x = Var(m.I, initialize=lambda m,i: i+1) - m.P = Param(m.I, initialize=lambda m,i: 10-i, mutable=True) + m.I = RangeSet(1, 9) + m.x = Var(m.I, initialize=lambda m, i: i + 1) + m.P = Param(m.I, initialize=lambda m, i: 10 - i, mutable=True) t = IndexTemplate(m.I) - e = m.x[t+m.P[t+1]] + 3 + e = m.x[t + m.P[t + 1]] + 3 self.assertRaises(TemplateExpressionError, evaluate_expression, e) - self.assertRaises(TemplateExpressionError, evaluate_expression, e, constant=True) + self.assertRaises( + TemplateExpressionError, evaluate_expression, e, constant=True + ) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/test_xfrm_discrete_vars.py b/pyomo/core/tests/unit/test_xfrm_discrete_vars.py index 243ad1597bf..ae630586480 100644 --- a/pyomo/core/tests/unit/test_xfrm_discrete_vars.py +++ b/pyomo/core/tests/unit/test_xfrm_discrete_vars.py @@ -13,24 +13,35 @@ import pyomo.common.unittest as unittest -from pyomo.environ import ConcreteModel, Var, Constraint, Objective, Suffix, Binary, TransformationFactory, SolverFactory, Reals +from pyomo.environ import ( + ConcreteModel, + Var, + Constraint, + Objective, + Suffix, + Binary, + TransformationFactory, + SolverFactory, + Reals, +) from pyomo.opt import check_available_solvers solvers = check_available_solvers('cplex', 'gurobi', 'glpk') + def _generateModel(): model = ConcreteModel() model.x = Var(within=Binary) model.y = Var() model.c1 = Constraint(expr=model.y >= model.x) - model.c2 = Constraint(expr=model.y >= 1.5-model.x) + model.c2 = Constraint(expr=model.y >= 1.5 - model.x) model.obj = Objective(expr=model.y) model.dual = Suffix(direction=Suffix.IMPORT_EXPORT) return model -class Test(unittest.TestCase): - @unittest.skipIf( len(solvers) == 0, "LP/MIP solver not available") +class Test(unittest.TestCase): + @unittest.skipIf(len(solvers) == 0, "LP/MIP solver not available") def test_solve_relax_transform(self): s = SolverFactory(solvers[0]) m = _generateModel() @@ -49,8 +60,7 @@ def test_solve_relax_transform(self): self.assertAlmostEqual(m.dual[m.c1], -0.5, 4) self.assertAlmostEqual(m.dual[m.c2], -0.5, 4) - - @unittest.skipIf( len(solvers) == 0, "LP/MIP solver not available") + @unittest.skipIf(len(solvers) == 0, "LP/MIP solver not available") def test_solve_fix_transform(self): s = SolverFactory(solvers[0]) m = _generateModel() @@ -70,5 +80,6 @@ def test_solve_fix_transform(self): self.assertAlmostEqual(m.dual[m.c1], -1, 4) self.assertAlmostEqual(m.dual[m.c2], 0, 4) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/core/tests/unit/uninstantiated_model_linear.py b/pyomo/core/tests/unit/uninstantiated_model_linear.py index c7a0715ea89..387444b7bc5 100644 --- a/pyomo/core/tests/unit/uninstantiated_model_linear.py +++ b/pyomo/core/tests/unit/uninstantiated_model_linear.py @@ -13,13 +13,15 @@ model = AbstractModel() -model.indices = Set(initialize=[1,2]) +model.indices = Set(initialize=[1, 2]) model.p = Param(model.indices) model.x = Var(model.indices) -def objective_rule ( M ): + +def objective_rule(M): return sum([M.p[i] * M.x[i] for i in model.indices]) + model.objective = Objective(rule=objective_rule, sense=minimize) diff --git a/pyomo/core/tests/unit/uninstantiated_model_quadratic.py b/pyomo/core/tests/unit/uninstantiated_model_quadratic.py index 4540d9de4be..572c6a43a14 100644 --- a/pyomo/core/tests/unit/uninstantiated_model_quadratic.py +++ b/pyomo/core/tests/unit/uninstantiated_model_quadratic.py @@ -15,7 +15,9 @@ model.x = Var() -def objective_rule ( M ): - return M.x * M.x # should fail "gracefully" + +def objective_rule(M): + return M.x * M.x # should fail "gracefully" + model.objective = Objective(rule=objective_rule, sense=minimize) diff --git a/pyomo/core/util.py b/pyomo/core/util.py index 9d9adcff667..3f8a136e07d 100644 --- a/pyomo/core/util.py +++ b/pyomo/core/util.py @@ -13,17 +13,32 @@ # Utility functions # -__all__ = ['sum_product', 'summation', 'dot_product', 'sequence', 'prod', 'quicksum', 'target_list'] - +__all__ = [ + 'sum_product', + 'summation', + 'dot_product', + 'sequence', + 'prod', + 'quicksum', + 'target_list', +] + +from pyomo.common.deprecation import deprecation_warning from pyomo.core.expr.numvalue import native_numeric_types -from pyomo.core.expr.numeric_expr import decompose_term -from pyomo.core.expr import current as EXPR +from pyomo.core.expr.numeric_expr import ( + mutable_expression, + nonlinear_expression, + NPV_SumExpression, +) +import pyomo.core.expr as EXPR from pyomo.core.base.var import Var from pyomo.core.base.expression import Expression from pyomo.core.base.component import _ComponentBase import logging + logger = logging.getLogger(__name__) + def prod(terms): """ A utility function to compute the product of a list of terms. @@ -41,96 +56,69 @@ def prod(terms): def quicksum(args, start=0, linear=None): - """ - A utility function to compute a sum of Pyomo expressions. + """A utility function to compute a sum of Pyomo expressions. - The behavior of :func:`quicksum` is similar to the builtin :func:`sum` - function, but this function generates a more compact Pyomo - expression. + The behavior of :func:`quicksum` is similar to the builtin + :func:`sum` function, but this function can avoid the generation and + disposal of intermediate objects, and thus is slightly more + performant. - Args: - args: A generator for terms in the sum. - - start: A value that is initializes the sum. If - this value is not a numeric constant, then the += - operator is used to add terms to this object. - Defaults to zero. - - linear: If :attr:`start` is not a numeric constant, then this - option is ignored. Otherwise, this value indicates - whether the terms in the sum are linear. If the value - is :const:`False`, then the terms are - treated as nonlinear, and if :const:`True`, then - the terms are treated as linear. Default is - :const:`None`, which indicates that the first term - in the :attr:`args` is used to determine this value. + Parameters + ---------- + args: Iterable + A generator for terms in the sum. + + start: Any + A value that initializes the sum. If this value is not a + numeric constant, then the += operator is used to add terms to + this object. Defaults to 0. + + linear: bool + DEPRECATED: the linearity of the resulting expression is + determined automatically. This option is ignored. + + Returns + ------- + The value of the sum, which may be a Pyomo expression object. - Returns: - The value of the sum, which may be a Pyomo expression object. """ - # Ensure that args is an iterator (this manages things like IndexedComponent_slice objects) + # Ensure that args is an iterator (this manages things like + # IndexedComponent_slice objects) try: args = iter(args) except: logger.error('The argument `args` to quicksum() is not iterable!') raise + if linear is not None: + deprecation_warning( + "The quicksum(linear=...) argument is deprecated and ignored.", + version='6.6.0', + ) + # - # If we're starting with a numeric value, then - # create a new nonlinear sum expression but + # If we're starting with a numeric value, then + # create a new nonlinear sum expression but # return a static version to the user. # if start.__class__ in native_numeric_types: - if linear is None: - # - # Get the first term, which we will test for linearity - # - first = next(args, None) - if first is None: - return start - # - # Check if the first term is linear, and if so return the terms - # - linear, terms = decompose_term(first) - # - # Right now Pyomo5 expressions can only handle single linear - # terms. - # - # Also, we treat linear expressions as nonlinear if the constant - # term is not a native numeric type. Otherwise, large summation - # objects are created for the constant term. - # - if linear: - nvar=0 - for term in terms: - c,v = term - if not v is None: - nvar += 1 - elif not c.__class__ in native_numeric_types: - linear = False - if nvar > 1: - linear = False - start = start+first - if linear: - with EXPR.linear_expression() as e: - e += start - for arg in args: - e += arg - # Return the constant term if the linear expression does not contains variables - if e.is_constant(): - return e.constant + with mutable_expression() as e: + e += start + for arg in args: + e += arg + # Special case: reduce NPV sums of native types to a single + # constant + if e.__class__ is NPV_SumExpression and all( + arg.__class__ in native_numeric_types for arg in e.args + ): + return e() + if e.nargs() > 1: return e + elif not e.nargs(): + return 0 else: - with EXPR.nonlinear_expression() as e: - e += start - for arg in args: - e += arg - if e.nargs() == 0: - return 0 - elif e.nargs() == 1: - return e.arg(0) - return e + return e.arg(0) # # Otherwise, use the context that is provided and return it. # @@ -142,7 +130,7 @@ def quicksum(args, start=0, linear=None): def sum_product(*args, **kwds): """ - A utility function to compute a generalized dot product. + A utility function to compute a generalized dot product. This function accepts one or more components that provide terms that are multiplied together. These products are added together @@ -163,90 +151,76 @@ def sum_product(*args, **kwds): Returns: The value of the sum. """ - denom = kwds.pop('denom', tuple() ) + denom = kwds.pop('denom', tuple()) if type(denom) not in (list, tuple): denom = [denom] nargs = len(args) ndenom = len(denom) if nargs == 0 and ndenom == 0: - raise ValueError("The sum_product() command requires at least an " + \ - "argument or a denominator term") + raise ValueError( + "The sum_product() command requires at least an " + + "argument or a denominator term" + ) if 'index' in kwds: - index=kwds['index'] + index = kwds['index'] else: if nargs > 0: - iarg=args[-1] - if not isinstance(iarg,Var) and not isinstance(iarg, Expression): - raise ValueError("Error executing sum_product(): The last argument value must be a variable or expression object if no 'index' option is specified") + iarg = args[-1] + if not isinstance(iarg, Var) and not isinstance(iarg, Expression): + raise ValueError( + "Error executing sum_product(): The last argument value must be a variable or expression object if no 'index' option is specified" + ) else: - iarg=denom[-1] - if not isinstance(iarg,Var) and not isinstance(iarg, Expression): - raise ValueError("Error executing sum_product(): The last denom argument value must be a variable or expression object if no 'index' option is specified") + iarg = denom[-1] + if not isinstance(iarg, Var) and not isinstance(iarg, Expression): + raise ValueError( + "Error executing sum_product(): The last denom argument value must be a variable or expression object if no 'index' option is specified" + ) index = iarg.index_set() start = kwds.get("start", 0) - vars_ = [] - params_ = [] - for arg in args: - if isinstance(arg, Var): - vars_.append(arg) - else: - params_.append(arg) - nvars = len(vars_) - num_index = range(0,nargs) if ndenom == 0: # # Sum of polynomial terms # - if start.__class__ in native_numeric_types: - if nvars == 1: - v = vars_[0] - if len(params_) == 0: - with EXPR.linear_expression() as expr: - expr += start - for i in index: - expr += v[i] - elif len(params_) == 1: - p = params_[0] - with EXPR.linear_expression() as expr: - expr += start - for i in index: - expr += p[i]*v[i] - else: - with EXPR.linear_expression() as expr: - expr += start - for i in index: - term = 1 - for j in params_: - term *= params_[j][i] - expr += term * v[i] - return expr - # - with EXPR.nonlinear_expression() as expr: - expr += start + with mutable_expression() as expr: + expr += start + if nargs == 1: + arg1 = args[0] + for i in index: + expr += arg1[i] + elif nargs == 2: + arg1, arg2 = args for i in index: - term = 1 - for j in num_index: - term *= args[j][i] - expr += term + expr += arg1[i] * arg2[i] + else: + for i in index: + expr += prod(arg[i] for arg in args) + if expr.nargs() > 1: return expr - # - return quicksum((prod(args[j][i] for j in num_index) for i in index), start) + elif not expr.nargs(): + return 0 + else: + return expr.arg(0) elif nargs == 0: # # Sum of reciprocals # - denom_index = range(0,ndenom) - return quicksum((1/prod(denom[j][i] for j in denom_index) for i in index), start) + return quicksum((1 / prod(den[i] for den in denom) for i in index), start) else: # # Sum of fractions # - denom_index = range(0,ndenom) - return quicksum((prod(args[j][i] for j in num_index)/prod(denom[j][i] for j in denom_index) for i in index), start) + return quicksum( + ( + prod(arg[i] for arg in args) / prod(den[i] for den in denom) + for i in index + ), + start, + ) #: An alias for :func:`sum_product ` @@ -261,9 +235,9 @@ def sequence(*args): sequence([start,] stop[, step]) -> generator for a list of integers Return a generator that containing an arithmetic - progression of integers. - sequence(i, j) returns [i, i+1, i+2, ..., j]; - start defaults to 1. + progression of integers. + sequence(i, j) returns [i, i+1, i+2, ..., j]; + start defaults to 1. step specifies the increment (or decrement) For example, sequence(4) returns [1, 2, 3, 4]. """ @@ -272,14 +246,15 @@ def sequence(*args): if len(args) > 3: raise ValueError('sequence expected at most 3 arguments, got %d' % len(args)) if len(args) == 1: - return range(1,args[0]+1) + return range(1, args[0] + 1) if len(args) == 2: - return range(args[0],args[1]+1) - return range(args[0],args[1]+1,args[2]) + return range(args[0], args[1] + 1) + return range(args[0], args[1] + 1, args[2]) + def target_list(x): if isinstance(x, _ComponentBase): - return [ x ] + return [x] elif hasattr(x, '__iter__'): ans = [] for i in x: @@ -288,9 +263,10 @@ def target_list(x): else: raise ValueError( "Expected Component or list of Components." - "\n\tReceived %s" % (type(i),)) + "\n\tReceived %s" % (type(i),) + ) return ans else: raise ValueError( - "Expected Component or list of Components." - "\n\tReceived %s" % (type(x),)) \ No newline at end of file + "Expected Component or list of Components.\n\tReceived %s" % (type(x),) + ) diff --git a/pyomo/dae/__init__.py b/pyomo/dae/__init__.py index 801e96eb229..8d07b184336 100644 --- a/pyomo/dae/__init__.py +++ b/pyomo/dae/__init__.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -# Import the key modeling componente here... +# Import the key modeling components here... from pyomo.dae.contset import ContinuousSet from pyomo.dae.diffvar import DAE_Error, DerivativeVar diff --git a/pyomo/dae/contset.py b/pyomo/dae/contset.py index 7103765ad5f..ee4c9f79e89 100644 --- a/pyomo/dae/contset.py +++ b/pyomo/dae/contset.py @@ -11,78 +11,81 @@ import logging import bisect +from pyomo.common.numeric_types import native_numeric_types from pyomo.common.timing import ConstructionTimer from pyomo.core.base.set import SortedScalarSet from pyomo.core.base.component import ModelComponentFactory -from pyomo.core.base.numvalue import native_numeric_types logger = logging.getLogger('pyomo.dae') __all__ = ['ContinuousSet'] @ModelComponentFactory.register( - "A bounded continuous numerical range optionally containing" - " discrete points of interest.") + "A bounded continuous numerical range optionally containing" + " discrete points of interest." +) class ContinuousSet(SortedScalarSet): - """ Represents a bounded continuous domain - - Minimally, this set must contain two numeric values defining the - bounds of a continuous range. Discrete points of interest may - be added to the continuous set. A continuous set is one - dimensional and may only contain numerical values. - - Parameters - ---------- - initialize : `list` - Default discretization points to be included - - bounds : `tuple` - The bounding points for the continuous domain. The bounds will - be included as discrete points in the :py:class:`ContinuousSet` - and will be used to bound the points added to the - :py:class:`ContinuousSet` through the 'initialize' argument, - a data file, or the add() method - - Attributes - ---------- - _changed : `boolean` - This keeps track of whether or not the ContinuousSet was changed - during discretization. If the user specifies all of the needed - discretization points before the discretization then there is no - need to go back through the model and reconstruct things indexed - by the :py:class:`ContinuousSet` - - _fe : `list` - This is a sorted list of the finite element points in the - :py:class:`ContinuousSet`. i.e. this list contains all the - discrete points in the :py:class:`ContinuousSet` that are not - collocation points. Points that are both finite element points - and collocation points will be included in this list. - - _discretization_info : `dict` - This is a dictionary which contains information on the - discretization transformation which has been applied to the - :py:class:`ContinuousSet`. + """Represents a bounded continuous domain + + Minimally, this set must contain two numeric values defining the + bounds of a continuous range. Discrete points of interest may + be added to the continuous set. A continuous set is one + dimensional and may only contain numerical values. + + Parameters + ---------- + initialize : `list` + Default discretization points to be included + + bounds : `tuple` + The bounding points for the continuous domain. The bounds will + be included as discrete points in the :py:class:`ContinuousSet` + and will be used to bound the points added to the + :py:class:`ContinuousSet` through the 'initialize' argument, + a data file, or the add() method + + Attributes + ---------- + _changed : `boolean` + This keeps track of whether or not the ContinuousSet was changed + during discretization. If the user specifies all of the needed + discretization points before the discretization then there is no + need to go back through the model and reconstruct things indexed + by the :py:class:`ContinuousSet` + + _fe : `list` + This is a sorted list of the finite element points in the + :py:class:`ContinuousSet`. i.e. this list contains all the + discrete points in the :py:class:`ContinuousSet` that are not + collocation points. Points that are both finite element points + and collocation points will be included in this list. + + _discretization_info : `dict` + This is a dictionary which contains information on the + discretization transformation which has been applied to the + :py:class:`ContinuousSet`. """ def __init__(self, *args, **kwds): - """ Constructor """ + """Constructor""" if kwds.pop("filter", None) is not None: - raise TypeError("'filter' is not a valid keyword argument for " - "ContinuousSet") + raise TypeError( + "'filter' is not a valid keyword argument for ContinuousSet" + ) # if kwds.pop("within", None) is not None: # raise TypeError("'within' is not a valid keyword argument for " # ContinuousSet") kwds.setdefault('dimen', 1) if kwds["dimen"] != 1: - raise TypeError("'dimen' is not a valid keyword argument for " - "ContinuousSet") + raise TypeError("'dimen' is not a valid keyword argument for ContinuousSet") if kwds.pop("virtual", None) is not None: - raise TypeError("'virtual' is not a valid keyword argument for " - "ContinuousSet") + raise TypeError( + "'virtual' is not a valid keyword argument for ContinuousSet" + ) if kwds.pop("validate", None) is not None: - raise TypeError("'validate' is not a valid keyword argument for " - "ContinuousSet") + raise TypeError( + "'validate' is not a valid keyword argument for ContinuousSet" + ) if len(args) != 0: raise TypeError("A ContinuousSet expects no arguments") @@ -93,7 +96,7 @@ def __init__(self, *args, **kwds): super(ContinuousSet, self).__init__(**kwds) def get_finite_elements(self): - """ Returns the finite element points + """Returns the finite element points If the :py:class:`ContinuousSet ` has been discretizaed using a collocation scheme, this method will return a @@ -121,7 +124,7 @@ def get_discretization_info(self): return self._discretization_info def get_changed(self): - """ Returns flag indicating if the :py:class:`ContinuousSet` was + """Returns flag indicating if the :py:class:`ContinuousSet` was changed during discretization Returns "True" if additional points were added to the @@ -135,7 +138,7 @@ def get_changed(self): return self._changed def set_changed(self, newvalue): - """ Sets the ``_changed`` flag to 'newvalue' + """Sets the ``_changed`` flag to 'newvalue' Parameters ---------- @@ -144,12 +147,14 @@ def set_changed(self, newvalue): """ # TODO: Check this if-statement if newvalue is not True and newvalue is not False: - raise ValueError("The _changed attribute on a ContinuousSet may " - "only be set to True or False") + raise ValueError( + "The _changed attribute on a ContinuousSet may " + "only be set to True or False" + ) self._changed = newvalue def get_upper_element_boundary(self, point): - """ Returns the first finite element point that is greater or equal + """Returns the first finite element point that is greater or equal to 'point' Parameters @@ -163,9 +168,11 @@ def get_upper_element_boundary(self, point): if point in self._fe: return point elif point > max(self._fe): - logger.warning("The point '%s' exceeds the upper bound " - "of the ContinuousSet '%s'. Returning the upper bound" - % (str(point), self.name)) + logger.warning( + "The point '%s' exceeds the upper bound " + "of the ContinuousSet '%s'. Returning the upper bound" + % (str(point), self.name) + ) return max(self._fe) else: for i in self._fe: @@ -174,7 +181,7 @@ def get_upper_element_boundary(self, point): return i def get_lower_element_boundary(self, point): - """ Returns the first finite element point that is less than or + """Returns the first finite element point that is less than or equal to 'point' Parameters @@ -196,9 +203,11 @@ def get_lower_element_boundary(self, point): return self._fe[tmp - 1] return point elif point < min(self._fe): - logger.warning("The point '%s' is less than the lower bound " - "of the ContinuousSet '%s'. Returning the lower bound " - % (str(point), self.name)) + logger.warning( + "The point '%s' is less than the lower bound " + "of the ContinuousSet '%s'. Returning the lower bound " + % (str(point), self.name) + ) return min(self._fe) else: rev_fe = list(self._fe) @@ -208,9 +217,7 @@ def get_lower_element_boundary(self, point): return i def construct(self, values=None): - """ Constructs a :py:class:`ContinuousSet` component - - """ + """Constructs a :py:class:`ContinuousSet` component""" if self._constructed: return timer = ConstructionTimer(self) @@ -220,8 +227,7 @@ def construct(self, values=None): if type(val) is tuple: raise ValueError("ContinuousSet cannot contain tuples") if val.__class__ not in native_numeric_types: - raise ValueError("ContinuousSet can only contain numeric " - "values") + raise ValueError("ContinuousSet can only contain numeric values") # TBD: If a user specifies bounds they will be added to the set # unless the user specified bounds have been overwritten during @@ -236,20 +242,24 @@ def construct(self, values=None): self.add(bnd) if None in self.bounds(): - raise ValueError("ContinuousSet '%s' must have at least two values" - " indicating the range over which a differential " - "equation is to be discretized" % self.name) + raise ValueError( + "ContinuousSet '%s' must have at least two values" + " indicating the range over which a differential " + "equation is to be discretized" % self.name + ) if len(self) < 2: # (reachable if lb==ub) - raise ValueError("ContinuousSet '%s' must have at least two values" - " indicating the range over which a differential " - "equation is to be discretized" % self.name) + raise ValueError( + "ContinuousSet '%s' must have at least two values" + " indicating the range over which a differential " + "equation is to be discretized" % self.name + ) self._fe = list(self) timer.report() def find_nearest_index(self, target, tolerance=None): - """ Returns the index of the nearest point in the + """Returns the index of the nearest point in the :py:class:`ContinuousSet `. If a tolerance is specified, the index will only be returned @@ -264,14 +274,14 @@ def find_nearest_index(self, target, tolerance=None): Returns ------- - `float` or `None` + `float` or `None` """ lo = 0 hi = len(self) arr = list(self) i = bisect.bisect_right(arr, target, lo=lo, hi=hi) # i is the index at which target should be inserted if it is to be - # right of any equal components. + # right of any equal components. if i == lo: # target is less than every entry of the set @@ -288,7 +298,7 @@ def find_nearest_index(self, target, tolerance=None): # delta = min(delta_left, delta_right) # Tie goes to the index on the left. delta, nearest_index = min( - (abs(target - self.at(j)), j) for j in [i, i+1] + (abs(target - self.at(j)), j) for j in [i, i + 1] ) if tolerance is not None: diff --git a/pyomo/dae/diffvar.py b/pyomo/dae/diffvar.py index bfe8a74342e..8d75b9ae148 100644 --- a/pyomo/dae/diffvar.py +++ b/pyomo/dae/diffvar.py @@ -16,7 +16,7 @@ from pyomo.core.base.var import Var from pyomo.dae.contset import ContinuousSet -__all__ = ('DerivativeVar', 'DAE_Error',) +__all__ = ('DerivativeVar', 'DAE_Error') def create_access_function(var): @@ -24,8 +24,10 @@ def create_access_function(var): This method returns a function that returns a component by calling it rather than indexing it """ + def _fun(*args): return var[args] + return _fun @@ -72,16 +74,17 @@ class DerivativeVar(Var): # linking the :class:`DerivativeVar` to its state :class:`Var`. def __init__(self, sVar, **kwds): - if not isinstance(sVar, Var): raise DAE_Error( "%s is not a variable. Can only take the derivative of a Var" - "component." % sVar) + "component." % sVar + ) if "wrt" in kwds and "withrespectto" in kwds: raise TypeError( "Cannot specify both 'wrt' and 'withrespectto keywords " - "in a DerivativeVar") + "in a DerivativeVar" + ) wrt = kwds.pop('wrt', None) wrt = kwds.pop('withrespectto', wrt) @@ -107,14 +110,15 @@ def __init__(self, sVar, **kwds): raise DAE_Error( "The variable %s is indexed by a Set (%s) with a " "non-fixed dimension. A DerivativeVar may only be " - "indexed by Sets with constant dimension" - % (sVar, s.name)) + "indexed by Sets with constant dimension" % (sVar, s.name) + ) elif _dim is UnknownSetDimen: raise DAE_Error( "The variable %s is indexed by a Set (%s) with an " "unknown dimension. A DerivativeVar may only be " "indexed by Sets with known constant dimension" - % (sVar, s.name)) + % (sVar, s.name) + ) loc += s.dimen num_contset = len(sVar._contset) @@ -122,7 +126,8 @@ def __init__(self, sVar, **kwds): raise DAE_Error( "The variable %s is not indexed by any ContinuousSets. A " "derivative may only be taken with respect to a continuous " - "domain" % sVar) + "domain" % sVar + ) if wrt is None: # Check to be sure Var is indexed by single ContinuousSet and take @@ -131,30 +136,35 @@ def __init__(self, sVar, **kwds): raise DAE_Error( "The variable %s is indexed by multiple ContinuousSets. " "The desired ContinuousSet must be specified using the " - "keyword argument 'wrt'" % sVar) - wrt = [next(iter(sVar._contset.keys())), ] + "keyword argument 'wrt'" % sVar + ) + wrt = [next(iter(sVar._contset.keys()))] elif type(wrt) is ContinuousSet: if wrt not in sVar._contset: raise DAE_Error( "Invalid derivative: The variable %s is not indexed by " - "the ContinuousSet %s" % (sVar, wrt)) - wrt = [wrt, ] + "the ContinuousSet %s" % (sVar, wrt) + ) + wrt = [wrt] elif type(wrt) is tuple or type(wrt) is list: for i in wrt: if type(i) is not ContinuousSet: raise DAE_Error( "Cannot take the derivative with respect to %s. " "Expected a ContinuousSet or a tuple of " - "ContinuousSets" % i) + "ContinuousSets" % i + ) if i not in sVar._contset: raise DAE_Error( "Invalid derivative: The variable %s is not indexed " - "by the ContinuousSet %s" % (sVar, i)) + "by the ContinuousSet %s" % (sVar, i) + ) wrt = list(wrt) else: raise DAE_Error( "Cannot take the derivative with respect to %s. " - "Expected a ContinuousSet or a tuple of ContinuousSets" % i) + "Expected a ContinuousSet or a tuple of ContinuousSets" % i + ) wrtkey = [str(i) for i in wrt] wrtkey.sort() @@ -164,7 +174,8 @@ def __init__(self, sVar, **kwds): raise DAE_Error( "Cannot create a new derivative variable for variable " "%s: derivative already defined as %s" - % (sVar.name, sVar._derivative[wrtkey]().name)) + % (sVar.name, sVar._derivative[wrtkey]().name) + ) sVar._derivative[wrtkey] = weakref.ref(self) self._sVar = sVar @@ -172,11 +183,10 @@ def __init__(self, sVar, **kwds): kwds.setdefault('ctype', DerivativeVar) - Var.__init__(self,sVar.index_set(),**kwds) - + Var.__init__(self, sVar.index_set(), **kwds) def get_continuousset_list(self): - """ Return the a list of :py:class:`ContinuousSet` components the + """Return the a list of :py:class:`ContinuousSet` components the derivative is being taken with respect to. Returns @@ -201,7 +211,7 @@ def is_fully_discretized(self): return True def get_state_var(self): - """ Return the :py:class:`Var` that is being differentiated. + """Return the :py:class:`Var` that is being differentiated. Returns ------- @@ -224,9 +234,8 @@ def get_derivative_expression(self): return self._expr def set_derivative_expression(self, expr): - """ Sets``_expr``, an expression representing the discretization + """Sets``_expr``, an expression representing the discretization equations linking the :class:`DerivativeVar` to its state :class:`Var` """ self._expr = expr - diff --git a/pyomo/dae/flatten.py b/pyomo/dae/flatten.py index 922ffa90d18..595f90b3dc7 100644 --- a/pyomo/dae/flatten.py +++ b/pyomo/dae/flatten.py @@ -8,14 +8,17 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +"""A module for "flattening" the components in a block-hierarchical model +with respect to common indexing sets + +""" + from pyomo.core.base import Block, Reference from pyomo.common.collections import ComponentSet, ComponentMap from pyomo.core.base.block import SubclassOf from pyomo.core.base.set import SetProduct -from pyomo.core.base.indexed_component import ( - UnindexedComponent_set, - normalize_index, - ) +from pyomo.core.base.indexed_component import UnindexedComponent_set, normalize_index +from pyomo.core.base.component import ActiveComponent from pyomo.core.base.indexed_component_slice import IndexedComponent_slice from collections import OrderedDict @@ -31,7 +34,7 @@ def get_slice_for_set(s): if s.dimen is not None: # We will arrive here and fail for sets of dimension # UnknownSetDimen. - return (slice(None),)*s.dimen + return (slice(None),) * s.dimen else: return (Ellipsis,) else: @@ -41,11 +44,12 @@ def get_slice_for_set(s): class _NotAnIndex(object): - """ - `None` is a valid index, so we use a dummy class to + """ + `None` is a valid index, so we use a dummy class to denote a slot that needs to get filled with indices from our product. """ + pass @@ -72,7 +76,7 @@ def _fill_indices(filled_index, index): def _fill_indices_from_product(partial_index_list, product): - """ + """ `partial_index_list` is a list of indices, each corresponding to a set. If an entry in `partial_index_list` is `_NotAnIndex`, that slot will get filled in by an entry from `product`. @@ -123,11 +127,8 @@ def _fill_indices_from_product(partial_index_list, product): normalize_index.flatten = _normalize_index_flatten -def slice_component_along_sets( - component, sets, context_slice=None, normalize=None, - ): - """ - This function generates all possible slices of the provided component +def slice_component_along_sets(component, sets, context_slice=None, normalize=None): + """This function generates all possible slices of the provided component along the provided sets. That is, it will iterate over the component's other indexing sets and, for each index, yield a slice along the sets specified in the call signature. @@ -156,8 +157,7 @@ def slice_component_along_sets( """ set_set = ComponentSet(sets) subsets = list(component.index_set().subsets()) - temp_idx = [get_slice_for_set(s) if s in set_set else _NotAnIndex - for s in subsets] + temp_idx = [get_slice_for_set(s) if s in set_set else _NotAnIndex for s in subsets] other_sets = [s for s in subsets if s not in set_set] if context_slice is None: @@ -179,10 +179,7 @@ def slice_component_along_sets( # singleton to work in the embedded call to _fill_indices. cross_prod = [tuple()] - for prod_index, new_index in _fill_indices_from_product( - temp_idx, - cross_prod, - ): + for prod_index, new_index in _fill_indices_from_product(temp_idx, cross_prod): try: if normalize_index.flatten: # This index is always normalized if normalize_index.flatten @@ -205,14 +202,13 @@ def slice_component_along_sets( # We enter this loop even if no sets need slicing. temp_slice = c_slice.duplicate() next(iter(temp_slice)) - if ((normalize is None and normalize_index.flatten) - or normalize): + if (normalize is None and normalize_index.flatten) or normalize: # Most users probably want this index to be normalized, # so they can more conveniently use it as a key in a # mapping. (E.g. they will get "a" as opposed to ("a",).) # However, to use it in the calling routine # generate_sliced_components, we need this index to not - # have been normalized, so that indices are tuples, + # have been normalized, so that indices are tuples, # partitioned according to their "factor sets." # This is why we allow the argument normalize=False to # override normalize_index.flatten. @@ -234,34 +230,75 @@ def slice_component_along_sets( yield (), c_slice -def generate_sliced_components(b, index_stack, slice_, sets, ctype, index_map): - """ - Recursively generate sliced components of a block and its subblocks, along - with the sets that were sliced for each component. +def generate_sliced_components( + b, index_stack, slice_, sets, ctype, index_map, active=None +): + """Recursively generate slices of the specified ctype along the + specified sets + + Parameters + ---------- + + b: _BlockData + Block whose components will be sliced + + index_stack: list + Sets above ``b`` in the block hierarchy, including on its parent + component, that have been sliced. This is necessary to return the + sets that have been sliced. + + slice_: IndexedComponent_slice or _BlockData + Slice generated so far. This function will yield extensions to + this slice at the current level of the block hierarchy. - `b` is a _BlockData object. + sets: ComponentSet of Pyomo sets + Sets that will be sliced - `index_stack` is a list of indices "above" `b` in the - hierarchy. Note that `b` is a data object, so any index - of its parent component should be included in the stack. + ctype: Subclass of Component + Type of components to generate - `slice_` is the slice generated so far. Our goal here is to - yield extensions to `slice_` at this level of the hierarchy. + index_map: ComponentMap + Map from (some of) the specified sets to a "representative index" + to use when descending into subblocks. While this map does not need + to contain every set in the sliced sets, it must not contain any + sets that will not be sliced. - `sets` is a ComponentSet of Pyomo sets that should be sliced. + active: Bool or None + If not None, this is a boolean flag used to filter component objects + by their active status. - `ctype` is the type we are looking for. + Yields + ------ + + Tuple of Sets and an IndexedComponent_slice or ComponentData + The sets indexing the returned component or slice. If the component + is indexed, an IndexedComponent_slice is returned. Otherwise, a + ComponentData is returned. - `index_map` is potentially a map from each set in `sets` to a - "representative index" to use when descending into subblocks. """ if type(slice_) is IndexedComponent_slice: context_slice = slice_.duplicate() else: context_slice = None - # Looks for components indexed by these sets immediately in our block - for c in b.component_objects(ctype, descend_into=False): + # If active argument is specified and does not match the block's + # active flag, we return immediately. This matches the behavior of + # component_objects. We only need this check as we may modify the + # active argument sent to component_objects if ctype is not an + # ActiveComponent type. + if active is not None and active != b.active: + return + + # Define this class so we don't have to call issubclass again later. + check_active = issubclass(ctype, ActiveComponent) and (active != None) + + # If active=False and ctype is not an ActiveComponent (e.g. it is Var) + # we will not generate any components. To prevent this, only pass the + # active argument if we are looking for active components. + c_active = active if check_active else None + + # Looks for components indexed by specified sets immediately in our block. + for c in b.component_objects(ctype, descend_into=False, active=c_active): subsets = list(c.index_set().subsets()) new_sets = [s for s in subsets if s in sets] # Extend our "index stack" @@ -269,12 +306,31 @@ def generate_sliced_components(b, index_stack, slice_, sets, ctype, index_map): # Extend our slice with this component for idx, new_slice in slice_component_along_sets( - c, sets, context_slice=context_slice, normalize=False - ): - yield sliced_sets, new_slice + c, sets, context_slice=context_slice, normalize=False + ): + # If we have to check activity, check data objects defined by + # slice. If any match, we yield the slice. This is done for + # compatibility with the behavior when slicing blocks, where + # we can only descend into a block that matches our active flag. + # + # Note that new_slice can be a data object. This happens if the + # component doesn't contain any sets we are slicing, i.e. new_sets + # is empty. + if ( + # Yield if (a) we're not checking activity + not check_active + # or (b) we have not sliced and data object activity matches + or (not sliced_sets and new_slice.active == c_active) + # or (c) we did slice and *any* data object activity matches + or ( + sliced_sets + and any(data.active == c_active for data in new_slice.duplicate()) + ) + ): + yield sliced_sets, new_slice # We now descend into subblocks - for sub in b.component_objects(Block, descend_into=False): + for sub in b.component_objects(Block, descend_into=False, active=active): subsets = list(sub.index_set().subsets()) new_sets = [s for s in subsets if s in sets] @@ -283,11 +339,20 @@ def generate_sliced_components(b, index_stack, slice_, sets, ctype, index_map): # Need to construct an index to descend into for each slice-of-block # we are about generate. + # Note that any remaining _NotAnIndex placeholders after this loop + # will be replaced with the corresponding indices of the non-sliced + # sets. given_descend_idx = [_NotAnIndex for _ in subsets] for i, s in enumerate(subsets): + # NOTE: index_map better only contain sets that we are slicing. if s in index_map: - # Use a user-given index if available + # Use a user-given index if available. given_descend_idx[i] = index_map[s] + if s not in sets: + raise RuntimeError( + "Encountered a specified index for a set %s that we" + " are not slicing. This is not supported" % s + ) elif s in sets: # Otherwise use a slice. We will advanced the slice iter # to try to get a concrete component from this slice. @@ -295,8 +360,10 @@ def generate_sliced_components(b, index_stack, slice_, sets, ctype, index_map): # Generate slices from this sub-block for idx, new_slice in slice_component_along_sets( - sub, sets, context_slice=context_slice, normalize=False - ): + sub, sets, context_slice=context_slice, normalize=False + ): + # TODO: Can this branch happen outside of the loop? + # If it's not indexed, we don't need to slice... if sub.is_indexed(): # fill any remaining placeholders with the "index" of our slice descend_idx = _fill_indices(list(given_descend_idx), idx) @@ -304,20 +371,42 @@ def generate_sliced_components(b, index_stack, slice_, sets, ctype, index_map): descend_data = sub[descend_idx] if type(descend_data) is IndexedComponent_slice: try: - # Attempt to find a data object matching this slice - descend_data = next(iter(descend_data)) + slice_iter = iter(descend_data) + # Try to find a data object defined by the slice + # that matches the active argument. In doing so, + # we treat a slice as inactive if all of its data + # objects are inactive. We need to find a data obj + # with the correct active flag, otherwise we run into + # problems when we descend (component_objects will + # not yield anything). + _data = next(slice_iter) + while active is not None and _data.active != active: + _data = next(slice_iter) + descend_data = _data except StopIteration: - # For this particular idx (and given indices), no - # block data object exists to descend into. - # Not sure if we should raise an error here... -RBP + # For this particular idx, we have no BlockData + # to descend into. continue + elif active is not None and descend_data.active != active: + # descend_data is a BlockData object. This particular + # BlockData was specified by the index map. In this case, + # we want to respect "activity". + continue else: + # Have encountered a ScalarBlock. Do not need to check the + # active flag as this came straight from component_objects. descend_data = sub - + # Recursively generate sliced components from this data object for st, v in generate_sliced_components( - descend_data, index_stack, new_slice, sets, ctype, index_map - ): + descend_data, + index_stack, + new_slice, + sets, + ctype, + index_map, + active=active, + ): yield tuple(st), v # pop the index sets of the block whose sub-components @@ -326,25 +415,48 @@ def generate_sliced_components(b, index_stack, slice_, sets, ctype, index_map): index_stack.pop() -def flatten_components_along_sets(m, sets, ctype, indices=None): - """ - This function iterates over components (recursively) contained +def flatten_components_along_sets(m, sets, ctype, indices=None, active=None): + """This function iterates over components (recursively) contained in a block and partitions their data objects into components indexed only by the specified sets. - Args: - m : Block whose components (and their sub-components) will be - partitioned - sets : Possible indexing sets for the returned components - ctype : Type of component to identify and partition - indices : indices of sets to use when descending into subblocks - - Returns: - tuple: The first entry is a list of tuples of Pyomo Sets. The - second is a list of lists of components, each indexed by - the corresponding sets in the first entry. - + Parameters + ---------- + + m: _BlockData + Block whose components (and their sub-components) will be + partitioned + + sets: Tuple of Pyomo Sets + Sets to be sliced. Returned components will be indexed by + some combination of these sets, if at all. + + ctype: Subclass of Component + Type of component to identify and partition + + indices: Iterable or ComponentMap + Indices of sets to use when descending into subblocks. If an + iterable is provided, the order corresponds to the order in + ``sets``. If a ``ComponentMap`` is provided, the keys must be + in ``sets``. + + active: Bool or None + If not None, this is a boolean flag used to filter component objects + by their active status. A reference-to-slice is returned if any data + object defined by the slice matches this flag. + + Returns + ------- + + List of tuples of Sets, list of lists of Components + The first entry is a list of tuples of Pyomo Sets. The second is a + list of lists of Components, indexed by the corresponding sets in + the first list. If the components are unindexed, ComponentData are + returned and the tuple of sets contains only UnindexedComponent_set. + If the components are indexed, they are references-to-slices. + """ + set_of_sets = ComponentSet(sets) if indices is None: index_map = ComponentMap() elif type(indices) is ComponentMap: @@ -352,20 +464,26 @@ def flatten_components_along_sets(m, sets, ctype, indices=None): else: index_map = ComponentMap(zip(sets, indices)) for s, idx in index_map.items(): - if not idx in s: + if idx not in s: raise ValueError( "%s is a bad index for set %s. \nPlease provide an index " "that is in the set." % (idx, s.name) ) + if s not in set_of_sets: + raise RuntimeError( + "Index specified for set %s that is not one of the sets" + " that will be sliced. Indices should only be provided" + " for sets that will be sliced." % s.name + ) index_stack = [] - set_of_sets = ComponentSet(sets) # Using these two `OrderedDict`s is a workaround because I can't # reliably use tuples of components as keys in a `ComponentMap`. sets_dict = OrderedDict() comps_dict = OrderedDict() - for index_sets, slice_ in generate_sliced_components(m, index_stack, - m, set_of_sets, ctype, index_map): + for index_sets, slice_ in generate_sliced_components( + m, index_stack, m, set_of_sets, ctype, index_map, active=active + ): # Note that index_sets should always be a tuple, never a scalar. # TODO: Potentially re-order sets at this point. @@ -401,10 +519,44 @@ def flatten_components_along_sets(m, sets, ctype, indices=None): return sets_list, comps_list -def flatten_dae_components(model, time, ctype, indices=None): +def flatten_dae_components(model, time, ctype, indices=None, active=None): + """Partitions components into ComponentData and Components indexed only + by the provided set. + + Parameters + ---------- + + model: _BlockData + Block whose components are partitioned + + time: Set + Indexing by this set (and only this set) will be preserved in the + returned components. + + ctype: Subclass of Component + Type of component to identify, partition, and return + + indices: Tuple or ComponentMap + Contains the index of the specified set to be used when descending + into blocks + + active: Bool or None + If provided, used as a filter to only return components with the + specified active flag. A reference-to-slice is returned if any + data object defined by the slice matches this flag. + + Returns + ------- + List of ComponentData, list of Component + The first list contains ComponentData for all components not + indexed by the provided set. The second contains references-to + -slices for all components indexed by the provided set. + + """ target = ComponentSet((time,)) - sets_list, comps_list = flatten_components_along_sets(model, target, ctype, - indices=indices) + sets_list, comps_list = flatten_components_along_sets( + model, target, ctype, indices=indices, active=active + ) # Initialize these variables as, if no components of either category are # found, we expect to get an empty list. scalar_comps = [] @@ -412,13 +564,12 @@ def flatten_dae_components(model, time, ctype, indices=None): for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is time: dae_comps = comps - elif len(sets) == 0 or (len(sets) == 1 and - sets[0] is UnindexedComponent_set): + elif len(sets) == 0 or (len(sets) == 1 and sets[0] is UnindexedComponent_set): scalar_comps = comps else: raise RuntimeError( "Invalid model for `flatten_dae_components`.\n" "This can happen if your model has components that are\n" "indexed by time (explicitly or implicitly) multiple times." - ) + ) return scalar_comps, dae_comps diff --git a/pyomo/dae/initialization.py b/pyomo/dae/initialization.py index 314e700fc90..c10ccb023d1 100644 --- a/pyomo/dae/initialization.py +++ b/pyomo/dae/initialization.py @@ -11,13 +11,18 @@ from pyomo.common.collections import ComponentSet from pyomo.core.base import Constraint, Block, value -from pyomo.dae.set_utils import (is_explicitly_indexed_by, - get_index_set_except, is_in_block_indexed_by, - deactivate_model_at, index_warning) - - -def get_inconsistent_initial_conditions(model, time, tol=1e-8, t0=None, - allow_skip=True, suppress_warnings=False): +from pyomo.dae.set_utils import ( + is_explicitly_indexed_by, + get_index_set_except, + is_in_block_indexed_by, + deactivate_model_at, + index_warning, +) + + +def get_inconsistent_initial_conditions( + model, time, tol=1e-8, t0=None, allow_skip=True, suppress_warnings=False +): """Finds constraints of the model that are implicitly or explicitly indexed by time and checks if they are consistent to within a tolerance at the initial value of time. @@ -54,8 +59,10 @@ def get_inconsistent_initial_conditions(model, time, tol=1e-8, t0=None, if not allow_skip: raise continue - if (value(condata.body) - value(condata.upper) > tol or - value(condata.lower) - value(condata.body) > tol): + if ( + value(condata.body) - value(condata.upper) > tol + or value(condata.lower) - value(condata.body) > tol + ): inconsistent.add(condata) for blk in model.component_objects(Block, active=True): @@ -74,22 +81,25 @@ def get_inconsistent_initial_conditions(model, time, tol=1e-8, t0=None, for non_time_index in non_time_set: index = index_getter(non_time_index, t0) blkdata = blk[index] - for condata in blkdata.component_data_objects(Constraint, - active=True): - if (value(condata.body) - value(condata.upper) > tol or - value(condata.lower) - value(condata.body) > tol): + for condata in blkdata.component_data_objects(Constraint, active=True): + if ( + value(condata.body) - value(condata.upper) > tol + or value(condata.lower) - value(condata.body) > tol + ): if condata in inconsistent: raise ValueError( '%s has already been visited. The only way this ' 'should happen is if the model has nested time-' - 'indexed blocks, which is not supported.') + 'indexed blocks, which is not supported.' + ) inconsistent.add(condata) return list(inconsistent) -def solve_consistent_initial_conditions(model, time, solver, tee=False, - allow_skip=True, suppress_warnings=False): +def solve_consistent_initial_conditions( + model, time, solver, tee=False, allow_skip=True, suppress_warnings=False +): """ Solves a model with all Constraints and Blocks deactivated except at the initial value of the Set time. Reactivates Constraints and @@ -114,7 +124,7 @@ def solve_consistent_initial_conditions(model, time, solver, tee=False, # is to identify_variables in the expression, find the (assume only one?) # DerivativeVar, and access its get_continuousset_list # I would like a get_continuousset_list for discretization equations. - # Possibly as a ComponentMap, possibly as an attribute of some new + # Possibly as a ComponentMap, possibly as an attribute of some new # DiscEquation subclass of Constraint # Until I have this, this function will only work for backward # discretization schemes @@ -123,16 +133,16 @@ def solve_consistent_initial_conditions(model, time, solver, tee=False, scheme = time.get_discretization_info()['scheme'] if scheme != 'LAGRANGE-RADAU' and scheme != 'BACKWARD Difference': - raise NotImplementedError( - '%s discretization scheme is not supported' % scheme) + raise NotImplementedError('%s discretization scheme is not supported' % scheme) timelist = list(time)[1:] deactivated_dict = deactivate_model_at( - model, - time, - timelist, - allow_skip=allow_skip, - suppress_warnings=suppress_warnings) + model, + time, + timelist, + allow_skip=allow_skip, + suppress_warnings=suppress_warnings, + ) result = solver.solve(model, tee=tee) @@ -141,4 +151,3 @@ def solve_consistent_initial_conditions(model, time, solver, tee=False, comp.activate() return result - diff --git a/pyomo/dae/integral.py b/pyomo/dae/integral.py index fc5d97d3688..302e50a007d 100644 --- a/pyomo/dae/integral.py +++ b/pyomo/dae/integral.py @@ -12,14 +12,16 @@ from pyomo.common.deprecation import RenamedClass from pyomo.core.base.component import ModelComponentFactory from pyomo.core.base.indexed_component import rule_wrapper -from pyomo.core.base.expression import (Expression, - _GeneralExpressionData, - ScalarExpression, - IndexedExpression) +from pyomo.core.base.expression import ( + Expression, + _GeneralExpressionData, + ScalarExpression, + IndexedExpression, +) from pyomo.dae.contset import ContinuousSet from pyomo.dae.diffvar import DAE_Error -__all__ = ('Integral', ) +__all__ = ('Integral',) @ModelComponentFactory.register("Integral Expression in a DAE model.") @@ -58,10 +60,8 @@ def __new__(cls, *args, **kwds): return IndexedIntegral.__new__(IndexedIntegral) def __init__(self, *args, **kwds): - if "wrt" in kwds and "withrespectto" in kwds: - raise TypeError( - "Cannot specify both 'wrt' and 'withrespectto keywords") + raise TypeError("Cannot specify both 'wrt' and 'withrespectto keywords") wrt = kwds.pop('wrt', None) wrt = kwds.pop('withrespectto', wrt) @@ -74,13 +74,15 @@ def __init__(self, *args, **kwds): raise ValueError( "Integral indexed by multiple ContinuousSets. " "The desired ContinuousSet must be specified using the " - "keyword argument 'wrt'") + "keyword argument 'wrt'" + ) wrt = args[0] if type(wrt) is not ContinuousSet: raise ValueError( "Cannot take the integral with respect to '%s'. Must take an " - "integral with respect to a ContinuousSet" % wrt) + "integral with respect to a ContinuousSet" % wrt + ) self._wrt = wrt loc = None @@ -92,44 +94,53 @@ def __init__(self, *args, **kwds): if loc is None: raise ValueError( "The ContinuousSet '%s' was not found in the indexing sets " - "of the Integral" % wrt.name) + "of the Integral" % wrt.name + ) self.loc = loc # Remove the index that the integral is being expanded over - arg = args[0:loc] + args[loc + 1:] + arg = args[0:loc] + args[loc + 1 :] # Check that if bounds are given bounds = kwds.pop('bounds', None) if bounds is not None: raise DAE_Error( "Setting bounds on integrals has not yet been implemented. " - "Integrals may only be taken over an entire ContinuousSet") + "Integrals may only be taken over an entire ContinuousSet" + ) # Create integral expression and pass to the expression initialization intexp = kwds.pop('expr', None) intexp = kwds.pop('rule', intexp) if intexp is None: - raise ValueError( - "Must specify an integral expression") + raise ValueError("Must specify an integral expression") _is_indexed = bool(len(arg)) def _trap_rule(rule, m, *a): ds = sorted(m.find_component(wrt.local_name)) - return sum(0.5 * (ds[i + 1] - ds[i]) * - (rule(m, * (a[0:loc] + (ds[i + 1],) + a[loc:])) + - rule(m, * (a[0:loc] + (ds[i],) + a[loc:]))) - for i in range(len(ds) - 1)) + return sum( + 0.5 + * (ds[i + 1] - ds[i]) + * ( + rule(m, *(a[0:loc] + (ds[i + 1],) + a[loc:])) + + rule(m, *(a[0:loc] + (ds[i],) + a[loc:])) + ) + for i in range(len(ds) - 1) + ) # Note that position_map is mapping arguments (block, *args), so # must be 1 more than len(args), and loc has to be offset by one - kwds['rule'] = rule_wrapper(intexp, _trap_rule, positional_arg_map=( - i for i in range(len(args)+1) if i != loc+1)) + kwds['rule'] = rule_wrapper( + intexp, + _trap_rule, + positional_arg_map=(i for i in range(len(args) + 1) if i != loc + 1), + ) kwds.setdefault('ctype', Integral) Expression.__init__(self, *arg, **kwds) def get_continuousset(self): - """ Return the :py:class:`ContinuousSet` + """Return the :py:class:`ContinuousSet` the integral is being taken over """ return self._wrt @@ -137,8 +148,8 @@ def get_continuousset(self): class ScalarIntegral(ScalarExpression, Integral): """ - An integral that will have no indexing sets after applying a numerical - integration transformation + An integral that will have no indexing sets after applying a numerical + integration transformation """ def __init__(self, *args, **kwds): @@ -180,7 +191,7 @@ def is_fully_discretized(self): setlist = [] if self.dim() == 1: - setlist = [self.index_set(), ] + setlist = [self.index_set()] else: setlist = self.index_set().set_tuple @@ -189,5 +200,3 @@ def is_fully_discretized(self): if 'scheme' not in i.get_discretization_info(): return False return True - - diff --git a/pyomo/dae/misc.py b/pyomo/dae/misc.py index 89ee6de129b..9b867bcfff4 100644 --- a/pyomo/dae/misc.py +++ b/pyomo/dae/misc.py @@ -18,6 +18,7 @@ from pyomo.core.base.misc import apply_indexed_rule from pyomo.core.base.block import IndexedBlock, SortComponents from pyomo.dae import ContinuousSet, DAE_Error +from pyomo.common.formatting import tostr from io import StringIO @@ -57,7 +58,7 @@ def generate_finite_elements(ds, nfe): # is placed at the midpoint of the largest step. This # process is repeated until we have achieved the desired # number of finite elements. If there are multiple "largest steps" - # the point will be placed at the first occurance of the + # the point will be placed at the first occurrence of the # largest step addpts = nfe - (len(ds) - 1) @@ -128,7 +129,6 @@ def expand_components(block): # swallowed by the LoggingIntercept context below are re-raised if the # discretization encounters an error it isn't expecting. try: - # Intercept logging to suppress Error messages arising from failed # constraint rules. These error messages get logged even though the # AttributeError causing the error is caught and handled by this @@ -137,11 +137,11 @@ def expand_components(block): # unexpected exception is raised. buf = StringIO() with LoggingIntercept(buf, 'pyomo.core', logging.ERROR): - # Identify components that need to be expanded and try expanding # them - for c in block.component_objects(descend_into=True, - sort=SortComponents.declOrder): + for c in block.component_objects( + descend_into=True, sort=SortComponents.declOrder + ): try: update_contset_indexed_component(c, expansion_map) except AttributeError: @@ -160,13 +160,14 @@ def expand_components(block): except AttributeError: redo_expansion.append(c) if len(redo_expansion) == N: - raise DAE_Error("Unable to fully discretize %s. Possible " - "circular references detected between " - "components %s. Reformulate your model to" - " remove circular references or apply a " - "discretization transformation before " - "linking blocks together." - % (block, str(redo_expansion))) + raise DAE_Error( + "Unable to fully discretize %s. Possible " + "circular references detected between " + "components %s. Reformulate your model to" + " remove circular references or apply a " + "discretization transformation before " + "linking blocks together." % (block, tostr(redo_expansion)) + ) N = len(redo_expansion) @@ -174,13 +175,14 @@ def expand_components(block): logger.error(buf.getvalue()) raise + def update_contset_indexed_component(comp, expansion_map): """ Update any model components which are indexed by a ContinuousSet that has changed """ - # This implemenation will *NOT* check for or update + # This implementation will *NOT* check for or update # components which use a ContinuousSet implicitly. ex) an # objective function which iterates through a ContinuousSet and # sums the squared error. If you use a ContinuousSet implicitly @@ -189,7 +191,7 @@ def update_contset_indexed_component(comp, expansion_map): if comp.ctype is Suffix: return - + # Params indexed by a ContinuousSet should include an initialize # and/or default rule which will be called automatically when the # parameter value at a new point in the ContinuousSet is @@ -201,6 +203,7 @@ def update_contset_indexed_component(comp, expansion_map): # Integral components are handled after every ContinuousSet has been # discretized. Import is deferred to here due to circular references. from pyomo.dae import Integral + if comp.ctype is Integral: return @@ -234,11 +237,11 @@ def update_contset_indexed_component(comp, expansion_map): expansion_map[comp] = _update_expression _update_expression(comp) elif isinstance(comp, Piecewise): - expansion_map[comp] =_update_piecewise + expansion_map[comp] = _update_piecewise _update_piecewise(comp) elif comp.ctype == Block: expansion_map[comp] = _update_block - _update_block(comp) + _update_block(comp) else: raise TypeError( "Found component %s of type %s indexed " @@ -247,7 +250,8 @@ def update_contset_indexed_component(comp, expansion_map): "discretization transformation in pyomo.dae. " "Try adding the component to the model " "after discretizing. Alert the pyomo developers " - "for more assistance." % (str(comp), comp.ctype)) + "for more assistance." % (str(comp), comp.ctype) + ) def _update_var(v): @@ -296,12 +300,12 @@ def _update_block(blk): sufficient to update it correctly. """ - + # Check if Block construct method is overridden # getattr needed below for Python 2, 3 compatibility - if blk.construct.__func__ is not getattr(IndexedBlock.construct, - '__func__', - IndexedBlock.construct): + if blk.construct.__func__ is not getattr( + IndexedBlock.construct, '__func__', IndexedBlock.construct + ): # check for custom update function if hasattr(blk, 'update_after_discretization'): blk.update_after_discretization() @@ -316,7 +320,8 @@ def _update_block(blk): 'that the component was expanded correctly. To suppress this ' 'warning, please provide an update_after_discretization() ' 'function on Block-derived components that override ' - 'construct()' % blk.name) + 'construct()' % blk.name + ) missing_idx = getattr(blk, '_dae_missing_idx', set([])) for idx in list(missing_idx): @@ -342,8 +347,10 @@ def create_access_function(var): This method returns a function that returns a component by calling it rather than indexing it """ + def _fun(*args): return var[args] + return _fun @@ -355,9 +362,10 @@ def create_partial_expression(scheme, expr, ind, loc): discretization scheme to one indexing set at a time but we also want the function to be expanded over any other indexing sets. """ + def _fun(*args): - return scheme(lambda i: expr(*(args[0:loc] + (i,) + args[loc + 1:])), - ind) + return scheme(lambda i: expr(*(args[0:loc] + (i,) + args[loc + 1 :])), ind) + return lambda *args: _fun(*args)(args[loc]) @@ -374,8 +382,9 @@ def _disc_eq(m, *args): except IndexError: return Constraint.Skip - block.add_component(d.local_name + '_disc_eq', - Constraint(d.index_set(), rule=_disc_eq)) + block.add_component( + d.local_name + '_disc_eq', Constraint(d.index_set(), rule=_disc_eq) + ) def add_continuity_equations(block, d, i, loc): @@ -394,16 +403,17 @@ def _cont_exp(v, s): def _fun(i): tmp = list(s) - idx = s.ord(i)-1 + idx = s.ord(i) - 1 low = s.get_lower_element_boundary(i) if i != low or idx == 0: raise IndexError("list index out of range") low = s.get_lower_element_boundary(tmp[idx - 1]) - lowidx = s.ord(low)-1 + lowidx = s.ord(low) - 1 return sum(v(tmp[lowidx + j]) * afinal[j] for j in range(ncp + 1)) + return _fun - expr = create_partial_expression(_cont_exp, create_access_function(svar), - i, loc) + + expr = create_partial_expression(_cont_exp, create_access_function(svar), i, loc) def _cont_eq(m, *args): try: @@ -411,8 +421,7 @@ def _cont_eq(m, *args): except IndexError: return Constraint.Skip - block.add_component(nme, Constraint(d.index_set(), - rule=_cont_eq)) + block.add_component(nme, Constraint(d.index_set(), rule=_cont_eq)) def block_fully_discretized(b): @@ -450,7 +459,7 @@ def get_index_information(var, ds): # If var is indexed by multiple ContinuousSets treat # other ContinuousSets like a normal indexing set indargs.append(index) - indCount += 1 # A ContinuousSet must be one dimensional + indCount += 1 # A ContinuousSet must be one dimensional else: indargs.append(index) indCount += index.dimen @@ -489,7 +498,7 @@ def _get_idx(l, ds, n, i, k): points. """ t = list(ds) - tmp = ds.ord(ds._fe[i])-1 + tmp = ds.ord(ds._fe[i]) - 1 tik = t[tmp + k] if n is None: return tik diff --git a/pyomo/dae/plugins/__init__.py b/pyomo/dae/plugins/__init__.py index ce455a55d77..96ab91b0ac0 100644 --- a/pyomo/dae/plugins/__init__.py +++ b/pyomo/dae/plugins/__init__.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.dae.plugins.colloc import pyomo.dae.plugins.finitedifference - diff --git a/pyomo/dae/plugins/colloc.py b/pyomo/dae/plugins/colloc.py index 0af6d2f4545..a0ad8bc108f 100644 --- a/pyomo/dae/plugins/colloc.py +++ b/pyomo/dae/plugins/colloc.py @@ -44,14 +44,18 @@ def _lagrange_radau_transform(v, s): def _fun(i): tmp = list(s) - idx = s.ord(i)-1 + idx = s.ord(i) - 1 if idx == 0: # Don't apply this equation at initial point raise IndexError("list index out of range") low = s.get_lower_element_boundary(i) - lowidx = s.ord(low)-1 - return sum(v(tmp[lowidx + j]) * adot[j][idx - lowidx] * - (1.0 / (tmp[lowidx + ncp] - tmp[lowidx])) - for j in range(ncp + 1)) + lowidx = s.ord(low) - 1 + return sum( + v(tmp[lowidx + j]) + * adot[j][idx - lowidx] + * (1.0 / (tmp[lowidx + ncp] - tmp[lowidx])) + for j in range(ncp + 1) + ) + return _fun @@ -61,14 +65,19 @@ def _lagrange_radau_transform_order2(v, s): def _fun(i): tmp = list(s) - idx = s.ord(i)-1 - if idx == 0: # Don't apply this equation at initial point + idx = s.ord(i) - 1 + if idx == 0: + # Don't apply this equation at initial point raise IndexError("list index out of range") low = s.get_lower_element_boundary(i) - lowidx = s.ord(low)-1 - return sum(v(tmp[lowidx + j]) * adotdot[j][idx - lowidx] * - (1.0 / (tmp[lowidx + ncp] - tmp[lowidx]) ** 2) - for j in range(ncp + 1)) + lowidx = s.ord(low) - 1 + return sum( + v(tmp[lowidx + j]) + * adotdot[j][idx - lowidx] + * (1.0 / (tmp[lowidx + ncp] - tmp[lowidx]) ** 2) + for j in range(ncp + 1) + ) + return _fun @@ -78,18 +87,23 @@ def _lagrange_legendre_transform(v, s): def _fun(i): tmp = list(s) - idx = s.ord(i)-1 - if idx == 0: # Don't apply this equation at initial point + idx = s.ord(i) - 1 + if idx == 0: + # Don't apply this equation at initial point raise IndexError("list index out of range") - elif i in s.get_finite_elements(): # Don't apply at finite element - # points continuity equations - # added later + elif i in s.get_finite_elements(): + # Don't apply at finite element points continuity equations + # added later raise IndexError("list index out of range") low = s.get_lower_element_boundary(i) - lowidx = s.ord(low)-1 - return sum(v(tmp[lowidx + j]) * adot[j][idx - lowidx] * - (1.0 / (tmp[lowidx + ncp + 1] - tmp[lowidx])) - for j in range(ncp + 1)) + lowidx = s.ord(low) - 1 + return sum( + v(tmp[lowidx + j]) + * adot[j][idx - lowidx] + * (1.0 / (tmp[lowidx + ncp + 1] - tmp[lowidx])) + for j in range(ncp + 1) + ) + return _fun @@ -99,18 +113,23 @@ def _lagrange_legendre_transform_order2(v, s): def _fun(i): tmp = list(s) - idx = s.ord(i)-1 - if idx == 0: # Don't apply this equation at initial point + idx = s.ord(i) - 1 + if idx == 0: + # Don't apply this equation at initial point raise IndexError("list index out of range") - elif i in s.get_finite_elements(): # Don't apply at finite element - # points continuity equations - # added later + elif i in s.get_finite_elements(): + # Don't apply at finite element points continuity equations + # added later raise IndexError("list index out of range") low = s.get_lower_element_boundary(i) - lowidx = s.ord(low)-1 - return sum(v(tmp[lowidx + j]) * adotdot[j][idx - lowidx] * - (1.0 / (tmp[lowidx + ncp + 1] - tmp[lowidx]) ** 2) \ - for j in range(ncp + 1)) + lowidx = s.ord(low) - 1 + return sum( + v(tmp[lowidx + j]) + * adotdot[j][idx - lowidx] + * (1.0 / (tmp[lowidx + ncp + 1] - tmp[lowidx]) ** 2) + for j in range(ncp + 1) + ) + return _fun @@ -138,7 +157,7 @@ def conv(a, b): def calc_cp(alpha, beta, k): gamma = [] factorial = numpy.math.factorial - + for i in range(k + 1): num = factorial(alpha + k) * factorial(alpha + beta + k + i) denom = factorial(alpha + i) * factorial(k - i) * factorial(i) @@ -162,10 +181,11 @@ def calc_cp(alpha, beta, k): cp = numpy.roots(poly) return numpy.sort(cp).tolist() + # BLN: This is a legacy function that was used to calculate the collocation # constants for an alternative form of the collocation equations described -# in Biegler's nonlinear programming book. The difference being whether the -# state or the derivative is approximated using lagrange polynomials. With +# in Biegler's nonlinear programming book. The difference being whether the +# state or the derivative is approximated using lagrange polynomials. With # the addition of PDE support and chained discretizations in Pyomo.DAE 2.0 # this function is no longer used but kept here for future reference. # @@ -234,38 +254,51 @@ def calc_afinal(cp): return afinal -@TransformationFactory.register('dae.collocation', - doc="Discretizes a DAE model using orthogonal collocation over" - " finite elements transforming the model into an NLP.") +@TransformationFactory.register( + 'dae.collocation', + doc="Discretizes a DAE model using orthogonal collocation over" + " finite elements transforming the model into an NLP.", +) class Collocation_Discretization_Transformation(Transformation): - CONFIG = ConfigBlock("dae.collocation") - CONFIG.declare('nfe', ConfigValue( - default=10, - domain=PositiveInt, - description="The desired number of finite element points to be " - "included in the discretization" - )) - CONFIG.declare('ncp', ConfigValue( - default=3, - domain=PositiveInt, - description="The desired number of collocation points over each " - "finite element" - )) - CONFIG.declare('wrt', ConfigValue( - default=None, - description="The ContinuousSet to be discretized", - doc="Indicates which ContinuousSet the transformation should be " + CONFIG.declare( + 'nfe', + ConfigValue( + default=10, + domain=PositiveInt, + description="The desired number of finite element points to be " + "included in the discretization", + ), + ) + CONFIG.declare( + 'ncp', + ConfigValue( + default=3, + domain=PositiveInt, + description="The desired number of collocation points over each " + "finite element", + ), + ) + CONFIG.declare( + 'wrt', + ConfigValue( + default=None, + description="The ContinuousSet to be discretized", + doc="Indicates which ContinuousSet the transformation should be " "applied to. If this keyword argument is not specified then the " - "same scheme will be applied to all ContinuousSets." - )) - CONFIG.declare('scheme', ConfigValue( - default='LAGRANGE-RADAU', - domain=In(['LAGRANGE-RADAU', 'LAGRANGE-LEGENDRE']), - description="Indicates which collocation scheme to apply", - doc="Options are 'LAGRANGE-RADAU' and 'LAGRANGE-LEGENDRE'. " - "The default scheme is Lagrange polynomials with Radau roots" - )) + "same scheme will be applied to all ContinuousSets.", + ), + ) + CONFIG.declare( + 'scheme', + ConfigValue( + default='LAGRANGE-RADAU', + domain=In(['LAGRANGE-RADAU', 'LAGRANGE-LEGENDRE']), + description="Indicates which collocation scheme to apply", + doc="Options are 'LAGRANGE-RADAU' and 'LAGRANGE-LEGENDRE'. " + "The default scheme is Lagrange polynomials with Radau roots", + ), + ) def __init__(self): super(Collocation_Discretization_Transformation, self).__init__() @@ -277,10 +310,15 @@ def __init__(self): self._tau = {} self._reduced_cp = {} self.all_schemes = { - 'LAGRANGE-RADAU': (_lagrange_radau_transform, - _lagrange_radau_transform_order2), - 'LAGRANGE-LEGENDRE': (_lagrange_legendre_transform, - _lagrange_legendre_transform_order2)} + 'LAGRANGE-RADAU': ( + _lagrange_radau_transform, + _lagrange_radau_transform_order2, + ), + 'LAGRANGE-LEGENDRE': ( + _lagrange_legendre_transform, + _lagrange_legendre_transform_order2, + ), + } def _get_radau_constants(self, currentds): """ @@ -290,10 +328,16 @@ def _get_radau_constants(self, currentds): """ if not numpy_available: if self._ncp[currentds] > 10: - raise ValueError("Numpy was not found so the maximum number " - "of collocation points is 10") - from pyomo.dae.utilities import (radau_tau_dict, radau_adot_dict, - radau_adotdot_dict) + raise ValueError( + "Numpy was not found so the maximum number " + "of collocation points is 10" + ) + from pyomo.dae.utilities import ( + radau_tau_dict, + radau_adot_dict, + radau_adotdot_dict, + ) + self._tau[currentds] = radau_tau_dict[self._ncp[currentds]] self._adot[currentds] = radau_adot_dict[self._ncp[currentds]] self._adotdot[currentds] = radau_adotdot_dict[self._ncp[currentds]] @@ -321,18 +365,21 @@ def _get_legendre_constants(self, currentds): """ if not numpy_available: if self._ncp[currentds] > 10: - raise ValueError("Numpy was not found so the maximum number " - "of collocation points is 10") - from pyomo.dae.utilities import (legendre_tau_dict, - legendre_adot_dict, - legendre_adotdot_dict, - legendre_afinal_dict) + raise ValueError( + "Numpy was not found so the maximum number " + "of collocation points is 10" + ) + from pyomo.dae.utilities import ( + legendre_tau_dict, + legendre_adot_dict, + legendre_adotdot_dict, + legendre_afinal_dict, + ) + self._tau[currentds] = legendre_tau_dict[self._ncp[currentds]] self._adot[currentds] = legendre_adot_dict[self._ncp[currentds]] - self._adotdot[currentds] = \ - legendre_adotdot_dict[self._ncp[currentds]] - self._afinal[currentds] = \ - legendre_afinal_dict[self._ncp[currentds]] + self._adotdot[currentds] = legendre_adotdot_dict[self._ncp[currentds]] + self._afinal[currentds] = legendre_afinal_dict[self._ncp[currentds]] else: alpha = 0 beta = 0 @@ -362,7 +409,7 @@ def _apply_to(self, instance, **kwds): specified then the same scheme will be applied to all ContinuousSets. scheme Indicates which collocation scheme to apply. - Options are 'LAGRANGE-RADAU' and 'LAGRANGE-LEGENDRE'. + Options are 'LAGRANGE-RADAU' and 'LAGRANGE-LEGENDRE'. The default scheme is Lagrange polynomials with Radau roots. """ @@ -375,13 +422,16 @@ def _apply_to(self, instance, **kwds): if tmpds is not None: if tmpds.ctype is not ContinuousSet: - raise TypeError("The component specified using the 'wrt' " - "keyword must be a continuous set") + raise TypeError( + "The component specified using the 'wrt' " + "keyword must be a continuous set" + ) elif 'scheme' in tmpds.get_discretization_info(): - raise ValueError("The discretization scheme '%s' has already " - "been applied to the ContinuousSet '%s'" - % (tmpds.get_discretization_info()['scheme'], - tmpds.name)) + raise ValueError( + "The discretization scheme '%s' has already " + "been applied to the ContinuousSet '%s'" + % (tmpds.get_discretization_info()['scheme'], tmpds.name) + ) if None in self._nfe: raise ValueError( @@ -389,7 +439,8 @@ def _apply_to(self, instance, **kwds): "to every ContinuousSet in the model. If you would like to " "specify a specific discretization scheme for one of the " "ContinuousSets you must discretize each ContinuousSet " - "separately.") + "separately." + ) if len(self._nfe) == 0 and tmpds is None: # Same discretization on all ContinuousSets @@ -414,22 +465,24 @@ def _apply_to(self, instance, **kwds): return instance def _transformBlock(self, block, currentds): - self._fe = {} for ds in block.component_objects(ContinuousSet, descend_into=True): if currentds is None or currentds == ds.name: if 'scheme' in ds.get_discretization_info(): - raise DAE_Error("Attempting to discretize ContinuousSet " - "'%s' after it has already been discretized. " - % ds.name) + raise DAE_Error( + "Attempting to discretize ContinuousSet " + "'%s' after it has already been discretized. " % ds.name + ) generate_finite_elements(ds, self._nfe[currentds]) if not ds.get_changed(): if len(ds) - 1 > self._nfe[currentds]: - logger.warning("More finite elements were found in " - "ContinuousSet '%s' than the number of " - "finite elements specified in apply. The " - "larger number of finite elements will be " - "used." % ds.name) + logger.warning( + "More finite elements were found in " + "ContinuousSet '%s' than the number of " + "finite elements specified in apply. The " + "larger number of finite elements will be " + "used." % ds.name + ) self._nfe[ds.name] = len(ds) - 1 self._fe[ds.name] = list(ds) @@ -460,11 +513,11 @@ def _transformBlock(self, block, currentds): "Error discretizing '%s' with respect to '%s'. " "Current implementation only allows for taking the" " first or second derivative with respect to a " - "particular ContinuousSet" % (d.name, i.name)) + "particular ContinuousSet" % (d.name, i.name) + ) scheme = self._scheme[count - 1] - newexpr = create_partial_expression(scheme, oldexpr, i, - loc) + newexpr = create_partial_expression(scheme, oldexpr, i, loc) d.set_derivative_expression(newexpr) if self._scheme_name == 'LAGRANGE-LEGENDRE': # Add continuity equations to DerivativeVar's parent @@ -485,19 +538,17 @@ def _transformBlock(self, block, currentds): # a Block to add things to the model and store discretization # information. Using a list for now because the simulator # does not yet support models containing active Blocks - reclassified_list = getattr(block, - '_pyomo_dae_reclassified_derivativevars', - None) + reclassified_list = getattr( + block, '_pyomo_dae_reclassified_derivativevars', None + ) if reclassified_list is None: block._pyomo_dae_reclassified_derivativevars = list() - reclassified_list = \ - block._pyomo_dae_reclassified_derivativevars + reclassified_list = block._pyomo_dae_reclassified_derivativevars reclassified_list.append(d) # Reclassify Integrals if all ContinuousSets have been discretized if block_fully_discretized(block): - if block.contains_component(Integral): for i in block.component_objects(Integral, descend_into=True): i.parent_block().reclassify_component_type(i, Expression) @@ -521,8 +572,7 @@ def _transformBlock(self, block, currentds): k._constructed = False k.construct() - def reduce_collocation_points(self, instance, var=None, ncp=None, - contset=None): + def reduce_collocation_points(self, instance, var=None, ncp=None, contset=None): """ This method will add additional constraints to a model to reduce the number of free collocation points (degrees of freedom) for a particular @@ -548,66 +598,82 @@ def reduce_collocation_points(self, instance, var=None, ncp=None, """ if contset is None: - raise TypeError("A continuous set must be specified using the " - "keyword 'contset'") + raise TypeError( + "A continuous set must be specified using the keyword 'contset'" + ) if contset.ctype is not ContinuousSet: - raise TypeError("The component specified using the 'contset' " - "keyword must be a ContinuousSet") + raise TypeError( + "The component specified using the 'contset' " + "keyword must be a ContinuousSet" + ) ds = contset if len(self._ncp) == 0: - raise RuntimeError("This method should only be called after using " - "the apply() method to discretize the model") + raise RuntimeError( + "This method should only be called after using " + "the apply() method to discretize the model" + ) elif None in self._ncp: tot_ncp = self._ncp[None] elif ds.name in self._ncp: tot_ncp = self._ncp[ds.name] else: - raise ValueError("ContinuousSet '%s' has not been discretized, " - "please call the apply_to() method with this " - "ContinuousSet to discretize it before calling " - "this method" % ds.name) + raise ValueError( + "ContinuousSet '%s' has not been discretized, " + "please call the apply_to() method with this " + "ContinuousSet to discretize it before calling " + "this method" % ds.name + ) if var is None: raise TypeError("A variable must be specified") if var.ctype is not Var: - raise TypeError("The component specified using the 'var' keyword " - "must be a variable") + raise TypeError( + "The component specified using the 'var' keyword must be a variable" + ) if ncp is None: - raise TypeError( - "The number of collocation points must be specified") + raise TypeError("The number of collocation points must be specified") if ncp <= 0: - raise ValueError( - "The number of collocation points must be at least 1") + raise ValueError("The number of collocation points must be at least 1") if ncp > tot_ncp: - raise ValueError("The number of collocation points used to " - "interpolate an individual variable must be less " - "than the number used to discretize the original " - "model") + raise ValueError( + "The number of collocation points used to " + "interpolate an individual variable must be less " + "than the number used to discretize the original " + "model" + ) if ncp == tot_ncp: # Nothing to be done return instance # Check to see if the continuousset is an indexing set of the variable if var.dim() == 0: - raise IndexError("ContinuousSet '%s' is not an indexing set of" - " the variable '%s'" % (ds.name, var.name)) + raise IndexError( + "ContinuousSet '%s' is not an indexing set of" + " the variable '%s'" % (ds.name, var.name) + ) varidx = var.index_set() if not varidx.subsets(): if ds is not varidx: - raise IndexError("ContinuousSet '%s' is not an indexing set of" - " the variable '%s'" % (ds.name, var.name)) + raise IndexError( + "ContinuousSet '%s' is not an indexing set of" + " the variable '%s'" % (ds.name, var.name) + ) elif ds not in varidx.subsets(): - raise IndexError("ContinuousSet '%s' is not an indexing set of the" - " variable '%s'" % (ds.name, var.name)) + raise IndexError( + "ContinuousSet '%s' is not an indexing set of the" + " variable '%s'" % (ds.name, var.name) + ) if var.name in self._reduced_cp: temp = self._reduced_cp[var.name] if ds.name in temp: - raise RuntimeError("Variable '%s' has already been constrained" - " to a reduced number of collocation points" - " over ContinuousSet '%s'.") + raise RuntimeError( + "Variable '%s' has already been constrained" + " to a reduced number of collocation points" + " over ContinuousSet '%s'." + ) else: temp[ds.name] = ncp else: @@ -633,23 +699,24 @@ def reduce_collocation_points(self, instance, var=None, ncp=None, for k in range(1, tot_ncp - ncp + 1): if ncp == 1: # Constant over each finite element - conlist.add(var[idx(n, i, k)] == - var[idx(n, i, tot_ncp)]) + conlist.add(var[idx(n, i, k)] == var[idx(n, i, tot_ncp)]) else: - tmp = ds.ord(fe[i])-1 - tmp2 = ds.ord(fe[i + 1])-1 + tmp = ds.ord(fe[i]) - 1 + tmp2 = ds.ord(fe[i + 1]) - 1 ti = t[tmp + k] - tfit = t[tmp2 - ncp + 1:tmp2 + 1] + tfit = t[tmp2 - ncp + 1 : tmp2 + 1] coeff = self._interpolation_coeffs(ti, tfit) - conlist.add(var[idx(n, i, k)] == - sum(var[idx(n, i, j)] * next(coeff) - for j in range(tot_ncp - ncp + 1, - tot_ncp + 1))) + conlist.add( + var[idx(n, i, k)] + == sum( + var[idx(n, i, j)] * next(coeff) + for j in range(tot_ncp - ncp + 1, tot_ncp + 1) + ) + ) return instance def _interpolation_coeffs(self, ti, tfit): - for i in tfit: l = 1 for j in tfit: diff --git a/pyomo/dae/plugins/finitedifference.py b/pyomo/dae/plugins/finitedifference.py index f296111be73..e21b724049b 100644 --- a/pyomo/dae/plugins/finitedifference.py +++ b/pyomo/dae/plugins/finitedifference.py @@ -33,13 +33,14 @@ def _central_transform(v, s): Applies the Central Difference formula of order O(h^2) for first derivatives """ + def _ctr_fun(i): tmp = list(s) - idx = s.ord(i)-1 + idx = s.ord(i) - 1 if idx == 0: # Needed since '-1' is considered a valid index in Python raise IndexError("list index out of range") - return 1 / (tmp[idx + 1] - tmp[idx - 1]) * \ - (v(tmp[idx + 1]) - v(tmp[idx - 1])) + return 1 / (tmp[idx + 1] - tmp[idx - 1]) * (v(tmp[idx + 1]) - v(tmp[idx - 1])) + return _ctr_fun @@ -48,13 +49,18 @@ def _central_transform_order2(v, s): Applies the Central Difference formula of order O(h^2) for second derivatives """ + def _ctr_fun2(i): tmp = list(s) - idx = s.ord(i)-1 + idx = s.ord(i) - 1 if idx == 0: # Needed since '-1' is considered a valid index in Python raise IndexError("list index out of range") - return 1 / ((tmp[idx + 1] - tmp[idx]) * (tmp[idx] - tmp[idx - 1])) * \ - (v(tmp[idx + 1]) - 2 * v(tmp[idx]) + v(tmp[idx - 1])) + return ( + 1 + / ((tmp[idx + 1] - tmp[idx]) * (tmp[idx] - tmp[idx - 1])) + * (v(tmp[idx + 1]) - 2 * v(tmp[idx]) + v(tmp[idx - 1])) + ) + return _ctr_fun2 @@ -62,10 +68,12 @@ def _forward_transform(v, s): """ Applies the Forward Difference formula of order O(h) for first derivatives """ + def _fwd_fun(i): tmp = list(s) - idx = s.ord(i)-1 + idx = s.ord(i) - 1 return 1 / (tmp[idx + 1] - tmp[idx]) * (v(tmp[idx + 1]) - v(tmp[idx])) + return _fwd_fun @@ -73,12 +81,16 @@ def _forward_transform_order2(v, s): """ Applies the Forward Difference formula of order O(h) for second derivatives """ + def _fwd_fun(i): tmp = list(s) - idx = s.ord(i)-1 - return 1 / ((tmp[idx + 2] - tmp[idx + 1]) * - (tmp[idx + 1] - tmp[idx])) *\ - (v(tmp[idx + 2]) - 2 * v(tmp[idx + 1]) + v(tmp[idx])) + idx = s.ord(i) - 1 + return ( + 1 + / ((tmp[idx + 2] - tmp[idx + 1]) * (tmp[idx + 1] - tmp[idx])) + * (v(tmp[idx + 2]) - 2 * v(tmp[idx + 1]) + v(tmp[idx])) + ) + return _fwd_fun @@ -86,12 +98,14 @@ def _backward_transform(v, s): """ Applies the Backward Difference formula of order O(h) for first derivatives """ + def _bwd_fun(i): tmp = list(s) - idx = s.ord(i)-1 + idx = s.ord(i) - 1 if idx == 0: # Needed since '-1' is considered a valid index in Python raise IndexError("list index out of range") return 1 / (tmp[idx] - tmp[idx - 1]) * (v(tmp[idx]) - v(tmp[idx - 1])) + return _bwd_fun @@ -100,21 +114,28 @@ def _backward_transform_order2(v, s): Applies the Backward Difference formula of order O(h) for second derivatives """ + def _bwd_fun(i): tmp = list(s) - idx = s.ord(i)-1 + idx = s.ord(i) - 1 # This check is needed since '-1' is considered a valid index in Python if idx == 0 or idx == 1: raise IndexError("list index out of range") - return 1 / ((tmp[idx - 1] - tmp[idx - 2]) * - (tmp[idx] - tmp[idx - 1])) * \ - (v(tmp[idx]) - 2 * v(tmp[idx - 1]) + v(tmp[idx - 2])) + return ( + 1 + / ((tmp[idx - 1] - tmp[idx - 2]) * (tmp[idx] - tmp[idx - 1])) + * (v(tmp[idx]) - 2 * v(tmp[idx - 1]) + v(tmp[idx - 2])) + ) + return _bwd_fun -@TransformationFactory.register('dae.finite_difference', doc="Discretizes a DAE model using " - "a finite difference method transforming the model into an NLP.") +@TransformationFactory.register( + 'dae.finite_difference', + doc="Discretizes a DAE model using " + "a finite difference method transforming the model into an NLP.", +) class Finite_Difference_Transformation(Transformation): """ Transformation that applies finite difference methods to @@ -122,26 +143,35 @@ class Finite_Difference_Transformation(Transformation): """ CONFIG = ConfigBlock("dae.finite_difference") - CONFIG.declare('nfe', ConfigValue( - default=10, - domain=PositiveInt, - description="The desired number of finite element points to be " - "included in the discretization" - )) - CONFIG.declare('wrt', ConfigValue( - default=None, - description="The ContinuousSet to be discretized", - doc="Indicates which ContinuousSet the transformation should be " + CONFIG.declare( + 'nfe', + ConfigValue( + default=10, + domain=PositiveInt, + description="The desired number of finite element points to be " + "included in the discretization", + ), + ) + CONFIG.declare( + 'wrt', + ConfigValue( + default=None, + description="The ContinuousSet to be discretized", + doc="Indicates which ContinuousSet the transformation should be " "applied to. If this keyword argument is not specified then the " - "same scheme will be applied to all ContinuousSets." - )) - CONFIG.declare('scheme', ConfigValue( - default='BACKWARD', - domain=In(['BACKWARD', 'CENTRAL', 'FORWARD']), - description="Indicates which finite difference scheme to apply", - doc="Options are BACKWARD, CENTRAL, or FORWARD. The default scheme is " - "the backward difference method" - )) + "same scheme will be applied to all ContinuousSets.", + ), + ) + CONFIG.declare( + 'scheme', + ConfigValue( + default='BACKWARD', + domain=In(['BACKWARD', 'CENTRAL', 'FORWARD']), + description="Indicates which finite difference scheme to apply", + doc="Options are BACKWARD, CENTRAL, or FORWARD. The default scheme is " + "the backward difference method", + ), + ) def __init__(self): super(Finite_Difference_Transformation, self).__init__() @@ -149,7 +179,8 @@ def __init__(self): self.all_schemes = { 'BACKWARD': (_backward_transform, _backward_transform_order2), 'CENTRAL': (_central_transform, _central_transform_order2), - 'FORWARD': (_forward_transform, _forward_transform_order2)} + 'FORWARD': (_forward_transform, _forward_transform_order2), + } def _apply_to(self, instance, **kwds): """ @@ -174,20 +205,24 @@ def _apply_to(self, instance, **kwds): if tmpds is not None: if tmpds.ctype is not ContinuousSet: - raise TypeError("The component specified using the 'wrt' " - "keyword must be a continuous set") + raise TypeError( + "The component specified using the 'wrt' " + "keyword must be a continuous set" + ) elif 'scheme' in tmpds.get_discretization_info(): - raise ValueError("The discretization scheme '%s' has already " - "been applied to the ContinuousSet '%s'" % - (tmpds.get_discretization_info()['scheme'], - tmpds.name)) + raise ValueError( + "The discretization scheme '%s' has already " + "been applied to the ContinuousSet '%s'" + % (tmpds.get_discretization_info()['scheme'], tmpds.name) + ) if None in self._nfe: raise ValueError( "A general discretization scheme has already been applied to " "to every continuous set in the model. If you would like to " "apply a different discretization scheme to each continuous " - "set, you must declare a new transformation object") + "set, you must declare a new transformation object" + ) if len(self._nfe) == 0 and tmpds is None: # Same discretization on all ContinuousSets @@ -205,22 +240,24 @@ def _apply_to(self, instance, **kwds): return instance def _transformBlock(self, block, currentds): - self._fe = {} for ds in block.component_objects(ContinuousSet): if currentds is None or currentds == ds.name or currentds is ds: if 'scheme' in ds.get_discretization_info(): - raise DAE_Error("Attempting to discretize ContinuousSet " - "'%s' after it has already been discretized. " - % ds.name) + raise DAE_Error( + "Attempting to discretize ContinuousSet " + "'%s' after it has already been discretized. " % ds.name + ) generate_finite_elements(ds, self._nfe[currentds]) if not ds.get_changed(): if len(ds) - 1 > self._nfe[currentds]: - logger.warning("More finite elements were found in " - "ContinuousSet '%s' than the number of " - "finite elements specified in apply. The " - "larger number of finite elements will be " - "used." % ds.name) + logger.warning( + "More finite elements were found in " + "ContinuousSet '%s' than the number of " + "finite elements specified in apply. The " + "larger number of finite elements will be " + "used." % ds.name + ) self._nfe[ds.name] = len(ds) - 1 self._fe[ds.name] = list(ds) @@ -248,10 +285,10 @@ def _transformBlock(self, block, currentds): "Error discretizing '%s' with respect to '%s'. " "Current implementation only allows for taking the" " first or second derivative with respect to " - "a particular ContinuousSet" % (d.name, i.name)) + "a particular ContinuousSet" % (d.name, i.name) + ) scheme = self._scheme[count - 1] - newexpr = create_partial_expression(scheme, oldexpr, i, - loc) + newexpr = create_partial_expression(scheme, oldexpr, i, loc) d.set_derivative_expression(newexpr) # Reclassify DerivativeVar if all indexing ContinuousSets have @@ -268,19 +305,17 @@ def _transformBlock(self, block, currentds): # a Block to add things to the model and store discretization # information. Using a list for now because the simulator # does not yet support models containing active Blocks - reclassified_list = getattr(block, - '_pyomo_dae_reclassified_derivativevars', - None) + reclassified_list = getattr( + block, '_pyomo_dae_reclassified_derivativevars', None + ) if reclassified_list is None: block._pyomo_dae_reclassified_derivativevars = list() - reclassified_list = \ - block._pyomo_dae_reclassified_derivativevars + reclassified_list = block._pyomo_dae_reclassified_derivativevars reclassified_list.append(d) # Reclassify Integrals if all ContinuousSets have been discretized if block_fully_discretized(block): - if block.contains_component(Integral): for i in block.component_objects(Integral, descend_into=True): i.parent_block().reclassify_component_type(i, Expression) diff --git a/pyomo/dae/set_utils.py b/pyomo/dae/set_utils.py index d6c314b9390..96a7489261e 100644 --- a/pyomo/dae/set_utils.py +++ b/pyomo/dae/set_utils.py @@ -21,7 +21,7 @@ def index_warning(name, index): def is_explicitly_indexed_by(comp, *sets, **kwargs): """ - Function for determining whether a pyomo component is indexed by a + Function for determining whether a pyomo component is indexed by a set or group of sets. Args: @@ -37,8 +37,7 @@ def is_explicitly_indexed_by(comp, *sets, **kwargs): return False for s in sets: if isinstance(s, SetProduct): - msg = ('Checking for explicit indexing by a SetProduct ' - 'is not supported') + msg = 'Checking for explicit indexing by a SetProduct is not supported' raise TypeError(msg) expand_all_set_operators = kwargs.pop('expand_all_set_operators', False) @@ -46,11 +45,12 @@ def is_explicitly_indexed_by(comp, *sets, **kwargs): keys = kwargs.keys() raise ValueError('Unrecognized keyword arguments: %s' % str(keys)) - projected_subsets = comp.index_set().subsets(expand_all_set_operators= - expand_all_set_operators) + projected_subsets = comp.index_set().subsets( + expand_all_set_operators=expand_all_set_operators + ) # Expanding all set operators here can be dangerous because it will not # distinguish between operators that contain their operands (e.g. union, - # where you might consider the component to be considered indexed by + # where you might consider the component to be considered indexed by # the operands) and operators that don't. # Ideally would like to check for containment by inclusion and containment # by product in one search of the set operators. @@ -61,15 +61,15 @@ def is_explicitly_indexed_by(comp, *sets, **kwargs): def is_in_block_indexed_by(comp, s, stop_at=None): """ - Function for determining whether a component is contained in a + Function for determining whether a component is contained in a block that is indexed by a particular set. - Args: + Args: comp : Component whose parent blocks are checked s : Set for which indices are checked stop_at : Block at which to stop searching if reached, regardless of whether or not it is indexed by s - + Returns: Bool that is true if comp is contained in a block indexed by s """ @@ -110,8 +110,10 @@ def get_indices_of_projection(index_set, *sets): try: total_s_dim = sum([s.dimen for s in sets]) except TypeError: - msg = ('get_indices_of_projection does not support sets with ' - 'dimen == None, including those with inconsistent dimen') + msg = ( + 'get_indices_of_projection does not support sets with ' + 'dimen == None, including those with inconsistent dimen' + ) raise TypeError(msg) subset_set = ComponentSet(index_set.subsets()) @@ -149,9 +151,9 @@ def get_indices_of_projection(index_set, *sets): location = {0: 0} other_ind_sets = [] - if index_set.dimen == total_s_dim: + if index_set.dimen == total_s_dim: # comp indexed by all sets and having this dimension - # is sufficient to know that comp is only indexed by + # is sufficient to know that comp is only indexed by # Sets in *sets # In this case, return the trivial set_except and index_getter @@ -162,9 +164,11 @@ def get_indices_of_projection(index_set, *sets): info['set_except'] = [None] # index_getter returns an index corresponding to the values passed to # it, re-ordered according to order of indexing sets in component. - info['index_getter'] = (lambda incomplete_index, *newvals: - newvals[0] if len(newvals) <= 1 else - tuple([newvals[location[i]] for i in location])) + info['index_getter'] = ( + lambda incomplete_index, *newvals: newvals[0] + if len(newvals) <= 1 + else tuple([newvals[location[i]] for i in location]) + ) return info # Now may assume other_ind_sets is nonempty. @@ -175,8 +179,9 @@ def get_indices_of_projection(index_set, *sets): else: raise ValueError('Did not expect this to happen') - index_getter = (lambda incomplete_index, *newvals: - _complete_index(location, incomplete_index, *newvals)) + index_getter = lambda incomplete_index, *newvals: _complete_index( + location, incomplete_index, *newvals + ) info['set_except'] = set_except info['index_getter'] = index_getter @@ -184,10 +189,10 @@ def get_indices_of_projection(index_set, *sets): def get_index_set_except(comp, *sets): - """ + """ Function for getting indices of a component over a product of its - indexing sets other than those specified. Indices for the specified - sets can be used to construct indices of the proper dimension for the + indexing sets other than those specified. Indices for the specified + sets can be used to construct indices of the proper dimension for the original component via the index_getter function. Args: @@ -203,8 +208,11 @@ def get_index_set_except(comp, *sets): in the same order their Sets were provided in the sets argument. """ if not is_explicitly_indexed_by(comp, *sets): - msg = (comp.name + ' is not indexed by at least one of ' + - str([s.name for s in sets])) + msg = ( + comp.name + + ' is not indexed by at least one of ' + + str([s.name for s in sets]) + ) raise ValueError(msg) return get_indices_of_projection(comp.index_set(), *sets) @@ -213,7 +221,7 @@ def get_index_set_except(comp, *sets): def _complete_index(loc, index, *newvals): """ Function for inserting new values into a partial index. - Used by get_index_set_except function to construct the + Used by get_index_set_except function to construct the index_getter function for completing indices of a particular component with particular sets excluded. @@ -225,7 +233,7 @@ def _complete_index(loc, index, *newvals): or tuples (for higher-dimension sets) Returns: - An index (tuple) with values from newvals inserted in + An index (tuple) with values from newvals inserted in locations specified by loc """ if type(index) is not tuple: @@ -241,12 +249,11 @@ def _complete_index(loc, index, *newvals): return index -def deactivate_model_at(b, cset, pts, allow_skip=True, - suppress_warnings=False): +def deactivate_model_at(b, cset, pts, allow_skip=True, suppress_warnings=False): """ - Finds any block or constraint in block b, indexed explicitly (and not - implicitly) by cset, and deactivates it at points specified. - Implicitly indexed components are excluded because one of their parent + Finds any block or constraint in block b, indexed explicitly (and not + implicitly) by cset, and deactivates it at points specified. + Implicitly indexed components are excluded because one of their parent blocks will be deactivated, so deactivating them too would be redundant. Args: @@ -274,8 +281,9 @@ def deactivate_model_at(b, cset, pts, allow_skip=True, continue visited.add(id(comp)) - if (is_explicitly_indexed_by(comp, cset) and - not is_in_block_indexed_by(comp, cset)): + if is_explicitly_indexed_by(comp, cset) and not is_in_block_indexed_by( + comp, cset + ): info = get_index_set_except(comp, cset) non_cset_set = info['set_except'] index_getter = info['index_getter'] diff --git a/pyomo/dae/simulator.py b/pyomo/dae/simulator.py index 283575189df..b869592553a 100644 --- a/pyomo/dae/simulator.py +++ b/pyomo/dae/simulator.py @@ -11,42 +11,52 @@ from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.dae.diffvar import DAE_Error -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.core.expr.numvalue import native_numeric_types from pyomo.core.expr.template_expr import IndexTemplate, _GetItemIndexer import logging -__all__ = ('Simulator', ) +__all__ = ('Simulator',) logger = logging.getLogger('pyomo.core') from pyomo.common.dependencies import ( - numpy as np, numpy_available, scipy, scipy_available, attempt_import, + numpy as np, + numpy_available, + scipy, + scipy_available, + attempt_import, ) casadi_intrinsic = {} + + def _finalize_casadi(casadi, available): if available: - casadi_intrinsic.update({ - 'log': casadi.log, - 'log10': casadi.log10, - 'sin': casadi.sin, - 'cos': casadi.cos, - 'tan': casadi.tan, - 'cosh': casadi.cosh, - 'sinh': casadi.sinh, - 'tanh': casadi.tanh, - 'asin': casadi.asin, - 'acos': casadi.acos, - 'atan': casadi.atan, - 'exp': casadi.exp, - 'sqrt': casadi.sqrt, - 'asinh': casadi.asinh, - 'acosh': casadi.acosh, - 'atanh': casadi.atanh, - 'ceil': casadi.ceil, - 'floor': casadi.floor, - }) + casadi_intrinsic.update( + { + 'log': casadi.log, + 'log10': casadi.log10, + 'sin': casadi.sin, + 'cos': casadi.cos, + 'tan': casadi.tan, + 'cosh': casadi.cosh, + 'sinh': casadi.sinh, + 'tanh': casadi.tanh, + 'asin': casadi.asin, + 'acos': casadi.acos, + 'atan': casadi.atan, + 'exp': casadi.exp, + 'sqrt': casadi.sqrt, + 'asinh': casadi.asinh, + 'acosh': casadi.acosh, + 'atanh': casadi.atanh, + 'ceil': casadi.ceil, + 'floor': casadi.floor, + } + ) + + casadi, casadi_available = attempt_import('casadi', callback=_finalize_casadi) @@ -85,9 +95,11 @@ def _check_productexpression(expr, i): stack.append((curr.arg(1), e_)) elif curr.__class__ is EXPR.DivisionExpression: stack.append((curr.arg(0), e_)) - stack.append((curr.arg(1), - e_)) - elif type(curr) is EXPR.GetItemExpression and \ - type(curr.arg(0)) is DerivativeVar: + stack.append((curr.arg(1), -e_)) + elif ( + isinstance(curr, EXPR.GetItemExpression) + and type(curr.arg(0)) is DerivativeVar + ): dv = (curr, e_) else: pterms.append((curr, e_)) @@ -95,18 +107,18 @@ def _check_productexpression(expr, i): if dv is None: return None - numer = 1 + numerator = 1 denom = 1 for term, e_ in pterms: if e_ == 1: denom *= term else: - numer *= term + numerator *= term curr, e_ = dv if e_ == 1: - return [curr, expr.arg(1 - i) * numer / denom] + return [curr, expr.arg(1 - i) * numerator / denom] else: - return [curr, denom / (expr.arg(1 - i) * numer)] + return [curr, denom / (expr.arg(1 - i) * numerator)] def _check_negationexpression(expr, i): @@ -120,22 +132,22 @@ def _check_negationexpression(expr, i): """ arg = expr.arg(i).arg(0) - if type(arg) is EXPR.GetItemExpression and \ - type(arg.arg(0)) is DerivativeVar: - return [arg, - expr.arg(1 - i)] + if isinstance(arg, EXPR.GetItemExpression) and type(arg.arg(0)) is DerivativeVar: + return [arg, -expr.arg(1 - i)] if type(arg) is EXPR.ProductExpression: lhs = arg.arg(0) rhs = arg.arg(1) - if not (type(lhs) in native_numeric_types or - not lhs.is_potentially_variable()): + if not (type(lhs) in native_numeric_types or not lhs.is_potentially_variable()): return None - if not (type(rhs) is EXPR.GetItemExpression and - type(rhs.arg(0)) is DerivativeVar): + if not ( + isinstance(rhs, EXPR.GetItemExpression) + and type(rhs.arg(0)) is DerivativeVar + ): return None - return [rhs, - expr.arg(1 - i) / lhs] + return [rhs, -expr.arg(1 - i) / lhs] return None @@ -158,18 +170,22 @@ def _check_viewsumexpression(expr, i): for idx, item in enumerate(sumexp.args): if dv is not None: items.append(item) - elif type(item) is EXPR.GetItemExpression and \ - type(item.arg(0)) is DerivativeVar: + elif ( + isinstance(item, EXPR.GetItemExpression) + and type(item.arg(0)) is DerivativeVar + ): dv = item elif type(item) is EXPR.ProductExpression: # This will contain the constant coefficient if there is one lhs = item.arg(0) # This is a potentially variable expression rhs = item.arg(1) - if (type(lhs) in native_numeric_types or - not lhs.is_potentially_variable()) \ - and (type(rhs) is EXPR.GetItemExpression and - type(rhs.arg(0)) is DerivativeVar): + if ( + type(lhs) in native_numeric_types or not lhs.is_potentially_variable() + ) and ( + isinstance(rhs, EXPR.GetItemExpression) + and type(rhs.arg(0)) is DerivativeVar + ): dv = rhs dvcoef = lhs else: @@ -196,21 +212,24 @@ def __init__(self, templatemap): # Note because we are creating a "nonPyomo" expression tree, we # want to remove all Expression nodes (as opposed to replacing # them in place) - super().__init__(descend_into_named_expressions=True, - remove_named_expressions=True) + super().__init__( + descend_into_named_expressions=True, remove_named_expressions=True + ) self.templatemap = templatemap def beforeChild(self, node, child, child_idx): if type(child) is IndexTemplate: return False, child - if type(child) is EXPR.GetItemExpression: + if isinstance(child, EXPR.GetItemExpression): _id = _GetItemIndexer(child) if _id not in self.templatemap: self.templatemap[_id] = Param(mutable=True) self.templatemap[_id].construct() self.templatemap[_id]._name = "%s[%s]" % ( - _id.base.name, ','.join(str(x) for x in _id.args)) + _id.base.name, + ','.join(str(x) for x in _id.args), + ) return False, self.templatemap[_id] return super().beforeChild(node, child, child_idx) @@ -219,7 +238,7 @@ def beforeChild(self, node, child, child_idx): def convert_pyomo2scipy(expr, templatemap): """Substitute _GetItem nodes in an expression tree. - This substition function is used to replace Pyomo _GetItem + This substitution function is used to replace Pyomo _GetItem nodes with mutable Params. Args: @@ -230,8 +249,9 @@ def convert_pyomo2scipy(expr, templatemap): a new expression tree with all substitutions done """ if not scipy_available: - raise DAE_Error("SciPy is not installed. Cannot substitute SciPy " - "intrinsic functions.") + raise DAE_Error( + "SciPy is not installed. Cannot substitute SciPy intrinsic functions." + ) visitor = Pyomo2Scipy_Visitor(templatemap) return visitor.walk_expression(expr) @@ -251,8 +271,9 @@ def __init__(self, templatemap): # Note because we are creating a "nonPyomo" expression tree, we # want to remove all Expression nodes (as opposed to replacing # them in place) - super().__init__(descend_into_named_expressions=True, - remove_named_expressions=True) + super().__init__( + descend_into_named_expressions=True, remove_named_expressions=True + ) self.templatemap = templatemap def exitNode(self, node, data): @@ -260,18 +281,16 @@ def exitNode(self, node, data): ans = super().exitNode(node, data) if type(ans) is EXPR.UnaryFunctionExpression: return EXPR.UnaryFunctionExpression( - ans.args, - ans.getname(), - casadi_intrinsic[ans.getname()]) + ans.args, ans.getname(), casadi_intrinsic[ans.getname()] + ) return ans def beforeChild(self, node, child, child_idx): """Replace a node if it's a _GetItemExpression.""" - if type(child) is EXPR.GetItemExpression: + if isinstance(child, EXPR.GetItemExpression): _id = _GetItemIndexer(child) if _id not in self.templatemap: - name = "%s[%s]" % ( - _id.base.name, ','.join(str(x) for x in _id.args)) + name = "%s[%s]" % (_id.base.name, ','.join(str(x) for x in _id.args)) self.templatemap[_id] = casadi.SX.sym(name) return False, self.templatemap[_id] @@ -294,7 +313,7 @@ class Convert_Pyomo2Casadi_Visitor(EXPR.ExpressionValueVisitor): """ def visit(self, node, values): - """ Visit nodes that have been expanded """ + """Visit nodes that have been expanded""" return node._apply_operation(values) def visiting_potential_leaf(self, node): @@ -321,7 +340,7 @@ def visiting_potential_leaf(self, node): def substitute_pyomo2casadi(expr, templatemap): """Substitute IndexTemplates in an expression tree. - This substition function is used to replace Pyomo intrinsic + This substitution function is used to replace Pyomo intrinsic functions with CasADi functions. Args: @@ -333,8 +352,10 @@ def substitute_pyomo2casadi(expr, templatemap): a new expression tree with all substitutions done """ if not casadi_available: - raise DAE_Error("CASADI is not installed. Cannot substitute CasADi " - "variables and intrinsic functions.") + raise DAE_Error( + "CASADI is not installed. Cannot substitute CasADi " + "variables and intrinsic functions." + ) visitor = Substitute_Pyomo2Casadi_Visitor(templatemap) return visitor.walk_expression(expr) @@ -356,8 +377,10 @@ def convert_pyomo2casadi(expr): a CasADi expression tree. """ if not casadi_available: - raise DAE_Error("CASADI is not installed. Cannot convert a Pyomo " - "expression to a Casadi expression.") + raise DAE_Error( + "CASADI is not installed. Cannot convert a Pyomo " + "expression to a Casadi expression." + ) visitor = Convert_Pyomo2Casadi_Visitor() return visitor.dfs_postorder_stack(expr) @@ -378,12 +401,12 @@ class Simulator: """ def __init__(self, m, package='scipy'): - self._intpackage = package if self._intpackage not in ['scipy', 'casadi']: raise DAE_Error( "Unrecognized simulator package %s. Please select from " - "%s" % (self._intpackage, ['scipy', 'casadi'])) + "%s" % (self._intpackage, ['scipy', 'casadi']) + ) if self._intpackage == 'scipy': if not scipy_available: @@ -392,26 +415,29 @@ def __init__(self, m, package='scipy'): logger.warning( "The scipy module is not available. " "You may build the Simulator object but you will not " - "be able to run the simulation.") + "be able to run the simulation." + ) else: if not casadi_available: # Initializing the simulator for use with casadi requires # access to casadi objects. Therefore, we must throw an error # here instead of a warning. - raise ValueError("The casadi module is not available. " - "Cannot simulate model.") + raise ValueError( + "The casadi module is not available. Cannot simulate model." + ) # Check for active Blocks and throw error if any are found - if len(list(m.component_data_objects(Block, active=True, - descend_into=False))): - raise DAE_Error("The Simulator cannot handle hierarchical models " - "at the moment.") + if len(list(m.component_data_objects(Block, active=True, descend_into=False))): + raise DAE_Error( + "The Simulator cannot handle hierarchical models at the moment." + ) temp = m.component_map(ContinuousSet) if len(temp) != 1: raise DAE_Error( "Currently the simulator may only be applied to " - "Pyomo models with a single ContinuousSet") + "Pyomo models with a single ContinuousSet" + ) # Get the ContinuousSet in the model contset = list(temp.values())[0] @@ -438,7 +464,6 @@ def __init__(self, m, package='scipy'): # RHS. Must find a RHS for every derivative var otherwise ERROR. Build # dictionary of DerivativeVar:RHS equation. for con in m.component_objects(Constraint, active=True): - # Skip the discretization equations if model is discretized if '_disc_eq' in con.name: continue @@ -466,8 +491,8 @@ def __init__(self, m, package='scipy'): if csidx != -1: raise DAE_Error( "Cannot simulate the constraint %s because " - "it is indexed by duplicate ContinuousSets" - % con.name) + "it is indexed by duplicate ContinuousSets" % con.name + ) csidx = dimsum elif noncsidx is None: noncsidx = s @@ -501,24 +526,28 @@ def __init__(self, m, package='scipy'): # separable RHS args = None # Case 1: m.dxdt[t] = RHS - if type(tempexp.arg(0)) is EXPR.GetItemExpression: + if isinstance(tempexp.arg(0), EXPR.GetItemExpression): args = _check_getitemexpression(tempexp, 0) # Case 2: RHS = m.dxdt[t] if args is None: - if type(tempexp.arg(1)) is EXPR.GetItemExpression: + if isinstance(tempexp.arg(1), EXPR.GetItemExpression): args = _check_getitemexpression(tempexp, 1) # Case 3: m.p*m.dxdt[t] = RHS if args is None: - if type(tempexp.arg(0)) is EXPR.ProductExpression or \ - type(tempexp.arg(0)) is EXPR.DivisionExpression: + if ( + type(tempexp.arg(0)) is EXPR.ProductExpression + or type(tempexp.arg(0)) is EXPR.DivisionExpression + ): args = _check_productexpression(tempexp, 0) # Case 4: RHS = m.p*m.dxdt[t] if args is None: - if type(tempexp.arg(1)) is EXPR.ProductExpression or \ - type(tempexp.arg(1)) is EXPR.DivisionExpression: + if ( + type(tempexp.arg(1)) is EXPR.ProductExpression + or type(tempexp.arg(1)) is EXPR.DivisionExpression + ): args = _check_productexpression(tempexp, 1) # Case 5: m.dxdt[t] + sum(ELSE) = RHS @@ -560,8 +589,8 @@ def __init__(self, m, package='scipy'): "unrecognized differential equation. Constraint " "'%s' cannot be simulated using Scipy. If you are " "trying to simulate a DAE model you must use " - "CasADi as the integration package." - % str(con.name)) + "CasADi as the integration package." % str(con.name) + ) tempexp = tempexp.arg(0) - tempexp.arg(1) algexp = substitute_pyomo2casadi(tempexp, templatemap) alglist.append(algexp) @@ -574,7 +603,8 @@ def __init__(self, m, package='scipy'): if dvkey in rhsdict.keys(): raise DAE_Error( "Found multiple RHS expressions for the " - "DerivativeVar %s" % str(dvkey)) + "DerivativeVar %s" % str(dvkey) + ) derivlist.append(dvkey) if self._intpackage == 'casadi': @@ -608,7 +638,8 @@ def __init__(self, m, package='scipy'): # template map raise DAE_Error( "Cannot simulate a differential equation with " - "multiple DerivativeVars") + "multiple DerivativeVars" + ) if item not in diffvars: # Finds time varying parameters and algebraic vars algvars.append(item) @@ -626,6 +657,7 @@ def _rhsfun(t, x): residual.append(rhsdict[d]()) return residual + self._rhsfun = _rhsfun # Add any diffvars not added by expression walker to self._templatemap @@ -633,7 +665,9 @@ def _rhsfun(t, x): for _id in diffvars: if _id not in templatemap: name = "%s[%s]" % ( - _id.base.name, ','.join(str(x) for x in _id.args)) + _id.base.name, + ','.join(str(x) for x in _id.args), + ) templatemap[_id] = casadi.SX.sym(name) self._contset = contset @@ -688,8 +722,15 @@ def get_variable_order(self, vartype=None): else: return self._diffvars - def simulate(self, numpoints=None, tstep=None, integrator=None, - varying_inputs=None, initcon=None, integrator_options=None): + def simulate( + self, + numpoints=None, + tstep=None, + integrator=None, + varying_inputs=None, + initcon=None, + integrator_options=None, + ): """ Simulate the model. Integrator-specific options may be specified as keyword arguments and will be passed on to the integrator. @@ -735,8 +776,9 @@ def simulate(self, numpoints=None, tstep=None, integrator=None, """ if not numpy_available: - raise ValueError("The numpy module is not available. " - "Cannot simulate the model.") + raise ValueError( + "The numpy module is not available. Cannot simulate the model." + ) if integrator_options is None: integrator_options = {} @@ -757,30 +799,33 @@ def simulate(self, numpoints=None, tstep=None, integrator=None, integrator = 'idas' if integrator not in valid_integrators: - raise DAE_Error("Unrecognized %s integrator \'%s\'. Please select" - " an integrator from %s" % (self._intpackage, - integrator, - valid_integrators)) + raise DAE_Error( + "Unrecognized %s integrator \'%s\'. Please select" + " an integrator from %s" + % (self._intpackage, integrator, valid_integrators) + ) # Set the time step or the number of points for the lists # returned by the integrator - if tstep is not None and \ - tstep > (self._contset.last() - self._contset.first()): + if tstep is not None and tstep > (self._contset.last() - self._contset.first()): raise ValueError( "The step size %6.2f is larger than the span of the " - "ContinuousSet %s" % (tstep, self._contset.name())) + "ContinuousSet %s" % (tstep, self._contset.name()) + ) if tstep is not None and numpoints is not None: raise ValueError( "Cannot specify both the step size and the number of " - "points for the simulator") + "points for the simulator" + ) if tstep is None and numpoints is None: # Use 100 points by default numpoints = 100 if tstep is None: tsim = np.linspace( - self._contset.first(), self._contset.last(), num=numpoints) + self._contset.first(), self._contset.last(), num=numpoints + ) # Consider adding an option for log spaced time points. Can be # important for simulating stiff systems. @@ -789,8 +834,7 @@ def simulate(self, numpoints=None, tstep=None, integrator=None, # self._contset.last()),num=1000, endpoint=True) else: - tsim = np.arange( - self._contset.first(), self._contset.last(), tstep) + tsim = np.arange(self._contset.first(), self._contset.last(), tstep) switchpts = [] self._siminputvars = {} @@ -799,7 +843,8 @@ def simulate(self, numpoints=None, tstep=None, integrator=None, if type(varying_inputs) is not Suffix: raise TypeError( "Varying input values must be specified using a " - "Suffix. Please refer to the simulator documentation.") + "Suffix. Please refer to the simulator documentation." + ) for alg in self._algvars: if alg._base in varying_inputs: @@ -811,13 +856,15 @@ def simulate(self, numpoints=None, tstep=None, integrator=None, self._simalgvars.append(alg) if self._intpackage == 'scipy' and len(self._simalgvars) != 0: - raise DAE_Error("When simulating with Scipy you must " - "provide values for all parameters " - "and algebraic variables that are indexed " - "by the ContinuoutSet using the " - "'varying_inputs' keyword argument. " - "Please refer to the simulator documentation " - "for more information.") + raise DAE_Error( + "When simulating with Scipy you must " + "provide values for all parameters " + "and algebraic variables that are indexed " + "by the ContinuoutSet using the " + "'varying_inputs' keyword argument. " + "Please refer to the simulator documentation " + "for more information." + ) # Get the set of unique points switchpts = list(set(switchpts)) @@ -825,11 +872,15 @@ def simulate(self, numpoints=None, tstep=None, integrator=None, # Make sure all the switchpts are within the bounds of # the ContinuousSet - if switchpts[0] < self._contset.first() or \ - switchpts[-1] > self._contset.last(): - raise ValueError("Found a switching point for one or more of " - "the time-varying inputs that is not within " - "the bounds of the ContinuousSet.") + if ( + switchpts[0] < self._contset.first() + or switchpts[-1] > self._contset.last() + ): + raise ValueError( + "Found a switching point for one or more of " + "the time-varying inputs that is not within " + "the bounds of the ContinuousSet." + ) # Update tsim to include input switching points # This numpy function returns the unique, sorted points @@ -844,12 +895,14 @@ def simulate(self, numpoints=None, tstep=None, integrator=None, raise ValueError( "Too many initial conditions were specified. The " "simulator was expecting a list with %i values." - % len(self._diffvars)) + % len(self._diffvars) + ) if len(initcon) < len(self._diffvars): raise ValueError( "Too few initial conditions were specified. The " "simulator was expecting a list with %i values." - % len(self._diffvars)) + % len(self._diffvars) + ) else: initcon = [] for v in self._diffvars: @@ -857,51 +910,45 @@ def simulate(self, numpoints=None, tstep=None, integrator=None, if type(i) is IndexTemplate: break initpoint = self._contset.first() - vidx = tuple(v._args[0:idx]) + (initpoint,) + \ - tuple(v._args[idx + 1:]) + vidx = tuple(v._args[0:idx]) + (initpoint,) + tuple(v._args[idx + 1 :]) # This line will raise an error if no value was set initcon.append(value(v._base[vidx])) # Call the integrator if self._intpackage == 'scipy': if not scipy_available: - raise ValueError("The scipy module is not available. " - "Cannot simulate the model.") - tsim, profile = self._simulate_with_scipy(initcon, tsim, switchpts, - varying_inputs, - integrator, - integrator_options) + raise ValueError( + "The scipy module is not available. Cannot simulate the model." + ) + tsim, profile = self._simulate_with_scipy( + initcon, tsim, switchpts, varying_inputs, integrator, integrator_options + ) else: - if len(switchpts) != 0: - tsim, profile = \ - self._simulate_with_casadi_with_inputs(initcon, tsim, - varying_inputs, - integrator, - integrator_options) + tsim, profile = self._simulate_with_casadi_with_inputs( + initcon, tsim, varying_inputs, integrator, integrator_options + ) else: - tsim, profile = \ - self._simulate_with_casadi_no_inputs(initcon, tsim, - integrator, - integrator_options) + tsim, profile = self._simulate_with_casadi_no_inputs( + initcon, tsim, integrator, integrator_options + ) self._tsim = tsim self._simsolution = profile return [tsim, profile] - def _simulate_with_scipy(self, initcon, tsim, switchpts, - varying_inputs, integrator, - integrator_options): - + def _simulate_with_scipy( + self, initcon, tsim, switchpts, varying_inputs, integrator, integrator_options + ): scipyint = scipy.integrate.ode(self._rhsfun).set_integrator( - integrator, **integrator_options) + integrator, **integrator_options + ) scipyint.set_initial_value(initcon, tsim[0]) profile = np.array(initcon) i = 1 while scipyint.successful() and scipyint.t < tsim[-1]: - # check if tsim[i-1] is a switching time and update value if tsim[i - 1] in switchpts: for v in self._siminputvars.keys(): @@ -914,19 +961,21 @@ def _simulate_with_scipy(self, initcon, tsim, switchpts, i += 1 if not scipyint.successful(): - raise DAE_Error("The Scipy integrator %s did not terminate " - "successfully." % integrator) + raise DAE_Error( + "The Scipy integrator %s did not terminate " + "successfully." % integrator + ) return [tsim, profile] - def _simulate_with_casadi_no_inputs(self, initcon, tsim, integrator, - integrator_options): + def _simulate_with_casadi_no_inputs( + self, initcon, tsim, integrator, integrator_options + ): # Old way (10 times faster, but can't incorporate time # varying parameters/controls) xalltemp = [self._templatemap[i] for i in self._diffvars] xall = casadi.vertcat(*xalltemp) - odealltemp = [convert_pyomo2casadi(self._rhsdict[i]) - for i in self._derivlist] + odealltemp = [convert_pyomo2casadi(self._rhsdict[i]) for i in self._derivlist] odeall = casadi.vertcat(*odealltemp) dae = {'x': xall, 'ode': odeall} @@ -951,21 +1000,21 @@ def _simulate_with_casadi_no_inputs(self, initcon, tsim, integrator, return [tsim, profile] - def _simulate_with_casadi_with_inputs(self, initcon, tsim, varying_inputs, - integrator, integrator_options): - + def _simulate_with_casadi_with_inputs( + self, initcon, tsim, varying_inputs, integrator, integrator_options + ): xalltemp = [self._templatemap[i] for i in self._diffvars] xall = casadi.vertcat(*xalltemp) time = casadi.SX.sym('time') - odealltemp = [time * convert_pyomo2casadi(self._rhsdict[i]) - for i in self._derivlist] + odealltemp = [ + time * convert_pyomo2casadi(self._rhsdict[i]) for i in self._derivlist + ] odeall = casadi.vertcat(*odealltemp) # Time-varying inputs - ptemp = [self._templatemap[i] - for i in self._siminputvars.values()] + ptemp = [self._templatemap[i] for i in self._siminputvars.values()] pall = casadi.vertcat(time, *ptemp) dae = {'x': xall, 'p': pall, 'ode': odeall} @@ -996,12 +1045,11 @@ def _simulate_with_casadi_with_inputs(self, initcon, tsim, varying_inputs, profile = varying_inputs[p] tswitch = list(profile.keys()) tswitch.sort() - tidx = [tsim.searchsorted(i) for i in tswitch] + \ - [len(tsim) - 1] - ptemp = [profile[0]] + \ - [casadi.repmat(profile[tswitch[i]], 1, - tidx[i + 1] - tidx[i]) - for i in range(len(tswitch))] + tidx = [tsim.searchsorted(i) for i in tswitch] + [len(tsim) - 1] + ptemp = [profile[0]] + [ + casadi.repmat(profile[tswitch[i]], 1, tidx[i + 1] - tidx[i]) + for i in range(len(tswitch)) + ] temp = casadi.horzcat(*ptemp) palltemp.append(temp) @@ -1021,8 +1069,7 @@ def initialize_model(self): from simulating the dynamic model. """ if self._tsim is None: - raise DAE_Error( - "Tried to initialize the model without simulating it first") + raise DAE_Error("Tried to initialize the model without simulating it first") tvals = list(self._contset) @@ -1032,11 +1079,9 @@ def initialize_model(self): for idx, v in enumerate(initvars): for idx2, i in enumerate(v._args): - if type(i) is IndexTemplate: - break - valinit = np.interp(tvals, self._tsim, - self._simsolution[:, idx]) + if type(i) is IndexTemplate: + break + valinit = np.interp(tvals, self._tsim, self._simsolution[:, idx]) for i, t in enumerate(tvals): - vidx = tuple(v._args[0:idx2]) + (t,) + \ - tuple(v._args[idx2 + 1:]) + vidx = tuple(v._args[0:idx2]) + (t,) + tuple(v._args[idx2 + 1 :]) v._base[vidx] = valinit[i] diff --git a/pyomo/dae/tests/test_colloc.py b/pyomo/dae/tests/test_colloc.py index e6e2c1c6283..dda928110ae 100644 --- a/pyomo/dae/tests/test_colloc.py +++ b/pyomo/dae/tests/test_colloc.py @@ -12,8 +12,7 @@ from __future__ import print_function import pyomo.common.unittest as unittest -from pyomo.environ import (Var, Set, ConcreteModel, - TransformationFactory, pyomo) +from pyomo.environ import Var, Set, ConcreteModel, TransformationFactory, pyomo from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.dae.diffvar import DAE_Error @@ -21,9 +20,10 @@ from io import StringIO -from pyomo.common.log import LoggingIntercept +from pyomo.common.log import LoggingIntercept from os.path import abspath, dirname, normpath, join + currdir = dirname(abspath(__file__)) exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'dae')) @@ -49,24 +49,37 @@ def setUp(self): m.v1 = Var(m.t) m.dv1 = DerivativeVar(m.v1) m.s = Set(initialize=[1, 2, 3], ordered=True) - - # test collocation discretization with radau points + + # test collocation discretization with radau points # on var indexed by single ContinuousSet def test_disc_single_index_radau(self): m = self.m.clone() disc = TransformationFactory('dae.collocation') disc.apply_to(m, nfe=5, ncp=3) - + self.assertTrue(hasattr(m, 'dv1_disc_eq')) self.assertTrue(len(m.dv1_disc_eq) == 15) self.assertTrue(len(m.v1) == 16) - expected_tau_points = [0.0, 0.1550510257216822, - 0.64494897427831788, 1.0] - expected_disc_points = [0, 0.310102, 1.289898, 2.0, 2.310102, - 3.289898, 4.0, 4.310102, 5.289898, 6.0, - 6.310102, 7.289898, 8.0, 8.310102, 9.289898, - 10] + expected_tau_points = [0.0, 0.1550510257216822, 0.64494897427831788, 1.0] + expected_disc_points = [ + 0, + 0.310102, + 1.289898, + 2.0, + 2.310102, + 3.289898, + 4.0, + 4.310102, + 5.289898, + 6.0, + 6.310102, + 7.289898, + 8.0, + 8.310102, + 9.289898, + 10, + ] disc_info = m.t.get_discretization_info() self.assertTrue(disc_info['scheme'] == 'LAGRANGE-RADAU') @@ -81,21 +94,25 @@ def test_disc_single_index_radau(self): self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars')) self.assertTrue(m._pyomo_dae_reclassified_derivativevars[0] is m.dv1) - repn_baseline = {id(m.dv1[2.0]): 1.0, - id(m.v1[0]): 1.5, - id(m.v1[0.310102]): -2.76599, - id(m.v1[1.289898]): 3.76599, - id(m.v1[2.0]): -2.5} + repn_baseline = { + id(m.dv1[2.0]): 1.0, + id(m.v1[0]): 1.5, + id(m.v1[0.310102]): -2.76599, + id(m.v1[1.289898]): 3.76599, + id(m.v1[2.0]): -2.5, + } repn = generate_standard_repn(m.dv1_disc_eq[2.0].body) repn_gen = repn_to_rounded_dict(repn, 5) self.assertEqual(repn_baseline, repn_gen) - repn_baseline = {id(m.dv1[4.0]): 1.0, - id(m.v1[2.0]): 1.5, - id(m.v1[2.310102]): -2.76599, - id(m.v1[3.289898]): 3.76599, - id(m.v1[4.0]): -2.5} + repn_baseline = { + id(m.dv1[4.0]): 1.0, + id(m.v1[2.0]): 1.5, + id(m.v1[2.310102]): -2.76599, + id(m.v1[3.289898]): 3.76599, + id(m.v1[4.0]): -2.5, + } repn = generate_standard_repn(m.dv1_disc_eq[4.0].body) repn_gen = repn_to_rounded_dict(repn, 5) @@ -108,7 +125,7 @@ def test_disc_second_order_radau(self): m.dv1dt2 = DerivativeVar(m.v1, wrt=(m.t, m.t)) disc = TransformationFactory('dae.collocation') disc.apply_to(m, nfe=2, ncp=2) - + self.assertTrue(hasattr(m, 'dv1dt2_disc_eq')) self.assertTrue(len(m.dv1dt2_disc_eq) == 4) self.assertTrue(len(m.v1) == 5) @@ -117,19 +134,23 @@ def test_disc_second_order_radau(self): self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars) self.assertTrue(m.dv1dt2 in m._pyomo_dae_reclassified_derivativevars) - repn_baseline = {id(m.dv1dt2[5.0]): 1, - id(m.v1[0]): -0.24, - id(m.v1[1.666667]): 0.36, - id(m.v1[5.0]): -0.12} + repn_baseline = { + id(m.dv1dt2[5.0]): 1, + id(m.v1[0]): -0.24, + id(m.v1[1.666667]): 0.36, + id(m.v1[5.0]): -0.12, + } repn = generate_standard_repn(m.dv1dt2_disc_eq[5.0].body) repn_gen = repn_to_rounded_dict(repn, 5) self.assertEqual(repn_baseline, repn_gen) - repn_baseline = {id(m.dv1dt2[10]): 1, - id(m.v1[5.0]): -0.24, - id(m.v1[6.666667]): 0.36, - id(m.v1[10]): -0.12} + repn_baseline = { + id(m.dv1dt2[10]): 1, + id(m.v1[5.0]): -0.24, + id(m.v1[6.666667]): 0.36, + id(m.v1[10]): -0.12, + } repn = generate_standard_repn(m.dv1dt2_disc_eq[10.0].body) repn_gen = repn_to_rounded_dict(repn, 5) @@ -138,35 +159,58 @@ def test_disc_second_order_radau(self): # test second order derivative with single collocation point def test_disc_second_order_1cp(self): m = ConcreteModel() - m.t = ContinuousSet(bounds=(0,1)) - m.t2 = ContinuousSet(bounds=(0,10)) + m.t = ContinuousSet(bounds=(0, 1)) + m.t2 = ContinuousSet(bounds=(0, 10)) m.v = Var(m.t, m.t2) m.dv = DerivativeVar(m.v, wrt=(m.t, m.t2)) TransformationFactory('dae.collocation').apply_to(m, nfe=2, ncp=1) self.assertTrue(hasattr(m, 'dv_disc_eq')) self.assertTrue(len(m.dv_disc_eq) == 4) - self.assertTrue(len(m.v) == 9) + self.assertTrue(len(m.v) == 9) - # test collocation discretization with legendre points + # test collocation discretization with legendre points # on var indexed by single ContinuousSet def test_disc_single_index_legendre(self): m = self.m.clone() disc = TransformationFactory('dae.collocation') disc.apply_to(m, nfe=5, ncp=3, scheme='LAGRANGE-LEGENDRE') - + self.assertTrue(hasattr(m, 'dv1_disc_eq')) self.assertTrue(hasattr(m, 'v1_t_cont_eq')) self.assertTrue(len(m.dv1_disc_eq) == 15) self.assertTrue(len(m.v1_t_cont_eq) == 5) self.assertTrue(len(m.v1) == 21) - expected_tau_points = [0.0, 0.11270166537925834, 0.49999999999999989, - 0.88729833462074226] - expected_disc_points = [0, 0.225403, 1.0, 1.774597, 2.0, 2.225403, - 3.0, 3.774597, 4.0, 4.225403, 5.0, 5.774597, - 6.0, 6.225403, 7.0, 7.774597, 8.0, - 8.225403, 9.0, 9.774597, 10] + expected_tau_points = [ + 0.0, + 0.11270166537925834, + 0.49999999999999989, + 0.88729833462074226, + ] + expected_disc_points = [ + 0, + 0.225403, + 1.0, + 1.774597, + 2.0, + 2.225403, + 3.0, + 3.774597, + 4.0, + 4.225403, + 5.0, + 5.774597, + 6.0, + 6.225403, + 7.0, + 7.774597, + 8.0, + 8.225403, + 9.0, + 9.774597, + 10, + ] disc_info = m.t.get_discretization_info() self.assertTrue(disc_info['scheme'] == 'LAGRANGE-LEGENDRE') @@ -181,21 +225,25 @@ def test_disc_single_index_legendre(self): self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars')) self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars) - repn_baseline = {id(m.dv1[3.0]): 1, - id(m.v1[2.0]): -1.5, - id(m.v1[2.225403]): 2.86374, - id(m.v1[3.0]): -1.0, - id(m.v1[3.774597]): -0.36374} + repn_baseline = { + id(m.dv1[3.0]): 1, + id(m.v1[2.0]): -1.5, + id(m.v1[2.225403]): 2.86374, + id(m.v1[3.0]): -1.0, + id(m.v1[3.774597]): -0.36374, + } repn = generate_standard_repn(m.dv1_disc_eq[3.0].body) repn_gen = repn_to_rounded_dict(repn, 5) self.assertEqual(repn_baseline, repn_gen) - repn_baseline = {id(m.dv1[5.0]): 1, - id(m.v1[4.0]): -1.5, - id(m.v1[4.225403]): 2.86374, - id(m.v1[5.0]): -1.0, - id(m.v1[5.774597]): -0.36374} + repn_baseline = { + id(m.dv1[5.0]): 1, + id(m.v1[4.0]): -1.5, + id(m.v1[4.225403]): 2.86374, + id(m.v1[5.0]): -1.0, + id(m.v1[5.774597]): -0.36374, + } repn = generate_standard_repn(m.dv1_disc_eq[5.0].body) repn_gen = repn_to_rounded_dict(repn, 5) @@ -208,7 +256,7 @@ def test_disc_second_order_legendre(self): m.dv1dt2 = DerivativeVar(m.v1, wrt=(m.t, m.t)) disc = TransformationFactory('dae.collocation') disc.apply_to(m, nfe=2, ncp=2, scheme='LAGRANGE-LEGENDRE') - + self.assertTrue(hasattr(m, 'dv1dt2_disc_eq')) self.assertTrue(hasattr(m, 'v1_t_cont_eq')) self.assertTrue(len(m.dv1dt2_disc_eq) == 4) @@ -219,19 +267,23 @@ def test_disc_second_order_legendre(self): self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars) self.assertTrue(m.dv1dt2 in m._pyomo_dae_reclassified_derivativevars) - repn_baseline = {id(m.dv1dt2[1.056624]): 1, - id(m.v1[0]): -0.48, - id(m.v1[1.056624]): 0.65569, - id(m.v1[3.943376]): -0.17569} + repn_baseline = { + id(m.dv1dt2[1.056624]): 1, + id(m.v1[0]): -0.48, + id(m.v1[1.056624]): 0.65569, + id(m.v1[3.943376]): -0.17569, + } repn = generate_standard_repn(m.dv1dt2_disc_eq[1.056624].body) repn_gen = repn_to_rounded_dict(repn, 5) self.assertEqual(repn_baseline, repn_gen) - repn_baseline = {id(m.dv1dt2[6.056624]): 1, - id(m.v1[5.0]): -0.48, - id(m.v1[6.056624]): 0.65569, - id(m.v1[8.943376]): -0.17569} + repn_baseline = { + id(m.dv1dt2[6.056624]): 1, + id(m.v1[5.0]): -0.48, + id(m.v1[6.056624]): 0.65569, + id(m.v1[8.943376]): -0.17569, + } repn = generate_standard_repn(m.dv1dt2_disc_eq[6.056624].body) repn_gen = repn_to_rounded_dict(repn, 5) @@ -251,13 +303,25 @@ def test_disc_multi_index(self): self.assertTrue(len(m.dv2_disc_eq) == 45) self.assertTrue(len(m.v2) == 48) - expected_tau_points = [0.0, 0.1550510257216822, - 0.64494897427831788, - 1.0] - expected_disc_points = [0, 0.310102, 1.289898, 2.0, 2.310102, - 3.289898, - 4.0, 4.310102, 5.289898, 6.0, 6.310102, - 7.289898, 8.0, 8.310102, 9.289898, 10] + expected_tau_points = [0.0, 0.1550510257216822, 0.64494897427831788, 1.0] + expected_disc_points = [ + 0, + 0.310102, + 1.289898, + 2.0, + 2.310102, + 3.289898, + 4.0, + 4.310102, + 5.289898, + 6.0, + 6.310102, + 7.289898, + 8.0, + 8.310102, + 9.289898, + 10, + ] disc_info = m.t.get_discretization_info() self.assertTrue(disc_info['scheme'] == 'LAGRANGE-RADAU') @@ -268,8 +332,7 @@ def test_disc_multi_index(self): for idx, val in enumerate(list(m.t)): self.assertAlmostEqual(val, expected_disc_points[idx]) - self.assertTrue( - hasattr(m, '_pyomo_dae_reclassified_derivativevars')) + self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars')) self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars) self.assertTrue(m.dv2 in m._pyomo_dae_reclassified_derivativevars) @@ -292,7 +355,7 @@ def test_disc_multi_index2(self): expected_t_disc_points = [0, 1.666667, 5.0, 6.666667, 10] expected_t2_disc_points = [0, 0.833333, 2.5, 3.333333, 5] - + for idx, val in enumerate(list(m.t)): self.assertAlmostEqual(val, expected_t_disc_points[idx]) @@ -325,13 +388,25 @@ def test_disc_multidimen_index(self): self.assertTrue(len(m.dv3_disc_eq) == 45) self.assertTrue(len(m.v3) == 48) - expected_tau_points = [0.0, 0.1550510257216822, - 0.64494897427831788, - 1.0] - expected_disc_points = [0, 0.310102, 1.289898, 2.0, 2.310102, - 3.289898, - 4.0, 4.310102, 5.289898, 6.0, 6.310102, - 7.289898, 8.0, 8.310102, 9.289898, 10] + expected_tau_points = [0.0, 0.1550510257216822, 0.64494897427831788, 1.0] + expected_disc_points = [ + 0, + 0.310102, + 1.289898, + 2.0, + 2.310102, + 3.289898, + 4.0, + 4.310102, + 5.289898, + 6.0, + 6.310102, + 7.289898, + 8.0, + 8.310102, + 9.289898, + 10, + ] disc_info = m.t.get_discretization_info() self.assertTrue(disc_info['scheme'] == 'LAGRANGE-RADAU') @@ -342,8 +417,7 @@ def test_disc_multidimen_index(self): for idx, val in enumerate(list(m.t)): self.assertAlmostEqual(val, expected_disc_points[idx]) - self.assertTrue( - hasattr(m, '_pyomo_dae_reclassified_derivativevars')) + self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars')) self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars) self.assertTrue(m.dv2 in m._pyomo_dae_reclassified_derivativevars) self.assertTrue(m.dv3 in m._pyomo_dae_reclassified_derivativevars) @@ -393,13 +467,25 @@ def test_lookup_radau_collocation_points(self): self.assertTrue(len(m.dv1_disc_eq) == 15) self.assertTrue(len(m.v1) == 16) - expected_tau_points = [0.0, 0.1550510257216822, - 0.64494897427831788, - 1.0] - expected_disc_points = [0, 0.310102, 1.289898, 2.0, 2.310102, - 3.289898, - 4.0, 4.310102, 5.289898, 6.0, 6.310102, - 7.289898, 8.0, 8.310102, 9.289898, 10] + expected_tau_points = [0.0, 0.1550510257216822, 0.64494897427831788, 1.0] + expected_disc_points = [ + 0, + 0.310102, + 1.289898, + 2.0, + 2.310102, + 3.289898, + 4.0, + 4.310102, + 5.289898, + 6.0, + 6.310102, + 7.289898, + 8.0, + 8.310102, + 9.289898, + 10, + ] disc_info = m.t.get_discretization_info() self.assertTrue(disc_info['scheme'] == 'LAGRANGE-RADAU') @@ -434,12 +520,35 @@ def test_lookup_legendre_collocation_points(self): self.assertTrue(len(m.dv1_disc_eq) == 15) self.assertTrue(len(m.v1) == 21) - expected_tau_points = [0.0, 0.11270166537925834, 0.49999999999999989, - 0.88729833462074226] - expected_disc_points = [0, 0.225403, 1.0, 1.774597, 2.0, 2.225403, - 3.0, 3.774597, 4.0, 4.225403, 5.0, 5.774597, - 6.0, 6.225403, 7.0, 7.774597, 8.0, 8.225403, - 9.0, 9.774597, 10] + expected_tau_points = [ + 0.0, + 0.11270166537925834, + 0.49999999999999989, + 0.88729833462074226, + ] + expected_disc_points = [ + 0, + 0.225403, + 1.0, + 1.774597, + 2.0, + 2.225403, + 3.0, + 3.774597, + 4.0, + 4.225403, + 5.0, + 5.774597, + 6.0, + 6.225403, + 7.0, + 7.774597, + 8.0, + 8.225403, + 9.0, + 9.774597, + 10, + ] disc_info = m.t.get_discretization_info() diff --git a/pyomo/dae/tests/test_contset.py b/pyomo/dae/tests/test_contset.py index cd7a0bb1ec3..ce13d53dfd5 100644 --- a/pyomo/dae/tests/test_contset.py +++ b/pyomo/dae/tests/test_contset.py @@ -27,7 +27,6 @@ class TestContinuousSet(unittest.TestCase): - # test __init__ def test_init(self): model = ConcreteModel() @@ -86,8 +85,7 @@ def test_valid_declaration(self): self.assertEqual(model.t.last(), 4) model = ConcreteModel() - with self.assertRaisesRegex( - ValueError, r"value is not in the domain \[0..4\]"): + with self.assertRaisesRegex(ValueError, r"value is not in the domain \[0..4\]"): model.t = ContinuousSet(bounds=(0, 4), initialize=[1, 2, 3, 5]) # self.assertEqual(len(model.t), 5) # self.assertEqual(model.t.first(), 0) @@ -96,8 +94,7 @@ def test_valid_declaration(self): # del model.t model = ConcreteModel() - with self.assertRaisesRegex( - ValueError, r"value is not in the domain \[2..6\]"): + with self.assertRaisesRegex(ValueError, r"value is not in the domain \[2..6\]"): model.t = ContinuousSet(bounds=(2, 6), initialize=[1, 2, 3, 5]) # self.assertEqual(len(model.t), 5) # self.assertEqual(model.t.first(), 1) @@ -105,8 +102,7 @@ def test_valid_declaration(self): # del model.t model = ConcreteModel() - with self.assertRaisesRegex( - ValueError, r"value is not in the domain \[2..4\]"): + with self.assertRaisesRegex(ValueError, r"value is not in the domain \[2..4\]"): model.t = ContinuousSet(bounds=(2, 4), initialize=[1, 3, 5]) # self.assertEqual(len(model.t), 3) # self.assertNotIn(2, model.t) @@ -191,16 +187,16 @@ def test_get_lower_element_boundary(self): def test_duplicate_construct(self): m = ConcreteModel() - m.t = ContinuousSet(initialize=[1,2,3]) - self.assertEqual(m.t, [1,2,3]) - self.assertEqual(m.t._fe, [1,2,3]) + m.t = ContinuousSet(initialize=[1, 2, 3]) + self.assertEqual(m.t, [1, 2, 3]) + self.assertEqual(m.t._fe, [1, 2, 3]) m.t.add(1.5) m.t.add(2.5) - self.assertEqual(m.t, [1,1.5,2,2.5,3]) - self.assertEqual(m.t._fe, [1,2,3]) + self.assertEqual(m.t, [1, 1.5, 2, 2.5, 3]) + self.assertEqual(m.t._fe, [1, 2, 3]) m.t.construct() - self.assertEqual(m.t, [1,1.5,2,2.5,3]) - self.assertEqual(m.t._fe, [1,2,3]) + self.assertEqual(m.t, [1, 1.5, 2, 2.5, 3]) + self.assertEqual(m.t._fe, [1, 2, 3]) def test_find_nearest_index(self): m = ConcreteModel() @@ -230,8 +226,8 @@ def test_find_nearest_index(self): init_list = [] for i in range(5): i0 = float(i) - i1 = round((i+0.15)*1e4)/1e4 - i2 = round((i+0.64)*1e4)/1e4 + i1 = round((i + 0.15) * 1e4) / 1e4 + i2 = round((i + 0.64) * 1e4) / 1e4 # Round to get rid of numerical error due to float addition init_list.extend([i, i1, i2]) init_list.append(5.0) @@ -274,12 +270,11 @@ def test_find_nearest_index(self): # delta_right == 2.15-2.075 == 0.07499999999999973 # Index 8 is returned (time[8] == 2.15), even though # "tie"-break logic suggests index 7 should be. - #i = m.time.find_nearest_index(2.075) - #self.assertEqual(i, 7) + # i = m.time.find_nearest_index(2.075) + # self.assertEqual(i, 7) class TestIO(unittest.TestCase): - def setUp(self): # # Create Model @@ -311,9 +306,10 @@ def test_io2(self): OUTPUT.close() self.model.A = ContinuousSet(bounds=(0, 4)) with self.assertRaisesRegex( - ValueError, r"The value is not in the domain \[0..4\]"): + ValueError, r"The value is not in the domain \[0..4\]" + ): self.instance = self.model.create_instance("diffset.dat") - #self.assertEqual(len(self.instance.A), 4) + # self.assertEqual(len(self.instance.A), 4) def test_io3(self): OUTPUT = open("diffset.dat", "w") @@ -323,9 +319,10 @@ def test_io3(self): OUTPUT.close() self.model.A = ContinuousSet(bounds=(2, 6)) with self.assertRaisesRegex( - ValueError, r"The value is not in the domain \[2..6\]"): + ValueError, r"The value is not in the domain \[2..6\]" + ): self.instance = self.model.create_instance("diffset.dat") - #self.assertEqual(len(self.instance.A), 4) + # self.assertEqual(len(self.instance.A), 4) def test_io4(self): OUTPUT = open("diffset.dat", "w") @@ -335,9 +332,10 @@ def test_io4(self): OUTPUT.close() self.model.A = ContinuousSet(bounds=(2, 4)) with self.assertRaisesRegex( - ValueError, r"The value is not in the domain \[2..4\]"): + ValueError, r"The value is not in the domain \[2..4\]" + ): self.instance = self.model.create_instance("diffset.dat") - #self.assertEqual(len(self.instance.A), 3) + # self.assertEqual(len(self.instance.A), 3) def test_io5(self): OUTPUT = open("diffset.dat", "w") @@ -372,5 +370,6 @@ def test_io7(self): self.instance = self.model.create_instance("diffset.dat") self.assertEqual(len(self.instance.B), 2) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/dae/tests/test_diffvar.py b/pyomo/dae/tests/test_diffvar.py index f0966316edd..718781d5916 100644 --- a/pyomo/dae/tests/test_diffvar.py +++ b/pyomo/dae/tests/test_diffvar.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -# +# # Unit Tests for DerivativeVar Objects # @@ -26,7 +26,6 @@ class TestDerivativeVar(unittest.TestCase): - # test valid declarations def test_valid(self): m = ConcreteModel() diff --git a/pyomo/dae/tests/test_finite_diff.py b/pyomo/dae/tests/test_finite_diff.py index 95da52a2e72..9ae7ecdea91 100644 --- a/pyomo/dae/tests/test_finite_diff.py +++ b/pyomo/dae/tests/test_finite_diff.py @@ -12,8 +12,7 @@ from __future__ import print_function import pyomo.common.unittest as unittest -from pyomo.environ import (Var, Set, ConcreteModel, - TransformationFactory) +from pyomo.environ import Var, Set, ConcreteModel, TransformationFactory from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.dae.diffvar import DAE_Error @@ -22,6 +21,7 @@ from pyomo.common.log import LoggingIntercept from os.path import abspath, dirname, normpath, join + currdir = dirname(abspath(__file__)) exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'dae')) @@ -40,14 +40,14 @@ def setUp(self): m.v1 = Var(m.t) m.dv1 = DerivativeVar(m.v1) m.s = Set(initialize=[1, 2, 3], ordered=True) - + # test backward finite difference discretization # on var indexed by single ContinuousSet def test_disc_single_index_backward(self): m = self.m.clone() disc = TransformationFactory('dae.finite_difference') disc.apply_to(m, nfe=5) - + self.assertTrue(hasattr(m, 'dv1_disc_eq')) self.assertEqual(len(m.dv1_disc_eq), 5) self.assertEqual(len(m.v1), 6) @@ -63,8 +63,7 @@ def test_disc_single_index_backward(self): self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars')) self.assertIn(m.dv1, m._pyomo_dae_reclassified_derivativevars) - output = \ -"""\ + output = """\ dv1_disc_eq : Size=5, Index=t, Active=True Key : Lower : Body : Upper : Active 2.0 : 0.0 : dv1[2.0] - 0.5*(v1[2.0] - v1[0]) : 0.0 : True @@ -84,7 +83,7 @@ def test_disc_second_order_backward(self): m.dv1dt2 = DerivativeVar(m.v1, wrt=(m.t, m.t)) disc = TransformationFactory('dae.finite_difference') disc.apply_to(m, nfe=2) - + self.assertTrue(hasattr(m, 'dv1dt2_disc_eq')) self.assertEqual(len(m.dv1dt2_disc_eq), 1) self.assertEqual(len(m.v1), 3) @@ -93,8 +92,7 @@ def test_disc_second_order_backward(self): self.assertIn(m.dv1, m._pyomo_dae_reclassified_derivativevars) self.assertIn(m.dv1dt2, m._pyomo_dae_reclassified_derivativevars) - output = \ -"""\ + output = """\ dv1dt2_disc_eq : Size=1, Index=t, Active=True Key : Lower : Body : Upper : Active 10 : 0.0 : dv1dt2[10] - 0.04*(v1[10] - 2*v1[5.0] + v1[0]) : 0.0 : True @@ -125,8 +123,7 @@ def test_disc_single_index_forward(self): self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars')) self.assertIn(m.dv1, m._pyomo_dae_reclassified_derivativevars) - output = \ -"""\ + output = """\ dv1_disc_eq : Size=5, Index=t, Active=True Key : Lower : Body : Upper : Active 0 : 0.0 : dv1[0] - 0.5*(v1[2.0] - v1[0]) : 0.0 : True @@ -155,8 +152,7 @@ def test_disc_second_order_forward(self): self.assertIn(m.dv1, m._pyomo_dae_reclassified_derivativevars) self.assertIn(m.dv1dt2, m._pyomo_dae_reclassified_derivativevars) - output = \ -"""\ + output = """\ dv1dt2_disc_eq : Size=1, Index=t, Active=True Key : Lower : Body : Upper : Active 0 : 0.0 : dv1dt2[0] - 0.04*(v1[10] - 2*v1[5.0] + v1[0]) : 0.0 : True @@ -184,8 +180,7 @@ def test_disc_single_index_central(self): for idx, val in enumerate(list(m.t)): self.assertAlmostEqual(val, expected_disc_points[idx]) - output = \ -"""\ + output = """\ dv1_disc_eq : Size=4, Index=t, Active=True Key : Lower : Body : Upper : Active 2.0 : 0.0 : dv1[2.0] - 0.25*(v1[4.0] - v1[0]) : 0.0 : True @@ -209,8 +204,7 @@ def test_disc_second_order_central(self): self.assertEqual(len(m.dv1dt2_disc_eq), 1) self.assertEqual(len(m.v1), 3) - output = \ -"""\ + output = """\ dv1dt2_disc_eq : Size=1, Index=t, Active=True Key : Lower : Body : Upper : Active 5.0 : 0.0 : dv1dt2[5.0] - 0.04*(v1[10] - 2*v1[5.0] + v1[0]) : 0.0 : True @@ -260,7 +254,7 @@ def test_disc_multi_index2(self): expected_t_disc_points = [0, 5.0, 10] expected_t2_disc_points = [0, 2.5, 5] - + for idx, val in enumerate(list(m.t)): self.assertAlmostEqual(val, expected_t_disc_points[idx]) @@ -307,12 +301,10 @@ def test_disc_invalid_options(self): TransformationFactory('dae.finite_difference').apply_to(m, nfe=-1) with self.assertRaises(ValueError): - TransformationFactory('dae.finite_difference').apply_to(m, - scheme='foo') + TransformationFactory('dae.finite_difference').apply_to(m, scheme='foo') with self.assertRaises(ValueError): - TransformationFactory('dae.finite_difference').apply_to(m, - foo=True) + TransformationFactory('dae.finite_difference').apply_to(m, foo=True) TransformationFactory('dae.finite_difference').apply_to(m, wrt=m.t) with self.assertRaises(ValueError): diff --git a/pyomo/dae/tests/test_flatten.py b/pyomo/dae/tests/test_flatten.py index 768be9cd523..a6ea824c3ef 100644 --- a/pyomo/dae/tests/test_flatten.py +++ b/pyomo/dae/tests/test_flatten.py @@ -11,36 +11,49 @@ import pyomo.common.unittest as unittest from pyomo.environ import ( - ConcreteModel, - Block, - Var, - Reference, - Set, - Constraint, - ComponentUID, - ) + ConcreteModel, + Block, + Var, + Reference, + Set, + Constraint, + ComponentUID, +) from pyomo.dae import ContinuousSet from pyomo.common.collections import ComponentSet, ComponentMap -from pyomo.core.base.indexed_component import ( - UnindexedComponent_set, - normalize_index, - ) +from pyomo.core.base.indexed_component import UnindexedComponent_set, normalize_index from pyomo.dae.flatten import ( - flatten_dae_components, - flatten_components_along_sets, - slice_component_along_sets, - ) + flatten_dae_components, + flatten_components_along_sets, + slice_component_along_sets, +) + + +class _TestFlattenBase(object): + """A base class to hold the common _hashRef utility method. + We don't just derive from Test... classes directly as this + causes tests to run twice. + + """ + + def _hashRef(self, ref): + if not ref.is_indexed(): + return (id(ref),) + else: + return tuple(sorted(id(_) for _ in ref.values())) + class TestAssumedBehavior(unittest.TestCase): """ These are some behaviors we rely on that weren't immediately obvious would be the case. """ + def test_cross(self): m = ConcreteModel() - m.s1 = Set(initialize=[1,2]) - m.s2 = Set(initialize=[3,4]) - m.s3 = Set(initialize=['a','b']) + m.s1 = Set(initialize=[1, 2]) + m.s2 = Set(initialize=[3, 4]) + m.s3 = Set(initialize=['a', 'b']) normalize_index.flatten = True @@ -56,7 +69,7 @@ def test_cross(self): self.assertIsNot(type(j), tuple) normalize_index.flatten = False - # This behavior is consistent regardless of the value of + # This behavior is consistent regardless of the value of # normalize_index.flatten for i in m.s1.cross(): @@ -70,9 +83,9 @@ def test_cross(self): def test_subsets(self): m = ConcreteModel() - m.s1 = Set(initialize=[1,2]) - m.s2 = Set(initialize=[3,4]) - m.s3 = Set(initialize=['a','b']) + m.s1 = Set(initialize=[1, 2]) + m.s2 = Set(initialize=[3, 4]) + m.s3 = Set(initialize=['a', 'b']) normalize_index.flatten = True @@ -108,18 +121,15 @@ def test_subsets(self): normalize_index.flatten = True -class TestCategorize(unittest.TestCase): - def _hashRef(self, ref): - return tuple(sorted(id(_) for _ in ref.values())) - +class TestCategorize(_TestFlattenBase, unittest.TestCase): def test_flat_model(self): m = ConcreteModel() - m.T = ContinuousSet(bounds=(0,1)) + m.T = ContinuousSet(bounds=(0, 1)) m.x = Var() - m.y = Var([1,2]) + m.y = Var([1, 2]) m.a = Var(m.T) - m.b = Var(m.T, [1,2]) - m.c = Var([3,4], m.T) + m.b = Var(m.T, [1, 2]) + m.c = Var([3, 4], m.T) regular, time = flatten_dae_components(m, m.T, Var) regular_id = set(id(_) for _ in regular) @@ -128,16 +138,16 @@ def test_flat_model(self): self.assertIn(id(m.y[1]), regular_id) self.assertIn(id(m.y[2]), regular_id) # Output for debugging - #for v in time: + # for v in time: # v.pprint() # for _ in v.values(): # print" -> ", _.name ref_data = { self._hashRef(Reference(m.a[:])), - self._hashRef(Reference(m.b[:,1])), - self._hashRef(Reference(m.b[:,2])), - self._hashRef(Reference(m.c[3,:])), - self._hashRef(Reference(m.c[4,:])), + self._hashRef(Reference(m.b[:, 1])), + self._hashRef(Reference(m.b[:, 2])), + self._hashRef(Reference(m.c[3, :])), + self._hashRef(Reference(m.c[4, :])), } self.assertEqual(len(time), len(ref_data)) for ref in time: @@ -145,85 +155,85 @@ def test_flat_model(self): def test_1level_model(self): m = ConcreteModel() - m.T = ContinuousSet(bounds=(0,1)) - @m.Block([1,2],m.T) + m.T = ContinuousSet(bounds=(0, 1)) + + @m.Block([1, 2], m.T) def B(b, i, t): - b.x = Var(list(range(2*i, 2*i+2))) + b.x = Var(list(range(2 * i, 2 * i + 2))) regular, time = flatten_dae_components(m, m.T, Var) self.assertEqual(len(regular), 0) # Output for debugging - #for v in time: + # for v in time: # v.pprint() # for _ in v.values(): # print" -> ", _.name ref_data = { - self._hashRef(Reference(m.B[1,:].x[2])), - self._hashRef(Reference(m.B[1,:].x[3])), - self._hashRef(Reference(m.B[2,:].x[4])), - self._hashRef(Reference(m.B[2,:].x[5])), + self._hashRef(Reference(m.B[1, :].x[2])), + self._hashRef(Reference(m.B[1, :].x[3])), + self._hashRef(Reference(m.B[2, :].x[4])), + self._hashRef(Reference(m.B[2, :].x[5])), } self.assertEqual(len(time), len(ref_data)) for ref in time: self.assertIn(self._hashRef(ref), ref_data) - def test_2level_model(self): m = ConcreteModel() - m.T = ContinuousSet(bounds=(0,1)) - @m.Block([1,2],m.T) + m.T = ContinuousSet(bounds=(0, 1)) + + @m.Block([1, 2], m.T) def B(b, i, t): - @b.Block(list(range(2*i, 2*i+2))) + @b.Block(list(range(2 * i, 2 * i + 2))) def bb(bb, j): - bb.y = Var([10,11]) - b.x = Var(list(range(2*i, 2*i+2))) + bb.y = Var([10, 11]) + + b.x = Var(list(range(2 * i, 2 * i + 2))) regular, time = flatten_dae_components(m, m.T, Var) self.assertEqual(len(regular), 0) # Output for debugging - #for v in time: + # for v in time: # v.pprint() # for _ in v.values(): # print" -> ", _.name ref_data = { - self._hashRef(Reference(m.B[1,:].x[2])), - self._hashRef(Reference(m.B[1,:].x[3])), - self._hashRef(Reference(m.B[2,:].x[4])), - self._hashRef(Reference(m.B[2,:].x[5])), - self._hashRef(Reference(m.B[1,:].bb[2].y[10])), - self._hashRef(Reference(m.B[1,:].bb[2].y[11])), - self._hashRef(Reference(m.B[1,:].bb[3].y[10])), - self._hashRef(Reference(m.B[1,:].bb[3].y[11])), - self._hashRef(Reference(m.B[2,:].bb[4].y[10])), - self._hashRef(Reference(m.B[2,:].bb[4].y[11])), - self._hashRef(Reference(m.B[2,:].bb[5].y[10])), - self._hashRef(Reference(m.B[2,:].bb[5].y[11])), + self._hashRef(Reference(m.B[1, :].x[2])), + self._hashRef(Reference(m.B[1, :].x[3])), + self._hashRef(Reference(m.B[2, :].x[4])), + self._hashRef(Reference(m.B[2, :].x[5])), + self._hashRef(Reference(m.B[1, :].bb[2].y[10])), + self._hashRef(Reference(m.B[1, :].bb[2].y[11])), + self._hashRef(Reference(m.B[1, :].bb[3].y[10])), + self._hashRef(Reference(m.B[1, :].bb[3].y[11])), + self._hashRef(Reference(m.B[2, :].bb[4].y[10])), + self._hashRef(Reference(m.B[2, :].bb[4].y[11])), + self._hashRef(Reference(m.B[2, :].bb[5].y[10])), + self._hashRef(Reference(m.B[2, :].bb[5].y[11])), } self.assertEqual(len(time), len(ref_data)) for ref in time: self.assertIn(self._hashRef(ref), ref_data) - def test_2dim_set(self): m = ConcreteModel() - m.time = ContinuousSet(bounds=(0,1)) + m.time = ContinuousSet(bounds=(0, 1)) - m.v = Var(m.time, [('a',1), ('b',2)]) + m.v = Var(m.time, [('a', 1), ('b', 2)]) scalar, dae = flatten_dae_components(m, m.time, Var) self.assertEqual(len(scalar), 0) ref_data = { - self._hashRef(Reference(m.v[:,'a',1])), - self._hashRef(Reference(m.v[:,'b',2])), - } + self._hashRef(Reference(m.v[:, 'a', 1])), + self._hashRef(Reference(m.v[:, 'b', 2])), + } self.assertEqual(len(dae), len(ref_data)) for ref in dae: self.assertIn(self._hashRef(ref), ref_data) - def test_indexed_block(self): m = ConcreteModel() - m.time = ContinuousSet(bounds=(0,1)) + m.time = ContinuousSet(bounds=(0, 1)) m.comp = Set(initialize=['a', 'b']) def bb_rule(bb, t): @@ -237,34 +247,36 @@ def b_rule(b, c): scalar, dae = flatten_dae_components(m, m.time, Var) self.assertEqual(len(scalar), 0) ref_data = { - self._hashRef(Reference(m.b['a'].bb[:].dae_var)), - self._hashRef(Reference(m.b['b'].bb[:].dae_var)), - } + self._hashRef(Reference(m.b['a'].bb[:].dae_var)), + self._hashRef(Reference(m.b['b'].bb[:].dae_var)), + } self.assertEqual(len(dae), len(ref_data)) for ref in dae: self.assertIn(self._hashRef(ref), ref_data) - def test_constraint(self): m = ConcreteModel() - m.time = ContinuousSet(bounds=(0,1)) + m.time = ContinuousSet(bounds=(0, 1)) m.comp = Set(initialize=['a', 'b']) m.v0 = Var() m.v1 = Var(m.time) m.v2 = Var(m.time, m.comp) - + def c0_rule(m): return m.v0 == 1 + m.c0 = Constraint(rule=c0_rule) def c1_rule(m, t): return m.v1[t] == 3 + m.c1 = Constraint(m.time, rule=c1_rule) @m.Block(m.time) def b(b, t): def c2_rule(b, j): return b.model().v2[t, j] == 5 + b.c2 = Constraint(m.comp, rule=c2_rule) scalar, dae = flatten_dae_components(m, m.time, Constraint) @@ -272,50 +284,41 @@ def c2_rule(b, j): self.assertIn(id(m.c0), hash_scalar) ref_data = { - self._hashRef(Reference(m.c1[:])), - self._hashRef(Reference(m.b[:].c2['a'])), - self._hashRef(Reference(m.b[:].c2['b'])), - } + self._hashRef(Reference(m.c1[:])), + self._hashRef(Reference(m.b[:].c2['a'])), + self._hashRef(Reference(m.b[:].c2['b'])), + } self.assertEqual(len(dae), len(ref_data)) for ref in dae: self.assertIn(self._hashRef(ref), ref_data) - def test_constraint_skip(self): m = ConcreteModel() - m.time = ContinuousSet(bounds=(0,1)) + m.time = ContinuousSet(bounds=(0, 1)) m.v = Var(m.time) + def c_rule(m, t): if t == m.time.first(): return Constraint.Skip - return m.v[t] == 1. + return m.v[t] == 1.0 m.c = Constraint(m.time, rule=c_rule) scalar, dae = flatten_dae_components(m, m.time, Constraint) - ref_data = { - self._hashRef(Reference(m.c[:])), - } + ref_data = {self._hashRef(Reference(m.c[:]))} self.assertEqual(len(dae), len(ref_data)) for ref in dae: self.assertIn(self._hashRef(ref), ref_data) -class TestFlatten(TestCategorize): - - def _hashRef(self, ref): - if not ref.is_indexed(): - return (id(ref),) - else: - return tuple(sorted(id(_) for _ in ref.values())) - +class TestFlatten(_TestFlattenBase, unittest.TestCase): def _model1_1d_sets(self): # One-dimensional sets, no skipping. m = ConcreteModel() - m.time = Set(initialize=[1,2,3]) + m.time = Set(initialize=[1, 2, 3]) m.space = Set(initialize=[0.0, 0.5, 1.0]) - m.comp = Set(initialize=['a','b']) + m.comp = Set(initialize=['a', 'b']) m.v0 = Var() m.v1 = Var(m.time) @@ -327,7 +330,6 @@ def _model1_1d_sets(self): @m.Block() def b(b): - @b.Block(m.time) def b1(b1): b1.v0 = Var() @@ -350,7 +352,7 @@ def b2(b2): def test_flatten_m1_along_time_space(self): m = self._model1_1d_sets() - + sets = ComponentSet((m.time, m.space)) sets_list, comps_list = flatten_components_along_sets(m, sets, Var) assert len(sets_list) == len(comps_list) @@ -358,26 +360,22 @@ def test_flatten_m1_along_time_space(self): for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is UnindexedComponent_set: - ref_data = { - self._hashRef(m.v0) - } + ref_data = {self._hashRef(m.v0)} assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 1 and sets[0] is m.time: ref_data = { - self._hashRef(Reference(m.v1)), - self._hashRef(Reference(m.b.b1[:].v0)), - } + self._hashRef(Reference(m.v1)), + self._hashRef(Reference(m.b.b1[:].v0)), + } assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 2 and sets[0] is m.time and sets[1] is m.time: - ref_data = { - self._hashRef(Reference(m.v_tt)), - } + ref_data = {self._hashRef(Reference(m.v_tt))} assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -386,33 +384,45 @@ def test_flatten_m1_along_time_space(self): ref_data = { self._hashRef(m.v2), self._hashRef(Reference(m.b.b1[:].v1[:])), - self._hashRef(Reference(m.b.b2[:,:].v0)), + self._hashRef(Reference(m.b.b2[:, :].v0)), self._hashRef(Reference(m.b.b1[:].b_s[:].v0)), } - ref_data.update(self._hashRef(Reference(m.v3[:,:,j])) for j in m.comp) - ref_data.update(self._hashRef(Reference(m.b.b1[:].v2[:,j])) for j in m.comp) - ref_data.update(self._hashRef(Reference(m.b.b2[:,:].v1[j])) for j in m.comp) + ref_data.update(self._hashRef(Reference(m.v3[:, :, j])) for j in m.comp) + ref_data.update( + self._hashRef(Reference(m.b.b1[:].v2[:, j])) for j in m.comp + ) + ref_data.update( + self._hashRef(Reference(m.b.b2[:, :].v1[j])) for j in m.comp + ) assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) - elif (len(sets) == 3 and sets[0] is m.time and sets[1] is m.space - and sets[2] is m.time): - ref_data = { - self._hashRef(m.v_tst), - } - ref_data.update(self._hashRef(Reference(m.b.b2[:,:].v2[:,j])) for j in m.comp) + elif ( + len(sets) == 3 + and sets[0] is m.time + and sets[1] is m.space + and sets[2] is m.time + ): + ref_data = {self._hashRef(m.v_tst)} + ref_data.update( + self._hashRef(Reference(m.b.b2[:, :].v2[:, j])) for j in m.comp + ) assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) - elif (len(sets) == 3 and sets[0] is m.time and sets[1] is m.space - and sets[2] is m.space): - ref_data = { - self._hashRef(Reference(m.b.b1[:].b_s[:].v1[:])), - } - ref_data.update(self._hashRef(Reference(m.b.b1[:].b_s[:].v2[:,j])) for j in m.comp), + elif ( + len(sets) == 3 + and sets[0] is m.time + and sets[1] is m.space + and sets[2] is m.space + ): + ref_data = {self._hashRef(Reference(m.b.b1[:].b_s[:].v1[:]))} + ref_data.update( + self._hashRef(Reference(m.b.b1[:].b_s[:].v2[:, j])) for j in m.comp + ), assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -422,7 +432,7 @@ def test_flatten_m1_along_time_space(self): def test_flatten_m1_empty(self): m = self._model1_1d_sets() - + sets = ComponentSet() sets_list, comps_list = flatten_components_along_sets(m, sets, Var) assert len(sets_list) == len(comps_list) @@ -430,9 +440,7 @@ def test_flatten_m1_empty(self): for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is UnindexedComponent_set: - ref_data = { - self._hashRef(v) for v in m.component_data_objects(Var) - } + ref_data = {self._hashRef(v) for v in m.component_data_objects(Var)} assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -441,49 +449,62 @@ def test_flatten_m1_empty(self): def test_flatten_m1_along_space(self): m = self._model1_1d_sets() - + sets = ComponentSet((m.space,)) sets_list, comps_list = flatten_components_along_sets(m, sets, Var) assert len(sets_list) == len(comps_list) assert len(sets_list) == 3 T = m.time - TC = m.time*m.comp - TT = m.time*m.time - TTC = m.time*m.time*m.comp + TC = m.time * m.comp + TT = m.time * m.time + TTC = m.time * m.time * m.comp # These products are nested, i.e. ((t,t),j). This is fine # as normalize_index.flatten is True. for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is UnindexedComponent_set: - ref_data = { - self._hashRef(m.v0), - } + ref_data = {self._hashRef(m.v0)} ref_data.update(self._hashRef(m.v1[t]) for t in T) - ref_data.update(self._hashRef(m.v_tt[t1,t2]) for t1, t2 in TT) + ref_data.update(self._hashRef(m.v_tt[t1, t2]) for t1, t2 in TT) ref_data.update(self._hashRef(m.b.b1[t].v0) for t in T) assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 1 and sets[0] is m.space: ref_data = set() - ref_data.update(self._hashRef(Reference(m.v2[t,:])) for t in T) - ref_data.update(self._hashRef(Reference(m.v3[t,:,j])) for t, j in TC) - ref_data.update(self._hashRef(Reference(m.v_tst[t1,:,t2])) for t1, t2 in TT) + ref_data.update(self._hashRef(Reference(m.v2[t, :])) for t in T) + ref_data.update(self._hashRef(Reference(m.v3[t, :, j])) for t, j in TC) + ref_data.update( + self._hashRef(Reference(m.v_tst[t1, :, t2])) for t1, t2 in TT + ) ref_data.update(self._hashRef(Reference(m.b.b1[t].v1[:])) for t in T) - ref_data.update(self._hashRef(Reference(m.b.b1[t].v2[:,j])) for t, j in TC) - ref_data.update(self._hashRef(Reference(m.b.b1[t].b_s[:].v0)) for t in T) - ref_data.update(self._hashRef(Reference(m.b.b2[t,:].v0)) for t in T) - ref_data.update(self._hashRef(Reference(m.b.b2[t,:].v1[j])) for t, j in TC) - ref_data.update(self._hashRef(Reference(m.b.b2[t1,:].v2[t2,j])) for t1, t2, j in TTC) + ref_data.update( + self._hashRef(Reference(m.b.b1[t].v2[:, j])) for t, j in TC + ) + ref_data.update( + self._hashRef(Reference(m.b.b1[t].b_s[:].v0)) for t in T + ) + ref_data.update(self._hashRef(Reference(m.b.b2[t, :].v0)) for t in T) + ref_data.update( + self._hashRef(Reference(m.b.b2[t, :].v1[j])) for t, j in TC + ) + ref_data.update( + self._hashRef(Reference(m.b.b2[t1, :].v2[t2, j])) + for t1, t2, j in TTC + ) assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 2 and sets[0] is m.space and sets[1] is m.space: ref_data = set() - ref_data.update(self._hashRef(Reference(m.b.b1[t].b_s[:].v1[:])) for t in T) - ref_data.update(self._hashRef(Reference(m.b.b1[t].b_s[:].v2[:,j])) for t,j in TC) + ref_data.update( + self._hashRef(Reference(m.b.b1[t].b_s[:].v1[:])) for t in T + ) + ref_data.update( + self._hashRef(Reference(m.b.b1[t].b_s[:].v2[:, j])) for t, j in TC + ) assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -492,49 +513,58 @@ def test_flatten_m1_along_space(self): def test_flatten_m1_along_time(self): m = self._model1_1d_sets() - + sets = ComponentSet((m.time,)) sets_list, comps_list = flatten_components_along_sets(m, sets, Var) S = m.space - SS = m.space*m.space - SC = m.space*m.comp - SSC = m.space*m.space*m.comp + SS = m.space * m.space + SC = m.space * m.comp + SSC = m.space * m.space * m.comp assert len(sets_list) == 3 for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is UnindexedComponent_set: - ref_data = { - self._hashRef(Reference(m.v0)), - } + ref_data = {self._hashRef(Reference(m.v0))} assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 1 and sets[0] is m.time: ref_data = { - # Components indexed only by time; + # Components indexed only by time; self._hashRef(Reference(m.v1)), self._hashRef(Reference(m.b.b1[:].v0)), } # Components indexed by time and some other set(s) - ref_data.update(self._hashRef(Reference(m.v2[:,x])) for x in S) - ref_data.update(self._hashRef(Reference(m.v3[:,x,j])) for x, j in SC) + ref_data.update(self._hashRef(Reference(m.v2[:, x])) for x in S) + ref_data.update(self._hashRef(Reference(m.v3[:, x, j])) for x, j in SC) ref_data.update(self._hashRef(Reference(m.b.b1[:].v1[x])) for x in S) - ref_data.update(self._hashRef(Reference(m.b.b1[:].v2[x,j])) for x, j in SC) - ref_data.update(self._hashRef(Reference(m.b.b1[:].b_s[x].v0)) for x in S) - ref_data.update(self._hashRef(Reference(m.b.b1[:].b_s[x1].v1[x2])) for x1, x2 in SS) - ref_data.update(self._hashRef(Reference(m.b.b1[:].b_s[x1].v2[x2,j])) for x1,x2,j in SSC) - ref_data.update(self._hashRef(Reference(m.b.b2[:,x].v0)) for x in S) - ref_data.update(self._hashRef(Reference(m.b.b2[:,x].v1[j])) for x, j in SC) + ref_data.update( + self._hashRef(Reference(m.b.b1[:].v2[x, j])) for x, j in SC + ) + ref_data.update( + self._hashRef(Reference(m.b.b1[:].b_s[x].v0)) for x in S + ) + ref_data.update( + self._hashRef(Reference(m.b.b1[:].b_s[x1].v1[x2])) for x1, x2 in SS + ) + ref_data.update( + self._hashRef(Reference(m.b.b1[:].b_s[x1].v2[x2, j])) + for x1, x2, j in SSC + ) + ref_data.update(self._hashRef(Reference(m.b.b2[:, x].v0)) for x in S) + ref_data.update( + self._hashRef(Reference(m.b.b2[:, x].v1[j])) for x, j in SC + ) assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 2 and sets[0] is m.time and sets[1] is m.time: - ref_data = { - self._hashRef(Reference(m.v_tt)), - } - ref_data.update(self._hashRef(Reference(m.v_tst[:,x,:])) for x in S) - ref_data.update(self._hashRef(Reference(m.b.b2[:,x].v2[:,j])) for x, j in SC) + ref_data = {self._hashRef(Reference(m.v_tt))} + ref_data.update(self._hashRef(Reference(m.v_tst[:, x, :])) for x in S) + ref_data.update( + self._hashRef(Reference(m.b.b2[:, x].v2[:, j])) for x, j in SC + ) assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -547,9 +577,9 @@ def _model2_nd_sets(self): normalize_index.flatten = False - m.d1 = Set(initialize=[1,2]) - m.d2 = Set(initialize=[('a',1), ('b',2)]) - m.dn = Set(initialize=[('c',3), ('d',4,5)], dimen=None) + m.d1 = Set(initialize=[1, 2]) + m.d2 = Set(initialize=[('a', 1), ('b', 2)]) + m.dn = Set(initialize=[('c', 3), ('d', 4, 5)], dimen=None) m.v_2n = Var(m.d2, m.dn) m.v_12 = Var(m.d1, m.d2) @@ -585,22 +615,22 @@ def test_flatten_m2_2d(self): normalize_index.flatten = False sets_list, comps_list = flatten_components_along_sets(m, sets, Var) - ref1 = Reference(m.v_2n[:,('c',3)]) + ref1 = Reference(m.v_2n[:, ('c', 3)]) - ref_set = ref1.index_set()._ref # _index is a _ReferenceSet + ref_set = ref1.index_set()._ref # _index is a _ReferenceSet - #next(ref_set._get_iter(ref_set._slice, ('a',1))) + # next(ref_set._get_iter(ref_set._slice, ('a',1))) # ^ This raises a somewhat cryptic error message. # _ReferenceSet.__contains__ seems to be the culprit, # which is called in validate_index for every __getitem__. - self.assertNotIn(('a',1), ref_set) + self.assertNotIn(('a', 1), ref_set) # ^ This does not seem to behave as expected... # Reason is incompatibility with flatten==False. self.assertEqual(len(sets_list), len(comps_list)) self.assertEqual(len(sets_list), 2) - #for sets, comps in zip(sets_list, comps_list): + # for sets, comps in zip(sets_list, comps_list): # if len(sets) == 1 and sets[0] is m.d2: # ref_data = { # self._hashRef(Reference(m.v_2n[:,('a',1)])), @@ -624,24 +654,38 @@ def test_flatten_m2_1d(self): assert len(sets_list) == len(comps_list) assert len(sets_list) == 3 - D22 = m.d2*m.d2 - D2N = m.d2*m.dn + D22 = m.d2 * m.d2 + D2N = m.d2 * m.dn DN2N = m.dn.cross(m.d2, m.dn) D2NN = m.d2.cross(m.dn, m.dn) D2N2 = m.d2.cross(m.dn, m.d2) for sets, comps in zip(sets_list, comps_list): - if len(sets) == 1 and sets[0] is m.d1: ref_data = set() # Don't expand indices: - ref_data.update(self._hashRef(Reference(m.v_12[:,i2])) for i2 in m.d2) - ref_data.update(self._hashRef(Reference(m.v_212[i2a,:,i2b])) for i2a, i2b in D22) - ref_data.update(self._hashRef(Reference(m.v_12n[:,i2,i_n])) for i2, i_n in D2N) - ref_data.update(self._hashRef(Reference(m.v_1n2n[:,i_na,i2,i_nb])) for i_na, i2, i_nb in DN2N) - ref_data.update(self._hashRef(Reference(m.b[:,i2,i_n].v0)) for i2, i_n in D2N) - ref_data.update(self._hashRef(Reference(m.b[:,i2a,i_n].v2[i2b])) for i2a, i_n, i2b in D2N2) - ref_data.update(self._hashRef(Reference(m.b[:,i2,i_na].vn[i_nb])) for i2, i_na, i_nb in D2NN) + ref_data.update(self._hashRef(Reference(m.v_12[:, i2])) for i2 in m.d2) + ref_data.update( + self._hashRef(Reference(m.v_212[i2a, :, i2b])) for i2a, i2b in D22 + ) + ref_data.update( + self._hashRef(Reference(m.v_12n[:, i2, i_n])) for i2, i_n in D2N + ) + ref_data.update( + self._hashRef(Reference(m.v_1n2n[:, i_na, i2, i_nb])) + for i_na, i2, i_nb in DN2N + ) + ref_data.update( + self._hashRef(Reference(m.b[:, i2, i_n].v0)) for i2, i_n in D2N + ) + ref_data.update( + self._hashRef(Reference(m.b[:, i2a, i_n].v2[i2b])) + for i2a, i_n, i2b in D2N2 + ) + ref_data.update( + self._hashRef(Reference(m.b[:, i2, i_na].vn[i_nb])) + for i2, i_na, i_nb in D2NN + ) # Expect length to be 38 assert len(ref_data) == len(comps) for comp in comps: @@ -654,7 +698,9 @@ def test_flatten_m2_1d(self): self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 2 and sets[0] is m.d1 and sets[1] is m.d1: ref_data = set() - ref_data.update(self._hashRef(Reference(m.b[:,i2,i_n].v1[:])) for i2, i_n in D2N) + ref_data.update( + self._hashRef(Reference(m.b[:, i2, i_n].v1[:])) for i2, i_n in D2N + ) assert len(ref_data) == len(comps) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -667,9 +713,9 @@ def _model3_nd_sets_normalizeflatten(self): # The same as model 2, but now with `normalize_index.flatten == True` m = ConcreteModel() - m.d1 = Set(initialize=[1,2]) - m.d2 = Set(initialize=[('a',1), ('b',2)]) - m.dn = Set(initialize=[('c',3), ('d',4,5)], dimen=None) + m.d1 = Set(initialize=[1, 2]) + m.d2 = Set(initialize=[('a', 1), ('b', 2)]) + m.dn = Set(initialize=[('c', 3), ('d', 4, 5)], dimen=None) m.v_2n = Var(m.d2, m.dn) m.v_12 = Var(m.d1, m.d2) @@ -681,10 +727,10 @@ def _model3_nd_sets_normalizeflatten(self): for i1 in m.d1: for i2 in m.d2: for i_n in m.dn: - m.b[i1,i2,i_n].v0 = Var() - m.b[i1,i2,i_n].v1 = Var(m.d1) - m.b[i1,i2,i_n].v2 = Var(m.d2) - m.b[i1,i2,i_n].vn = Var(m.dn) + m.b[i1, i2, i_n].v0 = Var() + m.b[i1, i2, i_n].v1 = Var(m.d1) + m.b[i1, i2, i_n].v2 = Var(m.d2) + m.b[i1, i2, i_n].vn = Var(m.dn) return m @@ -698,25 +744,46 @@ def test_flatten_m3_1d(self): assert len(sets_list) == 3 for sets, comps in zip(sets_list, comps_list): - if len(sets) == 1 and sets[0] is m.d1: ref_data = set() # Must iterate and slice in a manner consistent with # `normalize_index.flatten == True` - ref_data.update(self._hashRef(Reference(m.v_12[:,i2])) - for i2 in m.d2) # 2 - ref_data.update(self._hashRef(Reference(m.v_212[i2a,:,i2b])) - for i2a in m.d2 for i2b in m.d2) # 4 - ref_data.update(self._hashRef(Reference(m.v_12n[:,i2,i_n])) - for i2 in m.d2 for i_n in m.dn) # 4 - ref_data.update(self._hashRef(Reference(m.v_1n2n[:,i_na,i2,i_nb])) - for i_na in m.dn for i2 in m.d2 for i_nb in m.dn) # 8 - ref_data.update(self._hashRef(Reference(m.b[:,i2,i_n].v0)) - for i2 in m.d2 for i_n in m.dn) # 4 - ref_data.update(self._hashRef(Reference(m.b[:,i2a,i_n].v2[i2b])) - for i2a in m.d2 for i_n in m.dn for i2b in m.d2) # 8 - ref_data.update(self._hashRef(Reference(m.b[:,i2,i_na].vn[i_nb])) - for i2 in m.d2 for i_na in m.dn for i_nb in m.dn) # 8 + ref_data.update( + self._hashRef(Reference(m.v_12[:, i2])) for i2 in m.d2 + ) # 2 + ref_data.update( + self._hashRef(Reference(m.v_212[i2a, :, i2b])) + for i2a in m.d2 + for i2b in m.d2 + ) # 4 + ref_data.update( + self._hashRef(Reference(m.v_12n[:, i2, i_n])) + for i2 in m.d2 + for i_n in m.dn + ) # 4 + ref_data.update( + self._hashRef(Reference(m.v_1n2n[:, i_na, i2, i_nb])) + for i_na in m.dn + for i2 in m.d2 + for i_nb in m.dn + ) # 8 + ref_data.update( + self._hashRef(Reference(m.b[:, i2, i_n].v0)) + for i2 in m.d2 + for i_n in m.dn + ) # 4 + ref_data.update( + self._hashRef(Reference(m.b[:, i2a, i_n].v2[i2b])) + for i2a in m.d2 + for i_n in m.dn + for i2b in m.d2 + ) # 8 + ref_data.update( + self._hashRef(Reference(m.b[:, i2, i_na].vn[i_nb])) + for i2 in m.d2 + for i_na in m.dn + for i_nb in m.dn + ) # 8 assert len(ref_data) == len(comps) assert len(ref_data) == 38 for comp in comps: @@ -729,8 +796,11 @@ def test_flatten_m3_1d(self): self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 2 and sets[0] is m.d1 and sets[1] is m.d1: ref_data = set() - ref_data.update(self._hashRef(Reference(m.b[:,i2,i_n].v1[:])) - for i2 in m.d2 for i_n in m.dn) + ref_data.update( + self._hashRef(Reference(m.b[:, i2, i_n].v1[:])) + for i2 in m.d2 + for i_n in m.dn + ) assert len(ref_data) == len(comps) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -745,33 +815,56 @@ def test_flatten_m3_2d(self): assert len(sets_list) == 2 for sets, comps in zip(sets_list, comps_list): - if len(sets) == 1 and sets[0] is m.d2: ref_data = set() - ref_data.update(self._hashRef(Reference(m.v_2n[:,:,i_n])) - for i_n in m.dn) # 2 - ref_data.update(self._hashRef(Reference(m.v_12[i1,:,:])) - for i1 in m.d1) # 2 - ref_data.update(self._hashRef(Reference(m.v_12n[i1,:,:,i_n])) - for i1 in m.d1 for i_n in m.dn) # 4 - ref_data.update(self._hashRef(Reference(m.v_1n2n[i1,i_na,:,:,i_nb])) - for i1 in m.d1 for i_na in m.dn for i_nb in m.dn) # 8 - ref_data.update(self._hashRef(Reference(m.b[i1,:,:,i_n].v0)) - for i1 in m.d1 for i_n in m.dn) # 4 - ref_data.update(self._hashRef(Reference(m.b[i1a,:,:,i_n].v1[i1b])) - for i1a in m.d1 for i_n in m.dn for i1b in m.d1) # 8 - ref_data.update(self._hashRef(Reference(m.b[i1,:,:,i_na].vn[i_nb])) - for i1 in m.d1 for i_na in m.dn for i_nb in m.dn) # 8 + ref_data.update( + self._hashRef(Reference(m.v_2n[:, :, i_n])) for i_n in m.dn + ) # 2 + ref_data.update( + self._hashRef(Reference(m.v_12[i1, :, :])) for i1 in m.d1 + ) # 2 + ref_data.update( + self._hashRef(Reference(m.v_12n[i1, :, :, i_n])) + for i1 in m.d1 + for i_n in m.dn + ) # 4 + ref_data.update( + self._hashRef(Reference(m.v_1n2n[i1, i_na, :, :, i_nb])) + for i1 in m.d1 + for i_na in m.dn + for i_nb in m.dn + ) # 8 + ref_data.update( + self._hashRef(Reference(m.b[i1, :, :, i_n].v0)) + for i1 in m.d1 + for i_n in m.dn + ) # 4 + ref_data.update( + self._hashRef(Reference(m.b[i1a, :, :, i_n].v1[i1b])) + for i1a in m.d1 + for i_n in m.dn + for i1b in m.d1 + ) # 8 + ref_data.update( + self._hashRef(Reference(m.b[i1, :, :, i_na].vn[i_nb])) + for i1 in m.d1 + for i_na in m.dn + for i_nb in m.dn + ) # 8 assert len(ref_data) == len(comps) assert len(ref_data) == 36 for comp in comps: self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 2 and sets[0] is m.d2 and sets[1] is m.d2: ref_data = set() - ref_data.update(self._hashRef(Reference(m.v_212[:,:,i1,:,:])) - for i1 in m.d1) - ref_data.update(self._hashRef(Reference(m.b[i1,:,:,i_n].v2[:,:])) - for i1 in m.d1 for i_n in m.dn) + ref_data.update( + self._hashRef(Reference(m.v_212[:, :, i1, :, :])) for i1 in m.d1 + ) + ref_data.update( + self._hashRef(Reference(m.b[i1, :, :, i_n].v2[:, :])) + for i1 in m.d1 + for i_n in m.dn + ) assert len(ref_data) == len(comps) assert len(ref_data) == 6 for comp in comps: @@ -792,7 +885,6 @@ def test_flatten_m3_nd(self): assert len(sets_list) == 3 for sets, comps in zip(sets_list, comps_list): - if len(sets) == 1 and sets[0] is UnindexedComponent_set: ref_data = set() ref_data.update(self._hashRef(v) for v in m.v_12.values()) @@ -804,16 +896,31 @@ def test_flatten_m3_nd(self): elif len(sets) == 1 and sets[0] is m.dn: ref_data = set() - ref_data.update(self._hashRef(Reference(m.v_2n[i2,...])) - for i2 in m.d2) # 2 - ref_data.update(self._hashRef(Reference(m.v_12n[i1,i2,...])) - for i1 in m.d1 for i2 in m.d2) # 4 - ref_data.update(self._hashRef(Reference(m.b[i1,i2,...].v0)) - for i1 in m.d1 for i2 in m.d2) # 4 - ref_data.update(self._hashRef(Reference(m.b[i1a,i2,...].v1[i1b])) - for i1a in m.d1 for i2 in m.d2 for i1b in m.d1) # 8 - ref_data.update(self._hashRef(Reference(m.b[i1,i2a,...].v2[i2b])) - for i1 in m.d1 for i2a in m.d2 for i2b in m.d2) # 8 + ref_data.update( + self._hashRef(Reference(m.v_2n[i2, ...])) for i2 in m.d2 + ) # 2 + ref_data.update( + self._hashRef(Reference(m.v_12n[i1, i2, ...])) + for i1 in m.d1 + for i2 in m.d2 + ) # 4 + ref_data.update( + self._hashRef(Reference(m.b[i1, i2, ...].v0)) + for i1 in m.d1 + for i2 in m.d2 + ) # 4 + ref_data.update( + self._hashRef(Reference(m.b[i1a, i2, ...].v1[i1b])) + for i1a in m.d1 + for i2 in m.d2 + for i1b in m.d1 + ) # 8 + ref_data.update( + self._hashRef(Reference(m.b[i1, i2a, ...].v2[i2b])) + for i1 in m.d1 + for i2a in m.d2 + for i2b in m.d2 + ) # 8 assert len(comps) == len(ref_data) assert len(comps) == 26 for comp in comps: @@ -821,8 +928,11 @@ def test_flatten_m3_nd(self): elif len(sets) == 2 and sets[0] is m.dn and sets[1] is m.dn: ref_data = set() - ref_data.update(self._hashRef(Reference(m.b[i1,i2,...].vn[...])) - for i1 in m.d1 for i2 in m.d2) # 4 + ref_data.update( + self._hashRef(Reference(m.b[i1, i2, ...].vn[...])) + for i1 in m.d1 + for i2 in m.d2 + ) # 4 assert len(comps) == len(ref_data) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -832,7 +942,7 @@ def test_flatten_m3_nd(self): def test_flatten_m3_1_2(self): m = self._model3_nd_sets_normalizeflatten() - sets = ComponentSet((m.d1,m.d2)) + sets = ComponentSet((m.d1, m.d2)) sets_list, comps_list = flatten_components_along_sets(m, sets, Var) assert len(sets_list) == len(comps_list) @@ -841,52 +951,71 @@ def test_flatten_m3_1_2(self): for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is m.d2: ref_data = set() - ref_data.update(self._hashRef(Reference(m.v_2n[:,:,i_n])) - for i_n in m.dn) + ref_data.update( + self._hashRef(Reference(m.v_2n[:, :, i_n])) for i_n in m.dn + ) self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 2 and sets[0] is m.d1 and sets[1] is m.d2: - ref_data = { - self._hashRef(Reference(m.v_12[...])), # 1 - } - ref_data.update(self._hashRef(Reference(m.v_12n[:,:,:,i_n])) - for i_n in m.dn) # 2 - ref_data.update(self._hashRef(Reference(m.v_1n2n[:,i_na,:,:,i_nb])) - for i_na in m.dn for i_nb in m.dn) # 4 - ref_data.update(self._hashRef(Reference(m.b[:,:,:,i_n].v0)) - for i_n in m.dn) # 2 - ref_data.update(self._hashRef(Reference(m.b[:,:,:,i_na].vn[i_nb])) - for i_na in m.dn for i_nb in m.dn) # 4 + ref_data = {self._hashRef(Reference(m.v_12[...]))} # 1 + ref_data.update( + self._hashRef(Reference(m.v_12n[:, :, :, i_n])) for i_n in m.dn + ) # 2 + ref_data.update( + self._hashRef(Reference(m.v_1n2n[:, i_na, :, :, i_nb])) + for i_na in m.dn + for i_nb in m.dn + ) # 4 + ref_data.update( + self._hashRef(Reference(m.b[:, :, :, i_n].v0)) for i_n in m.dn + ) # 2 + ref_data.update( + self._hashRef(Reference(m.b[:, :, :, i_na].vn[i_nb])) + for i_na in m.dn + for i_nb in m.dn + ) # 4 self.assertEqual(len(ref_data), len(comps)) self.assertEqual(len(comps), 13) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) - elif (len(sets) == 3 and sets[0] is m.d1 and sets[1] is m.d2 - and sets[2] is m.d1): + elif ( + len(sets) == 3 + and sets[0] is m.d1 + and sets[1] is m.d2 + and sets[2] is m.d1 + ): ref_data = set() - ref_data.update(self._hashRef(Reference(m.b[:,:,:,i_n].v1[:])) - for i_n in m.dn) # 2 + ref_data.update( + self._hashRef(Reference(m.b[:, :, :, i_n].v1[:])) for i_n in m.dn + ) # 2 self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) - elif (len(sets) == 3 and sets[0] is m.d1 and sets[1] is m.d2 - and sets[2] is m.d2): + elif ( + len(sets) == 3 + and sets[0] is m.d1 + and sets[1] is m.d2 + and sets[2] is m.d2 + ): ref_data = set() - ref_data.update(self._hashRef(Reference(m.b[:,:,:,i_n].v2[:,:])) - for i_n in m.dn) # 2 + ref_data.update( + self._hashRef(Reference(m.b[:, :, :, i_n].v2[:, :])) for i_n in m.dn + ) # 2 self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) - elif (len(sets) == 3 and sets[0] is m.d2 and sets[1] is m.d1 - and sets[2] is m.d2): - ref_data = { - self._hashRef(Reference(m.v_212[...])), # 1 - } + elif ( + len(sets) == 3 + and sets[0] is m.d2 + and sets[1] is m.d1 + and sets[2] is m.d2 + ): + ref_data = {self._hashRef(Reference(m.v_212[...]))} # 1 self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -900,17 +1029,17 @@ def test_specified_index_1(self): """ m = ConcreteModel() - m.time = Set(initialize=[1,2,3]) - m.space = Set(initialize=[2,4,6]) + m.time = Set(initialize=[1, 2, 3]) + m.space = Set(initialize=[2, 4, 6]) m.phase = Set(initialize=['p1', 'p2']) - m.comp = Set(initialize=['a','b']) + m.comp = Set(initialize=['a', 'b']) phase_comp = m.comp * m.phase - n_phase_comp = len(m.phase)*len(m.comp) + n_phase_comp = len(m.phase) * len(m.comp) m.v = Var(m.time, m.comp, m.space, m.phase) - @m.Block(m.time, m.comp, m.space, m.phase) + @m.Block(m.time, m.comp, m.space, m.phase) def b(b, t, j, x, p): b.v1 = Var() @@ -930,12 +1059,14 @@ def b(b, t, j, x, p): if len(sets) == 2 and sets[0] is m.time and sets[1] is m.space: # We missed b.v2 by descending into the "first" index # of the block - self.assertEqual(len(comps), 2*n_phase_comp) + self.assertEqual(len(comps), 2 * n_phase_comp) ref_data = set() - ref_data.update(self._hashRef(Reference(m.v[:,j,:,p])) - for j, p in phase_comp) - ref_data.update(self._hashRef(Reference(m.b[:,j,:,p].v1)) - for j, p in phase_comp) + ref_data.update( + self._hashRef(Reference(m.v[:, j, :, p])) for j, p in phase_comp + ) + ref_data.update( + self._hashRef(Reference(m.b[:, j, :, p].v1)) for j, p in phase_comp + ) self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -944,8 +1075,9 @@ def b(b, t, j, x, p): # Space index specified: indices = ComponentMap([(m.space, 4)]) - sets_list, comps_list = flatten_components_along_sets(m, sets, Var, - indices=indices) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Var, indices=indices + ) self.assertEqual(len(comps_list), len(sets_list)) self.assertEqual(len(sets_list), 1) @@ -953,20 +1085,22 @@ def b(b, t, j, x, p): for sets, comps in zip(sets_list, comps_list): if len(sets) == 2 and sets[0] is m.time and sets[1] is m.space: # We descended into a block data that includes v2 - self.assertEqual(len(comps), 3*n_phase_comp) + self.assertEqual(len(comps), 3 * n_phase_comp) # Slices where we expect an attribute error somewhere, # due to v2 being "skipped" - incomplete_slices = list(m.b[:,j,:,p].v2 for j, p in phase_comp) + incomplete_slices = list(m.b[:, j, :, p].v2 for j, p in phase_comp) for ref in incomplete_slices: ref.attribute_errors_generate_exceptions = False incomplete_refs = list(Reference(sl) for sl in incomplete_slices) ref_data = set() - ref_data.update(self._hashRef(Reference(m.v[:,j,:,p])) - for j, p in phase_comp) - ref_data.update(self._hashRef(Reference(m.b[:,j,:,p].v1)) - for j, p in phase_comp) + ref_data.update( + self._hashRef(Reference(m.v[:, j, :, p])) for j, p in phase_comp + ) + ref_data.update( + self._hashRef(Reference(m.b[:, j, :, p].v1)) for j, p in phase_comp + ) ref_data.update(self._hashRef(ref) for ref in incomplete_refs) self.assertEqual(len(ref_data), len(comps)) for comp in comps: @@ -976,8 +1110,9 @@ def b(b, t, j, x, p): # Time and space indices specified indices = (3, 6) - sets_list, comps_list = flatten_components_along_sets(m, sets, Var, - indices=indices) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Var, indices=indices + ) self.assertEqual(len(comps_list), len(sets_list)) self.assertEqual(len(sets_list), 1) @@ -985,20 +1120,22 @@ def b(b, t, j, x, p): for sets, comps in zip(sets_list, comps_list): if len(sets) == 2 and sets[0] is m.time and sets[1] is m.space: # We descended into a block data that includes v2 - self.assertEqual(len(comps), 3*n_phase_comp) + self.assertEqual(len(comps), 3 * n_phase_comp) # Slices where we expect an attribute error somewhere, # due to v2 being "skipped" - incomplete_slices = list(m.b[:,j,:,p].v2 for j, p in phase_comp) + incomplete_slices = list(m.b[:, j, :, p].v2 for j, p in phase_comp) for ref in incomplete_slices: ref.attribute_errors_generate_exceptions = False incomplete_refs = list(Reference(sl) for sl in incomplete_slices) ref_data = set() - ref_data.update(self._hashRef(Reference(m.v[:,j,:,p])) - for j, p in phase_comp) - ref_data.update(self._hashRef(Reference(m.b[:,j,:,p].v1)) - for j, p in phase_comp) + ref_data.update( + self._hashRef(Reference(m.v[:, j, :, p])) for j, p in phase_comp + ) + ref_data.update( + self._hashRef(Reference(m.b[:, j, :, p].v1)) for j, p in phase_comp + ) ref_data.update(self._hashRef(ref) for ref in incomplete_refs) self.assertEqual(len(ref_data), len(comps)) for comp in comps: @@ -1012,8 +1149,8 @@ def test_specified_index_2(self): """ m = ConcreteModel() - m.time = Set(initialize=[1,2,3]) - m.space = Set(initialize=[2,4,6]) + m.time = Set(initialize=[1, 2, 3]) + m.space = Set(initialize=[2, 4, 6]) m.v = Var(m.time, m.space) @@ -1039,9 +1176,9 @@ def b(b, t, x): # of the block self.assertEqual(len(comps), 2) ref_data = { - self._hashRef(Reference(m.v[...])), - self._hashRef(Reference(m.b[...].v1)), - } + self._hashRef(Reference(m.v[...])), + self._hashRef(Reference(m.b[...].v1)), + } self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -1050,8 +1187,9 @@ def b(b, t, x): # Space index specified: indices = ComponentMap([(m.space, 4)]) - sets_list, comps_list = flatten_components_along_sets(m, sets, Var, - indices=indices) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Var, indices=indices + ) self.assertEqual(len(comps_list), len(sets_list)) self.assertEqual(len(sets_list), 1) @@ -1063,15 +1201,15 @@ def b(b, t, x): # Slices where we expect an attribute error somewhere, # due to v2 being "skipped" - incomplete_slice = m.b[:,:].v2 + incomplete_slice = m.b[:, :].v2 incomplete_slice.attribute_errors_generate_exceptions = False incomplete_ref = Reference(incomplete_slice) ref_data = { - self._hashRef(Reference(m.v[:,:])), - self._hashRef(Reference(m.b[:,:].v1)), - self._hashRef(incomplete_ref), - } + self._hashRef(Reference(m.v[:, :])), + self._hashRef(Reference(m.b[:, :].v1)), + self._hashRef(incomplete_ref), + } self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -1080,8 +1218,9 @@ def b(b, t, x): # Time and space indices specified indices = (3, 6) - sets_list, comps_list = flatten_components_along_sets(m, sets, Var, - indices=indices) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Var, indices=indices + ) self.assertEqual(len(comps_list), len(sets_list)) self.assertEqual(len(sets_list), 1) @@ -1093,15 +1232,15 @@ def b(b, t, x): # Slices where we expect an attribute error somewhere, # due to v2 being "skipped" - incomplete_slice = m.b[:,:].v2 + incomplete_slice = m.b[:, :].v2 incomplete_slice.attribute_errors_generate_exceptions = False incomplete_ref = Reference(incomplete_slice) ref_data = { - self._hashRef(Reference(m.v[:,:])), - self._hashRef(Reference(m.b[:,:].v1)), - self._hashRef(incomplete_ref), - } + self._hashRef(Reference(m.v[:, :])), + self._hashRef(Reference(m.b[:, :].v1)), + self._hashRef(incomplete_ref), + } self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) @@ -1112,9 +1251,9 @@ def _model4_three_1d_sets(self): # A simple model with three sets to slice m = ConcreteModel() - m.X = Set(initialize=[1,2,3]) - m.Y = Set(initialize=[1,2,3]) - m.Z = Set(initialize=[1,2,3]) + m.X = Set(initialize=[1, 2, 3]) + m.Y = Set(initialize=[1, 2, 3]) + m.Z = Set(initialize=[1, 2, 3]) m.comp = Set(initialize=['a', 'b']) @@ -1145,36 +1284,362 @@ def test_model4_xyz(self): for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is UnindexedComponent_set: - ref_data = { - self._hashRef(Reference(m.u)), - } + ref_data = {self._hashRef(Reference(m.u))} self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) elif len(sets) == 2 and sets[0] is m.X and sets[1] is m.Y: ref_data = { - self._hashRef(Reference(m.base[:,:])), - self._hashRef(Reference(m.b2[:,:].base)), - } + self._hashRef(Reference(m.base[:, :])), + self._hashRef(Reference(m.b2[:, :].base)), + } self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) - elif (len(sets) == 3 and sets[0] is m.X and sets[1] is m.Y - and sets[2] is m.Z): + elif ( + len(sets) == 3 and sets[0] is m.X and sets[1] is m.Y and sets[2] is m.Z + ): ref_data = { - self._hashRef(Reference(m.v[:,:,:,'a'])), - self._hashRef(Reference(m.v[:,:,:,'b'])), - self._hashRef(Reference(m.b4[:,:,:,'a'].v)), - self._hashRef(Reference(m.b4[:,:,:,'b'].v)), - self._hashRef(Reference(m.b2[:,:].v[:,'a'])), - self._hashRef(Reference(m.b2[:,:].v[:,'b'])), - } + self._hashRef(Reference(m.v[:, :, :, 'a'])), + self._hashRef(Reference(m.v[:, :, :, 'b'])), + self._hashRef(Reference(m.b4[:, :, :, 'a'].v)), + self._hashRef(Reference(m.b4[:, :, :, 'b'].v)), + self._hashRef(Reference(m.b2[:, :].v[:, 'a'])), + self._hashRef(Reference(m.b2[:, :].v[:, 'b'])), + } self.assertEqual(len(ref_data), len(comps)) for comp in comps: self.assertIn(self._hashRef(comp), ref_data) else: raise RuntimeError() + def test_deactivated_block_active_true(self): + m = self._model1_1d_sets() + + # Deactivating b1 should get rid of both variables directly on it + # as well as those on the subblock b_s + m.b.b1.deactivate() + sets = (m.time,) + + # + # Test identifying active components + # + sets_list, comps_list = flatten_components_along_sets(m, sets, Var, active=True) + + expected_unindexed = [ComponentUID(m.v0)] + expected_unindexed = set(expected_unindexed) + expected_time = [ComponentUID(m.v1[:])] + expected_time.extend(ComponentUID(m.v2[:, x]) for x in m.space) + expected_time.extend( + ComponentUID(m.v3[:, x, j]) for x in m.space for j in m.comp + ) + expected_time.extend(ComponentUID(m.b.b2[:, x].v0) for x in m.space) + expected_time.extend( + ComponentUID(m.b.b2[:, x].v1[j]) for x in m.space for j in m.comp + ) + expected_time = set(expected_time) + + expected_2time = [ComponentUID(m.v_tt[:, :])] + expected_2time.extend(ComponentUID(m.v_tst[:, x, :]) for x in m.space) + expected_2time.extend( + ComponentUID(m.b.b2[:, x].v2[:, j]) for x in m.space for j in m.comp + ) + expected_2time = set(expected_2time) + + set_id_set = set(tuple(id(s) for s in sets) for sets in sets_list) + pred_sets = [(UnindexedComponent_set,), (m.time,), (m.time, m.time)] + pred_set_ids = set(tuple(id(s) for s in sets) for sets in pred_sets) + self.assertEqual(set_id_set, pred_set_ids) + for sets, comps in zip(sets_list, comps_list): + if len(sets) == 1 and sets[0] is UnindexedComponent_set: + comp_set = set(ComponentUID(comp) for comp in comps) + self.assertEqual(comp_set, expected_unindexed) + elif len(sets) == 1 and sets[0] is m.time: + comp_set = set(ComponentUID(comp.referent) for comp in comps) + self.assertEqual(comp_set, expected_time) + elif len(sets) == 2: + self.assertIs(sets[0], m.time) + self.assertIs(sets[1], m.time) + comp_set = set(ComponentUID(comp.referent) for comp in comps) + self.assertEqual(comp_set, expected_2time) + + def test_deactivated_block_active_false(self): + m = self._model1_1d_sets() + m.deactivate() + m.b.deactivate() + m.b.b1.deactivate() + m.b.b1[:].b_s.deactivate() + # Remove components to make this easier to test + m.del_component(m.v0) + m.del_component(m.v1) + m.del_component(m.v3) + m.del_component(m.v_tt) + m.del_component(m.v_tst) + sets = (m.time,) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Var, active=False + ) + + expected_time = [ComponentUID(m.b.b1[:].v0)] + expected_time.extend(ComponentUID(m.v2[:, x]) for x in m.space) + expected_time.extend(ComponentUID(m.b.b1[:].v1[x]) for x in m.space) + expected_time.extend( + ComponentUID(m.b.b1[:].v2[x, j]) for x in m.space for j in m.comp + ) + expected_time.extend(ComponentUID(m.b.b1[:].b_s[x].v0) for x in m.space) + expected_time.extend( + ComponentUID(m.b.b1[:].b_s[x1].v1[x2]) for x1 in m.space for x2 in m.space + ) + expected_time.extend( + ComponentUID(m.b.b1[:].b_s[x1].v2[x2, j]) + for x1 in m.space + for x2 in m.space + for j in m.comp + ) + expected_time = set(expected_time) + + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(sets_list[0]), 1) + self.assertIs(sets_list[0][0], m.time) + + self.assertEqual(len(comps_list), 1) + comp_set = set(ComponentUID(comp.referent) for comp in comps_list[0]) + self.assertEqual(comp_set, expected_time) + + def test_partially_deactivated_slice_active_true(self): + m = ConcreteModel() + m.time = Set(initialize=[0, 1, 2, 3]) + m.b = Block(m.time) + for t in m.time: + m.b[t].v = Var() + m.b[0].deactivate() + m.b[1].deactivate() + # m.b[:] is now a "partially deactivated slice" + sets = (m.time,) + sets_list, comps_list = flatten_components_along_sets(m, sets, Var, active=True) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(sets_list[0]), 1) + self.assertIs(sets_list[0][0], m.time) + self.assertIs(sets_list[0][0], m.time) + self.assertEqual(len(comps_list), 1) + self.assertEqual(len(comps_list[0]), 1) + self.assertEqual( + ComponentUID(comps_list[0][0].referent), ComponentUID(m.b[:].v) + ) + + def test_partially_activated_slice_active_false(self): + m = ConcreteModel() + m.time = Set(initialize=[0, 1, 2, 3]) + m.b = Block(m.time) + m.deactivate() + m.b.deactivate() + for t in m.time: + m.b[t].v = Var() + m.b[0].deactivate() + m.b[1].deactivate() + # Note that m.b[2] and m.b[3] are active. + # m.b[:] is now a "partially activated slice" + sets = (m.time,) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Var, active=False + ) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(sets_list[0]), 1) + self.assertIs(sets_list[0][0], m.time) + self.assertIs(sets_list[0][0], m.time) + self.assertEqual(len(comps_list), 1) + self.assertEqual(len(comps_list[0]), 1) + self.assertEqual( + ComponentUID(comps_list[0][0].referent), ComponentUID(m.b[:].v) + ) + + def test_partially_deactivated_slice_specified_index(self): + m = ConcreteModel() + m.time = Set(initialize=[0, 1, 2, 3]) + m.b = Block(m.time) + for t in m.time: + m.b[t].v = Var() + m.b[0].deactivate() + m.b[1].deactivate() + # m.b[:] is now a "partially deactivated slice" + sets = (m.time,) + + # When we specify the index of a deactivated block, we + # respect the active argument when descending. + indices = (1,) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Var, active=True, indices=indices + ) + self.assertEqual(len(sets_list), 0) + self.assertEqual(len(comps_list), 0) + + indices = (2,) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Var, active=True, indices=indices + ) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(sets_list[0]), 1) + self.assertIs(sets_list[0][0], m.time) + self.assertIs(sets_list[0][0], m.time) + self.assertEqual(len(comps_list), 1) + self.assertEqual(len(comps_list[0]), 1) + self.assertEqual( + ComponentUID(comps_list[0][0].referent), ComponentUID(m.b[:].v) + ) + + def test_fully_deactivated_slice(self): + m = ConcreteModel() + m.time = Set(initialize=[0, 1, 2, 3]) + m.b = Block(m.time) + for t in m.time: + m.b[t].v = Var() + m.b[:].deactivate() + sets = (m.time,) + + # We send active=True, but cannot find an active BlockData + # to descend into. + sets_list, comps_list = flatten_components_along_sets(m, sets, Var, active=True) + self.assertEqual(len(sets_list), 0) + self.assertEqual(len(comps_list), 0) + + def test_deactivated_model_active_false(self): + m = self._model1_1d_sets() + m.deactivate() + sets = (m.time,) + sets_list, comps_list = flatten_components_along_sets(m, sets, Var, active=True) + self.assertEqual(len(sets_list), 0) + self.assertEqual(len(comps_list), 0) + + def test_constraint_with_active_arg(self): + m = ConcreteModel() + m.time = Set(initialize=[0, 1, 2, 3]) + m.b = Block(m.time) + for t in m.time: + m.b[t].v = Var() + m.b[t].c1 = Constraint(expr=m.b[t].v == 1) + + def c2_rule(m, t): + return m.b[t].v == 2 + + m.c2 = Constraint(m.time, rule=c2_rule) + m.c2.deactivate() + + sets = (m.time,) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Constraint, active=True + ) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(sets_list[0]), 1) + self.assertIs(sets_list[0][0], m.time) + self.assertIs(sets_list[0][0], m.time) + self.assertEqual(len(comps_list), 1) + self.assertEqual(len(comps_list[0]), 1) + self.assertEqual( + ComponentUID(comps_list[0][0].referent), ComponentUID(m.b[:].c1) + ) + + m.deactivate() + sets = (m.time,) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Constraint, active=False + ) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(sets_list[0]), 1) + self.assertIs(sets_list[0][0], m.time) + self.assertIs(sets_list[0][0], m.time) + self.assertEqual(len(comps_list), 1) + self.assertEqual(len(comps_list[0]), 1) + self.assertEqual(ComponentUID(comps_list[0][0].referent), ComponentUID(m.c2[:])) + + def test_constraint_partially_deactivated_slice(self): + m = ConcreteModel() + m.time = Set(initialize=[0, 1, 2, 3]) + m.b = Block(m.time) + for t in m.time: + m.b[t].v = Var() + + def c2_rule(m, t): + return m.b[t].v == 2 + + m.c2 = Constraint(m.time, rule=c2_rule) + m.c2[0].deactivate() + m.c2[1].deactivate() + + sets = (m.time,) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Constraint, active=True + ) + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(sets_list[0]), 1) + self.assertIs(sets_list[0][0], m.time) + self.assertIs(sets_list[0][0], m.time) + self.assertEqual(len(comps_list), 1) + self.assertEqual(len(comps_list[0]), 1) + self.assertEqual(ComponentUID(comps_list[0][0].referent), ComponentUID(m.c2[:])) + + def test_constraint_fully_deactivated_slice(self): + m = ConcreteModel() + m.time = Set(initialize=[0, 1, 2, 3]) + m.b = Block(m.time) + for t in m.time: + m.b[t].v = Var() + + def c2_rule(m, t): + return m.b[t].v == 2 + + m.c2 = Constraint(m.time, rule=c2_rule) + m.c2[:].deactivate() + + sets = (m.time,) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Constraint, active=True + ) + # Because all data objects in c2[:] are deactivated, we don't + # yield the slice. + self.assertEqual(len(sets_list), 0) + self.assertEqual(len(comps_list), 0) + + def test_scalar_con_active_true(self): + m = ConcreteModel() + m.time = Set(initialize=[0, 1, 2]) + m.v = Var() + m.c = Constraint(expr=m.v == 1) + sets = (m.time,) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Constraint, active=True + ) + + self.assertEqual(len(sets_list), 1) + self.assertEqual(len(sets_list[0]), 1) + self.assertIs(sets_list[0][0], UnindexedComponent_set) + self.assertEqual(len(comps_list), 1) + self.assertEqual(len(comps_list[0]), 1) + self.assertIs(comps_list[0][0], m.c) + + def test_deactivated_scalar_con_active_true(self): + m = ConcreteModel() + m.time = Set(initialize=[0, 1, 2]) + m.comp = Set(initialize=["A", "B"]) + m.v = Var() + + def c_rule(m, j): + return m.v == 1 + + m.c = Constraint(m.comp, rule=c_rule) + m.c[:].deactivate() + # Because only the data objects are deactivated, we will generate + # this component in the component_objects loop in the flattener. + # But because its data objects do not match the active argument, + # we hit the clause that checks for slice activity. This checks + # the part of the clause that makes sure we have sliced a set. + sets = (m.time,) + sets_list, comps_list = flatten_components_along_sets( + m, sets, Constraint, active=True + ) + + self.assertEqual(len(sets_list), 0) + self.assertEqual(len(comps_list), 0) + class TestCUID(unittest.TestCase): """ @@ -1221,7 +1686,7 @@ def test_cuids_no_sets_no_subblocks(self): sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is UnindexedComponent_set: - self.assertEqual(len(comps), len(m.s1)*len(m.s2)) + self.assertEqual(len(comps), len(m.s1) * len(m.s2)) cuid_set = set(str(ComponentUID(comp)) for comp in comps) self.assertEqual(cuid_set, pred_cuid_set) else: @@ -1235,11 +1700,7 @@ def test_cuids_some_sets_no_subblocks(self): m.s4 = Set(initialize=["c", "d"]) m.v1 = Var(m.s1, m.s4) - pred_cuid_set = { - "v1[1,*]", - "v1[2,*]", - "v1[3,*]", - } + pred_cuid_set = {"v1[1,*]", "v1[2,*]", "v1[3,*]"} sets = (m.s3, m.s4) ctype = Var @@ -1247,8 +1708,7 @@ def test_cuids_some_sets_no_subblocks(self): for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is m.s4: self.assertEqual(len(comps), len(m.s1)) - cuid_set = set(str(ComponentUID(comp.referent)) - for comp in comps) + cuid_set = set(str(ComponentUID(comp.referent)) for comp in comps) self.assertEqual(cuid_set, pred_cuid_set) else: raise RuntimeError() @@ -1261,9 +1721,7 @@ def test_cuids_all_sets_no_subblocks(self): m.s4 = Set(initialize=["c", "d"]) m.v1 = Var(m.s3, m.s4) - pred_cuid_set = { - "v1[*,*]", - } + pred_cuid_set = {"v1[*,*]"} sets = (m.s3, m.s4) ctype = Var @@ -1271,8 +1729,7 @@ def test_cuids_all_sets_no_subblocks(self): for sets, comps in zip(sets_list, comps_list): if len(sets) == 2 and sets[0] is m.s3 and sets[1] is m.s4: self.assertEqual(len(comps), 1) - cuid_set = set(str(ComponentUID(comp.referent)) - for comp in comps) + cuid_set = set(str(ComponentUID(comp.referent)) for comp in comps) self.assertEqual(cuid_set, pred_cuid_set) else: raise RuntimeError() @@ -1282,9 +1739,7 @@ def test_cuid_one_set_no_subblocks(self): m.s1 = Set(initialize=[1, 2, 3]) m.v = Var(m.s1) - pred_cuid_set = { - "v[*]", - } + pred_cuid_set = {"v[*]"} sets = (m.s1,) ctype = Var @@ -1294,8 +1749,7 @@ def test_cuid_one_set_no_subblocks(self): for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is m.s1: self.assertEqual(len(comps), 1) - cuid_set = set(str(ComponentUID(comp.referent)) - for comp in comps) + cuid_set = set(str(ComponentUID(comp.referent)) for comp in comps) self.assertEqual(cuid_set, pred_cuid_set) else: raise RuntimeError() @@ -1306,8 +1760,10 @@ def test_cuids_no_sets_with_subblocks(self): m.s2 = Set(initialize=["a", "b"]) m.s3 = Set(initialize=[4, 5, 6]) m.s4 = Set(initialize=["c", "d"]) + def block_rule(b, i, j): b.v = Var() + m.b = Block(m.s1, m.s2, rule=block_rule) pred_cuid_set = { @@ -1326,7 +1782,7 @@ def block_rule(b, i, j): self.assertEqual(len(comps_list), 1) for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is UnindexedComponent_set: - self.assertEqual(len(comps), len(m.s1)*len(m.s2)) + self.assertEqual(len(comps), len(m.s1) * len(m.s2)) cuid_set = set(str(ComponentUID(comp)) for comp in comps) self.assertEqual(cuid_set, pred_cuid_set) else: @@ -1338,14 +1794,13 @@ def test_cuids_some_sets_with_subblocks(self): m.s2 = Set(initialize=["a", "b"]) m.s3 = Set(initialize=[4, 5, 6]) m.s4 = Set(initialize=["c", "d"]) + def block_rule(b, i, j): b.v = Var() + m.b = Block(m.s1, m.s2, rule=block_rule) - pred_cuid_set = { - "b[*,a].v", - "b[*,b].v", - } + pred_cuid_set = {"b[*,a].v", "b[*,b].v"} sets = (m.s1, m.s4) ctype = Var @@ -1355,8 +1810,7 @@ def block_rule(b, i, j): for sets, comps in zip(sets_list, comps_list): if len(sets) == 1 and sets[0] is m.s1: self.assertEqual(len(comps), len(m.s2)) - cuid_set = set(str(ComponentUID(comp.referent)) - for comp in comps) + cuid_set = set(str(ComponentUID(comp.referent)) for comp in comps) self.assertEqual(cuid_set, pred_cuid_set) else: raise RuntimeError() @@ -1367,13 +1821,13 @@ def test_cuids_all_sets_with_subblocks(self): m.s2 = Set(initialize=["a", "b"]) m.s3 = Set(initialize=[4, 5, 6]) m.s4 = Set(initialize=["c", "d"]) + def block_rule(b, i, j): b.v = Var() + m.b = Block(m.s1, m.s2, rule=block_rule) - pred_cuid_set = { - "b[*,*].v", - } + pred_cuid_set = {"b[*,*].v"} sets = (m.s1, m.s2) ctype = Var @@ -1383,8 +1837,7 @@ def block_rule(b, i, j): for sets, comps in zip(sets_list, comps_list): if len(sets) == 2 and sets[0] is m.s1 and sets[1] is m.s2: self.assertEqual(len(comps), 1) - cuid_set = set(str(ComponentUID(comp.referent)) - for comp in comps) + cuid_set = set(str(ComponentUID(comp.referent)) for comp in comps) self.assertEqual(cuid_set, pred_cuid_set) else: raise RuntimeError() @@ -1392,8 +1845,10 @@ def block_rule(b, i, j): def test_cuids_multiple_slices(self): m = ConcreteModel() m.s1 = Set(initialize=[1, 2, 3]) + def block_rule(b, i): b.v = Var(m.s1) + m.b = Block(m.s1, rule=block_rule) pred_cuid_set = {"b[*].v[*]"} @@ -1405,15 +1860,13 @@ def block_rule(b, i): for sets, comps in zip(sets_list, comps_list): if len(sets) == 2 and sets[0] is m.s1 and sets[1] is m.s1: self.assertEqual(len(comps), 1) - cuid_set = set(str(ComponentUID(comp.referent)) - for comp in comps) + cuid_set = set(str(ComponentUID(comp.referent)) for comp in comps) self.assertEqual(cuid_set, pred_cuid_set) else: raise RuntimeError() -class TestSliceComponent(TestFlatten): - +class TestSliceComponent(_TestFlattenBase, unittest.TestCase): def make_model(self): m = ConcreteModel() m.s1 = Set(initialize=[1, 2, 3]) @@ -1423,7 +1876,7 @@ def make_model(self): m.v12 = Var(m.s1, m.s2) m.v124 = Var(m.s1, m.s2, m.s4) return m - + def test_no_sets(self): m = self.make_model() var = m.v12 @@ -1432,7 +1885,7 @@ def test_no_sets(self): slices = [slice_ for _, slice_ in slice_component_along_sets(var, sets)] self.assertEqual(len(slices), len(ref_data)) - self.assertEqual(len(slices), len(m.s1)*len(m.s2)) + self.assertEqual(len(slices), len(m.s1) * len(m.s2)) for slice_ in slices: self.assertIn(self._hashRef(slice_), ref_data) @@ -1440,13 +1893,11 @@ def test_one_set(self): m = self.make_model() var = m.v124 sets = (m.s1, m.s3) - ref_data = { - self._hashRef(Reference(m.v124[:, i, j])) for i, j in m.s2*m.s4 - } + ref_data = {self._hashRef(Reference(m.v124[:, i, j])) for i, j in m.s2 * m.s4} slices = [s for _, s in slice_component_along_sets(var, sets)] self.assertEqual(len(slices), len(ref_data)) - self.assertEqual(len(slices), len(m.s2)*len(m.s4)) + self.assertEqual(len(slices), len(m.s2) * len(m.s4)) for slice_ in slices: self.assertIn(self._hashRef(Reference(slice_)), ref_data) @@ -1454,13 +1905,11 @@ def test_some_sets(self): m = self.make_model() var = m.v124 sets = (m.s1, m.s3) - ref_data = { - self._hashRef(Reference(m.v124[:, i, j])) for i, j in m.s2*m.s4 - } + ref_data = {self._hashRef(Reference(m.v124[:, i, j])) for i, j in m.s2 * m.s4} slices = [s for _, s in slice_component_along_sets(var, sets)] self.assertEqual(len(slices), len(ref_data)) - self.assertEqual(len(slices), len(m.s2)*len(m.s4)) + self.assertEqual(len(slices), len(m.s2) * len(m.s4)) for slice_ in slices: self.assertIn(self._hashRef(Reference(slice_)), ref_data) @@ -1482,6 +1931,7 @@ class TestExceptional(unittest.TestCase): These are the cases that motivate the try/excepts in the slice-checking part of the code. """ + def test_stop_iteration(self): """ StopIteration is raised if we create an empty slice somewhere @@ -1490,8 +1940,8 @@ def test_stop_iteration(self): don't return a reference that doesn't admit any valid indices. """ m = ConcreteModel() - m.s1 = Set(initialize=[1,2,3]) - m.s2 = Set(initialize=['a','b','c']) + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=['a', 'b', 'c']) m.v = Var(m.s1, m.s2) @@ -1499,7 +1949,7 @@ def con_rule(m, i, j): if j == 'a': # con[:, 'a'] will be an empty slice return Constraint.Skip - return m.v[i, j] == 5. + return m.v[i, j] == 5.0 def vacuous_con_rule(m, i, j): # A very odd case @@ -1514,7 +1964,7 @@ def vacuous_con_rule(m, i, j): ctype = Constraint sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) self.assertEqual(len(comps_list), 1) - self.assertEqual(len(comps_list[0]), len(m.s2)-1) + self.assertEqual(len(comps_list[0]), len(m.s2) - 1) m.del_component(m.con) m.vacuous_con = Constraint(m.s1, m.s2, rule=vacuous_con_rule) @@ -1528,10 +1978,10 @@ def vacuous_con_rule(m, i, j): self.assertEqual(len(comps_list), 0) m.del_component(m.vacuous_con) - m.del_component(m.v) # No longer necessary + m.del_component(m.v) # No longer necessary # Same behavior can happen for blocks: - + def block_rule(b, i, j): b.v = Var() @@ -1547,7 +1997,7 @@ def block_rule(b, i, j): ctype = Var sets_list, comps_list = flatten_components_along_sets(m, sets, ctype) self.assertEqual(len(comps_list), 1) - self.assertEqual(len(comps_list[0]), len(m.s2)-1) + self.assertEqual(len(comps_list[0]), len(m.s2) - 1) for idx in m.b: del m.b[idx] @@ -1579,6 +2029,7 @@ def test_descend_stop_iteration(self): def b_rule(b, i, j): b.v = Var() + m.b = Block(m.s1, m.s2, rule=b_rule) # 'b' will be a bad index to descend into @@ -1592,7 +2043,7 @@ def b_rule(b, i, j): ctype = Var indices = ComponentMap([(m.s2, 'b')]) sets_list, comps_list = flatten_components_along_sets( - m, sets, ctype, indices=indices, + m, sets, ctype, indices=indices ) for sets, comps in zip(sets_list, comps_list): # Here we just check that m.b[:,:].v was not encountered, @@ -1611,6 +2062,7 @@ def test_bad_descend_index(self): def b_rule(b, i, j): b.v = Var() + m.b = Block(m.s1, m.s2, rule=b_rule) sets = (m.s1, m.s2) @@ -1621,7 +2073,7 @@ def b_rule(b, i, j): # checked by the above test. with self.assertRaisesRegex(ValueError, "bad index"): sets_list, comps_list = flatten_components_along_sets( - m, sets, ctype, indices=indices, + m, sets, ctype, indices=indices ) def test_keyerror(self): @@ -1630,15 +2082,15 @@ def test_keyerror(self): doesn't have data for some members of its indexing set. """ m = ConcreteModel() - m.s1 = Set(initialize=[1,2,3]) - m.s2 = Set(initialize=['a','b','c']) + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=['a', 'b', 'c']) m.v = Var(m.s1, m.s2) def con_rule(m, i, j): if j == 'a': return Constraint.Skip - return m.v[i, j] == 5. + return m.v[i, j] == 5.0 m.con = Constraint(m.s1, m.s2, rule=con_rule) diff --git a/pyomo/dae/tests/test_initialization.py b/pyomo/dae/tests/test_initialization.py index fd01983e9d9..390b6ecc59e 100644 --- a/pyomo/dae/tests/test_initialization.py +++ b/pyomo/dae/tests/test_initialization.py @@ -18,9 +18,20 @@ import pyomo.common.unittest as unittest -from pyomo.environ import SolverFactory, ConcreteModel, Set, Block, Var, Constraint, TransformationFactory +from pyomo.environ import ( + SolverFactory, + ConcreteModel, + Set, + Block, + Var, + Constraint, + TransformationFactory, +) from pyomo.dae import ContinuousSet, DerivativeVar -from pyomo.dae.initialization import solve_consistent_initial_conditions, get_inconsistent_initial_conditions +from pyomo.dae.initialization import ( + solve_consistent_initial_conditions, + get_inconsistent_initial_conditions, +) currdir = dirname(abspath(__file__)) + os.sep @@ -42,8 +53,9 @@ def b1(b): b.v = Var(m.time, m.space, initialize=1) b.dv = DerivativeVar(b.v, wrt=m.time, initialize=0) - b.con = Constraint(m.time, m.space, - rule=lambda b, t, x: b.dv[t, x] == 7 - b.v[t, x]) + b.con = Constraint( + m.time, m.space, rule=lambda b, t, x: b.dv[t, x] == 7 - b.v[t, x] + ) # Inconsistent @b.Block(m.time) @@ -60,25 +72,26 @@ def b3(b, c): @b.Constraint(m.set2) def con(b, s): - return (5*b.v[s] == - m.fs.b2[m.time.first(), m.space.first()].v[c]) + return 5 * b.v[s] == m.fs.b2[m.time.first(), m.space.first()].v[c] # inconsistent @m.fs.Constraint(m.time) def con1(fs, t): return fs.b1.v[t, m.space.last()] == 5 + # Will be inconsistent @m.fs.Constraint(m.space) def con2(fs, x): return fs.b1.v[m.time.first(), x] == fs.v0[x] + # will be consistent @m.fs.Constraint(m.time, m.space) def con3(fs, t, x): if x == m.space.first(): return Constraint.Skip - return fs.b2[t, x].v['a'] == 7. + return fs.b2[t, x].v['a'] == 7.0 disc = TransformationFactory('dae.collocation') disc.apply_to(m, wrt=m.time, nfe=5, ncp=2, scheme='LAGRANGE-RADAU') @@ -88,18 +101,15 @@ def con3(fs, t, x): class TestDaeInitCond(unittest.TestCase): - def test_get_inconsistent_initial_conditions(self): m = make_model() inconsistent = get_inconsistent_initial_conditions(m, m.time) self.assertIn(m.fs.b1.con[m.time[1], m.space[1]], inconsistent) - self.assertIn(m.fs.b2[m.time[1], m.space[1]].b3['a'].con['d'], - inconsistent) + self.assertIn(m.fs.b2[m.time[1], m.space[1]].b3['a'].con['d'], inconsistent) self.assertIn(m.fs.con1[m.time[1]], inconsistent) self.assertNotIn(m.fs.con2[m.space[1]], inconsistent) - @unittest.skipIf(not ipopt_available, 'ipopt is not available') def test_solve_consistent_initial_conditions(self): m = make_model() @@ -114,12 +124,7 @@ def test_solve_consistent_initial_conditions(self): self.assertTrue(m.fs.b1.con[m.time[3], m.space[1]].active) with self.assertRaises(KeyError): - solve_consistent_initial_conditions( - m, - m.time, - solver, - allow_skip=False, - ) + solve_consistent_initial_conditions(m, m.time, solver, allow_skip=False) if __name__ == "__main__": diff --git a/pyomo/dae/tests/test_integral.py b/pyomo/dae/tests/test_integral.py index 65de2e677c3..77d6d4dd8a9 100644 --- a/pyomo/dae/tests/test_integral.py +++ b/pyomo/dae/tests/test_integral.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -# +# # Unit Tests for Integral Objects # @@ -18,8 +18,7 @@ import pyomo.common.unittest as unittest -from pyomo.environ import (ConcreteModel, Var, Set, TransformationFactory, - Expression) +from pyomo.environ import ConcreteModel, Var, Set, TransformationFactory, Expression from pyomo.dae import ContinuousSet, Integral from pyomo.dae.diffvar import DAE_Error @@ -30,7 +29,6 @@ class TestIntegral(unittest.TestCase): - # test valid declarations def test_valid(self): m = ConcreteModel() @@ -112,7 +110,7 @@ def _int2(m, x, t): return m.v3[x, t] def _int3(m, s, t): - return m.v2[s,t] + return m.v2[s, t] # Integrals must be indexed by a ContinuousSet with self.assertRaises(ValueError): @@ -124,7 +122,7 @@ def _int3(m, s, t): # No ContinuousSet specified with self.assertRaises(ValueError): - m.int2 = Integral(m.x, m.t, rule= _int2) + m.int2 = Integral(m.x, m.t, rule=_int2) # 'wrt' is not a ContinuousSet with self.assertRaises(ValueError): @@ -136,7 +134,7 @@ def _int3(m, s, t): # 'bounds' not supported with self.assertRaises(DAE_Error): - m.int = Integral(m.t, wrt=m.t, rule=_int, bounds=(0,0.5)) + m.int = Integral(m.t, wrt=m.t, rule=_int, bounds=(0, 0.5)) # No rule specified with self.assertRaises(ValueError): diff --git a/pyomo/dae/tests/test_misc.py b/pyomo/dae/tests/test_misc.py index 2790b28ce9d..11c4e44b7b0 100644 --- a/pyomo/dae/tests/test_misc.py +++ b/pyomo/dae/tests/test_misc.py @@ -20,16 +20,28 @@ import pyomo.common.unittest as unittest from pyomo.environ import ( - ConcreteModel, Set, Param, Var, Constraint, Expression, Block, - TransformationFactory, Piecewise, Objective, ExternalFunction, - Suffix, value, + ConcreteModel, + Set, + Param, + Var, + Constraint, + Expression, + Block, + TransformationFactory, + Piecewise, + Objective, + ExternalFunction, + Suffix, + value, ) from pyomo.common.collections import ComponentMap from pyomo.common.log import LoggingIntercept from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.dae.misc import ( - generate_finite_elements, generate_colloc_points, - update_contset_indexed_component, expand_components, + generate_finite_elements, + generate_colloc_points, + update_contset_indexed_component, + expand_components, get_index_information, ) @@ -37,7 +49,6 @@ class TestDaeMisc(unittest.TestCase): - # test generate_finite_elements method def test_generate_finite_elements(self): m = ConcreteModel() @@ -67,13 +78,13 @@ def test_generate_finite_elements(self): t = sorted(m.t3) print(t[1]) self.assertTrue(t[1] == 0.142857) - + # test generate_collocation_points method def test_generate_collocation_points(self): m = ConcreteModel() m.t = ContinuousSet(initialize=[0, 1]) m.t2 = ContinuousSet(initialize=[0, 2, 4, 6]) - + tau1 = [1] oldt = sorted(m.t) generate_colloc_points(m.t, tau1) @@ -91,8 +102,8 @@ def test_generate_collocation_points(self): generate_colloc_points(m.t, tau2) self.assertTrue(len(m.t) == 11) self.assertTrue( - [0, 0.1, 0.15, 0.35, 0.4, 0.5, 0.6, 0.65, 0.85, 0.9, 1] == - sorted(m.t)) + [0, 0.1, 0.15, 0.35, 0.4, 0.5, 0.6, 0.65, 0.85, 0.9, 1] == sorted(m.t) + ) generate_colloc_points(m.t2, tau2) self.assertTrue(len(m.t2) == 16) @@ -110,12 +121,13 @@ def test_discretized_params_single(self): m.p1 = Param(m.t, initialize=1) m.p2 = Param(m.t, default=2) m.p3 = Param(m.t, initialize=1, default=2) - + def _rule1(m, i): return i**2 - + def _rule2(m, i): return 2 * i + m.p4 = Param(m.t, initialize={0: 5, 10: 5}, default=_rule1) m.p5 = Param(m.t, initialize=_rule1, default=_rule2) @@ -146,15 +158,18 @@ def test_discretized_params_multiple(self): def _rule1(m, i): return i**2 + m.p1 = Param(m.s1, m.t, initialize=2, default=_rule1) m.p2 = Param(m.t, m.s1, default=5) def _rule2(m, i, j): return i + j + m.p3 = Param(m.s1, m.t, initialize=2, default=_rule2) def _rule3(m, i, j, k): return i + j + k + m.p4 = Param(m.s2, m.t, default=_rule3) generate_finite_elements(m.t, 5) @@ -180,7 +195,7 @@ def _rule3(m, i, j, k): for j in m.s2: self.assertEqual(m.p4[j, i], sum(j, i)) - # test update_contset_indexed_component method for Vars with + # test update_contset_indexed_component method for Vars with # single index of the ContinuousSet def test_update_contset_indexed_component_vars_single(self): m = ConcreteModel() @@ -192,6 +207,7 @@ def test_update_contset_indexed_component_vars_single(self): def _init(m, i): return i + m.v3 = Var(m.t, bounds=(-5, 5), initialize=_init) m.v4 = Var(m.s, initialize=7, dense=True) m.v5 = Var(m.t2, dense=True) @@ -214,7 +230,7 @@ def _init(m, i): self.assertTrue(value(m.v1[2]) == 3) self.assertTrue(m.v1[4].ub is None) self.assertTrue(m.v1[6].lb is None) - + self.assertTrue(m.v2[2].value is None) self.assertTrue(m.v2[4].lb == 4) self.assertTrue(m.v2[8].ub == 10) @@ -225,7 +241,7 @@ def _init(m, i): self.assertTrue(m.v3[6].ub == 5) self.assertTrue(value(m.v3[8]) == 8) - # test update_contset_indexed_component method for Vars with + # test update_contset_indexed_component method for Vars with # multiple indices def test_update_contset_indexed_component_vars_multiple(self): m = ConcreteModel() @@ -234,11 +250,13 @@ def test_update_contset_indexed_component_vars_multiple(self): m.s = Set(initialize=[1, 2, 3]) m.s2 = Set(initialize=[(1, 1), (2, 2)]) m.v1 = Var(m.s, m.t, initialize=3) - m.v2 = Var(m.s, m.t, m.t2, bounds=(4, 10), - initialize={(1, 0, 1): 22, (2, 10, 2): 22}) + m.v2 = Var( + m.s, m.t, m.t2, bounds=(4, 10), initialize={(1, 0, 1): 22, (2, 10, 2): 22} + ) def _init(m, i, j, k): return i + m.v3 = Var(m.t, m.s2, bounds=(-5, 5), initialize=_init) m.v4 = Var(m.s, m.t2, initialize=7, dense=True) m.v5 = Var(m.s2) @@ -260,7 +278,7 @@ def _init(m, i, j, k): self.assertTrue(value(m.v1[1, 4]) == 3) self.assertTrue(m.v1[2, 2].ub is None) self.assertTrue(m.v1[3, 8].lb is None) - + self.assertTrue(value(m.v2[1, 0, 1]) == 22) self.assertTrue(m.v2[1, 2, 1].value is None) self.assertTrue(m.v2[2, 4, 3].lb == 4) @@ -278,15 +296,17 @@ def test_update_contset_indexed_component_constraints_single(self): m.t = ContinuousSet(bounds=(0, 10)) m.p = Param(m.t, default=3) m.v = Var(m.t, initialize=5) - + def _con1(m, i): return m.p[i] * m.v[i] <= 20 + m.con1 = Constraint(m.t, rule=_con1) - + # Rules that iterate over a ContinuouSet implicitly are not updated # after the discretization def _con2(m): return sum(m.v[i] for i in m.t) >= 0 + m.con2 = Constraint(rule=_con2) expansion_map = ComponentMap() @@ -313,6 +333,7 @@ def test_update_contset_indexed_component_constraints_multiple(self): def _init(m, i, j): return j + i + m.p1 = Param(m.s1, m.t, default=_init) m.v1 = Var(m.s1, m.t, initialize=5) m.v2 = Var(m.s2, m.t, initialize=2) @@ -320,14 +341,17 @@ def _init(m, i, j): def _con1(m, si, ti): return m.v1[si, ti] * m.p1[si, ti] >= 0 + m.con1 = Constraint(m.s1, m.t, rule=_con1) def _con2(m, i, j, ti): return m.v2[i, j, ti] + m.p1[1, ti] == 10 + m.con2 = Constraint(m.s2, m.t, rule=_con2) def _con3(m, i, ti, ti2, j, k): return m.v1[i, ti] - m.v3[ti2, j, k] * m.p1[i, ti] <= 20 + m.con3 = Constraint(m.s1, m.t, m.t2, m.s2, rule=_con3) expansion_map = ComponentMap() @@ -340,7 +364,7 @@ def _con3(m, i, ti, ti2, j, k): update_contset_indexed_component(m.con1, expansion_map) update_contset_indexed_component(m.con2, expansion_map) update_contset_indexed_component(m.con3, expansion_map) - + self.assertTrue(len(m.con1) == 18) self.assertTrue(len(m.con2) == 12) self.assertTrue(len(m.con3) == 108) @@ -356,7 +380,7 @@ def _con3(m, i, ti, ti2, j, k): self.assertEqual(m.con2[1, 1, 8](), 11) self.assertTrue(value(m.con2[2, 2, 6].lower) == 10) self.assertTrue(value(m.con2[1, 1, 10].upper) == 10) - + self.assertEqual(m.con3[1, 2, 1, 1, 1](), 2) self.assertEqual(m.con3[1, 4, 1, 2, 2](), 0) self.assertEqual(m.con3[2, 6, 3, 1, 1](), -3) @@ -374,12 +398,14 @@ def test_update_contset_indexed_component_expressions_single(self): def _con1(m, i): return m.p[i] * m.v[i] + m.con1 = Expression(m.t, rule=_con1) # Rules that iterate over a ContinuousSet implicitly are not updated # after the discretization def _con2(m): return sum(m.v[i] for i in m.t) + m.con2 = Expression(rule=_con2) expansion_map = ComponentMap() @@ -406,6 +432,7 @@ def test_update_contset_indexed_component_expressions_multiple(self): def _init(m, i, j): return j + i + m.p1 = Param(m.s1, m.t, default=_init) m.v1 = Var(m.s1, m.t, initialize=5) m.v2 = Var(m.s2, m.t, initialize=2) @@ -413,14 +440,17 @@ def _init(m, i, j): def _con1(m, si, ti): return m.v1[si, ti] * m.p1[si, ti] + m.con1 = Expression(m.s1, m.t, rule=_con1) def _con2(m, i, j, ti): return m.v2[i, j, ti] + m.p1[1, ti] + m.con2 = Expression(m.s2, m.t, rule=_con2) def _con3(m, i, ti, ti2, j, k): return m.v1[i, ti] - m.v3[ti2, j, k] * m.p1[i, ti] + m.con3 = Expression(m.s1, m.t, m.t2, m.s2, rule=_con3) expansion_map = ComponentMap() @@ -451,7 +481,7 @@ def _con3(m, i, ti, ti2, j, k): self.assertEqual(m.con3[2, 6, 3, 1, 1](), -3) self.assertEqual(m.con3[3, 8, 2, 2, 2](), -6) - # test update_contset_indexed_component method for Blocks + # test update_contset_indexed_component method for Blocks # indexed by a ContinuousSet def test_update_contset_indexed_component_block_single(self): model = ConcreteModel() @@ -459,26 +489,29 @@ def test_update_contset_indexed_component_block_single(self): def _block_rule(b, t): m = b.model() - + b.s1 = Set(initialize=['A1', 'A2', 'A3']) def _init(m, j): return j * 2 + b.p1 = Param(m.t, default=_init) b.v1 = Var(m.t, initialize=5) b.v2 = Var(m.t, initialize=2) b.v3 = Var(m.t, b.s1, initialize=1) def _con1(_b, ti): - return _b.v1[ti] * _b.p1[ti] == _b.v1[t]**2 + return _b.v1[ti] * _b.p1[ti] == _b.v1[t] ** 2 + b.con1 = Constraint(m.t, rule=_con1) def _con2(_b, i, ti): return _b.v2[ti] - _b.v3[ti, i] + _b.p1[ti] + b.con2 = Expression(b.s1, m.t, rule=_con2) - + model.blk = Block(model.t, rule=_block_rule) - + self.assertTrue(len(model.blk), 2) expansion_map = ComponentMap() @@ -497,7 +530,7 @@ def _con2(_b, i, ti): self.assertEqual(model.blk[2].p1[2], 4) self.assertEqual(model.blk[8].p1[6], 12) - + self.assertEqual(model.blk[4].con1[4](), 15) self.assertEqual(model.blk[6].con1[8](), 55) @@ -514,22 +547,25 @@ def test_update_contset_indexed_component_block_multiple(self): def _block_rule(b, t, s1): m = b.model() - + def _init(m, i, j): return j * 2 + b.p1 = Param(m.s1, m.t, mutable=True, default=_init) b.v1 = Var(m.s1, m.t, initialize=5) b.v2 = Var(m.s2, m.t, initialize=2) b.v3 = Var(m.t, m.s2, initialize=1) def _con1(_b, si, ti): - return _b.v1[si, ti] * _b.p1[si, ti] == _b.v1[si, t]**2 + return _b.v1[si, ti] * _b.p1[si, ti] == _b.v1[si, t] ** 2 + b.con1 = Constraint(m.s1, m.t, rule=_con1) def _con2(_b, i, j, ti): return _b.v2[i, j, ti] - _b.v3[ti, i, j] + _b.p1['A', ti] + b.con2 = Expression(m.s2, m.t, rule=_con2) - + model.blk = Block(model.t, model.s1, rule=_block_rule) expansion_map = ComponentMap() @@ -550,14 +586,14 @@ def _con2(_b, i, j, ti): self.assertEqual(model.blk[2, 'A'].p1['A', 2].value, 4) self.assertEqual(model.blk[8, 'C'].p1['B', 6].value, 12) - + self.assertEqual(model.blk[4, 'B'].con1['B', 4](), 15) self.assertEqual(model.blk[6, 'A'].con1['C', 8](), 55) self.assertEqual(model.blk[0, 'A'].con2['x1', 'x1', 10](), 21) self.assertEqual(model.blk[4, 'C'].con2['x2', 'x2', 6](), 13) - # test update_contset_indexed_component method for Blocks + # test update_contset_indexed_component method for Blocks # indexed by a ContinuousSet. Block rule returns new block def test_update_contset_indexed_component_block_single2(self): model = ConcreteModel() @@ -571,24 +607,27 @@ def _block_rule(_b_, t): def _init(m, j): return j * 2 + b.p1 = Param(m.t, default=_init) b.v1 = Var(m.t, initialize=5) b.v2 = Var(m.t, initialize=2) b.v3 = Var(m.t, b.s1, initialize=1) def _con1(_b, ti): - return _b.v1[ti] * _b.p1[ti] == _b.v1[t]**2 + return _b.v1[ti] * _b.p1[ti] == _b.v1[t] ** 2 + b.con1 = Constraint(m.t, rule=_con1) def _con2(_b, i, ti): return _b.v2[ti] - _b.v3[ti, i] + _b.p1[ti] + b.con2 = Expression(b.s1, m.t, rule=_con2) return b - + model.blk = Block(model.t, rule=_block_rule) expansion_map = ComponentMap() - + self.assertTrue(len(model.blk), 2) generate_finite_elements(model.t, 5) @@ -605,7 +644,7 @@ def _con2(_b, i, ti): self.assertEqual(model.blk[2].p1[2], 4) self.assertEqual(model.blk[8].p1[6], 12) - + self.assertEqual(model.blk[4].con1[4](), 15) self.assertEqual(model.blk[6].con1[8](), 55) @@ -626,20 +665,23 @@ def _block_rule(_b_, t, s1): def _init(m, i, j): return j * 2 + b.p1 = Param(m.s1, m.t, mutable=True, default=_init) b.v1 = Var(m.s1, m.t, initialize=5) b.v2 = Var(m.s2, m.t, initialize=2) b.v3 = Var(m.t, m.s2, initialize=1) def _con1(_b, si, ti): - return _b.v1[si, ti] * _b.p1[si, ti] == _b.v1[si, t]**2 + return _b.v1[si, ti] * _b.p1[si, ti] == _b.v1[si, t] ** 2 + b.con1 = Constraint(m.s1, m.t, rule=_con1) def _con2(_b, i, j, ti): return _b.v2[i, j, ti] - _b.v3[ti, i, j] + _b.p1['A', ti] + b.con2 = Expression(m.s2, m.t, rule=_con2) return b - + model.blk = Block(model.t, model.s1, rule=_block_rule) expansion_map = ComponentMap() @@ -660,7 +702,7 @@ def _con2(_b, i, j, ti): self.assertEqual(model.blk[2, 'A'].p1['A', 2].value, 4) self.assertEqual(model.blk[8, 'C'].p1['B', 6].value, 12) - + self.assertEqual(model.blk[4, 'B'].con1['B', 4](), 15) self.assertEqual(model.blk[6, 'A'].con1['C', 8](), 55) @@ -672,19 +714,17 @@ def _con2(_b, i, j, ti): def test_update_contset_indexed_component_piecewise_single(self): x = [0.0, 1.5, 3.0, 5.0] y = [1.1, -1.1, 2.0, 1.1] - + model = ConcreteModel() model.t = ContinuousSet(bounds=(0, 10)) model.x = Var(model.t, bounds=(min(x), max(x))) model.y = Var(model.t) - model.fx = Piecewise(model.t, - model.y, model.x, - pw_pts=x, - pw_constr_type='EQ', - f_rule=y) - + model.fx = Piecewise( + model.t, model.y, model.x, pw_pts=x, pw_constr_type='EQ', f_rule=y + ) + self.assertEqual(len(model.fx), 2) expansion_map = ComponentMap() @@ -695,12 +735,12 @@ def test_update_contset_indexed_component_piecewise_single(self): self.assertEqual(len(model.fx), 6) self.assertEqual(len(model.fx[2].SOS2_constraint), 3) - # test update_contset_indexed_component method for Piecewise + # test update_contset_indexed_component method for Piecewise # component with multiple indices def test_update_contset_indexed_component_piecewise_multiple(self): x = [0.0, 1.5, 3.0, 5.0] y = [1.1, -1.1, 2.0, 1.1] - + model = ConcreteModel() model.t = ContinuousSet(bounds=(0, 10)) model.s = Set(initialize=['A', 'B', 'C']) @@ -708,12 +748,10 @@ def test_update_contset_indexed_component_piecewise_multiple(self): model.x = Var(model.s, model.t, bounds=(min(x), max(x))) model.y = Var(model.s, model.t) - model.fx = Piecewise(model.s, model.t, - model.y, model.x, - pw_pts=x, - pw_constr_type='EQ', - f_rule=y) - + model.fx = Piecewise( + model.s, model.t, model.y, model.x, pw_pts=x, pw_constr_type='EQ', f_rule=y + ) + self.assertEqual(len(model.fx), 6) expansion_map = ComponentMap() @@ -734,6 +772,7 @@ def test_update_contset_indexed_component_other(self): def _obj(m): return sum(m.v[i] for i in m.s) + m.obj = Objective(rule=_obj) expansion_map = ComponentMap @@ -782,8 +821,10 @@ def _block_rule(b, t): def _init(m, j): return j * 2 + b.p1 = Param(m.t, default=_init) b.v1 = Var(m.t, initialize=5) + m.foo = Foo(m.t, rule=_block_rule) generate_finite_elements(m.t, 5) @@ -796,7 +837,6 @@ def _init(m, j): def test_update_block_derived_override_construct_nofcn(self): class Foo(Block): - def construct(self, data=None): Block.construct(self, data) @@ -808,16 +848,19 @@ def _block_rule(b, t): def _init(m, j): return j * 2 + b.p1 = Param(m.t, default=_init) b.v1 = Var(m.t, initialize=5) + m.foo = Foo(m.t, rule=_block_rule) generate_finite_elements(m.t, 5) OUTPUT = StringIO() with LoggingIntercept(OUTPUT, 'pyomo.dae'): expand_components(m) - self.assertIn('transformation to the Block-derived component', - OUTPUT.getvalue()) + self.assertIn( + 'transformation to the Block-derived component', OUTPUT.getvalue() + ) self.assertEqual(len(m.foo), 6) self.assertEqual(len(m.foo[0].p1), 6) self.assertEqual(len(m.foo[2].v1), 6) @@ -829,9 +872,10 @@ class Foo(Block): def construct(self, data=None): Block.construct(self, data) - + def update_after_discretization(self): self.updated = True + m = ConcreteModel() m.t = ContinuousSet(bounds=(0, 10)) @@ -840,8 +884,10 @@ def _block_rule(b, t): def _init(m, j): return j * 2 + b.p1 = Param(m.t, default=_init) b.v1 = Var(m.t, initialize=5) + m.foo = Foo(m.t, rule=_block_rule) generate_finite_elements(m.t, 5) @@ -864,8 +910,10 @@ def _block_rule(b, t, s): def _init(m, j): return j * 2 + b.p1 = Param(m.t, default=_init) b.v1 = Var(m.t, initialize=5) + m.foo = Foo(m.t, m.s, rule=_block_rule) generate_finite_elements(m.t, 5) @@ -878,7 +926,6 @@ def _init(m, j): def test_update_block_derived_override_construct_nofcn2(self): class Foo(Block): - def construct(self, data=None): Block.construct(self, data) @@ -891,8 +938,10 @@ def _block_rule(b, t, s): def _init(m, j): return j * 2 + b.p1 = Param(m.t, default=_init) b.v1 = Var(m.t, initialize=5) + m.foo = Foo(m.t, m.s, rule=_block_rule) generate_finite_elements(m.t, 5) @@ -900,8 +949,9 @@ def _init(m, j): OUTPUT = StringIO() with LoggingIntercept(OUTPUT, 'pyomo.dae'): expand_components(m) - self.assertIn('transformation to the Block-derived component', - OUTPUT.getvalue()) + self.assertIn( + 'transformation to the Block-derived component', OUTPUT.getvalue() + ) self.assertEqual(len(m.foo), 18) self.assertEqual(len(m.foo[0, 1].p1), 6) self.assertEqual(len(m.foo[2, 2].v1), 6) @@ -913,9 +963,10 @@ class Foo(Block): def construct(self, data=None): Block.construct(self, data) - + def update_after_discretization(self): self.updated = True + m = ConcreteModel() m.t = ContinuousSet(bounds=(0, 10)) m.s = Set(initialize=[1, 2, 3]) @@ -925,8 +976,10 @@ def _block_rule(b, t, s): def _init(m, j): return j * 2 + b.p1 = Param(m.t, default=_init) b.v1 = Var(m.t, initialize=5) + m.foo = Foo(m.t, m.s, rule=_block_rule) generate_finite_elements(m.t, 5) @@ -968,18 +1021,20 @@ def _con_rule(b, t): def test_external_function(self): m = ConcreteModel() m.t = ContinuousSet(bounds=(0, 10)) - + def _fun(x): return x**2 + m.x_func = ExternalFunction(_fun) m.y = Var(m.t, initialize=3) m.dy = DerivativeVar(m.y, initialize=3) - + def _con(m, t): return m.dy[t] == m.x_func(m.y[t]) + m.con = Constraint(m.t, rule=_con) - + generate_finite_elements(m.t, 5) expand_components(m) @@ -988,9 +1043,9 @@ def _con(m, t): def test_get_index_information(self): m = ConcreteModel() - m.t = ContinuousSet(bounds=(0,10)) - m.x = ContinuousSet(bounds=(0,10)) - m.s = Set(initialize=['a','b','c']) + m.t = ContinuousSet(bounds=(0, 10)) + m.x = ContinuousSet(bounds=(0, 10)) + m.s = Set(initialize=['a', 'b', 'c']) m.v = Var(m.t, m.x, m.s, initialize=1) m.v2 = Var(m.t, m.s, initialize=1) @@ -1001,20 +1056,19 @@ def test_get_index_information(self): info = get_index_information(m.v, m.t) nts = info['non_ds'] index_getter = info['index function'] - + self.assertEqual(len(nts), 33) self.assertTrue(m.x in nts.set_tuple) self.assertTrue(m.s in nts.set_tuple) - self.assertEqual(index_getter((8.0,'a'),1,0),(2.0,8.0,'a')) + self.assertEqual(index_getter((8.0, 'a'), 1, 0), (2.0, 8.0, 'a')) info = get_index_information(m.v2, m.t) nts = info['non_ds'] index_getter = info['index function'] - + self.assertEqual(len(nts), 3) self.assertTrue(m.s is nts) - self.assertEqual(index_getter('a',1,0),(2.0,'a')) - + self.assertEqual(index_getter('a', 1, 0), (2.0, 'a')) if __name__ == "__main__": diff --git a/pyomo/dae/tests/test_set_utils.py b/pyomo/dae/tests/test_set_utils.py index de23c2b8c71..fa592e05181 100644 --- a/pyomo/dae/tests/test_set_utils.py +++ b/pyomo/dae/tests/test_set_utils.py @@ -18,7 +18,12 @@ import pyomo.common.unittest as unittest from pyomo.environ import ( - Block, Constraint, ConcreteModel, Var, Set, TransformationFactory + Block, + Constraint, + ConcreteModel, + Var, + Set, + TransformationFactory, ) from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.dae.set_utils import ( @@ -47,8 +52,9 @@ def b1(b): b.v = Var(m.time, m.space, initialize=1) b.dv = DerivativeVar(b.v, wrt=m.time, initialize=0) - b.con = Constraint(m.time, m.space, - rule=lambda b, t, x: b.dv[t, x] == 7 - b.v[t, x]) + b.con = Constraint( + m.time, m.space, rule=lambda b, t, x: b.dv[t, x] == 7 - b.v[t, x] + ) # Inconsistent @b.Block(m.time) @@ -65,18 +71,19 @@ def b3(b, c): @b.Constraint(m.set2) def con(b, s): - return (5*b.v[s] == - m.fs.b2[m.time.first(), m.space.first()].v[c]) + return 5 * b.v[s] == m.fs.b2[m.time.first(), m.space.first()].v[c] # inconsistent @m.fs.Constraint(m.time) def con1(fs, t): return fs.b1.v[t, m.space.last()] == 5 + # Will be inconsistent @m.fs.Constraint(m.space) def con2(fs, x): return fs.b1.v[m.time.first(), x] == fs.v0[x] + # will be consistent disc = TransformationFactory('dae.collocation') @@ -87,7 +94,6 @@ def con2(fs, x): class TestDaeSetUtils(unittest.TestCase): - # Test explicit/implicit index detection functions def test_indexed_by(self): m = ConcreteModel() @@ -147,47 +153,66 @@ def b(bb): self.assertFalse(is_in_block_indexed_by(m.v2, m.set)) self.assertTrue(is_in_block_indexed_by(m.b1[m.time[1]].v2, m.time)) - self.assertTrue(is_in_block_indexed_by( - m.b2[m.time[1], m.space[1]].b.v1, m.time)) - self.assertTrue(is_in_block_indexed_by( - m.b2[m.time[1], m.space[1]].b.v2, m.time)) - self.assertTrue(is_explicitly_indexed_by( - m.b2[m.time[1], m.space[1]].b.v2, m.time)) - self.assertFalse(is_in_block_indexed_by( - m.b2[m.time[1], m.space[1]].b.v1, m.set)) - - self.assertFalse(is_in_block_indexed_by( - m.b2[m.time[1], m.space[1]].b.v1, - m.space, stop_at=m.b2[m.time[1], m.space[1]])) + self.assertTrue( + is_in_block_indexed_by(m.b2[m.time[1], m.space[1]].b.v1, m.time) + ) + self.assertTrue( + is_in_block_indexed_by(m.b2[m.time[1], m.space[1]].b.v2, m.time) + ) + self.assertTrue( + is_explicitly_indexed_by(m.b2[m.time[1], m.space[1]].b.v2, m.time) + ) + self.assertFalse( + is_in_block_indexed_by(m.b2[m.time[1], m.space[1]].b.v1, m.set) + ) + + self.assertFalse( + is_in_block_indexed_by( + m.b2[m.time[1], m.space[1]].b.v1, + m.space, + stop_at=m.b2[m.time[1], m.space[1]], + ) + ) # Explicit indexing with multi-dimensional set: self.assertTrue(is_explicitly_indexed_by(m.v4, m.time, m.set2)) self.assertTrue(is_explicitly_indexed_by(m.v5, m.time, m.set2, m.space)) # Implicit indexing with multi-dimensional set: - self.assertTrue(is_in_block_indexed_by( - m.b3['a', 1, m.time[1]].v, m.set2)) - self.assertTrue(is_in_block_indexed_by( - m.b3['a', 1, m.time[1]].v, m.time)) - self.assertTrue(is_in_block_indexed_by( - m.b3['a', 1, m.time[1]].v1[m.space[1]], m.set2)) - self.assertFalse(is_in_block_indexed_by( - m.b3['a', 1, m.time[1]].v1[m.space[1]], m.space)) - self.assertTrue(is_in_block_indexed_by( - m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.set2)) - self.assertTrue(is_in_block_indexed_by( - m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time)) - self.assertTrue(is_in_block_indexed_by( - m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.space)) - self.assertFalse(is_in_block_indexed_by( - m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.set)) - self.assertFalse(is_in_block_indexed_by( - m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time, - stop_at=m.b3['b', 2, m.time[2]])) - self.assertFalse(is_in_block_indexed_by( - m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time, - stop_at=m.b3)) - + self.assertTrue(is_in_block_indexed_by(m.b3['a', 1, m.time[1]].v, m.set2)) + self.assertTrue(is_in_block_indexed_by(m.b3['a', 1, m.time[1]].v, m.time)) + self.assertTrue( + is_in_block_indexed_by(m.b3['a', 1, m.time[1]].v1[m.space[1]], m.set2) + ) + self.assertFalse( + is_in_block_indexed_by(m.b3['a', 1, m.time[1]].v1[m.space[1]], m.space) + ) + self.assertTrue( + is_in_block_indexed_by(m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.set2) + ) + self.assertTrue( + is_in_block_indexed_by(m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time) + ) + self.assertTrue( + is_in_block_indexed_by( + m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.space + ) + ) + self.assertFalse( + is_in_block_indexed_by(m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.set) + ) + self.assertFalse( + is_in_block_indexed_by( + m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], + m.time, + stop_at=m.b3['b', 2, m.time[2]], + ) + ) + self.assertFalse( + is_in_block_indexed_by( + m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time, stop_at=m.b3 + ) + ) # Test get_index_set_except and _complete_index def test_get_index_set_except(self): @@ -236,9 +261,8 @@ def test_get_index_set_except(self): info = get_index_set_except(m.v2, m.time) set_except = info['set_except'] index_getter = info['index_getter'] - self.assertTrue(m.space[1] in set_except - and m.space.last() in set_except) - # Here (2,) is the partial index, corresponding to space. + self.assertTrue(m.space[1] in set_except and m.space.last() in set_except) + # Here (2,) is the partial index, corresponding to space. # Can be provided as a scalar or tuple. 4, the time index, # should be inserted before (2,) self.assertEqual(index_getter((2,), 4), (4, 2)) @@ -249,7 +273,7 @@ def test_get_index_set_except(self): set_except = info['set_except'] index_getter = info['index_getter'] self.assertTrue(set_except == [None]) - # 5, 7 are the desired index values for space, time + # 5, 7 are the desired index values for space, time # index_getter should put them in the right order for m.v2, # even if they are not valid indices for m.v2 self.assertEqual(index_getter((), 5, 7), (7, 5)) @@ -260,8 +284,9 @@ def test_get_index_set_except(self): # indexing v3 set_except = info['set_except'] index_getter = info['index_getter'] - self.assertTrue((m.space[1], 'b') in set_except - and (m.space.last(), 'a') in set_except) + self.assertTrue( + (m.space[1], 'b') in set_except and (m.space.last(), 'a') in set_except + ) # index_getter inserts a scalar index into an index of length 2 self.assertEqual(index_getter((2, 'b'), 7), (7, 2, 'b')) @@ -280,7 +305,7 @@ def test_get_index_set_except(self): index_getter = info['index_getter'] self.assertTrue((m.time[1], 'd') in set_except) self.assertEqual(index_getter((4, 'f'), 'b', 8), (4, 8, 'b', 'f')) - + # The intended usage of this function looks something like: index_set = m.v4.index_set() for partial_index in set_except: @@ -299,15 +324,19 @@ def test_get_index_set_except(self): set_except = info['set_except'] index_getter = info['index_getter'] self.assertTrue(m.space[1] in set_except) - self.assertEqual(index_getter(m.space[1], ('b', 2), m.time[1]), - (m.time[1], m.space[1], 'b', 2)) + self.assertEqual( + index_getter(m.space[1], ('b', 2), m.time[1]), + (m.time[1], m.space[1], 'b', 2), + ) info = get_index_set_except(m.v7, m.time) set_except = info['set_except'] index_getter = info['index_getter'] self.assertIn(('a', 1, m.space[1]), set_except) - self.assertEqual(index_getter(('a', 1, m.space[1]), m.time[1]), - ('a', 1, m.space[1], m.time[1])) + self.assertEqual( + index_getter(('a', 1, m.space[1]), m.time[1]), + ('a', 1, m.space[1], m.time[1]), + ) m.v8 = Var(m.time, m.set3, m.time) with self.assertRaises(ValueError): @@ -334,25 +363,26 @@ def test_deactivate_model_at(self): self.assertFalse(m.fs.b1.con[m.time[3], m.space[1]].active) with self.assertRaises(KeyError): - deactivate_model_at(m, m.time, m.time[1], allow_skip=False, - suppress_warnings=True) + deactivate_model_at( + m, m.time, m.time[1], allow_skip=False, suppress_warnings=True + ) def test_get_indices_of_projection(self): m = ConcreteModel() - m.s1 = Set(initialize=[1,2,3]) - m.s2 = Set(initialize=[4,5,6]) - m.s3 = Set(initialize=['a','b']) - m.s4 = Set(initialize=['c','d']) + m.s1 = Set(initialize=[1, 2, 3]) + m.s2 = Set(initialize=[4, 5, 6]) + m.s3 = Set(initialize=['a', 'b']) + m.s4 = Set(initialize=['c', 'd']) # Basic test: product = m.s1.cross(m.s2, m.s3, m.s4) info = get_indices_of_projection(product, m.s2) set_except = info['set_except'] index_getter = info['index_getter'] - predicted_len = len(m.s1)*len(m.s3)*len(m.s4) + predicted_len = len(m.s1) * len(m.s3) * len(m.s4) self.assertEqual(len(set_except), predicted_len) removed_index = 4 - for idx in m.s1*m.s3*m.s4: + for idx in m.s1 * m.s3 * m.s4: self.assertIn(idx, set_except) full_index = index_getter(idx, removed_index) self.assertIn(full_index, product) @@ -363,10 +393,10 @@ def test_get_indices_of_projection(self): info = get_indices_of_projection(product, m.s2) set_except = info['set_except'] index_getter = info['index_getter'] - predicted_len = len(m.s1)*len(m.s3)*len(m.s4) + predicted_len = len(m.s1) * len(m.s3) * len(m.s4) self.assertEqual(len(set_except), predicted_len) removed_index = 4 - for idx in m.s1*m.s3*m.s4: + for idx in m.s1 * m.s3 * m.s4: self.assertIn(idx, set_except) full_index = index_getter(idx, removed_index) self.assertIn(full_index, product) @@ -376,10 +406,10 @@ def test_get_indices_of_projection(self): info = get_indices_of_projection(product, m.s2, m.s4) set_except = info['set_except'] index_getter = info['index_getter'] - predicted_len = len(m.s1)*len(m.s3) + predicted_len = len(m.s1) * len(m.s3) self.assertEqual(len(set_except), predicted_len) removed_index = (4, 'd') - for idx in m.s1*m.s3: + for idx in m.s1 * m.s3: self.assertIn(idx, set_except) full_index = index_getter(idx, *removed_index) self.assertIn(full_index, product) diff --git a/pyomo/dae/tests/test_simulator.py b/pyomo/dae/tests/test_simulator.py index 137c1cc741d..b3003bb5a0d 100644 --- a/pyomo/dae/tests/test_simulator.py +++ b/pyomo/dae/tests/test_simulator.py @@ -13,31 +13,37 @@ import json import pyomo.common.unittest as unittest -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.environ import ( - ConcreteModel, Param, Var, Set, Constraint, - sin, log, sqrt, TransformationFactory) + ConcreteModel, + Param, + Var, + Set, + Constraint, + sin, + log, + sqrt, + TransformationFactory, +) from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.dae.diffvar import DAE_Error from pyomo.dae.simulator import ( scipy_available, casadi, casadi_available, - Simulator, - _check_getitemexpression, + Simulator, + _check_getitemexpression, _check_productexpression, _check_negationexpression, - _check_viewsumexpression, + _check_viewsumexpression, substitute_pyomo2casadi, ) -from pyomo.core.expr.template_expr import ( - IndexTemplate, - _GetItemIndexer, -) +from pyomo.core.expr.template_expr import IndexTemplate, _GetItemIndexer from pyomo.common.fileutils import import_file import os from os.path import abspath, dirname, normpath, join + currdir = dirname(abspath(__file__)) exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'dae')) @@ -59,7 +65,6 @@ def setUp(self): # Testing invalid simulator arguments def test_invalid_argument_values(self): - m = self.m m.w = Var(m.t) m.y = Var() @@ -68,7 +73,8 @@ def test_invalid_argument_values(self): Simulator(m, package='foo') def _con(m, i): - return m.v[i] == m.w[i]**2 + m.y + return m.v[i] == m.w[i] ** 2 + m.y + m.con = Constraint(m.t, rule=_con) with self.assertRaises(DAE_Error): @@ -82,11 +88,11 @@ def _con(m, i): # Testing the simulator's handling of inequality constraints @unittest.skipIf(not scipy_available, "Scipy is not available") def test_inequality_constraints(self): - m = self.m def _deq(m, i): - return m.dv[i] >= m.v[i]**2 + m.v[i] + return m.dv[i] >= m.v[i] ** 2 + m.v[i] + m.deq = Constraint(m.t, rule=_deq) mysim = Simulator(m) @@ -99,18 +105,19 @@ def _deq(m, i): # the simulator generates the correct RHS expression @unittest.skipIf(not scipy_available, "Scipy is not available") def test_separable_diffeq_case2(self): - m = self.m m.w = Var(m.t, m.s) m.dw = DerivativeVar(m.w) t = IndexTemplate(m.t) def _deqv(m, i): - return m.v[i]**2 + m.v[i] == m.dv[i] + return m.v[i] ** 2 + m.v[i] == m.dv[i] + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return m.w[i, j]**2 + m.w[i, j] == m.dw[i, j] + return m.w[i, j] ** 2 + m.w[i, j] == m.dw[i, j] + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -135,22 +142,23 @@ def _deqw(m, i, j): # the simulator generates the correct RHS expression @unittest.skipIf(not scipy_available, "Scipy is not available") def test_separable_diffeq_case3(self): - m = self.m m.w = Var(m.t, m.s) m.dw = DerivativeVar(m.w) m.p = Param(initialize=5) m.mp = Param(initialize=5, mutable=True) m.y = Var() - + t = IndexTemplate(m.t) def _deqv(m, i): - return m.p * m.dv[i] == m.v[i]**2 + m.v[i] + return m.p * m.dv[i] == m.v[i] ** 2 + m.v[i] + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return m.p * m.dw[i, j] == m.w[i, j]**2 + m.w[i, j] + return m.p * m.dw[i, j] == m.w[i, j] ** 2 + m.w[i, j] + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -170,11 +178,13 @@ def _deqw(m, i, j): m.del_component('deqw_index') def _deqv(m, i): - return m.mp * m.dv[i] == m.v[i]**2 + m.v[i] + return m.mp * m.dv[i] == m.v[i] ** 2 + m.v[i] + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return m.y * m.dw[i, j] == m.w[i, j]**2 + m.w[i, j] + return m.y * m.dw[i, j] == m.w[i, j] ** 2 + m.w[i, j] + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -202,22 +212,23 @@ def _deqw(m, i, j): # the simulator generates the correct RHS expression @unittest.skipIf(not scipy_available, "Scipy is not available") def test_separable_diffeq_case4(self): - m = self.m m.w = Var(m.t, m.s) m.dw = DerivativeVar(m.w) m.p = Param(initialize=5) m.mp = Param(initialize=5, mutable=True) m.y = Var() - + t = IndexTemplate(m.t) def _deqv(m, i): - return m.v[i]**2 + m.v[i] == m.p * m.dv[i] + return m.v[i] ** 2 + m.v[i] == m.p * m.dv[i] + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return m.w[i, j]**2 + m.w[i, j] == m.p * m.dw[i, j] + return m.w[i, j] ** 2 + m.w[i, j] == m.p * m.dw[i, j] + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -237,11 +248,13 @@ def _deqw(m, i, j): m.del_component('deqw_index') def _deqv(m, i): - return m.v[i]**2 + m.v[i] == m.mp * m.dv[i] + return m.v[i] ** 2 + m.v[i] == m.mp * m.dv[i] + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return m.w[i, j]**2 + m.w[i, j] == m.y * m.dw[i, j] + return m.w[i, j] ** 2 + m.w[i, j] == m.y * m.dw[i, j] + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -269,22 +282,23 @@ def _deqw(m, i, j): # the simulator generates the correct RHS expression @unittest.skipIf(not scipy_available, "Scipy is not available") def test_separable_diffeq_case5(self): - m = self.m m.w = Var(m.t, m.s) m.dw = DerivativeVar(m.w) m.p = Param(initialize=5) m.mp = Param(initialize=5, mutable=True) m.y = Var() - + t = IndexTemplate(m.t) def _deqv(m, i): - return m.dv[i] + m.y == m.v[i]**2 + m.v[i] + return m.dv[i] + m.y == m.v[i] ** 2 + m.v[i] + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return m.y + m.dw[i, j] == m.w[i, j]**2 + m.w[i, j] + return m.y + m.dw[i, j] == m.w[i, j] ** 2 + m.w[i, j] + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -304,11 +318,13 @@ def _deqw(m, i, j): m.del_component('deqw_index') def _deqv(m, i): - return m.mp + m.dv[i] == m.v[i]**2 + m.v[i] + return m.mp + m.dv[i] == m.v[i] ** 2 + m.v[i] + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return m.dw[i, j] + m.p == m.w[i, j]**2 + m.w[i, j] + return m.dw[i, j] + m.p == m.w[i, j] ** 2 + m.w[i, j] + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -336,22 +352,23 @@ def _deqw(m, i, j): # the simulator generates the correct RHS expression @unittest.skipIf(not scipy_available, "Scipy is not available") def test_separable_diffeq_case6(self): - m = self.m m.w = Var(m.t, m.s) m.dw = DerivativeVar(m.w) m.p = Param(initialize=5) m.mp = Param(initialize=5, mutable=True) m.y = Var() - + t = IndexTemplate(m.t) def _deqv(m, i): - return m.v[i]**2 + m.v[i] == m.dv[i] + m.y + return m.v[i] ** 2 + m.v[i] == m.dv[i] + m.y + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return m.w[i, j]**2 + m.w[i, j] == m.y + m.dw[i, j] + return m.w[i, j] ** 2 + m.w[i, j] == m.y + m.dw[i, j] + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -371,11 +388,13 @@ def _deqw(m, i, j): m.del_component('deqw_index') def _deqv(m, i): - return m.v[i]**2 + m.v[i] == m.mp + m.dv[i] + return m.v[i] ** 2 + m.v[i] == m.mp + m.dv[i] + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return m.w[i, j]**2 + m.w[i, j] == m.dw[i, j] + m.p + return m.w[i, j] ** 2 + m.w[i, j] == m.dw[i, j] + m.p + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -403,22 +422,23 @@ def _deqw(m, i, j): # the simulator generates the correct RHS expression @unittest.skipIf(not scipy_available, "Scipy is not available") def test_separable_diffeq_case8(self): - m = self.m m.w = Var(m.t, m.s) m.dw = DerivativeVar(m.w) m.p = Param(initialize=5) m.mp = Param(initialize=5, mutable=True) m.y = Var() - + t = IndexTemplate(m.t) def _deqv(m, i): - return -m.dv[i] == m.v[i]**2 + m.v[i] + return -m.dv[i] == m.v[i] ** 2 + m.v[i] + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return -m.dw[i, j] == m.w[i, j]**2 + m.w[i, j] + return -m.dw[i, j] == m.w[i, j] ** 2 + m.w[i, j] + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -441,22 +461,23 @@ def _deqw(m, i, j): # the simulator generates the correct RHS expression @unittest.skipIf(not scipy_available, "Scipy is not available") def test_separable_diffeq_case9(self): - m = self.m m.w = Var(m.t, m.s) m.dw = DerivativeVar(m.w) m.p = Param(initialize=5) m.mp = Param(initialize=5, mutable=True) m.y = Var() - + t = IndexTemplate(m.t) def _deqv(m, i): - return m.v[i]**2 + m.v[i] == -m.dv[i] + return m.v[i] ** 2 + m.v[i] == -m.dv[i] + m.deqv = Constraint(m.t, rule=_deqv) def _deqw(m, i, j): - return m.w[i, j]**2 + m.w[i, j] == -m.dw[i, j] + return m.w[i, j] ** 2 + m.w[i, j] == -m.dw[i, j] + m.deqw = Constraint(m.t, m.s, rule=_deqw) mysim = Simulator(m) @@ -479,19 +500,20 @@ def _deqw(m, i, j): # single index @unittest.skipIf(not scipy_available, "Scipy is not available") def test_sim_initialization_single_index(self): - m = self.m m.w = Var(m.t) m.dw = DerivativeVar(m.w) t = IndexTemplate(m.t) - + def _deq1(m, i): return m.dv[i] == m.v[i] + m.deq1 = Constraint(m.t, rule=_deq1) def _deq2(m, i): return m.dw[i] == m.v[i] + m.deq2 = Constraint(m.t, rule=_deq2) mysim = Simulator(m) @@ -507,14 +529,10 @@ def _deq2(m, i): self.assertTrue(_GetItemIndexer(m.v[t]) in mysim._templatemap) self.assertFalse(_GetItemIndexer(m.w[t]) in mysim._templatemap) self.assertEqual(len(mysim._rhsdict), 2) - self.assertTrue( - isinstance(mysim._rhsdict[_GetItemIndexer(m.dv[t])], Param)) - self.assertEqual( - mysim._rhsdict[_GetItemIndexer(m.dv[t])].name, "'v[{t}]'") - self.assertTrue( - isinstance(mysim._rhsdict[_GetItemIndexer(m.dw[t])], Param)) - self.assertEqual( - mysim._rhsdict[_GetItemIndexer(m.dw[t])].name, "'v[{t}]'") + self.assertTrue(isinstance(mysim._rhsdict[_GetItemIndexer(m.dv[t])], Param)) + self.assertEqual(mysim._rhsdict[_GetItemIndexer(m.dv[t])].name, "'v[{t}]'") + self.assertTrue(isinstance(mysim._rhsdict[_GetItemIndexer(m.dw[t])], Param)) + self.assertEqual(mysim._rhsdict[_GetItemIndexer(m.dw[t])].name, "'v[{t}]'") self.assertEqual(len(mysim._rhsfun(0, [0, 0])), 2) self.assertIsNone(mysim._tsim) self.assertIsNone(mysim._simsolution) @@ -527,7 +545,6 @@ def _deq2(m, i): # two indexing sets @unittest.skipIf(not scipy_available, "Scipy is not available") def test_sim_initialization_multi_index(self): - m = self.m m.w1 = Var(m.t, m.s) m.dw1 = DerivativeVar(m.w1) @@ -539,17 +556,20 @@ def test_sim_initialization_multi_index(self): m.dw3 = DerivativeVar(m.w3) t = IndexTemplate(m.t) - + def _deq1(m, t, s): return m.dw1[t, s] == m.w1[t, s] + m.deq1 = Constraint(m.t, m.s, rule=_deq1) def _deq2(m, s, t): return m.dw2[s, t] == m.w2[s, t] + m.deq2 = Constraint(m.s, m.t, rule=_deq2) def _deq3(m, i, t, s): return m.dw3[i, t, s] == m.w1[t, s] + m.w2[i + 1, t] + m.deq3 = Constraint([0, 1], m.t, m.s, rule=_deq3) mysim = Simulator(m) @@ -580,28 +600,32 @@ def _deq3(m, i, t, s): self.assertFalse(_GetItemIndexer(m.w3[1, t, 3]) in mysim._templatemap) self.assertEqual(len(mysim._rhsdict), 12) + self.assertTrue(isinstance(mysim._rhsdict[_GetItemIndexer(m.dw1[t, 1])], Param)) + self.assertTrue(isinstance(mysim._rhsdict[_GetItemIndexer(m.dw1[t, 3])], Param)) + self.assertTrue(isinstance(mysim._rhsdict[_GetItemIndexer(m.dw2[1, t])], Param)) + self.assertTrue(isinstance(mysim._rhsdict[_GetItemIndexer(m.dw2[3, t])], Param)) self.assertTrue( - isinstance(mysim._rhsdict[_GetItemIndexer(m.dw1[t, 1])], Param)) - self.assertTrue( - isinstance(mysim._rhsdict[_GetItemIndexer(m.dw1[t, 3])], Param)) + isinstance( + mysim._rhsdict[_GetItemIndexer(m.dw3[0, t, 1])], EXPR.SumExpression + ) + ) self.assertTrue( - isinstance(mysim._rhsdict[_GetItemIndexer(m.dw2[1, t])], Param)) - self.assertTrue( - isinstance(mysim._rhsdict[_GetItemIndexer(m.dw2[3, t])], Param)) - self.assertTrue( - isinstance(mysim._rhsdict[_GetItemIndexer(m.dw3[0, t, 1])], - EXPR.SumExpression)) - self.assertTrue( - isinstance(mysim._rhsdict[_GetItemIndexer(m.dw3[1, t, 3])], - EXPR.SumExpression)) + isinstance( + mysim._rhsdict[_GetItemIndexer(m.dw3[1, t, 3])], EXPR.SumExpression + ) + ) self.assertEqual( - mysim._rhsdict[_GetItemIndexer(m.dw1[t, 1])].name, "'w1[{t},1]'") + mysim._rhsdict[_GetItemIndexer(m.dw1[t, 1])].name, "'w1[{t},1]'" + ) self.assertEqual( - mysim._rhsdict[_GetItemIndexer(m.dw1[t, 3])].name, "'w1[{t},3]'") + mysim._rhsdict[_GetItemIndexer(m.dw1[t, 3])].name, "'w1[{t},3]'" + ) self.assertEqual( - mysim._rhsdict[_GetItemIndexer(m.dw2[1, t])].name, "'w2[1,{t}]'") + mysim._rhsdict[_GetItemIndexer(m.dw2[1, t])].name, "'w2[1,{t}]'" + ) self.assertEqual( - mysim._rhsdict[_GetItemIndexer(m.dw2[3, t])].name, "'w2[3,{t}]'") + mysim._rhsdict[_GetItemIndexer(m.dw2[3, t])].name, "'w2[3,{t}]'" + ) self.assertEqual(len(mysim._rhsfun(0, [0] * 12)), 12) self.assertIsNone(mysim._tsim) @@ -618,7 +642,6 @@ def _deq3(m, i, t, s): # multi-dimensional and multiple indexing sets @unittest.skipIf(not scipy_available, "Scipy is not available") def test_sim_initialization_multi_index2(self): - m = self.m m.s2 = Set(initialize=[(1, 1), (2, 2)]) m.w1 = Var(m.t, m.s2) @@ -631,17 +654,20 @@ def test_sim_initialization_multi_index2(self): m.dw3 = DerivativeVar(m.w3) t = IndexTemplate(m.t) - + def _deq1(m, t, i, j): return m.dw1[t, i, j] == m.w1[t, i, j] + m.deq1 = Constraint(m.t, m.s2, rule=_deq1) def _deq2(m, *idx): return m.dw2[idx] == m.w2[idx] + m.deq2 = Constraint(m.s2, m.t, rule=_deq2) def _deq3(m, i, t, j, k): return m.dw3[i, t, j, k] == m.w1[t, j, k] + m.w2[j, k, t] + m.deq3 = Constraint([0, 1], m.t, m.s2, rule=_deq3) mysim = Simulator(m) @@ -668,34 +694,44 @@ def _deq3(m, i, t, j, k): self.assertTrue(_GetItemIndexer(m.w1[t, 2, 2]) in mysim._templatemap) self.assertTrue(_GetItemIndexer(m.w2[1, 1, t]) in mysim._templatemap) self.assertTrue(_GetItemIndexer(m.w2[2, 2, t]) in mysim._templatemap) - self.assertFalse(_GetItemIndexer(m.w3[0, t, 1, 1]) in - mysim._templatemap) - self.assertFalse(_GetItemIndexer(m.w3[1, t, 2, 2]) in - mysim._templatemap) + self.assertFalse(_GetItemIndexer(m.w3[0, t, 1, 1]) in mysim._templatemap) + self.assertFalse(_GetItemIndexer(m.w3[1, t, 2, 2]) in mysim._templatemap) self.assertEqual(len(mysim._rhsdict), 8) - self.assertTrue(isinstance( - mysim._rhsdict[_GetItemIndexer(m.dw1[t, 1, 1])], Param)) - self.assertTrue(isinstance( - mysim._rhsdict[_GetItemIndexer(m.dw1[t, 2, 2])], Param)) - self.assertTrue(isinstance( - mysim._rhsdict[_GetItemIndexer(m.dw2[1, 1, t])], Param)) - self.assertTrue(isinstance( - mysim._rhsdict[_GetItemIndexer(m.dw2[2, 2, t])], Param)) - self.assertTrue(isinstance( - mysim._rhsdict[_GetItemIndexer(m.dw3[0, t, 1, 1])], - EXPR.SumExpression)) - self.assertTrue(isinstance( - mysim._rhsdict[_GetItemIndexer(m.dw3[1, t, 2, 2])], - EXPR.SumExpression)) - self.assertEqual(mysim._rhsdict[_GetItemIndexer(m.dw1[t, 1, 1])].name, - "'w1[{t},1,1]'") - self.assertEqual(mysim._rhsdict[_GetItemIndexer(m.dw1[t, 2, 2])].name, - "'w1[{t},2,2]'") - self.assertEqual(mysim._rhsdict[_GetItemIndexer(m.dw2[1, 1, t])].name, - "'w2[1,1,{t}]'") - self.assertEqual(mysim._rhsdict[_GetItemIndexer(m.dw2[2, 2, t])].name, - "'w2[2,2,{t}]'") + self.assertTrue( + isinstance(mysim._rhsdict[_GetItemIndexer(m.dw1[t, 1, 1])], Param) + ) + self.assertTrue( + isinstance(mysim._rhsdict[_GetItemIndexer(m.dw1[t, 2, 2])], Param) + ) + self.assertTrue( + isinstance(mysim._rhsdict[_GetItemIndexer(m.dw2[1, 1, t])], Param) + ) + self.assertTrue( + isinstance(mysim._rhsdict[_GetItemIndexer(m.dw2[2, 2, t])], Param) + ) + self.assertTrue( + isinstance( + mysim._rhsdict[_GetItemIndexer(m.dw3[0, t, 1, 1])], EXPR.SumExpression + ) + ) + self.assertTrue( + isinstance( + mysim._rhsdict[_GetItemIndexer(m.dw3[1, t, 2, 2])], EXPR.SumExpression + ) + ) + self.assertEqual( + mysim._rhsdict[_GetItemIndexer(m.dw1[t, 1, 1])].name, "'w1[{t},1,1]'" + ) + self.assertEqual( + mysim._rhsdict[_GetItemIndexer(m.dw1[t, 2, 2])].name, "'w1[{t},2,2]'" + ) + self.assertEqual( + mysim._rhsdict[_GetItemIndexer(m.dw2[1, 1, t])].name, "'w2[1,1,{t}]'" + ) + self.assertEqual( + mysim._rhsdict[_GetItemIndexer(m.dw2[2, 2, t])].name, "'w2[2,2,{t}]'" + ) self.assertEqual(len(mysim._rhsfun(0, [0] * 8)), 8) self.assertIsNone(mysim._tsim) @@ -711,8 +747,7 @@ def _deq3(m, i, t, j, k): # Testing the Simulator construction on un-supported models and # components with a single indexing set def test_non_supported_single_index(self): - - # Can't simulate a model with no ContinuousSet + # Can't simulate a model with no ContinuousSet m = ConcreteModel() with self.assertRaises(DAE_Error): Simulator(m) @@ -723,31 +758,33 @@ def test_non_supported_single_index(self): m.t = ContinuousSet(bounds=(0, 5)) with self.assertRaises(DAE_Error): Simulator(m) - + # Can't simulate a model with no Derivatives m = ConcreteModel() m.t = ContinuousSet(bounds=(0, 10)) with self.assertRaises(DAE_Error): Simulator(m) - # Can't simulate a model with multiple RHS for a derivative + # Can't simulate a model with multiple RHS for a derivative m = self.m def _diffeq(m, t): - return m.dv[t] == m.v[t]**2 + m.v[t] + return m.dv[t] == m.v[t] ** 2 + m.v[t] + m.con1 = Constraint(m.t, rule=_diffeq) m.con2 = Constraint(m.t, rule=_diffeq) with self.assertRaises(DAE_Error): Simulator(m) m.del_component('con1') m.del_component('con2') - + # Can't simulate a model with multiple derivatives in an # equation m = self.m def _diffeq(m, t): - return m.dv[t] == m.dv[t] + m.v[t]**2 + return m.dv[t] == m.dv[t] + m.v[t] ** 2 + m.con1 = Constraint(m.t, rule=_diffeq) with self.assertRaises(DAE_Error): Simulator(m) @@ -757,16 +794,16 @@ def _diffeq(m, t): # components with multiple indexing sets @unittest.skipIf(not scipy_available, "Scipy is not available") def test_non_supported_multi_index(self): - m = self.m m.v2 = Var(m.t, m.s) m.v3 = Var(m.s, m.t) m.dv2 = DerivativeVar(m.v2) m.dv3 = DerivativeVar(m.v3) - # Can't simulate a model with multiple RHS for a derivative + # Can't simulate a model with multiple RHS for a derivative def _diffeq(m, t, s): - return m.dv2[t, s] == m.v2[t, s]**2 + m.v2[t, s] + return m.dv2[t, s] == m.v2[t, s] ** 2 + m.v2[t, s] + m.con1 = Constraint(m.t, m.s, rule=_diffeq) m.con2 = Constraint(m.t, m.s, rule=_diffeq) with self.assertRaises(DAE_Error): @@ -777,7 +814,8 @@ def _diffeq(m, t, s): m.del_component('con2_index') def _diffeq(m, s, t): - return m.dv3[s, t] == m.v3[s, t]**2 + m.v3[s, t] + return m.dv3[s, t] == m.v3[s, t] ** 2 + m.v3[s, t] + m.con1 = Constraint(m.s, m.t, rule=_diffeq) m.con2 = Constraint(m.s, m.t, rule=_diffeq) with self.assertRaises(DAE_Error): @@ -790,7 +828,8 @@ def _diffeq(m, s, t): # Can't simulate a model with multiple derivatives in an # equation def _diffeq(m, t, s): - return m.dv2[t, s] == m.dv2[t, s] + m.v2[t, s]**2 + return m.dv2[t, s] == m.dv2[t, s] + m.v2[t, s] ** 2 + m.con1 = Constraint(m.t, m.s, rule=_diffeq) with self.assertRaises(DAE_Error): Simulator(m) @@ -798,7 +837,8 @@ def _diffeq(m, t, s): m.del_component('con1_index') def _diffeq(m, s, t): - return m.dv3[s, t] == m.dv3[s, t] + m.v3[s, t]**2 + return m.dv3[s, t] == m.dv3[s, t] + m.v3[s, t] ** 2 + m.con1 = Constraint(m.s, m.t, rule=_diffeq) with self.assertRaises(DAE_Error): Simulator(m) @@ -807,12 +847,12 @@ def _diffeq(m, s, t): # Testing the Simulator using scipy on unsupported models def test_scipy_unsupported(self): - m = self.m m.a = Var(m.t) def _diffeq(m, t): - return 0 == m.v[t]**2 + m.a[t] + return 0 == m.v[t] ** 2 + m.a[t] + m.con = Constraint(m.t, rule=_diffeq) # Can't simulate a model with algebraic equations using scipy @@ -824,12 +864,12 @@ def _diffeq(m, t): # variables @unittest.skipIf(not scipy_available, "Scipy is not available") def test_time_indexed_algebraic(self): - m = self.m m.a = Var(m.t) def _diffeq(m, t): - return m.dv[t] == m.v[t]**2 + m.a[t] + return m.dv[t] == m.v[t] ** 2 + m.a[t] + m.con = Constraint(m.t, rule=_diffeq) mysim = Simulator(m) @@ -844,7 +884,6 @@ def _diffeq(m, t): # indexed by time and other indexing sets @unittest.skipIf(not scipy_available, "Scipy is not available") def test_time_multi_indexed_algebraic(self): - m = self.m m.v2 = Var(m.t, m.s) m.v3 = Var(m.s, m.t) @@ -854,13 +893,15 @@ def test_time_multi_indexed_algebraic(self): m.a2 = Var(m.t, m.s) def _diffeq(m, t, s): - return m.dv2[t, s] == m.v2[t, s]**2 + m.a2[t, s] + return m.dv2[t, s] == m.v2[t, s] ** 2 + m.a2[t, s] + m.con = Constraint(m.t, m.s, rule=_diffeq) m.a3 = Var(m.s, m.t) def _diffeq2(m, s, t): - return m.dv3[s, t] == m.v3[s, t]**2 + m.a3[s, t] + return m.dv3[s, t] == m.v3[s, t] ** 2 + m.a3[s, t] + m.con2 = Constraint(m.s, m.t, rule=_diffeq2) mysim = Simulator(m) t = IndexTemplate(m.t) @@ -879,7 +920,6 @@ def _diffeq2(m, s, t): # appearing in RHS of a differential equation @unittest.skipIf(not casadi_available, "casadi not available") def test_nonRHS_vars(self): - m = self.m m.v2 = Var(m.t) m.dv2 = DerivativeVar(m.v2) @@ -888,17 +928,20 @@ def test_nonRHS_vars(self): def _con(m, t): return m.dv2[t] == 10 + m.p + m.con = Constraint(m.t, rule=_con) - mysim = Simulator(m,package='casadi') + mysim = Simulator(m, package='casadi') self.assertEqual(len(mysim._templatemap), 1) self.assertEqual(mysim._diffvars[0], _GetItemIndexer(m.v2[t])) m.del_component('con') + class TestExpressionCheckers(unittest.TestCase): """ Class for testing the pyomo.DAE simulator expression checkers. """ + def setUp(self): """ Setting up testing model @@ -910,7 +953,6 @@ def setUp(self): # Testing checker for GetItemExpression objects def test_check_getitemexpression(self): - m = self.m t = IndexTemplate(m.t) @@ -974,14 +1016,14 @@ def test_check_productexpression(self): temp = _check_productexpression(e, 0) self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) - self.assertIs(m.mp, temp[1].arg(1)) # Reciprocal + self.assertIs(m.mp, temp[1].arg(1)) # Reciprocal self.assertIs(e.arg(1), temp[1].arg(0)) e = m.v[t] == m.mp * m.dv[t] temp = _check_productexpression(e, 1) self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) - self.assertIs(m.mp, temp[1].arg(1)) # Reciprocal + self.assertIs(m.mp, temp[1].arg(1)) # Reciprocal self.assertIs(e.arg(0), temp[1].arg(0)) # Check multiplication by var @@ -990,7 +1032,7 @@ def test_check_productexpression(self): self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) self.assertIs(e.arg(1), temp[1].arg(0).arg(0)) - self.assertIs(m.z, temp[1].arg(0).arg(1)) + self.assertIs(m.z, temp[1].arg(0).arg(1)) e = m.v[t] == m.y * m.dv[t] / m.z temp = _check_productexpression(e, 1) @@ -1004,16 +1046,16 @@ def test_check_productexpression(self): temp = _check_productexpression(e, 0) self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) - self.assertIs(m.y, temp[1].arg(0)) + self.assertIs(m.y, temp[1].arg(0)) self.assertIs(e.arg(1), temp[1].arg(1).arg(0)) e = m.mp == m.y / (m.dv[t] * m.z) temp = _check_productexpression(e, 1) self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) - self.assertIs(m.y, temp[1].arg(0)) + self.assertIs(m.y, temp[1].arg(0)) self.assertIs(e.arg(0), temp[1].arg(1).arg(0)) - + # Check expression with no DerivativeVar e = m.v[t] * m.y / m.z == m.v[t] * m.y / m.z temp = _check_productexpression(e, 0) @@ -1023,7 +1065,6 @@ def test_check_productexpression(self): # Testing the checker for NegationExpressions def test_check_negationexpression(self): - m = self.m t = IndexTemplate(m.t) @@ -1051,11 +1092,9 @@ def test_check_negationexpression(self): temp = _check_negationexpression(e, 1) self.assertIsNone(temp) - # Testing the checker for SumExpressions def test_check_viewsumexpression(self): - - m = self.m + m = self.m m.p = Param(initialize=5) m.mp = Param(initialize=5, mutable=True) m.y = Var() @@ -1066,7 +1105,7 @@ def test_check_viewsumexpression(self): temp = _check_viewsumexpression(e, 0) self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.SumExpression) - self.assertIs(type(temp[1].arg(0)), EXPR.GetItemExpression) + self.assertIs(type(temp[1].arg(0)), EXPR.Numeric_GetItemExpression) self.assertIs(type(temp[1].arg(1)), EXPR.MonomialTermExpression) self.assertEqual(-1, temp[1].arg(1).arg(0)) self.assertIs(m.y, temp[1].arg(1).arg(1)) @@ -1078,7 +1117,7 @@ def test_check_viewsumexpression(self): temp = _check_viewsumexpression(e, 1) self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.SumExpression) - self.assertIs(type(temp[1].arg(0)), EXPR.GetItemExpression) + self.assertIs(type(temp[1].arg(0)), EXPR.Numeric_GetItemExpression) self.assertIs(type(temp[1].arg(1)), EXPR.MonomialTermExpression) self.assertIs(m.y, temp[1].arg(1).arg(1)) self.assertIs(type(temp[1].arg(2)), EXPR.MonomialTermExpression) @@ -1089,7 +1128,7 @@ def test_check_viewsumexpression(self): self.assertIs(m.dv, temp[0].arg(0)) self.assertIs(type(temp[1]), EXPR.DivisionExpression) - self.assertIs(type(temp[1].arg(0).arg(0)), EXPR.GetItemExpression) + self.assertIs(type(temp[1].arg(0).arg(0)), EXPR.Numeric_GetItemExpression) self.assertIs(m.y, temp[1].arg(0).arg(1).arg(1)) self.assertIs(m.z, temp[1].arg(0).arg(2).arg(1)) @@ -1097,6 +1136,7 @@ def test_check_viewsumexpression(self): temp = _check_viewsumexpression(e, 0) self.assertIs(temp, None) + @unittest.skipIf(not casadi_available, "Casadi is not available") class TestCasadiSubstituters(unittest.TestCase): """ @@ -1116,7 +1156,6 @@ def setUp(self): # Testing substituter for replacing GetItemExpression objects with # CasADi sym objects def test_substitute_casadi_sym(self): - m = self.m m.y = Var() t = IndexTemplate(m.t) @@ -1136,23 +1175,21 @@ def test_substitute_casadi_sym(self): # Testing substituter for replacing Pyomo intrinsic functions with # CasADi intrinsic functions def test_substitute_casadi_intrinsic1(self): - m = self.m m.y = Var() t = IndexTemplate(m.t) - e = m.v[t] + e = m.v[t] templatemap = {} e3 = substitute_pyomo2casadi(e, templatemap) self.assertIs(type(e3), casadi.SX) - + m.del_component('y') # Testing substituter for replacing Pyomo intrinsic functions with # CasADi intrinsic functions def test_substitute_casadi_intrinsic2(self): - m = self.m m.y = Var() t = IndexTemplate(m.t) @@ -1170,12 +1207,11 @@ def test_substitute_casadi_intrinsic2(self): # Testing substituter for replacing Pyomo intrinsic functions with # CasADi intrinsic functions def test_substitute_casadi_intrinsic3(self): - m = self.m m.y = Var() t = IndexTemplate(m.t) - e = sin(m.dv[t] + m.v[t]) + log(m.v[t] * m.y + m.dv[t]**2) + e = sin(m.dv[t] + m.v[t]) + log(m.v[t] * m.y + m.dv[t] ** 2) templatemap = {} e3 = substitute_pyomo2casadi(e, templatemap) @@ -1187,7 +1223,6 @@ def test_substitute_casadi_intrinsic3(self): # Testing substituter for replacing Pyomo intrinsic functions with # CasADi intrinsic functions def test_substitute_casadi_intrinsic4(self): - m = self.m m.y = Var() t = IndexTemplate(m.t) @@ -1203,7 +1238,7 @@ def test_substitute_casadi_intrinsic4(self): m.del_component('y') -class TestSimulationInterface(): +class TestSimulationInterface: """ Class to test running a simulation """ @@ -1220,7 +1255,6 @@ def _store_results(self, model, profiles): return results def _test(self, tname): - bfile = join(currdir, tname + '.' + self.sim_mod + '.json') # create model @@ -1231,8 +1265,7 @@ def _test(self, tname): sim = Simulator(m, package=self.sim_mod) if hasattr(m, 'var_input'): - tsim, profiles = sim.simulate(numpoints=100, - varying_inputs=m.var_input) + tsim, profiles = sim.simulate(numpoints=100, varying_inputs=m.var_input) else: tsim, profiles = sim.simulate(numpoints=100) @@ -1256,7 +1289,6 @@ def _test(self, tname): self.assertStructuredAlmostEqual(results, baseline, abstol=1e-2) def _test_disc_first(self, tname): - bfile = join(currdir, tname + '.' + self.sim_mod + '.json') # create model @@ -1271,8 +1303,7 @@ def _test_disc_first(self, tname): sim = Simulator(m, package=self.sim_mod) if hasattr(m, 'var_input'): - tsim, profiles = sim.simulate(numpoints=100, - varying_inputs=m.var_input) + tsim, profiles = sim.simulate(numpoints=100, varying_inputs=m.var_input) else: tsim, profiles = sim.simulate(numpoints=100) @@ -1351,5 +1382,4 @@ def test_dae_multindex_example2(self): if __name__ == "__main__": - unittest.main() diff --git a/pyomo/dae/utilities.py b/pyomo/dae/utilities.py index fc0555ac93b..e48c66e003d 100644 --- a/pyomo/dae/utilities.py +++ b/pyomo/dae/utilities.py @@ -9,597 +9,1374 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -# If a user doesn't have numpy the needed collocation values can be read from +# If a user doesn't have numpy the needed collocation values can be read from # the following dictionaries for up to 10 collocation points. radau_tau_dict = dict() radau_adot_dict = dict() radau_adotdot_dict = dict() -radau_tau_dict[1] = [0.00000000000000000, 1.00000000000000000, ] -radau_adot_dict[1] = [[-1.00000000000000000, -1.00000000000000000, ], - [1.00000000000000000, 1.00000000000000000, ]] -radau_adotdot_dict[1] = [[0.00000000000000000, 0.00000000000000000, ], - [0.00000000000000000, 0.00000000000000000, ]] +radau_tau_dict[1] = [0.00000000000000000, 1.00000000000000000] +radau_adot_dict[1] = [ + [-1.00000000000000000, -1.00000000000000000], + [1.00000000000000000, 1.00000000000000000], +] +radau_adotdot_dict[1] = [ + [0.00000000000000000, 0.00000000000000000], + [0.00000000000000000, 0.00000000000000000], +] -radau_tau_dict[2] = [0.00000000000000000, 0.33333333333333331, - 1.00000000000000000, ] -radau_adot_dict[2] = [[-4.00000000000000000, -2.00000000000000000, - 2.00000000000000000, ], - [4.49999999999999911, 1.50000000000000000, - -4.49999999999999911, ], - [-0.49999999999999994, 0.49999999999999983, - 2.49999999999999956, ]] -radau_adotdot_dict[2] = [[6.00000000000000000, 6.00000000000000000, - 6.00000000000000000, ], - [-8.99999999999999822, -8.99999999999999822, - -8.99999999999999822, ], - [2.99999999999999956, 2.99999999999999956, - 2.99999999999999956, ]] +radau_tau_dict[2] = [0.00000000000000000, 0.33333333333333331, 1.00000000000000000] +radau_adot_dict[2] = [ + [-4.00000000000000000, -2.00000000000000000, 2.00000000000000000], + [4.49999999999999911, 1.50000000000000000, -4.49999999999999911], + [-0.49999999999999994, 0.49999999999999983, 2.49999999999999956], +] +radau_adotdot_dict[2] = [ + [6.00000000000000000, 6.00000000000000000, 6.00000000000000000], + [-8.99999999999999822, -8.99999999999999822, -8.99999999999999822], + [2.99999999999999956, 2.99999999999999956, 2.99999999999999956], +] -radau_tau_dict[3] = [0.00000000000000000, 0.15505102572168220, - 0.64494897427831788, 1.00000000000000000, ] -radau_adot_dict[3] = [[-9.00000000000000000, -4.13938769133981310, - 1.73938769133981630, -2.99999999999999289, ], - [10.04880939982741417, 3.22474487139158850, - -3.56784008469040437, 5.53197264742180472, ], - [-1.38214273316074876, 1.16784008469040512, - 0.77525512860840951, -7.53197264742180739, ], - [0.33333333333333343, -0.25319726474218079, - 1.05319726474218167, 5.00000000000000089, ]] -radau_adotdot_dict[3] = [[36.00000000000000000, 26.69693845669907262, - -2.69693845669906551, -23.99999999999998579, ], - [-51.25918289415326967, -36.76428546632148908, - 9.03367350481121889, 42.22550938934205078, ], - [20.59251622748660893, 12.29965982852211681, - -13.90238120034517166, -32.89217605600872218, ], - [-5.33333333333333393, -2.23231281889968969, - 7.56564615223302539, 14.66666666666666963, ]] +radau_tau_dict[3] = [ + 0.00000000000000000, + 0.15505102572168220, + 0.64494897427831788, + 1.00000000000000000, +] +radau_adot_dict[3] = [ + [ + -9.00000000000000000, + -4.13938769133981310, + 1.73938769133981630, + -2.99999999999999289, + ], + [ + 10.04880939982741417, + 3.22474487139158850, + -3.56784008469040437, + 5.53197264742180472, + ], + [ + -1.38214273316074876, + 1.16784008469040512, + 0.77525512860840951, + -7.53197264742180739, + ], + [ + 0.33333333333333343, + -0.25319726474218079, + 1.05319726474218167, + 5.00000000000000089, + ], +] +radau_adotdot_dict[3] = [ + [ + 36.00000000000000000, + 26.69693845669907262, + -2.69693845669906551, + -23.99999999999998579, + ], + [ + -51.25918289415326967, + -36.76428546632148908, + 9.03367350481121889, + 42.22550938934205078, + ], + [ + 20.59251622748660893, + 12.29965982852211681, + -13.90238120034517166, + -32.89217605600872218, + ], + [ + -5.33333333333333393, + -2.23231281889968969, + 7.56564615223302539, + 14.66666666666666963, + ], +] -radau_tau_dict[4] = [0.00000000000000000, 0.08858795951270398, - 0.40946686444073388, 0.78765946176084856, - 1.00000000000000000, ] -radau_adot_dict[4] = [[-16.00000000000000000, -7.15559202347522572, - 2.50822508194848126, -1.96487795643240304, - 4.00000000000000000, ], - [17.80758523451450515, 5.64410787595007868, - -5.04921463839143314, 3.49246615862542953, - -6.92348825644540966, ], - [-2.37791303706810631, 1.92350727705471236, - 1.22110002889472469, -3.98451789578249116, - 6.59523766962805347, ], - [0.82032780255359539, -0.58590148210381310, - 1.75468098876082568, 0.63479209515516966, - -12.17174941318265979, ], - [-0.25000000000000150, 0.17387835257424678, - -0.43479146121258294, 1.82213759843428735, - 8.50000000000003375, ]] -radau_adotdot_dict[4] = [[120.00000000000002842, 80.77386659356440646, - -6.12558744008319422, 2.49457798937631026, - 59.99999999999991473, ], - [-167.81099920936088665, -108.53014807707648970, - 19.13996123354024803, -5.55774820017956017, - -102.50641734094637059, ], - [64.47853847308765296, 33.70195692546346322, - -24.04456776658558681, 16.01268812735257541, - 89.78248626844741409, ], - [-24.16753926372676275, -8.29000996475936880, - 13.56458045001081913, -32.42528415633811534, - -92.27606892750115719, ], - [7.50000000000004352, 2.34433452280806431, - -2.53438647688229590, 19.47576623978871879, - 45.00000000000021316, ]] +radau_tau_dict[4] = [ + 0.00000000000000000, + 0.08858795951270398, + 0.40946686444073388, + 0.78765946176084856, + 1.00000000000000000, +] +radau_adot_dict[4] = [ + [ + -16.00000000000000000, + -7.15559202347522572, + 2.50822508194848126, + -1.96487795643240304, + 4.00000000000000000, + ], + [ + 17.80758523451450515, + 5.64410787595007868, + -5.04921463839143314, + 3.49246615862542953, + -6.92348825644540966, + ], + [ + -2.37791303706810631, + 1.92350727705471236, + 1.22110002889472469, + -3.98451789578249116, + 6.59523766962805347, + ], + [ + 0.82032780255359539, + -0.58590148210381310, + 1.75468098876082568, + 0.63479209515516966, + -12.17174941318265979, + ], + [ + -0.25000000000000150, + 0.17387835257424678, + -0.43479146121258294, + 1.82213759843428735, + 8.50000000000003375, + ], +] +radau_adotdot_dict[4] = [ + [ + 120.00000000000002842, + 80.77386659356440646, + -6.12558744008319422, + 2.49457798937631026, + 59.99999999999991473, + ], + [ + -167.81099920936088665, + -108.53014807707648970, + 19.13996123354024803, + -5.55774820017956017, + -102.50641734094637059, + ], + [ + 64.47853847308765296, + 33.70195692546346322, + -24.04456776658558681, + 16.01268812735257541, + 89.78248626844741409, + ], + [ + -24.16753926372676275, + -8.29000996475936880, + 13.56458045001081913, + -32.42528415633811534, + -92.27606892750115719, + ], + [ + 7.50000000000004352, + 2.34433452280806431, + -2.53438647688229590, + 19.47576623978871879, + 45.00000000000021316, + ], +] -radau_tau_dict[5] = [0.00000000000000000, 0.05710419611451772, - 0.27684301363812430, 0.58359043236891450, - 0.86024013565621971, 1.00000000000000000, ] -radau_adot_dict[5] = [[-24.99999999999998934, -11.03867924120895161, - 3.58306852250104768, -2.34417155790386644, - 2.28263550020568218, -4.99999999999998934, ], - [27.78093394406463190, 8.75592397793835531, - -7.16138072014532057, 4.12216524624339797, - -3.87866321972407846, 8.41242422359434627, ], - [-3.64147849804922608, 2.89194261538012576, - 1.80607772408358258, -4.49601712581350110, - 3.39315191806494454, -6.97025611665680067, ], - [1.25254772116912538, -0.87518639620026994, - 2.36379717606862361, 0.85676524539728360, - -5.18834090640715306, 8.77711420415049659, ], - [-0.59200316718453727, 0.39970520793996167, - -0.86590078028312090, 2.51832094921101479, - 0.58123305258075575, -18.21928231108803686, ], - [0.19999999999999910, -0.13370616384921521, - 0.27433807777519198, -0.65706275713435502, - 2.80998365527971972, 12.99999999999998046, ]] -radau_adotdot_dict[5] = [[299.99999999999988631, 193.30767250574382388, - -12.94260048470914626, 4.01680944012088048, - -2.65348639942789077, -120.00000000000011369, ], - [-416.05571460180254917, -256.99169401863719031, - 39.31282295335989829, -8.59570089776997293, - 5.14998177149959702, 200.87922525110633387, ], - [155.76675211023618317, 76.96489565450156078, - -45.97406446951362113, 21.61006933401930041, - -7.68796759900843085, -161.94936171559132276, ], - [-58.33482863565966170, -18.65076273063174028, - 23.95041306691950922, -35.27049804403287681, - 31.47710816272410739, 186.04880918852478544, ], - [28.22379112722612859, 7.99493810747577882, - -6.09624628722275475, 22.52106574808429329, - -69.76374346781608438, -212.97867272403951233, ], - [-9.59999999999995346, -2.62504951845222578, - 1.74967522116616792, -4.28174558042124787, - 43.47810753202831080, 107.99999999999987210, ]] +radau_tau_dict[5] = [ + 0.00000000000000000, + 0.05710419611451772, + 0.27684301363812430, + 0.58359043236891450, + 0.86024013565621971, + 1.00000000000000000, +] +radau_adot_dict[5] = [ + [ + -24.99999999999998934, + -11.03867924120895161, + 3.58306852250104768, + -2.34417155790386644, + 2.28263550020568218, + -4.99999999999998934, + ], + [ + 27.78093394406463190, + 8.75592397793835531, + -7.16138072014532057, + 4.12216524624339797, + -3.87866321972407846, + 8.41242422359434627, + ], + [ + -3.64147849804922608, + 2.89194261538012576, + 1.80607772408358258, + -4.49601712581350110, + 3.39315191806494454, + -6.97025611665680067, + ], + [ + 1.25254772116912538, + -0.87518639620026994, + 2.36379717606862361, + 0.85676524539728360, + -5.18834090640715306, + 8.77711420415049659, + ], + [ + -0.59200316718453727, + 0.39970520793996167, + -0.86590078028312090, + 2.51832094921101479, + 0.58123305258075575, + -18.21928231108803686, + ], + [ + 0.19999999999999910, + -0.13370616384921521, + 0.27433807777519198, + -0.65706275713435502, + 2.80998365527971972, + 12.99999999999998046, + ], +] +radau_adotdot_dict[5] = [ + [ + 299.99999999999988631, + 193.30767250574382388, + -12.94260048470914626, + 4.01680944012088048, + -2.65348639942789077, + -120.00000000000011369, + ], + [ + -416.05571460180254917, + -256.99169401863719031, + 39.31282295335989829, + -8.59570089776997293, + 5.14998177149959702, + 200.87922525110633387, + ], + [ + 155.76675211023618317, + 76.96489565450156078, + -45.97406446951362113, + 21.61006933401930041, + -7.68796759900843085, + -161.94936171559132276, + ], + [ + -58.33482863565966170, + -18.65076273063174028, + 23.95041306691950922, + -35.27049804403287681, + 31.47710816272410739, + 186.04880918852478544, + ], + [ + 28.22379112722612859, + 7.99493810747577882, + -6.09624628722275475, + 22.52106574808429329, + -69.76374346781608438, + -212.97867272403951233, + ], + [ + -9.59999999999995346, + -2.62504951845222578, + 1.74967522116616792, + -4.28174558042124787, + 43.47810753202831080, + 107.99999999999987210, + ], +] -radau_tau_dict[6] = [0.00000000000000000, 0.03980985705146874, - 0.19801341787360807, 0.43797481024738633, - 0.69546427335363914, 0.90146491420116948, - 1.00000000000000000, ] -radau_adot_dict[6] = [[-36.00000000000000711, -15.78653932217885725, - 4.92210694074628208, -2.96075238465032697, - 2.43407205775748992, -2.63456859759603645, - 6.00000000000010658, ], - [39.96978645997430846, 12.55970347629152073, - -9.80283871256804673, 5.18215783855881540, - -4.10823909345830174, 4.38578506340130758, - -9.94297742192795653, ], - [-5.18155025366218513, 4.07582598887629377, - 2.52508140796374825, -5.54452290952744065, - 3.49150290911372618, -3.46400504745112281, - 7.67606215723310026, ], - [1.76075790144825750, -1.21720380846425580, - 3.13222586264732650, 1.14161816684750228, - -5.06987875099333429, 3.95154245658764758, - -8.23273712821850268, ], - [-0.84940911985935841, 0.56623186694414962, - -1.15740999277439549, 2.97497625382130559, - 0.71894419189765457, -6.81053943889139468, - 11.63870727736906652, ], - [0.46708167876565621, -0.30710421225684736, - 0.58338219245412648, -1.17801932705885992, - 3.46004179608641227, 0.55465275700018646, - -25.63905488445393033, ], - [-0.16666666666666141, 0.10908601078800384, - -0.20254769846904672, 0.38454236200916614, - -0.92644311040306937, 4.01713280695099240, - 18.49999999999969447, ]] -radau_adotdot_dict[6] = [[630.00000000000011369, 396.54850560676482019, - -24.85744144817510914, 6.76009741970790401, - -3.49992393717195682, 2.92254147231687966, - 210.00000000000011369, ], - [-869.78996132627571569, -524.25860998479038244, - 74.42097200503803833, -14.19811474379628180, - 6.62453168299316530, -5.31473351050760812, - -347.17973033968110030, ], - [320.73627342386896544, 153.90868125004695344, - -84.06617913255860230, 33.55233528423758571, - -9.01718779800921766, 6.00595681392809411, - 264.87167990774685222, ], - [-118.73411607342019636, -36.68950585393311314, - 41.92433232904805607, -50.48791488764605617, - 32.08939067396651978, -12.66778060664290706, - -275.31458997144886780, ], - [58.71474561680227566, 15.95063527339483755, - -10.49847307491926074, 29.90013111545132318, - -57.34807867927669633, 58.56656747411289388, - 354.19642620195918425, ], - [-32.59360830764193651, -8.42709970780833828, - 4.60480310763264100, -7.77295210269013381, - 38.56768819963112804, -135.50588398238815557, - -428.24045246523792230, ], - [11.66666666666629837, 2.96739341632510722, - -1.52801378606603855, 2.24641791473642805, - -7.41642014213210032, 85.99333233918015651, - 221.66666666666105812, ]] +radau_tau_dict[6] = [ + 0.00000000000000000, + 0.03980985705146874, + 0.19801341787360807, + 0.43797481024738633, + 0.69546427335363914, + 0.90146491420116948, + 1.00000000000000000, +] +radau_adot_dict[6] = [ + [ + -36.00000000000000711, + -15.78653932217885725, + 4.92210694074628208, + -2.96075238465032697, + 2.43407205775748992, + -2.63456859759603645, + 6.00000000000010658, + ], + [ + 39.96978645997430846, + 12.55970347629152073, + -9.80283871256804673, + 5.18215783855881540, + -4.10823909345830174, + 4.38578506340130758, + -9.94297742192795653, + ], + [ + -5.18155025366218513, + 4.07582598887629377, + 2.52508140796374825, + -5.54452290952744065, + 3.49150290911372618, + -3.46400504745112281, + 7.67606215723310026, + ], + [ + 1.76075790144825750, + -1.21720380846425580, + 3.13222586264732650, + 1.14161816684750228, + -5.06987875099333429, + 3.95154245658764758, + -8.23273712821850268, + ], + [ + -0.84940911985935841, + 0.56623186694414962, + -1.15740999277439549, + 2.97497625382130559, + 0.71894419189765457, + -6.81053943889139468, + 11.63870727736906652, + ], + [ + 0.46708167876565621, + -0.30710421225684736, + 0.58338219245412648, + -1.17801932705885992, + 3.46004179608641227, + 0.55465275700018646, + -25.63905488445393033, + ], + [ + -0.16666666666666141, + 0.10908601078800384, + -0.20254769846904672, + 0.38454236200916614, + -0.92644311040306937, + 4.01713280695099240, + 18.49999999999969447, + ], +] +radau_adotdot_dict[6] = [ + [ + 630.00000000000011369, + 396.54850560676482019, + -24.85744144817510914, + 6.76009741970790401, + -3.49992393717195682, + 2.92254147231687966, + 210.00000000000011369, + ], + [ + -869.78996132627571569, + -524.25860998479038244, + 74.42097200503803833, + -14.19811474379628180, + 6.62453168299316530, + -5.31473351050760812, + -347.17973033968110030, + ], + [ + 320.73627342386896544, + 153.90868125004695344, + -84.06617913255860230, + 33.55233528423758571, + -9.01718779800921766, + 6.00595681392809411, + 264.87167990774685222, + ], + [ + -118.73411607342019636, + -36.68950585393311314, + 41.92433232904805607, + -50.48791488764605617, + 32.08939067396651978, + -12.66778060664290706, + -275.31458997144886780, + ], + [ + 58.71474561680227566, + 15.95063527339483755, + -10.49847307491926074, + 29.90013111545132318, + -57.34807867927669633, + 58.56656747411289388, + 354.19642620195918425, + ], + [ + -32.59360830764193651, + -8.42709970780833828, + 4.60480310763264100, + -7.77295210269013381, + 38.56768819963112804, + -135.50588398238815557, + -428.24045246523792230, + ], + [ + 11.66666666666629837, + 2.96739341632510722, + -1.52801378606603855, + 2.24641791473642805, + -7.41642014213210032, + 85.99333233918015651, + 221.66666666666105812, + ], +] -radau_tau_dict[7] = [0.00000000000000000, 0.02931642715978489, - 0.14807859966848425, 0.33698469028114536, - 0.55867151877159871, 0.76923386202996824, - 0.92694567131978900, 1.00000000000000000, ] -radau_adot_dict[7] = [[-49.00000000000001421, -21.39849085856454991, - 6.51502179852680996, -3.73823715601904638, - 2.82962981889280485, -2.61247344089161970, - 3.00318659221646556, -6.99999999999705835, ], - [54.37443689412862113, 17.05528430442161181, - -12.94898869881147618, 6.52679743370207177, - -4.76041564316764010, 4.32945101663698750, - -4.94362383351764834, 11.49545520510287133, ], - [-7.00002400425923099, 5.47529951218552391, - 3.37658514545231903, -6.91230492548291586, - 3.99086031809394370, -3.35352800168034548, - 3.70480267602441060, -8.51707242305759848, ], - [2.35566109198741191, -1.61858110519068976, - 4.05401350392539683, 1.48374693100539568, - -5.66068833365461455, 3.69061793186311915, - -3.74572843137643474, 8.38103130196178014, ], - [-1.13228906610625679, 0.74965412823858335, - -1.48631397600665593, 3.59460335445588974, - 0.89498029378042587, -6.03730918727312726, - 4.78166562576183818, -10.03344165195551696, ], - [0.64689132676740668, -0.42189137598304860, - 0.77285447377897387, -1.45021560122282223, - 3.73589939150200845, 0.64999738660694650, - -8.78338875592014467, 15.09439394298104631, ], - [-0.38753338537515553, 0.25105021424626478, - -0.44494694720083683, 0.76703844918100050, - -1.54197850254845936, 4.57730094140838428, - 0.53940593872494835, -34.42036637506053154, ], - [0.14285714285719683, -0.09232481935371889, - 0.16177470033544300, -0.27142848561999122, - 0.51171265709985936, -1.24405664667738725, - 5.44368018806711085, 25.00000000000752820, ]] -radau_adotdot_dict[7] = [[1176.00000000000113687, 729.91469055677328015, - -43.99705165440991550, 11.09319581520685460, - -5.06492585326554945, 3.39620181819555000, - -3.23987336608297483, -335.99999999998067324, ], - [-1619.20889513581641950, -961.80954755690038382, - 130.61915202258592217, -23.05926748673505244, - 9.46476140435538582, -6.07426097472375659, - 5.68160564288905334, 551.08748211418060237, ], - [591.45764413503638934, 278.97169877665686499, - -144.67589881629601223, 52.67022396165839382, - -12.29601631010507390, 6.43814372663587164, - -5.51652673198668708, -405.85864880032022484, ], - [-216.87396735187854802, -65.73232123018885886, - 70.29838369672336285, -76.03933271744909916, - 40.93682565375758031, -12.27856070330389571, - 8.65728797388706539, 393.77000318215408470, ], - [106.91082287427600761, 28.40345847945117441, - -17.27717360830020255, 43.09652520936204212, - -67.31347956551157097, 49.49615347850028257, - -20.80944990243477832, -456.20281388413985724, ], - [-61.71343933596624254, -15.53132892474618387, - 7.70766003923121445, -11.01359672248797494, - 42.17208432901723825, -92.57530312896015801, - 101.90967791837387324, 623.89986304565343289, ], - [37.14212052863845770, 9.12282838083766023, - -4.14735163396773032, 4.87648419403544153, - -11.13416017545824133, 63.99686169444935047, - -241.58643821502599280, -778.69588565766764532, ], - [-13.71428571429089871, -3.33947848188361718, - 1.47227995443322612, -1.62423225359393264, - 3.23491051720515799, -12.39923591080969345, - 154.90371668037039399, 408.00000000014506440, ]] +radau_tau_dict[7] = [ + 0.00000000000000000, + 0.02931642715978489, + 0.14807859966848425, + 0.33698469028114536, + 0.55867151877159871, + 0.76923386202996824, + 0.92694567131978900, + 1.00000000000000000, +] +radau_adot_dict[7] = [ + [ + -49.00000000000001421, + -21.39849085856454991, + 6.51502179852680996, + -3.73823715601904638, + 2.82962981889280485, + -2.61247344089161970, + 3.00318659221646556, + -6.99999999999705835, + ], + [ + 54.37443689412862113, + 17.05528430442161181, + -12.94898869881147618, + 6.52679743370207177, + -4.76041564316764010, + 4.32945101663698750, + -4.94362383351764834, + 11.49545520510287133, + ], + [ + -7.00002400425923099, + 5.47529951218552391, + 3.37658514545231903, + -6.91230492548291586, + 3.99086031809394370, + -3.35352800168034548, + 3.70480267602441060, + -8.51707242305759848, + ], + [ + 2.35566109198741191, + -1.61858110519068976, + 4.05401350392539683, + 1.48374693100539568, + -5.66068833365461455, + 3.69061793186311915, + -3.74572843137643474, + 8.38103130196178014, + ], + [ + -1.13228906610625679, + 0.74965412823858335, + -1.48631397600665593, + 3.59460335445588974, + 0.89498029378042587, + -6.03730918727312726, + 4.78166562576183818, + -10.03344165195551696, + ], + [ + 0.64689132676740668, + -0.42189137598304860, + 0.77285447377897387, + -1.45021560122282223, + 3.73589939150200845, + 0.64999738660694650, + -8.78338875592014467, + 15.09439394298104631, + ], + [ + -0.38753338537515553, + 0.25105021424626478, + -0.44494694720083683, + 0.76703844918100050, + -1.54197850254845936, + 4.57730094140838428, + 0.53940593872494835, + -34.42036637506053154, + ], + [ + 0.14285714285719683, + -0.09232481935371889, + 0.16177470033544300, + -0.27142848561999122, + 0.51171265709985936, + -1.24405664667738725, + 5.44368018806711085, + 25.00000000000752820, + ], +] +radau_adotdot_dict[7] = [ + [ + 1176.00000000000113687, + 729.91469055677328015, + -43.99705165440991550, + 11.09319581520685460, + -5.06492585326554945, + 3.39620181819555000, + -3.23987336608297483, + -335.99999999998067324, + ], + [ + -1619.20889513581641950, + -961.80954755690038382, + 130.61915202258592217, + -23.05926748673505244, + 9.46476140435538582, + -6.07426097472375659, + 5.68160564288905334, + 551.08748211418060237, + ], + [ + 591.45764413503638934, + 278.97169877665686499, + -144.67589881629601223, + 52.67022396165839382, + -12.29601631010507390, + 6.43814372663587164, + -5.51652673198668708, + -405.85864880032022484, + ], + [ + -216.87396735187854802, + -65.73232123018885886, + 70.29838369672336285, + -76.03933271744909916, + 40.93682565375758031, + -12.27856070330389571, + 8.65728797388706539, + 393.77000318215408470, + ], + [ + 106.91082287427600761, + 28.40345847945117441, + -17.27717360830020255, + 43.09652520936204212, + -67.31347956551157097, + 49.49615347850028257, + -20.80944990243477832, + -456.20281388413985724, + ], + [ + -61.71343933596624254, + -15.53132892474618387, + 7.70766003923121445, + -11.01359672248797494, + 42.17208432901723825, + -92.57530312896015801, + 101.90967791837387324, + 623.89986304565343289, + ], + [ + 37.14212052863845770, + 9.12282838083766023, + -4.14735163396773032, + 4.87648419403544153, + -11.13416017545824133, + 63.99686169444935047, + -241.58643821502599280, + -778.69588565766764532, + ], + [ + -13.71428571429089871, + -3.33947848188361718, + 1.47227995443322612, + -1.62423225359393264, + 3.23491051720515799, + -12.39923591080969345, + 154.90371668037039399, + 408.00000000014506440, + ], +] -radau_tau_dict[8] = [0.00000000000000000, 0.02247938643871247, - 0.11467905316090524, 0.26578982278458346, - 0.45284637366946501, 0.64737528288675206, - 0.81975930826325683, 0.94373743946299427, - 1.00000000000000000, ] -radau_adot_dict[8] = [[-64.00000000000002842, -27.87425774413988933, - 8.35812744119628803, -4.65663103170724213, - 3.35840944912858674, -2.86447035203517686, - 2.83231625843346535, -3.38129885029598398, - 8.00000000000247269, ], - [70.99500301886784825, 22.24259996433606545, - -16.59113019706197178, 8.11823601782297288, - -5.63972363739064519, 4.73592709079161978, - -4.64760820860593071, 5.52797002411787730, - -13.06099604196067787, ], - [-9.09751570679000920, 7.09031167389262862, - 4.35999414207241820, -8.54518895924011623, - 4.69201955824859773, -3.63184859257696058, - 3.43555148904386343, -4.01565632541511075, - 9.42749172353134313, ], - [3.03984525984412146, -2.08073537915066398, - 5.12492476602846203, 1.88118564797551024, - -6.56903294216630407, 3.92643535657839049, - -3.38635187509532765, 3.80301840720573914, - -8.80358194947172734, ], - [-1.45199315226135184, 0.95733575045937824, - -1.86370389789729907, 4.35063566670475144, - 1.10412720311081358, -6.26803995680457149, - 4.16083546606599874, -4.27430073943198074, - 9.61382400774353130, ], - [0.83299700675733690, -0.54072898089916266, - 0.97031561191428606, -1.74911317938669364, - 4.21599357684380305, 0.77234953700976872, - -7.26324408226995466, 5.80646148379917015, - -12.23423422630618695, ], - [-0.52539452304389933, 0.33849233242014176, - -0.58549937027751453, 0.96226841393810580, - -1.78522686380385132, 4.63313895406601084, - 0.60993512965452401, -11.08565997531045966, - 19.11652897581404886, ], - [0.33205809662591163, -0.21314332000205732, - 0.36230408377735768, -0.57210967985645711, - 0.97087801789264661, -1.96084361169319576, - 5.86878601947365564, 0.52980837586298679, - -44.55903248937006822, ], - [-0.12499999999989404, 0.08012570308357210, - -0.13533257975200122, 0.21071710374945074, - -0.34744436186362104, 0.65735157466000993, - -1.61022019669692296, 7.08965759946875007, - 32.49999999997948663, ]] -radau_adotdot_dict[8] = [[2015.99999999999886313, 1239.99192861138817534, - -72.88277336463374922, 17.51997492950863489, - -7.41622246398424068, 4.42474470018237298, - -3.45505836901634211, 3.58288090366227152, - 503.99999999988972377, ], - [-2770.90657995308492900, -1630.48585774586695152, - 215.22120434682165069, -36.18768862506749429, - 13.75495994294169577, -7.84190921667504881, - 5.98918207791984969, -6.14338585945915838, - -822.24204069615143453, ], - [1005.82154971304328228, 469.21734459367257841, - -235.46920462750665592, 80.94817109223765783, - -17.38851951467177059, 8.02560787757533944, - -5.55420891566086539, 5.43221355039884202, - 591.48961950584555325, ], - [-366.22614016052142460, -109.66547335908660443, - 112.51932381135310379, -114.03856697909043305, - 55.72970078745805722, -14.51442473251881893, - 8.09486048192019325, -7.18946818613449068, - -548.25173186668189373, ], - [179.44238293686481711, 47.03619970306451137, - -27.27385345069615141, 62.88550229946103798, - -87.72454888081045965, 54.76103907909609347, - -17.60455379230006656, 12.88533658273627225, - 589.75733674210323443, ], - [-104.05015745498809565, -25.78505782347295394, - 12.10417617061391127, -15.74842237796789846, - 52.65566281481832789, -94.24763499920995002, - 75.40796505095357816, -33.03228119939271323, - -725.83568034391191759, ], - [65.96867264341469195, 15.90701699696889904, - -6.76634970978365402, 7.09449560455202288, - -13.67330039262928665, 60.91049804570101855, - -144.88017667908607677, 167.08595546179009261, - 1030.45215074421412282, ], - [-41.79972772471506914, -9.94444549812449807, - 4.03330080026613302, -3.84025861091543419, - 6.09951986279361336, -16.26166633443870069, - 101.83369347076927625, -402.15401008789632442, - -1312.36965408491005292, ], - [15.74999999998665778, 3.72834452145616169, - -1.48582397643374797, 1.36679266728232918, - -2.03725215592977626, 4.74374558035409954, - -19.83170332537023484, 259.53275883437237326, - 692.99999999946010121, ]] +radau_tau_dict[8] = [ + 0.00000000000000000, + 0.02247938643871247, + 0.11467905316090524, + 0.26578982278458346, + 0.45284637366946501, + 0.64737528288675206, + 0.81975930826325683, + 0.94373743946299427, + 1.00000000000000000, +] +radau_adot_dict[8] = [ + [ + -64.00000000000002842, + -27.87425774413988933, + 8.35812744119628803, + -4.65663103170724213, + 3.35840944912858674, + -2.86447035203517686, + 2.83231625843346535, + -3.38129885029598398, + 8.00000000000247269, + ], + [ + 70.99500301886784825, + 22.24259996433606545, + -16.59113019706197178, + 8.11823601782297288, + -5.63972363739064519, + 4.73592709079161978, + -4.64760820860593071, + 5.52797002411787730, + -13.06099604196067787, + ], + [ + -9.09751570679000920, + 7.09031167389262862, + 4.35999414207241820, + -8.54518895924011623, + 4.69201955824859773, + -3.63184859257696058, + 3.43555148904386343, + -4.01565632541511075, + 9.42749172353134313, + ], + [ + 3.03984525984412146, + -2.08073537915066398, + 5.12492476602846203, + 1.88118564797551024, + -6.56903294216630407, + 3.92643535657839049, + -3.38635187509532765, + 3.80301840720573914, + -8.80358194947172734, + ], + [ + -1.45199315226135184, + 0.95733575045937824, + -1.86370389789729907, + 4.35063566670475144, + 1.10412720311081358, + -6.26803995680457149, + 4.16083546606599874, + -4.27430073943198074, + 9.61382400774353130, + ], + [ + 0.83299700675733690, + -0.54072898089916266, + 0.97031561191428606, + -1.74911317938669364, + 4.21599357684380305, + 0.77234953700976872, + -7.26324408226995466, + 5.80646148379917015, + -12.23423422630618695, + ], + [ + -0.52539452304389933, + 0.33849233242014176, + -0.58549937027751453, + 0.96226841393810580, + -1.78522686380385132, + 4.63313895406601084, + 0.60993512965452401, + -11.08565997531045966, + 19.11652897581404886, + ], + [ + 0.33205809662591163, + -0.21314332000205732, + 0.36230408377735768, + -0.57210967985645711, + 0.97087801789264661, + -1.96084361169319576, + 5.86878601947365564, + 0.52980837586298679, + -44.55903248937006822, + ], + [ + -0.12499999999989404, + 0.08012570308357210, + -0.13533257975200122, + 0.21071710374945074, + -0.34744436186362104, + 0.65735157466000993, + -1.61022019669692296, + 7.08965759946875007, + 32.49999999997948663, + ], +] +radau_adotdot_dict[8] = [ + [ + 2015.99999999999886313, + 1239.99192861138817534, + -72.88277336463374922, + 17.51997492950863489, + -7.41622246398424068, + 4.42474470018237298, + -3.45505836901634211, + 3.58288090366227152, + 503.99999999988972377, + ], + [ + -2770.90657995308492900, + -1630.48585774586695152, + 215.22120434682165069, + -36.18768862506749429, + 13.75495994294169577, + -7.84190921667504881, + 5.98918207791984969, + -6.14338585945915838, + -822.24204069615143453, + ], + [ + 1005.82154971304328228, + 469.21734459367257841, + -235.46920462750665592, + 80.94817109223765783, + -17.38851951467177059, + 8.02560787757533944, + -5.55420891566086539, + 5.43221355039884202, + 591.48961950584555325, + ], + [ + -366.22614016052142460, + -109.66547335908660443, + 112.51932381135310379, + -114.03856697909043305, + 55.72970078745805722, + -14.51442473251881893, + 8.09486048192019325, + -7.18946818613449068, + -548.25173186668189373, + ], + [ + 179.44238293686481711, + 47.03619970306451137, + -27.27385345069615141, + 62.88550229946103798, + -87.72454888081045965, + 54.76103907909609347, + -17.60455379230006656, + 12.88533658273627225, + 589.75733674210323443, + ], + [ + -104.05015745498809565, + -25.78505782347295394, + 12.10417617061391127, + -15.74842237796789846, + 52.65566281481832789, + -94.24763499920995002, + 75.40796505095357816, + -33.03228119939271323, + -725.83568034391191759, + ], + [ + 65.96867264341469195, + 15.90701699696889904, + -6.76634970978365402, + 7.09449560455202288, + -13.67330039262928665, + 60.91049804570101855, + -144.88017667908607677, + 167.08595546179009261, + 1030.45215074421412282, + ], + [ + -41.79972772471506914, + -9.94444549812449807, + 4.03330080026613302, + -3.84025861091543419, + 6.09951986279361336, + -16.26166633443870069, + 101.83369347076927625, + -402.15401008789632442, + -1312.36965408491005292, + ], + [ + 15.74999999998665778, + 3.72834452145616169, + -1.48582397643374797, + 1.36679266728232918, + -2.03725215592977626, + 4.74374558035409954, + -19.83170332537023484, + 259.53275883437237326, + 692.99999999946010121, + ], +] -radau_tau_dict[9] = [0.00000000000000000, 0.01777991514736346, - 0.09132360789979158, 0.21430847939565514, - 0.37193216458318179, 0.54518668480366450, - 0.71317524285503242, 0.85563374295856065, - 0.95536604470969477, 1.00000000000000000, ] -radau_adot_dict[9] = [[-81.00000000000004263, -35.21371041699546822, - 10.44981407275295737, -5.70845918991450674, - 3.99044636662769392, -3.24550641903763903, - 2.97505006484350076, -3.07507805297768755, - 3.76536828826786518, -8.99999999981996268, ], - [89.83153970403780875, 28.12161902100757516, - -20.72548579052329387, 9.94239702329991815, - -6.69350725779439415, 5.35846675253802118, - -4.87323434555169399, 5.01573191942259200, - -6.12808267753871405, 14.63498313988655752, ], - [-11.47427660946841854, 8.92081258803495913, - 5.47503555213022963, -10.42353285499872761, - 5.54233383841019744, -4.08518036375891924, - 3.57529706687566495, -3.60730450562997618, - 4.36265193579587596, -10.37866849427310001, ], - [3.81419058476079265, -2.60409926909004330, - 6.34280963158119349, 2.33308547290572088, - -7.69671638040896333, 4.37120622211315357, - -3.47654384844780662, 3.35357051926256311, - -3.96795795485506630, 9.36337005841867942, ], - [-1.81155300867822211, 1.19114944341875217, - -2.29142421583034039, 5.22939639019224956, - 1.34433116470738323, -6.88110084999686755, - 4.18930008478254123, -3.66506200710954655, - 4.15432435331247341, -9.65496582374520607, ], - [1.03663781378883102, -0.67091586184818763, - 1.18833546410413593, -2.08959625363386436, - 4.84142188082576919, 0.91711704250234427, - -7.13810812296376085, 4.78992579324240619, - -4.95650725767213451, 11.18360001233891587, ], - [-0.66086568819289371, 0.42434507579736425, - -0.72329307386970176, 1.15580121794353330, - -2.04989127776244118, 4.96429376658586285, - 0.70108995663206752, -8.70492717485264045, - 7.00050032393480937, -14.78888179412784076, ], - [0.44418925628006972, -0.28400708051187401, - 0.47454560842802779, -0.72499641695966344, - 1.16617483172783665, -2.16618785819994564, - 5.66054337792040574, 0.58436218058158362, - -13.70873474131953351, 23.69369348149827204, ], - [-0.29097316363860704, 0.18563208227508743, - -0.30702892266744786, 0.45891161293728622, - -0.70715775483021392, 1.19915865821626211, - -2.43531772552310599, 7.33383174778555080, - 0.52335960970632778, -56.05313057954246148, ], - [0.11111111111065723, -0.07082558208817376, - 0.11669167389433072, -0.17300700177212647, - 0.26256458849637060, -0.43226695095043116, - 0.82192349146213350, -2.02505041947615805, - 8.95507812083548060, 40.99999999984667909, ]] -radau_adotdot_dict[9] = [[3240.00000000000545697, 1980.53309732568800428, - -114.42620712287225615, 26.63664641748528084, - -10.72896282322562911, 5.95301849750194378, - -4.17155544005800039, 3.59391863655673660, - -3.94128334911420097, -719.99999999865576683, ], - [-4447.87608954420284135, -2600.49011655120557407, - 336.67820261409178784, -54.78724845336546423, - 19.80358715418515203, -10.49137062609861459, - 7.18257226707373775, -6.11080090247105545, - 6.65766018161684769, 1170.26881319972017081, ], - [1607.54452124670115154, 744.33437038919225870, - -365.33348732541799109, 120.87120189172378559, - -24.60077979956736272, 10.50864141856436618, - -6.48566513203763861, 5.22342907786128308, - -5.53176371170411585, -828.20732992586363252, ], - [-582.30354415434635484, -172.96394992509254962, - 172.60201919117605485, -167.60896467269702725, - 76.96551743950431046, -18.40402891132839613, - 9.06302501116192616, -6.53885215341313142, - 6.55556828523890545, 743.96162132556196411, ], - [283.73027874165160256, 73.72086649402709213, - -41.42307807001634501, 90.75418388888155619, - -117.99250491479705261, 66.81188544956808073, - -18.67902008318611706, 10.87078077136675347, - -9.89253198551369906, -760.96221976575043300, ], - [-164.13245300991698628, -40.27868700928837598, - 18.24889512084277499, -22.38102011530529012, - 68.90492055498853574, -110.01080844268177827, - 74.97435376749007219, -25.26014270155025088, - 18.97944038046850324, 867.87634172628565921, ], - [105.20693630086218207, 25.08698381043046766, - -10.24636666980788391, 10.02687310635811002, - -17.52572396327565230, 68.20843745319996287, - -132.64836072304211712, 112.03634693407799716, - -50.48223693779995358, -1109.56693125532456179, ], - [-70.92038990742307192, -16.65141728224165973, - 6.43807006935959691, -5.64388925268779929, - 7.95732764518548663, -17.92857590897373399, - 87.40646672305888387, -219.03517600681502131, - 260.56142963016372960, 1614.63866261219004627, ], - [46.52851810437505264, 10.83652812131320786, - -4.07266872679007008, 3.37989165362496635, - -4.32543145384469341, 8.04653426879890077, - -23.52549031272401692, 155.64156750393439665, - -633.54724802539033135, -2084.67562457484245897, ], - [-17.77777777770516465, -4.12767537282225661, - 1.53462091943353940, -1.24767446403127380, - 1.54205016083713886, -2.69373319840810233, - 6.88367392321031701, -30.42107115646609117, - 410.64096553667678791, 1106.66666666225228255, ]] +radau_tau_dict[9] = [ + 0.00000000000000000, + 0.01777991514736346, + 0.09132360789979158, + 0.21430847939565514, + 0.37193216458318179, + 0.54518668480366450, + 0.71317524285503242, + 0.85563374295856065, + 0.95536604470969477, + 1.00000000000000000, +] +radau_adot_dict[9] = [ + [ + -81.00000000000004263, + -35.21371041699546822, + 10.44981407275295737, + -5.70845918991450674, + 3.99044636662769392, + -3.24550641903763903, + 2.97505006484350076, + -3.07507805297768755, + 3.76536828826786518, + -8.99999999981996268, + ], + [ + 89.83153970403780875, + 28.12161902100757516, + -20.72548579052329387, + 9.94239702329991815, + -6.69350725779439415, + 5.35846675253802118, + -4.87323434555169399, + 5.01573191942259200, + -6.12808267753871405, + 14.63498313988655752, + ], + [ + -11.47427660946841854, + 8.92081258803495913, + 5.47503555213022963, + -10.42353285499872761, + 5.54233383841019744, + -4.08518036375891924, + 3.57529706687566495, + -3.60730450562997618, + 4.36265193579587596, + -10.37866849427310001, + ], + [ + 3.81419058476079265, + -2.60409926909004330, + 6.34280963158119349, + 2.33308547290572088, + -7.69671638040896333, + 4.37120622211315357, + -3.47654384844780662, + 3.35357051926256311, + -3.96795795485506630, + 9.36337005841867942, + ], + [ + -1.81155300867822211, + 1.19114944341875217, + -2.29142421583034039, + 5.22939639019224956, + 1.34433116470738323, + -6.88110084999686755, + 4.18930008478254123, + -3.66506200710954655, + 4.15432435331247341, + -9.65496582374520607, + ], + [ + 1.03663781378883102, + -0.67091586184818763, + 1.18833546410413593, + -2.08959625363386436, + 4.84142188082576919, + 0.91711704250234427, + -7.13810812296376085, + 4.78992579324240619, + -4.95650725767213451, + 11.18360001233891587, + ], + [ + -0.66086568819289371, + 0.42434507579736425, + -0.72329307386970176, + 1.15580121794353330, + -2.04989127776244118, + 4.96429376658586285, + 0.70108995663206752, + -8.70492717485264045, + 7.00050032393480937, + -14.78888179412784076, + ], + [ + 0.44418925628006972, + -0.28400708051187401, + 0.47454560842802779, + -0.72499641695966344, + 1.16617483172783665, + -2.16618785819994564, + 5.66054337792040574, + 0.58436218058158362, + -13.70873474131953351, + 23.69369348149827204, + ], + [ + -0.29097316363860704, + 0.18563208227508743, + -0.30702892266744786, + 0.45891161293728622, + -0.70715775483021392, + 1.19915865821626211, + -2.43531772552310599, + 7.33383174778555080, + 0.52335960970632778, + -56.05313057954246148, + ], + [ + 0.11111111111065723, + -0.07082558208817376, + 0.11669167389433072, + -0.17300700177212647, + 0.26256458849637060, + -0.43226695095043116, + 0.82192349146213350, + -2.02505041947615805, + 8.95507812083548060, + 40.99999999984667909, + ], +] +radau_adotdot_dict[9] = [ + [ + 3240.00000000000545697, + 1980.53309732568800428, + -114.42620712287225615, + 26.63664641748528084, + -10.72896282322562911, + 5.95301849750194378, + -4.17155544005800039, + 3.59391863655673660, + -3.94128334911420097, + -719.99999999865576683, + ], + [ + -4447.87608954420284135, + -2600.49011655120557407, + 336.67820261409178784, + -54.78724845336546423, + 19.80358715418515203, + -10.49137062609861459, + 7.18257226707373775, + -6.11080090247105545, + 6.65766018161684769, + 1170.26881319972017081, + ], + [ + 1607.54452124670115154, + 744.33437038919225870, + -365.33348732541799109, + 120.87120189172378559, + -24.60077979956736272, + 10.50864141856436618, + -6.48566513203763861, + 5.22342907786128308, + -5.53176371170411585, + -828.20732992586363252, + ], + [ + -582.30354415434635484, + -172.96394992509254962, + 172.60201919117605485, + -167.60896467269702725, + 76.96551743950431046, + -18.40402891132839613, + 9.06302501116192616, + -6.53885215341313142, + 6.55556828523890545, + 743.96162132556196411, + ], + [ + 283.73027874165160256, + 73.72086649402709213, + -41.42307807001634501, + 90.75418388888155619, + -117.99250491479705261, + 66.81188544956808073, + -18.67902008318611706, + 10.87078077136675347, + -9.89253198551369906, + -760.96221976575043300, + ], + [ + -164.13245300991698628, + -40.27868700928837598, + 18.24889512084277499, + -22.38102011530529012, + 68.90492055498853574, + -110.01080844268177827, + 74.97435376749007219, + -25.26014270155025088, + 18.97944038046850324, + 867.87634172628565921, + ], + [ + 105.20693630086218207, + 25.08698381043046766, + -10.24636666980788391, + 10.02687310635811002, + -17.52572396327565230, + 68.20843745319996287, + -132.64836072304211712, + 112.03634693407799716, + -50.48223693779995358, + -1109.56693125532456179, + ], + [ + -70.92038990742307192, + -16.65141728224165973, + 6.43807006935959691, + -5.64388925268779929, + 7.95732764518548663, + -17.92857590897373399, + 87.40646672305888387, + -219.03517600681502131, + 260.56142963016372960, + 1614.63866261219004627, + ], + [ + 46.52851810437505264, + 10.83652812131320786, + -4.07266872679007008, + 3.37989165362496635, + -4.32543145384469341, + 8.04653426879890077, + -23.52549031272401692, + 155.64156750393439665, + -633.54724802539033135, + -2084.67562457484245897, + ], + [ + -17.77777777770516465, + -4.12767537282225661, + 1.53462091943353940, + -1.24767446403127380, + 1.54205016083713886, + -2.69373319840810233, + 6.88367392321031701, + -30.42107115646609117, + 410.64096553667678791, + 1106.66666666225228255, + ], +] -radau_tau_dict[10] = [0.00000000000000000, 0.01441240964887660, - 0.07438738970919330, 0.17611665616301075, - 0.30966757992765476, 0.46197040108073445, - 0.61811723469592150, 0.76282301518486062, - 0.88192102120915938, 0.96374218711743631, - 1.00000000000000000, ] -radau_adot_dict[10] = [[-99.99999999999992895, -43.41678141933171986, - 12.78928064012717414, -6.89033214590773468, - 4.71424042220159834, -3.71562642618960126, - 3.25251070724323199, -3.12664012305617689, - 3.33194417248405728, -4.15343825603231664, - 10.00000000127609212, ], - [110.88407538476806735, 34.69232502969869358, - -25.35018316532213589, 11.99292010956605736, - -7.90161259937806904, 6.12918393752630664, - -5.32194911936971948, 5.09292790632591164, - -5.41326630798914721, 6.73840799406796975, - -16.21476095669183337, ], - [-14.13042563810675745, 10.96676844740408185, - 6.72156936753501100, -12.53860551309362670, - 6.52193803323750032, -4.65517513349904100, - 3.88652064846417034, -3.64160562711778191, - 3.82491118384537998, -4.73094762798258017, - 11.35599782133387237, ], - [4.67906394414677873, -3.18883618300925242, - 7.70653109821361504, 2.83902732934281943, - -9.00782587641477051, 4.94836491503828491, - -3.74822938920076343, 3.34996523906387900, - -3.43085593463729754, 4.18801525759520477, - -10.00215748986232356, ], - [-2.21205943883240552, 1.45174133981855347, - -2.76982675905947850, 6.22423848775331479, - 1.61463463535339669, -7.72023958723235815, - 4.46475705244499466, -3.60534661369233866, - 3.51747913458111894, -4.19212161008806028, - 9.92280056892257534, ], - [1.26107133935179649, -0.81451429917717033, - 1.42999497143838195, -2.47315028972017448, - 5.58410178361652942, 1.08232042324193944, - -7.50084536449850159, 4.61850801153105017, - -4.07645816576527142, 4.64746574640746335, - -10.82798481006249425, ], - [-0.80471029795411542, 0.51556091762280776, - -0.87030740153648645, 1.36561463645567671, - -2.35414707076444429, 5.46793601540036356, - 0.80890803856539795, -8.19834549755231023, - 5.54059999854366225, -5.76068266835737575, - 13.02507861791643684, ], - [0.54963518680933576, -0.35055212590198581, - 0.57940256827736913, -0.86719768277172593, - 1.35069903504403777, -2.39216273039997107, - 5.82508173308438071, 0.65546003463534208, - -10.34477943836590796, 8.35305931088237230, - -17.67770050629743039, ], - [-0.38587167045980814, 0.24546694513846129, - -0.40091971022846828, 0.58509830261706175, - -0.86814375212516004, 1.39097894572954850, - -2.59346954582678180, 6.81506156755171588, - 0.56694419152493958, -16.64851704635335139, - 28.82039979943904484, ], - [0.25922119027812629, -0.16466772675814162, - 0.26724017988160370, -0.38490377803763709, - 0.55758559986584344, -0.85461660474897316, - 1.45316814749785306, -2.96559387245064165, - 8.97207959068006033, 0.51881094967812857, - -68.90167304690199046, ], - [-0.10000000000111019, 0.06348907449566861, - -0.10278178932642598, 0.14729054379857887, - -0.21147021062723262, 0.31903624519012241, - -0.52645290827396329, 1.00560897518571801, - -2.48859842500850359, 11.03994795085468539, - 50.50000000038610182, ]] -radau_adotdot_dict[10] = [[4950.00000000001000444, 3012.45818548576608009, - -171.92807396683292609, 39.12368254110151611, - -15.22355173022060626, 8.04299673200512188, - -5.26196411361797800, 4.09877529060122470, - -3.77805277644074522, 4.30969851431291318, - 990.00000001395073923, ], - [-6789.50954148942582833, -3951.38566209134569363, - 504.57258995426855108, -80.23508601244702731, - 28.00752278820436914, -14.12196953781221964, - 9.02102948758601997, -6.93356185793709301, - 6.34198920238668506, -7.20421773849466263, - -1604.78711251807726512, ], - [2446.17058314418500231, 1126.63684055919816274, - -544.35613668750829675, 175.31442005468170464, - -34.37866077254329866, 13.94478215590061154, - -8.00810267008409937, 5.80551036253291386, - -5.13604733090323862, 5.73012087396318748, - 1122.41852183828268608, ], - [-882.67682717657521607, -260.69663442977798695, - 255.11056670417383430, -240.47405916706395601, - 105.80855022391517650, -23.91022250684795836, - 10.89634601251998447, -7.02802676028693440, - 5.83162488790799216, -6.28895769033908891, - -985.93738770249115078, ], - [428.12521662480554596, 110.56237292172352227, - -60.78008650703299054, 128.55302924034060652, - -159.40428515599177217, 84.66857988589373463, - -21.72650903317338589, 11.18586400415455273, - -8.30500240114122334, 8.46864426583795193, - 973.45496651867551918, ], - [-246.75473480762772738, -60.15460530445014342, - 26.60265923994862192, -31.34628853732772313, - 91.36150260216422225, -135.67104500401671885, - 83.93926689676683850, -24.64829978253280274, - 14.79173951493723393, -13.70190884599441006, - -1053.37594860374701966, ], - [158.33831307548763334, 37.48000390509794499, - -14.90091247305119282, 13.93327820916493920, - -22.86655933407018892, 81.87194083529310262, - -142.08646851108039755, 102.56317033977524034, - -35.72304028380662544, 27.35746667833063839, - 1247.31787653834112461, ], - [-108.48598176740225085, -25.25972749273964268, - 9.47223310966056431, -7.88015157350419315, - 10.32307656776792726, -21.08072888263477296, - 89.93322727799926497, -184.81219612064086277, - 161.98894275991787595, -74.48113721593693981, - -1636.38016330825212208, ], - [76.29926328362374477, 17.59755050097978568, - -6.38256792536444095, 4.98018184338134517, - -5.83759324925205192, 9.63545795644580494, - -23.85786681336078630, 123.37859657347641473, - -320.52177806741110544, 389.67404182444494154, - 2422.70575486484585781, ], - [-51.30629088729910592, -11.77232624230349245, - 4.19352221896064492, -3.16288229629453355, - 3.50555428416709702, -5.25633362139525673, - 10.75988955514463186, -33.40792424239023717, - 229.48283922249345324, -954.28836921297772733, - -3158.41650766262318939, ], - [19.80000000021980000, 4.53400218785288978, - -1.60379366721661398, 1.19387569800088045, - -1.29555622403296056, 1.87654198775976155, - -3.60884808750712338, 9.79809219080568994, - -44.97321472272450649, 620.42461854547525490, - 1683.00000001738249011, ]] +radau_tau_dict[10] = [ + 0.00000000000000000, + 0.01441240964887660, + 0.07438738970919330, + 0.17611665616301075, + 0.30966757992765476, + 0.46197040108073445, + 0.61811723469592150, + 0.76282301518486062, + 0.88192102120915938, + 0.96374218711743631, + 1.00000000000000000, +] +radau_adot_dict[10] = [ + [ + -99.99999999999992895, + -43.41678141933171986, + 12.78928064012717414, + -6.89033214590773468, + 4.71424042220159834, + -3.71562642618960126, + 3.25251070724323199, + -3.12664012305617689, + 3.33194417248405728, + -4.15343825603231664, + 10.00000000127609212, + ], + [ + 110.88407538476806735, + 34.69232502969869358, + -25.35018316532213589, + 11.99292010956605736, + -7.90161259937806904, + 6.12918393752630664, + -5.32194911936971948, + 5.09292790632591164, + -5.41326630798914721, + 6.73840799406796975, + -16.21476095669183337, + ], + [ + -14.13042563810675745, + 10.96676844740408185, + 6.72156936753501100, + -12.53860551309362670, + 6.52193803323750032, + -4.65517513349904100, + 3.88652064846417034, + -3.64160562711778191, + 3.82491118384537998, + -4.73094762798258017, + 11.35599782133387237, + ], + [ + 4.67906394414677873, + -3.18883618300925242, + 7.70653109821361504, + 2.83902732934281943, + -9.00782587641477051, + 4.94836491503828491, + -3.74822938920076343, + 3.34996523906387900, + -3.43085593463729754, + 4.18801525759520477, + -10.00215748986232356, + ], + [ + -2.21205943883240552, + 1.45174133981855347, + -2.76982675905947850, + 6.22423848775331479, + 1.61463463535339669, + -7.72023958723235815, + 4.46475705244499466, + -3.60534661369233866, + 3.51747913458111894, + -4.19212161008806028, + 9.92280056892257534, + ], + [ + 1.26107133935179649, + -0.81451429917717033, + 1.42999497143838195, + -2.47315028972017448, + 5.58410178361652942, + 1.08232042324193944, + -7.50084536449850159, + 4.61850801153105017, + -4.07645816576527142, + 4.64746574640746335, + -10.82798481006249425, + ], + [ + -0.80471029795411542, + 0.51556091762280776, + -0.87030740153648645, + 1.36561463645567671, + -2.35414707076444429, + 5.46793601540036356, + 0.80890803856539795, + -8.19834549755231023, + 5.54059999854366225, + -5.76068266835737575, + 13.02507861791643684, + ], + [ + 0.54963518680933576, + -0.35055212590198581, + 0.57940256827736913, + -0.86719768277172593, + 1.35069903504403777, + -2.39216273039997107, + 5.82508173308438071, + 0.65546003463534208, + -10.34477943836590796, + 8.35305931088237230, + -17.67770050629743039, + ], + [ + -0.38587167045980814, + 0.24546694513846129, + -0.40091971022846828, + 0.58509830261706175, + -0.86814375212516004, + 1.39097894572954850, + -2.59346954582678180, + 6.81506156755171588, + 0.56694419152493958, + -16.64851704635335139, + 28.82039979943904484, + ], + [ + 0.25922119027812629, + -0.16466772675814162, + 0.26724017988160370, + -0.38490377803763709, + 0.55758559986584344, + -0.85461660474897316, + 1.45316814749785306, + -2.96559387245064165, + 8.97207959068006033, + 0.51881094967812857, + -68.90167304690199046, + ], + [ + -0.10000000000111019, + 0.06348907449566861, + -0.10278178932642598, + 0.14729054379857887, + -0.21147021062723262, + 0.31903624519012241, + -0.52645290827396329, + 1.00560897518571801, + -2.48859842500850359, + 11.03994795085468539, + 50.50000000038610182, + ], +] +radau_adotdot_dict[10] = [ + [ + 4950.00000000001000444, + 3012.45818548576608009, + -171.92807396683292609, + 39.12368254110151611, + -15.22355173022060626, + 8.04299673200512188, + -5.26196411361797800, + 4.09877529060122470, + -3.77805277644074522, + 4.30969851431291318, + 990.00000001395073923, + ], + [ + -6789.50954148942582833, + -3951.38566209134569363, + 504.57258995426855108, + -80.23508601244702731, + 28.00752278820436914, + -14.12196953781221964, + 9.02102948758601997, + -6.93356185793709301, + 6.34198920238668506, + -7.20421773849466263, + -1604.78711251807726512, + ], + [ + 2446.17058314418500231, + 1126.63684055919816274, + -544.35613668750829675, + 175.31442005468170464, + -34.37866077254329866, + 13.94478215590061154, + -8.00810267008409937, + 5.80551036253291386, + -5.13604733090323862, + 5.73012087396318748, + 1122.41852183828268608, + ], + [ + -882.67682717657521607, + -260.69663442977798695, + 255.11056670417383430, + -240.47405916706395601, + 105.80855022391517650, + -23.91022250684795836, + 10.89634601251998447, + -7.02802676028693440, + 5.83162488790799216, + -6.28895769033908891, + -985.93738770249115078, + ], + [ + 428.12521662480554596, + 110.56237292172352227, + -60.78008650703299054, + 128.55302924034060652, + -159.40428515599177217, + 84.66857988589373463, + -21.72650903317338589, + 11.18586400415455273, + -8.30500240114122334, + 8.46864426583795193, + 973.45496651867551918, + ], + [ + -246.75473480762772738, + -60.15460530445014342, + 26.60265923994862192, + -31.34628853732772313, + 91.36150260216422225, + -135.67104500401671885, + 83.93926689676683850, + -24.64829978253280274, + 14.79173951493723393, + -13.70190884599441006, + -1053.37594860374701966, + ], + [ + 158.33831307548763334, + 37.48000390509794499, + -14.90091247305119282, + 13.93327820916493920, + -22.86655933407018892, + 81.87194083529310262, + -142.08646851108039755, + 102.56317033977524034, + -35.72304028380662544, + 27.35746667833063839, + 1247.31787653834112461, + ], + [ + -108.48598176740225085, + -25.25972749273964268, + 9.47223310966056431, + -7.88015157350419315, + 10.32307656776792726, + -21.08072888263477296, + 89.93322727799926497, + -184.81219612064086277, + 161.98894275991787595, + -74.48113721593693981, + -1636.38016330825212208, + ], + [ + 76.29926328362374477, + 17.59755050097978568, + -6.38256792536444095, + 4.98018184338134517, + -5.83759324925205192, + 9.63545795644580494, + -23.85786681336078630, + 123.37859657347641473, + -320.52177806741110544, + 389.67404182444494154, + 2422.70575486484585781, + ], + [ + -51.30629088729910592, + -11.77232624230349245, + 4.19352221896064492, + -3.16288229629453355, + 3.50555428416709702, + -5.25633362139525673, + 10.75988955514463186, + -33.40792424239023717, + 229.48283922249345324, + -954.28836921297772733, + -3158.41650766262318939, + ], + [ + 19.80000000021980000, + 4.53400218785288978, + -1.60379366721661398, + 1.19387569800088045, + -1.29555622403296056, + 1.87654198775976155, + -3.60884808750712338, + 9.79809219080568994, + -44.97321472272450649, + 620.42461854547525490, + 1683.00000001738249011, + ], +] legendre_tau_dict = dict() @@ -607,622 +1384,1446 @@ legendre_adotdot_dict = dict() legendre_afinal_dict = dict() -legendre_tau_dict[1] = [0.00000000000000000, 0.50000000000000000, ] -legendre_adot_dict[1] = [[-2.00000000000000000, -2.00000000000000000, ], - [2.00000000000000000, 2.00000000000000000, ]] -legendre_adotdot_dict[1] = [[0.00000000000000000, 0.00000000000000000, ], - [0.00000000000000000, 0.00000000000000000, ]] -legendre_afinal_dict[1] = [-1.00000000000000000, 2.00000000000000000, ] +legendre_tau_dict[1] = [0.00000000000000000, 0.50000000000000000] +legendre_adot_dict[1] = [ + [-2.00000000000000000, -2.00000000000000000], + [2.00000000000000000, 2.00000000000000000], +] +legendre_adotdot_dict[1] = [ + [0.00000000000000000, 0.00000000000000000], + [0.00000000000000000, 0.00000000000000000], +] +legendre_afinal_dict[1] = [-1.00000000000000000, 2.00000000000000000] -legendre_tau_dict[2] = [0.00000000000000000, 0.21132486540518713, - 0.78867513459481287, ] -legendre_adot_dict[2] = [[-6.00000000000000000, -3.46410161513775439, - 3.46410161513775350, ], - [6.46410161513775350, 2.99999999999999867, - -6.46410161513775527, ], - [-0.46410161513775472, 0.46410161513775461, - 3.00000000000000000, ]] -legendre_adotdot_dict[2] = [[12.00000000000000000, 12.00000000000000000, - 12.00000000000000000, ], - [-16.39230484541326405, -16.39230484541326405, - -16.39230484541326405, ], - [4.39230484541326405, 4.39230484541326405, - 4.39230484541326405, ]] -legendre_afinal_dict[2] = [1.00000000000000000, -1.73205080756887853, - 1.73205080756887719, ] +legendre_tau_dict[2] = [0.00000000000000000, 0.21132486540518713, 0.78867513459481287] +legendre_adot_dict[2] = [ + [-6.00000000000000000, -3.46410161513775439, 3.46410161513775350], + [6.46410161513775350, 2.99999999999999867, -6.46410161513775527], + [-0.46410161513775472, 0.46410161513775461, 3.00000000000000000], +] +legendre_adotdot_dict[2] = [ + [12.00000000000000000, 12.00000000000000000, 12.00000000000000000], + [-16.39230484541326405, -16.39230484541326405, -16.39230484541326405], + [4.39230484541326405, 4.39230484541326405, 4.39230484541326405], +] +legendre_afinal_dict[2] = [ + 1.00000000000000000, + -1.73205080756887853, + 1.73205080756887719, +] -legendre_tau_dict[3] = [0.00000000000000000, 0.11270166537925834, - 0.49999999999999989, 0.88729833462074226, ] -legendre_adot_dict[3] = [[-11.99999999999999645, -5.99999999999999822, - 3.00000000000000000, -6.00000000000001421, ], - [13.12163891034569119, 4.99999999999999645, - -5.72748612183952233, 10.16397779494323395, ], - [-1.33333333333333304, 1.16397779494322196, - 2.00000000000000533, -9.16397779494322151, ], - [0.21169442298763785, -0.16397779494322201, - 0.72748612183951156, 4.99999999999999289, ]] -legendre_adotdot_dict[3] = [[59.99999999999997158, 46.47580015448897939, - 0.00000000000002842, -46.47580015448902202, ], - [-82.06316679540748282, -62.06316679540748993, - 6.66666666666661456, 75.39650012874085405, ], - [26.66666666666665009, 17.65053343632599336, - -13.33333333333329307, -44.31720010299263635, ], - [-4.60349987125914950, -2.06316679540749437, - 6.66666666666664476, 15.39650012874080076, ]] -legendre_afinal_dict[3] = [-0.99999999999999645, 1.66666666666665364, - -1.33333333333332416, 1.66666666666666163, ] +legendre_tau_dict[3] = [ + 0.00000000000000000, + 0.11270166537925834, + 0.49999999999999989, + 0.88729833462074226, +] +legendre_adot_dict[3] = [ + [ + -11.99999999999999645, + -5.99999999999999822, + 3.00000000000000000, + -6.00000000000001421, + ], + [ + 13.12163891034569119, + 4.99999999999999645, + -5.72748612183952233, + 10.16397779494323395, + ], + [ + -1.33333333333333304, + 1.16397779494322196, + 2.00000000000000533, + -9.16397779494322151, + ], + [ + 0.21169442298763785, + -0.16397779494322201, + 0.72748612183951156, + 4.99999999999999289, + ], +] +legendre_adotdot_dict[3] = [ + [ + 59.99999999999997158, + 46.47580015448897939, + 0.00000000000002842, + -46.47580015448902202, + ], + [ + -82.06316679540748282, + -62.06316679540748993, + 6.66666666666661456, + 75.39650012874085405, + ], + [ + 26.66666666666665009, + 17.65053343632599336, + -13.33333333333329307, + -44.31720010299263635, + ], + [ + -4.60349987125914950, + -2.06316679540749437, + 6.66666666666664476, + 15.39650012874080076, + ], +] +legendre_afinal_dict[3] = [ + -0.99999999999999645, + 1.66666666666665364, + -1.33333333333332416, + 1.66666666666666163, +] -legendre_tau_dict[4] = [0.00000000000000000, 0.06943184420297367, - 0.33000947820757198, 0.66999052179243102, - 0.93056815579702290, ] -legendre_adot_dict[4] = [[-20.00000000000000355, -9.43327565893721243, - 3.72430573391146424, -3.72430573391140385, - 9.43327565893692110, ], - [21.98973890127313524, 7.73861278752584170, - -7.20134099970690400, 6.34362221862489939, - -15.56386959855438334, ], - [-2.46548206404896098, 2.04508965030390266, - 2.26138721247418006, -5.97155645948191527, - 11.89278387805641479, ], - [0.59815998477037069, -0.43707080239579987, - 1.44878203453368237, 2.26138721247403662, - -13.50080272596486530, ], - [-0.12241682199454294, 0.08664402350326535, - -0.23313398121242734, 1.09085276229439554, - 7.73861278752593940, ]] -legendre_adotdot_dict[4] = [[179.99999999999997158, 125.72670690061997334, - -5.72670690061985965, -5.72670690061895016, - 125.72670690061771381, ], - [-246.17014587396408842, -166.39765713180497642, - 22.70209346695287422, 7.56503572918063583, - -204.73823615716273139, ], - [83.67739618075455610, 47.34890006452557998, - -30.21982057995266757, 8.12075844540986225, - 144.46156392997050943, ], - [-22.14082193765889883, -8.22019077277292176, - 15.07523677232551762, -23.26534225303723247, - -105.33285463822021200, ], - [4.63357163086848001, 1.54224093943233154, - -1.83080275870586817, 13.30625497906559573, - 39.88281996479484093, ]] -legendre_afinal_dict[4] = [1.00000000000005329, -1.64070532173932548, - 1.21439396979861636, -1.21439396979864811, - 1.64070532173930372, ] +legendre_tau_dict[4] = [ + 0.00000000000000000, + 0.06943184420297367, + 0.33000947820757198, + 0.66999052179243102, + 0.93056815579702290, +] +legendre_adot_dict[4] = [ + [ + -20.00000000000000355, + -9.43327565893721243, + 3.72430573391146424, + -3.72430573391140385, + 9.43327565893692110, + ], + [ + 21.98973890127313524, + 7.73861278752584170, + -7.20134099970690400, + 6.34362221862489939, + -15.56386959855438334, + ], + [ + -2.46548206404896098, + 2.04508965030390266, + 2.26138721247418006, + -5.97155645948191527, + 11.89278387805641479, + ], + [ + 0.59815998477037069, + -0.43707080239579987, + 1.44878203453368237, + 2.26138721247403662, + -13.50080272596486530, + ], + [ + -0.12241682199454294, + 0.08664402350326535, + -0.23313398121242734, + 1.09085276229439554, + 7.73861278752593940, + ], +] +legendre_adotdot_dict[4] = [ + [ + 179.99999999999997158, + 125.72670690061997334, + -5.72670690061985965, + -5.72670690061895016, + 125.72670690061771381, + ], + [ + -246.17014587396408842, + -166.39765713180497642, + 22.70209346695287422, + 7.56503572918063583, + -204.73823615716273139, + ], + [ + 83.67739618075455610, + 47.34890006452557998, + -30.21982057995266757, + 8.12075844540986225, + 144.46156392997050943, + ], + [ + -22.14082193765889883, + -8.22019077277292176, + 15.07523677232551762, + -23.26534225303723247, + -105.33285463822021200, + ], + [ + 4.63357163086848001, + 1.54224093943233154, + -1.83080275870586817, + 13.30625497906559573, + 39.88281996479484093, + ], +] +legendre_afinal_dict[4] = [ + 1.00000000000005329, + -1.64070532173932548, + 1.21439396979861636, + -1.21439396979864811, + 1.64070532173930372, +] -legendre_tau_dict[5] = [0.00000000000000000, 0.04691007703066801, - 0.23076534494715878, 0.49999999999999767, - 0.76923465505284816, 0.95308992296932671, ] -legendre_adot_dict[5] = [[-29.99999999999999645, -13.74066696148972966, - 4.85177807260082616, -3.75000000000000355, - 4.85177807260098248, -13.74066696148843292, ], - [33.07195697163451342, 11.18330013267038581, - -9.44759960151609590, 6.42011650355944496, - -8.01592078481075987, 22.42091502590413299, ], - [-3.87041820427842742, 3.13131216201181450, - 2.81669986732958266, -6.22012045466986141, - 6.19052235495304970, -16.19339023999791749, ], - [1.06666666666666399, -0.75873179598080620, - 2.21788633227481746, 2.00000000000017453, - -7.39311627638205326, 15.41544322156842561, ], - [-0.34832238831592910, 0.23910122335368422, - -0.55712262029379156, 1.86599528883173571, - 2.81669986732908306, -19.08560117865692263, ], - [0.08011695429317560, -0.05431476056534416, - 0.11835794960467859, -0.31599133772139809, - 1.55003676631008291, 11.18330013267076595, ]] -legendre_adotdot_dict[5] = [[419.99999999999982947, 278.49806922095024220, - -14.71744604136580392, -0.00000000000119371, - 14.71744604136091539, -278.49806922093699768, ], - [-574.30238250892273300, -366.95347781100224438, - 49.55003419903368922, -2.65879371365303996, - -22.96209706247395843, 451.99517324935129636, ], - [198.68090298370464097, 104.09960256052515604, - -59.58901220959256761, 21.32546038032049296, - 11.88065010454926096, -317.35420636372941772, ], - [-59.73333333333316375, -20.31939521965934858, - 28.96972952388082945, -37.33333333333214910, - 13.27112041309614199, 276.74521194934067125, ], - [19.99370966420485374, 6.00991412485087828, - -5.20777714493021904, 21.32546038032136337, - -42.50058496011200759, -219.26451792806466301, ], - [-4.63889680565338036, -1.33471287566461294, - 0.99447167297441119, -2.65879371365547845, - 25.59346546357988927, 86.37640831403922448, ]] -legendre_afinal_dict[5] = [-1.00000000000002487, 1.62776671089034863, - -1.16110004422356416, 1.06666666666675636, - -1.16110004422360458, 1.62776671089023295, ] +legendre_tau_dict[5] = [ + 0.00000000000000000, + 0.04691007703066801, + 0.23076534494715878, + 0.49999999999999767, + 0.76923465505284816, + 0.95308992296932671, +] +legendre_adot_dict[5] = [ + [ + -29.99999999999999645, + -13.74066696148972966, + 4.85177807260082616, + -3.75000000000000355, + 4.85177807260098248, + -13.74066696148843292, + ], + [ + 33.07195697163451342, + 11.18330013267038581, + -9.44759960151609590, + 6.42011650355944496, + -8.01592078481075987, + 22.42091502590413299, + ], + [ + -3.87041820427842742, + 3.13131216201181450, + 2.81669986732958266, + -6.22012045466986141, + 6.19052235495304970, + -16.19339023999791749, + ], + [ + 1.06666666666666399, + -0.75873179598080620, + 2.21788633227481746, + 2.00000000000017453, + -7.39311627638205326, + 15.41544322156842561, + ], + [ + -0.34832238831592910, + 0.23910122335368422, + -0.55712262029379156, + 1.86599528883173571, + 2.81669986732908306, + -19.08560117865692263, + ], + [ + 0.08011695429317560, + -0.05431476056534416, + 0.11835794960467859, + -0.31599133772139809, + 1.55003676631008291, + 11.18330013267076595, + ], +] +legendre_adotdot_dict[5] = [ + [ + 419.99999999999982947, + 278.49806922095024220, + -14.71744604136580392, + -0.00000000000119371, + 14.71744604136091539, + -278.49806922093699768, + ], + [ + -574.30238250892273300, + -366.95347781100224438, + 49.55003419903368922, + -2.65879371365303996, + -22.96209706247395843, + 451.99517324935129636, + ], + [ + 198.68090298370464097, + 104.09960256052515604, + -59.58901220959256761, + 21.32546038032049296, + 11.88065010454926096, + -317.35420636372941772, + ], + [ + -59.73333333333316375, + -20.31939521965934858, + 28.96972952388082945, + -37.33333333333214910, + 13.27112041309614199, + 276.74521194934067125, + ], + [ + 19.99370966420485374, + 6.00991412485087828, + -5.20777714493021904, + 21.32546038032136337, + -42.50058496011200759, + -219.26451792806466301, + ], + [ + -4.63889680565338036, + -1.33471287566461294, + 0.99447167297441119, + -2.65879371365547845, + 25.59346546357988927, + 86.37640831403922448, + ], +] +legendre_afinal_dict[5] = [ + -1.00000000000002487, + 1.62776671089034863, + -1.16110004422356416, + 1.06666666666675636, + -1.16110004422360458, + 1.62776671089023295, +] -legendre_tau_dict[6] = [0.00000000000000000, 0.03376524289842411, - 0.16939530676686565, 0.38069040695841061, - 0.61930959304157429, 0.83060469323316066, - 0.96623475710156437, ] -legendre_adot_dict[6] = [[-41.99999999999993605, -18.91598687354210995, - 6.27707018598655964, -4.25786745784672149, - 4.25786745784724019, -6.27707018598746203, - 18.91598687353456398, ], - [46.36937471058886473, 15.32559943877114961, - -12.27449151016940476, 7.31525360482739728, - -7.05084663619737029, 10.24428508749317501, - -30.68867482146182368, ], - [-5.55188252334920573, 4.42878459321007956, - 3.55364671186248504, -7.21466693863796849, - 5.51203293000726458, -7.41573149790712893, - 21.57160573829168371, ], - [1.62055582214317195, -1.13579253120089896, - 3.10459433591240153, 2.12075384936568234, - -6.81758425836822912, 7.14920104705762327, - -19.25696288834685177, ], - [-0.61233946054691502, 0.41365582265250200, - -0.89624888663108360, 2.57607655963439974, - 2.12075384936873057, -9.35754649633609681, - 19.97909959689369330, ], - [0.23091612123217989, -0.15373639381797002, - 0.30843807418374597, -0.69100775654152158, - 2.39364176517328087, 3.55364671185861347, - -25.84665393768474928, ], - [-0.05662467006817981, 0.03747594392724378, - -0.07300891114466557, 0.15145813919897436, - -0.41586510782973252, 2.10321533382305148, - 15.32559943877357078, ]] -legendre_adotdot_dict[6] = [[840.00000000000034106, 540.64365692702745037, - -29.49852090486558609, 4.30940943239363605, - 4.30940943240420893, -29.49852090482625044, - 540.64365692693490928, ], - [-1148.45314479649096029, -710.55980476921922673, - 93.76115559609706906, -11.14423553547817392, - -5.82317129649572962, 47.09684608675820527, - -874.82230924609734757, ], - [400.80870642748203636, 201.05438486370465512, - -107.78321107866594275, 37.68889433782248943, - -1.12326115476952282, -30.27496837960165976, - 607.05266152757224063, ], - [-127.61291622770889376, -41.36116953176949096, - 51.45159825594288350, -58.52913567412112172, - 28.22512610388042731, 19.03118936364580804, - -524.47443280328877790, ], - [49.45902404982912515, 14.09194011206182040, - -10.35399107180189304, 32.51797774040460354, - -54.23628403759174432, 22.06641782049389278, - 497.20520338354441492, ], - [-18.84093488066641342, -5.09807019969807662, - 3.12511118370701624, -6.00264539161375055, - 32.80951010098536358, -74.38313151534973144, - -411.09634686360561773, ], - [4.63926542755519922, 1.22906259789328987, - -0.70214198041270404, 1.15973509059472679, - -4.16132914840768731, 45.96216752888985013, - 165.49156707494972807, ]] -legendre_afinal_dict[6] = [1.00000000000023448, -1.62038592448029561, - 1.13226285721569386, -0.99615775754512259, - 0.99615775754528868, -1.13226285721527953, - 1.62038592448068686, ] +legendre_tau_dict[6] = [ + 0.00000000000000000, + 0.03376524289842411, + 0.16939530676686565, + 0.38069040695841061, + 0.61930959304157429, + 0.83060469323316066, + 0.96623475710156437, +] +legendre_adot_dict[6] = [ + [ + -41.99999999999993605, + -18.91598687354210995, + 6.27707018598655964, + -4.25786745784672149, + 4.25786745784724019, + -6.27707018598746203, + 18.91598687353456398, + ], + [ + 46.36937471058886473, + 15.32559943877114961, + -12.27449151016940476, + 7.31525360482739728, + -7.05084663619737029, + 10.24428508749317501, + -30.68867482146182368, + ], + [ + -5.55188252334920573, + 4.42878459321007956, + 3.55364671186248504, + -7.21466693863796849, + 5.51203293000726458, + -7.41573149790712893, + 21.57160573829168371, + ], + [ + 1.62055582214317195, + -1.13579253120089896, + 3.10459433591240153, + 2.12075384936568234, + -6.81758425836822912, + 7.14920104705762327, + -19.25696288834685177, + ], + [ + -0.61233946054691502, + 0.41365582265250200, + -0.89624888663108360, + 2.57607655963439974, + 2.12075384936873057, + -9.35754649633609681, + 19.97909959689369330, + ], + [ + 0.23091612123217989, + -0.15373639381797002, + 0.30843807418374597, + -0.69100775654152158, + 2.39364176517328087, + 3.55364671185861347, + -25.84665393768474928, + ], + [ + -0.05662467006817981, + 0.03747594392724378, + -0.07300891114466557, + 0.15145813919897436, + -0.41586510782973252, + 2.10321533382305148, + 15.32559943877357078, + ], +] +legendre_adotdot_dict[6] = [ + [ + 840.00000000000034106, + 540.64365692702745037, + -29.49852090486558609, + 4.30940943239363605, + 4.30940943240420893, + -29.49852090482625044, + 540.64365692693490928, + ], + [ + -1148.45314479649096029, + -710.55980476921922673, + 93.76115559609706906, + -11.14423553547817392, + -5.82317129649572962, + 47.09684608675820527, + -874.82230924609734757, + ], + [ + 400.80870642748203636, + 201.05438486370465512, + -107.78321107866594275, + 37.68889433782248943, + -1.12326115476952282, + -30.27496837960165976, + 607.05266152757224063, + ], + [ + -127.61291622770889376, + -41.36116953176949096, + 51.45159825594288350, + -58.52913567412112172, + 28.22512610388042731, + 19.03118936364580804, + -524.47443280328877790, + ], + [ + 49.45902404982912515, + 14.09194011206182040, + -10.35399107180189304, + 32.51797774040460354, + -54.23628403759174432, + 22.06641782049389278, + 497.20520338354441492, + ], + [ + -18.84093488066641342, + -5.09807019969807662, + 3.12511118370701624, + -6.00264539161375055, + 32.80951010098536358, + -74.38313151534973144, + -411.09634686360561773, + ], + [ + 4.63926542755519922, + 1.22906259789328987, + -0.70214198041270404, + 1.15973509059472679, + -4.16132914840768731, + 45.96216752888985013, + 165.49156707494972807, + ], +] +legendre_afinal_dict[6] = [ + 1.00000000000023448, + -1.62038592448029561, + 1.13226285721569386, + -0.99615775754512259, + 0.99615775754528868, + -1.13226285721527953, + 1.62038592448068686, +] -legendre_tau_dict[7] = [0.00000000000000000, 0.02544604382862073, - 0.12923440720030088, 0.29707742431132150, - 0.49999999999991773, 0.70292257568894334, - 0.87076559279929833, 0.97455395617159768, ] -legendre_adot_dict[7] = [[-56.00000000000002132, -24.95699224983718167, - 7.97122139249848516, -5.00831198289676394, - 4.37500000000198241, -5.00831198289225199, - 7.97122139249369610, -24.95699225001910193, ], - [61.88240931737417583, 20.16247512645392703, - -15.62938350372294138, 8.62518797240788615, - -7.25855126339792633, 8.18261458478745851, - -12.92982669404072027, 40.35246077250539543, ], - [-7.51136417565297521, 5.93964959590168462, - 4.44314605069609136, -8.60507619697540882, - 5.72724300702663669, -5.95689502166200402, - 9.08643835931158250, -27.93020953999701916, ], - [2.26239965382932429, -1.57134500657698739, - 4.12514030348138228, 2.39437882284758219, - -7.24530497884105618, 5.83011981449630134, - -8.13184727997964174, 24.12919609619037331, ], - [-0.91428571428556327, 0.61175686984007160, - -1.27015154289550058, 3.35183473001727927, - 2.00000000001065326, -7.93086282864597791, - 8.55812538746169338, -23.42957835555795398, ], - [0.40410445842773468, -0.26626825561008405, - 0.51006822163588073, -1.04136216880175159, - 3.06209619082384332, 2.39437882281619707, - -11.74691936186807517, 25.43427284718976011, ], - [-0.16545222365648835, 0.10823443910957012, - -0.20014625792467550, 0.37364484914258889, - -0.85000700650864436, 3.02182602446248127, - 4.44314605078047542, -33.76162469667988120, ], - [0.04218868396380355, -0.02751051928098867, - 0.05010533623131365, -0.09029602574135479, - 0.18952405088461124, -0.53286941335658655, - 2.74966214585377111, 20.16247512639485961, ]] -legendre_adotdot_dict[7] = [[1512.00000000000136424, 955.17220976465057447, - -52.52606617900892161, 9.73362464766137236, - 0.00000000009731593, -9.73362464726801591, - 52.52606618036497821, -955.17220976860744486, ], - [-2067.01606297140551760, -1253.38842277582011775, - 162.29068757425784497, -22.20263024048540501, - 1.55684226134417258, 15.02840159386050800, - -84.30664536676113130, 1542.17857544127218716, ], - [725.02875733512803436, 353.97302134103586013, - -181.99436659032426178, 61.32957187503291152, - -7.98517305023528934, -7.75911289876353294, - 56.23751353436750833, -1060.20229529192101836, ], - [-238.15771727410418634, -74.93409607897174851, - 85.81194610517053434, -90.22157046901992317, - 42.42833078878044262, -0.81172913281977799, - -43.91260651839991169, 901.77606559404171094, ], - [98.74285714284027904, 27.24730481156274209, - -18.13844470779939400, 49.08672625341207407, - -71.99999999991771915, 40.18741229017862793, - 29.88538722871460607, -846.05300126176780395, ], - [-44.10991564822214883, -11.52331314083262725, - 6.31082242915802993, -10.11865122731538236, - 42.42833078884853393, -80.91464837445812464, - 35.58851715762842360, 838.36528265599690712, ], - [18.15063352060668578, 4.62062771987776166, - -2.31837696631370704, 3.09190071157253854, - -7.98517305038422975, 50.47855826441588079, - -123.43847608959974593, -710.84990166802811018], - [-4.63855210484354519, -1.16733164150181246, - 0.56379833485978015, -0.69897155085565510, - 1.55684226147673055, -6.47525709510402336, - 77.42024387376613959, 289.95748429913328437, ]] -legendre_afinal_dict[7] = [-0.99999999999388223, 1.61577764856546224, - -1.11479680011004589, 0.95616200868280021, - -0.91428571427904048, 0.95616200868308665, - -1.11479680010902915, 1.61577764856526751, ] +legendre_tau_dict[7] = [ + 0.00000000000000000, + 0.02544604382862073, + 0.12923440720030088, + 0.29707742431132150, + 0.49999999999991773, + 0.70292257568894334, + 0.87076559279929833, + 0.97455395617159768, +] +legendre_adot_dict[7] = [ + [ + -56.00000000000002132, + -24.95699224983718167, + 7.97122139249848516, + -5.00831198289676394, + 4.37500000000198241, + -5.00831198289225199, + 7.97122139249369610, + -24.95699225001910193, + ], + [ + 61.88240931737417583, + 20.16247512645392703, + -15.62938350372294138, + 8.62518797240788615, + -7.25855126339792633, + 8.18261458478745851, + -12.92982669404072027, + 40.35246077250539543, + ], + [ + -7.51136417565297521, + 5.93964959590168462, + 4.44314605069609136, + -8.60507619697540882, + 5.72724300702663669, + -5.95689502166200402, + 9.08643835931158250, + -27.93020953999701916, + ], + [ + 2.26239965382932429, + -1.57134500657698739, + 4.12514030348138228, + 2.39437882284758219, + -7.24530497884105618, + 5.83011981449630134, + -8.13184727997964174, + 24.12919609619037331, + ], + [ + -0.91428571428556327, + 0.61175686984007160, + -1.27015154289550058, + 3.35183473001727927, + 2.00000000001065326, + -7.93086282864597791, + 8.55812538746169338, + -23.42957835555795398, + ], + [ + 0.40410445842773468, + -0.26626825561008405, + 0.51006822163588073, + -1.04136216880175159, + 3.06209619082384332, + 2.39437882281619707, + -11.74691936186807517, + 25.43427284718976011, + ], + [ + -0.16545222365648835, + 0.10823443910957012, + -0.20014625792467550, + 0.37364484914258889, + -0.85000700650864436, + 3.02182602446248127, + 4.44314605078047542, + -33.76162469667988120, + ], + [ + 0.04218868396380355, + -0.02751051928098867, + 0.05010533623131365, + -0.09029602574135479, + 0.18952405088461124, + -0.53286941335658655, + 2.74966214585377111, + 20.16247512639485961, + ], +] +legendre_adotdot_dict[7] = [ + [ + 1512.00000000000136424, + 955.17220976465057447, + -52.52606617900892161, + 9.73362464766137236, + 0.00000000009731593, + -9.73362464726801591, + 52.52606618036497821, + -955.17220976860744486, + ], + [ + -2067.01606297140551760, + -1253.38842277582011775, + 162.29068757425784497, + -22.20263024048540501, + 1.55684226134417258, + 15.02840159386050800, + -84.30664536676113130, + 1542.17857544127218716, + ], + [ + 725.02875733512803436, + 353.97302134103586013, + -181.99436659032426178, + 61.32957187503291152, + -7.98517305023528934, + -7.75911289876353294, + 56.23751353436750833, + -1060.20229529192101836, + ], + [ + -238.15771727410418634, + -74.93409607897174851, + 85.81194610517053434, + -90.22157046901992317, + 42.42833078878044262, + -0.81172913281977799, + -43.91260651839991169, + 901.77606559404171094, + ], + [ + 98.74285714284027904, + 27.24730481156274209, + -18.13844470779939400, + 49.08672625341207407, + -71.99999999991771915, + 40.18741229017862793, + 29.88538722871460607, + -846.05300126176780395, + ], + [ + -44.10991564822214883, + -11.52331314083262725, + 6.31082242915802993, + -10.11865122731538236, + 42.42833078884853393, + -80.91464837445812464, + 35.58851715762842360, + 838.36528265599690712, + ], + [ + 18.15063352060668578, + 4.62062771987776166, + -2.31837696631370704, + 3.09190071157253854, + -7.98517305038422975, + 50.47855826441588079, + -123.43847608959974593, + -710.84990166802811018, + ], + [ + -4.63855210484354519, + -1.16733164150181246, + 0.56379833485978015, + -0.69897155085565510, + 1.55684226147673055, + -6.47525709510402336, + 77.42024387376613959, + 289.95748429913328437, + ], +] +legendre_afinal_dict[7] = [ + -0.99999999999388223, + 1.61577764856546224, + -1.11479680011004589, + 0.95616200868280021, + -0.91428571427904048, + 0.95616200868308665, + -1.11479680010902915, + 1.61577764856526751, +] -legendre_tau_dict[8] = [0.00000000000000000, 0.01985507175123186, - 0.10166676129318630, 0.23723379504185715, - 0.40828267875199609, 0.59171732124838583, - 0.76276620495730396, 0.89833323870751702, - 0.98014492824851973, ] -legendre_adot_dict[8] = [[-72.00000000000004263, -31.86270482565715412, - 9.92334055134359971, -5.93566167153326774, - 4.77763876751639316, -4.77763876749992278, - 5.93566167152931712, -9.92334055137200011, - 31.86270482530939319, ], - [79.61124909724310328, 25.69261129630993423, - -19.49249456998708752, 10.23940120319386438, - -7.93798136828674217, 7.81415574863481766, - -9.63317863204990488, 16.04021892350741751, - -51.40631755760969668, ], - [-9.74954002373760176, 7.66481813729861372, - 5.47461453891830097, -10.29567243097007001, - 6.30583618259248624, -5.71807461469641876, - 6.78823557065591743, -11.09128684263623477, - 35.23749930320694546, ], - [2.99348956666212596, -2.06676630520107674, - 5.28490391970722939, 2.76313437015332930, - -8.09856974150162934, 5.66351220251587861, - -6.11808325036549583, 9.57597223864975611, - -29.85316461647738606, ], - [-1.25563894938631093, 0.83496783603208580, - -1.68681681755419466, 4.22037639163487555, - 2.06963979464328407, -7.90081639272045244, - 6.54772240184247778, -9.32567356746433340, - 27.99672577169900123, ], - [0.59780304784827276, -0.39132273505648596, - 0.72822972542038944, -1.40514792078421857, - 3.76153680354067799, 2.06963979452872326, - -9.36295087277397542, 10.28426065963418168, - -28.44037087278024600, ], - [-0.28956627242951177, 0.18808607902682645, - -0.33706178120576785, 0.59181451004613739, - -1.21539550814094355, 3.65045304711559337, - 2.76313437031730214, -14.52381437699152933, - 31.73184484258205629, ], - [0.12487260336651484, -0.08078444839529625, - 0.14205776470902098, -0.23893707246388124, - 0.44651701311020470, -1.03427858098404957, - 3.74637393269219920, 5.47461453867772274, - -42.82153299233600308, ], - [-0.03266906956651982, 0.02109496564258326, - -0.03677333135146867, 0.06069262072337806, - -0.10922194347498948, 0.23304756309989128, - -0.66691519188618820, 3.48904897803242653, - 25.69261129644409536, ]] -legendre_adotdot_dict[8] = [[2520.00000000000090949, 1572.25586665412492948, - -86.56014613860952522, 17.23854645588744461, - -3.62760030437402747, -3.62760030387107690, - 17.23854645773781158, -86.56014613432716942, - 1572.25586664403408577, ], - [-3444.78427169258975482, -2060.96622834076197250, - 263.09322421714250595, -37.62226036411175301, - 8.01486084150565148, 5.01617110615279671, - -27.30193785095434578, 139.10983548470085225, - -2534.46090176581992637, ], - [1212.13971195587168950, 581.23547563107376845, - -290.55498788776435504, 94.99384966198795155, - -15.03020634190829696, -0.33203744280103820, - 16.97741290928615854, -93.59679903671849388, - 1730.46278385835080371, ], - [-405.82587761327783937, -125.21659712030918854, - 135.83301402521612999, -135.56529566622776883, - 61.17079963492420802, -8.51073841508491569, - -10.52680040431016550, 75.87967463193643880, - -1453.64344528041101512, ], - [174.66117736495289137, 47.20422794611815220, - -29.47214380806019562, 72.66995469746716196, - -98.05855537444153924, 53.43943395699633925, - -0.75784903103797774, -64.04888961811138870, - 1340.70375614392560237, ], - [-84.06306918099681980, -21.47679674537697991, - 10.94561364337234011, -15.69308523747156414, - 56.58233626219350754, -94.91565306948129432, - 57.73471849138900325, 45.52235945297945818, - -1314.97632494899630728, ], - [40.93829020959897491, 10.17119383366238239, - -4.71026735117333573, 5.52277345783835472, - -11.88813680317882415, 57.79340124653427324, - -119.51572180418217783, 55.24307204231907065, - 1338.59804199367067667, ], - [-17.70364534892667407, -4.33504594341068561, - 1.91205346445837066, -2.04327807251040916, - 3.67058914491560628, -11.02757975338868057, - 75.97315868135521555, -195.04613538682781382, - -1153.56235417437846991, ], - [4.63768430536677023, 1.12790408487965221, - -0.48636016458222553, 0.49879506714018795, - -0.83408705964564955, 2.16460267487280067, - -9.82152744934405852, 123.49702856379471427, - 474.62257752963921575, ]] -legendre_afinal_dict[8] = [1.00000000001314504, -1.61270748589470259, - 1.10338136852272584, -0.93102825702025305, - 0.86638605190870366, -0.86638605191483098, - 0.93102825702786762, -1.10338136853157120, - 1.61270748588940305, ] +legendre_tau_dict[8] = [ + 0.00000000000000000, + 0.01985507175123186, + 0.10166676129318630, + 0.23723379504185715, + 0.40828267875199609, + 0.59171732124838583, + 0.76276620495730396, + 0.89833323870751702, + 0.98014492824851973, +] +legendre_adot_dict[8] = [ + [ + -72.00000000000004263, + -31.86270482565715412, + 9.92334055134359971, + -5.93566167153326774, + 4.77763876751639316, + -4.77763876749992278, + 5.93566167152931712, + -9.92334055137200011, + 31.86270482530939319, + ], + [ + 79.61124909724310328, + 25.69261129630993423, + -19.49249456998708752, + 10.23940120319386438, + -7.93798136828674217, + 7.81415574863481766, + -9.63317863204990488, + 16.04021892350741751, + -51.40631755760969668, + ], + [ + -9.74954002373760176, + 7.66481813729861372, + 5.47461453891830097, + -10.29567243097007001, + 6.30583618259248624, + -5.71807461469641876, + 6.78823557065591743, + -11.09128684263623477, + 35.23749930320694546, + ], + [ + 2.99348956666212596, + -2.06676630520107674, + 5.28490391970722939, + 2.76313437015332930, + -8.09856974150162934, + 5.66351220251587861, + -6.11808325036549583, + 9.57597223864975611, + -29.85316461647738606, + ], + [ + -1.25563894938631093, + 0.83496783603208580, + -1.68681681755419466, + 4.22037639163487555, + 2.06963979464328407, + -7.90081639272045244, + 6.54772240184247778, + -9.32567356746433340, + 27.99672577169900123, + ], + [ + 0.59780304784827276, + -0.39132273505648596, + 0.72822972542038944, + -1.40514792078421857, + 3.76153680354067799, + 2.06963979452872326, + -9.36295087277397542, + 10.28426065963418168, + -28.44037087278024600, + ], + [ + -0.28956627242951177, + 0.18808607902682645, + -0.33706178120576785, + 0.59181451004613739, + -1.21539550814094355, + 3.65045304711559337, + 2.76313437031730214, + -14.52381437699152933, + 31.73184484258205629, + ], + [ + 0.12487260336651484, + -0.08078444839529625, + 0.14205776470902098, + -0.23893707246388124, + 0.44651701311020470, + -1.03427858098404957, + 3.74637393269219920, + 5.47461453867772274, + -42.82153299233600308, + ], + [ + -0.03266906956651982, + 0.02109496564258326, + -0.03677333135146867, + 0.06069262072337806, + -0.10922194347498948, + 0.23304756309989128, + -0.66691519188618820, + 3.48904897803242653, + 25.69261129644409536, + ], +] +legendre_adotdot_dict[8] = [ + [ + 2520.00000000000090949, + 1572.25586665412492948, + -86.56014613860952522, + 17.23854645588744461, + -3.62760030437402747, + -3.62760030387107690, + 17.23854645773781158, + -86.56014613432716942, + 1572.25586664403408577, + ], + [ + -3444.78427169258975482, + -2060.96622834076197250, + 263.09322421714250595, + -37.62226036411175301, + 8.01486084150565148, + 5.01617110615279671, + -27.30193785095434578, + 139.10983548470085225, + -2534.46090176581992637, + ], + [ + 1212.13971195587168950, + 581.23547563107376845, + -290.55498788776435504, + 94.99384966198795155, + -15.03020634190829696, + -0.33203744280103820, + 16.97741290928615854, + -93.59679903671849388, + 1730.46278385835080371, + ], + [ + -405.82587761327783937, + -125.21659712030918854, + 135.83301402521612999, + -135.56529566622776883, + 61.17079963492420802, + -8.51073841508491569, + -10.52680040431016550, + 75.87967463193643880, + -1453.64344528041101512, + ], + [ + 174.66117736495289137, + 47.20422794611815220, + -29.47214380806019562, + 72.66995469746716196, + -98.05855537444153924, + 53.43943395699633925, + -0.75784903103797774, + -64.04888961811138870, + 1340.70375614392560237, + ], + [ + -84.06306918099681980, + -21.47679674537697991, + 10.94561364337234011, + -15.69308523747156414, + 56.58233626219350754, + -94.91565306948129432, + 57.73471849138900325, + 45.52235945297945818, + -1314.97632494899630728, + ], + [ + 40.93829020959897491, + 10.17119383366238239, + -4.71026735117333573, + 5.52277345783835472, + -11.88813680317882415, + 57.79340124653427324, + -119.51572180418217783, + 55.24307204231907065, + 1338.59804199367067667, + ], + [ + -17.70364534892667407, + -4.33504594341068561, + 1.91205346445837066, + -2.04327807251040916, + 3.67058914491560628, + -11.02757975338868057, + 75.97315868135521555, + -195.04613538682781382, + -1153.56235417437846991, + ], + [ + 4.63768430536677023, + 1.12790408487965221, + -0.48636016458222553, + 0.49879506714018795, + -0.83408705964564955, + 2.16460267487280067, + -9.82152744934405852, + 123.49702856379471427, + 474.62257752963921575, + ], +] +legendre_afinal_dict[8] = [ + 1.00000000001314504, + -1.61270748589470259, + 1.10338136852272584, + -0.93102825702025305, + 0.86638605190870366, + -0.86638605191483098, + 0.93102825702786762, + -1.10338136853157120, + 1.61270748588940305, +] -legendre_tau_dict[9] = [0.00000000000000000, 0.01591988024618697, - 0.08198444633668106, 0.19331428364971090, - 0.33787328829809649, 0.49999999999998818, - 0.66212671170135462, 0.80668571635292641, - 0.91801555365888399, 0.98408011975617415, ] -legendre_adot_dict[9] = [[-90.00000000000000000, -39.63264119105327410, - 12.12851059859160330, -7.01511384936590332, - 5.34993851876735960, -4.92187499998496492, - 5.34993851884806304, -7.01511384918430281, - 12.12851059864581771, -39.63264119586531109, ], - [99.55598909732766799, 31.91536013986444686, - -23.85489675486203964, 12.11618628428273325, - -8.89850012739771046, 8.05731837847299914, - -8.68811464214157070, 11.34222819973757623, - -19.56194797369650473, 63.84742973644408437, ], - [-12.26675218688180102, 9.60473274422036560, - 6.64337085452452136, -12.25034031333548867, - 7.10415389171663669, -5.92064746522829122, - 6.14067981568525134, -7.85310000897673888, - 13.39356328769438598, -43.48024012045856779, ], - [3.81449796394589002, -2.62273389966038506, - 6.58610571835654568, 3.20628182148887131, - -9.22059257050451109, 5.91709260879362375, - -5.57175633530281722, 6.80325700462606520, - -11.32921505756870495, 36.36946459829309219, ], - [-1.63735741828795756, 1.08416937549847714, - -2.14973507073915204, 5.18980295717751616, - 2.23498718411912378, -8.39737137097137598, - 6.04369696171071880, -6.67786023102398563, - 10.61745319758343697, -33.38945107441553262, ], - [0.81269841269890952, -0.52963293110420040, - 0.96659804977583874, -1.79681984060012656, - 4.53051415078996200, 1.99999999999004086, - -8.87840069313420521, 7.49799173065867652, - -10.82341936169495789, 32.73901752203290272, ], - [-0.42635267436474061, 0.27563309854307844, - -0.48385422077774165, 0.81660051999053529, - -1.57372259341792353, 4.28505213279403385, - 2.23498718427579801, -11.05106266780826729, - 12.28333404764205561, -34.19798735097366915, ], - [0.21905670058542259, -0.14099568546145785, - 0.24246040539180438, -0.39069336168487667, - 0.68134087316442227, -1.41797294259394535, - 4.33017710836382896, 3.20628182069896717, - -17.67286037208154426, 38.85120281315978730, ], - [-0.09783459447325787, 0.06281779684442529, - -0.10682157859191509, 0.16806687860617453, - -0.27984054103791955, 0.52875030544621326, - -1.24331461720656522, 4.56530718293380744, - 6.64337085675415029, -53.02215506490960450, ], - [0.02605469944986686, -0.01670944769147080, - 0.02826199833055563, -0.04397109655906577, - 0.07172121380402063, -0.13034664667884382, - 0.28210669905274688, -0.81792918111806878, - 4.32121077772326245, 31.91536013826765483, ]] -legendre_adotdot_dict[9] = [[3960.00000000000636646, 2449.23244343801252398, - -134.72506496165851786, 27.59243049492033606, - -7.75422491796734903, 0.00000000015825208, - 7.75422491979861661, -27.59243048269217979, - 134.72506501639145426, -2449.23244361997058149, ], - [-5412.95014989901119407, -3208.17855536706520070, - 405.21532815706814290, -58.90585893610386847, - 15.50211814563954249, -1.05991994689429703, - -11.94607131914472120, 44.04606764111395023, - -216.54454283843188023, 3943.53308403724895470], - [1908.76955531210796835, 903.84506570014468707, - -442.97475672738892172, 141.51681733822647402, - -23.76992618115173173, 4.64481285598685645, - 6.27911597627007723, -28.68584939636843956, - 145.91599082331322279, -2678.97675480543648519], - [-647.14542122413854486, -196.98051639768942778, - 205.82487724277052621, -198.13462877746178492, - 86.35275504153446491, -14.91896698109690078, - -1.13594661549632292, 21.44316278575911383, - -119.26246439417218426, 2229.50369928581039858], - [285.03219640428966386, 75.93825793596778340, - -45.36507552646082786, 105.08180522210406593, - -134.00851970147965631, 70.00074073890996829, - -10.26244339015119067, -14.33379414972739596, - 104.46842886771196390, -2027.93288474663859233], - [-143.03492063500803511, -35.99505507929153225, - 17.46763759164375074, -23.23988396359330011, - 76.13984118543359614, -117.33333333354978834, - 69.83799490112349417, -0.81555950977733005, - -92.02320890177000479, 1954.49226417083968954, ], - [75.45565334056746565, 18.44693932638234912, - -8.09689973784180950, 8.72020106556462338, - -16.74124431754972875, 70.00074073864384161, - -127.52971877335448880, 82.02781000701139646, - 67.20025308688440191, -1970.44156610286177056, ], - [-38.88710314434736404, -9.35646156393163864, - 3.89064078190580886, -3.77926700503652313, - 5.95224352173367777, -14.91896698113937703, - 79.26456490565843183, -172.91219898934127741, - 82.67177206120041433, 2041.87964445595162033, ], - [17.39708335182820420, 4.14897602736370885, - -1.67485523073538189, 1.54156343853406241, - -2.21561079444067488, 4.64481285590496640, - -15.27519941418322347, 111.28940449399883050, - -295.38391066845593969, -1779.28066500595127764], - [-4.63689350629434749, -1.10109401989310340, - 0.43816841069640766, -0.39317887715633404, - 0.54256801829518952, -1.05991994662241940, - 3.01347881039413501, -14.46661239681800382, - 188.23261695225846779, 736.45562233795976681, ]] -legendre_afinal_dict[9] = [-0.99999999984402166, 1.61055933587154243, - -1.09549656564146680, 0.91410685277652037, - -0.83551882927246868, 0.81269841260834375, - -0.83551882923754750, 0.91410685274218284, - -1.09549656564436848, 1.61055933583909083, ] +legendre_tau_dict[9] = [ + 0.00000000000000000, + 0.01591988024618697, + 0.08198444633668106, + 0.19331428364971090, + 0.33787328829809649, + 0.49999999999998818, + 0.66212671170135462, + 0.80668571635292641, + 0.91801555365888399, + 0.98408011975617415, +] +legendre_adot_dict[9] = [ + [ + -90.00000000000000000, + -39.63264119105327410, + 12.12851059859160330, + -7.01511384936590332, + 5.34993851876735960, + -4.92187499998496492, + 5.34993851884806304, + -7.01511384918430281, + 12.12851059864581771, + -39.63264119586531109, + ], + [ + 99.55598909732766799, + 31.91536013986444686, + -23.85489675486203964, + 12.11618628428273325, + -8.89850012739771046, + 8.05731837847299914, + -8.68811464214157070, + 11.34222819973757623, + -19.56194797369650473, + 63.84742973644408437, + ], + [ + -12.26675218688180102, + 9.60473274422036560, + 6.64337085452452136, + -12.25034031333548867, + 7.10415389171663669, + -5.92064746522829122, + 6.14067981568525134, + -7.85310000897673888, + 13.39356328769438598, + -43.48024012045856779, + ], + [ + 3.81449796394589002, + -2.62273389966038506, + 6.58610571835654568, + 3.20628182148887131, + -9.22059257050451109, + 5.91709260879362375, + -5.57175633530281722, + 6.80325700462606520, + -11.32921505756870495, + 36.36946459829309219, + ], + [ + -1.63735741828795756, + 1.08416937549847714, + -2.14973507073915204, + 5.18980295717751616, + 2.23498718411912378, + -8.39737137097137598, + 6.04369696171071880, + -6.67786023102398563, + 10.61745319758343697, + -33.38945107441553262, + ], + [ + 0.81269841269890952, + -0.52963293110420040, + 0.96659804977583874, + -1.79681984060012656, + 4.53051415078996200, + 1.99999999999004086, + -8.87840069313420521, + 7.49799173065867652, + -10.82341936169495789, + 32.73901752203290272, + ], + [ + -0.42635267436474061, + 0.27563309854307844, + -0.48385422077774165, + 0.81660051999053529, + -1.57372259341792353, + 4.28505213279403385, + 2.23498718427579801, + -11.05106266780826729, + 12.28333404764205561, + -34.19798735097366915, + ], + [ + 0.21905670058542259, + -0.14099568546145785, + 0.24246040539180438, + -0.39069336168487667, + 0.68134087316442227, + -1.41797294259394535, + 4.33017710836382896, + 3.20628182069896717, + -17.67286037208154426, + 38.85120281315978730, + ], + [ + -0.09783459447325787, + 0.06281779684442529, + -0.10682157859191509, + 0.16806687860617453, + -0.27984054103791955, + 0.52875030544621326, + -1.24331461720656522, + 4.56530718293380744, + 6.64337085675415029, + -53.02215506490960450, + ], + [ + 0.02605469944986686, + -0.01670944769147080, + 0.02826199833055563, + -0.04397109655906577, + 0.07172121380402063, + -0.13034664667884382, + 0.28210669905274688, + -0.81792918111806878, + 4.32121077772326245, + 31.91536013826765483, + ], +] +legendre_adotdot_dict[9] = [ + [ + 3960.00000000000636646, + 2449.23244343801252398, + -134.72506496165851786, + 27.59243049492033606, + -7.75422491796734903, + 0.00000000015825208, + 7.75422491979861661, + -27.59243048269217979, + 134.72506501639145426, + -2449.23244361997058149, + ], + [ + -5412.95014989901119407, + -3208.17855536706520070, + 405.21532815706814290, + -58.90585893610386847, + 15.50211814563954249, + -1.05991994689429703, + -11.94607131914472120, + 44.04606764111395023, + -216.54454283843188023, + 3943.53308403724895470, + ], + [ + 1908.76955531210796835, + 903.84506570014468707, + -442.97475672738892172, + 141.51681733822647402, + -23.76992618115173173, + 4.64481285598685645, + 6.27911597627007723, + -28.68584939636843956, + 145.91599082331322279, + -2678.97675480543648519, + ], + [ + -647.14542122413854486, + -196.98051639768942778, + 205.82487724277052621, + -198.13462877746178492, + 86.35275504153446491, + -14.91896698109690078, + -1.13594661549632292, + 21.44316278575911383, + -119.26246439417218426, + 2229.50369928581039858, + ], + [ + 285.03219640428966386, + 75.93825793596778340, + -45.36507552646082786, + 105.08180522210406593, + -134.00851970147965631, + 70.00074073890996829, + -10.26244339015119067, + -14.33379414972739596, + 104.46842886771196390, + -2027.93288474663859233, + ], + [ + -143.03492063500803511, + -35.99505507929153225, + 17.46763759164375074, + -23.23988396359330011, + 76.13984118543359614, + -117.33333333354978834, + 69.83799490112349417, + -0.81555950977733005, + -92.02320890177000479, + 1954.49226417083968954, + ], + [ + 75.45565334056746565, + 18.44693932638234912, + -8.09689973784180950, + 8.72020106556462338, + -16.74124431754972875, + 70.00074073864384161, + -127.52971877335448880, + 82.02781000701139646, + 67.20025308688440191, + -1970.44156610286177056, + ], + [ + -38.88710314434736404, + -9.35646156393163864, + 3.89064078190580886, + -3.77926700503652313, + 5.95224352173367777, + -14.91896698113937703, + 79.26456490565843183, + -172.91219898934127741, + 82.67177206120041433, + 2041.87964445595162033, + ], + [ + 17.39708335182820420, + 4.14897602736370885, + -1.67485523073538189, + 1.54156343853406241, + -2.21561079444067488, + 4.64481285590496640, + -15.27519941418322347, + 111.28940449399883050, + -295.38391066845593969, + -1779.28066500595127764, + ], + [ + -4.63689350629434749, + -1.10109401989310340, + 0.43816841069640766, + -0.39317887715633404, + 0.54256801829518952, + -1.05991994662241940, + 3.01347881039413501, + -14.46661239681800382, + 188.23261695225846779, + 736.45562233795976681, + ], +] +legendre_afinal_dict[9] = [ + -0.99999999984402166, + 1.61055933587154243, + -1.09549656564146680, + 0.91410685277652037, + -0.83551882927246868, + 0.81269841260834375, + -0.83551882923754750, + 0.91410685274218284, + -1.09549656564436848, + 1.61055933583909083, +] -legendre_tau_dict[10] = [0.00000000000000000, 0.01304673574141415, - 0.06746831665550607, 0.16029521585049855, - 0.28330230293545766, 0.42556283050819915, - 0.57443716949547696, 0.71669769705075892, - 0.83970478417600680, 0.93253168331535052, - 0.98695326427132968, ] -legendre_adot_dict[10] = [[-110.00000000000000000, -48.26653972946562732, - 14.58422455235377413, -8.23538166798414295, - 6.04826207842279473, -5.26156988704238415, - 5.26156988692375194, -6.04826207854462439, - 8.23538166830557827, -14.58422454882743580, - 48.26653977557000985, ], - [121.71668148641451523, 38.83037195180988022, - -28.71203882173124100, 14.23657237574714429, - -10.06834355550596172, 8.61965936425299617, - -8.54958113447447943, 9.78275763528138498, - -13.28423132235461424, 23.48844594115318785, - -77.67431735310977103, ], - [-15.06318841233079908, 11.75963007377668035, - 7.94706076240921533, -14.45264154521517597, - 8.06821861591184231, -6.35474528458405885, - 6.05889437968213329, -6.78555270364352481, - 9.10074268431098687, -15.97775657714056585, - 52.65200477837512949, ], - [4.72579792623377593, -3.23961139769343909, - 8.02980653270048705, 3.71469204823513088, - -10.55227936000298250, 6.39426319021315681, - -5.52846235915797024, 5.90164742527308217, - -7.71035559947130622, 13.34112152034829357, - -43.65284594318689670, ], - [-2.06000933721320978, 1.35985522900652800, - -2.66061894362531248, 6.26315920858189656, - 2.46254338424009367, -9.18573497129368377, - 6.05874885367380678, -5.83716019930944618, - 7.25338273975789427, -12.22554045458789140, - 39.50979365427744483, ], - [1.04943919172252786, -0.68175463794580859, - 1.22717505143961469, -2.22249620311669460, - 5.37919464231942435, 2.04533185354943869, - -9.06690360499340997, 6.64956687773602972, - -7.45730831274040451, 11.98082234878989460, - -37.89646608805214356, ], - [-0.57596963692359993, 0.37112921771794782, - -0.64216120107171593, 1.05462287543726285, - -1.94728092377517736, 4.97623989924771859, - 2.04533185206467927, -10.08148059787618678, - 8.62518164085033590, -12.56583619935860696, - 38.20709151175913121, ], - [0.32188251103402238, -0.20645439625097245, - 0.34963785032787242, -0.54732920655391304, - 0.91207343001765540, -1.77426384051519537, - 4.90124995783930828, 2.46254338824522812, - -12.96921273733125624, 14.53652154514421113, - -40.66319448303887185, ], - [-0.17221184261765482, 0.11015691724576598, - -0.18425638727037441, 0.28097150273762012, - -0.44532931983758861, 0.78184280770199233, - -1.64764363828474392, 5.09596125379848708, - 3.71469203771722212, -21.18667167713655530, - 46.78230043223167911, ], - [0.07884777913692809, -0.05035660797934716, - 0.08363505704009452, -0.12569182927851971, - 0.19405981027277527, -0.32475123821449697, - 0.62060214289693660, -1.47672572334488761, - 5.47759069482251437, 7.94706078390704374, - -64.36127821899061985, ], - [-0.02126966545649496, 0.01357337977838241, - -0.02246445257250070, 0.03352244140928518, - -0.05111880205551231, 0.08372810675104618, - -0.15380633584957715, 0.33670472285016945, - -0.98586349356738034, 5.24605731940845121, - 38.83037193772991458, ]] -legendre_adotdot_dict[10] = [[5940.00000000000909495, 3650.60621119290635761, - -200.52466195021406747, 41.56886837981528515, - -12.91007541273756942, 3.20426898918685765, - 3.20426897979450587, -12.91007545982938609, - 41.56886820874206023, -200.52466253305647115, - 3650.60621304507822060, ], - [-8119.10522564299571968, - -4779.27030063000620430, 598.81843352328723995, - -87.59904001603354118, 24.92233012101314671, - -6.53052952505277062, -4.51486906193804316, - 20.37521927781926934, -66.55405223726302211, - 322.23777377075930417, -5872.73443966030481533], - [2867.37516521958332305, 1345.42951605546863902, - -649.94810035075261112, 204.01485853159874750, - -35.02651281911676051, 9.49687432999326120, - 0.88246687665969148, -12.51602976308367943, - 44.04307874586584148, -217.01232175581662887, - 3974.46886194086710020, ], - [-980.71186352483744031, -295.59259832645250299, - 300.63274824953089137, -281.96171854019064540, - 119.60100102986473303, -22.05312417493166777, - 4.08331288535759995, 7.85253436331038301, - -34.58596905278147915, 177.49349972571496892, - -3284.49966308706598284, ], - [438.65921908907404259, 115.67084484753382867, - -66.94251026972102636, 148.36553501226637763, - -181.80009152007932016, 91.56386361022600795, - -16.83729166214823181, -1.81163895240644024, - 27.81573489937818522, -156.65252812072441202, - 2956.06055632424750002, ], - [-225.94461629644376899, -56.25092040827590267, - 26.35878647458429214, -33.26841222999621550, - 102.11755671384275956, -148.44767755223068662, - 84.71647733172090966, -12.93062571870521538, - -19.38991195906297094, 143.16011660558126550, - -2808.05844193727216407, ], - [124.70798457908202295, 30.14434976384028175, - -12.73992408228727413, 12.92824859689875439, - -22.96770178736812795, 87.20767047396154226, - -145.95648441593758093, 92.08048065397863979, - -0.95025169407655596, -129.54125424532398370, - 2781.95187139803192622, ], - [-69.91591454459906174, -16.62021110238747212, - 6.63427221207166440, -6.03370483302470006, - 8.70100766266027392, -19.44652098139492580, - 88.95463428189493982, -171.28744490161898284, - 114.51609527899320540, 96.34429015810216868, - -2857.00992207556782887, ], - [37.47643302516524244, 8.82137958864823446, - -3.40579539515837126, 2.91455014697952919, - -3.79403081005474263, 6.97397976037029821, - -19.16245728452330610, 107.95443588945002489, - -244.46119936197482048, 119.73345306595348347, - 2997.72844436084369590, ], - [-17.17740663749788510, -4.02026384351714050, - 1.52266744980528657, -1.25933966773422412, - 1.55357726414079522, -2.60959678371050963, - 6.00481066076588021, -20.95690586320498028, - 158.71244001051152850, -431.41311106981731882, - -2633.05960863876953226, ], - [4.63622473345746045, 1.08199286224006919, - -0.40591586114592104, 0.33015461943831514, - -0.39706044187436440, 0.64079185494469826, - -1.37486858908956577, 4.15005048165886947, - -20.71483283092484839, 276.17474439959875099, - 1094.54612834228328211, ]] -legendre_afinal_dict[10] = [0.99999999956071406, -1.60899754383213178, - 1.08981601730200239, -0.90212990629010470, - 0.81429784307270081, -0.77746068027003834, - 0.77746068041548966, -0.81429784304977582, - 0.90212990635984425, -1.08981601689073226, - 1.60899754350164637, ] +legendre_tau_dict[10] = [ + 0.00000000000000000, + 0.01304673574141415, + 0.06746831665550607, + 0.16029521585049855, + 0.28330230293545766, + 0.42556283050819915, + 0.57443716949547696, + 0.71669769705075892, + 0.83970478417600680, + 0.93253168331535052, + 0.98695326427132968, +] +legendre_adot_dict[10] = [ + [ + -110.00000000000000000, + -48.26653972946562732, + 14.58422455235377413, + -8.23538166798414295, + 6.04826207842279473, + -5.26156988704238415, + 5.26156988692375194, + -6.04826207854462439, + 8.23538166830557827, + -14.58422454882743580, + 48.26653977557000985, + ], + [ + 121.71668148641451523, + 38.83037195180988022, + -28.71203882173124100, + 14.23657237574714429, + -10.06834355550596172, + 8.61965936425299617, + -8.54958113447447943, + 9.78275763528138498, + -13.28423132235461424, + 23.48844594115318785, + -77.67431735310977103, + ], + [ + -15.06318841233079908, + 11.75963007377668035, + 7.94706076240921533, + -14.45264154521517597, + 8.06821861591184231, + -6.35474528458405885, + 6.05889437968213329, + -6.78555270364352481, + 9.10074268431098687, + -15.97775657714056585, + 52.65200477837512949, + ], + [ + 4.72579792623377593, + -3.23961139769343909, + 8.02980653270048705, + 3.71469204823513088, + -10.55227936000298250, + 6.39426319021315681, + -5.52846235915797024, + 5.90164742527308217, + -7.71035559947130622, + 13.34112152034829357, + -43.65284594318689670, + ], + [ + -2.06000933721320978, + 1.35985522900652800, + -2.66061894362531248, + 6.26315920858189656, + 2.46254338424009367, + -9.18573497129368377, + 6.05874885367380678, + -5.83716019930944618, + 7.25338273975789427, + -12.22554045458789140, + 39.50979365427744483, + ], + [ + 1.04943919172252786, + -0.68175463794580859, + 1.22717505143961469, + -2.22249620311669460, + 5.37919464231942435, + 2.04533185354943869, + -9.06690360499340997, + 6.64956687773602972, + -7.45730831274040451, + 11.98082234878989460, + -37.89646608805214356, + ], + [ + -0.57596963692359993, + 0.37112921771794782, + -0.64216120107171593, + 1.05462287543726285, + -1.94728092377517736, + 4.97623989924771859, + 2.04533185206467927, + -10.08148059787618678, + 8.62518164085033590, + -12.56583619935860696, + 38.20709151175913121, + ], + [ + 0.32188251103402238, + -0.20645439625097245, + 0.34963785032787242, + -0.54732920655391304, + 0.91207343001765540, + -1.77426384051519537, + 4.90124995783930828, + 2.46254338824522812, + -12.96921273733125624, + 14.53652154514421113, + -40.66319448303887185, + ], + [ + -0.17221184261765482, + 0.11015691724576598, + -0.18425638727037441, + 0.28097150273762012, + -0.44532931983758861, + 0.78184280770199233, + -1.64764363828474392, + 5.09596125379848708, + 3.71469203771722212, + -21.18667167713655530, + 46.78230043223167911, + ], + [ + 0.07884777913692809, + -0.05035660797934716, + 0.08363505704009452, + -0.12569182927851971, + 0.19405981027277527, + -0.32475123821449697, + 0.62060214289693660, + -1.47672572334488761, + 5.47759069482251437, + 7.94706078390704374, + -64.36127821899061985, + ], + [ + -0.02126966545649496, + 0.01357337977838241, + -0.02246445257250070, + 0.03352244140928518, + -0.05111880205551231, + 0.08372810675104618, + -0.15380633584957715, + 0.33670472285016945, + -0.98586349356738034, + 5.24605731940845121, + 38.83037193772991458, + ], +] +legendre_adotdot_dict[10] = [ + [ + 5940.00000000000909495, + 3650.60621119290635761, + -200.52466195021406747, + 41.56886837981528515, + -12.91007541273756942, + 3.20426898918685765, + 3.20426897979450587, + -12.91007545982938609, + 41.56886820874206023, + -200.52466253305647115, + 3650.60621304507822060, + ], + [ + -8119.10522564299571968, + -4779.27030063000620430, + 598.81843352328723995, + -87.59904001603354118, + 24.92233012101314671, + -6.53052952505277062, + -4.51486906193804316, + 20.37521927781926934, + -66.55405223726302211, + 322.23777377075930417, + -5872.73443966030481533, + ], + [ + 2867.37516521958332305, + 1345.42951605546863902, + -649.94810035075261112, + 204.01485853159874750, + -35.02651281911676051, + 9.49687432999326120, + 0.88246687665969148, + -12.51602976308367943, + 44.04307874586584148, + -217.01232175581662887, + 3974.46886194086710020, + ], + [ + -980.71186352483744031, + -295.59259832645250299, + 300.63274824953089137, + -281.96171854019064540, + 119.60100102986473303, + -22.05312417493166777, + 4.08331288535759995, + 7.85253436331038301, + -34.58596905278147915, + 177.49349972571496892, + -3284.49966308706598284, + ], + [ + 438.65921908907404259, + 115.67084484753382867, + -66.94251026972102636, + 148.36553501226637763, + -181.80009152007932016, + 91.56386361022600795, + -16.83729166214823181, + -1.81163895240644024, + 27.81573489937818522, + -156.65252812072441202, + 2956.06055632424750002, + ], + [ + -225.94461629644376899, + -56.25092040827590267, + 26.35878647458429214, + -33.26841222999621550, + 102.11755671384275956, + -148.44767755223068662, + 84.71647733172090966, + -12.93062571870521538, + -19.38991195906297094, + 143.16011660558126550, + -2808.05844193727216407, + ], + [ + 124.70798457908202295, + 30.14434976384028175, + -12.73992408228727413, + 12.92824859689875439, + -22.96770178736812795, + 87.20767047396154226, + -145.95648441593758093, + 92.08048065397863979, + -0.95025169407655596, + -129.54125424532398370, + 2781.95187139803192622, + ], + [ + -69.91591454459906174, + -16.62021110238747212, + 6.63427221207166440, + -6.03370483302470006, + 8.70100766266027392, + -19.44652098139492580, + 88.95463428189493982, + -171.28744490161898284, + 114.51609527899320540, + 96.34429015810216868, + -2857.00992207556782887, + ], + [ + 37.47643302516524244, + 8.82137958864823446, + -3.40579539515837126, + 2.91455014697952919, + -3.79403081005474263, + 6.97397976037029821, + -19.16245728452330610, + 107.95443588945002489, + -244.46119936197482048, + 119.73345306595348347, + 2997.72844436084369590, + ], + [ + -17.17740663749788510, + -4.02026384351714050, + 1.52266744980528657, + -1.25933966773422412, + 1.55357726414079522, + -2.60959678371050963, + 6.00481066076588021, + -20.95690586320498028, + 158.71244001051152850, + -431.41311106981731882, + -2633.05960863876953226, + ], + [ + 4.63622473345746045, + 1.08199286224006919, + -0.40591586114592104, + 0.33015461943831514, + -0.39706044187436440, + 0.64079185494469826, + -1.37486858908956577, + 4.15005048165886947, + -20.71483283092484839, + 276.17474439959875099, + 1094.54612834228328211, + ], +] +legendre_afinal_dict[10] = [ + 0.99999999956071406, + -1.60899754383213178, + 1.08981601730200239, + -0.90212990629010470, + 0.81429784307270081, + -0.77746068027003834, + 0.77746068041548966, + -0.81429784304977582, + 0.90212990635984425, + -1.08981601689073226, + 1.60899754350164637, +] diff --git a/pyomo/dataportal/DataPortal.py b/pyomo/dataportal/DataPortal.py index e465d9f8f05..8eb577af013 100644 --- a/pyomo/dataportal/DataPortal.py +++ b/pyomo/dataportal/DataPortal.py @@ -39,7 +39,7 @@ class DataPortal(object): Default is :const:`None`. filename (str): A file from which data is loaded. Default is :const:`None`. - data_dict (dict): A dictionary used to initialize the data + data_dict (dict): A dictionary used to initialize the data in this object. Default is :const:`None`. """ @@ -48,16 +48,18 @@ def __init__(self, *args, **kwds): Constructor """ if len(args) > 0: - raise RuntimeError("Unexpected constructor argument for a DataPortal object") + raise RuntimeError( + "Unexpected constructor argument for a DataPortal object" + ) # Initialize this object with no data manager self._data_manager = None # Map initialization data as follows: _data[namespace][symbol] -> data - self._data={} + self._data = {} # This is the data that is imported from various sources - self._default={} + self._default = {} # Get the model for which this data is associated. self._model = kwds.pop('model', None) @@ -82,9 +84,9 @@ def connect(self, **kwds): This data manager is used to process future data imports and exports. Args: - filename (str): A filename that specifies the data source. + filename (str): A filename that specifies the data source. Default is :const:`None`. - server (str): The name of the remote server that hosts the data. + server (str): The name of the remote server that hosts the data. Default is :const:`None`. using (str): The name of the resource used to load the data. Default is :const:`None`. @@ -93,11 +95,11 @@ def connect(self, **kwds): """ if not self._data_manager is None: self._data_manager.close() - data = kwds.get('using',None) + data = kwds.get('using', None) if data is None: - data = kwds.get('filename',None) + data = kwds.get('filename', None) if data is None: - data = kwds.get('server',None) + data = kwds.get('server', None) if '.' in data: tmp = data.split(".")[-1] else: @@ -126,25 +128,25 @@ def load(self, **kwds): Other keyword arguments are passed to the :func:`connect()` method. """ - if is_debug_set(logger): #pragma:nocover + if is_debug_set(logger): # pragma:nocover logger.debug("Loading data...") # # Process arguments # _model = kwds.pop('model', None) if not _model is None: - self._model=_model + self._model = _model # - # If _disconnect is True, then disconnect the data + # If _disconnect is True, then disconnect the data # manager after we load data # - _disconnect=False + _disconnect = False if self._data_manager is None: # # Start a new connection # self.connect(**kwds) - _disconnect=True + _disconnect = True elif len(kwds) > 0: # # We are continuing to store using an existing connection. @@ -160,7 +162,7 @@ def load(self, **kwds): # # Read from data manager into self._data and self._default # - if is_debug_set(logger): #pragma:nocover + if is_debug_set(logger): # pragma:nocover logger.debug("Processing data ...") self._data_manager.read() status = self._data_manager.process(self._model, self._data, self._default) @@ -170,7 +172,7 @@ def load(self, **kwds): # if _disconnect: self.disconnect() - if is_debug_set(logger): #pragma:nocover + if is_debug_set(logger): # pragma:nocover logger.debug("Done.") def store(self, **kwds): @@ -183,22 +185,22 @@ def store(self, **kwds): Other keyword arguments are passed to the :func:`connect()` method. """ - if is_debug_set(logger): #pragma:nocover + if is_debug_set(logger): # pragma:nocover logger.debug("Storing data...") # # Process arguments # _model = kwds.pop('model', None) if not _model is None: - self._model=_model + self._model = _model # - # If _disconnect is True, then disconnect the data manager - # after we load data + # If _disconnect is True, then disconnect the data manager + # after we load data # - _disconnect=False + _disconnect = False if self._data_manager is None: self.connect(**kwds) - _disconnect=True + _disconnect = True elif len(kwds) > 0: # # Q: Should we reinitialize? The semantic difference between @@ -219,12 +221,12 @@ def store(self, **kwds): # if _disconnect: self.disconnect() - if is_debug_set(logger): #pragma:nocover + if is_debug_set(logger): # pragma:nocover logger.debug("Done.") def data(self, name=None, namespace=None): """ - Return the data associated with a symbol and namespace + Return the data associated with a symbol and namespace Args: name (str): The name of the symbol that is returned. @@ -235,11 +237,11 @@ def data(self, name=None, namespace=None): Returns: If ``name`` is :const:`None`, then the dictionary for - the namespace is returned. Otherwise, the data + the namespace is returned. Otherwise, the data associated with ``name`` in given namespace is returned. - The return value is a constant if :const:`None` if + The return value is a constant if :const:`None` if there is a single value in the symbol dictionary, and otherwise - the symbol dictionary is returned. + the symbol dictionary is returned. """ if not namespace in self._data: raise IOError("Unknown namespace '%s'" % str(namespace)) @@ -253,7 +255,7 @@ def data(self, name=None, namespace=None): def __getitem__(self, *args): """ - Return the specified data value. + Return the specified data value. If a single argument is given, then this is the symbol name:: @@ -267,7 +269,7 @@ def __getitem__(self, *args): dp[namespace, name] Args: - *args (str): A tuple of arguents. + *args (str): A tuple of arguments. Returns: If a single argument is given, then the data associated @@ -276,15 +278,17 @@ def __getitem__(self, *args): symbol in the given namespace is returned. """ if type(args[0]) is tuple or type(args[0]) is list: - assert(len(args) == 1) + assert len(args) == 1 args = args[0] if len(args) > 2: - raise IOError("Must specify data name: DataPortal[name] or Data[namespace, name]") + raise IOError( + "Must specify data name: DataPortal[name] or Data[namespace, name]" + ) elif len(args) == 2: namespace = args[0] name = args[1] else: - namespace=None + namespace = None name = args[0] ans = self._data[namespace][name] @@ -366,27 +370,31 @@ def _preprocess_options(self): """ options = self._data_manager.options # - if options.data is None and (not options.set is None or not options.param is None or not options.index is None): + if options.data is None and ( + not options.set is None + or not options.param is None + or not options.index is None + ): # # Set options.data to a list of elements of the options.set, # options.param and options.index values. # options.data = [] if not options.set is None: - assert(type(options.set) not in (list, tuple)) + assert type(options.set) not in (list, tuple) options.data.append(options.set) # # The set option should not be a list or tuple. # - #if type(options.set) in (list,tuple): + # if type(options.set) in (list,tuple): # for item in options.set: # options.data.append(item) - #else: + # else: # options.data.append(options.set) if not options.index is None: options.data.append(options.index) if not options.param is None: - if type(options.param) in (list,tuple): + if type(options.param) in (list, tuple): for item in options.param: options.data.append(item) else: @@ -418,7 +426,7 @@ def _preprocess_options(self): # try: self._model = options.data.model() - options.data = [ self._data_manager.options.data.local_name ] + options.data = [self._data_manager.options.data.local_name] except: pass @@ -434,4 +442,3 @@ def _load_data_from_model(self): self._data[name] = c.data() except: self._data[name] = c.extract_values() - diff --git a/pyomo/dataportal/TableData.py b/pyomo/dataportal/TableData.py index 306ea1716dd..1d428967449 100644 --- a/pyomo/dataportal/TableData.py +++ b/pyomo/dataportal/TableData.py @@ -25,8 +25,8 @@ def __init__(self): """ Constructor """ - self._info=None - self._data=None + self._info = None + self._data = None self.options = Bunch() self.options.ncolumns = 1 @@ -54,25 +54,25 @@ def add_options(self, **kwds): """ self.options.update(kwds) - def open(self): #pragma:nocover + def open(self): # pragma:nocover """ Open the data manager. """ pass - def read(self): #pragma:nocover + def read(self): # pragma:nocover """ Read data from the data manager. """ return False - def write(self, data): #pragma:nocover + def write(self, data): # pragma:nocover """ Write data to the data manager. """ return False - def close(self): #pragma:nocover + def close(self): # pragma:nocover """ Close the data manager. """ @@ -88,15 +88,16 @@ def process(self, model, data, default): if not self.options.namespace in data: data[self.options.namespace] = {} return _process_data( - self._info, - model, - data[self.options.namespace], - default, - self.filename, - index=self.options.index, - set=self.options.set, - param=self.options.param, - ncolumns = self.options.ncolumns) + self._info, + model, + data[self.options.namespace], + default, + self.filename, + index=self.options.index, + set=self.options.set, + param=self.options.param, + ncolumns=self.options.ncolumns, + ) def clear(self): """ @@ -117,7 +118,10 @@ def _set_data(self, headers, rows): try: header_index.append(headers.index(str(i))) except: - print("Model declaration '%s' not found in returned query columns" %str(i)) + print( + "Model declaration '%s' not found in returned query columns" + % str(i) + ) raise self.options.ncolumns = len(headers) @@ -167,7 +171,7 @@ def _set_data(self, headers, rows): msg = "Cannot specify index for data with the 'set' format: %s" raise IOError(msg % str(self.options.index)) - self._info = ["set",self.options.set,":="] + self._info = ["set", self.options.set, ":="] for row in rows: if self.options.ncolumns > 1: self._info.append(tuple(row)) @@ -176,25 +180,24 @@ def _set_data(self, headers, rows): elif self.options.format == 'set_array': if not self.options.index is None: - msg = "Cannot specify index for data with the 'set_array' " \ - 'format: %s' + msg = "Cannot specify index for data with the 'set_array' format: %s" raise IOError(msg % str(self.options.index)) - self._info = ["set",self.options.set, ":"] + self._info = ["set", self.options.set, ":"] self._info.extend(headers[1:]) self._info.append(":=") for row in rows: self._info.extend(row) elif self.options.format == 'transposed_array': - self._info = ["param",self.options.param[0],"(tr)",":"] + self._info = ["param", self.options.param[0], "(tr)", ":"] self._info.extend(headers[1:]) self._info.append(":=") for row in rows: self._info.extend(row) elif self.options.format == 'array': - self._info = ["param",self.options.param[0],":"] + self._info = ["param", self.options.param[0], ":"] self._info.extend(headers[1:]) self._info.append(":=") for row in rows: @@ -202,9 +205,9 @@ def _set_data(self, headers, rows): elif self.options.format == 'table': if self.options.index is not None: - self._info = ["param",":",self.options.index,":"] + self._info = ["param", ":", self.options.index, ":"] else: - self._info = ["param",":"] + self._info = ["param", ":"] for param in self.options.param: self._info.append(param) self._info.append(":=") @@ -227,7 +230,7 @@ def _get_table(self): if self.options.columns is None: cols = [] for i in range(self.options.set.dimen): - cols.append(self.options.set.local_name+str(i)) + cols.append(self.options.set.local_name + str(i)) tmp.append(cols) # Get rows if self.options.sort is not None: @@ -243,7 +246,7 @@ def _get_table(self): else: tmp.append([data]) elif self.options.param is not None: - if type(self.options.param) in (list,tuple): + if type(self.options.param) in (list, tuple): _param = self.options.param else: _param = [self.options.param] @@ -251,7 +254,7 @@ def _get_table(self): for index in _param[0]: if index is None: row = [] - elif type(index) in (list,tuple): + elif type(index) in (list, tuple): row = list(index) else: row = [index] @@ -261,9 +264,9 @@ def _get_table(self): # Create column names if self.options.columns is None: cols = [] - for i in range(len(tmp[0])-len(_param)): - cols.append('I'+str(i)) + for i in range(len(tmp[0]) - len(_param)): + cols.append('I' + str(i)) for param in _param: cols.append(param) - tmp.insert(0,cols) + tmp.insert(0, cols) return tmp diff --git a/pyomo/dataportal/factory.py b/pyomo/dataportal/factory.py index 2c35b450c51..f1c18dc05c9 100644 --- a/pyomo/dataportal/factory.py +++ b/pyomo/dataportal/factory.py @@ -9,20 +9,16 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = [ - 'DataManagerFactory', - 'UnknownDataManager' - ] +__all__ = ['DataManagerFactory', 'UnknownDataManager'] import logging from pyomo.common import Factory -from pyomo.common.plugin import PluginError +from pyomo.common.plugin_base import PluginError logger = logging.getLogger('pyomo.core') class UnknownDataManager(object): - def __init__(self, *args, **kwds): # # The 'type' is the class type of the solver instance @@ -34,21 +30,23 @@ def available(self): class DataManagerFactoryClass(Factory): - # # This is the custom __call__ method # def __call__(self, _name=None, args=[], **kwds): if _name is None: return self - _name=str(_name) + _name = str(_name) if _name in self._cls: dm = self._cls[_name](**kwds) if not dm.available(): - raise PluginError("Cannot process data in %s files. The following python packages need to be installed: %s" % (_name, dm.requirements())) + raise PluginError( + "Cannot process data in %s files. The following python packages need to be installed: %s" + % (_name, dm.requirements()) + ) else: dm = UnknownDataManager(type=_name) return dm -DataManagerFactory = DataManagerFactoryClass('data file') +DataManagerFactory = DataManagerFactoryClass('data file') diff --git a/pyomo/dataportal/parse_datacmds.py b/pyomo/dataportal/parse_datacmds.py index fc098c846a4..be363fdb64b 100644 --- a/pyomo/dataportal/parse_datacmds.py +++ b/pyomo/dataportal/parse_datacmds.py @@ -33,20 +33,18 @@ _parse_info = None -states = ( - ('data','inclusive'), -) +states = (('data', 'inclusive'),) reserved = { - 'data' : 'DATA', - 'set' : 'SET', - 'param' : 'PARAM', - 'end' : 'END', - 'store' : 'STORE', - 'load' : 'LOAD', - 'table' : 'TABLE', - 'include' : 'INCLUDE', - 'namespace' : 'NAMESPACE', + 'data': 'DATA', + 'set': 'SET', + 'param': 'PARAM', + 'end': 'END', + 'store': 'STORE', + 'load': 'LOAD', + 'table': 'TABLE', + 'include': 'INCLUDE', + 'namespace': 'NAMESPACE', } # Token names @@ -70,24 +68,25 @@ "TR", "ASTERISK", "NUM_VAL", - #"NONWORD", + # "NONWORD", ] + list(reserved.values()) # Ignore space and tab t_ignore = " \t\r" # Regular expression rules -t_COMMA = r"," -t_LBRACKET = r"\[" -t_RBRACKET = r"\]" -t_LBRACE = r"\{" -t_RBRACE = r"\}" -t_COLON = r":" -t_EQ = r"=" -t_TR = r"\(tr\)" -t_LPAREN = r"\(" -t_RPAREN = r"\)" -t_ASTERISK = r"\*" +t_COMMA = r"," +t_LBRACKET = r"\[" +t_RBRACKET = r"\]" +t_LBRACE = r"\{" +t_RBRACE = r"\}" +t_COLON = r":" +t_EQ = r"=" +t_TR = r"\(tr\)" +t_LPAREN = r"\(" +t_RPAREN = r"\)" +t_ASTERISK = r"\*" + # # Notes on PLY tokenization @@ -97,11 +96,14 @@ def t_newline(t): r'[\n]+' t.lexer.lineno += len(t.value) - t.lexer.linepos.extend(t.lexpos+i for i,_ in enumerate(t.value)) + t.lexer.linepos.extend(t.lexpos + i for i, _ in enumerate(t.value)) + # Discard comments _re_singleline_comment = r'(?:\#[^\n]*)' _re_multiline_comment = r'(?:/\*(?:[\n]|.)*?\*/)' + + @lex.TOKEN('|'.join([_re_singleline_comment, _re_multiline_comment])) def t_COMMENT(t): # Single-line and multi-line strings @@ -113,16 +115,19 @@ def t_COMMENT(t): lastpos = t.lexpos + t.value.rfind('\n') t.lexer.linepos.extend(lastpos for i in range(nlines)) + def t_COLONEQ(t): r':=' t.lexer.begin('data') return t + def t_SEMICOLON(t): r';' t.lexer.begin('INITIAL') return t + # Numbers must be followed by a delimiter token (EOF is not a concern, # as valid DAT files always end with a ';'). @lex.TOKEN(_re_number + r'(?=[\s()\[\]{}:;,])') @@ -135,22 +140,26 @@ def t_NUM_VAL(t): t.value = _int if _num == _int else _num return t + def t_WORDWITHLBRACKET(t): r'[a-zA-Z_][a-zA-Z0-9_\.\-]*\[' return t + def t_WORD(t): r'[a-zA-Z_][a-zA-Z_0-9\.+\-]*' if t.value in reserved: - t.type = reserved[t.value] # Check for reserved words + t.type = reserved[t.value] # Check for reserved words return t + def t_STRING(t): r'[a-zA-Z0-9_\.+\-\\\/]+' # Note: RE guarantees the string has no embedded quotation characters - t.value = '"'+t.value+'"' + t.value = '"' + t.value + '"' return t + def t_data_BRACKETEDSTRING(t): r'[a-zA-Z0-9_\.+\-]*\[[a-zA-Z0-9_\.+\-\*,\s]+\]' # NO SPACES @@ -158,20 +167,28 @@ def t_data_BRACKETEDSTRING(t): # [1,*,'foo bar'] return t + _re_quoted_str = r'"(?:[^"]|"")*"' -@lex.TOKEN("|".join([_re_quoted_str, _re_quoted_str.replace('"',"'")])) + + +@lex.TOKEN("|".join([_re_quoted_str, _re_quoted_str.replace('"', "'")])) def t_QUOTEDSTRING(t): # Normalize the quotes to use '"', and replace doubled ("escaped") # quotation characters with a single character - t.value = '"' + t.value[1:-1].replace(2*t.value[0], t.value[0]) + '"' + t.value = '"' + t.value[1:-1].replace(2 * t.value[0], t.value[0]) + '"' return t -#t_NONWORD = r"[^\.A-Za-z0-9,;:=<>\*\(\)\#{}\[\] \n\t\r]+" + +# t_NONWORD = r"[^\.A-Za-z0-9,;:=<>\*\(\)\#{}\[\] \n\t\r]+" + # Error handling rule def t_error(t): - raise IOError("ERROR: Token %s Value %s Line %s Column %s" - % (t.type, t.value, t.lineno, t.lexpos)) + raise IOError( + "ERROR: Token %s Value %s Line %s Column %s" + % (t.type, t.value, t.lineno, t.lexpos) + ) + ## DEBUGGING: uncomment to get tokenization information # def _wrap(_name, _fcn): @@ -185,23 +202,26 @@ def t_error(t): # if _name.startswith('t_') and inspect.isfunction(globals()[_name]): # globals()[_name] = _wrap(_name, globals()[_name]) + def _lex_token_position(t): i = bisect.bisect_left(t.lexer.linepos, t.lexpos) if i: - return t.lexpos - t.lexer.linepos[i-1] + return t.lexpos - t.lexer.linepos[i - 1] return t.lexpos + ## ----------------------------------------------------------- ## ## Yacc grammar for data commands ## ## ----------------------------------------------------------- + def p_expr(p): '''expr : statements - | ''' + |''' if len(p) == 2: - #print "STMTS",p[1] + # print "STMTS",p[1] for stmt in p[1]: if type(stmt) is list: _parse_info[None].append(stmt) @@ -212,12 +232,13 @@ def p_expr(p): else: _parse_info[key] = stmt[key] + def p_statements(p): '''statements : statements statement - | statement - | statements NAMESPACE WORD LBRACE statements RBRACE - | NAMESPACE WORD LBRACE statements RBRACE ''' - #print "STMT X",p[1:],p[1] + | statement + | statements NAMESPACE WORD LBRACE statements RBRACE + | NAMESPACE WORD LBRACE statements RBRACE''' + # print "STMT X",p[1:],p[1] len_p = len(p) if len_p == 3: # NB: statements will never be None, but statement *could* be None @@ -232,46 +253,50 @@ def p_statements(p): elif len_p == 7: # NB: statements will never be None p[0] = p[1] - p[0].append({p[3]:p[5]}) + p[0].append({p[3]: p[5]}) else: # NB: statements will never be None - p[0] = [{p[2] : p[4]}] + p[0] = [{p[2]: p[4]}] + def p_statement(p): '''statement : SET WORD COLONEQ datastar SEMICOLON - | SET WORDWITHLBRACKET args RBRACKET COLONEQ datastar SEMICOLON - | SET WORD COLON itemstar COLONEQ datastar SEMICOLON - | PARAM items COLONEQ datastar SEMICOLON - | TABLE items COLONEQ datastar SEMICOLON - | LOAD items SEMICOLON - | STORE items SEMICOLON - | INCLUDE WORD SEMICOLON - | INCLUDE QUOTEDSTRING SEMICOLON - | DATA SEMICOLON - | END SEMICOLON + | SET WORDWITHLBRACKET args RBRACKET COLONEQ datastar SEMICOLON + | SET WORD COLON itemstar COLONEQ datastar SEMICOLON + | PARAM items COLONEQ datastar SEMICOLON + | TABLE items COLONEQ datastar SEMICOLON + | LOAD items SEMICOLON + | STORE items SEMICOLON + | INCLUDE WORD SEMICOLON + | INCLUDE QUOTEDSTRING SEMICOLON + | DATA SEMICOLON + | END SEMICOLON ''' - #print "STATEMENT",len(p), p[1:] + # print "STATEMENT",len(p), p[1:] stmt = p[1] if stmt == 'set': if p[2][-1] == '[': # Just turn off the flatten_list and see what happens - p[0] = ['set', p[2][:-1], '['] + list(flatten_tuple([p[i] for i in range(3,len(p)-1)])) + p[0] = ['set', p[2][:-1], '['] + list( + flatten_tuple([p[i] for i in range(3, len(p) - 1)]) + ) else: - p[0] = list(flatten_tuple([p[i] for i in range(1,len(p)-1)])) + p[0] = list(flatten_tuple([p[i] for i in range(1, len(p) - 1)])) elif stmt == 'param': - p[0] = list(flatten_tuple([p[i] for i in range(1,len(p)-1)])) + p[0] = list(flatten_tuple([p[i] for i in range(1, len(p) - 1)])) elif stmt == 'include': - p[0] = [p[i] for i in range(1,len(p)-1)] + p[0] = [p[i] for i in range(1, len(p) - 1)] elif stmt == 'load': - p[0] = [p[1]]+ p[2] + p[0] = [p[1]] + p[2] elif stmt == 'store': - p[0] = [p[1]]+ p[2] + p[0] = [p[1]] + p[2] elif stmt == 'table': - p[0] = [p[1]]+ [p[2]] + [p[4]] + p[0] = [p[1]] + [p[2]] + [p[4]] else: # Not necessary, but nice to document how statement could end up None p[0] = None - #print(p[0]) + # print(p[0]) + def p_datastar(p): ''' @@ -283,6 +308,7 @@ def p_datastar(p): else: p[0] = [] + def p_data(p): ''' data : data NUM_VAL @@ -316,7 +342,7 @@ def p_data(p): tmp = p[1] else: tmp = p[2] - #if type(tmp) is str and tmp[0] == '"' and tmp[-1] == '"' and len(tmp) > 2 and not ' ' in tmp: + # if type(tmp) is str and tmp[0] == '"' and tmp[-1] == '"' and len(tmp) > 2 and not ' ' in tmp: # tmp = tmp[1:-1] # Grow items list according to parsed item length @@ -329,6 +355,7 @@ def p_data(p): tmp_lst.append(tmp) p[0] = tmp_lst + def p_args(p): ''' args : arg @@ -339,6 +366,7 @@ def p_args(p): else: p[0] = [] + def p_arg(p): ''' arg : arg COMMA NUM_VAL @@ -362,7 +390,13 @@ def p_arg(p): tmp = p[1] else: tmp = p[3] - if type(tmp) is str and tmp[0] == '"' and tmp[-1] == '"' and len(tmp) > 2 and not ' ' in tmp: + if ( + type(tmp) is str + and tmp[0] == '"' + and tmp[-1] == '"' + and len(tmp) > 2 + and not ' ' in tmp + ): tmp = tmp[1:-1] # Grow items list according to parsed item length @@ -375,6 +409,7 @@ def p_arg(p): tmp_lst.append(tmp) p[0] = tmp_lst + def p_itemstar(p): ''' itemstar : items @@ -385,6 +420,7 @@ def p_itemstar(p): else: p[0] = [] + def p_items(p): ''' items : items NUM_VAL @@ -430,7 +466,13 @@ def p_items(p): tmp = p[1] else: tmp = p[2] - if type(tmp) is str and tmp[0] == '"' and tmp[-1] == '"' and len(tmp) > 2 and not ' ' in tmp: + if ( + type(tmp) is str + and tmp[0] == '"' + and tmp[-1] == '"' + and len(tmp) > 2 + and not ' ' in tmp + ): tmp = tmp[1:-1] # Grow items list according to parsed item length @@ -443,14 +485,20 @@ def p_items(p): tmp_lst.append(tmp) p[0] = tmp_lst + def p_error(p): if p is None: tmp = "Syntax error at end of file." else: - tmp = "Syntax error at token '%s' with value '%s' (line %s, column %s)"\ - % (p.type, p.value, p.lineno, _lex_token_position(p)) + tmp = "Syntax error at token '%s' with value '%s' (line %s, column %s)" % ( + p.type, + p.value, + p.lineno, + _lex_token_position(p), + ) raise IOError(tmp) + # -------------------------------------------------------------- # the DAT file lexer and yaccer only need to be # created once, so have the corresponding objects @@ -463,41 +511,43 @@ def p_error(p): dat_yaccer = None dat_yaccer_tabfile = None + # # The function that performs the parsing # def parse_data_commands(data=None, filename=None, debug=0, outputdir=None): - global dat_lexer global dat_yaccer global dat_yaccer_tabfile if outputdir is None: # Try and write this into the module source... - outputdir = os.path.dirname(getfile( currentframe() )) - _tabfile = os.path.join(outputdir, tabmodule+".py") + outputdir = os.path.dirname(getfile(currentframe())) + _tabfile = os.path.join(outputdir, tabmodule + ".py") # Ideally, we would pollute a per-user configuration directory # first -- something like ~/.pyomo. if not os.access(outputdir, os.W_OK): _file = this_file() logger = logging.getLogger('pyomo.dataportal') - if os.path.exists(_tabfile) and \ - os.path.getmtime(_file) >= os.path.getmtime(_tabfile): + if os.path.exists(_tabfile) and os.path.getmtime(_file) >= os.path.getmtime( + _tabfile + ): logger.warning( "Potentially outdated DAT Parse Table found in source " "tree (%s), but you do not have write access to that " "directory, so we cannot update it. Please notify " - "you system administrator to remove that file" - % (_tabfile,)) - if os.path.exists(_tabfile+'c') and \ - os.path.getmtime(_file) >= os.path.getmtime(_tabfile+'c'): + "you system administrator to remove that file" % (_tabfile,) + ) + if os.path.exists(_tabfile + 'c') and os.path.getmtime( + _file + ) >= os.path.getmtime(_tabfile + 'c'): logger.warning( "Potentially outdated DAT Parse Table found in source " "tree (%s), but you do not have write access to that " "directory, so we cannot update it. Please notify " - "you system administrator to remove that file" - % (_tabfile+'c',)) + "you system administrator to remove that file" % (_tabfile + 'c',) + ) # Switch the directory for the tabmodule to the current directory outputdir = os.getcwd() @@ -512,31 +562,31 @@ def parse_data_commands(data=None, filename=None, debug=0, outputdir=None): if os.path.exists(_parser_out): os.remove(_parser_out) - _tabfile = dat_yaccer_tabfile = os.path.join(outputdir, tabmodule+".py") - if debug > 0 or \ - ( os.path.exists(_tabfile) and - os.path.getmtime(__file__) >= os.path.getmtime(_tabfile) ): + _tabfile = dat_yaccer_tabfile = os.path.join(outputdir, tabmodule + ".py") + if debug > 0 or ( + os.path.exists(_tabfile) + and os.path.getmtime(__file__) >= os.path.getmtime(_tabfile) + ): # # Remove the parsetab.py* files. These apparently need to # be removed to ensure the creation of a parser.out file. # if os.path.exists(_tabfile): os.remove(_tabfile) - if os.path.exists(_tabfile+"c"): - os.remove(_tabfile+"c") + if os.path.exists(_tabfile + "c"): + os.remove(_tabfile + "c") for _mod in list(sys.modules.keys()): - if _mod == tabmodule or _mod.endswith('.'+tabmodule): + if _mod == tabmodule or _mod.endswith('.' + tabmodule): del sys.modules[_mod] dat_lexer = lex.lex() # tmpsyspath = sys.path sys.path.append(outputdir) - dat_yaccer = yacc.yacc(debug=debug, - tabmodule=tabmodule, - outputdir=outputdir, - optimize=True) + dat_yaccer = yacc.yacc( + debug=debug, tabmodule=tabmodule, outputdir=outputdir, optimize=True + ) sys.path = tmpsyspath # @@ -552,8 +602,10 @@ def parse_data_commands(data=None, filename=None, debug=0, outputdir=None): # if filename is not None: if data is not None: - raise ValueError("parse_data_commands: cannot specify both " - "data and filename arguments") + raise ValueError( + "parse_data_commands: cannot specify both " + "data and filename arguments" + ) with open(filename, 'r') as FILE: data = FILE.read() @@ -563,5 +615,6 @@ def parse_data_commands(data=None, filename=None, debug=0, outputdir=None): dat_yaccer.parse(data, lexer=dat_lexer, debug=debug) return _parse_info + if __name__ == '__main__': parse_data_commands(filename=sys.argv[1], debug=100) diff --git a/pyomo/dataportal/plugins/__init__.py b/pyomo/dataportal/plugins/__init__.py index 1d4db8e18cb..e861233dc01 100644 --- a/pyomo/dataportal/plugins/__init__.py +++ b/pyomo/dataportal/plugins/__init__.py @@ -11,6 +11,7 @@ from pyomo.common.dependencies import pyutilib, pyutilib_available + def load(): import pyomo.dataportal.plugins.csv_table import pyomo.dataportal.plugins.datacommands @@ -18,6 +19,6 @@ def load(): import pyomo.dataportal.plugins.json_dict import pyomo.dataportal.plugins.text import pyomo.dataportal.plugins.xml_table + if pyutilib_available: import pyomo.dataportal.plugins.sheet - diff --git a/pyomo/dataportal/plugins/csv_table.py b/pyomo/dataportal/plugins/csv_table.py index c49a82aff40..6563a89df10 100644 --- a/pyomo/dataportal/plugins/csv_table.py +++ b/pyomo/dataportal/plugins/csv_table.py @@ -18,12 +18,11 @@ @DataManagerFactory.register("csv", "CSV file interface") class CSVTable(TableData): - def __init__(self): TableData.__init__(self) def open(self): - if self.filename is None: #pragma:nocover + if self.filename is None: # pragma:nocover raise IOError("No filename specified") def close(self): @@ -31,10 +30,11 @@ def close(self): def read(self): from pyomo.core.base.param import Param - if not os.path.exists(self.filename): #pragma:nocover + + if not os.path.exists(self.filename): # pragma:nocover raise IOError("Cannot find file '%s'" % self.filename) self.FILE = open(self.filename, 'r') - tmp=[] + tmp = [] for tokens in csv.reader(self.FILE): if tokens != ['']: tmp.append(tokens) @@ -50,11 +50,19 @@ def read(self): if isinstance(p, Param): self.options.model = p.model() p = p.local_name - self._info = ["param",p,":=",tmp[0][0]] + self._info = ["param", p, ":=", tmp[0][0]] elif len(self.options.symbol_map) == 1: - self._info = ["param",self.options.symbol_map[self.options.symbol_map.keys()[0]],":=",tmp[0][0]] + self._info = [ + "param", + self.options.symbol_map[self.options.symbol_map.keys()[0]], + ":=", + tmp[0][0], + ] else: - raise IOError("Data looks like a parameter, but multiple parameter names have been specified: %s" % str(self.options.symbol_map)) + raise IOError( + "Data looks like a parameter, but multiple parameter names have been specified: %s" + % str(self.options.symbol_map) + ) else: self._set_data(tmp[0], tmp[1:]) @@ -66,4 +74,3 @@ def write(self, data): writer = csv.writer(self.FILE) writer.writerows(table) self.FILE.close() - diff --git a/pyomo/dataportal/plugins/datacommands.py b/pyomo/dataportal/plugins/datacommands.py index 6c8434540f6..068a551d8d2 100644 --- a/pyomo/dataportal/plugins/datacommands.py +++ b/pyomo/dataportal/plugins/datacommands.py @@ -18,7 +18,6 @@ @DataManagerFactory.register("dat", "Pyomo data command file interface") class PyomoDataCommands(object): - def __init__(self): self._info = [] self.options = Bunch() @@ -34,9 +33,9 @@ def add_options(self, **kwds): self.options.update(kwds) def open(self): - if self.filename is None: #pragma:nocover + if self.filename is None: # pragma:nocover raise IOError("No filename specified") - if not os.path.exists(self.filename): #pragma:nocover + if not os.path.exists(self.filename): # pragma:nocover raise IOError("Cannot find file '%s'" % self.filename) def close(self): @@ -49,7 +48,7 @@ def read(self): """ pass - def write(self, data): #pragma:nocover + def write(self, data): # pragma:nocover """ This function does nothing, because we cannot write to a *.dat file. """ diff --git a/pyomo/dataportal/plugins/db_table.py b/pyomo/dataportal/plugins/db_table.py index abeeb50c199..682b87ab13e 100644 --- a/pyomo/dataportal/plugins/db_table.py +++ b/pyomo/dataportal/plugins/db_table.py @@ -31,8 +31,8 @@ # password= # table= -class db_Table(TableData): +class db_Table(TableData): def __init__(self): TableData.__init__(self) self.using = None @@ -79,6 +79,7 @@ def read(self): tmp = [tmp] except sqlite3.OperationalError: import logging + logging.getLogger('pyomo.core').error( """Fatal error reading from an external ODBC data source. @@ -94,13 +95,15 @@ def read(self): It is possible that you have an error in your external data file, the ODBC connector for this data source is not correctly installed, or that there is a bug in the ODBC connector. -""" % (self.filename, self.options.query) ) +""" + % (self.filename, self.options.query) + ) raise for row in rows: - #print("DATA %s" % str(list(row))) # XXX - ttmp=[] + # print("DATA %s" % str(list(row))) # XXX + ttmp = [] for data in list(row): - if isinstance(data,Decimal): + if isinstance(data, Decimal): ttmp.append(float(data)) elif data is None: ttmp.append('.') @@ -112,7 +115,7 @@ def read(self): else: ttmp.append(data) tmp.append(ttmp) - #print('FINAL %s' % str(tmp)) # XXX + # print('FINAL %s' % str(tmp)) # XXX # # Process data from the table # @@ -120,15 +123,23 @@ def read(self): if not self.options.param is None: self._info = ["param", self.options.param.local_name, ":=", tmp] elif len(self.options.symbol_map) == 1: - self._info = ["param", self.options.symbol_map[self.options.symbol_map.keys()[0]], ":=", tmp] + self._info = [ + "param", + self.options.symbol_map[self.options.symbol_map.keys()[0]], + ":=", + tmp, + ] else: - raise IOError("Data looks like a scalar parameter, but multiple parameter names have been specified: %s" % str(self.options.symbol_map)) + raise IOError( + "Data looks like a scalar parameter, but multiple parameter names have been specified: %s" + % str(self.options.symbol_map) + ) elif len(tmp) == 0: raise IOError("Empty range '%s'" % self.options.range) else: - #print("_info %s" % str(self._info)) - #print("SETTING DATA %s %s" % (str(tmp[0]), str(tmp[1:]))) # XXX - #print("OPTIONS %s" % str(self.options)) + # print("_info %s" % str(self._info)) + # print("SETTING DATA %s %s" % (str(tmp[0]), str(tmp[1:]))) # XXX + # print("OPTIONS %s" % str(self.options)) self._set_data(tmp[0], tmp[1:]) def close(self): @@ -149,6 +160,7 @@ def connect(self, connection, options, kwds={}): except ImportError: return None + # # NOTE: The pyodbc interface currently doesn't work. Notably, nothing # sets the "table" or "query" options, which causes db_table.read() to @@ -156,16 +168,19 @@ def connect(self, connection, options, kwds={}): # in sheet.py # + @DataManagerFactory.register('pyodbc', "%s database interface" % 'pyodbc') class pyodbc_db_Table(db_Table): - _drivers = { 'mdb': ["Microsoft Access Driver (*.mdb)"], - 'xls': ["Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)","Microsoft Excel Driver (*.xls)"], + 'xls': [ + "Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)", + "Microsoft Excel Driver (*.xls)", + ], 'xlsx': ["Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)"], 'xlsm': ["Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)"], 'xlsb': ["Microsoft Excel Driver (*.xls, *.xlsx, *.xlsm, *.xlsb)"], - 'mysql': ["MySQL"] + 'mysql': ["MySQL"], } _drivers['access'] = _drivers['mdb'] _drivers['excel'] = _drivers['xls'] @@ -190,7 +205,13 @@ def connect(self, connection, options): else: ctype = '' extras = {} - if ctype in ['xls', 'xlsx', 'xlsm', 'xlsb', 'excel'] or '.xls' in connection or '.xlsx' in connection or '.xlsm' in connection or '.xlsb' in connection: + if ( + ctype in ['xls', 'xlsx', 'xlsm', 'xlsb', 'excel'] + or '.xls' in connection + or '.xlsx' in connection + or '.xlsm' in connection + or '.xlsb' in connection + ): extras['autocommit'] = True connection = self.create_connection_string(ctype, connection, options) @@ -220,7 +241,9 @@ def connect(self, connection, options): config = ODBCConfig() dsninfo = self.create_dsn_dict(connection, config) - dsnid = re.sub('[^A-Za-z0-9]', '', dsninfo['Database']) # Strip filenames of funny characters + dsnid = re.sub( + '[^A-Za-z0-9]', '', dsninfo['Database'] + ) # Strip filenames of funny characters dsn = 'PYOMO{0}'.format(dsnid) config.add_source(dsn, dsninfo['Driver']) @@ -235,12 +258,14 @@ def connect(self, connection, options): connstr = [] for k, v in dsninfo.items(): if ' ' in v and (v[0] != "{" or v[-1] != "}"): - connstr.append("%s={%s}" % (k.upper(),v)) + connstr.append("%s={%s}" % (k.upper(), v)) else: - connstr.append("%s=%s" % (k.upper(),v)) + connstr.append("%s=%s" % (k.upper(), v)) connstr = ";".join(connstr) - conn = db_Table.connect(self, connstr, options, extras) # Will raise its own exception on failure + conn = db_Table.connect( + self, connstr, options, extras + ) # Will raise its own exception on failure # Propagate the exception else: @@ -255,7 +280,7 @@ def create_dsn_dict(self, argstr, existing_config): argdict = {} for part in parts: if len(part) > 0 and '=' in part: - key, val = part.split('=',1) + key, val = part.split('=', 1) argdict[key.lower().strip()] = val.strip() if 'driver' in argdict: @@ -266,8 +291,13 @@ def create_dsn_dict(self, argstr, existing_config): return existing_config.source_specs[argdict['dsn']] else: import logging + logger = logging.getLogger("pyomo.core") - logger.warning("DSN with name {0} not found. Attempting to continue with options...".format(argdict['dsn'])) + logger.warning( + "DSN with name {0} not found. Attempting to continue with options...".format( + argdict['dsn'] + ) + ) if 'dbq' in argdict: # Using a file for db access. @@ -297,7 +327,11 @@ def create_dsn_dict(self, argstr, existing_config): result['Password'] = argdict.get('password', '') result['Description'] = argdict.get('description', '') else: - raise Exception("Unknown driver type '{0}' for database connection".format(result['Driver'])) + raise Exception( + "Unknown driver type '{0}' for database connection".format( + result['Driver'] + ) + ) return result @@ -315,7 +349,7 @@ def _get_driver(self, ctype): # a match in the pyodbc.drivvers() list. If a match is found, # return it. Otherwise (arbitrarily) return the first one. If # the ctype is not known, return None. - drivers = self._drivers.get(ctype,[]) + drivers = self._drivers.get(ctype, []) for driver in drivers: if driver in pyodbc.drivers(): return driver @@ -328,11 +362,12 @@ def _get_driver(self, ctype): class ODBCError(Exception): def __init__(self, value): self.parameter = value + def __repr__(self): return repr(self.parameter) -class ODBCConfig(): +class ODBCConfig: """ Encapsulates an ODBC configuration file, usually odbc.ini or .odbc.ini, as specified by IBM. ODBC config data can be loaded @@ -391,11 +426,17 @@ def load(self, filename=None, data=None): self.source_specs.update(sections) def __str__(self): - return "".format(len(self.sources), len(self.source_specs)) + return "".format( + len(self.sources), len(self.source_specs) + ) def __eq__(self, other): if isinstance(other, ODBCConfig): - return self.sources == other.sources and self.source_specs == other.source_specs and self.odbc_info == other.odbc_info + return ( + self.sources == other.sources + and self.source_specs == other.source_specs + and self.odbc_info == other.odbc_info + ) return False def odbc_repr(self): @@ -468,9 +509,13 @@ def add_source_spec(self, name, spec): """ if name is None or spec is None or len(name) == 0: - raise ODBCError("A source spec must specify both a name and a spec dictionary") + raise ODBCError( + "A source spec must specify both a name and a spec dictionary" + ) if name not in self.sources: - raise ODBCError("A source spec must have a corresponding source; call .add_source() first") + raise ODBCError( + "A source spec must have a corresponding source; call .add_source() first" + ) self.source_specs[name] = dict(spec) @@ -522,7 +567,9 @@ def _get_sections(self, data): pass elif len(line) < 2: # Not enough room for even 'k='; can't contain info - raise ODBCError("Malformed line in ODBC config (no meaningful data): " + line) + raise ODBCError( + "Malformed line in ODBC config (no meaningful data): " + line + ) elif line[0] == '[' and line[-1] == ']': # Starts a new section; '=' has no special meaning here sections[sectionKey] = sectionContents @@ -531,11 +578,15 @@ def _get_sections(self, data): else: # Not whitespace or section header; must be key=value. No duplicate '=' permitted. if '=' not in line: - raise ODBCError("Malformed line in ODBC config (no key-value mapping): " + line) + raise ODBCError( + "Malformed line in ODBC config (no key-value mapping): " + line + ) - key, value = line.split("=",1) + key, value = line.split("=", 1) if '=' in value: - raise ODBCError("Malformed line in ODBC config (too many '='): " + line) + raise ODBCError( + "Malformed line in ODBC config (too many '='): " + line + ) sectionContents[key.strip()] = value.strip() sections[sectionKey] = sectionContents @@ -548,7 +599,6 @@ def _get_sections(self, data): @DataManagerFactory.register('pypyodbc', "%s database interface" % 'pypyodbc') class pypyodbc_db_Table(pyodbc_db_Table): - def __init__(self): pyodbc_db_Table.__init__(self) self.using = 'pypyodbc' @@ -560,14 +610,13 @@ def requirements(self): return 'pypyodbc' def connect(self, connection, options): - assert(options['using'] == 'pypyodbc') + assert options['using'] == 'pypyodbc' return pyodbc_db_Table.connect(self, connection, options) @DataManagerFactory.register('sqlite3', "sqlite3 database interface") class sqlite3_db_Table(db_Table): - def __init__(self): db_Table.__init__(self) self.using = 'sqlite3' @@ -579,7 +628,7 @@ def requirements(self): return 'sqlite3' def connect(self, connection, options): - assert(options['using'] == 'sqlite3') + assert options['using'] == 'sqlite3' filename = connection if not os.path.exists(filename): @@ -593,7 +642,6 @@ def connect(self, connection, options): @DataManagerFactory.register('pymysql', "pymysql database interface") class pymysql_db_Table(db_Table): - def __init__(self): db_Table.__init__(self) self.using = 'pymysql' diff --git a/pyomo/dataportal/plugins/json_dict.py b/pyomo/dataportal/plugins/json_dict.py index 8740c38d258..e42c040ad0b 100644 --- a/pyomo/dataportal/plugins/json_dict.py +++ b/pyomo/dataportal/plugins/json_dict.py @@ -18,11 +18,11 @@ def detuplize(d, sort=False): - #print("detuplize %s" % str(d)) - if type(d) in (list,tuple,set): + # print("detuplize %s" % str(d)) + if type(d) in (list, tuple, set): ans = [] for item in d: - if type(item) in (list,tuple,set): + if type(item) in (list, tuple, set): ans.append(list(item)) else: ans.append(item) @@ -36,17 +36,18 @@ def detuplize(d, sort=False): # De-tuplize keys via list of key/value pairs # ans = [] - for k,v in d.items(): + for k, v in d.items(): if type(k) is tuple: - ans.append( {'index':list(k), 'value':v} ) + ans.append({'index': list(k), 'value': v}) else: - ans.append( {'index':k, 'value':v} ) + ans.append({'index': k, 'value': v}) if sort: - return sorted(ans, key=lambda x:x['value']) + return sorted(ans, key=lambda x: x['value']) return ans + def tuplize(d): - #print("tuplize %s" % str(d)) + # print("tuplize %s" % str(d)) if type(d) is list and len(d) > 0 and not type(d[0]) is dict: ans = [] for val in d: @@ -85,12 +86,11 @@ def tuplize(d): elif type(d) is dict: return d else: - return {None:d} + return {None: d} @DataManagerFactory.register("json", "JSON file interface") class JSONDictionary(object): - def __init__(self): self._info = {} self.options = Bunch() @@ -125,7 +125,7 @@ def read(self): if jdata is None or len(jdata) == 0: raise IOError("Empty JSON data file") self._info = {} - for k,v in jdata.items(): + for k, v in jdata.items(): self._info[k] = tuplize(v) def write(self, data): @@ -135,7 +135,7 @@ def write(self, data): with open(self.filename, 'w') as OUTPUT: jdata = {} if self.options.data is None: - for k,v in data.items(): + for k, v in data.items(): jdata[k] = detuplize(v) elif type(self.options.data) in (list, tuple): for k in self.options.data: @@ -163,7 +163,10 @@ def process(self, model, data, default): key = self.options.data self._set_data(data, self.options.namespace, key, self._info[key]) except KeyError: - raise IOError("Data value for '%s' is not available in JSON file '%s'" % (key, self.filename)) + raise IOError( + "Data value for '%s' is not available in JSON file '%s'" + % (key, self.filename) + ) def _set_data(self, data, namespace, name, value): if type(value) is dict: @@ -175,10 +178,8 @@ def clear(self): self._info = {} - @DataManagerFactory.register("yaml", "YAML file interface") class YamlDictionary(object): - def __init__(self): self._info = {} self.options = Bunch() @@ -216,7 +217,7 @@ def read(self): if jdata is None: raise IOError("Empty YAML file") self._info = {} - for k,v in jdata.items(): + for k, v in jdata.items(): self._info[k] = tuplize(v) def write(self, data): @@ -226,7 +227,7 @@ def write(self, data): with open(self.filename, 'w') as OUTPUT: jdata = {} if self.options.data is None: - for k,v in data.items(): + for k, v in data.items(): jdata[k] = detuplize(v) elif type(self.options.data) in (list, tuple): for k in self.options.data: @@ -254,7 +255,10 @@ def process(self, model, data, default): key = self.options.data self._set_data(data, self.options.namespace, key, self._info[key]) except KeyError: - raise IOError("Data value for '%s' is not available in YAML file '%s'" % (key, self.filename)) + raise IOError( + "Data value for '%s' is not available in YAML file '%s'" + % (key, self.filename) + ) def _set_data(self, data, namespace, name, value): if type(value) is dict: @@ -264,5 +268,3 @@ def _set_data(self, data, namespace, name, value): def clear(self): self._info = {} - - diff --git a/pyomo/dataportal/plugins/sheet.py b/pyomo/dataportal/plugins/sheet.py index 266206ed952..bc7e4d06952 100644 --- a/pyomo/dataportal/plugins/sheet.py +++ b/pyomo/dataportal/plugins/sheet.py @@ -12,22 +12,21 @@ import os.path from pyomo.dataportal import TableData + # from pyomo.dataportal.plugins.db_table import ( # pyodbc_available, pyodbc_db_Table, pypyodbc_available, pypyodbc_db_Table # ) from pyomo.dataportal.factory import DataManagerFactory from pyomo.common.errors import ApplicationError -from pyomo.common.dependencies import pyutilib, pyutilib_available -if pyutilib_available: - from pyutilib.excel.spreadsheet import ExcelSpreadsheet, Interfaces -else: - raise(RuntimeError('PyUtilib is required to use pyomo.dataportal.plugins.sheet.')) +from pyomo.common.dependencies import attempt_import + +spreadsheet, spreadsheet_available = attempt_import('pyutilib.excel.spreadsheet') + def _attempt_open_excel(): if _attempt_open_excel.result is None: - from pyutilib.excel.spreadsheet_win32com import ( - ExcelSpreadsheet_win32com - ) + from pyutilib.excel.spreadsheet_win32com import ExcelSpreadsheet_win32com + try: tmp = ExcelSpreadsheet_win32com() tmp._excel_dispatch() @@ -37,14 +36,14 @@ def _attempt_open_excel(): _attempt_open_excel.result = False return _attempt_open_excel.result + _attempt_open_excel.result = None class SheetTable(TableData): - def __init__(self, ctype=None): TableData.__init__(self) - self.ctype=ctype + self.ctype = ctype def open(self): if self.filename is None: @@ -56,7 +55,9 @@ def open(self): self.sheet = self._data else: try: - self.sheet = ExcelSpreadsheet(self.filename, ctype=self.ctype) + self.sheet = spreadsheet.ExcelSpreadsheet( + self.filename, ctype=self.ctype + ) except ApplicationError: raise @@ -66,15 +67,23 @@ def read(self): tmp = self.sheet.get_range(self.options.range, raw=True) if type(tmp) is float or type(tmp) is int: if not self.options.param is None: - self._info = ["param"] + list(self.options.param) + [":=",tmp] + self._info = ["param"] + list(self.options.param) + [":=", tmp] elif len(self.options.symbol_map) == 1: - self._info = ["param",self.options.symbol_map[self.options.symbol_map.keys()[0]],":=",tmp] + self._info = [ + "param", + self.options.symbol_map[self.options.symbol_map.keys()[0]], + ":=", + tmp, + ] else: - raise IOError("Data looks like a parameter, but multiple parameter names have been specified: %s" % str(self.options.symbol_map)) + raise IOError( + "Data looks like a parameter, but multiple parameter names have been specified: %s" + % str(self.options.symbol_map) + ) elif len(tmp) == 0: raise IOError("Empty range '%s'" % self.options.range) else: - if type(tmp[1]) in (list,tuple): + if type(tmp[1]) in (list, tuple): tmp_ = tmp[1:] else: tmp_ = [[x] for x in tmp[1:]] @@ -85,24 +94,23 @@ def close(self): del self.sheet - - @DataManagerFactory.register("xls", "Excel XLS file interface") class SheetTable_xls(SheetTable): - def __init__(self): - if Interfaces()['win32com'].available and _attempt_open_excel(): + if spreadsheet.Interfaces()['win32com'].available and _attempt_open_excel(): SheetTable.__init__(self, ctype='win32com') - elif Interfaces()['xlrd'].available: + elif spreadsheet.Interfaces()['xlrd'].available: SheetTable.__init__(self, ctype='xlrd') else: - raise RuntimeError("No excel interface is available; install %s" - % self.requirements()) + raise RuntimeError( + "No excel interface is available; install %s" % self.requirements() + ) def available(self): - _inter = Interfaces() - return (_inter['win32com'].available and _attempt_open_excel()) \ - or _inter['xlrd'].available + _inter = spreadsheet.Interfaces() + return (_inter['win32com'].available and _attempt_open_excel()) or _inter[ + 'xlrd' + ].available def requirements(self): return "win32com or xlrd" @@ -127,24 +135,26 @@ def requirements(self): @DataManagerFactory.register("xlsx", "Excel XLSX file interface") class SheetTable_xlsx(SheetTable): - def __init__(self): - if Interfaces()['win32com'].available and _attempt_open_excel(): + if spreadsheet.Interfaces()['win32com'].available and _attempt_open_excel(): SheetTable.__init__(self, ctype='win32com') - elif Interfaces()['openpyxl'].available: + elif spreadsheet.Interfaces()['openpyxl'].available: SheetTable.__init__(self, ctype='openpyxl') else: - raise RuntimeError("No excel interface is available; install %s" - % self.requirements()) + raise RuntimeError( + "No excel interface is available; install %s" % self.requirements() + ) def available(self): - _inter = Interfaces() - return (_inter['win32com'].available and _attempt_open_excel()) \ - or _inter['openpyxl'].available + _inter = spreadsheet.Interfaces() + return (_inter['win32com'].available and _attempt_open_excel()) or _inter[ + 'openpyxl' + ].available def requirements(self): return "win32com or openpyxl" + # # This class is OK, but the pyodbc interface doesn't work right now. # @@ -185,24 +195,26 @@ def requirements(self): @DataManagerFactory.register("xlsm", "Excel XLSM file interface") class SheetTable_xlsm(SheetTable): - def __init__(self): - if Interfaces()['win32com'].available and _attempt_open_excel(): + if spreadsheet.Interfaces()['win32com'].available and _attempt_open_excel(): SheetTable.__init__(self, ctype='win32com') - elif Interfaces()['openpyxl'].available: + elif spreadsheet.Interfaces()['openpyxl'].available: SheetTable.__init__(self, ctype='openpyxl') else: - raise RuntimeError("No excel interface is available; install %s" - % self.requirements()) + raise RuntimeError( + "No excel interface is available; install %s" % self.requirements() + ) def available(self): - _inter = Interfaces() - return (_inter['win32com'].available and _attempt_open_excel()) \ - or _inter['openpyxl'].available + _inter = spreadsheet.Interfaces() + return (_inter['win32com'].available and _attempt_open_excel()) or _inter[ + 'openpyxl' + ].available def requirements(self): return "win32com or openpyxl" + # @DataManagerFactory.register("xlsm", "Excel XLSM file interface") # class SheetTable_xlsm(pyodbc_db_base): # @@ -218,4 +230,3 @@ def requirements(self): # if not os.path.exists(self.filename): # raise IOError("Cannot find file '%s'" % self.filename) # return pyodbc_db_base.open(self) - diff --git a/pyomo/dataportal/plugins/text.py b/pyomo/dataportal/plugins/text.py index 6351b205a6f..a9b169e27bd 100644 --- a/pyomo/dataportal/plugins/text.py +++ b/pyomo/dataportal/plugins/text.py @@ -19,7 +19,6 @@ @DataManagerFactory.register("tab", "TAB file interface") class TextTable(TableData): - def __init__(self): TableData.__init__(self) self.FILE = None @@ -39,10 +38,10 @@ def read(self): raise IOError("Cannot find file '%s'" % self.filename) self.FILE = open(self.filename, 'r') try: - tmp=[] + tmp = [] for line in self.FILE: - line=line.strip() - tokens = re.split("[\t ]+",line) + line = line.strip() + tokens = re.split("[\t ]+", line) if tokens != ['']: tmp.append(tokens) if len(tmp) == 0: @@ -56,11 +55,19 @@ def read(self): if isinstance(p, Param): self.options.model = p.model() p = p.local_name - self._info = ["param",p,":=",tmp[0][0]] + self._info = ["param", p, ":=", tmp[0][0]] elif len(self.options.symbol_map) == 1: - self._info = ["param",self.options.symbol_map[self.options.symbol_map.keys()[0]],":=",tmp[0][0]] + self._info = [ + "param", + self.options.symbol_map[self.options.symbol_map.keys()[0]], + ":=", + tmp[0][0], + ] else: - raise IOError("Data looks like a parameter, but multiple parameter names have been specified: %s" % str(self.options.symbol_map)) + raise IOError( + "Data looks like a parameter, but multiple parameter names have been specified: %s" + % str(self.options.symbol_map) + ) else: self._set_data(tmp[0], tmp[1:]) except Exception: @@ -78,7 +85,6 @@ def write(self, data): self.FILE = open(self.filename, 'w') table = self._get_table() for line in table: - self.FILE.write(' '.join(map(str, line))+'\n') + self.FILE.write(' '.join(map(str, line)) + '\n') self.FILE.close() self.FILE = None - diff --git a/pyomo/dataportal/plugins/xml_table.py b/pyomo/dataportal/plugins/xml_table.py index 25dd8a7cec9..79245c6d24a 100644 --- a/pyomo/dataportal/plugins/xml_table.py +++ b/pyomo/dataportal/plugins/xml_table.py @@ -14,9 +14,11 @@ from pyomo.dataportal.factory import DataManagerFactory from pyomo.dataportal import TableData + def _xml_importer(): try: from lxml import etree + return etree except ImportError: pass @@ -24,19 +26,22 @@ def _xml_importer(): try: # Python 2.5+ import xml.etree.cElementTree as etree + return etree except ImportError: pass # Python 2.5+ import xml.etree.ElementTree as etree + return etree + ET, ET_available = attempt_import('ET', importer=_xml_importer) + @DataManagerFactory.register("xml", "XML file interface") class XMLTable(TableData): - def __init__(self): TableData.__init__(self) @@ -60,7 +65,7 @@ def read(self): parents = [parent for parent in tree.findall(self.options.query)] else: parents = [parent for parent in tree.getroot()] - tmp=[] + tmp = [] labels = [] for parent in parents: if len(tmp) == 0: @@ -73,7 +78,7 @@ def read(self): row[child.tag] = child.get('value') else: row[child.tag] = child.text - tmp.append( [row.get(label,'.') for label in labels] ) + tmp.append([row.get(label, '.') for label in labels]) # if len(tmp) == 0: raise IOError("Empty *.xml file") @@ -86,11 +91,19 @@ def read(self): if isinstance(p, Param): self.options.model = p._model() p = p.local_name - self._info = ["param",p,":=",tmp[0][0]] + self._info = ["param", p, ":=", tmp[0][0]] elif len(self.options.symbol_map) == 1: - self._info = ["param",self.options.symbol_map[self.options.symbol_map.keys()[0]],":=",tmp[0][0]] + self._info = [ + "param", + self.options.symbol_map[self.options.symbol_map.keys()[0]], + ":=", + tmp[0][0], + ] else: - raise IOError("Data looks like a parameter, but multiple parameter names have been specified: %s" % str(self.options.symbol_map)) + raise IOError( + "Data looks like a parameter, but multiple parameter names have been specified: %s" + % str(self.options.symbol_map) + ) else: self._set_data(tmp[0], tmp[1:]) @@ -110,4 +123,3 @@ def write(self, data): # tree = ET.ElementTree(root) tree.write(self.filename) - diff --git a/pyomo/dataportal/process_data.py b/pyomo/dataportal/process_data.py index 3bd8f5ae9a0..5eb15269e0c 100644 --- a/pyomo/dataportal/process_data.py +++ b/pyomo/dataportal/process_data.py @@ -18,9 +18,7 @@ from pyomo.common.collections import Bunch, OrderedDict from pyomo.common.errors import ApplicationError -from pyomo.dataportal.parse_datacmds import ( - parse_data_commands, _re_number -) +from pyomo.dataportal.parse_datacmds import parse_data_commands, _re_number from pyomo.dataportal.factory import DataManagerFactory, UnknownDataManager from pyomo.core.base.set import UnknownSetDimen from pyomo.core.base.util import flatten_tuple @@ -32,11 +30,12 @@ global Lineno global Filename -_num_pattern = re.compile("^("+_re_number+")$") -_str_false_values = {'False','false','FALSE'} -_str_bool_values = {'True','true','TRUE'} +_num_pattern = re.compile("^(" + _re_number + ")$") +_str_false_values = {'False', 'false', 'FALSE'} +_str_bool_values = {'True', 'true', 'TRUE'} _str_bool_values.update(_str_false_values) + def _guess_set_dimen(index): d = 0 # Look through the subsets of this index and get their dimen @@ -58,8 +57,9 @@ def _guess_set_dimen(index): d += sub_d return d + def _process_token(token): - #print("TOKEN:", token, type(token)) + # print("TOKEN:", token, type(token)) if type(token) is tuple: return tuple(_process_token(i) for i in token) elif type(token) in numlist: @@ -74,7 +74,7 @@ def _process_token(token): token = token[1:-1] for item in token.split(","): if item[0] in '"\'' and item[0] == item[-1]: - vals.append( item[1:-1] ) + vals.append(item[1:-1]) elif _num_pattern.match(item): _num = float(item) if '.' in item: @@ -83,7 +83,7 @@ def _process_token(token): _int = int(_num) vals.append(_int if _int == _num else _num) else: - vals.append( item ) + vals.append(item) return tuple(vals) elif _num_pattern.match(token): _num = float(token) @@ -103,16 +103,18 @@ def _preprocess_data(cmd): """ generate_debug_messages = is_debug_set(logger) if generate_debug_messages: - logger.debug("_preprocess_data(start) %s",cmd) + logger.debug("_preprocess_data(start) %s", cmd) state = 0 - newcmd=[] + newcmd = [] tpl = [] for token in cmd: if state == 0: if type(token) in numlist: newcmd.append(token) elif token == ',': - raise ValueError("Unexpected comma outside of (), {} or [] declarations") + raise ValueError( + "Unexpected comma outside of (), {} or [] declarations" + ) elif token == '(': state = 1 elif token == ')': @@ -137,7 +139,7 @@ def _preprocess_data(cmd): elif token == '(': raise ValueError("Two '('s follow each other in the data") elif token == ')': - newcmd.append( tuple(tpl) ) + newcmd.append(tuple(tpl)) tpl = [] state = 0 else: @@ -152,7 +154,9 @@ def _preprocess_data(cmd): elif token == '{': raise ValueError("Two '{'s follow each other in the data") elif token == '}': - newcmd.append( tpl ) # Keep this as a list, so we can distinguish it while parsing tables + newcmd.append( + tpl + ) # Keep this as a list, so we can distinguish it while parsing tables tpl = [] state = 0 else: @@ -167,7 +171,7 @@ def _preprocess_data(cmd): elif token == '[': raise ValueError("Two '['s follow each other in the data") elif token == ']': - newcmd.append( tuple(tpl) ) + newcmd.append(tuple(tpl)) tpl = [] state = 0 else: @@ -188,10 +192,10 @@ def _process_set(cmd, _model, _data): """ Called by _process_data() to process a set declaration. """ - #print("SET %s" % cmd) + # print("SET %s" % cmd) generate_debug_messages = is_debug_set(logger) if generate_debug_messages: - logger.debug("DEBUG: _process_set(start) %s",cmd) + logger.debug("DEBUG: _process_set(start) %s", cmd) # # Process a set # @@ -199,13 +203,15 @@ def _process_set(cmd, _model, _data): # # An indexed set # - ndx=cmd[2] + ndx = cmd[2] if len(ndx) == 0: # At this point, if the index is an empty tuple, then there is an # issue with the specification of this indexed set. - raise ValueError("Illegal indexed set specification encountered: "+str(cmd[1])) + raise ValueError( + "Illegal indexed set specification encountered: " + str(cmd[1]) + ) elif len(ndx) == 1: - ndx=ndx[0] + ndx = ndx[0] if cmd[1] not in _data: _data[cmd[1]] = {} _data[cmd[1]][ndx] = _process_set_data(cmd[4:], cmd[1], _model) @@ -216,18 +222,18 @@ def _process_set(cmd, _model, _data): # _data[cmd[1]] = {} _data[cmd[1]][None] = [] - i=3 + i = 3 while cmd[i] != ":=": i += 1 ndx1 = cmd[3:i] i += 1 - while i 1 else 0 + finaldata = _process_data_list(pname, _dim, cmd) for key in finaldata: - _data[pname][key]=finaldata[key] + _data[pname][key] = finaldata[key] else: tmp = ["param", pname, ":="] - i=1 + i = 1 while i < len(cmd): i0 = i while cmd[i] != ":=": - i=i+1 + i = i + 1 ncol = i - i0 + 1 lcmd = i while lcmd < len(cmd) and cmd[lcmd] != ":": lcmd += 1 j0 = i0 - 1 - for j in range(1,ncol): + for j in range(1, ncol): ii = 1 + i kk = ii + j while kk < lcmd: if cmd[kk] != ".": - #if 1>0: - tmp.append(copy.copy(cmd[j+j0])) + # if 1>0: + tmp.append(copy.copy(cmd[j + j0])) tmp.append(copy.copy(cmd[ii])) tmp.append(copy.copy(cmd[kk])) ii = ii + ncol kk = kk + ncol i = lcmd + 1 - _process_param(tmp, _model, _data, _default, index=index, param=param, ncolumns=ncolumns) + _process_param( + tmp, + _model, + _data, + _default, + index=index, + param=param, + ncolumns=ncolumns, + ) else: tmp = ["param", pname, ":="] if param is None: - param = [ pname ] - i=1 + param = [pname] + i = 1 if generate_debug_messages: - logger.debug("DEBUG: _process_param (singledef with :...:=) %s",cmd) + logger.debug("DEBUG: _process_param (singledef with :...:=) %s", cmd) while i < len(cmd): i0 = i - while i 0: - np = i-1 - dnp = d+np-1 + np = i - 1 + dnp = d + np - 1 ii = i + 1 - kk = i + d + j-1 + kk = i + d + j - 1 else: np = i dnp = d + np ii = i + 1 kk = np + 1 + d + nsets + j - #print cmd[ii], d, np, dnp, ii, kk - tmp = [ "param", pname, ":=" ] + # print cmd[ii], d, np, dnp, ii, kk + tmp = ["param", pname, ":="] if generate_debug_messages: logger.debug('dnp %d\nnp %d', dnp, np) while kk < Lcmd: if generate_debug_messages: - logger.debug("kk %d, ii %d",kk,ii) + logger.debug("kk %d, ii %d", kk, ii) iid = ii + d while ii < iid: tmp.append(copy.copy(cmd[ii])) ii += 1 - ii += dnp-d + ii += dnp - d tmp.append(copy.copy(cmd[kk])) kk += dnp - #print "TMP", tmp, ncolumns-nparams+1 + # print "TMP", tmp, ncolumns-nparams+1 if not ncolumns is None: - nc = ncolumns-nparams+1 + nc = ncolumns - nparams + 1 else: nc = None - _process_param(tmp, _model, _data, _default, index=index, param=param[j-jstart], ncolumns=nc) + _process_param( + tmp, + _model, + _data, + _default, + index=index, + param=param[j - jstart], + ncolumns=nc, + ) j += 1 @@ -528,12 +565,12 @@ def _apply_templates(cmd): ans = [] i = 0 while i < len(cmd): - #print i, len(cmd), cmd[i], ilist, template, ans + # print i, len(cmd), cmd[i], ilist, template, ans if type(cmd[i]) is tuple and '*' in cmd[i]: j = i - tmp=list(cmd[j]) + tmp = list(cmd[j]) nindex = len(tmp) - template=tmp + template = tmp ilist = set() for kk in range(nindex): if tmp[kk] == '*': @@ -558,13 +595,15 @@ def _process_data_list(param_name, dim, cmd): """ generate_debug_messages = is_debug_set(logger) if generate_debug_messages: - logger.debug("process_data_list %d %s",dim,cmd) + logger.debug("process_data_list %d %s", dim, cmd) - if len(cmd) % (dim+1) != 0: - msg = "Parameter '%s' defined with '%d' dimensions, " \ - "but data has '%d' values: %s." + if len(cmd) % (dim + 1) != 0: + msg = ( + "Parameter '%s' defined with '%d' dimensions, " + "but data has '%d' values: %s." + ) msg = msg % (param_name, dim, len(cmd), cmd) - if len(cmd) % (dim+1) == dim: + if len(cmd) % (dim + 1) == dim: msg += " Are you missing a value for a %d-dimensional index?" % dim elif len(cmd) % dim == 0: msg += " Are you missing the values for %d-dimensional indices?" % dim @@ -572,19 +611,19 @@ def _process_data_list(param_name, dim, cmd): msg += " Data declaration must be given in multiples of %d." % (dim + 1) raise ValueError(msg) - ans={} + ans = {} if dim == 0: - ans[None]=cmd[0] + ans[None] = cmd[0] return ans - i=0 + i = 0 while i < len(cmd): if dim > 1: - ndx = tuple(cmd[i:i+dim]) + ndx = tuple(cmd[i : i + dim]) else: ndx = cmd[i] - if cmd[i+dim] != ".": - ans[ndx] = cmd[i+dim] - i += dim+1 + if cmd[i + dim] != ".": + ans[ndx] = cmd[i + dim] + i += dim + 1 return ans @@ -624,14 +663,16 @@ def _process_include(cmd, _model, _data, _default, options=None): if key is None: _data[scenario].update(_tmpdata[key]) else: - raise IOError("Cannot define a scenario within another scenario") + raise IOError( + "Cannot define a scenario within another scenario" + ) else: _process_data(cmd, _model, _data[scenario], _default, Filename, Lineno) return True def _process_table(cmd, _model, _data, _default, options=None): - #print("TABLE %s" % cmd) + # print("TABLE %s" % cmd) # _options = {} _set = OrderedDict() @@ -644,7 +685,7 @@ def _process_table(cmd, _model, _data, _default, options=None): i = 0 while i < _cmd_len: try: - #print("CMD i=%s cmd=%s" % (i, _cmd[i:])) + # print("CMD i=%s cmd=%s" % (i, _cmd[i:])) # # This should not be error prone, so we treat errors # with a general exception @@ -663,49 +704,50 @@ def _process_table(cmd, _model, _data, _default, options=None): # Processing options # name = _cmd[i] - if i+1 == _cmd_len: + if i + 1 == _cmd_len: _param[name] = [] _labels = ['Z'] i += 1 continue - if _cmd[i+1] == '=': - if type(_cmd[i+2]) is list: - _set[name] = _cmd[i+2] + if _cmd[i + 1] == '=': + if type(_cmd[i + 2]) is list: + _set[name] = _cmd[i + 2] else: - _options[name] = _cmd[i+2] + _options[name] = _cmd[i + 2] i += 3 continue # This should be a parameter declaration - if not type(_cmd[i+1]) is tuple: + if not type(_cmd[i + 1]) is tuple: raise IOError - if i+2 < _cmd_len and _cmd[i+2] == '=': - _param[name] = (_cmd[i+1], _cmd[i+3][0]) + if i + 2 < _cmd_len and _cmd[i + 2] == '=': + _param[name] = (_cmd[i + 1], _cmd[i + 3][0]) i += 4 else: - _param[name] = _cmd[i+1] + _param[name] = _cmd[i + 1] i += 2 except: raise IOError("Error parsing table options: %s" % name) - - #print("_options %s" % _options) - #print("_set %s" % _set) - #print("_param %s" % _param) - #print("_labels %s" % _labels) -# + # print("_options %s" % _options) + # print("_set %s" % _set) + # print("_param %s" % _param) + # print("_labels %s" % _labels) + # options = Bunch(**_options) for key in options: if not key in ['columns']: raise ValueError("Unknown table option '%s'" % key) # - ncolumns=options.columns + ncolumns = options.columns if ncolumns is None: ncolumns = len(_labels) if ncolumns == 0: if not (len(_set) == 1 and len(_set[_set.keys()[0]]) == 0): - raise IOError("Must specify either the 'columns' option or column headers") + raise IOError( + "Must specify either the 'columns' option or column headers" + ) else: - ncolumns=1 + ncolumns = 1 else: ncolumns = int(ncolumns) # @@ -715,7 +757,7 @@ def _process_table(cmd, _model, _data, _default, options=None): cmap = {} if len(_labels) == 0: for i in range(ncolumns): - cmap[i+1] = i + cmap[i + 1] = i for label in _param: ndx = cmap[_param[label][1]] if ndx < 0 or ndx >= ncolumns: @@ -727,17 +769,19 @@ def _process_table(cmd, _model, _data, _default, options=None): for label in _labels: cmap[label] = i i += 1 - #print("CMAP %s" % cmap) + # print("CMAP %s" % cmap) # - #print("_param %s" % _param) - #print("_set %s" % _set) + # print("_param %s" % _param) + # print("_set %s" % _set) for sname in _set: # Creating set sname cols = _set[sname] tmp = [] for col in cols: if not col in cmap: - raise IOError("Unexpected table column '%s' for index set '%s'" % (col, sname)) + raise IOError( + "Unexpected table column '%s' for index set '%s'" % (col, sname) + ) tmp.append(cmap[col]) if not sname in cmap: cmap[sname] = tmp @@ -747,20 +791,20 @@ def _process_table(cmd, _model, _data, _default, options=None): i = 0 while i < Ldata: row = [] - #print("COLS %s NCOLS %d" % (cols, ncolumns)) + # print("COLS %s NCOLS %d" % (cols, ncolumns)) for col in cols: - #print("Y %s %s" % (i, col)) - row.append( data[i+col] ) + # print("Y %s %s" % (i, col)) + row.append(data[i + col]) if len(row) > 1: - _cmd.append( tuple(row) ) + _cmd.append(tuple(row)) else: - _cmd.append( row[0] ) + _cmd.append(row[0]) i += ncolumns - #print("_data %s" % _data) + # print("_data %s" % _data) _process_set(_cmd, _model, _data) # - #print("CMAP %s" % cmap) - _i=0 + # print("CMAP %s" % cmap) + _i = 0 if ncolumns == 0: raise IOError for vname in _param: @@ -769,55 +813,57 @@ def _process_table(cmd, _model, _data, _default, options=None): cols = _param[vname] tmp = [] for col in cols: - #print("COL %s" % col) + # print("COL %s" % col) if not col in cmap: - raise IOError("Unexpected table column '%s' for table value '%s'" % (col, vname)) + raise IOError( + "Unexpected table column '%s' for table value '%s'" % (col, vname) + ) tmp.append(cmap[col]) - #print("X %s %s" % (len(cols), tmp)) + # print("X %s %s" % (len(cols), tmp)) cols = list(flatten_tuple(tmp)) - #print("X %s" % len(cols)) - #print("VNAME %s %s" % (vname, cmap[vname])) + # print("X %s" % len(cols)) + # print("VNAME %s %s" % (vname, cmap[vname])) if vname in cmap: cols.append(cmap[vname]) else: - cols.append( ncolumns-1 - (len(_param)-_i) ) - #print("X %s" % len(cols)) + cols.append(ncolumns - 1 - (len(_param) - _i)) + # print("X %s" % len(cols)) # _cmd = ['param', vname, ':='] i = 0 while i < Ldata: - #print("HERE %s %s %s" % (i, cols, ncolumns)) + # print("HERE %s %s %s" % (i, cols, ncolumns)) for col in cols: - _cmd.append( data[i+col] ) + _cmd.append(data[i + col]) i += ncolumns - #print("HERE %s" % _cmd) - #print("_data %s" % _data) + # print("HERE %s" % _cmd) + # print("_data %s" % _data) _process_param(_cmd, _model, _data, None, ncolumns=len(cols)) def _process_load(cmd, _model, _data, _default, options=None): - #print("LOAD %s" % cmd) + # print("LOAD %s" % cmd) from pyomo.core import Set _cmd_len = len(cmd) _options = {} _options['filename'] = cmd[1] - i=2 + i = 2 while cmd[i] != ':': - _options[cmd[i]] = cmd[i+2] + _options[cmd[i]] = cmd[i + 2] i += 3 i += 1 _Index = (None, []) if type(cmd[i]) is tuple: _Index = (None, cmd[i]) i += 1 - elif i+1 < _cmd_len and cmd[i+1] == '=': - _Index = (cmd[i], cmd[i+2]) + elif i + 1 < _cmd_len and cmd[i + 1] == '=': + _Index = (cmd[i], cmd[i + 2]) i += 3 _smap = OrderedDict() - while i<_cmd_len: - if i+2 < _cmd_len and cmd[i+1] == '=': - _smap[cmd[i+2]] = cmd[i] + while i < _cmd_len: + if i + 2 < _cmd_len and cmd[i + 1] == '=': + _smap[cmd[i + 2]] = cmd[i] i += 3 else: _smap[cmd[i]] = cmd[i] @@ -828,7 +874,18 @@ def _process_load(cmd, _model, _data, _default, options=None): options = Bunch(**_options) for key in options: - if not key in ['range','filename','format','using','driver','query','table','user','password','database']: + if not key in [ + 'range', + 'filename', + 'format', + 'using', + 'driver', + 'query', + 'table', + 'user', + 'password', + 'database', + ]: raise ValueError("Unknown load option '%s'" % key) global Filename @@ -842,29 +899,31 @@ def _process_load(cmd, _model, _data, _default, options=None): if options.using is None: tmp = options.filename.split(".")[-1] data = DataManagerFactory(tmp) - if (data is None) or \ - isinstance(data, UnknownDataManager): + if (data is None) or isinstance(data, UnknownDataManager): raise ApplicationError("Data manager '%s' is not available." % tmp) else: try: data = DataManagerFactory(options.using) except: data = None - if (data is None) or \ - isinstance(data, UnknownDataManager): - raise ApplicationError("Data manager '%s' is not available." % options.using) - set_name=None + if (data is None) or isinstance(data, UnknownDataManager): + raise ApplicationError( + "Data manager '%s' is not available." % options.using + ) + set_name = None # # Create symbol map # symb_map = _smap if len(symb_map) == 0: - raise IOError("Must specify at least one set or parameter name that will be loaded") + raise IOError( + "Must specify at least one set or parameter name that will be loaded" + ) # # Process index data # - _index=None - index_name=_Index[0] + _index = None + index_name = _Index[0] _select = None # # Set the 'set name' based on the format @@ -872,16 +931,26 @@ def _process_load(cmd, _model, _data, _default, options=None): _set = None if options.format == 'set' or options.format == 'set_array': if len(_smap) != 1: - raise IOError("A single set name must be specified when using format '%s'" % options.format) - set_name=list(_smap.keys())[0] + raise IOError( + "A single set name must be specified when using format '%s'" + % options.format + ) + set_name = list(_smap.keys())[0] _set = set_name # # Set the 'param name' based on the format # _param = None - if options.format == 'transposed_array' or options.format == 'array' or options.format == 'param': + if ( + options.format == 'transposed_array' + or options.format == 'array' + or options.format == 'param' + ): if len(_smap) != 1: - raise IOError("A single parameter name must be specified when using format '%s'" % options.format) + raise IOError( + "A single parameter name must be specified when using format '%s'" + % options.format + ) if options.format in ('transposed_array', 'array', 'param', None): if _Index[0] is None: _index = None @@ -890,21 +959,43 @@ def _process_load(cmd, _model, _data, _default, options=None): _param = [] _select = list(_Index[1]) for key in _smap: - _param.append( _smap[key] ) - _select.append( key ) + _param.append(_smap[key]) + _select.append(key) if options.format in ('transposed_array', 'array'): _select = None - #print "YYY", _param, options - if not _param is None and len(_param) == 1 and not _model is None and isinstance(getattr(_model, _param[0]), Set): + # print "YYY", _param, options + if ( + not _param is None + and len(_param) == 1 + and not _model is None + and isinstance(getattr(_model, _param[0]), Set) + ): _select = None _set = _param[0] _param = None _index = None - #print "SELECT", _param, _select + # print "SELECT", _param, _select # - data.initialize(model=options.model, filename=options.filename, index=_index, index_name=index_name, param_name=symb_map, set=_set, param=_param, format=options.format, range=options.range, query=options.query, using=options.using, table=options.table, select=_select,user=options.user,password=options.password,database=options.database) + data.initialize( + model=options.model, + filename=options.filename, + index=_index, + index_name=index_name, + param_name=symb_map, + set=_set, + param=_param, + format=options.format, + range=options.range, + query=options.query, + using=options.using, + table=options.table, + select=_select, + user=options.user, + password=options.password, + database=options.database, + ) # data.open() try: @@ -916,20 +1007,31 @@ def _process_load(cmd, _model, _data, _default, options=None): data.process(_model, _data, _default) -def _process_data(cmd, _model, _data, _default, Filename_, Lineno_=0, index=None, set=None, param=None, ncolumns=None): +def _process_data( + cmd, + _model, + _data, + _default, + Filename_, + Lineno_=0, + index=None, + set=None, + param=None, + ncolumns=None, +): """ Called by import_file() to (1) preprocess data and (2) call subroutines to process different types of data """ - #print("CMD %s" %cmd) + # print("CMD %s" %cmd) global Lineno global Filename - Lineno=Lineno_ - Filename=Filename_ + Lineno = Lineno_ + Filename = Filename_ generate_debug_messages = is_debug_set(logger) if generate_debug_messages: - logger.debug("DEBUG: _process_data (start) %s",cmd) - if len(cmd) == 0: #pragma:nocover + logger.debug("DEBUG: _process_data (start) %s", cmd) + if len(cmd) == 0: # pragma:nocover raise ValueError("ERROR: Empty list passed to Model::_process_data") if cmd[0] == "data": @@ -944,7 +1046,9 @@ def _process_data(cmd, _model, _data, _default, Filename_, Lineno_=0, index=None elif cmd[0].startswith('param'): cmd = _preprocess_data(cmd) - _process_param(cmd, _model, _data, _default, index=index, param=param, ncolumns=ncolumns) + _process_param( + cmd, _model, _data, _default, index=index, param=param, ncolumns=ncolumns + ) elif cmd[0] == 'include': cmd = _preprocess_data(cmd) @@ -959,6 +1063,6 @@ def _process_data(cmd, _model, _data, _default, Filename_, Lineno_=0, index=None _process_table(cmd, _model, _data, _default) else: - raise IOError("ERROR: Unknown data command: "+" ".join(cmd)) + raise IOError("ERROR: Unknown data command: " + " ".join(cmd)) return True diff --git a/pyomo/dataportal/tests/data17.dat b/pyomo/dataportal/tests/data17.dat new file mode 100644 index 00000000000..52af6779359 --- /dev/null +++ b/pyomo/dataportal/tests/data17.dat @@ -0,0 +1,10 @@ +param A := 1; + +param B := a 1; + +param C := +a 1 +b 2 +c 3; + +table D := 1; \ No newline at end of file diff --git a/pyomo/dataportal/tests/test_dat_parser.py b/pyomo/dataportal/tests/test_dat_parser.py index 598422baff2..0663279875d 100644 --- a/pyomo/dataportal/tests/test_dat_parser.py +++ b/pyomo/dataportal/tests/test_dat_parser.py @@ -17,6 +17,7 @@ import pyomo.dataportal.parse_datacmds as parser + class TestDatParser(unittest.TestCase): def test_update_parsetable(self): parser.parse_data_commands('') diff --git a/pyomo/dataportal/tests/test_dataportal.py b/pyomo/dataportal/tests/test_dataportal.py index 20faab717e3..db9423abff6 100644 --- a/pyomo/dataportal/tests/test_dataportal.py +++ b/pyomo/dataportal/tests/test_dataportal.py @@ -16,18 +16,52 @@ import json import os from os.path import abspath, dirname, join -pyomo_dir=dirname(dirname(abspath(__file__)))+os.sep+".." + +pyomo_dir = dirname(dirname(abspath(__file__))) + os.sep + ".." import pyomo.common.unittest as unittest from pyomo.common.errors import ApplicationError from pyomo.common.tee import capture_output from pyomo.dataportal.factory import DataManagerFactory -from pyomo.environ import AbstractModel, ConcreteModel, Set, DataPortal, Param, Boolean, Any, value - -currdir=dirname(abspath(__file__))+os.sep -example_dir=pyomo_dir+os.sep+".."+os.sep+"examples"+os.sep+"pyomo"+os.sep+"tutorials"+os.sep+"tab"+os.sep -tutorial_dir=pyomo_dir+os.sep+".."+os.sep+"examples"+os.sep+"pyomo"+os.sep+"tutorials"+os.sep +from pyomo.environ import ( + AbstractModel, + ConcreteModel, + Set, + DataPortal, + Param, + Boolean, + Any, + value, +) + +currdir = dirname(abspath(__file__)) + os.sep +example_dir = ( + pyomo_dir + + os.sep + + ".." + + os.sep + + "examples" + + os.sep + + "pyomo" + + os.sep + + "tutorials" + + os.sep + + "tab" + + os.sep +) +tutorial_dir = ( + pyomo_dir + + os.sep + + ".." + + os.sep + + "examples" + + os.sep + + "pyomo" + + os.sep + + "tutorials" + + os.sep +) try: xls_interface = DataManagerFactory('xls').available() @@ -52,90 +86,284 @@ yaml_interface = False - @unittest.skipIf(not xls_interface, "No XLS interface available") class PyomoTableData(unittest.TestCase): - def setUp(self): pass - def construct(self,filename): + def construct(self, filename): pass def test_read_set(self): td = DataManagerFactory('xls') - td.initialize(filename=currdir+"Book1.xls", range="TheRange", format='set', set="X") + td.initialize( + filename=currdir + "Book1.xls", range="TheRange", format='set', set="X" + ) try: td.open() td.read() td.close() - self.assertEqual( td._info, ['set', 'X', ':=', ('A1', 2.0, 3.0, 4.0), ('A5', 6.0, 7.0, 8.0), ('A9', 10.0, 11.0, 12.0), ('A13', 14.0, 15.0, 16.0)]) + self.assertEqual( + td._info, + [ + 'set', + 'X', + ':=', + ('A1', 2.0, 3.0, 4.0), + ('A5', 6.0, 7.0, 8.0), + ('A9', 10.0, 11.0, 12.0), + ('A13', 14.0, 15.0, 16.0), + ], + ) except ApplicationError: pass def test_read_param1(self): td = DataManagerFactory('xls') - td.initialize(filename=currdir+"Book1.xls", range="TheRange", param=['bb','cc','dd']) + td.initialize( + filename=currdir + "Book1.xls", range="TheRange", param=['bb', 'cc', 'dd'] + ) try: td.open() td.read() td.close() - self.assertEqual( td._info, ['param', ':', 'bb', 'cc', 'dd', ':=', 'A1', 2.0, 3.0, 4.0, 'A5', 6.0, 7.0, 8.0, 'A9', 10.0, 11.0, 12.0, 'A13', 14.0, 15.0, 16.0]) + self.assertEqual( + td._info, + [ + 'param', + ':', + 'bb', + 'cc', + 'dd', + ':=', + 'A1', + 2.0, + 3.0, + 4.0, + 'A5', + 6.0, + 7.0, + 8.0, + 'A9', + 10.0, + 11.0, + 12.0, + 'A13', + 14.0, + 15.0, + 16.0, + ], + ) except ApplicationError: pass def test_read_param2(self): td = DataManagerFactory('xls') - td.initialize(filename=currdir+"Book1.xls",range="TheRange", index="X", param=['bb','cc','dd']) + td.initialize( + filename=currdir + "Book1.xls", + range="TheRange", + index="X", + param=['bb', 'cc', 'dd'], + ) try: td.open() td.read() td.close() - self.assertEqual( td._info, ['param', ':', 'X', ':', 'bb', 'cc', 'dd', ':=', 'A1', 2.0, 3.0, 4.0, 'A5', 6.0, 7.0, 8.0, 'A9', 10.0, 11.0, 12.0, 'A13', 14.0, 15.0, 16.0]) + self.assertEqual( + td._info, + [ + 'param', + ':', + 'X', + ':', + 'bb', + 'cc', + 'dd', + ':=', + 'A1', + 2.0, + 3.0, + 4.0, + 'A5', + 6.0, + 7.0, + 8.0, + 'A9', + 10.0, + 11.0, + 12.0, + 'A13', + 14.0, + 15.0, + 16.0, + ], + ) except ApplicationError: pass def test_read_param3(self): td = DataManagerFactory('xls') - td.initialize(filename=currdir+"Book1.xls",range="TheRange", index="X", param=["a"]) + td.initialize( + filename=currdir + "Book1.xls", range="TheRange", index="X", param=["a"] + ) try: td.open() td.read() td.close() - self.assertEqual( td._info, ['param', ':', 'X', ':', 'a', ':=', 'A1', 2.0, 3.0, 4.0, 'A5', 6.0, 7.0, 8.0, 'A9', 10.0, 11.0, 12.0, 'A13', 14.0, 15.0, 16.0]) + self.assertEqual( + td._info, + [ + 'param', + ':', + 'X', + ':', + 'a', + ':=', + 'A1', + 2.0, + 3.0, + 4.0, + 'A5', + 6.0, + 7.0, + 8.0, + 'A9', + 10.0, + 11.0, + 12.0, + 'A13', + 14.0, + 15.0, + 16.0, + ], + ) except ApplicationError: pass def test_read_param4(self): td = DataManagerFactory('xls') - td.initialize(filename=currdir+"Book1.xls", range="TheRange", index="X", param=['a','b'],) + td.initialize( + filename=currdir + "Book1.xls", + range="TheRange", + index="X", + param=['a', 'b'], + ) try: td.open() td.read() td.close() - self.assertEqual( td._info, ['param', ':', 'X', ':', 'a', 'b', ':=', 'A1', 2.0, 3.0, 4.0, 'A5', 6.0, 7.0, 8.0, 'A9', 10.0, 11.0, 12.0, 'A13', 14.0, 15.0, 16.0]) + self.assertEqual( + td._info, + [ + 'param', + ':', + 'X', + ':', + 'a', + 'b', + ':=', + 'A1', + 2.0, + 3.0, + 4.0, + 'A5', + 6.0, + 7.0, + 8.0, + 'A9', + 10.0, + 11.0, + 12.0, + 'A13', + 14.0, + 15.0, + 16.0, + ], + ) except ApplicationError: pass def test_read_array1(self): td = DataManagerFactory('xls') - td.initialize(filename=currdir+"Book1.xls",range="TheRange", param="X", format="array") + td.initialize( + filename=currdir + "Book1.xls", range="TheRange", param="X", format="array" + ) try: td.open() td.read() td.close() - self.assertEqual( td._info, ['param', 'X', ':', 'bb', 'cc', 'dd', ':=', 'A1', 2.0, 3.0, 4.0, 'A5', 6.0, 7.0, 8.0, 'A9', 10.0, 11.0, 12.0, 'A13', 14.0, 15.0, 16.0]) + self.assertEqual( + td._info, + [ + 'param', + 'X', + ':', + 'bb', + 'cc', + 'dd', + ':=', + 'A1', + 2.0, + 3.0, + 4.0, + 'A5', + 6.0, + 7.0, + 8.0, + 'A9', + 10.0, + 11.0, + 12.0, + 'A13', + 14.0, + 15.0, + 16.0, + ], + ) except ApplicationError: pass def test_read_array2(self): td = DataManagerFactory('xls') - td.initialize(filename=currdir+"Book1.xls",range="TheRange",param="X",format="transposed_array") + td.initialize( + filename=currdir + "Book1.xls", + range="TheRange", + param="X", + format="transposed_array", + ) try: td.open() td.read() td.close() - self.assertEqual( td._info, ['param', 'X', '(tr)',':', 'bb', 'cc', 'dd', ':=', 'A1', 2.0, 3.0, 4.0, 'A5', 6.0, 7.0, 8.0, 'A9', 10.0, 11.0, 12.0, 'A13', 14.0, 15.0, 16.0]) + self.assertEqual( + td._info, + [ + 'param', + 'X', + '(tr)', + ':', + 'bb', + 'cc', + 'dd', + ':=', + 'A1', + 2.0, + 3.0, + 4.0, + 'A5', + 6.0, + 7.0, + 8.0, + 'A9', + 10.0, + 11.0, + 12.0, + 'A13', + 14.0, + 15.0, + 16.0, + ], + ) except ApplicationError: pass @@ -153,13 +381,13 @@ def test_error2(self): try: td.open() self.fail("Expected IOError because no file specified") - except (IOError,AttributeError): + except (IOError, AttributeError): pass def test_error3(self): td = DataManagerFactory('txt') try: - td.initialize(filename=currdir+"display.txt") + td.initialize(filename=currdir + "display.txt") td.open() self.fail("Expected IOError because of bad file type") except (IOError, AttributeError): @@ -168,7 +396,7 @@ def test_error3(self): def test_error4(self): td = DataManagerFactory('txt') try: - td.initialize(filename=currdir+"dummy") + td.initialize(filename=currdir + "dummy") td.open() self.fail("Expected IOError because of bad file type") except (IOError, AttributeError): @@ -176,7 +404,7 @@ def test_error4(self): def test_error5(self): td = DataManagerFactory('tab') - td.initialize(filename=example_dir+"D.tab", param="D", format="foo") + td.initialize(filename=example_dir + "D.tab", param="D", format="foo") td.open() try: td.read() @@ -186,33 +414,32 @@ def test_error5(self): class PyomoDataPortal(unittest.TestCase): - def test_tableA1_1(self): # Importing a single column of data - model=AbstractModel() + model = AbstractModel() model.A = Set() - data = DataPortal(filename=os.path.abspath(example_dir+'A.tab'), set=model.A) + data = DataPortal(filename=os.path.abspath(example_dir + 'A.tab'), set=model.A) self.assertEqual(set(data['A']), set(['A1', 'A2', 'A3'])) instance = model.create_instance(data) self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) def test_tableA1_2(self): # Importing a single column of data - model=AbstractModel() + model = AbstractModel() model.A = Set() data = DataPortal() - data.load(filename=os.path.abspath(example_dir+'A.tab'), set=model.A) + data.load(filename=os.path.abspath(example_dir + 'A.tab'), set=model.A) instance = model.create_instance(data) self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) def test_tableA1_3(self): # Importing a single column of data - model=AbstractModel() + model = AbstractModel() model.A = Set() data = DataPortal() - data.connect(filename=os.path.abspath(example_dir+'B.tab')) + data.connect(filename=os.path.abspath(example_dir + 'B.tab')) # The first connection will be closed here - data.connect(filename=os.path.abspath(example_dir+'A.tab')) + data.connect(filename=os.path.abspath(example_dir + 'A.tab')) data.load(set=model.A) data.disconnect() instance = model.create_instance(data) @@ -220,150 +447,159 @@ def test_tableA1_3(self): def test_md1(self): md = DataPortal() - md.connect(filename=example_dir+"A.tab") + md.connect(filename=example_dir + "A.tab") try: md.load() self.fail("Must specify a model") except ValueError: pass - model=AbstractModel() + model = AbstractModel() try: md.load(model=model) self.fail("Expected ValueError") except ValueError: pass - model.A=Set() + model.A = Set() def test_md2(self): md = DataPortal() - model=AbstractModel() - model.A=Set() - md.load(model=model, filename=currdir+"data1.dat") - self.assertEqual(set(md['A']), set([1,2,3])) + model = AbstractModel() + model.A = Set() + md.load(model=model, filename=currdir + "data1.dat") + self.assertEqual(set(md['A']), set([1, 2, 3])) def test_md3(self): md = DataPortal() - model=AbstractModel() - model.A=Set() + model = AbstractModel() + model.A = Set() try: - md.load(model=model, filename=currdir+"data2.dat") + md.load(model=model, filename=currdir + "data2.dat") self.fail("Expected error because of extraneous text") except IOError: pass def test_md4(self): md = DataPortal() - model=AbstractModel() - model.A=Set() - model.B=Set() - model.C=Set() - md.load(model=model, filename=currdir+"data3.dat") + model = AbstractModel() + model.A = Set() + model.B = Set() + model.C = Set() + md.load(model=model, filename=currdir + "data3.dat") self.assertEqual(set(md['A']), set([])) - self.assertEqual(set(md['B']), set([(1,2)])) - self.assertEqual(set(md['C']), set([('a','b','c')])) + self.assertEqual(set(md['B']), set([(1, 2)])) + self.assertEqual(set(md['C']), set([('a', 'b', 'c')])) def test_md5(self): md = DataPortal() - model=AbstractModel() - model.A=Set() + model = AbstractModel() + model.A = Set() try: - md.load(model=model, filename=currdir+"data4.dat") - except (ValueError,IOError): + md.load(model=model, filename=currdir + "data4.dat") + except (ValueError, IOError): pass def test_md6(self): md = DataPortal() - model=AbstractModel() - model.A=Set() + model = AbstractModel() + model.A = Set() try: - md.load(model=model, filename=currdir+"data5.dat") + md.load(model=model, filename=currdir + "data5.dat") except ValueError: pass def test_md7(self): md = DataPortal() - model=AbstractModel() + model = AbstractModel() try: - md.load(model=model, filename=currdir+"data1.tab") + md.load(model=model, filename=currdir + "data1.tab") self.fail("Expected IOError") except IOError: pass def test_md8(self): md = DataPortal() - model=AbstractModel() - model.A=Set() + model = AbstractModel() + model.A = Set() try: - md.load(model=model, filename=currdir+"data6.dat") + md.load(model=model, filename=currdir + "data6.dat") self.fail("Expected IOError") except IOError: pass def test_md9(self): md = DataPortal() - model=AbstractModel() - model.A=Set() - model.B=Param(model.A) - md.load(model=model, filename=currdir+"data7.dat") - self.assertEqual(set(md['A']), set(['a','b','c'])) - self.assertEqual(md['B'], {'a':1.0, 'c':3.0}) + model = AbstractModel() + model.A = Set() + model.B = Param(model.A) + md.load(model=model, filename=currdir + "data7.dat") + self.assertEqual(set(md['A']), set(['a', 'b', 'c'])) + self.assertEqual(md['B'], {'a': 1.0, 'c': 3.0}) def test_md10(self): md = DataPortal() - model=AbstractModel() - model.A=Param(within=Boolean) - model.B=Param(within=Boolean) - model.Z=Set() - model.Y=Set(model.Z) - md.load(model=model, filename=currdir+"data8.dat") + model = AbstractModel() + model.A = Param(within=Boolean) + model.B = Param(within=Boolean) + model.Z = Set() + model.Y = Set(model.Z) + md.load(model=model, filename=currdir + "data8.dat") self.assertEqual(md['A'], False) self.assertEqual(md['B'], True) - self.assertEqual(md['Z'], ['foo[*]', 'bar[ * ]', 'bar[1,*,a,*]', 'foo-bar', 'hello-goodbye']) - self.assertEqual(md['Y']['foo-bar'], ['foo[*]', 'bar[ * ]', 'bar[1,*,a,*]', 'foo-bar', 'hello-goodbye']) + self.assertEqual( + md['Z'], ['foo[*]', 'bar[ * ]', 'bar[1,*,a,*]', 'foo-bar', 'hello-goodbye'] + ) + self.assertEqual( + md['Y']['foo-bar'], + ['foo[*]', 'bar[ * ]', 'bar[1,*,a,*]', 'foo-bar', 'hello-goodbye'], + ) instance = model.create_instance(md) def test_md11(self): cwd = os.getcwd() os.chdir(currdir) md = DataPortal() - model=AbstractModel() - model.A=Set() - model.B=Set() - model.C=Set() - model.D=Set() - md.load(model=model, filename=currdir+"data11.dat") + model = AbstractModel() + model.A = Set() + model.B = Set() + model.C = Set() + model.D = Set() + md.load(model=model, filename=currdir + "data11.dat") self.assertEqual(set(md['A']), set([])) - self.assertEqual(set(md['B']), set([(1,2)])) - self.assertEqual(set(md['C']), set([('a','b','c')])) - self.assertEqual(set(md['D']), set([1,3,5])) + self.assertEqual(set(md['B']), set([(1, 2)])) + self.assertEqual(set(md['C']), set([('a', 'b', 'c')])) + self.assertEqual(set(md['D']), set([1, 3, 5])) os.chdir(cwd) def test_md11a(self): cwd = os.getcwd() os.chdir(currdir) - model=AbstractModel() - model.a=Param() - model.b=Param() - model.c=Param() - model.d=Param() + model = AbstractModel() + model.a = Param() + model.b = Param() + model.c = Param() + model.d = Param() # Test 1 - instance = model.create_instance(currdir+'data14.dat', namespaces=['ns1','ns2']) - self.assertEqual( value(instance.a), 1) - self.assertEqual( value(instance.b), 2) - self.assertEqual( value(instance.c), 2) - self.assertEqual( value(instance.d), 2) + instance = model.create_instance( + currdir + 'data14.dat', namespaces=['ns1', 'ns2'] + ) + self.assertEqual(value(instance.a), 1) + self.assertEqual(value(instance.b), 2) + self.assertEqual(value(instance.c), 2) + self.assertEqual(value(instance.d), 2) # Test 2 - instance = model.create_instance(currdir+'data14.dat', namespaces=['ns1','ns3','nsX']) - self.assertEqual( value(instance.a), 1) - self.assertEqual( value(instance.b), 100) - self.assertEqual( value(instance.c), 3) - self.assertEqual( value(instance.d), 100) + instance = model.create_instance( + currdir + 'data14.dat', namespaces=['ns1', 'ns3', 'nsX'] + ) + self.assertEqual(value(instance.a), 1) + self.assertEqual(value(instance.b), 100) + self.assertEqual(value(instance.c), 3) + self.assertEqual(value(instance.d), 100) # Test None - instance = model.create_instance(currdir+'data14.dat') - self.assertEqual( value(instance.a), -1) - self.assertEqual( value(instance.b), -2) - self.assertEqual( value(instance.c), -3) - self.assertEqual( value(instance.d), -4) + instance = model.create_instance(currdir + 'data14.dat') + self.assertEqual(value(instance.a), -1) + self.assertEqual(value(instance.b), -2) + self.assertEqual(value(instance.c), -3) + self.assertEqual(value(instance.d), -4) # os.chdir(cwd) @@ -372,12 +608,12 @@ def test_md12(self): model.A = Set() md = DataPortal() try: - md.load(filename=example_dir+'A.tab', format='bad', set=model.A) + md.load(filename=example_dir + 'A.tab', format='bad', set=model.A) self.fail("Bad format error") except ValueError: pass try: - md.load(filename=example_dir+'A.tab') + md.load(filename=example_dir + 'A.tab') self.fail("Bad format error") except ValueError: pass @@ -385,12 +621,12 @@ def test_md12(self): @unittest.expectedFailure def test_md13(self): md = DataPortal() - model=AbstractModel() - model.p=Param() - instance = model.create_instance(currdir+"data15.dat") - md.load(model=model, filename=currdir+"data15.dat") + model = AbstractModel() + model.p = Param() + instance = model.create_instance(currdir + "data15.dat") + md.load(model=model, filename=currdir + "data15.dat") try: - md.load(model=model, filename=currdir+"data15.dat") + md.load(model=model, filename=currdir + "data15.dat") self.fail("Expected IOError") except IOError: pass @@ -406,7 +642,7 @@ def test_md14(self): self.fail("Expected ValueError") except ValueError: pass - + def test_md15(self): md = DataPortal() try: @@ -428,41 +664,55 @@ def test_md16(self): def test_md17(self): md = DataPortal() try: - md[1,2,3,4] + md[1, 2, 3, 4] self.fail("Expected IOError") except IOError: pass + def test_md18(self): + cwd = os.getcwd() + os.chdir(currdir) + + md = DataPortal() + md.load(filename=currdir + "data17.dat") + + self.assertEqual(md['A'], 1) + self.assertEqual(md['B'], {'a': 1}) + self.assertEqual(md['C'], {'a': 1, 'b': 2, 'c': 3}) + self.assertEqual(md['D'], 1) + + os.chdir(cwd) + def test_dat_type_conversion(self): model = AbstractModel() model.I = Set() model.p = Param(model.I, domain=Any) - i = model.create_instance(currdir+"data_types.dat") + i = model.create_instance(currdir + "data_types.dat") ref = { - 50: (int, 2), + 50: (int, 2), 55: (int, -2), - 51: (int, 200), + 51: (int, 200), 52: (int, -200), 53: (float, 0.02), 54: (float, -0.02), - 10: (float, 1.), - 11: (float, -1.), - 12: (float, .1), - 13: (float, -.1), + 10: (float, 1.0), + 11: (float, -1.0), + 12: (float, 0.1), + 13: (float, -0.1), 14: (float, 1.1), 15: (float, -1.1), - 20: (float, 200.), - 21: (float, -200.), - 22: (float, .02), - 23: (float, -.02), - 30: (float, 210.), - 31: (float, -210.), - 32: (float, .021), - 33: (float, -.021), - 40: (float, 10.), - 41: (float, -10.), - 42: (float, .001), - 43: (float, -.001), + 20: (float, 200.0), + 21: (float, -200.0), + 22: (float, 0.02), + 23: (float, -0.02), + 30: (float, 210.0), + 31: (float, -210.0), + 32: (float, 0.021), + 33: (float, -0.021), + 40: (float, 10.0), + 41: (float, -10.0), + 42: (float, 0.001), + 43: (float, -0.001), 1000: (str, "a_string"), 1001: (str, "a_string"), 1002: (str, 'a_string'), @@ -472,48 +722,94 @@ def test_dat_type_conversion(self): 1006: (str, '123'), } for k, v in i.p.items(): - #print(k,v, type(v)) + # print(k,v, type(v)) if k in ref: - err="index %s: (%s, %s) does not match ref %s" % ( - k, type(v), v, ref[k],) + err = "index %s: (%s, %s) does not match ref %s" % ( + k, + type(v), + v, + ref[k], + ) self.assertIs(type(v), ref[k][0], err) self.assertEqual(v, ref[k][1], err) else: n = k // 10 - err="index %s: (%s, %s) does not match ref %s" % ( - k, type(v), v, ref[n],) + err = "index %s: (%s, %s) does not match ref %s" % ( + k, + type(v), + v, + ref[n], + ) self.assertIs(type(v), ref[n][0], err) self.assertEqual(v, ref[n][1], err) def test_data_namespace(self): - model=AbstractModel() - model.a=Param() - model.b=Param() - model.c=Param() - model.d=Param() - model.A=Set() - model.e=Param(model.A) + model = AbstractModel() + model.a = Param() + model.b = Param() + model.c = Param() + model.d = Param() + model.A = Set() + model.e = Param(model.A) md = DataPortal() - md.load(model=model, filename=currdir+"data16.dat") + md.load(model=model, filename=currdir + "data16.dat") # data() - self.assertEqual(md.data(namespace='ns1'), {'a': {None: 1}, 'A': {None:[7,9,11]}, 'e': {9:90, 7:70, 11:110}}) + self.assertEqual( + md.data(namespace='ns1'), + {'a': {None: 1}, 'A': {None: [7, 9, 11]}, 'e': {9: 90, 7: 70, 11: 110}}, + ) # __getitem__ self.assertEqual(md['ns1', 'a'], 1) # namespaces() - self.assertEqual(sorted(md.namespaces(), key=lambda x: 'None' if x is None else x), [None, 'ns1', 'ns2', 'ns3', 'nsX']) + self.assertEqual( + sorted(md.namespaces(), key=lambda x: 'None' if x is None else x), + [None, 'ns1', 'ns2', 'ns3', 'nsX'], + ) # keys() - self.assertEqual(sorted(md.keys()), ['A', 'a','b','c','d','e']) + self.assertEqual(sorted(md.keys()), ['A', 'a', 'b', 'c', 'd', 'e']) self.assertEqual(sorted(md.keys('ns1')), ['A', 'a', 'e']) # values() - self.assertEqual(sorted(md.values(), key=lambda x: tuple(sorted(x)+[0]) if type(x) is list else tuple(sorted(x.values())) if not type(x) is int else (x, )), [-4, -3, -2, -1, [1,3,5], {1:10, 3:30, 5:50}]) - self.assertEqual(sorted(md.values('ns1'), key=lambda x: tuple(sorted(x)+[0]) if type(x) is list else tuple(sorted(x.values())) if not type(x) is int else (x, )), [1, [7,9,11], {7:70, 9:90, 11:110}]) + self.assertEqual( + sorted( + md.values(), + key=lambda x: tuple(sorted(x) + [0]) + if type(x) is list + else tuple(sorted(x.values())) + if not type(x) is int + else (x,), + ), + [-4, -3, -2, -1, [1, 3, 5], {1: 10, 3: 30, 5: 50}], + ) + self.assertEqual( + sorted( + md.values('ns1'), + key=lambda x: tuple(sorted(x) + [0]) + if type(x) is list + else tuple(sorted(x.values())) + if not type(x) is int + else (x,), + ), + [1, [7, 9, 11], {7: 70, 9: 90, 11: 110}], + ) # items() - self.assertEqual(sorted(md.items()), [('A', [1,3,5]), ('a',-1), ('b',-2), ('c',-3), ('d',-4), ('e', {1:10, 3:30, 5:50})]) - self.assertEqual(sorted(md.items('ns1')), [('A', [7,9,11]), ('a',1), ('e',{7:70, 9:90, 11:110})]) + self.assertEqual( + sorted(md.items()), + [ + ('A', [1, 3, 5]), + ('a', -1), + ('b', -2), + ('c', -3), + ('d', -4), + ('e', {1: 10, 3: 30, 5: 50}), + ], + ) + self.assertEqual( + sorted(md.items('ns1')), + [('A', [7, 9, 11]), ('a', 1), ('e', {7: 70, 9: 90, 11: 110})], + ) class TestOnlyTextPortal(unittest.TestCase): - suffix = '.tab' skiplist = [] @@ -522,17 +818,27 @@ def check_skiplist(self, name): self.skipTest('Skipping test %s' % name) def create_options(self, name): - return {'filename':os.path.abspath(tutorial_dir+os.sep+'tab'+os.sep+name+self.suffix)} + return { + 'filename': os.path.abspath( + tutorial_dir + os.sep + 'tab' + os.sep + name + self.suffix + ) + } def create_write_options(self, name): - return {'filename':os.path.abspath(currdir+os.sep+name+self.suffix), 'sort':True} + return { + 'filename': os.path.abspath(currdir + os.sep + name + self.suffix), + 'sort': True, + } def test_empty(self): # Importing an empty file self.check_skiplist('empty') dp = DataPortal() try: - dp.load(set='A', filename=os.path.abspath(currdir+os.sep+'empty'+self.suffix)) + dp.load( + set='A', + filename=os.path.abspath(currdir + os.sep + 'empty' + self.suffix), + ) self.fail("Expected IOError") except IOError: pass @@ -557,14 +863,29 @@ def test_tableC(self): self.check_skiplist('tableC') dp = DataPortal() dp.load(set='C', **self.create_options('C')) - self.assertEqual(set(dp.data('C')), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)])) + self.assertEqual( + set(dp.data('C')), + set( + [ + ('A1', 1), + ('A1', 2), + ('A1', 3), + ('A2', 1), + ('A2', 2), + ('A2', 3), + ('A3', 1), + ('A3', 2), + ('A3', 3), + ] + ), + ) def test_tableD(self): # Importing a 2D array of data as a set. self.check_skiplist('tableD') dp = DataPortal() dp.load(set='D', format='set_array', **self.create_options('D')) - self.assertEqual(set(dp.data('D')), set([('A1',1), ('A2',2), ('A3',3)])) + self.assertEqual(set(dp.data('D')), set([('A1', 1), ('A2', 2), ('A3', 3)])) def test_tableZ(self): # Importing a single parameter @@ -578,7 +899,7 @@ def test_tableY(self): self.check_skiplist('tableY') dp = DataPortal() dp.load(param='Y', **self.create_options('Y')) - self.assertEqual(dp.data('Y'), {'A1':3.3,'A2':3.4,'A3':3.5}) + self.assertEqual(dp.data('Y'), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) def test_tableXW_1(self): # Importing a table, but only reporting the values for the non-index @@ -587,40 +908,77 @@ def test_tableXW_1(self): self.check_skiplist('tableXW_1') dp = DataPortal() dp.load(param=('X', 'W'), **self.create_options('XW')) - self.assertEqual(dp.data('X'), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(dp.data('W'), {'A1':4.3,'A2':4.4,'A3':4.5}) + self.assertEqual(dp.data('X'), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(dp.data('W'), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) def test_tableXW_3(self): - # Like test_tableXW_1, except that set A is defined in the load statment. + # Like test_tableXW_1, except that set A is defined in the load statement. self.check_skiplist('tableXW_3') dp = DataPortal() dp.load(index='A', param=('X', 'W'), **self.create_options('XW')) - self.assertEqual(set(dp.data('A')), set(['A1','A2','A3'])) - self.assertEqual(dp.data('X'), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(dp.data('W'), {'A1':4.3,'A2':4.4,'A3':4.5}) + self.assertEqual(set(dp.data('A')), set(['A1', 'A2', 'A3'])) + self.assertEqual(dp.data('X'), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(dp.data('W'), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) def test_tableXW_4(self): - # Like test_tableXW_1, except that set A is defined in the load statment and all values are mapped. + # Like test_tableXW_1, except that set A is defined in the load statement and all values are mapped. self.check_skiplist('tableXW_4') dp = DataPortal() - dp.load(select=('A', 'W', 'X'), index='B', param=('R', 'S'), **self.create_options('XW')) - self.assertEqual(set(dp.data('B')), set(['A1','A2','A3'])) - self.assertEqual(dp.data('S'), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(dp.data('R'), {'A1':4.3,'A2':4.4,'A3':4.5}) + dp.load( + select=('A', 'W', 'X'), + index='B', + param=('R', 'S'), + **self.create_options('XW') + ) + self.assertEqual(set(dp.data('B')), set(['A1', 'A2', 'A3'])) + self.assertEqual(dp.data('S'), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(dp.data('R'), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) def test_tableT(self): # Importing a 2D array of parameters that are transposed. self.check_skiplist('tableT') dp = DataPortal() dp.load(format='transposed_array', param='T', **self.create_options('T')) - self.assertEqual(dp.data('T'), {('A2', 'I1'): 2.3, ('A1', 'I2'): 1.4, ('A1', 'I3'): 1.5, ('A1', 'I4'): 1.6, ('A1', 'I1'): 1.3, ('A3', 'I4'): 3.6, ('A2', 'I4'): 2.6, ('A3', 'I1'): 3.3, ('A2', 'I3'): 2.5, ('A3', 'I2'): 3.4, ('A2', 'I2'): 2.4, ('A3', 'I3'): 3.5}) + self.assertEqual( + dp.data('T'), + { + ('A2', 'I1'): 2.3, + ('A1', 'I2'): 1.4, + ('A1', 'I3'): 1.5, + ('A1', 'I4'): 1.6, + ('A1', 'I1'): 1.3, + ('A3', 'I4'): 3.6, + ('A2', 'I4'): 2.6, + ('A3', 'I1'): 3.3, + ('A2', 'I3'): 2.5, + ('A3', 'I2'): 3.4, + ('A2', 'I2'): 2.4, + ('A3', 'I3'): 3.5, + }, + ) def test_tableU(self): # Importing a 2D array of parameters. self.check_skiplist('tableU') dp = DataPortal() dp.load(format='array', param='U', **self.create_options('U')) - self.assertEqual(dp.data('U'), {('I2', 'A1'): 1.4, ('I3', 'A1'): 1.5, ('I3', 'A2'): 2.5, ('I4', 'A1'): 1.6, ('I3', 'A3'): 3.5, ('I1', 'A2'): 2.3, ('I4', 'A3'): 3.6, ('I1', 'A3'): 3.3, ('I4', 'A2'): 2.6, ('I2', 'A3'): 3.4, ('I1', 'A1'): 1.3, ('I2', 'A2'): 2.4}) + self.assertEqual( + dp.data('U'), + { + ('I2', 'A1'): 1.4, + ('I3', 'A1'): 1.5, + ('I3', 'A2'): 2.5, + ('I4', 'A1'): 1.6, + ('I3', 'A3'): 3.5, + ('I1', 'A2'): 2.3, + ('I4', 'A3'): 3.6, + ('I1', 'A3'): 3.3, + ('I4', 'A2'): 2.6, + ('I2', 'A3'): 3.4, + ('I1', 'A1'): 1.3, + ('I2', 'A2'): 2.4, + }, + ) def test_tableS(self): # Importing a table, but only reporting the values for the non-index @@ -629,78 +987,100 @@ def test_tableS(self): self.check_skiplist('tableS') dp = DataPortal() dp.load(param='S', **self.create_options('S')) - self.assertEqual(dp.data('S'), {'A1':3.3,'A3':3.5}) + self.assertEqual(dp.data('S'), {'A1': 3.3, 'A3': 3.5}) def test_tablePO(self): # Importing a table that has multiple indexing columns self.check_skiplist('tablePO') dp = DataPortal() dp.load(index='J', param=('P', 'O'), **self.create_options('PO')) - self.assertEqual(set(dp.data('J')), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) ) - self.assertEqual(dp.data('P'), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} ) - self.assertEqual(dp.data('O'), {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4}) + self.assertEqual( + set(dp.data('J')), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) + ) + self.assertEqual( + dp.data('P'), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} + ) + self.assertEqual( + dp.data('O'), {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4} + ) def test_tablePP(self): # Importing a table that has a 2-d indexing self.check_skiplist('tablePP') dp = DataPortal() dp.load(param='PP', **self.create_options('PP')) - #self.assertEqual(set(dp.data('J')), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) ) - self.assertEqual(dp.data('PP'), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} ) + # self.assertEqual(set(dp.data('J')), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) ) + self.assertEqual( + dp.data('PP'), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} + ) class TestOnlyCsvPortal(TestOnlyTextPortal): - suffix = '.csv' def create_options(self, name): - return {'filename':os.path.abspath(tutorial_dir+os.sep+'csv'+os.sep+name+self.suffix)} + return { + 'filename': os.path.abspath( + tutorial_dir + os.sep + 'csv' + os.sep + name + self.suffix + ) + } class TestOnlyXmlPortal(TestOnlyTextPortal): - suffix = '.xml' skiplist = ['tableD', 'tableT', 'tableU'] def create_options(self, name): - return {'filename':os.path.abspath(tutorial_dir+os.sep+'xml'+os.sep+name+self.suffix)} + return { + 'filename': os.path.abspath( + tutorial_dir + os.sep + 'xml' + os.sep + name + self.suffix + ) + } class TestOnlyJsonPortal(TestOnlyTextPortal): - suffix = '.json' skiplist = ['tableD', 'tableT', 'tableU', 'tableXW_4'] def create_options(self, name): - return {'filename':os.path.abspath(tutorial_dir+os.sep+'json'+os.sep+name+self.suffix)} + return { + 'filename': os.path.abspath( + tutorial_dir + os.sep + 'json' + os.sep + name + self.suffix + ) + } def compare_data(self, name, file_suffix): if file_suffix == '.json': - with open(join(currdir, name+file_suffix), 'r') as out, \ - open(join(currdir, name+'.baseline'+file_suffix), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - allow_second_superset=True, - abstol=0) + with open(join(currdir, name + file_suffix), 'r') as out, open( + join(currdir, name + '.baseline' + file_suffix), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), allow_second_superset=True, abstol=0 + ) elif file_suffix == '.yaml': - with open(join(currdir, name+file_suffix), 'r') as out, \ - open(join(currdir, name+'.baseline'+file_suffix), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True, - abstol=0) + with open(join(currdir, name + file_suffix), 'r') as out, open( + join(currdir, name + '.baseline' + file_suffix), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), + yaml.full_load(out), + allow_second_superset=True, + abstol=0, + ) else: - with open(join(currdir, name+file_suffix), 'r') as f1, \ - open(join(currdir, name+'.baseline'+file_suffix), 'r') as f2: - f1_contents = list(filter(None, f1.read().split())) - f2_contents = list(filter(None, f2.read().split())) - for item1, item2 in zip_longest(f1_contents, f2_contents): - self.assertEqual(item1, item2) - os.remove(currdir+name+file_suffix) + with open(join(currdir, name + file_suffix), 'r') as f1, open( + join(currdir, name + '.baseline' + file_suffix), 'r' + ) as f2: + f1_contents = list(filter(None, f1.read().split())) + f2_contents = list(filter(None, f2.read().split())) + for item1, item2 in zip_longest(f1_contents, f2_contents): + self.assertEqual(item1, item2) + os.remove(currdir + name + file_suffix) def test_store_set1(self): # Write 1-D set model = ConcreteModel() - model.A = Set(initialize=[1,3,5]) + model.A = Set(initialize=[1, 3, 5]) data = DataPortal() data.store(data=model.A, **self.create_write_options('set1')) self.compare_data('set1', self.suffix) @@ -708,7 +1088,7 @@ def test_store_set1(self): def test_store_set1a(self): # Write 1-D set model = ConcreteModel() - model.A = Set(initialize=[1,3,5]) + model.A = Set(initialize=[1, 3, 5]) data = DataPortal() data.store(data="A", model=model, **self.create_write_options('set1')) self.compare_data('set1', self.suffix) @@ -716,7 +1096,7 @@ def test_store_set1a(self): def test_store_set2(self): # Write 2-D set model = ConcreteModel() - model.A = Set(initialize=[(1,2),(3,4),(5,6)], dimen=2) + model.A = Set(initialize=[(1, 2), (3, 4), (5, 6)], dimen=2) data = DataPortal() data.store(data=model.A, **self.create_write_options('set2')) self.compare_data('set2', self.suffix) @@ -732,8 +1112,8 @@ def test_store_param1(self): def test_store_param2(self): # Write 1-D param model = ConcreteModel() - model.A = Set(initialize=[1,2,3]) - model.p = Param(model.A, initialize={1:10, 2:20, 3:30}) + model.A = Set(initialize=[1, 2, 3]) + model.p = Param(model.A, initialize={1: 10, 2: 20, 3: 30}) data = DataPortal() data.store(data=model.p, **self.create_write_options('param2')) self.compare_data('param2', self.suffix) @@ -741,35 +1121,41 @@ def test_store_param2(self): def test_store_param3(self): # Write 2-D params model = ConcreteModel() - model.A = Set(initialize=[(1,2),(2,3),(3,4)], dimen=2) - model.p = Param(model.A, initialize={(1,2):10, (2,3):20, (3,4):30}) - model.q = Param(model.A, initialize={(1,2):11, (2,3):21, (3,4):31}) + model.A = Set(initialize=[(1, 2), (2, 3), (3, 4)], dimen=2) + model.p = Param(model.A, initialize={(1, 2): 10, (2, 3): 20, (3, 4): 30}) + model.q = Param(model.A, initialize={(1, 2): 11, (2, 3): 21, (3, 4): 31}) data = DataPortal() - data.store(data=(model.p,model.q), **self.create_write_options('param3')) + data.store(data=(model.p, model.q), **self.create_write_options('param3')) self.compare_data('param3', self.suffix) def test_store_param4(self): # Write 2-D params model = ConcreteModel() - model.A = Set(initialize=[(1,2),(2,3),(3,4)], dimen=2) - model.p = Param(model.A, initialize={(1,2):10, (2,3):20, (3,4):30}) - model.q = Param(model.A, initialize={(1,2):11, (2,3):21, (3,4):31}) + model.A = Set(initialize=[(1, 2), (2, 3), (3, 4)], dimen=2) + model.p = Param(model.A, initialize={(1, 2): 10, (2, 3): 20, (3, 4): 30}) + model.q = Param(model.A, initialize={(1, 2): 11, (2, 3): 21, (3, 4): 31}) data = DataPortal() - data.store(data=(model.p,model.q), columns=('a','b','c','d'), **self.create_write_options('param4')) + data.store( + data=(model.p, model.q), + columns=('a', 'b', 'c', 'd'), + **self.create_write_options('param4') + ) self.compare_data('param4', self.suffix) @unittest.skipIf(not yaml_interface, "No YAML interface available") class TestOnlyYamlPortal(TestOnlyJsonPortal): - suffix = '.yaml' def create_options(self, name): - return {'filename':os.path.abspath(tutorial_dir+os.sep+'yaml'+os.sep+name+self.suffix)} + return { + 'filename': os.path.abspath( + tutorial_dir + os.sep + 'yaml' + os.sep + name + self.suffix + ) + } class TestTextPortal(unittest.TestCase): - suffix = '.tab' skiplist = [] @@ -778,44 +1164,59 @@ def check_skiplist(self, name): self.skipTest('Skipping test %s' % name) def create_options(self, name): - return {'filename':os.path.abspath(tutorial_dir+os.sep+'tab'+os.sep+name+self.suffix)} + return { + 'filename': os.path.abspath( + tutorial_dir + os.sep + 'tab' + os.sep + name + self.suffix + ) + } def create_write_options(self, name): - return {'filename':os.path.abspath(currdir+os.sep+name+self.suffix), 'sort':True} + return { + 'filename': os.path.abspath(currdir + os.sep + name + self.suffix), + 'sort': True, + } def compare_data(self, name, file_suffix): if file_suffix == '.json': - with open(join(currdir, name+file_suffix), 'r') as out, \ - open(join(currdir, name+'.baseline'+file_suffix), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - allow_second_superset=True, - abstol=0) + with open(join(currdir, name + file_suffix), 'r') as out, open( + join(currdir, name + '.baseline' + file_suffix), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), allow_second_superset=True, abstol=0 + ) elif file_suffix == '.yaml': - with open(join(currdir, name+file_suffix), 'r') as out, \ - open(join(currdir, name+'.baseline'+file_suffix), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True, - abstol=0) + with open(join(currdir, name + file_suffix), 'r') as out, open( + join(currdir, name + '.baseline' + file_suffix), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), + yaml.full_load(out), + allow_second_superset=True, + abstol=0, + ) else: try: - with open(join(currdir, name+file_suffix), 'r') as f1, \ - open(join(currdir, name+'.baseline'+file_suffix), 'r') as f2: + with open(join(currdir, name + file_suffix), 'r') as f1, open( + join(currdir, name + '.baseline' + file_suffix), 'r' + ) as f2: f1_contents = list(filter(None, f1.read().split())) f2_contents = list(filter(None, f2.read().split())) for item1, item2 in zip_longest(f1_contents, f2_contents): self.assertEqual(item1, item2) except: - with open(join(currdir, name+file_suffix), 'r') as out, \ - open(join(currdir, name+'.baseline'+file_suffix), 'r') as txt: - self.assertEqual(out.read().strip().replace(' ',''), - txt.read().strip().replace(' ','')) - os.remove(currdir+name+file_suffix) + with open(join(currdir, name + file_suffix), 'r') as out, open( + join(currdir, name + '.baseline' + file_suffix), 'r' + ) as txt: + self.assertEqual( + out.read().strip().replace(' ', ''), + txt.read().strip().replace(' ', ''), + ) + os.remove(currdir + name + file_suffix) def test_tableA(self): # Importing an unordered set of arbitrary data self.check_skiplist('tableA') - model=AbstractModel() + model = AbstractModel() model.A = Set() data = DataPortal() data.load(set=model.A, **self.create_options('A')) @@ -825,7 +1226,7 @@ def test_tableA(self): def test_tableB(self): # Importing an unordered set of numeric data self.check_skiplist('tableB') - model=AbstractModel() + model = AbstractModel() model.B = Set() data = DataPortal() data.load(set=model.B, **self.create_options('B')) @@ -834,29 +1235,44 @@ def test_tableB(self): def test_tableC(self): # Importing a multi-column table, where all columns are - #treated as values for a set with tuple values. + # treated as values for a set with tuple values. self.check_skiplist('tableC') - model=AbstractModel() + model = AbstractModel() model.C = Set(dimen=2) data = DataPortal() data.load(set=model.C, **self.create_options('C')) instance = model.create_instance(data) - self.assertEqual(set(instance.C.data()), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)])) + self.assertEqual( + set(instance.C.data()), + set( + [ + ('A1', 1), + ('A1', 2), + ('A1', 3), + ('A2', 1), + ('A2', 2), + ('A2', 3), + ('A3', 1), + ('A3', 2), + ('A3', 3), + ] + ), + ) def test_tableD(self): # Importing a 2D array of data as a set. self.check_skiplist('tableD') - model=AbstractModel() + model = AbstractModel() model.C = Set(dimen=2) data = DataPortal() data.load(set=model.C, format='set_array', **self.create_options('D')) instance = model.create_instance(data) - self.assertEqual(set(instance.C.data()), set([('A1',1), ('A2',2), ('A3',3)])) + self.assertEqual(set(instance.C.data()), set([('A1', 1), ('A2', 2), ('A3', 3)])) def test_tableZ(self): # Importing a single parameter self.check_skiplist('tableZ') - model=AbstractModel() + model = AbstractModel() model.Z = Param(default=99.0) data = DataPortal() data.load(param=model.Z, **self.create_options('Z')) @@ -866,130 +1282,175 @@ def test_tableZ(self): def test_tableY(self): # Same as tableXW. self.check_skiplist('tableY') - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.Y = Param(model.A) data = DataPortal() data.load(param=model.Y, **self.create_options('Y')) instance = model.create_instance(data) - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.Y.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.Y.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) def test_tableXW_1(self): # Importing a table, but only reporting the values for the non-index - #parameter columns. The first column is assumed to represent an - #index column. + # parameter columns. The first column is assumed to represent an + # index column. self.check_skiplist('tableXW_1') - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.X = Param(model.A) model.W = Param(model.A) data = DataPortal() data.load(param=(model.X, model.W), **self.create_options('XW')) instance = model.create_instance(data) - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) def test_tableXW_2(self): # Like test_tableXW_1, except that set A is not defined. self.check_skiplist('tableXW_2') - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3']) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3']) model.X = Param(model.A) model.W = Param(model.A) data = DataPortal() data.load(param=(model.X, model.W), **self.create_options('XW')) instance = model.create_instance(data) - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) def test_tableXW_3(self): - # Like test_tableXW_1, except that set A is defined in the load statment. + # Like test_tableXW_1, except that set A is defined in the load statement. self.check_skiplist('tableXW_3') - model=AbstractModel() + model = AbstractModel() model.A = Set() model.X = Param(model.A) model.W = Param(model.A) data = DataPortal() data.load(index=model.A, param=(model.X, model.W), **self.create_options('XW')) instance = model.create_instance(data) - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) def test_tableXW_4(self): - # Like test_tableXW_1, except that set A is defined in the load statment and all values are mapped. + # Like test_tableXW_1, except that set A is defined in the load statement and all values are mapped. self.check_skiplist('tableXW_4') - model=AbstractModel() + model = AbstractModel() model.B = Set() model.R = Param(model.B) model.S = Param(model.B) data = DataPortal() - data.load(select=('A', 'W', 'X'), index=model.B, param=(model.R, model.S), **self.create_options('XW')) + data.load( + select=('A', 'W', 'X'), + index=model.B, + param=(model.R, model.S), + **self.create_options('XW') + ) instance = model.create_instance(data) - self.assertEqual(set(instance.B.data()), set(['A1','A2','A3'])) - self.assertEqual(instance.S.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.R.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) + self.assertEqual(set(instance.B.data()), set(['A1', 'A2', 'A3'])) + self.assertEqual(instance.S.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.R.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) def test_tableT(self): # Importing a 2D array of parameters that are transposed. self.check_skiplist('tableT') - model=AbstractModel() - model.B = Set(initialize=['I1','I2','I3','I4']) - model.A = Set(initialize=['A1','A2','A3']) + model = AbstractModel() + model.B = Set(initialize=['I1', 'I2', 'I3', 'I4']) + model.A = Set(initialize=['A1', 'A2', 'A3']) model.T = Param(model.A, model.B) data = DataPortal() data.load(format='transposed_array', param=model.T, **self.create_options('T')) instance = model.create_instance(data) - self.assertEqual(instance.T.extract_values(), {('A2', 'I1'): 2.3, ('A1', 'I2'): 1.4, ('A1', 'I3'): 1.5, ('A1', 'I4'): 1.6, ('A1', 'I1'): 1.3, ('A3', 'I4'): 3.6, ('A2', 'I4'): 2.6, ('A3', 'I1'): 3.3, ('A2', 'I3'): 2.5, ('A3', 'I2'): 3.4, ('A2', 'I2'): 2.4, ('A3', 'I3'): 3.5}) + self.assertEqual( + instance.T.extract_values(), + { + ('A2', 'I1'): 2.3, + ('A1', 'I2'): 1.4, + ('A1', 'I3'): 1.5, + ('A1', 'I4'): 1.6, + ('A1', 'I1'): 1.3, + ('A3', 'I4'): 3.6, + ('A2', 'I4'): 2.6, + ('A3', 'I1'): 3.3, + ('A2', 'I3'): 2.5, + ('A3', 'I2'): 3.4, + ('A2', 'I2'): 2.4, + ('A3', 'I3'): 3.5, + }, + ) def test_tableU(self): # Importing a 2D array of parameters. self.check_skiplist('tableU') - model=AbstractModel() - model.A = Set(initialize=['I1','I2','I3','I4']) - model.B = Set(initialize=['A1','A2','A3']) + model = AbstractModel() + model.A = Set(initialize=['I1', 'I2', 'I3', 'I4']) + model.B = Set(initialize=['A1', 'A2', 'A3']) model.U = Param(model.A, model.B) data = DataPortal() data.load(format='array', param=model.U, **self.create_options('U')) instance = model.create_instance(data) - self.assertEqual(instance.U.extract_values(), {('I2', 'A1'): 1.4, ('I3', 'A1'): 1.5, ('I3', 'A2'): 2.5, ('I4', 'A1'): 1.6, ('I3', 'A3'): 3.5, ('I1', 'A2'): 2.3, ('I4', 'A3'): 3.6, ('I1', 'A3'): 3.3, ('I4', 'A2'): 2.6, ('I2', 'A3'): 3.4, ('I1', 'A1'): 1.3, ('I2', 'A2'): 2.4}) + self.assertEqual( + instance.U.extract_values(), + { + ('I2', 'A1'): 1.4, + ('I3', 'A1'): 1.5, + ('I3', 'A2'): 2.5, + ('I4', 'A1'): 1.6, + ('I3', 'A3'): 3.5, + ('I1', 'A2'): 2.3, + ('I4', 'A3'): 3.6, + ('I1', 'A3'): 3.3, + ('I4', 'A2'): 2.6, + ('I2', 'A3'): 3.4, + ('I1', 'A1'): 1.3, + ('I2', 'A2'): 2.4, + }, + ) def test_tableS(self): # Importing a table, but only reporting the values for the non-index - #parameter columns. The first column is assumed to represent an - #index column. A missing value is represented in the column data. + # parameter columns. The first column is assumed to represent an + # index column. A missing value is represented in the column data. self.check_skiplist('tableS') - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.S = Param(model.A) data = DataPortal() data.load(param=model.S, **self.create_options('S')) instance = model.create_instance(data) - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.S.extract_values(), {'A1':3.3,'A3':3.5}) + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.S.extract_values(), {'A1': 3.3, 'A3': 3.5}) def test_tablePO(self): # Importing a table that has multiple indexing columns self.check_skiplist('tablePO') - model=AbstractModel() + model = AbstractModel() model.J = Set(dimen=2) model.P = Param(model.J) model.O = Param(model.J) data = DataPortal() data.load(index=model.J, param=(model.P, model.O), **self.create_options('PO')) instance = model.create_instance(data) - self.assertEqual(set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) ) - self.assertEqual(instance.P.extract_values(), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} ) - self.assertEqual(instance.O.extract_values(), {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4}) + self.assertEqual( + set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) + ) + self.assertEqual( + instance.P.extract_values(), + {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4}, + ) + self.assertEqual( + instance.O.extract_values(), + {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4}, + ) def test_store_set1(self): # Write 1-D set self.check_skiplist('store_set1') model = ConcreteModel() - model.A = Set(initialize=[1,3,5]) + model.A = Set(initialize=[1, 3, 5]) data = DataPortal() data.store(set=model.A, **self.create_write_options('set1')) self.compare_data('set1', self.suffix) @@ -998,7 +1459,7 @@ def test_store_set2(self): # Write 2-D set self.check_skiplist('store_set2') model = ConcreteModel() - model.A = Set(initialize=[(1,2),(3,4),(5,6)], dimen=2) + model.A = Set(initialize=[(1, 2), (3, 4), (5, 6)], dimen=2) data = DataPortal() data.store(set=model.A, **self.create_write_options('set2')) self.compare_data('set2', self.suffix) @@ -1016,8 +1477,8 @@ def test_store_param2(self): # Write 1-D param self.check_skiplist('store_param2') model = ConcreteModel() - model.A = Set(initialize=[1,2,3]) - model.p = Param(model.A, initialize={1:10, 2:20, 3:30}) + model.A = Set(initialize=[1, 2, 3]) + model.p = Param(model.A, initialize={1: 10, 2: 20, 3: 30}) data = DataPortal() data.store(param=model.p, **self.create_write_options('param2')) self.compare_data('param2', self.suffix) @@ -1026,63 +1487,74 @@ def test_store_param3(self): # Write 2-D params self.check_skiplist('store_param3') model = ConcreteModel() - model.A = Set(initialize=[(1,2),(2,3),(3,4)], dimen=2) - model.p = Param(model.A, initialize={(1,2):10, (2,3):20, (3,4):30}) - model.q = Param(model.A, initialize={(1,2):11, (2,3):21, (3,4):31}) + model.A = Set(initialize=[(1, 2), (2, 3), (3, 4)], dimen=2) + model.p = Param(model.A, initialize={(1, 2): 10, (2, 3): 20, (3, 4): 30}) + model.q = Param(model.A, initialize={(1, 2): 11, (2, 3): 21, (3, 4): 31}) data = DataPortal() - data.store(param=(model.p,model.q), **self.create_write_options('param3')) + data.store(param=(model.p, model.q), **self.create_write_options('param3')) self.compare_data('param3', self.suffix) def test_store_param4(self): # Write 2-D params self.check_skiplist('store_param4') model = ConcreteModel() - model.A = Set(initialize=[(1,2),(2,3),(3,4)], dimen=2) - model.p = Param(model.A, initialize={(1,2):10, (2,3):20, (3,4):30}) - model.q = Param(model.A, initialize={(1,2):11, (2,3):21, (3,4):31}) + model.A = Set(initialize=[(1, 2), (2, 3), (3, 4)], dimen=2) + model.p = Param(model.A, initialize={(1, 2): 10, (2, 3): 20, (3, 4): 30}) + model.q = Param(model.A, initialize={(1, 2): 11, (2, 3): 21, (3, 4): 31}) data = DataPortal() - data.store(param=(model.p,model.q), **self.create_write_options('param4')) + data.store(param=(model.p, model.q), **self.create_write_options('param4')) self.compare_data('param4', self.suffix) class TestCsvPortal(TestTextPortal): - suffix = '.csv' def create_options(self, name): - return {'filename':os.path.abspath(tutorial_dir+os.sep+'csv'+os.sep+name+self.suffix)} + return { + 'filename': os.path.abspath( + tutorial_dir + os.sep + 'csv' + os.sep + name + self.suffix + ) + } class TestXmlPortal(TestTextPortal): - suffix = '.xml' skiplist = ['tableD', 'tableT', 'tableU'] def create_options(self, name): - return {'filename':os.path.abspath(tutorial_dir+os.sep+'xml'+os.sep+name+self.suffix)} + return { + 'filename': os.path.abspath( + tutorial_dir + os.sep + 'xml' + os.sep + name + self.suffix + ) + } class TestJsonPortal(TestTextPortal): - suffix = '.json' skiplist = ['tableD', 'tableT', 'tableU', 'tableXW_4'] def create_options(self, name): - return {'filename':os.path.abspath(tutorial_dir+os.sep+'json'+os.sep+name+self.suffix)} + return { + 'filename': os.path.abspath( + tutorial_dir + os.sep + 'json' + os.sep + name + self.suffix + ) + } @unittest.skipIf(not yaml_interface, "YAML interface not available") class TestYamlPortal(TestTextPortal): - suffix = '.yaml' skiplist = ['tableD', 'tableT', 'tableU', 'tableXW_4'] def create_options(self, name): - return {'filename':os.path.abspath(tutorial_dir+os.sep+'yaml'+os.sep+name+self.suffix)} + return { + 'filename': os.path.abspath( + tutorial_dir + os.sep + 'yaml' + os.sep + name + self.suffix + ) + } class LoadTests(object): - skiplist = [] # @@ -1102,234 +1574,288 @@ def Xcheck_skiplist(self, name): self.skipTest('Skipping test %s' % name) def filename(self, tname): - return os.path.abspath(tutorial_dir+os.sep+self.suffix+os.sep+tname+'.'+self.suffix) + return os.path.abspath( + tutorial_dir + os.sep + self.suffix + os.sep + tname + '.' + self.suffix + ) def test_tableA1(self): # Importing a single column of data self.check_skiplist('tableA1') - with capture_output(currdir+'loadA1.dat'): - print("load "+self.filename('A')+" format=set : A;") - model=AbstractModel() + with capture_output(currdir + 'loadA1.dat'): + print("load " + self.filename('A') + " format=set : A;") + model = AbstractModel() model.A = Set() - instance = model.create_instance(currdir+'loadA1.dat') + instance = model.create_instance(currdir + 'loadA1.dat') self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) - os.remove(currdir+'loadA1.dat') + os.remove(currdir + 'loadA1.dat') def test_tableA2(self): # Importing a single column of data self.check_skiplist('tableA2') - with capture_output(currdir+'loadA2.dat'): - print("load "+self.filename('A')+" ;") - model=AbstractModel() + with capture_output(currdir + 'loadA2.dat'): + print("load " + self.filename('A') + " ;") + model = AbstractModel() model.A = Set() try: - instance = model.create_instance(currdir+'loadA2.dat') + instance = model.create_instance(currdir + 'loadA2.dat') self.fail("Should fail because no set name is specified") except IOError: pass except IndexError: pass - os.remove(currdir+'loadA2.dat') + os.remove(currdir + 'loadA2.dat') def test_tableA3(self): # Importing a single column of data self.check_skiplist('tableA3') - with capture_output(currdir+'loadA3.dat'): - print("load "+self.filename('A')+" format=set : A ;") - model=AbstractModel() + with capture_output(currdir + 'loadA3.dat'): + print("load " + self.filename('A') + " format=set : A ;") + model = AbstractModel() model.A = Set() - instance = model.create_instance(currdir+'loadA3.dat') + instance = model.create_instance(currdir + 'loadA3.dat') self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) - os.remove(currdir+'loadA3.dat') + os.remove(currdir + 'loadA3.dat') def test_tableB1(self): # Same as test_tableA self.check_skiplist('tableB1') - with capture_output(currdir+'loadB.dat'): - print("load "+self.filename('B')+" format=set : B;") - model=AbstractModel() + with capture_output(currdir + 'loadB.dat'): + print("load " + self.filename('B') + " format=set : B;") + model = AbstractModel() model.B = Set() - instance = model.create_instance(currdir+'loadB.dat') + instance = model.create_instance(currdir + 'loadB.dat') self.assertEqual(set(instance.B.data()), set([1, 2, 3])) - os.remove(currdir+'loadB.dat') + os.remove(currdir + 'loadB.dat') def test_tableC(self): # Importing a multi-column table, where all columns are # treated as values for a set with tuple values. self.check_skiplist('tableC') - with capture_output(currdir+'loadC.dat'): - print("load "+self.filename('C')+" format=set : C ;") - model=AbstractModel() + with capture_output(currdir + 'loadC.dat'): + print("load " + self.filename('C') + " format=set : C ;") + model = AbstractModel() model.C = Set(dimen=2) - instance = model.create_instance(currdir+'loadC.dat') - self.assertEqual(set(instance.C.data()), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)])) - os.remove(currdir+'loadC.dat') + instance = model.create_instance(currdir + 'loadC.dat') + self.assertEqual( + set(instance.C.data()), + set( + [ + ('A1', 1), + ('A1', 2), + ('A1', 3), + ('A2', 1), + ('A2', 2), + ('A2', 3), + ('A3', 1), + ('A3', 2), + ('A3', 3), + ] + ), + ) + os.remove(currdir + 'loadC.dat') def test_tableD(self): # Importing a 2D array of data as a set. self.check_skiplist('tableD') - with capture_output(currdir+'loadD.dat'): - print("load "+self.filename('D')+" format=set_array : C ;") - model=AbstractModel() + with capture_output(currdir + 'loadD.dat'): + print("load " + self.filename('D') + " format=set_array : C ;") + model = AbstractModel() model.C = Set(dimen=2) - instance = model.create_instance(currdir+'loadD.dat') - self.assertEqual(set(instance.C.data()), set([('A1',1), ('A2',2), ('A3',3)])) - os.remove(currdir+'loadD.dat') + instance = model.create_instance(currdir + 'loadD.dat') + self.assertEqual(set(instance.C.data()), set([('A1', 1), ('A2', 2), ('A3', 3)])) + os.remove(currdir + 'loadD.dat') def test_tableZ(self): # Importing a single parameter self.check_skiplist('tableZ') - with capture_output(currdir+'loadZ.dat'): - print("load "+self.filename('Z')+" : Z ;") - model=AbstractModel() + with capture_output(currdir + 'loadZ.dat'): + print("load " + self.filename('Z') + " : Z ;") + model = AbstractModel() model.Z = Param(default=99.0) - instance = model.create_instance(currdir+'loadZ.dat') + instance = model.create_instance(currdir + 'loadZ.dat') self.assertEqual(instance.Z, 1.01) - os.remove(currdir+'loadZ.dat') + os.remove(currdir + 'loadZ.dat') def test_tableY(self): # Same as tableXW. self.check_skiplist('tableY') - with capture_output(currdir+'loadY.dat'): - print("load "+self.filename('Y')+" : [A] Y;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + with capture_output(currdir + 'loadY.dat'): + print("load " + self.filename('Y') + " : [A] Y;") + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.Y = Param(model.A) - instance = model.create_instance(currdir+'loadY.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.Y.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - os.remove(currdir+'loadY.dat') + instance = model.create_instance(currdir + 'loadY.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.Y.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + os.remove(currdir + 'loadY.dat') def test_tableXW_1(self): # Importing a table, but only reporting the values for the non-index # parameter columns. The first column is assumed to represent an # index column. self.check_skiplist('tableXW_1') - with capture_output(currdir+'loadXW.dat'): - print("load "+self.filename('XW')+" : [A] X W;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + with capture_output(currdir + 'loadXW.dat'): + print("load " + self.filename('XW') + " : [A] X W;") + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.X = Param(model.A) model.W = Param(model.A) - instance = model.create_instance(currdir+'loadXW.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) - os.remove(currdir+'loadXW.dat') + instance = model.create_instance(currdir + 'loadXW.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) + os.remove(currdir + 'loadXW.dat') def test_tableXW_2(self): # Like test_tableXW_1, except that set A is not defined. self.check_skiplist('tableXW_2') - with capture_output(currdir+'loadXW.dat'): - print("load "+self.filename('XW')+" : [A] X W;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3']) + with capture_output(currdir + 'loadXW.dat'): + print("load " + self.filename('XW') + " : [A] X W;") + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3']) model.X = Param(model.A) model.W = Param(model.A) - instance = model.create_instance(currdir+'loadXW.dat') - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) - os.remove(currdir+'loadXW.dat') + instance = model.create_instance(currdir + 'loadXW.dat') + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) + os.remove(currdir + 'loadXW.dat') def test_tableXW_3(self): - # Like test_tableXW_1, except that set A is defined in the load statment. + # Like test_tableXW_1, except that set A is defined in the load statement. self.check_skiplist('tableXW_3') - with capture_output(currdir+'loadXW.dat'): - print("load "+self.filename('XW')+" : A=[A] X W;") - model=AbstractModel() + with capture_output(currdir + 'loadXW.dat'): + print("load " + self.filename('XW') + " : A=[A] X W;") + model = AbstractModel() model.A = Set() model.X = Param(model.A) model.W = Param(model.A) - instance = model.create_instance(currdir+'loadXW.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) - os.remove(currdir+'loadXW.dat') + instance = model.create_instance(currdir + 'loadXW.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) + os.remove(currdir + 'loadXW.dat') def test_tableXW_4(self): - # Like test_tableXW_1, except that set A is defined in the load statment and all values are mapped. + # Like test_tableXW_1, except that set A is defined in the load statement and all values are mapped. self.check_skiplist('tableXW_4') - with capture_output(currdir+'loadXW.dat'): - print("load "+self.filename('XW')+" : B=[A] R=X S=W;") - model=AbstractModel() + with capture_output(currdir + 'loadXW.dat'): + print("load " + self.filename('XW') + " : B=[A] R=X S=W;") + model = AbstractModel() model.B = Set() model.R = Param(model.B) model.S = Param(model.B) - instance = model.create_instance(currdir+'loadXW.dat') - self.assertEqual(set(instance.B.data()), set(['A1','A2','A3'])) - self.assertEqual(instance.R.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.S.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) - os.remove(currdir+'loadXW.dat') + instance = model.create_instance(currdir + 'loadXW.dat') + self.assertEqual(set(instance.B.data()), set(['A1', 'A2', 'A3'])) + self.assertEqual(instance.R.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.S.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) + os.remove(currdir + 'loadXW.dat') def test_tableT(self): # Importing a 2D array of parameters that are transposed. self.check_skiplist('tableT') - with capture_output(currdir+'loadT.dat'): - print("load "+self.filename('T')+" format=transposed_array : T;") - model=AbstractModel() - model.B = Set(initialize=['I1','I2','I3','I4']) - model.A = Set(initialize=['A1','A2','A3']) + with capture_output(currdir + 'loadT.dat'): + print("load " + self.filename('T') + " format=transposed_array : T;") + model = AbstractModel() + model.B = Set(initialize=['I1', 'I2', 'I3', 'I4']) + model.A = Set(initialize=['A1', 'A2', 'A3']) model.T = Param(model.A, model.B) - instance = model.create_instance(currdir+'loadT.dat') - self.assertEqual(instance.T.extract_values(), {('A2', 'I1'): 2.3, ('A1', 'I2'): 1.4, ('A1', 'I3'): 1.5, ('A1', 'I4'): 1.6, ('A1', 'I1'): 1.3, ('A3', 'I4'): 3.6, ('A2', 'I4'): 2.6, ('A3', 'I1'): 3.3, ('A2', 'I3'): 2.5, ('A3', 'I2'): 3.4, ('A2', 'I2'): 2.4, ('A3', 'I3'): 3.5}) - os.remove(currdir+'loadT.dat') + instance = model.create_instance(currdir + 'loadT.dat') + self.assertEqual( + instance.T.extract_values(), + { + ('A2', 'I1'): 2.3, + ('A1', 'I2'): 1.4, + ('A1', 'I3'): 1.5, + ('A1', 'I4'): 1.6, + ('A1', 'I1'): 1.3, + ('A3', 'I4'): 3.6, + ('A2', 'I4'): 2.6, + ('A3', 'I1'): 3.3, + ('A2', 'I3'): 2.5, + ('A3', 'I2'): 3.4, + ('A2', 'I2'): 2.4, + ('A3', 'I3'): 3.5, + }, + ) + os.remove(currdir + 'loadT.dat') def test_tableU(self): # Importing a 2D array of parameters. self.check_skiplist('tableU') - with capture_output(currdir+'loadU.dat'): - print("load "+self.filename('U')+" format=array : U;") - model=AbstractModel() - model.A = Set(initialize=['I1','I2','I3','I4']) - model.B = Set(initialize=['A1','A2','A3']) + with capture_output(currdir + 'loadU.dat'): + print("load " + self.filename('U') + " format=array : U;") + model = AbstractModel() + model.A = Set(initialize=['I1', 'I2', 'I3', 'I4']) + model.B = Set(initialize=['A1', 'A2', 'A3']) model.U = Param(model.A, model.B) - instance = model.create_instance(currdir+'loadU.dat') - self.assertEqual(instance.U.extract_values(), {('I2', 'A1'): 1.4, ('I3', 'A1'): 1.5, ('I3', 'A2'): 2.5, ('I4', 'A1'): 1.6, ('I3', 'A3'): 3.5, ('I1', 'A2'): 2.3, ('I4', 'A3'): 3.6, ('I1', 'A3'): 3.3, ('I4', 'A2'): 2.6, ('I2', 'A3'): 3.4, ('I1', 'A1'): 1.3, ('I2', 'A2'): 2.4}) - os.remove(currdir+'loadU.dat') + instance = model.create_instance(currdir + 'loadU.dat') + self.assertEqual( + instance.U.extract_values(), + { + ('I2', 'A1'): 1.4, + ('I3', 'A1'): 1.5, + ('I3', 'A2'): 2.5, + ('I4', 'A1'): 1.6, + ('I3', 'A3'): 3.5, + ('I1', 'A2'): 2.3, + ('I4', 'A3'): 3.6, + ('I1', 'A3'): 3.3, + ('I4', 'A2'): 2.6, + ('I2', 'A3'): 3.4, + ('I1', 'A1'): 1.3, + ('I2', 'A2'): 2.4, + }, + ) + os.remove(currdir + 'loadU.dat') def test_tableS(self): # Importing a table, but only reporting the values for the non-index # parameter columns. The first column is assumed to represent an # index column. A missing value is represented in the column data. self.check_skiplist('tableS') - with capture_output(currdir+'loadS.dat'): - print("load "+self.filename('S')+" : [A] S ;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + with capture_output(currdir + 'loadS.dat'): + print("load " + self.filename('S') + " : [A] S ;") + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.S = Param(model.A) - instance = model.create_instance(currdir+'loadS.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.S.extract_values(), {'A1':3.3,'A3':3.5}) - os.remove(currdir+'loadS.dat') + instance = model.create_instance(currdir + 'loadS.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.S.extract_values(), {'A1': 3.3, 'A3': 3.5}) + os.remove(currdir + 'loadS.dat') def test_tablePO(self): # Importing a table that has multiple indexing columns self.check_skiplist('tablePO') - with capture_output(currdir+'loadPO.dat'): - print("load "+self.filename('PO')+" : J=[A,B] P O;") - model=AbstractModel() + with capture_output(currdir + 'loadPO.dat'): + print("load " + self.filename('PO') + " : J=[A,B] P O;") + model = AbstractModel() model.J = Set(dimen=2) model.P = Param(model.J) model.O = Param(model.J) - instance = model.create_instance(currdir+'loadPO.dat') - self.assertEqual(set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) ) - self.assertEqual(instance.P.extract_values(), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} ) - self.assertEqual(instance.O.extract_values(), {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4}) - os.remove(currdir+'loadPO.dat') + instance = model.create_instance(currdir + 'loadPO.dat') + self.assertEqual( + set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) + ) + self.assertEqual( + instance.P.extract_values(), + {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4}, + ) + self.assertEqual( + instance.O.extract_values(), + {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4}, + ) + os.remove(currdir + 'loadPO.dat') class TestTextLoad(LoadTests, unittest.TestCase): - suffix = 'tab' class TestCsvLoad(LoadTests, unittest.TestCase): - suffix = 'csv' class TestXmlLoad(LoadTests, unittest.TestCase): - suffix = 'xml' skiplist = ['tableD', 'tableT', 'tableU'] @@ -1338,324 +1864,409 @@ def test_tableXW_nested1(self): # parameter columns. The first column is assumed to represent an # index column. self.check_skiplist('tableXW_1') - with capture_output(currdir+'loadXW.dat'): - print("load "+self.filename('XW_nested1')+" query='./bar/table/*' : [A] X W;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + with capture_output(currdir + 'loadXW.dat'): + print( + "load " + + self.filename('XW_nested1') + + " query='./bar/table/*' : [A] X W;" + ) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.X = Param(model.A) model.W = Param(model.A) - instance = model.create_instance(currdir+'loadXW.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) - os.remove(currdir+'loadXW.dat') + instance = model.create_instance(currdir + 'loadXW.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) + os.remove(currdir + 'loadXW.dat') def test_tableXW_nested2(self): # Importing a table, but only reporting the values for the non-index # parameter columns. The first column is assumed to represent an # index column. self.check_skiplist('tableXW_1') - with capture_output(currdir+'loadXW.dat'): - print("load "+self.filename('XW_nested2')+" query='./bar/table/row' : [A] X W;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + with capture_output(currdir + 'loadXW.dat'): + print( + "load " + + self.filename('XW_nested2') + + " query='./bar/table/row' : [A] X W;" + ) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.X = Param(model.A) model.W = Param(model.A) - instance = model.create_instance(currdir+'loadXW.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) - os.remove(currdir+'loadXW.dat') + instance = model.create_instance(currdir + 'loadXW.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) + os.remove(currdir + 'loadXW.dat') class Spreadsheet(LoadTests): - def filename(self, tname): if tname == "Z": - return os.path.abspath(tutorial_dir+os.sep+self._filename)+" range="+tname+"param" + return ( + os.path.abspath(tutorial_dir + os.sep + self._filename) + + " range=" + + tname + + "param" + ) else: - return os.path.abspath(tutorial_dir+os.sep+self._filename)+" range="+tname+"table" + return ( + os.path.abspath(tutorial_dir + os.sep + self._filename) + + " range=" + + tname + + "table" + ) @unittest.skipIf(not xls_interface, "No XLS interface available") class TestSpreadsheetXLS(Spreadsheet, unittest.TestCase): - - _filename='excel.xls' + _filename = 'excel.xls' @unittest.skipIf(not xlsx_interface, "No XLSX interface available") class TestSpreadsheetXLSX(Spreadsheet, unittest.TestCase): - - _filename='excel.xlsx' + _filename = 'excel.xlsx' @unittest.skipIf(not xlsb_interface, "No XLSB interface available") class TestSpreadsheetXLSB(Spreadsheet, unittest.TestCase): - - _filename='excel.xlsb' + _filename = 'excel.xlsb' @unittest.skipIf(not xlsm_interface, "No XLSM interface available") class TestSpreadsheetXLSM(Spreadsheet, unittest.TestCase): - - _filename='excel.xlsm' - + _filename = 'excel.xlsm' class TestTableCmd(unittest.TestCase): - def test_tableA1_1(self): # Importing a single column of data as a set - with capture_output(currdir+'loadA1.dat'): + with capture_output(currdir + 'loadA1.dat'): print("table columns=1 A={1} := A1 A2 A3 ;") - model=AbstractModel() + model = AbstractModel() model.A = Set() - instance = model.create_instance(currdir+'loadA1.dat') + instance = model.create_instance(currdir + 'loadA1.dat') self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) - os.remove(currdir+'loadA1.dat') + os.remove(currdir + 'loadA1.dat') def test_tableA1_2(self): # Importing a single column of data as a set - with capture_output(currdir+'loadA1.dat'): + with capture_output(currdir + 'loadA1.dat'): print("table A={A} : A := A1 A2 A3 ;") - model=AbstractModel() + model = AbstractModel() model.A = Set() - instance = model.create_instance(currdir+'loadA1.dat') + instance = model.create_instance(currdir + 'loadA1.dat') self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) - os.remove(currdir+'loadA1.dat') + os.remove(currdir + 'loadA1.dat') def test_tableB1_1(self): # Same as test_tableA - with capture_output(currdir+'loadB.dat'): + with capture_output(currdir + 'loadB.dat'): print("table columns=1 B={1} := 1 2 3 ;") - model=AbstractModel() + model = AbstractModel() model.B = Set() - instance = model.create_instance(currdir+'loadB.dat') + instance = model.create_instance(currdir + 'loadB.dat') self.assertEqual(set(instance.B.data()), set([1, 2, 3])) - os.remove(currdir+'loadB.dat') + os.remove(currdir + 'loadB.dat') def test_tableB1_2(self): # Same as test_tableA - with capture_output(currdir+'loadB.dat'): + with capture_output(currdir + 'loadB.dat'): print("table B={B} : B := 1 2 3 ;") - model=AbstractModel() + model = AbstractModel() model.B = Set() - instance = model.create_instance(currdir+'loadB.dat') + instance = model.create_instance(currdir + 'loadB.dat') self.assertEqual(set(instance.B.data()), set([1, 2, 3])) - os.remove(currdir+'loadB.dat') + os.remove(currdir + 'loadB.dat') def test_tableC_1(self): # Importing a multi-column table, where all columns are # treated as values for a set with tuple values. - with capture_output(currdir+'loadC.dat'): - print("table columns=2 C={1,2} := A1 1 A1 2 A1 3 A2 1 A2 2 A2 3 A3 1 A3 2 A3 3 ;") - model=AbstractModel() + with capture_output(currdir + 'loadC.dat'): + print( + "table columns=2 C={1,2} := A1 1 A1 2 A1 3 A2 1 A2 2 A2 3 A3 1 A3 2 A3 3 ;" + ) + model = AbstractModel() model.C = Set(dimen=2) - instance = model.create_instance(currdir+'loadC.dat') - self.assertEqual(set(instance.C.data()), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)])) - os.remove(currdir+'loadC.dat') + instance = model.create_instance(currdir + 'loadC.dat') + self.assertEqual( + set(instance.C.data()), + set( + [ + ('A1', 1), + ('A1', 2), + ('A1', 3), + ('A2', 1), + ('A2', 2), + ('A2', 3), + ('A3', 1), + ('A3', 2), + ('A3', 3), + ] + ), + ) + os.remove(currdir + 'loadC.dat') def test_tableC_2(self): # Importing a multi-column table, where all columns are # treated as values for a set with tuple values. - with capture_output(currdir+'loadC.dat'): - print("table C={a,b} : a b := A1 1 A1 2 A1 3 A2 1 A2 2 A2 3 A3 1 A3 2 A3 3 ;") - model=AbstractModel() + with capture_output(currdir + 'loadC.dat'): + print( + "table C={a,b} : a b := A1 1 A1 2 A1 3 A2 1 A2 2 A2 3 A3 1 A3 2 A3 3 ;" + ) + model = AbstractModel() model.C = Set(dimen=2) - instance = model.create_instance(currdir+'loadC.dat') - self.assertEqual(set(instance.C.data()), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)])) - os.remove(currdir+'loadC.dat') + instance = model.create_instance(currdir + 'loadC.dat') + self.assertEqual( + set(instance.C.data()), + set( + [ + ('A1', 1), + ('A1', 2), + ('A1', 3), + ('A2', 1), + ('A2', 2), + ('A2', 3), + ('A3', 1), + ('A3', 2), + ('A3', 3), + ] + ), + ) + os.remove(currdir + 'loadC.dat') def test_tableZ(self): # Importing a single parameter - with capture_output(currdir+'loadZ.dat'): + with capture_output(currdir + 'loadZ.dat'): print("table Z := 1.01 ;") - model=AbstractModel() + model = AbstractModel() model.Z = Param(default=99.0) - instance = model.create_instance(currdir+'loadZ.dat') + instance = model.create_instance(currdir + 'loadZ.dat') self.assertEqual(instance.Z, 1.01) - os.remove(currdir+'loadZ.dat') + os.remove(currdir + 'loadZ.dat') def test_tableY_1(self): # Same as tableXW. - with capture_output(currdir+'loadY.dat'): + with capture_output(currdir + 'loadY.dat'): print("table columns=2 Y(1)={2} := A1 3.3 A2 3.4 A3 3.5 ;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.Y = Param(model.A) - instance = model.create_instance(currdir+'loadY.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.Y.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - os.remove(currdir+'loadY.dat') + instance = model.create_instance(currdir + 'loadY.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.Y.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + os.remove(currdir + 'loadY.dat') def test_tableY_2(self): # Same as tableXW. - with capture_output(currdir+'loadY.dat'): + with capture_output(currdir + 'loadY.dat'): print("table Y(A) : A Y := A1 3.3 A2 3.4 A3 3.5 ;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.Y = Param(model.A) - instance = model.create_instance(currdir+'loadY.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.Y.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - os.remove(currdir+'loadY.dat') + instance = model.create_instance(currdir + 'loadY.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.Y.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + os.remove(currdir + 'loadY.dat') def test_tableXW_1_1(self): # Importing a table, but only reporting the values for the non-index # parameter columns. The first column is assumed to represent an # index column. - with capture_output(currdir+'loadXW.dat'): - print("table columns=3 X(1)={2} W(1)={3} := A1 3.3 4.3 A2 3.4 4.4 A3 3.5 4.5 ;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + with capture_output(currdir + 'loadXW.dat'): + print( + "table columns=3 X(1)={2} W(1)={3} := A1 3.3 4.3 A2 3.4 4.4 A3 3.5 4.5 ;" + ) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.X = Param(model.A) model.W = Param(model.A) - instance = model.create_instance(currdir+'loadXW.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) - os.remove(currdir+'loadXW.dat') + instance = model.create_instance(currdir + 'loadXW.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) + os.remove(currdir + 'loadXW.dat') def test_tableXW_1_2(self): # Importing a table, but only reporting the values for the non-index # parameter columns. The first column is assumed to represent an # index column. - with capture_output(currdir+'loadXW.dat'): + with capture_output(currdir + 'loadXW.dat'): print("table X(A) W(A) : A X W := A1 3.3 4.3 A2 3.4 4.4 A3 3.5 4.5 ;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.X = Param(model.A) model.W = Param(model.A) - instance = model.create_instance(currdir+'loadXW.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) - os.remove(currdir+'loadXW.dat') + instance = model.create_instance(currdir + 'loadXW.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) + os.remove(currdir + 'loadXW.dat') def test_tableXW_3_1(self): - # Like test_tableXW_1, except that set A is defined in the load statment. - with capture_output(currdir+'loadXW.dat'): - print("table columns=3 A={1} X(A)={2} W(A)={3} := A1 3.3 4.3 A2 3.4 4.4 A3 3.5 4.5 ;") - model=AbstractModel() + # Like test_tableXW_1, except that set A is defined in the load statement. + with capture_output(currdir + 'loadXW.dat'): + print( + "table columns=3 A={1} X(A)={2} W(A)={3} := A1 3.3 4.3 A2 3.4 4.4 A3 3.5 4.5 ;" + ) + model = AbstractModel() model.A = Set() model.X = Param(model.A) model.W = Param(model.A) - instance = model.create_instance(currdir+'loadXW.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) - os.remove(currdir+'loadXW.dat') + instance = model.create_instance(currdir + 'loadXW.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) + os.remove(currdir + 'loadXW.dat') def test_tableXW_3_2(self): - # Like test_tableXW_1, except that set A is defined in the load statment. - with capture_output(currdir+'loadXW.dat'): + # Like test_tableXW_1, except that set A is defined in the load statement. + with capture_output(currdir + 'loadXW.dat'): print("table A={A} X(A) W(A) : A X W := A1 3.3 4.3 A2 3.4 4.4 A3 3.5 4.5 ;") - model=AbstractModel() + model = AbstractModel() model.A = Set() model.X = Param(model.A) model.W = Param(model.A) - instance = model.create_instance(currdir+'loadXW.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3'])) - self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5}) - self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5}) - os.remove(currdir+'loadXW.dat') + instance = model.create_instance(currdir + 'loadXW.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3'])) + self.assertEqual(instance.X.extract_values(), {'A1': 3.3, 'A2': 3.4, 'A3': 3.5}) + self.assertEqual(instance.W.extract_values(), {'A1': 4.3, 'A2': 4.4, 'A3': 4.5}) + os.remove(currdir + 'loadXW.dat') def test_tableS_1(self): # Importing a table, but only reporting the values for the non-index # parameter columns. The first column is assumed to represent an # index column. A missing value is represented in the column data. - with capture_output(currdir+'loadS.dat'): + with capture_output(currdir + 'loadS.dat'): print("table columns=2 S(1)={2} := A1 3.3 A2 . A3 3.5 ;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.S = Param(model.A) - instance = model.create_instance(currdir+'loadS.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.S.extract_values(), {'A1':3.3,'A3':3.5}) - os.remove(currdir+'loadS.dat') + instance = model.create_instance(currdir + 'loadS.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.S.extract_values(), {'A1': 3.3, 'A3': 3.5}) + os.remove(currdir + 'loadS.dat') def test_tableS_2(self): # Importing a table, but only reporting the values for the non-index # parameter columns. The first column is assumed to represent an # index column. A missing value is represented in the column data. - with capture_output(currdir+'loadS.dat'): + with capture_output(currdir + 'loadS.dat'): print("table S(A) : A S := A1 3.3 A2 . A3 3.5 ;") - model=AbstractModel() - model.A = Set(initialize=['A1','A2','A3','A4']) + model = AbstractModel() + model.A = Set(initialize=['A1', 'A2', 'A3', 'A4']) model.S = Param(model.A) - instance = model.create_instance(currdir+'loadS.dat') - self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4'])) - self.assertEqual(instance.S.extract_values(), {'A1':3.3,'A3':3.5}) - os.remove(currdir+'loadS.dat') + instance = model.create_instance(currdir + 'loadS.dat') + self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3', 'A4'])) + self.assertEqual(instance.S.extract_values(), {'A1': 3.3, 'A3': 3.5}) + os.remove(currdir + 'loadS.dat') def test_tablePO_1(self): # Importing a table that has multiple indexing columns - with capture_output(currdir+'loadPO.dat'): - print("table columns=4 J={1,2} P(J)={3} O(J)={4} := A1 B1 4.3 5.3 A2 B2 4.4 5.4 A3 B3 4.5 5.5 ;") - model=AbstractModel() + with capture_output(currdir + 'loadPO.dat'): + print( + "table columns=4 J={1,2} P(J)={3} O(J)={4} := A1 B1 4.3 5.3 A2 B2 4.4 5.4 A3 B3 4.5 5.5 ;" + ) + model = AbstractModel() model.J = Set(dimen=2) model.P = Param(model.J) model.O = Param(model.J) - instance = model.create_instance(currdir+'loadPO.dat') - self.assertEqual(set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) ) - self.assertEqual(instance.P.extract_values(), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} ) - self.assertEqual(instance.O.extract_values(), {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4}) - os.remove(currdir+'loadPO.dat') + instance = model.create_instance(currdir + 'loadPO.dat') + self.assertEqual( + set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) + ) + self.assertEqual( + instance.P.extract_values(), + {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4}, + ) + self.assertEqual( + instance.O.extract_values(), + {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4}, + ) + os.remove(currdir + 'loadPO.dat') def test_tablePO_2(self): # Importing a table that has multiple indexing columns - with capture_output(currdir+'loadPO.dat'): - print("table J={A,B} P(J) O(J) : A B P O := A1 B1 4.3 5.3 A2 B2 4.4 5.4 A3 B3 4.5 5.5 ;") - model=AbstractModel() + with capture_output(currdir + 'loadPO.dat'): + print( + "table J={A,B} P(J) O(J) : A B P O := A1 B1 4.3 5.3 A2 B2 4.4 5.4 A3 B3 4.5 5.5 ;" + ) + model = AbstractModel() model.J = Set(dimen=2) model.P = Param(model.J) model.O = Param(model.J) - instance = model.create_instance(currdir+'loadPO.dat') - self.assertEqual(set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) ) - self.assertEqual(instance.P.extract_values(), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} ) - self.assertEqual(instance.O.extract_values(), {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4}) - os.remove(currdir+'loadPO.dat') + instance = model.create_instance(currdir + 'loadPO.dat') + self.assertEqual( + set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) + ) + self.assertEqual( + instance.P.extract_values(), + {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4}, + ) + self.assertEqual( + instance.O.extract_values(), + {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4}, + ) + os.remove(currdir + 'loadPO.dat') def test_complex_1(self): # Importing a table with multiple indexing columns - with capture_output(currdir+'loadComplex.dat'): + with capture_output(currdir + 'loadComplex.dat'): print("table columns=8 I={4} J={3,5} A(I)={1} B(J)={7} :=") print("A1 x1 J311 I1 J321 y1 B1 z1") print("A2 x2 J312 I2 J322 y2 B2 z2") print("A3 x3 J313 I3 J323 y3 B3 z3") print(";") - model=AbstractModel() + model = AbstractModel() model.I = Set() model.J = Set(dimen=2) model.A = Param(model.I) model.B = Param(model.J) - instance = model.create_instance(currdir+'loadComplex.dat') - self.assertEqual(set(instance.J.data()), set([('J311', 'J321'), ('J312', 'J322'), ('J313', 'J323')]) ) + instance = model.create_instance(currdir + 'loadComplex.dat') + self.assertEqual( + set(instance.J.data()), + set([('J311', 'J321'), ('J312', 'J322'), ('J313', 'J323')]), + ) self.assertEqual(set(instance.I.data()), set(['I1', 'I2', 'I3'])) - self.assertEqual(instance.B.extract_values(), {('J311', 'J321'): 'B1', ('J312', 'J322'): 'B2', ('J313', 'J323'): 'B3'} ) - self.assertEqual(instance.A.extract_values(), {'I1': 'A1', 'I2': 'A2', 'I3': 'A3'}) - os.remove(currdir+'loadComplex.dat') + self.assertEqual( + instance.B.extract_values(), + {('J311', 'J321'): 'B1', ('J312', 'J322'): 'B2', ('J313', 'J323'): 'B3'}, + ) + self.assertEqual( + instance.A.extract_values(), {'I1': 'A1', 'I2': 'A2', 'I3': 'A3'} + ) + os.remove(currdir + 'loadComplex.dat') def test_complex_2(self): # Importing a table with multiple indexing columns - with capture_output(currdir+'loadComplex.dat'): + with capture_output(currdir + 'loadComplex.dat'): print("table I={I} J={J1,J2} A(J) B(I) :") print("A x J1 I J2 y B z :=") print("A1 x1 J311 I1 J321 y1 B1 z1") print("A2 x2 J312 I2 J322 y2 B2 z2") print("A3 x3 J313 I3 J323 y3 B3 z3") print(";") - model=AbstractModel() + model = AbstractModel() model.I = Set() model.J = Set(dimen=2) model.A = Param(model.J) model.B = Param(model.I) - instance = model.create_instance(currdir+'loadComplex.dat') - self.assertEqual(set(instance.J.data()), set([('J311', 'J321'), ('J312', 'J322'), ('J313', 'J323')]) ) + instance = model.create_instance(currdir + 'loadComplex.dat') + self.assertEqual( + set(instance.J.data()), + set([('J311', 'J321'), ('J312', 'J322'), ('J313', 'J323')]), + ) self.assertEqual(set(instance.I.data()), set(['I1', 'I2', 'I3'])) - self.assertEqual(instance.A.extract_values(), {('J311', 'J321'): 'A1', ('J312', 'J322'): 'A2', ('J313', 'J323'): 'A3'} ) - self.assertEqual(instance.B.extract_values(), {'I1': 'B1', 'I2': 'B2', 'I3': 'B3'}) - os.remove(currdir+'loadComplex.dat') + self.assertEqual( + instance.A.extract_values(), + {('J311', 'J321'): 'A1', ('J312', 'J322'): 'A2', ('J313', 'J323'): 'A3'}, + ) + self.assertEqual( + instance.B.extract_values(), {'I1': 'B1', 'I2': 'B2', 'I3': 'B3'} + ) + os.remove(currdir + 'loadComplex.dat') if __name__ == "__main__": diff --git a/pyomo/duality/collect.py b/pyomo/duality/collect.py index 93efb7d5351..a8b62cb8dfe 100644 --- a/pyomo/duality/collect.py +++ b/pyomo/duality/collect.py @@ -12,7 +12,7 @@ # Routines to collect data in a structured format from pyomo.common.collections import Bunch -from pyomo.core.base import Var, Constraint, Objective, maximize, minimize +from pyomo.core.base import Var, Constraint, Objective, maximize, minimize from pyomo.repn.standard_repn import generate_standard_repn @@ -23,10 +23,14 @@ def collect_linear_terms(block, unfixed): # vnames = set() for obj in block.component_objects(Constraint, active=True): - vnames.add((obj.getname(fully_qualified=True, relative_to=block), obj.is_indexed())) + vnames.add( + (obj.getname(fully_qualified=True, relative_to=block), obj.is_indexed()) + ) cnames = set(unfixed) for obj in block.component_objects(Var, active=True): - cnames.add((obj.getname(fully_qualified=True, relative_to=block), obj.is_indexed())) + cnames.add( + (obj.getname(fully_qualified=True, relative_to=block), obj.is_indexed()) + ) # A = {} b_coef = {} @@ -40,13 +44,15 @@ def collect_linear_terms(block, unfixed): for odata in block.component_objects(Objective, active=True): for ndx in odata: if odata[ndx].sense == maximize: - o_terms = generate_standard_repn(-1*odata[ndx].expr, compute_values=False) + o_terms = generate_standard_repn( + -1 * odata[ndx].expr, compute_values=False + ) d_sense = minimize else: o_terms = generate_standard_repn(odata[ndx].expr, compute_values=False) d_sense = maximize for var, coef in zip(o_terms.linear_vars, o_terms.linear_coefs): - c_rhs[ var.parent_component().local_name, var.index() ] = coef + c_rhs[var.parent_component().local_name, var.index()] = coef # Stop after the first objective break # @@ -62,23 +68,43 @@ def collect_linear_terms(block, unfixed): # If a constraint has a fixed body, then don't collect it. # continue - lower_terms = generate_standard_repn(con.lower, compute_values=False) if not con.lower is None else None - upper_terms = generate_standard_repn(con.upper, compute_values=False) if not con.upper is None else None + lower_terms = ( + generate_standard_repn(con.lower, compute_values=False) + if not con.lower is None + else None + ) + upper_terms = ( + generate_standard_repn(con.upper, compute_values=False) + if not con.upper is None + else None + ) # if not lower_terms is None and not lower_terms.is_constant(): - raise(RuntimeError, "Error during dualization: Constraint '%s' has a lower bound that is non-constant") + raise ( + RuntimeError, + "Error during dualization: Constraint '%s' has a lower bound that is non-constant", + ) if not upper_terms is None and not upper_terms.is_constant(): - raise(RuntimeError, "Error during dualization: Constraint '%s' has an upper bound that is non-constant") + raise ( + RuntimeError, + "Error during dualization: Constraint '%s' has an upper bound that is non-constant", + ) # for var, coef in zip(body_terms.linear_vars, body_terms.linear_coefs): try: # The variable is in the subproblem - varname = var.parent_component().getname(fully_qualified=True, relative_to=block) + varname = var.parent_component().getname( + fully_qualified=True, relative_to=block + ) except: # The variable is somewhere else in the model - varname = var.parent_component().getname(fully_qualified=True, relative_to=block.model()) + varname = var.parent_component().getname( + fully_qualified=True, relative_to=block.model() + ) varndx = var.index() - A.setdefault(varname, {}).setdefault(varndx,[]).append( Bunch(coef=coef, var=name, ndx=ndx) ) + A.setdefault(varname, {}).setdefault(varndx, []).append( + Bunch(coef=coef, var=name, ndx=ndx) + ) # if not con.equality: # @@ -89,13 +115,13 @@ def collect_linear_terms(block, unfixed): # body <= upper # v_domain[name, ndx] = -1 - b_coef[name,ndx] = upper_terms.constant - body_terms.constant + b_coef[name, ndx] = upper_terms.constant - body_terms.constant elif upper_terms is None: # # lower <= body # v_domain[name, ndx] = 1 - b_coef[name,ndx] = lower_terms.constant - body_terms.constant + b_coef[name, ndx] = lower_terms.constant - body_terms.constant else: # # lower <= body <= upper @@ -104,19 +130,20 @@ def collect_linear_terms(block, unfixed): # ndx_ = tuple(list(ndx).append('lb')) v_domain[name, ndx_] = 1 - b_coef[name,ndx] = lower_terms.constant - body_terms.constant + b_coef[name, ndx] = lower_terms.constant - body_terms.constant # # Dual for upper bound # ndx_ = tuple(list(ndx).append('ub')) v_domain[name, ndx_] = -1 - b_coef[name,ndx] = upper_terms.constant - body_terms.constant + b_coef[name, ndx] = upper_terms.constant - body_terms.constant else: # # Equality constraint # v_domain[name, ndx] = 0 - b_coef[name,ndx] = lower_terms.constant - body_terms.constant + b_coef[name, ndx] = lower_terms.constant - body_terms.constant + # # Collect bound constraints # @@ -151,59 +178,75 @@ def all_vars(b): var = data[ndx] bounds = var.bounds if bounds[0] is None and bounds[1] is None: - c_sense[name,ndx] = 'e' + c_sense[name, ndx] = 'e' elif bounds[0] is None: if bounds[1] == 0.0: - c_sense[name,ndx] = 'g' + c_sense[name, ndx] = 'g' else: - c_sense[name,ndx] = 'e' + c_sense[name, ndx] = 'e' # # Add constraint that defines the upper bound # name_ = name + "_upper_" - varname = data.parent_component().getname(fully_qualified=True, relative_to=block) + varname = data.parent_component().getname( + fully_qualified=True, relative_to=block + ) varndx = data[ndx].index() - A.setdefault(varname, {}).setdefault(varndx,[]).append( Bunch(coef=1.0, var=name_, ndx=ndx) ) + A.setdefault(varname, {}).setdefault(varndx, []).append( + Bunch(coef=1.0, var=name_, ndx=ndx) + ) # - v_domain[name_,ndx] = -1 - b_coef[name_,ndx] = bounds[1] + v_domain[name_, ndx] = -1 + b_coef[name_, ndx] = bounds[1] elif bounds[1] is None: if bounds[0] == 0.0: - c_sense[name,ndx] = 'l' + c_sense[name, ndx] = 'l' else: - c_sense[name,ndx] = 'e' + c_sense[name, ndx] = 'e' # # Add constraint that defines the lower bound # name_ = name + "_lower_" - varname = data.parent_component().getname(fully_qualified=True, relative_to=block) + varname = data.parent_component().getname( + fully_qualified=True, relative_to=block + ) varndx = data[ndx].index() - A.setdefault(varname, {}).setdefault(varndx,[]).append( Bunch(coef=1.0, var=name_, ndx=ndx) ) + A.setdefault(varname, {}).setdefault(varndx, []).append( + Bunch(coef=1.0, var=name_, ndx=ndx) + ) # - v_domain[name_,ndx] = 1 - b_coef[name_,ndx] = bounds[0] + v_domain[name_, ndx] = 1 + b_coef[name_, ndx] = bounds[0] else: # Bounded above and below - c_sense[name,ndx] = 'e' + c_sense[name, ndx] = 'e' # # Add constraint that defines the upper bound # name_ = name + "_upper_" - varname = data.parent_component().getname(fully_qualified=True, relative_to=block) + varname = data.parent_component().getname( + fully_qualified=True, relative_to=block + ) varndx = data[ndx].index() - A.setdefault(varname, {}).setdefault(varndx,[]).append( Bunch(coef=1.0, var=name_, ndx=ndx) ) + A.setdefault(varname, {}).setdefault(varndx, []).append( + Bunch(coef=1.0, var=name_, ndx=ndx) + ) # - v_domain[name_,ndx] = -1 - b_coef[name_,ndx] = bounds[1] + v_domain[name_, ndx] = -1 + b_coef[name_, ndx] = bounds[1] # # Add constraint that defines the lower bound # name_ = name + "_lower_" - varname = data.parent_component().getname(fully_qualified=True, relative_to=block) + varname = data.parent_component().getname( + fully_qualified=True, relative_to=block + ) varndx = data[ndx].index() - A.setdefault(varname, {}).setdefault(varndx,[]).append( Bunch(coef=1.0, var=name_, ndx=ndx) ) + A.setdefault(varname, {}).setdefault(varndx, []).append( + Bunch(coef=1.0, var=name_, ndx=ndx) + ) # - v_domain[name_,ndx] = 1 - b_coef[name_,ndx] = bounds[0] + v_domain[name_, ndx] = 1 + b_coef[name_, ndx] = bounds[0] # return (A, b_coef, c_rhs, c_sense, d_sense, vnames, cnames, v_domain) diff --git a/pyomo/duality/lagrangian_dual.py b/pyomo/duality/lagrangian_dual.py index e616cd46f8f..1b27a3f93d4 100644 --- a/pyomo/duality/lagrangian_dual.py +++ b/pyomo/duality/lagrangian_dual.py @@ -13,7 +13,15 @@ # NOTE: deprecated code # from pyomo.common.deprecation import deprecated -from pyomo.core import TransformationFactory, Constraint, Set, Var, Objective, AbstractModel, maximize +from pyomo.core import ( + TransformationFactory, + Constraint, + Set, + Var, + Objective, + AbstractModel, + maximize, +) from pyomo.repn import generate_standard_repn from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation from pyomo.core.plugins.transform.standard_form import StandardForm @@ -42,14 +50,15 @@ class DualTransformation(IsomorphicTransformation): "Development of dualization capabilities has been shifted to " "the Pyomo Adversarial Optimization (PAO) library. Please contact " "William Hart for further details (wehart@sandia.gov).", - version='5.6.2') + version='5.6.2', + ) def __init__(self, **kwds): kwds['name'] = "linear_dual" super(DualTransformation, self).__init__(**kwds) def _create_using(self, model, **kwds): """ - Tranform a model to its Lagrangian dual. + Transform a model to its Lagrangian dual. """ # Optional naming schemes for dual variables and constraints @@ -88,67 +97,63 @@ def _create_using(self, model, **kwds): # {variable_name: coefficient} c = _sparse(0) - # Walk constaints - for (con_name, con_array) in sf.component_map(Constraint, active=True).items(): + # Walk constraints + for con_name, con_array in sf.component_map(Constraint, active=True).items(): for con in (con_array[ndx] for ndx in con_array.index_set()): # The qualified constraint name cname = "%s%s" % (variable_prefix, con.local_name) # Process the body of the constraint - body_terms = process_canonical_repn( - generate_standard_repn(con.body)) + body_terms = process_canonical_repn(generate_standard_repn(con.body)) # Add a numeric constant to the 'b' vector, if present b[cname] -= body_terms.pop(None, 0) # Add variable coefficients to the 'A' matrix row = _sparse(0) - for (vname, coef) in body_terms.items(): + for vname, coef in body_terms.items(): row["%s%s" % (vname, constraint_suffix)] += coef # Process the upper bound of the constraint. We rely on # StandardForm to produce equality constraints, thus # requiring us only to check the lower bounds. - lower_terms = process_canonical_repn( - generate_standard_repn(con.lower)) + lower_terms = process_canonical_repn(generate_standard_repn(con.lower)) # Add a numeric constant to the 'b' matrix, if present b[cname] += lower_terms.pop(None, 0) # Add any variables to the 'A' matrix, if present - for (vname, coef) in lower_terms.items(): + for vname, coef in lower_terms.items(): row["%s%s" % (vname, constraint_suffix)] -= coef A[cname] = row # Walk objectives. Multiply all coefficients by the objective's 'sense' # to convert maximizing objectives to minimizing ones. - for (obj_name, obj_array) in sf.component_map(Objective, active=True).items(): + for obj_name, obj_array in sf.component_map(Objective, active=True).items(): for obj in (obj_array[ndx] for ndx in obj_array.index_set()): # The qualified objective name # Process the objective - terms = process_canonical_repn( - generate_standard_repn(obj.expr)) + terms = process_canonical_repn(generate_standard_repn(obj.expr)) # Add coefficients - for (name, coef) in terms.items(): - c["%s%s" % (name, constraint_suffix)] += coef*obj_array.sense + for name, coef in terms.items(): + c["%s%s" % (name, constraint_suffix)] += coef * obj_array.sense # Form the dual dual = AbstractModel() # Make constraint index set constraint_set_init = [] - for (var_name, var_array) in sf.component_map(Var, active=True).items(): + for var_name, var_array in sf.component_map(Var, active=True).items(): for var in (var_array[ndx] for ndx in var_array.index_set()): - constraint_set_init.append("%s%s" % - (var.local_name, constraint_suffix)) + constraint_set_init.append("%s%s" % (var.local_name, constraint_suffix)) # Make variable index set variable_set_init = [] dual_variable_roots = [] - for (con_name, con_array) in sf.component_map(Constraint, active=True).items(): + for con_name, con_array in sf.component_map(Constraint, active=True).items(): for con in (con_array[ndx] for ndx in con_array.index_set()): dual_variable_roots.append(con.local_name) variable_set_init.append("%s%s" % (variable_prefix, con.local_name)) @@ -160,14 +165,14 @@ def _create_using(self, model, **kwds): # Make the dual constraints def constraintRule(A, c, ndx, model): - return sum(A[v][ndx] * model.vars[v] for v in model.var_set) <= \ - c[ndx] - dual.cons = Constraint(dual.con_set, - rule=partial(constraintRule, A, c)) + return sum(A[v][ndx] * model.vars[v] for v in model.var_set) <= c[ndx] + + dual.cons = Constraint(dual.con_set, rule=partial(constraintRule, A, c)) # Make the dual objective (maximizing) def objectiveRule(b, model): return sum(b[v] * model.vars[v] for v in model.var_set) + dual.obj = Objective(rule=partial(objectiveRule, b), sense=maximize) return dual.create() @@ -207,4 +212,3 @@ def __getitem__(self, ndx): return self._default_func() else: return self._default_value - diff --git a/pyomo/duality/plugins.py b/pyomo/duality/plugins.py index 2d67c223371..9a8e10b4cfc 100644 --- a/pyomo/duality/plugins.py +++ b/pyomo/duality/plugins.py @@ -12,23 +12,27 @@ import logging from pyomo.common.deprecation import deprecated -from pyomo.core.base import (Transformation, - TransformationFactory, - Var, - Constraint, - Objective, - minimize, - NonNegativeReals, - NonPositiveReals, - Reals, - Block, - Model, - ConcreteModel) +from pyomo.core.base import ( + Transformation, + TransformationFactory, + Var, + Constraint, + Objective, + minimize, + NonNegativeReals, + NonPositiveReals, + Reals, + Block, + Model, + ConcreteModel, +) from pyomo.duality.collect import collect_linear_terms + def load(): pass + logger = logging.getLogger('pyomo.core') @@ -39,22 +43,23 @@ def load(): # This returns a new Block object. # @TransformationFactory.register( - 'duality.linear_dual', doc="[DEPRECATED] Dualize a linear model") + 'duality.linear_dual', doc="[DEPRECATED] Dualize a linear model" +) @deprecated( "Use of the pyomo.duality package is deprecated. There are known bugs " "in pyomo.duality, and we do not recommend the use of this code. " "Development of dualization capabilities has been shifted to " "the Pyomo Adversarial Optimization (PAO) library. Please contact " "William Hart for further details (wehart@sandia.gov).", - version='5.6.2') + version='5.6.2', +) class LinearDual_PyomoTransformation(Transformation): - def __init__(self): super(LinearDual_PyomoTransformation, self).__init__() def _create_using(self, instance, **kwds): options = kwds.pop('options', {}) - bname = options.get('block',None) + bname = options.get('block', None) # # Iterate over the model collecting variable data, # until the block is found. @@ -63,11 +68,11 @@ def _create_using(self, instance, **kwds): if block is None: block = instance else: - for (name, data) in instance.component_map(Block, active=True).items(): + for name, data in instance.component_map(Block, active=True).items(): if name == bname: block = instance if block is None: - raise RuntimeError("Missing block: "+bname) + raise RuntimeError("Missing block: " + bname) # # Generate the dual # @@ -82,7 +87,16 @@ def _dualize(self, block, unfixed=[]): # # Collect linear terms from the block # - A, b_coef, c_rhs, c_sense, d_sense, vnames, cnames, v_domain = collect_linear_terms(block, unfixed) + ( + A, + b_coef, + c_rhs, + c_sense, + d_sense, + vnames, + cnames, + v_domain, + ) = collect_linear_terms(block, unfixed) ##print(A) ##print(vnames) ##print(cnames) @@ -100,26 +114,36 @@ def _dualize(self, block, unfixed=[]): dual = Block() dual.construct() _vars = {} + def getvar(name, ndx=None): - v = _vars.get((name,ndx), None) + v = _vars.get((name, ndx), None) if v is None: v = Var() if ndx is None: v_name = name elif type(ndx) is tuple: - v_name = "%s[%s]" % (name, ','.join(map(str,ndx))) + v_name = "%s[%s]" % (name, ','.join(map(str, ndx))) else: v_name = "%s[%s]" % (name, str(ndx)) setattr(dual, v_name, v) - _vars[name,ndx] = v + _vars[name, ndx] = v return v + # # Construct the objective # if d_sense == minimize: - dual.o = Objective(expr=sum(- b_coef[name,ndx]*getvar(name,ndx) for name,ndx in b_coef), sense=d_sense) + dual.o = Objective( + expr=sum( + -b_coef[name, ndx] * getvar(name, ndx) for name, ndx in b_coef + ), + sense=d_sense, + ) else: - dual.o = Objective(expr=sum(b_coef[name,ndx]*getvar(name,ndx) for name,ndx in b_coef), sense=d_sense) + dual.o = Objective( + expr=sum(b_coef[name, ndx] * getvar(name, ndx) for name, ndx in b_coef), + sense=d_sense, + ) # # Construct the constraints # @@ -131,16 +155,16 @@ def getvar(name, ndx=None): if not (cname, ndx) in c_rhs: c_rhs[cname, ndx] = 0.0 if c_sense[cname, ndx] == 'e': - e = expr - c_rhs[cname,ndx] == 0 + e = expr - c_rhs[cname, ndx] == 0 elif c_sense[cname, ndx] == 'l': - e = expr - c_rhs[cname,ndx] <= 0 + e = expr - c_rhs[cname, ndx] <= 0 else: - e = expr - c_rhs[cname,ndx] >= 0 + e = expr - c_rhs[cname, ndx] >= 0 c = Constraint(expr=e) if ndx is None: c_name = cname elif type(ndx) is tuple: - c_name = "%s[%s]" % (cname, ','.join(map(str,ndx))) + c_name = "%s[%s]" % (cname, ','.join(map(str, ndx))) else: c_name = "%s[%s]" % (cname, str(ndx)) setattr(dual, c_name, c) diff --git a/pyomo/duality/tests/t1_linear_dual.lp b/pyomo/duality/tests/t1_linear_dual.lp index 37acf8f3d96..f5d9dc6aa73 100644 --- a/pyomo/duality/tests/t1_linear_dual.lp +++ b/pyomo/duality/tests/t1_linear_dual.lp @@ -2,9 +2,9 @@ max o: -+5 c1 -+3 c2 -+4 c3 ++5.0 c1 ++3.0 c2 ++4.0 c3 s.t. @@ -24,9 +24,6 @@ c_u_x3_: +1 c3 <= 2 -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 - bounds 0 <= c1 <= +inf 0 <= c2 <= +inf diff --git a/pyomo/duality/tests/t5_linear_dual.lp b/pyomo/duality/tests/t5_linear_dual.lp index 8d0cf076cb9..9e3f6d0b3bf 100644 --- a/pyomo/duality/tests/t5_linear_dual.lp +++ b/pyomo/duality/tests/t5_linear_dual.lp @@ -2,31 +2,28 @@ min o: --100 c1 --100 c2 --100 c3 --100 c4 +-100.0 c1 +-100.0 c2 +-100.0 c3 +-100.0 c4 s.t. c_u_x1_: -+4.4400000000000004 c1 ++4.44 c1 +4 c3 +3 c4 <= -3 c_u_x2_: -+6.6699999999999999 c2 -+2.8599999999999999 c3 ++6.67 c2 ++2.86 c3 +6 c4 <= -2.5 -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 - bounds - -inf <= c1 <= 0 - -inf <= c2 <= 0 - -inf <= c3 <= 0 - -inf <= c4 <= 0 + -inf <= c1 <= 0 + -inf <= c2 <= 0 + -inf <= c3 <= 0 + -inf <= c4 <= 0 end diff --git a/pyomo/duality/tests/test_linear_dual.py b/pyomo/duality/tests/test_linear_dual.py index 8ab75931d9d..ba3554bdc50 100644 --- a/pyomo/duality/tests/test_linear_dual.py +++ b/pyomo/duality/tests/test_linear_dual.py @@ -16,25 +16,26 @@ from os.path import abspath, dirname, normpath, join currdir = dirname(abspath(__file__)) -exdir = normpath(join(currdir,'..','..','..','examples','pyomo','core')) +exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'pyomo', 'core')) -from filecmp import cmp import pyomo.common.unittest as unittest from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args +from pyomo.repn.tests.lp_diff import load_and_compare_lp_baseline from pyomo.scripting.util import cleanup import pyomo.scripting.pyomo_main as main solver = None -class CommonTests(object): + +class CommonTests(object): solve = True def run_bilevel(self, *_args, **kwds): if self.solve: args = ['solve'] - _solver = kwds.get('solver','glpk') + _solver = kwds.get('solver', 'glpk') args.append('--solver=%s' % _solver) args.append('--save-results=result.yml') args.append('--results-format=json') @@ -48,7 +49,7 @@ def run_bilevel(self, *_args, **kwds): if False: args.append('--stream-solver') - args.append('--tempdir='+currdir) + args.append('--tempdir=' + currdir) args.append('--keepfiles') args.append('--debug') args.append('--logging=verbose') @@ -57,7 +58,7 @@ def run_bilevel(self, *_args, **kwds): os.chdir(currdir) print('***') - #print(' '.join(args)) + # print(' '.join(args)) output = main.main(args) try: output = main.main(args) @@ -71,7 +72,7 @@ def check(self, problem, solver): pass def referenceFile(self, problem, solver): - return join(currdir, problem+'.txt') + return join(currdir, problem + '.txt') def getObjective(self, fname): FILE = open(fname) @@ -86,98 +87,103 @@ def getObjective(self, fname): def updateDocStrings(self): for key in dir(self): if key.startswith('test'): - getattr(self,key).__doc__ = " (%s)" % getattr(self,key).__name__ + getattr(self, key).__doc__ = " (%s)" % getattr(self, key).__name__ def test_t5(self): - self.problem='test_t5' - self.run_bilevel(join(exdir,'t5.py')) - self.check( 't5', 'linear_dual' ) + self.problem = 'test_t5' + self.run_bilevel(join(exdir, 't5.py')) + self.check('t5', 'linear_dual') def test_t1(self): - self.problem='test_t1' - self.run_bilevel(join(exdir,'t1.py')) - self.check( 't1', 'linear_dual' ) + self.problem = 'test_t1' + self.run_bilevel(join(exdir, 't1.py')) + self.check('t1', 'linear_dual') class Reformulate(unittest.TestCase, CommonTests): - - solve=False + solve = False def tearDown(self): - if os.path.exists(os.path.join(currdir,'result.yml')): - os.remove(os.path.join(currdir,'result.yml')) + if os.path.exists(os.path.join(currdir, 'result.yml')): + os.remove(os.path.join(currdir, 'result.yml')) @classmethod def setUpClass(cls): import pyomo.environ - def run_bilevel(self, *args, **kwds): + def run_bilevel(self, *args, **kwds): args = list(args) - args.append('--output='+self.problem+'_result.lp') + args.append('--output=' + self.problem + '_result.lp') kwds['transform'] = 'duality.linear_dual' CommonTests.run_bilevel(self, *args, **kwds) def referenceFile(self, problem, solver): - return join(currdir, problem+"_"+solver+'.lp') + return join(currdir, problem + "_" + solver + '.lp') def check(self, problem, solver): - _prob, _solv = join(currdir,self.problem+'_result.lp'), self.referenceFile(problem,solver) - self.assertTrue(cmp(_prob, _solv), - msg="Files %s and %s differ" % (_prob, _solv)) + self.assertEqual( + *load_and_compare_lp_baseline( + self.referenceFile(problem, solver), + join(currdir, self.problem + '_result.lp'), + ) + ) class Solver(unittest.TestCase): - @classmethod def setUpClass(cls): import pyomo.environ def tearDown(self): - if os.path.exists(os.path.join(currdir,'result.yml')): - os.remove(os.path.join(currdir,'result.yml')) + if os.path.exists(os.path.join(currdir, 'result.yml')): + os.remove(os.path.join(currdir, 'result.yml')) def check(self, problem, solver): - refObj = self.getObjective(self.referenceFile(problem,solver)) - ansObj = self.getObjective(join(currdir,'result.yml')) + refObj = self.getObjective(self.referenceFile(problem, solver)) + ansObj = self.getObjective(join(currdir, 'result.yml')) self.assertEqual(len(refObj), len(ansObj)) for i in range(len(refObj)): self.assertEqual(len(refObj[i]), len(ansObj[i])) - for key,val in refObj[i].items(): - self.assertAlmostEqual(val['Value'], ansObj[i].get(key,None)['Value'], places=3) + for key, val in refObj[i].items(): + self.assertAlmostEqual( + val['Value'], ansObj[i].get(key, None)['Value'], places=3 + ) class Solve_GLPK(Solver, CommonTests): - @classmethod def setUpClass(cls): global solvers import pyomo.environ + solvers = pyomo.opt.check_available_solvers('glpk') def setUp(self): if (not yaml_available) or (not 'glpk' in solvers): - self.skipTest("YAML is not available or " - "the 'glpk' executable is not available") + self.skipTest( + "YAML is not available or the 'glpk' executable is not available" + ) - def run_bilevel(self, *args, **kwds): + def run_bilevel(self, *args, **kwds): kwds['solver'] = 'glpk' CommonTests.run_bilevel(self, *args, **kwds) class Solve_CPLEX(Solver, CommonTests): - @classmethod def setUpClass(cls): global solvers import pyomo.environ + solvers = pyomo.opt.check_available_solvers('cplex') def setUp(self): if (not yaml_available) or (not 'cplex' in solvers): - self.skipTest("YAML is not available or " - "the 'cplex' executable is not available") + self.skipTest( + "YAML is not available or the 'cplex' executable is not available" + ) - def run_bilevel(self, *args, **kwds): + def run_bilevel(self, *args, **kwds): kwds['solver'] = 'cplex' CommonTests.run_bilevel(self, *args, **kwds) diff --git a/pyomo/environ/__init__.py b/pyomo/environ/__init__.py index 99c3a0149d1..51c68449247 100644 --- a/pyomo/environ/__init__.py +++ b/pyomo/environ/__init__.py @@ -12,8 +12,11 @@ import sys as _sys import importlib + + def _do_import(pkg_name): - importlib.import_module(pkg_name) + importlib.import_module(pkg_name) + # # These packages contain plugins that need to be loaded @@ -24,7 +27,6 @@ def _do_import(pkg_name): 'pyomo.opt', 'pyomo.dataportal', 'pyomo.duality', - 'pyomo.checker', 'pyomo.repn', 'pyomo.neos', 'pyomo.solvers', @@ -36,6 +38,7 @@ def _do_import(pkg_name): 'pyomo.contrib.ampl_function_demo', 'pyomo.contrib.appsi', 'pyomo.contrib.community_detection', + 'pyomo.contrib.cp', 'pyomo.contrib.example', 'pyomo.contrib.fme', 'pyomo.contrib.gdp_bounds', @@ -62,10 +65,12 @@ def _import_packages(): except ImportError: exctype, err, tb = _sys.exc_info() # BUG? import traceback - msg = "pyomo.environ failed to import %s:\nOriginal %s: %s\n" \ - "Traceback:\n%s" \ - % (pname, exctype.__name__, err, - ''.join(traceback.format_tb(tb)),) + + msg = ( + "pyomo.environ failed to import %s:\nOriginal %s: %s\n" + "Traceback:\n%s" + % (pname, exctype.__name__, err, ''.join(traceback.format_tb(tb))) + ) # clear local variables to remove circular references exctype = err = tb = None # TODO: Should this just log an error and re-raise the @@ -86,75 +91,185 @@ def _import_packages(): from pyomo.common.collections import ComponentMap import pyomo.core.base.indexed_component import pyomo.core.base.util -from pyomo.core import expr, base, beta, kernel, plugins +from pyomo.core import expr, base, kernel, plugins from pyomo.core.base import util -from pyomo.core import (numvalue, numeric_expr, boolean_value, - current, symbol_map, sympy_tools, - taylor_series, visitor, expr_common, expr_errors, - calculus, native_types, - linear_expression, nonlinear_expression, - land, lor, equivalent, exactly, - atleast, atmost, implies, lnot, - xor, inequality, log, log10, sin, cos, tan, cosh, - sinh, tanh, asin, acos, atan, exp, sqrt, asinh, acosh, - atanh, ceil, floor, Expr_if, differentiate, - taylor_series_expansion, SymbolMap, PyomoObject, - nonpyomo_leaf_types, native_numeric_types, - value, is_constant, is_fixed, is_variable_type, - is_potentially_variable, polynomial_degree, - NumericValue, ZeroConstant, as_boolean, BooleanConstant, - BooleanValue, native_logical_values, minimize, - maximize, PyomoOptions, Expression, CuidLabeler, - CounterLabeler, NumericLabeler, - CNameLabeler, TextLabeler, - AlphaNumericTextLabeler, NameLabeler, ShortNameLabeler, - name, Component, ComponentUID, BuildAction, - BuildCheck, Set, SetOf, simple_set_rule, RangeSet, - Param, Var, VarList, ScalarVar, - BooleanVar, BooleanVarList, ScalarBooleanVar, - logical_expr, simple_constraint_rule, - simple_constraintlist_rule, ConstraintList, - Constraint, LogicalConstraint, - LogicalConstraintList, simple_objective_rule, - simple_objectivelist_rule, Objective, - ObjectiveList, Connector, SOSConstraint, - Piecewise, active_export_suffix_generator, - active_import_suffix_generator, Suffix, - ExternalFunction, symbol_map_from_instance, - Reference, Reals, PositiveReals, NonPositiveReals, - NegativeReals, NonNegativeReals, Integers, - PositiveIntegers, NonPositiveIntegers, - NegativeIntegers, NonNegativeIntegers, - Boolean, Binary, Any, AnyWithNone, EmptySet, - UnitInterval, PercentFraction, RealInterval, - IntegerInterval, display, SortComponents, - TraversalStrategy, Block, ScalarBlock, - active_components, components, - active_components_data, components_data, - global_option, Model, ConcreteModel, - AbstractModel, - ModelComponentFactory, Transformation, - TransformationFactory, instance2dat, - set_options, RealSet, IntegerSet, BooleanSet, - prod, quicksum, sum_product, dot_product, - summation, sequence) +from pyomo.core import ( + numvalue, + numeric_expr, + boolean_value, + symbol_map, + sympy_tools, + taylor_series, + visitor, + expr_common, + expr_errors, + calculus, + native_types, + linear_expression, + nonlinear_expression, + land, + lor, + equivalent, + exactly, + atleast, + atmost, + implies, + lnot, + xor, + inequality, + log, + log10, + sin, + cos, + tan, + cosh, + sinh, + tanh, + asin, + acos, + atan, + exp, + sqrt, + asinh, + acosh, + atanh, + ceil, + floor, + Expr_if, + differentiate, + taylor_series_expansion, + SymbolMap, + PyomoObject, + nonpyomo_leaf_types, + native_numeric_types, + value, + is_constant, + is_fixed, + is_variable_type, + is_potentially_variable, + polynomial_degree, + NumericValue, + ZeroConstant, + as_boolean, + BooleanConstant, + BooleanValue, + native_logical_values, + minimize, + maximize, + PyomoOptions, + Expression, + CuidLabeler, + CounterLabeler, + NumericLabeler, + CNameLabeler, + TextLabeler, + AlphaNumericTextLabeler, + NameLabeler, + ShortNameLabeler, + name, + Component, + ComponentUID, + BuildAction, + BuildCheck, + Set, + SetOf, + simple_set_rule, + RangeSet, + Param, + Var, + VarList, + ScalarVar, + BooleanVar, + BooleanVarList, + ScalarBooleanVar, + logical_expr, + simple_constraint_rule, + simple_constraintlist_rule, + ConstraintList, + Constraint, + LogicalConstraint, + LogicalConstraintList, + simple_objective_rule, + simple_objectivelist_rule, + Objective, + ObjectiveList, + Connector, + SOSConstraint, + Piecewise, + active_export_suffix_generator, + active_import_suffix_generator, + Suffix, + ExternalFunction, + symbol_map_from_instance, + Reference, + Reals, + PositiveReals, + NonPositiveReals, + NegativeReals, + NonNegativeReals, + Integers, + PositiveIntegers, + NonPositiveIntegers, + NegativeIntegers, + NonNegativeIntegers, + Boolean, + Binary, + Any, + AnyWithNone, + EmptySet, + UnitInterval, + PercentFraction, + RealInterval, + IntegerInterval, + display, + SortComponents, + TraversalStrategy, + Block, + ScalarBlock, + active_components, + components, + active_components_data, + components_data, + global_option, + Model, + ConcreteModel, + AbstractModel, + ModelComponentFactory, + Transformation, + TransformationFactory, + instance2dat, + set_options, + RealSet, + IntegerSet, + BooleanSet, + prod, + quicksum, + sum_product, + dot_product, + summation, + sequence, +) from pyomo.opt import ( - SolverFactory, SolverManagerFactory, UnknownSolver, - TerminationCondition, SolverStatus, check_optimal_termination, - assert_optimal_termination - ) + SolverFactory, + SolverManagerFactory, + UnknownSolver, + TerminationCondition, + SolverStatus, + check_optimal_termination, + assert_optimal_termination, +) from pyomo.core.base.units_container import units, as_quantity # These APIs are deprecated and should be removed in the near future from pyomo.common.deprecation import relocated_module_attribute + relocated_module_attribute( - 'SimpleBlock', 'pyomo.core.base.block.SimpleBlock', version='6.0') -relocated_module_attribute( - 'SimpleVar', 'pyomo.core.base.var.SimpleVar', version='6.0') + 'SimpleBlock', 'pyomo.core.base.block.SimpleBlock', version='6.0' +) +relocated_module_attribute('SimpleVar', 'pyomo.core.base.var.SimpleVar', version='6.0') relocated_module_attribute( - 'SimpleBooleanVar', 'pyomo.core.base.boolean_var.SimpleBooleanVar', - version='6.0' + 'SimpleBooleanVar', 'pyomo.core.base.boolean_var.SimpleBooleanVar', version='6.0' ) del relocated_module_attribute diff --git a/pyomo/environ/tests/standalone_minimal_pyomo_driver.py b/pyomo/environ/tests/standalone_minimal_pyomo_driver.py index 6151a642e99..88f8e9f8651 100644 --- a/pyomo/environ/tests/standalone_minimal_pyomo_driver.py +++ b/pyomo/environ/tests/standalone_minimal_pyomo_driver.py @@ -13,9 +13,9 @@ from io import StringIO from pyomo.common.log import LoggingIntercept from pyomo.common.tee import capture_output +from pyomo.repn.tests.lp_diff import lp_diff -_baseline = """ -\\* Source Pyomo model name=unknown *\\ +_baseline = """\\* Source Pyomo model name=unknown *\\ min x2: @@ -37,31 +37,35 @@ end """ + def _check_log_and_out(LOG, OUT, offset, msg=None): sys.stdout.flush() sys.stderr.flush() msg = str(msg) + ': ' if msg else '' if LOG.getvalue(): raise RuntimeError( - "FAIL: %sMessage logged to the Logger:\n>>>\n%s<<<" % ( - msg, LOG.getvalue(),)) + "FAIL: %sMessage logged to the Logger:\n>>>\n%s<<<" % (msg, LOG.getvalue()) + ) if OUT.getvalue(): raise RuntimeError( - "FAIL: %sMessage sent to stdout/stderr:\n>>>\n%s<<<" % ( - msg, OUT.getvalue(),)) + "FAIL: %sMessage sent to stdout/stderr:\n>>>\n%s<<<" % (msg, OUT.getvalue()) + ) def import_pyomo_environ(): with LoggingIntercept() as LOG, capture_output(capture_fd=True) as OUT: import pyomo.environ as pyo + globals()['pyo'] = pyo _check_log_and_out(LOG, OUT, 0) + def run_writer_test(): with LoggingIntercept() as LOG, capture_output(capture_fd=True) as OUT: # Enumerate the writers... from pyomo.opt import WriterFactory + info = [] for writer in sorted(WriterFactory): info.append(" %s: %s" % (writer, WriterFactory.doc(writer))) @@ -79,16 +83,19 @@ def run_writer_test(): m.o = pyo.Objective(expr=m.x**2) from pyomo.common.tempfiles import TempfileManager + with TempfileManager: - fname = TempfileManager.create_tempfile(suffix='pyomo.lp') - m.write(fname) + fname = TempfileManager.create_tempfile(suffix='pyomo.lp_v1') + m.write(fname, format='lp_v1') with open(fname, 'r') as FILE: data = FILE.read() - if not all(d.strip() == b.strip() for d,b in zip( - data.strip().splitlines(), _baseline.strip().splitlines())): - print("Result did not match baseline.\nRESULT:\n%s\nBASELINE:\n%s" - % (data, _baseline)) + base, test = lp_diff(_baseline, data) + if base != test: + print( + "Result did not match baseline.\nRESULT:\n%s\nBASELINE:\n%s" + % (data, _baseline) + ) print(data.strip().splitlines()) print(_baseline.strip().splitlines()) sys.exit(2) @@ -97,12 +104,7 @@ def run_writer_test(): def run_solverfactory_test(): - skip_solvers = { - 'py', - 'xpress', - '_xpress_shell', - '_mock_xpress', - } + skip_solvers = {'py', 'xpress', '_xpress_shell', '_mock_xpress'} with LoggingIntercept() as LOG, capture_output(capture_fd=True) as OUT: info = [] @@ -115,7 +117,7 @@ def run_solverfactory_test(): else: _avail = str(pyo.SolverFactory(solver).available(False)) info.append(" %s(%s): %s" % (solver, _avail, _doc)) - #_check_log_and_out(LOG, OUT, 20, solver) + # _check_log_and_out(LOG, OUT, 20, solver) glpk = pyo.SolverFactory('glpk') @@ -143,15 +145,13 @@ def run_transformationfactory_test(): bigm = pyo.TransformationFactory('gdp.bigm') - print("") print("Pyomo Transformations") print("---------------------") print('\n'.join(info)) if not isinstance(bigm, pyo.Transformation): - print("TransformationFactory(gdp.bigm) did not return a " - "transformation") + print("TransformationFactory(gdp.bigm) did not return a transformation") sys.exit(4) _check_log_and_out(LOG, OUT, 30) diff --git a/pyomo/environ/tests/test_environ.py b/pyomo/environ/tests/test_environ.py index e6c6ee10f98..e22fce7546c 100644 --- a/pyomo/environ/tests/test_environ.py +++ b/pyomo/environ/tests/test_environ.py @@ -21,8 +21,10 @@ import pyomo.common.unittest as unittest from pyomo.common.dependencies import numpy_available, attempt_import + pyro4, pyro4_available = attempt_import('Pyro4') + class ImportData(object): def __init__(self): self.tpl = {} @@ -32,84 +34,104 @@ def update(self, other): self.tpl.update(other.tpl) self.pyomo.update(other.pyomo) + def collect_import_time(module): - output = subprocess.check_output( - [sys.executable, '-X', 'importtime', '-c', 'import %s' % (module,)], - stderr=subprocess.STDOUT) - # Note: test only runs in PY3 - output = output.decode() - line_re = re.compile(r'.*:\s*(\d+) \|\s*(\d+) \| ( *)([^ ]+)') - data = [] - for line in output.splitlines(): - g = line_re.match(line) - if not g: - continue - _self = int(g.group(1)) - _cumul = int(g.group(2)) - _level = len(g.group(3)) // 2 - _module = g.group(4) - #print("%6d %8d %2d %s" % (_self, _cumul, _level, _module)) - while len(data) < _level+1: - data.append(ImportData()) - if len(data) > _level+1: - assert len(data) == _level+2 - inner = data.pop() - inner.tpl = { - (k if '(from' in k else "%s (from %s)" % (k, _module), - v) for k,v in inner.tpl.items() } - if _module.startswith('pyomo'): - data[_level].update(inner) - data[_level].pyomo[_module] = _self - else: - if _level > 0: - data[_level].tpl[_module] = _cumul - elif _module.startswith('pyomo'): + output = subprocess.check_output( + [sys.executable, '-X', 'importtime', '-c', 'import %s' % (module,)], + stderr=subprocess.STDOUT, + ) + # Note: test only runs in PY3 + output = output.decode() + line_re = re.compile(r'.*:\s*(\d+) \|\s*(\d+) \| ( *)([^ ]+)') + data = [] + for line in output.splitlines(): + g = line_re.match(line) + if not g: + continue + _self = int(g.group(1)) + _cumul = int(g.group(2)) + _level = len(g.group(3)) // 2 + _module = g.group(4) + # print("%6d %8d %2d %s" % (_self, _cumul, _level, _module)) + while len(data) < _level + 1: + data.append(ImportData()) + if len(data) > _level + 1: + assert len(data) == _level + 2 + inner = data.pop() + inner.tpl = { + (k if '(from' in k else "%s (from %s)" % (k, _module), v) + for k, v in inner.tpl.items() + } + if _module.startswith('pyomo'): + data[_level].update(inner) data[_level].pyomo[_module] = _self - elif _level > 0: - data[_level].tpl[_module] = _self - assert len(data) == 1 - return data[0] + else: + if _level > 0: + data[_level].tpl[_module] = _cumul + elif _module.startswith('pyomo'): + data[_level].pyomo[_module] = _self + elif _level > 0: + data[_level].tpl[_module] = _self + assert len(data) == 1 + return data[0] class TestPyomoEnviron(unittest.TestCase): - def test_not_auto_imported(self): - rc = subprocess.call([ - sys.executable, '-c', + rc = subprocess.call( + [ + sys.executable, + '-c', 'import pyomo.core, sys; ' - 'sys.exit( 1 if "pyomo.environ" in sys.modules else 0 )']) + 'sys.exit( 1 if "pyomo.environ" in sys.modules else 0 )', + ] + ) if rc: - self.fail("Importing pyomo.core automatically imports " - "pyomo.environ and it should not.") - + self.fail( + "Importing pyomo.core automatically imports " + "pyomo.environ and it should not." + ) - @unittest.skipIf(sys.version_info[:2] < (3,7), - "Import timing introduced in python 3.7") - @unittest.skipIf('pypy_version_info' in dir(sys), - "PyPy does not support '-X importtime") + @unittest.skipIf( + sys.version_info[:2] < (3, 7), "Import timing introduced in python 3.7" + ) + @unittest.skipIf( + 'pypy_version_info' in dir(sys), "PyPy does not support '-X importtime" + ) def test_tpl_import_time(self): data = collect_import_time('pyomo.environ') pyomo_time = sum(data.pyomo.values()) tpl_time = sum(data.tpl.values()) total = float(pyomo_time + tpl_time) print("Pyomo (by module time):") - print("\n".join(" %s: %s" % i for i in sorted( - data.pyomo.items(), key=lambda x: x[1]))) + print( + "\n".join( + " %s: %s" % i for i in sorted(data.pyomo.items(), key=lambda x: x[1]) + ) + ) print("TPLS:") _line_fmt = " %%%ds: %%6d %%s" % ( - max(len(k[:k.find(' ')]) for k in data.tpl),) - print("\n".join(_line_fmt % (k[:k.find(' ')], v, k[k.find(' '):]) - for k,v in sorted(data.tpl.items()))) + max(len(k[: k.find(' ')]) for k in data.tpl), + ) + print( + "\n".join( + _line_fmt % (k[: k.find(' ')], v, k[k.find(' ') :]) + for k, v in sorted(data.tpl.items()) + ) + ) tpl = {} for k, v in data.tpl.items(): - _mod = k[:k.find(' ')].split('.')[0] - tpl[_mod] = tpl.get(_mod,0) + v + _mod = k[: k.find(' ')].split('.')[0] + tpl[_mod] = tpl.get(_mod, 0) + v tpl_by_time = sorted(tpl.items(), key=lambda x: x[1]) print("TPLS (by package time):") - print("\n".join(" %12s: %6d (%4.1f%%)" % ( - m, t, 100*t/total) for m, t in tpl_by_time)) - print("Pyomo: %6d (%4.1f%%)" % ( - pyomo_time, 100 * pyomo_time / total)) + print( + "\n".join( + " %12s: %6d (%4.1f%%)" % (m, t, 100 * t / total) + for m, t in tpl_by_time + ) + ) + print("Pyomo: %6d (%4.1f%%)" % (pyomo_time, 100 * pyomo_time / total)) print("TPL: %6d (%4.1f%%)" % (tpl_time, 100 * tpl_time / total)) # Arbitrarily choose a threshold 10% more than the expected # value (at time of writing, TPL imports were 52-57% of the @@ -121,30 +143,31 @@ def test_tpl_import_time(self): ref = { '__future__', 'argparse', - 'ast', # Imported on Windows - 'base64', # Imported on Windows + 'ast', # Imported on Windows + 'backports_abc', # Imported by cython on Linux + 'base64', # Imported on Windows 'cPickle', 'csv', 'ctypes', 'decimal', - 'gc', # Imported on MacOS, Windows; Linux in 3.10 + 'gc', # Imported on MacOS, Windows; Linux in 3.10 'glob', - 'heapq', # Added in Python 3.10 - 'importlib', # Imported on Windows + 'heapq', # Added in Python 3.10 + 'importlib', # Imported on Windows 'inspect', - 'json', # Imported on Windows - 'locale', # Added in Python 3.9 + 'json', # Imported on Windows + 'locale', # Added in Python 3.9 'logging', 'pickle', 'platform', - 'random', # Imported on MacOS, Windows + 'random', # Imported on MacOS, Windows 'shlex', - 'socket', # Imported on MacOS, Windows; Linux in 3.10 + 'socket', # Imported on MacOS, Windows; Linux in 3.10 'tempfile', # Imported on MacOS, Windows 'textwrap', 'typing', - 'win32file', # Imported on Windows - 'win32pipe', # Imported on Windows + 'win32file', # Imported on Windows + 'win32pipe', # Imported on Windows } # Non-standard-library TPLs that Pyomo will load unconditionally ref.add('ply') @@ -153,8 +176,8 @@ def test_tpl_import_time(self): ref.add('numpy') diff = set(_[0] for _ in tpl_by_time[-5:]).difference(ref) self.assertEqual( - diff, set(), - "Unexpected module found in 5 slowest-loading TPL modules") + diff, set(), "Unexpected module found in 5 slowest-loading TPL modules" + ) if __name__ == "__main__": diff --git a/pyomo/environ/tests/test_package_layout.py b/pyomo/environ/tests/test_package_layout.py index e905ff7ef44..0bc8c55113a 100644 --- a/pyomo/environ/tests/test_package_layout.py +++ b/pyomo/environ/tests/test_package_layout.py @@ -11,21 +11,31 @@ # # Unit Tests for pyomo.base.misc # + +import glob +import importlib import os +import subprocess +import sys + +from itertools import filterfalse from os.path import join +import pyomo.common.dependencies as dependencies from pyomo.common.fileutils import PYOMO_ROOT_DIR import pyomo.common.unittest as unittest +parameterized, param_available = dependencies.attempt_import('parameterized') +parameterized = parameterized.parameterized + +_FAST_TEST = False # List of directories under `pyomo` that intentionally do NOT have # __init__.py files (because they either contain no Python files - or # contain Python files that are only used in testing and explicitly NOT # part of the "Pyomo package") _NON_MODULE_DIRS = { - join('checker', 'doc'), - join('checker', 'tests', 'examples'), join('contrib', 'ampl_function_demo', 'src'), join('contrib', 'appsi', 'cmodel', 'src'), join('contrib', 'pynumero', 'src'), @@ -43,6 +53,36 @@ join('solvers', 'tests', 'piecewise_linear', 'problems'), } +_DO_NOT_IMPORT_MODULES = { + 'pyomo.common.tests.dep_mod_except', + 'pyomo.contrib.interior_point.examples.ex1', +} + +try: + _cwd = os.getcwd() + os.chdir(os.path.join(PYOMO_ROOT_DIR, 'pyomo')) + modules = sorted( + os.path.join('pyomo', os.path.splitext(fname)[0]).replace(os.path.sep, '.') + for fname in glob.glob(os.path.join('**', '*.py'), recursive=True) + if not fname.endswith('__init__.py') + ) + modules = list(filterfalse(_DO_NOT_IMPORT_MODULES.__contains__, modules)) +finally: + os.chdir(_cwd) + + +import_test = """ +import pyomo.common.dependencies +pyomo.common.dependencies.SUPPRESS_DEPENDENCY_WARNINGS = True +import unittest +try: + import %s +except unittest.case.SkipTest as e: + # suppress the exception, but print the message + print(e) +""" + + class TestPackageLayout(unittest.TestCase): def test_for_init_files(self): _NMD = set(_NON_MODULE_DIRS) @@ -50,7 +90,7 @@ def test_for_init_files(self): module_dir = os.path.join(PYOMO_ROOT_DIR, 'pyomo') for path, subdirs, files in os.walk(module_dir): assert path.startswith(module_dir) - relpath = path[1+len(module_dir):] + relpath = path[1 + len(module_dir) :] # Skip all __pycache__ directories try: subdirs.remove('__pycache__') @@ -80,4 +120,44 @@ def test_for_init_files(self): + "\n\t".join(sorted(_NMD)) ) + @parameterized.expand(modules) + @unittest.pytest.mark.importtest + def test_module_import(self, module): + # We will go through the entire package and ensure that all the + # python modules are a least importable. This is important to + # be tested on the newest Python version (in part to catch + # deprecation warnings before they become fatal parse errors). + module_file = ( + os.path.join(PYOMO_ROOT_DIR, module.replace('.', os.path.sep)) + '.py' + ) + # we need to delete the .pyc file, because some things (like + # invalid docstrings) only toss the warning when the module is + # initially byte-compiled. + pyc = importlib.util.cache_from_source(module_file) + if os.path.isfile(pyc): + os.remove(pyc) + test_code = import_test % module + if _FAST_TEST: + # This is much faster, as it only reloads each module once + # (no subprocess, and no reloading and dependent modules). + # However, it will generate false positives when reimporting + # a single module creates side effects (this happens in some + # of the testing harness for auto-registered test cases) + from pyomo.common.fileutils import import_file + import warnings + + try: + _dep_warn = dependencies.SUPPRESS_DEPENDENCY_WARNINGS + dependencies.SUPPRESS_DEPENDENCY_WARNINGS = True + with warnings.catch_warnings(): + warnings.resetwarnings() + warnings.filterwarnings('error') + import_file(module_file, clear_cache=True) + except unittest.SkipTest as e: + # suppress the exception, but print the message + print(e) + finally: + dependencies.SUPPRESS_DEPENDENCY_WARNINGS = _dep_warn + else: + subprocess.run([sys.executable, '-Werror', '-c', test_code], check=True) diff --git a/pyomo/gdp/__init__.py b/pyomo/gdp/__init__.py index 8e39088bad4..6fc2d4b7351 100644 --- a/pyomo/gdp/__init__.py +++ b/pyomo/gdp/__init__.py @@ -13,6 +13,5 @@ # Do not import these files: importing them registers the transformation # plugins with the pyomo script so that they get automatically invoked. -#import pyomo.gdp.bigm -#import pyomo.gdp.hull - +# import pyomo.gdp.bigm +# import pyomo.gdp.hull diff --git a/pyomo/gdp/basic_step.py b/pyomo/gdp/basic_step.py index e44f94c6ed4..69313ac2b1b 100644 --- a/pyomo/gdp/basic_step.py +++ b/pyomo/gdp/basic_step.py @@ -17,19 +17,18 @@ from pyomo.gdp.disjunct import Disjunct, Disjunction import logging + logger = logging.getLogger('pyomo.gdp') def _clone_all_but_indicator_vars(self): """Clone everything in a Disjunct except for the indicator_vars""" - memo = { - '__block_scope__': {id(self): True, id(None): False}, - id(self.indicator_var): self.indicator_var, - id(self.binary_indicator_var): self.binary_indicator_var, - } - new_block = copy.deepcopy(self, memo) - new_block._parent = None - return new_block + return self.clone( + { + id(self.indicator_var): self.indicator_var, + id(self.binary_indicator_var): self.binary_indicator_var, + } + ) def _squish_singletons(tuple_iter): @@ -45,25 +44,41 @@ def apply_basic_step(disjunctions_or_constraints): # # Basic steps only apply to XOR'd disjunctions # - disjunctions = list(obj for obj in disjunctions_or_constraints - if obj.ctype == Disjunction) - constraints = list(obj for obj in disjunctions_or_constraints - if obj.ctype == Constraint) + disjunctions = list( + obj for obj in disjunctions_or_constraints if obj.ctype is Disjunction + ) + constraints = list( + obj for obj in disjunctions_or_constraints if obj.ctype is Constraint + ) + if len(disjunctions) + len(constraints) != len(disjunctions_or_constraints): + raise ValueError( + 'apply_basic_step only accepts a list containing ' + 'Disjunctions or Constraints' + ) + if not disjunctions: + raise ValueError( + 'apply_basic_step: argument list must contain at least one Disjunction' + ) for d in disjunctions: if not d.xor: raise ValueError( "Basic steps can only be applied to XOR'd disjunctions\n\t" - "(raised by disjunction %s)" % (d.name,)) + "(raised by disjunction %s)" % (d.name,) + ) if not d.active: - logger.warning("Warning: applying basic step to a previously " - "deactivated disjunction (%s)" % (d.name,)) + logger.warning( + "Warning: applying basic step to a previously " + "deactivated disjunction (%s)" % (d.name,) + ) ans = Block(concrete=True) ans.DISJUNCTIONS = Set(initialize=range(len(disjunctions))) ans.INDEX = Set( dimen=len(disjunctions), - initialize=_squish_singletons(itertools.product( - *tuple( range(len(d.disjuncts)) for d in disjunctions )))) + initialize=_squish_singletons( + itertools.product(*tuple(range(len(d.disjuncts)) for d in disjunctions)) + ), + ) # # Form the individual disjuncts for the new basic step @@ -76,14 +91,16 @@ def apply_basic_step(disjunctions_or_constraints): # ans.disjuncts[idx].src = Block(ans.DISJUNCTIONS) for i in ans.DISJUNCTIONS: - tmp = _clone_all_but_indicator_vars(disjunctions[i].disjuncts[ - idx[i] if isinstance(idx, tuple) else idx]) - for k,v in list(tmp.component_map().items()): + src_disj = disjunctions[i].disjuncts[ + idx[i] if isinstance(idx, tuple) else idx + ] + tmp = _clone_all_but_indicator_vars(src_disj) + for k, v in list(tmp.component_map().items()): if v.parent_block() is not tmp: # Skip indicator_var and binary_indicator_var continue tmp.del_component(k) - ans.disjuncts[idx].src[i].add_component(k,v) + ans.disjuncts[idx].src[i].add_component(k, v) # Copy in the constraints corresponding to the improper disjunctions ans.disjuncts[idx].improper_constraints = ConstraintList() for constr in constraints: @@ -111,15 +128,17 @@ def apply_basic_step(disjunctions_or_constraints): orig_var = disjunctions[i].disjuncts[j].indicator_var orig_binary_var = orig_var.get_associated_binary() ans.indicator_links.add( - orig_binary_var == - sum( ans.disjuncts[idx].binary_indicator_var - for idx in ans.INDEX - if (idx[i] if isinstance(idx, tuple) else idx) == j )) + orig_binary_var + == sum( + ans.disjuncts[idx].binary_indicator_var + for idx in ans.INDEX + if (idx[i] if isinstance(idx, tuple) else idx) == j + ) + ) # and throw on a Reference to original on the block for v in (orig_var, orig_binary_var): name_base = v.getname(fully_qualified=True) - ans.add_component(unique_component_name( ans, name_base), - Reference(v)) + ans.add_component(unique_component_name(ans, name_base), Reference(v)) # Form the new disjunction ans.disjunction = Disjunction(expr=[ans.disjuncts[i] for i in ans.INDEX]) @@ -137,14 +156,19 @@ def apply_basic_step(disjunctions_or_constraints): if __name__ == '__main__': from pyomo.environ import ConcreteModel, Constraint, Var + m = ConcreteModel() + def _d(d, i): d.x = Var(range(i)) d.silly = Constraint(expr=d.indicator_var == i) - m.d = Disjunct([1,2], rule=_d) + + m.d = Disjunct([1, 2], rule=_d) + def _e(e, i): - e.y = Var(range(2,i)) - m.e = Disjunct([3,4,5], rule=_e) + e.y = Var(range(2, i)) + + m.e = Disjunct([3, 4, 5], rule=_e) m.dd = Disjunction(expr=[m.d[1], m.d[2]]) m.ee = Disjunction(expr=[m.e[3], m.e[4], m.e[5]]) diff --git a/pyomo/gdp/disjunct.py b/pyomo/gdp/disjunct.py index 3203fc775d2..b95ce252536 100644 --- a/pyomo/gdp/disjunct.py +++ b/pyomo/gdp/disjunct.py @@ -16,24 +16,35 @@ from math import fabs from weakref import ref as weakref_ref -from pyomo.common.deprecation import RenamedClass, deprecation_warning +from pyomo.common.autoslots import AutoSlots +from pyomo.common.deprecation import deprecation_warning, RenamedClass from pyomo.common.errors import PyomoException from pyomo.common.log import is_debug_set +from pyomo.common.numeric_types import native_logical_types, native_types from pyomo.common.modeling import unique_component_name, NOTSET from pyomo.common.timing import ConstructionTimer from pyomo.core import ( - ModelComponentFactory, Binary, Block, ConstraintList, Any, - LogicalConstraintList, BooleanValue, ScalarBooleanVar, ScalarVar, - value) + ModelComponentFactory, + Binary, + Block, + ConstraintList, + Any, + LogicalConstraintList, + BooleanValue, + ScalarBooleanVar, + ScalarVar, + value, +) from pyomo.core.base.component import ( - ActiveComponent, ActiveComponentData, ComponentData + ActiveComponent, + ActiveComponentData, + ComponentData, ) from pyomo.core.base.global_set import UnindexedComponent_index -from pyomo.core.base.numvalue import native_types from pyomo.core.base.block import _BlockData from pyomo.core.base.misc import apply_indexed_rule from pyomo.core.base.indexed_component import ActiveIndexedComponent - +from pyomo.core.expr.expr_common import ExpressionType logger = logging.getLogger('pyomo.gdp') @@ -45,6 +56,7 @@ your rule. """ + class GDP_Error(PyomoException): """Exception raised while processing GDP Models""" @@ -63,6 +75,8 @@ class AutoLinkedBinaryVar(ScalarVar): INTEGER_TOLERANCE = 0.001 + __autoslot_mappers__ = {'_associated_boolean': AutoSlots.weakref_mapper} + def __init__(self, boolean_var=None): super().__init__(domain=Binary) self._associated_boolean = weakref_ref(boolean_var) @@ -83,7 +97,8 @@ def set_value(self, val, skip_validation=False, _propagate_value=True): bool_val = bool(int(val + 0.5)) # (Setting _propagate_value prevents infinite recursion.) self.get_associated_boolean().set_value( - bool_val, skip_validation, _propagate_value=False) + bool_val, skip_validation, _propagate_value=False + ) def fix(self, value=NOTSET, skip_validation=False): super().fix(value, skip_validation) @@ -97,17 +112,6 @@ def unfix(self): if bool_var.is_fixed(): bool_var.unfix() - def __getstate__(self): - state = super().__getstate__() - if self._associated_boolean is not None: - state['_associated_boolean'] = self._associated_boolean() - return state - - def __setstate__(self, state): - super().__setstate__(state) - if self._associated_boolean is not None: - self._associated_boolean = weakref_ref(self._associated_boolean) - class AutoLinkedBooleanVar(ScalarBooleanVar): """A Boolean variable implicitly linked to its equivalent binary variable. @@ -132,7 +136,7 @@ class AutoLinkedBooleanVar(ScalarBooleanVar): """ - def as_binary(self): + def as_numeric(self): """Return the binary variable associated with this Boolean variable. This method returns the associated binary variable along with a @@ -145,10 +149,14 @@ def as_binary(self): "binary variable is deprecated and will be removed. " "Either express constraints on indicator_var using " "LogicalConstraints or work with the associated binary " - "variable from indicator_var.get_associated_binary()" - % (self.name,), version='6.0') + "variable from indicator_var.get_associated_binary()" % (self.name,), + version='6.0', + ) return self.get_associated_binary() + def as_binary(self): + return self.as_numeric() + def set_value(self, val, skip_validation=False, _propagate_value=True): # super() does not work as expected for properties; we will call # the property setter explicitly. @@ -162,7 +170,8 @@ def set_value(self, val, skip_validation=False, _propagate_value=True): val = int(val) # (Setting _propagate_value prevents infinite recursion.) self.get_associated_binary().set_value( - val, skip_validation, _propagate_value=False) + val, skip_validation, _propagate_value=False + ) def fix(self, value=NOTSET, skip_validation=False): super().fix(value, skip_validation) @@ -178,113 +187,197 @@ def unfix(self): # # Duck-type the numeric expression API, but route the conversion to - # Binary through as_binary to generate the deprecation warning + # Binary through as_numeric to generate the deprecation warning # @property def bounds(self): - return self.as_binary().bounds + return self.as_numeric().bounds @bounds.setter def bounds(self, value): - self.as_binary().bounds = value + self.as_numeric().bounds = value @property def lb(self): - return self.as_binary().lb + return self.as_numeric().lb @lb.setter def lb(self, value): - self.as_binary().lb = value + self.as_numeric().lb = value @property def ub(self): - return self.as_binary().ub + return self.as_numeric().ub @ub.setter def ub(self, value): - self.as_binary().ub = value + self.as_numeric().ub = value def __abs__(self): - return self.as_binary().__abs__() + return self.as_numeric().__abs__() + def __float__(self): - return self.as_binary().__float__() + return self.as_numeric().__float__() + def __int__(self): - return self.as_binary().__int__() + return self.as_numeric().__int__() + def __neg__(self): - return self.as_binary().__neg__() + return self.as_numeric().__neg__() + def __bool__(self): - return self.as_binary().__bool__() + return self.as_numeric().__bool__() + def __pos__(self): - return self.as_binary().__pos__() + return self.as_numeric().__pos__() + def get_units(self): - return self.as_binary().get_units() + return self.as_numeric().get_units() + def has_lb(self): - return self.as_binary().has_lb() + return self.as_numeric().has_lb() + def has_ub(self): - return self.as_binary().has_ub() + return self.as_numeric().has_ub() + def is_binary(self): - return self.as_binary().is_binary() + return self.as_numeric().is_binary() + def is_continuous(self): - return self.as_binary().is_continuous() + return self.as_numeric().is_continuous() + def is_integer(self): - return self.as_binary().is_integer() + return self.as_numeric().is_integer() + def polynomial_degree(self): - return self.as_binary().polynomial_degree() + return self.as_numeric().polynomial_degree() def __le__(self, arg): - return self.as_binary().__le__(arg) + return self.as_numeric().__le__(arg) + def __lt__(self, arg): - return self.as_binary().__lt__(arg) + return self.as_numeric().__lt__(arg) + def __ge__(self, arg): - return self.as_binary().__ge__(arg) + return self.as_numeric().__ge__(arg) + def __gt__(self, arg): - return self.as_binary().__gt__(arg) + return self.as_numeric().__gt__(arg) + def __eq__(self, arg): - return self.as_binary().__eq__(arg) + # If the other operand is a Boolean, then we want to fall back + # on the "normal" implementation of __eq__ for Boolean values + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return super().__eq__(arg) + # Otherwise, we will treat this as a binary operand and use the + # (numeric) relational expression system + return self.as_numeric().__eq__(arg) + def __ne__(self, arg): - return self.as_binary().__ne__(arg) + # If the other operand is a Boolean, then we want to fall back + # on the "normal" implementation of __ne__ for Boolean values + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return super().__ne__(arg) + # Otherwise, we will treat this as a binary operand and use the + # (numeric) relational expression system + return self.as_numeric().__ne__(arg) def __add__(self, arg): - return self.as_binary().__add__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__add__(arg) + def __div__(self, arg): - return self.as_binary().__div__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__div__(arg) + def __mul__(self, arg): - return self.as_binary().__mul__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__mul__(arg) + def __pow__(self, arg): - return self.as_binary().__pow__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__pow__(arg) + def __sub__(self, arg): - return self.as_binary().__sub__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__sub__(arg) + def __truediv__(self, arg): - return self.as_binary().__truediv__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__truediv__(arg) + def __iadd__(self, arg): - return self.as_binary().__iadd__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__iadd__(arg) + def __idiv__(self, arg): - return self.as_binary().__idiv__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__idiv__(arg) + def __imul__(self, arg): - return self.as_binary().__imul__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__imul__(arg) + def __ipow__(self, arg): - return self.as_binary().__ipow__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__ipow__(arg) + def __isub__(self, arg): - return self.as_binary().__isub__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__isub__(arg) + def __itruediv__(self, arg): - return self.as_binary().__itruediv__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__itruediv__(arg) + def __radd__(self, arg): - return self.as_binary().__radd__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__radd__(arg) + def __rdiv__(self, arg): - return self.as_binary().__rdiv__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__rdiv__(arg) + def __rmul__(self, arg): - return self.as_binary().__rmul__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__rmul__(arg) + def __rpow__(self, arg): - return self.as_binary().__rpow__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__rpow__(arg) + def __rsub__(self, arg): - return self.as_binary().__rsub__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__rsub__(arg) + def __rtruediv__(self, arg): - return self.as_binary().__rtruediv__(arg) + if isinstance(arg, BooleanValue) or arg.__class__ in native_logical_types: + return NotImplemented + return self.as_numeric().__rtruediv__(arg) + def setlb(self, arg): - return self.as_binary().setlb(arg) + return self.as_numeric().setlb(arg) + def setub(self, arg): - return self.as_binary().setub(arg) + return self.as_numeric().setub(arg) # The following should eventually be promoted so that all @@ -320,12 +413,15 @@ def process(arg): class _DisjunctData(_BlockData): + __autoslot_mappers__ = {'_transformation_block': AutoSlots.weakref_mapper} _Block_reserved_words = set() @property def transformation_block(self): - return self._transformation_block + return ( + None if self._transformation_block is None else self._transformation_block() + ) def __init__(self, component): _BlockData.__init__(self, component) @@ -354,7 +450,6 @@ def _activate_without_unfixing_indicator(self): @ModelComponentFactory.register("Disjunctive blocks.") class Disjunct(Block): - _ComponentDataClass = _DisjunctData def __new__(cls, *args, **kwds): @@ -377,10 +472,10 @@ def __init__(self, *args, **kwargs): # For the time being, this method is not needed. # - #def _deactivate_without_fixing_indicator(self): + # def _deactivate_without_fixing_indicator(self): # # Ideally, this would be a super call from this class. However, # # doing that would trigger a call to deactivate() on all the - # # _DisjunctData objects (exactly what we want to aviod!) + # # _DisjunctData objects (exactly what we want to avoid!) # # # # For the time being, we will do something bad and directly call # # the base class method from where we would otherwise want to @@ -389,7 +484,7 @@ def __init__(self, *args, **kwargs): def _activate_without_unfixing_indicator(self): # Ideally, this would be a super call from this class. However, # doing that would trigger a call to deactivate() on all the - # _DisjunctData objects (exactly what we want to aviod!) + # _DisjunctData objects (exactly what we want to avoid!) # # For the time being, we will do something bad and directly call # the base class method from where we would otherwise want to @@ -401,7 +496,6 @@ def _activate_without_unfixing_indicator(self): class ScalarDisjunct(_DisjunctData, Disjunct): - def __init__(self, *args, **kwds): ## FIXME: This is a HACK to get around a chicken-and-egg issue ## where _BlockData creates the indicator_var *before* @@ -434,12 +528,15 @@ def active(self): class _DisjunctionData(ActiveComponentData): - __slots__ = ('disjuncts','xor', '_algebraic_constraint') + __slots__ = ('disjuncts', 'xor', '_algebraic_constraint', '_transformation_map') + __autoslot_mappers__ = {'_algebraic_constraint': AutoSlots.weakref_mapper} _NoArgument = (0,) @property def algebraic_constraint(self): - return self._algebraic_constraint + return ( + None if self._algebraic_constraint is None else self._algebraic_constraint() + ) def __init__(self, component=None): # @@ -448,8 +545,7 @@ def __init__(self, component=None): # - _ConstraintData, # - ActiveComponentData # - ComponentData - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET self._active = True self.disjuncts = [] @@ -457,15 +553,8 @@ def __init__(self, component=None): # pointer to XOR (or OR) constraint if this disjunction has been # transformed. None if it has not been transformed self._algebraic_constraint = None - - def __getstate__(self): - """ - This method must be defined because this class uses slots. - """ - result = super(_DisjunctionData, self).__getstate__() - for i in _DisjunctionData.__slots__: - result[i] = getattr(self, i) - return result + # Dictionary to notate information from partial transformations + self._transformation_map = {} def set_value(self, expr): for e in expr: @@ -476,7 +565,6 @@ def set_value(self, expr): # the new Disjuncts are Blocks already. This catches them for who # they are anyway. if isinstance(e, _DisjunctData): - #if hasattr(e, 'type') and e.ctype == Disjunct: self.disjuncts.append(e) continue # The user was lazy and gave us a single constraint @@ -488,33 +576,19 @@ def set_value(self, expr): e_iter = [e] for _tmpe in e_iter: try: - isexpr = _tmpe.is_expression_type() - except AttributeError: - isexpr = False - if not isexpr or not _tmpe.is_relational(): - try: - isvar = _tmpe.is_variable_type() - except AttributeError: - isvar = False - if isvar and _tmpe.is_relational(): - expressions.append(_tmpe) - continue - try: - isbool = _tmpe.is_logical_type() - except AttributeError: - isbool = False - if isbool: + if _tmpe.is_expression_type(): expressions.append(_tmpe) continue - msg = "\n\tin %s" % (type(e),) if e_iter is e else "" - raise ValueError( - "Unexpected term for Disjunction %s.\n" - "\tExpected a Disjunct object, relational expression, " - "or iterable of\n" - "\trelational expressions but got %s%s" - % (self.name, type(_tmpe), msg) ) - else: - expressions.append(_tmpe) + except AttributeError: + pass + msg = "\n\tin %s" % (type(e),) if e_iter is e else "" + raise ValueError( + "Unexpected term for Disjunction %s.\n" + "\tExpected a Disjunct object, relational expression, " + "or iterable of\n" + "\trelational expressions but got %s%s" + % (self.name, type(_tmpe), msg) + ) comp = self.parent_component() if comp._autodisjuncts is None: @@ -522,7 +596,8 @@ def set_value(self, expr): comp._autodisjuncts = Disjunct(Any) b.add_component( unique_component_name(b, comp.local_name + "_disjuncts"), - comp._autodisjuncts ) + comp._autodisjuncts, + ) # TODO: I am not at all sure why we need to # explicitly construct this block - that should # happen automatically. @@ -531,10 +606,16 @@ def set_value(self, expr): disjunct.constraint = c = ConstraintList() disjunct.propositions = p = LogicalConstraintList() for e in expressions: - if isinstance(e, BooleanValue): + if e.is_expression_type(ExpressionType.RELATIONAL): + c.add(e) + elif e.is_expression_type(ExpressionType.LOGICAL): p.add(e) else: - c.add(e) + raise RuntimeError( + "Unsupported expression type on Disjunct " + f"{disjunct.name}: expected either relational or " + f"logical expression, found {e.__class__.__name__}" + ) self.disjuncts.append(disjunct) @@ -555,14 +636,13 @@ def __init__(self, *args, **kwargs): self._init_expr = kwargs.pop('expr', None) self._init_xor = _Initializer.process(kwargs.pop('xor', True)) self._autodisjuncts = None - self._algebraic_constraint = None kwargs.setdefault('ctype', Disjunction) super(Disjunction, self).__init__(*args, **kwargs) if self._init_expr is not None and self._init_rule is not None: raise ValueError( - "Cannot specify both rule= and expr= for Disjunction %s" - % ( self.name, )) + "Cannot specify both rule= and expr= for Disjunction %s" % (self.name,) + ) # # TODO: Ideally we would not override these methods and instead add @@ -586,37 +666,38 @@ def _setitem_when_not_present(self, index, value): return None else: ans = super(Disjunction, self)._setitem_when_not_present( - index=index, value=value) + index=index, value=value + ) self._initialize_members((index,)) return ans def _initialize_members(self, init_set): - if self._init_xor[0] == _Initializer.value: # POD data + if self._init_xor[0] == _Initializer.value: # POD data val = self._init_xor[1] for key in init_set: self._data[key].xor = val - elif self._init_xor[0] == _Initializer.deferred_value: # Param data - val = bool(value( self._init_xor[1] )) + elif self._init_xor[0] == _Initializer.deferred_value: # Param data + val = bool(value(self._init_xor[1])) for key in init_set: self._data[key].xor = val - elif self._init_xor[0] == _Initializer.function: # rule + elif self._init_xor[0] == _Initializer.function: # rule fcn = self._init_xor[1] for key in init_set: - self._data[key].xor = bool(value(apply_indexed_rule( - self, fcn, self._parent(), key))) - elif self._init_xor[0] == _Initializer.dict_like: # dict-like thing + self._data[key].xor = bool( + value(apply_indexed_rule(self, fcn, self._parent(), key)) + ) + elif self._init_xor[0] == _Initializer.dict_like: # dict-like thing val = self._init_xor[1] for key in init_set: self._data[key].xor = bool(value(val[key])) def construct(self, data=None): if is_debug_set(logger): - logger.debug("Constructing disjunction %s" - % (self.name)) + logger.debug("Constructing disjunction %s" % (self.name)) if self._constructed: return timer = ConstructionTimer(self) - self._constructed=True + self._constructed = True _self_parent = self.parent_block() if not self.is_indexed(): @@ -629,38 +710,33 @@ def construct(self, data=None): return if expr is None: - raise ValueError( _rule_returned_none_error % (self.name,) ) + raise ValueError(_rule_returned_none_error % (self.name,)) if expr is Disjunction.Skip: timer.report() return self._data[None] = self - self._setitem_when_not_present( None, expr ) + self._setitem_when_not_present(None, expr) elif self._init_expr is not None: raise IndexError( "Disjunction '%s': Cannot initialize multiple indices " - "of a disjunction with a single disjunction list" % - (self.name,) ) + "of a disjunction with a single disjunction list" % (self.name,) + ) elif self._init_rule is not None: _init_rule = self._init_rule for ndx in self._index_set: try: - expr = apply_indexed_rule(self, - _init_rule, - _self_parent, - ndx) + expr = apply_indexed_rule(self, _init_rule, _self_parent, ndx) except Exception: err = sys.exc_info()[1] logger.error( "Rule failed when generating expression for " "disjunction %s with index %s:\n%s: %s" - % (self.name, - str(ndx), - type(err).__name__, - err)) + % (self.name, str(ndx), type(err).__name__, err) + ) raise if expr is None: _name = "%s[%s]" % (self.name, str(ndx)) - raise ValueError( _rule_returned_none_error % (_name,) ) + raise ValueError(_rule_returned_none_error % (_name,)) if expr is Disjunction.Skip: continue self._setitem_when_not_present(ndx, expr) @@ -671,18 +747,18 @@ def _pprint(self): Return data that will be printed for this component. """ return ( - [("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None), - ("Active", self.active), - ], + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ("Active", self.active), + ], self.items(), - ( "Disjuncts", "Active", "XOR" ), - lambda k, v: [ [x.name for x in v.disjuncts], v.active, v.xor] - ) + ("Disjuncts", "Active", "XOR"), + lambda k, v: [[x.name for x in v.disjuncts], v.active, v.xor], + ) class ScalarDisjunction(_DisjunctionData, Disjunction): - def __init__(self, *args, **kwds): _DisjunctionData.__init__(self, component=self) Disjunction.__init__(self, *args, **kwds) @@ -705,8 +781,8 @@ def set_value(self, expr): raise ValueError( "Setting the value of disjunction '%s' " "before the Disjunction has been constructed (there " - "is currently no object to set)." - % (self.name)) + "is currently no object to set)." % (self.name) + ) if len(self._data) == 0: self._data[None] = self diff --git a/pyomo/gdp/plugins/__init__.py b/pyomo/gdp/plugins/__init__.py index 6d8ba98d5b8..6f1f3e5d6c0 100644 --- a/pyomo/gdp/plugins/__init__.py +++ b/pyomo/gdp/plugins/__init__.py @@ -9,6 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.gdp.plugins.bigm import pyomo.gdp.plugins.hull @@ -18,3 +19,5 @@ def load(): import pyomo.gdp.plugins.fix_disjuncts import pyomo.gdp.plugins.partition_disjuncts import pyomo.gdp.plugins.between_steps + import pyomo.gdp.plugins.multiple_bigm + import pyomo.gdp.plugins.bound_pretransformation diff --git a/pyomo/gdp/plugins/between_steps.py b/pyomo/gdp/plugins/between_steps.py index 79e27946268..fad783d595d 100644 --- a/pyomo/gdp/plugins/between_steps.py +++ b/pyomo/gdp/plugins/between_steps.py @@ -17,17 +17,20 @@ """ from pyomo.core import Transformation, TransformationFactory -@TransformationFactory.register('gdp.between_steps', - doc="Reformulates a convex disjunctive model " - "by splitting additively separable constraints" - "on P sets of variables, then taking hull " - "reformulation.") + +@TransformationFactory.register( + 'gdp.between_steps', + doc="Reformulates a convex disjunctive model " + "by splitting additively separable constraints" + "on P sets of variables, then taking hull " + "reformulation.", +) class BetweenSteps_Transformation(Transformation): """ Transform disjunctive model to equivalent MI(N)LP using the between steps transformation from Konqvist et al. 2021 [1]. - This transformation first calls the 'gdp.partition_disjuncts' + This transformation first calls the 'gdp.partition_disjuncts' transformation, resulting in an equivalent GDP with the constraints partitioned, and then takes the hull reformulation of that model to get an algebraic model. @@ -37,10 +40,10 @@ class BetweenSteps_Transformation(Transformation): [1] J. Kronqvist, R. Misener, and C. Tsay, "Between Steps: Intermediate Relaxations between big-M and Convex Hull Reformulations," 2021. """ + def __init__(self): super(BetweenSteps_Transformation, self).__init__() def _apply_to(self, instance, **kwds): - TransformationFactory('gdp.partition_disjuncts').apply_to(instance, - **kwds) + TransformationFactory('gdp.partition_disjuncts').apply_to(instance, **kwds) TransformationFactory('gdp.hull').apply_to(instance) diff --git a/pyomo/gdp/plugins/bigm.py b/pyomo/gdp/plugins/bigm.py index b53eb561af3..bb731363898 100644 --- a/pyomo/gdp/plugins/bigm.py +++ b/pyomo/gdp/plugins/bigm.py @@ -13,38 +13,54 @@ import logging -from pyomo.common.collections import ComponentMap, ComponentSet -from pyomo.common.config import ConfigBlock, ConfigValue -from pyomo.common.log import is_debug_set +from pyomo.common.collections import ComponentMap +from pyomo.common.config import ConfigDict, ConfigValue from pyomo.common.modeling import unique_component_name from pyomo.common.deprecation import deprecated, deprecation_warning -from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr +from pyomo.contrib.cp.transform.logical_to_disjunctive_program import ( + LogicalToDisjunctive, +) from pyomo.core import ( - Block, BooleanVar, Connector, Constraint, Param, Set, SetOf, Suffix, Var, - Expression, SortComponents, TraversalStrategy, value, RangeSet, - NonNegativeIntegers, Binary, ) -from pyomo.core.base.boolean_var import ( - _DeprecatedImplicitAssociatedBinaryVariable) -from pyomo.core.base.external import ExternalFunction -from pyomo.core.base import Transformation, TransformationFactory, Reference -import pyomo.core.expr.current as EXPR + Block, + BooleanVar, + Connector, + Constraint, + Param, + Set, + SetOf, + Var, + Expression, + SortComponents, + TraversalStrategy, + value, + RangeSet, + NonNegativeIntegers, + Binary, + Any, +) +from pyomo.core.base import TransformationFactory, Reference +import pyomo.core.expr as EXPR from pyomo.gdp import Disjunct, Disjunction, GDP_Error -from pyomo.gdp.util import ( - is_child_of, get_src_disjunction, get_src_constraint, - get_transformed_constraints, _get_constraint_transBlock, get_src_disjunct, - _warn_for_active_disjunction, _warn_for_active_disjunct, preprocess_targets, - _to_dict) +from pyomo.gdp.plugins.bigm_mixin import ( + _BigM_MixIn, + _get_bigM_suffix_list, + _warn_for_unused_bigM_args, +) +from pyomo.gdp.plugins.gdp_to_mip_transformation import GDP_to_MIP_Transformation +from pyomo.gdp.transformed_disjunct import _TransformedDisjunct +from pyomo.gdp.util import is_child_of, _get_constraint_transBlock, _to_dict from pyomo.core.util import target_list from pyomo.network import Port from pyomo.repn import generate_standard_repn -from functools import wraps from weakref import ref as weakref_ref, ReferenceType logger = logging.getLogger('pyomo.gdp.bigm') -@TransformationFactory.register('gdp.bigm', doc="Relax disjunctive model using " - "big-M terms.") -class BigM_Transformation(Transformation): + +@TransformationFactory.register( + 'gdp.bigm', doc="Relax disjunctive model using big-M terms." +) +class BigM_Transformation(GDP_to_MIP_Transformation, _BigM_MixIn): """Relax disjunctive model using big-M terms. Relaxes a disjunctive model into an algebraic model by adding Big-M @@ -87,40 +103,48 @@ class BigM_Transformation(Transformation): All transformed Disjuncts will have a pointer to the block their transformed constraints are on, and all transformed Disjunctions will have a - pointer to the corresponding OR or XOR constraint. + pointer to the corresponding 'Or' or 'ExactlyOne' constraint. """ - CONFIG = ConfigBlock("gdp.bigm") - CONFIG.declare('targets', ConfigValue( - default=None, - domain=target_list, - description="target or list of targets that will be relaxed", - doc=""" + CONFIG = ConfigDict("gdp.bigm") + CONFIG.declare( + 'targets', + ConfigValue( + default=None, + domain=target_list, + description="target or list of targets that will be relaxed", + doc=""" This specifies the list of components to relax. If None (default), the entire model is transformed. Note that if the transformation is done out of place, the list of targets should be attached to the model before it is cloned, and the list will specify the targets on the cloned - instance.""" - )) - CONFIG.declare('bigM', ConfigValue( - default=None, - domain=_to_dict, - description="Big-M value used for constraint relaxation", - doc=""" + instance.""", + ), + ) + CONFIG.declare( + 'bigM', + ConfigValue( + default=None, + domain=_to_dict, + description="Big-M value used for constraint relaxation", + doc=""" A user-specified value, dict, or ComponentMap of M values that override M-values found through model Suffixes or that would otherwise be - calculated using variable domains.""" - )) - CONFIG.declare('assume_fixed_vars_permanent', ConfigValue( - default=False, - domain=bool, - description="Boolean indicating whether or not to transform so that " - "the transformed model will still be valid when fixed Vars are " - "unfixed.", - doc=""" + calculated using variable domains.""", + ), + ) + CONFIG.declare( + 'assume_fixed_vars_permanent', + ConfigValue( + default=False, + domain=bool, + description="Boolean indicating whether or not to transform so that " + "the transformed model will still be valid when fixed Vars are " + "unfixed.", + doc=""" This is only relevant when the transformation will be estimating values for M. If True, the transformation will calculate M values assuming that fixed variables will always be fixed to their current values. This means @@ -130,287 +154,74 @@ class BigM_Transformation(Transformation): future and will use their bounds to calculate the M value rather than their value. Note that this could make for a weaker LP relaxation while the variables remain fixed. - """ - )) + """, + ), + ) + transformation_name = 'bigm' def __init__(self): - """Initialize transformation object.""" - super(BigM_Transformation, self).__init__() - self.handlers = { - Constraint: self._transform_constraint, - Var: False, # Note that if a Var appears on a Disjunct, we - # still treat its bounds as global. If the - # intent is for its bounds to be on the - # disjunct, it should be declared with no bounds - # and the bounds should be set in constraints on - # the Disjunct. - BooleanVar: False, - Connector: False, - Expression: False, - Suffix: False, - Param: False, - Set: False, - SetOf: False, - RangeSet: False, - Disjunction: self._warn_for_active_disjunction, - Disjunct: self._warn_for_active_disjunct, - Block: self._transform_block_on_disjunct, - ExternalFunction: False, - Port: False, # not Arcs, because those are deactivated after - # the network.expand_arcs transformation - } - self._generate_debug_messages = False - - def _get_bigm_suffix_list(self, block, stopping_block=None): - # Note that you can only specify suffixes on BlockData objects or - # ScalarBlocks. Though it is possible at this point to stick them - # on whatever components you want, we won't pick them up. - suffix_list = [] - - # go searching above block in the tree, stop when we hit stopping_block - # (This is so that we can search on each Disjunct once, but get any - # information between a constraint and its Disjunct while transforming - # the constraint). - while block is not stopping_block: - bigm = block.component('BigM') - if type(bigm) is Suffix: - suffix_list.append(bigm) - block = block.parent_block() - - return suffix_list - - def _get_bigm_arg_list(self, bigm_args, block): - # Gather what we know about blocks from args exactly once. We'll still - # check for constraints in the moment, but if that fails, we've - # preprocessed the time-consuming part of traversing up the tree. - arg_list = [] - if bigm_args is None: - return arg_list - while block is not None: - if block in bigm_args: - arg_list.append({block: bigm_args[block]}) - block = block.parent_block() - return arg_list + super().__init__(logger) def _apply_to(self, instance, **kwds): - self._generate_debug_messages = is_debug_set(logger) - self.used_args = ComponentMap() # If everything was sure to go well, - # this could be a dictionary. But if - # someone messes up and gives us a Var - # as a key in bigMargs, I need the error - # not to be when I try to put it into - # this map! + self.used_args = ComponentMap() # If everything was sure to go well, + # this could be a dictionary. But if + # someone messes up and gives us a Var + # as a key in bigMargs, I need the error + # not to be when I try to put it into + # this map! try: self._apply_to_impl(instance, **kwds) finally: - # same for our bookkeeping about what we used from bigM arg dict + self._restore_state() self.used_args.clear() def _apply_to_impl(self, instance, **kwds): - if not instance.ctype in (Block, Disjunct): - raise GDP_Error("Transformation called on %s of type %s. " - "'instance' must be a ConcreteModel, Block, or " - "Disjunct (in the case of nested disjunctions)." % - (instance.name, instance.ctype)) - - config = self.CONFIG(kwds.pop('options', {})) - - # We will let args override suffixes and estimate as a last - # resort. More specific args/suffixes override ones anywhere in - # the tree. Suffixes lower down in the tree override ones higher - # up. - config.set_value(kwds) - bigM = config.bigM - self.assume_fixed_vars_permanent = config.assume_fixed_vars_permanent - - targets = config.targets - # We need to check that all the targets are in fact on instance. As we - # do this, we will use the set below to cache components we know to be - # in the tree rooted at instance. - knownBlocks = {} - if targets is None: - targets = (instance, ) - - # FIXME: For historical reasons, BigM would silently skip - # any targets that were explicitly deactivated. This - # preserves that behavior (although adds a warning). We - # should revisit that design decision and probably remove - # this filter, as it is slightly ambiguous as to what it - # means for the target to be deactivated: is it just the - # target itself [historical implementation] or any block in - # the hierarchy? - def _filter_inactive(targets): - for t in targets: - if not t.active: - logger.warning( - 'GDP.BigM transformation passed a deactivated ' - f'target ({t.name}). Skipping.') - else: - yield t - targets = list(_filter_inactive(targets)) + self._process_arguments(instance, **kwds) + # filter out inactive targets and handle case where targets aren't + # specified. + targets = self._filter_targets(instance) + # transform logical constraints based on targets + self._transform_logical_constraints(instance, targets) # we need to preprocess targets to make sure that if there are any # disjunctions in targets that their disjuncts appear before them in # the list. - preprocessed_targets = preprocess_targets(targets, instance, - knownBlocks) - - # transform any logical constraints that might be anywhere on the stuff - # we're about to transform. - TransformationFactory('core.logical_to_linear').apply_to( - instance, - targets=[blk for blk in targets if blk.ctype is Block] + - [disj for disj in preprocessed_targets if disj.ctype is Disjunct]) + gdp_tree = self._get_gdp_tree_from_targets(instance, targets) + preprocessed_targets = gdp_tree.reverse_topological_sort() + bigM = self._config.bigM for t in preprocessed_targets: if t.ctype is Disjunction: - if t.is_indexed(): - self._transform_disjunction(t, bigM) - else: - self._transform_disjunctionData( t, bigM, t.index()) - else:# We know t.ctype in (Block, Disjunct) after preprocessing - if t.is_indexed(): - self._transform_block(t, bigM) - else: - self._transform_blockData(t, bigM) + self._transform_disjunctionData( + t, + t.index(), + parent_disjunct=gdp_tree.parent(t), + root_disjunct=gdp_tree.root_disjunct(t), + ) + else: # We know t is a Disjunct after preprocessing + self._transform_disjunct( + t, bigM, root_disjunct=gdp_tree.root_disjunct(t) + ) # issue warnings about anything that was in the bigM args dict that we # didn't use - if bigM is not None: - unused_args = ComponentSet(bigM.keys()) - \ - ComponentSet(self.used_args.keys()) - if len(unused_args) > 0: - warning_msg = ("Unused arguments in the bigM map! " - "These arguments were not used by the " - "transformation:\n") - for component in unused_args: - if hasattr(component, 'name'): - warning_msg += "\t%s\n" % component.name - else: - warning_msg += "\t%s\n" % component - logger.warning(warning_msg) - - def _add_transformation_block(self, instance): - # make a transformation block on instance to put transformed disjuncts - # on - transBlockName = unique_component_name( - instance, - '_pyomo_gdp_bigm_reformulation') - transBlock = Block() - instance.add_component(transBlockName, transBlock) - transBlock.relaxedDisjuncts = Block(NonNegativeIntegers) - transBlock.lbub = Set(initialize=['lb', 'ub']) - - return transBlock - - def _transform_block(self, obj, bigM): - for i in sorted(obj.keys()): - self._transform_blockData(obj[i], bigM) - - def _transform_blockData(self, obj, bigM): - # Transform every (active) disjunction in the block - for disjunction in obj.component_objects( - Disjunction, - active=True, - sort=SortComponents.deterministic, - descend_into=(Block, Disjunct), - descent_order=TraversalStrategy.PostfixDFS): - self._transform_disjunction(disjunction, bigM) - - def _add_xor_constraint(self, disjunction, transBlock): - # Put the disjunction constraint on the transformation block and - # determine whether it is an OR or XOR constraint. - - # We never do this for just a DisjunctionData because we need to know - # about the index set of its parent component (so that we can make the - # index of this constraint match). So if we called this on a - # DisjunctionData, we did something wrong. - assert isinstance(disjunction, Disjunction) - - # first check if the constraint already exists - if disjunction._algebraic_constraint is not None: - return disjunction._algebraic_constraint() - - # add the XOR (or OR) constraints to parent block (with unique name) - # It's indexed if this is an IndexedDisjunction, not otherwise - orC = Constraint(disjunction.index_set()) if \ - disjunction.is_indexed() else Constraint() - # The name used to indicate if there were OR or XOR disjunctions, - # however now that Disjunctions are allowed to mix the state we - # can no longer make that distinction in the name. - # nm = '_xor' if xor else '_or' - nm = '_xor' - orCname = unique_component_name( transBlock, disjunction.getname( - fully_qualified=True) + nm) - transBlock.add_component(orCname, orC) - disjunction._algebraic_constraint = weakref_ref(orC) - - return orC - - def _transform_disjunction(self, obj, bigM): - if not obj.active: - return - - # if this is an IndexedDisjunction we have seen in a prior call to the - # transformation, we already have a transformation block for it. We'll - # use that. - if obj._algebraic_constraint is not None: - transBlock = obj._algebraic_constraint().parent_block() - else: - transBlock = self._add_transformation_block(obj.parent_block()) - - # relax each of the disjunctionDatas - for i in sorted(obj.keys()): - self._transform_disjunctionData(obj[i], bigM, i, transBlock) - - # deactivate so the writers don't scream - obj.deactivate() + _warn_for_unused_bigM_args(bigM, self.used_args, logger) - def _transform_disjunctionData(self, obj, bigM, index, transBlock=None): - if not obj.active: - return # Do not process a deactivated disjunction - # We won't have these arguments if this got called straight from - # targets. But else, we created them earlier, and have just been passing - # them through. - if transBlock is None: - # It's possible that we have already created a transformation block - # for another disjunctionData from this same container. If that's - # the case, let's use the same transformation block. (Else it will - # be really confusing that the XOR constraint goes to that old block - # but we create a new one here.) - if obj.parent_component()._algebraic_constraint is not None: - transBlock = obj.parent_component()._algebraic_constraint().\ - parent_block() - else: - transBlock = self._add_transformation_block(obj.parent_block()) - # create or fetch the xor constraint - xorConstraint = self._add_xor_constraint(obj.parent_component(), - transBlock) - - xor = obj.xor - or_expr = 0 - # Just because it's unlikely this is what someone meant to do... - if len(obj.disjuncts) == 0: - raise GDP_Error("Disjunction '%s' is empty. This is " - "likely indicative of a modeling error." % - obj.getname(fully_qualified=True)) - for disjunct in obj.disjuncts: - or_expr += disjunct.binary_indicator_var - # make suffix list. (We don't need it until we are - # transforming constraints, but it gets created at the - # disjunct level, so more efficient to make it here and - # pass it down.) - suffix_list = self._get_bigm_suffix_list(disjunct) - arg_list = self._get_bigm_arg_list(bigM, disjunct) - # relax the disjunct - self._transform_disjunct(disjunct, transBlock, bigM, arg_list, - suffix_list) + def _transform_disjunctionData( + self, obj, index, parent_disjunct=None, root_disjunct=None + ): + (transBlock, xorConstraint) = self._setup_transform_disjunctionData( + obj, root_disjunct + ) # add or (or xor) constraint - if xor: - xorConstraint[index] = or_expr == 1 + or_expr = sum(disjunct.binary_indicator_var for disjunct in obj.disjuncts) + + rhs = 1 if parent_disjunct is None else parent_disjunct.binary_indicator_var + if obj.xor: + xorConstraint[index] = or_expr == rhs else: - xorConstraint[index] = or_expr >= 1 + xorConstraint[index] = or_expr >= rhs # Mark the DisjunctionData as transformed by mapping it to its XOR # constraint. obj._algebraic_constraint = weakref_ref(xorConstraint[index]) @@ -418,39 +229,18 @@ def _transform_disjunctionData(self, obj, bigM, index, transBlock=None): # and deactivate for the writers obj.deactivate() - def _transform_disjunct(self, obj, transBlock, bigM, arg_list, suffix_list): - # deactivated -> either we've already transformed or user deactivated - if not obj.active: - if obj.indicator_var.is_fixed(): - if not value(obj.indicator_var): - # The user cleanly deactivated the disjunct: there - # is nothing for us to do here. - return - else: - raise GDP_Error( - "The disjunct '%s' is deactivated, but the " - "indicator_var is fixed to %s. This makes no sense." - % ( obj.name, value(obj.indicator_var) )) - if obj._transformation_block is None: - raise GDP_Error( - "The disjunct '%s' is deactivated, but the " - "indicator_var is not fixed and the disjunct does not " - "appear to have been relaxed. This makes no sense. " - "(If the intent is to deactivate the disjunct, fix its " - "indicator_var to False.)" - % ( obj.name, )) - - if obj._transformation_block is not None: - # we've transformed it, which means this is the second time it's - # appearing in a Disjunction - raise GDP_Error( - "The disjunct '%s' has been transformed, but a disjunction " - "it appears in has not. Putting the same disjunct in " - "multiple disjunctions is not supported." % obj.name) + def _transform_disjunct(self, obj, bigM, root_disjunct): + root = ( + root_disjunct.parent_block() + if root_disjunct is not None + else obj.parent_block() + ) + transBlock = self._add_transformation_block(root)[0] + suffix_list = _get_bigM_suffix_list(obj) + arg_list = self._get_bigM_arg_list(bigM, obj) + + relaxationBlock = self._get_disjunct_transformation_block(obj, transBlock) - # add reference to original disjunct on transformation block - relaxedDisjuncts = transBlock.relaxedDisjuncts - relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)] # we will keep a map of constraints (hashable, ha!) to a tuple to # indicate what their M value is and where it came from, of the form: # ((lower_value, lower_source, lower_key), (upper_value, upper_source, @@ -462,9 +252,6 @@ def _transform_disjunct(self, obj, transBlock, bigM, arg_list, suffix_list): # user-specified and the other is not, hence the need to store # information for both.) relaxationBlock.bigm_src = {} - relaxationBlock.localVarReferences = Block() - obj._transformation_block = weakref_ref(relaxationBlock) - relaxationBlock._srcDisjunct = weakref_ref(obj) # This is crazy, but if the disjunction has been previously # relaxed, the disjunct *could* be deactivated. This is a big @@ -480,183 +267,21 @@ def _transform_disjunct(self, obj, transBlock, bigM, arg_list, suffix_list): # deactivate disjunct to keep the writers happy obj._deactivate_without_fixing_indicator() - def _transform_block_components(self, block, disjunct, bigM, arg_list, - suffix_list): - # We find any transformed disjunctions that might be here because we - # need to move their transformation blocks up onto the parent block - # before we transform anything else on this block. Note that we do this - # before we create references to local variables because we do not want - # duplicate references to indicator variables and local variables on - # nested disjuncts. - disjunctBlock = disjunct._transformation_block() - destinationBlock = disjunctBlock.parent_block() - for obj in block.component_data_objects( - Disjunction, - sort=SortComponents.deterministic, - descend_into=(Block)): - if obj.algebraic_constraint is None: - # This could be bad if it's active since that means its - # untransformed, but we'll wait to yell until the next loop - continue - # get this disjunction's relaxation block. - transBlock = obj.algebraic_constraint().parent_block() - - # move transBlock up to parent component - self._transfer_transBlock_data(transBlock, destinationBlock) - # we leave the transformation block because it still has the XOR - # constraints, which we want to be on the parent disjunct. - - # We don't know where all the BooleanVars are used, so if there are any - # that the above transformation didn't transform, we need to do it now, - # so that the Reference gets moved up. This won't be necessary when the - # writers are willing to find Vars not in the active subtree. - for boolean in block.component_data_objects(BooleanVar, - descend_into=Block, - active=None): - if isinstance(boolean._associated_binary, - _DeprecatedImplicitAssociatedBinaryVariable): - parent_block = boolean.parent_block() - new_var = Var(domain=Binary) - parent_block.add_component( - unique_component_name(parent_block, - boolean.local_name + "_asbinary"), - new_var) - boolean.associate_binary_var(new_var) - - # Find all the variables declared here (including the indicator_var) and - # add a reference on the transformation block so these will be - # accessible when the Disjunct is deactivated. We don't descend into - # Disjuncts because we just moved the references to their local - # variables up in the previous loop. - varRefBlock = disjunctBlock.localVarReferences - for v in block.component_objects(Var, descend_into=Block, active=None): - varRefBlock.add_component(unique_component_name( - varRefBlock, v.getname(fully_qualified=True)), Reference(v)) - - # Now look through the component map of block and transform everything - # we have a handler for. Yell if we don't know how to handle it. (Note - # that because we only iterate through active components, this means - # non-ActiveComponent types cannot have handlers.) - for obj in block.component_objects(active=True, descend_into=False): - handler = self.handlers.get(obj.ctype, None) - if not handler: - if handler is None: - raise GDP_Error( - "No BigM transformation handler registered " - "for modeling components of type %s. If your " - "disjuncts contain non-GDP Pyomo components that " - "require transformation, please transform them first." - % obj.ctype) - continue - # obj is what we are transforming, we pass disjunct - # through so that we will have access to the indicator - # variables down the line. - handler(obj, disjunct, bigM, arg_list, suffix_list) - - def _transfer_transBlock_data(self, fromBlock, toBlock): - # We know that we have a list of transformed disjuncts on both. We need - # to move those over. We know the XOR constraints are on the block, and - # we need to leave those on the disjunct. - disjunctList = toBlock.relaxedDisjuncts - to_delete = [] - for idx, disjunctBlock in fromBlock.relaxedDisjuncts.items(): - newblock = disjunctList[len(disjunctList)] - newblock.transfer_attributes_from(disjunctBlock) - - # update the mappings - original = disjunctBlock._srcDisjunct() - original._transformation_block = weakref_ref(newblock) - newblock._srcDisjunct = weakref_ref(original) - - # save index of what we just moved so that we can delete it - to_delete.append(idx) - - # delete everything we moved. - for idx in to_delete: - del fromBlock.relaxedDisjuncts[idx] - - # Note that we could handle other components here if we ever needed - # to, but we control what is on the transformation block and - # currently everything is on the blocks that we just moved... - - def _warn_for_active_disjunction(self, disjunction, disjunct, bigMargs, - arg_list, suffix_list): - _warn_for_active_disjunction(disjunction, disjunct) - - def _warn_for_active_disjunct(self, innerdisjunct, outerdisjunct, bigMargs, - arg_list, suffix_list): - _warn_for_active_disjunct(innerdisjunct, outerdisjunct) - - def _warn_for_active_logical_statement( - self, logical_statment, disjunct, infodict, bigMargs, suffix_list): - _warn_for_active_logical_constraint(logical_statment, disjunct) - - def _transform_block_on_disjunct(self, block, disjunct, bigMargs, arg_list, - suffix_list): - # We look through everything on the component map of the block - # and transform it just as we would if it was on the disjunct - # directly. (We are passing the disjunct through so that when - # we find constraints, _xform_constraint will have access to - # the correct indicator variable.) - for i in sorted(block.keys()): - self._transform_block_components( block[i], disjunct, bigMargs, - arg_list, suffix_list) - - def _get_constraint_map_dict(self, transBlock): - if not hasattr(transBlock, "_constraintMap"): - transBlock._constraintMap = { - 'srcConstraints': ComponentMap(), - 'transformedConstraints': ComponentMap()} - return transBlock._constraintMap - - def _convert_M_to_tuple(self, M, constraint_name): - if not isinstance(M, (tuple, list)): - if M is None: - M = (None, None) - else: - try: - M = (-M, M) - except: - logger.error("Error converting scalar M-value %s " - "to (-M,M). Is %s not a numeric type?" - % (M, type(M))) - raise - if len(M) != 2: - raise GDP_Error("Big-M %s for constraint %s is not of " - "length two. " - "Expected either a single value or " - "tuple or list of length two for M." - % (str(M), constraint_name)) - - return M - - def _transform_constraint(self, obj, disjunct, bigMargs, arg_list, - disjunct_suffix_list): + def _transform_constraint( + self, obj, disjunct, bigMargs, arg_list, disjunct_suffix_list + ): # add constraint to the transformation block, we'll transform it there. transBlock = disjunct._transformation_block() bigm_src = transBlock.bigm_src - constraintMap = self._get_constraint_map_dict(transBlock) + constraintMap = transBlock._constraintMap disjunctionRelaxationBlock = transBlock.parent_block() - # Though rare, it is possible to get naming conflicts here - # since constraints from all blocks are getting moved onto the - # same block. So we get a unique name - cons_name = obj.getname(fully_qualified=True) - name = unique_component_name(transBlock, cons_name) - - if obj.is_indexed(): - newConstraint = Constraint(obj.index_set(), - disjunctionRelaxationBlock.lbub) - # we map the container of the original to the container of the - # transformed constraint. Don't do this if obj is a ScalarConstraint - # because we will treat that like a _ConstraintData and map to a - # list of transformed _ConstraintDatas - constraintMap['transformedConstraints'][obj] = newConstraint - else: - newConstraint = Constraint(disjunctionRelaxationBlock.lbub) - transBlock.add_component(name, newConstraint) - # add mapping of transformed constraint to original constraint - constraintMap['srcConstraints'][newConstraint] = obj + + # We will make indexes from ({obj.local_name} x obj.index_set() x ['lb', + # 'ub']), but don't bother construct that set here, as taking Cartesian + # products is kind of expensive (and redundant since we have the + # original model) + newConstraint = transBlock.transformedConstraints for i in sorted(obj.keys()): c = obj[i] @@ -668,189 +293,80 @@ def _transform_constraint(self, obj, disjunct, bigMargs, arg_list, # first, we see if an M value was specified in the arguments. # (This returns None if not) - lower, upper = self._get_M_from_args(c, bigMargs, arg_list, lower, - upper) + lower, upper = self._get_M_from_args(c, bigMargs, arg_list, lower, upper) M = (lower[0], upper[0]) if self._generate_debug_messages: - _name = obj.getname(fully_qualified=True) - logger.debug("GDP(BigM): The value for M for constraint '%s' " - "from the BigM argument is %s." % (cons_name, - str(M))) + logger.debug( + "GDP(BigM): The value for M for constraint '%s' " + "from the BigM argument is %s." % (c.name, str(M)) + ) # if we didn't get something we need from args, try suffixes: - if (M[0] is None and c.lower is not None) or \ - (M[1] is None and c.upper is not None): + if (M[0] is None and c.lower is not None) or ( + M[1] is None and c.upper is not None + ): # first get anything parent to c but below disjunct - suffix_list = self._get_bigm_suffix_list( - c.parent_block(), - stopping_block=disjunct) + suffix_list = _get_bigM_suffix_list( + c.parent_block(), stopping_block=disjunct + ) # prepend that to what we already collected for the disjunct. suffix_list.extend(disjunct_suffix_list) - lower, upper = self._update_M_from_suffixes(c, suffix_list, - lower, upper) + lower, upper = self._update_M_from_suffixes( + c, suffix_list, lower, upper + ) M = (lower[0], upper[0]) if self._generate_debug_messages: - _name = obj.getname(fully_qualified=True) - logger.debug("GDP(BigM): The value for M for constraint '%s' " - "after checking suffixes is %s." % (cons_name, - str(M))) + logger.debug( + "GDP(BigM): The value for M for constraint '%s' " + "after checking suffixes is %s." % (c.name, str(M)) + ) if c.lower is not None and M[0] is None: - M = (self._estimate_M(c.body, name)[0] - c.lower, M[1]) + M = (self._estimate_M(c.body, c)[0] - c.lower, M[1]) lower = (M[0], None, None) if c.upper is not None and M[1] is None: - M = (M[0], self._estimate_M(c.body, name)[1] - c.upper) + M = (M[0], self._estimate_M(c.body, c)[1] - c.upper) upper = (M[1], None, None) if self._generate_debug_messages: - _name = obj.getname(fully_qualified=True) - logger.debug("GDP(BigM): The value for M for constraint '%s' " - "after estimating (if needed) is %s." % - (cons_name, str(M))) + logger.debug( + "GDP(BigM): The value for M for constraint '%s' " + "after estimating (if needed) is %s." % (c.name, str(M)) + ) # save the source information bigm_src[c] = (lower, upper) - # Handle indices for both ScalarConstraint and IndexedConstraint - if i.__class__ is tuple: - i_lb = i + ('lb',) - i_ub = i + ('ub',) - elif obj.is_indexed(): - i_lb = (i, 'lb',) - i_ub = (i, 'ub',) - else: - i_lb = 'lb' - i_ub = 'ub' - - if c.lower is not None: - if M[0] is None: - raise GDP_Error("Cannot relax disjunctive constraint '%s' " - "because M is not defined." % name) - M_expr = M[0] * (1 - disjunct.binary_indicator_var) - newConstraint.add(i_lb, c.lower <= c. body - M_expr) - constraintMap[ - 'transformedConstraints'][c] = [newConstraint[i_lb]] - constraintMap['srcConstraints'][newConstraint[i_lb]] = c - if c.upper is not None: - if M[1] is None: - raise GDP_Error("Cannot relax disjunctive constraint '%s' " - "because M is not defined." % name) - M_expr = M[1] * (1 - disjunct.binary_indicator_var) - newConstraint.add(i_ub, c.body - M_expr <= c.upper) - transformed = constraintMap['transformedConstraints'].get(c) - if transformed is not None: - constraintMap['transformedConstraints'][ - c].append(newConstraint[i_ub]) - else: - constraintMap[ - 'transformedConstraints'][c] = [newConstraint[i_ub]] - constraintMap['srcConstraints'][newConstraint[i_ub]] = c + self._add_constraint_expressions( + c, i, M, disjunct.binary_indicator_var, newConstraint, constraintMap + ) # deactivate because we relaxed c.deactivate() - def _process_M_value(self, m, lower, upper, need_lower, need_upper, src, - key, constraint_name, from_args=False): - m = self._convert_M_to_tuple(m, constraint_name) - if need_lower and m[0] is not None: - if from_args: - self.used_args[key] = m - lower = (m[0], src, key) - need_lower = False - if need_upper and m[1] is not None: - if from_args: - self.used_args[key] = m - upper = (m[1], src, key) - need_upper = False - return lower, upper, need_lower, need_upper - - def _get_M_from_args(self, constraint, bigMargs, arg_list, lower, upper): - # check args: we first look in the keys for constraint and - # constraintdata. In the absence of those, we traverse up the blocks, - # and as a last resort check for a value for None - if bigMargs is None: - return (lower, upper) - - # since we check for args first, we know lower[0] and upper[0] are both - # None - need_lower = constraint.lower is not None - need_upper = constraint.upper is not None - constraint_name = constraint.getname(fully_qualified=True) - - # check for the constraint itself and its container - parent = constraint.parent_component() - if constraint in bigMargs: - m = bigMargs[constraint] - (lower, upper, - need_lower, need_upper) = self._process_M_value(m, lower, upper, - need_lower, - need_upper, - bigMargs, - constraint, - constraint_name, - from_args=True) - if not need_lower and not need_upper: - return lower, upper - elif parent in bigMargs: - m = bigMargs[parent] - (lower, upper, - need_lower, need_upper) = self._process_M_value(m, lower, upper, - need_lower, - need_upper, - bigMargs, parent, - constraint_name, - from_args=True) - if not need_lower and not need_upper: - return lower, upper - - # use the precomputed traversal up the blocks - for arg in arg_list: - for block, val in arg.items(): - (lower, upper, - need_lower, - need_upper) = self._process_M_value( val, lower, upper, - need_lower, need_upper, - bigMargs, block, - constraint_name, - from_args=True) - if not need_lower and not need_upper: - return lower, upper - - # last check for value for None! - if None in bigMargs: - m = bigMargs[None] - (lower, upper, - need_lower, need_upper) = self._process_M_value(m, lower, upper, - need_lower, - need_upper, - bigMargs, None, - constraint_name, - from_args=True) - if not need_lower and not need_upper: - return lower, upper - - return lower, upper - def _update_M_from_suffixes(self, constraint, suffix_list, lower, upper): # It's possible we found half the answer in args, but we are still # looking for half the answer. need_lower = constraint.lower is not None and lower[0] is None need_upper = constraint.upper is not None and upper[0] is None - constraint_name = constraint.getname(fully_qualified=True) M = None # first we check if the constraint or its parent is a key in any of the # suffix lists for bigm in suffix_list: if constraint in bigm: M = bigm[constraint] - (lower, upper, - need_lower, - need_upper) = self._process_M_value(M, lower, upper, - need_lower, need_upper, - bigm, constraint, - constraint_name) + (lower, upper, need_lower, need_upper) = self._process_M_value( + M, + lower, + upper, + need_lower, + need_upper, + bigm, + constraint, + constraint, + ) if not need_lower and not need_upper: return lower, upper @@ -858,12 +374,9 @@ def _update_M_from_suffixes(self, constraint, suffix_list, lower, upper): if constraint.parent_component() in bigm: parent = constraint.parent_component() M = bigm[parent] - (lower, upper, - need_lower, - need_upper) = self._process_M_value(M, lower, upper, - need_lower, need_upper, - bigm, parent, - constraint_name) + (lower, upper, need_lower, need_upper) = self._process_M_value( + M, lower, upper, need_lower, need_upper, bigm, parent, constraint + ) if not need_lower and not need_upper: return lower, upper @@ -873,77 +386,38 @@ def _update_M_from_suffixes(self, constraint, suffix_list, lower, upper): for bigm in suffix_list: if None in bigm: M = bigm[None] - (lower, upper, - need_lower, - need_upper) = self._process_M_value(M, lower, upper, - need_lower, need_upper, - bigm, None, - constraint_name) + (lower, upper, need_lower, need_upper) = self._process_M_value( + M, lower, upper, need_lower, need_upper, bigm, None, constraint + ) if not need_lower and not need_upper: return lower, upper return lower, upper - def _estimate_M(self, expr, name): - # If there are fixed variables here, unfix them for this calculation, - # and we'll restore them at the end. - fixed_vars = ComponentMap() - if not self.assume_fixed_vars_permanent: - for v in EXPR.identify_variables(expr, include_fixed=True): - if v.fixed: - fixed_vars[v] = value(v) - v.fixed = False - - expr_lb, expr_ub = compute_bounds_on_expr(expr) - if expr_lb is None or expr_ub is None: - raise GDP_Error("Cannot estimate M for unbounded " - "expressions.\n\t(found while processing " - "constraint '%s'). Please specify a value of M " - "or ensure all variables that appear in the " - "constraint are bounded." % name) - else: - M = (expr_lb, expr_ub) - - # clean up if we unfixed things (fixed_vars is empty if we were assuming - # fixed vars are fixed for life) - for v, val in fixed_vars.items(): - v.fix(val) - - return tuple(M) - - # These are all functions to retrieve transformed components from - # original ones and vice versa. - - @wraps(get_src_disjunct) - def get_src_disjunct(self, transBlock): - return get_src_disjunct(transBlock) - - @wraps(get_src_disjunction) - def get_src_disjunction(self, xor_constraint): - return get_src_disjunction(xor_constraint) - - @wraps(get_src_constraint) - def get_src_constraint(self, transformedConstraint): - return get_src_constraint(transformedConstraint) - - @wraps(get_transformed_constraints) - def get_transformed_constraints(self, srcConstraint): - return get_transformed_constraints(srcConstraint) - - @deprecated("The get_m_value_src function is deprecated. Use " - "the get_M_value_src function if you need source " - "information or the get_M_value function if you " - "only need values.", version='5.7.1') + @deprecated( + "The get_m_value_src function is deprecated. Use " + "the get_M_value_src function if you need source " + "information or the get_M_value function if you " + "only need values.", + version='5.7.1', + ) def get_m_value_src(self, constraint): transBlock = _get_constraint_transBlock(constraint) - ((lower_val, lower_source, lower_key), - (upper_val, upper_source, upper_key)) = transBlock.bigm_src[constraint] - - if constraint.lower is not None and constraint.upper is not None and \ - (not lower_source is upper_source or not lower_key is upper_key): - raise GDP_Error("This is why this method is deprecated: The lower " - "and upper M values for constraint %s came from " - "different sources, please use the get_M_value_src " - "method." % constraint.name) + ( + (lower_val, lower_source, lower_key), + (upper_val, upper_source, upper_key), + ) = transBlock.bigm_src[constraint] + + if ( + constraint.lower is not None + and constraint.upper is not None + and (not lower_source is upper_source or not lower_key is upper_key) + ): + raise GDP_Error( + "This is why this method is deprecated: The lower " + "and upper M values for constraint %s came from " + "different sources, please use the get_M_value_src " + "method." % constraint.name + ) # if source and key are equal for the two, this is representable in the # old format. if constraint.lower is not None and lower_source is not None: @@ -1014,12 +488,11 @@ def get_all_M_values_by_constraint(self, model): """ m_values = {} for disj in model.component_data_objects( - Disjunct, - active=None, - descend_into=(Block, Disjunct)): + Disjunct, active=None, descend_into=(Block, Disjunct) + ): + transBlock = disj.transformation_block # First check if it was transformed at all. - if disj.transformation_block is not None: - transBlock = disj.transformation_block() + if transBlock is not None: # If it was transformed with BigM, we get the M values. if hasattr(transBlock, 'bigm_src'): for cons in transBlock.bigm_src: @@ -1033,5 +506,7 @@ def get_largest_M_value(self, model): ---------- model: A GDP model that has been transformed with BigM """ - return max(max(abs(m) for m in m_values if m is not None) for m_values - in self.get_all_M_values_by_constraint(model).values()) + return max( + max(abs(m) for m in m_values if m is not None) + for m_values in self.get_all_M_values_by_constraint(model).values() + ) diff --git a/pyomo/gdp/plugins/bigm_mixin.py b/pyomo/gdp/plugins/bigm_mixin.py new file mode 100644 index 00000000000..ba25dfeffd0 --- /dev/null +++ b/pyomo/gdp/plugins/bigm_mixin.py @@ -0,0 +1,269 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.gdp import GDP_Error +from pyomo.common.collections import ComponentSet +from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr +from pyomo.core import Suffix + + +def _convert_M_to_tuple(M, constraint, disjunct=None): + if not isinstance(M, (tuple, list)): + if M is None: + M = (None, None) + else: + try: + M = (-M, M) + except: + logger.error( + "Error converting scalar M-value %s " + "to (-M,M). Is %s not a numeric type?" % (M, type(M)) + ) + raise + if len(M) != 2: + constraint_name = constraint.name + if disjunct is not None: + constraint_name += " relative to Disjunct %s" % disjunct.name + raise GDP_Error( + "Big-M %s for constraint %s is not of " + "length two. " + "Expected either a single value or " + "tuple or list of length two specifying M values for " + "the lower and upper sides of the constraint " + "respectively." % (str(M), constraint.name) + ) + + return M + + +def _get_bigM_suffix_list(block, stopping_block=None): + # Note that you can only specify suffixes on BlockData objects or + # ScalarBlocks. Though it is possible at this point to stick them + # on whatever components you want, we won't pick them up. + suffix_list = [] + + # go searching above block in the tree, stop when we hit stopping_block + # (This is so that we can search on each Disjunct once, but get any + # information between a constraint and its Disjunct while transforming + # the constraint). + while block is not None: + bigm = block.component('BigM') + if type(bigm) is Suffix: + suffix_list.append(bigm) + if block is stopping_block: + break + block = block.parent_block() + + return suffix_list + + +def _warn_for_unused_bigM_args(bigM, used_args, logger): + # issue warnings about anything that was in the bigM args dict that we + # didn't use + if bigM is not None: + unused_args = ComponentSet(bigM.keys()) - ComponentSet(used_args.keys()) + if len(unused_args) > 0: + warning_msg = ( + "Unused arguments in the bigM map! " + "These arguments were not used by the " + "transformation:\n" + ) + for component in unused_args: + if isinstance(component, (tuple, list)) and len(component) == 2: + warning_msg += "\t(%s, %s)\n" % ( + component[0].name, + component[1].name, + ) + elif hasattr(component, 'name'): + warning_msg += "\t%s\n" % component.name + else: + warning_msg += "\t%s\n" % component + logger.warning(warning_msg) + + +class _BigM_MixIn(object): + def _get_bigM_arg_list(self, bigm_args, block): + # Gather what we know about blocks from args exactly once. We'll still + # check for constraints in the moment, but if that fails, we've + # preprocessed the time-consuming part of traversing up the tree. + arg_list = [] + if bigm_args is None: + return arg_list + while block is not None: + if block in bigm_args: + arg_list.append({block: bigm_args[block]}) + block = block.parent_block() + return arg_list + + def _process_M_value( + self, + m, + lower, + upper, + need_lower, + need_upper, + src, + key, + constraint, + from_args=False, + ): + m = _convert_M_to_tuple(m, constraint) + if need_lower and m[0] is not None: + if from_args: + self.used_args[key] = m + lower = (m[0], src, key) + need_lower = False + if need_upper and m[1] is not None: + if from_args: + self.used_args[key] = m + upper = (m[1], src, key) + need_upper = False + return lower, upper, need_lower, need_upper + + def _get_M_from_args(self, constraint, bigMargs, arg_list, lower, upper): + # check args: we first look in the keys for constraint and + # constraintdata. In the absence of those, we traverse up the blocks, + # and as a last resort check for a value for None + if bigMargs is None: + return (lower, upper) + + # since we check for args first, we know lower[0] and upper[0] are both + # None + need_lower = constraint.lower is not None + need_upper = constraint.upper is not None + + # check for the constraint itself and its container + parent = constraint.parent_component() + if constraint in bigMargs: + m = bigMargs[constraint] + (lower, upper, need_lower, need_upper) = self._process_M_value( + m, + lower, + upper, + need_lower, + need_upper, + bigMargs, + constraint, + constraint, + from_args=True, + ) + if not need_lower and not need_upper: + return lower, upper + elif parent in bigMargs: + m = bigMargs[parent] + (lower, upper, need_lower, need_upper) = self._process_M_value( + m, + lower, + upper, + need_lower, + need_upper, + bigMargs, + parent, + constraint, + from_args=True, + ) + if not need_lower and not need_upper: + return lower, upper + + # use the precomputed traversal up the blocks + for arg in arg_list: + for block, val in arg.items(): + (lower, upper, need_lower, need_upper) = self._process_M_value( + val, + lower, + upper, + need_lower, + need_upper, + bigMargs, + block, + constraint, + from_args=True, + ) + if not need_lower and not need_upper: + return lower, upper + + # last check for value for None! + if None in bigMargs: + m = bigMargs[None] + (lower, upper, need_lower, need_upper) = self._process_M_value( + m, + lower, + upper, + need_lower, + need_upper, + bigMargs, + None, + constraint, + from_args=True, + ) + if not need_lower and not need_upper: + return lower, upper + + return lower, upper + + def _estimate_M(self, expr, constraint): + expr_lb, expr_ub = compute_bounds_on_expr( + expr, ignore_fixed=not self._config.assume_fixed_vars_permanent + ) + if expr_lb is None or expr_ub is None: + raise GDP_Error( + "Cannot estimate M for unbounded " + "expressions.\n\t(found while processing " + "constraint '%s'). Please specify a value of M " + "or ensure all variables that appear in the " + "constraint are bounded." % constraint.name + ) + else: + M = (expr_lb, expr_ub) + return tuple(M) + + def _add_constraint_expressions( + self, c, i, M, indicator_var, newConstraint, constraintMap + ): + # Since we are both combining components from multiple blocks and using + # local names, we need to make sure that the first index for + # transformedConstraints is guaranteed to be unique. We just grab the + # current length of the list here since that will be monotonically + # increasing and hence unique. We'll append it to the + # slightly-more-human-readable constraint name for something familiar + # but unique. (Note that we really could do this outside of the loop + # over the constraint indices, but I don't think it matters a lot.) + unique = len(newConstraint) + name = c.local_name + "_%s" % unique + + if c.lower is not None: + if M[0] is None: + raise GDP_Error( + "Cannot relax disjunctive constraint '%s' " + "because M is not defined." % name + ) + M_expr = M[0] * (1 - indicator_var) + newConstraint.add((name, i, 'lb'), c.lower <= c.body - M_expr) + constraintMap['transformedConstraints'][c] = [newConstraint[name, i, 'lb']] + constraintMap['srcConstraints'][newConstraint[name, i, 'lb']] = c + if c.upper is not None: + if M[1] is None: + raise GDP_Error( + "Cannot relax disjunctive constraint '%s' " + "because M is not defined." % name + ) + M_expr = M[1] * (1 - indicator_var) + newConstraint.add((name, i, 'ub'), c.body - M_expr <= c.upper) + transformed = constraintMap['transformedConstraints'].get(c) + if transformed is not None: + constraintMap['transformedConstraints'][c].append( + newConstraint[name, i, 'ub'] + ) + else: + constraintMap['transformedConstraints'][c] = [ + newConstraint[name, i, 'ub'] + ] + constraintMap['srcConstraints'][newConstraint[name, i, 'ub']] = c diff --git a/pyomo/gdp/plugins/bilinear.py b/pyomo/gdp/plugins/bilinear.py index b22bb5d535d..feacaaddefc 100644 --- a/pyomo/gdp/plugins/bilinear.py +++ b/pyomo/gdp/plugins/bilinear.py @@ -11,16 +11,27 @@ import logging -from pyomo.core import TransformationFactory, Transformation, Block, VarList, Set, SortComponents, Objective, Constraint +from pyomo.core import ( + TransformationFactory, + Transformation, + Block, + VarList, + Set, + SortComponents, + Objective, + Constraint, +) from pyomo.gdp import Disjunct, Disjunction from pyomo.repn import generate_standard_repn logger = logging.getLogger('pyomo.gdp') -@TransformationFactory.register('gdp.bilinear', doc="Creates a disjunctive model where bilinear terms are replaced with disjunctive expressions.") +@TransformationFactory.register( + 'gdp.bilinear', + doc="Creates a disjunctive model where bilinear terms are replaced with disjunctive expressions.", +) class Bilinear_Transformation(Transformation): - def __init__(self): super(Bilinear_Transformation, self).__init__() @@ -32,7 +43,9 @@ def _apply_to(self, instance, **kwds): instance.bilinear_data_.vlist = VarList() instance.bilinear_data_.vlist_boolean = [] instance.bilinear_data_.IDX = Set() - instance.bilinear_data_.disjuncts_ = Disjunct(instance.bilinear_data_.IDX*[0,1]) + instance.bilinear_data_.disjuncts_ = Disjunct( + instance.bilinear_data_.IDX * [0, 1] + ) instance.bilinear_data_.disjunction_data = {} instance.bilinear_data_.o_expr = {} instance.bilinear_data_.c_body = {} @@ -40,23 +53,32 @@ def _apply_to(self, instance, **kwds): # Iterate over all blocks # for block in instance.block_data_objects( - active=True, sort=SortComponents.deterministic ): + active=True, sort=SortComponents.deterministic + ): self._transformBlock(block, instance) + # # WEH: I wish I had a DisjunctList and DisjunctionList object... # def rule(block, i): return instance.bilinear_data_.disjunction_data[i] - instance.bilinear_data_.disjunction_ = Disjunction(instance.bilinear_data_.IDX, rule=rule) + + instance.bilinear_data_.disjunction_ = Disjunction( + instance.bilinear_data_.IDX, rule=rule + ) def _transformBlock(self, block, instance): - for component in block.component_objects(Objective, active=True, descend_into=False): + for component in block.component_objects( + Objective, active=True, descend_into=False + ): expr = self._transformExpression(component.expr, instance) - instance.bilinear_data_.o_expr[ id(component) ] = component.expr + instance.bilinear_data_.o_expr[id(component)] = component.expr component.expr = expr - for component in block.component_data_objects(Constraint, active=True, descend_into=False): + for component in block.component_data_objects( + Constraint, active=True, descend_into=False + ): expr = self._transformExpression(component.body, instance) - instance.bilinear_data_.c_body[ id(component) ] = component.body + instance.bilinear_data_.c_body[id(component)] = component.body component._body = expr def _transformExpression(self, expr, instance): @@ -81,7 +103,9 @@ def _replace_bilinear(self, expr, instance): for vars_, coef_ in zip(terms.quadratic_vars, terms.quadratic_coefs): # if vars_[0].is_binary(): - v = instance.bilinear_data_.cache.get( (id(vars_[0]),id(vars_[1])), None ) + v = instance.bilinear_data_.cache.get( + (id(vars_[0]), id(vars_[1])), None + ) if v is None: instance.bilinear_data_.vlist_boolean.append(vars_[0]) v = instance.bilinear_data_.vlist.add() @@ -92,21 +116,29 @@ def _replace_bilinear(self, expr, instance): id_ = len(instance.bilinear_data_.vlist) instance.bilinear_data_.IDX.add(id_) # First disjunct - d0 = instance.bilinear_data_.disjuncts_[id_,0] + d0 = instance.bilinear_data_.disjuncts_[id_, 0] d0.c1 = Constraint(expr=vars_[0] == 1) d0.c2 = Constraint(expr=v == vars_[1]) # Second disjunct - d1 = instance.bilinear_data_.disjuncts_[id_,1] + d1 = instance.bilinear_data_.disjuncts_[id_, 1] d1.c1 = Constraint(expr=vars_[0] == 0) d1.c2 = Constraint(expr=v == 0) # Disjunction - instance.bilinear_data_.disjunction_data[id_] = [instance.bilinear_data_.disjuncts_[id_,0], instance.bilinear_data_.disjuncts_[id_,1]] - instance.bilinear_data_.disjunction_data[id_] = [instance.bilinear_data_.disjuncts_[id_,0], instance.bilinear_data_.disjuncts_[id_,1]] + instance.bilinear_data_.disjunction_data[id_] = [ + instance.bilinear_data_.disjuncts_[id_, 0], + instance.bilinear_data_.disjuncts_[id_, 1], + ] + instance.bilinear_data_.disjunction_data[id_] = [ + instance.bilinear_data_.disjuncts_[id_, 0], + instance.bilinear_data_.disjuncts_[id_, 1], + ] # The disjunctive variable is the expression - e += coef_*v + e += coef_ * v # elif vars_[1].is_binary(): - v = instance.bilinear_data_.cache.get( (id(vars_[1]),id(vars_[0])), None ) + v = instance.bilinear_data_.cache.get( + (id(vars_[1]), id(vars_[0])), None + ) if v is None: instance.bilinear_data_.vlist_boolean.append(vars_[1]) v = instance.bilinear_data_.vlist.add() @@ -117,20 +149,22 @@ def _replace_bilinear(self, expr, instance): id_ = len(instance.bilinear_data_.vlist) instance.bilinear_data_.IDX.add(id_) # First disjunct - d0 = instance.bilinear_data_.disjuncts_[id_,0] + d0 = instance.bilinear_data_.disjuncts_[id_, 0] d0.c1 = Constraint(expr=vars_[1] == 1) d0.c2 = Constraint(expr=v == vars_[0]) # Second disjunct - d1 = instance.bilinear_data_.disjuncts_[id_,1] + d1 = instance.bilinear_data_.disjuncts_[id_, 1] d1.c1 = Constraint(expr=vars_[1] == 0) d1.c2 = Constraint(expr=v == 0) # Disjunction - instance.bilinear_data_.disjunction_data[id_] = [instance.bilinear_data_.disjuncts_[id_,0], instance.bilinear_data_.disjuncts_[id_,1]] + instance.bilinear_data_.disjunction_data[id_] = [ + instance.bilinear_data_.disjuncts_[id_, 0], + instance.bilinear_data_.disjuncts_[id_, 1], + ] # The disjunctive variable is the expression - e += coef_*v + e += coef_ * v else: # If neither variable is boolean, just reinsert the original bilinear term - e += coef_*vars_[0]*vars_[1] + e += coef_ * vars_[0] * vars_[1] # return e - diff --git a/pyomo/gdp/plugins/bound_pretransformation.py b/pyomo/gdp/plugins/bound_pretransformation.py new file mode 100644 index 00000000000..8987dbc6066 --- /dev/null +++ b/pyomo/gdp/plugins/bound_pretransformation.py @@ -0,0 +1,337 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.collections import ComponentMap +from pyomo.common.config import ConfigDict, ConfigValue +from pyomo.common.modeling import unique_component_name +from pyomo.core import ( + Any, + Block, + Constraint, + NonNegativeIntegers, + SortComponents, + value, + Var, +) +from pyomo.core.base import Transformation, TransformationFactory +from pyomo.core.expr import identify_variables +from pyomo.core.util import target_list +from pyomo.gdp import Disjunct, Disjunction, GDP_Error +from pyomo.gdp.util import is_child_of, get_gdp_tree +from pyomo.repn.standard_repn import generate_standard_repn +import logging + +logger = logging.getLogger(__name__) + + +@TransformationFactory.register( + 'gdp.bound_pretransformation', + doc="Partially transforms a GDP to a MIP by finding all disjunctive " + "constraints with common left-hand sides and transforming them according " + "to the formulation in Balas 1988", +) +class BoundPretransformation(Transformation): + """ + Implements a special case of the transformation mentioned in [1] for + handling disjunctive constraints with common left-hand sides (i.e., + Constraint bodies). Automatically detects univariate disjunctive + Constraints (bounds or equalities involving one variable), and + transforms them according to [1]. The transformed Constraints are + deactivated, but the remainder of the GDP is untouched. That is, + to completely transform the GDP, a GDP-to-MIP transformation is + needed that will transform the remaining disjunctive constraints as + well as any LogicalConstraints and the logic of the disjunctions + themselves. + + NOTE: Because this transformation allows tighter bound values higher in + the GDP hierarchy to supersede looser ones that are lower, the transformed + model will not necessarily still be valid in the case that there are + mutable Params in disjunctive variable bounds or in the transformed + Constraints and the values of those mutable Params are later changed. + Similarly, if this transformation is called when Vars are fixed, it will + only be guaranteed to be valid when those Vars remain fixed to the same + values. + + [1] Egon Balas, "On the convex hull of the union of certain polyhedra," + Operations Research Letters, vol. 7, 1988, pp. 279-283 + """ + + CONFIG = ConfigDict('gdp.bound_pretransformation') + CONFIG.declare( + 'targets', + ConfigValue( + default=None, + domain=target_list, + description="target or list of targets to transform", + doc=""" + This specifies the list of Disjunctions or Blocks to be (partially) + transformed. If None (default), the entire model is transformed. + Note that if the transformation is done out of place, the list of + targets should be attached to the model before it is cloned, and + the list will specify the targets on the cloned instance. + """, + ), + ) + transformation_name = 'bound_pretransformation' + + def __init__(self): + super().__init__() + self.logger = logger + + def _apply_to(self, instance, **kwds): + if not instance.ctype in (Block, Disjunct): + raise GDP_Error( + "Transformation called on %s of type %s. 'instance'" + " must be a ConcreteModel, Block, or Disjunct (in " + "the case of nested disjunctions)." % (instance.name, instance.ctype) + ) + + self._config = self.CONFIG(kwds.pop('options', {})) + self._config.set_value(kwds) + + targets = self._config.targets + if targets is None: + targets = (instance,) + + transformation_blocks = {} + bound_dict = ComponentMap() + self._update_bounds_from_constraints(instance, bound_dict, None, is_root=True) + # [ESJ 05/04/23]: In the future, I should think about getting my little + # trees from this tree, or asking for leaves rooted somewhere specific + # or something. Because this transformation currently does the work of + # getting the GDP tree twice... + whole_tree = get_gdp_tree(targets, instance) + for t in whole_tree.topological_sort(): + if t.ctype is Disjunction and whole_tree.in_degree(t) == 0: + self._transform_disjunction( + t, instance, bound_dict, transformation_blocks + ) + + def _transform_disjunction( + self, disjunction, instance, bound_dict, transformation_blocks + ): + # We go from root to leaves so that whenever we hit a variable, we can + # ask if the bounds we're seeing on its parent Disjunct (or if we're at + # the root, in the global scope) are looser or tighter than the bounds + # on it, and we pass the tightest ones down. For sane models, the bounds + # will tighten as we go down the tree, but that's of course not + # guaranteed since not all models are sane... + disjunctions_to_transform = set() + gdp_forest = get_gdp_tree((disjunction,), instance) + for d in gdp_forest.topological_sort(): + if d.ctype is Disjunct: + self._update_bounds_from_constraints(d, bound_dict, gdp_forest) + self._create_transformation_constraints( + disjunction, bound_dict, gdp_forest, transformation_blocks + ) + + def _get_bound_dict_for_var(self, bound_dict, v): + v_bounds = bound_dict.get(v) + if v_bounds is None: + v_bounds = bound_dict[v] = {None: (v.lb, v.ub), 'to_deactivate': set()} + return v_bounds + + def _update_bounds_from_constraints( + self, disjunct, bound_dict, gdp_forest, is_root=False + ): + bound_dict_key = None if is_root else disjunct + for constraint in disjunct.component_data_objects( + Constraint, + active=True, + descend_into=Block, + sort=SortComponents.deterministic, + ): + # Avoid walking the whole expression tree if we have more than one + # variable by just trying to get two. If we succeed at one but not + # two, then the constraint is a bound or equality constraint and we + # save it. Otherwise, we just keep going to the next constraint. + var_gen = identify_variables(constraint.body, include_fixed=False) + try: + next(var_gen) + except StopIteration: + # No variables + continue + try: + next(var_gen) + except StopIteration: + # There was one but not two: This is what we want. + repn = generate_standard_repn(constraint.body) + if not repn.is_linear(): + continue + v = repn.linear_vars[0] + v_bounds = self._get_bound_dict_for_var(bound_dict, v) + coef = repn.linear_coefs[0] + constant = repn.constant + self._update_bounds_dict( + v_bounds, + (value(constraint.lower) - constant) / coef + if constraint.lower is not None + else None, + (value(constraint.upper) - constant) / coef + if constraint.upper is not None + else None, + bound_dict_key, + gdp_forest, + ) + if not is_root: + v_bounds['to_deactivate'].add(constraint) + + def _return_nonNone_bounds(self, parent_lb, parent_ub, lb, ub): + if lb is None: + # either we replace None with None, or we fill in the parent value + # (which we know if the tightest of the ancestral values because + # we've already come down the tree once.) + lb = parent_lb + if ub is None: + ub = parent_ub + return (lb, ub) + + def _get_tightest_ancestral_bounds(self, v_bounds, disjunct, gdp_forest): + lb = None + ub = None + parent = disjunct + ancestors = set() + while lb is None or ub is None: + if parent in v_bounds: + lb, ub = self._return_nonNone_bounds(*v_bounds[parent], lb, ub) + if parent is None: + break + ancestors.add(parent) + parent = gdp_forest.parent_disjunct(parent) + # we fill in the bounds not only for 'disjunct', but also for all the + # ancestors we passed on the way up to finding the bounds. That way this + # is a shorter traversal the next time up, if there is a next time. + for ancestor in ancestors: + v_bounds[ancestor] = (lb, ub) + return v_bounds[disjunct] + + def _update_bounds_dict(self, v_bounds, lower, upper, disjunct, gdp_forest): + (lb, ub) = self._get_tightest_ancestral_bounds(v_bounds, disjunct, gdp_forest) + if lower is not None: + if lb is None or lower > lb: + # This GDP is more constrained here than it was in the parent + # Disjunct (what we would expect, usually. If it's looser, we're + # essentially just ignoring it...) + lb = lower + if upper is not None: + if ub is None or upper < ub: + # Same case as above in the UB + ub = upper + # In all other cases, there is nothing to do... The parent gives more + # information, so we just propagate that down + v_bounds[disjunct] = (lb, ub) + + def _create_transformation_constraints( + self, disjunction, bound_dict, gdp_forest, transformation_blocks + ): + trans_block = self._add_transformation_block(disjunction, transformation_blocks) + if self.transformation_name not in disjunction._transformation_map: + disjunction._transformation_map[self.transformation_name] = ComponentMap() + trans_map = disjunction._transformation_map[self.transformation_name] + for v, v_bounds in bound_dict.items(): + unique_id = len(trans_block.transformed_bound_constraints) + lb_expr = 0 + ub_expr = 0 + all_lbs = True + all_ubs = True + for disjunct in gdp_forest.leaves: + indicator_var = disjunct.binary_indicator_var + need_lb = True + need_ub = True + while need_lb or need_ub: + if disjunct in v_bounds: + (lb, ub) = v_bounds[disjunct] + if need_lb and lb is not None: + lb_expr += lb * indicator_var + need_lb = False + if need_ub and ub is not None: + ub_expr += ub * indicator_var + need_ub = False + if disjunct is None: + break + disjunct = gdp_forest.parent_disjunct(disjunct) + if need_lb: + all_lbs = False + if need_ub: + all_ubs = False + deactivate_lower = set() + deactivate_upper = set() + if all_lbs: + idx = (v.local_name + '_lb', unique_id) + trans_block.transformed_bound_constraints[idx] = lb_expr <= v + trans_map[v] = [trans_block.transformed_bound_constraints[idx]] + for c in v_bounds['to_deactivate']: + if c.upper is None: + c.deactivate() + elif c.lower is not None: + deactivate_lower.add(c) + disjunction._transformation_map + if all_ubs: + idx = (v.local_name + '_ub', unique_id + 1) + trans_block.transformed_bound_constraints[idx] = ub_expr >= v + if v in trans_map: + trans_map[v].append(trans_block.transformed_bound_constraints[idx]) + else: + trans_map[v] = [trans_block.transformed_bound_constraints[idx]] + for c in v_bounds['to_deactivate']: + if c.lower is None or c in deactivate_lower: + c.deactivate() + deactivate_lower.discard(c) + elif c.upper is not None: + deactivate_upper.add(c) + # Now we mess up the user's model, if we are only deactivating the + # lower or upper part of a constraint that has both + for c in deactivate_lower: + c.deactivate() + c.parent_block().add_component( + unique_component_name(c.parent_block(), c.local_name + '_ub'), + Constraint(expr=v <= c.upper), + ) + for c in deactivate_upper: + c.deactivate() + c.parent_block().add_component( + unique_component_name(c.parent_block(), c.local_name + '_lb'), + Constraint(expr=v >= c.lower), + ) + + def _add_transformation_block(self, disjunction, transformation_blocks): + to_block = disjunction.parent_block() + if to_block in transformation_blocks: + return transformation_blocks[to_block] + + trans_block_name = unique_component_name( + to_block, '_pyomo_gdp_%s_reformulation' % self.transformation_name + ) + transformation_blocks[to_block] = trans_block = Block() + to_block.add_component(trans_block_name, trans_block) + + trans_block.transformed_bound_constraints = Constraint( + Any * NonNegativeIntegers + ) + + return trans_block + + def get_transformed_constraints(self, v, disjunction): + if self.transformation_name not in disjunction._transformation_map: + logger.debug( + "No variable on Disjunction '%s' was transformed with the " + "gdp.%s transformation" % (disjunction.name, self.transformation_name) + ) + return [] + trans_map = disjunction._transformation_map[self.transformation_name] + if v not in trans_map: + logger.debug( + "Constraint bounding variable '%s' on Disjunction '%s' was " + "not transformed by the 'gdp.%s' transformation" + % (v.name, disjunction.name, self.transformation_name) + ) + return [] + return trans_map[v] diff --git a/pyomo/gdp/plugins/chull.py b/pyomo/gdp/plugins/chull.py index 3ddab99067d..d226c57aae7 100644 --- a/pyomo/gdp/plugins/chull.py +++ b/pyomo/gdp/plugins/chull.py @@ -10,9 +10,11 @@ # ___________________________________________________________________________ from pyomo.common.deprecation import deprecation_warning + deprecation_warning( 'The pyomo.gdp.plugins.chull module is deprecated. ' 'Import the Hull reformulation objects from pyomo.gdp.plugins.hull.', - version='5.7') + version='5.7', +) from .hull import _Deprecated_Name_Hull as ConvexHull_Transformation diff --git a/pyomo/gdp/plugins/cuttingplane.py b/pyomo/gdp/plugins/cuttingplane.py index c4d41d2c605..49d984a0712 100644 --- a/pyomo/gdp/plugins/cuttingplane.py +++ b/pyomo/gdp/plugins/cuttingplane.py @@ -17,42 +17,68 @@ """ from __future__ import division -from pyomo.common.config import (ConfigBlock, ConfigValue, - NonNegativeFloat, PositiveInt, In) +from pyomo.common.config import ( + ConfigBlock, + ConfigValue, + NonNegativeFloat, + PositiveInt, + In, +) from pyomo.common.modeling import unique_component_name -from pyomo.core import ( Any, Block, Constraint, Objective, Param, Var, - SortComponents, Transformation, TransformationFactory, - value, NonNegativeIntegers, Reals, NonNegativeReals, - Suffix, ComponentMap ) +from pyomo.core import ( + Any, + Block, + Constraint, + Objective, + Param, + Var, + SortComponents, + Transformation, + TransformationFactory, + value, + NonNegativeIntegers, + Reals, + NonNegativeReals, + Suffix, + ComponentMap, +) from pyomo.core.expr import differentiate from pyomo.common.collections import ComponentSet from pyomo.opt import SolverFactory from pyomo.repn import generate_standard_repn from pyomo.gdp import Disjunct, Disjunction, GDP_Error -from pyomo.gdp.util import ( verify_successful_solve, NORMAL, - clone_without_expression_components ) +from pyomo.gdp.util import ( + verify_successful_solve, + NORMAL, + clone_without_expression_components, +) -from pyomo.contrib.fme.fourier_motzkin_elimination import \ - Fourier_Motzkin_Elimination_Transformation +from pyomo.contrib.fme.fourier_motzkin_elimination import ( + Fourier_Motzkin_Elimination_Transformation, +) import logging logger = logging.getLogger('pyomo.gdp.cuttingplane') + def do_not_tighten(m): return m + def _get_constraint_exprs(constraints, hull_to_bigm_map): - """Returns a list of expressions which are constrain.expr translated + """Returns a list of expressions which are constrain.expr translated into the bigm space, for each constraint in constraints. """ cuts = [] for cons in constraints: - cuts.append(clone_without_expression_components( - cons.expr, substitute=hull_to_bigm_map)) + cuts.append( + clone_without_expression_components(cons.expr, substitute=hull_to_bigm_map) + ) return cuts + def _constraint_tight(model, constraint, TOL): """ Returns a list [a,b] where a is -1 if the lower bound is tight or @@ -74,24 +100,22 @@ def _constraint_tight(model, constraint, TOL): return ans + def _get_linear_approximation_expr(normal_vec, point): """Returns constraint linearly approximating constraint normal to normal_vec at point""" body = 0 for coef, v in zip(point, normal_vec): - body -= coef*v - return body >= -sum(normal_vec[idx]*v.value for (idx, v) in - enumerate(point)) + body -= coef * v + return body >= -sum(normal_vec[idx] * v.value for (idx, v) in enumerate(point)) -def _precompute_potentially_useful_constraints(transBlock_rHull, - disaggregated_vars): + +def _precompute_potentially_useful_constraints(transBlock_rHull, disaggregated_vars): instance_rHull = transBlock_rHull.model() constraints = transBlock_rHull.constraints_for_FME = [] for constraint in instance_rHull.component_data_objects( - Constraint, - active=True, - descend_into=Block, - sort=SortComponents.deterministic): + Constraint, active=True, descend_into=Block, sort=SortComponents.deterministic + ): # we don't care about anything that does not involve at least one # disaggregated variable. repn = generate_standard_repn(constraint.body) @@ -101,31 +125,41 @@ def _precompute_potentially_useful_constraints(transBlock_rHull, constraints.append(constraint) break -def create_cuts_fme(transBlock_rHull, var_info, hull_to_bigm_map, - rBigM_linear_constraints, rHull_vars, disaggregated_vars, - norm, cut_threshold, zero_tolerance, integer_arithmetic, - constraint_tolerance): + +def create_cuts_fme( + transBlock_rHull, + var_info, + hull_to_bigm_map, + rBigM_linear_constraints, + rHull_vars, + disaggregated_vars, + norm, + cut_threshold, + zero_tolerance, + integer_arithmetic, + constraint_tolerance, +): """Returns a cut which removes x* from the relaxed bigm feasible region. - Finds all the constraints which are tight at xhat (assumed to be the + Finds all the constraints which are tight at xhat (assumed to be the solution currently in instance_rHull), and calculates a composite normal vector by summing the vectors normal to each of these constraints. Then Fourier-Motzkin elimination is used to project the disaggregated variables - out of the polyhedron formed by the composite normal and the collection + out of the polyhedron formed by the composite normal and the collection of tight constraints. This results in multiple cuts, of which we select one that cuts of x* by the greatest margin, as long as that margin is - more than cut_threshold. If no cut satisfies the margin specified by + more than cut_threshold. If no cut satisfies the margin specified by cut_threshold, we return None. Parameters ----------- - transBlock_rHull: transformation blcok on relaxed hull instance + transBlock_rHull: transformation block on relaxed hull instance var_info: List of tuples (rBigM_var, rHull_var, xstar_param) - hull_to_bigm_map: For expression substition, maps id(hull_var) to - coresponding bigm var + hull_to_bigm_map: For expression substitution, maps id(hull_var) to + corresponding bigm var rBigM_linear_constraints: list of linear constraints in relaxed bigM rHull_vars: list of all variables in relaxed hull - disaggregated_vars: ComponentSet of disaggregated variables in hull + disaggregated_vars: ComponentSet of disaggregated variables in hull reformulation norm: norm used in the separation problem cut_threshold: Amount x* needs to be infeasible in generated cut in order @@ -133,9 +167,9 @@ def create_cuts_fme(transBlock_rHull, var_info, hull_to_bigm_map, zero_tolerance: Tolerance at which a float will be treated as 0 during Fourier-Motzkin elimination integer_arithmetic: boolean, whether or not to require Fourier-Motzkin - Elimination does integer arithmetic. Only possible + Elimination does integer arithmetic. Only possible when all data is integer. - constraint_tolerance: Tolerance at which we will consider a constraint + constraint_tolerance: Tolerance at which we will consider a constraint tight. """ instance_rHull = transBlock_rHull.model() @@ -143,31 +177,31 @@ def create_cuts_fme(transBlock_rHull, var_info, hull_to_bigm_map, # ever be interesting: Everything that involves at least one disaggregated # variable. if transBlock_rHull.component("constraints_for_FME") is None: - _precompute_potentially_useful_constraints( transBlock_rHull, - disaggregated_vars) + _precompute_potentially_useful_constraints(transBlock_rHull, disaggregated_vars) tight_constraints = Block() - conslist = tight_constraints.constraints = Constraint( - NonNegativeIntegers) + conslist = tight_constraints.constraints = Constraint(NonNegativeIntegers) conslist.construct() something_interesting = False for constraint in transBlock_rHull.constraints_for_FME: - multipliers = _constraint_tight(instance_rHull, constraint, - constraint_tolerance) + multipliers = _constraint_tight( + instance_rHull, constraint, constraint_tolerance + ) for multiplier in multipliers: if multiplier: something_interesting = True f = constraint.body firstDerivs = differentiate(f, wrt_list=rHull_vars) - normal_vec = [multiplier*value(_) for _ in firstDerivs] + normal_vec = [multiplier * value(_) for _ in firstDerivs] # check if constraint is linear if f.polynomial_degree() == 1: conslist[len(conslist)] = constraint.expr - else: + else: # we will use the linear approximation of this constraint at # x_hat conslist[len(conslist)] = _get_linear_approximation_expr( - normal_vec, rHull_vars) + normal_vec, rHull_vars + ) # NOTE: we now have all the tight Constraints (in the pyomo sense of the # word "Constraint"), but we are missing some variable bounds. The ones for @@ -181,14 +215,17 @@ def create_cuts_fme(transBlock_rHull, var_info, hull_to_bigm_map, return None tight_constraints.construct() - logger.info("Calling FME transformation on %s constraints to eliminate" - " %s variables" % (len(tight_constraints.constraints), - len(disaggregated_vars))) - TransformationFactory('contrib.fourier_motzkin_elimination').\ - apply_to(tight_constraints, vars_to_eliminate=disaggregated_vars, - zero_tolerance=zero_tolerance, - do_integer_arithmetic=integer_arithmetic, - projected_constraints_name="fme_constraints") + logger.info( + "Calling FME transformation on %s constraints to eliminate" + " %s variables" % (len(tight_constraints.constraints), len(disaggregated_vars)) + ) + TransformationFactory('contrib.fourier_motzkin_elimination').apply_to( + tight_constraints, + vars_to_eliminate=disaggregated_vars, + zero_tolerance=zero_tolerance, + do_integer_arithmetic=integer_arithmetic, + projected_constraints_name="fme_constraints", + ) fme_results = tight_constraints.fme_constraints projected_constraints = [cons for i, cons in fme_results.items()] @@ -212,7 +249,7 @@ def create_cuts_fme(transBlock_rHull, var_info, hull_to_bigm_map, logger.info("FME:\t Doesn't cut off x*") continue # we have found a constraint which cuts of x* by some convincing amount - # and is not already in rBigM. + # and is not already in rBigM. cuts_to_keep.append(i) # We know cut is lb <= expr and that it's violated assert len(cut.args) == 2 @@ -232,14 +269,23 @@ def create_cuts_fme(transBlock_rHull, var_info, hull_to_bigm_map, return None -def create_cuts_normal_vector(transBlock_rHull, var_info, hull_to_bigm_map, - rBigM_linear_constraints, rHull_vars, - disaggregated_vars, norm, cut_threshold, - zero_tolerance, integer_arithmetic, - constraint_tolerance): + +def create_cuts_normal_vector( + transBlock_rHull, + var_info, + hull_to_bigm_map, + rBigM_linear_constraints, + rHull_vars, + disaggregated_vars, + norm, + cut_threshold, + zero_tolerance, + integer_arithmetic, + constraint_tolerance, +): """Returns a cut which removes x* from the relaxed bigm feasible region. - Ignores all parameters except var_info and cut_threshold, and constructs + Ignores all parameters except var_info and cut_threshold, and constructs a cut at x_hat, the projection of the relaxed bigM solution x* onto the hull, which is perpendicular to the vector from x* to x_hat. @@ -249,15 +295,15 @@ def create_cuts_normal_vector(transBlock_rHull, var_info, hull_to_bigm_map, Parameters ----------- - transBlock_rHull: transformation blcok on relaxed hull instance. Ignored by + transBlock_rHull: transformation block on relaxed hull instance. Ignored by this callback. var_info: List of tuples (rBigM_var, rHull_var, xstar_param) - hull_to_bigm_map: For expression substition, maps id(hull_var) to - coresponding bigm var. Ignored by this callback + hull_to_bigm_map: For expression substitution, maps id(hull_var) to + corresponding bigm var. Ignored by this callback rBigM_linear_constraints: list of linear constraints in relaxed bigM. Ignored by this callback. rHull_vars: list of all variables in relaxed hull. Ignored by this callback. - disaggregated_vars: ComponentSet of disaggregated variables in hull + disaggregated_vars: ComponentSet of disaggregated variables in hull reformulation. Ignored by this callback norm: The norm used in the separation problem, will be used to calculate the subgradient used to generate the cut @@ -273,37 +319,42 @@ def create_cuts_normal_vector(transBlock_rHull, var_info, hull_to_bigm_map, cutexpr = 0 if norm == 2: for x_rbigm, x_hull, x_star in var_info: - cutexpr += (x_hull.value - x_star.value)*(x_rbigm - x_hull.value) + cutexpr += (x_hull.value - x_star.value) * (x_rbigm - x_hull.value) elif norm == float('inf'): duals = transBlock_rHull.model().dual if len(duals) == 0: - raise GDP_Error("No dual information in the separation problem! " - "To use the infinity norm and the " - "create_cuts_normal_vector method, you must use " - "a solver which provides dual information.") + raise GDP_Error( + "No dual information in the separation problem! " + "To use the infinity norm and the " + "create_cuts_normal_vector method, you must use " + "a solver which provides dual information." + ) i = 0 for x_rbigm, x_hull, x_star in var_info: # ESJ: We wrote this so duals will be nonnegative mu_plus = value(duals[transBlock_rHull.inf_norm_linearization[i]]) - mu_minus = value(duals[transBlock_rHull.inf_norm_linearization[i+1]]) + mu_minus = value(duals[transBlock_rHull.inf_norm_linearization[i + 1]]) assert mu_plus >= 0 assert mu_minus >= 0 - cutexpr += (mu_plus - mu_minus)*(x_rbigm - x_hull.value) + cutexpr += (mu_plus - mu_minus) * (x_rbigm - x_hull.value) i += 2 # make sure we're cutting off x* by enough. if value(cutexpr) < -cut_threshold: return [cutexpr >= 0] - logger.warning("Generated cut did not remove relaxed BigM solution by more " - "than the specified threshold of %s. Stopping cut " - "generation." % cut_threshold) + logger.warning( + "Generated cut did not remove relaxed BigM solution by more " + "than the specified threshold of %s. Stopping cut " + "generation." % cut_threshold + ) return None -def back_off_constraint_with_calculated_cut_violation(cut, transBlock_rHull, - bigm_to_hull_map, opt, - stream_solver, TOL): + +def back_off_constraint_with_calculated_cut_violation( + cut, transBlock_rHull, bigm_to_hull_map, opt, stream_solver, TOL +): """Calculates the maximum violation of cut subject to the relaxed hull - constraints. Increases this violation by TOL (to account for optimality + constraints. Increases this violation by TOL (to account for optimality tolerance in solving the problem), and, if it finds that cut can be violated up to this tolerance, makes it more conservative such that it no longer can. @@ -311,7 +362,7 @@ def back_off_constraint_with_calculated_cut_violation(cut, transBlock_rHull, ---------- cut: The cut to be made more conservative, a Constraint transBlock_rHull: the relaxed hull model's transformation Block - bigm_to_hull_map: Dictionary mapping ids of bigM variables to the + bigm_to_hull_map: Dictionary mapping ids of bigM variables to the corresponding variables on the relaxed hull instance opt: SolverFactory object for solving the maximum violation problem stream_solver: Whether or not to set tee=True while solving the maximum @@ -328,15 +379,17 @@ def back_off_constraint_with_calculated_cut_violation(cut, transBlock_rHull, transBlock_rHull.separation_objective.deactivate() transBlock_rHull.infeasibility_objective = Objective( - expr=clone_without_expression_components(cut.body, - substitute=bigm_to_hull_map)) + expr=clone_without_expression_components(cut.body, substitute=bigm_to_hull_map) + ) results = opt.solve(instance_rHull, tee=stream_solver, load_solutions=False) if verify_successful_solve(results) is not NORMAL: - logger.warning("Problem to determine how much to " - "back off the new cut " - "did not solve normally. Leaving the constraint as is, " - "which could lead to numerical trouble%s" % (results,)) + logger.warning( + "Problem to determine how much to " + "back off the new cut " + "did not solve normally. Leaving the constraint as is, " + "which could lead to numerical trouble%s" % (results,) + ) # restore the objective transBlock_rHull.del_component(transBlock_rHull.infeasibility_objective) transBlock_rHull.separation_objective.activate() @@ -352,9 +405,10 @@ def back_off_constraint_with_calculated_cut_violation(cut, transBlock_rHull, transBlock_rHull.del_component(transBlock_rHull.infeasibility_objective) transBlock_rHull.separation_objective.activate() -def back_off_constraint_by_fixed_tolerance(cut, transBlock_rHull, - bigm_to_hull_map, opt, stream_solver, - TOL): + +def back_off_constraint_by_fixed_tolerance( + cut, transBlock_rHull, bigm_to_hull_map, opt, stream_solver, TOL +): """Makes cut more conservative by absolute tolerance TOL Parameters @@ -362,7 +416,7 @@ def back_off_constraint_by_fixed_tolerance(cut, transBlock_rHull, cut: the cut to be made more conservative, a Constraint transBlock_rHull: the relaxed hull model's transformation Block. Ignored by this callback - bigm_to_hull_map: Dictionary mapping ids of bigM variables to the + bigm_to_hull_map: Dictionary mapping ids of bigM variables to the corresponding variables on the relaxed hull instance. Ignored by this callback. opt: SolverFactory object. Ignored by this callback @@ -372,10 +426,13 @@ def back_off_constraint_by_fixed_tolerance(cut, transBlock_rHull, """ cut._body += TOL -@TransformationFactory.register('gdp.cuttingplane', - doc="Relaxes a linear disjunctive model by " - "adding cuts from convex hull to Big-M " - "reformulation.") + +@TransformationFactory.register( + 'gdp.cuttingplane', + doc="Relaxes a linear disjunctive model by " + "adding cuts from convex hull to Big-M " + "reformulation.", +) class CuttingPlane_Transformation(Transformation): """Relax convex disjunctive model by forming the bigm relaxation and then iteratively adding cuts from the hull relaxation (or the hull relaxation @@ -386,7 +443,7 @@ class CuttingPlane_Transformation(Transformation): after transformation will very likely result in an invalid model. This transformation accepts the following keyword arguments: - + Parameters ---------- solver : Solver name (as string) to use to solve relaxed BigM and separation @@ -397,21 +454,21 @@ class CuttingPlane_Transformation(Transformation): cuts_name : Optional name for the IndexedConstraint containing the projected cuts (must be a unique name with respect to the instance) minimum_improvement_threshold : Stopping criterion based on improvement in - Big-M relaxation. This is the minimum + Big-M relaxation. This is the minimum difference in relaxed BigM objective values between consecutive iterations - separation_objective_threshold : Stopping criterion based on separation - objective. If separation objective is not - at least this large, cut generation will + separation_objective_threshold : Stopping criterion based on separation + objective. If separation objective is not + at least this large, cut generation will terminate. - cut_filtering_threshold : Stopping criterion based on effectiveness of the - generated cut: This is the amount by which - a cut must be violated at the relaxed bigM + cut_filtering_threshold : Stopping criterion based on effectiveness of the + generated cut: This is the amount by which + a cut must be violated at the relaxed bigM solution in order to be added to the bigM model max_number_of_cuts : The maximum number of cuts to add to the big-M model norm : norm to use in the objective of the separation problem - tighten_relaxation : callback to modify the GDP model before the hull - relaxation is taken (e.g. could be used to perform + tighten_relaxation : callback to modify the GDP model before the hull + relaxation is taken (e.g. could be used to perform basic steps) create_cuts : callback to create cuts using the solved relaxed bigM and hull problems @@ -428,79 +485,92 @@ class CuttingPlane_Transformation(Transformation): By default, the callbacks will be set such that the algorithm performed is as presented in [1], but with an additional post-processing procedure to - reduce numerical error, which calculates the maximum violation of the cut - subject to the relaxed hull constraints, and then pads the constraint by + reduce numerical error, which calculates the maximum violation of the cut + subject to the relaxed hull constraints, and then pads the constraint by this violation plus an additional user-specified tolerance. In addition, the create_cuts_fme function provides an (exponential time) - method of generating cuts which reduces numerical error (and can eliminate - it if all data is integer). It collects the hull constraints which are - tight at the solution of the separation problem. It creates a cut in the - extended space perpendicular to a composite normal vector created by - summing the directions normal to these constraints. It then performs + method of generating cuts which reduces numerical error (and can eliminate + it if all data is integer). It collects the hull constraints which are + tight at the solution of the separation problem. It creates a cut in the + extended space perpendicular to a composite normal vector created by + summing the directions normal to these constraints. It then performs fourier-motzkin elimination on the collection of constraints and the cut to project out the disaggregated variables. The resulting constraint which is most violated by the relaxed bigM solution is then returned. References ---------- - [1] Sawaya, N. W., Grossmann, I. E. (2005). A cutting plane method for + [1] Sawaya, N. W., Grossmann, I. E. (2005). A cutting plane method for solving linear generalized disjunctive programming problems. Computers - and Chemical Engineering, 29, 1891-1913 + and Chemical Engineering, 29, 1891-1913 """ CONFIG = ConfigBlock("gdp.cuttingplane") - CONFIG.declare('solver', ConfigValue( - default='ipopt', - domain=str, - description="""Solver to use for relaxed BigM problem and the separation + CONFIG.declare( + 'solver', + ConfigValue( + default='ipopt', + domain=str, + description="""Solver to use for relaxed BigM problem and the separation problem""", - doc=""" + doc=""" This specifies the solver which will be used to solve LP relaxation of the BigM problem and the separation problem. Note that this solver must be able to handle a quadratic objective because of the separation problem. - """ - )) - CONFIG.declare('minimum_improvement_threshold', ConfigValue( - default=0.01, - domain=NonNegativeFloat, - description="Threshold value for difference in relaxed bigM problem " - "objectives used to decide when to stop adding cuts", - doc=""" + """, + ), + ) + CONFIG.declare( + 'minimum_improvement_threshold', + ConfigValue( + default=0.01, + domain=NonNegativeFloat, + description="Threshold value for difference in relaxed bigM problem " + "objectives used to decide when to stop adding cuts", + doc=""" If the difference between the objectives in two consecutive iterations is less than this value, the algorithm terminates without adding the cut generated in the last iteration. - """ - )) - CONFIG.declare('separation_objective_threshold', ConfigValue( - default=0.01, - domain=NonNegativeFloat, - description="Threshold value used to decide when to stop adding cuts: " - "If separation problem objective is not at least this quantity, cut " - "generation will terminate.", - doc=""" + """, + ), + ) + CONFIG.declare( + 'separation_objective_threshold', + ConfigValue( + default=0.01, + domain=NonNegativeFloat, + description="Threshold value used to decide when to stop adding cuts: " + "If separation problem objective is not at least this quantity, cut " + "generation will terminate.", + doc=""" If the separation problem objective (distance between relaxed bigM solution and its projection onto the relaxed hull feasible region) does not exceed this threshold, the algorithm will terminate. - """ - )) - CONFIG.declare('max_number_of_cuts', ConfigValue( - default=100, - domain=PositiveInt, - description="The maximum number of cuts to add before the algorithm " - "terminates.", - doc=""" + """, + ), + ) + CONFIG.declare( + 'max_number_of_cuts', + ConfigValue( + default=100, + domain=PositiveInt, + description="The maximum number of cuts to add before the algorithm " + "terminates.", + doc=""" If the algorithm does not terminate due to another criterion first, cut generation will stop after adding this many cuts. - """ - )) - CONFIG.declare('norm', ConfigValue( - default=2, - domain=In([2, float('inf')]), - description="Norm to use in the separation problem: 2, or " - "float('inf')", - doc=""" + """, + ), + ) + CONFIG.declare( + 'norm', + ConfigValue( + default=2, + domain=In([2, float('inf')]), + description="Norm to use in the separation problem: 2, or float('inf')", + doc=""" Norm used to calculate distance in the objective of the separation problem which finds the nearest point on the hull relaxation region to the current solution of the relaxed bigm problem. @@ -508,39 +578,51 @@ class CuttingPlane_Transformation(Transformation): Supported norms are the Euclidean norm (specify 2) and the infinity norm (specify float('inf')). Note that the first makes the separation problem objective quadratic and the latter makes it linear. - """ - )) - CONFIG.declare('verbose', ConfigValue( - default=False, - domain=bool, - description="Flag to enable verbose output", - doc=""" + """, + ), + ) + CONFIG.declare( + 'verbose', + ConfigValue( + default=False, + domain=bool, + description="Flag to enable verbose output", + doc=""" If True, prints subproblem solutions, as well as potential and added cuts during algorithm. If False, only the relaxed BigM objective and minimal information about cuts is logged. - """ - )) - CONFIG.declare('stream_solver', ConfigValue( - default=False, - domain=bool, - description="""If true, sets tee=True for every solve performed over - "the course of the algorithm""" - )) - CONFIG.declare('solver_options', ConfigBlock( - implicit=True, - description="Dictionary of solver options", - doc=""" + """, + ), + ) + CONFIG.declare( + 'stream_solver', + ConfigValue( + default=False, + domain=bool, + description="""If true, sets tee=True for every solve performed over + "the course of the algorithm""", + ), + ) + CONFIG.declare( + 'solver_options', + ConfigBlock( + implicit=True, + description="Dictionary of solver options", + doc=""" Dictionary of solver options that will be set for the solver for both the relaxed BigM and separation problem solves. - """ - )) - CONFIG.declare('tighten_relaxation', ConfigValue( - default=do_not_tighten, - description="Callback which takes the GDP formulation and returns a " - "GDP formulation with a tighter hull relaxation", - doc=""" + """, + ), + ) + CONFIG.declare( + 'tighten_relaxation', + ConfigValue( + default=do_not_tighten, + description="Callback which takes the GDP formulation and returns a " + "GDP formulation with a tighter hull relaxation", + doc=""" Function which accepts the GDP formulation of the problem and returns a GDP formulation which the transformation will then take the hull reformulation of. @@ -548,24 +630,27 @@ class CuttingPlane_Transformation(Transformation): Most typically, this callback would be used to apply basic steps before taking the hull reformulation, but anything which tightens the GDP can be performed here. - """ - )) - CONFIG.declare('create_cuts', ConfigValue( - default=create_cuts_normal_vector, - description="Callback which generates a list of cuts, given the solved " - "relaxed bigM and relaxed hull solutions. If no cuts can be " - "generated, returns None", - doc=""" + """, + ), + ) + CONFIG.declare( + 'create_cuts', + ConfigValue( + default=create_cuts_normal_vector, + description="Callback which generates a list of cuts, given the solved " + "relaxed bigM and relaxed hull solutions. If no cuts can be " + "generated, returns None", + doc=""" Callback to generate cuts to be added to the bigM problem based on solutions to the relaxed bigM problem and the separation problem. Arguments --------- transBlock_rBigm: transformation block on relaxed bigM instance - transBlock_rHull: transformation blcok on relaxed hull instance + transBlock_rHull: transformation block on relaxed hull instance var_info: List of tuples (rBigM_var, rHull_var, xstar_param) - hull_to_bigm_map: For expression substition, maps id(hull_var) to - coresponding bigm var + hull_to_bigm_map: For expression substitution, maps id(hull_var) to + corresponding bigm var rBigM_linear_constraints: list of linear constraints in relaxed bigM rHull_vars: list of all variables in relaxed hull disaggregated_vars: ComponentSet of disaggregated variables in hull @@ -578,14 +663,17 @@ class CuttingPlane_Transformation(Transformation): ------- list of cuts to be added to bigM problem (and relaxed bigM problem), represented as expressions using variables from the bigM model - """ - )) - CONFIG.declare('post_process_cut', ConfigValue( - default=back_off_constraint_with_calculated_cut_violation, - description="Callback which takes a generated cut and post processes " - "it, presumably to back it off in the case of numerical error. Set to " - "None if not post-processing is desired.", - doc=""" + """, + ), + ) + CONFIG.declare( + 'post_process_cut', + ConfigValue( + default=back_off_constraint_with_calculated_cut_violation, + description="Callback which takes a generated cut and post processes " + "it, presumably to back it off in the case of numerical error. Set to " + "None if not post-processing is desired.", + doc=""" Callback to adjust a cut returned from create_cuts before adding it to the model, presumably to make it more conservative in case of numerical error. @@ -603,62 +691,77 @@ class CuttingPlane_Transformation(Transformation): Returns ------- None, modifies the cut in place - """ - )) + """, + ), + ) # back off problem tolerance (on top of the solver's (sometimes)) - CONFIG.declare('back_off_problem_tolerance', ConfigValue( - default=1e-8, - domain=NonNegativeFloat, - description="Tolerance to pass to the post_process_cut callback.", - doc=""" + CONFIG.declare( + 'back_off_problem_tolerance', + ConfigValue( + default=1e-8, + domain=NonNegativeFloat, + description="Tolerance to pass to the post_process_cut callback.", + doc=""" Tolerance passed to the post_process_cut callback. Depending on the callback, different values could make sense, but something on the order of the solver's optimality or constraint tolerances is appropriate. - """ - )) - CONFIG.declare('cut_filtering_threshold', ConfigValue( - default=0.001, - domain=NonNegativeFloat, - description="Tolerance used to decide if a cut removes x* from the " - "relaxed BigM problem by enough to be added to the bigM problem.", - doc=""" + """, + ), + ) + CONFIG.declare( + 'cut_filtering_threshold', + ConfigValue( + default=0.001, + domain=NonNegativeFloat, + description="Tolerance used to decide if a cut removes x* from the " + "relaxed BigM problem by enough to be added to the bigM problem.", + doc=""" Absolute tolerance used to decide whether to keep a cut. We require that, when evaluated at x* (the relaxed BigM optimal solution), the cut be infeasible by at least this tolerance. - """ - )) - CONFIG.declare('zero_tolerance', ConfigValue( - default=1e-9, - domain=NonNegativeFloat, - description="Tolerance at which floats are assumed to be 0 while " - "performing Fourier-Motzkin elimination", - doc=""" + """, + ), + ) + CONFIG.declare( + 'zero_tolerance', + ConfigValue( + default=1e-9, + domain=NonNegativeFloat, + description="Tolerance at which floats are assumed to be 0 while " + "performing Fourier-Motzkin elimination", + doc=""" Only relevant when create_cuts=create_cuts_fme, this sets the zero_tolerance option for the Fourier-Motzkin elimination transformation. - """ - )) - CONFIG.declare('do_integer_arithmetic', ConfigValue( - default=False, - domain=bool, - description="Only relevant if using Fourier-Motzkin Elimination (FME) " - "and if all problem data is integer, requires FME transformation to " - "perform integer arithmetic to eliminate numerical error.", - doc=""" + """, + ), + ) + CONFIG.declare( + 'do_integer_arithmetic', + ConfigValue( + default=False, + domain=bool, + description="Only relevant if using Fourier-Motzkin Elimination (FME) " + "and if all problem data is integer, requires FME transformation to " + "perform integer arithmetic to eliminate numerical error.", + doc=""" Only relevant when create_cuts=create_cuts_fme and if all problem data is integer, this sets the do_integer_arithmetic flag to true for the FME transformation, meaning that the projection to the big-M space can be done with exact precision. - """ - )) - CONFIG.declare('cuts_name', ConfigValue( - default=None, - domain=str, - description="Optional name for the IndexedConstraint containing the " - "projected cuts. Must be a unique name with respect to the " - "instance.", - doc=""" + """, + ), + ) + CONFIG.declare( + 'cuts_name', + ConfigValue( + default=None, + domain=str, + description="Optional name for the IndexedConstraint containing the " + "projected cuts. Must be a unique name with respect to the " + "instance.", + doc=""" Optional name for the IndexedConstraint containing the projected constraints. If not specified, the cuts will be stored on a private block created by the transformation, so if you want access @@ -666,22 +769,27 @@ class CuttingPlane_Transformation(Transformation): Must be a string which is a unique component name with respect to the Block on which the transformation is called. - """ - )) - CONFIG.declare('tight_constraint_tolerance', ConfigValue( - default=1e-6, # Gurobi constraint tolerance - domain=NonNegativeFloat, - description="Tolerance at which a constraint is considered tight for " - "the Fourier-Motzkin cut generation procedure.", - doc=""" + """, + ), + ) + CONFIG.declare( + 'tight_constraint_tolerance', + ConfigValue( + default=1e-6, # Gurobi constraint tolerance + domain=NonNegativeFloat, + description="Tolerance at which a constraint is considered tight for " + "the Fourier-Motzkin cut generation procedure.", + doc=""" For a constraint a^Tx <= b, the Fourier-Motzkin cut generation procedure will consider the constraint tight (and add it to the set of constraints being projected) when a^Tx - b is less than this tolerance. It is recommended to set this tolerance to the constraint tolerance of the solver being used. - """ - )) + """, + ), + ) + def __init__(self): super(CuttingPlane_Transformation, self).__init__() @@ -700,18 +808,22 @@ def _apply_to(self, instance, bigM=None, **kwds): else: self.verbose = False - (instance_rBigM, cuts_obj, instance_rHull, var_info, - transBlockName) = self._setup_subproblems( instance, bigM, - self._config.\ - tighten_relaxation) + ( + instance_rBigM, + cuts_obj, + instance_rHull, + var_info, + transBlockName, + ) = self._setup_subproblems(instance, bigM, self._config.tighten_relaxation) - self._generate_cuttingplanes( instance_rBigM, cuts_obj, - instance_rHull, var_info, - transBlockName) + self._generate_cuttingplanes( + instance_rBigM, cuts_obj, instance_rHull, var_info, transBlockName + ) # restore integrality - TransformationFactory('core.relax_integer_vars').apply_to(instance, - undo=True) + TransformationFactory('core.relax_integer_vars').apply_to( + instance, undo=True + ) finally: del self._config del self.verbose @@ -725,10 +837,13 @@ def _setup_subproblems(self, instance, bigM, tighten_relaxation_callback): # We store a list of all vars so that we can efficiently # generate maps among the subproblems - transBlock.all_vars = list(v for v in instance.component_data_objects( - Var, - descend_into=(Block, Disjunct), - sort=SortComponents.deterministic) if not v.is_fixed()) + transBlock.all_vars = list( + v + for v in instance.component_data_objects( + Var, descend_into=(Block, Disjunct), sort=SortComponents.deterministic + ) + if not v.is_fixed() + ) # we'll store all the cuts we add together nm = self._config.cuts_name @@ -737,9 +852,11 @@ def _setup_subproblems(self, instance, bigM, tighten_relaxation_callback): else: # check that this really is an available name if instance.component(nm) is not None: - raise GDP_Error("cuts_name was specified as '%s', but this is " - "already a component on the instance! Please " - "specify a unique name." % nm) + raise GDP_Error( + "cuts_name was specified as '%s', but this is " + "already a component on the instance! Please " + "specify a unique name." % nm + ) instance.add_component(nm, Constraint(NonNegativeIntegers)) cuts_obj = instance.component(nm) @@ -754,8 +871,7 @@ def _setup_subproblems(self, instance, bigM, tighten_relaxation_callback): # tighter_instance = tighten_relaxation_callback(instance) instance_rHull = hullRelaxation.create_using(tighter_instance) - relaxIntegrality.apply_to(instance_rHull, - transform_deactivated_blocks=True) + relaxIntegrality.apply_to(instance_rHull, transform_deactivated_blocks=True) # # Reformulate the instance using the BigM relaxation (this will @@ -777,8 +893,9 @@ def _setup_subproblems(self, instance, bigM, tighten_relaxation_callback): # this will hold the solution to rbigm each time we solve it. We # add it to the transformation block so that we don't have to # worry about name conflicts. - transBlock_rHull.xstar = Param( range(len(transBlock.all_vars)), - mutable=True, default=0, within=Reals) + transBlock_rHull.xstar = Param( + range(len(transBlock.all_vars)), mutable=True, default=0, within=Reals + ) # we will add a block that we will deactivate to use to store the # extended space cuts. We never need to solve these, but we need them to @@ -792,10 +909,13 @@ def _setup_subproblems(self, instance, bigM, tighten_relaxation_callback): # instances and the xstar parameter. # var_info = [ - (v, # this is the bigM variable - transBlock_rHull.all_vars[i], - transBlock_rHull.xstar[i]) - for i,v in enumerate(transBlock.all_vars)] + ( + v, # this is the bigM variable + transBlock_rHull.all_vars[i], + transBlock_rHull.xstar[i], + ) + for i, v in enumerate(transBlock.all_vars) + ] # NOTE: we wait to add the separation objective to the rHull problem # because it is best to do it in the first iteration, so that we can @@ -806,37 +926,37 @@ def _setup_subproblems(self, instance, bigM, tighten_relaxation_callback): # this is the map that I need to translate my projected cuts and add # them to bigM def _create_hull_to_bigm_substitution_map(self, var_info): - return dict((id(var_info[i][1]), var_info[i][0]) for i in - range(len(var_info))) + return dict((id(var_info[i][1]), var_info[i][0]) for i in range(len(var_info))) # this map is needed to solve the back-off problem for post-processing - def _create_bigm_to_hull_substition_map(self, var_info): - return dict((id(var_info[i][0]), var_info[i][1]) for i in - range(len(var_info))) + def _create_bigm_to_hull_substitution_map(self, var_info): + return dict((id(var_info[i][0]), var_info[i][1]) for i in range(len(var_info))) def _get_disaggregated_vars(self, hull): disaggregatedVars = ComponentSet() - for disjunction in hull.component_data_objects( Disjunction, - descend_into=(Disjunct, - Block)): + for disjunction in hull.component_data_objects( + Disjunction, descend_into=(Disjunct, Block) + ): for disjunct in disjunction.disjuncts: - if disjunct.transformation_block is not None: - transBlock = disjunct.transformation_block() - for v in transBlock.disaggregatedVars.\ - component_data_objects(Var): + transBlock = disjunct.transformation_block + if transBlock is not None: + for v in transBlock.disaggregatedVars.component_data_objects(Var): disaggregatedVars.add(v) - + return disaggregatedVars def _get_rBigM_obj_and_constraints(self, instance_rBigM): # We try to grab the first active objective. If there is more # than one, the writer will yell when we try to solve below. If # there are 0, we will yell here. - rBigM_obj = next(instance_rBigM.component_data_objects( - Objective, active=True), None) + rBigM_obj = next( + instance_rBigM.component_data_objects(Objective, active=True), None + ) if rBigM_obj is None: - raise GDP_Error("Cannot apply cutting planes transformation " - "without an active objective in the model!") + raise GDP_Error( + "Cannot apply cutting planes transformation " + "without an active objective in the model!" + ) # # Collect all of the linear constraints that are in the rBigM @@ -848,10 +968,11 @@ def _get_rBigM_obj_and_constraints(self, instance_rBigM): fme = TransformationFactory('contrib.fourier_motzkin_elimination') rBigM_linear_constraints = [] for cons in instance_rBigM.component_data_objects( - Constraint, - descend_into=Block, - sort=SortComponents.deterministic, - active=True): + Constraint, + descend_into=Block, + sort=SortComponents.deterministic, + active=True, + ): body = cons.body if body.polynomial_degree() != 1: # We will never get a nonlinear constraint out of FME, so we @@ -868,9 +989,9 @@ def _get_rBigM_obj_and_constraints(self, instance_rBigM): return rBigM_obj, rBigM_linear_constraints - def _generate_cuttingplanes( self, instance_rBigM, cuts_obj, instance_rHull, - var_info, transBlockName): - + def _generate_cuttingplanes( + self, instance_rBigM, cuts_obj, instance_rHull, var_info, transBlockName + ): opt = SolverFactory(self._config.solver) stream_solver = self._config.stream_solver opt.options = dict(self._config.solver_options) @@ -882,32 +1003,35 @@ def _generate_cuttingplanes( self, instance_rBigM, cuts_obj, instance_rHull, transBlock_rHull = instance_rHull.component(transBlockName) - rBigM_obj, rBigM_linear_constraints = self.\ - _get_rBigM_obj_and_constraints( - instance_rBigM) + rBigM_obj, rBigM_linear_constraints = self._get_rBigM_obj_and_constraints( + instance_rBigM + ) # Get list of all variables in the rHull model which we will use when # calculating the composite normal vector. - rHull_vars = [i for i in instance_rHull.component_data_objects( - Var, - descend_into=Block, - sort=SortComponents.deterministic)] + rHull_vars = [ + i + for i in instance_rHull.component_data_objects( + Var, descend_into=Block, sort=SortComponents.deterministic + ) + ] # collect a list of disaggregated variables. - disaggregated_vars = self._get_disaggregated_vars( instance_rHull) + disaggregated_vars = self._get_disaggregated_vars(instance_rHull) hull_to_bigm_map = self._create_hull_to_bigm_substitution_map(var_info) - bigm_to_hull_map = self._create_bigm_to_hull_substition_map(var_info) + bigm_to_hull_map = self._create_bigm_to_hull_substitution_map(var_info) xhat = ComponentMap() - while (improving): + while improving: # solve rBigM, solution is xstar - results = opt.solve(instance_rBigM, tee=stream_solver, - load_solutions=False) + results = opt.solve(instance_rBigM, tee=stream_solver, load_solutions=False) if verify_successful_solve(results) is not NORMAL: - logger.warning("Relaxed BigM subproblem " - "did not solve normally. Stopping cutting " - "plane generation.\n\n%s" % (results,)) + logger.warning( + "Relaxed BigM subproblem " + "did not solve normally. Stopping cutting " + "plane generation.\n\n%s" % (results,) + ) return instance_rBigM.solutions.load_from(results) @@ -922,7 +1046,7 @@ def _generate_cuttingplanes( self, instance_rBigM, cuts_obj, instance_rHull, # if transBlock_rHull.component("separation_objective") is None: self._add_separation_objective(var_info, transBlock_rHull) - + # copy over xstar logger.info("x* is:") for x_rbigm, x_hull, x_star in var_info: @@ -931,9 +1055,10 @@ def _generate_cuttingplanes( self, instance_rBigM, cuts_obj, instance_rHull, # initialize the X values x_hull.set_value(x_rbigm.value, skip_validation=True) if self.verbose: - logger.info("\t%s = %s" % - (x_rbigm.getname(fully_qualified=True), - x_rbigm.value)) + logger.info( + "\t%s = %s" + % (x_rbigm.getname(fully_qualified=True), x_rbigm.value) + ) # compare objectives: check absolute difference close to 0, relative # difference further from 0. @@ -941,20 +1066,26 @@ def _generate_cuttingplanes( self, instance_rBigM, cuts_obj, instance_rHull, improving = True else: obj_diff = prev_obj - rBigM_objVal - improving = ( abs(obj_diff) > epsilon if abs(rBigM_objVal) < 1 - else abs(obj_diff/prev_obj) > epsilon ) + improving = ( + abs(obj_diff) > epsilon + if abs(rBigM_objVal) < 1 + else abs(obj_diff / prev_obj) > epsilon + ) # solve separation problem to get xhat. - results = opt.solve(instance_rHull, tee=stream_solver, - load_solutions=False) + results = opt.solve(instance_rHull, tee=stream_solver, load_solutions=False) if verify_successful_solve(results) is not NORMAL: - logger.warning("Hull separation subproblem " - "did not solve normally. Stopping cutting " - "plane generation.\n\n%s" % (results,)) + logger.warning( + "Hull separation subproblem " + "did not solve normally. Stopping cutting " + "plane generation.\n\n%s" % (results,) + ) return instance_rHull.solutions.load_from(results) - logger.warning("separation problem objective value: %s" % - value(transBlock_rHull.separation_objective)) + logger.warning( + "separation problem objective value: %s" + % value(transBlock_rHull.separation_objective) + ) # save xhat to initialize rBigM with in the next iteration if self.verbose: @@ -962,43 +1093,51 @@ def _generate_cuttingplanes( self, instance_rBigM, cuts_obj, instance_rHull, for x_rbigm, x_hull, x_star in var_info: xhat[x_rbigm] = value(x_hull) if self.verbose: - logger.info("\t%s = %s" % - (x_hull.getname(fully_qualified=True), - x_hull.value)) + logger.info( + "\t%s = %s" + % (x_hull.getname(fully_qualified=True), x_hull.value) + ) # [JDS 19 Dec 18] Note: we check that the separation objective was # significantly nonzero. If it is too close to zero, either the # rBigM solution was in the convex hull, or the separation vector is # so close to zero that the resulting cut is likely to have # numerical issues. - if value(transBlock_rHull.separation_objective) < \ - self._config.separation_objective_threshold: - logger.warning("Separation problem objective below threshold of" - " %s: Stopping cut generation." % - self._config.separation_objective_threshold) + if ( + value(transBlock_rHull.separation_objective) + < self._config.separation_objective_threshold + ): + logger.warning( + "Separation problem objective below threshold of" + " %s: Stopping cut generation." + % self._config.separation_objective_threshold + ) break - cuts = self._config.create_cuts(transBlock_rHull, var_info, - hull_to_bigm_map, - rBigM_linear_constraints, - rHull_vars, disaggregated_vars, - self._config.norm, - self._config.cut_filtering_threshold, - self._config.zero_tolerance, - self._config.do_integer_arithmetic, - self._config.\ - tight_constraint_tolerance) - + cuts = self._config.create_cuts( + transBlock_rHull, + var_info, + hull_to_bigm_map, + rBigM_linear_constraints, + rHull_vars, + disaggregated_vars, + self._config.norm, + self._config.cut_filtering_threshold, + self._config.zero_tolerance, + self._config.do_integer_arithmetic, + self._config.tight_constraint_tolerance, + ) + # We are done if the cut generator couldn't return a valid cut if cuts is None: - logger.warning("Did not generate a valid cut, stopping cut " - "generation.") + logger.warning("Did not generate a valid cut, stopping cut generation.") break if not improving: - logger.warning("Difference in relaxed BigM problem objective " - "values from past two iterations is below " - "threshold of %s: Stopping cut generation." % - epsilon) + logger.warning( + "Difference in relaxed BigM problem objective " + "values from past two iterations is below " + "threshold of %s: Stopping cut generation." % epsilon + ) break for cut in cuts: @@ -1008,14 +1147,18 @@ def _generate_cuttingplanes( self, instance_rBigM, cuts_obj, instance_rHull, cuts_obj.add(cut_number, cut) if self._config.post_process_cut is not None: self._config.post_process_cut( - cuts_obj[cut_number], transBlock_rHull, - bigm_to_hull_map, opt, stream_solver, - self._config.back_off_problem_tolerance) + cuts_obj[cut_number], + transBlock_rHull, + bigm_to_hull_map, + opt, + stream_solver, + self._config.back_off_problem_tolerance, + ) if cut_number + 1 == self._config.max_number_of_cuts: logger.warning("Reached maximum number of cuts.") break - + prev_obj = rBigM_objVal # Initialize rbigm with xhat (for the next iteration) @@ -1026,16 +1169,15 @@ def _add_transformation_block(self, instance): # creates transformation block with a unique name based on name, adds it # to instance, and returns it. transBlockName = unique_component_name( - instance, - '_pyomo_gdp_cuttingplane_transformation') + instance, '_pyomo_gdp_cuttingplane_transformation' + ) transBlock = Block() instance.add_component(transBlockName, transBlock) return transBlockName, transBlock - def _add_separation_objective(self, var_info, transBlock_rHull): # creates the separation objective. That is just adding an objective for - # Euclidean norm, it means adding an auxilary variable to linearize the + # Euclidean norm, it means adding an auxiliary variable to linearize the # L-infinity norm. We do this assuming that rBigM has been solved, and # if any variables come back stale, we leave them out of the separation # problem, as they aren't doing anything and they could cause numerical @@ -1051,37 +1193,40 @@ def _add_separation_objective(self, var_info, transBlock_rHull): obj_expr = 0 for i, (x_rbigm, x_hull, x_star) in enumerate(var_info): if not x_rbigm.stale: - obj_expr += (x_hull - x_star)**2 + obj_expr += (x_hull - x_star) ** 2 else: if self.verbose: - logger.info("The variable %s will not be included in " - "the separation problem: It was stale in " - "the rBigM solve." % x_rbigm.getname( - fully_qualified=True)) + logger.info( + "The variable %s will not be included in " + "the separation problem: It was stale in " + "the rBigM solve." % x_rbigm.getname(fully_qualified=True) + ) to_delete.append(i) elif norm == float('inf'): u = transBlock_rHull.u = Var(domain=NonNegativeReals) inf_cons = transBlock_rHull.inf_norm_linearization = Constraint( - NonNegativeIntegers) + NonNegativeIntegers + ) i = 0 for j, (x_rbigm, x_hull, x_star) in enumerate(var_info): if not x_rbigm.stale: # NOTE: these are written as >= constraints so that we know # the duals will come back nonnegative. - inf_cons[i] = u - x_hull >= - x_star - inf_cons[i+1] = u + x_hull >= x_star + inf_cons[i] = u - x_hull >= -x_star + inf_cons[i + 1] = u + x_hull >= x_star i += 2 else: if self.verbose: - logger.info("The variable %s will not be included in " - "the separation problem: It was stale in " - "the rBigM solve." % x_rbigm.getname( - fully_qualified=True)) + logger.info( + "The variable %s will not be included in " + "the separation problem: It was stale in " + "the rBigM solve." % x_rbigm.getname(fully_qualified=True) + ) to_delete.append(j) # we'll need the duals of these to get the subgradient self._add_dual_suffix(transBlock_rHull.model()) obj_expr = u - + # delete the unneeded x_stars so that we don't add cuts involving # useless variables later. for i in sorted(to_delete, reverse=True): diff --git a/pyomo/gdp/plugins/fix_disjuncts.py b/pyomo/gdp/plugins/fix_disjuncts.py index 69768a43022..933a73c0622 100644 --- a/pyomo/gdp/plugins/fix_disjuncts.py +++ b/pyomo/gdp/plugins/fix_disjuncts.py @@ -4,21 +4,38 @@ import logging from math import fabs -from pyomo.common.config import ConfigBlock, NonNegativeFloat +from pyomo.common.config import ConfigDict, ConfigValue, NonNegativeFloat +from pyomo.contrib.cp.transform.logical_to_disjunctive_program import ( + LogicalToDisjunctive, +) from pyomo.core.base import Transformation, TransformationFactory from pyomo.core.base.block import Block from pyomo.core.expr.numvalue import value from pyomo.gdp import GDP_Error from pyomo.gdp.disjunct import Disjunct, Disjunction +from pyomo.gdp.plugins.bigm import BigM_Transformation logger = logging.getLogger('pyomo.gdp.fix_disjuncts') +def _transformation_name_or_object(transformation_name_or_object): + if isinstance(transformation_name_or_object, Transformation): + return transformation_name_or_object + xform = TransformationFactory(transformation_name_or_object) + if xform is None: + raise ValueError( + "Expected valid name for a registered Pyomo transformation. " + "\n\tRecieved: %s" % transformation_name_or_object + ) + return xform + + @TransformationFactory.register( 'gdp.fix_disjuncts', doc="""Fix disjuncts to their current Boolean values and transforms any LogicalConstraints and BooleanVars so that the resulting model is a - (MI)(N)LP.""") + (MI)(N)LP.""", +) class GDP_Disjunct_Fixer(Transformation): """Fix disjuncts to their current Boolean values. @@ -35,9 +52,27 @@ def __init__(self, **kwargs): # standardized. super(GDP_Disjunct_Fixer, self).__init__(**kwargs) - CONFIG = ConfigBlock("gdp.fix_disjuncts") + CONFIG = ConfigDict("gdp.fix_disjuncts") + CONFIG.declare( + "GDP_to_MIP_transformation", + ConfigValue( + default=BigM_Transformation(), + domain=_transformation_name_or_object, + description="The name of the transformation to call after the " + "'logical_to_disjunctive' transformation in order to finish " + "transforming to a MI(N)LP.", + doc=""" + If there are no logical constraints on the model being transformed, + this option is not used. However, if there are logical constraints + that involve mixtures of Boolean and integer variables, this option + specifies the transformation to use to transform the model with fixed + Disjuncts to a MI(N)LP. Uses 'gdp.bigm' by default. + """, + ), + ) + def _apply_to(self, model, **kwds): - """Fix all disjuncts in the given model and reclassify them to + """Fix all disjuncts in the given model and reclassify them to Blocks.""" config = self.config = self.CONFIG(kwds.pop('options', {})) config.set_value(kwds) @@ -45,25 +80,31 @@ def _apply_to(self, model, **kwds): self._transformContainer(model) # Reclassify all disjuncts - for disjunct_object in model.component_objects(Disjunct, - descend_into=(Block, - Disjunct)): + for disjunct_object in model.component_objects( + Disjunct, descend_into=(Block, Disjunct) + ): disjunct_object.parent_block().reclassify_component_type( - disjunct_object, Block) + disjunct_object, Block + ) # Transform any remaining logical stuff - TransformationFactory('core.logical_to_linear').apply_to(model) + TransformationFactory('contrib.logical_to_disjunctive').apply_to(model) + # Transform anything disjunctive that the above created: + config.GDP_to_MIP_transformation.apply_to(model) def _transformContainer(self, obj): """Find all disjuncts in the container and transform them.""" - for disjunct in obj.component_data_objects(ctype=Disjunct, active=True, - descend_into=True): + for disjunct in obj.component_data_objects( + ctype=Disjunct, active=True, descend_into=True + ): _bool = disjunct.indicator_var if _bool.value is None: - raise GDP_Error("The value of the indicator_var of " - "Disjunct '%s' is None. All indicator_vars " - "must have values before calling " - "'fix_disjuncts'." % disjunct.name) + raise GDP_Error( + "The value of the indicator_var of " + "Disjunct '%s' is None. All indicator_vars " + "must have values before calling " + "'fix_disjuncts'." % disjunct.name + ) elif _bool.value: disjunct.indicator_var.fix(True) self._transformContainer(disjunct) @@ -71,16 +112,17 @@ def _transformContainer(self, obj): # Deactivating fixes the indicator_var to False disjunct.deactivate() - for disjunction in obj.component_data_objects(ctype=Disjunction, - active=True, - descend_into=True): + for disjunction in obj.component_data_objects( + ctype=Disjunction, active=True, descend_into=True + ): self._transformDisjunctionData(disjunction) def _transformDisjunctionData(self, disjunction): # the sum of all the indicator variable values of disjuncts in the # disjunction - logical_sum = sum(value(disj.binary_indicator_var) - for disj in disjunction.disjuncts) + logical_sum = sum( + value(disj.binary_indicator_var) for disj in disjunction.disjuncts + ) # Check that the disjunctions are not being fixed to an infeasible # realization. @@ -89,14 +131,16 @@ def _transformDisjunctionData(self, disjunction): raise GDP_Error( "Disjunction %s violated. " "Expected 1 disjunct to be active, but %s were active." - % (disjunction.name, logical_sum)) + % (disjunction.name, logical_sum) + ) elif not logical_sum >= 1: # for non-XOR disjunctions, the sum of all disjunct values should # be at least 1 raise GDP_Error( "Disjunction %s violated. " "Expected at least 1 disjunct to be active, " - "but none were active.") + "but none were active." + ) else: # disjunction is in feasible realization. Deactivate it. disjunction.deactivate() diff --git a/pyomo/gdp/plugins/gdp_to_mip_transformation.py b/pyomo/gdp/plugins/gdp_to_mip_transformation.py new file mode 100644 index 00000000000..a54cf284258 --- /dev/null +++ b/pyomo/gdp/plugins/gdp_to_mip_transformation.py @@ -0,0 +1,315 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from functools import wraps + +from pyomo.common.collections import ComponentMap +from pyomo.common.log import is_debug_set +from pyomo.common.modeling import unique_component_name + +from pyomo.core.base import Transformation, TransformationFactory +from pyomo.core.base.external import ExternalFunction +from pyomo.core import ( + Any, + Block, + BooleanVar, + Connector, + Constraint, + Expression, + NonNegativeIntegers, + Param, + RangeSet, + Reference, + Set, + SetOf, + Suffix, + Var, +) +from pyomo.gdp import Disjunct, Disjunction, GDP_Error +from pyomo.gdp.transformed_disjunct import _TransformedDisjunct +from pyomo.gdp.util import ( + get_gdp_tree, + get_src_constraint, + get_src_disjunct, + get_src_disjunction, + get_transformed_constraints, + _warn_for_active_disjunct, +) +from pyomo.network import Port + +from weakref import ref as weakref_ref + + +class GDP_to_MIP_Transformation(Transformation): + """ + Base class for transformations from GDP to MIP + """ + + def __init__(self, logger): + """Initialize transformation object.""" + super(GDP_to_MIP_Transformation, self).__init__() + self.logger = logger + self.handlers = { + Constraint: self._transform_constraint, + Var: False, # Note that if a Var appears on a Disjunct, we + # still treat its bounds as global. If the + # intent is for its bounds to be on the + # disjunct, it should be declared with no bounds + # and the bounds should be set in constraints on + # the Disjunct. + BooleanVar: False, + Connector: False, + Expression: False, + Suffix: False, + Param: False, + Set: False, + SetOf: False, + RangeSet: False, + Disjunction: False, # In BigM, it's impossible to encounter an active + # Disjunction because preprocessing would have + # put it before its parent Disjunct in the order + # of transformation. In hull, we intentionally + # pass over active Disjunctions that are on + # Disjuncts because we know they are in the list + # of objects to transform after preprocessing, so + # they will be transformed later. + Disjunct: self._warn_for_active_disjunct, + Block: False, + ExternalFunction: False, + Port: False, # not Arcs, because those are deactivated after + # the network.expand_arcs transformation + } + self._generate_debug_messages = False + self._transformation_blocks = {} + self._algebraic_constraints = {} + + def _restore_state(self): + self._transformation_blocks.clear() + self._algebraic_constraints.clear() + if hasattr(self, '_config'): + del self._config + + def _process_arguments(self, instance, **kwds): + if not instance.ctype in (Block, Disjunct): + raise GDP_Error( + "Transformation called on %s of type %s. 'instance'" + " must be a ConcreteModel, Block, or Disjunct (in " + "the case of nested disjunctions)." % (instance.name, instance.ctype) + ) + + self._config = self.CONFIG(kwds.pop('options', {})) + self._config.set_value(kwds) + self._generate_debug_messages = is_debug_set(self.logger) + + def _transform_logical_constraints(self, instance, targets): + # transform any logical constraints that might be anywhere on the stuff + # we're about to transform. We do this before we preprocess targets + # because we will likely create more disjunctive components that will + # need transformation. + disj_targets = [] + for t in targets: + disj_datas = t.values() if t.is_indexed() else [t] + if t.ctype is Disjunct: + disj_targets.extend(disj_datas) + if t.ctype is Disjunction: + disj_targets.extend( + [d for disjunction in disj_datas for d in disjunction.disjuncts] + ) + TransformationFactory('contrib.logical_to_disjunctive').apply_to( + instance, + targets=[blk for blk in targets if blk.ctype is Block] + disj_targets, + ) + + def _filter_targets(self, instance): + targets = self._config.targets + if targets is None: + targets = (instance,) + + # FIXME: For historical reasons, Hull would silently skip + # any targets that were explicitly deactivated. This + # preserves that behavior (although adds a warning). We + # should revisit that design decision and probably remove + # this filter, as it is slightly ambiguous as to what it + # means for the target to be deactivated: is it just the + # target itself [historical implementation] or any block in + # the hierarchy? + def _filter_inactive(targets): + for t in targets: + if not t.active: + self.logger.warning( + 'GDP.Hull transformation passed a deactivated ' + f'target ({t.name}). Skipping.' + ) + else: + yield t + + return list(_filter_inactive(targets)) + + def _get_gdp_tree_from_targets(self, instance, targets): + knownBlocks = {} + # we need to preprocess targets to make sure that if there are any + # disjunctions in targets that they appear before disjunctions that are + # contained in their disjuncts. That is, in hull, we will transform from + # root to leaf in order to avoid have to modify transformed constraints + # more than once: It is most efficient to build nested transformed + # constraints when we already have the disaggregated variables of the + # parent disjunct. + return get_gdp_tree(targets, instance) + + def _add_transformation_block(self, to_block): + if to_block in self._transformation_blocks: + return self._transformation_blocks[to_block], False + + # make a transformation block on to_block to put transformed disjuncts + # on + transBlockName = unique_component_name( + to_block, '_pyomo_gdp_%s_reformulation' % self.transformation_name + ) + self._transformation_blocks[to_block] = transBlock = Block() + to_block.add_component(transBlockName, transBlock) + transBlock.relaxedDisjuncts = _TransformedDisjunct(NonNegativeIntegers) + + return transBlock, True + + def _add_xor_constraint(self, disjunction, transBlock): + # Put the disjunction constraint on the transformation block and + # determine whether it is an OR or XOR constraint. + # We never do this for just a DisjunctionData because we need to know + # about the index set of its parent component (so that we can make the + # index of this constraint match). So if we called this on a + # DisjunctionData, we did something wrong. + + # first check if the constraint already exists + if disjunction in self._algebraic_constraints: + return self._algebraic_constraints[disjunction] + + # add the XOR (or OR) constraints to parent block (with unique name) + # It's indexed if this is an IndexedDisjunction, not otherwise + if disjunction.is_indexed(): + orC = Constraint(Any) + else: + orC = Constraint() + orCname = unique_component_name( + transBlock, disjunction.getname(fully_qualified=False) + '_xor' + ) + transBlock.add_component(orCname, orC) + self._algebraic_constraints[disjunction] = orC + + return orC + + def _setup_transform_disjunctionData(self, obj, root_disjunct): + # Just because it's unlikely this is what someone meant to do... + if len(obj.disjuncts) == 0: + raise GDP_Error( + "Disjunction '%s' is empty. This is " + "likely indicative of a modeling error." % obj.name + ) + + # Create or fetch the transformation block + if root_disjunct is not None: + # We want to put all the transformed things on the root + # Disjunct's parent's block so that they do not get + # re-transformed + transBlock, new_block = self._add_transformation_block( + root_disjunct.parent_block() + ) + else: + # This isn't nested--just put it on the parent block. + transBlock, new_block = self._add_transformation_block(obj.parent_block()) + + xorConstraint = self._add_xor_constraint(obj.parent_component(), transBlock) + + return transBlock, xorConstraint + + def _get_disjunct_transformation_block(self, disjunct, transBlock): + if disjunct.transformation_block is not None: + return disjunct.transformation_block + + # create a relaxation block for this disjunct + relaxedDisjuncts = transBlock.relaxedDisjuncts + relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)] + + relaxationBlock.transformedConstraints = Constraint(Any) + + relaxationBlock.localVarReferences = Block() + # add the map that will link back and forth between transformed + # constraints and their originals. + relaxationBlock._constraintMap = { + 'srcConstraints': ComponentMap(), + 'transformedConstraints': ComponentMap(), + } + + # add mappings to source disjunct (so we'll know we've relaxed) + disjunct._transformation_block = weakref_ref(relaxationBlock) + relaxationBlock._src_disjunct = weakref_ref(disjunct) + + return relaxationBlock + + def _transform_block_components(self, block, disjunct, *args): + # Find all the variables declared here (including the indicator_var) and + # add a reference on the transformation block so these will be + # accessible when the Disjunct is deactivated. Note that in hull, we do + # this after we have moved up the transformation blocks for nested + # disjunctions, so that we don't have duplicate references. + varRefBlock = disjunct._transformation_block().localVarReferences + for v in block.component_objects(Var, descend_into=Block, active=None): + varRefBlock.add_component( + unique_component_name(varRefBlock, v.getname(fully_qualified=False)), + Reference(v), + ) + + # Now look through the component map of block and transform everything + # we have a handler for. Yell if we don't know how to handle it. (Note + # that because we only iterate through active components, this means + # non-ActiveComponent types cannot have handlers.) + for obj in block.component_objects(active=True, descend_into=Block): + handler = self.handlers.get(obj.ctype, None) + if not handler: + if handler is None: + raise GDP_Error( + "No %s transformation handler registered " + "for modeling components of type %s. If your " + "disjuncts contain non-GDP Pyomo components that " + "require transformation, please transform them first." + % (self.transformation_name, obj.ctype) + ) + continue + # obj is what we are transforming, we pass disjunct + # through so that we will have access to the indicator + # variables down the line. + handler(obj, disjunct, *args) + + def _transform_constraint(self, obj, disjunct, *args): + raise NotImplementedError( + "Class %s failed to implement '_transform_constraint'" % self.__class__ + ) + + def _warn_for_active_disjunct(self, innerdisjunct, outerdisjunct, *args): + _warn_for_active_disjunct(innerdisjunct, outerdisjunct) + + # These are all functions to retrieve transformed components from + # original ones and vice versa. + + @wraps(get_src_disjunct) + def get_src_disjunct(self, transBlock): + return get_src_disjunct(transBlock) + + @wraps(get_src_disjunction) + def get_src_disjunction(self, xor_constraint): + return get_src_disjunction(xor_constraint) + + @wraps(get_src_constraint) + def get_src_constraint(self, transformedConstraint): + return get_src_constraint(transformedConstraint) + + @wraps(get_transformed_constraints) + def get_transformed_constraints(self, srcConstraint): + return get_transformed_constraints(srcConstraint) diff --git a/pyomo/gdp/plugins/gdp_var_mover.py b/pyomo/gdp/plugins/gdp_var_mover.py index 6d1e7a0dc31..df659670bf4 100644 --- a/pyomo/gdp/plugins/gdp_var_mover.py +++ b/pyomo/gdp/plugins/gdp_var_mover.py @@ -23,9 +23,9 @@ from pyomo.common.deprecation import deprecated logger = logging.getLogger('pyomo.gdp') - -@TransformationFactory.register('gdp.reclassify', - doc="Reclassify Disjuncts to Blocks.") + + +@TransformationFactory.register('gdp.reclassify', doc="Reclassify Disjuncts to Blocks.") class HACK_GDP_Disjunct_Reclassifier(Transformation): """Reclassify Disjuncts to Blocks. @@ -33,38 +33,47 @@ class HACK_GDP_Disjunct_Reclassifier(Transformation): can find the variables """ - @deprecated(msg="The gdp.reclasify transformation has been deprecated in " - "favor of the gdp transformations creating References to " - "variables local to each Disjunct during the transformation. " - "Validation that the model has been completely transformed " - "to an algebraic model has been moved to the " - "check_model_algebraic function in gdp.util.", - version='5.7') + + @deprecated( + msg="The gdp.reclasify transformation has been deprecated in " + "favor of the gdp transformations creating References to " + "variables local to each Disjunct during the transformation. " + "Validation that the model has been completely transformed " + "to an algebraic model has been moved to the " + "check_model_algebraic function in gdp.util.", + version='5.7', + ) def _apply_to(self, instance, **kwds): assert not kwds # no keywords expected to the transformation disjunct_generator = instance.component_objects( - Disjunct, descend_into=(Block, Disjunct), - descent_order=TraversalStrategy.PostfixDFS) + Disjunct, + descend_into=(Block, Disjunct), + descent_order=TraversalStrategy.PostfixDFS, + ) for disjunct_component in disjunct_generator: # Check that the disjuncts being reclassified are all relaxed or # are not on an active block. for disjunct in disjunct_component.values(): - if (disjunct.active and - self._disjunct_not_relaxed(disjunct) and - self._disjunct_on_active_block(disjunct) and - self._disjunct_not_fixed_true(disjunct)): - + if ( + disjunct.active + and self._disjunct_not_relaxed(disjunct) + and self._disjunct_on_active_block(disjunct) + and self._disjunct_not_fixed_true(disjunct) + ): # First, do a couple checks in order to give a more # useful error message - disjunction_set = {i for i in - instance.component_data_objects( - Disjunction, descend_into=True, - active=None)} - active_disjunction_set = {i for i in - instance.component_data_objects( - Disjunction, - descend_into=True, - active=True)} + disjunction_set = { + i + for i in instance.component_data_objects( + Disjunction, descend_into=True, active=None + ) + } + active_disjunction_set = { + i + for i in instance.component_data_objects( + Disjunction, descend_into=True, active=True + ) + } disjuncts_in_disjunctions = set() for i in disjunction_set: disjuncts_in_disjunctions.update(i.disjuncts) @@ -73,31 +82,39 @@ def _apply_to(self, instance, **kwds): disjuncts_in_active_disjunctions.update(i.disjuncts) if disjunct not in disjuncts_in_disjunctions: - raise GDP_Error('Disjunct "%s" is currently active, ' - 'but was not found in any Disjunctions. ' - 'This is generally an error as the model ' - 'has not been fully relaxed to a ' - 'pure algebraic form.' % (disjunct.name,)) + raise GDP_Error( + 'Disjunct "%s" is currently active, ' + 'but was not found in any Disjunctions. ' + 'This is generally an error as the model ' + 'has not been fully relaxed to a ' + 'pure algebraic form.' % (disjunct.name,) + ) elif disjunct not in disjuncts_in_active_disjunctions: - raise GDP_Error('Disjunct "%s" is currently active. While ' - 'it participates in a Disjunction, ' - 'that Disjunction is currently deactivated. ' - 'This is generally an error as the ' - 'model has not been fully relaxed to a pure ' - 'algebraic form. Did you deactivate ' - 'the Disjunction without addressing the ' - 'individual Disjuncts?' % (disjunct.name,)) + raise GDP_Error( + 'Disjunct "%s" is currently active. While ' + 'it participates in a Disjunction, ' + 'that Disjunction is currently deactivated. ' + 'This is generally an error as the ' + 'model has not been fully relaxed to a pure ' + 'algebraic form. Did you deactivate ' + 'the Disjunction without addressing the ' + 'individual Disjuncts?' % (disjunct.name,) + ) else: - raise GDP_Error(""" + raise GDP_Error( + """ Reclassifying active Disjunct "%s" as a Block. This is generally an error as it indicates that the model was not completely relaxed before applying the - gdp.reclassify transformation""" % (disjunct.name,)) + gdp.reclassify transformation""" + % (disjunct.name,) + ) # Reclassify this disjunct as a block disjunct_component.parent_block().reclassify_component_type( - disjunct_component, Block) - # HACK: activate teh block, but do not activate the + disjunct_component, Block + ) + # HACK: activate the block, but do not activate the # _BlockData objects super(ActiveIndexedComponent, disjunct_component).activate() @@ -116,19 +133,19 @@ def _apply_to(self, instance, **kwds): disjunct._activate_without_unfixing_indicator() cons_in_disjunct = disjunct.component_objects( - Constraint, descend_into=Block, active=True) + Constraint, descend_into=Block, active=True + ) for con in cons_in_disjunct: con.deactivate() def _disjunct_not_fixed_true(self, disjunct): # Return true if the disjunct indicator variable is not fixed to True - return not (disjunct.indicator_var.fixed and - disjunct.indicator_var.value) + return not (disjunct.indicator_var.fixed and disjunct.indicator_var.value) def _disjunct_not_relaxed(self, disjunct): # Return True if the disjunct was not relaxed by a transformation. return disjunct.transformation_block is None - + def _disjunct_on_active_block(self, disjunct): # Check first to make sure that the disjunct is not a # descendent of an inactive Block or fixed and deactivated @@ -137,9 +154,12 @@ def _disjunct_on_active_block(self, disjunct): while parent_block is not None: if parent_block.ctype is Block and not parent_block.active: return False - elif (parent_block.ctype is Disjunct and not parent_block.active - and parent_block.indicator_var.value == False - and parent_block.indicator_var.fixed): + elif ( + parent_block.ctype is Disjunct + and not parent_block.active + and parent_block.indicator_var.value == False + and parent_block.indicator_var.fixed + ): return False else: # Step up one level in the hierarchy diff --git a/pyomo/gdp/plugins/hull.py b/pyomo/gdp/plugins/hull.py index 29a4ad34cd8..b8e2b3e3699 100644 --- a/pyomo/gdp/plugins/hull.py +++ b/pyomo/gdp/plugins/hull.py @@ -14,34 +14,48 @@ import pyomo.common.config as cfg from pyomo.common import deprecated from pyomo.common.collections import ComponentMap, ComponentSet -from pyomo.common.log import is_debug_set from pyomo.common.modeling import unique_component_name from pyomo.core.expr.numvalue import ZeroConstant -import pyomo.core.expr.current as EXPR -from pyomo.core.base import Transformation, TransformationFactory, Reference +import pyomo.core.expr as EXPR +from pyomo.core.base import TransformationFactory, Reference from pyomo.core import ( - Block, BooleanVar, Connector, Constraint, Param, Set, SetOf, Suffix, Var, - Expression, SortComponents, TraversalStrategy, Any, RangeSet, Reals, value, - NonNegativeIntegers, Binary ) -from pyomo.core.base.boolean_var import ( - _DeprecatedImplicitAssociatedBinaryVariable) + Block, + BooleanVar, + Connector, + Constraint, + Param, + Set, + SetOf, + Suffix, + Var, + Expression, + SortComponents, + TraversalStrategy, + Any, + RangeSet, + Reals, + value, + NonNegativeIntegers, + Binary, +) from pyomo.gdp import Disjunct, Disjunction, GDP_Error +from pyomo.gdp.plugins.gdp_to_mip_transformation import GDP_to_MIP_Transformation +from pyomo.gdp.transformed_disjunct import _TransformedDisjunct from pyomo.gdp.util import ( - clone_without_expression_components, is_child_of, get_src_disjunction, - get_src_constraint, get_transformed_constraints, - get_src_disjunct, _warn_for_active_disjunction, _warn_for_active_disjunct, - preprocess_targets) + clone_without_expression_components, + is_child_of, + _warn_for_active_disjunct, +) from pyomo.core.util import target_list -from pyomo.network import Port -from functools import wraps from weakref import ref as weakref_ref logger = logging.getLogger('pyomo.gdp.hull') + @TransformationFactory.register( - 'gdp.hull', - doc="Relax disjunctive model by forming the hull reformulation.") -class Hull_Reformulation(Transformation): + 'gdp.hull', doc="Relax disjunctive model by forming the hull reformulation." +) +class Hull_Reformulation(GDP_to_MIP_Transformation): """Relax disjunctive model by forming the hull reformulation. Relaxes a disjunctive model into an algebraic model by forming the @@ -93,25 +107,29 @@ class Hull_Reformulation(Transformation): """ - - CONFIG = cfg.ConfigBlock('gdp.hull') - CONFIG.declare('targets', cfg.ConfigValue( - default=None, - domain=target_list, - description="target or list of targets that will be relaxed", - doc=""" + CONFIG = cfg.ConfigDict('gdp.hull') + CONFIG.declare( + 'targets', + cfg.ConfigValue( + default=None, + domain=target_list, + description="target or list of targets that will be relaxed", + doc=""" This specifies the target or list of targets to relax as either a component or a list of components. If None (default), the entire model is transformed. Note that if the transformation is done out of place, the list of targets should be attached to the model before it is cloned, - and the list will specify the targets on the cloned instance.""" - )) - CONFIG.declare('perspective function', cfg.ConfigValue( - default='FurmanSawayaGrossmann', - domain=cfg.In(['FurmanSawayaGrossmann','LeeGrossmann','GrossmannLee']), - description='perspective function used for variable disaggregation', - doc=""" + and the list will specify the targets on the cloned instance.""", + ), + ) + CONFIG.declare( + 'perspective function', + cfg.ConfigValue( + default='FurmanSawayaGrossmann', + domain=cfg.In(['FurmanSawayaGrossmann', 'LeeGrossmann', 'GrossmannLee']), + description='perspective function used for variable disaggregation', + doc=""" The perspective function used for variable disaggregation "LeeGrossmann" is the original NL convex hull from Lee & @@ -152,47 +170,39 @@ class Hull_Reformulation(Transformation): useful algebraic representation of nonlinear disjunctive convex sets using the perspective function. Optimization Online (2016). http://www.optimization-online.org/DB_HTML/2016/07/5544.html. - """ - )) - CONFIG.declare('EPS', cfg.ConfigValue( - default=1e-4, - domain=cfg.PositiveFloat, - description="Epsilon value to use in perspective function", - )) - CONFIG.declare('assume_fixed_vars_permanent', cfg.ConfigValue( - default=False, - domain=bool, - description="Boolean indicating whether or not to transform so that " - "the transformed model will still be valid when fixed Vars are " - "unfixed.", - doc=""" + """, + ), + ) + CONFIG.declare( + 'EPS', + cfg.ConfigValue( + default=1e-4, + domain=cfg.PositiveFloat, + description="Epsilon value to use in perspective function", + ), + ) + CONFIG.declare( + 'assume_fixed_vars_permanent', + cfg.ConfigValue( + default=False, + domain=bool, + description="Boolean indicating whether or not to transform so that " + "the transformed model will still be valid when fixed Vars are " + "unfixed.", + doc=""" If True, the transformation will not disaggregate fixed variables. This means that if a fixed variable is unfixed after transformation, the transformed model is no longer valid. By default, the transformation will disagregate fixed variables so that any later fixing and unfixing will be valid in the transformed model. - """ - )) + """, + ), + ) + transformation_name = 'hull' def __init__(self): - super(Hull_Reformulation, self).__init__() - self.handlers = { - Constraint : self._transform_constraint, - Var : False, - BooleanVar: False, - Connector : False, - Expression : False, - Param : False, - Set : False, - SetOf : False, - RangeSet: False, - Suffix : False, - Disjunction: self._warn_for_active_disjunction, - Disjunct: self._warn_for_active_disjunct, - Block: self._transform_block_on_disjunct, - Port: False, - } - self._generate_debug_messages = False + super().__init__(logger) + self._targets = set() def _add_local_vars(self, block, local_var_dict): localVars = block.component('LocalVars') @@ -212,9 +222,8 @@ def _get_local_var_suffixes(self, block, local_var_dict): # first look beneath where we are (there could be Blocks on this # disjunct) for b in block.component_data_objects( - Block, descend_into=(Block), - active=True, - sort=SortComponents.deterministic): + Block, descend_into=(Block), active=True, sort=SortComponents.deterministic + ): self._add_local_vars(b, local_var_dict) # now traverse upwards and get what's above while block is not None: @@ -224,76 +233,47 @@ def _get_local_var_suffixes(self, block, local_var_dict): return local_var_dict def _apply_to(self, instance, **kwds): - self._apply_to_impl(instance, **kwds) + try: + self._apply_to_impl(instance, **kwds) + finally: + self._restore_state() + self._transformation_blocks.clear() + self._algebraic_constraints.clear() + self._targets_set = set() def _apply_to_impl(self, instance, **kwds): - if not instance.ctype in (Block, Disjunct): - raise GDP_Error("Transformation called on %s of type %s. 'instance'" - " must be a ConcreteModel, Block, or Disjunct (in " - "the case of nested disjunctions)." % - (instance.name, instance.ctype)) - - self._config = self.CONFIG(kwds.pop('options', {})) - self._config.set_value(kwds) - self._generate_debug_messages = is_debug_set(logger) - - targets = self._config.targets - knownBlocks = {} - if targets is None: - targets = ( instance, ) - - # FIXME: For historical reasons, Hull would silently skip - # any targets that were explicitly deactivated. This - # preserves that behavior (although adds a warning). We - # should revisit that design decision and probably remove - # this filter, as it is slightly ambiguous as to what it - # means for the target to be deactivated: is it just the - # target itself [historical implementation] or any block in - # the hierarchy? - def _filter_inactive(targets): - for t in targets: - if not t.active: - logger.warning( - 'GDP.Hull transformation passed a deactivated ' - f'target ({t.name}). Skipping.') - else: - yield t - targets = list(_filter_inactive(targets)) - - # we need to preprocess targets to make sure that if there are any - # disjunctions in targets that their disjuncts appear before them in - # the list. - preprocessed_targets = preprocess_targets(targets, instance, - knownBlocks) - # transform any logical constraints that might be anywhere on the stuff - # we're about to transform. - TransformationFactory('core.logical_to_linear').apply_to( - instance, - targets=[blk for blk in targets if blk.ctype is Block] + - [disj for disj in preprocessed_targets if disj.ctype is Disjunct]) + self._process_arguments(instance, **kwds) + + # filter out inactive targets and handle case where targets aren't + # specified. + targets = self._filter_targets(instance) + # transform logical constraints based on targets + self._transform_logical_constraints(instance, targets) + + # Preprocess in order to find what disjunctive components need + # transformation + gdp_tree = self._get_gdp_tree_from_targets(instance, targets) + preprocessed_targets = gdp_tree.topological_sort() + self._targets_set = set(preprocessed_targets) for t in preprocessed_targets: if t.ctype is Disjunction: - if t.is_indexed(): - self._transform_disjunction(t) - else: - self._transform_disjunctionData(t, t.index()) - else:# t.ctype in (Block, Disjunct): - if t.is_indexed(): - self._transform_block(t) - else: - self._transform_blockData(t) - - def _add_transformation_block(self, instance): - # make a transformation block on instance where we will store - # transformed components - transBlockName = unique_component_name( - instance, - '_pyomo_gdp_hull_reformulation') - transBlock = Block() - instance.add_component(transBlockName, transBlock) - transBlock.relaxedDisjuncts = Block(NonNegativeIntegers) - transBlock.lbub = Set(initialize = ['lb','ub','eq']) + self._transform_disjunctionData( + t, + t.index(), + parent_disjunct=gdp_tree.parent(t), + root_disjunct=gdp_tree.root_disjunct(t), + ) + # We skip disjuncts now, because we need information from the + # disjunctions to transform them (which variables to disaggregate), + # so for hull's purposes, they need not be in the tree. + + def _add_transformation_block(self, to_block): + transBlock, new_block = super()._add_transformation_block(to_block) + if not new_block: + return transBlock, new_block + + transBlock.lbub = Set(initialize=['lb', 'ub', 'eq']) # Map between disaggregated variables and their # originals transBlock._disaggregatedVarMap = { @@ -305,8 +285,7 @@ def _add_transformation_block(self, instance): transBlock._bigMConstraintMap = ComponentMap() # We will store all of the disaggregation constraints for any # Disjunctions we transform onto this block here. - transBlock.disaggregationConstraints = Constraint(NonNegativeIntegers, - Any) + transBlock.disaggregationConstraints = Constraint(NonNegativeIntegers) # This will map from srcVar to a map of srcDisjunction to the # disaggregation constraint corresponding to srcDisjunction @@ -315,110 +294,31 @@ def _add_transformation_block(self, instance): # we are going to store some of the disaggregated vars directly here # when we have vars that don't appear in every disjunct transBlock._disaggregatedVars = Var(NonNegativeIntegers, dense=False) - transBlock._boundsConstraints = Constraint(NonNegativeIntegers, - transBlock.lbub) + transBlock._boundsConstraints = Constraint(NonNegativeIntegers, transBlock.lbub) - return transBlock + return transBlock, True - def _transform_block(self, obj): - for i in sorted(obj.keys()): - self._transform_blockData(obj[i]) - - def _transform_blockData(self, obj): - # Transform every (active) disjunction in the block - for disjunction in obj.component_objects( - Disjunction, - active=True, - sort=SortComponents.deterministic, - descend_into=(Block,Disjunct), - descent_order=TraversalStrategy.PostfixDFS): - self._transform_disjunction(disjunction) - - def _add_xor_constraint(self, disjunction, transBlock): - # Put XOR constraint on the transformation block - - # We never do this for just a DisjunctionData because we need - # to know about the index set of its parent component. So if - # we called this on a DisjunctionData, we did something wrong. - assert isinstance(disjunction, Disjunction) - - # check if the constraint already exists - if disjunction._algebraic_constraint is not None: - return disjunction._algebraic_constraint() - - # add the XOR (or OR) constraints to parent block (with - # unique name) It's indexed if this is an - # IndexedDisjunction, not otherwise - orC = Constraint(disjunction.index_set()) - transBlock.add_component( - unique_component_name(transBlock, - disjunction.getname( - fully_qualified=True) + '_xor'), orC) - disjunction._algebraic_constraint = weakref_ref(orC) - - return orC - - def _transform_disjunction(self, obj): - # NOTE: this check is actually necessary because it's possible we go - # straight to this function when we use targets. - if not obj.active: - return - - # put the transformation block on the parent block of the Disjunction, - # unless this is a disjunction we have seen in a prior call to hull, in - # which case we will use the same transformation block we created - # before. - if obj._algebraic_constraint is not None: - transBlock = obj._algebraic_constraint().parent_block() - else: - transBlock = self._add_transformation_block(obj.parent_block()) - # and create the xor constraint - xorConstraint = self._add_xor_constraint(obj, transBlock) - - # create the disjunction constraint and disaggregation - # constraints and then relax each of the disjunctionDatas - for i in sorted(obj.keys()): - self._transform_disjunctionData(obj[i], i, transBlock) - - # deactivate so the writers will be happy - obj.deactivate() - - def _transform_disjunctionData(self, obj, index, transBlock=None): - if not obj.active: - return + def _transform_disjunctionData( + self, obj, index, parent_disjunct=None, root_disjunct=None + ): # Hull reformulation doesn't work if this is an OR constraint. So if # xor is false, give up if not obj.xor: - raise GDP_Error("Cannot do hull reformulation for " - "Disjunction '%s' with OR constraint. " - "Must be an XOR!" % obj.name) - - if transBlock is None: - # It's possible that we have already created a transformation block - # for another disjunctionData from this same container. If that's - # the case, let's use the same transformation block. (Else it will - # be really confusing that the XOR constraint goes to that old block - # but we create a new one here.) - if obj.parent_component()._algebraic_constraint is not None: - transBlock = obj.parent_component()._algebraic_constraint().\ - parent_block() - else: - transBlock = self._add_transformation_block(obj.parent_block()) + raise GDP_Error( + "Cannot do hull reformulation for " + "Disjunction '%s' with OR constraint. " + "Must be an XOR!" % obj.name + ) - parent_component = obj.parent_component() + transBlock, xorConstraint = self._setup_transform_disjunctionData( + obj, root_disjunct + ) - orConstraint = self._add_xor_constraint(parent_component, transBlock) disaggregationConstraint = transBlock.disaggregationConstraints disaggregationConstraintMap = transBlock._disaggregationConstraintMap disaggregatedVars = transBlock._disaggregatedVars disaggregated_var_bounds = transBlock._boundsConstraints - # Just because it's unlikely this is what someone meant to do... - if len(obj.disjuncts) == 0: - raise GDP_Error("Disjunction '%s' is empty. This is " - "likely indicative of a modeling error." % - obj.getname(fully_qualified=True)) - # We first go through and collect all the variables that we # are going to disaggregate. varOrder_set = ComponentSet() @@ -432,12 +332,14 @@ def _transform_disjunctionData(self, obj, index, transBlock=None): disjunctVars = varsByDisjunct[disjunct] = ComponentSet() # create the key for each disjunct now transBlock._disaggregatedVarMap['disaggregatedVar'][ - disjunct] = ComponentMap() + disjunct + ] = ComponentMap() for cons in disjunct.component_data_objects( - Constraint, - active = True, - sort=SortComponents.deterministic, - descend_into=Block): + Constraint, + active=True, + sort=SortComponents.deterministic, + descend_into=(Block, Disjunct), + ): # [ESJ 02/14/2020] By default, we disaggregate fixed variables # on the philosophy that fixing is not a promise for the future # and we are mathematically wrong if we don't transform these @@ -446,7 +348,8 @@ def _transform_disjunctionData(self, obj, index, transBlock=None): # assume_fixed_vars_permanent to True in which case we will skip # them for var in EXPR.identify_variables( - cons.body, include_fixed=include_fixed_vars): + cons.body, include_fixed=include_fixed_vars + ): # Note the use of a list so that we will # eventually disaggregate the vars in a # deterministic order (the order that we found @@ -458,11 +361,13 @@ def _transform_disjunctionData(self, obj, index, transBlock=None): # check for LocalVars Suffix localVarsByDisjunct = self._get_local_var_suffixes( - disjunct, localVarsByDisjunct) + disjunct, localVarsByDisjunct + ) - # We will disaggregate all variables which are not explicitly declared - # as being local. Note however, that we do declare our own disaggregated - # variables as local, so they will not be re-disaggregated. + # We will disaggregate all variables that are not explicitly declared as + # being local. Since we transform from leaf to root, we are implicitly + # treating our own disaggregated variables as local, so they will not be + # re-disaggregated. varSet = [] varSet = {disj: [] for disj in obj.disjuncts} # Note that variables are local with respect to a Disjunct. We deal with @@ -476,15 +381,17 @@ def _transform_disjunctionData(self, obj, index, transBlock=None): varsToDisaggregate = [] disjunctsVarAppearsIn = ComponentMap() for var in varOrder: - disjuncts = disjunctsVarAppearsIn[var] = [d for d in varsByDisjunct - if var in - varsByDisjunct[d]] + disjuncts = disjunctsVarAppearsIn[var] = [ + d for d in varsByDisjunct if var in varsByDisjunct[d] + ] # clearly not local if used in more than one disjunct if len(disjuncts) > 1: if self._generate_debug_messages: - logger.debug("Assuming '%s' is not a local var since it is" - "used in multiple disjuncts." % - var.getname(fully_qualified=True)) + logger.debug( + "Assuming '%s' is not a local var since it is" + "used in multiple disjuncts." + % var.getname(fully_qualified=True) + ) for disj in disjuncts: varSet[disj].append(var) varsToDisaggregate.append(var) @@ -511,12 +418,18 @@ def _transform_disjunctionData(self, obj, index, transBlock=None): or_expr = 0 for disjunct in obj.disjuncts: or_expr += disjunct.indicator_var.get_associated_binary() - self._transform_disjunct(disjunct, transBlock, varSet[disjunct], - localVars.get(disjunct, []), local_var_set) - orConstraint.add(index, (or_expr, 1)) + self._transform_disjunct( + disjunct, + transBlock, + varSet[disjunct], + localVars.get(disjunct, []), + local_var_set, + ) + rhs = 1 if parent_disjunct is None else parent_disjunct.binary_indicator_var + xorConstraint.add(index, (or_expr, rhs)) # map the DisjunctionData to its XOR constraint to mark it as # transformed - obj._algebraic_constraint = weakref_ref(orConstraint[index]) + obj._algebraic_constraint = weakref_ref(xorConstraint[index]) # add the reaggregation constraints for i, var in enumerate(varsToDisaggregate): @@ -533,30 +446,39 @@ def _transform_disjunctionData(self, obj, index, transBlock=None): # mark this as local because we won't re-disaggregate if this is # a nested disjunction if local_var_set is not None: - local_var_set.append(disaggregatedVar) - var_free = 1 - sum(disj.indicator_var.get_associated_binary() - for disj in disjunctsVarAppearsIn[var]) - self._declare_disaggregated_var_bounds(var, disaggregated_var, - obj, - disaggregated_var_bounds, - (idx,'lb'), (idx,'ub'), - var_free) + local_var_set.append(disaggregated_var) + var_free = 1 - sum( + disj.indicator_var.get_associated_binary() + for disj in disjunctsVarAppearsIn[var] + ) + self._declare_disaggregated_var_bounds( + var, + disaggregated_var, + obj, + disaggregated_var_bounds, + (idx, 'lb'), + (idx, 'ub'), + var_free, + ) # maintain the mappings for disj in obj.disjuncts: # Because we called _transform_disjunct above, we know that # if this isn't transformed it is because it was cleanly # deactivated, and we can just skip it. - if disj._transformation_block is not None and \ - disj not in disjunctsVarAppearsIn[var]: - relaxationBlock = disj._transformation_block().\ - parent_block() + if ( + disj._transformation_block is not None + and disj not in disjunctsVarAppearsIn[var] + ): + relaxationBlock = disj._transformation_block().parent_block() relaxationBlock._bigMConstraintMap[ - disaggregated_var] = Reference( - disaggregated_var_bounds[idx, :]) + disaggregated_var + ] = Reference(disaggregated_var_bounds[idx, :]) relaxationBlock._disaggregatedVarMap['srcVar'][ - disaggregated_var] = var - relaxationBlock._disaggregatedVarMap[ - 'disaggregatedVar'][disj][var] = disaggregated_var + disaggregated_var + ] = var + relaxationBlock._disaggregatedVarMap['disaggregatedVar'][disj][ + var + ] = disaggregated_var disaggregatedExpr = disaggregated_var else: @@ -568,81 +490,53 @@ def _transform_disjunctionData(self, obj, index, transBlock=None): # deactivated, and we can just skip it. continue - disaggregatedVar = disjunct._transformation_block().\ - parent_block()._disaggregatedVarMap[ - 'disaggregatedVar'][disjunct][var] + disaggregatedVar = ( + disjunct._transformation_block() + .parent_block() + ._disaggregatedVarMap['disaggregatedVar'][disjunct][var] + ) disaggregatedExpr += disaggregatedVar - disaggregationConstraint.add((i, index), var == disaggregatedExpr) + # We equate the sum of the disaggregated vars to var (the original) + # if parent_disjunct is None, else it needs to be the disaggregated + # var corresponding to var on the parent disjunct. This is the + # reason we transform from root to leaf: This constraint is now + # correct regardless of how nested something may have been. + parent_var = ( + var + if parent_disjunct is None + else self.get_disaggregated_var(var, parent_disjunct) + ) + cons_idx = len(disaggregationConstraint) + disaggregationConstraint.add(cons_idx, parent_var == disaggregatedExpr) # and update the map so that we can find this later. We index by # variable and the particular disjunction because there is a # different one for each disjunction if disaggregationConstraintMap.get(var) is not None: - disaggregationConstraintMap[var][ - obj] = disaggregationConstraint[(i, index)] + disaggregationConstraintMap[var][obj] = disaggregationConstraint[ + cons_idx + ] else: thismap = disaggregationConstraintMap[var] = ComponentMap() - thismap[obj] = disaggregationConstraint[(i, index)] + thismap[obj] = disaggregationConstraint[cons_idx] # deactivate for the writers obj.deactivate() - def _transform_disjunct(self, obj, transBlock, varSet, localVars, - local_var_set): - # deactivated should only come from the user + def _transform_disjunct(self, obj, transBlock, varSet, localVars, local_var_set): + # We're not using the preprocessed list here, so this could be + # inactive. We've already done the error checking in preprocessing, so + # we just skip it here. if not obj.active: - if obj.indicator_var.is_fixed(): - if not value(obj.indicator_var): - # The user cleanly deactivated the disjunct: there - # is nothing for us to do here. - return - else: - raise GDP_Error( - "The disjunct '%s' is deactivated, but the " - "indicator_var is fixed to %s. This makes no sense." - % ( obj.name, value(obj.indicator_var) )) - if obj._transformation_block is None: - raise GDP_Error( - "The disjunct '%s' is deactivated, but the " - "indicator_var is not fixed and the disjunct does not " - "appear to have been relaxed. This makes no sense. " - "(If the intent is to deactivate the disjunct, fix its " - "indicator_var to False.)" - % ( obj.name, )) - - if obj._transformation_block is not None: - # we've transformed it, which means this is the second time it's - # appearing in a Disjunction - raise GDP_Error( - "The disjunct '%s' has been transformed, but a disjunction " - "it appears in has not. Putting the same disjunct in " - "multiple disjunctions is not supported." % obj.name) - - # create a relaxation block for this disjunct - relaxedDisjuncts = transBlock.relaxedDisjuncts - relaxationBlock = relaxedDisjuncts[len(relaxedDisjuncts)] - transBlock = relaxationBlock.parent_block() + return - relaxationBlock.localVarReferences = Block() + relaxationBlock = self._get_disjunct_transformation_block(obj, transBlock) # Put the disaggregated variables all on their own block so that we can # isolate the name collisions and still have complete control over the - # names on this block. (This is for peace of mind now, but will matter - # in the future for adding the binaries corresponding to Boolean - # indicator vars.) + # names on this block. relaxationBlock.disaggregatedVars = Block() - # add the map that will link back and forth between transformed - # constraints and their originals. - relaxationBlock._constraintMap = { - 'srcConstraints': ComponentMap(), - 'transformedConstraints': ComponentMap() - } - - # add mappings to source disjunct (so we'll know we've relaxed) - obj._transformation_block = weakref_ref(relaxationBlock) - relaxationBlock._srcDisjunct = weakref_ref(obj) - # add the disaggregated variables and their bigm constraints # to the relaxationBlock for var in varSet: @@ -651,10 +545,11 @@ def _transform_disjunct(self, obj, transBlock, varSet, localVars, # of variables from different blocks coming together, so we # get a unique name disaggregatedVarName = unique_component_name( - relaxationBlock.disaggregatedVars, - var.getname(fully_qualified=True)) + relaxationBlock.disaggregatedVars, var.getname(fully_qualified=True) + ) relaxationBlock.disaggregatedVars.add_component( - disaggregatedVarName, disaggregatedVar) + disaggregatedVarName, disaggregatedVar + ) # mark this as local because we won't re-disaggregate if this is a # nested disjunction if local_var_set is not None: @@ -663,12 +558,19 @@ def _transform_disjunct(self, obj, transBlock, varSet, localVars, # add the bigm constraint bigmConstraint = Constraint(transBlock.lbub) relaxationBlock.add_component( - disaggregatedVarName + "_bounds", bigmConstraint) + disaggregatedVarName + "_bounds", bigmConstraint + ) self._declare_disaggregated_var_bounds( - var, disaggregatedVar, obj, - bigmConstraint, 'lb', 'ub', - obj.indicator_var.get_associated_binary(), transBlock) + var, + disaggregatedVar, + obj, + bigmConstraint, + 'lb', + 'ub', + obj.indicator_var.get_associated_binary(), + transBlock, + ) for var in localVars: # we don't need to disaggregated, we can use this Var, but we do @@ -678,136 +580,81 @@ def _transform_disjunct(self, obj, transBlock, varSet, localVars, # of variables from different blocks coming together, so we # get a unique name conName = unique_component_name( - relaxationBlock, - var.getname(fully_qualified=False) + "_bounds") + relaxationBlock, var.getname(fully_qualified=False) + "_bounds" + ) bigmConstraint = Constraint(transBlock.lbub) relaxationBlock.add_component(conName, bigmConstraint) self._declare_disaggregated_var_bounds( - var, var, obj, - bigmConstraint, 'lb', 'ub', - obj.indicator_var.get_associated_binary(), transBlock) - - var_substitute_map = dict((id(v), newV) for v, newV in - transBlock._disaggregatedVarMap[ - 'disaggregatedVar'][obj].items()) - zero_substitute_map = dict((id(v), ZeroConstant) for v, newV in \ - transBlock._disaggregatedVarMap[ - 'disaggregatedVar'][obj].items()) + var, + var, + obj, + bigmConstraint, + 'lb', + 'ub', + obj.indicator_var.get_associated_binary(), + transBlock, + ) + + var_substitute_map = dict( + (id(v), newV) + for v, newV in transBlock._disaggregatedVarMap['disaggregatedVar'][ + obj + ].items() + ) + zero_substitute_map = dict( + (id(v), ZeroConstant) + for v, newV in transBlock._disaggregatedVarMap['disaggregatedVar'][ + obj + ].items() + ) zero_substitute_map.update((id(v), ZeroConstant) for v in localVars) # Transform each component within this disjunct - self._transform_block_components(obj, obj, var_substitute_map, - zero_substitute_map) + self._transform_block_components( + obj, obj, var_substitute_map, zero_substitute_map + ) # deactivate disjunct so writers can be happy obj._deactivate_without_fixing_indicator() - def _transform_block_components( self, block, disjunct, var_substitute_map, - zero_substitute_map): - # As opposed to bigm, in hull the only special thing we need to do for - # nested Disjunctions is to make sure that we move up local var - # references and also references to the disaggregated variables so that - # all will be accessible after we transform this Disjunct. The indicator - # variables and disaggregated variables of the inner disjunction will - # need to be disaggregated again, but the transformed constraints will - # not be. But this way nothing will get double-bigm-ed. (If an - # untransformed disjunction is lurking here, we will catch it below). - - disjunctBlock = disjunct._transformation_block() - destinationBlock = disjunctBlock.parent_block() - for obj in block.component_data_objects( - Disjunction, - sort=SortComponents.deterministic, - descend_into=(Block)): - if obj.algebraic_constraint is None: - # This could be bad if it's active since that means its - # untransformed, but we'll wait to yell until the next loop - continue - # get this disjunction's relaxation block. - transBlock = obj.algebraic_constraint().parent_block() - - self._transfer_var_references(transBlock, destinationBlock) - - # We don't know where all the BooleanVars are used, so if there are any - # that the above transformation didn't transform, we need to do it now, - # so that the Reference gets moved up. This won't be necessary when the - # writers are willing to find Vars not in the active subtree. - for boolean in block.component_data_objects(BooleanVar, - descend_into=Block, - active=None): - if isinstance(boolean._associated_binary, - _DeprecatedImplicitAssociatedBinaryVariable): - parent_block = boolean.parent_block() - new_var = Var(domain=Binary) - parent_block.add_component( - unique_component_name(parent_block, - boolean.local_name + "_asbinary"), - new_var) - boolean.associate_binary_var(new_var) - - # add references to all local variables on block (including the - # indicator_var). Note that we do this after we have moved up the - # transformation blocks for nested disjunctions, so that we don't have - # duplicate references. - varRefBlock = disjunctBlock.localVarReferences - for v in block.component_objects(Var, descend_into=Block, active=None): - if len(v) > 0: - varRefBlock.add_component(unique_component_name( - varRefBlock, v.getname(fully_qualified=True)), - Reference(v)) - - # Look through the component map of block and transform everything we - # have a handler for. Yell if we don't know how to handle it. (Note that - # because we only iterate through active components, this means - # non-ActiveComponent types cannot have handlers.) - for obj in block.component_objects(active=True, descend_into=False): - handler = self.handlers.get(obj.ctype, None) - if not handler: - if handler is None: - raise GDP_Error( - "No hull transformation handler registered " - "for modeling components of type %s. If your " - "disjuncts contain non-GDP Pyomo components that " - "require transformation, please transform them first." - % obj.ctype ) - continue - # obj is what we are transforming, we pass disjunct - # through so that we will have access to the indicator - # variables down the line. - handler(obj, disjunct, var_substitute_map, zero_substitute_map) - - def _declare_disaggregated_var_bounds(self, original_var, disaggregatedVar, - disjunct, bigmConstraint, lb_idx, - ub_idx, var_free_indicator, - transBlock=None): + def _declare_disaggregated_var_bounds( + self, + original_var, + disaggregatedVar, + disjunct, + bigmConstraint, + lb_idx, + ub_idx, + var_free_indicator, + transBlock=None, + ): # If transBlock is None then this is a disaggregated variable for # multiple Disjuncts and we will handle the mappings separately. lb = original_var.lb ub = original_var.ub if lb is None or ub is None: - raise GDP_Error("Variables that appear in disjuncts must be " - "bounded in order to use the hull " - "transformation! Missing bound for %s." - % (original_var.name)) + raise GDP_Error( + "Variables that appear in disjuncts must be " + "bounded in order to use the hull " + "transformation! Missing bound for %s." % (original_var.name) + ) disaggregatedVar.setlb(min(0, lb)) disaggregatedVar.setub(max(0, ub)) if lb: - bigmConstraint.add( - lb_idx, var_free_indicator*lb <= disaggregatedVar) + bigmConstraint.add(lb_idx, var_free_indicator * lb <= disaggregatedVar) if ub: - bigmConstraint.add( - ub_idx, disaggregatedVar <= ub*var_free_indicator) + bigmConstraint.add(ub_idx, disaggregatedVar <= ub * var_free_indicator) # store the mappings from variables to their disaggregated selves on # the transformation block. if transBlock is not None: transBlock._disaggregatedVarMap['disaggregatedVar'][disjunct][ - original_var] = disaggregatedVar - transBlock._disaggregatedVarMap['srcVar'][ - disaggregatedVar] = original_var + original_var + ] = disaggregatedVar + transBlock._disaggregatedVarMap['srcVar'][disaggregatedVar] = original_var transBlock._bigMConstraintMap[disaggregatedVar] = bigmConstraint def _get_local_var_set(self, disjunction): @@ -832,72 +679,46 @@ def _get_local_var_set(self, disjunction): return local_var_set - def _transfer_var_references(self, fromBlock, toBlock): - disjunctList = toBlock.relaxedDisjuncts - for idx, disjunctBlock in fromBlock.relaxedDisjuncts.items(): - # move all the of the local var references - newblock = disjunctList[len(disjunctList)] - newblock.localVarReferences = Block() - newblock.localVarReferences.transfer_attributes_from( - disjunctBlock.localVarReferences) - - def _warn_for_active_disjunction( self, disjunction, disjunct, - var_substitute_map, zero_substitute_map): - _warn_for_active_disjunction(disjunction, disjunct) - - def _warn_for_active_disjunct( self, innerdisjunct, outerdisjunct, - var_substitute_map, zero_substitute_map): - _warn_for_active_disjunct(innerdisjunct, outerdisjunct) - - def _warn_for_active_logical_statement( - self, logical_statment, disjunct, var_substitute_map, - zero_substitute_map): - _warn_for_active_logical_constraint(logical_statment, disjunct) - - def _transform_block_on_disjunct( self, block, disjunct, var_substitute_map, - zero_substitute_map): - # We look through everything on the component map of the block - # and transform it just as we would if it was on the disjunct - # directly. (We are passing the disjunct through so that when - # we find constraints, _transform_constraint will have access to - # the correct indicator variable. - for i in sorted(block.keys()): - self._transform_block_components( block[i], disjunct, - var_substitute_map, - zero_substitute_map) - - def _transform_constraint(self, obj, disjunct, var_substitute_map, - zero_substitute_map): + def _warn_for_active_disjunct( + self, innerdisjunct, outerdisjunct, var_substitute_map, zero_substitute_map + ): + # We override the base class method because in hull, it might just be + # that we haven't gotten here yet. + disjuncts = ( + innerdisjunct.values() if innerdisjunct.is_indexed() else (innerdisjunct,) + ) + for disj in disjuncts: + if disj in self._targets_set: + # We're getting to this, have some patience. + continue + else: + # But if it wasn't in the targets after preprocessing, it + # doesn't belong in an active Disjunction that we are + # transforming and we should be confused. + _warn_for_active_disjunct(innerdisjunct, outerdisjunct) + + def _transform_constraint( + self, obj, disjunct, var_substitute_map, zero_substitute_map + ): # we will put a new transformed constraint on the relaxation block. relaxationBlock = disjunct._transformation_block() - transBlock = relaxationBlock.parent_block() constraintMap = relaxationBlock._constraintMap - # Though rare, it is possible to get naming conflicts here - # since constraints from all blocks are getting moved onto the - # same block. So we get a unique name - name = unique_component_name(relaxationBlock, obj.getname( - fully_qualified=True)) - - if obj.is_indexed(): - newConstraint = Constraint(obj.index_set(), transBlock.lbub) - else: - newConstraint = Constraint(transBlock.lbub) - relaxationBlock.add_component(name, newConstraint) - # map the containers: - # add mapping of original constraint to transformed constraint - if obj.is_indexed(): - constraintMap['transformedConstraints'][obj] = newConstraint - # add mapping of transformed constraint container back to original - # constraint container (or ScalarConstraint) - constraintMap['srcConstraints'][newConstraint] = obj + # We will make indexes from ({obj.local_name} x obj.index_set() x ['lb', + # 'ub']), but don't bother construct that set here, as taking Cartesian + # products is kind of expensive (and redundant since we have the + # original model) + newConstraint = relaxationBlock.transformedConstraints for i in sorted(obj.keys()): c = obj[i] if not c.active: continue - NL = c.body.polynomial_degree() not in (0,1) + unique = len(newConstraint) + name = c.local_name + "_%s" % unique + + NL = c.body.polynomial_degree() not in (0, 1) EPS = self._config.EPS mode = self._config.perspective_function @@ -906,7 +727,8 @@ def _transform_constraint(self, obj, disjunct, var_substitute_map, # disaggregated variables if not NL or mode == "FurmanSawayaGrossmann": h_0 = clone_without_expression_components( - c.body, substitute=zero_substitute_map) + c.body, substitute=zero_substitute_map + ) y = disjunct.binary_indicator_var if NL: @@ -914,38 +736,41 @@ def _transform_constraint(self, obj, disjunct, var_substitute_map, sub_expr = clone_without_expression_components( c.body, substitute=dict( - (var, subs/y) - for var, subs in var_substitute_map.items() ) + (var, subs / y) for var, subs in var_substitute_map.items() + ), ) expr = sub_expr * y elif mode == "GrossmannLee": sub_expr = clone_without_expression_components( c.body, substitute=dict( - (var, subs/(y + EPS)) - for var, subs in var_substitute_map.items() ) + (var, subs / (y + EPS)) + for var, subs in var_substitute_map.items() + ), ) expr = (y + EPS) * sub_expr elif mode == "FurmanSawayaGrossmann": sub_expr = clone_without_expression_components( c.body, substitute=dict( - (var, subs/((1 - EPS)*y + EPS)) - for var, subs in var_substitute_map.items() ) + (var, subs / ((1 - EPS) * y + EPS)) + for var, subs in var_substitute_map.items() + ), ) - expr = ((1-EPS)*y + EPS)*sub_expr - EPS*h_0*(1-y) + expr = ((1 - EPS) * y + EPS) * sub_expr - EPS * h_0 * (1 - y) else: raise RuntimeError("Unknown NL Hull mode") else: expr = clone_without_expression_components( - c.body, substitute=var_substitute_map) + c.body, substitute=var_substitute_map + ) if c.equality: if NL: # ESJ TODO: This can't happen right? This is the only # obvious case where someone has messed up, but this has to # be nonconvex, right? Shouldn't we tell them? - newConsExpr = expr == c.lower*y + newConsExpr = expr == c.lower * y else: v = list(EXPR.identify_variables(expr)) if len(v) == 1 and not c.lower: @@ -963,16 +788,17 @@ def _transform_constraint(self, obj, disjunct, var_substitute_map, # Reverse map also (this is strange) constraintMap['srcConstraints'][v[0]] = c continue - newConsExpr = expr - (1-y)*h_0 == c.lower*y + newConsExpr = expr - (1 - y) * h_0 == c.lower * y if obj.is_indexed(): - newConstraint.add((i, 'eq'), newConsExpr) + newConstraint.add((name, i, 'eq'), newConsExpr) # map the _ConstraintDatas (we mapped the container above) - constraintMap[ - 'transformedConstraints'][c] = [newConstraint[i,'eq']] - constraintMap['srcConstraints'][newConstraint[i,'eq']] = c + constraintMap['transformedConstraints'][c] = [ + newConstraint[name, i, 'eq'] + ] + constraintMap['srcConstraints'][newConstraint[name, i, 'eq']] = c else: - newConstraint.add('eq', newConsExpr) + newConstraint.add((name, 'eq'), newConsExpr) # map to the _ConstraintData (And yes, for # ScalarConstraints, this is overwriting the map to the # container we made above, and that is what I want to @@ -980,63 +806,66 @@ def _transform_constraint(self, obj, disjunct, var_substitute_map, # IndexedConstraints, we can map the container to the # container, but more importantly, we are mapping the # _ConstraintDatas to each other above) - constraintMap[ - 'transformedConstraints'][c] = [newConstraint['eq']] - constraintMap['srcConstraints'][newConstraint['eq']] = c + constraintMap['transformedConstraints'][c] = [ + newConstraint[name, 'eq'] + ] + constraintMap['srcConstraints'][newConstraint[name, 'eq']] = c continue if c.lower is not None: if self._generate_debug_messages: _name = c.getname(fully_qualified=True) - logger.debug("GDP(Hull): Transforming constraint " + - "'%s'", _name) + logger.debug("GDP(Hull): Transforming constraint " + "'%s'", _name) if NL: - newConsExpr = expr >= c.lower*y + newConsExpr = expr >= c.lower * y else: - newConsExpr = expr - (1-y)*h_0 >= c.lower*y + newConsExpr = expr - (1 - y) * h_0 >= c.lower * y if obj.is_indexed(): - newConstraint.add((i, 'lb'), newConsExpr) - constraintMap[ - 'transformedConstraints'][c] = [newConstraint[i,'lb']] - constraintMap['srcConstraints'][newConstraint[i,'lb']] = c + newConstraint.add((name, i, 'lb'), newConsExpr) + constraintMap['transformedConstraints'][c] = [ + newConstraint[name, i, 'lb'] + ] + constraintMap['srcConstraints'][newConstraint[name, i, 'lb']] = c else: - newConstraint.add('lb', newConsExpr) - constraintMap[ - 'transformedConstraints'][c] = [newConstraint['lb']] - constraintMap['srcConstraints'][newConstraint['lb']] = c + newConstraint.add((name, 'lb'), newConsExpr) + constraintMap['transformedConstraints'][c] = [ + newConstraint[name, 'lb'] + ] + constraintMap['srcConstraints'][newConstraint[name, 'lb']] = c if c.upper is not None: if self._generate_debug_messages: _name = c.getname(fully_qualified=True) - logger.debug("GDP(Hull): Transforming constraint " + - "'%s'", _name) + logger.debug("GDP(Hull): Transforming constraint " + "'%s'", _name) if NL: - newConsExpr = expr <= c.upper*y + newConsExpr = expr <= c.upper * y else: - newConsExpr = expr - (1-y)*h_0 <= c.upper*y + newConsExpr = expr - (1 - y) * h_0 <= c.upper * y if obj.is_indexed(): - newConstraint.add((i, 'ub'), newConsExpr) + newConstraint.add((name, i, 'ub'), newConsExpr) # map (have to account for fact we might have created list # above transformed = constraintMap['transformedConstraints'].get(c) if transformed is not None: - transformed.append(newConstraint[i,'ub']) + transformed.append(newConstraint[name, i, 'ub']) else: - constraintMap['transformedConstraints'][ - c] = [newConstraint[i,'ub']] - constraintMap['srcConstraints'][newConstraint[i,'ub']] = c + constraintMap['transformedConstraints'][c] = [ + newConstraint[name, i, 'ub'] + ] + constraintMap['srcConstraints'][newConstraint[name, i, 'ub']] = c else: - newConstraint.add('ub', newConsExpr) + newConstraint.add((name, 'ub'), newConsExpr) transformed = constraintMap['transformedConstraints'].get(c) if transformed is not None: - transformed.append(newConstraint['ub']) + transformed.append(newConstraint[name, 'ub']) else: - constraintMap['transformedConstraints'][ - c] = [newConstraint['ub']] - constraintMap['srcConstraints'][newConstraint['ub']] = c + constraintMap['transformedConstraints'][c] = [ + newConstraint[name, 'ub'] + ] + constraintMap['srcConstraints'][newConstraint[name, 'ub']] = c # deactivate now that we have transformed obj.deactivate() @@ -1050,29 +879,11 @@ def _add_local_var_suffix(self, disjunct): else: if localSuffix.ctype is Suffix: return - raise GDP_Error("A component called 'LocalVars' is declared on " - "Disjunct %s, but it is of type %s, not Suffix." - % (disjunct.getname(fully_qualified=True), - localSuffix.ctype)) - - # These are all functions to retrieve transformed components from - # original ones and vice versa. - - @wraps(get_src_disjunct) - def get_src_disjunct(self, transBlock): - return get_src_disjunct(transBlock) - - @wraps(get_src_disjunction) - def get_src_disjunction(self, xor_constraint): - return get_src_disjunction(xor_constraint) - - @wraps(get_src_constraint) - def get_src_constraint(self, transformedConstraint): - return get_src_constraint(transformedConstraint) - - @wraps(get_transformed_constraints) - def get_transformed_constraints(self, srcConstraint): - return get_transformed_constraints(srcConstraint) + raise GDP_Error( + "A component called 'LocalVars' is declared on " + "Disjunct %s, but it is of type %s, not Suffix." + % (disjunct.getname(fully_qualified=True), localSuffix.ctype) + ) def get_disaggregated_var(self, v, disjunct): """ @@ -1083,20 +894,19 @@ def get_disaggregated_var(self, v, disjunct): Parameters ---------- - v: a Var which appears in a constraint in a transformed Disjunct + v: a Var that appears in a constraint in a transformed Disjunct disjunct: a transformed Disjunct in which v appears """ if disjunct._transformation_block is None: - raise GDP_Error("Disjunct '%s' has not been transformed" - % disjunct.name) + raise GDP_Error("Disjunct '%s' has not been transformed" % disjunct.name) transBlock = disjunct._transformation_block().parent_block() try: - return transBlock._disaggregatedVarMap['disaggregatedVar'][ - disjunct][v] + return transBlock._disaggregatedVarMap['disaggregatedVar'][disjunct][v] except: - logger.error("It does not appear '%s' is a " - "variable which appears in disjunct '%s'" - % (v.name, disjunct.name)) + logger.error( + "It does not appear '%s' is a " + "variable that appears in disjunct '%s'" % (v.name, disjunct.name) + ) raise def get_src_var(self, disaggregated_var): @@ -1111,8 +921,10 @@ def get_src_var(self, disaggregated_var): (and so appears on a transformation block of some Disjunct) """ - msg = ("'%s' does not appear to be a " - "disaggregated variable" % disaggregated_var.name) + msg = ( + "'%s' does not appear to be a " + "disaggregated variable" % disaggregated_var.name + ) # There are two possibilities: It is declared on a Disjunct # transformation Block, or it is declared on the parent of a Disjunct # transformation block (if it is a single variable for multiple @@ -1149,18 +961,24 @@ def get_disaggregation_constraint(self, original_var, disjunction): if transBlock is not None: break if transBlock is None: - raise GDP_Error("Disjunction '%s' has not been properly " - "transformed:" - " None of its disjuncts are transformed." - % disjunction.name) + raise GDP_Error( + "Disjunction '%s' has not been properly " + "transformed:" + " None of its disjuncts are transformed." % disjunction.name + ) try: - return transBlock().parent_block()._disaggregationConstraintMap[ - original_var][disjunction] + return ( + transBlock() + .parent_block() + ._disaggregationConstraintMap[original_var][disjunction] + ) except: - logger.error("It doesn't appear that '%s' is a variable that was " - "disaggregated by Disjunction '%s'" % - (original_var.name, disjunction.name)) + logger.error( + "It doesn't appear that '%s' is a variable that was " + "disaggregated by Disjunction '%s'" + % (original_var.name, disjunction.name) + ) raise def get_var_bounds_constraint(self, v): @@ -1176,9 +994,11 @@ def get_var_bounds_constraint(self, v): disaggregated variable (and so appears on a transformation block of some Disjunct) """ - msg = ("Either '%s' is not a disaggregated variable, or " - "the disjunction that disaggregates it has not " - "been properly transformed." % v.name) + msg = ( + "Either '%s' is not a disaggregated variable, or " + "the disjunction that disaggregates it has not " + "been properly transformed." % v.name + ) # This can only go well if v is a disaggregated var transBlock = v.parent_block() if not hasattr(transBlock, '_bigMConstraintMap'): @@ -1196,11 +1016,14 @@ def get_var_bounds_constraint(self, v): @TransformationFactory.register( 'gdp.chull', - doc="[DEPRECATED] please use 'gdp.hull' to get the Hull transformation.") -@deprecated("The 'gdp.chull' name is deprecated. " - "Please use the more apt 'gdp.hull' instead.", - logger='pyomo.gdp', - version="5.7") + doc="[DEPRECATED] please use 'gdp.hull' to get the Hull transformation.", +) +@deprecated( + "The 'gdp.chull' name is deprecated. " + "Please use the more apt 'gdp.hull' instead.", + logger='pyomo.gdp', + version="5.7", +) class _Deprecated_Name_Hull(Hull_Reformulation): def __init__(self): super(_Deprecated_Name_Hull, self).__init__() diff --git a/pyomo/gdp/plugins/multiple_bigm.py b/pyomo/gdp/plugins/multiple_bigm.py new file mode 100644 index 00000000000..8f0592f204d --- /dev/null +++ b/pyomo/gdp/plugins/multiple_bigm.py @@ -0,0 +1,709 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import itertools +import logging + +from pyomo.common.collections import ComponentMap +from pyomo.common.config import ConfigDict, ConfigValue +from pyomo.common.modeling import unique_component_name + +from pyomo.core import ( + Any, + Binary, + Block, + BooleanVar, + Connector, + Constraint, + Expression, + ExternalFunction, + maximize, + minimize, + NonNegativeIntegers, + Objective, + Param, + RangeSet, + Set, + SetOf, + SortComponents, + Suffix, + value, + Var, +) +from pyomo.core.base import Reference, TransformationFactory +import pyomo.core.expr as EXPR +from pyomo.core.util import target_list + +from pyomo.gdp import Disjunct, Disjunction, GDP_Error +from pyomo.gdp.plugins.bigm_mixin import ( + _BigM_MixIn, + _convert_M_to_tuple, + _warn_for_unused_bigM_args, +) +from pyomo.gdp.plugins.gdp_to_mip_transformation import GDP_to_MIP_Transformation +from pyomo.gdp.transformed_disjunct import _TransformedDisjunct +from pyomo.gdp.util import get_gdp_tree, _to_dict +from pyomo.network import Port +from pyomo.opt import SolverFactory, TerminationCondition +from pyomo.repn import generate_standard_repn + +from weakref import ref as weakref_ref + +logger = logging.getLogger('pyomo.gdp.mbigm') + + +@TransformationFactory.register( + 'gdp.mbigm', + doc="Relax disjunctive model using big-M terms specific to each disjunct", +) +class MultipleBigMTransformation(GDP_to_MIP_Transformation, _BigM_MixIn): + """ + Implements the multiple big-M transformation from [1]. Note that this + transformation is no different than the big-M transformation for two- + term disjunctions, but that it may provide a tighter relaxation for + models containing some disjunctions with three or more terms. + + [1] Francisco Trespalaios and Ignacio E. Grossmann, "Improved Big-M + reformulation for generalized disjunctive programs," Computers and + Chemical Engineering, vol. 76, 2015, pp. 98-103. + """ + + CONFIG = ConfigDict('gdp.mbigm') + CONFIG.declare( + 'targets', + ConfigValue( + default=None, + domain=target_list, + description="target or list of targets that will be relaxed", + doc=""" + This specifies the list of components to relax. If None (default), the + entire model is transformed. Note that if the transformation is done out + of place, the list of targets should be attached to the model before it + is cloned, and the list will specify the targets on the cloned + instance.""", + ), + ) + CONFIG.declare( + 'assume_fixed_vars_permanent', + ConfigValue( + default=False, + domain=bool, + description="Boolean indicating whether or not to transform so that " + "the transformed model will still be valid when fixed Vars are " + "unfixed.", + doc=""" + This is only relevant when the transformation will be calculating M + values. If True, the transformation will calculate M values assuming + that fixed variables will always be fixed to their current values. This + means that if a fixed variable is unfixed after transformation, the + transformed model is potentially no longer valid. By default, the + transformation will assume fixed variables could be unfixed in the + future and will use their bounds to calculate the M value rather than + their value. Note that this could make for a weaker LP relaxation + while the variables remain fixed. + """, + ), + ) + CONFIG.declare( + 'solver', + ConfigValue( + default=SolverFactory('gurobi'), + description="A solver to use to solve the continuous subproblems for " + "calculating the M values", + ), + ) + CONFIG.declare( + 'bigM', + ConfigValue( + default=None, + domain=_to_dict, + description="Big-M values to use while relaxing constraints", + doc=""" + A user-specified dict or ComponentMap mapping tuples of Constraints + and Disjuncts to Big-M values valid for relaxing the constraint if + the Disjunct is chosen. + + Note: Unlike in the bigm transformation, we require the keys in this + mapping specify the components the M value applies to exactly in order + to avoid ambiguity. However, if the 'only_mbigm_bound_constraints' + option is True, this argument can be used as it would be in the + traditional bigm transformation for the non-bound constraints. + """, + ), + ) + CONFIG.declare( + 'reduce_bound_constraints', + ConfigValue( + default=True, + domain=bool, + description="Flag indicating whether or not to handle disjunctive " + "constraints that bound a single variable in a single (tighter) " + "constraint, rather than one per Disjunct.", + doc=""" + Given the not-uncommon special structure: + + [l_1 <= x <= u_1] v [l_2 <= x <= u_2] v ... v [l_K <= x <= u_K], + + instead of applying the rote transformation that would create 2*K + different constraints in the relaxation, we can write two constraints: + + x >= l_1*y_1 + l_2*y_2 + ... + l_K*y_k + x <= u_1*y_1 + u_2*y_2 + ... + u_K*y_K. + + This relaxation is as tight and has fewer constraints. This option is + a flag to tell the mbigm transformation to detect this structure and + handle it specially. Note that this is a special case of the 'Hybrid + Big-M Formulation' from [2] that takes advantage of the common left- + hand side matrix for disjunctive constraints that bound a single + variable. + + Note that we do not use user-specified M values for these constraints + when this flag is set to True: If tighter bounds exist then they + they should be put in the constraints. + + [2] Juan Pablo Vielma, "Mixed Integer Linear Programming Formluation + Techniques," SIAM Review, vol. 57, no. 1, 2015, pp. 3-57. + """, + ), + ) + CONFIG.declare( + 'only_mbigm_bound_constraints', + ConfigValue( + default=False, + domain=bool, + description="Flag indicating if only bound constraints should be " + "transformed with multiple-bigm, or if all the disjunctive " + "constraints should.", + doc=""" + Sometimes it is only computationally advantageous to apply multiple- + bigm to disjunctive constraints with the special structure: + + [l_1 <= x <= u_1] v [l_2 <= x <= u_2] v ... v [l_K <= x <= u_K], + + and transform other disjunctive constraints with the traditional + big-M transformation. This flag is used to set the above behavior. + + Note that the reduce_bound_constraints flag must also be True when + this flag is set to True. + """, + ), + ) + transformation_name = 'mbigm' + + def __init__(self): + super().__init__(logger) + self.handlers[Suffix] = self._warn_for_active_suffix + self._arg_list = {} + + def _apply_to(self, instance, **kwds): + self.used_args = ComponentMap() + try: + self._apply_to_impl(instance, **kwds) + finally: + self._restore_state() + self.used_args.clear() + self._arg_list.clear() + + def _apply_to_impl(self, instance, **kwds): + self._process_arguments(instance, **kwds) + + if ( + self._config.only_mbigm_bound_constraints + and not self._config.reduce_bound_constraints + ): + raise GDP_Error( + "The 'only_mbigm_bound_constraints' option is set " + "to True, but the 'reduce_bound_constraints' " + "option is not. This is not supported--please also " + "set 'reduce_bound_constraints' to True if you " + "only wish to transform the bound constraints with " + "multiple bigm." + ) + + # filter out inactive targets and handle case where targets aren't + # specified. + targets = self._filter_targets(instance) + # transform any logical constraints that might be anywhere on the stuff + # we're about to transform. We do this before we preprocess targets + # because we will likely create more disjunctive components that will + # need transformation. + self._transform_logical_constraints(instance, targets) + # We don't allow nested, so it doesn't much matter which way we sort + # this. But transforming from leaf to root makes the error checking for + # complaining about nested smoother, so we do that. We have to transform + # a Disjunction at a time because, more similarly to hull than bigm, we + # need information from the other Disjuncts in the Disjunction. + gdp_tree = self._get_gdp_tree_from_targets(instance, targets) + preprocessed_targets = gdp_tree.reverse_topological_sort() + + for t in preprocessed_targets: + if t.ctype is Disjunction: + self._transform_disjunctionData( + t, + t.index(), + parent_disjunct=gdp_tree.parent(t), + root_disjunct=gdp_tree.root_disjunct(t), + ) + + # issue warnings about anything that was in the bigM args dict that we + # didn't use + _warn_for_unused_bigM_args(self._config.bigM, self.used_args, logger) + + def _transform_disjunctionData(self, obj, index, parent_disjunct, root_disjunct): + if root_disjunct is not None: + # We do not support nested because, unlike in regular bigM, the + # constraints are not fully relaxed when the exactly-one constraint + # is not enforced. (For example, in this model: [1 <= x <= 3, [1 <= + # y <= 5] v [6 <= y <= 10]] v [5 <= x <= 10, 15 <= y <= 20]), we + # would need to put the relaxed inner-disjunction constraints on the + # parent Disjunct and process them again. This means the order in + # which we transformed Disjuncts would change the calculated M + # values. This is crazy, so we skip it. + raise GDP_Error( + "Found nested Disjunction '%s'. The multiple bigm " + "transformation does not support nested GDPs. " + "Please flatten the model before calling the " + "transformation" % obj.name + ) + + if not obj.xor: + # This transformation assumes it can relax constraints assuming that + # another Disjunct is chosen. If it could be possible to choose both + # then that logic might fail. + raise GDP_Error( + "Cannot do multiple big-M reformulation for " + "Disjunction '%s' with OR constraint. " + "Must be an XOR!" % obj.name + ) + + (transBlock, algebraic_constraint) = self._setup_transform_disjunctionData( + obj, root_disjunct + ) + + ## Here's the logic for the actual transformation + + arg_Ms = self._config.bigM if self._config.bigM is not None else {} + + # First handle the bound constraints if we are dealing with them + # separately + active_disjuncts = [disj for disj in obj.disjuncts if disj.active] + transformed_constraints = set() + if self._config.reduce_bound_constraints: + transformed_constraints = self._transform_bound_constraints( + active_disjuncts, transBlock, arg_Ms + ) + + Ms = arg_Ms + if not self._config.only_mbigm_bound_constraints: + Ms = ( + transBlock.calculated_missing_m_values + ) = self._calculate_missing_M_values( + active_disjuncts, arg_Ms, transBlock, transformed_constraints + ) + + # Now we can deactivate the constraints we deferred, so that we don't + # re-transform them + for cons in transformed_constraints: + cons.deactivate() + + or_expr = 0 + for disjunct in active_disjuncts: + or_expr += disjunct.indicator_var.get_associated_binary() + self._transform_disjunct(disjunct, transBlock, active_disjuncts, Ms) + rhs = 1 if parent_disjunct is None else parent_disjunct.binary_indicator_var + algebraic_constraint.add(index, (or_expr, rhs)) + # map the DisjunctionData to its XOR constraint to mark it as + # transformed + obj._algebraic_constraint = weakref_ref(algebraic_constraint[index]) + + obj.deactivate() + + def _transform_disjunct(self, obj, transBlock, active_disjuncts, Ms): + # We've already filtered out deactivated disjuncts, so we know obj is + # active. + + # Make a relaxation block if we haven't already. + relaxationBlock = self._get_disjunct_transformation_block(obj, transBlock) + + # Transform everything on the disjunct + self._transform_block_components(obj, obj, active_disjuncts, Ms) + + # deactivate disjunct so writers can be happy + obj._deactivate_without_fixing_indicator() + + def _warn_for_active_suffix(self, obj, disjunct, active_disjuncts, Ms): + raise GDP_Error( + "Found active Suffix '{0}' on Disjunct '{1}'. " + "The multiple bigM transformation does not currently " + "support Suffixes.".format(obj.name, disjunct.name) + ) + + def _transform_constraint(self, obj, disjunct, active_disjuncts, Ms): + # we will put a new transformed constraint on the relaxation block. + relaxationBlock = disjunct._transformation_block() + constraintMap = relaxationBlock._constraintMap + transBlock = relaxationBlock.parent_block() + + # Though rare, it is possible to get naming conflicts here + # since constraints from all blocks are getting moved onto the + # same block. So we get a unique name + name = unique_component_name( + relaxationBlock, obj.getname(fully_qualified=False) + ) + + newConstraint = Constraint(Any) + relaxationBlock.add_component(name, newConstraint) + + for i in sorted(obj.keys()): + c = obj[i] + if not c.active: + continue + + if not self._config.only_mbigm_bound_constraints: + transformed = [] + if c.lower is not None: + rhs = sum( + Ms[c, disj][0] * disj.indicator_var.get_associated_binary() + for disj in active_disjuncts + if disj is not disjunct + ) + newConstraint.add((i, 'lb'), c.body - c.lower >= rhs) + transformed.append(newConstraint[i, 'lb']) + + if c.upper is not None: + rhs = sum( + Ms[c, disj][1] * disj.indicator_var.get_associated_binary() + for disj in active_disjuncts + if disj is not disjunct + ) + newConstraint.add((i, 'ub'), c.body - c.upper <= rhs) + transformed.append(newConstraint[i, 'ub']) + for c_new in transformed: + constraintMap['srcConstraints'][c_new] = [c] + constraintMap['transformedConstraints'][c] = transformed + else: + lower = (None, None, None) + upper = (None, None, None) + + if disjunct not in self._arg_list: + self._arg_list[disjunct] = self._get_bigM_arg_list( + self._config.bigM, disjunct + ) + arg_list = self._arg_list[disjunct] + + # first, we see if an M value was specified in the arguments. + # (This returns None if not) + lower, upper = self._get_M_from_args(c, Ms, arg_list, lower, upper) + M = (lower[0], upper[0]) + + # estimate if we don't have what we need + if c.lower is not None and M[0] is None: + M = (self._estimate_M(c.body, c)[0] - c.lower, M[1]) + lower = (M[0], None, None) + if c.upper is not None and M[1] is None: + M = (M[0], self._estimate_M(c.body, c)[1] - c.upper) + upper = (M[1], None, None) + self._add_constraint_expressions( + c, + i, + M, + disjunct.indicator_var.get_associated_binary(), + newConstraint, + constraintMap, + ) + + # deactivate now that we have transformed + c.deactivate() + + def _transform_bound_constraints(self, active_disjuncts, transBlock, Ms): + # first we're just going to find all of them + bounds_cons = ComponentMap() + lower_bound_constraints_by_var = ComponentMap() + upper_bound_constraints_by_var = ComponentMap() + transformed_constraints = set() + for disj in active_disjuncts: + for c in disj.component_data_objects( + Constraint, + active=True, + descend_into=Block, + sort=SortComponents.deterministic, + ): + repn = generate_standard_repn(c.body) + if repn.is_linear() and len(repn.linear_vars) == 1: + # We can treat this as a bounds constraint + v = repn.linear_vars[0] + if v not in bounds_cons: + bounds_cons[v] = [{}, {}] + M = [None, None] + if c.lower is not None: + M[0] = (c.lower - repn.constant) / repn.linear_coefs[0] + if disj in bounds_cons[v][0]: + # this is a redundant bound, we need to keep the + # better one + M[0] = max(M[0], bounds_cons[v][0][disj]) + bounds_cons[v][0][disj] = M[0] + if v in lower_bound_constraints_by_var: + lower_bound_constraints_by_var[v].add((c, disj)) + else: + lower_bound_constraints_by_var[v] = {(c, disj)} + if c.upper is not None: + M[1] = (c.upper - repn.constant) / repn.linear_coefs[0] + if disj in bounds_cons[v][1]: + # this is a redundant bound, we need to keep the + # better one + M[1] = min(M[1], bounds_cons[v][1][disj]) + bounds_cons[v][1][disj] = M[1] + if v in upper_bound_constraints_by_var: + upper_bound_constraints_by_var[v].add((c, disj)) + else: + upper_bound_constraints_by_var[v] = {(c, disj)} + # Add the M values to the dictionary + transBlock._mbm_values[c, disj] = M + + # We can't deactivate yet because we will still be solving + # this Disjunct when we calculate M values for non-bounds + # constraints. We track that it is transformed instead by + # adding it to this set. + transformed_constraints.add(c) + + # Now we actually construct the constraints. We do this separately so + # that we can make sure that we have a term for every active disjunct in + # the disjunction (falling back on the variable bounds if they are there + transformed = transBlock.transformed_bound_constraints = Constraint( + NonNegativeIntegers, ['lb', 'ub'] + ) + for idx, (v, (lower_dict, upper_dict)) in enumerate(bounds_cons.items()): + lower_rhs = 0 + upper_rhs = 0 + for disj in active_disjuncts: + relaxationBlock = self._get_disjunct_transformation_block( + disj, transBlock + ) + if len(lower_dict) > 0: + M = lower_dict.get(disj, None) + if M is None: + # substitute the lower bound if it has one + M = v.lb + if M is None: + raise GDP_Error( + "There is no lower bound for variable '%s', and " + "Disjunct '%s' does not specify one in its " + "constraints. The transformation cannot construct " + "the special bound constraint relaxation without " + "one of these." % (v.name, disj.name) + ) + lower_rhs += M * disj.indicator_var.get_associated_binary() + if len(upper_dict) > 0: + M = upper_dict.get(disj, None) + if M is None: + # substitute the upper bound if it has one + M = v.ub + if M is None: + raise GDP_Error( + "There is no upper bound for variable '%s', and " + "Disjunct '%s' does not specify one in its " + "constraints. The transformation cannot construct " + "the special bound constraint relaxation without " + "one of these." % (v.name, disj.name) + ) + upper_rhs += M * disj.indicator_var.get_associated_binary() + if len(lower_dict) > 0: + transformed.add((idx, 'lb'), v >= lower_rhs) + relaxationBlock._constraintMap['srcConstraints'][ + transformed[idx, 'lb'] + ] = [] + for c, disj in lower_bound_constraints_by_var[v]: + relaxationBlock._constraintMap['srcConstraints'][ + transformed[idx, 'lb'] + ].append(c) + disj.transformation_block._constraintMap['transformedConstraints'][ + c + ] = [transformed[idx, 'lb']] + if len(upper_dict) > 0: + transformed.add((idx, 'ub'), v <= upper_rhs) + relaxationBlock._constraintMap['srcConstraints'][ + transformed[idx, 'ub'] + ] = [] + for c, disj in upper_bound_constraints_by_var[v]: + relaxationBlock._constraintMap['srcConstraints'][ + transformed[idx, 'ub'] + ].append(c) + # might already be here if it had an upper bound + if ( + c + in disj.transformation_block._constraintMap[ + 'transformedConstraints' + ] + ): + disj.transformation_block._constraintMap[ + 'transformedConstraints' + ][c].append(transformed[idx, 'ub']) + else: + disj.transformation_block._constraintMap[ + 'transformedConstraints' + ][c] = [transformed[idx, 'ub']] + + return transformed_constraints + + def _add_transformation_block(self, to_block): + transBlock, new_block = super()._add_transformation_block(to_block) + + if new_block: + # Will store M values as we transform + transBlock._mbm_values = {} + return transBlock, new_block + + def _get_all_var_objects(self, active_disjuncts): + # This is actually a general utility for getting all Vars that appear in + # active Disjuncts in a Disjunction. + seen = set() + for disj in active_disjuncts: + for constraint in disj.component_data_objects( + Constraint, + active=True, + sort=SortComponents.deterministic, + descend_into=Block, + ): + for var in EXPR.identify_variables(constraint.expr, include_fixed=True): + if id(var) not in seen: + seen.add(id(var)) + yield var + + def _calculate_missing_M_values( + self, active_disjuncts, arg_Ms, transBlock, transformed_constraints + ): + scratch_blocks = {} + all_vars = list(self._get_all_var_objects(active_disjuncts)) + for disjunct, other_disjunct in itertools.product( + active_disjuncts, active_disjuncts + ): + if disjunct is other_disjunct: + continue + if id(other_disjunct) in scratch_blocks: + scratch = scratch_blocks[id(other_disjunct)] + else: + scratch = scratch_blocks[id(other_disjunct)] = Block() + other_disjunct.add_component( + unique_component_name(other_disjunct, "scratch"), scratch + ) + scratch.obj = Objective(expr=0) # placeholder, but I want to + # take the name before I add a + # bunch of random reference + # objects. + + # If the writers don't assume Vars are declared on the Block + # being solved, we won't need this! + for v in all_vars: + ref = Reference(v) + scratch.add_component(unique_component_name(scratch, v.name), ref) + + for constraint in disjunct.component_data_objects( + Constraint, + active=True, + descend_into=Block, + sort=SortComponents.deterministic, + ): + if constraint in transformed_constraints: + continue + # First check args + if (constraint, other_disjunct) in arg_Ms: + (lower_M, upper_M) = _convert_M_to_tuple( + arg_Ms[constraint, other_disjunct], constraint, other_disjunct + ) + self.used_args[constraint, other_disjunct] = (lower_M, upper_M) + else: + (lower_M, upper_M) = (None, None) + if constraint.lower is not None and lower_M is None: + # last resort: calculate + if lower_M is None: + scratch.obj.expr = constraint.body - constraint.lower + scratch.obj.sense = minimize + results = self._config.solver.solve(other_disjunct) + if ( + results.solver.termination_condition + is not TerminationCondition.optimal + ): + raise GDP_Error( + "Unsuccessful solve to calculate M value to " + "relax constraint '%s' on Disjunct '%s' when " + "Disjunct '%s' is selected." + % (constraint.name, disjunct.name, other_disjunct.name) + ) + lower_M = value(scratch.obj.expr) + if constraint.upper is not None and upper_M is None: + # last resort: calculate + if upper_M is None: + scratch.obj.expr = constraint.body - constraint.upper + scratch.obj.sense = maximize + results = self._config.solver.solve(other_disjunct) + if ( + results.solver.termination_condition + is not TerminationCondition.optimal + ): + raise GDP_Error( + "Unsuccessful solve to calculate M value to " + "relax constraint '%s' on Disjunct '%s' when " + "Disjunct '%s' is selected." + % (constraint.name, disjunct.name, other_disjunct.name) + ) + upper_M = value(scratch.obj.expr) + arg_Ms[constraint, other_disjunct] = (lower_M, upper_M) + transBlock._mbm_values[constraint, other_disjunct] = (lower_M, upper_M) + + # clean up the scratch blocks + for blk in scratch_blocks.values(): + blk.parent_block().del_component(blk) + + return arg_Ms + + # These are all functions to retrieve transformed components from + # original ones and vice versa. + + def get_src_constraints(self, transformedConstraint): + """Return the original Constraints whose transformed counterpart is + transformedConstraint + + Parameters + ---------- + transformedConstraint: Constraint, which must be a component on one of + the BlockDatas in the relaxedDisjuncts Block of + a transformation block + """ + # This is silly, but we rename this function for multiple bigm because + # transformed constraints have multiple source constraints. + return super().get_src_constraint(transformedConstraint) + + def get_all_M_values(self, model): + """Returns a dictionary mapping each constraint, disjunct pair (where + the constraint is on a disjunct and the disjunct is in the same + disjunction as that disjunct) to a tuple: (lower_M_value, + upper_M_value), where either can be None if the constraint does not + have a lower or upper bound (respectively). + + Parameters + ---------- + model: A GDP model that has been transformed with multiple-BigM + """ + all_ms = {} + for disjunction in model.component_data_objects( + Disjunction, + active=None, + descend_into=(Block, Disjunct), + sort=SortComponents.deterministic, + ): + if disjunction.algebraic_constraint is not None: + transBlock = disjunction.algebraic_constraint.parent_block() + # Don't necessarily assume all disjunctions were transformed + # with multiple bigm... + if hasattr(transBlock, "_mbm_values"): + all_ms.update(transBlock._mbm_values) + + return all_ms diff --git a/pyomo/gdp/plugins/partition_disjuncts.py b/pyomo/gdp/plugins/partition_disjuncts.py index b85c5bbd763..57cfe1852c3 100644 --- a/pyomo/gdp/plugins/partition_disjuncts.py +++ b/pyomo/gdp/plugins/partition_disjuncts.py @@ -17,26 +17,57 @@ """ from __future__ import division -from pyomo.common.config import (ConfigBlock, ConfigValue, add_docstring_list) +from pyomo.common.config import ( + ConfigBlock, + ConfigValue, + document_kwargs_from_configdict, +) from pyomo.common.modeling import unique_component_name -from pyomo.core import ( Block, Constraint, Var, SortComponents, Transformation, - TransformationFactory, TraversalStrategy, - NonNegativeIntegers, value, ConcreteModel, Objective, - ComponentMap, BooleanVar, LogicalConstraint, Connector, - Expression, Suffix, Param, Set, SetOf, RangeSet, - Reference, Binary, LogicalConstraintList, maximize) +from pyomo.core import ( + Block, + Constraint, + Var, + SortComponents, + Transformation, + TransformationFactory, + TraversalStrategy, + NonNegativeIntegers, + value, + ConcreteModel, + Objective, + ComponentMap, + BooleanVar, + LogicalConstraint, + Connector, + Expression, + Suffix, + Param, + Set, + SetOf, + RangeSet, + Reference, + Binary, + LogicalConstraintList, + maximize, +) from pyomo.core.base.external import ExternalFunction from pyomo.network import Port from pyomo.common.collections import ComponentSet from pyomo.repn import generate_standard_repn -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.opt import SolverFactory from pyomo.util.vars_from_expressions import get_vars_from_components from pyomo.gdp import Disjunct, Disjunction, GDP_Error -from pyomo.gdp.util import (is_child_of, _to_dict, verify_successful_solve, - NORMAL, clone_without_expression_components, - _warn_for_active_disjunct, get_gdp_tree) +from pyomo.gdp.util import ( + is_child_of, + _to_dict, + verify_successful_solve, + NORMAL, + clone_without_expression_components, + _warn_for_active_disjunct, + get_gdp_tree, +) from pyomo.core.util import target_list from pyomo.contrib.fbbt.fbbt import compute_bounds_on_expr from weakref import ref as weakref_ref @@ -44,25 +75,32 @@ from math import floor import logging + logger = logging.getLogger('pyomo.gdp.partition_disjuncts') + def _generate_additively_separable_repn(nonlinear_part): if nonlinear_part.__class__ is not EXPR.SumExpression: # This isn't separable, so we just have the one expression - return {'nonlinear_vars': [tuple(v for v in EXPR.identify_variables( - nonlinear_part))], 'nonlinear_exprs': [nonlinear_part]} + return { + 'nonlinear_vars': [ + tuple(v for v in EXPR.identify_variables(nonlinear_part)) + ], + 'nonlinear_exprs': [nonlinear_part], + } # else, it was a SumExpression, and we will break it into the summands, # recording which variables are there. - nonlinear_decomp = {'nonlinear_vars': [], - 'nonlinear_exprs': []} + nonlinear_decomp = {'nonlinear_vars': [], 'nonlinear_exprs': []} for summand in nonlinear_part.args: nonlinear_decomp['nonlinear_exprs'].append(summand) nonlinear_decomp['nonlinear_vars'].append( - tuple(v for v in EXPR.identify_variables(summand))) + tuple(v for v in EXPR.identify_variables(summand)) + ) return nonlinear_decomp + def arbitrary_partition(disjunction, P): """ Returns a valid partition into P sets of the variables that appear in @@ -79,8 +117,9 @@ def arbitrary_partition(disjunction, P): # collect variables v_set = ComponentSet() for disj in disjunction.disjuncts: - v_set.update(get_vars_from_components(disj, Constraint, - descend_into=Block, active=True)) + v_set.update( + get_vars_from_components(disj, Constraint, descend_into=Block, active=True) + ) # assign them to partitions partitions = [ComponentSet() for i in range(P)] for i, v in enumerate(v_set): @@ -88,6 +127,7 @@ def arbitrary_partition(disjunction, P): return partitions + def compute_optimal_bounds(expr, global_constraints, opt): """ Returns a tuple (LB, UB) where LB and UB are the results of minimizing @@ -105,21 +145,26 @@ def compute_optimal_bounds(expr, global_constraints, opt): opt will need to be capable of optimizing nonconvex problems. """ if opt is None: - raise GDP_Error("No solver was specified to optimize the " - "subproblems for computing expression bounds! " - "Please specify a configured solver in the " - "'compute_bounds_solver' argument if using " - "'compute_optimal_bounds.'") + raise GDP_Error( + "No solver was specified to optimize the " + "subproblems for computing expression bounds! " + "Please specify a configured solver in the " + "'compute_bounds_solver' argument if using " + "'compute_optimal_bounds.'" + ) # add temporary objective and calculate bounds obj = Objective(expr=expr) - global_constraints.add_component(unique_component_name(global_constraints, - "tmp_obj"), obj) + global_constraints.add_component( + unique_component_name(global_constraints, "tmp_obj"), obj + ) # Solve first minimizing, to get a lower bound results = opt.solve(global_constraints) if verify_successful_solve(results) is not NORMAL: - logger.warning("Problem to find lower bound for expression %s" - "did not solve normally.\n\n%s" % (expr, results)) + logger.warning( + "Problem to find lower bound for expression %s" + "did not solve normally.\n\n%s" % (expr, results) + ) LB = None else: # This has some risks, if you're using a solver the gives a lower bound, @@ -129,8 +174,10 @@ def compute_optimal_bounds(expr, global_constraints, opt): obj.sense = maximize results = opt.solve(global_constraints) if verify_successful_solve(results) is not NORMAL: - logger.warning("Problem to find upper bound for expression %s" - "did not solve normally.\n\n%s" % (expr, results)) + logger.warning( + "Problem to find upper bound for expression %s" + "did not solve normally.\n\n%s" % (expr, results) + ) UB = None else: UB = value(obj.expr) @@ -141,6 +188,7 @@ def compute_optimal_bounds(expr, global_constraints, opt): return (LB, UB) + def compute_fbbt_bounds(expr, global_constraints, opt): """ Calls fbbt on expr and returns the lower and upper bounds on the expression @@ -149,10 +197,14 @@ def compute_fbbt_bounds(expr, global_constraints, opt): """ return compute_bounds_on_expr(expr) -@TransformationFactory.register('gdp.partition_disjuncts', - doc="Reformulates a convex disjunctive model " - "into a new GDP by splitting additively " - "separable constraints on P sets of variables") + +@TransformationFactory.register( + 'gdp.partition_disjuncts', + doc="Reformulates a convex disjunctive model " + "into a new GDP by splitting additively " + "separable constraints on P sets of variables", +) +@document_kwargs_from_configdict('CONFIG') class PartitionDisjuncts_Transformation(Transformation): """ Transform disjunctive model to equivalent disjunctive model (with @@ -180,12 +232,15 @@ class PartitionDisjuncts_Transformation(Transformation): Relaxations between big-M and Convex Hull Reformulations," 2021. """ + CONFIG = ConfigBlock("gdp.partition_disjuncts") - CONFIG.declare('targets', ConfigValue( - default=None, - domain=target_list, - description="""target or list of targets that will be relaxed""", - doc=""" + CONFIG.declare( + 'targets', + ConfigValue( + default=None, + domain=target_list, + description="""target or list of targets that will be relaxed""", + doc=""" Specifies the target or list of targets to relax as either a component or a list of components. @@ -193,16 +248,19 @@ class PartitionDisjuncts_Transformation(Transformation): transformation is done out of place, the list of targets should be attached to the model before it is cloned, and the list will specify the targets on the cloned instance. - """ - )) - CONFIG.declare('variable_partitions', ConfigValue( - default=None, - domain=_to_dict, - description="""Set of sets of variables which define valid partitions + """, + ), + ) + CONFIG.declare( + 'variable_partitions', + ConfigValue( + default=None, + domain=_to_dict, + description="""Set of sets of variables which define valid partitions (i.e., the constraints are additively separable across these partitions). These can be specified globally (for all active Disjunctions), or by Disjunction.""", - doc=""" + doc=""" Specified variable partitions, either globally or per Disjunction. Expects either a set of disjoint ComponentSets whose union is all the @@ -217,16 +275,19 @@ class PartitionDisjuncts_Transformation(Transformation): Last, note that in the case of constraints containing partially additively separable functions, it is required that the user specify - the variable parition(s). - """ - )) - CONFIG.declare('num_partitions', ConfigValue( - default=None, - domain=_to_dict, - description="""Number of partitions of variables, if variable_partitions - is not specifed. Can be specified separately for specific Disjunctions + the variable partition(s). + """, + ), + ) + CONFIG.declare( + 'num_partitions', + ConfigValue( + default=None, + domain=_to_dict, + description="""Number of partitions of variables, if variable_partitions + is not specified. Can be specified separately for specific Disjunctions if desired.""", - doc=""" + doc=""" Either a single value so that all Disjunctions will have variables partitioned into P sets, or a map of Disjunctions to a value of P for each active Disjunction. Mapping None to a value of P will specify @@ -236,14 +297,17 @@ class PartitionDisjuncts_Transformation(Transformation): Note that if any constraints contain partially additively separable functions, the partitions for the Disjunctions with these Constraints must be specified in the variable_partitions argument. - """ - )) - CONFIG.declare('variable_partitioning_method', ConfigValue( - default=arbitrary_partition, - domain=_to_dict, - description="""Method to partition the variables. By default, the + """, + ), + ) + CONFIG.declare( + 'variable_partitioning_method', + ConfigValue( + default=arbitrary_partition, + domain=_to_dict, + description="""Method to partition the variables. By default, the partitioning will be done arbitrarily.""", - doc=""" + doc=""" A function which takes a Disjunction object and a number P and return a valid partitioning of the variables that appear in the disjunction into P partitions. @@ -255,32 +319,38 @@ class PartitionDisjuncts_Transformation(Transformation): functions, the partitions for the Disjunctions cannot be calculated automatically. Please specify the partitions for the Disjunctions with these Constraints in the variable_partitions argument. - """ - )) - CONFIG.declare('assume_fixed_vars_permanent', ConfigValue( - default=False, - domain=bool, - description="""Boolean indicating whether or not to transform so that + """, + ), + ) + CONFIG.declare( + 'assume_fixed_vars_permanent', + ConfigValue( + default=False, + domain=bool, + description="""Boolean indicating whether or not to transform so that the transformed model will still be valid when fixed Vars are unfixed.""", - doc=""" + doc=""" If True, the transformation will create a correct model even if fixed variables are later unfixed. That is, bounds will be calculated based on fixed variables' bounds, not their values. However, if fixed - variables will never be unfixed, a possibly tigher model will result, + variables will never be unfixed, a possibly tighter model will result, and fixed variables need not have bounds. Note that this has no effect on fixed BooleanVars, including the indicator variables of Disjuncts. The transformation is always correct whether or not these remain fixed. - """ - )) - CONFIG.declare('compute_bounds_method', ConfigValue( - default=compute_fbbt_bounds, - description="""Function that takes an expression, a Block containing + """, + ), + ) + CONFIG.declare( + 'compute_bounds_method', + ConfigValue( + default=compute_fbbt_bounds, + description="""Function that takes an expression, a Block containing the global constraints of the original problem, and a configured solver, and returns both a lower and upper bound for the expression.""", - doc=""" + doc=""" Callback for computing bounds on expressions, in order to bound the auxiliary variables created by the transformation. @@ -291,47 +361,53 @@ class PartitionDisjuncts_Transformation(Transformation): a model containing the variables and global constraints of the original instance, and a configured solver and returns a tuple (LB, UB) where either element can be None if no valid bound could be found. - """ - )) - CONFIG.declare('compute_bounds_solver', ConfigValue( - default=None, - description="""Solver object to pass to compute_bounds_method. + """, + ), + ) + CONFIG.declare( + 'compute_bounds_solver', + ConfigValue( + default=None, + description="""Solver object to pass to compute_bounds_method. This is required if you are using 'compute_optimal_bounds'.""", - doc=""" + doc=""" Configured solver object for use in the compute_bounds_method. In particular, if compute_bounds_method is 'compute_optimal_bounds', this will be used to solve the subproblems, so needs to handle non-convex problems if any Disjunctions contain nonlinear constraints. - """ - )) + """, + ), + ) + def __init__(self): super(PartitionDisjuncts_Transformation, self).__init__() self.handlers = { - Constraint: self._transform_constraint, - Var: False, # these will be already dealt with--we add - # references to them before we call handlers. - BooleanVar: False, - Connector: False, - Expression: False, - Suffix: False, - Param: False, - Set: False, - SetOf: False, - RangeSet: False, - Disjunct: self._warn_for_active_disjunct, - Block: False, + Constraint: self._transform_constraint, + Var: False, # these will be already dealt with--we add + # references to them before we call handlers. + BooleanVar: False, + Connector: False, + Expression: False, + Suffix: False, + Param: False, + Set: False, + SetOf: False, + RangeSet: False, + Disjunct: self._warn_for_active_disjunct, + Block: False, ExternalFunction: False, - Port: False, # not Arcs, because those are deactivated after - # the network.expand_arcs transformation + Port: False, # not Arcs, because those are deactivated after + # the network.expand_arcs transformation } def _apply_to(self, instance, **kwds): if not instance.ctype in (Block, Disjunct): - raise GDP_Error("Transformation called on %s of type %s. 'instance'" - " must be a ConcreteModel, Block, or Disjunct (in " - "the case of nested disjunctions)." % - (instance.name, instance.ctype)) + raise GDP_Error( + "Transformation called on %s of type %s. 'instance'" + " must be a ConcreteModel, Block, or Disjunct (in " + "the case of nested disjunctions)." % (instance.name, instance.ctype) + ) try: self._config = self.CONFIG(kwds.pop('options', {})) self._config.set_value(kwds) @@ -339,11 +415,13 @@ def _apply_to(self, instance, **kwds): if not self._config.assume_fixed_vars_permanent: fixed_vars = ComponentMap() - for v in get_vars_from_components(instance, Constraint, - include_fixed=True, - active=True, - descend_into=(Block, - Disjunct)): + for v in get_vars_from_components( + instance, + Constraint, + include_fixed=True, + active=True, + descend_into=(Block, Disjunct), + ): if v.fixed: fixed_vars[v] = value(v) v.fixed = False @@ -360,9 +438,11 @@ def _apply_to(self, instance, **kwds): del self._transformation_blocks def _apply_to_impl(self, instance): - self.variable_partitions = self._config.variable_partitions if \ - self._config.variable_partitions is not \ - None else {} + self.variable_partitions = ( + self._config.variable_partitions + if self._config.variable_partitions is not None + else {} + ) self.partitioning_method = self._config.variable_partitioning_method # create a model to store the global constraints on that we will pass to @@ -370,24 +450,33 @@ def _apply_to_impl(self, instance): # separate model because we don't need it again global_constraints = ConcreteModel() for cons in instance.component_objects( - Constraint, active=True, descend_into=Block, - sort=SortComponents.deterministic): - global_constraints.add_component(unique_component_name( - global_constraints, cons.getname(fully_qualified=True)), - Reference(cons)) + Constraint, + active=True, + descend_into=Block, + sort=SortComponents.deterministic, + ): + global_constraints.add_component( + unique_component_name( + global_constraints, cons.getname(fully_qualified=True) + ), + Reference(cons), + ) for var in instance.component_objects( - Var, descend_into=(Block, Disjunct), - sort=SortComponents.deterministic): - global_constraints.add_component(unique_component_name( - global_constraints, var.getname(fully_qualified=True)), - Reference(var)) + Var, descend_into=(Block, Disjunct), sort=SortComponents.deterministic + ): + global_constraints.add_component( + unique_component_name( + global_constraints, var.getname(fully_qualified=True) + ), + Reference(var), + ) self._global_constraints = global_constraints # we can support targets as usual. targets = self._config.targets knownBlocks = {} if targets is None: - targets = ( instance, ) + targets = (instance,) # Disjunctions in targets will transform their Disjuncts which will in # turn transform all the GDP components declared on themselves. So we # only need to list root nodes of the GDP tree as targets, and @@ -397,7 +486,7 @@ def _apply_to_impl(self, instance): if t.ctype is Disjunction: # After preprocessing, we know that this is not indexed. self._transform_disjunctionData(t, t.index()) - else: # We know this is a DisjunctData after preprocessing + else: # We know this is a DisjunctData after preprocessing self._transform_blockData(t) def _preprocess_targets(self, targets, instance, knownBlocks): @@ -424,12 +513,14 @@ def _get_transformation_block(self, block): self._transformation_blocks[block] = transformation_block = Block() block.add_component( unique_component_name( - block, - '_pyomo_gdp_partition_disjuncts_reformulation'), - transformation_block) + block, '_pyomo_gdp_partition_disjuncts_reformulation' + ), + transformation_block, + ) transformation_block.indicator_var_equalities = LogicalConstraint( - NonNegativeIntegers) + NonNegativeIntegers + ) return transformation_block @@ -440,29 +531,32 @@ def _transform_blockData(self, obj): # Transform every (active) disjunction in the block. Don't descend into # Disjuncts because we'll transform what's on them recursively. for disjunction in obj.component_data_objects( - Disjunction, - active=True, - sort=SortComponents.deterministic, - descend_into=Block): + Disjunction, + active=True, + sort=SortComponents.deterministic, + descend_into=Block, + ): to_transform.append(disjunction) for disjunction in to_transform: self._transform_disjunctionData(disjunction, disjunction.index()) - def _transform_disjunctionData(self, obj, idx, transBlock=None, - transformed_parent_disjunct=None): + def _transform_disjunctionData( + self, obj, idx, transBlock=None, transformed_parent_disjunct=None + ): if not obj.active: return # Just because it's unlikely this is what someone meant to do... if len(obj.disjuncts) == 0: - raise GDP_Error("Disjunction '%s' is empty. This is " - "likely indicative of a modeling error." % - obj.getname(fully_qualified=True)) + raise GDP_Error( + "Disjunction '%s' is empty. This is " + "likely indicative of a modeling error." + % obj.getname(fully_qualified=True) + ) if transBlock is None and transformed_parent_disjunct is not None: - transBlock = self._get_transformation_block( - transformed_parent_disjunct) + transBlock = self._get_transformation_block(transformed_parent_disjunct) if transBlock is None: transBlock = self._get_transformation_block(obj.parent_block()) @@ -494,11 +588,13 @@ def _transform_disjunctionData(self, obj, idx, transBlock=None, if P is None: P = self._config.num_partitions.get(None) if P is None: - raise GDP_Error("No value for P was given for disjunction " - "%s! Please specify a value of P " - "(number of " - "partitions), if you do not specify the " - "partitions directly." % obj.name) + raise GDP_Error( + "No value for P was given for disjunction " + "%s! Please specify a value of P " + "(number of " + "partitions), if you do not specify the " + "partitions directly." % obj.name + ) # it's this method's job to scream if it can't handle what's # here, we can only assume it worked for now, since it's a # callback. @@ -508,24 +604,27 @@ def _transform_disjunctionData(self, obj, idx, transBlock=None, transformed_disjuncts = [] for disjunct in obj.disjuncts: - transformed_disjunct = self._transform_disjunct(disjunct, partition, - transBlock) + transformed_disjunct = self._transform_disjunct( + disjunct, partition, transBlock + ) if transformed_disjunct is not None: transformed_disjuncts.append(transformed_disjunct) # These require transformation, but that's okay because we are # going to a GDP transBlock.indicator_var_equalities[ - len(transBlock.indicator_var_equalities)] = \ - disjunct.indicator_var.equivalent_to( - transformed_disjunct.indicator_var) + len(transBlock.indicator_var_equalities) + ] = disjunct.indicator_var.equivalent_to( + transformed_disjunct.indicator_var + ) # make a new disjunction with the transformed guys - transformed_disjunction = Disjunction(expr=[disj for disj in - transformed_disjuncts]) + transformed_disjunction = Disjunction( + expr=[disj for disj in transformed_disjuncts] + ) transBlock.add_component( - unique_component_name(transBlock, - obj.getname(fully_qualified=True)), - transformed_disjunction) + unique_component_name(transBlock, obj.getname(fully_qualified=True)), + transformed_disjunction, + ) obj._algebraic_constraint = weakref_ref(transformed_disjunction) obj.deactivate() @@ -550,55 +649,61 @@ def _transform_disjunct(self, disjunct, partition, transBlock): raise GDP_Error( "The disjunct '%s' is deactivated, but the " "indicator_var is fixed to %s. This makes no sense." - % ( disjunct.name, value(disjunct.indicator_var) )) + % (disjunct.name, value(disjunct.indicator_var)) + ) if disjunct._transformation_block is None: raise GDP_Error( "The disjunct '%s' is deactivated, but the " "indicator_var is not fixed and the disjunct does not " "appear to have been relaxed. This makes no sense. " "(If the intent is to deactivate the disjunct, fix its " - "indicator_var to False.)" - % ( disjunct.name, )) + "indicator_var to False.)" % (disjunct.name,) + ) if disjunct._transformation_block is not None: # we've transformed it, which means this is the second time it's # appearing in a Disjunction raise GDP_Error( - "The disjunct '%s' has been transformed, but a disjunction " - "it appears in has not. Putting the same disjunct in " - "multiple disjunctions is not supported." % disjunct.name) + "The disjunct '%s' has been transformed, but a disjunction " + "it appears in has not. Putting the same disjunct in " + "multiple disjunctions is not supported." % disjunct.name + ) transformed_disjunct = Disjunct() disjunct._transformation_block = weakref_ref(transformed_disjunct) transBlock.add_component( - unique_component_name(transBlock, disjunct.getname( - fully_qualified=True)), transformed_disjunct) + unique_component_name(transBlock, disjunct.getname(fully_qualified=True)), + transformed_disjunct, + ) # If the original has an indicator_var fixed to something, fix this one # too. if disjunct.indicator_var.fixed: - transformed_disjunct.indicator_var.fix( - value(disjunct.indicator_var)) + transformed_disjunct.indicator_var.fix(value(disjunct.indicator_var)) # need to transform inner Disjunctions first (before we complain about # active Disjuncts) for disjunction in disjunct.component_data_objects( - Disjunction, - active=True, - sort=SortComponents.deterministic, - descend_into=Block): - self._transform_disjunctionData(disjunction, disjunction.index(), - None, transformed_disjunct) + Disjunction, + active=True, + sort=SortComponents.deterministic, + descend_into=Block, + ): + self._transform_disjunctionData( + disjunction, disjunction.index(), None, transformed_disjunct + ) # create references to any variables declared here on the transformed # Disjunct (this will include the indicator_var) NOTE that we will not # have to do this when #1032 is implemented for the writers. But right # now, we are going to deactivate this and hide it from the active # subtree, so we need to be safe. - for var in disjunct.component_objects(Var, descend_into=Block, - active=None): - transformed_disjunct.add_component(unique_component_name( - transformed_disjunct, var.getname(fully_qualified=True)), - Reference(var)) + for var in disjunct.component_objects(Var, descend_into=Block, active=None): + transformed_disjunct.add_component( + unique_component_name( + transformed_disjunct, var.getname(fully_qualified=True) + ), + Reference(var), + ) # Since this transformation is GDP -> GDP and it is based on # partitioning algebraic expressions, we will copy over @@ -609,11 +714,13 @@ def _transform_disjunct(self, disjunct, partition, transBlock): # who their parent block is, we would like these constraints to answer # that it is the transformed Disjunct. logical_constraints = LogicalConstraintList() - transformed_disjunct.add_component(unique_component_name( - transformed_disjunct, 'logical_constraints'), logical_constraints) - for cons in disjunct.component_data_objects(LogicalConstraint, - descend_into=Block, - active=None): + transformed_disjunct.add_component( + unique_component_name(transformed_disjunct, 'logical_constraints'), + logical_constraints, + ) + for cons in disjunct.component_data_objects( + LogicalConstraint, descend_into=Block, active=None + ): # Add a copy of it on the new Disjunct logical_constraints.add(cons.expr) @@ -623,9 +730,8 @@ def _transform_disjunct(self, disjunct, partition, transBlock): # transform everything else for obj in disjunct.component_data_objects( - active=True, - sort=SortComponents.deterministic, - descend_into=Block): + active=True, sort=SortComponents.deterministic, descend_into=Block + ): handler = self.handlers.get(obj.ctype, None) if not handler: if handler is None: @@ -635,7 +741,8 @@ def _transform_disjunct(self, disjunct, partition, transBlock): "for modeling components of type %s. If your " "disjuncts contain non-GDP Pyomo components that " "require transformation, please transform them first." - % obj.ctype) + % obj.ctype + ) continue # we are really only transforming constraints and checking for # anything nutty (active Disjuncts, etc) here, so pass through what @@ -645,40 +752,47 @@ def _transform_disjunct(self, disjunct, partition, transBlock): disjunct._deactivate_without_fixing_indicator() return transformed_disjunct - def _transform_constraint(self, cons, disjunct, transformed_disjunct, - transBlock, partition): + def _transform_constraint( + self, cons, disjunct, transformed_disjunct, transBlock, partition + ): instance = disjunct.model() cons_name = cons.getname(fully_qualified=True) # create place on transformed Disjunct for the new constraint and # for the auxiliary variables transformed_constraint = Constraint(NonNegativeIntegers) - transformed_disjunct.add_component(unique_component_name( - transformed_disjunct, cons_name), transformed_constraint) + transformed_disjunct.add_component( + unique_component_name(transformed_disjunct, cons_name), + transformed_constraint, + ) aux_vars = Var(NonNegativeIntegers, dense=False) - transformed_disjunct.add_component(unique_component_name( - transformed_disjunct, cons_name + "_aux_vars"), aux_vars) + transformed_disjunct.add_component( + unique_component_name(transformed_disjunct, cons_name + "_aux_vars"), + aux_vars, + ) # create a place on the transBlock for the split constraints split_constraints = Constraint(NonNegativeIntegers) - transBlock.add_component(unique_component_name( - transBlock, cons_name + "_split_constraints"), split_constraints) + transBlock.add_component( + unique_component_name(transBlock, cons_name + "_split_constraints"), + split_constraints, + ) # this is a list which might have two constraints in it if we had # both a lower and upper value. leq_constraints = self._get_leq_constraints(cons) - for (body, rhs) in leq_constraints: + for body, rhs in leq_constraints: repn = generate_standard_repn(body, compute_values=True) nonlinear_repn = None if repn.nonlinear_expr is not None: nonlinear_repn = _generate_additively_separable_repn( - repn.nonlinear_expr) + repn.nonlinear_expr + ) split_exprs = [] split_aux_vars = [] - vars_not_accounted_for = ComponentSet(v for v in - EXPR.identify_variables( - body, - include_fixed=False)) + vars_not_accounted_for = ComponentSet( + v for v in EXPR.identify_variables(body, include_fixed=False) + ) vars_accounted_for = ComponentSet() for idx, var_list in enumerate(partition): # we are going to recreate the piece of the expression @@ -687,27 +801,27 @@ def _transform_constraint(self, cons, disjunct, transformed_disjunct, expr = split_exprs[-1] for i, v in enumerate(repn.linear_vars): if v in var_list: - expr += repn.linear_coefs[i]*v + expr += repn.linear_coefs[i] * v vars_accounted_for.add(v) for i, (v1, v2) in enumerate(repn.quadratic_vars): if v1 in var_list: if v2 not in var_list: - raise GDP_Error("Variables '%s' and '%s' are " - "multiplied in Constraint '%s', " - "but they are in different " - "partitions! Please ensure that " - "all the constraints in the " - "disjunction are " - "additively separable with " - "respect to the specified " - "partition." % (v1.name, v2.name, - cons.name)) - expr += repn.quadratic_coefs[i]*v1*v2 + raise GDP_Error( + "Variables '%s' and '%s' are " + "multiplied in Constraint '%s', " + "but they are in different " + "partitions! Please ensure that " + "all the constraints in the " + "disjunction are " + "additively separable with " + "respect to the specified " + "partition." % (v1.name, v2.name, cons.name) + ) + expr += repn.quadratic_coefs[i] * v1 * v2 vars_accounted_for.add(v1) vars_accounted_for.add(v2) if nonlinear_repn is not None: - for i, expr_var_set in enumerate( - nonlinear_repn['nonlinear_vars']): + for i, expr_var_set in enumerate(nonlinear_repn['nonlinear_vars']): # check if v_list is a subset of var_list. If it is # not and there is no intersection, we move on. If # it is not and there is an intersection, we raise @@ -720,40 +834,41 @@ def _transform_constraint(self, cons, disjunct, transformed_disjunct, vars_accounted_for.add(var) # intersection? elif len(ComponentSet(expr_var_set) & var_list) != 0: - raise GDP_Error("Variables which appear in the " - "expression %s are in different " - "partitions, but this " - "expression doesn't appear " - "additively separable. Please " - "expand it if it is additively " - "separable or, more likely, " - "ensure that all the " - "constraints in the disjunction " - "are additively separable with " - "respect to the specified " - "partition. If you did not " - "specify a partition, only " - "a value of P, note that to " - "automatically partition the " - "variables, we assume all the " - "expressions are additively " - "separable." % - nonlinear_repn[ - 'nonlinear_exprs'][i]) + raise GDP_Error( + "Variables which appear in the " + "expression %s are in different " + "partitions, but this " + "expression doesn't appear " + "additively separable. Please " + "expand it if it is additively " + "separable or, more likely, " + "ensure that all the " + "constraints in the disjunction " + "are additively separable with " + "respect to the specified " + "partition. If you did not " + "specify a partition, only " + "a value of P, note that to " + "automatically partition the " + "variables, we assume all the " + "expressions are additively " + "separable." % nonlinear_repn['nonlinear_exprs'][i] + ) expr_lb, expr_ub = self._config.compute_bounds_method( - expr, self._global_constraints, - self._config.compute_bounds_solver) + expr, self._global_constraints, self._config.compute_bounds_solver + ) if expr_lb is None or expr_ub is None: - raise GDP_Error("Expression %s from constraint '%s' " - "is unbounded! Please ensure all " - "variables that appear " - "in the constraint are bounded or " - "specify compute_bounds_method=" - "compute_optimal_bounds" - " if the expression is bounded by the " - "global constraints." % - (expr, cons.name)) + raise GDP_Error( + "Expression %s from constraint '%s' " + "is unbounded! Please ensure all " + "variables that appear " + "in the constraint are bounded or " + "specify compute_bounds_method=" + "compute_optimal_bounds" + " if the expression is bounded by the " + "global constraints." % (expr, cons.name) + ) # if the expression was empty wrt the partition, we don't # need to bother with any of this. The aux_var doesn't need # to exist because it would be 0. @@ -762,35 +877,34 @@ def _transform_constraint(self, cons, disjunct, transformed_disjunct, aux_var.setlb(expr_lb) aux_var.setub(expr_ub) split_aux_vars.append(aux_var) - split_constraints[ - len(split_constraints)] = expr <= aux_var + split_constraints[len(split_constraints)] = expr <= aux_var if len(vars_accounted_for) < len(vars_not_accounted_for): - orphans = vars_not_accounted_for - vars_accounted_for - orphan_string = "" - for v in orphans: - orphan_string += "'%s', " % v.name - orphan_string = orphan_string[:-2] - raise GDP_Error("Partition specified for disjunction " - "containing Disjunct '%s' does not " - "include all the variables that appear " - "in the disjunction. The following " - "variables are not assigned to any part " - "of the partition: %s" % (disjunct.name, - orphan_string)) - transformed_constraint[ - len(transformed_constraint)] = sum(v for v in - split_aux_vars) <= \ - rhs - repn.constant + orphans = vars_not_accounted_for - vars_accounted_for + orphan_string = "" + for v in orphans: + orphan_string += "'%s', " % v.name + orphan_string = orphan_string[:-2] + raise GDP_Error( + "Partition specified for disjunction " + "containing Disjunct '%s' does not " + "include all the variables that appear " + "in the disjunction. The following " + "variables are not assigned to any part " + "of the partition: %s" % (disjunct.name, orphan_string) + ) + transformed_constraint[len(transformed_constraint)] = ( + sum(v for v in split_aux_vars) <= rhs - repn.constant + ) # deactivate the constraint since we've transformed it cons.deactivate() - def _warn_for_active_disjunct(self, disjunct, parent_disjunct, - transformed_parent_disjunct, transBlock, - partition): + def _warn_for_active_disjunct( + self, + disjunct, + parent_disjunct, + transformed_parent_disjunct, + transBlock, + partition, + ): _warn_for_active_disjunct(disjunct, parent_disjunct) - -# Add the CONFIG arguments to the transformation's docstring -PartitionDisjuncts_Transformation.__doc__ = add_docstring_list( - PartitionDisjuncts_Transformation.__doc__, - PartitionDisjuncts_Transformation.CONFIG, indent_by=8) diff --git a/pyomo/gdp/tests/common_tests.py b/pyomo/gdp/tests/common_tests.py index 8c7653e238a..d021c436f22 100644 --- a/pyomo/gdp/tests/common_tests.py +++ b/pyomo/gdp/tests/common_tests.py @@ -9,27 +9,41 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +import pickle +from pyomo.common.dependencies import dill from pyomo.environ import ( - TransformationFactory, ConcreteModel, Constraint, Var, Objective, - Block, Any, RangeSet, Expression, value, BooleanVar, SolverFactory, - TerminationCondition + TransformationFactory, + ConcreteModel, + Constraint, + Var, + Objective, + Block, + Any, + RangeSet, + Expression, + value, + BooleanVar, + SolverFactory, + TerminationCondition, ) from pyomo.gdp import Disjunct, Disjunction, GDP_Error +from pyomo.core.expr.compare import assertExpressionsEqual from pyomo.core.base import constraint, ComponentUID from pyomo.core.base.block import _BlockData from pyomo.repn import generate_standard_repn +import pyomo.core.expr as EXPR import pyomo.gdp.tests.models as models from io import StringIO import random import pyomo.opt -linear_solvers = pyomo.opt.check_available_solvers( - 'glpk','cbc','gurobi','cplex') +linear_solvers = pyomo.opt.check_available_solvers('glpk', 'cbc', 'gurobi', 'cplex') # utility functions + def check_linear_coef(self, repn, var, coef): # Map logical variables to their Boolean counterparts if isinstance(var, BooleanVar): @@ -37,26 +51,27 @@ def check_linear_coef(self, repn, var, coef): # utility used to check a variable-coefficient pair in a standard_repn var_id = None - for i,v in enumerate(repn.linear_vars): + for i, v in enumerate(repn.linear_vars): if v is var: var_id = i self.assertIsNotNone(var_id) - self.assertEqual(repn.linear_coefs[var_id], coef) + self.assertAlmostEqual(repn.linear_coefs[var_id], coef) + def check_squared_term_coef(self, repn, var, coef): var_id = None - for i,(v1, v2) in enumerate(repn.quadratic_vars): + for i, (v1, v2) in enumerate(repn.quadratic_vars): if v1 is var and v2 is var: var_id = i break self.assertIsNotNone(var_id) self.assertEqual(repn.quadratic_coefs[var_id], coef) + def diff_apply_to_and_create_using(self, model, transformation, **kwargs): # compares the pprint from the transformed model after using both apply_to # and create_using to make sure the two do the same thing - modelcopy = TransformationFactory(transformation).create_using(model, - **kwargs) + modelcopy = TransformationFactory(transformation).create_using(model, **kwargs) modelcopy_buf = StringIO() modelcopy.pprint(ostream=modelcopy_buf) modelcopy_output = modelcopy_buf.getvalue() @@ -69,6 +84,22 @@ def diff_apply_to_and_create_using(self, model, transformation, **kwargs): model_output = model_buf.getvalue() self.assertMultiLineEqual(modelcopy_output, model_output) + +def check_obj_in_active_tree(self, obj, root=None): + # Utility for checking that transformed components are indeed on the new + # model, but without relying on private names to locate them. Basically, + # I've been known to change where transformed components go, and I don't + # want to test that. But I would like to check that they went somewhere that + # the writers can find them. + self.assertTrue(obj.active) + parent = obj.parent_component() + self.assertTrue(parent.active) + blk = parent.parent_block() + while blk is not root: + self.assertTrue(blk.active) + blk = blk.parent_block() + + def check_relaxation_block(self, m, name, numdisjuncts): # utility for checking the transformation block (this method is generic to # bigm and hull though there is more on the hull transformation block, and @@ -78,12 +109,13 @@ def check_relaxation_block(self, m, name, numdisjuncts): self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) self.assertEqual(len(transBlock.relaxedDisjuncts), numdisjuncts) + def checkb0TargetsInactive(self, m): self.assertTrue(m.disjunct1.active) - self.assertTrue(m.disjunct1[1,0].active) - self.assertTrue(m.disjunct1[1,1].active) - self.assertTrue(m.disjunct1[2,0].active) - self.assertTrue(m.disjunct1[2,1].active) + self.assertTrue(m.disjunct1[1, 0].active) + self.assertTrue(m.disjunct1[1, 1].active) + self.assertTrue(m.disjunct1[2, 0].active) + self.assertTrue(m.disjunct1[2, 1].active) self.assertFalse(m.b[0].disjunct.active) self.assertFalse(m.b[0].disjunct[0].active) @@ -91,33 +123,39 @@ def checkb0TargetsInactive(self, m): self.assertTrue(m.b[1].disjunct0.active) self.assertTrue(m.b[1].disjunct1.active) + def checkb0TargetsTransformed(self, m, transformation): trans = TransformationFactory('gdp.%s' % transformation) - disjBlock = m.b[0].component( - "_pyomo_gdp_%s_reformulation" % transformation).relaxedDisjuncts + disjBlock = ( + m.b[0] + .component("_pyomo_gdp_%s_reformulation" % transformation) + .relaxedDisjuncts + ) self.assertEqual(len(disjBlock), 2) - self.assertIsInstance(disjBlock[0].component("b[0].disjunct[0].c"), - Constraint) - self.assertIsInstance(disjBlock[1].component("b[0].disjunct[1].c"), - Constraint) + self.assertIs( + trans.get_transformed_constraints(m.b[0].disjunct[0].c)[0].parent_block(), + disjBlock[0], + ) + self.assertIs( + trans.get_transformed_constraints(m.b[0].disjunct[1].c)[0].parent_block(), + disjBlock[1], + ) # This relies on the disjunctions being transformed in the same order # every time. This dictionary maps the block index to the list of # pairs of (originalDisjunctIndex, transBlockIndex) - pairs = [ - (0,0), - (1,1), - ] + pairs = [(0, 0), (1, 1)] for i, j in pairs: - self.assertIs(m.b[0].disjunct[i].transformation_block(), - disjBlock[j]) - self.assertIs(trans.get_src_disjunct(disjBlock[j]), - m.b[0].disjunct[i]) + self.assertIs(m.b[0].disjunct[i].transformation_block, disjBlock[j]) + self.assertIs(trans.get_src_disjunct(disjBlock[j]), m.b[0].disjunct[i]) + # active status checks -def check_user_deactivated_disjuncts(self, transformation, - check_trans_block=True, **kwargs): + +def check_user_deactivated_disjuncts( + self, transformation, check_trans_block=True, **kwargs +): # check that we do not transform a deactivated DisjunctData m = models.makeTwoTermDisj() m.d[0].deactivate() @@ -131,37 +169,40 @@ def check_user_deactivated_disjuncts(self, transformation, rBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) disjBlock = rBlock.relaxedDisjuncts self.assertEqual(len(disjBlock), 1) - self.assertIs(disjBlock[0], m.d[1].transformation_block()) + self.assertIs(disjBlock[0], m.d[1].transformation_block) self.assertIs(transform.get_src_disjunct(disjBlock[0]), m.d[1]) + def check_improperly_deactivated_disjuncts(self, transformation, **kwargs): # check that if a Disjunct is deactivated but its indicator variable is not # fixed to 0, we express our confusion. m = models.makeTwoTermDisj() m.d[0].deactivate() - self.assertEqual(value(m.d[0].indicator_var), 0) + self.assertEqual(value(m.d[0].indicator_var), False) self.assertTrue(m.d[0].indicator_var.is_fixed()) - m.d[0].indicator_var.fix(1) + m.d[0].indicator_var.fix(True) self.assertRaisesRegex( GDP_Error, r"The disjunct 'd\[0\]' is deactivated, but the " r"indicator_var is fixed to True. This makes no sense.", TransformationFactory('gdp.%s' % transformation).apply_to, m, - **kwargs) + **kwargs + ) + def check_indexed_disjunction_not_transformed(self, m, transformation): # no transformation block, nothing transformed - self.assertIsNone(m.component("_pyomo_gdp_%s_transformation" - % transformation)) + self.assertIsNone(m.component("_pyomo_gdp_%s_transformation" % transformation)) for idx in m.disjunct: self.assertIsNone(m.disjunct[idx].transformation_block) for idx in m.disjunction: self.assertIsNone(m.disjunction[idx].algebraic_constraint) -def check_do_not_transform_userDeactivated_indexedDisjunction(self, - transformation, - **kwargs): + +def check_do_not_transform_userDeactivated_indexedDisjunction( + self, transformation, **kwargs +): # check that we do not transform a deactivated disjunction m = models.makeTwoTermIndexedDisjunction() # If you truly want to transform nothing, deactivate everything @@ -169,33 +210,36 @@ def check_do_not_transform_userDeactivated_indexedDisjunction(self, for idx in m.disjunct: m.disjunct[idx].deactivate() directly = TransformationFactory('gdp.%s' % transformation).create_using( - m, **kwargs) + m, **kwargs + ) check_indexed_disjunction_not_transformed(self, directly, transformation) targets = TransformationFactory('gdp.%s' % transformation).create_using( - m, targets=(m.disjunction), **kwargs) + m, targets=(m.disjunction), **kwargs + ) check_indexed_disjunction_not_transformed(self, targets, transformation) + def check_disjunction_deactivated(self, transformation, **kwargs): # check that we deactivate disjunctions after we transform them m = models.makeTwoTermDisj() - TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,), - **kwargs) + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,), **kwargs) oldblock = m.component("disjunction") self.assertIsInstance(oldblock, Disjunction) self.assertFalse(oldblock.active) + def check_disjunctDatas_deactivated(self, transformation, **kwargs): # check that we deactivate disjuncts after we transform them m = models.makeTwoTermDisj() - TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,), - **kwargs) + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,), **kwargs) oldblock = m.component("disjunction") self.assertFalse(oldblock.disjuncts[0].active) self.assertFalse(oldblock.disjuncts[1].active) + def check_deactivated_constraints(self, transformation, **kwargs): # test that we deactivate constraints after we transform them m = models.makeTwoTermDisj() @@ -214,23 +258,23 @@ def check_deactivated_constraints(self, transformation, **kwargs): self.assertIsInstance(oldc, Constraint) self.assertFalse(oldc.active) + def check_deactivated_disjuncts(self, transformation, **kwargs): # another test that we deactivated transformed Disjuncts, but this one # includes a SimpleDisjunct as well m = models.makeTwoTermMultiIndexedDisjunction() - TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,), - **kwargs) + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,), **kwargs) # all the disjuncts got transformed, so all should be deactivated for i in m.disjunct.index_set(): self.assertFalse(m.disjunct[i].active) self.assertFalse(m.disjunct.active) + def check_deactivated_disjunctions(self, transformation, **kwargs): # another test that we deactivated transformed Disjunctions, but including a # SimpleDisjunction m = models.makeTwoTermMultiIndexedDisjunction() - TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,), - **kwargs) + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,), **kwargs) # all the disjunctions got transformed, so they should be # deactivated too @@ -238,8 +282,8 @@ def check_deactivated_disjunctions(self, transformation, **kwargs): self.assertFalse(m.disjunction[i].active) self.assertFalse(m.disjunction.active) -def check_do_not_transform_twice_if_disjunction_reactivated(self, - transformation): + +def check_do_not_transform_twice_if_disjunction_reactivated(self, transformation): # test that if an already-transformed disjunction is reactivated, we will # not retransform it in a subsequent call to the transformation. m = models.makeTwoTermDisj() @@ -265,14 +309,15 @@ def check_do_not_transform_twice_if_disjunction_reactivated(self, # get an error. self.assertRaisesRegex( GDP_Error, - r"The disjunct 'd\[0\]' has been transformed, but a disjunction " - r"it appears in has not. Putting the same disjunct in " + r"The disjunct 'd\[0\]' has been transformed, but 'disjunction', " + r"a disjunction it appears in, has not. Putting the same disjunct in " r"multiple disjunctions is not supported.", TransformationFactory('gdp.%s' % transformation).apply_to, - m) + m, + ) + -def check_constraints_deactivated_indexedDisjunction(self, transformation, - **kwargs): +def check_constraints_deactivated_indexedDisjunction(self, transformation, **kwargs): # check that we deactivate transformed constraints m = models.makeTwoTermMultiIndexedDisjunction() TransformationFactory('gdp.%s' % transformation).apply_to(m, **kwargs) @@ -280,10 +325,12 @@ def check_constraints_deactivated_indexedDisjunction(self, transformation, for i in m.disjunct.index_set(): self.assertFalse(m.disjunct[i].c.active) + def check_partial_deactivate_indexed_disjunction(self, transformation): """Test for partial deactivation of an indexed disjunction.""" m = ConcreteModel() m.x = Var(bounds=(0, 10)) + @m.Disjunction([0, 1]) def disj(m, i): if i == 0: @@ -297,12 +344,16 @@ def disj(m, i): TransformationFactory('gdp.%s' % transformation).apply_to(m) transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) self.assertEqual( - len(transBlock.disj_xor), 1, - "There should only be one XOR constraint generated. Found %s." % - len(transBlock.disj_xor)) + len(transBlock.disj_xor), + 1, + "There should only be one XOR constraint generated. Found %s." + % len(transBlock.disj_xor), + ) + # transformation block + def check_transformation_block_name_collision(self, transformation): # make sure that if the model already has a block called # _pyomo_gdp_*_relaxation that we come up with a different name for the @@ -320,16 +371,18 @@ def check_transformation_block_name_collision(self, transformation): disjBlock = transBlock.relaxedDisjuncts self.assertIsInstance(disjBlock, Block) self.assertEqual(len(disjBlock), 2) - self.assertIsInstance(disjBlock[0].component("d[0].c"), Constraint) - self.assertIsInstance(disjBlock[1].component("d[1].c1"), Constraint) - self.assertIsInstance(disjBlock[1].component("d[1].c2"), Constraint) + self.assertIs(m.d[0].transformation_block, disjBlock[0]) + self.assertIs(m.d[1].transformation_block, disjBlock[1]) # we didn't add to the block that wasn't ours - self.assertEqual(len(m.component("_pyomo_gdp_%s_reformulation" % - transformation)), 0) + self.assertEqual( + len(m.component("_pyomo_gdp_%s_reformulation" % transformation)), 0 + ) + # XOR constraints + def check_indicator_vars(self, transformation): # particularly paranoid test checking that the indicator_vars are intact # after transformation @@ -347,19 +400,24 @@ def check_indicator_vars(self, transformation): self.assertTrue(_binary1.active) self.assertTrue(_binary1.is_binary()) + def check_two_term_disjunction_xor(self, xor, disj1, disj2): self.assertIsInstance(xor, Constraint) self.assertEqual(len(xor), 1) - self.assertIs(disj1.binary_indicator_var, xor.body.arg(0)) - self.assertIs(disj2.binary_indicator_var, xor.body.arg(1)) - repn = generate_standard_repn(xor.body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - check_linear_coef(self, repn, disj1.indicator_var, 1) - check_linear_coef(self, repn, disj2.indicator_var, 1) + assertExpressionsEqual( + self, + xor.body, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression((1, disj1.binary_indicator_var)), + EXPR.MonomialTermExpression((1, disj2.binary_indicator_var)), + ] + ), + ) self.assertEqual(xor.lower, 1) self.assertEqual(xor.upper, 1) + def check_xor_constraint(self, transformation): # verify xor constraint for a SimpleDisjunction m = models.makeTwoTermDisj() @@ -370,41 +428,41 @@ def check_xor_constraint(self, transformation): xor = rBlock.component("disjunction_xor") check_two_term_disjunction_xor(self, xor, m.d[0], m.d[1]) + def check_indexed_xor_constraints(self, transformation): # verify xor constraint for an IndexedDisjunction m = models.makeTwoTermMultiIndexedDisjunction() TransformationFactory('gdp.%s' % transformation).apply_to(m) - xor = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ - component("disjunction_xor") + xor = m.component("_pyomo_gdp_%s_reformulation" % transformation).component( + "disjunction_xor" + ) self.assertIsInstance(xor, Constraint) for i in m.disjunction.index_set(): repn = generate_standard_repn(xor[i].body) self.assertEqual(repn.constant, 0) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) - check_linear_coef( - self, repn, m.disjunction[i].disjuncts[0].indicator_var, 1) - check_linear_coef( - self, repn, m.disjunction[i].disjuncts[1].indicator_var, 1) + check_linear_coef(self, repn, m.disjunction[i].disjuncts[0].indicator_var, 1) + check_linear_coef(self, repn, m.disjunction[i].disjuncts[1].indicator_var, 1) self.assertEqual(xor[i].lower, 1) self.assertEqual(xor[i].upper, 1) + def check_indexed_xor_constraints_with_targets(self, transformation): - # check that when we use targets to specfy some DisjunctionDatas in an + # check that when we use targets to specify some DisjunctionDatas in an # IndexedDisjunction, the xor constraint is indexed correctly m = models.makeTwoTermIndexedDisjunction_BoundedVars() TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.disjunction[1], - m.disjunction[3]]) + m, targets=[m.disjunction[1], m.disjunction[3]] + ) - xorC = m.disjunction[1].algebraic_constraint().parent_component() + xorC = m.disjunction[1].algebraic_constraint.parent_component() self.assertIsInstance(xorC, Constraint) self.assertEqual(len(xorC), 2) # check the constraints - for i in [1,3]: + for i in [1, 3]: self.assertEqual(xorC[i].lower, 1) self.assertEqual(xorC[i].upper, 1) repn = generate_standard_repn(xorC[i].body) @@ -413,14 +471,16 @@ def check_indexed_xor_constraints_with_targets(self, transformation): check_linear_coef(self, repn, m.disjunct[i, 0].indicator_var, 1) check_linear_coef(self, repn, m.disjunct[i, 1].indicator_var, 1) + def check_three_term_xor_constraint(self, transformation): # check that the xor constraint has all the indicator variables from a # three-term disjunction m = models.makeThreeTermIndexedDisj() TransformationFactory('gdp.%s' % transformation).apply_to(m) - xor = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ - component("disjunction_xor") + xor = m.component("_pyomo_gdp_%s_reformulation" % transformation).component( + "disjunction_xor" + ) self.assertIsInstance(xor, Constraint) self.assertEqual(xor[1].lower, 1) self.assertEqual(xor[1].upper, 1) @@ -432,18 +492,19 @@ def check_three_term_xor_constraint(self, transformation): self.assertEqual(repn.constant, 0) self.assertEqual(len(repn.linear_vars), 3) for i in range(3): - check_linear_coef(self, repn, m.disjunct[i,1].indicator_var, 1) + check_linear_coef(self, repn, m.disjunct[i, 1].indicator_var, 1) repn = generate_standard_repn(xor[2].body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, 0) self.assertEqual(len(repn.linear_vars), 3) for i in range(3): - check_linear_coef(self, repn, m.disjunct[i,2].indicator_var, 1) + check_linear_coef(self, repn, m.disjunct[i, 2].indicator_var, 1) # mappings + def check_xor_constraint_mapping(self, transformation): # test that we correctly map between disjunctions and XOR constraints m = models.makeTwoTermDisj() @@ -451,10 +512,8 @@ def check_xor_constraint_mapping(self, transformation): trans.apply_to(m) transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) - self.assertIs( trans.get_src_disjunction(transBlock.disjunction_xor), - m.disjunction) - self.assertIs( m.disjunction.algebraic_constraint(), - transBlock.disjunction_xor) + self.assertIs(trans.get_src_disjunction(transBlock.disjunction_xor), m.disjunction) + self.assertIs(m.disjunction.algebraic_constraint, transBlock.disjunction_xor) def check_xor_constraint_mapping_two_disjunctions(self, transformation): @@ -465,16 +524,11 @@ def check_xor_constraint_mapping_two_disjunctions(self, transformation): trans.apply_to(m) transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) - transBlock2 = m.component("_pyomo_gdp_%s_reformulation_4" % transformation) - self.assertIs( trans.get_src_disjunction(transBlock.disjunction_xor), - m.disjunction) - self.assertIs( trans.get_src_disjunction(transBlock2.disjunction2_xor), - m.disjunction2) + self.assertIs(trans.get_src_disjunction(transBlock.disjunction_xor), m.disjunction) + + self.assertIs(m.disjunction.algebraic_constraint, transBlock.disjunction_xor) + self.assertIs(m.disjunction2.algebraic_constraint, transBlock.disjunction2_xor) - self.assertIs( m.disjunction.algebraic_constraint(), - transBlock.disjunction_xor) - self.assertIs( m.disjunction2.algebraic_constraint(), - transBlock2.disjunction2_xor) def check_disjunct_mapping(self, transformation): # check that we correctly map between Disjuncts and their transformation @@ -483,24 +537,26 @@ def check_disjunct_mapping(self, transformation): trans = TransformationFactory('gdp.%s' % transformation) trans.apply_to(m) - disjBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ - relaxedDisjuncts + disjBlock = m.component( + "_pyomo_gdp_%s_reformulation" % transformation + ).relaxedDisjuncts # the disjuncts will always be transformed in the same order, # and d[0] goes first, so we can check in a loop. - for i in [0,1]: - self.assertIs(disjBlock[i]._srcDisjunct(), m.d[i]) + for i in [0, 1]: + self.assertIs(disjBlock[i]._src_disjunct(), m.d[i]) self.assertIs(trans.get_src_disjunct(disjBlock[i]), m.d[i]) + # targets + def check_only_targets_inactive(self, transformation, **kwargs): # test that we only transform targets (by checking active status) m = models.makeTwoSimpleDisjunctions() TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.disjunction1], - **kwargs) + m, targets=[m.disjunction1], **kwargs + ) self.assertFalse(m.disjunction1.active) # disjunction2 still active @@ -513,37 +569,28 @@ def check_only_targets_inactive(self, transformation, **kwargs): self.assertTrue(m.disjunct2[1].active) self.assertTrue(m.disjunct2.active) + def check_only_targets_get_transformed(self, transformation): # test that we only transform targets (by checking the actual components) m = models.makeTwoSimpleDisjunctions() trans = TransformationFactory('gdp.%s' % transformation) - trans.apply_to( - m, - targets=[m.disjunction1]) + trans.apply_to(m, targets=[m.disjunction1]) - disjBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ - relaxedDisjuncts + disjBlock = m.component( + "_pyomo_gdp_%s_reformulation" % transformation + ).relaxedDisjuncts # only two disjuncts relaxed self.assertEqual(len(disjBlock), 2) - # Note that in hull, these aren't the only components that get created, but - # they are a proxy for which disjuncts got relaxed, which is what we want to - # check. - self.assertIsInstance(disjBlock[0].component("disjunct1[0].c"), - Constraint) - self.assertIsInstance(disjBlock[1].component("disjunct1[1].c"), - Constraint) - - pairs = [ - (0, 0), - (1, 1) - ] + + pairs = [(0, 0), (1, 1)] for i, j in pairs: - self.assertIs(disjBlock[i], m.disjunct1[j].transformation_block()) + self.assertIs(disjBlock[i], m.disjunct1[j].transformation_block) self.assertIs(trans.get_src_disjunct(disjBlock[i]), m.disjunct1[j]) self.assertIsNone(m.disjunct2[0].transformation_block) self.assertIsNone(m.disjunct2[1].transformation_block) + def check_target_not_a_component_error(self, transformation, **kwargs): # test error message for crazy targets decoy = ConcreteModel() @@ -554,7 +601,10 @@ def check_target_not_a_component_error(self, transformation, **kwargs): "Target 'block' is not a component on instance 'unknown'!", TransformationFactory('gdp.%s' % transformation).apply_to, m, - targets=[decoy.block], **kwargs) + targets=[decoy.block], + **kwargs + ) + def check_targets_cannot_be_cuids(self, transformation): # check that we scream if targets are cuids @@ -568,23 +618,25 @@ def check_targets_cannot_be_cuids(self, transformation): r"\n\tReceived %s" % type(ComponentUID(m.disjunction)), TransformationFactory('gdp.%s' % transformation).apply_to, m, - targets=[ComponentUID(m.disjunction)]) + targets=[ComponentUID(m.disjunction)], + ) + def check_indexedDisj_targets_inactive(self, transformation, **kwargs): # check that targets are deactivated (when target is IndexedDisjunction) m = models.makeDisjunctionsOnIndexedBlock() TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.disjunction1], **kwargs) + m, targets=[m.disjunction1], **kwargs + ) self.assertFalse(m.disjunction1.active) self.assertFalse(m.disjunction1[1].active) self.assertFalse(m.disjunction1[2].active) - self.assertFalse(m.disjunct1[1,0].active) - self.assertFalse(m.disjunct1[1,1].active) - self.assertFalse(m.disjunct1[2,0].active) - self.assertFalse(m.disjunct1[2,1].active) + self.assertFalse(m.disjunct1[1, 0].active) + self.assertFalse(m.disjunct1[1, 1].active) + self.assertFalse(m.disjunct1[2, 0].active) + self.assertFalse(m.disjunct1[2, 1].active) self.assertFalse(m.disjunct1.active) self.assertTrue(m.b[0].disjunct[0].active) @@ -592,58 +644,96 @@ def check_indexedDisj_targets_inactive(self, transformation, **kwargs): self.assertTrue(m.b[1].disjunct0.active) self.assertTrue(m.b[1].disjunct1.active) + def check_indexedDisj_only_targets_transformed(self, transformation): # check that only the targets are transformed (with IndexedDisjunction as # target) m = models.makeDisjunctionsOnIndexedBlock() trans = TransformationFactory('gdp.%s' % transformation) - trans.apply_to( - m, - targets=[m.disjunction1]) + trans.apply_to(m, targets=[m.disjunction1]) - disjBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ - relaxedDisjuncts + disjBlock = m.component( + "_pyomo_gdp_%s_reformulation" % transformation + ).relaxedDisjuncts self.assertEqual(len(disjBlock), 4) - self.assertIsInstance(disjBlock[0].component("disjunct1[1,0].c"), - Constraint) - self.assertIsInstance(disjBlock[1].component("disjunct1[1,1].c"), - Constraint) - self.assertIsInstance(disjBlock[2].component("disjunct1[2,0].c"), - Constraint) - self.assertIsInstance(disjBlock[3].component("disjunct1[2,1].c"), - Constraint) + if transformation == 'bigm': + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[1, 0].c)[0].parent_block(), + disjBlock[0], + ) + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[1, 1].c)[0].parent_block(), + disjBlock[1], + ) + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[2, 0].c)[0].parent_block(), + disjBlock[2], + ) + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[2, 1].c)[0].parent_block(), + disjBlock[3], + ) + elif transformation == 'hull': + # In the disaggregated var bounds + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[1, 0].c)[0] + .parent_block() + .parent_block(), + disjBlock[2], + ) + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[1, 1].c)[0].parent_block(), + disjBlock[3], + ) + # In the disaggregated var bounds + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[2, 0].c)[0] + .parent_block() + .parent_block(), + disjBlock[0], + ) + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[2, 1].c)[0].parent_block(), + disjBlock[1], + ) # This relies on the disjunctions being transformed in the same order # every time. These are the mappings between the indices of the original # disjuncts and the indices on the indexed block on the transformation # block. - pairs = [ - ((1,0), 0), - ((1,1), 1), - ((2,0), 2), - ((2,1), 3), - ] + if transformation == 'bigm': + pairs = [((1, 0), 0), ((1, 1), 1), ((2, 0), 2), ((2, 1), 3)] + elif transformation == 'hull': + pairs = [((2, 0), 0), ((2, 1), 1), ((1, 0), 2), ((1, 1), 3)] + for i, j in pairs: self.assertIs(trans.get_src_disjunct(disjBlock[j]), m.disjunct1[i]) - self.assertIs(disjBlock[j], m.disjunct1[i].transformation_block()) + self.assertIs(disjBlock[j], m.disjunct1[i].transformation_block) + def check_warn_for_untransformed(self, transformation, **kwargs): # Check that we complain if we find an untransformed Disjunct inside of # another Disjunct we are transforming m = models.makeDisjunctionsOnIndexedBlock() + def innerdisj_rule(d, flag): m = d.model() if flag: d.c = Constraint(expr=m.a[1] <= 2) else: d.c = Constraint(expr=m.a[1] >= 65) - m.disjunct1[1,1].innerdisjunct = Disjunct([0,1], rule=innerdisj_rule) - m.disjunct1[1,1].innerdisjunction = Disjunction([0], - rule=lambda a,i: [m.disjunct1[1,1].innerdisjunct[0], - m.disjunct1[1,1].innerdisjunct[1]]) + + m.disjunct1[1, 1].innerdisjunct = Disjunct([0, 1], rule=innerdisj_rule) + m.disjunct1[1, 1].innerdisjunction = Disjunction( + [0], + rule=lambda a, i: [ + m.disjunct1[1, 1].innerdisjunct[0], + m.disjunct1[1, 1].innerdisjunct[1], + ], + ) # if the disjunction doesn't drive the transformation of the Disjuncts, we # get the error - m.disjunct1[1,1].innerdisjunction.deactivate() + m.disjunct1[1, 1].innerdisjunction.deactivate() # This test relies on the order that the component objects of # the disjunct get considered. In this case, the disjunct # causes the error, but in another world, it could be the @@ -655,28 +745,29 @@ def innerdisj_rule(d, flag): TransformationFactory('gdp.%s' % transformation).create_using, m, targets=[m.disjunction1[1]], - **kwargs) - m.disjunct1[1,1].innerdisjunction.activate() + **kwargs + ) + m.disjunct1[1, 1].innerdisjunction.activate() + def check_disjData_targets_inactive(self, transformation, **kwargs): # check targets deactivated with DisjunctionData is the target m = models.makeDisjunctionsOnIndexedBlock() TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.disjunction1[2]], - **kwargs) + m, targets=[m.disjunction1[2]], **kwargs + ) self.assertFalse(m.disjunction1[2].active) self.assertTrue(m.disjunct1.active) - self.assertTrue(m.disjunct1[1,0].active) - self.assertIsNone(m.disjunct1[1,0]._transformation_block) - self.assertTrue(m.disjunct1[1,1].active) - self.assertIsNone(m.disjunct1[1,1]._transformation_block) - self.assertFalse(m.disjunct1[2,0].active) - self.assertIsNotNone(m.disjunct1[2,0]._transformation_block) - self.assertFalse(m.disjunct1[2,1].active) - self.assertIsNotNone(m.disjunct1[2,1]._transformation_block) + self.assertTrue(m.disjunct1[1, 0].active) + self.assertIsNone(m.disjunct1[1, 0]._transformation_block) + self.assertTrue(m.disjunct1[1, 1].active) + self.assertIsNone(m.disjunct1[1, 1]._transformation_block) + self.assertFalse(m.disjunct1[2, 0].active) + self.assertIsNotNone(m.disjunct1[2, 0]._transformation_block) + self.assertFalse(m.disjunct1[2, 1].active) + self.assertIsNotNone(m.disjunct1[2, 1]._transformation_block) self.assertTrue(m.b[0].disjunct.active) self.assertTrue(m.b[0].disjunct[0].active) @@ -688,51 +779,60 @@ def check_disjData_targets_inactive(self, transformation, **kwargs): self.assertTrue(m.b[1].disjunct1.active) self.assertIsNone(m.b[1].disjunct1._transformation_block) + def check_disjData_only_targets_transformed(self, transformation): # check that targets are transformed when DisjunctionData is the target m = models.makeDisjunctionsOnIndexedBlock() trans = TransformationFactory('gdp.%s' % transformation) - trans.apply_to( - m, - targets=[m.disjunction1[2]]) + trans.apply_to(m, targets=[m.disjunction1[2]]) - disjBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation).\ - relaxedDisjuncts + disjBlock = m.component( + "_pyomo_gdp_%s_reformulation" % transformation + ).relaxedDisjuncts self.assertEqual(len(disjBlock), 2) - self.assertIsInstance(disjBlock[0].component("disjunct1[2,0].c"), - Constraint) - self.assertIsInstance(disjBlock[1].component("disjunct1[2,1].c"), - Constraint) + if transformation == 'bigm': + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[2, 0].c)[0].parent_block(), + disjBlock[0], + ) + elif transformation == 'hull': + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[2, 0].c)[0] + .parent_block() + .parent_block(), + disjBlock[0], + ) + self.assertIs( + trans.get_transformed_constraints(m.disjunct1[2, 1].c)[0].parent_block(), + disjBlock[1], + ) # This relies on the disjunctions being transformed in the same order # every time. These are the mappings between the indices of the original # disjuncts and the indices on the indexed block on the transformation # block. - pairs = [ - ((2,0), 0), - ((2,1), 1), - ] + pairs = [((2, 0), 0), ((2, 1), 1)] for i, j in pairs: - self.assertIs(m.disjunct1[i].transformation_block(), disjBlock[j]) + self.assertIs(m.disjunct1[i].transformation_block, disjBlock[j]) self.assertIs(trans.get_src_disjunct(disjBlock[j]), m.disjunct1[i]) + def check_indexedBlock_targets_inactive(self, transformation, **kwargs): # check that targets are deactivated when target is an IndexedBlock m = models.makeDisjunctionsOnIndexedBlock() TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.b], - **kwargs) + m, targets=[m.b], **kwargs + ) self.assertTrue(m.disjunct1.active) - self.assertTrue(m.disjunct1[1,0].active) - self.assertTrue(m.disjunct1[1,1].active) - self.assertTrue(m.disjunct1[2,0].active) - self.assertTrue(m.disjunct1[2,1].active) - self.assertIsNone(m.disjunct1[1,0].transformation_block) - self.assertIsNone(m.disjunct1[1,1].transformation_block) - self.assertIsNone(m.disjunct1[2,0].transformation_block) - self.assertIsNone(m.disjunct1[2,1].transformation_block) + self.assertTrue(m.disjunct1[1, 0].active) + self.assertTrue(m.disjunct1[1, 1].active) + self.assertTrue(m.disjunct1[2, 0].active) + self.assertTrue(m.disjunct1[2, 1].active) + self.assertIsNone(m.disjunct1[1, 0].transformation_block) + self.assertIsNone(m.disjunct1[1, 1].transformation_block) + self.assertIsNone(m.disjunct1[2, 0].transformation_block) + self.assertIsNone(m.disjunct1[2, 1].transformation_block) self.assertFalse(m.b[0].disjunct.active) self.assertFalse(m.b[0].disjunct[0].active) @@ -740,43 +840,49 @@ def check_indexedBlock_targets_inactive(self, transformation, **kwargs): self.assertFalse(m.b[1].disjunct0.active) self.assertFalse(m.b[1].disjunct1.active) + def check_indexedBlock_only_targets_transformed(self, transformation): # check that targets are transformed when target is an IndexedBlock m = models.makeDisjunctionsOnIndexedBlock() trans = TransformationFactory('gdp.%s' % transformation) - trans.apply_to( - m, - targets=[m.b]) + trans.apply_to(m, targets=[m.b]) - disjBlock1 = m.b[0].component( - "_pyomo_gdp_%s_reformulation" % transformation).relaxedDisjuncts + disjBlock1 = ( + m.b[0] + .component("_pyomo_gdp_%s_reformulation" % transformation) + .relaxedDisjuncts + ) self.assertEqual(len(disjBlock1), 2) - self.assertIsInstance(disjBlock1[0].component("b[0].disjunct[0].c"), - Constraint) - self.assertIsInstance(disjBlock1[1].component("b[0].disjunct[1].c"), - Constraint) - disjBlock2 = m.b[1].component( - "_pyomo_gdp_%s_reformulation" % transformation).relaxedDisjuncts + self.assertIs( + trans.get_transformed_constraints(m.b[0].disjunct[0].c)[0].parent_block(), + disjBlock1[0], + ) + self.assertIs( + trans.get_transformed_constraints(m.b[0].disjunct[1].c)[0].parent_block(), + disjBlock1[1], + ) + + disjBlock2 = ( + m.b[1] + .component("_pyomo_gdp_%s_reformulation" % transformation) + .relaxedDisjuncts + ) self.assertEqual(len(disjBlock2), 2) - self.assertIsInstance(disjBlock2[0].component("b[1].disjunct0.c"), - Constraint) - self.assertIsInstance(disjBlock2[1].component("b[1].disjunct1.c"), - Constraint) + self.assertIs( + trans.get_transformed_constraints(m.b[1].disjunct0.c)[0].parent_block(), + disjBlock2[0], + ) + self.assertIs( + trans.get_transformed_constraints(m.b[1].disjunct1.c)[0].parent_block(), + disjBlock2[1], + ) # This relies on the disjunctions being transformed in the same order # every time. This dictionary maps the block index to the list of # pairs of (originalDisjunctIndex, transBlockIndex) pairs = { - 0: - [ - ('disjunct',0,0), - ('disjunct',1,1), - ], - 1: - [ - ('disjunct0',None,0), - ('disjunct1',None,1), - ] + 0: [('disjunct', 0, 0), ('disjunct', 1, 1)], + 1: [('disjunct0', None, 0), ('disjunct1', None, 1)], } for blocknum, lst in pairs.items(): @@ -786,27 +892,27 @@ def check_indexedBlock_only_targets_transformed(self, transformation): disjBlock = disjBlock1 if blocknum == 1: disjBlock = disjBlock2 - self.assertIs(original[i].transformation_block(), disjBlock[j]) + self.assertIs(original[i].transformation_block, disjBlock[j]) self.assertIs(trans.get_src_disjunct(disjBlock[j]), original[i]) + def check_blockData_targets_inactive(self, transformation, **kwargs): # test that BlockData target is deactivated m = models.makeDisjunctionsOnIndexedBlock() TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.b[0]], - **kwargs) + m, targets=[m.b[0]], **kwargs + ) checkb0TargetsInactive(self, m) + def check_blockData_only_targets_transformed(self, transformation): # test that BlockData target is transformed m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.b[0]]) + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m.b[0]]) checkb0TargetsTransformed(self, m, transformation) + def check_do_not_transform_deactivated_targets(self, transformation): # test that if a deactivated component is given as a target, we don't # transform it. (This is actually an important test because it is the only @@ -816,39 +922,43 @@ def check_do_not_transform_deactivated_targets(self, transformation): m = models.makeDisjunctionsOnIndexedBlock() m.b[1].deactivate() TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.b[0], m.b[1]]) + m, targets=[m.b[0], m.b[1]] + ) checkb0TargetsInactive(self, m) checkb0TargetsTransformed(self, m, transformation) + def check_disjunction_data_target(self, transformation): # test that if we transform DisjunctionDatas one at a time, we get what we # expect in terms of using the same transformation block and the indexing of # the xor constraint. m = models.makeThreeTermIndexedDisj() TransformationFactory('gdp.%s' % transformation).apply_to( - m, targets=[m.disjunction[2]]) + m, targets=[m.disjunction[2]] + ) # we got a transformation block on the model transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) self.assertIsInstance(transBlock, Block) - self.assertIsInstance(transBlock.component("disjunction_xor"), - Constraint) - self.assertIsInstance(transBlock.disjunction_xor[2], - constraint._GeneralConstraintData) + self.assertIsInstance(transBlock.component("disjunction_xor"), Constraint) + self.assertIsInstance( + transBlock.disjunction_xor[2], constraint._GeneralConstraintData + ) self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) self.assertEqual(len(transBlock.relaxedDisjuncts), 3) # suppose we transform the next one separately TransformationFactory('gdp.%s' % transformation).apply_to( - m, targets=[m.disjunction[1]]) - # we added to the same XOR constraint before - self.assertIsInstance(transBlock.disjunction_xor[1], - constraint._GeneralConstraintData) - # we used the same transformation block, so we have more relaxed - # disjuncts - self.assertEqual(len(transBlock.relaxedDisjuncts), 6) + m, targets=[m.disjunction[1]] + ) + self.assertIsInstance( + m.disjunction[1].algebraic_constraint, constraint._GeneralConstraintData + ) + transBlock = m.component("_pyomo_gdp_%s_reformulation_4" % transformation) + self.assertIsInstance(transBlock, Block) + self.assertEqual(len(transBlock.relaxedDisjuncts), 3) + def check_disjunction_data_target_any_index(self, transformation): # check the same as the above, but that it still works when the Disjunction @@ -857,34 +967,43 @@ def check_disjunction_data_target_any_index(self, transformation): m.x = Var(bounds=(-100, 100)) m.disjunct3 = Disjunct(Any) m.disjunct4 = Disjunct(Any) - m.disjunction2=Disjunction(Any) + m.disjunction2 = Disjunction(Any) for i in range(2): m.disjunct3[i].cons = Constraint(expr=m.x == 2) m.disjunct4[i].cons = Constraint(expr=m.x <= 3) m.disjunction2[i] = [m.disjunct3[i], m.disjunct4[i]] TransformationFactory('gdp.%s' % transformation).apply_to( - m, targets=[m.disjunction2[i]]) + m, targets=[m.disjunction2[i]] + ) if i == 0: - check_relaxation_block(self, m, "_pyomo_gdp_%s_reformulation" % - transformation, 2) + check_relaxation_block( + self, m, "_pyomo_gdp_%s_reformulation" % transformation, 2 + ) if i == 2: - check_relaxation_block(self, m, "_pyomo_gdp_%s_reformulation" % - transformation, 4) + check_relaxation_block( + self, m, "_pyomo_gdp_%s_reformulation" % transformation, 4 + ) + # tests that we treat disjunctions on blocks correctly (the main issue here is # that if you were to solve that block post-transformation that you would have # the whole transformed model) + def check_xor_constraint_added(self, transformation): # test we put the xor on the transformation block m = models.makeTwoTermDisjOnBlock() TransformationFactory('gdp.%s' % transformation).apply_to(m) self.assertIsInstance( - m.b.component("_pyomo_gdp_%s_reformulation" % transformation).\ - component('b.disjunction_xor'), Constraint) + m.b.component("_pyomo_gdp_%s_reformulation" % transformation).component( + m.b.disjunction.algebraic_constraint.local_name + ), + Constraint, + ) + def check_trans_block_created(self, transformation): # check we put the transformation block on the parent block of the @@ -899,47 +1018,44 @@ def check_trans_block_created(self, transformation): self.assertIsInstance(disjBlock, Block) self.assertEqual(len(disjBlock), 2) # and that it didn't get created on the model - self.assertIsNone( - m.component('_pyomo_gdp_%s_reformulation' % transformation)) + self.assertIsNone(m.component('_pyomo_gdp_%s_reformulation' % transformation)) # disjunction generation tests: These all suppose that you are doing some sort # of column and constraint generation algorithm, but you are in fact generating # Disjunctions and retransforming the model after each addition. -def check_iteratively_adding_to_indexed_disjunction_on_block(self, - transformation): + +def check_iteratively_adding_to_indexed_disjunction_on_block(self, transformation): # check that we can iteratively add to an IndexedDisjunction and transform # the block it lives on m = ConcreteModel() m.b = Block() m.b.x = Var(bounds=(-100, 100)) - m.b.firstTerm = Disjunct([1,2]) + m.b.firstTerm = Disjunct([1, 2]) m.b.firstTerm[1].cons = Constraint(expr=m.b.x == 0) m.b.firstTerm[2].cons = Constraint(expr=m.b.x == 2) - m.b.secondTerm = Disjunct([1,2]) + m.b.secondTerm = Disjunct([1, 2]) m.b.secondTerm[1].cons = Constraint(expr=m.b.x >= 2) m.b.secondTerm[2].cons = Constraint(expr=m.b.x >= 3) m.b.disjunctionList = Disjunction(Any) m.b.obj = Objective(expr=m.b.x) - for i in range(1,3): - m.b.disjunctionList[i] = [m.b.firstTerm[i], m.b.secondTerm[i]] - - TransformationFactory('gdp.%s' % transformation).apply_to(m, - targets=[m.b]) + for i in range(1, 3): m.b.disjunctionList[i] = [m.b.firstTerm[i], m.b.secondTerm[i]] - TransformationFactory('gdp.%s' % transformation).apply_to(m, - targets=[m.b]) + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m.b]) if i == 1: - check_relaxation_block(self, m.b, "_pyomo_gdp_%s_reformulation" % - transformation, 2) + check_relaxation_block( + self, m.b, "_pyomo_gdp_%s_reformulation" % transformation, 2 + ) if i == 2: - check_relaxation_block(self, m.b, "_pyomo_gdp_%s_reformulation" % - transformation, 4) + check_relaxation_block( + self, m.b, "_pyomo_gdp_%s_reformulation_4" % transformation, 2 + ) + def check_simple_disjunction_of_disjunct_datas(self, transformation): # This is actually a reasonable use case if you are generating @@ -951,16 +1067,13 @@ def check_simple_disjunction_of_disjunct_datas(self, transformation): self.check_trans_block_disjunctions_of_disjunct_datas(m) transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) - self.assertIsInstance( transBlock.component("disjunction_xor"), - Constraint) - transBlock2 = m.component("_pyomo_gdp_%s_reformulation_4" % transformation) - self.assertIsInstance( transBlock2.component("disjunction2_xor"), - Constraint) + self.assertIsInstance(transBlock.component("disjunction_xor"), Constraint) + self.assertIsInstance(transBlock.component("disjunction2_xor"), Constraint) + # these tests have different checks for what ends up on the model between bigm # and hull, but they have the same structure -def check_iteratively_adding_disjunctions_transform_container(self, - transformation): +def check_iteratively_adding_disjunctions_transform_container(self, transformation): # Check that we can play the same game with iteratively adding Disjunctions, # but this time just specify the IndexedDisjunction as the argument. Note # that the success of this depends on our rebellion regarding the active @@ -972,26 +1085,28 @@ def check_iteratively_adding_disjunctions_transform_container(self, for i in range(2): firstTermName = "firstTerm[%s]" % i model.add_component(firstTermName, Disjunct()) - model.component(firstTermName).cons = Constraint( - expr=model.x == 2*i) + model.component(firstTermName).cons = Constraint(expr=model.x == 2 * i) secondTermName = "secondTerm[%s]" % i model.add_component(secondTermName, Disjunct()) - model.component(secondTermName).cons = Constraint( - expr=model.x >= i + 2) - model.disjunctionList[i] = [model.component(firstTermName), - model.component(secondTermName)] + model.component(secondTermName).cons = Constraint(expr=model.x >= i + 2) + model.disjunctionList[i] = [ + model.component(firstTermName), + model.component(secondTermName), + ] # we're lazy and we just transform the disjunctionList (and in # theory we are transforming at every iteration because we are # solving at every iteration) TransformationFactory('gdp.%s' % transformation).apply_to( - model, targets=[model.disjunctionList]) + model, targets=[model.disjunctionList] + ) if i == 0: self.check_first_iteration(model) if i == 1: self.check_second_iteration(model) + def check_disjunction_and_disjuncts_indexed_by_any(self, transformation): # check that we can play the same game when the Disjuncts also are indexed # by Any @@ -1005,7 +1120,7 @@ def check_disjunction_and_disjuncts_indexed_by_any(self, transformation): model.obj = Objective(expr=model.x) for i in range(2): - model.firstTerm[i].cons = Constraint(expr=model.x == 2*i) + model.firstTerm[i].cons = Constraint(expr=model.x == 2 * i) model.secondTerm[i].cons = Constraint(expr=model.x >= i + 2) model.disjunctionList[i] = [model.firstTerm[i], model.secondTerm[i]] @@ -1017,6 +1132,7 @@ def check_disjunction_and_disjuncts_indexed_by_any(self, transformation): if i == 1: self.check_second_iteration(model) + def check_iteratively_adding_disjunctions_transform_model(self, transformation): # Same as above, but transforming whole model in every iteration model = ConcreteModel() @@ -1026,14 +1142,14 @@ def check_iteratively_adding_disjunctions_transform_model(self, transformation): for i in range(2): firstTermName = "firstTerm[%s]" % i model.add_component(firstTermName, Disjunct()) - model.component(firstTermName).cons = Constraint( - expr=model.x == 2*i) + model.component(firstTermName).cons = Constraint(expr=model.x == 2 * i) secondTermName = "secondTerm[%s]" % i model.add_component(secondTermName, Disjunct()) - model.component(secondTermName).cons = Constraint( - expr=model.x >= i + 2) - model.disjunctionList[i] = [model.component(firstTermName), - model.component(secondTermName)] + model.component(secondTermName).cons = Constraint(expr=model.x >= i + 2) + model.disjunctionList[i] = [ + model.component(firstTermName), + model.component(secondTermName), + ] # we're lazy and we just transform the model (and in # theory we are transforming at every iteration because we are @@ -1045,8 +1161,10 @@ def check_iteratively_adding_disjunctions_transform_model(self, transformation): if i == 1: self.check_second_iteration(model) + # transforming blocks + # If you transform a block as if it is a model, the transformation should # only modify the block you passed it, else when you solve the block, you # are missing the disjunction you thought was on there. @@ -1055,70 +1173,76 @@ def check_transformation_simple_block(self, transformation, **kwargs): TransformationFactory('gdp.%s' % transformation).apply_to(m.b, **kwargs) # transformation block not on m - self.assertIsNone( - m.component("_pyomo_gdp_%s_reformulation" % transformation)) + self.assertIsNone(m.component("_pyomo_gdp_%s_reformulation" % transformation)) # transformation block on m.b - self.assertIsInstance(m.b.component("_pyomo_gdp_%s_reformulation" % - transformation), Block) + self.assertIsInstance( + m.b.component("_pyomo_gdp_%s_reformulation" % transformation), Block + ) + def check_transform_block_data(self, transformation, **kwargs): m = models.makeDisjunctionsOnIndexedBlock() TransformationFactory('gdp.%s' % transformation).apply_to(m.b[0], **kwargs) - self.assertIsNone( - m.component("_pyomo_gdp_%s_reformulation" % transformation)) + self.assertIsNone(m.component("_pyomo_gdp_%s_reformulation" % transformation)) + + self.assertIsInstance( + m.b[0].component("_pyomo_gdp_%s_reformulation" % transformation), Block + ) - self.assertIsInstance(m.b[0].component("_pyomo_gdp_%s_reformulation" % - transformation), Block) def check_simple_block_target(self, transformation, **kwargs): m = models.makeTwoTermDisjOnBlock() - TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m.b], - **kwargs) + TransformationFactory('gdp.%s' % transformation).apply_to( + m, targets=[m.b], **kwargs + ) # transformation block not on m - self.assertIsNone( - m.component("_pyomo_gdp_%s_reformulation" % transformation)) + self.assertIsNone(m.component("_pyomo_gdp_%s_reformulation" % transformation)) # transformation block on m.b - self.assertIsInstance(m.b.component("_pyomo_gdp_%s_reformulation" % - transformation), Block) + self.assertIsInstance( + m.b.component("_pyomo_gdp_%s_reformulation" % transformation), Block + ) + def check_block_data_target(self, transformation, **kwargs): m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.%s' % transformation).apply_to(m, - targets=[m.b[0]], - **kwargs) + TransformationFactory('gdp.%s' % transformation).apply_to( + m, targets=[m.b[0]], **kwargs + ) + + self.assertIsNone(m.component("_pyomo_gdp_%s_reformulation" % transformation)) - self.assertIsNone( - m.component("_pyomo_gdp_%s_reformulation" % transformation)) + self.assertIsInstance( + m.b[0].component("_pyomo_gdp_%s_reformulation" % transformation), Block + ) - self.assertIsInstance(m.b[0].component("_pyomo_gdp_%s_reformulation" % - transformation), Block) def check_indexed_block_target(self, transformation, **kwargs): m = models.makeDisjunctionsOnIndexedBlock() - TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m.b], - **kwargs) + TransformationFactory('gdp.%s' % transformation).apply_to( + m, targets=[m.b], **kwargs + ) # We expect the transformation block on each of the BlockDatas. Because # it is always going on the parent block of the disjunction. - self.assertIsNone( - m.component("_pyomo_gdp_%s_reformulation" % transformation)) + self.assertIsNone(m.component("_pyomo_gdp_%s_reformulation" % transformation)) + + for i in [0, 1]: + self.assertIsInstance( + m.b[i].component("_pyomo_gdp_%s_reformulation" % transformation), Block + ) - for i in [0,1]: - self.assertIsInstance( m.b[i].component("_pyomo_gdp_%s_reformulation" % - transformation), Block) def check_block_targets_inactive(self, transformation, **kwargs): m = models.makeTwoTermDisjOnBlock() m = models.add_disj_not_on_block(m) TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.b], - **kwargs) + m, targets=[m.b], **kwargs + ) self.assertFalse(m.b.disjunct[0].active) self.assertFalse(m.b.disjunct[1].active) @@ -1126,34 +1250,46 @@ def check_block_targets_inactive(self, transformation, **kwargs): self.assertTrue(m.simpledisj.active) self.assertTrue(m.simpledisj2.active) + def check_block_only_targets_transformed(self, transformation): m = models.makeTwoTermDisjOnBlock() m = models.add_disj_not_on_block(m) trans = TransformationFactory('gdp.%s' % transformation) - trans.apply_to( - m, - targets=[m.b]) + trans.apply_to(m, targets=[m.b]) - disjBlock = m.b.component("_pyomo_gdp_%s_reformulation" % transformation).\ - relaxedDisjuncts + disjBlock = m.b.component( + "_pyomo_gdp_%s_reformulation" % transformation + ).relaxedDisjuncts self.assertEqual(len(disjBlock), 2) - self.assertIsInstance(disjBlock[0].component("b.disjunct[0].c"), - Constraint) - self.assertIsInstance(disjBlock[1].component("b.disjunct[1].c"), - Constraint) + if transformation == 'bigm': + self.assertIs( + disjBlock[0], + trans.get_transformed_constraints(m.b.disjunct[0].c)[0].parent_block(), + ) + elif transformation == 'hull': + # this constraint is on the bounds of the disaggregated var + self.assertIs( + disjBlock[0], + trans.get_transformed_constraints(m.b.disjunct[0].c)[0] + .parent_block() + .parent_block(), + ) + self.assertIs( + disjBlock[1], + trans.get_transformed_constraints(m.b.disjunct[1].c)[0].parent_block(), + ) # this relies on the disjuncts being transformed in the same order every # time - pairs = [ - (0,0), - (1,1), - ] + pairs = [(0, 0), (1, 1)] for i, j in pairs: - self.assertIs(m.b.disjunct[i].transformation_block(), disjBlock[j]) + self.assertIs(m.b.disjunct[i].transformation_block, disjBlock[j]) self.assertIs(trans.get_src_disjunct(disjBlock[j]), m.b.disjunct[i]) + # common error messages + def check_transform_empty_disjunction(self, transformation, **kwargs): m = ConcreteModel() m.empty = Disjunction(expr=[]) @@ -1163,12 +1299,14 @@ def check_transform_empty_disjunction(self, transformation, **kwargs): "Disjunction 'empty' is empty. This is likely indicative of a " "modeling error.*", TransformationFactory('gdp.%s' % transformation).apply_to, - m, **kwargs) + m, + **kwargs + ) -def check_deactivated_disjunct_nonzero_indicator_var(self, transformation, - **kwargs): + +def check_deactivated_disjunct_nonzero_indicator_var(self, transformation, **kwargs): m = ConcreteModel() - m.x = Var(bounds=(0,8)) + m.x = Var(bounds=(0, 8)) m.disjunction = Disjunction(expr=[m.x == 0, m.x >= 4]) m.disjunction.disjuncts[0].deactivate() @@ -1179,12 +1317,14 @@ def check_deactivated_disjunct_nonzero_indicator_var(self, transformation, r"The disjunct 'disjunction_disjuncts\[0\]' is deactivated, but the " r"indicator_var is fixed to True. This makes no sense.", TransformationFactory('gdp.%s' % transformation).apply_to, - m, **kwargs) + m, + **kwargs + ) -def check_deactivated_disjunct_unfixed_indicator_var(self, transformation, - **kwargs): + +def check_deactivated_disjunct_unfixed_indicator_var(self, transformation, **kwargs): m = ConcreteModel() - m.x = Var(bounds=(0,8)) + m.x = Var(bounds=(0, 8)) m.disjunction = Disjunction(expr=[m.x == 0, m.x >= 4]) m.disjunction.disjuncts[0].deactivate() @@ -1194,11 +1334,14 @@ def check_deactivated_disjunct_unfixed_indicator_var(self, transformation, GDP_Error, r"The disjunct 'disjunction_disjuncts\[0\]' is deactivated, but the " r"indicator_var is not fixed and the disjunct does not " - r"appear to have been relaxed. This makes no sense. " + r"appear to have been transformed. This makes no sense. " r"\(If the intent is to deactivate the disjunct, fix its " r"indicator_var to False.\)", TransformationFactory('gdp.%s' % transformation).apply_to, - m, **kwargs) + m, + **kwargs + ) + def check_retrieving_nondisjunctive_components(self, transformation): m = models.makeTwoTermDisj() @@ -1211,36 +1354,41 @@ def check_retrieving_nondisjunctive_components(self, transformation): self.assertRaisesRegex( GDP_Error, - "Constraint 'b.global_cons' is not on a disjunct and so was not " - "transformed", + "Constraint 'b.global_cons' is not on a disjunct and so was not transformed", trans.get_transformed_constraints, - m.b.global_cons) + m.b.global_cons, + ) self.assertRaisesRegex( GDP_Error, "Constraint 'b.global_cons' is not a transformed constraint", trans.get_src_constraint, - m.b.global_cons) + m.b.global_cons, + ) self.assertRaisesRegex( GDP_Error, "Constraint 'another_global_cons' is not a transformed constraint", trans.get_src_constraint, - m.another_global_cons) + m.another_global_cons, + ) self.assertRaisesRegex( GDP_Error, "Block 'b' doesn't appear to be a transformation block for a " "disjunct. No source disjunct found.", trans.get_src_disjunct, - m.b) + m.b, + ) self.assertRaisesRegex( GDP_Error, "It appears that 'another_global_cons' is not an XOR or OR" " constraint resulting from transforming a Disjunction.", trans.get_src_disjunction, - m.another_global_cons) + m.another_global_cons, + ) + def check_silly_target(self, transformation, **kwargs): m = models.makeTwoTermDisj() @@ -1252,10 +1400,14 @@ def check_silly_target(self, transformation, **kwargs): r"can't be transformed.", TransformationFactory('gdp.%s' % transformation).apply_to, m, - targets=[m.d[1].c1], **kwargs) + targets=[m.d[1].c1], + **kwargs + ) + def check_ask_for_transformed_constraint_from_untransformed_disjunct( - self, transformation): + self, transformation +): m = models.makeTwoTermIndexedDisjunction() trans = TransformationFactory('gdp.%s' % transformation) trans.apply_to(m, targets=m.disjunction[1]) @@ -1265,21 +1417,26 @@ def check_ask_for_transformed_constraint_from_untransformed_disjunct( r"Constraint 'disjunct\[2,b\].cons_b' is on a disjunct which has " r"not been transformed", trans.get_transformed_constraints, - m.disjunct[2, 'b'].cons_b) + m.disjunct[2, 'b'].cons_b, + ) + -def check_error_for_same_disjunct_in_multiple_disjunctions(self, transformation, - **kwargs): +def check_error_for_same_disjunct_in_multiple_disjunctions( + self, transformation, **kwargs +): m = models.makeDisjunctInMultipleDisjunctions() self.assertRaisesRegex( GDP_Error, r"The disjunct 'disjunct1\[1\]' has been transformed, " - r"but a disjunction it appears in has not. Putting the same " - r"disjunct in multiple disjunctions is not supported.", + r"but 'disjunction2', a disjunction it appears in, has not. " + r"Putting the same disjunct in multiple disjunctions is not supported.", TransformationFactory('gdp.%s' % transformation).apply_to, - m, **kwargs) + m, + **kwargs + ) + -def check_cannot_call_transformation_on_disjunction(self, transformation, - **kwargs): +def check_cannot_call_transformation_on_disjunction(self, transformation, **kwargs): m = models.makeTwoTermIndexedDisjunction() trans = TransformationFactory('gdp.%s' % transformation) self.assertRaisesRegex( @@ -1289,22 +1446,24 @@ def check_cannot_call_transformation_on_disjunction(self, transformation, r"must be a ConcreteModel, Block, or Disjunct \(in " r"the case of nested disjunctions\).", trans.apply_to, - m.disjunction, + m.disjunction, targets=m.disjunction[1], **kwargs ) + # This is really neurotic, but test that we will create an infeasible XOR # constraint. We have to because in the case of nested disjunctions, our model # is not necessarily infeasible because of this. It just might make a Disjunct # infeasible. def setup_infeasible_xor_because_all_disjuncts_deactivated(self, transformation): m = ConcreteModel() - m.x = Var(bounds=(0,8)) - m.y = Var(bounds=(0,7)) + m.x = Var(bounds=(0, 8)) + m.y = Var(bounds=(0, 7)) m.disjunction = Disjunction(expr=[m.x == 0, m.x >= 4]) m.disjunction_disjuncts[0].nestedDisjunction = Disjunction( - expr=[m.y == 6, m.y <= 1]) + expr=[m.y == 6, m.y <= 1] + ) # Note that this fixes the indicator variables to 0, but since the # disjunction is still active, the XOR constraint will be created. So we # will have to land in the second disjunct of m.disjunction @@ -1312,14 +1471,11 @@ def setup_infeasible_xor_because_all_disjuncts_deactivated(self, transformation) m.disjunction.disjuncts[0].nestedDisjunction.disjuncts[1].deactivate() # This should create a 0 = 1 XOR constraint, actually... TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=m.disjunction.disjuncts[0].nestedDisjunction) + m, targets=m.disjunction.disjuncts[0].nestedDisjunction + ) # check that our XOR is the bad thing it should be. - transBlock = m.disjunction.disjuncts[0].component( - "_pyomo_gdp_%s_reformulation" % transformation) - xor = transBlock.component( - "disjunction_disjuncts[0].nestedDisjunction_xor") + xor = m.disjunction_disjuncts[0].nestedDisjunction.algebraic_constraint self.assertIsInstance(xor, Constraint) self.assertEqual(value(xor.lower), 1) self.assertEqual(value(xor.upper), 1) @@ -1333,6 +1489,7 @@ def setup_infeasible_xor_because_all_disjuncts_deactivated(self, transformation) return m + def check_disjunction_target_err(self, transformation, **kwargs): m = models.makeNestedDisjunctions() # deactivate the disjunction that would transform the nested Disjuncts so @@ -1344,17 +1501,18 @@ def check_disjunction_target_err(self, transformation, **kwargs): "disjunct 'simpledisjunct'!.*", TransformationFactory('gdp.%s' % transformation).apply_to, m, - targets=[m.disjunction], - **kwargs) + targets=[m.disjunction], + **kwargs + ) # nested disjunctions: hull and bigm have very different handling for nested # disjunctions, but these tests check *that* everything is transformed, not how + def check_disjuncts_inactive_nested(self, transformation, **kwargs): m = models.makeNestedDisjunctions() - TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,), - **kwargs) + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=(m,), **kwargs) self.assertFalse(m.disjunction.active) self.assertFalse(m.simpledisjunct.active) @@ -1362,15 +1520,15 @@ def check_disjuncts_inactive_nested(self, transformation, **kwargs): self.assertFalse(m.disjunct[1].active) self.assertFalse(m.disjunct.active) -def check_deactivated_disjunct_leaves_nested_disjunct_active(self, - transformation, - **kwargs): + +def check_deactivated_disjunct_leaves_nested_disjunct_active( + self, transformation, **kwargs +): m = models.makeNestedDisjunctions_FlatDisjuncts() m.d1.deactivate() # Specifying 'targets' prevents the HACK_GDP_Disjunct_Reclassifier # transformation of Disjuncts to Blocks - TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m], - **kwargs) + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m], **kwargs) self.assertFalse(m.d1.active) self.assertTrue(m.d1.indicator_var.fixed) @@ -1389,8 +1547,7 @@ def check_deactivated_disjunct_leaves_nested_disjunct_active(self, m.d1.deactivate() # Specifying 'targets' prevents the HACK_GDP_Disjunct_Reclassifier # transformation of Disjuncts to Blocks - TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m], - **kwargs) + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m], **kwargs) self.assertFalse(m.d1.active) self.assertTrue(m.d1.indicator_var.fixed) @@ -1405,36 +1562,12 @@ def check_deactivated_disjunct_leaves_nested_disjunct_active(self, self.assertTrue(m.d1.d4.active) self.assertFalse(m.d1.d4.indicator_var.fixed) -def check_mappings_between_disjunctions_and_xors(self, transformation): - m = models.makeNestedDisjunctions() - transform = TransformationFactory('gdp.%s' % transformation) - transform.apply_to(m) - - transBlock = m.component("_pyomo_gdp_%s_reformulation" % transformation) - - disjunctionPairs = [ - (m.disjunction, transBlock.disjunction_xor), - (m.disjunct[1].innerdisjunction[0], - m.disjunct[1].component("_pyomo_gdp_%s_reformulation" - % transformation).\ - component("disjunct[1].innerdisjunction_xor")[0]), - (m.simpledisjunct.innerdisjunction, - m.simpledisjunct.component( - "_pyomo_gdp_%s_reformulation" % transformation).component( - "simpledisjunct.innerdisjunction_xor")) - ] - - # check disjunction mappings - for disjunction, xor in disjunctionPairs: - self.assertIs(disjunction.algebraic_constraint(), xor) - self.assertIs(transform.get_src_disjunction(xor), disjunction) def check_disjunct_targets_inactive(self, transformation, **kwargs): m = models.makeNestedDisjunctions() TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.simpledisjunct], - **kwargs) + m, targets=[m.simpledisjunct], **kwargs + ) self.assertTrue(m.disjunct.active) self.assertTrue(m.disjunct[0].active) @@ -1450,42 +1583,54 @@ def check_disjunct_targets_inactive(self, transformation, **kwargs): self.assertFalse(m.simpledisjunct.innerdisjunct0.active) self.assertFalse(m.simpledisjunct.innerdisjunct1.active) + def check_disjunct_only_targets_transformed(self, transformation): m = models.makeNestedDisjunctions() transform = TransformationFactory('gdp.%s' % transformation) - transform.apply_to( - m, - targets=[m.simpledisjunct]) + transform.apply_to(m, targets=[m.simpledisjunct]) - disjBlock = m.simpledisjunct.component("_pyomo_gdp_%s_reformulation" % - transformation).relaxedDisjuncts + disjBlock = m.simpledisjunct.component( + "_pyomo_gdp_%s_reformulation" % transformation + ).relaxedDisjuncts self.assertEqual(len(disjBlock), 2) - self.assertIsInstance( - disjBlock[0].component("simpledisjunct.innerdisjunct0.c"), - Constraint) - self.assertIsInstance( - disjBlock[1].component("simpledisjunct.innerdisjunct1.c"), - Constraint) + self.assertIs( + transform.get_transformed_constraints(m.simpledisjunct.innerdisjunct0.c)[ + 0 + ].parent_block(), + disjBlock[0], + ) + self.assertIs( + transform.get_transformed_constraints(m.simpledisjunct.innerdisjunct0.c)[ + 0 + ].parent_block(), + disjBlock[0], + ) + self.assertIs( + transform.get_transformed_constraints(m.simpledisjunct.innerdisjunct1.c)[ + 0 + ].parent_block(), + disjBlock[1], + ) # This also relies on the disjuncts being transformed in the same # order every time. - pairs = [ - (0,0), - (1,1), - ] + pairs = [(0, 0), (1, 1)] for i, j in pairs: - self.assertIs(m.simpledisjunct.component('innerdisjunct%d'%i), - transform.get_src_disjunct(disjBlock[j])) - self.assertIs(disjBlock[j], - m.simpledisjunct.component( - 'innerdisjunct%d'%i).transformation_block()) + self.assertIs( + m.simpledisjunct.component('innerdisjunct%d' % i), + transform.get_src_disjunct(disjBlock[j]), + ) + self.assertIs( + disjBlock[j], + m.simpledisjunct.component('innerdisjunct%d' % i).transformation_block, + ) + def check_disjunctData_targets_inactive(self, transformation, **kwargs): m = models.makeNestedDisjunctions() TransformationFactory('gdp.%s' % transformation).apply_to( - m, - targets=[m.disjunct[1]], - **kwargs) + m, targets=[m.disjunct[1]], **kwargs + ) self.assertTrue(m.disjunct[0].active) self.assertTrue(m.disjunct[1].active) @@ -1498,63 +1643,78 @@ def check_disjunctData_targets_inactive(self, transformation, **kwargs): self.assertTrue(m.simpledisjunct.innerdisjunct0.active) self.assertTrue(m.simpledisjunct.innerdisjunct1.active) + def check_disjunctData_only_targets_transformed(self, transformation): m = models.makeNestedDisjunctions() # This is so convoluted, but you can treat a disjunct like a block: transform = TransformationFactory('gdp.%s' % transformation) - transform.apply_to( - m, - targets=[m.disjunct[1]]) + transform.apply_to(m, targets=[m.disjunct[1]]) - disjBlock = m.disjunct[1].component("_pyomo_gdp_%s_reformulation" % - transformation).relaxedDisjuncts + disjBlock = ( + m.disjunct[1] + .component("_pyomo_gdp_%s_reformulation" % transformation) + .relaxedDisjuncts + ) self.assertEqual(len(disjBlock), 2) - self.assertIsInstance( - disjBlock[0].component("disjunct[1].innerdisjunct[0].c"), - Constraint) - self.assertIsInstance( - disjBlock[1].component("disjunct[1].innerdisjunct[1].c"), - Constraint) + if transformation == 'bigm': + self.assertIs( + transform.get_transformed_constraints(m.disjunct[1].innerdisjunct[0].c)[ + 0 + ].parent_block(), + disjBlock[0], + ) + elif transformation == 'hull': + # This constraint is on Block deeper because it is in the bounds of a + # disaggregated var + self.assertIs( + transform.get_transformed_constraints(m.disjunct[1].innerdisjunct[0].c)[0] + .parent_block() + .parent_block(), + disjBlock[0], + ) + self.assertIs( + transform.get_transformed_constraints(m.disjunct[1].innerdisjunct[1].c)[ + 0 + ].parent_block(), + disjBlock[1], + ) # This also relies on the disjuncts being transformed in the same # order every time. - pairs = [ - (0,0), - (1,1), - ] + pairs = [(0, 0), (1, 1)] for i, j in pairs: - self.assertIs(transform.get_src_disjunct(disjBlock[j]), - m.disjunct[1].innerdisjunct[i]) - self.assertIs(m.disjunct[1].innerdisjunct[i].transformation_block(), - disjBlock[j]) + self.assertIs( + transform.get_src_disjunct(disjBlock[j]), m.disjunct[1].innerdisjunct[i] + ) + self.assertIs(m.disjunct[1].innerdisjunct[i].transformation_block, disjBlock[j]) + def check_all_components_transformed(self, m): # checks that all the disjunctive components claim to be transformed in the # makeNestedDisjunctions_NestedDisjuncts model. - self.assertIsInstance(m.disj.algebraic_constraint(), Constraint) - self.assertIsInstance(m.d1.disj2.algebraic_constraint(), Constraint) - self.assertIsInstance(m.d1.transformation_block(), _BlockData) - self.assertIsInstance(m.d2.transformation_block(), _BlockData) - self.assertIsInstance(m.d1.d3.transformation_block(), _BlockData) - self.assertIsInstance(m.d1.d4.transformation_block(), _BlockData) + self.assertIsInstance(m.disj.algebraic_constraint, Constraint) + self.assertIsInstance(m.d1.disj2.algebraic_constraint, Constraint) + self.assertIsInstance(m.d1.transformation_block, _BlockData) + self.assertIsInstance(m.d2.transformation_block, _BlockData) + self.assertIsInstance(m.d1.d3.transformation_block, _BlockData) + self.assertIsInstance(m.d1.d4.transformation_block, _BlockData) + def check_transformation_blocks_nestedDisjunctions(self, m, transformation): - disjunctionTransBlock = m.disj.algebraic_constraint().parent_block() + disjunctionTransBlock = m.disj.algebraic_constraint.parent_block() transBlocks = disjunctionTransBlock.relaxedDisjuncts - self.assertTrue(len(transBlocks), 4) - self.assertIs(transBlocks[0], m.d1.transformation_block()) - self.assertIs(transBlocks[3], m.d2.transformation_block()) + self.assertEqual(len(transBlocks), 4) if transformation == 'bigm': - # we moved the blocks up - self.assertIs(transBlocks[1], m.d1.d3.transformation_block()) - self.assertIs(transBlocks[2], m.d1.d4.transformation_block()) + self.assertIs(transBlocks[0], m.d1.d3.transformation_block) + self.assertIs(transBlocks[1], m.d1.d4.transformation_block) + self.assertIs(transBlocks[2], m.d1.transformation_block) + self.assertIs(transBlocks[3], m.d2.transformation_block) if transformation == 'hull': - # we only moved the references up, these still point to the inner - # transformation blocks - inner = m.d1.disj2.algebraic_constraint().parent_block().\ - relaxedDisjuncts - self.assertIs(inner[0], m.d1.d3.transformation_block()) - self.assertIs(inner[1], m.d1.d4.transformation_block()) + self.assertIs(transBlocks[2], m.d1.d3.transformation_block) + self.assertIs(transBlocks[3], m.d1.d4.transformation_block) + self.assertIs(transBlocks[0], m.d1.transformation_block) + self.assertIs(transBlocks[1], m.d2.transformation_block) + def check_nested_disjunction_target(self, transformation): m = models.makeNestedDisjunctions_NestedDisjuncts() @@ -1567,6 +1727,7 @@ def check_nested_disjunction_target(self, transformation): check_all_components_transformed(self, m) check_transformation_blocks_nestedDisjunctions(self, m, transformation) + def check_target_appears_twice(self, transformation): m = models.makeNestedDisjunctions_NestedDisjuncts() # Because of the way we preprocess targets, the result here will be that @@ -1574,20 +1735,20 @@ def check_target_appears_twice(self, transformation): # the transformation will not try to retransform anything that has already # been transformed. m1 = TransformationFactory('gdp.%s' % transformation).create_using( - m, targets=[m.d1, m.disj]) - + m, targets=[m.d1, m.disj] + ) + check_all_components_transformed(self, m1) # check we have correct number of transformation blocks check_transformation_blocks_nestedDisjunctions(self, m1, transformation) # Now check the same thing, but if the already-transformed disjunct appears # after its disjunction. - TransformationFactory('gdp.%s' % transformation).apply_to( m, - targets=[m.disj, - m.d1]) + TransformationFactory('gdp.%s' % transformation).apply_to(m, targets=[m.disj, m.d1]) check_all_components_transformed(self, m) check_transformation_blocks_nestedDisjunctions(self, m, transformation) + def check_unique_reference_to_nested_indicator_var(self, transformation): m = models.makeNestedDisjunctions_NestedDisjuncts() TransformationFactory('gdp.%s' % transformation).apply_to(m) @@ -1602,47 +1763,48 @@ def check_unique_reference_to_nested_indicator_var(self, transformation): self.assertEqual(num_references_d3, 1) self.assertEqual(num_references_d4, 1) + # checks for handling of benign types that could be on disjuncts we're # transforming + def check_RangeSet(self, transformation, **kwargs): m = models.makeDisjunctWithRangeSet() TransformationFactory('gdp.%s' % transformation).apply_to(m, **kwargs) self.assertIsInstance(m.d1.s, RangeSet) + def check_Expression(self, transformation, **kwargs): m = models.makeDisjunctWithExpression() TransformationFactory('gdp.%s' % transformation).apply_to(m, **kwargs) self.assertIsInstance(m.d1.e, Expression) + def check_untransformed_network_raises_GDPError(self, transformation, **kwargs): m = models.makeNetworkDisjunction() - if transformation == 'bigm': - error_name = 'BigM' - elif transformation == 'partition_disjuncts': - error_name = 'partition_disjuncts' - else: - error_name = 'hull' self.assertRaisesRegex( GDP_Error, "No %s transformation handler registered for modeling " "components of type . If " "your disjuncts contain non-GDP Pyomo components that require " - "transformation, please transform them first." % error_name, + "transformation, please transform them first." % transformation, TransformationFactory('gdp.%s' % transformation).apply_to, - m, **kwargs) + m, + **kwargs + ) + def check_network_disjuncts(self, minimize, transformation, **kwds): m = models.makeExpandedNetworkDisjunction(minimize=minimize) TransformationFactory('gdp.%s' % transformation).apply_to(m, **kwds) results = SolverFactory(linear_solvers[0]).solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal) if minimize: self.assertAlmostEqual(value(m.dest.x), 0.42) else: self.assertAlmostEqual(value(m.dest.x), 0.84) + def check_solution_obeys_logical_constraints(self, transformation, m): # m is expected to either by models.makeLogicalConstraintsOnDisjuncts or # models.makeBooleanVarsOnDisjuncts @@ -1654,8 +1816,7 @@ def check_solution_obeys_logical_constraints(self, transformation, m): no_logic = trans.create_using(m) results = SolverFactory(linear_solvers[0]).solve(no_logic) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal) self.assertAlmostEqual(value(no_logic.x), 2.5) # with logical constraints @@ -1663,6 +1824,44 @@ def check_solution_obeys_logical_constraints(self, transformation, m): m.bwahaha.activate() trans.apply_to(m) results = SolverFactory(linear_solvers[0]).solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual(results.solver.termination_condition, TerminationCondition.optimal) self.assertAlmostEqual(value(m.x), 8) + + +# test pickling transformed models + + +def check_pprint_equal(self, m, unpickle): + # This is almost the same as in the diff_apply_to_and_create_using test but + # we don't have to transform in the middle or mess with seeds. + m_buf = StringIO() + m.pprint(ostream=m_buf) + m_output = m_buf.getvalue() + + unpickle_buf = StringIO() + unpickle.pprint(ostream=unpickle_buf) + unpickle_output = unpickle_buf.getvalue() + self.assertMultiLineEqual(m_output, unpickle_output) + + +def check_transformed_model_pickles(self, transformation): + # Do a model where we'll have to call logical_to_disjunctive too. + m = models.makeLogicalConstraintsOnDisjuncts_NonlinearConvex() + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to(m) + + # pickle and unpickle the transformed model + unpickle = pickle.loads(pickle.dumps(m)) + + check_pprint_equal(self, m, unpickle) + + +def check_transformed_model_pickles_with_dill(self, transformation): + m = models.makeLogicalConstraintsOnDisjuncts_NonlinearConvex() + trans = TransformationFactory('gdp.%s' % transformation) + trans.apply_to(m) + + # pickle and unpickle the transformed model + unpickle = dill.loads(dill.dumps(m)) + + check_pprint_equal(self, m, unpickle) diff --git a/pyomo/gdp/tests/jobshop_large_bigm.lp b/pyomo/gdp/tests/jobshop_large_bigm.lp index b9c04ccd828..21db864ec2b 100644 --- a/pyomo/gdp/tests/jobshop_large_bigm.lp +++ b/pyomo/gdp/tests/jobshop_large_bigm.lp @@ -216,431 +216,428 @@ c_e__pyomo_gdp_bigm_reformulation_disj_xor(F_G_4)_: +1 NoClash(F_G_4_1)_binary_indicator_var = 1 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(0)__NoClash(A_B_3_0)_c_(ub)_: -+96 NoClash(A_B_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(0)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(B) -<= 92 ++96.0 NoClash(A_B_3_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(1)__NoClash(A_B_3_1)_c_(ub)_: -+97 NoClash(A_B_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(1)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(B) -<= 92 ++97.0 NoClash(A_B_3_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(2)__NoClash(A_B_5_0)_c_(ub)_: -+94 NoClash(A_B_5_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(2)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(B) -<= 92 ++94.0 NoClash(A_B_5_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(3)__NoClash(A_B_5_1)_c_(ub)_: -+95 NoClash(A_B_5_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(3)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(B) -<= 92 ++95.0 NoClash(A_B_5_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(4)__NoClash(A_C_1_0)_c_(ub)_: -+98 NoClash(A_C_1_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(4)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(C) -<= 92 ++98.0 NoClash(A_C_1_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(5)__NoClash(A_C_1_1)_c_(ub)_: -+95 NoClash(A_C_1_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(5)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(C) -<= 92 ++95.0 NoClash(A_C_1_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(6)__NoClash(A_D_3_0)_c_(ub)_: -+102 NoClash(A_D_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(6)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(D) -<= 92 ++102.0 NoClash(A_D_3_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(7)__NoClash(A_D_3_1)_c_(ub)_: -+92 NoClash(A_D_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(7)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(D) -<= 92 ++92.0 NoClash(A_D_3_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(8)__NoClash(A_E_3_0)_c_(ub)_: -+99 NoClash(A_E_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(8)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(E) -<= 92 ++99.0 NoClash(A_E_3_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(9)__NoClash(A_E_3_1)_c_(ub)_: -+96 NoClash(A_E_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(9)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(E) -<= 92 ++96.0 NoClash(A_E_3_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(10)__NoClash(A_E_5_0)_c_(ub)_: -+96 NoClash(A_E_5_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(10)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(E) -<= 92 ++96.0 NoClash(A_E_5_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(11)__NoClash(A_E_5_1)_c_(ub)_: -+92 NoClash(A_E_5_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(11)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(E) -<= 92 ++92.0 NoClash(A_E_5_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(12)__NoClash(A_F_1_0)_c_(ub)_: -+94 NoClash(A_F_1_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(12)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(F) -<= 92 ++94.0 NoClash(A_F_1_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(13)__NoClash(A_F_1_1)_c_(ub)_: -+95 NoClash(A_F_1_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(13)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(F) -<= 92 ++95.0 NoClash(A_F_1_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(14)__NoClash(A_F_3_0)_c_(ub)_: -+96 NoClash(A_F_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(14)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(F) -<= 92 ++96.0 NoClash(A_F_3_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(15)__NoClash(A_F_3_1)_c_(ub)_: -+98 NoClash(A_F_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(15)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(F) -<= 92 ++98.0 NoClash(A_F_3_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(16)__NoClash(A_G_5_0)_c_(ub)_: -+101 NoClash(A_G_5_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(16)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(G) -<= 92 ++101.0 NoClash(A_G_5_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(17)__NoClash(A_G_5_1)_c_(ub)_: -+89 NoClash(A_G_5_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(17)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(G) -<= 92 ++89.0 NoClash(A_G_5_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(18)__NoClash(B_C_2_0)_c_(ub)_: -+101 NoClash(B_C_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(18)_transformedConstraints(c_0_None_ub)_: -1 t(B) +1 t(C) -<= 92 ++101.0 NoClash(B_C_2_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(19)__NoClash(B_C_2_1)_c_(ub)_: -+89 NoClash(B_C_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(19)_transformedConstraints(c_0_None_ub)_: +1 t(B) -1 t(C) -<= 92 ++89.0 NoClash(B_C_2_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(20)__NoClash(B_D_2_0)_c_(ub)_: -+100 NoClash(B_D_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(20)_transformedConstraints(c_0_None_ub)_: -1 t(B) +1 t(D) -<= 92 ++100.0 NoClash(B_D_2_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(21)__NoClash(B_D_2_1)_c_(ub)_: -+95 NoClash(B_D_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(21)_transformedConstraints(c_0_None_ub)_: +1 t(B) -1 t(D) -<= 92 ++95.0 NoClash(B_D_2_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(22)__NoClash(B_D_3_0)_c_(ub)_: -+102 NoClash(B_D_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(22)_transformedConstraints(c_0_None_ub)_: -1 t(B) +1 t(D) -<= 92 ++102.0 NoClash(B_D_3_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(23)__NoClash(B_D_3_1)_c_(ub)_: -+91 NoClash(B_D_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(23)_transformedConstraints(c_0_None_ub)_: +1 t(B) -1 t(D) -<= 92 ++91.0 NoClash(B_D_3_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(24)__NoClash(B_E_2_0)_c_(ub)_: -+96 NoClash(B_E_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(24)_transformedConstraints(c_0_None_ub)_: -1 t(B) +1 t(E) -<= 92 ++96.0 NoClash(B_E_2_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(25)__NoClash(B_E_2_1)_c_(ub)_: -+95 NoClash(B_E_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(25)_transformedConstraints(c_0_None_ub)_: +1 t(B) -1 t(E) -<= 92 ++95.0 NoClash(B_E_2_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(26)__NoClash(B_E_3_0)_c_(ub)_: -+99 NoClash(B_E_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(26)_transformedConstraints(c_0_None_ub)_: -1 t(B) +1 t(E) -<= 92 ++99.0 NoClash(B_E_3_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(27)__NoClash(B_E_3_1)_c_(ub)_: -+95 NoClash(B_E_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(27)_transformedConstraints(c_0_None_ub)_: +1 t(B) -1 t(E) -<= 92 ++95.0 NoClash(B_E_3_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(28)__NoClash(B_E_5_0)_c_(ub)_: -+97 NoClash(B_E_5_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(28)_transformedConstraints(c_0_None_ub)_: -1 t(B) +1 t(E) -<= 92 ++97.0 NoClash(B_E_5_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(29)__NoClash(B_E_5_1)_c_(ub)_: -+92 NoClash(B_E_5_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(29)_transformedConstraints(c_0_None_ub)_: +1 t(B) -1 t(E) -<= 92 ++92.0 NoClash(B_E_5_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(30)__NoClash(B_F_3_0)_c_(ub)_: -+96 NoClash(B_F_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(30)_transformedConstraints(c_0_None_ub)_: -1 t(B) +1 t(F) -<= 92 ++96.0 NoClash(B_F_3_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(31)__NoClash(B_F_3_1)_c_(ub)_: -+97 NoClash(B_F_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(31)_transformedConstraints(c_0_None_ub)_: +1 t(B) -1 t(F) -<= 92 ++97.0 NoClash(B_F_3_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(32)__NoClash(B_G_2_0)_c_(ub)_: -+100 NoClash(B_G_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(32)_transformedConstraints(c_0_None_ub)_: -1 t(B) +1 t(G) -<= 92 ++100.0 NoClash(B_G_2_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(33)__NoClash(B_G_2_1)_c_(ub)_: -+95 NoClash(B_G_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(33)_transformedConstraints(c_0_None_ub)_: +1 t(B) -1 t(G) -<= 92 ++95.0 NoClash(B_G_2_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(34)__NoClash(B_G_5_0)_c_(ub)_: -+102 NoClash(B_G_5_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(34)_transformedConstraints(c_0_None_ub)_: -1 t(B) +1 t(G) -<= 92 ++102.0 NoClash(B_G_5_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(35)__NoClash(B_G_5_1)_c_(ub)_: -+89 NoClash(B_G_5_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(35)_transformedConstraints(c_0_None_ub)_: +1 t(B) -1 t(G) -<= 92 ++89.0 NoClash(B_G_5_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(36)__NoClash(C_D_2_0)_c_(ub)_: -+94 NoClash(C_D_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(36)_transformedConstraints(c_0_None_ub)_: -1 t(C) +1 t(D) -<= 92 ++94.0 NoClash(C_D_2_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(37)__NoClash(C_D_2_1)_c_(ub)_: -+101 NoClash(C_D_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(37)_transformedConstraints(c_0_None_ub)_: +1 t(C) -1 t(D) -<= 92 ++101.0 NoClash(C_D_2_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(38)__NoClash(C_D_4_0)_c_(ub)_: -+97 NoClash(C_D_4_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(38)_transformedConstraints(c_0_None_ub)_: -1 t(C) +1 t(D) -<= 92 ++97.0 NoClash(C_D_4_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(39)__NoClash(C_D_4_1)_c_(ub)_: -+94 NoClash(C_D_4_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(39)_transformedConstraints(c_0_None_ub)_: +1 t(C) -1 t(D) -<= 92 ++94.0 NoClash(C_D_4_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(40)__NoClash(C_E_2_0)_c_(ub)_: -+90 NoClash(C_E_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(40)_transformedConstraints(c_0_None_ub)_: -1 t(C) +1 t(E) -<= 92 ++90.0 NoClash(C_E_2_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(41)__NoClash(C_E_2_1)_c_(ub)_: -+101 NoClash(C_E_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(41)_transformedConstraints(c_0_None_ub)_: +1 t(C) -1 t(E) -<= 92 ++101.0 NoClash(C_E_2_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(42)__NoClash(C_F_1_0)_c_(ub)_: -+94 NoClash(C_F_1_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(42)_transformedConstraints(c_0_None_ub)_: -1 t(C) +1 t(F) -<= 92 ++94.0 NoClash(C_F_1_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(43)__NoClash(C_F_1_1)_c_(ub)_: -+98 NoClash(C_F_1_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(43)_transformedConstraints(c_0_None_ub)_: +1 t(C) -1 t(F) -<= 92 ++98.0 NoClash(C_F_1_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(44)__NoClash(C_F_4_0)_c_(ub)_: -+97 NoClash(C_F_4_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(44)_transformedConstraints(c_0_None_ub)_: -1 t(C) +1 t(F) -<= 92 ++97.0 NoClash(C_F_4_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(45)__NoClash(C_F_4_1)_c_(ub)_: -+100 NoClash(C_F_4_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(45)_transformedConstraints(c_0_None_ub)_: +1 t(C) -1 t(F) -<= 92 ++100.0 NoClash(C_F_4_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(46)__NoClash(C_G_2_0)_c_(ub)_: -+94 NoClash(C_G_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(46)_transformedConstraints(c_0_None_ub)_: -1 t(C) +1 t(G) -<= 92 ++94.0 NoClash(C_G_2_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(47)__NoClash(C_G_2_1)_c_(ub)_: -+101 NoClash(C_G_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(47)_transformedConstraints(c_0_None_ub)_: +1 t(C) -1 t(G) -<= 92 ++101.0 NoClash(C_G_2_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(48)__NoClash(C_G_4_0)_c_(ub)_: -+96 NoClash(C_G_4_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(48)_transformedConstraints(c_0_None_ub)_: -1 t(C) +1 t(G) -<= 92 ++96.0 NoClash(C_G_4_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(49)__NoClash(C_G_4_1)_c_(ub)_: -+99 NoClash(C_G_4_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(49)_transformedConstraints(c_0_None_ub)_: +1 t(C) -1 t(G) -<= 92 ++99.0 NoClash(C_G_4_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(50)__NoClash(D_E_2_0)_c_(ub)_: -+96 NoClash(D_E_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(50)_transformedConstraints(c_0_None_ub)_: -1 t(D) +1 t(E) -<= 92 ++96.0 NoClash(D_E_2_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(51)__NoClash(D_E_2_1)_c_(ub)_: -+100 NoClash(D_E_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(51)_transformedConstraints(c_0_None_ub)_: +1 t(D) -1 t(E) -<= 92 ++100.0 NoClash(D_E_2_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(52)__NoClash(D_E_3_0)_c_(ub)_: -+94 NoClash(D_E_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(52)_transformedConstraints(c_0_None_ub)_: -1 t(D) +1 t(E) -<= 92 ++94.0 NoClash(D_E_3_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(53)__NoClash(D_E_3_1)_c_(ub)_: -+101 NoClash(D_E_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(53)_transformedConstraints(c_0_None_ub)_: +1 t(D) -1 t(E) -<= 92 ++101.0 NoClash(D_E_3_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(54)__NoClash(D_F_3_0)_c_(ub)_: -+91 NoClash(D_F_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(54)_transformedConstraints(c_0_None_ub)_: -1 t(D) +1 t(F) -<= 92 ++91.0 NoClash(D_F_3_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(55)__NoClash(D_F_3_1)_c_(ub)_: -+103 NoClash(D_F_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(55)_transformedConstraints(c_0_None_ub)_: +1 t(D) -1 t(F) -<= 92 ++103.0 NoClash(D_F_3_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(56)__NoClash(D_F_4_0)_c_(ub)_: -+93 NoClash(D_F_4_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(56)_transformedConstraints(c_0_None_ub)_: -1 t(D) +1 t(F) -<= 92 ++93.0 NoClash(D_F_4_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(57)__NoClash(D_F_4_1)_c_(ub)_: -+99 NoClash(D_F_4_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(57)_transformedConstraints(c_0_None_ub)_: +1 t(D) -1 t(F) -<= 92 ++99.0 NoClash(D_F_4_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(58)__NoClash(D_G_2_0)_c_(ub)_: -+100 NoClash(D_G_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(58)_transformedConstraints(c_0_None_ub)_: -1 t(D) +1 t(G) -<= 92 ++100.0 NoClash(D_G_2_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(59)__NoClash(D_G_2_1)_c_(ub)_: -+100 NoClash(D_G_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(59)_transformedConstraints(c_0_None_ub)_: +1 t(D) -1 t(G) -<= 92 ++100.0 NoClash(D_G_2_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(60)__NoClash(D_G_4_0)_c_(ub)_: -+92 NoClash(D_G_4_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(60)_transformedConstraints(c_0_None_ub)_: -1 t(D) +1 t(G) -<= 92 ++92.0 NoClash(D_G_4_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(61)__NoClash(D_G_4_1)_c_(ub)_: -+98 NoClash(D_G_4_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(61)_transformedConstraints(c_0_None_ub)_: +1 t(D) -1 t(G) -<= 92 ++98.0 NoClash(D_G_4_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(62)__NoClash(E_F_3_0)_c_(ub)_: -+95 NoClash(E_F_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(62)_transformedConstraints(c_0_None_ub)_: -1 t(E) +1 t(F) -<= 92 ++95.0 NoClash(E_F_3_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(63)__NoClash(E_F_3_1)_c_(ub)_: -+100 NoClash(E_F_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(63)_transformedConstraints(c_0_None_ub)_: +1 t(E) -1 t(F) -<= 92 ++100.0 NoClash(E_F_3_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(64)__NoClash(E_G_2_0)_c_(ub)_: -+100 NoClash(E_G_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(64)_transformedConstraints(c_0_None_ub)_: -1 t(E) +1 t(G) -<= 92 ++100.0 NoClash(E_G_2_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(65)__NoClash(E_G_2_1)_c_(ub)_: -+96 NoClash(E_G_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(65)_transformedConstraints(c_0_None_ub)_: +1 t(E) -1 t(G) -<= 92 ++96.0 NoClash(E_G_2_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(66)__NoClash(E_G_5_0)_c_(ub)_: -+99 NoClash(E_G_5_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(66)_transformedConstraints(c_0_None_ub)_: -1 t(E) +1 t(G) -<= 92 ++99.0 NoClash(E_G_5_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(67)__NoClash(E_G_5_1)_c_(ub)_: -+91 NoClash(E_G_5_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(67)_transformedConstraints(c_0_None_ub)_: +1 t(E) -1 t(G) -<= 92 ++91.0 NoClash(E_G_5_1)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(68)__NoClash(F_G_4_0)_c_(ub)_: -+98 NoClash(F_G_4_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(68)_transformedConstraints(c_0_None_ub)_: -1 t(F) +1 t(G) -<= 92 ++98.0 NoClash(F_G_4_0)_binary_indicator_var +<= 92.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(69)__NoClash(F_G_4_1)_c_(ub)_: -+98 NoClash(F_G_4_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(69)_transformedConstraints(c_0_None_ub)_: +1 t(F) -1 t(G) -<= 92 - -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 ++98.0 NoClash(F_G_4_1)_binary_indicator_var +<= 92.0 bounds - -inf <= ms <= +inf + -inf <= ms <= +inf 0 <= t(A) <= 92 0 <= t(B) <= 92 0 <= t(C) <= 92 diff --git a/pyomo/gdp/tests/jobshop_large_hull.lp b/pyomo/gdp/tests/jobshop_large_hull.lp index 6ee2849f020..983770880b7 100644 --- a/pyomo/gdp/tests/jobshop_large_hull.lp +++ b/pyomo/gdp/tests/jobshop_large_hull.lp @@ -41,509 +41,509 @@ c_u_Feas(G)_: +1 t(G) <= -17 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_B_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(B)_ -+1 t(B) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0)_: ++1 t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(G)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_B_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(B)_ -+1 t(B) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1)_: ++1 t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(F)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_C_1)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(C)_ -+1 t(C) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(2)_: ++1 t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(G)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_D_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(D)_ -+1 t(D) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(3)_: ++1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_E_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(E)_ -+1 t(E) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(4)_: ++1 t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(G)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_E_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(E)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(5)_: +1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_F_1)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(F)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(F)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(6)_: +1 t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(F)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_F_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(F)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(F)_ -+1 t(F) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(7)_: ++1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_G_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(G)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(G)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(8)_: +1 t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(G)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_C_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(C)_ -+1 t(C) -= 0 - -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_D_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(D)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(9)_: +1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_D_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(D)_ -+1 t(D) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(10)_: ++1 t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(G)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_E_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(E)_ -+1 t(E) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(11)_: ++1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_E_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(E)_ -+1 t(E) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(12)_: ++1 t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(F)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_E_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(E)_ -+1 t(E) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(13)_: ++1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_F_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(F)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(F)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(14)_: +1 t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(F)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_G_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(G)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(G)_ -+1 t(G) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(15)_: ++1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_G_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(G)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(G)_ -+1 t(G) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(16)_: ++1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_D_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(D)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(17)_: +1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_D_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(D)_ -+1 t(D) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(18)_: ++1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_E_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(E)_ -+1 t(E) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(19)_: ++1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_F_1)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(F)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(F)_ -+1 t(F) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(20)_: ++1 t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(G)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_F_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(F)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(F)_ -+1 t(F) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(21)_: ++1 t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(C)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_G_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(G)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(G)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(22)_: +1 t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(G)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_C_G_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(G)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(G)_ -+1 t(G) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(23)_: ++1 t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(C)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_E_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(E)_ -+1 t(E) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(24)_: ++1 t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(F)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_E_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(E)_ -+1 t(E) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(25)_: ++1 t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(C)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_F_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(F)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(F)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(26)_: +1 t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(F)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_F_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(F)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(F)_ -+1 t(F) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(27)_: ++1 t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(C)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_G_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(G)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(G)_ -+1 t(G) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(28)_: ++1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_D_G_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(G)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(G)_ -+1 t(G) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(29)_: ++1 t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(C)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_E_F_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(F)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(F)_ -+1 t(F) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(30)_: ++1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_E_G_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(G)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(G)_ -+1 t(G) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(31)_: ++1 t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(C)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_E_G_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(G)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(G)_ -+1 t(G) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(32)_: ++1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_F_G_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(G)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(G)_ -+1 t(G) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(33)_: ++1 t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(C)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_B_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(A)_ -+1 t(A) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(34)_: ++1 t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(G)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_B_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(A)_ -+1 t(A) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(35)_: ++1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_C_1)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(A)_ -+1 t(A) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(36)_: ++1 t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(G)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_D_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(A)_ -+1 t(A) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(37)_: ++1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_E_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(A)_ -+1 t(A) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(38)_: ++1 t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(F)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_E_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(A)_ -+1 t(A) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(39)_: ++1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_F_1)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(A)_ -+1 t(A) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(40)_: ++1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_F_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(A)_ -+1 t(A) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(41)_: ++1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_G_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(A)_ -+1 t(A) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(42)_: ++1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_C_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(B)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(43)_: +1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_D_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(B)_ -+1 t(B) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(44)_: ++1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_D_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(B)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(45)_: +1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_E_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(B)_ -+1 t(B) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(46)_: ++1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_E_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(B)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(47)_: +1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_E_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(B)_ -+1 t(B) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(48)_: ++1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_F_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(B)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(49)_: +1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_G_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(B)_ -+1 t(B) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(50)_: ++1 t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(C)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_G_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(B)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(51)_: +1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_D_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(C)_ -+1 t(C) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(52)_: ++1 t(G) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(G)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_D_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(C)_ -+1 t(C) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(53)_: ++1 t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(A)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(A)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_E_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(C)_ -+1 t(C) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(54)_: ++1 t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(F)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_F_1)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(C)_ -+1 t(C) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(55)_: ++1 t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(A)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(A)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_F_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(C)_ -+1 t(C) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(56)_: ++1 t(F) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(F)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_G_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(C)_ -+1 t(C) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(57)_: ++1 t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(A)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(A)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_C_G_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(C)_ -+1 t(C) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(58)_: ++1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_E_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(D)_ -+1 t(D) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(59)_: ++1 t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(A)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(A)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_E_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(D)_ -+1 t(D) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(60)_: ++1 t(E) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(E)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_F_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(D)_ -+1 t(D) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(61)_: ++1 t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(A)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(A)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_F_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(D)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(62)_: +1 t(D) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(D)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_G_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(D)_ -+1 t(D) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(63)_: ++1 t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(A)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(A)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_D_G_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(D)_ -+1 t(D) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(64)_: ++1 t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(C)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_E_F_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(E)_ -+1 t(E) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(65)_: ++1 t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(A)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(A)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_E_G_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(E)_ -+1 t(E) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(66)_: ++1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_E_G_5)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(E)_ -+1 t(E) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(67)_: ++1 t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(A)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(A)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_F_G_4)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(F)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(F)_ -+1 t(F) +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(68)_: ++1 t(B) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(B)_ = 0 -c_e__pyomo_gdp_hull_reformulation_disj_xor(A_B_3)_: -+1 NoClash(A_B_3_0)_binary_indicator_var -+1 NoClash(A_B_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(69)_: ++1 t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(A)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(A)_ += 0 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(F_G_4)_: ++1 NoClash(F_G_4_0)_binary_indicator_var ++1 NoClash(F_G_4_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(A_B_5)_: -+1 NoClash(A_B_5_0)_binary_indicator_var -+1 NoClash(A_B_5_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(E_G_5)_: ++1 NoClash(E_G_5_0)_binary_indicator_var ++1 NoClash(E_G_5_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(A_C_1)_: -+1 NoClash(A_C_1_0)_binary_indicator_var -+1 NoClash(A_C_1_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(E_G_2)_: ++1 NoClash(E_G_2_0)_binary_indicator_var ++1 NoClash(E_G_2_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(A_D_3)_: -+1 NoClash(A_D_3_0)_binary_indicator_var -+1 NoClash(A_D_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(E_F_3)_: ++1 NoClash(E_F_3_0)_binary_indicator_var ++1 NoClash(E_F_3_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(A_E_3)_: -+1 NoClash(A_E_3_0)_binary_indicator_var -+1 NoClash(A_E_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_G_4)_: ++1 NoClash(D_G_4_0)_binary_indicator_var ++1 NoClash(D_G_4_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(A_E_5)_: -+1 NoClash(A_E_5_0)_binary_indicator_var -+1 NoClash(A_E_5_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_G_2)_: ++1 NoClash(D_G_2_0)_binary_indicator_var ++1 NoClash(D_G_2_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(A_F_1)_: -+1 NoClash(A_F_1_0)_binary_indicator_var -+1 NoClash(A_F_1_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_F_4)_: ++1 NoClash(D_F_4_0)_binary_indicator_var ++1 NoClash(D_F_4_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(A_F_3)_: -+1 NoClash(A_F_3_0)_binary_indicator_var -+1 NoClash(A_F_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_F_3)_: ++1 NoClash(D_F_3_0)_binary_indicator_var ++1 NoClash(D_F_3_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(A_G_5)_: -+1 NoClash(A_G_5_0)_binary_indicator_var -+1 NoClash(A_G_5_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_E_3)_: ++1 NoClash(D_E_3_0)_binary_indicator_var ++1 NoClash(D_E_3_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(B_C_2)_: -+1 NoClash(B_C_2_0)_binary_indicator_var -+1 NoClash(B_C_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(D_E_2)_: ++1 NoClash(D_E_2_0)_binary_indicator_var ++1 NoClash(D_E_2_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(B_D_2)_: -+1 NoClash(B_D_2_0)_binary_indicator_var -+1 NoClash(B_D_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_G_4)_: ++1 NoClash(C_G_4_0)_binary_indicator_var ++1 NoClash(C_G_4_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(B_D_3)_: -+1 NoClash(B_D_3_0)_binary_indicator_var -+1 NoClash(B_D_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_G_2)_: ++1 NoClash(C_G_2_0)_binary_indicator_var ++1 NoClash(C_G_2_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(B_E_2)_: -+1 NoClash(B_E_2_0)_binary_indicator_var -+1 NoClash(B_E_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_F_4)_: ++1 NoClash(C_F_4_0)_binary_indicator_var ++1 NoClash(C_F_4_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(B_E_3)_: -+1 NoClash(B_E_3_0)_binary_indicator_var -+1 NoClash(B_E_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_F_1)_: ++1 NoClash(C_F_1_0)_binary_indicator_var ++1 NoClash(C_F_1_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(B_E_5)_: -+1 NoClash(B_E_5_0)_binary_indicator_var -+1 NoClash(B_E_5_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_E_2)_: ++1 NoClash(C_E_2_0)_binary_indicator_var ++1 NoClash(C_E_2_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(B_F_3)_: -+1 NoClash(B_F_3_0)_binary_indicator_var -+1 NoClash(B_F_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_D_4)_: ++1 NoClash(C_D_4_0)_binary_indicator_var ++1 NoClash(C_D_4_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(B_G_2)_: -+1 NoClash(B_G_2_0)_binary_indicator_var -+1 NoClash(B_G_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(C_D_2)_: ++1 NoClash(C_D_2_0)_binary_indicator_var ++1 NoClash(C_D_2_1)_binary_indicator_var = 1 c_e__pyomo_gdp_hull_reformulation_disj_xor(B_G_5)_: @@ -551,1212 +551,1209 @@ c_e__pyomo_gdp_hull_reformulation_disj_xor(B_G_5)_: +1 NoClash(B_G_5_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(C_D_2)_: -+1 NoClash(C_D_2_0)_binary_indicator_var -+1 NoClash(C_D_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_G_2)_: ++1 NoClash(B_G_2_0)_binary_indicator_var ++1 NoClash(B_G_2_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(C_D_4)_: -+1 NoClash(C_D_4_0)_binary_indicator_var -+1 NoClash(C_D_4_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_F_3)_: ++1 NoClash(B_F_3_0)_binary_indicator_var ++1 NoClash(B_F_3_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(C_E_2)_: -+1 NoClash(C_E_2_0)_binary_indicator_var -+1 NoClash(C_E_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_E_5)_: ++1 NoClash(B_E_5_0)_binary_indicator_var ++1 NoClash(B_E_5_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(C_F_1)_: -+1 NoClash(C_F_1_0)_binary_indicator_var -+1 NoClash(C_F_1_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_E_3)_: ++1 NoClash(B_E_3_0)_binary_indicator_var ++1 NoClash(B_E_3_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(C_F_4)_: -+1 NoClash(C_F_4_0)_binary_indicator_var -+1 NoClash(C_F_4_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_E_2)_: ++1 NoClash(B_E_2_0)_binary_indicator_var ++1 NoClash(B_E_2_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(C_G_2)_: -+1 NoClash(C_G_2_0)_binary_indicator_var -+1 NoClash(C_G_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_D_3)_: ++1 NoClash(B_D_3_0)_binary_indicator_var ++1 NoClash(B_D_3_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(C_G_4)_: -+1 NoClash(C_G_4_0)_binary_indicator_var -+1 NoClash(C_G_4_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_D_2)_: ++1 NoClash(B_D_2_0)_binary_indicator_var ++1 NoClash(B_D_2_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(D_E_2)_: -+1 NoClash(D_E_2_0)_binary_indicator_var -+1 NoClash(D_E_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_C_2)_: ++1 NoClash(B_C_2_0)_binary_indicator_var ++1 NoClash(B_C_2_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(D_E_3)_: -+1 NoClash(D_E_3_0)_binary_indicator_var -+1 NoClash(D_E_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_G_5)_: ++1 NoClash(A_G_5_0)_binary_indicator_var ++1 NoClash(A_G_5_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(D_F_3)_: -+1 NoClash(D_F_3_0)_binary_indicator_var -+1 NoClash(D_F_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_F_3)_: ++1 NoClash(A_F_3_0)_binary_indicator_var ++1 NoClash(A_F_3_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(D_F_4)_: -+1 NoClash(D_F_4_0)_binary_indicator_var -+1 NoClash(D_F_4_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_F_1)_: ++1 NoClash(A_F_1_0)_binary_indicator_var ++1 NoClash(A_F_1_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(D_G_2)_: -+1 NoClash(D_G_2_0)_binary_indicator_var -+1 NoClash(D_G_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_E_5)_: ++1 NoClash(A_E_5_0)_binary_indicator_var ++1 NoClash(A_E_5_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(D_G_4)_: -+1 NoClash(D_G_4_0)_binary_indicator_var -+1 NoClash(D_G_4_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_E_3)_: ++1 NoClash(A_E_3_0)_binary_indicator_var ++1 NoClash(A_E_3_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(E_F_3)_: -+1 NoClash(E_F_3_0)_binary_indicator_var -+1 NoClash(E_F_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_D_3)_: ++1 NoClash(A_D_3_0)_binary_indicator_var ++1 NoClash(A_D_3_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(E_G_2)_: -+1 NoClash(E_G_2_0)_binary_indicator_var -+1 NoClash(E_G_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_C_1)_: ++1 NoClash(A_C_1_0)_binary_indicator_var ++1 NoClash(A_C_1_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(E_G_5)_: -+1 NoClash(E_G_5_0)_binary_indicator_var -+1 NoClash(E_G_5_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_B_5)_: ++1 NoClash(A_B_5_0)_binary_indicator_var ++1 NoClash(A_B_5_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(F_G_4)_: -+1 NoClash(F_G_4_0)_binary_indicator_var -+1 NoClash(F_G_4_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_B_3)_: ++1 NoClash(A_B_3_0)_binary_indicator_var ++1 NoClash(A_B_3_1)_binary_indicator_var = 1 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)__t(B)_bounds_(ub)_: --92 NoClash(A_B_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(B)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(F)_ ++6.0 NoClash(F_G_4_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)__t(A)_bounds_(ub)_: --92 NoClash(A_B_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(G)_ +-92 NoClash(F_G_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)__NoClash(A_B_3_0)_c_(ub)_: -+4 NoClash(A_B_3_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(A)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)__t(F)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(F)_ +-92 NoClash(F_G_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)__t(B)_bounds_(ub)_: --92 NoClash(A_B_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(B)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(G)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(F)_ ++6.0 NoClash(F_G_4_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)__t(A)_bounds_(ub)_: --92 NoClash(A_B_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(G)_ +-92 NoClash(F_G_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)__NoClash(A_B_3_1)_c_(ub)_: -+5 NoClash(A_B_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)__t(F)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(F)_ +-92 NoClash(F_G_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)__t(B)_bounds_(ub)_: --92 NoClash(A_B_5_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(B)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(E)_ ++7.0 NoClash(E_G_5_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)__t(A)_bounds_(ub)_: --92 NoClash(A_B_5_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(G)_ +-92 NoClash(E_G_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)__NoClash(A_B_5_0)_c_(ub)_: -+2 NoClash(A_B_5_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(A)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(E)_ +-92 NoClash(E_G_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)__t(B)_bounds_(ub)_: --92 NoClash(A_B_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(B)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(G)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(E)_ +-1 NoClash(E_G_5_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)__t(A)_bounds_(ub)_: --92 NoClash(A_B_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(G)_ +-92 NoClash(E_G_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)__NoClash(A_B_5_1)_c_(ub)_: -+3 NoClash(A_B_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(E)_ +-92 NoClash(E_G_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)__t(C)_bounds_(ub)_: --92 NoClash(A_C_1_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(C)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(E)_ ++8.0 NoClash(E_G_2_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)__t(A)_bounds_(ub)_: --92 NoClash(A_C_1_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(G)_ +-92 NoClash(E_G_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)__NoClash(A_C_1_0)_c_(ub)_: -+6 NoClash(A_C_1_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(A)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(E)_ +-92 NoClash(E_G_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)__t(C)_bounds_(ub)_: --92 NoClash(A_C_1_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(C)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(G)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(E)_ ++4.0 NoClash(E_G_2_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)__t(A)_bounds_(ub)_: --92 NoClash(A_C_1_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(G)_ +-92 NoClash(E_G_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)__NoClash(A_C_1_1)_c_(ub)_: -+3 NoClash(A_C_1_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(E)_ +-92 NoClash(E_G_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)__t(D)_bounds_(ub)_: --92 NoClash(A_D_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(D)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(E)_ ++3.0 NoClash(E_F_3_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)__t(A)_bounds_(ub)_: --92 NoClash(A_D_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)__t(F)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(F)_ +-92 NoClash(E_F_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)__NoClash(A_D_3_0)_c_(ub)_: -+10 NoClash(A_D_3_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(A)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(E)_ +-92 NoClash(E_F_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)__t(D)_bounds_(ub)_: --92 NoClash(A_D_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(D)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(F)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(E)_ ++8.0 NoClash(E_F_3_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)__t(A)_bounds_(ub)_: --92 NoClash(A_D_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)__t(F)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(F)_ +-92 NoClash(E_F_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)__NoClash(A_D_3_1)_c_(ub)_: -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(E)_ +-92 NoClash(E_F_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)__t(E)_bounds_(ub)_: --92 NoClash(A_E_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(D)_ +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)__t(A)_bounds_(ub)_: --92 NoClash(A_E_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(G)_ +-92 NoClash(D_G_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)__NoClash(A_E_3_0)_c_(ub)_: -+7 NoClash(A_E_3_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(A)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(D)_ +-92 NoClash(D_G_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)__t(E)_bounds_(ub)_: --92 NoClash(A_E_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(G)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(D)_ ++6.0 NoClash(D_G_4_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)__t(A)_bounds_(ub)_: --92 NoClash(A_E_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(G)_ +-92 NoClash(D_G_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)__NoClash(A_E_3_1)_c_(ub)_: -+4 NoClash(A_E_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(D)_ +-92 NoClash(D_G_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)__t(E)_bounds_(ub)_: --92 NoClash(A_E_5_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(D)_ ++8.0 NoClash(D_G_2_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)__t(A)_bounds_(ub)_: --92 NoClash(A_E_5_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(G)_ +-92 NoClash(D_G_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)__NoClash(A_E_5_0)_c_(ub)_: -+4 NoClash(A_E_5_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(A)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(D)_ +-92 NoClash(D_G_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)__t(E)_bounds_(ub)_: --92 NoClash(A_E_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(G)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(D)_ ++8.0 NoClash(D_G_2_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)__t(A)_bounds_(ub)_: --92 NoClash(A_E_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(G)_ +-92 NoClash(D_G_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)__NoClash(A_E_5_1)_c_(ub)_: -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(D)_ +-92 NoClash(D_G_2_1)_binary_indicator_var <= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(D)_ ++1 NoClash(D_F_4_0)_binary_indicator_var +<= 0.0 + c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)__t(F)_bounds_(ub)_: --92 NoClash(A_F_1_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(F)_ +-92 NoClash(D_F_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)__t(A)_bounds_(ub)_: --92 NoClash(A_F_1_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(D)_ +-92 NoClash(D_F_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)__NoClash(A_F_1_0)_c_(ub)_: -+2 NoClash(A_F_1_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(A)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(F)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(D)_ ++7.0 NoClash(D_F_4_1)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)__t(F)_bounds_(ub)_: --92 NoClash(A_F_1_1)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(F)_ +-92 NoClash(D_F_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)__t(A)_bounds_(ub)_: --92 NoClash(A_F_1_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(D)_ +-92 NoClash(D_F_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)__NoClash(A_F_1_1)_c_(ub)_: -+3 NoClash(A_F_1_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(D)_ +-1 NoClash(D_F_3_0)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)__t(F)_bounds_(ub)_: --92 NoClash(A_F_3_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(F)_ +-92 NoClash(D_F_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)__t(A)_bounds_(ub)_: --92 NoClash(A_F_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(D)_ +-92 NoClash(D_F_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)__NoClash(A_F_3_0)_c_(ub)_: -+4 NoClash(A_F_3_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(A)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(F)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(D)_ ++11.0 NoClash(D_F_3_1)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)__t(F)_bounds_(ub)_: --92 NoClash(A_F_3_1)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(F)_ +-92 NoClash(D_F_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)__t(A)_bounds_(ub)_: --92 NoClash(A_F_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(D)_ +-92 NoClash(D_F_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)__NoClash(A_F_3_1)_c_(ub)_: -+6 NoClash(A_F_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(D)_ ++2.0 NoClash(D_E_3_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)__t(G)_bounds_(ub)_: --92 NoClash(A_G_5_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(E)_ +-92 NoClash(D_E_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)__t(A)_bounds_(ub)_: --92 NoClash(A_G_5_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(D)_ +-92 NoClash(D_E_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)__NoClash(A_G_5_0)_c_(ub)_: -+9 NoClash(A_G_5_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(A)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(E)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(D)_ ++9.0 NoClash(D_E_3_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)__t(G)_bounds_(ub)_: --92 NoClash(A_G_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(E)_ +-92 NoClash(D_E_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)__t(A)_bounds_(ub)_: --92 NoClash(A_G_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(D)_ +-92 NoClash(D_E_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)__NoClash(A_G_5_1)_c_(ub)_: --3 NoClash(A_G_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(D)_ ++4.0 NoClash(D_E_2_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)__t(C)_bounds_(ub)_: --92 NoClash(B_C_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(E)_ +-92 NoClash(D_E_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)__t(B)_bounds_(ub)_: --92 NoClash(B_C_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(D)_ +-92 NoClash(D_E_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)__NoClash(B_C_2_0)_c_(ub)_: -+9 NoClash(B_C_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(B)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(C)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(E)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(D)_ ++8.0 NoClash(D_E_2_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)__t(C)_bounds_(ub)_: --92 NoClash(B_C_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(E)_ +-92 NoClash(D_E_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)__t(B)_bounds_(ub)_: --92 NoClash(B_C_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(D)_ +-92 NoClash(D_E_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)__NoClash(B_C_2_1)_c_(ub)_: --3 NoClash(B_C_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(C)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(C)_ ++4.0 NoClash(C_G_4_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)__t(D)_bounds_(ub)_: --92 NoClash(B_D_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(G)_ +-92 NoClash(C_G_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)__t(B)_bounds_(ub)_: --92 NoClash(B_D_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(C)_ +-92 NoClash(C_G_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)__NoClash(B_D_2_0)_c_(ub)_: -+8 NoClash(B_D_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(B)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(D)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(G)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(C)_ ++7.0 NoClash(C_G_4_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)__t(D)_bounds_(ub)_: --92 NoClash(B_D_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(G)_ +-92 NoClash(C_G_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)__t(B)_bounds_(ub)_: --92 NoClash(B_D_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(C)_ +-92 NoClash(C_G_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)__NoClash(B_D_2_1)_c_(ub)_: -+3 NoClash(B_D_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(D)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(C)_ ++2.0 NoClash(C_G_2_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)__t(D)_bounds_(ub)_: --92 NoClash(B_D_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(G)_ +-92 NoClash(C_G_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)__t(B)_bounds_(ub)_: --92 NoClash(B_D_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(C)_ +-92 NoClash(C_G_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)__NoClash(B_D_3_0)_c_(ub)_: -+10 NoClash(B_D_3_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(B)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(D)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(G)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(C)_ ++9.0 NoClash(C_G_2_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)__t(D)_bounds_(ub)_: --92 NoClash(B_D_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(G)_ +-92 NoClash(C_G_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)__t(B)_bounds_(ub)_: --92 NoClash(B_D_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(C)_ +-92 NoClash(C_G_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)__NoClash(B_D_3_1)_c_(ub)_: --1 NoClash(B_D_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(D)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(C)_ ++5.0 NoClash(C_F_4_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)__t(E)_bounds_(ub)_: --92 NoClash(B_E_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)__t(F)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(F)_ +-92 NoClash(C_F_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)__t(B)_bounds_(ub)_: --92 NoClash(B_E_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(C)_ +-92 NoClash(C_F_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)__NoClash(B_E_2_0)_c_(ub)_: -+4 NoClash(B_E_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(B)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(F)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(C)_ ++8.0 NoClash(C_F_4_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)__t(E)_bounds_(ub)_: --92 NoClash(B_E_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)__t(F)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(F)_ +-92 NoClash(C_F_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)__t(B)_bounds_(ub)_: --92 NoClash(B_E_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(C)_ +-92 NoClash(C_F_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)__NoClash(B_E_2_1)_c_(ub)_: -+3 NoClash(B_E_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(C)_ ++2.0 NoClash(C_F_1_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)__t(E)_bounds_(ub)_: --92 NoClash(B_E_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)__t(F)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(F)_ +-92 NoClash(C_F_1_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)__t(B)_bounds_(ub)_: --92 NoClash(B_E_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(C)_ +-92 NoClash(C_F_1_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)__NoClash(B_E_3_0)_c_(ub)_: -+7 NoClash(B_E_3_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(B)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(F)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(C)_ ++6.0 NoClash(C_F_1_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)__t(E)_bounds_(ub)_: --92 NoClash(B_E_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)__t(F)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(F)_ +-92 NoClash(C_F_1_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)__t(B)_bounds_(ub)_: --92 NoClash(B_E_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(C)_ +-92 NoClash(C_F_1_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)__NoClash(B_E_3_1)_c_(ub)_: -+3 NoClash(B_E_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(C)_ +-2.0 NoClash(C_E_2_0)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)__t(E)_bounds_(ub)_: --92 NoClash(B_E_5_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(E)_ +-92 NoClash(C_E_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)__t(B)_bounds_(ub)_: --92 NoClash(B_E_5_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(C)_ +-92 NoClash(C_E_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)__NoClash(B_E_5_0)_c_(ub)_: -+5 NoClash(B_E_5_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(B)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(E)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(C)_ ++9.0 NoClash(C_E_2_1)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)__t(E)_bounds_(ub)_: --92 NoClash(B_E_5_1)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(E)_ +-92 NoClash(C_E_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)__t(B)_bounds_(ub)_: --92 NoClash(B_E_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(C)_ +-92 NoClash(C_E_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)__NoClash(B_E_5_1)_c_(ub)_: -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(C)_ ++5.0 NoClash(C_D_4_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)__t(F)_bounds_(ub)_: --92 NoClash(B_F_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(F)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(D)_ +-92 NoClash(C_D_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)__t(B)_bounds_(ub)_: --92 NoClash(B_F_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(C)_ +-92 NoClash(C_D_4_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)__NoClash(B_F_3_0)_c_(ub)_: -+4 NoClash(B_F_3_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(B)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(D)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(C)_ ++2.0 NoClash(C_D_4_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)__t(F)_bounds_(ub)_: --92 NoClash(B_F_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(F)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(D)_ +-92 NoClash(C_D_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)__t(B)_bounds_(ub)_: --92 NoClash(B_F_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(C)_ +-92 NoClash(C_D_4_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)__NoClash(B_F_3_1)_c_(ub)_: -+5 NoClash(B_F_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(C)_ ++2.0 NoClash(C_D_2_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)__t(G)_bounds_(ub)_: --92 NoClash(B_G_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(D)_ +-92 NoClash(C_D_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)__t(B)_bounds_(ub)_: --92 NoClash(B_G_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(C)_ +-92 NoClash(C_D_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)__NoClash(B_G_2_0)_c_(ub)_: -+8 NoClash(B_G_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(B)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(D)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(C)_ ++9.0 NoClash(C_D_2_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)__t(G)_bounds_(ub)_: --92 NoClash(B_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(D)_ +-92 NoClash(C_D_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)__t(B)_bounds_(ub)_: --92 NoClash(B_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(C)_ +-92 NoClash(C_D_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)__NoClash(B_G_2_1)_c_(ub)_: -+3 NoClash(B_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(B)_ ++10.0 NoClash(B_G_5_0)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)__t(G)_bounds_(ub)_: --92 NoClash(B_G_5_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(G)_ +-92 NoClash(B_G_5_0)_binary_indicator_var <= 0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)__t(B)_bounds_(ub)_: --92 NoClash(B_G_5_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(B)_ +-92 NoClash(B_G_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)__NoClash(B_G_5_0)_c_(ub)_: -+10 NoClash(B_G_5_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(B)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(G)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(B)_ +-3.0 NoClash(B_G_5_1)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)__t(G)_bounds_(ub)_: --92 NoClash(B_G_5_1)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(G)_ +-92 NoClash(B_G_5_1)_binary_indicator_var <= 0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)__t(B)_bounds_(ub)_: --92 NoClash(B_G_5_1)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(B)_ +-92 NoClash(B_G_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)__NoClash(B_G_5_1)_c_(ub)_: --3 NoClash(B_G_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(B)_ ++8.0 NoClash(B_G_2_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)__t(D)_bounds_(ub)_: --92 NoClash(C_D_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(G)_ +-92 NoClash(B_G_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)__t(C)_bounds_(ub)_: --92 NoClash(C_D_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(B)_ +-92 NoClash(B_G_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)__NoClash(C_D_2_0)_c_(ub)_: -+2 NoClash(C_D_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(C)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(D)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(G)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(B)_ ++3.0 NoClash(B_G_2_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)__t(D)_bounds_(ub)_: --92 NoClash(C_D_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(G)_ +-92 NoClash(B_G_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)__t(C)_bounds_(ub)_: --92 NoClash(C_D_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(B)_ +-92 NoClash(B_G_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)__NoClash(C_D_2_1)_c_(ub)_: -+9 NoClash(C_D_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(D)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(B)_ ++4.0 NoClash(B_F_3_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)__t(D)_bounds_(ub)_: --92 NoClash(C_D_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)__t(F)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(F)_ +-92 NoClash(B_F_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)__t(C)_bounds_(ub)_: --92 NoClash(C_D_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(B)_ +-92 NoClash(B_F_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)__NoClash(C_D_4_0)_c_(ub)_: -+5 NoClash(C_D_4_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(C)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(D)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(F)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(B)_ ++5.0 NoClash(B_F_3_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)__t(D)_bounds_(ub)_: --92 NoClash(C_D_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)__t(F)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(F)_ +-92 NoClash(B_F_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)__t(C)_bounds_(ub)_: --92 NoClash(C_D_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(B)_ +-92 NoClash(B_F_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)__NoClash(C_D_4_1)_c_(ub)_: -+2 NoClash(C_D_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(D)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(B)_ ++5.0 NoClash(B_E_5_0)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)__t(E)_bounds_(ub)_: --92 NoClash(C_E_2_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(E)_ +-92 NoClash(B_E_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)__t(C)_bounds_(ub)_: --92 NoClash(C_E_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(B)_ +-92 NoClash(B_E_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)__NoClash(C_E_2_0)_c_(ub)_: --2 NoClash(C_E_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(C)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(E)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(B)_ +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)__t(E)_bounds_(ub)_: --92 NoClash(C_E_2_1)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(E)_ +-92 NoClash(B_E_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)__t(C)_bounds_(ub)_: --92 NoClash(C_E_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(B)_ +-92 NoClash(B_E_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)__NoClash(C_E_2_1)_c_(ub)_: -+9 NoClash(C_E_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(B)_ ++7.0 NoClash(B_E_3_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)__t(F)_bounds_(ub)_: --92 NoClash(C_F_1_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(F)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(E)_ +-92 NoClash(B_E_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)__t(C)_bounds_(ub)_: --92 NoClash(C_F_1_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(B)_ +-92 NoClash(B_E_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)__NoClash(C_F_1_0)_c_(ub)_: -+2 NoClash(C_F_1_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(C)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(E)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(B)_ ++3.0 NoClash(B_E_3_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)__t(F)_bounds_(ub)_: --92 NoClash(C_F_1_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(F)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(E)_ +-92 NoClash(B_E_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)__t(C)_bounds_(ub)_: --92 NoClash(C_F_1_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(B)_ +-92 NoClash(B_E_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)__NoClash(C_F_1_1)_c_(ub)_: -+6 NoClash(C_F_1_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(B)_ ++4.0 NoClash(B_E_2_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)__t(F)_bounds_(ub)_: --92 NoClash(C_F_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(F)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(E)_ +-92 NoClash(B_E_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)__t(C)_bounds_(ub)_: --92 NoClash(C_F_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(B)_ +-92 NoClash(B_E_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)__NoClash(C_F_4_0)_c_(ub)_: -+5 NoClash(C_F_4_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(C)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(E)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(B)_ ++3.0 NoClash(B_E_2_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)__t(F)_bounds_(ub)_: --92 NoClash(C_F_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(F)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(E)_ +-92 NoClash(B_E_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)__t(C)_bounds_(ub)_: --92 NoClash(C_F_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(B)_ +-92 NoClash(B_E_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)__NoClash(C_F_4_1)_c_(ub)_: -+8 NoClash(C_F_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(B)_ ++10.0 NoClash(B_D_3_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)__t(G)_bounds_(ub)_: --92 NoClash(C_G_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(D)_ +-92 NoClash(B_D_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)__t(C)_bounds_(ub)_: --92 NoClash(C_G_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(B)_ +-92 NoClash(B_D_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)__NoClash(C_G_2_0)_c_(ub)_: -+2 NoClash(C_G_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(C)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(D)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(B)_ +-1 NoClash(B_D_3_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)__t(G)_bounds_(ub)_: --92 NoClash(C_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(D)_ +-92 NoClash(B_D_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)__t(C)_bounds_(ub)_: --92 NoClash(C_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(B)_ +-92 NoClash(B_D_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)__NoClash(C_G_2_1)_c_(ub)_: -+9 NoClash(C_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(B)_ ++8.0 NoClash(B_D_2_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)__t(G)_bounds_(ub)_: --92 NoClash(C_G_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(D)_ +-92 NoClash(B_D_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)__t(C)_bounds_(ub)_: --92 NoClash(C_G_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(B)_ +-92 NoClash(B_D_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)__NoClash(C_G_4_0)_c_(ub)_: -+4 NoClash(C_G_4_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(C)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(D)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(B)_ ++3.0 NoClash(B_D_2_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)__t(G)_bounds_(ub)_: --92 NoClash(C_G_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(D)_ +-92 NoClash(B_D_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)__t(C)_bounds_(ub)_: --92 NoClash(C_G_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(B)_ +-92 NoClash(B_D_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)__NoClash(C_G_4_1)_c_(ub)_: -+7 NoClash(C_G_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(B)_ ++9.0 NoClash(B_C_2_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)__t(E)_bounds_(ub)_: --92 NoClash(D_E_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(C)_ +-92 NoClash(B_C_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)__t(D)_bounds_(ub)_: --92 NoClash(D_E_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(B)_ +-92 NoClash(B_C_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)__NoClash(D_E_2_0)_c_(ub)_: -+4 NoClash(D_E_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(D)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(C)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(B)_ +-3.0 NoClash(B_C_2_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)__t(E)_bounds_(ub)_: --92 NoClash(D_E_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(C)_ +-92 NoClash(B_C_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)__t(D)_bounds_(ub)_: --92 NoClash(D_E_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(B)_ +-92 NoClash(B_C_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)__NoClash(D_E_2_1)_c_(ub)_: -+8 NoClash(D_E_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(G)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(A)_ ++9.0 NoClash(A_G_5_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)__t(E)_bounds_(ub)_: --92 NoClash(D_E_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(G)_ +-92 NoClash(A_G_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)__t(D)_bounds_(ub)_: --92 NoClash(D_E_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(A)_ +-92 NoClash(A_G_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)__NoClash(D_E_3_0)_c_(ub)_: -+2 NoClash(D_E_3_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(D)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(G)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(A)_ +-3.0 NoClash(A_G_5_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)__t(E)_bounds_(ub)_: --92 NoClash(D_E_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)__t(G)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(G)_ +-92 NoClash(A_G_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)__t(D)_bounds_(ub)_: --92 NoClash(D_E_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(A)_ +-92 NoClash(A_G_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)__NoClash(D_E_3_1)_c_(ub)_: -+9 NoClash(D_E_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(E)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(A)_ ++4.0 NoClash(A_F_3_0)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)__t(F)_bounds_(ub)_: --92 NoClash(D_F_3_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(F)_ +-92 NoClash(A_F_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)__t(D)_bounds_(ub)_: --92 NoClash(D_F_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(A)_ +-92 NoClash(A_F_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)__NoClash(D_F_3_0)_c_(ub)_: --1 NoClash(D_F_3_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(D)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(F)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(A)_ ++6.0 NoClash(A_F_3_1)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)__t(F)_bounds_(ub)_: --92 NoClash(D_F_3_1)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(F)_ +-92 NoClash(A_F_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)__t(D)_bounds_(ub)_: --92 NoClash(D_F_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(A)_ +-92 NoClash(A_F_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)__NoClash(D_F_3_1)_c_(ub)_: -+11 NoClash(D_F_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(F)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(A)_ ++2.0 NoClash(A_F_1_0)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)__t(F)_bounds_(ub)_: --92 NoClash(D_F_4_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(F)_ +-92 NoClash(A_F_1_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)__t(D)_bounds_(ub)_: --92 NoClash(D_F_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(A)_ +-92 NoClash(A_F_1_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)__NoClash(D_F_4_0)_c_(ub)_: -+1 NoClash(D_F_4_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(D)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(F)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(A)_ ++3.0 NoClash(A_F_1_1)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)__t(F)_bounds_(ub)_: --92 NoClash(D_F_4_1)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(F)_ +-92 NoClash(A_F_1_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)__t(D)_bounds_(ub)_: --92 NoClash(D_F_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(A)_ +-92 NoClash(A_F_1_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)__NoClash(D_F_4_1)_c_(ub)_: -+7 NoClash(D_F_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(A)_ ++4.0 NoClash(A_E_5_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)__t(G)_bounds_(ub)_: --92 NoClash(D_G_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(E)_ +-92 NoClash(A_E_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)__t(D)_bounds_(ub)_: --92 NoClash(D_G_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(A)_ +-92 NoClash(A_E_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)__NoClash(D_G_2_0)_c_(ub)_: -+8 NoClash(D_G_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(D)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(E)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(A)_ +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)__t(G)_bounds_(ub)_: --92 NoClash(D_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(E)_ +-92 NoClash(A_E_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)__t(D)_bounds_(ub)_: --92 NoClash(D_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(A)_ +-92 NoClash(A_E_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)__NoClash(D_G_2_1)_c_(ub)_: -+8 NoClash(D_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(E)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(A)_ ++7.0 NoClash(A_E_3_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)__t(G)_bounds_(ub)_: --92 NoClash(D_G_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(E)_ +-92 NoClash(A_E_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)__t(D)_bounds_(ub)_: --92 NoClash(D_G_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(A)_ +-92 NoClash(A_E_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)__NoClash(D_G_4_0)_c_(ub)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(D)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(E)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(A)_ ++4.0 NoClash(A_E_3_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)__t(G)_bounds_(ub)_: --92 NoClash(D_G_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)__t(E)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(E)_ +-92 NoClash(A_E_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)__t(D)_bounds_(ub)_: --92 NoClash(D_G_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(D)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(A)_ +-92 NoClash(A_E_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)__NoClash(D_G_4_1)_c_(ub)_: -+6 NoClash(D_G_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(D)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(D)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(A)_ ++10.0 NoClash(A_D_3_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)__t(F)_bounds_(ub)_: --92 NoClash(E_F_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(F)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(D)_ +-92 NoClash(A_D_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)__t(E)_bounds_(ub)_: --92 NoClash(E_F_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(A)_ +-92 NoClash(A_D_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)__NoClash(E_F_3_0)_c_(ub)_: -+3 NoClash(E_F_3_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(E)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(D)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(A)_ +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)__t(F)_bounds_(ub)_: --92 NoClash(E_F_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(F)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)__t(D)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(D)_ +-92 NoClash(A_D_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)__t(E)_bounds_(ub)_: --92 NoClash(E_F_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(A)_ +-92 NoClash(A_D_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)__NoClash(E_F_3_1)_c_(ub)_: -+8 NoClash(E_F_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(F)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(A)_ ++6.0 NoClash(A_C_1_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)__t(G)_bounds_(ub)_: --92 NoClash(E_G_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(C)_ +-92 NoClash(A_C_1_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)__t(E)_bounds_(ub)_: --92 NoClash(E_G_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(A)_ +-92 NoClash(A_C_1_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)__NoClash(E_G_2_0)_c_(ub)_: -+8 NoClash(E_G_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(E)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(C)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(A)_ ++3.0 NoClash(A_C_1_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)__t(G)_bounds_(ub)_: --92 NoClash(E_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(C)_ +-92 NoClash(A_C_1_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)__t(E)_bounds_(ub)_: --92 NoClash(E_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(A)_ +-92 NoClash(A_C_1_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)__NoClash(E_G_2_1)_c_(ub)_: -+4 NoClash(E_G_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(A)_ ++2.0 NoClash(A_B_5_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)__t(G)_bounds_(ub)_: --92 NoClash(E_G_5_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(B)_ +-92 NoClash(A_B_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)__t(E)_bounds_(ub)_: --92 NoClash(E_G_5_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(A)_ +-92 NoClash(A_B_5_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)__NoClash(E_G_5_0)_c_(ub)_: -+7 NoClash(E_G_5_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(E)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(B)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(A)_ ++3.0 NoClash(A_B_5_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)__t(G)_bounds_(ub)_: --92 NoClash(E_G_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(B)_ +-92 NoClash(A_B_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)__t(E)_bounds_(ub)_: --92 NoClash(E_G_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(E)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(A)_ +-92 NoClash(A_B_5_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)__NoClash(E_G_5_1)_c_(ub)_: --1 NoClash(E_G_5_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(E)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(A)_ ++4.0 NoClash(A_B_3_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)__t(G)_bounds_(ub)_: --92 NoClash(F_G_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(G)_ -<= 0 - -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)__t(F)_bounds_(ub)_: --92 NoClash(F_G_4_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(F)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(B)_ +-92 NoClash(A_B_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)__NoClash(F_G_4_0)_c_(ub)_: -+6 NoClash(F_G_4_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(F)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(A)_ +-92 NoClash(A_B_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)__t(G)_bounds_(ub)_: --92 NoClash(F_G_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(G)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(B)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(A)_ ++5.0 NoClash(A_B_3_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)__t(F)_bounds_(ub)_: --92 NoClash(F_G_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(F)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(B)_ +-92 NoClash(A_B_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)__NoClash(F_G_4_1)_c_(ub)_: -+6 NoClash(F_G_4_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(F)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(G)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(A)_ +-92 NoClash(A_B_3_1)_binary_indicator_var <= 0 -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 - bounds - -inf <= ms <= +inf + -inf <= ms <= +inf 0 <= t(A) <= 92 0 <= t(B) <= 92 0 <= t(C) <= 92 @@ -1764,285 +1761,285 @@ bounds 0 <= t(E) <= 92 0 <= t(F) <= 92 0 <= t(G) <= 92 - 0 <= NoClash(A_B_3_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(B)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_B_3_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(B)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_B_5_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(B)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_B_5_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(B)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_C_1_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(C)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_C_1_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(C)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_D_3_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(D)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_D_3_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(D)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_E_3_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_E_3_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_E_5_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_E_5_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_F_1_0)_binary_indicator_var <= 1 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(F)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(F)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(F)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(F)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(6)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(7)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(8)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(9)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(10)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(11)_disaggregatedVars__t(D)_ <= 92 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_F_1_1)_binary_indicator_var <= 1 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_F_3_0)_binary_indicator_var <= 1 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(12)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(13)_disaggregatedVars__t(D)_ <= 92 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_F_3_1)_binary_indicator_var <= 1 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_G_5_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(A_G_5_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(A)_ <= 92 - 0 <= NoClash(B_C_2_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(C)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_C_2_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(C)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_D_2_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(D)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_D_2_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(D)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_D_3_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(D)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_D_3_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(D)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_E_2_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_E_2_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_E_3_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_E_3_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_E_5_0)_binary_indicator_var <= 1 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(14)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(15)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(16)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(17)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(18)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(19)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(20)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(21)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(22)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(23)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(F)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(F)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(24)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(25)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(F)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(F)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(26)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(27)_disaggregatedVars__t(C)_ <= 92 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_E_5_1)_binary_indicator_var <= 1 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_F_3_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_F_3_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_G_2_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_G_2_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_G_5_0)_binary_indicator_var <= 1 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(28)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(29)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(30)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(31)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(32)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(33)_disaggregatedVars__t(C)_ <= 92 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(B_G_5_1)_binary_indicator_var <= 1 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(34)_disaggregatedVars__t(B)_ <= 92 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(35)_disaggregatedVars__t(B)_ <= 92 - 0 <= NoClash(C_D_2_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(D)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_D_2_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(D)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_D_4_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(D)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_D_4_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(D)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_E_2_0)_binary_indicator_var <= 1 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(36)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(37)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(F)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(F)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(38)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(39)_disaggregatedVars__t(B)_ <= 92 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_E_2_1)_binary_indicator_var <= 1 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_F_1_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_F_1_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_F_4_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_F_4_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_G_2_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_G_2_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_G_4_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(C_G_4_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(C)_ <= 92 - 0 <= NoClash(D_E_2_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_E_2_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_E_3_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_E_3_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(E)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_F_3_0)_binary_indicator_var <= 1 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(40)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(41)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(42)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(43)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(44)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(45)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(46)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(47)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(48)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(49)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(50)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(51)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(G)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(52)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(53)_disaggregatedVars__t(A)_ <= 92 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_F_3_1)_binary_indicator_var <= 1 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_F_4_0)_binary_indicator_var <= 1 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(54)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(55)_disaggregatedVars__t(A)_ <= 92 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_F_4_1)_binary_indicator_var <= 1 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_G_2_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_G_2_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_G_4_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(D_G_4_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(D)_ <= 92 - 0 <= NoClash(E_F_3_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(E)_ <= 92 - 0 <= NoClash(E_F_3_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(F)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(E)_ <= 92 - 0 <= NoClash(E_G_2_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(E)_ <= 92 - 0 <= NoClash(E_G_2_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(E)_ <= 92 - 0 <= NoClash(E_G_5_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(E)_ <= 92 - 0 <= NoClash(E_G_5_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(56)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(57)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(58)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(59)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(E)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(60)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(61)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(D)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(62)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(63)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(C)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(64)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(65)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(66)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(67)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(B)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(A)_ <= 92 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(A)_ <= 92 0 <= NoClash(F_G_4_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(68)_disaggregatedVars__t(F)_ <= 92 0 <= NoClash(F_G_4_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(G)_ <= 92 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(69)_disaggregatedVars__t(F)_ <= 92 + 0 <= NoClash(E_G_5_0)_binary_indicator_var <= 1 + 0 <= NoClash(E_G_5_1)_binary_indicator_var <= 1 + 0 <= NoClash(E_G_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(E_G_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(E_F_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(E_F_3_1)_binary_indicator_var <= 1 + 0 <= NoClash(D_G_4_0)_binary_indicator_var <= 1 + 0 <= NoClash(D_G_4_1)_binary_indicator_var <= 1 + 0 <= NoClash(D_G_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(D_G_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(D_F_4_0)_binary_indicator_var <= 1 + 0 <= NoClash(D_F_4_1)_binary_indicator_var <= 1 + 0 <= NoClash(D_F_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(D_F_3_1)_binary_indicator_var <= 1 + 0 <= NoClash(D_E_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(D_E_3_1)_binary_indicator_var <= 1 + 0 <= NoClash(D_E_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(D_E_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(C_G_4_0)_binary_indicator_var <= 1 + 0 <= NoClash(C_G_4_1)_binary_indicator_var <= 1 + 0 <= NoClash(C_G_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(C_G_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(C_F_4_0)_binary_indicator_var <= 1 + 0 <= NoClash(C_F_4_1)_binary_indicator_var <= 1 + 0 <= NoClash(C_F_1_0)_binary_indicator_var <= 1 + 0 <= NoClash(C_F_1_1)_binary_indicator_var <= 1 + 0 <= NoClash(C_E_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(C_E_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(C_D_4_0)_binary_indicator_var <= 1 + 0 <= NoClash(C_D_4_1)_binary_indicator_var <= 1 + 0 <= NoClash(C_D_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(C_D_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(B_G_5_0)_binary_indicator_var <= 1 + 0 <= NoClash(B_G_5_1)_binary_indicator_var <= 1 + 0 <= NoClash(B_G_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(B_G_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(B_F_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(B_F_3_1)_binary_indicator_var <= 1 + 0 <= NoClash(B_E_5_0)_binary_indicator_var <= 1 + 0 <= NoClash(B_E_5_1)_binary_indicator_var <= 1 + 0 <= NoClash(B_E_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(B_E_3_1)_binary_indicator_var <= 1 + 0 <= NoClash(B_E_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(B_E_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(B_D_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(B_D_3_1)_binary_indicator_var <= 1 + 0 <= NoClash(B_D_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(B_D_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(B_C_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(B_C_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_G_5_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_G_5_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_F_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_F_3_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_F_1_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_F_1_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_E_5_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_E_5_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_E_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_E_3_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_D_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_D_3_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_C_1_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_C_1_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_B_5_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_B_5_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_B_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_B_3_1)_binary_indicator_var <= 1 binary - NoClash(A_B_3_0)_binary_indicator_var - NoClash(A_B_3_1)_binary_indicator_var - NoClash(A_B_5_0)_binary_indicator_var - NoClash(A_B_5_1)_binary_indicator_var - NoClash(A_C_1_0)_binary_indicator_var - NoClash(A_C_1_1)_binary_indicator_var - NoClash(A_D_3_0)_binary_indicator_var - NoClash(A_D_3_1)_binary_indicator_var - NoClash(A_E_3_0)_binary_indicator_var - NoClash(A_E_3_1)_binary_indicator_var - NoClash(A_E_5_0)_binary_indicator_var - NoClash(A_E_5_1)_binary_indicator_var - NoClash(A_F_1_0)_binary_indicator_var - NoClash(A_F_1_1)_binary_indicator_var - NoClash(A_F_3_0)_binary_indicator_var - NoClash(A_F_3_1)_binary_indicator_var - NoClash(A_G_5_0)_binary_indicator_var - NoClash(A_G_5_1)_binary_indicator_var - NoClash(B_C_2_0)_binary_indicator_var - NoClash(B_C_2_1)_binary_indicator_var - NoClash(B_D_2_0)_binary_indicator_var - NoClash(B_D_2_1)_binary_indicator_var - NoClash(B_D_3_0)_binary_indicator_var - NoClash(B_D_3_1)_binary_indicator_var - NoClash(B_E_2_0)_binary_indicator_var - NoClash(B_E_2_1)_binary_indicator_var - NoClash(B_E_3_0)_binary_indicator_var - NoClash(B_E_3_1)_binary_indicator_var - NoClash(B_E_5_0)_binary_indicator_var - NoClash(B_E_5_1)_binary_indicator_var - NoClash(B_F_3_0)_binary_indicator_var - NoClash(B_F_3_1)_binary_indicator_var - NoClash(B_G_2_0)_binary_indicator_var - NoClash(B_G_2_1)_binary_indicator_var - NoClash(B_G_5_0)_binary_indicator_var - NoClash(B_G_5_1)_binary_indicator_var - NoClash(C_D_2_0)_binary_indicator_var - NoClash(C_D_2_1)_binary_indicator_var - NoClash(C_D_4_0)_binary_indicator_var - NoClash(C_D_4_1)_binary_indicator_var - NoClash(C_E_2_0)_binary_indicator_var - NoClash(C_E_2_1)_binary_indicator_var - NoClash(C_F_1_0)_binary_indicator_var - NoClash(C_F_1_1)_binary_indicator_var - NoClash(C_F_4_0)_binary_indicator_var - NoClash(C_F_4_1)_binary_indicator_var - NoClash(C_G_2_0)_binary_indicator_var - NoClash(C_G_2_1)_binary_indicator_var - NoClash(C_G_4_0)_binary_indicator_var - NoClash(C_G_4_1)_binary_indicator_var - NoClash(D_E_2_0)_binary_indicator_var - NoClash(D_E_2_1)_binary_indicator_var - NoClash(D_E_3_0)_binary_indicator_var - NoClash(D_E_3_1)_binary_indicator_var - NoClash(D_F_3_0)_binary_indicator_var - NoClash(D_F_3_1)_binary_indicator_var - NoClash(D_F_4_0)_binary_indicator_var - NoClash(D_F_4_1)_binary_indicator_var - NoClash(D_G_2_0)_binary_indicator_var - NoClash(D_G_2_1)_binary_indicator_var - NoClash(D_G_4_0)_binary_indicator_var - NoClash(D_G_4_1)_binary_indicator_var - NoClash(E_F_3_0)_binary_indicator_var - NoClash(E_F_3_1)_binary_indicator_var - NoClash(E_G_2_0)_binary_indicator_var - NoClash(E_G_2_1)_binary_indicator_var - NoClash(E_G_5_0)_binary_indicator_var - NoClash(E_G_5_1)_binary_indicator_var NoClash(F_G_4_0)_binary_indicator_var NoClash(F_G_4_1)_binary_indicator_var + NoClash(E_G_5_0)_binary_indicator_var + NoClash(E_G_5_1)_binary_indicator_var + NoClash(E_G_2_0)_binary_indicator_var + NoClash(E_G_2_1)_binary_indicator_var + NoClash(E_F_3_0)_binary_indicator_var + NoClash(E_F_3_1)_binary_indicator_var + NoClash(D_G_4_0)_binary_indicator_var + NoClash(D_G_4_1)_binary_indicator_var + NoClash(D_G_2_0)_binary_indicator_var + NoClash(D_G_2_1)_binary_indicator_var + NoClash(D_F_4_0)_binary_indicator_var + NoClash(D_F_4_1)_binary_indicator_var + NoClash(D_F_3_0)_binary_indicator_var + NoClash(D_F_3_1)_binary_indicator_var + NoClash(D_E_3_0)_binary_indicator_var + NoClash(D_E_3_1)_binary_indicator_var + NoClash(D_E_2_0)_binary_indicator_var + NoClash(D_E_2_1)_binary_indicator_var + NoClash(C_G_4_0)_binary_indicator_var + NoClash(C_G_4_1)_binary_indicator_var + NoClash(C_G_2_0)_binary_indicator_var + NoClash(C_G_2_1)_binary_indicator_var + NoClash(C_F_4_0)_binary_indicator_var + NoClash(C_F_4_1)_binary_indicator_var + NoClash(C_F_1_0)_binary_indicator_var + NoClash(C_F_1_1)_binary_indicator_var + NoClash(C_E_2_0)_binary_indicator_var + NoClash(C_E_2_1)_binary_indicator_var + NoClash(C_D_4_0)_binary_indicator_var + NoClash(C_D_4_1)_binary_indicator_var + NoClash(C_D_2_0)_binary_indicator_var + NoClash(C_D_2_1)_binary_indicator_var + NoClash(B_G_5_0)_binary_indicator_var + NoClash(B_G_5_1)_binary_indicator_var + NoClash(B_G_2_0)_binary_indicator_var + NoClash(B_G_2_1)_binary_indicator_var + NoClash(B_F_3_0)_binary_indicator_var + NoClash(B_F_3_1)_binary_indicator_var + NoClash(B_E_5_0)_binary_indicator_var + NoClash(B_E_5_1)_binary_indicator_var + NoClash(B_E_3_0)_binary_indicator_var + NoClash(B_E_3_1)_binary_indicator_var + NoClash(B_E_2_0)_binary_indicator_var + NoClash(B_E_2_1)_binary_indicator_var + NoClash(B_D_3_0)_binary_indicator_var + NoClash(B_D_3_1)_binary_indicator_var + NoClash(B_D_2_0)_binary_indicator_var + NoClash(B_D_2_1)_binary_indicator_var + NoClash(B_C_2_0)_binary_indicator_var + NoClash(B_C_2_1)_binary_indicator_var + NoClash(A_G_5_0)_binary_indicator_var + NoClash(A_G_5_1)_binary_indicator_var + NoClash(A_F_3_0)_binary_indicator_var + NoClash(A_F_3_1)_binary_indicator_var + NoClash(A_F_1_0)_binary_indicator_var + NoClash(A_F_1_1)_binary_indicator_var + NoClash(A_E_5_0)_binary_indicator_var + NoClash(A_E_5_1)_binary_indicator_var + NoClash(A_E_3_0)_binary_indicator_var + NoClash(A_E_3_1)_binary_indicator_var + NoClash(A_D_3_0)_binary_indicator_var + NoClash(A_D_3_1)_binary_indicator_var + NoClash(A_C_1_0)_binary_indicator_var + NoClash(A_C_1_1)_binary_indicator_var + NoClash(A_B_5_0)_binary_indicator_var + NoClash(A_B_5_1)_binary_indicator_var + NoClash(A_B_3_0)_binary_indicator_var + NoClash(A_B_3_1)_binary_indicator_var end diff --git a/pyomo/gdp/tests/jobshop_small_bigm.lp b/pyomo/gdp/tests/jobshop_small_bigm.lp index 0be8eab77c0..836e2f4644d 100644 --- a/pyomo/gdp/tests/jobshop_small_bigm.lp +++ b/pyomo/gdp/tests/jobshop_small_bigm.lp @@ -36,47 +36,44 @@ c_e__pyomo_gdp_bigm_reformulation_disj_xor(B_C_2)_: +1 NoClash(B_C_2_1)_binary_indicator_var = 1 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(0)__NoClash(A_B_3_0)_c_(ub)_: -+19 NoClash(A_B_3_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(0)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(B) -<= 19 ++19.0 NoClash(A_B_3_0)_binary_indicator_var +<= 19.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(1)__NoClash(A_B_3_1)_c_(ub)_: -+24 NoClash(A_B_3_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(1)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(B) -<= 19 ++24.0 NoClash(A_B_3_1)_binary_indicator_var +<= 19.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(2)__NoClash(A_C_1_0)_c_(ub)_: -+21 NoClash(A_C_1_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(2)_transformedConstraints(c_0_None_ub)_: -1 t(A) +1 t(C) -<= 19 ++21.0 NoClash(A_C_1_0)_binary_indicator_var +<= 19.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(3)__NoClash(A_C_1_1)_c_(ub)_: -+24 NoClash(A_C_1_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(3)_transformedConstraints(c_0_None_ub)_: +1 t(A) -1 t(C) -<= 19 ++24.0 NoClash(A_C_1_1)_binary_indicator_var +<= 19.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(4)__NoClash(B_C_2_0)_c_(ub)_: -+25 NoClash(B_C_2_0)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(4)_transformedConstraints(c_0_None_ub)_: -1 t(B) +1 t(C) -<= 19 ++25.0 NoClash(B_C_2_0)_binary_indicator_var +<= 19.0 -c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(5)__NoClash(B_C_2_1)_c_(ub)_: -+20 NoClash(B_C_2_1)_binary_indicator_var +c_u__pyomo_gdp_bigm_reformulation_relaxedDisjuncts(5)_transformedConstraints(c_0_None_ub)_: +1 t(B) -1 t(C) -<= 19 - -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 ++20.0 NoClash(B_C_2_1)_binary_indicator_var +<= 19.0 bounds - -inf <= ms <= +inf + -inf <= ms <= +inf 0 <= t(A) <= 19 0 <= t(B) <= 19 0 <= t(C) <= 19 diff --git a/pyomo/gdp/tests/jobshop_small_hull.lp b/pyomo/gdp/tests/jobshop_small_hull.lp index 10edf91b02f..95434e3122f 100644 --- a/pyomo/gdp/tests/jobshop_small_hull.lp +++ b/pyomo/gdp/tests/jobshop_small_hull.lp @@ -21,45 +21,45 @@ c_u_Feas(C)_: +1 t(C) <= -6 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_B_3)_: +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0)_: ++1 t(C) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(C)_ += 0 + +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1)_: ++1 t(B) -1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(B)_ -1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(B)_ -+1 t(B) = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_A_C_1)_: +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(2)_: ++1 t(C) -1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(C)_ -1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(C)_ -+1 t(C) = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(0_B_C_2)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(C)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(C)_ -+1 t(C) -= 0 - -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_B_3)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(A)_ +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(3)_: +1 t(A) -= 0 - -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_A_C_1)_: -1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(A)_ -1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(A)_ -+1 t(A) = 0 -c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(1_B_C_2)_: +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(4)_: ++1 t(B) -1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(B)_ -1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(B)_ -+1 t(B) = 0 -c_e__pyomo_gdp_hull_reformulation_disj_xor(A_B_3)_: -+1 NoClash(A_B_3_0)_binary_indicator_var -+1 NoClash(A_B_3_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disaggregationConstraints(5)_: ++1 t(A) +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(A)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(A)_ += 0 + +c_e__pyomo_gdp_hull_reformulation_disj_xor(B_C_2)_: ++1 NoClash(B_C_2_0)_binary_indicator_var ++1 NoClash(B_C_2_1)_binary_indicator_var = 1 c_e__pyomo_gdp_hull_reformulation_disj_xor(A_C_1)_: @@ -67,137 +67,134 @@ c_e__pyomo_gdp_hull_reformulation_disj_xor(A_C_1)_: +1 NoClash(A_C_1_1)_binary_indicator_var = 1 -c_e__pyomo_gdp_hull_reformulation_disj_xor(B_C_2)_: -+1 NoClash(B_C_2_0)_binary_indicator_var -+1 NoClash(B_C_2_1)_binary_indicator_var +c_e__pyomo_gdp_hull_reformulation_disj_xor(A_B_3)_: ++1 NoClash(A_B_3_0)_binary_indicator_var ++1 NoClash(A_B_3_1)_binary_indicator_var = 1 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)__t(B)_bounds_(ub)_: --19 NoClash(A_B_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(B)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(B)_ ++6.0 NoClash(B_C_2_0)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)__t(A)_bounds_(ub)_: --19 NoClash(A_B_3_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(C)_ +-19 NoClash(B_C_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)__NoClash(A_B_3_0)_c_(ub)_: --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)__t(B)_bounds_(ub)_: +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(B)_ +-19 NoClash(B_C_2_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)__t(B)_bounds_(ub)_: --19 NoClash(A_B_3_1)_binary_indicator_var +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(C)_ +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(B)_ -<= 0 ++1 NoClash(B_C_2_1)_binary_indicator_var +<= 0.0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)__t(A)_bounds_(ub)_: --19 NoClash(A_B_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(A)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)__t(C)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(C)_ +-19 NoClash(B_C_2_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)__NoClash(A_B_3_1)_c_(ub)_: -+5 NoClash(A_B_3_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(B)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)__t(B)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(B)_ +-19 NoClash(B_C_2_1)_binary_indicator_var <= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(C)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(A)_ ++2.0 NoClash(A_C_1_0)_binary_indicator_var +<= 0.0 + c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)__t(C)_bounds_(ub)_: --19 NoClash(A_C_1_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(C)_ +-19 NoClash(A_C_1_0)_binary_indicator_var <= 0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)__t(A)_bounds_(ub)_: --19 NoClash(A_C_1_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(A)_ +-19 NoClash(A_C_1_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)__NoClash(A_C_1_0)_c_(ub)_: -+2 NoClash(A_C_1_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(A)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(C)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(C)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(A)_ ++5.0 NoClash(A_C_1_1)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)__t(C)_bounds_(ub)_: --19 NoClash(A_C_1_1)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(C)_ -<= 0 - -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)__t(A)_bounds_(ub)_: -19 NoClash(A_C_1_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(A)_ <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)__NoClash(A_C_1_1)_c_(ub)_: -+5 NoClash(A_C_1_1)_binary_indicator_var +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)__t(A)_bounds_(ub)_: +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(A)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(C)_ +-19 NoClash(A_C_1_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)__t(C)_bounds_(ub)_: --19 NoClash(B_C_2_0)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(C)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_transformedConstraints(c_0_ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(B)_ +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(A)_ +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)__t(B)_bounds_(ub)_: --19 NoClash(B_C_2_0)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(B)_ +-19 NoClash(A_B_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)__NoClash(B_C_2_0)_c_(ub)_: -+6 NoClash(B_C_2_0)_binary_indicator_var --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(B)_ -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(A)_ +-19 NoClash(A_B_3_0)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)__t(C)_bounds_(ub)_: --19 NoClash(B_C_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(C)_ -<= 0 +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_transformedConstraints(c_0_ub)_: +-1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(B)_ ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(A)_ ++5.0 NoClash(A_B_3_1)_binary_indicator_var +<= 0.0 c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)__t(B)_bounds_(ub)_: --19 NoClash(B_C_2_1)_binary_indicator_var +1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(B)_ +-19 NoClash(A_B_3_1)_binary_indicator_var <= 0 -c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)__NoClash(B_C_2_1)_c_(ub)_: -+1 NoClash(B_C_2_1)_binary_indicator_var -+1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(B)_ --1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(C)_ +c_u__pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)__t(A)_bounds_(ub)_: ++1 _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(A)_ +-19 NoClash(A_B_3_1)_binary_indicator_var <= 0 -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 - bounds - -inf <= ms <= +inf + -inf <= ms <= +inf 0 <= t(A) <= 19 0 <= t(B) <= 19 0 <= t(C) <= 19 - 0 <= NoClash(A_B_3_0)_binary_indicator_var <= 1 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(C)_ <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(C)_ <= 19 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(B)_ <= 19 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(0)_disaggregatedVars__t(A)_ <= 19 - 0 <= NoClash(A_B_3_1)_binary_indicator_var <= 1 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(B)_ <= 19 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(1)_disaggregatedVars__t(A)_ <= 19 - 0 <= NoClash(A_C_1_0)_binary_indicator_var <= 1 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(C)_ <= 19 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(A)_ <= 19 - 0 <= NoClash(A_C_1_1)_binary_indicator_var <= 1 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(C)_ <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(2)_disaggregatedVars__t(A)_ <= 19 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(3)_disaggregatedVars__t(A)_ <= 19 - 0 <= NoClash(B_C_2_0)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(C)_ <= 19 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(B)_ <= 19 - 0 <= NoClash(B_C_2_1)_binary_indicator_var <= 1 - 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(C)_ <= 19 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(B)_ <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(4)_disaggregatedVars__t(A)_ <= 19 + 0 <= _pyomo_gdp_hull_reformulation_relaxedDisjuncts(5)_disaggregatedVars__t(A)_ <= 19 + 0 <= NoClash(B_C_2_0)_binary_indicator_var <= 1 + 0 <= NoClash(B_C_2_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_C_1_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_C_1_1)_binary_indicator_var <= 1 + 0 <= NoClash(A_B_3_0)_binary_indicator_var <= 1 + 0 <= NoClash(A_B_3_1)_binary_indicator_var <= 1 binary - NoClash(A_B_3_0)_binary_indicator_var - NoClash(A_B_3_1)_binary_indicator_var - NoClash(A_C_1_0)_binary_indicator_var - NoClash(A_C_1_1)_binary_indicator_var NoClash(B_C_2_0)_binary_indicator_var NoClash(B_C_2_1)_binary_indicator_var + NoClash(A_C_1_0)_binary_indicator_var + NoClash(A_C_1_1)_binary_indicator_var + NoClash(A_B_3_0)_binary_indicator_var + NoClash(A_B_3_1)_binary_indicator_var end diff --git a/pyomo/gdp/tests/models.py b/pyomo/gdp/tests/models.py index 0a6aa6e60f2..723e25a4a9e 100644 --- a/pyomo/gdp/tests/models.py +++ b/pyomo/gdp/tests/models.py @@ -1,23 +1,39 @@ -from pyomo.core import (Block, ConcreteModel, Constraint, Objective, Param, Set, - Var, inequality, RangeSet, Any, Expression, maximize, - TransformationFactory, BooleanVar, LogicalConstraint, - exactly) -from pyomo.core.expr.current import sqrt +from pyomo.core import ( + Block, + ConcreteModel, + Constraint, + Objective, + Param, + Set, + Var, + inequality, + RangeSet, + Any, + Expression, + maximize, + TransformationFactory, + BooleanVar, + LogicalConstraint, + exactly, +) +from pyomo.core.expr import sqrt from pyomo.gdp import Disjunct, Disjunction import pyomo.network as ntwk + def oneVarDisj_2pts(): m = ConcreteModel() m.x = Var(bounds=(0, 10)) m.disj1 = Disjunct() - m.disj1.xTrue = Constraint(expr=m.x==1) + m.disj1.xTrue = Constraint(expr=m.x == 1) m.disj2 = Disjunct() - m.disj2.xFalse = Constraint(expr=m.x==0) + m.disj2.xFalse = Constraint(expr=m.x == 0) m.disjunction = Disjunction(expr=[m.disj1, m.disj2]) m.obj = Objective(expr=m.x) return m + def twoSegments_SawayaGrossmann(): m = ConcreteModel() m.x = Var(bounds=(0, 3)) @@ -33,9 +49,9 @@ def twoSegments_SawayaGrossmann(): return m + def makeTwoTermDisj(): - """Single two-term disjunction which has all of ==, <=, and >= constraints - """ + """Single two-term disjunction which has all of ==, <=, and >= constraints""" m = ConcreteModel() m.a = Var(bounds=(2, 7)) m.x = Var(bounds=(4, 9)) @@ -47,13 +63,14 @@ def d_rule(disjunct, flag): disjunct.c2 = Constraint(expr=m.x <= 7) else: disjunct.c = Constraint(expr=m.a >= 5) + m.d = Disjunct([0, 1], rule=d_rule) m.disjunction = Disjunction(expr=[m.d[0], m.d[1]]) return m def makeTwoTermDisj_Nonlinear(): - """Single two-term disjunction which has all of ==, <=, and >= and + """Single two-term disjunction which has all of ==, <=, and >= and one nonlinear constraint. """ m = ConcreteModel() @@ -69,14 +86,15 @@ def d_rule(disjunct, flag): disjunct.c3 = Constraint(expr=(1, m.x, 3)) else: disjunct.c = Constraint(expr=m.x + m.y**2 <= 14) + m.d = Disjunct([0, 1], rule=d_rule) m.disjunction = Disjunction(expr=[m.d[0], m.d[1]]) return m def makeTwoTermDisj_IndexedConstraints(): - """Single two-term disjunction with IndexedConstraints on both disjuncts. - Does not bound the variables, so cannot be transformed by hull at all and + """Single two-term disjunction with IndexedConstraints on both disjuncts. + Does not bound the variables, so cannot be transformed by hull at all and requires specifying m values in bigm. """ m = ConcreteModel() @@ -89,7 +107,9 @@ def disj1_rule(disjunct): def c_rule(d, s): return m.a[s] == 0 + disjunct.c = Constraint(m.s, rule=c_rule) + m.b.simpledisj1 = Disjunct(rule=disj1_rule) def disj2_rule(disjunct): @@ -97,15 +117,16 @@ def disj2_rule(disjunct): def c_rule(d, s): return m.a[s] <= 3 + disjunct.c = Constraint(m.s, rule=c_rule) + m.b.simpledisj2 = Disjunct(rule=disj2_rule) m.b.disjunction = Disjunction(expr=[m.b.simpledisj1, m.b.simpledisj2]) return m def makeTwoTermDisj_IndexedConstraints_BoundedVars(): - """Single two-term disjunction with IndexedConstraints on both disjuncts. - """ + """Single two-term disjunction with IndexedConstraints on both disjuncts.""" m = ConcreteModel() m.s = Set(initialize=[1, 2]) m.lbs = Param(m.s, initialize={1: 2, 2: 4}) @@ -113,6 +134,7 @@ def makeTwoTermDisj_IndexedConstraints_BoundedVars(): def bounds_rule(m, s): return (m.lbs[s], m.ubs[s]) + m.a = Var(m.s, bounds=bounds_rule) def d_rule(disjunct, flag): @@ -123,30 +145,33 @@ def true_rule(d, s): def false_rule(d, s): return m.a[s] >= 5 + if flag: disjunct.c = Constraint(m.s, rule=true_rule) else: disjunct.c = Constraint(m.s, rule=false_rule) + m.disjunct = Disjunct([0, 1], rule=d_rule) m.disjunction = Disjunction(expr=[m.disjunct[0], m.disjunct[1]]) return m + def localVar(): - """Two-term disjunction which declares a local variable y on one of the + """Two-term disjunction which declares a local variable y on one of the disjuncts, which is used in the objective function as well. - Used to test that we will treat y as global in the transformations, + Used to test that we will treat y as global in the transformations, despite where it is declared. """ # y appears in a global constraint and a single disjunct. m = ConcreteModel() - m.x = Var(bounds=(0,3)) + m.x = Var(bounds=(0, 3)) m.disj1 = Disjunct() m.disj1.cons = Constraint(expr=m.x >= 1) m.disj2 = Disjunct() - m.disj2.y = Var(bounds=(1,3)) + m.disj2.y = Var(bounds=(1, 3)) m.disj2.cons = Constraint(expr=m.x + m.disj2.y == 3) m.disjunction = Disjunction(expr=[m.disj1, m.disj2]) @@ -156,6 +181,15 @@ def localVar(): return m +def make_infeasible_gdp_model(): + m = ConcreteModel() + m.x = Var(bounds=(0, 2)) + m.d = Disjunction(expr=[[m.x**2 >= 3, m.x >= 3], [m.x**2 <= -1, m.x <= -1]]) + m.o = Objective(expr=m.x) + + return m + + def makeThreeTermIndexedDisj(): """Three-term indexed disjunction""" m = ConcreteModel() @@ -170,18 +204,21 @@ def d_rule(disjunct, flag, s): disjunct.c = Constraint(expr=m.a[s] >= 5) else: disjunct.c = Constraint(expr=inequality(2, m.a[s], 4)) + m.disjunct = Disjunct([0, 1, 2], m.s, rule=d_rule) def disj_rule(m, s): return [m.disjunct[0, s], m.disjunct[1, s], m.disjunct[2, s]] + m.disjunction = Disjunction(m.s, rule=disj_rule) return m def makeTwoTermDisj_boxes(): m = ConcreteModel() - m.x = Var(bounds=(0,5)) - m.y = Var(bounds=(0,5)) + m.x = Var(bounds=(0, 5)) + m.y = Var(bounds=(0, 5)) + def d_rule(disjunct, flag): m = disjunct.model() if flag: @@ -190,11 +227,14 @@ def d_rule(disjunct, flag): else: disjunct.c1 = Constraint(expr=inequality(3, m.x, 4)) disjunct.c2 = Constraint(expr=inequality(1, m.y, 2)) - m.d = Disjunct([0,1], rule=d_rule) + + m.d = Disjunct([0, 1], rule=d_rule) + def disj_rule(m): return [m.d[0], m.d[1]] + m.disjunction = Disjunction(rule=disj_rule) - m.obj = Objective(expr=m.x + 2*m.y) + m.obj = Objective(expr=m.x + 2 * m.y) return m @@ -211,6 +251,7 @@ def c_rule(b, i): def d_rule(d, j): m = d.model() d.c = Constraint(m.I[:j], rule=c_rule) + m.d = Disjunct(m.I, rule=d_rule) m.disjunction = Disjunction(expr=[m.d[i] for i in m.I]) return m @@ -229,10 +270,12 @@ def disjunct_rule(d, i, k): d.cons_a = Constraint(expr=m.x[i] >= 5) if k == 'b': d.cons_b = Constraint(expr=m.x[i] <= 0) + m.disjunct = Disjunct(m.A, m.B, rule=disjunct_rule) def disj_rule(m, i): return [m.disjunct[i, k] for k in m.B] + m.disjunction = Disjunction(m.A, rule=disj_rule) return m @@ -250,10 +293,12 @@ def disjunct_rule(d, s, flag): d.c = Constraint(expr=m.a[s] >= 6) else: d.c = Constraint(expr=m.a[s] <= 3) + m.disjunct = Disjunct(m.s, [0, 1], rule=disjunct_rule) def disjunction_rule(m, s): return [m.disjunct[s, flag] for flag in [0, 1]] + m.disjunction = Disjunction(m.s, rule=disjunction_rule) return m @@ -262,12 +307,13 @@ def makeIndexedDisjunction_SkipIndex(): """Two-term indexed disjunction where one of the two indices is skipped""" m = ConcreteModel() m.x = Var(bounds=(0, 10)) - @m.Disjunct([0,1]) + + @m.Disjunct([0, 1]) def disjuncts(d, i): m = d.model() d.cons = Constraint(expr=m.x == i) - @m.Disjunction([0,1]) + @m.Disjunction([0, 1]) def disjunctions(m, i): if i == 0: return Disjunction.Skip @@ -275,6 +321,7 @@ def disjunctions(m, i): return m + def makeTwoTermMultiIndexedDisjunction(): """Two-term indexed disjunction with tuple indices""" m = ConcreteModel() @@ -288,10 +335,12 @@ def d_rule(disjunct, flag, s, t): disjunct.c = Constraint(expr=m.a[s, t] == 0) else: disjunct.c = Constraint(expr=m.a[s, t] >= 5) + m.disjunct = Disjunct([0, 1], m.s, m.t, rule=d_rule) def disj_rule(m, s, t): return [m.disjunct[0, s, t], m.disjunct[1, s, t]] + m.disjunction = Disjunction(m.s, m.t, rule=disj_rule) return m @@ -316,20 +365,25 @@ def disjunction(m): return m + def add_disj_not_on_block(m): def simpdisj_rule(disjunct): m = disjunct.model() disjunct.c = Constraint(expr=m.a >= 3) + m.simpledisj = Disjunct(rule=simpdisj_rule) + def simpledisj2_rule(disjunct): m = disjunct.model() disjunct.c = Constraint(expr=m.a <= 3.5) + m.simpledisj2 = Disjunct(rule=simpledisj2_rule) m.disjunction2 = Disjunction(expr=[m.simpledisj, m.simpledisj2]) return m + def makeDisjunctionsOnIndexedBlock(): - """Two disjunctions (one indexed an one not), each on a separate + """Two disjunctions (one indexed and one not), each on a separate BlockData of an IndexedBlock of length 2 """ m = ConcreteModel() @@ -346,6 +400,7 @@ def disjunct1(disjunct, s, flag): def disjunction1_rule(m, s): return [m.disjunct1[s, flag] for flag in [0, 1]] + m.disjunction1 = Disjunction(m.s, rule=disjunction1_rule) m.b = Block([0, 1]) @@ -356,10 +411,12 @@ def disjunct2_rule(disjunct, flag): disjunct.c = Constraint(expr=m.b[0].x <= 0) else: disjunct.c = Constraint(expr=m.b[0].x >= 0) + m.b[0].disjunct = Disjunct([0, 1], rule=disjunct2_rule) def disjunction(b, i): return [b.disjunct[0], b.disjunct[1]] + m.b[0].disjunction = Disjunction([0], rule=disjunction) m.b[1].y = Var(bounds=(-3, 3)) @@ -367,13 +424,12 @@ def disjunction(b, i): m.b[1].disjunct0.c = Constraint(expr=m.b[1].y <= 0) m.b[1].disjunct1 = Disjunct() m.b[1].disjunct1.c = Constraint(expr=m.b[1].y >= 0) - m.b[1].disjunction = Disjunction( - expr=[m.b[1].disjunct0, m.b[1].disjunct1]) + m.b[1].disjunction = Disjunction(expr=[m.b[1].disjunct0, m.b[1].disjunct1]) return m def makeTwoTermDisj_BlockOnDisj(): - """SimpleDisjunction where one of the Disjuncts contains three different + """SimpleDisjunction where one of the Disjuncts contains three different blocks: two simple and one indexed""" m = ConcreteModel() m.x = Var(bounds=(0, 1000)) @@ -391,6 +447,7 @@ def disj_rule(d, flag): d.bb[1].c = Constraint(expr=m.x == 0) else: d.c = Constraint(expr=m.x >= 80) + m.evil = Disjunct([0, 1], rule=disj_rule) m.disjunction = Disjunction(expr=[m.evil[0], m.evil[1]]) return m @@ -399,10 +456,10 @@ def disj_rule(d, flag): def makeNestedDisjunctions(): """Three-term SimpleDisjunction built from two IndexedDisjuncts and one SimpleDisjunct. The SimpleDisjunct and one of the DisjunctDatas each - contain a nested SimpleDisjunction (the disjuncts of which are declared + contain a nested SimpleDisjunction (the disjuncts of which are declared on the same disjunct as the disjunction). - (makeNestedDisjunctions_NestedDisjuncts is a much simpler model. All + (makeNestedDisjunctions_NestedDisjuncts is a much simpler model. All this adds is that it has a nested disjunction on a DisjunctData as well as on a SimpleDisjunct. So mostly it exists for historical reasons.) """ @@ -414,20 +471,24 @@ def makeNestedDisjunctions(): def disjunct_rule(disjunct, flag): m = disjunct.model() if flag: + def innerdisj_rule(disjunct, flag): m = disjunct.model() if flag: disjunct.c = Constraint(expr=m.z >= 5) else: disjunct.c = Constraint(expr=m.z == 0) + disjunct.innerdisjunct = Disjunct([0, 1], rule=innerdisj_rule) @disjunct.Disjunction([0]) def innerdisjunction(b, i): return [b.innerdisjunct[0], b.innerdisjunct[1]] + disjunct.c = Constraint(expr=m.a <= 2) else: disjunct.c = Constraint(expr=m.x == 2) + m.disjunct = Disjunct([0, 1], rule=disjunct_rule) # I want a SimpleDisjunct with a disjunction in it too @@ -443,10 +504,11 @@ def innerdisjunct1(disjunct): disjunct.c = Constraint(expr=m.x >= 4) disjunct.innerdisjunction = Disjunction( - expr=[disjunct.innerdisjunct0, disjunct.innerdisjunct1]) + expr=[disjunct.innerdisjunct0, disjunct.innerdisjunct1] + ) + m.simpledisjunct = Disjunct(rule=simpledisj_rule) - m.disjunction = Disjunction( - expr=[m.simpledisjunct, m.disjunct[0], m.disjunct[1]]) + m.disjunction = Disjunction(expr=[m.simpledisjunct, m.disjunct[0], m.disjunct[1]]) return m @@ -499,6 +561,7 @@ def d1_rule(disjunct, flag): disjunct.c = Constraint(expr=m.a == 0) else: disjunct.c = Constraint(expr=m.a >= 5) + m.disjunct1 = Disjunct([0, 1], rule=d1_rule) def d2_rule(disjunct, flag): @@ -506,6 +569,7 @@ def d2_rule(disjunct, flag): disjunct.c = Constraint(expr=m.a >= 30) else: disjunct.c = Constraint(expr=m.a == 100) + m.disjunct2 = Disjunct([0, 1], rule=d2_rule) m.disjunction1 = Disjunction(expr=[m.disjunct1[0], m.disjunct1[1]]) @@ -514,7 +578,7 @@ def d2_rule(disjunct, flag): def makeDisjunctInMultipleDisjunctions(): - """This is not a transformable model! Two SimpleDisjunctions which have + """This is not a transformable model! Two SimpleDisjunctions which have a shared disjunct. """ m = ConcreteModel() @@ -526,6 +590,7 @@ def d1_rule(disjunct, flag): disjunct.c = Constraint(expr=m.a == 0) else: disjunct.c = Constraint(expr=m.a >= 5) + m.disjunct1 = Disjunct([0, 1], rule=d1_rule) def d2_rule(disjunct, flag): @@ -533,6 +598,7 @@ def d2_rule(disjunct, flag): disjunct.c = Constraint(expr=m.a >= 30) else: disjunct.c = Constraint(expr=m.a == 100) + m.disjunct2 = Disjunct([0, 1], rule=d2_rule) m.disjunction1 = Disjunction(expr=[m.disjunct1[0], m.disjunct1[1]]) @@ -543,7 +609,7 @@ def d2_rule(disjunct, flag): def makeDuplicatedNestedDisjunction(): - """Not a transformable model (because of disjuncts shared between + """Not a transformable model (because of disjuncts shared between disjunctions): A SimpleDisjunction where one of the disjuncts contains two SimpleDisjunctions with the same Disjuncts. """ @@ -553,26 +619,31 @@ def makeDuplicatedNestedDisjunction(): def outerdisj_rule(d, flag): m = d.model() if flag: + def innerdisj_rule(d, flag): m = d.model() if flag: d.c = Constraint(expr=m.x >= 2) else: d.c = Constraint(expr=m.x == 0) + d.innerdisjunct = Disjunct([0, 1], rule=innerdisj_rule) - d.innerdisjunction = Disjunction(expr=[d.innerdisjunct[0], - d.innerdisjunct[1]]) - d.duplicateddisjunction = Disjunction(expr=[d.innerdisjunct[0], - d.innerdisjunct[1]]) + d.innerdisjunction = Disjunction( + expr=[d.innerdisjunct[0], d.innerdisjunct[1]] + ) + d.duplicateddisjunction = Disjunction( + expr=[d.innerdisjunct[0], d.innerdisjunct[1]] + ) else: d.c = Constraint(expr=m.x == 8) + m.outerdisjunct = Disjunct([0, 1], rule=outerdisj_rule) - m.disjunction = Disjunction(expr=[m.outerdisjunct[0], - m.outerdisjunct[1]]) + m.disjunction = Disjunction(expr=[m.outerdisjunct[0], m.outerdisjunct[1]]) return m - + + def makeDisjunctWithRangeSet(): - """Two-term SimpleDisjunction where one of the disjuncts contains a + """Two-term SimpleDisjunction where one of the disjuncts contains a RangeSet""" m = ConcreteModel() m.x = Var(bounds=(0, 1)) @@ -583,13 +654,15 @@ def makeDisjunctWithRangeSet(): m.disj = Disjunction(expr=[m.d1, m.d2]) return m + ########################## # Grossmann lecture models ########################## + def grossmann_oneDisj(): m = ConcreteModel() - m.x = Var(bounds=(0,20)) + m.x = Var(bounds=(0, 20)) m.y = Var(bounds=(0, 20)) m.disjunct1 = Disjunct() m.disjunct1.constraintx = Constraint(expr=inequality(0, m.x, 2)) @@ -601,10 +674,11 @@ def grossmann_oneDisj(): m.disjunction = Disjunction(expr=[m.disjunct1, m.disjunct2]) - m.objective = Objective(expr=m.x + 2*m.y, sense=maximize) + m.objective = Objective(expr=m.x + 2 * m.y, sense=maximize) return m + def to_break_constraint_tolerances(): m = ConcreteModel() m.x = Var(bounds=(0, 130)) @@ -619,58 +693,62 @@ def to_break_constraint_tolerances(): m.disjunction = Disjunction(expr=[m.disjunct1, m.disjunct2]) - m.objective = Objective(expr=m.x + 2*m.y, sense=maximize) + m.objective = Objective(expr=m.x + 2 * m.y, sense=maximize) return m + def grossmann_twoDisj(): m = grossmann_oneDisj() m.disjunct3 = Disjunct() m.disjunct3.constraintx = Constraint(expr=inequality(1, m.x, 2.5)) m.disjunct3.constrainty = Constraint(expr=inequality(6.5, m.y, 8)) - + m.disjunct4 = Disjunct() m.disjunct4.constraintx = Constraint(expr=inequality(9, m.x, 11)) m.disjunct4.constrainty = Constraint(expr=inequality(2, m.y, 3.5)) m.disjunction2 = Disjunction(expr=[m.disjunct3, m.disjunct4]) - + return m + def twoDisj_twoCircles_easy(): m = ConcreteModel() - m.x = Var(bounds=(0,8)) - m.y = Var(bounds=(0,10)) + m.x = Var(bounds=(0, 8)) + m.y = Var(bounds=(0, 10)) m.upper_circle = Disjunct() - m.upper_circle.cons = Constraint(expr=(m.x - 1)**2 + (m.y - 6)**2 <= 2) + m.upper_circle.cons = Constraint(expr=(m.x - 1) ** 2 + (m.y - 6) ** 2 <= 2) m.lower_circle = Disjunct() - m.lower_circle.cons = Constraint(expr=(m.x - 4)**2 + (m.y - 2)**2 <= 2) + m.lower_circle.cons = Constraint(expr=(m.x - 4) ** 2 + (m.y - 2) ** 2 <= 2) m.disjunction = Disjunction(expr=[m.upper_circle, m.lower_circle]) - + m.obj = Objective(expr=m.x + m.y, sense=maximize) return m + def fourCircles(): m = twoDisj_twoCircles_easy() # and add two more overlapping circles, a la the Grossmann test case with # the rectangles. (but not change my nice integral optimal solution...) m.upper_circle2 = Disjunct() - m.upper_circle2.cons = Constraint(expr=(m.x - 2)**2 + (m.y - 7)**2 <= 1) + m.upper_circle2.cons = Constraint(expr=(m.x - 2) ** 2 + (m.y - 7) ** 2 <= 1) m.lower_circle2 = Disjunct() - m.lower_circle2.cons = Constraint(expr=(m.x - 5)**2 + (m.y - 3)**2 <= 2) + m.lower_circle2.cons = Constraint(expr=(m.x - 5) ** 2 + (m.y - 3) ** 2 <= 2) m.disjunction2 = Disjunction(expr=[m.upper_circle2, m.lower_circle2]) return m + def makeDisjunctWithExpression(): - """Two-term SimpleDisjunction where one of the disjuncts contains an - Expression. This is used to make sure that we correctly handle types we + """Two-term SimpleDisjunction where one of the disjuncts contains an + Expression. This is used to make sure that we correctly handle types we hit in disjunct.component_objects(active=True)""" m = ConcreteModel() m.x = Var(bounds=(0, 1)) @@ -681,10 +759,11 @@ def makeDisjunctWithExpression(): m.disj = Disjunction(expr=[m.d1, m.d2]) return m + def makeDisjunctionOfDisjunctDatas(): """Two SimpleDisjunctions, where each are disjunctions of DisjunctDatas. This adds nothing to makeTwoSimpleDisjunctions but exists for convenience - because it has the same mathematical meaning as + because it has the same mathematical meaning as makeAnyIndexedDisjunctionOfDisjunctDatas """ m = ConcreteModel() @@ -692,46 +771,48 @@ def makeDisjunctionOfDisjunctDatas(): m.obj = Objective(expr=m.x) - m.idx = Set(initialize=[1,2]) + m.idx = Set(initialize=[1, 2]) m.firstTerm = Disjunct(m.idx) m.firstTerm[1].cons = Constraint(expr=m.x == 0) m.firstTerm[2].cons = Constraint(expr=m.x == 2) m.secondTerm = Disjunct(m.idx) m.secondTerm[1].cons = Constraint(expr=m.x >= 2) m.secondTerm[2].cons = Constraint(expr=m.x >= 3) - + m.disjunction = Disjunction(expr=[m.firstTerm[1], m.secondTerm[1]]) m.disjunction2 = Disjunction(expr=[m.firstTerm[2], m.secondTerm[2]]) return m + def makeAnyIndexedDisjunctionOfDisjunctDatas(): """An IndexedDisjunction indexed by Any, with two two-term DisjunctionDatas - build from DisjunctDatas. Identical mathematically to + build from DisjunctDatas. Identical mathematically to makeDisjunctionOfDisjunctDatas. Used to test that the right things happen for a case where soemone - implements an algorithm which iteratively generates disjuncts and + implements an algorithm which iteratively generates disjuncts and retransforms""" m = ConcreteModel() m.x = Var(bounds=(-100, 100)) m.obj = Objective(expr=m.x) - m.idx = Set(initialize=[1,2]) + m.idx = Set(initialize=[1, 2]) m.firstTerm = Disjunct(m.idx) m.firstTerm[1].cons = Constraint(expr=m.x == 0) m.firstTerm[2].cons = Constraint(expr=m.x == 2) m.secondTerm = Disjunct(m.idx) m.secondTerm[1].cons = Constraint(expr=m.x >= 2) m.secondTerm[2].cons = Constraint(expr=m.x >= 3) - + m.disjunction = Disjunction(Any) m.disjunction[1] = [m.firstTerm[1], m.secondTerm[1]] m.disjunction[2] = [m.firstTerm[2], m.secondTerm[2]] return m + def makeNetworkDisjunction(minimize=True): - """ creates a GDP model with pyomo.network components """ + """creates a GDP model with pyomo.network components""" m = ConcreteModel() m.feed = feed = Block() @@ -741,33 +822,31 @@ def makeNetworkDisjunction(minimize=True): m.orange = orange = Disjunct() m.blue = blue = Disjunct() - m.orange_or_blue = Disjunction(expr=[orange,blue]) + m.orange_or_blue = Disjunction(expr=[orange, blue]) blue.blue_box = blue_box = Block() - feed.x = Var(bounds=(0,1)) - wkbx.x = Var(bounds=(0,1)) - dest.x = Var(bounds=(0,1)) + feed.x = Var(bounds=(0, 1)) + wkbx.x = Var(bounds=(0, 1)) + dest.x = Var(bounds=(0, 1)) - wkbx.inlet = ntwk.Port(initialize={"x":wkbx.x}) - wkbx.outlet = ntwk.Port(initialize={"x":wkbx.x}) + wkbx.inlet = ntwk.Port(initialize={"x": wkbx.x}) + wkbx.outlet = ntwk.Port(initialize={"x": wkbx.x}) - feed.outlet = ntwk.Port(initialize={"x":feed.x}) - dest.inlet = ntwk.Port(initialize={"x":dest.x}) + feed.outlet = ntwk.Port(initialize={"x": feed.x}) + dest.inlet = ntwk.Port(initialize={"x": dest.x}) - blue_box.x = Var(bounds=(0,1)) - blue_box.x_wkbx = Var(bounds=(0,1)) - blue_box.x_dest = Var(bounds=(0,1)) + blue_box.x = Var(bounds=(0, 1)) + blue_box.x_wkbx = Var(bounds=(0, 1)) + blue_box.x_dest = Var(bounds=(0, 1)) + blue_box.inlet_feed = ntwk.Port(initialize={"x": blue_box.x}) + blue_box.outlet_wkbx = ntwk.Port(initialize={"x": blue_box.x}) - blue_box.inlet_feed = ntwk.Port(initialize={"x":blue_box.x}) - blue_box.outlet_wkbx = ntwk.Port(initialize={"x":blue_box.x}) + blue_box.inlet_wkbx = ntwk.Port(initialize={"x": blue_box.x_wkbx}) + blue_box.outlet_dest = ntwk.Port(initialize={"x": blue_box.x_dest}) - blue_box.inlet_wkbx = ntwk.Port(initialize={"x":blue_box.x_wkbx}) - blue_box.outlet_dest = ntwk.Port(initialize={"x":blue_box.x_dest}) - - blue_box.multiplier_constr = Constraint(expr=blue_box.x_dest == \ - 2*blue_box.x_wkbx) + blue_box.multiplier_constr = Constraint(expr=blue_box.x_dest == 2 * blue_box.x_wkbx) # orange arcs orange.a1 = ntwk.Arc(source=feed.outlet, destination=wkbx.inlet) @@ -790,20 +869,22 @@ def makeNetworkDisjunction(minimize=True): return m + def makeExpandedNetworkDisjunction(minimize=True): m = makeNetworkDisjunction(minimize) TransformationFactory('network.expand_arcs').apply_to(m) return m + def makeThreeTermDisjunctionWithOneVarInOneDisjunct(): - """This is to make sure hull doesn't create more disaggregated variables - than it needs to: Here, x only appears in the first Disjunct, so we only - need two copies: one as usual for that disjunct and then one other that is + """This is to make sure hull doesn't create more disaggregated variables + than it needs to: Here, x only appears in the first Disjunct, so we only + need two copies: one as usual for that disjunct and then one other that is free if either of the second two Disjuncts is active and 0 otherwise. """ m = ConcreteModel() - m.x = Var(bounds=(-2,8)) - m.y = Var(bounds=(3,4)) + m.x = Var(bounds=(-2, 8)) + m.y = Var(bounds=(3, 4)) m.d1 = Disjunct() m.d1.c1 = Constraint(expr=m.x <= 3) m.d1.c2 = Constraint(expr=m.y >= 3.5) @@ -816,138 +897,166 @@ def makeThreeTermDisjunctionWithOneVarInOneDisjunct(): return m + def makeNestedNonlinearModel(): - """This is actually a disjunction between two points, but it's written + """This is actually a disjunction between two points, but it's written as a nested disjunction over four circles!""" m = ConcreteModel() m.x = Var(bounds=(-10, 10)) m.y = Var(bounds=(-10, 10)) m.d1 = Disjunct() m.d1.lower_circle = Constraint(expr=m.x**2 + m.y**2 <= 1) - m.disj = Disjunction(expr=[[m.x == 10], [(sqrt(2) - m.x)**2 + (sqrt(2) - - m.y)**2 <= - 1]]) + m.disj = Disjunction( + expr=[[m.x == 10], [(sqrt(2) - m.x) ** 2 + (sqrt(2) - m.y) ** 2 <= 1]] + ) m.d2 = Disjunct() - m.d2.upper_circle = Constraint(expr=(3 - m.x)**2 + (3 - m.y)**2 <= 1) - m.d2.inner = Disjunction(expr=[[m.y == 10], [(sqrt(2) - m.x)**2 + (sqrt(2) - - m.y)**2 - <= 1]]) + m.d2.upper_circle = Constraint(expr=(3 - m.x) ** 2 + (3 - m.y) ** 2 <= 1) + m.d2.inner = Disjunction( + expr=[[m.y == 10], [(sqrt(2) - m.x) ** 2 + (sqrt(2) - m.y) ** 2 <= 1]] + ) m.outer = Disjunction(expr=[m.d1, m.d2]) m.obj = Objective(expr=m.x + m.y) return m + ## # Variations on the example from the Kronqvist et al. Between Steps paper ## + def makeBetweenStepsPaperExample(): """Original example model, implicit disjunction""" m = ConcreteModel() - m.I = RangeSet(1,4) - m.x = Var(m.I, bounds=(-2,6)) + m.I = RangeSet(1, 4) + m.x = Var(m.I, bounds=(-2, 6)) - m.disjunction = Disjunction(expr=[[sum(m.x[i]**2 for i in m.I) <= 1], - [sum((3 - m.x[i])**2 for i in m.I) <= - 1]]) + m.disjunction = Disjunction( + expr=[ + [sum(m.x[i] ** 2 for i in m.I) <= 1], + [sum((3 - m.x[i]) ** 2 for i in m.I) <= 1], + ] + ) m.obj = Objective(expr=m.x[2] - m.x[1], sense=maximize) return m + def makeBetweenStepsPaperExample_DeclareVarOnDisjunct(): - """Exactly the same model as above, but declaring the Disjuncts explicitly + """Exactly the same model as above, but declaring the Disjuncts explicitly and declaring the variables on one of them. """ m = ConcreteModel() - m.I = RangeSet(1,4) + m.I = RangeSet(1, 4) m.disj1 = Disjunct() - m.disj1.x = Var(m.I, bounds=(-2,6)) - m.disj1.c = Constraint(expr=sum(m.disj1.x[i]**2 for i in m.I) <= 1) + m.disj1.x = Var(m.I, bounds=(-2, 6)) + m.disj1.c = Constraint(expr=sum(m.disj1.x[i] ** 2 for i in m.I) <= 1) m.disj2 = Disjunct() - m.disj2.c = Constraint(expr=sum((3 - m.disj1.x[i])**2 for i in m.I) <= - 1) + m.disj2.c = Constraint(expr=sum((3 - m.disj1.x[i]) ** 2 for i in m.I) <= 1) m.disjunction = Disjunction(expr=[m.disj1, m.disj2]) m.obj = Objective(expr=m.disj1.x[2] - m.disj1.x[1], sense=maximize) return m + def makeBetweenStepsPaperExample_Nested(): - """Mathematically, this is really dumb, but I am nesting this model on + """Mathematically, this is really dumb, but I am nesting this model on itself because it makes writing tests simpler (I can recycle.)""" m = makeBetweenStepsPaperExample_DeclareVarOnDisjunct() m.disj2.disjunction = Disjunction( - expr=[[sum(m.disj1.x[i]**2 for i in m.I) <= 1], - [sum((3 - m.disj1.x[i])**2 for i in m.I) <= 1]]) - + expr=[ + [sum(m.disj1.x[i] ** 2 for i in m.I) <= 1], + [sum((3 - m.disj1.x[i]) ** 2 for i in m.I) <= 1], + ] + ) + return m + def instantiate_hierarchical_nested_model(m): - """helper function to instantiate a nested version of the model with + """helper function to instantiate a nested version of the model with the Disjuncts and Disjunctions on blocks""" m.disj1 = Disjunct() m.disjunct_block.disj2 = Disjunct() - m.disj1.c = Constraint(expr=sum(m.x[i]**2 for i in m.I) <= 1) - m.disjunct_block.disj2.c = Constraint(expr=sum((3 - m.x[i])**2 for i in - m.I) <= 1) + m.disj1.c = Constraint(expr=sum(m.x[i] ** 2 for i in m.I) <= 1) + m.disjunct_block.disj2.c = Constraint(expr=sum((3 - m.x[i]) ** 2 for i in m.I) <= 1) m.disjunct_block.disj2.disjunction = Disjunction( - expr=[[sum(m.x[i]**2 for i in m.I) <= 1], - [sum((3 - m.x[i])**2 for i in m.I) <= 1]]) + expr=[ + [sum(m.x[i] ** 2 for i in m.I) <= 1], + [sum((3 - m.x[i]) ** 2 for i in m.I) <= 1], + ] + ) m.disjunction_block.disjunction = Disjunction( - expr=[m.disj1, m.disjunct_block.disj2]) + expr=[m.disj1, m.disjunct_block.disj2] + ) + -def makeHierarchicalNested_DeclOrderMatchesInstantationOrder(): - """Here, we put the disjunctive components on Blocks, but we do it in the +def makeHierarchicalNested_DeclOrderMatchesInstantiationOrder(): + """Here, we put the disjunctive components on Blocks, but we do it in the same order that we declared the blocks, that is, on each block, decl order matches instantiation order.""" m = ConcreteModel() - m.I = RangeSet(1,4) - m.x = Var(m.I, bounds=(-2,6)) + m.I = RangeSet(1, 4) + m.x = Var(m.I, bounds=(-2, 6)) m.disjunct_block = Block() m.disjunction_block = Block() instantiate_hierarchical_nested_model(m) return m -def makeHierarchicalNested_DeclOrderOppositeInstantationOrder(): - """Here, we declare the Blocks in the opposite order. This means that - decl order will be *opposite* instantiation order, which means that we - can break our targets preprocessing without even using targets if we + +def makeHierarchicalNested_DeclOrderOppositeInstantiationOrder(): + """Here, we declare the Blocks in the opposite order. This means that + decl order will be *opposite* instantiation order, which means that we + can break our targets preprocessing without even using targets if we are not correctly identifying what is nested in what!""" m = ConcreteModel() - m.I = RangeSet(1,4) - m.x = Var(m.I, bounds=(-2,6)) + m.I = RangeSet(1, 4) + m.x = Var(m.I, bounds=(-2, 6)) m.disjunction_block = Block() m.disjunct_block = Block() instantiate_hierarchical_nested_model(m) return m + def makeNonQuadraticNonlinearGDP(): - """We use this in testing between steps--Needed non-quadratic and not + """We use this in testing between steps--Needed non-quadratic and not additively separable constraint expressions on a Disjunct.""" m = ConcreteModel() - m.I = RangeSet(1,4) - m.I1 = RangeSet(1,2) - m.I2 = RangeSet(3,4) - m.x = Var(m.I, bounds=(-2,6)) + m.I = RangeSet(1, 4) + m.I1 = RangeSet(1, 2) + m.I2 = RangeSet(3, 4) + m.x = Var(m.I, bounds=(-2, 6)) # sum of 4-norms... m.disjunction = Disjunction( - expr=[[sum(m.x[i]**4 for i in m.I1)**(1/4) + \ - sum(m.x[i]**4 for i in m.I2)**(1/4) <= 1], - [sum((3 - m.x[i])**4 for i in m.I1)**(1/4) + - sum((3 - m.x[i])**4 for i in m.I2)**(1/4) <= 1]]) + expr=[ + [ + sum(m.x[i] ** 4 for i in m.I1) ** (1 / 4) + + sum(m.x[i] ** 4 for i in m.I2) ** (1 / 4) + <= 1 + ], + [ + sum((3 - m.x[i]) ** 4 for i in m.I1) ** (1 / 4) + + sum((3 - m.x[i]) ** 4 for i in m.I2) ** (1 / 4) + <= 1 + ], + ] + ) m.obj = Objective(expr=m.x[2] - m.x[1], sense=maximize) return m + # # Logical Constraints on Disjuncts # + def makeLogicalConstraintsOnDisjuncts(): m = ConcreteModel() m.s = RangeSet(4) @@ -967,13 +1076,13 @@ def makeLogicalConstraintsOnDisjuncts(): m.o = Objective(expr=m.x) # Add the logical proposition - m.p = LogicalConstraint( - expr=m.d[1].indicator_var.implies(m.d[4].indicator_var)) + m.p = LogicalConstraint(expr=m.d[1].indicator_var.implies(m.d[4].indicator_var)) # Use the logical stuff to make choosing d1 and d4 infeasible: m.bwahaha = LogicalConstraint(expr=m.Y[1].xor(m.Y[2])) return m + def makeLogicalConstraintsOnDisjuncts_NonlinearConvex(): # same game as the previous model, but include some nonlinear # constraints. This is to test gdpopt because it needs to handle the logical @@ -1001,6 +1110,7 @@ def makeLogicalConstraintsOnDisjuncts_NonlinearConvex(): return m + def makeBooleanVarsOnDisjuncts(): # same as linear model above, but declare the BooleanVar on one of the # Disjuncts, just to make sure we make references and stuff correctly. @@ -1017,14 +1127,12 @@ def makeBooleanVarsOnDisjuncts(): m.d[1].logical = LogicalConstraint(expr=~m.d[1].Y[1]) m.d[2].c = Constraint(expr=m.x >= 3) m.d[3].c = Constraint(expr=m.x >= 8) - m.d[4].logical = LogicalConstraint( - expr=m.d[1].Y[1].equivalent_to(m.d[1].Y[2])) + m.d[4].logical = LogicalConstraint(expr=m.d[1].Y[1].equivalent_to(m.d[1].Y[2])) m.d[4].c = Constraint(expr=m.x == 2.5) m.o = Objective(expr=m.x) # Add the logical proposition - m.p = LogicalConstraint( - expr=m.d[1].indicator_var.implies(m.d[4].indicator_var)) + m.p = LogicalConstraint(expr=m.d[1].indicator_var.implies(m.d[4].indicator_var)) # Use the logical stuff to make choosing d1 and d4 infeasible: m.bwahaha = LogicalConstraint(expr=m.d[1].Y[1].xor(m.d[1].Y[2])) diff --git a/pyomo/gdp/tests/test_basic_step.py b/pyomo/gdp/tests/test_basic_step.py index d0c15627a89..631611a2651 100644 --- a/pyomo/gdp/tests/test_basic_step.py +++ b/pyomo/gdp/tests/test_basic_step.py @@ -20,6 +20,7 @@ from pyomo.common.fileutils import import_file from os.path import abspath, dirname, normpath, join + currdir = dirname(abspath(__file__)) exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'gdp')) @@ -28,48 +29,33 @@ class TestBasicStep(unittest.TestCase): """Tests disjunctive basic steps.""" def test_improper_basic_step(self): - model_builder = import_file( - join(exdir, 'two_rxn_lee', 'two_rxn_model.py')) + model_builder = import_file(join(exdir, 'two_rxn_lee', 'two_rxn_model.py')) m = model_builder.build_model() m.basic_step = apply_basic_step([m.reactor_choice, m.max_demand]) for disj in m.basic_step.disjuncts.values(): - self.assertEqual( - disj.improper_constraints[1].body.polynomial_degree(), 2) - self.assertEqual( - disj.improper_constraints[1].lower, None) - self.assertEqual( - disj.improper_constraints[1].upper, 2) - self.assertEqual( - len(disj.improper_constraints), 1) + self.assertEqual(disj.improper_constraints[1].body.polynomial_degree(), 2) + self.assertEqual(disj.improper_constraints[1].lower, None) + self.assertEqual(disj.improper_constraints[1].upper, 2) + self.assertEqual(len(disj.improper_constraints), 1) self.assertFalse(m.max_demand.active) def test_improper_basic_step_linear(self): - model_builder = import_file( - join(exdir, 'two_rxn_lee', 'two_rxn_model.py')) + model_builder = import_file(join(exdir, 'two_rxn_lee', 'two_rxn_model.py')) m = model_builder.build_model(use_mccormick=True) - m.basic_step = apply_basic_step([ - m.reactor_choice, m.max_demand, m.mccormick_1, m.mccormick_2]) + m.basic_step = apply_basic_step( + [m.reactor_choice, m.max_demand, m.mccormick_1, m.mccormick_2] + ) for disj in m.basic_step.disjuncts.values(): - self.assertIs( - disj.improper_constraints[1].body, m.P) - self.assertEqual( - disj.improper_constraints[1].lower, None) - self.assertEqual( - disj.improper_constraints[1].upper, 2) - self.assertEqual( - disj.improper_constraints[2].body.polynomial_degree(), 1) - self.assertEqual( - disj.improper_constraints[2].lower, None) - self.assertEqual( - disj.improper_constraints[2].upper, 0) - self.assertEqual( - disj.improper_constraints[3].body.polynomial_degree(), 1) - self.assertEqual( - disj.improper_constraints[3].lower, None) - self.assertEqual( - disj.improper_constraints[3].upper, 0) - self.assertEqual( - len(disj.improper_constraints), 3) + self.assertIs(disj.improper_constraints[1].body, m.P) + self.assertEqual(disj.improper_constraints[1].lower, None) + self.assertEqual(disj.improper_constraints[1].upper, 2) + self.assertEqual(disj.improper_constraints[2].body.polynomial_degree(), 1) + self.assertEqual(disj.improper_constraints[2].lower, None) + self.assertEqual(disj.improper_constraints[2].upper, 0) + self.assertEqual(disj.improper_constraints[3].body.polynomial_degree(), 1) + self.assertEqual(disj.improper_constraints[3].lower, None) + self.assertEqual(disj.improper_constraints[3].upper, 0) + self.assertEqual(len(disj.improper_constraints), 3) self.assertFalse(m.max_demand.active) self.assertFalse(m.mccormick_1.active) self.assertFalse(m.mccormick_2.active) @@ -101,19 +87,21 @@ def test_improper_basic_step_simpleConstraint(self): def test_improper_basic_step_constraintData(self): m = models.makeTwoTermDisj() + @m.Constraint([1, 2]) def indexed(m, i): return m.x <= m.a + i m.basic_step = apply_basic_step([m.disjunction, m.indexed[1]]) self.check_after_improper_basic_step(m) - + self.assertFalse(m.indexed[1].active) self.assertTrue(m.indexed[2].active) self.assertFalse(m.disjunction.active) def test_improper_basic_step_indexedConstraint(self): m = models.makeTwoTermDisj() + @m.Constraint([1, 2]) def indexed(m, i): return m.x <= m.a + i @@ -133,11 +121,32 @@ def test_indicator_var_references(self): m.basic_step = apply_basic_step([m.disjunction, m.simple]) - refs = [v for v in m.basic_step.component_data_objects( - BooleanVar, sort=SortComponents.deterministic)] + refs = [ + v + for v in m.basic_step.component_data_objects( + BooleanVar, sort=SortComponents.deterministic + ) + ] self.assertEqual(len(refs), 2) self.assertIs(refs[0][None], m.d[0].indicator_var) self.assertIs(refs[1][None], m.d[1].indicator_var) + def test_arg_errors(self): + m = models.makeTwoTermDisj() + m.simple = Constraint(expr=m.x <= m.a + 1) + + with self.assertRaisesRegex( + ValueError, + 'apply_basic_step only accepts a list containing ' + 'Disjunctions or Constraints', + ): + apply_basic_step([m.disjunction, m.simple, m.x]) + with self.assertRaisesRegex( + ValueError, + 'apply_basic_step: argument list must contain at least one Disjunction', + ): + apply_basic_step([m.simple, m.simple]) + + if __name__ == '__main__': unittest.main() diff --git a/pyomo/gdp/tests/test_bigm.py b/pyomo/gdp/tests/test_bigm.py index 2ff06ad6d24..b3bc5dd74b3 100644 --- a/pyomo/gdp/tests/test_bigm.py +++ b/pyomo/gdp/tests/test_bigm.py @@ -9,19 +9,33 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +from pyomo.common.dependencies import dill_available import pyomo.common.unittest as unittest from pyomo.common.deprecation import RenamedClass -from pyomo.environ import (TransformationFactory, Block, Set, Constraint, - ComponentMap, Suffix, ConcreteModel, Var, - Any, value) +from pyomo.environ import ( + TransformationFactory, + Block, + Set, + Constraint, + ComponentMap, + Suffix, + ConcreteModel, + Var, + Any, + value, +) from pyomo.gdp import Disjunct, Disjunction, GDP_Error from pyomo.core.base import constraint, _ConstraintData -from pyomo.core.expr.sympy_tools import sympy_available +from pyomo.core.expr.compare import ( + assertExpressionsEqual, + assertExpressionsStructurallyEqual, +) from pyomo.repn import generate_standard_repn from pyomo.common.log import LoggingIntercept import logging +import pyomo.core.expr as EXPR import pyomo.gdp.tests.models as models import pyomo.gdp.tests.common_tests as ct @@ -31,10 +45,12 @@ from io import StringIO + class CommonTests: def diff_apply_to_and_create_using(self, model): ct.diff_apply_to_and_create_using(self, model, 'gdp.bigm') + class TwoTermDisj(unittest.TestCase, CommonTests): def setUp(self): # set seed so we can test name collisions predictably @@ -48,19 +64,12 @@ def test_new_block_created(self): transBlock = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) - # check that we have the lbub set on the transformation block - lbub = transBlock.component("lbub") - self.assertIsInstance(lbub, Set) - self.assertEqual(len(lbub), 2) - self.assertEqual(lbub, ['lb', 'ub']) - disjBlock = transBlock.component("relaxedDisjuncts") self.assertIsInstance(disjBlock, Block) self.assertEqual(len(disjBlock), 2) # it has the disjuncts on it - self.assertIsInstance( disjBlock[1].component("d[1].c1"), Constraint) - self.assertIsInstance( disjBlock[1].component("d[1].c2"), Constraint) - self.assertIsInstance( disjBlock[0].component("d[0].c"), Constraint) + self.assertIs(m.d[0].transformation_block, disjBlock[0]) + self.assertIs(m.d[1].transformation_block, disjBlock[1]) def test_disjunction_deactivated(self): ct.check_disjunction_deactivated(self, 'bigm') @@ -93,66 +102,30 @@ def test_disjunct_and_constraint_maps(self): # we are counting on the fact that the disjuncts get relaxed in the # same order every time. - for i in [0,1]: - self.assertIs(oldblock[i].transformation_block(), disjBlock[i]) + for i in [0, 1]: + self.assertIs(oldblock[i].transformation_block, disjBlock[i]) self.assertIs(bigm.get_src_disjunct(disjBlock[i]), oldblock[i]) - # check the constraint mappings - constraintdict1 = disjBlock[0]._constraintMap - self.assertIsInstance(constraintdict1, dict) - self.assertEqual(len(constraintdict1), 2) - - constraintdict2 = disjBlock[1]._constraintMap - self.assertIsInstance(constraintdict2, dict) - self.assertEqual(len(constraintdict2), 2) - - # original -> transformed - transformedConstraints1 = constraintdict1['transformedConstraints'] - self.assertIsInstance(transformedConstraints1, ComponentMap) - self.assertEqual(len(transformedConstraints1), 1) - transformedConstraints2 = constraintdict2['transformedConstraints'] - self.assertIsInstance(transformedConstraints2, ComponentMap) - self.assertEqual(len(transformedConstraints2), 2) # check constraint dict has right mapping - c1_list = transformedConstraints2[oldblock[1].c1] - self.assertEqual(len(c1_list), 2) + c1_list = bigm.get_transformed_constraints(oldblock[1].c1) # this is an equality, so we have both lb and ub - self.assertIs(c1_list[0], - disjBlock[1].component(oldblock[1].c1.name)['lb']) - self.assertIs(c1_list[1], - disjBlock[1].component(oldblock[1].c1.name)['ub']) - c2_list = transformedConstraints2[oldblock[1].c2] + self.assertEqual(len(c1_list), 2) + self.assertIs(c1_list[0].parent_block(), disjBlock[1]) + self.assertIs(bigm.get_src_constraint(c1_list[0]), oldblock[1].c1) + self.assertIs(c1_list[1].parent_block(), disjBlock[1]) + self.assertIs(bigm.get_src_constraint(c1_list[0]), oldblock[1].c1) + + c2_list = bigm.get_transformed_constraints(oldblock[1].c2) # just ub self.assertEqual(len(c2_list), 1) - self.assertIs(c2_list[0], - disjBlock[1].component(oldblock[1].c2.name)['ub']) - c_list = transformedConstraints1[oldblock[0].c] + self.assertIs(c2_list[0].parent_block(), disjBlock[1]) + self.assertIs(bigm.get_src_constraint(c2_list[0]), oldblock[1].c2) + + c_list = bigm.get_transformed_constraints(oldblock[0].c) # just lb self.assertEqual(len(c_list), 1) - self.assertIs(c_list[0], - disjBlock[0].component(oldblock[0].c.name)['lb']) - - # transformed -> original - srcdict1 = constraintdict1['srcConstraints'] - self.assertIsInstance(srcdict1, ComponentMap) - self.assertEqual(len(srcdict1), 2) - self.assertIs(srcdict1[disjBlock[0].component(oldblock[0].c.name)], - oldblock[0].c) - self.assertIs(srcdict1[disjBlock[0].component( - oldblock[0].c.name)['lb']], oldblock[0].c) - srcdict2 = constraintdict2['srcConstraints'] - self.assertIsInstance(srcdict2, ComponentMap) - self.assertEqual(len(srcdict2), 5) - self.assertIs(srcdict2[disjBlock[1].component("d[1].c1")], - oldblock[1].c1) - self.assertIs(srcdict2[disjBlock[1].component("d[1].c1")['lb']], - oldblock[1].c1) - self.assertIs(srcdict2[disjBlock[1].component("d[1].c1")['ub']], - oldblock[1].c1) - self.assertIs(srcdict2[disjBlock[1].component("d[1].c2")], - oldblock[1].c2) - self.assertIs(srcdict2[disjBlock[1].component("d[1].c2")['ub']], - oldblock[1].c2) + self.assertIs(c_list[0].parent_block(), disjBlock[0]) + self.assertIs(bigm.get_src_constraint(c_list[0]), oldblock[0].c) def test_new_block_nameCollision(self): ct.check_transformation_block_name_collision(self, 'bigm') @@ -171,11 +144,16 @@ def test_or_constraints(self): # check or constraint is an or (upper bound is None) orcons = m._pyomo_gdp_bigm_reformulation.component("disjunction_xor") self.assertIsInstance(orcons, Constraint) - self.assertIs(m.d[0].binary_indicator_var, orcons.body.arg(0)) - self.assertIs(m.d[1].binary_indicator_var, orcons.body.arg(1)) - repn = generate_standard_repn(orcons.body) - ct.check_linear_coef(self, repn, m.d[0].binary_indicator_var, 1) - ct.check_linear_coef(self, repn, m.d[1].binary_indicator_var, 1) + assertExpressionsEqual( + self, + orcons.body, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression((1, m.d[0].binary_indicator_var)), + EXPR.MonomialTermExpression((1, m.d[1].binary_indicator_var)), + ] + ), + ) self.assertEqual(orcons.lower, 1) self.assertIsNone(orcons.upper) @@ -184,8 +162,9 @@ def test_deactivated_constraints(self): def test_transformed_constraints(self): m = models.makeTwoTermDisj() - TransformationFactory('gdp.bigm').apply_to(m) - self.checkMs(m, -3, 2, 7, 2) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) + self.checkMs(m, bigm, -3, 2, 7, 2) def test_do_not_transform_userDeactivated_disjuncts(self): ct.check_user_deactivated_disjuncts(self, 'bigm') @@ -194,62 +173,65 @@ def test_improperly_deactivated_disjuncts(self): ct.check_improperly_deactivated_disjuncts(self, 'bigm') def test_do_not_transform_userDeactivated_IndexedDisjunction(self): - ct.check_do_not_transform_userDeactivated_indexedDisjunction(self, - 'bigm') + ct.check_do_not_transform_userDeactivated_indexedDisjunction(self, 'bigm') # helper method to check the M values in all of the transformed # constraints (m, M) is the tuple for M. This also relies on the # disjuncts being transformed in the same order every time. - def checkMs(self, model, cons1lb, cons2lb, cons2ub, cons3ub): + def checkMs(self, model, bigm, cons1lb, cons2lb, cons2ub, cons3ub): disjBlock = model._pyomo_gdp_bigm_reformulation.relaxedDisjuncts # first constraint - c = disjBlock[0].component("d[0].c") + c = bigm.get_transformed_constraints(model.d[0].c) self.assertEqual(len(c), 1) - self.assertTrue(c['lb'].active) - repn = generate_standard_repn(c['lb'].body) + c_lb = c[0] + self.assertTrue(c[0].active) + repn = generate_standard_repn(c[0].body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) ct.check_linear_coef(self, repn, model.a, 1) ct.check_linear_coef(self, repn, model.d[0].indicator_var, cons1lb) self.assertEqual(repn.constant, -cons1lb) - self.assertEqual(c['lb'].lower, model.d[0].c.lower) - self.assertIsNone(c['lb'].upper) + self.assertEqual(c[0].lower, model.d[0].c.lower) + self.assertIsNone(c[0].upper) # second constraint - c = disjBlock[1].component("d[1].c1") + c = bigm.get_transformed_constraints(model.d[1].c1) self.assertEqual(len(c), 2) - self.assertTrue(c['lb'].active) - repn = generate_standard_repn(c['lb'].body) + c_lb = c[0] + c_ub = c[1] + self.assertTrue(c[0].active) + repn = generate_standard_repn(c[0].body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) ct.check_linear_coef(self, repn, model.a, 1) ct.check_linear_coef(self, repn, model.d[1].indicator_var, cons2lb) self.assertEqual(repn.constant, -cons2lb) - self.assertEqual(c['lb'].lower, model.d[1].c1.lower) - self.assertIsNone(c['lb'].upper) - self.assertTrue(c['ub'].active) - repn = generate_standard_repn(c['ub'].body) + self.assertEqual(c[0].lower, model.d[1].c1.lower) + self.assertIsNone(c[0].upper) + self.assertTrue(c_ub.active) + repn = generate_standard_repn(c_ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) ct.check_linear_coef(self, repn, model.a, 1) ct.check_linear_coef(self, repn, model.d[1].indicator_var, cons2ub) self.assertEqual(repn.constant, -cons2ub) - self.assertIsNone(c['ub'].lower) - self.assertEqual(c['ub'].upper, model.d[1].c1.upper) + self.assertIsNone(c_ub.lower) + self.assertEqual(c_ub.upper, model.d[1].c1.upper) # third constraint - c = disjBlock[1].component("d[1].c2") + c = bigm.get_transformed_constraints(model.d[1].c2) self.assertEqual(len(c), 1) - self.assertTrue(c['ub'].active) - repn = generate_standard_repn(c['ub'].body) + c_ub = c[0] + self.assertTrue(c_ub.active) + repn = generate_standard_repn(c_ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) ct.check_linear_coef(self, repn, model.x, 1) ct.check_linear_coef(self, repn, model.d[1].indicator_var, cons3ub) self.assertEqual(repn.constant, -cons3ub) - self.assertIsNone(c['ub'].lower) - self.assertEqual(c['ub'].upper, model.d[1].c2.upper) + self.assertIsNone(c_ub.lower) + self.assertEqual(c_ub.upper, model.d[1].c2.upper) def test_suffix_M_None(self): m = models.makeTwoTermDisj() @@ -257,8 +239,9 @@ def test_suffix_M_None(self): m.BigM = Suffix(direction=Suffix.LOCAL) m.BigM[None] = 20 - TransformationFactory('gdp.bigm').apply_to(m) - self.checkMs(m, -20, -20, 20, 20) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) + self.checkMs(m, bigm, -20, -20, 20, 20) def test_suffix_M_None_on_disjunctData(self): m = models.makeTwoTermDisj() @@ -269,9 +252,10 @@ def test_suffix_M_None_on_disjunctData(self): m.d[0].BigM = Suffix(direction=Suffix.LOCAL) m.d[0].BigM[None] = 18 - TransformationFactory('gdp.bigm').apply_to(m) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) # there should now be different values of m on d[0] and d[1] - self.checkMs(m, -18, -20, 20, 20) + self.checkMs(m, bigm, -18, -20, 20, 20) def test_suffix_M_simpleConstraint_on_disjunctData(self): m = models.makeTwoTermDisj() @@ -282,8 +266,9 @@ def test_suffix_M_simpleConstraint_on_disjunctData(self): m.d[0].BigM = Suffix(direction=Suffix.LOCAL) m.d[0].BigM[m.d[0].c] = 18 - TransformationFactory('gdp.bigm').apply_to(m) - self.checkMs(m, -18, -20, 20, 20) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) + self.checkMs(m, bigm, -18, -20, 20, 20) def test_arg_M_None(self): m = models.makeTwoTermDisj() @@ -292,8 +277,9 @@ def test_arg_M_None(self): m.BigM[None] = 20 # give an arg - TransformationFactory('gdp.bigm').apply_to(m, bigM={None: 19}) - self.checkMs(m, -19, -19, 19, 19) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m, bigM={None: 19}) + self.checkMs(m, bigm, -19, -19, 19, 19) def test_arg_M_singleNum(self): m = models.makeTwoTermDisj() @@ -302,8 +288,9 @@ def test_arg_M_singleNum(self): m.BigM[None] = 20 # give an arg - TransformationFactory('gdp.bigm').apply_to(m, bigM=19.2) - self.checkMs(m, -19.2, -19.2, 19.2, 19.2) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m, bigM=19.2) + self.checkMs(m, bigm, -19.2, -19.2, 19.2, 19.2) def test_singleArg_M_tuple(self): m = models.makeTwoTermDisj() @@ -312,8 +299,9 @@ def test_singleArg_M_tuple(self): m.BigM[None] = 20 # give an arg - TransformationFactory('gdp.bigm').apply_to(m, bigM=(-18, 19.2)) - self.checkMs(m, -18, -18, 19.2, 19.2) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m, bigM=(-18, 19.2)) + self.checkMs(m, bigm, -18, -18, 19.2, 19.2) def test_singleArg_M_tuple_wrongLength(self): m = models.makeTwoTermDisj() @@ -326,10 +314,12 @@ def test_singleArg_M_tuple_wrongLength(self): GDP_Error, r"Big-M \([^)]*\) for constraint d\[0\].c is not of " r"length two. Expected either a single value or " - r"tuple or list of length two for M.*", + r"tuple or list of length two specifying M values for the lower " + "and upper sides of the constraint respectively.*", TransformationFactory('gdp.bigm').apply_to, m, - bigM=(-18, 19.2, 3)) + bigM=(-18, 19.2, 3), + ) def test_singleArg_M_list(self): m = models.makeTwoTermDisj() @@ -338,8 +328,9 @@ def test_singleArg_M_list(self): m.BigM[None] = 20 # give an arg - TransformationFactory('gdp.bigm').apply_to(m, bigM=[-18, 19.2]) - self.checkMs(m, -18, -18, 19.2, 19.2) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m, bigM=[-18, 19.2]) + self.checkMs(m, bigm, -18, -18, 19.2, 19.2) def test_singleArg_M_list_wrongLength(self): m = models.makeTwoTermDisj() @@ -352,10 +343,11 @@ def test_singleArg_M_list_wrongLength(self): GDP_Error, r"Big-M \[[^\]]*\] for constraint d\[0\].c is not of " r"length two. Expected either a single value or " - r"tuple or list of length two for M.*", + r"tuple or list of length two*", TransformationFactory('gdp.bigm').apply_to, m, - bigM=[-18, 19.2, 3]) + bigM=[-18, 19.2, 3], + ) def test_arg_M_simpleConstraint(self): m = models.makeTwoTermDisj() @@ -368,55 +360,52 @@ def test_arg_M_simpleConstraint(self): m.BigM[m.d[1].c2] = 200 # give an arg - TransformationFactory('gdp.bigm').apply_to( - m, - bigM={None: 19, - m.d[0].c: 18, - m.d[1].c1: 17, - m.d[1].c2: 16}) - self.checkMs(m, -18, -17, 17, 16) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m, bigM={None: 19, m.d[0].c: 18, m.d[1].c1: 17, m.d[1].c2: 16}) + self.checkMs(m, bigm, -18, -17, 17, 16) def test_tuple_M_arg(self): m = models.makeTwoTermDisj() # give a tuple arg - TransformationFactory('gdp.bigm').apply_to( - m, - bigM={None: (-20,19)}) - self.checkMs(m, -20, -20, 19, 19) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m, bigM={None: (-20, 19)}) + self.checkMs(m, bigm, -20, -20, 19, 19) def test_tuple_M_suffix(self): m = models.makeTwoTermDisj() m.BigM = Suffix(direction=Suffix.LOCAL) m.BigM[None] = (-18, 20) - TransformationFactory('gdp.bigm').apply_to(m) - self.checkMs(m, -18, -18, 20, 20) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) + self.checkMs(m, bigm, -18, -18, 20, 20) def test_list_M_arg(self): m = models.makeTwoTermDisj() # give a tuple arg - TransformationFactory('gdp.bigm').apply_to( - m, - bigM={None: [-20,19]}) - self.checkMs(m, -20, -20, 19, 19) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m, bigM={None: [-20, 19]}) + self.checkMs(m, bigm, -20, -20, 19, 19) def test_list_M_suffix(self): m = models.makeTwoTermDisj() m.BigM = Suffix(direction=Suffix.LOCAL) m.BigM[None] = [-18, 20] - TransformationFactory('gdp.bigm').apply_to(m) - self.checkMs(m, -18, -18, 20, 20) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) + self.checkMs(m, bigm, -18, -18, 20, 20) def test_tuple_wrong_length_err(self): m = models.makeTwoTermDisj() - M = (-20,19, 32) + M = (-20, 19, 32) self.assertRaisesRegex( GDP_Error, r"Big-M \(-20, 19, 32\) for constraint d\[0\].c is not of " r"length two. Expected either a single value or " - r"tuple or list of length two for M.*", + r"tuple or list of length two*", TransformationFactory('gdp.bigm').apply_to, m, - bigM={None: M}) + bigM={None: M}, + ) def test_list_wrong_length_err(self): m = models.makeTwoTermDisj() @@ -425,10 +414,11 @@ def test_list_wrong_length_err(self): GDP_Error, r"Big-M \[-20, 19, 34\] for constraint d\[0\].c is not of " r"length two. Expected either a single value or " - r"tuple or list of length two for M.*", + r"tuple or list of length two*", TransformationFactory('gdp.bigm').apply_to, m, - bigM={None: M}) + bigM={None: M}, + ) def test_create_using(self): m = models.makeTwoTermDisj() @@ -436,14 +426,17 @@ def test_create_using(self): def test_indexed_constraints_in_disjunct(self): m = ConcreteModel() - m.I = [1,2,3] - m.x = Var(m.I, bounds=(0,10)) - def c_rule(b,i): + m.I = [1, 2, 3] + m.x = Var(m.I, bounds=(0, 10)) + + def c_rule(b, i): m = b.model() return m.x[i] >= i - def d_rule(d,j): + + def d_rule(d, j): m = d.model() d.c = Constraint(m.I[:j], rule=c_rule) + m.d = Disjunct(m.I, rule=d_rule) m.disjunction = Disjunction(expr=[m.d[i] for i in m.I]) @@ -451,32 +444,29 @@ def d_rule(d,j): transBlock = m._pyomo_gdp_bigm_reformulation # 2 blocks: the original Disjunct and the transformation block - self.assertEqual( - len(list(m.component_objects(Block, descend_into=False))), 1) - self.assertEqual( - len(list(m.component_objects(Disjunct))), 1) + self.assertEqual(len(list(m.component_objects(Block, descend_into=False))), 1) + self.assertEqual(len(list(m.component_objects(Disjunct))), 1) # Each relaxed disjunct should have 1 var (the reference to the # indicator var), and i "d[i].c" Constraints - for i in [1,2,3]: - relaxed = transBlock.relaxedDisjuncts[i-1] + for i in [1, 2, 3]: + relaxed = transBlock.relaxedDisjuncts[i - 1] self.assertEqual(len(list(relaxed.component_objects(Var))), 1) self.assertEqual(len(list(relaxed.component_data_objects(Var))), 1) - self.assertEqual( - len(list(relaxed.component_objects(Constraint))), 1) - self.assertEqual( - len(list(relaxed.component_data_objects(Constraint))), i) - self.assertEqual(len(relaxed.component('d[%s].c'%i)), i) + self.assertEqual(len(list(relaxed.component_objects(Constraint))), 1) + self.assertEqual(len(list(relaxed.component_data_objects(Constraint))), i) def test_virtual_indexed_constraints_in_disjunct(self): m = ConcreteModel() - m.I = [1,2,3] - m.x = Var(m.I, bounds=(0,10)) - def d_rule(d,j): + m.I = [1, 2, 3] + m.x = Var(m.I, bounds=(0, 10)) + + def d_rule(d, j): m = d.model() d.c = Constraint(Any) for k in range(j): - d.c[k+1] = m.x[k+1] >= k+1 + d.c[k + 1] = m.x[k + 1] >= k + 1 + m.d = Disjunct(m.I, rule=d_rule) m.disjunction = Disjunction(expr=[m.d[i] for i in m.I]) @@ -484,22 +474,17 @@ def d_rule(d,j): transBlock = m._pyomo_gdp_bigm_reformulation # 2 blocks: the original Disjunct and the transformation block - self.assertEqual( - len(list(m.component_objects(Block, descend_into=False))), 1) - self.assertEqual( - len(list(m.component_objects(Disjunct))), 1) + self.assertEqual(len(list(m.component_objects(Block, descend_into=False))), 1) + self.assertEqual(len(list(m.component_objects(Disjunct))), 1) # Each relaxed disjunct should have 1 var (the reference to the # indicator var), and i "d[i].c" Constraints - for i in [1,2,3]: - relaxed = transBlock.relaxedDisjuncts[i-1] + for i in [1, 2, 3]: + relaxed = transBlock.relaxedDisjuncts[i - 1] self.assertEqual(len(list(relaxed.component_objects(Var))), 1) self.assertEqual(len(list(relaxed.component_data_objects(Var))), 1) - self.assertEqual( - len(list(relaxed.component_objects(Constraint))), 1) - self.assertEqual( - len(list(relaxed.component_data_objects(Constraint))), i) - self.assertEqual(len(relaxed.component('d[%s].c'%i)), i) + self.assertEqual(len(list(relaxed.component_objects(Constraint))), 1) + self.assertEqual(len(list(relaxed.component_data_objects(Constraint))), i) def test_local_var(self): m = models.localVar() @@ -519,24 +504,27 @@ def test_local_var(self): self.assertTrue(repn.is_linear()) ct.check_linear_coef(self, repn, m.disj2.indicator_var, 3) + class TwoTermDisjNonlinear(unittest.TestCase, CommonTests): def test_nonlinear_bigM(self): m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.bigm').apply_to(m) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) disjBlock = m._pyomo_gdp_bigm_reformulation.relaxedDisjuncts # first constraint - c = disjBlock[0].component("d[0].c") + c = bigm.get_transformed_constraints(m.d[0].c) self.assertEqual(len(c), 1) - self.assertTrue(c['ub'].active) - repn = generate_standard_repn(c['ub'].body) + c_ub = c[0] + self.assertTrue(c_ub.active) + repn = generate_standard_repn(c_ub.body) self.assertFalse(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) ct.check_linear_coef(self, repn, m.x, 1) ct.check_linear_coef(self, repn, m.d[0].indicator_var, 94) self.assertEqual(repn.constant, -94) - self.assertEqual(c['ub'].upper, m.d[0].c.upper) - self.assertIsNone(c['ub'].lower) + self.assertEqual(c_ub.upper, m.d[0].c.upper) + self.assertIsNone(c_ub.lower) def test_nonlinear_bigM_missing_var_bounds(self): m = models.makeTwoTermDisj_Nonlinear() @@ -547,53 +535,58 @@ def test_nonlinear_bigM_missing_var_bounds(self): r"expressions.\n\t\(found while processing " r"constraint 'd\[0\].c'\)", TransformationFactory('gdp.bigm').apply_to, - m) + m, + ) def test_nonlinear_disjoint(self): m = ConcreteModel() x = m.x = Var(bounds=(-4, 4)) y = m.y = Var(bounds=(-10, 10)) - m.disj = Disjunction(expr=[ - [x**2 + y**2 <= 2, x**3 + y**2 + x * y >= 1.0/2.0], - [(x - 3)**2 + (y - 3)**2 <= 1] - ]) - TransformationFactory('gdp.bigm').apply_to(m) + m.disj = Disjunction( + expr=[ + [x**2 + y**2 <= 2, x**3 + y**2 + x * y >= 1.0 / 2.0], + [(x - 3) ** 2 + (y - 3) ** 2 <= 1], + ] + ) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) disjBlock = m._pyomo_gdp_bigm_reformulation.relaxedDisjuncts # first disjunct, first constraint - c = disjBlock[0].component("disj_disjuncts[0].constraint") - self.assertEqual(len(c), 2) - repn = generate_standard_repn(c[1, 'ub'].body) + c = bigm.get_transformed_constraints(m.disj_disjuncts[0].constraint[1]) + self.assertEqual(len(c), 1) + c_ub = c[0] + repn = generate_standard_repn(c_ub.body) self.assertFalse(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 1) ct.check_linear_coef(self, repn, m.disj_disjuncts[0].indicator_var, 114) self.assertEqual(repn.constant, -114) - self.assertEqual(c[1, 'ub'].upper, - m.disj_disjuncts[0].constraint[1].upper) - self.assertIsNone(c[1, 'ub'].lower) + self.assertEqual(c_ub.upper, m.disj_disjuncts[0].constraint[1].upper) + self.assertIsNone(c_ub.lower) # first disjunct, second constraint - repn = generate_standard_repn(c[2, 'lb'].body) + c = bigm.get_transformed_constraints(m.disj_disjuncts[0].constraint[2]) + self.assertEqual(len(c), 1) + c_lb = c[0] + repn = generate_standard_repn(c_lb.body) self.assertFalse(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 1) - ct.check_linear_coef(self, repn, m.disj_disjuncts[0].indicator_var, - -104.5) + ct.check_linear_coef(self, repn, m.disj_disjuncts[0].indicator_var, -104.5) self.assertEqual(repn.constant, 104.5) - self.assertEqual(c[2, 'lb'].lower, - m.disj_disjuncts[0].constraint[2].lower) - self.assertIsNone(c[2, 'lb'].upper) + self.assertEqual(c_lb.lower, m.disj_disjuncts[0].constraint[2].lower) + self.assertIsNone(c_lb.upper) # second disjunct, first constraint - c = disjBlock[1].component("disj_disjuncts[1].constraint") + c = bigm.get_transformed_constraints(m.disj_disjuncts[1].constraint[1]) self.assertEqual(len(c), 1) - repn = generate_standard_repn(c[1, 'ub'].body) + c_ub = c[0] + repn = generate_standard_repn(c_ub.body) self.assertFalse(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 3) ct.check_linear_coef(self, repn, m.x, -6) ct.check_linear_coef(self, repn, m.y, -6) ct.check_linear_coef(self, repn, m.disj_disjuncts[1].indicator_var, 217) self.assertEqual(repn.constant, -199) - self.assertEqual(c[1, 'ub'].upper, - m.disj_disjuncts[1].constraint[1].upper) - self.assertIsNone(c[1, 'ub'].lower) + self.assertEqual(c_ub.upper, m.disj_disjuncts[1].constraint[1].upper) + self.assertIsNone(c_ub.lower) class TwoTermIndexedDisj(unittest.TestCase, CommonTests): @@ -605,14 +598,14 @@ def setUp(self): # block. This is needed in multiple tests, so I am storing it # here. self.pairs = [ - ( (0,1,'A'), 0 ), - ( (1,1,'A'), 1 ), - ( (0,1,'B'), 2 ), - ( (1,1,'B'), 3 ), - ( (0,2,'A'), 4 ), - ( (1,2,'A'), 5 ), - ( (0,2,'B'), 6 ), - ( (1,2,'B'), 7 ), + ((0, 1, 'A'), 0), + ((1, 1, 'A'), 1), + ((0, 1, 'B'), 2), + ((1, 1, 'B'), 3), + ((0, 2, 'A'), 4), + ((1, 2, 'A'), 5), + ((0, 2, 'B'), 6), + ((1, 2, 'B'), 7), ] def test_xor_constraints(self): @@ -627,22 +620,13 @@ def test_transformed_block_structure(self): transBlock = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) - # check that we have the lbub set on the transformation block - lbub = transBlock.component("lbub") - self.assertIsInstance(lbub, Set) - self.assertEqual(len(lbub), 2) - self.assertEqual(lbub, ['lb', 'ub']) - # check the IndexedBlock of transformed disjuncts disjBlock = transBlock.relaxedDisjuncts self.assertEqual(len(disjBlock), 8) - # check that all 8 blocks have the right constraint on them. - # this relies on the order in which they are transformed. - for i,j in self.pairs: - self.assertIsInstance( - disjBlock[j].component(m.disjunct[i].c.name), - Constraint) + # check that all 8 blocks have exactly one constraint on them. + for i, j in self.pairs: + self.assertEqual(len(disjBlock[j].component_map(Constraint)), 1) def test_disjunct_and_constraint_maps(self): m = models.makeTwoTermMultiIndexedDisjunction() @@ -658,10 +642,8 @@ def test_disjunct_and_constraint_maps(self): for src, dest in self.pairs: srcDisjunct = oldblock[src] transformedDisjunct = disjBlock[dest] - self.assertIs(bigm.get_src_disjunct(transformedDisjunct), - srcDisjunct) - self.assertIs(transformedDisjunct, - srcDisjunct.transformation_block()) + self.assertIs(bigm.get_src_disjunct(transformedDisjunct), srcDisjunct) + self.assertIs(transformedDisjunct, srcDisjunct.transformation_block) transformed = bigm.get_transformed_constraints(srcDisjunct.c) if src[0]: @@ -669,33 +651,14 @@ def test_disjunct_and_constraint_maps(self): self.assertEqual(len(transformed), 2) self.assertIsInstance(transformed[0], _ConstraintData) self.assertIsInstance(transformed[1], _ConstraintData) - self.assertIs( - transformed[0], - disjBlock[dest].component(srcDisjunct.c.name)['lb']) - self.assertIs( - transformed[1], - disjBlock[dest].component(srcDisjunct.c.name)['ub']) - # check reverse maps from the _ConstraintDatas - self.assertIs(bigm.get_src_constraint( - disjBlock[dest].component(srcDisjunct.c.name)['lb']), - srcDisjunct.c) - self.assertIs(bigm.get_src_constraint( - disjBlock[dest].component(srcDisjunct.c.name)['ub']), - srcDisjunct.c) + self.assertIs(bigm.get_src_constraint(transformed[0]), srcDisjunct.c) + self.assertIs(bigm.get_src_constraint(transformed[1]), srcDisjunct.c) else: # >= self.assertEqual(len(transformed), 1) self.assertIsInstance(transformed[0], _ConstraintData) - self.assertIs( - transformed[0], - disjBlock[dest].component(srcDisjunct.c.name)['lb']) - self.assertIs(bigm.get_src_constraint( - disjBlock[dest].component(srcDisjunct.c.name)['lb']), - srcDisjunct.c) - # check reverse map from the container - self.assertIs(bigm.get_src_constraint( - disjBlock[dest].component(srcDisjunct.c.name)), - srcDisjunct.c) + # check reverse map from the container + self.assertIs(bigm.get_src_constraint(transformed[0]), srcDisjunct.c) def test_deactivated_disjuncts(self): ct.check_deactivated_disjuncts(self, 'bigm') @@ -707,6 +670,7 @@ def test_create_using(self): m = models.makeTwoTermMultiIndexedDisjunction() self.diff_apply_to_and_create_using(m) + class DisjOnBlock(unittest.TestCase, CommonTests): # when the disjunction is on a block, we want all of the stuff created by # the transformation to go on that block also so that solving the block @@ -728,13 +692,11 @@ def checkFirstDisjMs(self, model, disj1c1lb, disj1c1ub, disj1c2): repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c1lb) - ct.check_linear_coef( - self, repn, model.b.disjunct[0].indicator_var, disj1c1lb) + ct.check_linear_coef(self, repn, model.b.disjunct[0].indicator_var, disj1c1lb) repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c1ub) - ct.check_linear_coef( - self, repn, model.b.disjunct[0].indicator_var, disj1c1ub) + ct.check_linear_coef(self, repn, model.b.disjunct[0].indicator_var, disj1c1ub) c2 = bigm.get_transformed_constraints(model.b.disjunct[1].c) self.assertEqual(len(c2), 1) @@ -742,8 +704,7 @@ def checkFirstDisjMs(self, model, disj1c1lb, disj1c1ub, disj1c2): repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c2) - ct.check_linear_coef( - self, repn, model.b.disjunct[1].indicator_var, disj1c2) + ct.check_linear_coef(self, repn, model.b.disjunct[1].indicator_var, disj1c2) def checkMs(self, model, disj1c1lb, disj1c1ub, disj1c2, disj2c1, disj2c2): bigm = TransformationFactory('gdp.bigm') @@ -755,8 +716,7 @@ def checkMs(self, model, disj1c1lb, disj1c1ub, disj1c2, disj2c1, disj2c2): repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj2c1) - ct.check_linear_coef( - self, repn, model.simpledisj.indicator_var, disj2c1) + ct.check_linear_coef(self, repn, model.simpledisj.indicator_var, disj2c1) c = bigm.get_transformed_constraints(model.simpledisj2.c) self.assertEqual(len(c), 1) @@ -764,8 +724,7 @@ def checkMs(self, model, disj1c1lb, disj1c1ub, disj1c2, disj2c1, disj2c2): repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj2c2) - ct.check_linear_coef( - self, repn, model.simpledisj2.indicator_var, disj2c2) + ct.check_linear_coef(self, repn, model.simpledisj2.indicator_var, disj2c2) def test_suffix_M_onBlock(self): m = models.makeTwoTermDisjOnBlock() @@ -781,8 +740,9 @@ def test_suffix_M_onBlock(self): self.checkMs(m, -34, 34, 34, -3, 1.5) # check the source of the values - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj.c + ) self.assertIsNone(l_src) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -793,8 +753,9 @@ def test_suffix_M_onBlock(self): self.assertEqual(l_val, -3) self.assertIsNone(u_val) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj2.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj2.c + ) self.assertIsNone(l_src) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -805,8 +766,9 @@ def test_suffix_M_onBlock(self): self.assertIsNone(l_val) self.assertEqual(u_val, 1.5) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[0].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[0].c + ) self.assertIs(l_src, m.b.BigM) self.assertIs(u_src, m.b.BigM) self.assertIsNone(l_key) @@ -817,8 +779,9 @@ def test_suffix_M_onBlock(self): self.assertEqual(l_val, -34) self.assertEqual(u_val, 34) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[1].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[1].c + ) self.assertIsNone(l_src) self.assertIs(u_src, m.b.BigM) self.assertIsNone(l_key) @@ -838,8 +801,9 @@ def test_block_M_arg(self): self.checkMs(m, -100, 100, 13, -3, 1.5) # check the source of the values - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj.c + ) self.assertIsNone(l_src) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -850,8 +814,9 @@ def test_block_M_arg(self): self.assertEqual(l_val, -3) self.assertIsNone(u_val) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj2.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj2.c + ) self.assertIsNone(l_src) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -862,8 +827,9 @@ def test_block_M_arg(self): self.assertIsNone(l_val) self.assertEqual(u_val, 1.5) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[0].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[0].c + ) self.assertIs(l_src, bigms) self.assertIs(u_src, bigms) self.assertIs(l_key, m.b) @@ -874,8 +840,9 @@ def test_block_M_arg(self): self.assertEqual(l_val, -100) self.assertEqual(u_val, 100) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[1].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[1].c + ) self.assertIsNone(l_src) self.assertIs(u_src, bigms) self.assertIsNone(l_key) @@ -895,8 +862,9 @@ def test_disjunct_M_arg(self): self.checkMs(m, -100, 100, 13, -3, 1.5) # check the source of the values - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj.c + ) self.assertIsNone(l_src) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -907,8 +875,9 @@ def test_disjunct_M_arg(self): self.assertEqual(l_val, -3) self.assertIsNone(u_val) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj2.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj2.c + ) self.assertIsNone(l_src) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -919,8 +888,9 @@ def test_disjunct_M_arg(self): self.assertIsNone(l_val) self.assertEqual(u_val, 1.5) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[0].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[0].c + ) self.assertIs(l_src, bigms) self.assertIs(u_src, bigms) self.assertIs(l_key, m.b) @@ -931,8 +901,9 @@ def test_disjunct_M_arg(self): self.assertEqual(l_val, -100) self.assertEqual(u_val, 100) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[1].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[1].c + ) self.assertIsNone(l_src) self.assertIs(u_src, bigms) self.assertIsNone(l_key) @@ -947,14 +918,19 @@ def test_block_M_arg_with_default(self): m = models.makeTwoTermDisjOnBlock() m = models.add_disj_not_on_block(m) bigm = TransformationFactory('gdp.bigm') - bigms = {m.b: 100, m.b.disjunct[1].c: 13, - m.b.disjunct[0].c: (None, 50), None: 34} + bigms = { + m.b: 100, + m.b.disjunct[1].c: 13, + m.b.disjunct[0].c: (None, 50), + None: 34, + } bigm.apply_to(m, bigM=bigms) self.checkMs(m, -100, 50, 13, -34, 34) # check the source of the values - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj.c + ) self.assertIs(l_src, bigms) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -965,8 +941,9 @@ def test_block_M_arg_with_default(self): self.assertEqual(l_val, -34) self.assertIsNone(u_val) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj2.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj2.c + ) self.assertIsNone(l_src) self.assertIs(u_src, bigms) self.assertIsNone(l_key) @@ -977,8 +954,9 @@ def test_block_M_arg_with_default(self): self.assertIsNone(l_val) self.assertEqual(u_val, 34) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[0].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[0].c + ) self.assertIs(l_src, bigms) self.assertIs(u_src, bigms) self.assertIs(l_key, m.b) @@ -989,8 +967,9 @@ def test_block_M_arg_with_default(self): self.assertEqual(l_val, -100) self.assertEqual(u_val, 50) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[1].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[1].c + ) self.assertIsNone(l_src) self.assertIs(u_src, bigms) self.assertIsNone(l_key) @@ -1007,9 +986,8 @@ def test_model_M_arg(self): out = StringIO() with LoggingIntercept(out, 'pyomo.gdp.bigm'): TransformationFactory('gdp.bigm').apply_to( - m, - bigM={m: 100, - m.b.disjunct[1].c: 13}) + m, bigM={m: 100, m.b.disjunct[1].c: 13} + ) self.checkMs(m, -100, 100, 13, -100, 100) # make sure we didn't get any warnings when we used all the args self.assertEqual(out.getvalue(), '') @@ -1020,15 +998,15 @@ def test_model_M_arg_overrides_None(self): out = StringIO() with LoggingIntercept(out, 'pyomo.gdp.bigm'): TransformationFactory('gdp.bigm').apply_to( - m, - bigM={m: 100, - m.b.disjunct[1].c: 13, - None: 34}) + m, bigM={m: 100, m.b.disjunct[1].c: 13, None: 34} + ) self.checkMs(m, -100, 100, 13, -100, 100) - self.assertEqual(out.getvalue(), - "Unused arguments in the bigM map! " - "These arguments were not used by the " - "transformation:\n\tNone\n\n") + self.assertEqual( + out.getvalue(), + "Unused arguments in the bigM map! " + "These arguments were not used by the " + "transformation:\n\tNone\n\n", + ) def test_warning_for_crazy_bigm_args(self): m = models.makeTwoTermDisjOnBlock() @@ -1038,12 +1016,14 @@ def test_warning_for_crazy_bigm_args(self): # this is silly bigM[m.a] = 34 with LoggingIntercept(out, 'pyomo.gdp.bigm'): - TransformationFactory('gdp.bigm').apply_to( m, bigM=bigM) + TransformationFactory('gdp.bigm').apply_to(m, bigM=bigM) self.checkMs(m, -100, 100, 13, -100, 100) - self.assertEqual(out.getvalue(), - "Unused arguments in the bigM map! " - "These arguments were not used by the " - "transformation:\n\ta\n\n") + self.assertEqual( + out.getvalue(), + "Unused arguments in the bigM map! " + "These arguments were not used by the " + "transformation:\n\ta\n\n", + ) def test_use_above_scope_m_value(self): m = models.makeTwoTermDisjOnBlock() @@ -1053,7 +1033,7 @@ def test_use_above_scope_m_value(self): # transform just the block. We expect to use the M value specified on # the model, and we should comment on nothing. with LoggingIntercept(out, 'pyomo.gdp.bigm'): - TransformationFactory('gdp.bigm').apply_to( m.b, bigM=bigM) + TransformationFactory('gdp.bigm').apply_to(m.b, bigM=bigM) self.checkFirstDisjMs(m, -100, 100, 13) self.assertEqual(out.getvalue(), '') @@ -1069,19 +1049,19 @@ def test_unused_arguments_transform_block(self): out = StringIO() with LoggingIntercept(out, 'pyomo.gdp.bigm'): TransformationFactory('gdp.bigm').apply_to( - m.b, - bigM={m: 100, - m.b: 13, - m.simpledisj2.c: 10}) + m.b, bigM={m: 100, m.b: 13, m.simpledisj2.c: 10} + ) self.checkFirstDisjMs(m, -13, 13, 13) # The order these get printed depends on a dictionary order, so test # this way... - self.assertIn("Unused arguments in the bigM map! " - "These arguments were not used by the " - "transformation:", - out.getvalue()) + self.assertIn( + "Unused arguments in the bigM map! " + "These arguments were not used by the " + "transformation:", + out.getvalue(), + ) self.assertIn("simpledisj2.c", out.getvalue()) self.assertIn("unknown", out.getvalue()) @@ -1098,8 +1078,9 @@ def test_suffix_M_simple_disj(self): self.checkMs(m, -20, 20, 20, -45, 20) # check source of the m values - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj.c + ) self.assertIs(l_src, m.simpledisj.BigM) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -1110,8 +1091,9 @@ def test_suffix_M_simple_disj(self): self.assertEqual(l_val, -45) self.assertIsNone(u_val) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj2.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj2.c + ) self.assertIsNone(l_src) self.assertIs(u_src, m.BigM) self.assertIsNone(l_key) @@ -1122,8 +1104,9 @@ def test_suffix_M_simple_disj(self): self.assertIsNone(l_val) self.assertEqual(u_val, 20) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[0].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[0].c + ) self.assertIs(l_src, m.BigM) self.assertIs(u_src, m.BigM) self.assertIsNone(l_key) @@ -1134,8 +1117,9 @@ def test_suffix_M_simple_disj(self): self.assertEqual(l_val, -20) self.assertEqual(u_val, 20) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[1].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[1].c + ) self.assertIsNone(l_src) self.assertIs(u_src, m.BigM) self.assertIsNone(l_key) @@ -1181,8 +1165,9 @@ def test_suffix_M_constraintKeyOnSimpleDisj(self): self.checkMs(m, -15, 20, 20, -87, 20) # check source of the m values - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj.c + ) self.assertIs(l_src, m.simpledisj.BigM) self.assertIsNone(u_src) self.assertIs(l_key, m.simpledisj.c) @@ -1193,8 +1178,9 @@ def test_suffix_M_constraintKeyOnSimpleDisj(self): self.assertEqual(l_val, -87) self.assertIsNone(u_val) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.simpledisj2.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisj2.c + ) self.assertIsNone(l_src) self.assertIs(u_src, m.BigM) self.assertIsNone(l_key) @@ -1205,8 +1191,9 @@ def test_suffix_M_constraintKeyOnSimpleDisj(self): self.assertIsNone(l_val) self.assertEqual(u_val, 20) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[0].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[0].c + ) self.assertIs(l_src, bigms) self.assertIs(u_src, m.BigM) self.assertIs(l_key, m.b.disjunct[0].c) @@ -1217,8 +1204,9 @@ def test_suffix_M_constraintKeyOnSimpleDisj(self): self.assertEqual(l_val, -15) self.assertEqual(u_val, 20) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.b.disjunct[1].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.b.disjunct[1].c + ) self.assertIsNone(l_src) self.assertIs(u_src, m.BigM) self.assertIsNone(l_key) @@ -1257,7 +1245,8 @@ def test_suffix_M_constraintKeyOnSimpleDisj_deprecated_m_src_method(self): r"came from different sources, please use the " r"get_M_value_src method.", bigm.get_m_value_src, - m.b.disjunct[0].c) + m.b.disjunct[0].c, + ) (src, key) = bigm.get_m_value_src(m.b.disjunct[1].c) self.assertIs(src, m.BigM) self.assertIsNone(key) @@ -1326,10 +1315,12 @@ def test_do_not_transform_deactivated_constraintDatas(self): KeyError, r".*b.simpledisj1.c\[1\]", bigm.get_transformed_constraints, - m.b.simpledisj1.c[1]) - self.assertRegex(log.getvalue(), - r".*Constraint 'b.simpledisj1.c\[1\]' " - r"has not been transformed.") + m.b.simpledisj1.c[1], + ) + self.assertRegex( + log.getvalue(), + r".*Constraint 'b.simpledisj1.c\[1\]' has not been transformed.", + ) # and the rest of the container was transformed cons_list = bigm.get_transformed_constraints(m.b.simpledisj1.c[2]) @@ -1339,8 +1330,9 @@ def test_do_not_transform_deactivated_constraintDatas(self): self.assertIsInstance(lb, constraint._GeneralConstraintData) self.assertIsInstance(ub, constraint._GeneralConstraintData) - def checkMs(self, m, disj1c1lb, disj1c1ub, disj1c2lb, disj1c2ub, disj2c1ub, - disj2c2ub): + def checkMs( + self, m, disj1c1lb, disj1c1ub, disj1c2lb, disj1c2ub, disj2c1ub, disj2c2ub + ): bigm = TransformationFactory('gdp.bigm') m_values = bigm.get_all_M_values_by_constraint(m) @@ -1351,13 +1343,11 @@ def checkMs(self, m, disj1c1lb, disj1c1ub, disj1c2lb, disj1c2ub, disj2c1ub, repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c1lb) - ct.check_linear_coef( - self, repn, m.b.simpledisj1.indicator_var, disj1c1lb) + ct.check_linear_coef(self, repn, m.b.simpledisj1.indicator_var, disj1c1lb) repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c1ub) - ct.check_linear_coef( - self, repn, m.b.simpledisj1.indicator_var, disj1c1ub) + ct.check_linear_coef(self, repn, m.b.simpledisj1.indicator_var, disj1c1ub) self.assertIn(m.b.simpledisj1.c[1], m_values.keys()) self.assertEqual(m_values[m.b.simpledisj1.c[1]][0], disj1c1lb) self.assertEqual(m_values[m.b.simpledisj1.c[1]][1], disj1c1ub) @@ -1369,13 +1359,11 @@ def checkMs(self, m, disj1c1lb, disj1c1ub, disj1c2lb, disj1c2ub, disj2c1ub, repn = generate_standard_repn(lb.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c2lb) - ct.check_linear_coef( - self, repn, m.b.simpledisj1.indicator_var, disj1c2lb) + ct.check_linear_coef(self, repn, m.b.simpledisj1.indicator_var, disj1c2lb) repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj1c2ub) - ct.check_linear_coef( - self, repn, m.b.simpledisj1.indicator_var, disj1c2ub) + ct.check_linear_coef(self, repn, m.b.simpledisj1.indicator_var, disj1c2ub) self.assertIn(m.b.simpledisj1.c[2], m_values.keys()) self.assertEqual(m_values[m.b.simpledisj1.c[2]][0], disj1c2lb) self.assertEqual(m_values[m.b.simpledisj1.c[2]][1], disj1c2ub) @@ -1386,8 +1374,7 @@ def checkMs(self, m, disj1c1lb, disj1c1ub, disj1c2lb, disj1c2ub, disj2c1ub, repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj2c1ub) - ct.check_linear_coef( - self, repn, m.b.simpledisj2.indicator_var, disj2c1ub) + ct.check_linear_coef(self, repn, m.b.simpledisj2.indicator_var, disj2c1ub) self.assertIn(m.b.simpledisj2.c[1], m_values.keys()) self.assertEqual(m_values[m.b.simpledisj2.c[1]][1], disj2c1ub) self.assertIsNone(m_values[m.b.simpledisj2.c[1]][0]) @@ -1398,8 +1385,7 @@ def checkMs(self, m, disj1c1lb, disj1c1ub, disj1c2lb, disj1c2ub, disj2c1ub, repn = generate_standard_repn(ub.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, -disj2c2ub) - ct.check_linear_coef( - self, repn, m.b.simpledisj2.indicator_var, disj2c2ub) + ct.check_linear_coef(self, repn, m.b.simpledisj2.indicator_var, disj2c2ub) self.assertIn(m.b.simpledisj2.c[2], m_values.keys()) self.assertEqual(m_values[m.b.simpledisj2.c[2]][1], disj2c2ub) self.assertIsNone(m_values[m.b.simpledisj2.c[2]][0]) @@ -1451,12 +1437,13 @@ def test_unbounded_var_m_estimation_err(self): GDP_Error, r"Cannot estimate M for unbounded " r"expressions.\n\t\(found while processing " - r"constraint 'b.simpledisj1.c'\). " + r"constraint 'b.simpledisj1.c\[1\]'\). " r"Please specify a value of M " r"or ensure all variables that appear in the " r"constraint are bounded.", TransformationFactory('gdp.bigm').apply_to, - m) + m, + ) def test_create_using(self): m = models.makeTwoTermDisj_IndexedConstraints() @@ -1493,7 +1480,8 @@ def test_transformed_constraints_on_block(self): # just add ['lb', 'ub'] as another index (using both for equality and # both bounds and the one that we need when we only have one bound) m = models.makeTwoTermDisj_IndexedConstraints_BoundedVars() - TransformationFactory('gdp.bigm').apply_to(m) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) transBlock = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) @@ -1501,19 +1489,33 @@ def test_transformed_constraints_on_block(self): self.assertIsInstance(disjBlock, Block) self.assertEqual(len(disjBlock), 2) - cons1 = disjBlock[0].component("disjunct[0].c") - self.assertIsInstance(cons1, Constraint) - self.assertTrue(cons1.active) - self.assertTrue(cons1[1,'lb'].active) - self.assertTrue(cons1[2,'lb'].active) - - cons2 = disjBlock[1].component("disjunct[1].c") - self.assertIsInstance(cons2, Constraint) - self.assertTrue(cons2.active) - self.assertTrue(cons2[1,'lb'].active) - self.assertTrue(cons2[1,'ub'].active) - self.assertTrue(cons2[2,'lb'].active) - self.assertTrue(cons2[2,'ub'].active) + cons11 = bigm.get_transformed_constraints(m.disjunct[0].c[1]) + self.assertEqual(len(cons11), 1) + cons11_lb = cons11[0] + self.assertIsInstance(cons11_lb.parent_component(), Constraint) + self.assertTrue(cons11_lb.active) + cons12 = bigm.get_transformed_constraints(m.disjunct[0].c[2]) + self.assertEqual(len(cons12), 1) + cons12_lb = cons12[0] + self.assertIsInstance(cons12_lb.parent_component(), Constraint) + self.assertTrue(cons12_lb.active) + + cons21 = bigm.get_transformed_constraints(m.disjunct[1].c[1]) + self.assertEqual(len(cons21), 2) + cons21_lb = cons21[0] + cons21_ub = cons21[1] + self.assertIsInstance(cons21_lb.parent_component(), Constraint) + self.assertIsInstance(cons21_ub.parent_component(), Constraint) + self.assertTrue(cons21_lb.active) + self.assertTrue(cons21_ub.active) + cons22 = bigm.get_transformed_constraints(m.disjunct[1].c[2]) + self.assertEqual(len(cons22), 2) + cons22_lb = cons22[0] + cons22_ub = cons22[1] + self.assertIsInstance(cons22_lb.parent_component(), Constraint) + self.assertIsInstance(cons22_ub.parent_component(), Constraint) + self.assertTrue(cons22_lb.active) + self.assertTrue(cons22_ub.active) def checkMs(self, model, c11lb, c12lb, c21lb, c21ub, c22lb, c22ub): bigm = TransformationFactory('gdp.bigm') @@ -1573,9 +1575,8 @@ def test_arg_M_constraintdata(self): # give an arg TransformationFactory('gdp.bigm').apply_to( - m, - bigM={None: 19, m.disjunct[0].c[1]: 17, - m.disjunct[0].c[2]: 18}) + m, bigM={None: 19, m.disjunct[0].c[1]: 17, m.disjunct[0].c[2]: 18} + ) # check that m values are what we expect self.checkMs(m, -17, -18, -19, 19, -19, 19) @@ -1590,8 +1591,8 @@ def test_arg_M_indexedConstraint(self): # give an arg. Doing this one as a ComponentMap, just to make sure. TransformationFactory('gdp.bigm').apply_to( - m, - bigM=ComponentMap({None: 19, m.disjunct[0].c: 17})) + m, bigM=ComponentMap({None: 19, m.disjunct[0].c: 17}) + ) self.checkMs(m, -17, -17, -19, 19, -19, 19) def test_suffix_M_None_on_indexedConstraint(self): @@ -1671,6 +1672,7 @@ def test_targets_cannot_be_cuids(self): # # No error, and we've transformed the whole model # m.pprint() + class TestTargets_IndexedDisjunction(unittest.TestCase, CommonTests): def test_indexedDisj_targets_inactive(self): ct.check_indexedDisj_targets_inactive(self, 'bigm') @@ -1716,64 +1718,69 @@ def test_disjuncts_inactive(self): ct.check_disjuncts_inactive_nested(self, 'bigm') def test_deactivated_disjunct_leaves_nested_disjuncts_active(self): - ct.check_deactivated_disjunct_leaves_nested_disjunct_active(self, - 'bigm') - - def test_transformation_block_structure(self): - m = models.makeNestedDisjunctions() - TransformationFactory('gdp.bigm').apply_to(m) + ct.check_deactivated_disjunct_leaves_nested_disjunct_active(self, 'bigm') - transBlock = m._pyomo_gdp_bigm_reformulation + def check_disjunction_transformation_block_structure(self, transBlock, pairs): self.assertIsInstance(transBlock, Block) - # check that we have the lbub set on the transformation block - lbub = transBlock.component("lbub") - self.assertIsInstance(lbub, Set) - self.assertEqual(len(lbub), 2) - self.assertEqual(lbub, ['lb', 'ub']) - - # we have the XOR constraint - self.assertIsInstance(transBlock.component("disjunction_xor"), - Constraint) - disjBlock = transBlock.relaxedDisjuncts self.assertIsInstance(disjBlock, Block) - # All the outer and inner disjuncts should be on Block: - self.assertEqual(len(disjBlock), 7) - pairs = [ - (0, ["simpledisjunct._pyomo_gdp_bigm_reformulation.'simpledisjunct." - "innerdisjunction_xor'"]), - (1, ["simpledisjunct.innerdisjunct0.c"]), - (2, ["simpledisjunct.innerdisjunct1.c"]), - (3, ["disjunct[0].c"]), - (4, ["disjunct[1]._pyomo_gdp_bigm_reformulation.'disjunct[1]." - "innerdisjunction_xor'", - "disjunct[1].c"]), - (5, ["disjunct[1].innerdisjunct[0].c"]), - (6, ["disjunct[1].innerdisjunct[1].c"]), - ] + # All the transformed outer disjuncts should be on Block: + self.assertEqual(len(disjBlock), len(pairs)) + # This test will also rely on the disjunctions being relaxed in the same # order every time (and moved up to the new transformation block in the # same order) + bigm = TransformationFactory('gdp.bigm') for i, j in pairs: - for nm in j: - self.assertIsInstance( - disjBlock[i].component(nm), - Constraint) + for comp in j: + self.assertIs( + bigm.get_transformed_constraints(comp)[0].parent_block(), + disjBlock[i], + ) + + def test_transformation_block_structure(self): + m = models.makeNestedDisjunctions() + TransformationFactory('gdp.bigm').apply_to(m) + + # This is the transformation block for the outer disjunction + transBlock = m.disjunction.algebraic_constraint.parent_block() + pairs = [ + (0, [m.simpledisjunct.innerdisjunct0.c]), + (1, [m.simpledisjunct.innerdisjunct1.c]), + (2, []), # No constraints, just a reference to simpledisjunct's + # indicator_var + (3, [m.disjunct[0].c]), + (4, [m.disjunct[1].innerdisjunct[0].c]), + (5, [m.disjunct[1].innerdisjunct[1].c]), + (6, []), # Again no constraints, just indicator var ref + ] + self.check_disjunction_transformation_block_structure(transBlock, pairs) + # we have the XOR constraints for both the outer and inner disjunctions + self.assertIsInstance(transBlock.component("disjunction_xor"), Constraint) - def test_transformation_block_on_disjunct_empty(self): + def test_transformation_block_on_inner_disjunct_empty(self): m = models.makeNestedDisjunctions() TransformationFactory('gdp.bigm').apply_to(m) - self.assertEqual(len(m.disjunct[1]._pyomo_gdp_bigm_reformulation.\ - component("relaxedDisjuncts")), 0) - self.assertEqual(len(m.simpledisjunct._pyomo_gdp_bigm_reformulation.\ - component("relaxedDisjuncts")), 0) + self.assertIsNone(m.disjunct[1].component("_pyomo_gdp_bigm_reformulation")) def test_mappings_between_disjunctions_and_xors(self): - # Note this test actually checks that the inner disjunction maps to its - # original xor (which will be transformed again by the outer - # disjunction.) - ct.check_mappings_between_disjunctions_and_xors(self, 'bigm') + m = models.makeNestedDisjunctions() + transform = TransformationFactory('gdp.bigm') + transform.apply_to(m) + + transBlock1 = m.component("_pyomo_gdp_bigm_reformulation") + + disjunctionPairs = [ + (m.disjunction, transBlock1.disjunction_xor), + (m.disjunct[1].innerdisjunction[0], transBlock1.innerdisjunction_xor_4[0]), + (m.simpledisjunct.innerdisjunction, transBlock1.innerdisjunction_xor), + ] + + # check disjunction mappings + for disjunction, xor in disjunctionPairs: + self.assertIs(disjunction.algebraic_constraint, xor) + self.assertIs(transform.get_src_disjunction(xor), disjunction) def test_disjunct_mappings(self): m = models.makeNestedDisjunctions() @@ -1784,25 +1791,29 @@ def test_disjunct_mappings(self): # I want to check that I correctly updated the pointers to the # transformation blocks on the inner Disjuncts. - self.assertIs(m.disjunct[1].innerdisjunct[0].transformation_block(), - disjunctBlocks[5]) - self.assertIs(disjunctBlocks[5]._srcDisjunct(), - m.disjunct[1].innerdisjunct[0]) - - self.assertIs(m.disjunct[1].innerdisjunct[1].transformation_block(), - disjunctBlocks[6]) - self.assertIs(disjunctBlocks[6]._srcDisjunct(), - m.disjunct[1].innerdisjunct[1]) - - self.assertIs(m.simpledisjunct.innerdisjunct0.transformation_block(), - disjunctBlocks[1]) - self.assertIs(disjunctBlocks[1]._srcDisjunct(), - m.simpledisjunct.innerdisjunct0) - - self.assertIs(m.simpledisjunct.innerdisjunct1.transformation_block(), - disjunctBlocks[2]) - self.assertIs(disjunctBlocks[2]._srcDisjunct(), - m.simpledisjunct.innerdisjunct1) + self.assertIs( + m.disjunct[1].innerdisjunct[0].transformation_block, disjunctBlocks[4] + ) + self.assertIs(disjunctBlocks[4]._src_disjunct(), m.disjunct[1].innerdisjunct[0]) + + self.assertIs( + m.disjunct[1].innerdisjunct[1].transformation_block, disjunctBlocks[5] + ) + self.assertIs(disjunctBlocks[5]._src_disjunct(), m.disjunct[1].innerdisjunct[1]) + + self.assertIs( + m.simpledisjunct.innerdisjunct0.transformation_block, disjunctBlocks[0] + ) + self.assertIs( + disjunctBlocks[0]._src_disjunct(), m.simpledisjunct.innerdisjunct0 + ) + + self.assertIs( + m.simpledisjunct.innerdisjunct1.transformation_block, disjunctBlocks[1] + ) + self.assertIs( + disjunctBlocks[1]._src_disjunct(), m.simpledisjunct.innerdisjunct1 + ) def test_m_value_mappings(self): m = models.makeNestedDisjunctions() @@ -1813,9 +1824,9 @@ def test_m_value_mappings(self): bigms = {m.disjunct[1].innerdisjunct[0]: 89} bigm.apply_to(m, bigM=bigms) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src( - m.disjunct[1].innerdisjunct[0].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.disjunct[1].innerdisjunct[0].c + ) self.assertIs(l_src, bigms) self.assertIs(u_src, bigms) self.assertIs(l_key, m.disjunct[1].innerdisjunct[0]) @@ -1823,9 +1834,9 @@ def test_m_value_mappings(self): self.assertEqual(l_val, -89) self.assertEqual(u_val, 89) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src( - m.disjunct[1].innerdisjunct[1].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.disjunct[1].innerdisjunct[1].c + ) self.assertIsNone(l_src) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -1833,8 +1844,9 @@ def test_m_value_mappings(self): self.assertEqual(l_val, -5) self.assertIsNone(u_val) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.disjunct[0].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.disjunct[0].c + ) self.assertIsNone(l_src) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -1842,8 +1854,9 @@ def test_m_value_mappings(self): self.assertEqual(l_val, -11) self.assertEqual(u_val, 7) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src(m.disjunct[1].c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.disjunct[1].c + ) self.assertIsNone(l_src) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -1851,9 +1864,9 @@ def test_m_value_mappings(self): self.assertIsNone(l_val) self.assertEqual(u_val, 21) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src( - m.simpledisjunct.innerdisjunct0.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisjunct.innerdisjunct0.c + ) self.assertIsNone(l_src) self.assertIs(u_src, m.simpledisjunct.BigM) self.assertIsNone(l_key) @@ -1861,9 +1874,9 @@ def test_m_value_mappings(self): self.assertIsNone(l_val) self.assertEqual(u_val, 42) - ((l_val, l_src, l_key), - (u_val, u_src, u_key)) = bigm.get_M_value_src( - m.simpledisjunct.innerdisjunct1.c) + ((l_val, l_src, l_key), (u_val, u_src, u_key)) = bigm.get_M_value_src( + m.simpledisjunct.innerdisjunct1.c + ) self.assertIs(l_src, m.simpledisjunct.BigM) self.assertIsNone(u_src) self.assertIsNone(l_key) @@ -1881,22 +1894,19 @@ def check_bigM_constraint(self, cons, variable, M, indicator_var): ct.check_linear_coef(self, repn, variable, 1) ct.check_linear_coef(self, repn, indicator_var, M) - def check_xor_relaxation(self, cons, indvar1, indvar2, indvar3, lb): + def check_inner_xor_constraint( + self, inner_disjunction, outer_disjunct, inner_disjuncts + ): + self.assertIsNotNone(inner_disjunction.algebraic_constraint) + cons = inner_disjunction.algebraic_constraint + self.assertEqual(cons.lower, 0) + self.assertEqual(cons.upper, 0) repn = generate_standard_repn(cons.body) self.assertTrue(repn.is_linear()) - self.assertEqual(len(repn.linear_vars), 3) - ct.check_linear_coef(self, repn, indvar1, 1) - ct.check_linear_coef(self, repn, indvar2, 1) - if not lb: - self.assertEqual(cons.upper, 1) - self.assertIsNone(cons.lower) - self.assertEqual(repn.constant, -1) - ct.check_linear_coef(self, repn, indvar3, 1) - else: - self.assertEqual(cons.lower, 1) - self.assertIsNone(cons.upper) - self.assertEqual(repn.constant, 1) - ct.check_linear_coef(self, repn, indvar3, -1) + self.assertEqual(repn.constant, 0) + for disj in inner_disjuncts: + ct.check_linear_coef(self, repn, disj.binary_indicator_var, 1) + ct.check_linear_coef(self, repn, outer_disjunct.binary_indicator_var, -1) def test_transformed_constraints(self): # We'll check all the transformed constraints to make sure @@ -1904,101 +1914,126 @@ def test_transformed_constraints(self): # xor constraints created by the inner disjunctions get # transformed by the outer ones. m = models.makeNestedDisjunctions() - TransformationFactory('gdp.bigm').apply_to(m) - cons1 = m.disjunct[1].innerdisjunct[0].transformation_block().component( - m.disjunct[1].innerdisjunct[0].c.name) - cons1lb = cons1['lb'] + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) + cons1 = bigm.get_transformed_constraints(m.disjunct[1].innerdisjunct[0].c) + self.assertEqual(len(cons1), 2) + cons1lb = cons1[0] + cons1ub = cons1[1] self.assertEqual(cons1lb.lower, 0) self.assertIsNone(cons1lb.upper) - self.assertIs(cons1lb.body, m.z) - cons1ub = cons1['ub'] + assertExpressionsEqual( + self, + cons1lb.body, + EXPR.SumExpression( + [ + m.z, + EXPR.NegationExpression( + ( + EXPR.ProductExpression( + ( + 0.0, + EXPR.LinearExpression( + [ + 1, + EXPR.MonomialTermExpression( + ( + -1, + m.disjunct[1] + .innerdisjunct[0] + .binary_indicator_var, + ) + ), + ] + ), + ) + ), + ) + ), + ] + ), + ) self.assertIsNone(cons1ub.lower) self.assertEqual(cons1ub.upper, 0) - self.check_bigM_constraint(cons1ub, m.z, 10, - m.disjunct[1].innerdisjunct[0].indicator_var) - - cons2 = m.disjunct[1].innerdisjunct[1].transformation_block().component( - m.disjunct[1].innerdisjunct[1].c.name)['lb'] - self.assertEqual(cons2.lower, 5) - self.assertIsNone(cons2.upper) - self.check_bigM_constraint(cons2, m.z, -5, - m.disjunct[1].innerdisjunct[1].indicator_var) - - cons3 = m.simpledisjunct.innerdisjunct0.transformation_block().\ - component( - m.simpledisjunct.innerdisjunct0.c.name)['ub'] - self.assertEqual(cons3.upper, 2) - self.assertIsNone(cons3.lower) self.check_bigM_constraint( - cons3, m.x, 7, - m.simpledisjunct.innerdisjunct0.indicator_var) - - cons4 = m.simpledisjunct.innerdisjunct1.transformation_block().\ - component( - m.simpledisjunct.innerdisjunct1.c.name)['lb'] - self.assertEqual(cons4.lower, 4) - self.assertIsNone(cons4.upper) + cons1ub, m.z, 10, m.disjunct[1].innerdisjunct[0].indicator_var + ) + + cons2 = bigm.get_transformed_constraints(m.disjunct[1].innerdisjunct[1].c) + self.assertEqual(len(cons2), 1) + cons2lb = cons2[0] + self.assertEqual(cons2lb.lower, 5) + self.assertIsNone(cons2lb.upper) + self.check_bigM_constraint( + cons2lb, m.z, -5, m.disjunct[1].innerdisjunct[1].indicator_var + ) + + cons3 = bigm.get_transformed_constraints(m.simpledisjunct.innerdisjunct0.c) + self.assertEqual(len(cons3), 1) + cons3ub = cons3[0] + self.assertEqual(cons3ub.upper, 2) + self.assertIsNone(cons3ub.lower) + self.check_bigM_constraint( + cons3ub, m.x, 7, m.simpledisjunct.innerdisjunct0.indicator_var + ) + + cons4 = bigm.get_transformed_constraints(m.simpledisjunct.innerdisjunct1.c) + self.assertEqual(len(cons4), 1) + cons4lb = cons4[0] + self.assertEqual(cons4lb.lower, 4) + self.assertIsNone(cons4lb.upper) self.check_bigM_constraint( - cons4, m.x, -13, - m.simpledisjunct.innerdisjunct1.indicator_var) + cons4lb, m.x, -13, m.simpledisjunct.innerdisjunct1.indicator_var + ) # Here we check that the xor constraint from # simpledisjunct.innerdisjunction is transformed. - cons5 = m.simpledisjunct.transformation_block().component( - "simpledisjunct._pyomo_gdp_bigm_reformulation.'simpledisjunct." - "innerdisjunction_xor'") - cons5lb = cons5['lb'] - self.check_xor_relaxation( - cons5lb, - m.simpledisjunct.innerdisjunct0.indicator_var, - m.simpledisjunct.innerdisjunct1.indicator_var, - m.simpledisjunct.indicator_var, - lb=True) - cons5ub = cons5['ub'] - self.check_xor_relaxation( - cons5ub, - m.simpledisjunct.innerdisjunct0.indicator_var, - m.simpledisjunct.innerdisjunct1.indicator_var, - m.simpledisjunct.indicator_var, - lb=False) - - cons6 = m.disjunct[0].transformation_block().component("disjunct[0].c") - cons6lb = cons6['lb'] + cons5 = m.simpledisjunct.innerdisjunction.algebraic_constraint + self.assertIsNotNone(cons5) + self.check_inner_xor_constraint( + m.simpledisjunct.innerdisjunction, + m.simpledisjunct, + [m.simpledisjunct.innerdisjunct0, m.simpledisjunct.innerdisjunct1], + ) + self.assertIsInstance(cons5, Constraint) + self.assertEqual(cons5.lower, 0) + self.assertEqual(cons5.upper, 0) + repn = generate_standard_repn(cons5.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + ct.check_linear_coef( + self, repn, m.simpledisjunct.innerdisjunct0.binary_indicator_var, 1 + ) + ct.check_linear_coef( + self, repn, m.simpledisjunct.innerdisjunct1.binary_indicator_var, 1 + ) + ct.check_linear_coef(self, repn, m.simpledisjunct.binary_indicator_var, -1) + + cons6 = bigm.get_transformed_constraints(m.disjunct[0].c) + self.assertEqual(len(cons6), 2) + cons6lb = cons6[0] self.assertIsNone(cons6lb.upper) self.assertEqual(cons6lb.lower, 2) - self.check_bigM_constraint(cons6lb, m.x, -11, - m.disjunct[0].indicator_var) - cons6ub = cons6['ub'] + self.check_bigM_constraint(cons6lb, m.x, -11, m.disjunct[0].indicator_var) + cons6ub = cons6[1] self.assertIsNone(cons6ub.lower) self.assertEqual(cons6ub.upper, 2) self.check_bigM_constraint(cons6ub, m.x, 7, m.disjunct[0].indicator_var) - # now we check that the xor constraint from - # disjunct[1].innerdisjunction gets transformed alongside the - # other constraint in disjunct[1]. - cons7 = m.disjunct[1].transformation_block().component( - "disjunct[1]._pyomo_gdp_bigm_reformulation.'disjunct[1]." - "innerdisjunction_xor'") - cons7lb = cons7[0,'lb'] - self.check_xor_relaxation( - cons7lb, - m.disjunct[1].innerdisjunct[0].indicator_var, - m.disjunct[1].innerdisjunct[1].indicator_var, - m.disjunct[1].indicator_var, - lb=True) - cons7ub = cons7[0,'ub'] - self.check_xor_relaxation( - cons7ub, - m.disjunct[1].innerdisjunct[0].indicator_var, - m.disjunct[1].innerdisjunct[1].indicator_var, - m.disjunct[1].indicator_var, - lb=False) - - cons8 = m.disjunct[1].transformation_block().component( - "disjunct[1].c")['ub'] - self.assertIsNone(cons8.lower) - self.assertEqual(cons8.upper, 2) - self.check_bigM_constraint(cons8, m.a, 21, m.disjunct[1].indicator_var) + # now we check that the xor constraint from disjunct[1].innerdisjunction + # is correct. + self.check_inner_xor_constraint( + m.disjunct[1].innerdisjunction[0], + m.disjunct[1], + [m.disjunct[1].innerdisjunct[0], m.disjunct[1].innerdisjunct[1]], + ) + + cons8 = bigm.get_transformed_constraints(m.disjunct[1].c) + self.assertEqual(len(cons8), 1) + cons8ub = cons8[0] + self.assertIsNone(cons8ub.lower) + self.assertEqual(cons8ub.upper, 2) + self.check_bigM_constraint(cons8ub, m.a, 21, m.disjunct[1].indicator_var) def test_unique_reference_to_nested_indicator_var(self): ct.check_unique_reference_to_nested_indicator_var(self, 'bigm') @@ -2039,11 +2074,13 @@ def test_indexed_nested_disjunction(self): # the second time. m = ConcreteModel() m.d1 = Disjunct() - m.d1.indexedDisjunct1 = Disjunct([0,1]) - m.d1.indexedDisjunct2 = Disjunct([0,1]) - @m.d1.Disjunction([0,1]) + m.d1.indexedDisjunct1 = Disjunct([0, 1]) + m.d1.indexedDisjunct2 = Disjunct([0, 1]) + + @m.d1.Disjunction([0, 1]) def innerIndexed(d, i): return [d.indexedDisjunct1[i], d.indexedDisjunct2[i]] + m.d2 = Disjunct() m.outer = Disjunction(expr=[m.d1, m.d2]) @@ -2051,16 +2088,19 @@ def innerIndexed(d, i): # we check that they all ended up on the same Block in the end (I don't # really care in what order for this test) - disjuncts = [m.d1, m.d2, m.d1.indexedDisjunct1[0], - m.d1.indexedDisjunct1[1], m.d1.indexedDisjunct2[0], - m.d1.indexedDisjunct2[1]] + disjuncts = [ + m.d1, + m.d2, + m.d1.indexedDisjunct1[0], + m.d1.indexedDisjunct1[1], + m.d1.indexedDisjunct2[0], + m.d1.indexedDisjunct2[1], + ] for disjunct in disjuncts: - self.assertIs(disjunct.transformation_block().parent_component(), - m._pyomo_gdp_bigm_reformulation.relaxedDisjuncts) - - # and we check that nothing remains on original transformation block - self.assertEqual(len(m.d1._pyomo_gdp_bigm_reformulation.\ - relaxedDisjuncts), 0) + self.assertIs( + disjunct.transformation_block.parent_component(), + m._pyomo_gdp_bigm_reformulation.relaxedDisjuncts, + ) def check_first_disjunct_constraint(self, disj1c, x, ind_var): self.assertEqual(len(disj1c), 1) @@ -2085,63 +2125,71 @@ def check_second_disjunct_constraint(self, disj2c, x, ind_var): self.assertTrue(repn.is_quadratic()) self.assertEqual(len(repn.linear_vars), 5) self.assertEqual(len(repn.quadratic_vars), 4) - self.assertEqual(repn.constant, -63) # M = 99, so this is 36 - 99 + self.assertEqual(repn.constant, -63) # M = 99, so this is 36 - 99 ct.check_linear_coef(self, repn, ind_var, 99) for i in range(1, 5): ct.check_squared_term_coef(self, repn, x[i], 1) ct.check_linear_coef(self, repn, x[i], -6) def check_hierarchical_nested_model(self, m, bigm): - outer_xor = m.disjunction_block.disjunction.algebraic_constraint() - ct.check_two_term_disjunction_xor(self, outer_xor, m.disj1, - m.disjunct_block.disj2) - - inner_xor = m.disjunct_block.disj2.disjunction.algebraic_constraint() - xformed = bigm.get_transformed_constraints(inner_xor) - self.assertEqual(len(xformed), 2) - leq = xformed[0] - self.assertIsNone(leq.upper) - self.assertEqual(leq.lower, 1) - repn = generate_standard_repn(leq.body) + outer_xor = m.disjunction_block.disjunction.algebraic_constraint + ct.check_two_term_disjunction_xor( + self, outer_xor, m.disj1, m.disjunct_block.disj2 + ) + + inner_xor = m.disjunct_block.disj2.disjunction.algebraic_constraint + self.assertEqual(inner_xor.lower, 0) + self.assertEqual(inner_xor.upper, 0) + repn = generate_standard_repn(inner_xor.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 3) - self.assertEqual(repn.constant, 1) - ct.check_linear_coef(self, repn, - m.disjunct_block.disj2.disjunction_disjuncts[0].\ - binary_indicator_var, 1) - ct.check_linear_coef(self, repn, - m.disjunct_block.disj2.disjunction_disjuncts[1].\ - binary_indicator_var, 1) - ct.check_linear_coef(self, repn, - m.disjunct_block.disj2.binary_indicator_var, -1) + self.assertEqual(repn.constant, 0) + ct.check_linear_coef( + self, + repn, + m.disjunct_block.disj2.disjunction_disjuncts[0].binary_indicator_var, + 1, + ) + ct.check_linear_coef( + self, + repn, + m.disjunct_block.disj2.disjunction_disjuncts[1].binary_indicator_var, + 1, + ) + ct.check_linear_coef( + self, repn, m.disjunct_block.disj2.binary_indicator_var, -1 + ) # outer disjunction constraints disj1c = bigm.get_transformed_constraints(m.disj1.c) - self.check_first_disjunct_constraint(disj1c, m.x, - m.disj1.binary_indicator_var) + self.check_first_disjunct_constraint(disj1c, m.x, m.disj1.binary_indicator_var) disj2c = bigm.get_transformed_constraints(m.disjunct_block.disj2.c) self.check_second_disjunct_constraint( - disj2c, m.x, - m.disjunct_block.disj2.binary_indicator_var) + disj2c, m.x, m.disjunct_block.disj2.binary_indicator_var + ) # inner disjunction constraints innerd1c = bigm.get_transformed_constraints( - m.disjunct_block.disj2.disjunction_disjuncts[0].constraint[1]) + m.disjunct_block.disj2.disjunction_disjuncts[0].constraint[1] + ) self.check_first_disjunct_constraint( - innerd1c, m.x, - m.disjunct_block.disj2.disjunction_disjuncts[0].\ - binary_indicator_var) + innerd1c, + m.x, + m.disjunct_block.disj2.disjunction_disjuncts[0].binary_indicator_var, + ) innerd2c = bigm.get_transformed_constraints( - m.disjunct_block.disj2.disjunction_disjuncts[1].constraint[1]) + m.disjunct_block.disj2.disjunction_disjuncts[1].constraint[1] + ) self.check_second_disjunct_constraint( - innerd2c, m.x, - m.disjunct_block.disj2.disjunction_disjuncts[1].\ - binary_indicator_var) + innerd2c, + m.x, + m.disjunct_block.disj2.disjunction_disjuncts[1].binary_indicator_var, + ) def test_hierarchical_badly_ordered_targets(self): - m = models.makeHierarchicalNested_DeclOrderMatchesInstantationOrder() + m = models.makeHierarchicalNested_DeclOrderMatchesInstantiationOrder() bigm = TransformationFactory('gdp.bigm') bigm.apply_to(m, targets=[m.disjunction_block, m.disjunct_block.disj2]) @@ -2153,7 +2201,7 @@ def test_hierarchical_badly_ordered_targets(self): def test_decl_order_opposite_instantiation_order(self): # In this test, we create the same problem as above, but we don't even # need targets! - m = models.makeHierarchicalNested_DeclOrderOppositeInstantationOrder() + m = models.makeHierarchicalNested_DeclOrderOppositeInstantiationOrder() bigm = TransformationFactory('gdp.bigm') bigm.apply_to(m) @@ -2161,6 +2209,7 @@ def test_decl_order_opposite_instantiation_order(self): # the same check to make sure everything is transformed correctly. self.check_hierarchical_nested_model(m, bigm) + class IndexedDisjunction(unittest.TestCase): # this tests that if the targets are a subset of the # _DisjunctDatas in an IndexedDisjunction that the xor constraint @@ -2171,6 +2220,7 @@ def test_xor_constraint(self): def test_partial_deactivate_indexed_disjunction(self): ct.check_partial_deactivate_indexed_disjunction(self, 'bigm') + class BlocksOnDisjuncts(unittest.TestCase): # ESJ: All of these tests are specific to bigm because they check how much # stuff is on the transformation blocks. @@ -2180,79 +2230,80 @@ def setUp(self): def test_transformed_constraint_nameConflicts(self): m = models.makeTwoTermDisj_BlockOnDisj() - TransformationFactory('gdp.bigm').apply_to(m) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) transBlock = m._pyomo_gdp_bigm_reformulation disjBlock = transBlock.relaxedDisjuncts self.assertIsInstance(disjBlock, Block) self.assertEqual(len(disjBlock), 2) - self.assertEqual(len(disjBlock[0].component_map()), 2) - self.assertEqual(len(disjBlock[1].component_map()), 5) - self.assertIsInstance(disjBlock[0].component("evil[0].c"), Constraint) - self.assertIsInstance(disjBlock[1].component("evil[1].'b.c'"), - Constraint) - self.assertIsInstance(disjBlock[1].component("evil[1].bb[1].c"), - Constraint) - self.assertIsInstance( - disjBlock[1].component("evil[1].'b.c'"), Constraint) - self.assertIsInstance( - disjBlock[1].component("evil[1].b.anotherblock.c"), - Constraint) - self.assertIsInstance(disjBlock[0].component("localVarReferences"), - Block) - self.assertIsInstance(disjBlock[1].component("localVarReferences"), - Block) - - def test_do_not_transform_deactivated_constraint(self): - m = models.makeTwoTermDisj_BlockOnDisj() - m.evil[1].b.anotherblock.c.deactivate() - - TransformationFactory('gdp.bigm').apply_to(m) + evil0 = bigm.get_transformed_constraints(m.evil[0].c) + self.assertEqual(len(evil0), 1) + self.assertIs(evil0[0].parent_block(), disjBlock[0]) + + evil1 = bigm.get_transformed_constraints(m.evil[1].component('b.c')) + self.assertEqual(len(evil1), 1) + self.assertIs(evil1[0].parent_block(), disjBlock[1]) + evil1 = bigm.get_transformed_constraints(m.evil[1].b.c) + self.assertEqual(len(evil1), 2) + self.assertIs(evil1[0].parent_block(), disjBlock[1]) + self.assertIs(evil1[1].parent_block(), disjBlock[1]) + evil1 = bigm.get_transformed_constraints(m.evil[1].b.anotherblock.c) + self.assertEqual(len(evil1), 1) + self.assertIs(evil1[0].parent_block(), disjBlock[1]) + evil1 = bigm.get_transformed_constraints(m.evil[1].bb[1].c) + self.assertEqual(len(evil1), 2) + self.assertIs(evil1[0].parent_block(), disjBlock[1]) + self.assertIs(evil1[1].parent_block(), disjBlock[1]) + + def check_all_but_evil1_b_anotherblock_constraint_transformed(self, m): + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) transBlock = m._pyomo_gdp_bigm_reformulation disjBlock = transBlock.relaxedDisjuncts self.assertIsInstance(disjBlock, Block) self.assertEqual(len(disjBlock), 2) - self.assertEqual(len(disjBlock[0].component_map()), 2) - self.assertEqual(len(disjBlock[1].component_map()), 4) - self.assertIsInstance(disjBlock[0].component("evil[0].c"), Constraint) - self.assertIsInstance(disjBlock[1].component("evil[1].'b.c'"), - Constraint) - self.assertIsInstance(disjBlock[1].component("evil[1].bb[1].c"), - Constraint) - self.assertIsInstance( - disjBlock[1].component("evil[1].'b.c'"), Constraint) - self.assertIsInstance(disjBlock[0].component("localVarReferences"), - Block) - self.assertIsInstance(disjBlock[1].component("localVarReferences"), - Block) + evil0 = bigm.get_transformed_constraints(m.evil[0].c) + self.assertEqual(len(evil0), 1) + self.assertIs(evil0[0].parent_block(), disjBlock[0]) + + evil1 = bigm.get_transformed_constraints(m.evil[1].component('b.c')) + self.assertEqual(len(evil1), 1) + self.assertIs(evil1[0].parent_block(), disjBlock[1]) + evil1 = bigm.get_transformed_constraints(m.evil[1].b.c) + self.assertEqual(len(evil1), 2) + self.assertIs(evil1[0].parent_block(), disjBlock[1]) + self.assertIs(evil1[1].parent_block(), disjBlock[1]) + out = StringIO() + with LoggingIntercept(out, 'pyomo.gdp', logging.ERROR): + self.assertRaisesRegex( + KeyError, + r".*.evil\[1\].b.anotherblock.c", + bigm.get_transformed_constraints, + m.evil[1].b.anotherblock.c, + ) + self.assertRegex( + out.getvalue(), + r".*Constraint 'evil\[1\].b.anotherblock.c' has not been transformed.", + ) + evil1 = bigm.get_transformed_constraints(m.evil[1].bb[1].c) + self.assertEqual(len(evil1), 2) + self.assertIs(evil1[0].parent_block(), disjBlock[1]) + self.assertIs(evil1[1].parent_block(), disjBlock[1]) + + def test_do_not_transform_deactivated_constraint(self): + m = models.makeTwoTermDisj_BlockOnDisj() + m.evil[1].b.anotherblock.c.deactivate() + self.check_all_but_evil1_b_anotherblock_constraint_transformed(m) def test_do_not_transform_deactivated_block(self): m = models.makeTwoTermDisj_BlockOnDisj() m.evil[1].b.anotherblock.deactivate() - TransformationFactory('gdp.bigm').apply_to(m) - - transBlock = m._pyomo_gdp_bigm_reformulation - disjBlock = transBlock.relaxedDisjuncts - - self.assertIsInstance(disjBlock, Block) - self.assertEqual(len(disjBlock), 2) - self.assertEqual(len(disjBlock[0].component_map()), 2) - self.assertEqual(len(disjBlock[1].component_map()), 4) - self.assertIsInstance(disjBlock[0].component("evil[0].c"), Constraint) - self.assertIsInstance(disjBlock[1].component("evil[1].'b.c'"), - Constraint) - self.assertIsInstance(disjBlock[1].component("evil[1].bb[1].c"), - Constraint) - self.assertIsInstance( - disjBlock[1].component("evil[1].'b.c'"), Constraint) - self.assertIsInstance(disjBlock[0].component("localVarReferences"), - Block) - self.assertIsInstance(disjBlock[1].component("localVarReferences"), - Block) + self.check_all_but_evil1_b_anotherblock_constraint_transformed(m) def test_pick_up_bigm_suffix_on_block(self): m = models.makeTwoTermDisj_BlockOnDisj() @@ -2264,7 +2315,6 @@ def test_pick_up_bigm_suffix_on_block(self): # check that the m value got used cons_list = bigm.get_transformed_constraints(m.evil[1].b.c) ub = cons_list[1] - self.assertEqual(ub.index(), 'ub') self.assertEqual(ub.upper, 0) self.assertIsNone(ub.lower) repn = generate_standard_repn(ub.body) @@ -2283,7 +2333,7 @@ def test_use_correct_none_suffix(self): m.b.d = Disjunct() m.b.d.foo = Block() - m.b.d.c = Constraint(expr=m.x>=9) + m.b.d.c = Constraint(expr=m.x >= 9) m.b.BigM = Suffix() m.b.BigM[None] = 10 @@ -2299,7 +2349,6 @@ def test_use_correct_none_suffix(self): # we should have picked up 10 for m.b.d.c cons_list = bigm.get_transformed_constraints(m.b.d.c) lb = cons_list[0] - self.assertEqual(lb.index(), 'lb') self.assertEqual(lb.lower, 9) self.assertIsNone(lb.upper) repn = generate_standard_repn(lb.body) @@ -2311,6 +2360,7 @@ def test_use_correct_none_suffix(self): self.assertIs(repn.linear_vars[1], m.b.d.binary_indicator_var) self.assertEqual(repn.linear_coefs[1], -10) + class UntransformableObjectsOnDisjunct(unittest.TestCase): def test_RangeSet(self): ct.check_RangeSet(self, 'bigm') @@ -2318,6 +2368,7 @@ def test_RangeSet(self): def test_Expression(self): ct.check_Expression(self, 'bigm') + class TransformABlock(unittest.TestCase): def test_transformation_simple_block(self): ct.check_transformation_simple_block(self, 'bigm') @@ -2334,6 +2385,7 @@ def test_block_data_target(self): def test_indexed_block_target(self): ct.check_indexed_block_target(self, 'bigm') + class IndexedDisjunctions(unittest.TestCase): def setUp(self): # set seed so we can test name collisions predictably @@ -2343,7 +2395,7 @@ def test_disjunction_data_target(self): ct.check_disjunction_data_target(self, 'bigm') def test_disjunction_data_target_any_index(self): - ct.check_disjunction_data_target_any_index(self, 'bigm') + ct.check_disjunction_data_target_any_index(self, 'bigm') # ESJ: This and the following tests are *very* similar to those in hull, # but I actually bothered to check the additional transformed objects in @@ -2355,90 +2407,88 @@ def check_trans_block_disjunctions_of_disjunct_datas(self, m): self.assertIsInstance(transBlock1.component("relaxedDisjuncts"), Block) # We end up with a transformation block for every ScalarDisjunction or # IndexedDisjunction. - self.assertEqual(len(transBlock1.relaxedDisjuncts), 2) - self.assertIsInstance(transBlock1.relaxedDisjuncts[0].component( - "firstTerm[1].cons"), Constraint) - self.assertEqual(len(transBlock1.relaxedDisjuncts[0].component( - "firstTerm[1].cons")), 2) - self.assertIsInstance(transBlock1.relaxedDisjuncts[1].component( - "secondTerm[1].cons"), Constraint) - self.assertEqual(len(transBlock1.relaxedDisjuncts[1].component( - "secondTerm[1].cons")), 1) - transBlock2 = m.component("_pyomo_gdp_bigm_reformulation_4") - self.assertIsInstance(transBlock2, Block) - self.assertIsInstance(transBlock2.component("relaxedDisjuncts"), Block) - self.assertEqual(len(transBlock2.relaxedDisjuncts), 2) - self.assertIsInstance(transBlock2.relaxedDisjuncts[0].component( - "firstTerm[2].cons"), Constraint) - self.assertEqual(len(transBlock2.relaxedDisjuncts[0].component( - "firstTerm[2].cons")), 2) - self.assertIsInstance(transBlock2.relaxedDisjuncts[1].component( - "secondTerm[2].cons"), Constraint) - self.assertEqual(len(transBlock2.relaxedDisjuncts[1].component( - "secondTerm[2].cons")), 1) + bigm = TransformationFactory('gdp.bigm') + self.assertEqual(len(transBlock1.relaxedDisjuncts), 4) + firstTerm1 = bigm.get_transformed_constraints(m.firstTerm[1].cons) + self.assertIs(firstTerm1[0].parent_block(), transBlock1.relaxedDisjuncts[0]) + self.assertEqual(len(firstTerm1), 2) + secondTerm1 = bigm.get_transformed_constraints(m.secondTerm[1].cons) + self.assertIs(secondTerm1[0].parent_block(), transBlock1.relaxedDisjuncts[1]) + self.assertEqual(len(secondTerm1), 1) + firstTerm2 = bigm.get_transformed_constraints(m.firstTerm[2].cons) + self.assertIs(firstTerm2[0].parent_block(), transBlock1.relaxedDisjuncts[2]) + self.assertEqual(len(firstTerm2), 2) + secondTerm2 = bigm.get_transformed_constraints(m.secondTerm[2].cons) + self.assertIs(secondTerm2[0].parent_block(), transBlock1.relaxedDisjuncts[3]) + self.assertEqual(len(secondTerm2), 1) def test_simple_disjunction_of_disjunct_datas(self): ct.check_simple_disjunction_of_disjunct_datas(self, 'bigm') def test_any_indexed_disjunction_of_disjunct_datas(self): m = models.makeAnyIndexedDisjunctionOfDisjunctDatas() - TransformationFactory('gdp.bigm').apply_to(m) + bigm = TransformationFactory('gdp.bigm') + bigm.apply_to(m) transBlock = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) self.assertEqual(len(transBlock.relaxedDisjuncts), 4) - self.assertIsInstance(transBlock.relaxedDisjuncts[0].component( - "firstTerm[1].cons"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[0].component( - "firstTerm[1].cons")), 2) - self.assertIsInstance(transBlock.relaxedDisjuncts[1].component( - "secondTerm[1].cons"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[1].component( - "secondTerm[1].cons")), 1) - self.assertIsInstance(transBlock.relaxedDisjuncts[2].component( - "firstTerm[2].cons"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[2].component( - "firstTerm[2].cons")), 2) - self.assertIsInstance(transBlock.relaxedDisjuncts[3].component( - "secondTerm[2].cons"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[3].component( - "secondTerm[2].cons")), 1) - self.assertIsInstance( transBlock.component("disjunction_xor"), - Constraint) - self.assertEqual( len(transBlock.component("disjunction_xor")), 2) + firstTerm1 = bigm.get_transformed_constraints(m.firstTerm[1].cons) + self.assertIs(firstTerm1[0].parent_block(), transBlock.relaxedDisjuncts[0]) + self.assertEqual(len(firstTerm1), 2) + secondTerm1 = bigm.get_transformed_constraints(m.secondTerm[1].cons) + self.assertIs(secondTerm1[0].parent_block(), transBlock.relaxedDisjuncts[1]) + self.assertEqual(len(secondTerm1), 1) + firstTerm2 = bigm.get_transformed_constraints(m.firstTerm[2].cons) + self.assertIs(firstTerm2[0].parent_block(), transBlock.relaxedDisjuncts[2]) + self.assertEqual(len(firstTerm1), 2) + secondTerm2 = bigm.get_transformed_constraints(m.secondTerm[2].cons) + self.assertIs(secondTerm2[0].parent_block(), transBlock.relaxedDisjuncts[3]) + self.assertEqual(len(secondTerm1), 1) + + self.assertIsInstance( + m.disjunction[1].algebraic_constraint.parent_component(), Constraint + ) + self.assertIsInstance( + m.disjunction[2].algebraic_constraint.parent_component(), Constraint + ) def check_first_iteration(self, model): transBlock = model.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) - self.assertIsInstance( - transBlock.component("disjunctionList_xor"), - Constraint) - self.assertEqual( - len(transBlock.disjunctionList_xor), 1) + self.assertIsInstance(transBlock.component("disjunctionList_xor"), Constraint) + self.assertEqual(len(transBlock.disjunctionList_xor), 1) self.assertFalse(model.disjunctionList[0].active) def check_second_iteration(self, model): - transBlock = model.component("_pyomo_gdp_bigm_reformulation") + transBlock = model.component("_pyomo_gdp_bigm_reformulation_4") self.assertIsInstance(transBlock, Block) self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) - self.assertEqual(len(transBlock.relaxedDisjuncts), 4) + self.assertEqual(len(transBlock.relaxedDisjuncts), 2) + bigm = TransformationFactory('gdp.bigm') + if model.component('firstTerm') is None: - firstTerm = "'firstTerm[1]'.cons" - secondTerm = "'secondTerm[1]'.cons" + firstTerm1 = model.component('firstTerm[1]') + secondTerm1 = model.component('secondTerm[1]') else: - firstTerm = "firstTerm[1].cons" - secondTerm = "secondTerm[1].cons" - self.assertIsInstance(transBlock.relaxedDisjuncts[2].component( - firstTerm), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[2].component( - firstTerm)), 2) - self.assertIsInstance(transBlock.relaxedDisjuncts[3].component( - secondTerm), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[3].component( - secondTerm)), 1) - self.assertEqual( - len(model._pyomo_gdp_bigm_reformulation.disjunctionList_xor), 2) + firstTerm1 = model.firstTerm[1] + secondTerm1 = model.secondTerm[1] + + firstTerm = bigm.get_transformed_constraints(firstTerm1.cons) + self.assertIs(firstTerm[0].parent_block(), transBlock.relaxedDisjuncts[0]) + self.assertEqual(len(firstTerm), 2) + + secondTerm = bigm.get_transformed_constraints(secondTerm1.cons) + self.assertIs(secondTerm[0].parent_block(), transBlock.relaxedDisjuncts[1]) + self.assertEqual(len(secondTerm), 1) + + self.assertIsInstance( + model.disjunctionList[1].algebraic_constraint.parent_component(), Constraint + ) + self.assertIsInstance( + model.disjunctionList[0].algebraic_constraint.parent_component(), Constraint + ) self.assertFalse(model.disjunctionList[1].active) self.assertFalse(model.disjunctionList[0].active) @@ -2446,61 +2496,62 @@ def test_disjunction_and_disjuncts_indexed_by_any(self): ct.check_disjunction_and_disjuncts_indexed_by_any(self, 'bigm') def test_iteratively_adding_disjunctions_transform_container(self): - ct.check_iteratively_adding_disjunctions_transform_container(self, - 'bigm') + ct.check_iteratively_adding_disjunctions_transform_container(self, 'bigm') def test_iteratively_adding_disjunctions_transform_model(self): ct.check_iteratively_adding_disjunctions_transform_model(self, 'bigm') def test_iteratively_adding_to_indexed_disjunction_on_block(self): - ct.check_iteratively_adding_to_indexed_disjunction_on_block(self, - 'bigm') + ct.check_iteratively_adding_to_indexed_disjunction_on_block(self, 'bigm') + class TestErrors(unittest.TestCase): def test_transform_empty_disjunction(self): ct.check_transform_empty_disjunction(self, 'bigm') def test_deactivated_disjunct_nonzero_indicator_var(self): - ct.check_deactivated_disjunct_nonzero_indicator_var(self, - 'bigm') + ct.check_deactivated_disjunct_nonzero_indicator_var(self, 'bigm') def test_deactivated_disjunct_unfixed_indicator_var(self): ct.check_deactivated_disjunct_unfixed_indicator_var(self, 'bigm') def test_infeasible_xor_because_all_disjuncts_deactivated(self): - m = ct.setup_infeasible_xor_because_all_disjuncts_deactivated(self, - 'bigm') + m = ct.setup_infeasible_xor_because_all_disjuncts_deactivated(self, 'bigm') + bigm = TransformationFactory('gdp.bigm') transBlock = m.component("_pyomo_gdp_bigm_reformulation") self.assertIsInstance(transBlock, Block) self.assertEqual(len(transBlock.relaxedDisjuncts), 2) - self.assertIsInstance(transBlock.component("disjunction_xor"), - Constraint) + self.assertIsInstance(transBlock.component("disjunction_xor"), Constraint) disjunct1 = transBlock.relaxedDisjuncts[0] - # longest constraint name EVER... - relaxed_xor = disjunct1.component( - "disjunction_disjuncts[0]._pyomo_gdp_bigm_reformulation." - "'disjunction_disjuncts[0].nestedDisjunction_xor'") - self.assertIsInstance(relaxed_xor, Constraint) - repn = generate_standard_repn(relaxed_xor['lb'].body) - self.assertEqual(relaxed_xor['lb'].lower, 1) - self.assertIsNone(relaxed_xor['lb'].upper) + relaxed_xor = bigm.get_transformed_constraints( + m.disjunction_disjuncts[0].nestedDisjunction.algebraic_constraint + ) + # It was an equality + self.assertEqual(len(relaxed_xor), 2) + self.assertIsInstance(relaxed_xor[0].parent_component(), Constraint) + relaxed_xor_lb = relaxed_xor[0] + relaxed_xor_ub = relaxed_xor[1] + repn = generate_standard_repn(relaxed_xor_lb.body) + self.assertEqual(relaxed_xor_lb.lower, 1) + self.assertIsNone(relaxed_xor_lb.upper) # the other variables got eaten in the constant because they are fixed. self.assertEqual(len(repn.linear_vars), 1) - ct.check_linear_coef( self, repn, - m.disjunction.disjuncts[0].indicator_var, -1) + ct.check_linear_coef(self, repn, m.disjunction.disjuncts[0].indicator_var, -1) self.assertEqual(repn.constant, 1) - repn = generate_standard_repn(relaxed_xor['ub'].body) - self.assertIsNone(relaxed_xor['ub'].lower) - self.assertEqual(value(relaxed_xor['ub'].upper), 1) + repn = generate_standard_repn(relaxed_xor_ub.body) + self.assertIsNone(relaxed_xor_ub.lower) + self.assertEqual(value(relaxed_xor_ub.upper), 1) self.assertEqual(len(repn.linear_vars), 1) - ct.check_linear_coef( self, repn, - m.disjunction.disjuncts[0].indicator_var, 1) + ct.check_linear_coef(self, repn, m.disjunction.disjuncts[0].indicator_var, 1) # and last check that the other constraints here look fine - x0 = disjunct1.component("disjunction_disjuncts[0].constraint") - self.assertIsInstance(x0, Constraint) - lb = x0[(1, 'lb')] + x0 = bigm.get_transformed_constraints(m.disjunction_disjuncts[0].constraint[1]) + self.assertEqual(len(x0), 2) + lb = x0[0] + ub = x0[1] + self.assertIsInstance(lb.parent_component(), Constraint) + # lb = x0[(1, 'lb')] self.assertEqual(value(lb.lower), 0) self.assertIsNone(lb.upper) repn = generate_standard_repn(lb.body) @@ -2508,22 +2559,22 @@ def test_infeasible_xor_because_all_disjuncts_deactivated(self): self.assertEqual(len(repn.linear_vars), 1) ct.check_linear_coef(self, repn, m.x, 1) - ub = x0[(1, 'ub')] + self.assertIsInstance(ub.parent_component(), Constraint) self.assertIsNone(ub.lower) self.assertEqual(value(ub.upper), 0) repn = generate_standard_repn(ub.body) self.assertEqual(repn.constant, -8) self.assertEqual(len(repn.linear_vars), 2) ct.check_linear_coef(self, repn, m.x, 1) - ct.check_linear_coef(self, repn, - m.disjunction_disjuncts[0].indicator_var, 8) + ct.check_linear_coef(self, repn, m.disjunction_disjuncts[0].indicator_var, 8) def test_retrieving_nondisjunctive_components(self): ct.check_retrieving_nondisjunctive_components(self, 'bigm') def test_ask_for_transformed_constraint_from_untransformed_disjunct(self): ct.check_ask_for_transformed_constraint_from_untransformed_disjunct( - self, 'bigm') + self, 'bigm' + ) def test_silly_target(self): ct.check_silly_target(self, 'bigm') @@ -2531,6 +2582,7 @@ def test_silly_target(self): def test_untransformed_arcs(self): ct.check_untransformed_network_raises_GDPError(self, 'bigm') + class EstimatingMwithFixedVars(unittest.TestCase): def test_tighter_Ms_when_vars_fixed_forever(self): m = ConcreteModel() @@ -2571,8 +2623,8 @@ def test_tighter_Ms_when_vars_fixed_forever(self): ct.check_linear_coef(self, repn, promise.x, 1) ct.check_linear_coef(self, repn, promise.d.indicator_var, 7) -class NetworkDisjuncts(unittest.TestCase, CommonTests): +class NetworkDisjuncts(unittest.TestCase, CommonTests): @unittest.skipIf(not ct.linear_solvers, "No linear solver available") def test_solution_maximize(self): ct.check_network_disjuncts(self, minimize=False, transformation='bigm') @@ -2581,7 +2633,7 @@ def test_solution_maximize(self): def test_solution_minimize(self): ct.check_network_disjuncts(self, minimize=True, transformation='bigm') -@unittest.skipUnless(sympy_available, "Sympy not available") + class LogicalConstraintsOnDisjuncts(unittest.TestCase): def test_logical_constraints_transformed(self): m = models.makeLogicalConstraintsOnDisjuncts() @@ -2595,56 +2647,178 @@ def test_logical_constraints_transformed(self): # first d[1]: cons = bigm.get_transformed_constraints( - m.d[1].logic_to_linear.transformed_constraints[1]) + m.d[1]._logical_to_disjunctive.transformed_constraints[1] + ) + # big-M transformation of z = 1 - y1: + # z <= 1 - y1 + (1 - d[1].indicator_var) + # z >= 1 - y1 - (1 - d[1].indicator_var) + z = m.d[1]._logical_to_disjunctive.auxiliary_vars[1] self.assertEqual(len(cons), 2) leq = cons[0] - self.assertEqual(leq.lower, 1) + self.assertEqual(leq.lower, 0) self.assertIsNone(leq.upper) repn = generate_standard_repn(leq.body) self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 2) - self.assertEqual(len(repn.linear_vars), 2) - ct.check_linear_coef(self, repn, y1, -1) - ct.check_linear_coef(self, repn, m.d[1].binary_indicator_var, -1) - # this is a stupid constraint + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, z + y1 - m.d[1].binary_indicator_var + ) geq = cons[1] + self.assertEqual(geq.upper, 0) self.assertIsNone(geq.lower) - self.assertEqual(geq.upper, 1) repn = generate_standard_repn(geq.body) self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 1) - self.assertEqual(len(repn.linear_vars), 1) - ct.check_linear_coef(self, repn, y1, -1) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, z + y1 + m.d[1].binary_indicator_var - 2 + ) # then d[4]: - # 1 <= 1 - Y[2] + Y[1] + z1 = m.d[4]._logical_to_disjunctive.auxiliary_vars[1] + z2 = m.d[4]._logical_to_disjunctive.auxiliary_vars[2] + z3 = m.d[4]._logical_to_disjunctive.auxiliary_vars[3] # fixed True cons = bigm.get_transformed_constraints( - m.d[4].logic_to_linear.transformed_constraints[1]) + m.d[4]._logical_to_disjunctive.transformed_constraints[1] + ) self.assertEqual(len(cons), 1) - leq = cons[0] - self.assertEqual(leq.lower, 1) - self.assertIsNone(leq.upper) - repn = generate_standard_repn(leq.body) + c = cons[0] + # (1 - z1) + (1 - y1) + y2 >= 1 - (1 - d4.ind_var) + self.assertIsNone(c.upper) + self.assertEqual(c.lower, 1) + repn = generate_standard_repn(c.body) self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 2) - self.assertEqual(len(repn.linear_vars), 3) - ct.check_linear_coef(self, repn, y1, 1) - ct.check_linear_coef(self, repn, y2, -1) - ct.check_linear_coef(self, repn, m.d[4].binary_indicator_var, -1) - # 1 <= 1 - Y[1] + Y[2] + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, -z1 - y1 + y2 - m.d[4].binary_indicator_var + 3 + ) cons = bigm.get_transformed_constraints( - m.d[4].logic_to_linear.transformed_constraints[2]) + m.d[4]._logical_to_disjunctive.transformed_constraints[2] + ) self.assertEqual(len(cons), 1) - leq = cons[0] - self.assertEqual(leq.lower, 1) - self.assertIsNone(leq.upper) - repn = generate_standard_repn(leq.body) + c = cons[0] + # z1 + 1 - (1 - y1) >= 1 - (1 - d4.ind_var) + self.assertIsNone(c.upper) + self.assertEqual(c.lower, 1) + repn = generate_standard_repn(c.body) self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 2) - self.assertEqual(len(repn.linear_vars), 3) - ct.check_linear_coef(self, repn, y2, 1) - ct.check_linear_coef(self, repn, y1, -1) - ct.check_linear_coef(self, repn, m.d[4].binary_indicator_var, -1) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, y1 + z1 - m.d[4].binary_indicator_var + 1 + ) + cons = bigm.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[3] + ) + self.assertEqual(len(cons), 1) + c = cons[0] + # z1 + (1 - y2) >= 1 - (1 - d4.ind_var) + self.assertIsNone(c.upper) + self.assertEqual(c.lower, 1) + repn = generate_standard_repn(c.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, -y2 + z1 - m.d[4].binary_indicator_var + 2 + ) + cons = bigm.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[4] + ) + self.assertEqual(len(cons), 1) + c = cons[0] + # (1 - z2) + y1 + (1 - y2) >= 1 - (1 - d4.ind_var) + self.assertIsNone(c.upper) + self.assertEqual(c.lower, 1) + repn = generate_standard_repn(c.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, -z2 - y2 + y1 - m.d[4].binary_indicator_var + 3 + ) + cons = bigm.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[5] + ) + self.assertEqual(len(cons), 1) + c = cons[0] + # z2 + (1 - y1) >= 1 - (1 - d4.ind_var) + self.assertIsNone(c.upper) + self.assertEqual(c.lower, 1) + repn = generate_standard_repn(c.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, -y1 + z2 - m.d[4].binary_indicator_var + 2 + ) + cons = bigm.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[6] + ) + self.assertEqual(len(cons), 1) + c = cons[0] + # z2 + 1 - (1 - y2) >= 1 - (1 - d4.ind_var) + self.assertIsNone(c.upper) + self.assertEqual(c.lower, 1) + repn = generate_standard_repn(c.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, y2 + z2 - m.d[4].binary_indicator_var + 1 + ) + cons = bigm.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[7] + ) + self.assertEqual(len(cons), 1) + c = cons[0] + # z3 <= z1 + (1 - d4.ind_var) + self.assertIsNone(c.lower) + self.assertEqual(c.upper, 0) + repn = generate_standard_repn(c.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, z3 - z1 + m.d[4].binary_indicator_var - 1 + ) + cons = bigm.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[8] + ) + self.assertEqual(len(cons), 1) + c = cons[0] + # z3 <= z2 + (1 - d4.ind_var) + self.assertIsNone(c.lower) + self.assertEqual(c.upper, 0) + repn = generate_standard_repn(c.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, z3 - z2 + m.d[4].binary_indicator_var - 1 + ) # check that the global logical constraints were also transformed. self.assertFalse(m.p.active) @@ -2662,5 +2836,13 @@ def test_boolean_vars_on_disjunct(self): m = models.makeBooleanVarsOnDisjuncts() ct.check_solution_obeys_logical_constraints(self, 'bigm', m) + def test_pickle(self): + ct.check_transformed_model_pickles(self, 'bigm') + + @unittest.skipIf(not dill_available, "Dill is not available") + def test_dill_pickle(self): + ct.check_transformed_model_pickles_with_dill(self, 'bigm') + + if __name__ == '__main__': unittest.main() diff --git a/pyomo/gdp/tests/test_bound_pretransformation.py b/pyomo/gdp/tests/test_bound_pretransformation.py new file mode 100644 index 00000000000..154914b680d --- /dev/null +++ b/pyomo/gdp/tests/test_bound_pretransformation.py @@ -0,0 +1,918 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from io import StringIO +import logging +import pyomo.common.unittest as unittest +from pyomo.core.expr.compare import assertExpressionsEqual +from pyomo.environ import ( + Any, + Block, + ConcreteModel, + Constraint, + TransformationFactory, + Var, +) +from pyomo.gdp import Disjunct, Disjunction +from pyomo.common.log import LoggingIntercept + + +class TestBoundPretransformation(unittest.TestCase): + def create_nested_structure(self): + """ + Creates a two-term Disjunction with on nested two-term Disjunction on + the first Disjunct + """ + m = ConcreteModel() + m.outer_d1 = Disjunct() + m.outer_d1.inner_d1 = Disjunct() + m.outer_d1.inner_d2 = Disjunct() + m.outer_d1.inner = Disjunction(expr=[m.outer_d1.inner_d1, m.outer_d1.inner_d2]) + m.outer_d2 = Disjunct() + m.outer = Disjunction(expr=[m.outer_d1, m.outer_d2]) + return m + + def create_nested_model(self): + """ + -100 <= x <= 102 + [-10 <= x <= 11, [x <= 3] v [x >= -17]] v [x == 0] + """ + m = self.create_nested_structure() + m.x = Var(bounds=(-100, 102)) + m.outer_d1.c = Constraint(expr=(-10, m.x, 11)) + m.outer_d1.inner_d1.c = Constraint(expr=m.x <= 3) + m.outer_d1.inner_d2.c = Constraint(expr=m.x >= -7) + m.outer_d2.c = Constraint(expr=m.x == 0) + + return m + + def check_nested_model_disjunction(self, m, bt): + # We expect: -10w_1 -7w_2 <= x <= 3w_1 + 11w_2 + + cons = bt.get_transformed_constraints(m.x, m.outer) + self.assertEqual(len(cons), 2) + lb = cons[0] + ub = cons[1] + assertExpressionsEqual( + self, + lb.expr, + -10.0 * m.outer_d1.inner_d1.binary_indicator_var + - 7.0 * m.outer_d1.inner_d2.binary_indicator_var + + 0.0 * m.outer_d2.binary_indicator_var + <= m.x, + ) + assertExpressionsEqual( + self, + ub.expr, + 3.0 * m.outer_d1.inner_d1.binary_indicator_var + + 11.0 * m.outer_d1.inner_d2.binary_indicator_var + + 0.0 * m.outer_d2.binary_indicator_var + >= m.x, + ) + + # All the disjunctive constraints were transformed + self.assertFalse(m.outer_d1.c.active) + self.assertFalse(m.outer_d1.inner_d1.c.active) + self.assertFalse(m.outer_d1.inner_d2.c.active) + self.assertFalse(m.outer_d2.c.active) + + def test_transform_nested_model(self): + m = self.create_nested_model() + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + self.check_nested_model_disjunction(m, bt) + + # There aren't any other constraints on the model other than what we + # added + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 2, + ) + + def test_transform_nested_model_no_0_terms(self): + m = self.create_nested_model() + m.outer_d2.c.deactivate() + m.outer_d2.c2 = Constraint(expr=m.x == 101) + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + # We expect: -10w_1 -7w_2 + 101 y_2 <= x <= 3w_1 + 11w_2 + 101y_2 + + cons = bt.get_transformed_constraints(m.x, m.outer) + self.assertEqual(len(cons), 2) + lb = cons[0] + ub = cons[1] + assertExpressionsEqual( + self, + lb.expr, + -10.0 * m.outer_d1.inner_d1.binary_indicator_var + - 7.0 * m.outer_d1.inner_d2.binary_indicator_var + + 101.0 * m.outer_d2.binary_indicator_var + <= m.x, + ) + assertExpressionsEqual( + self, + ub.expr, + 3.0 * m.outer_d1.inner_d1.binary_indicator_var + + 11.0 * m.outer_d1.inner_d2.binary_indicator_var + + 101.0 * m.outer_d2.binary_indicator_var + >= m.x, + ) + + # All the disjunctive constraints were transformed + self.assertFalse(m.outer_d1.c.active) + self.assertFalse(m.outer_d1.inner_d1.c.active) + self.assertFalse(m.outer_d1.inner_d2.c.active) + self.assertFalse(m.outer_d2.c2.active) + + # There aren't any other constraints on the model other than what we + # added + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 2, + ) + + def test_transformation_gives_up_without_enough_bound_info(self): + """ + If we have unbounded variables and not enough bounding constraints, + we want the transformation to just leave the bounding constraints + be to be transformed later. + """ + m = self.create_nested_structure() + m.x = Var() + m.y = Var(bounds=(4, 67)) + m.outer_d1.c = Constraint(Any) + m.outer_d1.c[1] = m.x >= 3 + m.outer_d1.c[2] = 5 <= m.y + m.outer_d1.inner_d1.c = Constraint(Any) + m.outer_d1.inner_d1.c[1] = m.x >= 4 + m.outer_d1.inner_d2.c = Constraint(Any) + m.outer_d1.inner_d2.c[1] = m.x >= 17 + m.outer_d2.c = Constraint(Any) + m.outer_d2.c[1] = m.x <= 1 + m.outer_d2.c[2] = m.y <= 66 + m.outer_d2.c[3] = m.x >= 2 + + # y constraints should be fully transformed, and x can do lower but not + # upper. + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + # We expect: 4w_1 + 17w_2 + 2y_2 <= x + # 5w_1 + 5w_2 + 4y_2 <= y <= 67w_1 + 67w_2 + 66y_2 + + cons = bt.get_transformed_constraints(m.x, m.outer) + self.assertEqual(len(cons), 1) + lb = cons[0] + assertExpressionsEqual( + self, + lb.expr, + 4.0 * m.outer_d1.inner_d1.binary_indicator_var + + 17.0 * m.outer_d1.inner_d2.binary_indicator_var + + 2.0 * m.outer_d2.binary_indicator_var + <= m.x, + ) + + cons = bt.get_transformed_constraints(m.y, m.outer) + self.assertEqual(len(cons), 2) + lb = cons[0] + assertExpressionsEqual( + self, + lb.expr, + 5.0 * m.outer_d1.inner_d1.binary_indicator_var + + 5.0 * m.outer_d1.inner_d2.binary_indicator_var + + 4 * m.outer_d2.binary_indicator_var + <= m.y, + ) + ub = cons[1] + assertExpressionsEqual( + self, + ub.expr, + 67 * m.outer_d1.inner_d1.binary_indicator_var + + 67 * m.outer_d1.inner_d2.binary_indicator_var + + 66.0 * m.outer_d2.binary_indicator_var + >= m.y, + ) + + # check that all the y constraints are deactivated, and that the + # lower bound ones for x are, but not the upper bound ones + self.assertFalse(m.outer_d1.c[1].active) + self.assertFalse(m.outer_d1.c[2].active) + self.assertFalse(m.outer_d1.inner_d1.c[1].active) + self.assertFalse(m.outer_d1.inner_d2.c[1].active) + self.assertTrue(m.outer_d2.c[1].active) + self.assertFalse(m.outer_d2.c[2].active) + self.assertFalse(m.outer_d2.c[3].active) + + # and check that there are only four active constraints, the ones we + # made and the remaining upper bound for x: + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 4, + ) + + def test_partially_deactivating_constraint_lb_transformed_but_ub_not(self): + m = ConcreteModel() + m.x = Var() + m.x.setlb(3.0) + m.d1 = Disjunct() + m.d1.c = Constraint(expr=(4.5, m.x, 6)) + m.d2 = Disjunct() + m.d2.c = Constraint(expr=5.6 <= m.x) + m.disj = Disjunction(expr=[m.d1, m.d2]) + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + # We expect: 4.5*y_1 + 5.6*y_2 <= x + + cons = bt.get_transformed_constraints(m.x, m.disj) + self.assertEqual(len(cons), 1) + lb = cons[0] + assertExpressionsEqual( + self, + lb.expr, + 4.5 * m.d1.binary_indicator_var + 5.6 * m.d2.binary_indicator_var <= m.x, + ) + + self.assertFalse(m.d1.c.active) + self.assertFalse(m.d2.c.active) + c_ub = m.d1.component('c_ub') + self.assertIsInstance(c_ub, Constraint) + self.assertTrue(c_ub.active) + assertExpressionsEqual(self, c_ub.expr, m.x <= 6.0) + + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 2, + ) + + def test_partially_deactivating_constraint_ub_transformed_but_lb_not(self): + m = ConcreteModel() + m.w = Var() + m.d = Disjunct([1, 2, 3]) + m.disjunction = Disjunction(expr=[m.d[1], m.d[2], m.d[3]]) + + m.d[1].c = Constraint(expr=m.w == 45) + m.d[2].c = Constraint(expr=m.w <= 36) + m.d[3].c = Constraint(expr=m.w <= 232) + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + # We expect: 45*y_1 + 36y_2 + 232y_3 >= w + + cons = bt.get_transformed_constraints(m.w, m.disjunction) + self.assertEqual(len(cons), 1) + ub = cons[0] + assertExpressionsEqual( + self, + ub.expr, + 45.0 * m.d[1].binary_indicator_var + + 36.0 * m.d[2].binary_indicator_var + + 232.0 * m.d[3].binary_indicator_var + >= m.w, + ) + + self.assertFalse(m.d[1].c.active) + self.assertFalse(m.d[2].c.active) + self.assertFalse(m.d[3].c.active) + c_lb = m.d[1].component('c_lb') + self.assertIsInstance(c_lb, Constraint) + self.assertTrue(c_lb.active) + assertExpressionsEqual(self, c_lb.expr, m.w >= 45.0) + + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 2, + ) + + def create_two_disjunction_model(self): + m = self.create_nested_model() + m.y = Var() + m.d1 = Disjunct() + m.d2 = Disjunct() + m.d3 = Disjunct() + m.disjunction = Disjunction(expr=[m.d1, m.d2, m.d3]) + + m.d1.c = Constraint(expr=m.y == 7.8) + m.d1.c_x = Constraint(expr=m.x <= 27) + m.d2.c = Constraint(expr=m.y == 8.9) + m.d2.c_x = Constraint(expr=m.x >= 34) + m.d3.c = Constraint(expr=m.y <= 45.7) + return m + + def test_transform_multiple_disjunctions(self): + m = self.create_two_disjunction_model() + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + self.check_nested_model_disjunction(m, bt) + + cons = bt.get_transformed_constraints(m.x, m.disjunction) + self.assertEqual(len(cons), 2) + lb = cons[0] + assertExpressionsEqual( + self, + lb.expr, + -100 * m.d1.binary_indicator_var + + 34.0 * m.d2.binary_indicator_var + + -100 * m.d3.binary_indicator_var + <= m.x, + ) + ub = cons[1] + assertExpressionsEqual( + self, + ub.expr, + 27.0 * m.d1.binary_indicator_var + + 102 * m.d2.binary_indicator_var + + 102 * m.d3.binary_indicator_var + >= m.x, + ) + + cons = bt.get_transformed_constraints(m.y, m.disjunction) + self.assertEqual(len(cons), 1) + ub = cons[0] + assertExpressionsEqual( + self, + ub.expr, + 7.8 * m.d1.binary_indicator_var + + 8.9 * m.d2.binary_indicator_var + + 45.7 * m.d3.binary_indicator_var + >= m.y, + ) + + self.assertFalse(m.d1.c.active) + self.assertFalse(m.d1.c_x.active) + self.assertFalse(m.d2.c.active) + self.assertFalse(m.d2.c_x.active) + self.assertFalse(m.d3.c.active) + + c_lb = m.d1.component('c_lb') + self.assertIsInstance(c_lb, Constraint) + self.assertTrue(c_lb.active) + assertExpressionsEqual(self, c_lb.expr, 7.8 <= m.y) + c_lb = m.d2.component('c_lb') + self.assertIsInstance(c_lb, Constraint) + self.assertTrue(c_lb.active) + assertExpressionsEqual(self, c_lb.expr, 8.9 <= m.y) + + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 7, + ) + + def test_disjunction_target(self): + m = self.create_two_disjunction_model() + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m, targets=m.outer) + + self.check_nested_model_disjunction(m, bt) + + self.assertTrue(m.d1.c.active) + self.assertTrue(m.d1.c_x.active) + self.assertTrue(m.d2.c.active) + self.assertTrue(m.d2.c_x.active) + self.assertTrue(m.d3.c.active) + + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 7, + ) + + def test_get_transformed_constraint_errors(self): + m = self.create_two_disjunction_model() + m.z = Var() + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m, targets=m.outer) + + out = StringIO() + with LoggingIntercept( + out, 'pyomo.gdp.plugins.bound_pretransformation', logging.DEBUG + ): + nothing = bt.get_transformed_constraints(m.z, m.outer) + self.assertEqual(len(nothing), 0) + # ...And we log that we're a bit confused. + self.assertEqual( + out.getvalue(), + "Constraint bounding variable 'z' on Disjunction 'outer' was " + "not transformed by the 'gdp.bound_pretransformation' " + "transformation\n", + ) + + out = StringIO() + with LoggingIntercept( + out, 'pyomo.gdp.plugins.bound_pretransformation', logging.DEBUG + ): + nothing = bt.get_transformed_constraints(m.x, m.disjunction) + self.assertEqual(len(nothing), 0) + self.assertEqual( + out.getvalue(), + "No variable on Disjunction 'disjunction' was transformed with the " + "gdp.bound_pretransformation transformation\n", + ) + + def test_univariate_constraints_with_expression_bodies(self): + m = self.create_nested_structure() + + # This is a convoluted way to write the same model as the nested model + m.x = Var(bounds=(-100, 102)) + m.outer_d1.c = Constraint(expr=-20 <= 2 * m.x) + m.outer_d1.c2 = Constraint(expr=m.x - 1 <= 10) + m.outer_d1.inner_d1.c = Constraint(expr=3 * m.x - 7 <= 2) + m.outer_d1.inner_d2.c = Constraint(expr=m.x >= -7) + m.outer_d2.c = Constraint(expr=m.x + 4 == 4) + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + self.check_nested_model_disjunction(m, bt) + + self.assertFalse(m.outer_d1.c.active) + self.assertFalse(m.outer_d1.c2.active) + self.assertFalse(m.outer_d1.inner_d1.c.active) + self.assertFalse(m.outer_d1.inner_d2.c.active) + self.assertFalse(m.outer_d2.c.active) + + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, descend_into=(Block, Disjunct), active=True + ) + ) + ), + 2, + ) + + def test_bound_constraints_skip_levels_in_hierarchy(self): + m = ConcreteModel() + m.x = Var(bounds=(0, 10)) + m.y = Var() + m.Y = Disjunct([1, 2]) + m.Z = Disjunct([1, 2, 3]) + m.W = Disjunct([1, 2]) + m.W[1].c = Constraint(expr=m.x <= 7) + m.W[2].c = Constraint(expr=m.x <= 9) + m.Z[1].c = Constraint(expr=m.y == 0) + m.Z[1].w_disj = Disjunction(expr=[m.W[i] for i in [1, 2]]) + m.Z[2].c = Constraint(expr=m.y == 1) + m.Z[3].c = Constraint(expr=m.y == 2) + m.Y[1].c = Constraint(expr=m.x >= 2) + m.Y[1].z_disj = Disjunction(expr=[m.Z[i] for i in [1, 2, 3]]) + m.Y[2].c1 = Constraint(expr=m.x == 0) + m.Y[2].c2 = Constraint(expr=(3, m.y, 17)) + m.y_disj = Disjunction(expr=[m.Y[i] for i in [1, 2]]) + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + cons = bt.get_transformed_constraints(m.x, m.y_disj) + self.assertEqual(len(cons), 2) + x_lb = cons[0] + assertExpressionsEqual( + self, + x_lb.expr, + 2.0 * m.Z[2].binary_indicator_var + + 2.0 * m.Z[3].binary_indicator_var + + 2.0 * m.W[1].binary_indicator_var + + 2.0 * m.W[2].binary_indicator_var + + 0 * m.Y[2].binary_indicator_var + <= m.x, + ) + x_ub = cons[1] + assertExpressionsEqual( + self, + x_ub.expr, + 10 * m.Z[2].binary_indicator_var + + 10 * m.Z[3].binary_indicator_var + + 7.0 * m.W[1].binary_indicator_var + + 9.0 * m.W[2].binary_indicator_var + + 0.0 * m.Y[2].binary_indicator_var + >= m.x, + ) + + cons = bt.get_transformed_constraints(m.y, m.y_disj) + y_lb = cons[0] + assertExpressionsEqual( + self, + y_lb.expr, + 1.0 * m.Z[2].binary_indicator_var + + 2.0 * m.Z[3].binary_indicator_var + + 0.0 * m.W[1].binary_indicator_var + + 0.0 * m.W[2].binary_indicator_var + + 3.0 * m.Y[2].binary_indicator_var + <= m.y, + ) + y_ub = cons[1] + assertExpressionsEqual( + self, + y_ub.expr, + 1.0 * m.Z[2].binary_indicator_var + + 2.0 * m.Z[3].binary_indicator_var + + 0.0 * m.W[1].binary_indicator_var + + 0.0 * m.W[2].binary_indicator_var + + 17.0 * m.Y[2].binary_indicator_var + >= m.y, + ) + + self.assertFalse(m.W[1].c.active) + self.assertFalse(m.W[2].c.active) + self.assertFalse(m.Z[1].c.active) + self.assertFalse(m.Z[2].c.active) + self.assertFalse(m.Z[3].c.active) + self.assertFalse(m.Y[1].c.active) + self.assertFalse(m.Y[2].c1.active) + self.assertFalse(m.Y[2].c2.active) + + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, descend_into=(Block, Disjunct), active=True + ) + ) + ), + 4, + ) + + def test_skip_nonlinear_and_multivariate_constraints(self): + m = self.create_nested_model() + m.y = Var() + m.z = Var() + m.outer_d1.nonlinear = Constraint(expr=m.y**2 <= 7) + m.outer_d1.inner_d2.multivariate = Constraint(expr=m.x + m.y <= m.z) + m.outer_d2.leave_it = Constraint(expr=m.z == 7) + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + self.check_nested_model_disjunction(m, bt) + + self.assertTrue(m.outer_d1.nonlinear.active) + self.assertTrue(m.outer_d1.inner_d2.multivariate.active) + self.assertTrue(m.outer_d2.leave_it.active) + + self.assertFalse(m.outer_d1.c.active) + self.assertFalse(m.outer_d1.inner_d1.c.active) + self.assertFalse(m.outer_d1.inner_d2.c.active) + self.assertFalse(m.outer_d2.c.active) + + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, descend_into=(Block, Disjunct), active=True + ) + ) + ), + 5, + ) + + def test_tightest_bound_is_at_root(self): + """ + x >= 60 + [[x >= 55, [ ] v [x >= 66]] v [ ]] v [x >= 5] + """ + m = ConcreteModel() + m.x = Var() + m.x.setlb(4) + m.c = Constraint(expr=m.x >= 60) + m.d = Disjunct([1, 2]) + m.inner1 = Disjunct([1, 2]) + m.inner2 = Disjunct([1, 2]) + m.disjunction = Disjunction(expr=[m.d[1], m.d[2]]) + m.d[1].disjunction = Disjunction(expr=[m.inner1[1], m.inner1[2]]) + m.inner1[1].disjunction = Disjunction(expr=[m.inner2[1], m.inner2[2]]) + + m.d[2].c = Constraint(expr=m.x >= 5) + m.inner1[1].c = Constraint(expr=m.x >= 55) + m.inner2[2].c = Constraint(expr=m.x >= 66) + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + cons = bt.get_transformed_constraints(m.x, m.disjunction) + self.assertEqual(len(cons), 1) + lb = cons[0] + assertExpressionsEqual( + self, + lb.expr, + 60.0 * m.inner1[2].binary_indicator_var + + 60.0 * m.inner2[1].binary_indicator_var + + 66.0 * m.inner2[2].binary_indicator_var + + 60.0 * m.d[2].binary_indicator_var + <= m.x, + ) + + # We shouldn't deactivate global constraints. Reason 1 being that we + # don't deactivate bounds and Reason 2 being that generally the global + # part of the model is none of pyomo.gdp's beeswax. + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, descend_into=(Block, Disjunct), active=True + ) + ) + ), + 2, + ) + + def test_bounds_on_disjuncts_with_block_hierarchies(self): + m = ConcreteModel() + m.x = Var() + m.b = Block() + m.b.c = Constraint(expr=m.x <= 4) + m.d = Disjunct([1, 2]) + m.d[1].b = Block() + m.d[1].b.c = Constraint(expr=m.x <= 5) + m.d[2].b = Block() + m.d[2].b.c = Constraint(expr=m.x <= 3) + m.d[2].c = Constraint(expr=m.x <= 4.1) + m.disjunction = Disjunction(expr=[m.d[1], m.d[2]]) + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + cons = bt.get_transformed_constraints(m.x, m.disjunction) + self.assertEqual(len(cons), 1) + ub = cons[0] + + assertExpressionsEqual( + self, + ub.expr, + 4.0 * m.d[1].binary_indicator_var + 3.0 * m.d[2].binary_indicator_var + >= m.x, + ) + # just the one we made and the global one are active. + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, descend_into=(Block, Disjunct), active=True + ) + ) + ), + 2, + ) + + def test_indexed_disjunction_target(self): + m = ConcreteModel() + m.x = Var() + m.d = Disjunct([1, 2, 3, 4, 5]) + m.d[1].c = Constraint(expr=m.x <= 1) + m.d[2].c = Constraint(expr=m.x <= 2) + m.d[3].c = Constraint(expr=m.x <= 3) + m.d[4].c = Constraint(expr=m.x >= -5) + m.d[5].c = Constraint(expr=m.x >= -8) + m.disjunction = Disjunction(['pos', 'neg']) + m.disjunction['pos'] = [m.d[1], m.d[2], m.d[3]] + m.disjunction['neg'] = [m.d[4], m.d[5]] + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m, targets=m.disjunction) + + cons = bt.get_transformed_constraints(m.x, m.disjunction['pos']) + self.assertEqual(len(cons), 1) + ub = cons[0] + assertExpressionsEqual( + self, + ub.expr, + 1.0 * m.d[1].binary_indicator_var + + 2.0 * m.d[2].binary_indicator_var + + 3.0 * m.d[3].binary_indicator_var + >= m.x, + ) + cons = bt.get_transformed_constraints(m.x, m.disjunction['neg']) + self.assertEqual(len(cons), 1) + lb = cons[0] + assertExpressionsEqual( + self, + lb.expr, + -5.0 * m.d[4].binary_indicator_var - 8.0 * m.d[5].binary_indicator_var + <= m.x, + ) + + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, descend_into=(Block, Disjunct), active=True + ) + ) + ), + 2, + ) + + def test_nested_target(self): + m = self.create_nested_model() + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m, targets=[m.outer_d1.inner]) + + cons = bt.get_transformed_constraints(m.x, m.outer_d1.inner) + self.assertEqual(len(cons), 2) + lb = cons[0] + ub = cons[1] + assertExpressionsEqual( + self, + lb.expr, + -100 * m.outer_d1.inner_d1.binary_indicator_var + - 7.0 * m.outer_d1.inner_d2.binary_indicator_var + <= m.x, + ) + self.assertIs(lb.parent_block().parent_block(), m.outer_d1) + assertExpressionsEqual( + self, + ub.expr, + 3.0 * m.outer_d1.inner_d1.binary_indicator_var + + 102 * m.outer_d1.inner_d2.binary_indicator_var + >= m.x, + ) + self.assertIs(ub.parent_block().parent_block(), m.outer_d1) + + self.assertTrue(m.outer_d1.c.active) + self.assertTrue(m.outer_d2.c.active) + self.assertTrue(lb.active) + self.assertTrue(ub.active) + + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 4, + ) + + def test_targets_nested_in_each_other(self): + m = self.create_nested_model() + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m, targets=[m.outer_d1.inner, m.outer]) + + # This should do the outermost disjunctions only--we should + # get the same result as if we had transformed the whole + # model. + + self.check_nested_model_disjunction(m, bt) + # There aren't any other constraints on the model other than what we + # added + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 2, + ) + + def test_variables_not_in_any_leaves(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + m.disjunct1 = Disjunct() + m.disjunct1.c = Constraint(expr=m.x <= 9.7) + m.disjunct1.disjunct1 = Disjunct() + m.disjunct1.disjunct1.c = Constraint(expr=m.x + m.y <= 4) + m.disjunct1.disjunct2 = Disjunct() + m.disjunct1.disjunct2.c = Constraint(expr=m.y <= 9) + m.disjunct1.disjunction = Disjunction( + expr=[m.disjunct1.disjunct1, m.disjunct1.disjunct2] + ) + m.disjunct2 = Disjunct() + m.disjunct2.c = Constraint(expr=m.x <= 9) + m.disjunction = Disjunction(expr=[m.disjunct1, m.disjunct2]) + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + cons = bt.get_transformed_constraints(m.x, m.disjunction) + self.assertEqual(len(cons), 1) + ub = cons[0] + assertExpressionsEqual( + self, + ub.expr, + 9.7 * m.disjunct1.disjunct1.binary_indicator_var + + 9.7 * m.disjunct1.disjunct2.binary_indicator_var + + 9.0 * m.disjunct2.binary_indicator_var + >= m.x, + ) + + cons = bt.get_transformed_constraints(m.y, m.disjunction) + self.assertEqual(len(cons), 0) + + self.assertFalse(m.disjunct1.c.active) + self.assertFalse(m.disjunct2.c.active) + + # two leftover and the one we added + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 3, + ) + + def test_fixed_vars_handled_correctly(self): + m = ConcreteModel() + m.x = Var() + m.x.setub(78) + m.y = Var() + m.y.fix(1) + m.z = Var() + + m.disjunction = Disjunction(expr=[[m.x + m.y <= 5], [m.x <= 17], [m.z == 0]]) + + bt = TransformationFactory('gdp.bound_pretransformation') + bt.apply_to(m) + + cons = bt.get_transformed_constraints(m.x, m.disjunction) + self.assertEqual(len(cons), 1) + ub = cons[0] + assertExpressionsEqual( + self, + ub.expr, + 4.0 * m.disjunction.disjuncts[0].binary_indicator_var + + 17.0 * m.disjunction.disjuncts[1].binary_indicator_var + + 78 * m.disjunction.disjuncts[2].binary_indicator_var + >= m.x, + ) + + self.assertFalse(m.disjunction.disjuncts[0].constraint[1].active) + self.assertFalse(m.disjunction.disjuncts[1].constraint[1].active) + # Only have the one we added and the one on z. + self.assertEqual( + len( + list( + m.component_data_objects( + Constraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 2, + ) diff --git a/pyomo/gdp/tests/test_cuttingplane.py b/pyomo/gdp/tests/test_cuttingplane.py index 43064cf4170..827eac9aa6a 100644 --- a/pyomo/gdp/tests/test_cuttingplane.py +++ b/pyomo/gdp/tests/test_cuttingplane.py @@ -11,10 +11,18 @@ import pyomo.common.unittest as unittest -from pyomo.environ import (Var, Constraint, Objective, Block, - TransformationFactory, value, maximize, Suffix) +from pyomo.environ import ( + Var, + Constraint, + Objective, + Block, + TransformationFactory, + value, + maximize, + Suffix, +) from pyomo.gdp import GDP_Error -from pyomo.gdp.plugins.cuttingplane import create_cuts_fme +from pyomo.gdp.plugins.cuttingplane import create_cuts_fme import pyomo.opt import pyomo.gdp.tests.models as models @@ -24,12 +32,14 @@ solvers = pyomo.opt.check_available_solvers('ipopt', 'gurobi') + def check_validity(self, body, lower, upper, TOL=0): if lower is not None: self.assertGreaterEqual(value(body), value(lower) - TOL) if upper is not None: self.assertLessEqual(value(body), value(upper) + TOL) + class OneVarDisj(unittest.TestCase): def check_no_cuts_for_optimal_m(self, m): cuts = m._pyomo_gdp_cuttingplane_transformation.cuts @@ -103,9 +113,7 @@ def test_no_cuts_for_optimal_m_fme(self): m = models.oneVarDisj_2pts() TransformationFactory('gdp.cuttingplane').apply_to( - m, - create_cuts=create_cuts_fme, - post_process_cut=None + m, create_cuts=create_cuts_fme, post_process_cut=None ) self.check_no_cuts_for_optimal_m(m) @@ -114,9 +122,7 @@ def test_no_cuts_for_optimal_m_inf_norm(self): m = models.oneVarDisj_2pts() TransformationFactory('gdp.cuttingplane').apply_to( - m, - norm=float('inf'), - post_process_cut=None + m, norm=float('inf'), post_process_cut=None ) self.check_no_cuts_for_optimal_m(m) @@ -125,32 +131,31 @@ def test_expected_two_segment_cut(self): m = models.twoSegments_SawayaGrossmann() # have to make M big for the bigm relaxation to be the box 0 <= x <= 3, # 0 <= Y <= 1 (in the limit) - TransformationFactory('gdp.cuttingplane').apply_to(m, bigM=1e6, - verbose=True) + TransformationFactory('gdp.cuttingplane').apply_to(m, bigM=1e6, verbose=True) self.check_expected_two_segment_cut(m) - + @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_expected_two_segment_cut_fme(self): m = models.twoSegments_SawayaGrossmann() # have to make M big for the bigm relaxation to be the box 0 <= x <= 3, # 0 <= Y <= 1 (in the limit) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, create_cuts=create_cuts_fme, - post_process_cut=None) + m, bigM=1e6, create_cuts=create_cuts_fme, post_process_cut=None + ) self.check_expected_two_segment_cut(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_expected_two_segment_cut_inf_norm(self): m = models.twoSegments_SawayaGrossmann() - + # make sure this is fine if dual Suffix is already on model m.dual = Suffix(direction=Suffix.IMPORT) # have to make M big for the bigm relaxation to be the box 0 <= x <= 3, # 0 <= Y <= 1 (in the limit) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, norm=float('inf'), - post_process_cut=None) + m, bigM=1e6, norm=float('inf'), post_process_cut=None + ) self.check_expected_two_segment_cut(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") @@ -159,22 +164,25 @@ def test_expected_two_segment_cut_inf_norm_fme(self): # have to make M big for the bigm relaxation to be the box 0 <= x <= 3, # 0 <= Y <= 1 (in the limit) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, norm=float('inf'), create_cuts=create_cuts_fme, - post_process_cut=None, verbose=True) + m, + bigM=1e6, + norm=float('inf'), + create_cuts=create_cuts_fme, + post_process_cut=None, + verbose=True, + ) self.check_expected_two_segment_cut(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_deactivated_objectives_ignored(self): m = models.twoSegments_SawayaGrossmann() # add an opposite direction objective, but deactivate it - m.another_obj = Objective(expr=m.x - m.disj2.indicator_var, - sense=maximize) + m.another_obj = Objective(expr=m.x - m.disj2.indicator_var, sense=maximize) m.another_obj.deactivate() # have to make M big for the bigm relaxation to be the box 0 <= x <= 3, # 0 <= Y <= 1 (in the limit) - TransformationFactory('gdp.cuttingplane').apply_to(m, bigM=1e6, - verbose=True) + TransformationFactory('gdp.cuttingplane').apply_to(m, bigM=1e6, verbose=True) self.check_expected_two_segment_cut(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") @@ -196,8 +204,8 @@ def test_two_segment_cuts_valid_fme(self): # have to make M big for the bigm relaxation to be the box 0 <= x <= 3, # 0 <= Y <= 1 (in the limit) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, create_cuts=create_cuts_fme, - post_process_cut=None) + m, bigM=1e6, create_cuts=create_cuts_fme, post_process_cut=None + ) self.check_two_segment_cuts_valid(m) @@ -217,7 +225,8 @@ def test_two_segment_cuts_valid_inf_norm(self): # This one has to post process, but it is correct with the default # settings. TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, norm=float('inf')) + m, bigM=1e6, norm=float('inf') + ) self.check_two_segment_cuts_valid(m) @@ -250,8 +259,12 @@ def test_integer_arithmetic_cuts_valid_l2(self): # have to make M big for the bigm relaxation to be the box 0 <= x <= 3, # 0 <= Y <= 1 (in the limit) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, create_cuts=create_cuts_fme, - post_process_cut=None, do_integer_arithmetic=True) + m, + bigM=1e6, + create_cuts=create_cuts_fme, + post_process_cut=None, + do_integer_arithmetic=True, + ) cuts = m._pyomo_gdp_cuttingplane_transformation.cuts self.check_expected_two_segment_cut_exact(cuts) @@ -262,8 +275,13 @@ def test_integer_arithmetic_cuts_valid_inf_norm(self): # have to make M big for the bigm relaxation to be the box 0 <= x <= 3, # 0 <= Y <= 1 (in the limit) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, create_cuts=create_cuts_fme, norm=float('inf'), - post_process_cut=None, do_integer_arithmetic=True) + m, + bigM=1e6, + create_cuts=create_cuts_fme, + norm=float('inf'), + post_process_cut=None, + do_integer_arithmetic=True, + ) cuts = m._pyomo_gdp_cuttingplane_transformation.cuts self.check_expected_two_segment_cut_exact(cuts) @@ -277,8 +295,12 @@ def test_maximization(self): # have to make M big for the bigm relaxation to be the box 0 <= x <= 3, # 0 <= Y <= 1 (in the limit) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, create_cuts=create_cuts_fme, - post_process_cut=None, do_integer_arithmetic=True) + m, + bigM=1e6, + create_cuts=create_cuts_fme, + post_process_cut=None, + do_integer_arithmetic=True, + ) cuts = m._pyomo_gdp_cuttingplane_transformation.cuts self.check_expected_two_segment_cut_exact(cuts) @@ -289,12 +311,16 @@ def test_cuts_named_correctly(self): # have to make M big for the bigm relaxation to be the box 0 <= x <= 3, # 0 <= Y <= 1 (in the limit) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, create_cuts=create_cuts_fme, cuts_name="perfect_cuts", - post_process_cut=None, do_integer_arithmetic=True) + m, + bigM=1e6, + create_cuts=create_cuts_fme, + cuts_name="perfect_cuts", + post_process_cut=None, + do_integer_arithmetic=True, + ) cuts = m.component("perfect_cuts") self.assertIsInstance(cuts, Constraint) - self.assertIsNone( - m._pyomo_gdp_cuttingplane_transformation.component("cuts")) + self.assertIsNone(m._pyomo_gdp_cuttingplane_transformation.component("cuts")) self.check_expected_two_segment_cut_exact(cuts) @@ -308,18 +334,20 @@ def test_non_unique_cut_name_error(self): "specify a unique name.", TransformationFactory('gdp.cuttingplane').apply_to, m, - cuts_name="disj1") - + cuts_name="disj1", + ) + + class TwoTermDisj(unittest.TestCase): extreme_points = [ - (1,0,4,1), - (1,0,4,2), - (1,0,3,1), - (1,0,3,2), - (0,1,1,3), - (0,1,1,4), - (0,1,2,3), - (0,1,2,4) + (1, 0, 4, 1), + (1, 0, 4, 2), + (1, 0, 3, 1), + (1, 0, 3, 2), + (0, 1, 1, 3), + (0, 1, 1, 4), + (0, 1, 2, 3), + (0, 1, 2, 4), ] @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") @@ -351,7 +379,8 @@ def check_cuts_valid_for_optimal(self, m, TOL): def test_cuts_valid_for_optimal_fme(self): m = models.makeTwoTermDisj_boxes() TransformationFactory('gdp.cuttingplane').apply_to( - m, create_cuts=create_cuts_fme, post_process_cut=None) + m, create_cuts=create_cuts_fme, post_process_cut=None + ) self.check_cuts_valid_for_optimal(m, TOL=0) @@ -359,15 +388,15 @@ def test_cuts_valid_for_optimal_fme(self): def test_cuts_valid_for_optimal_with_tolerance(self): m = models.makeTwoTermDisj_boxes() TransformationFactory('gdp.cuttingplane').apply_to( - m, back_off_problem_tolerance=1e-7) + m, back_off_problem_tolerance=1e-7 + ) self.check_cuts_valid_for_optimal(m, TOL=1e-8) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_valid_for_optimal_inf_norm(self): m = models.makeTwoTermDisj_boxes() - TransformationFactory('gdp.cuttingplane').apply_to( m, - norm=float('inf')) + TransformationFactory('gdp.cuttingplane').apply_to(m, norm=float('inf')) # same tolerance as the l-2 norm version: self.check_cuts_valid_for_optimal(m, TOL=1e-8) @@ -389,7 +418,8 @@ def check_cuts_valid_on_hull_vertices(self, m, TOL=0): def test_cuts_valid_on_hull_vertices_fme(self): m = models.makeTwoTermDisj_boxes() TransformationFactory('gdp.cuttingplane').apply_to( - m, create_cuts=create_cuts_fme, post_process_cut=None) + m, create_cuts=create_cuts_fme, post_process_cut=None + ) self.check_cuts_valid_on_hull_vertices(m, TOL=0) @@ -397,7 +427,8 @@ def test_cuts_valid_on_hull_vertices_fme(self): def test_cuts_valid_on_hull_vertices_with_tolerance(self): m = models.makeTwoTermDisj_boxes() TransformationFactory('gdp.cuttingplane').apply_to( - m, back_off_problem_tolerance=2e-8, verbose=True) + m, back_off_problem_tolerance=2e-8, verbose=True + ) self.check_cuts_valid_on_hull_vertices(m, TOL=1e-8) @@ -407,16 +438,17 @@ def test_cuts_valid_on_hull_vertices_inf_norm(self): # we actually don't have to adjust the back-off problem tolerance for # this norm. TransformationFactory('gdp.cuttingplane').apply_to( - m, norm=float('inf'), verbose=True) + m, norm=float('inf'), verbose=True + ) self.check_cuts_valid_on_hull_vertices(m, TOL=1e-8) - + @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_are_correct_facets_fme(self): m = models.makeTwoTermDisj_boxes() TransformationFactory('gdp.cuttingplane').apply_to( - m, create_cuts=create_cuts_fme, post_process_cut=None, - zero_tolerance=0) + m, create_cuts=create_cuts_fme, post_process_cut=None, zero_tolerance=0 + ) # This would also be a valid cut, it just doesn't happen to be what we # choose. # facet_extreme_pts = [ @@ -425,13 +457,8 @@ def test_cuts_are_correct_facets_fme(self): # (0,1,1,3), # (0,1,1,4) # ] - facet_extreme_pts = [ - (0,1,1,3), - (0,1,2,3), - (1,0,3,1), - (1,0,4,1) - ] - + facet_extreme_pts = [(0, 1, 1, 3), (0, 1, 2, 3), (1, 0, 3, 1), (1, 0, 4, 1)] + cuts = m._pyomo_gdp_cuttingplane_transformation.cuts # Here, we get just one facet self.assertEqual(len(cuts), 1) @@ -450,14 +477,8 @@ def test_cuts_are_correct_facets_fme(self): self.assertEqual(value(upper), value(cut_expr)) def check_cuts_are_correct_facets(self, m): - cut1_tight_pts = [ - (1,0,3,1), - (0,1,1,3) - ] - facet2_extreme_pts = [ - (1,0,3,1), - (1,0,4,1) - ] + cut1_tight_pts = [(1, 0, 3, 1), (0, 1, 1, 3)] + facet2_extreme_pts = [(1, 0, 3, 1), (1, 0, 4, 1)] cuts = m._pyomo_gdp_cuttingplane_transformation.cuts # ESJ: In this version, we don't get the facets, but we still get two # cuts, and we check they are tight at points on the relevant facets. @@ -505,7 +526,7 @@ def test_cuts_are_correct_facets_inf_norm(self): m = models.makeTwoTermDisj_boxes() TransformationFactory('gdp.cuttingplane').apply_to(m, norm=float('inf')) self.check_cuts_are_correct_facets(m) - + @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_create_using(self): m = models.makeTwoTermDisj_boxes() @@ -520,7 +541,7 @@ def test_active_objective_err(self): "Cannot apply cutting planes transformation without an active " "objective in the model*", TransformationFactory('gdp.cuttingplane').apply_to, - m + m, ) # I'm doing this test with Gurobi because ipopt doesn't really catch this @@ -536,14 +557,18 @@ def test_equality_constraints_on_disjuncts_with_fme(self): TransformationFactory('gdp.cuttingplane').apply_to( m, create_cuts=create_cuts_fme, - post_process_cut=None, verbose=True, solver='gurobi', + post_process_cut=None, + verbose=True, + solver='gurobi', # don't actually need this, but taking the excuse to set solver # options solver_options={'FeasibilityTol': 1e-8}, - cuts_name="cuts", bigM=5) + cuts_name="cuts", + bigM=5, + ) # rBigM first iteration solve will give (x = 3, Y = 0.6). If we don't - # catch equality constraints, we don't get a cut. But we need to get + # catch equality constraints, we don't get a cut. But we need to get # x + Y <= 1. (Where Y is the indicator that x = 0). self.assertEqual(len(m.cuts), 1) cut = m.cuts[0] @@ -557,17 +582,18 @@ def test_equality_constraints_on_disjuncts_with_fme(self): self.assertIs(repn.linear_vars[1], m.x) self.assertEqual(repn.linear_coefs[1], -1) + class Grossmann_TestCases(unittest.TestCase): def check_cuts_valid_at_extreme_pts(self, m): extreme_points = [ - (1,0,2,10), - (1,0,0,10), - (1,0,0,7), - (1,0,2,7), - (0,1,8,0), - (0,1,8,3), - (0,1,10,0), - (0,1,10,3) + (1, 0, 2, 10), + (1, 0, 0, 10), + (1, 0, 0, 7), + (1, 0, 2, 7), + (0, 1, 8, 0), + (0, 1, 8, 3), + (0, 1, 10, 0), + (0, 1, 10, 3), ] cuts = m._pyomo_gdp_cuttingplane_transformation.cuts @@ -588,7 +614,8 @@ def check_cuts_valid_at_extreme_pts(self, m): def test_cut_valid_at_extreme_pts_fme(self): m = models.grossmann_oneDisj() TransformationFactory('gdp.cuttingplane').apply_to( - m, create_cuts=create_cuts_fme, post_process_cut=None) + m, create_cuts=create_cuts_fme, post_process_cut=None + ) self.check_cuts_valid_at_extreme_pts(m) @@ -612,7 +639,8 @@ def test_cut_valid_at_extreme_pts_inf_norm(self): def test_cut_is_correct_facet_fme(self): m = models.grossmann_oneDisj() TransformationFactory('gdp.cuttingplane').apply_to( - m, create_cuts=create_cuts_fme, post_process_cut=None) + m, create_cuts=create_cuts_fme, post_process_cut=None + ) cuts = m._pyomo_gdp_cuttingplane_transformation.cuts # ESJ: Again, for FME, we don't mind getting both the possible facets, # as long as they are beautiful. @@ -620,16 +648,16 @@ def test_cut_is_correct_facet_fme(self): # similar to the two boxes example, this is on the line where two facets # intersect facet2_extreme_points = [ - (1,0,2,10), - (1,0,2,7), - (0,1,10,0), - (0,1,10,3) + (1, 0, 2, 10), + (1, 0, 2, 7), + (0, 1, 10, 0), + (0, 1, 10, 3), ] facet_extreme_points = [ - (1,0,2,10), - (1,0,0,10), - (0,1,8,3), - (0,1,10,3) + (1, 0, 2, 10), + (1, 0, 0, 10), + (0, 1, 8, 3), + (0, 1, 10, 3), ] for pt in facet_extreme_points: @@ -652,29 +680,21 @@ def check_cut_is_correct_facet(self, m): # similar to the two boxes example, this is on the line where two facets # intersect, we get cuts which intersect the two facets from FME. This # makes sense because these are angled. - cut1_tight_points = [ - (1,0,2,10), - (0,1,10,3) - ] - cut2_tight_points = [ - (1,0,2,10), - (1,0,0,10) - ] + cut1_tight_points = [(1, 0, 2, 10), (0, 1, 10, 3)] + cut2_tight_points = [(1, 0, 2, 10), (1, 0, 0, 10)] for pt in cut1_tight_points: m.x.fix(pt[2]) m.y.fix(pt[3]) m.disjunct1.binary_indicator_var.fix(pt[0]) m.disjunct2.binary_indicator_var.fix(pt[1]) - self.assertAlmostEqual(value(cuts[0].lower), value(cuts[0].body), - places=6) + self.assertAlmostEqual(value(cuts[0].lower), value(cuts[0].body), places=6) for pt in cut2_tight_points: m.x.fix(pt[2]) m.y.fix(pt[3]) m.disjunct1.binary_indicator_var.fix(pt[0]) m.disjunct2.binary_indicator_var.fix(pt[1]) - self.assertAlmostEqual(value(cuts[1].lower), value(cuts[1].body), - places=6) + self.assertAlmostEqual(value(cuts[1].lower), value(cuts[1].body), places=6) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cut_is_correct_facet_projection(self): @@ -686,22 +706,23 @@ def test_cut_is_correct_facet_projection(self): def test_cut_is_correct_facet_inf_norm(self): m = models.grossmann_oneDisj() # without the increase of cut_filtering_threshold, we get a third cut, - # whcih is also tight where cut 2 is. It doesn't improve the objective + # which is also tight where cut 2 is. It doesn't improve the objective # by much at all, so it's redundant. TransformationFactory('gdp.cuttingplane').apply_to( - m, norm=float('inf'), cut_filtering_threshold=0.2) + m, norm=float('inf'), cut_filtering_threshold=0.2 + ) self.check_cut_is_correct_facet(m) def check_cuts_valid_at_extreme_pts_rescaled(self, m): extreme_points = [ - (1,0,2,127), - (1,0,0,127), - (1,0,0,117), - (1,0,2,117), - (0,1,118,0), - (0,1,118,3), - (0,1,120,0), - (0,1,120,3) + (1, 0, 2, 127), + (1, 0, 0, 127), + (1, 0, 0, 117), + (1, 0, 2, 117), + (0, 1, 118, 0), + (0, 1, 118, 3), + (0, 1, 120, 0), + (0, 1, 120, 3), ] cuts = m._pyomo_gdp_cuttingplane_transformation.cuts @@ -722,7 +743,8 @@ def check_cuts_valid_at_extreme_pts_rescaled(self, m): def test_cuts_valid_at_extreme_pts_rescaled_fme(self): m = models.to_break_constraint_tolerances() TransformationFactory('gdp.cuttingplane').apply_to( - m, create_cuts=create_cuts_fme, post_process_cut=None) + m, create_cuts=create_cuts_fme, post_process_cut=None + ) self.check_cuts_valid_at_extreme_pts_rescaled(m) # Again, this actually passes without tolerance, so leaving it for now... @@ -738,27 +760,26 @@ def test_cuts_valid_at_extreme_pts_rescaled_inf_norm(self): # this cuts off by a little more than 1e-8 without the adjusted back-off # problem tolerance TransformationFactory('gdp.cuttingplane').apply_to( - m, norm=float('inf'), back_off_problem_tolerance=1e-7, verbose=True) + m, norm=float('inf'), back_off_problem_tolerance=1e-7, verbose=True + ) self.check_cuts_valid_at_extreme_pts_rescaled(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cut_is_correct_facet_rescaled_fme(self): m = models.to_break_constraint_tolerances() TransformationFactory('gdp.cuttingplane').apply_to( - m, create_cuts=create_cuts_fme, post_process_cut=None) + m, create_cuts=create_cuts_fme, post_process_cut=None + ) cuts = m._pyomo_gdp_cuttingplane_transformation.cuts self.assertEqual(len(cuts), 1) - + # we don't get a whole facet. We get 0 <= 129y_1 + 123y_2 - x - y, which - # is the sum of two facets: - # 0 <= 2y_1 + 120y_2 - x and + # is the sum of two facets: + # 0 <= 2y_1 + 120y_2 - x and # 0 <= 127y_1 + 3y_2 - y # But this is valid and the only cut needed, so we won't complain. - cut_extreme_points = [ - (1,0,2,127), - (0,1,120,3) - ] + cut_extreme_points = [(1, 0, 2, 127), (0, 1, 120, 3)] for pt in cut_extreme_points: m.x.fix(pt[2]) @@ -772,11 +793,8 @@ def test_cut_is_correct_facet_rescaled_fme(self): def check_cut_is_correct_facet_rescaled(self, m): cuts = m._pyomo_gdp_cuttingplane_transformation.cuts self.assertEqual(len(cuts), 1) - - cut_tight_points = [ - (1,0,2,127), - (0,1,120,3) - ] + + cut_tight_points = [(1, 0, 2, 127), (0, 1, 120, 3)] for pt in cut_tight_points: m.x.fix(pt[2]) @@ -786,14 +804,13 @@ def check_cut_is_correct_facet_rescaled(self, m): # ESJ: 5 places is not ideal... But it's in the direction of valid, # so I think that's just the price we pay. This test still seems # useful to me as a sanity check that the cut is where it should be. - self.assertAlmostEqual(value(cuts[0].lower), value(cuts[0].body), - places=5) + self.assertAlmostEqual(value(cuts[0].lower), value(cuts[0].body), places=5) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cut_is_correct_facet_rescaled_projection(self): m = models.to_break_constraint_tolerances() TransformationFactory('gdp.cuttingplane').apply_to(m) - self.check_cut_is_correct_facet_rescaled(m) + self.check_cut_is_correct_facet_rescaled(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cut_is_correct_facet_rescaled_inf_norm(self): @@ -801,19 +818,20 @@ def test_cut_is_correct_facet_rescaled_inf_norm(self): # This would give two cuts, the second improving by about 0.05, without # the tighter threshold. TransformationFactory('gdp.cuttingplane').apply_to( - m, norm=float('inf'), cut_filtering_threshold=0.1) + m, norm=float('inf'), cut_filtering_threshold=0.1 + ) self.check_cut_is_correct_facet_rescaled(m) def check_2disj_cuts_valid_for_extreme_pts(self, m): extreme_points = [ - (1,0,1,0,1,7), - (1,0,1,0,1,8), - (1,0,1,0,2,7), - (1,0,1,0,2,8), - (0,1,0,1,9,2), - (0,1,0,1,9,3), - (0,1,0,1,10,2), - (0,1,0,1,10,3) + (1, 0, 1, 0, 1, 7), + (1, 0, 1, 0, 1, 8), + (1, 0, 1, 0, 2, 7), + (1, 0, 1, 0, 2, 8), + (0, 1, 0, 1, 9, 2), + (0, 1, 0, 1, 9, 3), + (0, 1, 0, 1, 10, 2), + (0, 1, 0, 1, 10, 3), ] cuts = m._pyomo_gdp_cuttingplane_transformation.cuts @@ -835,7 +853,8 @@ def check_2disj_cuts_valid_for_extreme_pts(self, m): def test_2disj_cuts_valid_for_extreme_pts_fme(self): m = models.grossmann_twoDisj() TransformationFactory('gdp.cuttingplane').apply_to( - m, create_cuts=create_cuts_fme, post_process_cut=None) + m, create_cuts=create_cuts_fme, post_process_cut=None + ) self.check_2disj_cuts_valid_for_extreme_pts(m) @@ -854,10 +873,11 @@ def test_2disj_cuts_valid_for_extreme_pts_inf_norm(self): self.check_2disj_cuts_valid_for_extreme_pts(m) + class NonlinearConvex_TwoCircles(unittest.TestCase): def check_cuts_valid_for_optimal(self, m): cuts = m._pyomo_gdp_cuttingplane_transformation.cuts - self.assertGreaterEqual(len(cuts), 1) # we should get at least one. + self.assertGreaterEqual(len(cuts), 1) # we should get at least one. m.x.fix(2) m.y.fix(7) @@ -877,7 +897,8 @@ def test_cuts_valid_for_optimal(self): def test_cuts_valid_for_optimal_fme(self): m = models.twoDisj_twoCircles_easy() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, create_cuts=create_cuts_fme, verbose=True) + m, bigM=1e6, create_cuts=create_cuts_fme, verbose=True + ) self.check_cuts_valid_for_optimal(m) @@ -885,13 +906,14 @@ def test_cuts_valid_for_optimal_fme(self): def test_cuts_valid_for_optimal_inf_norm(self): m = models.twoDisj_twoCircles_easy() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, norm=float('inf'), verbose=True) + m, bigM=1e6, norm=float('inf'), verbose=True + ) self.check_cuts_valid_for_optimal(m) def check_cuts_valid_on_facet_containing_optimal(self, m): cuts = m._pyomo_gdp_cuttingplane_transformation.cuts - self.assertGreaterEqual(len(cuts), 1) # we should get at least one. + self.assertGreaterEqual(len(cuts), 1) # we should get at least one. m.x.fix(5) m.y.fix(3) @@ -910,19 +932,21 @@ def test_cuts_valid_on_facet_containing_optimal(self): def test_cuts_valid_on_facet_containing_optimal_fme(self): m = models.twoDisj_twoCircles_easy() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, create_cuts=create_cuts_fme, verbose=True) + m, bigM=1e6, create_cuts=create_cuts_fme, verbose=True + ) self.check_cuts_valid_on_facet_containing_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_valid_on_facet_containing_optimal_inf_norm(self): m = models.twoDisj_twoCircles_easy() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, norm=float('inf'), verbose=True) + m, bigM=1e6, norm=float('inf'), verbose=True + ) self.check_cuts_valid_on_facet_containing_optimal(m) def check_cuts_valid_for_other_extreme_points(self, m): cuts = m._pyomo_gdp_cuttingplane_transformation.cuts - self.assertGreaterEqual(len(cuts), 1) # we should get at least one. + self.assertGreaterEqual(len(cuts), 1) # we should get at least one. m.x.fix(3) m.y.fix(1) @@ -956,7 +980,8 @@ def test_cuts_valid_for_other_extreme_points_fme(self): # confidence about in the case of numerical difficulties...) m = models.twoDisj_twoCircles_easy() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, create_cuts=create_cuts_fme, verbose=True) + m, bigM=1e6, create_cuts=create_cuts_fme, verbose=True + ) self.check_cuts_valid_for_other_extreme_points(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") @@ -967,9 +992,10 @@ def test_cuts_valid_for_other_extreme_points_inf_norm(self): # confidence about in the case of numerical difficulties...) m = models.twoDisj_twoCircles_easy() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, norm=float('inf'), cut_filtering_threshold=0.5) + m, bigM=1e6, norm=float('inf'), cut_filtering_threshold=0.5 + ) self.check_cuts_valid_for_other_extreme_points(m) - + @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_valid_for_optimal_tighter_m(self): m = models.twoDisj_twoCircles_easy() @@ -983,8 +1009,9 @@ def test_cuts_valid_for_optimal_tighter_m_inf_norm(self): m = models.twoDisj_twoCircles_easy() # this M comes from the fact that y \in (0,8) and x \in (0,6) - TransformationFactory('gdp.cuttingplane').apply_to(m, bigM=83, - norm=float('inf')) + TransformationFactory('gdp.cuttingplane').apply_to( + m, bigM=83, norm=float('inf') + ) self.check_cuts_valid_for_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") @@ -993,7 +1020,8 @@ def test_cuts_valid_for_optimal_tighter_m_fme(self): # this M comes from the fact that y \in (0,8) and x \in (0,6) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=83, create_cuts=create_cuts_fme) + m, bigM=83, create_cuts=create_cuts_fme + ) self.check_cuts_valid_for_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") @@ -1010,7 +1038,8 @@ def test_cuts_valid_for_optimalFacet_tighter_m_fme(self): # this M comes from the fact that y \in (0,8) and x \in (0,6) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=83, create_cuts=create_cuts_fme) + m, bigM=83, create_cuts=create_cuts_fme + ) self.check_cuts_valid_on_facet_containing_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") @@ -1019,7 +1048,8 @@ def test_cuts_valid_for_optimalFacet_tighter_m_inf_norm(self): # this M comes from the fact that y \in (0,8) and x \in (0,6) TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=83, norm=float('inf')) + m, bigM=83, norm=float('inf') + ) self.check_cuts_valid_on_facet_containing_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") @@ -1032,21 +1062,24 @@ def test_cuts_valid_for_other_extreme_points_tighter_m(self): def test_cuts_valid_for_other_extreme_points_tighter_m_fme(self): m = models.twoDisj_twoCircles_easy() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=83, create_cuts=create_cuts_fme) + m, bigM=83, create_cuts=create_cuts_fme + ) self.check_cuts_valid_for_other_extreme_points(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_valid_for_other_extreme_points_tighter_m_inf_norm(self): m = models.twoDisj_twoCircles_easy() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=83, norm=float('inf'), cut_filtering_threshold=0.5) + m, bigM=83, norm=float('inf'), cut_filtering_threshold=0.5 + ) self.check_cuts_valid_for_other_extreme_points(m) - -class NonlinearConvex_OverlappingCircles(unittest.TestCase): + + +class NonlinearConvex_OverlappingCircles(unittest.TestCase): def check_cuts_valid_for_optimal(self, m): cuts = m._pyomo_gdp_cuttingplane_transformation.cuts - self.assertGreaterEqual(len(cuts), 1) # we should get at least one. - + self.assertGreaterEqual(len(cuts), 1) # we should get at least one. + m.x.fix(2) m.y.fix(7) m.upper_circle.indicator_var.fix(True) @@ -1055,30 +1088,32 @@ def check_cuts_valid_for_optimal(self, m): m.lower_circle2.indicator_var.fix(False) for i in range(len(cuts)): self.assertGreaterEqual(value(cuts[i].body), 0) - + @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_valid_for_optimal(self): m = models.fourCircles() TransformationFactory('gdp.cuttingplane').apply_to(m, bigM=1e6) self.check_cuts_valid_for_optimal(m) - + @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_valid_for_optimal_fme(self): m = models.fourCircles() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, create_cuts=create_cuts_fme) + m, bigM=1e6, create_cuts=create_cuts_fme + ) self.check_cuts_valid_for_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_valid_for_optimal_inf_norm(self): m = models.fourCircles() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, norm=float('inf')) + m, bigM=1e6, norm=float('inf') + ) self.check_cuts_valid_for_optimal(m) def check_cuts_valid_on_facet_containing_optimal(self, m): cuts = m._pyomo_gdp_cuttingplane_transformation.cuts - self.assertGreaterEqual(len(cuts), 1) # we should get at least one. + self.assertGreaterEqual(len(cuts), 1) # we should get at least one. m.x.fix(5) m.y.fix(3) @@ -1099,14 +1134,16 @@ def test_cuts_valid_on_facet_containing_optimal(self): def test_cuts_valid_on_facet_containing_optimal_fme(self): m = models.fourCircles() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6,create_cuts=create_cuts_fme) + m, bigM=1e6, create_cuts=create_cuts_fme + ) self.check_cuts_valid_on_facet_containing_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_valid_on_facet_containing_optimal_inf_norm(self): m = models.fourCircles() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, norm=float('inf')) + m, bigM=1e6, norm=float('inf') + ) self.check_cuts_valid_on_facet_containing_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") @@ -1119,14 +1156,16 @@ def test_cuts_valid_for_optimal_tightM(self): def test_cuts_valid_for_optimal_tightM_fme(self): m = models.fourCircles() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6,create_cuts=create_cuts_fme) + m, bigM=1e6, create_cuts=create_cuts_fme + ) self.check_cuts_valid_for_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_valid_for_optimal_tightM_inf_norm(self): m = models.fourCircles() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, norm=float('inf')) + m, bigM=1e6, norm=float('inf') + ) self.check_cuts_valid_for_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") @@ -1139,12 +1178,14 @@ def test_cuts_valid_on_facet_containing_optimal_tightM(self): def test_cuts_valid_on_facet_containing_optimal_tightM_fme(self): m = models.fourCircles() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6,create_cuts=create_cuts_fme) + m, bigM=1e6, create_cuts=create_cuts_fme + ) self.check_cuts_valid_on_facet_containing_optimal(m) @unittest.skipIf('ipopt' not in solvers, "Ipopt solver not available") def test_cuts_valid_on_facet_containing_optimal_tightM_inf_norm(self): m = models.fourCircles() TransformationFactory('gdp.cuttingplane').apply_to( - m, bigM=1e6, norm=float('inf')) + m, bigM=1e6, norm=float('inf') + ) self.check_cuts_valid_on_facet_containing_optimal(m) diff --git a/pyomo/gdp/tests/test_disjunct.py b/pyomo/gdp/tests/test_disjunct.py index ef821c2ec5a..ccf5b8c2d6c 100644 --- a/pyomo/gdp/tests/test_disjunct.py +++ b/pyomo/gdp/tests/test_disjunct.py @@ -12,9 +12,11 @@ from io import StringIO import pyomo.common.unittest as unittest +import pyomo.core.expr as EXPR from pyomo.common.errors import PyomoException from pyomo.common.log import LoggingIntercept +from pyomo.core.expr.compare import assertExpressionsEqual from pyomo.core import ConcreteModel, Var, Constraint from pyomo.gdp import Disjunction, Disjunct from pyomo.gdp.disjunct import AutoLinkedBooleanVar, AutoLinkedBinaryVar @@ -33,7 +35,7 @@ def test_empty_disjunction(self): self.assertEqual(len(m.x1), 1) self.assertEqual(m.x1.disjuncts, [m.d, m.e]) - m.x2 = Disjunction([1,2,3,4]) + m.x2 = Disjunction([1, 2, 3, 4]) self.assertEqual(len(m.x2), 0) m.x2[2] = [m.d, m.e] @@ -44,7 +46,7 @@ def test_construct_implicit_disjuncts(self): m = ConcreteModel() m.x = Var() m.y = Var() - m.d = Disjunction(expr=[m.x<=0, m.y>=1]) + m.d = Disjunction(expr=[m.x <= 0, m.y >= 1]) self.assertEqual(len(m.component_map(Disjunction)), 1) self.assertEqual(len(m.component_map(Disjunct)), 1) @@ -59,7 +61,7 @@ def test_construct_implicit_disjuncts(self): # Test that the implicit disjuncts get a unique name m.add_component('e_disjuncts', Var()) - m.e = Disjunction(expr=[m.y<=0, m.x>=1]) + m.e = Disjunction(expr=[m.y <= 0, m.x >= 1]) self.assertEqual(len(m.component_map(Disjunction)), 2) self.assertEqual(len(m.component_map(Disjunct)), 2) implicit_disjuncts = list(m.component_map(Disjunct).keys()) @@ -75,14 +77,10 @@ def test_construct_implicit_disjuncts(self): # Test that the implicit disjuncts can be lists/tuples/generators def _gen(): - yield m.y<=4 - yield m.x>=5 - m.f = Disjunction(expr=[ - [ m.y<=0, - m.x>=1 ], - ( m.y<=2, - m.x>=3 ), - _gen() ]) + yield m.y <= 4 + yield m.x >= 5 + + m.f = Disjunction(expr=[[m.y <= 0, m.x >= 1], (m.y <= 2, m.x >= 3), _gen()]) self.assertEqual(len(m.component_map(Disjunction)), 3) self.assertEqual(len(m.component_map(Disjunct)), 3) implicit_disjuncts = list(m.component_map(Disjunct).keys()) @@ -116,8 +114,8 @@ def test_deactivate(self): m = ConcreteModel() m.x = Var() m.d1 = Disjunct() - m.d1.constraint = Constraint(expr=m.x<=0) - m.d = Disjunction(expr=[m.d1, m.x>=1, m.x>=5]) + m.d1.constraint = Constraint(expr=m.x <= 0) + m.d = Disjunction(expr=[m.d1, m.x >= 1, m.x >= 5]) d2 = m.d.disjuncts[1].parent_component() self.assertEqual(len(m.component_map(Disjunction)), 1) self.assertEqual(len(m.component_map(Disjunct)), 2) @@ -196,8 +194,8 @@ def test_deactivate_without_fixing_indicator(self): m = ConcreteModel() m.x = Var() m.d1 = Disjunct() - m.d1.constraint = Constraint(expr=m.x<=0) - m.d = Disjunction(expr=[m.d1, m.x>=1, m.x>=5]) + m.d1.constraint = Constraint(expr=m.x <= 0) + m.d = Disjunction(expr=[m.d1, m.x >= 1, m.x >= 5]) d2 = m.d.disjuncts[1].parent_component() self.assertEqual(len(m.component_map(Disjunction)), 1) self.assertEqual(len(m.component_map(Disjunct)), 2) @@ -235,6 +233,7 @@ def test_deactivate_without_fixing_indicator(self): def test_indexed_disjunct_active_property(self): m = ConcreteModel() m.x = Var(bounds=(0, 12)) + @m.Disjunct([0, 1, 2]) def disjunct(d, i): m = d.model() @@ -261,9 +260,10 @@ def disjunct(d, i): def test_indexed_disjunction_active_property(self): m = ConcreteModel() m.x = Var(bounds=(0, 12)) + @m.Disjunction([0, 1, 2]) def disjunction(m, i): - return [m.x == i*5, m.x == i*5 + 1] + return [m.x == i * 5, m.x == i * 5 + 1] self.assertTrue(m.disjunction.active) m.disjunction[2].deactivate() @@ -278,6 +278,7 @@ def disjunction(m, i): for i in range(3): self.assertFalse(m.disjunction[i].active) + class TestAutoVars(unittest.TestCase): def test_synchronize_value(self): m = ConcreteModel() @@ -379,8 +380,10 @@ def test_fix_value(self): with LoggingIntercept() as LOG: m.biv.fix(0.5) - self.assertEqual(LOG.getvalue().strip(), "Setting Var 'biv' to a " - "value `0.5` (float) not in domain Binary.") + self.assertEqual( + LOG.getvalue().strip(), + "Setting Var 'biv' to a value `0.5` (float) not in domain Binary.", + ) self.assertEqual(m.iv.value, None) self.assertEqual(m.biv.value, 0.5) @@ -398,11 +401,14 @@ def test_fix_value(self): # Note that fixing to a near-True value will toggle the iv with LoggingIntercept() as LOG: - m.biv.fix(1-eps) - self.assertEqual(LOG.getvalue().strip(), "Setting Var 'biv' to a " - "value `%s` (float) not in domain Binary." % (1-eps)) + m.biv.fix(1 - eps) + self.assertEqual( + LOG.getvalue().strip(), + "Setting Var 'biv' to a " + "value `%s` (float) not in domain Binary." % (1 - eps), + ) self.assertEqual(m.iv.value, True) - self.assertEqual(m.biv.value, 1-eps) + self.assertEqual(m.biv.value, 1 - eps) with LoggingIntercept() as LOG: m.biv.fix(eps, True) @@ -456,8 +462,7 @@ def test_cast_to_binary(self): m.biv = 1 - deprecation_msg = ( - "Implicit conversion of the Boolean indicator_var 'iv'") + deprecation_msg = "Implicit conversion of the Boolean indicator_var 'iv'" out = StringIO() with LoggingIntercept(out): @@ -471,7 +476,7 @@ def test_cast_to_binary(self): out = StringIO() with LoggingIntercept(out): - self.assertEqual(m.iv.bounds, (0,1)) + self.assertEqual(m.iv.bounds, (0, 1)) self.assertIn(deprecation_msg, out.getvalue()) out = StringIO() @@ -486,7 +491,7 @@ def test_cast_to_binary(self): out = StringIO() with LoggingIntercept(out): - m.iv.bounds = (1,1) + m.iv.bounds = (1, 1) self.assertIn(deprecation_msg, out.getvalue()) out = StringIO() @@ -511,24 +516,26 @@ def test_cast_to_binary(self): out = StringIO() with LoggingIntercept(out): with self.assertRaisesRegex( - PyomoException, r"Cannot convert non-constant Pyomo " - r"numeric value \(biv\) to bool"): + PyomoException, + r"Cannot convert non-constant Pyomo numeric value \(biv\) to bool", + ): bool(m.iv) self.assertIn(deprecation_msg, out.getvalue()) out = StringIO() with LoggingIntercept(out): with self.assertRaisesRegex( - TypeError, r"Implicit conversion of Pyomo numeric " - r"value \(biv\) to float"): + TypeError, + r"Implicit conversion of Pyomo numeric value \(biv\) to float", + ): float(m.iv) self.assertIn(deprecation_msg, out.getvalue()) out = StringIO() with LoggingIntercept(out): with self.assertRaisesRegex( - TypeError, r"Implicit conversion of Pyomo numeric " - r"value \(biv\) to int"): + TypeError, r"Implicit conversion of Pyomo numeric value \(biv\) to int" + ): int(m.iv) self.assertIn(deprecation_msg, out.getvalue()) @@ -597,15 +604,22 @@ def test_cast_to_binary(self): self.assertIs((m.iv > 0).args[1], m.biv) self.assertIn(deprecation_msg, out.getvalue()) - out = StringIO() with LoggingIntercept(out): - self.assertIs((m.iv + 1).args[0], m.biv) + e = m.iv + 1 + assertExpressionsEqual( + self, e, EXPR.LinearExpression([EXPR.MonomialTermExpression((1, m.biv)), 1]) + ) self.assertIn(deprecation_msg, out.getvalue()) out = StringIO() with LoggingIntercept(out): - self.assertIs((m.iv - 1).args[0], m.biv) + e = m.iv - 1 + assertExpressionsEqual( + self, + e, + EXPR.LinearExpression([EXPR.MonomialTermExpression((1, m.biv)), -1]), + ) self.assertIn(deprecation_msg, out.getvalue()) out = StringIO() @@ -620,18 +634,25 @@ def test_cast_to_binary(self): out = StringIO() with LoggingIntercept(out): - self.assertIs((m.iv ** 2).args[0], m.biv) + self.assertIs((m.iv**2).args[0], m.biv) self.assertIn(deprecation_msg, out.getvalue()) - out = StringIO() with LoggingIntercept(out): - self.assertIs((1 + m.iv).args[1], m.biv) + e = 1 + m.iv + assertExpressionsEqual( + self, e, EXPR.LinearExpression([1, EXPR.MonomialTermExpression((1, m.biv))]) + ) self.assertIn(deprecation_msg, out.getvalue()) out = StringIO() with LoggingIntercept(out): - self.assertIs((1 - m.iv).args[1].args[1], m.biv) + e = 1 - m.iv + assertExpressionsEqual( + self, + e, + EXPR.LinearExpression([1, EXPR.MonomialTermExpression((-1, m.biv))]), + ) self.assertIn(deprecation_msg, out.getvalue()) out = StringIO() @@ -646,22 +667,27 @@ def test_cast_to_binary(self): out = StringIO() with LoggingIntercept(out): - self.assertIs((2 ** m.iv).args[1], m.biv) + self.assertIs((2**m.iv).args[1], m.biv) self.assertIn(deprecation_msg, out.getvalue()) - out = StringIO() with LoggingIntercept(out): a = m.iv a += 1 - self.assertIs(a.args[0], m.biv) + assertExpressionsEqual( + self, a, EXPR.LinearExpression([EXPR.MonomialTermExpression((1, m.biv)), 1]) + ) self.assertIn(deprecation_msg, out.getvalue()) out = StringIO() with LoggingIntercept(out): a = m.iv a -= 1 - self.assertIs(a.args[0], m.biv) + assertExpressionsEqual( + self, + a, + EXPR.LinearExpression([EXPR.MonomialTermExpression((1, m.biv)), -1]), + ) self.assertIn(deprecation_msg, out.getvalue()) out = StringIO() @@ -686,7 +712,5 @@ def test_cast_to_binary(self): self.assertIn(deprecation_msg, out.getvalue()) - if __name__ == '__main__': unittest.main() - diff --git a/pyomo/gdp/tests/test_fix_disjuncts.py b/pyomo/gdp/tests/test_fix_disjuncts.py index a313cdc113c..1b741f7a840 100644 --- a/pyomo/gdp/tests/test_fix_disjuncts.py +++ b/pyomo/gdp/tests/test_fix_disjuncts.py @@ -14,14 +14,25 @@ """Tests disjunct fixing.""" import pyomo.common.unittest as unittest from pyomo.environ import ( - Block, Constraint, ConcreteModel, TransformationFactory, NonNegativeReals, - BooleanVar, LogicalConstraint, SolverFactory, Objective, value, Var, - implies) + Block, + Constraint, + ConcreteModel, + TransformationFactory, + NonNegativeReals, + BooleanVar, + LogicalConstraint, + SolverFactory, + Objective, + value, + Var, + implies, +) from pyomo.gdp import Disjunct, Disjunction, GDP_Error from pyomo.opt import check_available_solvers solvers = check_available_solvers('gurobi') + class TestFixDisjuncts(unittest.TestCase): """Tests fixing of disjuncts.""" @@ -74,11 +85,12 @@ def test_disjunct_not_binary(self): m.d1.binary_indicator_var.set_value(0.5) m.d2.binary_indicator_var.set_value(0.5) with self.assertRaisesRegex( - GDP_Error, - "The value of the indicator_var of " - "Disjunct 'd1' is None. All indicator_vars " - "must have values before calling " - "'fix_disjuncts'."): + GDP_Error, + "The value of the indicator_var of " + "Disjunct 'd1' is None. All indicator_vars " + "must have values before calling " + "'fix_disjuncts'.", + ): TransformationFactory('gdp.fix_disjuncts').apply_to(m) def test_disjuncts_partially_fixed(self): @@ -94,17 +106,18 @@ def test_disjuncts_partially_fixed(self): m.d2.indicator_var.set_value(False) with self.assertRaisesRegex( - GDP_Error, - "The value of the indicator_var of " - "Disjunct 'another1' is None. All indicator_vars " - "must have values before calling " - "'fix_disjuncts'."): + GDP_Error, + "The value of the indicator_var of " + "Disjunct 'another1' is None. All indicator_vars " + "must have values before calling " + "'fix_disjuncts'.", + ): TransformationFactory('gdp.fix_disjuncts').apply_to(m) @unittest.skipIf('gurobi' not in solvers, "Gurobi solver not available") def test_logical_constraints_transformed(self): """It is expected that the result of this transformation is a MI(N)LP, - so check that LogicalConstraints are handeled correctly""" + so check that LogicalConstraints are handled correctly""" m = ConcreteModel() m.x = Var(bounds=(0, 10)) m.d1 = Disjunct() @@ -115,7 +128,8 @@ def test_logical_constraints_transformed(self): m.Y = BooleanVar() m.global_logical = LogicalConstraint(expr=m.Y.xor(m.d1.indicator_var)) m.d1.logical = LogicalConstraint( - expr=implies(~m.Y, m.another.disjuncts[0].indicator_var)) + expr=implies(~m.Y, m.another.disjuncts[0].indicator_var) + ) m.obj = Objective(expr=m.x) m.d1.indicator_var.set_value(True) @@ -127,10 +141,15 @@ def test_logical_constraints_transformed(self): # Make sure there are no active LogicalConstraints self.assertEqual( - len(list(m.component_data_objects(LogicalConstraint, - active=True, - descend_into=(Block, - Disjunct)))), 0) + len( + list( + m.component_data_objects( + LogicalConstraint, active=True, descend_into=(Block, Disjunct) + ) + ) + ), + 0, + ) # See that it solves as expected SolverFactory('gurobi').solve(m) self.assertTrue(value(m.d1.indicator_var)) @@ -147,7 +166,7 @@ def test_reclassify_deactivated_disjuncts(self): m.d[1].deactivate() m.d[2].indicator_var = True m.d[3].indicator_var = False - + TransformationFactory('gdp.fix_disjuncts').apply_to(m) self.assertTrue(m.d[1].indicator_var.fixed) @@ -164,5 +183,6 @@ def test_reclassify_deactivated_disjuncts(self): self.assertEqual(m.d[1].ctype, Block) self.assertEqual(m.d[2].ctype, Block) + if __name__ == '__main__': unittest.main() diff --git a/pyomo/gdp/tests/test_gdp.py b/pyomo/gdp/tests/test_gdp.py index 9ab6d7dd837..5c810dcce18 100644 --- a/pyomo/gdp/tests/test_gdp.py +++ b/pyomo/gdp/tests/test_gdp.py @@ -17,33 +17,39 @@ import sys from os.path import abspath, dirname, normpath, join from pyomo.common.fileutils import import_file +from pyomo.repn.tests.lp_diff import load_and_compare_lp_baseline + currdir = dirname(abspath(__file__)) -exdir = normpath(join(currdir,'..','..','..','examples', 'gdp')) +exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'gdp')) try: import new except: import types as new -from filecmp import cmp import pyomo.common.unittest as unittest from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args import pyomo.opt from pyomo.environ import SolverFactory, TransformationFactory -solvers = pyomo.opt.check_available_solvers('cplex', 'glpk','gurobi') +solvers = pyomo.opt.check_available_solvers('cplex', 'glpk', 'gurobi') if False: - if os.path.exists(sys.exec_prefix+os.sep+'bin'+os.sep+'coverage'): - executable=sys.exec_prefix+os.sep+'bin'+os.sep+'coverage -x ' + if os.path.exists(sys.exec_prefix + os.sep + 'bin' + os.sep + 'coverage'): + executable = sys.exec_prefix + os.sep + 'bin' + os.sep + 'coverage -x ' else: - executable=sys.executable + executable = sys.executable def copyfunc(func): - return new.function(func.__code__, func.func_globals, func.func_name, - func.func_defaults, func.func_closure) + return new.function( + func.__code__, + func.func_globals, + func.func_name, + func.func_defaults, + func.func_closure, + ) class Labeler(type): def __new__(meta, name, bases, attrs): @@ -53,8 +59,7 @@ def __new__(meta, name, bases, attrs): original = getattr(base, key, None) if original is not None: copy = copyfunc(original) - copy.__doc__ = attrs[key].__doc__ + \ - " (%s)" % copy.__name__ + copy.__doc__ = attrs[key].__doc__ + " (%s)" % copy.__name__ attrs[key] = copy break for base in bases: @@ -68,10 +73,10 @@ def __new__(meta, name, bases, attrs): class CommonTests: - #__metaclass__ = Labeler + # __metaclass__ = Labeler + + solve = True - solve=True - def pyomo(self, *args, **kwds): exfile = import_file(join(exdir, 'jobshop.py')) m_jobshop = exfile.build_model() @@ -84,8 +89,10 @@ def pyomo(self, *args, **kwds): transformation = kwds['preprocess'] TransformationFactory('gdp.%s' % transformation).apply_to(m) - m.write(join(currdir, '%s_result.lp' % self.problem), - io_options={'symbolic_solver_labels': True}) + m.write( + join(currdir, '%s_result.lp' % self.problem), + io_options={'symbolic_solver_labels': True}, + ) if self.solve: solver = 'glpk' @@ -99,7 +106,7 @@ def check(self, problem, solver): pass def referenceFile(self, problem, solver): - return join(currdir, problem+'.txt') + return join(currdir, problem + '.txt') def getObjective(self, fname): FILE = open(fname) @@ -114,122 +121,118 @@ def getObjective(self, fname): def updateDocStrings(self): for key in dir(self): if key.startswith('test'): - getattr(self,key).__doc__ = " (%s)" % getattr(self,key).__name__ + getattr(self, key).__doc__ = " (%s)" % getattr(self, key).__name__ def test_bigm_jobshop_small(self): - self.problem='test_bigm_jobshop_small' + self.problem = 'test_bigm_jobshop_small' # Run the small jobshop example using the BigM transformation self.pyomo('jobshop-small.dat', preprocess='bigm') # ESJ: TODO: Right now the indicator variables have names they won't # have when they don't have to be reclassified. So I think this LP file # will need to change again. - self.check( 'jobshop_small', 'bigm' ) + self.check('jobshop_small', 'bigm') def test_bigm_jobshop_large(self): - self.problem='test_bigm_jobshop_large' + self.problem = 'test_bigm_jobshop_large' # Run the large jobshop example using the BigM transformation self.pyomo('jobshop.dat', preprocess='bigm') # ESJ: TODO: this LP file also will need to change with the # indicator variable change. - self.check( 'jobshop_large', 'bigm' ) + self.check('jobshop_large', 'bigm') # def test_bigm_constrained_layout(self): # self.problem='test_bigm_constrained_layout' # # Run the constrained layout example with the bigm transformation - # self.pyomo( join(exdir,'ConstrainedLayout.py'), - # join(exdir,'ConstrainedLayout_BigM.dat'), + # self.pyomo( join(exdir,'ConstrainedLayout.py'), + # join(exdir,'ConstrainedLayout_BigM.dat'), # preprocess='bigm', solver='cplex') # self.check( 'constrained_layout', 'bigm') def test_hull_jobshop_small(self): - self.problem='test_hull_jobshop_small' + self.problem = 'test_hull_jobshop_small' # Run the small jobshop example using the Hull transformation self.pyomo('jobshop-small.dat', preprocess='hull') - self.check( 'jobshop_small', 'hull' ) + self.check('jobshop_small', 'hull') def test_hull_jobshop_large(self): - self.problem='test_hull_jobshop_large' + self.problem = 'test_hull_jobshop_large' # Run the large jobshop example using the Hull transformation self.pyomo('jobshop.dat', preprocess='hull') - self.check( 'jobshop_large', 'hull' ) + self.check('jobshop_large', 'hull') @unittest.skip("cutting plane LP file tests are too fragile") @unittest.skipIf('gurobi' not in solvers, 'Gurobi solver not available') def test_cuttingplane_jobshop_small(self): - self.problem='test_cuttingplane_jobshop_small' + self.problem = 'test_cuttingplane_jobshop_small' self.pyomo('jobshop-small.dat', preprocess='cuttingplane') - self.check( 'jobshop_small', 'cuttingplane' ) + self.check('jobshop_small', 'cuttingplane') @unittest.skip("cutting plane LP file tests are too fragile") @unittest.skipIf('gurobi' not in solvers, 'Gurobi solver not available') def test_cuttingplane_jobshop_large(self): - self.problem='test_cuttingplane_jobshop_large' + self.problem = 'test_cuttingplane_jobshop_large' self.pyomo('jobshop.dat', preprocess='cuttingplane') - self.check( 'jobshop_large', 'cuttingplane' ) + self.check('jobshop_large', 'cuttingplane') class Reformulate(unittest.TestCase, CommonTests): - - solve=False + solve = False def tearDown(self): - if os.path.exists(os.path.join(currdir,'result.yml')): - os.remove(os.path.join(currdir,'result.yml')) + if os.path.exists(os.path.join(currdir, 'result.yml')): + os.remove(os.path.join(currdir, 'result.yml')) - def pyomo(self, *args, **kwds): + def pyomo(self, *args, **kwds): args = list(args) - args.append('--output='+self.problem+'_result.lp') + args.append('--output=' + self.problem + '_result.lp') CommonTests.pyomo(self, *args, **kwds) def referenceFile(self, problem, solver): - return join(currdir, problem+"_"+solver+'.lp') + return join(currdir, problem + "_" + solver + '.lp') def check(self, problem, solver): - _prob, _solv = join(currdir,self.problem+'_result.lp'), self.referenceFile(problem,solver) - self.assertTrue(cmp(_prob, _solv), - msg="Files %s and %s differ" % (_prob, _solv)) - if os.path.exists(join(currdir,self.problem+'_result.lp')): - os.remove(join(currdir,self.problem+'_result.lp')) + self.assertEqual( + *load_and_compare_lp_baseline( + self.referenceFile(problem, solver), + join(currdir, self.problem + '_result.lp'), + ) + ) + if os.path.exists(join(currdir, self.problem + '_result.lp')): + os.remove(join(currdir, self.problem + '_result.lp')) class Solver(unittest.TestCase): - def tearDown(self): - if os.path.exists(os.path.join(currdir,'result.yml')): - os.remove(os.path.join(currdir,'result.yml')) + if os.path.exists(os.path.join(currdir, 'result.yml')): + os.remove(os.path.join(currdir, 'result.yml')) def check(self, problem, solver): - refObj = self.getObjective(self.referenceFile(problem,solver)) - ansObj = self.getObjective(join(currdir,'result.yml')) + refObj = self.getObjective(self.referenceFile(problem, solver)) + ansObj = self.getObjective(join(currdir, 'result.yml')) self.assertEqual(len(refObj), len(ansObj)) for i in range(len(refObj)): self.assertEqual(len(refObj[i]), len(ansObj[i])) - for key,val in refObj[i].items(): + for key, val in refObj[i].items(): self.assertAlmostEqual( - val.get('Value', None), - ansObj[i].get(key,{}).get('Value', None), - 6 + val.get('Value', None), ansObj[i].get(key, {}).get('Value', None), 6 ) # Clean up test files - if os.path.exists(join(currdir,self.problem+'_result.lp')): - os.remove(join(currdir,self.problem+'_result.lp')) + if os.path.exists(join(currdir, self.problem + '_result.lp')): + os.remove(join(currdir, self.problem + '_result.lp')) @unittest.skipIf(not yaml_available, "YAML is not available") @unittest.skipIf(not 'glpk' in solvers, "The 'glpk' executable is not available") class Solve_GLPK(Solver, CommonTests): - - def pyomo(self, *args, **kwds): + def pyomo(self, *args, **kwds): kwds['solver'] = 'glpk' CommonTests.pyomo(self, *args, **kwds) @unittest.skipIf(not yaml_available, "YAML is not available") -@unittest.skipIf(not 'cplex' in solvers, - "The 'cplex' executable is not available") +@unittest.skipIf(not 'cplex' in solvers, "The 'cplex' executable is not available") class Solve_CPLEX(Solver, CommonTests): - - def pyomo(self, *args, **kwds): + def pyomo(self, *args, **kwds): kwds['solver'] = 'cplex' CommonTests.pyomo(self, *args, **kwds) diff --git a/pyomo/gdp/tests/test_gdp_reclassification_error.py b/pyomo/gdp/tests/test_gdp_reclassification_error.py index 646ad045ca2..a65ccac2d8f 100644 --- a/pyomo/gdp/tests/test_gdp_reclassification_error.py +++ b/pyomo/gdp/tests/test_gdp_reclassification_error.py @@ -32,8 +32,7 @@ def test_disjunct_not_in_disjunction(self): log = StringIO() with LoggingIntercept(log, 'pyomo.gdp', logging.WARNING): check_model_algebraic(m) - self.assertRegex( log.getvalue(), - '.*not found in any Disjunctions.*') + self.assertRegex(log.getvalue(), '.*not found in any Disjunctions.*') def test_disjunct_not_in_active_disjunction(self): m = pyo.ConcreteModel() @@ -48,6 +47,8 @@ def test_disjunct_not_in_active_disjunction(self): log = StringIO() with LoggingIntercept(log, 'pyomo.gdp', logging.WARNING): check_model_algebraic(m) - self.assertRegex(log.getvalue(), - '.*While it participates in a Disjunction, ' - 'that Disjunction is currently deactivated.*') + self.assertRegex( + log.getvalue(), + '.*While it participates in a Disjunction, ' + 'that Disjunction is currently deactivated.*', + ) diff --git a/pyomo/gdp/tests/test_hull.py b/pyomo/gdp/tests/test_hull.py index 94bb8f912f2..5d86247b435 100644 --- a/pyomo/gdp/tests/test_hull.py +++ b/pyomo/gdp/tests/test_hull.py @@ -9,15 +9,37 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +from pyomo.common.dependencies import dill_available import pyomo.common.unittest as unittest from pyomo.common.log import LoggingIntercept import logging -from pyomo.environ import (TransformationFactory, Block, Set, Constraint, Var, - RealSet, ComponentMap, value, log, ConcreteModel, - Any, Suffix, SolverFactory, RangeSet, Param, - Objective, TerminationCondition, Reference) -from pyomo.core.expr.sympy_tools import sympy_available +from pyomo.environ import ( + TransformationFactory, + Block, + Set, + Constraint, + Var, + RealSet, + ComponentMap, + value, + log, + ConcreteModel, + Any, + Suffix, + SolverFactory, + RangeSet, + Param, + Objective, + TerminationCondition, + Reference, +) +from pyomo.core.expr.compare import ( + assertExpressionsEqual, + assertExpressionsStructurallyEqual, +) +import pyomo.core.expr as EXPR +from pyomo.core.base import constraint from pyomo.repn import generate_standard_repn from pyomo.gdp import Disjunct, Disjunction, GDP_Error @@ -28,12 +50,14 @@ from io import StringIO import os from os.path import abspath, dirname, join + currdir = dirname(abspath(__file__)) from filecmp import cmp EPS = TransformationFactory('gdp.hull').CONFIG.EPS linear_solvers = ct.linear_solvers + class CommonTests: def setUp(self): # set seed so we can test name collisions predictably @@ -42,6 +66,7 @@ def setUp(self): def diff_apply_to_and_create_using(self, model): ct.diff_apply_to_and_create_using(self, model, 'gdp.hull') + class TwoTermDisj(unittest.TestCase, CommonTests): def setUp(self): # set seed to test unique namer @@ -71,13 +96,13 @@ def test_disaggregated_vars(self): transBlock = m._pyomo_gdp_hull_reformulation disjBlock = transBlock.relaxedDisjuncts # same on both disjuncts - for i in [0,1]: + for i in [0, 1]: relaxationBlock = disjBlock[i] x = relaxationBlock.disaggregatedVars.x - if i == 1: # this disjunct as x, w, and no y + if i == 1: # this disjunct as x, w, and no y w = relaxationBlock.disaggregatedVars.w y = transBlock._disaggregatedVars[0] - elif i == 0: # this disjunct as x, y, and no w + elif i == 0: # this disjunct as x, y, and no w y = relaxationBlock.disaggregatedVars.y w = transBlock._disaggregatedVars[1] # variables created (w and y can be Vars or VarDatas depending on @@ -106,16 +131,16 @@ def check_furman_et_al_denominator(self, expr, ind_var): def test_transformed_constraint_nonlinear(self): m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.hull').apply_to(m) + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) disjBlock = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts # the only constraint on the first block is the non-linear one - disj1c = disjBlock[0].component("d[0].c") - self.assertIsInstance(disj1c, Constraint) + disj1c = hull.get_transformed_constraints(m.d[0].c) # we only have an upper bound self.assertEqual(len(disj1c), 1) - cons = disj1c['ub'] + cons = disj1c[0] self.assertIsNone(cons.lower) self.assertEqual(cons.upper, 0) repn = generate_standard_repn(cons.body) @@ -123,31 +148,102 @@ def test_transformed_constraint_nonlinear(self): self.assertEqual(len(repn.linear_vars), 1) # This is a weak test, but as good as any to ensure that the # substitution was done correctly - EPS_1 = 1-EPS - self.assertEqual( - str(cons.body), - "(%s*d[0].binary_indicator_var + %s)*(" - "_pyomo_gdp_hull_reformulation.relaxedDisjuncts[0]." - "disaggregatedVars.x" - "/(%s*d[0].binary_indicator_var + %s) + " - "(_pyomo_gdp_hull_reformulation.relaxedDisjuncts[0]." - "disaggregatedVars.y/" - "(%s*d[0].binary_indicator_var + %s))**2) - " - "%s*(0.0 + 0.0**2)*(1 - d[0].binary_indicator_var) " - "- 14.0*d[0].binary_indicator_var" - % (EPS_1, EPS, EPS_1, EPS, EPS_1, EPS, EPS)) + EPS_1 = 1 - EPS + _disj = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0] + assertExpressionsEqual( + self, + cons.body, + EXPR.SumExpression( + [ + EXPR.ProductExpression( + ( + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression( + (EPS_1, m.d[0].binary_indicator_var) + ), + EPS, + ] + ), + EXPR.SumExpression( + [ + EXPR.DivisionExpression( + ( + _disj.disaggregatedVars.x, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression( + ( + EPS_1, + m.d[0].binary_indicator_var, + ) + ), + EPS, + ] + ), + ) + ), + EXPR.PowExpression( + ( + EXPR.DivisionExpression( + ( + _disj.disaggregatedVars.y, + EXPR.LinearExpression( + [ + EXPR.MonomialTermExpression( + ( + EPS_1, + m.d[ + 0 + ].binary_indicator_var, + ) + ), + EPS, + ] + ), + ) + ), + 2, + ) + ), + ] + ), + ) + ), + EXPR.NegationExpression( + ( + EXPR.ProductExpression( + ( + 0.0, + EXPR.LinearExpression( + [ + 1, + EXPR.MonomialTermExpression( + (-1, m.d[0].binary_indicator_var) + ), + ] + ), + ) + ), + ) + ), + EXPR.MonomialTermExpression((-14.0, m.d[0].binary_indicator_var)), + ] + ), + ) def test_transformed_constraints_linear(self): m = models.makeTwoTermDisj_Nonlinear() - TransformationFactory('gdp.hull').apply_to(m) + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) disjBlock = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts # the only constraint on the first block is the non-linear one - c1 = disjBlock[1].component("d[1].c1") - # has only lb + c1 = hull.get_transformed_constraints(m.d[1].c1) self.assertEqual(len(c1), 1) - cons = c1['lb'] + cons = c1[0] + # has only lb self.assertIsNone(cons.lower) self.assertEqual(cons.upper, 0) repn = generate_standard_repn(cons.body) @@ -159,10 +255,10 @@ def test_transformed_constraints_linear(self): self.assertEqual(disjBlock[1].disaggregatedVars.x.lb, 0) self.assertEqual(disjBlock[1].disaggregatedVars.x.ub, 8) - c2 = disjBlock[1].component("d[1].c2") - # 'eq' is preserved + c2 = hull.get_transformed_constraints(m.d[1].c2) self.assertEqual(len(c2), 1) - cons = c2['eq'] + cons = c2[0] + # 'eq' is preserved self.assertEqual(cons.lower, 0) self.assertEqual(cons.upper, 0) repn = generate_standard_repn(cons.body) @@ -174,10 +270,11 @@ def test_transformed_constraints_linear(self): self.assertEqual(disjBlock[1].disaggregatedVars.w.lb, 0) self.assertEqual(disjBlock[1].disaggregatedVars.w.ub, 7) - c3 = disjBlock[1].component("d[1].c3") + c3 = hull.get_transformed_constraints(m.d[1].c3) # bounded inequality is split self.assertEqual(len(c3), 2) - cons = c3['lb'] + # lb + cons = c3[0] self.assertIsNone(cons.lower) self.assertEqual(cons.upper, 0) repn = generate_standard_repn(cons.body) @@ -187,7 +284,8 @@ def test_transformed_constraints_linear(self): ct.check_linear_coef(self, repn, m.d[1].indicator_var, 1) self.assertEqual(repn.constant, 0) - cons = c3['ub'] + # ub + cons = c3[1] self.assertIsNone(cons.lower) self.assertEqual(cons.upper, 0) repn = generate_standard_repn(cons.body) @@ -197,8 +295,7 @@ def test_transformed_constraints_linear(self): ct.check_linear_coef(self, repn, m.d[1].indicator_var, -3) self.assertEqual(repn.constant, 0) - def check_bound_constraints_on_disjBlock(self, cons, disvar, indvar, lb, - ub): + def check_bound_constraints_on_disjBlock(self, cons, disvar, indvar, lb, ub): self.assertIsInstance(cons, Constraint) # both lb and ub @@ -223,8 +320,9 @@ def check_bound_constraints_on_disjBlock(self, cons, disvar, indvar, lb, ct.check_linear_coef(self, repn, indvar, -ub) ct.check_linear_coef(self, repn, disvar, 1) - def check_bound_constraints_on_disjunctionBlock(self, varlb, varub, disvar, - indvar, lb, ub): + def check_bound_constraints_on_disjunctionBlock( + self, varlb, varub, disvar, indvar, lb, ub + ): self.assertIsNone(varlb.lower) self.assertEqual(varlb.upper, 0) repn = generate_standard_repn(varlb.body) @@ -249,33 +347,48 @@ def test_disaggregatedVar_bounds(self): transBlock = m._pyomo_gdp_hull_reformulation disjBlock = transBlock.relaxedDisjuncts - for i in [0,1]: + for i in [0, 1]: # check bounds constraints for each variable on each of the two # disjuncts. self.check_bound_constraints_on_disjBlock( disjBlock[i].x_bounds, disjBlock[i].disaggregatedVars.x, - m.d[i].indicator_var, 1, 8) - if i == 1: # this disjunct has x, w, and no y + m.d[i].indicator_var, + 1, + 8, + ) + if i == 1: # this disjunct has x, w, and no y self.check_bound_constraints_on_disjBlock( disjBlock[i].w_bounds, disjBlock[i].disaggregatedVars.w, - m.d[i].indicator_var, 2, 7) + m.d[i].indicator_var, + 2, + 7, + ) self.check_bound_constraints_on_disjunctionBlock( - transBlock._boundsConstraints[0,'lb'], - transBlock._boundsConstraints[0,'ub'], + transBlock._boundsConstraints[0, 'lb'], + transBlock._boundsConstraints[0, 'ub'], transBlock._disaggregatedVars[0], - m.d[0].indicator_var, -10, -3) - elif i == 0: # this disjunct has x, y, and no w + m.d[0].indicator_var, + -10, + -3, + ) + elif i == 0: # this disjunct has x, y, and no w self.check_bound_constraints_on_disjBlock( disjBlock[i].y_bounds, disjBlock[i].disaggregatedVars.y, - m.d[i].indicator_var, -10, -3) + m.d[i].indicator_var, + -10, + -3, + ) self.check_bound_constraints_on_disjunctionBlock( - transBlock._boundsConstraints[1,'lb'], - transBlock._boundsConstraints[1,'ub'], + transBlock._boundsConstraints[1, 'lb'], + transBlock._boundsConstraints[1, 'ub'], transBlock._disaggregatedVars[1], - m.d[1].indicator_var, 2, 7) + m.d[1].indicator_var, + 2, + 7, + ) def test_error_for_or(self): m = models.makeTwoTermDisj_Nonlinear() @@ -286,7 +399,8 @@ def test_error_for_or(self): "Cannot do hull reformulation for Disjunction " "'disjunction' with OR constraint. Must be an XOR!*", TransformationFactory('gdp.hull').apply_to, - m) + m, + ) def check_disaggregation_constraint(self, cons, var, disvar1, disvar2): repn = generate_standard_repn(cons.body) @@ -305,14 +419,23 @@ def test_disaggregation_constraint(self): disjBlock = transBlock.relaxedDisjuncts self.check_disaggregation_constraint( - hull.get_disaggregation_constraint(m.w, m.disjunction), m.w, - disjBlock[1].disaggregatedVars.w, transBlock._disaggregatedVars[1]) + hull.get_disaggregation_constraint(m.w, m.disjunction), + m.w, + disjBlock[1].disaggregatedVars.w, + transBlock._disaggregatedVars[1], + ) self.check_disaggregation_constraint( - hull.get_disaggregation_constraint(m.x, m.disjunction), m.x, - disjBlock[0].disaggregatedVars.x, disjBlock[1].disaggregatedVars.x) + hull.get_disaggregation_constraint(m.x, m.disjunction), + m.x, + disjBlock[0].disaggregatedVars.x, + disjBlock[1].disaggregatedVars.x, + ) self.check_disaggregation_constraint( - hull.get_disaggregation_constraint(m.y, m.disjunction), m.y, - disjBlock[0].disaggregatedVars.y, transBlock._disaggregatedVars[0]) + hull.get_disaggregation_constraint(m.y, m.disjunction), + m.y, + disjBlock[0].disaggregatedVars.y, + transBlock._disaggregatedVars[0], + ) def test_xor_constraint_mapping(self): ct.check_xor_constraint_mapping(self, 'hull') @@ -336,43 +459,40 @@ def test_transformed_constraint_mappings(self): # first disjunct orig1 = m.d[0].c - trans1 = disjBlock[0].component("d[0].c") + cons = hull.get_transformed_constraints(orig1) + self.assertEqual(len(cons), 1) + trans1 = cons[0] + self.assertIs(trans1.parent_block(), disjBlock[0]) self.assertIs(hull.get_src_constraint(trans1), orig1) - self.assertIs(hull.get_src_constraint(trans1['ub']), orig1) - trans_list = hull.get_transformed_constraints(orig1) - self.assertEqual(len(trans_list), 1) - self.assertIs(trans_list[0], trans1['ub']) # second disjunct # first constraint orig1 = m.d[1].c1 - trans1 = disjBlock[1].component("d[1].c1") + cons = hull.get_transformed_constraints(orig1) + self.assertEqual(len(cons), 1) + trans1 = cons[0] + self.assertIs(trans1.parent_block(), disjBlock[1]) self.assertIs(hull.get_src_constraint(trans1), orig1) - self.assertIs(hull.get_src_constraint(trans1['lb']), orig1) - trans_list = hull.get_transformed_constraints(orig1) - self.assertEqual(len(trans_list), 1) - self.assertIs(trans_list[0], trans1['lb']) # second constraint orig2 = m.d[1].c2 - trans2 = disjBlock[1].component("d[1].c2") + cons = hull.get_transformed_constraints(orig2) + self.assertEqual(len(cons), 1) + trans2 = cons[0] + self.assertIs(trans1.parent_block(), disjBlock[1]) self.assertIs(hull.get_src_constraint(trans2), orig2) - self.assertIs(hull.get_src_constraint(trans2['eq']), orig2) - trans_list = hull.get_transformed_constraints(orig2) - self.assertEqual(len(trans_list), 1) - self.assertIs(trans_list[0], trans2['eq']) # third constraint orig3 = m.d[1].c3 - trans3 = disjBlock[1].component("d[1].c3") + cons = hull.get_transformed_constraints(orig3) + self.assertEqual(len(cons), 2) + trans3 = cons[0] self.assertIs(hull.get_src_constraint(trans3), orig3) - self.assertIs(hull.get_src_constraint(trans3['lb']), orig3) - self.assertIs(hull.get_src_constraint(trans3['ub']), orig3) - trans_list = hull.get_transformed_constraints(orig3) - self.assertEqual(len(trans_list), 2) - self.assertIs(trans_list[0], trans3['lb']) - self.assertIs(trans_list[1], trans3['ub']) + self.assertIs(trans3.parent_block(), disjBlock[1]) + trans32 = cons[1] + self.assertIs(hull.get_src_constraint(trans32), orig3) + self.assertIs(trans32.parent_block(), disjBlock[1]) def test_disaggregatedVar_mappings(self): m = models.makeTwoTermDisj_Nonlinear() @@ -382,13 +502,13 @@ def test_disaggregatedVar_mappings(self): transBlock = m._pyomo_gdp_hull_reformulation disjBlock = transBlock.relaxedDisjuncts - for i in [0,1]: + for i in [0, 1]: mappings = ComponentMap() mappings[m.x] = disjBlock[i].disaggregatedVars.x - if i == 1: # this disjunct as x, w, and no y + if i == 1: # this disjunct as x, w, and no y mappings[m.w] = disjBlock[i].disaggregatedVars.w mappings[m.y] = transBlock._disaggregatedVars[0] - elif i == 0: # this disjunct as x, y, and no w + elif i == 0: # this disjunct as x, y, and no w mappings[m.y] = disjBlock[i].disaggregatedVars.y mappings[m.w] = transBlock._disaggregatedVars[1] @@ -404,21 +524,21 @@ def test_bigMConstraint_mappings(self): transBlock = m._pyomo_gdp_hull_reformulation disjBlock = transBlock.relaxedDisjuncts - for i in [0,1]: + for i in [0, 1]: mappings = ComponentMap() mappings[disjBlock[i].disaggregatedVars.x] = disjBlock[i].x_bounds - if i == 1: # this disjunct has x, w, and no y - mappings[disjBlock[i].disaggregatedVars.w] = disjBlock[i].\ - w_bounds + if i == 1: # this disjunct has x, w, and no y + mappings[disjBlock[i].disaggregatedVars.w] = disjBlock[i].w_bounds mappings[transBlock._disaggregatedVars[0]] = Reference( - transBlock._boundsConstraints[0,...]) - elif i == 0: # this disjunct has x, y, and no w - mappings[disjBlock[i].disaggregatedVars.y] = disjBlock[i].\ - y_bounds + transBlock._boundsConstraints[0, ...] + ) + elif i == 0: # this disjunct has x, y, and no w + mappings[disjBlock[i].disaggregatedVars.y] = disjBlock[i].y_bounds mappings[transBlock._disaggregatedVars[1]] = Reference( - transBlock._boundsConstraints[1,...]) + transBlock._boundsConstraints[1, ...] + ) for var, cons in mappings.items(): - returned_cons = hull.get_var_bounds_constraint(var) + returned_cons = hull.get_var_bounds_constraint(var) # This sometimes refers a reference to the right part of a # larger indexed constraint, so the indexed constraints # themselves might not be the same object. The ConstraintDatas @@ -442,8 +562,7 @@ def test_locally_declared_var_bounds_used_globally(self): # check that we used the bounds on the local variable as if they are # global. Which means checking the bounds constraints... - y_disagg = m.disj2.transformation_block().disaggregatedVars.component( - "disj2.y") + y_disagg = m.disj2.transformation_block.disaggregatedVars.component("disj2.y") cons = hull.get_var_bounds_constraint(y_disagg) lb = cons['lb'] self.assertIsNone(lb.lower) @@ -470,12 +589,12 @@ def test_locally_declared_variables_disaggregated(self): # two birds one stone: test the mappings too disj1y = hull.get_disaggregated_var(m.disj2.y, m.disj1) disj2y = hull.get_disaggregated_var(m.disj2.y, m.disj2) - self.assertIs(disj1y, - m.disj1._transformation_block().parent_block().\ - _disaggregatedVars[0]) - self.assertIs(disj2y, - m.disj2._transformation_block().disaggregatedVars.\ - component("disj2.y")) + self.assertIs( + disj1y, m.disj1.transformation_block.parent_block()._disaggregatedVars[0] + ) + self.assertIs( + disj2y, m.disj2.transformation_block.disaggregatedVars.component("disj2.y") + ) self.assertIs(hull.get_src_var(disj1y), m.disj2.y) self.assertIs(hull.get_src_var(disj2y), m.disj2.y) @@ -513,10 +632,9 @@ def test_global_vars_local_to_a_disjunction_disaggregated(self): # check that all the variables are disaggregated # disj1 has both x and y disj = m.disj1 - transBlock = disj.transformation_block() + transBlock = disj.transformation_block varBlock = transBlock.disaggregatedVars - self.assertEqual(len([v for v in - varBlock.component_data_objects(Var)]), 2) + self.assertEqual(len([v for v in varBlock.component_data_objects(Var)]), 2) x = varBlock.component("disj1.x") y = varBlock.component("disj1.y") self.assertIsInstance(x, Var) @@ -527,36 +645,31 @@ def test_global_vars_local_to_a_disjunction_disaggregated(self): self.assertIs(hull.get_src_var(y), m.disj1.y) # disj2 and disj4 have just y for disj in [m.disj2, m.disj4]: - transBlock = disj.transformation_block() + transBlock = disj.transformation_block varBlock = transBlock.disaggregatedVars - self.assertEqual(len([v for v in - varBlock.component_data_objects(Var)]), 1) + self.assertEqual(len([v for v in varBlock.component_data_objects(Var)]), 1) y = varBlock.component("disj1.y") self.assertIsInstance(y, Var) self.assertIs(hull.get_disaggregated_var(m.disj1.y, disj), y) self.assertIs(hull.get_src_var(y), m.disj1.y) # disj3 has just x disj = m.disj3 - transBlock = disj.transformation_block() + transBlock = disj.transformation_block varBlock = transBlock.disaggregatedVars - self.assertEqual(len([v for v in - varBlock.component_data_objects(Var)]), 1) + self.assertEqual(len([v for v in varBlock.component_data_objects(Var)]), 1) x = varBlock.component("disj1.x") self.assertIsInstance(x, Var) self.assertIs(hull.get_disaggregated_var(m.disj1.x, disj), x) self.assertIs(hull.get_src_var(x), m.disj1.x) # there is a spare x on disjunction1's block - x2 = m.disjunction1.algebraic_constraint().parent_block().\ - _disaggregatedVars[0] + x2 = m.disjunction1.algebraic_constraint.parent_block()._disaggregatedVars[2] self.assertIs(hull.get_disaggregated_var(m.disj1.x, m.disj2), x2) self.assertIs(hull.get_src_var(x2), m.disj1.x) # and both a spare x and y on disjunction2's block - x2 = m.disjunction2.algebraic_constraint().parent_block().\ - _disaggregatedVars[0] - y1 = m.disjunction2.algebraic_constraint().parent_block().\ - _disaggregatedVars[1] + x2 = m.disjunction2.algebraic_constraint.parent_block()._disaggregatedVars[0] + y1 = m.disjunction2.algebraic_constraint.parent_block()._disaggregatedVars[1] self.assertIs(hull.get_disaggregated_var(m.disj1.x, m.disj4), x2) self.assertIs(hull.get_src_var(x2), m.disj1.x) self.assertIs(hull.get_disaggregated_var(m.disj1.y, m.disj3), y1) @@ -564,10 +677,9 @@ def test_global_vars_local_to_a_disjunction_disaggregated(self): def check_name_collision_disaggregated_vars(self, m, disj): hull = TransformationFactory('gdp.hull') - transBlock = disj.transformation_block() + transBlock = disj.transformation_block varBlock = transBlock.disaggregatedVars - self.assertEqual(len([v for v in - varBlock.component_data_objects(Var)]), 2) + self.assertEqual(len([v for v in varBlock.component_data_objects(Var)]), 2) # ESJ: This is not what I expected. *Can* we still get name collisions, # if we're using a fully qualified name here? x2 = varBlock.component("'disj1.x'") @@ -616,8 +728,7 @@ def test_improperly_deactivated_disjuncts(self): ct.check_improperly_deactivated_disjuncts(self, 'hull') def test_do_not_transform_userDeactivated_IndexedDisjunction(self): - ct.check_do_not_transform_userDeactivated_indexedDisjunction(self, - 'hull') + ct.check_do_not_transform_userDeactivated_indexedDisjunction(self, 'hull') def test_disjunction_deactivated(self): ct.check_disjunction_deactivated(self, 'hull') @@ -629,8 +740,7 @@ def test_deactivated_constraints(self): ct.check_deactivated_constraints(self, 'hull') def check_no_double_transformation(self): - ct.check_do_not_transform_twice_if_disjunction_reactivated(self, - 'hull') + ct.check_do_not_transform_twice_if_disjunction_reactivated(self, 'hull') def test_indicator_vars(self): ct.check_indicator_vars(self, 'hull') @@ -649,62 +759,86 @@ def test_unbounded_var_error(self): "bounded in order to use the hull " "transformation! Missing bound for w.*", TransformationFactory('gdp.hull').apply_to, - m) + m, + ) def check_threeTermDisj_IndexedConstraints(self, m, lb): transBlock = m._pyomo_gdp_hull_reformulation + hull = TransformationFactory('gdp.hull') # 2 blocks: the original Disjunct and the transformation block - self.assertEqual( - len(list(m.component_objects(Block, descend_into=False))), 1) - self.assertEqual( - len(list(m.component_objects(Disjunct))), 1) + self.assertEqual(len(list(m.component_objects(Block, descend_into=False))), 1) + self.assertEqual(len(list(m.component_objects(Disjunct))), 1) # Each relaxed disjunct should have i disaggregated vars and i "d[i].c" # Constraints - for i in [1,2,3]: - relaxed = transBlock.relaxedDisjuncts[i-1] + for i in [1, 2, 3]: + relaxed = transBlock.relaxedDisjuncts[i - 1] self.assertEqual( - len(list(relaxed.disaggregatedVars.component_objects( Var))), i) + len(list(relaxed.disaggregatedVars.component_objects(Var))), i + ) self.assertEqual( - len(list(relaxed.disaggregatedVars.component_data_objects( - Var))), i) + len(list(relaxed.disaggregatedVars.component_data_objects(Var))), i + ) # we always have the x[1] bounds constraint, then however many # original constraints were on the Disjunct - self.assertEqual( - len(list(relaxed.component_objects(Constraint))), 1+i) + self.assertEqual(len(list(relaxed.component_objects(Constraint))), 1 + i) if lb == 0: # i bounds constraints and i transformed constraints self.assertEqual( - len(list(relaxed.component_data_objects(Constraint))), i+i) + len(list(relaxed.component_data_objects(Constraint))), i + i + ) else: # 2*i bounds constraints and i transformed constraints self.assertEqual( - len(list(relaxed.component_data_objects(Constraint))), - 2*i+i) + len(list(relaxed.component_data_objects(Constraint))), 2 * i + i + ) - self.assertEqual(len(relaxed.component('d[%s].c'%i)), i) + # Check that there are i transformed constraints on relaxed: + for j in range(1, i + 1): + cons = hull.get_transformed_constraints(m.d[i].c[j]) + self.assertEqual(len(cons), 1) + self.assertIs(cons[0].parent_block(), relaxed) # the remaining disaggregated variables are on the disjunction # transformation block - self.assertEqual(len(list(transBlock.component_objects( - Var, descend_into=False))), 1) - self.assertEqual(len(list(transBlock.component_data_objects( - Var, descend_into=False))), 2) + self.assertEqual( + len(list(transBlock.component_objects(Var, descend_into=False))), 1 + ) + self.assertEqual( + len(list(transBlock.component_data_objects(Var, descend_into=False))), 2 + ) # as are the XOR, reaggregation and their bounds constraints - self.assertEqual(len(list(transBlock.component_objects( - Constraint, descend_into=False))), 3) + self.assertEqual( + len(list(transBlock.component_objects(Constraint, descend_into=False))), 3 + ) if lb == 0: # 3 reaggregation + 2 bounds + 1 xor (because one bounds constraint # is on the parent transformation block, and we don't need lb # constraints if lb = 0) - self.assertEqual(len(list(transBlock.component_data_objects( - Constraint, descend_into=False))), 6) + self.assertEqual( + len( + list( + transBlock.component_data_objects( + Constraint, descend_into=False + ) + ) + ), + 6, + ) else: # 3 reaggregation + 4 bounds + 1 xor - self.assertEqual(len(list(transBlock.component_data_objects( - Constraint, descend_into=False))), 8) + self.assertEqual( + len( + list( + transBlock.component_data_objects( + Constraint, descend_into=False + ) + ) + ), + 8, + ) def test_indexed_constraints_in_disjunct(self): m = models.makeThreeTermDisj_IndexedConstraints() @@ -715,13 +849,15 @@ def test_indexed_constraints_in_disjunct(self): def test_virtual_indexed_constraints_in_disjunct(self): m = ConcreteModel() - m.I = [1,2,3] - m.x = Var(m.I, bounds=(-1,10)) - def d_rule(d,j): + m.I = [1, 2, 3] + m.x = Var(m.I, bounds=(-1, 10)) + + def d_rule(d, j): m = d.model() d.c = Constraint(Any) for k in range(j): - d.c[k+1] = m.x[k+1] >= k+1 + d.c[k + 1] = m.x[k + 1] >= k + 1 + m.d = Disjunct(m.I, rule=d_rule) m.disjunction = Disjunction(expr=[m.d[i] for i in m.I]) @@ -745,10 +881,12 @@ def test_do_not_transform_deactivated_constraintDatas(self): KeyError, r".*b.simpledisj1.c\[1\]", hull.get_transformed_constraints, - m.b.simpledisj1.c[1]) - self.assertRegex(log.getvalue(), - r".*Constraint 'b.simpledisj1.c\[1\]' has not " - r"been transformed.") + m.b.simpledisj1.c[1], + ) + self.assertRegex( + log.getvalue(), + r".*Constraint 'b.simpledisj1.c\[1\]' has not been transformed.", + ) # this fixes a[2] to 0, so we should get the disggregated var transformed = hull.get_transformed_constraints(m.b.simpledisj1.c[2]) @@ -762,16 +900,16 @@ def test_do_not_transform_deactivated_constraintDatas(self): transformed = hull.get_transformed_constraints(m.b.simpledisj2.c[1]) # simpledisj2.c[1] is a <= constraint self.assertEqual(len(transformed), 1) - self.assertIs(transformed[0], - m.b.simpledisj2.transformation_block().\ - component("b.simpledisj2.c")[(1,'ub')]) + self.assertIs( + transformed[0].parent_block(), m.b.simpledisj2.transformation_block + ) transformed = hull.get_transformed_constraints(m.b.simpledisj2.c[2]) # simpledisj2.c[2] is a <= constraint self.assertEqual(len(transformed), 1) - self.assertIs(transformed[0], - m.b.simpledisj2.transformation_block().\ - component("b.simpledisj2.c")[(2,'ub')]) + self.assertIs( + transformed[0].parent_block(), m.b.simpledisj2.transformation_block + ) class MultiTermDisj(unittest.TestCase, CommonTests): @@ -793,8 +931,7 @@ def test_do_not_disaggregate_more_than_necessary(self): self.assertEqual(x1.ub, 8) self.assertIs(hull.get_src_var(x1), m.x) - x2 = m.disjunction.algebraic_constraint().parent_block().\ - _disaggregatedVars[0] + x2 = m.disjunction.algebraic_constraint.parent_block()._disaggregatedVars[0] self.assertIs(hull.get_src_var(x2), m.x) self.assertIs(hull.get_disaggregated_var(m.x, m.d2), x2) self.assertIs(hull.get_disaggregated_var(m.x, m.d3), x2) @@ -809,8 +946,7 @@ def test_do_not_disaggregate_more_than_necessary(self): self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) self.assertIs(repn.linear_vars[1], x2) - self.assertIs(repn.linear_vars[0], - m.d1.indicator_var.get_associated_binary()) + self.assertIs(repn.linear_vars[0], m.d1.indicator_var.get_associated_binary()) self.assertEqual(repn.linear_coefs[0], 2) self.assertEqual(repn.linear_coefs[1], -1) self.assertEqual(repn.constant, -2) @@ -821,8 +957,7 @@ def test_do_not_disaggregate_more_than_necessary(self): self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) self.assertIs(repn.linear_vars[0], x2) - self.assertIs(repn.linear_vars[1], - m.d1.indicator_var.get_associated_binary()) + self.assertIs(repn.linear_vars[1], m.d1.indicator_var.get_associated_binary()) self.assertEqual(repn.linear_coefs[1], 8) self.assertEqual(repn.linear_coefs[0], 1) self.assertEqual(repn.constant, -8) @@ -842,6 +977,7 @@ def test_do_not_disaggregate_more_than_necessary(self): self.assertEqual(repn.linear_coefs[2], -1) self.assertEqual(repn.constant, 0) + class IndexedDisjunction(unittest.TestCase, CommonTests): def setUp(self): # set seed so we can test name collisions predictably @@ -854,17 +990,22 @@ def test_disaggregation_constraints(self): relaxedDisjuncts = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts disaggregatedVars = { - 1: [relaxedDisjuncts[0].disaggregatedVars.component('x[1]'), - relaxedDisjuncts[1].disaggregatedVars.component('x[1]')], - 2: [relaxedDisjuncts[2].disaggregatedVars.component('x[2]'), - relaxedDisjuncts[3].disaggregatedVars.component('x[2]')], - 3: [relaxedDisjuncts[4].disaggregatedVars.component('x[3]'), - relaxedDisjuncts[5].disaggregatedVars.component('x[3]')], + 1: [ + hull.get_disaggregated_var(m.x[1], m.disjunct[1, 'a']), + hull.get_disaggregated_var(m.x[1], m.disjunct[1, 'b']), + ], + 2: [ + hull.get_disaggregated_var(m.x[2], m.disjunct[2, 'a']), + hull.get_disaggregated_var(m.x[2], m.disjunct[2, 'b']), + ], + 3: [ + hull.get_disaggregated_var(m.x[3], m.disjunct[3, 'a']), + hull.get_disaggregated_var(m.x[3], m.disjunct[3, 'b']), + ], } for i, disVars in disaggregatedVars.items(): - cons = hull.get_disaggregation_constraint(m.x[i], - m.disjunction[i]) + cons = hull.get_disaggregation_constraint(m.x[i], m.disjunction[i]) self.assertEqual(cons.lower, 0) self.assertEqual(cons.upper, 0) repn = generate_standard_repn(cons.body) @@ -882,23 +1023,26 @@ def test_disaggregation_constraints_tuple_indices(self): relaxedDisjuncts = m._pyomo_gdp_hull_reformulation.relaxedDisjuncts disaggregatedVars = { - (1,'A'): - [relaxedDisjuncts[0].disaggregatedVars.component('a[1,A]'), - relaxedDisjuncts[1].disaggregatedVars.component('a[1,A]')], - (1,'B'): - [relaxedDisjuncts[2].disaggregatedVars.component('a[1,B]'), - relaxedDisjuncts[3].disaggregatedVars.component('a[1,B]')], - (2,'A'): - [relaxedDisjuncts[4].disaggregatedVars.component('a[2,A]'), - relaxedDisjuncts[5].disaggregatedVars.component('a[2,A]')], - (2,'B'): - [relaxedDisjuncts[6].disaggregatedVars.component('a[2,B]'), - relaxedDisjuncts[7].disaggregatedVars.component('a[2,B]')], + (1, 'A'): [ + hull.get_disaggregated_var(m.a[1, 'A'], m.disjunct[0, 1, 'A']), + hull.get_disaggregated_var(m.a[1, 'A'], m.disjunct[1, 1, 'A']), + ], + (1, 'B'): [ + hull.get_disaggregated_var(m.a[1, 'B'], m.disjunct[0, 1, 'B']), + hull.get_disaggregated_var(m.a[1, 'B'], m.disjunct[1, 1, 'B']), + ], + (2, 'A'): [ + hull.get_disaggregated_var(m.a[2, 'A'], m.disjunct[0, 2, 'A']), + hull.get_disaggregated_var(m.a[2, 'A'], m.disjunct[1, 2, 'A']), + ], + (2, 'B'): [ + hull.get_disaggregated_var(m.a[2, 'B'], m.disjunct[0, 2, 'B']), + hull.get_disaggregated_var(m.a[2, 'B'], m.disjunct[1, 2, 'B']), + ], } for i, disVars in disaggregatedVars.items(): - cons = hull.get_disaggregation_constraint(m.a[i], - m.disjunction[i]) + cons = hull.get_disaggregation_constraint(m.a[i], m.disjunction[i]) self.assertEqual(cons.lower, 0) self.assertEqual(cons.upper, 0) # NOTE: fixed variables are evaluated here. @@ -948,62 +1092,69 @@ def check_trans_block_disjunctions_of_disjunct_datas(self, m): transBlock1 = m.component("_pyomo_gdp_hull_reformulation") self.assertIsInstance(transBlock1, Block) self.assertIsInstance(transBlock1.component("relaxedDisjuncts"), Block) - # We end up with a transformation block for every SimpleDisjunction or - # IndexedDisjunction. - self.assertEqual(len(transBlock1.relaxedDisjuncts), 2) - self.assertIsInstance(transBlock1.relaxedDisjuncts[0].\ - disaggregatedVars.component("x"), Var) - self.assertTrue(transBlock1.relaxedDisjuncts[0].disaggregatedVars.x.\ - is_fixed()) - self.assertEqual(value(transBlock1.relaxedDisjuncts[0].\ - disaggregatedVars.x), 0) - self.assertIsInstance(transBlock1.relaxedDisjuncts[0].component( - "firstTerm[1].cons"), Constraint) - # No constraint becuase disaggregated variable fixed to 0 - self.assertEqual(len(transBlock1.relaxedDisjuncts[0].component( - "firstTerm[1].cons")), 0) - self.assertIsInstance(transBlock1.relaxedDisjuncts[0].component( - "x_bounds"), Constraint) - self.assertEqual(len(transBlock1.relaxedDisjuncts[0].component( - "x_bounds")), 2) - - self.assertIsInstance(transBlock1.relaxedDisjuncts[1].\ - disaggregatedVars.component("x"), Var) - self.assertIsInstance(transBlock1.relaxedDisjuncts[1].component( - "secondTerm[1].cons"), Constraint) - self.assertEqual(len(transBlock1.relaxedDisjuncts[1].component( - "secondTerm[1].cons")), 1) - self.assertIsInstance(transBlock1.relaxedDisjuncts[1].component( - "x_bounds"), Constraint) - self.assertEqual(len(transBlock1.relaxedDisjuncts[1].component( - "x_bounds")), 2) - - transBlock2 = m.component("_pyomo_gdp_hull_reformulation_4") - self.assertIsInstance(transBlock2, Block) - self.assertIsInstance(transBlock2.component("relaxedDisjuncts"), Block) - self.assertEqual(len(transBlock2.relaxedDisjuncts), 2) - self.assertIsInstance(transBlock2.relaxedDisjuncts[0].\ - disaggregatedVars.component("x"), Var) - self.assertIsInstance(transBlock2.relaxedDisjuncts[0].component( - "firstTerm[2].cons"), Constraint) - # we have an equality constraint - self.assertEqual(len(transBlock2.relaxedDisjuncts[0].component( - "firstTerm[2].cons")), 1) - self.assertIsInstance(transBlock2.relaxedDisjuncts[0].component( - "x_bounds"), Constraint) - self.assertEqual(len(transBlock2.relaxedDisjuncts[0].component( - "x_bounds")), 2) - - self.assertIsInstance(transBlock2.relaxedDisjuncts[1].\ - disaggregatedVars.component("x"), Var) - self.assertIsInstance(transBlock2.relaxedDisjuncts[1].component( - "secondTerm[2].cons"), Constraint) - self.assertEqual(len(transBlock2.relaxedDisjuncts[1].component( - "secondTerm[2].cons")), 1) - self.assertIsInstance(transBlock2.relaxedDisjuncts[1].component( - "x_bounds"), Constraint) - self.assertEqual(len(transBlock2.relaxedDisjuncts[1].component( - "x_bounds")), 2) + # All of the transformed Disjuncts are here + self.assertEqual(len(transBlock1.relaxedDisjuncts), 4) + + hull = TransformationFactory('gdp.hull') + firstTerm2 = transBlock1.relaxedDisjuncts[0] + self.assertIs(firstTerm2, m.firstTerm[2].transformation_block) + self.assertIsInstance(firstTerm2.disaggregatedVars.component("x"), Var) + constraints = hull.get_transformed_constraints(m.firstTerm[2].cons) + self.assertEqual(len(constraints), 1) # one equality constraint + cons = constraints[0] + self.assertIs(cons.parent_block(), firstTerm2) + # also check for the bounds constraints for x + dis_x = hull.get_disaggregated_var(m.x, m.firstTerm[2]) + cons = hull.get_var_bounds_constraint(dis_x) + self.assertIsInstance(cons, Constraint) + self.assertIs(cons.parent_block(), firstTerm2) + self.assertEqual(len(cons), 2) + + secondTerm2 = transBlock1.relaxedDisjuncts[1] + self.assertIs(secondTerm2, m.secondTerm[2].transformation_block) + self.assertIsInstance(secondTerm2.disaggregatedVars.component("x"), Var) + constraints = hull.get_transformed_constraints(m.secondTerm[2].cons) + self.assertEqual(len(constraints), 1) + cons = constraints[0] + self.assertIs(cons.parent_block(), secondTerm2) + # also check for the bounds constraints for x + dis_x = hull.get_disaggregated_var(m.x, m.secondTerm[2]) + cons = hull.get_var_bounds_constraint(dis_x) + self.assertIsInstance(cons, Constraint) + self.assertIs(cons.parent_block(), secondTerm2) + self.assertEqual(len(cons), 2) + + firstTerm1 = transBlock1.relaxedDisjuncts[2] + self.assertIs(firstTerm1, m.firstTerm[1].transformation_block) + self.assertIsInstance(firstTerm1.disaggregatedVars.component("x"), Var) + self.assertTrue(firstTerm1.disaggregatedVars.x.is_fixed()) + self.assertEqual(value(firstTerm1.disaggregatedVars.x), 0) + constraints = hull.get_transformed_constraints(m.firstTerm[1].cons) + self.assertEqual(len(constraints), 1) + cons = constraints[0] + # It's just fixed to 0--so it's on the disaggregatedVar block, which is + # fine. + self.assertIs(cons.parent_block(), firstTerm1.disaggregatedVars) + # also check for the bounds constraints for x + dis_x = hull.get_disaggregated_var(m.x, m.firstTerm[1]) + cons = hull.get_var_bounds_constraint(dis_x) + self.assertIsInstance(cons, Constraint) + self.assertIs(cons.parent_block(), firstTerm1) + self.assertEqual(len(cons), 2) + + secondTerm1 = transBlock1.relaxedDisjuncts[3] + self.assertIs(secondTerm1, m.secondTerm[1].transformation_block) + self.assertIsInstance(secondTerm1.disaggregatedVars.component("x"), Var) + constraints = hull.get_transformed_constraints(m.secondTerm[1].cons) + self.assertEqual(len(constraints), 1) + cons = constraints[0] + self.assertIs(cons.parent_block(), secondTerm1) + # also check for the bounds constraints for x + dis_x = hull.get_disaggregated_var(m.x, m.secondTerm[1]) + cons = hull.get_var_bounds_constraint(dis_x) + self.assertIsInstance(cons, Constraint) + self.assertIs(cons.parent_block(), secondTerm1) + self.assertEqual(len(cons), 2) def test_simple_disjunction_of_disjunct_datas(self): ct.check_simple_disjunction_of_disjunct_datas(self, 'hull') @@ -1012,131 +1163,88 @@ def test_any_indexed_disjunction_of_disjunct_datas(self): m = models.makeAnyIndexedDisjunctionOfDisjunctDatas() TransformationFactory('gdp.hull').apply_to(m) + self.check_trans_block_disjunctions_of_disjunct_datas(m) + transBlock = m.component("_pyomo_gdp_hull_reformulation") - self.assertIsInstance(transBlock, Block) - self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) - self.assertEqual(len(transBlock.relaxedDisjuncts), 4) - self.assertIsInstance(transBlock.relaxedDisjuncts[0].\ - disaggregatedVars.component("x"), Var) - self.assertTrue(transBlock.relaxedDisjuncts[0].disaggregatedVars.\ - x.is_fixed()) - self.assertEqual(value(transBlock.relaxedDisjuncts[0].\ - disaggregatedVars.x), 0) - self.assertIsInstance(transBlock.relaxedDisjuncts[0].component( - "firstTerm[1].cons"), Constraint) - # No constraint becuase disaggregated variable fixed to 0 - self.assertEqual(len(transBlock.relaxedDisjuncts[0].component( - "firstTerm[1].cons")), 0) - self.assertIsInstance(transBlock.relaxedDisjuncts[0].component( - "x_bounds"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[0].component( - "x_bounds")), 2) - - self.assertIsInstance(transBlock.relaxedDisjuncts[1].disaggregatedVars.\ - component("x"), Var) - self.assertIsInstance(transBlock.relaxedDisjuncts[1].component( - "secondTerm[1].cons"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[1].component( - "secondTerm[1].cons")), 1) - self.assertIsInstance(transBlock.relaxedDisjuncts[1].component( - "x_bounds"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[1].component( - "x_bounds")), 2) - - self.assertIsInstance(transBlock.relaxedDisjuncts[2].disaggregatedVars.\ - component("x"), Var) - self.assertIsInstance(transBlock.relaxedDisjuncts[2].component( - "firstTerm[2].cons"), Constraint) - # we have an equality constraint - self.assertEqual(len(transBlock.relaxedDisjuncts[2].component( - "firstTerm[2].cons")), 1) - self.assertIsInstance(transBlock.relaxedDisjuncts[2].component( - "x_bounds"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[2].component( - "x_bounds")), 2) - - self.assertIsInstance(transBlock.relaxedDisjuncts[3].disaggregatedVars.\ - component("x"), Var) - self.assertIsInstance(transBlock.relaxedDisjuncts[3].component( - "secondTerm[2].cons"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[3].component( - "secondTerm[2].cons")), 1) - self.assertIsInstance(transBlock.relaxedDisjuncts[3].component( - "x_bounds"), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[3].component( - "x_bounds")), 2) - - self.assertIsInstance(transBlock.component("disjunction_xor"), - Constraint) + self.assertIsInstance(transBlock.component("disjunction_xor"), Constraint) self.assertEqual(len(transBlock.component("disjunction_xor")), 2) def check_first_iteration(self, model): transBlock = model.component("_pyomo_gdp_hull_reformulation") self.assertIsInstance(transBlock, Block) - self.assertIsInstance( - transBlock.component("disjunctionList_xor"), Constraint) + self.assertIsInstance(transBlock.component("disjunctionList_xor"), Constraint) self.assertEqual(len(transBlock.disjunctionList_xor), 1) self.assertFalse(model.disjunctionList[0].active) + hull = TransformationFactory('gdp.hull') if model.component('firstTerm') is None: - firstTerm = "'firstTerm[0]'.cons" - secondTerm = "'secondTerm[0]'.cons" + firstTerm_cons = hull.get_transformed_constraints( + model.component("firstTerm[0]").cons + ) + secondTerm_cons = hull.get_transformed_constraints( + model.component("secondTerm[0]").cons + ) + else: - firstTerm = "firstTerm[0].cons" - secondTerm = "secondTerm[0].cons" + firstTerm_cons = hull.get_transformed_constraints(model.firstTerm[0].cons) + secondTerm_cons = hull.get_transformed_constraints(model.secondTerm[0].cons) self.assertIsInstance(transBlock.relaxedDisjuncts, Block) self.assertEqual(len(transBlock.relaxedDisjuncts), 2) - self.assertIsInstance(transBlock.relaxedDisjuncts[0].\ - disaggregatedVars.x, Var) - self.assertTrue(transBlock.relaxedDisjuncts[0].disaggregatedVars.x.\ - is_fixed()) - self.assertEqual(value(transBlock.relaxedDisjuncts[0].\ - disaggregatedVars.x), 0) - self.assertIsInstance(transBlock.relaxedDisjuncts[0].component( - firstTerm), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[0].component( - firstTerm)), 0) - self.assertIsInstance(transBlock.relaxedDisjuncts[0].x_bounds, - Constraint) + self.assertIsInstance(transBlock.relaxedDisjuncts[0].disaggregatedVars.x, Var) + self.assertTrue(transBlock.relaxedDisjuncts[0].disaggregatedVars.x.is_fixed()) + self.assertEqual(value(transBlock.relaxedDisjuncts[0].disaggregatedVars.x), 0) + self.assertEqual(len(firstTerm_cons), 1) + self.assertIs( + firstTerm_cons[0].parent_block(), + # It fixes a var to 0 + transBlock.relaxedDisjuncts[0].disaggregatedVars, + ) + self.assertIsInstance(transBlock.relaxedDisjuncts[0].x_bounds, Constraint) self.assertEqual(len(transBlock.relaxedDisjuncts[0].x_bounds), 2) - self.assertIsInstance(transBlock.relaxedDisjuncts[1].\ - disaggregatedVars.x, Var) - self.assertFalse(transBlock.relaxedDisjuncts[1].disaggregatedVars.\ - x.is_fixed()) - self.assertIsInstance(transBlock.relaxedDisjuncts[1].component( - secondTerm), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[1].component( - secondTerm)), 1) - self.assertIsInstance(transBlock.relaxedDisjuncts[1].x_bounds, - Constraint) + self.assertIsInstance(transBlock.relaxedDisjuncts[1].disaggregatedVars.x, Var) + self.assertFalse(transBlock.relaxedDisjuncts[1].disaggregatedVars.x.is_fixed()) + + self.assertEqual(len(secondTerm_cons), 1) + self.assertIs(secondTerm_cons[0].parent_block(), transBlock.relaxedDisjuncts[1]) + self.assertIsInstance(transBlock.relaxedDisjuncts[1].x_bounds, Constraint) self.assertEqual(len(transBlock.relaxedDisjuncts[1].x_bounds), 2) def check_second_iteration(self, model): - transBlock = model.component("_pyomo_gdp_hull_reformulation") + transBlock = model.component("_pyomo_gdp_hull_reformulation_4") self.assertIsInstance(transBlock, Block) self.assertIsInstance(transBlock.component("relaxedDisjuncts"), Block) - self.assertEqual(len(transBlock.relaxedDisjuncts), 4) + self.assertEqual(len(transBlock.relaxedDisjuncts), 2) + hull = TransformationFactory('gdp.hull') if model.component('firstTerm') is None: - firstTerm = "'firstTerm[1]'.cons" - secondTerm = "'secondTerm[1]'.cons" + firstTerm_cons = hull.get_transformed_constraints( + model.component("firstTerm[1]").cons + ) + secondTerm_cons = hull.get_transformed_constraints( + model.component("secondTerm[1]").cons + ) + else: - firstTerm = "firstTerm[1].cons" - secondTerm = "secondTerm[1].cons" - - self.assertIsInstance(transBlock.relaxedDisjuncts[2].component( - firstTerm), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[2].component( - firstTerm)), 1) - self.assertIsInstance(transBlock.relaxedDisjuncts[3].component( - secondTerm), Constraint) - self.assertEqual(len(transBlock.relaxedDisjuncts[3].component( - secondTerm)), 1) - self.assertEqual( - len(transBlock.disjunctionList_xor), 2) + firstTerm_cons = hull.get_transformed_constraints(model.firstTerm[1].cons) + secondTerm_cons = hull.get_transformed_constraints(model.secondTerm[1].cons) + + self.assertEqual(len(firstTerm_cons), 1) + self.assertIs(firstTerm_cons[0].parent_block(), transBlock.relaxedDisjuncts[0]) + self.assertEqual(len(secondTerm_cons), 1) + self.assertIs(secondTerm_cons[0].parent_block(), transBlock.relaxedDisjuncts[1]) + + orig = model.component("_pyomo_gdp_hull_reformulation") + self.assertIsInstance( + model.disjunctionList[1].algebraic_constraint, + constraint._GeneralConstraintData, + ) + self.assertIsInstance( + model.disjunctionList[0].algebraic_constraint, + constraint._GeneralConstraintData, + ) self.assertFalse(model.disjunctionList[1].active) self.assertFalse(model.disjunctionList[0].active) @@ -1144,15 +1252,14 @@ def test_disjunction_and_disjuncts_indexed_by_any(self): ct.check_disjunction_and_disjuncts_indexed_by_any(self, 'hull') def test_iteratively_adding_disjunctions_transform_container(self): - ct.check_iteratively_adding_disjunctions_transform_container(self, - 'hull') + ct.check_iteratively_adding_disjunctions_transform_container(self, 'hull') def test_iteratively_adding_disjunctions_transform_model(self): ct.check_iteratively_adding_disjunctions_transform_model(self, 'hull') def test_iteratively_adding_to_indexed_disjunction_on_block(self): - ct.check_iteratively_adding_to_indexed_disjunction_on_block(self, - 'hull') + ct.check_iteratively_adding_to_indexed_disjunction_on_block(self, 'hull') + class TestTargets_SingleDisjunction(unittest.TestCase, CommonTests): def test_only_targets_inactive(self): @@ -1167,6 +1274,7 @@ def test_target_not_a_component_err(self): def test_targets_cannot_be_cuids(self): ct.check_targets_cannot_be_cuids(self, 'hull') + class TestTargets_IndexedDisjunction(unittest.TestCase, CommonTests): # There are a couple tests for targets above, but since I had the patience # to make all these for bigm also, I may as well reap the benefits here too. @@ -1205,6 +1313,7 @@ def test_create_using(self): m = models.makeDisjunctionsOnIndexedBlock() ct.diff_apply_to_and_create_using(self, m, 'gdp.hull') + class DisaggregatedVarNamingConflict(unittest.TestCase): @staticmethod def makeModel(): @@ -1212,14 +1321,16 @@ def makeModel(): m.b = Block() m.b.x = Var(bounds=(0, 10)) m.add_component("b.x", Var(bounds=(-9, 9))) + def disjunct_rule(d, i): m = d.model() if i: d.cons_block = Constraint(expr=m.b.x >= 5) - d.cons_model = Constraint(expr=m.component("b.x")==0) + d.cons_model = Constraint(expr=m.component("b.x") == 0) else: d.cons_model = Constraint(expr=m.component("b.x") <= -5) - m.disjunct = Disjunct([0,1], rule=disjunct_rule) + + m.disjunct = Disjunct([0, 1], rule=disjunct_rule) m.disjunction = Disjunction(expr=[m.disjunct[0], m.disjunct[1]]) return m @@ -1229,21 +1340,24 @@ def test_disaggregation_constraints(self): hull = TransformationFactory('gdp.hull') hull.apply_to(m) - disaggregationConstraints = m._pyomo_gdp_hull_reformulation.\ - disaggregationConstraints + disaggregationConstraints = ( + m._pyomo_gdp_hull_reformulation.disaggregationConstraints + ) consmap = [ - (m.component("b.x"), disaggregationConstraints[(0, None)]), - (m.b.x, disaggregationConstraints[(1, None)]) + (m.component("b.x"), disaggregationConstraints[0]), + (m.b.x, disaggregationConstraints[1]), ] for v, cons in consmap: disCons = hull.get_disaggregation_constraint(v, m.disjunction) self.assertIs(disCons, cons) + class DisjunctInMultipleDisjunctions(unittest.TestCase, CommonTests): def test_error_for_same_disjunct_in_multiple_disjunctions(self): ct.check_error_for_same_disjunct_in_multiple_disjunctions(self, 'hull') + class NestedDisjunction(unittest.TestCase, CommonTests): def setUp(self): # set seed so we can test name collisions predictably @@ -1253,16 +1367,28 @@ def test_disjuncts_inactive(self): ct.check_disjuncts_inactive_nested(self, 'hull') def test_deactivated_disjunct_leaves_nested_disjuncts_active(self): - ct.check_deactivated_disjunct_leaves_nested_disjunct_active(self, - 'hull') + ct.check_deactivated_disjunct_leaves_nested_disjunct_active(self, 'hull') def test_mappings_between_disjunctions_and_xors(self): - # For the sake of not second-guessing anyone, we will let the inner - # disjunction point to its original XOR constraint. This constraint - # itself will be transformed by the outer disjunction, so if you want to - # find what it became you will have to follow its map to the transformed - # version. (But this behaves the same as bigm) - ct.check_mappings_between_disjunctions_and_xors(self, 'hull') + # This test is nearly identical to the one in bigm, but because of + # different transformation orders, the name conflict gets resolved in + # the opposite way. + m = models.makeNestedDisjunctions() + transform = TransformationFactory('gdp.hull') + transform.apply_to(m) + + transBlock = m.component("_pyomo_gdp_hull_reformulation") + + disjunctionPairs = [ + (m.disjunction, transBlock.disjunction_xor), + (m.disjunct[1].innerdisjunction[0], transBlock.innerdisjunction_xor[0]), + (m.simpledisjunct.innerdisjunction, transBlock.innerdisjunction_xor_4), + ] + + # check disjunction mappings + for disjunction, xor in disjunctionPairs: + self.assertIs(disjunction.algebraic_constraint, xor) + self.assertIs(transform.get_src_disjunction(xor), disjunction) def test_unique_reference_to_nested_indicator_var(self): ct.check_unique_reference_to_nested_indicator_var(self, 'hull') @@ -1296,17 +1422,17 @@ def test_relaxation_feasibility(self): solver = SolverFactory(linear_solvers[0]) cases = [ - (1,1,1,1,None), - (0,0,0,0,None), - (1,0,0,0,None), - (0,1,0,0,1.1), - (0,0,1,0,None), - (0,0,0,1,None), - (1,1,0,0,None), - (1,0,1,0,1.2), - (1,0,0,1,1.3), - (1,0,1,1,None), - ] + (1, 1, 1, 1, None), + (0, 0, 0, 0, None), + (1, 0, 0, 0, None), + (0, 1, 0, 0, 1.1), + (0, 0, 1, 0, None), + (0, 0, 0, 1, None), + (1, 1, 0, 0, None), + (1, 0, 1, 0, 1.2), + (1, 0, 0, 1, 1.3), + (1, 0, 1, 1, None), + ] for case in cases: m.d1.indicator_var.fix(case[0]) m.d2.indicator_var.fix(case[1]) @@ -1314,11 +1440,14 @@ def test_relaxation_feasibility(self): m.d4.indicator_var.fix(case[3]) results = solver.solve(m) if case[4] is None: - self.assertEqual(results.solver.termination_condition, - TerminationCondition.infeasible) + self.assertEqual( + results.solver.termination_condition, + TerminationCondition.infeasible, + ) else: - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) self.assertEqual(value(m.obj), case[4]) @unittest.skipIf(not linear_solvers, "No linear solver available") @@ -1334,17 +1463,17 @@ def test_relaxation_feasibility_transform_inner_first(self): solver = SolverFactory(linear_solvers[0]) cases = [ - (1,1,1,1,None), - (0,0,0,0,None), - (1,0,0,0,None), - (0,1,0,0,1.1), - (0,0,1,0,None), - (0,0,0,1,None), - (1,1,0,0,None), - (1,0,1,0,1.2), - (1,0,0,1,1.3), - (1,0,1,1,None), - ] + (1, 1, 1, 1, None), + (0, 0, 0, 0, None), + (1, 0, 0, 0, None), + (0, 1, 0, 0, 1.1), + (0, 0, 1, 0, None), + (0, 0, 0, 1, None), + (1, 1, 0, 0, None), + (1, 0, 1, 0, 1.2), + (1, 0, 0, 1, 1.3), + (1, 0, 1, 1, None), + ] for case in cases: m.d1.indicator_var.fix(case[0]) m.d2.indicator_var.fix(case[1]) @@ -1352,18 +1481,23 @@ def test_relaxation_feasibility_transform_inner_first(self): m.d4.indicator_var.fix(case[3]) results = solver.solve(m) if case[4] is None: - self.assertEqual(results.solver.termination_condition, - TerminationCondition.infeasible) + self.assertEqual( + results.solver.termination_condition, + TerminationCondition.infeasible, + ) else: - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) self.assertEqual(value(m.obj), case[4]) def test_create_using(self): m = models.makeNestedDisjunctions_FlatDisjuncts() self.diff_apply_to_and_create_using(m) - def check_outer_disaggregation_constraint(self, cons, var, disj1, disj2): + def check_outer_disaggregation_constraint(self, cons, var, disj1, disj2, rhs=None): + if rhs is None: + rhs = var hull = TransformationFactory('gdp.hull') self.assertTrue(cons.active) self.assertEqual(cons.lower, 0) @@ -1371,11 +1505,9 @@ def check_outer_disaggregation_constraint(self, cons, var, disj1, disj2): repn = generate_standard_repn(cons.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, 0) - ct.check_linear_coef(self, repn, var, 1) - ct.check_linear_coef(self, repn, hull.get_disaggregated_var(var, disj1), - -1) - ct.check_linear_coef(self, repn, hull.get_disaggregated_var(var, disj2), - -1) + ct.check_linear_coef(self, repn, rhs, 1) + ct.check_linear_coef(self, repn, hull.get_disaggregated_var(var, disj1), -1) + ct.check_linear_coef(self, repn, hull.get_disaggregated_var(var, disj2), -1) def check_bounds_constraint_ub(self, constraint, ub, dis_var, ind_var): hull = TransformationFactory('gdp.hull') @@ -1393,68 +1525,14 @@ def check_bounds_constraint_ub(self, constraint, ub, dis_var, ind_var): ct.check_linear_coef(self, repn, ind_var, -ub) self.assertIs(constraint, hull.get_var_bounds_constraint(dis_var)) - def check_inner_disaggregated_var_bounds(self, cons, dis, ind_var, - original_cons): + def check_transformed_constraint(self, cons, dis, lb, ind_var): hull = TransformationFactory('gdp.hull') - self.assertIsInstance(cons, Constraint) - self.assertTrue(cons.active) - self.assertEqual(len(cons), 1) - self.assertTrue(cons[('ub', 'ub')].active) - self.assertIsNone(cons[('ub', 'ub')].lower) - self.assertEqual(cons[('ub', 'ub')].upper, 0) - repn = generate_standard_repn(cons[('ub', 'ub')].body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 2) - ct.check_linear_coef(self, repn, dis, 1) - ct.check_linear_coef(self, repn, ind_var, -2) - - self.assertIs(hull.get_var_bounds_constraint(dis), original_cons) - transformed_list = hull.get_transformed_constraints(original_cons['ub']) - self.assertEqual(len(transformed_list), 1) - self.assertIs(transformed_list[0], cons[('ub', 'ub')]) - - def check_inner_transformed_constraint(self, cons, dis, lb, ind_var, - first_transformed, original): - hull = TransformationFactory('gdp.hull') - self.assertIsInstance(cons, Constraint) - self.assertTrue(cons.active) self.assertEqual(len(cons), 1) - # Ha, this really isn't lovely, but its just chance that it's ub the - # second time. - self.assertTrue(cons[('lb', 'ub')].active) - self.assertIsNone(cons[('lb', 'ub')].lower) - self.assertEqual(cons[('lb', 'ub')].upper, 0) - repn = generate_standard_repn(cons[('lb', 'ub')].body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 2) - ct.check_linear_coef(self, repn, dis, -1) - ct.check_linear_coef(self, repn, ind_var, lb) - - self.assertIs(hull.get_src_constraint(first_transformed), - original) - trans_list = hull.get_transformed_constraints(original) - self.assertEqual(len(trans_list), 1) - self.assertIs(trans_list[0], first_transformed['lb']) - self.assertIs(hull.get_src_constraint(first_transformed['lb']), - original) - self.assertIs(hull.get_src_constraint(cons), first_transformed) - trans_list = hull.get_transformed_constraints(first_transformed['lb']) - self.assertEqual(len(trans_list), 1) - self.assertIs(trans_list[0], cons[('lb', 'ub')]) - self.assertIs(hull.get_src_constraint(cons[('lb', 'ub')]), - first_transformed['lb']) - - def check_outer_transformed_constraint(self, cons, dis, lb, ind_var): - hull = TransformationFactory('gdp.hull') - self.assertIsInstance(cons, Constraint) + cons = cons[0] self.assertTrue(cons.active) - self.assertEqual(len(cons), 1) - self.assertTrue(cons['lb'].active) - self.assertIsNone(cons['lb'].lower) - self.assertEqual(cons['lb'].upper, 0) - repn = generate_standard_repn(cons['lb'].body) + self.assertIsNone(cons.lower) + self.assertEqual(value(cons.upper), 0) + repn = generate_standard_repn(cons.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, 0) self.assertEqual(len(repn.linear_vars), 2) @@ -1463,9 +1541,6 @@ def check_outer_transformed_constraint(self, cons, dis, lb, ind_var): orig = ind_var.parent_block().c self.assertIs(hull.get_src_constraint(cons), orig) - trans_list = hull.get_transformed_constraints(orig) - self.assertEqual(len(trans_list), 1) - self.assertIs(trans_list[0], cons['lb']) def test_transformed_model_nestedDisjuncts(self): # This test tests *everything* for a simple nested disjunction case. @@ -1486,195 +1561,71 @@ def test_transformed_model_nestedDisjuncts(self): repn = generate_standard_repn(xor.body) self.assertTrue(repn.is_linear()) self.assertEqual(repn.constant, 0) - ct.check_linear_coef(self, repn, m.d1.indicator_var, 1) - ct.check_linear_coef(self, repn, m.d2.indicator_var, 1) - self.assertIs(xor, m.disj.algebraic_constraint()) + ct.check_linear_coef(self, repn, m.d1.binary_indicator_var, 1) + ct.check_linear_coef(self, repn, m.d2.binary_indicator_var, 1) + self.assertIs(xor, m.disj.algebraic_constraint) self.assertIs(m.disj, hull.get_src_disjunction(xor)) - # so should the outer disaggregation constraint + # inner xor should be on this block + xor = m.d1.disj2.algebraic_constraint + self.assertIs(xor.parent_block(), transBlock) + self.assertIsInstance(xor, Constraint) + self.assertTrue(xor.active) + self.assertEqual(xor.lower, 0) + self.assertEqual(xor.upper, 0) + repn = generate_standard_repn(xor.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(repn.constant, 0) + ct.check_linear_coef(self, repn, m.d1.d3.binary_indicator_var, 1) + ct.check_linear_coef(self, repn, m.d1.d4.binary_indicator_var, 1) + ct.check_linear_coef(self, repn, m.d1.binary_indicator_var, -1) + self.assertIs(m.d1.disj2, hull.get_src_disjunction(xor)) + + # so should both disaggregation constraints dis = transBlock.disaggregationConstraints self.assertIsInstance(dis, Constraint) self.assertTrue(dis.active) - self.assertEqual(len(dis), 3) - self.check_outer_disaggregation_constraint(dis[0,None], m.x, m.d1, - m.d2) - self.assertIs(hull.get_disaggregation_constraint(m.x, m.disj), - dis[0, None]) - self.check_outer_disaggregation_constraint( - dis[1,None], - m.d1.d3.binary_indicator_var, - m.d1, - m.d2) - self.assertIs(hull.get_disaggregation_constraint( - m.d1.d3.binary_indicator_var, - m.disj), dis[1,None]) + self.assertEqual(len(dis), 2) + self.check_outer_disaggregation_constraint(dis[0], m.x, m.d1, m.d2) + self.assertIs(hull.get_disaggregation_constraint(m.x, m.disj), dis[0]) self.check_outer_disaggregation_constraint( - dis[2,None], - m.d1.d4.binary_indicator_var, - m.d1, - m.d2) - self.assertIs(hull.get_disaggregation_constraint( - m.d1.d4.binary_indicator_var, - m.disj), dis[2,None]) - - # we should have four disjunct transformation blocks: 2 real ones and - # then two that are just home to indicator_var and disaggregated var - # References. + dis[1], m.x, m.d1.d3, m.d1.d4, rhs=hull.get_disaggregated_var(m.x, m.d1) + ) + self.assertIs(hull.get_disaggregation_constraint(m.x, m.d1.disj2), dis[1]) + + # we should have four disjunct transformation blocks disjBlocks = transBlock.relaxedDisjuncts self.assertTrue(disjBlocks.active) self.assertEqual(len(disjBlocks), 4) + ## d1's transformation block + disj1 = disjBlocks[0] self.assertTrue(disj1.active) - self.assertIs(disj1, m.d1.transformation_block()) + self.assertIs(disj1, m.d1.transformation_block) self.assertIs(m.d1, hull.get_src_disjunct(disj1)) - - # check the disaggregated vars are here + # check the disaggregated x is here self.assertIsInstance(disj1.disaggregatedVars.x, Var) self.assertEqual(disj1.disaggregatedVars.x.lb, 0) self.assertEqual(disj1.disaggregatedVars.x.ub, 2) - self.assertIs(disj1.disaggregatedVars.x, - hull.get_disaggregated_var(m.x, m.d1)) + self.assertIs(disj1.disaggregatedVars.x, hull.get_disaggregated_var(m.x, m.d1)) self.assertIs(m.x, hull.get_src_var(disj1.disaggregatedVars.x)) - d3 = disj1.disaggregatedVars.component("d1.d3.binary_indicator_var") - self.assertEqual(d3.lb, 0) - self.assertEqual(d3.ub, 1) - self.assertIsInstance(d3, Var) - self.assertIs(d3, hull.get_disaggregated_var( - m.d1.d3.binary_indicator_var, m.d1)) - self.assertIs(m.d1.d3.binary_indicator_var, hull.get_src_var(d3)) - d4 = disj1.disaggregatedVars.component("d1.d4.binary_indicator_var") - self.assertIsInstance(d4, Var) - self.assertEqual(d4.lb, 0) - self.assertEqual(d4.ub, 1) - self.assertIs(d4, hull.get_disaggregated_var( - m.d1.d4.binary_indicator_var, m.d1)) - self.assertIs(m.d1.d4.binary_indicator_var, hull.get_src_var(d4)) - - # check inner disjunction disaggregated vars - x3 = m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].\ - disaggregatedVars.x - self.assertIsInstance(x3, Var) - self.assertEqual(x3.lb, 0) - self.assertEqual(x3.ub, 2) - self.assertIs(hull.get_disaggregated_var(m.x, m.d1.d3), x3) - self.assertIs(hull.get_src_var(x3), m.x) - - x4 = m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1].\ - disaggregatedVars.x - self.assertIsInstance(x4, Var) - self.assertEqual(x4.lb, 0) - self.assertEqual(x4.ub, 2) - self.assertIs(hull.get_disaggregated_var(m.x, m.d1.d4), x4) - self.assertIs(hull.get_src_var(x4), m.x) - # check the bounds constraints - self.check_bounds_constraint_ub(disj1.x_bounds, 2, - disj1.disaggregatedVars.x, - m.d1.indicator_var) - self.check_bounds_constraint_ub( - disj1.component("d1.d3.binary_indicator_var_bounds"), 1, - disj1.disaggregatedVars.component("d1.d3.binary_indicator_var"), - m.d1.indicator_var) self.check_bounds_constraint_ub( - disj1.component("d1.d4.binary_indicator_var_bounds"), 1, - disj1.disaggregatedVars.component("d1.d4.binary_indicator_var"), - m.d1.indicator_var) + disj1.x_bounds, 2, disj1.disaggregatedVars.x, m.d1.indicator_var + ) + # transformed constraint x >= 1 + cons = hull.get_transformed_constraints(m.d1.c) + self.check_transformed_constraint( + cons, disj1.disaggregatedVars.x, 1, m.d1.indicator_var + ) - # check the transformed constraints + ## d2's transformation block - # transformed xor - xor = disj1.component("d1._pyomo_gdp_hull_reformulation.'d1.disj2_xor'") - self.assertIsInstance(xor, Constraint) - self.assertTrue(xor.active) - self.assertEqual(len(xor), 1) - self.assertTrue(xor['eq'].active) - self.assertEqual(xor['eq'].lower, 0) - self.assertEqual(xor['eq'].upper, 0) - repn = generate_standard_repn(xor['eq'].body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 3) - ct.check_linear_coef( - self, repn, - disj1.disaggregatedVars.component("d1.d3.binary_indicator_var"), 1) - ct.check_linear_coef( - self, repn, - disj1.disaggregatedVars.component("d1.d4.binary_indicator_var"), 1) - ct.check_linear_coef(self, repn, m.d1.indicator_var, -1) - - # inner disjunction disaggregation constraint - dis_cons_inner_disjunction = disj1.component( - "d1._pyomo_gdp_hull_reformulation.disaggregationConstraints") - self.assertIsInstance(dis_cons_inner_disjunction, Constraint) - self.assertTrue(dis_cons_inner_disjunction.active) - self.assertEqual(len(dis_cons_inner_disjunction), 1) - self.assertTrue(dis_cons_inner_disjunction[(0,None,'eq')].active) - self.assertEqual(dis_cons_inner_disjunction[(0,None,'eq')].lower, 0) - self.assertEqual(dis_cons_inner_disjunction[(0,None,'eq')].upper, 0) - repn = generate_standard_repn(dis_cons_inner_disjunction[(0, None, - 'eq')].body) - self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 3) - ct.check_linear_coef(self, repn, x3, -1) - ct.check_linear_coef(self, repn, x4, -1) - ct.check_linear_coef(self, repn, disj1.disaggregatedVars.x, 1) - - # disaggregated d3.x bounds constraints - x3_bounds = disj1.component( - "d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].x_bounds") - original_cons = m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].\ - x_bounds - self.check_inner_disaggregated_var_bounds( - x3_bounds, x3, - disj1.disaggregatedVars.component("d1.d3.binary_indicator_var"), - original_cons) - - - # disaggregated d4.x bounds constraints - x4_bounds = disj1.component( - "d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1].x_bounds") - original_cons = m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1].\ - x_bounds - self.check_inner_disaggregated_var_bounds( - x4_bounds, x4, - disj1.disaggregatedVars.component("d1.d4.binary_indicator_var"), - original_cons) - - # transformed x >= 1.2 - cons = disj1.component( - "d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0].'d1.d3.c'") - first_transformed = m.d1._pyomo_gdp_hull_reformulation.\ - relaxedDisjuncts[0].component("d1.d3.c") - original = m.d1.d3.c - self.check_inner_transformed_constraint( - cons, x3, 1.2, - disj1.disaggregatedVars.component("d1.d3.binary_indicator_var"), - first_transformed, original) - - # transformed x >= 1.3 - cons = disj1.component( - "d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1].'d1.d4.c'") - first_transformed = m.d1._pyomo_gdp_hull_reformulation.\ - relaxedDisjuncts[1].component("d1.d4.c") - original = m.d1.d4.c - self.check_inner_transformed_constraint( - cons, x4, 1.3, - disj1.disaggregatedVars.component("d1.d4.binary_indicator_var"), - first_transformed, original) - - # outer disjunction transformed constraint - cons = disj1.component("d1.c") - self.check_outer_transformed_constraint(cons, disj1.disaggregatedVars.x, - 1, m.d1.indicator_var) - - # and last, check the second transformed outer disjunct - disj2 = disjBlocks[3] + disj2 = disjBlocks[1] self.assertTrue(disj2.active) - self.assertIs(disj2, m.d2.transformation_block()) + self.assertIs(disj2, m.d2.transformation_block) self.assertIs(m.d2, hull.get_src_disjunct(disj2)) - # disaggregated var x2 = disj2.disaggregatedVars.x self.assertIsInstance(x2, Var) @@ -1682,58 +1633,54 @@ def test_transformed_model_nestedDisjuncts(self): self.assertEqual(x2.ub, 2) self.assertIs(hull.get_disaggregated_var(m.x, m.d2), x2) self.assertIs(hull.get_src_var(x2), m.x) - # bounds constraint x_bounds = disj2.x_bounds - self.check_bounds_constraint_ub(x_bounds, 2, x2, m.d2.indicator_var) - + self.check_bounds_constraint_ub(x_bounds, 2, x2, m.d2.binary_indicator_var) # transformed constraint x >= 1.1 - cons = disj2.component("d2.c") - self.check_outer_transformed_constraint(cons, x2, 1.1, - m.d2.indicator_var) - - # check inner xor mapping: Note that this maps to a now deactivated - # (transformed again) constraint, but that it is possible to go full - # circle, like so: - orig_inner_xor = m.d1._pyomo_gdp_hull_reformulation.component( - "d1.disj2_xor") - self.assertIs(m.d1.disj2.algebraic_constraint(), orig_inner_xor) - self.assertFalse(orig_inner_xor.active) - trans_list = hull.get_transformed_constraints(orig_inner_xor) - self.assertEqual(len(trans_list), 1) - self.assertIs(trans_list[0], xor['eq']) - self.assertIs(hull.get_src_constraint(xor), orig_inner_xor) - self.assertIs(hull.get_src_disjunction(orig_inner_xor), m.d1.disj2) - - # the same goes for the disaggregation constraint - orig_dis_container = m.d1._pyomo_gdp_hull_reformulation.\ - disaggregationConstraints - orig_dis = orig_dis_container[0,None] - self.assertIs(hull.get_disaggregation_constraint(m.x, m.d1.disj2), - orig_dis) - self.assertFalse(orig_dis.active) - transformedList = hull.get_transformed_constraints(orig_dis) - self.assertEqual(len(transformedList), 1) - self.assertIs(transformedList[0], dis_cons_inner_disjunction[(0, None, - 'eq')]) - - self.assertIs(hull.get_src_constraint( - dis_cons_inner_disjunction[(0, None, 'eq')]), orig_dis) - self.assertIs(hull.get_src_constraint( dis_cons_inner_disjunction), - orig_dis_container) - # though we don't have a map back from the disaggregation constraint to - # the variable because I'm not sure why you would... The variable is in - # the constraint. - - # check the inner disjunct mappings - self.assertIs(m.d1.d3.transformation_block(), - m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0]) - self.assertIs(hull.get_src_disjunct( - m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[0]), m.d1.d3) - self.assertIs(m.d1.d4.transformation_block(), - m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1]) - self.assertIs(hull.get_src_disjunct( - m.d1._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1]), m.d1.d4) + cons = hull.get_transformed_constraints(m.d2.c) + self.check_transformed_constraint(cons, x2, 1.1, m.d2.binary_indicator_var) + + ## d1.d3's transformation block + + disj3 = disjBlocks[2] + self.assertTrue(disj3.active) + self.assertIs(disj3, m.d1.d3.transformation_block) + self.assertIs(m.d1.d3, hull.get_src_disjunct(disj3)) + # disaggregated var + x3 = disj3.disaggregatedVars.x + self.assertIsInstance(x3, Var) + self.assertEqual(x3.lb, 0) + self.assertEqual(x3.ub, 2) + self.assertIs(hull.get_disaggregated_var(m.x, m.d1.d3), x3) + self.assertIs(hull.get_src_var(x3), m.x) + # bounds constraints + self.check_bounds_constraint_ub( + disj3.x_bounds, 2, x3, m.d1.d3.binary_indicator_var + ) + # transformed x >= 1.2 + cons = hull.get_transformed_constraints(m.d1.d3.c) + self.check_transformed_constraint(cons, x3, 1.2, m.d1.d3.binary_indicator_var) + + ## d1.d4's transformation block + + disj4 = disjBlocks[3] + self.assertTrue(disj4.active) + self.assertIs(disj4, m.d1.d4.transformation_block) + self.assertIs(m.d1.d4, hull.get_src_disjunct(disj4)) + # disaggregated var + x4 = disj4.disaggregatedVars.x + self.assertIsInstance(x4, Var) + self.assertEqual(x4.lb, 0) + self.assertEqual(x4.ub, 2) + self.assertIs(hull.get_disaggregated_var(m.x, m.d1.d4), x4) + self.assertIs(hull.get_src_var(x4), m.x) + # bounds constraints + self.check_bounds_constraint_ub( + disj4.x_bounds, 2, x4, m.d1.d4.binary_indicator_var + ) + # transformed x >= 1.3 + cons = hull.get_transformed_constraints(m.d1.d4.c) + self.check_transformed_constraint(cons, x4, 1.3, m.d1.d4.binary_indicator_var) @unittest.skipIf(not linear_solvers, "No linear solver available") def test_solve_nested_model(self): @@ -1776,8 +1723,9 @@ def test_disaggregated_vars_are_set_to_0_correctly(self): m.d4.indicator_var.fix(0) results = SolverFactory(linear_solvers[0]).solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) self.assertEqual(value(m.x), 1.1) self.assertEqual(value(hull.get_disaggregated_var(m.x, m.d1)), 0) @@ -1792,8 +1740,9 @@ def test_disaggregated_vars_are_set_to_0_correctly(self): m.d4.indicator_var.fix(0) results = SolverFactory(linear_solvers[0]).solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) self.assertEqual(value(m.x), 1.2) self.assertEqual(value(hull.get_disaggregated_var(m.x, m.d1)), 1.2) @@ -1801,61 +1750,91 @@ def test_disaggregated_vars_are_set_to_0_correctly(self): self.assertEqual(value(hull.get_disaggregated_var(m.x, m.d3)), 1.2) self.assertEqual(value(hull.get_disaggregated_var(m.x, m.d4)), 0) -class TestHierarchicalNestedModels(unittest.TestCase): - def tearDown(self): - if os.path.exists(join(currdir, 'm.nl')): - os.remove(join(currdir, 'm.nl')) - if os.path.exists(join(currdir, 'm1.nl')): - os.remove(join(currdir, 'm1.nl')) + def test_nested_with_local_vars(self): + m = ConcreteModel() - def test_hierarchical_badly_ordered_targets(self): - m = models.makeHierarchicalNested_DeclOrderMatchesInstantationOrder() - hull = TransformationFactory('gdp.hull') - m1 = hull.create_using(m, targets=[m.disjunction_block, - m.disjunct_block.disj2]) - - # the real test here is that the above doesn't scream about there being - # an untransformed Disjunction inside of a Disjunct it's trying to - # transform. This kinda too big a test for hull because the nonlinear - # expressions are ugly and the inner disjunction variables are - # disaggregated twice... So let's just make sure that this is the same - # as manually transforming in the correct order. - hull.apply_to(m, targets=m.disjunct_block.disj2.disjunction) - hull.apply_to(m, targets=m.disjunction_block.disjunction) - - # intentionally not using symbolic_solver_labels because there's nothing - # to guarantee that name collisions are named the same. And there are - # plenty of name collisions. But these nl files should be the same. - m1.write(join(currdir, 'm1.nl')) - m.write(join(currdir, 'm.nl')) - - self.assertTrue(cmp(join(currdir, 'm.nl'), join(currdir, 'm1.nl'))) - - def test_decl_order_opposite_instantiation_order(self): - # In this test, we create the same problem as above, but we don't even - # need targets! - m = models.makeHierarchicalNested_DeclOrderOppositeInstantationOrder() - hull = TransformationFactory('gdp.hull') - m1 = hull.create_using(m) + m.x = Var(bounds=(0, 10)) + m.S = RangeSet(2) - # Like above, the real test is that the above doesn't scream. We can use - # the same check to make sure everything looks like it would have if we - # manually handled the nested - hull.apply_to(m, targets=m.disjunct_block.disj2.disjunction) - hull.apply_to(m, targets=m.disjunction_block.disjunction) + @m.Disjunct() + def d_l(d): + d.lambdas = Var(m.S, bounds=(0, 1)) + d.LocalVars = Suffix(direction=Suffix.LOCAL) + d.LocalVars[d] = list(d.lambdas.values()) + d.c1 = Constraint(expr=d.lambdas[1] + d.lambdas[2] == 1) + d.c2 = Constraint(expr=m.x == 2 * d.lambdas[1] + 3 * d.lambdas[2]) + + @m.Disjunct() + def d_r(d): + @d.Disjunct() + def d_l(e): + e.lambdas = Var(m.S, bounds=(0, 1)) + e.LocalVars = Suffix(direction=Suffix.LOCAL) + e.LocalVars[e] = list(e.lambdas.values()) + e.c1 = Constraint(expr=e.lambdas[1] + e.lambdas[2] == 1) + e.c2 = Constraint(expr=m.x == 2 * e.lambdas[1] + 3 * e.lambdas[2]) + + @d.Disjunct() + def d_r(e): + e.lambdas = Var(m.S, bounds=(0, 1)) + e.LocalVars = Suffix(direction=Suffix.LOCAL) + e.LocalVars[e] = list(e.lambdas.values()) + e.c1 = Constraint(expr=e.lambdas[1] + e.lambdas[2] == 1) + e.c2 = Constraint(expr=m.x == 2 * e.lambdas[1] + 3 * e.lambdas[2]) + + d.inner_disj = Disjunction(expr=[d.d_l, d.d_r]) + + m.disj = Disjunction(expr=[m.d_l, m.d_r]) + m.obj = Objective(expr=m.x) + + hull = TransformationFactory('gdp.hull') + hull.apply_to(m) - m1.write(join(currdir, 'm1.nl')) - m.write(join(currdir, 'm.nl')) + x1 = hull.get_disaggregated_var(m.x, m.d_l) + x2 = hull.get_disaggregated_var(m.x, m.d_r) + x3 = hull.get_disaggregated_var(m.x, m.d_r.d_l) + x4 = hull.get_disaggregated_var(m.x, m.d_r.d_r) + + for d, x in [(m.d_l, x1), (m.d_r.d_l, x3), (m.d_r.d_r, x4)]: + lambda1 = hull.get_disaggregated_var(d.lambdas[1], d) + self.assertIs(lambda1, d.lambdas[1]) + lambda2 = hull.get_disaggregated_var(d.lambdas[2], d) + self.assertIs(lambda2, d.lambdas[2]) + + cons = hull.get_transformed_constraints(d.c1) + self.assertEqual(len(cons), 1) + convex_combo = cons[0] + assertExpressionsEqual( + self, + convex_combo.expr, + lambda1 + lambda2 - (1 - d.indicator_var.get_associated_binary()) * 0.0 + == d.indicator_var.get_associated_binary(), + ) + cons = hull.get_transformed_constraints(d.c2) + self.assertEqual(len(cons), 1) + get_x = cons[0] + assertExpressionsEqual( + self, + get_x.expr, + x + - (2 * lambda1 + 3 * lambda2) + - (1 - d.indicator_var.get_associated_binary()) * 0.0 + == 0.0 * d.indicator_var.get_associated_binary(), + ) + + cons = hull.get_disaggregation_constraint(m.x, m.disj) + assertExpressionsEqual(self, cons.expr, m.x == x1 + x2) + cons = hull.get_disaggregation_constraint(m.x, m.d_r.inner_disj) + assertExpressionsEqual(self, cons.expr, x2 == x3 + x4) - self.assertTrue(cmp(join(currdir, 'm.nl'), join(currdir, 'm1.nl'))) class TestSpecialCases(unittest.TestCase): def test_local_vars(self): - """ checks that if nothing is marked as local, we assume it is all + """checks that if nothing is marked as local, we assume it is all global. We disaggregate everything to be safe.""" m = ConcreteModel() - m.x = Var(bounds=(5,100)) - m.y = Var(bounds=(0,100)) + m.x = Var(bounds=(5, 100)) + m.y = Var(bounds=(0, 100)) m.d1 = Disjunct() m.d1.c = Constraint(expr=m.y >= m.x) m.d2 = Disjunct() @@ -1867,13 +1846,15 @@ def test_local_vars(self): GDP_Error, ".*Missing bound for d2.z.*", TransformationFactory('gdp.hull').create_using, - m) + m, + ) m.d2.z.setlb(7) self.assertRaisesRegex( GDP_Error, ".*Missing bound for d2.z.*", TransformationFactory('gdp.hull').create_using, - m) + m, + ) m.d2.z.setub(9) i = TransformationFactory('gdp.hull').create_using(m) @@ -1882,15 +1863,15 @@ def test_local_vars(self): # z should be disaggregated because we can't be sure it's not somewhere # else on the model. (Note however that the copy of x corresponding to # this disjunct is on the disjunction block) - self.assertEqual(sorted(varBlock.component_map(Var)), ['d2.z','y']) + self.assertEqual(sorted(varBlock.component_map(Var)), ['d2.z', 'y']) # constraint on the disjunction block self.assertEqual(len(rd.component_map(Constraint)), 3) # bounds haven't changed on original - self.assertEqual(i.d2.z.bounds, (7,9)) + self.assertEqual(i.d2.z.bounds, (7, 9)) # check disaggregated variable z = varBlock.component('d2.z') self.assertIsInstance(z, Var) - self.assertEqual(z.bounds, (0,9)) + self.assertEqual(z.bounds, (0, 9)) z_bounds = rd.component("d2.z_bounds") self.assertEqual(len(z_bounds), 2) self.assertEqual(z_bounds['lb'].lower, None) @@ -1907,14 +1888,14 @@ def test_local_vars(self): i = TransformationFactory('gdp.hull').create_using(m) rd = i._pyomo_gdp_hull_reformulation.relaxedDisjuncts[1] varBlock = rd.disaggregatedVars - self.assertEqual(sorted(varBlock.component_map(Var)), ['d2.z','y']) + self.assertEqual(sorted(varBlock.component_map(Var)), ['d2.z', 'y']) self.assertEqual(len(rd.component_map(Constraint)), 3) # original bounds unchanged - self.assertEqual(i.d2.z.bounds, (-9,-7)) + self.assertEqual(i.d2.z.bounds, (-9, -7)) # check disaggregated variable z = varBlock.component("d2.z") self.assertIsInstance(z, Var) - self.assertEqual(z.bounds, (-9,0)) + self.assertEqual(z.bounds, (-9, 0)) z_bounds = rd.component("d2.z_bounds") self.assertEqual(len(z_bounds), 2) self.assertEqual(z_bounds['lb'].lower, None) @@ -1930,8 +1911,8 @@ def test_local_var_suffix(self): hull = TransformationFactory('gdp.hull') model = ConcreteModel() - model.x = Var(bounds=(5,100)) - model.y = Var(bounds=(0,100)) + model.x = Var(bounds=(5, 100)) + model.y = Var(bounds=(0, 100)) model.d1 = Disjunct() model.d1.c = Constraint(expr=model.y >= model.x) model.d2 = Disjunct() @@ -1943,11 +1924,9 @@ def test_local_var_suffix(self): m = hull.create_using(model) self.assertEqual(m.d2.z.lb, -9) self.assertEqual(m.d2.z.ub, -7) - z_disaggregated = m.d2.transformation_block().disaggregatedVars.\ - component("d2.z") + z_disaggregated = m.d2.transformation_block.disaggregatedVars.component("d2.z") self.assertIsInstance(z_disaggregated, Var) - self.assertIs(z_disaggregated, - hull.get_disaggregated_var(m.d2.z, m.d2)) + self.assertIs(z_disaggregated, hull.get_disaggregated_var(m.d2.z, m.d2)) # we do declare z local model.d2.LocalVars = Suffix(direction=Suffix.LOCAL) @@ -1961,8 +1940,8 @@ def test_local_var_suffix(self): # it is its own disaggregated variable self.assertIs(hull.get_disaggregated_var(m.d2.z, m.d2), m.d2.z) # it does not exist on the transformation block - self.assertIsNone(m.d2.transformation_block().disaggregatedVars.\ - component("z")) + self.assertIsNone(m.d2.transformation_block.disaggregatedVars.component("z")) + class UntransformableObjectsOnDisjunct(unittest.TestCase): def test_RangeSet(self): @@ -1971,6 +1950,7 @@ def test_RangeSet(self): def test_Expression(self): ct.check_Expression(self, 'hull') + class TransformABlock(unittest.TestCase, CommonTests): def test_transformation_simple_block(self): ct.check_transformation_simple_block(self, 'hull') @@ -1997,6 +1977,7 @@ def test_create_using(self): m = models.makeTwoTermDisjOnBlock() ct.diff_apply_to_and_create_using(self, m, 'gdp.hull') + class DisjOnBlock(unittest.TestCase, CommonTests): # when the disjunction is on a block, we want all of the stuff created by # the transformation to go on that block also so that solving the block @@ -2008,6 +1989,7 @@ def test_xor_constraint_added(self): def test_trans_block_created(self): ct.check_trans_block_created(self, 'hull') + class TestErrors(unittest.TestCase): def setUp(self): # set seed so we can test name collisions predictably @@ -2015,7 +1997,8 @@ def setUp(self): def test_ask_for_transformed_constraint_from_untransformed_disjunct(self): ct.check_ask_for_transformed_constraint_from_untransformed_disjunct( - self, 'hull') + self, 'hull' + ) def test_silly_target(self): ct.check_silly_target(self, 'hull') @@ -2027,66 +2010,68 @@ def test_transform_empty_disjunction(self): ct.check_transform_empty_disjunction(self, 'hull') def test_deactivated_disjunct_nonzero_indicator_var(self): - ct.check_deactivated_disjunct_nonzero_indicator_var(self, - 'hull') + ct.check_deactivated_disjunct_nonzero_indicator_var(self, 'hull') def test_deactivated_disjunct_unfixed_indicator_var(self): ct.check_deactivated_disjunct_unfixed_indicator_var(self, 'hull') def test_infeasible_xor_because_all_disjuncts_deactivated(self): - m = ct.setup_infeasible_xor_because_all_disjuncts_deactivated(self, - 'hull') + m = ct.setup_infeasible_xor_because_all_disjuncts_deactivated(self, 'hull') hull = TransformationFactory('gdp.hull') transBlock = m.component("_pyomo_gdp_hull_reformulation") self.assertIsInstance(transBlock, Block) self.assertEqual(len(transBlock.relaxedDisjuncts), 2) - self.assertIsInstance(transBlock.component("disjunction_xor"), - Constraint) + self.assertIsInstance(transBlock.component("disjunction_xor"), Constraint) disjunct1 = transBlock.relaxedDisjuncts[0] # we disaggregated the (deactivated) indicator variables - d3_ind = m.disjunction_disjuncts[0].nestedDisjunction_disjuncts[0].\ - binary_indicator_var - d4_ind = m.disjunction_disjuncts[0].nestedDisjunction_disjuncts[1].\ - binary_indicator_var + d3_ind = ( + m.disjunction_disjuncts[0] + .nestedDisjunction_disjuncts[0] + .binary_indicator_var + ) + d4_ind = ( + m.disjunction_disjuncts[0] + .nestedDisjunction_disjuncts[1] + .binary_indicator_var + ) d3_ind_dis = disjunct1.disaggregatedVars.component( "disjunction_disjuncts[0].nestedDisjunction_" - "disjuncts[0].binary_indicator_var") - self.assertIs(hull.get_disaggregated_var(d3_ind, - m.disjunction_disjuncts[0]), - d3_ind_dis) + "disjuncts[0].binary_indicator_var" + ) + self.assertIs( + hull.get_disaggregated_var(d3_ind, m.disjunction_disjuncts[0]), d3_ind_dis + ) self.assertIs(hull.get_src_var(d3_ind_dis), d3_ind) d4_ind_dis = disjunct1.disaggregatedVars.component( "disjunction_disjuncts[0].nestedDisjunction_" - "disjuncts[1].binary_indicator_var") - self.assertIs(hull.get_disaggregated_var(d4_ind, - m.disjunction_disjuncts[0]), - d4_ind_dis) + "disjuncts[1].binary_indicator_var" + ) + self.assertIs( + hull.get_disaggregated_var(d4_ind, m.disjunction_disjuncts[0]), d4_ind_dis + ) self.assertIs(hull.get_src_var(d4_ind_dis), d4_ind) - relaxed_xor = disjunct1.component( - "disjunction_disjuncts[0]._pyomo_gdp_hull_reformulation." - "'disjunction_disjuncts[0].nestedDisjunction_xor'") - self.assertIsInstance(relaxed_xor, Constraint) + relaxed_xor = hull.get_transformed_constraints( + m.disjunction_disjuncts[0].nestedDisjunction.algebraic_constraint + ) self.assertEqual(len(relaxed_xor), 1) - repn = generate_standard_repn(relaxed_xor['eq'].body) - self.assertEqual(relaxed_xor['eq'].lower, 0) - self.assertEqual(relaxed_xor['eq'].upper, 0) + relaxed_xor = relaxed_xor[0] + repn = generate_standard_repn(relaxed_xor.body) + self.assertEqual(value(relaxed_xor.lower), 0) + self.assertEqual(value(relaxed_xor.upper), 0) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 3) # constraint says that the disaggregated indicator variables of the # nested disjuncts sum to the indicator variable of the outer disjunct. - ct.check_linear_coef( - self, repn, m.disjunction.disjuncts[0].indicator_var, -1) - ct.check_linear_coef( - self, repn, d3_ind_dis, 1) - ct.check_linear_coef( - self, repn, d4_ind_dis, 1) + ct.check_linear_coef(self, repn, m.disjunction.disjuncts[0].indicator_var, -1) + ct.check_linear_coef(self, repn, d3_ind_dis, 1) + ct.check_linear_coef(self, repn, d4_ind_dis, 1) self.assertEqual(repn.constant, 0) # but the disaggregation constraints are going to force them to 0 (which # will in turn force the outer disjunct indicator variable to 0, which # is what we want) - d3_ind_dis_cons = transBlock.disaggregationConstraints[1, None] + d3_ind_dis_cons = transBlock.disaggregationConstraints[1] self.assertEqual(d3_ind_dis_cons.lower, 0) self.assertEqual(d3_ind_dis_cons.upper, 0) repn = generate_standard_repn(d3_ind_dis_cons.body) @@ -2095,15 +2080,15 @@ def test_infeasible_xor_because_all_disjuncts_deactivated(self): self.assertEqual(repn.constant, 0) ct.check_linear_coef(self, repn, d3_ind_dis, -1) ct.check_linear_coef(self, repn, transBlock._disaggregatedVars[0], -1) - d4_ind_dis_cons = transBlock.disaggregationConstraints[2, None] + d4_ind_dis_cons = transBlock.disaggregationConstraints[2] self.assertEqual(d4_ind_dis_cons.lower, 0) self.assertEqual(d4_ind_dis_cons.upper, 0) repn = generate_standard_repn(d4_ind_dis_cons.body) self.assertTrue(repn.is_linear()) self.assertEqual(len(repn.linear_vars), 2) self.assertEqual(repn.constant, 0) - ct.check_linear_coef( self, repn, d4_ind_dis, -1) - ct.check_linear_coef( self, repn, transBlock._disaggregatedVars[1], -1) + ct.check_linear_coef(self, repn, d4_ind_dis, -1) + ct.check_linear_coef(self, repn, transBlock._disaggregatedVars[1], -1) def test_mapping_method_errors(self): m = models.makeTwoTermDisj_Nonlinear() @@ -2116,12 +2101,14 @@ def test_mapping_method_errors(self): AttributeError, "'NoneType' object has no attribute 'parent_block'", hull.get_var_bounds_constraint, - m.w) + m.w, + ) self.assertRegex( log.getvalue(), ".*Either 'w' is not a disaggregated variable, " "or the disjunction that disaggregates it has " - "not been properly transformed.") + "not been properly transformed.", + ) log = StringIO() with LoggingIntercept(log, 'pyomo.gdp.hull', logging.ERROR): @@ -2130,13 +2117,17 @@ def test_mapping_method_errors(self): r".*_pyomo_gdp_hull_reformulation.relaxedDisjuncts\[1\]." r"disaggregatedVars.w", hull.get_disaggregation_constraint, - m.d[1].transformation_block().disaggregatedVars.w, - m.disjunction) - self.assertRegex(log.getvalue(), ".*It doesn't appear that " - r"'_pyomo_gdp_hull_reformulation." - r"relaxedDisjuncts\[1\].disaggregatedVars.w' " - r"is a variable that was disaggregated by " - r"Disjunction 'disjunction'") + m.d[1].transformation_block.disaggregatedVars.w, + m.disjunction, + ) + self.assertRegex( + log.getvalue(), + ".*It doesn't appear that " + r"'_pyomo_gdp_hull_reformulation." + r"relaxedDisjuncts\[1\].disaggregatedVars.w' " + r"is a variable that was disaggregated by " + r"Disjunction 'disjunction'", + ) log = StringIO() with LoggingIntercept(log, 'pyomo.gdp.hull', logging.ERROR): @@ -2144,10 +2135,11 @@ def test_mapping_method_errors(self): AttributeError, "'NoneType' object has no attribute 'parent_block'", hull.get_src_var, - m.w) + m.w, + ) self.assertRegex( - log.getvalue(), - ".*'w' does not appear to be a disaggregated variable") + log.getvalue(), ".*'w' does not appear to be a disaggregated variable" + ) log = StringIO() with LoggingIntercept(log, 'pyomo.gdp.hull', logging.ERROR): @@ -2156,14 +2148,17 @@ def test_mapping_method_errors(self): r".*_pyomo_gdp_hull_reformulation.relaxedDisjuncts\[1\]." r"disaggregatedVars.w", hull.get_disaggregated_var, - m.d[1].transformation_block().disaggregatedVars.w, - m.d[1]) - self.assertRegex(log.getvalue(), - r".*It does not appear " - r"'_pyomo_gdp_hull_reformulation." - r"relaxedDisjuncts\[1\].disaggregatedVars.w' " - r"is a variable which appears in disjunct " - r"'d\[1\]'") + m.d[1].transformation_block.disaggregatedVars.w, + m.d[1], + ) + self.assertRegex( + log.getvalue(), + r".*It does not appear " + r"'_pyomo_gdp_hull_reformulation." + r"relaxedDisjuncts\[1\].disaggregatedVars.w' " + r"is a variable that appears in disjunct " + r"'d\[1\]'", + ) m.random_disjunction = Disjunction(expr=[m.w == 2, m.w >= 7]) self.assertRaisesRegex( @@ -2172,7 +2167,8 @@ def test_mapping_method_errors(self): "transformed: None of its disjuncts are transformed.", hull.get_disaggregation_constraint, m.w, - m.random_disjunction) + m.random_disjunction, + ) self.assertRaisesRegex( GDP_Error, @@ -2180,11 +2176,13 @@ def test_mapping_method_errors(self): r"transformed", hull.get_disaggregated_var, m.w, - m.random_disjunction.disjuncts[0]) + m.random_disjunction.disjuncts[0], + ) def test_untransformed_arcs(self): ct.check_untransformed_network_raises_GDPError(self, 'hull') + class BlocksOnDisjuncts(unittest.TestCase): def setUp(self): # set seed so we can test name collisions predictably @@ -2215,28 +2213,28 @@ def test_transformed_constraint_name_conflict(self): hull = TransformationFactory('gdp.hull') hull.apply_to(m) - transBlock = m.disj1.transformation_block() - self.assertIsInstance(transBlock.component("disj1.b.any_index"), - Constraint) - self.assertIsInstance(transBlock.component("disj1.'b.any_index'"), - Constraint) - xformed = hull.get_transformed_constraints( - m.disj1.component("b.any_index")) - self.assertEqual(len(xformed), 1) - self.assertIs(xformed[0], - transBlock.component("disj1.'b.any_index'")['lb']) - - xformed = hull.get_transformed_constraints(m.disj1.b.any_index['local']) - self.assertEqual(len(xformed), 1) - self.assertIs(xformed[0], - transBlock.component("disj1.b.any_index")[ - ('local','ub')]) - xformed = hull.get_transformed_constraints( - m.disj1.b.any_index['nonlin-ub']) - self.assertEqual(len(xformed), 1) - self.assertIs(xformed[0], - transBlock.component("disj1.b.any_index")[ - ('nonlin-ub','ub')]) + transBlock = m.disj1.transformation_block + # Just make sure exactly the expected number of constraints are here and + # that they are mapped to the correct original components. + self.assertEqual(len(transBlock.component_map(Constraint)), 3) + self.assertIs( + hull.get_transformed_constraints(m.disj1.b.any_index['local'])[ + 0 + ].parent_block(), + transBlock, + ) + self.assertIs( + hull.get_transformed_constraints(m.disj1.b.any_index['nonlin-ub'])[ + 0 + ].parent_block(), + transBlock, + ) + self.assertIs( + hull.get_transformed_constraints(m.disj1.component('b.any_index'))[ + 0 + ].parent_block(), + transBlock, + ) def test_local_var_handled_correctly(self): m = self.makeModel() @@ -2248,10 +2246,10 @@ def test_local_var_handled_correctly(self): self.assertIs(hull.get_disaggregated_var(m.x, m.disj1), m.x) self.assertEqual(m.x.lb, 0) self.assertEqual(m.x.ub, 5) - self.assertIsNone(m.disj1.transformation_block().disaggregatedVars.\ - component("x")) - self.assertIsInstance(m.disj1.transformation_block().disaggregatedVars.\ - component("y"), Var) + self.assertIsNone(m.disj1.transformation_block.disaggregatedVars.component("x")) + self.assertIsInstance( + m.disj1.transformation_block.disaggregatedVars.component("y"), Var + ) # this doesn't require the block, I'm just coopting this test to make sure # of some nonlinear expressions. @@ -2263,23 +2261,24 @@ def test_transformed_constraints(self): # test the transformed nonlinear constraints nonlin_ub_list = hull.get_transformed_constraints( - m.disj1.b.any_index['nonlin-ub']) + m.disj1.b.any_index['nonlin-ub'] + ) self.assertEqual(len(nonlin_ub_list), 1) cons = nonlin_ub_list[0] - self.assertEqual(cons.index(), ('nonlin-ub', 'ub')) self.assertIs(cons.ctype, Constraint) self.assertIsNone(cons.lower) self.assertEqual(value(cons.upper), 0) repn = generate_standard_repn(cons.body) - self.assertEqual(str(repn.nonlinear_expr), - "(0.9999*disj1.binary_indicator_var + 0.0001)*" - "(_pyomo_gdp_hull_reformulation.relaxedDisjuncts[0]." - "disaggregatedVars.y/" - "(0.9999*disj1.binary_indicator_var + 0.0001))**2") + self.assertEqual( + str(repn.nonlinear_expr), + "(0.9999*disj1.binary_indicator_var + 0.0001)*" + "(_pyomo_gdp_hull_reformulation.relaxedDisjuncts[0]." + "disaggregatedVars.y/" + "(0.9999*disj1.binary_indicator_var + 0.0001))**2", + ) self.assertEqual(len(repn.nonlinear_vars), 2) self.assertIs(repn.nonlinear_vars[0], m.disj1.binary_indicator_var) - self.assertIs(repn.nonlinear_vars[1], - hull.get_disaggregated_var(m.y, m.disj1)) + self.assertIs(repn.nonlinear_vars[1], hull.get_disaggregated_var(m.y, m.disj1)) self.assertEqual(repn.constant, 0) self.assertEqual(len(repn.linear_vars), 1) self.assertIs(repn.linear_vars[0], m.disj1.binary_indicator_var) @@ -2288,26 +2287,27 @@ def test_transformed_constraints(self): nonlin_lb_list = hull.get_transformed_constraints(m.disj2.non_lin_lb) self.assertEqual(len(nonlin_lb_list), 1) cons = nonlin_lb_list[0] - self.assertEqual(cons.index(), 'lb') self.assertIs(cons.ctype, Constraint) self.assertIsNone(cons.lower) self.assertEqual(value(cons.upper), 0) repn = generate_standard_repn(cons.body) - self.assertEqual(str(repn.nonlinear_expr), - "- ((0.9999*disj2.binary_indicator_var + 0.0001)*" - "log(1 + " - "_pyomo_gdp_hull_reformulation.relaxedDisjuncts[1]." - "disaggregatedVars.y/" - "(0.9999*disj2.binary_indicator_var + 0.0001)))") + self.assertEqual( + str(repn.nonlinear_expr), + "- ((0.9999*disj2.binary_indicator_var + 0.0001)*" + "log(1 + " + "_pyomo_gdp_hull_reformulation.relaxedDisjuncts[1]." + "disaggregatedVars.y/" + "(0.9999*disj2.binary_indicator_var + 0.0001)))", + ) self.assertEqual(len(repn.nonlinear_vars), 2) self.assertIs(repn.nonlinear_vars[0], m.disj2.binary_indicator_var) - self.assertIs(repn.nonlinear_vars[1], - hull.get_disaggregated_var(m.y, m.disj2)) + self.assertIs(repn.nonlinear_vars[1], hull.get_disaggregated_var(m.y, m.disj2)) self.assertEqual(repn.constant, 0) self.assertEqual(len(repn.linear_vars), 1) self.assertIs(repn.linear_vars[0], m.disj2.binary_indicator_var) self.assertEqual(repn.linear_coefs[0], 1) + class DisaggregatingFixedVars(unittest.TestCase): def test_disaggregate_fixed_variables(self): m = models.makeTwoTermDisj() @@ -2315,10 +2315,11 @@ def test_disaggregate_fixed_variables(self): hull = TransformationFactory('gdp.hull') hull.apply_to(m) # check that we did indeed disaggregate x - transBlock = m.d[1]._transformation_block() + transBlock = m.d[1].transformation_block self.assertIsInstance(transBlock.disaggregatedVars.component("x"), Var) - self.assertIs(hull.get_disaggregated_var(m.x, m.d[1]), - transBlock.disaggregatedVars.x) + self.assertIs( + hull.get_disaggregated_var(m.x, m.d[1]), transBlock.disaggregatedVars.x + ) self.assertIs(hull.get_src_var(transBlock.disaggregatedVars.x), m.x) def test_do_not_disaggregate_fixed_variables(self): @@ -2327,18 +2328,21 @@ def test_do_not_disaggregate_fixed_variables(self): hull = TransformationFactory('gdp.hull') hull.apply_to(m, assume_fixed_vars_permanent=True) # check that we didn't disaggregate x - transBlock = m.d[1]._transformation_block() + transBlock = m.d[1].transformation_block self.assertIsNone(transBlock.disaggregatedVars.component("x")) + class NameDeprecationTest(unittest.TestCase): def test_name_deprecated(self): m = models.makeTwoTermDisj() output = StringIO() with LoggingIntercept(output, 'pyomo.gdp', logging.WARNING): TransformationFactory('gdp.chull').apply_to(m) - self.assertIn("DEPRECATED: The 'gdp.chull' name is deprecated. " - "Please use the more apt 'gdp.hull' instead.", - output.getvalue().replace('\n', ' ')) + self.assertIn( + "DEPRECATED: The 'gdp.chull' name is deprecated. " + "Please use the more apt 'gdp.hull' instead.", + output.getvalue().replace('\n', ' '), + ) def test_hull_chull_equivalent(self): m = models.makeTwoTermDisj() @@ -2350,32 +2354,35 @@ def test_hull_chull_equivalent(self): m2.pprint(ostream=out2) self.assertMultiLineEqual(out1.getvalue(), out2.getvalue()) + class KmeansTest(unittest.TestCase): - @unittest.skipIf('gurobi' not in linear_solvers, - "Gurobi solver not available") + @unittest.skipIf('gurobi' not in linear_solvers, "Gurobi solver not available") def test_optimal_soln_feasible(self): m = ConcreteModel() m.Points = RangeSet(3) m.Centroids = RangeSet(2) - m.X = Param(m.Points, initialize={1:0.3672, 2:0.8043, 3:0.3059}) + m.X = Param(m.Points, initialize={1: 0.3672, 2: 0.8043, 3: 0.3059}) - m.cluster_center = Var(m.Centroids, bounds=(0,2)) - m.distance = Var(m.Points, bounds=(0,2)) - m.t = Var(m.Points, m.Centroids, bounds=(0,2)) + m.cluster_center = Var(m.Centroids, bounds=(0, 2)) + m.distance = Var(m.Points, bounds=(0, 2)) + m.t = Var(m.Points, m.Centroids, bounds=(0, 2)) @m.Disjunct(m.Points, m.Centroids) def AssignPoint(d, i, k): m = d.model() d.LocalVars = Suffix(direction=Suffix.LOCAL) - d.LocalVars[d] = [m.t[i,k]] + d.LocalVars[d] = [m.t[i, k]] + def distance1(d): - return m.t[i,k] >= m.X[i] - m.cluster_center[k] + return m.t[i, k] >= m.X[i] - m.cluster_center[k] + def distance2(d): - return m.t[i,k] >= - (m.X[i] - m.cluster_center[k]) + return m.t[i, k] >= -(m.X[i] - m.cluster_center[k]) + d.dist1 = Constraint(rule=distance1) d.dist2 = Constraint(rule=distance2) - d.define_distance = Constraint(expr=m.distance[i] == m.t[i,k]) + d.define_distance = Constraint(expr=m.distance[i] == m.t[i, k]) @m.Disjunction(m.Points) def OneCentroidPerPt(m, i): @@ -2386,12 +2393,12 @@ def OneCentroidPerPt(m, i): TransformationFactory('gdp.hull').apply_to(m) # fix an optimal solution - m.AssignPoint[1,1].indicator_var.fix(1) - m.AssignPoint[1,2].indicator_var.fix(0) - m.AssignPoint[2,1].indicator_var.fix(0) - m.AssignPoint[2,2].indicator_var.fix(1) - m.AssignPoint[3,1].indicator_var.fix(1) - m.AssignPoint[3,2].indicator_var.fix(0) + m.AssignPoint[1, 1].indicator_var.fix(1) + m.AssignPoint[1, 2].indicator_var.fix(0) + m.AssignPoint[2, 1].indicator_var.fix(0) + m.AssignPoint[2, 2].indicator_var.fix(1) + m.AssignPoint[3, 1].indicator_var.fix(1) + m.AssignPoint[3, 2].indicator_var.fix(0) m.cluster_center[1].fix(0.3059) m.cluster_center[2].fix(0.8043) @@ -2400,17 +2407,18 @@ def OneCentroidPerPt(m, i): m.distance[2].fix(0) m.distance[3].fix(0) - m.t[1,1].fix(0.0613) - m.t[1,2].fix(0) - m.t[2,1].fix(0) - m.t[2,2].fix(0) - m.t[3,1].fix(0) - m.t[3,2].fix(0) + m.t[1, 1].fix(0.0613) + m.t[1, 2].fix(0) + m.t[2, 1].fix(0) + m.t[2, 2].fix(0) + m.t[3, 1].fix(0) + m.t[3, 2].fix(0) results = SolverFactory('gurobi').solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) TOL = 1e-8 for c in m.component_data_objects(Constraint, active=True): @@ -2419,8 +2427,8 @@ def OneCentroidPerPt(m, i): if c.upper is not None: self.assertLessEqual(value(c.body) - TOL, value(c.upper)) -class NetworkDisjuncts(unittest.TestCase, CommonTests): +class NetworkDisjuncts(unittest.TestCase, CommonTests): @unittest.skipIf(not ct.linear_solvers, "No linear solver available") def test_solution_maximize(self): ct.check_network_disjuncts(self, minimize=False, transformation='hull') @@ -2429,7 +2437,7 @@ def test_solution_maximize(self): def test_solution_minimize(self): ct.check_network_disjuncts(self, minimize=True, transformation='hull') -@unittest.skipUnless(sympy_available, "Sympy not available") + class LogicalConstraintsOnDisjuncts(unittest.TestCase): def test_logical_constraints_transformed(self): m = models.makeLogicalConstraintsOnDisjuncts() @@ -2443,25 +2451,61 @@ def test_logical_constraints_transformed(self): # first d[1]: cons = hull.get_transformed_constraints( - m.d[1].logic_to_linear.transformed_constraints[1]) + m.d[1]._logical_to_disjunctive.transformed_constraints[1] + ) + dis_z1 = hull.get_disaggregated_var( + m.d[1]._logical_to_disjunctive.auxiliary_vars[1], m.d[1] + ) + dis_y1 = hull.get_disaggregated_var(y1, m.d[1]) + self.assertEqual(len(cons), 1) # this simplifies because the dissaggregated variable is *always* 0 c = cons[0] + # hull transformation of z1 = 1 - y1: + # dis_z1 + dis_y1 = d[1].ind_var self.assertEqual(c.lower, 0) self.assertEqual(c.upper, 0) repn = generate_standard_repn(c.body) self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 1) - ct.check_linear_coef(self, repn, hull.get_disaggregated_var(y1, m.d[1]), - -1) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, dis_z1 + dis_y1 - m.d[1].binary_indicator_var + ) + + cons = hull.get_transformed_constraints( + m.d[1]._logical_to_disjunctive.transformed_constraints[2] + ) + self.assertEqual(len(cons), 1) + c = cons[0] + # hull transformation of z1 >= 1 + assertExpressionsStructurallyEqual( + self, + c.expr, + dis_z1 - (1 - m.d[1].binary_indicator_var) * 0 + >= m.d[1].binary_indicator_var, + ) # then d[4]: y1d = hull.get_disaggregated_var(y1, m.d[4]) y2d = hull.get_disaggregated_var(y2, m.d[4]) - # 1 <= 1 - Y[2] + Y[1] + z1d = hull.get_disaggregated_var( + m.d[4]._logical_to_disjunctive.auxiliary_vars[1], m.d[4] + ) + z2d = hull.get_disaggregated_var( + m.d[4]._logical_to_disjunctive.auxiliary_vars[2], m.d[4] + ) + z3d = hull.get_disaggregated_var( + m.d[4]._logical_to_disjunctive.auxiliary_vars[3], m.d[4] + ) + + # hull transformation of (1 - z1) + (1 - y1) + y2 >= 1: + # dz1 + dy1 - dy2 <= m.d[4].ind_var cons = hull.get_transformed_constraints( - m.d[4].logic_to_linear.transformed_constraints[1]) + m.d[4]._logical_to_disjunctive.transformed_constraints[1] + ) # these also are simple because it's really an equality, and since both # disaggregated variables will be 0 when the disjunct isn't selected, it # doesn't even need big-Ming. @@ -2471,24 +2515,163 @@ def test_logical_constraints_transformed(self): self.assertEqual(cons.upper, 0) repn = generate_standard_repn(cons.body) self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 2) - ct.check_linear_coef(self, repn, y2d, 1) - ct.check_linear_coef(self, repn, y1d, -1) - - # 1 <= 1 - Y[1] + Y[2] + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, -m.d[4].binary_indicator_var + z1d + y1d - y2d + ) + + # hull transformation of z1 + 1 - (1 - y1) >= 1 + # -y1d - z1d <= -d[4].ind_var cons = hull.get_transformed_constraints( - m.d[4].logic_to_linear.transformed_constraints[2]) + m.d[4]._logical_to_disjunctive.transformed_constraints[2] + ) self.assertEqual(len(cons), 1) cons = cons[0] self.assertIsNone(cons.lower) self.assertEqual(cons.upper, 0) repn = generate_standard_repn(cons.body) self.assertTrue(repn.is_linear()) - self.assertEqual(repn.constant, 0) - self.assertEqual(len(repn.linear_vars), 2) - ct.check_linear_coef(self, repn, y2d, -1) - ct.check_linear_coef(self, repn, y1d, 1) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, m.d[4].binary_indicator_var - y1d - z1d + ) + + # hull transformation of z1 + (1 - y2) >= 1 + # y2d - z1d <= 0 + cons = hull.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[3] + ) + self.assertEqual(len(cons), 1) + cons = cons[0] + self.assertIsNone(cons.lower) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual(self, simplified, y2d - z1d) + + # hull transformation of (1 - z2) + y1 + (1 - y2) >= 1 + # z2d - y1d + y2d <= m.d[4].ind_var + cons = hull.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[4] + ) + self.assertEqual(len(cons), 1) + cons = cons[0] + self.assertIsNone(cons.lower) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, -m.d[4].binary_indicator_var + z2d + y2d - y1d + ) + + # hull transformation of z2 + (1 - y1) >= 1 + # y1d - z2d <= 0 + cons = hull.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[5] + ) + self.assertEqual(len(cons), 1) + cons = cons[0] + self.assertIsNone(cons.lower) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual(self, simplified, y1d - z2d) + + # hull transformation of z2 + 1 - (1 - y2) >= 1 + # -y2d - z2d <= -d[4].ind_var + cons = hull.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[6] + ) + self.assertEqual(len(cons), 1) + cons = cons[0] + self.assertIsNone(cons.lower) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, simplified, m.d[4].binary_indicator_var - y2d - z2d + ) + + # hull transformation of z3 <= z1 + # z3d - z1d <= 0 + cons = hull.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[7] + ) + self.assertEqual(len(cons), 1) + cons = cons[0] + self.assertIsNone(cons.lower) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual(self, simplified, z3d - z1d) + + # hull transformation of z3 <= z2 + # z3d - z2d <= 0 + cons = hull.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[8] + ) + self.assertEqual(len(cons), 1) + cons = cons[0] + self.assertIsNone(cons.lower) + self.assertEqual(cons.upper, 0) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual(self, simplified, z3d - z2d) + + # hull transformation of 1 - z3 <= 2 - (z1 + z2) + cons = hull.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[9] + ) + self.assertEqual(len(cons), 1) + cons = cons[0] + assertExpressionsStructurallyEqual( + self, + cons.expr, + 1 - z3d - (2 - (z1d + z2d)) - (1 - m.d[4].binary_indicator_var) * (-1) + <= 0 * m.d[4].binary_indicator_var, + ) + + # hull transformation of z3 >= 1 + cons = hull.get_transformed_constraints( + m.d[4]._logical_to_disjunctive.transformed_constraints[10] + ) + self.assertEqual(len(cons), 1) + cons = cons[0] + assertExpressionsStructurallyEqual( + self, + cons.expr, + z3d - (1 - m.d[4].binary_indicator_var) * 0 >= m.d[4].binary_indicator_var, + ) self.assertFalse(m.bwahaha.active) self.assertFalse(m.p.active) @@ -2505,3 +2688,10 @@ def test_boolean_vars_on_disjunct(self): # of the Disjuncts m = models.makeBooleanVarsOnDisjuncts() ct.check_solution_obeys_logical_constraints(self, 'hull', m) + + def test_pickle(self): + ct.check_transformed_model_pickles(self, 'hull') + + @unittest.skipIf(not dill_available, "Dill is not available") + def test_dill_pickle(self): + ct.check_transformed_model_pickles_with_dill(self, 'hull') diff --git a/pyomo/gdp/tests/test_mbigm.py b/pyomo/gdp/tests/test_mbigm.py new file mode 100644 index 00000000000..9c400dfcd29 --- /dev/null +++ b/pyomo/gdp/tests/test_mbigm.py @@ -0,0 +1,862 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from io import StringIO +from os.path import join, normpath +import pickle + +from pyomo.common.fileutils import import_file, PYOMO_ROOT_DIR +from pyomo.common.log import LoggingIntercept +import pyomo.common.unittest as unittest +from pyomo.core.expr.compare import ( + assertExpressionsEqual, + assertExpressionsStructurallyEqual, +) + +from pyomo.environ import ( + BooleanVar, + ConcreteModel, + Constraint, + LogicalConstraint, + NonNegativeIntegers, + SolverFactory, + Suffix, + TransformationFactory, + value, + Var, +) +from pyomo.gdp import Disjunct, Disjunction, GDP_Error +from pyomo.gdp.tests.common_tests import ( + check_linear_coef, + check_obj_in_active_tree, + check_pprint_equal, +) +from pyomo.repn import generate_standard_repn + +gurobi_available = SolverFactory('gurobi').available() +exdir = normpath(join(PYOMO_ROOT_DIR, 'examples', 'gdp')) + + +class LinearModelDecisionTreeExample(unittest.TestCase): + def make_model(self): + m = ConcreteModel() + m.x1 = Var(bounds=(-10, 10)) + m.x2 = Var(bounds=(-20, 20)) + m.d = Var(bounds=(-1000, 1000)) + + m.d1 = Disjunct() + m.d1.x1_bounds = Constraint(expr=(0.5, m.x1, 2)) + m.d1.x2_bounds = Constraint(expr=(0.75, m.x2, 3)) + m.d1.func = Constraint(expr=m.x1 + m.x2 == m.d) + + m.d2 = Disjunct() + m.d2.x1_bounds = Constraint(expr=(0.65, m.x1, 3)) + m.d2.x2_bounds = Constraint(expr=(3, m.x2, 10)) + m.d2.func = Constraint(expr=2 * m.x1 + 4 * m.x2 + 7 == m.d) + + m.d3 = Disjunct() + m.d3.x1_bounds = Constraint(expr=(2, m.x1, 10)) + m.d3.x2_bounds = Constraint(expr=(0.55, m.x2, 1)) + m.d3.func = Constraint(expr=m.x1 - 5 * m.x2 - 3 == m.d) + + m.disjunction = Disjunction(expr=[m.d1, m.d2, m.d3]) + + return m + + def get_Ms(self, m): + return { + (m.d1.x1_bounds, m.d2): (0.15, 1), + (m.d1.x2_bounds, m.d2): (2.25, 7), + (m.d1.x1_bounds, m.d3): (1.5, 8), + (m.d1.x2_bounds, m.d3): (-0.2, -2), + (m.d2.x1_bounds, m.d1): (-0.15, -1), + (m.d2.x2_bounds, m.d1): (-2.25, -7), + (m.d2.x1_bounds, m.d3): (1.35, 7), + (m.d2.x2_bounds, m.d3): (-2.45, -9), + (m.d3.x1_bounds, m.d1): (-1.5, -8), + (m.d3.x2_bounds, m.d1): (0.2, 2), + (m.d3.x1_bounds, m.d2): (-1.35, -7), + (m.d3.x2_bounds, m.d2): (2.45, 9), + (m.d1.func, m.d2): (-40, -16.65), + (m.d1.func, m.d3): (6.3, 9), + (m.d2.func, m.d1): (9.75, 18), + (m.d2.func, m.d3): (16.95, 29), + (m.d3.func, m.d1): (-21, -7.5), + (m.d3.func, m.d2): (-103, -37.65), + } + + def check_untightened_bounds_constraint( + self, cons, var, parent_disj, disjunction, Ms, lower=None, upper=None + ): + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 3) + self.assertIsNone(cons.lower) + self.assertEqual(value(cons.upper), 0) + if lower is not None: + self.assertEqual(repn.constant, lower) + check_linear_coef(self, repn, var, -1) + for disj in disjunction.disjuncts: + if disj is not parent_disj: + check_linear_coef( + self, repn, disj.binary_indicator_var, Ms[disj] - lower + ) + if upper is not None: + self.assertEqual(repn.constant, -upper) + check_linear_coef(self, repn, var, 1) + for disj in disjunction.disjuncts: + if disj is not parent_disj: + check_linear_coef( + self, repn, disj.binary_indicator_var, -Ms[disj] + upper + ) + + def check_all_untightened_bounds_constraints(self, m, mbm): + # d1.x1_bounds + cons = mbm.get_transformed_constraints(m.d1.x1_bounds) + self.assertEqual(len(cons), 2) + lower = cons[0] + check_obj_in_active_tree(self, lower) + self.check_untightened_bounds_constraint( + lower, m.x1, m.d1, m.disjunction, {m.d2: 0.65, m.d3: 2}, lower=0.5 + ) + upper = cons[1] + check_obj_in_active_tree(self, upper) + self.check_untightened_bounds_constraint( + upper, m.x1, m.d1, m.disjunction, {m.d2: 3, m.d3: 10}, upper=2 + ) + + # d1.x2_bounds + cons = mbm.get_transformed_constraints(m.d1.x2_bounds) + self.assertEqual(len(cons), 2) + lower = cons[0] + check_obj_in_active_tree(self, lower) + self.check_untightened_bounds_constraint( + lower, m.x2, m.d1, m.disjunction, {m.d2: 3, m.d3: 0.55}, lower=0.75 + ) + upper = cons[1] + check_obj_in_active_tree(self, upper) + self.check_untightened_bounds_constraint( + upper, m.x2, m.d1, m.disjunction, {m.d2: 10, m.d3: 1}, upper=3 + ) + + # d2.x1_bounds + cons = mbm.get_transformed_constraints(m.d2.x1_bounds) + self.assertEqual(len(cons), 2) + lower = cons[0] + check_obj_in_active_tree(self, lower) + self.check_untightened_bounds_constraint( + lower, m.x1, m.d2, m.disjunction, {m.d1: 0.5, m.d3: 2}, lower=0.65 + ) + upper = cons[1] + check_obj_in_active_tree(self, upper) + self.check_untightened_bounds_constraint( + upper, m.x1, m.d2, m.disjunction, {m.d1: 2, m.d3: 10}, upper=3 + ) + + # d2.x2_bounds + cons = mbm.get_transformed_constraints(m.d2.x2_bounds) + self.assertEqual(len(cons), 2) + lower = cons[0] + check_obj_in_active_tree(self, lower) + self.check_untightened_bounds_constraint( + lower, m.x2, m.d2, m.disjunction, {m.d1: 0.75, m.d3: 0.55}, lower=3 + ) + upper = cons[1] + self.check_untightened_bounds_constraint( + upper, m.x2, m.d2, m.disjunction, {m.d1: 3, m.d3: 1}, upper=10 + ) + + # d3.x1_bounds + cons = mbm.get_transformed_constraints(m.d3.x1_bounds) + self.assertEqual(len(cons), 2) + lower = cons[0] + self.check_untightened_bounds_constraint( + lower, m.x1, m.d3, m.disjunction, {m.d1: 0.5, m.d2: 0.65}, lower=2 + ) + upper = cons[1] + check_obj_in_active_tree(self, upper) + self.check_untightened_bounds_constraint( + upper, m.x1, m.d3, m.disjunction, {m.d1: 2, m.d2: 3}, upper=10 + ) + + # d3.x2_bounds + cons = mbm.get_transformed_constraints(m.d3.x2_bounds) + self.assertEqual(len(cons), 2) + lower = cons[0] + check_obj_in_active_tree(self, lower) + self.check_untightened_bounds_constraint( + lower, m.x2, m.d3, m.disjunction, {m.d1: 0.75, m.d2: 3}, lower=0.55 + ) + upper = cons[1] + check_obj_in_active_tree(self, upper) + self.check_untightened_bounds_constraint( + upper, m.x2, m.d3, m.disjunction, {m.d1: 3, m.d2: 10}, upper=1 + ) + + def check_linear_func_constraints(self, m, mbm, Ms=None): + if Ms is None: + Ms = self.get_Ms(m) + + # d1.func + cons = mbm.get_transformed_constraints(m.d1.func) + self.assertEqual(len(cons), 2) + lower = cons[0] + check_obj_in_active_tree(self, lower) + self.assertEqual(value(lower.upper), 0) + self.assertIsNone(lower.lower) + repn = generate_standard_repn(lower.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 5) + self.assertEqual(repn.constant, 0) + check_linear_coef(self, repn, m.x1, -1) + check_linear_coef(self, repn, m.x2, -1) + check_linear_coef(self, repn, m.d, 1) + check_linear_coef(self, repn, m.d2.binary_indicator_var, Ms[m.d1.func, m.d2][0]) + check_linear_coef(self, repn, m.d3.binary_indicator_var, Ms[m.d1.func, m.d3][0]) + upper = cons[1] + check_obj_in_active_tree(self, upper) + self.assertEqual(value(upper.upper), 0) + self.assertIsNone(upper.lower) + repn = generate_standard_repn(upper.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 5) + self.assertEqual(repn.constant, 0) + check_linear_coef(self, repn, m.x1, 1) + check_linear_coef(self, repn, m.x2, 1) + check_linear_coef(self, repn, m.d, -1) + check_linear_coef( + self, repn, m.d2.binary_indicator_var, -Ms[m.d1.func, m.d2][1] + ) + check_linear_coef( + self, repn, m.d3.binary_indicator_var, -Ms[m.d1.func, m.d3][1] + ) + + # d2.func + cons = mbm.get_transformed_constraints(m.d2.func) + self.assertEqual(len(cons), 2) + lower = cons[0] + check_obj_in_active_tree(self, lower) + self.assertEqual(value(lower.upper), 0) + self.assertIsNone(lower.lower) + repn = generate_standard_repn(lower.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 5) + self.assertEqual(repn.constant, -7) + check_linear_coef(self, repn, m.x1, -2) + check_linear_coef(self, repn, m.x2, -4) + check_linear_coef(self, repn, m.d, 1) + check_linear_coef(self, repn, m.d1.binary_indicator_var, Ms[m.d2.func, m.d1][0]) + check_linear_coef(self, repn, m.d3.binary_indicator_var, Ms[m.d2.func, m.d3][0]) + upper = cons[1] + check_obj_in_active_tree(self, upper) + self.assertEqual(value(upper.upper), 0) + self.assertIsNone(upper.lower) + repn = generate_standard_repn(upper.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 5) + self.assertEqual(repn.constant, 7) + check_linear_coef(self, repn, m.x1, 2) + check_linear_coef(self, repn, m.x2, 4) + check_linear_coef(self, repn, m.d, -1) + check_linear_coef( + self, repn, m.d1.binary_indicator_var, -Ms[m.d2.func, m.d1][1] + ) + check_linear_coef( + self, repn, m.d3.binary_indicator_var, -Ms[m.d2.func, m.d3][1] + ) + + # d3.func + cons = mbm.get_transformed_constraints(m.d3.func) + self.assertEqual(len(cons), 2) + lower = cons[0] + check_obj_in_active_tree(self, lower) + self.assertEqual(value(lower.upper), 0) + self.assertIsNone(lower.lower) + repn = generate_standard_repn(lower.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 5) + self.assertEqual(repn.constant, 3) + check_linear_coef(self, repn, m.x1, -1) + check_linear_coef(self, repn, m.x2, 5) + check_linear_coef(self, repn, m.d, 1) + check_linear_coef(self, repn, m.d1.binary_indicator_var, Ms[m.d3.func, m.d1][0]) + check_linear_coef(self, repn, m.d2.binary_indicator_var, Ms[m.d3.func, m.d2][0]) + upper = cons[1] + check_obj_in_active_tree(self, upper) + self.assertEqual(value(upper.upper), 0) + self.assertIsNone(upper.lower) + repn = generate_standard_repn(upper.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), 5) + self.assertEqual(repn.constant, -3) + check_linear_coef(self, repn, m.x1, 1) + check_linear_coef(self, repn, m.x2, -5) + check_linear_coef(self, repn, m.d, -1) + check_linear_coef( + self, repn, m.d1.binary_indicator_var, -Ms[m.d3.func, m.d1][1] + ) + check_linear_coef( + self, repn, m.d2.binary_indicator_var, -Ms[m.d3.func, m.d2][1] + ) + + @unittest.skipUnless(gurobi_available, "Gurobi is not available") + def test_calculated_Ms_correct(self): + # Calculating all the Ms is expensive, so we just do it in this one test + # and then specify them for the others + m = self.make_model() + mbm = TransformationFactory('gdp.mbigm') + mbm.apply_to(m, reduce_bound_constraints=False) + + self.check_all_untightened_bounds_constraints(m, mbm) + self.check_linear_func_constraints(m, mbm) + + self.assertStructuredAlmostEqual(mbm.get_all_M_values(m), self.get_Ms(m)) + + def test_transformed_constraints_correct_Ms_specified(self): + m = self.make_model() + mbm = TransformationFactory('gdp.mbigm') + mbm.apply_to(m, bigM=self.get_Ms(m), reduce_bound_constraints=False) + + self.check_all_untightened_bounds_constraints(m, mbm) + self.check_linear_func_constraints(m, mbm) + + def test_pickle_transformed_model(self): + m = self.make_model() + TransformationFactory('gdp.mbigm').apply_to(m, bigM=self.get_Ms(m)) + + # pickle and unpickle the transformed model + unpickle = pickle.loads(pickle.dumps(m)) + + check_pprint_equal(self, m, unpickle) + + def test_mappings_between_original_and_transformed_components(self): + m = self.make_model() + mbm = TransformationFactory('gdp.mbigm') + mbm.apply_to(m, bigM=self.get_Ms(m), reduce_bound_constraints=False) + + d1_block = m.d1.transformation_block + self.assertIs(mbm.get_src_disjunct(d1_block), m.d1) + d2_block = m.d2.transformation_block + self.assertIs(mbm.get_src_disjunct(d2_block), m.d2) + d3_block = m.d3.transformation_block + self.assertIs(mbm.get_src_disjunct(d3_block), m.d3) + + for disj in [m.d1, m.d2, m.d3]: + for comp in ['x1_bounds', 'x2_bounds', 'func']: + original_cons = disj.component(comp) + transformed = mbm.get_transformed_constraints(original_cons) + for cons in transformed: + self.assertIn(original_cons, mbm.get_src_constraints(cons)) + + def test_algebraic_constraints(self): + m = self.make_model() + mbm = TransformationFactory('gdp.mbigm') + mbm.apply_to(m, bigM=self.get_Ms(m), reduce_bound_constraints=False) + + self.assertIsNotNone(m.disjunction.algebraic_constraint) + xor = m.disjunction.algebraic_constraint + self.assertIs(mbm.get_src_disjunction(xor), m.disjunction) + + self.assertEqual(value(xor.lower), 1) + self.assertEqual(value(xor.upper), 1) + repn = generate_standard_repn(xor.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(value(repn.constant), 0) + self.assertEqual(len(repn.linear_vars), 3) + check_linear_coef(self, repn, m.d1.binary_indicator_var, 1) + check_linear_coef(self, repn, m.d2.binary_indicator_var, 1) + check_linear_coef(self, repn, m.d3.binary_indicator_var, 1) + check_obj_in_active_tree(self, xor) + + def check_pretty_bound_constraints(self, cons, var, bounds, lb): + self.assertEqual(value(cons.upper), 0) + self.assertIsNone(cons.lower) + repn = generate_standard_repn(cons.body) + self.assertTrue(repn.is_linear()) + self.assertEqual(len(repn.linear_vars), len(bounds) + 1) + self.assertEqual(repn.constant, 0) + if lb: + check_linear_coef(self, repn, var, -1) + for disj, bnd in bounds.items(): + check_linear_coef(self, repn, disj.binary_indicator_var, bnd) + else: + check_linear_coef(self, repn, var, 1) + for disj, bnd in bounds.items(): + check_linear_coef(self, repn, disj.binary_indicator_var, -bnd) + + def test_bounds_constraints_correct(self): + m = self.make_model() + + mbm = TransformationFactory('gdp.mbigm') + mbm.apply_to(m, bigM=self.get_Ms(m), reduce_bound_constraints=True) + + # Check that all the constraints are mapped to the same transformed + # constraints. + cons = mbm.get_transformed_constraints(m.d1.x1_bounds) + self.assertEqual(len(cons), 2) + same = mbm.get_transformed_constraints(m.d2.x1_bounds) + self.assertEqual(len(same), 2) + self.assertIs(same[0], cons[0]) + self.assertIs(same[1], cons[1]) + sameagain = mbm.get_transformed_constraints(m.d3.x1_bounds) + self.assertEqual(len(sameagain), 2) + self.assertIs(sameagain[0], cons[0]) + self.assertIs(sameagain[1], cons[1]) + + self.check_pretty_bound_constraints( + cons[0], m.x1, {m.d1: 0.5, m.d2: 0.65, m.d3: 2}, lb=True + ) + self.check_pretty_bound_constraints( + cons[1], m.x1, {m.d1: 2, m.d2: 3, m.d3: 10}, lb=False + ) + + cons = mbm.get_transformed_constraints(m.d1.x2_bounds) + self.assertEqual(len(cons), 2) + same = mbm.get_transformed_constraints(m.d2.x2_bounds) + self.assertEqual(len(same), 2) + self.assertIs(same[0], cons[0]) + self.assertIs(same[1], cons[1]) + sameagain = mbm.get_transformed_constraints(m.d3.x2_bounds) + self.assertEqual(len(sameagain), 2) + self.assertIs(sameagain[0], cons[0]) + self.assertIs(sameagain[1], cons[1]) + + self.check_pretty_bound_constraints( + cons[0], m.x2, {m.d1: 0.75, m.d2: 3, m.d3: 0.55}, lb=True + ) + self.check_pretty_bound_constraints( + cons[1], m.x2, {m.d1: 3, m.d2: 10, m.d3: 1}, lb=False + ) + + def test_bound_constraints_correct_with_redundant_constraints(self): + m = self.make_model() + + m.d1.bogus_x1_bounds = Constraint(expr=(0.25, m.x1, 2.25)) + mbm = TransformationFactory('gdp.mbigm') + mbm.apply_to(m, reduce_bound_constraints=True, bigM=self.get_Ms(m)) + + cons = mbm.get_transformed_constraints(m.d1.x1_bounds) + self.assertEqual(len(cons), 2) + self.check_pretty_bound_constraints( + cons[0], m.x1, {m.d1: 0.5, m.d2: 0.65, m.d3: 2}, lb=True + ) + self.check_pretty_bound_constraints( + cons[1], m.x1, {m.d1: 2, m.d2: 3, m.d3: 10}, lb=False + ) + + cons = mbm.get_transformed_constraints(m.d1.x2_bounds) + self.assertEqual(len(cons), 2) + self.check_pretty_bound_constraints( + cons[0], m.x2, {m.d1: 0.75, m.d2: 3, m.d3: 0.55}, lb=True + ) + self.check_pretty_bound_constraints( + cons[1], m.x2, {m.d1: 3, m.d2: 10, m.d3: 1}, lb=False + ) + + def test_Ms_specified_as_args_honored(self): + m = self.make_model() + + Ms = self.get_Ms(m) + # now modify some of them--these might not be valid and I don't care + Ms[m.d2.x2_bounds, m.d3] = (-100, 100) + Ms[m.d3.func, m.d1] = [10, 20] + + mbigm = TransformationFactory('gdp.mbigm') + mbigm.apply_to(m, bigM=Ms, reduce_bound_constraints=False) + + self.assertStructuredAlmostEqual(mbigm.get_all_M_values(m), Ms) + self.check_linear_func_constraints(m, mbigm, Ms) + + # Just check the constraint we should have changed + cons = mbigm.get_transformed_constraints(m.d2.x2_bounds) + self.assertEqual(len(cons), 2) + # This is a little backwards because I structured these so we give the + # bound not the value of M. So the logic here is that if we want M to + # turn out to be -100, we need b - 3 = -100, so we pretend the bound was + # b=-97. The same logic holds for the next one too. + self.check_untightened_bounds_constraint( + cons[0], m.x2, m.d2, m.disjunction, {m.d1: 0.75, m.d3: -97}, lower=3 + ) + self.check_untightened_bounds_constraint( + cons[1], m.x2, m.d2, m.disjunction, {m.d1: 3, m.d3: 110}, upper=10 + ) + + # TODO: If Suffixes allow tuple keys then we can support them and it will + # look something like this: + # def test_Ms_specified_as_suffixes_honored(self): + # m = self.make_model() + # m.BigM = Suffix(direction=Suffix.LOCAL) + # m.BigM[(m.d2.x2_bounds, m.d3)] = (-100, 100) + # m.d3.BigM = Suffix(direction=Suffix.LOCAL) + # m.d3.BigM[(m.d3.func, m.d1)] = [10, 20] + + # arg_Ms = self.get_Ms(m) + # # delete the keys we replaced above + # del arg_Ms[m.d2.x2_bounds, m.d3] + # del arg_Ms[m.d3.func, m.d1] + + # mbigm = TransformationFactory('gdp.mbigm') + # mbigm.apply_to(m, bigM=arg_Ms) + + # Ms = self.get_Ms(m) + # self.assertStructuredAlmostEqual(mbigm.get_all_M_values(m), Ms) + # self.check_linear_func_constraints(m, mbigm, Ms) + + # # Just check the constraint we should have changed + # cons = mbigm.get_transformed_constraints(m.d2.x2_bounds) + # self.assertEqual(len(cons), 2) + # # This is a little backwards because I structured these so we give the + # # bound not the value of M. So the logic here is that if we want M to + # # turn out to be -100, we need b - 3 = -100, so we pretend the bound was + # # b=-97. The same logic holds for the next one too. + # self.check_untightened_bounds_constraint(cons[0], m.x2, m.d2, + # m.disjunction, {m.d1: 0.75, + # m.d3: -97}, + # lower=3) + # self.check_untightened_bounds_constraint(cons[1], m.x2, m.d2, + # m.disjunction, {m.d1: 3, + # m.d3: 110}, + # upper=10) + + def add_fourth_disjunct(self, m): + m.disjunction.deactivate() + + # Add a disjunct + m.d4 = Disjunct() + m.d4.x1_ub = Constraint(expr=m.x1 <= 8) + m.d4.x2_lb = Constraint(expr=m.x2 >= -5) + + # Make a four-term disjunction + m.disjunction2 = Disjunction(expr=[m.d1, m.d2, m.d3, m.d4]) + + def test_deactivated_disjunct(self): + m = self.make_model() + # Add a new thing and deactivate it + self.add_fourth_disjunct(m) + m.d4.deactivate() + + mbm = TransformationFactory('gdp.mbigm') + mbm.apply_to(m, bigM=self.get_Ms(m), reduce_bound_constraints=False) + + # we don't transform d1 + self.assertIsNone(m.d4.transformation_block) + # and everything else is the same + self.check_linear_func_constraints(m, mbm) + self.check_all_untightened_bounds_constraints(m, mbm) + + @unittest.skipUnless(gurobi_available, "Gurobi is not available") + def test_var_bounds_substituted_for_missing_bound_constraints(self): + m = self.make_model() + # Add a new thing with constraints that don't give both bounds on x1 and + # x2 + self.add_fourth_disjunct(m) + + mbm = TransformationFactory('gdp.mbigm') + # We will ignore the specified M values for the bounds constraints, but + # issue a warning about what was unnecessary. + out = StringIO() + with LoggingIntercept(out, 'pyomo.gdp.mbigm'): + mbm.apply_to(m, bigM=self.get_Ms(m), reduce_bound_constraints=True) + + warnings = out.getvalue() + self.assertIn( + "Unused arguments in the bigM map! " + "These arguments were not used by the " + "transformation:", + warnings, + ) + for cons, disj in [ + (m.d1.x1_bounds, m.d2), + (m.d1.x2_bounds, m.d2), + (m.d1.x1_bounds, m.d3), + (m.d1.x2_bounds, m.d3), + (m.d2.x1_bounds, m.d1), + (m.d2.x2_bounds, m.d1), + (m.d2.x1_bounds, m.d3), + (m.d2.x2_bounds, m.d3), + (m.d3.x1_bounds, m.d1), + (m.d3.x2_bounds, m.d1), + (m.d3.x1_bounds, m.d2), + (m.d3.x2_bounds, m.d2), + ]: + self.assertIn("(%s, %s)" % (cons.name, disj.name), warnings) + + # check that the bounds constraints are right + # for x1: + cons = mbm.get_transformed_constraints(m.d1.x1_bounds) + self.assertEqual(len(cons), 2) + sameish = mbm.get_transformed_constraints(m.d4.x1_ub) + self.assertEqual(len(sameish), 1) + self.assertIs(sameish[0], cons[1]) + + self.check_pretty_bound_constraints( + cons[1], m.x1, {m.d1: 2, m.d2: 3, m.d3: 10, m.d4: 8}, lb=False + ) + self.check_pretty_bound_constraints( + cons[0], m.x1, {m.d1: 0.5, m.d2: 0.65, m.d3: 2, m.d4: -10}, lb=True + ) + + # and for x2: + cons = mbm.get_transformed_constraints(m.d1.x2_bounds) + self.assertEqual(len(cons), 2) + sameish = mbm.get_transformed_constraints(m.d4.x2_lb) + self.assertEqual(len(sameish), 1) + self.assertIs(sameish[0], cons[0]) + + self.check_pretty_bound_constraints( + cons[1], m.x2, {m.d1: 3, m.d2: 10, m.d3: 1, m.d4: 20}, lb=False + ) + self.check_pretty_bound_constraints( + cons[0], m.x2, {m.d1: 0.75, m.d2: 3, m.d3: 0.55, m.d4: -5}, lb=True + ) + + def test_nested_gdp_error(self): + m = self.make_model() + m.d1.disjunction = Disjunction(expr=[m.x1 >= 5, m.x1 <= 4]) + with self.assertRaisesRegex( + GDP_Error, + "Found nested Disjunction 'd1.disjunction'. The multiple bigm " + "transformation does not support nested GDPs. " + "Please flatten the model before calling the " + "transformation", + ): + TransformationFactory('gdp.mbigm').apply_to(m) + + @unittest.skipUnless(gurobi_available, "Gurobi is not available") + def test_logical_constraints_on_disjuncts(self): + m = self.make_model() + m.d1.Y = BooleanVar() + m.d1.Z = BooleanVar() + m.d1.logical = LogicalConstraint(expr=m.d1.Y.implies(m.d1.Z)) + + mbm = TransformationFactory('gdp.mbigm') + mbm.apply_to(m, bigM=self.get_Ms(m), reduce_bound_constraints=False) + + y = m.d1.Y.get_associated_binary() + z = m.d1.Z.get_associated_binary() + z1 = m.d1._logical_to_disjunctive.auxiliary_vars[3] + + # MbigM transformation of: (1 - z1) + (1 - y) + z >= 1 + # (1 - z1) + (1 - y) + z >= 1 - d2.ind_var - d3.ind_var + transformed = mbm.get_transformed_constraints( + m.d1._logical_to_disjunctive.transformed_constraints[1] + ) + self.assertEqual(len(transformed), 1) + c = transformed[0] + check_obj_in_active_tree(self, c) + self.assertIsNone(c.lower) + self.assertEqual(value(c.upper), 0) + repn = generate_standard_repn(c.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, + simplified, + -m.d2.binary_indicator_var - m.d3.binary_indicator_var + z1 + y - z - 1, + ) + + # MbigM transformation of: z1 + 1 - (1 - y) >= 1 + # z1 + y >= 1 - d2.ind_var - d3.ind_var + transformed = mbm.get_transformed_constraints( + m.d1._logical_to_disjunctive.transformed_constraints[2] + ) + self.assertEqual(len(transformed), 1) + c = transformed[0] + check_obj_in_active_tree(self, c) + self.assertIsNone(c.lower) + self.assertEqual(value(c.upper), 0) + repn = generate_standard_repn(c.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, + simplified, + -m.d2.binary_indicator_var - m.d3.binary_indicator_var - y - z1 + 1, + ) + + # MbigM transformation of: z1 + 1 - z >= 1 + # z1 + 1 - z >= 1 - d2.ind_var - d3.ind_var + transformed = mbm.get_transformed_constraints( + m.d1._logical_to_disjunctive.transformed_constraints[3] + ) + self.assertEqual(len(transformed), 1) + c = transformed[0] + check_obj_in_active_tree(self, c) + self.assertIsNone(c.lower) + self.assertEqual(value(c.upper), 0) + repn = generate_standard_repn(c.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsStructurallyEqual( + self, + simplified, + -m.d2.binary_indicator_var - m.d3.binary_indicator_var + z - z1, + ) + + def check_traditionally_bigmed_constraints(self, m, mbm, Ms): + cons = mbm.get_transformed_constraints(m.d1.func) + self.assertEqual(len(cons), 2) + lb = cons[0] + ub = cons[1] + assertExpressionsEqual( + self, + lb.expr, + 0.0 <= m.x1 + m.x2 - m.d - Ms[m.d1][0] * (1 - m.d1.binary_indicator_var), + ) + # [ESJ 11/23/22]: It's really hard to use assertExpressionsEqual on the + # ub constraints because SumExpressions are sharing args, I think. So + # when they get constructed in the transformation (because they come + # after the lb constraints), there are nested SumExpressions. Instead of + # trying to reproduce them I am just building a "flat" SumExpression + # with generate_standard_repn and comparing that. + self.assertIsNone(ub.lower) + self.assertEqual(ub.upper, 0) + repn = generate_standard_repn(ub.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsEqual( + self, + simplified, + m.x1 + m.x2 - m.d + Ms[m.d1][1] * m.d1.binary_indicator_var - Ms[m.d1][1], + ) + + cons = mbm.get_transformed_constraints(m.d2.func) + self.assertEqual(len(cons), 2) + lb = cons[0] + ub = cons[1] + assertExpressionsEqual( + self, + lb.expr, + 0.0 + <= 2 * m.x1 + + 4 * m.x2 + + 7 + - m.d + - Ms[m.d2][0] * (1 - m.d2.binary_indicator_var), + ) + self.assertIsNone(ub.lower) + self.assertEqual(ub.upper, 0) + repn = generate_standard_repn(ub.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsEqual( + self, + simplified, + 2 * m.x1 + + 4 * m.x2 + - m.d + + Ms[m.d2][1] * m.d2.binary_indicator_var + - (Ms[m.d2][1] - 7), + ) + + cons = mbm.get_transformed_constraints(m.d3.func) + self.assertEqual(len(cons), 2) + lb = cons[0] + ub = cons[1] + assertExpressionsEqual( + self, + lb.expr, + 0.0 + <= m.x1 + - 5 * m.x2 + - 3 + - m.d + - Ms[m.d3][0] * (1 - m.d3.binary_indicator_var), + ) + self.assertIsNone(ub.lower) + self.assertEqual(ub.upper, 0) + repn = generate_standard_repn(ub.body) + self.assertTrue(repn.is_linear()) + simplified = repn.constant + sum( + repn.linear_coefs[i] * repn.linear_vars[i] + for i in range(len(repn.linear_vars)) + ) + assertExpressionsEqual( + self, + simplified, + m.x1 + - 5 * m.x2 + - m.d + + Ms[m.d3][1] * m.d3.binary_indicator_var + - (Ms[m.d3][1] + 3), + ) + + def test_only_multiple_bigm_bound_constraints(self): + m = self.make_model() + mbm = TransformationFactory('gdp.mbigm') + mbm.apply_to(m, only_mbigm_bound_constraints=True) + + cons = mbm.get_transformed_constraints(m.d1.x1_bounds) + self.assertEqual(len(cons), 2) + self.check_pretty_bound_constraints( + cons[0], m.x1, {m.d1: 0.5, m.d2: 0.65, m.d3: 2}, lb=True + ) + self.check_pretty_bound_constraints( + cons[1], m.x1, {m.d1: 2, m.d2: 3, m.d3: 10}, lb=False + ) + + cons = mbm.get_transformed_constraints(m.d1.x2_bounds) + self.assertEqual(len(cons), 2) + self.check_pretty_bound_constraints( + cons[0], m.x2, {m.d1: 0.75, m.d2: 3, m.d3: 0.55}, lb=True + ) + self.check_pretty_bound_constraints( + cons[1], m.x2, {m.d1: 3, m.d2: 10, m.d3: 1}, lb=False + ) + + self.check_traditionally_bigmed_constraints( + m, + mbm, + {m.d1: (-1030.0, 1030.0), m.d2: (-1093.0, 1107.0), m.d3: (-1113.0, 1107.0)}, + ) + + def test_only_multiple_bigm_bound_constraints_arg_Ms(self): + m = self.make_model() + mbm = TransformationFactory('gdp.mbigm') + Ms = {m.d1: 1050, m.d2.func: (-2000, 1200), None: 4000} + mbm.apply_to(m, only_mbigm_bound_constraints=True, bigM=Ms) + + cons = mbm.get_transformed_constraints(m.d1.x1_bounds) + self.assertEqual(len(cons), 2) + self.check_pretty_bound_constraints( + cons[0], m.x1, {m.d1: 0.5, m.d2: 0.65, m.d3: 2}, lb=True + ) + self.check_pretty_bound_constraints( + cons[1], m.x1, {m.d1: 2, m.d2: 3, m.d3: 10}, lb=False + ) + + cons = mbm.get_transformed_constraints(m.d1.x2_bounds) + self.assertEqual(len(cons), 2) + self.check_pretty_bound_constraints( + cons[0], m.x2, {m.d1: 0.75, m.d2: 3, m.d3: 0.55}, lb=True + ) + self.check_pretty_bound_constraints( + cons[1], m.x2, {m.d1: 3, m.d2: 10, m.d3: 1}, lb=False + ) + + self.check_traditionally_bigmed_constraints( + m, mbm, {m.d1: (-1050, 1050), m.d2: (-2000, 1200), m.d3: (-4000, 4000)} + ) diff --git a/pyomo/gdp/tests/test_partition_disjuncts.py b/pyomo/gdp/tests/test_partition_disjuncts.py index a6fd926943c..38e7ae19676 100644 --- a/pyomo/gdp/tests/test_partition_disjuncts.py +++ b/pyomo/gdp/tests/test_partition_disjuncts.py @@ -11,17 +11,33 @@ import pyomo.common.unittest as unittest from pyomo.environ import ( - TransformationFactory, Constraint, ConcreteModel, Var, RangeSet, Objective, - maximize, SolverFactory, Any, Reference, LogicalConstraint) + TransformationFactory, + Constraint, + ConcreteModel, + Var, + RangeSet, + Objective, + maximize, + SolverFactory, + Any, + Reference, + LogicalConstraint, +) from pyomo.core.expr.logical_expr import ( - EquivalenceExpression, NotExpression, AndExpression, ExactlyExpression) -from pyomo.core.expr.sympy_tools import sympy_available + EquivalenceExpression, + NotExpression, + AndExpression, + ExactlyExpression, +) from pyomo.gdp import Disjunct, Disjunction from pyomo.gdp.util import GDP_Error, check_model_algebraic from pyomo.gdp.plugins.partition_disjuncts import ( - arbitrary_partition, compute_optimal_bounds, compute_fbbt_bounds) + arbitrary_partition, + compute_optimal_bounds, + compute_fbbt_bounds, +) from pyomo.core import Block, value -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR import pyomo.gdp.tests.common_tests as ct import pyomo.gdp.tests.models as models from pyomo.repn import generate_standard_repn @@ -30,10 +46,13 @@ solvers = check_available_solvers('gurobi_direct') + class CommonTests: def diff_apply_to_and_create_using(self, model, **kwargs): - ct.diff_apply_to_and_create_using(self, model, - 'gdp.partition_disjuncts', **kwargs) + ct.diff_apply_to_and_create_using( + self, model, 'gdp.partition_disjuncts', **kwargs + ) + class PaperTwoCircleExample(unittest.TestCase, CommonTests): def check_disj_constraint(self, c1, upper, auxVar1, auxVar2): @@ -45,7 +64,7 @@ def check_disj_constraint(self, c1, upper, auxVar1, auxVar2): self.assertEqual(repn.constant, 0) self.assertIs(repn.linear_vars[0], auxVar1) self.assertIs(repn.linear_vars[1], auxVar2) - self.assertEqual(repn.linear_coefs[0], 1) + self.assertEqual(repn.linear_coefs[0], 1) self.assertEqual(repn.linear_coefs[1], 1) def check_global_constraint_disj1(self, c1, auxVar, var1, var2): @@ -86,9 +105,19 @@ def check_global_constraint_disj2(self, c1, auxVar, var1, var2): self.assertIs(repn.quadratic_vars[1][1], var2) self.assertIsNone(repn.nonlinear_expr) - def check_aux_var_bounds(self, aux_vars1, aux_vars2, aux11lb, aux11ub, - aux12lb, aux12ub, aux21lb, aux21ub, aux22lb, - aux22ub): + def check_aux_var_bounds( + self, + aux_vars1, + aux_vars2, + aux11lb, + aux11ub, + aux12lb, + aux12ub, + aux21lb, + aux21ub, + aux22lb, + aux22ub, + ): self.assertEqual(len(aux_vars1), 2) # Gurobi default constraint tolerance is 1e-6, so let's say that's # our goal too. Have to tighten Gurobi's tolerance to even get here @@ -105,7 +134,8 @@ def check_aux_var_bounds(self, aux_vars1, aux_vars2, aux11lb, aux11ub, self.assertAlmostEqual(aux_vars2[1].ub, aux22ub, places=6) def check_transformation_block_disjuncts_and_constraints( - self, m, original_disjunction, disjunction_name=None): + self, m, original_disjunction, disjunction_name=None + ): b = m.component("_pyomo_gdp_partition_disjuncts_reformulation") self.assertIsInstance(b, Block) @@ -133,10 +163,11 @@ def check_transformation_block_disjuncts_and_constraints( self.assertIsInstance(equivalence, LogicalConstraint) self.assertEqual(len(equivalence), 2) for i, variables in enumerate( - [(original_disjunction.disjuncts[0].indicator_var, - disj1.indicator_var), - (original_disjunction.disjuncts[1].indicator_var, - disj2.indicator_var)]): + [ + (original_disjunction.disjuncts[0].indicator_var, disj1.indicator_var), + (original_disjunction.disjuncts[1].indicator_var, disj2.indicator_var), + ] + ): cons = equivalence[i] self.assertIsInstance(cons.body, EquivalenceExpression) self.assertIs(cons.body.args[0], variables[0]) @@ -144,26 +175,32 @@ def check_transformation_block_disjuncts_and_constraints( return b, disj1, disj2 - def check_transformation_block_structure(self, m, aux11lb, aux11ub, aux12lb, - aux12ub, aux21lb, aux21ub, aux22lb, - aux22ub): - (b, disj1, - disj2) = self.check_transformation_block_disjuncts_and_constraints( - m, - m.disjunction) + def check_transformation_block_structure( + self, m, aux11lb, aux11ub, aux12lb, aux12ub, aux21lb, aux21ub, aux22lb, aux22ub + ): + (b, disj1, disj2) = self.check_transformation_block_disjuncts_and_constraints( + m, m.disjunction + ) # each Disjunct has two variables declared on it (aux vars and indicator # var), plus a reference to the indicator_var from the original Disjunct self.assertEqual(len(disj1.component_map(Var)), 3) self.assertEqual(len(disj2.component_map(Var)), 3) - aux_vars1 = disj1.component( - "disjunction_disjuncts[0].constraint[1]_aux_vars") - aux_vars2 = disj2.component( - "disjunction_disjuncts[1].constraint[1]_aux_vars") - self.check_aux_var_bounds(aux_vars1, aux_vars2, aux11lb, aux11ub, - aux12lb, aux12ub, aux21lb, aux21ub, aux22lb, - aux22ub) + aux_vars1 = disj1.component("disjunction_disjuncts[0].constraint[1]_aux_vars") + aux_vars2 = disj2.component("disjunction_disjuncts[1].constraint[1]_aux_vars") + self.check_aux_var_bounds( + aux_vars1, + aux_vars2, + aux11lb, + aux11ub, + aux12lb, + aux12ub, + aux21lb, + aux21ub, + aux22lb, + aux22ub, + ) return b, disj1, disj2, aux_vars1, aux_vars2 @@ -177,42 +214,53 @@ def check_disjunct_constraints(self, disj1, disj2, aux_vars1, aux_vars2): c2 = c[0] self.check_disj_constraint(c2, -35, aux_vars2[0], aux_vars2[1]) - def check_transformation_block(self, m, aux11lb, aux11ub, aux12lb, aux12ub, - aux21lb, aux21ub, aux22lb, aux22ub, - partitions): - (b, disj1, disj2, - aux_vars1, - aux_vars2) = self.check_transformation_block_structure( m, aux11lb, - aux11ub, - aux12lb, - aux12ub, - aux21lb, - aux21ub, - aux22lb, - aux22ub) + def check_transformation_block( + self, + m, + aux11lb, + aux11ub, + aux12lb, + aux12ub, + aux21lb, + aux21ub, + aux22lb, + aux22ub, + partitions, + ): + ( + b, + disj1, + disj2, + aux_vars1, + aux_vars2, + ) = self.check_transformation_block_structure( + m, aux11lb, aux11ub, aux12lb, aux12ub, aux21lb, aux21ub, aux22lb, aux22ub + ) self.check_disjunct_constraints(disj1, disj2, aux_vars1, aux_vars2) # check the global constraints - c = b.component( - "disjunction_disjuncts[0].constraint[1]_split_constraints") + c = b.component("disjunction_disjuncts[0].constraint[1]_split_constraints") self.assertEqual(len(c), 2) c1 = c[0] - self.check_global_constraint_disj1(c1, aux_vars1[0], partitions[0][0], - partitions[0][1]) + self.check_global_constraint_disj1( + c1, aux_vars1[0], partitions[0][0], partitions[0][1] + ) c2 = c[1] - self.check_global_constraint_disj1(c2, aux_vars1[1], partitions[1][0], - partitions[1][1]) + self.check_global_constraint_disj1( + c2, aux_vars1[1], partitions[1][0], partitions[1][1] + ) - c = b.component( - "disjunction_disjuncts[1].constraint[1]_split_constraints") + c = b.component("disjunction_disjuncts[1].constraint[1]_split_constraints") self.assertEqual(len(c), 2) c1 = c[0] - self.check_global_constraint_disj2(c1, aux_vars2[0], partitions[0][0], - partitions[0][1]) + self.check_global_constraint_disj2( + c1, aux_vars2[0], partitions[0][0], partitions[0][1] + ) c2 = c[1] - self.check_global_constraint_disj2(c2, aux_vars2[1], partitions[1][0], - partitions[1][1]) + self.check_global_constraint_disj2( + c2, aux_vars2[1], partitions[1][0], partitions[1][1] + ) def test_transformation_block_fbbt_bounds(self): m = models.makeBetweenStepsPaperExample() @@ -220,17 +268,28 @@ def test_transformation_block_fbbt_bounds(self): TransformationFactory('gdp.partition_disjuncts').apply_to( m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) - self.check_transformation_block(m, 0, 72, 0, 72, -72, 96, -72, 96, - partitions=[[m.x[1], m.x[2]], [m.x[3], - m.x[4]]]) + self.check_transformation_block( + m, + 0, + 72, + 0, + 72, + -72, + 96, + -72, + 96, + partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], + ) def check_transformation_block_indexed_var_on_disjunct( - self, m, original_disjunction): - (b, disj1, - disj2) = self.check_transformation_block_disjuncts_and_constraints( - m, original_disjunction) + self, m, original_disjunction + ): + (b, disj1, disj2) = self.check_transformation_block_disjuncts_and_constraints( + m, original_disjunction + ) # Has it's own indicator var, a Reference to the original Disjunct's # indicator var, the aux vars, and the Reference to x @@ -240,8 +299,7 @@ def check_transformation_block_indexed_var_on_disjunct( aux_vars1 = disj1.component("disj1.c_aux_vars") aux_vars2 = disj2.component("disj2.c_aux_vars") - self.check_aux_var_bounds(aux_vars1, aux_vars2, 0, 72, 0, 72, -72, 96, - -72, 96) + self.check_aux_var_bounds(aux_vars1, aux_vars2, 0, 72, 0, 72, -72, 96, -72, 96) # check the transformed constraints on the disjuncts c = disj1.component("disj1.c") @@ -257,20 +315,16 @@ def check_transformation_block_indexed_var_on_disjunct( c = b.component("disj1.c_split_constraints") self.assertEqual(len(c), 2) c1 = c[0] - self.check_global_constraint_disj1(c1, aux_vars1[0], m.disj1.x[1], - m.disj1.x[2]) + self.check_global_constraint_disj1(c1, aux_vars1[0], m.disj1.x[1], m.disj1.x[2]) c2 = c[1] - self.check_global_constraint_disj1(c2, aux_vars1[1], m.disj1.x[3], - m.disj1.x[4]) + self.check_global_constraint_disj1(c2, aux_vars1[1], m.disj1.x[3], m.disj1.x[4]) c = b.component("disj2.c_split_constraints") self.assertEqual(len(c), 2) c1 = c[0] - self.check_global_constraint_disj2(c1, aux_vars2[0], m.disj1.x[1], - m.disj1.x[2]) + self.check_global_constraint_disj2(c1, aux_vars2[0], m.disj1.x[1], m.disj1.x[2]) c2 = c[1] - self.check_global_constraint_disj2(c2, aux_vars2[1], m.disj1.x[3], - m.disj1.x[4]) + self.check_global_constraint_disj2(c2, aux_vars2[1], m.disj1.x[3], m.disj1.x[4]) return b, disj1, disj2 @@ -279,64 +333,76 @@ def test_transformation_block_indexed_var_on_disjunct(self): TransformationFactory('gdp.partition_disjuncts').apply_to( m, - variable_partitions=[[m.disj1.x[1], m.disj1.x[2]], \ - [m.disj1.x[3], m.disj1.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + variable_partitions=[ + [m.disj1.x[1], m.disj1.x[2]], + [m.disj1.x[3], m.disj1.x[4]], + ], + compute_bounds_method=compute_fbbt_bounds, + ) - self.check_transformation_block_indexed_var_on_disjunct(m, - m.disjunction) + self.check_transformation_block_indexed_var_on_disjunct(m, m.disjunction) - def check_transformation_block_nested_disjunction(self, m, disj2, x, - disjunction_block=None): + def check_transformation_block_nested_disjunction( + self, m, disj2, x, disjunction_block=None + ): if disjunction_block is None: block_prefix = "" disjunction_parent = m else: block_prefix = disjunction_block + "." disjunction_parent = m.component(disjunction_block) - (inner_b, inner_disj1, - inner_disj2) = self.\ - check_transformation_block_disjuncts_and_constraints( - disj2, disjunction_parent.disj2.disjunction, - "%sdisj2.disjunction" % block_prefix) + ( + inner_b, + inner_disj1, + inner_disj2, + ) = self.check_transformation_block_disjuncts_and_constraints( + disj2, + disjunction_parent.disj2.disjunction, + "%sdisj2.disjunction" % block_prefix, + ) # Has it's own indicator var, the aux vars, and the Reference to the # original indicator_var self.assertEqual(len(inner_disj1.component_map(Var)), 3) self.assertEqual(len(inner_disj2.component_map(Var)), 3) - aux_vars1 = inner_disj1.component("%sdisj2.disjunction_disjuncts[0]." - "constraint[1]_aux_vars" % - block_prefix) - aux_vars2 = inner_disj2.component("%sdisj2.disjunction_disjuncts[1]." - "constraint[1]_aux_vars" % - block_prefix) - self.check_aux_var_bounds(aux_vars1, aux_vars2, 0, 72, 0, 72, -72, 96, - -72, 96) + aux_vars1 = inner_disj1.component( + "%sdisj2.disjunction_disjuncts[0].constraint[1]_aux_vars" % block_prefix + ) + aux_vars2 = inner_disj2.component( + "%sdisj2.disjunction_disjuncts[1].constraint[1]_aux_vars" % block_prefix + ) + self.check_aux_var_bounds(aux_vars1, aux_vars2, 0, 72, 0, 72, -72, 96, -72, 96) # check the transformed constraints on the disjuncts c = inner_disj1.component( - "%sdisj2.disjunction_disjuncts[0].constraint[1]" % block_prefix) + "%sdisj2.disjunction_disjuncts[0].constraint[1]" % block_prefix + ) self.assertEqual(len(c), 1) c1 = c[0] self.check_disj_constraint(c1, 1, aux_vars1[0], aux_vars1[1]) c = inner_disj2.component( - "%sdisj2.disjunction_disjuncts[1].constraint[1]" % block_prefix) + "%sdisj2.disjunction_disjuncts[1].constraint[1]" % block_prefix + ) self.assertEqual(len(c), 1) c2 = c[0] self.check_disj_constraint(c2, -35, aux_vars2[0], aux_vars2[1]) # check the global constraints - c = inner_b.component("%sdisj2.disjunction_disjuncts[0].constraint[1]" - "_split_constraints" % block_prefix) + c = inner_b.component( + "%sdisj2.disjunction_disjuncts[0].constraint[1]" + "_split_constraints" % block_prefix + ) self.assertEqual(len(c), 2) c1 = c[0] self.check_global_constraint_disj1(c1, aux_vars1[0], x[1], x[2]) c2 = c[1] self.check_global_constraint_disj1(c2, aux_vars1[1], x[3], x[4]) - c = inner_b.component("%sdisj2.disjunction_disjuncts[1].constraint[1]" - "_split_constraints" % block_prefix) + c = inner_b.component( + "%sdisj2.disjunction_disjuncts[1].constraint[1]" + "_split_constraints" % block_prefix + ) self.assertEqual(len(c), 2) c1 = c[0] self.check_global_constraint_disj2(c1, aux_vars2[0], x[1], x[2]) @@ -348,21 +414,23 @@ def test_transformation_block_nested_disjunction(self): TransformationFactory('gdp.partition_disjuncts').apply_to( m, - variable_partitions=[[m.disj1.x[1], m.disj1.x[2]], - [m.disj1.x[3], m.disj1.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + variable_partitions=[ + [m.disj1.x[1], m.disj1.x[2]], + [m.disj1.x[3], m.disj1.x[4]], + ], + compute_bounds_method=compute_fbbt_bounds, + ) # everything for the outer disjunction should look exactly the same as # the test above: - (b, disj1, - disj2) = self.check_transformation_block_indexed_var_on_disjunct( - m, m.disjunction) + (b, disj1, disj2) = self.check_transformation_block_indexed_var_on_disjunct( + m, m.disjunction + ) # AND, we should have a transformed inner disjunction on disj2: self.check_transformation_block_nested_disjunction(m, disj2, m.disj1.x) - def test_transformation_block_nested_disjunction_outer_disjunction_target( - self): + def test_transformation_block_nested_disjunction_outer_disjunction_target(self): """We should get identical behavior to the previous test if we specify the outer disjunction as the target""" m = models.makeBetweenStepsPaperExample_Nested() @@ -370,21 +438,23 @@ def test_transformation_block_nested_disjunction_outer_disjunction_target( TransformationFactory('gdp.partition_disjuncts').apply_to( m, targets=m.disjunction, - variable_partitions=[[m.disj1.x[1], m.disj1.x[2]], - [m.disj1.x[3], m.disj1.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + variable_partitions=[ + [m.disj1.x[1], m.disj1.x[2]], + [m.disj1.x[3], m.disj1.x[4]], + ], + compute_bounds_method=compute_fbbt_bounds, + ) # everything for the outer disjunction should look exactly the same as # the test above: - (b, disj1, - disj2) = self.check_transformation_block_indexed_var_on_disjunct( - m, m.disjunction) + (b, disj1, disj2) = self.check_transformation_block_indexed_var_on_disjunct( + m, m.disjunction + ) # AND, we should have a transformed inner disjunction on disj2: self.check_transformation_block_nested_disjunction(m, disj2, m.disj1.x) - def test_transformation_block_nested_disjunction_badly_ordered_targets( - self): + def test_transformation_block_nested_disjunction_badly_ordered_targets(self): """This tests that we preprocess targets correctly becuase we don't want to double transform the inner disjunct, which is what would happen if we did things in the order given.""" @@ -393,24 +463,28 @@ def test_transformation_block_nested_disjunction_badly_ordered_targets( TransformationFactory('gdp.partition_disjuncts').apply_to( m, targets=[m.disj2, m.disjunction], - variable_partitions=[[m.disj1.x[1], m.disj1.x[2]], - [m.disj1.x[3], m.disj1.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + variable_partitions=[ + [m.disj1.x[1], m.disj1.x[2]], + [m.disj1.x[3], m.disj1.x[4]], + ], + compute_bounds_method=compute_fbbt_bounds, + ) # everything for the outer disjunction should look exactly the same as # the test above: - (b, disj1, - disj2) = self.check_transformation_block_indexed_var_on_disjunct( - m, m.disjunction) + (b, disj1, disj2) = self.check_transformation_block_indexed_var_on_disjunct( + m, m.disjunction + ) # AND, we should have a transformed inner disjunction on disj2: self.check_transformation_block_nested_disjunction(m, disj2, m.disj1.x) def check_hierarchical_nested_model(self, m): - (b, disj1, - disj2) = self.check_transformation_block_disjuncts_and_constraints( - m.disjunction_block, m.disjunction_block.disjunction, - "disjunction_block.disjunction") + (b, disj1, disj2) = self.check_transformation_block_disjuncts_and_constraints( + m.disjunction_block, + m.disjunction_block.disjunction, + "disjunction_block.disjunction", + ) # each Disjunct has two variables declared on it (aux vars and indicator # var), plus a reference to the indicator_var from the original Disjunct self.assertEqual(len(disj1.component_map(Var)), 3) @@ -418,8 +492,7 @@ def check_hierarchical_nested_model(self, m): aux_vars1 = disj1.component("disj1.c_aux_vars") aux_vars2 = disj2.component("disjunct_block.disj2.c_aux_vars") - self.check_aux_var_bounds(aux_vars1, aux_vars2, 0, 72, 0, 72, -72, 96, - -72, 96) + self.check_aux_var_bounds(aux_vars1, aux_vars2, 0, 72, 0, 72, -72, 96, -72, 96) # check the transformed constraints on the disjuncts c = disj1.component("disj1.c") self.assertEqual(len(c), 1) @@ -446,16 +519,17 @@ def check_hierarchical_nested_model(self, m): self.check_global_constraint_disj2(c2, aux_vars2[1], m.x[3], m.x[4]) # check the inner disjunction - self.check_transformation_block_nested_disjunction(m, disj2, m.x, - "disjunct_block") + self.check_transformation_block_nested_disjunction( + m, disj2, m.x, "disjunct_block" + ) def test_hierarchical_nested_badly_ordered_targets(self): - m = models.makeHierarchicalNested_DeclOrderMatchesInstantationOrder() + m = models.makeHierarchicalNested_DeclOrderMatchesInstantiationOrder() # If we don't preprocess targets by actually finding who is nested in # who, this would force the Disjunct to be transformed before its # Disjunction because they are hidden on blocks. Then this would fail - # because the partition doesn't specify what to do with the auxilary + # because the partition doesn't specify what to do with the auxiliary # variables created by the inner disjunction. If we correctly descend # into Blocks and order according to the nesting structure, all will be # well. @@ -463,16 +537,18 @@ def test_hierarchical_nested_badly_ordered_targets(self): m, targets=[m.disjunction_block, m.disjunct_block.disj2], variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) self.check_hierarchical_nested_model(m) def test_hierarchical_nested_decl_order_opposite_instantiation_order(self): - m = models.makeHierarchicalNested_DeclOrderOppositeInstantationOrder() + m = models.makeHierarchicalNested_DeclOrderOppositeInstantiationOrder() TransformationFactory('gdp.partition_disjuncts').apply_to( m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) self.check_hierarchical_nested_model(m) @@ -482,12 +558,14 @@ def test_transformation_block_nested_disjunction_target(self): TransformationFactory('gdp.partition_disjuncts').apply_to( m, targets=m.disj2.disjunction, - variable_partitions=[[m.disj1.x[1], m.disj1.x[2]], - [m.disj1.x[3], m.disj1.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + variable_partitions=[ + [m.disj1.x[1], m.disj1.x[2]], + [m.disj1.x[3], m.disj1.x[4]], + ], + compute_bounds_method=compute_fbbt_bounds, + ) - self.check_transformation_block_nested_disjunction(m, m.disj2, - m.disj1.x) + self.check_transformation_block_nested_disjunction(m, m.disj2, m.disj1.x) # NOTE: If you then transformed the whole model (or the outer # disjunction), you would double-transform in the sense that you would # again transform the Disjunction this creates. But I think it serves @@ -496,8 +574,9 @@ def test_transformation_block_nested_disjunction_target(self): # for us to know. It is confusing though since bigm and hull need to go # from the leaves up and this is opposite. - @unittest.skipIf('gurobi_direct' not in solvers, - 'Gurobi direct solver not available') + @unittest.skipIf( + 'gurobi_direct' not in solvers, 'Gurobi direct solver not available' + ) def test_transformation_block_optimized_bounds(self): m = models.makeBetweenStepsPaperExample() @@ -509,35 +588,49 @@ def test_transformation_block_optimized_bounds(self): m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], compute_bounds_solver=SolverFactory('gurobi_direct'), - compute_bounds_method=compute_optimal_bounds) + compute_bounds_method=compute_optimal_bounds, + ) - self.check_transformation_block(m, 0, 72, 0, 72, -18, 32, -18, 32, - partitions=[[m.x[1], m.x[2]], [m.x[3], - m.x[4]]]) + self.check_transformation_block( + m, + 0, + 72, + 0, + 72, + -18, + 32, + -18, + 32, + partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], + ) def test_no_solver_error(self): m = models.makeBetweenStepsPaperExample() - with self.assertRaisesRegex(GDP_Error, - "No solver was specified to optimize the " - "subproblems for computing expression " - "bounds! " - "Please specify a configured solver in the " - "'compute_bounds_solver' argument if using " - "'compute_optimal_bounds.'"): + with self.assertRaisesRegex( + GDP_Error, + "No solver was specified to optimize the " + "subproblems for computing expression " + "bounds! " + "Please specify a configured solver in the " + "'compute_bounds_solver' argument if using " + "'compute_optimal_bounds.'", + ): TransformationFactory('gdp.partition_disjuncts').apply_to( m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], - compute_bounds_method=compute_optimal_bounds) + compute_bounds_method=compute_optimal_bounds, + ) - @unittest.skipIf('gurobi_direct' not in solvers, - 'Gurobi direct solver not available') + @unittest.skipIf( + 'gurobi_direct' not in solvers, 'Gurobi direct solver not available' + ) def test_transformation_block_better_bounds_in_global_constraints(self): m = models.makeBetweenStepsPaperExample() - m.c1 = Constraint(expr=m.x[1]**2 + m.x[2]**2 <= 32) - m.c2 = Constraint(expr=m.x[3]**2 + m.x[4]**2 <= 32) - m.c3 = Constraint(expr=(3 - m.x[1])**2 + (3 - m.x[2])**2 <= 32) - m.c4 = Constraint(expr=(3 - m.x[3])**2 + (3 - m.x[4])**2 <= 32) + m.c1 = Constraint(expr=m.x[1] ** 2 + m.x[2] ** 2 <= 32) + m.c2 = Constraint(expr=m.x[3] ** 2 + m.x[4] ** 2 <= 32) + m.c3 = Constraint(expr=(3 - m.x[1]) ** 2 + (3 - m.x[2]) ** 2 <= 32) + m.c4 = Constraint(expr=(3 - m.x[3]) ** 2 + (3 - m.x[4]) ** 2 <= 32) opt = SolverFactory('gurobi_direct') opt.options['NonConvex'] = 2 opt.options['FeasibilityTol'] = 1e-8 @@ -546,14 +639,25 @@ def test_transformation_block_better_bounds_in_global_constraints(self): m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], compute_bounds_solver=opt, - compute_bounds_method=compute_optimal_bounds) - - self.check_transformation_block(m, 0, 32, 0, 32, -18, 14, -18, 14, - partitions=[[m.x[1], m.x[2]], [m.x[3], - m.x[4]]]) + compute_bounds_method=compute_optimal_bounds, + ) - @unittest.skipIf('gurobi_direct' not in solvers, - 'Gurobi direct solver not available') + self.check_transformation_block( + m, + 0, + 32, + 0, + 32, + -18, + 14, + -18, + 14, + partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], + ) + + @unittest.skipIf( + 'gurobi_direct' not in solvers, 'Gurobi direct solver not available' + ) def test_transformation_block_arbitrary_even_partition(self): m = models.makeBetweenStepsPaperExample() @@ -565,14 +669,25 @@ def test_transformation_block_arbitrary_even_partition(self): m, num_partitions=2, compute_bounds_solver=SolverFactory('gurobi_direct'), - compute_bounds_method=compute_optimal_bounds) + compute_bounds_method=compute_optimal_bounds, + ) # The above will partition as [[x[1], x[3]], [x[2], x[4]]] - self.check_transformation_block(m, 0, 72, 0, 72, -18, 32, -18, 32, - partitions=[[m.x[1], m.x[3]], [m.x[2], - m.x[4]]]) - - @unittest.skipIf('gurobi_direct' not in solvers, - 'Gurobi direct solver not available') + self.check_transformation_block( + m, + 0, + 72, + 0, + 72, + -18, + 32, + -18, + 32, + partitions=[[m.x[1], m.x[3]], [m.x[2], m.x[4]]], + ) + + @unittest.skipIf( + 'gurobi_direct' not in solvers, 'Gurobi direct solver not available' + ) def test_assume_fixed_vars_not_permanent(self): m = models.makeBetweenStepsPaperExample() m.x[1].fix(0) @@ -587,7 +702,8 @@ def test_assume_fixed_vars_not_permanent(self): variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], assume_fixed_vars_permanent=False, compute_bounds_solver=SolverFactory('gurobi_direct'), - compute_bounds_method=compute_optimal_bounds) + compute_bounds_method=compute_optimal_bounds, + ) self.assertTrue(m.x[1].fixed) self.assertEqual(value(m.x[1]), 0) @@ -596,12 +712,22 @@ def test_assume_fixed_vars_not_permanent(self): m.x[1].fixed = False # should be identical to the case where x[1] was not fixed - self.check_transformation_block(m, 0, 72, 0, 72, -18, 32, -18, 32, - partitions=[[m.x[1], m.x[2]], [m.x[3], - m.x[4]]]) - - @unittest.skipIf('gurobi_direct' not in solvers, - 'Gurobi direct solver not available') + self.check_transformation_block( + m, + 0, + 72, + 0, + 72, + -18, + 32, + -18, + 32, + partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], + ) + + @unittest.skipIf( + 'gurobi_direct' not in solvers, 'Gurobi direct solver not available' + ) def test_assume_fixed_vars_permanent(self): m = models.makeBetweenStepsPaperExample() m.x[1].fix(0) @@ -616,7 +742,8 @@ def test_assume_fixed_vars_permanent(self): variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], assume_fixed_vars_permanent=True, compute_bounds_solver=SolverFactory('gurobi_direct'), - compute_bounds_method=compute_optimal_bounds) + compute_bounds_method=compute_optimal_bounds, + ) # Fixing BooleanVars is the same either way. We just check that it was # maintained through the transformation. @@ -626,18 +753,20 @@ def test_assume_fixed_vars_permanent(self): # This actually changes the structure of the model because fixed vars # move to the constants. I think this is fair, and we should allow it # because it will allow for a tighter relaxation. - (b, disj1, disj2, - aux_vars1, - aux_vars2) = self.check_transformation_block_structure(m, 0, 36, 0, 72, - -9, 16, -18, 32) + ( + b, + disj1, + disj2, + aux_vars1, + aux_vars2, + ) = self.check_transformation_block_structure(m, 0, 36, 0, 72, -9, 16, -18, 32) # check disjunct constraints self.check_disjunct_constraints(disj1, disj2, aux_vars1, aux_vars2) # now we can check the global constraints--these are what is different # because x[1] is gone. - c = b.component( - "disjunction_disjuncts[0].constraint[1]_split_constraints") + c = b.component("disjunction_disjuncts[0].constraint[1]_split_constraints") self.assertEqual(len(c), 2) c1 = c[0] self.assertIsNone(c1.lower) @@ -655,8 +784,7 @@ def test_assume_fixed_vars_permanent(self): c2 = c[1] self.check_global_constraint_disj1(c2, aux_vars1[1], m.x[3], m.x[4]) - c = b.component( - "disjunction_disjuncts[1].constraint[1]_split_constraints") + c = b.component("disjunction_disjuncts[1].constraint[1]_split_constraints") self.assertEqual(len(c), 2) c1 = c[0] self.assertIsNone(c1.lower) @@ -676,8 +804,9 @@ def test_assume_fixed_vars_permanent(self): c2 = c[1] self.check_global_constraint_disj2(c2, aux_vars2[1], m.x[3], m.x[4]) - @unittest.skipIf('gurobi_direct' not in solvers, - 'Gurobi direct solver not available') + @unittest.skipIf( + 'gurobi_direct' not in solvers, 'Gurobi direct solver not available' + ) def test_transformation_block_arbitrary_odd_partition(self): m = models.makeBetweenStepsPaperExample() @@ -689,11 +818,12 @@ def test_transformation_block_arbitrary_odd_partition(self): m, num_partitions=3, compute_bounds_solver=SolverFactory('gurobi_direct'), - compute_bounds_method=compute_optimal_bounds) + compute_bounds_method=compute_optimal_bounds, + ) - (b, disj1, - disj2) = self.check_transformation_block_disjuncts_and_constraints( - m, m.disjunction) + (b, disj1, disj2) = self.check_transformation_block_disjuncts_and_constraints( + m, m.disjunction + ) # each Disjunct has three variables declared on it (aux vars and # indicator var), plus a reference to the indicator_var of the original @@ -701,8 +831,7 @@ def test_transformation_block_arbitrary_odd_partition(self): self.assertEqual(len(disj1.component_map(Var)), 3) self.assertEqual(len(disj2.component_map(Var)), 3) - aux_vars1 = disj1.component( - "disjunction_disjuncts[0].constraint[1]_aux_vars") + aux_vars1 = disj1.component("disjunction_disjuncts[0].constraint[1]_aux_vars") self.assertEqual(len(aux_vars1), 3) self.assertEqual(aux_vars1[0].lb, 0) self.assertEqual(aux_vars1[0].ub, 72) @@ -710,8 +839,7 @@ def test_transformation_block_arbitrary_odd_partition(self): self.assertEqual(aux_vars1[1].ub, 36) self.assertEqual(aux_vars1[2].lb, 0) self.assertEqual(aux_vars1[2].ub, 36) - aux_vars2 = disj2.component( - "disjunction_disjuncts[1].constraint[1]_aux_vars") + aux_vars2 = disj2.component("disjunction_disjuncts[1].constraint[1]_aux_vars") self.assertEqual(len(aux_vars2), 3) # min and max of x1^2 - 6x1 + x2^2 - 6x2 self.assertEqual(aux_vars2[0].lb, -18) @@ -756,8 +884,7 @@ def test_transformation_block_arbitrary_odd_partition(self): self.assertEqual(repn.linear_coefs[2], 1) # check the global constraints - c = b.component( - "disjunction_disjuncts[0].constraint[1]_split_constraints") + c = b.component("disjunction_disjuncts[0].constraint[1]_split_constraints") self.assertEqual(len(c), 3) c.pprint() c1 = c[0] @@ -790,8 +917,7 @@ def test_transformation_block_arbitrary_odd_partition(self): self.assertIs(repn.quadratic_vars[0][1], m.x[3]) self.assertIsNone(repn.nonlinear_expr) - c = b.component( - "disjunction_disjuncts[1].constraint[1]_split_constraints") + c = b.component("disjunction_disjuncts[1].constraint[1]_split_constraints") self.assertEqual(len(c), 3) c1 = c[0] self.check_global_constraint_disj2(c1, aux_vars2[0], m.x[1], m.x[4]) @@ -834,13 +960,16 @@ def test_transformed_disjuncts_mapped_correctly(self): TransformationFactory('gdp.partition_disjuncts').apply_to( m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) b = m.component("_pyomo_gdp_partition_disjuncts_reformulation") - self.assertIs(m.disjunction.disjuncts[0].transformation_block(), - b.disjunction.disjuncts[0]) - self.assertIs(m.disjunction.disjuncts[1].transformation_block(), - b.disjunction.disjuncts[1]) + self.assertIs( + m.disjunction.disjuncts[0].transformation_block, b.disjunction.disjuncts[0] + ) + self.assertIs( + m.disjunction.disjuncts[1].transformation_block, b.disjunction.disjuncts[1] + ) def test_transformed_disjunctions_mapped_correctly(self): # we map disjunctions to disjunctions because this is a GDP -> GDP @@ -850,17 +979,21 @@ def test_transformed_disjunctions_mapped_correctly(self): TransformationFactory('gdp.partition_disjuncts').apply_to( m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) b = m.component("_pyomo_gdp_partition_disjuncts_reformulation") - self.assertIs(m.disjunction.algebraic_constraint(), b.disjunction) + self.assertIs(m.disjunction.algebraic_constraint, b.disjunction) def add_disjunction(self, b): m = b.model() b.another_disjunction = Disjunction( - expr=[[(m.x[1] - 1)**2 + m.x[2]**2 <= 1], - # writing this constraint backwards to test the flipping logic - [-(m.x[1] - 2)**2 - (m.x[2] - 3)**2 >= -1]]) + expr=[ + [(m.x[1] - 1) ** 2 + m.x[2] ** 2 <= 1], + # writing this constraint backwards to test the flipping logic + [-((m.x[1] - 2) ** 2) - (m.x[2] - 3) ** 2 >= -1], + ] + ) def make_model_with_added_disjunction_on_block(self): m = models.makeBetweenStepsPaperExample() @@ -962,22 +1095,31 @@ def test_disjunction_target(self): m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], compute_bounds_method=compute_fbbt_bounds, - targets=[m.disjunction]) + targets=[m.disjunction], + ) # should be the same as before - self.check_transformation_block(m, 0, 72, 0, 72, -72, 96, -72, 96, - partitions=[[m.x[1], m.x[2]], [m.x[3], - m.x[4]]]) + self.check_transformation_block( + m, + 0, + 72, + 0, + 72, + -72, + 96, + -72, + 96, + partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], + ) # and another_disjunction should be untransformed self.assertIsNone(m.b.another_disjunction.algebraic_constraint) - self.assertIsNone( - m.b.another_disjunction.disjuncts[0].transformation_block) - self.assertIsNone( - m.b.another_disjunction.disjuncts[1].transformation_block) + self.assertIsNone(m.b.another_disjunction.disjuncts[0].transformation_block) + self.assertIsNone(m.b.another_disjunction.disjuncts[1].transformation_block) - @unittest.skipIf('gurobi_direct' not in solvers, - 'Gurobi direct solver not available') + @unittest.skipIf( + 'gurobi_direct' not in solvers, 'Gurobi direct solver not available' + ) def test_block_target(self): m = self.make_model_with_added_disjunction_on_block() @@ -986,7 +1128,8 @@ def test_block_target(self): variable_partitions=[[m.x[1]], [m.x[2]]], compute_bounds_solver=SolverFactory('gurobi_direct'), compute_bounds_method=compute_optimal_bounds, - targets=[m.b]) + targets=[m.b], + ) # we didn't transform the disjunction not on b self.assertIsNone(m.disjunction.algebraic_constraint) @@ -999,8 +1142,8 @@ def test_block_target(self): # check we declared the right things self.assertEqual(len(b.component_map(Disjunction)), 1) self.assertEqual(len(b.component_map(Disjunct)), 2) - self.assertEqual(len(b.component_map(Constraint)), 2)# global - # constraints + self.assertEqual(len(b.component_map(Constraint)), 2) # global + # constraints disjunction = b.component("b.another_disjunction") self.assertIsInstance(disjunction, Disjunction) @@ -1017,10 +1160,12 @@ def test_block_target(self): self.assertEqual(len(disj2.component_map(Var)), 3) aux_vars1 = disj1.component( - "b.another_disjunction_disjuncts[0].constraint[1]_aux_vars") + "b.another_disjunction_disjuncts[0].constraint[1]_aux_vars" + ) aux_vars2 = disj2.component( - "b.another_disjunction_disjuncts[1].constraint[1]_aux_vars") + "b.another_disjunction_disjuncts[1].constraint[1]_aux_vars" + ) self.check_second_disjunction_aux_vars(aux_vars1, aux_vars2) # check constraints on disjuncts @@ -1034,18 +1179,18 @@ def test_block_target(self): # check global constraints c = b.component( - "b.another_disjunction_disjuncts[0].constraint[1]" - "_split_constraints") + "b.another_disjunction_disjuncts[0].constraint[1]_split_constraints" + ) self.check_second_disjunction_global_constraint_disj1(c, aux_vars1) c = b.component( - "b.another_disjunction_disjuncts[1].constraint[1]" - "_split_constraints") + "b.another_disjunction_disjuncts[1].constraint[1]_split_constraints" + ) self.check_second_disjunction_global_constraint_disj2(c, aux_vars2) - - @unittest.skipIf('gurobi_direct' not in solvers, - 'Gurobi direct solver not available') + @unittest.skipIf( + 'gurobi_direct' not in solvers, 'Gurobi direct solver not available' + ) def test_indexed_block_target(self): m = ConcreteModel() m.b = Block(Any) @@ -1057,10 +1202,12 @@ def test_indexed_block_target(self): m, variable_partitions={ m.b[1].another_disjunction: [[m.x[1]], [m.x[2]]], - m.b[0].disjunction: [[m.x[1], m.x[2]], [m.x[3], m.x[4]]]}, + m.b[0].disjunction: [[m.x[1], m.x[2]], [m.x[3], m.x[4]]], + }, compute_bounds_solver=SolverFactory('gurobi_direct'), compute_bounds_method=compute_optimal_bounds, - targets=[m.b]) + targets=[m.b], + ) b0 = m.b[0].component("_pyomo_gdp_partition_disjuncts_reformulation") self.assertIsInstance(b0, Block) @@ -1068,16 +1215,16 @@ def test_indexed_block_target(self): # check we declared the right things self.assertEqual(len(b0.component_map(Disjunction)), 1) self.assertEqual(len(b0.component_map(Disjunct)), 2) - self.assertEqual(len(b0.component_map(Constraint)), 2) # global - # constraints + self.assertEqual(len(b0.component_map(Constraint)), 2) # global + # constraints b1 = m.b[1].component("_pyomo_gdp_partition_disjuncts_reformulation") self.assertIsInstance(b1, Block) # check we declared the right things self.assertEqual(len(b1.component_map(Disjunction)), 1) self.assertEqual(len(b1.component_map(Disjunct)), 2) - self.assertEqual(len(b1.component_map(Constraint)), 2) # global - # constraints + self.assertEqual(len(b1.component_map(Constraint)), 2) # global + # constraints ############################ # Check the added disjunction @@ -1097,29 +1244,31 @@ def test_indexed_block_target(self): self.assertEqual(len(disj2.component_map(Var)), 3) aux_vars1 = disj1.component( - "b[1].another_disjunction_disjuncts[0].constraint[1]_aux_vars") + "b[1].another_disjunction_disjuncts[0].constraint[1]_aux_vars" + ) aux_vars2 = disj2.component( - "b[1].another_disjunction_disjuncts[1].constraint[1]_aux_vars") + "b[1].another_disjunction_disjuncts[1].constraint[1]_aux_vars" + ) self.check_second_disjunction_aux_vars(aux_vars1, aux_vars2) # check constraints on disjuncts - c1 = disj1.component( - "b[1].another_disjunction_disjuncts[0].constraint[1]") + c1 = disj1.component("b[1].another_disjunction_disjuncts[0].constraint[1]") self.assertEqual(len(c1), 1) self.check_disj_constraint(c1[0], 0, aux_vars1[0], aux_vars1[1]) - c2 = disj2.component( - "b[1].another_disjunction_disjuncts[1].constraint[1]") + c2 = disj2.component("b[1].another_disjunction_disjuncts[1].constraint[1]") self.assertEqual(len(c2), 1) self.check_disj_constraint(c2[0], -12, aux_vars2[0], aux_vars2[1]) # check global constraints - c = b1.component("b[1].another_disjunction_disjuncts[0]." - "constraint[1]_split_constraints") + c = b1.component( + "b[1].another_disjunction_disjuncts[0].constraint[1]_split_constraints" + ) self.check_second_disjunction_global_constraint_disj1(c, aux_vars1) - c = b1.component("b[1].another_disjunction_disjuncts[1]." - "constraint[1]_split_constraints") + c = b1.component( + "b[1].another_disjunction_disjuncts[1].constraint[1]_split_constraints" + ) self.check_second_disjunction_global_constraint_disj2(c, aux_vars2) ############################ @@ -1140,11 +1289,12 @@ def test_indexed_block_target(self): self.assertEqual(len(disj2.component_map(Var)), 3) aux_vars1 = disj1.component( - "b[0].disjunction_disjuncts[0].constraint[1]_aux_vars") + "b[0].disjunction_disjuncts[0].constraint[1]_aux_vars" + ) aux_vars2 = disj2.component( - "b[0].disjunction_disjuncts[1].constraint[1]_aux_vars") - self.check_aux_var_bounds(aux_vars1, aux_vars2, 0, 72, 0, 72, -18, 32, - -18, 32) + "b[0].disjunction_disjuncts[1].constraint[1]_aux_vars" + ) + self.check_aux_var_bounds(aux_vars1, aux_vars2, 0, 72, 0, 72, -18, 32, -18, 32) # check constraints on disjuncts c1 = disj1.component("b[0].disjunction_disjuncts[0].constraint[1]") @@ -1157,7 +1307,8 @@ def test_indexed_block_target(self): # check global constraints c = b0.component( - "b[0].disjunction_disjuncts[0].constraint[1]_split_constraints") + "b[0].disjunction_disjuncts[0].constraint[1]_split_constraints" + ) self.assertEqual(len(c), 2) c1 = c[0] self.check_global_constraint_disj1(c1, aux_vars1[0], m.x[1], m.x[2]) @@ -1165,32 +1316,40 @@ def test_indexed_block_target(self): self.check_global_constraint_disj1(c2, aux_vars1[1], m.x[3], m.x[4]) c = b0.component( - "b[0].disjunction_disjuncts[1].constraint[1]_split_constraints") + "b[0].disjunction_disjuncts[1].constraint[1]_split_constraints" + ) self.assertEqual(len(c), 2) c1 = c[0] self.check_global_constraint_disj2(c1, aux_vars2[0], m.x[1], m.x[2]) c2 = c[1] self.check_global_constraint_disj2(c2, aux_vars2[1], m.x[3], m.x[4]) - @unittest.skipIf('gurobi_direct' not in solvers, - 'Gurobi direct solver not available') + @unittest.skipIf( + 'gurobi_direct' not in solvers, 'Gurobi direct solver not available' + ) def test_indexed_disjunction_target(self): m = ConcreteModel() - m.I = RangeSet(1,4) - m.x = Var(m.I, bounds=(-2,6)) + m.I = RangeSet(1, 4) + m.x = Var(m.I, bounds=(-2, 6)) m.indexed = Disjunction(Any) - m.indexed[1] = [[sum(m.x[i]**2 for i in m.I) <= 1], - [sum((3 - m.x[i])**2 for i in m.I) <= 1]] - m.indexed[0] = [[(m.x[1] - 1)**2 + m.x[2]**2 <= 1], - [-(m.x[1] - 2)**2 - (m.x[2] - 3)**2 >= -1]] + m.indexed[1] = [ + [sum(m.x[i] ** 2 for i in m.I) <= 1], + [sum((3 - m.x[i]) ** 2 for i in m.I) <= 1], + ] + m.indexed[0] = [ + [(m.x[1] - 1) ** 2 + m.x[2] ** 2 <= 1], + [-((m.x[1] - 2) ** 2) - (m.x[2] - 3) ** 2 >= -1], + ] TransformationFactory('gdp.partition_disjuncts').apply_to( m, variable_partitions={ m.indexed[0]: [[m.x[1]], [m.x[2]]], - m.indexed[1]: [[m.x[1], m.x[2]], [m.x[3], m.x[4]]]}, + m.indexed[1]: [[m.x[1], m.x[2]], [m.x[3], m.x[4]]], + }, compute_bounds_solver=SolverFactory('gurobi_direct'), compute_bounds_method=compute_optimal_bounds, - targets=[m.indexed]) + targets=[m.indexed], + ) b = m.component("_pyomo_gdp_partition_disjuncts_reformulation") self.assertIsInstance(b, Block) @@ -1198,8 +1357,8 @@ def test_indexed_disjunction_target(self): # check we declared the right things self.assertEqual(len(b.component_map(Disjunction)), 2) self.assertEqual(len(b.component_map(Disjunct)), 4) - self.assertEqual(len(b.component_map(Constraint)), 4) # global - # constraints + self.assertEqual(len(b.component_map(Constraint)), 4) # global + # constraints ############################ # Check the added disjunction ############################# @@ -1216,30 +1375,24 @@ def test_indexed_disjunction_target(self): # Disjunct self.assertEqual(len(disj1.component_map(Var)), 3) self.assertEqual(len(disj2.component_map(Var)), 3) - aux_vars1 = disj1.component( - "indexed_disjuncts[2].constraint[1]_aux_vars") - aux_vars2 = disj2.component( - "indexed_disjuncts[3].constraint[1]_aux_vars") + aux_vars1 = disj1.component("indexed_disjuncts[2].constraint[1]_aux_vars") + aux_vars2 = disj2.component("indexed_disjuncts[3].constraint[1]_aux_vars") self.check_second_disjunction_aux_vars(aux_vars1, aux_vars2) # check constraints on disjuncts - c1 = disj1.component( - "indexed_disjuncts[2].constraint[1]") + c1 = disj1.component("indexed_disjuncts[2].constraint[1]") self.assertEqual(len(c1), 1) self.check_disj_constraint(c1[0], 0, aux_vars1[0], aux_vars1[1]) - c2 = disj2.component( - "indexed_disjuncts[3].constraint[1]") + c2 = disj2.component("indexed_disjuncts[3].constraint[1]") self.assertEqual(len(c2), 1) self.check_disj_constraint(c2[0], -12, aux_vars2[0], aux_vars2[1]) # check global constraints - c = b.component("indexed_disjuncts[2]." - "constraint[1]_split_constraints") + c = b.component("indexed_disjuncts[2].constraint[1]_split_constraints") self.check_second_disjunction_global_constraint_disj1(c, aux_vars1) - c = b.component("indexed_disjuncts[3]." - "constraint[1]_split_constraints") + c = b.component("indexed_disjuncts[3].constraint[1]_split_constraints") self.check_second_disjunction_global_constraint_disj2(c, aux_vars2) ############################ @@ -1259,12 +1412,9 @@ def test_indexed_disjunction_target(self): self.assertEqual(len(disj1.component_map(Var)), 3) self.assertEqual(len(disj2.component_map(Var)), 3) - aux_vars1 = disj1.component( - "indexed_disjuncts[0].constraint[1]_aux_vars") - aux_vars2 = disj2.component( - "indexed_disjuncts[1].constraint[1]_aux_vars") - self.check_aux_var_bounds(aux_vars1, aux_vars2, 0, 72, 0, 72, -18, 32, - -18, 32) + aux_vars1 = disj1.component("indexed_disjuncts[0].constraint[1]_aux_vars") + aux_vars2 = disj2.component("indexed_disjuncts[1].constraint[1]_aux_vars") + self.check_aux_var_bounds(aux_vars1, aux_vars2, 0, 72, 0, 72, -18, 32, -18, 32) # check constraints on disjuncts c1 = disj1.component("indexed_disjuncts[0].constraint[1]") @@ -1276,16 +1426,14 @@ def test_indexed_disjunction_target(self): self.check_disj_constraint(c2[0], -35, aux_vars2[0], aux_vars2[1]) # check global constraints - c = b.component( - "indexed_disjuncts[0].constraint[1]_split_constraints") + c = b.component("indexed_disjuncts[0].constraint[1]_split_constraints") self.assertEqual(len(c), 2) c1 = c[0] self.check_global_constraint_disj1(c1, aux_vars1[0], m.x[1], m.x[2]) c2 = c[1] self.check_global_constraint_disj1(c2, aux_vars1[1], m.x[3], m.x[4]) - c = b.component( - "indexed_disjuncts[1].constraint[1]_split_constraints") + c = b.component("indexed_disjuncts[1].constraint[1]_split_constraints") self.assertEqual(len(c), 2) c1 = c[0] self.check_global_constraint_disj2(c1, aux_vars2[0], m.x[1], m.x[2]) @@ -1305,7 +1453,8 @@ def test_incomplete_partition_error(self): TransformationFactory('gdp.partition_disjuncts').apply_to, m, variable_partitions=[[m.x[1]], [m.x[2]]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) def test_unbounded_expression_error(self): m = models.makeBetweenStepsPaperExample() @@ -1322,22 +1471,25 @@ def test_unbounded_expression_error(self): TransformationFactory('gdp.partition_disjuncts').apply_to, m, variable_partitions=[[m.x[1]], [m.x[2]], [m.x[3], m.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) def test_no_value_for_P_error(self): m = models.makeBetweenStepsPaperExample() with self.assertRaisesRegex( - GDP_Error, - "No value for P was given for disjunction " - "disjunction! Please specify a value of P " - r"\(number of partitions\), if you do not specify the " - "partitions directly."): + GDP_Error, + "No value for P was given for disjunction " + "disjunction! Please specify a value of P " + r"\(number of partitions\), if you do not specify the " + "partitions directly.", + ): TransformationFactory('gdp.partition_disjuncts').apply_to(m) def test_create_using(self): m = models.makeBetweenStepsPaperExample() self.diff_apply_to_and_create_using(m, num_partitions=2) + class NonQuadraticNonlinear(unittest.TestCase, CommonTests): def check_transformation_block(self, m, aux1lb, aux1ub, aux2lb, aux2ub): b = m.component("_pyomo_gdp_partition_disjuncts_reformulation") @@ -1368,23 +1520,22 @@ def check_transformation_block(self, m, aux1lb, aux1ub, aux2lb, aux2ub): self.assertIsInstance(equivalence, LogicalConstraint) self.assertEqual(len(equivalence), 2) for i, variables in enumerate( - [(m.disjunction.disjuncts[0].indicator_var, - disj1.indicator_var), - (m.disjunction.disjuncts[1].indicator_var, - disj2.indicator_var)]): + [ + (m.disjunction.disjuncts[0].indicator_var, disj1.indicator_var), + (m.disjunction.disjuncts[1].indicator_var, disj2.indicator_var), + ] + ): cons = equivalence[i] self.assertIsInstance(cons.body, EquivalenceExpression) self.assertEqual(cons.body.args, variables) - aux_vars1 = disj1.component( - "disjunction_disjuncts[0].constraint[1]_aux_vars") + aux_vars1 = disj1.component("disjunction_disjuncts[0].constraint[1]_aux_vars") self.assertEqual(len(aux_vars1), 2) self.assertEqual(aux_vars1[0].lb, aux1lb) self.assertEqual(aux_vars1[0].ub, aux1ub) self.assertEqual(aux_vars1[1].lb, aux1lb) self.assertEqual(aux_vars1[1].ub, aux1ub) - aux_vars2 = disj2.component( - "disjunction_disjuncts[1].constraint[1]_aux_vars") + aux_vars2 = disj2.component("disjunction_disjuncts[1].constraint[1]_aux_vars") self.assertEqual(len(aux_vars2), 2) self.assertEqual(aux_vars2[0].lb, aux2lb) self.assertEqual(aux_vars2[0].ub, aux2ub) @@ -1421,8 +1572,7 @@ def check_transformation_block(self, m, aux1lb, aux1ub, aux2lb, aux2ub): self.assertEqual(repn.linear_coefs[1], 1) # check the global constraints - c = b.component( - "disjunction_disjuncts[0].constraint[1]_split_constraints") + c = b.component("disjunction_disjuncts[0].constraint[1]_split_constraints") self.assertEqual(len(c), 2) c1 = c[0] self.assertIsNone(c1.lower) @@ -1439,10 +1589,8 @@ def check_transformation_block(self, m, aux1lb, aux1ub, aux2lb, aux2ub): self.assertEqual(repn.nonlinear_expr.args[1], 0.25) self.assertIsInstance(repn.nonlinear_expr.args[0], EXPR.SumExpression) self.assertEqual(len(repn.nonlinear_expr.args[0].args), 2) - self.assertIsInstance(repn.nonlinear_expr.args[0].args[0], - EXPR.PowExpression) - self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], - EXPR.PowExpression) + self.assertIsInstance(repn.nonlinear_expr.args[0].args[0], EXPR.PowExpression) + self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], EXPR.PowExpression) self.assertIs(repn.nonlinear_expr.args[0].args[0].args[0], m.x[1]) self.assertEqual(repn.nonlinear_expr.args[0].args[0].args[1], 4) self.assertIs(repn.nonlinear_expr.args[0].args[1].args[0], m.x[2]) @@ -1462,17 +1610,14 @@ def check_transformation_block(self, m, aux1lb, aux1ub, aux2lb, aux2ub): self.assertEqual(repn.nonlinear_expr.args[1], 0.25) self.assertIsInstance(repn.nonlinear_expr.args[0], EXPR.SumExpression) self.assertEqual(len(repn.nonlinear_expr.args[0].args), 2) - self.assertIsInstance(repn.nonlinear_expr.args[0].args[0], - EXPR.PowExpression) - self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], - EXPR.PowExpression) + self.assertIsInstance(repn.nonlinear_expr.args[0].args[0], EXPR.PowExpression) + self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], EXPR.PowExpression) self.assertIs(repn.nonlinear_expr.args[0].args[0].args[0], m.x[3]) self.assertEqual(repn.nonlinear_expr.args[0].args[0].args[1], 4) self.assertIs(repn.nonlinear_expr.args[0].args[1].args[0], m.x[4]) self.assertEqual(repn.nonlinear_expr.args[0].args[1].args[1], 4) - c = b.component( - "disjunction_disjuncts[1].constraint[1]_split_constraints") + c = b.component("disjunction_disjuncts[1].constraint[1]_split_constraints") self.assertEqual(len(c), 2) c1 = c[0] self.assertIsNone(c1.lower) @@ -1489,10 +1634,8 @@ def check_transformation_block(self, m, aux1lb, aux1ub, aux2lb, aux2ub): self.assertEqual(repn.nonlinear_expr.args[1], 0.25) self.assertIsInstance(repn.nonlinear_expr.args[0], EXPR.SumExpression) self.assertEqual(len(repn.nonlinear_expr.args[0].args), 2) - self.assertIsInstance(repn.nonlinear_expr.args[0].args[0], - EXPR.PowExpression) - self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], - EXPR.PowExpression) + self.assertIsInstance(repn.nonlinear_expr.args[0].args[0], EXPR.PowExpression) + self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], EXPR.PowExpression) sum_expr = repn.nonlinear_expr.args[0].args[0].args[0] self.assertIsInstance(sum_expr, EXPR.SumExpression) sum_repn = generate_standard_repn(sum_expr) @@ -1502,8 +1645,7 @@ def check_transformation_block(self, m, aux1lb, aux1ub, aux2lb, aux2ub): self.assertEqual(sum_repn.linear_coefs[0], -1) self.assertIs(sum_repn.linear_vars[0], m.x[1]) self.assertEqual(repn.nonlinear_expr.args[0].args[0].args[1], 4) - self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], - EXPR.PowExpression) + self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], EXPR.PowExpression) sum_expr = repn.nonlinear_expr.args[0].args[1].args[0] self.assertIsInstance(sum_expr, EXPR.SumExpression) sum_repn = generate_standard_repn(sum_expr) @@ -1529,10 +1671,8 @@ def check_transformation_block(self, m, aux1lb, aux1ub, aux2lb, aux2ub): self.assertEqual(repn.nonlinear_expr.args[1], 0.25) self.assertIsInstance(repn.nonlinear_expr.args[0], EXPR.SumExpression) self.assertEqual(len(repn.nonlinear_expr.args[0].args), 2) - self.assertIsInstance(repn.nonlinear_expr.args[0].args[0], - EXPR.PowExpression) - self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], - EXPR.PowExpression) + self.assertIsInstance(repn.nonlinear_expr.args[0].args[0], EXPR.PowExpression) + self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], EXPR.PowExpression) sum_expr = repn.nonlinear_expr.args[0].args[0].args[0] self.assertIsInstance(sum_expr, EXPR.SumExpression) sum_repn = generate_standard_repn(sum_expr) @@ -1542,8 +1682,7 @@ def check_transformation_block(self, m, aux1lb, aux1ub, aux2lb, aux2ub): self.assertEqual(sum_repn.linear_coefs[0], -1) self.assertIs(sum_repn.linear_vars[0], m.x[3]) self.assertEqual(repn.nonlinear_expr.args[0].args[0].args[1], 4) - self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], - EXPR.PowExpression) + self.assertIsInstance(repn.nonlinear_expr.args[0].args[1], EXPR.PowExpression) sum_expr = repn.nonlinear_expr.args[0].args[1].args[0] self.assertIsInstance(sum_expr, EXPR.SumExpression) sum_repn = generate_standard_repn(sum_expr) @@ -1560,9 +1699,12 @@ def test_transformation_block_fbbt_bounds(self): TransformationFactory('gdp.partition_disjuncts').apply_to( m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) - self.check_transformation_block(m, 0, (2*6**4)**0.25, 0, (2*5**4)**0.25) + self.check_transformation_block( + m, 0, (2 * 6**4) ** 0.25, 0, (2 * 5**4) ** 0.25 + ) def test_invalid_partition_error(self): m = models.makeNonQuadraticNonlinearGDP() @@ -1582,31 +1724,35 @@ def test_invalid_partition_error(self): TransformationFactory('gdp.partition_disjuncts').apply_to, m, variable_partitions=[[m.x[3], m.x[2]], [m.x[1], m.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) def test_invalid_partition_error_multiply_vars_in_different_partition(self): m = ConcreteModel() - m.x = Var(bounds=(-10,10)) - m.y = Var(bounds=(-60,56)) + m.x = Var(bounds=(-10, 10)) + m.y = Var(bounds=(-60, 56)) m.d1 = Disjunct() - m.d1.c = Constraint(expr=m.x**2 + m.x*m.y + m.y**2 <= 32) + m.d1.c = Constraint(expr=m.x**2 + m.x * m.y + m.y**2 <= 32) m.d2 = Disjunct() m.d2.c = Constraint(expr=m.x**2 + m.y**2 <= 3) m.disjunction = Disjunction(expr=[m.d1, m.d2]) - with self.assertRaisesRegex(GDP_Error, - "Variables 'x' and 'y' are " - "multiplied in Constraint 'd1.c', " - "but they are in different " - "partitions! Please ensure that " - "all the constraints in the " - "disjunction are " - "additively separable with " - "respect to the specified " - "partition."): + with self.assertRaisesRegex( + GDP_Error, + "Variables 'x' and 'y' are " + "multiplied in Constraint 'd1.c', " + "but they are in different " + "partitions! Please ensure that " + "all the constraints in the " + "disjunction are " + "additively separable with " + "respect to the specified " + "partition.", + ): TransformationFactory('gdp.partition_disjuncts').apply_to( m, variable_partitions=[[m.x], [m.y]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) def test_non_additively_separable_expression(self): m = models.makeNonQuadraticNonlinearGDP() @@ -1616,29 +1762,31 @@ def test_non_additively_separable_expression(self): # how things work when part of the expression is empty for one part in # the partition. m.disjunction.disjuncts[0].another_constraint = Constraint( - expr=m.x[1]**3 <= 0.5) + expr=m.x[1] ** 3 <= 0.5 + ) TransformationFactory('gdp.partition_disjuncts').apply_to( m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]], - compute_bounds_method=compute_fbbt_bounds) + compute_bounds_method=compute_fbbt_bounds, + ) # we just need to check the first Disjunct's transformation b = m.component("_pyomo_gdp_partition_disjuncts_reformulation") disj1 = b.disjunction.disjuncts[0] self.assertEqual(len(disj1.component_map(Constraint)), 2) - # has indicator_var and two sets of auxilary variables, plus a reference + # has indicator_var and two sets of auxiliary variables, plus a reference # to the indicator_var on the original Disjunct self.assertEqual(len(disj1.component_map(Var)), 4) self.assertEqual(len(disj1.component_map(Constraint)), 2) - aux_vars1 = disj1.component( - "disjunction_disjuncts[0].constraint[1]_aux_vars") + aux_vars1 = disj1.component("disjunction_disjuncts[0].constraint[1]_aux_vars") # we check these in test_transformation_block_fbbt_bounds aux_vars2 = disj1.component( - "disjunction_disjuncts[0].another_constraint_aux_vars") + "disjunction_disjuncts[0].another_constraint_aux_vars" + ) self.assertEqual(len(aux_vars2), 1) self.assertEqual(aux_vars2[0].lb, -8) self.assertEqual(aux_vars2[0].ub, 216) @@ -1658,7 +1806,8 @@ def test_non_additively_separable_expression(self): # now check the global constraint cons = b.component( - "disjunction_disjuncts[0].another_constraint_split_constraints") + "disjunction_disjuncts[0].another_constraint_split_constraints" + ) self.assertEqual(len(cons), 1) cons = cons[0] self.assertIsNone(cons.lower) @@ -1677,10 +1826,9 @@ def test_non_additively_separable_expression(self): def test_create_using(self): m = models.makeNonQuadraticNonlinearGDP() - self.diff_apply_to_and_create_using(m, variable_partitions=[[m.x[1], - m.x[2]], - [m.x[3], - m.x[4]]]) + self.diff_apply_to_and_create_using( + m, variable_partitions=[[m.x[1], m.x[2]], [m.x[3], m.x[4]]] + ) def test_infeasible_value_of_P(self): m = models.makeNonQuadraticNonlinearGDP() @@ -1708,154 +1856,159 @@ def test_infeasible_value_of_P(self): "separable.", TransformationFactory('gdp.partition_disjuncts').apply_to, m, - num_partitions=3) + num_partitions=3, + ) + # This is just a pile of tests that are structural that we use for bigm and # hull, so might as well for this too. class CommonModels(unittest.TestCase, CommonTests): def test_user_deactivated_disjuncts(self): - ct.check_user_deactivated_disjuncts(self, 'partition_disjuncts', - check_trans_block=False, - num_partitions=2) + ct.check_user_deactivated_disjuncts( + self, 'partition_disjuncts', check_trans_block=False, num_partitions=2 + ) def test_improperly_deactivated_disjuncts(self): - ct.check_improperly_deactivated_disjuncts(self, 'partition_disjuncts', - num_partitions=2) + ct.check_improperly_deactivated_disjuncts( + self, 'partition_disjuncts', num_partitions=2 + ) def test_do_not_transform_userDeactivated_indexedDisjunction(self): ct.check_do_not_transform_userDeactivated_indexedDisjunction( - self, 'partition_disjuncts', num_partitions=2) + self, 'partition_disjuncts', num_partitions=2 + ) def test_disjunction_deactivated(self): - ct.check_disjunction_deactivated(self, 'partition_disjuncts', - num_partitions=2) + ct.check_disjunction_deactivated(self, 'partition_disjuncts', num_partitions=2) def test_disjunctDatas_deactivated(self): - ct.check_disjunctDatas_deactivated(self, 'partition_disjuncts', - num_partitions=2) + ct.check_disjunctDatas_deactivated( + self, 'partition_disjuncts', num_partitions=2 + ) def test_deactivated_constraints(self): - ct.check_deactivated_constraints(self, 'partition_disjuncts', - num_partitions=2) + ct.check_deactivated_constraints(self, 'partition_disjuncts', num_partitions=2) def test_deactivated_disjuncts(self): - ct.check_deactivated_disjuncts(self, 'partition_disjuncts', - num_partitions=2) + ct.check_deactivated_disjuncts(self, 'partition_disjuncts', num_partitions=2) def test_deactivated_disjunctions(self): - ct.check_deactivated_disjunctions(self, 'partition_disjuncts', - num_partitions=2) + ct.check_deactivated_disjunctions(self, 'partition_disjuncts', num_partitions=2) def test_constraints_deactivated_indexedDisjunction(self): ct.check_constraints_deactivated_indexedDisjunction( - self, - 'partition_disjuncts', num_partitions=2) + self, 'partition_disjuncts', num_partitions=2 + ) # targets def test_only_targets_inactive(self): - ct.check_only_targets_inactive(self, 'partition_disjuncts', - num_partitions=2) + ct.check_only_targets_inactive(self, 'partition_disjuncts', num_partitions=2) def test_target_not_a_component_error(self): - ct.check_target_not_a_component_error(self, 'partition_disjuncts', - num_partitions=2) + ct.check_target_not_a_component_error( + self, 'partition_disjuncts', num_partitions=2 + ) def test_indexedDisj_targets_inactive(self): - ct.check_indexedDisj_targets_inactive(self, 'partition_disjuncts', - num_partitions=2) + ct.check_indexedDisj_targets_inactive( + self, 'partition_disjuncts', num_partitions=2 + ) def test_warn_for_untransformed(self): - ct.check_warn_for_untransformed(self, 'partition_disjuncts', - num_partitions=2) + ct.check_warn_for_untransformed(self, 'partition_disjuncts', num_partitions=2) def test_disjData_targets_inactive(self): - ct.check_disjData_targets_inactive(self, 'partition_disjuncts', - num_partitions=2) + ct.check_disjData_targets_inactive( + self, 'partition_disjuncts', num_partitions=2 + ) def test_indexedBlock_targets_inactive(self): - ct.check_indexedBlock_targets_inactive(self, 'partition_disjuncts', - num_partitions=2) + ct.check_indexedBlock_targets_inactive( + self, 'partition_disjuncts', num_partitions=2 + ) def test_blockData_targets_inactive(self): - ct.check_blockData_targets_inactive(self, 'partition_disjuncts', - num_partitions=2) + ct.check_blockData_targets_inactive( + self, 'partition_disjuncts', num_partitions=2 + ) # transforming blocks def test_transformation_simple_block(self): - ct.check_transformation_simple_block(self, 'partition_disjuncts', - num_partitions=2) + ct.check_transformation_simple_block( + self, 'partition_disjuncts', num_partitions=2 + ) def test_transform_block_data(self): - ct.check_transform_block_data(self, 'partition_disjuncts', - num_partitions=2) + ct.check_transform_block_data(self, 'partition_disjuncts', num_partitions=2) def test_simple_block_target(self): - ct.check_simple_block_target(self, 'partition_disjuncts', - num_partitions=2) + ct.check_simple_block_target(self, 'partition_disjuncts', num_partitions=2) def test_block_data_target(self): - ct.check_block_data_target(self, 'partition_disjuncts', - num_partitions=2) + ct.check_block_data_target(self, 'partition_disjuncts', num_partitions=2) def test_indexed_block_target(self): - ct.check_indexed_block_target(self, 'partition_disjuncts', - num_partitions=2) + ct.check_indexed_block_target(self, 'partition_disjuncts', num_partitions=2) def test_block_targets_inactive(self): - ct.check_block_targets_inactive(self, 'partition_disjuncts', - num_partitions=2) + ct.check_block_targets_inactive(self, 'partition_disjuncts', num_partitions=2) # common error messages def test_transform_empty_disjunction(self): - ct.check_transform_empty_disjunction(self, 'partition_disjuncts', - num_partitions=2) + ct.check_transform_empty_disjunction( + self, 'partition_disjuncts', num_partitions=2 + ) def test_deactivated_disjunct_nonzero_indicator_var(self): ct.check_deactivated_disjunct_nonzero_indicator_var( - self, - 'partition_disjuncts', num_partitions=2) + self, 'partition_disjuncts', num_partitions=2 + ) def test_deactivated_disjunct_unfixed_indicator_var(self): ct.check_deactivated_disjunct_unfixed_indicator_var( - self, - 'partition_disjuncts', num_partitions=2) + self, 'partition_disjuncts', num_partitions=2 + ) def test_silly_target(self): ct.check_silly_target(self, 'partition_disjuncts', num_partitions=2) def test_error_for_same_disjunct_in_multiple_disjunctions(self): ct.check_error_for_same_disjunct_in_multiple_disjunctions( - self, 'partition_disjuncts', num_partitions=2) + self, 'partition_disjuncts', num_partitions=2 + ) def test_cannot_call_transformation_on_disjunction(self): ct.check_cannot_call_transformation_on_disjunction( - self, - 'partition_disjuncts', num_partitions=2) + self, 'partition_disjuncts', num_partitions=2 + ) def test_disjunction_target_err(self): - ct.check_disjunction_target_err(self, 'partition_disjuncts', - num_partitions=2) + ct.check_disjunction_target_err(self, 'partition_disjuncts', num_partitions=2) # nested disjunctions (only checking that everything is transformed) def test_disjuncts_inactive_nested(self): - ct.check_disjuncts_inactive_nested(self, 'partition_disjuncts', - num_partitions=2) + ct.check_disjuncts_inactive_nested( + self, 'partition_disjuncts', num_partitions=2 + ) def test_deactivated_disjunct_leaves_nested_disjunct_active(self): ct.check_deactivated_disjunct_leaves_nested_disjunct_active( - self, 'partition_disjuncts', num_partitions=2) + self, 'partition_disjuncts', num_partitions=2 + ) def test_disjunct_targets_inactive(self): - ct.check_disjunct_targets_inactive(self, 'partition_disjuncts', - num_partitions=2) + ct.check_disjunct_targets_inactive( + self, 'partition_disjuncts', num_partitions=2 + ) def test_disjunctData_targets_inactive(self): - ct.check_disjunctData_targets_inactive(self, 'partition_disjuncts', - num_partitions=2) + ct.check_disjunctData_targets_inactive( + self, 'partition_disjuncts', num_partitions=2 + ) # check handling for benign types @@ -1866,17 +2019,15 @@ def test_Expression(self): ct.check_Expression(self, 'partition_disjuncts', num_partitions=2) def test_untransformed_network_raises_GDPError(self): - ct.check_untransformed_network_raises_GDPError( self, - 'partition_disjuncts', - num_partitions=2) + ct.check_untransformed_network_raises_GDPError( + self, 'partition_disjuncts', num_partitions=2 + ) @unittest.skipUnless(ct.linear_solvers, "Could not find a linear solver") - @unittest.skipUnless(sympy_available, "Sympy not available") def test_network_disjuncts(self): - ct.check_network_disjuncts(self, True, 'between_steps', - num_partitions=2) - ct.check_network_disjuncts(self, False, 'between_steps', - num_partitions=2) + ct.check_network_disjuncts(self, True, 'between_steps', num_partitions=2) + ct.check_network_disjuncts(self, False, 'between_steps', num_partitions=2) + class LogicalExpressions(unittest.TestCase, CommonTests): def test_logical_constraints_on_disjunct_copied(self): @@ -1884,8 +2035,9 @@ def test_logical_constraints_on_disjunct_copied(self): TransformationFactory('gdp.partition_disjuncts').apply_to( m, variable_partitions=[[m.x], [m.y]], - compute_bounds_method=compute_fbbt_bounds) - d1 = m.d[1].transformation_block() + compute_bounds_method=compute_fbbt_bounds, + ) + d1 = m.d[1].transformation_block self.assertEqual(len(d1.component_map(LogicalConstraint)), 1) c = d1.component("logical_constraints") self.assertIsInstance(c, LogicalConstraint) @@ -1893,7 +2045,7 @@ def test_logical_constraints_on_disjunct_copied(self): self.assertIsInstance(c[1].expr, NotExpression) self.assertIs(c[1].expr.args[0], m.Y[1]) - d2 = m.d[2].transformation_block() + d2 = m.d[2].transformation_block self.assertEqual(len(d2.component_map(LogicalConstraint)), 1) c = d2.component("logical_constraints") self.assertIsInstance(c, LogicalConstraint) @@ -1903,12 +2055,12 @@ def test_logical_constraints_on_disjunct_copied(self): self.assertIs(c[1].expr.args[0], m.Y[1]) self.assertIs(c[1].expr.args[1], m.Y[2]) - d3 = m.d[3].transformation_block() + d3 = m.d[3].transformation_block self.assertEqual(len(d3.component_map(LogicalConstraint)), 1) c = d3.component("logical_constraints") self.assertEqual(len(c), 0) - d4 = m.d[4].transformation_block() + d4 = m.d[4].transformation_block self.assertEqual(len(d4.component_map(LogicalConstraint)), 1) c = d4.component("logical_constraints") self.assertIsInstance(c, LogicalConstraint) @@ -1949,13 +2101,14 @@ def test_logical_constraints_on_disjunct_copied(self): # self.assertTrue(value(m.d[3].indicator_var)) # self.assertFalse(value(m.d[4].indicator_var)) - @unittest.skipIf('gurobi_direct' not in solvers, - 'Gurobi direct solver not available') + @unittest.skipIf( + 'gurobi_direct' not in solvers, 'Gurobi direct solver not available' + ) def test_original_indicator_vars_in_logical_constraints(self): m = models.makeLogicalConstraintsOnDisjuncts() TransformationFactory('gdp.between_steps').apply_to( - m, variable_partitions=[[m.x]], - compute_bounds_method=compute_fbbt_bounds) + m, variable_partitions=[[m.x]], compute_bounds_method=compute_fbbt_bounds + ) self.assertTrue(check_model_algebraic(m)) diff --git a/pyomo/gdp/tests/test_reclassify.py b/pyomo/gdp/tests/test_reclassify.py index 652225be0ba..fd98f8f0954 100644 --- a/pyomo/gdp/tests/test_reclassify.py +++ b/pyomo/gdp/tests/test_reclassify.py @@ -1,7 +1,14 @@ # -*- coding: UTF-8 -*- """Tests disjunct reclassifier transformation.""" import pyomo.common.unittest as unittest -from pyomo.core import (Block, ConcreteModel, TransformationFactory, RangeSet, Constraint, Var) +from pyomo.core import ( + Block, + ConcreteModel, + TransformationFactory, + RangeSet, + Constraint, + Var, +) from pyomo.gdp import Disjunct, Disjunction, GDP_Error @@ -77,7 +84,8 @@ def test_deactivate_nested_disjunction(self): # print(disj.name) # There should be no active Disjunction objects. self.assertIsNone( - next(m.component_data_objects(Disjunction, active=True), None)) + next(m.component_data_objects(Disjunction, active=True), None) + ) def test_do_not_reactivate_disjuncts_with_abandon(self): m = ConcreteModel() diff --git a/pyomo/gdp/tests/test_util.py b/pyomo/gdp/tests/test_util.py index 24e633e3945..90c63717b81 100644 --- a/pyomo/gdp/tests/test_util.py +++ b/pyomo/gdp/tests/test_util.py @@ -12,12 +12,16 @@ import pyomo.common.unittest as unittest from pyomo.core import ConcreteModel, Var, Expression, Block, RangeSet, Any -import pyomo.core.expr.current as EXPR +import pyomo.core.expr as EXPR from pyomo.core.base.expression import _ExpressionData -from pyomo.gdp.util import (clone_without_expression_components, is_child_of, - get_gdp_tree) +from pyomo.gdp.util import ( + clone_without_expression_components, + is_child_of, + get_gdp_tree, +) from pyomo.gdp import Disjunct, Disjunction + class TestGDPUtils(unittest.TestCase): def test_clone_without_expression_components(self): m = ConcreteModel() @@ -30,7 +34,7 @@ def test_clone_without_expression_components(self): self.assertIs(base, test) self.assertEqual(base(), test()) test = clone_without_expression_components(base, {id(m.x): m.y}) - self.assertEqual(3**2+1, test()) + self.assertEqual(3**2 + 1, test()) base = m.e test = clone_without_expression_components(base, {}) @@ -39,7 +43,7 @@ def test_clone_without_expression_components(self): self.assertIsInstance(base, _ExpressionData) self.assertIsInstance(test, EXPR.SumExpression) test = clone_without_expression_components(base, {id(m.x): m.y}) - self.assertEqual(3**2+3-1, test()) + self.assertEqual(3**2 + 3 - 1, test()) base = m.e + m.x test = clone_without_expression_components(base, {}) @@ -50,22 +54,24 @@ def test_clone_without_expression_components(self): self.assertIsInstance(base.arg(0), _ExpressionData) self.assertIsInstance(test.arg(0), EXPR.SumExpression) test = clone_without_expression_components(base, {id(m.x): m.y}) - self.assertEqual(3**2+3-1 + 3, test()) + self.assertEqual(3**2 + 3 - 1 + 3, test()) def test_is_child_of(self): m = ConcreteModel() m.b = Block() - m.b.b_indexed = Block([1,2]) + m.b.b_indexed = Block([1, 2]) m.b_parallel = Block() - + knownBlocks = {} - self.assertFalse(is_child_of(parent=m.b, child=m.b_parallel, - knownBlocks=knownBlocks)) + self.assertFalse( + is_child_of(parent=m.b, child=m.b_parallel, knownBlocks=knownBlocks) + ) self.assertEqual(len(knownBlocks), 2) self.assertFalse(knownBlocks.get(m)) self.assertFalse(knownBlocks.get(m.b_parallel)) - self.assertTrue(is_child_of(parent=m.b, child=m.b.b_indexed[1], - knownBlocks=knownBlocks)) + self.assertTrue( + is_child_of(parent=m.b, child=m.b.b_indexed[1], knownBlocks=knownBlocks) + ) self.assertEqual(len(knownBlocks), 4) self.assertFalse(knownBlocks.get(m)) self.assertFalse(knownBlocks.get(m.b_parallel)) @@ -83,11 +89,10 @@ def test_gdp_tree(self): m.block.d1.b = Block() m.block.d1.b.dd2 = Disjunct() m.block.d1.b.dd3 = Disjunct() - m.block.d1.disjunction = Disjunction(expr=[m.block.d1.dd1, - m.block.d1.b.dd2, - m.block.d1.b.dd3]) - m.block.d1.b.dd2.disjunction = Disjunction(expr=[[m.x >= 1], [m.x <= - -1]]) + m.block.d1.disjunction = Disjunction( + expr=[m.block.d1.dd1, m.block.d1.b.dd2, m.block.d1.b.dd3] + ) + m.block.d1.b.dd2.disjunction = Disjunction(expr=[[m.x >= 1], [m.x <= -1]]) targets = (m,) knownBlocks = {} tree = get_gdp_tree(targets, m, knownBlocks) @@ -95,44 +100,56 @@ def test_gdp_tree(self): # check tree structure first vertices = tree.vertices self.assertEqual(len(vertices), 10) - in_degrees = {m.block.d1 : 1, - m.block.disjunction : 0, - m.disj1 : 1, - m.block.d1.disjunction : 1, - m.block.d1.dd1 : 1, - m.block.d1.b.dd2 : 1, - m.block.d1.b.dd3 : 1, - m.block.d1.b.dd2.disjunction : 1, - m.block.d1.b.dd2.disjunction.disjuncts[0] : 1, - m.block.d1.b.dd2.disjunction.disjuncts[1] : 1 - } + in_degrees = { + m.block.d1: 1, + m.block.disjunction: 0, + m.disj1: 1, + m.block.d1.disjunction: 1, + m.block.d1.dd1: 1, + m.block.d1.b.dd2: 1, + m.block.d1.b.dd3: 1, + m.block.d1.b.dd2.disjunction: 1, + m.block.d1.b.dd2.disjunction.disjuncts[0]: 1, + m.block.d1.b.dd2.disjunction.disjuncts[1]: 1, + } for key, val in in_degrees.items(): self.assertEqual(tree.in_degree(key), val) # This should be deterministic, so we can just check the order - topo_sort = [m.block.disjunction, m.disj1, m.block.d1, - m.block.d1.disjunction, m.block.d1.b.dd3, m.block.d1.b.dd2, - m.block.d1.b.dd2.disjunction, - m.block.d1.b.dd2.disjunction.disjuncts[1], - m.block.d1.b.dd2.disjunction.disjuncts[0], m.block.d1.dd1] + topo_sort = [ + m.block.disjunction, + m.disj1, + m.block.d1, + m.block.d1.disjunction, + m.block.d1.b.dd3, + m.block.d1.b.dd2, + m.block.d1.b.dd2.disjunction, + m.block.d1.b.dd2.disjunction.disjuncts[1], + m.block.d1.b.dd2.disjunction.disjuncts[0], + m.block.d1.dd1, + ] sort = tree.topological_sort() for i, node in enumerate(sort): self.assertIs(node, topo_sort[i]) def add_indexed_disjunction(self, parent, m): parent.indexed = Disjunction(Any) - parent.indexed[1] = [[sum(m.x[i]**2 for i in m.I) <= 1], - [sum((3 - m.x[i])**2 for i in m.I) <= 1]] - parent.indexed[0] = [[(m.x[1] - 1)**2 + m.x[2]**2 <= 1], - [-(m.x[1] - 2)**2 - (m.x[2] - 3)**2 >= -1]] + parent.indexed[1] = [ + [sum(m.x[i] ** 2 for i in m.I) <= 1], + [sum((3 - m.x[i]) ** 2 for i in m.I) <= 1], + ] + parent.indexed[0] = [ + [(m.x[1] - 1) ** 2 + m.x[2] ** 2 <= 1], + [-((m.x[1] - 2) ** 2) - (m.x[2] - 3) ** 2 >= -1], + ] def test_gdp_tree_indexed_disjunction(self): # This is to check that indexed components never actually appear as # nodes in the tree. We should only have DisjunctionDatas and # DisjunctDatas. m = ConcreteModel() - m.I = RangeSet(1,4) - m.x = Var(m.I, bounds=(-2,6)) + m.I = RangeSet(1, 4) + m.x = Var(m.I, bounds=(-2, 6)) self.add_indexed_disjunction(m, m) targets = (m.indexed,) @@ -141,26 +158,33 @@ def test_gdp_tree_indexed_disjunction(self): vertices = tree.vertices self.assertEqual(len(vertices), 6) - in_degrees = {m.indexed[0] : 0, - m.indexed[1] : 0, - m.indexed[0].disjuncts[0] : 1, - m.indexed[0].disjuncts[1] : 1, - m.indexed[1].disjuncts[0] : 1, - m.indexed[1].disjuncts[1] : 1} + in_degrees = { + m.indexed[0]: 0, + m.indexed[1]: 0, + m.indexed[0].disjuncts[0]: 1, + m.indexed[0].disjuncts[1]: 1, + m.indexed[1].disjuncts[0]: 1, + m.indexed[1].disjuncts[1]: 1, + } for key, val in in_degrees.items(): self.assertEqual(tree.in_degree(key), val) - topo_sort = [m.indexed[0], m.indexed[0].disjuncts[1], - m.indexed[0].disjuncts[0], m.indexed[1], - m.indexed[1].disjuncts[1], m.indexed[1].disjuncts[0]] + topo_sort = [ + m.indexed[0], + m.indexed[0].disjuncts[1], + m.indexed[0].disjuncts[0], + m.indexed[1], + m.indexed[1].disjuncts[1], + m.indexed[1].disjuncts[0], + ] sort = tree.topological_sort() for i, node in enumerate(sort): self.assertIs(node, topo_sort[i]) def test_gdp_tree_nested_indexed_disjunction(self): m = ConcreteModel() - m.I = RangeSet(1,4) - m.x = Var(m.I, bounds=(-2,6)) + m.I = RangeSet(1, 4) + m.x = Var(m.I, bounds=(-2, 6)) m.disj1 = Disjunct() self.add_indexed_disjunction(m.disj1, m) m.disj2 = Disjunct() @@ -175,19 +199,25 @@ def test_gdp_tree_nested_indexed_disjunction(self): vertices = tree.vertices self.assertEqual(len(vertices), 6) - in_degrees = {m.disj1.indexed[0] : 0, - m.disj1.indexed[1] : 0, - m.disj1.indexed[0].disjuncts[0] : 1, - m.disj1.indexed[0].disjuncts[1] : 1, - m.disj1.indexed[1].disjuncts[0] : 1, - m.disj1.indexed[1].disjuncts[1] : 1} + in_degrees = { + m.disj1.indexed[0]: 0, + m.disj1.indexed[1]: 0, + m.disj1.indexed[0].disjuncts[0]: 1, + m.disj1.indexed[0].disjuncts[1]: 1, + m.disj1.indexed[1].disjuncts[0]: 1, + m.disj1.indexed[1].disjuncts[1]: 1, + } for key, val in in_degrees.items(): self.assertEqual(tree.in_degree(key), val) - topo_sort = [m.disj1.indexed[0], m.disj1.indexed[0].disjuncts[1], - m.disj1.indexed[0].disjuncts[0], m.disj1.indexed[1], - m.disj1.indexed[1].disjuncts[1], - m.disj1.indexed[1].disjuncts[0]] + topo_sort = [ + m.disj1.indexed[0], + m.disj1.indexed[0].disjuncts[1], + m.disj1.indexed[0].disjuncts[0], + m.disj1.indexed[1], + m.disj1.indexed[1].disjuncts[1], + m.disj1.indexed[1].disjuncts[0], + ] sort = tree.topological_sort() for i, node in enumerate(sort): self.assertIs(node, topo_sort[i]) @@ -208,14 +238,21 @@ def test_gdp_tree_nested_indexed_disjunction(self): for key, val in in_degrees.items(): self.assertEqual(tree.in_degree(key), val) - topo_sort = [m.another_disjunction, m.disj2, m.disj1, - m.disj1.indexed[1], m.disj1.indexed[1].disjuncts[1], - m.disj1.indexed[1].disjuncts[0], m.disj1.indexed[0], - m.disj1.indexed[0].disjuncts[1], - m.disj1.indexed[0].disjuncts[0]] + topo_sort = [ + m.another_disjunction, + m.disj2, + m.disj1, + m.disj1.indexed[1], + m.disj1.indexed[1].disjuncts[1], + m.disj1.indexed[1].disjuncts[0], + m.disj1.indexed[0], + m.disj1.indexed[0].disjuncts[1], + m.disj1.indexed[0].disjuncts[0], + ] sort = tree.topological_sort() for i, node in enumerate(sort): self.assertIs(node, topo_sort[i]) + if __name__ == '__main__': unittest.main() diff --git a/pyomo/gdp/transformed_disjunct.py b/pyomo/gdp/transformed_disjunct.py new file mode 100644 index 00000000000..400f77a31f6 --- /dev/null +++ b/pyomo/gdp/transformed_disjunct.py @@ -0,0 +1,32 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.autoslots import AutoSlots +from pyomo.core.base.block import _BlockData, IndexedBlock +from pyomo.core.base.global_set import UnindexedComponent_index, UnindexedComponent_set + + +class _TransformedDisjunctData(_BlockData): + __slots__ = ('_src_disjunct',) + __autoslot_mappers__ = {'_src_disjunct': AutoSlots.weakref_mapper} + + @property + def src_disjunct(self): + return None if self._src_disjunct is None else self._src_disjunct() + + def __init__(self, component): + _BlockData.__init__(self, component) + # pointer to the Disjunct whose transformation block this is. + self._src_disjunct = None + + +class _TransformedDisjunct(IndexedBlock): + _ComponentDataClass = _TransformedDisjunctData diff --git a/pyomo/gdp/util.py b/pyomo/gdp/util.py index 561320d8d26..1a0a60e6beb 100644 --- a/pyomo/gdp/util.py +++ b/pyomo/gdp/util.py @@ -12,10 +12,16 @@ from pyomo.gdp import GDP_Error, Disjunction from pyomo.gdp.disjunct import _DisjunctData, Disjunct -import pyomo.core.expr.current as EXPR +import pyomo.core.expr as EXPR from pyomo.core.base.component import _ComponentBase from pyomo.core import ( - Block, TraversalStrategy, SortComponents, LogicalConstraint) + Block, + Suffix, + TraversalStrategy, + SortComponents, + LogicalConstraint, + value, +) from pyomo.core.base.block import _BlockData from pyomo.common.collections import ComponentMap, ComponentSet, OrderedSet from pyomo.opt import TerminationCondition, SolverStatus @@ -26,20 +32,29 @@ logger = logging.getLogger('pyomo.gdp') -_acceptable_termination_conditions = set([ - TerminationCondition.optimal, - TerminationCondition.globallyOptimal, - TerminationCondition.locallyOptimal, -]) -_infeasible_termination_conditions = set([ - TerminationCondition.infeasible, - TerminationCondition.invalidProblem, -]) +_acceptable_termination_conditions = set( + [ + TerminationCondition.optimal, + TerminationCondition.globallyOptimal, + TerminationCondition.locallyOptimal, + ] +) +_infeasible_termination_conditions = set( + [TerminationCondition.infeasible, TerminationCondition.invalidProblem] +) -class NORMAL(object): pass -class INFEASIBLE(object): pass -class NONOPTIMAL(object): pass +class NORMAL(object): + pass + + +class INFEASIBLE(object): + pass + + +class NONOPTIMAL(object): + pass + def verify_successful_solve(results): status = results.solver.status @@ -81,15 +96,38 @@ def clone_without_expression_components(expr, substitute=None): if substitute is None: substitute = {} # - visitor = EXPR.ExpressionReplacementVisitor(substitute=substitute, - remove_named_expressions=True) + visitor = EXPR.ExpressionReplacementVisitor( + substitute=substitute, remove_named_expressions=True + ) return visitor.walk_expression(expr) +def _raise_disjunct_in_multiple_disjunctions_error(disjunct, disjunction): + # we've transformed it, which means this is the second time it's appearing + # in a Disjunction + raise GDP_Error( + "The disjunct '%s' has been transformed, but '%s', a disjunction " + "it appears in, has not. Putting the same disjunct in " + "multiple disjunctions is not supported." % (disjunct.name, disjunction.name) + ) + + class GDPTree: + """ + Stores a forest representing the hierarchy between GDP components on a + model: for single-level GDPs, each tree is rooted at a Disjunction and + each of the Disjuncts in the Disjunction is a leaf. For nested GDPs, the + Disjuncts may not be leaves, and could have child Disjunctions of their + own. + """ + def __init__(self): - self._adjacency_list = {} + self._children = {} self._in_degrees = {} + # Every node has exactly one or 0 parents. + self._parent = {} + + self._root_disjunct = {} # This needs to be ordered so that topological sort is deterministic self._vertices = OrderedSet() @@ -100,31 +138,71 @@ def vertices(self): def add_node(self, u): self._vertices.add(u) - def _update_in_degree(self, v): - if v not in self._in_degrees: - self._in_degrees[v] = 1 + def parent(self, u): + """Returns the parent node of u, or None if u is a root. + + Arg: + u : A node in the tree + """ + if u not in self._vertices: + raise ValueError( + "'%s' is not a vertex in the GDP tree. Cannot " + "retrieve its parent." % u + ) + if u in self._parent: + return self._parent[u] else: - self._in_degrees[v] += 1 + return None + + def parent_disjunct(self, u): + """Returns the parent Disjunct of u, or None if u is the + closest-to-root Disjunct in the forest. + + Arg: + u : A node in the forest + """ + return self.parent(self.parent(u)) + + def root_disjunct(self, u): + """Returns the highest parent Disjunct in the hierarchy, or None if + the component is not nested. + + Arg: + u : A node in the tree + """ + rootmost_disjunct = None + parent = self.parent(u) + while True: + if parent is None: + return rootmost_disjunct + if isinstance(parent, _DisjunctData) or parent.ctype is Disjunct: + rootmost_disjunct = parent + parent = self.parent(parent) - def add_edge(self, u, v): - if u not in self._adjacency_list: - self._adjacency_list[u] = OrderedSet() - self._adjacency_list[u].add(v) - self._update_in_degree(v) + def add_node(self, u): + if u not in self._children: + self._children[u] = OrderedSet() self._vertices.add(u) - self._vertices.add(v) + + def add_edge(self, u, v): + self.add_node(u) + self.add_node(v) + self._children[u].add(v) + if v in self._parent and self._parent[v] is not u: + _raise_disjunct_in_multiple_disjunctions_error(v, u) + self._parent[v] = u def _visit_vertex(self, u, leaf_to_root): - if u in self._adjacency_list: - for v in self._adjacency_list[u]: + if u in self._children: + for v in self._children[u]: if v not in leaf_to_root: self._visit_vertex(v, leaf_to_root) # we're done--we've been to all its children leaf_to_root.add(u) - def _topological_sort(self): - # this is reverse of the list we should return (but happens to be what - # we want for hull and bigm) + def _reverse_topological_iterator(self): + # this returns nodes of the tree ordered so that no node is before any + # of its descendents. leaf_to_root = OrderedSet() for u in self.vertices: if u not in leaf_to_root: @@ -133,15 +211,27 @@ def _topological_sort(self): return leaf_to_root def topological_sort(self): - return reversed(self._topological_sort()) + return list(reversed(self._reverse_topological_iterator())) def reverse_topological_sort(self): - return self._topological_sort() + return self._reverse_topological_iterator() def in_degree(self, u): - if u not in self._in_degrees: + if u not in self._parent: return 0 - return self._in_degrees[u] + return 1 + + def is_leaf(self, u): + if len(self._children[u]) == 0: + return True + return False + + @property + def leaves(self): + for u, children in self._children.items(): + if len(children) == 0: + yield u + def _parent_disjunct(obj): parent = obj.parent_block() @@ -152,53 +242,100 @@ def _parent_disjunct(obj): return None -def _gather_disjunctions(block, gdp_tree): + +def _check_properly_deactivated(disjunct): + if disjunct.indicator_var.is_fixed(): + if not value(disjunct.indicator_var): + # The user cleanly deactivated the disjunct: there + # is nothing for us to do here. + return + else: + raise GDP_Error( + "The disjunct '%s' is deactivated, but the " + "indicator_var is fixed to %s. This makes no sense." + % (disjunct.name, value(disjunct.indicator_var)) + ) + if disjunct._transformation_block is None: + raise GDP_Error( + "The disjunct '%s' is deactivated, but the " + "indicator_var is not fixed and the disjunct does not " + "appear to have been transformed. This makes no sense. " + "(If the intent is to deactivate the disjunct, fix its " + "indicator_var to False.)" % (disjunct.name,) + ) + + +def _gather_disjunctions(block, gdp_tree, include_root=True): + if not include_root: + # The argument 'block' may be a root node (it was in the list of + # targets): We will not add it to the tree in this call. It may be added + # later if in fact it is a descendent of another target, but as far as + # we know now, it does not belong in the tree. + root = block to_explore = [block] while to_explore: block = to_explore.pop() - if block.ctype is Disjunct: - gdp_tree.add_node(block) for disjunction in block.component_data_objects( - Disjunction, - active=True, - sort=SortComponents.deterministic, - descend_into=Block): + Disjunction, + active=True, + sort=SortComponents.deterministic, + descend_into=Block, + ): # add the node (because it might be an empty Disjunction and block # might be a Block, in case it wouldn't get added below.) gdp_tree.add_node(disjunction) for disjunct in disjunction.disjuncts: if not disjunct.active: + if disjunct.transformation_block is not None: + _raise_disjunct_in_multiple_disjunctions_error( + disjunct, disjunction + ) + _check_properly_deactivated(disjunct) continue gdp_tree.add_edge(disjunction, disjunct) to_explore.append(disjunct) if block.ctype is Disjunct: + if not include_root and block is root: + continue gdp_tree.add_edge(block, disjunction) return gdp_tree -def get_gdp_tree(targets, instance, knownBlocks): + +def get_gdp_tree(targets, instance, knownBlocks=None): + if knownBlocks is None: + knownBlocks = {} gdp_tree = GDPTree() for t in targets: # first check it's not insane, that is, it is at least on the instance - if not is_child_of(parent=instance, child=t, - knownBlocks=knownBlocks): - raise GDP_Error("Target '%s' is not a component on instance " - "'%s'!" % (t.name, instance.name)) + if not is_child_of(parent=instance, child=t, knownBlocks=knownBlocks): + raise GDP_Error( + "Target '%s' is not a component on instance " + "'%s'!" % (t.name, instance.name) + ) if t.ctype is Block or isinstance(t, _BlockData): _blocks = t.values() if t.is_indexed() else (t,) for block in _blocks: if not block.active: continue - gdp_tree = _gather_disjunctions(block, gdp_tree) + gdp_tree = _gather_disjunctions(block, gdp_tree, include_root=False) elif t.ctype is Disjunction: parent = _parent_disjunct(t) if parent is not None and parent in targets: gdp_tree.add_edge(parent, t) _disjunctions = t.values() if t.is_indexed() else (t,) for disjunction in _disjunctions: + if disjunction.algebraic_constraint is not None: + # It's already transformed. + continue gdp_tree.add_node(disjunction) for disjunct in disjunction.disjuncts: if not disjunct.active: + if disjunct.transformation_block is not None: + _raise_disjunct_in_multiple_disjunctions_error( + disjunct, disjunction + ) + _check_properly_deactivated(disjunct) continue gdp_tree.add_edge(disjunction, disjunct) gdp_tree = _gather_disjunctions(disjunct, gdp_tree) @@ -207,16 +344,19 @@ def get_gdp_tree(targets, instance, knownBlocks): # deal with this raise GDP_Error( "Target '%s' was not a Block, Disjunct, or Disjunction. " - "It was of type %s and can't be transformed." - % (t.name, type(t)) ) + "It was of type %s and can't be transformed." % (t.name, type(t)) + ) return gdp_tree -def preprocess_targets(targets, instance, knownBlocks): - gdp_tree = get_gdp_tree(targets, instance, knownBlocks) - # this is for bigm and hull: We need to transform from the leaves up, so we - # want a reverse of a topological sort: no parent can come before its child. + +def preprocess_targets(targets, instance, knownBlocks, gdp_tree=None): + if gdp_tree is None: + gdp_tree = get_gdp_tree(targets, instance, knownBlocks) + # this is for bigm: We need to transform from the leaves up, so we want a + # reverse of a topological sort: no parent can come before its child. return gdp_tree.reverse_topological_sort() + # [ESJ 07/09/2019 Should this be a more general utility function elsewhere? I'm # putting it here for now so that all the gdp transformations can use it. # Returns True if child is a node or leaf in the tree rooted at parent, False @@ -230,7 +370,7 @@ def is_child_of(parent, child, knownBlocks=None): if knownBlocks is None: knownBlocks = {} tmp = set() - node = child + node = child if isinstance(child, (Block, _BlockData)) else child.parent_block() while True: known = knownBlocks.get(node) if known: @@ -253,11 +393,13 @@ def is_child_of(parent, child, knownBlocks=None): else: node = container + def _to_dict(val): if isinstance(val, (dict, ComponentMap)): - return val + return val return {None: val} + def get_src_disjunction(xor_constraint): """Return the Disjunction corresponding to xor_constraint @@ -275,14 +417,17 @@ def get_src_disjunction(xor_constraint): # block while we do the transformation. And then this method could query # that map. m = xor_constraint.model() - for disjunction in m.component_data_objects(Disjunction, - descend_into=(Block, Disjunct)): + for disjunction in m.component_data_objects( + Disjunction, descend_into=(Block, Disjunct) + ): if disjunction._algebraic_constraint: if disjunction._algebraic_constraint() is xor_constraint: return disjunction - raise GDP_Error("It appears that '%s' is not an XOR or OR constraint " - "resulting from transforming a Disjunction." - % xor_constraint.name) + raise GDP_Error( + "It appears that '%s' is not an XOR or OR constraint " + "resulting from transforming a Disjunction." % xor_constraint.name + ) + def get_src_disjunct(transBlock): """Return the Disjunct object whose transformed components are on @@ -293,12 +438,16 @@ def get_src_disjunct(transBlock): transBlock: _BlockData which is in the relaxedDisjuncts IndexedBlock on a transformation block. """ - if not hasattr(transBlock, "_srcDisjunct") or \ - type(transBlock._srcDisjunct) is not weakref_ref: - raise GDP_Error("Block '%s' doesn't appear to be a transformation " - "block for a disjunct. No source disjunct found." - % transBlock.name) - return transBlock._srcDisjunct() + if ( + not hasattr(transBlock, "_src_disjunct") + or type(transBlock._src_disjunct) is not weakref_ref + ): + raise GDP_Error( + "Block '%s' doesn't appear to be a transformation " + "block for a disjunct. No source disjunct found." % transBlock.name + ) + return transBlock._src_disjunct() + def get_src_constraint(transformedConstraint): """Return the original Constraint whose transformed counterpart is @@ -315,11 +464,14 @@ def get_src_constraint(transformedConstraint): # us the wrong thing. If they happen to also have a _constraintMap then # the world is really against us. if not hasattr(transBlock, "_constraintMap"): - raise GDP_Error("Constraint '%s' is not a transformed constraint" - % transformedConstraint.name) + raise GDP_Error( + "Constraint '%s' is not a transformed constraint" + % transformedConstraint.name + ) # if something goes wrong here, it's a bug in the mappings. return transBlock._constraintMap['srcConstraints'][transformedConstraint] + def _find_parent_disjunct(constraint): # traverse up until we find the disjunct this constraint lives on parent_disjunct = constraint.parent_block() @@ -327,24 +479,29 @@ def _find_parent_disjunct(constraint): if parent_disjunct is None: raise GDP_Error( "Constraint '%s' is not on a disjunct and so was not " - "transformed" % constraint.name) + "transformed" % constraint.name + ) parent_disjunct = parent_disjunct.parent_block() return parent_disjunct + def _get_constraint_transBlock(constraint): parent_disjunct = _find_parent_disjunct(constraint) # we know from _find_parent_disjunct that parent_disjunct is a Disjunct, # so the below is OK transBlock = parent_disjunct._transformation_block if transBlock is None: - raise GDP_Error("Constraint '%s' is on a disjunct which has not been " - "transformed" % constraint.name) + raise GDP_Error( + "Constraint '%s' is on a disjunct which has not been " + "transformed" % constraint.name + ) # if it's not None, it's the weakref we wanted. transBlock = transBlock() return transBlock + def get_transformed_constraints(srcConstraint): """Return the transformed version of srcConstraint @@ -354,43 +511,21 @@ def get_transformed_constraints(srcConstraint): the subtree of a transformed Disjunct """ if srcConstraint.is_indexed(): - raise GDP_Error("Argument to get_transformed_constraint should be " - "a ScalarConstraint or _ConstraintData. (If you " - "want the container for all transformed constraints " - "from an IndexedDisjunction, this is the parent " - "component of a transformed constraint originating " - "from any of its _ComponentDatas.)") + raise GDP_Error( + "Argument to get_transformed_constraint should be " + "a ScalarConstraint or _ConstraintData. (If you " + "want the container for all transformed constraints " + "from an IndexedDisjunction, this is the parent " + "component of a transformed constraint originating " + "from any of its _ComponentDatas.)" + ) transBlock = _get_constraint_transBlock(srcConstraint) try: - return transBlock._constraintMap['transformedConstraints'][ - srcConstraint] + return transBlock._constraintMap['transformedConstraints'][srcConstraint] except: - logger.error("Constraint '%s' has not been transformed." - % srcConstraint.name) + logger.error("Constraint '%s' has not been transformed." % srcConstraint.name) raise -def _warn_for_active_disjunction(disjunction, disjunct): - # this should only have gotten called if the disjunction is active - assert disjunction.active - problemdisj = disjunction - if disjunction.is_indexed(): - for i in sorted(disjunction.keys()): - if disjunction[i].active: - # a _DisjunctionData is active, we will yell about - # it specifically. - problemdisj = disjunction[i] - break - - parentblock = problemdisj.parent_block() - # the disjunction should only have been active if it wasn't transformed - assert problemdisj.algebraic_constraint is None - _probDisjName = problemdisj.getname(fully_qualified=True) - _disjName = disjunct.getname(fully_qualified=True) - raise GDP_Error("Found untransformed disjunction '%s' in disjunct '%s'! " - "The disjunction must be transformed before the " - "disjunct. If you are using targets, put the " - "disjunction before the disjunct in the list." - % (_probDisjName, _disjName)) def _warn_for_active_disjunct(innerdisjunct, outerdisjunct): assert innerdisjunct.active @@ -402,13 +537,17 @@ def _warn_for_active_disjunct(innerdisjunct, outerdisjunct): problemdisj = innerdisjunct[i] break - raise GDP_Error("Found active disjunct '{0}' in disjunct '{1}'! Either {0} " - "is not in a disjunction or the disjunction it is in " - "has not been transformed. {0} needs to be deactivated " - "or its disjunction transformed before {1} can be " - "transformed.".format( - problemdisj.getname(fully_qualified=True), - outerdisjunct.getname(fully_qualified=True))) + raise GDP_Error( + "Found active disjunct '{0}' in disjunct '{1}'! Either {0} " + "is not in a disjunction or the disjunction it is in " + "has not been transformed. {0} needs to be deactivated " + "or its disjunction transformed before {1} can be " + "transformed.".format( + problemdisj.getname(fully_qualified=True), + outerdisjunct.getname(fully_qualified=True), + ) + ) + def check_model_algebraic(instance): """Checks if there are any active Disjuncts or Disjunctions reachable via @@ -420,10 +559,18 @@ def check_model_algebraic(instance): ---------- instance: a Model or Block """ - disjunction_set = {i for i in instance.component_data_objects( - Disjunction, descend_into=(Block, Disjunct), active=None)} - active_disjunction_set = {i for i in instance.component_data_objects( - Disjunction, descend_into=(Block, Disjunct), active=True)} + disjunction_set = { + i + for i in instance.component_data_objects( + Disjunction, descend_into=(Block, Disjunct), active=None + ) + } + active_disjunction_set = { + i + for i in instance.component_data_objects( + Disjunction, descend_into=(Block, Disjunct), active=True + ) + } disjuncts_in_disjunctions = set() for i in disjunction_set: disjuncts_in_disjunctions.update(i.disjuncts) @@ -432,56 +579,68 @@ def check_model_algebraic(instance): disjuncts_in_active_disjunctions.update(i.disjuncts) for disjunct in instance.component_data_objects( - Disjunct, descend_into=(Block,), - descent_order=TraversalStrategy.PostfixDFS): + Disjunct, descend_into=(Block,), descent_order=TraversalStrategy.PostfixDFS + ): # check if it's relaxed if disjunct.transformation_block is not None: continue # It's not transformed, check if we should complain - elif disjunct.active and _disjunct_not_fixed_true(disjunct) and \ - _disjunct_on_active_block(disjunct): + elif ( + disjunct.active + and _disjunct_not_fixed_true(disjunct) + and _disjunct_on_active_block(disjunct) + ): # If someone thinks they've transformed the whole instance, but # there is still an active Disjunct on the model, we will warn # them. In the future this should be the writers' job.) if disjunct not in disjuncts_in_disjunctions: - logger.warning('Disjunct "%s" is currently active, ' - 'but was not found in any Disjunctions. ' - 'This is generally an error as the model ' - 'has not been fully relaxed to a ' - 'pure algebraic form.' % (disjunct.name,)) + logger.warning( + 'Disjunct "%s" is currently active, ' + 'but was not found in any Disjunctions. ' + 'This is generally an error as the model ' + 'has not been fully relaxed to a ' + 'pure algebraic form.' % (disjunct.name,) + ) return False elif disjunct not in disjuncts_in_active_disjunctions: - logger.warning('Disjunct "%s" is currently active. While ' - 'it participates in a Disjunction, ' - 'that Disjunction is currently deactivated. ' - 'This is generally an error as the ' - 'model has not been fully relaxed to a pure ' - 'algebraic form. Did you deactivate ' - 'the Disjunction without addressing the ' - 'individual Disjuncts?' % (disjunct.name,)) + logger.warning( + 'Disjunct "%s" is currently active. While ' + 'it participates in a Disjunction, ' + 'that Disjunction is currently deactivated. ' + 'This is generally an error as the ' + 'model has not been fully relaxed to a pure ' + 'algebraic form. Did you deactivate ' + 'the Disjunction without addressing the ' + 'individual Disjuncts?' % (disjunct.name,) + ) return False else: - logger.warning('Disjunct "%s" is currently active. It must be ' - 'transformed or deactivated before solving the ' - 'model.' % (disjunct.name,)) + logger.warning( + 'Disjunct "%s" is currently active. It must be ' + 'transformed or deactivated before solving the ' + 'model.' % (disjunct.name,) + ) return False - for cons in instance.component_data_objects(LogicalConstraint, - descend_into=Block, - active=True): + for cons in instance.component_data_objects( + LogicalConstraint, descend_into=Block, active=True + ): if cons.active: - logger.warning('LogicalConstraint "%s" is currently active. It ' - 'must be transformed or deactivated before solving ' - 'the model.' % cons.name) + logger.warning( + 'LogicalConstraint "%s" is currently active. It ' + 'must be transformed or deactivated before solving ' + 'the model.' % cons.name + ) return False # We didn't find anything bad. return True + def _disjunct_not_fixed_true(disjunct): # Return true if the disjunct indicator variable is not fixed to True - return not (disjunct.indicator_var.fixed and - disjunct.indicator_var.value) + return not (disjunct.indicator_var.fixed and disjunct.indicator_var.value) + def _disjunct_on_active_block(disjunct): # Check first to make sure that the disjunct is not a descendent of an @@ -493,9 +652,12 @@ def _disjunct_on_active_block(disjunct): if parent_block.ctype is Block and not parent_block.active: return False # properly deactivated Disjunct - elif (parent_block.ctype is Disjunct and not parent_block.active - and parent_block.indicator_var.value == False - and parent_block.indicator_var.fixed): + elif ( + parent_block.ctype is Disjunct + and not parent_block.active + and parent_block.indicator_var.value == False + and parent_block.indicator_var.fixed + ): return False else: # Step up one level in the hierarchy diff --git a/pyomo/kernel/__init__.py b/pyomo/kernel/__init__.py index e9794ee56d7..6ecea6343cd 100644 --- a/pyomo/kernel/__init__.py +++ b/pyomo/kernel/__init__.py @@ -16,149 +16,167 @@ # import pyomo.environ import pyomo.opt -from pyomo.opt import (SolverFactory, - SolverStatus, - TerminationCondition) +from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition # # Define the modeling namespace # from pyomo.common.collections import ComponentMap, ComponentSet from pyomo.core.expr import ( - numvalue, numeric_expr, boolean_value, logical_expr, current, - calculus, symbol_map, expr_errors, visitor, sympy_tools, taylor_series, - expr_common, cnf_walker, template_expr + numvalue, + numeric_expr, + boolean_value, + logical_expr, + calculus, + symbol_map, + expr_errors, + visitor, + sympy_tools, + taylor_series, + expr_common, + cnf_walker, + template_expr, ) from pyomo.core.expr.numvalue import ( - value, is_constant, is_fixed, is_variable_type, - is_potentially_variable, NumericValue, ZeroConstant, - native_numeric_types, native_types, polynomial_degree, + value, + is_constant, + is_fixed, + is_variable_type, + is_potentially_variable, + NumericValue, + ZeroConstant, + native_numeric_types, + native_types, + polynomial_degree, ) from pyomo.core.expr.boolean_value import BooleanValue -from pyomo.core.expr.numeric_expr import linear_expression, nonlinear_expression - -from pyomo.core.expr.logical_expr import (land, lor, equivalent, exactly, - atleast, atmost, implies, lnot, - xor, inequality) - -from pyomo.core.expr.current import ( - log, log10, sin, cos, tan, cosh, sinh, tanh, - asin, acos, atan, exp, sqrt, asinh, acosh, - atanh, ceil, floor, +from pyomo.core.expr import ( + linear_expression, + nonlinear_expression, + land, + lor, + equivalent, + exactly, + atleast, + atmost, + implies, + lnot, + xor, + inequality, + log, + log10, + sin, + cos, + tan, + cosh, + sinh, + tanh, + asin, + acos, + atan, + exp, + sqrt, + asinh, + acosh, + atanh, + ceil, + floor, Expr_if, ) from pyomo.core.expr.calculus.derivatives import differentiate from pyomo.core.expr.taylor_series import taylor_series_expansion import pyomo.core.kernel -from pyomo.kernel.util import (generate_names, - preorder_traversal, - pprint) -from pyomo.core.kernel.variable import \ - (variable, - variable_tuple, - variable_list, - variable_dict) -from pyomo.core.kernel.constraint import \ - (constraint, - linear_constraint, - constraint_tuple, - constraint_list, - constraint_dict) -from pyomo.core.kernel.matrix_constraint import \ - matrix_constraint +from pyomo.kernel.util import generate_names, preorder_traversal, pprint +from pyomo.core.kernel.variable import ( + variable, + variable_tuple, + variable_list, + variable_dict, +) +from pyomo.core.kernel.constraint import ( + constraint, + linear_constraint, + constraint_tuple, + constraint_list, + constraint_dict, +) +from pyomo.core.kernel.matrix_constraint import matrix_constraint import pyomo.core.kernel.conic as conic -from pyomo.core.kernel.parameter import \ - (parameter, - functional_value, - parameter_tuple, - parameter_list, - parameter_dict) -from pyomo.core.kernel.expression import \ - (noclone, - expression, - data_expression, - expression_tuple, - expression_list, - expression_dict) -from pyomo.core.kernel.objective import \ - (maximize, - minimize, - objective, - objective_tuple, - objective_list, - objective_dict) -from pyomo.core.kernel.sos import \ - (sos, - sos1, - sos2, - sos_tuple, - sos_list, - sos_dict) -from pyomo.core.kernel.suffix import \ - (suffix, - suffix_dict, - export_suffix_generator, - import_suffix_generator, - local_suffix_generator, - suffix_generator) -from pyomo.core.kernel.block import \ - (block, - block_tuple, - block_list, - block_dict) -from pyomo.core.kernel.piecewise_library.transforms import \ - piecewise -from pyomo.core.kernel.piecewise_library.transforms_nd import \ - piecewise_nd -from pyomo.core.kernel.set_types import \ - (RealSet, - IntegerSet, - BooleanSet) +from pyomo.core.kernel.parameter import ( + parameter, + functional_value, + parameter_tuple, + parameter_list, + parameter_dict, +) +from pyomo.core.kernel.expression import ( + noclone, + expression, + data_expression, + expression_tuple, + expression_list, + expression_dict, +) +from pyomo.core.kernel.objective import ( + maximize, + minimize, + objective, + objective_tuple, + objective_list, + objective_dict, +) +from pyomo.core.kernel.sos import sos, sos1, sos2, sos_tuple, sos_list, sos_dict +from pyomo.core.kernel.suffix import ( + suffix, + suffix_dict, + export_suffix_generator, + import_suffix_generator, + local_suffix_generator, + suffix_generator, +) +from pyomo.core.kernel.block import block, block_tuple, block_list, block_dict +from pyomo.core.kernel.piecewise_library.transforms import piecewise +from pyomo.core.kernel.piecewise_library.transforms_nd import piecewise_nd +from pyomo.core.kernel.set_types import RealSet, IntegerSet, BooleanSet from pyomo.environ import ( - Reals, - PositiveReals, - NonPositiveReals, - NegativeReals, - NonNegativeReals, - PercentFraction, - UnitInterval, - Integers, - PositiveIntegers, - NonPositiveIntegers, - NegativeIntegers, - NonNegativeIntegers, - Boolean, - Binary, - RealInterval, - IntegerInterval, + Reals, + PositiveReals, + NonPositiveReals, + NegativeReals, + NonNegativeReals, + PercentFraction, + UnitInterval, + Integers, + PositiveIntegers, + NonPositiveIntegers, + NegativeIntegers, + NonNegativeIntegers, + Boolean, + Binary, + RealInterval, + IntegerInterval, ) + # # allow the use of standard kernel modeling components # as the ctype argument for the general iterator method # from pyomo.core.kernel.base import _convert_ctype -_convert_ctype[block] = \ - pyomo.core.kernel.block.IBlock -_convert_ctype[variable] = \ - pyomo.core.kernel.variable.IVariable -_convert_ctype[constraint] = \ - pyomo.core.kernel.constraint.IConstraint -_convert_ctype[parameter] = \ - pyomo.core.kernel.parameter.IParameter -_convert_ctype[expression] = \ - pyomo.core.kernel.expression.IExpression -_convert_ctype[objective] = \ - pyomo.core.kernel.objective.IObjective -_convert_ctype[sos] = \ - pyomo.core.kernel.sos.ISOS -_convert_ctype[suffix] = \ - pyomo.core.kernel.suffix.ISuffix + +_convert_ctype[block] = pyomo.core.kernel.block.IBlock +_convert_ctype[variable] = pyomo.core.kernel.variable.IVariable +_convert_ctype[constraint] = pyomo.core.kernel.constraint.IConstraint +_convert_ctype[parameter] = pyomo.core.kernel.parameter.IParameter +_convert_ctype[expression] = pyomo.core.kernel.expression.IExpression +_convert_ctype[objective] = pyomo.core.kernel.objective.IObjective +_convert_ctype[sos] = pyomo.core.kernel.sos.ISOS +_convert_ctype[suffix] = pyomo.core.kernel.suffix.ISuffix del _convert_ctype # @@ -171,29 +189,24 @@ # # Set up mappings between AML and Kernel ctypes # -_convert_ctype[pyomo.environ.Block] = \ - pyomo.core.kernel.block.IBlock -_convert_ctype[pyomo.environ.Var] = \ - pyomo.core.kernel.variable.IVariable -_convert_ctype[pyomo.environ.Constraint] = \ - pyomo.core.kernel.constraint.IConstraint -_convert_ctype[pyomo.environ.Param] = \ - pyomo.core.kernel.parameter.IParameter -_convert_ctype[pyomo.environ.Expression] = \ - pyomo.core.kernel.expression.IExpression -_convert_ctype[pyomo.environ.Objective] = \ - pyomo.core.kernel.objective.IObjective -_convert_ctype[pyomo.environ.SOSConstraint] = \ - pyomo.core.kernel.sos.ISOS -_convert_ctype[pyomo.environ.Suffix] = \ - pyomo.core.kernel.suffix.ISuffix +_convert_ctype[pyomo.environ.Block] = pyomo.core.kernel.block.IBlock +_convert_ctype[pyomo.environ.Var] = pyomo.core.kernel.variable.IVariable +_convert_ctype[pyomo.environ.Constraint] = pyomo.core.kernel.constraint.IConstraint +_convert_ctype[pyomo.environ.Param] = pyomo.core.kernel.parameter.IParameter +_convert_ctype[pyomo.environ.Expression] = pyomo.core.kernel.expression.IExpression +_convert_ctype[pyomo.environ.Objective] = pyomo.core.kernel.objective.IObjective +_convert_ctype[pyomo.environ.SOSConstraint] = pyomo.core.kernel.sos.ISOS +_convert_ctype[pyomo.environ.Suffix] = pyomo.core.kernel.suffix.ISuffix # # Set up back mappings from Kernel back to AML ctypes # _kernel_ctype_backmap.update( - {v: k for k, v in _convert_ctype.items() - if not issubclass(k, pyomo.core.kernel.base.ICategorizedObject)} + { + v: k + for k, v in _convert_ctype.items() + if not issubclass(k, pyomo.core.kernel.base.ICategorizedObject) + } ) del _convert_ctype @@ -203,8 +216,8 @@ # Now cleanup the namespace a bit # -import pyomo.core.kernel.piecewise_library.util as \ - piecewise_util +import pyomo.core.kernel.piecewise_library.util as piecewise_util + del util del pyomo @@ -212,18 +225,24 @@ # Ducktyping to work with a solver interfaces. Ideally, # everything below here could be deleted one day. # -from pyomo.core.kernel.heterogeneous_container import (heterogeneous_containers, - IHeterogeneousContainer) +from pyomo.core.kernel.heterogeneous_container import ( + heterogeneous_containers, + IHeterogeneousContainer, +) + + def _component_data_objects(self, *args, **kwds): # this is not yet handled kwds.pop('sort', None) if 'active' not in kwds: kwds['active'] = None yield from self.components(*args, **kwds) -IHeterogeneousContainer.component_data_objects = \ - _component_data_objects + + +IHeterogeneousContainer.component_data_objects = _component_data_objects del _component_data_objects + def _component_objects(self, *args, **kwds): # this is not yet handled kwds.pop('sort', None) @@ -231,33 +250,40 @@ def _component_objects(self, *args, **kwds): assert kwds.pop('descent_order', None) is None active = kwds.pop('active', None) descend_into = kwds.pop('descend_into', True) - for item in heterogeneous_containers(self, - active=active, - descend_into=descend_into): + for item in heterogeneous_containers( + self, active=active, descend_into=descend_into + ): yield from item.children(*args, **kwds) -IHeterogeneousContainer.component_objects = \ - _component_objects + + +IHeterogeneousContainer.component_objects = _component_objects del _component_objects del IHeterogeneousContainer + def _block_data_objects(self, **kwds): # this is not yet handled kwds.pop('sort', None) active = kwds.get("active", None) assert active in (None, True) # if not active, then nothing below is active - if (active is not None) and \ - (not self.active): + if (active is not None) and (not self.active): return yield self yield from self.components(ctype=self.ctype, **kwds) + + block.block_data_objects = _block_data_objects del _block_data_objects + # Note sure where this gets used or why we need it def _valid_problem_types(self): import pyomo.opt + return [pyomo.opt.base.ProblemFormat.pyomo] + + block.valid_problem_types = _valid_problem_types del _valid_problem_types diff --git a/pyomo/kernel/util.py b/pyomo/kernel/util.py index 34158493887..5fba6a2c2d9 100644 --- a/pyomo/kernel/util.py +++ b/pyomo/kernel/util.py @@ -14,18 +14,16 @@ from pyomo.common.collections import ComponentMap import pyomo.core -from pyomo.core.expr.numvalue import \ - NumericValue -from pyomo.core.kernel.base import \ - (ICategorizedObject, - _no_ctype, - _convert_ctype, - _convert_descend_into) - -def preorder_traversal(node, - ctype=_no_ctype, - active=True, - descend=True): +from pyomo.core.expr.numvalue import NumericValue +from pyomo.core.kernel.base import ( + ICategorizedObject, + _no_ctype, + _convert_ctype, + _convert_descend_into, +) + + +def preorder_traversal(node, ctype=_no_ctype, active=True, descend=True): """ A generator that yields each object in the storage tree (including the root object) using a preorder traversal. @@ -57,8 +55,7 @@ def preorder_traversal(node, assert active in (None, True) # if not active, then nothing below is active - if (active is not None) and \ - (not node.active): + if (active is not None) and (not node.active): return # convert AML types into Kernel types (hack for the @@ -68,32 +65,29 @@ def preorder_traversal(node, # convert descend to a function descend = _convert_descend_into(descend) - if (ctype is _no_ctype) or \ - (node.ctype is ctype) or \ - (node.ctype._is_heterogeneous_container): + if ( + (ctype is _no_ctype) + or (node.ctype is ctype) + or (node.ctype._is_heterogeneous_container) + ): yield node - if (not node._is_container) or \ - (not descend(node)): + if (not node._is_container) or (not descend(node)): return for child in node.children(): child_ctype = child.ctype if not child._is_container: # not a container - if (active is None) or \ - child.active: - if (ctype is _no_ctype) or \ - (child_ctype is ctype): + if (active is None) or child.active: + if (ctype is _no_ctype) or (child_ctype is ctype): yield child elif child._is_heterogeneous_container: # a heterogeneous container, so use # its traversal method for obj in preorder_traversal( - child, - ctype=ctype, - active=active, - descend=descend): + child, ctype=ctype, active=active, descend=descend + ): yield obj else: # a homogeneous container @@ -106,10 +100,8 @@ def descend_(obj_): return False else: return descend(obj_) - for obj in preorder_traversal( - child, - active=active, - descend=descend_): + + for obj in preorder_traversal(child, active=active, descend=descend_): if not obj._is_heterogeneous_container: yield obj else: @@ -117,23 +109,15 @@ def descend_(obj_): # its traversal method and reapply the # ctype filter for item in preorder_traversal( - obj, - ctype=ctype, - active=active, - descend=descend): + obj, ctype=ctype, active=active, descend=descend + ): yield item - elif (ctype is _no_ctype) or \ - (child_ctype is ctype): - for obj in preorder_traversal( - child, - active=active, - descend=descend): + elif (ctype is _no_ctype) or (child_ctype is ctype): + for obj in preorder_traversal(child, active=active, descend=descend): yield obj -def generate_names(node, - convert=str, - prefix="", - **kwds): + +def generate_names(node, convert=str, prefix="", **kwds): """ Generate names relative to this object for all objects stored under it. @@ -168,82 +152,105 @@ def generate_names(node, for obj in traversal: parent = obj.parent - name = (parent._child_storage_entry_string - % convert(obj.storage_key)) + name = parent._child_storage_entry_string % convert(obj.storage_key) if parent is not node: - names[obj] = (names[parent] + - parent._child_storage_delimiter_string + - name) + names[obj] = names[parent] + parent._child_storage_delimiter_string + name else: names[obj] = prefix + name return names + def pprint(obj, indent=0, stream=sys.stdout): """pprint a kernel modeling object""" if not isinstance(obj, ICategorizedObject): if isinstance(obj, NumericValue): prefix = "" if indent > 0: - prefix = (" "*indent)+" - " - stream.write(prefix+str(obj)+"\n") + prefix = (" " * indent) + " - " + stream.write(prefix + str(obj) + "\n") else: assert indent == 0 - _pprint_.pprint(obj, indent=indent+1, stream=stream) + _pprint_.pprint(obj, indent=indent + 1, stream=stream) return if not obj._is_container: prefix = "" if indent > 0: - prefix = (" "*indent)+" - " + prefix = (" " * indent) + " - " # not a block clsname = obj.__class__.__name__ if obj.ctype is pyomo.core.kernel.variable.IVariable: - stream.write(prefix+"%s: %s(active=%s, value=%s, bounds=(%s,%s), domain_type=%s, fixed=%s, stale=%s)\n" - % (str(obj), - clsname, - obj.active, - obj.value, - obj.lb, - obj.ub, - obj.domain_type.__name__, - obj.fixed, - obj.stale)) + stream.write( + prefix + + "%s: %s(active=%s, value=%s, bounds=(%s,%s), domain_type=%s, fixed=%s, stale=%s)\n" + % ( + str(obj), + clsname, + obj.active, + obj.value, + obj.lb, + obj.ub, + obj.domain_type.__name__, + obj.fixed, + obj.stale, + ) + ) elif obj.ctype is pyomo.core.kernel.constraint.IConstraint: - stream.write(prefix+"%s: %s(active=%s, expr=%s)\n" - % (str(obj), - clsname, - obj.active, - str(obj.expr))) + stream.write( + prefix + + "%s: %s(active=%s, expr=%s)\n" + % (str(obj), clsname, obj.active, str(obj.expr)) + ) elif obj.ctype is pyomo.core.kernel.objective.IObjective: - stream.write(prefix+"%s: %s(active=%s, expr=%s)\n" - % (str(obj), clsname, obj.active, str(obj.expr))) + stream.write( + prefix + + "%s: %s(active=%s, expr=%s)\n" + % (str(obj), clsname, obj.active, str(obj.expr)) + ) elif obj.ctype is pyomo.core.kernel.expression.IExpression: - stream.write(prefix+"%s: %s(active=%s, expr=%s)\n" - % (str(obj), clsname, obj.active, str(obj.expr))) + stream.write( + prefix + + "%s: %s(active=%s, expr=%s)\n" + % (str(obj), clsname, obj.active, str(obj.expr)) + ) elif obj.ctype is pyomo.core.kernel.parameter.IParameter: - stream.write(prefix+"%s: %s(active=%s, value=%s)\n" - % (str(obj), clsname, obj.active, str(obj()))) + stream.write( + prefix + + "%s: %s(active=%s, value=%s)\n" + % (str(obj), clsname, obj.active, str(obj())) + ) elif obj.ctype is pyomo.core.kernel.sos.ISOS: - stream.write(prefix+"%s: %s(active=%s, level=%s, entries=%s)\n" - % (str(obj), - clsname, - obj.active, - obj.level, - str(["(%s,%s)" % (str(v), w) - for v,w in zip(obj.variables, - obj.weights)]))) + stream.write( + prefix + + "%s: %s(active=%s, level=%s, entries=%s)\n" + % ( + str(obj), + clsname, + obj.active, + obj.level, + str( + [ + "(%s,%s)" % (str(v), w) + for v, w in zip(obj.variables, obj.weights) + ] + ), + ) + ) else: assert obj.ctype is pyomo.core.kernel.suffix.ISuffix - stream.write(prefix+"%s: %s(active=%s, size=%s)\n" - % (str(obj.name), clsname, obj.active, str(len(obj)))) + stream.write( + prefix + + "%s: %s(active=%s, size=%s)\n" + % (str(obj.name), clsname, obj.active, str(len(obj))) + ) else: prefix = "" if indent > 0: - prefix = (" "*indent)+" - " - stream.write(prefix+"%s: %s(active=%s, ctype=%s)\n" - % (str(obj), - obj.__class__.__name__, - obj.active, - obj.ctype.__name__)) + prefix = (" " * indent) + " - " + stream.write( + prefix + + "%s: %s(active=%s, ctype=%s)\n" + % (str(obj), obj.__class__.__name__, obj.active, obj.ctype.__name__) + ) for c in obj.children(): - pprint(c, indent=indent+1, stream=stream) + pprint(c, indent=indent + 1, stream=stream) diff --git a/pyomo/mpec/complementarity.py b/pyomo/mpec/complementarity.py index 6d264af8fae..df991ce9686 100644 --- a/pyomo/mpec/complementarity.py +++ b/pyomo/mpec/complementarity.py @@ -14,7 +14,7 @@ from pyomo.common.deprecation import RenamedClass from pyomo.common.log import is_debug_set from pyomo.common.timing import ConstructionTimer -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.core.expr.numvalue import ZeroConstant, native_numeric_types, as_numeric from pyomo.core import Constraint, Var, Block, Set from pyomo.core.base.component import ModelComponentFactory @@ -22,10 +22,13 @@ from pyomo.core.base.block import _BlockData from pyomo.core.base.disable_methods import disable_methods from pyomo.core.base.initializer import ( - Initializer, IndexedCallInitializer, CountedCallInitializer, + Initializer, + IndexedCallInitializer, + CountedCallInitializer, ) import logging + logger = logging.getLogger('pyomo.core') @@ -36,12 +39,11 @@ def complements(a, b): - """ Return a named 2-tuple """ - return ComplementarityTuple(a,b) + """Return a named 2-tuple""" + return ComplementarityTuple(a, b) class _ComplementarityData(_BlockData): - def _canonical_expression(self, e): # Note: as the complimentarity component maintains references to # the original expression (e), it is NOT safe or valid to bypass @@ -56,31 +58,32 @@ def _canonical_expression(self, e): # The first argument of an equality is never fixed # else: - _e = ( ZeroConstant, e.arg(0) - e.arg(1)) + _e = (ZeroConstant, e.arg(0) - e.arg(1)) elif e.__class__ is EXPR.InequalityExpression: if e.arg(1).__class__ in native_numeric_types or e.arg(1).is_fixed(): _e = (None, e.arg(0), e.arg(1)) elif e.arg(0).__class__ in native_numeric_types or e.arg(0).is_fixed(): _e = (e.arg(0), e.arg(1), None) else: - _e = ( ZeroConstant, e.arg(1) - e.arg(0), None ) + _e = (ZeroConstant, e.arg(1) - e.arg(0), None) elif e.__class__ is EXPR.RangedExpression: - _e = (e.arg(0), e.arg(1), e.arg(2)) + _e = (e.arg(0), e.arg(1), e.arg(2)) else: _e = (None, e, None) return _e def to_standard_form(self): # - # Add auxilliary variables and constraints that ensure + # Add auxiliary variables and constraints that ensure # a monotone transformation of general complementary constraints to # the form: # l1 <= v1 <= u1 OR l2 <= v2 <= u2 # - # Note that this transformation creates more variables and constraints - # than are strictly necessary. However, we don't have a complete list of - # the variables used in a model's complementarity conditions when adding - # a single condition, so we add additional variables. + # Note that this transformation creates more variables and + # constraints than are strictly necessary. However, we don't + # have a complete list of the variables used in a model's + # complementarity conditions when adding a single condition, so + # we add additional variables. # # This has the form: # @@ -103,8 +106,13 @@ def to_standard_form(self): self.c = Constraint(expr=_e2) return # - if (_e1[0] is None) + (_e1[2] is None) + (_e2[0] is None) + (_e2[2] is None) != 2: - raise RuntimeError("Complementarity condition %s must have exactly two finite bounds" % self.name) + if (_e1[0] is None) + (_e1[2] is None) + (_e2[0] is None) + ( + _e2[2] is None + ) != 2: + raise RuntimeError( + "Complementarity condition %s must have exactly two finite bounds" + % self.name + ) # if _e1[0] is None and _e1[2] is None: # Only e2 will be an unconstrained expression @@ -117,14 +125,18 @@ def to_standard_form(self): self.c = Constraint(expr=_e2[0] <= _e2[1]) self.c._complementarity_type = 1 elif _e2[0] is None: - self.c = Constraint(expr=- _e2[2] <= - _e2[1]) + self.c = Constraint(expr=-_e2[2] <= -_e2[1]) self.c._complementarity_type = 1 # if not _e1[0] is None and not _e1[2] is None: if not (_e1[0].__class__ in native_numeric_types or _e1[0].is_constant()): - raise RuntimeError("Cannot express a complementarity problem of the form L < v < U _|_ g(x) where L is not a constant value") + raise RuntimeError( + "Cannot express a complementarity problem of the form L < v < U _|_ g(x) where L is not a constant value" + ) if not (_e1[2].__class__ in native_numeric_types or _e1[2].is_constant()): - raise RuntimeError("Cannot express a complementarity problem of the form L < v < U _|_ g(x) where U is not a constant value") + raise RuntimeError( + "Cannot express a complementarity problem of the form L < v < U _|_ g(x) where U is not a constant value" + ) self.v = Var(bounds=(_e1[0], _e1[2])) self.ve = Constraint(expr=self.v == _e1[1]) elif _e1[2] is None: @@ -144,14 +156,15 @@ def set_value(self, cc): # The ComplementarityTuple has a fixed length, so we initialize # the _args component and return # - self._args = ( as_numeric(cc.arg0), as_numeric(cc.arg1) ) + self._args = (cc.arg0, cc.arg1) # elif cc.__class__ is tuple: if len(cc) != 2: raise ValueError( "Invalid tuple for Complementarity %s (expected 2-tuple):" - "\n\t%s" % (self.name, cc) ) - self._args = tuple( as_numeric(x) for x in cc ) + "\n\t%s" % (self.name, cc) + ) + self._args = cc elif cc is Complementarity.Skip: del self.parent_component()[self.index()] elif cc.__class__ is list: @@ -162,13 +175,12 @@ def set_value(self, cc): return self.set_value(tuple(cc)) else: raise ValueError( - "Unexpected value for Complementarity %s:\n\t%s" - % (self.name, cc) ) + "Unexpected value for Complementarity %s:\n\t%s" % (self.name, cc) + ) @ModelComponentFactory.register("Complementarity conditions.") class Complementarity(Block): - _ComponentDataClass = _ComplementarityData def __new__(cls, *args, **kwds): @@ -186,25 +198,34 @@ def _complementarity_rule(b, *idx): return cc = _rule(b.parent_block(), idx) if cc is None: - raise ValueError(""" + raise ValueError( + """ Invalid complementarity condition. The complementarity condition is None instead of a 2-tuple. Please modify your rule to return Complementarity.Skip instead of None. -Error thrown for Complementarity "%s".""" % ( b.name, ) ) +Error thrown for Complementarity "%s".""" + % (b.name,) + ) b.set_value(cc) def __init__(self, *args, **kwargs): kwargs.setdefault('ctype', Complementarity) kwargs.setdefault('dense', False) - _init = tuple( _arg for _arg in ( - kwargs.pop('initialize', None), - kwargs.pop('rule', None), - kwargs.pop('expr', None) ) if _arg is not None ) + _init = tuple( + _arg + for _arg in ( + kwargs.pop('initialize', None), + kwargs.pop('rule', None), + kwargs.pop('expr', None), + ) + if _arg is not None + ) if len(_init) > 1: raise ValueError( "Duplicate initialization: Complementarity() only accepts " - "one of 'initialize=', 'rule=', and 'expr='") + "one of 'initialize=', 'rule=', and 'expr='" + ) elif _init: _init = _init[0] else: @@ -221,11 +242,12 @@ def __init__(self, *args, **kwargs): # HACK to make the "counted call" syntax work. We wait until # after the base class is set up so that is_indexed() is # reliable. - if self._init_rule is not None \ - and self._init_rule.__class__ is IndexedCallInitializer: + if ( + self._init_rule is not None + and self._init_rule.__class__ is IndexedCallInitializer + ): self._init_rule = CountedCallInitializer(self, self._init_rule) - def add(self, index, cc): """ Add a complementarity condition with a specified index. @@ -240,9 +262,7 @@ def _pprint(self): """ Return data that will be printed for this component. """ - _table_data = lambda k, v: [ - v._args[0], v._args[1], v.active, - ] + _table_data = lambda k, v: [v._args[0], v._args[1], v.active] # This is a bit weird, but is being implemented to preserve # backwards compatibility. The Complementarity transformation @@ -261,23 +281,24 @@ def _pprint(self): # updates and a check that we do not break anything in the # Book). _transformed = not issubclass(self.ctype, Complementarity) + def _conditional_block_printer(ostream, idx, data): if _transformed or len(data.component_map()): self._pprint_callback(ostream, idx, data) return ( - [("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None), - ("Active", self.active), - ], + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ("Active", self.active), + ], self._data.items(), - ( "Arg0","Arg1","Active" ), + ("Arg0", "Arg1", "Active"), (_table_data, _conditional_block_printer), - ) + ) class ScalarComplementarity(_ComplementarityData, Complementarity): - def __init__(self, *args, **kwds): _ComplementarityData.__init__(self, self) Complementarity.__init__(self, *args, **kwds) @@ -312,7 +333,7 @@ class ComplementarityList(IndexedComplementarity): an index value is not specified. """ - End = (1003,) + End = (1003,) def __init__(self, **kwargs): """Constructor""" @@ -341,7 +362,7 @@ def construct(self, data=None): if self._constructed: return timer = ConstructionTimer(self) - self._constructed=True + self._constructed = True if self._init_rule is not None: _init = self._init_rule(self.parent_block(), ()) @@ -353,4 +374,3 @@ def construct(self, data=None): self.add(cc) timer.report() - diff --git a/pyomo/mpec/plugins/__init__.py b/pyomo/mpec/plugins/__init__.py index e5550c43bec..3317e1ce829 100644 --- a/pyomo/mpec/plugins/__init__.py +++ b/pyomo/mpec/plugins/__init__.py @@ -9,6 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.mpec.plugins.mpec1 import pyomo.mpec.plugins.mpec2 @@ -17,4 +18,3 @@ def load(): import pyomo.mpec.plugins.solver1 import pyomo.mpec.plugins.solver2 import pyomo.mpec.plugins.pathampl - diff --git a/pyomo/mpec/plugins/mpec1.py b/pyomo/mpec/plugins/mpec1.py index f8ab6d54483..ad6905158c7 100644 --- a/pyomo/mpec/plugins/mpec1.py +++ b/pyomo/mpec/plugins/mpec1.py @@ -11,13 +11,15 @@ import logging -from pyomo.core.base import (Transformation, - TransformationFactory, - Constraint, - Block, - Param, - SortComponents, - ComponentUID) +from pyomo.core.base import ( + Transformation, + TransformationFactory, + Constraint, + Block, + Param, + SortComponents, + ComponentUID, +) from pyomo.mpec.complementarity import Complementarity from pyomo.gdp import Disjunct @@ -25,9 +27,9 @@ # -# This transformation reworks each Complementarity block to +# This transformation reworks each Complementarity block to # add a constraint that ensures the complementarity condition. -# Specifically, +# Specifically, # # x1 >= 0 OR x2 >= 0 # @@ -37,10 +39,11 @@ # x2 >= 0 # x1*x2 <= 0 # -@TransformationFactory.register('mpec.simple_nonlinear', doc="Nonlinear transformations of complementarity conditions when all variables are non-negative") +@TransformationFactory.register( + 'mpec.simple_nonlinear', + doc="Nonlinear transformations of complementarity conditions when all variables are non-negative", +) class MPEC1_Transformation(Transformation): - - def __init__(self): super(MPEC1_Transformation, self).__init__() @@ -61,9 +64,12 @@ def _apply_to(self, instance, **kwds): # # Iterate over the model finding Complementarity components # - for complementarity in instance.component_objects(Complementarity, active=True, - descend_into=(Block, Disjunct), - sort=SortComponents.deterministic): + for complementarity in instance.component_objects( + Complementarity, + active=True, + descend_into=(Block, Disjunct), + sort=SortComponents.deterministic, + ): block = complementarity.parent_block() for index in sorted(complementarity.keys()): _data = complementarity[index] @@ -74,20 +80,31 @@ def _apply_to(self, instance, **kwds): _type = getattr(_data.c, "_complementarity_type", 0) if _type == 1: # - # Constraint expression is bounded below, so we can replace + # Constraint expression is bounded below, so we can replace # constraint c with a constraint that ensures that either # constraint c is active or variable v is at its lower bound. # - _data.ccon = Constraint(expr=(_data.c.body - _data.c.lower)*_data.v <= instance.mpec_bound) + _data.ccon = Constraint( + expr=(_data.c.body - _data.c.lower) * _data.v + <= instance.mpec_bound + ) del _data.c._complementarity_type elif _type == 3: # # Variable v is bounded above and below. We can define # - _data.ccon_l = Constraint(expr=(_data.v - _data.v.bounds[0])*_data.c.body <= instance.mpec_bound) - _data.ccon_u = Constraint(expr=(_data.v - _data.v.bounds[1])*_data.c.body <= instance.mpec_bound) + _data.ccon_l = Constraint( + expr=(_data.v - _data.v.bounds[0]) * _data.c.body + <= instance.mpec_bound + ) + _data.ccon_u = Constraint( + expr=(_data.v - _data.v.bounds[1]) * _data.c.body + <= instance.mpec_bound + ) del _data.c._complementarity_type - elif _type == 2: #pragma:nocover - raise ValueError("to_standard_form does not generate _type 2 expressions") - tdata.compl_cuids.append( ComponentUID(complementarity) ) + elif _type == 2: # pragma:nocover + raise ValueError( + "to_standard_form does not generate _type 2 expressions" + ) + tdata.compl_cuids.append(ComponentUID(complementarity)) block.reclassify_component_type(complementarity, Block) diff --git a/pyomo/mpec/plugins/mpec2.py b/pyomo/mpec/plugins/mpec2.py index 32f402e5f29..d019424ea4b 100644 --- a/pyomo/mpec/plugins/mpec2.py +++ b/pyomo/mpec/plugins/mpec2.py @@ -12,12 +12,14 @@ import logging from pyomo.core.expr import inequality -from pyomo.core.base import (Transformation, - TransformationFactory, - Constraint, - Block, - SortComponents, - ComponentUID) +from pyomo.core.base import ( + Transformation, + TransformationFactory, + Constraint, + Block, + SortComponents, + ComponentUID, +) from pyomo.mpec.complementarity import Complementarity from pyomo.gdp.disjunct import Disjunct, Disjunction @@ -25,10 +27,11 @@ logger = logging.getLogger('pyomo.core') -@TransformationFactory.register('mpec.simple_disjunction', doc="Disjunctive transformations of complementarity conditions when all variables are non-negative") +@TransformationFactory.register( + 'mpec.simple_disjunction', + doc="Disjunctive transformations of complementarity conditions when all variables are non-negative", +) class MPEC2_Transformation(Transformation): - - def __init__(self): super(MPEC2_Transformation, self).__init__() @@ -41,9 +44,12 @@ def _apply_to(self, instance, **kwds): # # Iterate over the model finding Complementarity components # - for complementarity in instance.component_objects(Complementarity, active=True, - descend_into=(Block, Disjunct), - sort=SortComponents.deterministic): + for complementarity in instance.component_objects( + Complementarity, + active=True, + descend_into=(Block, Disjunct), + sort=SortComponents.deterministic, + ): block = complementarity.parent_block() for index in sorted(complementarity.keys()): @@ -53,11 +59,22 @@ def _apply_to(self, instance, **kwds): # _e1 = _data._canonical_expression(_data._args[0]) _e2 = _data._canonical_expression(_data._args[1]) - if len(_e1)==3 and len(_e2) == 3 and (_e1[0] is None) + (_e1[2] is None) + (_e2[0] is None) + (_e2[2] is None) != 2: - raise RuntimeError("Complementarity condition %s must have exactly two finite bounds" % _data.name) + if ( + len(_e1) == 3 + and len(_e2) == 3 + and (_e1[0] is None) + + (_e1[2] is None) + + (_e2[0] is None) + + (_e2[2] is None) + != 2 + ): + raise RuntimeError( + "Complementarity condition %s must have exactly two finite bounds" + % _data.name + ) if len(_e1) == 3 and _e1[0] is None and _e1[2] is None: # - # Swap _e1 and _e2. The ensures that + # Swap _e1 and _e2. The ensures that # only e2 will be an unconstrained expression # _e1, _e2 = _e2, _e1 @@ -66,17 +83,21 @@ def _apply_to(self, instance, **kwds): _data.c = Constraint(expr=_e1) else: _data.expr1 = Disjunct() - _data.expr1.c0 = Constraint(expr= _e1[0] == _e1[1]) - _data.expr1.c1 = Constraint(expr= _e2[1] >= 0) + _data.expr1.c0 = Constraint(expr=_e1[0] == _e1[1]) + _data.expr1.c1 = Constraint(expr=_e2[1] >= 0) # _data.expr2 = Disjunct() - _data.expr2.c0 = Constraint(expr= _e1[1] == _e1[2]) - _data.expr2.c1 = Constraint(expr= _e2[1] <= 0) + _data.expr2.c0 = Constraint(expr=_e1[1] == _e1[2]) + _data.expr2.c1 = Constraint(expr=_e2[1] <= 0) # _data.expr3 = Disjunct() - _data.expr3.c0 = Constraint(expr= inequality(_e1[0], _e1[1], _e1[2])) - _data.expr3.c1 = Constraint(expr= _e2[1] == 0) - _data.complements = Disjunction(expr=(_data.expr1, _data.expr2, _data.expr3)) + _data.expr3.c0 = Constraint( + expr=inequality(_e1[0], _e1[1], _e1[2]) + ) + _data.expr3.c1 = Constraint(expr=_e2[1] == 0) + _data.complements = Disjunction( + expr=(_data.expr1, _data.expr2, _data.expr3) + ) else: if _e1[0] is None: tmp1 = _e1[2] - _e1[1] @@ -87,13 +108,13 @@ def _apply_to(self, instance, **kwds): else: tmp2 = _e2[1] - _e2[0] _data.expr1 = Disjunct() - _data.expr1.c0 = Constraint(expr= tmp1 >= 0) - _data.expr1.c1 = Constraint(expr= tmp2 == 0) + _data.expr1.c0 = Constraint(expr=tmp1 >= 0) + _data.expr1.c1 = Constraint(expr=tmp2 == 0) # _data.expr2 = Disjunct() - _data.expr2.c0 = Constraint(expr= tmp1 == 0) - _data.expr2.c1 = Constraint(expr= tmp2 >= 0) + _data.expr2.c0 = Constraint(expr=tmp1 == 0) + _data.expr2.c1 = Constraint(expr=tmp2 >= 0) # _data.complements = Disjunction(expr=(_data.expr1, _data.expr2)) - tdata.compl_cuids.append( ComponentUID(complementarity) ) + tdata.compl_cuids.append(ComponentUID(complementarity)) block.reclassify_component_type(complementarity, Block) diff --git a/pyomo/mpec/plugins/mpec3.py b/pyomo/mpec/plugins/mpec3.py index 0f2e3eb0cd6..d681c305a2d 100644 --- a/pyomo/mpec/plugins/mpec3.py +++ b/pyomo/mpec/plugins/mpec3.py @@ -11,10 +11,7 @@ import logging -from pyomo.core.base import (Transformation, - TransformationFactory, - Block, - SortComponents) +from pyomo.core.base import Transformation, TransformationFactory, Block, SortComponents from pyomo.mpec.complementarity import Complementarity from pyomo.gdp import Disjunct @@ -22,13 +19,13 @@ # -# This transformation reworks each Complementarity block to +# This transformation reworks each Complementarity block to # setup a standard form. # -@TransformationFactory.register('mpec.standard_form', doc="Standard reformulation of complementarity condition") +@TransformationFactory.register( + 'mpec.standard_form', doc="Standard reformulation of complementarity condition" +) class MPEC3_Transformation(Transformation): - - def __init__(self): super(MPEC3_Transformation, self).__init__() @@ -36,9 +33,12 @@ def _apply_to(self, instance, **kwds): # # Iterate over the model finding Complementarity components # - for complementarity in instance.component_objects(Complementarity, active=True, - descend_into=(Block, Disjunct), - sort=SortComponents.deterministic): + for complementarity in instance.component_objects( + Complementarity, + active=True, + descend_into=(Block, Disjunct), + sort=SortComponents.deterministic, + ): block = complementarity.parent_block() for index in sorted(complementarity.keys()): _data = complementarity[index] diff --git a/pyomo/mpec/plugins/mpec4.py b/pyomo/mpec/plugins/mpec4.py index 853cd8f37d3..5b32886711a 100644 --- a/pyomo/mpec/plugins/mpec4.py +++ b/pyomo/mpec/plugins/mpec4.py @@ -11,27 +11,30 @@ import logging -from pyomo.core.base import (Transformation, - TransformationFactory, - Constraint, - Var, - Block, - ComponentUID, - SortComponents, - value) +from pyomo.core.base import ( + Transformation, + TransformationFactory, + Constraint, + Var, + Block, + ComponentUID, + SortComponents, + value, +) from pyomo.mpec.complementarity import Complementarity from pyomo.gdp import Disjunct logger = logging.getLogger('pyomo.core') + # -# This transformation reworks each Complementarity block to +# This transformation reworks each Complementarity block to # create a mixed-complementarity problem that can be written to an NL file. # -@TransformationFactory.register('mpec.nl', doc="Transform a MPEC into a form suitable for the NL writer") +@TransformationFactory.register( + 'mpec.nl', doc="Transform a MPEC into a form suitable for the NL writer" +) class MPEC4_Transformation(Transformation): - - def __init__(self): super(MPEC4_Transformation, self).__init__() @@ -42,18 +45,24 @@ def _apply_to(self, instance, **kwds): free_vars = {} id_list = [] # [ESJ 07/12/2019] Look on the whole model in case instance is a Block or a Disjunct - for vdata in instance.model().component_data_objects(Var, active=True, - sort=SortComponents.deterministic, - descend_into=(Block, Disjunct)): - id_list.append( id(vdata) ) + for vdata in instance.model().component_data_objects( + Var, + active=True, + sort=SortComponents.deterministic, + descend_into=(Block, Disjunct), + ): + id_list.append(id(vdata)) free_vars[id(vdata)] = vdata # # Iterate over the Complementarity components # cobjs = [] - for cobj in instance.component_objects(Complementarity, active=True, - descend_into=(Block, Disjunct), - sort=SortComponents.deterministic): + for cobj in instance.component_objects( + Complementarity, + active=True, + descend_into=(Block, Disjunct), + sort=SortComponents.deterministic, + ): cobjs.append(cobj) for index in sorted(cobj.keys()): _cdata = cobj[index] @@ -67,13 +76,13 @@ def _apply_to(self, instance, **kwds): tdata = instance._transformation_data['mpec.nl'] tdata.compl_cuids = [] for cobj in cobjs: - tdata.compl_cuids.append( ComponentUID(cobj) ) + tdata.compl_cuids.append(ComponentUID(cobj)) cobj.parent_block().reclassify_component_type(cobj, Block) - #instance.pprint() - #self.print_nl_form(instance) + # instance.pprint() + # self.print_nl_form(instance) - def print_nl_form(self, instance): #pragma:nocover + def print_nl_form(self, instance): # pragma:nocover """ Summarize the complementarity relations in this problem. """ @@ -81,9 +90,24 @@ def print_nl_form(self, instance): #pragma:nocover for vdata in instance.component_data_objects(Var, active=True): vmap[id(vdata)] = vdata print("-------------------- Complementary Relations ----------------------") - for bdata in instance.block_data_objects(active=True, sort=SortComponents. deterministic): - for cobj in bdata.component_data_objects(Constraint, active=True, descend_into=False): - print("%s %s\t\t\t%s" % (getattr(cobj, '_complementarity', None), str(cobj. lower)+" < "+str(cobj.body)+" < "+str(cobj.upper) , vmap.get(getattr(cobj, '_vid', None), None))) + for bdata in instance.block_data_objects( + active=True, sort=SortComponents.deterministic + ): + for cobj in bdata.component_data_objects( + Constraint, active=True, descend_into=False + ): + print( + "%s %s\t\t\t%s" + % ( + getattr(cobj, '_complementarity', None), + str(cobj.lower) + + " < " + + str(cobj.body) + + " < " + + str(cobj.upper), + vmap.get(getattr(cobj, '_vid', None), None), + ) + ) print("-------------------- Complementary Relations ----------------------") def to_common_form(self, cdata, free_vars): @@ -92,7 +116,7 @@ def to_common_form(self, cdata, free_vars): """ _e1 = cdata._canonical_expression(cdata._args[0]) _e2 = cdata._canonical_expression(cdata._args[1]) - if False: #pragma:nocover + if False: # pragma:nocover if _e1[0] is None: print(None) else: @@ -125,8 +149,13 @@ def to_common_form(self, cdata, free_vars): if len(_e2) == 2: cdata.c = Constraint(expr=_e2) return - if (_e1[0] is None) + (_e1[2] is None) + (_e2[0] is None) + (_e2[2] is None) != 2: - raise RuntimeError("Complementarity condition %s must have exactly two finite bounds" % cdata.name) + if (_e1[0] is None) + (_e1[2] is None) + (_e2[0] is None) + ( + _e2[2] is None + ) != 2: + raise RuntimeError( + "Complementarity condition %s must have exactly two finite bounds" + % cdata.name + ) # # Swap if the body of the second constraint is not a free variable # @@ -179,4 +208,3 @@ def to_common_form(self, cdata, free_vars): if var.ub is None or value(_e2[2]) > value(var.ub): var.setub(_e2[2]) cdata.c._complementarity += 2 - diff --git a/pyomo/mpec/plugins/pathampl.py b/pyomo/mpec/plugins/pathampl.py index 360636e7fcb..7875251c04b 100644 --- a/pyomo/mpec/plugins/pathampl.py +++ b/pyomo/mpec/plugins/pathampl.py @@ -38,9 +38,11 @@ def __init__(self, **kwds): def _default_executable(self): executable = Executable("pathampl") - if not executable: #pragma:nocover - logger.warning("Could not locate the 'pathampl' executable, " - "which is required for solver %s" % self.name) + if not executable: # pragma:nocover + logger.warning( + "Could not locate the 'pathampl' executable, " + "which is required for solver %s" % self.name + ) self.enable = False return None return executable.path() diff --git a/pyomo/mpec/plugins/solver1.py b/pyomo/mpec/plugins/solver1.py index 19b4de5bf7d..0ac1af85522 100644 --- a/pyomo/mpec/plugins/solver1.py +++ b/pyomo/mpec/plugins/solver1.py @@ -16,13 +16,13 @@ from pyomo.common.collections import Bunch -@SolverFactory.register('mpec_nlp', doc='MPEC solver that optimizes a nonlinear transformation') +@SolverFactory.register( + 'mpec_nlp', doc='MPEC solver that optimizes a nonlinear transformation' +) class MPEC_Solver1(pyomo.opt.OptSolver): - - def __init__(self, **kwds): kwds['type'] = 'mpec_nlp' - pyomo.opt.OptSolver.__init__(self,**kwds) + pyomo.opt.OptSolver.__init__(self, **kwds) self._metasolver = True def _presolve(self, *args, **kwds): @@ -43,7 +43,7 @@ def _apply_solver(self): # Solve with a specified solver # solver = self.options.solver - if not self.options.solver: #pragma:nocover + if not self.options.solver: # pragma:nocover self.options.solver = solver = 'ipopt' # use the with block here so that deactivation of the @@ -53,7 +53,7 @@ def _apply_solver(self): self.results = [] epsilon_final = self.options.get('epsilon_final', 1e-7) epsilon = self.options.get('epsilon_initial', epsilon_final) - while (True): + while True: self._instance.mpec_bound.set_value(epsilon) # # **NOTE: It would be better to override _presolve on the @@ -63,9 +63,9 @@ def _apply_solver(self): # io_options are getting relayed to the subsolver # here). # - res = opt.solve(self._instance, - tee=self._tee, - timelimit=self._timelimit) + res = opt.solve( + self._instance, tee=self._tee, timelimit=self._timelimit + ) self.results.append(res) epsilon /= 10.0 if epsilon < epsilon_final: @@ -74,7 +74,10 @@ def _apply_solver(self): # Reclassify the Complementarity components # from pyomo.mpec import Complementarity - for cuid in self._instance._transformation_data['mpec.simple_nonlinear'].compl_cuids: + + for cuid in self._instance._transformation_data[ + 'mpec.simple_nonlinear' + ].compl_cuids: cobj = cuid.find_component_on(self._instance) cobj.parent_block().reclassify_component_type(cobj, Complementarity) # @@ -85,8 +88,7 @@ def _apply_solver(self): # # Return the sub-solver return condition value and log # - return Bunch(rc=getattr(opt,'_rc', None), - log=getattr(opt,'_log',None)) + return Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None)) def _postsolve(self): # @@ -100,12 +102,12 @@ def _postsolve(self): solv.name = self.options.subsolver solv.wallclock_time = self.wall_time cpu_ = [] - for res in self.results: #pragma:nocover + for res in self.results: # pragma:nocover if not getattr(res.solver, 'cpu_time', None) is None: - cpu_.append( res.solver.cpu_time ) - if len(cpu_) > 0: #pragma:nocover + cpu_.append(res.solver.cpu_time) + if len(cpu_) > 0: # pragma:nocover solv.cpu_time = sum(cpu_) - #solv.termination_condition = pyomo.opt.TerminationCondition.maxIterations + # solv.termination_condition = pyomo.opt.TerminationCondition.maxIterations # # PROBLEM # @@ -114,9 +116,15 @@ def _postsolve(self): prob.name = self._instance.name prob.number_of_constraints = self._instance.statistics.number_of_constraints prob.number_of_variables = self._instance.statistics.number_of_variables - prob.number_of_binary_variables = self._instance.statistics.number_of_binary_variables - prob.number_of_integer_variables = self._instance.statistics.number_of_integer_variables - prob.number_of_continuous_variables = self._instance.statistics.number_of_continuous_variables + prob.number_of_binary_variables = ( + self._instance.statistics.number_of_binary_variables + ) + prob.number_of_integer_variables = ( + self._instance.statistics.number_of_integer_variables + ) + prob.number_of_continuous_variables = ( + self._instance.statistics.number_of_continuous_variables + ) prob.number_of_objectives = self._instance.statistics.number_of_objectives # # SOLUTION(S) diff --git a/pyomo/mpec/plugins/solver2.py b/pyomo/mpec/plugins/solver2.py index 7ec96952c7e..491c8122d2e 100644 --- a/pyomo/mpec/plugins/solver2.py +++ b/pyomo/mpec/plugins/solver2.py @@ -18,11 +18,9 @@ @SolverFactory.register('mpec_minlp', doc='MPEC solver transforms to a MINLP') class MPEC_Solver2(pyomo.opt.OptSolver): - - def __init__(self, **kwds): kwds['type'] = 'mpec_minlp' - pyomo.opt.OptSolver.__init__(self,**kwds) + pyomo.opt.OptSolver.__init__(self, **kwds) self._metasolver = True def _presolve(self, *args, **kwds): @@ -41,12 +39,12 @@ def _apply_solver(self): xfrm.apply_to(self._instance) xfrm = TransformationFactory('gdp.bigm') - xfrm.apply_to(self._instance, bigM=self.options.get('bigM',10**6)) + xfrm.apply_to(self._instance, bigM=self.options.get('bigM', 10**6)) # # Solve with a specified solver # solver = self.options.solver - if not self.options.solver: #pragma:nocover + if not self.options.solver: # pragma:nocover self.options.solver = solver = 'glpk' # use the with block here so that deactivation of the @@ -61,14 +59,17 @@ def _apply_solver(self): # io_options are getting relayed to the subsolver # here). # - self.results = opt.solve(self._instance, - tee=self._tee, - timelimit=self._timelimit) + self.results = opt.solve( + self._instance, tee=self._tee, timelimit=self._timelimit + ) # # Reclassify the Complementarity components # from pyomo.mpec import Complementarity - for cuid in self._instance._transformation_data['mpec.simple_disjunction'].compl_cuids: + + for cuid in self._instance._transformation_data[ + 'mpec.simple_disjunction' + ].compl_cuids: cobj = cuid.find_component_on(self._instance) cobj.parent_block().reclassify_component_type(cobj, Complementarity) # @@ -84,8 +85,7 @@ def _apply_solver(self): # # Return the sub-solver return condition value and log # - return Bunch(rc=getattr(opt,'_rc', None), - log=getattr(opt,'_log',None)) + return Bunch(rc=getattr(opt, '_rc', None), log=getattr(opt, '_log', None)) def _postsolve(self): # @@ -106,4 +106,3 @@ def _postsolve(self): # Return the results # return self.results - diff --git a/pyomo/mpec/tests/t11_nlxfrm.nl b/pyomo/mpec/tests/t11_nlxfrm.nl index 3a3b18a1a93..f8888c4b96f 100644 --- a/pyomo/mpec/tests/t11_nlxfrm.nl +++ b/pyomo/mpec/tests/t11_nlxfrm.nl @@ -1,5 +1,5 @@ g3 1 1 0 # problem unknown - 3 1 0 0 1 # vars, constraints, objectives, ranges, eqns + 3 1 0 0 1 # vars, constraints, objectives, ranges, eqns; KNOWN BUG WITH NLv1 WRTIER!!! 0 0 1 0 0 0 # nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb 0 0 # network constraints: nonlinear, linear 0 0 0 # nonlinear vars in constraints, objectives, both diff --git a/pyomo/mpec/tests/t11_nlxfrm.nl_v2 b/pyomo/mpec/tests/t11_nlxfrm.nl_v2 new file mode 100644 index 00000000000..8038ffe6d15 --- /dev/null +++ b/pyomo/mpec/tests/t11_nlxfrm.nl_v2 @@ -0,0 +1,31 @@ +g3 1 1 0 # problem unknown + 3 2 0 0 1 # vars, constraints, objectives, ranges, eqns + 0 0 1 0 0 0 # nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 # network constraints: nonlinear, linear + 0 0 0 # nonlinear vars in constraints, objectives, both + 0 0 0 1 # linear network variables; functions; arith, flags + 0 0 0 0 0 # discrete variables: binary, integer, nonlinear (b,c,o) + 4 0 # nonzeros in Jacobian, obj. gradient + 0 0 # max name lengths: constraints, variables + 0 0 0 0 0 # common exprs: b,c,o,c1,o1 +C0 +n0 +C1 +n0 +x0 +r +5 0 2 +4 2.0 +b +3 +3 +3 +k2 +1 +2 +J0 1 +2 1 +J1 3 +0 1 +1 1 +2 1 diff --git a/pyomo/mpec/tests/t12_nlxfrm.nl b/pyomo/mpec/tests/t12_nlxfrm.nl index 3a3b18a1a93..f8888c4b96f 100644 --- a/pyomo/mpec/tests/t12_nlxfrm.nl +++ b/pyomo/mpec/tests/t12_nlxfrm.nl @@ -1,5 +1,5 @@ g3 1 1 0 # problem unknown - 3 1 0 0 1 # vars, constraints, objectives, ranges, eqns + 3 1 0 0 1 # vars, constraints, objectives, ranges, eqns; KNOWN BUG WITH NLv1 WRTIER!!! 0 0 1 0 0 0 # nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb 0 0 # network constraints: nonlinear, linear 0 0 0 # nonlinear vars in constraints, objectives, both diff --git a/pyomo/mpec/tests/t12_nlxfrm.nl_v2 b/pyomo/mpec/tests/t12_nlxfrm.nl_v2 new file mode 100644 index 00000000000..8038ffe6d15 --- /dev/null +++ b/pyomo/mpec/tests/t12_nlxfrm.nl_v2 @@ -0,0 +1,31 @@ +g3 1 1 0 # problem unknown + 3 2 0 0 1 # vars, constraints, objectives, ranges, eqns + 0 0 1 0 0 0 # nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 # network constraints: nonlinear, linear + 0 0 0 # nonlinear vars in constraints, objectives, both + 0 0 0 1 # linear network variables; functions; arith, flags + 0 0 0 0 0 # discrete variables: binary, integer, nonlinear (b,c,o) + 4 0 # nonzeros in Jacobian, obj. gradient + 0 0 # max name lengths: constraints, variables + 0 0 0 0 0 # common exprs: b,c,o,c1,o1 +C0 +n0 +C1 +n0 +x0 +r +5 0 2 +4 2.0 +b +3 +3 +3 +k2 +1 +2 +J0 1 +2 1 +J1 3 +0 1 +1 1 +2 1 diff --git a/pyomo/mpec/tests/t2a_mpec.nl.txt b/pyomo/mpec/tests/t2a_mpec.nl.txt index 15eb740a836..895ace16d86 100644 --- a/pyomo/mpec/tests/t2a_mpec.nl.txt +++ b/pyomo/mpec/tests/t2a_mpec.nl.txt @@ -26,8 +26,8 @@ 3 Constraint Declarations bc : Size=1, Index=None, Active=True - Key : Lower : Body : Upper : Active - None : 0.0 : cc.bv + (y + x2) : 0.0 : True + Key : Lower : Body : Upper : Active + None : 0.0 : y + x2 + cc.bv : 0.0 : True c : Size=1, Index=None, Active=True Key : Lower : Body : Upper : Active None : 0.0 : cc.bv : +Inf : True diff --git a/pyomo/mpec/tests/t2a_nlxfrm.nl_v2 b/pyomo/mpec/tests/t2a_nlxfrm.nl_v2 new file mode 100644 index 00000000000..3d4bfb62b7a --- /dev/null +++ b/pyomo/mpec/tests/t2a_nlxfrm.nl_v2 @@ -0,0 +1,42 @@ +g3 1 1 0 # problem unknown + 5 3 0 0 2 # vars, constraints, objectives, ranges, eqns + 0 0 1 0 0 0 # nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 # network constraints: nonlinear, linear + 0 0 0 # nonlinear vars in constraints, objectives, both + 0 0 0 1 # linear network variables; functions; arith, flags + 0 0 0 0 0 # discrete variables: binary, integer, nonlinear (b,c,o) + 7 0 # nonzeros in Jacobian, obj. gradient + 0 0 # max name lengths: constraints, variables + 0 0 0 0 0 # common exprs: b,c,o,c1,o1 +C0 +n0 +C1 +n0 +C2 +n0 +x0 +r +5 2 5 +4 0.0 +4 0.0 +b +3 +3 +3 +3 +1 -1 +k4 +1 +3 +4 +6 +J0 1 +3 1 +J1 3 +0 1 +1 1 +3 1 +J2 3 +1 -1 +2 1 +4 1 diff --git a/pyomo/mpec/tests/t2b_nlxfrm.nl_v2 b/pyomo/mpec/tests/t2b_nlxfrm.nl_v2 new file mode 100644 index 00000000000..3b16b9b8d00 --- /dev/null +++ b/pyomo/mpec/tests/t2b_nlxfrm.nl_v2 @@ -0,0 +1,42 @@ +g3 1 1 0 # problem unknown + 5 3 0 0 2 # vars, constraints, objectives, ranges, eqns + 0 0 1 0 0 0 # nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 # network constraints: nonlinear, linear + 0 0 0 # nonlinear vars in constraints, objectives, both + 0 0 0 1 # linear network variables; functions; arith, flags + 0 0 0 0 0 # discrete variables: binary, integer, nonlinear (b,c,o) + 7 0 # nonzeros in Jacobian, obj. gradient + 0 0 # max name lengths: constraints, variables + 0 0 0 0 0 # common exprs: b,c,o,c1,o1 +C0 +n0 +C1 +n0 +C2 +n0 +x0 +r +5 1 5 +4 -1.0 +4 0.0 +b +3 +3 +3 +3 +2 0 +k4 +1 +3 +4 +6 +J0 1 +3 1 +J1 3 +1 1 +2 -1 +3 1 +J2 3 +0 -1 +1 -1 +4 1 diff --git a/pyomo/mpec/tests/test_complementarity.py b/pyomo/mpec/tests/test_complementarity.py index ada1203ac7c..1eb0385c3e5 100644 --- a/pyomo/mpec/tests/test_complementarity.py +++ b/pyomo/mpec/tests/test_complementarity.py @@ -25,18 +25,24 @@ from pyomo.common.tee import capture_output from pyomo.common.tempfiles import TempfileManager from pyomo.core import ( - ConcreteModel, Var, Constraint, TransformationFactory, Objective, - Block, inequality + ConcreteModel, + Var, + Constraint, + TransformationFactory, + Objective, + Block, + inequality, ) from pyomo.gdp import Disjunct, Disjunction from pyomo.mpec import Complementarity, complements, ComplementarityList from pyomo.opt import ProblemFormat -from pyomo.repn.tests.ampl.nl_diff import load_and_compare_nl_baseline +from pyomo.repn.plugins.nl_writer import FileDeterminism +from pyomo.repn.tests.nl_diff import load_and_compare_nl_baseline currdir = this_file_dir() -class CCTests(object): +class CCTests(object): @classmethod def setUpClass(cls): import pyomo.environ @@ -62,8 +68,9 @@ def _test(self, tname, M): with capture_output(ofile): self._print(M) try: - self.assertTrue(cmp(ofile, bfile), - msg="Files %s and %s differ" % (ofile, bfile)) + self.assertTrue( + cmp(ofile, bfile), msg="Files %s and %s differ" % (ofile, bfile) + ) except: with open(ofile, 'r') as f1, open(bfile, 'r') as f2: f1_contents = list(filter(None, f1.read().split())) @@ -74,23 +81,28 @@ def test_t1a(self): # y + x1 >= 0 _|_ x1 + 2*x2 + 3*x3 >= 1 M = self._setup() M.c = Constraint(expr=M.y + M.x3 >= M.x2) - M.cc = Complementarity(expr=complements(M.y + M.x1 >= 0, M.x1 + 2*M.x2 + 3*M.x3 >= 1)) + M.cc = Complementarity( + expr=complements(M.y + M.x1 >= 0, M.x1 + 2 * M.x2 + 3 * M.x3 >= 1) + ) self._test("t1a", M) def test_t1b(self): # Reversing the expressions in test t1a: # x1 + 2*x2 + 3*x3 >= 1 _|_ y + x1 >= 0 M = self._setup() - M.cc = Complementarity(expr=complements(M.x1 + 2*M.x2 + 3*M.x3 >= 1, M.y + M.x1 >= 0)) + M.cc = Complementarity( + expr=complements(M.x1 + 2 * M.x2 + 3 * M.x3 >= 1, M.y + M.x1 >= 0) + ) self._test("t1b", M) def test_t1c(self): # y >= - x1 _|_ x1 + 2*x2 >= 1 - 3*x3 M = self._setup() - M.cc = Complementarity(expr=complements(M.y >= - M.x1, M.x1 + 2*M.x2 >= 1 - 3*M.x3)) + M.cc = Complementarity( + expr=complements(M.y >= -M.x1, M.x1 + 2 * M.x2 >= 1 - 3 * M.x3) + ) self._test("t1c", M) - def test_t2a(self): # y + x2 >= 0 _|_ x2 - x3 <= -1 M = self._setup() @@ -104,7 +116,6 @@ def test_t2b(self): M.cc = Complementarity(expr=complements(M.x2 - M.x3 <= -1, M.y + M.x2 >= 0)) self._test("t2b", M) - def test_t3a(self): # y + x3 >= 0 _|_ x1 + x2 >= -1 M = self._setup() @@ -118,37 +129,43 @@ def test_t3b(self): M.cc = Complementarity(expr=complements(M.x1 + M.x2 >= -1, M.y + M.x3 >= 0)) self._test("t3b", M) - def test_t4a(self): # x1 + 2*x2 + 3*x3 = 1 _|_ y + x3 M = self._setup() - M.cc = Complementarity(expr=complements(M.x1 + 2*M.x2 + 3*M.x3 == 1, M.y + M.x3)) + M.cc = Complementarity( + expr=complements(M.x1 + 2 * M.x2 + 3 * M.x3 == 1, M.y + M.x3) + ) self._test("t4a", M) def test_t4b(self): # Reversing the expressions in test t7b: # y + x3 _|_ x1 + 2*x2 + 3*x3 = 1 M = self._setup() - M.cc = Complementarity(expr=complements(M.y + M.x3, M.x1 + 2*M.x2 + 3*M.x3 == 1)) + M.cc = Complementarity( + expr=complements(M.y + M.x3, M.x1 + 2 * M.x2 + 3 * M.x3 == 1) + ) self._test("t4b", M) def test_t4c(self): # 1 = x1 + 2*x2 + 3*x3 _|_ y + x3 M = self._setup() - M.cc = Complementarity(expr=complements(1 == M.x1 + 2*M.x2 + 3*M.x3, M.y + M.x3)) + M.cc = Complementarity( + expr=complements(1 == M.x1 + 2 * M.x2 + 3 * M.x3, M.y + M.x3) + ) self._test("t4c", M) def test_t4d(self): # x1 + 2*x2 == 1 - 3*x3 _|_ y + x3 M = self._setup() - M.cc = Complementarity(expr=complements(M.x1 + 2*M.x2 == 1 - 3*M.x3, M.y + M.x3)) + M.cc = Complementarity( + expr=complements(M.x1 + 2 * M.x2 == 1 - 3 * M.x3, M.y + M.x3) + ) self._test("t4d", M) - def test_t9(self): # Testing that we can skip deactivated complementarity conditions M = self._setup() - M.cc = Complementarity(expr=complements(M.y + M.x3, M.x1 + 2*M.x2 == 1)) + M.cc = Complementarity(expr=complements(M.y + M.x3, M.x1 + 2 * M.x2 == 1)) M.cc.deactivate() # AMPL needs at least one variable in the problem therefore # we need to have a constraint that keeps them around @@ -158,9 +175,11 @@ def test_t9(self): def test_t10(self): # Testing that we can skip an array of deactivated complementarity conditions M = self._setup() + def f(model, i): - return complements(M.y + M.x3, M.x1 + 2*M.x2 == i) - M.cc = Complementarity([0,1,2], rule=f) + return complements(M.y + M.x3, M.x1 + 2 * M.x2 == i) + + M.cc = Complementarity([0, 1, 2], rule=f) M.cc[1].deactivate() self._test("t10", M) @@ -179,47 +198,55 @@ def test_t12(self): def test_t13(self): # Testing that we can skip an array of deactivated complementarity conditions M = self._setup() + def f(model, i): if i == 0: - return complements(M.y + M.x3, M.x1 + 2*M.x2 == 0) + return complements(M.y + M.x3, M.x1 + 2 * M.x2 == 0) if i == 1: return Complementarity.Skip if i == 2: - return complements(M.y + M.x3, M.x1 + 2*M.x2 == 2) - M.cc = Complementarity([0,1,2], rule=f) + return complements(M.y + M.x3, M.x1 + 2 * M.x2 == 2) + + M.cc = Complementarity([0, 1, 2], rule=f) self._test("t13", M) def test_cov2(self): # Testing warning for no rule""" M = self._setup() - M.cc = Complementarity([0,1,2]) + M.cc = Complementarity([0, 1, 2]) # AMPL needs at least one variable in the problem therefore # we need to have a constraint that keeps them around - M.keep_var_con = Constraint(expr=M.x1 == 0.5) + M.keep_var_con = Constraint(expr=M.x1 == 0.5) self._test("cov2", M) def test_cov4(self): # Testing construction with no indexing and a rule M = self._setup() + def f(model): - return complements(M.y + M.x3, M.x1 + 2*M.x2 == 1) + return complements(M.y + M.x3, M.x1 + 2 * M.x2 == 1) + M.cc = Complementarity(rule=f) self._test("cov4", M) def test_cov5(self): # Testing construction with rules that generate an exception M = self._setup() + def f(model): raise IOError("cov5 error") + try: M.cc1 = Complementarity(rule=f) self.fail("Expected an IOError") except IOError: pass + def f(model, i): raise IOError("cov5 error") + try: - M.cc2 = Complementarity([0,1], rule=f) + M.cc2 = Complementarity([0, 1], rule=f) self.fail("Expected an IOError") except IOError: pass @@ -227,29 +254,34 @@ def f(model, i): def test_cov6(self): # Testing construction with indexing and an expression M = self._setup() - with self.assertRaisesRegex( - ValueError, "Invalid tuple for Complementarity"): - M.cc = Complementarity([0,1], expr=()) + with self.assertRaisesRegex(ValueError, "Invalid tuple for Complementarity"): + M.cc = Complementarity([0, 1], expr=()) def test_cov7(self): # Testing error checking with return value M = self._setup() + def f(model): return () + try: M.cc = Complementarity(rule=f) self.fail("Expected ValueError") except ValueError: pass + def f(model): return + try: M.cc = Complementarity(rule=f) self.fail("Expected ValueError") except ValueError: pass + def f(model): return {} + try: M.cc = Complementarity(rule=f) self.fail("Expected ValueError") @@ -259,16 +291,20 @@ def f(model): def test_cov8(self): # Testing construction with a list M = self._setup() + def f(model): - return [M.y + M.x3, M.x1 + 2*M.x2 == 1] + return [M.y + M.x3, M.x1 + 2 * M.x2 == 1] + M.cc = Complementarity(rule=f) self._test("cov8", M) def test_cov9(self): # Testing construction with a tuple M = self._setup() + def f(model): - return (M.y + M.x3, M.x1 + 2*M.x2 == 1) + return (M.y + M.x3, M.x1 + 2 * M.x2 == 1) + M.cc = Complementarity(rule=f) self._test("cov8", M) @@ -295,44 +331,47 @@ def test_cov11(self): def test_list1(self): M = self._setup() M.cc = ComplementarityList() - M.cc.add( complements(M.y + M.x3, M.x1 + 2*M.x2 == 0) ) - M.cc.add( complements(M.y + M.x3, M.x1 + 2*M.x2 == 2) ) + M.cc.add(complements(M.y + M.x3, M.x1 + 2 * M.x2 == 0)) + M.cc.add(complements(M.y + M.x3, M.x1 + 2 * M.x2 == 2)) self._test("list1", M) def test_list2(self): M = self._setup() M.cc = ComplementarityList() - M.cc.add( complements(M.y + M.x3, M.x1 + 2*M.x2 == 0) ) - M.cc.add( complements(M.y + M.x3, M.x1 + 2*M.x2 == 1) ) - M.cc.add( complements(M.y + M.x3, M.x1 + 2*M.x2 == 2) ) + M.cc.add(complements(M.y + M.x3, M.x1 + 2 * M.x2 == 0)) + M.cc.add(complements(M.y + M.x3, M.x1 + 2 * M.x2 == 1)) + M.cc.add(complements(M.y + M.x3, M.x1 + 2 * M.x2 == 2)) M.cc[2].deactivate() self._test("list2", M) def test_list3(self): M = self._setup() + def f(M, i): if i == 1: - return complements(M.y + M.x3, M.x1 + 2*M.x2 == 0) + return complements(M.y + M.x3, M.x1 + 2 * M.x2 == 0) elif i == 2: - return complements(M.y + M.x3, M.x1 + 2*M.x2 == 2) + return complements(M.y + M.x3, M.x1 + 2 * M.x2 == 2) return ComplementarityList.End + M.cc = ComplementarityList(rule=f) self._test("list1", M) def test_list4(self): M = self._setup() + def f(M): - yield complements(M.y + M.x3, M.x1 + 2*M.x2 == 0) - yield complements(M.y + M.x3, M.x1 + 2*M.x2 == 2) + yield complements(M.y + M.x3, M.x1 + 2 * M.x2 == 0) + yield complements(M.y + M.x3, M.x1 + 2 * M.x2 == 2) yield ComplementarityList.End + M.cc = ComplementarityList(rule=f) self._test("list1", M) def test_list5(self): M = self._setup() M.cc = ComplementarityList( - rule=( complements(M.y + M.x3, M.x1 + 2*M.x2 == i) - for i in range(3) ) + rule=(complements(M.y + M.x3, M.x1 + 2 * M.x2 == i) for i in range(3)) ) self._test("list5", M) @@ -346,16 +385,20 @@ def test_list6(self): def test_list7(self): M = self._setup() + def f(M): return None + try: M.cc = ComplementarityList(rule=f) self.fail("Expected a ValueError") except: pass M = self._setup() + def f(M): yield None + try: M.cc = ComplementarityList(rule=f) self.fail("Expected a ValueError") @@ -364,12 +407,10 @@ def f(M): class CCTests_none(CCTests, unittest.TestCase): - xfrm = None class CCTests_nl(CCTests, unittest.TestCase): - xfrm = 'mpec.nl' def _print(self, model): @@ -377,30 +418,41 @@ def _print(self, model): class CCTests_standard_form(CCTests, unittest.TestCase): - xfrm = 'mpec.standard_form' class CCTests_simple_nonlinear(CCTests, unittest.TestCase): - xfrm = 'mpec.simple_nonlinear' class CCTests_simple_disjunction(CCTests, unittest.TestCase): - xfrm = 'mpec.simple_disjunction' -class CCTests_nl_nlxfrm(CCTests, unittest.TestCase): - +class CCTests_nl_nlxfrm(CCTests): def _test(self, tname, M): bfile = os.path.join(currdir, tname + '_nlxfrm.nl') xfrm = TransformationFactory('mpec.nl') xfrm.apply_to(M) + fd = FileDeterminism.SORT_INDICES if self._nl_version == 'nl_v2' else 1 with TempfileManager: ofile = TempfileManager.create_tempfile(suffix='_nlxfrm.out') - M.write(ofile, format=ProblemFormat.nl) - self.assertEqual(*load_and_compare_nl_baseline(bfile, ofile)) + M.write( + ofile, + format=self._nl_version, + io_options={'symbolic_solver_labels': False, 'file_determinism': fd}, + ) + self.assertEqual( + *load_and_compare_nl_baseline(bfile, ofile, self._nl_version) + ) + + +class CCTests_nl_nlxfrm_nlv1(CCTests_nl_nlxfrm, unittest.TestCase): + _nl_version = 'nl_v1' + + +class CCTests_nl_nlxfrm_nlv2(CCTests_nl_nlxfrm, unittest.TestCase): + _nl_version = 'nl_v2' class DescendIntoDisjunct(unittest.TestCase): @@ -411,7 +463,7 @@ def get_model(self): m.obj = Objective(expr=m.x) m.disjunct1 = Disjunct() - m.disjunct1.comp = Complementarity(expr=complements(m.x >= 0, 4*m.x - 3 >= 0)) + m.disjunct1.comp = Complementarity(expr=complements(m.x >= 0, 4 * m.x - 3 >= 0)) m.disjunct2 = Disjunct() m.disjunct2.cons = Constraint(expr=m.x >= 2) @@ -436,7 +488,7 @@ def test_simple_disjunction_on_disjunct(self): m = self.get_model() TransformationFactory('mpec.simple_disjunction').apply_to(m.disjunct1) self.check_simple_disjunction(m) - + def check_simple_nonlinear(self, m): # check that we have what we expect on disjunct1 compBlock = m.disjunct1.component('comp') @@ -453,8 +505,6 @@ def test_simple_nonlinear_descend_into_disjunct(self): def test_simple_nonlinear_on_disjunct(self): m = self.get_model() - TransformationFactory('mpec.simple_nonlinear').apply_to(m.disjunct1) - self.check_simple_nonlinear(m) def check_standard_form(self, m): # check that we have what we expect on disjunct1 @@ -491,5 +541,6 @@ def test_nl_on_disjunct(self): TransformationFactory('mpec.nl').apply_to(m.disjunct1) self.check_nl(m) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/mpec/tests/test_minlp.py b/pyomo/mpec/tests/test_minlp.py index b6834a344aa..367a57b817e 100644 --- a/pyomo/mpec/tests/test_minlp.py +++ b/pyomo/mpec/tests/test_minlp.py @@ -15,8 +15,9 @@ import os from os.path import abspath, dirname, normpath, join + currdir = dirname(abspath(__file__)) -exdir = normpath(join(currdir,'..','..','..','examples','mpec')) +exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'mpec')) import pyomo.common.unittest as unittest @@ -27,17 +28,17 @@ solvers = pyomo.opt.check_available_solvers('cplex', 'glpk') -class CommonTests: +class CommonTests: solve = True - solver='mpec_minlp' + solver = 'mpec_minlp' def run_solver(self, *_args, **kwds): if self.solve: args = ['solve'] if 'solver' in kwds: - _solver = kwds.get('solver','glpk') - args.append('--solver='+self.solver) + _solver = kwds.get('solver', 'glpk') + args.append('--solver=' + self.solver) args.append('--solver-options="solver=%s"' % _solver) args.append('--save-results=result.yml') args.append('--results-format=yaml') @@ -47,12 +48,12 @@ def run_solver(self, *_args, **kwds): # These were being ignored by the solvers for this package, # which now causes a helpful error message. - #args.append('--symbolic-solver-labels') - #args.append('--file-determinism=2') + # args.append('--symbolic-solver-labels') + # args.append('--file-determinism=2') if False: args.append('--stream-solver') - args.append('--tempdir='+currdir) + args.append('--tempdir=' + currdir) args.append('--keepfiles') args.append('--logging=debug') @@ -60,7 +61,7 @@ def run_solver(self, *_args, **kwds): os.chdir(currdir) print('***') - #print(' '.join(args)) + # print(' '.join(args)) try: output = pyomo_main.main(args) except SystemExit: @@ -73,10 +74,10 @@ def run_solver(self, *_args, **kwds): return output def referenceFile(self, problem, solver): - return join(currdir, problem+'.txt') + return join(currdir, problem + '.txt') def getObjective(self, fname): - FILE = open(fname,'r') + FILE = open(fname, 'r') data = yaml.load(FILE, **yaml_load_args) FILE.close() solutions = data.get('Solution', []) @@ -88,37 +89,38 @@ def getObjective(self, fname): def updateDocStrings(self): for key in dir(self): if key.startswith('test'): - getattr(self,key).__doc__ = " (%s)" % getattr(self,key).__name__ + getattr(self, key).__doc__ = " (%s)" % getattr(self, key).__name__ def test_linear1(self): - self.problem='test_linear1' - self.run_solver( join(exdir,'linear1.py') ) - self.check( 'linear1', self.solver ) + self.problem = 'test_linear1' + self.run_solver(join(exdir, 'linear1.py')) + self.check('linear1', self.solver) def test_scholtes4(self): - self.problem='test_scholtes4' - self.run_solver( join(exdir,'scholtes4.py') ) - self.check( 'scholtes4', self.solver ) + self.problem = 'test_scholtes4' + self.run_solver(join(exdir, 'scholtes4.py')) + self.check('scholtes4', self.solver) def check(self, problem, solver): - refObj = self.getObjective(self.referenceFile(problem,solver)) - ansObj = self.getObjective(join(currdir,'result.yml')) + refObj = self.getObjective(self.referenceFile(problem, solver)) + ansObj = self.getObjective(join(currdir, 'result.yml')) self.assertEqual(len(refObj), len(ansObj)) for i in range(len(refObj)): self.assertEqual(len(refObj[i]), len(ansObj[i])) - for key,val in refObj[i].items(): - self.assertAlmostEqual(val['Value'], ansObj[i].get(key,None)['Value'], places=3) + for key, val in refObj[i].items(): + self.assertAlmostEqual( + val['Value'], ansObj[i].get(key, None)['Value'], places=3 + ) @unittest.skipIf(not yaml_available, "YAML is not available") @unittest.skipIf(not 'glpk' in solvers, "The 'glpk' executable is not available") class Solve_GLPK(unittest.TestCase, CommonTests): - def tearDown(self): - if os.path.exists(os.path.join(currdir,'result.yml')): - os.remove(os.path.join(currdir,'result.yml')) + if os.path.exists(os.path.join(currdir, 'result.yml')): + os.remove(os.path.join(currdir, 'result.yml')) - def run_solver(self, *args, **kwds): + def run_solver(self, *args, **kwds): kwds['solver'] = 'glpk' CommonTests.run_solver(self, *args, **kwds) @@ -126,12 +128,11 @@ def run_solver(self, *args, **kwds): @unittest.skipIf(not yaml_available, "YAML is not available") @unittest.skipIf(not 'cplex' in solvers, "The 'cplex' executable is not available") class Solve_CPLEX(unittest.TestCase, CommonTests): - def tearDown(self): - if os.path.exists(os.path.join(currdir,'result.yml')): - os.remove(os.path.join(currdir,'result.yml')) + if os.path.exists(os.path.join(currdir, 'result.yml')): + os.remove(os.path.join(currdir, 'result.yml')) - def run_solver(self, *args, **kwds): + def run_solver(self, *args, **kwds): kwds['solver'] = 'cplex' CommonTests.run_solver(self, *args, **kwds) diff --git a/pyomo/mpec/tests/test_nlp.py b/pyomo/mpec/tests/test_nlp.py index a3598dbe1b7..be5234136a1 100644 --- a/pyomo/mpec/tests/test_nlp.py +++ b/pyomo/mpec/tests/test_nlp.py @@ -15,6 +15,7 @@ import os from os.path import abspath, dirname, normpath, join + currdir = dirname(abspath(__file__)) exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'mpec')) @@ -29,16 +30,15 @@ class CommonTests: - solve = True - solver='mpec_nlp' + solver = 'mpec_nlp' def run_solver(self, *_args, **kwds): if self.solve: args = ['solve'] if 'solver' in kwds: - _solver = kwds.get('solver','glpk') - args.append('--solver='+self.solver) + _solver = kwds.get('solver', 'glpk') + args.append('--solver=' + self.solver) args.append('--solver-options="solver=%s"' % _solver) args.append('--save-results=result.yml') args.append('--results-format=yaml') @@ -48,12 +48,12 @@ def run_solver(self, *_args, **kwds): # These were being ignored by the solvers for this package, # which now causes a helpful error message. - #args.append('--symbolic-solver-labels') - #args.append('--file-determinism=2') + # args.append('--symbolic-solver-labels') + # args.append('--file-determinism=2') if False: args.append('--stream-solver') - args.append('--tempdir='+currdir) + args.append('--tempdir=' + currdir) args.append('--keepfiles') args.append('--logging=debug') @@ -61,7 +61,7 @@ def run_solver(self, *_args, **kwds): os.chdir(currdir) print('***') - #print(' '.join(args)) + # print(' '.join(args)) try: output = pyomo_main.main(args) except SystemExit: @@ -74,10 +74,10 @@ def run_solver(self, *_args, **kwds): return output def referenceFile(self, problem, solver): - return join(currdir, problem+'.txt') + return join(currdir, problem + '.txt') def getObjective(self, fname): - FILE = open(fname,'r') + FILE = open(fname, 'r') data = yaml.load(FILE, **yaml_load_args) FILE.close() solutions = data.get('Solution', []) @@ -89,42 +89,43 @@ def getObjective(self, fname): def updateDocStrings(self): for key in dir(self): if key.startswith('test'): - getattr(self,key).__doc__ = " (%s)" % getattr(self,key).__name__ + getattr(self, key).__doc__ = " (%s)" % getattr(self, key).__name__ def test_linear1(self): - self.problem='test_linear1' - self.run_solver( join(exdir,'linear1.py') ) - self.check( 'linear1', self.solver ) + self.problem = 'test_linear1' + self.run_solver(join(exdir, 'linear1.py')) + self.check('linear1', self.solver) def test_bard1(self): - self.problem='test_bard1' - self.run_solver( join(exdir,'bard1.py') ) - self.check( 'bard1', self.solver ) + self.problem = 'test_bard1' + self.run_solver(join(exdir, 'bard1.py')) + self.check('bard1', self.solver) def test_scholtes4(self): - self.problem='test_scholtes4' - self.run_solver( join(exdir,'scholtes4.py') ) - self.check( 'scholtes4', self.solver ) + self.problem = 'test_scholtes4' + self.run_solver(join(exdir, 'scholtes4.py')) + self.check('scholtes4', self.solver) def check(self, problem, solver): - refObj = self.getObjective(self.referenceFile(problem,solver)) - ansObj = self.getObjective(join(currdir,'result.yml')) + refObj = self.getObjective(self.referenceFile(problem, solver)) + ansObj = self.getObjective(join(currdir, 'result.yml')) self.assertEqual(len(refObj), len(ansObj)) for i in range(len(refObj)): self.assertEqual(len(refObj[i]), len(ansObj[i])) - for key,val in refObj[i].items(): - self.assertAlmostEqual(val['Value'], ansObj[i].get(key,None)['Value'], places=2) + for key, val in refObj[i].items(): + self.assertAlmostEqual( + val['Value'], ansObj[i].get(key, None)['Value'], places=2 + ) @unittest.skipIf(not yaml_available, "YAML is not available") @unittest.skipIf(not 'ipopt' in solvers, "The 'ipopt' executable is not available") class Solve_IPOPT(unittest.TestCase, CommonTests): - def tearDown(self): - if os.path.exists(os.path.join(currdir,'result.yml')): - os.remove(os.path.join(currdir,'result.yml')) + if os.path.exists(os.path.join(currdir, 'result.yml')): + os.remove(os.path.join(currdir, 'result.yml')) - def run_solver(self, *args, **kwds): + def run_solver(self, *args, **kwds): kwds['solver'] = 'ipopt' CommonTests.run_solver(self, *args, **kwds) diff --git a/pyomo/mpec/tests/test_path.py b/pyomo/mpec/tests/test_path.py index 8709597cf25..5dd7178acf5 100644 --- a/pyomo/mpec/tests/test_path.py +++ b/pyomo/mpec/tests/test_path.py @@ -31,15 +31,15 @@ solvers = pyomo.opt.check_available_solvers('path') -class CommonTests: +class CommonTests: solve = True - solver='path' + solver = 'path' def run_solver(self, *_args, **kwds): if self.solve: args = ['solve'] - args.append('--solver='+self.solver) + args.append('--solver=' + self.solver) args.append('--save-results=result.yml') args.append('--results-format=yaml') args.append('--solver-options="lemke_start=automatic output_options=yes"') @@ -51,7 +51,7 @@ def run_solver(self, *_args, **kwds): if False: args.append('--stream-solver') - args.append('--tempdir='+currdir) + args.append('--tempdir=' + currdir) args.append('--keepfiles') args.append('--logging=debug') @@ -72,10 +72,10 @@ def run_solver(self, *_args, **kwds): return output def referenceFile(self, problem, solver): - return os.path.join(currdir, problem+'.txt') + return os.path.join(currdir, problem + '.txt') def getObjective(self, fname): - FILE = open(fname,'r') + FILE = open(fname, 'r') data = yaml.load(FILE, **yaml_load_args) FILE.close() solutions = data.get('Solution', []) @@ -87,47 +87,48 @@ def getObjective(self, fname): def updateDocStrings(self): for key in dir(self): if key.startswith('test'): - getattr(self,key).__doc__ = " (%s)" % getattr(self,key).__name__ + getattr(self, key).__doc__ = " (%s)" % getattr(self, key).__name__ def test_munson1a(self): - self.problem='test_munson1a' - self.run_solver(os.path.join(exdir,'munson1a.py')) + self.problem = 'test_munson1a' + self.run_solver(os.path.join(exdir, 'munson1a.py')) self.check('munson1a', self.solver) def test_munson1b(self): - self.problem='test_munson1b' - self.run_solver(os.path.join(exdir,'munson1b.py')) + self.problem = 'test_munson1b' + self.run_solver(os.path.join(exdir, 'munson1b.py')) self.check('munson1b', self.solver) def test_munson1c(self): - self.problem='test_munson1c' - self.run_solver(os.path.join(exdir,'munson1c.py')) + self.problem = 'test_munson1c' + self.run_solver(os.path.join(exdir, 'munson1c.py')) self.check('munson1c', self.solver) def test_munson1d(self): - self.problem='test_munson1d' - self.run_solver(os.path.join(exdir,'munson1d.py')) + self.problem = 'test_munson1d' + self.run_solver(os.path.join(exdir, 'munson1d.py')) self.check('munson1d', self.solver) def check(self, problem, solver): - refObj = self.getObjective(self.referenceFile(problem,solver)) - ansObj = self.getObjective(os.path.join(currdir,'result.yml')) + refObj = self.getObjective(self.referenceFile(problem, solver)) + ansObj = self.getObjective(os.path.join(currdir, 'result.yml')) self.assertEqual(len(refObj), len(ansObj)) for i in range(len(refObj)): self.assertEqual(len(refObj[i]), len(ansObj[i])) if isinstance(refObj[i], str): continue - for key,val in refObj[i].items(): - self.assertAlmostEqual(val['Value'], ansObj[i].get(key,None)['Value'], places=2) + for key, val in refObj[i].items(): + self.assertAlmostEqual( + val['Value'], ansObj[i].get(key, None)['Value'], places=2 + ) @unittest.skipIf(not yaml_available, "YAML is not available") @unittest.skipIf(not 'path' in solvers, "The 'path' executable is not available") class Solve_PATH(unittest.TestCase, CommonTests): - def tearDown(self): - if os.path.exists(os.path.join(currdir,'result.yml')): - os.remove(os.path.join(currdir,'result.yml')) + if os.path.exists(os.path.join(currdir, 'result.yml')): + os.remove(os.path.join(currdir, 'result.yml')) if __name__ == "__main__": diff --git a/pyomo/neos/__init__.py b/pyomo/neos/__init__.py index b5cc6771a74..73ac0c51216 100644 --- a/pyomo/neos/__init__.py +++ b/pyomo/neos/__init__.py @@ -14,27 +14,26 @@ # Static documentation for NEOS solvers doc = { - 'bonmin': 'Heuristic MINLP solver', - 'cbc': 'MILP solver', - 'conopt': 'Feasible path NLP solver', - 'couenne': 'Deterministic global MINLP solver', - 'cplex': 'MILP solver', - 'filmint': 'Heuristic MINLP solver', - 'filter': 'SQP NLP solver', - 'ipopt': 'Interior point NLP solver', - 'knitro': 'Convex MINLP solver', + 'bonmin': 'Heuristic MINLP solver', + 'cbc': 'MILP solver', + 'conopt': 'Feasible path NLP solver', + 'couenne': 'Deterministic global MINLP solver', + 'cplex': 'MILP solver', + 'filmint': 'Heuristic MINLP solver', + 'filter': 'SQP NLP solver', + 'ipopt': 'Interior point NLP solver', + 'knitro': 'Convex MINLP solver', 'l-bfgs-b': 'Bound-constrained NLP solver', 'lancelot': 'Augmented Lagrangian NLP solver', - 'loqo': 'Interior point NLP solver', - 'minlp': 'Heuristic MINLP solver', - 'minos': 'SLC NLP solver', - 'minto': 'MILP solver', - 'mosek': 'Interior point NLP solver', + 'loqo': 'Interior point NLP solver', + 'minlp': 'Heuristic MINLP solver', + 'minos': 'SLC NLP solver', + 'minto': 'MILP solver', + 'mosek': 'Interior point NLP solver', 'octeract': 'Deterministic global MINLP solver', - 'ooqp': 'Convex QP solver', - 'path': 'Nonlinear MCP solver', - 'snopt': 'SQP NLP solver', - 'raposa': 'A Global Solver for Polynomial Programming Problems', - 'lgo': 'Lipschitz-Continuous Global Optimizer', + 'ooqp': 'Convex QP solver', + 'path': 'Nonlinear MCP solver', + 'snopt': 'SQP NLP solver', + 'raposa': 'A Global Solver for Polynomial Programming Problems', + 'lgo': 'Lipschitz-Continuous Global Optimizer', } - diff --git a/pyomo/neos/kestrel.py b/pyomo/neos/kestrel.py index 94335febe18..44734294eb4 100644 --- a/pyomo/neos/kestrel.py +++ b/pyomo/neos/kestrel.py @@ -34,18 +34,21 @@ _email_re = re.compile(r'([^@]+@[^@]+\.[a-zA-Z0-9]+)$') + class NEOS(object): # NEOS currently only supports HTTPS access scheme = 'https' host = 'neos-server.org' port = '3333' # Legacy NEOS HTTP interface - #urlscheme = 'http' - #port = '3332' + # urlscheme = 'http' + # port = '3332' + def ProxiedTransport(): from urllib.parse import urlparse import http.client as httplib + # ProxiedTransport from Python 3.x documentation # (https://docs.python.org/3/library/xmlrpc.client.html) class ProxiedTransport_PY3(xmlrpclib.Transport): @@ -53,7 +56,7 @@ def set_proxy(self, host): self.proxy = urlparse(host) if not self.proxy.hostname: # User omitted scheme from the proxy; assume http - self.proxy = urlparse('http://'+host) + self.proxy = urlparse('http://' + host) def make_connection(self, host): scheme = urlparse(host).scheme @@ -77,7 +80,6 @@ def make_connection(self, host): class kestrelAMPL(object): - def __init__(self): self.setup_connection() @@ -97,16 +99,13 @@ def __del__(self): def setup_connection(self): import http.client + # on *NIX, the proxy can show up either upper or lowercase. # Prefer lower case, and prefer HTTPS over HTTP if the # NEOS.scheme is https. - proxy = os.environ.get( - 'http_proxy', os.environ.get( - 'HTTP_PROXY', '')) + proxy = os.environ.get('http_proxy', os.environ.get('HTTP_PROXY', '')) if NEOS.scheme == 'https': - proxy = os.environ.get( - 'https_proxy', os.environ.get( - 'HTTPS_PROXY', proxy)) + proxy = os.environ.get('https_proxy', os.environ.get('HTTPS_PROXY', proxy)) if proxy: self.transport = ProxiedTransport() self.transport.set_proxy(proxy) @@ -116,25 +115,24 @@ def setup_connection(self): self.transport = xmlrpclib.Transport() self.neos = xmlrpclib.ServerProxy( - "%s://%s:%s" % (NEOS.scheme, NEOS.host, NEOS.port), - transport=self.transport) + "%s://%s:%s" % (NEOS.scheme, NEOS.host, NEOS.port), transport=self.transport + ) logger.info("Connecting to the NEOS server ... ") try: result = self.neos.ping() logger.info("OK.") - except (socket.error, xmlrpclib.ProtocolError, - http.client.BadStatusLine): + except (socket.error, xmlrpclib.ProtocolError, http.client.BadStatusLine): e = sys.exc_info()[1] self.neos = None logger.info("Fail: %s" % (e,)) logger.warning("NEOS is temporarily unavailable:\n\t(%s)" % (e,)) def tempfile(self): - return os.path.join(tempfile.gettempdir(),'at%s.jobs' % os.getenv('ampl_id')) + return os.path.join(tempfile.gettempdir(), 'at%s.jobs' % os.getenv('ampl_id')) - def kill(self,jobnumber,password): - response = self.neos.killJob(jobNumber,password) + def kill(self, jobNumber, password): + response = self.neos.killJob(jobNumber, password) logger.info(response) def solvers(self): @@ -149,16 +147,16 @@ def solvers(self): attempt += 1 return [] - def retrieve(self,stub,jobNumber,password): + def retrieve(self, stub, jobNumber, password): # NEOS should return results as uu-encoded xmlrpclib.Binary data - results = self.neos.getFinalResults(jobNumber,password) - if isinstance(results,xmlrpclib.Binary): + results = self.neos.getFinalResults(jobNumber, password) + if isinstance(results, xmlrpclib.Binary): results = results.data # decode results to kestrel.sol; well try to anyway, any errors # will result in error strings in .sol file instead of solution. if stub[-4:] == '.sol': stub = stub[:-4] - solfile = open(stub + ".sol","wb") + solfile = open(stub + ".sol", "wb") solfile.write(results) solfile.close() @@ -168,18 +166,17 @@ def submit(self, xml): # sudo. We include USERNAME to cover Windows, where LOGNAME and # USER may not be defined. user = self.getEmailAddress() - (jobNumber,password) = self.neos.submitJob(xml,user,"kestrel") + (jobNumber, password) = self.neos.submitJob(xml, user, "kestrel") if jobNumber == 0: raise RuntimeError("%s\n\tJob not submitted" % (password,)) - logger.info("Job %d submitted to NEOS, password='%s'\n" % - (jobNumber,password)) + logger.info("Job %d submitted to NEOS, password='%s'\n" % (jobNumber, password)) logger.info("Check the following URL for progress report :\n") logger.info( "%s://www.neos-server.org/neos/cgi-bin/nph-neos-solver.cgi" - "?admin=results&jobnumber=%d&pass=%s\n" - % (NEOS.scheme, jobNumber,password)) - return (jobNumber,password) + "?admin=results&jobnumber=%d&pass=%s\n" % (NEOS.scheme, jobNumber, password) + ) + return (jobNumber, password) def getEmailAddress(self): # Note: the NEOS email address parser is more restrictive than @@ -190,15 +187,16 @@ def getEmailAddress(self): raise RuntimeError( "NEOS requires a valid email address. " - "Please set the 'NEOS_EMAIL' environment variable.") + "Please set the 'NEOS_EMAIL' environment variable." + ) def getJobAndPassword(self): """ If kestrel_options is set to job/password, then return the job and password values """ - jobNumber=0 - password="" + jobNumber = 0 + password = "" options = os.getenv("kestrel_options") if options is not None: m = re.search(r'job\s*=\s*(\d+)', options, re.IGNORECASE) @@ -207,7 +205,7 @@ def getJobAndPassword(self): m = re.search(r'password\s*=\s*(\S+)', options, re.IGNORECASE) if m: password = m.groups()[0] - return (jobNumber,password) + return (jobNumber, password) def getSolverName(self): """ @@ -232,50 +230,52 @@ def getSolverName(self): # if self.options is not None: m = re.search(r'solver\s*=*\s*(\S+)', self.options, re.IGNORECASE) - NEOS_solver_name=None + NEOS_solver_name = None if m: - solver_name=m.groups()[0] + solver_name = m.groups()[0] for s in kestrelAmplSolvers: if s.upper() == solver_name.upper(): - NEOS_solver_name=s - break + NEOS_solver_name = s + break # if not NEOS_solver_name: raise RuntimeError( "%s is not available on NEOS. Choose from:\n\t%s" - % (solver_name, "\n\t".join(kestrelAmplSolvers))) + % (solver_name, "\n\t".join(kestrelAmplSolvers)) + ) # if self.options is None or m is None: raise RuntimeError( "%s is not available on NEOS. Choose from:\n\t%s" - % (solver_name, "\n\t".join(kestrelAmplSolvers))) + % (solver_name, "\n\t".join(kestrelAmplSolvers)) + ) return NEOS_solver_name - def formXML(self,stub): + def formXML(self, stub): solver = self.getSolverName() zipped_nl_file = io.BytesIO() if os.path.exists(stub) and stub[-3:] == '.nl': stub = stub[:-3] - nlfile = open(stub+".nl","rb") - zipper = gzip.GzipFile(mode='wb',fileobj=zipped_nl_file) + nlfile = open(stub + ".nl", "rb") + zipper = gzip.GzipFile(mode='wb', fileobj=zipped_nl_file) zipper.write(nlfile.read()) zipper.close() nlfile.close() # - ampl_files={} - for key in ['adj','col','env','fix','spc','row','slc','unv']: - if os.access(stub+"."+key,os.R_OK): - f = open(stub+"." +key,"r") - val="" + ampl_files = {} + for key in ['adj', 'col', 'env', 'fix', 'spc', 'row', 'slc', 'unv']: + if os.access(stub + "." + key, os.R_OK): + f = open(stub + "." + key, "r") + val = "" buf = f.read() while buf: val += buf - buf=f.read() + buf = f.read() f.close() ampl_files[key] = val # Get priority priority = "" - m = re.search(r'priority[\s=]+(\S+)',self.options) + m = re.search(r'priority[\s=]+(\S+)', self.options) if m: priority = "%s\n" % (m.groups()[0]) # Add any AMPL-created environment variables to dictionary @@ -301,116 +301,121 @@ def formXML(self,stub): AMPL %s %s - %s\n""" %\ - (self.getEmailAddress(), - solver,priority, - solver_options, - nl_string) + %s\n""" % ( + self.getEmailAddress(), + solver, + priority, + solver_options, + nl_string, + ) # for key in ampl_files: - xml += "<%s>\n" % (key,ampl_files[key],key) + xml += "<%s>\n" % (key, ampl_files[key], key) # - for option in ["kestrel_auxfiles","mip_priorities","objective_precision"]: + for option in ["kestrel_auxfiles", "mip_priorities", "objective_precision"]: if option in os.environ: - xml += "<%s>\n" % (option,os.getenv(option),option) + xml += "<%s>\n" % (option, os.getenv(option), option) # xml += "" return xml +if __name__ == "__main__": # pragma:nocover + if len(sys.argv) < 2: + sys.stdout.write("kestrel should be called from inside AMPL.\n") + sys.exit(1) -if __name__=="__main__": #pragma:nocover - if len(sys.argv) < 2: - sys.stdout.write("kestrel should be called from inside AMPL.\n") - sys.exit(1) + kestrel = kestrelAMPL() - kestrel = kestrelAMPL() + if sys.argv[1] == "solvers": + for s in sorted(kestrel.neos.listSolversInCategory("kestrel")): + print(" " + s) + sys.exit(0) - if sys.argv[1] == "solvers": - for s in sorted(kestrel.neos.listSolversInCategory("kestrel")): - print(" "+s) - sys.exit(0) + elif sys.argv[1] == "submit": + xml = kestrel.formXML("kestproblem") + (jobNumber, password) = kestrel.submit(xml) - elif sys.argv[1] == "submit": - xml = kestrel.formXML("kestproblem") - (jobNumber,password) = kestrel.submit(xml) + # Add the job,pass to the stack + jobfile = open(kestrel.tempfile(), 'a') + jobfile.write("%d %s\n" % (jobNumber, password)) + jobfile.close() + elif sys.argv[1] == "retrieve": + # Pop job,pass from the stack + try: + jobfile = open(kestrel.tempfile(), 'r') + except IOError: + e = sys.exc_info()[1] + sys.stdout.write("Error, could not open file %s.\n") + sys.stdout.write("Did you use kestrelsub?\n") + sys.exit(1) - # Add the job,pass to the stack - jobfile = open(kestrel.tempfile(),'a') - jobfile.write("%d %s\n" % (jobNumber,password)) - jobfile.close() - - elif sys.argv[1] == "retrieve": - # Pop job,pass from the stack - try: - jobfile = open(kestrel.tempfile(),'r') - except IOError: - e = sys.exc_info()[1] - sys.stdout.write("Error, could not open file %s.\n") - sys.stdout.write("Did you use kestrelsub?\n") - sys.exit(1) - - m = re.match(r'(\d+) ([a-zA-Z]+)',jobfile.readline()) - if m: - jobNumber = int(m.groups()[0]) - password = m.groups()[1] - restofstack = jobfile.read() - jobfile.close() - - kestrel.retrieve('kestresult',jobNumber,password) + m = re.match(r'(\d+) ([a-zA-Z]+)', jobfile.readline()) + if m: + jobNumber = int(m.groups()[0]) + password = m.groups()[1] + restofstack = jobfile.read() + jobfile.close() + + kestrel.retrieve('kestresult', jobNumber, password) + + if restofstack: + sys.stdout.write("restofstack: %s\n" % restofstack) + jobfile = open(kestrel.tempfile(), 'w') + jobfile.write(restofstack) + jobfile.close() + else: + os.unlink(kestrel.tempfile()) - if restofstack: - sys.stdout.write("restofstack: %s\n" % restofstack) - jobfile = open(kestrel.tempfile(),'w') - jobfile.write(restofstack) - jobfile.close() + elif sys.argv[1] == "kill": + (jobNumber, password) = kestrel.getJobAndPassword() + if jobNumber: + kestrel.kill(jobNumber, password) + else: + sys.stdout.write( + "To kill a NEOS job, first set kestrel_options variable:\n" + ) + sys.stdout.write( + '\tampl: option kestrel_options "job=#### password=xxxx";\n' + ) else: - os.unlink(kestrel.tempfile()) + try: + stub = sys.argv[1] + # See if kestrel_options has job=.. password=.. + (jobNumber, password) = kestrel.getJobAndPassword() - elif sys.argv[1] == "kill": - (jobNumber,password) = kestrel.getJobAndPassword() - if jobNumber: - kestrel.kill(jobNumber,password) - else: - sys.stdout.write("To kill a NEOS job, first set kestrel_options variable:\n") - sys.stdout.write('\tampl: option kestrel_options "job=#### password=xxxx";\n') - else: - try: - stub = sys.argv[1] - # See if kestrel_options has job=.. password=.. - (jobNumber,password) = kestrel.getJobAndPassword() - - # otherwise, submit current problem to NEOS - if not jobNumber: - xml = kestrel.formXML(stub) - (jobNumber,password) = kestrel.submit(xml) - - except KeyboardInterrupt: - e = sys.exc_info()[1] - sys.stdout.write("Keyboard Interrupt while submitting problem.\n") - sys.exit(1) - try: - # Get intermediate results - time.sleep(1) - status = "Running" - offset = 0 - while status == "Running" or status == "Waiting": - (output,offset) = kestrel.neos.getIntermediateResults(jobNumber, - password,offset) - - if isinstance(output,xmlrpclib.Binary): - output = output.data - sys.stdout.write(output) - status = kestrel.neos.getJobStatus(jobNumber,password) - time.sleep(5) - - # Get final results - kestrel.retrieve(stub,jobNumber,password) - sys.exit(0) - except KeyboardInterrupt: - e = sys.exc_info()[1] - msg = ''' + # otherwise, submit current problem to NEOS + if not jobNumber: + xml = kestrel.formXML(stub) + (jobNumber, password) = kestrel.submit(xml) + + except KeyboardInterrupt: + e = sys.exc_info()[1] + sys.stdout.write("Keyboard Interrupt while submitting problem.\n") + sys.exit(1) + try: + # Get intermediate results + time.sleep(1) + status = "Running" + offset = 0 + while status == "Running" or status == "Waiting": + (output, offset) = kestrel.neos.getIntermediateResults( + jobNumber, password, offset + ) + + if isinstance(output, xmlrpclib.Binary): + output = output.data + sys.stdout.write(output) + status = kestrel.neos.getJobStatus(jobNumber, password) + time.sleep(5) + + # Get final results + kestrel.retrieve(stub, jobNumber, password) + sys.exit(0) + except KeyboardInterrupt: + e = sys.exc_info()[1] + msg = ''' Keyboard Interrupt\n\ Job is still running on remote machine\n\ To stop job:\n\ @@ -418,6 +423,11 @@ def formXML(self,stub): \tampl: commands kestrelkill;\n\ To retrieve results:\n\ \tampl: option kestrel_options "job=%d password=%s";\n\ -\tampl: solve;\n''' % (jobNumber,password,jobNumber,password) - sys.stdout.write(msg) - sys.exit(1) +\tampl: solve;\n''' % ( + jobNumber, + password, + jobNumber, + password, + ) + sys.stdout.write(msg) + sys.exit(1) diff --git a/pyomo/neos/plugins/NEOS.py b/pyomo/neos/plugins/NEOS.py index 2ca99937214..2d5929fa9a1 100644 --- a/pyomo/neos/plugins/NEOS.py +++ b/pyomo/neos/plugins/NEOS.py @@ -18,6 +18,7 @@ logger = logging.getLogger('pyomo.neos') + @SolverFactory.register('_neos', 'Interface for solvers hosted on NEOS') class NEOSRemoteSolver(SystemCallSolver): """A wrapper class for NEOS Remote Solvers""" @@ -37,12 +38,10 @@ def create_command_line(self, executable, problem_files): populated by NEOS. """ if self._log_file is None: - self._log_file = TempfileManager.\ - create_tempfile(suffix=".neos.log") + self._log_file = TempfileManager.create_tempfile(suffix=".neos.log") if self._soln_file is None: - self._soln_file = TempfileManager.\ - create_tempfile(suffix=".neos.sol") - self._results_file = self._soln_file + self._soln_file = TempfileManager.create_tempfile(suffix=".neos.sol") + self._results_file = self._soln_file # display the log/solver file names prior to execution. this is useful # in case something crashes unexpectedly, which is not without precedent. diff --git a/pyomo/neos/plugins/__init__.py b/pyomo/neos/plugins/__init__.py index a39dfe53bc1..323f96e9bdc 100644 --- a/pyomo/neos/plugins/__init__.py +++ b/pyomo/neos/plugins/__init__.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.neos.plugins.NEOS import pyomo.neos.plugins.kestrel_plugin - diff --git a/pyomo/neos/plugins/kestrel_plugin.py b/pyomo/neos/plugins/kestrel_plugin.py index c77df3c2c50..72d73d15ace 100644 --- a/pyomo/neos/plugins/kestrel_plugin.py +++ b/pyomo/neos/plugins/kestrel_plugin.py @@ -17,9 +17,7 @@ from pyomo.common.dependencies import attempt_import from pyomo.opt import SolverFactory, SolverManagerFactory, OptSolver from pyomo.opt.parallel.manager import ActionManagerError, ActionStatus -from pyomo.opt.parallel.async_solver import ( - AsynchronousSolverManager -) +from pyomo.opt.parallel.async_solver import AsynchronousSolverManager from pyomo.core.base import Block import pyomo.neos.kestrel @@ -32,8 +30,7 @@ def _neos_error(msg, results, current_message): error_re = re.compile('error', flags=re.I) warn_re = re.compile('warn', flags=re.I) - logger.error("%s NEOS log:\n%s" % ( msg, current_message, ), - exc_info=sys.exc_info()) + logger.error("%s NEOS log:\n%s" % (msg, current_message), exc_info=sys.exc_info()) soln_data = results.data.decode('utf-8') for line in soln_data.splitlines(): if error_re.search(line): @@ -43,17 +40,17 @@ def _neos_error(msg, results, current_message): @SolverManagerFactory.register( - 'neos', doc="Asynchronously execute solvers on the NEOS server") + 'neos', doc="Asynchronously execute solvers on the NEOS server" +) class SolverManager_NEOS(AsynchronousSolverManager): - def clear(self): """ Clear manager state """ AsynchronousSolverManager.clear(self) self.kestrel = pyomo.neos.kestrel.kestrelAMPL() - self._ah = {} # maps NEOS job numbers to their corresponding - # action handle. + self._ah = {} # maps NEOS job numbers to their corresponding + # action handle. self._args = {} self._opt_data = {} @@ -76,12 +73,12 @@ def _perform_queue(self, ah, *args, **kwds): if solver is None: raise ActionManagerError( "No solver passed to %s, use keyword option 'solver'" - % (type(self).__name__) ) + % (type(self).__name__) + ) if not isinstance(solver, str): solver_name = solver.name if solver_name == 'asl': - solver_name = \ - os.path.basename(solver.executable()) + solver_name = os.path.basename(solver.executable()) else: solver_name = solver solver = None @@ -104,14 +101,15 @@ def _perform_queue(self, ah, *args, **kwds): _options = OptSolver._options_string_to_dict(_options) user_solver_options.update(_options) user_solver_options.update( - OptSolver._options_string_to_dict(kwds.pop('options_string', ''))) + OptSolver._options_string_to_dict(kwds.pop('options_string', '')) + ) # JDS: [5/13/17] The following is a HACK. This timeout flag is # set by pyomo/scripting/util.py:apply_optimizer. If we do not # remove it, it will get passed to the NEOS solver. For solvers # like CPLEX 12.7.0, this will cause a fatal error as it is not # a known option. - if user_solver_options.get('timelimit',0) is None: + if user_solver_options.get('timelimit', 0) is None: del user_solver_options['timelimit'] opt = SolverFactory('_neos') @@ -122,12 +120,13 @@ def _perform_queue(self, ah, *args, **kwds): if len(self._solvers) == 0: for name in self.kestrel.solvers(): if name.endswith('AMPL'): - self._solvers[ name[:-5].lower() ] = name[:-5] + self._solvers[name[:-5].lower()] = name[:-5] if solver_name not in self._solvers: raise ActionManagerError( "Solver '%s' is not recognized by NEOS. " "Solver names recognized:\n%s" - % (solver_name, str(sorted(self._solvers.keys())))) + % (solver_name, str(sorted(self._solvers.keys()))) + ) # # Apply kestrel # @@ -140,11 +139,11 @@ def _perform_queue(self, ah, *args, **kwds): # solver_options = {} for key in opt.options: - solver_options[key]=opt.options[key] + solver_options[key] = opt.options[key] solver_options.update(user_solver_options) options = opt._get_options_string(solver_options) if not options == "": - os.environ[neos_sname+'_options'] = options + os.environ[neos_sname + '_options'] = options # # Generate an XML string using these two environment variables # @@ -157,7 +156,7 @@ def _perform_queue(self, ah, *args, **kwds): # del os.environ['kestrel_options'] try: - del os.environ[neos_sname+"_options"] + del os.environ[neos_sname + "_options"] except: pass # @@ -165,11 +164,13 @@ def _perform_queue(self, ah, *args, **kwds): # self._ah[jobNumber] = ah self._neos_log[jobNumber] = (0, "") - self._opt_data[jobNumber] = (opt, - opt._smap_id, - opt._load_solutions, - opt._select_index, - opt._default_variable_value) + self._opt_data[jobNumber] = ( + opt, + opt._smap_id, + opt._load_solutions, + opt._select_index, + opt._default_variable_value, + ) self._args[jobNumber] = args return ah @@ -182,9 +183,9 @@ def _perform_wait_any(self): to indicate an error. """ for jobNumber in self._ah: - - status = self.kestrel.neos.getJobStatus(jobNumber, - self._ah[jobNumber].password) + status = self.kestrel.neos.getJobStatus( + jobNumber, self._ah[jobNumber].password + ) if status not in ("Running", "Waiting"): # the job is done. @@ -192,11 +193,13 @@ def _perform_wait_any(self): del self._ah[jobNumber] ah.status = ActionStatus.done - (opt, - smap_id, - load_solutions, - select_index, - default_variable_value) = self._opt_data[jobNumber] + ( + opt, + smap_id, + load_solutions, + select_index, + default_variable_value, + ) = self._opt_data[jobNumber] del self._opt_data[jobNumber] args = self._args[jobNumber] @@ -215,8 +218,9 @@ def _perform_wait_any(self): try: solver_results = opt.process_output(rc) except: - _neos_error( "Error parsing NEOS solution file", - results, current_message ) + _neos_error( + "Error parsing NEOS solution file", results, current_message + ) return ah solver_results._smap_id = smap_id @@ -229,11 +233,14 @@ def _perform_wait_any(self): _model.solutions.load_from( solver_results, select=select_index, - default_variable_value=default_variable_value) + default_variable_value=default_variable_value, + ) except: _neos_error( "Error loading NEOS solution into model", - results, current_message ) + results, + current_message, + ) solver_results._smap_id = None solver_results.solution.clear() else: @@ -255,16 +262,17 @@ def _perform_wait_any(self): # minutes. If NEOS doesn't produce intermediate results # by then we will need to catch (and eat) the exception try: - (message_fragment, new_offset) \ - = self.kestrel.neos.getIntermediateResults( - jobNumber, - self._ah[jobNumber].password, - current_offset ) + ( + message_fragment, + new_offset, + ) = self.kestrel.neos.getIntermediateResults( + jobNumber, self._ah[jobNumber].password, current_offset + ) logger.info(message_fragment) self._neos_log[jobNumber] = ( new_offset, - current_message + ( - (message_fragment.data).decode('utf-8') ) ) + current_message + ((message_fragment.data).decode('utf-8')), + ) except xmlrpc_client.ProtocolError: # The command probably timed out pass diff --git a/pyomo/neos/tests/model_min_lp.py b/pyomo/neos/tests/model_min_lp.py index 0a09aeb3299..56e1b124cd4 100644 --- a/pyomo/neos/tests/model_min_lp.py +++ b/pyomo/neos/tests/model_min_lp.py @@ -12,14 +12,16 @@ import pyomo.environ as pyo model = pyo.ConcreteModel() -model.y = pyo.Var(bounds=(-10,10), initialize=0.5) -model.x = pyo.Var(bounds=(-5,5), initialize=0.5) +model.y = pyo.Var(bounds=(-10, 10), initialize=0.5) +model.x = pyo.Var(bounds=(-5, 5), initialize=0.5) + @model.ConstraintList() def c(m): yield m.y >= m.x - 2 - yield m.y >= - m.x + yield m.y >= -m.x yield m.y <= m.x yield m.y <= 2 - m.x + model.obj = pyo.Objective(expr=model.y, sense=pyo.minimize) diff --git a/pyomo/neos/tests/test_neos.py b/pyomo/neos/tests/test_neos.py index 5bf509dd61e..c43869e65cc 100644 --- a/pyomo/neos/tests/test_neos.py +++ b/pyomo/neos/tests/test_neos.py @@ -32,6 +32,7 @@ import pyomo.environ as pyo from pyomo.common.fileutils import this_file_dir + currdir = this_file_dir() neos_available = False @@ -51,13 +52,13 @@ def _model(sense): # - linear # - solution has nonzero variable values (so they appear in the results) model = pyo.ConcreteModel() - model.y = pyo.Var(bounds=(-10,10), initialize=0.5) - model.x = pyo.Var(bounds=(-5,5), initialize=0.5) + model.y = pyo.Var(bounds=(-10, 10), initialize=0.5) + model.x = pyo.Var(bounds=(-5, 5), initialize=0.5) @model.ConstraintList() def c(m): yield m.y >= m.x - 2 - yield m.y >= - m.x + yield m.y >= -m.x yield m.y <= m.x yield m.y <= 2 - m.x @@ -70,20 +71,19 @@ def c(m): @unittest.skipIf(not neos_available, "Cannot make connection to NEOS server") @unittest.skipUnless(email_set, "NEOS_EMAIL not set") class TestKestrel(unittest.TestCase): - def test_doc(self): kestrel = kestrelAMPL() tmp = [tuple(name.split(':')) for name in kestrel.solvers()] - amplsolvers = set(v[0].lower() for v in tmp if v[1]=='AMPL') + amplsolvers = set(v[0].lower() for v in tmp if v[1] == 'AMPL') doc = pyomo.neos.doc dockeys = set(doc.keys()) self.assertEqual(amplsolvers, dockeys) - #gamssolvers = set(v[0].lower() for v in tmp if v[1]=='GAMS') - #missing = gamssolvers - amplsolvers - #self.assertEqual(len(missing) == 0) + # gamssolvers = set(v[0].lower() for v in tmp if v[1]=='GAMS') + # missing = gamssolvers - amplsolvers + # self.assertEqual(len(missing) == 0) def test_connection_failed(self): try: @@ -92,8 +92,9 @@ def test_connection_failed(self): with LoggingIntercept() as LOG: kestrel = kestrelAMPL() self.assertIsNone(kestrel.neos) - self.assertRegex(LOG.getvalue(), - r"NEOS is temporarily unavailable:\n\t\(.+\)") + self.assertRegex( + LOG.getvalue(), r"NEOS is temporarily unavailable:\n\t\(.+\)" + ) finally: pyomo.neos.kestrel.NEOS.host = orig_host @@ -155,15 +156,14 @@ def test_ooqp(self): if self.sense == pyo.maximize: # OOQP does not recognize maximization problems and # minimizes instead. - with self.assertRaisesRegex( - AssertionError, '.* != 1 within'): + with self.assertRaisesRegex(AssertionError, '.* != 1 within'): self._run('ooqp') else: self._run('ooqp') - # The simple tests aren't complementarity + # The simple tests aren't complementarity # problems - #def test_path(self): + # def test_path(self): # self._run('path') def test_snopt(self): @@ -184,7 +184,7 @@ def _run(self, opt, constrained=True): expected_y = { (pyo.minimize, True): -1, - (pyo.maximize, True): 1, + (pyo.maximize, True): 1, (pyo.minimize, False): -10, (pyo.maximize, False): 10, }[self.sense, constrained] @@ -196,18 +196,19 @@ def _run(self, opt, constrained=True): self.assertAlmostEqual(pyo.value(m.obj), expected_y, delta=1e-5) self.assertAlmostEqual(pyo.value(m.y), expected_y, delta=1e-5) -class PyomoCommandDriver(object): +class PyomoCommandDriver(object): def _run(self, opt, constrained=True): expected_y = { (pyo.minimize, True): -1, - (pyo.maximize, True): 1, + (pyo.maximize, True): 1, (pyo.minimize, False): -10, (pyo.maximize, False): 10, }[self.sense, constrained] - filename = 'model_min_lp.py' if self.sense == pyo.minimize \ - else 'model_max_lp.py' + filename = ( + 'model_min_lp.py' if self.sense == pyo.minimize else 'model_max_lp.py' + ) results = os.path.join(currdir, 'result.json') args = [ @@ -218,8 +219,8 @@ def _run(self, opt, constrained=True): '--logging=quiet', '--save-results=%s' % results, '--results-format=json', - '-c' - ] + '-c', + ] try: output = main(args) self.assertEqual(output.errorcode, 0) @@ -231,44 +232,41 @@ def _run(self, opt, constrained=True): if os.path.exists(results): os.remove(results) - self.assertEqual( - data['Solver'][0]['Status'], 'ok') - self.assertEqual( - data['Solution'][1]['Status'], 'optimal') + self.assertEqual(data['Solver'][0]['Status'], 'ok') + self.assertEqual(data['Solution'][1]['Status'], 'optimal') self.assertAlmostEqual( - data['Solution'][1]['Objective']['obj']['Value'], - expected_y, delta=1e-5) + data['Solution'][1]['Objective']['obj']['Value'], expected_y, delta=1e-5 + ) if constrained: # If the solver ignores constraints, x is degenerate self.assertAlmostEqual( - data['Solution'][1]['Variable']['x']['Value'], - 1, delta=1e-5) + data['Solution'][1]['Variable']['x']['Value'], 1, delta=1e-5 + ) self.assertAlmostEqual( - data['Solution'][1]['Variable']['y']['Value'], - expected_y, delta=1e-5) + data['Solution'][1]['Variable']['y']['Value'], expected_y, delta=1e-5 + ) @unittest.pytest.mark.neos @unittest.skipIf(not neos_available, "Cannot make connection to NEOS server") @unittest.skipUnless(email_set, "NEOS_EMAIL not set") -class TestSolvers_direct_call_min(RunAllNEOSSolvers, DirectDriver, - unittest.TestCase): +class TestSolvers_direct_call_min(RunAllNEOSSolvers, DirectDriver, unittest.TestCase): sense = pyo.minimize @unittest.pytest.mark.neos @unittest.skipIf(not neos_available, "Cannot make connection to NEOS server") @unittest.skipUnless(email_set, "NEOS_EMAIL not set") -class TestSolvers_direct_call_max(RunAllNEOSSolvers, DirectDriver, - unittest.TestCase): +class TestSolvers_direct_call_max(RunAllNEOSSolvers, DirectDriver, unittest.TestCase): sense = pyo.maximize @unittest.pytest.mark.neos @unittest.skipIf(not neos_available, "Cannot make connection to NEOS server") @unittest.skipUnless(email_set, "NEOS_EMAIL not set") -class TestSolvers_pyomo_cmd_min(RunAllNEOSSolvers, PyomoCommandDriver, - unittest.TestCase): +class TestSolvers_pyomo_cmd_min( + RunAllNEOSSolvers, PyomoCommandDriver, unittest.TestCase +): sense = pyo.minimize @@ -277,6 +275,7 @@ class TestSolvers_pyomo_cmd_min(RunAllNEOSSolvers, PyomoCommandDriver, @unittest.skipUnless(email_set, "NEOS_EMAIL not set") class TestCBC_timeout_direct_call(DirectDriver, unittest.TestCase): sense = pyo.minimize + @unittest.timeout(60, timeout_raises=unittest.SkipTest) def test_cbc_timeout(self): super()._run('cbc') @@ -287,6 +286,7 @@ def test_cbc_timeout(self): @unittest.skipUnless(email_set, "NEOS_EMAIL not set") class TestCBC_timeout_pyomo_cmd(PyomoCommandDriver, unittest.TestCase): sense = pyo.minimize + @unittest.timeout(60, timeout_raises=unittest.SkipTest) def test_cbc_timeout(self): super()._run('cbc') diff --git a/pyomo/network/arc.py b/pyomo/network/arc.py index e992f54c8cc..ff1874b0274 100644 --- a/pyomo/network/arc.py +++ b/pyomo/network/arc.py @@ -9,12 +9,14 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = [ 'Arc' ] +__all__ = ['Arc'] from pyomo.network.port import Port from pyomo.core.base.component import ActiveComponentData, ModelComponentFactory from pyomo.core.base.indexed_component import ( - ActiveIndexedComponent, UnindexedComponent_set) + ActiveIndexedComponent, + UnindexedComponent_set, +) from pyomo.core.base.global_set import UnindexedComponent_index from pyomo.core.base.misc import apply_indexed_rule from pyomo.common.deprecation import RenamedClass @@ -37,14 +39,16 @@ def _iterable_to_dict(vals, directed, name): if ports is None or len(ports) != 2: raise ValueError( "Value for arc '%s' is not either a " - "dict or a two-member iterable." % name) + "dict or a two-member iterable." % name + ) if directed: source, destination = ports ports = None else: source = destination = None - vals = dict(source=source, destination=destination, - ports=ports, directed=directed) + vals = dict( + source=source, destination=destination, ports=ports, directed=directed + ) elif "directed" not in vals: vals["directed"] = directed return vals @@ -78,8 +82,7 @@ def __init__(self, component=None, **kwds): # following constructors: # - ActiveComponentData # - ComponentData - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET self._active = True @@ -89,16 +92,6 @@ def __init__(self, component=None, **kwds): if len(kwds): self.set_value(kwds) - def __getstate__(self): - state = super(_ArcData, self).__getstate__() - for i in _ArcData.__slots__: - state[i] = getattr(self, i) - return state - - # Note: None of the slots on this class need to be edited, so we - # don't need to implement a specialized __setstate__ method, and - # can quietly rely on the super() class's implementation. - def __getattr__(self, name): """Returns `self.expanded_block.name` if it exists""" eb = self.expanded_block @@ -110,24 +103,21 @@ def __getattr__(self, name): pass # Since the base classes don't support getattr, we can just # throw the "normal" AttributeError - raise AttributeError("'%s' object has no attribute '%s'" - % (self.__class__.__name__, name)) + raise AttributeError( + "'%s' object has no attribute '%s'" % (self.__class__.__name__, name) + ) @property def source(self): # directed can be true before construction # so make sure ports is not None - return self._ports[0] if ( - self._directed and self._ports is not None - ) else None + return self._ports[0] if (self._directed and self._ports is not None) else None src = source @property def destination(self): - return self._ports[1] if ( - self._directed and self._ports is not None - ) else None + return self._ports[1] if (self._directed and self._ports is not None) else None dest = destination @@ -147,8 +137,11 @@ def set_value(self, vals): """Set the port attributes on this arc""" # the following allows m.a = Arc(directed=True); m.a = (m.p, m.q) # and m.a will be directed - d = self._directed if self._directed is not None else \ - self.parent_component()._init_directed + d = ( + self._directed + if self._directed is not None + else self.parent_component()._init_directed + ) vals = _iterable_to_dict(vals, d, self.name) @@ -159,8 +152,9 @@ def set_value(self, vals): if len(vals): raise ValueError( - "set_value passed unrecognized keywords in val:\n\t" + - "\n\t".join("%s = %s" % (k, v) for k, v in vals.items())) + "set_value passed unrecognized keywords in val:\n\t" + + "\n\t".join("%s = %s" % (k, v) for k, v in vals.items()) + ) if directed is not None: if source is None and destination is None: @@ -172,13 +166,14 @@ def set_value(self, vals): except: raise ValueError( "Failed to unpack 'ports' argument of arc '%s'. " - "Argument must be a 2-member tuple or list." - % self.name) + "Argument must be a 2-member tuple or list." % self.name + ) elif not directed: # throw an error if they gave an inconsistent directed value raise ValueError( "Passed False value for 'directed' for arc '%s', but " - "specified source or destination." % self.name) + "specified source or destination." % self.name + ) self._validate_ports(source, destination, ports) @@ -191,8 +186,7 @@ def set_value(self, vals): self.source._dests.remove(weakref_self) self.destination._sources.remove(weakref_self) - self._ports = tuple(ports) if ports is not None \ - else (source, destination) + self._ports = tuple(ports) if ports is not None else (source, destination) self._directed = source is not None weakref_self = weakref_ref(self) for port in self._ports: @@ -205,44 +199,53 @@ def _validate_ports(self, source, destination, ports): msg = "Arc %s: " % self.name if ports is not None: if source is not None or destination is not None: - raise ValueError(msg + - "cannot specify 'source' or 'destination' " - "when using 'ports' argument.") - if (type(ports) not in (list, tuple) or len(ports) != 2): - raise ValueError(msg + - "argument 'ports' must be list or tuple " - "containing exactly 2 Ports.") + raise ValueError( + msg + "cannot specify 'source' or 'destination' " + "when using 'ports' argument." + ) + if type(ports) not in (list, tuple) or len(ports) != 2: + raise ValueError( + msg + "argument 'ports' must be list or tuple " + "containing exactly 2 Ports." + ) for p in ports: try: if p.ctype is not Port: - raise ValueError(msg + - "found object '%s' in 'ports' not " - "of type Port." % p.name) + raise ValueError( + msg + "found object '%s' in 'ports' not " + "of type Port." % p.name + ) elif p.is_indexed(): - raise ValueError(msg + - "found indexed Port '%s' in 'ports', must " - "use single Ports for Arc." % p.name) + raise ValueError( + msg + "found indexed Port '%s' in 'ports', must " + "use single Ports for Arc." % p.name + ) except AttributeError: - raise ValueError(msg + - "found object '%s' in 'ports' not " - "of type Port." % str(p)) + raise ValueError( + msg + "found object '%s' in 'ports' not " + "of type Port." % str(p) + ) else: if source is None or destination is None: - raise ValueError(msg + - "must specify both 'source' and 'destination' " - "for directed Arc.") + raise ValueError( + msg + "must specify both 'source' and 'destination' " + "for directed Arc." + ) for p, side in [(source, "source"), (destination, "destination")]: try: if p.ctype is not Port: - raise ValueError(msg + - "%s object '%s' not of type Port." % (p.name, side)) + raise ValueError( + msg + "%s object '%s' not of type Port." % (p.name, side) + ) elif p.is_indexed(): - raise ValueError(msg + - "found indexed Port '%s' as %s, must use " - "single Ports for Arc." % (source.name, side)) + raise ValueError( + msg + "found indexed Port '%s' as %s, must use " + "single Ports for Arc." % (source.name, side) + ) except AttributeError: - raise ValueError(msg + - "%s object '%s' not of type Port." % (str(p), side)) + raise ValueError( + msg + "%s object '%s' not of type Port." % (str(p), side) + ) @ModelComponentFactory.register("Component used for connecting two Ports.") @@ -289,8 +292,7 @@ def __init__(self, *args, **kwds): if source is None and destination is None and ports is None: self._init_vals = None else: - self._init_vals = dict( - source=source, destination=destination, ports=ports) + self._init_vals = dict(source=source, destination=destination, ports=ports) def construct(self, data=None): """Initialize the Arc""" @@ -309,7 +311,8 @@ def construct(self, data=None): elif self._rule is not None and self._init_vals is not None: raise ValueError( "Cannot specify rule along with source/destination/ports " - "keywords for arc '%s'" % self.name) + "keywords for arc '%s'" % self.name + ) self_parent = self._parent() @@ -324,8 +327,8 @@ def construct(self, data=None): err = sys.exc_info()[1] logger.error( "Rule failed when generating values for " - "arc %s:\n%s: %s" - % (self.name, type(err).__name__, err)) + "arc %s:\n%s: %s" % (self.name, type(err).__name__, err) + ) raise tmp = _iterable_to_dict(tmp, self._init_directed, self.name) self._setitem_when_not_present(None, tmp) @@ -333,7 +336,8 @@ def construct(self, data=None): if self._init_vals is not None: raise IndexError( "Arc '%s': Cannot initialize multiple indices " - "of an arc with single ports" % self.name) + "of an arc with single ports" % self.name + ) for idx in self._index_set: try: tmp = apply_indexed_rule(self, self._rule, self_parent, idx) @@ -342,7 +346,8 @@ def construct(self, data=None): logger.error( "Rule failed when generating values for " "arc %s with index %s:\n%s: %s" - % (self.name, str(idx), type(err).__name__, err)) + % (self.name, str(idx), type(err).__name__, err) + ) raise tmp = _iterable_to_dict(tmp, self._init_directed, self.name) self._setitem_when_not_present(idx, tmp) @@ -351,18 +356,22 @@ def construct(self, data=None): def _pprint(self): """Return data that will be printed for this component.""" return ( - [("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None), - ("Active", self.active)], + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ("Active", self.active), + ], self.items(), ("Ports", "Directed", "Active"), - lambda k, v: ["(%s, %s)" % v.ports if v.ports is not None else None, - v.directed, - v.active]) + lambda k, v: [ + "(%s, %s)" % v.ports if v.ports is not None else None, + v.directed, + v.active, + ], + ) class ScalarArc(_ArcData, Arc): - def __init__(self, *args, **kwds): _ArcData.__init__(self, self) Arc.__init__(self, *args, **kwds) @@ -376,9 +385,11 @@ def set_value(self, vals): are still None, so you may need to repass some attributes. """ if not self._constructed: - raise ValueError("Setting the value of arc '%s' before " - "the Arc has been constructed (there " - "is currently no object to set)." % self.name) + raise ValueError( + "Setting the value of arc '%s' before " + "the Arc has been constructed (there " + "is currently no object to set)." % self.name + ) if len(self._data) == 0: self._data[None] = self try: @@ -403,5 +414,3 @@ def __init__(self, *args, **kwds): def expanded_block(self): # indexed block that contains all the blocks for this arc return self._expanded_block - - diff --git a/pyomo/network/decomposition.py b/pyomo/network/decomposition.py index 52d49563ff0..ae306766ae0 100644 --- a/pyomo/network/decomposition.py +++ b/pyomo/network/decomposition.py @@ -9,26 +9,37 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = [ 'SequentialDecomposition' ] +__all__ = ['SequentialDecomposition'] from pyomo.network import Port, Arc from pyomo.network.foqus_graph import FOQUSGraph -from pyomo.core import Constraint, value, Objective, Var, ConcreteModel, \ - Binary, minimize, Expression +from pyomo.core import ( + Constraint, + value, + Objective, + Var, + ConcreteModel, + Binary, + minimize, + Expression, +) from pyomo.common.collections import ComponentSet, ComponentMap, Bunch -from pyomo.core.expr.current import identify_variables +from pyomo.core.expr import identify_variables from pyomo.repn import generate_standard_repn import logging, time from pyomo.common.dependencies import ( - networkx as nx, networkx_available, - numpy, numpy_available, + networkx as nx, + networkx_available, + numpy, + numpy_available, ) imports_available = networkx_available & numpy_available logger = logging.getLogger('pyomo.network') + class SequentialDecomposition(FOQUSGraph): """ A sequential decomposition tool for Pyomo Network models @@ -155,11 +166,11 @@ def __init__(self, **kwds): options["solve_tears"] = True options["guesses"] = ComponentMap() options["default_guess"] = None - options["almost_equal_tol"] = 1.0E-8 + options["almost_equal_tol"] = 1.0e-8 options["log_info"] = False options["tear_method"] = "Direct" options["iterLim"] = 40 - options["tol"] = 1.0E-5 + options["tol"] = 1.0e-5 options["tol_type"] = "abs" options["report_diffs"] = False options["accel_min"] = -5 @@ -292,7 +303,6 @@ def run(self, model, function): if self.options["log_info"]: logger.setLevel(old_log_level) - def _run_impl(self, model, function): start = time.time() logger.info("Starting Sequential Decomposition") @@ -311,8 +321,9 @@ def _run_impl(self, model, function): if not self.options["solve_tears"] or not len(tset): # Not solving tears, we're done end = time.time() - logger.info("Finished Sequential Decomposition in %.2f seconds" % - (end - start)) + logger.info( + "Finished Sequential Decomposition in %.2f seconds" % (end - start) + ) return logger.info("Starting tear convergence procedure") @@ -329,11 +340,17 @@ def _run_impl(self, model, function): if ei in sccEdges[sccIndex]: tears.append(ei) - kwds = dict(G=G, order=order, function=function, tears=tears, - iterLim=self.options["iterLim"], tol=self.options["tol"], + kwds = dict( + G=G, + order=order, + function=function, + tears=tears, + iterLim=self.options["iterLim"], + tol=self.options["tol"], tol_type=self.options["tol_type"], report_diffs=self.options["report_diffs"], - outEdges=outEdges[sccIndex]) + outEdges=outEdges[sccIndex], + ) tear_method = self.options["tear_method"] @@ -346,12 +363,10 @@ def _run_impl(self, model, function): self.solve_tear_wegstein(**kwds) else: - raise ValueError( - "Invalid tear_method '%s'" % (tear_method,)) + raise ValueError("Invalid tear_method '%s'" % (tear_method,)) end = time.time() - logger.info("Finished Sequential Decomposition in %.2f seconds" % - (end - start)) + logger.info("Finished Sequential Decomposition in %.2f seconds" % (end - start)) def run_order(self, G, order, function, ignore=None, use_guesses=False): """ @@ -439,7 +454,8 @@ def pass_values(self, arc, fixed_inputs): "Found free splitfrac for arc '%s' with no current value. " "Please use the set_split_fraction method on its source " "port to set this value before expansion, or set its value " - "manually if expansion has already occured." % arc.name) + "manually if expansion has already occurred." % arc.name + ) elif sf is None: # if there is no splitfrac, but we have extensive members, then we # need to manually set the evar values because there will be no @@ -465,8 +481,8 @@ def pass_values(self, arc, fixed_inputs): continue if len(src.dests()) > 1: raise Exception( - "This still needs to be figured out (arc '%s')" % - arc.name) + "This still needs to be figured out (arc '%s')" % arc.name + ) # TODO: for now we know it's obvious what to do if there is # only 1 destination if mem.is_indexed(): @@ -487,21 +503,24 @@ def pass_values(self, arc, fixed_inputs): # or if the user puts something unexpected on the eblock. raise RuntimeError( "Found inequality constraint '%s'. Please do not modify " - "the expanded block." % con.name) + "the expanded block." % con.name + ) repn = generate_standard_repn(con.body) if repn.is_fixed(): # the port member's peer was already fixed if abs(value(con.lower) - repn.constant) > eq_tol: raise RuntimeError( "Found connected ports '%s' and '%s' both with fixed " - "but different values (by > %s) for constraint '%s'" % - (src, dest, eq_tol, con.name)) + "but different values (by > %s) for constraint '%s'" + % (src, dest, eq_tol, con.name) + ) continue if not (repn.is_linear() and len(repn.linear_vars) == 1): raise RuntimeError( "Constraint '%s' had more than one free variable when " "trying to pass a value to its destination. Please fix " - "more variables before passing across this arc." % con.name) + "more variables before passing across this arc." % con.name + ) # fix the value of the single variable to satisfy the constraint # con.lower is usually a NumericConstant but call value on it # just in case it is something else @@ -525,7 +544,8 @@ def pass_single_value(self, port, name, member, val, fixed): raise RuntimeError( "Member '%s' of port '%s' is already fixed but has a " "different value (by > %s) than what is being passed to it" - % (name, port.name, eq_tol)) + % (name, port.name, eq_tol) + ) elif member.is_expression_type(): repn = generate_standard_repn(member - val) if repn.is_linear() and len(repn.linear_vars) == 1: @@ -540,7 +560,8 @@ def pass_single_value(self, port, name, member, val, fixed): "Member '%s' of port '%s' had more than " "one free variable when trying to pass a value " "to it. Please fix more variables before passing " - "to this port." % (name, port.name)) + "to this port." % (name, port.name) + ) else: fixed.add(member) # val are numpy.float64; coerce val back to float @@ -559,7 +580,8 @@ def load_guesses(self, guesses, port, fixed): elif mem.is_indexed(): raise TypeError( "Guess for indexed member '%s' in port '%s' must map to a " - "dict of indexes" % (name, port.name)) + "dict of indexes" % (name, port.name) + ) else: itr = [(mem, entry, None)] @@ -574,7 +596,8 @@ def load_guesses(self, guesses, port, fixed): raise ValueError( "Found a guess for extensive member '%s' on " "port '%s' using arc '%s' that is not a source " - "of this port" % (name, port.name, arc.name)) + "of this port" % (name, port.name, arc.name) + ) evar = arc.expanded_block.component(name) if evar is None: # no evars, 1-to-1 arc @@ -595,10 +618,13 @@ def load_guesses(self, guesses, port, fixed): raise ValueError( "Cannot provide guess for expression type member " "'%s%s' of port '%s', must set current value of " - "variables within expression" % ( + "variables within expression" + % ( name, ("[%s]" % str(idx)) if mem.is_indexed() else "", - port.name)) + port.name, + ) + ) else: fixed.add(var) var.fix(float(entry)) @@ -625,22 +651,20 @@ def load_values(self, port, default, fixed, use_guesses): for evar in evars: if evar.is_fixed(): continue - self.check_value_fix(port, evar, default, fixed, - use_guesses, extensive=True) + self.check_value_fix( + port, evar, default, fixed, use_guesses, extensive=True + ) # now all evars should be fixed so combine them # and fix the value of the extensive port member self.combine_and_fix(port, name, obj, evars, fixed) else: if obj.is_expression_type(): for var in identify_variables(obj, include_fixed=False): - self.check_value_fix(port, var, default, fixed, - use_guesses) + self.check_value_fix(port, var, default, fixed, use_guesses) else: - self.check_value_fix(port, obj, default, fixed, - use_guesses) + self.check_value_fix(port, obj, default, fixed, use_guesses) - def check_value_fix(self, port, var, default, fixed, use_guesses, - extensive=False): + def check_value_fix(self, port, var, default, fixed, use_guesses, extensive=False): """ Try to fix the var at its current value or the default, else error """ @@ -654,12 +678,15 @@ def check_value_fix(self, port, var, default, fixed, use_guesses, raise RuntimeError( "Encountered a free inlet %svariable '%s' %s port '%s' with no " "%scurrent value, or default_guess option, while attempting " - "to compute the unit." % ( + "to compute the unit." + % ( "extensive " if extensive else "", var.name, ("on", "to")[int(extensive)], port.name, - "guess, " if use_guesses else "")) + "guess, " if use_guesses else "", + ) + ) fixed.add(var) var.fix(float(val)) @@ -710,13 +737,17 @@ def create_graph(self, model): for blk in model.block_data_objects(descend_into=True, active=True): for arc in blk.component_data_objects(Arc, descend_into=False): if not arc.directed: - raise ValueError("All Arcs must be directed when creating " - "a graph for a model. Found undirected " - "Arc: '%s'" % arc.name) + raise ValueError( + "All Arcs must be directed when creating " + "a graph for a model. Found undirected " + "Arc: '%s'" % arc.name + ) if arc.expanded_block is None: - raise ValueError("All Arcs must be expanded when creating " - "a graph for a model. Found unexpanded " - "Arc: '%s'" % arc.name) + raise ValueError( + "All Arcs must be expanded when creating " + "a graph for a model. Found unexpanded " + "Arc: '%s'" % arc.name + ) src, dest = arc.src.parent_block(), arc.dest.parent_block() G.add_edge(src, dest, arc=arc) @@ -776,7 +807,7 @@ def select_tear_mip(self, G, solver, solver_io=None, solver_options={}): """ This finds optimal sets of tear edges based on two criteria. The primary objective is to minimize the maximum number of - times any cycle is broken. The seconday criteria is to + times any cycle is broken. The secondary criteria is to minimize the number of tears. This function creates a MIP problem in Pyomo with a doubly @@ -785,10 +816,13 @@ def select_tear_mip(self, G, solver, solver_io=None, solver_options={}): model, bin_list = self.select_tear_mip_model(G) from pyomo.environ import SolverFactory + opt = SolverFactory(solver, solver_io=solver_io) if not opt.available(exception_flag=False): - raise ValueError("Solver '%s' (solver_io=%r) is not available, please pass a " - "different solver" % (solver, solver_io)) + raise ValueError( + "Solver '%s' (solver_io=%r) is not available, please pass a " + "different solver" % (solver, solver_io) + ) opt.solve(model, **solver_options) # collect final list by adding every edge with a "True" binary var @@ -859,7 +893,6 @@ def pass_edges(self, G, edges): var.free() fixed_outputs.clear() - def pass_tear_direct(self, G, tears): """Pass values across all tears in the given tear set""" fixed_outputs = ComponentSet() @@ -894,8 +927,7 @@ def pass_tear_wegstein(self, G, tears, x): for name, index, mem in src.iter_vars(names=True): peer = self.source_dest_peer(arc, name, index) - self.pass_single_value(dest, name, peer, x[i], - fixed_inputs[dest_unit]) + self.pass_single_value(dest, name, peer, x[i], fixed_inputs[dest_unit]) i += 1 def generate_gofx(self, G, tears): @@ -934,6 +966,7 @@ def cacher(self, key, fcn, *args): def tear_set(self, G): key = "tear_set" + def fcn(G): tset = self.options[key] if tset is not None: @@ -943,32 +976,38 @@ def fcn(G): for arc in tset: res.append(edge_map[arc_map[arc]]) if not self.check_tear_set(G, res): - raise ValueError("Tear set found in options is " - "insufficient to solve network") + raise ValueError( + "Tear set found in options is insufficient to solve network" + ) self.cache[key] = res return res method = self.options["select_tear_method"] if method == "mip": - return self.select_tear_mip(G, - self.options["tear_solver"], - self.options["tear_solver_io"], - self.options["tear_solver_options"]) + return self.select_tear_mip( + G, + self.options["tear_solver"], + self.options["tear_solver_io"], + self.options["tear_solver_options"], + ) elif method == "heuristic": # tset is the first list in the first return value return self.select_tear_heuristic(G)[0][0] else: raise ValueError("Invalid select_tear_method '%s'" % (method,)) + return self.cacher(key, fcn, G) def arc_to_edge(self, G): """Returns a mapping from arcs to edges for a graph""" + def fcn(G): res = ComponentMap() for edge in G.edges: arc = G.edges[edge]["arc"] res[arc] = edge return res + return self.cacher("arc_to_edge", fcn, G) def fixed_inputs(self): @@ -980,6 +1019,7 @@ def idx_to_node(self, G): def node_to_idx(self, G): """Returns a mapping from nodes to indexes for a graph""" + def fcn(G): res = dict() i = -1 @@ -987,6 +1027,7 @@ def fcn(G): i += 1 res[node] = i return res + return self.cacher("node_to_idx", fcn, G) def idx_to_edge(self, G): @@ -995,6 +1036,7 @@ def idx_to_edge(self, G): def edge_to_idx(self, G): """Returns a mapping from edges to indexes for a graph""" + def fcn(G): res = dict() i = -1 @@ -1002,4 +1044,5 @@ def fcn(G): i += 1 res[edge] = i return res + return self.cacher("edge_to_idx", fcn, G) diff --git a/pyomo/network/foqus_graph.py b/pyomo/network/foqus_graph.py index 698d9098ae0..e6fc34aaf62 100644 --- a/pyomo/network/foqus_graph.py +++ b/pyomo/network/foqus_graph.py @@ -82,8 +82,9 @@ class FOQUSGraph(object): - def solve_tear_direct(self, G, order, function, tears, outEdges, iterLim, - tol, tol_type, report_diffs): + def solve_tear_direct( + self, G, order, function, tears, outEdges, iterLim, tol, tol_type, report_diffs + ): """ Use direct substitution to solve tears. If multiple tears are given they are solved simultaneously. @@ -105,7 +106,7 @@ def solve_tear_direct(self, G, order, function, tears, outEdges, iterLim, List of lists of diff history, differences between input and output values at each iteration """ - hist = [] # diff at each iteration in every variable + hist = [] # diff at each iteration in every variable if not len(tears): # no need to iterate just run the calculations @@ -129,8 +130,7 @@ def solve_tear_direct(self, G, order, function, tears, outEdges, iterLim, break if itercount >= iterLim: - logger.warning("Direct failed to converge in %s iterations" - % iterLim) + logger.warning("Direct failed to converge in %s iterations" % iterLim) return hist self.pass_tear_direct(G, tears) @@ -145,8 +145,20 @@ def solve_tear_direct(self, G, order, function, tears, outEdges, iterLim, return hist - def solve_tear_wegstein(self, G, order, function, tears, outEdges, iterLim, - tol, tol_type, report_diffs, accel_min, accel_max): + def solve_tear_wegstein( + self, + G, + order, + function, + tears, + outEdges, + iterLim, + tol, + tol_type, + report_diffs, + accel_min, + accel_max, + ): """ Use Wegstein to solve tears. If multiple tears are given they are solved simultaneously. @@ -175,7 +187,7 @@ def solve_tear_wegstein(self, G, order, function, tears, outEdges, iterLim, List of lists of diff history, differences between input and output values at each iteration """ - hist = [] # diff at each iteration in every variable + hist = [] # diff at each iteration in every variable if not len(tears): # no need to iterate just run the calculations @@ -225,8 +237,7 @@ def solve_tear_wegstein(self, G, order, function, tears, outEdges, iterLim, break if itercount > iterLim: - logger.warning("Wegstein failed to converge in %s iterations" - % iterLim) + logger.warning("Wegstein failed to converge in %s iterations" % iterLim) return hist denom = x - x_prev @@ -270,7 +281,8 @@ def scc_collect(self, G, excludeEdges=None): outEdges List of lists of edge indexes leaving the SCC """ - def sc(v, stk, depth, strngComps): + + def sc(v, stk, depth, stringComps): # recursive sub-function for backtracking ndepth[v] = depth back[v] = depth @@ -278,7 +290,7 @@ def sc(v, stk, depth, strngComps): stk.append(v) for w in adj[v]: if ndepth[w] == None: - sc(w, stk, depth, strngComps) + sc(w, stk, depth, stringComps) back[v] = min(back[w], back[v]) elif w in stk: back[v] = min(back[w], back[v]) @@ -289,27 +301,27 @@ def sc(v, stk, depth, strngComps): scomp.append(i2n[w]) if w == v: break - strngComps.append(scomp) + stringComps.append(scomp) return depth i2n, adj, _ = self.adj_lists(G, excludeEdges=excludeEdges) - stk = [] # node stack - strngComps = [] # list of SCCs - ndepth = [None] * len(i2n) - back = [None] * len(i2n) + stk = [] # node stack + stringComps = [] # list of SCCs + ndepth = [None] * len(i2n) + back = [None] * len(i2n) # find the SCCs for v in range(len(i2n)): if ndepth[v] == None: - sc(v, stk, 0, strngComps) + sc(v, stk, 0, stringComps) # Find the rest of the information about SCCs given the node partition - sccNodes = strngComps + sccNodes = stringComps sccEdges = [] outEdges = [] inEdges = [] - for nset in strngComps: + for nset in stringComps: e, ie, oe = self.sub_graph_edges(G, nset) sccEdges.append(e) inEdges.append(ie) @@ -335,8 +347,8 @@ def scc_calculation_order(self, sccNodes, ie, oe): List of lists of out edge indexes to SCCs """ - adj = [] # SCC adjacency list - adjR = [] # SCC reverse adjacency list + adj = [] # SCC adjacency list + adjR = [] # SCC reverse adjacency list # populate with empty lists before running the loop below for i in range(len(sccNodes)): adj.append([]) @@ -404,7 +416,7 @@ def tree_order(self, adj, adjR, roots=None): a tree the results are not valid. In the returned order, it is sometimes possible for more - than one node to be caclulated at once. So a list of lists + than one node to be calculated at once. So a list of lists is returned by this function. These represent a bredth first search order of the tree. Following the order, all nodes that lead to a particular node will be visited @@ -434,7 +446,7 @@ def tree_order(self, adj, adjR, roots=None): if roots is None: roots = [] - mark = [True] * len(adj) # mark all nodes if no roots specified + mark = [True] * len(adj) # mark all nodes if no roots specified r = [True] * len(adj) # no root specified so find roots of tree by marking every # successor of every node, since roots have no predecessors @@ -454,30 +466,29 @@ def tree_order(self, adj, adjR, roots=None): for i in lst: mark[i] = True lst2 += adj[i] - lst = set(lst2) # remove dupes + lst = set(lst2) # remove dupes # Now we have list of roots, and roots and their desendants are marked ndepth = [None] * len(adj) lst = copy.deepcopy(roots) order = [] - checknodes = set() # list of candidate nodes for next depth - for i in roots: # nodes adjacent to roots are candidates + checknodes = set() # list of candidate nodes for next depth + for i in roots: # nodes adjacent to roots are candidates checknodes.update(adj[i]) depth = 0 while len(lst) > 0: order.append(lst) depth += 1 - lst = [] # nodes to add to the next depth in order - delSet = set() # nodes to delete from checknodes - checkUpdate = set() # nodes to add to checknodes + lst = [] # nodes to add to the next depth in order + delSet = set() # nodes to delete from checknodes + checkUpdate = set() # nodes to add to checknodes for i in checknodes: if ndepth[i] != None: # This means there is a cycle in the graph # this will lead to nonsense so throw exception - raise RuntimeError( - "Function tree_order does not work with cycles") - remSet = set() # to remove from a nodes rev adj list + raise RuntimeError("Function tree_order does not work with cycles") + remSet = set() # to remove from a nodes rev adj list for j in adjR[i]: if j in order[depth - 1]: # ancestor already placed @@ -517,7 +528,7 @@ def select_tear_heuristic(self, G): """ This finds optimal sets of tear edges based on two criteria. The primary objective is to minimize the maximum number of - times any cycle is broken. The seconday criteria is to + times any cycle is broken. The secondary criteria is to minimize the number of tears. This function uses a branch and bound type approach. @@ -533,20 +544,20 @@ def select_tear_heuristic(self, G): upperbound_total The total number of loops - Improvemnts for the future + Improvements for the future - I think I can imporve the efficency of this, but it is good + I think I can improve the efficiency of this, but it is good enough for now. Here are some ideas for improvement: 1. Reduce the number of redundant solutions. It is possible to find tears sets [1,2] and [2,1]. I eliminate - redundent solutions from the results, but they can - occur and it reduces efficency. + redundant solutions from the results, but they can + occur and it reduces efficiency. 2. Look at strongly connected components instead of whole graph. This would cut back on the size of graph we are looking at. The flowsheets are rarely one strongly - conneted component. + connected component. 3. When you add an edge to a tear set you could reduce the size of the problem in the branch by only looking at @@ -554,7 +565,7 @@ def select_tear_heuristic(self, G): 4. This returns all equally good optimal tear sets. That may not really be necessary. For very large flowsheets, - there could be an extremely large number of optimial tear + there could be an extremely large number of optimal tear edge sets. """ @@ -570,7 +581,7 @@ def sear(depth, prevY): for i in range(len(cycleEdges[depth])): # Loop through all the edges in cycle with index depth - y = list(prevY) # get list of already selected tear stream + y = list(prevY) # get list of already selected tear stream y[cycleEdges[depth][i]] = 1 # calculate number of times each cycle is torn Ay = numpy.dot(A, y) @@ -586,7 +597,7 @@ def sear(depth, prevY): if min(Ay) > 0: if maxAy < upperBound[0]: upperBound[0] = maxAy # most important factor - upperBound[1] = sumY # second most important + upperBound[1] = sumY # second most important elif sumY < upperBound[1]: upperBound[1] = sumY # record solution @@ -607,21 +618,21 @@ def sear(depth, prevY): if nr == 0: # no cycles so we are done - return [[[]], 0 , 0] + return [[[]], 0, 0] # Else there are cycles, so find edges to tear - y_init = [False] * G.number_of_edges() # whether edge j is in tear set + y_init = [False] * G.number_of_edges() # whether edge j is in tear set for j in tearUB: # y for initial u.b. solution y_init[j] = 1 - Ay_init = numpy.dot(A, y_init) # number of times each loop torn + Ay_init = numpy.dot(A, y_init) # number of times each loop torn # Set two upper bounds. The fist upper bound is on number of times # a loop is broken. Second upper bound is on number of tears. upperBound = [max(Ay_init), sum(y_init)] - y_init = [False] * G.number_of_edges() #clear y vector to start search + y_init = [False] * G.number_of_edges() # clear y vector to start search ySet = [] # a list of tear sets # Three elements are stored in each tear set: # 0 = y vector (tear set), 1 = max(Ay), 2 = sum(y) @@ -673,8 +684,8 @@ def sear(depth, prevY): def tear_upper_bound(self, G): """ This function quickly finds a sub-optimal set of tear - edges. This serves as an inital upperbound when looking - for an optimal tear set. Having an inital upper bound + edges. This serves as an initial upperbound when looking + for an optimal tear set. Having an initial upper bound improves efficiency. This works by constructing a search tree and just makes a @@ -700,8 +711,8 @@ def cyc(node, depth): parents = {} for node in G.nodes: - depths[node] = None - parents[node] = None + depths[node] = None + parents[node] = None for node in G.nodes: if depths[node] is None: @@ -725,7 +736,7 @@ def sub_graph_edges(self, G, nodes): List of edge indexes starting inside the subgraph and ending outside """ - e = [] # edges that connect two nodes in the subgraph + e = [] # edges that connect two nodes in the subgraph ie = [] # in edges oe = [] # out edges edge_list = self.idx_to_edge(G) @@ -739,7 +750,7 @@ def sub_graph_edges(self, G, nodes): # it's an out edge of the subgraph oe.append(i) elif dest in nodes: - #its a in edge of the subgraph + # its a in edge of the subgraph ie.append(i) return e, ie, oe @@ -748,11 +759,12 @@ def cycle_edge_matrix(self, G): Return a cycle-edge incidence matrix, a list of list of nodes in each cycle, and a list of list of edge indexes in each cycle. """ - cycleNodes, cycleEdges = self.all_cycles(G) # call cycle finding algorithm + cycleNodes, cycleEdges = self.all_cycles(G) # call cycle finding algorithm # Create empty incidence matrix and then fill it out - ceMat = numpy.zeros((len(cycleEdges), G.number_of_edges()), - dtype=numpy.dtype(int)) + ceMat = numpy.zeros( + (len(cycleEdges), G.number_of_edges()), dtype=numpy.dtype(int) + ) for i in range(len(cycleEdges)): for e in cycleEdges[i]: ceMat[i, e] = 1 @@ -787,7 +799,7 @@ def backtrack(v, pre_key=None): adj[v].remove((si, key)) elif si == ni: f = True - cyc = list(pointStack) # copy + cyc = list(pointStack) # copy # append the original point again so we get the last edge cyc.append((si, key)) cycles.append(cyc) @@ -806,10 +818,10 @@ def backtrack(v, pre_key=None): return f i2n, adj, _ = self.adj_lists(G, multi=True) - pointStack = [] # stack of (node, key) tuples - markStack = [] # nodes that have been marked - cycles = [] # list of cycles found - mark = [False] * len(i2n) # if a node is marked + pointStack = [] # stack of (node, key) tuples + markStack = [] # nodes that have been marked + cycles = [] # list of cycles found + mark = [False] * len(i2n) # if a node is marked for ni in range(len(i2n)): # iterate over node indexes diff --git a/pyomo/network/plugins/__init__.py b/pyomo/network/plugins/__init__.py index a07cc1f2519..5e9677d2bc4 100644 --- a/pyomo/network/plugins/__init__.py +++ b/pyomo/network/plugins/__init__.py @@ -9,5 +9,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.network.plugins.expand_arcs diff --git a/pyomo/network/plugins/expand_arcs.py b/pyomo/network/plugins/expand_arcs.py index b835079b5b4..4f6185d3173 100644 --- a/pyomo/network/plugins/expand_arcs.py +++ b/pyomo/network/plugins/expand_arcs.py @@ -10,6 +10,7 @@ # ___________________________________________________________________________ import logging + logger = logging.getLogger('pyomo.network') from pyomo.common.log import is_debug_set @@ -24,28 +25,31 @@ from pyomo.network.util import replicate_var # keyword arguments for component_objects and component_data_objects -obj_iter_kwds = dict(ctype=Arc, active=True, sort=SortComponents.deterministic, - descend_into=(Block,Disjunct)) +obj_iter_kwds = dict( + ctype=Arc, + active=True, + sort=SortComponents.deterministic, + descend_into=(Block, Disjunct), +) -@TransformationFactory.register('network.expand_arcs', - doc="Expand all Arcs in the model to simple constraints") +@TransformationFactory.register( + 'network.expand_arcs', doc="Expand all Arcs in the model to simple constraints" +) class ExpandArcs(Transformation): - def _apply_to(self, instance, **kwds): if is_debug_set(logger): logger.debug("Calling ArcExpander") # need to collect all ports to see every port each # is related to so that we can expand empty ports - port_list, known_port_sets, matched_ports = \ - self._collect_ports(instance) + port_list, known_port_sets, matched_ports = self._collect_ports(instance) self._add_blocks(instance) for port in port_list: # iterate over ref so that the index set is the same - # for all occurences of this member in related ports + # for all occurrences of this member in related ports # and so we iterate over members deterministically ref = known_port_sets[id(matched_ports[port])] for k, v in sorted(ref.items()): @@ -116,8 +120,7 @@ def _collect_ports(self, instance): # Validate all port sets and expand the empty ones known_port_sets = {} for groupID, port_set in sorted(port_groups.values()): - known_port_sets[id(port_set)] \ - = self._validate_and_expand_port_set(port_set) + known_port_sets[id(port_set)] = self._validate_and_expand_port_set(port_set) return port_list, known_port_sets, matched_ports @@ -133,16 +136,15 @@ def _validate_and_expand_port_set(self, ports): # This is an implicit var continue # OK: New var, so add it to the reference list - _len = ( - -1 if not v.is_indexed() - else len(v)) + _len = -1 if not v.is_indexed() else len(v) ref[k] = (v, _len, p, p.rule_for(k)) if not ref: logger.warning( "Cannot identify a reference port: no ports " "in the port set have assigned variables:\n\t(%s)" - % ', '.join(sorted(p.name for p in ports.values()))) + % ', '.join(sorted(p.name for p in ports.values())) + ) return ref # Now make sure that ports match @@ -159,39 +161,41 @@ def _validate_and_expand_port_set(self, ports): if k not in p.vars: raise ValueError( "Port mismatch: Port '%s' missing variable " - "'%s' (appearing in reference port '%s')" % - (p.name, k, v[2].name)) + "'%s' (appearing in reference port '%s')" + % (p.name, k, v[2].name) + ) _v = p.vars[k] if _v is None: if not p_is_partial: empty_or_partial.append(p) p_is_partial = True continue - _len = ( - -1 if not _v.is_indexed() - else len(_v)) + _len = -1 if not _v.is_indexed() else len(_v) if (_len >= 0) ^ (v[1] >= 0): raise ValueError( "Port mismatch: Port variable '%s' mixing " "indexed and non-indexed targets on ports '%s' " - "and '%s'" % - (k, v[2].name, p.name)) + "and '%s'" % (k, v[2].name, p.name) + ) if _len >= 0 and _len != v[1]: raise ValueError( "Port mismatch: Port variable '%s' index " "mismatch (%s elements in reference port '%s', " - "but %s elements in port '%s')" % - (k, v[1], v[2].name, _len, p.name)) + "but %s elements in port '%s')" + % (k, v[1], v[2].name, _len, p.name) + ) if v[1] >= 0 and len(v[0].index_set() ^ _v.index_set()): raise ValueError( "Port mismatch: Port variable '%s' has " - "mismatched indices on ports '%s' and '%s'" % - (k, v[2].name, p.name)) + "mismatched indices on ports '%s' and '%s'" + % (k, v[2].name, p.name) + ) if p.rule_for(k) is not v[3]: raise ValueError( "Port mismatch: Port variable '%s' has " - "different rules on ports '%s' and '%s'" % - (k, v[2].name, p.name)) + "different rules on ports '%s' and '%s'" + % (k, v[2].name, p.name) + ) # as we are adding things to the model, sort by key so that # the order things are added is deterministic @@ -209,7 +213,8 @@ def _validate_and_expand_port_set(self, ports): continue vname = unique_component_name( - block, '%s_auto_%s' % (p.getname(fully_qualified=True),k)) + block, '%s_auto_%s' % (p.getname(fully_qualified=True), k) + ) new_var = replicate_var(v[0], vname, block) @@ -223,7 +228,8 @@ def _add_blocks(self, instance): for arc in instance.component_objects(**obj_iter_kwds): blk = Block(arc.index_set()) bname = unique_component_name( - arc.parent_block(), "%s_expanded" % arc.local_name) + arc.parent_block(), "%s_expanded" % arc.local_name + ) arc.parent_block().add_component(bname, blk) arc._expanded_block = blk if arc.is_indexed(): diff --git a/pyomo/network/port.py b/pyomo/network/port.py index e824e977a75..4afb0e23ed0 100644 --- a/pyomo/network/port.py +++ b/pyomo/network/port.py @@ -9,27 +9,28 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = [ 'Port' ] +__all__ = ['Port'] import logging, sys from weakref import ref as weakref_ref +from pyomo.common.autoslots import AutoSlots from pyomo.common.collections import ComponentMap from pyomo.common.deprecation import RenamedClass from pyomo.common.formatting import tabular_writer from pyomo.common.log import is_debug_set from pyomo.common.modeling import unique_component_name, NOTSET +from pyomo.common.numeric_types import value from pyomo.common.timing import ConstructionTimer from pyomo.core.base.var import Var from pyomo.core.base.constraint import Constraint from pyomo.core.base.component import ComponentData, ModelComponentFactory from pyomo.core.base.global_set import UnindexedComponent_index -from pyomo.core.base.indexed_component import \ - IndexedComponent, UnindexedComponent_set +from pyomo.core.base.indexed_component import IndexedComponent, UnindexedComponent_set from pyomo.core.base.misc import apply_indexed_rule -from pyomo.core.base.numvalue import as_numeric, value -from pyomo.core.expr.current import identify_variables +from pyomo.core.expr.numvalue import as_numeric +from pyomo.core.expr import identify_variables from pyomo.core.base.label import alphanum_label_from_name from pyomo.network.util import create_var, tighten_var_domain @@ -48,6 +49,11 @@ class _PortData(ComponentData): """ __slots__ = ('vars', '_arcs', '_sources', '_dests', '_rules', '_splitfracs') + __autoslot_mappers__ = { + '_arcs': AutoSlots.weakref_sequence_mapper, + '_sources': AutoSlots.weakref_sequence_mapper, + '_dests': AutoSlots.weakref_sequence_mapper, + } def __init__(self, component=None): # @@ -55,8 +61,7 @@ def __init__(self, component=None): # following constructors: # - ComponentData # - NumericValue - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = NOTSET self.vars = {} @@ -66,34 +71,15 @@ def __init__(self, component=None): self._rules = {} self._splitfracs = ComponentMap() - def __getstate__(self): - state = super(_PortData, self).__getstate__() - for i in _PortData.__slots__: - state[i] = getattr(self, i) - - # Remove/resolve weak references - for i in ('_arcs', '_sources', '_dests'): - state[i] = [ref() for ref in state[i]] - return state - - def __setstate__(self, state): - state['_arcs'] = [weakref_ref(i) for i in state['_arcs']] - state['_sources'] = [weakref_ref(i) for i in state['_sources']] - state['_dests'] = [weakref_ref(i) for i in state['_dests']] - super(_PortData, self).__setstate__(state) - - # Note: None of the slots on this class need to be edited, so we - # don't need to implement a specialized __setstate__ method, and - # can quietly rely on the super() class's implementation. - def __getattr__(self, name): """Returns `self.vars[name]` if it exists""" if name in self.vars: return self.vars[name] # Since the base classes don't support getattr, we can just # throw the "normal" AttributeError - raise AttributeError("'%s' object has no attribute '%s'" - % (self.__class__.__name__, name)) + raise AttributeError( + "'%s' object has no attribute '%s'" % (self.__class__.__name__, name) + ) def arcs(self, active=None): """A list of Arcs in which this Port is a member""" @@ -142,18 +128,17 @@ def is_potentially_variable(self): def is_binary(self): """Return True if all variables in the Port are binary""" - return len(self) and all( - v.is_binary() for v in self.iter_vars(expr_vars=True)) + return len(self) and all(v.is_binary() for v in self.iter_vars(expr_vars=True)) def is_integer(self): """Return True if all variables in the Port are integer""" - return len(self) and all( - v.is_integer() for v in self.iter_vars(expr_vars=True)) + return len(self) and all(v.is_integer() for v in self.iter_vars(expr_vars=True)) def is_continuous(self): """Return True if all variables in the Port are continuous""" return len(self) and all( - v.is_continuous() for v in self.iter_vars(expr_vars=True)) + v.is_continuous() for v in self.iter_vars(expr_vars=True) + ) def add(self, var, name=None, rule=None, **kwds): """ @@ -183,26 +168,32 @@ def add(self, var, name=None, rule=None, **kwds): name = var.local_name if name in self.vars and self.vars[name] is not None: # don't throw warning if replacing an implicit (None) var - logger.warning("Implicitly replacing variable '%s' in Port '%s'.\n" - "To avoid this warning, use Port.remove() first." - % (name, self.name)) + logger.warning( + "Implicitly replacing variable '%s' in Port '%s'.\n" + "To avoid this warning, use Port.remove() first." % (name, self.name) + ) self.vars[name] = var if rule is None: rule = Port.Equality if rule is Port.Extensive: # avoid name collisions - if (name.endswith("_split") or name.endswith("_equality") or - name == "splitfrac"): + if ( + name.endswith("_split") + or name.endswith("_equality") + or name == "splitfrac" + ): raise ValueError( "Extensive variable '%s' on Port '%s' may not end " - "with '_split' or '_equality'" % (name, self.name)) + "with '_split' or '_equality'" % (name, self.name) + ) self._rules[name] = (rule, kwds) def remove(self, name): """Remove this member from the port""" if name not in self.vars: - raise ValueError("Cannot remove member '%s' not in Port '%s'" - % (name, self.name)) + raise ValueError( + "Cannot remove member '%s' not in Port '%s'" % (name, self.name) + ) self.vars.pop(name) self._rules.pop(name) @@ -278,8 +269,10 @@ def set_split_fraction(self, arc, val, fix=True): arc expansion when using `Port.Extensive`. """ if arc not in self.dests(): - raise ValueError("Port '%s' is not a source of Arc '%s', cannot " - "set split fraction" % (self.name, arc.name)) + raise ValueError( + "Port '%s' is not a source of Arc '%s', cannot " + "set split fraction" % (self.name, arc.name) + ) self._splitfracs[arc] = (val, fix) def get_split_fraction(self, arc): @@ -294,7 +287,9 @@ def get_split_fraction(self, arc): return res -@ModelComponentFactory.register("A bundle of variables that can be connected to other ports.") +@ModelComponentFactory.register( + "A bundle of variables that can be connected to other ports." +) class Port(IndexedComponent): """ A collection of variables, which may be connected to other ports @@ -351,9 +346,8 @@ def _getitem_when_not_present(self, idx): return tmp def construct(self, data=None): - if is_debug_set(logger): #pragma:nocover - logger.debug( "Constructing Port, name=%s, from data=%s" - % (self.name, data) ) + if is_debug_set(logger): # pragma:nocover + logger.debug("Constructing Port, name=%s, from data=%s" % (self.name, data)) if self._constructed: return @@ -372,7 +366,7 @@ def construct(self, data=None): self._rule = None self._initialize = None self._implicit = None - self._extends = None # especially important as this is another port + self._extends = None # especially important as this is another port timer.report() @@ -387,8 +381,7 @@ def _initialize_members(self, initSet): if self._initialize: self._add_from_container(tmp, self._initialize) if self._rule: - items = apply_indexed_rule( - self, self._rule, self._parent(), idx) + items = apply_indexed_rule(self, self._rule, self._parent(), idx) self._add_from_container(tmp, items) def _add_from_container(self, port, items): @@ -417,6 +410,7 @@ def _add_from_container(self, port, items): def _pprint(self, ostream=None, verbose=False): """Print component information.""" + def _line_generator(k, v): for _k, _v in sorted(v.vars.items()): if _v is None: @@ -426,12 +420,16 @@ def _line_generator(k, v): else: _len = 1 yield _k, _len, str(_v) + return ( - [("Size", len(self)), - ("Index", self._index_set if self.is_indexed() else None)], - self._data.items(), - ( "Name", "Size", "Variable"), - _line_generator) + [ + ("Size", len(self)), + ("Index", self._index_set if self.is_indexed() else None), + ], + self._data.items(), + ("Name", "Size", "Variable"), + _line_generator, + ) def display(self, prefix="", ostream=None): """ @@ -448,7 +446,8 @@ def display(self, prefix="", ostream=None): ostream.write("Size=" + str(len(self))) ostream.write("\n") - def _line_generator(k,v): + + def _line_generator(k, v): for _k, _v in sorted(v.vars.items()): if _v is None: _val = '-' @@ -456,12 +455,19 @@ def _line_generator(k,v): _val = str(value(_v)) else: _val = "{%s}" % ( - ', '.join('%r: %r' % ( - x, value(_v[x])) for x in sorted(_v._data))) + ', '.join( + '%r: %r' % (x, value(_v[x])) for x in sorted(_v._data) + ) + ) yield _k, _val - tabular_writer(ostream, prefix+tab, - ((k, v) for k, v in self._data.items()), - ("Name", "Value"), _line_generator) + + tabular_writer( + ostream, + prefix + tab, + ((k, v) for k, v in self._data.items()), + ("Name", "Value"), + _line_generator, + ) @staticmethod def Equality(port, name, index_set): @@ -472,8 +478,7 @@ def Equality(port, name, index_set): Port._add_equality_constraint(arc, name, index_set) @staticmethod - def Extensive(port, name, index_set, include_splitfrac=None, - write_var_sum=True): + def Extensive(port, name, index_set, include_splitfrac=None, write_var_sum=True): """Arc Expansion procedure for extensive variable properties This procedure is the rule to use when variable quantities should @@ -528,8 +533,13 @@ def Extensive(port, name, index_set, include_splitfrac=None, """ port_parent = port.parent_block() - out_vars = Port._Split(port, name, index_set, - include_splitfrac=include_splitfrac, write_var_sum=write_var_sum) + out_vars = Port._Split( + port, + name, + index_set, + include_splitfrac=include_splitfrac, + write_var_sum=write_var_sum, + ) in_vars = Port._Combine(port, name, index_set) @staticmethod @@ -560,22 +570,27 @@ def _Combine(port, name, index_set): # Create constraint: var == sum of evars # Same logic as Port._Split - cname = unique_component_name(port_parent, "%s_%s_insum" % - (alphanum_label_from_name(port.local_name), name)) + cname = unique_component_name( + port_parent, + "%s_%s_insum" % (alphanum_label_from_name(port.local_name), name), + ) if index_set is not UnindexedComponent_set: + def rule(m, *args): return sum(evar[args] for evar in in_vars) == var[args] + else: + def rule(m): return sum(evar for evar in in_vars) == var + con = Constraint(index_set, rule=rule) port_parent.add_component(cname, con) return in_vars @staticmethod - def _Split(port, name, index_set, include_splitfrac=None, - write_var_sum=True): + def _Split(port, name, index_set, include_splitfrac=None, write_var_sum=True): port_parent = port.parent_block() var = port.vars[name] out_vars = [] @@ -592,7 +607,8 @@ def _Split(port, name, index_set, include_splitfrac=None, if splitfracspec[0] != 1 and splitfracspec[1] == True: raise ValueError( "Cannot fix splitfrac not at 1 for port '%s' with a " - "single dest '%s'" % (port.name, dests[0].name)) + "single dest '%s'" % (port.name, dests[0].name) + ) if include_splitfrac is not True: include_splitfrac = False @@ -643,8 +659,9 @@ def _Split(port, name, index_set, include_splitfrac=None, "(found arc '%s') because this port only " "has one variable. To have control over " "splitfracs, please pass the " - " include_splitfrac=True argument." % - (port.name, arc.name)) + " include_splitfrac=True argument." + % (port.name, arc.name) + ) include_splitfrac = False continue @@ -658,11 +675,15 @@ def _Split(port, name, index_set, include_splitfrac=None, # Create constraint for this member using splitfrac. cname = "%s_split" % name if index_set is not UnindexedComponent_set: + def rule(m, *args): return evar[args] == eblock.splitfrac * var[args] + else: + def rule(m): return evar == eblock.splitfrac * var + con = Constraint(index_set, rule=rule) eblock.add_component(cname, con) @@ -672,14 +693,20 @@ def rule(m): if write_var_sum: # Create var total sum constraint: var == sum of evars # Need to alphanum port name in case it is indexed. - cname = unique_component_name(port_parent, "%s_%s_outsum" % - (alphanum_label_from_name(port.local_name), name)) + cname = unique_component_name( + port_parent, + "%s_%s_outsum" % (alphanum_label_from_name(port.local_name), name), + ) if index_set is not UnindexedComponent_set: + def rule(m, *args): return sum(evar[args] for evar in out_vars) == var[args] + else: + def rule(m): return sum(evar for evar in out_vars) == var + con = Constraint(index_set, rule=rule) port_parent.add_component(cname, con) else: @@ -690,11 +717,12 @@ def rule(m): "ports with a single destination or a single Extensive " "variable.\nSplit fractions are skipped in this case to " "simplify the model.\nPlease use write_var_sum=True on " - "this port (the default).") - cname = unique_component_name(port_parent, - "%s_frac_sum" % alphanum_label_from_name(port.local_name)) - con = Constraint(expr= - sum(a.expanded_block.splitfrac for a in dests) == 1) + "this port (the default)." + ) + cname = unique_component_name( + port_parent, "%s_frac_sum" % alphanum_label_from_name(port.local_name) + ) + con = Constraint(expr=sum(a.expanded_block.splitfrac for a in dests) == 1) port_parent.add_component(cname, con) return out_vars @@ -709,11 +737,15 @@ def _add_equality_constraint(arc, name, index_set): return port1, port2 = arc.ports if index_set is not UnindexedComponent_set: + def rule(m, *args): return port1.vars[name][args] == port2.vars[name][args] + else: + def rule(m): return port1.vars[name] == port2.vars[name] + con = Constraint(index_set, rule=rule) eblock.add_component(cname, con) @@ -728,8 +760,8 @@ def _create_evar(member, name, eblock, index_set): evar = create_var(member, name, eblock, index_set) return evar -class ScalarPort(Port, _PortData): +class ScalarPort(Port, _PortData): def __init__(self, *args, **kwd): _PortData.__init__(self, component=self) Port.__init__(self, *args, **kwd) @@ -743,4 +775,3 @@ class SimplePort(metaclass=RenamedClass): class IndexedPort(Port): pass - diff --git a/pyomo/network/tests/__init__.py b/pyomo/network/tests/__init__.py index f5881910ad7..1eb6d95e148 100644 --- a/pyomo/network/tests/__init__.py +++ b/pyomo/network/tests/__init__.py @@ -9,6 +9,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -# +# # pyomo.network tests -# +# diff --git a/pyomo/network/tests/test_arc.py b/pyomo/network/tests/test_arc.py index fac258ca972..cd340cace7a 100644 --- a/pyomo/network/tests/test_arc.py +++ b/pyomo/network/tests/test_arc.py @@ -16,13 +16,26 @@ from io import StringIO import logging -from pyomo.environ import ConcreteModel, AbstractModel, Var, Set, Constraint, RangeSet, NonNegativeReals, Reals, Binary, TransformationFactory, Block, value +from pyomo.environ import ( + ConcreteModel, + AbstractModel, + Var, + Set, + Constraint, + RangeSet, + NonNegativeReals, + Reals, + Binary, + TransformationFactory, + Block, + value, +) from pyomo.network import Arc, Port from pyomo.core.expr.visitor import identify_variables from pyomo.common.collections.component_set import ComponentSet -class TestArc(unittest.TestCase): +class TestArc(unittest.TestCase): def test_default_scalar_constructor(self): m = ConcreteModel() m.c1 = Arc() @@ -58,7 +71,6 @@ def test_default_indexed_constructor(self): self.assertEqual(len(m.c1), 0) self.assertIs(m.c1.ctype, Arc) - inst = m.create_instance() self.assertEqual(len(m.c1), 0) self.assertIs(m.c1.ctype, Arc) @@ -122,8 +134,10 @@ def rule(m): def test_with_indexed_ports(self): def rule1(m, i): return dict(source=m.prt1[i], destination=m.prt2[i]) + def rule2(m, i): return dict(ports=(m.prt1[i], m.prt2[i])) + def rule3(m, i): # should accept any two-member iterable return (c for c in (m.prt1[i], m.prt2[i])) @@ -288,15 +302,17 @@ def friend(m, i): os = StringIO() m.friend.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""friend : Size=5, Index=s, Active=True + self.assertEqual( + os.getvalue(), + """friend : Size=5, Index=s, Active=True Key : Ports : Directed : Active 1 : (prt1[1], prt2[1]) : True : True 2 : (prt1[2], prt2[2]) : True : True 3 : (prt1[3], prt2[3]) : True : True 4 : (prt1[4], prt2[4]) : True : True 5 : (prt1[5], prt2[5]) : True : True -""") +""", + ) m = ConcreteModel() m.z = RangeSet(1, 2) @@ -311,13 +327,14 @@ def pal(m, i): os = StringIO() m.pal.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""pal : Size=2, Index=z, Active=True + self.assertEqual( + os.getvalue(), + """pal : Size=2, Index=z, Active=True Key : Ports : Directed : Active 1 : (prt1[1], prt2[1]) : False : True 2 : (prt1[2], prt2[2]) : False : False -""") - +""", + ) def test_expand_single_scalar(self): m = ConcreteModel() @@ -348,16 +365,17 @@ def test_expand_single_scalar(self): os = StringIO() blk.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 1 Constraint Declarations v_equality : Size=1, Index=None, Active=True Key : Lower : Body : Upper : Active None : 0.0 : x - y : 0.0 : True 1 Declarations: v_equality -""") - +""", + ) def test_expand_scalar(self): m = ConcreteModel() @@ -389,8 +407,9 @@ def test_expand_scalar(self): os = StringIO() blk.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations a_equality : Size=1, Index=None, Active=True Key : Lower : Body : Upper : Active @@ -400,8 +419,8 @@ def test_expand_scalar(self): None : 0.0 : y - w : 0.0 : True 2 Declarations: a_equality b_equality -""") - +""", + ) def test_expand_expression(self): m = ConcreteModel() @@ -433,8 +452,9 @@ def test_expand_expression(self): os = StringIO() blk.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations expr1_equality : Size=1, Index=None, Active=True Key : Lower : Body : Upper : Active @@ -444,16 +464,16 @@ def test_expand_expression(self): None : 0.0 : 1 + y - (1 + w) : 0.0 : True 2 Declarations: expr1_equality expr2_equality -""") - +""", + ) def test_expand_indexed(self): m = ConcreteModel() - m.x = Var([1,2]) - m.y = Var([1,2], [1,2]) + m.x = Var([1, 2]) + m.y = Var([1, 2], [1, 2]) m.z = Var() - m.t = Var([1,2]) - m.u = Var([1,2], [1,2]) + m.t = Var([1, 2]) + m.u = Var([1, 2], [1, 2]) m.v = Var() m.prt1 = Port() m.prt1.add(m.x, "a") @@ -480,8 +500,9 @@ def test_expand_indexed(self): os = StringIO() blk.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 3 Constraint Declarations a_equality : Size=2, Index=x_index, Active=True Key : Lower : Body : Upper : Active @@ -498,8 +519,8 @@ def test_expand_indexed(self): None : 0.0 : z - v : 0.0 : True 3 Declarations: a_equality b_equality c_equality -""") - +""", + ) def test_expand_trivial(self): m = ConcreteModel() @@ -523,20 +544,21 @@ def test_expand_trivial(self): os = StringIO() blk.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 1 Constraint Declarations a_equality : Size=1, Index=None, Active=True Key : Lower : Body : Upper : Active None : 0.0 : x - x : 0.0 : True 1 Declarations: a_equality -""") - +""", + ) def test_expand_empty_scalar(self): m = ConcreteModel() - m.x = Var(bounds=(1,3)) + m.x = Var(bounds=(1, 3)) m.y = Var(domain=Binary) m.PRT = Port() m.PRT.add(m.x) @@ -557,15 +579,16 @@ def test_expand_empty_scalar(self): self.assertTrue(blk.component('x_equality').active) self.assertTrue(blk.component('y_equality').active) - self.assertIs( m.x.domain, m.component('EPRT_auto_x').domain ) - self.assertIs( m.y.domain, m.component('EPRT_auto_y').domain ) - self.assertEqual( m.x.bounds, m.component('EPRT_auto_x').bounds ) - self.assertEqual( m.y.bounds, m.component('EPRT_auto_y').bounds ) + self.assertIs(m.x.domain, m.component('EPRT_auto_x').domain) + self.assertIs(m.y.domain, m.component('EPRT_auto_y').domain) + self.assertEqual(m.x.bounds, m.component('EPRT_auto_x').bounds) + self.assertEqual(m.y.bounds, m.component('EPRT_auto_y').bounds) os = StringIO() blk.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations x_equality : Size=1, Index=None, Active=True Key : Lower : Body : Upper : Active @@ -575,8 +598,8 @@ def test_expand_empty_scalar(self): None : 0.0 : y - EPRT_auto_y : 0.0 : True 2 Declarations: x_equality y_equality -""") - +""", + ) def test_expand_empty_expression(self): m = ConcreteModel() @@ -603,8 +626,9 @@ def test_expand_empty_expression(self): os = StringIO() blk.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations x_equality : Size=1, Index=None, Active=True Key : Lower : Body : Upper : Active @@ -614,13 +638,13 @@ def test_expand_empty_expression(self): None : 0.0 : 1 + y - EPRT_auto_y : 0.0 : True 2 Declarations: x_equality y_equality -""") - +""", + ) def test_expand_empty_indexed(self): m = ConcreteModel() - m.x = Var([1,2], domain=Binary) - m.y = Var(bounds=(1,3)) + m.x = Var([1, 2], domain=Binary) + m.y = Var(bounds=(1, 3)) m.PRT = Port() m.PRT.add(m.x) m.PRT.add(m.y) @@ -640,17 +664,18 @@ def test_expand_empty_indexed(self): self.assertTrue(blk.component('x_equality').active) self.assertTrue(blk.component('y_equality').active) - self.assertIs( m.x[1].domain, m.component('EPRT_auto_x')[1].domain ) - self.assertIs( m.x[2].domain, m.component('EPRT_auto_x')[2].domain ) - self.assertIs( m.y.domain, m.component('EPRT_auto_y').domain ) - self.assertEqual( m.x[1].bounds, m.component('EPRT_auto_x')[1].bounds ) - self.assertEqual( m.x[2].bounds, m.component('EPRT_auto_x')[2].bounds ) - self.assertEqual( m.y.bounds, m.component('EPRT_auto_y').bounds ) + self.assertIs(m.x[1].domain, m.component('EPRT_auto_x')[1].domain) + self.assertIs(m.x[2].domain, m.component('EPRT_auto_x')[2].domain) + self.assertIs(m.y.domain, m.component('EPRT_auto_y').domain) + self.assertEqual(m.x[1].bounds, m.component('EPRT_auto_x')[1].bounds) + self.assertEqual(m.x[2].bounds, m.component('EPRT_auto_x')[2].bounds) + self.assertEqual(m.y.bounds, m.component('EPRT_auto_y').bounds) os = StringIO() blk.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations x_equality : Size=2, Index=x_index, Active=True Key : Lower : Body : Upper : Active @@ -661,13 +686,13 @@ def test_expand_empty_indexed(self): None : 0.0 : y - EPRT_auto_y : 0.0 : True 2 Declarations: x_equality y_equality -""") - +""", + ) def test_expand_multiple_empty_indexed(self): m = ConcreteModel() - m.x = Var([1,2], domain=Binary) - m.y = Var(bounds=(1,3)) + m.x = Var([1, 2], domain=Binary) + m.y = Var(bounds=(1, 3)) m.PRT = Port() m.PRT.add(m.x) m.PRT.add(m.y) @@ -694,24 +719,25 @@ def test_expand_multiple_empty_indexed(self): self.assertTrue(blk_d.component('x_equality').active) self.assertTrue(blk_d.component('y_equality').active) - self.assertIs( m.x[1].domain, m.component('EPRT1_auto_x')[1].domain ) - self.assertIs( m.x[2].domain, m.component('EPRT1_auto_x')[2].domain ) - self.assertIs( m.y.domain, m.component('EPRT1_auto_y').domain ) - self.assertEqual( m.x[1].bounds, m.component('EPRT1_auto_x')[1].bounds ) - self.assertEqual( m.x[2].bounds, m.component('EPRT1_auto_x')[2].bounds ) - self.assertEqual( m.y.bounds, m.component('EPRT1_auto_y').bounds ) + self.assertIs(m.x[1].domain, m.component('EPRT1_auto_x')[1].domain) + self.assertIs(m.x[2].domain, m.component('EPRT1_auto_x')[2].domain) + self.assertIs(m.y.domain, m.component('EPRT1_auto_y').domain) + self.assertEqual(m.x[1].bounds, m.component('EPRT1_auto_x')[1].bounds) + self.assertEqual(m.x[2].bounds, m.component('EPRT1_auto_x')[2].bounds) + self.assertEqual(m.y.bounds, m.component('EPRT1_auto_y').bounds) - self.assertIs( m.x[1].domain, m.component('EPRT2_auto_x')[1].domain ) - self.assertIs( m.x[2].domain, m.component('EPRT2_auto_x')[2].domain ) - self.assertIs( m.y.domain, m.component('EPRT2_auto_y').domain ) - self.assertEqual( m.x[1].bounds, m.component('EPRT2_auto_x')[1].bounds ) - self.assertEqual( m.x[2].bounds, m.component('EPRT2_auto_x')[2].bounds ) - self.assertEqual( m.y.bounds, m.component('EPRT2_auto_y').bounds ) + self.assertIs(m.x[1].domain, m.component('EPRT2_auto_x')[1].domain) + self.assertIs(m.x[2].domain, m.component('EPRT2_auto_x')[2].domain) + self.assertIs(m.y.domain, m.component('EPRT2_auto_y').domain) + self.assertEqual(m.x[1].bounds, m.component('EPRT2_auto_x')[1].bounds) + self.assertEqual(m.x[2].bounds, m.component('EPRT2_auto_x')[2].bounds) + self.assertEqual(m.y.bounds, m.component('EPRT2_auto_y').bounds) os = StringIO() blk_c.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations x_equality : Size=2, Index=x_index, Active=True Key : Lower : Body : Upper : Active @@ -722,12 +748,14 @@ def test_expand_multiple_empty_indexed(self): None : 0.0 : y - EPRT1_auto_y : 0.0 : True 2 Declarations: x_equality y_equality -""") +""", + ) os = StringIO() blk_d.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""d_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """d_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations x_equality : Size=2, Index=x_index, Active=True Key : Lower : Body : Upper : Active @@ -738,26 +766,26 @@ def test_expand_multiple_empty_indexed(self): None : 0.0 : EPRT2_auto_y - EPRT1_auto_y : 0.0 : True 2 Declarations: x_equality y_equality -""") - +""", + ) def test_expand_multiple_indexed(self): m = ConcreteModel() - m.x = Var([1,2], domain=Binary) - m.y = Var(bounds=(1,3)) + m.x = Var([1, 2], domain=Binary) + m.y = Var(bounds=(1, 3)) m.PRT = Port() m.PRT.add(m.x) m.PRT.add(m.y) - m.a1 = Var([1,2]) - m.a2 = Var([1,2]) + m.a1 = Var([1, 2]) + m.a2 = Var([1, 2]) m.b1 = Var() m.b2 = Var() m.EPRT1 = Port() - m.EPRT1.add(m.a1,'x') - m.EPRT1.add(m.b1,'y') + m.EPRT1.add(m.a1, 'x') + m.EPRT1.add(m.b1, 'y') m.EPRT2 = Port() - m.EPRT2.add(m.a2,'x') - m.EPRT2.add(m.b2,'y') + m.EPRT2.add(m.a2, 'x') + m.EPRT2.add(m.b2, 'y') m.c = Arc(ports=(m.PRT, m.EPRT1)) m.d = Arc(ports=(m.EPRT2, m.EPRT1)) @@ -780,8 +808,9 @@ def test_expand_multiple_indexed(self): os = StringIO() blk_c.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations x_equality : Size=2, Index=x_index, Active=True Key : Lower : Body : Upper : Active @@ -792,12 +821,14 @@ def test_expand_multiple_indexed(self): None : 0.0 : y - b1 : 0.0 : True 2 Declarations: x_equality y_equality -""") +""", + ) os = StringIO() blk_d.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""d_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """d_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations x_equality : Size=2, Index=x_index, Active=True Key : Lower : Body : Upper : Active @@ -808,22 +839,22 @@ def test_expand_multiple_indexed(self): None : 0.0 : b2 - b1 : 0.0 : True 2 Declarations: x_equality y_equality -""") - +""", + ) def test_expand_implicit_indexed(self): m = ConcreteModel() - m.x = Var([1,2], domain=Binary) - m.y = Var(bounds=(1,3)) + m.x = Var([1, 2], domain=Binary) + m.y = Var(bounds=(1, 3)) m.PRT = Port() m.PRT.add(m.x) m.PRT.add(m.y) - m.a2 = Var([1,2]) + m.a2 = Var([1, 2]) m.b1 = Var() m.EPRT2 = Port(implicit=['x']) - m.EPRT2.add(m.b1,'y') + m.EPRT2.add(m.b1, 'y') m.EPRT1 = Port(implicit=['y']) - m.EPRT1.add(m.a2,'x') + m.EPRT1.add(m.a2, 'x') m.c = Arc(ports=(m.EPRT1, m.PRT)) m.d = Arc(ports=(m.EPRT2, m.PRT)) @@ -833,23 +864,27 @@ def test_expand_implicit_indexed(self): os = StringIO() m.EPRT1.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""EPRT1 : Size=1, Index=None + self.assertEqual( + os.getvalue(), + """EPRT1 : Size=1, Index=None Key : Name : Size : Variable None : x : 2 : a2 : y : - : None -""") +""", + ) TransformationFactory('network.expand_arcs').apply_to(m) os = StringIO() m.EPRT1.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""EPRT1 : Size=1, Index=None + self.assertEqual( + os.getvalue(), + """EPRT1 : Size=1, Index=None Key : Name : Size : Variable None : x : 2 : a2 : y : 1 : EPRT1_auto_y -""") +""", + ) self.assertEqual(len(list(m.component_objects(Constraint))), 4) self.assertEqual(len(list(m.component_data_objects(Constraint))), 6) @@ -864,8 +899,9 @@ def test_expand_implicit_indexed(self): os = StringIO() blk_c.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations x_equality : Size=2, Index=a2_index, Active=True Key : Lower : Body : Upper : Active @@ -876,12 +912,14 @@ def test_expand_implicit_indexed(self): None : 0.0 : EPRT1_auto_y - y : 0.0 : True 2 Declarations: x_equality y_equality -""") +""", + ) os = StringIO() blk_d.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""d_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """d_expanded : Size=1, Index=None, Active=True 2 Constraint Declarations x_equality : Size=2, Index=a2_index, Active=True Key : Lower : Body : Upper : Active @@ -892,8 +930,8 @@ def test_expand_implicit_indexed(self): None : 0.0 : b1 - y : 0.0 : True 2 Declarations: x_equality y_equality -""") - +""", + ) def test_expand_indexed_arc(self): def rule(m, i): @@ -924,8 +962,9 @@ def rule(m, i): os = StringIO() m.component('eq_expanded').pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""eq_expanded : Size=2, Index=eq_index, Active=True + self.assertEqual( + os.getvalue(), + """eq_expanded : Size=2, Index=eq_index, Active=True eq_expanded[1] : Active=True 1 Constraint Declarations v_equality : Size=1, Index=None, Active=True @@ -940,8 +979,8 @@ def rule(m, i): None : 0.0 : y - w : 0.0 : True 1 Declarations: t_equality -""") - +""", + ) def test_inactive(self): m = ConcreteModel() @@ -977,24 +1016,32 @@ def test_inactive(self): os = StringIO() blk.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""c_expanded : Size=1, Index=None, Active=True + self.assertEqual( + os.getvalue(), + """c_expanded : Size=1, Index=None, Active=True 1 Constraint Declarations v_equality : Size=1, Index=None, Active=True Key : Lower : Body : Upper : Active None : 0.0 : x - y : 0.0 : True 1 Declarations: v_equality -""") +""", + ) def test_extensive_no_splitfrac_single_var(self): m = ConcreteModel() m.x = Var() m.y = Var() m.z = Var() - m.p1 = Port(initialize={'v': (m.x, Port.Extensive, {'include_splitfrac':False})}) - m.p2 = Port(initialize={'v': (m.y, Port.Extensive, {'include_splitfrac':False})}) - m.p3 = Port(initialize={'v': (m.z, Port.Extensive, {'include_splitfrac':False})}) + m.p1 = Port( + initialize={'v': (m.x, Port.Extensive, {'include_splitfrac': False})} + ) + m.p2 = Port( + initialize={'v': (m.y, Port.Extensive, {'include_splitfrac': False})} + ) + m.p3 = Port( + initialize={'v': (m.z, Port.Extensive, {'include_splitfrac': False})} + ) m.a1 = Arc(source=m.p1, destination=m.p2) m.a2 = Arc(source=m.p1, destination=m.p3) @@ -1002,8 +1049,9 @@ def test_extensive_no_splitfrac_single_var(self): os = StringIO() m.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""3 Var Declarations + self.assertEqual( + os.getvalue(), + """3 Var Declarations x : Size=1, Index=None Key : Lower : Value : Upper : Fixed : Stale : Domain None : None : None : None : False : True : Reals @@ -1061,7 +1109,8 @@ def test_extensive_no_splitfrac_single_var(self): None : v : 1 : z 13 Declarations: x y z p1 p2 p3 a1 a2 a1_expanded a2_expanded p1_v_outsum p2_v_insum p3_v_insum -""") +""", + ) def test_extensive_single_var(self): m = ConcreteModel() @@ -1078,8 +1127,9 @@ def test_extensive_single_var(self): os = StringIO() m.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""3 Var Declarations + self.assertEqual( + os.getvalue(), + """3 Var Declarations x : Size=1, Index=None Key : Lower : Value : Upper : Fixed : Stale : Domain None : None : None : None : False : True : Reals @@ -1137,7 +1187,8 @@ def test_extensive_single_var(self): None : v : 1 : z 13 Declarations: x y z p1 p2 p3 a1 a2 a1_expanded a2_expanded p1_v_outsum p2_v_insum p3_v_insum -""") +""", + ) def test_extensive_no_splitfrac_expansion(self): m = ConcreteModel() @@ -1149,11 +1200,17 @@ def test_extensive_no_splitfrac_expansion(self): def source_block(b): b.p_out = Var(b.model().time) - b.outlet = Port(initialize={'p': (b.p_out, Port.Extensive, {'include_splitfrac':False})}) + b.outlet = Port( + initialize={ + 'p': (b.p_out, Port.Extensive, {'include_splitfrac': False}) + } + ) def load_block(b): b.p_in = Var(b.model().time) - b.inlet = Port(initialize={'p': (b.p_in, Port.Extensive, {'include_splitfrac':False})}) + b.inlet = Port( + initialize={'p': (b.p_in, Port.Extensive, {'include_splitfrac': False})} + ) source_block(m.source) load_block(m.load1) @@ -1305,18 +1362,26 @@ def test_extensive_expansion(self): m.node1.mass = Var() m.node1.temp = Var() - m.node1.port = Port(initialize=[(m.node1.flow, Port.Extensive), - (m.node1.mass, Port.Extensive), - m.node1.temp]) + m.node1.port = Port( + initialize=[ + (m.node1.flow, Port.Extensive), + (m.node1.mass, Port.Extensive), + m.node1.temp, + ] + ) m.node2 = Block() m.node2.flow = Var(m.comp, domain=NonNegativeReals) m.node2.mass = Var() m.node2.temp = Var() - m.node2.port = Port(initialize=[(m.node2.flow, Port.Extensive), - (m.node2.mass, Port.Extensive), - m.node2.temp]) + m.node2.port = Port( + initialize=[ + (m.node2.flow, Port.Extensive), + (m.node2.mass, Port.Extensive), + m.node2.temp, + ] + ) # Port with multiple inlets and outlets m.multi = Block() @@ -1324,9 +1389,13 @@ def test_extensive_expansion(self): m.multi.mass = Var() m.multi.temp = Var() - m.multi.port = Port(initialize=[(m.multi.flow, Port.Extensive), - (m.multi.mass, Port.Extensive), - m.multi.temp]) + m.multi.port = Port( + initialize=[ + (m.multi.flow, Port.Extensive), + (m.multi.mass, Port.Extensive), + m.multi.temp, + ] + ) # Product m.prod = Block() @@ -1353,7 +1422,7 @@ def test_extensive_expansion(self): m.stream10 = Arc(source=m.multi.port, destination=m.tru.inlet) # SplitFrac specifications - m.feed.outlet.set_split_fraction(m.stream1, .6, fix=True) + m.feed.outlet.set_split_fraction(m.stream1, 0.6, fix=True) m.stream0.deactivate() @@ -1361,8 +1430,9 @@ def test_extensive_expansion(self): os = StringIO() m.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""1 Set Declarations + self.assertEqual( + os.getvalue(), + """1 Set Declarations comp : Size=1, Index=None, Ordered=Insertion Key : Dimen : Domain : Size : Members None : 1 : Any : 3 : {'a', 'b', 'c'} @@ -1877,7 +1947,8 @@ def test_extensive_expansion(self): None : (multi.port, prod.inlet) : True : False 28 Declarations: comp feed tru node1 node2 multi prod stream0 stream1 stream2 stream3 stream4 stream5 stream6 stream7 stream8 stream9 stream10 stream1_expanded stream2_expanded stream3_expanded stream4_expanded stream5_expanded stream6_expanded stream7_expanded stream8_expanded stream9_expanded stream10_expanded -""") +""", + ) def test_clone(self): m = ConcreteModel() diff --git a/pyomo/network/tests/test_decomposition.py b/pyomo/network/tests/test_decomposition.py index a0f9a803478..4e4d0231d00 100644 --- a/pyomo/network/tests/test_decomposition.py +++ b/pyomo/network/tests/test_decomposition.py @@ -15,7 +15,17 @@ import pyomo.common.unittest as unittest from pyomo.common.dependencies import numpy_available, networkx_available -from pyomo.environ import SolverFactory, value, ConcreteModel, Set, Block, Var, TransformationFactory, Reference, Constraint +from pyomo.environ import ( + SolverFactory, + value, + ConcreteModel, + Set, + Block, + Var, + TransformationFactory, + Reference, + Constraint, +) from pyomo.network import Port, SequentialDecomposition, Arc from pyomo.gdp.tests.models import makeExpandedNetworkDisjunction from types import MethodType @@ -25,10 +35,10 @@ glpk_available = SolverFactory('glpk').available(exception_flag=False) ipopt_available = SolverFactory('ipopt').available(exception_flag=False) + @unittest.skipIf(not import_available, "numpy or networkx not available") class TestSequentialDecomposition(unittest.TestCase): - - def is_converged(self, arc, rel=False, tol=1.0E-5): + def is_converged(self, arc, rel=False, tol=1.0e-5): eblock = arc.expanded_block for name in arc.src.vars: if arc.src.vars[name].is_indexed(): @@ -71,7 +81,7 @@ def is_converged(self, arc, rel=False, tol=1.0E-5): return True - def intensive_equal(self, port, tol=1.0E-5, **kwds): + def intensive_equal(self, port, tol=1.0e-5, **kwds): for name in kwds: if port.vars[name].is_indexed(): for i in kwds[name]: @@ -99,6 +109,7 @@ def simple_recycle_model(self): m.feed.pressure_out = Var() m.feed.expr_var_idx_out = Var(m.comps) + @m.feed.Expression(m.comps) def expr_idx_out(b, i): return -b.expr_var_idx_out[i] @@ -108,9 +119,13 @@ def expr_idx_out(b, i): @m.feed.Port() def outlet(b): - return dict(flow=b.flow_out, temperature=b.temperature_out, - pressure=b.pressure_out, expr_idx=b.expr_idx_out, - expr=b.expr_out) + return dict( + flow=b.flow_out, + temperature=b.temperature_out, + pressure=b.pressure_out, + expr_idx=b.expr_idx_out, + expr=b.expr_out, + ) def initialize_feed(self): pass @@ -125,6 +140,7 @@ def initialize_feed(self): m.mixer.pressure_in_side_1 = Var() m.mixer.expr_var_idx_in_side_1 = Var(m.comps) + @m.mixer.Expression(m.comps) def expr_idx_in_side_1(b, i): return -b.expr_var_idx_in_side_1[i] @@ -137,6 +153,7 @@ def expr_idx_in_side_1(b, i): m.mixer.pressure_in_side_2 = Var() m.mixer.expr_var_idx_in_side_2 = Var(m.comps) + @m.mixer.Expression(m.comps) def expr_idx_in_side_2(b, i): return -b.expr_var_idx_in_side_2[i] @@ -149,6 +166,7 @@ def expr_idx_in_side_2(b, i): m.mixer.pressure_out = Var() m.mixer.expr_var_idx_out = Var(m.comps) + @m.mixer.Expression(m.comps) def expr_idx_out(b, i): return -b.expr_var_idx_out[i] @@ -158,38 +176,46 @@ def expr_idx_out(b, i): @m.mixer.Port() def inlet_side_1(b): - return dict(flow=b.flow_in_side_1, + return dict( + flow=b.flow_in_side_1, temperature=b.temperature_in_side_1, pressure=b.pressure_in_side_1, expr_idx=b.expr_idx_in_side_1, - expr=b.expr_in_side_1) + expr=b.expr_in_side_1, + ) @m.mixer.Port() def inlet_side_2(b): - return dict(flow=b.flow_in_side_2, + return dict( + flow=b.flow_in_side_2, temperature=b.temperature_in_side_2, pressure=b.pressure_in_side_2, expr_idx=b.expr_idx_in_side_2, - expr=b.expr_in_side_2) + expr=b.expr_in_side_2, + ) @m.mixer.Port() def outlet(b): - return dict(flow=b.flow_out, + return dict( + flow=b.flow_out, temperature=b.temperature_out, pressure=b.pressure_out, expr_idx=b.expr_idx_out, - expr=b.expr_out) + expr=b.expr_out, + ) def initialize_mixer(self): for i in self.flow_out: - self.flow_out[i].value = \ - value(self.flow_in_side_1[i] + self.flow_in_side_2[i]) + self.flow_out[i].value = value( + self.flow_in_side_1[i] + self.flow_in_side_2[i] + ) for i in self.expr_var_idx_out: - self.expr_var_idx_out[i].value = \ - value(self.expr_var_idx_in_side_1[i] + - self.expr_var_idx_in_side_2[i]) - self.expr_var_out.value = \ - value(self.expr_var_in_side_1 + self.expr_var_in_side_2) + self.expr_var_idx_out[i].value = value( + self.expr_var_idx_in_side_1[i] + self.expr_var_idx_in_side_2[i] + ) + self.expr_var_out.value = value( + self.expr_var_in_side_1 + self.expr_var_in_side_2 + ) assert self.temperature_in_side_1.value == self.temperature_in_side_2.value self.temperature_out.value = value(self.temperature_in_side_1) assert self.pressure_in_side_1.value == self.pressure_in_side_2.value @@ -205,6 +231,7 @@ def initialize_mixer(self): m.unit.pressure_in = Var() m.unit.expr_var_idx_in = Var(m.comps) + @m.unit.Expression(m.comps) def expr_idx_in(b, i): return -b.expr_var_idx_in[i] @@ -217,6 +244,7 @@ def expr_idx_in(b, i): m.unit.pressure_out = Var() m.unit.expr_var_idx_out = Var(m.comps) + @m.unit.Expression(m.comps) def expr_idx_out(b, i): return -b.expr_var_idx_out[i] @@ -226,15 +254,23 @@ def expr_idx_out(b, i): @m.unit.Port() def inlet(b): - return dict(flow=b.flow_in, temperature=b.temperature_in, - pressure=b.pressure_in, expr_idx=b.expr_idx_in, - expr=b.expr_in) + return dict( + flow=b.flow_in, + temperature=b.temperature_in, + pressure=b.pressure_in, + expr_idx=b.expr_idx_in, + expr=b.expr_in, + ) @m.unit.Port() def outlet(b): - return dict(flow=b.flow_out, temperature=b.temperature_out, - pressure=b.pressure_out, expr_idx=b.expr_idx_out, - expr=b.expr_out) + return dict( + flow=b.flow_out, + temperature=b.temperature_out, + pressure=b.pressure_out, + expr_idx=b.expr_idx_out, + expr=b.expr_out, + ) def initialize_unit(self): for i in self.flow_out: @@ -253,10 +289,12 @@ def initialize_unit(self): @m.splitter.Block(m.comps) def flow_in(b, i): b.flow = Var() + m.splitter.temperature_in = Var() m.splitter.pressure_in = Var() m.splitter.expr_var_idx_in = Var(m.comps) + @m.splitter.Expression(m.comps) def expr_idx_in(b, i): return -b.expr_var_idx_in[i] @@ -269,6 +307,7 @@ def expr_idx_in(b, i): m.splitter.pressure_out_side_1 = Var() m.splitter.expr_var_idx_out_side_1 = Var(m.comps) + @m.splitter.Expression(m.comps) def expr_idx_out_side_1(b, i): return -b.expr_var_idx_out_side_1[i] @@ -281,6 +320,7 @@ def expr_idx_out_side_1(b, i): m.splitter.pressure_out_side_2 = Var() m.splitter.expr_var_idx_out_side_2 = Var(m.comps) + @m.splitter.Expression(m.comps) def expr_idx_out_side_2(b, i): return -b.expr_var_idx_out_side_2[i] @@ -290,41 +330,47 @@ def expr_idx_out_side_2(b, i): @m.splitter.Port() def inlet(b): - return dict(flow=Reference(b.flow_in[:].flow), + return dict( + flow=Reference(b.flow_in[:].flow), temperature=b.temperature_in, pressure=b.pressure_in, expr_idx=b.expr_idx_in, - expr=b.expr_in) + expr=b.expr_in, + ) @m.splitter.Port() def outlet_side_1(b): - return dict(flow=b.flow_out_side_1, + return dict( + flow=b.flow_out_side_1, temperature=b.temperature_out_side_1, pressure=b.pressure_out_side_1, expr_idx=b.expr_idx_out_side_1, - expr=b.expr_out_side_1) + expr=b.expr_out_side_1, + ) @m.splitter.Port() def outlet_side_2(b): - return dict(flow=b.flow_out_side_2, + return dict( + flow=b.flow_out_side_2, temperature=b.temperature_out_side_2, pressure=b.pressure_out_side_2, expr_idx=b.expr_idx_out_side_2, - expr=b.expr_out_side_2) + expr=b.expr_out_side_2, + ) def initialize_splitter(self): recycle = 0.1 prod = 1 - recycle for i in self.flow_in: - self.flow_out_side_1[i].value \ - = prod * value(self.flow_in[i].flow) - self.flow_out_side_2[i].value \ - = recycle * value(self.flow_in[i].flow) + self.flow_out_side_1[i].value = prod * value(self.flow_in[i].flow) + self.flow_out_side_2[i].value = recycle * value(self.flow_in[i].flow) for i in self.expr_var_idx_in: - self.expr_var_idx_out_side_1[i].value = \ - prod * value(self.expr_var_idx_in[i]) - self.expr_var_idx_out_side_2[i].value = \ - recycle * value(self.expr_var_idx_in[i]) + self.expr_var_idx_out_side_1[i].value = prod * value( + self.expr_var_idx_in[i] + ) + self.expr_var_idx_out_side_2[i].value = recycle * value( + self.expr_var_idx_in[i] + ) self.expr_var_out_side_1.value = prod * value(self.expr_var_in) self.expr_var_out_side_2.value = recycle * value(self.expr_var_in) self.temperature_out_side_1.value = value(self.temperature_in) @@ -346,9 +392,13 @@ def initialize_splitter(self): @m.prod.Port() def inlet(b): - return dict(flow=b.flow_in, temperature=b.temperature_in, - pressure=b.pressure_in, expr_idx=b.actual_var_idx_in, - expr=b.actual_var_in) + return dict( + flow=b.flow_in, + temperature=b.temperature_in, + pressure=b.pressure_in, + expr_idx=b.actual_var_idx_in, + expr=b.actual_var_in, + ) def initialize_prod(self): pass @@ -399,14 +449,14 @@ def simple_recycle_run(self, tear_method, tol_type): def function(unit): unit.initialize() - seq = SequentialDecomposition(tear_method=tear_method, - tol_type=tol_type) + seq = SequentialDecomposition(tear_method=tear_method, tol_type=tol_type) tset = [m.stream_splitter_to_mixer] seq.set_tear_set(tset) splitter_to_mixer_guess = { "flow": {"A": 0, "B": 0, "C": 0}, "temperature": 450, - "pressure": 128} + "pressure": 128, + } seq.set_guesses_for(m.mixer.inlet_side_2, splitter_to_mixer_guess) # need to set guesses for expression members by initializing those vars m.mixer.expr_var_idx_in_side_2["A"] = 0 @@ -425,6 +475,7 @@ def build_in_out(b): b.pressure_in = Var() b.expr_var_idx_in = Var(m.comps) + @b.Expression(m.comps) def expr_idx_in(b, i): return -b.expr_var_idx_in[i] @@ -438,6 +489,7 @@ def expr_idx_in(b, i): b.pressure_out = Var() b.expr_var_idx_out = Var(m.comps) + @b.Expression(m.comps) def expr_idx_out(b, i): return -b.expr_var_idx_out[i] @@ -451,18 +503,24 @@ def expr_idx_out(b, i): b.initialize = MethodType(initialize, b) def inlet(b): - return dict(flow=(b.flow_in, Port.Extensive), + return dict( + flow=(b.flow_in, Port.Extensive), mass=(b.mass_in, Port.Extensive), - temperature=b.temperature_in, pressure=b.pressure_in, + temperature=b.temperature_in, + pressure=b.pressure_in, expr_idx=(b.expr_idx_in, Port.Extensive), - expr=(b.expr_in, Port.Extensive)) + expr=(b.expr_in, Port.Extensive), + ) def outlet(b): - return dict(flow=(b.flow_out, Port.Extensive), + return dict( + flow=(b.flow_out, Port.Extensive), mass=(b.mass_out, Port.Extensive), - temperature=b.temperature_out, pressure=b.pressure_out, + temperature=b.temperature_out, + pressure=b.pressure_out, expr_idx=(b.expr_idx_out, Port.Extensive), - expr=(b.expr_out, Port.Extensive)) + expr=(b.expr_out, Port.Extensive), + ) def initialize(self): for i in self.flow_out: @@ -489,6 +547,7 @@ def nop(self): m.feed.pressure_out = Var() m.feed.expr_var_idx_out = Var(m.comps) + @m.feed.Expression(m.comps) def expr_idx_out(b, i): return -b.expr_var_idx_out[i] @@ -525,11 +584,14 @@ def expr_idx_out(b, i): @m.prod.Port() def inlet(b): - return dict(flow=(b.flow_in, Port.Extensive), + return dict( + flow=(b.flow_in, Port.Extensive), mass=(b.mass_in, Port.Extensive), - temperature=b.temperature_in, pressure=b.pressure_in, + temperature=b.temperature_in, + pressure=b.pressure_in, expr_idx=(b.actual_var_idx_in, Port.Extensive), - expr=(b.actual_var_in, Port.Extensive)) + expr=(b.actual_var_in, Port.Extensive), + ) m.prod.initialize = MethodType(nop, m.prod) @@ -584,21 +646,25 @@ def extensive_recycle_run(self, tear_method, tol_type): def function(unit): unit.initialize() - seq = SequentialDecomposition(tear_method=tear_method, - tol_type=tol_type) + seq = SequentialDecomposition(tear_method=tear_method, tol_type=tol_type) tset = [m.stream_splitter_to_mixer] seq.set_tear_set(tset) splitter_to_mixer_guess = { - "flow": {"A": [(m.stream_splitter_to_mixer, 0)], - "B": [(m.stream_splitter_to_mixer, 0)], - "C": [(m.stream_splitter_to_mixer, 0)]}, + "flow": { + "A": [(m.stream_splitter_to_mixer, 0)], + "B": [(m.stream_splitter_to_mixer, 0)], + "C": [(m.stream_splitter_to_mixer, 0)], + }, "mass": [(m.stream_splitter_to_mixer, 0)], - "expr_idx": {"A": [(m.stream_splitter_to_mixer, 0)], - "B": [(m.stream_splitter_to_mixer, 0)], - "C": [(m.stream_splitter_to_mixer, 0)]}, + "expr_idx": { + "A": [(m.stream_splitter_to_mixer, 0)], + "B": [(m.stream_splitter_to_mixer, 0)], + "C": [(m.stream_splitter_to_mixer, 0)], + }, "expr": [(m.stream_splitter_to_mixer, 0)], "temperature": 450, - "pressure": 128} + "pressure": 128, + } seq.set_guesses_for(m.mixer.inlet, splitter_to_mixer_guess) seq.run(m, function) @@ -607,84 +673,73 @@ def function(unit): if rel: s = value(m.prod.inlet.mass) d = value(m.feed.outlet.mass) - self.assertAlmostEqual( - (s - d) / s, 0, - places=5) + self.assertAlmostEqual((s - d) / s, 0, places=5) else: self.assertAlmostEqual( - value(m.prod.inlet.mass), - value(m.feed.outlet.mass), - places=5) + value(m.prod.inlet.mass), value(m.feed.outlet.mass), places=5 + ) def check_recycle_model(self, m, rel=False): for arc in m.component_data_objects(Arc): self.assertTrue(self.is_converged(arc, rel=rel)) for port in m.component_data_objects(Port): - self.assertTrue(self.intensive_equal( - port, - temperature=value(m.feed.outlet.temperature), - pressure=value(m.feed.outlet.pressure))) + self.assertTrue( + self.intensive_equal( + port, + temperature=value(m.feed.outlet.temperature), + pressure=value(m.feed.outlet.pressure), + ) + ) if rel: # in == out for i in m.feed.outlet.flow: s = value(m.prod.inlet.flow[i]) d = value(m.feed.outlet.flow[i]) - self.assertAlmostEqual( - (s - d) / s, 0, - places=5) + self.assertAlmostEqual((s - d) / s, 0, places=5) for i in m.feed.outlet.expr_idx: s = value(m.prod.inlet.expr_idx[i]) d = value(m.feed.outlet.expr_idx[i]) - self.assertAlmostEqual( - (s - d) / s, 0, - places=5) + self.assertAlmostEqual((s - d) / s, 0, places=5) s = value(m.prod.inlet.expr) d = value(m.feed.outlet.expr) - self.assertAlmostEqual( - (s - d) / s, 0, - places=5) + self.assertAlmostEqual((s - d) / s, 0, places=5) # check the expressions work, should be negative in prod for i in m.feed.outlet.expr_idx: s = value(-m.prod.actual_var_idx_in[i]) d = value(m.feed.expr_var_idx_out[i]) - self.assertAlmostEqual( - (s - d) / s, 0, - places=5) + self.assertAlmostEqual((s - d) / s, 0, places=5) s = value(-m.prod.actual_var_in) d = value(m.feed.expr_var_out) - self.assertAlmostEqual( - (s - d) / s, 0, - places=5) + self.assertAlmostEqual((s - d) / s, 0, places=5) else: # in == out for i in m.feed.outlet.flow: self.assertAlmostEqual( - value(m.prod.inlet.flow[i]), - value(m.feed.outlet.flow[i]), - places=5) + value(m.prod.inlet.flow[i]), value(m.feed.outlet.flow[i]), places=5 + ) for i in m.feed.outlet.expr_idx: self.assertAlmostEqual( value(m.prod.inlet.expr_idx[i]), value(m.feed.outlet.expr_idx[i]), - places=5) + places=5, + ) self.assertAlmostEqual( - value(m.prod.inlet.expr), - value(m.feed.outlet.expr), - places=5) + value(m.prod.inlet.expr), value(m.feed.outlet.expr), places=5 + ) # check the expressions work, should be negative in prod for i in m.feed.outlet.expr_idx: self.assertAlmostEqual( value(-m.prod.actual_var_idx_in[i]), value(m.feed.expr_var_idx_out[i]), - places=5) + places=5, + ) self.assertAlmostEqual( - value(-m.prod.actual_var_in), - value(m.feed.expr_var_out), - places=5) + value(-m.prod.actual_var_in), value(m.feed.expr_var_out), places=5 + ) def test_simple_recycle_direct_abs(self): self.simple_recycle_run(tear_method="Direct", tol_type="abs") @@ -723,8 +778,11 @@ def test_tear_selection(self): all_tsets = [] for tset in heu_result[0]: all_tsets.append(seq.indexes_to_arcs(G, tset)) - for arc in (m.stream_mixer_to_unit, m.stream_unit_to_splitter, - m.stream_splitter_to_mixer): + for arc in ( + m.stream_mixer_to_unit, + m.stream_unit_to_splitter, + m.stream_splitter_to_mixer, + ): self.assertIn([arc], all_tsets) tset_mip = seq.tear_set_arcs(G, "mip", solver="glpk") @@ -746,7 +804,8 @@ def function(unit): splitter_to_mixer_guess = { "flow": {"A": 0, "B": 0, "C": 0}, "temperature": 450, - "pressure": 128} + "pressure": 128, + } seq.set_guesses_for(m.mixer.inlet_side_2, splitter_to_mixer_guess) # need to set guesses for expression members by initializing those vars m.mixer.expr_var_idx_in_side_2["A"] = 0 @@ -758,13 +817,13 @@ def function(unit): # we shouldn't need to know which streams are torn since everything # should already have values set so we don't need guesses, but we # just make sure it is able to select a tear set on its own - seq = SequentialDecomposition(tear_solver="glpk", - select_tear_method="mip") + seq = SequentialDecomposition(tear_solver="glpk", select_tear_method="mip") seq.run(m, function) self.check_recycle_model(m) - seq = SequentialDecomposition(tear_solver="glpk", - select_tear_method="heuristic") + seq = SequentialDecomposition( + tear_solver="glpk", select_tear_method="heuristic" + ) seq.run(m, function) self.check_recycle_model(m) @@ -786,14 +845,15 @@ def initializer(blk): seq = SequentialDecomposition(select_tear_method="heuristic", default_guess=0.5) seq.run(m, initializer) if blue_on: - self.assertAlmostEqual( value(m.dest.x), 0.84) + self.assertAlmostEqual(value(m.dest.x), 0.84) else: - self.assertAlmostEqual( value(m.dest.x), 0.42) + self.assertAlmostEqual(value(m.dest.x), 0.42) @unittest.skipIf(not ipopt_available, "ipopt solver not available") def test_fixed_disjuncts(self): self._test_disjuncts(True) self._test_disjuncts(False) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/network/tests/test_port.py b/pyomo/network/tests/test_port.py index 36403dd1985..bc9a6fc527f 100644 --- a/pyomo/network/tests/test_port.py +++ b/pyomo/network/tests/test_port.py @@ -15,11 +15,21 @@ import pyomo.common.unittest as unittest from io import StringIO -from pyomo.environ import ConcreteModel, AbstractModel, Var, Set, NonNegativeReals, Binary, Reals, Integers, RangeSet +from pyomo.environ import ( + ConcreteModel, + AbstractModel, + Var, + Set, + NonNegativeReals, + Binary, + Reals, + Integers, + RangeSet, +) from pyomo.network import Port, Arc -class TestPort(unittest.TestCase): +class TestPort(unittest.TestCase): def test_default_scalar_constructor(self): model = ConcreteModel() model.c = Port() @@ -31,7 +41,7 @@ def test_default_scalar_constructor(self): self.assertEqual(len(model.c), 0) # FIXME: Not sure I like this behavior: but since this is # (currently) an attribute, there is no way to check for - # construction withough converting it to a property. + # construction without converting it to a property. # # TODO: if we move away from multiple inheritance for # simplevars, then this can trigger an exception (cleanly) @@ -59,9 +69,9 @@ def test_default_indexed_constructor(self): def test_add_scalar_vars(self): pipe = ConcreteModel() pipe.flow = Var() - pipe.pIn = Var( within=NonNegativeReals ) - pipe.pOut = Var( within=NonNegativeReals ) - + pipe.pIn = Var(within=NonNegativeReals) + pipe.pOut = Var(within=NonNegativeReals) + pipe.OUT = Port() pipe.OUT.add(pipe.flow, "flow") pipe.OUT.add(pipe.pOut, "pressure") @@ -75,13 +85,13 @@ def test_add_scalar_vars(self): self.assertEqual(len(pipe.IN), 1) self.assertEqual(len(pipe.IN.vars), 2) self.assertTrue(pipe.IN.vars['flow'].is_expression_type()) - + def test_add_indexed_vars(self): pipe = ConcreteModel() - pipe.SPECIES = Set(initialize=['a','b','c']) + pipe.SPECIES = Set(initialize=['a', 'b', 'c']) pipe.flow = Var() pipe.composition = Var(pipe.SPECIES) - pipe.pIn = Var( within=NonNegativeReals ) + pipe.pIn = Var(within=NonNegativeReals) pipe.OUT = Port() pipe.OUT.add(pipe.flow, "flow") @@ -93,41 +103,41 @@ def test_add_indexed_vars(self): def test_fixed(self): pipe = ConcreteModel() - pipe.SPECIES = Set(initialize=['a','b','c']) + pipe.SPECIES = Set(initialize=['a', 'b', 'c']) pipe.flow = Var() pipe.composition = Var(pipe.SPECIES) - pipe.pIn = Var( within=NonNegativeReals ) + pipe.pIn = Var(within=NonNegativeReals) pipe.OUT = Port() - self.assertTrue( pipe.OUT.is_fixed()) + self.assertTrue(pipe.OUT.is_fixed()) pipe.OUT.add(pipe.flow, "flow") - self.assertFalse( pipe.OUT.is_fixed()) + self.assertFalse(pipe.OUT.is_fixed()) pipe.flow.fix(0) - self.assertTrue( pipe.OUT.is_fixed()) + self.assertTrue(pipe.OUT.is_fixed()) pipe.OUT.add(-pipe.pIn, "pressure") - self.assertFalse( pipe.OUT.is_fixed()) + self.assertFalse(pipe.OUT.is_fixed()) pipe.pIn.fix(1) - self.assertTrue( pipe.OUT.is_fixed()) + self.assertTrue(pipe.OUT.is_fixed()) pipe.OUT.add(pipe.composition, "composition") - self.assertFalse( pipe.OUT.is_fixed()) + self.assertFalse(pipe.OUT.is_fixed()) pipe.composition['a'].fix(1) - self.assertFalse( pipe.OUT.is_fixed()) + self.assertFalse(pipe.OUT.is_fixed()) pipe.composition['b'].fix(1) pipe.composition['c'].fix(1) - self.assertTrue( pipe.OUT.is_fixed()) + self.assertTrue(pipe.OUT.is_fixed()) m = ConcreteModel() - m.SPECIES = Set(initialize=['a','b','c']) + m.SPECIES = Set(initialize=['a', 'b', 'c']) m.flow = Var() m.composition = Var(m.SPECIES) - m.pIn = Var( within=NonNegativeReals ) + m.pIn = Var(within=NonNegativeReals) m.port = Port() m.port.add(m.flow, "flow") @@ -138,47 +148,47 @@ def test_fixed(self): def test_polynomial_degree(self): pipe = ConcreteModel() - pipe.SPECIES = Set(initialize=['a','b','c']) + pipe.SPECIES = Set(initialize=['a', 'b', 'c']) pipe.flow = Var() pipe.composition = Var(pipe.SPECIES) - pipe.pIn = Var( within=NonNegativeReals ) + pipe.pIn = Var(within=NonNegativeReals) pipe.OUT = Port() - self.assertEqual( pipe.OUT.polynomial_degree(), 0) + self.assertEqual(pipe.OUT.polynomial_degree(), 0) pipe.OUT.add(pipe.flow, "flow") - self.assertEqual( pipe.OUT.polynomial_degree(), 1) + self.assertEqual(pipe.OUT.polynomial_degree(), 1) pipe.flow.fix(0) - self.assertEqual( pipe.OUT.polynomial_degree(), 0) + self.assertEqual(pipe.OUT.polynomial_degree(), 0) pipe.OUT.add(-pipe.pIn, "pressure") - self.assertEqual( pipe.OUT.polynomial_degree(), 1) + self.assertEqual(pipe.OUT.polynomial_degree(), 1) pipe.pIn.fix(1) - self.assertEqual( pipe.OUT.polynomial_degree(), 0) + self.assertEqual(pipe.OUT.polynomial_degree(), 0) pipe.OUT.add(pipe.composition, "composition") - self.assertEqual( pipe.OUT.polynomial_degree(), 1) + self.assertEqual(pipe.OUT.polynomial_degree(), 1) pipe.composition['a'].fix(1) - self.assertEqual( pipe.OUT.polynomial_degree(), 1) + self.assertEqual(pipe.OUT.polynomial_degree(), 1) pipe.composition['b'].fix(1) pipe.composition['c'].fix(1) - self.assertEqual( pipe.OUT.polynomial_degree(), 0) + self.assertEqual(pipe.OUT.polynomial_degree(), 0) - pipe.OUT.add(pipe.flow*pipe.pIn, "quadratic") - self.assertEqual( pipe.OUT.polynomial_degree(), 0) + pipe.OUT.add(pipe.flow * pipe.pIn, "quadratic") + self.assertEqual(pipe.OUT.polynomial_degree(), 0) pipe.flow.unfix() - self.assertEqual( pipe.OUT.polynomial_degree(), 1) + self.assertEqual(pipe.OUT.polynomial_degree(), 1) pipe.pIn.unfix() - self.assertEqual( pipe.OUT.polynomial_degree(), 2) + self.assertEqual(pipe.OUT.polynomial_degree(), 2) - pipe.OUT.add(pipe.flow/pipe.pIn, "nonLin") - self.assertEqual( pipe.OUT.polynomial_degree(), None) + pipe.OUT.add(pipe.flow / pipe.pIn, "nonLin") + self.assertEqual(pipe.OUT.polynomial_degree(), None) def test_potentially_variable(self): m = ConcreteModel() @@ -346,8 +356,9 @@ def test_add_from_containers(self): m.p1 = Port(initialize=[m.x, m.y]) m.p2 = Port(initialize=[(m.x, Port.Equality), (m.y, Port.Extensive)]) m.p3 = Port(initialize=dict(this=m.x, that=m.y)) - m.p4 = Port(initialize=dict(this=(m.x, Port.Equality), - that=(m.y, Port.Extensive))) + m.p4 = Port( + initialize=dict(this=(m.x, Port.Equality), that=(m.y, Port.Extensive)) + ) self.assertIs(m.p1.x, m.x) self.assertIs(m.p1.y, m.y) @@ -455,10 +466,10 @@ def contains(item, container): def test_pprint(self): pipe = ConcreteModel() - pipe.SPECIES = Set(initialize=['a','b','c']) + pipe.SPECIES = Set(initialize=['a', 'b', 'c']) pipe.flow = Var() pipe.composition = Var(pipe.SPECIES) - pipe.pIn = Var( within=NonNegativeReals ) + pipe.pIn = Var(within=NonNegativeReals) pipe.OUT = Port(implicit=['imp']) pipe.OUT.add(-pipe.flow, "flow") @@ -468,25 +479,27 @@ def test_pprint(self): os = StringIO() pipe.OUT.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""OUT : Size=1, Index=None + self.assertEqual( + os.getvalue(), + """OUT : Size=1, Index=None Key : Name : Size : Variable None : comp_a : 1 : composition[a] : composition : 3 : composition : flow : 1 : - flow : imp : - : None : pressure : 1 : pIn -""") +""", + ) def _IN(m, i): - return { 'pressure': pipe.pIn, - 'flow': pipe.composition[i] * pipe.flow } + return {'pressure': pipe.pIn, 'flow': pipe.composition[i] * pipe.flow} pipe.IN = Port(pipe.SPECIES, rule=_IN) os = StringIO() pipe.IN.pprint(ostream=os) - self.assertEqual(os.getvalue(), -"""IN : Size=3, Index=SPECIES + self.assertEqual( + os.getvalue(), + """IN : Size=3, Index=SPECIES Key : Name : Size : Variable a : flow : 1 : composition[a]*flow : pressure : 1 : pIn @@ -494,15 +507,15 @@ def _IN(m, i): : pressure : 1 : pIn c : flow : 1 : composition[c]*flow : pressure : 1 : pIn -""") - +""", + ) + def test_display(self): pipe = ConcreteModel() pipe.SPECIES = Set(initialize=['a', 'b', 'c']) pipe.flow = Var(initialize=10) - pipe.composition = Var( pipe.SPECIES, - initialize=lambda m,i: ord(i)-ord('a') ) - pipe.pIn = Var( within=NonNegativeReals, initialize=3.14 ) + pipe.composition = Var(pipe.SPECIES, initialize=lambda m, i: ord(i) - ord('a')) + pipe.pIn = Var(within=NonNegativeReals, initialize=3.14) pipe.OUT = Port(implicit=['imp']) pipe.OUT.add(-pipe.flow, "flow") @@ -511,24 +524,26 @@ def test_display(self): os = StringIO() pipe.OUT.display(ostream=os) - self.assertEqual(os.getvalue(), -"""OUT : Size=1 + self.assertEqual( + os.getvalue(), + """OUT : Size=1 Key : Name : Value None : composition : {'a': 0, 'b': 1, 'c': 2} : flow : -10 : imp : - : pressure : 3.14 -""") +""", + ) def _IN(m, i): - return { 'pressure': pipe.pIn, - 'flow': pipe.composition[i] * pipe.flow } + return {'pressure': pipe.pIn, 'flow': pipe.composition[i] * pipe.flow} pipe.IN = Port(pipe.SPECIES, rule=_IN) os = StringIO() pipe.IN.display(ostream=os) - self.assertEqual(os.getvalue(), -"""IN : Size=3 + self.assertEqual( + os.getvalue(), + """IN : Size=3 Key : Name : Value a : flow : 0 : pressure : 3.14 @@ -536,7 +551,8 @@ def _IN(m, i): : pressure : 3.14 c : flow : 20 : pressure : 3.14 -""") +""", + ) if __name__ == "__main__": diff --git a/pyomo/network/util.py b/pyomo/network/util.py index 43a1d0c2b6f..be0fa2c84d1 100644 --- a/pyomo/network/util.py +++ b/pyomo/network/util.py @@ -12,6 +12,7 @@ from pyomo.core import Var from pyomo.core.base.indexed_component import UnindexedComponent_set + def create_var(comp, name, block, index_set=None): if index_set is None: if comp.is_indexed(): @@ -23,6 +24,7 @@ def create_var(comp, name, block, index_set=None): block.add_component(name, new_var) return new_var + def _tighten(src, dest): starting_lb = dest.lb starting_ub = dest.ub @@ -39,6 +41,7 @@ def _tighten(src, dest): else: dest.setub(min(starting_ub, src.ub)) + def tighten_var_domain(comp, new_var, index_set=None): if index_set is None: if comp.is_indexed(): @@ -62,6 +65,7 @@ def tighten_var_domain(comp, new_var, index_set=None): return new_var + def replicate_var(comp, name, block, index_set=None): """ Create a new variable that will have the same indexing set, domain, diff --git a/pyomo/opt/__init__.py b/pyomo/opt/__init__.py index 2a0ab645738..8c12d3fa201 100644 --- a/pyomo/opt/__init__.py +++ b/pyomo/opt/__init__.py @@ -13,33 +13,56 @@ import pyomo.opt.solver from pyomo.opt.base import ( - check_available_solvers, convert, convert_problem, error, formats, - guess_format, opt_config, solvers, - AbstractProblemWriter, AbstractResultsReader, - BranchDirection, ConverterError, OptSolver, - ProblemFormat, ReaderFactory, ResultsFormat, SolverFactory, - UnknownSolver, WriterFactory, + check_available_solvers, + convert, + convert_problem, + error, + formats, + guess_format, + opt_config, + solvers, + AbstractProblemWriter, + AbstractResultsReader, + BranchDirection, + ConverterError, + OptSolver, + ProblemFormat, + ReaderFactory, + ResultsFormat, + SolverFactory, + UnknownSolver, + WriterFactory, ) from pyomo.opt.results import ( - container, problem, solution, - ScalarData, ScalarType, + container, + problem, + solution, + ScalarData, + ScalarType, default_print_options, - ListContainer, MapContainer, - UndefinedData, undefined, ignore, - SolverStatus, TerminationCondition, - check_optimal_termination, assert_optimal_termination, + ListContainer, + MapContainer, + UndefinedData, + undefined, + ignore, + SolverStatus, + TerminationCondition, + check_optimal_termination, + assert_optimal_termination, ProblemSense, - SolutionStatus, Solution, results_, - SolverResults + SolutionStatus, + Solution, + results_, + SolverResults, ) -from pyomo.opt.problem import ( - ampl, AmplModel -) +from pyomo.opt.problem import ampl, AmplModel from pyomo.opt.parallel import ( - manager, async_solver, local, - SolverManagerFactory, AsynchronousSolverManager + manager, + async_solver, + local, + SolverManagerFactory, + AsynchronousSolverManager, ) - diff --git a/pyomo/opt/base/__init__.py b/pyomo/opt/base/__init__.py index 17d5bf2e93b..9d29efc859d 100644 --- a/pyomo/opt/base/__init__.py +++ b/pyomo/opt/base/__init__.py @@ -14,13 +14,11 @@ from pyomo.opt.base.error import ConverterError from pyomo.opt.base.convert import convert_problem from pyomo.opt.base.solvers import ( - UnknownSolver, SolverFactory, check_available_solvers, OptSolver, + UnknownSolver, + SolverFactory, + check_available_solvers, + OptSolver, ) from pyomo.opt.base.results import ReaderFactory, AbstractResultsReader -from pyomo.opt.base.problem import ( - AbstractProblemWriter, - BranchDirection, WriterFactory -) -from pyomo.opt.base.formats import ( - ProblemFormat, ResultsFormat, guess_format, -) +from pyomo.opt.base.problem import AbstractProblemWriter, BranchDirection, WriterFactory +from pyomo.opt.base.formats import ProblemFormat, ResultsFormat, guess_format diff --git a/pyomo/opt/base/convert.py b/pyomo/opt/base/convert.py index 7bd22b07392..972239a65cd 100644 --- a/pyomo/opt/base/convert.py +++ b/pyomo/opt/base/convert.py @@ -24,11 +24,13 @@ ProblemConverterFactory = Factory('problem converter') -def convert_problem(args, - target_problem_type, - valid_problem_types, - has_capability=lambda x: False, - **kwds): +def convert_problem( + args, + target_problem_type, + valid_problem_types, + has_capability=lambda x: False, + **kwds +): """ Convert a problem, defined by the 'args' tuple, into another problem. @@ -37,10 +39,9 @@ def convert_problem(args, if len(valid_problem_types) == 0: raise ConverterError("No valid problem types") - if not (target_problem_type is None or \ - target_problem_type in valid_problem_types): + if not (target_problem_type is None or target_problem_type in valid_problem_types): msg = "Problem type '%s' is not valid" - raise ConverterError(msg % str( target_problem_type )) + raise ConverterError(msg % str(target_problem_type)) if len(args) == 0: raise ConverterError("Empty argument list") @@ -51,11 +52,11 @@ def convert_problem(args, tmp = args[0] if isinstance(tmp, str): fname = tmp.split(os.sep)[-1] - if os.sep in fname: #pragma:nocover + if os.sep in fname: # pragma:nocover fname = tmp.split(os.sep)[-1] source_ptype = [guess_format(fname)] if source_ptype is [None]: - raise ConverterError("Unknown suffix type: "+tmp) + raise ConverterError("Unknown suffix type: " + tmp) else: source_ptype = args[0].valid_problem_types() @@ -65,10 +66,10 @@ def convert_problem(args, valid_ptypes = copy.copy(valid_problem_types) if target_problem_type is not None: valid_ptypes.remove(target_problem_type) - valid_ptypes = [target_problem_type] + valid_ptypes + valid_ptypes = [target_problem_type] + valid_ptypes if source_ptype[0] in valid_ptypes: valid_ptypes.remove(source_ptype[0]) - valid_ptypes = [source_ptype[0]] + valid_ptypes + valid_ptypes = [source_ptype[0]] + valid_ptypes # # Iterate over the valid problem types, starting with the target type @@ -76,24 +77,21 @@ def convert_problem(args, # Apply conversion and return for first match # for ptype in valid_ptypes: - for s_ptype in source_ptype: - - # - # If the source and target types are equal, then simply the return - # the args (return just the first element of the tuple if it has length - # one. - # + # + # If the source and target types are equal, then simply the return + # the args (return just the first element of the tuple if it has length + # one. + # if s_ptype == ptype: - return (args,ptype,None) + return (args, ptype, None) # # Otherwise, try to convert # for name in ProblemConverterFactory: - converter = ProblemConverterFactory(name) if converter.can_convert(s_ptype, ptype): - tmp = [s_ptype,ptype] + list(args) + tmp = [s_ptype, ptype] + list(args) tmp = tuple(tmp) # propagate input keywords to the converter tmpkw = kwds @@ -101,6 +99,5 @@ def convert_problem(args, problem_files, symbol_map = converter.apply(*tmp, **tmpkw) return problem_files, ptype, symbol_map - msg = 'No conversion possible. Source problem type: %s. Valid target ' \ - 'types: %s' + msg = 'No conversion possible. Source problem type: %s. Valid target types: %s' raise ConverterError(msg % (str(source_ptype[0]), list(map(str, valid_ptypes)))) diff --git a/pyomo/opt/base/error.py b/pyomo/opt/base/error.py index c4066e867a7..aa97469f6d0 100644 --- a/pyomo/opt/base/error.py +++ b/pyomo/opt/base/error.py @@ -9,10 +9,11 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + class ConverterError(Exception): """ An exception used when there is an error converting a problem. """ - def __init__(self,*args,**kargs): - Exception.__init__(self,*args,**kargs) #pragma:nocover + def __init__(self, *args, **kargs): + Exception.__init__(self, *args, **kargs) # pragma:nocover diff --git a/pyomo/opt/base/formats.py b/pyomo/opt/base/formats.py index f6da8db8d56..2acd77b80e4 100644 --- a/pyomo/opt/base/formats.py +++ b/pyomo/opt/base/formats.py @@ -14,7 +14,8 @@ # __all__ = ['ProblemFormat', 'ResultsFormat', 'guess_format'] -import enum +import enum + # # pyomo - A pyomo.core.PyomoModel object, or a *.py file that defines such an object @@ -28,16 +29,16 @@ # gams - A GAMS input file # class ProblemFormat(str, enum.Enum): - pyomo='pyomo' - cpxlp='cpxlp' - nl='nl' - mps='mps' - mod='mod' - lpxlp='lpxlp' - osil='osil' - bar='bar' - gams='gams' - + pyomo = 'pyomo' + cpxlp = 'cpxlp' + nl = 'nl' + mps = 'mps' + mod = 'mod' + lpxlp = 'lpxlp' + osil = 'osil' + bar = 'bar' + gams = 'gams' + # Overloading __str__ is needed to match the behavior of the old # pyutilib.enum class (removed June 2020). There are spots in the # code base that expect the string representation for items in the @@ -56,12 +57,12 @@ def __str__(self): # json - A Pyomo results file in JSON format # class ResultsFormat(str, enum.Enum): - osrl='osrl' - results='results' - sol='sol' - soln='soln' - yaml='yaml' - json='json' + osrl = 'osrl' + results = 'results' + sol = 'sol' + soln = 'soln' + yaml = 'yaml' + json = 'json' # Overloading __str__ is needed to match the behavior of the old # pyutilib.enum class (removed June 2020). There are spots in the @@ -74,24 +75,24 @@ def __str__(self): def guess_format(filename): formats = {} - formats['py']=ProblemFormat.pyomo - formats['nl']=ProblemFormat.nl - formats['bar']=ProblemFormat.bar - formats['mps']=ProblemFormat.mps - formats['mod']=ProblemFormat.mod - formats['lp']=ProblemFormat.cpxlp - formats['osil']=ProblemFormat.osil - formats['gms']=ProblemFormat.gams - formats['gams']=ProblemFormat.gams + formats['py'] = ProblemFormat.pyomo + formats['nl'] = ProblemFormat.nl + formats['bar'] = ProblemFormat.bar + formats['mps'] = ProblemFormat.mps + formats['mod'] = ProblemFormat.mod + formats['lp'] = ProblemFormat.cpxlp + formats['osil'] = ProblemFormat.osil + formats['gms'] = ProblemFormat.gams + formats['gams'] = ProblemFormat.gams - formats['sol']=ResultsFormat.sol - formats['osrl']=ResultsFormat.osrl - formats['soln']=ResultsFormat.soln - formats['yml']=ResultsFormat.yaml - formats['yaml']=ResultsFormat.yaml - formats['jsn']=ResultsFormat.json - formats['json']=ResultsFormat.json - formats['results']=ResultsFormat.yaml + formats['sol'] = ResultsFormat.sol + formats['osrl'] = ResultsFormat.osrl + formats['soln'] = ResultsFormat.soln + formats['yml'] = ResultsFormat.yaml + formats['yaml'] = ResultsFormat.yaml + formats['jsn'] = ResultsFormat.json + formats['json'] = ResultsFormat.json + formats['results'] = ResultsFormat.yaml if filename: return formats.get(filename.split('.')[-1].strip(), None) else: diff --git a/pyomo/opt/base/opt_config.py b/pyomo/opt/base/opt_config.py index 9320e403e95..d93cfd77b3c 100644 --- a/pyomo/opt/base/opt_config.py +++ b/pyomo/opt/base/opt_config.py @@ -8,4 +8,3 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - diff --git a/pyomo/opt/base/problem.py b/pyomo/opt/base/problem.py index 8c0fd0e16f9..6be1d4d6db6 100644 --- a/pyomo/opt/base/problem.py +++ b/pyomo/opt/base/problem.py @@ -9,11 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = [ - "AbstractProblemWriter", - "WriterFactory", - "BranchDirection", -] +__all__ = ["AbstractProblemWriter", "WriterFactory", "BranchDirection"] from pyomo.common import Factory @@ -24,11 +20,13 @@ class AbstractProblemWriter(object): """Base class that can write optimization problems.""" - def __init__(self, problem_format): #pragma:nocover - self.format=problem_format + def __init__(self, problem_format): # pragma:nocover + self.format = problem_format - def __call__(self, model, filename, solver_capability, **kwds): #pragma:nocover - raise TypeError("Method __call__ undefined in writer for format "+str(self.format)) + def __call__(self, model, filename, solver_capability, **kwds): # pragma:nocover + raise TypeError( + "Method __call__ undefined in writer for format " + str(self.format) + ) # # Support "with" statements. @@ -41,7 +39,7 @@ def __exit__(self, t, v, traceback): class BranchDirection(object): - """ Allowed values for MIP variable branching directions in the `direction` Suffix of a model. """ + """Allowed values for MIP variable branching directions in the `direction` Suffix of a model.""" default = 0 down = -1 diff --git a/pyomo/opt/base/results.py b/pyomo/opt/base/results.py index 6d7bed93af9..68999fae6e4 100644 --- a/pyomo/opt/base/results.py +++ b/pyomo/opt/base/results.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = [ 'AbstractResultsReader', 'ReaderFactory' ] +__all__ = ['AbstractResultsReader', 'ReaderFactory'] from pyomo.common import Factory @@ -21,10 +21,12 @@ class AbstractResultsReader(object): """Base class that can read optimization results.""" def __init__(self, results_format): - self.format=results_format + self.format = results_format - def __call__(self, filename, res=None, suffixes=[]): #pragma:nocover - raise TypeError("Method __call__ undefined in reader for format "+str(self.format)) + def __call__(self, filename, res=None, suffixes=[]): # pragma:nocover + raise TypeError( + "Method __call__ undefined in reader for format " + str(self.format) + ) # # Support "with" statements. Forgetting to call deactivate @@ -35,4 +37,3 @@ def __enter__(self): def __exit__(self, t, v, traceback): pass - diff --git a/pyomo/opt/base/solvers.py b/pyomo/opt/base/solvers.py index 652ccfaedce..30b29f99b2d 100644 --- a/pyomo/opt/base/solvers.py +++ b/pyomo/opt/base/solvers.py @@ -9,10 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ('OptSolver', - 'SolverFactory', - 'UnknownSolver', - 'check_available_solvers') +__all__ = ('OptSolver', 'SolverFactory', 'UnknownSolver', 'check_available_solvers') import re import sys @@ -31,6 +28,7 @@ logger = logging.getLogger('pyomo.opt') + # The version string is first searched for trunk/Trunk, and if # found a tuple of infinities is returned. Otherwise, the first # match of number[.number] where [.number] can repeat 1-3 times @@ -43,7 +41,7 @@ def _extract_version(x, length=4): Attempts to extract solver version information from a string. """ assert (1 <= length) and (length <= 4) - m = re.search('[t,T]runk',x) + m = re.search('[t,T]runk', x) if m is not None: # Since most version checks are comparing if the current # version is greater/less than some other version, it makes @@ -53,25 +51,23 @@ def _extract_version(x, length=4): m = re.search(r'[0-9]+(\.[0-9]+){1,3}', x) if not m is None: version = tuple(int(i) for i in m.group(0).split('.')[:length]) - while(len(version) < length): + while len(version) < length: version += (0,) return version - return None #(0,0,0,0)[:length] + return None # (0,0,0,0)[:length] class UnknownSolver(object): - def __init__(self, *args, **kwds): - #super(UnknownSolver,self).__init__(**kwds) + # super(UnknownSolver,self).__init__(**kwds) # # The 'type' is the class type of the solver instance # if "type" in kwds: self.type = kwds["type"] - else: #pragma:nocover - raise ValueError( - "Expected option 'type' for UnknownSolver constructor") + else: # pragma:nocover + raise ValueError("Expected option 'type' for UnknownSolver constructor") self.options = {} self._args = args @@ -98,7 +94,7 @@ def license_is_valid(self): return False def warm_start_capable(self): - """ True is the solver can accept a warm-start solution.""" + """True is the solver can accept a warm-start solution.""" return False def solve(self, *args, **kwds): @@ -120,7 +116,8 @@ def __getattr__(self, attr): self._solver_error(attr) def _solver_error(self, method_name): - raise RuntimeError("""Attempting to use an unavailable solver. + raise RuntimeError( + """Attempting to use an unavailable solver. The SolverFactory was unable to create the solver "%s" and returned an UnknownSolver object. This error is raised at the point @@ -128,20 +125,21 @@ def _solver_error(self, method_name): method "%s"). The original solver was created with the following parameters: -\t""" % ( self.type, method_name ) -+ "\n\t".join("%s: %s" % i for i in sorted(self._kwds.items())) -+ "\n\t_args: %s" % ( self._args, ) -+ "\n\toptions: %s" % ( self.options, ) ) +\t""" + % (self.type, method_name) + + "\n\t".join("%s: %s" % i for i in sorted(self._kwds.items())) + + "\n\t_args: %s" % (self._args,) + + "\n\toptions: %s" % (self.options,) + ) class SolverFactoryClass(Factory): - def __call__(self, _name=None, **kwds): if _name is None: return self - _name=str(_name) + _name = str(_name) if ':' in _name: - _name, subsolver = _name.split(':',1) + _name, subsolver = _name.split(':', 1) kwds['solver'] = subsolver elif 'solver' in kwds: subsolver = kwds['solver'] @@ -155,32 +153,37 @@ def __call__(self, _name=None, **kwds): mode = kwds.get('solver_io', 'nl') if mode is None: mode = 'nl' - _implicit_solvers = {'nl': 'asl' } + _implicit_solvers = {'nl': 'asl'} if "executable" not in kwds: kwds["executable"] = _name if mode in _implicit_solvers: if _implicit_solvers[mode] not in self._cls: raise RuntimeError( " The solver plugin was not registered.\n" - " Please confirm that the 'pyomo.environ' package has been imported.") + " Please confirm that the 'pyomo.environ' package has been imported." + ) opt = self._cls[_implicit_solvers[mode]](**kwds) if opt is not None: - opt.set_options('solver='+_name) + opt.set_options('solver=' + _name) except: err = sys.exc_info() - logger.warning("Failed to create solver with name '%s':\n%s" - % (_name, err[1]), exc_info=err) + logger.warning( + "Failed to create solver with name '%s':\n%s" % (_name, err[1]), + exc_info=err, + ) opt = None if opt is not None and _name != "py" and subsolver is not None: # py just creates instance of its subsolver, no need for this option - opt.set_options('solver='+subsolver) + opt.set_options('solver=' + subsolver) if opt is None: - opt = UnknownSolver( type=_name, **kwds ) + opt = UnknownSolver(type=_name, **kwds) opt.name = _name return opt + SolverFactory = SolverFactoryClass('solver type') + # # TODO: It is impossible to load CBC with NL file-io using this function, # i.e., SolverFactory("cbc", solver_io='nl'), @@ -196,23 +199,23 @@ def check_available_solvers(*args): ans = [] for arg in args: - if not isinstance(arg,tuple): + if not isinstance(arg, tuple): name = arg arg = (arg,) else: name = arg[0] opt = SolverFactory(*arg) if opt is None or isinstance(opt, UnknownSolver): - continue # not available + continue # not available if not opt.available(exception_flag=False): - continue # not available + continue # not available if hasattr(opt, 'executable') and opt.executable() is None: - continue # not available + continue # not available if not opt.license_is_valid(): - continue # not available + continue # not available # At this point, the solver is available (and licensed) ans.append(name) @@ -221,13 +224,15 @@ def check_available_solvers(*args): return ans + def _raise_ephemeral_error(name, keyword=""): raise AttributeError( "The property '%s' can no longer be set directly on " "the solver object. It should instead be passed as a " "keyword into the solve method%s. It will automatically " "be reset to its default value after each invocation of " - "solve." % (name, keyword)) + "solve." % (name, keyword) + ) class OptSolver(object): @@ -250,6 +255,7 @@ def __exit__(self, t, v, traceback): @property def tee(self): _raise_ephemeral_error('tee') + @tee.setter def tee(self, val): _raise_ephemeral_error('tee') @@ -257,6 +263,7 @@ def tee(self, val): @property def suffixes(self): _raise_ephemeral_error('suffixes') + @suffixes.setter def suffixes(self, val): _raise_ephemeral_error('suffixes') @@ -264,6 +271,7 @@ def suffixes(self, val): @property def keepfiles(self): _raise_ephemeral_error('keepfiles') + @keepfiles.setter def keepfiles(self, val): _raise_ephemeral_error('keepfiles') @@ -271,6 +279,7 @@ def keepfiles(self, val): @property def soln_file(self): _raise_ephemeral_error('soln_file') + @soln_file.setter def soln_file(self, val): _raise_ephemeral_error('soln_file') @@ -278,6 +287,7 @@ def soln_file(self, val): @property def log_file(self): _raise_ephemeral_error('log_file') + @log_file.setter def log_file(self, val): _raise_ephemeral_error('log_file') @@ -285,6 +295,7 @@ def log_file(self, val): @property def symbolic_solver_labels(self): _raise_ephemeral_error('symbolic_solver_labels') + @symbolic_solver_labels.setter def symbolic_solver_labels(self, val): _raise_ephemeral_error('symbolic_solver_labels') @@ -292,6 +303,7 @@ def symbolic_solver_labels(self, val): @property def warm_start_solve(self): _raise_ephemeral_error('warm_start_solve', keyword=" (warmstart)") + @warm_start_solve.setter def warm_start_solve(self, val): _raise_ephemeral_error('warm_start_solve', keyword=" (warmstart)") @@ -299,18 +311,19 @@ def warm_start_solve(self, val): @property def warm_start_file_name(self): _raise_ephemeral_error('warm_start_file_name', keyword=" (warmstart_file)") + @warm_start_file_name.setter def warm_start_file_name(self, val): _raise_ephemeral_error('warm_start_file_name', keyword=" (warmstart_file)") def __init__(self, **kwds): - """ Constructor """ + """Constructor""" # # The 'type' is the class type of the solver instance # if "type" in kwds: self.type = kwds["type"] - else: #pragma:nocover + else: # pragma:nocover raise ValueError("Expected option 'type' for OptSolver constructor") # @@ -325,12 +338,12 @@ def __init__(self, **kwds): if "doc" in kwds: self._doc = kwds["doc"] else: - if self.type is None: # pragma:nocover + if self.type is None: # pragma:nocover self._doc = "" elif self.name == self.type: self._doc = "%s OptSolver" % self.name else: - self._doc = "%s OptSolver (type %s)" % (self.name,self.type) + self._doc = "%s OptSolver (type %s)" % (self.name, self.type) # # Options are persistent, meaning users must modify the # options dict directly rather than pass them into _presolve @@ -400,11 +413,12 @@ def _options_string_to_dict(istr): index = token.find('=') if index == -1: raise ValueError( - "Solver options must have the form option=value: '%s'" % istr) + "Solver options must have the form option=value: '%s'" % istr + ) try: - val = eval(token[(index+1):]) + val = eval(token[(index + 1) :]) except: - val = token[(index+1):] + val = token[(index + 1) :] ans[token[:index]] = val return ans @@ -439,8 +453,9 @@ def set_problem_format(self, format): if format in self._valid_problem_formats: self._problem_format = format else: - raise ValueError("%s is not a valid problem format for solver plugin %s" - % (format, self)) + raise ValueError( + "%s is not a valid problem format for solver plugin %s" % (format, self) + ) self._results_format = self._default_results_format(self._problem_format) def results_format(self): @@ -449,18 +464,21 @@ def results_format(self): """ return self._results_format - def set_results_format(self,format): + def set_results_format(self, format): """ Set the current results format (if it's valid for the current problem format). """ - if (self._problem_format in self._valid_results_formats) and \ - (format in self._valid_results_formats[self._problem_format]): + if (self._problem_format in self._valid_results_formats) and ( + format in self._valid_results_formats[self._problem_format] + ): self._results_format = format else: - raise ValueError("%s is not a valid results format for " - "problem format %s with solver plugin %s" - % (format, self._problem_format, self)) + raise ValueError( + "%s is not a valid results format for " + "problem format %s with solver plugin %s" + % (format, self._problem_format, self) + ) def has_capability(self, cap): """ @@ -486,8 +504,10 @@ def has_capability(self, cap): Whether or not the solver has the specified capability. """ if not isinstance(cap, str): - raise TypeError("Expected argument to be of type '%s', not " - "'%s'." % (type(str()), type(cap))) + raise TypeError( + "Expected argument to be of type '%s', not " + "'%s'." % (type(str()), type(cap)) + ) else: val = self._capabilities[str(cap)] if val is None: @@ -496,7 +516,7 @@ def has_capability(self, cap): return val def available(self, exception_flag=True): - """ True if the solver is available """ + """True if the solver is available""" return True def license_is_valid(self): @@ -504,11 +524,11 @@ def license_is_valid(self): return True def warm_start_capable(self): - """ True is the solver can accept a warm-start solution """ + """True is the solver can accept a warm-start solution""" return False def solve(self, *args, **kwds): - """ Solve the problem """ + """Solve the problem""" self.available(exception_flag=True) # @@ -519,6 +539,7 @@ def solve(self, *args, **kwds): import pyomo.core.base.suffix from pyomo.core.kernel.block import IBlock import pyomo.core.kernel.suffix + _model = None for arg in args: if isinstance(arg, (_BlockData, IBlock)): @@ -526,24 +547,30 @@ def solve(self, *args, **kwds): if not arg.is_constructed(): raise RuntimeError( "Attempting to solve model=%s with unconstructed " - "component(s)" % (arg.name,) ) + "component(s)" % (arg.name,) + ) _model = arg # import suffixes must be on the top-level model if isinstance(arg, _BlockData): - model_suffixes = list(name for (name,comp) \ - in pyomo.core.base.suffix.\ - active_import_suffix_generator(arg)) + model_suffixes = list( + name + for ( + name, + comp, + ) in pyomo.core.base.suffix.active_import_suffix_generator(arg) + ) else: assert isinstance(arg, IBlock) - model_suffixes = list(comp.storage_key for comp - in pyomo.core.kernel.suffix.\ - import_suffix_generator(arg, - active=True, - descend_into=False)) + model_suffixes = list( + comp.storage_key + for comp in pyomo.core.kernel.suffix.import_suffix_generator( + arg, active=True, descend_into=False + ) + ) if len(model_suffixes) > 0: - kwds_suffixes = kwds.setdefault('suffixes',[]) + kwds_suffixes = kwds.setdefault('suffixes', []) for name in model_suffixes: if name not in kwds_suffixes: kwds_suffixes.append(name) @@ -561,9 +588,9 @@ def solve(self, *args, **kwds): self.options.update(orig_options) self.options.update(kwds.pop('options', {})) self.options.update( - self._options_string_to_dict(kwds.pop('options_string', ''))) + self._options_string_to_dict(kwds.pop('options_string', '')) + ) try: - # we're good to go. initial_time = time.time() @@ -571,7 +598,10 @@ def solve(self, *args, **kwds): presolve_completion_time = time.time() if self._report_timing: - print(" %6.2f seconds required for presolve" % (presolve_completion_time - initial_time)) + print( + " %6.2f seconds required for presolve" + % (presolve_completion_time - initial_time) + ) if not _model is None: self._initialize_callbacks(_model) @@ -583,21 +613,24 @@ def solve(self, *args, **kwds): logger.warning( "Solver (%s) did not return a solver status code.\n" "This is indicative of an internal solver plugin error.\n" - "Please report this to the Pyomo developers." ) + "Please report this to the Pyomo developers." + ) elif _status.rc: logger.error( "Solver (%s) returned non-zero return code (%s)" - % (self.name, _status.rc,)) + % (self.name, _status.rc) + ) if self._tee: - logger.error( - "See the solver log above for diagnostic information." ) + logger.error("See the solver log above for diagnostic information.") elif hasattr(_status, 'log') and _status.log: logger.error("Solver log:\n" + str(_status.log)) - raise ApplicationError( - "Solver (%s) did not exit normally" % self.name) + raise ApplicationError("Solver (%s) did not exit normally" % self.name) solve_completion_time = time.time() if self._report_timing: - print(" %6.2f seconds required for solver" % (solve_completion_time - presolve_completion_time)) + print( + " %6.2f seconds required for solver" + % (solve_completion_time - presolve_completion_time) + ) result = self._postsolve() result._smap_id = self._smap_id @@ -605,10 +638,12 @@ def solve(self, *args, **kwds): if _model: if isinstance(_model, IBlock): if len(result.solution) == 1: - result.solution(0).symbol_map = \ - getattr(_model, "._symbol_maps")[result._smap_id] - result.solution(0).default_variable_value = \ - self._default_variable_value + result.solution(0).symbol_map = getattr( + _model, "._symbol_maps" + )[result._smap_id] + result.solution( + 0 + ).default_variable_value = self._default_variable_value if self._load_solutions: _model.load_solution(result.solution(0)) else: @@ -619,15 +654,15 @@ def solve(self, *args, **kwds): assert len(getattr(_model, "._symbol_maps")) == 1 delattr(_model, "._symbol_maps") del result._smap_id - if self._load_solutions and \ - (len(result.solution) == 0): + if self._load_solutions and (len(result.solution) == 0): logger.error("No solution is available") else: if self._load_solutions: _model.solutions.load_from( result, select=self._select_index, - default_variable_value=self._default_variable_value) + default_variable_value=self._default_variable_value, + ) result._smap_id = None result.solution.clear() else: @@ -636,8 +671,10 @@ def solve(self, *args, **kwds): postsolve_completion_time = time.time() if self._report_timing: - print(" %6.2f seconds required for postsolve" - % (postsolve_completion_time - solve_completion_time)) + print( + " %6.2f seconds required for postsolve" + % (postsolve_completion_time - solve_completion_time) + ) finally: # @@ -648,37 +685,42 @@ def solve(self, *args, **kwds): return result def _presolve(self, *args, **kwds): - - self._log_file = kwds.pop("logfile", None) - self._soln_file = kwds.pop("solnfile", None) - self._select_index = kwds.pop("select", 0) - self._load_solutions = kwds.pop("load_solutions", True) - self._timelimit = kwds.pop("timelimit", None) - self._report_timing = kwds.pop("report_timing", False) - self._tee = kwds.pop("tee", False) - self._assert_available = kwds.pop("available", True) - self._suffixes = kwds.pop("suffixes", []) + self._log_file = kwds.pop("logfile", None) + self._soln_file = kwds.pop("solnfile", None) + self._select_index = kwds.pop("select", 0) + self._load_solutions = kwds.pop("load_solutions", True) + self._timelimit = kwds.pop("timelimit", None) + self._report_timing = kwds.pop("report_timing", False) + self._tee = kwds.pop("tee", False) + self._assert_available = kwds.pop("available", True) + self._suffixes = kwds.pop("suffixes", []) self.available() if self._problem_format: write_start_time = time.time() - (self._problem_files, self._problem_format, self._smap_id) = \ - self._convert_problem(args, - self._problem_format, - self._valid_problem_formats, - **kwds) + ( + self._problem_files, + self._problem_format, + self._smap_id, + ) = self._convert_problem( + args, self._problem_format, self._valid_problem_formats, **kwds + ) total_time = time.time() - write_start_time if self._report_timing: print(" %6.2f seconds required to write file" % total_time) else: if len(kwds): raise ValueError( - "Solver="+self.type+" passed unrecognized keywords: \n\t" - +("\n\t".join("%s = %s" % (k,v) for k,v in kwds.items()))) - - if (type(self._problem_files) in (list,tuple)) and \ - (not isinstance(self._problem_files[0], str)): + "Solver=" + + self.type + + " passed unrecognized keywords: \n\t" + + ("\n\t".join("%s = %s" % (k, v) for k, v in kwds.items())) + ) + + if (type(self._problem_files) in (list, tuple)) and ( + not isinstance(self._problem_files[0], str) + ): self._problem_files = self._problem_files[0]._problem_files() if self._results_format is None: self._results_format = self._default_results_format(self._problem_format) @@ -687,7 +729,7 @@ def _presolve(self, *args, **kwds): # Disabling this check for now. A solver doesn't have just # _one_ results format. # - #if self._results_format not in \ + # if self._results_format not in \ # self._valid_result_formats[self._problem_format]: # raise ValueError("Results format '"+str(self._results_format)+"' " # "cannot be used with problem format '" @@ -695,8 +737,9 @@ def _presolve(self, *args, **kwds): if self._results_format == ResultsFormat.soln: self._results_reader = None else: - self._results_reader = \ - pyomo.opt.base.results.ReaderFactory(self._results_format) + self._results_reader = pyomo.opt.base.results.ReaderFactory( + self._results_format + ) def _initialize_callbacks(self, model): """Initialize call-back functions""" @@ -704,26 +747,20 @@ def _initialize_callbacks(self, model): def _apply_solver(self): """The routine that performs the solve""" - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover def _postsolve(self): """The routine that does solve post-processing""" return self.results - def _convert_problem(self, - args, - problem_format, - valid_problem_formats, - **kwds): - return convert_problem(args, - problem_format, - valid_problem_formats, - self.has_capability, - **kwds) + def _convert_problem(self, args, problem_format, valid_problem_formats, **kwds): + return convert_problem( + args, problem_format, valid_problem_formats, self.has_capability, **kwds + ) def _default_results_format(self, prob_format): """Returns the default results format for different problem - formats. + formats. """ return ResultsFormat.results @@ -765,8 +802,7 @@ def fn(solver, model): a Pyomo model instance object. """ if not self._allow_callbacks: - raise ApplicationError( - "Callbacks disabled for solver %s" % self.name) + raise ApplicationError("Callbacks disabled for solver %s" % self.name) if callback_fn is None: if name in self._callback: del self._callback[name] @@ -775,4 +811,5 @@ def fn(solver, model): def config_block(self, init=False): from pyomo.scripting.solve_config import default_config_block + return default_config_block(self, init)[0] diff --git a/pyomo/opt/parallel/__init__.py b/pyomo/opt/parallel/__init__.py index 8a7c35c266f..9820f39afd4 100644 --- a/pyomo/opt/parallel/__init__.py +++ b/pyomo/opt/parallel/__init__.py @@ -9,6 +9,11 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.opt.parallel.async_solver import (Factory, AsynchronousActionManager, SolverManagerFactory, AsynchronousSolverManager) +from pyomo.opt.parallel.async_solver import ( + Factory, + AsynchronousActionManager, + SolverManagerFactory, + AsynchronousSolverManager, +) import pyomo.opt.parallel.manager import pyomo.opt.parallel.local diff --git a/pyomo/opt/parallel/async_solver.py b/pyomo/opt/parallel/async_solver.py index 62478fd87eb..e9806b7125a 100644 --- a/pyomo/opt/parallel/async_solver.py +++ b/pyomo/opt/parallel/async_solver.py @@ -16,12 +16,10 @@ from pyomo.opt.parallel.manager import AsynchronousActionManager - SolverManagerFactory = Factory('solver manager') class AsynchronousSolverManager(AsynchronousActionManager): - def __init__(self, **kwds): AsynchronousActionManager.__init__(self) @@ -60,4 +58,3 @@ def __enter__(self): def __exit__(self, t, v, traceback): pass - diff --git a/pyomo/opt/parallel/local.py b/pyomo/opt/parallel/local.py index 8c1b7b38edf..a7a80a7d33c 100644 --- a/pyomo/opt/parallel/local.py +++ b/pyomo/opt/parallel/local.py @@ -17,15 +17,15 @@ from pyomo.common.collections import OrderedDict import pyomo.opt -from pyomo.opt.parallel.manager import (ActionManagerError, - ActionStatus, - ActionHandle) -from pyomo.opt.parallel.async_solver import AsynchronousSolverManager, SolverManagerFactory +from pyomo.opt.parallel.manager import ActionManagerError, ActionStatus, ActionHandle +from pyomo.opt.parallel.async_solver import ( + AsynchronousSolverManager, + SolverManagerFactory, +) @SolverManagerFactory.register("serial", doc="Synchronously execute solvers locally") class SolverManager_Serial(AsynchronousSolverManager): - def clear(self): """ Clear manager state @@ -43,7 +43,8 @@ def _perform_queue(self, ah, *args, **kwds): if opt is None: raise ActionManagerError( "No solver passed to %s, use keyword option 'solver'" - % (type(self).__name__) ) + % (type(self).__name__) + ) time_start = time.time() if isinstance(opt, str): @@ -51,7 +52,7 @@ def _perform_queue(self, ah, *args, **kwds): results = _opt.solve(*args, **kwds) else: results = opt.solve(*args, **kwds) - results.pyomo_solve_time = time.time()-time_start + results.pyomo_solve_time = time.time() - time_start self.results[ah.id] = results ah.status = ActionStatus.done @@ -71,7 +72,11 @@ def _perform_wait_any(self): ah_id, result = self.results.popitem(last=False) self.results[ah_id] = result return self.event_handle[ah_id] - return ActionHandle(error=True, - explanation=("No queued evaluations available in " - "the 'serial' solver manager, which " - "executes solvers synchronously")) + return ActionHandle( + error=True, + explanation=( + "No queued evaluations available in " + "the 'serial' solver manager, which " + "executes solvers synchronously" + ), + ) diff --git a/pyomo/opt/parallel/manager.py b/pyomo/opt/parallel/manager.py index f834e0ec6a7..a97f6ae1d27 100644 --- a/pyomo/opt/parallel/manager.py +++ b/pyomo/opt/parallel/manager.py @@ -10,16 +10,24 @@ # ___________________________________________________________________________ -__all__ = ['ActionManagerError', 'ActionHandle', 'AsynchronousActionManager', 'ActionStatus', 'FailedActionHandle', 'solve_all_instances'] +__all__ = [ + 'ActionManagerError', + 'ActionHandle', + 'AsynchronousActionManager', + 'ActionStatus', + 'FailedActionHandle', + 'solve_all_instances', +] import enum + class ActionStatus(str, enum.Enum): - done='done' - error='error' - queued='queued' - executing='executing' - unknown='unknown' + done = 'done' + error = 'error' + queued = 'queued' + executing = 'executing' + unknown = 'unknown' # Overloading __str__ is needed to match the behavior of the old # pyutilib.enum class (removed June 2020). There are spots in the @@ -29,22 +37,24 @@ class ActionStatus(str, enum.Enum): def __str__(self): return self.value + def solve_all_instances(solver_manager, solver, instances, **kwds): """ A simple utility to apply a solver to a list of problem instances. """ solver_manager.solve_all(solver, instances, **kwds) + class ActionManagerError(Exception): """ An exception used when an error occurs within an ActionManager. """ - def __init__(self,*args,**kargs): - Exception.__init__(self,*args,**kargs) #pragma:nocover + def __init__(self, *args, **kargs): + Exception.__init__(self, *args, **kargs) # pragma:nocover -class ActionHandle(object): +class ActionHandle(object): id_counter = 0 def __init__(self, error=False, explanation=""): @@ -58,7 +68,7 @@ def __init__(self, error=False, explanation=""): self.explanation = explanation def update(self, ah): - """ Update the contents of the provided ActionHandle """ + """Update the contents of the provided ActionHandle""" self.id = ah.id self.status = ah.status @@ -69,9 +79,11 @@ def __hash__(self): return self.id.__hash__() def __eq__(self, other): - return (self.__class__ is other.__class__) and \ - (self.id.__hash__() == other.__hash__()) and \ - (self.id == other.id) + return ( + (self.__class__ is other.__class__) + and (self.id.__hash__() == other.__hash__()) + and (self.id == other.id) + ) def __ne__(self, other): return not self.__eq__(other) @@ -82,8 +94,8 @@ def __str__(self): FailedActionHandle = ActionHandle(error=True) -class AsynchronousActionManager(object): +class AsynchronousActionManager(object): @staticmethod def _flatten(*args): ahs = set() @@ -93,10 +105,10 @@ def _flatten(*args): ahs.add(item) elif type(item) in (list, tuple, dict, set): for ah in item: - if type(ah) is not ActionHandle: #pragma:nocover + if type(ah) is not ActionHandle: # pragma:nocover raise ActionManagerError("Bad argument type %s" % str(ah)) ahs.add(ah) - else: #pragma:nocover + else: # pragma:nocover raise ActionManagerError("Bad argument type %s" % str(item)) return ahs @@ -120,7 +132,8 @@ def execute(self, *args, **kwds): results = self.wait_for(ah) if results is None: raise ActionManagerError( - "Problem executing an event. No results are available.") + "Problem executing an event. No results are available." + ) return results def queue(self, *args, **kwds): @@ -145,8 +158,11 @@ def wait_all(self, *args): # ahs = set() if len(args) == 0: - ahs.update(ah for ah in self.event_handle.values() - if ah.status == ActionStatus.queued) + ahs.update( + ah + for ah in self.event_handle.values() + if ah.status == ActionStatus.queued + ) else: ahs = self._flatten(*args) # @@ -184,8 +200,7 @@ def wait_for(self, ah): while tmp != ah: tmp = self.wait_any() if tmp == FailedActionHandle: - raise ActionManagerError( - "Action %s failed: %s" % (ah, tmp.explanation)) + raise ActionManagerError("Action %s failed: %s" % (ah, tmp.explanation)) return self.get_results(ah) def num_queued(self): @@ -220,7 +235,9 @@ def _perform_queue(self, ah, *args, **kwds): ActionHandle, and the ActionHandle status indicates whether the queue was successful. """ - raise ActionManagerError("The _perform_queue method is not defined") #pragma:nocover + raise ActionManagerError( + "The _perform_queue method is not defined" + ) # pragma:nocover def _perform_wait_any(self): """ @@ -230,4 +247,6 @@ def _perform_wait_any(self): again. Note that an ActionHandle can be returned with a dummy value, to indicate an error. """ - raise ActionManagerError("The _perform_wait_any method is not defined") #pragma:nocover + raise ActionManagerError( + "The _perform_wait_any method is not defined" + ) # pragma:nocover diff --git a/pyomo/opt/plugins/__init__.py b/pyomo/opt/plugins/__init__.py index 99d8955ae03..797147f5f69 100644 --- a/pyomo/opt/plugins/__init__.py +++ b/pyomo/opt/plugins/__init__.py @@ -9,8 +9,8 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.opt.plugins.driver import pyomo.opt.plugins.res import pyomo.opt.plugins.sol - diff --git a/pyomo/opt/plugins/driver.py b/pyomo/opt/plugins/driver.py index 90b788cd69f..23757053beb 100644 --- a/pyomo/opt/plugins/driver.py +++ b/pyomo/opt/plugins/driver.py @@ -18,25 +18,47 @@ def setup_test_parser(parser): - parser.add_argument('--csv-file', '--csv', action='store', dest='csv', default=None, - help='Save test results to this file in a CSV format') - parser.add_argument("-d", "--debug", action="store_true", dest="debug", default=False, - help="Show debugging information and text generated during tests.") - parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", default=False, - help="Show verbose results output.") - parser.add_argument("solver", metavar="SOLVER", default=None, nargs='*', - help="a solver name") + parser.add_argument( + '--csv-file', + '--csv', + action='store', + dest='csv', + default=None, + help='Save test results to this file in a CSV format', + ) + parser.add_argument( + "-d", + "--debug", + action="store_true", + dest="debug", + default=False, + help="Show debugging information and text generated during tests.", + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + dest="verbose", + default=False, + help="Show verbose results output.", + ) + parser.add_argument( + "solver", metavar="SOLVER", default=None, nargs='*', help="a solver name" + ) + def test_exec(options): import pyomo.solvers.tests.testcases + pyomo.solvers.tests.testcases.run_test_scenarios(options) - - + + # # Add a subparser for the pyomo command # setup_test_parser( - pyomo.scripting.pyomo_parser.add_subparser('test-solvers', + pyomo.scripting.pyomo_parser.add_subparser( + 'test-solvers', func=test_exec, help='Test Pyomo solvers', description='This pyomo subcommand is used to run tests on installed solvers.', @@ -57,7 +79,6 @@ def test_exec(options): performed (both passed and failed checks). Additionally, this option prints information about the optimization process, such as the pyomo command-line that was executed.""", - formatter_class=argparse.RawDescriptionHelpFormatter + formatter_class=argparse.RawDescriptionHelpFormatter, ) ) - diff --git a/pyomo/opt/plugins/res.py b/pyomo/opt/plugins/res.py index e7ee29fe9fd..25d25d5feb0 100644 --- a/pyomo/opt/plugins/res.py +++ b/pyomo/opt/plugins/res.py @@ -40,7 +40,6 @@ def __call__(self, filename, res=None, soln=None, suffixes=[]): return res - @results.ReaderFactory.register(str(ResultsFormat.json)) class ResultsReader_json(results.AbstractResultsReader): """ @@ -60,4 +59,3 @@ def __call__(self, filename, res=None, soln=None, suffixes=[]): # res.read(filename, using_yaml=False) return res - diff --git a/pyomo/opt/plugins/sol.py b/pyomo/opt/plugins/sol.py index a605345a0eb..6e1ca666633 100644 --- a/pyomo/opt/plugins/sol.py +++ b/pyomo/opt/plugins/sol.py @@ -17,10 +17,7 @@ from pyomo.opt.base import results from pyomo.opt.base.formats import ResultsFormat -from pyomo.opt import (SolverResults, - SolutionStatus, - SolverStatus, - TerminationCondition) +from pyomo.opt import SolverResults, SolutionStatus, SolverStatus, TerminationCondition @results.ReaderFactory.register(str(ResultsFormat.sol)) @@ -31,7 +28,7 @@ class ResultsReader_sol(results.AbstractResultsReader): """ def __init__(self, name=None): - results.AbstractResultsReader.__init__(self,ResultsFormat.sol) + results.AbstractResultsReader.__init__(self, ResultsFormat.sol) if not name is None: self.name = name @@ -40,18 +37,17 @@ def __call__(self, filename, res=None, soln=None, suffixes=[]): Parse a *.sol file """ try: - with open(filename,"r") as f: + with open(filename, "r") as f: return self._load(f, res, soln, suffixes) except ValueError as e: - with open(filename,"r") as f: + with open(filename, "r") as f: fdata = f.read() raise ValueError( "Error reading '%s': %s.\n" - "SOL File Output:\n%s" - % (filename, str(e), fdata)) + "SOL File Output:\n%s" % (filename, str(e), fdata) + ) def _load(self, fin, res, soln, suffixes): - if res is None: res = SolverResults() # @@ -74,19 +70,19 @@ def _load(self, fin, res, soln, suffixes): line = fin.readline() nopts = int(line) need_vbtol = False - if nopts > 4: # WEH - when is this true? + if nopts > 4: # WEH - when is this true? nopts -= 2 need_vbtol = True for i in range(nopts + 4): line = fin.readline() z += [int(line)] - if need_vbtol: # WEH - when is this true? + if need_vbtol: # WEH - when is this true? line = fin.readline() z += [float(line)] else: raise ValueError("no Options line found") - n = z[nopts + 3] # variables - m = z[nopts + 1] # constraints + n = z[nopts + 3] # variables + m = z[nopts + 1] # constraints x = [] y = [] i = 0 @@ -99,18 +95,19 @@ def _load(self, fin, res, soln, suffixes): line = fin.readline() x.append(float(line)) i += 1 - objno = [0,0] + objno = [0, 0] line = fin.readline() - if line: # WEH - when is this true? - if line[:5] != "objno": #pragma:nocover + if line: # WEH - when is this true? + if line[:5] != "objno": # pragma:nocover raise ValueError("expected 'objno', found '%s'" % (line)) t = line.split() if len(t) != 3: - raise ValueError("expected two numbers in objno line, " - "but found '%s'" % (line)) + raise ValueError( + "expected two numbers in objno line, but found '%s'" % (line) + ) objno = [int(t[1]), int(t[2])] res.solver.message = msg.strip() - res.solver.message = res.solver.message.replace("\n","; ") + res.solver.message = res.solver.message.replace("\n", "; ") if isinstance(res.solver.message, str): res.solver.message = res.solver.message.replace(':', '\\x3a') ##res.solver.instanceName = osrl.header.instanceName @@ -133,50 +130,57 @@ def _load(self, fin, res, soln, suffixes): res.solver.status = SolverStatus.warning soln_status = SolutionStatus.infeasible elif (objno[1] >= 300) and (objno[1] <= 399): - objno_message = "UNBOUNDED PROBLEM: the objective can be improved without limit!" + objno_message = ( + "UNBOUNDED PROBLEM: the objective can be improved without limit!" + ) res.solver.termination_condition = TerminationCondition.unbounded res.solver.status = SolverStatus.warning soln_status = SolutionStatus.unbounded elif (objno[1] >= 400) and (objno[1] <= 499): - objno_message = ("EXCEEDED MAXIMUM NUMBER OF ITERATIONS: the solver " - "was stopped by a limit that you set!") + objno_message = ( + "EXCEEDED MAXIMUM NUMBER OF ITERATIONS: the solver " + "was stopped by a limit that you set!" + ) res.solver.termination_condition = TerminationCondition.maxIterations res.solver.status = SolverStatus.warning soln_status = SolutionStatus.stoppedByLimit elif (objno[1] >= 500) and (objno[1] <= 599): - objno_message = ("FAILURE: the solver stopped by an error condition " - "in the solver routines!") + objno_message = ( + "FAILURE: the solver stopped by an error condition " + "in the solver routines!" + ) res.solver.termination_condition = TerminationCondition.internalSolverError res.solver.status = SolverStatus.error soln_status = SolutionStatus.error res.solver.id = objno[1] ##res.problem.name = osrl.header.instanceName - if res.solver.termination_condition in [TerminationCondition.unknown, - TerminationCondition.maxIterations, - TerminationCondition.minFunctionValue, - TerminationCondition.minStepLength, - TerminationCondition.globallyOptimal, - TerminationCondition.locallyOptimal, - TerminationCondition.optimal, - TerminationCondition.maxEvaluations, - TerminationCondition.other, - TerminationCondition.infeasible]: - + if res.solver.termination_condition in [ + TerminationCondition.unknown, + TerminationCondition.maxIterations, + TerminationCondition.minFunctionValue, + TerminationCondition.minStepLength, + TerminationCondition.globallyOptimal, + TerminationCondition.locallyOptimal, + TerminationCondition.optimal, + TerminationCondition.maxEvaluations, + TerminationCondition.other, + TerminationCondition.infeasible, + ]: if soln is None: soln = res.solution.add() res.solution.status = soln_status soln.status_description = objno_message soln.message = msg.strip() - soln.message = res.solver.message.replace("\n","; ") + soln.message = res.solver.message.replace("\n", "; ") soln_variable = soln.variable i = 0 for var_value in x: - soln_variable["v"+str(i)] = {"Value" : var_value} + soln_variable["v" + str(i)] = {"Value": var_value} i = i + 1 soln_constraint = soln.constraint - if any(re.match(suf,"dual") for suf in suffixes): - for i in range(0,len(y)): - soln_constraint["c"+str(i)] = {"Dual" : y[i]} + if any(re.match(suf, "dual") for suf in suffixes): + for i in range(0, len(y)): + soln_constraint["c" + str(i)] = {"Dual": y[i]} ### Read suffixes ### line = fin.readline() @@ -192,12 +196,12 @@ def _load(self, fin, res, soln, suffixes): remaining = "" line = fin.readline() while line: - remaining += line.strip()+"; " + remaining += line.strip() + "; " line = fin.readline() res.solver.message += remaining break unmasked_kind = int(line[1]) - kind = unmasked_kind & 3 # 0-var, 1-con, 2-obj, 3-prob + kind = unmasked_kind & 3 # 0-var, 1-con, 2-obj, 3-prob convert_function = int if (unmasked_kind & 4) == 4: convert_function = float @@ -206,23 +210,24 @@ def _load(self, fin, res, soln, suffixes): # tablen = int(line[4]) tabline = int(line[5]) suffix_name = fin.readline().strip() - if any(re.match(suf,suffix_name) for suf in suffixes): + if any(re.match(suf, suffix_name) for suf in suffixes): # ignore translation of the table number to string value for now, # this information can be obtained from the solver documentation for n in range(tabline): fin.readline() - if kind == 0: # Var + if kind == 0: # Var for cnt in range(nvalues): suf_line = fin.readline().split() - key = "v"+suf_line[0] + key = "v" + suf_line[0] if key not in soln_variable: soln_variable[key] = {} - soln_variable[key][suffix_name] = \ - convert_function(suf_line[1]) - elif kind == 1: # Con + soln_variable[key][suffix_name] = convert_function( + suf_line[1] + ) + elif kind == 1: # Con for cnt in range(nvalues): suf_line = fin.readline().split() - key = "c"+suf_line[0] + key = "c" + suf_line[0] if key not in soln_constraint: soln_constraint[key] = {} # GH: About the comment below: This makes for a @@ -233,15 +238,19 @@ def _load(self, fin, res, soln, suffixes): # convert the first letter of the suffix name to upper case, # mainly for pretty-print / output purposes. these are lower-cased # when loaded into real suffixes, so it is largely redundant. - translated_suffix_name = suffix_name[0].upper() + suffix_name[1:] - soln_constraint[key][translated_suffix_name] = \ - convert_function(suf_line[1]) - elif kind == 2: # Obj + translated_suffix_name = ( + suffix_name[0].upper() + suffix_name[1:] + ) + soln_constraint[key][ + translated_suffix_name + ] = convert_function(suf_line[1]) + elif kind == 2: # Obj for cnt in range(nvalues): suf_line = fin.readline().split() - soln.objective.setdefault("o"+suf_line[0],{})[suffix_name] = \ - convert_function(suf_line[1]) - elif kind == 3: # Prob + soln.objective.setdefault("o" + suf_line[0], {})[ + suffix_name + ] = convert_function(suf_line[1]) + elif kind == 3: # Prob # Skip problem kind suffixes for now. Not sure the # best place to put them in the results object for cnt in range(nvalues): diff --git a/pyomo/opt/problem/__init__.py b/pyomo/opt/problem/__init__.py index a7b7e810a73..1b1a5328beb 100644 --- a/pyomo/opt/problem/__init__.py +++ b/pyomo/opt/problem/__init__.py @@ -9,5 +9,9 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.opt.problem.ampl import (ProblemFormat, convert_problem, - guess_format, AmplModel) +from pyomo.opt.problem.ampl import ( + ProblemFormat, + convert_problem, + guess_format, + AmplModel, +) diff --git a/pyomo/opt/problem/ampl.py b/pyomo/opt/problem/ampl.py index 2e85366dd2b..625c342f005 100644 --- a/pyomo/opt/problem/ampl.py +++ b/pyomo/opt/problem/ampl.py @@ -18,9 +18,8 @@ import os -from pyomo.opt.base import (ProblemFormat, - convert_problem, - guess_format) +from pyomo.opt.base import ProblemFormat, convert_problem, guess_format + class AmplModel(object): """ @@ -67,4 +66,3 @@ def write(self, filename, format=None, solver_capability=None): if os.path.exists(filename): os.remove(filename) os.rename(res[0][0], filename) - diff --git a/pyomo/opt/results/__init__.py b/pyomo/opt/results/__init__.py index 4739142a275..8b2933adfe0 100644 --- a/pyomo/opt/results/__init__.py +++ b/pyomo/opt/results/__init__.py @@ -9,13 +9,24 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.opt.results.container import (ScalarData, ScalarType, - default_print_options, strict, - ListContainer, MapContainer, - UndefinedData, undefined, ignore) +from pyomo.opt.results.container import ( + ScalarData, + ScalarType, + default_print_options, + strict, + ListContainer, + MapContainer, + UndefinedData, + undefined, + ignore, +) import pyomo.opt.results.problem -from pyomo.opt.results.solver import SolverStatus, TerminationCondition, \ - check_optimal_termination, assert_optimal_termination +from pyomo.opt.results.solver import ( + SolverStatus, + TerminationCondition, + check_optimal_termination, + assert_optimal_termination, +) from pyomo.opt.results.problem import ProblemSense from pyomo.opt.results.solution import SolutionStatus, Solution from pyomo.opt.results.results_ import SolverResults diff --git a/pyomo/opt/results/container.py b/pyomo/opt/results/container.py index 9fcbf60a1ad..98a68048b45 100644 --- a/pyomo/opt/results/container.py +++ b/pyomo/opt/results/container.py @@ -9,7 +9,16 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ['UndefinedData', 'undefined', 'ignore', 'ScalarData', 'ListContainer', 'MapContainer', 'default_print_options', 'ScalarType'] +__all__ = [ + 'UndefinedData', + 'undefined', + 'ignore', + 'ScalarData', + 'ListContainer', + 'MapContainer', + 'default_print_options', + 'ScalarType', +] import copy @@ -21,12 +30,12 @@ class ScalarType(str, enum.Enum): - int='int' - time='time' - string='string' - float='float' - enum='enum' - undefined='undefined' + int = 'int' + time = 'time' + string = 'string' + float = 'float' + enum = 'enum' + undefined = 'undefined' # Overloading __str__ is needed to match the behavior of the old # pyutilib.enum class (removed June 2020). There are spots in the @@ -36,28 +45,37 @@ class ScalarType(str, enum.Enum): def __str__(self): return self.value + default_print_options = Bunch(schema=False, ignore_time=False) -strict=False +strict = False -class UndefinedData(object): +class UndefinedData(object): def __str__(self): return "" + undefined = UndefinedData() -ignore = UndefinedData() +ignore = UndefinedData() class ScalarData(object): - - def __init__(self, value=undefined, description=None, units=None, scalar_description=None, type=ScalarType.undefined, required=False): + def __init__( + self, + value=undefined, + description=None, + units=None, + scalar_description=None, + type=ScalarType.undefined, + required=False, + ): self.value = value self.description = description self.units = units self.scalar_description = scalar_description self.scalar_type = type - self._required=required + self._required = required def get_value(self): if isinstance(self.value, enum.Enum): @@ -76,7 +94,7 @@ def _repn_(self, option): value = self.get_value() if option.schema: - tmp = {'value':value} + tmp = {'value': value} if not self.description is None: tmp['description'] = self.description if not self.units is None: @@ -87,7 +105,7 @@ def _repn_(self, option): tmp['type'] = self.scalar_type return tmp if not (self.description is None and self.units is None): - tmp = {'value':value} + tmp = {'value': value} if not self.description is None: tmp['description'] = self.description if not self.units is None: @@ -105,24 +123,36 @@ def pprint(self, ostream, option, prefix="", repn=None): if value is inf: value = '.inf' - elif value is - inf: + elif value is -inf: value = '-.inf' if not option.schema and self.description is None and self.units is None: - ostream.write(str(value)+'\n') + ostream.write(str(value) + '\n') else: ostream.write("\n") - ostream.write(prefix+'Value: '+str(value)+'\n') + ostream.write(prefix + 'Value: ' + str(value) + '\n') if not option.schema: if not self.description is None: - ostream.write(prefix+'Description: '+self.yaml_fix(self.description)+'\n') + ostream.write( + prefix + + 'Description: ' + + self.yaml_fix(self.description) + + '\n' + ) if not self.units is None: - ostream.write(prefix+'Units: '+str(self.units)+'\n') + ostream.write(prefix + 'Units: ' + str(self.units) + '\n') else: if not self.scalar_description is None: - ostream.write(prefix+'Description: '+self.yaml_fix(self.scalar_description)+'\n') + ostream.write( + prefix + + 'Description: ' + + self.yaml_fix(self.scalar_description) + + '\n' + ) if not self.scalar_type is ScalarType.undefined: - ostream.write(prefix+'Type: '+self.yaml_fix(self.scalar_type)+'\n') + ostream.write( + prefix + 'Type: ' + self.yaml_fix(self.scalar_type) + '\n' + ) def yaml_fix(self, val): if not isinstance(val, str): @@ -141,19 +171,18 @@ def load(self, repn): # This class manages a list of MapContainer objects. # class ListContainer(object): - def __init__(self, cls): - self._cls=cls + self._cls = cls self._list = [] - self._active=True - self._required=False + self._active = True + self._required = False def __len__(self): if '_list' in self.__dict__: return len(self.__dict__['_list']) return 0 - def __getitem__(self,i): + def __getitem__(self, i): return self._list[i] def clear(self): @@ -162,10 +191,10 @@ def clear(self): def delete(self, i): del self._list[i] - def __call__(self,i=0): + def __call__(self, i=0): return self._list[i] - def __getattr__(self,name): + def __getattr__(self, name): try: return self.__dict__[name] except: @@ -174,7 +203,7 @@ def __getattr__(self,name): self.add() return getattr(self._list[0], name) - def __setattr__(self,name,val): + def __setattr__(self, name, val): if name == "__class__": self.__class__ = val return @@ -186,13 +215,13 @@ def __setattr__(self,name,val): setattr(self._list[0], name, val) def insert(self, obj): - self._active=True - self._list.append( obj ) + self._active = True + self._list.append(obj) def add(self): - self._active=True + self._active = True obj = self._cls() - self._list.append( obj ) + self._list.append(obj) return obj def _repn_(self, option): @@ -202,18 +231,20 @@ def _repn_(self, option): self.add() tmp = [] for item in self._list: - tmp.append( item._repn_(option) ) + tmp.append(item._repn_(option)) return tmp def pprint(self, ostream, option, prefix="", repn=None): if not option.schema and not self._active and not self._required: return ignore ostream.write("\n") - i=0 + i = 0 for i in range(len(self._list)): item = self._list[i] - ostream.write(prefix+'- ') - item.pprint(ostream, option, from_list=True, prefix=prefix+" ", repn=repn[i]) + ostream.write(prefix + '- ') + item.pprint( + ostream, option, from_list=True, prefix=prefix + " ", repn=repn[i] + ) def load(self, repn): for data in repn: @@ -228,7 +259,7 @@ def __setstate__(self, state): def __str__(self): ostream = StringIO() - option=default_print_options + option = default_print_options self.pprint(ostream, self._option, repn=self._repn_(self._option)) return ostream.getvalue() @@ -240,77 +271,91 @@ def __str__(self): # first letter is capitalized. # class MapContainer(dict): - def __getnewargs_ex__(self): # Pass arguments to __new__ when unpickling - return ((0,0),{}) + return ((0, 0), {}) def __getnewargs__(self): # Pass arguments to __new__ when unpickling - return (0,0) + return (0, 0) def __new__(cls, *args, **kwargs): # - # If the user provides "too many" arguments, then + # If the user provides "too many" arguments, then # pre-initialize the '_order' attribute. This pre-initializes # the class during unpickling. # _instance = super(MapContainer, cls).__new__(cls, *args, **kwargs) if len(args) > 1: - super(MapContainer, _instance).__setattr__('_order',[]) + super(MapContainer, _instance).__setattr__('_order', []) return _instance def __init__(self, ordered=False): dict.__init__(self) - self._active=True - self._required=False - self._ordered=ordered - self._order=[] - self._option=default_print_options + self._active = True + self._required = False + self._ordered = ordered + self._order = [] + self._option = default_print_options def keys(self): return self._order - def __getattr__(self,name): + def __getattr__(self, name): try: return self.__dict__[name] except: pass try: - self._active=True + self._active = True return self[self._convert(name)] except Exception: pass - raise AttributeError("Unknown attribute `"+str(name)+"' for object with type "+str(type(self))) - - def __setattr__(self,name,val): + raise AttributeError( + "Unknown attribute `" + + str(name) + + "' for object with type " + + str(type(self)) + ) + + def __setattr__(self, name, val): if name == "__class__": self.__class__ = val return if name[0] == "_": self.__dict__[name] = val return - self._active=True + self._active = True tmp = self._convert(name) if tmp not in self: if strict: - raise AttributeError("Unknown attribute `"+str(name)+"' for object with type "+str(type(self))) + raise AttributeError( + "Unknown attribute `" + + str(name) + + "' for object with type " + + str(type(self)) + ) self.declare(tmp) - self._set_value(tmp,val) + self._set_value(tmp, val) def __setitem__(self, name, val): - self._active=True + self._active = True tmp = self._convert(name) if tmp not in self: if strict: - raise AttributeError("Unknown attribute `"+str(name)+"' for object with type "+str(type(self))) + raise AttributeError( + "Unknown attribute `" + + str(name) + + "' for object with type " + + str(type(self)) + ) self.declare(tmp) - self._set_value(tmp,val) + self._set_value(tmp, val) def _set_value(self, name, val): - if isinstance(val,ListContainer) or isinstance(val,MapContainer): + if isinstance(val, ListContainer) or isinstance(val, MapContainer): dict.__setitem__(self, name, val) - elif isinstance(val,ScalarData): + elif isinstance(val, ScalarData): dict.__getitem__(self, name).value = val.value else: dict.__getitem__(self, name).value = val @@ -318,9 +363,14 @@ def _set_value(self, name, val): def __getitem__(self, name): tmp = self._convert(name) if tmp not in self: - raise AttributeError("Unknown attribute `"+str(name)+"' for object with type "+str(type(self))) + raise AttributeError( + "Unknown attribute `" + + str(name) + + "' for object with type " + + str(type(self)) + ) item = dict.__getitem__(self, tmp) - if isinstance(item,ListContainer) or isinstance(item,MapContainer): + if isinstance(item, ListContainer) or isinstance(item, MapContainer): return item return item.value @@ -329,7 +379,10 @@ def declare(self, name, **kwds): return tmp = self._convert(name) self._order.append(tmp) - if 'value' in kwds and (isinstance(kwds['value'],MapContainer) or isinstance(kwds['value'],ListContainer)): + if 'value' in kwds and ( + isinstance(kwds['value'], MapContainer) + or isinstance(kwds['value'], ListContainer) + ): if 'active' in kwds: kwds['value']._active = kwds['active'] if 'required' in kwds and kwds['required'] is True: @@ -344,7 +397,7 @@ def declare(self, name, **kwds): # initial value of an attribute. I don't think we need this, # but for now I'm going to leave this logic in the code. # - #if 'value' in kwds: + # if 'value' in kwds: # data._default = kwds['value'] dict.__setitem__(self, tmp, data) @@ -356,7 +409,7 @@ def _repn_(self, option): for key in self._order: rep = dict.__getitem__(self, key)._repn_(option) if not rep == ignore: - tmp.append({key:rep}) + tmp.append({key: rep}) else: tmp = {} for key in self.keys(): @@ -368,7 +421,7 @@ def _repn_(self, option): def _convert(self, name): if not isinstance(name, str): return name - tmp = name.replace('_',' ') + tmp = name.replace('_', ' ') return tmp[0].upper() + tmp[1:] def __repr__(self): @@ -376,34 +429,34 @@ def __repr__(self): def __str__(self): ostream = StringIO() - option=default_print_options + option = default_print_options self.pprint(ostream, self._option, repn=self._repn_(self._option)) return ostream.getvalue() def pprint(self, ostream, option, from_list=False, prefix="", repn=None): if from_list: - _prefix="" + _prefix = "" else: - _prefix=prefix + _prefix = prefix ostream.write('\n') for key in self._order: if not key in repn: continue - item = dict.__getitem__(self,key) - ostream.write(_prefix+key+": ") - _prefix=prefix + item = dict.__getitem__(self, key) + ostream.write(_prefix + key + ": ") + _prefix = prefix if isinstance(item, ListContainer): item.pprint(ostream, option, prefix=_prefix, repn=repn[key]) else: - item.pprint(ostream, option, prefix=_prefix+" ", repn=repn[key]) + item.pprint(ostream, option, prefix=_prefix + " ", repn=repn[key]) def load(self, repn): for key in repn: tmp = self._convert(key) if tmp not in self: self.declare(tmp) - item = dict.__getitem__(self,tmp) - item._active=True + item = dict.__getitem__(self, tmp) + item._active = True item.load(repn[key]) def __getnewargs__(self): @@ -417,7 +470,7 @@ def __setstate__(self, state): if __name__ == '__main__': - d=MapContainer() + d = MapContainer() d.declare('f') d.declare('g') d.declare('h') diff --git a/pyomo/opt/results/problem.py b/pyomo/opt/results/problem.py index d5c3ab146d7..71fd748dd81 100644 --- a/pyomo/opt/results/problem.py +++ b/pyomo/opt/results/problem.py @@ -14,10 +14,11 @@ import enum from pyomo.opt.results.container import MapContainer + class ProblemSense(str, enum.Enum): - unknown='unknown' - minimize='minimize' - maximize='maximize' + unknown = 'unknown' + minimize = 'minimize' + maximize = 'maximize' # Overloading __str__ is needed to match the behavior of the old # pyutilib.enum class (removed June 2020). There are spots in the @@ -29,7 +30,6 @@ def __str__(self): class ProblemInformation(MapContainer): - def __init__(self): MapContainer.__init__(self) self.declare('name') @@ -43,4 +43,3 @@ def __init__(self): self.declare('number_of_continuous_variables') self.declare('number_of_nonzeros') self.declare('sense', value=ProblemSense.unknown, required=True) - diff --git a/pyomo/opt/results/results_.py b/pyomo/opt/results/results_.py index b696b578993..2852bb72e8a 100644 --- a/pyomo/opt/results/results_.py +++ b/pyomo/opt/results/results_.py @@ -20,10 +20,7 @@ from pyomo.common.dependencies import yaml, yaml_load_args, yaml_available import pyomo.opt -from pyomo.opt.results.container import (undefined, - ignore, - ListContainer, - MapContainer) +from pyomo.opt.results.container import undefined, ignore, ListContainer, MapContainer import pyomo.opt.results.solution from pyomo.opt.results.solution import default_print_options as dpo import pyomo.opt.results.problem @@ -33,8 +30,8 @@ logger = logging.getLogger(__name__) -class SolverResults(MapContainer): +class SolverResults(MapContainer): undefined = undefined default_print_options = dpo @@ -42,24 +39,30 @@ def __init__(self): MapContainer.__init__(self) self._sections = [] self._descriptions = {} - self.add('problem', - ListContainer(pyomo.opt.results.problem.ProblemInformation), - False, - "Problem Information") - self.add('solver', - ListContainer(pyomo.opt.results.solver.SolverInformation), - False, - "Solver Information") - self.add('solution', - pyomo.opt.results.solution.SolutionSet(), - False, - "Solution Information") + self.add( + 'problem', + ListContainer(pyomo.opt.results.problem.ProblemInformation), + False, + "Problem Information", + ) + self.add( + 'solver', + ListContainer(pyomo.opt.results.solver.SolverInformation), + False, + "Solver Information", + ) + self.add( + 'solution', + pyomo.opt.results.solution.SolutionSet(), + False, + "Solution Information", + ) def add(self, name, value, active, description): self.declare(name, value=value, active=active) tmp = self._convert(name) self._sections.append(tmp) - self._descriptions[tmp]=description + self._descriptions[tmp] = description def json_repn(self, options=None): if options is None: @@ -96,7 +99,8 @@ def write(self, **kwds): elif normalized_ext and _fmt != normalized_ext: logger.warning( "writing results to file (%s) using what appears " - "to be an incompatible format (%s)" % (fname, _fmt)) + "to be an incompatible format (%s)" % (fname, _fmt) + ) with open(fname, "w") as OUTPUT: kwds['ostream'] = OUTPUT kwds['format'] = _fmt @@ -121,7 +125,7 @@ def write_json(self, **kwds): option = copy.copy(SolverResults.default_print_options) # TODO: verify that we need this for-loop for key in kwds: - setattr(option,key,kwds[key]) + setattr(option, key, kwds[key]) repn = self.json_repn(option) for soln in repn.get('Solution', []): @@ -130,7 +134,7 @@ def write_json(self, **kwds): if data not in soln: continue data_value = soln[data] - if not isinstance(data_value,dict): + if not isinstance(data_value, dict): continue if not data_value: # a variable/constraint/objective may have no @@ -138,19 +142,19 @@ def write_json(self, **kwds): # extracted in a solution. soln[data] = "No values" continue - for kk,vv in data_value.items(): + for kk, vv in data_value.items(): # TODO: remove this if-block. This is a hack if not type(vv) is dict: - vv = {'Value':vv} + vv = {'Value': vv} tmp = {} - for k,v in vv.items(): + for k, v in vv.items(): # TODO: remove this if-block. This is a hack if v is not None and math.fabs(v) > 1e-16: tmp[k] = v if len(tmp) > 0: soln[data][kk] = tmp else: - remove.add((data,kk)) + remove.add((data, kk)) for item in remove: del soln[item[0]][item[1]] json.dump(repn, ostream, indent=4, sort_keys=True) @@ -165,7 +169,7 @@ def write_yaml(self, **kwds): option = copy.copy(SolverResults.default_print_options) # TODO: verify that we need this for-loop for key in kwds: - setattr(option,key,kwds[key]) + setattr(option, key, kwds[key]) repn = self._repn_(option) ostream.write("# ==========================================================\n") @@ -175,11 +179,15 @@ def write_yaml(self, **kwds): key = self._order[i] if not key in repn: continue - item = dict.__getitem__(self,key) - ostream.write("# ----------------------------------------------------------\n") + item = dict.__getitem__(self, key) + ostream.write( + "# ----------------------------------------------------------\n" + ) ostream.write("# %s\n" % self._descriptions[key]) - ostream.write("# ----------------------------------------------------------\n") - ostream.write(key+": ") + ostream.write( + "# ----------------------------------------------------------\n" + ) + ostream.write(key + ": ") if isinstance(item, ListContainer): item.pprint(ostream, option, prefix="", repn=repn[key]) else: @@ -192,9 +200,9 @@ def read(self, **kwds): else: ostream = sys.stdin if 'filename' in kwds: - INPUT=open(kwds['filename'],"r") + INPUT = open(kwds['filename'], "r") del kwds['filename'] - kwds['istream']=INPUT + kwds['istream'] = INPUT self.read(**kwds) INPUT.close() return @@ -207,7 +215,7 @@ def read(self, **kwds): key = self._order[i] if not key in repn: continue - item = dict.__getitem__(self,key) + item = dict.__getitem__(self, key) item.load(repn[key]) def __repr__(self): @@ -215,7 +223,7 @@ def __repr__(self): def __str__(self): ostream = StringIO() - option=SolverResults.default_print_options + option = SolverResults.default_print_options self.pprint(ostream, option, repn=self._repn_(option)) return ostream.getvalue() @@ -223,4 +231,4 @@ def __str__(self): if __name__ == '__main__': results = SolverResults() results.write(schema=True) - #print results + # print results diff --git a/pyomo/opt/results/solution.py b/pyomo/opt/results/solution.py index 68ee4558b0d..0cb8e92e730 100644 --- a/pyomo/opt/results/solution.py +++ b/pyomo/opt/results/solution.py @@ -16,25 +16,28 @@ from pyomo.opt.results.container import MapContainer, ListContainer, ignore from pyomo.common.collections import Bunch, OrderedDict -default_print_options = Bunch(schema=False, - sparse=True, - num_solutions=None, - ignore_time=False, - ignore_defaults=False) +default_print_options = Bunch( + schema=False, + sparse=True, + num_solutions=None, + ignore_time=False, + ignore_defaults=False, +) + class SolutionStatus(str, enum.Enum): - bestSoFar='bestSoFar' - error='error' - feasible='feasible' - globallyOptimal='globallyOptimal' - infeasible='infeasible' - locallyOptimal='locallyOptimal' - optimal='optimal' - other='other' - stoppedByLimit='stoppedByLimit' - unbounded='unbounded' - unknown='unknown' - unsure='unsure' + bestSoFar = 'bestSoFar' + error = 'error' + feasible = 'feasible' + globallyOptimal = 'globallyOptimal' + infeasible = 'infeasible' + locallyOptimal = 'locallyOptimal' + optimal = 'optimal' + other = 'other' + stoppedByLimit = 'stoppedByLimit' + unbounded = 'unbounded' + unknown = 'unknown' + unsure = 'unsure' # Overloading __str__ is needed to match the behavior of the old # pyutilib.enum class (removed June 2020). There are spots in the @@ -45,12 +48,11 @@ def __str__(self): return self.value -intlist = (int, ) +intlist = (int,) numlist = (float, int) class Solution(MapContainer): - def __init__(self): MapContainer.__init__(self) @@ -89,32 +91,32 @@ def pprint(self, ostream, option, from_list=False, prefix="", repn=None): # # the following is specialized logic for handling variable and # constraint maps - which are dictionaries of dictionaries, with - # at a minimum an "id" element per sub-directionary. + # at a minimum an "id" element per sub-directionary. # first = True for key in self._order: if not key in repn or key == 'Problem': continue - item = dict.__getitem__(self,key) + item = dict.__getitem__(self, key) if not type(item.value) is dict: # # Do a normal print # if first: - ostream.write(key+": ") + ostream.write(key + ": ") first = False else: - ostream.write(prefix+key+": ") - item.pprint(ostream, option, prefix=prefix+" ", repn=repn[key]) + ostream.write(prefix + key + ": ") + item.pprint(ostream, option, prefix=prefix + " ", repn=repn[key]) elif len(item.value) == 0: # # The dictionary is empty # - ostream.write(prefix+key+": No values\n") + ostream.write(prefix + key + ": No values\n") else: print_zeros = key in ['Objective'] first = True - ostream.write(prefix+key+":") + ostream.write(prefix + key + ":") prefix_ = prefix prefix = prefix + " " # @@ -123,8 +125,10 @@ def pprint(self, ostream, option, from_list=False, prefix="", repn=None): value = item.value id_ctr = 0 id_dict_map = {} - id_name_map = {} # the name could be an integer or float - so convert prior to printing (see code below) - id_nonzeros_map = {} # are any of the non-id entries are non-zero? + id_name_map = ( + {} + ) # the name could be an integer or float - so convert prior to printing (see code below) + id_nonzeros_map = {} # are any of the non-id entries are non-zero? entries_to_print = False for entry_name, entry_dict in value.items(): @@ -132,35 +136,44 @@ def pprint(self, ostream, option, from_list=False, prefix="", repn=None): id_ctr += 1 id_name_map[entry_id] = entry_name id_dict_map[entry_id] = entry_dict - id_nonzeros_map[entry_id] = False # until proven otherwise + id_nonzeros_map[entry_id] = False # until proven otherwise for attr_name, attr_value in entry_dict.items(): if print_zeros or math.fabs(attr_value) > 1e-16: id_nonzeros_map[entry_id] = True entries_to_print = True if entries_to_print: - for entry_id in sorted(id_dict_map.keys(), key=lambda id:id_name_map[id]): + for entry_id in sorted( + id_dict_map.keys(), key=lambda id: id_name_map[id] + ): if id_nonzeros_map[entry_id]: if first: ostream.write("\n") first = False - ostream.write(prefix+str(id_name_map[entry_id])+":\n") + ostream.write(prefix + str(id_name_map[entry_id]) + ":\n") entry_dict = id_dict_map[entry_id] for attr_name in sorted(entry_dict.keys()): attr_value = entry_dict[attr_name] - if isinstance(attr_value,float) and (math.floor(attr_value) == attr_value): + if isinstance(attr_value, float) and ( + math.floor(attr_value) == attr_value + ): attr_value = int(attr_value) - ostream.write(prefix+" "+attr_name.capitalize()+": "+str(attr_value)+'\n') + ostream.write( + prefix + + " " + + attr_name.capitalize() + + ": " + + str(attr_value) + + '\n' + ) else: ostream.write(" No nonzero values\n") prefix = prefix_ - class SolutionSet(ListContainer): - def __init__(self): - ListContainer.__init__(self,Solution) + ListContainer.__init__(self, Solution) self._option = default_print_options def _repn_(self, option): @@ -173,41 +186,50 @@ def _repn_(self, option): num = len(self) else: num = min(option.num_solutions, len(self)) - i=0 + i = 0 tmp = [] for item in self._list: - tmp.append( item._repn_(option) ) - i=i+1 + tmp.append(item._repn_(option)) + i = i + 1 if i == num: break - return [OrderedDict([('number of solutions',len(self)), ('number of solutions displayed',num)])]+ tmp + return [ + OrderedDict( + [ + ('number of solutions', len(self)), + ('number of solutions displayed', num), + ] + ) + ] + tmp def __len__(self): return len(self._list) def __call__(self, i=1): - return self._list[i-1] + return self._list[i - 1] def pprint(self, ostream, option, prefix="", repn=None): if not option.schema and not self._active and not self._required: return ignore ostream.write("\n") - ostream.write(prefix+"- ") - spaces="" + ostream.write(prefix + "- ") + spaces = "" for key in repn[0]: - ostream.write(prefix+spaces+key+": "+str(repn[0][key])+'\n') - spaces=" " - i=0 + ostream.write(prefix + spaces + key + ": " + str(repn[0][key]) + '\n') + spaces = " " + i = 0 for i in range(len(self._list)): item = self._list[i] - ostream.write(prefix+'- ') - item.pprint(ostream, option, from_list=True, prefix=prefix+" ", repn=repn[i+1]) + ostream.write(prefix + '- ') + item.pprint( + ostream, option, from_list=True, prefix=prefix + " ", repn=repn[i + 1] + ) def load(self, repn): # # Note: we ignore the first element of the repn list, since # it was generated on the fly by the SolutionSet object. # - for data in repn[1:]: # repn items 1 through N are individual solutions. + for data in repn[1:]: # repn items 1 through N are individual solutions. item = self.add() item.load(data) diff --git a/pyomo/opt/results/solver.py b/pyomo/opt/results/solver.py index 2338e2f0a2d..5f9ceb3b68e 100644 --- a/pyomo/opt/results/solver.py +++ b/pyomo/opt/results/solver.py @@ -9,7 +9,13 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ['SolverInformation', 'SolverStatus', 'TerminationCondition', 'check_optimal_termination', 'assert_optimal_termination'] +__all__ = [ + 'SolverInformation', + 'SolverStatus', + 'TerminationCondition', + 'check_optimal_termination', + 'assert_optimal_termination', +] import enum from pyomo.opt.results.container import MapContainer, ScalarType @@ -19,12 +25,12 @@ # A coarse summary of how the solver terminated. # class SolverStatus(str, enum.Enum): - ok='ok' # Normal termination - warning='warning' # Termination with unusual condition - error='error' # Terminated internally with error - aborted='aborted' # Terminated due to external conditions - # (e.g. interrupts) - unknown='unknown' # An unitialized value + ok = 'ok' # Normal termination + warning = 'warning' # Termination with unusual condition + error = 'error' # Terminated internally with error + aborted = 'aborted' # Terminated due to external conditions + # (e.g. interrupts) + unknown = 'unknown' # An uninitialized value # Overloading __str__ is needed to match the behavior of the old # pyutilib.enum class (removed June 2020). There are spots in the @@ -34,45 +40,52 @@ class SolverStatus(str, enum.Enum): def __str__(self): return self.value + # # A description of how the solver terminated # class TerminationCondition(str, enum.Enum): # UNKNOWN - unknown='unknown' # An unitialized value + unknown = 'unknown' # An uninitialized value # OK - maxTimeLimit='maxTimeLimit' # Exceeded maximum time limited allowed by user - # but having return a feasible solution - maxIterations='maxIterations' # Exceeded maximum number of iterations allowed - # by user (e.g., simplex iterations) - minFunctionValue='minFunctionValue' # Found solution smaller than specified function - # value - minStepLength='minStepLength' # Step length is smaller than specified limit - globallyOptimal='globallyOptimal' # Found a globally optimal solution - locallyOptimal='locallyOptimal' # Found a locally optimal solution - feasible='feasible' # Found a solution that is feasible - optimal='optimal' # Found an optimal solution - maxEvaluations='maxEvaluations' # Exceeded maximum number of problem evaluations - # (e.g., branch and bound nodes) - other='other' # Other, uncategorized normal termination + maxTimeLimit = 'maxTimeLimit' # Exceeded maximum time limited allowed by user + # but having return a feasible solution + maxIterations = 'maxIterations' # Exceeded maximum number of iterations allowed + # by user (e.g., simplex iterations) + minFunctionValue = ( + 'minFunctionValue' # Found solution smaller than specified function + ) + # value + minStepLength = 'minStepLength' # Step length is smaller than specified limit + globallyOptimal = 'globallyOptimal' # Found a globally optimal solution + locallyOptimal = 'locallyOptimal' # Found a locally optimal solution + feasible = 'feasible' # Found a solution that is feasible + optimal = 'optimal' # Found an optimal solution + maxEvaluations = 'maxEvaluations' # Exceeded maximum number of problem evaluations + # (e.g., branch and bound nodes) + other = 'other' # Other, uncategorized normal termination # WARNING - unbounded='unbounded' # Demonstrated that problem is unbounded - infeasible='infeasible' # Demonstrated that the problem is infeasible - infeasibleOrUnbounded='infeasibleOrUnbounded' # Problem is either infeasible or unbounded - invalidProblem='invalidProblem' # The problem setup or characteristics are not - # valid for the solver - intermediateNonInteger='intermediateNonInteger' # A non-integer solution has been returned - noSolution='noSolution' # No feasible solution found but infeasibility - # not proven + unbounded = 'unbounded' # Demonstrated that problem is unbounded + infeasible = 'infeasible' # Demonstrated that the problem is infeasible + infeasibleOrUnbounded = ( + 'infeasibleOrUnbounded' # Problem is either infeasible or unbounded + ) + invalidProblem = 'invalidProblem' # The problem setup or characteristics are not + # valid for the solver + intermediateNonInteger = ( + 'intermediateNonInteger' # A non-integer solution has been returned + ) + noSolution = 'noSolution' # No feasible solution found but infeasibility + # not proven # ERROR - solverFailure='solverFailure' # Solver failed to terminate correctly - internalSolverError='internalSolverError' # Internal solver error - error='error' # Other errors + solverFailure = 'solverFailure' # Solver failed to terminate correctly + internalSolverError = 'internalSolverError' # Internal solver error + error = 'error' # Other errors # ABORTED - userInterrupt='userInterrupt' # Interrupt signal generated by user - resourceInterrupt='resourceInterrupt' # Interrupt signal in resources used by - # optimizer - licensingProblems='licensingProblems' # Problem accessing solver license + userInterrupt = 'userInterrupt' # Interrupt signal generated by user + resourceInterrupt = 'resourceInterrupt' # Interrupt signal in resources used by + # optimizer + licensingProblems = 'licensingProblems' # Problem accessing solver license # Overloading __str__ is needed to match the behavior of the old # pyutilib.enum class (removed June 2020). There are spots in the @@ -95,34 +108,38 @@ def to_solver_status(tc): SolverStatus """ if tc in { - TerminationCondition.maxTimeLimit, - TerminationCondition.maxIterations, - TerminationCondition.minFunctionValue, - TerminationCondition.minStepLength, - TerminationCondition.globallyOptimal, - TerminationCondition.locallyOptimal, - TerminationCondition.feasible, - TerminationCondition.optimal, - TerminationCondition.maxEvaluations, - TerminationCondition.other }: + TerminationCondition.maxTimeLimit, + TerminationCondition.maxIterations, + TerminationCondition.minFunctionValue, + TerminationCondition.minStepLength, + TerminationCondition.globallyOptimal, + TerminationCondition.locallyOptimal, + TerminationCondition.feasible, + TerminationCondition.optimal, + TerminationCondition.maxEvaluations, + TerminationCondition.other, + }: return SolverStatus.ok if tc in { - TerminationCondition.unbounded, - TerminationCondition.infeasible, - TerminationCondition.infeasibleOrUnbounded, - TerminationCondition.invalidProblem, - TerminationCondition.intermediateNonInteger, - TerminationCondition.noSolution }: + TerminationCondition.unbounded, + TerminationCondition.infeasible, + TerminationCondition.infeasibleOrUnbounded, + TerminationCondition.invalidProblem, + TerminationCondition.intermediateNonInteger, + TerminationCondition.noSolution, + }: return SolverStatus.warning if tc in { - TerminationCondition.solverFailure, - TerminationCondition.internalSolverError, - TerminationCondition.error }: + TerminationCondition.solverFailure, + TerminationCondition.internalSolverError, + TerminationCondition.error, + }: return SolverStatus.error if tc in { - TerminationCondition.userInterrupt, - TerminationCondition.resourceInterrupt, - TerminationCondition.licensingProblems }: + TerminationCondition.userInterrupt, + TerminationCondition.resourceInterrupt, + TerminationCondition.licensingProblems, + }: return SolverStatus.aborted return SolverStatus.unknown @@ -140,10 +157,11 @@ def check_optimal_termination(results): ------- `bool` """ - if results.solver.status == SolverStatus.ok and \ - (results.solver.termination_condition == TerminationCondition.optimal + if results.solver.status == SolverStatus.ok and ( + results.solver.termination_condition == TerminationCondition.optimal or results.solver.termination_condition == TerminationCondition.locallyOptimal - or results.solver.termination_condition == TerminationCondition.globallyOptimal): + or results.solver.termination_condition == TerminationCondition.globallyOptimal + ): return True return False @@ -159,14 +177,16 @@ def assert_optimal_termination(results): results : Pyomo results object returned from solver.solve """ if not check_optimal_termination(results): - msg = 'Solver failed to return an optimal solution. ' \ - 'Solver status: {}, Termination condition: {}'.format(results.solver.status, - results.solver.termination_condition) + msg = ( + 'Solver failed to return an optimal solution. ' + 'Solver status: {}, Termination condition: {}'.format( + results.solver.status, results.solver.termination_condition + ) + ) raise RuntimeError(msg) - -class BranchAndBoundStats(MapContainer): +class BranchAndBoundStats(MapContainer): def __init__(self): MapContainer.__init__(self) self.declare('number of bounded subproblems') @@ -174,7 +194,6 @@ def __init__(self): class BlackBoxStats(MapContainer): - def __init__(self): MapContainer.__init__(self) self.declare('number of function evaluations') @@ -183,16 +202,13 @@ def __init__(self): class SolverStatistics(MapContainer): - def __init__(self): MapContainer.__init__(self) - self.declare("branch_and_bound", value=BranchAndBoundStats(), - active=False) + self.declare("branch_and_bound", value=BranchAndBoundStats(), active=False) self.declare("black_box", value=BlackBoxStats(), active=False) class SolverInformation(MapContainer): - def __init__(self): MapContainer.__init__(self) self.declare('name') @@ -206,8 +222,7 @@ def __init__(self): self.declare('wallclock_time', type=ScalarType.time) # Semantics: The specific condition that caused the solver to # terminate. - self.declare('termination_condition', - value=TerminationCondition.unknown) + self.declare('termination_condition', value=TerminationCondition.unknown) # Semantics: A string printed by the solver that summarizes the # termination status. self.declare('termination_message') diff --git a/pyomo/opt/solver/__init__.py b/pyomo/opt/solver/__init__.py index 1c5cf44f943..961d7e0edbd 100644 --- a/pyomo/opt/solver/__init__.py +++ b/pyomo/opt/solver/__init__.py @@ -9,6 +9,11 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.opt.solver.shellcmd import (ResultsFormat, OptSolver, SolverStatus, - SolverResults, SystemCallSolver) +from pyomo.opt.solver.shellcmd import ( + ResultsFormat, + OptSolver, + SolverStatus, + SolverResults, + SystemCallSolver, +) from pyomo.opt.solver.ilmcmd import ILMLicensedSystemCallSolver diff --git a/pyomo/opt/solver/ilmcmd.py b/pyomo/opt/solver/ilmcmd.py index 1162d0ba970..d08feab7d9a 100644 --- a/pyomo/opt/solver/ilmcmd.py +++ b/pyomo/opt/solver/ilmcmd.py @@ -24,17 +24,19 @@ class ILMLicensedSystemCallSolver(SystemCallSolver): - """ A command line solver that launches executables licensed with ILM """ + """A command line solver that launches executables licensed with ILM""" def __init__(self, **kwds): - """ Constructor """ + """Constructor""" pyomo.opt.solver.shellcmd.SystemCallSolver.__init__(self, **kwds) def available(self, exception_flag=False): - """ True if the solver is available """ + """True if the solver is available""" if self._assert_available: return True - if not pyomo.opt.solver.shellcmd.SystemCallSolver.available(self, exception_flag): + if not pyomo.opt.solver.shellcmd.SystemCallSolver.available( + self, exception_flag + ): return False executable = pyomo.common.Executable("ilmlist") if executable: @@ -48,23 +50,39 @@ def available(self, exception_flag=False): # this behavior, this command will stall until the # user hits Ctrl-C. cmd.append("-batch") - result = subprocess.run(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + result = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) except OSError: msg = sys.exc_info()[1] - raise ApplicationError("Could not execute the command: ilmtest\n\tError message: "+msg) + raise ApplicationError( + "Could not execute the command: ilmtest\n\tError message: " + msg + ) sys.stdout.flush() for line in result.stdout.split("\n"): - tokens = re.split('[\t ]+',line.strip()) - if len(tokens) == 5 and tokens[0] == 'tokens' and tokens[1] == 'reserved:' and tokens[4] == os.environ.get('USER',None): + tokens = re.split('[\t ]+', line.strip()) + if ( + len(tokens) == 5 + and tokens[0] == 'tokens' + and tokens[1] == 'reserved:' + and tokens[4] == os.environ.get('USER', None) + ): if not (tokens[2] == 'none' or tokens[2] == '0'): return True return False - elif len(tokens) == 3 and tokens[0] == 'available' and tokens[1] == 'tokens:': + elif ( + len(tokens) == 3 + and tokens[0] == 'available' + and tokens[1] == 'tokens:' + ): if tokens[2] == '0': return False break - elif len(tokens) == 6 and tokens[1] == 'server' and tokens[5] == 'DOWN.': + elif ( + len(tokens) == 6 and tokens[1] == 'server' and tokens[5] == 'DOWN.' + ): return False return True diff --git a/pyomo/opt/solver/shellcmd.py b/pyomo/opt/solver/shellcmd.py index 84b74b16344..c45ec94ae1e 100644 --- a/pyomo/opt/solver/shellcmd.py +++ b/pyomo/opt/solver/shellcmd.py @@ -32,20 +32,31 @@ logger = logging.getLogger('pyomo.opt') +# The minimum absolute time (in seconds) to add to the solver timeout +# when setting the timeout for the solver subprocess. This provides +# time for a solver that timed out to clean up / report a solution +# before we forcibly kill the subprocess. +SUBPROCESS_TIMEOUT_ABS_ADJUST = 1 +# The additional time (relative to the user-specified timeout) to add to +# the solver timeout when setting the timeout for the solver subprocess. +# This provides time for a solver that timed out to clean up / report a +# solution before we forcibly kill the subprocess. +SUBPROCESS_TIMEOUT_REL_ADJUST = 0.01 + class SystemCallSolver(OptSolver): - """ A generic command line solver """ + """A generic command line solver""" def __init__(self, **kwargs): - """ Constructor """ + """Constructor""" executable = kwargs.pop('executable', None) validate = kwargs.pop('validate', True) OptSolver.__init__(self, **kwargs) - self._keepfiles = False + self._keepfiles = False self._results_file = None - self._timer = '' + self._timer = '' self._user_executable = None # broadly useful for reporting, and in cases where # a solver plugin may not report execution time. @@ -84,7 +95,8 @@ def set_executable(self, name=None, validate=True): raise ValueError( "Failed to set executable for solver %s to " "its default value. No available solver " - "executable was found." % (self.name)) + "executable was found." % (self.name) + ) return if not validate: @@ -105,15 +117,15 @@ def set_executable(self, name=None, validate=True): "Failed to set executable for solver %s. File " "with name=%s either does not exist or it is " "not executable. To skip this validation, " - "call set_executable with validate=False." - % (self.name, name)) + "call set_executable with validate=False." % (self.name, name) + ) self._user_executable = exe def available(self, exception_flag=False): - """ True if the solver is available """ + """True if the solver is available""" if self._assert_available: return True - if not OptSolver.available(self,exception_flag): + if not OptSolver.available(self, exception_flag): return False try: # HACK: Suppress logged warnings about the executable not @@ -130,11 +142,11 @@ def available(self, exception_flag=False): return False return True - def create_command_line(self,executable,problem_files): + def create_command_line(self, executable, problem_files): """ Create the command line that is executed. """ - raise NotImplementedError #pragma:nocover + raise NotImplementedError # pragma:nocover def process_logfile(self): """ @@ -142,9 +154,9 @@ def process_logfile(self): """ return SolverResults() - def process_soln_file(self,results): + def process_soln_file(self, results): """ - Process auxilliary data files generated by the optimizer (e.g. solution + Process auxiliary data files generated by the optimizer (e.g. solution files) """ return results @@ -188,8 +200,11 @@ def executable(self): """ Returns the executable used by this solver. """ - return self._user_executable if (self._user_executable is not None) else \ - self._default_executable() + return ( + self._user_executable + if (self._user_executable is not None) + else self._default_executable() + ) def _default_executable(self): """ @@ -199,12 +214,12 @@ def _default_executable(self): def _presolve(self, *args, **kwds): """ - Peform presolves. + Perform presolves. """ TempfileManager.push() self._keepfiles = kwds.pop("keepfiles", False) - self._define_signal_handlers = kwds.pop('use_signal_handling',None) + self._define_signal_handlers = kwds.pop('use_signal_handling', None) OptSolver._presolve(self, *args, **kwds) @@ -218,18 +233,15 @@ def _presolve(self, *args, **kwds): # # Create command line # - self._command = self.create_command_line( - self.executable(), self._problem_files) + self._command = self.create_command_line(self.executable(), self._problem_files) - self._log_file=self._command.log_file + self._log_file = self._command.log_file # - # The pre-cleanup is probably unncessary, but also not harmful. + # The pre-cleanup is probably unnecessary, but also not harmful. # - if (self._log_file is not None) and \ - os.path.exists(self._log_file): + if (self._log_file is not None) and os.path.exists(self._log_file): os.remove(self._log_file) - if (self._soln_file is not None) and \ - os.path.exists(self._soln_file): + if (self._soln_file is not None) and os.path.exists(self._soln_file): os.remove(self._soln_file) def _apply_solver(self): @@ -257,12 +269,11 @@ def _apply_solver(self): return Bunch(rc=self._rc, log=self._log) def _postsolve(self): - if self._log_file is not None: - OUTPUT=open(self._log_file,"w") - OUTPUT.write("Solver command line: "+str(self._command.cmd)+'\n') + OUTPUT = open(self._log_file, "w") + OUTPUT.write("Solver command line: " + str(self._command.cmd) + '\n') OUTPUT.write("\n") - OUTPUT.write(self._log+'\n') + OUTPUT.write(self._log + '\n') OUTPUT.close() # JPW: The cleanup of the problem file probably shouldn't be here, but @@ -271,8 +282,7 @@ def _postsolve(self): # class, which I didn't feel like doing at this present time. the # base class remove_files method should clean up the problem file. - if (self._log_file is not None) and \ - (not os.path.exists(self._log_file)): + if (self._log_file is not None) and (not os.path.exists(self._log_file)): msg = "File '%s' not generated while executing %s" raise IOError(msg % (self._log_file, self.path)) results = None @@ -290,15 +300,14 @@ def _postsolve(self): # instead being automatically derived from # the input lp/nl filename. so, we may have # to clean it up manually. - if (not self._soln_file is None) and \ - os.path.exists(self._soln_file): + if (not self._soln_file is None) and os.path.exists(self._soln_file): os.remove(self._soln_file) TempfileManager.pop(remove=not self._keepfiles) return results - def _execute_command(self,command): + def _execute_command(self, command): """ Execute the command """ @@ -312,7 +321,10 @@ def _execute_command(self,command): timeout = self._timelimit if timeout is not None: - timeout += max(1, 0.01*self._timelimit) + timeout += max( + SUBPROCESS_TIMEOUT_ABS_ADJUST, + SUBPROCESS_TIMEOUT_REL_ADJUST * self._timelimit, + ) ostreams = [StringIO()] if self._tee: @@ -343,7 +355,7 @@ def _execute_command(self,command): self._last_solve_time = time.time() - start_time - return [rc,log] + return [rc, log] def process_output(self, rc): """ @@ -355,12 +367,18 @@ def process_output(self, rc): results = self.process_logfile() log_file_completion_time = time.time() if self._report_timing is True: - print(" %6.2f seconds required to read logfile " % (log_file_completion_time - start_time)) + print( + " %6.2f seconds required to read logfile " + % (log_file_completion_time - start_time) + ) if self._results_reader is None: self.process_soln_file(results) soln_file_completion_time = time.time() if self._report_timing is True: - print(" %6.2f seconds required to read solution file " % (soln_file_completion_time - log_file_completion_time)) + print( + " %6.2f seconds required to read solution file " + % (soln_file_completion_time - log_file_completion_time) + ) else: # There is some ambiguity here as to where the solution data # It's natural to expect that the log file contains solution @@ -368,29 +386,34 @@ def process_output(self, rc): # For now, if there is a single solution, then we assume that # the results file is going to add more data to it. if len(results.solution) == 1: - results = self._results_reader(self._results_file, - res=results, - soln=results.solution(0), - suffixes=self._suffixes) + results = self._results_reader( + self._results_file, + res=results, + soln=results.solution(0), + suffixes=self._suffixes, + ) else: - results = self._results_reader(self._results_file, - res=results, - suffixes=self._suffixes) + results = self._results_reader( + self._results_file, res=results, suffixes=self._suffixes + ) results_reader_completion_time = time.time() if self._report_timing is True: - print(" %6.2f seconds required to read solution file" % (results_reader_completion_time - log_file_completion_time)) + print( + " %6.2f seconds required to read solution file" + % (results_reader_completion_time - log_file_completion_time) + ) if rc != None: - results.solver.error_rc=rc + results.solver.error_rc = rc if rc != 0: - results.solver.status=SolverStatus.error + results.solver.status = SolverStatus.error if self._last_solve_time != None: - results.solver.time=self._last_solve_time + results.solver.time = self._last_solve_time return results def _default_results_format(self, prob_format): - """ Returns the default results format for different problem - formats. + """Returns the default results format for different problem + formats. """ return ResultsFormat.soln diff --git a/pyomo/opt/testing/__init__.py b/pyomo/opt/testing/__init__.py index 4ab8b0782b8..5d0d8ebd8d7 100644 --- a/pyomo/opt/testing/__init__.py +++ b/pyomo/opt/testing/__init__.py @@ -9,4 +9,4 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.opt.testing.pyunit import (_failIfPyomoResultsDiffer, TestCase) \ No newline at end of file +from pyomo.opt.testing.pyunit import _failIfPyomoResultsDiffer, TestCase diff --git a/pyomo/opt/testing/pyunit.py b/pyomo/opt/testing/pyunit.py index 5b23f6f769c..527b72cec7a 100644 --- a/pyomo/opt/testing/pyunit.py +++ b/pyomo/opt/testing/pyunit.py @@ -20,6 +20,7 @@ import pyomo.common.unittest as unittest import subprocess + def _failIfPyomoResultsDiffer(self, cmd=None, baseline=None, cwd=None): if cwd is None: cwd = os.path.dirname(os.path.abspath(getfile(self.__class__))) @@ -32,13 +33,16 @@ def _failIfPyomoResultsDiffer(self, cmd=None, baseline=None, cwd=None): INPUT = open(baseline, 'r') baseline = "\n".join(INPUT.readlines()) INPUT.close() - - output = subprocess.run(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + + output = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) finally: os.chdir(oldpwd) - + if output.returncode != 0: self.fail("Command terminated with nonzero status: '%s'" % cmd) # !!THIS SEEMS LIKE A BUG!! - mrmundt # @@ -48,14 +52,16 @@ def _failIfPyomoResultsDiffer(self, cmd=None, baseline=None, cwd=None): compare_results(results, baseline) except IOError: err = sys.exc_info()[1] - self.fail("Command failed to generate results that can be compared with the baseline: '%s'" % err) + self.fail( + "Command failed to generate results that can be compared with the baseline: '%s'" + % err + ) except ValueError: err = sys.exc_info()[1] self.fail("Difference between results and baseline: '%s'" % err) class TestCase(unittest.TestCase): - def __init__(self, methodName='runTest'): unittest.TestCase.__init__(self, methodName) @@ -64,28 +70,42 @@ def failIfPyomoResultsDiffer(self, cmd, baseline, cwd=None): def add_pyomo_results(cls, name=None, cmd=None, fn=None, baseline=None, cwd=None): if cmd is None and fn is None: - print("ERROR: must specify either the 'cmd' or 'fn' option to define how the output file is generated") + print( + "ERROR: must specify either the 'cmd' or 'fn' option to define how the output file is generated" + ) return if name is None and baseline is None: print("ERROR: must specify a baseline comparison file, or the test name") return if baseline is None: - baseline=name+".txt" - tmp = name.replace("/","_") - tmp = tmp.replace("\\","_") - tmp = tmp.replace(".","_") + baseline = name + ".txt" + tmp = name.replace("/", "_") + tmp = tmp.replace("\\", "_") + tmp = tmp.replace(".", "_") # # Create an explicit function so we can assign it a __name__ attribute. # This is needed by the 'nose' package # if fn is None: - func = lambda self,c1=cwd,c2=cmd,c3=tmp+".out",c4=baseline: _failIfPyomoResultsDiffer(self,cwd=c1,cmd=c2,baseline=c4) + func = lambda self, c1=cwd, c2=cmd, c3=tmp + ".out", c4=baseline: _failIfPyomoResultsDiffer( + self, cwd=c1, cmd=c2, baseline=c4 + ) else: # This option isn't implemented... sys.exit(1) - func = lambda self,c1=fn,c2=tmp,c3=baseline: _failIfPyomoResultsDiffer(self,fn=c1,name=c2,baseline=c3) - func.__name__ = "test_"+tmp - func.__doc__ = "pyomo result test: "+func.__name__+ \ - " ("+str(cls.__module__)+'.'+str(cls.__name__)+")" - setattr(cls, "test_"+tmp, func) - add_pyomo_results=classmethod(add_pyomo_results) + func = lambda self, c1=fn, c2=tmp, c3=baseline: _failIfPyomoResultsDiffer( + self, fn=c1, name=c2, baseline=c3 + ) + func.__name__ = "test_" + tmp + func.__doc__ = ( + "pyomo result test: " + + func.__name__ + + " (" + + str(cls.__module__) + + '.' + + str(cls.__name__) + + ")" + ) + setattr(cls, "test_" + tmp, func) + + add_pyomo_results = classmethod(add_pyomo_results) diff --git a/pyomo/opt/tests/base/test_ampl.py b/pyomo/opt/tests/base/test_ampl.py index ebc342c5f0c..1baffcbb0af 100644 --- a/pyomo/opt/tests/base/test_ampl.py +++ b/pyomo/opt/tests/base/test_ampl.py @@ -30,12 +30,13 @@ currdir = this_file_dir() deleteFiles = True -class Test(unittest.TestCase): +class Test(unittest.TestCase): @classmethod def setUpClass(cls): global solvers import pyomo.environ + solvers = pyomo.opt.check_available_solvers('glpk') def setUp(self): @@ -45,7 +46,7 @@ def tearDown(self): TempfileManager.pop(remove=deleteFiles or self.currentTestPassed()) def test3_write_nl(self): - """ Convert from AMPL to NL """ + """Convert from AMPL to NL""" self.model = pyomo.opt.AmplModel(join(currdir, 'test3.mod')) """ Convert from MOD+DAT to NL """ _test = TempfileManager.create_tempfile(suffix='test3.nl') @@ -54,29 +55,32 @@ def test3_write_nl(self): except ApplicationError: err = sys.exc_info()[1] if pyomo.common.Executable("ampl"): - self.fail("Unexpected ApplicationError - ampl is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ApplicationError - ampl is enabled " + "but not available: '%s'" % str(err) + ) return except pyomo.opt.ConverterError: err = sys.exc_info()[1] if pyomo.common.Executable("ampl"): - self.fail("Unexpected ConverterError - ampl is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ConverterError - ampl is enabled " + "but not available: '%s'" % str(err) + ) return _base = join(currdir, 'test3.baseline.nl') with open(_test, 'r') as run, open(_base, 'r') as baseline: for line1, line2 in zip_longest(run, baseline): for _pattern in ('# problem',): if line1.find(_pattern) >= 0: - line1 = line1[:line1.find(_pattern)+len(_pattern)] - line2 = line2[:line2.find(_pattern)+len(_pattern)] + line1 = line1[: line1.find(_pattern) + len(_pattern)] + line2 = line2[: line2.find(_pattern) + len(_pattern)] self.assertEqual( - line1, line2, - msg="Files %s and %s differ" % (_test, _base) + line1, line2, msg="Files %s and %s differ" % (_test, _base) ) def test3_write_lp(self): - """ Convert from AMPL to LP """ + """Convert from AMPL to LP""" self.model = pyomo.opt.AmplModel(join(currdir, 'test3.mod')) _test = TempfileManager.create_tempfile(suffix='test3.lp') try: @@ -84,29 +88,32 @@ def test3_write_lp(self): except ApplicationError: err = sys.exc_info()[1] if pyomo.common.Executable("glpsol"): - self.fail("Unexpected ApplicationError - glpsol is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ApplicationError - glpsol is enabled " + "but not available: '%s'" % str(err) + ) return except pyomo.opt.ConverterError: err = sys.exc_info()[1] if pyomo.common.Executable("glpsol"): - self.fail("Unexpected ConverterError - glpsol is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ConverterError - glpsol is enabled " + "but not available: '%s'" % str(err) + ) return _base = join(currdir, 'test3.baseline.lp') with open(_test, 'r') as run, open(_base, 'r') as baseline: for line1, line2 in zip_longest(run, baseline): for _pattern in ('Problem:',): if line1.find(_pattern) >= 0: - line1 = line1[:line1.find(_pattern)+len(_pattern)] - line2 = line2[:line2.find(_pattern)+len(_pattern)] + line1 = line1[: line1.find(_pattern) + len(_pattern)] + line2 = line2[: line2.find(_pattern) + len(_pattern)] self.assertEqual( - line1, line2, - msg="Files %s and %s differ" % (_test, _base) + line1, line2, msg="Files %s and %s differ" % (_test, _base) ) def test3_write_mps(self): - """ Convert from AMPL to MPS """ + """Convert from AMPL to MPS""" if not pyomo.common.Executable("ampl"): self.skipTest("The ampl executable is not available") self.model = pyomo.opt.AmplModel(join(currdir, 'test3.mod')) @@ -116,120 +123,135 @@ def test3_write_mps(self): except ApplicationError: err = sys.exc_info()[1] if pyomo.common.Executable("ampl"): - self.fail("Unexpected ApplicationError - ampl is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ApplicationError - ampl is enabled " + "but not available: '%s'" % str(err) + ) return except pyomo.opt.ConverterError: err = sys.exc_info()[1] if pyomo.common.Executable("ampl"): - self.fail("Unexpected ConverterError - ampl is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ConverterError - ampl is enabled " + "but not available: '%s'" % str(err) + ) return _base = join(currdir, 'test3.baseline.mps') with open(_test, 'r') as run, open(_base, 'r') as baseline: for line1, line2 in zip_longest(run, baseline): for _pattern in ('NAME',): if line1.find(_pattern) >= 0: - line1 = line1[:line1.find(_pattern)+len(_pattern)] - line2 = line2[:line2.find(_pattern)+len(_pattern)] + line1 = line1[: line1.find(_pattern) + len(_pattern)] + line2 = line2[: line2.find(_pattern) + len(_pattern)] self.assertEqual( - line1, line2, - msg="Files %s and %s differ" % (_test, _base) + line1, line2, msg="Files %s and %s differ" % (_test, _base) ) def test3a_write_nl(self): - """ Convert from AMPL to NL """ + """Convert from AMPL to NL""" self.model = pyomo.opt.AmplModel( - join(currdir, 'test3a.mod'), join(currdir, 'test3a.dat')) + join(currdir, 'test3a.mod'), join(currdir, 'test3a.dat') + ) _test = TempfileManager.create_tempfile(suffix='test3a.nl') try: self.model.write(_test) except ApplicationError: err = sys.exc_info()[1] if pyomo.common.Executable("ampl"): - self.fail("Unexpected ApplicationError - ampl is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ApplicationError - ampl is enabled " + "but not available: '%s'" % str(err) + ) return except pyomo.opt.ConverterError: err = sys.exc_info()[1] if pyomo.common.Executable("ampl"): - self.fail("Unexpected ConverterError - ampl is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ConverterError - ampl is enabled " + "but not available: '%s'" % str(err) + ) return _base = join(currdir, 'test3.baseline.nl') with open(_test, 'r') as run, open(_base, 'r') as baseline: for line1, line2 in zip_longest(run, baseline): for _pattern in ('# problem',): if line1.find(_pattern) >= 0: - line1 = line1[:line1.find(_pattern)+len(_pattern)] - line2 = line2[:line2.find(_pattern)+len(_pattern)] + line1 = line1[: line1.find(_pattern) + len(_pattern)] + line2 = line2[: line2.find(_pattern) + len(_pattern)] self.assertEqual( - line1, line2, - msg="Files %s and %s differ" % (_test, _base) + line1, line2, msg="Files %s and %s differ" % (_test, _base) ) def test3a_write_lp(self): - """ Convert from AMPL to LP """ + """Convert from AMPL to LP""" self.model = pyomo.opt.AmplModel( - join(currdir, 'test3a.mod'), join(currdir, 'test3a.dat')) + join(currdir, 'test3a.mod'), join(currdir, 'test3a.dat') + ) _test = TempfileManager.create_tempfile(suffix='test3a.lp') try: self.model.write(_test) except ApplicationError: err = sys.exc_info()[1] if pyomo.common.Executable("glpsol"): - self.fail("Unexpected ApplicationError - glpsol is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ApplicationError - glpsol is enabled " + "but not available: '%s'" % str(err) + ) return except pyomo.opt.ConverterError: err = sys.exc_info()[1] if pyomo.common.Executable("glpsol"): - self.fail("Unexpected ConverterError - glpsol is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ConverterError - glpsol is enabled " + "but not available: '%s'" % str(err) + ) return _base = join(currdir, 'test3.baseline.lp') with open(_test, 'r') as run, open(_base, 'r') as baseline: for line1, line2 in zip_longest(run, baseline): for _pattern in ('Problem:',): if line1.find(_pattern) >= 0: - line1 = line1[:line1.find(_pattern)+len(_pattern)] - line2 = line2[:line2.find(_pattern)+len(_pattern)] + line1 = line1[: line1.find(_pattern) + len(_pattern)] + line2 = line2[: line2.find(_pattern) + len(_pattern)] self.assertEqual( - line1, line2, - msg="Files %s and %s differ" % (_test, _base) + line1, line2, msg="Files %s and %s differ" % (_test, _base) ) def test3a_write_mps(self): - """ Convert from AMPL to MPS """ + """Convert from AMPL to MPS""" if not pyomo.common.Executable("ampl"): self.skipTest("The ampl executable is not available") self.model = pyomo.opt.AmplModel( - join(currdir, 'test3a.mod'), join(currdir, 'test3a.dat')) + join(currdir, 'test3a.mod'), join(currdir, 'test3a.dat') + ) _test = TempfileManager.create_tempfile(suffix='test3a.mps') try: self.model.write(_test) except ApplicationError: err = sys.exc_info()[1] if pyomo.common.Executable("ampl"): - self.fail("Unexpected ApplicationError - ampl is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ApplicationError - ampl is enabled " + "but not available: '%s'" % str(err) + ) return except pyomo.opt.ConverterError: err = sys.exc_info()[1] if pyomo.common.Executable("ampl"): - self.fail("Unexpected ConverterError - ampl is enabled " - "but not available: '%s'" % str(err)) + self.fail( + "Unexpected ConverterError - ampl is enabled " + "but not available: '%s'" % str(err) + ) return _base = join(currdir, 'test3.baseline.mps') with open(_test, 'r') as run, open(_base, 'r') as baseline: for line1, line2 in zip_longest(run, baseline): for _pattern in ('NAME',): if line1.find(_pattern) >= 0: - line1 = line1[:line1.find(_pattern)+len(_pattern)] - line2 = line2[:line2.find(_pattern)+len(_pattern)] + line1 = line1[: line1.find(_pattern) + len(_pattern)] + line2 = line2[: line2.find(_pattern) + len(_pattern)] self.assertEqual( - line1, line2, - msg="Files %s and %s differ" % (_test, _base) + line1, line2, msg="Files %s and %s differ" % (_test, _base) ) def test3_solve(self): @@ -240,29 +262,31 @@ def test3_solve(self): _test = TempfileManager.create_tempfile(suffix='test3.out') results = opt.solve(self.model, keepfiles=False) results.write(filename=_test, format='json') - with open(_test, 'r') as out, \ - open(join(currdir,"test3.baseline.out"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-6, - allow_second_superset=True) + with open(_test, 'r') as out, open( + join(currdir, "test3.baseline.out"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-6, allow_second_superset=True + ) def test3a_solve(self): if not 'glpk' in solvers: self.skipTest("glpk solver is not available") self.model = pyomo.opt.AmplModel( - join(currdir, 'test3a.mod'), join(currdir, 'test3a.dat')) + join(currdir, 'test3a.mod'), join(currdir, 'test3a.dat') + ) opt = pyomo.opt.SolverFactory('glpk') results = opt.solve(self.model, keepfiles=False) _test = TempfileManager.create_tempfile(suffix='test3a.out') results.write(filename=_test, format='json') - with open(_test, 'r') as out, \ - open(join(currdir,"test3.baseline.out"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-6, - allow_second_superset=True) + with open(_test, 'r') as out, open( + join(currdir, "test3.baseline.out"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-6, allow_second_superset=True + ) if __name__ == "__main__": deleteFiles = False unittest.main() - diff --git a/pyomo/opt/tests/base/test_convert.py b/pyomo/opt/tests/base/test_convert.py index c58282670dd..f8f0bef0fe4 100644 --- a/pyomo/opt/tests/base/test_convert.py +++ b/pyomo/opt/tests/base/test_convert.py @@ -14,8 +14,9 @@ import os from os.path import abspath, dirname -pyomodir = dirname(abspath(__file__))+os.sep+".."+os.sep+".."+os.sep -currdir = dirname(abspath(__file__))+os.sep + +pyomodir = dirname(abspath(__file__)) + os.sep + ".." + os.sep + ".." + os.sep +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest @@ -26,51 +27,50 @@ old_tempdir = TempfileManager.tempdir -class MockArg(object): +class MockArg(object): def __init__(self): pass def valid_problem_types(self): return [pyomo.opt.ProblemFormat.pyomo] - def write(self,filename="", format=None): + def write(self, filename="", format=None): pass -class MockArg2(MockArg): +class MockArg2(MockArg): def valid_problem_types(self): return [pyomo.opt.ProblemFormat.nl] - def write(self,filename="", format=None): - OUTPUT=open(filename,"w") - INPUT=open(currdir+"test4.nl") + def write(self, filename="", format=None): + OUTPUT = open(filename, "w") + INPUT = open(currdir + "test4.nl") for line in INPUT: - print >>OUTPUT, line, + print >> OUTPUT, line, OUTPUT.close() INPUT.close() -class MockArg3(MockArg): +class MockArg3(MockArg): def valid_problem_types(self): return [pyomo.opt.ProblemFormat.mod] - def write(self,filename="", format=None): + def write(self, filename="", format=None): pass -class MockArg4(MockArg): - def write(self,filename="", format=None): - OUTPUT=open(filename,"w") - INPUT=open(currdir+"test4.nl") +class MockArg4(MockArg): + def write(self, filename="", format=None): + OUTPUT = open(filename, "w") + INPUT = open(currdir + "test4.nl") for line in INPUT: - print >>OUTPUT, line, + print >> OUTPUT, line, OUTPUT.close() INPUT.close() class OptConvertDebug(unittest.TestCase): - def setUp(self): TempfileManager.push() TempfileManager.tempdir = currdir @@ -80,101 +80,133 @@ def tearDown(self): TempfileManager.tempdir = old_tempdir def test_nl_nl1(self): - """ Convert from NL to NL """ - ans = pyomo.opt.convert_problem( ("test4.nl",), None, [pyomo.opt.ProblemFormat.nl]) - self.assertEqual(ans[0],("test4.nl",)) + """Convert from NL to NL""" + ans = pyomo.opt.convert_problem( + ("test4.nl",), None, [pyomo.opt.ProblemFormat.nl] + ) + self.assertEqual(ans[0], ("test4.nl",)) def test_nl_nl2(self): - """ Convert from NL to NL """ - ans = pyomo.opt.convert_problem( ("test4.nl","tmp.nl"), None, [pyomo.opt.ProblemFormat.nl]) - self.assertEqual(ans[0],("test4.nl","tmp.nl")) + """Convert from NL to NL""" + ans = pyomo.opt.convert_problem( + ("test4.nl", "tmp.nl"), None, [pyomo.opt.ProblemFormat.nl] + ) + self.assertEqual(ans[0], ("test4.nl", "tmp.nl")) def test_error1(self): - """ No valid problem types """ + """No valid problem types""" try: - pyomo.opt.convert_problem( ("test4.nl","tmp.nl"), pyomo.opt.ProblemFormat.nl, []) + pyomo.opt.convert_problem( + ("test4.nl", "tmp.nl"), pyomo.opt.ProblemFormat.nl, [] + ) self.fail("Expected pyomo.opt.ConverterError exception") except pyomo.opt.ConverterError: pass def test_error2(self): - """ Target problem type is not valid """ + """Target problem type is not valid""" try: - pyomo.opt.convert_problem( ("test4.nl","tmp.nl"), pyomo.opt.ProblemFormat.nl, [pyomo.opt.ProblemFormat.mps]) + pyomo.opt.convert_problem( + ("test4.nl", "tmp.nl"), + pyomo.opt.ProblemFormat.nl, + [pyomo.opt.ProblemFormat.mps], + ) self.fail("Expected pyomo.opt.ConverterError exception") except pyomo.opt.ConverterError: pass def test_error3(self): - """ Empty argument list """ + """Empty argument list""" try: - pyomo.opt.convert_problem( (), None, [pyomo.opt.ProblemFormat.mps]) + pyomo.opt.convert_problem((), None, [pyomo.opt.ProblemFormat.mps]) self.fail("Expected pyomo.opt.ConverterError exception") except pyomo.opt.ConverterError: pass def test_error4(self): - """ Unknown source type """ + """Unknown source type""" try: - pyomo.opt.convert_problem( ("prob.foo",), None, [pyomo.opt.ProblemFormat.mps]) + pyomo.opt.convert_problem( + ("prob.foo",), None, [pyomo.opt.ProblemFormat.mps] + ) self.fail("Expected pyomo.opt.ConverterError exception") except pyomo.opt.ConverterError: pass def test_error5(self): - """ Unknown source type """ + """Unknown source type""" try: - pyomo.opt.convert_problem( ("prob.lp",), pyomo.opt.ProblemFormat.nl, [pyomo.opt.ProblemFormat.nl]) + pyomo.opt.convert_problem( + ("prob.lp",), pyomo.opt.ProblemFormat.nl, [pyomo.opt.ProblemFormat.nl] + ) self.fail("Expected pyomo.opt.ConverterError exception") except pyomo.opt.ConverterError: pass def test_error6(self): - """ Cannot use pico_convert with more than one file """ + """Cannot use pico_convert with more than one file""" try: - ans = pyomo.opt.convert_problem( (currdir+"test4.nl","foo"), None, [pyomo.opt.ProblemFormat.cpxlp]) + ans = pyomo.opt.convert_problem( + (currdir + "test4.nl", "foo"), None, [pyomo.opt.ProblemFormat.cpxlp] + ) self.fail("Expected pyomo.opt.ConverterError exception") except pyomo.opt.ConverterError: pass def test_error8(self): - """ Error when source file cannot be found """ + """Error when source file cannot be found""" try: - ans = pyomo.opt.convert_problem( (currdir+"unknown.nl",), None, [pyomo.opt.ProblemFormat.cpxlp]) + ans = pyomo.opt.convert_problem( + (currdir + "unknown.nl",), None, [pyomo.opt.ProblemFormat.cpxlp] + ) self.fail("Expected pyomo.opt.ConverterError exception") except ApplicationError: if pyomo.common.Executable("pico_convert"): - self.fail("Expected ApplicationError because pico_convert is not available") + self.fail( + "Expected ApplicationError because pico_convert is not available" + ) return except pyomo.opt.ConverterError: pass def test_error9(self): - """ The Opt configuration has not been initialized """ + """The Opt configuration has not been initialized""" cmd = pyomo.common.Executable("pico_convert") if cmd: cmd.disable() try: - ans = pyomo.opt.convert_problem( (currdir+"test4.nl",), None, [pyomo.opt.ProblemFormat.cpxlp]) + ans = pyomo.opt.convert_problem( + (currdir + "test4.nl",), None, [pyomo.opt.ProblemFormat.cpxlp] + ) self.fail("This test didn't fail, but pico_convert should not be defined.") except pyomo.opt.ConverterError: pass cmd.rehash() def test_error10(self): - """ GLPSOL can only convert file data """ + """GLPSOL can only convert file data""" try: arg = MockArg3() - ans = pyomo.opt.convert_problem( (arg,pyomo.opt.ProblemFormat.cpxlp,arg), None, [pyomo.opt.ProblemFormat.cpxlp]) + ans = pyomo.opt.convert_problem( + (arg, pyomo.opt.ProblemFormat.cpxlp, arg), + None, + [pyomo.opt.ProblemFormat.cpxlp], + ) self.fail("This test didn't fail, but glpsol cannot handle objects.") except pyomo.opt.ConverterError: pass def test_error11(self): - """ Cannot convert MOD that contains data """ + """Cannot convert MOD that contains data""" try: - ans = pyomo.opt.convert_problem( (currdir+"test3.mod",currdir+"test5.dat"), None, [pyomo.opt.ProblemFormat.cpxlp]) - self.fail("Expected pyomo.opt.ConverterError exception because we provided a MOD file with a 'data;' declaration") + ans = pyomo.opt.convert_problem( + (currdir + "test3.mod", currdir + "test5.dat"), + None, + [pyomo.opt.ProblemFormat.cpxlp], + ) + self.fail( + "Expected pyomo.opt.ConverterError exception because we provided a MOD file with a 'data;' declaration" + ) except ApplicationError: if pyomo.common.Executable("glpsol"): self.fail("Expected ApplicationError because glpsol is not available") @@ -182,5 +214,6 @@ def test_error11(self): except pyomo.opt.ConverterError: pass + if __name__ == "__main__": unittest.main() diff --git a/pyomo/opt/tests/base/test_factory.py b/pyomo/opt/tests/base/test_factory.py index 4e475c808f5..ab2a64a6330 100644 --- a/pyomo/opt/tests/base/test_factory.py +++ b/pyomo/opt/tests/base/test_factory.py @@ -14,8 +14,9 @@ import os from os.path import abspath, dirname -pyomodir = dirname(abspath(__file__))+"/../.." -currdir = dirname(abspath(__file__))+os.sep + +pyomodir = dirname(abspath(__file__)) + "/../.." +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest @@ -28,33 +29,29 @@ class MockWriter(pyomo.opt.AbstractProblemWriter): - def __init__(self, name=None): - pyomo.opt.AbstractProblemWriter.__init__(self,name) + pyomo.opt.AbstractProblemWriter.__init__(self, name) class MockReader(pyomo.opt.AbstractResultsReader): - def __init__(self, name=None): - pyomo.opt.AbstractResultsReader.__init__(self,name) + pyomo.opt.AbstractResultsReader.__init__(self, name) class MockSolver(pyomo.opt.OptSolver): - def __init__(self, **kwds): kwds['type'] = 'stest_type' kwds['doc'] = 'MockSolver Documentation' - pyomo.opt.OptSolver.__init__(self,**kwds) + pyomo.opt.OptSolver.__init__(self, **kwds) class Test(unittest.TestCase): - @classmethod def setUpClass(cls): import pyomo.environ def run(self, result=None): - unittest.TestCase.run(self,result) + unittest.TestCase.run(self, result) def setUp(self): pyomo.opt.WriterFactory.register('wtest')(MockWriter) @@ -70,63 +67,63 @@ def tearDown(self): pyomo.opt.SolverFactory.unregister('stest') def test_solver_factory(self): - #""" - #Testing the pyomo.opt solver factory - #""" + # """ + # Testing the pyomo.opt solver factory + # """ ans = sorted(list(pyomo.opt.SolverFactory)) - #self.assertEqual(len(ans),8) + # self.assertEqual(len(ans),8) self.assertTrue(set(['stest']) <= set(ans)) def test_solver_instance(self): - #""" - #Testing that we get a specific solver instance - #""" + # """ + # Testing that we get a specific solver instance + # """ ans = pyomo.opt.SolverFactory("none") self.assertTrue(isinstance(ans, UnknownSolver)) ans = pyomo.opt.SolverFactory("stest") self.assertEqual(type(ans), MockSolver) ans = pyomo.opt.SolverFactory("stest", name="mymock") self.assertEqual(type(ans), MockSolver) - self.assertEqual(ans.name, "mymock") + self.assertEqual(ans.name, "mymock") def test_writer_factory(self): - #""" - #Testing the pyomo.opt writer factory - #""" + # """ + # Testing the pyomo.opt writer factory + # """ factory = pyomo.opt.WriterFactory self.assertTrue(set(['wtest']) <= set(factory)) def test_writer_instance(self): - #""" - #Testing that we get a specific writer instance + # """ + # Testing that we get a specific writer instance # - #Note: this simply provides code coverage right now, but - #later it should be adapted to generate a specific writer. - #""" + # Note: this simply provides code coverage right now, but + # later it should be adapted to generate a specific writer. + # """ ans = pyomo.opt.WriterFactory("none") self.assertEqual(ans, None) ans = pyomo.opt.WriterFactory("wtest") self.assertNotEqual(ans, None) - def test_reader_factory(self): - #""" - #Testing the pyomo.opt reader factory - #""" + # """ + # Testing the pyomo.opt reader factory + # """ ans = pyomo.opt.ReaderFactory self.assertTrue(set(ans) >= set(["rtest", "sol", "yaml", "json"])) def test_reader_instance(self): - #""" - #Testing that we get a specific reader instance - #""" + # """ + # Testing that we get a specific reader instance + # """ ans = pyomo.opt.ReaderFactory("none") self.assertEqual(ans, None) ans = pyomo.opt.ReaderFactory("sol") self.assertEqual(type(ans), pyomo.opt.plugins.sol.ResultsReader_sol) - #ans = pyomo.opt.ReaderFactory("osrl", "myreader") - #self.assertEqual(type(ans), pyomo.opt.reader.OS.ResultsReader_osrl) - #self.assertEqual(ans.name, "myreader") + # ans = pyomo.opt.ReaderFactory("osrl", "myreader") + # self.assertEqual(type(ans), pyomo.opt.reader.OS.ResultsReader_osrl) + # self.assertEqual(ans.name, "myreader") + if __name__ == "__main__": unittest.main() diff --git a/pyomo/opt/tests/base/test_sol.py b/pyomo/opt/tests/base/test_sol.py index d0d6ec04cb1..ff233b42a43 100644 --- a/pyomo/opt/tests/base/test_sol.py +++ b/pyomo/opt/tests/base/test_sol.py @@ -27,14 +27,14 @@ SolutionStatus, SolverStatus, check_optimal_termination, - assert_optimal_termination + assert_optimal_termination, ) currdir = this_file_dir() deleteFiles = True -class Test(unittest.TestCase): +class Test(unittest.TestCase): @classmethod def setUpClass(cls): import pyomo.environ @@ -52,23 +52,24 @@ def test_factory(self): soln = reader(join(currdir, "test4_sol.sol"), suffixes=["dual"]) _test = TempfileManager.create_tempfile('factory.txt') soln.write(filename=_test, format='json') - with open(_test, 'r') as out, \ - open(join(currdir, "test4_sol.jsn"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - allow_second_superset=True) + with open(_test, 'r') as out, open( + join(currdir, "test4_sol.jsn"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), allow_second_superset=True + ) def test_infeasible1(self): with ReaderFactory("sol") as reader: if reader is None: raise IOError("Reader 'sol' is not registered") soln = reader(join(currdir, "infeasible1.sol")) - self.assertEqual(soln.solver.termination_condition, - TerminationCondition.infeasible) - self.assertEqual(soln.solution.status, - SolutionStatus.infeasible) - self.assertEqual(soln.solver.status, - SolverStatus.warning) - + self.assertEqual( + soln.solver.termination_condition, TerminationCondition.infeasible + ) + self.assertEqual(soln.solution.status, SolutionStatus.infeasible) + self.assertEqual(soln.solver.status, SolverStatus.warning) + self.assertFalse(check_optimal_termination(soln)) with self.assertRaises(RuntimeError): @@ -79,24 +80,22 @@ def test_infeasible2(self): if reader is None: raise IOError("Reader 'sol' is not registered") soln = reader(join(currdir, "infeasible2.sol")) - self.assertEqual(soln.solver.termination_condition, - TerminationCondition.infeasible) - self.assertEqual(soln.solution.status, - SolutionStatus.infeasible) - self.assertEqual(soln.solver.status, - SolverStatus.warning) + self.assertEqual( + soln.solver.termination_condition, TerminationCondition.infeasible + ) + self.assertEqual(soln.solution.status, SolutionStatus.infeasible) + self.assertEqual(soln.solver.status, SolverStatus.warning) def test_conopt_optimal(self): with ReaderFactory("sol") as reader: if reader is None: raise IOError("Reader 'sol' is not registered") soln = reader(join(currdir, "conopt_optimal.sol")) - self.assertEqual(soln.solver.termination_condition, - TerminationCondition.optimal) - self.assertEqual(soln.solution.status, - SolutionStatus.optimal) - self.assertEqual(soln.solver.status, - SolverStatus.ok) + self.assertEqual( + soln.solver.termination_condition, TerminationCondition.optimal + ) + self.assertEqual(soln.solution.status, SolutionStatus.optimal) + self.assertEqual(soln.solver.status, SolverStatus.ok) self.assertTrue(check_optimal_termination(soln)) assert_optimal_termination(soln) @@ -125,8 +124,9 @@ def test_iis_no_variable_values(self): with ReaderFactory("sol") as reader: if reader is None: raise IOError("Reader 'sol' is not registered") - result = reader(join(currdir, "iis_no_variable_values.sol"), - suffixes=["iis"]) + result = reader( + join(currdir, "iis_no_variable_values.sol"), suffixes=["iis"] + ) soln = result.solution(0) self.assertEqual(len(list(soln.variable['v0'].keys())), 1) self.assertEqual(soln.variable['v0']['iis'], 1) @@ -135,12 +135,14 @@ def test_iis_no_variable_values(self): self.assertEqual(len(list(soln.constraint['c0'].keys())), 1) self.assertEqual(soln.constraint['c0']['Iis'], 4) import pyomo.kernel as pmo + m = pmo.block() m.v0 = pmo.variable() m.v1 = pmo.variable() m.c0 = pmo.constraint() m.iis = pmo.suffix(direction=pmo.suffix.IMPORT) from pyomo.core.expr.symbol_map import SymbolMap + soln.symbol_map = SymbolMap() soln.symbol_map.addSymbol(m.v0, 'v0') soln.symbol_map.addSymbol(m.v1, 'v1') @@ -151,6 +153,7 @@ def test_iis_no_variable_values(self): self.assertEqual(m.iis[m.v1], 1) self.assertEqual(m.iis[m.c0], 4) + if __name__ == "__main__": deleteFiles = False unittest.main() diff --git a/pyomo/opt/tests/base/test_soln.py b/pyomo/opt/tests/base/test_soln.py index 04f92ff0238..0511b3ceb9c 100644 --- a/pyomo/opt/tests/base/test_soln.py +++ b/pyomo/opt/tests/base/test_soln.py @@ -16,8 +16,9 @@ import pickle import os from os.path import abspath, dirname, join -pyomodir = dirname(abspath(__file__))+os.sep+".."+os.sep+".."+os.sep -currdir = dirname(abspath(__file__))+os.sep + +pyomodir = dirname(abspath(__file__)) + os.sep + ".." + os.sep + ".." + os.sep +currdir = dirname(abspath(__file__)) + os.sep from filecmp import cmp import pyomo.common.unittest as unittest @@ -29,15 +30,15 @@ old_tempdir = TempfileManager.tempdir -class Test(unittest.TestCase): +class Test(unittest.TestCase): def setUp(self): TempfileManager.tempdir = currdir self.results = pyomo.opt.SolverResults() self.soln = self.results.solution.add() - self.soln.variable[1]={"Value" : 0} - self.soln.variable[2]={"Value" : 0} - self.soln.variable[4]={"Value" : 0} + self.soln.variable[1] = {"Value": 0} + self.soln.variable[2] = {"Value": 0} + self.soln.variable[4] = {"Value": 0} def tearDown(self): TempfileManager.clear_tempfiles() @@ -45,40 +46,43 @@ def tearDown(self): del self.results def test_write_solution1(self): - """ Write a SolverResults Object with solutions """ + """Write a SolverResults Object with solutions""" self.results.write(filename=join(currdir, "write_solution1.txt")) if not os.path.exists(join(currdir, "write_solution1.txt")): self.fail("test_write_solution - failed to write write_solution1.txt") - _log, _out = join(currdir, "write_solution1.txt"), join(currdir, "test1_soln.txt") - self.assertTrue(cmp(_out, _log), - msg="Files %s and %s differ" % (_out, _log)) + _log, _out = join(currdir, "write_solution1.txt"), join( + currdir, "test1_soln.txt" + ) + self.assertTrue(cmp(_out, _log), msg="Files %s and %s differ" % (_out, _log)) def test_write_solution2(self): - """ Write a SolverResults Object without solutions """ - self.results.write(num=None,filename=join(currdir, "write_solution2.txt")) + """Write a SolverResults Object without solutions""" + self.results.write(num=None, filename=join(currdir, "write_solution2.txt")) if not os.path.exists(join(currdir, "write_solution2.txt")): self.fail("test_write_solution - failed to write write_solution2.txt") - _out, _log = join(currdir, "write_solution2.txt"), join(currdir, "test2_soln.txt") - self.assertTrue(cmp(_out, _log), - msg="Files %s and %s differ" % (_out, _log)) + _out, _log = join(currdir, "write_solution2.txt"), join( + currdir, "test2_soln.txt" + ) + self.assertTrue(cmp(_out, _log), msg="Files %s and %s differ" % (_out, _log)) @unittest.skipIf(not yaml_available, "Cannot import 'yaml'") def test_read_solution1(self): - """ Read a SolverResults Object""" + """Read a SolverResults Object""" self.results = pyomo.opt.SolverResults() self.results.read(filename=join(currdir, "test4_sol.txt")) self.results.write(filename=join(currdir, "read_solution1.out")) if not os.path.exists(join(currdir, "read_solution1.out")): self.fail("test_read_solution1 - failed to write read_solution1.out") - with open(join(currdir, "read_solution1.out"), 'r') as out, \ - open(join(currdir, "test4_sol.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + with open(join(currdir, "read_solution1.out"), 'r') as out, open( + join(currdir, "test4_sol.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) @unittest.skipIf(not yaml_available, "Cannot import 'yaml'") def test_pickle_solution1(self): - """ Read a SolverResults Object""" + """Read a SolverResults Object""" self.results = pyomo.opt.SolverResults() self.results.read(filename=join(currdir, "test4_sol.txt")) str = pickle.dumps(self.results) @@ -86,26 +90,29 @@ def test_pickle_solution1(self): self.results.write(filename=join(currdir, "read_solution1.out")) if not os.path.exists(join(currdir, "read_solution1.out")): self.fail("test_read_solution1 - failed to write read_solution1.out") - with open(join(currdir, "read_solution1.out"), 'r') as out, \ - open(join(currdir, "test4_sol.txt"), 'r') as txt: - self.assertStructuredAlmostEqual(yaml.full_load(txt), - yaml.full_load(out), - allow_second_superset=True) + with open(join(currdir, "read_solution1.out"), 'r') as out, open( + join(currdir, "test4_sol.txt"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + yaml.full_load(txt), yaml.full_load(out), allow_second_superset=True + ) def test_read_solution2(self): - """ Read a SolverResults Object""" + """Read a SolverResults Object""" self.results = pyomo.opt.SolverResults() self.results.read(filename=join(currdir, "test4_sol.jsn"), format='json') self.results.write(filename=join(currdir, "read_solution2.out"), format='json') if not os.path.exists(join(currdir, "read_solution2.out")): self.fail("test_read_solution2 - failed to write read_solution2.out") - with open(join(currdir, "read_solution2.out"), 'r') as out, \ - open(join(currdir, "test4_sol.jsn"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - allow_second_superset=True) + with open(join(currdir, "read_solution2.out"), 'r') as out, open( + join(currdir, "test4_sol.jsn"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), allow_second_superset=True + ) def test_pickle_solution2(self): - """ Read a SolverResults Object""" + """Read a SolverResults Object""" self.results = pyomo.opt.SolverResults() self.results.read(filename=join(currdir, "test4_sol.jsn"), format='json') str = pickle.dumps(self.results) @@ -113,31 +120,34 @@ def test_pickle_solution2(self): self.results.write(filename=join(currdir, "read_solution2.out"), format='json') if not os.path.exists(join(currdir, "read_solution2.out")): self.fail("test_read_solution2 - failed to write read_solution2.out") - with open(join(currdir, "read_solution2.out"), 'r') as out, \ - open(join(currdir, "test4_sol.jsn"), 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - allow_second_superset=True) + with open(join(currdir, "read_solution2.out"), 'r') as out, open( + join(currdir, "test4_sol.jsn"), 'r' + ) as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), allow_second_superset=True + ) # # deleting is not supported right now # def Xtest_delete_solution(self): - """ Delete a solution from a SolverResults object """ + """Delete a solution from a SolverResults object""" self.results.solution.delete(0) self.results.write(filename=join(currdir, "delete_solution.txt")) if not os.path.exists(join(currdir, "delete_solution.txt")): self.fail("test_write_solution - failed to write delete_solution.txt") - _out, _log = join(currdir, "delete_solution.txt"), join(currdir, "test4_soln.txt") - self.assertTrue(cmp(_out, _log), - msg="Files %s and %s differ" % (_out, _log)) + _out, _log = join(currdir, "delete_solution.txt"), join( + currdir, "test4_soln.txt" + ) + self.assertTrue(cmp(_out, _log), msg="Files %s and %s differ" % (_out, _log)) def test_get_solution(self): - """ Get a solution from a SolverResults object """ + """Get a solution from a SolverResults object""" tmp = self.results.solution[0] - self.assertEqual(tmp,self.soln) + self.assertEqual(tmp, self.soln) def test_get_solution_attr_error(self): - """ Create an error with a solution suffix """ + """Create an error with a solution suffix""" try: tmp = self.soln.bad self.fail("Expected attribute error failure for 'bad'") @@ -149,7 +159,7 @@ def test_get_solution_attr_error(self): # soln.variable.value = True # def Xtest_set_solution_attr_error(self): - """ Create an error with a solution suffix """ + """Create an error with a solution suffix""" try: self.soln.variable = True self.fail("Expected attribute error failure for 'variable'") @@ -157,44 +167,44 @@ def Xtest_set_solution_attr_error(self): pass def test_soln_pprint1(self): - """ Write a solution with only zero values, using the results 'write()' method """ - self.soln.variable[1]["Value"]=0.0 - self.soln.variable[2]["Value"]=0.0 - self.soln.variable[4]["Value"]=0.0 + """Write a solution with only zero values, using the results 'write()' method""" + self.soln.variable[1]["Value"] = 0.0 + self.soln.variable[2]["Value"] = 0.0 + self.soln.variable[4]["Value"] = 0.0 self.results.write(filename=join(currdir, "soln_pprint.txt")) if not os.path.exists(join(currdir, "soln_pprint.txt")): self.fail("test_write_solution - failed to write soln_pprint.txt") _out, _log = join(currdir, "soln_pprint.txt"), join(currdir, "test3_soln.txt") - self.assertTrue(cmp(_out, _log), - msg="Files %s and %s differ" % (_out, _log)) + self.assertTrue(cmp(_out, _log), msg="Files %s and %s differ" % (_out, _log)) def test_soln_pprint2(self): - """ Write a solution with only zero values, using the Solution.pprint() method """ - self.soln.variable[1]["Value"]=0.0 - self.soln.variable[2]["Value"]=0.0 - self.soln.variable[4]["Value"]=0.0 + """Write a solution with only zero values, using the Solution.pprint() method""" + self.soln.variable[1]["Value"] = 0.0 + self.soln.variable[2]["Value"] = 0.0 + self.soln.variable[4]["Value"] = 0.0 with open(join(currdir, 'soln_pprint2.out'), 'w') as f: f.write(str(self.soln)) - with open(join(currdir, "soln_pprint2.out"), 'r') as f1, \ - open(join(currdir, "soln_pprint2.txt"), 'r') as f2: - self.assertEqual(f1.read().strip(), - f2.read().strip()) + with open(join(currdir, "soln_pprint2.out"), 'r') as f1, open( + join(currdir, "soln_pprint2.txt"), 'r' + ) as f2: + self.assertEqual(f1.read().strip(), f2.read().strip()) def test_soln_suffix_getiter(self): - self.soln.variable[1]["Value"]=0.0 - self.soln.variable[2]["Value"]=0.1 - self.soln.variable[4]["Value"]=0.3 - self.assertEqual(self.soln.variable[4]["Value"],0.3) - self.assertEqual(self.soln.variable[2]["Value"],0.1) + self.soln.variable[1]["Value"] = 0.0 + self.soln.variable[2]["Value"] = 0.1 + self.soln.variable[4]["Value"] = 0.3 + self.assertEqual(self.soln.variable[4]["Value"], 0.3) + self.assertEqual(self.soln.variable[2]["Value"], 0.1) def test_soln_suffix_setattr(self): self.soln.variable[1]["Value"] = 0.0 - self.soln.variable[4]["Value"] =0.3 + self.soln.variable[4]["Value"] = 0.3 self.soln.variable[4]["Slack"] = 0.4 - self.assertEqual(list(self.soln.variable.keys()),[1,2,4]) - self.assertEqual(self.soln.variable[1]["Value"],0.0) - self.assertEqual(self.soln.variable[4]["Value"],0.3) - self.assertEqual(self.soln.variable[4]["Slack"],0.4) + self.assertEqual(list(self.soln.variable.keys()), [1, 2, 4]) + self.assertEqual(self.soln.variable[1]["Value"], 0.0) + self.assertEqual(self.soln.variable[4]["Value"], 0.3) + self.assertEqual(self.soln.variable[4]["Slack"], 0.4) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/opt/tests/base/test_solver.py b/pyomo/opt/tests/base/test_solver.py index 12db9749463..73d6067efe4 100644 --- a/pyomo/opt/tests/base/test_solver.py +++ b/pyomo/opt/tests/base/test_solver.py @@ -14,8 +14,9 @@ import os from os.path import abspath, dirname -pyomodir = dirname(abspath(__file__))+"/../.." -currdir = dirname(abspath(__file__))+os.sep + +pyomodir = dirname(abspath(__file__)) + "/../.." +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest from pyomo.common.tempfiles import TempfileManager @@ -26,15 +27,13 @@ class MockSolver1(pyomo.opt.OptSolver): - def __init__(self, **kwds): kwds['type'] = 'stest_type' kwds['doc'] = 'MockSolver1 Documentation' - pyomo.opt.OptSolver.__init__(self,**kwds) + pyomo.opt.OptSolver.__init__(self, **kwds) class OptSolverDebug(unittest.TestCase): - def setUp(self): pyomo.opt.SolverFactory.register('stest1')(MockSolver1) TempfileManager.tempdir = currdir @@ -44,7 +43,6 @@ def tearDown(self): TempfileManager.clear_tempfiles() TempfileManager.tempdir = old_tempdir - def test_solver_init1(self): """ Verify the processing of 'type', 'name' and 'doc' options @@ -99,7 +97,9 @@ def test_set_problem_format(self): except ValueError: pass else: - self.fail("Should not be able to set the problem format undless it's declared as valid.") + self.fail( + "Should not be able to set the problem format undless it's declared as valid." + ) opt._valid_problem_formats = ['a'] self.assertEqual(opt.results_format(), None) opt.set_problem_format('a') @@ -109,15 +109,17 @@ def test_set_problem_format(self): def test_set_results_format(self): opt = pyomo.opt.SolverFactory("stest1") opt._valid_problem_formats = ['a'] - opt._valid_results_formats = {'a':'b'} + opt._valid_results_formats = {'a': 'b'} self.assertEqual(opt.problem_format(), None) try: opt.set_results_format('b') except ValueError: pass else: - self.fail("Should not be able to set the results format unless it's "\ - "declared as valid for the current problem format.") + self.fail( + "Should not be able to set the results format unless it's " + "declared as valid for the current problem format." + ) opt.set_problem_format('a') self.assertEqual(opt.problem_format(), 'a') opt.set_results_format('b') diff --git a/pyomo/opt/tests/solver/test_shellcmd.py b/pyomo/opt/tests/solver/test_shellcmd.py index 708be68b0bf..f71fcf07c6d 100644 --- a/pyomo/opt/tests/solver/test_shellcmd.py +++ b/pyomo/opt/tests/solver/test_shellcmd.py @@ -30,24 +30,20 @@ exedir_user = exedir if user_home_pos >= 0: _test_user_exedir = os.path.join( - user_home, exedir[user_home_pos + len(user_home) + 1:]) + user_home, exedir[user_home_pos + len(user_home) + 1 :] + ) if os.path.samefile(exedir, _test_user_exedir): - exedir_user = os.path.join( - "~", exedir[user_home_pos + len(user_home) + 1:]) + exedir_user = os.path.join("~", exedir[user_home_pos + len(user_home) + 1 :]) -notexe_nopath = "file_not_executable" +notexe_nopath = "file_not_executable" notexe_abspath = os.path.join(exedir, notexe_nopath) notexe_abspath_user = os.path.join(exedir_user, notexe_nopath) -notexe_relpath = (os.path.curdir + os.path.sep + - exedirname + os.path.sep + - notexe_nopath) +notexe_relpath = os.path.curdir + os.path.sep + exedirname + os.path.sep + notexe_nopath -isexe_nopath = "file_is_executable" +isexe_nopath = "file_is_executable" isexe_abspath = os.path.join(exedir, isexe_nopath) isexe_abspath_user = os.path.join(exedir_user, isexe_nopath) -isexe_relpath = (os.path.curdir + os.path.sep + \ - exedirname + os.path.sep + - isexe_nopath) +isexe_relpath = os.path.curdir + os.path.sep + exedirname + os.path.sep + isexe_nopath # Names to test the "executable" functionality with through the # SolverFactory. These tests are necessary due to logic that is in @@ -59,7 +55,6 @@ class TestSystemCallSolver(unittest.TestCase): - @classmethod def setUpClass(cls): import pyomo.environ @@ -120,7 +115,7 @@ def test_available(self): # a partial implementation with self.assertRaises(ApplicationError): opt.available(exception_flag=True) - #with self.assertRaises(ApplicationError): + # with self.assertRaises(ApplicationError): # opt.available(exception_flag=False) def test_reset_executable(self): @@ -144,8 +139,10 @@ def test_set_executable_notexe_nopath(self): self.assertEqual(opt._user_executable, notexe_nopath) self.assertEqual(opt.executable(), notexe_nopath) - - @unittest.skipIf(is_windows, "Skipping test because it requires testing if a file is executable on Windows") + @unittest.skipIf( + is_windows, + "Skipping test because it requires testing if a file is executable on Windows", + ) def test_set_executable_notexe_relpath(self): with SystemCallSolver(type='test') as opt: self.assertEqual(id(opt._user_executable), id(None)) @@ -156,7 +153,10 @@ def test_set_executable_notexe_relpath(self): self.assertEqual(opt._user_executable, notexe_relpath) self.assertEqual(opt.executable(), notexe_relpath) - @unittest.skipIf(is_windows, "Skipping test because it requires testing if a file is executable on Windows") + @unittest.skipIf( + is_windows, + "Skipping test because it requires testing if a file is executable on Windows", + ) def test_set_executable_notexe_abspath(self): with SystemCallSolver(type='test') as opt: self.assertEqual(id(opt._user_executable), id(None)) @@ -167,7 +167,10 @@ def test_set_executable_notexe_abspath(self): self.assertEqual(opt._user_executable, notexe_abspath) self.assertEqual(opt.executable(), notexe_abspath) - @unittest.skipIf(is_windows, "Skipping test because it requires testing if a file is executable on Windows") + @unittest.skipIf( + is_windows, + "Skipping test because it requires testing if a file is executable on Windows", + ) def test_set_executable_notexe_abspath_user(self): with SystemCallSolver(type='test') as opt: self.assertEqual(id(opt._user_executable), id(None)) @@ -235,16 +238,13 @@ def test_set_executable_isexe_abspath_user(self): with SystemCallSolver(type='test') as opt: self.assertEqual(id(opt._user_executable), id(None)) opt.set_executable(isexe_abspath_user) - self.assertTrue(os.path.samefile( - opt._user_executable, isexe_abspath)) - self.assertTrue(os.path.samefile( - opt.executable(), isexe_abspath)) + self.assertTrue(os.path.samefile(opt._user_executable, isexe_abspath)) + self.assertTrue(os.path.samefile(opt.executable(), isexe_abspath)) opt._user_executable = None opt.set_executable(isexe_abspath_user, validate=False) self.assertEqual(opt._user_executable, isexe_abspath_user) self.assertEqual(opt.executable(), isexe_abspath_user) - def test_SolverFactory_executable_isexe_nopath(self): for name in _test_names: with SolverFactory(name, executable=isexe_nopath) as opt: @@ -296,10 +296,9 @@ def test_executable_isexe_abspath_user(self): # Note the user path could be different from our # computed path due to symlinks. Check that the # executable is the same file. - self.assertTrue(os.path.samefile( - opt._user_executable, isexe_abspath)) - self.assertTrue(os.path.samefile( - opt.executable(), isexe_abspath)) + self.assertTrue(os.path.samefile(opt._user_executable, isexe_abspath)) + self.assertTrue(os.path.samefile(opt.executable(), isexe_abspath)) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/pysp/__init__.py b/pyomo/pysp/__init__.py index fe30dd9e91b..3fb4abbbd42 100644 --- a/pyomo/pysp/__init__.py +++ b/pyomo/pysp/__init__.py @@ -11,21 +11,21 @@ import logging import sys -from pyomo.common.deprecation import ( - deprecation_warning, in_testing_environment, -) +from pyomo.common.deprecation import deprecation_warning, in_testing_environment try: # Warn the user deprecation_warning( "PySP has been removed from the pyomo.pysp namespace. " "Please import PySP directly from the pysp namespace.", - version='6.0') + version='6.0', + ) from pysp import * + # Redirect all (imported) pysp modules into the pyomo.pysp namespace for mod in list(sys.modules): if mod.startswith('pysp.'): - sys.modules['pyomo.'+mod] = sys.modules[mod] + sys.modules['pyomo.' + mod] = sys.modules[mod] except ImportError: # Only raise the exception if nose/pytest/sphinx are NOT running # (otherwise test discovery can result in exceptions) @@ -34,5 +34,5 @@ "No module named 'pyomo.pysp'. " "Beginning in Pyomo 6.0, PySP is distributed as a separate " "package. Please see https://github.com/Pyomo/pysp for " - "information on downloading and installing PySP") - + "information on downloading and installing PySP" + ) diff --git a/pyomo/repn/beta/matrix.py b/pyomo/repn/beta/matrix.py index b44e6ef6f8b..ff2d6857bd6 100644 --- a/pyomo/repn/beta/matrix.py +++ b/pyomo/repn/beta/matrix.py @@ -9,8 +9,11 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ("_LinearConstraintData", "MatrixConstraint", - "compile_block_linear_constraints",) +__all__ = ( + "_LinearConstraintData", + "MatrixConstraint", + "compile_block_linear_constraints", +) import time import logging @@ -18,17 +21,17 @@ from weakref import ref as weakref_ref from pyomo.common.log import is_debug_set +from pyomo.common.numeric_types import value +from pyomo.core.expr.numvalue import is_fixed, ZeroConstant from pyomo.core.base.set_types import Any -from pyomo.core.base import (SortComponents, - Var) -from pyomo.core.base.numvalue import (is_fixed, - value, - ZeroConstant) +from pyomo.core.base import SortComponents, Var from pyomo.core.base.component import ModelComponentFactory -from pyomo.core.base.constraint import (Constraint, - IndexedConstraint, - ScalarConstraint, - _ConstraintData) +from pyomo.core.base.constraint import ( + Constraint, + IndexedConstraint, + ScalarConstraint, + _ConstraintData, +) from pyomo.core.expr.numvalue import native_numeric_types from pyomo.repn import generate_standard_repn @@ -37,35 +40,40 @@ logger = logging.getLogger('pyomo.core') + def _label_bytes(x): if x < 1e3: - return str(x)+" B" + return str(x) + " B" if x < 1e6: - return str(x / 1.0e3)+" KB" + return str(x / 1.0e3) + " KB" if x < 1e9: - return str(x / 1.0e6)+" MB" - return str(x / 1.0e9)+" GB" + return str(x / 1.0e6) + " MB" + return str(x / 1.0e9) + " GB" + # # Compile a Pyomo constructed model in-place, storing the compiled # sparse constraint object on the model under constraint_name. # -def compile_block_linear_constraints(parent_block, - constraint_name, - skip_trivial_constraints=False, - single_precision_storage=False, - verbose=False, - descend_into=True): - +def compile_block_linear_constraints( + parent_block, + constraint_name, + skip_trivial_constraints=False, + single_precision_storage=False, + verbose=False, + descend_into=True, +): if verbose: print("") - print("Compiling linear constraints on block with name: %s" - % (parent_block.name)) + print( + "Compiling linear constraints on block with name: %s" % (parent_block.name) + ) if not parent_block.is_constructed(): raise RuntimeError( "Attempting to compile block '%s' with unconstructed " - "component(s)" % (parent_block.name)) + "component(s)" % (parent_block.name) + ) # # Linear MatrixConstraint in CSR format @@ -88,15 +96,16 @@ def _get_bound(exp): print("Sorting active blocks...") sortOrder = SortComponents.indices | SortComponents.alphabetical - all_blocks = [_b for _b in parent_block.block_data_objects( - active=True, - sort=sortOrder, - descend_into=descend_into)] + all_blocks = [ + _b + for _b in parent_block.block_data_objects( + active=True, sort=sortOrder, descend_into=descend_into + ) + ] stop_time = time.time() if verbose: - print("Time to sort active blocks: %.2f seconds" - % (stop_time-start_time)) + print("Time to sort active blocks: %.2f seconds" % (stop_time - start_time)) start_time = time.time() if verbose: @@ -109,17 +118,18 @@ def _get_bound(exp): VarSymbolToVarObject = [] for block in all_blocks: VarSymbolToVarObject.extend( - block.component_data_objects(Var, - sort=sortOrder, - descend_into=False)) - VarIDToVarSymbol = \ - dict((id(vardata), index) - for index, vardata in enumerate(VarSymbolToVarObject)) + block.component_data_objects(Var, sort=sortOrder, descend_into=False) + ) + VarIDToVarSymbol = dict( + (id(vardata), index) for index, vardata in enumerate(VarSymbolToVarObject) + ) stop_time = time.time() if verbose: - print("Time to collect variables on active blocks: %.2f seconds" - % (stop_time-start_time)) + print( + "Time to collect variables on active blocks: %.2f seconds" + % (stop_time - start_time) + ) start_time = time.time() if verbose: @@ -137,32 +147,28 @@ def _get_bound(exp): nrows = 0 SparseMat_pRows = [0] for block in all_blocks: - if hasattr(block, '_repn'): del block._repn - for constraint in block.component_objects(Constraint, - active=True, - sort=sortOrder, - descend_into=False): - + for constraint in block.component_objects( + Constraint, active=True, sort=sortOrder, descend_into=False + ): assert not isinstance(constraint, MatrixConstraint) if len(constraint) == 0: - empty_constraint_containers_to_remove.append((block, constraint)) else: - singleton = isinstance(constraint, ScalarConstraint) # Note that as we may be removing items from the _data # dictionary, we need to make a copy of the items list # before iterating: for index, constraint_data in list(constraint.items()): - - if constraint_data.body.__class__ in native_numeric_types or constraint_data.body.polynomial_degree() <= 1: - + if ( + constraint_data.body.__class__ in native_numeric_types + or constraint_data.body.polynomial_degree() <= 1 + ): # collect for removal if singleton: constraint_containers_to_remove.append((block, constraint)) @@ -180,16 +186,17 @@ def _get_bound(exp): if skip_trivial_constraints: continue else: - row_variable_symbols = \ - [VarIDToVarSymbol[id(vardata)] - for vardata in repn.linear_vars] - referenced_variable_symbols.update( - row_variable_symbols) + row_variable_symbols = [ + VarIDToVarSymbol[id(vardata)] + for vardata in repn.linear_vars + ] + referenced_variable_symbols.update(row_variable_symbols) assert repn.linear_coefs is not None row_coefficients = repn.linear_coefs - SparseMat_pRows.append(SparseMat_pRows[-1] + \ - len(row_variable_symbols)) + SparseMat_pRows.append( + SparseMat_pRows[-1] + len(row_variable_symbols) + ) SparseMat_jCols.extend(row_variable_symbols) SparseMat_Vals.extend(row_coefficients) @@ -202,11 +209,15 @@ def _get_bound(exp): Ranges.append(L - constant if (L is not None) else 0) Ranges.append(U - constant if (U is not None) else 0) - if (L is not None) and \ - (U is not None) and \ - (not constraint_data.equality): - RangeTypes.append(MatrixConstraint.LowerBound | - MatrixConstraint.UpperBound) + if ( + (L is not None) + and (U is not None) + and (not constraint_data.equality) + ): + RangeTypes.append( + MatrixConstraint.LowerBound + | MatrixConstraint.UpperBound + ) elif constraint_data.equality: RangeTypes.append(MatrixConstraint.Equality) elif L is not None: @@ -223,8 +234,10 @@ def _get_bound(exp): stop_time = time.time() if verbose: - print("Time to compile active linear constraints: %.2f seconds" - % (stop_time-start_time)) + print( + "Time to compile active linear constraints: %.2f seconds" + % (stop_time - start_time) + ) start_time = time.time() if verbose: @@ -241,7 +254,7 @@ def _get_bound(exp): for constraint, index in constraint_data_to_remove: # Note that this del is not needed: assigning Constraint.Skip # above removes the _ConstraintData from the _data dict. - #del constraint[index] + # del constraint[index] constraints_removed += 1 for block, constraint in constraint_containers_to_remove: block.del_component(constraint) @@ -254,10 +267,14 @@ def _get_bound(exp): stop_time = time.time() if verbose: - print("Eliminated %s constraints and %s Constraint container objects" - % (constraints_removed, constraint_containers_removed)) - print("Time to remove compiled constraint objects: %.2f seconds" - % (stop_time-start_time)) + print( + "Eliminated %s constraints and %s Constraint container objects" + % (constraints_removed, constraint_containers_removed) + ) + print( + "Time to remove compiled constraint objects: %.2f seconds" + % (stop_time - start_time) + ) start_time = time.time() if verbose: @@ -267,23 +284,29 @@ def _get_bound(exp): # Assign a column index to the set of referenced variables # ColumnIndexToVarSymbol = sorted(referenced_variable_symbols) - VarSymbolToColumnIndex = dict((symbol, column) - for column, symbol in enumerate(ColumnIndexToVarSymbol)) + VarSymbolToColumnIndex = dict( + (symbol, column) for column, symbol in enumerate(ColumnIndexToVarSymbol) + ) SparseMat_jCols = [VarSymbolToColumnIndex[symbol] for symbol in SparseMat_jCols] del VarSymbolToColumnIndex - ColumnIndexToVarObject = [VarSymbolToVarObject[var_symbol] - for var_symbol in ColumnIndexToVarSymbol] + ColumnIndexToVarObject = [ + VarSymbolToVarObject[var_symbol] for var_symbol in ColumnIndexToVarSymbol + ] stop_time = time.time() if verbose: - print("Time to assign variable column indices: %.2f seconds" - % (stop_time-start_time)) + print( + "Time to assign variable column indices: %.2f seconds" + % (stop_time - start_time) + ) start_time = time.time() if verbose: print("Converting compiled constraint data to array storage...") - print(" - Using %s precision for numeric values" - % ('single' if single_precision_storage else 'double')) + print( + " - Using %s precision for numeric values" + % ('single' if single_precision_storage else 'double') + ) # # Convert to array storage @@ -298,30 +321,40 @@ def _get_bound(exp): stop_time = time.time() if verbose: - storage_bytes = \ - SparseMat_pRows.buffer_info()[1] * SparseMat_pRows.itemsize + \ - SparseMat_jCols.buffer_info()[1] * SparseMat_jCols.itemsize + \ - SparseMat_Vals.buffer_info()[1] * SparseMat_Vals.itemsize + \ - Ranges.buffer_info()[1] * Ranges.itemsize + \ - RangeTypes.buffer_info()[1] * RangeTypes.itemsize + storage_bytes = ( + SparseMat_pRows.buffer_info()[1] * SparseMat_pRows.itemsize + + SparseMat_jCols.buffer_info()[1] * SparseMat_jCols.itemsize + + SparseMat_Vals.buffer_info()[1] * SparseMat_Vals.itemsize + + Ranges.buffer_info()[1] * Ranges.itemsize + + RangeTypes.buffer_info()[1] * RangeTypes.itemsize + ) print("Sparse Matrix Dimension:") - print(" - Rows: "+str(nrows)) - print(" - Cols: "+str(ncols)) - print(" - Nonzeros: "+str(nnz)) - print("Compiled Data Storage: "+str(_label_bytes(storage_bytes))) - print("Time to convert compiled constraint data to " - "array storage: %.2f seconds" % (stop_time-start_time)) - - parent_block.add_component(constraint_name, - MatrixConstraint(nrows, ncols, nnz, - SparseMat_pRows, - SparseMat_jCols, - SparseMat_Vals, - Ranges, - RangeTypes, - ColumnIndexToVarObject)) - -#class _LinearConstraintData(_ConstraintData,LinearCanonicalRepn): + print(" - Rows: " + str(nrows)) + print(" - Cols: " + str(ncols)) + print(" - Nonzeros: " + str(nnz)) + print("Compiled Data Storage: " + str(_label_bytes(storage_bytes))) + print( + "Time to convert compiled constraint data to " + "array storage: %.2f seconds" % (stop_time - start_time) + ) + + parent_block.add_component( + constraint_name, + MatrixConstraint( + nrows, + ncols, + nnz, + SparseMat_pRows, + SparseMat_jCols, + SparseMat_Vals, + Ranges, + RangeTypes, + ColumnIndexToVarObject, + ), + ) + + +# class _LinearConstraintData(_ConstraintData,LinearCanonicalRepn): # # This change breaks this class, but it's unclear whether this # is being used... @@ -369,11 +402,11 @@ def __init__(self, index, component=None): # - _ConstraintData, # - ActiveComponentData # - ComponentData - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._index = index self._active = True + class _LinearMatrixConstraintData(_LinearConstraintData): """ This class defines the data for a single linear constraint @@ -418,8 +451,7 @@ def __init__(self, index, component=None): # - _ConstraintData, # - ActiveComponentData # - ComponentData - self._component = weakref_ref(component) if (component is not None) \ - else None + self._component = weakref_ref(component) if (component is not None) else None self._active = True # row index into the sparse matrix stored on the parent @@ -453,9 +485,10 @@ def __call__(self, exception=True): varmap = comp._varmap vals = comp._vals try: - return sum(varmap[jcols[p]]() * vals[p] - for p in range(prows[index], - prows[index+1])) + return sum( + varmap[jcols[p]]() * vals[p] + for p in range(prows[index], prows[index + 1]) + ) except (ValueError, TypeError): if exception: raise @@ -465,15 +498,13 @@ def has_lb(self): """Returns :const:`False` when the lower bound is :const:`None` or negative infinity""" lb = self.lower - return (lb is not None) and \ - (lb != float('-inf')) + return (lb is not None) and (lb != float('-inf')) def has_ub(self): """Returns :const:`False` when the upper bound is :const:`None` or positive infinity""" ub = self.upper - return (ub is not None) and \ - (ub != float('inf')) + return (ub is not None) and (ub != float('inf')) def lslack(self): """ @@ -509,12 +540,13 @@ def variables(self): prows = comp._prows jcols = comp._jcols varmap = comp._varmap - if prows[self._index] == prows[self._index+1]: - return() - variables = tuple(varmap[jcols[p]] - for p in range(prows[self._index], - prows[self._index+1]) - if not varmap[jcols[p]].fixed) + if prows[self._index] == prows[self._index + 1]: + return () + variables = tuple( + varmap[jcols[p]] + for p in range(prows[self._index], prows[self._index + 1]) + if not varmap[jcols[p]].fixed + ) return variables @@ -526,16 +558,18 @@ def coefficients(self): jcols = comp._jcols vals = comp._vals varmap = comp._varmap - if prows[self._index] == prows[self._index+1]: + if prows[self._index] == prows[self._index + 1]: return () - coefs = tuple(vals[p] for p in range(prows[self._index], - prows[self._index+1]) - if not varmap[jcols[p]].fixed) + coefs = tuple( + vals[p] + for p in range(prows[self._index], prows[self._index + 1]) + if not varmap[jcols[p]].fixed + ) return coefs # for backwards compatibility - linear=coefficients + linear = coefficients @property def constant(self): @@ -545,12 +579,13 @@ def constant(self): jcols = comp._jcols vals = comp._vals varmap = comp._varmap - if prows[self._index] == prows[self._index+1]: + if prows[self._index] == prows[self._index + 1]: return 0 - terms = tuple(vals[p] * varmap[jcols[p]]() - for p in range(prows[self._index], - prows[self._index+1]) - if varmap[jcols[p]].fixed) + terms = tuple( + vals[p] * varmap[jcols[p]]() + for p in range(prows[self._index], prows[self._index + 1]) + if varmap[jcols[p]].fixed + ) return sum(terms) @@ -567,18 +602,18 @@ def body(self): jcols = comp._jcols varmap = comp._varmap vals = comp._vals - if prows[self._index] == prows[self._index+1]: + if prows[self._index] == prows[self._index + 1]: return ZeroConstant - return sum(varmap[jcols[p]] * vals[p] - for p in range(prows[index], - prows[index+1])) + return sum( + varmap[jcols[p]] * vals[p] for p in range(prows[index], prows[index + 1]) + ) @property def lower(self): """Access the lower bound of a constraint expression.""" comp = self.parent_component() index = self.index() - if (comp._range_types[index] & MatrixConstraint.LowerBound): + if comp._range_types[index] & MatrixConstraint.LowerBound: return comp._ranges[2 * index] return None @@ -587,7 +622,7 @@ def upper(self): """Access the upper bound of a constraint expression.""" comp = self.parent_component() index = self.index() - if (comp._range_types[index] & MatrixConstraint.UpperBound): + if comp._range_types[index] & MatrixConstraint.UpperBound: return comp._ranges[(2 * index) + 1] return None @@ -604,55 +639,49 @@ def ub(self): @property def equality(self): """A boolean indicating whether this is an equality constraint.""" - return (self.parent_component()._range_types[self.index()] & \ - MatrixConstraint.Equality) == MatrixConstraint.Equality + return ( + self.parent_component()._range_types[self.index()] + & MatrixConstraint.Equality + ) == MatrixConstraint.Equality @property def strict_lower(self): """A boolean indicating whether this constraint has a strict lower bound.""" - return (self.parent_component()._range_types[self.index()] & \ - MatrixConstraint.StrictLowerBound) == \ - MatrixConstraint.StrictLowerBound + return ( + self.parent_component()._range_types[self.index()] + & MatrixConstraint.StrictLowerBound + ) == MatrixConstraint.StrictLowerBound @property def strict_upper(self): """A boolean indicating whether this constraint has a strict upper bound.""" - return (self.parent_component()._range_types[self.index()] & \ - MatrixConstraint.StrictUpperBound) == \ - MatrixConstraint.StrictUpperBound + return ( + self.parent_component()._range_types[self.index()] + & MatrixConstraint.StrictUpperBound + ) == MatrixConstraint.StrictUpperBound def set_value(self, expr): """Set the expression on this constraint.""" - raise NotImplementedError("MatrixConstraint row elements can not " - "be updated") + raise NotImplementedError("MatrixConstraint row elements can not be updated") -@ModelComponentFactory.register( - "A set of constraint expressions in Ax=b form.") -class MatrixConstraint(Mapping, IndexedConstraint): +@ModelComponentFactory.register("A set of constraint expressions in Ax=b form.") +class MatrixConstraint(Mapping, IndexedConstraint): # # Bound types # (make sure the maximum value here # will fit in an unsigned char) # StrictUpperBound = 0b00011 - UpperBound = 0b00010 - Equality = 0b01110 - LowerBound = 0b01000 + UpperBound = 0b00010 + Equality = 0b01110 + LowerBound = 0b01000 StrictLowerBound = 0b11000 - NoBound = 0b00000 - - def __init__(self, - nrows, - ncols, - nnz, - prows, - jcols, - vals, - ranges, - range_types, - varmap): + NoBound = 0b00000 + def __init__( + self, nrows, ncols, nnz, prows, jcols, vals, ranges, range_types, varmap + ): assert len(prows) == nrows + 1 assert len(jcols) == nnz assert len(vals) == nnz @@ -660,8 +689,7 @@ def __init__(self, assert len(range_types) == nrows assert len(varmap) == ncols - IndexedConstraint.__init__(self, - Any) + IndexedConstraint.__init__(self, Any) self._prows = prows self._jcols = jcols @@ -675,15 +703,15 @@ def construct(self, data=None): Construct the expression(s) for this constraint. """ if is_debug_set(logger): - logger.debug("Constructing constraint %s" - % (self.name)) + logger.debug("Constructing constraint %s" % (self.name)) if self._constructed: return - self._constructed=True + self._constructed = True _init = _LinearMatrixConstraintData - self._data = tuple(_init(i, component=self) - for i in range(len(self._range_types))) + self._data = tuple( + _init(i, component=self) for i in range(len(self._range_types)) + ) # # Override some IndexedComponent methods @@ -707,3 +735,21 @@ def add(self, index, expr): def __delitem__(self): raise NotImplementedError + + # + # Pyomo components support an extended dict API + # + def keys(self, sort=None): + # The 0..n-1 indices are always ordered and sorted; we can + # ignore the `sort` argument + return super().keys() + + def values(self, sort=None): + # The 0..n-1 indices are always ordered and sorted; we can + # ignore the `sort` argument + return super().values() + + def items(self, sort=None): + # The 0..n-1 indices are always ordered and sorted; we can + # ignore the `sort` argument + return super().items() diff --git a/pyomo/repn/linear.py b/pyomo/repn/linear.py new file mode 100644 index 00000000000..a963a5b9216 --- /dev/null +++ b/pyomo/repn/linear.py @@ -0,0 +1,943 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +import collections +import logging +import sys +from operator import itemgetter +from itertools import filterfalse + +from pyomo.common.deprecation import deprecation_warning +from pyomo.common.numeric_types import native_types, native_numeric_types +from pyomo.core.expr.numeric_expr import ( + NegationExpression, + ProductExpression, + DivisionExpression, + PowExpression, + AbsExpression, + UnaryFunctionExpression, + Expr_ifExpression, + MonomialTermExpression, + LinearExpression, + SumExpression, + NPV_SumExpression, + ExternalFunctionExpression, +) +from pyomo.core.expr.relational_expr import ( + EqualityExpression, + InequalityExpression, + RangedExpression, +) +from pyomo.core.expr.visitor import StreamBasedExpressionVisitor, _EvaluationVisitor +from pyomo.core.expr import is_fixed, value +from pyomo.core.base.expression import ScalarExpression, _GeneralExpressionData +from pyomo.core.base.objective import ScalarObjective, _GeneralObjectiveData +import pyomo.core.kernel as kernel +from pyomo.repn.util import ( + ExprType, + apply_node_operation, + complex_number_error, + InvalidNumber, +) + +logger = logging.getLogger(__name__) + +nan = float("nan") + +_CONSTANT = ExprType.CONSTANT +_LINEAR = ExprType.LINEAR +_GENERAL = ExprType.GENERAL + +_SumLikeExpression = {SumExpression, LinearExpression, NPV_SumExpression} + + +def _merge_dict(dest_dict, mult, src_dict): + if mult == 1: + for vid, coef in src_dict.items(): + if vid in dest_dict: + dest_dict[vid] += coef + else: + dest_dict[vid] = coef + else: + for vid, coef in src_dict.items(): + if vid in dest_dict: + dest_dict[vid] += mult * coef + else: + dest_dict[vid] = mult * coef + + +class LinearRepn(object): + __slots__ = ("multiplier", "constant", "linear", "nonlinear") + + def __init__(self): + self.multiplier = 1 + self.constant = 0 + self.linear = {} + self.nonlinear = None + + def __str__(self): + return ( + f"LinearRepn(mult={self.multiplier}, const={self.constant}, " + f"linear={self.linear}, nonlinear={self.nonlinear})" + ) + + def __repr__(self): + return str(self) + + def walker_exitNode(self): + if self.nonlinear is not None: + return _GENERAL, self + elif self.linear: + return _LINEAR, self + else: + return _CONSTANT, self.multiplier * self.constant + + def duplicate(self): + ans = self.__class__.__new__(self.__class__) + ans.multiplier = self.multiplier + ans.constant = self.constant + ans.linear = dict(self.linear) + ans.nonlinear = self.nonlinear + return ans + + def to_expression(self, visitor): + if self.nonlinear is not None: + # We want to start with the nonlinear term (and use + # assignment) in case the term is a non-numeric node (like a + # relational expression) + ans = self.nonlinear + else: + ans = 0 + if self.linear: + var_map = visitor.var_map + if len(self.linear) == 1: + vid, coef = next(iter(self.linear.items())) + if coef == 1: + ans += var_map[vid] + elif coef: + ans += MonomialTermExpression((coef, var_map[vid])) + else: + pass + else: + ans += LinearExpression( + [ + MonomialTermExpression((coef, var_map[vid])) + for vid, coef in self.linear.items() + if coef + ] + ) + if self.constant: + ans += self.constant + if self.multiplier != 1: + ans *= self.multiplier + return ans + + def append(self, other): + """Append a child result from acceptChildResult + + Notes + ----- + This method assumes that the operator was "+". It is implemented + so that we can directly use a LinearRepn() as a data object in + the expression walker (thereby avoiding the function call for a + custom callback) + + """ + # Note that self.multiplier will always be 1 (we only call append() + # within a sum, so there is no opportunity for self.multiplier to + # change). Omitting the assertion for efficiency. + # assert self.multiplier == 1 + _type, other = other + if _type is _CONSTANT: + self.constant += other + return + + mult = other.multiplier + if other.constant: + self.constant += mult * other.constant + if other.linear: + _merge_dict(self.linear, mult, other.linear) + if other.nonlinear is not None: + if mult != 1: + nl = mult * other.nonlinear + else: + nl = other.nonlinear + if self.nonlinear is None: + self.nonlinear = nl + else: + self.nonlinear += nl + + +def to_expression(visitor, arg): + if arg[0] is _CONSTANT: + return arg[1] + else: + return arg[1].to_expression(visitor) + + +_exit_node_handlers = {} + +# +# NEGATION handlers +# + + +def _handle_negation_constant(visitor, node, arg): + return (_CONSTANT, -1 * arg[1]) + + +def _handle_negation_ANY(visitor, node, arg): + arg[1].multiplier *= -1 + return arg + + +_exit_node_handlers[NegationExpression] = { + (_CONSTANT,): _handle_negation_constant, + (_LINEAR,): _handle_negation_ANY, + (_GENERAL,): _handle_negation_ANY, +} + +# +# PRODUCT handlers +# + + +def _handle_product_constant_constant(visitor, node, arg1, arg2): + _, arg1 = arg1 + _, arg2 = arg2 + ans = arg1 * arg2 + if ans != ans: + if not arg1 or not arg2: + deprecation_warning( + f"Encountered {str(arg1)}*{str(arg2)} in expression tree. " + "Mapping the NaN result to 0 for compatibility " + "with the lp_v1 writer. In the future, this NaN " + "will be preserved/emitted to comply with IEEE-754.", + version='6.6.0', + ) + return _, 0 + return _, arg1 * arg2 + + +def _handle_product_constant_ANY(visitor, node, arg1, arg2): + arg2[1].multiplier *= arg1[1] + return arg2 + + +def _handle_product_ANY_constant(visitor, node, arg1, arg2): + arg1[1].multiplier *= arg2[1] + return arg1 + + +def _handle_product_nonlinear(visitor, node, arg1, arg2): + ans = visitor.Result() + if not visitor.expand_nonlinear_products: + ans.nonlinear = to_expression(visitor, arg1) * to_expression(visitor, arg2) + return _GENERAL, ans + + # We are multiplying (A + Bx + C(x)) * (A + Bx + C(x)) + _, x1 = arg1 + _, x2 = arg2 + ans.multiplier = x1.multiplier * x2.multiplier + x1.multiplier = x2.multiplier = 1 + # x1.const * x2.const [AA] + ans.constant = x1.constant * x2.constant + # x1.linear * x2.const [BA] + x1.const * x2.linear [AB] + if x2.constant: + c = x2.constant + if c == 1: + ans.linear = dict(x1.linear) + else: + ans.linear = {vid: c * coef for vid, coef in x1.linear.items()} + if x1.constant: + _merge_dict(ans.linear, x1.constant, x2.linear) + ans.nonlinear = 0 + if x1.constant and x2.nonlinear is not None: + # [AC] + ans.nonlinear += x1.constant * x2.nonlinear + if x1.nonlinear is not None: + # [CA] + [CB] + [CC] + ans.nonlinear += x1.nonlinear * to_expression(visitor, arg2) + if x1.linear: + # [BB] + [BC] + x1.constant = 0 + x1.nonlinear = None + x2.constant = 0 + ans.nonlinear += to_expression(visitor, arg1) * to_expression(visitor, arg2) + return _GENERAL, ans + + +_exit_node_handlers[ProductExpression] = { + (_CONSTANT, _CONSTANT): _handle_product_constant_constant, + (_CONSTANT, _LINEAR): _handle_product_constant_ANY, + (_CONSTANT, _GENERAL): _handle_product_constant_ANY, + (_LINEAR, _CONSTANT): _handle_product_ANY_constant, + (_LINEAR, _LINEAR): _handle_product_nonlinear, + (_LINEAR, _GENERAL): _handle_product_nonlinear, + (_GENERAL, _CONSTANT): _handle_product_ANY_constant, + (_GENERAL, _LINEAR): _handle_product_nonlinear, + (_GENERAL, _GENERAL): _handle_product_nonlinear, +} +_exit_node_handlers[MonomialTermExpression] = _exit_node_handlers[ProductExpression] + +# +# DIVISION handlers +# + + +def _handle_division_constant_constant(visitor, node, arg1, arg2): + return _CONSTANT, apply_node_operation(node, (arg1[1], arg2[1])) + + +def _handle_division_ANY_constant(visitor, node, arg1, arg2): + arg1[1].multiplier /= arg2[1] + return arg1 + + +def _handle_division_nonlinear(visitor, node, arg1, arg2): + ans = visitor.Result() + ans.nonlinear = to_expression(visitor, arg1) / to_expression(visitor, arg2) + return _GENERAL, ans + + +_exit_node_handlers[DivisionExpression] = { + (_CONSTANT, _CONSTANT): _handle_division_constant_constant, + (_CONSTANT, _LINEAR): _handle_division_nonlinear, + (_CONSTANT, _GENERAL): _handle_division_nonlinear, + (_LINEAR, _CONSTANT): _handle_division_ANY_constant, + (_LINEAR, _LINEAR): _handle_division_nonlinear, + (_LINEAR, _GENERAL): _handle_division_nonlinear, + (_GENERAL, _CONSTANT): _handle_division_ANY_constant, + (_GENERAL, _LINEAR): _handle_division_nonlinear, + (_GENERAL, _GENERAL): _handle_division_nonlinear, +} + +# +# EXPONENTIATION handlers +# + + +def _handle_pow_constant_constant(visitor, node, *args): + arg1, arg2 = args + ans = apply_node_operation(node, (arg1[1], arg2[1])) + if ans.__class__ in _complex_types: + ans = complex_number_error(ans, visitor, node) + return _CONSTANT, ans + + +def _handle_pow_ANY_constant(visitor, node, arg1, arg2): + _, exp = arg2 + if exp == 1: + return arg1 + elif exp > 1 and exp <= visitor.max_exponential_expansion and int(exp) == exp: + _type, _arg = arg1 + ans = _type, _arg.duplicate() + for i in range(1, int(exp)): + ans = visitor.exit_node_dispatcher[(ProductExpression, ans[0], _type)]( + visitor, None, ans, (_type, _arg.duplicate()) + ) + return ans + elif exp == 0: + return _CONSTANT, 1 + else: + return _handle_pow_nonlinear(visitor, node, arg1, arg2) + + +def _handle_pow_nonlinear(visitor, node, arg1, arg2): + ans = visitor.Result() + ans.nonlinear = to_expression(visitor, arg1) ** to_expression(visitor, arg2) + return _GENERAL, ans + + +_exit_node_handlers[PowExpression] = { + (_CONSTANT, _CONSTANT): _handle_pow_constant_constant, + (_CONSTANT, _LINEAR): _handle_pow_nonlinear, + (_CONSTANT, _GENERAL): _handle_pow_nonlinear, + (_LINEAR, _CONSTANT): _handle_pow_ANY_constant, + (_LINEAR, _LINEAR): _handle_pow_nonlinear, + (_LINEAR, _GENERAL): _handle_pow_nonlinear, + (_GENERAL, _CONSTANT): _handle_pow_ANY_constant, + (_GENERAL, _LINEAR): _handle_pow_nonlinear, + (_GENERAL, _GENERAL): _handle_pow_nonlinear, +} + +# +# ABS and UNARY handlers +# + + +def _handle_unary_constant(visitor, node, arg): + ans = apply_node_operation(node, (arg[1],)) + # Unary includes sqrt() which can return complex numbers + if ans.__class__ in _complex_types: + ans = complex_number_error(ans, visitor, node) + return _CONSTANT, ans + + +def _handle_unary_nonlinear(visitor, node, arg): + ans = visitor.Result() + ans.nonlinear = node.create_node_with_local_data((to_expression(visitor, arg),)) + return _GENERAL, ans + + +_exit_node_handlers[UnaryFunctionExpression] = { + (_CONSTANT,): _handle_unary_constant, + (_LINEAR,): _handle_unary_nonlinear, + (_GENERAL,): _handle_unary_nonlinear, +} +_exit_node_handlers[AbsExpression] = _exit_node_handlers[UnaryFunctionExpression] + +# +# NAMED EXPRESSION handlers +# + + +def _handle_named_constant(visitor, node, arg1): + # Record this common expression + visitor.subexpression_cache[id(node)] = arg1 + return arg1 + + +def _handle_named_ANY(visitor, node, arg1): + # Record this common expression + visitor.subexpression_cache[id(node)] = arg1 + _type, arg1 = arg1 + return _type, arg1.duplicate() + + +_exit_node_handlers[ScalarExpression] = { + (_CONSTANT,): _handle_named_constant, + (_LINEAR,): _handle_named_ANY, + (_GENERAL,): _handle_named_ANY, +} + +_named_subexpression_types = [ + ScalarExpression, + _GeneralExpressionData, + kernel.expression.expression, + kernel.expression.noclone, + # Note: objectives are special named expressions + _GeneralObjectiveData, + ScalarObjective, + kernel.objective.objective, +] + +# +# EXPR_IF handlers +# + + +def _handle_expr_if_const(visitor, node, arg1, arg2, arg3): + _type, _test = arg1 + assert _type is _CONSTANT + if _test: + if _test != _test: + # nan + return _handle_expr_if_nonlinear(visitor, node, arg1, arg2, arg3) + return arg2 + else: + return arg3 + + +def _handle_expr_if_nonlinear(visitor, node, arg1, arg2, arg3): + # Note: guaranteed that arg1 is not _CONSTANT + ans = visitor.Result() + ans.nonlinear = Expr_ifExpression( + ( + to_expression(visitor, arg1), + to_expression(visitor, arg2), + to_expression(visitor, arg3), + ) + ) + return _GENERAL, ans + + +_exit_node_handlers[Expr_ifExpression] = { + (i, j, k): _handle_expr_if_nonlinear + for i in (_LINEAR, _GENERAL) + for j in (_CONSTANT, _LINEAR, _GENERAL) + for k in (_CONSTANT, _LINEAR, _GENERAL) +} +for j in (_CONSTANT, _LINEAR, _GENERAL): + for k in (_CONSTANT, _LINEAR, _GENERAL): + _exit_node_handlers[Expr_ifExpression][_CONSTANT, j, k] = _handle_expr_if_const + +# +# Relational expression handlers +# + + +def _handle_equality_const(visitor, node, arg1, arg2): + return _CONSTANT, arg1[1] == arg2[1] + + +def _handle_equality_general(visitor, node, arg1, arg2): + ans = visitor.Result() + ans.nonlinear = EqualityExpression( + (to_expression(visitor, arg1), to_expression(visitor, arg2)) + ) + return _GENERAL, ans + + +_exit_node_handlers[EqualityExpression] = { + (i, j): _handle_equality_general + for i in (_CONSTANT, _LINEAR, _GENERAL) + for j in (_CONSTANT, _LINEAR, _GENERAL) +} +_exit_node_handlers[EqualityExpression][_CONSTANT, _CONSTANT] = _handle_equality_const + + +def _handle_inequality_const(visitor, node, arg1, arg2): + return _CONSTANT, arg1[1] <= arg2[1] + + +def _handle_inequality_general(visitor, node, arg1, arg2): + ans = visitor.Result() + ans.nonlinear = InequalityExpression( + (to_expression(visitor, arg1), to_expression(visitor, arg2)), node.strict + ) + return _GENERAL, ans + + +_exit_node_handlers[InequalityExpression] = { + (i, j): _handle_inequality_general + for i in (_CONSTANT, _LINEAR, _GENERAL) + for j in (_CONSTANT, _LINEAR, _GENERAL) +} +_exit_node_handlers[InequalityExpression][ + _CONSTANT, _CONSTANT +] = _handle_inequality_const + + +def _handle_ranged_const(visitor, node, arg1, arg2, arg3): + return _CONSTANT, arg1[1] <= arg2[1] <= arg3[1] + + +def _handle_ranged_general(visitor, node, arg1, arg2, arg3): + ans = visitor.Result() + ans.nonlinear = RangedExpression( + ( + to_expression(visitor, arg1), + to_expression(visitor, arg2), + to_expression(visitor, arg3), + ), + node.strict, + ) + return _GENERAL, ans + + +_exit_node_handlers[RangedExpression] = { + (i, j, k): _handle_ranged_general + for i in (_CONSTANT, _LINEAR, _GENERAL) + for j in (_CONSTANT, _LINEAR, _GENERAL) + for k in (_CONSTANT, _LINEAR, _GENERAL) +} +_exit_node_handlers[RangedExpression][ + _CONSTANT, _CONSTANT, _CONSTANT +] = _handle_ranged_const + + +def _before_native(visitor, child): + return False, (_CONSTANT, child) + + +def _before_complex(visitor, child): + return False, (_CONSTANT, complex_number_error(child, visitor, child)) + + +def _before_var(visitor, child): + _id = id(child) + if _id not in visitor.var_map: + if child.fixed: + ans = child() + if ans is None or ans != ans: + ans = InvalidNumber(nan) + elif ans.__class__ in _complex_types: + ans = complex_number_error(ans, visitor, child) + return False, (_CONSTANT, ans) + visitor.var_map[_id] = child + visitor.var_order[_id] = len(visitor.var_order) + ans = visitor.Result() + ans.linear[_id] = 1 + return False, (_LINEAR, ans) + + +def _before_param(visitor, child): + ans = child() + if ans is None or ans != ans: + ans = InvalidNumber(nan) + elif ans.__class__ in _complex_types: + ans = complex_number_error(ans, visitor, child) + return False, (_CONSTANT, ans) + + +def _before_npv(visitor, child): + # TBD: It might be more efficient to cache the value of NPV + # expressions to avoid duplicate evaluations. However, current + # examples do not benefit from this cache. + # + # _id = id(child) + # if _id in visitor.value_cache: + # child = visitor.value_cache[_id] + # else: + # child = visitor.value_cache[_id] = child() + # return False, (_CONSTANT, child) + try: + return False, (_CONSTANT, visitor._eval_expr(child)) + except: + # If there was an exception evaluating the subexpression, then + # we need to descend into it (in case there is something like 0 * + # nan that we need to map to 0) + return True, None + + +def _before_monomial(visitor, child): + # + # The following are performance optimizations for common + # situations (Monomial terms and Linear expressions) + # + arg1, arg2 = child._args_ + if arg1.__class__ not in native_types: + # TBD: It might be more efficient to cache the value of NPV + # expressions to avoid duplicate evaluations. However, current + # examples do not benefit from this cache. + # + # _id = id(arg1) + # if _id in visitor.value_cache: + # arg1 = visitor.value_cache[_id] + # else: + # arg1 = visitor.value_cache[_id] = arg1() + try: + arg1 = visitor._eval_expr(arg1) + except: + # If there was an exception evaluating the subexpression, + # then we need to descend into it (in case there is something + # like 0 * nan that we need to map to 0) + return True, None + + # Trap multiplication by 0 and nan. + if not arg1: + if arg2.fixed and arg2.value != arg2.value: + deprecation_warning( + f"Encountered {arg1}*{str(arg2.value)} in expression tree. " + "Mapping the NaN result to 0 for compatibility " + "with the lp_v1 writer. In the future, this NaN " + "will be preserved/emitted to comply with IEEE-754.", + version='6.6.0', + ) + return False, (_CONSTANT, arg1) + + _id = id(arg2) + if _id not in visitor.var_map: + if arg2.fixed: + return False, (_CONSTANT, arg1 * visitor._eval_expr(arg2)) + visitor.var_map[_id] = arg2 + visitor.var_order[_id] = len(visitor.var_order) + ans = visitor.Result() + ans.linear[_id] = arg1 + return False, (_LINEAR, ans) + + +def _before_linear(visitor, child): + var_map = visitor.var_map + var_order = visitor.var_order + next_i = len(var_order) + ans = visitor.Result() + const = 0 + linear = ans.linear + for arg in child.args: + if arg.__class__ is MonomialTermExpression: + arg1, arg2 = arg._args_ + if arg1.__class__ not in native_types: + try: + arg1 = visitor._eval_expr(arg1) + except: + # If there was an exception evaluating the + # subexpression, then we need to descend into it (in + # case there is something like 0 * nan that we need + # to map to 0) + return True, None + if not arg1: + if arg2.fixed and arg2.value != arg2.value: + deprecation_warning( + f"Encountered {arg1}*{str(arg2.value)} in expression tree. " + "Mapping the NaN result to 0 for compatibility " + "with the lp_v1 writer. In the future, this NaN " + "will be preserved/emitted to comply with IEEE-754.", + version='6.6.0', + ) + continue + _id = id(arg2) + if _id not in var_map: + if arg2.fixed: + const += arg1 * visitor._eval_expr(arg2) + continue + var_map[_id] = arg2 + var_order[_id] = next_i + next_i += 1 + linear[_id] = arg1 + elif _id in linear: + linear[_id] += arg1 + else: + linear[_id] = arg1 + elif arg.__class__ not in native_numeric_types: + try: + const += visitor._eval_expr(arg) + except: + # If there was an exception evaluating the + # subexpression, then we need to descend into it (in + # case there is something like 0 * nan that we need to + # map to 0) + return True, None + else: + const += arg + if linear: + ans.constant = const + return False, (_LINEAR, ans) + else: + return False, (_CONSTANT, const) + + +def _before_named_expression(visitor, child): + _id = id(child) + if _id in visitor.subexpression_cache: + _type, expr = visitor.subexpression_cache[_id] + if _type is _CONSTANT: + return False, (_type, expr) + else: + return False, (_type, expr.duplicate()) + else: + return True, None + + +def _before_expr_if(visitor, child): + test, t, f = child.args + if is_fixed(test): + try: + test = test() + except: + return True, None + subexpr = LinearRepnVisitor( + visitor.subexpression_cache, visitor.var_map, visitor.var_order + ).walk_expression(t if test else f) + if subexpr.nonlinear is not None: + return False, (_GENERAL, subexpr) + elif subexpr.linear: + return False, (_LINEAR, subexpr) + else: + return False, (_CONSTANT, subexpr.constant) + return True, None + + +def _before_external(visitor, child): + ans = visitor.Result() + if all(is_fixed(arg) for arg in child.args): + try: + ans.constant = visitor._eval_expr(child) + return False, (_CONSTANT, ans) + except: + pass + ans.nonlinear = child + return False, (_GENERAL, ans) + + +def _before_general_expression(visitor, child): + return True, None + + +def _register_new_before_child_dispatcher(visitor, child): + dispatcher = _before_child_dispatcher + child_type = child.__class__ + if child_type in native_numeric_types: + if issubclass(child_type, complex): + _complex_types.add(child_type) + dispatcher[child_type] = _before_complex + else: + dispatcher[child_type] = _before_native + elif not child.is_expression_type(): + if child.is_potentially_variable(): + dispatcher[child_type] = _before_var + else: + dispatcher[child_type] = _before_param + elif not child.is_potentially_variable(): + dispatcher[child_type] = _before_npv + # If we descend into the named expression (because of an + # evaluation error), then on the way back out, we will use + # the potentially variable handler to process the result. + pv_base_type = child.potentially_variable_base_class() + if pv_base_type not in dispatcher: + try: + child.__class__ = pv_base_type + _register_new_before_child_dispatcher(visitor, child) + finally: + child.__class__ = child_type + if pv_base_type in visitor.exit_node_handlers: + visitor.exit_node_handlers[child_type] = visitor.exit_node_handlers[ + pv_base_type + ] + for args, fcn in visitor.exit_node_handlers[child_type].items(): + visitor.exit_node_dispatcher[(child_type, *args)] = fcn + elif id(child) in visitor.subexpression_cache or issubclass( + child_type, _GeneralExpressionData + ): + dispatcher[child_type] = _before_named_expression + visitor.exit_node_handlers[child_type] = visitor.exit_node_handlers[ + ScalarExpression + ] + for args, fcn in visitor.exit_node_handlers[child_type].items(): + visitor.exit_node_dispatcher[(child_type, *args)] = fcn + else: + dispatcher[child_type] = _before_general_expression + return dispatcher[child_type](visitor, child) + + +_before_child_dispatcher = collections.defaultdict( + lambda: _register_new_before_child_dispatcher +) + +# For efficiency reasons, we will maintain a separate list of all +# complex number types +_complex_types = set((complex,)) + +# Register an initial set of known expression types with the "before +# child" expression handler lookup table. +for _type in native_numeric_types: + _before_child_dispatcher[_type] = _before_native +# We do not support writing complex numbers out +_before_child_dispatcher[complex] = _before_complex +# general operators +for _type in _exit_node_handlers: + _before_child_dispatcher[_type] = _before_general_expression +# override for named subexpressions +for _type in _named_subexpression_types: + _before_child_dispatcher[_type] = _before_named_expression +# Special handling for expr_if and external functions: will be handled +# as terminal nodes from the point of view of the visitor +_before_child_dispatcher[Expr_ifExpression] = _before_expr_if +_before_child_dispatcher[ExternalFunctionExpression] = _before_external +# Special linear / summation expressions +_before_child_dispatcher[MonomialTermExpression] = _before_monomial +_before_child_dispatcher[LinearExpression] = _before_linear +_before_child_dispatcher[SumExpression] = _before_general_expression + + +# +# Initialize the _exit_node_dispatcher +# +def _initialize_exit_node_dispatcher(exit_handlers): + # expand the knowns set of named expressiosn + for expr in _named_subexpression_types: + exit_handlers[expr] = exit_handlers[ScalarExpression] + + exit_dispatcher = {} + for cls, handlers in exit_handlers.items(): + for args, fcn in handlers.items(): + exit_dispatcher[(cls, *args)] = fcn + return exit_dispatcher + + +class LinearRepnVisitor(StreamBasedExpressionVisitor): + Result = LinearRepn + exit_node_handlers = _exit_node_handlers + exit_node_dispatcher = _initialize_exit_node_dispatcher(_exit_node_handlers) + expand_nonlinear_products = False + max_exponential_expansion = 1 + + def __init__(self, subexpression_cache, var_map, var_order): + super().__init__() + self.subexpression_cache = subexpression_cache + self.var_map = var_map + self.var_order = var_order + self._eval_expr_visitor = _EvaluationVisitor(True) + + def _eval_expr(self, expr): + ans = self._eval_expr_visitor.dfs_postorder_stack(expr) + if ans.__class__ not in native_types: + ans = value(ans) + if ans != ans: + return InvalidNumber(ans) + if ans.__class__ in _complex_types: + return complex_number_error(ans, self, expr) + return ans + + def initializeWalker(self, expr): + walk, result = self.beforeChild(None, expr, 0) + if not walk: + return False, self.finalizeResult(result) + return True, expr + + def beforeChild(self, node, child, child_idx): + return _before_child_dispatcher[child.__class__](self, child) + + def enterNode(self, node): + # SumExpression are potentially large nary operators. Directly + # populate the result + if node.__class__ in _SumLikeExpression: + return node.args, self.Result() + else: + return node.args, [] + + def exitNode(self, node, data): + if data.__class__ is self.Result: + return data.walker_exitNode() + # + # General expressions... + # + return self.exit_node_dispatcher[(node.__class__, *map(itemgetter(0), data))]( + self, node, *data + ) + + def finalizeResult(self, result): + ans = result[1] + if ans.__class__ is self.Result: + mult = ans.multiplier + if mult == 1: + # mult is identity: only thing to do is filter out zero coefficients + zeros = list(filterfalse(itemgetter(1), ans.linear.items())) + for vid, coef in zeros: + del ans.linear[vid] + elif not mult: + # the mulltiplier has cleared out the entire expression. + # Warn if this is suppressing a NaN (unusual, and + # non-standard, but we will wait to remove this behavior + # for the time being) + if ans.constant != ans.constant or any( + c != c for c in ans.linear.values() + ): + deprecation_warning( + f"Encountered {str(mult)}*nan in expression tree. " + "Mapping the NaN result to 0 for compatibility " + "with the lp_v1 writer. In the future, this NaN " + "will be preserved/emitted to comply with IEEE-754.", + version='6.6.0', + ) + return self.Result() + else: + # mult not in {0, 1}: factor it into the constant, + # linear coefficients, and nonlinear term + linear = ans.linear + zeros = [] + for vid, coef in linear.items(): + if coef: + linear[vid] = coef * mult + else: + zeros.append(vid) + for vid in zeros: + del linear[vid] + if ans.nonlinear is not None: + ans.nonlinear *= mult + if ans.constant: + ans.constant *= mult + ans.multiplier = 1 + return ans + ans = self.Result() + assert result[0] is _CONSTANT + ans.constant = result[1] + return ans diff --git a/pyomo/repn/plugins/__init__.py b/pyomo/repn/plugins/__init__.py index 9aacacd69fd..f1e8270b8c7 100644 --- a/pyomo/repn/plugins/__init__.py +++ b/pyomo/repn/plugins/__init__.py @@ -9,14 +9,33 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.repn.plugins.cpxlp import pyomo.repn.plugins.ampl import pyomo.repn.plugins.baron_writer import pyomo.repn.plugins.mps import pyomo.repn.plugins.gams_writer + import pyomo.repn.plugins.lp_writer import pyomo.repn.plugins.nl_writer from pyomo.opt import WriterFactory + + # Register the "default" versions of writers that have more than one + # implementation WriterFactory.register('nl', 'Generate the corresponding AMPL NL file.')( - WriterFactory.get_class('nl_v1')) + WriterFactory.get_class('nl_v2') + ) + WriterFactory.register('lp', 'Generate the corresponding CPLEX LP file.')( + WriterFactory.get_class('lp_v2') + ) + WriterFactory.register('cpxlp', 'Generate the corresponding CPLEX LP file.')( + WriterFactory.get_class('cpxlp_v2') + ) + + +def activate_writer_version(name, ver): + """DEBUGGING TOOL to switch the "default" writer implementation""" + doc = WriterFactory.doc(name) + WriterFactory.unregister(name) + WriterFactory.register(name, doc)(WriterFactory.get_class(f'{name}_v{ver}')) diff --git a/pyomo/repn/plugins/ampl/ampl_.py b/pyomo/repn/plugins/ampl/ampl_.py index 8fd4f5a9d57..a2bd55cb73a 100644 --- a/pyomo/repn/plugins/ampl/ampl_.py +++ b/pyomo/repn/plugins/ampl/ampl_.py @@ -25,12 +25,28 @@ from pyomo.common.fileutils import find_library from pyomo.common.gc_manager import PauseGC from pyomo.opt import ProblemFormat, AbstractProblemWriter, WriterFactory -from pyomo.core.expr import current as EXPR -from pyomo.core.expr.numvalue import (NumericConstant, - native_numeric_types, - value, - is_fixed) -from pyomo.core.base import SymbolMap, NameLabeler, _ExpressionData, SortComponents, var, param, Var, ExternalFunction, ComponentMap, Objective, Constraint, SOSConstraint, Suffix +import pyomo.core.expr as EXPR +from pyomo.core.expr.numvalue import ( + NumericConstant, + native_numeric_types, + value, + is_fixed, +) +from pyomo.core.base import ( + SymbolMap, + NameLabeler, + _ExpressionData, + SortComponents, + var, + param, + Var, + ExternalFunction, + ComponentMap, + Objective, + Constraint, + SOSConstraint, + Suffix, +) import pyomo.core.base.suffix from pyomo.repn.standard_repn import generate_standard_repn @@ -48,14 +64,14 @@ def set_pyomo_amplfunc_env(external_libs): # sometime between 2010 and 2012, the ASL added support for # simple quoted strings: the first non-whitespace character # can be either " or '. When that is detected, the ASL - # parser will continue to the next occurance of that + # parser will continue to the next occurrence of that # character (i.e., no escaping is allowed). We will use # that same logic here to quote any strings with spaces # ... bearing in mind that this will only work with solvers # compiled against versions of the ASL more recent than # ~2012. # - # We are (arbitrarily) chosing to use newline as the field + # We are (arbitrarily) choosing to use newline as the field # separator. env_str = '' for _lib in external_libs: @@ -64,11 +80,11 @@ def set_pyomo_amplfunc_env(external_libs): _abs_lib = find_library(_lib) if _abs_lib is not None: _lib = _abs_lib - if ( ' ' not in _lib - or ( _lib[0]=='"' and _lib[-1]=='"' - and '"' not in _lib[1:-1] ) - or ( _lib[0]=="'" and _lib[-1]=="'" - and "'" not in _lib[1:-1] ) ): + if ( + ' ' not in _lib + or (_lib[0] == '"' and _lib[-1] == '"' and '"' not in _lib[1:-1]) + or (_lib[0] == "'" and _lib[-1] == "'" and "'" not in _lib[1:-1]) + ): pass elif '"' not in _lib: _lib = '"' + _lib + '"' @@ -78,7 +94,8 @@ def set_pyomo_amplfunc_env(external_libs): raise RuntimeError( "Cannot pass the AMPL external function library\n\t%s\n" "to the ASL because the string contains spaces, " - "single quote and\ndouble quote characters." % (_lib,)) + "single quote and\ndouble quote characters." % (_lib,) + ) if env_str: env_str += "\n" env_str += _lib @@ -86,28 +103,29 @@ def set_pyomo_amplfunc_env(external_libs): _intrinsic_function_operators = { - 'log': 'o43', - 'log10': 'o42', - 'sin': 'o41', - 'cos': 'o46', - 'tan': 'o38', - 'sinh': 'o40', - 'cosh': 'o45', - 'tanh': 'o37', - 'asin': 'o51', - 'acos': 'o53', - 'atan': 'o49', - 'exp': 'o44', - 'sqrt': 'o39', - 'asinh': 'o50', - 'acosh': 'o52', - 'atanh': 'o47', - 'pow': 'o5', - 'abs': 'o15', - 'ceil': 'o14', - 'floor': 'o13' + 'log': 'o43', + 'log10': 'o42', + 'sin': 'o41', + 'cos': 'o46', + 'tan': 'o38', + 'sinh': 'o40', + 'cosh': 'o45', + 'tanh': 'o37', + 'asin': 'o51', + 'acos': 'o53', + 'atan': 'o49', + 'exp': 'o44', + 'sqrt': 'o39', + 'asinh': 'o50', + 'acosh': 'o52', + 'atanh': 'o47', + 'pow': 'o5', + 'abs': 'o15', + 'ceil': 'o14', + 'floor': 'o13', } + # build string templates def _build_op_template(): _op_template = {} @@ -122,24 +140,32 @@ def _build_op_template(): _op_template[EXPR.DivisionExpression] = div_template _op_comment[EXPR.DivisionExpression] = div_comment - _op_template[EXPR.ExternalFunctionExpression] = ("f%d %d{C}\n", #function - "h%d:%s{C}\n") #string arg - _op_comment[EXPR.ExternalFunctionExpression] = ("\t#%s", #function - "") #string arg + _op_template[EXPR.ExternalFunctionExpression] = ( + "f%d %d{C}\n", # function + "h%d:%s{C}\n", + ) # string arg + _op_comment[EXPR.ExternalFunctionExpression] = ( + "\t#%s", # function + "", + ) # string arg for opname in _intrinsic_function_operators: - _op_template[opname] = _intrinsic_function_operators[opname]+"{C}\n" - _op_comment[opname] = "\t#"+opname + _op_template[opname] = _intrinsic_function_operators[opname] + "{C}\n" + _op_comment[opname] = "\t#" + opname _op_template[EXPR.Expr_ifExpression] = "o35{C}\n" _op_comment[EXPR.Expr_ifExpression] = "\t#if" - _op_template[EXPR.InequalityExpression] = ("o21{C}\n", # and - "o22{C}\n", # < - "o23{C}\n") # <= - _op_comment[EXPR.InequalityExpression] = ("\t#and", # and - "\t#lt", # < - "\t#le") # <= + _op_template[EXPR.InequalityExpression] = ( + "o21{C}\n", # and + "o22{C}\n", # < + "o23{C}\n", + ) # <= + _op_comment[EXPR.InequalityExpression] = ( + "\t#and", # and + "\t#lt", # < + "\t#le", + ) # <= _op_template[EXPR.EqualityExpression] = "o24{C}\n" _op_comment[EXPR.EqualityExpression] = "\t#eq" @@ -154,19 +180,21 @@ def _build_op_template(): _op_comment[NumericConstant] = "" _op_template[EXPR.SumExpressionBase] = ( - "o54{C}\n%d\n", # nary + - "o0{C}\n", # + - "o2\n" + _op_template[NumericConstant] ) # * coef - _op_comment[EXPR.SumExpressionBase] = ("\t#sumlist", # nary + - "\t#+", # + - _op_comment[NumericConstant]) # * coef + "o54{C}\n%d\n", # nary + + "o0{C}\n", # + + "o2\n" + _op_template[NumericConstant], + ) # * coef + _op_comment[EXPR.SumExpressionBase] = ( + "\t#sumlist", # nary + + "\t#+", # + + _op_comment[NumericConstant], + ) # * coef _op_template[EXPR.NegationExpression] = "o16{C}\n" _op_comment[EXPR.NegationExpression] = "\t#-" return _op_template, _op_comment - def _get_bound(exp): if exp is None: return None @@ -174,19 +202,19 @@ def _get_bound(exp): return value(exp) raise ValueError("non-fixed bound or weight: " + str(exp)) -class StopWatch(object): +class StopWatch(object): def __init__(self): self.start = time.time() def report(self, msg): - print(msg+" (seconds): "+str(time.time()-self.start)) + print(msg + " (seconds): " + str(time.time() - self.start)) def reset(self): self.start = time.time() -class _Counter(object): +class _Counter(object): def __init__(self, start): self._id = start @@ -195,44 +223,45 @@ def __call__(self, obj): self._id += 1 return tmp -class ModelSOS(object): +class ModelSOS(object): class AmplSuffix(object): - - def __init__(self,name): + def __init__(self, name): self.name = name self.ids = [] self.vals = [] - def add(self,idx,val): + def add(self, idx, val): if idx in self.ids: raise RuntimeError( "The NL file format does not support multiple nonzero " "values for a single component and suffix. \n" "Suffix Name: %s\n" - "Component ID: %s\n" % (self.name, idx)) + "Component ID: %s\n" % (self.name, idx) + ) else: self.ids.append(idx) self.vals.append(val) def genfilelines(self): base_line = "{0} {1}\n" - return [base_line.format(idx, val) - for idx, val in zip(self.ids,self.vals) if val != 0] + return [ + base_line.format(idx, val) + for idx, val in zip(self.ids, self.vals) + if val != 0 + ] def is_empty(self): return not bool(len(self.ids)) - def __init__(self,ampl_var_id, varID_map): - + def __init__(self, ampl_var_id, varID_map): self.ampl_var_id = ampl_var_id self.sosno = self.AmplSuffix('sosno') self.ref = self.AmplSuffix('ref') self.block_cntr = 0 self.varID_map = varID_map - def count_constraint(self,soscondata): - + def count_constraint(self, soscondata): ampl_var_id = self.ampl_var_id varID_map = self.varID_map @@ -257,9 +286,11 @@ def count_constraint(self,soscondata): elif level == 2: sign_tag = -1 else: - raise ValueError("SOSContraint '%s' has sos type='%s', " - "which is not supported by the NL file interface" \ - % (soscondata.name, level)) + raise ValueError( + "SOSConstraint '%s' has sos type='%s', " + "which is not supported by the NL file interface" + % (soscondata.name, level) + ) for vardata, weight in sos_items: weight = _get_bound(weight) @@ -267,33 +298,31 @@ def count_constraint(self,soscondata): raise ValueError( "Cannot use negative weight %f " "for variable %s is special ordered " - "set %s " % (weight, vardata.name, soscondata.name)) + "set %s " % (weight, vardata.name, soscondata.name) + ) if vardata.fixed: raise ValueError( "SOSConstraint '%s' includes a fixed Variable '%s'. " "This is currently not supported. Deactivate this constraint " - "in order to proceed" - % (soscondata.name, vardata.name)) + "in order to proceed" % (soscondata.name, vardata.name) + ) ID = ampl_var_id[varID_map[id(vardata)]] - self.sosno.add(ID,self.block_cntr*sign_tag) - self.ref.add(ID,weight) + self.sosno.add(ID, self.block_cntr * sign_tag) + self.ref.add(ID, weight) -class RepnWrapper(object): - __slots__ = ('repn','linear_vars','nonlinear_vars') +class RepnWrapper(object): + __slots__ = ('repn', 'linear_vars', 'nonlinear_vars') - def __init__(self,repn,linear,nonlinear): + def __init__(self, repn, linear, nonlinear): self.repn = repn self.linear_vars = linear self.nonlinear_vars = nonlinear -@WriterFactory.register( - 'nl_v1', 'Generate the corresponding AMPL NL file (version 1).') +@WriterFactory.register('nl_v1', 'Generate the corresponding AMPL NL file (version 1).') class ProblemWriter_nl(AbstractProblemWriter): - - def __init__(self): AbstractProblemWriter.__init__(self, ProblemFormat.nl) self._ampl_var_id = {} @@ -302,12 +331,7 @@ def __init__(self): self._OUTPUT = None self._varID_map = None - def __call__(self, - model, - filename, - solver_capability, - io_options): - + def __call__(self, model, filename, solver_capability, io_options): # Rebuild the OP template (as the expression tree system may # have been switched) _op_template, _op_comment = _build_op_template() @@ -347,28 +371,30 @@ def __call__(self, # preprocessed after the variable was fixed). If True, we # allow this case and modify the variable bounds section to # fix the variable. - output_fixed_variable_bounds = \ - io_options.pop("output_fixed_variable_bounds", False) + output_fixed_variable_bounds = io_options.pop( + "output_fixed_variable_bounds", False + ) # If False, unused variables will not be included in # the NL file. Otherwise, include all variables in # the bounds sections. - include_all_variable_bounds = \ - io_options.pop("include_all_variable_bounds", False) + include_all_variable_bounds = io_options.pop( + "include_all_variable_bounds", False + ) # List of variables that don't appear in constraints to force into the # nl-file - export_nonlinear_variables = \ - io_options.pop("export_nonlinear_variables", False) + export_nonlinear_variables = io_options.pop("export_nonlinear_variables", False) # column_order is a new option supported by the nl writer v2 _column_order = io_options.pop("column_order", True) - assert _column_order in {True,} + assert _column_order in {True} if len(io_options): raise ValueError( - "ProblemWriter_nl passed unrecognized io_options:\n\t" + - "\n\t".join("%s = %s" % (k,v) for k,v in io_options.items())) + "ProblemWriter_nl passed unrecognized io_options:\n\t" + + "\n\t".join("%s = %s" % (k, v) for k, v in io_options.items()) + ) if filename is None: filename = model.name + ".nl" @@ -405,7 +431,7 @@ def __call__(self, # Pause the GC for the duration of this method with PauseGC() as pgc: - with open(filename,"w") as f: + with open(filename, "w") as f: self._OUTPUT = f symbol_map = self._print_model_NL( model, @@ -414,7 +440,8 @@ def __call__(self, skip_trivial_constraints=skip_trivial_constraints, file_determinism=file_determinism, include_all_variable_bounds=include_all_variable_bounds, - export_nonlinear_variables=export_nonlinear_variables) + export_nonlinear_variables=export_nonlinear_variables, + ) self._symbolic_solver_labels = False self._output_fixed_variable_bounds = False @@ -438,12 +465,11 @@ def _print_quad_term(self, v1, v2): self._print_nonlinear_terms_NL(v1) OUTPUT.write(self._op_string[NumericConstant] % (2)) - def _print_standard_quadratic_NL(self, - quadratic_vars, - quadratic_coefs): + def _print_standard_quadratic_NL(self, quadratic_vars, quadratic_coefs): OUTPUT = self._OUTPUT - nary_sum_str, binary_sum_str, coef_term_str = \ - self._op_string[EXPR.SumExpressionBase] + nary_sum_str, binary_sum_str, coef_term_str = self._op_string[ + EXPR.SumExpressionBase + ] assert len(quadratic_vars) == len(quadratic_coefs) if len(quadratic_vars) == 1: pass @@ -461,14 +487,18 @@ def _print_standard_quadratic_NL(self, self_varID_map = self._varID_map quadratic_vars = [] quadratic_coefs = [] - for (i, (v1, v2)) in sorted(enumerate(old_quadratic_vars), - key=lambda x: (self_varID_map[id(x[1][0])], - self_varID_map[id(x[1][1])])): + for i, (v1, v2) in sorted( + enumerate(old_quadratic_vars), + key=lambda x: ( + self_varID_map[id(x[1][0])], + self_varID_map[id(x[1][1])], + ), + ): quadratic_coefs.append(old_quadratic_coefs[i]) if self_varID_map[id(v1)] <= self_varID_map[id(v2)]: - quadratic_vars.append((v1,v2)) + quadratic_vars.append((v1, v2)) else: - quadratic_vars.append((v2,v1)) + quadratic_vars.append((v2, v1)) for i in range(len(quadratic_vars)): coef = quadratic_coefs[i] v1, v2 = quadratic_vars[i] @@ -486,32 +516,32 @@ def _print_nonlinear_terms_NL(self, exp): # create a new sum expression for efficiency) this should # be a list of tuples where [0] is the coeff and [1] is # the expr to write - nary_sum_str, binary_sum_str, coef_term_str = \ - self._op_string[EXPR.SumExpressionBase] + nary_sum_str, binary_sum_str, coef_term_str = self._op_string[ + EXPR.SumExpressionBase + ] n = len(exp) if n > 2: OUTPUT.write(nary_sum_str % (n)) - for i in range(0,n): - assert(exp[i].__class__ is tuple) + for i in range(0, n): + assert exp[i].__class__ is tuple coef = exp[i][0] child_exp = exp[i][1] if coef != 1: OUTPUT.write(coef_term_str % (coef)) self._print_nonlinear_terms_NL(child_exp) - else: # n == 1 or 2 - for i in range(0,n): - assert(exp[i].__class__ is tuple) + else: # n == 1 or 2 + for i in range(0, n): + assert exp[i].__class__ is tuple coef = exp[i][0] child_exp = exp[i][1] - if i != n-1: + if i != n - 1: # need the + op if it is not the last entry in the list OUTPUT.write(binary_sum_str) if coef != 1: OUTPUT.write(coef_term_str % (coef)) self._print_nonlinear_terms_NL(child_exp) elif exp_type in native_numeric_types: - OUTPUT.write(self._op_string[NumericConstant] - % (exp)) + OUTPUT.write(self._op_string[NumericConstant] % (exp)) elif exp.is_expression_type(): # @@ -523,9 +553,13 @@ def _print_nonlinear_terms_NL(self, exp): # We are assuming that _Constant_* expression objects # have been preprocessed to form constant values. # - elif exp.__class__ is EXPR.SumExpression: - nary_sum_str, binary_sum_str, coef_term_str = \ - self._op_string[EXPR.SumExpressionBase] + elif ( + exp.__class__ is EXPR.SumExpression + or exp.__class__ is EXPR.LinearExpression + ): + nary_sum_str, binary_sum_str, coef_term_str = self._op_string[ + EXPR.SumExpressionBase + ] n = exp.nargs() const = 0 vargs = [] @@ -549,8 +583,9 @@ def _print_nonlinear_terms_NL(self, exp): self._print_nonlinear_terms_NL(child_exp) elif exp_type is EXPR.SumExpressionBase: - nary_sum_str, binary_sum_str, coef_term_str = \ - self._op_string[EXPR.SumExpressionBase] + nary_sum_str, binary_sum_str, coef_term_str = self._op_string[ + EXPR.SumExpressionBase + ] OUTPUT.write(binary_sum_str) self._print_nonlinear_terms_NL(exp.arg(0)) self._print_nonlinear_terms_NL(exp.arg(1)) @@ -589,18 +624,24 @@ def _print_nonlinear_terms_NL(self, exp): if exp.is_fixed(): self._print_nonlinear_terms_NL(exp()) return - fun_str, string_arg_str = \ - self._op_string[EXPR.ExternalFunctionExpression] + fun_str, string_arg_str = self._op_string[ + EXPR.ExternalFunctionExpression + ] if not self._symbolic_solver_labels: - OUTPUT.write(fun_str - % (self.external_byFcn[exp._fcn._function][1], - exp.nargs())) + OUTPUT.write( + fun_str + % (self.external_byFcn[exp._fcn._function][1], exp.nargs()) + ) else: # Note: exp.name fails - OUTPUT.write(fun_str - % (self.external_byFcn[exp._fcn._function][1], - exp.nargs(), - exp.name)) + OUTPUT.write( + fun_str + % ( + self.external_byFcn[exp._fcn._function][1], + exp.nargs(), + exp.name, + ) + ) for arg in exp.args: if isinstance(arg, str): # Note: ASL does not handle '\r\n' as the EOL @@ -616,10 +657,9 @@ def _print_nonlinear_terms_NL(self, exp): # would force us to change a large number of # baselines / file comparisons OUTPUT.flush() - with os.fdopen(OUTPUT.fileno(), - mode='w+', - closefd=False, - newline='\n') as TMP: + with os.fdopen( + OUTPUT.fileno(), mode='w+', closefd=False, newline='\n' + ) as TMP: TMP.write(string_arg_str % (len(arg), arg)) elif type(arg) in native_numeric_types: self._print_nonlinear_terms_NL(arg) @@ -641,19 +681,18 @@ def _print_nonlinear_terms_NL(self, exp): OUTPUT.write(intr_expr_str) else: logger.error("Unsupported unary function ({0})".format(exp.name)) - raise TypeError("ASL writer does not support '%s' expressions" - % (exp.name)) + raise TypeError( + "ASL writer does not support '%s' expressions" % (exp.name) + ) self._print_nonlinear_terms_NL(exp.arg(0)) elif exp_type is EXPR.Expr_ifExpression: OUTPUT.write(self._op_string[EXPR.Expr_ifExpression]) - self._print_nonlinear_terms_NL(exp._if) - self._print_nonlinear_terms_NL(exp._then) - self._print_nonlinear_terms_NL(exp._else) + for arg in exp.args: + self._print_nonlinear_terms_NL(arg) elif exp_type is EXPR.InequalityExpression: - and_str, lt_str, le_str = \ - self._op_string[EXPR.InequalityExpression] + and_str, lt_str, le_str = self._op_string[EXPR.InequalityExpression] left = exp.arg(0) right = exp.arg(1) if exp._strict: @@ -664,8 +703,7 @@ def _print_nonlinear_terms_NL(self, exp): self._print_nonlinear_terms_NL(right) elif exp_type is EXPR.RangedExpression: - and_str, lt_str, le_str = \ - self._op_string[EXPR.InequalityExpression] + and_str, lt_str, le_str = self._op_string[EXPR.InequalityExpression] left = exp.arg(0) middle = exp.arg(1) right = exp.arg(2) @@ -694,40 +732,47 @@ def _print_nonlinear_terms_NL(self, exp): else: raise ValueError( "Unsupported expression type (%s) in _print_nonlinear_terms_NL" - % (exp_type)) + % (exp_type) + ) - elif isinstance(exp, (var._VarData, IVariable)) and \ - (not exp.is_fixed()): - #(self._output_fixed_variable_bounds or + elif isinstance(exp, (var._VarData, IVariable)) and (not exp.is_fixed()): + # (self._output_fixed_variable_bounds or if not self._symbolic_solver_labels: - OUTPUT.write(self._op_string[var._VarData] - % (self.ampl_var_id[self._varID_map[id(exp)]])) + OUTPUT.write( + self._op_string[var._VarData] + % (self.ampl_var_id[self._varID_map[id(exp)]]) + ) else: - OUTPUT.write(self._op_string[var._VarData] - % (self.ampl_var_id[self._varID_map[id(exp)]], - self._name_labeler(exp))) + OUTPUT.write( + self._op_string[var._VarData] + % ( + self.ampl_var_id[self._varID_map[id(exp)]], + self._name_labeler(exp), + ) + ) - elif isinstance(exp,param._ParamData): - OUTPUT.write(self._op_string[param._ParamData] - % (value(exp))) + elif isinstance(exp, param._ParamData): + OUTPUT.write(self._op_string[param._ParamData] % (value(exp))) - elif isinstance(exp,NumericConstant) or exp.is_fixed(): - OUTPUT.write(self._op_string[NumericConstant] - % (value(exp))) + elif isinstance(exp, NumericConstant) or exp.is_fixed(): + OUTPUT.write(self._op_string[NumericConstant] % (value(exp))) else: raise ValueError( "Unsupported expression type (%s) in _print_nonlinear_terms_NL" - % (exp_type)) - - def _print_model_NL(self, model, - solver_capability, - show_section_timing=False, - skip_trivial_constraints=False, - file_determinism=1, - include_all_variable_bounds=False, - export_nonlinear_variables=False): - + % (exp_type) + ) + + def _print_model_NL( + self, + model, + solver_capability, + show_section_timing=False, + skip_trivial_constraints=False, + file_determinism=1, + include_all_variable_bounds=False, + export_nonlinear_variables=False, + ): output_fixed_variable_bounds = self._output_fixed_variable_bounds symbolic_solver_labels = self._symbolic_solver_labels @@ -793,15 +838,17 @@ def _print_model_NL(self, model, "The same external function name (%s) is associated " "with two different libraries (%s through %s, and %s " "through %s). The ASL solver will fail to link " - "correctly." % - (fcn._function, - self.external_byFcn[fcn._function]._library, - self.external_byFcn[fcn._function]._library.name, - fcn._library, - fcn.name)) + "correctly." + % ( + fcn._function, + self.external_byFcn[fcn._function]._library, + self.external_byFcn[fcn._function]._library.name, + fcn._library, + fcn.name, + ) + ) else: - self.external_byFcn[fcn._function] = \ - (fcn, len(self.external_byFcn)) + self.external_byFcn[fcn._function] = (fcn, len(self.external_byFcn)) external_Libs.add(fcn._library) if external_Libs: set_pyomo_amplfunc_env(external_Libs) @@ -815,8 +862,7 @@ def _print_model_NL(self, model, all_blocks_list = list(model.block_data_objects(active=True, sort=sorter)) # create a deterministic var labeling - Vars_dict = dict( enumerate( model.component_data_objects( - Var, sort=sorter) ) ) + Vars_dict = dict(enumerate(model.component_data_objects(Var, sort=sorter))) cntr = len(Vars_dict) # cntr = 0 # for block in all_blocks_list: @@ -828,7 +874,7 @@ def _print_model_NL(self, model, # cntr)) # cntr += len(vars_counter) # Vars_dict.update(vars_counter) - self._varID_map = dict((id(val),key) for key,val in Vars_dict.items()) + self._varID_map = dict((id(val), key) for key, val in Vars_dict.items()) self_varID_map = self._varID_map # Use to label the rest of the components (which we will not encounter twice) trivial_labeler = _Counter(cntr) @@ -842,19 +888,17 @@ def _print_model_NL(self, model, ObjNonlinearVars = set() ObjNonlinearVarsInt = set() for block in all_blocks_list: - gen_obj_repn = getattr(block, "_gen_obj_repn", None) if gen_obj_repn is not None: gen_obj_repn = bool(gen_obj_repn) # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): + if not hasattr(block, '_repn'): block._repn = ComponentMap() block_repn = block._repn - for active_objective in block.component_data_objects(Objective, - active=True, - sort=sorter, - descend_into=False): + for active_objective in block.component_data_objects( + Objective, active=True, sort=sorter, descend_into=False + ): if symbolic_solver_labels: objname = name_labeler(active_objective) if len(objname) > max_rowname_len: @@ -873,7 +917,7 @@ def _print_model_NL(self, model, # Note that this is fragile: # generate_standard_repn can leave nonlinear # terms in both quadratic and nonlinear fields. - # However, when this was writen the assumption + # However, when this was written the assumption # is that generate_standard_repn is only called # with quadratic=True for QCQPs (by the LP # writer). So, quadratic and nonlinear_expr @@ -889,8 +933,9 @@ def _print_model_NL(self, model, else: nonlinear_vars = repn.nonlinear_vars else: - repn = generate_standard_repn(active_objective.expr, - quadratic=False) + repn = generate_standard_repn( + active_objective.expr, quadratic=False + ) linear_vars = repn.linear_vars nonlinear_vars = repn.nonlinear_vars if gen_obj_repn: @@ -899,11 +944,15 @@ def _print_model_NL(self, model, wrapped_repn = RepnWrapper( repn, list(self_varID_map[id(var)] for var in linear_vars), - list(self_varID_map[id(var)] for var in nonlinear_vars)) + list(self_varID_map[id(var)] for var in nonlinear_vars), + ) except KeyError as err: - self._symbolMapKeyError(err, model, self_varID_map, - list(linear_vars) + - list(nonlinear_vars)) + self._symbolMapKeyError( + err, + model, + self_varID_map, + list(linear_vars) + list(nonlinear_vars), + ) raise LinearVars.update(wrapped_repn.linear_vars) @@ -915,7 +964,7 @@ def _print_model_NL(self, model, obj_ID = trivial_labeler(active_objective) Objectives_dict[obj_ID] = (active_objective, wrapped_repn) self_ampl_obj_id[obj_ID] = n_objs - symbol_map.addSymbols([(active_objective, "o%d"%n_objs)]) + symbol_map.addSymbols([(active_objective, "o%d" % n_objs)]) n_objs += 1 if repn.is_nonlinear(): @@ -928,9 +977,10 @@ def _print_model_NL(self, model, raise ValueError( "The NL writer has detected multiple active objective functions " "on model %s, but currently only handles a single objective." - % (model.name)) + % (model.name) + ) elif n_objs == 1: - symbol_map.alias(symbol_map.bySymbol["o0"](),"__default_objective__") + symbol_map.alias(symbol_map.bySymbol["o0"], "__default_objective__") if show_section_timing: subsection_timer.report("Generate objective representation") @@ -962,18 +1012,15 @@ def _print_model_NL(self, model, if gen_con_repn is not None: gen_con_repn = bool(gen_con_repn) # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): + if not hasattr(block, '_repn'): block._repn = ComponentMap() block_repn = block._repn # Initializing the constraint dictionary - for constraint_data in block.component_data_objects(Constraint, - active=True, - sort=sorter, - descend_into=False): - - if (not constraint_data.has_lb()) and \ - (not constraint_data.has_ub()): + for constraint_data in block.component_data_objects( + Constraint, active=True, sort=sorter, descend_into=False + ): + if (not constraint_data.has_lb()) and (not constraint_data.has_ub()): assert not constraint_data.equality continue # non-binding, so skip @@ -995,7 +1042,7 @@ def _print_model_NL(self, model, # Note that this is fragile: # generate_standard_repn can leave nonlinear # terms in both quadratic and nonlinear fields. - # However, when this was writen the assumption + # However, when this was written the assumption # is that generate_standard_repn is only called # with quadratic=True for QCQPs (by the LP # writer). So, quadratic and nonlinear_expr @@ -1016,8 +1063,9 @@ def _print_model_NL(self, model, linear_vars = repn.linear_vars nonlinear_vars = repn.nonlinear_vars else: - repn = generate_standard_repn(constraint_data.body, - quadratic=False) + repn = generate_standard_repn( + constraint_data.body, quadratic=False + ) linear_vars = repn.linear_vars nonlinear_vars = repn.nonlinear_vars if gen_con_repn: @@ -1036,11 +1084,15 @@ def _print_model_NL(self, model, wrapped_repn = RepnWrapper( repn, list(self_varID_map[id(var)] for var in linear_vars), - list(self_varID_map[id(var)] for var in nonlinear_vars)) + list(self_varID_map[id(var)] for var in nonlinear_vars), + ) except KeyError as err: - self._symbolMapKeyError(err, model, self_varID_map, - list(linear_vars) + - list(nonlinear_vars)) + self._symbolMapKeyError( + err, + model, + self_varID_map, + list(linear_vars) + list(nonlinear_vars), + ) raise if repn.is_nonlinear(): @@ -1054,9 +1106,9 @@ def _print_model_NL(self, model, LinearVars.update(wrapped_repn.linear_vars) ConNonlinearVars.update(wrapped_repn.nonlinear_vars) - nnz_grad_constraints += \ - len(set(wrapped_repn.linear_vars).union( - wrapped_repn.nonlinear_vars)) + nnz_grad_constraints += len( + set(wrapped_repn.linear_vars).union(wrapped_repn.nonlinear_vars) + ) L = None U = None @@ -1075,9 +1127,8 @@ def _print_model_NL(self, model, _type = getattr(constraint_data, '_complementarity', None) _vid = getattr(constraint_data, '_vid', None) if not _type is None: - _vid = self_varID_map[_vid]+1 - constraint_bounds_dict[con_ID] = \ - "5 {0} {1}\n".format(_type, _vid) + _vid = self_varID_map[_vid] + 1 + constraint_bounds_dict[con_ID] = "5 {0} {1}\n".format(_type, _vid) if _type == 1 or _type == 2: n_single_sided_ineq += 1 elif _type == 3: @@ -1095,23 +1146,27 @@ def _print_model_NL(self, model, constraint_bounds_dict[con_ID] = "3\n" n_unbounded += 1 else: - constraint_bounds_dict[con_ID] = \ - "4 %r\n" % (L-offset) + constraint_bounds_dict[con_ID] = "4 %r\n" % (L - offset) n_equals += 1 elif L is None: - constraint_bounds_dict[con_ID] = "1 %r\n" % (U-offset) + constraint_bounds_dict[con_ID] = "1 %r\n" % (U - offset) n_single_sided_ineq += 1 elif U is None: - constraint_bounds_dict[con_ID] = "2 %r\n" % (L-offset) + constraint_bounds_dict[con_ID] = "2 %r\n" % (L - offset) n_single_sided_ineq += 1 - elif (L > U): - msg = 'Constraint {0}: lower bound greater than upper' \ + elif L > U: + msg = ( + 'Constraint {0}: lower bound greater than upper' ' bound ({1} > {2})' - raise ValueError(msg.format(constraint_data.name, - str(L), str(U))) + ) + raise ValueError( + msg.format(constraint_data.name, str(L), str(U)) + ) else: - constraint_bounds_dict[con_ID] = \ - "0 %r %r\n" % (L-offset, U-offset) + constraint_bounds_dict[con_ID] = "0 %r %r\n" % ( + L - offset, + U - offset, + ) # double sided inequality # both are not none and they are valid n_ranges += 1 @@ -1119,30 +1174,40 @@ def _print_model_NL(self, model, sos1 = solver_capability("sos1") sos2 = solver_capability("sos2") for block in all_blocks_list: - for soscondata in block.component_data_objects(SOSConstraint, - active=True, - sort=sorter, - descend_into=False): + for soscondata in block.component_data_objects( + SOSConstraint, active=True, sort=sorter, descend_into=False + ): level = soscondata.level if (level == 1 and not sos1) or (level == 2 and not sos2): raise Exception( - "Solver does not support SOS level %s constraints" - % (level,)) + "Solver does not support SOS level %s constraints" % (level,) + ) if hasattr(soscondata, "get_variables"): - LinearVars.update(self_varID_map[id(vardata)] - for vardata in soscondata.get_variables()) + LinearVars.update( + self_varID_map[id(vardata)] + for vardata in soscondata.get_variables() + ) else: - LinearVars.update(self_varID_map[id(vardata)] - for vardata in soscondata.variables) + LinearVars.update( + self_varID_map[id(vardata)] for vardata in soscondata.variables + ) # create the ampl constraint ids self_ampl_con_id.update( - (con_ID,row_id) for row_id,con_ID in \ - enumerate(itertools.chain(nonlin_con_order_list,lin_con_order_list))) + (con_ID, row_id) + for row_id, con_ID in enumerate( + itertools.chain(nonlin_con_order_list, lin_con_order_list) + ) + ) # populate the symbol_map symbol_map.addSymbols( - [(Constraints_dict[con_ID][0],"c%d"%row_id) for row_id,con_ID in \ - enumerate(itertools.chain(nonlin_con_order_list,lin_con_order_list))]) + [ + (Constraints_dict[con_ID][0], "c%d" % row_id) + for row_id, con_ID in enumerate( + itertools.chain(nonlin_con_order_list, lin_con_order_list) + ) + ] + ) if show_section_timing: subsection_timer.report("Generate constraint representations") @@ -1157,8 +1222,7 @@ def _print_model_NL(self, model, if include_all_variable_bounds: # classify unused vars as linear - AllVars = set(self_varID_map[id(vardata)] - for vardata in Vars_dict.values()) + AllVars = set(self_varID_map[id(vardata)] for vardata in Vars_dict.values()) UnusedVars = AllVars.difference(UsedVars) LinearVars.update(UnusedVars) @@ -1170,7 +1234,6 @@ def _print_model_NL(self, model, Vars_dict[id(vi)] = vi ConNonlinearVars.update([self_varID_map[id(vi)]]) - ### There used to be an if statement here for the following code block ### checking model.statistics.num_binary_vars was greater than zero. ### To this day, I don't know how it worked. @@ -1183,14 +1246,19 @@ def _print_model_NL(self, model, L = var.lb U = var.ub if (L is None) or (U is None): - raise ValueError("Variable " + str(var.name) +\ - "is binary, but does not have lb and ub set") + raise ValueError( + "Variable " + + str(var.name) + + "is binary, but does not have lb and ub set" + ) LinearVarsBool.add(var_ID) elif var.is_integer(): LinearVarsInt.add(var_ID) elif not var.is_continuous(): - raise TypeError("Invalid domain type for variable with name '%s'. " - "Variable is not continuous, integer, or binary.") + raise TypeError( + "Invalid domain type for variable with name '%s'. " + "Variable is not continuous, integer, or binary." + ) LinearVars.difference_update(LinearVarsInt) LinearVars.difference_update(LinearVarsBool) @@ -1199,33 +1267,41 @@ def _print_model_NL(self, model, if var.is_integer() or var.is_binary(): ObjNonlinearVarsInt.add(var_ID) elif not var.is_continuous(): - raise TypeError("Invalid domain type for variable with name '%s'. " - "Variable is not continuous, integer, or binary.") + raise TypeError( + "Invalid domain type for variable with name '%s'. " + "Variable is not continuous, integer, or binary." + ) ObjNonlinearVars.difference_update(ObjNonlinearVarsInt) for var_ID in ConNonlinearVars: var = Vars_dict[var_ID] if var.is_integer() or var.is_binary(): ConNonlinearVarsInt.add(var_ID) elif not var.is_continuous(): - raise TypeError("Invalid domain type for variable with name '%s'. " - "Variable is not continuous, integer, or binary.") + raise TypeError( + "Invalid domain type for variable with name '%s'. " + "Variable is not continuous, integer, or binary." + ) ConNonlinearVars.difference_update(ConNonlinearVarsInt) ################## - Nonlinear_Vars_in_Objs_and_Constraints = \ - ObjNonlinearVars.intersection(ConNonlinearVars) - Discrete_Nonlinear_Vars_in_Objs_and_Constraints = \ + Nonlinear_Vars_in_Objs_and_Constraints = ObjNonlinearVars.intersection( + ConNonlinearVars + ) + Discrete_Nonlinear_Vars_in_Objs_and_Constraints = ( ObjNonlinearVarsInt.intersection(ConNonlinearVarsInt) - ObjNonlinearVars = \ - ObjNonlinearVars.difference(Nonlinear_Vars_in_Objs_and_Constraints) - ConNonlinearVars = \ - ConNonlinearVars.difference(Nonlinear_Vars_in_Objs_and_Constraints) - ObjNonlinearVarsInt = \ - ObjNonlinearVarsInt.difference( - Discrete_Nonlinear_Vars_in_Objs_and_Constraints) - ConNonlinearVarsInt = \ - ConNonlinearVarsInt.difference( - Discrete_Nonlinear_Vars_in_Objs_and_Constraints) + ) + ObjNonlinearVars = ObjNonlinearVars.difference( + Nonlinear_Vars_in_Objs_and_Constraints + ) + ConNonlinearVars = ConNonlinearVars.difference( + Nonlinear_Vars_in_Objs_and_Constraints + ) + ObjNonlinearVarsInt = ObjNonlinearVarsInt.difference( + Discrete_Nonlinear_Vars_in_Objs_and_Constraints + ) + ConNonlinearVarsInt = ConNonlinearVarsInt.difference( + Discrete_Nonlinear_Vars_in_Objs_and_Constraints + ) # put the ampl variable id into the variable full_var_list = [] @@ -1245,30 +1321,35 @@ def _print_model_NL(self, model, full_var_list.extend(sorted(LinearVarsBool)) full_var_list.extend(sorted(LinearVarsInt)) - if (idx_nl_obj == idx_nl_con): + if idx_nl_obj == idx_nl_con: idx_nl_obj = idx_nl_both # create the ampl variable column ids - self_ampl_var_id.update((var_ID,column_id) - for column_id,var_ID in enumerate(full_var_list)) + self_ampl_var_id.update( + (var_ID, column_id) for column_id, var_ID in enumerate(full_var_list) + ) # populate the symbol_map - symbol_map.addSymbols([(Vars_dict[var_ID],"v%d"%column_id) - for column_id,var_ID in enumerate(full_var_list)]) + symbol_map.addSymbols( + [ + (Vars_dict[var_ID], "v%d" % column_id) + for column_id, var_ID in enumerate(full_var_list) + ] + ) if show_section_timing: subsection_timer.report("Partition variable types") subsection_timer.reset() -# end_time = time.clock() -# print (end_time - start_time) + # end_time = time.clock() + # print (end_time - start_time) colfilename = None if OUTPUT.name.endswith('.nl'): - colfilename = OUTPUT.name.replace('.nl','.col') + colfilename = OUTPUT.name.replace('.nl', '.col') else: - colfilename = OUTPUT.name+'.col' + colfilename = OUTPUT.name + '.col' if symbolic_solver_labels: - colf = open(colfilename,'w') + colf = open(colfilename, 'w') colfile_line_template = "%s\n" for var_ID in full_var_list: varname = name_labeler(Vars_dict[var_ID]) @@ -1282,8 +1363,10 @@ def _print_model_NL(self, model, subsection_timer.reset() if len(full_var_list) < 1: - raise ValueError("No variables appear in the Pyomo model constraints or" - " objective. This is not supported by the NL file interface") + raise ValueError( + "No variables appear in the Pyomo model constraints or" + " objective. This is not supported by the NL file interface" + ) # # Print Header @@ -1294,24 +1377,30 @@ def _print_model_NL(self, model, # # LINE 2 # - OUTPUT.write(" {0} {1} {2} {3} {4} \t# vars, constraints, " - "objectives, ranges, eqns\n" .format( - len(full_var_list), - n_single_sided_ineq + n_ranges+n_equals+n_unbounded, - n_objs, - n_ranges, - n_equals)) + OUTPUT.write( + " {0} {1} {2} {3} {4} \t# vars, constraints, " + "objectives, ranges, eqns\n".format( + len(full_var_list), + n_single_sided_ineq + n_ranges + n_equals + n_unbounded, + n_objs, + n_ranges, + n_equals, + ) + ) # # LINE 3 # - OUTPUT.write(" {0} {1} {2} {3} {4} {5}\t# nonlinear constrs, " - "objs; ccons: lin, nonlin, nd, nzlb\n".format( - n_nonlinear_constraints, - n_nonlinear_objs, - ccons_lin, - ccons_nonlin, - ccons_nd, - ccons_nzlb)) + OUTPUT.write( + " {0} {1} {2} {3} {4} {5}\t# nonlinear constrs, " + "objs; ccons: lin, nonlin, nd, nzlb\n".format( + n_nonlinear_constraints, + n_nonlinear_objs, + ccons_lin, + ccons_nonlin, + ccons_nd, + ccons_nzlb, + ) + ) # # LINE 4 # @@ -1319,79 +1408,87 @@ def _print_model_NL(self, model, # # LINE 5 # - OUTPUT.write(" {0} {1} {2} \t# nonlinear vars in constraints, " - "objectives, both\n".format( - idx_nl_con, - idx_nl_obj, - idx_nl_both)) + OUTPUT.write( + " {0} {1} {2} \t# nonlinear vars in constraints, " + "objectives, both\n".format(idx_nl_con, idx_nl_obj, idx_nl_both) + ) # # LINE 6 # - OUTPUT.write(" 0 {0} 0 1\t# linear network variables; functions; " - "arith, flags\n".format(len(self.external_byFcn))) + OUTPUT.write( + " 0 {0} 0 1\t# linear network variables; functions; " + "arith, flags\n".format(len(self.external_byFcn)) + ) # # LINE 7 # n_int_nonlinear_b = len(Discrete_Nonlinear_Vars_in_Objs_and_Constraints) n_int_nonlinear_c = len(ConNonlinearVarsInt) n_int_nonlinear_o = len(ObjNonlinearVarsInt) - OUTPUT.write(" {0} {1} {2} {3} {4} \t# discrete variables: binary, " - "integer, nonlinear (b,c,o)\n".format( - len(LinearVarsBool), - len(LinearVarsInt), - n_int_nonlinear_b, - n_int_nonlinear_c, - n_int_nonlinear_o)) + OUTPUT.write( + " {0} {1} {2} {3} {4} \t# discrete variables: binary, " + "integer, nonlinear (b,c,o)\n".format( + len(LinearVarsBool), + len(LinearVarsInt), + n_int_nonlinear_b, + n_int_nonlinear_c, + n_int_nonlinear_o, + ) + ) # # LINE 8 # # objective info computed above - OUTPUT.write(" {0} {1} \t# nonzeros in Jacobian, obj. gradient\n".format( - nnz_grad_constraints, - len(ObjVars))) + OUTPUT.write( + " {0} {1} \t# nonzeros in Jacobian, obj. gradient\n".format( + nnz_grad_constraints, len(ObjVars) + ) + ) # # LINE 9 # - OUTPUT.write(" %d %d\t# max name lengths: constraints, variables\n" - % (max_rowname_len, max_colname_len)) + OUTPUT.write( + " %d %d\t# max name lengths: constraints, variables\n" + % (max_rowname_len, max_colname_len) + ) # # LINE 10 # OUTPUT.write(" 0 0 0 0 0\t# common exprs: b,c,o,c1,o1\n") -# end_time = time.clock() -# print (end_time - start_time) - -# print "Printing constraints:", -# start_time = time.clock() + # end_time = time.clock() + # print (end_time - start_time) + # print "Printing constraints:", + # start_time = time.clock() # # "F" lines # - for fcn, fid in sorted(self.external_byFcn.values(), - key=operator.itemgetter(1)): + for fcn, fid in sorted( + self.external_byFcn.values(), key=operator.itemgetter(1) + ): OUTPUT.write("F%d 1 -1 %s\n" % (fid, fcn._function)) # # "S" lines # - # Tranlate the SOSConstraint component into ampl suffixes + # Translate the SOSConstraint component into ampl suffixes sos1 = solver_capability("sos1") sos2 = solver_capability("sos2") modelSOS = ModelSOS(self_ampl_var_id, self_varID_map) for block in all_blocks_list: - for soscondata in block.component_data_objects(SOSConstraint, - active=True, - sort=sorter, - descend_into=False): + for soscondata in block.component_data_objects( + SOSConstraint, active=True, sort=sorter, descend_into=False + ): level = soscondata.level if (level == 1 and not sos1) or (level == 2 and not sos2): raise ValueError( - "Solver does not support SOS level %s constraints" % (level)) + "Solver does not support SOS level %s constraints" % (level) + ) modelSOS.count_constraint(soscondata) symbol_map_byObject = symbol_map.byObject @@ -1409,25 +1506,27 @@ def _print_model_NL(self, model, prob_tag = 3 suffix_dict = {} if isinstance(model, IBlock): - suffix_gen = lambda b: ((suf.storage_key, suf) \ - for suf in pyomo.core.kernel.suffix.\ - export_suffix_generator(b, - active=True, - descend_into=False)) + suffix_gen = lambda b: ( + (suf.storage_key, suf) + for suf in pyomo.core.kernel.suffix.export_suffix_generator( + b, active=True, descend_into=False + ) + ) else: - suffix_gen = lambda b: pyomo.core.base.suffix.\ - active_export_suffix_generator(b) + suffix_gen = ( + lambda b: pyomo.core.base.suffix.active_export_suffix_generator(b) + ) for block in all_blocks_list: for name, suf in suffix_gen(block): if len(suf): - suffix_dict.setdefault(name,[]).append(suf) + suffix_dict.setdefault(name, []).append(suf) if not ('sosno' in suffix_dict): # We still need to write out the SOSConstraint suffixes # even though these may have not been "declared" on the model s_lines = var_sosno_suffix.genfilelines() len_s_lines = len(s_lines) if len_s_lines > 0: - OUTPUT.write(suffix_header_line.format(var_tag,len_s_lines,'sosno')) + OUTPUT.write(suffix_header_line.format(var_tag, len_s_lines, 'sosno')) OUTPUT.writelines(s_lines) else: # I am choosing not to allow a user to mix the use of the Pyomo @@ -1447,14 +1546,15 @@ def _print_model_NL(self, model, "declared 'sosno' suffixes as well as SOSConstraint " "components to exist on a single model. To avoid this " "error please use only one of these methods to define " - "special ordered sets.") + "special ordered sets." + ) if not ('ref' in suffix_dict): # We still need to write out the SOSConstraint suffixes # even though these may have not been "declared" on the model s_lines = var_ref_suffix.genfilelines() len_s_lines = len(s_lines) if len_s_lines > 0: - OUTPUT.write(suffix_header_line.format(var_tag,len_s_lines,'ref')) + OUTPUT.write(suffix_header_line.format(var_tag, len_s_lines, 'ref')) OUTPUT.writelines(s_lines) else: # see reason (1) in the paragraph above for why we raise this @@ -1465,7 +1565,8 @@ def _print_model_NL(self, model, "declared 'ref' suffixes as well as SOSConstraint " "components to exist on a single model. To avoid this " "error please use only one of these methods to define " - "special ordered sets.") + "special ordered sets." + ) # do a sort to make sure NL file output is deterministic # across python versions for suffix_name in sorted(suffix_dict): @@ -1476,17 +1577,19 @@ def _print_model_NL(self, model, datatype = suffix.datatype except AttributeError: datatype = suffix.get_datatype() - if datatype not in (Suffix.FLOAT,Suffix.INT): + if datatype not in (Suffix.FLOAT, Suffix.INT): raise ValueError( "The Pyomo NL file writer requires that all active export " "Suffix components declare a numeric datatype. Suffix " - "component: %s with " % (suffix_name)) + "component: %s with " % (suffix_name) + ) datatypes.add(datatype) if len(datatypes) != 1: raise ValueError( "The Pyomo NL file writer found multiple active export suffix " "components with name %s with different datatypes. A single " - "datatype must be declared." % (suffix_name)) + "datatype must be declared." % (suffix_name) + ) if suffix_name == "dual": # The NL file format has a special section for dual initializations continue @@ -1500,7 +1603,6 @@ def _print_model_NL(self, model, mod_s_lines = [] for suffix in suffixes: for component_data, suffix_value in suffix.items(): - try: symbol = symbol_map_byObject[id(component_data)] type_tag = symbol[0] @@ -1520,41 +1622,54 @@ def _print_model_NL(self, model, ################## vars if len(var_s_lines) > 0: - OUTPUT.write(suffix_header_line.format(var_tag | float_tag, - len(var_s_lines), - suffix_name)) - OUTPUT.writelines(suffix_line.format(*_l) - for _l in sorted(var_s_lines, - key=operator.itemgetter(0))) + OUTPUT.write( + suffix_header_line.format( + var_tag | float_tag, len(var_s_lines), suffix_name + ) + ) + OUTPUT.writelines( + suffix_line.format(*_l) + for _l in sorted(var_s_lines, key=operator.itemgetter(0)) + ) ################## constraints if len(con_s_lines) > 0: - OUTPUT.write(suffix_header_line.format(con_tag | float_tag, - len(con_s_lines), - suffix_name)) - OUTPUT.writelines(suffix_line.format(*_l) - for _l in sorted(con_s_lines, - key=operator.itemgetter(0))) + OUTPUT.write( + suffix_header_line.format( + con_tag | float_tag, len(con_s_lines), suffix_name + ) + ) + OUTPUT.writelines( + suffix_line.format(*_l) + for _l in sorted(con_s_lines, key=operator.itemgetter(0)) + ) ################## objectives if len(obj_s_lines) > 0: - OUTPUT.write(suffix_header_line.format(obj_tag | float_tag, - len(obj_s_lines), - suffix_name)) - OUTPUT.writelines(suffix_line.format(*_l) - for _l in sorted(obj_s_lines, - key=operator.itemgetter(0))) + OUTPUT.write( + suffix_header_line.format( + obj_tag | float_tag, len(obj_s_lines), suffix_name + ) + ) + OUTPUT.writelines( + suffix_line.format(*_l) + for _l in sorted(obj_s_lines, key=operator.itemgetter(0)) + ) ################## problems (in this case the one problem) if len(mod_s_lines) > 0: if len(mod_s_lines) > 1: logger.warning( "ProblemWriter_nl: Collected multiple values for Suffix %s " "referencing model %s. This is likely a bug." - % (suffix_name, model.name)) - OUTPUT.write(suffix_header_line.format(prob_tag | float_tag, - len(mod_s_lines), - suffix_name)) - OUTPUT.writelines(suffix_line.format(*_l) - for _l in sorted(mod_s_lines, - key=operator.itemgetter(0))) + % (suffix_name, model.name) + ) + OUTPUT.write( + suffix_header_line.format( + prob_tag | float_tag, len(mod_s_lines), suffix_name + ) + ) + OUTPUT.writelines( + suffix_line.format(*_l) + for _l in sorted(mod_s_lines, key=operator.itemgetter(0)) + ) del modelSOS @@ -1563,11 +1678,11 @@ def _print_model_NL(self, model, # rowfilename = None if OUTPUT.name.endswith('.nl'): - rowfilename = OUTPUT.name.replace('.nl','.row') + rowfilename = OUTPUT.name.replace('.nl', '.row') else: - rowfilename = OUTPUT.name+'.row' + rowfilename = OUTPUT.name + '.row' if symbolic_solver_labels: - rowf = open(rowfilename,'w') + rowf = open(rowfilename, 'w') cu = [0 for i in range(len(full_var_list))] for con_ID in nonlin_con_order_list: @@ -1577,21 +1692,21 @@ def _print_model_NL(self, model, if symbolic_solver_labels: lbl = name_labeler(con_data) OUTPUT.write("\t#%s" % (lbl)) - rowf.write(lbl+"\n") + rowf.write(lbl + "\n") OUTPUT.write("\n") if wrapped_repn.repn.nonlinear_expr is not None: assert not wrapped_repn.repn.is_quadratic() - self._print_nonlinear_terms_NL( - wrapped_repn.repn.nonlinear_expr) + self._print_nonlinear_terms_NL(wrapped_repn.repn.nonlinear_expr) else: assert wrapped_repn.repn.is_quadratic() self._print_standard_quadratic_NL( - wrapped_repn.repn.quadratic_vars, - wrapped_repn.repn.quadratic_coefs) + wrapped_repn.repn.quadratic_vars, wrapped_repn.repn.quadratic_coefs + ) for var_ID in set(wrapped_repn.linear_vars).union( - wrapped_repn.nonlinear_vars): + wrapped_repn.nonlinear_vars + ): cu[self_ampl_var_id[var_ID]] += 1 for con_ID in lin_con_order_list: @@ -1604,7 +1719,7 @@ def _print_model_NL(self, model, if symbolic_solver_labels: lbl = name_labeler(con_data) OUTPUT.write("\t#%s" % (lbl)) - rowf.write(lbl+"\n") + rowf.write(lbl + "\n") OUTPUT.write("\n") OUTPUT.write("n0\n") @@ -1616,7 +1731,6 @@ def _print_model_NL(self, model, # "O" lines # for obj_ID, (obj, wrapped_repn) in Objectives_dict.items(): - k = 0 if not obj.is_minimizing(): k = 1 @@ -1625,27 +1739,29 @@ def _print_model_NL(self, model, if symbolic_solver_labels: lbl = name_labeler(obj) OUTPUT.write("\t#%s" % (lbl)) - rowf.write(lbl+"\n") + rowf.write(lbl + "\n") OUTPUT.write("\n") if wrapped_repn.repn.is_linear(): - OUTPUT.write(self._op_string[NumericConstant] - % (wrapped_repn.repn.constant)) + OUTPUT.write( + self._op_string[NumericConstant] % (wrapped_repn.repn.constant) + ) else: if wrapped_repn.repn.constant != 0: _, binary_sum_str, _ = self._op_string[EXPR.SumExpressionBase] OUTPUT.write(binary_sum_str) - OUTPUT.write(self._op_string[NumericConstant] - % (wrapped_repn.repn.constant)) + OUTPUT.write( + self._op_string[NumericConstant] % (wrapped_repn.repn.constant) + ) if wrapped_repn.repn.nonlinear_expr is not None: assert not wrapped_repn.repn.is_quadratic() - self._print_nonlinear_terms_NL( - wrapped_repn.repn.nonlinear_expr) + self._print_nonlinear_terms_NL(wrapped_repn.repn.nonlinear_expr) else: assert wrapped_repn.repn.is_quadratic() self._print_standard_quadratic_NL( wrapped_repn.repn.quadratic_vars, - wrapped_repn.repn.quadratic_coefs) + wrapped_repn.repn.quadratic_coefs, + ) if symbolic_solver_labels: rowf.close() @@ -1662,7 +1778,6 @@ def _print_model_NL(self, model, if 'dual' in suffix_dict: s_lines = [] for dual_suffix in suffix_dict['dual']: - for constraint_data, suffix_value in dual_suffix.items(): try: # a constraint might not be referenced @@ -1680,9 +1795,10 @@ def _print_model_NL(self, model, if symbolic_solver_labels: OUTPUT.write("\t# dual initial guess") OUTPUT.write("\n") - OUTPUT.writelines(suffix_line.format(*_l) - for _l in sorted(s_lines, - key=operator.itemgetter(0))) + OUTPUT.writelines( + suffix_line.format(*_l) + for _l in sorted(s_lines, key=operator.itemgetter(0)) + ) # # "x" lines @@ -1702,7 +1818,8 @@ def _print_model_NL(self, model, "indicative of a preprocessing error. Use the IO-option " "'output_fixed_variable_bounds=True' to suppress this error " "and fix the variable by overwriting its bounds in the NL " - "file." % (var.name, model.name)) + "file." % (var.name, model.name) + ) if var.value is None: raise ValueError("Variable cannot be fixed to a value of None.") L = U = _get_bound(var.value) @@ -1742,13 +1859,16 @@ def _print_model_NL(self, model, # OUTPUT.write("r") if symbolic_solver_labels: - OUTPUT.write("\t#%d ranges (rhs's)" - % (len(nonlin_con_order_list) + len(lin_con_order_list))) + OUTPUT.write( + "\t#%d ranges (rhs's)" + % (len(nonlin_con_order_list) + len(lin_con_order_list)) + ) OUTPUT.write("\n") # *NOTE: This iteration follows the assignment of the ampl_con_id - OUTPUT.writelines(constraint_bounds_dict[con_ID] - for con_ID in itertools.chain(nonlin_con_order_list, - lin_con_order_list)) + OUTPUT.writelines( + constraint_bounds_dict[con_ID] + for con_ID in itertools.chain(nonlin_con_order_list, lin_con_order_list) + ) if show_section_timing: subsection_timer.report("Write constraint bounds") @@ -1759,8 +1879,7 @@ def _print_model_NL(self, model, # OUTPUT.write("b") if symbolic_solver_labels: - OUTPUT.write("\t#%d bounds (on variables)" - % (len(var_bound_list))) + OUTPUT.write("\t#%d bounds (on variables)" % (len(var_bound_list))) OUTPUT.write("\n") OUTPUT.writelines(var_bound_list) del var_bound_list @@ -1781,7 +1900,7 @@ def _print_model_NL(self, model, ktot = 0 for i in range(n1): ktot += cu[i] - OUTPUT.write("%d\n"%(ktot)) + OUTPUT.write("%d\n" % (ktot)) del cu if show_section_timing: @@ -1791,48 +1910,49 @@ def _print_model_NL(self, model, # # "J" lines # - for nc, con_ID in enumerate(itertools.chain(nonlin_con_order_list, - lin_con_order_list)): + for nc, con_ID in enumerate( + itertools.chain(nonlin_con_order_list, lin_con_order_list) + ): con_data, wrapped_repn = Constraints_dict[con_ID] numnonlinear_vars = len(wrapped_repn.nonlinear_vars) numlinear_vars = len(wrapped_repn.linear_vars) if numnonlinear_vars == 0: if numlinear_vars > 0: - linear_dict = dict((var_ID, coef) - for var_ID, coef in - zip(wrapped_repn.linear_vars, - wrapped_repn.repn.linear_coefs)) - OUTPUT.write("J%d %d\n"%(nc, numlinear_vars)) + linear_dict = dict( + (var_ID, coef) + for var_ID, coef in zip( + wrapped_repn.linear_vars, wrapped_repn.repn.linear_coefs + ) + ) + OUTPUT.write("J%d %d\n" % (nc, numlinear_vars)) OUTPUT.writelines( - "%d %r\n" % (self_ampl_var_id[con_var], - linear_dict[con_var]) - for con_var in sorted(linear_dict.keys())) + "%d %r\n" % (self_ampl_var_id[con_var], linear_dict[con_var]) + for con_var in sorted(linear_dict.keys()) + ) elif numlinear_vars == 0: - nl_con_vars = \ - sorted(wrapped_repn.nonlinear_vars) - OUTPUT.write("J%d %d\n"%(nc, numnonlinear_vars)) + nl_con_vars = sorted(wrapped_repn.nonlinear_vars) + OUTPUT.write("J%d %d\n" % (nc, numnonlinear_vars)) OUTPUT.writelines( - "%d 0\n"%(self_ampl_var_id[con_var]) - for con_var in nl_con_vars) + "%d 0\n" % (self_ampl_var_id[con_var]) for con_var in nl_con_vars + ) else: con_vars = set(wrapped_repn.nonlinear_vars) - nl_con_vars = sorted( - con_vars.difference( - wrapped_repn.linear_vars)) + nl_con_vars = sorted(con_vars.difference(wrapped_repn.linear_vars)) con_vars.update(wrapped_repn.linear_vars) linear_dict = dict( - (var_ID, coef) for var_ID, coef in - zip(wrapped_repn.linear_vars, - wrapped_repn.repn.linear_coefs)) - OUTPUT.write("J%d %d\n"%(nc, len(con_vars))) + (var_ID, coef) + for var_ID, coef in zip( + wrapped_repn.linear_vars, wrapped_repn.repn.linear_coefs + ) + ) + OUTPUT.write("J%d %d\n" % (nc, len(con_vars))) OUTPUT.writelines( - "%d %r\n" % (self_ampl_var_id[con_var], - linear_dict[con_var]) - for con_var in sorted(linear_dict.keys())) + "%d %r\n" % (self_ampl_var_id[con_var], linear_dict[con_var]) + for con_var in sorted(linear_dict.keys()) + ) OUTPUT.writelines( - "%d 0\n"%(self_ampl_var_id[con_var]) - for con_var in nl_con_vars) - + "%d 0\n" % (self_ampl_var_id[con_var]) for con_var in nl_con_vars + ) if show_section_timing: subsection_timer.report("Write J lines") @@ -1841,24 +1961,20 @@ def _print_model_NL(self, model, # # "G" lines # - for obj_ID, (obj, wrapped_repn) in \ - Objectives_dict.items(): - + for obj_ID, (obj, wrapped_repn) in Objectives_dict.items(): grad_entries = {} - for idx, obj_var in enumerate( - wrapped_repn.linear_vars): - grad_entries[self_ampl_var_id[obj_var]] = \ - wrapped_repn.repn.linear_coefs[idx] + for idx, obj_var in enumerate(wrapped_repn.linear_vars): + grad_entries[ + self_ampl_var_id[obj_var] + ] = wrapped_repn.repn.linear_coefs[idx] for obj_var in wrapped_repn.nonlinear_vars: if obj_var not in wrapped_repn.linear_vars: grad_entries[self_ampl_var_id[obj_var]] = 0 len_ge = len(grad_entries) if len_ge > 0: - OUTPUT.write("G%d %d\n" % (self_ampl_obj_id[obj_ID], - len_ge)) + OUTPUT.write("G%d %d\n" % (self_ampl_obj_id[obj_ID], len_ge)) for var_ID in sorted(grad_entries.keys()): - OUTPUT.write("%d %r\n" % (var_ID, - grad_entries[var_ID])) + OUTPUT.write("%d %r\n" % (var_ID, grad_entries[var_ID])) if show_section_timing: subsection_timer.report("Write G lines") @@ -1876,7 +1992,8 @@ def _symbolMapKeyError(self, err, model, map, vars): _errors.append( "Variable '%s' is not part of the model " "being written out, but appears in an " - "expression used on this model." % (v.name,)) + "expression used on this model." % (v.name,) + ) else: _parent = v.parent_block() while _parent is not None and _parent is not model: @@ -1887,8 +2004,8 @@ def _symbolMapKeyError(self, err, model, map, vars): "expression. Currently variables " "must be reachable through a tree " "of active Blocks." - % (v.name, _parent.ctype.__name__, - _parent.name)) + % (v.name, _parent.ctype.__name__, _parent.name) + ) if not _parent.active: _errors.append( "Variable '%s' exists within " @@ -1896,8 +2013,8 @@ def _symbolMapKeyError(self, err, model, map, vars): "an active expression. Currently " "variables must be reachable through " "a tree of active Blocks." - % (v.name, _parent.ctype.__name__, - _parent.name)) + % (v.name, _parent.ctype.__name__, _parent.name) + ) _parent = _parent.parent_block() if _errors: diff --git a/pyomo/repn/plugins/baron_writer.py b/pyomo/repn/plugins/baron_writer.py index a939248216e..2ba178c5cd8 100644 --- a/pyomo/repn/plugins/baron_writer.py +++ b/pyomo/repn/plugins/baron_writer.py @@ -23,98 +23,100 @@ from pyomo.opt import ProblemFormat from pyomo.opt.base import AbstractProblemWriter, WriterFactory from pyomo.core.expr.numvalue import ( - value, native_numeric_types, native_types, nonpyomo_leaf_types, + value, + native_numeric_types, + native_types, + nonpyomo_leaf_types, +) +from pyomo.core.expr.visitor import _ToStringVisitor +import pyomo.core.expr as EXPR +from pyomo.core.base import ( + SortComponents, + SymbolMap, + ShortNameLabeler, + NumericLabeler, + Constraint, + Objective, + Param, ) -from pyomo.core.expr import current as EXPR -from pyomo.core.base import (SortComponents, - SymbolMap, - ShortNameLabeler, - NumericLabeler, - Constraint, - Objective, - Param) from pyomo.core.base.component import ActiveComponent -#CLH: EXPORT suffixes "constraint_types" and "branching_priorities" + +# CLH: EXPORT suffixes "constraint_types" and "branching_priorities" # pass their respective information to the .bar file import pyomo.core.base.suffix import pyomo.core.kernel.suffix from pyomo.core.kernel.block import IBlock -from pyomo.repn.util import valid_expr_ctypes_minlp, \ - valid_active_ctypes_minlp, ftoa +from pyomo.repn.util import valid_expr_ctypes_minlp, valid_active_ctypes_minlp, ftoa logger = logging.getLogger('pyomo.core') + +def _handle_PowExpression(visitor, node, values): + # Per the BARON manual, x ^ y is allowed as long as x and y are not + # both variables. There is an issue that if one of the arguments + # contains "0*var", Pyomo will see that as fixed, but Baron will see + # it as variable. We will work around that by resolving any fixed + # expressions to their corresponding fixed value. + unfixed_count = 0 + for i, arg in enumerate(node.args): + if type(arg) in native_types: + pass + elif arg.is_fixed(): + values[i] = ftoa(value(arg), True) + else: + unfixed_count += 1 + + if unfixed_count < 2: + return f"{values[0]} ^ {values[1]}" + else: + return f"exp(({values[0]}) * log({values[1]}))" + + +_allowableUnaryFunctions = {'exp', 'log10', 'log', 'sqrt'} + +_log10_e = ftoa(math.log10(math.e)) + + +def _handle_UnaryFunctionExpression(visitor, node, values): + if node.name == "sqrt": + # Parens are necessary because sqrt() and "^" have different + # precedence levels. Instead of parsing the arg, be safe and + # explicitly add parens + return f"(({values[0]}) ^ 0.5)" + elif node.name == 'log10': + return f"({_log10_e} * log({values[0]}))" + elif node.name not in _allowableUnaryFunctions: + raise RuntimeError( + 'The BARON .BAR format does not support the unary ' + 'function "%s".' % (node.name,) + ) + return node._to_string(values, visitor.verbose, visitor.smap) + + +def _handle_AbsExpression(visitor, node, values): + # Parens are necessary because abs() and "^" have different + # precedence levels. Instead of parsing the arg, be safe and + # explicitly add parens + return f"((({values[0]}) ^ 2) ^ 0.5)" + + +_plusMinusOne = {-1, 1} + + # # A visitor pattern that creates a string for an expression # that is compatible with the BARON syntax. # -class ToBaronVisitor(EXPR.ExpressionValueVisitor): +class ToBaronVisitor(_ToStringVisitor): + _expression_handlers = { + EXPR.PowExpression: _handle_PowExpression, + EXPR.UnaryFunctionExpression: _handle_UnaryFunctionExpression, + EXPR.AbsExpression: _handle_AbsExpression, + } def __init__(self, variables, smap): - super(ToBaronVisitor, self).__init__() + super(ToBaronVisitor, self).__init__(False, smap) self.variables = variables - self.smap = smap - - def visit(self, node, values): - """ Visit nodes that have been expanded """ - tmp = [] - for i,val in enumerate(values): - arg = node._args_[i] - - if arg is None: - tmp.append('Undefined') # TODO: coverage - else: - parens = False - if val and val[0] in '-+': - parens = True - elif arg.__class__ in native_numeric_types: - pass - elif arg.__class__ in nonpyomo_leaf_types: - val = "'{0}'".format(val) - elif arg.is_expression_type(): - if node._precedence() < arg._precedence(): - parens = True - elif node._precedence() == arg._precedence(): - if i == 0: - parens = node._associativity() != 1 - elif i == len(node._args_)-1: - parens = node._associativity() != -1 - else: - parens = True - if parens: - tmp.append("({0})".format(val)) - else: - tmp.append(val) - - if node.__class__ is EXPR.PowExpression: - x,y = node.args - if type(x) not in native_types and not x.is_fixed() and \ - type(y) not in native_types and not y.is_fixed(): - # Per the BARON manual, x ^ y is allowed as long as x - # and y are not both variables - return "exp(({1}) * log({0}))".format(tmp[0], tmp[1]) - else: - return "{0} ^ {1}".format(tmp[0], tmp[1]) - elif node.__class__ is EXPR.UnaryFunctionExpression: - if node.name == "sqrt": - # Parens are necessary because sqrt() and "^" have - # different precedence levels. Instead of parsing the - # arg, be safe and explicitly add parens - return "(({0}) ^ 0.5)".format(tmp[0]) - elif node.name == 'log10': - return "({0} * log({1}))".format(math.log10(math.e), tmp[0]) - elif node.name in {'exp','log'}: - pass - else: - raise RuntimeError( - 'The BARON .BAR format does not support the unary ' - 'function "%s".' % (node.name,)) - elif node.__class__ is EXPR.AbsExpression: - # Parens are necessary because abs() and "^" have different - # precedence levels. Instead of parsing the arg, be safe - # and explicitly add parens - return "((({0}) ^ 2) ^ 0.5)".format(tmp[0]) - return node._to_string(tmp, None, self.smap, True) def visiting_potential_leaf(self, node): """ @@ -122,16 +124,13 @@ def visiting_potential_leaf(self, node): Return True if the node is not expanded. """ - #print("ISLEAF") - #print(node.__class__) - if node.__class__ in native_types: - return True, ftoa(node) + return True, ftoa(node, True) if node.is_expression_type(): # Special handling if NPV and semi-NPV types: if not node.is_potentially_variable(): - return True, ftoa(value(node)) + return True, ftoa(node(), True) if node.__class__ is EXPR.MonomialTermExpression: return True, self._monomial_to_string(node) if node.__class__ is EXPR.LinearExpression: @@ -147,10 +146,11 @@ def visiting_potential_leaf(self, node): "Unallowable component '%s' of type %s found in an active " "constraint or objective.\nThe GAMS writer cannot export " "expressions with this component type." - % (node.name, node.ctype.__name__)) + % (node.name, node.ctype.__name__) + ) if node.is_fixed(): - return True, ftoa(value(node)) + return True, ftoa(node(), True) else: assert node.is_variable_type() self.variables.add(id(node)) @@ -158,37 +158,37 @@ def visiting_potential_leaf(self, node): def _monomial_to_string(self, node): const, var = node.args - const = value(const) + if const.__class__ not in native_types: + const = value(const) if var.is_fixed(): - return ftoa(const * var.value) - self.variables.add(id(var)) + return ftoa(const * var.value, True) # Special handling: ftoa is slow, so bypass _to_string when this # is a trivial term - if const in {-1, 1}: + if not const: + return '0' + self.variables.add(id(var)) + if const in _plusMinusOne: if const < 0: return '-' + self.smap.getSymbol(var) else: return self.smap.getSymbol(var) - return node._to_string((ftoa(const), self.smap.getSymbol(var)), - False, self.smap, True) + return ftoa(const, True) + '*' + self.smap.getSymbol(var) def _linear_to_string(self, node): - iter_ = iter(node.args) - values = [] - if node.constant: - next(iter_) - values.append(ftoa(node.constant)) - values.extend(map(self._monomial_to_string, iter_)) - return node._to_string(values, False, self.smap, True) + values = [ + self._monomial_to_string(arg) + if ( + arg.__class__ is EXPR.MonomialTermExpression + and not arg.arg(1).is_fixed() + ) + else ftoa(value(arg)) + for arg in node.args + ] + return node._to_string(values, False, self.smap) -def expression_to_string(expr, variables, labeler=None, smap=None): - if labeler is not None: - if smap is None: - smap = SymbolMap() - smap.default_labeler = labeler - visitor = ToBaronVisitor(variables, smap) - return visitor.dfs_postorder_stack(expr) +def expression_to_string(expr, variables, smap): + return ToBaronVisitor(variables, smap).dfs_postorder_stack(expr) # TODO: The to_string function is handy, but the fact that @@ -199,32 +199,31 @@ def expression_to_string(expr, variables, labeler=None, smap=None): # function that takes a "labeler" or "symbol_map" for # writing non-expression components. + @WriterFactory.register('bar', 'Generate the corresponding BARON BAR file.') class ProblemWriter_bar(AbstractProblemWriter): - def __init__(self): - AbstractProblemWriter.__init__(self, ProblemFormat.bar) - def _write_equations_section(self, - model, - output_file, - all_blocks_list, - active_components_data_var, - symbol_map, - c_labeler, - output_fixed_variable_bounds, - skip_trivial_constraints, - sorter): - + def _write_equations_section( + self, + model, + output_file, + all_blocks_list, + active_components_data_var, + symbol_map, + c_labeler, + output_fixed_variable_bounds, + skip_trivial_constraints, + sorter, + ): referenced_variable_ids = OrderedSet() def _skip_trivial(constraint_data): if skip_trivial_constraints: if constraint_data._linear_canonical_form: repn = constraint_data.canonical_form() - if (repn.variables is None) or \ - (len(repn.variables) == 0): + if (repn.variables is None) or (len(repn.variables) == 0): return True elif constraint_data.body.polynomial_degree() == 0: return True @@ -234,14 +233,16 @@ def _skip_trivial(constraint_data): # Check for active suffixes to export # if isinstance(model, IBlock): - suffix_gen = lambda b: ((suf.storage_key, suf) \ - for suf in pyomo.core.kernel.suffix.\ - export_suffix_generator(b, - active=True, - descend_into=False)) + suffix_gen = lambda b: ( + (suf.storage_key, suf) + for suf in pyomo.core.kernel.suffix.export_suffix_generator( + b, active=True, descend_into=False + ) + ) else: - suffix_gen = lambda b: pyomo.core.base.suffix.\ - active_export_suffix_generator(b) + suffix_gen = ( + lambda b: pyomo.core.base.suffix.active_export_suffix_generator(b) + ) r_o_eqns = [] c_eqns = [] l_eqns = [] @@ -263,7 +264,8 @@ def _skip_trivial(constraint_data): raise ValueError( "A suffix '%s' contained an invalid value: %s\n" "Choices are: [relaxationonly, convex, local]" - % (suffix.name, constraint_type)) + % (suffix.name, constraint_type) + ) else: if block is block.model(): if block.name == 'unknown': @@ -276,7 +278,8 @@ def _skip_trivial(constraint_data): raise ValueError( "The BARON writer can not export suffix with name '%s'. " "Either remove it from the %s or deactivate it." - % (name, _location)) + % (name, _location) + ) non_standard_eqns = r_o_eqns + c_eqns + l_eqns @@ -284,7 +287,7 @@ def _skip_trivial(constraint_data): # EQUATIONS # - #Equation Declaration + # Equation Declaration n_roeqns = len(r_o_eqns) n_ceqns = len(c_eqns) n_leqns = len(l_eqns) @@ -302,29 +305,24 @@ def _skip_trivial(constraint_data): output_file.write("c_e_FIX_ONE_VAR_CONST__") order_counter += 1 for block in all_blocks_list: - - for constraint_data in block.component_data_objects(Constraint, - active=True, - sort=sorter, - descend_into=False): - - if (not constraint_data.has_lb()) and \ - (not constraint_data.has_ub()): + for constraint_data in block.component_data_objects( + Constraint, active=True, sort=sorter, descend_into=False + ): + if (not constraint_data.has_lb()) and (not constraint_data.has_ub()): assert not constraint_data.equality - continue # non-binding, so skip - - if (not _skip_trivial(constraint_data)) and \ - (constraint_data not in non_standard_eqns): + continue # non-binding, so skip + if (not _skip_trivial(constraint_data)) and ( + constraint_data not in non_standard_eqns + ): eqns.append(constraint_data) con_symbol = symbol_map.createSymbol(constraint_data, c_labeler) assert not con_symbol.startswith('.') assert con_symbol != "c_e_FIX_ONE_VAR_CONST__" - symbol_map.alias(constraint_data, - alias_template % order_counter) - output_file.write(", "+str(con_symbol)) + symbol_map.alias(constraint_data, alias_template % order_counter) + output_file.write(", " + str(con_symbol)) order_counter += 1 output_file.write(";\n\n") @@ -335,12 +333,11 @@ def _skip_trivial(constraint_data): con_symbol = symbol_map.createSymbol(constraint_data, c_labeler) assert not con_symbol.startswith('.') assert con_symbol != "c_e_FIX_ONE_VAR_CONST__" - symbol_map.alias(constraint_data, - alias_template % order_counter) - if i == n_roeqns-1: - output_file.write(str(con_symbol)+';\n\n') + symbol_map.alias(constraint_data, alias_template % order_counter) + if i == n_roeqns - 1: + output_file.write(str(con_symbol) + ';\n\n') else: - output_file.write(str(con_symbol)+', ') + output_file.write(str(con_symbol) + ', ') order_counter += 1 if n_ceqns > 0: @@ -349,12 +346,11 @@ def _skip_trivial(constraint_data): con_symbol = symbol_map.createSymbol(constraint_data, c_labeler) assert not con_symbol.startswith('.') assert con_symbol != "c_e_FIX_ONE_VAR_CONST__" - symbol_map.alias(constraint_data, - alias_template % order_counter) - if i == n_ceqns-1: - output_file.write(str(con_symbol)+';\n\n') + symbol_map.alias(constraint_data, alias_template % order_counter) + if i == n_ceqns - 1: + output_file.write(str(con_symbol) + ';\n\n') else: - output_file.write(str(con_symbol)+', ') + output_file.write(str(con_symbol) + ', ') order_counter += 1 if n_leqns > 0: @@ -363,12 +359,11 @@ def _skip_trivial(constraint_data): con_symbol = symbol_map.createSymbol(constraint_data, c_labeler) assert not con_symbol.startswith('.') assert con_symbol != "c_e_FIX_ONE_VAR_CONST__" - symbol_map.alias(constraint_data, - alias_template % order_counter) - if i == n_leqns-1: - output_file.write(str(con_symbol)+';\n\n') + symbol_map.alias(constraint_data, alias_template % order_counter) + if i == n_leqns - 1: + output_file.write(str(con_symbol) + ';\n\n') else: - output_file.write(str(con_symbol)+', ') + output_file.write(str(con_symbol) + ', ') order_counter += 1 # Create a dictionary of baron variable names to match to the @@ -377,19 +372,18 @@ def _skip_trivial(constraint_data): # that whole variable names are recognized, and simple # variable names are not identified inside longer names. # Example: ' x[1] ' -> ' x3 ' - #FIXME: 7/18/14 CLH: This may cause mistakes if spaces in + # FIXME: 7/18/14 CLH: This may cause mistakes if spaces in # variable names are allowed if isinstance(model, IBlock): - mutable_param_gen = lambda b: \ - b.components(ctype=Param, - descend_into=False) + mutable_param_gen = lambda b: b.components(ctype=Param, descend_into=False) else: + def mutable_param_gen(b): for param in block.component_objects(Param): if param.mutable and param.is_indexed(): - param_data_iter = \ - (param_data for index, param_data - in param.items()) + param_data_iter = ( + param_data for index, param_data in param.items() + ) elif not param.is_indexed(): param_data_iter = iter([param]) else: @@ -398,48 +392,15 @@ def mutable_param_gen(b): for param_data in param_data_iter: yield param_data - if False: - # - # This was part of a merge from master that caused - # test failures. But commenting this out didn't cause additional failures!?! - # - vstring_to_var_dict = {} - vstring_to_bar_dict = {} - pstring_to_bar_dict = {} - for block in all_blocks_list: - for var_data in active_components_data_var[id(block)]: - variable_stream = StringIO() - var_data.to_string(ostream=variable_stream, verbose=False) - variable_string = variable_stream.getvalue() - variable_string = ' '+variable_string+' ' - vstring_to_var_dict[variable_string] = var_data - if output_fixed_variable_bounds or (not var_data.fixed): - vstring_to_bar_dict[variable_string] = \ - ' '+object_symbol_dictionary[id(var_data)]+' ' - else: - assert var_data.value is not None - vstring_to_bar_dict[variable_string] = \ - ftoa(var_data.value) - - for param_data in mutable_param_gen(block): - param_stream = StringIO() - param_data.to_string(ostream=param_stream, verbose=False) - param_string = param_stream.getvalue() - - param_string = ' '+param_string+' ' - pstring_to_bar_dict[param_string] = ftoa(param_data()) - # Equation Definition - output_file.write('c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1;\n'); - for constraint_data in itertools.chain(eqns, - r_o_eqns, - c_eqns, - l_eqns): - + output_file.write('c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1;\n') + for constraint_data in itertools.chain(eqns, r_o_eqns, c_eqns, l_eqns): variables = OrderedSet() - #print(symbol_map.byObject.keys()) - eqn_body = expression_to_string(constraint_data.body, variables, smap=symbol_map) - #print(symbol_map.byObject.keys()) + # print(symbol_map.byObject.keys()) + eqn_body = expression_to_string( + constraint_data.body, variables, smap=symbol_map + ) + # print(symbol_map.byObject.keys()) referenced_variable_ids.update(variables) if len(variables) == 0: @@ -447,7 +408,7 @@ def mutable_param_gen(b): eqn_body += " + 0 * ONE_VAR_CONST__ " # 7/29/14 CLH: - #FIXME: Baron doesn't handle many of the + # FIXME: Baron doesn't handle many of the # intrinsic_functions available in pyomo. The # error message given by baron is also very # weak. Either a function here to re-write @@ -478,10 +439,8 @@ def mutable_param_gen(b): eqn_lhs = '' # Double-sided constraint - elif constraint_data.has_lb() and \ - constraint_data.has_ub(): - eqn_lhs = ftoa(constraint_data.lower) + \ - ' <= ' + elif constraint_data.has_lb() and constraint_data.has_ub(): + eqn_lhs = ftoa(constraint_data.lower) + ' <= ' eqn_rhs = ' <= ' + ftoa(constraint_data.upper) eqn_string = eqn_lhs + eqn_body + eqn_rhs + ';\n' @@ -495,18 +454,16 @@ def mutable_param_gen(b): n_objs = 0 for block in all_blocks_list: - - for objective_data in block.component_data_objects(Objective, - active=True, - sort=sorter, - descend_into=False): - + for objective_data in block.component_data_objects( + Objective, active=True, sort=sorter, descend_into=False + ): n_objs += 1 if n_objs > 1: - raise ValueError("The BARON writer has detected multiple active " - "objective functions on model %s, but " - "currently only handles a single objective." - % (model.name)) + raise ValueError( + "The BARON writer has detected multiple active " + "objective functions on model %s, but " + "currently only handles a single objective." % (model.name) + ) # create symbol symbol_map.createSymbol(objective_data, c_labeler) @@ -518,22 +475,19 @@ def mutable_param_gen(b): output_file.write("maximize ") variables = OrderedSet() - #print(symbol_map.byObject.keys()) - obj_string = expression_to_string(objective_data.expr, variables, smap=symbol_map) - #print(symbol_map.byObject.keys()) + # print(symbol_map.byObject.keys()) + obj_string = expression_to_string( + objective_data.expr, variables, smap=symbol_map + ) + # print(symbol_map.byObject.keys()) referenced_variable_ids.update(variables) - - output_file.write(obj_string+";\n\n") - #referenced_variable_ids.update(symbol_map.byObject.keys()) + output_file.write(obj_string + ";\n\n") + # referenced_variable_ids.update(symbol_map.byObject.keys()) return referenced_variable_ids, branching_priorities_suffixes - def __call__(self, - model, - output_filename, - solver_capability, - io_options): + def __call__(self, model, output_filename, solver_capability, io_options): if output_filename is None: output_filename = model.name + ".bar" @@ -548,24 +502,19 @@ def __call__(self, with output_file as FILE: symbol_map = self._write_bar_file( - model, FILE, solver_capability, io_options) + model, FILE, solver_capability, io_options + ) return output_filename, symbol_map - - def _write_bar_file(self, - model, - output_file, - solver_capability, - io_options): + def _write_bar_file(self, model, output_file, solver_capability, io_options): # Make sure not to modify the user's dictionary, they may be # reusing it outside of this call io_options = dict(io_options) # NOTE: io_options is a simple dictionary of keyword-value # pairs specific to this writer. - symbolic_solver_labels = \ - io_options.pop("symbolic_solver_labels", False) + symbolic_solver_labels = io_options.pop("symbolic_solver_labels", False) labeler = io_options.pop("labeler", None) # How much effort do we want to put into ensuring the @@ -581,13 +530,13 @@ def _write_bar_file(self, if file_determinism >= 2: sorter = sorter | SortComponents.alphabetical - output_fixed_variable_bounds = \ - io_options.pop("output_fixed_variable_bounds", False) + output_fixed_variable_bounds = io_options.pop( + "output_fixed_variable_bounds", False + ) # Skip writing constraints whose body section is fixed (i.e., # no variables) - skip_trivial_constraints = \ - io_options.pop("skip_trivial_constraints", False) + skip_trivial_constraints = io_options.pop("skip_trivial_constraints", False) # Note: Baron does not allow specification of runtime # option outside of this file, so we add support @@ -596,27 +545,30 @@ def _write_bar_file(self, if len(io_options): raise ValueError( - "ProblemWriter_baron_writer passed unrecognized io_options:\n\t" + - "\n\t".join("%s = %s" % (k,v) for k,v in io_options.items())) + "ProblemWriter_baron_writer passed unrecognized io_options:\n\t" + + "\n\t".join("%s = %s" % (k, v) for k, v in io_options.items()) + ) if symbolic_solver_labels and (labeler is not None): - raise ValueError("Baron problem writer: Using both the " - "'symbolic_solver_labels' and 'labeler' " - "I/O options is forbidden") + raise ValueError( + "Baron problem writer: Using both the " + "'symbolic_solver_labels' and 'labeler' " + "I/O options is forbidden" + ) # Make sure there are no strange ActiveComponents. The expression # walker will handle strange things in constraints later. model_ctypes = model.collect_ctypes(active=True) invalids = set() - for t in (model_ctypes - valid_active_ctypes_minlp): + for t in model_ctypes - valid_active_ctypes_minlp: if issubclass(t, ActiveComponent): invalids.add(t) if len(invalids): invalids = [t.__name__ for t in invalids] raise RuntimeError( "Unallowable active component(s) %s.\nThe BARON writer cannot " - "export models with this component type." % - ", ".join(invalids)) + "export models with this component type." % ", ".join(invalids) + ) # Process the options. Rely on baron to catch # and reset bad option values @@ -624,12 +576,12 @@ def _write_bar_file(self, summary_found = False if len(solver_options): for key, val in solver_options.items(): - if (key.lower() == 'summary'): + if key.lower() == 'summary': summary_found = True if key.endswith("Name"): - output_file.write(key+": \""+str(val)+"\";\n") + output_file.write(key + ": \"" + str(val) + "\";\n") else: - output_file.write(key+": "+str(val)+";\n") + output_file.write(key + ": " + str(val) + ";\n") if not summary_found: # The 'summary option is defaulted to 0, so that no # summary file is generated in the directory where the @@ -646,8 +598,12 @@ def _write_bar_file(self, # to start with a letter. We will (randomly) choose "s_" # (for 'shortened') v_labeler = c_labeler = ShortNameLabeler( - 15, prefix='s_', suffix='_', caseInsensitive=True, - legalRegex='^[a-zA-Z]') + 15, + prefix='s_', + suffix='_', + caseInsensitive=True, + legalRegex='^[a-zA-Z]', + ) elif labeler is None: v_labeler = NumericLabeler('x') c_labeler = NumericLabeler('c') @@ -656,31 +612,31 @@ def _write_bar_file(self, symbol_map = SymbolMap() symbol_map.default_labeler = v_labeler - #sm_bySymbol = symbol_map.bySymbol + # sm_bySymbol = symbol_map.bySymbol # Cache the list of model blocks so we don't have to call # model.block_data_objects() many many times, which is slow # for indexed blocks - all_blocks_list = list(model.block_data_objects(active=True, - sort=sorter, - descend_into=True)) + all_blocks_list = list( + model.block_data_objects(active=True, sort=sorter, descend_into=True) + ) active_components_data_var = {} - #for block in all_blocks_list: + # for block in all_blocks_list: # tmp = active_components_data_var[id(block)] = \ # list(obj for obj in block.component_data_objects(Var, # sort=sorter, # descend_into=False)) # create_symbols_func(symbol_map, tmp, labeler) - # GAH: Not sure this is necessary, and also it would break for - # non-mutable indexed params so I am commenting out for now. - #for param_data in active_components_data(block, Param, sort=sorter): - #instead of checking if param_data.mutable: - #if not param_data.is_constant(): - # create_symbol_func(symbol_map, param_data, labeler) + # GAH: Not sure this is necessary, and also it would break for + # non-mutable indexed params so I am commenting out for now. + # for param_data in active_components_data(block, Param, sort=sorter): + # instead of checking if param_data.mutable: + # if not param_data.is_constant(): + # create_symbol_func(symbol_map, param_data, labeler) - #symbol_map_variable_ids = set(symbol_map.byObject.keys()) - #object_symbol_dictionary = symbol_map.byObject + # symbol_map_variable_ids = set(symbol_map.byObject.keys()) + # object_symbol_dictionary = symbol_map.byObject # # Go through the objectives and constraints and generate @@ -688,17 +644,20 @@ def _write_bar_file(self, # variables. # equation_section_stream = StringIO() - referenced_variable_ids, branching_priorities_suffixes = \ - self._write_equations_section( - model, - equation_section_stream, - all_blocks_list, - active_components_data_var, - symbol_map, - c_labeler, - output_fixed_variable_bounds, - skip_trivial_constraints, - sorter) + ( + referenced_variable_ids, + branching_priorities_suffixes, + ) = self._write_equations_section( + model, + equation_section_stream, + all_blocks_list, + active_components_data_var, + symbol_map, + c_labeler, + output_fixed_variable_bounds, + skip_trivial_constraints, + sorter, + ) # # BINARY_VARIABLES, INTEGER_VARIABLES, POSITIVE_VARIABLES, VARIABLES @@ -710,7 +669,7 @@ def _write_bar_file(self, Vars = [] for vid in referenced_variable_ids: name = symbol_map.byObject[vid] - var_data = symbol_map.bySymbol[name]() + var_data = symbol_map.bySymbol[name] if var_data.is_continuous(): if var_data.has_lb() and (value(var_data.lb) >= 0): @@ -756,26 +715,25 @@ def _write_bar_file(self, lbounds = {} for vid in referenced_variable_ids: name = symbol_map.byObject[vid] - var_data = symbol_map.bySymbol[name]() + var_data = symbol_map.bySymbol[name] if var_data.fixed: if output_fixed_variable_bounds: - var_data_lb = ftoa(var_data.value) + var_data_lb = ftoa(var_data.value, False) else: var_data_lb = None else: var_data_lb = None if var_data.has_lb(): - var_data_lb = ftoa(var_data.lb) + var_data_lb = ftoa(var_data.lb, False) if var_data_lb is not None: name_to_output = symbol_map.getSymbol(var_data) - lbounds[name_to_output] = '%s: %s;\n' % ( - name_to_output, var_data_lb) + lbounds[name_to_output] = '%s: %s;\n' % (name_to_output, var_data_lb) if len(lbounds) > 0: output_file.write("LOWER_BOUNDS{\n") - output_file.write("".join( lbounds[key] for key in sorted(lbounds.keys()) ) ) + output_file.write("".join(lbounds[key] for key in sorted(lbounds.keys()))) output_file.write("}\n\n") lbounds = None @@ -786,26 +744,25 @@ def _write_bar_file(self, ubounds = {} for vid in referenced_variable_ids: name = symbol_map.byObject[vid] - var_data = symbol_map.bySymbol[name]() + var_data = symbol_map.bySymbol[name] if var_data.fixed: if output_fixed_variable_bounds: - var_data_ub = ftoa(var_data.value) + var_data_ub = ftoa(var_data.value, False) else: var_data_ub = None else: var_data_ub = None if var_data.has_ub(): - var_data_ub = ftoa(var_data.ub) + var_data_ub = ftoa(var_data.ub, False) if var_data_ub is not None: name_to_output = symbol_map.getSymbol(var_data) - ubounds[name_to_output] = '%s: %s;\n' % ( - name_to_output, var_data_ub) + ubounds[name_to_output] = '%s: %s;\n' % (name_to_output, var_data_ub) if len(ubounds) > 0: output_file.write("UPPER_BOUNDS{\n") - output_file.write("".join( ubounds[key] for key in sorted(ubounds.keys()) ) ) + output_file.write("".join(ubounds[key] for key in sorted(ubounds.keys()))) output_file.write("}\n\n") ubounds = None @@ -830,8 +787,9 @@ def _write_bar_file(self, if not BranchingPriorityHeader: output_file.write('BRANCHING_PRIORITIES{\n') BranchingPriorityHeader = True - output_file.write( "%s: %s;\n" % ( - symbol_map.getSymbol(var_data), priority)) + output_file.write( + "%s: %s;\n" % (symbol_map.getSymbol(var_data), priority) + ) if BranchingPriorityHeader: output_file.write("}\n\n") @@ -848,16 +806,14 @@ def _write_bar_file(self, tmp = {} for vid in referenced_variable_ids: name = symbol_map.byObject[vid] - var_data = symbol_map.bySymbol[name]() + var_data = symbol_map.bySymbol[name] starting_point = var_data.value if starting_point is not None: var_name = symbol_map.getSymbol(var_data) - tmp[var_name] = "%s: %s;\n" % ( - var_name, ftoa(starting_point)) + tmp[var_name] = "%s: %s;\n" % (var_name, ftoa(starting_point, False)) - output_file.write("".join( tmp[key] for key in sorted(tmp.keys()) )) + output_file.write("".join(tmp[key] for key in sorted(tmp.keys()))) output_file.write('}\n\n') return symbol_map - diff --git a/pyomo/repn/plugins/cpxlp.py b/pyomo/repn/plugins/cpxlp.py index ffd7cfb41bb..cdcb4b42c3b 100644 --- a/pyomo/repn/plugins/cpxlp.py +++ b/pyomo/repn/plugins/cpxlp.py @@ -18,22 +18,31 @@ from pyomo.common.gc_manager import PauseGC from pyomo.opt import ProblemFormat from pyomo.opt.base import AbstractProblemWriter, WriterFactory -from pyomo.core.base import \ - (SymbolMap, TextLabeler, - NumericLabeler, Constraint, SortComponents, - Var, value, - SOSConstraint, Objective, - ComponentMap, is_fixed) +from pyomo.core.base import ( + SymbolMap, + TextLabeler, + NumericLabeler, + Constraint, + SortComponents, + Var, + value, + SOSConstraint, + Objective, + ComponentMap, + is_fixed, +) from pyomo.repn import generate_standard_repn logger = logging.getLogger('pyomo.core') + def _no_negative_zero(val): """Make sure -0 is never output. Makes diff tests easier.""" if val == 0: return 0 return val + def _get_bound(exp): if exp is None: return None @@ -42,12 +51,10 @@ def _get_bound(exp): raise ValueError("non-fixed bound or weight: " + str(exp)) -@WriterFactory.register('cpxlp', 'Generate the corresponding CPLEX LP file') -@WriterFactory.register('lp', 'Generate the corresponding CPLEX LP file') +@WriterFactory.register('cpxlp_v1', 'Generate the corresponding CPLEX LP file') +@WriterFactory.register('lp_v1', 'Generate the corresponding CPLEX LP file') class ProblemWriter_cpxlp(AbstractProblemWriter): - def __init__(self): - AbstractProblemWriter.__init__(self, ProblemFormat.cpxlp) # The LP writer tracks which variables are @@ -67,44 +74,39 @@ def __init__(self): # and you will need to go add extra logic to output # the number's sign. self._precision_string = '.17g' - self.linear_coef_string_template = '%+'+self._precision_string+' %s\n' - self.quad_coef_string_template = '%+'+self._precision_string+' ' - self.obj_string_template = '%+'+self._precision_string+' %s\n' - self.sos_template_string = "%s:%"+self._precision_string+"\n" - self.eq_string_template = "= %"+self._precision_string+'\n' - self.geq_string_template = ">= %"+self._precision_string+'\n\n' - self.leq_string_template = "<= %"+self._precision_string+'\n\n' - self.lb_string_template = "%"+self._precision_string+" <= " - self.ub_string_template = " <= %"+self._precision_string+"\n" - - def __call__(self, - model, - output_filename, - solver_capability, - io_options): - + self.linear_coef_string_template = '%+' + self._precision_string + ' %s\n' + self.quad_coef_string_template = '%+' + self._precision_string + ' ' + self.obj_string_template = '%+' + self._precision_string + ' %s\n' + self.sos_template_string = "%s:%" + self._precision_string + "\n" + self.eq_string_template = "= %" + self._precision_string + '\n' + self.geq_string_template = ">= %" + self._precision_string + '\n\n' + self.leq_string_template = "<= %" + self._precision_string + '\n\n' + self.lb_string_template = "%" + self._precision_string + " <= " + self.ub_string_template = " <= %" + self._precision_string + "\n" + + def __call__(self, model, output_filename, solver_capability, io_options): # Make sure not to modify the user's dictionary, # they may be reusing it outside of this call io_options = dict(io_options) # Skip writing constraints whose body section is # fixed (i.e., no variables) - skip_trivial_constraints = \ - io_options.pop("skip_trivial_constraints", False) + skip_trivial_constraints = io_options.pop("skip_trivial_constraints", False) # Use full Pyomo component names in the LP file rather # than shortened symbols (slower, but useful for debugging). - symbolic_solver_labels = \ - io_options.pop("symbolic_solver_labels", False) + symbolic_solver_labels = io_options.pop("symbolic_solver_labels", False) - output_fixed_variable_bounds = \ - io_options.pop("output_fixed_variable_bounds", False) + output_fixed_variable_bounds = io_options.pop( + "output_fixed_variable_bounds", False + ) # If False, unused variables will not be included in # the LP file. Otherwise, include all variables in # the bounds sections. - include_all_variable_bounds = \ - io_options.pop("include_all_variable_bounds", False) + include_all_variable_bounds = io_options.pop( + "include_all_variable_bounds", False + ) labeler = io_options.pop("labeler", None) @@ -122,18 +124,20 @@ def __call__(self, # Make sure the ONE_VAR_CONSTANT variable appears in # the objective even if the constant part of the # objective is zero - force_objective_constant = \ - io_options.pop("force_objective_constant", False) + force_objective_constant = io_options.pop("force_objective_constant", False) if len(io_options): raise ValueError( - "ProblemWriter_cpxlp passed unrecognized io_options:\n\t" + - "\n\t".join("%s = %s" % (k,v) for k,v in io_options.items())) + "ProblemWriter_cpxlp passed unrecognized io_options:\n\t" + + "\n\t".join("%s = %s" % (k, v) for k, v in io_options.items()) + ) if symbolic_solver_labels and (labeler is not None): - raise ValueError("ProblemWriter_cpxlp: Using both the " - "'symbolic_solver_labels' and 'labeler' " - "I/O options is forbidden") + raise ValueError( + "ProblemWriter_cpxlp: Using both the " + "'symbolic_solver_labels' and 'labeler' " + "I/O options is forbidden" + ) # # Create labeler @@ -168,21 +172,24 @@ def __call__(self, column_order=column_order, skip_trivial_constraints=skip_trivial_constraints, force_objective_constant=force_objective_constant, - include_all_variable_bounds=include_all_variable_bounds) + include_all_variable_bounds=include_all_variable_bounds, + ) self._referenced_variable_ids.clear() return output_filename, symbol_map - def _print_expr_canonical(self, - x, - output, - object_symbol_dictionary, - variable_symbol_dictionary, - is_objective, - column_order, - force_objective_constant=False): - + def _print_expr_canonical( + self, + x, + output, + object_symbol_dictionary, + variable_symbol_dictionary, + is_objective, + column_order, + file_determinism, + force_objective_constant=False, + ): """ Return a expression as a string in LP format. @@ -197,12 +204,12 @@ def _print_expr_canonical(self, linear_coef_string_template = self.linear_coef_string_template quad_coef_string_template = self.quad_coef_string_template - constant=True + constant = True # # Linear # if len(x.linear_vars) > 0: - constant=False + constant = False for vardata in x.linear_vars: self._referenced_variable_ids[id(vardata)] = vardata @@ -211,21 +218,30 @@ def _print_expr_canonical(self, # Order columns by dictionary names # names = [variable_symbol_dictionary[id(var)] for var in x.linear_vars] - - for i, name in sorted(enumerate(names), key=lambda x: x[1]): - output.append(linear_coef_string_template % (x.linear_coefs[i], name)) + + term_iterator = zip(x.linear_coefs, names) + if file_determinism > 0: + term_iterator = sorted(term_iterator, key=lambda x: x[1]) + + for coef, name in term_iterator: + output.append(linear_coef_string_template % (coef, name)) else: # # Order columns by the value of column_order[] # - for i, var in sorted(enumerate(x.linear_vars), key=lambda x: column_order[x[1]]): + for i, var in sorted( + enumerate(x.linear_vars), key=lambda x: column_order[x[1]] + ): name = variable_symbol_dictionary[id(var)] - output.append(linear_coef_string_template % (x.linear_coefs[i], name)) + output.append( + linear_coef_string_template % (x.linear_coefs[i], name) + ) + # # Quadratic # if len(x.quadratic_vars) > 0: - constant=False + constant = False for var1, var2 in x.quadratic_vars: self._referenced_variable_ids[id(var1)] = var1 self._referenced_variable_ids[id(var2)] = var2 @@ -243,14 +259,18 @@ def _print_expr_canonical(self, name1 = variable_symbol_dictionary[id(var1)] name2 = variable_symbol_dictionary[id(var2)] if name1 < name2: - names.append( (name1,name2) ) + names.append((name1, name2)) elif name1 > name2: - names.append( (name2,name1) ) + names.append((name2, name1)) else: quad.add(i) - names.append( (name1,name1) ) + names.append((name1, name1)) i += 1 - for i, names_ in sorted(enumerate(names), key=lambda x: x[1]): + + term_iterator = enumerate(names) + if file_determinism > 0: + term_iterator = sorted(term_iterator, key=lambda x: x[1]) + for i, names_ in term_iterator: # # Times 2 because LP format requires /2 for all the quadratic # terms /of the objective only/. Discovered the last bit thru @@ -258,7 +278,7 @@ def _print_expr_canonical(self, # Ref: ILog CPlex 8.0 User's Manual, p197. # if is_objective: - tmp = 2*x.quadratic_coefs[i] + tmp = 2 * x.quadratic_coefs[i] output.append(quad_coef_string_template % tmp) else: output.append(quad_coef_string_template % x.quadratic_coefs[i]) @@ -277,12 +297,30 @@ def _print_expr_canonical(self, col1 = column_order[var1] col2 = column_order[var2] if col1 < col2: - cols.append( (((col1,col2) , variable_symbol_dictionary[id(var1)], variable_symbol_dictionary[id(var2)])) ) + cols.append( + ( + ( + (col1, col2), + variable_symbol_dictionary[id(var1)], + variable_symbol_dictionary[id(var2)], + ) + ) + ) elif col1 > col2: - cols.append( (((col2,col1) , variable_symbol_dictionary[id(var2)], variable_symbol_dictionary[id(var1)])) ) + cols.append( + ( + ( + (col2, col1), + variable_symbol_dictionary[id(var2)], + variable_symbol_dictionary[id(var1)], + ) + ) + ) else: quad.add(i) - cols.append( ((col1,col1), variable_symbol_dictionary[id(var1)]) ) + cols.append( + ((col1, col1), variable_symbol_dictionary[id(var1)]) + ) i += 1 for i, cols_ in sorted(enumerate(cols), key=lambda x: x[1][0]): # @@ -292,7 +330,9 @@ def _print_expr_canonical(self, # Ref: ILog CPlex 8.0 User's Manual, p197. # if is_objective: - output.append(quad_coef_string_template % 2*x.quadratic_coefs[i]) + output.append( + quad_coef_string_template % 2 * x.quadratic_coefs[i] + ) else: output.append(quad_coef_string_template % x.quadratic_coefs[i]) if i in quad: @@ -310,7 +350,7 @@ def _print_expr_canonical(self, output.append("\n") if constant and not is_objective: - # If we made it to here we are outputing + # If we made it to here we are outputting # trivial constraints place 0 * # ONE_VAR_CONSTANT on this side of the # constraint for the benefit of solvers like @@ -332,12 +372,7 @@ def _print_expr_canonical(self, # return x.constant - def printSOS(self, - symbol_map, - labeler, - variable_symbol_map, - soscondata, - output): + def printSOS(self, symbol_map, labeler, variable_symbol_map, soscondata, output): """ Prints the SOS constraint associated with the _SOSConstraintData object """ @@ -353,8 +388,9 @@ def printSOS(self, level = soscondata.level - output.append('%s: S%s::\n' - % (symbol_map.getSymbol(soscondata,labeler), level)) + output.append( + '%s: S%s::\n' % (symbol_map.getSymbol(soscondata, labeler), level) + ) for vardata, weight in sos_items: weight = _get_bound(weight) @@ -362,30 +398,33 @@ def printSOS(self, raise ValueError( "Cannot use negative weight %f " "for variable %s is special ordered " - "set %s " % (weight, vardata.name, soscondata.name)) + "set %s " % (weight, vardata.name, soscondata.name) + ) if vardata.fixed: raise RuntimeError( "SOSConstraint '%s' includes a fixed variable '%s'. This is " - "currently not supported. Deactive this constraint in order to " - "proceed." % (soscondata.name, vardata.name)) + "currently not supported. Deactivate this constraint in order to " + "proceed." % (soscondata.name, vardata.name) + ) self._referenced_variable_ids[id(vardata)] = vardata - output.append(sos_template_string - % (variable_symbol_map.getSymbol(vardata), - weight)) - - def _print_model_LP(self, - model, - output_file, - solver_capability, - labeler, - output_fixed_variable_bounds=False, - file_determinism=1, - row_order=None, - column_order=None, - skip_trivial_constraints=False, - force_objective_constant=False, - include_all_variable_bounds=False): - + output.append( + sos_template_string % (variable_symbol_map.getSymbol(vardata), weight) + ) + + def _print_model_LP( + self, + model, + output_file, + solver_capability, + labeler, + output_fixed_variable_bounds=False, + file_determinism=1, + row_order=None, + column_order=None, + skip_trivial_constraints=False, + force_objective_constant=False, + include_all_variable_bounds=False, + ): eq_string_template = self.eq_string_template leq_string_template = self.leq_string_template geq_string_template = self.geq_string_template @@ -404,7 +443,7 @@ def _print_model_LP(self, variable_label_pairs = [] # populate the symbol map in a single pass. - #objective_list, constraint_list, sosconstraint_list, variable_list \ + # objective_list, constraint_list, sosconstraint_list, variable_list \ # = self._populate_symbol_map(model, # symbol_map, # labeler, @@ -425,7 +464,7 @@ def _print_model_LP(self, # WEH - TODO: See if this is faster # NOTE: This loop doesn't find all of the variables. :( # - #for block in model.block_data_objects(active=True, + # for block in model.block_data_objects(active=True, # sort=sortOrder): # # all_blocks.append(block) @@ -441,13 +480,12 @@ def _print_model_LP(self, # (vardata,create_symbol_func(symbol_map, # vardata, # labeler))) - all_blocks = list( model.block_data_objects( - active=True, sort=sortOrder) ) - variable_list = list( model.component_data_objects( - Var, sort=sortOrder) ) + all_blocks = list(model.block_data_objects(active=True, sort=sortOrder)) + variable_list = list(model.component_data_objects(Var, sort=sortOrder)) variable_label_pairs = list( (vardata, create_symbol_func(symbol_map, vardata, labeler)) - for vardata in variable_list ) + for vardata in variable_list + ) variable_symbol_map.addSymbols(variable_label_pairs) # and extract the information we'll need for rapid labeling. @@ -459,14 +497,16 @@ def _print_model_LP(self, # LP writer may be more accepting in what expressions/variables # they accept. def print_expr_canonical( - obj, - x, - output, - object_symbol_dictionary, - variable_symbol_dictionary, - is_objective, - column_order, - force_objective_constant=False): + obj, + x, + output, + object_symbol_dictionary, + variable_symbol_dictionary, + is_objective, + column_order, + file_determinism, + force_objective_constant=False, + ): try: return self._print_expr_canonical( x=x, @@ -475,7 +515,9 @@ def print_expr_canonical( variable_symbol_dictionary=variable_symbol_dictionary, is_objective=is_objective, column_order=column_order, - force_objective_constant=force_objective_constant) + file_determinism=file_determinism, + force_objective_constant=force_objective_constant, + ) except KeyError as e: _id = e.args[0] _var = None @@ -494,8 +536,8 @@ def print_expr_canonical( logger.error( "Model contains an expression (%s) that contains " "a variable (%s) that is not attached to an active " - "block on the submodel being written" - % (obj.name, _var.name)) + "block on the submodel being written" % (obj.name, _var.name) + ) raise # print the model name and the source, so we know roughly where @@ -504,8 +546,7 @@ def print_expr_canonical( # NOTE: this *must* use the "\* ... *\" comment format: the GLPK # LP parser does not correctly handle other formats (notably, "%"). output = [] - output.append( - "\\* Source Pyomo model name=%s *\\\n\n" % (model.name,) ) + output.append("\\* Source Pyomo model name=%s *\\\n\n" % (model.name,)) # # Objective @@ -516,32 +557,27 @@ def print_expr_canonical( numObj = 0 onames = [] for block in all_blocks: - gen_obj_repn = getattr(block, "_gen_obj_repn", None) if gen_obj_repn is not None: gen_obj_repn = bool(gen_obj_repn) # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): + if not hasattr(block, '_repn'): block._repn = ComponentMap() block_repn = block._repn for objective_data in block.component_data_objects( - Objective, - active=True, - sort=sortOrder, - descend_into=False): - + Objective, active=True, sort=sortOrder, descend_into=False + ): numObj += 1 onames.append(objective_data.name) if numObj > 1: raise ValueError( "More than one active objective defined for input " "model '%s'; Cannot write legal LP file\n" - "Objectives: %s" % (model.name, ' '.join(onames))) + "Objectives: %s" % (model.name, ' '.join(onames)) + ) - create_symbol_func(symbol_map, - objective_data, - labeler) + create_symbol_func(symbol_map, objective_data, labeler) symbol_map.alias(objective_data, '__default_objective__') if objective_data.is_minimizing(): @@ -552,31 +588,35 @@ def print_expr_canonical( if gen_obj_repn == False: repn = block_repn[objective_data] else: - repn = generate_standard_repn(objective_data.expr) + repn = generate_standard_repn( + objective_data.expr, quadratic=supports_quadratic_objective + ) if gen_obj_repn: block_repn[objective_data] = repn degree = repn.polynomial_degree() if degree == 0: - logger.warning("Constant objective detected, replacing " - "with a placeholder to prevent solver failure.") + logger.warning( + "Constant objective detected, replacing " + "with a placeholder to prevent solver failure." + ) force_objective_constant = True elif degree == 2: if not supports_quadratic_objective: raise RuntimeError( "Selected solver is unable to handle " "objective functions with quadratic terms. " - "Objective at issue: %s." - % objective_data.name) + "Objective at issue: %s." % objective_data.name + ) elif degree is None: raise RuntimeError( "Cannot write legal LP file. Objective '%s' " "has nonlinear terms that are not quadratic." - % objective_data.name) + % objective_data.name + ) - output.append( - object_symbol_dictionary[id(objective_data)]+':\n') + output.append(object_symbol_dictionary[id(objective_data)] + ':\n') offset = print_expr_canonical( objective_data, @@ -586,12 +626,15 @@ def print_expr_canonical( variable_symbol_dictionary, True, column_order, - force_objective_constant=force_objective_constant) + file_determinism, + force_objective_constant=force_objective_constant, + ) if numObj == 0: raise ValueError( "ERROR: No objectives defined for input model. " - "Cannot write legal LP file.") + "Cannot write legal LP file." + ) # Constraints # @@ -611,25 +654,22 @@ def print_expr_canonical( def constraint_generator(): for block in all_blocks: - gen_con_repn = getattr(block, "_gen_con_repn", None) if gen_con_repn is not None: gen_con_repn = bool(gen_con_repn) # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): + if not hasattr(block, '_repn'): block._repn = ComponentMap() block_repn = block._repn for constraint_data in block.component_data_objects( - Constraint, - active=True, - sort=sortOrder, - descend_into=False): - - if (not constraint_data.has_lb()) and \ - (not constraint_data.has_ub()): + Constraint, active=True, sort=sortOrder, descend_into=False + ): + if (not constraint_data.has_lb()) and ( + not constraint_data.has_ub() + ): assert not constraint_data.equality - continue # non-binding, so skip + continue # non-binding, so skip if gen_con_repn == False: repn = block_repn[constraint_data] @@ -637,7 +677,10 @@ def constraint_generator(): if constraint_data._linear_canonical_form: repn = constraint_data.canonical_form() else: - repn = generate_standard_repn(constraint_data.body) + repn = generate_standard_repn( + constraint_data.body, + quadratic=supports_quadratic_constraint, + ) if gen_con_repn: block_repn[constraint_data] = repn @@ -646,8 +689,10 @@ def constraint_generator(): if row_order is not None: sorted_constraint_list = list(constraint_generator()) sorted_constraint_list.sort(key=lambda x: row_order[x[0]]) + def yield_all_constraints(): yield from sorted_constraint_list + else: yield_all_constraints = constraint_generator @@ -674,18 +719,19 @@ def yield_all_constraints(): if not supports_quadratic_constraint: raise ValueError( "Solver unable to handle quadratic expressions. Constraint" - " at issue: '%s'" % (constraint_data.name)) + " at issue: '%s'" % (constraint_data.name) + ) elif degree is None: raise ValueError( "Cannot write legal LP file. Constraint '%s' has a body " - "with nonlinear terms." % (constraint_data.name)) + "with nonlinear terms." % (constraint_data.name) + ) # Create symbol con_symbol = create_symbol_func(symbol_map, constraint_data, labeler) if constraint_data.equality: - assert value(constraint_data.lower) == \ - value(constraint_data.upper) + assert value(constraint_data.lower) == value(constraint_data.upper) label = 'c_e_%s_' % con_symbol alias_symbol_func(symbol_map, constraint_data, label) output.append(label) @@ -697,12 +743,12 @@ def yield_all_constraints(): object_symbol_dictionary, variable_symbol_dictionary, False, - column_order + column_order, + file_determinism, ) bound = constraint_data.lower bound = _get_bound(bound) - offset - output.append(eq_string_template - % (_no_negative_zero(bound))) + output.append(eq_string_template % (_no_negative_zero(bound))) output.append("\n") else: if constraint_data.has_lb(): @@ -720,12 +766,12 @@ def yield_all_constraints(): object_symbol_dictionary, variable_symbol_dictionary, False, - column_order + column_order, + file_determinism, ) bound = constraint_data.lower bound = _get_bound(bound) - offset - output.append(geq_string_template - % (_no_negative_zero(bound))) + output.append(geq_string_template % (_no_negative_zero(bound))) else: assert constraint_data.has_ub() @@ -744,23 +790,24 @@ def yield_all_constraints(): object_symbol_dictionary, variable_symbol_dictionary, False, - column_order + column_order, + file_determinism, ) bound = constraint_data.upper bound = _get_bound(bound) - offset - output.append(leq_string_template - % (_no_negative_zero(bound))) + output.append(leq_string_template % (_no_negative_zero(bound))) else: assert constraint_data.has_lb() # A simple hack to avoid caching super large files if len(output) > 1024: - output_file.write( "".join(output) ) + output_file.write("".join(output)) output = [] if not have_nontrivial: - logger.warning('Empty constraint block written in LP format ' \ - '- solver may error') + logger.warning( + 'Empty constraint block written in LP format - solver may error' + ) # the CPLEX LP format doesn't allow constants in the objective (or # constraint body), which is a bit silly. To avoid painful @@ -791,21 +838,20 @@ def yield_all_constraints(): sos2 = solver_capability("sos2") writtenSOS = False for block in all_blocks: - for soscondata in block.component_data_objects( - SOSConstraint, - active=True, - sort=sortOrder, - descend_into=False): - + SOSConstraint, active=True, sort=sortOrder, descend_into=False + ): create_symbol_func(symbol_map, soscondata, labeler) level = soscondata.level - if (level == 1 and not sos1) or \ - (level == 2 and not sos2) or \ - (level > 2): + if ( + (level == 1 and not sos1) + or (level == 2 and not sos2) + or (level > 2) + ): raise ValueError( - "Solver does not support SOS level %s constraints" % (level)) + "Solver does not support SOS level %s constraints" % (level) + ) if writtenSOS == False: SOSlines.append("SOS\n") writtenSOS = True @@ -814,11 +860,9 @@ def yield_all_constraints(): # SOSConstraint, in which case this needs to be known # before we write the "bounds" section (Cplex does not # handle this correctly, Gurobi does) - self.printSOS(symbol_map, - labeler, - variable_symbol_map, - soscondata, - SOSlines) + self.printSOS( + symbol_map, labeler, variable_symbol_map, soscondata, SOSlines + ) # # Bounds @@ -834,14 +878,14 @@ def yield_all_constraints(): integer_vars = [] binary_vars = [] for vardata in variable_list: - # TODO: We could just loop over the set of items in # self._referenced_variable_ids, except this is # a dictionary that is hashed by id(vardata) # which would make the bounds section # nondeterministic (bad for unit testing) - if (not include_all_variable_bounds) and \ - (id(vardata) not in self._referenced_variable_ids): + if (not include_all_variable_bounds) and ( + id(vardata) not in self._referenced_variable_ids + ): continue name_to_output = variable_symbol_dictionary[id(vardata)] @@ -849,7 +893,8 @@ def yield_all_constraints(): raise ValueError( "Attempting to write variable with name 'e' in a CPLEX LP " "formatted file will cause a parse failure due to confusion with " - "numeric values expressed in scientific notation") + "numeric values expressed in scientific notation" + ) # track the number of integer and binary variables, so we know whether # to output the general / binary sections below. @@ -858,9 +903,10 @@ def yield_all_constraints(): elif vardata.is_integer(): integer_vars.append(name_to_output) elif not vardata.is_continuous(): - raise TypeError("Invalid domain type for variable with name '%s'. " - "Variable is not continuous, integer, or binary." - % (vardata.name)) + raise TypeError( + "Invalid domain type for variable with name '%s'. " + "Variable is not continuous, integer, or binary." % (vardata.name) + ) if vardata.fixed: if not output_fixed_variable_bounds: @@ -870,18 +916,17 @@ def yield_all_constraints(): "usually indicative of a preprocessing error. Use the " "IO-option 'output_fixed_variable_bounds=True' to suppress " "this error and fix the variable by overwriting its bounds " - "in the LP file." % (vardata.name, model.name)) + "in the LP file." % (vardata.name, model.name) + ) if vardata.value is None: raise ValueError("Variable cannot be fixed to a value of None.") vardata_lb = value(vardata.value) vardata_ub = value(vardata.value) output.append(" ") - output.append(lb_string_template - % (_no_negative_zero(vardata_lb))) + output.append(lb_string_template % (_no_negative_zero(vardata_lb))) output.append(name_to_output) - output.append(ub_string_template - % (_no_negative_zero(vardata_ub))) + output.append(ub_string_template % (_no_negative_zero(vardata_ub))) else: vardata_lb = _get_bound(vardata.lb) vardata_ub = _get_bound(vardata.ub) @@ -889,46 +934,42 @@ def yield_all_constraints(): # Pyomo assumes that the default variable bounds are -inf and +inf output.append(" ") if vardata.has_lb(): - output.append(lb_string_template - % (_no_negative_zero(vardata_lb))) + output.append(lb_string_template % (_no_negative_zero(vardata_lb))) else: output.append(" -inf <= ") output.append(name_to_output) if vardata.has_ub(): - output.append(ub_string_template - % (_no_negative_zero(vardata_ub))) + output.append(ub_string_template % (_no_negative_zero(vardata_ub))) else: output.append(" <= +inf\n") if len(integer_vars) > 0: - output.append("general\n") for var_name in integer_vars: output.append(' %s\n' % var_name) if len(binary_vars) > 0: - output.append("binary\n") for var_name in binary_vars: output.append(' %s\n' % var_name) - # Write the SOS section - output.append( "".join(SOSlines) ) + output.append("".join(SOSlines)) # # wrap-up # output.append("end\n") - output_file.write( "".join(output) ) + output_file.write("".join(output)) # Clean up the symbol map to only contain variables referenced # in the active constraints **Note**: warm start method may # rely on this for choosing the set of potential warm start # variables - vars_to_delete = set(variable_symbol_map.byObject.keys()) - \ - set(self._referenced_variable_ids.keys()) + vars_to_delete = set(variable_symbol_map.byObject.keys()) - set( + self._referenced_variable_ids.keys() + ) sm_byObject = symbol_map.byObject sm_bySymbol = symbol_map.bySymbol var_sm_byObject = variable_symbol_map.byObject @@ -939,4 +980,3 @@ def yield_all_constraints(): del variable_symbol_map return symbol_map - diff --git a/pyomo/repn/plugins/gams_writer.py b/pyomo/repn/plugins/gams_writer.py index a53d1b9612c..de0e4684fc4 100644 --- a/pyomo/repn/plugins/gams_writer.py +++ b/pyomo/repn/plugins/gams_writer.py @@ -16,98 +16,103 @@ from io import StringIO from pyomo.common.gc_manager import PauseGC -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.core.expr.numvalue import ( - value, as_numeric, native_types, native_numeric_types, + value, + as_numeric, + native_types, + native_numeric_types, nonpyomo_leaf_types, ) +from pyomo.core.expr.visitor import _ToStringVisitor from pyomo.core.base import ( - SymbolMap, ShortNameLabeler, NumericLabeler, Constraint, - Objective, Var, minimize, SortComponents) + SymbolMap, + ShortNameLabeler, + NumericLabeler, + Constraint, + Objective, + Var, + minimize, + SortComponents, +) from pyomo.core.base.component import ActiveComponent from pyomo.core.kernel.base import ICategorizedObject from pyomo.opt import ProblemFormat from pyomo.opt.base import AbstractProblemWriter, WriterFactory -from pyomo.repn.util import valid_expr_ctypes_minlp, \ - valid_active_ctypes_minlp, ftoa +from pyomo.repn.util import valid_expr_ctypes_minlp, valid_active_ctypes_minlp, ftoa import logging logger = logging.getLogger('pyomo.core') _legal_unary_functions = { - 'ceil','floor','exp','log','log10','sqrt', - 'sin','cos','tan','asin','acos','atan','sinh','cosh','tanh', + 'ceil', + 'floor', + 'exp', + 'log', + 'log10', + 'sqrt', + 'sin', + 'cos', + 'tan', + 'asin', + 'acos', + 'atan', + 'sinh', + 'cosh', + 'tanh', } -_arc_functions = {'acos','asin','atan'} -_dnlp_functions = {'ceil','floor','abs'} +_arc_functions = {'acos', 'asin', 'atan'} +_dnlp_functions = {'ceil', 'floor', 'abs'} _zero_one = {0, 1} +_plusMinusOne = {-1, 1} + + +def _handle_PowExpression(visitor, node, values): + # If the exponent is a positive integer, use the power() function. + # Otherwise, use the ** operator. + exponent = node.arg(1) + if exponent.__class__ in native_numeric_types and exponent == int(exponent): + return f"power({values[0]}, {values[1]})" + else: + return f"{values[0]} ** {values[1]}" + + +def _handle_UnaryFunctionExpression(visitor, node, values): + if node.name not in _legal_unary_functions: + raise RuntimeError( + "GAMS files cannot represent the unary function %s" % (node.name,) + ) + if node.name in _dnlp_functions: + visitor.is_discontinuous = True + if node.name in _arc_functions: + return f"arc{node.name[1:]}({values[0]})" + else: + return node._to_string(values, False, visitor.smap) + + +def _handle_AbsExpression(visitor, node, values): + visitor.is_discontinuous = True + return node._to_string(values, False, visitor.smap) + + # # A visitor pattern that creates a string for an expression # that is compatible with the GAMS syntax. # -class ToGamsVisitor(EXPR.ExpressionValueVisitor): +class ToGamsVisitor(_ToStringVisitor): + _expression_handlers = { + EXPR.PowExpression: _handle_PowExpression, + EXPR.UnaryFunctionExpression: _handle_UnaryFunctionExpression, + EXPR.AbsExpression: _handle_AbsExpression, + } def __init__(self, smap, treechecker, output_fixed_variables=False): - super(ToGamsVisitor, self).__init__() - self.smap = smap + super(ToGamsVisitor, self).__init__(False, smap) self.treechecker = treechecker self.is_discontinuous = False self.output_fixed_variables = output_fixed_variables - def visit(self, node, values): - """ Visit nodes that have been expanded """ - tmp = [] - for i,val in enumerate(values): - arg = node._args_[i] - - parens = False - if val[0] in '-+': - # Note: This is technically only necessary for i > 0 - parens = True - elif arg.__class__ in native_types: - pass - elif arg.is_expression_type(): - if node._precedence() < arg._precedence(): - parens = True - elif node._precedence() == arg._precedence(): - if i == 0: - parens = node._associativity() != 1 - elif i == len(node._args_)-1: - parens = node._associativity() != -1 - else: - parens = True - if parens: - tmp.append("(" + val + ")") - else: - tmp.append(val) - - if node.__class__ is EXPR.PowExpression: - # If the exponent is a positive integer, use the power() function. - # Otherwise, use the ** operator. - exponent = node.arg(1) - if (exponent.__class__ in native_numeric_types and - exponent == int(exponent)): - return "power({0}, {1})".format(tmp[0], tmp[1]) - else: - return "{0} ** {1}".format(tmp[0], tmp[1]) - elif node.__class__ is EXPR.UnaryFunctionExpression: - if node.name not in _legal_unary_functions: - raise RuntimeError( - "GAMS files cannot represent the unary function %s" - % ( node.name, )) - if node.name in _dnlp_functions: - self.is_discontinuous = True - if node.name in _arc_functions: - return "arc{0}({1})".format(node.name[1:], tmp[0]) - else: - return node._to_string(tmp, None, self.smap, True) - elif node.__class__ is EXPR.AbsExpression: - self.is_discontinuous = True - return node._to_string(tmp, None, self.smap, True) - else: - return node._to_string(tmp, None, self.smap, True) - def visiting_potential_leaf(self, node): """ Visiting a potential leaf. @@ -116,14 +121,14 @@ def visiting_potential_leaf(self, node): """ if node.__class__ in native_types: try: - return True, ftoa(node) + return True, ftoa(node, True) except TypeError: return True, repr(node) if node.is_expression_type(): # Special handling if NPV and semi-NPV types: if not node.is_potentially_variable(): - return True, ftoa(value(node)) + return True, ftoa(node(), True) if node.__class__ is EXPR.MonomialTermExpression: return True, self._monomial_to_string(node) if node.__class__ is EXPR.LinearExpression: @@ -141,50 +146,49 @@ def visiting_potential_leaf(self, node): "Unallowable component '%s' of type %s found in an active " "constraint or objective.\nThe GAMS writer cannot export " "expressions with this component type." - % (node.name, node.ctype.__name__)) + % (node.name, node.ctype.__name__) + ) if node.ctype is not Var: # For these, make sure it's on the right model. We can check # Vars later since they don't disappear from the expressions self.treechecker(node) if node.is_fixed() and not ( - self.output_fixed_variables and node.is_potentially_variable()): - return True, ftoa(value(node)) + self.output_fixed_variables and node.is_potentially_variable() + ): + return True, ftoa(node(), True) else: assert node.is_variable_type() return True, self.smap.getSymbol(node) def _monomial_to_string(self, node): const, var = node.args - const = value(const) + if const.__class__ not in native_types: + const = value(const) if var.is_fixed() and not self.output_fixed_variables: - return ftoa(const * var.value) + return ftoa(const * var.value, True) # Special handling: ftoa is slow, so bypass _to_string when this # is a trivial term - if const in {-1, 1}: + if not const: + return '0' + if const in _plusMinusOne: if const < 0: return '-' + self.smap.getSymbol(var) else: return self.smap.getSymbol(var) - return node._to_string((ftoa(const), self.smap.getSymbol(var)), - False, self.smap, True) + return ftoa(const, True) + '*' + self.smap.getSymbol(var) def _linear_to_string(self, node): - iter_ = iter(node.args) - values = [] - if node.constant: - next(iter_) - values.append(ftoa(node.constant)) - values.extend(map(self._monomial_to_string, iter_)) - return node._to_string(values, False, self.smap, True) - - -def expression_to_string(expr, treechecker, labeler=None, smap=None, - output_fixed_variables=False): - if labeler is not None: - if smap is None: - smap = SymbolMap() - smap.default_labeler = labeler + values = [ + self._monomial_to_string(arg) + if arg.__class__ is EXPR.MonomialTermExpression + else ftoa(arg, True) + for arg in node.args + ] + return node._to_string(values, False, self.smap) + + +def expression_to_string(expr, treechecker, smap=None, output_fixed_variables=False): visitor = ToGamsVisitor(smap, treechecker, output_fixed_variables) expr_str = visitor.dfs_postorder_stack(expr) return expr_str, visitor.is_discontinuous @@ -223,7 +227,8 @@ def __init__(self, var_list, symbol_map): else: raise RuntimeError( "Cannot output variable to GAMS: effective variable " - "domain is not in {Reals, Integers, Binary}") + "domain is not in {Reals, Integers, Binary}" + ) def __iter__(self): """Iterate over all variables. @@ -269,8 +274,7 @@ def __call__(self, comp, exception_flag=True): def parent_block(self, comp): if isinstance(comp, ICategorizedObject): parent = comp.parent - while (parent is not None) and \ - (not parent._is_heterogeneous_container): + while (parent is not None) and (not parent._is_heterogeneous_container): parent = parent.parent return parent else: @@ -279,7 +283,8 @@ def parent_block(self, comp): def raise_error(self, comp): raise RuntimeError( "GAMS writer: found component '%s' not on same model tree.\n" - "All components must have the same parent model." % comp.name) + "All components must have the same parent model." % comp.name + ) def split_long_line(line): @@ -294,8 +299,7 @@ def split_long_line(line): # Walk backwards to find closest space, # where it is safe to split to a new line if i < 0: - raise RuntimeError( - "Found an 80,000+ character string with no spaces") + raise RuntimeError("Found an 80,000+ character string with no spaces") i -= 1 new_lines += line[:i] + '\n' # the space will be the first character in the next line, @@ -305,17 +309,33 @@ def split_long_line(line): return new_lines +class GAMSSymbolMap(SymbolMap): + def __init__(self, var_labeler, var_list): + super().__init__(self.var_label) + self.var_labeler = var_labeler + self.var_list = var_list + + def var_label(self, obj): + # if obj.is_fixed(): + # return str(value(obj)) + return self.getSymbol(obj, self.var_recorder) + + def var_recorder(self, obj): + ans = self.var_labeler(obj) + try: + if obj.is_variable_type(): + self.var_list.append(ans) + except: + pass + return ans + + @WriterFactory.register('gams', 'Generate the corresponding GAMS file') class ProblemWriter_gams(AbstractProblemWriter): - def __init__(self): AbstractProblemWriter.__init__(self, ProblemFormat.gams) - def __call__(self, - model, - output_filename, - solver_capability, - io_options): + def __call__(self, model, output_filename, solver_capability, io_options): """ Write a model in the GAMS modeling language format. @@ -393,12 +413,10 @@ def __call__(self, # Skip writing constraints whose body section is # fixed (i.e., no variables) - skip_trivial_constraints = \ - io_options.pop("skip_trivial_constraints", False) + skip_trivial_constraints = io_options.pop("skip_trivial_constraints", False) # Output fixed variables as variables - output_fixed_variables = \ - io_options.pop("output_fixed_variables", False) + output_fixed_variables = io_options.pop("output_fixed_variables", False) # How much effort do we want to put into ensuring the # GAMS file is written deterministically for a Pyomo model: @@ -406,9 +424,11 @@ def __call__(self, # 1 : sort keys of indexed components (default) # 2 : sort keys AND sort names (over declaration order) file_determinism = io_options.pop("file_determinism", 1) - sorter_map = {0:SortComponents.unsorted, - 1:SortComponents.deterministic, - 2:SortComponents.sortBoth} + sorter_map = { + 0: SortComponents.unsorted, + 1: SortComponents.deterministic, + 2: SortComponents.sortBoth, + } sort = sorter_map[file_determinism] # Warmstart by initializing model's variables to their values. @@ -418,38 +438,59 @@ def __call__(self, # Set to True by GAMSSolver put_results = io_options.pop("put_results", None) put_results_format = io_options.pop("put_results_format", 'gdx') - assert put_results_format in ('gdx','dat') + assert put_results_format in ('gdx', 'dat') if len(io_options): raise ValueError( - "GAMS writer passed unrecognized io_options:\n\t" + - "\n\t".join("%s = %s" - % (k,v) for k,v in io_options.items())) + "GAMS writer passed unrecognized io_options:\n\t" + + "\n\t".join("%s = %s" % (k, v) for k, v in io_options.items()) + ) if solver is not None and solver.upper() not in valid_solvers: - raise ValueError( - "GAMS writer passed unrecognized solver: %s" % solver) + raise ValueError("GAMS writer passed unrecognized solver: %s" % solver) if mtype is not None: - valid_mtypes = set([ - 'lp', 'qcp', 'nlp', 'dnlp', 'rmip', 'mip', 'rmiqcp', 'rminlp', - 'miqcp', 'minlp', 'rmpec', 'mpec', 'mcp', 'cns', 'emp']) + valid_mtypes = set( + [ + 'lp', + 'qcp', + 'nlp', + 'dnlp', + 'rmip', + 'mip', + 'rmiqcp', + 'rminlp', + 'miqcp', + 'minlp', + 'rmpec', + 'mpec', + 'mcp', + 'cns', + 'emp', + ] + ) if mtype.lower() not in valid_mtypes: - raise ValueError("GAMS writer passed unrecognized " - "model type: %s" % mtype) - if (solver is not None and - mtype.upper() not in valid_solvers[solver.upper()]): - raise ValueError("GAMS writer passed solver (%s) " - "unsuitable for given model type (%s)" - % (solver, mtype)) + raise ValueError( + "GAMS writer passed unrecognized model type: %s" % mtype + ) + if ( + solver is not None + and mtype.upper() not in valid_solvers[solver.upper()] + ): + raise ValueError( + "GAMS writer passed solver (%s) " + "unsuitable for given model type (%s)" % (solver, mtype) + ) if output_filename is None: output_filename = model.name + ".gms" if symbolic_solver_labels and (labeler is not None): - raise ValueError("GAMS writer: Using both the " - "'symbolic_solver_labels' and 'labeler' " - "I/O options is forbidden") + raise ValueError( + "GAMS writer: Using both the " + "'symbolic_solver_labels' and 'labeler' " + "I/O options is forbidden" + ) if symbolic_solver_labels: # Note that the Var and Constraint labelers must use the @@ -459,8 +500,12 @@ def __call__(self, # to start with a letter. We will (randomly) choose "s_" # (for 'shortened') var_labeler = con_labeler = ShortNameLabeler( - 60, prefix='s_', suffix='_', caseInsensitive=True, - legalRegex='^[a-zA-Z]') + 60, + prefix='s_', + suffix='_', + caseInsensitive=True, + legalRegex='^[a-zA-Z]', + ) elif labeler is None: var_labeler = NumericLabeler('x') con_labeler = NumericLabeler('c') @@ -469,21 +514,7 @@ def __call__(self, var_list = [] - def var_recorder(obj): - ans = var_labeler(obj) - try: - if obj.is_variable_type(): - var_list.append(ans) - except: - pass - return ans - - def var_label(obj): - #if obj.is_fixed(): - # return str(value(obj)) - return symbolMap.getSymbol(obj, var_recorder) - - symbolMap = SymbolMap(var_label) + symbolMap = GAMSSymbolMap(var_labeler, var_list) # when sorting, there are a non-trivial number of # temporary objects created. these all yield @@ -504,7 +535,7 @@ def var_label(obj): output_file=output_file, solver_capability=solver_capability, var_list=var_list, - var_label=var_label, + var_label=symbolMap.var_label, symbolMap=symbolMap, con_labeler=con_labeler, sort=sort, @@ -527,47 +558,48 @@ def var_label(obj): return output_filename, symbolMap - def _write_model(self, - model, - output_file, - solver_capability, - var_list, - var_label, - symbolMap, - con_labeler, - sort, - skip_trivial_constraints, - output_fixed_variables, - warmstart, - solver, - mtype, - solprint, - limrow, - limcol, - solvelink, - add_options, - put_results, - put_results_format, - ): + def _write_model( + self, + model, + output_file, + solver_capability, + var_list, + var_label, + symbolMap, + con_labeler, + sort, + skip_trivial_constraints, + output_fixed_variables, + warmstart, + solver, + mtype, + solprint, + limrow, + limcol, + solvelink, + add_options, + put_results, + put_results_format, + ): constraint_names = [] ConstraintIO = StringIO() linear = True - linear_degree = set([0,1]) + linear_degree = set([0, 1]) dnlp = False # Make sure there are no strange ActiveComponents. The expression # walker will handle strange things in constraints later. model_ctypes = model.collect_ctypes(active=True) invalids = set() - for t in (model_ctypes - valid_active_ctypes_minlp): + for t in model_ctypes - valid_active_ctypes_minlp: if issubclass(t, ActiveComponent): invalids.add(t) if len(invalids): invalids = [t.__name__ for t in invalids] raise RuntimeError( "Unallowable active component(s) %s.\nThe GAMS writer cannot " - "export models with this component type." % - ", ".join(invalids)) + "export models with this component type." % ", ".join(invalids) + ) tc = StorageTreeChecker(model) @@ -575,13 +607,10 @@ def _write_model(self, # for all active constraints. Any Vars / Expressions that are # encountered will be added to the var_list due to the labeler # defined above. - for con in model.component_data_objects(Constraint, - active=True, - sort=sort): - + for con in model.component_data_objects(Constraint, active=True, sort=sort): if not con.has_lb() and not con.has_ub(): assert not con.equality - continue # non-binding, so skip + continue # non-binding, so skip con_body = as_numeric(con.body) if skip_trivial_constraints and con_body.is_fixed(): @@ -592,55 +621,49 @@ def _write_model(self, cName = symbolMap.getSymbol(con, con_labeler) con_body_str, con_discontinuous = expression_to_string( - con_body, tc, smap=symbolMap, - output_fixed_variables=output_fixed_variables + con_body, + tc, + smap=symbolMap, + output_fixed_variables=output_fixed_variables, ) dnlp |= con_discontinuous if con.equality: constraint_names.append('%s' % cName) - ConstraintIO.write('%s.. %s =e= %s ;\n' % ( - constraint_names[-1], - con_body_str, - ftoa(con.upper) - )) + ConstraintIO.write( + '%s.. %s =e= %s ;\n' + % (constraint_names[-1], con_body_str, ftoa(con.upper, False)) + ) else: if con.has_lb(): constraint_names.append('%s_lo' % cName) - ConstraintIO.write('%s.. %s =l= %s ;\n' % ( - constraint_names[-1], - ftoa(con.lower), - con_body_str, - )) + ConstraintIO.write( + '%s.. %s =l= %s ;\n' + % (constraint_names[-1], ftoa(con.lower, False), con_body_str) + ) if con.has_ub(): constraint_names.append('%s_hi' % cName) - ConstraintIO.write('%s.. %s =l= %s ;\n' % ( - constraint_names[-1], - con_body_str, - ftoa(con.upper) - )) - - obj = list(model.component_data_objects(Objective, - active=True, - sort=sort)) + ConstraintIO.write( + '%s.. %s =l= %s ;\n' + % (constraint_names[-1], con_body_str, ftoa(con.upper, False)) + ) + + obj = list(model.component_data_objects(Objective, active=True, sort=sort)) if len(obj) != 1: raise RuntimeError( "GAMS writer requires exactly one active objective (found %s)" - % (len(obj))) + % (len(obj)) + ) obj = obj[0] if linear: - if obj.expr.polynomial_degree() not in linear_degree: + if obj.polynomial_degree() not in linear_degree: linear = False obj_expr_str, obj_discontinuous = expression_to_string( - obj.expr, tc, smap=symbolMap, - output_fixed_variables=output_fixed_variables, + obj.expr, tc, smap=symbolMap, output_fixed_variables=output_fixed_variables ) dnlp |= obj_discontinuous oName = symbolMap.getSymbol(obj, con_labeler) constraint_names.append(oName) - ConstraintIO.write('%s.. GAMS_OBJECTIVE =e= %s ;\n' % ( - oName, - obj_expr_str, - )) + ConstraintIO.write('%s.. GAMS_OBJECTIVE =e= %s ;\n' % (oName, obj_expr_str)) # Categorize the variables that we found categorized_vars = Categorizer(var_list, symbolMap) @@ -662,15 +685,13 @@ def _write_model(self, output_file.write(";\n\nPOSITIVE VARIABLES\n\t") output_file.write("\n\t".join(categorized_vars.positive)) output_file.write(";\n\nVARIABLES\n\tGAMS_OBJECTIVE\n\t") - output_file.write("\n\t".join( - categorized_vars.reals + categorized_vars.fixed - )) + output_file.write("\n\t".join(categorized_vars.reals + categorized_vars.fixed)) output_file.write(";\n\n") for var in categorized_vars.fixed: - output_file.write("%s.fx = %s;\n" % ( - var, ftoa(value(symbolMap.getObject(var))) - )) + output_file.write( + "%s.fx = %s;\n" % (var, ftoa(value(symbolMap.getObject(var)), False)) + ) output_file.write("\n") for line in ConstraintIO.getvalue().splitlines(): @@ -687,43 +708,45 @@ def _write_model(self, lb, ub = var.bounds if category == 'positive': if ub is not None: - output_file.write("%s.up = %s;\n" % - (var_name, ftoa(ub))) + output_file.write("%s.up = %s;\n" % (var_name, ftoa(ub, False))) elif category == 'ints': if lb is None: warn_int_bounds = True # GAMS doesn't allow -INF lower bound for ints - logger.warning("Lower bound for integer variable %s set " - "to -1.0E+100." % var.name) + logger.warning( + "Lower bound for integer variable %s set " + "to -1.0E+100." % var.name + ) output_file.write("%s.lo = -1.0E+100;\n" % (var_name)) elif lb != 0: - output_file.write("%s.lo = %s;\n" % (var_name, ftoa(lb))) + output_file.write("%s.lo = %s;\n" % (var_name, ftoa(lb, False))) if ub is None: warn_int_bounds = True # GAMS has an option value called IntVarUp that is the # default upper integer bound, which it applies if the # integer's upper bound is INF. This option maxes out at # 2147483647, so we can go higher by setting the bound. - logger.warning("Upper bound for integer variable %s set " - "to +1.0E+100." % var.name) + logger.warning( + "Upper bound for integer variable %s set " + "to +1.0E+100." % var.name + ) output_file.write("%s.up = +1.0E+100;\n" % (var_name)) else: - output_file.write("%s.up = %s;\n" % (var_name, ftoa(ub))) + output_file.write("%s.up = %s;\n" % (var_name, ftoa(ub, False))) elif category == 'binary': if lb != 0: - output_file.write("%s.lo = %s;\n" % (var_name, ftoa(lb))) + output_file.write("%s.lo = %s;\n" % (var_name, ftoa(lb, False))) if ub != 1: - output_file.write("%s.up = %s;\n" % (var_name, ftoa(ub))) + output_file.write("%s.up = %s;\n" % (var_name, ftoa(ub, False))) elif category == 'reals': if lb is not None: - output_file.write("%s.lo = %s;\n" % (var_name, ftoa(lb))) + output_file.write("%s.lo = %s;\n" % (var_name, ftoa(lb, False))) if ub is not None: - output_file.write("%s.up = %s;\n" % (var_name, ftoa(ub))) + output_file.write("%s.up = %s;\n" % (var_name, ftoa(ub, False))) else: raise KeyError('Category %s not supported' % category) if warmstart and var.value is not None: - output_file.write("%s.l = %s;\n" % - (var_name, ftoa(var.value))) + output_file.write("%s.l = %s;\n" % (var_name, ftoa(var.value, False))) if warn_int_bounds: logger.warning( @@ -731,31 +754,33 @@ def _write_model(self, "is as extreme as GAMS will define, and should be enough to " "appear unbounded. If the solver cannot handle this bound, " "explicitly set a smaller bound on the pyomo model, or try a " - "different GAMS solver.") + "different GAMS solver." + ) model_name = "GAMS_MODEL" output_file.write("\nMODEL %s /all/ ;\n" % model_name) if mtype is None: - mtype = ('lp','nlp','mip','minlp')[ - (0 if linear else 1) + - (2 if (categorized_vars.binary or categorized_vars.ints) - else 0)] + mtype = ('lp', 'nlp', 'mip', 'minlp')[ + (0 if linear else 1) + + (2 if (categorized_vars.binary or categorized_vars.ints) else 0) + ] if mtype == 'nlp' and dnlp: mtype = 'dnlp' if solver is not None: if mtype.upper() not in valid_solvers[solver.upper()]: - raise ValueError("GAMS writer passed solver (%s) " - "unsuitable for model type (%s)" - % (solver, mtype)) + raise ValueError( + "GAMS writer passed solver (%s) " + "unsuitable for model type (%s)" % (solver, mtype) + ) output_file.write("option %s=%s;\n" % (mtype, solver)) output_file.write("option solprint=%s;\n" % solprint) output_file.write("option limrow=%d;\n" % limrow) output_file.write("option limcol=%d;\n" % limcol) output_file.write("option solvelink=%d;\n" % solvelink) - + if put_results is not None and put_results_format == 'gdx': output_file.write("option savepoint=1;\n") @@ -767,20 +792,28 @@ def _write_model(self, output_file.write( "SOLVE %s USING %s %simizing GAMS_OBJECTIVE;\n\n" - % ( model_name, - mtype, - 'min' if obj.sense == minimize else 'max')) + % (model_name, mtype, 'min' if obj.sense == minimize else 'max') + ) # Set variables to store certain statuses and attributes - stat_vars = ['MODELSTAT', 'SOLVESTAT', 'OBJEST', 'OBJVAL', 'NUMVAR', - 'NUMEQU', 'NUMDVAR', 'NUMNZ', 'ETSOLVE'] - output_file.write("Scalars MODELSTAT 'model status', " - "SOLVESTAT 'solve status';\n") + stat_vars = [ + 'MODELSTAT', + 'SOLVESTAT', + 'OBJEST', + 'OBJVAL', + 'NUMVAR', + 'NUMEQU', + 'NUMDVAR', + 'NUMNZ', + 'ETSOLVE', + ] + output_file.write( + "Scalars MODELSTAT 'model status', SOLVESTAT 'solve status';\n" + ) output_file.write("MODELSTAT = %s.modelstat;\n" % model_name) output_file.write("SOLVESTAT = %s.solvestat;\n\n" % model_name) - output_file.write("Scalar OBJEST 'best objective', " - "OBJVAL 'objective value';\n") + output_file.write("Scalar OBJEST 'best objective', OBJVAL 'objective value';\n") output_file.write("OBJEST = %s.objest;\n" % model_name) output_file.write("OBJVAL = %s.objval;\n\n" % model_name) @@ -816,8 +849,10 @@ def _write_model(self, output_file.write("\nput %s ' ' %s.l ' ' %s.m /;" % (var, var, var)) for con in constraint_names: output_file.write("\nput %s ' ' %s.l ' ' %s.m /;" % (con, con, con)) - output_file.write("\nput GAMS_OBJECTIVE GAMS_OBJECTIVE.l " - "GAMS_OBJECTIVE.m;\n") + output_file.write( + "\nput GAMS_OBJECTIVE ' ' GAMS_OBJECTIVE.l " + "' ' GAMS_OBJECTIVE.m;\n" + ) statresults = put_results + 'stat.dat' output_file.write("\nfile statresults /'%s'/;" % statresults) @@ -828,86 +863,328 @@ def _write_model(self, for stat in stat_vars: output_file.write("\nput '%s' ' ' %s /;\n" % (stat, stat)) + valid_solvers = { -'ALPHAECP': {'MINLP','MIQCP'}, -'AMPL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'}, -'ANTIGONE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'BARON': {'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'BDMLP': {'LP','MIP','RMIP'}, -'BDMLPD': {'LP','RMIP'}, -'BENCH': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'BONMIN': {'MINLP','MIQCP'}, -'BONMINH': {'MINLP','MIQCP'}, -'CBC': {'LP','MIP','RMIP'}, -'COINBONMIN': {'MINLP','MIQCP'}, -'COINCBC': {'LP','MIP','RMIP'}, -'COINCOUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'COINIPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'COINOS': {'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'COINSCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'CONOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'CONOPT3': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'CONOPT4': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'CONOPTD': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'CONVERT': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'CONVERTD': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'}, -'COUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'CPLEX': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'}, -'CPLEXD': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'}, -'CPOPTIMIZER': {'MIP','MINLP','MIQCP'}, -'DE': {'EMP'}, -'DECIS': {'EMP'}, -'DECISC': {'LP'}, -'DECISM': {'LP'}, -'DICOPT': {'MINLP','MIQCP'}, -'DICOPTD': {'MINLP','MIQCP'}, -'EXAMINER': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'EXAMINER2': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'GAMSCHK': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'GLOMIQO': {'QCP','MIQCP','RMIQCP'}, -'GUROBI': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'}, -'GUSS': {'LP', 'MIP', 'NLP', 'MCP', 'CNS', 'DNLP', 'MINLP', 'QCP', 'MIQCP'}, -'IPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'IPOPTH': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'JAMS': {'EMP'}, -'KESTREL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'}, -'KNITRO': {'LP','RMIP','NLP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'LGO': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'}, -'LGOD': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'}, -'LINDO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'}, -'LINDOGLOBAL': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'LINGO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP'}, -'LOCALSOLVER': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'LOGMIP': {'EMP'}, -'LS': {'LP','RMIP'}, -'MILES': {'MCP'}, -'MILESE': {'MCP'}, -'MINOS': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'MINOS5': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'MINOS55': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'MOSEK': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','QCP','MIQCP','RMIQCP'}, -'MPECDUMP': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'}, -'MPSGE': {}, -'MSNLP': {'NLP','DNLP','RMINLP','QCP','RMIQCP'}, -'NLPEC': {'MCP','MPEC','RMPEC'}, -'OQNLP': {'NLP', 'DNLP', 'MINLP', 'QCP', 'MIQCP'}, -'OS': {'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'OSICPLEX': {'LP','MIP','RMIP'}, -'OSIGUROBI': {'LP','MIP','RMIP'}, -'OSIMOSEK': {'LP','MIP','RMIP'}, -'OSISOPLEX': {'LP','RMIP'}, -'OSIXPRESS': {'LP','MIP','RMIP'}, -'PATH': {'MCP','CNS'}, -'PATHC': {'MCP','CNS'}, -'PATHNLP': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'}, -'PYOMO': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'}, -'QUADMINOS': {'LP'}, -'SBB': {'MINLP','MIQCP'}, -'SCENSOLVER': {'LP','MIP','RMIP','NLP','MCP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'SCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'}, -'SHOT': {'MINLP','MIQCP'}, -'SNOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'}, -'SOPLEX': {'LP','RMIP'}, -'XA': {'LP','MIP','RMIP'}, -'XPRESS': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'} + 'ALPHAECP': {'MINLP', 'MIQCP'}, + 'AMPL': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'MPEC', + 'RMPEC', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + }, + 'ANTIGONE': {'NLP', 'CNS', 'DNLP', 'RMINLP', 'MINLP', 'QCP', 'MIQCP', 'RMIQCP'}, + 'BARON': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'BDMLP': {'LP', 'MIP', 'RMIP'}, + 'BDMLPD': {'LP', 'RMIP'}, + 'BENCH': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'MPEC', + 'RMPEC', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'BONMIN': {'MINLP', 'MIQCP'}, + 'BONMINH': {'MINLP', 'MIQCP'}, + 'CBC': {'LP', 'MIP', 'RMIP'}, + 'COINBONMIN': {'MINLP', 'MIQCP'}, + 'COINCBC': {'LP', 'MIP', 'RMIP'}, + 'COINCOUENNE': {'NLP', 'CNS', 'DNLP', 'RMINLP', 'MINLP', 'QCP', 'MIQCP', 'RMIQCP'}, + 'COINIPOPT': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'COINOS': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'COINSCIP': { + 'MIP', + 'NLP', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'CONOPT': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'CONOPT3': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'CONOPT4': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'CONOPTD': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'CONVERT': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'MPEC', + 'RMPEC', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'CONVERTD': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'MPEC', + 'RMPEC', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + 'EMP', + }, + 'COUENNE': {'NLP', 'CNS', 'DNLP', 'RMINLP', 'MINLP', 'QCP', 'MIQCP', 'RMIQCP'}, + 'CPLEX': {'LP', 'MIP', 'RMIP', 'QCP', 'MIQCP', 'RMIQCP'}, + 'CPLEXD': {'LP', 'MIP', 'RMIP', 'QCP', 'MIQCP', 'RMIQCP'}, + 'CPOPTIMIZER': {'MIP', 'MINLP', 'MIQCP'}, + 'DE': {'EMP'}, + 'DECIS': {'EMP'}, + 'DECISC': {'LP'}, + 'DECISM': {'LP'}, + 'DICOPT': {'MINLP', 'MIQCP'}, + 'DICOPTD': {'MINLP', 'MIQCP'}, + 'EXAMINER': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'MPEC', + 'RMPEC', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'EXAMINER2': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'GAMSCHK': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'GLOMIQO': {'QCP', 'MIQCP', 'RMIQCP'}, + 'GUROBI': {'LP', 'MIP', 'RMIP', 'QCP', 'MIQCP', 'RMIQCP'}, + 'GUSS': {'LP', 'MIP', 'NLP', 'MCP', 'CNS', 'DNLP', 'MINLP', 'QCP', 'MIQCP'}, + 'IPOPT': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'IPOPTH': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'JAMS': {'EMP'}, + 'KESTREL': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'MPEC', + 'RMPEC', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + 'EMP', + }, + 'KNITRO': { + 'LP', + 'RMIP', + 'NLP', + 'MPEC', + 'RMPEC', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'LGO': {'LP', 'RMIP', 'NLP', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'LGOD': {'LP', 'RMIP', 'NLP', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'LINDO': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + 'EMP', + }, + 'LINDOGLOBAL': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'LINGO': {'LP', 'MIP', 'RMIP', 'NLP', 'DNLP', 'RMINLP', 'MINLP'}, + 'LOCALSOLVER': { + 'MIP', + 'NLP', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'LOGMIP': {'EMP'}, + 'LS': {'LP', 'RMIP'}, + 'MILES': {'MCP'}, + 'MILESE': {'MCP'}, + 'MINOS': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'MINOS5': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'MINOS55': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'MOSEK': {'LP', 'MIP', 'RMIP', 'NLP', 'DNLP', 'RMINLP', 'QCP', 'MIQCP', 'RMIQCP'}, + 'MPECDUMP': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'MPEC', + 'RMPEC', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + }, + 'MPSGE': {}, + 'MSNLP': {'NLP', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'NLPEC': {'MCP', 'MPEC', 'RMPEC'}, + 'OQNLP': {'NLP', 'DNLP', 'MINLP', 'QCP', 'MIQCP'}, + 'OS': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'OSICPLEX': {'LP', 'MIP', 'RMIP'}, + 'OSIGUROBI': {'LP', 'MIP', 'RMIP'}, + 'OSIMOSEK': {'LP', 'MIP', 'RMIP'}, + 'OSISOPLEX': {'LP', 'RMIP'}, + 'OSIXPRESS': {'LP', 'MIP', 'RMIP'}, + 'PATH': {'MCP', 'CNS'}, + 'PATHC': {'MCP', 'CNS'}, + 'PATHNLP': {'LP', 'RMIP', 'NLP', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'PYOMO': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'MPEC', + 'RMPEC', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + }, + 'QUADMINOS': {'LP'}, + 'SBB': {'MINLP', 'MIQCP'}, + 'SCENSOLVER': { + 'LP', + 'MIP', + 'RMIP', + 'NLP', + 'MCP', + 'CNS', + 'DNLP', + 'RMINLP', + 'MINLP', + 'QCP', + 'MIQCP', + 'RMIQCP', + }, + 'SCIP': {'MIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'MINLP', 'QCP', 'MIQCP', 'RMIQCP'}, + 'SHOT': {'MINLP', 'MIQCP'}, + 'SNOPT': {'LP', 'RMIP', 'NLP', 'CNS', 'DNLP', 'RMINLP', 'QCP', 'RMIQCP'}, + 'SOPLEX': {'LP', 'RMIP'}, + 'XA': {'LP', 'MIP', 'RMIP'}, + 'XPRESS': {'LP', 'MIP', 'RMIP', 'QCP', 'MIQCP', 'RMIQCP'}, } diff --git a/pyomo/repn/plugins/lp_writer.py b/pyomo/repn/plugins/lp_writer.py new file mode 100644 index 00000000000..8b04ebf1750 --- /dev/null +++ b/pyomo/repn/plugins/lp_writer.py @@ -0,0 +1,600 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import logging +from io import StringIO +from operator import itemgetter, attrgetter + +from pyomo.common.config import ( + ConfigBlock, + ConfigValue, + InEnum, + document_kwargs_from_configdict, +) +from pyomo.common.gc_manager import PauseGC +from pyomo.common.timing import TicTocTimer + +from pyomo.core.base import ( + Block, + Objective, + Constraint, + Var, + Param, + Expression, + SOSConstraint, + SortComponents, + Suffix, + SymbolMap, + minimize, +) +from pyomo.core.base.component import ActiveComponent +from pyomo.core.base.label import LPFileLabeler, NumericLabeler +from pyomo.opt import WriterFactory +from pyomo.repn.linear import LinearRepnVisitor +from pyomo.repn.quadratic import QuadraticRepnVisitor +from pyomo.repn.util import ( + FileDeterminism, + FileDeterminism_to_SortComponents, + categorize_valid_components, + initialize_var_map_from_column_order, + ordered_active_constraints, +) + +### FIXME: Remove the following as soon as non-active components no +### longer report active==True +from pyomo.core.base import Set, RangeSet, ExternalFunction +from pyomo.network import Port + +logger = logging.getLogger(__name__) +inf = float('inf') +neg_inf = float('-inf') + + +# TODO: make a proper base class +class LPWriterInfo(object): + """Return type for LPWriter.write() + + Attributes + ---------- + symbol_map: SymbolMap + + The :py:class:`SymbolMap` bimap between row/column labels and + Pyomo components. + + """ + + def __init__(self, symbol_map): + self.symbol_map = symbol_map + + +@WriterFactory.register( + 'cpxlp_v2', 'Generate the corresponding CPLEX LP file (version 2).' +) +@WriterFactory.register('lp_v2', 'Generate the corresponding LP file (version 2).') +class LPWriter(object): + CONFIG = ConfigBlock('lpwriter') + CONFIG.declare( + 'show_section_timing', + ConfigValue( + default=False, + domain=bool, + description='Print timing after writing each section of the LP file', + ), + ) + CONFIG.declare( + 'skip_trivial_constraints', + ConfigValue( + default=False, + domain=bool, + description='Skip writing constraints whose body is constant', + ), + ) + CONFIG.declare( + 'file_determinism', + ConfigValue( + default=FileDeterminism.ORDERED, + domain=InEnum(FileDeterminism), + description='How much effort to ensure file is deterministic', + doc=""" + How much effort do we want to put into ensuring the + LP file is written deterministically for a Pyomo model: + NONE (0) : None + ORDERED (10): rely on underlying component ordering (default) + SORT_INDICES (20) : sort keys of indexed components + SORT_SYMBOLS (30) : sort keys AND sort names (not declaration order) + """, + ), + ) + CONFIG.declare( + 'symbolic_solver_labels', + ConfigValue( + default=False, + domain=bool, + description='Write variables/constraints using model names', + doc=""" + Export variables and constraints to the LP file using human-readable + text names derived from the corresponding Pyomo component names. + """, + ), + ) + CONFIG.declare( + 'row_order', + ConfigValue( + default=None, + description='Preferred constraint ordering', + doc=""" + List of constraints in the order that they should appear in the + LP file. Unspecified constraints will appear at the end.""", + ), + ) + CONFIG.declare( + 'column_order', + ConfigValue( + default=None, + description='Preferred variable ordering', + doc=""" + + + List of variables in the order that they should appear in + the LP file. Note that this is only a suggestion, as the LP + file format is row-major and the columns are inferred from + the order in which variables appear in the objective + followed by each constraint.""", + ), + ) + CONFIG.declare( + 'labeler', + ConfigValue( + default=None, + description='Callable to use to generate symbol names in LP file', + doc=""" + Export variables and constraints to the LP file using human-readable + text names derived from the corresponding Pyomo component names. + """, + ), + ) + CONFIG.declare( + 'output_fixed_variable_bounds', + ConfigValue( + default=False, + domain=bool, + description='DEPRECATED option from LPv1 that has no effect in the LPv2', + ), + ) + CONFIG.declare( + 'allow_quadratic_objective', + ConfigValue( + default=True, + domain=bool, + description='If True, allow quadratic terms in the model objective', + ), + ) + CONFIG.declare( + 'allow_quadratic_constraint', + ConfigValue( + default=True, + domain=bool, + description='If True, allow quadratic terms in the model constraints', + ), + ) + + def __init__(self): + self.config = self.CONFIG() + + def __call__(self, model, filename, solver_capability, io_options): + if filename is None: + filename = model.name + ".lp" + + # Duplicate io_options to avoid side-effects + io_options = dict(io_options) + # Map old solver capabilities to new writer options + qp = solver_capability('quadratic_objective') + if 'allow_quadratic_objective' not in io_options: + io_options['allow_quadratic_objective'] = qp + qc = solver_capability('quadratic_constraint') + if 'allow_quadratic_constraint' not in io_options: + io_options['allow_quadratic_constraint'] = qc + + with open(filename, 'w', newline='') as FILE: + info = self.write(model, FILE, **io_options) + return filename, info.symbol_map + + @document_kwargs_from_configdict(CONFIG) + def write(self, model, ostream, **options): + """Write a model in LP format. + + Returns + ------- + LPWriterInfo + + Parameters + ---------- + model: ConcreteModel + The concrete Pyomo model to write out. + + ostream: io.TextIOBase + The text output stream where the LP "file" will be written. + Could be an opened file or a io.StringIO. + + """ + config = self.config(options) + + if config.output_fixed_variable_bounds: + deprecation_warning( + "The 'output_fixed_variable_bounds' option to the LP " + "writer is deprecated and is ignored by the lp_v2 writer." + ) + + # Pause the GC, as the walker that generates the compiled LP + # representation generates (and disposes of) a large number of + # small objects. + with PauseGC(): + return _LPWriter_impl(ostream, config).write(model) + + +class _LPWriter_impl(object): + def __init__(self, ostream, config): + self.ostream = ostream + self.config = config + self.symbol_map = None + + def write(self, model): + timing_logger = logging.getLogger('pyomo.common.timing.writer') + timer = TicTocTimer(logger=timing_logger) + with_debug_timing = ( + timing_logger.isEnabledFor(logging.DEBUG) and timing_logger.hasHandlers() + ) + + ostream = self.ostream + + labeler = self.config.labeler + if labeler is None: + if self.config.symbolic_solver_labels: + labeler = LPFileLabeler() + else: + labeler = NumericLabeler('x') + self.symbol_map = SymbolMap(labeler) + addSymbol = self.symbol_map.addSymbol + aliasSymbol = self.symbol_map.alias + getSymbol = self.symbol_map.getSymbol + + sorter = FileDeterminism_to_SortComponents(self.config.file_determinism) + component_map, unknown = categorize_valid_components( + model, + active=True, + sort=sorter, + valid={ + Block, + Constraint, + Var, + Param, + Expression, + # FIXME: Non-active components should not report as Active + ExternalFunction, + Set, + RangeSet, + Port, + # TODO: Piecewise, Complementarity + }, + targets={Suffix, SOSConstraint, Objective}, + ) + if unknown: + raise ValueError( + "The model ('%s') contains the following active components " + "that the LP writer does not know how to process:\n\t%s" + % ( + model.name, + "\n\t".join( + "%s:\n\t\t%s" % (k, "\n\t\t".join(map(attrgetter('name'), v))) + for k, v in unknown.items() + ), + ) + ) + + ONE_VAR_CONSTANT = Var(name='ONE_VAR_CONSTANT', bounds=(1, 1)) + ONE_VAR_CONSTANT.construct() + + self.var_map = var_map = {id(ONE_VAR_CONSTANT): ONE_VAR_CONSTANT} + initialize_var_map_from_column_order(model, self.config, var_map) + self.var_order = {_id: i for i, _id in enumerate(var_map)} + + _qp = self.config.allow_quadratic_objective + _qc = self.config.allow_quadratic_constraint + objective_visitor = (QuadraticRepnVisitor if _qp else LinearRepnVisitor)( + {}, var_map, self.var_order + ) + constraint_visitor = (QuadraticRepnVisitor if _qc else LinearRepnVisitor)( + objective_visitor.subexpression_cache if _qp == _qc else {}, + var_map, + self.var_order, + ) + + timer.toc('Initialized column order', level=logging.DEBUG) + + ostream.write(f"\\* Source Pyomo model name={model.name} *\\\n\n") + + # + # Process objective + # + if not component_map[Objective]: + objectives = [Objective(expr=1)] + objectives[0].construct() + else: + objectives = [] + for blk in component_map[Objective]: + objectives.extend( + blk.component_data_objects( + Objective, active=True, descend_into=False, sort=sorter + ) + ) + if len(objectives) > 1: + raise ValueError( + "More than one active objective defined for input model '%s'; " + "Cannot write legal LP file\nObjectives: %s" + % (model.name, ' '.join(obj.name for obj in objectives)) + ) + + obj = objectives[0] + ostream.write( + ("min \n%s:\n" if obj.sense == minimize else "max \n%s:\n") + % (getSymbol(obj, labeler),) + ) + repn = objective_visitor.walk_expression(obj.expr) + if repn.nonlinear is not None: + raise ValueError( + f"Model objective ({obj.name}) contains nonlinear terms that " + "cannot be written to LP format" + ) + if repn.constant or not (repn.linear or getattr(repn, 'quadratic', None)): + # Older versions of CPLEX (including 12.6) and all versions + # of GLPK (through 5.0) do not support constants in the + # objective in LP format. To avoid painful bookkeeping, we + # introduce the following "variable", constrained to the + # value 1. + # + # In addition, most solvers do no tolerate an empty + # objective, this will ensure we at least write out + # 0*ONE_VAR_CONSTANT. + repn.linear[id(ONE_VAR_CONSTANT)] = repn.constant + repn.constant = 0 + self.write_expression(ostream, repn, True) + aliasSymbol(obj, '__default_objective__') + if with_debug_timing: + timer.toc('Objective %s', obj, level=logging.DEBUG) + + ostream.write("\ns.t.\n") + + # + # Tabulate constraints + # + skip_trivial_constraints = self.config.skip_trivial_constraints + have_nontrivial = False + last_parent = None + for con in ordered_active_constraints(model, self.config): + if with_debug_timing and con.parent_component() is not last_parent: + timer.toc('Constraint %s', last_parent, level=logging.DEBUG) + last_parent = con.parent_component() + lb = con.lb + ub = con.ub + if lb is None and ub is None: + # Note: you *cannot* output trivial (unbounded) + # constraints in LP format. I suppose we could add a + # slack variable if skip_trivial_constraints is False, + # but that seems rather silly. + continue + repn = constraint_visitor.walk_expression(con.body) + if repn.nonlinear is not None: + raise ValueError( + f"Model constraint ({con.name}) contains nonlinear terms that " + "cannot be written to LP format" + ) + + # Pull out the constant: we will move it to the bounds + offset = repn.constant + repn.constant = 0 + + if repn.linear or getattr(repn, 'quadratic', None): + have_nontrivial = True + else: + if ( + skip_trivial_constraints + and (lb is None or lb <= offset) + and (ub is None or ub >= offset) + ): + continue + # This is a trivially infeasible model. We could raise + # an exception, or we could allow the solver to return + # infeasible. There are fewer logic paths (in + # particular related to mapping solver result status) if + # we just defer to the solver. + # + # Add a dummy (fixed) variable to the constraint, + # because some solvers (including versions of GLPK) + # cannot parse an LP file without a variable on the left + # hand side. + repn.linear[id(ONE_VAR_CONSTANT)] = 0 + + symbol = labeler(con) + if lb == ub and lb is not None: + label = f'c_e_{symbol}_' + addSymbol(con, label) + ostream.write(f'\n{label}:\n') + self.write_expression(ostream, repn, False) + ostream.write(f'= {(lb - offset)!r}\n') + elif lb is not None and lb != neg_inf: + if ub is not None and ub != inf: + # We will need the constraint body twice. Generate + # in a buffer so we only have to do that once. + buf = StringIO() + self.write_expression(buf, repn, False) + buf = buf.getvalue() + # + label = f'r_l_{symbol}_' + addSymbol(con, label) + ostream.write(f'\n{label}:\n') + ostream.write(buf) + ostream.write(f'>= {(lb - offset)!r}\n') + label = f'r_u_{symbol}_' + aliasSymbol(con, label) + ostream.write(f'\n{label}:\n') + ostream.write(buf) + ostream.write(f'<= {(ub - offset)!r}\n') + else: + label = f'c_l_{symbol}_' + addSymbol(con, label) + ostream.write(f'\n{label}:\n') + self.write_expression(ostream, repn, False) + ostream.write(f'>= {(lb - offset)!r}\n') + elif ub is not None and ub != inf: + label = f'c_u_{symbol}_' + addSymbol(con, label) + ostream.write(f'\n{label}:\n') + self.write_expression(ostream, repn, False) + ostream.write(f'<= {(ub - offset)!r}\n') + + if with_debug_timing: + # report the last constraint + timer.toc('Constraint %s', last_parent, level=logging.DEBUG) + if not have_nontrivial: + # Some solvers (notably CBC through at least 2.10.4) will + # return a nonzero return code when the model has no + # constraints. To work around the original Pyomo solver + # hierarchy (where the return code was processed in the base + # class), we will add a dummy constraint here. + repn = constraint_visitor.Result() # walk_expression(ONE_VAR_CONSTANT) + repn.linear[id(ONE_VAR_CONSTANT)] = 1 + ostream.write(f'\nc_e_ONE_VAR_CONSTANT:\n') + self.write_expression(ostream, repn, False) + ostream.write(f'= 1\n') + + ostream.write("\nbounds") + + # Track the number of integer and binary variables, so you can + # output their status later. + integer_vars = [] + binary_vars = [] + getSymbolByObjectID = self.symbol_map.byObject.get + for vid, v in var_map.items(): + # Some variables in the var_map may not actually have been + # written out to the LP file (e.g., added from col_order, or + # multiplied by 0 in the expressions). Check to see that + # the variable is in the symbol_map before outputting. + v_symbol = getSymbolByObjectID(vid, None) + if not v_symbol: + continue + if v.is_binary(): + binary_vars.append(v_symbol) + elif v.is_integer(): + integer_vars.append(v_symbol) + + lb, ub = v.bounds + lb = '-inf' if lb is None else repr(lb) + ub = '+inf' if ub is None or ub == inf else repr(ub) + ostream.write(f"\n {lb} <= {v_symbol} <= {ub}") + + if integer_vars: + ostream.write("\ngeneral\n ") + ostream.write("\n ".join(integer_vars)) + + if binary_vars: + ostream.write("\nbinary\n ") + ostream.write("\n ".join(binary_vars)) + + timer.toc("Wrote variable bounds and domains", level=logging.DEBUG) + + # + # Tabulate SOS constraints + # + if component_map[SOSConstraint]: + sos = [] + for blk in component_map[SOSConstraint]: + sos.extend( + blk.component_data_objects( + SOSConstraint, active=True, descend_into=False, sort=sorter + ) + ) + if self.config.row_order: + # sort() is stable (per Python docs), so we can let + # all unspecified rows have a row number one bigger than + # the number of rows specified by the user ordering. + _n = len(row_order) + sos.sort(key=lambda x: _row_getter(x, _n)) + + ostream.write("\nSOS\n") + for soscon in sos: + ostream.write(f'\n{getSymbol(soscon)}: S{soscon.level}::\n') + for v, w in getattr(soscon, 'get_items', soscon.items)(): + ostream.write(f" {getSymbol(v)}:{w!r}\n") + + ostream.write("\nend\n") + + info = LPWriterInfo(self.symbol_map) + timer.toc("Generated LP representation", delta=False) + return info + + def write_expression(self, ostream, expr, is_objective): + assert not expr.constant + getSymbol = self.symbol_map.getSymbol + getVarOrder = self.var_order.__getitem__ + getVar = self.var_map.__getitem__ + + if expr.linear: + for vid, coef in sorted( + expr.linear.items(), key=lambda x: getVarOrder(x[0]) + ): + if coef < 0: + ostream.write(f'{coef!r} {getSymbol(getVar(vid))}\n') + else: + ostream.write(f'+{coef!r} {getSymbol(getVar(vid))}\n') + + quadratic = getattr(expr, 'quadratic', None) + if quadratic: + + def _normalize_constraint(data): + (vid1, vid2), coef = data + c1 = getVarOrder(vid1) + c2 = getVarOrder(vid2) + if c2 < c1: + col = c2, c1 + sym = f' {getSymbol(getVar(vid2))} * {getSymbol(getVar(vid1))}\n' + elif c1 == c2: + col = c1, c1 + sym = f' {getSymbol(getVar(vid2))} ^ 2\n' + else: + col = c1, c2 + sym = f' {getSymbol(getVar(vid1))} * {getSymbol(getVar(vid2))}\n' + if coef < 0: + return col, repr(coef) + sym + else: + return col, '+' + repr(coef) + sym + + if is_objective: + # + # Times 2 because LP format requires /2 for all the + # quadratic terms /of the objective only/. Discovered + # the last bit through trial and error. + # Ref: ILog CPlex 8.0 User's Manual, p197. + # + def _normalize_objective(data): + vids, coef = data + return _normalize_constraint((vids, 2 * coef)) + + _normalize = _normalize_objective + else: + _normalize = _normalize_constraint + + ostream.write('+ [\n') + quadratic = sorted(map(_normalize, quadratic.items()), key=itemgetter(0)) + ostream.write(''.join(map(itemgetter(1), quadratic))) + if is_objective: + ostream.write("] / 2\n") + else: + ostream.write("]\n") diff --git a/pyomo/repn/plugins/mps.py b/pyomo/repn/plugins/mps.py index 1ee774da9ac..89420929778 100644 --- a/pyomo/repn/plugins/mps.py +++ b/pyomo/repn/plugins/mps.py @@ -20,22 +20,31 @@ from pyomo.common.gc_manager import PauseGC from pyomo.opt import ProblemFormat from pyomo.opt.base import AbstractProblemWriter, WriterFactory -from pyomo.core.base import \ - (SymbolMap, TextLabeler, - NumericLabeler, Constraint, SortComponents, - Var, value, - SOSConstraint, Objective, - ComponentMap, is_fixed) +from pyomo.core.base import ( + SymbolMap, + TextLabeler, + NumericLabeler, + Constraint, + SortComponents, + Var, + value, + SOSConstraint, + Objective, + ComponentMap, + is_fixed, +) from pyomo.repn import generate_standard_repn logger = logging.getLogger('pyomo.core') + def _no_negative_zero(val): """Make sure -0 is never output. Makes diff tests easier.""" if val == 0: return 0 return val + def _get_bound(exp): if exp is None: return None @@ -46,9 +55,7 @@ def _get_bound(exp): @WriterFactory.register('mps', 'Generate the corresponding MPS file') class ProblemWriter_mps(AbstractProblemWriter): - def __init__(self): - AbstractProblemWriter.__init__(self, ProblemFormat.mps) # the MPS writer is responsible for tracking which variables are @@ -71,34 +78,29 @@ def __init__(self): # the number's sign. self._precision_string = '.17g' - def __call__(self, - model, - output_filename, - solver_capability, - io_options): - + def __call__(self, model, output_filename, solver_capability, io_options): # Make sure not to modify the user's dictionary, # they may be reusing it outside of this call io_options = dict(io_options) # Skip writing constraints whose body section is # fixed (i.e., no variables) - skip_trivial_constraints = \ - io_options.pop("skip_trivial_constraints", False) + skip_trivial_constraints = io_options.pop("skip_trivial_constraints", False) # Use full Pyomo component names in the MPS file rather # than shortened symbols (slower, but useful for debugging). - symbolic_solver_labels = \ - io_options.pop("symbolic_solver_labels", False) + symbolic_solver_labels = io_options.pop("symbolic_solver_labels", False) - output_fixed_variable_bounds = \ - io_options.pop("output_fixed_variable_bounds", False) + output_fixed_variable_bounds = io_options.pop( + "output_fixed_variable_bounds", False + ) # If False, unused variables will not be included in # the MPS file. Otherwise, include all variables in # the bounds sections. - include_all_variable_bounds = \ - io_options.pop("include_all_variable_bounds", False) + include_all_variable_bounds = io_options.pop( + "include_all_variable_bounds", False + ) labeler = io_options.pop("labeler", None) @@ -117,25 +119,26 @@ def __call__(self, # make sure the ONE_VAR_CONSTANT variable appears in # the objective even if the constant part of the # objective is zero - force_objective_constant = \ - io_options.pop("force_objective_constant", False) + force_objective_constant = io_options.pop("force_objective_constant", False) # Whether or not to include the OBJSENSE section in # the MPS file. Some solvers, like GLPK and CBC, # either throw an error or flat out ignore this # section (I assume the default is to minimize) - skip_objective_sense = \ - io_options.pop("skip_objective_sense", False) + skip_objective_sense = io_options.pop("skip_objective_sense", False) if len(io_options): raise ValueError( - "ProblemWriter_mps passed unrecognized io_options:\n\t" + - "\n\t".join("%s = %s" % (k,v) for k,v in io_options.items())) + "ProblemWriter_mps passed unrecognized io_options:\n\t" + + "\n\t".join("%s = %s" % (k, v) for k, v in io_options.items()) + ) if symbolic_solver_labels and (labeler is not None): - raise ValueError("ProblemWriter_mps: Using both the " - "'symbolic_solver_labels' and 'labeler' " - "I/O options is forbidden") + raise ValueError( + "ProblemWriter_mps: Using both the " + "'symbolic_solver_labels' and 'labeler' " + "I/O options is forbidden" + ) if symbolic_solver_labels: labeler = TextLabeler() @@ -168,20 +171,16 @@ def __call__(self, skip_trivial_constraints=skip_trivial_constraints, force_objective_constant=force_objective_constant, include_all_variable_bounds=include_all_variable_bounds, - skip_objective_sense=skip_objective_sense) + skip_objective_sense=skip_objective_sense, + ) self._referenced_variable_ids.clear() return output_filename, symbol_map def _extract_variable_coefficients( - self, - row_label, - repn, - column_data, - quadratic_data, - variable_to_column): - + self, row_label, repn, column_data, quadratic_data, variable_to_column + ): # # Linear # @@ -198,7 +197,7 @@ def _extract_variable_coefficients( for vardata, coef in zip(repn.quadratic_vars, repn.quadratic_coefs): self._referenced_variable_ids[id(vardata[0])] = vardata[0] self._referenced_variable_ids[id(vardata[1])] = vardata[1] - quad_terms.append( (vardata, coef) ) + quad_terms.append((vardata, coef)) quadratic_data.append((row_label, quad_terms)) # @@ -206,13 +205,9 @@ def _extract_variable_coefficients( # return repn.constant - def _printSOS(self, - symbol_map, - labeler, - variable_symbol_map, - soscondata, - output_file): - + def _printSOS( + self, symbol_map, labeler, variable_symbol_map, soscondata, output_file + ): if hasattr(soscondata, 'get_items'): sos_items = list(soscondata.get_items()) else: @@ -228,42 +223,45 @@ def _printSOS(self, # I think there are many flavors to the SOS # section in the Free MPS format. I'm going with # what Cplex and Gurobi seem to recognize - output_file.write(" S%d %s\n" - % (level, - symbol_map.getSymbol(soscondata,labeler))) + output_file.write( + " S%d %s\n" % (level, symbol_map.getSymbol(soscondata, labeler)) + ) - sos_template_string = " %s %"+self._precision_string+"\n" + sos_template_string = " %s %" + self._precision_string + "\n" for vardata, weight in sos_items: weight = _get_bound(weight) if weight < 0: raise ValueError( "Cannot use negative weight %f " "for variable %s is special ordered " - "set %s " % (weight, vardata.name, soscondata.name)) + "set %s " % (weight, vardata.name, soscondata.name) + ) if vardata.fixed: raise RuntimeError( "SOSConstraint '%s' includes a fixed variable '%s'. This is " - "currently not supported. Deactive this constraint in order to " - "proceed." % (soscondata.name, vardata.name)) + "currently not supported. Deactivate this constraint in order to " + "proceed." % (soscondata.name, vardata.name) + ) self._referenced_variable_ids[id(vardata)] = vardata - output_file.write(sos_template_string - % (variable_symbol_map.getSymbol(vardata), - weight)) - - def _print_model_MPS(self, - model, - output_file, - solver_capability, - labeler, - output_fixed_variable_bounds=False, - file_determinism=1, - row_order=None, - column_order=None, - skip_trivial_constraints=False, - force_objective_constant=False, - include_all_variable_bounds=False, - skip_objective_sense=False): - + output_file.write( + sos_template_string % (variable_symbol_map.getSymbol(vardata), weight) + ) + + def _print_model_MPS( + self, + model, + output_file, + solver_capability, + labeler, + output_fixed_variable_bounds=False, + file_determinism=1, + row_order=None, + column_order=None, + skip_trivial_constraints=False, + force_objective_constant=False, + include_all_variable_bounds=False, + skip_objective_sense=False, + ): symbol_map = SymbolMap() variable_symbol_map = SymbolMap() # NOTE: we use createSymbol instead of getSymbol because we @@ -287,22 +285,16 @@ def _print_model_MPS(self, # all_blocks = [] variable_list = [] - for block in model.block_data_objects(active=True, - sort=sortOrder): - + for block in model.block_data_objects(active=True, sort=sortOrder): all_blocks.append(block) for vardata in block.component_data_objects( - Var, - active=True, - sort=sortOrder, - descend_into=False): - + Var, active=True, sort=sortOrder, descend_into=False + ): variable_list.append(vardata) variable_label_pairs.append( - (vardata,create_symbol_func(symbol_map, - vardata, - labeler))) + (vardata, create_symbol_func(symbol_map, vardata, labeler)) + ) variable_symbol_map.addSymbols(variable_label_pairs) @@ -317,9 +309,10 @@ def _print_model_MPS(self, # prepare to hold the sparse columns variable_to_column = ComponentMap( - (vardata, i) for i, vardata in enumerate(variable_list)) + (vardata, i) for i, vardata in enumerate(variable_list) + ) # add one position for ONE_VAR_CONSTANT - column_data = [[] for i in range(len(variable_list)+1)] + column_data = [[] for i in range(len(variable_list) + 1)] quadobj_data = [] quadmatrix_data = [] # constraint rhs @@ -340,31 +333,27 @@ def _print_model_MPS(self, numObj = 0 onames = [] for block in all_blocks: - - gen_obj_repn = \ - getattr(block, "_gen_obj_repn", True) + gen_obj_repn = getattr(block, "_gen_obj_repn", True) # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): + if not hasattr(block, '_repn'): block._repn = ComponentMap() block_repn = block._repn for objective_data in block.component_data_objects( - Objective, - active=True, - sort=sortOrder, - descend_into=False): - + Objective, active=True, sort=sortOrder, descend_into=False + ): numObj += 1 onames.append(objective_data.name) if numObj > 1: raise ValueError( "More than one active objective defined for input " "model '%s'; Cannot write legal MPS file\n" - "Objectives: %s" % (model.name, ' '.join(onames))) + "Objectives: %s" % (model.name, ' '.join(onames)) + ) - objective_label = create_symbol_func(symbol_map, - objective_data, - labeler) + objective_label = create_symbol_func( + symbol_map, objective_data, labeler + ) symbol_map.alias(objective_data, '__default_objective__') if not skip_objective_sense: @@ -375,35 +364,34 @@ def _print_model_MPS(self, output_file.write(" MAX\n") # This section is not recognized by the COIN-OR # MPS reader - #output_file.write("OBJNAME\n") - #output_file.write(" %s\n" % (objective_label)) + # output_file.write("OBJNAME\n") + # output_file.write(" %s\n" % (objective_label)) output_file.write("ROWS\n") output_file.write(" N %s\n" % (objective_label)) if gen_obj_repn: - repn = \ - generate_standard_repn(objective_data.expr) + repn = generate_standard_repn(objective_data.expr) block_repn[objective_data] = repn else: repn = block_repn[objective_data] degree = repn.polynomial_degree() if degree == 0: - logger.warning("Constant objective detected, replacing " - "with a placeholder to prevent solver failure.") + logger.warning( + "Constant objective detected, replacing " + "with a placeholder to prevent solver failure." + ) force_objective_constant = True elif degree is None: raise RuntimeError( "Cannot write legal MPS file. Objective '%s' " "has nonlinear terms that are not quadratic." - % objective_data.name) + % objective_data.name + ) constant = extract_variable_coefficients( - objective_label, - repn, - column_data, - quadobj_data, - variable_to_column) + objective_label, repn, column_data, quadobj_data, variable_to_column + ) if force_objective_constant or (constant != 0.0): # ONE_VAR_CONSTANT column_data[-1].append((objective_label, constant)) @@ -411,31 +399,28 @@ def _print_model_MPS(self, if numObj == 0: raise ValueError( "Cannot write legal MPS file: No objective defined " - "for input model '%s'." % str(model)) + "for input model '%s'." % str(model) + ) assert objective_label is not None # Constraints def constraint_generator(): for block in all_blocks: - - gen_con_repn = \ - getattr(block, "_gen_con_repn", True) + gen_con_repn = getattr(block, "_gen_con_repn", True) # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): + if not hasattr(block, '_repn'): block._repn = ComponentMap() block_repn = block._repn for constraint_data in block.component_data_objects( - Constraint, - active=True, - sort=sortOrder, - descend_into=False): - - if (not constraint_data.has_lb()) and \ - (not constraint_data.has_ub()): + Constraint, active=True, sort=sortOrder, descend_into=False + ): + if (not constraint_data.has_lb()) and ( + not constraint_data.has_ub() + ): assert not constraint_data.equality - continue # non-binding, so skip + continue # non-binding, so skip if constraint_data._linear_canonical_form: repn = constraint_data.canonical_form() @@ -450,14 +435,15 @@ def constraint_generator(): if row_order is not None: sorted_constraint_list = list(constraint_generator()) sorted_constraint_list.sort(key=lambda x: row_order[x[0]]) + def yield_all_constraints(): for constraint_data, repn in sorted_constraint_list: yield constraint_data, repn + else: yield_all_constraints = constraint_generator for constraint_data, repn in yield_all_constraints(): - degree = repn.polynomial_degree() # Write constraint @@ -467,26 +453,20 @@ def yield_all_constraints(): elif degree is None: raise RuntimeError( "Cannot write legal MPS file. Constraint '%s' " - "has nonlinear terms that are not quadratic." - % constraint_data.name) + "has nonlinear terms that are not quadratic." % constraint_data.name + ) # Create symbol - con_symbol = create_symbol_func(symbol_map, - constraint_data, - labeler) + con_symbol = create_symbol_func(symbol_map, constraint_data, labeler) if constraint_data.equality: - assert value(constraint_data.lower) == \ - value(constraint_data.upper) + assert value(constraint_data.lower) == value(constraint_data.upper) label = 'c_e_' + con_symbol + '_' alias_symbol_func(symbol_map, constraint_data, label) output_file.write(" E %s\n" % (label)) offset = extract_variable_coefficients( - label, - repn, - column_data, - quadmatrix_data, - variable_to_column) + label, repn, column_data, quadmatrix_data, variable_to_column + ) bound = constraint_data.lower bound = _get_bound(bound) - offset rhs_data.append((label, _no_negative_zero(bound))) @@ -499,11 +479,8 @@ def yield_all_constraints(): alias_symbol_func(symbol_map, constraint_data, label) output_file.write(" G %s\n" % (label)) offset = extract_variable_coefficients( - label, - repn, - column_data, - quadmatrix_data, - variable_to_column) + label, repn, column_data, quadmatrix_data, variable_to_column + ) bound = constraint_data.lower bound = _get_bound(bound) - offset rhs_data.append((label, _no_negative_zero(bound))) @@ -518,11 +495,8 @@ def yield_all_constraints(): alias_symbol_func(symbol_map, constraint_data, label) output_file.write(" L %s\n" % (label)) offset = extract_variable_coefficients( - label, - repn, - column_data, - quadmatrix_data, - variable_to_column) + label, repn, column_data, quadmatrix_data, variable_to_column + ) bound = constraint_data.upper bound = _get_bound(bound) - offset rhs_data.append((label, _no_negative_zero(bound))) @@ -532,13 +506,13 @@ def yield_all_constraints(): if len(column_data[-1]) > 0: # ONE_VAR_CONSTANT = 1 output_file.write(" E c_e_ONE_VAR_CONSTANT\n") - column_data[-1].append(("c_e_ONE_VAR_CONSTANT",1)) - rhs_data.append(("c_e_ONE_VAR_CONSTANT",1)) + column_data[-1].append(("c_e_ONE_VAR_CONSTANT", 1)) + rhs_data.append(("c_e_ONE_VAR_CONSTANT", 1)) # # COLUMNS section # - column_template = " %s %s %"+self._precision_string+"\n" + column_template = " %s %s %" + self._precision_string + "\n" output_file.write("COLUMNS\n") cnt = 0 for vardata in variable_list: @@ -547,10 +521,10 @@ def yield_all_constraints(): if len(col_entries) > 0: var_label = variable_symbol_dictionary[id(vardata)] for i, (row_label, coef) in enumerate(col_entries): - output_file.write(column_template - % (var_label, - row_label, - _no_negative_zero(coef))) + output_file.write( + column_template + % (var_label, row_label, _no_negative_zero(coef)) + ) elif include_all_variable_bounds: # the column is empty, so add a (0 * var) # term to the objective @@ -560,25 +534,21 @@ def yield_all_constraints(): # seem to work for CPLEX 12.6, so I am # doing it this way so that it will work for both var_label = variable_symbol_dictionary[id(vardata)] - output_file.write(column_template - % (var_label, - objective_label, - 0)) + output_file.write(column_template % (var_label, objective_label, 0)) - assert cnt == len(column_data)-1 + assert cnt == len(column_data) - 1 if len(column_data[-1]) > 0: col_entries = column_data[-1] var_label = "ONE_VAR_CONSTANT" for i, (row_label, coef) in enumerate(col_entries): - output_file.write(column_template - % (var_label, - row_label, - _no_negative_zero(coef))) + output_file.write( + column_template % (var_label, row_label, _no_negative_zero(coef)) + ) # # RHS section # - rhs_template = " RHS %s %"+self._precision_string+"\n" + rhs_template = " RHS %s %" + self._precision_string + "\n" output_file.write("RHS\n") for i, (row_label, rhs) in enumerate(rhs_data): # note: we have already converted any -0 to 0 by this point @@ -589,40 +559,38 @@ def yield_all_constraints(): sos1 = solver_capability("sos1") sos2 = solver_capability("sos2") for block in all_blocks: - for soscondata in block.component_data_objects( - SOSConstraint, - active=True, - sort=sortOrder, - descend_into=False): - + SOSConstraint, active=True, sort=sortOrder, descend_into=False + ): create_symbol_func(symbol_map, soscondata, labeler) level = soscondata.level - if (level == 1 and not sos1) or \ - (level == 2 and not sos2) or \ - (level > 2): + if ( + (level == 1 and not sos1) + or (level == 2 and not sos2) + or (level > 2) + ): raise ValueError( - "Solver does not support SOS level %s constraints" % (level)) + "Solver does not support SOS level %s constraints" % (level) + ) # This updates the referenced_variable_ids, just in case # there is a variable that only appears in an # SOSConstraint, in which case this needs to be known # before we write the "bounds" section (Cplex does not # handle this correctly, Gurobi does) - self._printSOS(symbol_map, - labeler, - variable_symbol_map, - soscondata, - SOSlines) + self._printSOS( + symbol_map, labeler, variable_symbol_map, soscondata, SOSlines + ) # # BOUNDS section # - entry_template = "%s %"+self._precision_string+"\n" + entry_template = "%s %" + self._precision_string + "\n" output_file.write("BOUNDS\n") for vardata in variable_list: - if include_all_variable_bounds or \ - (id(vardata) in self._referenced_variable_ids): + if include_all_variable_bounds or ( + id(vardata) in self._referenced_variable_ids + ): var_label = variable_symbol_dictionary[id(vardata)] if vardata.fixed: if not output_fixed_variable_bounds: @@ -632,12 +600,14 @@ def yield_all_constraints(): "usually indicative of a preprocessing error. Use the " "IO-option 'output_fixed_variable_bounds=True' to suppress " "this error and fix the variable by overwriting its bounds " - "in the MPS file." % (vardata.name, model.name)) + "in the MPS file." % (vardata.name, model.name) + ) if vardata.value is None: raise ValueError("Variable cannot be fixed to a value of None.") - output_file.write((" FX BOUND "+entry_template) - % (var_label, - _no_negative_zero(value(vardata.value)))) + output_file.write( + (" FX BOUND " + entry_template) + % (var_label, _no_negative_zero(value(vardata.value))) + ) continue # convert any -0 to 0 to make baseline diffing easier @@ -662,13 +632,15 @@ def yield_all_constraints(): # but CPLEX 12.6 does not, so I am just # using a large value if not unbounded_lb: - output_file.write((" LI BOUND "+entry_template) - % (var_label, vardata_lb)) + output_file.write( + (" LI BOUND " + entry_template) % (var_label, vardata_lb) + ) else: output_file.write(" LI BOUND %s -10E20\n" % (var_label)) if not unbounded_ub: - output_file.write((" UI BOUND "+entry_template) - % (var_label, vardata_ub)) + output_file.write( + (" UI BOUND " + entry_template) % (var_label, vardata_ub) + ) else: output_file.write(" UI BOUND %s 10E20\n" % (var_label)) else: @@ -677,14 +649,18 @@ def yield_all_constraints(): output_file.write(" FR BOUND %s\n" % (var_label)) else: if not unbounded_lb: - output_file.write((" LO BOUND "+entry_template) - % (var_label, vardata_lb)) + output_file.write( + (" LO BOUND " + entry_template) + % (var_label, vardata_lb) + ) else: output_file.write(" MI BOUND %s\n" % (var_label)) if not unbounded_ub: - output_file.write((" UP BOUND "+entry_template) - % (var_label, vardata_ub)) + output_file.write( + (" UP BOUND " + entry_template) + % (var_label, vardata_ub) + ) # # SOS section @@ -703,39 +679,40 @@ def yield_all_constraints(): # recognizes QUADOBJ (Gurobi and Cplex seem to # be okay with this) output_file.write("QUADOBJ\n") - #output_file.write("QMATRIX\n") + # output_file.write("QMATRIX\n") label, quad_terms = quadobj_data[0] assert label == objective_label # sort by the sorted tuple of symbols (or column assignments) # for the variables appearing in the term - quad_terms = sorted(quad_terms, - key=lambda _x: \ - sorted((variable_to_column[_x[0][0]], - variable_to_column[_x[0][1]]))) + quad_terms = sorted( + quad_terms, + key=lambda _x: sorted( + (variable_to_column[_x[0][0]], variable_to_column[_x[0][1]]) + ), + ) for term, coef in quad_terms: # sort the term for consistent output - var1, var2 = sorted(term, - key=lambda _x: variable_to_column[_x]) + var1, var2 = sorted(term, key=lambda _x: variable_to_column[_x]) var1_label = variable_symbol_dictionary[id(var1)] var2_label = variable_symbol_dictionary[id(var2)] # Don't forget that a quadratic objective is always # assumed to be divided by 2 if var1_label == var2_label: - output_file.write(column_template - % (var1_label, - var2_label, - _no_negative_zero(coef * 2))) + output_file.write( + column_template + % (var1_label, var2_label, _no_negative_zero(coef * 2)) + ) else: # the matrix needs to be symmetric so split # the coefficient (but remember it is divided by 2) - output_file.write(column_template - % (var1_label, - var2_label, - _no_negative_zero(coef))) - output_file.write(column_template - % (var2_label, - var1_label, - _no_negative_zero(coef))) + output_file.write( + column_template + % (var1_label, var2_label, _no_negative_zero(coef)) + ) + output_file.write( + column_template + % (var2_label, var1_label, _no_negative_zero(coef)) + ) # # QCMATRIX section @@ -746,32 +723,32 @@ def yield_all_constraints(): # sort by the sorted tuple of symbols (or # column assignments) for the variables # appearing in the term - quad_terms = sorted(quad_terms, - key=lambda _x: \ - sorted((variable_to_column[_x[0][0]], - variable_to_column[_x[0][1]]))) + quad_terms = sorted( + quad_terms, + key=lambda _x: sorted( + (variable_to_column[_x[0][0]], variable_to_column[_x[0][1]]) + ), + ) for term, coef in quad_terms: # sort the term for consistent output - var1, var2 = sorted(term, - key=lambda _x: variable_to_column[_x]) + var1, var2 = sorted(term, key=lambda _x: variable_to_column[_x]) var1_label = variable_symbol_dictionary[id(var1)] var2_label = variable_symbol_dictionary[id(var2)] if var1_label == var2_label: - output_file.write(column_template - % (var1_label, - var2_label, - _no_negative_zero(coef))) + output_file.write( + column_template + % (var1_label, var2_label, _no_negative_zero(coef)) + ) else: # the matrix needs to be symmetric so split # the coefficient - output_file.write(column_template - % (var1_label, - var2_label, - _no_negative_zero(coef * 0.5))) - output_file.write(column_template - % (var2_label, - var1_label, - coef * 0.5)) + output_file.write( + column_template + % (var1_label, var2_label, _no_negative_zero(coef * 0.5)) + ) + output_file.write( + column_template % (var2_label, var1_label, coef * 0.5) + ) output_file.write("ENDATA\n") @@ -779,8 +756,9 @@ def yield_all_constraints(): # in the active constraints **Note**: warm start method may # rely on this for choosing the set of potential warm start # variables - vars_to_delete = set(variable_symbol_map.byObject.keys()) - \ - set(self._referenced_variable_ids.keys()) + vars_to_delete = set(variable_symbol_map.byObject.keys()) - set( + self._referenced_variable_ids.keys() + ) sm_byObject = symbol_map.byObject sm_bySymbol = symbol_map.bySymbol var_sm_byObject = variable_symbol_map.byObject diff --git a/pyomo/repn/plugins/nl_writer.py b/pyomo/repn/plugins/nl_writer.py index 4c8707696cf..c7d10883461 100644 --- a/pyomo/repn/plugins/nl_writer.py +++ b/pyomo/repn/plugins/nl_writer.py @@ -9,35 +9,58 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -import enum import logging import os -import sys from collections import deque from operator import itemgetter, attrgetter, setitem from pyomo.common.backports import nullcontext from pyomo.common.config import ( - ConfigBlock, ConfigValue, InEnum, add_docstring_list, + ConfigBlock, + ConfigValue, + InEnum, + document_kwargs_from_configdict, ) +from pyomo.common.deprecation import deprecation_warning from pyomo.common.errors import DeveloperError from pyomo.common.gc_manager import PauseGC from pyomo.common.timing import TicTocTimer -from pyomo.core.expr.current import ( - NegationExpression, ProductExpression, DivisionExpression, - PowExpression, AbsExpression, UnaryFunctionExpression, - MonomialTermExpression, LinearExpression, SumExpression, - EqualityExpression, InequalityExpression, RangedExpression, - Expr_ifExpression, ExternalFunctionExpression, - native_types, native_numeric_types, value, +from pyomo.core.expr import ( + NegationExpression, + ProductExpression, + DivisionExpression, + PowExpression, + AbsExpression, + UnaryFunctionExpression, + MonomialTermExpression, + LinearExpression, + SumExpression, + EqualityExpression, + InequalityExpression, + RangedExpression, + Expr_ifExpression, + ExternalFunctionExpression, + native_types, + native_numeric_types, + value, ) -from pyomo.core.expr.visitor import StreamBasedExpressionVisitor +from pyomo.core.expr.visitor import StreamBasedExpressionVisitor, _EvaluationVisitor from pyomo.core.base import ( - Block, Objective, Constraint, Var, Param, Expression, ExternalFunction, - Suffix, SOSConstraint, SymbolMap, NameLabeler, SortComponents, minimize, + Block, + Objective, + Constraint, + Var, + Param, + Expression, + ExternalFunction, + Suffix, + SOSConstraint, + SymbolMap, + NameLabeler, + SortComponents, + minimize, ) -from pyomo.core.base.block import SortComponents from pyomo.core.base.component import ActiveComponent from pyomo.core.base.expression import ScalarExpression, _GeneralExpressionData from pyomo.core.base.objective import ScalarObjective, _GeneralObjectiveData @@ -45,29 +68,37 @@ from pyomo.core.pyomoobject import PyomoObject from pyomo.opt import WriterFactory +from pyomo.repn.util import ( + ExprType, + FileDeterminism, + FileDeterminism_to_SortComponents, + apply_node_operation, + categorize_valid_components, + complex_number_error, + initialize_var_map_from_column_order, + ordered_active_constraints, +) from pyomo.repn.plugins.ampl.ampl_ import set_pyomo_amplfunc_env -if sys.version_info[:2] >= (3,7): - _deterministic_dict = dict -else: - from pyomo.common.collections import OrderedDict - _deterministic_dict = OrderedDict - ### FIXME: Remove the following as soon as non-active components no ### longer report active==True from pyomo.core.base import Set, RangeSet from pyomo.network import Port + ### -logger=logging.getLogger(__name__) +logger = logging.getLogger(__name__) # Feasibility tolerance for trivial (fixed) constraints TOL = 1e-8 inf = float('inf') +minus_inf = -inf + + +_CONSTANT = ExprType.CONSTANT +_MONOMIAL = ExprType.MONOMIAL +_GENERAL = ExprType.GENERAL -class _CONSTANT(object): pass -class _MONOMIAL(object): pass -class _GENERAL(object): pass # TODO: make a proper base class class NLWriterInfo(object): @@ -99,7 +130,7 @@ class NLWriterInfo(object): The list of string names for the constraints / objectives written to the NL file in the same order as - :py:attr:`constraints` + :\p:attr:`objectives` and the generated + :py:attr:`constraints` + :py:attr:`objectives` and the generated .row file. column_labels: List[str] @@ -109,6 +140,7 @@ class NLWriterInfo(object): .col file. """ + def __init__(self, var, con, obj, extlib, row_lbl, col_lbl): self.variables = var self.constraints = con @@ -118,104 +150,78 @@ def __init__(self, var, con, obj, extlib, row_lbl, col_lbl): self.column_labels = col_lbl -class FileDeterminism(enum.IntEnum): - NONE = 0 - ORDERED = 1 - SORT_INDICES = 2 - SORT_SYMBOLS = 3 - - -def _activate_nl_writer_version(n): - """DEBUGGING TOOL to switch the "default" NL writer""" - doc = WriterFactory.doc('nl') - WriterFactory.unregister('nl') - WriterFactory.register('nl', doc)(WriterFactory.get_class(f'nl_v{n}')) - -def categorize_valid_components( - model, active=True, sort=None, valid=set(), targets=set()): - assert active in (True, None) - unrecognized = {} - component_map = {k: [] for k in targets} - for block in model.block_data_objects(active=active, - descend_into=True, - sort=sort): - local_ctypes = block.collect_ctypes(active=None, descend_into=False) - for ctype in local_ctypes: - if ctype in kernel.base._kernel_ctype_backmap: - ctype = kernel.base._kernel_ctype_backmap[ctype] - if ctype in targets: - component_map[ctype].append(block) - continue - if ctype in valid: - continue - # TODO: we should rethink the definition of "active" for - # Components that are not subclasses of ActiveComponent - if not issubclass(ctype, ActiveComponent) and \ - not issubclass(ctype, kernel.base.ICategorizedObject): - continue - if ctype not in unrecognized: - unrecognized[ctype] = [] - unrecognized[ctype].extend( - block.component_data_objects( - ctype=ctype, - active=active, - descend_into=False, - sort=SortComponents.unsorted)) - return component_map, {k:v for k,v in unrecognized.items() if v} - -@WriterFactory.register( - 'nl_v2', 'Generate the corresponding AMPL NL file (version 2).') +@WriterFactory.register('nl_v2', 'Generate the corresponding AMPL NL file (version 2).') class NLWriter(object): CONFIG = ConfigBlock('nlwriter') - CONFIG.declare('show_section_timing', ConfigValue( - default=False, - domain=bool, - description='Print timing after writing each section of the NL file', - )) - CONFIG.declare('skip_trivial_constraints', ConfigValue( - default=False, - domain=bool, - description='Skip writing constraints whose body is constant' - )) - CONFIG.declare('file_determinism', ConfigValue( - default=FileDeterminism.ORDERED, - domain=InEnum(FileDeterminism), - description='How much effort to ensure file is deterministic', - doc=""" + CONFIG.declare( + 'show_section_timing', + ConfigValue( + default=False, + domain=bool, + description='Print timing after writing each section of the NL file', + ), + ) + CONFIG.declare( + 'skip_trivial_constraints', + ConfigValue( + default=False, + domain=bool, + description='Skip writing constraints whose body is constant', + ), + ) + CONFIG.declare( + 'file_determinism', + ConfigValue( + default=FileDeterminism.ORDERED, + domain=InEnum(FileDeterminism), + description='How much effort to ensure file is deterministic', + doc=""" How much effort do we want to put into ensuring the NL file is written deterministically for a Pyomo model: NONE (0) : None - ORDERED (1): rely on underlying component ordering (default) - SORT_INDICES (2) : sort keys of indexed components - SORT_SYMBOLS (3) : sort keys AND sort names (over declaration order) - """ - )) - CONFIG.declare('symbolic_solver_labels', ConfigValue( - default=False, - domain=bool, - description='Write the corresponding .row and .col files', - )) - CONFIG.declare('export_nonlinear_variables', ConfigValue( - default=None, - domain=list, - description='Extra variables to include in NL file', - doc=""" + ORDERED (10): rely on underlying component ordering (default) + SORT_INDICES (20) : sort keys of indexed components + SORT_SYMBOLS (30) : sort keys AND sort names (not declaration order) + """, + ), + ) + CONFIG.declare( + 'symbolic_solver_labels', + ConfigValue( + default=False, + domain=bool, + description='Write the corresponding .row and .col files', + ), + ) + CONFIG.declare( + 'export_nonlinear_variables', + ConfigValue( + default=None, + domain=list, + description='Extra variables to include in NL file', + doc=""" List of variables to ensure are in the NL file (even if they - don't appear in any constraints).""" - )) - CONFIG.declare('row_order', ConfigValue( - default=None, - description='Preferred constraint ordering', - doc=""" + don't appear in any constraints).""", + ), + ) + CONFIG.declare( + 'row_order', + ConfigValue( + default=None, + description='Preferred constraint ordering', + doc=""" List of constraints in the order that they should appear in the NL file. Note that this is only a suggestion, as the NL writer will move all nonlinear constraints before linear ones - (preserving row_order within each group).""" - )) - CONFIG.declare('column_order', ConfigValue( - default=None, - description='Preferred variable ordering', - doc=""" + (preserving row_order within each group).""", + ), + ) + CONFIG.declare( + 'column_order', + ConfigValue( + default=None, + description='Preferred variable ordering', + doc=""" List of variables in the order that they should appear in the NL file. Note that this is only a suggestion, as the NL writer will move all nonlinear variables before linear ones, and within @@ -224,8 +230,20 @@ class NLWriter(object): which appear before variables appearing only in objectives. Within each group, continuous variables appear before discrete variables. In all cases, column_order is preserved within each - group.""" - )) + group.""", + ), + ) + CONFIG.declare( + 'export_defined_variables', + ConfigValue( + default=True, + domain=bool, + description='Preferred variable ordering', + doc=""" + If True, export Expression objects to the NL file as 'defined + variables'.""", + ), + ) def __init__(self): self.config = self.CONFIG() @@ -242,11 +260,10 @@ def __call__(self, model, filename, solver_capability, io_options): _open = lambda fname: open(fname, 'w') else: _open = nullcontext - with open(filename, 'w') as FILE, \ - _open(row_fname) as ROWFILE, \ - _open(col_fname) as COLFILE: - info = self.write( - model, FILE, ROWFILE, COLFILE, config=config) + with open(filename, 'w', newline='') as FILE, _open( + row_fname + ) as ROWFILE, _open(col_fname) as COLFILE: + info = self.write(model, FILE, ROWFILE, COLFILE, config=config) # Historically, the NL writer communicated the external function # libraries back to the ASL interface through the PYOMO_AMPLFUNC # environment variable. @@ -257,6 +274,7 @@ def __call__(self, model, filename, solver_capability, io_options): # was generated and the symbol_map return filename, symbol_map + @document_kwargs_from_configdict(CONFIG) def write(self, model, ostream, rowstream=None, colstream=None, **options): """Write a model in NL format. @@ -291,8 +309,6 @@ def write(self, model, ostream, rowstream=None, colstream=None, **options): with _NLWriter_impl(ostream, rowstream, colstream, config) as impl: return impl.write(model) - write.__doc__ = add_docstring_list(write.__doc__, CONFIG) - def _generate_symbol_map(self, info): # Now that the row/column ordering is resolved, create the labels symbol_map = SymbolMap() @@ -307,18 +323,19 @@ def _generate_symbol_map(self, info): ) return symbol_map + def _RANGE_TYPE(lb, ub): if lb == ub: if lb is None: - return 3 # -inf <= c <= inf + return 3 # -inf <= c <= inf else: - return 4 # L == c == U + return 4 # L == c == U elif lb is None: - return 1 # c <= U + return 1 # c <= U elif ub is None: - return 2 # L <= c + return 2 # L <= c else: - return 0 # L <= c <= U + return 0 # L <= c <= U class _SuffixData(object): @@ -335,11 +352,57 @@ def __init__(self, name, column_order, row_order, obj_order, model_id): self.datatype = set() def update(self, suffix): + missing_component = missing_other = 0 self.datatype.add(suffix.datatype) for item in suffix.items(): - self.store(*item) + missing = self._store(*item) + if missing: + if missing > 0: + missing_component += missing + else: + missing_other -= missing + if missing_component: + logger.warning( + f"model contains export suffix '{suffix.name}' that " + f"contains {missing_component} component keys that are " + "not exported as part of the NL file. " + "Skipping." + ) + if missing_other: + logger.warning( + f"model contains export suffix '{suffix.name}' that " + f"contains {missing_other} keys that are not " + "Var, Constraint, Objective, or the model. Skipping." + ) def store(self, obj, val): + missing = self._store(obj, val) + if not missing: + return + if missing == 1: + logger.warning( + f"model contains export suffix '{self._name}' with " + f"{obj.ctype.__name__} key '{obj.name}', but that " + "object is not exported as part of the NL file. " + "Skipping." + ) + elif missing > 1: + logger.warning( + f"model contains export suffix '{self._name}' with " + f"{obj.ctype.__name__} key '{obj.name}', but that " + "object contained {missing} data objects that are " + "not exported as part of the NL file. " + "Skipping." + ) + else: + logger.warning( + f"model contains export suffix '{self._name}' with " + f"{obj.__class__.__name__} key '{obj}' that is not " + "a Var, Constraint, Objective, or the model. Skipping." + ) + + def _store(self, obj, val): + missing_ct = 0 _id = id(obj) if _id in self._column_order: self.var[self._column_order[_id]] = val @@ -352,22 +415,15 @@ def store(self, obj, val): elif isinstance(obj, PyomoObject): if obj.is_indexed(): for o in obj.values(): - self.store(o, val) + missing_ct += self._store(o, val) else: - logger.warning( - f"model contained export suffix {self._name} with " - f"{obj.ctype.__name__} key '{obj.name}', but that " - "object is not exported as part of the NL file. " - "Skipping.") + missing_ct = 1 else: - logger.warning( - f"model contained export suffix {self._name} with " - f"{obj.__class__.__name__} key '{obj}' that is not " - "a Var, Constrtaint, Objective, or the model. Skipping.") + missing_ct = -1 + return missing_ct class _NLWriter_impl(object): - def __init__(self, ostream, rowstream, colstream, config): self.ostream = ostream self.rowstream = rowstream @@ -382,7 +438,7 @@ def __init__(self, ostream, rowstream, colstream, config): self.subexpression_order = [] self.external_functions = {} self.used_named_expressions = set() - self.var_map = _deterministic_dict() + self.var_map = {} self.visitor = AMPLRepnVisitor( self.template, self.subexpression_cache, @@ -390,7 +446,8 @@ def __init__(self, ostream, rowstream, colstream, config): self.external_functions, self.var_map, self.used_named_expressions, - config.symbolic_solver_labels, + self.symbolic_solver_labels, + self.config.export_defined_variables, ) self.next_V_line_id = 0 self.pause_gc = None @@ -408,97 +465,75 @@ def __exit__(self, exc_type, exc_value, tb): AMPLRepn.ActiveVisitor = None def write(self, model): - timer = TicTocTimer( - logger=logging.getLogger('pyomo.common.timing.writer') + timing_logger = logging.getLogger('pyomo.common.timing.writer') + timer = TicTocTimer(logger=timing_logger) + with_debug_timing = ( + timing_logger.isEnabledFor(logging.DEBUG) and timing_logger.hasHandlers() ) - sorter = SortComponents.unsorted - if self.config.file_determinism >= FileDeterminism.SORT_INDICES: - sorter = sorter | SortComponents.indices - if self.config.file_determinism >= FileDeterminism.SORT_SYMBOLS: - sorter = sorter | SortComponents.alphabetical - + sorter = FileDeterminism_to_SortComponents(self.config.file_determinism) component_map, unknown = categorize_valid_components( model, active=True, sort=sorter, valid={ - Block, Objective, Constraint, Var, Param, Expression, - ExternalFunction, Suffix, SOSConstraint, + Block, + Objective, + Constraint, + Var, + Param, + Expression, # FIXME: Non-active components should not report as Active - Set, RangeSet, Port, + ExternalFunction, + Set, + RangeSet, + Port, # TODO: Piecewise, Complementarity }, - targets={ - Objective, Constraint, Suffix, SOSConstraint, - } + targets={Suffix, SOSConstraint}, ) if unknown: raise ValueError( "The model ('%s') contains the following active components " - "that the NL writer does not know how to process:\n\t%s" % - (model.name, "\n\t".join("%s:\n\t\t%s" % ( - k, "\n\t\t".join(map(attrgetter('name'), v))) - for k, v in unknown.items()))) + "that the NL writer does not know how to process:\n\t%s" + % ( + model.name, + "\n\t".join( + "%s:\n\t\t%s" % (k, "\n\t\t".join(map(attrgetter('name'), v))) + for k, v in unknown.items() + ), + ) + ) # Caching some frequently-used objects into the locals() symbolic_solver_labels = self.symbolic_solver_labels visitor = self.visitor ostream = self.ostream - var_map = self.var_map - if self.config.column_order == True: - self.config.column_order = list(model.component_data_objects( - Var, descend_into=True, sort=sorter)) - elif self.config.file_determinism > FileDeterminism.ORDERED: - # We will pre-gather the variables so that their order - # matches the file_determinism flag. This is a little - # cumbersome, but is implemented this way for consistency - # with the original NL writer. - if self.config.column_order is None: - self.config.column_order = [] - self.config.column_order.extend(model.component_data_objects( - Var, descend_into=True, sort=sorter)) - if self.config.column_order is not None: - # Note that Vars that appear twice (e.g., through a - # Reference) will be sorted with the FIRST occurrence. - for var in self.config.column_order: - if var.is_indexed(): - for _v in var.values(): - if not _v.fixed: - var_map[id(_v)] = _v - elif not var.fixed: - var_map[id(var)] = var + var_map = self.var_map + initialize_var_map_from_column_order(model, self.config, var_map) + timer.toc('Initialized column order', level=logging.DEBUG) # # Tabulate the model expressions # objectives = [] linear_objs = [] - for block in component_map[Objective]: - for obj_comp in block.component_objects( - Objective, active=True, descend_into=False, sort=sorter): - try: - obj_vals = obj_comp.values() - except AttributeError: - # kernel does not define values() for scalar - # objectives or list/tuple containers - try: - # This could be a list/tuple container. Try to - # iterate over it, and if that fails assume it - # is a scalar - obj_vals = iter(obj_comp) - except: - obj_vals = (obj_comp,) - for obj in obj_vals: - if not obj.active: - continue - expr = visitor.walk_expression((obj.expr, obj, 1)) - if expr.nonlinear: - objectives.append((obj, expr)) - else: - linear_objs.append((obj, expr)) - timer.toc('Objective %s', obj_comp, level=logging.DEBUG) + last_parent = None + for obj in model.component_data_objects(Objective, active=True, sort=sorter): + if with_debug_timing and obj.parent_component() is not last_parent: + timer.toc('Objective %s', last_parent, level=logging.DEBUG) + last_parent = obj.parent_component() + expr = visitor.walk_expression((obj.expr, obj, 1)) + if expr.named_exprs: + self._record_named_expression_usage(expr.named_exprs, obj, 1) + if expr.nonlinear: + objectives.append((obj, expr)) + else: + linear_objs.append((obj, expr)) + if with_debug_timing: + # report the last objective + timer.toc('Objective %s', last_parent, level=logging.DEBUG) # Order the objectives, moving all nonlinear objectives to # the beginning @@ -510,84 +545,79 @@ def write(self, model): linear_cons = [] n_ranges = 0 n_equality = 0 - for block in component_map[Constraint]: - for con_comp in block.component_objects( - Constraint, active=True, descend_into=False, sort=sorter): - try: - con_vals = con_comp.values() - except AttributeError: - # kernel does not define values() for scalar - # constraints or list/tuple containers - try: - # This could be a list/tuple container. Try to - # iterate over it, and if that fails assume it - # is a scalar - con_vals = iter(con_comp) - except: - con_vals = (con_comp,) - for con in con_vals: - if not con.active: - continue - expr = visitor.walk_expression((con.body, con, 0)) - lb = con.lb - if lb is not None: - lb = repr(lb - expr.const) - ub = con.ub - if ub is not None: - ub = repr(ub - expr.const) - _type = _RANGE_TYPE(lb, ub) - if _type == 4: - n_equality += 1 - elif _type == 0: - n_ranges += 1 - elif _type == 3: #and self.config.skip_trivial_constraints: - # FIXME: historically the NL writer was - # hard-coded to skip all unbounded constraints - continue - if expr.nonlinear: - constraints.append((con, expr, _type, lb, ub)) - elif expr.linear: - linear_cons.append((con, expr, _type, lb, ub)) - elif not self.config.skip_trivial_constraints: - linear_cons.append((con, expr, _type, lb, ub)) - else: # constant constraint and skip_trivial_constraints - # - # TODO: skip_trivial_constraints should be an - # enum that also accepts "Exception" so that - # solvers can be (easily) notified of infeasible - # trivial constraints. - if (lb is not None and float(lb) > TOL) or ( - ub is not None and float(ub) < -TOL): - logger.warning( - "model contains a trivially infeasible " - f"constraint {con.name}, but " - "skip_trivial_constraints==True and the " - "constraint is being omitted from the NL " - "file. Solving the model may incorrectly " - "report a feasible solution.") - timer.toc('Constraint %s', con_comp, level=logging.DEBUG) - - if self.config.row_order: - # Note: this relies on two things: 1) dict are ordered, and - # 2) updating an entry in a dict does not change its - # ordering. - row_order = {} - for con in self.config.row_order: - if con.is_indexed(): - for c in con.values(): - row_order[id(c)] = c + n_complementarity_nonlin = 0 + n_complementarity_lin = 0 + # TODO: update the writer to tabulate and report the range and + # nzlb values. Low priority, as they do not appear to be + # required for solvers like PATH. + n_complementarity_range = 0 + n_complementarity_nz_var_lb = 0 + for con in ordered_active_constraints(model, self.config): + if with_debug_timing and con.parent_component() is not last_parent: + timer.toc('Constraint %s', last_parent, level=logging.DEBUG) + last_parent = con.parent_component() + expr = visitor.walk_expression((con.body, con, 0)) + if expr.named_exprs: + self._record_named_expression_usage(expr.named_exprs, con, 0) + lb = con.lb + if lb == minus_inf: + lb = None + elif lb is not None: + lb = repr(lb - expr.const) + ub = con.ub + if ub == inf: + ub = None + elif ub is not None: + ub = repr(ub - expr.const) + _type = _RANGE_TYPE(lb, ub) + if _type == 4: + n_equality += 1 + elif _type == 0: + n_ranges += 1 + elif _type == 3: # and self.config.skip_trivial_constraints: + continue + pass + # FIXME: this is a HACK to be compatible with the NLv1 + # writer. In the future, this writer should be expanded to + # look for and process Complementarity components (assuming + # that they are in an acceptable form). + if hasattr(con, '_complementarity'): + _type = 5 + # we are going to pass the complementarity type and the + # corresponding variable id() as the "lb" and "ub" for + # the range. + lb = con._complementarity + ub = con._vid + if expr.nonlinear: + n_complementarity_nonlin += 1 else: - row_order[id(con)] = con - for c in constraints: - row_order[id(c)] = c - for c in linear_cons: - row_order[id(c)] = c - # map the implicit dict ordering to an explicit 0..n ordering - row_order = {_id: i for i, _id in enumerate(row_order.keys())} - constraints.sort(key=itemgetter(row_order)) - linear_cons.sort(key=itemgetter(row_order)) - else: - row_order = {} + n_complementarity_lin += 1 + if expr.nonlinear: + constraints.append((con, expr, _type, lb, ub)) + elif expr.linear: + linear_cons.append((con, expr, _type, lb, ub)) + elif not self.config.skip_trivial_constraints: + linear_cons.append((con, expr, _type, lb, ub)) + else: # constant constraint and skip_trivial_constraints + # + # TODO: skip_trivial_constraints should be an + # enum that also accepts "Exception" so that + # solvers can be (easily) notified of infeasible + # trivial constraints. + if (lb is not None and float(lb) > TOL) or ( + ub is not None and float(ub) < -TOL + ): + logger.warning( + "model contains a trivially infeasible " + f"constraint {con.name}, but " + "skip_trivial_constraints==True and the " + "constraint is being omitted from the NL " + "file. Solving the model may incorrectly " + "report a feasible solution." + ) + if with_debug_timing: + # report the last constraint + timer.toc('Constraint %s', last_parent, level=logging.DEBUG) # Order the constraints, moving all nonlinear constraints to # the beginning @@ -595,6 +625,9 @@ def write(self, model): constraints.extend(linear_cons) n_cons = len(constraints) + # initialize an empty row order, to be populated later if we need it + row_order = {} + # # Collect constraints and objectives into the groupings # necessary for AMPL @@ -603,6 +636,11 @@ def write(self, model): # var objects themselves) # + # Filter out any unused named expressions + self.subexpression_order = list( + filter(self.used_named_expressions.__contains__, self.subexpression_order) + ) + # linear contribution by (constraint, objective) component. # Keys are component id(), Values are dicts mapping variable # id() to linear coefficient. All nonzeros in the component @@ -614,16 +652,16 @@ def write(self, model): # we know their linear / nonlinear vars when we encounter them # in constraints / objectives self._categorize_vars( - map(self.subexpression_cache.__getitem__, - filter(self.used_named_expressions.__contains__, - self.subexpression_order)), - linear_by_comp + map(self.subexpression_cache.__getitem__, self.subexpression_order), + linear_by_comp, + ) + n_subexpressions = self._count_subexpression_occurrences() + obj_vars_linear, obj_vars_nonlinear, obj_nnz_by_var = self._categorize_vars( + objectives, linear_by_comp + ) + con_vars_linear, con_vars_nonlinear, con_nnz_by_var = self._categorize_vars( + constraints, linear_by_comp ) - n_subexpressions = self._count_subexpression_occurances() - obj_vars_linear, obj_vars_nonlinear, obj_nnz_by_var \ - = self._categorize_vars(objectives, linear_by_comp) - con_vars_linear, con_vars_nonlinear, con_nnz_by_var \ - = self._categorize_vars(constraints, linear_by_comp) if self.config.export_nonlinear_variables: for v in self.config.export_nonlinear_variables: @@ -638,18 +676,17 @@ def write(self, model): con_vars_nonlinear.add(_id) con_nnz = sum(con_nnz_by_var.values()) - timer.toc('Categorized model variables: %s nnz', con_nnz, - level=logging.DEBUG) + timer.toc('Categorized model variables: %s nnz', con_nnz, level=logging.DEBUG) - n_lcons = 0 # We do not yet support logical constraints + n_lcons = 0 # We do not yet support logical constraints # We need to check the SOS constraints before finalizing the # variable order because the SOS constraint *could* reference a # variable not yet seen in the model. for block in component_map[SOSConstraint]: - for sos in block.component_objects( - SOSConstraint, active=True, descend_into=False, - sort=sorter): + for sos in block.component_data_objects( + SOSConstraint, active=True, descend_into=False, sort=sorter + ): for v in sos.variables: if id(v) not in var_map: _id = id(v) @@ -682,65 +719,82 @@ def write(self, model): else: raise ValueError( f"Variable '{v.name}' has a domain that is not Real, " - f"Integer, or Binary: Cannot write a legal NL file.") + f"Integer, or Binary: Cannot write a legal NL file." + ) discrete_vars = binary_vars | integer_vars nonlinear_vars = con_vars_nonlinear | obj_vars_nonlinear linear_only_vars = (con_vars_linear | obj_vars_linear) - nonlinear_vars - self.column_order = column_order = { - _id: i for i, _id in enumerate(var_map) - } + self.column_order = column_order = {_id: i for i, _id in enumerate(var_map)} variables = [] # both_vars_nonlinear = con_vars_nonlinear & obj_vars_nonlinear if both_vars_nonlinear: - variables.extend(sorted( - both_vars_nonlinear & continuous_vars, - key=column_order.__getitem__)) - variables.extend(sorted( - both_vars_nonlinear & discrete_vars, - key=column_order.__getitem__)) + variables.extend( + sorted( + both_vars_nonlinear & continuous_vars, key=column_order.__getitem__ + ) + ) + variables.extend( + sorted( + both_vars_nonlinear & discrete_vars, key=column_order.__getitem__ + ) + ) # con_only_nonlinear_vars = con_vars_nonlinear - both_vars_nonlinear if con_only_nonlinear_vars: - variables.extend(sorted( - con_only_nonlinear_vars & continuous_vars, - key=column_order.__getitem__)) - variables.extend(sorted( - con_only_nonlinear_vars & discrete_vars, - key=column_order.__getitem__)) + variables.extend( + sorted( + con_only_nonlinear_vars & continuous_vars, + key=column_order.__getitem__, + ) + ) + variables.extend( + sorted( + con_only_nonlinear_vars & discrete_vars, + key=column_order.__getitem__, + ) + ) # obj_only_nonlinear_vars = obj_vars_nonlinear - both_vars_nonlinear if obj_vars_nonlinear: - variables.extend(sorted( - obj_only_nonlinear_vars & continuous_vars, - key=column_order.__getitem__)) - variables.extend(sorted( - obj_only_nonlinear_vars & discrete_vars, - key=column_order.__getitem__)) + variables.extend( + sorted( + obj_only_nonlinear_vars & continuous_vars, + key=column_order.__getitem__, + ) + ) + variables.extend( + sorted( + obj_only_nonlinear_vars & discrete_vars, + key=column_order.__getitem__, + ) + ) # if linear_only_vars: - variables.extend(sorted( - linear_only_vars - discrete_vars, - key=column_order.__getitem__)) + variables.extend( + sorted(linear_only_vars - discrete_vars, key=column_order.__getitem__) + ) linear_binary_vars = linear_only_vars & binary_vars - variables.extend(sorted( - linear_binary_vars, - key=column_order.__getitem__)) + variables.extend(sorted(linear_binary_vars, key=column_order.__getitem__)) linear_integer_vars = linear_only_vars & integer_vars - variables.extend(sorted( - linear_integer_vars, - key=column_order.__getitem__)) + variables.extend(sorted(linear_integer_vars, key=column_order.__getitem__)) else: linear_binary_vars = linear_integer_vars = set() assert len(variables) == n_vars timer.toc( 'Set row / column ordering: %s variables [%s, %s, %s R/B/Z], ' '%s constraints [%s, %s L/NL]', - n_vars, len(continuous_vars), len(binary_vars), len(integer_vars), - len(constraints), n_cons-n_nonlinear_cons, n_nonlinear_cons, - level=logging.DEBUG) + n_vars, + len(continuous_vars), + len(binary_vars), + len(integer_vars), + len(constraints), + n_cons - n_nonlinear_cons, + n_nonlinear_cons, + level=logging.DEBUG, + ) # Fill in the variable list and update the new column order. # @@ -749,15 +803,17 @@ def write(self, model): # originally generated from var_map), we will rebuild the # column_order to *just* contain the variables that we are # sending to the NL. - self.column_order = column_order = { - _id: i for i, _id in enumerate(variables) - } + self.column_order = column_order = {_id: i for i, _id in enumerate(variables)} for idx, _id in enumerate(variables): v = var_map[_id] lb, ub = v.bounds - if lb is not None: + if lb == minus_inf: + lb = None + elif lb is not None: lb = repr(lb) - if ub is not None: + if ub == inf: + ub = None + elif ub is not None: ub = repr(ub) variables[idx] = (v, _id, _RANGE_TYPE(lb, ub), lb, ub) timer.toc("Computed variable bounds", level=logging.DEBUG) @@ -773,13 +829,15 @@ def write(self, model): # components override lower level ones. for block in reversed(component_map[Suffix]): for suffix in block.component_objects( - Suffix, active=True, descend_into=False, sort=sorter): + Suffix, active=True, descend_into=False, sort=sorter + ): if not (suffix.direction & Suffix.EXPORT): continue name = suffix.local_name if name not in suffix_data: suffix_data[name] = _SuffixData( - name, column_order, row_order, obj_order, model_id) + name, column_order, row_order, obj_order, model_id + ) suffix_data[name].update(suffix) timer.toc("Collected suffixes", level=logging.DEBUG) @@ -808,17 +866,19 @@ def write(self, model): f"manually declared '{name}' suffixes as well " "as SOSConstraint components to exist on a single " "model. To avoid this error please use only one of " - "these methods to define special ordered sets.") + "these methods to define special ordered sets." + ) suffix_data[name] = _SuffixData( - name, column_order, row_order, obj_order, model_id) + name, column_order, row_order, obj_order, model_id + ) suffix_data[name].datatype.add(Suffix.INT) sos_id = 0 sosno = suffix_data['sosno'] ref = suffix_data['ref'] for block in reversed(component_map[SOSConstraint]): for sos in block.component_data_objects( - SOSConstraint, active=True, descend_into=False, - sort=sorter): + SOSConstraint, active=True, descend_into=False, sort=sorter + ): sos_id += 1 if sos.level == 1: tag = sos_id @@ -826,9 +886,10 @@ def write(self, model): tag = -sos_id else: raise ValueError( - f"SOSContraint '{sos.name}' has sos " + f"SOSConstraint '{sos.name}' has sos " f"type='{sos.level}', which is not supported " - "by the NL file interface") + "by the NL file interface" + ) try: _items = sos.get_items() except AttributeError: @@ -840,8 +901,9 @@ def write(self, model): if symbolic_solver_labels: labeler = NameLabeler() - row_labels = [labeler(info[0]) for info in constraints] \ - + [labeler(info[0]) for info in objectives] + row_labels = [labeler(info[0]) for info in constraints] + [ + labeler(info[0]) for info in objectives + ] row_comments = [f'\t#{lbl}' for lbl in row_labels] col_labels = [labeler(info[0]) for info in variables] col_comments = [f'\t#{lbl}' for lbl in col_labels] @@ -869,32 +931,82 @@ def write(self, model): # # LINE 1 # - ostream.write("g3 1 1 0\t# problem %s\n" % (model.name,)) + if visitor.encountered_string_arguments and 'b' not in getattr( + ostream, 'mode', '' + ): + # Not all streams support tell() + try: + _written_bytes = ostream.tell() + except IOError: + _written_bytes = None + + line_1_txt = f"g3 1 1 0\t# problem {model.name}\n" + ostream.write(line_1_txt) + + # If there were any string arguments, then we need to ensure + # that ostream is not converting newlines to something other + # than '\n'. Binary files do not perform newline mapping (of + # course, we will also need to map all the str to bytes for + # binary-mode I/O). + if visitor.encountered_string_arguments and 'b' not in getattr( + ostream, 'mode', '' + ): + if _written_bytes is None: + _written_bytes = 0 + else: + _written_bytes = ostream.tell() - _written_bytes + if not _written_bytes: + if os.linesep != '\n': + logger.warning( + "Writing NL file containing string arguments to a " + "text output stream that does not support tell() on " + "a platform with default line endings other than " + "'\\n'. Current versions of the ASL " + "(through at least 20190605) require UNIX-style " + "newlines as terminators for string arguments: " + "it is possible that the ASL may refuse to read " + "the NL file." + ) + else: + if ostream.encoding: + line_1_txt = line_1_txt.encode(ostream.encoding) + if len(line_1_txt) != _written_bytes: + logger.error( + "Writing NL file containing string arguments to a " + "text output stream with line endings other than '\\n' " + "Current versions of the ASL " + "(through at least 20190605) require UNIX-style " + "newlines as terminators for string arguments." + ) + # # LINE 2 # ostream.write( " %d %d %d %d %d \t" "# vars, constraints, objectives, ranges, eqns\n" - % ( n_vars, - n_cons, - n_objs, - n_ranges, - n_equality, - )) + % (n_vars, n_cons, n_objs, n_ranges, n_equality) + ) # # LINE 3 # ostream.write( " %d %d %d %d %d %d\t" "# nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb\n" - % ( n_nonlinear_cons, + % ( + n_nonlinear_cons, n_nonlinear_objs, - 0, # ccons_lin, - 0, # ccons_nonlin, - 0, # ccons_nd, - 0, # ccons_nzlb, - )) + # num linear complementarity constraints + n_complementarity_lin, + # num nonlinear complementarity constraints + n_complementarity_nonlin, + # num complementarities involving double inequalities + n_complementarity_range, + # num complemented variables with either a nonzero lower + # bound or any upper bound (excluding ranges) + n_complementarity_nz_var_lb, + ) + ) # # LINE 4 # @@ -919,7 +1031,8 @@ def write(self, model): ostream.write( " %d %d %d \t" "# nonlinear vars in constraints, objectives, both\n" - % (_n_con_vars, _n_obj_vars, _n_both_vars)) + % (_n_con_vars, _n_obj_vars, _n_both_vars) + ) # # LINE 6 @@ -927,42 +1040,46 @@ def write(self, model): ostream.write( " 0 %d 0 1\t" "# linear network variables; functions; arith, flags\n" - % ( len(self.external_functions), - )) + % (len(self.external_functions),) + ) # # LINE 7 # ostream.write( " %d %d %d %d %d \t" "# discrete variables: binary, integer, nonlinear (b,c,o)\n" - % ( len(linear_binary_vars), + % ( + len(linear_binary_vars), len(linear_integer_vars), len(both_vars_nonlinear.intersection(discrete_vars)), len(con_vars_nonlinear.intersection(discrete_vars)), len(obj_vars_nonlinear.intersection(discrete_vars)), - )) + ) + ) # # LINE 8 # # objective info computed above ostream.write( " %d %d \t# nonzeros in Jacobian, obj. gradient\n" - % ( sum(con_nnz_by_var.values()), - sum(obj_nnz_by_var.values()), - )) + % (sum(con_nnz_by_var.values()), sum(obj_nnz_by_var.values())) + ) # # LINE 9 # ostream.write( " %d %d\t# max name lengths: constraints, variables\n" - % ( max(map(len, row_labels), default=0), + % ( + max(map(len, row_labels), default=0), max(map(len, col_labels), default=0), - )) + ) + ) # # LINE 10 # - ostream.write(" %d %d %d %d %d\t# common exprs: b,c,o,c1,o1\n" - % tuple(n_subexpressions)) + ostream.write( + " %d %d %d %d %d\t# common exprs: b,c,o,c1,o1\n" % tuple(n_subexpressions) + ) # # "F" lines (external function definitions) @@ -982,7 +1099,8 @@ def write(self, model): raise ValueError( "The NL file writer found multiple active export " "suffix components with name '{name}' and different " - "datatypes. A single datatype must be declared.") + "datatypes. A single datatype must be declared." + ) _type = next(iter(data.datatype)) if _type == Suffix.FLOAT: _float = 4 @@ -992,15 +1110,17 @@ def write(self, model): raise ValueError( "The NL file writer only supports export suffixes " "declared with a numeric datatype. Suffix " - f"component '{name}' declares type '{_type}'") + f"component '{name}' declares type '{_type}'" + ) for _field, _vals in zip( - range(4), - (data.var, data.con, data.obj, data.prob)): + range(4), (data.var, data.con, data.obj, data.prob) + ): if not _vals: continue ostream.write(f"S{_field|_float} {len(_vals)} {name}\n") - ostream.write(''.join(f"{_id} {_vals[_id]!r}\n" - for _id in sorted(_vals))) + ostream.write( + ''.join(f"{_id} {_vals[_id]!r}\n" for _id in sorted(_vals)) + ) # # "V" lines (common subexpressions) @@ -1012,17 +1132,22 @@ def write(self, model): single_use_subexpressions = {} self.next_V_line_id = n_vars for _id in self.subexpression_order: - if _id not in self.used_named_expressions: - continue - cache_info = self.subexpression_cache[_id][2] - if cache_info[2]: + _con_id, _obj_id, _sub = self.subexpression_cache[_id][2] + if _sub: # substitute expression directly into expression trees # and do NOT emit the V line continue - elif 0 in cache_info[:2] or None not in cache_info[:2]: + target_expr = 0 + if _obj_id is None: + target_expr = _con_id + elif _con_id is None: + target_expr = _obj_id + if target_expr == 0: + # Note: checking target_expr == 0 is equivalent to + # testing "(_con_id is not None and _obj_id is not None) + # or _con_id == 0 or _obj_id == 0" self._write_v_line(_id, 0) else: - target_expr = tuple(filter(None, cache_info))[0] if target_expr not in single_use_subexpressions: single_use_subexpressions[target_expr] = [] single_use_subexpressions[target_expr].append(_id) @@ -1038,8 +1163,11 @@ def write(self, model): # are the constant 0). _expr = self.template.const % 0 ostream.write( - _expr.join(f'C{i}{row_comments[i]}\n' - for i in range(row_idx, len(constraints)))) + _expr.join( + f'C{i}{row_comments[i]}\n' + for i in range(row_idx, len(constraints)) + ) + ) # We know that there is at least one linear expression # (row_idx), so we can unconditionally emit the last "0 # expression": @@ -1079,8 +1207,9 @@ def write(self, model): logger.warning("ignoring 'dual' suffix for Model") if _data.con: ostream.write(f"d{len(_data.con)}\n") - ostream.write(''.join(f"{_id} {_data.con[_id]!r}\n" - for _id in sorted(_data.con))) + ostream.write( + ''.join(f"{_id} {_data.con[_id]!r}\n" for _id in sorted(_data.con)) + ) # # "x" lines (variable initialization) @@ -1090,62 +1219,79 @@ def write(self, model): for var_idx, info in enumerate(variables) if info[0].value is not None ] - ostream.write('x%d%s\n' % ( - len(_init_lines), - "\t# initial guess" if symbolic_solver_labels else '', - )) + ostream.write( + 'x%d%s\n' + % (len(_init_lines), "\t# initial guess" if symbolic_solver_labels else '') + ) ostream.write(''.join(_init_lines)) # # "r" lines (constraint bounds) # - ostream.write('r%s\n' % ( - "\t#%d ranges (rhs's)" % len(constraints) - if symbolic_solver_labels else '', - )) + ostream.write( + 'r%s\n' + % ( + "\t#%d ranges (rhs's)" % len(constraints) + if symbolic_solver_labels + else '', + ) + ) for row_idx, info in enumerate(constraints): i = info[2] - if i == 4: # == + if i == 4: # == ostream.write(f"4 {info[3]}{row_comments[row_idx]}\n") - elif i == 1: # body <= ub + elif i == 1: # body <= ub ostream.write(f"1 {info[4]}{row_comments[row_idx]}\n") - elif i == 2: # lb <= body + elif i == 2: # lb <= body ostream.write(f"2 {info[3]}{row_comments[row_idx]}\n") - elif i == 0: # lb <= body <= ub + elif i == 0: # lb <= body <= ub ostream.write(f"0 {info[3]} {info[4]}{row_comments[row_idx]}\n") - else: # i == 3; unbounded + elif i == 5: # complementarity + ostream.write( + f"5 {info[3]} {1+column_order[info[4]]}" + f"{row_comments[row_idx]}\n" + ) + else: # i == 3; unbounded ostream.write(f"3{row_comments[row_idx]}\n") # # "b" lines (variable bounds) # - ostream.write('b%s\n' % ( - "\t#%d bounds (on variables)" % len(variables) - if symbolic_solver_labels else '', - )) + ostream.write( + 'b%s\n' + % ( + "\t#%d bounds (on variables)" % len(variables) + if symbolic_solver_labels + else '', + ) + ) for var_idx, info in enumerate(variables): # _bound_writer[info[2]](info, col_comments[var_idx]) ### i = info[2] - if i == 0: # lb <= body <= ub + if i == 0: # lb <= body <= ub ostream.write(f"0 {info[3]} {info[4]}{col_comments[var_idx]}\n") - elif i == 2: # lb <= body + elif i == 2: # lb <= body ostream.write(f"2 {info[3]}{col_comments[var_idx]}\n") - elif i == 1: # body <= ub + elif i == 1: # body <= ub ostream.write(f"1 {info[4]}{col_comments[var_idx]}\n") - elif i == 4: # == + elif i == 4: # == ostream.write(f"4 {info[3]}{col_comments[var_idx]}\n") - else: # i == 3; unbounded + else: # i == 3; unbounded ostream.write(f"3{col_comments[var_idx]}\n") # # "k" lines (column offsets in Jacobian NNZ) # - ostream.write('k%d%s\n' % ( - len(variables) - 1, - "\t#intermediate Jacobian column lengths" - if symbolic_solver_labels else '', - )) + ostream.write( + 'k%d%s\n' + % ( + len(variables) - 1, + "\t#intermediate Jacobian column lengths" + if symbolic_solver_labels + else '', + ) + ) ktot = 0 for var_idx, info in enumerate(variables[:-1]): ktot += con_nnz_by_var.get(info[1], 0) @@ -1162,9 +1308,7 @@ def write(self, model): continue ostream.write(f'J{row_idx} {len(linear)}{row_comments[row_idx]}\n') for _id in sorted(linear.keys(), key=column_order.__getitem__): - ostream.write( - f'{column_order[_id]} {linear[_id]!r}\n' - ) + ostream.write(f'{column_order[_id]} {linear[_id]!r}\n') # # "G" lines (non-empty terms in the Objective) @@ -1175,17 +1319,19 @@ def write(self, model): # (i.e., a constant objective), then skip this entry if not linear: continue - ostream.write( - f'G{obj_idx} {len(linear)}{row_comments[obj_idx + n_cons]}\n') + ostream.write(f'G{obj_idx} {len(linear)}{row_comments[obj_idx + n_cons]}\n') for _id in sorted(linear.keys(), key=column_order.__getitem__): - ostream.write( - f'{column_order[_id]} {linear[_id]!r}\n' - ) + ostream.write(f'{column_order[_id]} {linear[_id]!r}\n') # Generate the return information info = NLWriterInfo( - variables, constraints, objectives, sorted(amplfunc_libraries), - row_labels, col_labels) + variables, + constraints, + objectives, + sorted(amplfunc_libraries), + row_labels, + col_labels, + ) timer.toc("Wrote NL stream", level=logging.DEBUG) timer.toc("Generated NL representation", delta=False) return info @@ -1237,14 +1383,6 @@ def _categorize_vars(self, comp_list, linear_by_comp): # # Process the linear portion of this component if expr_info.linear: - if expr_info.linear.__class__ is list: - linear = {} - for v, c in expr_info.linear: - if v in linear: - linear[v] += c - else: - linear[v] = c - expr_info.linear = linear linear_vars = set(expr_info.linear) all_linear_vars.update(linear_vars) # else: @@ -1289,67 +1427,30 @@ def _categorize_vars(self, comp_list, linear_by_comp): all_linear_vars -= all_nonlinear_vars return all_linear_vars, all_nonlinear_vars, nnz_by_var - def _count_subexpression_occurances(self): - # We now need to go through the subexpression cache and update - # the flag for nested subexpressions used by multiple components - # (the walker can only update the flag in subexpressions - # appearing explicitly in the tree, so we now need to propagate - # this usage info into subexpressions nested in other - # subexpressions). - # - # We need to walk twice: once to sort out the use in Constraints - # and once to sort out the use in Objectives - for idx in (0, 1): - cache = self.subexpression_cache - for id_ in self.subexpression_order: - src_id = cache[id_][2][idx] - if src_id is None: - continue - # This expression is used by this component type - # (constraint or objective); ensure that all - # subexpressions (recursively) used by this expression - # are also marked as being used by this component type - queue = [id_] - while queue: - info = cache[queue.pop()] - if not info[1].nonlinear: - # Subexpressions can only appear in the - # nonlinear terms. If there are none, then we - # are done. - continue - for subid in info[1].nonlinear[1]: - # Check if this "id" (normally a var id, but - # could be a subexpression id) is a - # subexpression id - if subid not in cache: - continue - # Check if we need to update this subexpression: - # either it has never been marked as being used - # by this component type, or else it was used by - # a different id. If we need to update the - # flag, then do so and recurse into it - target = cache[subid][2] - if target[idx] is None: - target[idx] = src_id - queue.append(subid) - elif target[idx] and target[idx] != src_id: - target[idx] = 0 - queue.append(subid) - # Now we can reliably know where nested subexpressions are used. + def _count_subexpression_occurrences(self): + """Categorize named subexpressions based on where they are used. + + This iterates through the `subexpression_order` and categorizes + each _id based on where it is used (1 constraint, many + constraints, 1 objective, many objectives, constraints and + objectives). + + """ # Group them into: # [ used in both objectives and constraints, # used by more than one constraint (but no objectives), # used by more than one objective (but no constraints), # used by one constraint, # used by one objective ] - n_subexpressions = [0]*5 - for info in map(itemgetter(2), - map(self.subexpression_cache.__getitem__, - filter(self.used_named_expressions.__contains__, - self.subexpression_order))): + n_subexpressions = [0] * 5 + for info in map( + itemgetter(2), + map(self.subexpression_cache.__getitem__, self.subexpression_order), + ): if info[2]: pass elif info[1] is None: + # assert info[0] is not None: n_subexpressions[3 if info[0] else 1] += 1 elif info[0] is None: n_subexpressions[4 if info[1] else 2] += 1 @@ -1357,21 +1458,28 @@ def _count_subexpression_occurances(self): n_subexpressions[0] += 1 return n_subexpressions + def _record_named_expression_usage(self, named_exprs, src, comp_type): + self.used_named_expressions.update(named_exprs) + src = id(src) + for _id in named_exprs: + info = self.subexpression_cache[_id][2] + if info[comp_type] is None: + info[comp_type] = src + elif info[comp_type] != src: + info[comp_type] = 0 + def _write_nl_expression(self, repn, include_const): # Note that repn.mult should always be 1 (the AMPLRepn was # compiled before this point). Omitting the assertion for # efficiency. - #assert repn.mult == 1 + # assert repn.mult == 1 if repn.nonlinear: nl, args = repn.nonlinear if include_const and repn.const: # Add the constant to the NL expression. AMPL adds the # constant as the second argument, so we will too. - nl = self.template.binary_sum + nl + ( - self.template.const % repn.const) - self.ostream.write( - nl % tuple(map(self.var_id_to_nl.__getitem__, args)) - ) + nl = self.template.binary_sum + nl + (self.template.const % repn.const) + self.ostream.write(nl % tuple(map(self.var_id_to_nl.__getitem__, args))) elif include_const: self.ostream.write(self.template.const % repn.const) else: @@ -1406,6 +1514,7 @@ class NLFragment(object): portion of an Expression component. """ + __slots__ = ('_repn', '_node') def __init__(self, repn, node): @@ -1418,7 +1527,7 @@ def name(self): class AMPLRepn(object): - __slots__ = ('nl', 'mult', 'const', 'linear', 'nonlinear') + __slots__ = ('nl', 'mult', 'const', 'linear', 'nonlinear', 'named_exprs') ActiveVisitor = None @@ -1427,35 +1536,82 @@ def __init__(self, const, linear, nonlinear): self.mult = 1 self.const = const self.linear = linear - self.nonlinear = nonlinear + if nonlinear is None: + self.nonlinear = self.named_exprs = None + else: + nl, nl_args, self.named_exprs = nonlinear + self.nonlinear = nl, nl_args + + def __str__(self): + return ( + f'AMPLRepn(mult={self.mult}, const={self.const}, ' + f'linear={self.linear}, nonlinear={self.nonlinear}, ' + f'nl={self.nl}, named_exprs={self.named_exprs})' + ) - def compile_repn(self, visitor, prefix='', args=None): + def __repr__(self): + return str(self) + + def duplicate(self): + ans = self.__class__.__new__(self.__class__) + ans.nl = self.nl + ans.mult = self.mult + ans.const = self.const + ans.linear = None if self.linear is None else dict(self.linear) + ans.nonlinear = self.nonlinear + ans.named_exprs = self.named_exprs + return ans + + def compile_repn(self, visitor, prefix='', args=None, named_exprs=None): template = visitor.template if self.mult != 1: if self.mult == -1: prefix += template.negation else: prefix += template.multiplier % self.mult + self.mult = 1 + if self.named_exprs is not None: + if named_exprs is None: + named_exprs = set(self.named_exprs) + else: + named_exprs.update(self.named_exprs) if self.nl is not None: + # This handles both named subexpressions and embedded + # non-numeric (e.g., string) arguments. nl, nl_args = self.nl - visitor._mark_named_expression_as_used(nl_args) if prefix: nl = prefix + nl - if args is not None and args is not nl_args: + if args is not None: + assert args is not nl_args args.extend(nl_args) else: args = list(nl_args) - return nl, args + if nl_args: + # For string arguments, nl_args is an empty tuple and + # self.named_exprs is None. For named subexpressions, + # we are guaranteed that named_exprs is NOT None. We + # need to ensure that the named subexpression that we + # are returning is added to the named_exprs set. + named_exprs.update(nl_args) + return nl, args, named_exprs if args is None: args = [] if self.linear: - nterms = len(self.linear) + nterms = -len(args) _v_template = template.var _m_template = template.monomial - nl_sum = ''.join(_v_template if c == 1 else _m_template % c - for c in map(itemgetter(1), self.linear)) - args.extend(map(itemgetter(0), self.linear)) + # Because we are compiling this expression (into a NL + # expression), we will go ahead and filter the 0*x terms + # from the expression. Note that the args are accumulated + # by side-effect, which prevents iterating over the linear + # terms twice. + nl_sum = ''.join( + args.append(v) or (_v_template if c == 1 else _m_template % c) + for v, c in self.linear.items() + if c + ) + nterms += len(args) else: nterms = 0 nl_sum = '' @@ -1463,8 +1619,7 @@ def compile_repn(self, visitor, prefix='', args=None): if self.nonlinear.__class__ is list: nterms += len(self.nonlinear) nl_sum += ''.join(map(itemgetter(0), self.nonlinear)) - deque(map(args.extend, map(itemgetter(1), self.nonlinear)), - maxlen=0) + deque(map(args.extend, map(itemgetter(1), self.nonlinear)), maxlen=0) else: nterms += 1 nl_sum += self.nonlinear[0] @@ -1474,29 +1629,29 @@ def compile_repn(self, visitor, prefix='', args=None): nl_sum += template.const % self.const if nterms > 2: - return prefix + (template.nary_sum % nterms) + nl_sum, args + return (prefix + (template.nary_sum % nterms) + nl_sum, args, named_exprs) elif nterms == 2: - return prefix + template.binary_sum + nl_sum, args + return prefix + template.binary_sum + nl_sum, args, named_exprs elif nterms == 1: - return prefix + nl_sum, args - else: # nterms == 0 - return prefix + (template.const % 0), [] + return prefix + nl_sum, args, named_exprs + else: # nterms == 0 + return prefix + (template.const % 0), args, named_exprs def compile_nonlinear_fragment(self, visitor): + if not self.nonlinear: + self.nonlinear = None + return args = [] nterms = len(self.nonlinear) nl_sum = ''.join(map(itemgetter(0), self.nonlinear)) - deque(map(args.extend, map(itemgetter(1), self.nonlinear)), - maxlen=0) + deque(map(args.extend, map(itemgetter(1), self.nonlinear)), maxlen=0) if nterms > 2: self.nonlinear = (visitor.template.nary_sum % nterms) + nl_sum, args elif nterms == 2: self.nonlinear = visitor.template.binary_sum + nl_sum, args - elif nterms == 1: + else: # nterms == 1: self.nonlinear = nl_sum, args - else: # nterms == 0 - self.nonlinear = None def append(self, other): """Append a child result from acceptChildResult @@ -1512,37 +1667,51 @@ def append(self, other): # Note that self.mult will always be 1 (we only call append() # within a sum, so there is no opportunity for self.mult to # change). Omitting the assertion for efficiency. - #assert self.mult == 1 + # assert self.mult == 1 _type = other[0] if _type is _MONOMIAL: - self.linear.append(other[1:]) + _, v, c = other + if v in self.linear: + self.linear[v] += c + else: + self.linear[v] = c elif _type is _GENERAL: - other = other[1] - if other.nl is not None and other.nonlinear: + _, other = other + if other.nl is not None and other.nl[1]: if other.linear: # This is a named expression with both a linear and # nonlinear component. We want to merge it with # this AMPLRepn, preserving the named expression for # only the nonlinear component (merging the linear - # component with this AMPLRepn). We need to make - # sure that we have marked that we are using the - # named expression for the nonlinear component. - self.ActiveVisitor._mark_named_expression_as_used( - other.nonlinear[1]) + # component with this AMPLRepn). + pass else: # This is a nonlinear-only named expression, # possibly with a multiplier that is not 1. Compile # it and append it (this both resolves the # multiplier, and marks the named expression as # having been used) - self.nonlinear.append( - other.compile_repn(self.ActiveVisitor)) + other = other.compile_repn( + self.ActiveVisitor, '', None, self.named_exprs + ) + nl, nl_args, self.named_exprs = other + self.nonlinear.append((nl, nl_args)) return + if other.named_exprs is not None: + if self.named_exprs is None: + self.named_exprs = set(other.named_exprs) + else: + self.named_exprs.update(other.named_exprs) if other.mult != 1: mult = other.mult self.const += mult * other.const if other.linear: - self.linear.extend((v, c*mult) for v, c in other.linear) + linear = self.linear + for v, c in other.linear.items(): + if v in linear: + linear[v] += c * mult + else: + linear[v] = c * mult if other.nonlinear: if other.nonlinear.__class__ is list: other.compile_nonlinear_fragment(self.ActiveVisitor) @@ -1556,7 +1725,12 @@ def append(self, other): else: self.const += other.const if other.linear: - self.linear.extend(other.linear) + linear = self.linear + for v, c in other.linear.items(): + if v in linear: + linear[v] += c + else: + linear[v] = c if other.nonlinear: if other.nonlinear.__class__ is list: self.nonlinear.extend(other.nonlinear) @@ -1579,24 +1753,24 @@ def _create_strict_inequality_map(vars_): class text_nl_debug_template(object): unary = { - 'log': 'o43\t#log\n', - 'log10': 'o42\t#log10\n', - 'sin': 'o41\t#sin\n', - 'cos': 'o46\t#cos\n', - 'tan': 'o38\t#tan\n', - 'sinh': 'o40\t#sinh\n', - 'cosh': 'o45\t#cosh\n', - 'tanh': 'o37\t#tanh\n', - 'asin': 'o51\t#asin\n', - 'acos': 'o53\t#acos\n', - 'atan': 'o49\t#atan\n', - 'exp': 'o44\t#exp\n', - 'sqrt': 'o39\t#sqrt\n', - 'asinh': 'o50\t#asinh\n', - 'acosh': 'o52\t#acosh\n', - 'atanh': 'o47\t#atanh\n', - 'ceil': 'o14\t#ceil\n', - 'floor': 'o13\t#floor\n', + 'log': 'o43\t#log\n', + 'log10': 'o42\t#log10\n', + 'sin': 'o41\t#sin\n', + 'cos': 'o46\t#cos\n', + 'tan': 'o38\t#tan\n', + 'sinh': 'o40\t#sinh\n', + 'cosh': 'o45\t#cosh\n', + 'tanh': 'o37\t#tanh\n', + 'asin': 'o51\t#asin\n', + 'acos': 'o53\t#acos\n', + 'atan': 'o49\t#atan\n', + 'exp': 'o44\t#exp\n', + 'sqrt': 'o39\t#sqrt\n', + 'asinh': 'o50\t#asinh\n', + 'acosh': 'o52\t#acosh\n', + 'atanh': 'o47\t#atanh\n', + 'ceil': 'o14\t#ceil\n', + 'floor': 'o13\t#floor\n', } binary_sum = 'o0\t#+\n' @@ -1620,9 +1794,9 @@ class text_nl_debug_template(object): _create_strict_inequality_map(vars()) + def _strip_template_comments(vars_, base_): - vars_['unary'] = {k: v[:v.find('\t#')]+'\n' - for k, v in base_.unary.items()} + vars_['unary'] = {k: v[: v.find('\t#')] + '\n' for k, v in base_.unary.items()} for k, v in base_.__dict__.items(): if type(v) is str and '\t#' in v: v_lines = v.split('\n') @@ -1644,8 +1818,9 @@ def node_result_to_amplrepn(data): if data[0] is _GENERAL: return data[1] elif data[0] is _MONOMIAL: - if data[2]: - return AMPLRepn(0, [data[1:]], None) + _, v, c = data + if c: + return AMPLRepn(0, {v: c}, None) else: return AMPLRepn(0, None, None) elif data[0] is _CONSTANT: @@ -1653,17 +1828,19 @@ def node_result_to_amplrepn(data): else: raise DeveloperError("unknown result type") + def handle_negation_node(visitor, node, arg1): if arg1[0] is _MONOMIAL: - return (_MONOMIAL, arg1[1], -1*arg1[2]) + return (_MONOMIAL, arg1[1], -1 * arg1[2]) elif arg1[0] is _GENERAL: arg1[1].mult *= -1 return arg1 elif arg1[0] is _CONSTANT: - return (_CONSTANT, -1*arg1[1]) + return (_CONSTANT, -1 * arg1[1]) else: raise RuntimeError("%s: %s" % (type(arg1[0]), arg1)) + def handle_product_node(visitor, node, arg1, arg2): if arg2[0] is _CONSTANT: arg2, arg1 = arg1, arg2 @@ -1671,86 +1848,173 @@ def handle_product_node(visitor, node, arg1, arg2): mult = arg1[1] if not mult: # simplify multiplication by 0 (if arg2 is zero, the - # simplification happens implicitly when we evaluate the - # constant below) + # simplification happens when we evaluate the constant + # below). Note that this is not IEEE-754 compliant, and + # will map 0*inf and 0*nan to 0 (and not to nan). We are + # including this for backwards compatibility with the NLv1 + # writer, but arguably we should deprecate/remove this + # "feature" in the future. + if arg2[0] is _CONSTANT: + _prod = mult * arg2[1] + if _prod: + deprecation_warning( + f"Encountered {mult}*{str(arg2[1])} in expression tree. " + "Mapping the NaN result to 0 for compatibility " + "with the nl_v1 writer. In the future, this NaN " + "will be preserved/emitted to comply with IEEE-754.", + version='6.4.3', + ) + _prod = 0 + return (_CONSTANT, _prod) return arg1 if mult == 1: return arg2 elif arg2[0] is _MONOMIAL: - return (_MONOMIAL, arg2[1], mult*arg2[2]) + if mult != mult: + # This catches mult (i.e., arg1) == nan + return arg1 + return (_MONOMIAL, arg2[1], mult * arg2[2]) elif arg2[0] is _GENERAL: + if mult != mult: + # This catches mult (i.e., arg1) == nan + return arg1 arg2[1].mult *= mult return arg2 elif arg2[0] is _CONSTANT: - return (_CONSTANT, mult*arg2[1]) + if not arg2[1]: + # Simplify multiplication by 0; see note above about + # IEEE-754 incompatibility. + _prod = mult * arg2[1] + if _prod: + deprecation_warning( + f"Encountered {str(mult)}*{arg2[1]} in expression tree. " + "Mapping the NaN result to 0 for compatibility " + "with the nl_v1 writer. In the future, this NaN " + "will be preserved/emitted to comply with IEEE-754.", + version='6.4.3', + ) + _prod = 0 + return (_CONSTANT, _prod) + return (_CONSTANT, mult * arg2[1]) nonlin = node_result_to_amplrepn(arg1).compile_repn( - visitor, visitor.template.product) + visitor, visitor.template.product + ) nonlin = node_result_to_amplrepn(arg2).compile_repn(visitor, *nonlin) return (_GENERAL, AMPLRepn(0, None, nonlin)) + def handle_division_node(visitor, node, arg1, arg2): if arg2[0] is _CONSTANT: div = arg2[1] if div == 1: return arg1 if arg1[0] is _MONOMIAL: - return (_MONOMIAL, arg1[1], arg1[2]/div) + tmp = apply_node_operation(node, (arg1[2], div)) + if tmp != tmp: + # This catches if the coefficient division results in nan + return _CONSTANT, tmp + return (_MONOMIAL, arg1[1], tmp) elif arg1[0] is _GENERAL: - arg1[1].mult /= div + tmp = apply_node_operation(node, (arg1[1].mult, div)) + if tmp != tmp: + # This catches if the multiplier division results in nan + return _CONSTANT, tmp + arg1[1].mult = tmp return arg1 elif arg1[0] is _CONSTANT: - return (_CONSTANT, arg1[1]/div) + return _CONSTANT, apply_node_operation(node, (arg1[1], div)) + elif arg1[0] is _CONSTANT and not arg1[1]: + return _CONSTANT, 0 nonlin = node_result_to_amplrepn(arg1).compile_repn( - visitor, visitor.template.division) + visitor, visitor.template.division + ) nonlin = node_result_to_amplrepn(arg2).compile_repn(visitor, *nonlin) return (_GENERAL, AMPLRepn(0, None, nonlin)) + def handle_pow_node(visitor, node, arg1, arg2): - nonlin = node_result_to_amplrepn(arg1).compile_repn( - visitor, visitor.template.pow) + if arg2[0] is _CONSTANT: + if arg1[0] is _CONSTANT: + ans = apply_node_operation(node, (arg1[1], arg2[1])) + if ans.__class__ in _complex_types: + ans = complex_number_error(ans, visitor, node) + return _CONSTANT, ans + elif not arg2[1]: + return _CONSTANT, 1 + elif arg2[1] == 1: + return arg1 + nonlin = node_result_to_amplrepn(arg1).compile_repn(visitor, visitor.template.pow) nonlin = node_result_to_amplrepn(arg2).compile_repn(visitor, *nonlin) return (_GENERAL, AMPLRepn(0, None, nonlin)) + def handle_abs_node(visitor, node, arg1): - nonlin = node_result_to_amplrepn(arg1).compile_repn( - visitor, visitor.template.abs) + if arg1[0] is _CONSTANT: + return (_CONSTANT, abs(arg1[1])) + nonlin = node_result_to_amplrepn(arg1).compile_repn(visitor, visitor.template.abs) return (_GENERAL, AMPLRepn(0, None, nonlin)) + def handle_unary_node(visitor, node, arg1): + if arg1[0] is _CONSTANT: + return _CONSTANT, apply_node_operation(node, (arg1[1],)) nonlin = node_result_to_amplrepn(arg1).compile_repn( - visitor, visitor.template.unary[node.name]) + visitor, visitor.template.unary[node.name] + ) return (_GENERAL, AMPLRepn(0, None, nonlin)) + def handle_exprif_node(visitor, node, arg1, arg2, arg3): + if arg1[0] is _CONSTANT: + if arg1[1]: + return arg2 + else: + return arg3 nonlin = node_result_to_amplrepn(arg1).compile_repn( - visitor, visitor.template.exprif) + visitor, visitor.template.exprif + ) nonlin = node_result_to_amplrepn(arg2).compile_repn(visitor, *nonlin) nonlin = node_result_to_amplrepn(arg3).compile_repn(visitor, *nonlin) return (_GENERAL, AMPLRepn(0, None, nonlin)) + def handle_equality_node(visitor, node, arg1, arg2): + if arg1[0] is _CONSTANT and arg2[0] is _CONSTANT: + return (_CONSTANT, arg1[1] == arg2[1]) nonlin = node_result_to_amplrepn(arg1).compile_repn( - visitor, visitor.template.equality) + visitor, visitor.template.equality + ) nonlin = node_result_to_amplrepn(arg2).compile_repn(visitor, *nonlin) return (_GENERAL, AMPLRepn(0, None, nonlin)) + def handle_inequality_node(visitor, node, arg1, arg2): + if arg1[0] is _CONSTANT and arg2[0] is _CONSTANT: + return (_CONSTANT, node._apply_operation((arg1[1], arg2[1]))) nonlin = node_result_to_amplrepn(arg1).compile_repn( - visitor, visitor.template.strict_inequality_map[node.strict]) + visitor, visitor.template.strict_inequality_map[node.strict] + ) nonlin = node_result_to_amplrepn(arg2).compile_repn(visitor, *nonlin) return (_GENERAL, AMPLRepn(0, None, nonlin)) + def handle_ranged_inequality_node(visitor, node, arg1, arg2, arg3): + if arg1[0] is _CONSTANT and arg2[0] is _CONSTANT and arg3[0] is _CONSTANT: + return (_CONSTANT, node._apply_operation((arg1[1], arg2[1], arg3[1]))) op = visitor.template.strict_inequality_map[node.strict] - nl, args = node_result_to_amplrepn(arg1).compile_repn( - visitor, visitor.template.and_expr + op[0]) - nl2, args2 = node_result_to_amplrepn(arg2).compile_repn(visitor) + nl, args, named = node_result_to_amplrepn(arg1).compile_repn( + visitor, visitor.template.and_expr + op[0] + ) + nl2, args2, named = node_result_to_amplrepn(arg2).compile_repn( + visitor, '', None, named + ) nl += nl2 + op[1] + nl2 args.extend(args2) args.extend(args2) - nonlin = node_result_to_amplrepn(arg3).compile_repn(visitor, nl, args) + nonlin = node_result_to_amplrepn(arg3).compile_repn(visitor, nl, args, named) return (_GENERAL, AMPLRepn(0, None, nonlin)) + def handle_named_expression_node(visitor, node, arg1): _id = id(node) # Note that while named subexpressions ('defined variables' in the @@ -1760,10 +2024,6 @@ def handle_named_expression_node(visitor, node, arg1): # wrapped in the nonlinear portion of the expression tree. repn = node_result_to_amplrepn(arg1) - # When converting this shared subexpression to a (nonlinear) - # node, we want to just reference this subexpression: - repn.nl = (visitor.template.var, (_id,)) - # A local copy of the expression source list. This will be updated # later if the same Expression node is encountered in another # expression tree. @@ -1771,11 +2031,32 @@ def handle_named_expression_node(visitor, node, arg1): # This is a 3-tuple [con_id, obj_id, substitute_expression]. If the # expression is used by more than 1 constraint / objective, then the # id is set to 0. If it is not used by any, then it is None. - # substitue_expression is a bool indicating if this named + # substitute_expression is a bool indicating if this named # subexpression tree should be directly substituted into any # expression tree that references this node (i.e., do NOT emit the V # line). expression_source = [None, None, False] + # Record this common expression + visitor.subexpression_cache[_id] = ( + # 0: the "component" that generated this expression ID + node, + # 1: the common subexpression (to be written out) + repn, + # 2: the source usage information for this subexpression: + # [(con_id, obj_id, substitute); see above] + expression_source, + ) + + if not visitor.use_named_exprs: + return _GENERAL, repn.duplicate() + + mult, repn.mult = repn.mult, 1 + if repn.named_exprs is None: + repn.named_exprs = set() + + # When converting this shared subexpression to a (nonlinear) + # node, we want to just reference this subexpression: + repn.nl = (visitor.template.var, (_id,)) if repn.nonlinear: # As we will eventually need the compiled form of any nonlinear @@ -1788,7 +2069,7 @@ def handle_named_expression_node(visitor, node, arg1): repn.compile_nonlinear_fragment(visitor) if repn.linear: - # If this expession has both linear and nonlinear + # If this expression has both linear and nonlinear # components, we will follow the ASL convention and break # the named subexpression into two named subexpressions: one # that is only the nonlinear component and one that has the @@ -1797,29 +2078,30 @@ def handle_named_expression_node(visitor, node, arg1): # named subexpressions when appropriate. sub_node = NLFragment(repn, node) sub_id = id(sub_node) - sub_repn = AMPLRepn(0, None, repn.nonlinear) + sub_repn = AMPLRepn(0, None, None) + sub_repn.nonlinear = repn.nonlinear sub_repn.nl = (visitor.template.var, (sub_id,)) + sub_repn.named_exprs = set(repn.named_exprs) + + repn.named_exprs.add(sub_id) + repn.nonlinear = sub_repn.nl + # See above for the meaning of this source information nl_info = list(expression_source) - visitor.subexpression_cache[sub_id] = ( - sub_node, sub_repn, nl_info, - ) - repn.nonlinear = sub_repn.nl + visitor.subexpression_cache[sub_id] = (sub_node, sub_repn, nl_info) # It is important that the NL subexpression comes before the # main named expression: visitor.subexpression_order.append(sub_id) - # The nonlinear identifier is *always* used - visitor.used_named_expressions.add(sub_id) else: nl_info = expression_source - # The nonlinear component of this named expression is - # guaranteed to be used by this expression - setitem(nl_info, *visitor.active_expression_source) else: repn.nonlinear = None if repn.linear: - if (not repn.const and len(repn.linear) == 1 - and repn.linear[0][1] == 1): + if ( + not repn.const + and len(repn.linear) == 1 + and next(iter(repn.linear.values())) == 1 + ): # This Expression holds only a variable (multiplied by # 1). Do not emit this as a named variable and instead # just inject the variable where this expression is @@ -1833,12 +2115,12 @@ def handle_named_expression_node(visitor, node, arg1): repn.nl = None expression_source[2] = True - if repn.mult != 1: - mult = repn.mult - repn.mult = 1 + if mult != 1: repn.const *= mult if repn.linear: - repn.linear = [(v, c*mult) for v, c in repn.linear] + _lin = repn.linear + for v in repn.linear: + _lin[v] *= mult if repn.nonlinear: if mult == -1: prefix = visitor.template.negation @@ -1846,23 +2128,19 @@ def handle_named_expression_node(visitor, node, arg1): prefix = visitor.template.multiplier % mult repn.nonlinear = prefix + repn.nonlinear[0], repn.nonlinear[1] - visitor.subexpression_cache[_id] = ( - # 0: the "component" that generated this expression ID - node, - # 1: the common subexpression (to be written out) - repn, - # 2: the source usage information for this subexpression: - # [(con_id, obj_id, substitute); see above] - expression_source, - ) + if expression_source[2]: + if repn.linear: + return (_MONOMIAL, next(iter(repn.linear)), 1) + else: + return (_CONSTANT, repn.const) + + # Defer recording this _id until after we know that this repn will + # not be directly substituted (and to ensure that the NL fragment is + # added to the order first). visitor.subexpression_order.append(_id) - ans = AMPLRepn( - repn.const, - list(repn.linear) if repn.linear is not None else repn.linear, - repn.nonlinear - ) - ans.nl = repn.nl - return (_GENERAL, ans) + + return (_GENERAL, repn.duplicate()) + def handle_external_function_node(visitor, node, *args): func = node._fcn._function @@ -1871,33 +2149,35 @@ def handle_external_function_node(visitor, node, *args): # these as 'precompiled' general NL fragments, the normal trap for # constant subexpressions will miss constant external function calls # that contain strings. We will catch that case here. - if all(arg[0] is _CONSTANT or - (arg[0] is _GENERAL and arg[1].nl and not arg[1].nl[1]) - for arg in args): - arg_list = [arg[1] if arg[0] is _CONSTANT else arg[1].const - for arg in args] - return (_CONSTANT, node._apply_operation(arg_list)) + if all( + arg[0] is _CONSTANT or (arg[0] is _GENERAL and arg[1].nl and not arg[1].nl[1]) + for arg in args + ): + arg_list = [arg[1] if arg[0] is _CONSTANT else arg[1].const for arg in args] + return _CONSTANT, apply_node_operation(node, arg_list) if func in visitor.external_functions: if node._fcn._library != visitor.external_functions[func][1]._library: raise RuntimeError( "The same external function name (%s) is associated " "with two different libraries (%s through %s, and %s " "through %s). The ASL solver will fail to link " - "correctly." % - (func, - visitor.external_byFcn[func]._library, - visitor.external_byFcn[func]._library.name, - node._fcn._library, - node._fcn.name)) + "correctly." + % ( + func, + visitor.external_byFcn[func]._library, + visitor.external_byFcn[func]._library.name, + node._fcn._library, + node._fcn.name, + ) + ) else: - visitor.external_functions[func] = ( - len(visitor.external_functions), - node._fcn, - ) + visitor.external_functions[func] = (len(visitor.external_functions), node._fcn) comment = f'\t#{node.local_name}' if visitor.symbolic_solver_labels else '' nonlin = node_result_to_amplrepn(args[0]).compile_repn( - visitor, visitor.template.external_fcn % ( - visitor.external_functions[func][0], len(args), comment)) + visitor, + visitor.template.external_fcn + % (visitor.external_functions[func][0], len(args), comment), + ) for arg in args[1:]: nonlin = node_result_to_amplrepn(arg).compile_repn(visitor, *nonlin) return (_GENERAL, AMPLRepn(0, None, nonlin)) @@ -1926,26 +2206,52 @@ def handle_external_function_node(visitor, node, *args): # These are handled explicitly in beforeChild(): # LinearExpression: handle_linear_expression, # SumExpression: handle_sum_expression, - # MonomialTermExpression: handle_monomial_term, + # + # Note: MonomialTermExpression is only hit when processing NPV + # subexpressions that raise errors (e.g., log(0) * m.x), so no + # special processing is needed [it is just a product expression] + MonomialTermExpression: handle_product_node, } def _before_native(visitor, child): return False, (_CONSTANT, child) + +def _before_complex(visitor, child): + return False, (_CONSTANT, complex_number_error(child, visitor, child)) + + def _before_string(visitor, child): - ans = AMPLRepn(None, None, None) + visitor.encountered_string_arguments = True + ans = AMPLRepn(child, None, None) ans.nl = (visitor.template.string % (len(child), child), ()) return False, (_GENERAL, ans) + def _before_var(visitor, child): _id = id(child) if _id not in visitor.var_map: if child.fixed: - return False, (_CONSTANT, child()) + ans = child() + if ans is None or ans != ans: + ans = InvalidNumber(nan) + elif ans.__class__ in _complex_types: + ans = complex_number_error(ans, self, node) + return False, (_CONSTANT, ans) visitor.var_map[_id] = child return False, (_MONOMIAL, _id, 1) + +def _before_param(visitor, child): + ans = child() + if ans is None or ans != ans: + ans = InvalidNumber(nan) + elif ans.__class__ in _complex_types: + ans = complex_number_error(ans, self, child) + return False, (_CONSTANT, ans) + + def _before_npv(visitor, child): # TBD: It might be more efficient to cache the value of NPV # expressions to avoid duplicate evaluations. However, current @@ -1957,7 +2263,14 @@ def _before_npv(visitor, child): # else: # child = visitor.value_cache[_id] = child() # return False, (_CONSTANT, child) - return False, (_CONSTANT, child()) + try: + return False, (_CONSTANT, visitor._eval_expr(child)) + except: + # If there was an exception evaluating the subexpression, then + # we need to descend into it (in case there is something like 0 * + # nan that we need to map to 0) + return True, None + def _before_monomial(visitor, child): # @@ -1975,61 +2288,92 @@ def _before_monomial(visitor, child): # arg1 = visitor.value_cache[_id] # else: # arg1 = visitor.value_cache[_id] = arg1() - arg1 = arg1() - # Trap multiplication by 0 + try: + arg1 = visitor._eval_expr(arg1) + except: + # If there was an exception evaluating the subexpression, + # then we need to descend into it (in case there is something + # like 0 * nan that we need to map to 0) + return True, None + + if arg2.fixed: + arg2 = arg2.value + _prod = arg1 * arg2 + if not (arg1 and arg2) and _prod: + deprecation_warning( + f"Encountered {arg1}*{arg2} in expression tree. " + "Mapping the NaN result to 0 for compatibility " + "with the nl_v1 writer. In the future, this NaN " + "will be preserved/emitted to comply with IEEE-754.", + version='6.4.3', + ) + _prod = 0 + return False, (_CONSTANT, _prod) + + # Trap multiplication by 0. if not arg1: return False, (_CONSTANT, 0) _id = id(arg2) if _id not in visitor.var_map: - if arg2.fixed: - return False, (_CONSTANT, arg1 * arg2()) visitor.var_map[_id] = arg2 return False, (_MONOMIAL, _id, arg1) + def _before_linear(visitor, child): # Because we are going to modify the LinearExpression in this - # walker, we need to make a copy of the LinearExpression from - # the original expression tree. + # walker, we need to make a copy of the arg list from the original + # expression tree. var_map = visitor.var_map - const = child.constant - linear = [] - for v, c in zip(child.linear_vars, child.linear_coefs): - if c.__class__ not in native_types: - c = c() - if not c: - continue - elif v.fixed: - const += c * v() + const = 0 + linear = {} + for arg in child.args: + if arg.__class__ is MonomialTermExpression: + c, v = arg.args + if c.__class__ not in native_types: + c = visitor._eval_expr(c) + if v.fixed: + const += c * v.value + elif c: + _id = id(v) + if _id not in var_map: + var_map[_id] = v + if _id in linear: + linear[_id] += c + else: + linear[_id] = c + elif arg.__class__ in native_types: + const += arg else: - _id = id(v) - if _id not in var_map: - var_map[_id] = v - linear.append((_id, c)) - return False, (_GENERAL, AMPLRepn(const, linear, None)) + const += visitor._eval_expr(arg) + if linear: + return False, (_GENERAL, AMPLRepn(const, linear, None)) + else: + return False, (_CONSTANT, const) + def _before_named_expression(visitor, child): _id = id(child) if _id in visitor.subexpression_cache: obj, repn, info = visitor.subexpression_cache[_id] - ans = AMPLRepn( - repn.const, - list(repn.linear) if repn.linear is not None else repn.linear, - repn.nonlinear - ) - ans.nl = repn.nl - return False, (_GENERAL, ans) + if info[2]: + if repn.linear: + return False, (_MONOMIAL, next(iter(repn.linear)), 1) + else: + return False, (_CONSTANT, repn.const) + return False, (_GENERAL, repn.duplicate()) else: return True, None + def _before_general_expression(visitor, child): return True, None +_complex_types = set((complex,)) # Register an initial set of known expression types with the "before # child" expression handler lookup table. -_before_child_handlers = { - _type: _before_native for _type in native_numeric_types -} +_before_child_handlers = {_type: _before_native for _type in native_numeric_types} +_before_child_handlers[complex] = _before_complex for _type in native_types: if issubclass(_type, str): _before_child_handlers[_type] = _before_string @@ -2038,21 +2382,33 @@ def _before_general_expression(visitor, child): _before_child_handlers[_type] = _before_general_expression # named subexpressions for _type in ( - _GeneralExpressionData, ScalarExpression, - kernel.expression.expression, kernel.expression.noclone, - _GeneralObjectiveData, ScalarObjective, kernel.objective.objective): - + _GeneralExpressionData, + ScalarExpression, + kernel.expression.expression, + kernel.expression.noclone, + _GeneralObjectiveData, + ScalarObjective, + kernel.objective.objective, +): _before_child_handlers[_type] = _before_named_expression # Special linear / summation expressions _before_child_handlers[MonomialTermExpression] = _before_monomial _before_child_handlers[LinearExpression] = _before_linear _before_child_handlers[SumExpression] = _before_general_expression -class AMPLRepnVisitor(StreamBasedExpressionVisitor): - def __init__(self, template, subexpression_cache, subexpression_order, - external_functions, var_map, used_named_expressions, - symbolic_solver_labels): +class AMPLRepnVisitor(StreamBasedExpressionVisitor): + def __init__( + self, + template, + subexpression_cache, + subexpression_order, + external_functions, + var_map, + used_named_expressions, + symbolic_solver_labels, + use_named_exprs, + ): super().__init__() self.template = template self.subexpression_cache = subexpression_cache @@ -2062,9 +2418,17 @@ def __init__(self, template, subexpression_cache, subexpression_order, self.var_map = var_map self.used_named_expressions = used_named_expressions self.symbolic_solver_labels = symbolic_solver_labels - #self.value_cache = {} - self._before_child_handlers = _before_child_handlers - self._operator_handles = _operator_handles + self.use_named_exprs = use_named_exprs + self.encountered_string_arguments = False + self._eval_expr_visitor = _EvaluationVisitor(True) + + def _eval_expr(self, expr): + ans = self._eval_expr_visitor.dfs_postorder_stack(expr) + if ans.__class__ not in native_types: + ans = value(ans) + if ans.__class__ in _complex_types: + return complex_number_error(ans, self, expr) + return ans def initializeWalker(self, expr): expr, src, src_idx = expr @@ -2076,16 +2440,18 @@ def initializeWalker(self, expr): def beforeChild(self, node, child, child_idx): try: - return self._before_child_handlers[child.__class__](self, child) + return _before_child_handlers[child.__class__](self, child) except KeyError: self._register_new_before_child_processor(child) - return self._before_child_handlers[child.__class__](self, child) + return _before_child_handlers[child.__class__](self, child) def enterNode(self, node): # SumExpression are potentially large nary operators. Directly # populate the result if node.__class__ is SumExpression: - return node.args, AMPLRepn(0, [], []) + data = AMPLRepn(0, {}, None) + data.nonlinear = [] + return node.args, data else: return node.args, [] @@ -2099,15 +2465,11 @@ def exitNode(self, node, data): # # General expressions... # - if all(arg[0] is _CONSTANT for arg in data): - return ( - _CONSTANT, node._apply_operation(list(map( - itemgetter(1), data))) - ) - return self._operator_handles[node.__class__](self, node, *data) + return _operator_handles[node.__class__](self, node, *data) def finalizeResult(self, result): ans = node_result_to_amplrepn(result) + # If this was a nonlinear named expression, and that expression # has no linear portion, then we will directly use this as a # named expression. We need to mark that the expression was @@ -2118,45 +2480,48 @@ def finalizeResult(self, result): # this outer named expression). This prevents accidentally # recharacterizing variables that only appear linearly as # nonlinear variables. - if ans.nl and ans.nonlinear: - if ans.linear: - self._mark_named_expression_as_used(ans.nonlinear[1]) - else: - self._mark_named_expression_as_used(ans.nl[1]) + if ans.nl is not None: + if not ans.nl[1]: + raise ValueError("Numeric expression resolved to a string constant") + # This *is* a named subexpression. If there is no linear + # component, then replace this expression with the named + # expression. The mult will be handled later. We know that + # the const is built into the nonlinear expression, because + # it cannot be changed "in place" (only through addition, + # which would have "cleared" the nl attribute) + if not ans.linear: + ans.named_exprs.update(ans.nl[1]) ans.nonlinear = ans.nl - - ans.nl = None + ans.const = 0 + else: + # This named expression has both a linear and a + # nonlinear component, and possibly a multiplier and + # constant. We will not include this named expression + # and instead will expose the components so that linear + # variables are not accidentally re-characterized as + # nonlinear. + pass + # ans.nonlinear = orig.nonlinear + ans.nl = None if ans.nonlinear.__class__ is list: - if ans.nonlinear: - ans.compile_nonlinear_fragment(self) - else: - ans.nonlinear = None - linear = {} + ans.compile_nonlinear_fragment(self) + + if not ans.linear: + ans.linear = {} + linear = ans.linear if ans.mult != 1: - mult = ans.mult - ans.mult = 1 + mult, ans.mult = ans.mult, 1 ans.const *= mult - if ans.linear: - for v, c in ans.linear: - if v in linear: - linear[v] += mult * c - else: - linear[v] = mult * c + if linear: + for k in linear: + linear[k] *= mult if ans.nonlinear: if mult == -1: prefix = self.template.negation else: prefix = self.template.multiplier % mult ans.nonlinear = prefix + ans.nonlinear[0], ans.nonlinear[1] - - elif ans.linear: - for v, c in ans.linear: - if v in linear: - linear[v] += c - else: - linear[v] = c - ans.linear = linear # self.active_expression_source = None return ans @@ -2165,7 +2530,11 @@ def _register_new_before_child_processor(self, child): handlers = _before_child_handlers child_type = child.__class__ if child_type in native_numeric_types: - handlers[child_type] = _before_native + if isinstance(child_type, complex): + _complex_types.add(child_type) + dispatcher[child_type] = _before_complex + else: + dispatcher[child_type] = _before_native elif issubclass(child_type, str): handlers[child_type] = _before_string elif child_type in native_types: @@ -2174,22 +2543,25 @@ def _register_new_before_child_processor(self, child): if child.is_potentially_variable(): handlers[child_type] = _before_var else: - handlers[child_type] = _before_npv + handlers[child_type] = _before_param elif not child.is_potentially_variable(): handlers[child_type] = _before_npv - elif id(child) in self.subexpression_cache: + # If we descend into the named expression (because of an + # evaluation error), then on the way back out, we will use + # the potentially variable handler to process the result. + pv_base_type = child.potentially_variable_base_class() + if pv_base_type not in handlers: + try: + child.__class__ = pv_base_type + _register_new_before_child_processor(self, child) + finally: + child.__class__ = child_type + if pv_base_type in _operator_handles: + _operator_handles[child_type] = _operator_handles[pv_base_type] + elif id(child) in self.subexpression_cache or issubclass( + child_type, _GeneralExpressionData + ): handlers[child_type] = _before_named_expression + _operator_handles[child_type] = handle_named_expression_node else: handlers[child_type] = _before_general_expression - - def _mark_named_expression_as_used(self, ref): - assert len(ref) == 1 - _named_expr_id = ref[0] - self.used_named_expressions.add(_named_expr_id) - # Record that this named expression was used - info = self.subexpression_cache[_named_expr_id][2] - _idx = self.active_expression_source[0] - if info[_idx] is None: - info[_idx] = self.active_expression_source[1] - elif info[_idx] != self.active_expression_source[1]: - info[_idx] = 0 diff --git a/pyomo/repn/quadratic.py b/pyomo/repn/quadratic.py new file mode 100644 index 00000000000..5e102c89833 --- /dev/null +++ b/pyomo/repn/quadratic.py @@ -0,0 +1,398 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import copy + +from pyomo.core.expr.numeric_expr import ( + NegationExpression, + ProductExpression, + DivisionExpression, + PowExpression, + AbsExpression, + UnaryFunctionExpression, + Expr_ifExpression, + LinearExpression, + MonomialTermExpression, + mutable_expression, +) +from pyomo.core.expr.relational_expr import ( + EqualityExpression, + InequalityExpression, + RangedExpression, +) +from pyomo.core.base.expression import ScalarExpression +from . import linear +from .linear import _merge_dict, to_expression + +_CONSTANT = linear.ExprType.CONSTANT +_LINEAR = linear.ExprType.LINEAR +_GENERAL = linear.ExprType.GENERAL +_QUADRATIC = linear.ExprType.QUADRATIC + + +class QuadraticRepn(object): + __slots__ = ("multiplier", "constant", "linear", "quadratic", "nonlinear") + + def __init__(self): + self.multiplier = 1 + self.constant = 0 + self.linear = {} + self.quadratic = None + self.nonlinear = None + + def __str__(self): + return ( + f"QuadraticRepn(mult={self.multiplier}, const={self.constant}, " + f"linear={self.linear}, quadratic={self.quadratic}, " + f"nonlinear={self.nonlinear})" + ) + + def __repr__(self): + return str(self) + + def walker_exitNode(self): + if self.nonlinear is not None: + return _GENERAL, self + elif self.quadratic: + return _QUADRATIC, self + elif self.linear: + return _LINEAR, self + else: + return _CONSTANT, self.multiplier * self.constant + + def duplicate(self): + ans = self.__class__.__new__(self.__class__) + ans.multiplier = self.multiplier + ans.constant = self.constant + ans.linear = dict(self.linear) + if self.quadratic: + ans.quadratic = dict(self.quadratic) + else: + ans.quadratic = None + ans.nonlinear = self.nonlinear + return ans + + def to_expression(self, visitor): + var_map = visitor.var_map + if self.nonlinear is not None: + # We want to start with the nonlinear term (and use + # assignment) in case the term is a non-numeric node (like a + # relational expression) + ans = self.nonlinear + else: + ans = 0 + if self.quadratic: + with mutable_expression() as e: + for (x1, x2), coef in self.quadratic.items(): + if x1 == x2: + e += coef * var_map[x1] ** 2 + else: + e += coef * (var_map[x1] * var_map[x2]) + ans += e + if self.linear: + if len(self.linear) == 1: + vid, coef = next(iter(self.linear.items())) + if coef == 1: + ans += var_map[vid] + elif coef: + ans += MonomialTermExpression((coef, var_map[vid])) + else: + pass + else: + ans += LinearExpression( + [ + MonomialTermExpression((coef, var_map[vid])) + for vid, coef in self.linear.items() + if coef + ] + ) + if self.constant: + ans += self.constant + if self.multiplier != 1: + ans *= self.multiplier + return ans + + def append(self, other): + """Append a child result from acceptChildResult + + Notes + ----- + This method assumes that the operator was "+". It is implemented + so that we can directly use a QuadraticRepn() as a data object in + the expression walker (thereby avoiding the function call for a + custom callback) + + """ + # Note that self.multiplier will always be 1 (we only call append() + # within a sum, so there is no opportunity for self.multiplier to + # change). Omitting the assertion for efficiency. + # assert self.multiplier == 1 + _type, other = other + if _type is _CONSTANT: + self.constant += other + return + + mult = other.multiplier + self.constant += mult * other.constant + if other.linear: + _merge_dict(self.linear, mult, other.linear) + if other.quadratic: + if not self.quadratic: + self.quadratic = {} + _merge_dict(self.quadratic, mult, other.quadratic) + if other.nonlinear is not None: + if mult != 1: + nl = mult * other.nonlinear + else: + nl = other.nonlinear + if self.nonlinear is None: + self.nonlinear = nl + else: + self.nonlinear += nl + + +_exit_node_handlers = copy.deepcopy(linear._exit_node_handlers) + +# +# NEGATION +# +_exit_node_handlers[NegationExpression][(_QUADRATIC,)] = linear._handle_negation_ANY + + +# +# PRODUCT +# +def _mul_linear_linear(varOrder, linear1, linear2): + quadratic = {} + for vid1, coef1 in linear1.items(): + for vid2, coef2 in linear2.items(): + if varOrder(vid1) < varOrder(vid2): + key = vid1, vid2 + else: + key = vid2, vid1 + if key in quadratic: + quadratic[key] += coef1 * coef2 + else: + quadratic[key] = coef1 * coef2 + return quadratic + + +def _handle_product_linear_linear(visitor, node, arg1, arg2): + _, arg1 = arg1 + _, arg2 = arg2 + # Quadratic first, because we will update linear in a minute + arg1.quadratic = _mul_linear_linear( + visitor.var_order.__getitem__, arg1.linear, arg2.linear + ) + # Linear second, as this relies on knowing the original constants + if not arg2.constant: + arg1.linear = {} + elif arg2.constant != 1: + c = arg2.constant + _linear = arg1.linear + for vid, coef in _linear.items(): + _linear[vid] = c * coef + if arg1.constant: + _merge_dict(arg1.linear, arg1.constant, arg2.linear) + # Finally, the constant and multipliers + arg1.constant *= arg2.constant + arg1.multiplier *= arg2.multiplier + return _QUADRATIC, arg1 + + +def _handle_product_nonlinear(visitor, node, arg1, arg2): + ans = visitor.Result() + if not visitor.expand_nonlinear_products: + ans.nonlinear = to_expression(visitor, arg1) * to_expression(visitor, arg2) + return _GENERAL, ans + + # We are multiplying (A + Bx + Cx^2 + D(x)) * (A + Bx + Cx^2 + Dx)) + _, x1 = arg1 + _, x2 = arg2 + ans = visitor.Result() + ans.multiplier = x1.multiplier * x2.multiplier + x1.multiplier = x2.multiplier = 1 + # x1.const * x2.const [AA] + ans.constant = x1.constant * x2.constant + # linear & quadratic terms + if x2.constant: + # [BA], [CA] + c = x2.constant + if c == 1: + ans.linear = dict(x1.linear) + if x1.quadratic: + ans.quadratic = dict(x1.quadratic) + else: + ans.linear = {vid: c * coef for vid, coef in x1.linear.items()} + if x1.quadratic: + ans.quadratic = {k: c * coef for k, coef in x1.quadratic.items()} + if x1.constant: + # [AB] + _merge_dict(ans.linear, x1.constant, x2.linear) + # [AC] + if x2.quadratic: + if ans.quadratic: + _merge_dict(ans.quadratic, x1.constant, x2.quadratic) + elif x1.constant == 1: + ans.quadratic = dict(x2.quadratic) + else: + c = x1.constant + ans.quadratic = {k: c * coef for k, coef in x2.quadratic.items()} + # [BB] + if x1.linear and x2.linear: + quad = _mul_linear_linear(visitor.var_order.__getitem__, x1.linear, x2.linear) + if ans.quadratic: + _merge_dict(ans.quadratic, 1, quad) + else: + ans.quadratic = quad + # [DA] + [DB] + [DC] + [DD] + ans.nonlinear = 0 + if x1.nonlinear is not None: + ans.nonlinear += x1.nonlinear * x2.to_expression(visitor) + x1.nonlinear = None + x2.constant = 0 + x1_c = x1.constant + x1.constant = 0 + x1_lin = x1.linear + x1.linear = {} + # [CB] + [CC] + [CD] + if x1.quadratic: + ans.nonlinear += x1.to_expression(visitor) * x2.to_expression(visitor) + x1.quadratic = None + x2.linear = {} + # [BC] + [BD] + if x1_lin: + x1.linear = x1_lin + ans.nonlinear += x1.to_expression(visitor) * x2.to_expression(visitor) + # [AD] + if x1_c and x2.nonlinear is not None: + ans.nonlinear += x1_c * x2.nonlinear + return _GENERAL, ans + + +_exit_node_handlers[ProductExpression].update( + { + (_CONSTANT, _QUADRATIC): linear._handle_product_constant_ANY, + (_LINEAR, _QUADRATIC): _handle_product_nonlinear, + (_QUADRATIC, _QUADRATIC): _handle_product_nonlinear, + (_GENERAL, _QUADRATIC): _handle_product_nonlinear, + (_QUADRATIC, _CONSTANT): linear._handle_product_ANY_constant, + (_QUADRATIC, _LINEAR): _handle_product_nonlinear, + (_QUADRATIC, _GENERAL): _handle_product_nonlinear, + # Replace handler from the linear walker + (_LINEAR, _LINEAR): _handle_product_linear_linear, + } +) + +# +# DIVISION +# +_exit_node_handlers[DivisionExpression].update( + { + (_CONSTANT, _QUADRATIC): linear._handle_division_nonlinear, + (_LINEAR, _QUADRATIC): linear._handle_division_nonlinear, + (_QUADRATIC, _QUADRATIC): linear._handle_division_nonlinear, + (_GENERAL, _QUADRATIC): linear._handle_division_nonlinear, + (_QUADRATIC, _CONSTANT): linear._handle_division_ANY_constant, + (_QUADRATIC, _LINEAR): linear._handle_division_nonlinear, + (_QUADRATIC, _GENERAL): linear._handle_division_nonlinear, + } +) + + +# +# EXPONENTIATION +# +_exit_node_handlers[PowExpression].update( + { + (_CONSTANT, _QUADRATIC): linear._handle_pow_nonlinear, + (_LINEAR, _QUADRATIC): linear._handle_pow_nonlinear, + (_QUADRATIC, _QUADRATIC): linear._handle_pow_nonlinear, + (_GENERAL, _QUADRATIC): linear._handle_pow_nonlinear, + (_QUADRATIC, _CONSTANT): linear._handle_pow_ANY_constant, + (_QUADRATIC, _LINEAR): linear._handle_pow_nonlinear, + (_QUADRATIC, _GENERAL): linear._handle_pow_nonlinear, + } +) + +# +# ABS and UNARY handlers +# +_exit_node_handlers[AbsExpression][(_QUADRATIC,)] = linear._handle_unary_nonlinear +_exit_node_handlers[UnaryFunctionExpression][ + (_QUADRATIC,) +] = linear._handle_unary_nonlinear + +# +# NAMED EXPRESSION handlers +# +_exit_node_handlers[ScalarExpression][(_QUADRATIC,)] = linear._handle_named_ANY + +# +# EXPR_IF handlers +# +# Note: it is easier to just recreate the entire data structure, rather +# than update it +_exit_node_handlers[Expr_ifExpression] = { + (i, j, k): linear._handle_expr_if_nonlinear + for i in (_LINEAR, _QUADRATIC, _GENERAL) + for j in (_CONSTANT, _LINEAR, _QUADRATIC, _GENERAL) + for k in (_CONSTANT, _LINEAR, _QUADRATIC, _GENERAL) +} +for j in (_CONSTANT, _LINEAR, _QUADRATIC, _GENERAL): + for k in (_CONSTANT, _LINEAR, _QUADRATIC, _GENERAL): + _exit_node_handlers[Expr_ifExpression][ + _CONSTANT, j, k + ] = linear._handle_expr_if_const + +# +# RELATIONAL handlers +# +_exit_node_handlers[EqualityExpression].update( + { + (_CONSTANT, _QUADRATIC): linear._handle_equality_general, + (_LINEAR, _QUADRATIC): linear._handle_equality_general, + (_QUADRATIC, _QUADRATIC): linear._handle_equality_general, + (_GENERAL, _QUADRATIC): linear._handle_equality_general, + (_QUADRATIC, _CONSTANT): linear._handle_equality_general, + (_QUADRATIC, _LINEAR): linear._handle_equality_general, + (_QUADRATIC, _GENERAL): linear._handle_equality_general, + } +) +_exit_node_handlers[InequalityExpression].update( + { + (_CONSTANT, _QUADRATIC): linear._handle_inequality_general, + (_LINEAR, _QUADRATIC): linear._handle_inequality_general, + (_QUADRATIC, _QUADRATIC): linear._handle_inequality_general, + (_GENERAL, _QUADRATIC): linear._handle_inequality_general, + (_QUADRATIC, _CONSTANT): linear._handle_inequality_general, + (_QUADRATIC, _LINEAR): linear._handle_inequality_general, + (_QUADRATIC, _GENERAL): linear._handle_inequality_general, + } +) +_exit_node_handlers[RangedExpression].update( + { + (_CONSTANT, _QUADRATIC): linear._handle_ranged_general, + (_LINEAR, _QUADRATIC): linear._handle_ranged_general, + (_QUADRATIC, _QUADRATIC): linear._handle_ranged_general, + (_GENERAL, _QUADRATIC): linear._handle_ranged_general, + (_QUADRATIC, _CONSTANT): linear._handle_ranged_general, + (_QUADRATIC, _LINEAR): linear._handle_ranged_general, + (_QUADRATIC, _GENERAL): linear._handle_ranged_general, + } +) + + +class QuadraticRepnVisitor(linear.LinearRepnVisitor): + Result = QuadraticRepn + exit_node_handlers = _exit_node_handlers + exit_node_dispatcher = linear._initialize_exit_node_dispatcher(_exit_node_handlers) + max_exponential_expansion = 2 diff --git a/pyomo/repn/standard_aux.py b/pyomo/repn/standard_aux.py index 6c65432cfaf..7995949fc05 100644 --- a/pyomo/repn/standard_aux.py +++ b/pyomo/repn/standard_aux.py @@ -15,7 +15,8 @@ from pyomo.repn.standard_repn import ( - preprocess_block_constraints, preprocess_block_objectives + preprocess_block_constraints, + preprocess_block_objectives, ) diff --git a/pyomo/repn/standard_repn.py b/pyomo/repn/standard_repn.py index 36d139a1b85..95fa824b14a 100644 --- a/pyomo/repn/standard_repn.py +++ b/pyomo/repn/standard_repn.py @@ -18,22 +18,16 @@ import logging import itertools -from pyomo.core.base import (Constraint, - Objective, - ComponentMap) +from pyomo.common.numeric_types import native_types, native_numeric_types +from pyomo.core.base import Constraint, Objective, ComponentMap -from pyomo.core.expr import current as EXPR -from pyomo.core.base.objective import (_GeneralObjectiveData, - ScalarObjective) +import pyomo.core.expr as EXPR +from pyomo.core.expr.numvalue import NumericConstant +from pyomo.core.base.objective import _GeneralObjectiveData, ScalarObjective from pyomo.core.base import _ExpressionData, Expression from pyomo.core.base.expression import ScalarExpression, _GeneralExpressionData -from pyomo.core.base.var import (ScalarVar, - Var, - _GeneralVarData, - value) +from pyomo.core.base.var import ScalarVar, Var, _GeneralVarData, value from pyomo.core.base.param import ScalarParam, _ParamData -from pyomo.core.base.numvalue import (NumericConstant, - native_numeric_types) from pyomo.core.kernel.expression import expression, noclone from pyomo.core.kernel.variable import IVariable, variable from pyomo.core.kernel.objective import objective @@ -54,7 +48,7 @@ def isclose_const(a, b, rel_tol=1e-9, abs_tol=0.0): a = value(a) else: return False - return abs(a-b) <= max( rel_tol * max(abs(a), abs(b)), abs_tol ) + return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) class StandardRepn(object): @@ -65,13 +59,15 @@ class StandardRepn(object): TODO: define what "efficient" means to us. """ - __slots__ = ('constant', # The constant term - 'linear_coefs', # Linear coefficients - 'linear_vars', # Linear variables - 'quadratic_coefs', # Quadratic coefficients - 'quadratic_vars', # Quadratic variables - 'nonlinear_expr', # Nonlinear expression - 'nonlinear_vars') # Variables that appear in the nonlinear expression + __slots__ = ( + 'constant', # The constant term + 'linear_coefs', # Linear coefficients + 'linear_vars', # Linear variables + 'quadratic_coefs', # Quadratic coefficients + 'quadratic_vars', # Quadratic variables + 'nonlinear_expr', # Nonlinear expression + 'nonlinear_vars', + ) # Variables that appear in the nonlinear expression def __init__(self): self.constant = 0 @@ -86,39 +82,55 @@ def __getstate__(self): """ This method is required because this class uses slots. """ - return (self.constant, - self.linear_coefs, - self.linear_vars, - self.quadratic_coefs, - self.quadratic_vars, - self.nonlinear_expr, - self.nonlinear_vars) + return ( + self.constant, + self.linear_coefs, + self.linear_vars, + self.quadratic_coefs, + self.quadratic_vars, + self.nonlinear_expr, + self.nonlinear_vars, + ) def __setstate__(self, state): """ This method is required because this class uses slots. """ - self.constant, \ - self.linear_coefs, \ - self.linear_vars, \ - self.quadratic_coefs, \ - self.quadratic_vars, \ - self.nonlinear_expr, \ - self.nonlinear_vars = state + ( + self.constant, + self.linear_coefs, + self.linear_vars, + self.quadratic_coefs, + self.quadratic_vars, + self.nonlinear_expr, + self.nonlinear_vars, + ) = state # # Generate a string representation of the expression # - def __str__(self): #pragma: nocover + def __str__(self): # pragma: nocover output = StringIO() output.write("\n") - output.write("constant: "+str(self.constant)+"\n") - output.write("linear vars: "+str([v_.name for v_ in self.linear_vars])+"\n") - output.write("linear var ids: "+str([id(v_) for v_ in self.linear_vars])+"\n") - output.write("linear coef: "+str(list(self.linear_coefs))+"\n") - output.write("quadratic vars: "+str([(v_[0].name,v_[1].name) for v_ in self.quadratic_vars])+"\n") - output.write("quadratic var ids: "+str([(id(v_[0]), id(v_[1])) for v_ in self.quadratic_vars])+"\n") - output.write("quadratic coef: "+str(list(self.quadratic_coefs))+"\n") + output.write("constant: " + str(self.constant) + "\n") + output.write( + "linear vars: " + str([v_.name for v_ in self.linear_vars]) + "\n" + ) + output.write( + "linear var ids: " + str([id(v_) for v_ in self.linear_vars]) + "\n" + ) + output.write("linear coef: " + str(list(self.linear_coefs)) + "\n") + output.write( + "quadratic vars: " + + str([(v_[0].name, v_[1].name) for v_ in self.quadratic_vars]) + + "\n" + ) + output.write( + "quadratic var ids: " + + str([(id(v_[0]), id(v_[1])) for v_ in self.quadratic_vars]) + + "\n" + ) + output.write("quadratic coef: " + str(list(self.quadratic_coefs)) + "\n") if self.nonlinear_expr is None: output.write("nonlinear expr: None\n") else: @@ -129,7 +141,9 @@ def __str__(self): #pragma: nocover except AttributeError: output.write(str(self.nonlinear_expr)) output.write("\n") - output.write("nonlinear vars: "+str([v_.name for v_ in self.nonlinear_vars])+"\n") + output.write( + "nonlinear vars: " + str([v_.name for v_ in self.nonlinear_vars]) + "\n" + ) output.write("\n") ret_str = output.getvalue() @@ -137,7 +151,11 @@ def __str__(self): #pragma: nocover return ret_str def is_fixed(self): - if len(self.linear_vars) == 0 and len(self.nonlinear_vars) == 0 and len(self.quadratic_vars) == 0: + if ( + len(self.linear_vars) == 0 + and len(self.nonlinear_vars) == 0 + and len(self.quadratic_vars) == 0 + ): return True return False @@ -151,7 +169,11 @@ def polynomial_degree(self): return 0 def is_constant(self): - return self.nonlinear_expr is None and len(self.quadratic_coefs) == 0 and len(self.linear_coefs) == 0 + return ( + self.nonlinear_expr is None + and len(self.quadratic_coefs) == 0 + and len(self.linear_coefs) == 0 + ) def is_linear(self): return self.nonlinear_expr is None and len(self.quadratic_coefs) == 0 @@ -173,31 +195,33 @@ def to_expression(self, sort=True): # expr = self.constant - lvars = [(i,v) for i,v in enumerate(self.linear_vars)] + lvars = [(i, v) for i, v in enumerate(self.linear_vars)] if sort: lvars = sorted(lvars, key=lambda x: str(x[1])) - for i,v in lvars: + for i, v in lvars: c = self.linear_coefs[i] if c.__class__ in native_numeric_types: + if not c: + pass if isclose_const(c, 1.0): expr += v elif isclose_const(c, -1.0): expr -= v elif c < 0.0: - expr -= - c*v + expr -= -c * v else: - expr += c*v + expr += c * v else: - expr += c*v + expr += c * v - qvars = [(i,v) for i,v in enumerate(self.quadratic_vars)] + qvars = [(i, v) for i, v in enumerate(self.quadratic_vars)] if sort: qvars = sorted(qvars, key=lambda x: (str(x[1][0]), str(x[1][1]))) - for i,v in qvars: + for i, v in qvars: if id(v[0]) == id(v[1]): - term = v[0]**2 + term = v[0] ** 2 else: - term = v[0]*v[1] + term = v[0] * v[1] c = self.quadratic_coefs[i] if c.__class__ in native_numeric_types: if isclose_const(c, 1.0): @@ -205,11 +229,15 @@ def to_expression(self, sort=True): elif isclose_const(c, -1.0): expr -= term else: - expr += c*term + expr += c * term else: - expr += c*term + expr += c * term - if not self.nonlinear_expr is None: + if self.nonlinear_expr is not None: + if expr.__class__ in native_numeric_types and expr == 0: + # Some "NL" expressions do not support addition + # (e.g. relational expressions) + return self.nonlinear_expr expr += self.nonlinear_expr return expr @@ -226,8 +254,12 @@ def to_expression(self, sort=True): to a solver and then be deleted. """ -#@profile -def generate_standard_repn(expr, idMap=None, compute_values=True, verbose=False, quadratic=True, repn=None): + + +# @profile +def generate_standard_repn( + expr, idMap=None, compute_values=True, verbose=False, quadratic=True, repn=None +): # # Use a custom Results object # @@ -273,64 +305,68 @@ def generate_standard_repn(expr, idMap=None, compute_values=True, verbose=False, # The expression is linear # elif expr.__class__ is EXPR.LinearExpression: + linear_coefs = {} + linear_vars = {} + C_ = 0 if compute_values: - C_ = EXPR.evaluate_expression(expr.constant) - else: - C_ = expr.constant - if compute_values: - linear_coefs = {} - for c,v in zip(expr.linear_coefs, expr.linear_vars): - if c.__class__ in native_numeric_types: - cval = c - elif c.is_expression_type(): - cval = EXPR.evaluate_expression(c) - else: - cval = value(c) - if v.fixed: - C_ += cval * v.value - else: + for arg in expr.args: + if arg.__class__ is EXPR.MonomialTermExpression: + c, v = arg.args + if c.__class__ not in native_numeric_types: + c = EXPR.evaluate_expression(c) + if v.fixed: + C_ += c * v.value + continue id_ = id(v) - if not id_ in idMap[None]: - key = len(idMap) - 1 - idMap[None][id_] = key - idMap[key] = v + if id_ in linear_coefs: + linear_coefs[id_] += c else: - key = idMap[None][id_] - if key in linear_coefs: - linear_coefs[key] += cval - else: - linear_coefs[key] = cval - keys = list(linear_coefs.keys()) - repn.linear_vars = tuple(idMap[key] for key in keys) - repn.linear_coefs = tuple(linear_coefs[key] for key in keys) - else: - linear_coefs = {} - for c,v in zip(expr.linear_coefs, expr.linear_vars): - if v.fixed: - C_ += c*v + linear_coefs[id_] = c + linear_vars[id_] = v + elif arg.__class__ in native_numeric_types: + C_ += arg else: + C_ += EXPR.evaluate_expression(arg) + else: # compute_values == False + for arg in expr.args: + if arg.__class__ is EXPR.MonomialTermExpression: + c, v = arg.args + if v.fixed: + C_ += c * v + continue id_ = id(v) - if not id_ in idMap[None]: - key = len(idMap) - 1 - idMap[None][id_] = key - idMap[key] = v + if id_ in linear_coefs: + linear_coefs[id_] += c else: - key = idMap[None][id_] - if key in linear_coefs: - linear_coefs[key] += c - else: - linear_coefs[key] = c - keys = list(linear_coefs.keys()) - repn.linear_vars = tuple(idMap[key] for key in keys) - repn.linear_coefs = tuple(linear_coefs[key] for key in keys) + linear_coefs[id_] = c + linear_vars[id_] = v + else: + C_ += arg + + vars_ = [] + coef_ = [] + for id_, coef in linear_coefs.items(): + if coef.__class__ in native_numeric_types and not coef: + continue + if id_ not in idMap[None]: + key = len(idMap) - 1 + idMap[None][id_] = key + idMap[key] = linear_vars[id_] + else: + key = idMap[None][id_] + vars_.append(idMap[key]) + coef_.append(coef) + + repn.linear_vars = tuple(vars_) + repn.linear_coefs = tuple(coef_) repn.constant = C_ return repn # # Unknown expression object # - elif not expr.is_expression_type(): #pragma: nocover - raise ValueError("Unexpected expression type: "+str(expr)) + elif not expr.is_expression_type(): # pragma: nocover + raise ValueError("Unexpected expression type: " + str(expr)) # # WEH - Checking the polynomial degree didn't @@ -338,20 +374,23 @@ def generate_standard_repn(expr, idMap=None, compute_values=True, verbose=False, # in as a comment for now, since we're not # done tuning this code. # - #degree = expr.polynomial_degree() - #if degree == 1: + # degree = expr.polynomial_degree() + # if degree == 1: # return _generate_linear_standard_repn(expr, # idMap=idMap, # compute_values=compute_values, # verbose=verbose, # repn=repn) - #else: - return _generate_standard_repn(expr, - idMap=idMap, - compute_values=compute_values, - verbose=verbose, - quadratic=quadratic, - repn=repn) + # else: + return _generate_standard_repn( + expr, + idMap=idMap, + compute_values=compute_values, + verbose=verbose, + quadratic=quadratic, + repn=repn, + ) + ##----------------------------------------------------------------------- ## @@ -359,6 +398,7 @@ def generate_standard_repn(expr, idMap=None, compute_values=True, verbose=False, ## ##----------------------------------------------------------------------- + class ResultsWithQuadratics(object): __slot__ = ('const', 'nonl', 'linear', 'quadratic') @@ -366,18 +406,24 @@ def __init__(self, constant=0, nonl=0, linear=None, quadratic=None): self.constant = constant self.nonl = nonl self.linear = {} - #if linear is None: + # if linear is None: # self.linear = {} - #else: + # else: # self.linear = linear self.quadratic = {} - #if quadratic is None: + # if quadratic is None: # self.quadratic = {} - #else: + # else: # self.quadratic = quadratic - def __str__(self): #pragma: nocover - return "Const:\t%s\nLinear:\t%s\nQuadratic:\t%s\nNonlinear:\t%s" % (str(self.constant), str(self.linear), str(self.quadratic), str(self.nonl)) + def __str__(self): # pragma: nocover + return "Const:\t%s\nLinear:\t%s\nQuadratic:\t%s\nNonlinear:\t%s" % ( + str(self.constant), + str(self.linear), + str(self.quadratic), + str(self.nonl), + ) + class ResultsWithoutQuadratics(object): __slot__ = ('const', 'nonl', 'linear') @@ -386,18 +432,23 @@ def __init__(self, constant=0, nonl=0, linear=None): self.constant = constant self.nonl = nonl self.linear = {} - #if linear is None: + # if linear is None: # self.linear = {} - #else: + # else: # self.linear = linear - def __str__(self): #pragma: nocover - return "Const:\t%s\nLinear:\t%s\nNonlinear:\t%s" % (str(self.constant), str(self.linear), str(self.nonl)) + def __str__(self): # pragma: nocover + return "Const:\t%s\nLinear:\t%s\nNonlinear:\t%s" % ( + str(self.constant), + str(self.linear), + str(self.nonl), + ) + Results = ResultsWithQuadratics -#@profile +# @profile def _collect_sum(exp, multiplier, idMap, compute_values, verbose, quadratic): ans = Results() nonl = [] @@ -405,14 +456,19 @@ def _collect_sum(exp, multiplier, idMap, compute_values, verbose, quadratic): for e_ in itertools.islice(exp._args_, exp.nargs()): if e_.__class__ is EXPR.MonomialTermExpression: - lhs, v = e_._args_ - if compute_values and not lhs.__class__ in native_numeric_types: + lhs, v = e_.args + if lhs.__class__ in native_numeric_types: + if not lhs: + continue + elif compute_values: lhs = value(lhs) + if not lhs: + continue if v.fixed: if compute_values: - ans.constant += multiplier*lhs*value(v) + ans.constant += multiplier * lhs * value(v) else: - ans.constant += multiplier*lhs*v + ans.constant += multiplier * lhs * v else: id_ = id(v) if id_ in varkeys: @@ -422,17 +478,17 @@ def _collect_sum(exp, multiplier, idMap, compute_values, verbose, quadratic): varkeys[id_] = key idMap[key] = v if key in ans.linear: - ans.linear[key] += multiplier*lhs + ans.linear[key] += multiplier * lhs else: - ans.linear[key] = multiplier*lhs + ans.linear[key] = multiplier * lhs elif e_.__class__ in native_numeric_types: - ans.constant += multiplier*e_ + ans.constant += multiplier * e_ elif e_.is_variable_type(): if e_.fixed: if compute_values: - ans.constant += multiplier*e_.value + ans.constant += multiplier * e_.value else: - ans.constant += multiplier*e_ + ans.constant += multiplier * e_ else: id_ = id(e_) if id_ in varkeys: @@ -451,16 +507,17 @@ def _collect_sum(exp, multiplier, idMap, compute_values, verbose, quadratic): else: ans.constant += multiplier * e_ else: - res_ = _collect_standard_repn(e_, multiplier, idMap, - compute_values, verbose, quadratic) + res_ = _collect_standard_repn( + e_, multiplier, idMap, compute_values, verbose, quadratic + ) # # Add returned from recursion # ans.constant += res_.constant if not (res_.nonl.__class__ in native_numeric_types and res_.nonl == 0): nonl.append(res_.nonl) - for i in res_.linear: - ans.linear[i] = ans.linear.get(i,0) + res_.linear[i] + for i, v in res_.linear.items(): + ans.linear[i] = ans.linear.get(i, 0) + v if quadratic: for i in res_.quadratic: ans.quadratic[i] = ans.quadratic.get(i, 0) + res_.quadratic[i] @@ -470,18 +527,32 @@ def _collect_sum(exp, multiplier, idMap, compute_values, verbose, quadratic): ans.nonl = nonl[0] else: ans.nonl = EXPR.SumExpression(nonl) + zero_coef = [ + k + for k, coef in ans.linear.items() + if coef.__class__ in native_numeric_types and not coef + ] + for k in zero_coef: + ans.linear.pop(k) return ans -#@profile + +# @profile def _collect_term(exp, multiplier, idMap, compute_values, verbose, quadratic): # # LHS is a numeric value # if exp._args_[0].__class__ in native_numeric_types: - if exp._args_[0] == 0: # TODO: coverage? + if exp._args_[0] == 0: # TODO: coverage? return Results() - return _collect_standard_repn(exp._args_[1], multiplier * exp._args_[0], idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[1], + multiplier * exp._args_[0], + idMap, + compute_values, + verbose, + quadratic, + ) # # LHS is a non-variable expression # @@ -490,29 +561,54 @@ def _collect_term(exp, multiplier, idMap, compute_values, verbose, quadratic): val = value(exp._args_[0]) if val == 0: return Results() - return _collect_standard_repn(exp._args_[1], multiplier * val, idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[1], + multiplier * val, + idMap, + compute_values, + verbose, + quadratic, + ) else: - return _collect_standard_repn(exp._args_[1], multiplier*exp._args_[0], idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[1], + multiplier * exp._args_[0], + idMap, + compute_values, + verbose, + quadratic, + ) + def _collect_prod(exp, multiplier, idMap, compute_values, verbose, quadratic): # # LHS is a numeric value # if exp._args_[0].__class__ in native_numeric_types: - if exp._args_[0] == 0: # TODO: coverage? + if exp._args_[0] == 0: # TODO: coverage? return Results() - return _collect_standard_repn(exp._args_[1], multiplier * exp._args_[0], idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[1], + multiplier * exp._args_[0], + idMap, + compute_values, + verbose, + quadratic, + ) # # RHS is a numeric value # if exp._args_[1].__class__ in native_numeric_types: - if exp._args_[1] == 0: # TODO: coverage? + if exp._args_[1] == 0: # TODO: coverage? return Results() - return _collect_standard_repn(exp._args_[0], multiplier * exp._args_[1], idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[0], + multiplier * exp._args_[1], + idMap, + compute_values, + verbose, + quadratic, + ) # # LHS is a non-variable expression # @@ -521,11 +617,23 @@ def _collect_prod(exp, multiplier, idMap, compute_values, verbose, quadratic): val = value(exp._args_[0]) if val == 0: return Results() - return _collect_standard_repn(exp._args_[1], multiplier * val, idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[1], + multiplier * val, + idMap, + compute_values, + verbose, + quadratic, + ) else: - return _collect_standard_repn(exp._args_[1], multiplier*exp._args_[0], idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[1], + multiplier * exp._args_[0], + idMap, + compute_values, + verbose, + quadratic, + ) # # RHS is a non-variable expression # @@ -534,108 +642,168 @@ def _collect_prod(exp, multiplier, idMap, compute_values, verbose, quadratic): val = value(exp._args_[1]) if val == 0: return Results() - return _collect_standard_repn(exp._args_[0], multiplier * val, idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[0], + multiplier * val, + idMap, + compute_values, + verbose, + quadratic, + ) else: - return _collect_standard_repn(exp._args_[0], multiplier*exp._args_[1], idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[0], + multiplier * exp._args_[1], + idMap, + compute_values, + verbose, + quadratic, + ) # # Both the LHS and RHS are potentially variable ... # # Collect LHS # - lhs = _collect_standard_repn(exp._args_[0], 1, idMap, - compute_values, verbose, quadratic) - lhs_nonl_None = lhs.nonl.__class__ in native_numeric_types and lhs.nonl == 0 + lhs = _collect_standard_repn( + exp._args_[0], 1, idMap, compute_values, verbose, quadratic + ) + lhs_nonl_None = lhs.nonl.__class__ in native_numeric_types and not lhs.nonl # # LHS is potentially variable, but it turns out to be a constant # because the variables were fixed. # - if lhs_nonl_None and len(lhs.linear) == 0 and (not quadratic or len(lhs.quadratic) == 0): + if ( + lhs_nonl_None + and len(lhs.linear) == 0 + and (not quadratic or len(lhs.quadratic) == 0) + ): if lhs.constant.__class__ in native_numeric_types and lhs.constant == 0: return Results() if compute_values: val = value(lhs.constant) - if val == 0: # TODO: coverage? + if val == 0: # TODO: coverage? return Results() - return _collect_standard_repn(exp._args_[1], multiplier*val, idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[1], + multiplier * val, + idMap, + compute_values, + verbose, + quadratic, + ) else: - return _collect_standard_repn(exp._args_[1], multiplier*lhs.constant, idMap, - compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[1], + multiplier * lhs.constant, + idMap, + compute_values, + verbose, + quadratic, + ) # # Collect RHS # - rhs = _collect_standard_repn(exp._args_[1], 1, idMap, - compute_values, verbose, quadratic) - rhs_nonl_None = rhs.nonl.__class__ in native_numeric_types and rhs.nonl == 0 + rhs = _collect_standard_repn( + exp._args_[1], 1, idMap, compute_values, verbose, quadratic + ) + rhs_nonl_None = rhs.nonl.__class__ in native_numeric_types and not rhs.nonl # # If RHS is zero, then return an empty results # - if rhs_nonl_None and len(rhs.linear) == 0 and (not quadratic or len(rhs.quadratic) == 0) and rhs.constant.__class__ in native_numeric_types and rhs.constant == 0: + if ( + rhs_nonl_None + and len(rhs.linear) == 0 + and (not quadratic or len(rhs.quadratic) == 0) + and rhs.constant.__class__ in native_numeric_types + and rhs.constant == 0 + ): return Results() # # If either the LHS or RHS are nonlinear, then simply return the nonlinear expression # if not lhs_nonl_None or not rhs_nonl_None: - return Results(nonl=multiplier*exp) + return Results(nonl=multiplier * exp) # If the resulting expression has a polynomial degree greater than 2 # (1 if quadratic is False), then simply return this as a general # nonlinear expression # - if ( max(1 if lhs.linear else 0, 2 if quadratic and lhs.quadratic else 0) + - max(1 if rhs.linear else 0, 2 if quadratic and rhs.quadratic else 0) - > (2 if quadratic else 1) ): - return Results(nonl=multiplier*exp) + if max(1 if lhs.linear else 0, 2 if quadratic and lhs.quadratic else 0) + max( + 1 if rhs.linear else 0, 2 if quadratic and rhs.quadratic else 0 + ) > (2 if quadratic else 1): + return Results(nonl=multiplier * exp) ans = Results() - ans.constant = multiplier*lhs.constant * rhs.constant + ans.constant = multiplier * lhs.constant * rhs.constant if not (lhs.constant.__class__ in native_numeric_types and lhs.constant == 0): for key, coef in rhs.linear.items(): - ans.linear[key] = multiplier*coef*lhs.constant + ans.linear[key] = multiplier * coef * lhs.constant if not (rhs.constant.__class__ in native_numeric_types and rhs.constant == 0): for key, coef in lhs.linear.items(): if key in ans.linear: - ans.linear[key] += multiplier*coef*rhs.constant + ans.linear[key] += multiplier * coef * rhs.constant else: - ans.linear[key] = multiplier*coef*rhs.constant + ans.linear[key] = multiplier * coef * rhs.constant if quadratic: if not (lhs.constant.__class__ in native_numeric_types and lhs.constant == 0): for key, coef in rhs.quadratic.items(): - ans.quadratic[key] = multiplier*coef*lhs.constant + ans.quadratic[key] = multiplier * coef * lhs.constant if not (rhs.constant.__class__ in native_numeric_types and rhs.constant == 0): for key, coef in lhs.quadratic.items(): if key in ans.quadratic: - ans.quadratic[key] += multiplier*coef*rhs.constant + ans.quadratic[key] += multiplier * coef * rhs.constant else: - ans.quadratic[key] = multiplier*coef*rhs.constant + ans.quadratic[key] = multiplier * coef * rhs.constant for lkey, lcoef in lhs.linear.items(): for rkey, rcoef in rhs.linear.items(): ndx = (lkey, rkey) if lkey <= rkey else (rkey, lkey) if ndx in ans.quadratic: - ans.quadratic[ndx] += multiplier*lcoef*rcoef + ans.quadratic[ndx] += multiplier * lcoef * rcoef else: - ans.quadratic[ndx] = multiplier*lcoef*rcoef + ans.quadratic[ndx] = multiplier * lcoef * rcoef # TODO - Use quicksum here? - el_linear = multiplier*sum(coef*idMap[key] for key, coef in lhs.linear.items()) - er_linear = multiplier*sum(coef*idMap[key] for key, coef in rhs.linear.items()) - el_quadratic = multiplier*sum(coef*idMap[key[0]]*idMap[key[1]] for key, coef in lhs.quadratic.items()) - er_quadratic = multiplier*sum(coef*idMap[key[0]]*idMap[key[1]] for key, coef in rhs.quadratic.items()) - ans.nonl += el_linear*er_quadratic + el_quadratic*er_linear + el_linear = multiplier * sum( + coef * idMap[key] + for key, coef in lhs.linear.items() + if coef.__class__ not in native_numeric_types or coef + ) + er_linear = multiplier * sum( + coef * idMap[key] + for key, coef in rhs.linear.items() + if coef.__class__ not in native_numeric_types or coef + ) + el_quadratic = multiplier * sum( + coef * idMap[key[0]] * idMap[key[1]] + for key, coef in lhs.quadratic.items() + if coef.__class__ not in native_numeric_types or coef + ) + er_quadratic = multiplier * sum( + coef * idMap[key[0]] * idMap[key[1]] + for key, coef in rhs.quadratic.items() + if coef.__class__ not in native_numeric_types or coef + ) + if (el_linear.__class__ not in native_numeric_types or el_linear) and ( + er_quadratic.__class__ not in native_numeric_types or er_quadratic + ): + ans.nonl += el_linear * er_quadratic + if (er_linear.__class__ not in native_numeric_types or er_linear) and ( + el_quadratic.__class__ not in native_numeric_types or el_quadratic + ): + ans.nonl += er_linear * el_quadratic return ans -#@profile + +# @profile def _collect_var(exp, multiplier, idMap, compute_values, verbose, quadratic): ans = Results() if exp.fixed: if compute_values: - ans.constant += multiplier*value(exp) + ans.constant += multiplier * value(exp) else: - ans.constant += multiplier*exp + ans.constant += multiplier * exp else: id_ = id(exp) if id_ in idMap[None]: @@ -648,6 +816,7 @@ def _collect_var(exp, multiplier, idMap, compute_values, verbose, quadratic): return ans + def _collect_pow(exp, multiplier, idMap, compute_values, verbose, quadratic): # # Exponent is a numeric value @@ -668,12 +837,18 @@ def _collect_pow(exp, multiplier, idMap, compute_values, verbose, quadratic): # Otherwise collect a standard repn # else: - res = _collect_standard_repn(exp._args_[1], 1, idMap, compute_values, verbose, quadratic) + res = _collect_standard_repn( + exp._args_[1], 1, idMap, compute_values, verbose, quadratic + ) # # If the expression is variable, then return a nonlinear expression # - if not (res.nonl.__class__ in native_numeric_types and res.nonl == 0) or len(res.linear) > 0 or (quadratic and len(res.quadratic) > 0): - return Results(nonl=multiplier*exp) + if ( + not (res.nonl.__class__ in native_numeric_types and res.nonl == 0) + or len(res.linear) > 0 + or (quadratic and len(res.quadratic) > 0) + ): + return Results(nonl=multiplier * exp) exponent = res.constant if exponent.__class__ in native_numeric_types: @@ -688,32 +863,40 @@ def _collect_pow(exp, multiplier, idMap, compute_values, verbose, quadratic): # Return the standard repn for arg(0) # elif exponent == 1: - return _collect_standard_repn(exp._args_[0], multiplier, idMap, compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[0], multiplier, idMap, compute_values, verbose, quadratic + ) # # Ignore #**2 unless quadratic==True # elif exponent == 2 and quadratic: - res =_collect_standard_repn(exp._args_[0], 1, idMap, compute_values, verbose, quadratic) + res = _collect_standard_repn( + exp._args_[0], 1, idMap, compute_values, verbose, quadratic + ) # # If arg(0) is nonlinear, then this is a nonlinear repn # - if not (res.nonl.__class__ in native_numeric_types and res.nonl == 0) or len(res.quadratic) > 0: - return Results(nonl=multiplier*exp) + if ( + not (res.nonl.__class__ in native_numeric_types and res.nonl == 0) + or len(res.quadratic) > 0 + ): + return Results(nonl=multiplier * exp) # # If computing values and no linear terms, then the return a constant repn # elif compute_values and len(res.linear) == 0: - return Results(constant=multiplier*res.constant**exponent) + return Results(constant=multiplier * res.constant**exponent) # # If the base is linear, then we compute the quadratic expression for it. # else: ans = Results() - has_constant = (res.constant.__class__ - not in native_numeric_types - or res.constant != 0) + has_constant = ( + res.constant.__class__ not in native_numeric_types + or res.constant != 0 + ) if has_constant: - ans.constant = multiplier*res.constant*res.constant + ans.constant = multiplier * res.constant * res.constant # this is reversed since we want to pop off the end for efficiency # and the quadratic terms have a convention that the indexing tuple @@ -723,11 +906,11 @@ def _collect_pow(exp, multiplier, idMap, compute_values, verbose, quadratic): key1 = keys.pop() coef1 = res.linear[key1] if has_constant: - ans.linear[key1] = 2*multiplier*coef1*res.constant - ans.quadratic[key1,key1] = multiplier*coef1*coef1 + ans.linear[key1] = 2 * multiplier * coef1 * res.constant + ans.quadratic[key1, key1] = multiplier * coef1 * coef1 for key2 in keys: coef2 = res.linear[key2] - ans.quadratic[key1,key2] = 2*multiplier*coef1*coef2 + ans.quadratic[key1, key2] = 2 * multiplier * coef1 * coef2 return ans # @@ -735,179 +918,231 @@ def _collect_pow(exp, multiplier, idMap, compute_values, verbose, quadratic): # if exp._args_[0].__class__ in native_numeric_types or exp._args_[0].is_fixed(): if compute_values: - return Results(constant=multiplier*value(exp._args_[0])**exponent) + return Results(constant=multiplier * value(exp._args_[0]) ** exponent) else: - return Results(constant=multiplier*exp) + return Results(constant=multiplier * exp) # # Return a nonlinear expression here # - return Results(nonl=multiplier*exp) + return Results(nonl=multiplier * exp) + def _collect_division(exp, multiplier, idMap, compute_values, verbose, quadratic): - if exp._args_[1].__class__ in native_numeric_types or not exp._args_[1].is_potentially_variable(): # TODO: coverage? + if ( + exp._args_[1].__class__ in native_numeric_types + or not exp._args_[1].is_potentially_variable() + ): # TODO: coverage? # Denominator is trivially constant if compute_values: denom = 1.0 * value(exp._args_[1]) else: denom = 1.0 * exp._args_[1] else: - res =_collect_standard_repn(exp._args_[1], 1, idMap, compute_values, verbose, quadratic) - if not (res.nonl.__class__ in native_numeric_types and res.nonl == 0) or len(res.linear) > 0 or (quadratic and len(res.quadratic) > 0): + res = _collect_standard_repn( + exp._args_[1], 1, idMap, compute_values, verbose, quadratic + ) + if ( + not (res.nonl.__class__ in native_numeric_types and res.nonl == 0) + or len(res.linear) > 0 + or (quadratic and len(res.quadratic) > 0) + ): # Denominator is variable, give up: this is nonlinear - return Results(nonl=multiplier*exp) + return Results(nonl=multiplier * exp) else: # Denominaor ended up evaluating to a constant - denom = 1.0*res.constant + denom = 1.0 * res.constant if denom.__class__ in native_numeric_types and denom == 0: raise ZeroDivisionError - if exp._args_[0].__class__ in native_numeric_types or not exp._args_[0].is_potentially_variable(): + if ( + exp._args_[0].__class__ in native_numeric_types + or not exp._args_[0].is_potentially_variable() + ): num = exp._args_[0] if compute_values: num = value(num) - return Results(constant=multiplier*num/denom) + return Results(constant=multiplier * num / denom) + + return _collect_standard_repn( + exp._args_[0], multiplier / denom, idMap, compute_values, verbose, quadratic + ) - return _collect_standard_repn(exp._args_[0], multiplier/denom, idMap, compute_values, verbose, quadratic) def _collect_branching_expr(exp, multiplier, idMap, compute_values, verbose, quadratic): - if exp._if.__class__ in native_numeric_types: # TODO: coverage? - if_val = exp._if - elif not exp._if.is_potentially_variable(): + _if, _then, _else = exp.args + if _if.__class__ in native_types: + if_val = _if + elif not _if.is_potentially_variable(): if compute_values: - if_val = value(exp._if) + if_val = value(_if) else: - return Results(nonl=multiplier*exp) + return Results(nonl=multiplier * exp) else: - res = _collect_standard_repn(exp._if, 1, idMap, compute_values, verbose, quadratic) - if not (res.nonl.__class__ in native_numeric_types and res.nonl == 0) or len(res.linear) > 0 or (quadratic and len(res.quadratic) > 0): - return Results(nonl=multiplier*exp) + res = _collect_standard_repn(_if, 1, idMap, compute_values, verbose, quadratic) + if ( + not (res.nonl.__class__ in native_numeric_types and res.nonl == 0) + or len(res.linear) > 0 + or (quadratic and len(res.quadratic) > 0) + ): + return Results(nonl=multiplier * exp) elif res.constant.__class__ in native_numeric_types: if_val = res.constant else: - return Results(constant=multiplier*exp) + return Results(constant=multiplier * exp) if if_val: - if exp._then.__class__ in native_numeric_types: - return Results(constant=multiplier*exp._then) - return _collect_standard_repn(exp._then, multiplier, idMap, compute_values, verbose, quadratic) + if _then.__class__ in native_numeric_types: + return Results(constant=multiplier * _then) + return _collect_standard_repn( + _then, multiplier, idMap, compute_values, verbose, quadratic + ) else: - if exp._else.__class__ in native_numeric_types: - return Results(constant=multiplier*exp._else) - return _collect_standard_repn(exp._else, multiplier, idMap, compute_values, verbose, quadratic) + if _else.__class__ in native_numeric_types: + return Results(constant=multiplier * _else) + return _collect_standard_repn( + _else, multiplier, idMap, compute_values, verbose, quadratic + ) + def _collect_nonl(exp, multiplier, idMap, compute_values, verbose, quadratic): - res = _collect_standard_repn(exp._args_[0], 1, idMap, compute_values, verbose, quadratic) - if not (res.nonl.__class__ in native_numeric_types and res.nonl == 0) or len(res.linear) > 0 or (quadratic and len(res.quadratic) > 0): - return Results(nonl=multiplier*exp) + res = _collect_standard_repn( + exp._args_[0], 1, idMap, compute_values, verbose, quadratic + ) + if ( + not (res.nonl.__class__ in native_numeric_types and res.nonl == 0) + or len(res.linear) > 0 + or (quadratic and len(res.quadratic) > 0) + ): + return Results(nonl=multiplier * exp) if compute_values: - return Results(constant=multiplier*exp._apply_operation([res.constant])) + return Results(constant=multiplier * exp._apply_operation([res.constant])) else: - return Results(constant=multiplier*exp) + return Results(constant=multiplier * exp) + def _collect_negation(exp, multiplier, idMap, compute_values, verbose, quadratic): - return _collect_standard_repn(exp._args_[0], -1*multiplier, idMap, compute_values, verbose, quadratic) + return _collect_standard_repn( + exp._args_[0], -1 * multiplier, idMap, compute_values, verbose, quadratic + ) + # # TODO - Verify if code is used # def _collect_const(exp, multiplier, idMap, compute_values, verbose, quadratic): if compute_values: - return Results(constant=multiplier*value(exp)) + return Results(constant=multiplier * value(exp)) else: - return Results(constant=multiplier*exp) + return Results(constant=multiplier * exp) + def _collect_identity(exp, multiplier, idMap, compute_values, verbose, quadratic): if exp._args_[0].__class__ in native_numeric_types: - return Results(constant=multiplier*exp._args_[0]) + return Results(constant=multiplier * exp._args_[0]) if not exp._args_[0].is_potentially_variable(): if compute_values: - return Results(constant=multiplier*value(exp._args_[0])) + return Results(constant=multiplier * value(exp._args_[0])) else: - return Results(constant=multiplier*exp._args_[0]) - return _collect_standard_repn(exp.expr, multiplier, idMap, compute_values, verbose, quadratic) + return Results(constant=multiplier * exp._args_[0]) + return _collect_standard_repn( + exp.expr, multiplier, idMap, compute_values, verbose, quadratic + ) + def _collect_linear(exp, multiplier, idMap, compute_values, verbose, quadratic): ans = Results() if compute_values: - ans.constant = multiplier*value(exp.constant) + ans.constant = multiplier * value(exp.constant) else: - ans.constant = multiplier*exp.constant + ans.constant = multiplier * exp.constant - for c,v in zip(exp.linear_coefs, exp.linear_vars): + linear = {} + linear_vars = {} + for c, v in zip(exp.linear_coefs, exp.linear_vars): if v.fixed: if compute_values: ans.constant += multiplier * value(c) * value(v) else: ans.constant += multiplier * c * v else: - id_ = id(v) - if id_ in idMap[None]: - key = idMap[None][id_] - else: - key = len(idMap) - 1 - idMap[None][id_] = key - idMap[key] = v + key = id(v) if compute_values: - if key in ans.linear: - ans.linear[key] += multiplier*value(c) + if key in linear: + linear[key] += multiplier * value(c) else: - ans.linear[key] = multiplier*value(c) + linear[key] = multiplier * value(c) + linear_vars[key] = v else: - if key in ans.linear: - ans.linear[key] += multiplier*c + if key in linear: + linear[key] += multiplier * c else: - ans.linear[key] = multiplier*c + linear[key] = multiplier * c + linear_vars[key] = v + for id_, coef in linear.items(): + if coef.__class__ in native_numeric_types and not coef: + continue + if id_ in idMap[None]: + key = idMap[None][id_] + else: + key = len(idMap) - 1 + idMap[None][id_] = key + idMap[key] = linear_vars[id_] + ans.linear[key] = coef return ans + def _collect_comparison(exp, multiplier, idMap, compute_values, verbose, quadratic): - return Results(nonl=multiplier*exp) + if multiplier != 1: + # this *will* generate an exception with the new relational expressions + exp = multiplier * exp + return Results(nonl=exp) + def _collect_external_fn(exp, multiplier, idMap, compute_values, verbose, quadratic): if compute_values and exp.is_fixed(): - return Results(constant=multiplier*value(exp)) - return Results(nonl=multiplier*exp) + return Results(constant=multiplier * value(exp)) + return Results(nonl=multiplier * exp) _repn_collectors = { - EXPR.SumExpression : _collect_sum, - EXPR.ProductExpression : _collect_prod, - EXPR.MonomialTermExpression : _collect_term, - EXPR.PowExpression : _collect_pow, - EXPR.DivisionExpression : _collect_division, - EXPR.Expr_ifExpression : _collect_branching_expr, - EXPR.UnaryFunctionExpression : _collect_nonl, - EXPR.AbsExpression : _collect_nonl, - EXPR.NegationExpression : _collect_negation, - EXPR.LinearExpression : _collect_linear, - EXPR.InequalityExpression : _collect_comparison, - EXPR.RangedExpression : _collect_comparison, - EXPR.EqualityExpression : _collect_comparison, - EXPR.ExternalFunctionExpression : _collect_external_fn, - #_ConnectorData : _collect_linear_connector, - #ScalarConnector : _collect_linear_connector, - _ParamData : _collect_const, - ScalarParam : _collect_const, - #param.Param : _collect_linear_const, - #parameter : _collect_linear_const, - NumericConstant : _collect_const, - _GeneralVarData : _collect_var, - ScalarVar : _collect_var, - Var : _collect_var, - variable : _collect_var, - IVariable : _collect_var, - _GeneralExpressionData : _collect_identity, - ScalarExpression : _collect_identity, - expression : _collect_identity, - noclone : _collect_identity, - _ExpressionData : _collect_identity, - Expression : _collect_identity, - _GeneralObjectiveData : _collect_identity, - ScalarObjective : _collect_identity, - objective : _collect_identity, - } - - -def _collect_standard_repn(exp, multiplier, idMap, - compute_values, verbose, quadratic): + EXPR.SumExpression: _collect_sum, + EXPR.ProductExpression: _collect_prod, + EXPR.MonomialTermExpression: _collect_term, + EXPR.PowExpression: _collect_pow, + EXPR.DivisionExpression: _collect_division, + EXPR.Expr_ifExpression: _collect_branching_expr, + EXPR.UnaryFunctionExpression: _collect_nonl, + EXPR.AbsExpression: _collect_nonl, + EXPR.NegationExpression: _collect_negation, + EXPR.LinearExpression: _collect_linear, + EXPR.InequalityExpression: _collect_comparison, + EXPR.RangedExpression: _collect_comparison, + EXPR.EqualityExpression: _collect_comparison, + EXPR.ExternalFunctionExpression: _collect_external_fn, + # _ConnectorData : _collect_linear_connector, + # ScalarConnector : _collect_linear_connector, + _ParamData: _collect_const, + ScalarParam: _collect_const, + # param.Param : _collect_linear_const, + # parameter : _collect_linear_const, + NumericConstant: _collect_const, + _GeneralVarData: _collect_var, + ScalarVar: _collect_var, + Var: _collect_var, + variable: _collect_var, + IVariable: _collect_var, + _GeneralExpressionData: _collect_identity, + ScalarExpression: _collect_identity, + expression: _collect_identity, + noclone: _collect_identity, + _ExpressionData: _collect_identity, + Expression: _collect_identity, + _GeneralObjectiveData: _collect_identity, + ScalarObjective: _collect_identity, + objective: _collect_identity, +} + + +def _collect_standard_repn(exp, multiplier, idMap, compute_values, verbose, quadratic): fn = _repn_collectors.get(exp.__class__, None) if fn is not None: return fn(exp, multiplier, idMap, compute_values, verbose, quadratic) @@ -915,8 +1150,9 @@ def _collect_standard_repn(exp, multiplier, idMap, # Catch any known numeric constants # if exp.__class__ in native_numeric_types or not exp.is_potentially_variable(): - return _collect_const(exp, multiplier, idMap, compute_values, - verbose, quadratic) + return _collect_const( + exp, multiplier, idMap, compute_values, verbose, quadratic + ) # # These are types that might be extended using duck typing. # @@ -925,15 +1161,19 @@ def _collect_standard_repn(exp, multiplier, idMap, fn = _collect_var if exp.is_named_expression_type(): fn = _collect_identity - except AttributeError: # TODO: coverage? + except AttributeError: # TODO: coverage? pass if fn is not None: _repn_collectors[exp.__class__] = fn return fn(exp, multiplier, idMap, compute_values, verbose, quadratic) - raise ValueError( "Unexpected expression (type %s)" % type(exp).__name__) # TODO: coverage? + raise ValueError( + "Unexpected expression (type %s)" % type(exp).__name__ + ) # TODO: coverage? -def _generate_standard_repn(expr, idMap=None, compute_values=True, verbose=False, quadratic=True, repn=None): +def _generate_standard_repn( + expr, idMap=None, compute_values=True, verbose=False, quadratic=True, repn=None +): if expr.__class__ is EXPR.SumExpression: # # This is the common case, so start collecting the sum @@ -953,16 +1193,15 @@ def _generate_standard_repn(expr, idMap=None, compute_values=True, verbose=False # v = [] c = [] - for key in ans.linear: - val = ans.linear[key] + for key, val in ans.linear.items(): if val.__class__ in native_numeric_types: - if val == 0: + if not val: continue - elif val.is_constant(): # TODO: coverage? + elif val.is_constant(): # TODO: coverage? if value(val) == 0: continue v.append(idMap[key]) - c.append(ans.linear[key]) + c.append(val) repn.linear_vars = tuple(v) repn.linear_coefs = tuple(c) @@ -972,13 +1211,13 @@ def _generate_standard_repn(expr, idMap=None, compute_values=True, verbose=False for key in ans.quadratic: val = ans.quadratic[key] if val.__class__ in native_numeric_types: - if val == 0: # TODO: coverage? + if val == 0: # TODO: coverage? continue - elif val.is_constant(): # TODO: coverage? + elif val.is_constant(): # TODO: coverage? if value(val) == 0: continue - repn.quadratic_vars.append( (idMap[key[0]],idMap[key[1]]) ) - repn.quadratic_coefs.append( val ) + repn.quadratic_vars.append((idMap[key[0]], idMap[key[1]])) + repn.quadratic_coefs.append(val) repn.quadratic_vars = tuple(repn.quadratic_vars) repn.quadratic_coefs = tuple(repn.quadratic_coefs) v = [] @@ -989,7 +1228,7 @@ def _generate_standard_repn(expr, idMap=None, compute_values=True, verbose=False repn.quadratic_vars = tuple(v) repn.quadratic_coefs = tuple(c) - if ans.nonl is not None and not isclose_const(ans.nonl,0): + if ans.nonl is not None and not isclose_const(ans.nonl, 0): repn.nonlinear_expr = ans.nonl repn.nonlinear_vars = [] for v_ in EXPR.identify_variables(repn.nonlinear_expr, include_fixed=False): @@ -1356,108 +1595,99 @@ def _generate_linear_standard_repn(expr, idMap=None, compute_values=True, verbos def preprocess_block_objectives(block, idMap=None): - # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): + if not hasattr(block, '_repn'): block._repn = ComponentMap() block_repn = block._repn - for objective_data in block.component_data_objects(Objective, - active=True, - descend_into=False): - + for objective_data in block.component_data_objects( + Objective, active=True, descend_into=False + ): if objective_data.expr is None: - raise ValueError("No expression has been defined for objective %s" - % (objective_data.name)) + raise ValueError( + "No expression has been defined for objective %s" + % (objective_data.name) + ) try: repn = generate_standard_repn(objective_data.expr, idMap=idMap) except Exception: err = sys.exc_info()[1] - logging.getLogger('pyomo.core').error\ - ( "exception generating a standard representation for objective %s: %s" \ - % (objective_data.name, str(err)) ) + logging.getLogger('pyomo.core').error( + "exception generating a standard representation for objective %s: %s" + % (objective_data.name, str(err)) + ) raise block_repn[objective_data] = repn -def preprocess_block_constraints(block, idMap=None): +def preprocess_block_constraints(block, idMap=None): # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): + if not hasattr(block, '_repn'): block._repn = ComponentMap() block_repn = block._repn - for constraint in block.component_objects(Constraint, - active=True, - descend_into=False): - - preprocess_constraint(block, - constraint, - idMap=idMap, - block_repn=block_repn) + for constraint in block.component_objects( + Constraint, active=True, descend_into=False + ): + preprocess_constraint(block, constraint, idMap=idMap, block_repn=block_repn) -def preprocess_constraint(block, - constraint, - idMap=None, - block_repn=None): +def preprocess_constraint(block, constraint, idMap=None, block_repn=None): from pyomo.repn.beta.matrix import MatrixConstraint + if isinstance(constraint, MatrixConstraint): return # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): + if not hasattr(block, '_repn'): block._repn = ComponentMap() block_repn = block._repn for index, constraint_data in constraint.items(): - if not constraint_data.active: continue if constraint_data.body is None: raise ValueError( "No expression has been defined for the body " - "of constraint %s" % (constraint_data.name)) + "of constraint %s" % (constraint_data.name) + ) try: - repn = generate_standard_repn(constraint_data.body, - idMap=idMap) + repn = generate_standard_repn(constraint_data.body, idMap=idMap) except Exception: err = sys.exc_info()[1] logging.getLogger('pyomo.core').error( "exception generating a standard representation for " - "constraint %s: %s" - % (constraint_data.name, str(err))) + "constraint %s: %s" % (constraint_data.name, str(err)) + ) raise block_repn[constraint_data] = repn -def preprocess_constraint_data(block, - constraint_data, - idMap=None, - block_repn=None): +def preprocess_constraint_data(block, constraint_data, idMap=None, block_repn=None): # Get/Create the ComponentMap for the repn - if not hasattr(block,'_repn'): + if not hasattr(block, '_repn'): block._repn = ComponentMap() block_repn = block._repn if constraint_data.body is None: raise ValueError( "No expression has been defined for the body " - "of constraint %s" % (constraint_data.name)) + "of constraint %s" % (constraint_data.name) + ) try: - repn = generate_standard_repn(constraint_data.body, - idMap=idMap) + repn = generate_standard_repn(constraint_data.body, idMap=idMap) except Exception: err = sys.exc_info()[1] logging.getLogger('pyomo.core').error( "exception generating a standard representation for " - "constraint %s: %s" - % (constraint_data.name, str(err))) + "constraint %s: %s" % (constraint_data.name, str(err)) + ) raise block_repn[constraint_data] = repn diff --git a/pyomo/repn/tests/ampl/helper.py b/pyomo/repn/tests/ampl/helper.py index 3743da8ea93..eb09afc37cc 100644 --- a/pyomo/repn/tests/ampl/helper.py +++ b/pyomo/repn/tests/ampl/helper.py @@ -14,9 +14,12 @@ class MockFixedValue(NumericValue): value = 42 - def __init__(self, v = 42): + + def __init__(self, v=42): self.value = v + def is_fixed(self): return True + def __call__(self, exception=True): return self.value diff --git a/pyomo/repn/tests/ampl/nl_diff.py b/pyomo/repn/tests/ampl/nl_diff.py index 76d6bc99c46..ecac3967dfe 100644 --- a/pyomo/repn/tests/ampl/nl_diff.py +++ b/pyomo/repn/tests/ampl/nl_diff.py @@ -9,129 +9,6 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -import itertools -import os -import re +from pyomo.common.deprecation import relocated_module -from difflib import SequenceMatcher, unified_diff - -import pyomo.repn.plugins.nl_writer as nl_writer - -template = nl_writer.text_nl_debug_template - -_norm_whitespace = re.compile(r'[^\S\n]+') -_norm_integers = re.compile(r'(?m)\.0+$') -_norm_comment = re.compile(r'\s*#\s*') -_strip_comment = re.compile(r'\s*#.*') -_norm_negation = re.compile(r'(?m)^o2(\s*#\s*\*)?\nn-1(.0)?\s*\n') -_norm_timesone = re.compile(r'(?m)^o2(\s*#\s*\*)?\nn1(.0)?\s*\n') - -def _compare_floats(base, test, abstol=1e-14, reltol=1e-14): - base = base.split() - test = test.split() - if len(base) != len(test): - return False - for i, b in enumerate(base): - if b == test[i]: - continue - try: - b = float(b) - t = float(test[i]) - except: - return False - if abs(b - t) < abstol: - continue - if abs((b - t) / max(abs(b), abs(t))) < reltol: - continue - return False - return True - -def _update_subsets(subset, base, test): - for i, j in zip(*subset): - # Try checking for numbers - if base[i][0] == 'n' and test[j][0] == 'n': - if _compare_floats(base[i][1:], test[j][1:]): - test[j] = base[i] - elif _compare_floats(base[i], test[j]): - test[j] = base[i] - else: - # try stripping comments, but only if it results in a match - base_nc = _strip_comment.sub('', base[i]) - test_nc = _strip_comment.sub('', test[j]) - if _compare_floats(base_nc, test_nc): - if len(base_nc) > len(test_nc): - test[j] = base[i] - else: - base[i] = test[j] - -def _preprocess_data(data): - # Normalize negation (convert " * -1" to the negation operator) - data = _norm_negation.sub(template.negation, data) - # Remove multiplication by 1 - data = _norm_timesone.sub('', data) - # Normalize consecutive whitespace to a single space - data = _norm_whitespace.sub(' ', data) - # preface all comments with a single tab character - data = _norm_comment.sub('\t#', data) - # Normalize floating point integers to integers - data = _norm_integers.sub('', data) - # return the sequence of lines - return data.splitlines() - -def nl_diff(base, test, baseline='baseline', testfile='testfile'): - if test == base: - return [], [] - - test = _preprocess_data(test) - base = _preprocess_data(base) - if test == base: - return [], [] - - # First do a quick pass to check / standardize embedded numbers. - # This is a little fragile (it requires that the embedded constants - # appear in the same order in the base and test files), but we see - # cases where differences within numerical tolerances lead to huge - # add / delete chunks (instead of small replace chunks) from the - # SequenceMatcher (because it is not as fast / aggressive as Unix - # diff). Those add/remove chunks are ignored by the _update_subsets - # code below, leading to unnecessary test failures. - test_nlines = list(x for x in enumerate(test) if x[1] and x[1][0] == 'n') - base_nlines = list(x for x in enumerate(base) if x[1] and x[1][0] == 'n') - if len(test_nlines) == len(base_nlines): - for t_line, b_line in zip(test_nlines, base_nlines): - if _compare_floats(t_line[1][1:], b_line[1][1:]): - test[t_line[0]] = base[b_line[0]] - - for group in SequenceMatcher(None, base, test).get_grouped_opcodes(3): - for tag, i1, i2, j1, j2 in group: - if tag != 'replace': - continue - _update_subsets((range(i1, i2), range(j1, j2)), base, test) - - if test == base: - return [], [] - - print(''.join(unified_diff( - [_+"\n" for _ in base], - [_+"\n" for _ in test], - fromfile=baseline, - tofile=testfile))) - return base, test - -def load_nl_baseline(baseline, testfile, version='nl'): - with open(testfile, 'r') as FILE: - test = FILE.read() - if baseline.endswith('.nl'): - _tmp = baseline[:-2] + version - else: - _tmp = baseline.replace('.nl.', f'.{version}.') - if os.path.exists(_tmp): - baseline = _tmp - with open(baseline, 'r') as FILE: - base = FILE.read() - return base, test - -def load_and_compare_nl_baseline(baseline, testfile, version='nl'): - return nl_diff( - *load_nl_baseline(baseline, testfile, version), baseline, testfile - ) +relocated_module('pyomo.repn.tests.nl_diff', version='6.6.0', remove_in='6.6.1') diff --git a/pyomo/repn/tests/ampl/small10_testCase.py b/pyomo/repn/tests/ampl/small10_testCase.py index 596983e2b49..f51aea76d3e 100644 --- a/pyomo/repn/tests/ampl/small10_testCase.py +++ b/pyomo/repn/tests/ampl/small10_testCase.py @@ -19,7 +19,14 @@ # will not solve if sent to a real optimizer. # -from pyomo.environ import ConcreteModel, Var, Param, Objective, Constraint, simple_constraint_rule +from pyomo.environ import ( + ConcreteModel, + Var, + Param, + Objective, + Constraint, + simple_constraint_rule, +) model = ConcreteModel() @@ -27,45 +34,44 @@ model.y = Var() model.z = Var() model.q = Param(initialize=0.0) -model.p = Param(initialize=0.0,mutable=True) +model.p = Param(initialize=0.0, mutable=True) -model.obj = Objective( expr=model.x*model.y +\ - model.z*model.y +\ - model.q*model.y +\ - model.y*model.y*model.q +\ - model.p*model.y +\ - model.y*model.y*model.p +\ - model.y*model.y*model.z +\ - model.z*(model.y**2)) +model.obj = Objective( + expr=model.x * model.y + + model.z * model.y + + model.q * model.y + + model.y * model.y * model.q + + model.p * model.y + + model.y * model.y * model.p + + model.y * model.y * model.z + + model.z * (model.y**2) +) -model.con1 = Constraint(expr=model.x*model.y == 0) -model.con2 = Constraint(expr=model.z*model.y + model.y == 0) -model.con3 = Constraint(expr=model.q*(model.y**2) + model.y == 0) -model.con4 = Constraint(expr=model.q*model.y*model.x + model.y == 0) -model.con5 = Constraint(expr=model.p*(model.y**2) + model.y == 0) -model.con6 = Constraint(expr=model.p*model.y*model.x + model.y == 0) -model.con7 = Constraint(expr=model.z*(model.y**2) + model.y == 0) -model.con8 = Constraint(expr=model.z*model.y*model.x + model.y == 0) +model.con1 = Constraint(expr=model.x * model.y == 0) +model.con2 = Constraint(expr=model.z * model.y + model.y == 0) +model.con3 = Constraint(expr=model.q * (model.y**2) + model.y == 0) +model.con4 = Constraint(expr=model.q * model.y * model.x + model.y == 0) +model.con5 = Constraint(expr=model.p * (model.y**2) + model.y == 0) +model.con6 = Constraint(expr=model.p * model.y * model.x + model.y == 0) +model.con7 = Constraint(expr=model.z * (model.y**2) + model.y == 0) +model.con8 = Constraint(expr=model.z * model.y * model.x + model.y == 0) # Pyomo differs from AMPL in these cases that involve immutable params (q). # These never actually become constraints in Pyomo, and for good reason. -model.con9 = Constraint(expr=model.z*model.y == 0) -model.con10 = Constraint( - rule= simple_constraint_rule(model.q*(model.y**2) == 0) ) -model.con11 = Constraint( - rule= simple_constraint_rule(model.q*model.y*model.x == 0) ) -model.con12 = Constraint(expr=model.p*(model.y**2) == 0) -model.con13 = Constraint(expr=model.p*model.y*model.x == 0) -model.con14 = Constraint(expr=model.z*(model.y**2) == 0) -model.con15 = Constraint(expr=model.z*model.y*model.x == 0) -model.con16 = Constraint( - rule= simple_constraint_rule(model.q*model.y == 0) ) -model.con17 = Constraint(expr=model.p*model.y == 0) +model.con9 = Constraint(expr=model.z * model.y == 0) +model.con10 = Constraint(rule=simple_constraint_rule(model.q * (model.y**2) == 0)) +model.con11 = Constraint(rule=simple_constraint_rule(model.q * model.y * model.x == 0)) +model.con12 = Constraint(expr=model.p * (model.y**2) == 0) +model.con13 = Constraint(expr=model.p * model.y * model.x == 0) +model.con14 = Constraint(expr=model.z * (model.y**2) == 0) +model.con15 = Constraint(expr=model.z * model.y * model.x == 0) +model.con16 = Constraint(rule=simple_constraint_rule(model.q * model.y == 0)) +model.con17 = Constraint(expr=model.p * model.y == 0) ###### Add some constraint which we deactivate just ###### to make sure this is working properly -model.con1D = Constraint(expr=model.x*model.y == 0) -model.con1D_indexeda = Constraint([1,2],rule=lambda model,i: model.x*model.y == 0) -model.con1D_indexedb = Constraint([1,2],rule=lambda model,i: model.x*model.y == 0) +model.con1D = Constraint(expr=model.x * model.y == 0) +model.con1D_indexeda = Constraint([1, 2], rule=lambda model, i: model.x * model.y == 0) +model.con1D_indexedb = Constraint([1, 2], rule=lambda model, i: model.x * model.y == 0) model.con1D.deactivate() model.con1D_indexeda.deactivate() model.con1D_indexedb[1].deactivate() diff --git a/pyomo/repn/tests/ampl/small11_testCase.py b/pyomo/repn/tests/ampl/small11_testCase.py index 993fa85de59..5874007e13c 100644 --- a/pyomo/repn/tests/ampl/small11_testCase.py +++ b/pyomo/repn/tests/ampl/small11_testCase.py @@ -22,19 +22,26 @@ # from pyomo.environ import ConcreteModel, Var, Objective, Constraint, RangeSet + model = ConcreteModel() -n=3 +n = 3 + +model.x = Var([(k, i) for k in range(1, n + 1) for i in range(k, n + 1)]) -model.x = Var([(k,i) for k in range(1,n+1) for i in range(k,n+1)]) def obj_rule(model): - return model.x[n,n] + return model.x[n, n] + + model.obj = Objective(rule=obj_rule) -def var_bnd_rule(model,i): - return (-1.0, model.x[1,i], 1.0) -model.var_bnd = Constraint(RangeSet(1,n),rule=var_bnd_rule) -model.x[1,1] = 1.0 -model.x[1,1].fixed = True +def var_bnd_rule(model, i): + return (-1.0, model.x[1, i], 1.0) + + +model.var_bnd = Constraint(RangeSet(1, n), rule=var_bnd_rule) + +model.x[1, 1] = 1.0 +model.x[1, 1].fixed = True diff --git a/pyomo/repn/tests/ampl/small12_testCase.py b/pyomo/repn/tests/ampl/small12_testCase.py index a6870c78d66..63d4ba29cf6 100644 --- a/pyomo/repn/tests/ampl/small12_testCase.py +++ b/pyomo/repn/tests/ampl/small12_testCase.py @@ -18,61 +18,165 @@ # from pyomo.environ import ConcreteModel, Var, Param, Objective, Constraint, inequality -from pyomo.core.expr.current import Expr_if +from pyomo.core.expr import Expr_if model = ConcreteModel() -model.vTrue = Var(initialize=1) +model.vTrue = Var(initialize=1) model.vFalse = Var(initialize=-1) -model.pTrue = Param(initialize=1) +model.pTrue = Param(initialize=1) model.pFalse = Param(initialize=-1) model.vN1 = Var(initialize=-1) model.vP1 = Var(initialize=1) -model.v0 = Var(initialize=0) +model.v0 = Var(initialize=0) model.vN2 = Var(initialize=-2) model.vP2 = Var(initialize=2) -model.obj = Objective(expr=10.0*Expr_if(IF=model.v0, - THEN=model.vTrue, - ELSE=model.vFalse)) +model.obj = Objective( + expr=10.0 * Expr_if(IF=model.v0, THEN=model.vTrue, ELSE=model.vFalse) +) # True/False -model.c1 = Constraint(expr= Expr_if(IF=(0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) -model.c2 = Constraint(expr= Expr_if(IF=(1), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) +model.c1 = Constraint( + expr=Expr_if(IF=(0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse +) +model.c2 = Constraint( + expr=Expr_if(IF=(1), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue +) # x <= 0 -model.c3 = Constraint(expr= Expr_if(IF=(model.vN1 <= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) -model.c4 = Constraint(expr= Expr_if(IF=(model.v0 <= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) -model.c5 = Constraint(expr= Expr_if(IF=(model.vP1 <= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) +model.c3 = Constraint( + expr=Expr_if(IF=(model.vN1 <= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pTrue +) +model.c4 = Constraint( + expr=Expr_if(IF=(model.v0 <= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pTrue +) +model.c5 = Constraint( + expr=Expr_if(IF=(model.vP1 <= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pFalse +) # x < 0 -model.c6 = Constraint(expr= Expr_if(IF=(model.vN1 < 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) -model.c7 = Constraint(expr= Expr_if(IF=(model.v0 < 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) -model.c8 = Constraint(expr= Expr_if(IF=(model.vP1 < 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) +model.c6 = Constraint( + expr=Expr_if(IF=(model.vN1 < 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pTrue +) +model.c7 = Constraint( + expr=Expr_if(IF=(model.v0 < 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pFalse +) +model.c8 = Constraint( + expr=Expr_if(IF=(model.vP1 < 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pFalse +) # x >= 0 -model.c9 = Constraint(expr= Expr_if(IF=(model.vN1*10.0 >= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) -model.c10 = Constraint(expr= Expr_if(IF=(model.v0*10.0 >= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) -model.c11 = Constraint(expr= Expr_if(IF=(model.vP1*10.0 >= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) +model.c9 = Constraint( + expr=Expr_if(IF=(model.vN1 * 10.0 >= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pFalse +) +model.c10 = Constraint( + expr=Expr_if(IF=(model.v0 * 10.0 >= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pTrue +) +model.c11 = Constraint( + expr=Expr_if(IF=(model.vP1 * 10.0 >= 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pTrue +) # x > 0 -model.c12 = Constraint(expr= Expr_if(IF=(model.vN1*10.0 > 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) -model.c13 = Constraint(expr= Expr_if(IF=(model.v0*10.0 > 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) -model.c14 = Constraint(expr= Expr_if(IF=(model.vP1*10.0 > 0), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) +model.c12 = Constraint( + expr=Expr_if(IF=(model.vN1 * 10.0 > 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pFalse +) +model.c13 = Constraint( + expr=Expr_if(IF=(model.v0 * 10.0 > 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pFalse +) +model.c14 = Constraint( + expr=Expr_if(IF=(model.vP1 * 10.0 > 0), THEN=(model.vTrue), ELSE=(model.vFalse)) + == model.pTrue +) # -1 <= x <= 1 -model.c15 = Constraint(expr= Expr_if(IF=inequality(-1, model.vN2, 1), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) -model.c16 = Constraint(expr= Expr_if(IF=inequality(-1*model.vP1, model.vN1, 1), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) -model.c17 = Constraint(expr= Expr_if(IF=inequality(-1*model.vP1**2, model.v0, 1), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) -model.c18 = Constraint(expr= Expr_if(IF=inequality(model.vN1, model.vP1, 1), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) -model.c19 = Constraint(expr= Expr_if(IF=inequality(-1, model.vP2, 1), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) +model.c15 = Constraint( + expr=Expr_if( + IF=inequality(-1, model.vN2, 1), THEN=(model.vTrue), ELSE=(model.vFalse) + ) + == model.pFalse +) +model.c16 = Constraint( + expr=Expr_if( + IF=inequality(-1 * model.vP1, model.vN1, 1), + THEN=(model.vTrue), + ELSE=(model.vFalse), + ) + == model.pTrue +) +model.c17 = Constraint( + expr=Expr_if( + IF=inequality(-1 * model.vP1**2, model.v0, 1), + THEN=(model.vTrue), + ELSE=(model.vFalse), + ) + == model.pTrue +) +model.c18 = Constraint( + expr=Expr_if( + IF=inequality(model.vN1, model.vP1, 1), THEN=(model.vTrue), ELSE=(model.vFalse) + ) + == model.pTrue +) +model.c19 = Constraint( + expr=Expr_if( + IF=inequality(-1, model.vP2, 1), THEN=(model.vTrue), ELSE=(model.vFalse) + ) + == model.pFalse +) # -1 < x < 1 -model.c20 = Constraint(expr= Expr_if(IF=inequality(-1, model.vN2, 1, strict=True), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) -model.c21 = Constraint(expr= Expr_if(IF=inequality(-1, model.vN1, 1*model.vP1, strict=True), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) -model.c22 = Constraint(expr= Expr_if(IF=inequality(-1, model.v0, 1*model.vP1**2, strict=True), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pTrue) -model.c23 = Constraint(expr= Expr_if(IF=inequality(-1, model.vP1, model.vP1, strict=True), THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) -model.c24 = Constraint(expr= Expr_if(IF=inequality(-1, model.vP2, 1, strict=True) , THEN=(model.vTrue), ELSE=(model.vFalse)) == model.pFalse) +model.c20 = Constraint( + expr=Expr_if( + IF=inequality(-1, model.vN2, 1, strict=True), + THEN=(model.vTrue), + ELSE=(model.vFalse), + ) + == model.pFalse +) +model.c21 = Constraint( + expr=Expr_if( + IF=inequality(-1, model.vN1, 1 * model.vP1, strict=True), + THEN=(model.vTrue), + ELSE=(model.vFalse), + ) + == model.pFalse +) +model.c22 = Constraint( + expr=Expr_if( + IF=inequality(-1, model.v0, 1 * model.vP1**2, strict=True), + THEN=(model.vTrue), + ELSE=(model.vFalse), + ) + == model.pTrue +) +model.c23 = Constraint( + expr=Expr_if( + IF=inequality(-1, model.vP1, model.vP1, strict=True), + THEN=(model.vTrue), + ELSE=(model.vFalse), + ) + == model.pFalse +) +model.c24 = Constraint( + expr=Expr_if( + IF=inequality(-1, model.vP2, 1, strict=True), + THEN=(model.vTrue), + ELSE=(model.vFalse), + ) + == model.pFalse +) diff --git a/pyomo/repn/tests/ampl/small13_testCase.py b/pyomo/repn/tests/ampl/small13_testCase.py index 31d462ea7f0..9814c979cc7 100644 --- a/pyomo/repn/tests/ampl/small13_testCase.py +++ b/pyomo/repn/tests/ampl/small13_testCase.py @@ -25,6 +25,6 @@ model.obj = Objective(expr=model.x, sense=maximize) -model.c1 = Constraint(expr= (model.x**3 - model.x) == 0) -model.c2 = Constraint(expr= 10*(model.x**3 - model.x) == 0) -model.c3 = Constraint(expr= (model.x**3 - model.x)/10.0 == 0) +model.c1 = Constraint(expr=(model.x**3 - model.x) == 0) +model.c2 = Constraint(expr=10 * (model.x**3 - model.x) == 0) +model.c3 = Constraint(expr=(model.x**3 - model.x) / 10.0 == 0) diff --git a/pyomo/repn/tests/ampl/small14_testCase.py b/pyomo/repn/tests/ampl/small14_testCase.py index a59618d1fdc..3d896242243 100644 --- a/pyomo/repn/tests/ampl/small14_testCase.py +++ b/pyomo/repn/tests/ampl/small14_testCase.py @@ -9,7 +9,30 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.environ import ConcreteModel, Var, Objective, Constraint, log, log10, sin, cos, tan, sinh, cosh, tanh, asin, acos, atan, asinh, acosh, atanh, exp, sqrt, ceil, floor +from pyomo.environ import ( + ConcreteModel, + Var, + Objective, + Constraint, + log, + log10, + sin, + cos, + tan, + sinh, + cosh, + tanh, + asin, + acos, + atan, + asinh, + acosh, + atanh, + exp, + sqrt, + ceil, + floor, +) from math import e, pi model = ConcreteModel() @@ -19,29 +42,29 @@ model.ZERO = Var(initialize=0) -model.obj = Objective(expr=model.ONE+model.ZERO) +model.obj = Objective(expr=model.ONE + model.ZERO) -model.c_log = Constraint(expr=log(model.ONE) == 0) -model.c_log10 = Constraint(expr=log10(model.ONE) == 0) +model.c_log = Constraint(expr=log(model.ONE) == 0) +model.c_log10 = Constraint(expr=log10(model.ONE) == 0) -model.c_sin = Constraint(expr=sin(model.ZERO) == 0) -model.c_cos = Constraint(expr=cos(model.ZERO) == 1) -model.c_tan = Constraint(expr=tan(model.ZERO) == 0) +model.c_sin = Constraint(expr=sin(model.ZERO) == 0) +model.c_cos = Constraint(expr=cos(model.ZERO) == 1) +model.c_tan = Constraint(expr=tan(model.ZERO) == 0) -model.c_sinh = Constraint(expr=sinh(model.ZERO) == 0) -model.c_cosh = Constraint(expr=cosh(model.ZERO) == 1) -model.c_tanh = Constraint(expr=tanh(model.ZERO) == 0) +model.c_sinh = Constraint(expr=sinh(model.ZERO) == 0) +model.c_cosh = Constraint(expr=cosh(model.ZERO) == 1) +model.c_tanh = Constraint(expr=tanh(model.ZERO) == 0) -model.c_asin = Constraint(expr=asin(model.ZERO) == 0) -model.c_acos = Constraint(expr=acos(model.ZERO) == pi/2) -model.c_atan = Constraint(expr=atan(model.ZERO) == 0) +model.c_asin = Constraint(expr=asin(model.ZERO) == 0) +model.c_acos = Constraint(expr=acos(model.ZERO) == pi / 2) +model.c_atan = Constraint(expr=atan(model.ZERO) == 0) -model.c_asinh = Constraint(expr=asinh(model.ZERO) == 0) -model.c_acosh = Constraint(expr=acosh((e**2 + model.ONE)/(2*e)) == 0) -model.c_atanh = Constraint(expr=atanh(model.ZERO) == 0) +model.c_asinh = Constraint(expr=asinh(model.ZERO) == 0) +model.c_acosh = Constraint(expr=acosh((e**2 + model.ONE) / (2 * e)) == 0) +model.c_atanh = Constraint(expr=atanh(model.ZERO) == 0) -model.c_exp = Constraint(expr=exp(model.ZERO) == 1) -model.c_sqrt = Constraint(expr=sqrt(model.ONE) == 1) -model.c_ceil = Constraint(expr=ceil(model.ONE) == 1) -model.c_floor = Constraint(expr=floor(model.ONE) == 1) -model.c_abs = Constraint(expr=abs(model.ONE) == 1) +model.c_exp = Constraint(expr=exp(model.ZERO) == 1) +model.c_sqrt = Constraint(expr=sqrt(model.ONE) == 1) +model.c_ceil = Constraint(expr=ceil(model.ONE) == 1) +model.c_floor = Constraint(expr=floor(model.ONE) == 1) +model.c_abs = Constraint(expr=abs(model.ONE) == 1) diff --git a/pyomo/repn/tests/ampl/small15_testCase.py b/pyomo/repn/tests/ampl/small15_testCase.py index bbe1450de9e..8345621cecd 100644 --- a/pyomo/repn/tests/ampl/small15_testCase.py +++ b/pyomo/repn/tests/ampl/small15_testCase.py @@ -31,4 +31,3 @@ model.CON1 = Constraint(expr=model.b.y**2 == 4) model.b.deactivate() - diff --git a/pyomo/repn/tests/ampl/small1_testCase.py b/pyomo/repn/tests/ampl/small1_testCase.py index a2c3eb064ac..00e6dd322ed 100644 --- a/pyomo/repn/tests/ampl/small1_testCase.py +++ b/pyomo/repn/tests/ampl/small1_testCase.py @@ -26,4 +26,3 @@ model.OBJ = Objective(expr=model.x**2) model.CON1 = Constraint(expr=model.y**2 == 4) - diff --git a/pyomo/repn/tests/ampl/small3_testCase.py b/pyomo/repn/tests/ampl/small3_testCase.py index ac37a5dca92..f11137979b4 100644 --- a/pyomo/repn/tests/ampl/small3_testCase.py +++ b/pyomo/repn/tests/ampl/small3_testCase.py @@ -23,7 +23,6 @@ model.x = Var(initialize=1.0) model.y = Var(initialize=1.0) -model.OBJ = Objective(expr=model.x*model.y) +model.OBJ = Objective(expr=model.x * model.y) model.CON1 = Constraint(expr=model.y**2 == 4) - diff --git a/pyomo/repn/tests/ampl/small4_testCase.py b/pyomo/repn/tests/ampl/small4_testCase.py index 4c5742ec844..08d68c21f50 100644 --- a/pyomo/repn/tests/ampl/small4_testCase.py +++ b/pyomo/repn/tests/ampl/small4_testCase.py @@ -26,5 +26,4 @@ model.OBJ = Objective(expr=model.y**2) -model.CON1 = Constraint(expr=model.y*model.x == 4) - +model.CON1 = Constraint(expr=model.y * model.x == 4) diff --git a/pyomo/repn/tests/ampl/small5_testCase.py b/pyomo/repn/tests/ampl/small5_testCase.py index 67ebf2529de..1e976820f9b 100644 --- a/pyomo/repn/tests/ampl/small5_testCase.py +++ b/pyomo/repn/tests/ampl/small5_testCase.py @@ -26,23 +26,23 @@ from pyomo.environ import ConcreteModel, Var, Param, Objective, Constraint model = ConcreteModel() -model.x = Var(bounds=(-1.0,1.0),initialize=1.0) -model.y = Var(bounds=(-1.0,1.0),initialize=2.0) -model.v = Var(bounds=(-1.0,1.0),initialize=3.0) +model.x = Var(bounds=(-1.0, 1.0), initialize=1.0) +model.y = Var(bounds=(-1.0, 1.0), initialize=2.0) +model.v = Var(bounds=(-1.0, 1.0), initialize=3.0) model.p = Param(initialize=2.0) -model.q = Param(initialize=2.0,mutable=True) +model.q = Param(initialize=2.0, mutable=True) -model.OBJ = Objective(expr=model.x**2/model.p + model.x**2/model.q) -model.CON1 = Constraint(expr=1.0/model.p*model.v*(model.x-model.y) == 2.0) -model.CON2 = Constraint(expr=model.v*1.0/model.p*(model.x-model.y) == 2.0) -model.CON3 = Constraint(expr=model.v*(model.x-model.y)/model.p == 2.0) -model.CON4 = Constraint(expr=model.v*(model.x/model.p-model.y/model.p) == 2.0) -model.CON5 = Constraint(expr=model.v*(model.x-model.y)*(1.0/model.p) == 2.0) -model.CON6 = Constraint(expr=model.v*(model.x-model.y) == 2.0*model.p) +model.OBJ = Objective(expr=model.x**2 / model.p + model.x**2 / model.q) +model.CON1 = Constraint(expr=1.0 / model.p * model.v * (model.x - model.y) == 2.0) +model.CON2 = Constraint(expr=model.v * 1.0 / model.p * (model.x - model.y) == 2.0) +model.CON3 = Constraint(expr=model.v * (model.x - model.y) / model.p == 2.0) +model.CON4 = Constraint(expr=model.v * (model.x / model.p - model.y / model.p) == 2.0) +model.CON5 = Constraint(expr=model.v * (model.x - model.y) * (1.0 / model.p) == 2.0) +model.CON6 = Constraint(expr=model.v * (model.x - model.y) == 2.0 * model.p) -model.CON7 = Constraint(expr=1.0/model.q*model.v*(model.x-model.y) == 2.0) -model.CON8 = Constraint(expr=model.v*1.0/model.q*(model.x-model.y) == 2.0) -model.CON9 = Constraint(expr=model.v*(model.x-model.y)/model.q == 2.0) -model.CON10 = Constraint(expr=model.v*(model.x/model.q-model.y/model.q) == 2.0) -model.CON11 = Constraint(expr=model.v*(model.x-model.y)*(1.0/model.q) == 2.0) -model.CON12 = Constraint(expr=model.v*(model.x-model.y) == 2.0*model.q) +model.CON7 = Constraint(expr=1.0 / model.q * model.v * (model.x - model.y) == 2.0) +model.CON8 = Constraint(expr=model.v * 1.0 / model.q * (model.x - model.y) == 2.0) +model.CON9 = Constraint(expr=model.v * (model.x - model.y) / model.q == 2.0) +model.CON10 = Constraint(expr=model.v * (model.x / model.q - model.y / model.q) == 2.0) +model.CON11 = Constraint(expr=model.v * (model.x - model.y) * (1.0 / model.q) == 2.0) +model.CON12 = Constraint(expr=model.v * (model.x - model.y) == 2.0 * model.q) diff --git a/pyomo/repn/tests/ampl/small6_testCase.py b/pyomo/repn/tests/ampl/small6_testCase.py index 4eea6484ecb..da9f1d58f9b 100644 --- a/pyomo/repn/tests/ampl/small6_testCase.py +++ b/pyomo/repn/tests/ampl/small6_testCase.py @@ -26,16 +26,18 @@ from pyomo.environ import ConcreteModel, Var, Objective, Constraint model = ConcreteModel() -model.x = Var(bounds=(-1.0,1.0),initialize=1.0) -model.y = Var(bounds=(-1.0,1.0),initialize=2.0) -model.v = Var(bounds=(-1.0,1.0),initialize=3.0) +model.x = Var(bounds=(-1.0, 1.0), initialize=1.0) +model.y = Var(bounds=(-1.0, 1.0), initialize=2.0) +model.v = Var(bounds=(-1.0, 1.0), initialize=3.0) model.p = Var(initialize=2.0) model.p.fixed = True model.OBJ = Objective(expr=model.x) -model.CON1 = Constraint(rule=lambda model: (2.0,1.0/model.p*model.v*(model.x-model.y))) -model.CON2 = Constraint(expr=model.v*1.0/model.p*(model.x-model.y) == 2.0) -model.CON3 = Constraint(expr=model.v*(model.x-model.y)/model.p == 2.0) -model.CON4 = Constraint(expr=model.v*(model.x/model.p-model.y/model.p) == 2.0) -model.CON5 = Constraint(expr=model.v*(model.x-model.y)*(1.0/model.p) == 2.0) -model.CON6 = Constraint(expr=model.v*(model.x-model.y) - 2.0*model.p == 0) +model.CON1 = Constraint( + rule=lambda model: (2.0, 1.0 / model.p * model.v * (model.x - model.y)) +) +model.CON2 = Constraint(expr=model.v * 1.0 / model.p * (model.x - model.y) == 2.0) +model.CON3 = Constraint(expr=model.v * (model.x - model.y) / model.p == 2.0) +model.CON4 = Constraint(expr=model.v * (model.x / model.p - model.y / model.p) == 2.0) +model.CON5 = Constraint(expr=model.v * (model.x - model.y) * (1.0 / model.p) == 2.0) +model.CON6 = Constraint(expr=model.v * (model.x - model.y) - 2.0 * model.p == 0) diff --git a/pyomo/repn/tests/ampl/small7_testCase.py b/pyomo/repn/tests/ampl/small7_testCase.py index eb4736c6e16..22a75a33394 100644 --- a/pyomo/repn/tests/ampl/small7_testCase.py +++ b/pyomo/repn/tests/ampl/small7_testCase.py @@ -26,38 +26,88 @@ from pyomo.environ import ConcreteModel, Var, Param, Objective, Constraint model = ConcreteModel() -model.x = Var(bounds=(-1.0,1.0),initialize=1.0) -model.y = Var(bounds=(-1.0,1.0),initialize=2.0) -model.v = Var(bounds=(-1.0,1.0),initialize=3.0) +model.x = Var(bounds=(-1.0, 1.0), initialize=1.0) +model.y = Var(bounds=(-1.0, 1.0), initialize=2.0) +model.v = Var(bounds=(-1.0, 1.0), initialize=3.0) model.p = Var(initialize=2.0) model.p.fixed = True model.q = Param(initialize=2.0) model.OBJ = Objective(expr=model.x) -model.CON1a = Constraint(expr=1.0/model.p/model.q*model.v*(model.x-model.y) == 2.0) -model.CON2a = Constraint(expr=model.v*1.0/model.p/model.p*(model.x-model.y) == 2.0) -model.CON3a = Constraint(expr=model.v*(model.x-model.y)/model.p/model.q == 2.0) -model.CON4a = Constraint(expr=model.v*(model.x/model.p/model.q-model.y/model.p/model.q) == 2.0) -model.CON5a = Constraint(expr=model.v*(model.x-model.y)*(1.0/model.p/model.q) == 2.0) -model.CON6a = Constraint(expr=model.v*(model.x-model.y) - 2.0*model.p*model.q == 0) +model.CON1a = Constraint( + expr=1.0 / model.p / model.q * model.v * (model.x - model.y) == 2.0 +) +model.CON2a = Constraint( + expr=model.v * 1.0 / model.p / model.p * (model.x - model.y) == 2.0 +) +model.CON3a = Constraint(expr=model.v * (model.x - model.y) / model.p / model.q == 2.0) +model.CON4a = Constraint( + expr=model.v * (model.x / model.p / model.q - model.y / model.p / model.q) == 2.0 +) +model.CON5a = Constraint( + expr=model.v * (model.x - model.y) * (1.0 / model.p / model.q) == 2.0 +) +model.CON6a = Constraint( + expr=model.v * (model.x - model.y) - 2.0 * model.p * model.q == 0 +) -model.CON1b = Constraint(expr=1.0/(model.p*model.q)*model.v*(model.x-model.y) == 2.0) -model.CON2b = Constraint(expr=model.v*1.0/(model.p*model.p)*(model.x-model.y) == 2.0) -model.CON3b = Constraint(expr=model.v*(model.x-model.y)/(model.p*model.q) == 2.0) -model.CON4b = Constraint(expr=model.v*(model.x/(model.p*model.q)-model.y/(model.p*model.q)) == 2.0) -model.CON5b = Constraint(expr=model.v*(model.x-model.y)*(1.0/(model.p*model.q)) == 2.0) -model.CON6b = Constraint(expr=model.v*(model.x-model.y) - 2.0*(model.p*model.q) == 0) +model.CON1b = Constraint( + expr=1.0 / (model.p * model.q) * model.v * (model.x - model.y) == 2.0 +) +model.CON2b = Constraint( + expr=model.v * 1.0 / (model.p * model.p) * (model.x - model.y) == 2.0 +) +model.CON3b = Constraint( + expr=model.v * (model.x - model.y) / (model.p * model.q) == 2.0 +) +model.CON4b = Constraint( + expr=model.v * (model.x / (model.p * model.q) - model.y / (model.p * model.q)) + == 2.0 +) +model.CON5b = Constraint( + expr=model.v * (model.x - model.y) * (1.0 / (model.p * model.q)) == 2.0 +) +model.CON6b = Constraint( + expr=model.v * (model.x - model.y) - 2.0 * (model.p * model.q) == 0 +) -model.CON1c = Constraint(expr=1.0/(model.p+model.q)*model.v*(model.x-model.y) == 2.0) -model.CON2c = Constraint(expr=model.v*1.0/(model.p+model.p)*(model.x-model.y) == 2.0) -model.CON3c = Constraint(expr=model.v*(model.x-model.y)/(model.p+model.q) == 2.0) -model.CON4c = Constraint(expr=model.v*(model.x/(model.p+model.q)-model.y/(model.p+model.q)) == 2.0) -model.CON5c = Constraint(expr=model.v*(model.x-model.y)*(1.0/(model.p+model.q)) == 2.0) -model.CON6c = Constraint(expr=model.v*(model.x-model.y) == 2.0*(model.p+model.q)) +model.CON1c = Constraint( + expr=1.0 / (model.p + model.q) * model.v * (model.x - model.y) == 2.0 +) +model.CON2c = Constraint( + expr=model.v * 1.0 / (model.p + model.p) * (model.x - model.y) == 2.0 +) +model.CON3c = Constraint( + expr=model.v * (model.x - model.y) / (model.p + model.q) == 2.0 +) +model.CON4c = Constraint( + expr=model.v * (model.x / (model.p + model.q) - model.y / (model.p + model.q)) + == 2.0 +) +model.CON5c = Constraint( + expr=model.v * (model.x - model.y) * (1.0 / (model.p + model.q)) == 2.0 +) +model.CON6c = Constraint( + expr=model.v * (model.x - model.y) == 2.0 * (model.p + model.q) +) -model.CON1d = Constraint(expr=1.0/((model.p+model.q)**2)*model.v*(model.x-model.y) == 2.0) -model.CON2d = Constraint(expr=model.v*1.0/((model.p+model.p)**2)*(model.x-model.y) == 2.0) -model.CON3d = Constraint(expr=model.v*(model.x-model.y)/((model.p+model.q)**2) == 2.0) -model.CON4d = Constraint(expr=model.v*(model.x/((model.p+model.q)**2)-model.y/((model.p+model.q)**2)) == 2.0) -model.CON5d = Constraint(expr=model.v*(model.x-model.y)*(1.0/((model.p+model.q)**2)) == 2.0) -model.CON6d = Constraint(expr=model.v*(model.x-model.y) - 2.0*((model.p+model.q)**2) == 0) +model.CON1d = Constraint( + expr=1.0 / ((model.p + model.q) ** 2) * model.v * (model.x - model.y) == 2.0 +) +model.CON2d = Constraint( + expr=model.v * 1.0 / ((model.p + model.p) ** 2) * (model.x - model.y) == 2.0 +) +model.CON3d = Constraint( + expr=model.v * (model.x - model.y) / ((model.p + model.q) ** 2) == 2.0 +) +model.CON4d = Constraint( + expr=model.v + * (model.x / ((model.p + model.q) ** 2) - model.y / ((model.p + model.q) ** 2)) + == 2.0 +) +model.CON5d = Constraint( + expr=model.v * (model.x - model.y) * (1.0 / ((model.p + model.q) ** 2)) == 2.0 +) +model.CON6d = Constraint( + expr=model.v * (model.x - model.y) - 2.0 * ((model.p + model.q) ** 2) == 0 +) diff --git a/pyomo/repn/tests/ampl/small8_testCase.py b/pyomo/repn/tests/ampl/small8_testCase.py index 44733bdf2cb..554e27c0924 100644 --- a/pyomo/repn/tests/ampl/small8_testCase.py +++ b/pyomo/repn/tests/ampl/small8_testCase.py @@ -19,7 +19,15 @@ # will not solve if sent to a real optimizer. # -from pyomo.environ import AbstractModel, Param, Var, NonNegativeReals, Objective, Constraint, minimize +from pyomo.environ import ( + AbstractModel, + Param, + Var, + NonNegativeReals, + Objective, + Constraint, + minimize, +) model = AbstractModel() @@ -27,21 +35,32 @@ model.x = Var(within=NonNegativeReals) model.y = Var(within=NonNegativeReals) -model.z = Var(within=NonNegativeReals,bounds=(7,None)) +model.z = Var(within=NonNegativeReals, bounds=(7, None)) + def obj_rule(model): - return model.z + model.x*model.x + model.y -model.obj = Objective(rule=obj_rule,sense=minimize) + return model.z + model.x * model.x + model.y + + +model.obj = Objective(rule=obj_rule, sense=minimize) + def constr_rule(model): - return (model.a,model.y*model.y,None) + return (model.a, model.y * model.y, None) + + model.constr = Constraint(rule=constr_rule) + def constr2_rule(model): - return model.x/model.a >= model.y + return model.x / model.a >= model.y + + model.constr2 = Constraint(rule=constr2_rule) + def constr3_rule(model): return model.z <= model.y + model.a -model.constr3 = Constraint(rule=constr3_rule) + +model.constr3 = Constraint(rule=constr3_rule) diff --git a/pyomo/repn/tests/ampl/small9_testCase.py b/pyomo/repn/tests/ampl/small9_testCase.py index 94af4f64edc..3d7af602a88 100644 --- a/pyomo/repn/tests/ampl/small9_testCase.py +++ b/pyomo/repn/tests/ampl/small9_testCase.py @@ -19,25 +19,32 @@ # will not solve if sent to a real optimizer. # -from pyomo.environ import ConcreteModel, Var, Param, Objective, Constraint, simple_constraint_rule +from pyomo.environ import ( + ConcreteModel, + Var, + Param, + Objective, + Constraint, + simple_constraint_rule, +) model = ConcreteModel() model.x = Var() model.y = Var(initialize=0.0) model.z = Var() -model.p = Param(initialize=0.0,mutable=True) -model.q = Param(initialize=0.0,mutable=False) +model.p = Param(initialize=0.0, mutable=True) +model.q = Param(initialize=0.0, mutable=False) model.y.fixed = True -model.obj = Objective( expr=model.x ) +model.obj = Objective(expr=model.x) -model.con1 = Constraint(expr= model.x*model.y*model.z + model.x == 1.0) -model.con2 = Constraint(expr= model.x*model.p*model.z + model.x == 1.0) -model.con3 = Constraint(expr= model.x*model.q*model.z + model.x == 1.0) +model.con1 = Constraint(expr=model.x * model.y * model.z + model.x == 1.0) +model.con2 = Constraint(expr=model.x * model.p * model.z + model.x == 1.0) +model.con3 = Constraint(expr=model.x * model.q * model.z + model.x == 1.0) # Pyomo differs from AMPL in these cases that involve immutable params (q). # These never actually become constants in Pyomo, and for good reason. -model.con4 = Constraint(expr= model.x*model.y*model.z == 1.0) -model.con5 = Constraint(expr= model.x*model.p*model.z == 1.0) -model.con6 = Constraint(rule= simple_constraint_rule(model.x*model.q*model.z == 0.0) ) +model.con4 = Constraint(expr=model.x * model.y * model.z == 1.0) +model.con5 = Constraint(expr=model.x * model.p * model.z == 1.0) +model.con6 = Constraint(rule=simple_constraint_rule(model.x * model.q * model.z == 0.0)) diff --git a/pyomo/repn/tests/ampl/test_ampl_comparison.py b/pyomo/repn/tests/ampl/test_ampl_comparison.py index 3cab33d8236..eb5aff329e1 100644 --- a/pyomo/repn/tests/ampl/test_ampl_comparison.py +++ b/pyomo/repn/tests/ampl/test_ampl_comparison.py @@ -42,14 +42,13 @@ names = [] # add test methods to classes for f in glob.glob(join(currdir, '*_testCase.py')): - names.append(re.split('[._]',os.path.basename(f))[0]) + names.append(re.split('[._]', os.path.basename(f))[0]) class Tests(unittest.TestCase): - def pyomo(self, cmd): os.chdir(currdir) - output = main.main(['convert', '--logging=quiet', '-c']+cmd) + output = main.main(['convert', '--logging=quiet', '-c'] + cmd) return output def setUp(self): @@ -67,21 +66,17 @@ class BaselineTests(Tests): @parameterized.parameterized.expand(input=names) def nlwriter_baseline_test(self, name): - baseline = join(currdir, name+'.pyomo.nl') - testFile = TempfileManager.create_tempfile( - suffix=name + '.test.nl') - cmd = ['--output=' + testFile, - join(currdir, name+'_testCase.py')] + baseline = join(currdir, name + '.pyomo.nl') + testFile = TempfileManager.create_tempfile(suffix=name + '.test.nl') + cmd = ['--output=' + testFile, join(currdir, name + '_testCase.py')] if os.path.exists(join(currdir, name + '.dat')): cmd.append(join(currdir, name + '.dat')) self.pyomo(cmd) # Check that the pyomo nl file matches its own baseline with open(testFile, 'r') as f1, open(baseline, 'r') as f2: - f1_contents = list(filter( - None, f1.read().replace('n', 'n ').split())) - f2_contents = list(filter( - None, f2.read().replace('n', 'n ').split())) + f1_contents = list(filter(None, f1.read().replace('n', 'n ').split())) + f2_contents = list(filter(None, f2.read().replace('n', 'n ').split())) for item1, item2 in zip_longest(f1_contents, f2_contents): try: self.assertEqual(float(item1), float(item2)) @@ -89,10 +84,8 @@ def nlwriter_baseline_test(self, name): self.assertEqual(item1, item2) -@unittest.skipUnless( - has_gjh_asl_json, "'gjh_asl_json' executable not available") +@unittest.skipUnless(has_gjh_asl_json, "'gjh_asl_json' executable not available") class ASLJSONTests(Tests): - # # The following test calls the gjh_asl_json executable to # generate JSON files corresponding to both the @@ -102,51 +95,63 @@ class ASLJSONTests(Tests): # @parameterized.parameterized.expand(input=names) def nlwriter_asl_test(self, name): - testFile = TempfileManager.create_tempfile(suffix=name+'.test.nl') + testFile = TempfileManager.create_tempfile(suffix=name + '.test.nl') testFile_row = testFile[:-2] + 'row' TempfileManager.add_tempfile(testFile_row, exists=False) testFile_col = testFile[:-2] + 'col' TempfileManager.add_tempfile(testFile_col, exists=False) - cmd = ['--output='+testFile, - '--file-determinism=2', - '--symbolic-solver-labels', - join(currdir, name+'_testCase.py')] + cmd = [ + '--output=' + testFile, + '--file-determinism=2', + '--symbolic-solver-labels', + join(currdir, name + '_testCase.py'), + ] if os.path.exists(join(currdir, name + '.dat')): - cmd.append(join(currdir, name + '.dat')) + cmd.append(join(currdir, name + '.dat')) self.pyomo(cmd) # # compare AMPL and Pyomo nl file structure # - testFile_json = testFile[:-2]+'json' + testFile_json = testFile[:-2] + 'json' TempfileManager.add_tempfile(testFile_json, exists=False) # obtain the nl file summary information for comparison with ampl p = subprocess.run( - ['gjh_asl_json', testFile, - 'rows=' + testFile_row, - 'cols=' + testFile_col, - 'json=' + testFile_json], - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - universal_newlines=True) + [ + 'gjh_asl_json', + testFile, + 'rows=' + testFile_row, + 'cols=' + testFile_col, + 'json=' + testFile_json, + ], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) self.assertTrue(p.returncode == 0, msg=p.stdout) - baseFile = join(currdir, name+'.ampl.nl') - amplFile = TempfileManager.create_tempfile(suffix=name+'.ampl.json') + baseFile = join(currdir, name + '.ampl.nl') + amplFile = TempfileManager.create_tempfile(suffix=name + '.ampl.json') # obtain the nl file summary information for comparison with ampl p = subprocess.run( - ['gjh_asl_json', baseFile, - 'rows=' + baseFile[:-2] + 'row', - 'cols=' + baseFile[:-2] + 'col', - 'json=' + amplFile], - stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - universal_newlines=True) + [ + 'gjh_asl_json', + baseFile, + 'rows=' + baseFile[:-2] + 'row', + 'cols=' + baseFile[:-2] + 'col', + 'json=' + amplFile, + ], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) self.assertTrue(p.returncode == 0, msg=p.stdout) with open(testFile_json, 'r') as f1, open(amplFile, 'r') as f2: - self.assertStructuredAlmostEqual( - json.load(f1), json.load(f2), abstol=1e-8) + self.assertStructuredAlmostEqual(json.load(f1), json.load(f2), abstol=1e-8) + if __name__ == "__main__": deleteFiles = False diff --git a/pyomo/repn/tests/ampl/test_ampl_nl.py b/pyomo/repn/tests/ampl/test_ampl_nl.py index a2e067a192d..bd58c254bfd 100644 --- a/pyomo/repn/tests/ampl/test_ampl_nl.py +++ b/pyomo/repn/tests/ampl/test_ampl_nl.py @@ -15,14 +15,20 @@ import pyomo.common.unittest as unittest -from pyomo.common.getGSL import find_GSL +from pyomo.common.gsl import find_GSL from pyomo.common.fileutils import this_file_dir from pyomo.common.tempfiles import TempfileManager from pyomo.environ import ( - ConcreteModel, Var, Constraint, Objective, Param, Block, - ExternalFunction, value, + ConcreteModel, + Var, + Constraint, + Objective, + Param, + Block, + ExternalFunction, + value, ) -from .nl_diff import load_and_compare_nl_baseline +from ..nl_diff import load_and_compare_nl_baseline import pyomo.repn.plugins.ampl.ampl_ as ampl_ import pyomo.repn.plugins.nl_writer as nl_writer @@ -32,6 +38,7 @@ thisdir = this_file_dir() + class _NLWriter_suite(object): @classmethod def setUpClass(cls): @@ -45,40 +52,43 @@ def tearDownClass(cls): def _get_fnames(self): class_name, test_name = self.id().split('.')[-2:] prefix = test_name.replace("test_", "", 1) - return (os.path.join(thisdir, prefix+".nl.baseline"), - os.path.join(self.tempdir, prefix+".nl.out")) + return ( + os.path.join(thisdir, prefix + ".nl.baseline"), + os.path.join(self.tempdir, prefix + ".nl.out"), + ) def _compare_nl_baseline(self, baseline, testfile): - self.assertEqual(*load_and_compare_nl_baseline( - baseline, testfile, self._nl_version)) + self.assertEqual( + *load_and_compare_nl_baseline(baseline, testfile, self._nl_version) + ) def test_export_nonlinear_variables(self): model = ConcreteModel() model.x = Var() model.y = Var() model.z = Var() - model.w = Var([1,2,3]) + model.w = Var([1, 2, 3]) model.c = Constraint(expr=model.x == model.y**2) model.y.fix(3) - test_fname = "export_nonlinear_variables" + test_fname = os.path.join(self.tempdir, "export_nonlinear_variables") model.write( test_fname, format=self._nl_version, - io_options={'symbolic_solver_labels':True} + io_options={'symbolic_solver_labels': True}, ) with open(test_fname + '.col') as f: names = list(map(str.strip, f.readlines())) - assert "z" not in names # z is not in a constraint - assert "y" not in names # y is fixed + assert "z" not in names # z is not in a constraint + assert "y" not in names # y is fixed assert "x" in names model.write( test_fname, format=self._nl_version, io_options={ - 'symbolic_solver_labels':True, - 'export_nonlinear_variables':[model.z] - } + 'symbolic_solver_labels': True, + 'export_nonlinear_variables': [model.z], + }, ) with open(test_fname + '.col') as f: names = list(map(str.strip, f.readlines())) @@ -92,9 +102,9 @@ def test_export_nonlinear_variables(self): test_fname, format=self._nl_version, io_options={ - 'symbolic_solver_labels':True, - 'export_nonlinear_variables':[model.z, model.w] - } + 'symbolic_solver_labels': True, + 'export_nonlinear_variables': [model.z, model.w], + }, ) with open(test_fname + '.col') as f: names = list(map(str.strip, f.readlines())) @@ -109,9 +119,9 @@ def test_export_nonlinear_variables(self): test_fname, format=self._nl_version, io_options={ - 'symbolic_solver_labels':True, - 'export_nonlinear_variables':[model.z, model.w[2]] - } + 'symbolic_solver_labels': True, + 'export_nonlinear_variables': [model.z, model.w[2]], + }, ) with open(test_fname + '.col') as f: names = list(map(str.strip, f.readlines())) @@ -130,14 +140,17 @@ def test_var_on_other_model(self): model = ConcreteModel() model.x = Var() - model.c = Constraint(expr=other.a + 2*model.x <= 0) + model.c = Constraint(expr=other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) baseline_fname, test_fname = self._get_fnames() self.assertRaisesRegex( KeyError, "'a' is not part of the model", - model.write, test_fname, format=self._nl_version) + model.write, + test_fname, + format=self._nl_version, + ) def test_var_on_deactivated_block(self): model = ConcreteModel() @@ -145,7 +158,7 @@ def test_var_on_deactivated_block(self): model.other = Block() model.other.a = Var() model.other.deactivate() - model.c = Constraint(expr=model.other.a + 2*model.x <= 0) + model.c = Constraint(expr=model.other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) baseline_fname, test_fname = self._get_fnames() @@ -155,23 +168,27 @@ def test_var_on_deactivated_block(self): def test_var_on_nonblock(self): if self._nl_version != 'nl_v1': self.skipTest(f'test not applicable to writer {self._nl_version}') + class Foo(Block().__class__): def __init__(self, *args, **kwds): - kwds.setdefault('ctype',Foo) - super(Foo,self).__init__(*args, **kwds) + kwds.setdefault('ctype', Foo) + super(Foo, self).__init__(*args, **kwds) model = ConcreteModel() model.x = Var() model.other = Foo() model.other.a = Var() - model.c = Constraint(expr=model.other.a + 2*model.x <= 0) + model.c = Constraint(expr=model.other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) baseline_fname, test_fname = self._get_fnames() self.assertRaisesRegex( KeyError, "'other.a' exists within Foo 'other'", - model.write, test_fname, format=self._nl_version) + model.write, + test_fname, + format=self._nl_version, + ) def _external_model(self): DLL = find_GSL() @@ -181,11 +198,10 @@ def _external_model(self): m = ConcreteModel() m.hypot = ExternalFunction(library=DLL, function="gsl_hypot") m.p = Param(initialize=1, mutable=True) - m.x = Var(initialize=3, bounds=(1e-5,None)) - m.y = Var(initialize=3, bounds=(0,None)) + m.x = Var(initialize=3, bounds=(1e-5, None)) + m.y = Var(initialize=3, bounds=(0, None)) m.z = Var(initialize=1) - m.o = Objective( - expr=m.z**2 * m.hypot(m.p*m.x, m.p+m.y)**2) + m.o = Objective(expr=m.z**2 * m.hypot(m.p * m.x, m.p + m.y) ** 2) self.assertAlmostEqual(value(m.o), 25.0, 7) return m @@ -195,23 +211,28 @@ def test_external_expression_constant(self): self.skipTest("Could not find the amplgsl.dll library") m = ConcreteModel() - m.y = Var(initialize=4, bounds=(0,None)) + m.y = Var(initialize=4, bounds=(0, None)) m.hypot = ExternalFunction(library=DLL, function="gsl_hypot") m.o = Objective(expr=m.hypot(3, m.y)) self.assertAlmostEqual(value(m.o), 5.0, 7) baseline_fname, test_fname = self._get_fnames() - m.write(test_fname, format=self._nl_version, - io_options={'symbolic_solver_labels':True}) + m.write( + test_fname, + format=self._nl_version, + io_options={'symbolic_solver_labels': True}, + ) self._compare_nl_baseline(baseline_fname, test_fname) def test_external_expression_variable(self): m = self._external_model() baseline_fname, test_fname = self._get_fnames() - m.write(test_fname, format=self._nl_version, - io_options={'symbolic_solver_labels':True, - 'column_order': True}) + m.write( + test_fname, + format=self._nl_version, + io_options={'symbolic_solver_labels': True, 'column_order': True}, + ) self._compare_nl_baseline(baseline_fname, test_fname) def test_external_expression_partial_fixed(self): @@ -219,9 +240,11 @@ def test_external_expression_partial_fixed(self): m.x.fix() baseline_fname, test_fname = self._get_fnames() - m.write(test_fname, format=self._nl_version, - io_options={'symbolic_solver_labels':True, - 'column_order': True}) + m.write( + test_fname, + format=self._nl_version, + io_options={'symbolic_solver_labels': True, 'column_order': True}, + ) self._compare_nl_baseline(baseline_fname, test_fname) def test_external_expression_fixed(self): @@ -230,33 +253,41 @@ def test_external_expression_fixed(self): m.y.fix() baseline_fname, test_fname = self._get_fnames() - m.write(test_fname, format=self._nl_version, - io_options={'symbolic_solver_labels':True, - 'column_order': True}) + m.write( + test_fname, + format=self._nl_version, + io_options={'symbolic_solver_labels': True, 'column_order': True}, + ) self._compare_nl_baseline(baseline_fname, test_fname) def test_external_expression_rewrite_fixed(self): m = self._external_model() baseline_fname, test_fname = self._get_fnames() - variable_baseline = baseline_fname.replace('rewrite_fixed','variable') - m.write(test_fname, format=self._nl_version, - io_options={'symbolic_solver_labels':True, - 'column_order': True}) + variable_baseline = baseline_fname.replace('rewrite_fixed', 'variable') + m.write( + test_fname, + format=self._nl_version, + io_options={'symbolic_solver_labels': True, 'column_order': True}, + ) self._compare_nl_baseline(variable_baseline, test_fname) m.x.fix() - m.write(test_fname, format=self._nl_version, - io_options={'symbolic_solver_labels':True, - 'column_order': True}) - partial_baseline = baseline_fname.replace( - 'rewrite_fixed','partial_fixed') + m.write( + test_fname, + format=self._nl_version, + io_options={'symbolic_solver_labels': True, 'column_order': True}, + ) + partial_baseline = baseline_fname.replace('rewrite_fixed', 'partial_fixed') self._compare_nl_baseline(partial_baseline, test_fname) m.y.fix() - m.write(test_fname, format=self._nl_version, - io_options={'symbolic_solver_labels':True}) - fixed_baseline = baseline_fname.replace('rewrite_fixed','fixed') + m.write( + test_fname, + format=self._nl_version, + io_options={'symbolic_solver_labels': True}, + ) + fixed_baseline = baseline_fname.replace('rewrite_fixed', 'fixed') self._compare_nl_baseline(fixed_baseline, test_fname) def test_obj_con_cache(self): @@ -342,8 +373,10 @@ def test_obj_con_cache(self): model._gen_obj_repn = False model._gen_con_repn = False try: + def dont_call_gsr(*args, **kwargs): self.fail("generate_standard_repn should not be called") + ampl_.generate_standard_repn = dont_call_gsr model.write(nl_file, format=self._nl_version) finally: @@ -360,12 +393,13 @@ def dont_call_gsr(*args, **kwargs): # Check that repns generated by the LP wrter will be # processed correctly model._repn[model.c] = c_repn = gsr(model.c.body, quadratic=True) - model._repn[model.obj] = obj_repn = gsr( - model.obj.expr, quadratic=True) + model._repn[model.obj] = obj_repn = gsr(model.obj.expr, quadratic=True) nl_file = TMP.create_tempfile(suffix='.nl') try: + def dont_call_gsr(*args, **kwargs): self.fail("generate_standard_repn should not be called") + ampl_.generate_standard_repn = dont_call_gsr model.write(nl_file, format=self._nl_version) finally: @@ -379,11 +413,14 @@ def dont_call_gsr(*args, **kwargs): nl_test = FILE.read() self.assertEqual(nl_ref, nl_test) + class TestNLWriter_v1(_NLWriter_suite, unittest.TestCase): _nl_version = 'nl_v1' + class TestNLWriter_v2(_NLWriter_suite, unittest.TestCase): _nl_version = 'nl_v2' + if __name__ == "__main__": unittest.main() diff --git a/pyomo/repn/tests/ampl/test_ampl_repn.py b/pyomo/repn/tests/ampl/test_ampl_repn.py index fd90ddee5a2..cf1a889006e 100644 --- a/pyomo/repn/tests/ampl/test_ampl_repn.py +++ b/pyomo/repn/tests/ampl/test_ampl_repn.py @@ -16,15 +16,14 @@ class AmplRepnTests(unittest.TestCase): - def test_divide_by_mutable(self): # # Test from https://github.com/Pyomo/pyomo/issues/153 # m = ConcreteModel() - m.x = Var(bounds=(1,5)) + m.x = Var(bounds=(1, 5)) m.p = Param(initialize=100, mutable=True) - m.con = Constraint(expr=exp(5*(1/m.x - 1/m.p))<=10) + m.con = Constraint(expr=exp(5 * (1 / m.x - 1 / m.p)) <= 10) m.obj = Objective(expr=m.x**2) test = gar(m.con.body) @@ -34,5 +33,6 @@ def test_divide_by_mutable(self): self.assertEqual(set(id(v) for v in test.nonlinear_vars), set([id(m.x)])) self.assertIs(test.nonlinear_expr, m.con.body) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/repn/tests/ampl/test_nlv2.py b/pyomo/repn/tests/ampl/test_nlv2.py new file mode 100644 index 00000000000..629f2a88dd2 --- /dev/null +++ b/pyomo/repn/tests/ampl/test_nlv2.py @@ -0,0 +1,1032 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ +# + +import pyomo.common.unittest as unittest + +import io +import math +import os + +import pyomo.repn.util as repn_util +import pyomo.repn.plugins.nl_writer as nl_writer +from pyomo.repn.tests.nl_diff import nl_diff + +from pyomo.common.log import LoggingIntercept +from pyomo.common.tempfiles import TempfileManager +from pyomo.core.expr import Expr_if, inequality, LinearExpression +from pyomo.core.base.expression import ScalarExpression +from pyomo.environ import ( + ConcreteModel, + Objective, + Param, + Var, + log, + ExternalFunction, + Suffix, + Constraint, + Expression, +) +import pyomo.environ as pyo + +_invalid_1j = r'InvalidNumber\((\([-+0-9.e]+\+)?1j\)?\)' + + +class INFO(object): + def __init__(self, symbolic=False): + if symbolic: + self.template = nl_writer.text_nl_debug_template + else: + self.template = nl_writer.text_nl_template + self.subexpression_cache = {} + self.subexpression_order = [] + self.external_functions = {} + self.var_map = {} + self.used_named_expressions = set() + self.symbolic_solver_labels = symbolic + + self.visitor = nl_writer.AMPLRepnVisitor( + self.template, + self.subexpression_cache, + self.subexpression_order, + self.external_functions, + self.var_map, + self.used_named_expressions, + self.symbolic_solver_labels, + True, + ) + + +class Test_AMPLRepnVisitor(unittest.TestCase): + def test_divide(self): + m = ConcreteModel() + m.p = Param(mutable=True, initialize=1) + m.x = Var() + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.x**2 / m.p, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, ('o5\nv%s\nn2\n', [id(m.x)])) + + m.p = 2 + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((4 / m.p, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 2) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.x / m.p, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {id(m.x): 0.5}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression(((4 * m.x) / m.p, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {id(m.x): 2}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((4 * (m.x + 2) / m.p, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 4) + self.assertEqual(repn.linear, {id(m.x): 2}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.x**2 / m.p, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, ('o2\nn0.5\no5\nv%s\nn2\n', [id(m.x)])) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((log(m.x) / m.x, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, ('o3\no43\nv%s\nv%s\n', [id(m.x), id(m.x)])) + + def test_errors_divide_by_0(self): + m = ConcreteModel() + m.p = Param(mutable=True, initialize=0) + m.x = Var() + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((1 / m.p, None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 1/p\n", + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(str(repn.const), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.x / m.p, None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 1/p\n", + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(str(repn.const), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression(((3 * m.x) / m.p, None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(3, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 3/p\n", + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(str(repn.const), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((3 * (m.x + 2) / m.p, None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(3, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 3*(x + 2)/p\n", + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(str(repn.const), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.x**2 / m.p, None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: x**2/p\n", + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(str(repn.const), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_pow(self): + m = ConcreteModel() + m.p = Param(mutable=True, initialize=2) + m.x = Var() + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.x**m.p, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, ('o5\nv%s\nn2\n', [id(m.x)])) + + m.p = 1 + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.x**m.p, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.nonlinear, None) + + m.p = 0 + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.x**m.p, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 1) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_errors_divide_by_0_mult_by_0(self): + # Note: we may elect to deprecate this functionality in the future + # + m = ConcreteModel() + m.p = Param(mutable=True, initialize=0) + m.x = Var() + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.p * (1 / m.p), None, None)) + self.assertIn( + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 1/p\n", + LOG.getvalue(), + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression(((1 / m.p) * m.p, None, None)) + self.assertIn( + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 1/p\n", + LOG.getvalue(), + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.p * (m.x / m.p), None, None)) + self.assertIn( + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 1/p\n", + LOG.getvalue(), + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression( + (m.p * (3 * (m.x + 2) / m.p), None, None) + ) + self.assertIn( + "Exception encountered evaluating expression 'div(3, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 3*(x + 2)/p\n", + LOG.getvalue(), + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.p * (m.x**2 / m.p), None, None)) + self.assertIn( + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: x**2/p\n", + LOG.getvalue(), + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_errors_divide_by_0_halt(self): + m = ConcreteModel() + m.p = Param(mutable=True, initialize=0) + m.x = Var() + + repn_util.HALT_ON_EVALUATION_ERROR, tmp = ( + True, + repn_util.HALT_ON_EVALUATION_ERROR, + ) + try: + info = INFO() + with LoggingIntercept() as LOG, self.assertRaises(ZeroDivisionError): + info.visitor.walk_expression((1 / m.p, None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 1/p\n", + ) + + info = INFO() + with LoggingIntercept() as LOG, self.assertRaises(ZeroDivisionError): + info.visitor.walk_expression((m.x / m.p, None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 1/p\n", + ) + + info = INFO() + with LoggingIntercept() as LOG, self.assertRaises(ZeroDivisionError): + info.visitor.walk_expression((3 * (m.x + 2) / m.p, None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(3, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 3*(x + 2)/p\n", + ) + + info = INFO() + with LoggingIntercept() as LOG, self.assertRaises(ZeroDivisionError): + info.visitor.walk_expression((m.x**2 / m.p, None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: x**2/p\n", + ) + finally: + repn_util.HALT_ON_EVALUATION_ERROR = tmp + + def test_errors_negative_frac_pow(self): + m = ConcreteModel() + m.p = Param(mutable=True, initialize=-1) + m.x = Var() + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.p ** (0.5), None, None)) + self.assertEqual( + LOG.getvalue(), + "Complex number returned from expression\n" + "\tmessage: Pyomo AMPLRepnVisitor does not support complex numbers\n" + "\texpression: p**0.5\n", + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertRegex(str(repn.const), _invalid_1j) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(0.5) + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.p**m.x, None, None)) + self.assertEqual( + LOG.getvalue(), + "Complex number returned from expression\n" + "\tmessage: Pyomo AMPLRepnVisitor does not support complex numbers\n" + "\texpression: p**x\n", + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertRegex(str(repn.const), _invalid_1j) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_errors_unary_func(self): + m = ConcreteModel() + m.p = Param(mutable=True, initialize=0) + m.x = Var() + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((log(m.p), None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'log(0)'\n" + "\tmessage: math domain error\n" + "\texpression: log(p)\n", + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(str(repn.const), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_errors_propagate_nan(self): + m = ConcreteModel() + m.p = Param(mutable=True, initialize=0) + m.x = Var() + m.y = Var() + m.y.fix(1) + + expr = m.y**2 * m.x**2 * (((3 * m.x) / m.p) * m.x) / m.y + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(3, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 3/p\n", + ) + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(str(repn.const), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_linearexpression_npv(self): + m = ConcreteModel() + m.x = Var(initialize=4) + m.y = Var(initialize=4) + m.z = Var(initialize=4) + m.p = Param(initialize=5, mutable=True) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression( + ( + LinearExpression( + args=[1, m.p, m.p * m.x, (m.p + 2) * m.y, 3 * m.z, m.p * m.z] + ), + None, + None, + ) + ) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 6) + self.assertEqual(repn.linear, {id(m.x): 5, id(m.y): 7, id(m.z): 8}) + self.assertEqual(repn.nonlinear, None) + + def test_eval_pow(self): + m = ConcreteModel() + m.x = Var(initialize=4) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.x ** (0.5), None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, ('o5\nv%s\nn0.5\n', [id(m.x)])) + + m.x.fix() + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((m.x ** (0.5), None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 2) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_eval_abs(self): + m = ConcreteModel() + m.x = Var(initialize=-4) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((abs(m.x), None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, ('o15\nv%s\n', [id(m.x)])) + + m.x.fix() + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((abs(m.x), None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 4) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_eval_unary_func(self): + m = ConcreteModel() + m.x = Var(initialize=4) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((log(m.x), None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, ('o43\nv%s\n', [id(m.x)])) + + m.x.fix() + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((log(m.x), None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, math.log(4)) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_eval_expr_if_lessEq(self): + m = ConcreteModel() + m.x = Var(initialize=4) + m.y = Var(initialize=4) + expr = Expr_if(m.x <= 4, m.x**2, m.y) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual( + repn.nonlinear, + ('o35\no23\nv%s\nn4\no5\nv%s\nn2\nv%s\n', [id(m.x), id(m.x), id(m.y)]), + ) + + m.x.fix() + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 16) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(5) + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {id(m.y): 1}) + self.assertEqual(repn.nonlinear, None) + + def test_eval_expr_if_Eq(self): + m = ConcreteModel() + m.x = Var(initialize=4) + m.y = Var(initialize=4) + expr = Expr_if(m.x == 4, m.x**2, m.y) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual( + repn.nonlinear, + ('o35\no24\nv%s\nn4\no5\nv%s\nn2\nv%s\n', [id(m.x), id(m.x), id(m.y)]), + ) + + m.x.fix() + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 16) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(5) + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {id(m.y): 1}) + self.assertEqual(repn.nonlinear, None) + + def test_eval_expr_if_ranged(self): + m = ConcreteModel() + m.x = Var(initialize=4) + m.y = Var(initialize=4) + expr = Expr_if(inequality(1, m.x, 4), m.x**2, m.y) + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual( + repn.nonlinear, + ( + 'o35\no21\no23\nn1\nv%s\no23\nv%s\nn4\no5\nv%s\nn2\nv%s\n', + [id(m.x), id(m.x), id(m.x), id(m.y)], + ), + ) + + m.x.fix() + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 16) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(5) + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {id(m.y): 1}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(0) + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {id(m.y): 1}) + self.assertEqual(repn.nonlinear, None) + + def test_custom_named_expression(self): + class CustomExpression(ScalarExpression): + pass + + m = ConcreteModel() + m.x = Var() + m.e = CustomExpression() + m.e.expr = m.x + 3 + + expr = m.e + m.e + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 6) + self.assertEqual(repn.linear, {id(m.x): 2}) + self.assertEqual(repn.nonlinear, None) + + self.assertEqual(len(info.subexpression_cache), 1) + obj, repn, info = info.subexpression_cache[id(m.e)] + self.assertIs(obj, m.e) + self.assertEqual(repn.nl, ('v%s\n', (id(m.e),))) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 3) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.nonlinear, None) + self.assertEqual(info, [None, None, False]) + + def test_nested_operator_zero_arg(self): + # This tests an error encountered when developing the nlv2 + # writer where var ids were being dropped then the second + # argument in a binary operator was 0. The original case was + # for expr**p where p was a variable fixed to 0. However, since + # then, _handle_pow_operator contains special handling for **0 + # and **1. + m = ConcreteModel() + m.x = Var() + m.p = Param(initialize=0, mutable=True) + expr = (1 / m.x) == m.p + + info = INFO() + with LoggingIntercept() as LOG: + repn = info.visitor.walk_expression((expr, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn.nl, None) + self.assertEqual(repn.mult, 1) + self.assertEqual(repn.const, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, ('o24\no3\nn1\nv%s\nn0\n', [id(m.x)])) + + def test_duplicate_shared_linear_expressions(self): + # This tests an issue where AMPLRepn.duplicate() was not copying + # the linear dict, allowing certain operations (like finalizing + # a bare expression multiplied by something other than 1) to + # change the compiled shared expression + m = ConcreteModel() + m.x = Var() + m.y = Var() + m.e = Expression(expr=2 * m.x + 3 * m.y) + + expr1 = 10 * m.e + expr2 = m.e + 100 * m.x + 100 * m.y + + info = INFO() + with LoggingIntercept() as LOG: + repn1 = info.visitor.walk_expression((expr1, None, None)) + repn2 = info.visitor.walk_expression((expr2, None, None)) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual(repn1.nl, None) + self.assertEqual(repn1.mult, 1) + self.assertEqual(repn1.const, 0) + self.assertEqual(repn1.linear, {id(m.x): 20, id(m.y): 30}) + self.assertEqual(repn1.nonlinear, None) + + self.assertEqual(repn2.nl, None) + self.assertEqual(repn2.mult, 1) + self.assertEqual(repn2.const, 0) + self.assertEqual(repn2.linear, {id(m.x): 102, id(m.y): 103}) + self.assertEqual(repn2.nonlinear, None) + + +class Test_NLWriter(unittest.TestCase): + def test_external_function_str_args(self): + m = ConcreteModel() + m.x = Var() + m.e = ExternalFunction(library='tmp', function='test') + m.o = Objective(expr=m.e(m.x, 'str')) + + # Test explicit newline translation + OUT = io.StringIO(newline='\r\n') + with LoggingIntercept() as LOG: + nl_writer.NLWriter().write(m, OUT) + self.assertIn( + "Writing NL file containing string arguments to a " + "text output stream with line endings other than '\\n' ", + LOG.getvalue(), + ) + + # Test system-dependent newline translation + with TempfileManager: + fname = TempfileManager.create_tempfile() + with open(fname, 'w') as OUT: + with LoggingIntercept() as LOG: + nl_writer.NLWriter().write(m, OUT) + if os.linesep == '\n': + self.assertEqual(LOG.getvalue(), "") + else: + self.assertIn( + "Writing NL file containing string arguments to a " + "text output stream with line endings other than '\\n' ", + LOG.getvalue(), + ) + + # Test objects lacking 'tell': + r, w = os.pipe() + try: + OUT = os.fdopen(w, 'w') + with LoggingIntercept() as LOG: + nl_writer.NLWriter().write(m, OUT) + if os.linesep == '\n': + self.assertEqual(LOG.getvalue(), "") + else: + self.assertIn( + "Writing NL file containing string arguments to a " + "text output stream that does not support tell()", + LOG.getvalue(), + ) + finally: + OUT.close() + os.close(r) + + def test_suffix_warning_new_components(self): + m = ConcreteModel() + m.junk = Suffix(direction=Suffix.EXPORT) + m.x = Var() + m.y = Var() + m.z = Var([1, 2, 3]) + m.o = Objective(expr=m.x + m.z[2]) + m.c = Constraint(expr=m.y <= 0) + m.c.deactivate() + + @m.Constraint([1, 2, 3]) + def d(m, i): + return m.z[i] <= 0 + + m.d.deactivate() + m.d[2].activate() + m.junk[m.x] = 1 + + OUT = io.StringIO() + with LoggingIntercept() as LOG: + nl_writer.NLWriter().write(m, OUT) + self.assertEqual(LOG.getvalue(), "") + + m.junk[m.y] = 1 + with LoggingIntercept() as LOG: + nl_writer.NLWriter().write(m, OUT) + self.assertEqual( + "model contains export suffix 'junk' that contains 1 component " + "keys that are not exported as part of the NL file. Skipping.\n", + LOG.getvalue(), + ) + + m.junk[m.z] = 1 + with LoggingIntercept() as LOG: + nl_writer.NLWriter().write(m, OUT) + self.assertEqual( + "model contains export suffix 'junk' that contains 3 component " + "keys that are not exported as part of the NL file. Skipping.\n", + LOG.getvalue(), + ) + + m.junk[m.c] = 2 + with LoggingIntercept() as LOG: + nl_writer.NLWriter().write(m, OUT) + self.assertEqual( + "model contains export suffix 'junk' that contains 4 component " + "keys that are not exported as part of the NL file. Skipping.\n", + LOG.getvalue(), + ) + + m.junk[m.d] = 2 + with LoggingIntercept() as LOG: + nl_writer.NLWriter().write(m, OUT) + self.assertEqual( + "model contains export suffix 'junk' that contains 6 component " + "keys that are not exported as part of the NL file. Skipping.\n", + LOG.getvalue(), + ) + + m.junk[5] = 5 + with LoggingIntercept() as LOG: + nl_writer.NLWriter().write(m, OUT) + self.assertEqual( + "model contains export suffix 'junk' that contains 6 component " + "keys that are not exported as part of the NL file. Skipping.\n" + "model contains export suffix 'junk' that contains 1 " + "keys that are not Var, Constraint, Objective, or the model. " + "Skipping.\n", + LOG.getvalue(), + ) + + def test_linear_constraint_npv_const(self): + # This tests an error possibly reported by #2810 + m = ConcreteModel() + m.x = Var([1, 2]) + m.p = Param(initialize=5, mutable=True) + m.o = Objective(expr=1) + m.c = Constraint( + expr=LinearExpression([m.p**2, 5 * m.x[1], 10 * m.x[2]]) == 0 + ) + + OUT = io.StringIO() + nl_writer.NLWriter().write(m, OUT) + self.assertEqual( + *nl_diff( + """g3 1 1 0 # problem unknown + 2 1 1 0 1 # vars, constraints, objectives, ranges, eqns + 0 0 0 0 0 0 # nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 # network constraints: nonlinear, linear + 0 0 0 # nonlinear vars in constraints, objectives, both + 0 0 0 1 # linear network variables; functions; arith, flags + 0 0 0 0 0 # discrete variables: binary, integer, nonlinear (b,c,o) + 2 0 # nonzeros in Jacobian, obj. gradient + 0 0 # max name lengths: constraints, variables + 0 0 0 0 0 # common exprs: b,c,o,c1,o1 +C0 +n0 +O0 0 +n1.0 +x0 +r +4 -25 +b +3 +3 +k1 +1 +J0 2 +0 5 +1 10 +""", + OUT.getvalue(), + ) + ) + + def test_indexed_sos_constraints(self): + # This tests the example from issue #2827 + m = pyo.ConcreteModel() + m.A = pyo.Set(initialize=[1]) + m.B = pyo.Set(initialize=[1, 2, 3]) + m.C = pyo.Set(initialize=[1]) + + m.param_cx = pyo.Param(m.A, initialize={1: 1}) + m.param_cy = pyo.Param(m.B, initialize={1: 2, 2: 3, 3: 1}) + + m.x = pyo.Var(m.A, domain=pyo.NonNegativeReals, bounds=(0, 40)) + m.y = pyo.Var(m.B, domain=pyo.NonNegativeIntegers) + + @m.Objective() + def OBJ(m): + return sum(m.param_cx[a] * m.x[a] for a in m.A) + sum( + m.param_cy[b] * m.y[b] for b in m.B + ) + + m.y[3].bounds = (2, 3) + + m.mysos = pyo.SOSConstraint( + m.C, var=m.y, sos=1, index={1: [2, 3]}, weights={2: 25.0, 3: 18.0} + ) + + OUT = io.StringIO() + with LoggingIntercept() as LOG: + nl_writer.NLWriter().write(m, OUT, symbolic_solver_labels=True) + self.assertEqual(LOG.getvalue(), "") + self.assertEqual( + *nl_diff( + """g3 1 1 0 # problem unknown + 4 0 1 0 0 # vars, constraints, objectives, ranges, eqns + 0 0 0 0 0 0 # nonlinear constrs, objs; ccons: lin, nonlin, nd, nzlb + 0 0 # network constraints: nonlinear, linear + 0 0 0 # nonlinear vars in constraints, objectives, both + 0 0 0 1 # linear network variables; functions; arith, flags + 0 3 0 0 0 # discrete variables: binary, integer, nonlinear (b,c,o) + 0 4 # nonzeros in Jacobian, obj. gradient + 3 4 # max name lengths: constraints, variables + 0 0 0 0 0 # common exprs: b,c,o,c1,o1 +S0 2 sosno +2 1 +3 1 +S0 2 ref +2 25.0 +3 18.0 +O0 0 #OBJ +n0 +x0 # initial guess +r #0 ranges (rhs's) +b #4 bounds (on variables) +0 0 40 #x[1] +2 0 #y[1] +2 0 #y[2] +0 2 3 #y[3] +k3 #intermediate Jacobian column lengths +0 +0 +0 +G0 4 #OBJ +0 1 +1 2 +2 3 +3 1 +""", + OUT.getvalue(), + ) + ) diff --git a/pyomo/repn/tests/ampl/test_suffixes.py b/pyomo/repn/tests/ampl/test_suffixes.py index fd0d3f5b33a..e73060e7e8c 100644 --- a/pyomo/repn/tests/ampl/test_suffixes.py +++ b/pyomo/repn/tests/ampl/test_suffixes.py @@ -21,14 +21,20 @@ from pyomo.opt import ProblemFormat from pyomo.environ import ( - ConcreteModel, Suffix, Var, Objective, Constraint, SOSConstraint, + ConcreteModel, + Suffix, + Var, + Objective, + Constraint, + SOSConstraint, sum_product, ) -from .nl_diff import load_and_compare_nl_baseline +from ..nl_diff import load_and_compare_nl_baseline currdir = this_file_dir() -class TestSuffix(unittest.TestCase): + +class SuffixTester(object): @classmethod def setUpClass(cls): cls.context = TempfileManager.new_context() @@ -43,39 +49,41 @@ def tearDownClass(cls): # will end up in the NL file with integer tags def test_EXPORT_suffixes_int(self): model = ConcreteModel() - model.junk = Suffix(direction=Suffix.EXPORT,datatype=Suffix.INT) - model.junk_inactive = Suffix(direction=Suffix.EXPORT,datatype=Suffix.INT) + model.junk = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT) + model.junk_inactive = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT) model.x = Var() - model.junk.set_value(model.x,1) - model.junk_inactive.set_value(model.x,1) + model.junk.set_value(model.x, 1) + model.junk_inactive.set_value(model.x, 1) - model.y = Var([1,2], dense=True) - model.junk.set_value(model.y,2) - model.junk_inactive.set_value(model.y,2) + model.y = Var([1, 2], dense=True) + model.junk.set_value(model.y, 2) + model.junk_inactive.set_value(model.y, 2) - model.obj = Objective(expr=model.x+sum_product(model.y)) - model.junk.set_value(model.obj,3) - model.junk_inactive.set_value(model.obj,3) + model.obj = Objective(expr=model.x + sum_product(model.y)) + model.junk.set_value(model.obj, 3) + model.junk_inactive.set_value(model.obj, 3) - model.conx = Constraint(expr=model.x>=1) - model.junk.set_value(model.conx,4) - model.junk_inactive.set_value(model.conx,4) + model.conx = Constraint(expr=model.x >= 1) + model.junk.set_value(model.conx, 4) + model.junk_inactive.set_value(model.conx, 4) - model.cony = Constraint([1,2],rule=lambda model,i: model.y[i]>=1) - model.junk.set_value(model.cony,5) - model.junk_inactive.set_value(model.cony,5) + model.cony = Constraint([1, 2], rule=lambda model, i: model.y[i] >= 1) + model.junk.set_value(model.cony, 5) + model.junk_inactive.set_value(model.cony, 5) - model.junk.set_value(model,6) - model.junk_inactive.set_value(model,6) + model.junk.set_value(model, 6) + model.junk_inactive.set_value(model, 6) # This one should NOT end up in the NL file model.junk_inactive.deactivate() _test = os.path.join(self.tempdir, "EXPORT_suffixes.test.nl") - model.write(filename=_test, - format=ProblemFormat.nl, - io_options={"symbolic_solver_labels": False}) + model.write( + filename=_test, + format=self.nl_version, + io_options={"symbolic_solver_labels": False, "file_determinism": 1}, + ) _base = os.path.join(currdir, "EXPORT_suffixes_int.baseline.nl") self.assertEqual(*load_and_compare_nl_baseline(_base, _test)) @@ -84,39 +92,41 @@ def test_EXPORT_suffixes_int(self): # will end up in the NL file with floating point tags def test_EXPORT_suffixes_float(self): model = ConcreteModel() - model.junk = Suffix(direction=Suffix.EXPORT,datatype=Suffix.FLOAT) - model.junk_inactive = Suffix(direction=Suffix.EXPORT,datatype=Suffix.FLOAT) + model.junk = Suffix(direction=Suffix.EXPORT, datatype=Suffix.FLOAT) + model.junk_inactive = Suffix(direction=Suffix.EXPORT, datatype=Suffix.FLOAT) model.x = Var() - model.junk.set_value(model.x,1) - model.junk_inactive.set_value(model.x,1) + model.junk.set_value(model.x, 1) + model.junk_inactive.set_value(model.x, 1) - model.y = Var([1,2], dense=True) - model.junk.set_value(model.y,2) - model.junk_inactive.set_value(model.y,2) + model.y = Var([1, 2], dense=True) + model.junk.set_value(model.y, 2) + model.junk_inactive.set_value(model.y, 2) - model.obj = Objective(expr=model.x+sum_product(model.y)) - model.junk.set_value(model.obj,3) - model.junk_inactive.set_value(model.obj,3) + model.obj = Objective(expr=model.x + sum_product(model.y)) + model.junk.set_value(model.obj, 3) + model.junk_inactive.set_value(model.obj, 3) - model.conx = Constraint(expr=model.x>=1) - model.junk.set_value(model.conx,4) - model.junk_inactive.set_value(model.conx,4) + model.conx = Constraint(expr=model.x >= 1) + model.junk.set_value(model.conx, 4) + model.junk_inactive.set_value(model.conx, 4) - model.cony = Constraint([1,2],rule=lambda model,i: model.y[i]>=1) - model.junk.set_value(model.cony,5) - model.junk_inactive.set_value(model.cony,5) + model.cony = Constraint([1, 2], rule=lambda model, i: model.y[i] >= 1) + model.junk.set_value(model.cony, 5) + model.junk_inactive.set_value(model.cony, 5) - model.junk.set_value(model,6) - model.junk_inactive.set_value(model,6) + model.junk.set_value(model, 6) + model.junk_inactive.set_value(model, 6) # This one should NOT end up in the NL file model.junk_inactive.deactivate() _test = os.path.join(self.tempdir, "EXPORT_suffixes.test.nl") - model.write(filename=_test, - format=ProblemFormat.nl, - io_options={"symbolic_solver_labels" : False}) + model.write( + filename=_test, + format=self.nl_version, + io_options={"symbolic_solver_labels": False, "file_determinism": 1}, + ) _base = os.path.join(currdir, "EXPORT_suffixes_float.baseline.nl") self.assertEqual(*load_and_compare_nl_baseline(_base, _test)) @@ -125,69 +135,90 @@ def test_EXPORT_suffixes_float(self): # component when variables get assigned duplicate values for ref def test_EXPORT_suffixes_with_SOSConstraint_duplicateref(self): model = ConcreteModel() - model.ref = Suffix(direction=Suffix.EXPORT,datatype=Suffix.INT) - model.y = Var([1,2,3]) + model.ref = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT) + model.y = Var([1, 2, 3]) model.obj = Objective(expr=sum_product(model.y)) # The NL writer will convert this constraint to ref and sosno # suffixes on model.y - model.sos_con = SOSConstraint(var=model.y, index=[1,2,3], sos=1) + model.sos_con = SOSConstraint(var=model.y, index=[1, 2, 3], sos=1) + + for i, val in zip([1, 2, 3], [11, 12, 13]): + model.ref.set_value(model.y[i], val) - for i,val in zip([1,2,3],[11,12,13]): - model.ref.set_value(model.y[i],val) - with self.assertRaisesRegex( - RuntimeError, "NL file writer does not allow both manually " - "declared 'ref' suffixes as well as SOSConstraint "): - model.write(filename=os.path.join(self.tempdir, "junk.nl"), - format=ProblemFormat.nl, - io_options={"symbolic_solver_labels" : False}) + RuntimeError, + "NL file writer does not allow both manually " + "declared 'ref' suffixes as well as SOSConstraint ", + ): + model.write( + filename=os.path.join(self.tempdir, "junk.nl"), + format=self.nl_version, + io_options={"symbolic_solver_labels": False}, + ) # Test that user defined sosno suffixes fail to # merge with those created from translating the SOSConstraint # component when variables get assigned duplicate values for sosno def test_EXPORT_suffixes_with_SOSConstraint_duplicatesosno(self): model = ConcreteModel() - model.sosno = Suffix(direction=Suffix.EXPORT,datatype=Suffix.INT) - model.y = Var([1,2,3]) + model.sosno = Suffix(direction=Suffix.EXPORT, datatype=Suffix.INT) + model.y = Var([1, 2, 3]) model.obj = Objective(expr=sum_product(model.y)) # The NL writer will convert this constraint to ref and sosno # suffixes on model.y - model.sos_con = SOSConstraint(var=model.y, index=[1,2,3], sos=1) + model.sos_con = SOSConstraint(var=model.y, index=[1, 2, 3], sos=1) + + for i in [1, 2, 3]: + model.sosno.set_value(model.y[i], -1) - for i in [1,2,3]: - model.sosno.set_value(model.y[i],-1) - with self.assertRaisesRegex( - RuntimeError, "NL file writer does not allow both manually " - "declared 'sosno' suffixes as well as SOSConstraint "): - model.write(filename=os.path.join(self.tempdir, "junk.nl"), - format=ProblemFormat.nl, - io_options={"symbolic_solver_labels" : False}) + RuntimeError, + "NL file writer does not allow both manually " + "declared 'sosno' suffixes as well as SOSConstraint ", + ): + model.write( + filename=os.path.join(self.tempdir, "junk.nl"), + format=self.nl_version, + io_options={"symbolic_solver_labels": False}, + ) # Test that user defined sosno suffixes fail to # merge with those created from translating the SOSConstraint # component when variables get assigned duplicate values for sosno def test_EXPORT_suffixes_no_datatype(self): model = ConcreteModel() - model.sosno = Suffix(direction=Suffix.EXPORT,datatype=None) - model.y = Var([1,2,3]) + model.sosno = Suffix(direction=Suffix.EXPORT, datatype=None) + model.y = Var([1, 2, 3]) model.obj = Objective(expr=sum_product(model.y)) # The NL writer will convert this constraint to ref and sosno # suffixes on model.y - model.sos_con = SOSConstraint(var=model.y, index=[1,2,3], sos=1) + model.sos_con = SOSConstraint(var=model.y, index=[1, 2, 3], sos=1) + + for i in [1, 2, 3]: + model.sosno.set_value(model.y[i], -1) - for i in [1,2,3]: - model.sosno.set_value(model.y[i],-1) - with self.assertRaisesRegex( - RuntimeError, "NL file writer does not allow both manually " - "declared 'sosno' suffixes as well as SOSConstraint "): - model.write(filename=os.path.join(self.tempdir, "junk.nl"), - format=ProblemFormat.nl, - io_options={"symbolic_solver_labels" : False}) + RuntimeError, + "NL file writer does not allow both manually " + "declared 'sosno' suffixes as well as SOSConstraint ", + ): + model.write( + filename=os.path.join(self.tempdir, "junk.nl"), + format=self.nl_version, + io_options={"symbolic_solver_labels": False}, + ) + + +class TestSuffix_nlv1(SuffixTester, unittest.TestCase): + nl_version = 'nl_v1' + + +class TestSuffix_nlv2(SuffixTester, unittest.TestCase): + nl_version = 'nl_v2' + if __name__ == "__main__": unittest.main() diff --git a/pyomo/repn/tests/baron/branching_priorities.bar.baseline b/pyomo/repn/tests/baron/branching_priorities.bar.baseline index a0f19dfc620..03200e74913 100644 --- a/pyomo/repn/tests/baron/branching_priorities.bar.baseline +++ b/pyomo/repn/tests/baron/branching_priorities.bar.baseline @@ -27,7 +27,7 @@ y_2_: 2; EQUATIONS c_e_FIX_ONE_VAR_CONST__, c; c_e_FIX_ONE_VAR_CONST__: ONE_VAR_CONST__ == 1; -c: y_1_*y_2_ - 2*x >= 0; +c: y_1_*y_2_ + (-2)*x >= 0; OBJ: maximize y_1_ + y_2_; diff --git a/pyomo/repn/tests/baron/small14a_testCase.py b/pyomo/repn/tests/baron/small14a_testCase.py index 218fe8ba69b..72190756dc7 100644 --- a/pyomo/repn/tests/baron/small14a_testCase.py +++ b/pyomo/repn/tests/baron/small14a_testCase.py @@ -9,7 +9,16 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.environ import ConcreteModel, Var, Objective, Constraint, log, log10, exp, sqrt +from pyomo.environ import ( + ConcreteModel, + Var, + Objective, + Constraint, + log, + log10, + exp, + sqrt, +) model = ConcreteModel() @@ -18,11 +27,11 @@ model.ZERO = Var(initialize=0) -model.obj = Objective(expr=model.ONE+model.ZERO) +model.obj = Objective(expr=model.ONE + model.ZERO) -model.c_log = Constraint(expr=log(model.ONE) == 0) -model.c_log10 = Constraint(expr=log10(model.ONE) == 0) +model.c_log = Constraint(expr=log(model.ONE) == 0) +model.c_log10 = Constraint(expr=log10(model.ONE) == 0) -model.c_exp = Constraint(expr=exp(model.ZERO) == 1) -model.c_sqrt = Constraint(expr=sqrt(model.ONE) == 1) -model.c_abs = Constraint(expr=abs(model.ONE) == 1) +model.c_exp = Constraint(expr=exp(model.ZERO) == 1) +model.c_sqrt = Constraint(expr=sqrt(model.ONE) == 1) +model.c_abs = Constraint(expr=abs(model.ONE) == 1) diff --git a/pyomo/repn/tests/baron/test_baron.py b/pyomo/repn/tests/baron/test_baron.py index 21749a2a6d8..348ad6036fb 100644 --- a/pyomo/repn/tests/baron/test_baron.py +++ b/pyomo/repn/tests/baron/test_baron.py @@ -17,19 +17,29 @@ from io import StringIO import pyomo.common.unittest as unittest - +from pyomo.common.collections import OrderedSet from pyomo.common.fileutils import this_file_dir +import pyomo.core.expr as EXPR +from pyomo.core.base import SymbolMap from pyomo.environ import ( - ConcreteModel, Var, Param, Constraint, Objective, Block, sin, - maximize, Binary, Suffix + ConcreteModel, + Var, + Param, + Constraint, + Objective, + Block, + sin, + maximize, + Binary, + Suffix, ) +from pyomo.repn.plugins.baron_writer import expression_to_string thisdir = this_file_dir() class Test(unittest.TestCase): - def _cleanup(self, fname): try: os.remove(fname) @@ -39,16 +49,14 @@ def _cleanup(self, fname): def _get_fnames(self): class_name, test_name = self.id().split('.')[-2:] prefix = os.path.join(thisdir, test_name.replace("test_", "", 1)) - return prefix+".bar.baseline", prefix+".bar.out" + return prefix + ".bar.baseline", prefix + ".bar.out" def _check_baseline(self, model, **kwds): baseline_fname, test_fname = self._get_fnames() self._cleanup(test_fname) io_options = {"symbolic_solver_labels": True} io_options.update(kwds) - model.write(test_fname, - format="bar", - io_options=io_options) + model.write(test_fname, format="bar", io_options=io_options) try: self.assertTrue(cmp(test_fname, baseline_fname)) except: @@ -56,9 +64,9 @@ def _check_baseline(self, model, **kwds): f1_contents = f1.read().replace(' ;', ';').split() f2_contents = f2.read().replace(' ;', ';').split() self.assertEqual( - f1_contents, f2_contents, - "\n\nbaseline: %s\ntestFile: %s\n" % ( - baseline_fname, test_fname) + f1_contents, + f2_contents, + "\n\nbaseline: %s\ntestFile: %s\n" % (baseline_fname, test_fname), ) self._cleanup(test_fname) @@ -82,9 +90,17 @@ def test_no_column_ordering_quadratic(self): model.b = Var() model.c = Var() - terms = [model.a, model.b, model.c, - (model.a, model.a), (model.b, model.b), (model.c, model.c), - (model.a, model.b), (model.a, model.c), (model.b, model.c)] + terms = [ + model.a, + model.b, + model.c, + (model.a, model.a), + (model.b, model.b), + (model.c, model.c), + (model.a, model.b), + (model.a, model.c), + (model.b, model.c), + ] model.obj = Objective(expr=self._gen_expression(terms)) model.con = Constraint(expr=self._gen_expression(terms) <= 1) self._check_baseline(model) @@ -109,7 +125,7 @@ def test_no_row_ordering(self): components["con1"] = Constraint(expr=model.a >= 0) components["con2"] = Constraint(expr=model.a <= 1) components["con3"] = Constraint(expr=(0, model.a, 1)) - components["con4"] = Constraint([1,2], rule=lambda m, i: model.a == i) + components["con4"] = Constraint([1, 2], rule=lambda m, i: model.a == i) for key in components: model.add_component(key, components[key]) @@ -122,7 +138,7 @@ def test_var_on_other_model(self): model = ConcreteModel() model.x = Var() - model.c = Constraint(expr=other.a + 2*model.x <= 0) + model.c = Constraint(expr=other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) self._check_baseline(model) @@ -132,33 +148,33 @@ def test_var_on_deactivated_block(self): model.other = Block() model.other.a = Var() model.other.deactivate() - model.c = Constraint(expr=model.other.a + 2*model.x <= 0) + model.c = Constraint(expr=model.other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) self._check_baseline(model) def test_var_on_nonblock(self): class Foo(Block().__class__): def __init__(self, *args, **kwds): - kwds.setdefault('ctype',Foo) - super(Foo,self).__init__(*args, **kwds) + kwds.setdefault('ctype', Foo) + super(Foo, self).__init__(*args, **kwds) model = ConcreteModel() model.x = Var() model.other = Foo() model.other.deactivate() model.other.a = Var() - model.c = Constraint(expr=model.other.a + 2*model.x <= 0) + model.c = Constraint(expr=model.other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) self._check_baseline(model) def test_trig_generates_exception(self): m = ConcreteModel() - m.x = Var(bounds=(0,2*3.1415)) + m.x = Var(bounds=(0, 2 * 3.1415)) m.obj = Objective(expr=sin(m.x)) with self.assertRaisesRegex( RuntimeError, - 'The BARON .BAR format does not support the unary function "sin"' - ): + 'The BARON .BAR format does not support the unary function "sin"', + ): test_fname = self._get_fnames()[1] self._cleanup(test_fname) m.write(test_fname, format="bar") @@ -169,15 +185,15 @@ def test_exponential_NPV(self): m.x = Var() m.obj = Objective(expr=m.x**2) m.p = Param(initialize=1, mutable=True) - m.c = Constraint(expr=m.x * m.p ** 1.2 == 0) + m.c = Constraint(expr=m.x * m.p**1.2 == 0) self._check_baseline(m) def test_branching_priorities(self): m = ConcreteModel() m.x = Var(within=Binary) m.y = Var([1, 2, 3], within=Binary) - m.c = Constraint(expr=m.y[1]*m.y[2] - 2*m.x >= 0) - m.obj = Objective(expr=m.y[1]+m.y[2], sense=maximize) + m.c = Constraint(expr=m.y[1] * m.y[2] - 2 * m.x >= 0) + m.obj = Objective(expr=m.y[1] + m.y[2], sense=maximize) m.priority = Suffix(direction=Suffix.EXPORT) m.priority[m.x] = 1 # Note this checks that y[3] is filtered out @@ -188,21 +204,25 @@ def test_invalid_suffix(self): m = ConcreteModel() m.x = Var(within=Binary) m.y = Var([1, 2, 3], within=Binary) - m.c = Constraint(expr=m.y[1]*m.y[2] - 2*m.x >= 0) - m.obj = Objective(expr=m.y[1]+m.y[2], sense=maximize) + m.c = Constraint(expr=m.y[1] * m.y[2] - 2 * m.x >= 0) + m.obj = Objective(expr=m.y[1] + m.y[2], sense=maximize) m.priorities = Suffix(direction=Suffix.EXPORT) m.priorities[m.x] = 1 m.priorities[m.y] = 2 with self.assertRaisesRegex( - ValueError, "The BARON writer can not export suffix " - "with name 'priorities'. Either remove it from " - "the model or deactivate it."): + ValueError, + "The BARON writer can not export suffix " + "with name 'priorities'. Either remove it from " + "the model or deactivate it.", + ): m.write(StringIO(), format='bar') m._name = 'TestModel' with self.assertRaisesRegex( - ValueError, "The BARON writer can not export suffix " - "with name 'priorities'. Either remove it from " - "the model 'TestModel' or deactivate it."): + ValueError, + "The BARON writer can not export suffix " + "with name 'priorities'. Either remove it from " + "the model 'TestModel' or deactivate it.", + ): m.write(StringIO(), format='bar') p = m.priorities del m.priorities @@ -210,13 +230,51 @@ def test_invalid_suffix(self): m.blk.sub = Block() m.blk.sub.priorities = p with self.assertRaisesRegex( - ValueError, "The BARON writer can not export suffix " - "with name 'priorities'. Either remove it from " - "the block 'blk.sub' or deactivate it."): + ValueError, + "The BARON writer can not export suffix " + "with name 'priorities'. Either remove it from " + "the block 'blk.sub' or deactivate it.", + ): m.write(StringIO(), format='bar') -#class TestBaron_writer(unittest.TestCase): +class TestToBaronVisitor(unittest.TestCase): + def test_pow(self): + variables = OrderedSet() + smap = SymbolMap() + + m = ConcreteModel() + m.x = Var(initialize=1) + m.y = Var(initialize=2) + m.p = Param(mutable=True, initialize=0) + + e = m.x**m.y + test = expression_to_string(e, variables, smap) + self.assertEqual(test, "exp((x) * log(y))") + + e = m.x ** (3 + EXPR.ProductExpression((m.p, m.y))) + test = expression_to_string(e, variables, smap) + self.assertEqual(test, "x ^ 3") + + e = (3 + EXPR.ProductExpression((m.p, m.y))) ** m.x + test = expression_to_string(e, variables, smap) + self.assertEqual(test, "3 ^ x") + + def test_issue_2819(self): + m = ConcreteModel() + m.x = Var() + m.z = Var() + t = 0.55 + m.x.fix(3.5) + e = (m.x - 4) ** 2 + (m.z - 1) ** 2 - t + + variables = OrderedSet() + smap = SymbolMap() + test = expression_to_string(e, variables, smap) + self.assertEqual(test, '(-0.5) ^ 2 + (z - 1) ^ 2 + (-0.55)') + + +# class TestBaron_writer(unittest.TestCase): class XTestBaron_writer(object): """These tests verified that the BARON writer complained loudly for variables that were not on the model, not on an active block, or not @@ -232,7 +290,7 @@ def _cleanup(self, fname): def _get_fnames(self): class_name, test_name = self.id().split('.')[-2:] prefix = os.path.join(thisdir, test_name.replace("test_", "", 1)) - return prefix+".bar.baseline", prefix+".bar.out" + return prefix + ".bar.baseline", prefix + ".bar.out" def test_var_on_other_model(self): other = ConcreteModel() @@ -240,14 +298,12 @@ def test_var_on_other_model(self): model = ConcreteModel() model.x = Var() - model.c = Constraint(expr=other.a + 2*model.x <= 0) + model.c = Constraint(expr=other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) baseline_fname, test_fname = self._get_fnames() self._cleanup(test_fname) - self.assertRaises( - KeyError, - model.write, test_fname, format='bar') + self.assertRaises(KeyError, model.write, test_fname, format='bar') self._cleanup(test_fname) def test_var_on_deactivated_block(self): @@ -256,34 +312,30 @@ def test_var_on_deactivated_block(self): model.other = Block() model.other.a = Var() model.other.deactivate() - model.c = Constraint(expr=model.other.a + 2*model.x <= 0) + model.c = Constraint(expr=model.other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) baseline_fname, test_fname = self._get_fnames() self._cleanup(test_fname) - self.assertRaises( - KeyError, - model.write, test_fname, format='bar' ) + self.assertRaises(KeyError, model.write, test_fname, format='bar') self._cleanup(test_fname) def test_var_on_nonblock(self): class Foo(Block().__class__): def __init__(self, *args, **kwds): - kwds.setdefault('ctype',Foo) - super(Foo,self).__init__(*args, **kwds) + kwds.setdefault('ctype', Foo) + super(Foo, self).__init__(*args, **kwds) model = ConcreteModel() model.x = Var() model.other = Foo() model.other.a = Var() - model.c = Constraint(expr=model.other.a + 2*model.x <= 0) + model.c = Constraint(expr=model.other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) baseline_fname, test_fname = self._get_fnames() self._cleanup(test_fname) - self.assertRaises( - KeyError, - model.write, test_fname, format='bar') + self.assertRaises(KeyError, model.write, test_fname, format='bar') self._cleanup(test_fname) diff --git a/pyomo/repn/tests/baron/test_baron_comparison.py b/pyomo/repn/tests/baron/test_baron_comparison.py index 45d6fc9ba6d..7c480321624 100644 --- a/pyomo/repn/tests/baron/test_baron_comparison.py +++ b/pyomo/repn/tests/baron/test_baron_comparison.py @@ -16,30 +16,33 @@ import glob import os from os.path import abspath, dirname, join -currdir = dirname(abspath(__file__))+os.sep -datadir = abspath(join(currdir, "..", "ampl"))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep +datadir = abspath(join(currdir, "..", "ampl")) + os.sep import pyomo.common.unittest as unittest import pyomo.common import pyomo.scripting.pyomo_main as main -parameterized, param_available = pyomo.common.dependencies.attempt_import('parameterized') +parameterized, param_available = pyomo.common.dependencies.attempt_import( + 'parameterized' +) if not param_available: raise unittest.SkipTest('Parameterized is not available.') names = [] # add test methods to classes -for f in itertools.chain(glob.glob(join(datadir,'*_testCase.py')), - glob.glob(join(currdir, '*_testCase.py'))): - names.append(re.split('[._]',os.path.basename(f))[0]) +for f in itertools.chain( + glob.glob(join(datadir, '*_testCase.py')), glob.glob(join(currdir, '*_testCase.py')) +): + names.append(re.split('[._]', os.path.basename(f))[0]) class Tests(unittest.TestCase): - def pyomo(self, cmd): os.chdir(currdir) - output = main.main(['convert', '--logging=quiet', '-c']+cmd) + output = main.main(['convert', '--logging=quiet', '-c'] + cmd) return output @@ -48,29 +51,26 @@ def __init__(self, *args, **kwds): Tests.__init__(self, *args, **kwds) # - #The following test generates an BAR file for the test case - #and checks that it matches the current pyomo baseline BAR file + # The following test generates an BAR file for the test case + # and checks that it matches the current pyomo baseline BAR file # @parameterized.parameterized.expand(input=names) def barwriter_baseline_test(self, name): - baseline = join(currdir, name+'.pyomo.bar') - output = join(currdir, name+'.test.bar') + baseline = join(currdir, name + '.pyomo.bar') + output = join(currdir, name + '.test.bar') if not os.path.exists(baseline): self.skipTest("baseline file (%s) not found" % (baseline,)) - if os.path.exists(datadir+name+'_testCase.py'): + if os.path.exists(datadir + name + '_testCase.py'): testDir = datadir else: testDir = currdir - testCase = testDir+name+'_testCase.py' + testCase = testDir + name + '_testCase.py' - if os.path.exists(testDir+name+'.dat'): - self.pyomo(['--output='+output, - testCase, - testDir+name+'.dat']) + if os.path.exists(testDir + name + '.dat'): + self.pyomo(['--output=' + output, testCase, testDir + name + '.dat']) else: - self.pyomo(['--output='+output, - testCase]) + self.pyomo(['--output=' + output, testCase]) # Check that the pyomo BAR file matches its own baseline with open(baseline, 'r') as f1, open(output, 'r') as f2: @@ -81,10 +81,11 @@ def barwriter_baseline_test(self, name): self.assertAlmostEqual(float(item1), float(item2)) except: self.assertEqual( - item1, item2, - "\n\nbaseline: %s\ntestFile: %s\n" % (baseline, output) + item1, + item2, + "\n\nbaseline: %s\ntestFile: %s\n" % (baseline, output), ) - os.remove(join(currdir, name+'.test.bar')) + os.remove(join(currdir, name + '.test.bar')) if __name__ == "__main__": diff --git a/pyomo/repn/tests/cpxlp/column_ordering_linear.lp_v2.baseline b/pyomo/repn/tests/cpxlp/column_ordering_linear.lp_v2.baseline new file mode 100644 index 00000000000..37f436b4c2c --- /dev/null +++ b/pyomo/repn/tests/cpxlp/column_ordering_linear.lp_v2.baseline @@ -0,0 +1,21 @@ +\* Source Pyomo model name=unknown *\ + +min +obj: ++1 c ++1 b ++1 a + +s.t. + +c_u_con_: ++1 c ++1 b ++1 a +<= 1 + +bounds + -inf <= c <= +inf + -inf <= b <= +inf + -inf <= a <= +inf +end diff --git a/pyomo/repn/tests/cpxlp/column_ordering_quadratic.lp_v2.baseline b/pyomo/repn/tests/cpxlp/column_ordering_quadratic.lp_v2.baseline new file mode 100644 index 00000000000..11ec324bdf4 --- /dev/null +++ b/pyomo/repn/tests/cpxlp/column_ordering_quadratic.lp_v2.baseline @@ -0,0 +1,37 @@ +\* Source Pyomo model name=unknown *\ + +min +obj: ++1 c ++1 b ++1 a ++ [ ++2 c ^ 2 ++2 c * b ++2 c * a ++2 b ^ 2 ++2 b * a ++2 a ^ 2 +] / 2 + +s.t. + +c_u_con_: ++1 c ++1 b ++1 a ++ [ ++1 c ^ 2 ++1 c * b ++1 c * a ++1 b ^ 2 ++1 b * a ++1 a ^ 2 +] +<= 1 + +bounds + -inf <= c <= +inf + -inf <= b <= +inf + -inf <= a <= +inf +end diff --git a/pyomo/repn/tests/cpxlp/linear_var_on_other_model.lp_v2.baseline b/pyomo/repn/tests/cpxlp/linear_var_on_other_model.lp_v2.baseline new file mode 100644 index 00000000000..38540b7711d --- /dev/null +++ b/pyomo/repn/tests/cpxlp/linear_var_on_other_model.lp_v2.baseline @@ -0,0 +1,17 @@ +\* Source Pyomo model name=unknown *\ + +min +x1: ++1 x2 + +s.t. + +c_u_x3_: ++2 x2 ++1 x4 +<= 0 + +bounds + -inf <= x2 <= +inf + -inf <= x4 <= +inf +end diff --git a/pyomo/repn/tests/cpxlp/no_column_ordering_linear.lp_v2.baseline b/pyomo/repn/tests/cpxlp/no_column_ordering_linear.lp_v2.baseline new file mode 100644 index 00000000000..d51a63b7bb4 --- /dev/null +++ b/pyomo/repn/tests/cpxlp/no_column_ordering_linear.lp_v2.baseline @@ -0,0 +1,21 @@ +\* Source Pyomo model name=unknown *\ + +min +obj: ++1 a ++1 b ++1 c + +s.t. + +c_u_con_: ++1 a ++1 b ++1 c +<= 1 + +bounds + -inf <= a <= +inf + -inf <= b <= +inf + -inf <= c <= +inf +end diff --git a/pyomo/repn/tests/cpxlp/no_column_ordering_quadratic.lp_v2.baseline b/pyomo/repn/tests/cpxlp/no_column_ordering_quadratic.lp_v2.baseline new file mode 100644 index 00000000000..2a65bdf6560 --- /dev/null +++ b/pyomo/repn/tests/cpxlp/no_column_ordering_quadratic.lp_v2.baseline @@ -0,0 +1,37 @@ +\* Source Pyomo model name=unknown *\ + +min +obj: ++1 a ++1 b ++1 c ++ [ ++2 a ^ 2 ++2 a * b ++2 a * c ++2 b ^ 2 ++2 b * c ++2 c ^ 2 +] / 2 + +s.t. + +c_u_con_: ++1 a ++1 b ++1 c ++ [ ++1 a ^ 2 ++1 a * b ++1 a * c ++1 b ^ 2 ++1 b * c ++1 c ^ 2 +] +<= 1 + +bounds + -inf <= a <= +inf + -inf <= b <= +inf + -inf <= c <= +inf +end diff --git a/pyomo/repn/tests/cpxlp/no_row_ordering.lp_v2.baseline b/pyomo/repn/tests/cpxlp/no_row_ordering.lp_v2.baseline new file mode 100644 index 00000000000..cd6abf945c0 --- /dev/null +++ b/pyomo/repn/tests/cpxlp/no_row_ordering.lp_v2.baseline @@ -0,0 +1,35 @@ +\* Source Pyomo model name=unknown *\ + +min +obj: ++1 a + +s.t. + +c_l_con1_: ++1 a +>= 0 + +c_u_con2_: ++1 a +<= 1 + +r_l_con3_: ++1 a +>= 0 + +r_u_con3_: ++1 a +<= 1 + +c_e_con4(1)_: ++1 a += 1 + +c_e_con4(2)_: ++1 a += 2 + +bounds + -inf <= a <= +inf +end diff --git a/pyomo/repn/tests/cpxlp/quadratic_var_on_other_model.lp_v2.baseline b/pyomo/repn/tests/cpxlp/quadratic_var_on_other_model.lp_v2.baseline new file mode 100644 index 00000000000..23238f2a95a --- /dev/null +++ b/pyomo/repn/tests/cpxlp/quadratic_var_on_other_model.lp_v2.baseline @@ -0,0 +1,18 @@ +\* Source Pyomo model name=unknown *\ + +min +x1: ++1 x2 + +s.t. + +c_u_x3_: ++ [ ++1 x2 * x4 +] +<= 0 + +bounds + -inf <= x2 <= +inf + -inf <= x4 <= +inf +end diff --git a/pyomo/repn/tests/cpxlp/row_ordering.lp_v2.baseline b/pyomo/repn/tests/cpxlp/row_ordering.lp_v2.baseline new file mode 100644 index 00000000000..a3bc2b7777f --- /dev/null +++ b/pyomo/repn/tests/cpxlp/row_ordering.lp_v2.baseline @@ -0,0 +1,35 @@ +\* Source Pyomo model name=unknown *\ + +min +obj: ++1 a + +s.t. + +c_e_con4(2)_: ++1 a += 2 + +c_e_con4(1)_: ++1 a += 1 + +r_l_con3_: ++1 a +>= 0 + +r_u_con3_: ++1 a +<= 1 + +c_u_con2_: ++1 a +<= 1 + +c_l_con1_: ++1 a +>= 0 + +bounds + -inf <= a <= +inf +end diff --git a/pyomo/repn/tests/cpxlp/test_cpxlp.py b/pyomo/repn/tests/cpxlp/test_cpxlp.py index 349fe664ee4..28c9043a8de 100644 --- a/pyomo/repn/tests/cpxlp/test_cpxlp.py +++ b/pyomo/repn/tests/cpxlp/test_cpxlp.py @@ -15,56 +15,57 @@ import os import random -from filecmp import cmp +from ..lp_diff import load_and_compare_lp_baseline + import pyomo.common.unittest as unittest from pyomo.common.log import LoggingIntercept +from pyomo.common.fileutils import this_file_dir from pyomo.common.tempfiles import TempfileManager -from pyomo.environ import ( - ConcreteModel, Var, Constraint, Objective, Block, ComponentMap, -) +from pyomo.environ import ConcreteModel, Var, Constraint, Objective, Block, ComponentMap + +thisdir = this_file_dir() -thisdir = os.path.dirname(os.path.abspath(__file__)) -class TestCPXLPOrdering(unittest.TestCase): +class _CPXLPOrdering_Suite(object): + @classmethod + def setUpClass(cls): + cls.context = TempfileManager.new_context() + cls.tempdir = cls.context.create_tempdir() - def _cleanup(self, fname): - try: - os.remove(fname) - except OSError: - pass + @classmethod + def tearDownClass(cls): + cls.context.release(remove=False) def _get_fnames(self): class_name, test_name = self.id().split('.')[-2:] - prefix = os.path.join(thisdir, test_name.replace("test_", "", 1)) - return prefix+".lp.baseline", prefix+".lp.out" + prefix = test_name.replace("test_", "", 1) + return ( + os.path.join(thisdir, prefix + ".lp.baseline"), + os.path.join(self.tempdir, prefix + ".lp.out"), + ) def _check_baseline(self, model, **kwds): - baseline_fname, test_fname = self._get_fnames() + baseline, testfile = self._get_fnames() io_options = {"symbolic_solver_labels": True} io_options.update(kwds) - model.write(test_fname, - format="lp", - io_options=io_options) - self.assertTrue(cmp( - test_fname, - baseline_fname), - msg="Files %s and %s differ" % (test_fname, baseline_fname)) - self._cleanup(test_fname) - - # generates an expression in a randomized way so that - # we can test for consistent ordering of expressions - # in the LP file + model.write(testfile, format=self._lp_version, io_options=io_options) + self.assertEqual( + *load_and_compare_lp_baseline(baseline, testfile, self._lp_version) + ) + + # Note that this used to generate a random permutation of the + # expression terms. However, the default variable ordering in LPv2 + # is the order in which variables are encountered when walking + # expressions. Removing the randomization does not significantly + # change the intent of the test, as the raw term list does not + # correspond to the final term sequence in the LP tile. def _gen_expression(self, terms): - terms = list(terms) - random.shuffle(terms) expr = 0.0 for term in terms: if type(term) is tuple: - prodterms = list(term) - random.shuffle(prodterms) prodexpr = 1.0 - for x in prodterms: + for x in term: prodexpr *= x expr += prodexpr else: @@ -77,9 +78,17 @@ def test_no_column_ordering_quadratic(self): model.b = Var() model.c = Var() - terms = [model.a, model.b, model.c, - (model.a, model.a), (model.b, model.b), (model.c, model.c), - (model.a, model.b), (model.a, model.c), (model.b, model.c)] + terms = [ + model.a, + model.b, + model.c, + (model.a, model.a), + (model.b, model.b), + (model.c, model.c), + (model.a, model.b), + (model.a, model.c), + (model.b, model.c), + ] model.obj = Objective(expr=self._gen_expression(terms)) model.con = Constraint(expr=self._gen_expression(terms) <= 1) self._check_baseline(model) @@ -90,9 +99,17 @@ def test_column_ordering_quadratic(self): model.b = Var() model.c = Var() - terms = [model.a, model.b, model.c, - (model.a, model.a), (model.b, model.b), (model.c, model.c), - (model.a, model.b), (model.a, model.c), (model.b, model.c)] + terms = [ + model.a, + model.b, + model.c, + (model.a, model.a), + (model.b, model.b), + (model.c, model.c), + (model.a, model.b), + (model.a, model.c), + (model.b, model.c), + ] model.obj = Objective(expr=self._gen_expression(terms)) model.con = Constraint(expr=self._gen_expression(terms) <= 1) # reverse the symbolic ordering @@ -138,7 +155,7 @@ def test_no_row_ordering(self): components["con1"] = Constraint(expr=model.a >= 0) components["con2"] = Constraint(expr=model.a <= 1) components["con3"] = Constraint(expr=(0, model.a, 1)) - components["con4"] = Constraint([1,2], rule=lambda m, i: model.a == i) + components["con4"] = Constraint([1, 2], rule=lambda m, i: model.a == i) # add components in random order random_order = list(components.keys()) @@ -157,7 +174,7 @@ def test_row_ordering(self): components["con1"] = Constraint(expr=model.a >= 0) components["con2"] = Constraint(expr=model.a <= 1) components["con3"] = Constraint(expr=(0, model.a, 1)) - components["con4"] = Constraint([1,2], rule=lambda m, i: model.a == i) + components["con4"] = Constraint([1, 2], rule=lambda m, i: model.a == i) # add components in random order random_order = list(components.keys()) @@ -174,22 +191,35 @@ def test_row_ordering(self): row_order[model.con4[2]] = -1 self._check_baseline(model, row_order=row_order) + +class Test_CPXLPOrdering_v1(_CPXLPOrdering_Suite, unittest.TestCase): + _lp_version = 'lp_v1' + + +class Test_CPXLPOrdering_v2(_CPXLPOrdering_Suite, unittest.TestCase): + _lp_version = 'lp_v2' + + class TestCPXLP_writer(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.context = TempfileManager.new_context() + cls.tempdir = cls.context.create_tempdir() - def _cleanup(self, fname): - try: - os.remove(fname) - except OSError: - pass + @classmethod + def tearDownClass(cls): + cls.context.release(remove=False) def _get_fnames(self): class_name, test_name = self.id().split('.')[-2:] - prefix = os.path.join(thisdir, test_name.replace("test_", "", 1)) - return prefix+".lp.baseline", prefix+".lp.out" + prefix = test_name.replace("test_", "", 1) + return ( + os.path.join(thisdir, prefix + ".lp.baseline"), + os.path.join(self.tempdir, prefix + ".lp.out"), + ) - def test_var_on_other_model(self): + def test_linear_var_on_other_model(self): baseline_fname, test_fname = self._get_fnames() - self._cleanup(test_fname) other = ConcreteModel() other.a = Var() @@ -199,31 +229,48 @@ def test_var_on_other_model(self): model.obj = Objective(expr=model.x) # Test var in linear expression - model.c = Constraint(expr=other.a + 2*model.x <= 0) + model.c = Constraint(expr=other.a + 2 * model.x <= 0) with LoggingIntercept() as LOG: - self.assertRaises( - KeyError, - model.write, test_fname, format='lp') + self.assertRaises(KeyError, model.write, test_fname, format='lp_v1') self.assertEqual( LOG.getvalue().replace('\n', ' ').strip(), 'Model contains an expression (c) that contains a variable ' '(a) that is not attached to an active block on the ' - 'submodel being written') - self._cleanup(test_fname) + 'submodel being written', + ) + + # OK with LPv2 + model.write(test_fname, format='lp_v2') + self.assertEqual( + *load_and_compare_lp_baseline(baseline_fname, test_fname, 'lp_v2') + ) + + def test_quadratic_var_on_other_model(self): + baseline_fname, test_fname = self._get_fnames() + + other = ConcreteModel() + other.a = Var() + + model = ConcreteModel() + model.x = Var() + model.obj = Objective(expr=model.x) # Test var in quadratic expression - del model.c model.c = Constraint(expr=other.a * model.x <= 0) with LoggingIntercept() as LOG: - self.assertRaises( - KeyError, - model.write, test_fname, format='lp') + self.assertRaises(KeyError, model.write, test_fname, format='lp_v1') self.assertEqual( LOG.getvalue().replace('\n', ' ').strip(), 'Model contains an expression (c) that contains a variable ' '(a) that is not attached to an active block on the ' - 'submodel being written') - self._cleanup(test_fname) + 'submodel being written', + ) + + # OK with LPv2 + model.write(test_fname, format='lp_v2') + self.assertEqual( + *load_and_compare_lp_baseline(baseline_fname, test_fname, 'lp_v2') + ) def test_var_on_deactivated_block(self): model = ConcreteModel() @@ -231,53 +278,60 @@ def test_var_on_deactivated_block(self): model.other = Block() model.other.a = Var() model.other.deactivate() - model.c = Constraint(expr=model.other.a + 2*model.x <= 0) + model.c = Constraint(expr=model.other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) baseline_fname, test_fname = self._get_fnames() - self._cleanup(test_fname) - model.write(test_fname, format='lp') - self.assertTrue(cmp( - test_fname, - baseline_fname), - msg="Files %s and %s differ" % (test_fname, baseline_fname)) + model.write(test_fname, format='lp_v1') + self.assertEqual( + *load_and_compare_lp_baseline(baseline_fname, test_fname, 'lp_v1') + ) + + model.write(test_fname, format='lp_v2') + self.assertEqual( + *load_and_compare_lp_baseline(baseline_fname, test_fname, 'lp_v2') + ) def test_var_on_nonblock(self): class Foo(Block().__class__): def __init__(self, *args, **kwds): - kwds.setdefault('ctype',Foo) - super(Foo,self).__init__(*args, **kwds) + kwds.setdefault('ctype', Foo) + super(Foo, self).__init__(*args, **kwds) model = ConcreteModel() model.x = Var() model.other = Foo() + model.other.deactivate() model.other.a = Var() - model.c = Constraint(expr=model.other.a + 2*model.x <= 0) + model.c = Constraint(expr=model.other.a + 2 * model.x <= 0) model.obj = Objective(expr=model.x) baseline_fname, test_fname = self._get_fnames() - self._cleanup(test_fname) - self.assertRaises( - KeyError, - model.write, test_fname, format='lp') - self._cleanup(test_fname) + self.assertRaises(KeyError, model.write, test_fname, format='lp_v1') + + # OK with LPv2 + model.write(test_fname, format='lp_v2') + self.assertEqual( + *load_and_compare_lp_baseline(baseline_fname, test_fname, 'lp_v2') + ) def test_obj_con_cache(self): + # Note that the repn caching is only implemented for the v1 writer model = ConcreteModel() model.x = Var() model.c = Constraint(expr=model.x >= 1) - model.obj = Objective(expr=model.x*2) + model.obj = Objective(expr=model.x * 2) with TempfileManager.new_context() as TMP: lp_file = TMP.create_tempfile(suffix='.lp') - model.write(lp_file, format='lp') + model.write(lp_file, format='lp_v1') self.assertFalse(hasattr(model, '_repn')) with open(lp_file) as FILE: lp_ref = FILE.read() lp_file = TMP.create_tempfile(suffix='.lp') model._gen_obj_repn = True - model.write(lp_file) + model.write(lp_file, format='lp_v1') self.assertEqual(len(model._repn), 1) self.assertIn(model.obj, model._repn) obj_repn = model._repn[model.obj] @@ -288,7 +342,7 @@ def test_obj_con_cache(self): lp_file = TMP.create_tempfile(suffix='.lp') model._gen_obj_repn = None model._gen_con_repn = True - model.write(lp_file) + model.write(lp_file, format='lp_v1') self.assertEqual(len(model._repn), 2) self.assertIn(model.obj, model._repn) self.assertIn(model.c, model._repn) @@ -302,7 +356,7 @@ def test_obj_con_cache(self): lp_file = TMP.create_tempfile(suffix='.lp') model._gen_obj_repn = None model._gen_con_repn = None - model.write(lp_file) + model.write(lp_file, format='lp_v1') self.assertEqual(len(model._repn), 2) self.assertIn(model.obj, model._repn) self.assertIn(model.c, model._repn) @@ -315,7 +369,7 @@ def test_obj_con_cache(self): lp_file = TMP.create_tempfile(suffix='.lp') model._gen_obj_repn = True model._gen_con_repn = True - model.write(lp_file) + model.write(lp_file, format='lp_v1') self.assertEqual(len(model._repn), 2) self.assertIn(model.obj, model._repn) self.assertIn(model.c, model._repn) @@ -331,12 +385,15 @@ def test_obj_con_cache(self): model._gen_obj_repn = False model._gen_con_repn = False import pyomo.repn.plugins.ampl.ampl_ as ampl_ + gsr = ampl_.generate_standard_repn try: + def dont_call_gsr(*args, **kwargs): self.fail("generate_standard_repn should not be called") + ampl_.generate_standard_repn = dont_call_gsr - model.write(lp_file) + model.write(lp_file, format='lp_v1') finally: ampl_.generate_standard_repn = gsr self.assertEqual(len(model._repn), 2) diff --git a/pyomo/repn/tests/cpxlp/var_on_deactivated_block.lp_v2.baseline b/pyomo/repn/tests/cpxlp/var_on_deactivated_block.lp_v2.baseline new file mode 100644 index 00000000000..38540b7711d --- /dev/null +++ b/pyomo/repn/tests/cpxlp/var_on_deactivated_block.lp_v2.baseline @@ -0,0 +1,17 @@ +\* Source Pyomo model name=unknown *\ + +min +x1: ++1 x2 + +s.t. + +c_u_x3_: ++2 x2 ++1 x4 +<= 0 + +bounds + -inf <= x2 <= +inf + -inf <= x4 <= +inf +end diff --git a/pyomo/repn/tests/cpxlp/var_on_nonblock.lp_v2.baseline b/pyomo/repn/tests/cpxlp/var_on_nonblock.lp_v2.baseline new file mode 100644 index 00000000000..38540b7711d --- /dev/null +++ b/pyomo/repn/tests/cpxlp/var_on_nonblock.lp_v2.baseline @@ -0,0 +1,17 @@ +\* Source Pyomo model name=unknown *\ + +min +x1: ++1 x2 + +s.t. + +c_u_x3_: ++2 x2 ++1 x4 +<= 0 + +bounds + -inf <= x2 <= +inf + -inf <= x4 <= +inf +end diff --git a/pyomo/repn/tests/diffutils.py b/pyomo/repn/tests/diffutils.py new file mode 100644 index 00000000000..24188d46c86 --- /dev/null +++ b/pyomo/repn/tests/diffutils.py @@ -0,0 +1,55 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import os + +import pyomo.core.expr as EXPR + + +def compare_floats(base, test, abstol=1e-14, reltol=1e-14): + base = base.split() + test = test.split() + if len(base) != len(test): + return False + for i, b in enumerate(base): + if b.strip() == test[i].strip(): + continue + try: + b = float(b) + t = float(test[i]) + except: + return False + if abs(b - t) < abstol: + continue + if abs((b - t) / max(abs(b), abs(t))) < reltol: + continue + return False + return True + + +def load_baseline(baseline, testfile, extension, version): + with open(testfile, 'r') as FILE: + test = FILE.read() + if baseline.endswith(f'.{extension}'): + _tmp = [baseline[:-3]] + else: + _tmp = baseline.split(f'.{extension}.', 1) + _tmp.insert(1, f'expr{int(EXPR.Mode.CURRENT)}') + _tmp.insert(2, version) + if not os.path.exists('.'.join(_tmp)): + _tmp.pop(1) + if not os.path.exists('.'.join(_tmp)): + _tmp = [] + if _tmp: + baseline = '.'.join(_tmp) + with open(baseline, 'r') as FILE: + base = FILE.read() + return base, test, baseline, testfile diff --git a/pyomo/repn/tests/gams/small14a_testCase.py b/pyomo/repn/tests/gams/small14a_testCase.py index cde511f6f81..c7e3e0805ea 100644 --- a/pyomo/repn/tests/gams/small14a_testCase.py +++ b/pyomo/repn/tests/gams/small14a_testCase.py @@ -9,8 +9,28 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.environ import ConcreteModel, Var, Objective, Constraint, log, log10, sin, cos, tan, sinh, cosh, tanh, asin, acos, atan, exp, sqrt, ceil, floor -from math import pi +from pyomo.environ import ( + ConcreteModel, + Var, + Objective, + Constraint, + log, + log10, + sin, + cos, + tan, + sinh, + cosh, + tanh, + asin, + acos, + atan, + exp, + sqrt, + ceil, + floor, +) +from math import pi model = ConcreteModel() @@ -19,29 +39,29 @@ model.ZERO = Var(initialize=0) -model.obj = Objective(expr=model.ONE+model.ZERO) +model.obj = Objective(expr=model.ONE + model.ZERO) -model.c_log = Constraint(expr=log(model.ONE) == 0) -model.c_log10 = Constraint(expr=log10(model.ONE) == 0) +model.c_log = Constraint(expr=log(model.ONE) == 0) +model.c_log10 = Constraint(expr=log10(model.ONE) == 0) -model.c_sin = Constraint(expr=sin(model.ZERO) == 0) -model.c_cos = Constraint(expr=cos(model.ZERO) == 1) -model.c_tan = Constraint(expr=tan(model.ZERO) == 0) +model.c_sin = Constraint(expr=sin(model.ZERO) == 0) +model.c_cos = Constraint(expr=cos(model.ZERO) == 1) +model.c_tan = Constraint(expr=tan(model.ZERO) == 0) -model.c_sinh = Constraint(expr=sinh(model.ZERO) == 0) -model.c_cosh = Constraint(expr=cosh(model.ZERO) == 1) -model.c_tanh = Constraint(expr=tanh(model.ZERO) == 0) +model.c_sinh = Constraint(expr=sinh(model.ZERO) == 0) +model.c_cosh = Constraint(expr=cosh(model.ZERO) == 1) +model.c_tanh = Constraint(expr=tanh(model.ZERO) == 0) -model.c_asin = Constraint(expr=asin(model.ZERO) == 0) -model.c_acos = Constraint(expr=acos(model.ZERO) == pi/2) -model.c_atan = Constraint(expr=atan(model.ZERO) == 0) +model.c_asin = Constraint(expr=asin(model.ZERO) == 0) +model.c_acos = Constraint(expr=acos(model.ZERO) == pi / 2) +model.c_atan = Constraint(expr=atan(model.ZERO) == 0) -#model.c_asinh = Constraint(expr=asinh(model.ZERO) == 0) -#model.c_acosh = Constraint(expr=acosh((e**2 + model.ONE)/(2*e)) == 0) -#model.c_atanh = Constraint(expr=atanh(model.ZERO) == 0) +# model.c_asinh = Constraint(expr=asinh(model.ZERO) == 0) +# model.c_acosh = Constraint(expr=acosh((e**2 + model.ONE)/(2*e)) == 0) +# model.c_atanh = Constraint(expr=atanh(model.ZERO) == 0) -model.c_exp = Constraint(expr=exp(model.ZERO) == 1) -model.c_sqrt = Constraint(expr=sqrt(model.ONE) == 1) -model.c_ceil = Constraint(expr=ceil(model.ONE) == 1) -model.c_floor = Constraint(expr=floor(model.ONE) == 1) -model.c_abs = Constraint(expr=abs(model.ONE) == 1) +model.c_exp = Constraint(expr=exp(model.ZERO) == 1) +model.c_sqrt = Constraint(expr=sqrt(model.ONE) == 1) +model.c_ceil = Constraint(expr=ceil(model.ONE) == 1) +model.c_floor = Constraint(expr=floor(model.ONE) == 1) +model.c_abs = Constraint(expr=abs(model.ONE) == 1) diff --git a/pyomo/repn/tests/gams/test_gams.py b/pyomo/repn/tests/gams/test_gams.py index 41ebaed8f30..e6b729e5dfc 100644 --- a/pyomo/repn/tests/gams/test_gams.py +++ b/pyomo/repn/tests/gams/test_gams.py @@ -19,21 +19,38 @@ from filecmp import cmp import pyomo.common.unittest as unittest from pyomo.core.base import NumericLabeler, SymbolMap -from pyomo.environ import (Block, ConcreteModel, Constraint, - Objective, TransformationFactory, Var, exp, log, - ceil, floor, asin, acos, atan, asinh, acosh, atanh, - Binary, quicksum) +from pyomo.environ import ( + Block, + ConcreteModel, + Constraint, + Objective, + TransformationFactory, + Var, + exp, + log, + ceil, + floor, + asin, + acos, + atan, + asinh, + acosh, + atanh, + Binary, + quicksum, +) from pyomo.gdp import Disjunction from pyomo.network import Port, Arc -from pyomo.repn.plugins.gams_writer import (StorageTreeChecker, - expression_to_string, - split_long_line) +from pyomo.repn.plugins.gams_writer import ( + StorageTreeChecker, + expression_to_string, + split_long_line, +) thisdir = os.path.dirname(os.path.abspath(__file__)) class Test(unittest.TestCase): - def _cleanup(self, fname): try: os.remove(fname) @@ -48,14 +65,9 @@ def _get_fnames(self): def _check_baseline(self, model, **kwds): baseline_fname, test_fname = self._get_fnames() self._cleanup(test_fname) - io_options = { - "symbolic_solver_labels": True, - "output_fixed_variables": True, - } + io_options = {"symbolic_solver_labels": True, "output_fixed_variables": True} io_options.update(kwds) - model.write(test_fname, - format="gams", - io_options=io_options) + model.write(test_fname, format="gams", io_options=io_options) try: self.assertTrue(cmp(test_fname, baseline_fname)) except: @@ -63,9 +75,9 @@ def _check_baseline(self, model, **kwds): f1_contents = list(filter(None, f1.read().split())) f2_contents = list(filter(None, f2.read().split())) self.assertEqual( - f1_contents, f2_contents, - "\n\nbaseline: %s\ntestFile: %s\n" % ( - baseline_fname, test_fname) + f1_contents, + f2_contents, + "\n\nbaseline: %s\ntestFile: %s\n" % (baseline_fname, test_fname), ) self._cleanup(test_fname) @@ -89,9 +101,17 @@ def test_no_column_ordering_quadratic(self): model.b = Var() model.c = Var() - terms = [model.a, model.b, model.c, - (model.a, model.a), (model.b, model.b), (model.c, model.c), - (model.a, model.b), (model.a, model.c), (model.b, model.c)] + terms = [ + model.a, + model.b, + model.c, + (model.a, model.a), + (model.b, model.b), + (model.c, model.c), + (model.a, model.b), + (model.a, model.c), + (model.b, model.c), + ] model.obj = Objective(expr=self._gen_expression(terms)) model.con = Constraint(expr=self._gen_expression(terms) <= 1) self._check_baseline(model) @@ -140,7 +160,7 @@ def test_fixed_linear_expr(self): m = ConcreteModel() m.y = Var(within=Binary) m.y.fix(0) - m.x = Var(bounds=(0,None)) + m.x = Var(bounds=(0, None)) m.c1 = Constraint(expr=quicksum([m.y, m.y], linear=True) >= 0) m.c2 = Constraint(expr=quicksum([m.x, m.y], linear=True) == 1) m.obj = Objective(expr=m.x) @@ -160,7 +180,7 @@ def nested(n_disj, _): m.choice = Disjunction(expr=[m.disj[0], m.disj[1]]) - m.c = Constraint(expr=m.x ** 2 + m.disj[1].nested['A'].indicator_var >= 1) + m.c = Constraint(expr=m.x**2 + m.disj[1].nested['A'].indicator_var >= 1) m.disj[0].indicator_var.fix(1) m.disj[1].deactivate() @@ -183,26 +203,34 @@ def test_quicksum(self): lbl = NumericLabeler('x') smap = SymbolMap(lbl) tc = StorageTreeChecker(m) - self.assertEqual(("x1 + x1", False), expression_to_string(m.c.body, tc, smap=smap)) + self.assertEqual( + ("x1 + x1", False), expression_to_string(m.c.body, tc, smap=smap) + ) m.x = Var() m.c2 = Constraint(expr=quicksum([m.x, m.y], linear=True) == 1) - self.assertEqual(("x2 + x1", False), expression_to_string(m.c2.body, tc, smap=smap)) + self.assertEqual( + ("x2 + x1", False), expression_to_string(m.c2.body, tc, smap=smap) + ) m.y.fix(1) lbl = NumericLabeler('x') smap = SymbolMap(lbl) tc = StorageTreeChecker(m) - self.assertEqual(("1 + 1", False), expression_to_string(m.c.body, tc, smap=smap)) + self.assertEqual( + ("1 + 1", False), expression_to_string(m.c.body, tc, smap=smap) + ) m.x = Var() m.c2 = Constraint(expr=quicksum([m.x, m.y], linear=True) == 1) - self.assertEqual(("x1 + 1", False), expression_to_string(m.c2.body, tc, smap=smap)) + self.assertEqual( + ("x1 + 1", False), expression_to_string(m.c2.body, tc, smap=smap) + ) def test_quicksum_integer_var_fixed(self): m = ConcreteModel() m.x = Var() m.y = Var(domain=Binary) m.c = Constraint(expr=quicksum([m.y, m.y], linear=True) == 1) - m.o = Objective(expr=m.x ** 2) + m.o = Objective(expr=m.x**2) m.y.fix(1) outs = StringIO() m.write(outs, format='gams') @@ -210,8 +238,11 @@ def test_quicksum_integer_var_fixed(self): def test_expr_xfrm(self): from pyomo.repn.plugins.gams_writer import ( - expression_to_string, StorageTreeChecker) + expression_to_string, + StorageTreeChecker, + ) from pyomo.core.expr.symbol_map import SymbolMap + M = ConcreteModel() M.abc = Var() @@ -220,33 +251,41 @@ def test_expr_xfrm(self): expr = M.abc**2.0 self.assertEqual(str(expr), "abc**2.0") - self.assertEqual(expression_to_string( - expr, tc, smap=smap), ("power(abc, 2)", False)) + self.assertEqual( + expression_to_string(expr, tc, smap=smap), ("power(abc, 2)", False) + ) expr = log(M.abc**2.0) self.assertEqual(str(expr), "log(abc**2.0)") - self.assertEqual(expression_to_string( - expr, tc, smap=smap), ("log(power(abc, 2))", False)) + self.assertEqual( + expression_to_string(expr, tc, smap=smap), ("log(power(abc, 2))", False) + ) expr = log(M.abc**2.0) + 5 self.assertEqual(str(expr), "log(abc**2.0) + 5") - self.assertEqual(expression_to_string( - expr, tc, smap=smap), ("log(power(abc, 2)) + 5", False)) + self.assertEqual( + expression_to_string(expr, tc, smap=smap), ("log(power(abc, 2)) + 5", False) + ) expr = exp(M.abc**2.0) + 5 self.assertEqual(str(expr), "exp(abc**2.0) + 5") - self.assertEqual(expression_to_string( - expr, tc, smap=smap), ("exp(power(abc, 2)) + 5", False)) + self.assertEqual( + expression_to_string(expr, tc, smap=smap), ("exp(power(abc, 2)) + 5", False) + ) - expr = log(M.abc**2.0)**4 + expr = log(M.abc**2.0) ** 4 self.assertEqual(str(expr), "log(abc**2.0)**4") - self.assertEqual(expression_to_string( - expr, tc, smap=smap), ("power(log(power(abc, 2)), 4)", False)) + self.assertEqual( + expression_to_string(expr, tc, smap=smap), + ("power(log(power(abc, 2)), 4)", False), + ) - expr = log(M.abc**2.0)**4.5 + expr = log(M.abc**2.0) ** 4.5 self.assertEqual(str(expr), "log(abc**2.0)**4.5") - self.assertEqual(expression_to_string( - expr, tc, smap=smap), ("log(power(abc, 2)) ** 4.5", False)) + self.assertEqual( + expression_to_string(expr, tc, smap=smap), + ("log(power(abc, 2)) ** 4.5", False), + ) def test_power_function_to_string(self): m = ConcreteModel() @@ -254,12 +293,15 @@ def test_power_function_to_string(self): lbl = NumericLabeler('x') smap = SymbolMap(lbl) tc = StorageTreeChecker(m) - self.assertEqual(expression_to_string( - m.x ** -3, tc, lbl, smap=smap), ("power(x1, (-3))", False)) - self.assertEqual(expression_to_string( - m.x ** 0.33, tc, smap=smap), ("x1 ** 0.33", False)) - self.assertEqual(expression_to_string( - pow(m.x, 2), tc, smap=smap), ("power(x1, 2)", False)) + self.assertEqual( + expression_to_string(m.x**-3, tc, smap=smap), ("power(x1, (-3))", False) + ) + self.assertEqual( + expression_to_string(m.x**0.33, tc, smap=smap), ("x1 ** 0.33", False) + ) + self.assertEqual( + expression_to_string(pow(m.x, 2), tc, smap=smap), ("power(x1, 2)", False) + ) def test_fixed_var_to_string(self): m = ConcreteModel() @@ -270,17 +312,24 @@ def test_fixed_var_to_string(self): lbl = NumericLabeler('x') smap = SymbolMap(lbl) tc = StorageTreeChecker(m) - self.assertEqual(expression_to_string( - m.x + m.y - m.z, tc, lbl, smap=smap), ("x1 + x2 + 3", False)) + self.assertEqual( + expression_to_string(m.x + m.y - m.z, tc, smap=smap), ("x1 + x2 + 3", False) + ) m.z.fix(-400) - self.assertEqual(expression_to_string( - m.z + m.y - m.z, tc, smap=smap), ("(-400) + x2 + 400", False)) + self.assertEqual( + expression_to_string(m.z + m.y - m.z, tc, smap=smap), + ("(-400) + x2 + 400", False), + ) m.z.fix(8.8) - self.assertEqual(expression_to_string( - m.x + m.z - m.y, tc, smap=smap), ("x1 + 8.8 - x2", False)) + self.assertEqual( + expression_to_string(m.x + m.z - m.y, tc, smap=smap), + ("x1 + 8.8 - x2", False), + ) m.z.fix(-8.8) - self.assertEqual(expression_to_string( - m.x * m.z - m.y, tc, smap=smap), ("x1*(-8.8) - x2", False)) + self.assertEqual( + expression_to_string(m.x * m.z - m.y, tc, smap=smap), + ("x1*(-8.8) - x2", False), + ) def test_dnlp_to_string(self): m = ConcreteModel() @@ -290,12 +339,15 @@ def test_dnlp_to_string(self): lbl = NumericLabeler('x') smap = SymbolMap(lbl) tc = StorageTreeChecker(m) - self.assertEqual(expression_to_string( - ceil(m.x), tc, lbl, smap=smap), ("ceil(x1)", True)) - self.assertEqual(expression_to_string( - floor(m.x), tc, lbl, smap=smap), ("floor(x1)", True)) - self.assertEqual(expression_to_string( - abs(m.x), tc, lbl, smap=smap), ("abs(x1)", True)) + self.assertEqual( + expression_to_string(ceil(m.x), tc, smap=smap), ("ceil(x1)", True) + ) + self.assertEqual( + expression_to_string(floor(m.x), tc, smap=smap), ("floor(x1)", True) + ) + self.assertEqual( + expression_to_string(abs(m.x), tc, smap=smap), ("abs(x1)", True) + ) def test_arcfcn_to_string(self): m = ConcreteModel() @@ -303,24 +355,27 @@ def test_arcfcn_to_string(self): lbl = NumericLabeler('x') smap = SymbolMap(lbl) tc = StorageTreeChecker(m) - self.assertEqual(expression_to_string( - asin(m.x), tc, lbl, smap=smap), ("arcsin(x1)", False)) - self.assertEqual(expression_to_string( - acos(m.x), tc, lbl, smap=smap), ("arccos(x1)", False)) - self.assertEqual(expression_to_string( - atan(m.x), tc, lbl, smap=smap), ("arctan(x1)", False)) + self.assertEqual( + expression_to_string(asin(m.x), tc, smap=smap), ("arcsin(x1)", False) + ) + self.assertEqual( + expression_to_string(acos(m.x), tc, smap=smap), ("arccos(x1)", False) + ) + self.assertEqual( + expression_to_string(atan(m.x), tc, smap=smap), ("arctan(x1)", False) + ) with self.assertRaisesRegex( - RuntimeError, - "GAMS files cannot represent the unary function asinh"): - expression_to_string(asinh(m.x), tc, lbl, smap=smap) + RuntimeError, "GAMS files cannot represent the unary function asinh" + ): + expression_to_string(asinh(m.x), tc, smap=smap) with self.assertRaisesRegex( - RuntimeError, - "GAMS files cannot represent the unary function acosh"): - expression_to_string(acosh(m.x), tc, lbl, smap=smap) + RuntimeError, "GAMS files cannot represent the unary function acosh" + ): + expression_to_string(acosh(m.x), tc, smap=smap) with self.assertRaisesRegex( - RuntimeError, - "GAMS files cannot represent the unary function atanh"): - expression_to_string(atanh(m.x), tc, lbl, smap=smap) + RuntimeError, "GAMS files cannot represent the unary function atanh" + ): + expression_to_string(atanh(m.x), tc, smap=smap) def test_gams_arc_in_active_constraint(self): m = ConcreteModel() @@ -358,16 +413,20 @@ def test_gams_expanded_arcs(self): def test_split_long_line(self): pat = "var1 + log(var2 / 9) - " line = (pat * 10000) + "x" - self.assertEqual(split_long_line(line), - pat * 3478 + "var1 +\n log(var2 / 9) - " + - pat * 3477 + "var1 +\n log(var2 / 9) - " + - pat * 3043 + "x") + self.assertEqual( + split_long_line(line), + pat * 3478 + + "var1 +\n log(var2 / 9) - " + + pat * 3477 + + "var1 +\n log(var2 / 9) - " + + pat * 3043 + + "x", + ) def test_split_long_line_no_comment(self): pat = "1000 * 2000 * " line = pat * 5715 + "x" - self.assertEqual(split_long_line(line), - pat * 5714 + "1000\n * 2000 * x") + self.assertEqual(split_long_line(line), pat * 5714 + "1000\n * 2000 * x") def test_solver_arg(self): m = ConcreteModel() @@ -384,21 +443,37 @@ def test_negative_float_double_operator(self): m.y = Var() m.z = Var(bounds=(0, 6)) m.c = Constraint(expr=(m.x * m.y * -2) == 0) - m.c2 = Constraint(expr=m.z ** -1.5 == 0) + m.c2 = Constraint(expr=m.z**-1.5 == 0) m.o = Objective(expr=m.z) m.y.fix(-7) m.x.fix(4) lbl = NumericLabeler('x') smap = SymbolMap(lbl) tc = StorageTreeChecker(m) - self.assertEqual(expression_to_string( - m.c.body, tc, smap=smap), ("4*(-7)*(-2)", False)) - self.assertEqual(expression_to_string( - m.c2.body, tc, smap=smap), ("x1 ** (-1.5)", False)) + self.assertEqual( + expression_to_string(m.c.body, tc, smap=smap), ("4*(-7)*(-2)", False) + ) + self.assertEqual( + expression_to_string(m.c2.body, tc, smap=smap), ("x1 ** (-1.5)", False) + ) + + def test_issue_2819(self): + m = ConcreteModel() + m.x = Var() + m.z = Var() + t = 0.55 + m.x.fix(3.5) + e = (m.x - 4) ** 2 + (m.z - 1) ** 2 - t + tc = StorageTreeChecker(m) + smap = SymbolMap() + test = expression_to_string(e, tc, smap=smap) + self.assertEqual( + test, ('power((3.5 + (-4)), 2) + power((z + (-1)), 2) + (-0.55)', False) + ) -class TestGams_writer(unittest.TestCase): +class TestGams_writer(unittest.TestCase): def _cleanup(self, fname): try: os.remove(fname) @@ -421,9 +496,7 @@ def test_var_on_other_model(self): baseline_fname, test_fname = self._get_fnames() self._cleanup(test_fname) - self.assertRaises( - RuntimeError, - model.write, test_fname, format='gams') + self.assertRaises(RuntimeError, model.write, test_fname, format='gams') self._cleanup(test_fname) def test_var_on_nonblock(self): @@ -441,9 +514,7 @@ def __init__(self, *args, **kwds): baseline_fname, test_fname = self._get_fnames() self._cleanup(test_fname) - self.assertRaises( - RuntimeError, - model.write, test_fname, format='gams') + self.assertRaises(RuntimeError, model.write, test_fname, format='gams') self._cleanup(test_fname) diff --git a/pyomo/repn/tests/gams/test_gams_comparison.py b/pyomo/repn/tests/gams/test_gams_comparison.py index e1e97fc8108..4e530b10d43 100644 --- a/pyomo/repn/tests/gams/test_gams_comparison.py +++ b/pyomo/repn/tests/gams/test_gams_comparison.py @@ -37,9 +37,9 @@ invalidlist = [] validlist = [] -invalid_tests = {'small14',} +invalid_tests = {'small14'} for f in glob.glob(join(datadir, '*_testCase.py')): - name = re.split('[._]',os.path.basename(f))[0] + name = re.split('[._]', os.path.basename(f))[0] if name in invalid_tests: # Create some list invalidlist.append((name, datadir)) @@ -47,15 +47,14 @@ validlist.append((name, datadir)) for f in glob.glob(join(currdir, '*_testCase.py')): - name = re.split('[._]',os.path.basename(f))[0] + name = re.split('[._]', os.path.basename(f))[0] validlist.append((name, currdir)) class Tests(unittest.TestCase): - def pyomo(self, cmd): os.chdir(currdir) - output = main.main(['convert', '--logging=quiet', '-c']+cmd) + output = main.main(['convert', '--logging=quiet', '-c'] + cmd) return output def setUp(self): @@ -73,11 +72,9 @@ class BaselineTests(Tests): @parameterized.parameterized.expand(input=validlist) def gams_writer_baseline_test(self, name, targetdir): - baseline = join(currdir, name+'.pyomo.gms') - testFile = TempfileManager.create_tempfile( - suffix=name + '.test.gms') - cmd = ['--output=' + testFile, - join(targetdir, name + '_testCase.py')] + baseline = join(currdir, name + '.pyomo.gms') + testFile = TempfileManager.create_tempfile(suffix=name + '.test.gms') + cmd = ['--output=' + testFile, join(targetdir, name + '_testCase.py')] if os.path.exists(join(targetdir, name + '.dat')): cmd.append(join(targetdir, name + '.dat')) self.pyomo(cmd) @@ -90,19 +87,18 @@ def gams_writer_baseline_test(self, name, targetdir): f1_contents = list(filter(None, f1.read().split())) f2_contents = list(filter(None, f2.read().split())) self.assertEqual( - f1_contents, f2_contents, - "\n\nbaseline: %s\ntestFile: %s\n" % (baseline, testFile) + f1_contents, + f2_contents, + "\n\nbaseline: %s\ntestFile: %s\n" % (baseline, testFile), ) - @parameterized.parameterized.expand(input=invalidlist) def gams_writer_test_invalid(self, name, targetdir): with self.assertRaisesRegex( - RuntimeError, "GAMS files cannot represent the unary function"): - testFile = TempfileManager.create_tempfile( - suffix=name + '.test.gms') - cmd = ['--output=' + testFile, - join(targetdir, name + '_testCase.py')] + RuntimeError, "GAMS files cannot represent the unary function" + ): + testFile = TempfileManager.create_tempfile(suffix=name + '.test.gms') + cmd = ['--output=' + testFile, join(targetdir, name + '_testCase.py')] if os.path.exists(join(targetdir, name + '.dat')): cmd.append(join(targetdir, name + '.dat')) self.pyomo(cmd) diff --git a/pyomo/repn/tests/lp_diff.py b/pyomo/repn/tests/lp_diff.py new file mode 100644 index 00000000000..23b24f8b51b --- /dev/null +++ b/pyomo/repn/tests/lp_diff.py @@ -0,0 +1,82 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import re + +from difflib import SequenceMatcher, unified_diff + +from pyomo.repn.tests.diffutils import compare_floats, load_baseline + +_strip_comment = re.compile(r'\s*\\.*') + + +def _update_subsets(subset, base, test): + for i, j in zip(*subset): + if compare_floats(base[i], test[j]): + base[i] = test[j] + + +def _preprocess_data(data): + for line in data.splitlines(): + fields = line.split() + for i, f in enumerate(fields): + try: + if int(f) == float(f): + fields[i] = str(int(f)) + except: + pass + yield ' '.join(fields) + + +def lp_diff(base, test, baseline='baseline', testfile='testfile'): + if test == base: + return [], [] + + test = list(_preprocess_data(test)) + base = list(_preprocess_data(base)) + if test == base: + return [], [] + + for group in SequenceMatcher(None, base, test).get_grouped_opcodes(3): + for tag, i1, i2, j1, j2 in group: + if tag != 'replace': + continue + _update_subsets((range(i1, i2), range(j1, j2)), base, test) + + if test == base: + return [], [] + + print( + ''.join( + unified_diff( + [_ + "\n" for _ in base], + [_ + "\n" for _ in test], + fromfile=baseline, + tofile=testfile, + ) + ) + ) + return base, test + + +def load_lp_baseline(baseline, testfile, version='lp'): + return load_baseline(baseline, testfile, 'lp', version) + + +def load_and_compare_lp_baseline(baseline, testfile, version='lp'): + return lp_diff(*load_lp_baseline(baseline, testfile, version)) + + +if __name__ == '__main__': + import sys + + base, test = load_and_compare_lp_baseline(sys.argv[1], sys.argv[2]) + sys.exit(1 if base or test else 0) diff --git a/pyomo/repn/tests/mps/test_mps.py b/pyomo/repn/tests/mps/test_mps.py index 073b448d2be..44f3d93b75e 100644 --- a/pyomo/repn/tests/mps/test_mps.py +++ b/pyomo/repn/tests/mps/test_mps.py @@ -22,8 +22,8 @@ thisdir = os.path.dirname(os.path.abspath(__file__)) -class TestMPSOrdering(unittest.TestCase): +class TestMPSOrdering(unittest.TestCase): def _cleanup(self, fname): try: os.remove(fname) @@ -33,20 +33,18 @@ def _cleanup(self, fname): def _get_fnames(self): class_name, test_name = self.id().split('.')[-2:] prefix = os.path.join(thisdir, test_name.replace("test_", "", 1)) - return prefix+".mps.baseline", prefix+".mps.out" + return prefix + ".mps.baseline", prefix + ".mps.out" def _check_baseline(self, model, **kwds): baseline_fname, test_fname = self._get_fnames() self._cleanup(test_fname) io_options = {"symbolic_solver_labels": True} io_options.update(kwds) - model.write(test_fname, - format="mps", - io_options=io_options) - self.assertTrue(cmp( - test_fname, - baseline_fname), - msg="Files %s and %s differ" % (test_fname, baseline_fname)) + model.write(test_fname, format="mps", io_options=io_options) + self.assertTrue( + cmp(test_fname, baseline_fname), + msg="Files %s and %s differ" % (test_fname, baseline_fname), + ) self._cleanup(test_fname) # generates an expression in a randomized way so that @@ -74,9 +72,17 @@ def test_no_column_ordering_quadratic(self): model.b = Var() model.c = Var() - terms = [model.a, model.b, model.c, - (model.a, model.a), (model.b, model.b), (model.c, model.c), - (model.a, model.b), (model.a, model.c), (model.b, model.c)] + terms = [ + model.a, + model.b, + model.c, + (model.a, model.a), + (model.b, model.b), + (model.c, model.c), + (model.a, model.b), + (model.a, model.c), + (model.b, model.c), + ] model.obj = Objective(expr=self._gen_expression(terms)) model.con = Constraint(expr=self._gen_expression(terms) <= 1) self._check_baseline(model) @@ -87,9 +93,17 @@ def test_column_ordering_quadratic(self): model.b = Var() model.c = Var() - terms = [model.a, model.b, model.c, - (model.a, model.a), (model.b, model.b), (model.c, model.c), - (model.a, model.b), (model.a, model.c), (model.b, model.c)] + terms = [ + model.a, + model.b, + model.c, + (model.a, model.a), + (model.b, model.b), + (model.c, model.c), + (model.a, model.b), + (model.a, model.c), + (model.b, model.c), + ] model.obj = Objective(expr=self._gen_expression(terms)) model.con = Constraint(expr=self._gen_expression(terms) <= 1) # reverse the symbolic ordering @@ -135,7 +149,7 @@ def test_no_row_ordering(self): components["con1"] = Constraint(expr=model.a >= 0) components["con2"] = Constraint(expr=model.a <= 1) components["con3"] = Constraint(expr=(0, model.a, 1)) - components["con4"] = Constraint([1,2], rule=lambda m, i: model.a == i) + components["con4"] = Constraint([1, 2], rule=lambda m, i: model.a == i) # add components in random order random_order = list(components.keys()) @@ -154,7 +168,7 @@ def test_row_ordering(self): components["con1"] = Constraint(expr=model.a >= 0) components["con2"] = Constraint(expr=model.a <= 1) components["con3"] = Constraint(expr=(0, model.a, 1)) - components["con4"] = Constraint([1,2], rule=lambda m, i: model.a == i) + components["con4"] = Constraint([1, 2], rule=lambda m, i: model.a == i) # add components in random order random_order = list(components.keys()) @@ -171,5 +185,6 @@ def test_row_ordering(self): row_order[model.con4[2]] = -1 self._check_baseline(model, row_order=row_order) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/repn/tests/nl_diff.py b/pyomo/repn/tests/nl_diff.py new file mode 100644 index 00000000000..e96d6f6357b --- /dev/null +++ b/pyomo/repn/tests/nl_diff.py @@ -0,0 +1,125 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +import itertools +import re + +from difflib import SequenceMatcher, unified_diff + +from pyomo.repn.tests.diffutils import compare_floats, load_baseline +import pyomo.repn.plugins.nl_writer as nl_writer + +template = nl_writer.text_nl_debug_template + +_norm_whitespace = re.compile(r'[^\S\n]+') +_norm_integers = re.compile(r'(?m)\.0+$') +_norm_comment = re.compile(r'\s*#\s*') +_strip_comment = re.compile(r'\s*#.*') +_norm_negation = re.compile(r'(?m)^o2(\s*#\s*\*)?\nn-1(.0)?\s*\n') +_norm_timesone = re.compile(r'(?m)^o2(\s*#\s*\*)?\nn1(.0)?\s*\n') +_norm_double_negation = re.compile(r'(?m)^o16(\s*#\s*-)?\no16(\s*#\s*-)?\n') + + +def _update_subsets(subset, base, test): + for i, j in zip(*subset): + # Try checking for numbers + if base[i][0] == 'n' and test[j][0] == 'n': + if compare_floats(base[i][1:], test[j][1:]): + test[j] = base[i] + elif compare_floats(base[i], test[j]): + test[j] = base[i] + else: + # try stripping comments, but only if it results in a match + base_nc = _strip_comment.sub('', base[i]) + test_nc = _strip_comment.sub('', test[j]) + if compare_floats(base_nc, test_nc): + if len(base_nc) > len(test_nc): + test[j] = base[i] + else: + base[i] = test[j] + + +def _preprocess_data(data): + # Normalize negation (convert " * -1" to the negation operator) + data = _norm_negation.sub(template.negation, data) + # Normalize double negation (convert "-(-x)" to x) + data = _norm_double_negation.sub('', data) + # Remove multiplication by 1 + data = _norm_timesone.sub('', data) + # Normalize consecutive whitespace to a single space + data = _norm_whitespace.sub(' ', data) + # preface all comments with a single tab character + data = _norm_comment.sub('\t#', data) + # Normalize floating point integers to integers + data = _norm_integers.sub('', data) + # return the sequence of lines + return data.splitlines() + + +def nl_diff(base, test, baseline='baseline', testfile='testfile'): + if test == base: + return [], [] + + test = _preprocess_data(test) + base = _preprocess_data(base) + if test == base: + return [], [] + + # First do a quick pass to check / standardize embedded numbers. + # This is a little fragile (it requires that the embedded constants + # appear in the same order in the base and test files), but we see + # cases where differences within numerical tolerances lead to huge + # add / delete chunks (instead of small replace chunks) from the + # SequenceMatcher (because it is not as fast / aggressive as Unix + # diff). Those add/remove chunks are ignored by the _update_subsets + # code below, leading to unnecessary test failures. + test_nlines = list(x for x in enumerate(test) if x[1] and x[1][0] == 'n') + base_nlines = list(x for x in enumerate(base) if x[1] and x[1][0] == 'n') + if len(test_nlines) == len(base_nlines): + for t_line, b_line in zip(test_nlines, base_nlines): + if compare_floats(t_line[1][1:], b_line[1][1:]): + test[t_line[0]] = base[b_line[0]] + + for group in SequenceMatcher(None, base, test).get_grouped_opcodes(3): + for tag, i1, i2, j1, j2 in group: + if tag != 'replace': + continue + _update_subsets((range(i1, i2), range(j1, j2)), base, test) + + if test == base: + return [], [] + + print( + ''.join( + unified_diff( + [_ + "\n" for _ in base], + [_ + "\n" for _ in test], + fromfile=baseline, + tofile=testfile, + ) + ) + ) + return base, test + + +def load_nl_baseline(baseline, testfile, version='nl'): + return load_baseline(baseline, testfile, 'nl', version) + + +def load_and_compare_nl_baseline(baseline, testfile, version='nl'): + return nl_diff(*load_nl_baseline(baseline, testfile, version)) + + +if __name__ == '__main__': + import sys + + base, test = load_and_compare_nl_baseline(sys.argv[1], sys.argv[2]) + sys.exit(1 if base or test else 0) diff --git a/pyomo/repn/tests/test_linear.py b/pyomo/repn/tests/test_linear.py new file mode 100644 index 00000000000..5e8df940efc --- /dev/null +++ b/pyomo/repn/tests/test_linear.py @@ -0,0 +1,1409 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.log import LoggingIntercept +import pyomo.common.unittest as unittest + +from pyomo.core.expr.compare import assertExpressionsEqual +from pyomo.core.expr.numeric_expr import LinearExpression, MonomialTermExpression +from pyomo.core.expr import Expr_if, inequality, LinearExpression, NPV_SumExpression +from pyomo.repn.linear import LinearRepn, LinearRepnVisitor +from pyomo.repn.util import InvalidNumber + +from pyomo.environ import ConcreteModel, Param, Var, Expression, ExternalFunction, cos + +nan = float('nan') + + +class VisitorConfig(object): + def __init__(self): + self.subexpr = {} + self.var_map = {} + self.var_order = {} + + def __iter__(self): + return iter((self.subexpr, self.var_map, self.var_order)) + + +def sum_sq(args, fixed, fgh): + f = sum(arg**2 for arg in args) + g = [2 * arg for arg in args] + h = None + return f, g, h + + +class TestLinear(unittest.TestCase): + def test_finalize(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + m.z = Var() + + e = m.x + 2 * m.y - m.x - m.z + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y, id(m.z): m.z}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1, id(m.z): 2}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.y): 2, id(m.z): -1}) + self.assertEqual(repn.nonlinear, None) + + e *= 5 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y, id(m.z): m.z}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1, id(m.z): 2}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.y): 10, id(m.z): -5}) + self.assertEqual(repn.nonlinear, None) + + e = 5 * (m.y + m.z**2 + 3 * m.y**3) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.y): m.y, id(m.z): m.z}) + self.assertEqual(cfg.var_order, {id(m.y): 0, id(m.z): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.y): 5}) + assertExpressionsEqual(self, repn.nonlinear, (m.z**2 + 3 * m.y**3) * 5) + + def test_scalars(self): + m = ConcreteModel() + m.x = Var() + m.p = Param(mutable=True, initialize=2) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(3) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 3) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression((-1) ** 0.5) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertStructuredAlmostEqual(repn.constant, InvalidNumber(1j)) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(m.p) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 2) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.p.set_value(None) + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(m.p) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(str(repn.constant), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.p.set_value(nan) + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(m.p) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(str(repn.constant), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.p.set_value(1j) + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(m.p) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, InvalidNumber(1j)) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(m.x) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(1) + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(m.x) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 1) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(None) + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(m.x) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(str(repn.constant), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(nan) + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(m.x) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(str(repn.constant), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(1j) + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(m.x) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(str(repn.constant), 'InvalidNumber(1j)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_npv(self): + m = ConcreteModel() + m.p = Param(mutable=True, initialize=4) + + nested_expr = 1 / m.p + pow_expr = m.p ** (0.5) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(nested_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 1 / 4) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(pow_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 2) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.p = 0 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(nested_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(str(repn.constant), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(pow_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.p = -1 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(nested_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, -1) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(pow_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertStructuredAlmostEqual(repn.constant, InvalidNumber(1j)) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_monomial(self): + m = ConcreteModel() + m.x = Var() + m.p = Param(mutable=True, initialize=4) + + const_expr = 3 * m.x + param_expr = m.p * m.x + nested_expr = (1 / m.p) * m.x + pow_expr = (m.p ** (0.5)) * m.x + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(const_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): 3}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(param_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): 4}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(nested_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): 1 / 4}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(pow_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): 2}) + self.assertEqual(repn.nonlinear, None) + + m.p = -1.0 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(param_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): -1}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(nested_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): -1}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(pow_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertStructuredAlmostEqual(repn.linear, {id(m.x): InvalidNumber(1j)}) + self.assertEqual(repn.nonlinear, None) + + m.p = float('nan') + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(param_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertStructuredAlmostEqual(repn.linear, {id(m.x): InvalidNumber(nan)}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(nested_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertStructuredAlmostEqual(repn.linear, {id(m.x): InvalidNumber(nan)}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(pow_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertStructuredAlmostEqual(repn.linear, {id(m.x): InvalidNumber(nan)}) + self.assertEqual(repn.nonlinear, None) + + m.p.set_value(None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(param_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertStructuredAlmostEqual(repn.linear, {id(m.x): InvalidNumber(nan)}) + self.assertEqual(repn.nonlinear, None) + + m.p.set_value(4) + m.x.fix(10) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(const_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 30) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(param_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 40) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(nested_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 2.5) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(pow_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 20) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.p = float('nan') + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(param_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(str(repn.constant), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(nested_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(str(repn.constant), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(pow_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(str(repn.constant), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.p.set_value(None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(param_expr) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(str(repn.constant), 'InvalidNumber(nan)') + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.p.set_value(0) + m.x.fix(10) + + cfg = VisitorConfig() + with LoggingIntercept() as LOG: + repn = LinearRepnVisitor(*cfg).walk_expression(param_expr) + self.assertEqual(LOG.getvalue(), "") + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x.set_value(nan) + + cfg = VisitorConfig() + with LoggingIntercept() as LOG: + repn = LinearRepnVisitor(*cfg).walk_expression(param_expr) + self.assertIn( + "DEPRECATED: Encountered 0*nan in expression tree.", LOG.getvalue() + ) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_linear(self): + m = ConcreteModel() + m.x = Var(range(3)) + m.p = Param(mutable=True, initialize=4) + + e = LinearExpression() + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + e += m.x[0] + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[0]): m.x[0]}) + self.assertEqual(cfg.var_order, {id(m.x[0]): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x[0]): 1}) + self.assertEqual(repn.nonlinear, None) + + e += 2 * m.x[0] + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[0]): m.x[0]}) + self.assertEqual(cfg.var_order, {id(m.x[0]): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x[0]): 3}) + self.assertEqual(repn.nonlinear, None) + + e += m.p * m.x[1] + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[0]): m.x[0], id(m.x[1]): m.x[1]}) + self.assertEqual(cfg.var_order, {id(m.x[0]): 0, id(m.x[1]): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x[0]): 3, id(m.x[1]): 4}) + self.assertEqual(repn.nonlinear, None) + + e += (m.p**0.5) * m.x[1] + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[0]): m.x[0], id(m.x[1]): m.x[1]}) + self.assertEqual(cfg.var_order, {id(m.x[0]): 0, id(m.x[1]): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x[0]): 3, id(m.x[1]): 6}) + self.assertEqual(repn.nonlinear, None) + + e += 10 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[0]): m.x[0], id(m.x[1]): m.x[1]}) + self.assertEqual(cfg.var_order, {id(m.x[0]): 0, id(m.x[1]): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 10) + self.assertEqual(repn.linear, {id(m.x[0]): 3, id(m.x[1]): 6}) + self.assertEqual(repn.nonlinear, None) + + e += 10 * m.p + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[0]): m.x[0], id(m.x[1]): m.x[1]}) + self.assertEqual(cfg.var_order, {id(m.x[0]): 0, id(m.x[1]): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 50) + self.assertEqual(repn.linear, {id(m.x[0]): 3, id(m.x[1]): 6}) + self.assertEqual(repn.nonlinear, None) + + m.p = -1 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[0]): m.x[0], id(m.x[1]): m.x[1]}) + self.assertEqual(cfg.var_order, {id(m.x[0]): 0, id(m.x[1]): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertStructuredAlmostEqual( + repn.linear, {id(m.x[0]): 3, id(m.x[1]): InvalidNumber(-1 + 1j)} + ) + self.assertEqual(repn.nonlinear, None) + + m.p = 0 + e += (1 / m.p) * m.x[1] + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[0]): m.x[0], id(m.x[1]): m.x[1]}) + self.assertEqual(cfg.var_order, {id(m.x[0]): 0, id(m.x[1]): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 10) + self.assertStructuredAlmostEqual( + repn.linear, {id(m.x[0]): 3, id(m.x[1]): InvalidNumber(nan)} + ) + self.assertEqual(repn.nonlinear, None) + + m.x[0].fix(10) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[1]): m.x[1]}) + self.assertEqual(cfg.var_order, {id(m.x[1]): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 40) + self.assertStructuredAlmostEqual(repn.linear, {id(m.x[1]): InvalidNumber(nan)}) + self.assertEqual(repn.nonlinear, None) + + m.x[1].fix(10) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertStructuredAlmostEqual(repn.constant, InvalidNumber(nan)) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + # Test some edge cases + + e = LinearExpression() + + e += m.x[2] + (1 / m.p) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[2]): m.x[2]}) + self.assertEqual(cfg.var_order, {id(m.x[2]): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertStructuredAlmostEqual(repn.constant, InvalidNumber(nan)) + self.assertEqual(repn.linear, {id(m.x[2]): 1}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + cfg.var_map[id(m.x[2])] = m.x[2] + cfg.var_map[id(m.x[0])] = m.x[0] + cfg.var_order[id(m.x[2])] = 0 + cfg.var_order[id(m.x[0])] = 1 + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x[2]): m.x[2], id(m.x[0]): m.x[0]}) + self.assertEqual(cfg.var_order, {id(m.x[2]): 0, id(m.x[0]): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertStructuredAlmostEqual(repn.constant, InvalidNumber(nan)) + self.assertEqual(repn.linear, {id(m.x[2]): 1}) + self.assertEqual(repn.nonlinear, None) + + e = LinearExpression() + e += 0 * m.x[1] + + cfg = VisitorConfig() + with LoggingIntercept() as LOG: + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(LOG.getvalue(), "") + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x[1].set_value(nan) + + cfg = VisitorConfig() + with LoggingIntercept() as LOG: + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertIn( + "DEPRECATED: Encountered 0*nan in expression tree.", LOG.getvalue() + ) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_trig(self): + m = ConcreteModel() + m.x = Var() + + e = cos(m.x) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual(self, repn.nonlinear, cos(m.x)) + + m.x.fix(0) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 1) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_named_expr(self): + m = ConcreteModel() + m.x = Var(range(3)) + m.e = Expression(expr=sum((i + 2) * m.x[i] for i in range(3))) + + e = m.e * 2 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(len(cfg.subexpr), 1) + self.assertEqual(cfg.subexpr[id(m.e)][1].multiplier, 1) + self.assertEqual(cfg.subexpr[id(m.e)][1].constant, 0) + self.assertEqual( + cfg.subexpr[id(m.e)][1].linear, + {id(m.x[0]): 2, id(m.x[1]): 3, id(m.x[2]): 4}, + ) + self.assertEqual(cfg.subexpr[id(m.e)][1].nonlinear, None) + + self.assertEqual( + cfg.var_map, {id(m.x[0]): m.x[0], id(m.x[1]): m.x[1], id(m.x[2]): m.x[2]} + ) + self.assertEqual(cfg.var_order, {id(m.x[0]): 0, id(m.x[1]): 1, id(m.x[2]): 2}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x[0]): 4, id(m.x[1]): 6, id(m.x[2]): 8}) + self.assertEqual(repn.nonlinear, None) + + e = m.e * 2 + 3 * m.e + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(len(cfg.subexpr), 1) + self.assertEqual(cfg.subexpr[id(m.e)][1].multiplier, 1) + self.assertEqual(cfg.subexpr[id(m.e)][1].constant, 0) + self.assertEqual( + cfg.subexpr[id(m.e)][1].linear, + {id(m.x[0]): 2, id(m.x[1]): 3, id(m.x[2]): 4}, + ) + self.assertEqual(cfg.subexpr[id(m.e)][1].nonlinear, None) + + self.assertEqual( + cfg.var_map, {id(m.x[0]): m.x[0], id(m.x[1]): m.x[1], id(m.x[2]): m.x[2]} + ) + self.assertEqual(cfg.var_order, {id(m.x[0]): 0, id(m.x[1]): 1, id(m.x[2]): 2}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x[0]): 10, id(m.x[1]): 15, id(m.x[2]): 20}) + self.assertEqual(repn.nonlinear, None) + + m = ConcreteModel() + m.e = Expression(expr=10) + + e = m.e * 2 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(len(cfg.subexpr), 1) + self.assertEqual(cfg.subexpr[id(m.e)][1], 10) + + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 20) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + e = m.e * 2 + 3 * m.e + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(len(cfg.subexpr), 1) + self.assertEqual(cfg.subexpr[id(m.e)][1], 10) + + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 50) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_pow_expr(self): + m = ConcreteModel() + m.x = Var() + m.p = Param(mutable=True, initialize=1) + + e = m.x**m.p + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.nonlinear, None) + + m.p = 0 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 1) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.p = 2 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual(self, repn.nonlinear, m.x**2) + + m.x.fix(2) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 4) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.p = 1 / 2 + m.x = -1 + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertStructuredAlmostEqual(repn.constant, InvalidNumber(1j)) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x.unfix() + e = (1 + m.x) ** 2 + + cfg = VisitorConfig() + visitor = LinearRepnVisitor(*cfg) + visitor.max_exponential_expansion = 2 + repn = visitor.walk_expression(e) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual(self, repn.nonlinear, (m.x + 1) * (m.x + 1)) + + cfg = VisitorConfig() + visitor = LinearRepnVisitor(*cfg) + visitor.max_exponential_expansion = 2 + visitor.expand_nonlinear_products = True + repn = visitor.walk_expression(e) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 1) + self.assertEqual(repn.linear, {id(m.x): 2}) + assertExpressionsEqual(self, repn.nonlinear, m.x * m.x) + + def test_product(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + m.z = Var() + + e = (2 + 3 * m.x + 4 * m.x**2) * (5 + 6 * m.x + 7 * m.x**2) + + cfg = VisitorConfig() + visitor = LinearRepnVisitor(*cfg) + visitor.expand_nonlinear_products = True + repn = visitor.walk_expression(e) + + LE3 = MonomialTermExpression((3, m.x)) + LE6 = MonomialTermExpression((6, m.x)) + NL = ( + 2 * (7 * m.x**2) + + 4 * m.x**2 * (7 * m.x**2 + 6 * m.x + 5) + + (LE3) * (7 * m.x**2 + LE6) + ) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertAlmostEqual(repn.constant, 10) + self.assertEqual(repn.linear, {id(m.x): 27}) + assertExpressionsEqual(self, repn.nonlinear, NL) + + m.x.fix(0) + m.y.fix(nan) + e = m.x * m.y + + cfg = VisitorConfig() + visitor = LinearRepnVisitor(*cfg) + visitor.expand_nonlinear_products = True + with LoggingIntercept() as LOG: + repn = visitor.walk_expression(e) + self.assertIn( + 'Encountered 0*InvalidNumber(nan) in expression tree.', LOG.getvalue() + ) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertAlmostEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + e = m.x * (m.y + 2 + m.z) + + cfg = VisitorConfig() + visitor = LinearRepnVisitor(*cfg) + visitor.expand_nonlinear_products = True + with LoggingIntercept() as LOG: + repn = visitor.walk_expression(e) + self.assertIn('Encountered 0*nan in expression tree.', LOG.getvalue()) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.z): m.z}) + self.assertEqual(cfg.var_order, {id(m.z): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertAlmostEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_expr_if(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + + e = Expr_if(m.y >= 5, m.x, m.x**2) + f = Expr_if(m.y == 5, m.x, m.x**2) + g = Expr_if(inequality(3, m.y, 5), m.x, m.x**2) + + m.y.fix(2) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual(self, repn.nonlinear, m.x**2) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(f) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual(self, repn.nonlinear, m.x**2) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(g) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual(self, repn.nonlinear, m.x**2) + + m.y.fix(5) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(f) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(g) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.nonlinear, None) + + m.y.fix(2) + m.x.fix(3) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 9) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(f) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 9) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(g) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 9) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.y.fix(5) + m.x.fix(6) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 6) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(f) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 6) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(g) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 6) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.y.fix(None) + m.x.unfix() + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual(self, repn.nonlinear, m.x**2) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(f) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual(self, repn.nonlinear, m.x**2) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(g) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual(self, repn.nonlinear, m.x**2) + + m.y.unfix() + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.y): m.y, id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.y): 0, id(m.x): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual( + self, repn.nonlinear, Expr_if(IF=m.y >= 5, THEN=m.x, ELSE=m.x**2) + ) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(f) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.y): m.y, id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.y): 0, id(m.x): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual( + self, repn.nonlinear, Expr_if(IF=m.y == 5, THEN=m.x, ELSE=m.x**2) + ) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(g) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.y): m.y, id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.y): 0, id(m.x): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual( + self, + repn.nonlinear, + Expr_if(IF=inequality(3, m.y, 5), THEN=m.x, ELSE=m.x**2), + ) + + def test_division(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + + e = (2 * m.x + 1) / m.y + m.y.fix(2) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 1 / 2) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.nonlinear, None) + + e = m.y / (m.x + 1) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + assertExpressionsEqual(self, repn.nonlinear, 2 / (m.x + 1)) + + def test_negation(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + + e = -(m.x + 2) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, -2) + self.assertEqual(repn.linear, {id(m.x): -1}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(3) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, -5) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + def test_external(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + m.sq = ExternalFunction(fgh=sum_sq) + + e = m.sq(2 / m.x, 2 * m.y) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertIs(repn.nonlinear, e) + + m.x.fix(2) + m.y.fix(3) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 37) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.nonlinear, None) + + m.x.fix(0) + + cfg = VisitorConfig() + repn = LinearRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertIs(repn.nonlinear, e) + + def test_type_registrations(self): + m = ConcreteModel() + + cfg = VisitorConfig() + visitor = LinearRepnVisitor(*cfg) + + import pyomo.repn.linear as linear + + _orig_dispatcher = linear._before_child_dispatcher + linear._before_child_dispatcher = bcd = {} + try: + # native type + self.assertEqual( + linear._register_new_before_child_dispatcher(visitor, 5), + (False, (linear._CONSTANT, 5)), + ) + self.assertEqual(len(bcd), 1) + self.assertIs(bcd[int], linear._before_native) + # complex type + self.assertEqual( + linear._register_new_before_child_dispatcher(visitor, 5j), + (False, (linear._CONSTANT, 5j)), + ) + self.assertEqual(len(bcd), 2) + self.assertIs(bcd[complex], linear._before_complex) + # ScalarParam + m.p = Param(initialize=5) + self.assertEqual( + linear._register_new_before_child_dispatcher(visitor, m.p), + (False, (linear._CONSTANT, 5)), + ) + self.assertEqual(len(bcd), 3) + self.assertIs(bcd[m.p.__class__], linear._before_param) + # ParamData + m.q = Param([0], initialize=6, mutable=True) + self.assertEqual( + linear._register_new_before_child_dispatcher(visitor, m.q[0]), + (False, (linear._CONSTANT, 6)), + ) + self.assertEqual(len(bcd), 4) + self.assertIs(bcd[m.q[0].__class__], linear._before_param) + # NPV_SumExpression + self.assertEqual( + linear._register_new_before_child_dispatcher(visitor, m.p + m.q[0]), + (False, (linear._CONSTANT, 11)), + ) + self.assertEqual(len(bcd), 6) + self.assertIs(bcd[NPV_SumExpression], linear._before_npv) + self.assertIs(bcd[LinearExpression], linear._before_general_expression) + # Named expression + m.e = Expression(expr=m.p + m.q[0]) + self.assertEqual( + linear._register_new_before_child_dispatcher(visitor, m.e), (True, None) + ) + self.assertEqual(len(bcd), 7) + self.assertIs(bcd[m.e.__class__], linear._before_named_expression) + + finally: + linear._before_child_dispatcher = _orig_dispatcher + + def test_to_expression(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + + cfg = VisitorConfig() + visitor = LinearRepnVisitor(*cfg) + # prepopulate the visitor's var_map + visitor.walk_expression(m.x + m.y) + + expr = LinearRepn() + self.assertEqual(expr.to_expression(visitor), 0) + + expr.linear[id(m.x)] = 0 + self.assertEqual(expr.to_expression(visitor), 0) + + expr.linear[id(m.x)] = 1 + assertExpressionsEqual(self, expr.to_expression(visitor), m.x) + + expr.linear[id(m.x)] = 2 + assertExpressionsEqual(self, expr.to_expression(visitor), 2 * m.x) + + expr.linear[id(m.y)] = 3 + assertExpressionsEqual(self, expr.to_expression(visitor), 2 * m.x + 3 * m.y) + + expr.multiplier = 10 + assertExpressionsEqual( + self, expr.to_expression(visitor), (2 * m.x + 3 * m.y) * 10 + ) + expr.multiplier = 1 + + expr.constant = 0 + expr.linear[id(m.x)] = 0 + expr.linear[id(m.y)] = 0 + assertExpressionsEqual(self, expr.to_expression(visitor), LinearExpression()) diff --git a/pyomo/repn/tests/test_quadratic.py b/pyomo/repn/tests/test_quadratic.py new file mode 100644 index 00000000000..7832fedee36 --- /dev/null +++ b/pyomo/repn/tests/test_quadratic.py @@ -0,0 +1,293 @@ +# ___________________________________________________________________________ +# +# Pyomo: Python Optimization Modeling Objects +# Copyright (c) 2008-2022 +# National Technology and Engineering Solutions of Sandia, LLC +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# rights in this software. +# This software is distributed under the 3-clause BSD License. +# ___________________________________________________________________________ + +from pyomo.common.log import LoggingIntercept +import pyomo.common.unittest as unittest + +from pyomo.core.expr.compare import assertExpressionsEqual +from pyomo.core.expr.numeric_expr import ( + LinearExpression, + MonomialTermExpression, + SumExpression, +) +from pyomo.repn.quadratic import QuadraticRepnVisitor + +from pyomo.environ import ConcreteModel, Var + + +class VisitorConfig(object): + def __init__(self): + self.subexpr = {} + self.var_map = {} + self.var_order = {} + + def __iter__(self): + return iter((self.subexpr, self.var_map, self.var_order)) + + +class TestQuadratic(unittest.TestCase): + def test_product(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + + e = 2 + + cfg = VisitorConfig() + visitor = QuadraticRepnVisitor(*cfg) + visitor.expand_nonlinear_products = True + repn = visitor.walk_expression(e) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 2) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.quadratic, None) + self.assertEqual(repn.nonlinear, None) + + e = 2 + 3 * m.x + + cfg = VisitorConfig() + visitor = QuadraticRepnVisitor(*cfg) + visitor.expand_nonlinear_products = True + repn = visitor.walk_expression(e) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 2) + self.assertEqual(repn.linear, {id(m.x): 3}) + self.assertEqual(repn.quadratic, None) + self.assertEqual(repn.nonlinear, None) + + e = 2 + 3 * m.x + 4 * m.x**2 + + cfg = VisitorConfig() + visitor = QuadraticRepnVisitor(*cfg) + visitor.expand_nonlinear_products = True + repn = visitor.walk_expression(e) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 2) + self.assertEqual(repn.linear, {id(m.x): 3}) + self.assertEqual(repn.quadratic, {(id(m.x), id(m.x)): 4}) + self.assertEqual(repn.nonlinear, None) + + e = (2 + 3 * m.x + 4 * m.x**2) * (5 + 6 * m.x + 7 * m.x**2) + + cfg = VisitorConfig() + visitor = QuadraticRepnVisitor(*cfg) + visitor.expand_nonlinear_products = True + repn = visitor.walk_expression(e) + + QE4 = SumExpression([4 * m.x**2]) + QE7 = SumExpression([7 * m.x**2]) + LE3 = MonomialTermExpression((3, m.x)) + LE6 = MonomialTermExpression((6, m.x)) + NL = +QE4 * (QE7 + LE6) + (LE3) * (QE7) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 10) + self.assertEqual(repn.linear, {id(m.x): 27}) + self.assertEqual(repn.quadratic, {(id(m.x), id(m.x)): 52}) + assertExpressionsEqual(self, repn.nonlinear, NL) + + e = (2 + 3 * m.x + 4 * m.x**2) * (5 + 6 * m.x + 7 * m.x**2) + + cfg = VisitorConfig() + visitor = QuadraticRepnVisitor(*cfg) + visitor.expand_nonlinear_products = False + repn = visitor.walk_expression(e) + + NL = (4 * m.x**2 + 3 * m.x + 2) * (7 * m.x**2 + 6 * m.x + 5) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.quadratic, None) + print(repn.nonlinear) + print(NL) + assertExpressionsEqual(self, repn.nonlinear, NL) + + e = (1 + 2 * m.x + 3 * m.y) * (4 + 5 * m.x + 6 * m.y) + + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression(e) + + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 4) + self.assertEqual(repn.linear, {id(m.x): 13, id(m.y): 18}) + self.assertEqual( + repn.quadratic, + {(id(m.x), id(m.x)): 10, (id(m.y), id(m.y)): 18, (id(m.x), id(m.y)): 27}, + ) + assertExpressionsEqual(self, repn.nonlinear, None) + + def test_sum(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + + e = SumExpression([]) + + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 0) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.quadratic, None) + self.assertEqual(repn.nonlinear, None) + + e += 5 + + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {}) + self.assertEqual(cfg.var_order, {}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 5) + self.assertEqual(repn.linear, {}) + self.assertEqual(repn.quadratic, None) + self.assertEqual(repn.nonlinear, None) + + e += m.x + + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x}) + self.assertEqual(cfg.var_order, {id(m.x): 0}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 5) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.quadratic, None) + self.assertEqual(repn.nonlinear, None) + + e += m.y**2 + + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 5) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.quadratic, {(id(m.y), id(m.y)): 1}) + self.assertEqual(repn.nonlinear, None) + + e += m.y**3 + + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 5) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.quadratic, {(id(m.y), id(m.y)): 1}) + assertExpressionsEqual(self, repn.nonlinear, m.y**3) + + e += 2 * m.x**4 + + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 5) + self.assertEqual(repn.linear, {id(m.x): 1}) + self.assertEqual(repn.quadratic, {(id(m.y), id(m.y)): 1}) + assertExpressionsEqual(self, repn.nonlinear, m.y**3 + 2 * m.x**4) + + e += 2 * m.y + + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 5) + self.assertEqual(repn.linear, {id(m.x): 1, id(m.y): 2}) + self.assertEqual(repn.quadratic, {(id(m.y), id(m.y)): 1}) + assertExpressionsEqual(self, repn.nonlinear, m.y**3 + 2 * m.x**4) + + e += 3 * m.x * m.y + + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression(e) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 5) + self.assertEqual(repn.linear, {id(m.x): 1, id(m.y): 2}) + self.assertEqual(repn.quadratic, {(id(m.y), id(m.y)): 1, (id(m.x), id(m.y)): 3}) + assertExpressionsEqual(self, repn.nonlinear, m.y**3 + 2 * m.x**4) + + def test_pow(self): + m = ConcreteModel() + m.x = Var() + m.y = Var() + + # Check **{int} + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression((1 + 3 * m.x + 4 * m.y) ** 2) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 1) + self.assertEqual(repn.linear, {id(m.x): 6, id(m.y): 8}) + self.assertEqual( + repn.quadratic, + {(id(m.x), id(m.x)): 9, (id(m.y), id(m.y)): 16, (id(m.x), id(m.y)): 24}, + ) + self.assertEqual(repn.nonlinear, None) + + # Check **{int} + cfg = VisitorConfig() + repn = QuadraticRepnVisitor(*cfg).walk_expression( + (1 + 3 * m.x + 4 * m.y) ** 2.0 + ) + self.assertEqual(cfg.subexpr, {}) + self.assertEqual(cfg.var_map, {id(m.x): m.x, id(m.y): m.y}) + self.assertEqual(cfg.var_order, {id(m.x): 0, id(m.y): 1}) + self.assertEqual(repn.multiplier, 1) + self.assertEqual(repn.constant, 1) + self.assertEqual(repn.linear, {id(m.x): 6, id(m.y): 8}) + self.assertEqual( + repn.quadratic, + {(id(m.x), id(m.x)): 9, (id(m.y), id(m.y)): 16, (id(m.x), id(m.y)): 24}, + ) + self.assertEqual(repn.nonlinear, None) diff --git a/pyomo/repn/tests/test_standard.py b/pyomo/repn/tests/test_standard.py index bc1ea0a8cd4..b62d18e6eff 100644 --- a/pyomo/repn/tests/test_standard.py +++ b/pyomo/repn/tests/test_standard.py @@ -15,21 +15,36 @@ import pickle import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest -from pyomo.core.expr.current import Expr_if +import pyomo.core.expr as EXPR +from pyomo.core.expr.numvalue import native_numeric_types, as_numeric, value from pyomo.core.expr.visitor import replace_expressions -from pyomo.core.expr import current as EXPR from pyomo.repn import generate_standard_repn -from pyomo.environ import AbstractModel, ConcreteModel, Var, Param, Set, Expression, RangeSet, ExternalFunction, quicksum, cos, sin, summation, sum_product +from pyomo.environ import ( + AbstractModel, + ConcreteModel, + Var, + Param, + Set, + Expression, + RangeSet, + ExternalFunction, + quicksum, + cos, + sin, + summation, + sum_product, +) import pyomo.kernel -from pyomo.core.expr.numvalue import native_numeric_types, as_numeric, value class frozendict(dict): __slots__ = ('_hash',) + def __hash__(self): rval = getattr(self, '_hash', None) if rval is None: @@ -51,13 +66,15 @@ def repn_to_dict(repn): result[id(v1_), id(v2_)] = value(repn.quadratic_coefs[i]) else: result[id(v2_), id(v1_)] = value(repn.quadratic_coefs[i]) - if not (repn.constant is None or (type(repn.constant) in native_numeric_types and repn.constant == 0)): + if not ( + repn.constant is None + or (type(repn.constant) in native_numeric_types and repn.constant == 0) + ): result[None] = value(repn.constant) return result class Test(unittest.TestCase): - def test_number(self): # 1.0 m = AbstractModel() @@ -72,12 +89,12 @@ def test_number(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None : 1 } + baseline = {None: 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) self.assertEqual(baseline, repn_to_dict(rep)) - + def test_var(self): # a m = ConcreteModel() @@ -86,12 +103,12 @@ def test_var(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -99,23 +116,23 @@ def test_var(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 1 } + baseline = {id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]) : 1 } + baseline = {id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) m.a.value = 3 m.a.fixed = True rep = generate_standard_repn(e) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -123,17 +140,17 @@ def test_var(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None: 3 } + baseline = {None: 3} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -141,7 +158,7 @@ def test_var(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None: 3 } + baseline = {None: 3} self.assertEqual(baseline, repn_to_dict(rep)) self.assertTrue(rep.constant is m.a) @@ -155,12 +172,12 @@ def test_param(self): rep = generate_standard_repn(e) rep = generate_standard_repn(e, compute_values=False) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -168,13 +185,13 @@ def test_param(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None : -4 } + baseline = {None: -4} m.p.value = -4 self.assertEqual(baseline, repn_to_dict(rep)) - #s = pickle.dumps(rep) - #rep = pickle.loads(s) - #baseline = { None : m.p } - #self.assertEqual(baseline, repn_to_dict(rep)) + # s = pickle.dumps(rep) + # rep = pickle.loads(s) + # baseline = { None : m.p } + # self.assertEqual(baseline, repn_to_dict(rep)) def test_simplesum(self): # a + b @@ -182,15 +199,15 @@ def test_simplesum(self): m.a = Var() m.b = Var() e = m.a + m.b - + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -198,11 +215,11 @@ def test_simplesum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 1, id(m.b) : 1 } + baseline = {id(m.a): 1, id(m.b): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]) : 1, id(rep.linear_vars[1]) : 1 } + baseline = {id(rep.linear_vars[0]): 1, id(rep.linear_vars[1]): 1} self.assertEqual(baseline, repn_to_dict(rep)) def test_constsum(self): @@ -210,15 +227,15 @@ def test_constsum(self): m = ConcreteModel() m.a = Var() e = m.a + 5 - + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -226,26 +243,26 @@ def test_constsum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:5, id(m.a) : 1 } + baseline = {None: 5, id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:5, id(rep.linear_vars[0]) : 1 } + baseline = {None: 5, id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # 5 + a m = ConcreteModel() m.a = Var() e = 5 + m.a - + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -253,11 +270,11 @@ def test_constsum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:5, id(m.a) : 1 } + baseline = {None: 5, id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:5, id(rep.linear_vars[0]) : 1 } + baseline = {None: 5, id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) def test_paramsum(self): @@ -266,15 +283,15 @@ def test_paramsum(self): m.a = Var() m.p = Param(mutable=True, default=5) e = m.a + m.p - + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -282,11 +299,11 @@ def test_paramsum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:5, id(m.a) : 1 } + baseline = {None: 5, id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:5, id(rep.linear_vars[0]) : 1 } + baseline = {None: 5, id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # 5 + a @@ -294,15 +311,15 @@ def test_paramsum(self): m.a = Var() m.p = Param(mutable=True, default=5) e = m.p + m.a - + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -310,21 +327,21 @@ def test_paramsum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:5, id(m.a) : 1 } + baseline = {None: 5, id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:5, id(rep.linear_vars[0]) : 1 } + baseline = {None: 5, id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -332,7 +349,7 @@ def test_paramsum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:5, id(m.a) : 1 } + baseline = {None: 5, id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) self.assertTrue(rep.constant is m.p) @@ -341,16 +358,16 @@ def test_paramprod1(self): m = ConcreteModel() m.a = Var() m.p = Param(mutable=True, default=5) - e = m.p*m.a - + e = m.p * m.a + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -358,21 +375,21 @@ def test_paramprod1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 5 } + baseline = {id(m.a): 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]) : 5 } + baseline = {id(rep.linear_vars[0]): 5} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -380,7 +397,7 @@ def test_paramprod1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 5 } + baseline = {id(m.a): 5} self.assertEqual(baseline, repn_to_dict(rep)) # self.assertTrue(rep.linear_coefs[0] is m.p) @@ -390,16 +407,16 @@ def test_paramprod2(self): m = ConcreteModel() m.a = Var() m.p = Param(mutable=True, default=0) - e = m.p*m.a - + e = m.p * m.a + rep = generate_standard_repn(e) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -407,17 +424,17 @@ def test_paramprod2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -425,7 +442,7 @@ def test_paramprod2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 0 } + baseline = {id(m.a): 0} self.assertEqual(baseline, repn_to_dict(rep)) # self.assertTrue(rep.linear_coefs[0] is m.p) @@ -437,16 +454,16 @@ def test_linear_sum1(self): m.y = Var() m.p = Param(mutable=True, default=1) m.q = Param(mutable=True, default=2) - e = m.p*m.x + m.q*m.y - + e = m.p * m.x + m.q * m.y + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -454,17 +471,17 @@ def test_linear_sum1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x):1, id(m.y):2 } + baseline = {id(m.x): 1, id(m.y): 2} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -472,7 +489,7 @@ def test_linear_sum1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x):1, id(m.y):2 } + baseline = {id(m.x): 1, id(m.y): 2} self.assertEqual(baseline, repn_to_dict(rep)) # self.assertTrue(rep.linear_coefs[0] is m.p) @@ -484,16 +501,16 @@ def test_linear_sum2(self): m.A = Set(initialize=range(5)) m.x = Var(m.A) m.p = Param(m.A, mutable=True, default=1) - e = quicksum(m.p[i]*m.x[i] for i in m.A) - + e = quicksum(m.p[i] * m.x[i] for i in m.A) + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 5) self.assertTrue(len(rep.linear_coefs) == 5) @@ -501,17 +518,23 @@ def test_linear_sum2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):1, id(m.x[1]):1, id(m.x[2]):1, id(m.x[3]):1, id(m.x[4]):1} + baseline = { + id(m.x[0]): 1, + id(m.x[1]): 1, + id(m.x[2]): 1, + id(m.x[3]): 1, + id(m.x[4]): 1, + } self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 5) self.assertTrue(len(rep.linear_coefs) == 5) @@ -519,7 +542,13 @@ def test_linear_sum2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):1, id(m.x[1]):1, id(m.x[2]):1, id(m.x[3]):1, id(m.x[4]):1} + baseline = { + id(m.x[0]): 1, + id(m.x[1]): 1, + id(m.x[2]): 1, + id(m.x[3]): 1, + id(m.x[4]): 1, + } self.assertEqual(baseline, repn_to_dict(rep)) # self.assertTrue(rep.linear_coefs[0] is m.p[0]) @@ -531,16 +560,16 @@ def test_linear_sum3(self): m.A = Set(initialize=range(5)) m.x = Var(m.A, initialize=3) m.p = Param(m.A, mutable=True, default=1) - e = quicksum((i+1)*m.x[i] for i in m.A) - + e = quicksum((i + 1) * m.x[i] for i in m.A) + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 5) self.assertTrue(len(rep.linear_coefs) == 5) @@ -548,19 +577,25 @@ def test_linear_sum3(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):1, id(m.x[1]):2, id(m.x[2]):3, id(m.x[3]):4, id(m.x[4]):5} + baseline = { + id(m.x[0]): 1, + id(m.x[1]): 2, + id(m.x[2]): 3, + id(m.x[3]): 4, + id(m.x[4]): 5, + } self.assertEqual(baseline, repn_to_dict(rep)) m.x[2].fixed = True rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 4) self.assertTrue(len(rep.linear_coefs) == 4) @@ -568,7 +603,7 @@ def test_linear_sum3(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):1, id(m.x[1]):2, None:9, id(m.x[3]):4, id(m.x[4]):5} + baseline = {id(m.x[0]): 1, id(m.x[1]): 2, None: 9, id(m.x[3]): 4, id(m.x[4]): 5} self.assertEqual(baseline, repn_to_dict(rep)) def test_linear_sum4(self): @@ -577,18 +612,18 @@ def test_linear_sum4(self): m.A = Set(initialize=range(5)) m.x = Var(m.A, initialize=3) m.p = Param(m.A, mutable=True, default=1) - e = quicksum(m.p[i]*m.x[i] for i in m.A) - + e = quicksum(m.p[i] * m.x[i] for i in m.A) + m.x[2].fixed = True rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 4) self.assertTrue(len(rep.linear_coefs) == 4) @@ -596,17 +631,17 @@ def test_linear_sum4(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):1, id(m.x[1]):1, None:3, id(m.x[3]):1, id(m.x[4]):1} + baseline = {id(m.x[0]): 1, id(m.x[1]): 1, None: 3, id(m.x[3]): 1, id(m.x[4]): 1} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 4) self.assertTrue(len(rep.linear_coefs) == 4) @@ -614,7 +649,7 @@ def test_linear_sum4(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):1, id(m.x[1]):1, None:3, id(m.x[3]):1, id(m.x[4]):1} + baseline = {id(m.x[0]): 1, id(m.x[1]): 1, None: 3, id(m.x[3]): 1, id(m.x[4]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # self.assertTrue(rep.linear_coefs[0] is m.p[0]) @@ -627,18 +662,18 @@ def test_linear_sum5(self): m.A = Set(initialize=range(5)) m.x = Var(m.A, initialize=3) m.p = Param(m.A, mutable=True, default=1) - e = quicksum((m.p[i]*m.p[i])*m.x[i] for i in m.A) - + e = quicksum((m.p[i] * m.p[i]) * m.x[i] for i in m.A) + m.x[2].fixed = True rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 4) self.assertTrue(len(rep.linear_coefs) == 4) @@ -646,17 +681,17 @@ def test_linear_sum5(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):1, id(m.x[1]):1, None:3, id(m.x[3]):1, id(m.x[4]):1} + baseline = {id(m.x[0]): 1, id(m.x[1]): 1, None: 3, id(m.x[3]): 1, id(m.x[4]): 1} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 4) self.assertTrue(len(rep.linear_coefs) == 4) @@ -664,7 +699,7 @@ def test_linear_sum5(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):1, id(m.x[1]):1, None:3, id(m.x[3]):1, id(m.x[4]):1} + baseline = {id(m.x[0]): 1, id(m.x[1]): 1, None: 3, id(m.x[3]): 1, id(m.x[4]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # self.assertTrue(rep.linear_coefs[0].is_expression_type()) @@ -677,16 +712,18 @@ def test_linear_sum6(self): m.x = Var(m.A) m.p = Param(m.A, mutable=True, default=1) m.q = Param(m.A, mutable=True, default=2) - e = quicksum(m.p[i]*m.x[i] if i < 5 else m.q[i-5]*m.x[i-5] for i in range(10)) - + e = quicksum( + m.p[i] * m.x[i] if i < 5 else m.q[i - 5] * m.x[i - 5] for i in range(10) + ) + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 5) self.assertTrue(len(rep.linear_coefs) == 5) @@ -694,17 +731,23 @@ def test_linear_sum6(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):3, id(m.x[1]):3, id(m.x[2]):3, id(m.x[3]):3, id(m.x[4]):3} + baseline = { + id(m.x[0]): 3, + id(m.x[1]): 3, + id(m.x[2]): 3, + id(m.x[3]): 3, + id(m.x[4]): 3, + } self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 5) self.assertTrue(len(rep.linear_coefs) == 5) @@ -712,7 +755,13 @@ def test_linear_sum6(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):3, id(m.x[1]):3, id(m.x[2]):3, id(m.x[3]):3, id(m.x[4]):3} + baseline = { + id(m.x[0]): 3, + id(m.x[1]): 3, + id(m.x[2]): 3, + id(m.x[3]): 3, + id(m.x[4]): 3, + } self.assertEqual(baseline, repn_to_dict(rep)) # self.assertTrue(rep.linear_coefs[0].is_expression_type()) @@ -723,17 +772,17 @@ def test_general_sum1(self): m.A = Set(initialize=range(3)) m.x = Var(m.A, initialize=2) m.p = Param(m.A, mutable=True, default=3) - e = sum(m.p[i]*m.x[i] for i in range(3)) + e = sum(m.p[i] * m.x[i] for i in range(3)) m.x[1].fixed = True - + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -741,17 +790,17 @@ def test_general_sum1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):3, None:6, id(m.x[2]):3} + baseline = {id(m.x[0]): 3, None: 6, id(m.x[2]): 3} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -759,7 +808,7 @@ def test_general_sum1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):3, None:6, id(m.x[2]):3} + baseline = {id(m.x[0]): 3, None: 6, id(m.x[2]): 3} self.assertEqual(baseline, repn_to_dict(rep)) # self.assertTrue(rep.linear_coefs[0] is m.p[0]) @@ -770,17 +819,17 @@ def test_general_sum2(self): m.A = Set(initialize=range(3)) m.x = Var(m.A, initialize=2) m.p = Param(m.A, mutable=True, default=3) - e = sum(m.p[i]*m.x[i] if i!=1 else m.x[i] for i in range(3)) + e = sum(m.p[i] * m.x[i] if i != 1 else m.x[i] for i in range(3)) m.x[1].fixed = True - + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -788,17 +837,17 @@ def test_general_sum2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):3, None:2, id(m.x[2]):3} + baseline = {id(m.x[0]): 3, None: 2, id(m.x[2]): 3} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -806,7 +855,7 @@ def test_general_sum2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):3, None:2, id(m.x[2]):3} + baseline = {id(m.x[0]): 3, None: 2, id(m.x[2]): 3} self.assertEqual(baseline, repn_to_dict(rep)) # self.assertTrue(rep.linear_coefs[0] is m.p[0]) @@ -817,16 +866,16 @@ def test_general_sum3(self): m.A = Set(initialize=range(3)) m.x = Var(m.A, initialize=2) m.p = Param(m.A, mutable=True, default=3) - e = sum(m.p[i]*m.x[i] if i<3 else m.x[i-3] for i in range(6)) - + e = sum(m.p[i] * m.x[i] if i < 3 else m.x[i - 3] for i in range(6)) + rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 3) self.assertTrue(len(rep.linear_coefs) == 3) @@ -834,17 +883,17 @@ def test_general_sum3(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):4, id(m.x[1]):4, id(m.x[2]):4} + baseline = {id(m.x[0]): 4, id(m.x[1]): 4, id(m.x[2]): 4} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 3) self.assertTrue(len(rep.linear_coefs) == 3) @@ -852,7 +901,7 @@ def test_general_sum3(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.x[0]):4, id(m.x[1]):4, id(m.x[2]):4} + baseline = {id(m.x[0]): 4, id(m.x[1]): 4, id(m.x[2]): 4} self.assertEqual(baseline, repn_to_dict(rep)) def test_nestedSum(self): @@ -875,12 +924,12 @@ def test_nestedSum(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -888,15 +937,15 @@ def test_nestedSum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:5, id(m.a) : 1, id(m.b) : 1 } + baseline = {None: 5, id(m.a): 1, id(m.b): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:5, id(rep.linear_vars[0]):1, id(rep.linear_vars[1]):1 } + baseline = {None: 5, id(rep.linear_vars[0]): 1, id(rep.linear_vars[1]): 1} self.assertEqual(baseline, repn_to_dict(rep)) - # + - # / \ + # + + # / \ # 5 + # / \ # a b @@ -905,12 +954,12 @@ def test_nestedSum(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -918,11 +967,11 @@ def test_nestedSum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:5, id(m.a) : 1, id(m.b) : 1 } + baseline = {None: 5, id(m.a): 1, id(m.b): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:5, id(rep.linear_vars[0]):1, id(rep.linear_vars[1]):1 } + baseline = {None: 5, id(rep.linear_vars[0]): 1, id(rep.linear_vars[1]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # + @@ -935,12 +984,12 @@ def test_nestedSum(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 3) self.assertTrue(len(rep.linear_coefs) == 3) @@ -948,15 +997,19 @@ def test_nestedSum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 1, id(m.b) : 1, id(m.c) : 1 } + baseline = {id(m.a): 1, id(m.b): 1, id(m.c): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1, id(rep.linear_vars[1]):1, id(rep.linear_vars[2]):1 } + baseline = { + id(rep.linear_vars[0]): 1, + id(rep.linear_vars[1]): 1, + id(rep.linear_vars[2]): 1, + } self.assertEqual(baseline, repn_to_dict(rep)) - # + - # / \ + # + + # / \ # c + # / \ # a b @@ -965,12 +1018,12 @@ def test_nestedSum(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 3) self.assertTrue(len(rep.linear_coefs) == 3) @@ -978,11 +1031,15 @@ def test_nestedSum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 1, id(m.b) : 1, id(m.c) : 1 } + baseline = {id(m.a): 1, id(m.b): 1, id(m.c): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1, id(rep.linear_vars[1]):1, id(rep.linear_vars[2]):1 } + baseline = { + id(rep.linear_vars[0]): 1, + id(rep.linear_vars[1]): 1, + id(rep.linear_vars[2]): 1, + } self.assertEqual(baseline, repn_to_dict(rep)) # + @@ -996,12 +1053,12 @@ def test_nestedSum(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 4) self.assertTrue(len(rep.linear_coefs) == 4) @@ -1009,11 +1066,16 @@ def test_nestedSum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 1, id(m.b) : 1, id(m.c) : 1, id(m.d) : 1 } + baseline = {id(m.a): 1, id(m.b): 1, id(m.c): 1, id(m.d): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1, id(rep.linear_vars[1]):1, id(rep.linear_vars[2]):1, id(rep.linear_vars[3]):1 } + baseline = { + id(rep.linear_vars[0]): 1, + id(rep.linear_vars[1]): 1, + id(rep.linear_vars[2]): 1, + id(rep.linear_vars[3]): 1, + } self.assertEqual(baseline, repn_to_dict(rep)) def test_sumOf_nestedTrivialProduct(self): @@ -1035,12 +1097,12 @@ def test_sumOf_nestedTrivialProduct(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -1048,11 +1110,11 @@ def test_sumOf_nestedTrivialProduct(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 5, id(m.b) : 1 } + baseline = {id(m.a): 5, id(m.b): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]) : 5, id(rep.linear_vars[1]) : 1 } + baseline = {id(rep.linear_vars[0]): 5, id(rep.linear_vars[1]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # + @@ -1064,12 +1126,12 @@ def test_sumOf_nestedTrivialProduct(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -1077,11 +1139,11 @@ def test_sumOf_nestedTrivialProduct(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 5, id(m.b) : 1 } + baseline = {id(m.a): 5, id(m.b): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[1]) : 5, id(rep.linear_vars[0]) : 1 } + baseline = {id(rep.linear_vars[1]): 5, id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # + @@ -1094,12 +1156,12 @@ def test_sumOf_nestedTrivialProduct(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 3) self.assertTrue(len(rep.linear_coefs) == 3) @@ -1107,11 +1169,15 @@ def test_sumOf_nestedTrivialProduct(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 5, id(m.b) : 1, id(m.c) : 1 } + baseline = {id(m.a): 5, id(m.b): 1, id(m.c): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[2]) : 5, id(rep.linear_vars[0]) : 1, id(rep.linear_vars[1]):1 } + baseline = { + id(rep.linear_vars[2]): 5, + id(rep.linear_vars[0]): 1, + id(rep.linear_vars[1]): 1, + } self.assertEqual(baseline, repn_to_dict(rep)) # + @@ -1124,12 +1190,12 @@ def test_sumOf_nestedTrivialProduct(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 3) self.assertTrue(len(rep.linear_coefs) == 3) @@ -1137,11 +1203,15 @@ def test_sumOf_nestedTrivialProduct(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 5, id(m.b) : 1, id(m.c) : 1 } + baseline = {id(m.a): 5, id(m.b): 1, id(m.c): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[2]) : 5, id(rep.linear_vars[0]) : 1, id(rep.linear_vars[1]):1 } + baseline = { + id(rep.linear_vars[2]): 5, + id(rep.linear_vars[0]): 1, + id(rep.linear_vars[1]): 1, + } self.assertEqual(baseline, repn_to_dict(rep)) # + @@ -1154,12 +1224,12 @@ def test_sumOf_nestedTrivialProduct(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -1167,11 +1237,11 @@ def test_sumOf_nestedTrivialProduct(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 5, id(m.b) : 5 } + baseline = {id(m.a): 5, id(m.b): 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):5, id(rep.linear_vars[1]):5 } + baseline = {id(rep.linear_vars[0]): 5, id(rep.linear_vars[1]): 5} self.assertEqual(baseline, repn_to_dict(rep)) def test_negation(self): @@ -1180,16 +1250,16 @@ def test_negation(self): # a m = ConcreteModel() m.a = Var() - e = - m.a + e = -m.a rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1197,11 +1267,11 @@ def test_negation(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : -1 } + baseline = {id(m.a): -1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]) : -1 } + baseline = {id(rep.linear_vars[0]): -1} self.assertEqual(baseline, repn_to_dict(rep)) def test_simpleDiff(self): @@ -1215,12 +1285,12 @@ def test_simpleDiff(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -1228,11 +1298,11 @@ def test_simpleDiff(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a) : 1, id(m.b) : -1 } + baseline = {id(m.a): 1, id(m.b): -1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]) : 1, id(rep.linear_vars[1]) : -1 } + baseline = {id(rep.linear_vars[0]): 1, id(rep.linear_vars[1]): -1} self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1242,12 +1312,12 @@ def test_simpleDiff(self): rep = generate_standard_repn(e) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertEqual(len(rep.linear_vars), 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -1255,7 +1325,7 @@ def test_simpleDiff(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) @@ -1271,12 +1341,12 @@ def test_constDiff(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1284,11 +1354,11 @@ def test_constDiff(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:-5, id(m.a) : 1 } + baseline = {None: -5, id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:-5, id(rep.linear_vars[0]) : 1 } + baseline = {None: -5, id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1298,12 +1368,12 @@ def test_constDiff(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1311,11 +1381,11 @@ def test_constDiff(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:5, id(m.a) : -1 } + baseline = {None: 5, id(m.a): -1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:5, id(rep.linear_vars[0]):-1 } + baseline = {None: 5, id(rep.linear_vars[0]): -1} self.assertEqual(baseline, repn_to_dict(rep)) def test_nestedDiff(self): @@ -1338,12 +1408,12 @@ def test_nestedDiff(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -1351,11 +1421,11 @@ def test_nestedDiff(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:-5, id(m.a):1, id(m.b):-1 } + baseline = {None: -5, id(m.a): 1, id(m.b): -1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:-5, id(rep.linear_vars[0]):1, id(rep.linear_vars[1]):-1 } + baseline = {None: -5, id(rep.linear_vars[0]): 1, id(rep.linear_vars[1]): -1} self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1368,12 +1438,12 @@ def test_nestedDiff(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -1381,11 +1451,11 @@ def test_nestedDiff(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:5, id(m.a):-1, id(m.b):1 } + baseline = {None: 5, id(m.a): -1, id(m.b): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:5, id(rep.linear_vars[0]):-1, id(rep.linear_vars[1]):1 } + baseline = {None: 5, id(rep.linear_vars[0]): -1, id(rep.linear_vars[1]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1398,12 +1468,12 @@ def test_nestedDiff(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 3) self.assertTrue(len(rep.linear_coefs) == 3) @@ -1411,11 +1481,15 @@ def test_nestedDiff(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):1, id(m.b):-1, id(m.c):-1 } + baseline = {id(m.a): 1, id(m.b): -1, id(m.c): -1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1, id(rep.linear_vars[1]):-1, id(rep.linear_vars[2]):-1 } + baseline = { + id(rep.linear_vars[0]): 1, + id(rep.linear_vars[1]): -1, + id(rep.linear_vars[2]): -1, + } self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1428,12 +1502,12 @@ def test_nestedDiff(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 3) self.assertTrue(len(rep.linear_coefs) == 3) @@ -1441,11 +1515,15 @@ def test_nestedDiff(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):-1, id(m.b):1, id(m.c):1 } + baseline = {id(m.a): -1, id(m.b): 1, id(m.c): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[1]):-1, id(rep.linear_vars[0]):1, id(rep.linear_vars[2]):1 } + baseline = { + id(rep.linear_vars[1]): -1, + id(rep.linear_vars[0]): 1, + id(rep.linear_vars[2]): 1, + } self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1459,12 +1537,12 @@ def test_nestedDiff(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 4) self.assertTrue(len(rep.linear_coefs) == 4) @@ -1472,11 +1550,16 @@ def test_nestedDiff(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):1, id(m.b):-1, id(m.c):-1, id(m.d):1 } + baseline = {id(m.a): 1, id(m.b): -1, id(m.c): -1, id(m.d): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1, id(rep.linear_vars[1]):-1, id(rep.linear_vars[2]):-1, id(rep.linear_vars[3]):1 } + baseline = { + id(rep.linear_vars[0]): 1, + id(rep.linear_vars[1]): -1, + id(rep.linear_vars[2]): -1, + id(rep.linear_vars[3]): 1, + } self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1490,12 +1573,12 @@ def test_nestedDiff(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 4) self.assertTrue(len(rep.linear_coefs) == 4) @@ -1503,11 +1586,16 @@ def test_nestedDiff(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):-1, id(m.b):1, id(m.c):1, id(m.d):-1 } + baseline = {id(m.a): -1, id(m.b): 1, id(m.c): 1, id(m.d): -1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[2]):-1, id(rep.linear_vars[3]):1, id(rep.linear_vars[0]):1, id(rep.linear_vars[1]):-1 } + baseline = { + id(rep.linear_vars[2]): -1, + id(rep.linear_vars[3]): 1, + id(rep.linear_vars[0]): 1, + id(rep.linear_vars[1]): -1, + } self.assertEqual(baseline, repn_to_dict(rep)) def test_sumOf_nestedTrivialProduct2(self): @@ -1529,12 +1617,12 @@ def test_sumOf_nestedTrivialProduct2(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -1542,11 +1630,11 @@ def test_sumOf_nestedTrivialProduct2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):5, id(m.b):-1 } + baseline = {id(m.a): 5, id(m.b): -1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):5, id(rep.linear_vars[1]):-1 } + baseline = {id(rep.linear_vars[0]): 5, id(rep.linear_vars[1]): -1} self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1559,12 +1647,12 @@ def test_sumOf_nestedTrivialProduct2(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -1572,11 +1660,11 @@ def test_sumOf_nestedTrivialProduct2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):-5, id(m.b):1 } + baseline = {id(m.a): -5, id(m.b): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[1]):-5, id(rep.linear_vars[0]):1 } + baseline = {id(rep.linear_vars[1]): -5, id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1596,11 +1684,15 @@ def test_sumOf_nestedTrivialProduct2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):5, id(m.b):-1, id(m.c):1 } + baseline = {id(m.a): 5, id(m.b): -1, id(m.c): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):5, id(rep.linear_vars[1]):-1, id(rep.linear_vars[2]):1 } + baseline = { + id(rep.linear_vars[0]): 5, + id(rep.linear_vars[1]): -1, + id(rep.linear_vars[2]): 1, + } self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1614,12 +1706,12 @@ def test_sumOf_nestedTrivialProduct2(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 3) self.assertTrue(len(rep.linear_coefs) == 3) @@ -1627,11 +1719,15 @@ def test_sumOf_nestedTrivialProduct2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):-5, id(m.b):1, id(m.c):-1 } + baseline = {id(m.a): -5, id(m.b): 1, id(m.c): -1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[2]):-5, id(rep.linear_vars[0]):1, id(rep.linear_vars[1]):-1 } + baseline = { + id(rep.linear_vars[2]): -5, + id(rep.linear_vars[0]): 1, + id(rep.linear_vars[1]): -1, + } self.assertEqual(baseline, repn_to_dict(rep)) # - @@ -1639,16 +1735,16 @@ def test_sumOf_nestedTrivialProduct2(self): # - # / \ # a b - e = - (m.a - m.b) + e = -(m.a - m.b) rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -1656,11 +1752,11 @@ def test_sumOf_nestedTrivialProduct2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):-1, id(m.b):1 } + baseline = {id(m.a): -1, id(m.b): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):-1, id(rep.linear_vars[1]):1 } + baseline = {id(rep.linear_vars[0]): -1, id(rep.linear_vars[1]): 1} self.assertEqual(baseline, repn_to_dict(rep)) def test_simpleProduct1(self): @@ -1674,12 +1770,12 @@ def test_simpleProduct1(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1687,11 +1783,11 @@ def test_simpleProduct1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):2 } + baseline = {id(m.a): 2} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):2 } + baseline = {id(rep.linear_vars[0]): 2} self.assertEqual(baseline, repn_to_dict(rep)) # * @@ -1701,12 +1797,12 @@ def test_simpleProduct1(self): rep = generate_standard_repn(e) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -1714,7 +1810,7 @@ def test_simpleProduct1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) @@ -1730,12 +1826,12 @@ def test_simpleProduct2(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1743,11 +1839,11 @@ def test_simpleProduct2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):5 } + baseline = {id(m.a): 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):5 } + baseline = {id(rep.linear_vars[0]): 5} self.assertEqual(baseline, repn_to_dict(rep)) # * @@ -1757,12 +1853,12 @@ def test_simpleProduct2(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1770,11 +1866,11 @@ def test_simpleProduct2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):5 } + baseline = {id(m.a): 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):5 } + baseline = {id(rep.linear_vars[0]): 5} self.assertEqual(baseline, repn_to_dict(rep)) def test_nestedProduct(self): @@ -1794,12 +1890,12 @@ def test_nestedProduct(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1807,11 +1903,11 @@ def test_nestedProduct(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):10 } + baseline = {id(m.a): 10} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):10 } + baseline = {id(rep.linear_vars[0]): 10} self.assertEqual(baseline, repn_to_dict(rep)) # * @@ -1824,12 +1920,12 @@ def test_nestedProduct(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1837,11 +1933,11 @@ def test_nestedProduct(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):10 } + baseline = {id(m.a): 10} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):10 } + baseline = {id(rep.linear_vars[0]): 10} self.assertEqual(baseline, repn_to_dict(rep)) # * @@ -1855,12 +1951,12 @@ def test_nestedProduct(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1868,11 +1964,11 @@ def test_nestedProduct(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):42 } + baseline = {id(m.a): 42} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):42 } + baseline = {id(rep.linear_vars[0]): 42} self.assertEqual(baseline, repn_to_dict(rep)) def test_nestedProduct2(self): @@ -1901,12 +1997,12 @@ def test_nestedProduct2(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1914,11 +2010,11 @@ def test_nestedProduct2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:50, id(m.d):10 } + baseline = {None: 50, id(m.d): 10} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { None:50, id(rep.linear_vars[0]):10 } + baseline = {None: 50, id(rep.linear_vars[0]): 10} self.assertEqual(baseline, repn_to_dict(rep)) # @@ -1938,12 +2034,12 @@ def test_nestedProduct2(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -1951,11 +2047,11 @@ def test_nestedProduct2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.d):125 } + baseline = {id(m.d): 125} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):125 } + baseline = {id(rep.linear_vars[0]): 125} self.assertEqual(baseline, repn_to_dict(rep)) def test_division(self): @@ -1971,16 +2067,16 @@ def test_division(self): m.y = Var(initialize=2.0) m.y.fixed = True - e = (m.a + m.b)/2.0 + e = (m.a + m.b) / 2.0 rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -1988,11 +2084,11 @@ def test_division(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):0.5, id(m.b):0.5 } + baseline = {id(m.a): 0.5, id(m.b): 0.5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):0.5, id(rep.linear_vars[1]):0.5 } + baseline = {id(rep.linear_vars[0]): 0.5, id(rep.linear_vars[1]): 0.5} self.assertEqual(baseline, repn_to_dict(rep)) # / @@ -2000,16 +2096,16 @@ def test_division(self): # + y # / \ # a b - e = (m.a + m.b)/m.y + e = (m.a + m.b) / m.y rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -2017,11 +2113,11 @@ def test_division(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):0.5, id(m.b):0.5 } + baseline = {id(m.a): 0.5, id(m.b): 0.5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):0.5, id(rep.linear_vars[1]):0.5 } + baseline = {id(rep.linear_vars[0]): 0.5, id(rep.linear_vars[1]): 0.5} self.assertEqual(baseline, repn_to_dict(rep)) # / @@ -2029,16 +2125,16 @@ def test_division(self): # + + # / \ / \ # a b y 2 - e = (m.a + m.b)/(m.y+2) + e = (m.a + m.b) / (m.y + 2) rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -2046,11 +2142,11 @@ def test_division(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):0.25, id(m.b):0.25 } + baseline = {id(m.a): 0.25, id(m.b): 0.25} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):0.25, id(rep.linear_vars[1]):0.25 } + baseline = {id(rep.linear_vars[0]): 0.25, id(rep.linear_vars[1]): 0.25} self.assertEqual(baseline, repn_to_dict(rep)) def test_weighted_sum1(self): @@ -2070,12 +2166,12 @@ def test_weighted_sum1(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -2083,11 +2179,11 @@ def test_weighted_sum1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):5, id(m.b):5 } + baseline = {id(m.a): 5, id(m.b): 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):5, id(rep.linear_vars[1]):5 } + baseline = {id(rep.linear_vars[0]): 5, id(rep.linear_vars[1]): 5} self.assertEqual(baseline, repn_to_dict(rep)) # * @@ -2100,12 +2196,12 @@ def test_weighted_sum1(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -2113,11 +2209,11 @@ def test_weighted_sum1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):5, id(m.b):5 } + baseline = {id(m.a): 5, id(m.b): 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):5, id(rep.linear_vars[1]):5 } + baseline = {id(rep.linear_vars[0]): 5, id(rep.linear_vars[1]): 5} self.assertEqual(baseline, repn_to_dict(rep)) # * @@ -2128,16 +2224,16 @@ def test_weighted_sum1(self): # / \ # a b e1 = m.a + m.b - e = 5 * 2* e1 + e = 5 * 2 * e1 rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -2145,24 +2241,24 @@ def test_weighted_sum1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):10, id(m.b):10 } + baseline = {id(m.a): 10, id(m.b): 10} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):10, id(rep.linear_vars[1]):10 } + baseline = {id(rep.linear_vars[0]): 10, id(rep.linear_vars[1]): 10} self.assertEqual(baseline, repn_to_dict(rep)) # 5(a+2(a+b)) - e = 5*(m.a+2*(m.a+m.b)) + e = 5 * (m.a + 2 * (m.a + m.b)) rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -2170,11 +2266,11 @@ def test_weighted_sum1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):15, id(m.b):10 } + baseline = {id(m.a): 15, id(m.b): 10} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):15, id(rep.linear_vars[1]):10 } + baseline = {id(rep.linear_vars[0]): 15, id(rep.linear_vars[1]): 10} self.assertEqual(baseline, repn_to_dict(rep)) def test_quadratic1(self): @@ -2184,7 +2280,7 @@ def test_quadratic1(self): m.c = Var() m.d = Var() - ab_key = (id(m.a),id(m.b)) if id(m.a) <= id(m.b) else (id(m.b),id(m.a)) + ab_key = (id(m.a), id(m.b)) if id(m.a) <= id(m.b) else (id(m.b), id(m.a)) # * # / \ @@ -2196,12 +2292,12 @@ def test_quadratic1(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2209,14 +2305,14 @@ def test_quadratic1(self): self.assertTrue(len(rep.quadratic_coefs) == 1) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { ab_key:5 } + baseline = {ab_key: 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) if id(rep.quadratic_vars[0][0]) < id(rep.quadratic_vars[0][1]): - baseline = { (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])):5 } + baseline = {(id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])): 5} else: - baseline = { (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])):5 } + baseline = {(id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])): 5} self.assertEqual(baseline, repn_to_dict(rep)) # * @@ -2229,12 +2325,12 @@ def test_quadratic1(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2242,14 +2338,14 @@ def test_quadratic1(self): self.assertTrue(len(rep.quadratic_coefs) == 1) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { ab_key:5 } + baseline = {ab_key: 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) if id(rep.quadratic_vars[0][0]) < id(rep.quadratic_vars[0][1]): - baseline = { (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])):5 } + baseline = {(id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])): 5} else: - baseline = { (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])):5 } + baseline = {(id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])): 5} self.assertEqual(baseline, repn_to_dict(rep)) # * @@ -2258,16 +2354,16 @@ def test_quadratic1(self): # / \ # a b e1 = m.a * m.b - e = 5*e1 + e = 5 * e1 rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2275,14 +2371,14 @@ def test_quadratic1(self): self.assertTrue(len(rep.quadratic_coefs) == 1) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { ab_key:5 } + baseline = {ab_key: 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) if id(rep.quadratic_vars[0][0]) < id(rep.quadratic_vars[0][1]): - baseline = { (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])):5 } + baseline = {(id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])): 5} else: - baseline = { (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])):5 } + baseline = {(id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])): 5} self.assertEqual(baseline, repn_to_dict(rep)) # * @@ -2291,16 +2387,16 @@ def test_quadratic1(self): # / \ # a 5 e1 = m.a * 5 - e = m.b*e1 + e = m.b * e1 rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2308,17 +2404,16 @@ def test_quadratic1(self): self.assertTrue(len(rep.quadratic_coefs) == 1) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { ab_key:5 } + baseline = {ab_key: 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) if id(rep.quadratic_vars[0][0]) < id(rep.quadratic_vars[0][1]): - baseline = { (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])):5 } + baseline = {(id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])): 5} else: - baseline = { (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])):5 } + baseline = {(id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])): 5} self.assertEqual(baseline, repn_to_dict(rep)) - def test_quadratic2(self): m = ConcreteModel() m.a = Var() @@ -2326,9 +2421,9 @@ def test_quadratic2(self): m.c = Var() m.d = Var() - ab_key = (id(m.a),id(m.b)) if id(m.a) <= id(m.b) else (id(m.b),id(m.a)) - ac_key = (id(m.a),id(m.c)) if id(m.a) <= id(m.c) else (id(m.c),id(m.a)) - bc_key = (id(m.b),id(m.c)) if id(m.b) <= id(m.c) else (id(m.c),id(m.b)) + ab_key = (id(m.a), id(m.b)) if id(m.a) <= id(m.b) else (id(m.b), id(m.a)) + ac_key = (id(m.a), id(m.c)) if id(m.a) <= id(m.c) else (id(m.c), id(m.a)) + bc_key = (id(m.b), id(m.c)) if id(m.b) <= id(m.c) else (id(m.c), id(m.b)) # * # / \ @@ -2341,12 +2436,12 @@ def test_quadratic2(self): # Collect quadratics rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -2354,25 +2449,31 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 1) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { ab_key:1, id(m.b):5 } + baseline = {ab_key: 1, id(m.b): 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) if id(rep.quadratic_vars[0][0]) < id(rep.quadratic_vars[0][1]): - baseline = { (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])):1, id(rep.linear_vars[0]):5 } + baseline = { + (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])): 1, + id(rep.linear_vars[0]): 5, + } else: - baseline = { (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])):1, id(rep.linear_vars[0]):5 } + baseline = { + (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])): 1, + id(rep.linear_vars[0]): 5, + } self.assertEqual(baseline, repn_to_dict(rep)) # Do not collect quadratics rep = generate_standard_repn(e, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2380,10 +2481,12 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertEqual(len(rep.nonlinear_vars), 2) - baseline1 = { } + baseline1 = {} self.assertEqual(baseline1, repn_to_dict(rep)) - baseline2 = set([ id(m.a), id(m.b) ]) - self.assertEqual(baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline2 = set([id(m.a), id(m.b)]) + self.assertEqual( + baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) s = pickle.dumps(rep) rep = pickle.loads(s) self.assertEqual(baseline1, repn_to_dict(rep)) @@ -2399,12 +2502,12 @@ def test_quadratic2(self): # Collect quadratics rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -2412,25 +2515,31 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 1) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { ab_key:1, id(m.b):5 } + baseline = {ab_key: 1, id(m.b): 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) if id(rep.quadratic_vars[0][0]) < id(rep.quadratic_vars[0][1]): - baseline = { (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])):1, id(rep.linear_vars[0]):5 } + baseline = { + (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])): 1, + id(rep.linear_vars[0]): 5, + } else: - baseline = { (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])):1, id(rep.linear_vars[0]):5 } + baseline = { + (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])): 1, + id(rep.linear_vars[0]): 5, + } self.assertEqual(baseline, repn_to_dict(rep)) # Do not collect quadratics rep = generate_standard_repn(e, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2438,10 +2547,12 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertEqual(len(rep.nonlinear_vars), 2) - baseline1 = { } + baseline1 = {} self.assertEqual(baseline1, repn_to_dict(rep)) - baseline2 = set([ id(m.a), id(m.b) ]) - self.assertEqual(baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline2 = set([id(m.a), id(m.b)]) + self.assertEqual( + baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) s = pickle.dumps(rep) rep = pickle.loads(s) self.assertEqual(baseline1, repn_to_dict(rep)) @@ -2456,12 +2567,12 @@ def test_quadratic2(self): # Collect quadratics rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -2469,13 +2580,26 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 2) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { ab_key:1, ac_key:1, id(m.b):5, id(m.c):5 } + baseline = {ab_key: 1, ac_key: 1, id(m.b): 5, id(m.c): 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - ab_key_ = (id(rep.quadratic_vars[0][0]),id(rep.quadratic_vars[0][1])) if id(rep.quadratic_vars[0][0]) <= id(rep.quadratic_vars[0][1]) else (id(rep.quadratic_vars[0][1]),id(rep.quadratic_vars[0][0])) - ac_key_ = (id(rep.quadratic_vars[1][0]),id(rep.quadratic_vars[1][1])) if id(rep.quadratic_vars[1][0]) <= id(rep.quadratic_vars[1][1]) else (id(rep.quadratic_vars[1][1]),id(rep.quadratic_vars[1][0])) - baseline = { ab_key_:1, ac_key_:1, id(rep.linear_vars[0]):5, id(rep.linear_vars[1]):5 } + ab_key_ = ( + (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])) + if id(rep.quadratic_vars[0][0]) <= id(rep.quadratic_vars[0][1]) + else (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])) + ) + ac_key_ = ( + (id(rep.quadratic_vars[1][0]), id(rep.quadratic_vars[1][1])) + if id(rep.quadratic_vars[1][0]) <= id(rep.quadratic_vars[1][1]) + else (id(rep.quadratic_vars[1][1]), id(rep.quadratic_vars[1][0])) + ) + baseline = { + ab_key_: 1, + ac_key_: 1, + id(rep.linear_vars[0]): 5, + id(rep.linear_vars[1]): 5, + } self.assertEqual(baseline, repn_to_dict(rep)) # * @@ -2488,12 +2612,12 @@ def test_quadratic2(self): # Collect quadratics rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 2) self.assertTrue(len(rep.linear_coefs) == 2) @@ -2501,24 +2625,37 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 2) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { ab_key:1, ac_key:1, id(m.b):5, id(m.c):5 } + baseline = {ab_key: 1, ac_key: 1, id(m.b): 5, id(m.c): 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - ab_key_ = (id(rep.quadratic_vars[0][0]),id(rep.quadratic_vars[0][1])) if id(rep.quadratic_vars[0][0]) <= id(rep.quadratic_vars[0][1]) else (id(rep.quadratic_vars[0][1]),id(rep.quadratic_vars[0][0])) - ac_key_ = (id(rep.quadratic_vars[1][0]),id(rep.quadratic_vars[1][1])) if id(rep.quadratic_vars[1][0]) <= id(rep.quadratic_vars[1][1]) else (id(rep.quadratic_vars[1][1]),id(rep.quadratic_vars[1][0])) - baseline = { ab_key_:1, ac_key_:1, id(rep.linear_vars[0]):5, id(rep.linear_vars[1]):5 } + ab_key_ = ( + (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])) + if id(rep.quadratic_vars[0][0]) <= id(rep.quadratic_vars[0][1]) + else (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])) + ) + ac_key_ = ( + (id(rep.quadratic_vars[1][0]), id(rep.quadratic_vars[1][1])) + if id(rep.quadratic_vars[1][0]) <= id(rep.quadratic_vars[1][1]) + else (id(rep.quadratic_vars[1][1]), id(rep.quadratic_vars[1][0])) + ) + baseline = { + ab_key_: 1, + ac_key_: 1, + id(rep.linear_vars[0]): 5, + id(rep.linear_vars[1]): 5, + } self.assertEqual(baseline, repn_to_dict(rep)) # Do not collect quadratics rep = generate_standard_repn(e, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2526,10 +2663,12 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertEqual(len(rep.nonlinear_vars), 3) - baseline1 = { } + baseline1 = {} self.assertEqual(baseline1, repn_to_dict(rep)) - baseline2 = set([ id(m.a), id(m.b), id(m.c) ]) - self.assertEqual(baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline2 = set([id(m.a), id(m.b), id(m.c)]) + self.assertEqual( + baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) s = pickle.dumps(rep) rep = pickle.loads(s) self.assertEqual(baseline1, repn_to_dict(rep)) @@ -2544,12 +2683,12 @@ def test_quadratic2(self): # Collect quadratics rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2557,24 +2696,32 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 2) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { ab_key:5, ac_key:5 } + baseline = {ab_key: 5, ac_key: 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - ab_key_ = (id(rep.quadratic_vars[0][0]),id(rep.quadratic_vars[0][1])) if id(rep.quadratic_vars[0][0]) <= id(rep.quadratic_vars[0][1]) else (id(rep.quadratic_vars[0][1]),id(rep.quadratic_vars[0][0])) - ac_key_ = (id(rep.quadratic_vars[1][0]),id(rep.quadratic_vars[1][1])) if id(rep.quadratic_vars[1][0]) <= id(rep.quadratic_vars[1][1]) else (id(rep.quadratic_vars[1][1]),id(rep.quadratic_vars[1][0])) - baseline = { ab_key_:5, ac_key_:5 } + ab_key_ = ( + (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])) + if id(rep.quadratic_vars[0][0]) <= id(rep.quadratic_vars[0][1]) + else (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])) + ) + ac_key_ = ( + (id(rep.quadratic_vars[1][0]), id(rep.quadratic_vars[1][1])) + if id(rep.quadratic_vars[1][0]) <= id(rep.quadratic_vars[1][1]) + else (id(rep.quadratic_vars[1][1]), id(rep.quadratic_vars[1][0])) + ) + baseline = {ab_key_: 5, ac_key_: 5} self.assertEqual(baseline, repn_to_dict(rep)) # Do not collect quadratics rep = generate_standard_repn(e, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2582,10 +2729,12 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertEqual(len(rep.nonlinear_vars), 3) - baseline1 = { } + baseline1 = {} self.assertEqual(baseline1, repn_to_dict(rep)) - baseline2 = set([ id(m.a), id(m.b), id(m.c) ]) - self.assertEqual(baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline2 = set([id(m.a), id(m.b), id(m.c)]) + self.assertEqual( + baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) s = pickle.dumps(rep) rep = pickle.loads(s) self.assertEqual(baseline1, repn_to_dict(rep)) @@ -2599,12 +2748,12 @@ def test_quadratic2(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2612,24 +2761,32 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 2) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { ab_key:5, ac_key:5 } + baseline = {ab_key: 5, ac_key: 5} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - ab_key_ = (id(rep.quadratic_vars[0][0]),id(rep.quadratic_vars[0][1])) if id(rep.quadratic_vars[0][0]) <= id(rep.quadratic_vars[0][1]) else (id(rep.quadratic_vars[0][1]),id(rep.quadratic_vars[0][0])) - ac_key_ = (id(rep.quadratic_vars[1][0]),id(rep.quadratic_vars[1][1])) if id(rep.quadratic_vars[1][0]) <= id(rep.quadratic_vars[1][1]) else (id(rep.quadratic_vars[1][1]),id(rep.quadratic_vars[1][0])) - baseline = { ab_key_:5, ac_key_:5 } + ab_key_ = ( + (id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])) + if id(rep.quadratic_vars[0][0]) <= id(rep.quadratic_vars[0][1]) + else (id(rep.quadratic_vars[0][1]), id(rep.quadratic_vars[0][0])) + ) + ac_key_ = ( + (id(rep.quadratic_vars[1][0]), id(rep.quadratic_vars[1][1])) + if id(rep.quadratic_vars[1][0]) <= id(rep.quadratic_vars[1][1]) + else (id(rep.quadratic_vars[1][1]), id(rep.quadratic_vars[1][0])) + ) + baseline = {ab_key_: 5, ac_key_: 5} self.assertEqual(baseline, repn_to_dict(rep)) # Do not collect quadratics rep = generate_standard_repn(e, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2637,10 +2794,12 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertEqual(len(rep.nonlinear_vars), 3) - baseline1 = { } + baseline1 = {} self.assertEqual(baseline1, repn_to_dict(rep)) - baseline2 = set([ id(m.a), id(m.b), id(m.c) ]) - self.assertEqual(baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline2 = set([id(m.a), id(m.b), id(m.c)]) + self.assertEqual( + baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) s = pickle.dumps(rep) rep = pickle.loads(s) self.assertEqual(baseline1, repn_to_dict(rep)) @@ -2654,12 +2813,12 @@ def test_quadratic2(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2677,12 +2836,12 @@ def test_quadratic2(self): # Do not collect quadratics rep = generate_standard_repn(e, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2690,10 +2849,12 @@ def test_quadratic2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertEqual(len(rep.nonlinear_vars), 3) - baseline1 = { } + baseline1 = {} self.assertEqual(baseline1, repn_to_dict(rep)) - baseline2 = set([ id(m.a), id(m.b), id(m.c) ]) - self.assertEqual(baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline2 = set([id(m.a), id(m.b), id(m.c)]) + self.assertEqual( + baseline2, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) s = pickle.dumps(rep) rep = pickle.loads(s) self.assertEqual(baseline1, repn_to_dict(rep)) @@ -2709,16 +2870,16 @@ def test_pow(self): m.q = Param(default=1) m.r = Param(default=2) - e = m.a ** 0 + e = m.a**0 rep = generate_standard_repn(e) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2726,7 +2887,7 @@ def test_pow(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:1 } + baseline = {None: 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) @@ -2735,16 +2896,16 @@ def test_pow(self): # ^ # / \ # a 1 - e = m.a ** 1 + e = m.a**1 rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -2752,26 +2913,26 @@ def test_pow(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):1 } + baseline = {id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1 } + baseline = {id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # ^ # / \ # a 2 - e = m.a ** 2 + e = m.a**2 rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2779,26 +2940,26 @@ def test_pow(self): self.assertTrue(len(rep.quadratic_coefs) == 1) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { (id(m.a),id(m.a)):1 } + baseline = {(id(m.a), id(m.a)): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { (id(rep.quadratic_vars[0][0]),id(rep.quadratic_vars[0][1])):1 } + baseline = {(id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])): 1} self.assertEqual(baseline, repn_to_dict(rep)) # ^ # / \ # a r - e = m.a ** m.r + e = m.a**m.r rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2806,26 +2967,26 @@ def test_pow(self): self.assertTrue(len(rep.quadratic_coefs) == 1) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { (id(m.a),id(m.a)):1 } + baseline = {(id(m.a), id(m.a)): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { (id(rep.quadratic_vars[0][0]),id(rep.quadratic_vars[0][1])):1 } + baseline = {(id(rep.quadratic_vars[0][0]), id(rep.quadratic_vars[0][1])): 1} self.assertEqual(baseline, repn_to_dict(rep)) # ^ # / \ # a 2 - e = m.a ** 2 + e = m.a**2 rep = generate_standard_repn(e, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2833,26 +2994,30 @@ def test_pow(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 1) - baseline = set([ id(m.a) ]) - self.assertEqual(baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline = set([id(m.a)]) + self.assertEqual( + baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = set([ id(rep.nonlinear_vars[0]) ]) - self.assertEqual(baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline = set([id(rep.nonlinear_vars[0])]) + self.assertEqual( + baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) # ^ # / \ # a m.r - e = m.a ** m.r + e = m.a**m.r rep = generate_standard_repn(e, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2860,26 +3025,30 @@ def test_pow(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 1) - baseline = set([ id(m.a) ]) - self.assertEqual(baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline = set([id(m.a)]) + self.assertEqual( + baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = set([ id(rep.nonlinear_vars[0]) ]) - self.assertEqual(baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline = set([id(rep.nonlinear_vars[0])]) + self.assertEqual( + baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) # ^ # / \ # a q - e = m.a ** m.q + e = m.a**m.q rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -2887,11 +3056,11 @@ def test_pow(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):1 } + baseline = {id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1 } + baseline = {id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) def test_pow2(self): @@ -2903,16 +3072,16 @@ def test_pow2(self): m.p = Param(default=3) m.a.fixed = True - e = m.p*m.a**2 + e = m.p * m.a**2 rep = generate_standard_repn(e, compute_values=False, quadratic=False) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2932,12 +3101,12 @@ def test_pow3(self): rep = generate_standard_repn(e, compute_values=False, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2945,17 +3114,17 @@ def test_pow3(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 1) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=True, quadratic=False) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2963,19 +3132,19 @@ def test_pow3(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:1 } + baseline = {None: 1} self.assertEqual(baseline, repn_to_dict(rep)) m.p.value = 1 rep = generate_standard_repn(e, compute_values=False, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -2983,17 +3152,17 @@ def test_pow3(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 1) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=True, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -3001,7 +3170,7 @@ def test_pow3(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):1 } + baseline = {id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) def test_pow4(self): @@ -3018,12 +3187,12 @@ def test_pow4(self): rep = generate_standard_repn(e, compute_values=False, quadratic=False) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3031,17 +3200,17 @@ def test_pow4(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:1 } + baseline = {None: 1} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=True, quadratic=False) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3049,19 +3218,19 @@ def test_pow4(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:1 } + baseline = {None: 1} self.assertEqual(baseline, repn_to_dict(rep)) m.b.fixed = False rep = generate_standard_repn(e, compute_values=False, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3069,7 +3238,7 @@ def test_pow4(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 1) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) def test_pow5(self): @@ -3077,16 +3246,16 @@ def test_pow5(self): m.a = Var(initialize=2) m.b = Var(initialize=2) - e = sin(m.a)**2 + e = sin(m.a) ** 2 rep = generate_standard_repn(e, compute_values=False, quadratic=True) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3094,19 +3263,19 @@ def test_pow5(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 1) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) - e = (m.a**2)**2 + e = (m.a**2) ** 2 rep = generate_standard_repn(e, compute_values=False, quadratic=True) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3114,19 +3283,19 @@ def test_pow5(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 1) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) - e = (m.a+m.b)**2 + e = (m.a + m.b) ** 2 rep = generate_standard_repn(e, compute_values=False, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3134,17 +3303,17 @@ def test_pow5(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 2) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False, quadratic=True) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3152,8 +3321,7 @@ def test_pow5(self): self.assertTrue(len(rep.quadratic_coefs) == 3) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { (id(m.a), id(m.a)): 1, - (id(m.b), id(m.b)): 1 } + baseline = {(id(m.a), id(m.a)): 1, (id(m.b), id(m.b)): 1} if id(m.a) < id(m.b): baseline[id(m.a), id(m.b)] = 2 else: @@ -3161,16 +3329,16 @@ def test_pow5(self): self.assertEqual(baseline, repn_to_dict(rep)) - e = (m.a+3)**2 + e = (m.a + 3) ** 2 rep = generate_standard_repn(e, compute_values=False, quadratic=True) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -3178,17 +3346,17 @@ def test_pow5(self): self.assertTrue(len(rep.quadratic_coefs) == 1) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:9, id(m.a):6, (id(m.a),id(m.a)):1} + baseline = {None: 9, id(m.a): 6, (id(m.a), id(m.a)): 1} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=True, quadratic=True) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -3196,19 +3364,19 @@ def test_pow5(self): self.assertTrue(len(rep.quadratic_coefs) == 1) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:9, id(m.a):6, (id(m.a),id(m.a)):1 } + baseline = {None: 9, id(m.a): 6, (id(m.a), id(m.a)): 1} self.assertEqual(baseline, repn_to_dict(rep)) m.a.fixed = True rep = generate_standard_repn(e, compute_values=True, quadratic=True) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3216,7 +3384,7 @@ def test_pow5(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:25 } + baseline = {None: 25} self.assertEqual(baseline, repn_to_dict(rep)) def test_pow6(self): @@ -3227,12 +3395,12 @@ def test_pow6(self): rep = generate_standard_repn(e, compute_values=False, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3240,19 +3408,19 @@ def test_pow6(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 1) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) - m.a.fixed=True + m.a.fixed = True rep = generate_standard_repn(e, compute_values=True, quadratic=False) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3260,22 +3428,22 @@ def test_pow6(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:8 } + baseline = {None: 8} self.assertEqual(baseline, repn_to_dict(rep)) def test_pow_of_lin_sum(self): m = ConcreteModel() m.x = Var(range(4)) - e = sum(x for x in m.x.values())**2 + e = sum(x for x in m.x.values()) ** 2 rep = generate_standard_repn(e, compute_values=False, quadratic=False) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3283,17 +3451,17 @@ def test_pow_of_lin_sum(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 4) - baseline = { } + baseline = {} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False, quadratic=True) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 2 ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertTrue( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 2) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertTrue(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3301,10 +3469,12 @@ def test_pow_of_lin_sum(self): self.assertTrue(len(rep.quadratic_coefs) == 10) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = {(id(i), id(j)): 2 - for i in m.x.values() - for j in m.x.values() - if id(i) < id(j)} + baseline = { + (id(i), id(j)): 2 + for i in m.x.values() + for j in m.x.values() + if id(i) < id(j) + } baseline.update({(id(i), id(i)): 1 for i in m.x.values()}) self.assertEqual(baseline, repn_to_dict(rep)) @@ -3317,12 +3487,12 @@ def test_fixed_exponent(self): m.x.fix(1) rep = generate_standard_repn(e) - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -3330,17 +3500,17 @@ def test_fixed_exponent(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.y):1, None: 2 } + baseline = {id(m.y): 1, None: 2} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1, None: 2 } + baseline = {id(rep.linear_vars[0]): 1, None: 2} self.assertEqual(baseline, repn_to_dict(rep)) def test_abs(self): # abs - # / - # a + # / + # a m = ConcreteModel() m.a = Var() m.q = Param(default=-1) @@ -3349,12 +3519,12 @@ def test_abs(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3362,28 +3532,32 @@ def test_abs(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 1) - baseline = set([ id(m.a) ]) - self.assertEqual(baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline = set([id(m.a)]) + self.assertEqual( + baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = set([ id(rep.nonlinear_vars[0]) ]) - self.assertEqual(baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline = set([id(rep.nonlinear_vars[0])]) + self.assertEqual( + baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) # abs - # / - # a + # / + # a e = abs(m.a) m.a.set_value(-1) m.a.fixed = True rep = generate_standard_repn(e) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3391,25 +3565,25 @@ def test_abs(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:1 } + baseline = {None: 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) self.assertEqual(baseline, repn_to_dict(rep)) # abs - # / - # q + # / + # q e = abs(m.q) rep = generate_standard_repn(e) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3417,7 +3591,7 @@ def test_abs(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:1 } + baseline = {None: 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) @@ -3425,8 +3599,8 @@ def test_abs(self): def test_cos(self): # cos - # / - # a + # / + # a m = ConcreteModel() m.a = Var() m.q = Param(default=0) @@ -3435,12 +3609,12 @@ def test_cos(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3448,28 +3622,32 @@ def test_cos(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 1) - baseline = set([ id(m.a) ]) - self.assertEqual(baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline = set([id(m.a)]) + self.assertEqual( + baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = set([ id(rep.nonlinear_vars[0]) ]) - self.assertEqual(baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) + baseline = set([id(rep.nonlinear_vars[0])]) + self.assertEqual( + baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) # cos - # / - # a + # / + # a e = cos(m.a) m.a.set_value(0) m.a.fixed = True rep = generate_standard_repn(e) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3477,25 +3655,25 @@ def test_cos(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:1.0 } + baseline = {None: 1.0} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) self.assertEqual(baseline, repn_to_dict(rep)) # cos - # / - # q + # / + # q e = cos(m.q) rep = generate_standard_repn(e) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3503,7 +3681,7 @@ def test_cos(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:1.0 } + baseline = {None: 1.0} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) @@ -3523,12 +3701,12 @@ def test_ExprIf(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -3536,11 +3714,11 @@ def test_ExprIf(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):1 } + baseline = {id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1 } + baseline = {id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # ExprIf @@ -3550,12 +3728,12 @@ def test_ExprIf(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -3563,11 +3741,11 @@ def test_ExprIf(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.b):1 } + baseline = {id(m.b): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1 } + baseline = {id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # ExprIf @@ -3577,12 +3755,12 @@ def test_ExprIf(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), None ) - self.assertFalse( rep.is_constant() ) - self.assertFalse( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertTrue( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), None) + self.assertFalse(rep.is_constant()) + self.assertFalse(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertTrue(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3590,11 +3768,13 @@ def test_ExprIf(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertFalse(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 3) - baseline = set([ id(m.a), id(m.b), id(m.c) ]) - self.assertEqual(baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr))) - #s = pickle.dumps(rep) - #rep = pickle.loads(s) - #self.assertEqual(baseline, repn_to_dict(rep)) + baseline = set([id(m.a), id(m.b), id(m.c)]) + self.assertEqual( + baseline, set(id(v_) for v_ in EXPR.identify_variables(rep.nonlinear_expr)) + ) + # s = pickle.dumps(rep) + # rep = pickle.loads(s) + # self.assertEqual(baseline, repn_to_dict(rep)) m = ConcreteModel() m.a = Var() @@ -3609,12 +3789,12 @@ def test_ExprIf(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -3622,11 +3802,11 @@ def test_ExprIf(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.a):1 } + baseline = {id(m.a): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1 } + baseline = {id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) # ExprIf @@ -3638,12 +3818,12 @@ def test_ExprIf(self): rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -3651,11 +3831,11 @@ def test_ExprIf(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.b):1 } + baseline = {id(m.b): 1} self.assertEqual(baseline, repn_to_dict(rep)) s = pickle.dumps(rep) rep = pickle.loads(s) - baseline = { id(rep.linear_vars[0]):1 } + baseline = {id(rep.linear_vars[0]): 1} self.assertEqual(baseline, repn_to_dict(rep)) def test_expr_identity1(self): @@ -3663,16 +3843,16 @@ def test_expr_identity1(self): m.p = Param(mutable=True, initialize=2) m.e = Expression(expr=m.p) - e = 1000*m.e + e = 1000 * m.e rep = generate_standard_repn(e, compute_values=True) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3680,17 +3860,17 @@ def test_expr_identity1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:2000 } + baseline = {None: 2000} self.assertEqual(baseline, repn_to_dict(rep)) rep = generate_standard_repn(e, compute_values=False) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3698,23 +3878,23 @@ def test_expr_identity1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:2000 } + baseline = {None: 2000} self.assertEqual(baseline, repn_to_dict(rep)) def test_expr_identity2(self): o = pyomo.kernel.expression() o.expr = 2 - e = 1000*o + e = 1000 * o rep = generate_standard_repn(e) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3722,7 +3902,7 @@ def test_expr_identity2(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:2000 } + baseline = {None: 2000} self.assertEqual(baseline, repn_to_dict(rep)) def test_expr_identity3(self): @@ -3730,16 +3910,16 @@ def test_expr_identity3(self): m.v = Var(initialize=2) m.e = Expression(expr=m.v) - e = 1000*m.e + e = 1000 * m.e rep = generate_standard_repn(e) # - self.assertFalse( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 1 ) - self.assertFalse( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertFalse(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 1) + self.assertFalse(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 1) self.assertTrue(len(rep.linear_coefs) == 1) @@ -3747,23 +3927,23 @@ def test_expr_identity3(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { id(m.v):1000 } + baseline = {id(m.v): 1000} self.assertEqual(baseline, repn_to_dict(rep)) def test_expr_const1(self): o = pyomo.kernel.expression() o.expr = as_numeric(2) - e = 1000*o + e = 1000 * o rep = generate_standard_repn(e, compute_values=True) # - self.assertTrue( rep.is_fixed() ) - self.assertEqual( rep.polynomial_degree(), 0 ) - self.assertTrue( rep.is_constant() ) - self.assertTrue( rep.is_linear() ) - self.assertFalse( rep.is_quadratic() ) - self.assertFalse( rep.is_nonlinear() ) + self.assertTrue(rep.is_fixed()) + self.assertEqual(rep.polynomial_degree(), 0) + self.assertTrue(rep.is_constant()) + self.assertTrue(rep.is_linear()) + self.assertFalse(rep.is_quadratic()) + self.assertFalse(rep.is_nonlinear()) # self.assertTrue(len(rep.linear_vars) == 0) self.assertTrue(len(rep.linear_coefs) == 0) @@ -3771,60 +3951,69 @@ def test_expr_const1(self): self.assertTrue(len(rep.quadratic_coefs) == 0) self.assertTrue(rep.nonlinear_expr is None) self.assertTrue(len(rep.nonlinear_vars) == 0) - baseline = { None:2000 } + baseline = {None: 2000} self.assertEqual(baseline, repn_to_dict(rep)) def test_to_expression1(self): m = ConcreteModel() m.A = RangeSet(5) m.v = Var(m.A) - m.p = Param(m.A, initialize={1:-2, 2:-1, 3:0, 4:1, 5:2}) + m.p = Param(m.A, initialize={1: -2, 2: -1, 3: 0, 4: 1, 5: 2}) e = sum(m.v[i] for i in m.v) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "v[1] + v[2] + v[3] + v[4] + v[5]") - - e = sum(m.p[i]*m.v[i] for i in m.v) + + e = sum(m.p[i] * m.v[i] for i in m.v) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "-2*v[1] - v[2] + v[4] + 2*v[5]") - + def test_to_expression2(self): m = ConcreteModel() m.A = RangeSet(5) m.v = Var(m.A) - m.p = Param(m.A, initialize={1:-2, 2:-1, 3:0, 4:1, 5:2}, mutable=True) + m.p = Param(m.A, initialize={1: -2, 2: -1, 3: 0, 4: 1, 5: 2}, mutable=True) - e = sum(m.p[i]*m.v[i] for i in m.v) + e = sum(m.p[i] * m.v[i] for i in m.v) rep = generate_standard_repn(e, compute_values=False) - self.assertEqual(str(rep.to_expression()), "p[1]*v[1] + p[2]*v[2] + p[3]*v[3] + p[4]*v[4] + p[5]*v[5]") - + self.assertEqual( + str(rep.to_expression()), + "p[1]*v[1] + p[2]*v[2] + p[3]*v[3] + p[4]*v[4] + p[5]*v[5]", + ) + def test_to_expression3(self): m = ConcreteModel() m.A = RangeSet(5) m.v = Var(m.A) - m.p = Param(m.A, initialize={1:-2, 2:-1, 3:0, 4:1, 5:2}) + m.p = Param(m.A, initialize={1: -2, 2: -1, 3: 0, 4: 1, 5: 2}) - e = sum(m.v[i]**2 for i in m.v) + e = sum(m.v[i] ** 2 for i in m.v) rep = generate_standard_repn(e, compute_values=True) - self.assertEqual(str(rep.to_expression()), "v[1]**2 + v[2]**2 + v[3]**2 + v[4]**2 + v[5]**2") - - e = sum(m.p[i]*m.v[i]**2 for i in m.v) + self.assertEqual( + str(rep.to_expression()), "v[1]**2 + v[2]**2 + v[3]**2 + v[4]**2 + v[5]**2" + ) + + e = sum(m.p[i] * m.v[i] ** 2 for i in m.v) rep = generate_standard_repn(e, compute_values=True) - self.assertEqual(str(rep.to_expression()), "-2*v[1]**2 - v[2]**2 + v[4]**2 + 2*v[5]**2") - - e = m.v[1]*m.v[2] + m.v[2]*m.v[3] + self.assertEqual( + str(rep.to_expression()), "-2*v[1]**2 - v[2]**2 + v[4]**2 + 2*v[5]**2" + ) + + e = m.v[1] * m.v[2] + m.v[2] * m.v[3] rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "v[1]*v[2] + v[2]*v[3]") - + def test_to_expression4(self): m = ConcreteModel() m.A = RangeSet(3) m.v = Var(m.A) - m.p = Param(m.A, initialize={1:-1, 2:0, 3:1}, mutable=True) + m.p = Param(m.A, initialize={1: -1, 2: 0, 3: 1}, mutable=True) - e = sum(m.p[i]*m.v[i]**2 for i in m.v) + e = sum(m.p[i] * m.v[i] ** 2 for i in m.v) rep = generate_standard_repn(e, compute_values=False) - self.assertEqual(str(rep.to_expression()), "p[1]*v[1]**2 + p[2]*v[2]**2 + p[3]*v[3]**2") + self.assertEqual( + str(rep.to_expression()), "p[1]*v[1]**2 + p[2]*v[2]**2 + p[3]*v[3]**2" + ) e = sin(m.v[1]) rep = generate_standard_repn(e, compute_values=False) @@ -3834,123 +4023,127 @@ def test_nonlinear_sum(self): m = ConcreteModel() m.v = Var() - e = 10*(sin(m.v) + cos(m.v)) + e = 10 * (sin(m.v) + cos(m.v)) rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "10*sin(v) + 10*cos(v)") - - e = 10*(1 + sin(m.v)) + + e = 10 * (1 + sin(m.v)) rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "10 + 10*sin(v)") - + def test_product1(self): m = ConcreteModel() m.v = Var() m.p = Param(mutable=True, initialize=0) - e = m.p*(1+m.v) + e = m.p * (1 + m.v) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "0") rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "p + p*v") - - e = (1+m.v)*m.p + + e = (1 + m.v) * m.p rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "0") rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "p + p*v") - - m.p.value = 1 - e = m.p*(1+m.v) + m.p.value = 1 + + e = m.p * (1 + m.v) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "1 + v") rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "p + p*v") - - e = (1+m.v)*m.p + + e = (1 + m.v) * m.p rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "1 + v") rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "p + p*v") - - def test_product2(self): + + def test_product2(self): m = ConcreteModel() m.v = Var(initialize=2) m.w = Var(initialize=3) m.v.fixed = True m.w.fixed = True - e = m.v*m.w + e = m.v * m.w rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "6") rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "v*w") - - e = m.w*m.v + + e = m.w * m.v rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "6") m.v.value = 0 - e = m.v*m.w + e = m.v * m.w rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "0") rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "v*w") - - e = m.w*m.v + + e = m.w * m.v m.w.fixed = False rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "0") - def test_product3(self): + def test_product3(self): m = ConcreteModel() m.v = Var(initialize=2) m.w = Var(initialize=3) - e = sin(m.v)*m.w + e = sin(m.v) * m.w rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "sin(v)*w") - e = m.w*sin(m.v) + e = m.w * sin(m.v) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "w*sin(v)") - def test_product4(self): + def test_product4(self): m = ConcreteModel() m.v = Var(initialize=2) m.w = Var(initialize=3) - e = (1 + m.v + m.w)*(m.v + m.w) + e = (1 + m.v + m.w) * (m.v + m.w) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "v + w + v**2 + 2*(v*w) + w**2") rep = generate_standard_repn(e, compute_values=True, quadratic=False) self.assertEqual(str(rep.to_expression()), "(1 + v + w)*(v + w)") - e = (1 + m.v + m.w + m.v**2)*(m.v + m.w + m.v**2) + e = (1 + m.v + m.w + m.v**2) * (m.v + m.w + m.v**2) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "(1 + v + w + v**2)*(v + w + v**2)") rep = generate_standard_repn(e, compute_values=True, quadratic=False) self.assertEqual(str(rep.to_expression()), "(1 + v + w + v**2)*(v + w + v**2)") - e = (m.v + m.w + m.v**2)*(1 + m.v + m.w + m.v**2) + e = (m.v + m.w + m.v**2) * (1 + m.v + m.w + m.v**2) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "(v + w + v**2)*(1 + v + w + v**2)") rep = generate_standard_repn(e, compute_values=True, quadratic=False) self.assertEqual(str(rep.to_expression()), "(v + w + v**2)*(1 + v + w + v**2)") - e = (1 + m.v + m.w + m.v**2)*(1 + m.v + m.w + m.v**2) + e = (1 + m.v + m.w + m.v**2) * (1 + m.v + m.w + m.v**2) rep = generate_standard_repn(e, compute_values=True) - self.assertEqual(str(rep.to_expression()), "(1 + v + w + v**2)*(1 + v + w + v**2)") + self.assertEqual( + str(rep.to_expression()), "(1 + v + w + v**2)*(1 + v + w + v**2)" + ) rep = generate_standard_repn(e, compute_values=True, quadratic=False) - self.assertEqual(str(rep.to_expression()), "(1 + v + w + v**2)*(1 + v + w + v**2)") + self.assertEqual( + str(rep.to_expression()), "(1 + v + w + v**2)*(1 + v + w + v**2)" + ) - def test_product5(self): + def test_product5(self): m = ConcreteModel() m.v = Var(initialize=2) m.w = Var(initialize=3) - e = (1 + m.v)*(1 + m.v) + e = (1 + m.v) * (1 + m.v) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "1 + 2*v + v**2") rep = generate_standard_repn(e, compute_values=True, quadratic=False) @@ -3961,18 +4154,18 @@ def test_product6(self): m.x = Var() m.y = Var() - e = (m.x + m.y) * (m.x - m.y) * (m.x ** 2 + m.y ** 2) + e = (m.x + m.y) * (m.x - m.y) * (m.x**2 + m.y**2) rep = generate_standard_repn(e) self.assertEqual(str(rep.to_expression()), "(x + y)*(x - y)*(x**2 + y**2)") self.assertTrue(rep.is_nonlinear()) self.assertFalse(rep.is_quadratic()) - def test_vars(self): + def test_vars(self): m = ConcreteModel() m.v = Var(initialize=2) m.w = Var(initialize=3) - e = sin(m.v) + m.v + 2*m.v + e = sin(m.v) + m.v + 2 * m.v rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "3*v + sin(v)") rep = generate_standard_repn(e, compute_values=False) @@ -3984,18 +4177,18 @@ def test_reciprocal(self): m.w = Var(initialize=0) m.p = Param(mutable=True, initialize=0.5) - e = sin(m.v)/m.p + e = sin(m.v) / m.p rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "1/p*sin(v)") rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "2.0*sin(v)") - e = m.p/sin(m.v) + e = m.p / sin(m.v) rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "p/sin(v)") - m.w.fixed=True - e = m.v/m.w + m.w.fixed = True + e = m.v / m.w try: rep = generate_standard_repn(e, compute_values=True) self.fail("Expected division by zero") @@ -4009,39 +4202,47 @@ def test_IfThen(self): m.v.fixed = True m.p = Param(mutable=True, initialize=1) - e = Expr_if(1, 1, m.w) + e = EXPR.Expr_if(1, 1, m.w) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "1") - e = Expr_if(1, m.w, 0) + e = EXPR.Expr_if(1, m.w, 0) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "w") - e = Expr_if(m.p == 0, 1, 0) + e = EXPR.Expr_if(m.p == 0, 1, 0) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "0") rep = generate_standard_repn(e, compute_values=False) - self.assertEqual(str(rep.to_expression()), - "Expr_if( ( p == 0 ), then=( 1 ), else=( 0 ) )") + self.assertEqual( + str(rep.to_expression()), "Expr_if( ( p == 0 ), then=( 1 ), else=( 0 ) )" + ) - e = Expr_if(m.p == 0, 1, m.v) + e = EXPR.Expr_if(m.p == 0, 1, m.v) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "0") rep = generate_standard_repn(e, compute_values=False) - self.assertEqual(str(rep.to_expression()), - "Expr_if( ( p == 0 ), then=( 1 ), else=( v ) )") + self.assertEqual( + str(rep.to_expression()), "Expr_if( ( p == 0 ), then=( 1 ), else=( v ) )" + ) - e = Expr_if(m.v, 1, 0) + e = EXPR.Expr_if(m.v, 1, 0) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "0") rep = generate_standard_repn(e, compute_values=False) - self.assertEqual(str(rep.to_expression()), "Expr_if( ( v ), then=( 1 ), else=( 0 ) )") + self.assertEqual( + str(rep.to_expression()), "Expr_if( ( v ), then=( 1 ), else=( 0 ) )" + ) - e = Expr_if(m.w, 1, 0) + e = EXPR.Expr_if(m.w, 1, 0) rep = generate_standard_repn(e, compute_values=True) - self.assertEqual(str(rep.to_expression()), "Expr_if( ( w ), then=( 1 ), else=( 0 ) )") + self.assertEqual( + str(rep.to_expression()), "Expr_if( ( w ), then=( 1 ), else=( 0 ) )" + ) rep = generate_standard_repn(e, compute_values=False) - self.assertEqual(str(rep.to_expression()), "Expr_if( ( w ), then=( 1 ), else=( 0 ) )") + self.assertEqual( + str(rep.to_expression()), "Expr_if( ( w ), then=( 1 ), else=( 0 ) )" + ) def test_nonl(self): m = ConcreteModel() @@ -4112,9 +4313,9 @@ def test_linear2(self): m = ConcreteModel() m.A = RangeSet(5) m.v = Var(m.A, initialize=1) - m.p = Param(m.A, initialize={1:-2, 2:-1, 3:0, 4:1, 5:2}) + m.p = Param(m.A, initialize={1: -2, 2: -1, 3: 0, 4: 1, 5: 2}) - e = quicksum(m.p[i]*m.v[1] for i in m.p) + summation(m.p, m.v) + e = quicksum(m.p[i] * m.v[1] for i in m.p) + summation(m.p, m.v) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "-2*v[1] - v[2] + v[4] + 2*v[5]") rep = generate_standard_repn(e, compute_values=False) @@ -4124,19 +4325,21 @@ def test_quadraticX1(self): m = ConcreteModel() m.A = RangeSet(5) m.v = Var(m.A, initialize=1) - m.p = Param(m.A, initialize={1:-2, 2:-1, 3:0, 4:1, 5:2}, mutable=True) + m.p = Param(m.A, initialize={1: -2, 2: -1, 3: 0, 4: 1, 5: 2}, mutable=True) - e = sum(m.p[i]*m.v[i]**2 for i in m.A) + e = sum(m.p[i] * m.v[i] ** 2 for i in m.A) rep = generate_standard_repn(e, compute_values=True) - self.assertEqual(str(rep.to_expression()), "-2*v[1]**2 - v[2]**2 + v[4]**2 + 2*v[5]**2") - #rep = generate_standard_repn(e, compute_values=False) - #self.assertEqual(str(rep.to_expression()), "-2*v[1]**2 - v[2]**2 + v[4]**2 + 2*v[5]**2") + self.assertEqual( + str(rep.to_expression()), "-2*v[1]**2 - v[2]**2 + v[4]**2 + 2*v[5]**2" + ) + # rep = generate_standard_repn(e, compute_values=False) + # self.assertEqual(str(rep.to_expression()), "-2*v[1]**2 - v[2]**2 + v[4]**2 + 2*v[5]**2") - m.v[1].fixed=True + m.v[1].fixed = True rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "-2 - v[2]**2 + v[4]**2 + 2*v[5]**2") - #rep = generate_standard_repn(e, compute_values=False) - #self.assertEqual(str(rep.to_expression()), "-2*v[1]*v[1] - v[2]**2 + v[4]**2 + 2*v[5]**2") + # rep = generate_standard_repn(e, compute_values=False) + # self.assertEqual(str(rep.to_expression()), "-2*v[1]*v[1] - v[2]**2 + v[4]**2 + 2*v[5]**2") def test_relational(self): m = ConcreteModel() @@ -4156,16 +4359,16 @@ def _g(*args): m.v.fixed = True m.g = ExternalFunction(_g) - e = 100*m.g(1,2.0,'3') + e = 100 * m.g(1, 2.0, '3') rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "300") self.assertEqual(rep.polynomial_degree(), 0) rep = generate_standard_repn(e, compute_values=False) self.assertEqual(rep.polynomial_degree(), 0) # The function ID is inconsistent, so we don't do a test - #self.assertEqual(str(rep.to_expression()), "100*g(0, 1, 2.0, '3')") + # self.assertEqual(str(rep.to_expression()), "100*g(0, 1, 2.0, '3')") - e = 100*m.g(1,2.0,'3',m.v) + e = 100 * m.g(1, 2.0, '3', m.v) rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "400") self.assertEqual(rep.polynomial_degree(), 0) @@ -4175,11 +4378,12 @@ def _g(*args): # computed degree appears to be general nonlinear. self.assertEqual(rep.polynomial_degree(), None) # The function ID is inconsistent, so we don't do a test - #self.assertEqual(str(rep.to_expression()), "100*g(0, 1, 2.0, '3', v)") + # self.assertEqual(str(rep.to_expression()), "100*g(0, 1, 2.0, '3', v)") def test_ducktyping(self): class vtype(pyomo.kernel.variable): pass + class Etype(pyomo.kernel.expression): pass @@ -4200,14 +4404,14 @@ class Etype(pyomo.kernel.expression): rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "1 + ") - e = (1 + v)*v + e = (1 + v) * v rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "6") rep = generate_standard_repn(e, compute_values=False) self.assertEqual(str(rep.to_expression()), "(1 + )*") E.expr = v - e = (1 + v)*E + e = (1 + v) * E rep = generate_standard_repn(e, compute_values=True) self.assertEqual(str(rep.to_expression()), "6") rep = generate_standard_repn(e, compute_values=False) @@ -4226,7 +4430,7 @@ def test_unexpectedly_NPV(self): m.x = Var() m.y = Var() m.p = Param(mutable=True, initialize=0) - e = m.y*cos(m.x/2) + e = m.y * cos(m.x / 2) # Replacing Var with a Param results in a NPV product expression # as the single argument of a regular (non-NPV) unary function. diff --git a/pyomo/repn/tests/test_util.py b/pyomo/repn/tests/test_util.py index e06736d3e51..a26bf97ca31 100644 --- a/pyomo/repn/tests/test_util.py +++ b/pyomo/repn/tests/test_util.py @@ -10,25 +10,75 @@ # ___________________________________________________________________________ import logging +import math import pyomo.common.unittest as unittest from io import StringIO +from pyomo.common.collections import ComponentMap +from pyomo.common.errors import DeveloperError, InvalidValueError from pyomo.common.log import LoggingIntercept -from pyomo.repn.util import ftoa +from pyomo.environ import ( + ConcreteModel, + Block, + Constraint, + Var, + Param, + Objective, + Suffix, + Expression, + Set, + SortComponents, +) +import pyomo.repn.util +from pyomo.repn.util import ( + FileDeterminism, + FileDeterminism_to_SortComponents, + InvalidNumber, + apply_node_operation, + categorize_valid_components, + complex_number_error, + ftoa, + initialize_var_map_from_column_order, + ordered_active_constraints, +) try: import numpy as np + numpy_available = True except: numpy_available = False + class TestRepnUtils(unittest.TestCase): def test_ftoa(self): # Test that trailing zeros are removed - f = 1.0 - a = ftoa(f) - self.assertEqual(a, '1') + self.assertEqual(ftoa(10.0), '10') + self.assertEqual(ftoa(1), '1') + self.assertEqual(ftoa(1.0), '1') + self.assertEqual(ftoa(-1.0), '-1') + self.assertEqual(ftoa(0.0), '0') + self.assertEqual(ftoa(1e100), '1e+100') + self.assertEqual(ftoa(1e-100), '1e-100') + + self.assertEqual(ftoa(10.0, True), '10') + self.assertEqual(ftoa(1, True), '1') + self.assertEqual(ftoa(1.0, True), '1') + self.assertEqual(ftoa(-1.0, True), '(-1)') + self.assertEqual(ftoa(0.0, True), '0') + self.assertEqual(ftoa(1e100, True), '1e+100') + self.assertEqual(ftoa(1e-100, True), '1e-100') + + # Check None + self.assertIsNone(ftoa(None)) + + m = ConcreteModel() + m.x = Var() + with self.assertRaisesRegex( + ValueError, r'Converting non-fixed bound or value to string: 2\*x' + ): + self.assertIsNone(ftoa(2 * m.x)) @unittest.skipIf(not numpy_available, "NumPy is not available") def test_ftoa_precision(self): @@ -43,9 +93,534 @@ def test_ftoa_precision(self): test = self.assertNotRegexpMatches else: test = self.assertRegex - test( log.getvalue(), - '.*Converting 1.1234567890123456789 to string ' - 'resulted in loss of precision' ) + test( + log.getvalue(), + '.*Converting 1.1234567890123456789 to string ' + 'resulted in loss of precision', + ) + + def test_filedeterminism(self): + with LoggingIntercept() as LOG: + a = FileDeterminism(10) + self.assertEqual(a, FileDeterminism.ORDERED) + self.assertEqual('', LOG.getvalue()) + + self.assertEqual(str(a), 'FileDeterminism.ORDERED') + self.assertEqual(f"{a}", 'FileDeterminism.ORDERED') + + with LoggingIntercept() as LOG: + a = FileDeterminism(1) + self.assertEqual(a, FileDeterminism.SORT_INDICES) + self.assertIn( + 'FileDeterminism(1) is deprecated. ' + 'Please use FileDeterminism.SORT_INDICES (20)', + LOG.getvalue().replace('\n', ' '), + ) + + with self.assertRaisesRegex(ValueError, "5 is not a valid FileDeterminism"): + FileDeterminism(5) + + def test_InvalidNumber(self): + a = InvalidNumber(-3) + b = InvalidNumber(5) + c = InvalidNumber(5) + + self.assertEqual((a + b).value, 2) + self.assertEqual((a - b).value, -8) + self.assertEqual((a * b).value, -15) + self.assertEqual((a / b).value, -0.6) + self.assertEqual((a**b).value, -(3**5)) + self.assertEqual(abs(a).value, 3) + self.assertEqual(abs(b).value, 5) + self.assertEqual((-a).value, 3) + self.assertEqual((-b).value, -5) + + self.assertEqual((a + 5).value, 2) + self.assertEqual((a - 5).value, -8) + self.assertEqual((a * 5).value, -15) + self.assertEqual((a / 5).value, -0.6) + self.assertEqual((a**5).value, -(3**5)) + + self.assertEqual((-3 + b).value, 2) + self.assertEqual((-3 - b).value, -8) + self.assertEqual((-3 * b).value, -15) + self.assertEqual((-3 / b).value, -0.6) + self.assertEqual(((-3) ** b).value, -(3**5)) + + self.assertTrue(a < b) + self.assertTrue(a <= b) + self.assertFalse(a > b) + self.assertFalse(a >= b) + self.assertFalse(a == b) + self.assertTrue(a != b) + + self.assertFalse(c < b) + self.assertTrue(c <= b) + self.assertFalse(c > b) + self.assertTrue(c >= b) + self.assertTrue(c == b) + self.assertFalse(c != b) + + self.assertTrue(a < 5) + self.assertTrue(a <= 5) + self.assertFalse(a > 5) + self.assertFalse(a >= 5) + self.assertFalse(a == 5) + self.assertTrue(a != 5) + + self.assertTrue(3 < b) + self.assertTrue(3 <= b) + self.assertFalse(3 > b) + self.assertFalse(3 >= b) + self.assertFalse(3 == b) + self.assertTrue(3 != b) + + # TODO: eventually these should raise exceptions + d = InvalidNumber('abc') + self.assertEqual(repr(b), "5") + self.assertEqual(repr(d), "'abc'") + self.assertEqual(f'{b}', "5") + self.assertEqual(f'{d}', "abc") + + def test_apply_operation(self): + m = ConcreteModel() + m.x = Var() + div = 1 / m.x + mul = m.x * m.x + exp = m.x ** (1 / 2) + + with LoggingIntercept() as LOG: + self.assertEqual(apply_node_operation(exp, [4, 1 / 2]), 2) + self.assertEqual(LOG.getvalue(), "") + + with LoggingIntercept() as LOG: + ans = apply_node_operation(mul, [float('inf'), 0]) + self.assertIs(type(ans), InvalidNumber) + self.assertTrue(math.isnan(ans.value)) + self.assertEqual(LOG.getvalue(), "") + + _halt = pyomo.repn.util.HALT_ON_EVALUATION_ERROR + try: + pyomo.repn.util.HALT_ON_EVALUATION_ERROR = True + with LoggingIntercept() as LOG: + with self.assertRaisesRegex(ZeroDivisionError, 'division by zero'): + apply_node_operation(div, [1, 0]) + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 1/x\n", + ) + + pyomo.repn.util.HALT_ON_EVALUATION_ERROR = False + with LoggingIntercept() as LOG: + val = apply_node_operation(div, [1, 0]) + self.assertEqual(str(val), "InvalidNumber(nan)") + self.assertEqual( + LOG.getvalue(), + "Exception encountered evaluating expression 'div(1, 0)'\n" + "\tmessage: division by zero\n" + "\texpression: 1/x\n", + ) + + finally: + pyomo.repn.util.HALT_ON_EVALUATION_ERROR = _halt + + def test_complex_number_error(self): + class Visitor(object): + pass + + visitor = Visitor() + + m = ConcreteModel() + m.x = Var() + exp = m.x ** (1 / 2) + + _halt = pyomo.repn.util.HALT_ON_EVALUATION_ERROR + try: + pyomo.repn.util.HALT_ON_EVALUATION_ERROR = True + with LoggingIntercept() as LOG: + with self.assertRaisesRegex( + InvalidValueError, 'Pyomo Visitor does not support complex numbers' + ): + complex_number_error(1j, visitor, exp) + self.assertEqual( + LOG.getvalue(), + "Complex number returned from expression\n" + "\tmessage: Pyomo Visitor does not support complex numbers\n" + "\texpression: x**0.5\n", + ) + + with LoggingIntercept() as LOG: + with self.assertRaisesRegex( + InvalidValueError, 'Pyomo Visitor does not support complex numbers' + ): + complex_number_error(1j, visitor, exp, "'(-1)**(0.5)'") + self.assertEqual( + LOG.getvalue(), + "Complex number returned from expression '(-1)**(0.5)'\n" + "\tmessage: Pyomo Visitor does not support complex numbers\n" + "\texpression: x**0.5\n", + ) + + pyomo.repn.util.HALT_ON_EVALUATION_ERROR = False + with LoggingIntercept() as LOG: + val = complex_number_error(1j, visitor, exp) + self.assertEqual(str(val), "InvalidNumber(1j)") + self.assertEqual( + LOG.getvalue(), + "Complex number returned from expression\n" + "\tmessage: Pyomo Visitor does not support complex numbers\n" + "\texpression: x**0.5\n", + ) + + finally: + pyomo.repn.util.HALT_ON_EVALUATION_ERROR = _halt + + def test_categorize_valid_components(self): + m = ConcreteModel() + m.x = Var() + m.o = Objective() + m.b2 = Block() + m.b2.e = Expression() + m.b2.p = Param() + m.b2.q = Param() + m.b = Block() + m.b.p = Param() + m.s = Suffix() + m.b.t = Suffix() + m.b.s = Suffix() + + m.b.deactivate() + + component_map, unrecognized = categorize_valid_components( + m, valid={Var, Block}, targets={Param, Objective, Set} + ) + self.assertStructuredAlmostEqual( + component_map, {Param: [m.b2], Objective: [m], Set: []} + ) + self.assertStructuredAlmostEqual(unrecognized, {Suffix: [m.s]}) + + component_map, unrecognized = categorize_valid_components( + m, active=None, valid={Var, Block}, targets={Param, Objective, Set} + ) + self.assertStructuredAlmostEqual( + component_map, {Param: [m.b2, m.b], Objective: [m], Set: []} + ) + self.assertStructuredAlmostEqual( + unrecognized, {Suffix: [m.s, m.b.t, m.b.s], Expression: [m.b2.e]} + ) + + component_map, unrecognized = categorize_valid_components( + m, sort=True, valid={Var, Block}, targets={Param, Objective, Set} + ) + self.assertStructuredAlmostEqual( + component_map, {Param: [m.b2], Objective: [m], Set: []} + ) + self.assertStructuredAlmostEqual(unrecognized, {Suffix: [m.s]}) + + component_map, unrecognized = categorize_valid_components( + m, + sort=True, + active=None, + valid={Var, Block}, + targets={Param, Objective, Set}, + ) + self.assertStructuredAlmostEqual( + component_map, {Param: [m.b, m.b2], Objective: [m], Set: []} + ) + self.assertStructuredAlmostEqual( + unrecognized, {Suffix: [m.s, m.b.s, m.b.t], Expression: [m.b2.e]} + ) + + with self.assertRaises(AssertionError): + component_map, unrecognized = categorize_valid_components(m, active=False) + + with self.assertRaisesRegex( + DeveloperError, + "categorize_valid_components: Cannot have component type " + r"\[\]*Set'\>\] in both the `valid` " + "and `targets` sets", + ): + categorize_valid_components( + m, valid={Var, Block, Set}, targets={Param, Objective, Set} + ) + + def test_FileDeterminism_to_SortComponents(self): + self.assertEqual( + FileDeterminism_to_SortComponents(FileDeterminism(0)), + SortComponents.unsorted, + ) + self.assertEqual( + FileDeterminism_to_SortComponents(FileDeterminism.ORDERED), + SortComponents.unsorted, + ) + self.assertEqual( + FileDeterminism_to_SortComponents(FileDeterminism.SORT_INDICES), + SortComponents.indices, + ) + self.assertEqual( + FileDeterminism_to_SortComponents(FileDeterminism.SORT_SYMBOLS), + SortComponents.indices | SortComponents.alphabetical, + ) + + def test_initialize_var_map_from_column_order(self): + class MockConfig(object): + column_order = None + file_determinism = FileDeterminism(0) + + m = ConcreteModel() + m.x = Var() + m.y = Var([3, 2]) + m.c = Block() + m.c.x = Var() + m.c.y = Var([5, 4]) + m.b = Block() + m.b.x = Var() + m.b.y = Var([7, 6]) + + # No column order, no determinism: + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), [] + ) + # ...sort indices (but not names): + MockConfig.file_determinism = FileDeterminism.SORT_INDICES + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.x, m.y[2], m.y[3], m.c.x, m.c.y[4], m.c.y[5], m.b.x, m.b.y[6], m.b.y[7]], + ) + # ...sort indices and names: + MockConfig.file_determinism = FileDeterminism.SORT_SYMBOLS + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.x, m.y[2], m.y[3], m.b.x, m.b.y[6], m.b.y[7], m.c.x, m.c.y[4], m.c.y[5]], + ) + + # column order "False", no determinism: + MockConfig.column_order = False + MockConfig.file_determinism = FileDeterminism(0) + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), [] + ) + # ...sort indices (but not names): + MockConfig.file_determinism = FileDeterminism.SORT_INDICES + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.x, m.y[2], m.y[3], m.c.x, m.c.y[4], m.c.y[5], m.b.x, m.b.y[6], m.b.y[7]], + ) + # ...sort indices and names: + MockConfig.file_determinism = FileDeterminism.SORT_SYMBOLS + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.x, m.y[2], m.y[3], m.b.x, m.b.y[6], m.b.y[7], m.c.x, m.c.y[4], m.c.y[5]], + ) + + # column order "True", no determinism: + MockConfig.column_order = True + MockConfig.file_determinism = FileDeterminism(0) + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.x, m.y[3], m.y[2], m.c.x, m.c.y[5], m.c.y[4], m.b.x, m.b.y[7], m.b.y[6]], + ) + # ...sort indices (but not names): + MockConfig.column_order = True + MockConfig.file_determinism = FileDeterminism.SORT_INDICES + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.x, m.y[2], m.y[3], m.c.x, m.c.y[4], m.c.y[5], m.b.x, m.b.y[6], m.b.y[7]], + ) + # ...sort indices and names: + MockConfig.column_order = True + MockConfig.file_determinism = FileDeterminism.SORT_SYMBOLS + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.x, m.y[2], m.y[3], m.b.x, m.b.y[6], m.b.y[7], m.c.x, m.c.y[4], m.c.y[5]], + ) + + # column order "True", no determinism, pre-specified entries + # (prespecified stay at the beginning of the list): + MockConfig.column_order = True + MockConfig.file_determinism = FileDeterminism.ORDERED + var_map = {id(m.b.y[7]): m.b.y[7], id(m.c.y[5]): m.c.y[5], id(m.y[3]): m.y[3]} + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, var_map).values()), + [m.b.y[7], m.c.y[5], m.y[3], m.x, m.y[2], m.c.x, m.c.y[4], m.b.x, m.b.y[6]], + ) + + # column order from a ComponentMap + MockConfig.column_order = ComponentMap( + (v, i) for i, v in enumerate([m.b.y, m.y, m.c.y[4], m.x]) + ) + MockConfig.file_determinism = FileDeterminism.ORDERED + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.b.y[7], m.b.y[6], m.y[3], m.y[2], m.c.y[4], m.x], + ) + MockConfig.file_determinism = FileDeterminism.SORT_INDICES + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.b.y[6], m.b.y[7], m.y[2], m.y[3], m.c.y[4], m.x, m.c.x, m.c.y[5], m.b.x], + ) + MockConfig.file_determinism = FileDeterminism.SORT_SYMBOLS + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.b.y[6], m.b.y[7], m.y[2], m.y[3], m.c.y[4], m.x, m.b.x, m.c.x, m.c.y[5]], + ) + + # column order from a list + MockConfig.column_order = [m.b.y, m.y, m.c.y[4], m.x] + ref = list(MockConfig.column_order) + MockConfig.file_determinism = FileDeterminism.ORDERED + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.b.y[7], m.b.y[6], m.y[3], m.y[2], m.c.y[4], m.x], + ) + # verify no side effects + self.assertEqual(MockConfig.column_order, ref) + MockConfig.file_determinism = FileDeterminism.SORT_INDICES + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.b.y[6], m.b.y[7], m.y[2], m.y[3], m.c.y[4], m.x, m.c.x, m.c.y[5], m.b.x], + ) + # verify no side effects + self.assertEqual(MockConfig.column_order, ref) + MockConfig.file_determinism = FileDeterminism.SORT_SYMBOLS + self.assertEqual( + list(initialize_var_map_from_column_order(m, MockConfig, {}).values()), + [m.b.y[6], m.b.y[7], m.y[2], m.y[3], m.c.y[4], m.x, m.b.x, m.c.x, m.c.y[5]], + ) + # verify no side effects + self.assertEqual(MockConfig.column_order, ref) + + def test_ordered_active_constraints(self): + class MockConfig(object): + row_order = None + file_determinism = FileDeterminism(0) + + m = ConcreteModel() + m.v = Var() + m.x = Constraint(expr=m.v >= 0) + m.y = Constraint([3, 2], rule=lambda b, i: m.v >= 0) + m.c = Block() + m.c.x = Constraint(expr=m.v >= 0) + m.c.y = Constraint([5, 4], rule=lambda b, i: m.v >= 0) + m.b = Block() + m.b.x = Constraint(expr=m.v >= 0) + m.b.y = Constraint([7, 6], rule=lambda b, i: m.v >= 0) + + # No row order, no determinism: + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.x, m.y[3], m.y[2], m.c.x, m.c.y[5], m.c.y[4], m.b.x, m.b.y[7], m.b.y[6]], + ) + # ...sort indices (but not names): + MockConfig.file_determinism = FileDeterminism.SORT_INDICES + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.x, m.y[2], m.y[3], m.c.x, m.c.y[4], m.c.y[5], m.b.x, m.b.y[6], m.b.y[7]], + ) + # ...sort indices and names: + MockConfig.file_determinism = FileDeterminism.SORT_SYMBOLS + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.x, m.y[2], m.y[3], m.b.x, m.b.y[6], m.b.y[7], m.c.x, m.c.y[4], m.c.y[5]], + ) + + # Empty row order, no determinism: + MockConfig.row_order = [] + MockConfig.file_determinism = FileDeterminism(0) + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.x, m.y[3], m.y[2], m.c.x, m.c.y[5], m.c.y[4], m.b.x, m.b.y[7], m.b.y[6]], + ) + + # row order "False", no determinism: + MockConfig.row_order = False + MockConfig.file_determinism = FileDeterminism(0) + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.x, m.y[3], m.y[2], m.c.x, m.c.y[5], m.c.y[4], m.b.x, m.b.y[7], m.b.y[6]], + ) + # ...sort indices (but not names): + MockConfig.file_determinism = FileDeterminism.SORT_INDICES + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.x, m.y[2], m.y[3], m.c.x, m.c.y[4], m.c.y[5], m.b.x, m.b.y[6], m.b.y[7]], + ) + # ...sort indices and names: + MockConfig.file_determinism = FileDeterminism.SORT_SYMBOLS + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.x, m.y[2], m.y[3], m.b.x, m.b.y[6], m.b.y[7], m.c.x, m.c.y[4], m.c.y[5]], + ) + + # row order "True", no determinism: + MockConfig.row_order = True + MockConfig.file_determinism = FileDeterminism(0) + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.x, m.y[3], m.y[2], m.c.x, m.c.y[5], m.c.y[4], m.b.x, m.b.y[7], m.b.y[6]], + ) + # ...sort indices (but not names): + MockConfig.row_order = True + MockConfig.file_determinism = FileDeterminism.SORT_INDICES + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.x, m.y[2], m.y[3], m.c.x, m.c.y[4], m.c.y[5], m.b.x, m.b.y[6], m.b.y[7]], + ) + # ...sort indices and names: + MockConfig.row_order = True + MockConfig.file_determinism = FileDeterminism.SORT_SYMBOLS + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.x, m.y[2], m.y[3], m.b.x, m.b.y[6], m.b.y[7], m.c.x, m.c.y[4], m.c.y[5]], + ) + + # row order from a ComponentMap + MockConfig.row_order = ComponentMap( + (v, i) for i, v in enumerate([m.b.y, m.y, m.c.y[4], m.x]) + ) + MockConfig.file_determinism = FileDeterminism.ORDERED + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.b.y[7], m.b.y[6], m.y[3], m.y[2], m.c.y[4], m.x, m.c.x, m.c.y[5], m.b.x], + ) + MockConfig.file_determinism = FileDeterminism.SORT_INDICES + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.b.y[6], m.b.y[7], m.y[2], m.y[3], m.c.y[4], m.x, m.c.x, m.c.y[5], m.b.x], + ) + MockConfig.file_determinism = FileDeterminism.SORT_SYMBOLS + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.b.y[6], m.b.y[7], m.y[2], m.y[3], m.c.y[4], m.x, m.b.x, m.c.x, m.c.y[5]], + ) + + # row order from a list + MockConfig.row_order = [m.b.y, m.y, m.c.y[4], m.x] + ref = list(MockConfig.row_order) + MockConfig.file_determinism = FileDeterminism.ORDERED + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.b.y[7], m.b.y[6], m.y[3], m.y[2], m.c.y[4], m.x, m.c.x, m.c.y[5], m.b.x], + ) + # verify no side effects + self.assertEqual(MockConfig.row_order, ref) + MockConfig.file_determinism = FileDeterminism.SORT_INDICES + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.b.y[6], m.b.y[7], m.y[2], m.y[3], m.c.y[4], m.x, m.c.x, m.c.y[5], m.b.x], + ) + # verify no side effects + self.assertEqual(MockConfig.row_order, ref) + MockConfig.file_determinism = FileDeterminism.SORT_SYMBOLS + self.assertEqual( + list(ordered_active_constraints(m, MockConfig)), + [m.b.y[6], m.b.y[7], m.y[2], m.y[3], m.c.y[4], m.x, m.b.x, m.c.x, m.c.y[5]], + ) + # verify no side effects + self.assertEqual(MockConfig.row_order, ref) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/repn/util.py b/pyomo/repn/util.py index 745de1c5341..4f855b53433 100644 --- a/pyomo/repn/util.py +++ b/pyomo/repn/util.py @@ -9,16 +9,399 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core.base import Var, Param, Expression, Objective, Block, \ - Constraint, Suffix -from pyomo.core.expr.numvalue import native_numeric_types, is_fixed, value +import enum +import itertools import logging +import sys + +from pyomo.common.collections import Sequence, ComponentMap +from pyomo.common.deprecation import deprecation_warning +from pyomo.common.errors import DeveloperError, InvalidValueError +from pyomo.core.base import ( + Var, + Param, + Expression, + Objective, + Block, + Constraint, + Suffix, + SortComponents, +) +from pyomo.core.base.component import ActiveComponent +from pyomo.core.expr.numvalue import native_numeric_types, is_fixed, value +import pyomo.core.kernel as kernel logger = logging.getLogger('pyomo.core') valid_expr_ctypes_minlp = {Var, Param, Expression, Objective} valid_active_ctypes_minlp = {Block, Constraint, Objective, Suffix} +HALT_ON_EVALUATION_ERROR = False +nan = float('nan') + + +class ExprType(enum.IntEnum): + CONSTANT = 0 + MONOMIAL = 10 + LINEAR = 20 + QUADRATIC = 30 + GENERAL = 40 + + +_FileDeterminism_deprecation = { + 1: 20, + 2: 30, + 'DEPRECATED_KEYS': 20, + 'DEPRECATED_KEYS_AND_NAMES': 30, +} + + +class FileDeterminism(enum.IntEnum): + NONE = 0 + # DEPRECATED_KEYS = 1 + # DEPRECATED_KEYS_AND_NAMES = 2 + ORDERED = 10 + SORT_INDICES = 20 + SORT_SYMBOLS = 30 + + # We will define __str__ and __format__ so that behavior in python + # 3.11 is consistent with 3.7 - 3.10. + + def __str__(self): + return enum.Enum.__str__(self) + + def __format__(self, spec): + # This cannot just call Enum.__format__ because that returns the + # numeric value in Python 3.7 + return str(self).__format__(spec) + + @classmethod + def _missing_(cls, value): + # This is not a perfect deprecation path, as the old attributes + # are no longer valid. However, as the previous implementation + # was a pure int and not an Enum, this is sufficient for our + # needs. + if value in _FileDeterminism_deprecation: + new = FileDeterminism(_FileDeterminism_deprecation[value]) + deprecation_warning( + f'FileDeterminism({value}) is deprecated. ' + f'Please use {str(new)} ({int(new)})', + version='6.5.0', + ) + return new + return super()._missing_(value) + + +class InvalidNumber(object): + def __init__(self, value): + self.value = value + + def duplicate(self, new_value): + return InvalidNumber(new_value) + + def merge(self, other, new_value): + return InvalidNumber(new_value) + + def __eq__(self, other): + if other.__class__ is InvalidNumber: + return self.value == other.value + else: + return self.value == other + + def __lt__(self, other): + if other.__class__ is InvalidNumber: + return self.value < other.value + else: + return self.value < other + + def __gt__(self, other): + if other.__class__ is InvalidNumber: + return self.value > other.value + else: + return self.value > other + + def __le__(self, other): + if other.__class__ is InvalidNumber: + return self.value <= other.value + else: + return self.value <= other + + def __ge__(self, other): + if other.__class__ is InvalidNumber: + return self.value >= other.value + else: + return self.value >= other + + def __str__(self): + return f'InvalidNumber({self.value})' + + def __repr__(self): + # FIXME: We want to move to where converting InvalidNumber to + # string (with either repr() or f"") should raise a + # InvalidValueError. However, at the moment, this breaks some + # tests in PyROS. + return repr(self.value) + # raise InvalidValueError(f'Cannot emit {str(self)} in compiled representation') + + def __format__(self, format_spec): + # FIXME: We want to move to where converting InvalidNumber to + # string (with either repr() or f"") should raise a + # InvalidValueError. However, at the moment, this breaks some + # tests in PyROS. + return self.value.__format__(format_spec) + # raise InvalidValueError(f'Cannot emit {str(self)} in compiled representation') + + def __neg__(self): + return self.duplicate(-self.value) + + def __abs__(self): + return self.duplicate(abs(self.value)) + + def __add__(self, other): + if other.__class__ is InvalidNumber: + return self.merge(other, self.value + other.value) + else: + return self.duplicate(self.value + other) + + def __sub__(self, other): + if other.__class__ is InvalidNumber: + return self.merge(other, self.value - other.value) + else: + return self.duplicate(self.value - other) + + def __mul__(self, other): + if other.__class__ is InvalidNumber: + return self.merge(other, self.value * other.value) + else: + return self.duplicate(self.value * other) + + def __truediv__(self, other): + if other.__class__ is InvalidNumber: + return self.merge(other, self.value / other.value) + else: + return self.duplicate(self.value / other) + + def __pow__(self, other): + if other.__class__ is InvalidNumber: + return self.merge(other, self.value**other.value) + else: + return self.duplicate(self.value**other) + + def __radd__(self, other): + return self.duplicate(other + self.value) + + def __rsub__(self, other): + return self.duplicate(other - self.value) + + def __rmul__(self, other): + return self.duplicate(other * self.value) + + def __rtruediv__(self, other): + return self.duplicate(other / self.value) + + def __rpow__(self, other): + return self.duplicate(other**self.value) + + +def apply_node_operation(node, args): + try: + ans = node._apply_operation(args) + if ans != ans and ans.__class__ is not InvalidNumber: + ans = InvalidNumber(ans) + return ans + except: + logger.warning( + "Exception encountered evaluating expression " + "'%s(%s)'\n\tmessage: %s\n\texpression: %s" + % (node.name, ", ".join(map(str, args)), str(sys.exc_info()[1]), node) + ) + if HALT_ON_EVALUATION_ERROR: + raise + return InvalidNumber(nan) + + +def complex_number_error(value, visitor, expr, node=""): + msg = f'Pyomo {visitor.__class__.__name__} does not support complex numbers' + logger.warning( + ' '.join(filter(None, ("Complex number returned from expression", node))) + + f"\n\tmessage: {msg}\n\texpression: {expr}" + ) + if HALT_ON_EVALUATION_ERROR: + raise InvalidValueError( + f'Pyomo {visitor.__class__.__name__} does not support complex numbers' + ) + return InvalidNumber(value) + + +def categorize_valid_components( + model, active=True, sort=None, valid=set(), targets=set() +): + """Walk model and check for valid component types + + This routine will walk the model and check all component types. + Components types in the `valid` set are ignored, blocks with + components in the `targets` set are collected, and all other + component types are added to a dictionary of `unrecognized` + components. + + A Component type may not appear in both `valid` and `targets` sets. + + Parameters + ---------- + model: _BlockData + The model tree to walk + + active: True or None + If True, only unrecognized active components are returned in the + `uncategorized` dictionary. Also, if True, only active Blocks + are descended into. + + sort: bool or SortComponents + The sorting flag to pass to the block walkers + + valid: Set[type] + The set of "valid" component types. These are ignored by the + categorizer. + + targets: Set[type] + The set of component types to "collect". Blocks with components + in the `targets` set will be returned in the `component_map` + + Returns + ------- + component_map: Dict[type, List[_BlockData]] + A dict mapping component type to a list of block data + objects that contain declared component of that type. + + unrecognized: Dict[type, List[ComponentData]] + A dict mapping unrecognized component types to a (non-empty) + list of component data objects found on the model. + + """ + assert active in (True, None) + # Note: we assume every target component is valid but that we expect + # there to be far mode valid components than target components. + # Generate an error if a target is in the valid set (because the + # valid set will preclude recording the block in the component_map) + if any(ctype in valid for ctype in targets): + ctypes = list(filter(valid.__contains__, targets)) + raise DeveloperError( + f"categorize_valid_components: Cannot have component type {ctypes} in " + "both the `valid` and `targets` sets" + ) + unrecognized = {} + component_map = {k: [] for k in targets} + for block in model.block_data_objects(active=active, descend_into=True, sort=sort): + local_ctypes = block.collect_ctypes(active=None, descend_into=False) + for ctype in local_ctypes: + if ctype in kernel.base._kernel_ctype_backmap: + ctype = kernel.base._kernel_ctype_backmap[ctype] + if ctype in valid: + continue + if ctype in targets: + component_map[ctype].append(block) + continue + # TODO: we should rethink the definition of "active" for + # Components that are not subclasses of ActiveComponent + if ( + active + and not issubclass(ctype, ActiveComponent) + and not issubclass(ctype, kernel.base.ICategorizedObject) + ): + continue + if ctype not in unrecognized: + unrecognized[ctype] = [] + unrecognized[ctype].extend( + block.component_data_objects( + ctype=ctype, + active=active, + descend_into=False, + sort=SortComponents.unsorted, + ) + ) + return component_map, {k: v for k, v in unrecognized.items() if v} + + +def FileDeterminism_to_SortComponents(file_determinism): + sorter = SortComponents.unsorted + if file_determinism >= FileDeterminism.SORT_INDICES: + sorter = sorter | SortComponents.indices + if file_determinism >= FileDeterminism.SORT_SYMBOLS: + sorter = sorter | SortComponents.alphabetical + return sorter + + +def initialize_var_map_from_column_order(model, config, var_map): + column_order = config.column_order + sorter = FileDeterminism_to_SortComponents(config.file_determinism) + + if column_order is None or column_order.__class__ is bool: + if not column_order: + column_order = None + elif isinstance(column_order, ComponentMap): + # The column order has historically has supported a ComponentMap of + # component to position in addition to the simple list of + # components. Convert it to the simple list + column_order = sorted(column_order, key=column_order.__getitem__) + + if column_order == True: + column_order = model.component_data_objects(Var, descend_into=True, sort=sorter) + elif config.file_determinism > FileDeterminism.ORDERED: + # We will pre-gather the variables so that their order + # matches the file_determinism flag. This is a little + # cumbersome, but is implemented this way for consistency + # with the original NL writer. + var_objs = model.component_data_objects(Var, descend_into=True, sort=sorter) + if column_order is None: + column_order = var_objs + else: + column_order = itertools.chain(column_order, var_objs) + + if column_order is not None: + # Note that Vars that appear twice (e.g., through a + # Reference) will be sorted with the FIRST occurrence. + for var in column_order: + if var.is_indexed(): + for _v in var.values(sorter): + if not _v.fixed: + var_map[id(_v)] = _v + elif not var.fixed: + var_map[id(var)] = var + return var_map + + +def ordered_active_constraints(model, config): + sorter = FileDeterminism_to_SortComponents(config.file_determinism) + constraints = model.component_data_objects(Constraint, active=True, sort=sorter) + + row_order = config.row_order + if row_order is None or row_order.__class__ is bool: + return constraints + elif isinstance(row_order, ComponentMap): + # The row order has historically also supported a ComponentMap of + # component to position in addition to the simple list of + # components. Convert it to the simple list + row_order = sorted(row_order, key=row_order.__getitem__) + + row_map = {} + for con in row_order: + if con.is_indexed(): + for c in con.values(sorter): + row_map[id(c)] = c + else: + row_map[id(con)] = con + if not row_map: + return constraints + # map the implicit dict ordering to an explicit 0..n ordering + row_map = {_id: i for i, _id in enumerate(row_map)} + # sorted() is stable (per Python docs), so we can let all + # unspecified rows have a row number one bigger than the + # number of rows specified by the user ordering. + _n = len(row_map) + _row_getter = row_map.get + return sorted(constraints, key=lambda x: _row_getter(id(x), _n)) + + # Copied from cpxlp.py: # Keven Hunter made a nice point about using %.16g in his attachment # to ticket #4319. I am adjusting this to %.17g as this mocks the @@ -34,7 +417,7 @@ _ftoa_precision_str = '%.17g' -def ftoa(val): +def ftoa(val, parenthesize_negative_values=False): if val is None: return val # @@ -46,7 +429,8 @@ def ftoa(val): _val = value(val) else: raise ValueError( - "Converting non-fixed bound or value to string: %s" % (val,)) + "Converting non-fixed bound or value to string: %s" % (val,) + ) # # Convert to string a = _ftoa_precision_str % _val @@ -55,21 +439,22 @@ def ftoa(val): # necessary, this helps keep the emitted string consistent between # python versions by simplifying things like "1.0000000000001" to # "1". - i = len(a) - try: - while i > 1: - if float(a[:i-1]) == _val: + i = len(a) - 1 + if i: + try: + while float(a[:i]) == _val: i -= 1 - else: - break - except: - pass + except: + pass + i += 1 # # It is important to issue a warning if the conversion loses # precision (as the emitted model is not exactly what the user # specified) if i == len(a) and float(a) != _val: - logger.warning( - "Converting %s to string resulted in loss of precision" % val) + logger.warning("Converting %s to string resulted in loss of precision" % val) # - return a[:i] + if parenthesize_negative_values and a[0] == '-': + return '(' + a[:i] + ')' + else: + return a[:i] diff --git a/pyomo/scripting/commands.py b/pyomo/scripting/commands.py index a00f4060f9c..7782962c2c1 100644 --- a/pyomo/scripting/commands.py +++ b/pyomo/scripting/commands.py @@ -28,11 +28,12 @@ def pyomo_python(args=None): args = sys.argv[1:] if args is None or len(args) == 0: console = code.InteractiveConsole() - console.interact('Pyomo Python Console\n'+sys.version) + console.interact('Pyomo Python Console\n' + sys.version) else: - cmd = sys.executable+' '+ ' '.join(args) + cmd = sys.executable + ' ' + ' '.join(args) subprocess.run(cmd) + @pyomo_command('pyomo', "The main command interface for Pyomo") def pyomo(args=None): parser = pyomo.scripting.pyomo_parser.get_parser() diff --git a/pyomo/scripting/convert.py b/pyomo/scripting/convert.py index ae31a64cf4d..2f0c0e5b400 100644 --- a/pyomo/scripting/convert.py +++ b/pyomo/scripting/convert.py @@ -16,11 +16,7 @@ from pyomo.common.collections import Bunch from pyomo.opt import ProblemFormat -from pyomo.core.base import (Objective, - Var, - Constraint, - value, - ConcreteModel) +from pyomo.core.base import Objective, Var, Constraint, value, ConcreteModel _format = None @@ -38,7 +34,7 @@ def convert(options=Bunch(), parser=None, model_format=None): if _format == ProblemFormat.cpxlp: options.model.save_file = 'unknown.lp' else: - options.model.save_file = 'unknown.'+str(_format) + options.model.save_file = 'unknown.' + str(_format) options.model.save_format = _format data = Bunch(options=options) @@ -56,24 +52,22 @@ def convert(options=Bunch(), parser=None, model_format=None): model_data.options = options except: - # TBD: I should be able to call this function in the case of # an exception to perform cleanup. However, as it stands # calling finalize with its default keyword value for # model(=None) results in an a different error related to # task port values. Not sure how to interpret that. - pyomo.scripting.util.finalize(data, - model=ConcreteModel(), - instance=None, - results=None) + pyomo.scripting.util.finalize( + data, model=ConcreteModel(), instance=None, results=None + ) raise else: - pyomo.scripting.util.finalize(data, model=model_data.model) return model_data + def convert_dakota(options=Bunch(), parser=None): # # Import plugins @@ -89,7 +83,7 @@ def convert_dakota(options=Bunch(), parser=None): # By default replace .py with .nl if options.model.save_file is None: - options.model.save_file = model_file_no_ext + '.nl' + options.model.save_file = model_file_no_ext + '.nl' options.model.save_format = ProblemFormat.nl # Dakota requires .row/.col files options.model.symbolic_solver_labels = True @@ -109,9 +103,9 @@ def convert_dakota(options=Bunch(), parser=None): model = model_data.instance # Easy way - #print "VARIABLE:" - #lines = open(options.save_model.replace('.nl','.col'),'r').readlines() - #for varName in lines: + # print "VARIABLE:" + # lines = open(options.save_model.replace('.nl','.col'),'r').readlines() + # for varName in lines: # varName = varName.strip() # var = model_data.symbol_map.getObject(varName) # print "'%s': %s" % (varName, var) @@ -185,13 +179,13 @@ def convert_dakota(options=Bunch(), parser=None): dakfrag.write("#--- Dakota interface block ---#\n") dakfrag.write("interface\n") - dakfrag.write(" algebraic_mappings = '" + options.model.save_file + "'\n") + dakfrag.write(" algebraic_mappings = '" + options.model.save_file + "'\n") dakfrag.write("#--- Dakota responses block ---#\n") dakfrag.write("responses\n") dakfrag.write(" objective_functions " + str(objectives) + '\n') - if (constraints > 0): + if constraints > 0: dakfrag.write(" nonlinear_inequality_constraints " + str(constraints) + '\n') dakfrag.write(" lower_bounds " + " ".join(cons_lb) + '\n') dakfrag.write(" upper_bounds " + " ".join(cons_ub) + '\n') @@ -199,7 +193,7 @@ def convert_dakota(options=Bunch(), parser=None): dakfrag.write(" descriptors\n") for od in obj_descriptors: dakfrag.write(" '%s'\n" % od) - if (constraints > 0): + if constraints > 0: for cd in cons_descriptors: dakfrag.write(" '%s'\n" % cd) @@ -209,35 +203,43 @@ def convert_dakota(options=Bunch(), parser=None): dakfrag.close() - sys.stdout.write( "Dakota input fragment written to file '%s'\n" - % (model_file_no_ext + ".dak",) ) + sys.stdout.write( + "Dakota input fragment written to file '%s'\n" % (model_file_no_ext + ".dak",) + ) return model_data def pyomo2lp(args=None): from pyomo.scripting.pyomo_main import main + if args is None: return main() else: - return main(['convert', '--format=lp']+args) + return main(['convert', '--format=lp'] + args) + def pyomo2nl(args=None): from pyomo.scripting.pyomo_main import main + if args is None: return main() else: - return main(['convert', '--format=nl']+args) + return main(['convert', '--format=nl'] + args) + def pyomo2bar(args=None): from pyomo.scripting.pyomo_main import main + if args is None: return main() else: - return main(['convert', '--format=bar']+args) + return main(['convert', '--format=bar'] + args) + def pyomo2dakota(args=None): from pyomo.scripting.pyomo_main import main + if args is None: return main() else: - return main(['convert','--format=dakota']+args) + return main(['convert', '--format=dakota'] + args) diff --git a/pyomo/scripting/driver_help.py b/pyomo/scripting/driver_help.py index 5d321d40650..81970a6b5cc 100644 --- a/pyomo/scripting/driver_help.py +++ b/pyomo/scripting/driver_help.py @@ -13,7 +13,6 @@ import os.path import sys import glob -import datetime import textwrap import logging import socket @@ -25,24 +24,33 @@ logger = logging.getLogger('pyomo.solvers') -#-------------------------------------------------- +# -------------------------------------------------- # run # --list -#-------------------------------------------------- +# -------------------------------------------------- + def setup_command_parser(parser): - parser.add_argument("--list", dest="summary", action='store_true', default=False, - help="List the commands that are installed with Pyomo") - parser.add_argument("command", nargs='*', help="The command and command-line options") + parser.add_argument( + "--list", + dest="summary", + action='store_true', + default=False, + help="List the commands that are installed with Pyomo", + ) + parser.add_argument( + "command", nargs='*', help="The command and command-line options" + ) + def command_exec(options): - cmddir = os.path.dirname(os.path.abspath(sys.executable))+os.sep + cmddir = os.path.dirname(os.path.abspath(sys.executable)) + os.sep if options.summary: print("") print("The following commands are installed in the Pyomo bin directory:") print("----------------------------------------------------------------") - for file in sorted(glob.glob(cmddir+'*')): - print(" "+os.path.basename(file)) + for file in sorted(glob.glob(cmddir + '*')): + print(" " + os.path.basename(file)) print("") if len(options.command) > 0: print("WARNING: ignoring command specification") @@ -50,18 +58,22 @@ def command_exec(options): if len(options.command) == 0: print(" ERROR: no command specified") return 1 - if not os.path.exists(cmddir+options.command[0]): - print(" ERROR: the command '%s' does not exist" % (cmddir+options.command[0])) + if not os.path.exists(cmddir + options.command[0]): + print( + " ERROR: the command '%s' does not exist" % (cmddir + options.command[0]) + ) return 1 - return subprocess.run([cmddir] + options.command, - stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL).returncode + return subprocess.run( + [cmddir] + options.command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL + ).returncode + # # Add a subparser for the pyomo command # setup_command_parser( - pyomo.scripting.pyomo_parser.add_subparser('run', + pyomo.scripting.pyomo_parser.add_subparser( + 'run', func=command_exec, help='Execute a command from the Pyomo bin (or Scripts) directory.', description='This pyomo subcommand is used to execute commands installed with Pyomo.', @@ -72,21 +84,23 @@ def command_exec(options): that are installed with Pyomo. Thus, if Pyomo is installed in the Python system directories, then this command executes any command included with Python. -""" - )) +""", + ) +) -#-------------------------------------------------- +# -------------------------------------------------- # help # --components # --command # --transformations # --solvers -#-------------------------------------------------- +# -------------------------------------------------- + def help_commands(): print("") print("The following commands are installed with Pyomo:") - print("-"*75) + print("-" * 75) registry = pyomo.common.get_pyomo_commands() d = max(len(key) for key in registry) fmt = "%%-%ds %%s" % d @@ -94,9 +108,11 @@ def help_commands(): print(fmt % (key, registry[key])) print("") + def help_writers(): import pyomo.environ from pyomo.opt.base import WriterFactory + wrapper = textwrap.TextWrapper() wrapper.initial_indent = ' ' wrapper.subsequent_indent = ' ' @@ -104,31 +120,14 @@ def help_writers(): print("Pyomo Problem Writers") print("---------------------") for writer in sorted(WriterFactory): - print(" "+writer) + print(" " + writer) print(wrapper.fill(WriterFactory.doc(writer))) -def help_checkers(): - import pyomo.environ - import pyomo.common.plugin - from pyomo.checker import IModelChecker - wrapper = textwrap.TextWrapper() - wrapper.initial_indent = ' ' - wrapper.subsequent_indent = ' ' - print("") - print("Pyomo Model Checkers") - print("--------------------") - ep = pyomo.common.plugin.ExtensionPoint(IModelChecker) - tmp = {} - for checker in ep.extensions(): - for alias in getattr(checker, '_factory_aliases', set()): - tmp[alias[0]] = alias[1] - for key in sorted(tmp.keys()): - print(" "+key) - print(wrapper.fill(tmp[key])) def help_datamanagers(options): import pyomo.environ from pyomo.dataportal import DataManagerFactory + wrapper = textwrap.TextWrapper() wrapper.initial_indent = ' ' wrapper.subsequent_indent = ' ' @@ -136,9 +135,10 @@ def help_datamanagers(options): print("Pyomo Data Managers") print("-------------------") for xform in sorted(DataManagerFactory): - print(" "+xform) + print(" " + xform) print(wrapper.fill(DataManagerFactory.doc(xform))) + def help_environment(): info = Bunch() # @@ -149,9 +149,9 @@ def help_environment(): try: packages = [] import pip + for package in pip.get_installed_distributions(): - packages.append(Bunch(name=package.project_name, - version=package.version)) + packages.append(Bunch(name=package.project_name, version=package.version)) info.python.packages = packages except: pass @@ -167,9 +167,11 @@ def help_environment(): print('#') print(str(info)) + def help_transformations(): import pyomo.environ from pyomo.core import TransformationFactory + wrapper = textwrap.TextWrapper() wrapper.initial_indent = ' ' wrapper.subsequent_indent = ' ' @@ -177,7 +179,7 @@ def help_transformations(): print("Pyomo Model Transformations") print("---------------------------") for xform in sorted(TransformationFactory): - print(" "+xform) + print(" " + xform) _doc = TransformationFactory.doc(xform) or "" # Ideally, the Factory would ensure that the doc string # indicated deprecation, but as @deprecated() is Pyomo @@ -185,32 +187,41 @@ def help_transformations(): # PyUtilib probably shouldn't contain Pyomo-specific processing. # The next best thing is to ensure that the deprecation status # is indicated here. - _init_doc = TransformationFactory.get_class(xform).__init__.__doc__ \ - or "" + _init_doc = TransformationFactory.get_class(xform).__init__.__doc__ or "" if _init_doc.strip().startswith('DEPRECATED') and 'DEPRECAT' not in _doc: _doc = ' '.join(('[DEPRECATED]', _doc)) if _doc: print(wrapper.fill(_doc)) + def help_solvers(): import pyomo.environ + wrapper = textwrap.TextWrapper(replace_whitespace=False) print("") print("Pyomo Solvers and Solver Managers") print("---------------------------------") - print(wrapper.fill("Pyomo uses 'solver managers' to execute 'solvers' that perform optimization and other forms of model analysis. A solver directly executes an optimizer, typically using an executable found on the user's PATH environment. Solver managers support a flexible mechanism for asyncronously executing solvers either locally or remotely. The following solver managers are available in Pyomo:")) + print( + wrapper.fill( + "Pyomo uses 'solver managers' to execute 'solvers' that perform optimization and other forms of model analysis. A solver directly executes an optimizer, typically using an executable found on the user's PATH environment. Solver managers support a flexible mechanism for asynchronously executing solvers either locally or remotely. The following solver managers are available in Pyomo:" + ) + ) print("") solvermgr_list = list(pyomo.opt.SolverManagerFactory) - solvermgr_list = sorted( filter(lambda x: '_' != x[0], solvermgr_list) ) + solvermgr_list = sorted(filter(lambda x: '_' != x[0], solvermgr_list)) n = max(map(len, solvermgr_list)) - wrapper = textwrap.TextWrapper(subsequent_indent=' '*(n+9)) + wrapper = textwrap.TextWrapper(subsequent_indent=' ' * (n + 9)) for s in solvermgr_list: - format = ' %-'+str(n)+'s %s' - print(wrapper.fill(format % (s , pyomo.opt.SolverManagerFactory.doc(s)))) + format = ' %-' + str(n) + 's %s' + print(wrapper.fill(format % (s, pyomo.opt.SolverManagerFactory.doc(s)))) print("") wrapper = textwrap.TextWrapper(subsequent_indent='') - print(wrapper.fill("If no solver manager is specified, Pyomo uses the serial solver manager to execute solvers locally. The neos solver manager is used to execute solvers on the NEOS optimization server.")) + print( + wrapper.fill( + "If no solver manager is specified, Pyomo uses the serial solver manager to execute solvers locally. The neos solver manager is used to execute solvers on the NEOS optimization server." + ) + ) print("") print("") @@ -219,7 +230,7 @@ def help_solvers(): print(wrapper.fill("The serial manager supports the following solver interfaces:")) print("") solver_list = list(pyomo.opt.SolverFactory) - solver_list = sorted( filter(lambda x: '_' != x[0], solver_list) ) + solver_list = sorted(filter(lambda x: '_' != x[0], solver_list)) _data = [] try: # Disable warnings @@ -258,21 +269,38 @@ def help_solvers(): verFieldLen = max(len(line[2]) for line in _data) fmt = ' %%1s%%-%ds %%-%ds %%s' % (nameFieldLen, verFieldLen) wrapper = textwrap.TextWrapper( - subsequent_indent=' '*(nameFieldLen + verFieldLen + 6)) + subsequent_indent=' ' * (nameFieldLen + verFieldLen + 6) + ) for _line in _data: print(wrapper.fill(fmt % _line)) print("") wrapper = textwrap.TextWrapper(subsequent_indent='') - print(wrapper.fill("""The leading symbol (one of *, -, +) indicates the current solver availability. A plus (+) indicates the solver is currently available to be run from Pyomo with the serial solver manager, and (if applicable) has a valid license. A minus (-) indicates the solver executables are available but do not report having a valid license. The solver may still be usable in an unlicensed or "demo" mode for limited problem sizes. An asterisk (*) indicates meta-solvers or generic interfaces, which are always available.""")) + print( + wrapper.fill( + """The leading symbol (one of *, -, +) indicates the current solver availability. A plus (+) indicates the solver is currently available to be run from Pyomo with the serial solver manager, and (if applicable) has a valid license. A minus (-) indicates the solver executables are available but do not report having a valid license. The solver may still be usable in an unlicensed or "demo" mode for limited problem sizes. An asterisk (*) indicates meta-solvers or generic interfaces, which are always available.""" + ) + ) print('') - print(wrapper.fill('Pyomo also supports solver interfaces that are wrappers around third-party solver interfaces. These interfaces require a subsolver specification that indicates the solver being executed. For example, the following indicates that the ipopt solver will be used:')) + print( + wrapper.fill( + 'Pyomo also supports solver interfaces that are wrappers around third-party solver interfaces. These interfaces require a subsolver specification that indicates the solver being executed. For example, the following indicates that the ipopt solver will be used:' + ) + ) print('') print(' asl:ipopt') print('') - print(wrapper.fill('The asl interface provides a generic wrapper for all solvers that use the AMPL Solver Library.')) + print( + wrapper.fill( + 'The asl interface provides a generic wrapper for all solvers that use the AMPL Solver Library.' + ) + ) print('') - print(wrapper.fill('Note that subsolvers can not be enumerated automatically for these interfaces. However, if a solver is specified that is not found, Pyomo assumes that the asl solver interface is being used. Thus the following solver name will launch ipopt if the \'ipopt\' executable is on the user\'s path:')) + print( + wrapper.fill( + 'Note that subsolvers can not be enumerated automatically for these interfaces. However, if a solver is specified that is not found, Pyomo assumes that the asl solver interface is being used. Thus the following solver name will launch ipopt if the \'ipopt\' executable is on the user\'s path:' + ) + ) print('') print(' ipopt') print('') @@ -280,27 +308,49 @@ def help_solvers(): logging.disable(logging.WARNING) socket.setdefaulttimeout(10) import pyomo.neos.kestrel + kestrel = pyomo.neos.kestrel.kestrelAMPL() - #print "HERE", solver_list - solver_list = list(set([name[:-5].lower() for name in kestrel.solvers() if name.endswith('AMPL')])) - #print "HERE", solver_list + # print "HERE", solver_list + solver_list = list( + set( + [ + name[:-5].lower() + for name in kestrel.solvers() + if name.endswith('AMPL') + ] + ) + ) + # print "HERE", solver_list if len(solver_list) > 0: print("") print("NEOS Solver Interfaces") print("----------------------") - print(wrapper.fill("The neos solver manager supports solver interfaces that can be executed remotely on the NEOS optimization server. The following solver interfaces are available with your current system configuration:")) + print( + wrapper.fill( + "The neos solver manager supports solver interfaces that can be executed remotely on the NEOS optimization server. The following solver interfaces are available with your current system configuration:" + ) + ) print("") solver_list = sorted(solver_list) n = max(map(len, solver_list)) - format = ' %-'+str(n)+'s %s' + format = ' %-' + str(n) + 's %s' for name in solver_list: - print(wrapper.fill(format % (name , pyomo.neos.doc.get(name,'Unexpected NEOS solver')))) + print( + wrapper.fill( + format + % (name, pyomo.neos.doc.get(name, 'Unexpected NEOS solver')) + ) + ) print("") else: print("") print("NEOS Solver Interfaces") print("----------------------") - print(wrapper.fill("The neos solver manager supports solver interfaces that can be executed remotely on the NEOS optimization server. This server is not available with your current system configuration.")) + print( + wrapper.fill( + "The neos solver manager supports solver interfaces that can be executed remotely on the NEOS optimization server. This server is not available with your current system configuration." + ) + ) print("") except ImportError: pass @@ -308,96 +358,153 @@ def help_solvers(): logging.disable(logging.NOTSET) socket.setdefaulttimeout(None) + def print_components(data): """ Print information about modeling components supported by Pyomo. """ + from pyomo.core.base.component import ModelComponentFactory, GlobalSets + print("") print("----------------------------------------------------------------") print("Pyomo Model Components:") print("----------------------------------------------------------------") for name in sorted(ModelComponentFactory): print("") - print(" "+name) + print(" " + name) for line in textwrap.wrap(ModelComponentFactory.doc(name), 59): - print(" "+line) + print(" " + line) print("") print("----------------------------------------------------------------") print("Pyomo Virtual Sets:") print("----------------------------------------------------------------") for name, obj in sorted(GlobalSets.items()): print("") - print(" "+name) - print(" "+obj.doc) + print(" " + name) + print(" " + obj.doc) + def help_exec(options): - flag=False + flag = False if options.commands: if options.asciidoc: - print("The '--commands' help information is not printed in an asciidoc format.") - flag=True + print( + "The '--commands' help information is not printed in an asciidoc format." + ) + flag = True help_commands() if options.components: if options.asciidoc: - print("The '--components' help information is not printed in an asciidoc format.") - flag=True + print( + "The '--components' help information is not printed in an asciidoc format." + ) + flag = True print_components(None) if options.datamanager: - flag=True + flag = True help_datamanagers(options) if options.environment: - flag=True + flag = True help_environment() if options.transformations: if options.asciidoc: - print("The '--transformations' help information is not printed in an asciidoc format.") - flag=True + print( + "The '--transformations' help information is not printed in an asciidoc format." + ) + flag = True help_transformations() if options.solvers: if options.asciidoc: - print("The '--solvers' help information is not printed in an asciidoc format.") - flag=True + print( + "The '--solvers' help information is not printed in an asciidoc format." + ) + flag = True help_solvers() if options.writers: - flag=True + flag = True if options.asciidoc: - print("The '--writers' help information is not printed in an asciidoc format.") + print( + "The '--writers' help information is not printed in an asciidoc format." + ) help_writers() - if options.checkers: - flag=True - if options.asciidoc: - print("The '--checkers' help information is not printed in an asciidoc format.") - help_checkers() if not flag: help_parser.print_help() + # # Add a subparser for the pyomo command # def setup_help_parser(parser): - parser.add_argument("--asciidoc", dest="asciidoc", action='store_true', default=False, - help="Generate output that is compatible with asciidoc's markup language") - parser.add_argument("--checkers", dest="checkers", action='store_true', default=False, - help="List the available model checkers") - parser.add_argument("-c", "--commands", dest="commands", action='store_true', default=False, - help="List the commands that are installed with Pyomo") - parser.add_argument("--components", dest="components", action='store_true', default=False, - help="List the components that are available in Pyomo's modeling environment") - parser.add_argument("-d", "--data-managers", dest="datamanager", action='store_true', default=False, - help="Print a summary of the data managers in Pyomo") - parser.add_argument("-i", "--info", dest="environment", action='store_true', default=False, - help="Summarize the environment and Python installation") - parser.add_argument("-s", "--solvers", dest="solvers", action='store_true', default=False, - help="Summarize the available solvers and solver interfaces") - parser.add_argument("-t", "--transformations", dest="transformations", action='store_true', default=False, - help="List the available model transformations") - parser.add_argument("-w", "--writers", dest="writers", action='store_true', default=False, - help="List the available problem writers") + parser.add_argument( + "--asciidoc", + dest="asciidoc", + action='store_true', + default=False, + help="Generate output that is compatible with asciidoc's markup language", + ) + parser.add_argument( + "-c", + "--commands", + dest="commands", + action='store_true', + default=False, + help="List the commands that are installed with Pyomo", + ) + parser.add_argument( + "--components", + dest="components", + action='store_true', + default=False, + help="List the components that are available in Pyomo's modeling environment", + ) + parser.add_argument( + "-d", + "--data-managers", + dest="datamanager", + action='store_true', + default=False, + help="Print a summary of the data managers in Pyomo", + ) + parser.add_argument( + "-i", + "--info", + dest="environment", + action='store_true', + default=False, + help="Summarize the environment and Python installation", + ) + parser.add_argument( + "-s", + "--solvers", + dest="solvers", + action='store_true', + default=False, + help="Summarize the available solvers and solver interfaces", + ) + parser.add_argument( + "-t", + "--transformations", + dest="transformations", + action='store_true', + default=False, + help="List the available model transformations", + ) + parser.add_argument( + "-w", + "--writers", + dest="writers", + action='store_true', + default=False, + help="List the available problem writers", + ) return parser + help_parser = setup_help_parser( - pyomo.scripting.pyomo_parser.add_subparser('help', + pyomo.scripting.pyomo_parser.add_subparser( + 'help', func=help_exec, help='Print help information.', - description="This pyomo subcommand is used to print information about Pyomo's subcommands and installed Pyomo services." - )) + description="This pyomo subcommand is used to print information about Pyomo's subcommands and installed Pyomo services.", + ) +) diff --git a/pyomo/scripting/interface.py b/pyomo/scripting/interface.py index 5c01c3c95e0..efb97470e43 100644 --- a/pyomo/scripting/interface.py +++ b/pyomo/scripting/interface.py @@ -9,14 +9,20 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.common.plugin import ( - Interface, DeprecatedInterface, Plugin, SingletonPlugin, - ExtensionPoint, implements, alias +from pyomo.common.plugin_base import ( + Interface, + DeprecatedInterface, + Plugin, + SingletonPlugin, + ExtensionPoint, + implements, + alias, ) registered_callback = {} -def pyomo_callback( name ): + +def pyomo_callback(name): """This is a decorator that declares a function to be a callback function. The callback functions are added to the solver when run from the pyomo script. @@ -27,64 +33,65 @@ def pyomo_callback( name ): def my_cut_generator(solver, model): ... """ + def fn(f): registered_callback[name] = f return f + return fn class IPyomoScriptPreprocess(Interface): - def apply(self, **kwds): """Apply preprocessing step in the Pyomo script""" -class IPyomoScriptCreateModel(Interface): +class IPyomoScriptCreateModel(Interface): def apply(self, **kwds): """Apply model creation step in the Pyomo script""" -class IPyomoScriptModifyInstance(Interface): +class IPyomoScriptModifyInstance(Interface): def apply(self, **kwds): """Modify and return the model instance""" -class IPyomoScriptCreateDataPortal(Interface): +class IPyomoScriptCreateDataPortal(Interface): def apply(self, **kwds): """Apply model data creation step in the Pyomo script""" -class IPyomoScriptPrintModel(Interface): +class IPyomoScriptPrintModel(Interface): def apply(self, **kwds): """Apply model printing step in the Pyomo script""" -class IPyomoScriptPrintInstance(Interface): +class IPyomoScriptPrintInstance(Interface): def apply(self, **kwds): """Apply instance printing step in the Pyomo script""" -class IPyomoScriptSaveInstance(Interface): +class IPyomoScriptSaveInstance(Interface): def apply(self, **kwds): """Apply instance saving step in the Pyomo script""" -class IPyomoScriptPrintResults(Interface): +class IPyomoScriptPrintResults(Interface): def apply(self, **kwds): """Apply results printing step in the Pyomo script""" -class IPyomoScriptSaveResults(Interface): +class IPyomoScriptSaveResults(Interface): def apply(self, **kwds): """Apply results saving step in the Pyomo script""" -class IPyomoScriptPostprocess(Interface): +class IPyomoScriptPostprocess(Interface): def apply(self, **kwds): """Apply postprocessing step in the Pyomo script""" -class IPyomoPresolver(Interface): +class IPyomoPresolver(Interface): def get_actions(self): """Return a list of presolve actions, in the order in which they will be applied.""" @@ -104,7 +111,6 @@ def presolve(self, instance): class IPyomoPresolveAction(Interface): - def presolve(self, instance): """Apply the presolve action to this instance, and return the revised instance""" diff --git a/pyomo/scripting/plugins/__init__.py b/pyomo/scripting/plugins/__init__.py index 6684e3b1d32..44e3956f314 100644 --- a/pyomo/scripting/plugins/__init__.py +++ b/pyomo/scripting/plugins/__init__.py @@ -9,8 +9,8 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): - import pyomo.scripting.plugins.check import pyomo.scripting.plugins.convert import pyomo.scripting.plugins.solve import pyomo.scripting.plugins.download diff --git a/pyomo/scripting/plugins/build_ext.py b/pyomo/scripting/plugins/build_ext.py index cf88ccb7bad..9ae63cbb8a1 100644 --- a/pyomo/scripting/plugins/build_ext.py +++ b/pyomo/scripting/plugins/build_ext.py @@ -15,6 +15,7 @@ from pyomo.common.extensions import ExtensionBuilderFactory from pyomo.scripting.pyomo_parser import add_subparser + class ExtensionBuilder(object): def create_parser(self, parser): return parser @@ -45,25 +46,30 @@ def _call_impl(self, args, unparsed, logger): result = ' OK ' except SystemExit: _info = sys.exc_info() - _cls = str(_info[0].__name__ if _info[0] is not None - else "NoneType") + ": " + _cls = ( + str(_info[0].__name__ if _info[0] is not None else "NoneType") + + ": " + ) logger.error(_cls + str(_info[1])) result = 'FAIL' returncode |= 2 except: _info = sys.exc_info() - _cls = str(_info[0].__name__ if _info[0] is not None - else "NoneType") + ": " + _cls = ( + str(_info[0].__name__ if _info[0] is not None else "NoneType") + + ": " + ) logger.error(_cls + str(_info[1])) result = 'FAIL' returncode |= 1 results.append(result_fmt % (result, target)) logger.info("Finished building Pyomo extensions.") logger.info( - "The following extensions were built:\n " + - "\n ".join(results)) + "The following extensions were built:\n " + "\n ".join(results) + ) return returncode + # # Add a subparser for the download-extensions command # @@ -74,14 +80,16 @@ def _call_impl(self, args, unparsed, logger): func=_extension_builder.call, help='Build compiled extension modules', add_help=False, - description='This builds all registered (compileable) extension modules' - )) + description='This builds all registered (compilable) extension modules', + ) +) _parser.add_argument( - '-j', '--parallel', + '-j', + '--parallel', action='store', type=int, dest='parallel', default=None, help="Build with this many processes/cores", - ) +) diff --git a/pyomo/scripting/plugins/check.py b/pyomo/scripting/plugins/check.py deleted file mode 100644 index 97ccd1f1ea5..00000000000 --- a/pyomo/scripting/plugins/check.py +++ /dev/null @@ -1,105 +0,0 @@ -# ___________________________________________________________________________ -# -# Pyomo: Python Optimization Modeling Objects -# Copyright (c) 2008-2022 -# National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain -# rights in this software. -# This software is distributed under the 3-clause BSD License. -# ___________________________________________________________________________ - -import argparse -import pyomo.scripting.pyomo_parser -import os.path - -class EnableDisableAction(argparse.Action): - - def add_package(self, namespace, package): - if namespace.checkers.get(package, None) is None: - namespace.checkers[package] = [] - for c in pyomo.checker.runner.ModelCheckRunner._checkers(all=True): - if c._checkerPackage() == package: - namespace.checkers[package].append(c._checkerName()) - - def remove_package(self, namespace, package): - if package in namespace.checkers: - del namespace.checkers[package] - - def add_checker(self, namespace, checker): - for c in pyomo.checker.runner.ModelCheckRunner._checkers(all=True): - if c._checkerName() == checker: - if namespace.checkers.get(c._checkerPackage(), None) is None: - namespace.checkers[c._checkerPackage()] = [] - if c._checkerName() not in namespace.checkers[c._checkerPackage()]: - namespace.checkers[c._checkerPackage()].append(c._checkerName()) - - def remove_checker(self, namespace, checker): - for c in pyomo.core.check.ModelCheckRunner._checkers(all=True): - if c._checkerName() == checker: - if namespace.checkers.get(c._checkerPackage(), None) is not None: - for i in range(namespace.checkers[c._checkerPackage()].count(c._checkerName())): - namespace.checkers[c._checkerPackage()].remove(c._checkerName()) - - def add_default_checkers(self, namespace): - self.add_package(namespace, 'model') - self.add_package(namespace, 'py3k') - - def __call__(self, parser, namespace, values, option_string=None): - if 'checkers' not in dir(namespace): - setattr(namespace, 'checkers', {}) - self.add_default_checkers(namespace) - - if option_string == '-c': - self.add_checker(namespace, values) - elif option_string == '-C': - self.add_package(namespace, values) - elif option_string == '-x': - self.remove_checker(namespace, values) - elif option_string == '-X': - self.remove_package(namespace, values) - -def setup_parser(parser): - parser.add_argument("script", metavar="SCRIPT", default=None, - help="A Pyomo script that is checked") - parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", - default=False, help="Enable additional output messages") - parser.add_argument("-c", "--enable-checker", metavar="CHECKER", action=EnableDisableAction, - help="Activate a specific checker") - parser.add_argument("-C", "--enable-package", metavar="PACKAGE", action=EnableDisableAction, - help="Activate an entire checker package") - parser.add_argument("-x", "--disable-checker", metavar="CHECKER", action=EnableDisableAction, - help="Disable a specific checker") - parser.add_argument("-X", "--disable-package", metavar="PACKAGE", action=EnableDisableAction, - help="Disable an entire checker package") - - -def main_exec(options): - import pyomo.checker.runner as check - - if options.script is None: - raise IOError("Must specify a model script!") - if not os.path.exists(options.script): - raise IOError("Model script '%s' does not exist!" % options.script) - - # force default checkers - if getattr(options, 'checkers', None) is None: - EnableDisableAction(None, None)(None, options, None, None) - - runner = check.ModelCheckRunner() - runner.run(**vars(options)) - -# -# Add a subparser for the check command -# -setup_parser( - pyomo.scripting.pyomo_parser.add_subparser('check', - func=main_exec, - help='Check a model for errors.', - description='This pyomo subcommand is used to check a model script for errors.', - epilog=""" -The default behavior of this command is to assume that the model -script is a simple Pyomo model. Eventually, this script will support -options that allow other Pyomo models to be checked. -""" - )) diff --git a/pyomo/scripting/plugins/convert.py b/pyomo/scripting/plugins/convert.py index 078ac6aba83..55290ed90ce 100644 --- a/pyomo/scripting/plugins/convert.py +++ b/pyomo/scripting/plugins/convert.py @@ -18,33 +18,34 @@ from pyomo.scripting.pyomo_parser import add_subparser, CustomHelpFormatter from pyomo.scripting.solve_config import Default_Config + def create_parser(parser=None): # # Setup command-line options. # if parser is None: parser = argparse.ArgumentParser( - usage = '%(prog)s [options] []' - ) - parser.add_argument('--output', + usage='%(prog)s [options] []' + ) + parser.add_argument( + '--output', action='store', dest='filename', help="Output file name. This option is required unless the file name is specified in a configuration file.", - default=None) - parser.add_argument('--format', - action='store', - dest='format', - help="Output format", - default=None) - parser.add_argument('--generate-config-template', - action='store', - dest='template', - default=None) + default=None, + ) + parser.add_argument( + '--format', action='store', dest='format', help="Output format", default=None + ) + parser.add_argument( + '--generate-config-template', action='store', dest='template', default=None + ) return parser def run_convert(options=Bunch(), parser=None): from pyomo.scripting.convert import convert, convert_dakota + if options.model.save_format is None and options.model.save_file: options.model.save_format = options.model.save_file.split('.')[-1] # @@ -56,8 +57,10 @@ def run_convert(options=Bunch(), parser=None): if options.model.save_format is None: raise RuntimeError("Unspecified target conversion format!") else: - raise RuntimeError("Unrecognized target conversion format (%s)!" - % (options.model.save_format,) ) + raise RuntimeError( + "Unrecognized target conversion format (%s)!" + % (options.model.save_format,) + ) else: return convert(options, parser, _format) @@ -65,6 +68,7 @@ def run_convert(options=Bunch(), parser=None): def convert_exec(args, unparsed): # import pyomo.scripting.util + # # Generate a template file # @@ -79,11 +83,11 @@ def convert_exec(args, unparsed): print(" Created template file '%s'" % args.template) sys.exit(0) # - save_filename = getattr(args,'filename',None) + save_filename = getattr(args, 'filename', None) if save_filename is None: - save_format = getattr(args,'format',None) + save_format = getattr(args, 'format', None) if not save_format is None: - save_filename = 'unknown.'+save_format + save_filename = 'unknown.' + save_format if save_filename is None: # # Get configuration values if no model file has been specified @@ -104,7 +108,7 @@ def convert_exec(args, unparsed): pass if save_filename is None: try: - save_filename = 'unknown.'+str(val['model']['save format']) + save_filename = 'unknown.' + str(val['model']['save format']) except: pass # @@ -117,7 +121,7 @@ def convert_exec(args, unparsed): config, blocks = Default_Config().config_block() parser = create_temporary_parser(output=True, generate=True) config.initialize_argparse(parser) - parser.parse_args(args=unparsed+['-h']) + parser.parse_args(args=unparsed + ['-h']) sys.exit(1) # # Parse previously unparsed options @@ -138,24 +142,29 @@ def convert_exec(args, unparsed): config.data.files = _options.data_files else: val = pyomo.scripting.util.get_config_values(_options.model_or_config_file) - config.set_value( val ) + config.set_value(val) # # Note that we pass-in pre-parsed options. The run_command() # function knows to not perform a parse, but instead to simply # used these parsed values. # - return pyomo.scripting.util.run_command(command=run_convert, parser=convert_parser, options=config, name='convert') + return pyomo.scripting.util.run_command( + command=run_convert, parser=convert_parser, options=config, name='convert' + ) + # # Add a subparser for the pyomo command # -convert_parser = create_parser(add_subparser('convert', +convert_parser = create_parser( + add_subparser( + 'convert', func=convert_exec, help='Convert a Pyomo model to another format', add_help=False, - description='This pyomo subcommand is used to create a new model file in a specified format from a Pyomo model.' - )) - + description='This pyomo subcommand is used to create a new model file in a specified format from a Pyomo model.', + ) +) def create_temporary_parser(output=False, generate=False): @@ -166,28 +175,34 @@ def create_temporary_parser(output=False, generate=False): parser = argparse.ArgumentParser(formatter_class=CustomHelpFormatter) _subparsers = parser.add_subparsers() _parser = _subparsers.add_parser('convert') - _parser.formatter_class=CustomHelpFormatter + _parser.formatter_class = CustomHelpFormatter if generate: # - # Adding documentation about the two options that are + # Adding documentation about the two options that are # defined in the initial parser. # - _parser.add_argument('--generate-config-template', + _parser.add_argument( + '--generate-config-template', action='store', dest='template', default=None, - help='Create a configuration template file in YAML or JSON and exit.') + help='Create a configuration template file in YAML or JSON and exit.', + ) if output: - parser.add_argument('--output', + parser.add_argument( + '--output', action='store', dest='filename', help="Output file name. This option is required unless the file name is specified in a configuration file.", - default=None) - parser.add_argument('--format', + default=None, + ) + parser.add_argument( + '--format', action='store', dest='format', help="Output format", - default=None) + default=None, + ) _parser.usage = '%(prog)s [options] []' _parser.epilog = """ Description: @@ -229,25 +244,29 @@ def create_temporary_parser(output=False, generate=False): """ # - _parser.add_argument('--output', + _parser.add_argument( + '--output', action='store', dest='filename', help="Output file name. This option is required unless the file name is specified in a configuration file.", - default=None) - _parser.add_argument('--format', + default=None, + ) + _parser.add_argument( + '--format', action='store', dest='format', help="Output format", default=None + ) + _parser.add_argument( + 'model_or_config_file', action='store', - dest='format', - help="Output format", - default=None) - _parser.add_argument('model_or_config_file', - action='store', - nargs='?', + nargs='?', default='', - help="A Python module that defines a Pyomo model, or a configuration file that defines options for 'pyomo convert' (in either YAML or JSON format)") - _parser.add_argument('data_files', - action='store', - nargs='*', + help="A Python module that defines a Pyomo model, or a configuration file that defines options for 'pyomo convert' (in either YAML or JSON format)", + ) + _parser.add_argument( + 'data_files', + action='store', + nargs='*', default=[], - help='Pyomo data files that defined data used to initialize the model (specified in the first argument)') + help='Pyomo data files that defined data used to initialize the model (specified in the first argument)', + ) # return _parser diff --git a/pyomo/scripting/plugins/download.py b/pyomo/scripting/plugins/download.py index 16f414db982..73a164ee708 100644 --- a/pyomo/scripting/plugins/download.py +++ b/pyomo/scripting/plugins/download.py @@ -14,6 +14,7 @@ from pyomo.common.download import FileDownloader, DownloadFactory from pyomo.scripting.pyomo_parser import add_subparser + class GroupDownloader(object): def __init__(self): self.downloader = FileDownloader() @@ -36,6 +37,11 @@ def _call_impl(self, args, unparsed, logger): returncode = 0 self.downloader.cacert = args.cacert self.downloader.insecure = args.insecure + logger.info( + "As of February 9, 2023, AMPL GSL can no longer be downloaded\ + through download-extensions. Visit https://portal.ampl.com/\ + to download the AMPL GSL binaries." + ) for target in DownloadFactory: try: ext = DownloadFactory(target, downloader=self.downloader) @@ -49,23 +55,27 @@ def _call_impl(self, args, unparsed, logger): result = ' OK ' except SystemExit: _info = sys.exc_info() - _cls = str(_info[0].__name__ if _info[0] is not None - else "NoneType") + ": " + _cls = ( + str(_info[0].__name__ if _info[0] is not None else "NoneType") + + ": " + ) logger.error(_cls + str(_info[1])) result = 'FAIL' returncode |= 2 except: _info = sys.exc_info() - _cls = str(_info[0].__name__ if _info[0] is not None - else "NoneType") + ": " + _cls = ( + str(_info[0].__name__ if _info[0] is not None else "NoneType") + + ": " + ) logger.error(_cls + str(_info[1])) result = 'FAIL' returncode |= 1 results.append(result_fmt % (result, target)) logger.info("Finished downloading Pyomo extensions.") logger.info( - "The following extensions were downloaded:\n " + - "\n ".join(results)) + "The following extensions were downloaded:\n " + "\n ".join(results) + ) return returncode @@ -79,6 +89,6 @@ def _call_impl(self, args, unparsed, logger): func=_group_downloader.call, help='Download compiled extension modules', add_help=False, - description='This downloads all registered (compiled) extension modules' - )) - + description='This downloads all registered (compiled) extension modules', + ) +) diff --git a/pyomo/scripting/plugins/extras.py b/pyomo/scripting/plugins/extras.py index e8743196e42..4cf9e623212 100644 --- a/pyomo/scripting/plugins/extras.py +++ b/pyomo/scripting/plugins/extras.py @@ -13,35 +13,39 @@ from pyomo.common.deprecation import deprecated + def get_packages(): packages = [ - 'sympy', - 'xlrd', - 'openpyxl', - #('suds-jurko', 'suds'), + 'sympy', + 'xlrd', + 'openpyxl', + # ('suds-jurko', 'suds'), ('PyYAML', 'yaml'), - 'pypyodbc', - 'pymysql', - #'openopt', - #'FuncDesigner', - #'DerApproximator', + 'pypyodbc', + 'pymysql', + #'openopt', + #'FuncDesigner', + #'DerApproximator', ('ipython[notebook]', 'IPython'), ] return packages + @deprecated( - "Use of the pyomo install-extras is deprecated." - "The current recommended course of action is to manually install " - "optional dependencies as needed.", - version='5.7.1') + "Use of the pyomo install-extras is deprecated." + "The current recommended course of action is to manually install " + "optional dependencies as needed.", + version='5.7.1', +) def install_extras(args=[], quiet=False): # # Verify that pip is installed # try: import pip + pip_version = pip.__version__.split('.') - for i,s in enumerate(pip_version): + for i, s in enumerate(pip_version): try: pip_version[i] = int(s) except: @@ -51,7 +55,7 @@ def install_extras(args=[], quiet=False): print("You must have 'pip' installed to run this script.") raise SystemExit - cmd = ['--disable-pip-version-check', 'install','--upgrade'] + cmd = ['--disable-pip-version-check', 'install', '--upgrade'] # Disable the PIP download cache if pip_version[0] >= 6: cmd.append('--no-cache-dir') @@ -61,10 +65,10 @@ def install_extras(args=[], quiet=False): if not quiet: print(' ') - print('-'*60) + print('-' * 60) print("Installation Output Logs") print(" (A summary will be printed below)") - print('-'*60) + print('-' * 60) print(' ') results = {} @@ -89,9 +93,9 @@ def install_extras(args=[], quiet=False): if not quiet: print(' ') print(' ') - print('-'*60) + print('-' * 60) print("Installation Summary") - print('-'*60) + print('-' * 60) print(' ') for package, result in sorted(results.items()): if result: @@ -124,7 +128,8 @@ def pyomo_subcommand(options): ) _parser.add_argument( - '-q', '--quiet', + '-q', + '--quiet', action='store_true', dest='quiet', default=False, @@ -134,7 +139,5 @@ def pyomo_subcommand(options): "--pip-args", dest="args", action="append", - help=("Arguments that are passed to the 'pip' command when " - "installing packages"), + help=("Arguments that are passed to the 'pip' command when installing packages"), ) - diff --git a/pyomo/scripting/plugins/solve.py b/pyomo/scripting/plugins/solve.py index 2497423dcad..69451a04e3c 100644 --- a/pyomo/scripting/plugins/solve.py +++ b/pyomo/scripting/plugins/solve.py @@ -24,20 +24,15 @@ def create_parser(parser=None): # if parser is None: parser = argparse.ArgumentParser( - usage = '%(prog)s [options] []' - ) - parser.add_argument('--solver', - action='store', - dest='solver', - default=None) - parser.add_argument('--solver-manager', - action='store', - dest='solver_manager', - default='serial') - parser.add_argument('--generate-config-template', - action='store', - dest='template', - default=None) + usage='%(prog)s [options] []' + ) + parser.add_argument('--solver', action='store', dest='solver', default=None) + parser.add_argument( + '--solver-manager', action='store', dest='solver_manager', default='serial' + ) + parser.add_argument( + '--generate-config-template', action='store', dest='template', default=None + ) return parser @@ -49,23 +44,28 @@ def create_temporary_parser(solver=False, generate=False): parser = argparse.ArgumentParser(formatter_class=CustomHelpFormatter) _subparsers = parser.add_subparsers() _parser = _subparsers.add_parser('solve') - _parser.formatter_class=CustomHelpFormatter + _parser.formatter_class = CustomHelpFormatter if generate: # # Adding documentation about the two options that are # defined in the initial parser. # - _parser.add_argument('--generate-config-template', + _parser.add_argument( + '--generate-config-template', action='store', dest='template', default=None, - help='Create a configuration template file in YAML or JSON and exit.') + help='Create a configuration template file in YAML or JSON and exit.', + ) if solver: - _parser.add_argument('--solver', + _parser.add_argument( + '--solver', action='store', dest='solver', default=None, - help='Solver name. This option is required unless the solver name is specified in a configuration file.') + help='Solver name. This option is required unless the solver name is' + 'specified in a configuration file.', + ) _parser.usage = '%(prog)s [options] []' _parser.epilog = """ Description: @@ -116,26 +116,32 @@ def create_temporary_parser(solver=False, generate=False): """ # - _parser.add_argument('model_or_config_file', + _parser.add_argument( + 'model_or_config_file', action='store', nargs='?', default='', - help="A Python module that defines a Pyomo model, or a configuration file that defines options for 'pyomo solve' (in either YAML or JSON format)") - _parser.add_argument('data_files', + help="A Python module that defines a Pyomo model, or a configuration file " + "that defines options for 'pyomo solve' (in either YAML or JSON format)", + ) + _parser.add_argument( + 'data_files', action='store', nargs='*', default=[], - help='Pyomo data files that defined data used to initialize the model (specified in the first argument)') + help='Pyomo data files that defined data used to initialize the model ' + '(specified in the first argument)', + ) # return _parser def solve_exec(args, unparsed): - import pyomo.scripting.util + # - solver_manager = getattr(args,'solver_manager',None) - solver = getattr(args,'solver',None) + solver_manager = getattr(args, 'solver_manager', None) + solver = getattr(args, 'solver', None) # if solver is None: # @@ -163,7 +169,7 @@ def solve_exec(args, unparsed): print("ERROR: No solver specified!") print("") parser = create_temporary_parser(solver=True, generate=True) - parser.parse_args(args=unparsed+['-h']) + parser.parse_args(args=unparsed + ['-h']) sys.exit(1) config = None @@ -215,23 +221,26 @@ def solve_exec(args, unparsed): config.solvers[0].manager = solver_manager from pyomo.scripting.pyomo_command import run_pyomo + # # Note that we pass-in pre-parsed options. The run_command() # function knows to not perform a parse, but instead to simply # used these parsed values. # - return pyomo.scripting.util.run_command(command=run_pyomo, - parser=_parser, - options=config, - name='pyomo solve') + return pyomo.scripting.util.run_command( + command=run_pyomo, parser=_parser, options=config, name='pyomo solve' + ) + # # Add a subparser for the solve command # -solve_parser = create_parser(add_subparser('solve', - func=solve_exec, - help='Optimize a model', - add_help=False, - description='This pyomo subcommand is used to analyze optimization models.' - )) - +solve_parser = create_parser( + add_subparser( + 'solve', + func=solve_exec, + help='Optimize a model', + add_help=False, + description='This pyomo subcommand is used to analyze optimization models.', + ) +) diff --git a/pyomo/scripting/pyomo_command.py b/pyomo/scripting/pyomo_command.py index e75956d276d..b652e95372a 100644 --- a/pyomo/scripting/pyomo_command.py +++ b/pyomo/scripting/pyomo_command.py @@ -25,18 +25,16 @@ def run_pyomo(options=Bunch(), parser=None): try: pyomo.scripting.util.setup_environment(data) - pyomo.scripting.util.apply_preprocessing(data, - parser=parser) + pyomo.scripting.util.apply_preprocessing(data, parser=parser) except: # TBD: I should be able to call this function in the case of # an exception to perform cleanup. However, as it stands # calling finalize with its default keyword value for # model(=None) results in an a different error related to # task port values. Not sure how to interpret that. - pyomo.scripting.util.finalize(data, - model=ConcreteModel(), - instance=None, - results=None) + pyomo.scripting.util.finalize( + data, model=ConcreteModel(), instance=None, results=None + ) raise else: if data.error: @@ -45,11 +43,10 @@ def run_pyomo(options=Bunch(), parser=None): # calling finalize with its default keyword value for # model(=None) results in an a different error related to # task port values. Not sure how to interpret that. - pyomo.scripting.util.finalize(data, - model=ConcreteModel(), - instance=None, - results=None) - return Bunch() #pragma:nocover + pyomo.scripting.util.finalize( + data, model=ConcreteModel(), instance=None, results=None + ) + return Bunch() # pragma:nocover try: model_data = pyomo.scripting.util.create_model(data) @@ -59,53 +56,55 @@ def run_pyomo(options=Bunch(), parser=None): # calling finalize with its default keyword value for # model(=None) results in an a different error related to # task port values. Not sure how to interpret that. - pyomo.scripting.util.finalize(data, - model=ConcreteModel(), - instance=None, - results=None) + pyomo.scripting.util.finalize( + data, model=ConcreteModel(), instance=None, results=None + ) raise else: - if (((not options.runtime.logging == 'debug') and \ - options.model.save_file) or \ - options.runtime.only_instance): - pyomo.scripting.util.finalize(data, - model=model_data.model, - instance=model_data.instance, - results=None) + if ( + (not options.runtime.logging == 'debug') and options.model.save_file + ) or options.runtime.only_instance: + pyomo.scripting.util.finalize( + data, model=model_data.model, instance=model_data.instance, results=None + ) return Bunch(instance=model_data.instance) try: - opt_data = pyomo.scripting.util.apply_optimizer(data, - instance=model_data.instance) + opt_data = pyomo.scripting.util.apply_optimizer( + data, instance=model_data.instance + ) - pyomo.scripting.util.process_results(data, - instance=model_data.instance, - results=opt_data.results, - opt=opt_data.opt) + pyomo.scripting.util.process_results( + data, + instance=model_data.instance, + results=opt_data.results, + opt=opt_data.opt, + ) - pyomo.scripting.util.apply_postprocessing(data, - instance=model_data.instance, - results=opt_data.results) + pyomo.scripting.util.apply_postprocessing( + data, instance=model_data.instance, results=opt_data.results + ) except: # TBD: I should be able to call this function in the case of # an exception to perform cleanup. However, as it stands # calling finalize with its default keyword value for # model(=None) results in an a different error related to # task port values. Not sure how to interpret that. - pyomo.scripting.util.finalize(data, - model=ConcreteModel(), - instance=None, - results=None) + pyomo.scripting.util.finalize( + data, model=ConcreteModel(), instance=None, results=None + ) raise else: - pyomo.scripting.util.finalize(data, - model=model_data.model, - instance=model_data.instance, - results=opt_data.results) - - return Bunch(options=options, - instance=model_data.instance, - results=opt_data.results, - local=opt_data.local) - + pyomo.scripting.util.finalize( + data, + model=model_data.model, + instance=model_data.instance, + results=opt_data.results, + ) + return Bunch( + options=options, + instance=model_data.instance, + results=opt_data.results, + local=opt_data.local, + ) diff --git a/pyomo/scripting/pyomo_main.py b/pyomo/scripting/pyomo_main.py index 1ef21cfc3f9..9acafea0471 100644 --- a/pyomo/scripting/pyomo_main.py +++ b/pyomo/scripting/pyomo_main.py @@ -11,9 +11,11 @@ import sys import copy +from pyomo.common.deprecation import deprecation_warning try: import pkg_resources + pyomo_commands = pkg_resources.iter_entry_points('pyomo.command') except: pyomo_commands = [] @@ -27,10 +29,12 @@ except Exception: exctype, err, tb = sys.exc_info() # BUG? import traceback - msg = "Error loading pyomo.command entry point %s:\nOriginal %s: %s\n"\ - "Traceback:\n%s" \ - % (entrypoint, exctype.__name__, err, - ''.join(traceback.format_tb(tb)),) + + msg = ( + "Error loading pyomo.command entry point %s:\nOriginal %s: %s\n" + "Traceback:\n%s" + % (entrypoint, exctype.__name__, err, ''.join(traceback.format_tb(tb))) + ) # clear local variables to remove circular references exctype = err = tb = None # TODO: Should this just log an error and re-raise the original @@ -44,6 +48,7 @@ def main(args=None): # from pyomo.scripting import pyomo_parser import pyomo.environ + # # Parse the arguments # @@ -55,13 +60,20 @@ def main(args=None): # if not args: args.append('-h') - # FIXME: This should use the logger and not print() if args[0][0] == '-': if args[0] not in ['-h', '--help', '--version']: - print("WARNING: converting to the 'pyomo solve' subcommand") + deprecation_warning( + "Running the 'pyomo' script with no subcommand is deprecated. " + "Defaulting to 'pyomo solve'", + version='6.5.0', + ) args = ['solve'] + args[0:] elif args[0] not in pyomo_parser.subparsers: - print("WARNING: converting to the 'pyomo solve' subcommand") + deprecation_warning( + "Running the 'pyomo' script with no subcommand is deprecated. " + "Defaulting to 'pyomo solve'", + version='6.5.0', + ) args = ['solve'] + args[0:] # # Process arguments @@ -96,5 +108,6 @@ def main_console_script(): except AttributeError: return ans + if __name__ == '__main__': sys.exit(main_console_script()) diff --git a/pyomo/scripting/pyomo_parser.py b/pyomo/scripting/pyomo_parser.py index 725a5f87a71..345d400a1aa 100644 --- a/pyomo/scripting/pyomo_parser.py +++ b/pyomo/scripting/pyomo_parser.py @@ -14,6 +14,7 @@ import argparse import sys + # # Sort sub_parser names, since these are inserted throughout Pyomo # @@ -21,7 +22,6 @@ # mucking with a non-public API here ... # class CustomHelpFormatter(argparse.RawDescriptionHelpFormatter): - def _metavar_formatter(self, action, default_metavar): if action.metavar is not None: result = action.metavar @@ -35,7 +35,8 @@ def format(tuple_size): if isinstance(result, tuple): return result else: - return (result, ) * tuple_size + return (result,) * tuple_size + return format def _iter_indented_subactions(self, action): @@ -57,18 +58,21 @@ def _iter_indented_subactions(self, action): def get_version(): from pyomo.version import version import platform + return "Pyomo %s (%s %s on %s %s)" % ( - version, - platform.python_implementation(), - '.'.join( str(x) for x in sys.version_info[:3] ), - platform.system(), - platform.release() ) + version, + platform.python_implementation(), + '.'.join(str(x) for x in sys.version_info[:3]), + platform.system(), + platform.release(), + ) + # # Create the argparse parser for Pyomo # -doc="This is the main driver for the Pyomo optimization software." -epilog=""" +doc = "This is the main driver for the Pyomo optimization software." +epilog = """ ------------------------------------------------------------------------- Pyomo supports a variety of modeling and optimization capabilities, which are executed either as subcommands of 'pyomo' or as separate @@ -86,6 +90,7 @@ def get_version(): subparsers = [] + def add_subparser(name, **args): """ Add a subparser to the 'pyomo' command. @@ -100,20 +105,19 @@ def add_subparser(name, **args): parser.set_defaults(func=func) return parser + def get_parser(): """ - Return the parser used by the 'pyomo' commmand. + Return the parser used by the 'pyomo' command. """ global _pyomo_parser if _pyomo_parser is None: _pyomo_parser = argparse.ArgumentParser( - description=doc, - epilog=epilog, - formatter_class=CustomHelpFormatter + description=doc, epilog=epilog, formatter_class=CustomHelpFormatter ) - _pyomo_parser.add_argument( - "--version", action="version", version=get_version()) + _pyomo_parser.add_argument("--version", action="version", version=get_version()) global _pyomo_subparsers _pyomo_subparsers = _pyomo_parser.add_subparsers( - dest='subparser_name', title='subcommands' ) + dest='subparser_name', title='subcommands' + ) return _pyomo_parser diff --git a/pyomo/scripting/solve_config.py b/pyomo/scripting/solve_config.py index 174896c6897..3048431d443 100644 --- a/pyomo/scripting/solve_config.py +++ b/pyomo/scripting/solve_config.py @@ -13,35 +13,38 @@ class Default_Config(object): - def config_block(self, init=False): config, blocks = minlp_config_block(init=init) return config, blocks def minlp_config_block(init=False): - config = ConfigBlock("Configuration for a canonical model " - "construction and optimization sequence") - blocks={} - + config = ConfigBlock( + "Configuration for a canonical model construction and optimization sequence" + ) + blocks = {} + # # Data # data = config.declare('data', ConfigBlock()) blocks['data'] = data - data.declare('files', ConfigList( - [], - ConfigValue(None, str, 'Filename', None), - 'Model data files', - None, - )) - data.declare('namespaces', ConfigList( - [], - ConfigValue(None, str, 'Namespace', None), - 'A namespace that is used to select data in Pyomo data files.', - None, - )).declare_as_argument('--namespace', dest='namespaces', action='append') + data.declare( + 'files', + ConfigList( + [], ConfigValue(None, str, 'Filename', None), 'Model data files', None + ), + ) + data.declare( + 'namespaces', + ConfigList( + [], + ConfigValue(None, str, 'Namespace', None), + 'A namespace that is used to select data in Pyomo data files.', + None, + ), + ).declare_as_argument('--namespace', dest='namespaces', action='append') # # Model @@ -49,74 +52,83 @@ def minlp_config_block(init=False): model = config.declare('model', ConfigBlock()) blocks['model'] = model - model.declare('filename', ConfigValue( - None, - str, - 'The Python module that specifies the model', - None, - )) - model.declare('object name', ConfigValue( - None, - str, - 'The name of the model object that is created in the ' - 'specified Pyomo module', - None, - )).declare_as_argument('--model-name', dest='model_name') - model.declare('type', ConfigValue( - None, - str, - 'The problem type', - None, - )) - model.declare('options', ConfigBlock( - implicit=True, - description='Options used to construct the model', - )) - model.declare('linearize expressions', ConfigValue( - False, - bool, - 'An option intended for use on linear or mixed-integer models ' - 'in which expression trees in a model (constraints or objectives) ' - 'are compacted into a more memory-efficient and concise form.', - None, - )) - model.declare('save file', ConfigValue( - None, - str, - 'The filename to which the model is saved. The suffix of this ' - 'filename specifies the file format.', - None, - )) - model.declare('save format', ConfigValue( - None, - str, - "The format that the model is saved. When specified, this " - "overrides the format implied by the 'save file' option.", - None, - )) - model.declare('symbolic solver labels', ConfigValue( - False, - bool, - 'When interfacing with the solver, use symbol names derived ' - 'from the model. For example, "my_special_variable[1_2_3]" ' - 'instead of "v1". Useful for debugging. When using the ASL ' - 'interface (--solver-io=nl), generates corresponding .row ' - '(constraints) and .col (variables) files. The ordering in ' - 'these files provides a mapping from ASL index to symbolic ' - 'model names.', - None, - )).declare_as_argument(dest='symbolic_solver_labels') - model.declare('file determinism', ConfigValue( - 1, - int, - 'When interfacing with a solver using file based I/O, set ' - 'the effort level for ensuring the file creation process is ' - 'determistic. The default (1) sorts the index of components ' - 'when transforming the model. Anything less than 1 disables ' - 'index sorting. Anything greater than 1 additionally sorts ' - 'by component name to override declaration order.', - None, - )).declare_as_argument(dest='file_determinism') + model.declare( + 'filename', + ConfigValue(None, str, 'The Python module that specifies the model', None), + ) + model.declare( + 'object name', + ConfigValue( + None, + str, + 'The name of the model object that is created in the ' + 'specified Pyomo module', + None, + ), + ).declare_as_argument('--model-name', dest='model_name') + model.declare('type', ConfigValue(None, str, 'The problem type', None)) + model.declare( + 'options', + ConfigBlock(implicit=True, description='Options used to construct the model'), + ) + model.declare( + 'linearize expressions', + ConfigValue( + False, + bool, + 'An option intended for use on linear or mixed-integer models ' + 'in which expression trees in a model (constraints or objectives) ' + 'are compacted into a more memory-efficient and concise form.', + None, + ), + ) + model.declare( + 'save file', + ConfigValue( + None, + str, + 'The filename to which the model is saved. The suffix of this ' + 'filename specifies the file format.', + None, + ), + ) + model.declare( + 'save format', + ConfigValue( + None, + str, + "The format that the model is saved. When specified, this " + "overrides the format implied by the 'save file' option.", + None, + ), + ) + model.declare( + 'symbolic solver labels', + ConfigValue( + False, + bool, + 'When interfacing with the solver, use symbol names derived ' + 'from the model. For example, "my_special_variable[1_2_3]" ' + 'instead of "v1". Useful for debugging. When using the ASL ' + 'interface (--solver-io=nl), generates corresponding .row ' + '(constraints) and .col (variables) files. The ordering in ' + 'these files provides a mapping from ASL index to symbolic ' + 'model names.', + None, + ), + ).declare_as_argument(dest='symbolic_solver_labels') + model.declare( + 'file determinism', + ConfigValue( + None, + int, + 'When interfacing with a solver using file based I/O, set ' + 'the effort level for ensuring the file creation process is ' + 'determistic. See the individual solver interfaces for ' + 'valid values and default level of file determinism.', + None, + ), + ).declare_as_argument(dest='file_determinism') # # Transform @@ -124,36 +136,38 @@ def minlp_config_block(init=False): transform = ConfigBlock() blocks['transform'] = transform - transform.declare('name', ConfigValue( - None, - str, - 'Name of the model transformation', - None, - )) - transform.declare('options', ConfigBlock( - implicit=True, - description='Transformation options', - )) + transform.declare( + 'name', ConfigValue(None, str, 'Name of the model transformation', None) + ) + transform.declare( + 'options', ConfigBlock(implicit=True, description='Transformation options') + ) # - transform_list = config.declare('transform', ConfigList( - [], - ConfigValue(None, str, 'Transformation', None), - 'List of model transformations', - None, - )).declare_as_argument(dest='transformations', action='append') + transform_list = config.declare( + 'transform', + ConfigList( + [], + ConfigValue(None, str, 'Transformation', None), + 'List of model transformations', + None, + ), + ).declare_as_argument(dest='transformations', action='append') if init: transform_list.append() # # Preprocess # - config.declare('preprocess', ConfigList( - [], - ConfigValue(None, str, 'Module', None), - 'Specify a Python module that gets immediately executed ' - '(before the optimization model is setup).', - None, - )).declare_as_argument(dest='preprocess') + config.declare( + 'preprocess', + ConfigList( + [], + ConfigValue(None, str, 'Module', None), + 'Specify a Python module that gets immediately executed ' + '(before the optimization model is setup).', + None, + ), + ).declare_as_argument(dest='preprocess') # # Runtime @@ -161,89 +175,101 @@ def minlp_config_block(init=False): runtime = config.declare('runtime', ConfigBlock()) blocks['runtime'] = runtime - runtime.declare('logging', ConfigValue( - None, - str, - 'Logging level: quiet, warning, info, verbose, debug', - None, - )).declare_as_argument(dest="logging", metavar="LEVEL") - runtime.declare('logfile', ConfigValue( - None, - str, - 'Redirect output to the specified file.', - None, - )).declare_as_argument(dest="output", metavar="FILE") - runtime.declare('catch errors', ConfigValue( - False, - bool, - 'Trigger failures for exceptions to print the program stack.', - None, - )).declare_as_argument('-c', '--catch-errors', dest="catch") - runtime.declare('disable gc', ConfigValue( - False, - bool, - 'Disable the garbage collecter.', - None, - )).declare_as_argument('--disable-gc', dest='disable_gc') - runtime.declare('interactive', ConfigValue( - False, - bool, - 'After executing Pyomo, launch an interactive Python shell. ' - 'If IPython is installed, this shell is an IPython shell.', - None, - )) - runtime.declare('keep files', ConfigValue( - False, - bool, - 'Keep temporary files', - None, - )).declare_as_argument('-k', '--keepfiles', dest='keepfiles') - runtime.declare('paths', ConfigList( - [], - ConfigValue(None, str, 'Path', None), - 'Give a path that is used to find the Pyomo python files.', - None, - )).declare_as_argument('--path', dest='path') - runtime.declare('profile count', ConfigValue( - 0, - int, - 'Enable profiling of Python code. The value of this option ' - 'is the number of functions that are summarized.', - None, - )).declare_as_argument(dest='profile_count', metavar='COUNT') - runtime.declare('profile memory', ConfigValue( - 0, - int, - "Report memory usage statistics for the generated instance " - "and any associated processing steps. A value of 0 indicates " - "disabled. A value of 1 forces the print of the total memory " - "after major stages of the pyomo script. A value of 2 forces " - "summary memory statistics after major stages of the pyomo " - "script. A value of 3 forces detailed memory statistics " - "during instance creation and various steps of preprocessing. " - "Values equal to 4 and higher currently provide no additional " - "information. Higher values automatically enable all " - "functionality associated with lower values, e.g., 3 turns " - "on detailed and summary statistics.", - None, - )) - runtime.declare('report timing', ConfigValue( - False, - bool, - 'Report various timing statistics during model construction.', - None, - )).declare_as_argument(dest='report_timing') - runtime.declare('tempdir', ConfigValue( - None, - str, - 'Specify the directory where temporary files are generated.', - None, - )).declare_as_argument(dest='tempdir') + runtime.declare( + 'logging', + ConfigValue( + None, str, 'Logging level: quiet, warning, info, verbose, debug', None + ), + ).declare_as_argument(dest="logging", metavar="LEVEL") + runtime.declare( + 'logfile', + ConfigValue(None, str, 'Redirect output to the specified file.', None), + ).declare_as_argument(dest="output", metavar="FILE") + runtime.declare( + 'catch errors', + ConfigValue( + False, + bool, + 'Trigger failures for exceptions to print the program stack.', + None, + ), + ).declare_as_argument('-c', '--catch-errors', dest="catch") + runtime.declare( + 'disable gc', ConfigValue(False, bool, 'Disable the garbage collecter.', None) + ).declare_as_argument('--disable-gc', dest='disable_gc') + runtime.declare( + 'interactive', + ConfigValue( + False, + bool, + 'After executing Pyomo, launch an interactive Python shell. ' + 'If IPython is installed, this shell is an IPython shell.', + None, + ), + ) + runtime.declare( + 'keep files', ConfigValue(False, bool, 'Keep temporary files', None) + ).declare_as_argument('-k', '--keepfiles', dest='keepfiles') + runtime.declare( + 'paths', + ConfigList( + [], + ConfigValue(None, str, 'Path', None), + 'Give a path that is used to find the Pyomo python files.', + None, + ), + ).declare_as_argument('--path', dest='path') + runtime.declare( + 'profile count', + ConfigValue( + 0, + int, + 'Enable profiling of Python code. The value of this option ' + 'is the number of functions that are summarized.', + None, + ), + ).declare_as_argument(dest='profile_count', metavar='COUNT') + runtime.declare( + 'profile memory', + ConfigValue( + 0, + int, + "Report memory usage statistics for the generated instance " + "and any associated processing steps. A value of 0 indicates " + "disabled. A value of 1 forces the print of the total memory " + "after major stages of the pyomo script. A value of 2 forces " + "summary memory statistics after major stages of the pyomo " + "script. A value of 3 forces detailed memory statistics " + "during instance creation and various steps of preprocessing. " + "Values equal to 4 and higher currently provide no additional " + "information. Higher values automatically enable all " + "functionality associated with lower values, e.g., 3 turns " + "on detailed and summary statistics.", + None, + ), + ) + runtime.declare( + 'report timing', + ConfigValue( + False, + bool, + 'Report various timing statistics during model construction.', + None, + ), + ).declare_as_argument(dest='report_timing') + runtime.declare( + 'tempdir', + ConfigValue( + None, + str, + 'Specify the directory where temporary files are generated.', + None, + ), + ).declare_as_argument(dest='tempdir') return config, blocks - def default_config_block(solver, init=False): config, blocks = Default_Config().config_block(init) @@ -251,71 +277,79 @@ def default_config_block(solver, init=False): # Solver # solver = ConfigBlock() - solver.declare('solver name', ConfigValue( - 'glpk', - str, - 'Solver name', - None, - )) - solver.declare('solver executable', ConfigValue( - default=None, - domain=str, - description="The solver executable used by the solver interface.", - doc=("The solver executable used by the solver interface. " - "This option is only valid for those solver interfaces that " - "interact with a local executable through the shell. If unset, " - "the solver interface will attempt to find an executable within " - "the search path of the shell's environment that matches a name " - "commonly associated with the solver interface."), - )) - solver.declare('io format', ConfigValue( - None, - str, - 'The type of IO used to execute the solver. Different solvers ' - 'support different types of IO, but the following are common ' - 'options: lp - generate LP files, nl - generate NL files, ' - 'python - direct Python interface, os - generate OSiL XML files.', - None, - )) - solver.declare('manager', ConfigValue( - 'serial', - str, - 'The technique that is used to manage solver executions.', - None, - )) - solver.declare('options', ConfigBlock( - implicit=True, - implicit_domain=ConfigValue( + solver.declare('solver name', ConfigValue('glpk', str, 'Solver name', None)) + solver.declare( + 'solver executable', + ConfigValue( + default=None, + domain=str, + description="The solver executable used by the solver interface.", + doc=( + "The solver executable used by the solver interface. " + "This option is only valid for those solver interfaces that " + "interact with a local executable through the shell. If unset, " + "the solver interface will attempt to find an executable within " + "the search path of the shell's environment that matches a name " + "commonly associated with the solver interface." + ), + ), + ) + solver.declare( + 'io format', + ConfigValue( None, str, - 'Solver option', - None), - description="Options passed into the solver", - )) - solver.declare('options string', ConfigValue( - None, - str, - 'String describing solver options', - None, - )) - solver.declare('suffixes', ConfigList( - [], - ConfigValue(None, str, 'Suffix', None), - 'Solution suffixes that will be extracted by the solver ' - '(e.g., rc, dual, or slack). The use of this option is not ' - 'required when a suffix has been declared on the model ' - 'using Pyomo\'s Suffix component.', - None, - )) + 'The type of IO used to execute the solver. Different solvers ' + 'support different types of IO, but the following are common ' + 'options: lp - generate LP files, nl - generate NL files, ' + 'python - direct Python interface, os - generate OSiL XML files.', + None, + ), + ) + solver.declare( + 'manager', + ConfigValue( + 'serial', + str, + 'The technique that is used to manage solver executions.', + None, + ), + ) + solver.declare( + 'options', + ConfigBlock( + implicit=True, + implicit_domain=ConfigValue(None, str, 'Solver option', None), + description="Options passed into the solver", + ), + ) + solver.declare( + 'options string', + ConfigValue(None, str, 'String describing solver options', None), + ) + solver.declare( + 'suffixes', + ConfigList( + [], + ConfigValue(None, str, 'Suffix', None), + 'Solution suffixes that will be extracted by the solver ' + '(e.g., rc, dual, or slack). The use of this option is not ' + 'required when a suffix has been declared on the model ' + 'using Pyomo\'s Suffix component.', + None, + ), + ) blocks['solver'] = solver # - solver_list = config.declare('solvers', ConfigList( - [], - solver, #ConfigValue(None, str, 'Solver', None), - 'List of solvers. The first solver in this list is the ' - 'master solver.', - None, - )) + solver_list = config.declare( + 'solvers', + ConfigList( + [], + solver, # ConfigValue(None, str, 'Solver', None), + 'List of solvers. The first solver in this list is the main solver.', + None, + ), + ) # # Make sure that there is one solver in the list. # @@ -328,31 +362,36 @@ def default_config_block(solver, init=False): # than one solver defined, we wouldn't want command line options # going to both. solver_list.append() - #solver_list[0].get('solver name').\ + # solver_list[0].get('solver name').\ # declare_as_argument('--solver', dest='solver') - solver_list[0].get('solver executable').\ - declare_as_argument('--solver-executable', - dest="solver_executable", metavar="FILE") - solver_list[0].get('io format').\ - declare_as_argument('--solver-io', dest='io_format', metavar="FORMAT") - solver_list[0].get('manager').\ - declare_as_argument('--solver-manager', dest="smanager_type", - metavar="TYPE") - solver_list[0].get('options string').\ - declare_as_argument('--solver-options', dest='options_string', - metavar="STRING") - solver_list[0].get('suffixes').\ - declare_as_argument('--solver-suffix', dest="solver_suffixes") + solver_list[0].get('solver executable').declare_as_argument( + '--solver-executable', dest="solver_executable", metavar="FILE" + ) + solver_list[0].get('io format').declare_as_argument( + '--solver-io', dest='io_format', metavar="FORMAT" + ) + solver_list[0].get('manager').declare_as_argument( + '--solver-manager', dest="smanager_type", metavar="TYPE" + ) + solver_list[0].get('options string').declare_as_argument( + '--solver-options', dest='options_string', metavar="STRING" + ) + solver_list[0].get('suffixes').declare_as_argument( + '--solver-suffix', dest="solver_suffixes" + ) # # Postprocess # - config.declare('postprocess', ConfigList( - [], - ConfigValue(None, str, 'Module', None), - 'Specify a Python module that gets executed after optimization.', - None, - )).declare_as_argument(dest='postprocess') + config.declare( + 'postprocess', + ConfigList( + [], + ConfigValue(None, str, 'Module', None), + 'Specify a Python module that gets executed after optimization.', + None, + ), + ).declare_as_argument(dest='postprocess') # # Postsolve @@ -360,60 +399,61 @@ def default_config_block(solver, init=False): postsolve = config.declare('postsolve', ConfigBlock()) blocks['postsolve'] = postsolve - postsolve.declare('print logfile', ConfigValue( - False, - bool, - 'Print the solver logfile after performing optimization.', - None, - )).declare_as_argument('-l', '--log', dest="log") - postsolve.declare('save results', ConfigValue( - None, - str, - 'Specify the filename to which the results are saved.', - None, - )).declare_as_argument('--save-results', dest="save_results", - metavar="FILE") - postsolve.declare('show results', ConfigValue( - False, - bool, - 'Print the results object after optimization.', - None, - )).declare_as_argument(dest="show_results") - postsolve.declare('results format', ConfigValue( - None, - str, - 'Specify the results format: json or yaml.', - None) + postsolve.declare( + 'print logfile', + ConfigValue( + False, bool, 'Print the solver logfile after performing optimization.', None + ), + ).declare_as_argument('-l', '--log', dest="log") + postsolve.declare( + 'save results', + ConfigValue( + None, str, 'Specify the filename to which the results are saved.', None + ), + ).declare_as_argument('--save-results', dest="save_results", metavar="FILE") + postsolve.declare( + 'show results', + ConfigValue(False, bool, 'Print the results object after optimization.', None), + ).declare_as_argument(dest="show_results") + postsolve.declare( + 'results format', + ConfigValue(None, str, 'Specify the results format: json or yaml.', None), ).declare_as_argument( '--results-format', dest="results_format", metavar="FORMAT" ).declare_as_argument( - '--json', dest="results_format", action="store_const", - const="json", help="Store results in JSON format", + '--json', + dest="results_format", + action="store_const", + const="json", + help="Store results in JSON format", ) - postsolve.declare('summary', ConfigValue( - False, - bool, - 'Summarize the final solution after performing optimization.', - None, - )).declare_as_argument(dest="summary") + postsolve.declare( + 'summary', + ConfigValue( + False, + bool, + 'Summarize the final solution after performing optimization.', + None, + ), + ).declare_as_argument(dest="summary") # # Runtime # runtime = blocks['runtime'] - runtime.declare('only instance', ConfigValue( - False, - bool, - "Generate a model instance, and then exit", - None, - )).declare_as_argument('--instance-only', dest='only_instance') - runtime.declare('stream output', ConfigValue( - False, - bool, - "Stream the solver output to provide information about the " - "solver's progress.", - None, - )).declare_as_argument('--stream-output', '--stream-solver', dest="tee") + runtime.declare( + 'only instance', + ConfigValue(False, bool, "Generate a model instance, and then exit", None), + ).declare_as_argument('--instance-only', dest='only_instance') + runtime.declare( + 'stream output', + ConfigValue( + False, + bool, + "Stream the solver output to provide information about the " + "solver's progress.", + None, + ), + ).declare_as_argument('--stream-output', '--stream-solver', dest="tee") # return config, blocks - diff --git a/pyomo/scripting/tests/test_cmds.py b/pyomo/scripting/tests/test_cmds.py index 8ba409aea91..960e0d4ada1 100644 --- a/pyomo/scripting/tests/test_cmds.py +++ b/pyomo/scripting/tests/test_cmds.py @@ -12,12 +12,19 @@ import re import pyomo.common.unittest as unittest from pyomo.common.tee import capture_output +from pyomo.common.log import LoggingIntercept from pyomo.environ import SolverFactory from pyomo.scripting.driver_help import help_solvers, help_transformations +from pyomo.scripting.pyomo_main import main class Test(unittest.TestCase): + def test_pyomo_main_deprecation(self): + with LoggingIntercept() as LOG: + with unittest.pytest.raises(SystemExit) as e: + main(args=['--solvers=glpk', 'foo.py']) + self.assertIn("Running the 'pyomo' script with no subcommand", LOG.getvalue()) def test_help_solvers(self): with capture_output() as OUT: @@ -30,30 +37,35 @@ def test_help_solvers(self): self.assertTrue(re.search(r'\n \*asl ', OUT)) # MindtPY is bundled with Pyomo so should always be available self.assertTrue(re.search(r'\n \+mindtpy ', OUT)) - for solver in ('ipopt','cbc','glpk'): + for solver in ('ipopt', 'cbc', 'glpk'): s = SolverFactory(solver) if s.available(): self.assertTrue( re.search(r"\n \+%s " % solver, OUT), - "' +%s' not found in help --solvers" % solver) + "' +%s' not found in help --solvers" % solver, + ) else: self.assertTrue( re.search(r"\n %s " % solver, OUT), - "' %s' not found in help --solvers" % solver) + "' %s' not found in help --solvers" % solver, + ) for solver in ('baron',): s = SolverFactory(solver) if s.license_is_valid(): self.assertTrue( re.search(r"\n \+%s " % solver, OUT), - "' +%s' not found in help --solvers" % solver) + "' +%s' not found in help --solvers" % solver, + ) elif s.available(): self.assertTrue( re.search(r"\n \-%s " % solver, OUT), - "' -%s' not found in help --solvers" % solver) + "' -%s' not found in help --solvers" % solver, + ) else: self.assertTrue( re.search(r"\n %s " % solver, OUT), - "' %s' not found in help --solvers" % solver) + "' %s' not found in help --solvers" % solver, + ) def test_help_transformations(self): with capture_output() as OUT: @@ -62,8 +74,7 @@ def test_help_transformations(self): self.assertTrue(re.search('Pyomo Model Transformations', OUT)) self.assertTrue(re.search('core.relax_integer_vars', OUT)) # test a transformation that we know is deprecated - self.assertTrue( - re.search(r'duality.linear_dual\s+\[DEPRECATED\]', OUT)) + self.assertTrue(re.search(r'duality.linear_dual\s+\[DEPRECATED\]', OUT)) if __name__ == "__main__": diff --git a/pyomo/scripting/util.py b/pyomo/scripting/util.py index 056e5218b69..3ec0feccd66 100644 --- a/pyomo/scripting/util.py +++ b/pyomo/scripting/util.py @@ -25,8 +25,11 @@ from pyomo.common.tee import capture_output from pyomo.common.dependencies import ( - yaml, yaml_available, yaml_load_args, - pympler, pympler_available, + yaml, + yaml_available, + yaml_load_args, + pympler, + pympler_available, ) from pyomo.common.collections import Bunch from pyomo.opt import ProblemFormat @@ -34,13 +37,20 @@ from pyomo.opt.parallel import SolverManagerFactory from pyomo.dataportal import DataPortal from pyomo.scripting.interface import ( - ExtensionPoint, Plugin, implements, + ExtensionPoint, + Plugin, + implements, registered_callback, - IPyomoScriptCreateModel, IPyomoScriptCreateDataPortal, - IPyomoScriptPrintModel, IPyomoScriptModifyInstance, - IPyomoScriptPrintInstance, IPyomoScriptSaveInstance, - IPyomoScriptPrintResults, IPyomoScriptSaveResults, - IPyomoScriptPostprocess, IPyomoScriptPreprocess, + IPyomoScriptCreateModel, + IPyomoScriptCreateDataPortal, + IPyomoScriptPrintModel, + IPyomoScriptModifyInstance, + IPyomoScriptPrintInstance, + IPyomoScriptSaveInstance, + IPyomoScriptPrintResults, + IPyomoScriptSaveResults, + IPyomoScriptPostprocess, + IPyomoScriptPreprocess, ) from pyomo.core import Model, TransformationFactory, Suffix, display @@ -49,16 +59,18 @@ # actually needed. IPython_available = None -filter_excepthook=False -modelapi = { 'pyomo_create_model':IPyomoScriptCreateModel, - 'pyomo_create_dataportal':IPyomoScriptCreateDataPortal, - 'pyomo_print_model':IPyomoScriptPrintModel, - 'pyomo_modify_instance':IPyomoScriptModifyInstance, - 'pyomo_print_instance':IPyomoScriptPrintInstance, - 'pyomo_save_instance':IPyomoScriptSaveInstance, - 'pyomo_print_results':IPyomoScriptPrintResults, - 'pyomo_save_results':IPyomoScriptSaveResults, - 'pyomo_postprocess':IPyomoScriptPostprocess} +filter_excepthook = False +modelapi = { + 'pyomo_create_model': IPyomoScriptCreateModel, + 'pyomo_create_dataportal': IPyomoScriptCreateDataPortal, + 'pyomo_print_model': IPyomoScriptPrintModel, + 'pyomo_modify_instance': IPyomoScriptModifyInstance, + 'pyomo_print_instance': IPyomoScriptPrintInstance, + 'pyomo_save_instance': IPyomoScriptSaveInstance, + 'pyomo_print_results': IPyomoScriptPrintResults, + 'pyomo_save_results': IPyomoScriptSaveResults, + 'pyomo_postprocess': IPyomoScriptPostprocess, +} logger = logging.getLogger('pyomo.scripting') @@ -73,7 +85,9 @@ def setup_environment(data): postsolve = getattr(data.options, 'postsolve', None) if postsolve: if data.options.postsolve.results_format == 'yaml' and not yaml_available: - raise ValueError("Configuration specifies a yaml file, but pyyaml is not installed!") + raise ValueError( + "Configuration specifies a yaml file, but pyyaml is not installed!" + ) # global start_time start_time = time.time() @@ -91,14 +105,14 @@ def setup_environment(data): # if not data.options.runtime.tempdir is None: if not os.path.exists(data.options.runtime.tempdir): - msg = 'Directory for temporary files does not exist: %s' + msg = 'Directory for temporary files does not exist: %s' raise ValueError(msg % data.options.runtime.tempdir) TempfileManager.tempdir = data.options.runtime.tempdir # # Configure exception management # - def pyomo_excepthook(etype,value,tb): + def pyomo_excepthook(etype, value, tb): """ This exception hook gets called when debugging is on. Otherwise, run_command in this module is called. @@ -109,14 +123,16 @@ def pyomo_excepthook(etype,value,tb): else: name = "model" - if filter_excepthook: action = "loading" else: action = "running" - msg = "Unexpected exception (%s) while %s %s:\n " \ - % (etype.__name__, action, name) + msg = "Unexpected exception (%s) while %s %s:\n " % ( + etype.__name__, + action, + name, + ) # # This handles the case where the error is propagated by a KeyError. @@ -126,13 +142,13 @@ def pyomo_excepthook(etype,value,tb): # valueStr = str(value) if etype == KeyError: - valueStr = valueStr.replace(r"\n","\n") + valueStr = valueStr.replace(r"\n", "\n") if valueStr[0] == valueStr[-1] and valueStr[0] in "\"'": valueStr = valueStr[1:-1] - logger.error(msg+valueStr) + logger.error(msg + valueStr) - tb_list = traceback.extract_tb(tb,None) + tb_list = traceback.extract_tb(tb, None) i = 0 if not is_debug_set(logger) and filter_excepthook: while i < len(tb_list): @@ -143,10 +159,13 @@ def pyomo_excepthook(etype,value,tb): i = 0 print("\nTraceback (most recent call last):") for item in tb_list[i:]: - print(" File \""+item[0]+"\", line "+str(item[1])+", in "+item[2]) + print( + " File \"" + item[0] + "\", line " + str(item[1]) + ", in " + item[2] + ) if item[3] is not None: - print(" "+item[3]) + print(" " + item[3]) sys.exit(1) + sys.excepthook = pyomo_excepthook @@ -163,7 +182,10 @@ def apply_preprocessing(data, parser=None): data.local = Bunch() # if not data.options.runtime.logging == 'quiet': - sys.stdout.write('[%8.2f] Applying Pyomo preprocessing actions\n' % (time.time()-start_time)) + sys.stdout.write( + '[%8.2f] Applying Pyomo preprocessing actions\n' + % (time.time() - start_time) + ) sys.stdout.flush() # global filter_excepthook @@ -182,43 +204,49 @@ def apply_preprocessing(data, parser=None): preprocess = import_file(config_value, clear_cache=True) # for ep in ExtensionPoint(IPyomoScriptPreprocess): - ep.apply( options=data.options ) + ep.apply(options=data.options) # # Verify that files exist # - for file in [data.options.model.filename]+data.options.data.files.value(): + for file in [data.options.model.filename] + data.options.data.files.value(): if not os.path.exists(file): - raise IOError("File "+file+" does not exist!") + raise IOError("File " + file + " does not exist!") # - filter_excepthook=True + filter_excepthook = True tick = time.time() - data.local.usermodel = import_file(data.options.model.filename, - clear_cache=True) - data.local.time_initial_import = time.time()-tick - filter_excepthook=False + data.local.usermodel = import_file(data.options.model.filename, clear_cache=True) + data.local.time_initial_import = time.time() - tick + filter_excepthook = False usermodel_dir = dir(data.local.usermodel) data.local._usermodel_plugins = [] for key in modelapi: if key in usermodel_dir: + class TMP(Plugin): implements(modelapi[key], service=True) + def __init__(self): self.fn = getattr(data.local.usermodel, key) - def apply(self,**kwds): + + def apply(self, **kwds): return self.fn(**kwds) + tmp = TMP() - data.local._usermodel_plugins.append( tmp ) + data.local._usermodel_plugins.append(tmp) if 'pyomo_preprocess' in usermodel_dir: if data.options.model.object_name in usermodel_dir: - msg = "Preprocessing function 'pyomo_preprocess' defined in file" \ - " '%s', but model is already constructed!" + msg = ( + "Preprocessing function 'pyomo_preprocess' defined in file" + " '%s', but model is already constructed!" + ) raise SystemExit(msg % data.options.model.filename) - getattr(data.local.usermodel, 'pyomo_preprocess')( options=data.options ) + getattr(data.local.usermodel, 'pyomo_preprocess')(options=data.options) # return data + def create_model(data): """ Create instance of Pyomo model. @@ -231,7 +259,7 @@ def create_model(data): """ # if not data.options.runtime.logging == 'quiet': - sys.stdout.write('[%8.2f] Creating model\n' % (time.time()-start_time)) + sys.stdout.write('[%8.2f] Creating model\n' % (time.time() - start_time)) sys.stdout.flush() # if data.options.runtime.profile_memory >= 1 and pympler_available: @@ -250,9 +278,9 @@ def create_model(data): _model_IDS.add(id(_obj)) model_name = data.options.model.object_name if len(_models) == 1: - _name = list(_models.keys())[0] + _name = list(_models.keys())[0] if model_name is None: - model_name = _name + model_name = _name elif model_name != _name: msg = "Model '%s' is not defined in file '%s'!" raise SystemExit(msg % (model_name, data.options.model.filename)) @@ -268,19 +296,27 @@ def create_model(data): if model_name is None: if len(ep) == 0: - msg = "A model is not defined and the 'pyomo_create_model' is not "\ - "provided in module %s" + msg = ( + "A model is not defined and the 'pyomo_create_model' is not " + "provided in module %s" + ) raise SystemExit(msg % data.options.model.filename) elif len(ep) > 1: - msg = 'Multiple model construction plugins have been registered in module %s!' + msg = ( + 'Multiple model construction plugins have been registered in module %s!' + ) raise SystemExit(msg % data.options.model.filename) else: model_options = data.options.model.options.value() tick = time.time() - model = ep.service().apply( options = Bunch(*data.options), - model_options=Bunch(*model_options) ) + model = ep.service().apply( + options=Bunch(**data.options), model_options=Bunch(**model_options) + ) if data.options.runtime.report_timing is True: - print(" %6.2f seconds required to construct instance" % (time.time() - tick)) + print( + " %6.2f seconds required to construct instance" + % (time.time() - tick) + ) data.local.time_initial_import = None tick = time.time() else: @@ -292,15 +328,17 @@ def create_model(data): msg = "'%s' object is 'None' in module %s" raise SystemExit(msg % (model_name, data.options.model.filename)) elif len(ep) > 0: - msg = "Model construction function 'create_model' defined in " \ - "file '%s', but model is already constructed!" + msg = ( + "Model construction function 'create_model' defined in " + "file '%s', but model is already constructed!" + ) raise SystemExit(msg % data.options.model.filename) # # Print model # for ep in ExtensionPoint(IPyomoScriptPrintModel): - ep.apply( options=data.options, model=model ) + ep.apply(options=data.options, model=model) # # Create Problem Instance @@ -311,18 +349,23 @@ def create_model(data): raise SystemExit(msg) if len(ep) == 1: - modeldata = ep.service().apply( options=data.options, model=model ) + modeldata = ep.service().apply(options=data.options, model=model) else: modeldata = DataPortal() - if model._constructed: # # TODO: use a better test for ConcreteModel # instance = model - if data.options.runtime.report_timing is True and not data.local.time_initial_import is None: - print(" %6.2f seconds required to construct instance" % (data.local.time_initial_import)) + if ( + data.options.runtime.report_timing is True + and not data.local.time_initial_import is None + ): + print( + " %6.2f seconds required to construct instance" + % (data.local.time_initial_import) + ) else: tick = time.time() if len(data.options.data.files) > 1: @@ -332,16 +375,20 @@ def create_model(data): for file in data.options.data.files: suffix = (file).split(".")[-1] if suffix != "dat": - msg = 'When specifiying multiple data files, they must all ' \ - 'be *.dat files. File specified: %s' - raise SystemExit(msg % str( file )) + msg = ( + 'When specifying multiple data files, they must all ' + 'be *.dat files. File specified: %s' + ) + raise SystemExit(msg % str(file)) modeldata.load(filename=file, model=model) - instance = model.create_instance(modeldata, - namespaces=data.options.data.namespaces, - profile_memory=data.options.runtime.profile_memory, - report_timing=data.options.runtime.report_timing) + instance = model.create_instance( + modeldata, + namespaces=data.options.data.namespaces, + profile_memory=data.options.runtime.profile_memory, + report_timing=data.options.runtime.report_timing, + ) elif len(data.options.data.files) == 1: # @@ -349,58 +396,76 @@ def create_model(data): # suffix = (data.options.data.files[0]).split(".")[-1].lower() if suffix == "dat": - instance = model.create_instance(data.options.data.files[0], - namespaces=data.options.data.namespaces, - profile_memory=data.options.runtime.profile_memory, - report_timing=data.options.runtime.report_timing) + instance = model.create_instance( + data.options.data.files[0], + namespaces=data.options.data.namespaces, + profile_memory=data.options.runtime.profile_memory, + report_timing=data.options.runtime.report_timing, + ) elif suffix == "py": - userdata = import_file(data.options.data.files[0], - clear_cache=True) + userdata = import_file(data.options.data.files[0], clear_cache=True) if "modeldata" in dir(userdata): if len(ep) == 1: - msg = "Cannot apply 'pyomo_create_modeldata' and use the" \ - " 'modeldata' object that is provided in the model" + msg = ( + "Cannot apply 'pyomo_create_modeldata' and use the" + " 'modeldata' object that is provided in the model" + ) raise SystemExit(msg) if userdata.modeldata is None: msg = "'modeldata' object is 'None' in module %s" - raise SystemExit(msg % str( data.options.data.files[0] )) + raise SystemExit(msg % str(data.options.data.files[0])) - modeldata=userdata.modeldata + modeldata = userdata.modeldata else: if len(ep) == 0: - msg = "Neither 'modeldata' nor 'pyomo_create_dataportal' " \ - 'is defined in module %s' - raise SystemExit(msg % str( data.options.data.files[0] )) + msg = ( + "Neither 'modeldata' nor 'pyomo_create_dataportal' " + 'is defined in module %s' + ) + raise SystemExit(msg % str(data.options.data.files[0])) modeldata.read(model) - instance = model.create_instance(modeldata, - namespaces=data.options.data.namespaces, - profile_memory=data.options.runtime.profile_memory, - report_timing=data.options.runtime.report_timing) + instance = model.create_instance( + modeldata, + namespaces=data.options.data.namespaces, + profile_memory=data.options.runtime.profile_memory, + report_timing=data.options.runtime.report_timing, + ) elif suffix == "yml" or suffix == 'yaml': - modeldata = yaml.load(open(data.options.data.files[0]), **yaml_load_args) - instance = model.create_instance(modeldata, - namespaces=data.options.data.namespaces, - profile_memory=data.options.runtime.profile_memory, - report_timing=data.options.runtime.report_timing) + modeldata = yaml.load( + open(data.options.data.files[0]), **yaml_load_args + ) + instance = model.create_instance( + modeldata, + namespaces=data.options.data.namespaces, + profile_memory=data.options.runtime.profile_memory, + report_timing=data.options.runtime.report_timing, + ) else: - raise ValueError("Unknown data file type: "+data.options.data.files[0]) + raise ValueError( + "Unknown data file type: " + data.options.data.files[0] + ) else: - instance = model.create_instance(modeldata, - namespaces=data.options.data.namespaces, - profile_memory=data.options.runtime.profile_memory, - report_timing=data.options.runtime.report_timing) + instance = model.create_instance( + modeldata, + namespaces=data.options.data.namespaces, + profile_memory=data.options.runtime.profile_memory, + report_timing=data.options.runtime.report_timing, + ) if data.options.runtime.report_timing is True: - print(" %6.2f seconds required to construct instance" % (time.time() - tick)) + print( + " %6.2f seconds required to construct instance" + % (time.time() - tick) + ) # modify_start_time = time.time() for ep in ExtensionPoint(IPyomoScriptModifyInstance): if data.options.runtime.report_timing is True: tick = time.time() - ep.apply( options=data.options, model=model, instance=instance ) + ep.apply(options=data.options, model=model, instance=instance) if data.options.runtime.report_timing is True: print(" %6.2f seconds to apply %s" % (time.time() - tick, type(ep))) tick = time.time() @@ -409,8 +474,10 @@ def create_model(data): with TransformationFactory(transformation) as xfrm: instance = xfrm.create_using(instance) if instance is None: - raise SystemExit("Unexpected error while applying " - "transformation '%s'" % transformation) + raise SystemExit( + "Unexpected error while applying " + "transformation '%s'" % transformation + ) # if data.options.runtime.report_timing is True: total_time = time.time() - modify_start_time @@ -422,39 +489,38 @@ def create_model(data): print("") for ep in ExtensionPoint(IPyomoScriptPrintInstance): - ep.apply( options=data.options, instance=instance ) + ep.apply(options=data.options, instance=instance) - fname=None - smap_id=None + fname = None + smap_id = None if not data.options.model.save_file is None: - if data.options.runtime.report_timing is True: write_start_time = time.time() if data.options.model.save_file == True: if data.local.model_format in (ProblemFormat.cpxlp, ProblemFormat.lpxlp): - fname = (data.options.data.files[0])[:-3]+'lp' + fname = (data.options.data.files[0])[:-3] + 'lp' else: - fname = (data.options.data.files[0])[:-3]+str(data.local.model_format) - format=data.local.model_format + fname = (data.options.data.files[0])[:-3] + str(data.local.model_format) + format = data.local.model_format else: fname = data.options.model.save_file - format= data.options.model.save_format + format = data.options.model.save_format io_options = {} if data.options.model.symbolic_solver_labels: io_options['symbolic_solver_labels'] = True - if data.options.model.file_determinism != 1: + if data.options.model.file_determinism is not None: io_options['file_determinism'] = data.options.model.file_determinism - (fname, smap_id) = instance.write(filename=fname, - format=format, - io_options=io_options) + (fname, smap_id) = instance.write( + filename=fname, format=format, io_options=io_options + ) if not data.options.runtime.logging == 'quiet': if not os.path.exists(fname): - print("ERROR: file "+fname+" has not been created!") + print("ERROR: file " + fname + " has not been created!") else: - print("Model written to file '"+str(fname)+"'") + print("Model written to file '" + str(fname) + "'") if data.options.runtime.report_timing is True: total_time = time.time() - write_start_time @@ -463,13 +529,15 @@ def create_model(data): if data.options.runtime.profile_memory >= 2 and pympler_available: print("") print(" Summary of objects following file output") - post_file_output_summary = pympler.summary.summarize(pympler.muppy.get_objects()) + post_file_output_summary = pympler.summary.summarize( + pympler.muppy.get_objects() + ) pympler.summary.print_(post_file_output_summary, limit=100) print("") for ep in ExtensionPoint(IPyomoScriptSaveInstance): - ep.apply( options=data.options, instance=instance ) + ep.apply(options=data.options, instance=instance) if data.options.runtime.profile_memory >= 1 and pympler_available: mem_used = pympler.muppy.get_size(pympler.muppy.get_objects()) @@ -477,8 +545,14 @@ def create_model(data): data.local.max_memory = mem_used print(" Total memory = %d bytes following Pyomo instance creation" % mem_used) - return Bunch(model=model, instance=instance, - smap_id=smap_id, filename=fname, local=data.local ) + return Bunch( + model=model, + instance=instance, + smap_id=smap_id, + filename=fname, + local=data.local, + ) + def apply_optimizer(data, instance=None): """ @@ -493,7 +567,7 @@ def apply_optimizer(data, instance=None): """ # if not data.options.runtime.logging == 'quiet': - sys.stdout.write('[%8.2f] Applying solver\n' % (time.time()-start_time)) + sys.stdout.write('[%8.2f] Applying solver\n' % (time.time() - start_time)) sys.stdout.flush() # # @@ -505,16 +579,18 @@ def apply_optimizer(data, instance=None): if len(data.options.solvers[0].suffixes) > 0: for suffix_name in data.options.solvers[0].suffixes: - if suffix_name[0] in ['"',"'"]: + if suffix_name[0] in ['"', "'"]: suffix_name = suffix_name[1:-1] # Don't redeclare the suffix if it already exists suffix = getattr(instance, suffix_name, None) if suffix is None: setattr(instance, suffix_name, Suffix(direction=Suffix.IMPORT)) else: - raise ValueError("Problem declaring solver suffix %s. A component "\ - "with that name already exists on model %s." - % (suffix_name, instance.name)) + raise ValueError( + "Problem declaring solver suffix %s. A component " + "with that name already exists on model %s." + % (suffix_name, instance.name) + ) if getattr(data.options.solvers[0].options, 'timelimit', 0) == 0: data.options.solvers[0].options.timelimit = None @@ -529,8 +605,7 @@ def apply_optimizer(data, instance=None): if data.options.solvers[0].manager is None: solver_mngr_name = 'serial' elif not data.options.solvers[0].manager in SolverManagerFactory: - raise ValueError("Unknown solver manager %s" - % data.options.solvers[0].manager) + raise ValueError("Unknown solver manager %s" % data.options.solvers[0].manager) else: solver_mngr_name = data.options.solvers[0].manager # @@ -545,12 +620,11 @@ def apply_optimizer(data, instance=None): # Setup keywords for the solve # keywords = {} - if (data.options.runtime.keep_files or \ - data.options.postsolve.print_logfile): + if data.options.runtime.keep_files or data.options.postsolve.print_logfile: keywords['keepfiles'] = True if data.options.model.symbolic_solver_labels: keywords['symbolic_solver_labels'] = True - if data.options.model.file_determinism != 1: + if data.options.model.file_determinism is not None: keywords['file_determinism'] = data.options.model.file_determinism keywords['tee'] = data.options.runtime.stream_output keywords['timelimit'] = getattr(data.options.solvers[0].options, 'timelimit', 0) @@ -580,7 +654,7 @@ def apply_optimizer(data, instance=None): if len(data.options.solvers[0].options) > 0: opt.set_options(data.options.solvers[0].options) - #opt.set_options(" ".join("%s=%s" % (key, value) + # opt.set_options(" ".join("%s=%s" % (key, value) # for key, value in data.options.solvers[0].options.iteritems() # if not key == 'timelimit')) if not data.options.solvers[0].options_string is None: @@ -593,13 +667,20 @@ def apply_optimizer(data, instance=None): # # Get the solver option arguments # - if len(data.options.solvers[0].options) > 0 and not data.options.solvers[0].options_string is None: + if ( + len(data.options.solvers[0].options) > 0 + and not data.options.solvers[0].options_string is None + ): # If both 'options' and 'options_string' were specified, then create a # single options string that is passed to the solver. - ostring = " ".join("%s=%s" % (key, value) - for key, value in data.options.solvers[0].options.iteritems() - if not value is None) - keywords['options'] = ostring + ' ' + data.options.solvers[0].options_string + ostring = " ".join( + "%s=%s" % (key, value) + for key, value in data.options.solvers[0].options.iteritems() + if not value is None + ) + keywords['options'] = ( + ostring + ' ' + data.options.solvers[0].options_string + ) elif len(data.options.solvers[0].options) > 0: keywords['options'] = data.options.solvers[0].options else: @@ -631,13 +712,13 @@ def process_results(data, instance=None, results=None, opt=None): """ # if not data.options.runtime.logging == 'quiet': - sys.stdout.write('[%8.2f] Processing results\n' % (time.time()-start_time)) + sys.stdout.write('[%8.2f] Processing results\n' % (time.time() - start_time)) sys.stdout.flush() # if data.options.postsolve.print_logfile: print("") print("==========================================================") - print("Solver Logfile: "+str(opt._log_file)) + print("Solver Logfile: " + str(opt._log_file)) print("==========================================================") print("") with open(opt._log_file, "r") as INPUT: @@ -663,20 +744,24 @@ def process_results(data, instance=None, results=None, opt=None): # The ordering of the elif and else conditions is important here # to ensure that the default file format is yaml results_file = 'results.yml' - results.write(filename=results_file, - format=data.options.postsolve.results_format) + results.write( + filename=results_file, format=data.options.postsolve.results_format + ) if not data.options.runtime.logging == 'quiet': - print(" Number of solutions: "+str(len(results.solution))) + print(" Number of solutions: " + str(len(results.solution))) if len(results.solution) > 0: print(" Solution Information") - print(" Gap: "+str(results.solution[0].gap)) - print(" Status: "+str(results.solution[0].status)) + print(" Gap: " + str(results.solution[0].gap)) + print(" Status: " + str(results.solution[0].status)) if len(results.solution[0].objective) == 1: key = list(results.solution[0].objective.keys())[0] - print(" Function Value: "+str(results.solution[0].objective[key]['Value'])) - print(" Solver results file: "+results_file) + print( + " Function Value: " + + str(results.solution[0].objective[key]['Value']) + ) + print(" Solver results file: " + results_file) # - #ep = ExtensionPoint(IPyomoScriptPrintResults) + # ep = ExtensionPoint(IPyomoScriptPrintResults) if data.options.postsolve.show_results: print("") results.write(num=1, format=data.options.postsolve.results_format) @@ -695,10 +780,10 @@ def process_results(data, instance=None, results=None, opt=None): print("No solutions reported by solver.") # for ep in ExtensionPoint(IPyomoScriptPrintResults): - ep.apply( options=data.options, instance=instance, results=results ) + ep.apply(options=data.options, instance=instance, results=results) # for ep in ExtensionPoint(IPyomoScriptSaveResults): - ep.apply( options=data.options, instance=instance, results=results ) + ep.apply(options=data.options, instance=instance, results=results) # if data.options.runtime.profile_memory >= 1 and pympler_available: global memory_data @@ -707,6 +792,7 @@ def process_results(data, instance=None, results=None, opt=None): data.local.max_memory = mem_used print(" Total memory = %d bytes following results processing" % mem_used) + def apply_postprocessing(data, instance=None, results=None): """ Apply post-processing steps. @@ -717,17 +803,20 @@ def apply_postprocessing(data, instance=None, results=None): """ # if not data.options.runtime.logging == 'quiet': - sys.stdout.write('[%8.2f] Applying Pyomo postprocessing actions\n' % (time.time()-start_time)) + sys.stdout.write( + '[%8.2f] Applying Pyomo postprocessing actions\n' + % (time.time() - start_time) + ) sys.stdout.flush() # options are of type ConfigValue, not raw strings / atomics. for config_value in data.options.postprocess: postprocess = import_file(config_value, clear_cache=True) if "pyomo_postprocess" in dir(postprocess): - postprocess.pyomo_postprocess(data.options, instance,results) + postprocess.pyomo_postprocess(data.options, instance, results) for ep in ExtensionPoint(IPyomoScriptPostprocess): - ep.apply( options=data.options, instance=instance, results=results ) + ep.apply(options=data.options, instance=instance, results=results) if data.options.runtime.profile_memory >= 1 and pympler_available: mem_used = pympler.muppy.get_size(pympler.muppy.get_objects()) @@ -735,6 +824,7 @@ def apply_postprocessing(data, instance=None, results=None): data.local.max_memory = mem_used print(" Total memory = %d bytes upon termination" % mem_used) + def finalize(data, model=None, instance=None, results=None): """ Perform final actions to finish the execution of the pyomo script. @@ -760,7 +850,7 @@ def finalize(data, model=None, instance=None, results=None): # NOTE: This function gets called for cleanup during exceptions # to prevent memory leaks. Don't reconfigure the loggers # here or we will lose the exception information. - #configure_loggers(reset=True) + # configure_loggers(reset=True) data.local._usermodel_plugins = [] ##gc.collect() ##print gc.get_referrers(_tmp) @@ -768,51 +858,57 @@ def finalize(data, model=None, instance=None, results=None): ##print "HERE - usermodel_plugins" ## if not data.options.runtime.logging == 'quiet': - sys.stdout.write('[%8.2f] Pyomo Finished\n' % (time.time()-start_time)) + sys.stdout.write('[%8.2f] Pyomo Finished\n' % (time.time() - start_time)) if (pympler_available is True) and (data.options.runtime.profile_memory >= 1): sys.stdout.write('Maximum memory used = %d bytes\n' % data.local.max_memory) sys.stdout.flush() # - model=model - instance=instance - results=results + model = model + instance = instance + results = results # if data.options.runtime.interactive: global IPython_available if IPython_available is None: try: import IPython - IPython_available=True + + IPython_available = True except: - IPython_available=False + IPython_available = False if IPython_available: IPython.Shell.IPShellEmbed( - [''], - banner = '\n# Dropping into Python interpreter', - exit_msg = '\n# Leaving Interpreter, back to Pyomo\n')() + [''], + banner='\n# Dropping into Python interpreter', + exit_msg='\n# Leaving Interpreter, back to Pyomo\n', + )() else: import code + shell = code.InteractiveConsole(locals()) print('\n# Dropping into Python interpreter') shell.interact() print('\n# Leaving Interpreter, back to Pyomo\n') -@deprecated("configure_loggers is deprecated. The Pyomo command uses the " - "PyomoCommandLogContext to update the logger configuration", - version='5.7.3') +@deprecated( + "configure_loggers is deprecated. The Pyomo command uses the " + "PyomoCommandLogContext to update the logger configuration", + version='5.7.3', +) def configure_loggers(options=None, shutdown=False): context = PyomoCommandLogContext(options) if shutdown: # historically, configure_loggers(shutdown=True) forced 'quiet' context.options.runtime.logging = 'quiet' context.fileLogger = configure_loggers.fileLogger - context.__exit__(None,None,None) + context.__exit__(None, None, None) else: context.__enter__() configure_loggers.fileLogger = context.fileLogger + configure_loggers.fileLogger = None @@ -830,7 +926,7 @@ def __init__(self, options): def __enter__(self): _pyomo = logging.getLogger('pyomo') - self.original = ( _pyomo.level, _pyomo.handlers) + self.original = (_pyomo.level, _pyomo.handlers) # # Configure the logger @@ -872,7 +968,9 @@ def __exit__(self, et, ev, tb): self.capture.reset() -def run_command(command=None, parser=None, args=None, name='unknown', data=None, options=None): +def run_command( + command=None, parser=None, args=None, name='unknown', data=None, options=None +): """ Execute a function that processes command-line arguments and then calls a command-line driver. @@ -928,7 +1026,8 @@ def run_command(command=None, parser=None, args=None, name='unknown', data=None, try: with PyomoCommandLogContext(options): retval, errorcode = _run_command_impl( - command, parser, args, name, data, options) + command, parser, args, name, data, options + ) finally: if options.runtime.disable_gc: gc.enable() @@ -956,18 +1055,21 @@ def _run_command_impl(command, parser, args, name, data, options): except ImportError: raise ValueError( "Cannot use the 'profile' option: the Python " - "'profile' or 'pstats' package cannot be imported!") + "'profile' or 'pstats' package cannot be imported!" + ) tfile = TempfileManager.create_tempfile(suffix=".profile") tmp = profile.runctx( command.__name__ + '(options=options,parser=parser)', - command.__globals__, locals(), tfile + command.__globals__, + locals(), + tfile, ) p = pstats.Stats(tfile).strip_dirs() p.sort_stats('time', 'cumulative') p = p.print_stats(pcount) p.print_callers(pcount) p.print_callees(pcount) - p = p.sort_stats('cumulative','calls') + p = p.sort_stats('cumulative', 'calls') p.print_stats(pcount) p.print_callers(pcount) p.print_callees(pcount) @@ -988,7 +1090,9 @@ def _run_command_impl(command, parser, args, name, data, options): # If debugging is enabled or the 'catch' option is specified, then # exit. Otherwise, print an "Exiting..." message. # - if __debug__ and (options.runtime.logging == 'debug' or options.runtime.catch_errors): + if __debug__ and ( + options.runtime.logging == 'debug' or options.runtime.catch_errors + ): sys.exit(0) print('Exiting %s: %s' % (name, str(err))) errorcode = err.code @@ -998,7 +1102,9 @@ def _run_command_impl(command, parser, args, name, data, options): # If debugging is enabled or the 'catch' option is specified, then # pass the exception up the chain (to pyomo_excepthook) # - if __debug__ and (options.runtime.logging == 'debug' or options.runtime.catch_errors): + if __debug__ and ( + options.runtime.logging == 'debug' or options.runtime.catch_errors + ): raise if not options.model is None and not options.model.save_file is None: @@ -1021,9 +1127,9 @@ def _run_command_impl(command, parser, args, name, data, options): # errStr = str(err) if type(err) == KeyError and errStr != "None": - errStr = str(err).replace(r"\n","\n")[1:-1] + errStr = str(err).replace(r"\n", "\n")[1:-1] - logger.error(msg+errStr) + logger.error(msg + errStr) errorcode = 1 return retval, errorcode @@ -1034,10 +1140,13 @@ def cleanup(): for ep in ExtensionPoint(modelapi[key]): ep.deactivate() + def get_config_values(filename): if filename.endswith('.yml') or filename.endswith('.yaml'): if not yaml_available: - raise ValueError("ERROR: yaml configuration file specified, but pyyaml is not installed!") + raise ValueError( + "ERROR: yaml configuration file specified, but pyyaml is not installed!" + ) INPUT = open(filename, 'r') val = yaml.load(INPUT, **yaml_load_args) INPUT.close() diff --git a/pyomo/solvers/__init__.py b/pyomo/solvers/__init__.py index 9320e403e95..d93cfd77b3c 100644 --- a/pyomo/solvers/__init__.py +++ b/pyomo/solvers/__init__.py @@ -8,4 +8,3 @@ # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - diff --git a/pyomo/solvers/mockmip.py b/pyomo/solvers/mockmip.py index 8b1289e546e..9497a6dff9d 100644 --- a/pyomo/solvers/mockmip.py +++ b/pyomo/solvers/mockmip.py @@ -16,25 +16,26 @@ from pyomo.opt.base.solvers import _extract_version + class MockMIP(object): - """Methods used to create a mock MIP solver used for testing - """ + """Methods used to create a mock MIP solver used for testing""" def __init__(self, mockdir): - self.mock_subdir=mockdir + self.mock_subdir = mockdir - def create_command_line(self,executable,problem_files): + def create_command_line(self, executable, problem_files): self._mock_problem = basename(problem_files[0]).split('.')[0] self._mock_dir = dirname(problem_files[0]) def _default_executable(self): return "mock" + executable = _default_executable def version(self): return _extract_version('') - def _execute_command(self,cmd): + def _execute_command(self, cmd): mock_basename = join(self._mock_dir, self.mock_subdir, self._mock_problem) if self._soln_file is not None: # prefer .sol over .soln @@ -48,12 +49,12 @@ def _execute_command(self,cmd): for file in glob.glob(mock_basename + "*"): if file.split(".")[-1] != "out": shutil.copyfile(file, join(self._mock_dir, basename(file))) - log="" + log = "" fname = mock_basename + ".out" if not isfile(fname): - raise ValueError("Missing mock data file: "+fname) - INPUT=open(mock_basename + ".out") + raise ValueError("Missing mock data file: " + fname) + INPUT = open(mock_basename + ".out") for line in INPUT: - log = log+line + log = log + line INPUT.close() - return [0,log] + return [0, log] diff --git a/pyomo/solvers/plugins/__init__.py b/pyomo/solvers/plugins/__init__.py index bb071a7d4ee..797ed5036bd 100644 --- a/pyomo/solvers/plugins/__init__.py +++ b/pyomo/solvers/plugins/__init__.py @@ -9,6 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ + def load(): import pyomo.solvers.plugins.converter import pyomo.solvers.plugins.solvers diff --git a/pyomo/solvers/plugins/converter/ampl.py b/pyomo/solvers/plugins/converter/ampl.py index 5d346c704f0..b718faf2d21 100644 --- a/pyomo/solvers/plugins/converter/ampl.py +++ b/pyomo/solvers/plugins/converter/ampl.py @@ -21,7 +21,6 @@ @ProblemConverterFactory.register('ampl') class AmplMIPConverter(object): - def can_convert(self, from_type, to_type): """Returns true if this object supports the specified conversion""" # @@ -45,12 +44,12 @@ def apply(self, *args, **kwargs): _exec = pyomo.common.Executable("ampl") if not _exec: raise ConverterError("The 'ampl' executable cannot be found") - script_filename = TempfileManager.create_tempfile(suffix = '.ampl') + script_filename = TempfileManager.create_tempfile(suffix='.ampl') if args[1] == ProblemFormat.nl: - output_filename = TempfileManager.create_tempfile(suffix = '.nl') + output_filename = TempfileManager.create_tempfile(suffix='.nl') else: - output_filename = TempfileManager.create_tempfile(suffix = '.mps') + output_filename = TempfileManager.create_tempfile(suffix='.mps') cmd = [_exec.path(), script_filename] # @@ -61,22 +60,28 @@ def apply(self, *args, **kwargs): OUTPUT.write("# AMPL script for converting the following files\n") OUTPUT.write("#\n") if len(args[2:]) == 1: - OUTPUT.write('model '+args[2]+";\n") + OUTPUT.write('model ' + args[2] + ";\n") else: - OUTPUT.write('model '+args[2]+";\n") - OUTPUT.write('data '+args[3]+";\n") + OUTPUT.write('model ' + args[2] + ";\n") + OUTPUT.write('data ' + args[3] + ";\n") abs_ofile = os.path.abspath(output_filename) if args[1] == ProblemFormat.nl: - OUTPUT.write('write g'+abs_ofile[:-3]+";\n") + OUTPUT.write('write g' + abs_ofile[:-3] + ";\n") else: - OUTPUT.write('write m'+abs_ofile[:-4]+";\n") + OUTPUT.write('write m' + abs_ofile[:-4] + ";\n") OUTPUT.close() # # Execute command and cleanup # - output = subprocess.run(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) - if not os.path.exists(output_filename): #pragma:nocover - raise ApplicationError("Problem launching 'ampl' to create '%s': %s" % (output_filename, output.stdout)) - return (output_filename,),None # empty variable map + output = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) + if not os.path.exists(output_filename): # pragma:nocover + raise ApplicationError( + "Problem launching 'ampl' to create '%s': %s" + % (output_filename, output.stdout) + ) + return (output_filename,), None # empty variable map diff --git a/pyomo/solvers/plugins/converter/glpsol.py b/pyomo/solvers/plugins/converter/glpsol.py index 841c75828f0..a38892e3cf5 100644 --- a/pyomo/solvers/plugins/converter/glpsol.py +++ b/pyomo/solvers/plugins/converter/glpsol.py @@ -21,7 +21,6 @@ @ProblemConverterFactory.register('glpsol') class GlpsolMIPConverter(object): - def can_convert(self, from_type, to_type): """Returns true if this object supports the specified conversion""" # @@ -54,21 +53,29 @@ def apply(self, *args, **kwargs): # MPS->LP conversion is ignored in coverage because it's not being # used; instead, we're using pico_convert for this conversion # - modfile='' - if args[1] == ProblemFormat.mps: #pragma:nocover - ofile = TempfileManager.create_tempfile(suffix = '.glpsol.mps') - cmd.extend([ - "--check", - "--name", "MPS model derived from "+os.path.basename(args[2]), - "--wfreemps", ofile - ]) + modfile = '' + if args[1] == ProblemFormat.mps: # pragma:nocover + ofile = TempfileManager.create_tempfile(suffix='.glpsol.mps') + cmd.extend( + [ + "--check", + "--name", + "MPS model derived from " + os.path.basename(args[2]), + "--wfreemps", + ofile, + ] + ) elif args[1] == ProblemFormat.cpxlp: - ofile = TempfileManager.create_tempfile(suffix = '.glpsol.lp') - cmd.extend([ - "--check", - "--name","MPS model derived from "+os.path.basename(args[2]), - "--wcpxlp", ofile - ]) + ofile = TempfileManager.create_tempfile(suffix='.glpsol.lp') + cmd.extend( + [ + "--check", + "--name", + "MPS model derived from " + os.path.basename(args[2]), + "--wcpxlp", + ofile, + ] + ) if len(args[2:]) == 1: cmd.append(args[2]) else: @@ -76,38 +83,39 @@ def apply(self, *args, **kwargs): # Create a temporary model file, since GLPSOL can only # handle one input file # - modfile = TempfileManager.create_tempfile(suffix = '.glpsol.mod') - OUTPUT=open(modfile,"w") - flag=False + modfile = TempfileManager.create_tempfile(suffix='.glpsol.mod') + OUTPUT = open(modfile, "w") + flag = False # # Read the model file # - INPUT= open(args[2]) + INPUT = open(args[2]) for line in INPUT: line = line.strip() if line == "data;": - raise ConverterError("Problem composing mathprog model and data files - mathprog file already has data in it!") + raise ConverterError( + "Problem composing mathprog model and data files - mathprog file already has data in it!" + ) if line != "end;": - OUTPUT.write(line+'\n') + OUTPUT.write(line + '\n') INPUT.close() OUTPUT.write("data;\n") # # Read the data files # for file in args[3:]: - INPUT= open(file) + INPUT = open(file) for line in INPUT: line = line.strip() if line != "end;" and line != "data;": - OUTPUT.write(line+'\n') + OUTPUT.write(line + '\n') INPUT.close() OUTPUT.write("end;\n") OUTPUT.close() cmd.append(modfile) - subprocess.run(cmd, stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) - if not os.path.exists(ofile): #pragma:nocover - raise ApplicationError("Problem launching 'glpsol' to create "+ofile) + subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + if not os.path.exists(ofile): # pragma:nocover + raise ApplicationError("Problem launching 'glpsol' to create " + ofile) if os.path.exists(modfile): os.remove(modfile) - return (ofile,),None # empty variable map + return (ofile,), None # empty variable map diff --git a/pyomo/solvers/plugins/converter/model.py b/pyomo/solvers/plugins/converter/model.py index 9c8dba7d0a0..89a521d1521 100644 --- a/pyomo/solvers/plugins/converter/model.py +++ b/pyomo/solvers/plugins/converter/model.py @@ -21,7 +21,6 @@ @ProblemConverterFactory.register('pyomo') class PyomoMIPConverter(object): - pico_converter = PicoMIPConverter() def can_convert(self, from_type, to_type): @@ -31,11 +30,13 @@ def can_convert(self, from_type, to_type): # # Return True for specific from/to pairs # - if to_type in (ProblemFormat.nl, - ProblemFormat.cpxlp, - ProblemFormat.osil, - ProblemFormat.bar, - ProblemFormat.mps): + if to_type in ( + ProblemFormat.nl, + ProblemFormat.cpxlp, + ProblemFormat.osil, + ProblemFormat.bar, + ProblemFormat.mps, + ): return True return False @@ -63,8 +64,7 @@ def apply(self, *args, **kwds): instance = args[2] if args[1] == ProblemFormat.cpxlp: - problem_filename = TempfileManager.\ - create_tempfile(suffix = '.pyomo.lp') + problem_filename = TempfileManager.create_tempfile(suffix='.pyomo.lp') if instance is not None: if isinstance(instance, IBlock): symbol_map_id = instance.write( @@ -72,17 +72,17 @@ def apply(self, *args, **kwds): format=ProblemFormat.cpxlp, _solver_capability=capabilities, _called_by_solver=True, - **io_options) + **io_options + ) else: - (problem_filename, symbol_map_id) = \ - instance.write( - filename=problem_filename, - format=ProblemFormat.cpxlp, - solver_capability=capabilities, - io_options=io_options) + (problem_filename, symbol_map_id) = instance.write( + filename=problem_filename, + format=ProblemFormat.cpxlp, + solver_capability=capabilities, + io_options=io_options, + ) return (problem_filename,), symbol_map_id else: - # # I'm simply exposing a fatal issue with # this code path. How would we convert the @@ -92,25 +92,27 @@ def apply(self, *args, **kwds): if len(io_options): raise ValueError( "The following io_options will be ignored " - "(please create a bug report):\n\t" + - "\n\t".join("%s = %s" % (k,v) - for k,v in io_options.items())) + "(please create a bug report):\n\t" + + "\n\t".join("%s = %s" % (k, v) for k, v in io_options.items()) + ) - ans = pyomo.scripting.convert.\ - pyomo2lp(['--output',problem_filename,args[2]]) + ans = pyomo.scripting.convert.pyomo2lp( + ['--output', problem_filename, args[2]] + ) if ans.errorcode: - raise RuntimeError("pyomo2lp conversion " - "returned nonzero error code " - "(%s)" % ans.errorcode) + raise RuntimeError( + "pyomo2lp conversion " + "returned nonzero error code " + "(%s)" % ans.errorcode + ) model = ans.retval problem_filename = model.filename symbol_map = model.symbol_map - return (problem_filename,),symbol_map + return (problem_filename,), symbol_map elif args[1] == ProblemFormat.bar: - problem_filename = TempfileManager.\ - create_tempfile(suffix = '.pyomo.bar') + problem_filename = TempfileManager.create_tempfile(suffix='.pyomo.bar') if instance is not None: if isinstance(instance, IBlock): symbol_map_id = instance.write( @@ -118,17 +120,17 @@ def apply(self, *args, **kwds): format=ProblemFormat.bar, _solver_capability=capabilities, _called_by_solver=True, - **io_options) + **io_options + ) else: - (problem_filename, symbol_map_id) = \ - instance.write( - filename=problem_filename, - format=ProblemFormat.bar, - solver_capability=capabilities, - io_options=io_options) + (problem_filename, symbol_map_id) = instance.write( + filename=problem_filename, + format=ProblemFormat.bar, + solver_capability=capabilities, + io_options=io_options, + ) return (problem_filename,), symbol_map_id else: - # # I'm simply exposing a fatal issue with # this code path. How would we convert the @@ -138,36 +140,37 @@ def apply(self, *args, **kwds): if len(io_options): raise ValueError( "The following io_options will be ignored " - "(please create a bug report):\n\t" + - "\n\t".join("%s = %s" % (k,v) - for k,v in io_options.items())) + "(please create a bug report):\n\t" + + "\n\t".join("%s = %s" % (k, v) for k, v in io_options.items()) + ) - ans = pyomo.scripting.convert.\ - pyomo2bar(['--output',problem_filename,args[2]]) + ans = pyomo.scripting.convert.pyomo2bar( + ['--output', problem_filename, args[2]] + ) if ans.errorcode: - raise RuntimeError("pyomo2bar conversion " - "returned nonzero error code " - "(%s)" % ans.errorcode) + raise RuntimeError( + "pyomo2bar conversion " + "returned nonzero error code " + "(%s)" % ans.errorcode + ) model = ans.retval problem_filename = model.filename symbol_map = model.symbol_map - return (problem_filename,),symbol_map + return (problem_filename,), symbol_map elif args[1] in [ProblemFormat.mps, ProblemFormat.nl]: if args[1] == ProblemFormat.nl: - problem_filename = TempfileManager.\ - create_tempfile(suffix = '.pyomo.nl') + problem_filename = TempfileManager.create_tempfile(suffix='.pyomo.nl') if io_options.get("symbolic_solver_labels", False): TempfileManager.add_tempfile( - problem_filename[:-3]+".row", - exists=False) + problem_filename[:-3] + ".row", exists=False + ) TempfileManager.add_tempfile( - problem_filename[:-3]+".col", - exists=False) + problem_filename[:-3] + ".col", exists=False + ) else: assert args[1] == ProblemFormat.mps - problem_filename = TempfileManager.\ - create_tempfile(suffix = '.pyomo.mps') + problem_filename = TempfileManager.create_tempfile(suffix='.pyomo.mps') if instance is not None: if isinstance(instance, IBlock): symbol_map_id = instance.write( @@ -175,17 +178,17 @@ def apply(self, *args, **kwds): format=args[1], _solver_capability=capabilities, _called_by_solver=True, - **io_options) + **io_options + ) else: - (problem_filename, symbol_map_id) = \ - instance.write( - filename=problem_filename, - format=args[1], - solver_capability=capabilities, - io_options=io_options) + (problem_filename, symbol_map_id) = instance.write( + filename=problem_filename, + format=args[1], + solver_capability=capabilities, + io_options=io_options, + ) return (problem_filename,), symbol_map_id else: - # # I'm simply exposing a fatal issue with # this code path. How would we convert the @@ -195,22 +198,25 @@ def apply(self, *args, **kwds): if len(io_options): raise ValueError( "The following io_options will be ignored " - "(please create a bug report):\n\t" + - "\n\t".join("%s = %s" % (k,v) - for k,v in io_options.items())) + "(please create a bug report):\n\t" + + "\n\t".join("%s = %s" % (k, v) for k, v in io_options.items()) + ) - ans = pyomo.scripting.convert.\ - pyomo2nl(['--output',problem_filename,args[2]]) + ans = pyomo.scripting.convert.pyomo2nl( + ['--output', problem_filename, args[2]] + ) if ans.errorcode: - raise RuntimeError("pyomo2nl conversion " - "returned nonzero error " - "code (%s)" % ans.errorcode) + raise RuntimeError( + "pyomo2nl conversion " + "returned nonzero error " + "code (%s)" % ans.errorcode + ) model = ans.retval problem_filename = model.filename symbol_map = model.symbol_map if args[1] == ProblemFormat.nl: - return (problem_filename,),symbol_map + return (problem_filename,), symbol_map # # Convert from NL to MPS # @@ -221,16 +227,15 @@ def apply(self, *args, **kwds): # NOTE: we should generalize this so it doesn't strictly # depend on the PICO converter utility. # - ans = self.pico_converter.apply(ProblemFormat.nl, - ProblemFormat.mps, - problem_filename) + ans = self.pico_converter.apply( + ProblemFormat.nl, ProblemFormat.mps, problem_filename + ) os.remove(problem_filename) return ans elif args[1] == ProblemFormat.osil: if False: - problem_filename = TempfileManager.\ - create_tempfile(suffix='pyomo.osil') + problem_filename = TempfileManager.create_tempfile(suffix='pyomo.osil') if instance: if isinstance(instance, IBlock): symbol_map_id = instance.write( @@ -238,17 +243,19 @@ def apply(self, *args, **kwds): format=ProblemFormat.osil, _solver_capability=capabilities, _called_by_solver=True, - **io_options) + **io_options + ) else: - (problem_filename, symbol_map_id) = \ - instance.write( - filename=problem_filename, - format=ProblemFormat.osil, - solver_capability=capabilities, - io_options=io_options) + (problem_filename, symbol_map_id) = instance.write( + filename=problem_filename, + format=ProblemFormat.osil, + solver_capability=capabilities, + io_options=io_options, + ) return (problem_filename,), None else: raise NotImplementedError( "There is currently no " "script conversion available from " - "Pyomo to OSiL format.") + "Pyomo to OSiL format." + ) diff --git a/pyomo/solvers/plugins/converter/pico.py b/pyomo/solvers/plugins/converter/pico.py index 2715dd028aa..7fd0d11222b 100644 --- a/pyomo/solvers/plugins/converter/pico.py +++ b/pyomo/solvers/plugins/converter/pico.py @@ -20,7 +20,6 @@ class PicoMIPConverter(object): - def can_convert(self, from_type, to_type): """Returns true if this object supports the specified conversion""" # @@ -49,31 +48,45 @@ def apply(self, *args, **kwargs): Run the external pico_convert utility """ if len(args) != 3: - raise ConverterError("Cannot apply pico_convert with more than one filename or model") + raise ConverterError( + "Cannot apply pico_convert with more than one filename or model" + ) _exe = pyomo.common.Executable("pico_convert") if not _exe: raise ConverterError("The 'pico_convert' application cannot be found") pico_convert_cmd = _exe.path() - target=str(args[1]) - if target=="cpxlp": - target="lp" + target = str(args[1]) + if target == "cpxlp": + target = "lp" # NOTE: if you have an extra "." in the suffix, the pico_convert program fails to output to the correct filename. - output_filename = TempfileManager.create_tempfile(suffix = 'pico_convert.' + target) + output_filename = TempfileManager.create_tempfile( + suffix='pico_convert.' + target + ) if not isinstance(args[2], str): - fname= TempfileManager.create_tempfile(suffix= 'pico_convert.' +str(args[0])) + fname = TempfileManager.create_tempfile( + suffix='pico_convert.' + str(args[0]) + ) args[2].write(filename=fname, format=args[1]) - cmd = pico_convert_cmd +" --output="+output_filename+" "+target+" "+fname + cmd = ( + pico_convert_cmd + + " --output=" + + output_filename + + " " + + target + + " " + + fname + ) else: - cmd = pico_convert_cmd +" --output="+output_filename+" "+target + cmd = pico_convert_cmd + " --output=" + output_filename + " " + target for item in args[2:]: if not os.path.exists(item): - raise ConverterError("File "+item+" does not exist!") - cmd = cmd + " "+item - print("Running command: "+cmd) - subprocess.run(cmd, stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) - if not os.path.exists(output_filename): #pragma:nocover - raise ApplicationError(\ - "Problem launching 'pico_convert' to create "+output_filename) - return (output_filename,),None # no variable map at the moment + raise ConverterError("File " + item + " does not exist!") + cmd = cmd + " " + item + print("Running command: " + cmd) + subprocess.run(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) + if not os.path.exists(output_filename): # pragma:nocover + raise ApplicationError( + "Problem launching 'pico_convert' to create " + output_filename + ) + return (output_filename,), None # no variable map at the moment diff --git a/pyomo/solvers/plugins/solvers/ASL.py b/pyomo/solvers/plugins/solvers/ASL.py index 198e04f2277..debcd27f75e 100644 --- a/pyomo/solvers/plugins/solvers/ASL.py +++ b/pyomo/solvers/plugins/solvers/ASL.py @@ -26,14 +26,15 @@ from pyomo.core import TransformationFactory import logging + logger = logging.getLogger('pyomo.solvers') -@SolverFactory.register('asl', doc='Interface for solvers using the AMPL Solver Library') +@SolverFactory.register( + 'asl', doc='Interface for solvers using the AMPL Solver Library' +) class ASL(SystemCallSolver): - """A generic optimizer that uses the AMPL Solver Library to interface with applications. - """ - + """A generic optimizer that uses the AMPL Solver Library to interface with applications.""" def __init__(self, **kwds): # @@ -47,7 +48,7 @@ def __init__(self, **kwds): # Setup valid problem formats, and valid results for each problem format. # Also set the default problem and results formats. # - self._valid_problem_formats=[ProblemFormat.nl] + self._valid_problem_formats = [ProblemFormat.nl] self._valid_result_formats = {} self._valid_result_formats[ProblemFormat.nl] = [ResultsFormat.sol] self.set_problem_format(ProblemFormat.nl) @@ -74,14 +75,14 @@ def _default_executable(self): logger.warning("No solver option specified for ASL solver interface") return None if not self.options.solver: - logger.warning( - "No solver option specified for ASL solver interface") + logger.warning("No solver option specified for ASL solver interface") return None executable = Executable(self.options.solver) if not executable: logger.warning( "Could not locate the '%s' executable, which is required " - "for solver %s" % (self.options.solver, self.name)) + "for solver %s" % (self.options.solver, self.name) + ) self.enable = False return None return executable.path() @@ -94,11 +95,13 @@ def _get_version(self): if solver_exec is None: return _extract_version('') try: - results = subprocess.run([solver_exec, "-v"], - timeout=5, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + results = subprocess.run( + [solver_exec, "-v"], + timeout=5, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) ver = _extract_version(results.stdout) if ver is None: # Some ASL solvers do not export a version number @@ -116,28 +119,30 @@ def available(self, exception_flag=True): return self.version() is not None def create_command_line(self, executable, problem_files): - assert(self._problem_format == ProblemFormat.nl) - assert(self._results_format == ResultsFormat.sol) + assert self._problem_format == ProblemFormat.nl + assert self._results_format == ResultsFormat.sol # # Define log file # solver_name = os.path.basename(self.options.solver) if self._log_file is None: - self._log_file = TempfileManager.\ - create_tempfile(suffix="_%s.log" % solver_name) + self._log_file = TempfileManager.create_tempfile( + suffix="_%s.log" % solver_name + ) # # Define solution file # if self._soln_file is not None: # the solution file can not be redefined - logger.warning("The 'soln_file' keyword will be ignored " - "for solver="+self.type) + logger.warning( + "The 'soln_file' keyword will be ignored for solver=" + self.type + ) fname = problem_files[0] if '.' in fname: tmp = fname.split('.') fname = '.'.join(tmp[:-1]) - self._soln_file = fname+".sol" + self._soln_file = fname + ".sol" # # Define results file (since an external parser is used) @@ -147,7 +152,7 @@ def create_command_line(self, executable, problem_files): # # Define command line # - env=os.environ.copy() + env = os.environ.copy() # # Merge the PYOMO_AMPLFUNC (externals defined within # Pyomo/Pyomo) with any user-specified external function @@ -172,20 +177,19 @@ def create_command_line(self, executable, problem_files): # Because of this, I think the only reliable way to pass options for any # solver is by using the command line # - opt=[] + opt = [] for key in self.options: if key == 'solver': continue - if isinstance(self.options[key], str) and \ - (' ' in self.options[key]): - opt.append(key+"=\""+str(self.options[key])+"\"") - cmd.append(str(key)+"="+str(self.options[key])) + if isinstance(self.options[key], str) and (' ' in self.options[key]): + opt.append(key + "=\"" + str(self.options[key]) + "\"") + cmd.append(str(key) + "=" + str(self.options[key])) elif key == 'subsolver': - opt.append("solver="+str(self.options[key])) - cmd.append(str(key)+"="+str(self.options[key])) + opt.append("solver=" + str(self.options[key])) + cmd.append(str(key) + "=" + str(self.options[key])) else: - opt.append(key+"="+str(self.options[key])) - cmd.append(str(key)+"="+str(self.options[key])) + opt.append(key + "=" + str(self.options[key])) + cmd.append(str(key) + "=" + str(self.options[key])) envstr = "%s_options" % self.options.solver # Merge with any options coming in through the environment @@ -194,8 +198,7 @@ def create_command_line(self, executable, problem_files): return Bunch(cmd=cmd, log_file=self._log_file, env=env) def _presolve(self, *args, **kwds): - if (not isinstance(args[0], str)) and \ - (not isinstance(args[0], IBlock)): + if (not isinstance(args[0], str)) and (not isinstance(args[0], IBlock)): self._instance = args[0] xfrm = TransformationFactory('mpec.nl') xfrm.apply_to(self._instance) @@ -214,11 +217,12 @@ def _postsolve(self): # # Reclassify complementarity components # - mpec=False + mpec = False if not self._instance is None: from pyomo.mpec import Complementarity + for cuid in self._instance._transformation_data['mpec.nl'].compl_cuids: - mpec=True + mpec = True cobj = cuid.find_component_on(self._instance) cobj.parent_block().reclassify_component_type(cobj, Complementarity) # @@ -227,32 +231,27 @@ def _postsolve(self): @SolverFactory.register('_mock_asl') -class MockASL(ASL,MockMIP): - """A Mock ASL solver used for testing - """ +class MockASL(ASL, MockMIP): + """A Mock ASL solver used for testing""" def __init__(self, **kwds): try: - ASL.__init__(self,**kwds) - except ApplicationError: #pragma:nocover - pass #pragma:nocover - MockMIP.__init__(self,"asl") + ASL.__init__(self, **kwds) + except ApplicationError: # pragma:nocover + pass # pragma:nocover + MockMIP.__init__(self, "asl") self._assert_available = True def available(self, exception_flag=True): - return ASL.available(self,exception_flag) + return ASL.available(self, exception_flag) - def create_command_line(self,executable, problem_files): - command = ASL.create_command_line(self, - executable, - problem_files) - MockMIP.create_command_line(self, - executable, - problem_files) + def create_command_line(self, executable, problem_files): + command = ASL.create_command_line(self, executable, problem_files) + MockMIP.create_command_line(self, executable, problem_files) return command def executable(self): return MockMIP.executable(self) - def _execute_command(self,cmd): - return MockMIP._execute_command(self,cmd) + def _execute_command(self, cmd): + return MockMIP._execute_command(self, cmd) diff --git a/pyomo/solvers/plugins/solvers/BARON.py b/pyomo/solvers/plugins/solvers/BARON.py index 3f3d7f31219..eb5ac0830c5 100644 --- a/pyomo/solvers/plugins/solvers/BARON.py +++ b/pyomo/solvers/plugins/solvers/BARON.py @@ -22,17 +22,21 @@ from pyomo.opt.base import ProblemFormat, ResultsFormat, OptSolver from pyomo.opt.base.solvers import _extract_version, SolverFactory from pyomo.opt.results import ( - SolverResults, Solution, SolverStatus, TerminationCondition, + SolverResults, + Solution, + SolverStatus, + TerminationCondition, SolutionStatus, ) from pyomo.opt.solver import SystemCallSolver logger = logging.getLogger('pyomo.solvers') -@SolverFactory.register('baron', doc='The BARON MINLP solver') + +@SolverFactory.register('baron', doc='The BARON MINLP solver') class BARONSHELL(SystemCallSolver): - """The BARON MINLP solver - """ + """The BARON MINLP solver""" + _solver_info_cache = {} def __init__(self, **kwds): @@ -44,7 +48,7 @@ def __init__(self, **kwds): self._tim_file = None - self._valid_problem_formats=[ProblemFormat.bar] + self._valid_problem_formats = [ProblemFormat.bar] self._valid_result_formats = {} self._valid_result_formats[ProblemFormat.bar] = [ResultsFormat.soln] self.set_problem_format(ProblemFormat.bar) @@ -57,8 +61,7 @@ def __init__(self, **kwds): self._capabilities.sos1 = False self._capabilities.sos2 = False - - # CLH: Coppied from cpxlp.py, the cplex file writer. + # CLH: Copied from cpxlp.py, the cplex file writer. # Keven Hunter made a nice point about using %.16g in his attachment # to ticket #4319. I am adjusting this to %.17g as this mocks the # behavior of using %r (i.e., float('%r'%) == ) with @@ -73,37 +76,35 @@ def __init__(self, **kwds): self._precision_string = '.17g' def _get_dummy_input_files(self, check_license=False): - with tempfile.NamedTemporaryFile(mode='w', - delete=False) as f: + with tempfile.NamedTemporaryFile(mode='w', delete=False) as f: # For some reason, if results: 0 is added to the options # section, it causes a file named fort.71 to appear. # So point the ResName option to a temporary file that # we will delete - with tempfile.NamedTemporaryFile(mode='w', - delete=False) as fr: + with tempfile.NamedTemporaryFile(mode='w', delete=False) as fr: pass # Doing this for the remaining output files as well. # Can't seem to reliably control the files created by # Baron otherwise. - with tempfile.NamedTemporaryFile(mode='w', - delete=False) as fs: + with tempfile.NamedTemporaryFile(mode='w', delete=False) as fs: pass - with tempfile.NamedTemporaryFile(mode='w', - delete=False) as ft: + with tempfile.NamedTemporaryFile(mode='w', delete=False) as ft: pass - f.write("//This is a dummy .bar file created to " - "return the baron version//\n" - "OPTIONS {\n" - "results: 1;\n" - "ResName: \""+fr.name+"\";\n" - "summary: 1;\n" - "SumName: \""+fs.name+"\";\n" - "times: 1;\n" - "TimName: \""+ft.name+"\";\n" - "}\n") + f.write( + "//This is a dummy .bar file created to " + "return the baron version//\n" + "OPTIONS {\n" + "results: 1;\n" + "ResName: \"" + fr.name + "\";\n" + "summary: 1;\n" + "SumName: \"" + fs.name + "\";\n" + "times: 1;\n" + "TimName: \"" + ft.name + "\";\n" + "}\n" + ) f.write("POSITIVE_VARIABLES ") if check_license: - f.write(", ".join("x"+str(i) for i in range(11))) + f.write(", ".join("x" + str(i) for i in range(11))) else: f.write("x1") f.write(";\n") @@ -130,11 +131,13 @@ def license_is_valid(self): if not solver_exec: licensed = False else: - fnames= self._get_dummy_input_files(check_license=True) + fnames = self._get_dummy_input_files(check_license=True) try: - process = subprocess.Popen([solver_exec, fnames[0]], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) + process = subprocess.Popen( + [solver_exec, fnames[0]], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) stdout, stderr = process.communicate() assert stderr is None rc = 0 @@ -156,8 +159,10 @@ def license_is_valid(self): def _default_executable(self): executable = Executable("baron") if not executable: - logger.warning("Could not locate the 'baron' executable, " - "which is required for solver %s" % self.name) + logger.warning( + "Could not locate the 'baron' executable, " + "which is required for solver %s" % self.name + ) self.enable = False return None return executable.path() @@ -175,10 +180,12 @@ def _get_version(self): else: fnames = self._get_dummy_input_files(check_license=False) try: - results = subprocess.run([solver_exec, fnames[0]], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + results = subprocess.run( + [solver_exec, fnames[0]], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) ver = _extract_version(results.stdout) finally: self._remove_dummy_input_files(fnames) @@ -187,34 +194,24 @@ def _get_version(self): return ver def create_command_line(self, executable, problem_files): - # The solution file is created in the _convert_problem function. # The bar file needs the solution filename in the OPTIONS section, but # this function is executed after the bar problem file writing. - #self._soln_file = pyomo.common.tempfiles.TempfileManager.create_tempfile(suffix = '.baron.sol') - + # self._soln_file = pyomo.common.tempfiles.TempfileManager.create_tempfile(suffix = '.baron.sol') cmd = [executable, problem_files[0]] if self._timer: cmd.insert(0, self._timer) - return Bunch( cmd=cmd, - log_file=self._log_file, - env=None ) + return Bunch(cmd=cmd, log_file=self._log_file, env=None) # # Assuming the variable values stored in the model will # automatically be included in the Baron input file # (returning True implies the opposite and requires another function) def warm_start_capable(self): - return False - def _convert_problem(self, - args, - problem_format, - valid_problem_formats, - **kwds): - + def _convert_problem(self, args, problem_format, valid_problem_formats, **kwds): # Baron needs all solver options and file redirections # inside the input file, so we need to input those # here through io_options before calling the baron writer @@ -223,22 +220,19 @@ def _convert_problem(self, # Define log file # if self._log_file is None: - self._log_file = TempfileManager.\ - create_tempfile(suffix = '.baron.log') + self._log_file = TempfileManager.create_tempfile(suffix='.baron.log') # # Define solution file # if self._soln_file is None: - self._soln_file = TempfileManager.\ - create_tempfile(suffix = '.baron.soln') + self._soln_file = TempfileManager.create_tempfile(suffix='.baron.soln') - self._tim_file = TempfileManager.\ - create_tempfile(suffix = '.baron.tim') + self._tim_file = TempfileManager.create_tempfile(suffix='.baron.tim') # # Create options to send through as io_options - # containing all relevent info needed in the Baron file + # containing all relevant info needed in the Baron file # solver_options = {} solver_options['ResName'] = self._soln_file @@ -250,12 +244,13 @@ def _convert_problem(self, 'Ignoring user-specified option "%s=%s". This ' 'option is set to %s, and can be overridden using ' 'the "solnfile" argument to the solve() method.' - % (key, self.options[key], self._soln_file)) + % (key, self.options[key], self._soln_file) + ) elif lower_key == 'timname': logger.warning( 'Ignoring user-specified option "%s=%s". This ' - 'option is set to %s.' - % (key, self.options[key], self._tim_file)) + 'option is set to %s.' % (key, self.options[key], self._tim_file) + ) else: solver_options[key] = self.options[key] @@ -265,29 +260,27 @@ def _convert_problem(self, break if 'solver_options' in kwds: - raise ValueError("Baron solver options should be set " - "using the options object on this " - "solver plugin. The solver_options " - "I/O options dict for the Baron writer " - "will be populated by this plugin's " - "options object") + raise ValueError( + "Baron solver options should be set " + "using the options object on this " + "solver plugin. The solver_options " + "I/O options dict for the Baron writer " + "will be populated by this plugin's " + "options object" + ) kwds['solver_options'] = solver_options - return OptSolver._convert_problem(self, - args, - problem_format, - valid_problem_formats, - **kwds) + return OptSolver._convert_problem( + self, args, problem_format, valid_problem_formats, **kwds + ) def process_logfile(self): - results = SolverResults() # # Process logfile # - cuts = ['Bilinear', 'LD-Envelopes', 'Multilinears', - 'Convexity', 'Integrality'] + cuts = ['Bilinear', 'LD-Envelopes', 'Multilinears', 'Convexity', 'Integrality'] # Collect cut-generation statistics from the log file with open(self._log_file) as OUTPUT: @@ -295,14 +288,14 @@ def process_logfile(self): for field in cuts: if field in line: try: - results.solver.statistics[field+'_cuts'] = int( - line.split()[1]) + results.solver.statistics[field + '_cuts'] = int( + line.split()[1] + ) except: pass return results - def process_soln_file(self, results): # check for existence of the solution and time file. Not sure why we # just return - would think that we would want to indicate @@ -315,7 +308,7 @@ def process_soln_file(self, results): return with open(self._tim_file, "r") as TimFile: - with open(self._soln_file,"r") as INPUT: + with open(self._soln_file, "r") as INPUT: self._process_soln_file(results, TimFile, INPUT) def _process_soln_file(self, results, TimFile, INPUT): @@ -341,15 +334,17 @@ def _process_soln_file(self, results, TimFile, INPUT): extract_price = False for suffix in self._suffixes: flag = False - if re.match(suffix, "rc"): #baron_marginal + if re.match(suffix, "rc"): # baron_marginal extract_marginals = True flag = True - if re.match(suffix, "dual"): #baron_price + if re.match(suffix, "dual"): # baron_price extract_price = True flag = True if not flag: - raise RuntimeError("***The BARON solver plugin cannot" - "extract solution suffix="+suffix) + raise RuntimeError( + "***The BARON solver plugin cannot" + "extract solution suffix=" + suffix + ) soln = Solution() @@ -402,71 +397,64 @@ def _process_soln_file(self, results, TimFile, INPUT): soln.objective[objective_label] = {'Value': None} results.problem.number_of_objectives = 1 if objective is not None: - results.problem.sense = \ + results.problem.sense = ( 'minimizing' if objective.is_minimizing() else 'maximizing' + ) if solver_status == '1': results.solver.status = SolverStatus.ok elif solver_status == '2': results.solver.status = SolverStatus.error results.solver.termination_condition = TerminationCondition.error - #CLH: I wasn't sure if this was double reporting errors. I + # CLH: I wasn't sure if this was double reporting errors. I # just filled in one termination_message for now - results.solver.termination_message = \ - ("Insufficient memory to store the number of nodes required " - "for this seach tree. Increase physical memory or change " - "algorithmic options") + results.solver.termination_message = ( + "Insufficient memory to store the number of nodes required " + "for this search tree. Increase physical memory or change " + "algorithmic options" + ) elif solver_status == '3': results.solver.status = SolverStatus.ok - results.solver.termination_condition = \ - TerminationCondition.maxIterations + results.solver.termination_condition = TerminationCondition.maxIterations elif solver_status == '4': results.solver.status = SolverStatus.ok - results.solver.termination_condition = \ - TerminationCondition.maxTimeLimit + results.solver.termination_condition = TerminationCondition.maxTimeLimit elif solver_status == '5': results.solver.status = SolverStatus.warning - results.solver.termination_condition = \ - TerminationCondition.other + results.solver.termination_condition = TerminationCondition.other elif solver_status == '6': results.solver.status = SolverStatus.aborted - results.solver.termination_condition = \ - TerminationCondition.userInterrupt + results.solver.termination_condition = TerminationCondition.userInterrupt elif solver_status == '7': results.solver.status = SolverStatus.error - results.solver.termination_condition = \ - TerminationCondition.error + results.solver.termination_condition = TerminationCondition.error elif solver_status == '8': results.solver.status = SolverStatus.unknown - results.solver.termination_condition = \ - TerminationCondition.unknown + results.solver.termination_condition = TerminationCondition.unknown elif solver_status == '9': results.solver.status = SolverStatus.error - results.solver.termination_condition = \ - TerminationCondition.solverFailure + results.solver.termination_condition = TerminationCondition.solverFailure elif solver_status == '10': results.solver.status = SolverStatus.error - results.solver.termination_condition = \ - TerminationCondition.error + results.solver.termination_condition = TerminationCondition.error elif solver_status == '11': results.solver.status = SolverStatus.aborted - results.solver.termination_condition = \ + results.solver.termination_condition = ( TerminationCondition.licensingProblems - results.solver.termination_message = \ + ) + results.solver.termination_message = ( 'Run terminated because of a licensing error.' + ) if model_status == '1': soln.status = SolutionStatus.optimal - results.solver.termination_condition = \ - TerminationCondition.optimal + results.solver.termination_condition = TerminationCondition.optimal elif model_status == '2': soln.status = SolutionStatus.infeasible - results.solver.termination_condition = \ - TerminationCondition.infeasible + results.solver.termination_condition = TerminationCondition.infeasible elif model_status == '3': soln.status = SolutionStatus.unbounded - results.solver.termination_condition = \ - TerminationCondition.unbounded + results.solver.termination_condition = TerminationCondition.unbounded elif model_status == '4': soln.status = SolutionStatus.feasible elif model_status == '5': @@ -478,8 +466,7 @@ def _process_soln_file(self, results, TimFile, INPUT): # Solutions that were preprocessed infeasible, were aborted, # or gave error will not have filled in res.lst files - if results.solver.status not in [SolverStatus.error, - SolverStatus.aborted]: + if results.solver.status not in [SolverStatus.error, SolverStatus.aborted]: # # Extract the solution vector and objective value from BARON # @@ -505,12 +492,13 @@ def _process_soln_file(self, results, TimFile, INPUT): objective_value = float(INPUT.readline().split()[4]) except IndexError: # No objective value, so no solution to return - if solver_status == '1' and model_status in ('1','4'): + if solver_status == '1' and model_status in ('1', '4'): logger.error( -"""Failed to process BARON solution file: could not extract the final + """Failed to process BARON solution file: could not extract the final objective value, but BARON completed normally. This is indicative of a bug in Pyomo's BARON solution parser. Please report this (along with -the Pyomo model and BARON version) to the Pyomo Developers.""") +the Pyomo model and BARON version) to the Pyomo Developers.""" + ) return INPUT.readline() INPUT.readline() @@ -571,7 +559,6 @@ def _process_soln_file(self, results, TimFile, INPUT): # filled with variable name, number, and value. Also, # optionally fill the baron_marginal suffix for i, (label, val) in enumerate(zip(var_name, var_value)): - soln_variable[label] = {"Value": val} # Only adds the baron_marginal key it is requested and exists @@ -588,14 +575,15 @@ def _process_soln_file(self, results, TimFile, INPUT): # for i, price_val in enumerate(con_price, 1): # use the alias made by the Baron writer - con_label = ".c"+str(i) + con_label = ".c" + str(i) soln_constraint[con_label] = {"dual": price_val} # This check is necessary because solutions that are # preprocessed infeasible have ok solver status, but no # objective value located in the res.lst file - if not (SolvedDuringPreprocessing and \ - soln.status == SolutionStatus.infeasible): + if not ( + SolvedDuringPreprocessing and soln.status == SolutionStatus.infeasible + ): soln.objective[objective_label] = {'Value': objective_value} # Fill the solution for most cases, except errors diff --git a/pyomo/solvers/plugins/solvers/CBCplugin.py b/pyomo/solvers/plugins/solvers/CBCplugin.py index e169adf1993..86871dbc1ac 100644 --- a/pyomo/solvers/plugins/solvers/CBCplugin.py +++ b/pyomo/solvers/plugins/solvers/CBCplugin.py @@ -26,7 +26,14 @@ from pyomo.core import Var from pyomo.opt.base import ProblemFormat, ResultsFormat, OptSolver from pyomo.opt.base.solvers import _extract_version, SolverFactory -from pyomo.opt.results import SolverResults, SolverStatus, TerminationCondition, SolutionStatus, ProblemSense, Solution +from pyomo.opt.results import ( + SolverResults, + SolverStatus, + TerminationCondition, + SolutionStatus, + ProblemSense, + Solution, +) from pyomo.opt.solver import SystemCallSolver from pyomo.solvers.mockmip import MockMIP @@ -35,13 +42,12 @@ @SolverFactory.register('cbc', doc='The CBC LP/MIP solver') class CBC(OptSolver): - """The CBC LP/MIP solver - """ + """The CBC LP/MIP solver""" def __new__(cls, *args, **kwds): mode = kwds.pop('solver_io', 'lp') - if mode == 'lp' or mode is None: + if mode == 'lp' or mode is None: opt = SolverFactory('_cbc_shell', **kwds) opt.set_problem_format(ProblemFormat.cpxlp) return opt @@ -60,7 +66,7 @@ def __new__(cls, *args, **kwds): # options (-s in particular, which is required for # streaming output of all asl solvers). Therefore we need # to send it through the cbc_shell instead of ASL - opt = SolverFactory('_cbc_shell',**kwds) + opt = SolverFactory('_cbc_shell', **kwds) opt.set_problem_format(ProblemFormat.nl) return opt elif mode == 'os': @@ -72,11 +78,9 @@ def __new__(cls, *args, **kwds): return - -@SolverFactory.register('_cbc_shell', doc='Shell interface to the CBC LP/MIP solver') +@SolverFactory.register('_cbc_shell', doc='Shell interface to the CBC LP/MIP solver') class CBCSHELL(SystemCallSolver): - """Shell interface to the CBC LP/MIP solver - """ + """Shell interface to the CBC LP/MIP solver""" def __init__(self, **kwds): # @@ -99,15 +103,15 @@ def __init__(self, **kwds): # Set up valid problem formats and valid results for each # problem format # - self._valid_problem_formats=[ + self._valid_problem_formats = [ ProblemFormat.cpxlp, ProblemFormat.nl, - #ProblemFormat.mps, + # ProblemFormat.mps, ] - self._valid_result_formats={ + self._valid_result_formats = { ProblemFormat.cpxlp: [ResultsFormat.soln], ProblemFormat.nl: [ResultsFormat.sol], - #ProblemFormat.mps: [ResultsFormat.soln], + # ProblemFormat.mps: [ResultsFormat.soln], } # Note: Undefined capabilities default to 'None' @@ -129,7 +133,7 @@ def __init__(self, **kwds): self.set_problem_format(ProblemFormat.cpxlp) def set_problem_format(self, format): - super(CBCSHELL,self).set_problem_format(format) + super(CBCSHELL, self).set_problem_format(format) if self._problem_format == ProblemFormat.cpxlp: self._capabilities.sos1 = False self._capabilities.sos2 = False @@ -144,22 +148,22 @@ def set_problem_format(self, format): if _ver is None: _ver_str = "" else: - _ver_str ='.'.join(str(i) for i in _ver) + _ver_str = '.'.join(str(i) for i in _ver) logger.warning( f"found CBC version {_ver_str} < 2.7; " "ASL support disabled (falling back on LP interface)." ) - logger.warning("Upgrade CBC to activate ASL " - "support in this plugin") + logger.warning("Upgrade CBC to activate ASL support in this plugin") # Fall back on LP self.set_problem_format(ProblemFormat.cpxlp) else: - logger.warning("CBC solver is not compiled with ASL " - "interface (falling back on LP interface).") + logger.warning( + "CBC solver is not compiled with ASL " + "interface (falling back on LP interface)." + ) # Fall back on LP self.set_problem_format(ProblemFormat.cpxlp) - def _default_results_format(self, prob_format): if prob_format == ProblemFormat.nl: return ResultsFormat.sol @@ -169,10 +173,9 @@ def warm_start_capable(self): if self._problem_format != ProblemFormat.cpxlp: return False _ver = self.version() - return _ver and _ver >= (2,8,0,0) + return _ver and _ver >= (2, 8, 0, 0) def _write_soln_file(self, instance, filename): - # Maybe this could be a useful method for any instance. if isinstance(instance, IBlock): @@ -186,15 +189,13 @@ def _write_soln_file(self, instance, filename): for var in instance.component_data_objects(Var): # Cbc only expects integer variables with non-zero # values for mipstart. - if var.value \ - and (var.is_integer() or var.is_binary()) \ - and (id(var) in byObject): + if ( + var.value + and (var.is_integer() or var.is_binary()) + and (id(var) in byObject) + ): name = byObject[id(var)] - solnfile.write( - '{} {} {}\n'.format( - column_index, name, var.value - ) - ) + solnfile.write('{} {} {}\n'.format(column_index, name, var.value)) # Cbc ignores column indexes, so the value does not matter. column_index += 1 @@ -202,12 +203,10 @@ def _write_soln_file(self, instance, filename): # Write a warm-start file in the SOLN format. # def _warm_start(self, instance): - self._write_soln_file(instance, self._warm_start_file_name) # over-ride presolve to extract the warm-start keyword, if specified. def _presolve(self, *args, **kwds): - # create a context in the temporary file manager for # this plugin - is "pop"ed in the _postsolve method. TempfileManager.push() @@ -238,7 +237,8 @@ def _presolve(self, *args, **kwds): if self._warm_start_file_name is None: assert not user_warmstart self._warm_start_file_name = TempfileManager.create_tempfile( - suffix = '.cbc.soln') + suffix='.cbc.soln' + ) # CBC does not cleanly handle windows-style drive names in the # MIPSTART file name (though at least 2.10.5). @@ -255,7 +255,8 @@ def _presolve(self, *args, **kwds): logger.warning( "warmstart_file points to a file on a drive " "different from the current working directory. " - "CBC is likely to (silently) ignore the warmstart.") + "CBC is likely to (silently) ignore the warmstart." + ) # let the base class handle any remaining keywords/actions. # let the base class handle any remaining keywords/actions. @@ -265,29 +266,30 @@ def _presolve(self, *args, **kwds): # symbol_map is actually constructed! if (len(args) > 0) and (not isinstance(args[0], str)): - if len(args) != 1: raise ValueError( "CBCplugin _presolve method can only handle a single " - "problem instance - %s were supplied" % (len(args),)) + "problem instance - %s were supplied" % (len(args),) + ) # write the warm-start file - currently only supports MIPs. # we only know how to deal with a single problem instance. if self._warm_start_solve and (not user_warmstart): - start_time = time.time() self._warm_start(args[0]) end_time = time.time() if self._report_timing is True: - print("Warm start write time=%.2f seconds" % (end_time-start_time)) - + print( + "Warm start write time=%.2f seconds" % (end_time - start_time) + ) def _default_executable(self): executable = Executable("cbc") if not executable: logger.warning( "Could not locate the 'cbc' executable, which is " - "required for solver %s" % self.name) + "required for solver %s" % self.name + ) self.enable = False return None return executable.path() @@ -301,7 +303,7 @@ def _get_version(self): timeout=5, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - universal_newlines=True + universal_newlines=True, ) _version = _extract_version(results.stdout) if _version is None: @@ -314,7 +316,7 @@ def _compiled_with_asl(self): timeout=5, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - universal_newlines=True + universal_newlines=True, ) return 'No match for AMPL'.lower() not in results.stdout.lower() @@ -340,9 +342,9 @@ def create_command_line(self, executable, problem_files): else: problem_filename_prefix = tmp[0] if self._results_format is ResultsFormat.sol: - self._soln_file = problem_filename_prefix+".sol" + self._soln_file = problem_filename_prefix + ".sol" else: - self._soln_file = problem_filename_prefix+".soln" + self._soln_file = problem_filename_prefix + ".soln" # # Define the results file (if the sol external parser is used) @@ -368,14 +370,16 @@ def _check_and_escape_options(options): tmp_v = '"' + tmp_v + '"' if _bad: - raise ValueError("Unable to properly escape solver option:" - "\n\t%s=%s" % (key, val) ) + raise ValueError( + "Unable to properly escape solver option:" + "\n\t%s=%s" % (key, val) + ) yield (tmp_k, tmp_v) # # Define command line # - cmd = [ executable ] + cmd = [executable] if self._timer: cmd.insert(0, self._timer) if self._problem_format == ProblemFormat.nl: @@ -386,35 +390,32 @@ def _check_and_escape_options(options): cmd.extend(['-sec', str(self._timelimit)]) cmd.extend(['-timeMode', "elapsed"]) if "debug" in self.options: - cmd.extend(["-log","5"]) + cmd.extend(["-log", "5"]) for key, val in _check_and_escape_options(self.options): if key == 'solver': continue - cmd.append(key+"="+val) - os.environ['cbc_options']="printingOptions=all" - #cmd.extend(["-printingOptions=all", - #"-stat"]) + cmd.append(key + "=" + val) + os.environ['cbc_options'] = "printingOptions=all" + # cmd.extend(["-printingOptions=all", + # "-stat"]) else: if self._timelimit is not None and self._timelimit > 0.0: cmd.extend(['-sec', str(self._timelimit)]) cmd.extend(['-timeMode', "elapsed"]) if "debug" in self.options: - cmd.extend(["-log","5"]) + cmd.extend(["-log", "5"]) # these must go after options that take a value action_options = [] for key, val in _check_and_escape_options(self.options): if val.strip() != '': - cmd.extend(['-'+key, val]) + cmd.extend(['-' + key, val]) else: - action_options.append('-'+key) - cmd.extend(["-printingOptions", "all", - "-import", problem_files[0]]) + action_options.append('-' + key) + cmd.extend(["-printingOptions", "all", "-import", problem_files[0]]) cmd.extend(action_options) if self._warm_start_solve: - cmd.extend(["-mipstart",self._warm_start_file_name]) - cmd.extend(["-stat=1", - "-solve", - "-solu", self._soln_file]) + cmd.extend(["-mipstart", self._warm_start_file_name]) + cmd.extend(["-stat=1", "-solve", "-solu", self._soln_file]) return Bunch(cmd=cmd, log_file=self._log_file, env=None) @@ -457,48 +458,78 @@ def process_logfile(self): n_tokens = len(tokens) if n_tokens > 1: # https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L3769 - if n_tokens > 4 and tokens[:4] == ('Continuous', 'objective', 'value', 'is'): + if n_tokens > 4 and tokens[:4] == ( + 'Continuous', + 'objective', + 'value', + 'is', + ): lower_bound = _float(tokens[4]) # Search completed - best objective %g, took %d iterations and %d nodes - elif n_tokens > 12 and tokens[1:3] == ('Search', 'completed') \ - and tokens[4:6] == ('best', 'objective') and tokens[9] == 'iterations' \ - and tokens[12] == 'nodes': + elif ( + n_tokens > 12 + and tokens[1:3] == ('Search', 'completed') + and tokens[4:6] == ('best', 'objective') + and tokens[9] == 'iterations' + and tokens[12] == 'nodes' + ): optim_value = _float(tokens[6][:-1]) - results.solver.statistics.black_box.number_of_iterations = int(tokens[8]) + results.solver.statistics.black_box.number_of_iterations = int( + tokens[8] + ) nodes = int(tokens[11]) elif tokens[1] == 'Exiting' and n_tokens > 4: if tokens[2:4] == ('on', 'maximum'): - results.solver.termination_condition = {'nodes': TerminationCondition.maxEvaluations, - 'time': TerminationCondition.maxTimeLimit, - 'solutions': TerminationCondition.other, - 'iterations': TerminationCondition.maxIterations - }.get(tokens[4], TerminationCondition.other) + results.solver.termination_condition = { + 'nodes': TerminationCondition.maxEvaluations, + 'time': TerminationCondition.maxTimeLimit, + 'solutions': TerminationCondition.other, + 'iterations': TerminationCondition.maxIterations, + }.get(tokens[4], TerminationCondition.other) # elif tokens[2:5] == ('as', 'integer', 'gap'): # # We might want to handle this case # Integer solution of %g found... elif n_tokens >= 4 and tokens[1:4] == ('Integer', 'solution', 'of'): optim_value = _float(tokens[4]) try: - results.solver.statistics.black_box.number_of_iterations = \ - int(tokens[tokens.index('iterations') - 1]) + results.solver.statistics.black_box.number_of_iterations = int( + tokens[tokens.index('iterations') - 1] + ) nodes = int(tokens[tokens.index('nodes') - 1]) except ValueError: pass # Partial search - best objective %g (best possible %g), took %d iterations and %d nodes - elif n_tokens > 15 and tokens[1:3] == ('Partial', 'search') \ - and tokens[4:6] == ('best', 'objective') and tokens[7:9] == ('(best', 'possible') \ - and tokens[12] == 'iterations' and tokens[15] == 'nodes': + elif ( + n_tokens > 15 + and tokens[1:3] == ('Partial', 'search') + and tokens[4:6] == ('best', 'objective') + and tokens[7:9] == ('(best', 'possible') + and tokens[12] == 'iterations' + and tokens[15] == 'nodes' + ): optim_value = _float(tokens[6]) lower_bound = _float(tokens[9][:-2]) - results.solver.statistics.black_box.number_of_iterations = int(tokens[11]) + results.solver.statistics.black_box.number_of_iterations = int( + tokens[11] + ) nodes = int(tokens[14]) - elif n_tokens > 12 and tokens[1] == 'After' and tokens[3] == 'nodes,' \ - and tokens[8:10] == ('best', 'solution,') and tokens[10:12] == ('best', 'possible'): + elif ( + n_tokens > 12 + and tokens[1] == 'After' + and tokens[3] == 'nodes,' + and tokens[8:10] == ('best', 'solution,') + and tokens[10:12] == ('best', 'possible') + ): nodes = int(tokens[2]) optim_value = _float(tokens[7]) lower_bound = _float(tokens[12]) - elif tokens[0] == "Current" and n_tokens == 10 and tokens[1] == "default" and tokens[2] == "(if" \ - and results.problem.name is None: + elif ( + tokens[0] == "Current" + and n_tokens == 10 + and tokens[1] == "default" + and tokens[2] == "(if" + and results.problem.name is None + ): results.problem.name = tokens[-1] if '.' in results.problem.name: parts = results.problem.name.split('.') @@ -513,48 +544,73 @@ def process_logfile(self): # https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L10840 elif tokens[0] == 'Presolve': if n_tokens > 9 and tokens[3] == 'rows,' and tokens[6] == 'columns': - results.problem.number_of_variables = int(tokens[4]) - int(tokens[5][1:-1]) - results.problem.number_of_constraints = int(tokens[1]) - int(tokens[2][1:-1]) + results.problem.number_of_variables = int(tokens[4]) - int( + tokens[5][1:-1] + ) + results.problem.number_of_constraints = int(tokens[1]) - int( + tokens[2][1:-1] + ) results.problem.number_of_objectives = 1 elif n_tokens > 6 and tokens[6] == 'infeasible': soln.status = SolutionStatus.infeasible # https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L11105 - elif n_tokens > 11 and tokens[:2] == ('Problem', 'has') and tokens[3] == 'rows,' and \ - tokens[5] == 'columns' and tokens[7:9] == ('with', 'objective)'): + elif ( + n_tokens > 11 + and tokens[:2] == ('Problem', 'has') + and tokens[3] == 'rows,' + and tokens[5] == 'columns' + and tokens[7:9] == ('with', 'objective)') + ): results.problem.number_of_variables = int(tokens[4]) results.problem.number_of_constraints = int(tokens[2]) results.problem.number_of_nonzeros = int(tokens[6][1:]) results.problem.number_of_objectives = 1 # https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L10814 - elif n_tokens > 8 and tokens[:3] == ('Original', 'problem', 'has') and tokens[4] == 'integers' \ - and tokens[6:9] == ('of', 'which', 'binary)'): + elif ( + n_tokens > 8 + and tokens[:3] == ('Original', 'problem', 'has') + and tokens[4] == 'integers' + and tokens[6:9] == ('of', 'which', 'binary)') + ): results.problem.number_of_integer_variables = int(tokens[3]) results.problem.number_of_binary_variables = int(tokens[5][1:]) elif n_tokens == 5 and tokens[3] == "NAME": results.problem.name = tokens[4] - elif 'CoinLpIO::readLp(): Maximization problem reformulated as minimization' in ' '.join(tokens): + elif ( + 'CoinLpIO::readLp(): Maximization problem reformulated as minimization' + in ' '.join(tokens) + ): results.problem.sense = ProblemSense.maximize # https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L3047 elif n_tokens > 3 and tokens[:2] == ('Result', '-'): if tokens[2:4] in [('Run', 'abandoned'), ('User', 'ctrl-c')]: - results.solver.termination_condition = TerminationCondition.userInterrupt + results.solver.termination_condition = ( + TerminationCondition.userInterrupt + ) if n_tokens > 4: if tokens[2:5] == ('Optimal', 'solution', 'found'): # parser for log file generetated with discrete variable soln.status = SolutionStatus.optimal # if n_tokens > 7 and tokens[5:8] == ('(within', 'gap', 'tolerance)'): # # We might want to handle this case - elif tokens[2:5] in [('Linear', 'relaxation', 'infeasible'), - ('Problem', 'proven', 'infeasible')]: + elif tokens[2:5] in [ + ('Linear', 'relaxation', 'infeasible'), + ('Problem', 'proven', 'infeasible'), + ]: soln.status = SolutionStatus.infeasible elif tokens[2:5] == ('Linear', 'relaxation', 'unbounded'): soln.status = SolutionStatus.unbounded - elif n_tokens > 5 and tokens[2:4] == ('Stopped', 'on') and tokens[5] == 'limit': - results.solver.termination_condition = {'node': TerminationCondition.maxEvaluations, - 'time': TerminationCondition.maxTimeLimit, - 'solution': TerminationCondition.other, - 'iterations': TerminationCondition.maxIterations - }.get(tokens[4], TerminationCondition.other) + elif ( + n_tokens > 5 + and tokens[2:4] == ('Stopped', 'on') + and tokens[5] == 'limit' + ): + results.solver.termination_condition = { + 'node': TerminationCondition.maxEvaluations, + 'time': TerminationCondition.maxTimeLimit, + 'solution': TerminationCondition.other, + 'iterations': TerminationCondition.maxIterations, + }.get(tokens[4], TerminationCondition.other) # perhaps from https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L12318 elif n_tokens > 3 and tokens[2] == "Finished": soln.status = SolutionStatus.optimal @@ -564,10 +620,17 @@ def process_logfile(self): # parser for log file generetated with discrete variable optim_value = _float(tokens[2]) # https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L7904 - elif n_tokens >= 4 and tokens[:4] == ('No', 'feasible', 'solution', 'found'): + elif n_tokens >= 4 and tokens[:4] == ( + 'No', + 'feasible', + 'solution', + 'found', + ): soln.status = SolutionStatus.infeasible elif n_tokens > 2 and tokens[:2] == ('Lower', 'bound:'): - if lower_bound is None: # Only use if not already found since this is to less decimal places + if ( + lower_bound is None + ): # Only use if not already found since this is to less decimal places results.problem.lower_bound = _float(tokens[2]) # https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L7918 elif tokens[0] == 'Gap:': @@ -578,7 +641,9 @@ def process_logfile(self): nodes = int(tokens[2]) # https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L7926 elif n_tokens > 2 and tokens[:2] == ('Total', 'iterations:'): - results.solver.statistics.black_box.number_of_iterations = int(tokens[2]) + results.solver.statistics.black_box.number_of_iterations = int( + tokens[2] + ) # https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L7930 elif n_tokens > 3 and tokens[:3] == ('Time', '(CPU', 'seconds):'): results.solver.system_time = _float(tokens[3]) @@ -586,7 +651,12 @@ def process_logfile(self): elif n_tokens > 3 and tokens[:3] == ('Time', '(Wallclock', 'Seconds):'): results.solver.wallclock_time = _float(tokens[3]) # https://projects.coin-or.org/Cbc/browser/trunk/Cbc/src/CbcSolver.cpp?rev=2497#L10477 - elif n_tokens > 4 and tokens[:4] == ('Total', 'time', '(CPU', 'seconds):'): + elif n_tokens > 4 and tokens[:4] == ( + 'Total', + 'time', + '(CPU', + 'seconds):', + ): results.solver.system_time = _float(tokens[4]) if n_tokens > 7 and tokens[5:7] == ('(Wallclock', 'seconds):'): results.solver.wallclock_time = _float(tokens[7]) @@ -597,16 +667,27 @@ def process_logfile(self): # complementarity gap both smallish and small steps" soln.status = SolutionStatus.optimal optim_value = _float(tokens[4]) - elif n_tokens > 5 and tokens[1] == 'objective' and tokens[5] == 'iterations': + elif ( + n_tokens > 5 + and tokens[1] == 'objective' + and tokens[5] == 'iterations' + ): soln.status = SolutionStatus.optimal optim_value = _float(tokens[2]) - results.solver.statistics.black_box.number_of_iterations = int(tokens[4]) + results.solver.statistics.black_box.number_of_iterations = int( + tokens[4] + ) elif tokens[0] == "sys" and n_tokens == 2: results.solver.system_time = _float(tokens[1]) elif tokens[0] == "user" and n_tokens == 2: results.solver.user_time = _float(tokens[1]) - elif n_tokens == 10 and "Presolve" in tokens and \ - "iterations" in tokens and tokens[0] == "Optimal" and "objective" == tokens[1]: + elif ( + n_tokens == 10 + and "Presolve" in tokens + and "iterations" in tokens + and tokens[0] == "Optimal" + and "objective" == tokens[1] + ): soln.status = SolutionStatus.optimal optim_value = _float(tokens[2]) results.solver.user_time = -1.0 # Why is this set to -1? @@ -615,8 +696,10 @@ def process_logfile(self): results.problem.name = 'unknown' if soln.status is SolutionStatus.optimal: - results.solver.termination_message = "Model was solved to optimality (subject to tolerances), and an " \ - "optimal solution is available." + results.solver.termination_message = ( + "Model was solved to optimality (subject to tolerances), and an " + "optimal solution is available." + ) results.solver.termination_condition = TerminationCondition.optimal results.solver.status = SolverStatus.ok if gap is None: @@ -630,28 +713,46 @@ def process_logfile(self): results.solver.termination_message = "Model was proven to be unbounded." results.solver.termination_condition = TerminationCondition.unbounded results.solver.status = SolverStatus.warning - elif results.solver.termination_condition in [TerminationCondition.maxTimeLimit, - TerminationCondition.maxEvaluations, - TerminationCondition.other, - TerminationCondition.maxIterations]: + elif results.solver.termination_condition in [ + TerminationCondition.maxTimeLimit, + TerminationCondition.maxEvaluations, + TerminationCondition.other, + TerminationCondition.maxIterations, + ]: results.solver.status = SolverStatus.aborted soln.status = SolutionStatus.stoppedByLimit - if results.solver.termination_condition == TerminationCondition.maxTimeLimit: - results.solver.termination_message = "Optimization terminated because the time expended " \ - "exceeded the value specified in the seconds " \ - "parameter." - elif results.solver.termination_condition == TerminationCondition.maxEvaluations: - results.solver.termination_message = \ - "Optimization terminated because the total number of branch-and-cut nodes explored " \ + if ( + results.solver.termination_condition + == TerminationCondition.maxTimeLimit + ): + results.solver.termination_message = ( + "Optimization terminated because the time expended " + "exceeded the value specified in the seconds " + "parameter." + ) + elif ( + results.solver.termination_condition + == TerminationCondition.maxEvaluations + ): + results.solver.termination_message = ( + "Optimization terminated because the total number of branch-and-cut nodes explored " "exceeded the value specified in the maxNodes parameter" + ) elif results.solver.termination_condition == TerminationCondition.other: - results.solver.termination_message = "Optimization terminated because the number of " \ - "solutions found reached the value specified in the " \ - "maxSolutions parameter." - elif results.solver.termination_condition == TerminationCondition.maxIterations: - results.solver.termination_message = "Optimization terminated because the total number of simplex " \ - "iterations performed exceeded the value specified in the " \ - "maxIterations parameter." + results.solver.termination_message = ( + "Optimization terminated because the number of " + "solutions found reached the value specified in the " + "maxSolutions parameter." + ) + elif ( + results.solver.termination_condition + == TerminationCondition.maxIterations + ): + results.solver.termination_message = ( + "Optimization terminated because the total number of simplex " + "iterations performed exceeded the value specified in the " + "maxIterations parameter." + ) soln.gap = gap if results.problem.sense == ProblemSense.minimize: upper_bound = optim_value @@ -670,10 +771,12 @@ def process_logfile(self): results.solver.statistics.branch_and_bound.number_of_bounded_subproblems = nodes results.solver.statistics.branch_and_bound.number_of_created_subproblems = nodes - if soln.status in [SolutionStatus.optimal, - SolutionStatus.stoppedByLimit, - SolutionStatus.unknown, - SolutionStatus.other]: + if soln.status in [ + SolutionStatus.optimal, + SolutionStatus.stoppedByLimit, + SolutionStatus.unknown, + SolutionStatus.other, + ]: results.solution.insert(soln) return results @@ -686,15 +789,17 @@ def process_soln_file(self, results): extract_duals = False extract_reduced_costs = False for suffix in self._suffixes: - flag=False + flag = False if re.match(suffix, "dual"): extract_duals = True - flag=True + flag = True if re.match(suffix, "rc"): extract_reduced_costs = True - flag=True + flag = True if not flag: - raise RuntimeError("***CBC solver plugin cannot extract solution suffix="+suffix) + raise RuntimeError( + "***CBC solver plugin cannot extract solution suffix=" + suffix + ) # if dealing with SOL format files, we've already read # this via the base class reader functionality. @@ -709,24 +814,24 @@ def process_soln_file(self, results): results.problem.number_of_objectives = 1 - processing_constraints = None # None means header, True means constraints, False means variables. + processing_constraints = ( + None # None means header, True means constraints, False means variables. + ) header_processed = False optim_value = None try: - INPUT = open(self._soln_file,"r") + INPUT = open(self._soln_file, "r") except IOError: INPUT = [] _ver = self.version() - invert_objective_sense = ( - results.problem.sense == ProblemSense.maximize - and ( _ver and _ver[:3] < (2, 10, 2) ) + invert_objective_sense = results.problem.sense == ProblemSense.maximize and ( + _ver and _ver[:3] < (2, 10, 2) ) - for line in INPUT: - tokens = tuple(re.split('[ \t]+',line.strip())) + tokens = tuple(re.split('[ \t]+', line.strip())) n_tokens = len(tokens) # # These are the only header entries CBC will generate (identified via browsing CbcSolver.cpp) @@ -739,23 +844,40 @@ def process_soln_file(self, results): if tokens[0] == 'Optimal': results.solver.termination_condition = TerminationCondition.optimal results.solver.status = SolverStatus.ok - results.solver.termination_message = "Model was solved to optimality (subject to tolerances), " \ - "and an optimal solution is available." + results.solver.termination_message = ( + "Model was solved to optimality (subject to tolerances), " + "and an optimal solution is available." + ) solution.status = SolutionStatus.optimal optim_value = _float(tokens[-1]) elif tokens[0] in ('Infeasible', 'PrimalInfeasible') or ( - n_tokens > 1 and tokens[0:2] == ('Integer', 'infeasible')): - results.solver.termination_message = "Model was proven to be infeasible." - results.solver.termination_condition = TerminationCondition.infeasible + n_tokens > 1 and tokens[0:2] == ('Integer', 'infeasible') + ): + results.solver.termination_message = ( + "Model was proven to be infeasible." + ) + results.solver.termination_condition = ( + TerminationCondition.infeasible + ) results.solver.status = SolverStatus.warning solution.status = SolutionStatus.infeasible INPUT.close() return - elif tokens[0] == 'Unbounded' or ( - n_tokens > 2 and tokens[0] == 'Problem' and tokens[2] == 'unbounded') or ( - n_tokens > 1 and tokens[0:2] == ('Dual', 'infeasible')): - results.solver.termination_message = "Model was proven to be unbounded." - results.solver.termination_condition = TerminationCondition.unbounded + elif ( + tokens[0] == 'Unbounded' + or ( + n_tokens > 2 + and tokens[0] == 'Problem' + and tokens[2] == 'unbounded' + ) + or (n_tokens > 1 and tokens[0:2] == ('Dual', 'infeasible')) + ): + results.solver.termination_message = ( + "Model was proven to be unbounded." + ) + results.solver.termination_condition = ( + TerminationCondition.unbounded + ) results.solver.status = SolverStatus.warning solution.status = SolutionStatus.unbounded INPUT.close() @@ -766,62 +888,110 @@ def process_soln_file(self, results): results.solver.status = SolverStatus.aborted solution.status = SolutionStatus.stoppedByLimit if tokens[2] == 'time': - results.solver.termination_message = "Optimization terminated because the time expended " \ - "exceeded the value specified in the seconds " \ - "parameter." - results.solver.termination_condition = TerminationCondition.maxTimeLimit + results.solver.termination_message = ( + "Optimization terminated because the time expended " + "exceeded the value specified in the seconds " + "parameter." + ) + results.solver.termination_condition = ( + TerminationCondition.maxTimeLimit + ) elif tokens[2] == 'iterations': # Only add extra info if not already obtained from logs (which give a better description) - if results.solver.termination_condition not in [TerminationCondition.maxEvaluations, - TerminationCondition.other, - TerminationCondition.maxIterations]: - results.solver.termination_message = "Optimization terminated because a limit was hit" - results.solver.termination_condition = TerminationCondition.maxIterations + if results.solver.termination_condition not in [ + TerminationCondition.maxEvaluations, + TerminationCondition.other, + TerminationCondition.maxIterations, + ]: + results.solver.termination_message = ( + "Optimization terminated because a limit was hit" + ) + results.solver.termination_condition = ( + TerminationCondition.maxIterations + ) elif tokens[2] == 'difficulties': - results.solver.termination_condition = TerminationCondition.solverFailure + results.solver.termination_condition = ( + TerminationCondition.solverFailure + ) results.solver.status = SolverStatus.error solution.status = SolutionStatus.error elif tokens[2] == 'ctrl-c': - results.solver.termination_message = "Optimization was terminated by the user." - results.solver.termination_condition = TerminationCondition.userInterrupt + results.solver.termination_message = ( + "Optimization was terminated by the user." + ) + results.solver.termination_condition = ( + TerminationCondition.userInterrupt + ) solution.status = SolutionStatus.unknown else: - results.solver.termination_condition = TerminationCondition.unknown + results.solver.termination_condition = ( + TerminationCondition.unknown + ) results.solver.status = SolverStatus.unknown solution.status = SolutionStatus.unknown results.solver.termination_message = ' '.join(tokens) - print('***WARNING: CBC plugin currently not processing solution status=Stopped correctly. Full ' - 'status line is: {}'.format(line.strip())) - if n_tokens > 8 and tokens[3:9] == ('(no', 'integer', 'solution', '-', 'continuous', 'used)'): - results.solver.termination_message = "Optimization terminated because a limit was hit, " \ - "however it had not found an integer solution yet." - results.solver.termination_condition = TerminationCondition.intermediateNonInteger + print( + '***WARNING: CBC plugin currently not processing solution status=Stopped correctly. Full ' + 'status line is: {}'.format(line.strip()) + ) + if n_tokens > 8 and tokens[3:9] == ( + '(no', + 'integer', + 'solution', + '-', + 'continuous', + 'used)', + ): + results.solver.termination_message = ( + "Optimization terminated because a limit was hit, " + "however it had not found an integer solution yet." + ) + results.solver.termination_condition = ( + TerminationCondition.intermediateNonInteger + ) solution.status = SolutionStatus.other else: results.solver.termination_condition = TerminationCondition.unknown results.solver.status = SolverStatus.unknown solution.status = SolutionStatus.unknown results.solver.termination_message = ' '.join(tokens) - print('***WARNING: CBC plugin currently not processing solution status={} correctly. Full status ' - 'line is: {}'.format(tokens[0], line.strip())) + print( + '***WARNING: CBC plugin currently not processing solution status={} correctly. Full status ' + 'line is: {}'.format(tokens[0], line.strip()) + ) # most of the first tokens should be integers # if it's not an integer, only then check the list of results try: - row_number = int( tokens[0]) - if row_number == 0: # indicates section start. + row_number = int(tokens[0]) + if row_number == 0: # indicates section start. if processing_constraints is None: processing_constraints = True elif processing_constraints is True: processing_constraints = False else: - raise RuntimeError("CBC plugin encountered unexpected line=("+line.strip()+") in solution file="+self._soln_file+"; constraint and variable sections already processed!") + raise RuntimeError( + "CBC plugin encountered unexpected line=(" + + line.strip() + + ") in solution file=" + + self._soln_file + + "; constraint and variable sections already processed!" + ) except ValueError: - if tokens[0] in ("Optimal", "Infeasible", "Unbounded", "Stopped", "Integer", "Status"): + if tokens[0] in ( + "Optimal", + "Infeasible", + "Unbounded", + "Stopped", + "Integer", + "Status", + ): if optim_value is not None: if invert_objective_sense: optim_value *= -1 - solution.objective['__default_objective__'] = {'Value': optim_value} + solution.objective['__default_objective__'] = { + 'Value': optim_value + } header_processed = True if (processing_constraints is True) and (extract_duals is True): @@ -830,29 +1000,40 @@ def process_soln_file(self, results): elif (n_tokens == 5) and tokens[0] == "**": tokens = tokens[1:] else: - raise RuntimeError("Unexpected line format encountered in CBC solution file - line="+line) + raise RuntimeError( + "Unexpected line format encountered in CBC solution file - line=" + + line + ) constraint = tokens[1] - constraint_ax = _float(tokens[2]) # CBC reports the constraint row times the solution vector - not the slack. + constraint_ax = _float( + tokens[2] + ) # CBC reports the constraint row times the solution vector - not the slack. constraint_dual = _float(tokens[3]) if invert_objective_sense: constraint_dual *= -1 if constraint[:2] == 'c_': - solution.constraint[constraint] = {"Dual" : constraint_dual} + solution.constraint[constraint] = {"Dual": constraint_dual} elif constraint[:2] == 'r_': # For the range constraints, supply only the dual with the largest # magnitude (at least one should always be numerically zero) - existing_constraint_dual_dict = solution.constraint.get( 'r_l_' + constraint[4:], None ) + existing_constraint_dual_dict = solution.constraint.get( + 'r_l_' + constraint[4:], None + ) if existing_constraint_dual_dict: # if a constraint dual is already saved, then update it if its # magnitude is larger than existing; this avoids checking vs # zero (avoiding problems with solver tolerances) existing_constraint_dual = existing_constraint_dual_dict["Dual"] - if abs( constraint_dual) > abs(existing_constraint_dual): - solution.constraint[ 'r_l_' + constraint[4:] ] = {"Dual": constraint_dual} + if abs(constraint_dual) > abs(existing_constraint_dual): + solution.constraint['r_l_' + constraint[4:]] = { + "Dual": constraint_dual + } else: # if no constraint with that name yet, just save it in the solution constraint dictionary - solution.constraint[ 'r_l_' + constraint[4:] ] = {"Dual": constraint_dual} + solution.constraint['r_l_' + constraint[4:]] = { + "Dual": constraint_dual + } elif processing_constraints is False: if n_tokens == 4: @@ -860,14 +1041,16 @@ def process_soln_file(self, results): elif (n_tokens == 5) and tokens[0] == "**": tokens = tokens[1:] else: - raise RuntimeError("Unexpected line format encountered " - "in CBC solution file - line="+line) + raise RuntimeError( + "Unexpected line format encountered " + "in CBC solution file - line=" + line + ) variable_name = tokens[1] variable_value = _float(tokens[2]) - variable = solution.variable[variable_name] = {"Value" : variable_value} + variable = solution.variable[variable_name] = {"Value": variable_value} if extract_reduced_costs is True: - variable_reduced_cost = _float(tokens[3]) # currently ignored. + variable_reduced_cost = _float(tokens[3]) # currently ignored. if invert_objective_sense: variable_reduced_cost *= -1 variable["Rc"] = variable_reduced_cost @@ -876,23 +1059,28 @@ def process_soln_file(self, results): pass else: - raise RuntimeError("CBC plugin encountered unexpected " - "line=("+line.strip()+") in solution file=" - +self._soln_file+"; expecting header, but " - "found data!") + raise RuntimeError( + "CBC plugin encountered unexpected " + "line=(" + + line.strip() + + ") in solution file=" + + self._soln_file + + "; expecting header, but " + "found data!" + ) if not type(INPUT) is list: INPUT.close() - if len(results.solution) == 0 and solution.status in [SolutionStatus.optimal, - SolutionStatus.stoppedByLimit, - SolutionStatus.unknown, - SolutionStatus.other]: + if len(results.solution) == 0 and solution.status in [ + SolutionStatus.optimal, + SolutionStatus.stoppedByLimit, + SolutionStatus.unknown, + SolutionStatus.other, + ]: results.solution.insert(solution) - def _postsolve(self): - # let the base class deal with returning results. results = super(CBCSHELL, self)._postsolve() @@ -910,32 +1098,31 @@ def _float(x): @SolverFactory.register('_mock_cbc') -class MockCBC(CBCSHELL,MockMIP): - """A Mock CBC solver used for testing - """ +class MockCBC(CBCSHELL, MockMIP): + """A Mock CBC solver used for testing""" def __init__(self, **kwds): try: - CBCSHELL.__init__(self,**kwds) - except ApplicationError: #pragma:nocover - pass #pragma:nocover - MockMIP.__init__(self,"cbc") + CBCSHELL.__init__(self, **kwds) + except ApplicationError: # pragma:nocover + pass # pragma:nocover + MockMIP.__init__(self, "cbc") def available(self, exception_flag=True): - return CBCSHELL.available(self,exception_flag) + return CBCSHELL.available(self, exception_flag) - def create_command_line(self,executable,problem_files): - command = CBCSHELL.create_command_line(self,executable,problem_files) - MockMIP.create_command_line(self,executable,problem_files) + def create_command_line(self, executable, problem_files): + command = CBCSHELL.create_command_line(self, executable, problem_files) + MockMIP.create_command_line(self, executable, problem_files) return command def executable(self): return MockMIP.executable(self) - def _execute_command(self,cmd): - return MockMIP._execute_command(self,cmd) + def _execute_command(self, cmd): + return MockMIP._execute_command(self, cmd) - def _convert_problem(self,args,pformat,valid_pformats): + def _convert_problem(self, args, pformat, valid_pformats): if pformat in [ProblemFormat.mps, ProblemFormat.cpxlp, ProblemFormat.nl]: return (args, pformat, None) else: diff --git a/pyomo/solvers/plugins/solvers/CONOPT.py b/pyomo/solvers/plugins/solvers/CONOPT.py index ccfddc2fb6e..30e8ada11a1 100644 --- a/pyomo/solvers/plugins/solvers/CONOPT.py +++ b/pyomo/solvers/plugins/solvers/CONOPT.py @@ -19,9 +19,10 @@ from pyomo.opt.base import ProblemFormat, ResultsFormat from pyomo.opt.base.solvers import _extract_version, SolverFactory from pyomo.opt.results import SolverStatus -from pyomo.opt.solver import SystemCallSolver +from pyomo.opt.solver import SystemCallSolver import logging + logger = logging.getLogger('pyomo.solvers') @@ -31,7 +32,6 @@ class CONOPT(SystemCallSolver): An interface to the CONOPT optimizer that uses the AMPL Solver Library. """ - def __init__(self, **kwds): # # Call base constructor @@ -42,7 +42,7 @@ def __init__(self, **kwds): # Setup valid problem formats, and valid results for each problem format # Also set the default problem and results formats. # - self._valid_problem_formats=[ProblemFormat.nl] + self._valid_problem_formats = [ProblemFormat.nl] self._valid_result_formats = {} self._valid_result_formats[ProblemFormat.nl] = [ResultsFormat.sol] self.set_problem_format(ProblemFormat.nl) @@ -62,8 +62,10 @@ def _default_results_format(self, prob_format): def _default_executable(self): executable = Executable("conopt") if not executable: - logger.warning("Could not locate the 'conopt' executable, " - "which is required for solver %s" % self.name) + logger.warning( + "Could not locate the 'conopt' executable, " + "which is required for solver %s" % self.name + ) self.enable = False return None return executable.path() @@ -75,23 +77,24 @@ def _get_version(self): solver_exec = self.executable() if solver_exec is None: return _extract_version('') - results = subprocess.run( [solver_exec], timeout=1, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + results = subprocess.run( + [solver_exec], + timeout=1, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) return _extract_version(results.stdout) def create_command_line(self, executable, problem_files): - - assert(self._problem_format == ProblemFormat.nl) - assert(self._results_format == ResultsFormat.sol) + assert self._problem_format == ProblemFormat.nl + assert self._results_format == ResultsFormat.sol # # Define log file # if self._log_file is None: - self._log_file = TempfileManager.\ - create_tempfile(suffix="_conopt.log") + self._log_file = TempfileManager.create_tempfile(suffix="_conopt.log") fname = problem_files[0] if '.' in fname: @@ -100,7 +103,7 @@ def create_command_line(self, executable, problem_files): fname = '.'.join(tmp[:-1]) else: fname = tmp[0] - self._soln_file = fname+".sol" + self._soln_file = fname + ".sol" # # Define results file (since an external parser is used) @@ -110,7 +113,7 @@ def create_command_line(self, executable, problem_files): # # Define command line # - env=os.environ.copy() + env = os.environ.copy() # # Merge the PYOMO_AMPLFUNC (externals defined within # Pyomo/Pyomo) with any user-specified external function @@ -130,19 +133,19 @@ def create_command_line(self, executable, problem_files): # to the command line. I'm not sure what solvers this method of passing options # through the envstr variable works for, but it does not seem to work for cplex # or gurobi - opt=[] + opt = [] for key in self.options: if key == 'solver': continue if isinstance(self.options[key], str) and ' ' in self.options[key]: - opt.append(key+"=\""+str(self.options[key])+"\"") - cmd.append(str(key)+"="+str(self.options[key])) + opt.append(key + "=\"" + str(self.options[key]) + "\"") + cmd.append(str(key) + "=" + str(self.options[key])) elif key == 'subsolver': - opt.append("solver="+str(self.options[key])) - cmd.append(str(key)+"="+str(self.options[key])) + opt.append("solver=" + str(self.options[key])) + cmd.append(str(key) + "=" + str(self.options[key])) else: - opt.append(key+"="+str(self.options[key])) - cmd.append(str(key)+"="+str(self.options[key])) + opt.append(key + "=" + str(self.options[key])) + cmd.append(str(key) + "=" + str(self.options[key])) envstr = "%s_options" % self.options.solver # Merge with any options coming in through the environment @@ -157,7 +160,6 @@ def _postsolve(self): # For some reason it sets the solver_results_num to # 100 in this case, which is reserved for cases # where "optimal solution indicated, but error likely". - if results.solver.id == 100 and \ - 'Locally optimal' in results.solver.message: + if results.solver.id == 100 and 'Locally optimal' in results.solver.message: results.solver.status = SolverStatus.ok return results diff --git a/pyomo/solvers/plugins/solvers/CPLEX.py b/pyomo/solvers/plugins/solvers/CPLEX.py index 7fb16249fa9..9755bc58614 100644 --- a/pyomo/solvers/plugins/solvers/CPLEX.py +++ b/pyomo/solvers/plugins/solvers/CPLEX.py @@ -21,13 +21,15 @@ from pyomo.common.tempfiles import TempfileManager from pyomo.common.collections import ComponentMap, Bunch -from pyomo.opt.base import ( - ProblemFormat, ResultsFormat, OptSolver, BranchDirection, -) +from pyomo.opt.base import ProblemFormat, ResultsFormat, OptSolver, BranchDirection from pyomo.opt.base.solvers import _extract_version, SolverFactory from pyomo.opt.results import ( - SolverResults, SolverStatus, TerminationCondition, SolutionStatus, - ProblemSense, Solution, + SolverResults, + SolverStatus, + TerminationCondition, + SolutionStatus, + ProblemSense, + Solution, ) from pyomo.opt.solver import ILMLicensedSystemCallSolver from pyomo.solvers.mockmip import MockMIP @@ -53,29 +55,39 @@ def _validate_file_name(cplex, filename, description): raise ValueError( "Unallowed character (%s) found in CPLEX %s file path/name.\n\t" "For portability reasons, only [%s] are allowed." - % (matches.group(), description, - _validate_file_name.allowed_characters.replace("\\",''))) + % ( + matches.group(), + description, + _validate_file_name.allowed_characters.replace("\\", ''), + ) + ) # CPLEX only supports quoting spaces starting in v12.8. if ' ' in filename: - if cplex.version()[:2] >= (12,8): - filename = '"'+filename+'"' + if cplex.version()[:2] >= (12, 8): + filename = '"' + filename + '"' else: raise ValueError( "Space detected in CPLEX %s file path/name\n\t%s\nand " "CPLEX older than version 12.8. Please either upgrade " "CPLEX or remove the space from the %s path." - % (description, filename, description)) + % (description, filename, description) + ) return filename -_validate_file_name.allowed_characters = r"a-zA-Z0-9 ~:\.\-_\%s" % ( - os.path.sep,) + + +# The full list of allowed characters, per IBM, is: +# (a-z, A-Z, 0-9) or ! " # $ % & ( ) / , . ; ? @ _ ` ' { } | ~ +_validate_file_name.allowed_characters = ( + r"a-zA-Z0-9 ~:;,!'`|\$\(\)\{\}\?\#\&\.\-_\@\%s" % (os.path.sep,) +) _validate_file_name.illegal_characters = re.compile( - '[^%s]' % (_validate_file_name.allowed_characters,)) + '[^%s]' % (_validate_file_name.allowed_characters,) +) @SolverFactory.register('cplex', doc='The CPLEX LP/MIP solver') class CPLEX(OptSolver): - """The CPLEX LP/MIP solver - """ + """The CPLEX LP/MIP solver""" def __new__(cls, *args, **kwds): try: @@ -86,7 +98,7 @@ def __new__(cls, *args, **kwds): except KeyError: mode = 'lp' # - if mode == 'lp': + if mode == 'lp': return SolverFactory('_cplex_shell', **kwds) if mode == 'mps': opt = SolverFactory('_cplex_shell', **kwds) @@ -95,13 +107,17 @@ def __new__(cls, *args, **kwds): if mode in ['python', 'direct']: opt = SolverFactory('cplex_direct', **kwds) if opt is None: - logging.getLogger('pyomo.solvers').error('Python API for CPLEX is not installed') + logging.getLogger('pyomo.solvers').error( + 'Python API for CPLEX is not installed' + ) return return opt if mode == 'persistent': opt = SolverFactory('cplex_persistent', **kwds) if opt is None: - logging.getLogger('pyomo.solvers').error('Python API for CPLEX is not installed') + logging.getLogger('pyomo.solvers').error( + 'Python API for CPLEX is not installed' + ) return return opt # @@ -122,11 +138,7 @@ class ORDFileSchema(object): @classmethod def ROW(cls, name, priority, branch_direction=None): - return " %s %s %s\n" % ( - cls._direction_to_str(branch_direction), - name, - priority, - ) + return " %s %s %s\n" % (cls._direction_to_str(branch_direction), name, priority) @staticmethod def _direction_to_str(branch_direction): @@ -138,10 +150,11 @@ def _direction_to_str(branch_direction): return "" -@SolverFactory.register('_cplex_shell', doc='Shell interface to the CPLEX LP/MIP solver') +@SolverFactory.register( + '_cplex_shell', doc='Shell interface to the CPLEX LP/MIP solver' +) class CPLEXSHELL(ILMLicensedSystemCallSolver): - """Shell interface to the CPLEX LP/MIP solver - """ + """Shell interface to the CPLEX LP/MIP solver""" def __init__(self, **kwds): # @@ -160,8 +173,8 @@ def __init__(self, **kwds): # # Define valid problem formats and associated results formats # - self._valid_problem_formats=[ProblemFormat.cpxlp, ProblemFormat.mps] - self._valid_result_formats={} + self._valid_problem_formats = [ProblemFormat.cpxlp, ProblemFormat.mps] + self._valid_result_formats = {} self._valid_result_formats[ProblemFormat.cpxlp] = [ResultsFormat.soln] self._valid_result_formats[ProblemFormat.mps] = [ResultsFormat.soln] self.set_problem_format(ProblemFormat.cpxlp) @@ -196,8 +209,7 @@ def _warm_start(self, instance): # contains only references to the variables encountered in constraints output_index = 0 if isinstance(instance, IBlock): - smap = getattr(instance,"._symbol_maps")\ - [self._smap_id] + smap = getattr(instance, "._symbol_maps")[self._smap_id] else: smap = instance.solutions.symbol_map[self._smap_id] byObject = smap.byObject @@ -213,12 +225,13 @@ def _warm_start(self, instance): mst_file.write("\n") mst_file.write("\n") for var in instance.component_data_objects(Var): - if (var.value is not None) and \ - (id(var) in byObject): + if (var.value is not None) and (id(var) in byObject): name = byObject[id(var)] - mst_file.write("\n" - % (output_index, name, var.value)) + mst_file.write( + "\n" + % (output_index, name, var.value) + ) output_index = output_index + 1 mst_file.write("\n") @@ -229,7 +242,7 @@ def _warm_start(self, instance): SUFFIX_DIRECTION_NAME = "direction" def _write_priorities_file(self, instance): - """ Write a variable priorities file in the CPLEX ORD format. """ + """Write a variable priorities file in the CPLEX ORD format.""" priorities, directions = self._get_suffixes(instance) rows = self._convert_priorities_to_rows(instance, priorities, directions) self._write_priority_rows(rows) @@ -292,7 +305,6 @@ def _write_priority_rows(self, rows): # over-ride presolve to extract the warm-start keyword, if specified. def _presolve(self, *args, **kwds): - # create a context in the temporary file manager for # this plugin - is "pop"ed in the _postsolve method. TempfileManager.push() @@ -302,26 +314,26 @@ def _presolve(self, *args, **kwds): # to a file. self._warm_start_solve = kwds.pop('warmstart', False) self._warm_start_file_name = _validate_file_name( - self, kwds.pop('warmstart_file', None), "warm start") + self, kwds.pop('warmstart_file', None), "warm start" + ) user_warmstart = self._warm_start_file_name is not None # the input argument can currently be one of two things: an instance or a filename. # if a filename is provided and a warm-start is indicated, we go ahead and # create the temporary file - assuming that the user has already, via some external # mechanism, invoked warm_start() with a instance to create the warm start file. - if self._warm_start_solve and \ - isinstance(args[0], str): + if self._warm_start_solve and isinstance(args[0], str): # we assume the user knows what they are doing... pass - elif self._warm_start_solve and \ - (not isinstance(args[0], str)): + elif self._warm_start_solve and (not isinstance(args[0], str)): # assign the name of the warm start file *before* calling the base class # presolve - the base class method ends up creating the command line, # and the warm start file-name is (obviously) needed there. if self._warm_start_file_name is None: assert not user_warmstart - self._warm_start_file_name = TempfileManager.\ - create_tempfile(suffix = '.cplex.mst') + self._warm_start_file_name = TempfileManager.create_tempfile( + suffix='.cplex.mst' + ) self._priorities_solve = kwds.pop("priorities", False) self._priorities_file_name = _validate_file_name( @@ -345,23 +357,22 @@ def _presolve(self, *args, **kwds): # symbol_map is actually constructed! if (len(args) > 0) and (not isinstance(args[0], str)): - if len(args) != 1: raise ValueError( "CPLEX _presolve method can only handle a " - "single problem instance - %s were supplied" - % (len(args),)) + "single problem instance - %s were supplied" % (len(args),) + ) # write the warm-start file - currently only supports MIPs. # we only know how to deal with a single problem instance. if self._warm_start_solve and (not user_warmstart): - start_time = time.time() self._warm_start(args[0]) end_time = time.time() if self._report_timing: - print("Warm start write time= %.2f seconds" - % (end_time-start_time)) + print( + "Warm start write time= %.2f seconds" % (end_time - start_time) + ) if self._priorities_solve and (not user_priorities): start_time = time.time() @@ -376,9 +387,10 @@ def _presolve(self, *args, **kwds): def _default_executable(self): executable = Executable("cplex") if not executable: - logger.warning("Could not locate the 'cplex' executable" - ", which is required for solver %s" - % self.name) + logger.warning( + "Could not locate the 'cplex' executable" + ", which is required for solver %s" % self.name + ) self.enable = False return None return executable.path() @@ -390,21 +402,22 @@ def _get_version(self): solver_exec = self.executable() if solver_exec is None: return _extract_version('') - results = subprocess.run( [solver_exec,'-c','quit'], timeout=1, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + results = subprocess.run( + [solver_exec, '-c', 'quit'], + timeout=1, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) return _extract_version(results.stdout) def create_command_line(self, executable, problem_files): - # # Define log file # The log file in CPLEX contains the solution trace, but the solver status can be found in the solution file. # if self._log_file is None: - self._log_file = TempfileManager.\ - create_tempfile(suffix = '.cplex.log') + self._log_file = TempfileManager.create_tempfile(suffix='.cplex.log') self._log_file = _validate_file_name(self, self._log_file, "log") # @@ -412,8 +425,7 @@ def create_command_line(self, executable, problem_files): # As indicated above, contains (in XML) both the solution and solver status. # if self._soln_file is None: - self._soln_file = TempfileManager.\ - create_tempfile(suffix = '.cplex.sol') + self._soln_file = TempfileManager.create_tempfile(suffix='.cplex.sol') self._soln_file = _validate_file_name(self, self._soln_file, "solution") # @@ -421,28 +433,24 @@ def create_command_line(self, executable, problem_files): # script = 'set logfile %s\n' % (self._log_file,) if self._timelimit is not None and self._timelimit > 0.0: - script += 'set timelimit %s\n' % ( self._timelimit, ) + script += 'set timelimit %s\n' % (self._timelimit,) - if (self.options.mipgap is not None) and \ - (float(self.options.mipgap) > 0.0): - script += ('set mip tolerances mipgap %s\n' - % (self.options.mipgap,)) + if (self.options.mipgap is not None) and (float(self.options.mipgap) > 0.0): + script += 'set mip tolerances mipgap %s\n' % (self.options.mipgap,) for key in self.options: if key == 'relax_integrality' or key == 'mipgap': continue - elif isinstance(self.options[key], str) and \ - (' ' in self.options[key]): - opt = ' '.join(key.split('_'))+' '+str(self.options[key]) + elif isinstance(self.options[key], str) and (' ' in self.options[key]): + opt = ' '.join(key.split('_')) + ' ' + str(self.options[key]) else: - opt = ' '.join(key.split('_'))+' '+str(self.options[key]) - script += 'set %s\n' % ( opt, ) + opt = ' '.join(key.split('_')) + ' ' + str(self.options[key]) + script += 'set %s\n' % (opt,) _lp_file = _validate_file_name(self, problem_files[0], "LP") - script += 'read %s\n' % ( _lp_file, ) + script += 'read %s\n' % (_lp_file,) # if we're dealing with an LP, the MST file will be empty. - if self._warm_start_solve and \ - (self._warm_start_file_name is not None): + if self._warm_start_solve and (self._warm_start_file_name is not None): script += 'read %s\n' % (self._warm_start_file_name,) if self._priorities_solve and self._priorities_file_name is not None: @@ -459,17 +467,14 @@ def create_command_line(self, executable, problem_files): # dump the script and warm-start file names for the # user if we're keeping files around. if self._keepfiles: - script_fname = TempfileManager.\ - create_tempfile(suffix = '.cplex.script') - tmp = open(script_fname,'w') + script_fname = TempfileManager.create_tempfile(suffix='.cplex.script') + tmp = open(script_fname, 'w') tmp.write(script) tmp.close() print("Solver script file=" + script_fname) - if self._warm_start_solve and \ - (self._warm_start_file_name is not None): - print("Solver warm-start file=" - +self._warm_start_file_name) + if self._warm_start_solve and (self._warm_start_file_name is not None): + print("Solver warm-start file=" + self._warm_start_file_name) if self._priorities_solve and self._priorities_file_name is not None: print("Solver priorities file=" + self._priorities_file_name) @@ -480,8 +485,7 @@ def create_command_line(self, executable, problem_files): cmd = [executable] if self._timer: cmd.insert(0, self._timer) - return Bunch(cmd=cmd, script=script, - log_file=self._log_file, env=None) + return Bunch(cmd=cmd, script=script, log_file=self._log_file, env=None) def process_logfile(self): """ @@ -511,50 +515,82 @@ def process_logfile(self): self._gap = None for line in output.split("\n"): - tokens = re.split('[ \t]+',line.strip()) + tokens = re.split('[ \t]+', line.strip()) if len(tokens) > 3 and tokens[0] == "CPLEX" and tokens[1] == "Error": - # IMPT: See below - cplex can generate an error line and then terminate fine, e.g., in CPLEX 12.1. - # To handle these cases, we should be specifying some kind of termination criterion always - # in the course of parsing a log file (we aren't doing so currently - just in some conditions). - results.solver.status=SolverStatus.error + # IMPT: See below - cplex can generate an error line and then terminate fine, e.g., in CPLEX 12.1. + # To handle these cases, we should be specifying some kind of termination criterion always + # in the course of parsing a log file (we aren't doing so currently - just in some conditions). + results.solver.status = SolverStatus.error results.solver.error = " ".join(tokens) elif len(tokens) >= 3 and tokens[0] == "ILOG" and tokens[1] == "CPLEX": cplex_version = tokens[2].rstrip(',') elif len(tokens) >= 3 and tokens[0] == "Variables": - if results.problem.number_of_variables is None: # CPLEX 11.2 and subsequent versions have two Variables sections in the log file output. + if ( + results.problem.number_of_variables is None + ): # CPLEX 11.2 and subsequent versions have two Variables sections in the log file output. results.problem.number_of_variables = int(tokens[2]) # In CPLEX 11 (and presumably before), there was only a single line output to - # indicate the constriant count, e.g., "Linear constraints : 16 [Less: 7, Greater: 6, Equal: 3]". + # indicate the constraint count, e.g., "Linear constraints : 16 [Less: 7, Greater: 6, Equal: 3]". # In CPLEX 11.2 (or somewhere in between 11 and 11.2 - I haven't bothered to track it down # in that detail), there is another instance of this line prefix in the min/max problem statistics # block - which we don't care about. In this case, the line looks like: "Linear constraints :" and # that's all. - elif len(tokens) >= 4 and tokens[0] == "Linear" and tokens[1] == "constraints": + elif ( + len(tokens) >= 4 + and tokens[0] == "Linear" + and tokens[1] == "constraints" + ): results.problem.number_of_constraints = int(tokens[3]) elif len(tokens) >= 3 and tokens[0] == "Nonzeros": - if results.problem.number_of_nonzeros is None: # CPLEX 11.2 and subsequent has two Nonzeros sections. + if ( + results.problem.number_of_nonzeros is None + ): # CPLEX 11.2 and subsequent has two Nonzeros sections. results.problem.number_of_nonzeros = int(tokens[2]) elif len(tokens) >= 5 and tokens[4] == "MINIMIZE": results.problem.sense = ProblemSense.minimize elif len(tokens) >= 5 and tokens[4] == "MAXIMIZE": results.problem.sense = ProblemSense.maximize - elif len(tokens) >= 4 and tokens[0] == "Solution" and tokens[1] == "time" and tokens[2] == "=": + elif ( + len(tokens) >= 4 + and tokens[0] == "Solution" + and tokens[1] == "time" + and tokens[2] == "=" + ): # technically, I'm not sure if this is CPLEX user time or user+system - CPLEX doesn't appear # to differentiate, and I'm not sure we can always provide a break-down. results.solver.user_time = float(tokens[3]) - elif len(tokens) >= 4 and tokens[0] == "Primal" and tokens[1] == "simplex" and tokens[3] == "Optimal:": + elif ( + len(tokens) >= 4 + and tokens[0] == "Primal" + and tokens[1] == "simplex" + and tokens[3] == "Optimal:" + ): results.solver.termination_condition = TerminationCondition.optimal results.solver.termination_message = ' '.join(tokens) - elif len(tokens) >= 4 and tokens[0] == "Dual" and tokens[1] == "simplex" and tokens[3] == "Optimal:": + elif ( + len(tokens) >= 4 + and tokens[0] == "Dual" + and tokens[1] == "simplex" + and tokens[3] == "Optimal:" + ): results.solver.termination_condition = TerminationCondition.optimal results.solver.termination_message = ' '.join(tokens) - elif len(tokens) >= 4 and tokens[0] == "Barrier" and tokens[2] == "Optimal:": + elif ( + len(tokens) >= 4 and tokens[0] == "Barrier" and tokens[2] == "Optimal:" + ): results.solver.termination_condition = TerminationCondition.optimal results.solver.termination_message = ' '.join(tokens) - elif len(tokens) >= 4 and tokens[0] == "Dual" and tokens[3] == "Infeasible:": + elif ( + len(tokens) >= 4 and tokens[0] == "Dual" and tokens[3] == "Infeasible:" + ): results.solver.termination_condition = TerminationCondition.infeasible results.solver.termination_message = ' '.join(tokens) - elif len(tokens) >= 4 and tokens[0] == "MIP" and tokens[2] == "Integer" and tokens[3] == "infeasible.": + elif ( + len(tokens) >= 4 + and tokens[0] == "MIP" + and tokens[2] == "Integer" + and tokens[3] == "infeasible." + ): # if CPLEX has previously printed an error message, reduce it to a warning - # there is a strong indication it recovered, but we can't be sure. if results.solver.status == SolverStatus.error: @@ -563,18 +599,39 @@ def process_logfile(self): results.solver.status = SolverStatus.ok results.solver.termination_condition = TerminationCondition.infeasible results.solver.termination_message = ' '.join(tokens) - elif len(tokens) >= 10 and tokens[0] == "MIP" and tokens[2] == "Time" and tokens[3] == "limit" and tokens[6] == "feasible:": + elif ( + len(tokens) >= 10 + and tokens[0] == "MIP" + and tokens[2] == "Time" + and tokens[3] == "limit" + and tokens[6] == "feasible:" + ): # handle processing when the time limit has been exceeded, and we have a feasible solution. results.solver.status = SolverStatus.ok results.solver.termination_condition = TerminationCondition.maxTimeLimit results.solver.termination_message = ' '.join(tokens) - elif len(tokens) >= 10 and tokens[0] == "Current" and tokens[1] == "MIP" and tokens[2] == "best" and tokens[3] == "bound": + elif ( + len(tokens) >= 10 + and tokens[0] == "Current" + and tokens[1] == "MIP" + and tokens[2] == "best" + and tokens[3] == "bound" + ): self._best_bound = float(tokens[5]) self._gap = float(tokens[8].rstrip(',')) # for the case below, CPLEX sometimes reports "true" optimal (the first case) # and other times within-tolerance optimal (the second case). - elif (len(tokens) >= 4 and tokens[0] == "MIP" and tokens[2] == "Integer" and tokens[3] == "optimal") or \ - (len(tokens) >= 4 and tokens[0] == "MIP" and tokens[2] == "Integer" and tokens[3] == "optimal,"): + elif ( + len(tokens) >= 4 + and tokens[0] == "MIP" + and tokens[2] == "Integer" + and tokens[3] == "optimal" + ) or ( + len(tokens) >= 4 + and tokens[0] == "MIP" + and tokens[2] == "Integer" + and tokens[3] == "optimal," + ): # if CPLEX has previously printed an error message, reduce it to a warning - # there is a strong indication it recovered, but we can't be sure. if results.solver.status == SolverStatus.error: @@ -583,7 +640,11 @@ def process_logfile(self): results.solver.status = SolverStatus.ok results.solver.termination_condition = TerminationCondition.optimal results.solver.termination_message = ' '.join(tokens) - elif len(tokens) >= 3 and tokens[0] == "Presolve" and tokens[2] == "Infeasible.": + elif ( + len(tokens) >= 3 + and tokens[0] == "Presolve" + and tokens[2] == "Infeasible." + ): # if CPLEX has previously printed an error message, reduce it to a warning - # there is a strong indication it recovered, but we can't be sure. if results.solver.status == SolverStatus.error: @@ -592,19 +653,27 @@ def process_logfile(self): results.solver.status = SolverStatus.ok results.solver.termination_condition = TerminationCondition.infeasible results.solver.termination_message = ' '.join(tokens) - elif ((len(tokens) == 6) and \ - (tokens[2] == "Integer") and \ - (tokens[3] == "infeasible") and \ - (tokens[5] == "unbounded.")) or \ - ((len(tokens) >= 4) and \ - (tokens[0] == "MIP") and \ - (tokens[1] == "-") and \ - (tokens[2] == "Integer") and \ - (tokens[3] == "unbounded:")) or \ - ((len(tokens) >= 5) and \ - (tokens[0] == "Presolve") and \ - (tokens[2] == "Unbounded") and \ - (tokens[4] == "infeasible.")): + elif ( + ( + (len(tokens) == 6) + and (tokens[2] == "Integer") + and (tokens[3] == "infeasible") + and (tokens[5] == "unbounded.") + ) + or ( + (len(tokens) >= 4) + and (tokens[0] == "MIP") + and (tokens[1] == "-") + and (tokens[2] == "Integer") + and (tokens[3] == "unbounded:") + ) + or ( + (len(tokens) >= 5) + and (tokens[0] == "Presolve") + and (tokens[2] == "Unbounded") + and (tokens[4] == "infeasible.") + ) + ): # if CPLEX has previously printed an error message, reduce it to a warning - # there is a strong indication it recovered, but we can't be sure. if results.solver.status == SolverStatus.error: @@ -618,13 +687,14 @@ def process_logfile(self): try: if isinstance(results.solver.termination_message, str): - results.solver.termination_message = results.solver.termination_message.replace(':', '\\x3a') + results.solver.termination_message = ( + results.solver.termination_message.replace(':', '\\x3a') + ) except: pass return results - def process_soln_file(self,results): - + def process_soln_file(self, results): # the only suffixes that we extract from CPLEX are # constraint duals, constraint slacks, and variable # reduced-costs. scan through the solver suffix list @@ -637,27 +707,30 @@ def process_soln_file(self,results): extract_lrc = False extract_urc = False for suffix in self._suffixes: - flag=False - if re.match(suffix,"dual"): + flag = False + if re.match(suffix, "dual"): extract_duals = True - flag=True - if re.match(suffix,"slack"): + flag = True + if re.match(suffix, "slack"): extract_slacks = True - flag=True - if re.match(suffix,"rc"): + flag = True + if re.match(suffix, "rc"): extract_reduced_costs = True extract_rc = True - flag=True - if re.match(suffix,"lrc"): + flag = True + if re.match(suffix, "lrc"): extract_reduced_costs = True extract_lrc = True - flag=True - if re.match(suffix,"urc"): + flag = True + if re.match(suffix, "urc"): extract_reduced_costs = True extract_urc = True - flag=True + flag = True if not flag: - raise RuntimeError("***The CPLEX solver plugin cannot extract solution suffix="+suffix) + raise RuntimeError( + "***The CPLEX solver plugin cannot extract solution suffix=" + + suffix + ) # check for existence of the solution file # not sure why we just return - would think that we @@ -668,43 +741,49 @@ def process_soln_file(self,results): range_duals = {} range_slacks = {} soln = Solution() - soln.objective['__default_objective__'] = {'Value':None} + soln.objective['__default_objective__'] = {'Value': None} # caching for efficiency soln_variables = soln.variable soln_constraints = soln.constraint INPUT = open(self._soln_file, "r") - results.problem.number_of_objectives=1 + results.problem.number_of_objectives = 1 time_limit_exceeded = False - mip_problem=False + mip_problem = False for line in INPUT: line = line.strip() line = line.lstrip('?') - tokens=line.split(' ') + tokens = line.split(' ') if tokens[0] == "variable": variable_name = None variable_value = None variable_reduced_cost = None variable_status = None - for i in range(1,len(tokens)): - field_name = tokens[i].split('=')[0] + for i in range(1, len(tokens)): + field_name = tokens[i].split('=')[0] field_value = tokens[i].split('=')[1].lstrip("\"").rstrip("\"") if field_name == "name": variable_name = field_value elif field_name == "value": variable_value = field_value - elif (extract_reduced_costs is True) and (field_name == "reducedCost"): + elif (extract_reduced_costs is True) and ( + field_name == "reducedCost" + ): variable_reduced_cost = field_value elif (extract_reduced_costs is True) and (field_name == "status"): variable_status = field_value # skip the "constant-one" variable, used to capture/retain objective offsets in the CPLEX LP format. if variable_name != "ONE_VAR_CONSTANT": - variable = soln_variables[variable_name] = {"Value" : float(variable_value)} - if (variable_reduced_cost is not None) and (extract_reduced_costs is True): + variable = soln_variables[variable_name] = { + "Value": float(variable_value) + } + if (variable_reduced_cost is not None) and ( + extract_reduced_costs is True + ): try: if extract_rc is True: variable["Rc"] = float(variable_reduced_cost) @@ -720,13 +799,20 @@ def process_soln_file(self,results): else: variable["Urc"] = 0.0 except: - raise ValueError("Unexpected reduced-cost value="+str(variable_reduced_cost)+" encountered for variable="+variable_name) - elif (tokens[0] == "constraint") and ((extract_duals is True) or (extract_slacks is True)): + raise ValueError( + "Unexpected reduced-cost value=" + + str(variable_reduced_cost) + + " encountered for variable=" + + variable_name + ) + elif (tokens[0] == "constraint") and ( + (extract_duals is True) or (extract_slacks is True) + ): is_range = False rlabel = None rkey = None - for i in range(1,len(tokens)): - field_name = tokens[i].split('=')[0] + for i in range(1, len(tokens)): + field_name = tokens[i].split('=')[0] field_value = tokens[i].split('=')[1].lstrip("\"").rstrip("\"") if field_name == "name": if field_value.startswith('c_'): @@ -739,80 +825,105 @@ def process_soln_file(self,results): is_range = True rlabel = field_value[4:] rkey = 1 - elif (extract_duals is True) and (field_name == "dual"): # for LPs + elif (extract_duals is True) and (field_name == "dual"): # for LPs if is_range is False: constraint["Dual"] = float(field_value) else: - range_duals.setdefault(rlabel,[0,0])[rkey] = float(field_value) - elif (extract_slacks is True) and (field_name == "slack"): # for MIPs + range_duals.setdefault(rlabel, [0, 0])[rkey] = float( + field_value + ) + elif (extract_slacks is True) and ( + field_name == "slack" + ): # for MIPs if is_range is False: constraint["Slack"] = float(field_value) else: - range_slacks.setdefault(rlabel,[0,0])[rkey] = float(field_value) + range_slacks.setdefault(rlabel, [0, 0])[rkey] = float( + field_value + ) elif tokens[0].startswith("problemName"): filename = (tokens[0].split('=')[1].strip()).lstrip("\"").rstrip("\"") results.problem.name = os.path.basename(filename) if '.' in results.problem.name: results.problem.name = results.problem.name.split('.')[0] - tINPUT=open(filename,"r") + tINPUT = open(filename, "r") for tline in tINPUT: tline = tline.strip() if tline == "": continue - tokens = re.split('[\t ]+',tline) + tokens = re.split('[\t ]+', tline) if tokens[0][0] in ['\\', '*']: continue elif tokens[0] == "NAME": results.problem.name = tokens[1] else: sense = tokens[0].lower() - if sense in ['max','maximize']: + if sense in ['max', 'maximize']: results.problem.sense = ProblemSense.maximize - if sense in ['min','minimize']: + if sense in ['min', 'minimize']: results.problem.sense = ProblemSense.minimize break tINPUT.close() - elif tokens[0].startswith("objectiveValue") and tokens[0] != 'objectiveValues': + elif ( + tokens[0].startswith("objectiveValue") + and tokens[0] != 'objectiveValues' + ): # prior to 12.10.0, the objective value came back as an # attribute on the
tag - objective_value = (tokens[0].split('=')[1].strip()).lstrip("\"").rstrip("\"") - soln.objective['__default_objective__']['Value'] = float(objective_value) + objective_value = ( + (tokens[0].split('=')[1].strip()).lstrip("\"").rstrip("\"") + ) + soln.objective['__default_objective__']['Value'] = float( + objective_value + ) elif tokens[0] == "objective": # beginning in 12.10.0, CPLEX supports multiple # objectives in an tag fields = {} for field in tokens[1:]: - k,v = field.split('=') + k, v = field.split('=') fields[k] = v.strip('"') - soln.objective.setdefault(fields['name'], {})['Value'] = float(fields['value']) + soln.objective.setdefault(fields['name'], {})['Value'] = float( + fields['value'] + ) elif tokens[0].startswith("solutionStatusValue"): - pieces = tokens[0].split("=") - solution_status = eval(pieces[1]) - # solution status = 1 => optimal - # solution status = 3 => infeasible - if soln.status == SolutionStatus.unknown: - if solution_status == 1: - soln.status = SolutionStatus.optimal - elif solution_status == 3: - soln.status = SolutionStatus.infeasible - soln.gap = None - else: - # we are flagging anything with a solution status >= 4 as an error, to possibly - # be over-ridden as we learn more about the status (e.g., due to time limit exceeded). - soln.status = SolutionStatus.error - soln.gap = None + pieces = tokens[0].split("=") + solution_status = eval(pieces[1]) + # solution status = 1 => optimal + # solution status = 3 => infeasible + if soln.status == SolutionStatus.unknown: + if solution_status == 1: + soln.status = SolutionStatus.optimal + elif solution_status == 3: + soln.status = SolutionStatus.infeasible + soln.gap = None + else: + # we are flagging anything with a solution status >= 4 as an error, to possibly + # be over-ridden as we learn more about the status (e.g., due to time limit exceeded). + soln.status = SolutionStatus.error + soln.gap = None elif tokens[0].startswith("solutionStatusString"): - solution_status = ((" ".join(tokens).split('=')[1]).strip()).lstrip("\"").rstrip("\"") - if solution_status in ["optimal", "integer optimal solution", "integer optimal, tolerance"]: + solution_status = ( + ((" ".join(tokens).split('=')[1]).strip()).lstrip("\"").rstrip("\"") + ) + if solution_status in [ + "optimal", + "integer optimal solution", + "integer optimal, tolerance", + ]: soln.status = SolutionStatus.optimal soln.gap = 0.0 - results.problem.lower_bound = soln.objective['__default_objective__']['Value'] - results.problem.upper_bound = soln.objective['__default_objective__']['Value'] + results.problem.lower_bound = soln.objective[ + '__default_objective__' + ]['Value'] + results.problem.upper_bound = soln.objective[ + '__default_objective__' + ]['Value'] if "integer" in solution_status: - mip_problem=True + mip_problem = True elif solution_status in ["infeasible"]: soln.status = SolutionStatus.infeasible soln.gap = None @@ -822,21 +933,36 @@ def process_soln_file(self,results): time_limit_exceeded = True elif tokens[0].startswith("MIPNodes"): if mip_problem: - n = eval(eval((" ".join(tokens).split('=')[1]).strip()).lstrip("\"").rstrip("\"")) - results.solver.statistics.branch_and_bound.number_of_created_subproblems=n - results.solver.statistics.branch_and_bound.number_of_bounded_subproblems=n - elif tokens[0].startswith("primalFeasible") and (time_limit_exceeded is True): - primal_feasible = int(((" ".join(tokens).split('=')[1]).strip()).lstrip("\"").rstrip("\"")) + n = eval( + eval((" ".join(tokens).split('=')[1]).strip()) + .lstrip("\"") + .rstrip("\"") + ) + results.solver.statistics.branch_and_bound.number_of_created_subproblems = ( + n + ) + results.solver.statistics.branch_and_bound.number_of_bounded_subproblems = ( + n + ) + elif tokens[0].startswith("primalFeasible") and ( + time_limit_exceeded is True + ): + primal_feasible = int( + ((" ".join(tokens).split('=')[1]).strip()).lstrip("\"").rstrip("\"") + ) if primal_feasible == 1: soln.status = SolutionStatus.feasible - if (results.problem.sense == ProblemSense.minimize): - results.problem.upper_bound = soln.objective['__default_objective__']['Value'] + if results.problem.sense == ProblemSense.minimize: + results.problem.upper_bound = soln.objective[ + '__default_objective__' + ]['Value'] else: - results.problem.lower_bound = soln.objective['__default_objective__']['Value'] + results.problem.lower_bound = soln.objective[ + '__default_objective__' + ]['Value'] else: soln.status = SolutionStatus.infeasible - if self._best_bound is not None: if results.problem.sense == ProblemSense.minimize: results.problem.lower_bound = self._best_bound @@ -847,38 +973,42 @@ def process_soln_file(self,results): # For the range constraints, supply only the dual with the largest # magnitude (at least one should always be numerically zero) - for key,(ld,ud) in range_duals.items(): + for key, (ld, ud) in range_duals.items(): if abs(ld) > abs(ud): - soln_constraints['r_l_'+key] = {"Dual" : ld} + soln_constraints['r_l_' + key] = {"Dual": ld} else: - soln_constraints['r_l_'+key] = {"Dual" : ud} # Use the same key + soln_constraints['r_l_' + key] = {"Dual": ud} # Use the same key # slacks - for key,(ls,us) in range_slacks.items(): + for key, (ls, us) in range_slacks.items(): if abs(ls) > abs(us): - soln_constraints.setdefault('r_l_'+key,{})["Slack"] = ls + soln_constraints.setdefault('r_l_' + key, {})["Slack"] = ls else: - soln_constraints.setdefault('r_l_'+key,{})["Slack"] = us # Use the same key + soln_constraints.setdefault('r_l_' + key, {})[ + "Slack" + ] = us # Use the same key if not results.solver.status is SolverStatus.error: - if results.solver.termination_condition in [TerminationCondition.unknown, - #TerminationCondition.maxIterations, - #TerminationCondition.minFunctionValue, - #TerminationCondition.minStepLength, - TerminationCondition.globallyOptimal, - TerminationCondition.locallyOptimal, - TerminationCondition.optimal, - #TerminationCondition.maxEvaluations, - TerminationCondition.other]: + if results.solver.termination_condition in [ + TerminationCondition.unknown, + # TerminationCondition.maxIterations, + # TerminationCondition.minFunctionValue, + # TerminationCondition.minStepLength, + TerminationCondition.globallyOptimal, + TerminationCondition.locallyOptimal, + TerminationCondition.optimal, + # TerminationCondition.maxEvaluations, + TerminationCondition.other, + ]: results.solution.insert(soln) - elif (results.solver.termination_condition is \ - TerminationCondition.maxTimeLimit) and \ - (soln.status is not SolutionStatus.infeasible): + elif ( + results.solver.termination_condition + is TerminationCondition.maxTimeLimit + ) and (soln.status is not SolutionStatus.infeasible): results.solution.insert(soln) INPUT.close() def _postsolve(self): - # take care of the annoying (and empty) CPLEX temporary files in the current directory. # this approach doesn't seem overly efficient, but python os module functions don't # accept regular expression directly. @@ -892,7 +1022,7 @@ def _postsolve(self): # these logs don't matter anyway (we redirect everything), # and are largely an annoyance. try: - if re.match(r'cplex\.log', filename) != None: + if re.match(r'cplex\.log', filename) != None: os.remove(filename) elif re.match(r'clone\d+\.log', filename) != None: os.remove(filename) @@ -912,27 +1042,22 @@ def _postsolve(self): @SolverFactory.register('_mock_cplex') -class MockCPLEX(CPLEXSHELL,MockMIP): - """A Mock CPLEX solver used for testing - """ +class MockCPLEX(CPLEXSHELL, MockMIP): + """A Mock CPLEX solver used for testing""" def __init__(self, **kwds): try: CPLEXSHELL.__init__(self, **kwds) - except ApplicationError: #pragma:nocover - pass #pragma:nocover - MockMIP.__init__(self,"cplex") + except ApplicationError: # pragma:nocover + pass # pragma:nocover + MockMIP.__init__(self, "cplex") def available(self, exception_flag=True): - return CPLEXSHELL.available(self,exception_flag) + return CPLEXSHELL.available(self, exception_flag) def create_command_line(self, executable, problem_files): - command = CPLEXSHELL.create_command_line(self, - executable, - problem_files) - MockMIP.create_command_line(self, - executable, - problem_files) + command = CPLEXSHELL.create_command_line(self, executable, problem_files) + MockMIP.create_command_line(self, executable, problem_files) return command def _default_executable(self): @@ -940,5 +1065,3 @@ def _default_executable(self): def _execute_command(self, cmd): return MockMIP._execute_command(self, cmd) - - diff --git a/pyomo/solvers/plugins/solvers/GAMS.py b/pyomo/solvers/plugins/solvers/GAMS.py index 85e9ee5a06b..ae0b12cdad4 100644 --- a/pyomo/solvers/plugins/solvers/GAMS.py +++ b/pyomo/solvers/plugins/solvers/GAMS.py @@ -30,14 +30,22 @@ import pyomo.core.base.suffix import pyomo.core.kernel.suffix -from pyomo.opt.results import (SolverResults, SolverStatus, Solution, - SolutionStatus, TerminationCondition, ProblemSense) +from pyomo.opt.results import ( + SolverResults, + SolverStatus, + Solution, + SolutionStatus, + TerminationCondition, + ProblemSense, +) from pyomo.common.dependencies import attempt_import + gdxcc, gdxcc_available = attempt_import('gdxcc', defer_check=True) logger = logging.getLogger('pyomo.solvers') + class _GAMSSolver(object): """Aggregate of common methods for GAMS interfaces""" @@ -89,17 +97,17 @@ def _options_string_to_dict(istr): index = token.find('=') if index == -1: raise ValueError( - "Solver options must have the form option=value: '%s'" % istr) + "Solver options must have the form option=value: '%s'" % istr + ) try: - val = eval(token[(index+1):]) + val = eval(token[(index + 1) :]) except: - val = token[(index+1):] + val = token[(index + 1) :] ans[token[:index]] = val return ans def _simple_model(self, n): - return \ - """ + return """ option limrow = 0; option limcol = 0; option solprint = off; @@ -110,7 +118,9 @@ def _simple_model(self, n): obj.. ans =g= sum(I, x(I)); model test / all /; solve test using lp minimizing ans; - """ % (n,) + """ % ( + n, + ) # # Support "with" statements. @@ -122,7 +132,6 @@ def __exit__(self, t, v, traceback): pass - @SolverFactory.register('gams', doc='The GAMS modeling language') class GAMSSolver(_GAMSSolver): """ @@ -134,6 +143,7 @@ class GAMSSolver(_GAMSSolver): solver_io='shell' or 'gms' to use command line to call gams Requires the gams executable be on your system PATH. """ + def __new__(cls, *args, **kwds): mode = kwds.pop('solver_io', 'shell') if mode is None: @@ -148,8 +158,9 @@ def __new__(cls, *args, **kwds): return -@SolverFactory.register('_gams_direct', - doc='Direct python interface to the GAMS modeling language') +@SolverFactory.register( + '_gams_direct', doc='Direct python interface to the GAMS modeling language' +) class GAMSDirect(_GAMSSolver): """ A generic python interface to GAMS solvers. @@ -164,14 +175,17 @@ def available(self, exception_flag=True): except ImportError as e: if not exception_flag: return False - raise ImportError("Import of gams failed - GAMS direct " - "solver functionality is not available.\n" - "GAMS message: %s" % (e,)) + raise ImportError( + "Import of gams failed - GAMS direct " + "solver functionality is not available.\n" + "GAMS message: %s" % (e,) + ) avail = self._run_simple_model(1) if not avail and exception_flag: raise NameError( "'gams' command failed to solve a simple model - " - "GAMS shell solver functionality is not available.") + "GAMS shell solver functionality is not available." + ) return avail def license_is_valid(self): @@ -183,9 +197,10 @@ def _get_version(self): if not self.available(exception_flag=False): return _extract_version('') from gams import GamsWorkspace + ws = GamsWorkspace() version = tuple(int(i) for i in ws._version.split('.')[:4]) - while(len(version) < 4): + while len(version) < 4: version += (0,) return version @@ -193,8 +208,8 @@ def _run_simple_model(self, n): tmpdir = mkdtemp() try: from gams import GamsWorkspace, DebugLevel - ws = GamsWorkspace(debug=DebugLevel.Off, - working_directory=tmpdir) + + ws = GamsWorkspace(debug=DebugLevel.Off, working_directory=tmpdir) t1 = ws.add_job_from_string(self._simple_model(n)) t1.run() return True @@ -238,8 +253,9 @@ def solve(self, *args, **kwds): from gams.workspace import GamsExceptionExecution if len(args) != 1: - raise ValueError('Exactly one model must be passed ' - 'to solve method of GAMSSolver.') + raise ValueError( + 'Exactly one model must be passed to solve method of GAMSSolver.' + ) model = args[0] # self.options are default for each run, overwritten by kwds @@ -248,12 +264,12 @@ def solve(self, *args, **kwds): options.update(kwds) load_solutions = options.pop("load_solutions", True) - tee = options.pop("tee", False) - logfile = options.pop("logfile", None) - keepfiles = options.pop("keepfiles", False) - tmpdir = options.pop("tmpdir", None) - report_timing = options.pop("report_timing", False) - io_options = options.pop("io_options", {}) + tee = options.pop("tee", False) + logfile = options.pop("logfile", None) + keepfiles = options.pop("keepfiles", False) + tmpdir = options.pop("tmpdir", None) + report_timing = options.pop("report_timing", False) + io_options = options.pop("io_options", {}) # Pass remaining keywords to writer, which will handle # any unrecognized arguments @@ -261,6 +277,12 @@ def solve(self, *args, **kwds): initial_time = time.time() + # Because GAMS changes the CWD when running the solver, we need + # to convert user-provided file names to absolute paths + # (relative to the current directory) + if logfile is not None: + logfile = os.path.abspath(logfile) + #################################################################### # Presolve #################################################################### @@ -271,21 +293,25 @@ def solve(self, *args, **kwds): output_file = StringIO() if isinstance(model, IBlock): # Kernel blocks have slightly different write method - smap_id = model.write(filename=output_file, - format=ProblemFormat.gams, - _called_by_solver=True, - **io_options) + smap_id = model.write( + filename=output_file, + format=ProblemFormat.gams, + _called_by_solver=True, + **io_options + ) symbolMap = getattr(model, "._symbol_maps")[smap_id] else: - (_, smap_id) = model.write(filename=output_file, - format=ProblemFormat.gams, - io_options=io_options) + (_, smap_id) = model.write( + filename=output_file, format=ProblemFormat.gams, io_options=io_options + ) symbolMap = model.solutions.symbol_map[smap_id] presolve_completion_time = time.time() if report_timing: - print(" %6.2f seconds required for presolve" % - (presolve_completion_time - initial_time)) + print( + " %6.2f seconds required for presolve" + % (presolve_completion_time - initial_time) + ) #################################################################### # Apply solver @@ -300,9 +326,10 @@ def solve(self, *args, **kwds): if tmpdir is not None and os.path.exists(tmpdir): newdir = False - ws = GamsWorkspace(debug=DebugLevel.KeepFiles if keepfiles - else DebugLevel.Off, - working_directory=tmpdir) + ws = GamsWorkspace( + debug=DebugLevel.KeepFiles if keepfiles else DebugLevel.Off, + working_directory=tmpdir, + ) t1 = ws.add_job_from_string(output_file.getvalue()) @@ -318,8 +345,7 @@ def solve(self, *args, **kwds): # Always name working directory or delete files, # regardless of any errors. if keepfiles: - print("\nGAMS WORKING DIRECTORY: %s\n" % - ws.working_directory) + print("\nGAMS WORKING DIRECTORY: %s\n" % ws.working_directory) elif tmpdir is not None: # Garbage collect all references to t1.out_db # So that .gdx file can be deleted @@ -339,8 +365,10 @@ def solve(self, *args, **kwds): solve_completion_time = time.time() if report_timing: - print(" %6.2f seconds required for solver" % - (solve_completion_time - presolve_completion_time)) + print( + " %6.2f seconds required for solver" + % (solve_completion_time - presolve_completion_time) + ) #################################################################### # Postsolve @@ -348,36 +376,40 @@ def solve(self, *args, **kwds): # import suffixes must be on the top-level model if isinstance(model, IBlock): - model_suffixes = list(comp.storage_key for comp \ - in pyomo.core.kernel.suffix.\ - import_suffix_generator(model, - active=True, - descend_into=False)) + model_suffixes = list( + comp.storage_key + for comp in pyomo.core.kernel.suffix.import_suffix_generator( + model, active=True, descend_into=False + ) + ) else: - model_suffixes = list(name for (name,comp) \ - in pyomo.core.base.suffix.\ - active_import_suffix_generator(model)) - extract_dual = ('dual' in model_suffixes) - extract_rc = ('rc' in model_suffixes) + model_suffixes = list( + name + for ( + name, + comp, + ) in pyomo.core.base.suffix.active_import_suffix_generator(model) + ) + extract_dual = 'dual' in model_suffixes + extract_rc = 'rc' in model_suffixes results = SolverResults() results.problem.name = os.path.join(ws.working_directory, t1.name + '.gms') results.problem.lower_bound = t1.out_db["OBJEST"].find_record().value results.problem.upper_bound = t1.out_db["OBJEST"].find_record().value - results.problem.number_of_variables = \ - t1.out_db["NUMVAR"].find_record().value - results.problem.number_of_constraints = \ - t1.out_db["NUMEQU"].find_record().value - results.problem.number_of_nonzeros = \ - t1.out_db["NUMNZ"].find_record().value + results.problem.number_of_variables = t1.out_db["NUMVAR"].find_record().value + results.problem.number_of_constraints = t1.out_db["NUMEQU"].find_record().value + results.problem.number_of_nonzeros = t1.out_db["NUMNZ"].find_record().value results.problem.number_of_binary_variables = None # Includes binary vars: - results.problem.number_of_integer_variables = \ + results.problem.number_of_integer_variables = ( t1.out_db["NUMDVAR"].find_record().value - results.problem.number_of_continuous_variables = \ - t1.out_db["NUMVAR"].find_record().value \ + ) + results.problem.number_of_continuous_variables = ( + t1.out_db["NUMVAR"].find_record().value - t1.out_db["NUMDVAR"].find_record().value - results.problem.number_of_objectives = 1 # required by GAMS writer + ) + results.problem.number_of_objectives = 1 # required by GAMS writer obj = list(model.component_data_objects(Objective, active=True)) assert len(obj) == 1, 'Only one objective is allowed.' obj = obj[0] @@ -410,7 +442,9 @@ def solve(self, *args, **kwds): results.solver.termination_condition = TerminationCondition.maxEvaluations elif solvestat == 7: results.solver.status = SolverStatus.aborted - results.solver.termination_condition = TerminationCondition.licensingProblems + results.solver.termination_condition = ( + TerminationCondition.licensingProblems + ) elif solvestat == 8: results.solver.status = SolverStatus.aborted results.solver.termination_condition = TerminationCondition.userInterrupt @@ -419,7 +453,9 @@ def solve(self, *args, **kwds): results.solver.termination_condition = TerminationCondition.solverFailure elif solvestat == 11: results.solver.status = SolverStatus.error - results.solver.termination_condition = TerminationCondition.internalSolverError + results.solver.termination_condition = ( + TerminationCondition.internalSolverError + ) elif solvestat == 4: results.solver.status = SolverStatus.warning results.solver.message = "Solver quit with a problem (see LST file)" @@ -459,13 +495,17 @@ def solve(self, *args, **kwds): results.solver.termination_condition = TerminationCondition.optimal soln.status = SolutionStatus.optimal elif modelstat == 9: - results.solver.termination_condition = TerminationCondition.intermediateNonInteger + results.solver.termination_condition = ( + TerminationCondition.intermediateNonInteger + ) soln.status = SolutionStatus.other elif modelstat == 11: # Should be handled above, if modelstat and solvestat both # indicate a licensing problem if results.solver.termination_condition is None: - results.solver.termination_condition = TerminationCondition.licensingProblems + results.solver.termination_condition = ( + TerminationCondition.licensingProblems + ) soln.status = SolutionStatus.error elif modelstat in [12, 13]: if results.solver.termination_condition is None: @@ -484,11 +524,9 @@ def solve(self, *args, **kwds): # This is just a backup catch, all cases are handled above soln.status = SolutionStatus.error - soln.gap = abs(results.problem.upper_bound \ - - results.problem.lower_bound) + soln.gap = abs(results.problem.upper_bound - results.problem.lower_bound) - for sym, ref in symbolMap.bySymbol.items(): - obj = ref() + for sym, obj in symbolMap.bySymbol.items(): if isinstance(model, IBlock): # Kernel variables have no 'parent_component' if obj.ctype is IObjective: @@ -510,8 +548,7 @@ def solve(self, *args, **kwds): if extract_dual: for c in model.component_data_objects(Constraint, active=True): - if c.body.is_fixed() or \ - (not (c.has_lb() or c.has_ub())): + if c.body.is_fixed() or (not (c.has_lb() or c.has_ub())): # the constraint was not sent to GAMS continue sym = symbolMap.getSymbol(c) @@ -561,10 +598,12 @@ def solve(self, *args, **kwds): results._smap = None if isinstance(model, IBlock): if len(results.solution) == 1: - results.solution(0).symbol_map = \ - getattr(model, "._symbol_maps")[results._smap_id] - results.solution(0).default_variable_value = \ - self._default_variable_value + results.solution(0).symbol_map = getattr(model, "._symbol_maps")[ + results._smap_id + ] + results.solution( + 0 + ).default_variable_value = self._default_variable_value if load_solutions: model.load_solution(results.solution(0)) else: @@ -575,8 +614,7 @@ def solve(self, *args, **kwds): assert len(getattr(model, "._symbol_maps")) == 1 delattr(model, "._symbol_maps") del results._smap_id - if load_solutions and \ - (len(results.solution) == 0): + if load_solutions and (len(results.solution) == 0): logger.error("No solution is available") else: if load_solutions: @@ -589,16 +627,21 @@ def solve(self, *args, **kwds): postsolve_completion_time = time.time() if report_timing: - print(" %6.2f seconds required for postsolve" % - (postsolve_completion_time - solve_completion_time)) - print(" %6.2f seconds required total" % - (postsolve_completion_time - initial_time)) + print( + " %6.2f seconds required for postsolve" + % (postsolve_completion_time - solve_completion_time) + ) + print( + " %6.2f seconds required total" + % (postsolve_completion_time - initial_time) + ) return results -@SolverFactory.register('_gams_shell', - doc='Shell interface to the GAMS modeling language') +@SolverFactory.register( + '_gams_shell', doc='Shell interface to the GAMS modeling language' +) class GAMSShell(_GAMSSolver): """A generic shell interface to GAMS solvers.""" @@ -610,7 +653,8 @@ def available(self, exception_flag=True): return False raise NameError( "No 'gams' command found on system PATH - GAMS shell " - "solver functionality is not available.") + "solver functionality is not available." + ) # New versions of GAMS require a license to run anything. # Instead of parsing the output, we will try solving a trivial # model. @@ -618,7 +662,8 @@ def available(self, exception_flag=True): if not avail and exception_flag: raise NameError( "'gams' command failed to solve a simple model - " - "GAMS shell solver functionality is not available.") + "GAMS shell solver functionality is not available." + ) return avail def license_is_valid(self): @@ -626,15 +671,19 @@ def license_is_valid(self): return self._run_simple_model(5001) def _run_simple_model(self, n): + solver_exec = self.executable() + if solver_exec is None: + return False tmpdir = mkdtemp() try: test = os.path.join(tmpdir, 'test.gms') with open(test, 'w') as FILE: FILE.write(self._simple_model(n)) result = subprocess.run( - [self.executable(), test, "curdir=" + tmpdir, 'lo=0'], + [solver_exec, test, "curdir=" + tmpdir, 'lo=0'], stdout=subprocess.DEVNULL, - stderr=subprocess.DEVNULL) + stderr=subprocess.DEVNULL, + ) return not result.returncode finally: shutil.rmtree(tmpdir) @@ -643,8 +692,10 @@ def _run_simple_model(self, n): def _default_executable(self): executable = pyomo.common.Executable("gams") if not executable: - logger.warning("Could not locate the 'gams' executable, " - "which is required for solver gams") + logger.warning( + "Could not locate the 'gams' executable, " + "which is required for solver gams" + ) self.enable = False return None return executable.path() @@ -662,9 +713,12 @@ def _get_version(self): else: # specify logging to stdout for windows compatibility cmd = [solver_exec, "audit", "lo=3"] - results = subprocess.run(cmd, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + results = subprocess.run( + cmd, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) return _extract_version(results.stdout) @staticmethod @@ -712,8 +766,9 @@ def solve(self, *args, **kwds): self.available() if len(args) != 1: - raise ValueError('Exactly one model must be passed ' - 'to solve method of GAMSSolver.') + raise ValueError( + 'Exactly one model must be passed to solve method of GAMSSolver.' + ) model = args[0] # self.options are default for each run, overwritten by kwds @@ -722,12 +777,12 @@ def solve(self, *args, **kwds): options.update(kwds) load_solutions = options.pop("load_solutions", True) - tee = options.pop("tee", False) - logfile = options.pop("logfile", None) - keepfiles = options.pop("keepfiles", False) - tmpdir = options.pop("tmpdir", None) - report_timing = options.pop("report_timing", False) - io_options = options.pop("io_options", {}) + tee = options.pop("tee", False) + logfile = options.pop("logfile", None) + keepfiles = options.pop("keepfiles", False) + tmpdir = options.pop("tmpdir", None) + report_timing = options.pop("report_timing", False) + io_options = options.pop("io_options", {}) io_options.update(options) @@ -735,6 +790,12 @@ def solve(self, *args, **kwds): # any unrecognized arguments initial_time = time.time() + # Because GAMS changes the CWD when running the solver, we need + # to convert user-provided file names to absolute paths + # (relative to the current directory) + if logfile is not None: + logfile = os.path.abspath(logfile) + #################################################################### # Presolve #################################################################### @@ -763,37 +824,38 @@ def solve(self, *args, **kwds): put_results = "results" io_options["put_results"] = put_results - io_options.setdefault("put_results_format", - 'gdx' if gdxcc_available else 'dat') + io_options.setdefault("put_results_format", 'gdx' if gdxcc_available else 'dat') if io_options['put_results_format'] == 'gdx': - results_filename = os.path.join( - tmpdir, "GAMS_MODEL_p.gdx") - statresults_filename = os.path.join( - tmpdir, "%s_s.gdx" % (put_results,)) + results_filename = os.path.join(tmpdir, "GAMS_MODEL_p.gdx") + statresults_filename = os.path.join(tmpdir, "%s_s.gdx" % (put_results,)) else: - results_filename = os.path.join( - tmpdir, "%s.dat" % (put_results,)) - statresults_filename = os.path.join( - tmpdir, "%sstat.dat" % (put_results,)) + results_filename = os.path.join(tmpdir, "%s.dat" % (put_results,)) + statresults_filename = os.path.join(tmpdir, "%sstat.dat" % (put_results,)) if isinstance(model, IBlock): # Kernel blocks have slightly different write method - smap_id = model.write(filename=output_filename, - format=ProblemFormat.gams, - _called_by_solver=True, - **io_options) + smap_id = model.write( + filename=output_filename, + format=ProblemFormat.gams, + _called_by_solver=True, + **io_options + ) symbolMap = getattr(model, "._symbol_maps")[smap_id] else: - (_, smap_id) = model.write(filename=output_filename, - format=ProblemFormat.gams, - io_options=io_options) + (_, smap_id) = model.write( + filename=output_filename, + format=ProblemFormat.gams, + io_options=io_options, + ) symbolMap = model.solutions.symbol_map[smap_id] presolve_completion_time = time.time() if report_timing: - print(" %6.2f seconds required for presolve" % - (presolve_completion_time - initial_time)) + print( + " %6.2f seconds required for presolve" + % (presolve_completion_time - initial_time) + ) #################################################################### # Apply solver @@ -803,7 +865,7 @@ def solve(self, *args, **kwds): command = [exe, output, "o=" + lst, "curdir=" + tmpdir] if tee and not logfile: # default behaviour of gams is to print to console, for - # compatability with windows and *nix we want to explicitly log to + # compatibility with windows and *nix we want to explicitly log to # stdout (see https://www.gams.com/latest/docs/UG_GamsCall.html) command.append("lo=3") elif not tee and not logfile: @@ -820,8 +882,7 @@ def solve(self, *args, **kwds): if tee: ostreams.append(sys.stdout) with TeeStream(*ostreams) as t: - result = subprocess.run(command, stdout=t.STDOUT, - stderr=t.STDERR) + result = subprocess.run(command, stdout=t.STDOUT, stderr=t.STDERR) rc = result.returncode txt = ostreams[0].getvalue() @@ -836,22 +897,27 @@ def solve(self, *args, **kwds): # Run check_expr_evaluation, which errors if necessary check_expr_evaluation(model, symbolMap, 'shell') # If nothing was raised, or for all other cases, raise this - logger.error("GAMS encountered an error during solve. " - "Check listing file for details.") + logger.error( + "GAMS encountered an error during solve. " + "Check listing file for details." + ) logger.error(txt) if os.path.exists(lst_filename): with open(lst_filename, 'r') as FILE: - logger.error( - "GAMS Listing file:\n\n%s" % (FILE.read(),)) - raise RuntimeError("GAMS encountered an error during solve. " - "Check listing file for details.") + logger.error("GAMS Listing file:\n\n%s" % (FILE.read(),)) + raise RuntimeError( + "GAMS encountered an error during solve. " + "Check listing file for details." + ) if io_options['put_results_format'] == 'gdx': model_soln, stat_vars = self._parse_gdx_results( - results_filename, statresults_filename) + results_filename, statresults_filename + ) else: model_soln, stat_vars = self._parse_dat_results( - results_filename, statresults_filename) + results_filename, statresults_filename + ) finally: if not keepfiles: if newdir: @@ -864,8 +930,10 @@ def solve(self, *args, **kwds): solve_completion_time = time.time() if report_timing: - print(" %6.2f seconds required for solver" % - (solve_completion_time - presolve_completion_time)) + print( + " %6.2f seconds required for solver" + % (solve_completion_time - presolve_completion_time) + ) #################################################################### # Postsolve @@ -873,17 +941,22 @@ def solve(self, *args, **kwds): # import suffixes must be on the top-level model if isinstance(model, IBlock): - model_suffixes = list(comp.storage_key for comp \ - in pyomo.core.kernel.suffix.\ - import_suffix_generator(model, - active=True, - descend_into=False)) + model_suffixes = list( + comp.storage_key + for comp in pyomo.core.kernel.suffix.import_suffix_generator( + model, active=True, descend_into=False + ) + ) else: - model_suffixes = list(name for (name,comp) \ - in pyomo.core.base.suffix.\ - active_import_suffix_generator(model)) - extract_dual = ('dual' in model_suffixes) - extract_rc = ('rc' in model_suffixes) + model_suffixes = list( + name + for ( + name, + comp, + ) in pyomo.core.base.suffix.active_import_suffix_generator(model) + ) + extract_dual = 'dual' in model_suffixes + extract_rc = 'rc' in model_suffixes results = SolverResults() results.problem.name = output_filename @@ -895,9 +968,10 @@ def solve(self, *args, **kwds): results.problem.number_of_binary_variables = None # Includes binary vars: results.problem.number_of_integer_variables = stat_vars["NUMDVAR"] - results.problem.number_of_continuous_variables = stat_vars["NUMVAR"] \ - - stat_vars["NUMDVAR"] - results.problem.number_of_objectives = 1 # required by GAMS writer + results.problem.number_of_continuous_variables = ( + stat_vars["NUMVAR"] - stat_vars["NUMDVAR"] + ) + results.problem.number_of_objectives = 1 # required by GAMS writer obj = list(model.component_data_objects(Objective, active=True)) assert len(obj) == 1, 'Only one objective is allowed.' obj = obj[0] @@ -930,7 +1004,9 @@ def solve(self, *args, **kwds): results.solver.termination_condition = TerminationCondition.maxEvaluations elif solvestat == 7: results.solver.status = SolverStatus.aborted - results.solver.termination_condition = TerminationCondition.licensingProblems + results.solver.termination_condition = ( + TerminationCondition.licensingProblems + ) elif solvestat == 8: results.solver.status = SolverStatus.aborted results.solver.termination_condition = TerminationCondition.userInterrupt @@ -939,7 +1015,9 @@ def solve(self, *args, **kwds): results.solver.termination_condition = TerminationCondition.solverFailure elif solvestat == 11: results.solver.status = SolverStatus.error - results.solver.termination_condition = TerminationCondition.internalSolverError + results.solver.termination_condition = ( + TerminationCondition.internalSolverError + ) elif solvestat == 4: results.solver.status = SolverStatus.warning results.solver.message = "Solver quit with a problem (see LST file)" @@ -948,7 +1026,7 @@ def solve(self, *args, **kwds): elif solvestat == 6: results.solver.status = SolverStatus.unknown - results.solver.return_code = rc # 0 + results.solver.return_code = rc # 0 # Not sure if this value is actually user time # "the elapsed time it took to execute a solve statement in total" results.solver.user_time = stat_vars["ETSOLVE"] @@ -979,13 +1057,17 @@ def solve(self, *args, **kwds): results.solver.termination_condition = TerminationCondition.optimal soln.status = SolutionStatus.optimal elif modelstat == 9: - results.solver.termination_condition = TerminationCondition.intermediateNonInteger + results.solver.termination_condition = ( + TerminationCondition.intermediateNonInteger + ) soln.status = SolutionStatus.other elif modelstat == 11: # Should be handled above, if modelstat and solvestat both # indicate a licensing problem if results.solver.termination_condition is None: - results.solver.termination_condition = TerminationCondition.licensingProblems + results.solver.termination_condition = ( + TerminationCondition.licensingProblems + ) soln.status = SolutionStatus.error elif modelstat in [12, 13]: if results.solver.termination_condition is None: @@ -1004,12 +1086,10 @@ def solve(self, *args, **kwds): # This is just a backup catch, all cases are handled above soln.status = SolutionStatus.error - soln.gap = abs(results.problem.upper_bound \ - - results.problem.lower_bound) + soln.gap = abs(results.problem.upper_bound - results.problem.lower_bound) has_rc_info = True - for sym, ref in symbolMap.bySymbol.items(): - obj = ref() + for sym, obj in symbolMap.bySymbol.items(): if isinstance(model, IBlock): # Kernel variables have no 'parent_component' if obj.ctype is IObjective: @@ -1038,8 +1118,7 @@ def solve(self, *args, **kwds): if extract_dual: for c in model.component_data_objects(Constraint, active=True): - if (c.body.is_fixed()) or \ - (not (c.has_lb() or c.has_ub())): + if (c.body.is_fixed()) or (not (c.has_lb() or c.has_ub())): # the constraint was not sent to GAMS continue sym = symbolMap.getSymbol(c) @@ -1101,10 +1180,12 @@ def solve(self, *args, **kwds): results._smap = None if isinstance(model, IBlock): if len(results.solution) == 1: - results.solution(0).symbol_map = \ - getattr(model, "._symbol_maps")[results._smap_id] - results.solution(0).default_variable_value = \ - self._default_variable_value + results.solution(0).symbol_map = getattr(model, "._symbol_maps")[ + results._smap_id + ] + results.solution( + 0 + ).default_variable_value = self._default_variable_value if load_solutions: model.load_solution(results.solution(0)) else: @@ -1115,8 +1196,7 @@ def solve(self, *args, **kwds): assert len(getattr(model, "._symbol_maps")) == 1 delattr(model, "._symbol_maps") del results._smap_id - if load_solutions and \ - (len(results.solution) == 0): + if load_solutions and (len(results.solution) == 0): logger.error("No solution is available") else: if load_solutions: @@ -1129,18 +1209,32 @@ def solve(self, *args, **kwds): postsolve_completion_time = time.time() if report_timing: - print(" %6.2f seconds required for postsolve" % - (postsolve_completion_time - solve_completion_time)) - print(" %6.2f seconds required total" % - (postsolve_completion_time - initial_time)) + print( + " %6.2f seconds required for postsolve" + % (postsolve_completion_time - solve_completion_time) + ) + print( + " %6.2f seconds required total" + % (postsolve_completion_time - initial_time) + ) return results def _parse_gdx_results(self, results_filename, statresults_filename): model_soln = dict() - stat_vars = dict.fromkeys(['MODELSTAT', 'SOLVESTAT', 'OBJEST', - 'OBJVAL', 'NUMVAR', 'NUMEQU', 'NUMDVAR', - 'NUMNZ', 'ETSOLVE']) + stat_vars = dict.fromkeys( + [ + 'MODELSTAT', + 'SOLVESTAT', + 'OBJEST', + 'OBJVAL', + 'NUMVAR', + 'NUMEQU', + 'NUMDVAR', + 'NUMNZ', + 'ETSOLVE', + ] + ) pgdx = gdxcc.new_gdxHandle_tp() ret = gdxcc.gdxCreateD(pgdx, os.path.dirname(self.executable()), 128) @@ -1309,6 +1403,7 @@ def check_expr_evaluation(model, symbolMap, solver_io): for var in uninit_vars: var.set_value(None) + def check_expr(expr, name, solver_io): # Check if GAMS will encounter domain violations in presolver # operations at current values, which are None (0) by default @@ -1316,17 +1411,20 @@ def check_expr(expr, name, solver_io): try: value(expr) except (ValueError, ZeroDivisionError): - logger.warning("While evaluating model.%s's expression, GAMS solver " - "encountered an error.\nGAMS requires that all " - "equations and expressions evaluate at initial values.\n" - "Ensure variable values do not violate any domains, " - "and use the warmstart=True keyword to solve()." % name) + logger.warning( + "While evaluating model.%s's expression, GAMS solver " + "encountered an error.\nGAMS requires that all " + "equations and expressions evaluate at initial values.\n" + "Ensure variable values do not violate any domains, " + "and use the warmstart=True keyword to solve()." % name + ) if solver_io == 'shell': # For shell, there is no previous exception to worry about # overwriting, so raise the ValueError. # But for direct, the GamsExceptionExecution will be raised. raise + def file_removal_gams_direct(tmpdir, newdir): if newdir: shutil.rmtree(tmpdir) diff --git a/pyomo/solvers/plugins/solvers/GLPK.py b/pyomo/solvers/plugins/solvers/GLPK.py index 5054f3a04d9..a5b8ad9c019 100644 --- a/pyomo/solvers/plugins/solvers/GLPK.py +++ b/pyomo/solvers/plugins/solvers/GLPK.py @@ -20,8 +20,14 @@ from pyomo.common import Executable from pyomo.common.collections import Bunch from pyomo.opt import ( - SolverFactory, OptSolver, ProblemFormat, ResultsFormat, SolverResults, - TerminationCondition, SolutionStatus, ProblemSense, + SolverFactory, + OptSolver, + ProblemFormat, + ResultsFormat, + SolverResults, + TerminationCondition, + SolutionStatus, + ProblemSense, ) from pyomo.opt.base.solvers import _extract_version from pyomo.opt.solver import SystemCallSolver @@ -32,19 +38,19 @@ # Not sure how better to get these constants, but pulled from GLPK # documentation and source code (include/glpk.h) - # status of auxiliary / structural variables -GLP_BS = 1 # inactive constraint / basic variable -GLP_NL = 2 # active constraint or non-basic variable on lower bound -GLP_NU = 3 # active constraint or non-basic variable on upper bound -GLP_NF = 4 # active free row or non-basic free variable -GLP_NS = 5 # active equality constraint or non-basic fixed variable +# status of auxiliary / structural variables +GLP_BS = 1 # inactive constraint / basic variable +GLP_NL = 2 # active constraint or non-basic variable on lower bound +GLP_NU = 3 # active constraint or non-basic variable on upper bound +GLP_NF = 4 # active free row or non-basic free variable +GLP_NS = 5 # active equality constraint or non-basic fixed variable - # solution status -GLP_UNDEF = 'u' # solution is undefined -GLP_FEAS = 'f' # solution is feasible +# solution status +GLP_UNDEF = 'u' # solution is undefined +GLP_FEAS = 'f' # solution is feasible GLP_INFEAS = 'i' # solution is infeasible GLP_NOFEAS = 'n' # no feasible solution exists -GLP_OPT = 'o' # solution is optimal +GLP_OPT = 'o' # solution is optimal @SolverFactory.register('glpk', doc='The GLPK LP/MIP solver') @@ -68,8 +74,8 @@ def __new__(cls, *args, **kwds): @SolverFactory.register( - '_glpk_shell', - doc='Shell interface to the GNU Linear Programming Kit') + '_glpk_shell', doc='Shell interface to the GNU Linear Programming Kit' +) class GLPKSHELL(SystemCallSolver): """Shell interface to the GLPK LP/MIP solver""" @@ -77,7 +83,7 @@ class GLPKSHELL(SystemCallSolver): # version every time we run the solver. _known_versions = {} - def __init__ (self, **kwargs): + def __init__(self, **kwargs): # # Call base constructor # @@ -89,13 +95,15 @@ def __init__ (self, **kwargs): # # Valid problem formats, and valid results for each format # - self._valid_problem_formats = [ProblemFormat.cpxlp, - ProblemFormat.mps, - ProblemFormat.mod] + self._valid_problem_formats = [ + ProblemFormat.cpxlp, + ProblemFormat.mps, + ProblemFormat.mod, + ] self._valid_result_formats = { - ProblemFormat.mod: ResultsFormat.soln, - ProblemFormat.cpxlp: ResultsFormat.soln, - ProblemFormat.mps: ResultsFormat.soln, + ProblemFormat.mod: ResultsFormat.soln, + ProblemFormat.cpxlp: ResultsFormat.soln, + ProblemFormat.mps: ResultsFormat.soln, } self.set_problem_format(ProblemFormat.cpxlp) @@ -110,8 +118,10 @@ def _default_results_format(self, prob_format): def _default_executable(self): executable = Executable('glpsol') if not executable: - msg = ("Could not locate the 'glpsol' executable, which is " - "required for solver '%s'") + msg = ( + "Could not locate the 'glpsol' executable, which is " + "required for solver '%s'" + ) logger.warning(msg % self.name) self.enable = False return None @@ -199,10 +209,10 @@ def process_logfile(self): results = SolverResults() # For the lazy programmer, handle long variable names - prob = results.problem - solv = results.solver + prob = results.problem + solv = results.solver solv.termination_condition = TerminationCondition.unknown - stats = results.solver.statistics + stats = results.solver.statistics bbound = stats.branch_and_bound prob.upper_bound = float('inf') @@ -222,33 +232,48 @@ def process_logfile(self): solv.user_time = toks[1] elif len(toks) > 2 and (toks[0], toks[2]) == ("TIME", "EXCEEDED;"): solv.termination_condition = TerminationCondition.maxTimeLimit - elif len(toks) > 5 and (toks[:6] == ['PROBLEM', 'HAS', 'NO', 'DUAL', 'FEASIBLE', 'SOLUTION']): + elif len(toks) > 5 and ( + toks[:6] == ['PROBLEM', 'HAS', 'NO', 'DUAL', 'FEASIBLE', 'SOLUTION'] + ): solv.termination_condition = TerminationCondition.unbounded - elif len(toks) > 5 and (toks[:6] == ['PROBLEM', 'HAS', 'NO', 'PRIMAL', 'FEASIBLE', 'SOLUTION']): + elif len(toks) > 5 and ( + toks[:6] + == ['PROBLEM', 'HAS', 'NO', 'PRIMAL', 'FEASIBLE', 'SOLUTION'] + ): solv.termination_condition = TerminationCondition.infeasible - elif len(toks) > 4 and (toks[:5] == ['PROBLEM', 'HAS', 'NO', 'FEASIBLE', 'SOLUTION']): + elif len(toks) > 4 and ( + toks[:5] == ['PROBLEM', 'HAS', 'NO', 'FEASIBLE', 'SOLUTION'] + ): solv.termination_condition = TerminationCondition.infeasible - elif len(toks) > 6 and (toks[:7] == ['LP', 'RELAXATION', 'HAS', 'NO', 'DUAL', 'FEASIBLE', 'SOLUTION']): + elif len(toks) > 6 and ( + toks[:7] + == ['LP', 'RELAXATION', 'HAS', 'NO', 'DUAL', 'FEASIBLE', 'SOLUTION'] + ): solv.termination_condition = TerminationCondition.unbounded return results def _glpk_get_solution_status(self, status): - if GLP_FEAS == status: return SolutionStatus.feasible - elif GLP_INFEAS == status: return SolutionStatus.infeasible - elif GLP_NOFEAS == status: return SolutionStatus.infeasible - elif GLP_UNDEF == status: return SolutionStatus.other - elif GLP_OPT == status: return SolutionStatus.optimal + if GLP_FEAS == status: + return SolutionStatus.feasible + elif GLP_INFEAS == status: + return SolutionStatus.infeasible + elif GLP_NOFEAS == status: + return SolutionStatus.infeasible + elif GLP_UNDEF == status: + return SolutionStatus.other + elif GLP_OPT == status: + return SolutionStatus.optimal raise RuntimeError("Unknown solution status returned by GLPK solver") - def process_soln_file (self, results): + def process_soln_file(self, results): pdata = self._glpfile psoln = self._rawfile prob = results.problem solv = results.solver - prob.name = 'unknown' # will ostensibly get updated + prob.name = 'unknown' # will ostensibly get updated # Step 1: Make use of the GLPK's machine parseable format (--wglp) to # collect variable and constraint names. @@ -259,7 +284,7 @@ def process_soln_file (self, results): # order as the --write output. # Note that documentation for these formats is available from the GLPK # documentation of 'glp_read_prob' and 'glp_write_sol' - variable_names = dict() # cols + variable_names = dict() # cols constraint_names = dict() # rows obj_name = 'objective' @@ -272,17 +297,23 @@ def process_soln_file (self, results): pcols = int(pcols) # fails if not a number; intentional pnonz = int(pnonz) # fails if not a number; intentional - if pprob != 'p' or \ - ptype not in ('lp', 'mip') or \ - psense not in ('max', 'min') or \ - prows < 0 or pcols < 0 or pnonz < 0: + if ( + pprob != 'p' + or ptype not in ('lp', 'mip') + or psense not in ('max', 'min') + or prows < 0 + or pcols < 0 + or pnonz < 0 + ): raise ValueError - self.is_integer = ('mip' == ptype and True or False) - prob.sense = 'min' == psense and ProblemSense.minimize or ProblemSense.maximize + self.is_integer = 'mip' == ptype and True or False + prob.sense = ( + 'min' == psense and ProblemSense.minimize or ProblemSense.maximize + ) prob.number_of_constraints = prows - prob.number_of_nonzeros = pnonz - prob.number_of_variables = pcols + prob.number_of_nonzeros = pnonz + prob.number_of_variables = pcols for line in f: glp_line_count += 1 @@ -293,20 +324,20 @@ def process_soln_file (self, results): pass elif 'n' == switch: # naming some attribute ntype = tokens.pop(0) - name = tokens.pop() - if 'i' == ntype: # row + name = tokens.pop() + if 'i' == ntype: # row row = tokens.pop() constraint_names[int(row)] = name # --write order == --wglp order; store name w/ row no - elif 'j' == ntype: # var + elif 'j' == ntype: # var col = tokens.pop() variable_names[int(col)] = name # --write order == --wglp order; store name w/ col no - elif 'z' == ntype: # objective + elif 'z' == ntype: # objective obj_name = name - elif 'p' == ntype: # problem name + elif 'p' == ntype: # problem name prob.name = name - else: # anything else is incorrect. + else: # anything else is incorrect. raise ValueError else: @@ -325,23 +356,31 @@ def process_soln_file (self, results): row = next(reader) try: row = next(reader) - while (row[0] == 'c'): + while row[0] == 'c': row = next(reader) if not row[0] == 's': raise ValueError("Expecting 's' row after 'c' rows") if row[1] == 'bas': - self._process_soln_bas(row, reader, results, obj_name, variable_names, constraint_names) + self._process_soln_bas( + row, reader, results, obj_name, variable_names, constraint_names + ) elif row[1] == 'ipt': - self._process_soln_ipt(row, reader, results, obj_name, variable_names, constraint_names) + self._process_soln_ipt( + row, reader, results, obj_name, variable_names, constraint_names + ) elif row[1] == 'mip': - self._process_soln_mip(row, reader, results, obj_name, variable_names, constraint_names) + self._process_soln_mip( + row, reader, results, obj_name, variable_names, constraint_names + ) except Exception: print("ERROR: " + str(sys.exc_info()[1])) msg = "Error parsing solution data file, line %d" % reader.line_num raise ValueError(msg) - def _process_soln_bas(self, row, reader, results, obj_name, variable_names, constraint_names): + def _process_soln_bas( + self, row, reader, results, obj_name, variable_names, constraint_names + ): """ Process a basic solution """ @@ -363,7 +402,7 @@ def _process_soln_bas(self, row, reader, results, obj_name, variable_names, cons solv.termination_condition = TerminationCondition.other elif pstat == 'f': - soln = results.solution.add() + soln = results.solution.add() soln.status = SolutionStatus.feasible solv.termination_condition = TerminationCondition.optimal @@ -404,11 +443,11 @@ def _process_soln_bas(self, row, reader, results, obj_name, variable_names, cons continue rdual = float(rdual) if cname.startswith('c_'): - soln.constraint[cname] = {"Dual":rdual} + soln.constraint[cname] = {"Dual": rdual} elif cname.startswith('r_l_'): - range_duals.setdefault(cname[4:],[0,0])[0] = rdual + range_duals.setdefault(cname[4:], [0, 0])[0] = rdual elif cname.startswith('r_u_'): - range_duals.setdefault(cname[4:],[0,0])[1] = rdual + range_duals.setdefault(cname[4:], [0, 0])[1] = rdual elif rtype == 'j': # NOTE: we are not using the column status (cst) value right now @@ -418,9 +457,9 @@ def _process_soln_bas(self, row, reader, results, obj_name, variable_names, cons continue cprim = float(cprim) if extract_reduced_costs is False: - soln.variable[vname] = {"Value" : cprim} + soln.variable[vname] = {"Value": cprim} else: - soln.variable[vname] = {"Value" : cprim, "Rc" : float(cdual)} + soln.variable[vname] = {"Value": cprim, "Rc": float(cdual)} elif rtype == 'e': break @@ -429,23 +468,25 @@ def _process_soln_bas(self, row, reader, results, obj_name, variable_names, cons continue else: - raise ValueError("Unexpected row type: "+rtype) + raise ValueError("Unexpected row type: " + rtype) # For the range constraints, supply only the dual with the largest # magnitude (at least one should always be numerically zero) scon = soln.Constraint - for key, (ld,ud) in range_duals.items(): + for key, (ld, ud) in range_duals.items(): if abs(ld) > abs(ud): - scon['r_l_'+key] = {"Dual":ld} + scon['r_l_' + key] = {"Dual": ld} else: - scon['r_l_'+key] = {"Dual":ud} # Use the same key + scon['r_l_' + key] = {"Dual": ud} # Use the same key - def _process_soln_mip(self, row, reader, results, obj_name, variable_names, constraint_names): + def _process_soln_mip( + self, row, reader, results, obj_name, variable_names, constraint_names + ): """ Process a basic solution """ - #prows = int(row[2]) - #pcols = int(row[3]) + # prows = int(row[2]) + # pcols = int(row[3]) status = row[4] obj_val = float(row[5]) @@ -461,7 +502,7 @@ def _process_soln_mip(self, row, reader, results, obj_name, variable_names, cons solv.termination_condition = TerminationCondition.unbounded return - soln = results.solution.add() + soln = results.solution.add() if status == 'f': soln.status = SolutionStatus.feasible solv.termination_condition = TerminationCondition.feasible @@ -496,7 +537,7 @@ def _process_soln_mip(self, row, reader, results, obj_name, variable_names, cons vname = variable_names[int(cid)] if 'ONE_VAR_CONSTANT' == vname: continue - soln.variable[vname] = {"Value" : float(cval)} + soln.variable[vname] = {"Value": float(cval)} elif rtype == 'e': break @@ -505,13 +546,12 @@ def _process_soln_mip(self, row, reader, results, obj_name, variable_names, cons continue else: - raise ValueError("Unexpected row type: "+rtype) + raise ValueError("Unexpected row type: " + rtype) @SolverFactory.register('_mock_glpk') class MockGLPK(GLPKSHELL, MockMIP): - """A Mock GLPK solver used for testing - """ + """A Mock GLPK solver used for testing""" def __init__(self, **kwds): try: @@ -531,7 +571,7 @@ def create_command_line(self, executable, problem_files): def executable(self): return MockMIP.executable(self) - def _execute_command(self,cmd): + def _execute_command(self, cmd): return MockMIP._execute_command(self, cmd) def _convert_problem(self, args, pformat, valid_pformats): diff --git a/pyomo/solvers/plugins/solvers/GUROBI.py b/pyomo/solvers/plugins/solvers/GUROBI.py index bde1bd9e31f..45a44ac968b 100644 --- a/pyomo/solvers/plugins/solvers/GUROBI.py +++ b/pyomo/solvers/plugins/solvers/GUROBI.py @@ -24,7 +24,11 @@ from pyomo.opt.base import ProblemFormat, ResultsFormat, OptSolver from pyomo.opt.base.solvers import _extract_version, SolverFactory from pyomo.opt.results import ( - SolverStatus, TerminationCondition, SolutionStatus, ProblemSense, Solution, + SolverStatus, + TerminationCondition, + SolutionStatus, + ProblemSense, + Solution, ) from pyomo.opt.solver import ILMLicensedSystemCallSolver from pyomo.core.kernel.block import IBlock @@ -36,14 +40,14 @@ @SolverFactory.register('gurobi', doc='The GUROBI LP/MIP solver') class GUROBI(OptSolver): - """The GUROBI LP/MIP solver - """ + """The GUROBI LP/MIP solver""" + def __new__(cls, *args, **kwds): mode = kwds.pop('solver_io', 'lp') if mode is None: mode = 'lp' # - if mode == 'lp': + if mode == 'lp': return SolverFactory('_gurobi_shell', **kwds) if mode == 'mps': opt = SolverFactory('_gurobi_shell', **kwds) @@ -73,12 +77,12 @@ def __new__(cls, *args, **kwds): return opt - @SolverFactory.register( - '_gurobi_shell', doc='Shell interface to the GUROBI LP/MIP solver') + '_gurobi_shell', doc='Shell interface to the GUROBI LP/MIP solver' +) class GUROBISHELL(ILMLicensedSystemCallSolver): - """Shell interface to the GUROBI LP/MIP solver - """ + """Shell interface to the GUROBI LP/MIP solver""" + _solver_info_cache = {} def __init__(self, **kwds): @@ -101,8 +105,8 @@ def __init__(self, **kwds): # # Define valid problem formats and associated results formats # - self._valid_problem_formats=[ProblemFormat.cpxlp, ProblemFormat.mps] - self._valid_result_formats={} + self._valid_problem_formats = [ProblemFormat.cpxlp, ProblemFormat.mps] + self._valid_result_formats = {} self._valid_result_formats[ProblemFormat.cpxlp] = [ResultsFormat.soln] self._valid_result_formats[ProblemFormat.mps] = [ResultsFormat.soln] self.set_problem_format(ProblemFormat.cpxlp) @@ -131,8 +135,7 @@ def license_is_valid(self): if not solver_exec: licensed = False else: - executable = os.path.join( - os.path.dirname(solver_exec), 'gurobi_cl') + executable = os.path.join(os.path.dirname(solver_exec), 'gurobi_cl') try: rc = subprocess.call( [executable, "--license"], @@ -143,8 +146,7 @@ def license_is_valid(self): try: rc = subprocess.run( [solver_exec], - input=('import gurobipy; ' - 'gurobipy.Env().dispose(); quit()'), + input=('import gurobipy; gurobipy.Env().dispose(); quit()'), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, @@ -156,7 +158,6 @@ def license_is_valid(self): self._solver_info_cache[(solver_exec, 'licensed')] = licensed return licensed - def _default_results_format(self, prob_format): return ResultsFormat.soln @@ -180,22 +181,18 @@ def _warm_start(self, instance): # constraints output_index = 0 if isinstance(instance, IBlock): - smap = getattr(instance,"._symbol_maps")\ - [self._smap_id] + smap = getattr(instance, "._symbol_maps")[self._smap_id] else: smap = instance.solutions.symbol_map[self._smap_id] byObject = smap.byObject with open(self._warm_start_file_name, 'w') as mst_file: for vdata in instance.component_data_objects(Var): - if (vdata.value is not None) and \ - (id(vdata) in byObject): + if (vdata.value is not None) and (id(vdata) in byObject): name = byObject[id(vdata)] - mst_file.write("%s %s\n" - % (name, vdata.value)) + mst_file.write("%s %s\n" % (name, vdata.value)) # over-ride presolve to extract the warm-start keyword, if specified. def _presolve(self, *args, **kwds): - # create a context in the temporary file manager for # this plugin - is "pop"ed in the _postsolve method. TempfileManager.push() @@ -215,12 +212,10 @@ def _presolve(self, *args, **kwds): # file - assuming that the user has already, via some external # mechanism, invoked warm_start() with a instance to create the # warm start file. - if self._warm_start_solve and \ - isinstance(args[0], str): + if self._warm_start_solve and isinstance(args[0], str): # we assume the user knows what they are doing... pass - elif self._warm_start_solve and \ - (not isinstance(args[0], str)): + elif self._warm_start_solve and (not isinstance(args[0], str)): # assign the name of the warm start file *before* calling # the base class presolve - the base class method ends up # creating the command line, and the warm start file-name is @@ -228,7 +223,8 @@ def _presolve(self, *args, **kwds): if self._warm_start_file_name is None: assert not user_warmstart self._warm_start_file_name = TempfileManager.create_tempfile( - suffix='.gurobi.mst') + suffix='.gurobi.mst' + ) # let the base class handle any remaining keywords/actions. ILMLicensedSystemCallSolver._presolve(self, *args, **kwds) @@ -237,11 +233,11 @@ def _presolve(self, *args, **kwds): # symbol_map is actually constructed! if (len(args) > 0) and (not isinstance(args[0], str)): - if len(args) != 1: raise ValueError( "GUROBI _presolve method can only handle a single " - "problem instance - %s were supplied" % (len(args),)) + "problem instance - %s were supplied" % (len(args),) + ) # write the warm-start file - currently only supports MIPs. # we only know how to deal with a single problem instance. @@ -250,8 +246,9 @@ def _presolve(self, *args, **kwds): self._warm_start(args[0]) end_time = time.time() if self._report_timing is True: - print("Warm start write time=%.2f seconds" - % (end_time-start_time)) + print( + "Warm start write time=%.2f seconds" % (end_time - start_time) + ) def _default_executable(self): if sys.platform == 'win32': @@ -262,8 +259,10 @@ def _default_executable(self): return executable.path() if gurobipy_available: return sys.executable - logger.warning("Could not locate the 'gurobi' executable, " - "which is required for solver %s" % self.name) + logger.warning( + "Could not locate the 'gurobi' executable, " + "which is required for solver %s" % self.name + ) self.enable = False return None @@ -280,8 +279,7 @@ def _get_version(self): else: results = subprocess.run( [solver_exec], - input=('import gurobipy; ' - 'print(gurobipy.gurobi.version()); quit()'), + input=('import gurobipy; print(gurobipy.gurobi.version()); quit()'), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, @@ -289,7 +287,7 @@ def _get_version(self): ver = None try: ver = tuple(eval(results.stdout.strip())) - while(len(ver) < 4): + while len(ver) < 4: ver += (0,) except SyntaxError: ver = _extract_version('') @@ -298,15 +296,14 @@ def _get_version(self): self._solver_info_cache[(solver_exec, 'version')] = ver return ver - def create_command_line(self,executable,problem_files): + def create_command_line(self, executable, problem_files): # # Define log file # The log file in CPLEX contains the solution trace, but the # solver status can be found in the solution file. # if self._log_file is None: - self._log_file = TempfileManager.\ - create_tempfile(suffix = '.gurobi.log') + self._log_file = TempfileManager.create_tempfile(suffix='.gurobi.log') # # Define solution file @@ -314,8 +311,7 @@ def create_command_line(self,executable,problem_files): # solver status. # if self._soln_file is None: - self._soln_file = TempfileManager.\ - create_tempfile(suffix = '.gurobi.txt') + self._soln_file = TempfileManager.create_tempfile(suffix='.gurobi.txt') # # Write the GUROBI execution script @@ -338,20 +334,20 @@ def create_command_line(self,executable,problem_files): # explicitly. # NOTE: The gurobi plugin (GUROBI.py) and GUROBI_RUN.py live in # the same directory. - script = "import sys\n" + script = "import sys\n" script += "from gurobipy import *\n" script += "sys.path.append(%r)\n" % (this_file_dir(),) script += "from GUROBI_RUN import *\n" script += "gurobi_run(" - mipgap = float(self.options.mipgap) if \ - self.options.mipgap is not None else \ - None - for x in ( problem_filename, - warmstart_filename, - solution_filename, - None, - options_dict, - self._suffixes ): + mipgap = float(self.options.mipgap) if self.options.mipgap is not None else None + for x in ( + problem_filename, + warmstart_filename, + solution_filename, + None, + options_dict, + self._suffixes, + ): script += "%r," % x script += ")\n" script += "quit()\n" @@ -359,17 +355,14 @@ def create_command_line(self,executable,problem_files): # dump the script and warm-start file names for the # user if we're keeping files around. if self._keepfiles: - script_fname = TempfileManager.create_tempfile( - suffix='.gurobi.script') + script_fname = TempfileManager.create_tempfile(suffix='.gurobi.script') script_file = open(script_fname, 'w') - script_file.write( script ) + script_file.write(script) script_file.close() print("Solver script file: '%s'" % script_fname) - if self._warm_start_solve and \ - (self._warm_start_file_name is not None): - print("Solver warm-start file: " - +self._warm_start_file_name) + if self._warm_start_solve and (self._warm_start_file_name is not None): + print("Solver warm-start file: " + self._warm_start_file_name) # # Define command line @@ -377,9 +370,7 @@ def create_command_line(self,executable,problem_files): cmd = [executable] if self._timer: cmd.insert(0, self._timer) - return Bunch(cmd=cmd, script=script, - log_file=self._log_file, env=None) - + return Bunch(cmd=cmd, script=script, log_file=self._log_file, env=None) def process_soln_file(self, results): # the only suffixes that we extract from CPLEX are @@ -391,18 +382,21 @@ def process_soln_file(self, results): extract_slacks = False extract_rc = False for suffix in self._suffixes: - flag=False + flag = False if re.match(suffix, "dual"): extract_duals = True - flag=True + flag = True if re.match(suffix, "slack"): extract_slacks = True - flag=True + flag = True if re.match(suffix, "rc"): extract_rc = True - flag=True + flag = True if not flag: - raise RuntimeError("***The GUROBI solver plugin cannot extract solution suffix="+suffix) + raise RuntimeError( + "***The GUROBI solver plugin cannot extract solution suffix=" + + suffix + ) # check for existence of the solution file # not sure why we just return - would think that we @@ -425,7 +419,7 @@ def process_soln_file(self, results): # 2 - solution # 3 - solver - section = 0 # unknown + section = 0 # unknown solution_seen = False @@ -436,56 +430,69 @@ def process_soln_file(self, results): for line in INPUT: line = line.strip() tokens = [token.strip() for token in line.split(":")] - if (tokens[0] == 'section'): - if (tokens[1] == 'problem'): + if tokens[0] == 'section': + if tokens[1] == 'problem': section = 1 - elif (tokens[1] == 'solution'): + elif tokens[1] == 'solution': section = 2 solution_seen = True - elif (tokens[1] == 'solver'): + elif tokens[1] == 'solver': section = 3 else: - if (section == 2): - if (tokens[0] == 'var'): + if section == 2: + if tokens[0] == 'var': if tokens[1] != "ONE_VAR_CONSTANT": - soln_variables[tokens[1]] = {"Value" : float(tokens[2])} + soln_variables[tokens[1]] = {"Value": float(tokens[2])} num_variables_read += 1 - elif (tokens[0] == 'status'): + elif tokens[0] == 'status': soln.status = getattr(SolutionStatus, tokens[1]) - elif (tokens[0] == 'gap'): + elif tokens[0] == 'gap': soln.gap = float(tokens[1]) - elif (tokens[0] == 'objective'): + elif tokens[0] == 'objective': if tokens[1].strip() != 'None': - soln.objective['__default_objective__'] = \ - {'Value': float(tokens[1])} + soln.objective['__default_objective__'] = { + 'Value': float(tokens[1]) + } if results.problem.sense == ProblemSense.minimize: results.problem.upper_bound = float(tokens[1]) else: results.problem.lower_bound = float(tokens[1]) - elif (tokens[0] == 'constraintdual'): + elif tokens[0] == 'constraintdual': name = tokens[1] if name != "c_e_ONE_VAR_CONSTANT": if name.startswith('c_'): - soln_constraints.setdefault(tokens[1],{})["Dual"] = float(tokens[2]) + soln_constraints.setdefault(tokens[1], {})[ + "Dual" + ] = float(tokens[2]) elif name.startswith('r_l_'): - range_duals.setdefault(name[4:],[0,0])[0] = float(tokens[2]) + range_duals.setdefault(name[4:], [0, 0])[0] = float( + tokens[2] + ) elif name.startswith('r_u_'): - range_duals.setdefault(name[4:],[0,0])[1] = float(tokens[2]) - elif (tokens[0] == 'constraintslack'): + range_duals.setdefault(name[4:], [0, 0])[1] = float( + tokens[2] + ) + elif tokens[0] == 'constraintslack': name = tokens[1] if name != "c_e_ONE_VAR_CONSTANT": if name.startswith('c_'): - soln_constraints.setdefault(tokens[1],{})["Slack"] = float(tokens[2]) + soln_constraints.setdefault(tokens[1], {})[ + "Slack" + ] = float(tokens[2]) elif name.startswith('r_l_'): - range_slacks.setdefault(name[4:],[0,0])[0] = float(tokens[2]) + range_slacks.setdefault(name[4:], [0, 0])[0] = float( + tokens[2] + ) elif name.startswith('r_u_'): - range_slacks.setdefault(name[4:],[0,0])[1] = float(tokens[2]) - elif (tokens[0] == 'varrc'): + range_slacks.setdefault(name[4:], [0, 0])[1] = float( + tokens[2] + ) + elif tokens[0] == 'varrc': if tokens[1] != "ONE_VAR_CONSTANT": soln_variables[tokens[1]]["Rc"] = float(tokens[2]) else: setattr(soln, tokens[0], tokens[1]) - elif (section == 1): + elif section == 1: if tokens[0] == 'sense': if tokens[1] == 'minimize': results.problem.sense = ProblemSense.minimize @@ -497,14 +504,18 @@ def process_soln_file(self, results): except: val = tokens[1] setattr(results.problem, tokens[0], val) - elif (section == 3): - if (tokens[0] == 'status'): + elif section == 3: + if tokens[0] == 'status': results.solver.status = getattr(SolverStatus, tokens[1]) - elif (tokens[0] == 'termination_condition'): + elif tokens[0] == 'termination_condition': try: - results.solver.termination_condition = getattr(TerminationCondition, tokens[1]) + results.solver.termination_condition = getattr( + TerminationCondition, tokens[1] + ) except AttributeError: - results.solver.termination_condition = TerminationCondition.unknown + results.solver.termination_condition = ( + TerminationCondition.unknown + ) else: setattr(results.solver, tokens[0], tokens[1]) @@ -512,25 +523,24 @@ def process_soln_file(self, results): # For the range constraints, supply only the dual with the largest # magnitude (at least one should always be numerically zero) - for key,(ld,ud) in range_duals.items(): + for key, (ld, ud) in range_duals.items(): if abs(ld) > abs(ud): - soln_constraints['r_l_'+key] = {"Dual" : ld} + soln_constraints['r_l_' + key] = {"Dual": ld} else: # Use the same key - soln_constraints['r_l_'+key] = {"Dual" : ud} + soln_constraints['r_l_' + key] = {"Dual": ud} # slacks - for key,(ls,us) in range_slacks.items(): + for key, (ls, us) in range_slacks.items(): if abs(ls) > abs(us): - soln_constraints.setdefault('r_l_'+key,{})["Slack"] = ls + soln_constraints.setdefault('r_l_' + key, {})["Slack"] = ls else: # Use the same key - soln_constraints.setdefault('r_l_'+key,{})["Slack"] = us + soln_constraints.setdefault('r_l_' + key, {})["Slack"] = us if solution_seen: results.solution.insert(soln) def _postsolve(self): - # take care of the annoying GUROBI log file in the current # directory. this approach doesn't seem overly efficient, but # python os module functions doesn't accept regular expression diff --git a/pyomo/solvers/plugins/solvers/GUROBI_RUN.py b/pyomo/solvers/plugins/solvers/GUROBI_RUN.py index 2d19d61ead7..2b505adf49c 100644 --- a/pyomo/solvers/plugins/solvers/GUROBI_RUN.py +++ b/pyomo/solvers/plugins/solvers/GUROBI_RUN.py @@ -19,6 +19,7 @@ """ from gurobipy import gurobi, read, GRB import sys + if sys.version_info[0] < 3: from itertools import izip as zip @@ -30,6 +31,7 @@ # rather, print an error message and return - the caller will know to look # in the logs in case of a failure. + def _is_numeric(x): try: float(x) @@ -37,25 +39,27 @@ def _is_numeric(x): return False return True -def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes): +def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes): # figure out what suffixes we need to extract. extract_duals = False extract_slacks = False extract_reduced_costs = False for suffix in suffixes: - flag=False - if re.match(suffix,"dual"): + flag = False + if re.match(suffix, "dual"): extract_duals = True - flag=True - if re.match(suffix,"slack"): + flag = True + if re.match(suffix, "slack"): extract_slacks = True - flag=True - if re.match(suffix,"rc"): + flag = True + if re.match(suffix, "rc"): extract_reduced_costs = True - flag=True + flag = True if not flag: - print("***The GUROBI solver plugin cannot extract solution suffix="+suffix) + print( + "***The GUROBI solver plugin cannot extract solution suffix=" + suffix + ) return # Load the lp model @@ -69,10 +73,12 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) # printing the module will crash (when we have a QCP) if GUROBI_VERSION[0] >= 5: if (extract_reduced_costs is True) or (extract_duals is True): - model.setParam(GRB.Param.QCPDual,1) + model.setParam(GRB.Param.QCPDual, 1) if model is None: - print("***The GUROBI solver plugin failed to load the input LP file="+soln_file) + print( + "***The GUROBI solver plugin failed to load the input LP file=" + soln_file + ) return if warmstart_file is not None: @@ -104,7 +110,6 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) raise model.setParam(key, float(value)) - if 'relax_integrality' in options: for v in model.getVars(): if v.vType != GRB.CONTINUOUS: @@ -122,85 +127,94 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) solver_status = model.getAttr(GRB.Attr.Status) solution_status = None return_code = 0 - if (solver_status == GRB.LOADED): + if solver_status == GRB.LOADED: status = 'aborted' - message = 'Model is loaded, but no solution information is availale.' + message = 'Model is loaded, but no solution information is available.' term_cond = 'error' solution_status = 'unknown' - elif (solver_status == GRB.OPTIMAL): + elif solver_status == GRB.OPTIMAL: status = 'ok' message = 'Model was solved to optimality (subject to tolerances), and an optimal solution is available.' term_cond = 'optimal' solution_status = 'optimal' - elif (solver_status == GRB.INFEASIBLE): + elif solver_status == GRB.INFEASIBLE: status = 'warning' message = 'Model was proven to be infeasible.' term_cond = 'infeasible' solution_status = 'infeasible' - elif (solver_status == GRB.INF_OR_UNBD): + elif solver_status == GRB.INF_OR_UNBD: status = 'warning' message = 'Problem proven to be infeasible or unbounded.' term_cond = 'infeasibleOrUnbounded' solution_status = 'unsure' - elif (solver_status == GRB.UNBOUNDED): + elif solver_status == GRB.UNBOUNDED: status = 'warning' message = 'Model was proven to be unbounded.' term_cond = 'unbounded' solution_status = 'unbounded' - elif (solver_status == GRB.CUTOFF): + elif solver_status == GRB.CUTOFF: status = 'aborted' message = 'Optimal objective for model was proven to be worse than the value specified in the Cutoff parameter. No solution information is available.' term_cond = 'minFunctionValue' solution_status = 'unknown' - elif (solver_status == GRB.ITERATION_LIMIT): + elif solver_status == GRB.ITERATION_LIMIT: status = 'aborted' message = 'Optimization terminated because the total number of simplex iterations performed exceeded the value specified in the IterationLimit parameter.' term_cond = 'maxIterations' solution_status = 'stoppedByLimit' - elif (solver_status == GRB.NODE_LIMIT): + elif solver_status == GRB.NODE_LIMIT: status = 'aborted' message = 'Optimization terminated because the total number of branch-and-cut nodes explored exceeded the value specified in the NodeLimit parameter.' term_cond = 'maxEvaluations' solution_status = 'stoppedByLimit' - elif (solver_status == GRB.TIME_LIMIT): + elif solver_status == GRB.TIME_LIMIT: status = 'aborted' message = 'Optimization terminated because the time expended exceeded the value specified in the TimeLimit parameter.' term_cond = 'maxTimeLimit' solution_status = 'stoppedByLimit' - elif (solver_status == GRB.SOLUTION_LIMIT): + elif hasattr(GRB, "WORK_LIMIT") and (solver_status == GRB.WORK_LIMIT): + status = 'aborted' + message = 'Optimization terminated because the work expended exceeded the value specified in the WorkLimit parameter.' + term_cond = 'maxTimeLimit' + solution_status = 'stoppedByLimit' + elif solver_status == GRB.SOLUTION_LIMIT: status = 'aborted' message = 'Optimization terminated because the number of solutions found reached the value specified in the SolutionLimit parameter.' term_cond = 'stoppedByLimit' solution_status = 'stoppedByLimit' - elif (solver_status == GRB.INTERRUPTED): + elif solver_status == GRB.INTERRUPTED: status = 'aborted' message = 'Optimization was terminated by the user.' term_cond = 'error' solution_status = 'error' - elif (solver_status == GRB.NUMERIC): + elif solver_status == GRB.NUMERIC: status = 'error' - message = 'Optimization was terminated due to unrecoverable numerical difficulties.' + message = ( + 'Optimization was terminated due to unrecoverable numerical difficulties.' + ) term_cond = 'error' solution_status = 'error' - elif (solver_status == GRB.SUBOPTIMAL): + elif solver_status == GRB.SUBOPTIMAL: status = 'warning' message = 'Unable to satisfy optimality tolerances; a sub-optimal solution is available.' term_cond = 'other' solution_status = 'feasible' # note that USER_OBJ_LIMIT was added in Gurobi 7.0, so it may not be present - elif (solver_status is not None) and \ - (solver_status == getattr(GRB,'USER_OBJ_LIMIT',None)): + elif (solver_status is not None) and ( + solver_status == getattr(GRB, 'USER_OBJ_LIMIT', None) + ): status = 'aborted' - message = "User specified an objective limit " \ - "(a bound on either the best objective " \ - "or the best bound), and that limit has " \ - "been reached. Solution is available." + message = ( + "User specified an objective limit " + "(a bound on either the best objective " + "or the best bound), and that limit has " + "been reached. Solution is available." + ) term_cond = 'other' solution_status = 'stoppedByLimit' else: status = 'error' - message = ("Unhandled Gurobi solve status " - "("+str(solver_status)+")") + message = "Unhandled Gurobi solve status (" + str(solver_status) + ")" term_cond = 'error' solution_status = 'error' assert solution_status is not None @@ -211,14 +225,14 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) except: obj_value = None if term_cond == "unbounded": - if (sense < 0): + if sense < 0: # maximize obj_value = float('inf') else: # minimize obj_value = float('-inf') elif term_cond == "infeasible": - if (sense < 0): + if sense < 0: # maximize obj_value = float('-inf') else: @@ -231,7 +245,7 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) # write the information required by results.problem solnfile.write("section:problem\n") name = model.getAttr(GRB.Attr.ModelName) - solnfile.write("name: "+name+'\n') + solnfile.write("name: " + name + '\n') # TODO: find out about bounds and fix this with error checking # this line fails for some reason so set the value to unknown @@ -243,7 +257,7 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) else: bound = None - if (sense < 0): + if sense < 0: solnfile.write("sense:maximize\n") if bound is None: solnfile.write("upper_bound: %f\n" % float('inf')) @@ -264,7 +278,9 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) qcons = [] if GUROBI_VERSION[0] >= 5: qcons = model.getQConstrs() - solnfile.write("number_of_constraints: %d\n" % (len(cons)+len(qcons)+model.NumSOS,)) + solnfile.write( + "number_of_constraints: %d\n" % (len(cons) + len(qcons) + model.NumSOS,) + ) vars = model.getVars() solnfile.write("number_of_variables: %d\n" % len(vars)) @@ -275,7 +291,7 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) n_intvars = model.getAttr(GRB.Attr.NumIntVars) solnfile.write("number_of_integer_variables: %d\n" % n_intvars) - solnfile.write("number_of_continuous_variables: %d\n" % (len(vars)-n_intvars,)) + solnfile.write("number_of_continuous_variables: %d\n" % (len(vars) - n_intvars,)) solnfile.write("number_of_nonzeros: %d\n" % model.getAttr(GRB.Attr.NumNZs)) @@ -290,7 +306,7 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) solnfile.write('termination_message: %s\n' % message) is_discrete = False - if (model.getAttr(GRB.Attr.IsMIP)): + if model.getAttr(GRB.Attr.IsMIP): is_discrete = True if (term_cond == 'optimal') or (model.getAttr(GRB.Attr.SolCount) >= 1): @@ -318,7 +334,7 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) if (is_discrete is False) and (extract_duals is True): vals = model.getAttr("Pi", cons) for val, name in zip(vals, con_names): - # Pi attributes in Gurobi are the constraint duals + # Pi attributes in Gurobi are the constraint duals solnfile.write("constraintdual: %s : %s\n" % (str(name), str(val))) if GUROBI_VERSION[0] >= 5: vals = model.getAttr("QCPi", qcons) @@ -326,7 +342,7 @@ def gurobi_run(model_file, warmstart_file, soln_file, mipgap, options, suffixes) # QCPI attributes in Gurobi are the constraint duals solnfile.write("constraintdual: %s : %s\n" % (str(name), str(val))) - if (extract_slacks is True): + if extract_slacks is True: vals = model.getAttr("Slack", cons) for val, name in zip(vals, con_names): solnfile.write("constraintslack: %s : %s\n" % (str(name), str(val))) diff --git a/pyomo/solvers/plugins/solvers/IPOPT.py b/pyomo/solvers/plugins/solvers/IPOPT.py index 0ff58e1d46c..611180113c8 100644 --- a/pyomo/solvers/plugins/solvers/IPOPT.py +++ b/pyomo/solvers/plugins/solvers/IPOPT.py @@ -19,9 +19,10 @@ from pyomo.opt.base import ProblemFormat, ResultsFormat from pyomo.opt.base.solvers import _extract_version, SolverFactory from pyomo.opt.results import SolverStatus, SolverResults, TerminationCondition -from pyomo.opt.solver import SystemCallSolver +from pyomo.opt.solver import SystemCallSolver import logging + logger = logging.getLogger('pyomo.solvers') @@ -41,7 +42,7 @@ def __init__(self, **kwds): # Setup valid problem formats, and valid results for each problem format # Also set the default problem and results formats. # - self._valid_problem_formats=[ProblemFormat.nl] + self._valid_problem_formats = [ProblemFormat.nl] self._valid_result_formats = {} self._valid_result_formats[ProblemFormat.nl] = [ResultsFormat.sol] self.set_problem_format(ProblemFormat.nl) @@ -61,8 +62,10 @@ def _default_results_format(self, prob_format): def _default_executable(self): executable = Executable("ipopt") if not executable: - logger.warning("Could not locate the 'ipopt' executable, " - "which is required for solver %s" % self.name) + logger.warning( + "Could not locate the 'ipopt' executable, " + "which is required for solver %s" % self.name + ) self.enable = False return None return executable.path() @@ -74,23 +77,24 @@ def _get_version(self): solver_exec = self.executable() if solver_exec is None: return _extract_version('') - results = subprocess.run( [solver_exec,"-v"], timeout=1, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + results = subprocess.run( + [solver_exec, "-v"], + timeout=1, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) return _extract_version(results.stdout) def create_command_line(self, executable, problem_files): - - assert(self._problem_format == ProblemFormat.nl) - assert(self._results_format == ResultsFormat.sol) + assert self._problem_format == ProblemFormat.nl + assert self._results_format == ResultsFormat.sol # # Define log file # if self._log_file is None: - self._log_file = TempfileManager.\ - create_tempfile(suffix="_ipopt.log") + self._log_file = TempfileManager.create_tempfile(suffix="_ipopt.log") fname = problem_files[0] if '.' in fname: @@ -99,7 +103,7 @@ def create_command_line(self, executable, problem_files): fname = '.'.join(tmp[:-1]) else: fname = tmp[0] - self._soln_file = fname+".sol" + self._soln_file = fname + ".sol" # # Define results file (since an external parser is used) @@ -109,7 +113,7 @@ def create_command_line(self, executable, problem_files): # # Define command line # - env=os.environ.copy() + env = os.environ.copy() # # Merge the PYOMO_AMPLFUNC (externals defined within # Pyomo/Pyomo) with any user-specified external function @@ -138,11 +142,11 @@ def create_command_line(self, executable, problem_files): if key == "option_file_name": ofn_option_used = True if isinstance(self.options[key], str) and ' ' in self.options[key]: - env_opt.append(key+"=\""+str(self.options[key])+"\"") - cmd.append(str(key)+"="+str(self.options[key])) + env_opt.append(key + "=\"" + str(self.options[key]) + "\"") + cmd.append(str(key) + "=" + str(self.options[key])) else: - env_opt.append(key+"="+str(self.options[key])) - cmd.append(str(key)+"="+str(self.options[key])) + env_opt.append(key + "=" + str(self.options[key])) + cmd.append(str(key) + "=" + str(self.options[key])) if len(of_opt) > 0: # If the 'option_file_name' command-line option @@ -155,32 +159,33 @@ def create_command_line(self, executable, problem_files): "option for Ipopt can not be used " "when specifying options for the " "options file (i.e., options that " - "start with 'OF_'") + "start with 'OF_'" + ) # Now check if an 'ipopt.opt' file exists in the # current working directory. If so, we need to # make it clear that this file will be ignored. default_of_name = os.path.join(os.getcwd(), 'ipopt.opt') if os.path.exists(default_of_name): - logger.warning("A file named '%s' exists in " - "the current working directory, but " - "Ipopt options file options (i.e., " - "options that start with 'OF_') were " - "provided. The options file '%s' will " - "be ignored." % (default_of_name, - default_of_name)) + logger.warning( + "A file named '%s' exists in " + "the current working directory, but " + "Ipopt options file options (i.e., " + "options that start with 'OF_') were " + "provided. The options file '%s' will " + "be ignored." % (default_of_name, default_of_name) + ) # Now write the new options file - options_filename = TempfileManager.\ - create_tempfile(suffix="_ipopt.opt") + options_filename = TempfileManager.create_tempfile(suffix="_ipopt.opt") with open(options_filename, "w") as f: for key, val in of_opt: - f.write(key+" "+str(val)+"\n") + f.write(key + " " + str(val) + "\n") # Now set the command-line option telling Ipopt # to use this file - env_opt.append('option_file_name="'+str(options_filename)+'"') - cmd.append('option_file_name='+str(options_filename)) + env_opt.append('option_file_name="' + str(options_filename) + '"') + cmd.append('option_file_name=' + str(options_filename)) envstr = "%s_options" % self.options.solver # Merge with any options coming in through the environment diff --git a/pyomo/solvers/plugins/solvers/SCIPAMPL.py b/pyomo/solvers/plugins/solvers/SCIPAMPL.py index 0de81311048..69a24455706 100644 --- a/pyomo/solvers/plugins/solvers/SCIPAMPL.py +++ b/pyomo/solvers/plugins/solvers/SCIPAMPL.py @@ -10,7 +10,8 @@ # ___________________________________________________________________________ import os -import os.path + +# import os.path import subprocess from pyomo.common import Executable @@ -19,17 +20,22 @@ from pyomo.opt.base import ProblemFormat, ResultsFormat from pyomo.opt.base.solvers import _extract_version, SolverFactory -from pyomo.opt.results import SolverStatus, TerminationCondition, SolutionStatus +from pyomo.opt.results import ( + SolverStatus, + TerminationCondition, + SolutionStatus, + ProblemSense, +) from pyomo.opt.solver import SystemCallSolver import logging + logger = logging.getLogger('pyomo.solvers') @SolverFactory.register('scip', doc='The SCIP LP/MIP solver') class SCIPAMPL(SystemCallSolver): - """A generic optimizer that uses the AMPL Solver Library to interface with applications. - """ + """A generic optimizer that uses the AMPL Solver Library to interface with applications.""" # Cache default executable, so we do not need to repeatedly query the # versions every time. @@ -45,7 +51,7 @@ def __init__(self, **kwds): # Setup valid problem formats, and valid results for each problem format # Also set the default problem and results formats. # - self._valid_problem_formats=[ProblemFormat.nl] + self._valid_problem_formats = [ProblemFormat.nl] self._valid_result_formats = {} self._valid_result_formats[ProblemFormat.nl] = [ResultsFormat.sol] self.set_problem_format(ProblemFormat.nl) @@ -63,13 +69,14 @@ def _default_results_format(self, prob_format): return ResultsFormat.sol def _default_executable(self): - executable = Executable("scip") if executable: executable_path = executable.path() if executable_path not in self._known_versions: - self._known_versions[executable_path] = self._get_version(executable_path) + self._known_versions[executable_path] = self._get_version( + executable_path + ) _ver = self._known_versions[executable_path] if _ver and _ver >= (8,): return executable_path @@ -77,9 +84,11 @@ def _default_executable(self): # revert to scipampl for older versions executable = Executable("scipampl") if not executable: - logger.warning("Could not locate the 'scipampl' executable or" - " the 'scip' executable since 8.0.0, which is " - "required for solver %s" % self.name) + logger.warning( + "Could not locate the 'scip' executable or" + " the older 'scipampl' executable, which is " + "required for solver %s" % self.name + ) self.enable = False return None return executable.path() @@ -92,23 +101,24 @@ def _get_version(self, solver_exec=None): solver_exec = self.executable() if solver_exec is None: return _extract_version('') - results = subprocess.run([solver_exec, "--version"], timeout=1, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + results = subprocess.run( + [solver_exec, "--version"], + timeout=1, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) return _extract_version(results.stdout) def create_command_line(self, executable, problem_files): - - assert(self._problem_format == ProblemFormat.nl) - assert(self._results_format == ResultsFormat.sol) + assert self._problem_format == ProblemFormat.nl + assert self._results_format == ResultsFormat.sol # # Define log file # if self._log_file is None: - self._log_file = TempfileManager.\ - create_tempfile(suffix="_scipampl.log") + self._log_file = TempfileManager.create_tempfile(suffix="_scip.log") fname = problem_files[0] if '.' in fname: @@ -117,7 +127,7 @@ def create_command_line(self, executable, problem_files): fname = '.'.join(tmp[:-1]) else: fname = tmp[0] - self._soln_file = fname+".sol" + self._soln_file = fname + ".sol" # # Define results file (since an external parser is used) @@ -127,7 +137,7 @@ def create_command_line(self, executable, problem_files): # # Define command line # - env=os.environ.copy() + env = os.environ.copy() # # Merge the PYOMO_AMPLFUNC (externals defined within # Pyomo/Pyomo) with any user-specified external function @@ -157,18 +167,22 @@ def create_command_line(self, executable, problem_files): # to the command line. I'm not sure what solvers this method of passing options # through the envstr variable works for, but it does not seem to work for cplex # or gurobi - env_opt=[] + env_opt = [] of_opt = [] for key in self.options: if key == 'solver': continue if isinstance(self.options[key], str) and ' ' in self.options[key]: - env_opt.append(key+"=\""+str(self.options[key])+"\"") + env_opt.append(key + "=\"" + str(self.options[key]) + "\"") else: - env_opt.append(key+"="+str(self.options[key])) - of_opt.append(str(key)+" = "+str(self.options[key])) + env_opt.append(key + "=" + str(self.options[key])) + of_opt.append(str(key) + " = " + str(self.options[key])) - if self._timelimit is not None and self._timelimit > 0.0 and 'limits/time' not in self.options: + if ( + self._timelimit is not None + and self._timelimit > 0.0 + and 'limits/time' not in self.options + ): of_opt.append("limits/time = " + str(self._timelimit)) envstr = "%s_options" % self.options.solver @@ -181,146 +195,293 @@ def create_command_line(self, executable, problem_files): # make it clear that this file will be ignored. default_of_name = os.path.join(os.getcwd(), 'scip.set') if os.path.exists(default_of_name): - logger.warning("A file named '%s' exists in " - "the current working directory, but " - "SCIP options are being set using a " - "separate options file. The options " - "file '%s' will be ignored." - % (default_of_name, default_of_name)) + logger.warning( + "A file named '%s' exists in " + "the current working directory, but " + "SCIP options are being set using a " + "separate options file. The options " + "file '%s' will be ignored." % (default_of_name, default_of_name) + ) options_dir = TempfileManager.create_tempdir() # Now write the new options file with open(os.path.join(options_dir, 'scip.set'), 'w') as f: for line in of_opt: - f.write(line+"\n") + f.write(line + "\n") else: options_dir = None return Bunch(cmd=cmd, log_file=self._log_file, env=env, cwd=options_dir) def _postsolve(self): - results = super(SCIPAMPL, self)._postsolve() + # find SCIP version (calling version() and _get_version() mess things) + + executable = self._command.cmd[0] + + version = self._known_versions[executable] + + if version < (8, 0, 0, 0): + # it may be possible to get results from older version but this was + # not tested, so the old way of doing things is here preserved + + results = super(SCIPAMPL, self)._postsolve() + + else: + # repeat code from super(SCIPAMPL, self)._postsolve() + # in order to access the log file and get the results from there + + if self._log_file is not None: + OUTPUT = open(self._log_file, "w") + OUTPUT.write("Solver command line: " + str(self._command.cmd) + '\n') + OUTPUT.write("\n") + OUTPUT.write(self._log + '\n') + OUTPUT.close() + + # JPW: The cleanup of the problem file probably shouldn't be here, but + # rather in the base OptSolver class. That would require movement of + # the keepfiles attribute and associated cleanup logic to the base + # class, which I didn't feel like doing at this present time. the + # base class remove_files method should clean up the problem file. + + if (self._log_file is not None) and (not os.path.exists(self._log_file)): + msg = "File '%s' not generated while executing %s" + raise IOError(msg % (self._log_file, self.path)) + results = None + + if self._results_format is not None: + results = self.process_output(self._rc) + + # read results from the log file + + log_dict = self.read_scip_log(self._log_file) + + if len(log_dict) != 0: + # if any were read, store them + + results.solver.time = log_dict['solving_time'] + results.solver.gap = log_dict['gap'] + results.solver.primal_bound = log_dict['primal_bound'] + results.solver.dual_bound = log_dict['dual_bound'] + + # TODO: get scip to produce a statistics file and read it + # Why? It has all the information one can possibly need. + # + # If keepfiles is true, then we pop the + # TempfileManager context while telling it to + # _not_ remove the files. + # + if not self._keepfiles: + # in some cases, the solution filename is + # not generated via the temp-file mechanism, + # instead being automatically derived from + # the input lp/nl filename. so, we may have + # to clean it up manually. + if (not self._soln_file is None) and os.path.exists( + self._soln_file + ): + os.remove(self._soln_file) + + TempfileManager.pop(remove=not self._keepfiles) + + # ********************************************************************** + # ********************************************************************** + + # UNKNOWN # unknown='unknown' # An uninitialized value + if results.solver.message == "unknown": - results.solver.status = \ - SolverStatus.unknown - results.solver.termination_condition = \ - TerminationCondition.unknown + results.solver.status = SolverStatus.unknown + results.solver.termination_condition = TerminationCondition.unknown if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.unknown + results.solution(0).status = SolutionStatus.unknown + + # ABORTED # userInterrupt='userInterrupt' # Interrupt signal generated by user + elif results.solver.message == "user interrupt": - results.solver.status = \ - SolverStatus.aborted - results.solver.termination_condition = \ - TerminationCondition.userInterrupt + results.solver.status = SolverStatus.aborted + results.solver.termination_condition = TerminationCondition.userInterrupt if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.unknown + results.solution(0).status = SolutionStatus.unknown + + # OK # maxEvaluations='maxEvaluations' # Exceeded maximum number of problem evaluations + elif results.solver.message == "node limit reached": - results.solver.status = \ - SolverStatus.aborted - results.solver.termination_condition = \ - TerminationCondition.maxEvaluations + results.solver.status = SolverStatus.ok + results.solver.termination_condition = TerminationCondition.maxEvaluations if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.stoppedByLimit + results.solution(0).status = SolutionStatus.stoppedByLimit + + # OK # maxEvaluations='maxEvaluations' # Exceeded maximum number of problem evaluations + elif results.solver.message == "total node limit reached": - results.solver.status = \ - SolverStatus.aborted - results.solver.termination_condition = \ - TerminationCondition.maxEvaluations + results.solver.status = SolverStatus.ok + results.solver.termination_condition = TerminationCondition.maxEvaluations if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.stoppedByLimit + results.solution(0).status = SolutionStatus.stoppedByLimit + + # OK # maxEvaluations='maxEvaluations' # Exceeded maximum number of problem evaluations + elif results.solver.message == "stall node limit reached": - results.solver.status = \ - SolverStatus.aborted - results.solver.termination_condition = \ - TerminationCondition.maxEvaluations + results.solver.status = SolverStatus.ok + results.solver.termination_condition = TerminationCondition.maxEvaluations if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.stoppedByLimit + results.solution(0).status = SolutionStatus.stoppedByLimit + + # OK # maxTimeLimit='maxTimeLimit' # Exceeded maximum time limited allowed by user but having return a feasible solution + elif results.solver.message == "time limit reached": - results.solver.status = \ - SolverStatus.aborted - results.solver.termination_condition = \ - TerminationCondition.maxTimeLimit + results.solver.status = SolverStatus.ok + results.solver.termination_condition = TerminationCondition.maxTimeLimit if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.stoppedByLimit + results.solution(0).status = SolutionStatus.stoppedByLimit + + # OK # other='other' # Other, uncategorized normal termination + elif results.solver.message == "memory limit reached": - results.solver.status = \ - SolverStatus.aborted - results.solver.termination_condition = \ - TerminationCondition.other + results.solver.status = SolverStatus.ok + results.solver.termination_condition = TerminationCondition.other if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.stoppedByLimit + results.solution(0).status = SolutionStatus.stoppedByLimit + + # OK # other='other' # Other, uncategorized normal termination + elif results.solver.message == "gap limit reached": - results.solver.status = \ - SolverStatus.aborted - results.solver.termination_condition = \ - TerminationCondition.other + results.solver.status = SolverStatus.ok + results.solver.termination_condition = TerminationCondition.other if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.stoppedByLimit + results.solution(0).status = SolutionStatus.stoppedByLimit + + # OK # other='other' # Other, uncategorized normal termination + elif results.solver.message == "solution limit reached": - results.solver.status = \ - SolverStatus.aborted - results.solver.termination_condition = \ - TerminationCondition.other + results.solver.status = SolverStatus.ok + results.solver.termination_condition = TerminationCondition.other if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.stoppedByLimit + results.solution(0).status = SolutionStatus.stoppedByLimit + + # OK # other='other' # Other, uncategorized normal termination + elif results.solver.message == "solution improvement limit reached": - results.solver.status = \ - SolverStatus.aborted - results.solver.termination_condition = \ - TerminationCondition.other + results.solver.status = SolverStatus.ok + results.solver.termination_condition = TerminationCondition.other if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.stoppedByLimit + results.solution(0).status = SolutionStatus.stoppedByLimit + + # OK # optimal='optimal' # Found an optimal solution + elif results.solver.message == "optimal solution found": - results.solver.status = \ - SolverStatus.ok - results.solver.termination_condition = \ - TerminationCondition.optimal + results.solver.status = SolverStatus.ok + results.solver.termination_condition = TerminationCondition.optimal if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.optimal + results.solution(0).status = SolutionStatus.optimal + if results.problem.sense == ProblemSense.minimize: + results.problem.lower_bound = results.solver.primal_bound + else: + results.problem.upper_bound = results.solver.primal_bound + + # WARNING # infeasible='infeasible' # Demonstrated that the problem is infeasible + elif results.solver.message == "infeasible": - results.solver.status = \ - SolverStatus.warning - results.solver.termination_condition = \ - TerminationCondition.infeasible + results.solver.status = SolverStatus.warning + results.solver.termination_condition = TerminationCondition.infeasible if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.infeasible + results.solution(0).status = SolutionStatus.infeasible + + # WARNING # unbounded='unbounded' # Demonstrated that problem is unbounded + elif results.solver.message == "unbounded": - results.solver.status = \ - SolverStatus.warning - results.solver.termination_condition = \ - TerminationCondition.unbounded + results.solver.status = SolverStatus.warning + results.solver.termination_condition = TerminationCondition.unbounded if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.unbounded + results.solution(0).status = SolutionStatus.unbounded + + # WARNING # infeasibleOrUnbounded='infeasibleOrUnbounded' # Problem is either infeasible or unbounded + elif results.solver.message == "infeasible or unbounded": - results.solver.status = \ - SolverStatus.warning - results.solver.termination_condition = \ + results.solver.status = SolverStatus.warning + results.solver.termination_condition = ( TerminationCondition.infeasibleOrUnbounded + ) if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.unsure + results.solution(0).status = SolutionStatus.unsure + + # UNKNOWN # unknown='unknown' # An uninitialized value + else: - logger.warning("Unexpected SCIP solver message: %s" - % (results.solver.message)) - results.solver.status = \ - SolverStatus.unknown - results.solver.termination_condition = \ - TerminationCondition.unknown + logger.warning( + "Unexpected SCIP solver message: %s" % (results.solver.message) + ) + results.solver.status = SolverStatus.unknown + results.solver.termination_condition = TerminationCondition.unknown if len(results.solution) > 0: - results.solution(0).status = \ - SolutionStatus.unknown + results.solution(0).status = SolutionStatus.unknown return results + + @staticmethod + def read_scip_log(filename: str): + # TODO: check file exists, ensure opt has finished, etc + + from collections import deque + + with open(filename) as f: + scip_lines = list(deque(f, 7)) + scip_lines.pop() + + expected_labels = [ + 'SCIP Status :', + 'Solving Time (sec) :', + 'Solving Nodes :', + 'Primal Bound :', + 'Dual Bound :', + 'Gap :', + ] + + colon_position = 19 # or scip_lines[0].index(':') + + for i, log_file_line in enumerate(scip_lines): + if expected_labels[i] != log_file_line[0 : colon_position + 1]: + return {} + + # get data + + solver_status = scip_lines[0][colon_position + 2 : scip_lines[0].index('\n')] + + solving_time = float( + scip_lines[1][colon_position + 2 : scip_lines[1].index('\n')] + ) + + try: + solving_nodes = int( + scip_lines[2][colon_position + 2 : scip_lines[2].index('(')] + ) + except ValueError: + solving_nodes = int( + scip_lines[2][colon_position + 2 : scip_lines[2].index('\n')] + ) + + primal_bound = float( + scip_lines[3][colon_position + 2 : scip_lines[3].index('(')] + ) + + dual_bound = float( + scip_lines[4][colon_position + 2 : scip_lines[4].index('\n')] + ) + + try: + gap = float(scip_lines[5][colon_position + 2 : scip_lines[5].index('%')]) + except ValueError: + gap = scip_lines[5][colon_position + 2 : scip_lines[5].index('\n')] + + if gap == 'infinite': + gap = float('inf') + + out_dict = { + 'solver_status': solver_status, + 'solving_time': solving_time, + 'solving_nodes': solving_nodes, + 'primal_bound': primal_bound, + 'dual_bound': dual_bound, + 'gap': gap, + } + + return out_dict diff --git a/pyomo/solvers/plugins/solvers/XPRESS.py b/pyomo/solvers/plugins/solvers/XPRESS.py index 70ef36ee07a..6ab51cfbbf3 100644 --- a/pyomo/solvers/plugins/solvers/XPRESS.py +++ b/pyomo/solvers/plugins/solvers/XPRESS.py @@ -8,8 +8,7 @@ @SolverFactory.register('xpress', doc='The XPRESS LP/MIP solver') class XPRESS(OptSolver): - """The XPRESS LP/MIP solver - """ + """The XPRESS LP/MIP solver""" def __new__(cls, *args, **kwds): mode = kwds.pop('solver_io', 'python') @@ -17,8 +16,10 @@ def __new__(cls, *args, **kwds): mode = 'python' if mode not in {'python', 'direct', 'persistent'}: - logger.error('Pyomo currently only supports a Python interface to XPRESS. ' - 'Please use one of python, direct, or persistent for solver_io.') + logger.error( + 'Pyomo currently only supports a Python interface to XPRESS. ' + 'Please use one of python, direct, or persistent for solver_io.' + ) return if mode in ['python', 'direct']: opt = SolverFactory('xpress_direct', **kwds) diff --git a/pyomo/solvers/plugins/solvers/cplex_direct.py b/pyomo/solvers/plugins/solvers/cplex_direct.py index f38c56dcfec..3ddb328ebdd 100644 --- a/pyomo/solvers/plugins/solvers/cplex_direct.py +++ b/pyomo/solvers/plugins/solvers/cplex_direct.py @@ -21,7 +21,9 @@ from pyomo.core.staleflag import StaleFlagManager from pyomo.repn import generate_standard_repn from pyomo.solvers.plugins.solvers.direct_solver import DirectSolver -from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import DirectOrPersistentSolver +from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import ( + DirectOrPersistentSolver, +) from pyomo.core.kernel.objective import minimize, maximize from pyomo.opt.results.results_ import SolverResults from pyomo.opt.results.solution import Solution, SolutionStatus @@ -111,7 +113,6 @@ def store_in_cplex(self): @SolverFactory.register('cplex_direct', doc='Direct python interface to CPLEX') class CPLEXDirect(DirectSolver): - def __init__(self, **kwds): kwds['type'] = 'cplexdirect' DirectSolver.__init__(self, **kwds) @@ -123,10 +124,12 @@ def __init__(self, **kwds): def _init(self): try: import cplex + self._cplex = cplex self._python_api_exists = True self._version = tuple( - int(k) for k in self._cplex.Cplex().get_version().split('.')) + int(k) for k in self._cplex.Cplex().get_version().split('.') + ) while len(self._version) < 4: self._version += (0,) self._version = tuple(int(i) for i in self._version[:4]) @@ -148,7 +151,7 @@ def _init(self): self._max_constraint_degree = 2 self._max_obj_degree = 2 - # Note: Undefined capabilites default to None + # Note: Undefined capabilities default to None self._capabilities.linear = True self._capabilities.quadratic_objective = True self._capabilities.quadratic_constraint = True @@ -166,68 +169,86 @@ def _apply_solver(self): # log file. Passing in an opened file object is supported at # least as far back as CPLEX 12.5.1 [the oldest version # supported by IBM as of 1 Oct 2020] - if self.version() >= (12, 5, 1) \ - and isinstance(self._log_file, str): + if self.version() >= (12, 5, 1) and isinstance(self._log_file, str): _log_file = (open(self._log_file, 'a'),) _close_log_file = True else: _log_file = (self._log_file,) _close_log_file = False if self._tee: + def _process_stream(arg): sys.stdout.write(arg) return arg + _log_file += (_process_stream,) try: self._solver_model.set_results_stream(*_log_file) if self._keepfiles: - print("Solver log file: "+self._log_file) - - obj_degree = self._objective.expr.polynomial_degree() + print("Solver log file: " + self._log_file) + + obj_degree = self._objective.polynomial_degree() if obj_degree is None or obj_degree > 2: - raise DegreeError('CPLEXDirect does not support expressions of degree {0}.'\ - .format(obj_degree)) + raise DegreeError( + 'CPLEXDirect does not support expressions of degree {0}.'.format( + obj_degree + ) + ) elif obj_degree == 2: quadratic_objective = True else: quadratic_objective = False - + num_integer_vars = self._solver_model.variables.get_num_integer() num_binary_vars = self._solver_model.variables.get_num_binary() num_sos = self._solver_model.SOS.get_num() - + if self._solver_model.quadratic_constraints.get_num() != 0: quadratic_cons = True else: quadratic_cons = False - + if (num_integer_vars + num_binary_vars + num_sos) > 0: integer = True else: integer = False - + if integer: if quadratic_cons: - self._solver_model.set_problem_type(self._solver_model.problem_type.MIQCP) + self._solver_model.set_problem_type( + self._solver_model.problem_type.MIQCP + ) elif quadratic_objective: - self._solver_model.set_problem_type(self._solver_model.problem_type.MIQP) + self._solver_model.set_problem_type( + self._solver_model.problem_type.MIQP + ) else: - self._solver_model.set_problem_type(self._solver_model.problem_type.MILP) + self._solver_model.set_problem_type( + self._solver_model.problem_type.MILP + ) else: if quadratic_cons: - self._solver_model.set_problem_type(self._solver_model.problem_type.QCP) + self._solver_model.set_problem_type( + self._solver_model.problem_type.QCP + ) elif quadratic_objective: - self._solver_model.set_problem_type(self._solver_model.problem_type.QP) + self._solver_model.set_problem_type( + self._solver_model.problem_type.QP + ) else: - self._solver_model.set_problem_type(self._solver_model.problem_type.LP) + self._solver_model.set_problem_type( + self._solver_model.problem_type.LP + ) # if the user specifies a 'mipgap' # set cplex's mip.tolerances.mipgap if self.options.mipgap is not None: - self._solver_model.parameters.mip.tolerances.mipgap.set(float(self.options.mipgap)) - + self._solver_model.parameters.mip.tolerances.mipgap.set( + float(self.options.mipgap) + ) + for key, option in self.options.items(): - if key == 'mipgap': # handled above + if key == 'mipgap': # handled above continue opt_cmd = self._solver_model.parameters key_pieces = key.split('_') @@ -250,7 +271,7 @@ def _process_stream(arg): if not _is_numeric(option): raise opt_cmd.set(float(option)) - + t0 = time.time() self._solver_model.solve() t1 = time.time() @@ -302,7 +323,9 @@ def _get_expr_from_pyomo_expr(self, expr, max_degree=2): repn = generate_standard_repn(expr, quadratic=False) try: - cplex_expr, referenced_vars = self._get_expr_from_pyomo_repn(repn, max_degree) + cplex_expr, referenced_vars = self._get_expr_from_pyomo_repn( + repn, max_degree + ) except DegreeError as e: msg = e.args[0] msg += '\nexpr: {0}'.format(expr) @@ -351,10 +374,11 @@ def _set_instance(self, model, kwds={}): self._solver_model = self._cplex.Cplex() except Exception: e = sys.exc_info()[1] - msg = ("Unable to create CPLEX model. " - "Have you installed the Python " - "bindings for CPLEX?\n\n\t"+ - "Error message: {0}".format(e)) + msg = ( + "Unable to create CPLEX model. " + "Have you installed the Python " + "bindings for CPLEX?\n\n\t" + "Error message: {0}".format(e) + ) raise Exception(msg) self._add_block(model) @@ -371,7 +395,8 @@ def _set_instance(self, model, kwds={}): "the IO-option 'output_fixed_variable_bounds=True' " "to suppress this error and fix the variable " "by overwriting its bounds in the CPLEX instance." - % (var.name, self._pyomo_model.name,)) + % (var.name, self._pyomo_model.name) + ) def _add_block(self, block): var_data = _VariableData(self._solver_model) @@ -384,10 +409,7 @@ def _add_block(self, block): lin_con_data = _LinearConstraintData(self._solver_model) for sub_block in block.block_data_objects(descend_into=True, active=True): for con in sub_block.component_data_objects( - ctype=Constraint, - descend_into=False, - active=True, - sort=True, + ctype=Constraint, descend_into=False, active=True, sort=True ): if not con.has_lb() and not con.has_ub(): assert not con.equality @@ -396,18 +418,13 @@ def _add_block(self, block): self._add_constraint(con, lin_con_data) for con in sub_block.component_data_objects( - ctype=SOSConstraint, - descend_into=False, - active=True, - sort=True, + ctype=SOSConstraint, descend_into=False, active=True, sort=True ): self._add_sos_constraint(con) obj_counter = 0 for obj in sub_block.component_data_objects( - ctype=Objective, - descend_into=False, - active=True, + ctype=Objective, descend_into=False, active=True ): obj_counter += 1 if obj_counter > 1: @@ -479,18 +496,22 @@ def _add_constraint(self, con, lin_con_data=None): cplex_lin_con_data.store_in_cplex() else: if sense == 'R': - raise ValueError("The CPLEXDirect interface does not " - "support quadratic range constraints: " - "{0}".format(con)) + raise ValueError( + "The CPLEXDirect interface does not " + "support quadratic range constraints: " + "{0}".format(con) + ) self._solver_model.quadratic_constraints.add( - lin_expr=[cplex_expr.variables, - cplex_expr.coefficients], - quad_expr=[cplex_expr.q_variables1, - cplex_expr.q_variables2, - cplex_expr.q_coefficients], + lin_expr=[cplex_expr.variables, cplex_expr.coefficients], + quad_expr=[ + cplex_expr.q_variables1, + cplex_expr.q_variables2, + cplex_expr.q_coefficients, + ], sense=sense, rhs=rhs, - name=conname) + name=conname, + ) for var in referenced_vars: self._referenced_variables[var] += 1 @@ -509,8 +530,9 @@ def _add_sos_constraint(self, con): elif level == 2: sos_type = self._solver_model.SOS.type.SOS2 else: - raise ValueError("Solver does not support SOS " - "level {0} constraints".format(level)) + raise ValueError( + "Solver does not support SOS level {0} constraints".format(level) + ) cplex_vars = [] weights = [] @@ -530,7 +552,9 @@ def _add_sos_constraint(self, con): self._referenced_variables[v] += 1 weights.append(w) - self._solver_model.SOS.add(type=sos_type, SOS=[cplex_vars, weights], name=conname) + self._solver_model.SOS.add( + type=sos_type, SOS=[cplex_vars, weights], name=conname + ) self._pyomo_con_to_solver_con_map[con] = conname self._solver_con_to_pyomo_con_map[conname] = con @@ -547,7 +571,9 @@ def _cplex_vtype_from_var(self, var): elif var.is_continuous(): vtype = self._solver_model.variables.type.continuous else: - raise ValueError('Variable domain type is not recognized for {0}'.format(var.domain)) + raise ValueError( + 'Variable domain type is not recognized for {0}'.format(var.domain) + ) return vtype def _set_objective(self, obj): @@ -567,7 +593,9 @@ def _set_objective(self, obj): else: raise ValueError('Objective sense is not recognized: {0}'.format(obj.sense)) - cplex_expr, referenced_vars = self._get_expr_from_pyomo_expr(obj.expr, self._max_obj_degree) + cplex_expr, referenced_vars = self._get_expr_from_pyomo_expr( + obj.expr, self._max_obj_degree + ) for i in range(len(cplex_expr.q_coefficients)): cplex_expr.q_coefficients[i] *= 2 @@ -579,7 +607,9 @@ def _set_objective(self, obj): self._solver_model.objective.set_offset(cplex_expr.offset) linear_objective_already_exists = any(self._solver_model.objective.get_linear()) - quadratic_objective_already_exists = self._solver_model.objective.get_num_quadratic_nonzeros() + quadratic_objective_already_exists = ( + self._solver_model.objective.get_num_quadratic_nonzeros() + ) contains_linear_terms = any(cplex_expr.coefficients) contains_quadratic_terms = any(cplex_expr.q_coefficients) @@ -589,7 +619,9 @@ def _set_objective(self, obj): self._solver_model.objective.set_linear([(i, 0.0) for i in range(num_cols)]) if contains_linear_terms: - self._solver_model.objective.set_linear(list(zip(cplex_expr.variables, cplex_expr.coefficients))) + self._solver_model.objective.set_linear( + list(zip(cplex_expr.variables, cplex_expr.coefficients)) + ) if quadratic_objective_already_exists or contains_quadratic_terms: self._solver_model.objective.set_quadratic([0.0] * num_cols) @@ -600,7 +632,7 @@ def _set_objective(self, obj): zip( cplex_expr.q_variables1, cplex_expr.q_variables2, - cplex_expr.q_coefficients + cplex_expr.q_coefficients, ) ) ) @@ -628,14 +660,19 @@ def _postsolve(self): extract_reduced_costs = True flag = True if not flag: - raise RuntimeError("***The cplex_direct solver plugin cannot extract solution suffix="+suffix) + raise RuntimeError( + "***The cplex_direct solver plugin cannot extract solution suffix=" + + suffix + ) cpxprob = self._solver_model status = cpxprob.solution.get_status() - if cpxprob.get_problem_type() in [cpxprob.problem_type.MILP, - cpxprob.problem_type.MIQP, - cpxprob.problem_type.MIQCP]: + if cpxprob.get_problem_type() in [ + cpxprob.problem_type.MILP, + cpxprob.problem_type.MIQP, + cpxprob.problem_type.MIQCP, + ]: if extract_reduced_costs: logger.warning("Cannot get reduced costs for MIP.") if extract_duals: @@ -646,7 +683,7 @@ def _postsolve(self): self.results = SolverResults() soln = Solution() - self.results.solver.name = ("CPLEX {0}".format(cpxprob.get_version())) + self.results.solver.name = "CPLEX {0}".format(cpxprob.get_version()) self.results.solver.wallclock_time = self._wallclock_time if status in [1, 101, 102]: @@ -661,8 +698,9 @@ def _postsolve(self): # Note: status of 4 means infeasible or unbounded # and 119 means MIP infeasible or unbounded self.results.solver.status = SolverStatus.warning - self.results.solver.termination_condition = \ + self.results.solver.termination_condition = ( TerminationCondition.infeasibleOrUnbounded + ) soln.status = SolutionStatus.unsure elif status in [3, 103]: self.results.solver.status = SolverStatus.warning @@ -670,11 +708,15 @@ def _postsolve(self): soln.status = SolutionStatus.infeasible elif status in [10]: self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_condition = TerminationCondition.maxIterations + self.results.solver.termination_condition = ( + TerminationCondition.maxIterations + ) soln.status = SolutionStatus.stoppedByLimit elif status in [11, 25, 107, 131]: self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_condition = TerminationCondition.maxTimeLimit + self.results.solver.termination_condition = ( + TerminationCondition.maxTimeLimit + ) soln.status = SolutionStatus.stoppedByLimit else: self.results.solver.status = SolverStatus.error @@ -686,50 +728,75 @@ def _postsolve(self): elif cpxprob.objective.get_sense() == cpxprob.objective.sense.maximize: self.results.problem.sense = maximize else: - raise RuntimeError('Unrecognized cplex objective sense: {0}'.\ - format(cpxprob.objective.get_sense())) + raise RuntimeError( + 'Unrecognized cplex objective sense: {0}'.format( + cpxprob.objective.get_sense() + ) + ) self.results.problem.upper_bound = None self.results.problem.lower_bound = None if cpxprob.solution.get_solution_type() != cpxprob.solution.type.none: - if (cpxprob.variables.get_num_binary() + cpxprob.variables.get_num_integer()) == 0: - self.results.problem.upper_bound = cpxprob.solution.get_objective_value() - self.results.problem.lower_bound = cpxprob.solution.get_objective_value() + if ( + cpxprob.variables.get_num_binary() + cpxprob.variables.get_num_integer() + ) == 0: + self.results.problem.upper_bound = ( + cpxprob.solution.get_objective_value() + ) + self.results.problem.lower_bound = ( + cpxprob.solution.get_objective_value() + ) elif cpxprob.objective.get_sense() == cpxprob.objective.sense.minimize: - self.results.problem.upper_bound = cpxprob.solution.get_objective_value() - self.results.problem.lower_bound = cpxprob.solution.MIP.get_best_objective() + self.results.problem.upper_bound = ( + cpxprob.solution.get_objective_value() + ) + self.results.problem.lower_bound = ( + cpxprob.solution.MIP.get_best_objective() + ) else: assert cpxprob.objective.get_sense() == cpxprob.objective.sense.maximize - self.results.problem.upper_bound = cpxprob.solution.MIP.get_best_objective() - self.results.problem.lower_bound = cpxprob.solution.get_objective_value() + self.results.problem.upper_bound = ( + cpxprob.solution.MIP.get_best_objective() + ) + self.results.problem.lower_bound = ( + cpxprob.solution.get_objective_value() + ) try: - soln.gap = self.results.problem.upper_bound - self.results.problem.lower_bound + soln.gap = ( + self.results.problem.upper_bound - self.results.problem.lower_bound + ) except TypeError: soln.gap = None self.results.problem.name = cpxprob.get_problem_name() assert cpxprob.indicator_constraints.get_num() == 0 - self.results.problem.number_of_constraints = \ - (cpxprob.linear_constraints.get_num() + - cpxprob.quadratic_constraints.get_num() + - cpxprob.SOS.get_num()) + self.results.problem.number_of_constraints = ( + cpxprob.linear_constraints.get_num() + + cpxprob.quadratic_constraints.get_num() + + cpxprob.SOS.get_num() + ) self.results.problem.number_of_nonzeros = None self.results.problem.number_of_variables = cpxprob.variables.get_num() - self.results.problem.number_of_binary_variables = cpxprob.variables.get_num_binary() - self.results.problem.number_of_integer_variables = cpxprob.variables.get_num_integer() + self.results.problem.number_of_binary_variables = ( + cpxprob.variables.get_num_binary() + ) + self.results.problem.number_of_integer_variables = ( + cpxprob.variables.get_num_integer() + ) assert cpxprob.variables.get_num_semiinteger() == 0 assert cpxprob.variables.get_num_semicontinuous() == 0 - self.results.problem.number_of_continuous_variables = \ - (cpxprob.variables.get_num() - - cpxprob.variables.get_num_binary() - - cpxprob.variables.get_num_integer()) + self.results.problem.number_of_continuous_variables = ( + cpxprob.variables.get_num() + - cpxprob.variables.get_num_binary() + - cpxprob.variables.get_num_integer() + ) self.results.problem.number_of_objectives = 1 # only try to get objective and variable values if a solution exists if self._save_results: """ - This code in this if statement is only needed for backwards compatability. It is more efficient to set + This code in this if statement is only needed for backwards compatibility. It is more efficient to set _save_results to False and use load_vars, load_duals, etc. """ if cpxprob.solution.get_solution_type() > 0: @@ -745,7 +812,9 @@ def _postsolve(self): soln_variables[name] = {"Value": val} if extract_reduced_costs: - reduced_costs = self._solver_model.solution.get_reduced_costs(var_names) + reduced_costs = self._solver_model.solution.get_reduced_costs( + var_names + ) for i, name in enumerate(var_names): pyomo_var = self._solver_var_to_pyomo_var_map[name] if self._referenced_variables[pyomo_var] > 0: @@ -754,7 +823,9 @@ def _postsolve(self): if extract_slacks: for con_name in self._solver_model.linear_constraints.get_names(): soln_constraints[con_name] = {} - for con_name in self._solver_model.quadratic_constraints.get_names(): + for ( + con_name + ) in self._solver_model.quadratic_constraints.get_names(): soln_constraints[con_name] = {} elif extract_duals: # CPLEX PYTHON API DOES NOT SUPPORT QUADRATIC DUAL COLLECTION @@ -763,16 +834,22 @@ def _postsolve(self): if extract_duals: dual_values = self._solver_model.solution.get_dual_values() - for i, con_name in enumerate(self._solver_model.linear_constraints.get_names()): + for i, con_name in enumerate( + self._solver_model.linear_constraints.get_names() + ): soln_constraints[con_name]["Dual"] = dual_values[i] if extract_slacks: linear_slacks = self._solver_model.solution.get_linear_slacks() qudratic_slacks = self._solver_model.solution.get_quadratic_slacks() - for i, con_name in enumerate(self._solver_model.linear_constraints.get_names()): + for i, con_name in enumerate( + self._solver_model.linear_constraints.get_names() + ): pyomo_con = self._solver_con_to_pyomo_con_map[con_name] if pyomo_con in self._range_constraints: - R_ = self._solver_model.linear_constraints.get_range_values(con_name) + R_ = self._solver_model.linear_constraints.get_range_values( + con_name + ) if R_ == 0: soln_constraints[con_name]["Slack"] = linear_slacks[i] else: @@ -784,7 +861,9 @@ def _postsolve(self): soln_constraints[con_name]["Slack"] = -Ls_ else: soln_constraints[con_name]["Slack"] = linear_slacks[i] - for i, con_name in enumerate(self._solver_model.quadratic_constraints.get_names()): + for i, con_name in enumerate( + self._solver_model.quadratic_constraints.get_names() + ): soln_constraints[con_name]["Slack"] = qudratic_slacks[i] elif self._load_solutions: if cpxprob.solution.get_solution_type() > 0: @@ -814,9 +893,11 @@ def _warm_start(self): # here warm start means MIP start, which we can not add # if the problem type is not discrete cpxprob = self._solver_model - if cpxprob.get_problem_type() in [cpxprob.problem_type.MILP, - cpxprob.problem_type.MIQP, - cpxprob.problem_type.MIQCP]: + if cpxprob.get_problem_type() in [ + cpxprob.problem_type.MILP, + cpxprob.problem_type.MIQP, + cpxprob.problem_type.MIQCP, + ]: var_names = [] var_values = [] for pyomo_var, cplex_var in self._pyomo_var_to_solver_var_map.items(): @@ -827,7 +908,8 @@ def _warm_start(self): if len(var_names): self._solver_model.MIP_starts.add( [var_names, var_values], - self._solver_model.MIP_starts.effort_level.auto) + self._solver_model.MIP_starts.effort_level.auto, + ) def _load_vars(self, vars_to_load=None): var_map = self._pyomo_var_to_ndx_map @@ -870,7 +952,9 @@ def _load_duals(self, cons_to_load=None): vals = self._solver_model.solution.get_dual_values() else: cplex_cons_to_load = set([con_map[pyomo_con] for pyomo_con in cons_to_load]) - linear_cons_to_load = cplex_cons_to_load.intersection(set(self._solver_model.linear_constraints.get_names())) + linear_cons_to_load = cplex_cons_to_load.intersection( + set(self._solver_model.linear_constraints.get_names()) + ) vals = self._solver_model.solution.get_dual_values(linear_cons_to_load) for i, cplex_con in enumerate(linear_cons_to_load): @@ -887,14 +971,24 @@ def _load_slacks(self, cons_to_load=None): if cons_to_load is None: linear_cons_to_load = self._solver_model.linear_constraints.get_names() linear_vals = self._solver_model.solution.get_linear_slacks() - quadratic_cons_to_load = self._solver_model.quadratic_constraints.get_names() + quadratic_cons_to_load = ( + self._solver_model.quadratic_constraints.get_names() + ) quadratic_vals = self._solver_model.solution.get_quadratic_slacks() else: cplex_cons_to_load = set([con_map[pyomo_con] for pyomo_con in cons_to_load]) - linear_cons_to_load = cplex_cons_to_load.intersection(set(self._solver_model.linear_constraints.get_names())) - linear_vals = self._solver_model.solution.get_linear_slacks(linear_cons_to_load) - quadratic_cons_to_load = cplex_cons_to_load.intersection(set(self._solver_model.quadratic_constraints.get_names())) - quadratic_vals = self._solver_model.solution.get_quadratic_slacks(quadratic_cons_to_load) + linear_cons_to_load = cplex_cons_to_load.intersection( + set(self._solver_model.linear_constraints.get_names()) + ) + linear_vals = self._solver_model.solution.get_linear_slacks( + linear_cons_to_load + ) + quadratic_cons_to_load = cplex_cons_to_load.intersection( + set(self._solver_model.quadratic_constraints.get_names()) + ) + quadratic_vals = self._solver_model.solution.get_quadratic_slacks( + quadratic_cons_to_load + ) for i, cplex_con in enumerate(linear_cons_to_load): pyomo_con = reverse_con_map[cplex_con] diff --git a/pyomo/solvers/plugins/solvers/cplex_persistent.py b/pyomo/solvers/plugins/solvers/cplex_persistent.py index b2ca7ba5ece..a7fdcc45ade 100644 --- a/pyomo/solvers/plugins/solvers/cplex_persistent.py +++ b/pyomo/solvers/plugins/solvers/cplex_persistent.py @@ -27,7 +27,7 @@ class CPLEXPersistent(PersistentSolver, CPLEXDirect): Keyword Arguments ----------------- model: ConcreteModel - Passing a model to the constructor is equivalent to calling the set_instance mehtod. + Passing a model to the constructor is equivalent to calling the set_instance method. type: str String indicating the class type of the solver instance. name: str @@ -54,7 +54,9 @@ def _remove_constraint(self, solver_con): try: self._solver_model.quadratic_constraints.delete(solver_con) except self._cplex.exceptions.CplexError: - raise ValueError('Failed to find the cplex constraint {0}'.format(solver_con)) + raise ValueError( + 'Failed to find the cplex constraint {0}'.format(solver_con) + ) def _remove_sos_constraint(self, solver_sos_con): self._solver_model.SOS.delete(solver_sos_con) @@ -86,12 +88,16 @@ def update_var(self, var): # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if var.is_indexed(): + # if var.is_indexed(): # for child_var in var.values(): # self.compile_var(child_var) # return if var not in self._pyomo_var_to_solver_var_map: - raise ValueError('The Var provided to compile_var needs to be added first: {0}'.format(var)) + raise ValueError( + 'The Var provided to compile_var needs to be added first: {0}'.format( + var + ) + ) cplex_var = self._pyomo_var_to_solver_var_map[var] vtype = self._cplex_vtype_from_var(var) lb, ub = self._cplex_lb_ub_from_var(var) @@ -117,9 +123,9 @@ def _add_column(self, var, obj_coef, constraints, coefficients): """Add a column to the solver's model This will add the Pyomo variable var to the solver's - model, and put the coefficients on the associated + model, and put the coefficients on the associated constraints in the solver model. If the obj_coef is - not zero, it will add obj_coef*var to the objective + not zero, it will add obj_coef*var to the objective of the solver's model. Parameters @@ -136,8 +142,14 @@ def _add_column(self, var, obj_coef, constraints, coefficients): lb, ub = self._cplex_lb_ub_from_var(var) ## do column addition - self._solver_model.variables.add(obj=[obj_coef], lb=[lb], ub=[ub], types=[vtype], names=[varname], - columns=[self._cplex.SparsePair(ind=constraints, val=coefficients)]) + self._solver_model.variables.add( + obj=[obj_coef], + lb=[lb], + ub=[ub], + types=[vtype], + names=[varname], + columns=[self._cplex.SparsePair(ind=constraints, val=coefficients)], + ) self._pyomo_var_to_solver_var_map[var] = varname self._solver_var_to_pyomo_var_map[varname] = var diff --git a/pyomo/solvers/plugins/solvers/direct_or_persistent_solver.py b/pyomo/solvers/plugins/solvers/direct_or_persistent_solver.py index ad88d161c15..09bbfbda70f 100644 --- a/pyomo/solvers/plugins/solvers/direct_or_persistent_solver.py +++ b/pyomo/solvers/plugins/solvers/direct_or_persistent_solver.py @@ -42,6 +42,7 @@ class DirectOrPersistentSolver(OptSolver): options: dict Dictionary of solver options """ + def __init__(self, **kwds): OptSolver.__init__(self, **kwds) @@ -66,7 +67,7 @@ def __init__(self, **kwds): """A dictionary mapping pyomo constraints to solver constraints.""" self._vars_referenced_by_con = ComponentMap() - """A dictionary mapping constraints to a ComponentSet containt the pyomo variables referenced by that + """A dictionary mapping constraints to a ComponentSet containing the pyomo variables referenced by that constraint. This is primarily needed for the persistent solvers. When a constraint is deleted, we need to decrement the number of times those variables are referenced (see self._referenced_variables).""" @@ -100,7 +101,7 @@ def __init__(self, **kwds): self._symbolic_solver_labels = False """A bool. If true then the solver components will be given names corresponding to the pyomo component names.""" - self._capabilites = Bunch() + self._capabilities = Bunch() self._referenced_variables = ComponentMap() """dict: {var: count} where count is the number of constraints/objective referencing the var""" @@ -109,9 +110,9 @@ def __init__(self, **kwds): """A bool. If True, then the solver log will be saved.""" self._save_results = True - """A bool. This is used for backwards compatability. If True, the solution will be loaded into the Solution + """A bool. This is used for backwards compatibility. If True, the solution will be loaded into the Solution object that gets placed on the SolverResults object. This way, users can do model.solutions.load_from(results) - to load solutions into thier model. However, it is more efficient to bypass the Solution object and load + to load solutions into their model. However, it is more efficient to bypass the Solution object and load the results directly from the solver object. If False, the solution will not be loaded into the Solution object.""" @@ -137,7 +138,7 @@ def _presolve(self, **kwds): OptSolver._presolve(self, **kwds) # *********************************************************** - # The following code is only needed for backwards compatability of load_solutions=False. + # The following code is only needed for backwards compatibility of load_solutions=False. # If we ever only want to support the load_vars, load_duals, etc. methods, then this can be deleted. if self._save_results: self._smap_id = id(self._symbol_map) @@ -145,8 +146,9 @@ def _presolve(self, **kwds): # BIG HACK (see pyomo.core.kernel write function) if not hasattr(self._pyomo_model, "._symbol_maps"): setattr(self._pyomo_model, "._symbol_maps", {}) - getattr(self._pyomo_model, - "._symbol_maps")[self._smap_id] = self._symbol_map + getattr(self._pyomo_model, "._symbol_maps")[ + self._smap_id + ] = self._symbol_map else: self._pyomo_model.solutions.add_symbol_map(self._symbol_map) # *********************************************************** @@ -155,30 +157,42 @@ def _presolve(self, **kwds): if self.warm_start_capable(): self._warm_start() else: - raise ValueError('{0} solver plugin is not capable of warmstart.'.format(type(self))) + raise ValueError( + '{0} solver plugin is not capable of warmstart.'.format(type(self)) + ) if self._log_file is None: self._log_file = TempfileManager.create_tempfile(suffix='.log') """ This method should be implemented by subclasses.""" + def _apply_solver(self): raise NotImplementedError('This method should be implemented by subclasses') """ This method should be implemented by subclasses.""" + def _postsolve(self): return OptSolver._postsolve(self) """ This method should be implemented by subclasses.""" + def _set_instance(self, model, kwds={}): if not isinstance(model, (Model, IBlock, Block, _BlockData)): - msg = "The problem instance supplied to the {0} plugin " \ - "'_presolve' method must be a Model or a Block".format(type(self)) + msg = ( + "The problem instance supplied to the {0} plugin " + "'_presolve' method must be a Model or a Block".format(type(self)) + ) raise ValueError(msg) self._pyomo_model = model - self._symbolic_solver_labels = kwds.pop('symbolic_solver_labels', self._symbolic_solver_labels) - self._skip_trivial_constraints = kwds.pop('skip_trivial_constraints', self._skip_trivial_constraints) - self._output_fixed_variable_bounds = kwds.pop('output_fixed_variable_bounds', - self._output_fixed_variable_bounds) + self._symbolic_solver_labels = kwds.pop( + 'symbolic_solver_labels', self._symbolic_solver_labels + ) + self._skip_trivial_constraints = kwds.pop( + 'skip_trivial_constraints', self._skip_trivial_constraints + ) + self._output_fixed_variable_bounds = kwds.pop( + 'output_fixed_variable_bounds', self._output_fixed_variable_bounds + ) self._pyomo_var_to_solver_var_map = ComponentMap() self._solver_var_to_pyomo_var_map = dict() self._pyomo_con_to_solver_con_map = dict() @@ -198,77 +212,77 @@ def _set_instance(self, model, kwds={}): def _add_block(self, block): for var in block.component_data_objects( - ctype=pyomo.core.base.var.Var, - descend_into=True, - active=True, - sort=True): + ctype=pyomo.core.base.var.Var, descend_into=True, active=True, sort=True + ): self._add_var(var) - for sub_block in block.block_data_objects(descend_into=True, - active=True): + for sub_block in block.block_data_objects(descend_into=True, active=True): for con in sub_block.component_data_objects( - ctype=pyomo.core.base.constraint.Constraint, - descend_into=False, - active=True, - sort=True): - if (not con.has_lb()) and \ - (not con.has_ub()): + ctype=pyomo.core.base.constraint.Constraint, + descend_into=False, + active=True, + sort=True, + ): + if (not con.has_lb()) and (not con.has_ub()): assert not con.equality continue # non-binding, so skip self._add_constraint(con) for con in sub_block.component_data_objects( - ctype=pyomo.core.base.sos.SOSConstraint, - descend_into=False, - active=True, - sort=True): + ctype=pyomo.core.base.sos.SOSConstraint, + descend_into=False, + active=True, + sort=True, + ): self._add_sos_constraint(con) obj_counter = 0 for obj in sub_block.component_data_objects( - ctype=pyomo.core.base.objective.Objective, - descend_into=False, - active=True): + ctype=pyomo.core.base.objective.Objective, + descend_into=False, + active=True, + ): obj_counter += 1 if obj_counter > 1: - raise ValueError("Solver interface does not " - "support multiple objectives.") + raise ValueError( + "Solver interface does not support multiple objectives." + ) self._set_objective(obj) """ This method should be implemented by subclasses.""" + def _set_objective(self, obj): - raise NotImplementedError("This method should be implemented " - "by subclasses") + raise NotImplementedError("This method should be implemented by subclasses") """ This method should be implemented by subclasses.""" + def _add_constraint(self, con): - raise NotImplementedError("This method should be implemented " - "by subclasses") + raise NotImplementedError("This method should be implemented by subclasses") """ This method should be implemented by subclasses.""" + def _add_sos_constraint(self, con): - raise NotImplementedError("This method should be implemented " - "by subclasses") + raise NotImplementedError("This method should be implemented by subclasses") """ This method should be implemented by subclasses.""" + def _add_var(self, var): - raise NotImplementedError("This method should be implemented " - "by subclasses") + raise NotImplementedError("This method should be implemented by subclasses") """ This method should be implemented by subclasses.""" + def _get_expr_from_pyomo_repn(self, repn, max_degree=None): - raise NotImplementedError("This method should be implemented " - "by subclasses") + raise NotImplementedError("This method should be implemented by subclasses") """ This method should be implemented by subclasses.""" + def _get_expr_from_pyomo_expr(self, expr, max_degree=None): - raise NotImplementedError("This method should be implemented " - "by subclasses") + raise NotImplementedError("This method should be implemented by subclasses") """ This method should be implemented by subclasses.""" + def _load_vars(self, vars_to_load): - raise NotImplementedError("This method should be implemented " - "by subclasses") + raise NotImplementedError("This method should be implemented by subclasses") def load_vars(self, vars_to_load=None): """ @@ -282,11 +296,14 @@ def load_vars(self, vars_to_load=None): StaleFlagManager.mark_all_as_stale(delayed=True) """ This method should be implemented by subclasses.""" + def warm_start_capable(self): raise NotImplementedError('This method should be implemented by subclasses') def _warm_start(self): - raise NotImplementedError('If a subclass can warmstart, then it should implement this method.') + raise NotImplementedError( + 'If a subclass can warmstart, then it should implement this method.' + ) def available(self, exception_flag=True): """True if the solver is available.""" @@ -294,8 +311,8 @@ def available(self, exception_flag=True): _api = getattr(self, '_python_api_exists', False) if exception_flag and not _api: raise ApplicationError( - "No Python bindings available for %s solver plugin" - % (type(self),)) + "No Python bindings available for %s solver plugin" % (type(self),) + ) return bool(_api) def _get_version(self): diff --git a/pyomo/solvers/plugins/solvers/direct_solver.py b/pyomo/solvers/plugins/solvers/direct_solver.py index 3064ca48154..a99eec79fd9 100644 --- a/pyomo/solvers/plugins/solvers/direct_solver.py +++ b/pyomo/solvers/plugins/solvers/direct_solver.py @@ -12,7 +12,9 @@ import time import logging -from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import DirectOrPersistentSolver +from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import ( + DirectOrPersistentSolver, +) from pyomo.core.base.block import _BlockData from pyomo.core.kernel.block import IBlock from pyomo.core.base.suffix import active_import_suffix_generator @@ -22,6 +24,7 @@ logger = logging.getLogger('pyomo.solvers') + class DirectSolver(DirectOrPersistentSolver): """ Subclasses need to: @@ -56,8 +59,10 @@ def _presolve(self, *args, **kwds): """ model = args[0] if len(args) != 1: - msg = ("The {0} plugin method '_presolve' must be supplied a single problem instance - {1} were " + - "supplied.").format(type(self), len(args)) + msg = ( + "The {0} plugin method '_presolve' must be supplied a single problem instance - {1} were " + + "supplied." + ).format(type(self), len(args)) raise ValueError(msg) self._set_instance(model, kwds) @@ -65,7 +70,7 @@ def _presolve(self, *args, **kwds): DirectOrPersistentSolver._presolve(self, **kwds) def solve(self, *args, **kwds): - """ Solve the problem """ + """Solve the problem""" self.available(exception_flag=True) # @@ -79,21 +84,26 @@ def solve(self, *args, **kwds): if not arg.is_constructed(): raise RuntimeError( "Attempting to solve model=%s with unconstructed " - "component(s)" % (arg.name,) ) + "component(s)" % (arg.name,) + ) _model = arg # import suffixes must be on the top-level model if isinstance(arg, _BlockData): - model_suffixes = list(name for (name,comp) in active_import_suffix_generator(arg)) + model_suffixes = list( + name for (name, comp) in active_import_suffix_generator(arg) + ) else: assert isinstance(arg, IBlock) - model_suffixes = list(comp.storage_key for comp in - import_suffix_generator(arg, - active=True, - descend_into=False)) + model_suffixes = list( + comp.storage_key + for comp in import_suffix_generator( + arg, active=True, descend_into=False + ) + ) if len(model_suffixes) > 0: - kwds_suffixes = kwds.setdefault('suffixes',[]) + kwds_suffixes = kwds.setdefault('suffixes', []) for name in model_suffixes: if name not in kwds_suffixes: kwds_suffixes.append(name) @@ -111,9 +121,9 @@ def solve(self, *args, **kwds): self.options.update(orig_options) self.options.update(kwds.pop('options', {})) self.options.update( - self._options_string_to_dict(kwds.pop('options_string', ''))) + self._options_string_to_dict(kwds.pop('options_string', '')) + ) try: - # we're good to go. initial_time = time.time() @@ -121,7 +131,10 @@ def solve(self, *args, **kwds): presolve_completion_time = time.time() if self._report_timing: - print(" %6.2f seconds required for presolve" % (presolve_completion_time - initial_time)) + print( + " %6.2f seconds required for presolve" + % (presolve_completion_time - initial_time) + ) if not _model is None: self._initialize_callbacks(_model) @@ -133,25 +146,28 @@ def solve(self, *args, **kwds): logger.warning( "Solver (%s) did not return a solver status code.\n" "This is indicative of an internal solver plugin error.\n" - "Please report this to the Pyomo developers." ) + "Please report this to the Pyomo developers." + ) elif _status.rc: logger.error( "Solver (%s) returned non-zero return code (%s)" - % (self.name, _status.rc,)) + % (self.name, _status.rc) + ) if self._tee: - logger.error( - "See the solver log above for diagnostic information." ) + logger.error("See the solver log above for diagnostic information.") elif hasattr(_status, 'log') and _status.log: logger.error("Solver log:\n" + str(_status.log)) - raise ApplicationError( - "Solver (%s) did not exit normally" % self.name) + raise ApplicationError("Solver (%s) did not exit normally" % self.name) solve_completion_time = time.time() if self._report_timing: - print(" %6.2f seconds required for solver" % (solve_completion_time - presolve_completion_time)) + print( + " %6.2f seconds required for solver" + % (solve_completion_time - presolve_completion_time) + ) result = self._postsolve() # *********************************************************** - # The following code is only needed for backwards compatability of load_solutions=False. + # The following code is only needed for backwards compatibility of load_solutions=False. # If we ever only want to support the load_vars, load_duals, etc. methods, then this can be deleted. if self._save_results: result._smap_id = self._smap_id @@ -159,10 +175,12 @@ def solve(self, *args, **kwds): if _model: if isinstance(_model, IBlock): if len(result.solution) == 1: - result.solution(0).symbol_map = \ - getattr(_model, "._symbol_maps")[result._smap_id] - result.solution(0).default_variable_value = \ - self._default_variable_value + result.solution(0).symbol_map = getattr( + _model, "._symbol_maps" + )[result._smap_id] + result.solution( + 0 + ).default_variable_value = self._default_variable_value if self._load_solutions: _model.load_solution(result.solution(0)) else: @@ -173,15 +191,15 @@ def solve(self, *args, **kwds): assert len(getattr(_model, "._symbol_maps")) == 1 delattr(_model, "._symbol_maps") del result._smap_id - if self._load_solutions and \ - (len(result.solution) == 0): + if self._load_solutions and (len(result.solution) == 0): logger.error("No solution is available") else: if self._load_solutions: _model.solutions.load_from( result, select=self._select_index, - default_variable_value=self._default_variable_value) + default_variable_value=self._default_variable_value, + ) result._smap_id = None result.solution.clear() else: @@ -191,7 +209,10 @@ def solve(self, *args, **kwds): postsolve_completion_time = time.time() if self._report_timing: - print(" %6.2f seconds required for postsolve" % (postsolve_completion_time - solve_completion_time)) + print( + " %6.2f seconds required for postsolve" + % (postsolve_completion_time - solve_completion_time) + ) finally: # @@ -200,5 +221,3 @@ def solve(self, *args, **kwds): self.options = orig_options return result - - diff --git a/pyomo/solvers/plugins/solvers/gurobi_direct.py b/pyomo/solvers/plugins/solvers/gurobi_direct.py index c2a73946197..54ea9111508 100644 --- a/pyomo/solvers/plugins/solvers/gurobi_direct.py +++ b/pyomo/solvers/plugins/solvers/gurobi_direct.py @@ -23,7 +23,9 @@ from pyomo.core.staleflag import StaleFlagManager from pyomo.repn import generate_standard_repn from pyomo.solvers.plugins.solvers.direct_solver import DirectSolver -from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import DirectOrPersistentSolver +from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import ( + DirectOrPersistentSolver, +) from pyomo.core.kernel.objective import minimize, maximize from pyomo.opt.results.results_ import SolverResults from pyomo.opt.results.solution import Solution, SolutionStatus @@ -39,6 +41,7 @@ class DegreeError(ValueError): pass + def _is_numeric(x): try: float(x) @@ -57,6 +60,7 @@ def _parse_gurobi_version(gurobipy, avail): GurobiDirect._version = GurobiDirect._version[:4] GurobiDirect._version_major = GurobiDirect._version[0] + gurobipy, gurobipy_available = attempt_import( 'gurobipy', # Other forms of exceptions can be thrown by the gurobi python @@ -70,16 +74,80 @@ def _parse_gurobi_version(gurobipy, avail): ) +def _set_options(model_or_env, options): + # Set a parameters from the dictionary 'options' on the given gurobipy + # model or environment. + for key, option in options.items(): + # When options come from the pyomo command, all + # values are string types, so we try to cast + # them to a numeric value in the event that + # setting the parameter fails. + try: + model_or_env.setParam(key, option) + except TypeError: + # we place the exception handling for + # checking the cast of option to a float in + # another function so that we can simply + # call raise here instead of except + # TypeError as e / raise e, because the + # latter does not preserve the Gurobi stack + # trace + if not _is_numeric(option): + raise + model_or_env.setParam(key, float(option)) + + @SolverFactory.register('gurobi_direct', doc='Direct python interface to Gurobi') class GurobiDirect(DirectSolver): + """A direct interface to Gurobi using gurobipy. + + :param manage_env: Set to True if this solver instance should create and + manage its own Gurobi environment (defaults to False) + :type manage_env: bool + :param options: Dictionary of Gurobi parameters to set + :type options: dict + + If ``manage_env`` is set to True, the ``GurobiDirect`` object creates a local + Gurobi environment and manage all associated Gurobi resources. Importantly, + this enables Gurobi licenses to be freed and connections terminated when the + solver context is exited:: + + with SolverFactory('gurobi', solver_io='python', manage_env=True) as opt: + opt.solve(model) + + # All Gurobi models and environments are freed + + If ``manage_env`` is set to False (the default), the ``GurobiDirect`` object + uses the global default Gurobi environment:: + + with SolverFactory('gurobi', solver_io='python') as opt: + opt.solve(model) + + # Only models created by `opt` are freed, the global default + # environment remains active + + ``manage_env=True`` is required when setting license or connection parameters + programmatically. The ``options`` argument is used to pass parameters to the + Gurobi environment. For example, to connect to a Gurobi Cluster Manager:: + + options = { + "CSManager": "", + "CSAPIAccessID": "", + "CSAPISecret": "", + } + with SolverFactory( + 'gurobi', solver_io='python', manage_env=True, options=options + ) as opt: + opt.solve(model) # Model solved on compute server + # Compute server connection terminated + """ - _verified_license = None - _import_messages = '' _name = None _version = 0 _version_major = 0 + _default_env_started = False - def __init__(self, **kwds): + def __init__(self, manage_env=False, **kwds): if 'type' not in kwds: kwds['type'] = 'gurobi_direct' super(GurobiDirect, self).__init__(**kwds) @@ -97,7 +165,7 @@ def __init__(self, **kwds): self._max_obj_degree = 2 self._max_constraint_degree = 2 - # Note: Undefined capabilites default to None + # Note: Undefined capabilities default to None self._capabilities.linear = True self._capabilities.quadratic_objective = True self._capabilities.quadratic_constraint = True @@ -118,35 +186,55 @@ def __init__(self, **kwds): # version of gurobi is supported (and stored as a class attribute) del self._version + self._manage_env = manage_env + self._env = None + self._env_options = None + self._solver_model = None + def available(self, exception_flag=True): + """Returns True if the solver is available. + + :param exception_flag: If True, raise an exception instead of returning + False if the solver is unavailable (defaults to False) + :type exception_flag: bool + + In general, ``available()`` does not need to be called by the user, as + the check is run automatically when solving a model. However it is useful + for a simple retry loop when using a shared Gurobi license:: + + with SolverFactory('gurobi', solver_io='python') as opt: + while not available(exception_flag=False): + time.sleep(1) + opt.solve(model) + + """ + # First check gurobipy is imported if not gurobipy_available: if exception_flag: gurobipy.log_import_warning(logger=__name__) raise ApplicationError( - "No Python bindings available for %s solver plugin" - % (type(self),)) + "No Python bindings available for %s solver plugin" % (type(self),) + ) return False - if self._verified_license is None: - with capture_output(capture_fd=True) as OUT: - try: - # verify that we can get a Gurobi license - # Gurobipy writes out license file information when creating - # the environment - m = gurobipy.Model() - m.dispose() - GurobiDirect._verified_license = True - except Exception as e: - GurobiDirect._import_messages += \ - "\nCould not create Model - gurobi message=%s\n" % (e,) - GurobiDirect._verified_license = False - if OUT.getvalue(): - GurobiDirect._import_messages += "\n" + OUT.getvalue() - if exception_flag and not self._verified_license: - logger.warning(GurobiDirect._import_messages) + + # Ensure environment is started to check for a valid license + with capture_output(capture_fd=True) as OUT: + try: + self._init_env() + return True + except gurobipy.GurobiError as e: + msg = "Could not create Model - gurobi message=%s\n" % (e,) + if OUT.getvalue(): + msg += "\n" + OUT.getvalue() + # Didn't return, so environment start failed + if exception_flag: + logger.warning(msg) raise ApplicationError( - "Could not create a gurobipy Model for %s solver plugin" - % (type(self),)) - return self._verified_license + "Could not create Model for %s solver plugin - gurobi message=%s" + % (type(self), msg) + ) + else: + return False def _apply_solver(self): StaleFlagManager.mark_all_as_stale() @@ -159,40 +247,18 @@ def _apply_solver(self): if self._keepfiles: # Only save log file when the user wants to keep it. self._solver_model.setParam('LogFile', self._log_file) - print("Solver log file: "+self._log_file) - - # Options accepted by gurobi (case insensitive): - # ['Cutoff', 'IterationLimit', 'NodeLimit', 'SolutionLimit', 'TimeLimit', - # 'FeasibilityTol', 'IntFeasTol', 'MarkowitzTol', 'MIPGap', 'MIPGapAbs', - # 'OptimalityTol', 'PSDTol', 'Method', 'PerturbValue', 'ObjScale', 'ScaleFlag', - # 'SimplexPricing', 'Quad', 'NormAdjust', 'BarIterLimit', 'BarConvTol', - # 'BarCorrectors', 'BarOrder', 'Crossover', 'CrossoverBasis', 'BranchDir', - # 'Heuristics', 'MinRelNodes', 'MIPFocus', 'NodefileStart', 'NodefileDir', - # 'NodeMethod', 'PumpPasses', 'RINS', 'SolutionNumber', 'SubMIPNodes', 'Symmetry', - # 'VarBranch', 'Cuts', 'CutPasses', 'CliqueCuts', 'CoverCuts', 'CutAggPasses', - # 'FlowCoverCuts', 'FlowPathCuts', 'GomoryPasses', 'GUBCoverCuts', 'ImpliedCuts', - # 'MIPSepCuts', 'MIRCuts', 'NetworkCuts', 'SubMIPCuts', 'ZeroHalfCuts', 'ModKCuts', - # 'Aggregate', 'AggFill', 'PreDual', 'DisplayInterval', 'IISMethod', 'InfUnbdInfo', - # 'LogFile', 'PreCrush', 'PreDepRow', 'PreMIQPMethod', 'PrePasses', 'Presolve', - # 'ResultFile', 'ImproveStartTime', 'ImproveStartGap', 'Threads', 'Dummy', 'OutputFlag'] - for key, option in self.options.items(): - # When options come from the pyomo command, all - # values are string types, so we try to cast - # them to a numeric value in the event that - # setting the parameter fails. - try: - self._solver_model.setParam(key, option) - except TypeError: - # we place the exception handling for - # checking the cast of option to a float in - # another function so that we can simply - # call raise here instead of except - # TypeError as e / raise e, because the - # latter does not preserve the Gurobi stack - # trace - if not _is_numeric(option): - raise - self._solver_model.setParam(key, float(option)) + print("Solver log file: " + self._log_file) + + # Only pass along changed parameters to the model + if self._env_options: + new_options = { + key: option + for key, option in self.options.items() + if key not in self._env_options or self._env_options[key] != option + } + else: + new_options = self.options + _set_options(self._solver_model, new_options) if self._version_major >= 5: for suffix in self._suffixes: @@ -201,7 +267,7 @@ def _apply_solver(self): self._solver_model.optimize(self._callback) self._needs_updated = False - + if self._keepfiles: # Change LogFile to make Gurobi close the original log file. # May not work for all Gurobi versions, like ver. 9.5.0. @@ -215,17 +281,28 @@ def _get_expr_from_pyomo_repn(self, repn, max_degree=2): degree = repn.polynomial_degree() if (degree is None) or (degree > max_degree): - raise DegreeError('GurobiDirect does not support expressions of degree {0}.'.format(degree)) + raise DegreeError( + 'GurobiDirect does not support expressions of degree {0}.'.format( + degree + ) + ) if len(repn.linear_vars) > 0: referenced_vars.update(repn.linear_vars) - new_expr = gurobipy.LinExpr(repn.linear_coefs, [self._pyomo_var_to_solver_var_map[i] for i in repn.linear_vars]) + new_expr = gurobipy.LinExpr( + repn.linear_coefs, + [self._pyomo_var_to_solver_var_map[i] for i in repn.linear_vars], + ) else: new_expr = 0.0 - for i,v in enumerate(repn.quadratic_vars): - x,y = v - new_expr += repn.quadratic_coefs[i] * self._pyomo_var_to_solver_var_map[x] * self._pyomo_var_to_solver_var_map[y] + for i, v in enumerate(repn.quadratic_vars): + x, y = v + new_expr += ( + repn.quadratic_coefs[i] + * self._pyomo_var_to_solver_var_map[x] + * self._pyomo_var_to_solver_var_map[y] + ) referenced_vars.add(x) referenced_vars.add(y) @@ -240,7 +317,9 @@ def _get_expr_from_pyomo_expr(self, expr, max_degree=2): repn = generate_standard_repn(expr, quadratic=False) try: - gurobi_expr, referenced_vars = self._get_expr_from_pyomo_repn(repn, max_degree) + gurobi_expr, referenced_vars = self._get_expr_from_pyomo_repn( + repn, max_degree + ) except DegreeError as e: msg = e.args[0] msg += '\nexpr: {0}'.format(expr) @@ -267,7 +346,9 @@ def _add_var(self, var): vtype = self._gurobi_vtype_from_var(var) lb, ub = self._gurobi_lb_ub_from_var(var) - gurobipy_var = self._solver_model.addVar(lb=lb, ub=ub, vtype=vtype, name=varname) + gurobipy_var = self._solver_model.addVar( + lb=lb, ub=ub, vtype=vtype, name=varname + ) self._pyomo_var_to_solver_var_map[var] = gurobipy_var self._solver_var_to_pyomo_var_map[gurobipy_var] = var @@ -275,6 +356,101 @@ def _add_var(self, var): self._needs_updated = True + def close_global(self): + """Frees all Gurobi models used by this solver, and frees the global + default Gurobi environment. + + The default environment is used by all ``GurobiDirect`` solvers started + with ``manage_env=False`` (the default). To guarantee that all Gurobi + resources are freed, all instantiated ``GurobiDirect`` solvers must also + be correctly closed. + + The following example will free all Gurobi resources assuming the user did + not create any other models (e.g. via another ``GurobiDirect`` object with + ``manage_env=False``):: + + opt = SolverFactory('gurobi', solver_io='python') + try: + opt.solve(model) + finally: + opt.close_global() + # All Gurobi models created by `opt` are freed and the default + # Gurobi environment is closed + """ + self.close() + with capture_output(capture_fd=True): + gurobipy.disposeDefaultEnv() + GurobiDirect._default_env_started = False + + def _init_env(self): + if self._manage_env: + # Ensure an environment is active for this instance + if self._env is None: + assert self._solver_model is None + env = gurobipy.Env(empty=True) + _set_options(env, self.options) + env.start() + # Successful start (no errors): store the environment + self._env = env + self._env_options = dict(self.options) + else: + # Ensure the (global) default env is started + if not GurobiDirect._default_env_started: + m = gurobipy.Model() + m.close() + GurobiDirect._default_env_started = True + + def _create_model(self, model): + self._init_env() + if self._solver_model is not None: + self._solver_model.close() + if model.name is not None: + self._solver_model = gurobipy.Model(model.name, env=self._env) + else: + self._solver_model = gurobipy.Model(env=self._env) + + def close(self): + """Frees local Gurobi resources used by this solver instance. + + All Gurobi models created by the solver are freed. If the solver was + created with ``manage_env=True``, this method also closes the Gurobi + environment used by this solver instance. Calling ``.close()`` achieves + the same result as exiting the solver context (although using context + managers is preferred where possible):: + + opt = SolverFactory('gurobi', solver_io='python', manage_env=True) + try: + opt.solve(model) + finally: + opt.close() + # Gurobi models and environments created by `opt` are freed + + As with the context manager, if ``manage_env=False`` (the default) was + used, only the Gurobi models created by this solver are freed. The + default global Gurobi environment will still be active:: + + opt = SolverFactory('gurobi', solver_io='python') + try: + opt.solve(model) + finally: + opt.close() + # Gurobi models created by `opt` are freed; however the + # default/global Gurobi environment is still active + """ + + if self._solver_model is not None: + self._solver_model.close() + self._solver_model = None + if self._manage_env: + if self._env is not None: + self._env.close() + self._env = None + self._env_options = None + + def __exit__(self, t, v, traceback): + super().__exit__(t, v, traceback) + self.close() + def _set_instance(self, model, kwds={}): self._range_constraints = set() DirectOrPersistentSolver._set_instance(self, model, kwds) @@ -283,16 +459,14 @@ def _set_instance(self, model, kwds={}): self._pyomo_var_to_solver_var_map = ComponentMap() self._solver_var_to_pyomo_var_map = ComponentMap() try: - if model.name is not None: - self._solver_model = gurobipy.Model(model.name) - else: - self._solver_model = gurobipy.Model() + self._create_model(model) except Exception: e = sys.exc_info()[1] - msg = ("Unable to create Gurobi model. " - "Have you installed the Python " - "bindings for Gurobi?\n\n\t"+ - "Error message: {0}".format(e)) + msg = ( + "Unable to create Gurobi model. " + "Have you installed the Python " + "bindings for Gurobi?\n\n\t" + "Error message: {0}".format(e) + ) raise Exception(msg) self._add_block(model) @@ -309,7 +483,8 @@ def _set_instance(self, model, kwds={}): "the IO-option 'output_fixed_variable_bounds=True' " "to suppress this error and fix the variable " "by overwriting its bounds in the Gurobi instance." - % (var.name, self._pyomo_model.name,)) + % (var.name, self._pyomo_model.name) + ) def _add_block(self, block): DirectOrPersistentSolver._add_block(self, block) @@ -326,50 +501,59 @@ def _add_constraint(self, con): if con._linear_canonical_form: gurobi_expr, referenced_vars = self._get_expr_from_pyomo_repn( - con.canonical_form(), - self._max_constraint_degree) - #elif isinstance(con, LinearCanonicalRepn): + con.canonical_form(), self._max_constraint_degree + ) + # elif isinstance(con, LinearCanonicalRepn): # gurobi_expr, referenced_vars = self._get_expr_from_pyomo_repn( # con, # self._max_constraint_degree) else: gurobi_expr, referenced_vars = self._get_expr_from_pyomo_expr( - con.body, - self._max_constraint_degree) + con.body, self._max_constraint_degree + ) if con.has_lb(): if not is_fixed(con.lower): - raise ValueError("Lower bound of constraint {0} " - "is not constant.".format(con)) + raise ValueError( + "Lower bound of constraint {0} is not constant.".format(con) + ) if con.has_ub(): if not is_fixed(con.upper): - raise ValueError("Upper bound of constraint {0} " - "is not constant.".format(con)) + raise ValueError( + "Upper bound of constraint {0} is not constant.".format(con) + ) if con.equality: - gurobipy_con = self._solver_model.addConstr(lhs=gurobi_expr, - sense=gurobipy.GRB.EQUAL, - rhs=value(con.lower), - name=conname) + gurobipy_con = self._solver_model.addConstr( + lhs=gurobi_expr, + sense=gurobipy.GRB.EQUAL, + rhs=value(con.lower), + name=conname, + ) elif con.has_lb() and con.has_ub(): - gurobipy_con = self._solver_model.addRange(gurobi_expr, - value(con.lower), - value(con.upper), - name=conname) + gurobipy_con = self._solver_model.addRange( + gurobi_expr, value(con.lower), value(con.upper), name=conname + ) self._range_constraints.add(con) elif con.has_lb(): - gurobipy_con = self._solver_model.addConstr(lhs=gurobi_expr, - sense=gurobipy.GRB.GREATER_EQUAL, - rhs=value(con.lower), - name=conname) + gurobipy_con = self._solver_model.addConstr( + lhs=gurobi_expr, + sense=gurobipy.GRB.GREATER_EQUAL, + rhs=value(con.lower), + name=conname, + ) elif con.has_ub(): - gurobipy_con = self._solver_model.addConstr(lhs=gurobi_expr, - sense=gurobipy.GRB.LESS_EQUAL, - rhs=value(con.upper), - name=conname) + gurobipy_con = self._solver_model.addConstr( + lhs=gurobi_expr, + sense=gurobipy.GRB.LESS_EQUAL, + rhs=value(con.upper), + name=conname, + ) else: - raise ValueError("Constraint does not have a lower " - "or an upper bound: {0} \n".format(con)) + raise ValueError( + "Constraint does not have a lower " + "or an upper bound: {0} \n".format(con) + ) for var in referenced_vars: self._referenced_variables[var] += 1 @@ -390,8 +574,9 @@ def _add_sos_constraint(self, con): elif level == 2: sos_type = gurobipy.GRB.SOS_TYPE2 else: - raise ValueError("Solver does not support SOS " - "level {0} constraints".format(level)) + raise ValueError( + "Solver does not support SOS level {0} constraints".format(level) + ) gurobi_vars = [] weights = [] @@ -430,7 +615,9 @@ def _gurobi_vtype_from_var(self, var): elif var.is_continuous(): vtype = gurobipy.GRB.CONTINUOUS else: - raise ValueError('Variable domain type is not recognized for {0}'.format(var.domain)) + raise ValueError( + 'Variable domain type is not recognized for {0}'.format(var.domain) + ) return vtype def _set_objective(self, obj): @@ -450,7 +637,9 @@ def _set_objective(self, obj): else: raise ValueError('Objective sense is not recognized: {0}'.format(obj.sense)) - gurobi_expr, referenced_vars = self._get_expr_from_pyomo_expr(obj.expr, self._max_obj_degree) + gurobi_expr, referenced_vars = self._get_expr_from_pyomo_expr( + obj.expr, self._max_obj_degree + ) for var in referenced_vars: self._referenced_variables[var] += 1 @@ -482,7 +671,10 @@ def _postsolve(self): extract_reduced_costs = True flag = True if not flag: - raise RuntimeError("***The gurobi_direct solver plugin cannot extract solution suffix="+suffix) + raise RuntimeError( + "***The gurobi_direct solver plugin cannot extract solution suffix=" + + suffix + ) gprob = self._solver_model grb = gurobipy.GRB @@ -504,95 +696,132 @@ def _postsolve(self): if status == grb.LOADED: # problem is loaded, but no solution self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Model is loaded, but no solution information is available." + self.results.solver.termination_message = ( + "Model is loaded, but no solution information is available." + ) self.results.solver.termination_condition = TerminationCondition.error soln.status = SolutionStatus.unknown elif status == grb.OPTIMAL: # optimal self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message = "Model was solved to optimality (subject to tolerances), " \ - "and an optimal solution is available." + self.results.solver.termination_message = ( + "Model was solved to optimality (subject to tolerances), " + "and an optimal solution is available." + ) self.results.solver.termination_condition = TerminationCondition.optimal soln.status = SolutionStatus.optimal elif status == grb.INFEASIBLE: self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message = "Model was proven to be infeasible" + self.results.solver.termination_message = ( + "Model was proven to be infeasible" + ) self.results.solver.termination_condition = TerminationCondition.infeasible soln.status = SolutionStatus.infeasible elif status == grb.INF_OR_UNBD: self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message = "Problem proven to be infeasible or unbounded." - self.results.solver.termination_condition = TerminationCondition.infeasibleOrUnbounded + self.results.solver.termination_message = ( + "Problem proven to be infeasible or unbounded." + ) + self.results.solver.termination_condition = ( + TerminationCondition.infeasibleOrUnbounded + ) soln.status = SolutionStatus.unsure elif status == grb.UNBOUNDED: self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message = "Model was proven to be unbounded." + self.results.solver.termination_message = ( + "Model was proven to be unbounded." + ) self.results.solver.termination_condition = TerminationCondition.unbounded soln.status = SolutionStatus.unbounded elif status == grb.CUTOFF: self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Optimal objective for model was proven to be worse than the " \ - "value specified in the Cutoff parameter. No solution " \ - "information is available." - self.results.solver.termination_condition = TerminationCondition.minFunctionValue + self.results.solver.termination_message = ( + "Optimal objective for model was proven to be worse than the " + "value specified in the Cutoff parameter. No solution " + "information is available." + ) + self.results.solver.termination_condition = ( + TerminationCondition.minFunctionValue + ) soln.status = SolutionStatus.unknown elif status == grb.ITERATION_LIMIT: self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Optimization terminated because the total number of simplex " \ - "iterations performed exceeded the value specified in the " \ - "IterationLimit parameter." - self.results.solver.termination_condition = TerminationCondition.maxIterations + self.results.solver.termination_message = ( + "Optimization terminated because the total number of simplex " + "iterations performed exceeded the value specified in the " + "IterationLimit parameter." + ) + self.results.solver.termination_condition = ( + TerminationCondition.maxIterations + ) soln.status = SolutionStatus.stoppedByLimit elif status == grb.NODE_LIMIT: self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Optimization terminated because the total number of " \ - "branch-and-cut nodes explored exceeded the value specified " \ - "in the NodeLimit parameter" - self.results.solver.termination_condition = TerminationCondition.maxEvaluations + self.results.solver.termination_message = ( + "Optimization terminated because the total number of " + "branch-and-cut nodes explored exceeded the value specified " + "in the NodeLimit parameter" + ) + self.results.solver.termination_condition = ( + TerminationCondition.maxEvaluations + ) soln.status = SolutionStatus.stoppedByLimit elif status == grb.TIME_LIMIT: self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Optimization terminated because the time expended exceeded " \ - "the value specified in the TimeLimit parameter." - self.results.solver.termination_condition = TerminationCondition.maxTimeLimit + self.results.solver.termination_message = ( + "Optimization terminated because the time expended exceeded " + "the value specified in the TimeLimit parameter." + ) + self.results.solver.termination_condition = ( + TerminationCondition.maxTimeLimit + ) soln.status = SolutionStatus.stoppedByLimit elif status == grb.SOLUTION_LIMIT: self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Optimization terminated because the number of solutions found " \ - "reached the value specified in the SolutionLimit parameter." + self.results.solver.termination_message = ( + "Optimization terminated because the number of solutions found " + "reached the value specified in the SolutionLimit parameter." + ) self.results.solver.termination_condition = TerminationCondition.unknown soln.status = SolutionStatus.stoppedByLimit elif status == grb.INTERRUPTED: self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Optimization was terminated by the user." + self.results.solver.termination_message = ( + "Optimization was terminated by the user." + ) self.results.solver.termination_condition = TerminationCondition.error soln.status = SolutionStatus.error elif status == grb.NUMERIC: self.results.solver.status = SolverStatus.error - self.results.solver.termination_message = "Optimization was terminated due to unrecoverable numerical " \ - "difficulties." + self.results.solver.termination_message = ( + "Optimization was terminated due to unrecoverable numerical " + "difficulties." + ) self.results.solver.termination_condition = TerminationCondition.error soln.status = SolutionStatus.error elif status == grb.SUBOPTIMAL: self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message = "Unable to satisfy optimality tolerances; a sub-optimal " \ - "solution is available." + self.results.solver.termination_message = ( + "Unable to satisfy optimality tolerances; a sub-optimal " + "solution is available." + ) self.results.solver.termination_condition = TerminationCondition.other soln.status = SolutionStatus.feasible # note that USER_OBJ_LIMIT was added in Gurobi 7.0, so it may not be present - elif (status is not None) and \ - (status == getattr(grb,'USER_OBJ_LIMIT',None)): + elif (status is not None) and (status == getattr(grb, 'USER_OBJ_LIMIT', None)): self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "User specified an objective limit " \ - "(a bound on either the best objective " \ - "or the best bound), and that limit has " \ - "been reached. Solution is available." + self.results.solver.termination_message = ( + "User specified an objective limit " + "(a bound on either the best objective " + "or the best bound), and that limit has " + "been reached. Solution is available." + ) self.results.solver.termination_condition = TerminationCondition.other soln.status = SolutionStatus.stoppedByLimit else: self.results.solver.status = SolverStatus.error - self.results.solver.termination_message = \ - ("Unhandled Gurobi solve status " - "("+str(status)+")") + self.results.solver.termination_message = ( + "Unhandled Gurobi solve status (" + str(status) + ")" + ) self.results.solver.termination_condition = TerminationCondition.error soln.status = SolutionStatus.error @@ -603,7 +832,9 @@ def _postsolve(self): elif gprob.ModelSense == -1: self.results.problem.sense = maximize else: - raise RuntimeError('Unrecognized gurobi objective sense: {0}'.format(gprob.ModelSense)) + raise RuntimeError( + 'Unrecognized gurobi objective sense: {0}'.format(gprob.ModelSense) + ) self.results.problem.upper_bound = None self.results.problem.lower_bound = None @@ -632,19 +863,27 @@ def _postsolve(self): except (gurobipy.GurobiError, AttributeError): pass else: - raise RuntimeError('Unrecognized gurobi objective sense: {0}'.format(gprob.ModelSense)) + raise RuntimeError( + 'Unrecognized gurobi objective sense: {0}'.format(gprob.ModelSense) + ) try: - soln.gap = self.results.problem.upper_bound - self.results.problem.lower_bound + soln.gap = ( + self.results.problem.upper_bound - self.results.problem.lower_bound + ) except TypeError: soln.gap = None - self.results.problem.number_of_constraints = gprob.NumConstrs + gprob.NumQConstrs + gprob.NumSOS + self.results.problem.number_of_constraints = ( + gprob.NumConstrs + gprob.NumQConstrs + gprob.NumSOS + ) self.results.problem.number_of_nonzeros = gprob.NumNZs self.results.problem.number_of_variables = gprob.NumVars self.results.problem.number_of_binary_variables = gprob.NumBinVars self.results.problem.number_of_integer_variables = gprob.NumIntVars - self.results.problem.number_of_continuous_variables = gprob.NumVars - gprob.NumIntVars - gprob.NumBinVars + self.results.problem.number_of_continuous_variables = ( + gprob.NumVars - gprob.NumIntVars - gprob.NumBinVars + ) self.results.problem.number_of_objectives = 1 self.results.problem.number_of_solutions = gprob.SolCount @@ -653,7 +892,7 @@ def _postsolve(self): # be the case, both in LP and MIP contexts. if self._save_results: """ - This code in this if statement is only needed for backwards compatability. It is more efficient to set + This code in this if statement is only needed for backwards compatibility. It is more efficient to set _save_results to False and use load_vars, load_duals, etc. """ if gprob.SolCount > 0: @@ -661,7 +900,11 @@ def _postsolve(self): soln_constraints = soln.constraint gurobi_vars = self._solver_model.getVars() - gurobi_vars = list(set(gurobi_vars).intersection(set(self._pyomo_var_to_solver_var_map.values()))) + gurobi_vars = list( + set(gurobi_vars).intersection( + set(self._pyomo_var_to_solver_var_map.values()) + ) + ) var_vals = self._solver_model.getAttr("X", gurobi_vars) names = self._solver_model.getAttr("VarName", gurobi_vars) for gurobi_var, val, name in zip(gurobi_vars, var_vals, names): @@ -683,7 +926,9 @@ def _postsolve(self): soln_constraints[name] = {} if self._version_major >= 5: gurobi_q_cons = self._solver_model.getQConstrs() - q_con_names = self._solver_model.getAttr("QCName", gurobi_q_cons) + q_con_names = self._solver_model.getAttr( + "QCName", gurobi_q_cons + ) for name in q_con_names: soln_constraints[name] = {} @@ -697,7 +942,9 @@ def _postsolve(self): soln_constraints[name]["Dual"] = val if extract_slacks: - gurobi_range_con_vars = set(self._solver_model.getVars()) - set(self._pyomo_var_to_solver_var_map.values()) + gurobi_range_con_vars = set(self._solver_model.getVars()) - set( + self._pyomo_var_to_solver_var_map.values() + ) vals = self._solver_model.getAttr("Slack", gurobi_cons) for gurobi_con, val, name in zip(gurobi_cons, vals, con_names): pyomo_con = self._solver_con_to_pyomo_con_map[gurobi_con] @@ -721,7 +968,6 @@ def _postsolve(self): soln_constraints[name]["Slack"] = val elif self._load_solutions: if gprob.SolCount > 0: - self.load_vars() if extract_reduced_costs: @@ -791,10 +1037,16 @@ def _load_duals(self, cons_to_load=None): if self._version_major >= 5: quadratic_cons_to_load = self._solver_model.getQConstrs() else: - gurobi_cons_to_load = set([con_map[pyomo_con] for pyomo_con in cons_to_load]) - linear_cons_to_load = gurobi_cons_to_load.intersection(set(self._solver_model.getConstrs())) + gurobi_cons_to_load = set( + [con_map[pyomo_con] for pyomo_con in cons_to_load] + ) + linear_cons_to_load = gurobi_cons_to_load.intersection( + set(self._solver_model.getConstrs()) + ) if self._version_major >= 5: - quadratic_cons_to_load = gurobi_cons_to_load.intersection(set(self._solver_model.getQConstrs())) + quadratic_cons_to_load = gurobi_cons_to_load.intersection( + set(self._solver_model.getQConstrs()) + ) linear_vals = self._solver_model.getAttr("Pi", linear_cons_to_load) if self._version_major >= 5: quadratic_vals = self._solver_model.getAttr("QCPi", quadratic_cons_to_load) @@ -814,20 +1066,30 @@ def _load_slacks(self, cons_to_load=None): reverse_con_map = self._solver_con_to_pyomo_con_map slack = self._pyomo_model.slack - gurobi_range_con_vars = set(self._solver_model.getVars()) - set(self._pyomo_var_to_solver_var_map.values()) + gurobi_range_con_vars = set(self._solver_model.getVars()) - set( + self._pyomo_var_to_solver_var_map.values() + ) if cons_to_load is None: linear_cons_to_load = self._solver_model.getConstrs() if self._version_major >= 5: quadratic_cons_to_load = self._solver_model.getQConstrs() else: - gurobi_cons_to_load = set([con_map[pyomo_con] for pyomo_con in cons_to_load]) - linear_cons_to_load = gurobi_cons_to_load.intersection(set(self._solver_model.getConstrs())) + gurobi_cons_to_load = set( + [con_map[pyomo_con] for pyomo_con in cons_to_load] + ) + linear_cons_to_load = gurobi_cons_to_load.intersection( + set(self._solver_model.getConstrs()) + ) if self._version_major >= 5: - quadratic_cons_to_load = gurobi_cons_to_load.intersection(set(self._solver_model.getQConstrs())) + quadratic_cons_to_load = gurobi_cons_to_load.intersection( + set(self._solver_model.getQConstrs()) + ) linear_vals = self._solver_model.getAttr("Slack", linear_cons_to_load) if self._version_major >= 5: - quadratic_vals = self._solver_model.getAttr("QCSlack", quadratic_cons_to_load) + quadratic_vals = self._solver_model.getAttr( + "QCSlack", quadratic_cons_to_load + ) for gurobi_con, val in zip(linear_cons_to_load, linear_vals): pyomo_con = reverse_con_map[gurobi_con] diff --git a/pyomo/solvers/plugins/solvers/gurobi_persistent.py b/pyomo/solvers/plugins/solvers/gurobi_persistent.py index a9a94ee8a75..382cb7c4e6d 100644 --- a/pyomo/solvers/plugins/solvers/gurobi_persistent.py +++ b/pyomo/solvers/plugins/solvers/gurobi_persistent.py @@ -18,7 +18,9 @@ from pyomo.opt.base import SolverFactory -@SolverFactory.register('gurobi_persistent', doc='Persistent python interface to Gurobi') +@SolverFactory.register( + 'gurobi_persistent', doc='Persistent python interface to Gurobi' +) class GurobiPersistent(PersistentSolver, GurobiDirect): """ A class that provides a persistent interface to Gurobi. Direct solver interfaces do not use any file io. @@ -30,7 +32,7 @@ class GurobiPersistent(PersistentSolver, GurobiDirect): Keyword Arguments ----------------- model: ConcreteModel - Passing a model to the constructor is equivalent to calling the set_instance mehtod. + Passing a model to the constructor is equivalent to calling the set_instance method. type: str String indicating the class type of the solver instance. name: str @@ -54,7 +56,9 @@ def _remove_constraint(self, solver_con): if self._solver_model.getAttr('NumConstrs') == 0: self._update() else: - name = self._symbol_map.getSymbol(self._solver_con_to_pyomo_con_map[solver_con]) + name = self._symbol_map.getSymbol( + self._solver_con_to_pyomo_con_map[solver_con] + ) if self._solver_model.getConstrByName(name) is None: self._update() elif isinstance(solver_con, gurobipy.QConstr): @@ -74,7 +78,9 @@ def _remove_constraint(self, solver_con): except gurobipy.GurobiError: self._update() else: - raise ValueError('Unrecognized type for gurobi constraint: {0}'.format(type(solver_con))) + raise ValueError( + 'Unrecognized type for gurobi constraint: {0}'.format(type(solver_con)) + ) self._solver_model.remove(solver_con) self._needs_updated = True @@ -86,7 +92,9 @@ def _remove_var(self, solver_var): if self._solver_model.getAttr('NumVars') == 0: self._update() else: - name = self._symbol_map.getSymbol(self._solver_var_to_pyomo_var_map[solver_var]) + name = self._symbol_map.getSymbol( + self._solver_var_to_pyomo_var_map[solver_var] + ) if self._solver_model.getVarByName(name) is None: self._update() self._solver_model.remove(solver_var) @@ -109,12 +117,16 @@ def update_var(self, var): # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if var.is_indexed(): + # if var.is_indexed(): # for child_var in var.values(): # self.update_var(child_var) # return if var not in self._pyomo_var_to_solver_var_map: - raise ValueError('The Var provided to update_var needs to be added first: {0}'.format(var)) + raise ValueError( + 'The Var provided to update_var needs to be added first: {0}'.format( + var + ) + ) gurobipy_var = self._pyomo_var_to_solver_var_map[var] vtype = self._gurobi_vtype_from_var(var) lb, ub = self._gurobi_lb_ub_from_var(var) @@ -159,12 +171,17 @@ def set_linear_constraint_attr(self, con, attr, val): See gurobi documentation for acceptable values. """ if attr in {'Sense', 'RHS', 'ConstrName'}: - raise ValueError('Linear constraint attr {0} cannot be set with' + - ' the set_linear_constraint_attr method. Please use' + - ' the remove_constraint and add_constraint methods.'.format(attr)) + raise ValueError( + 'Linear constraint attr {0} cannot be set with' + + ' the set_linear_constraint_attr method. Please use' + + ' the remove_constraint and add_constraint methods.'.format(attr) + ) if self._version_major < 7: - if (self._solver_model.getAttr('NumConstrs') == 0 or - self._solver_model.getConstrByName(self._symbol_map.getSymbol(con)) is None): + if ( + self._solver_model.getAttr('NumConstrs') == 0 + or self._solver_model.getConstrByName(self._symbol_map.getSymbol(con)) + is None + ): self._solver_model.update() self._pyomo_con_to_solver_con_map[con].setAttr(attr, val) self._needs_updated = True @@ -192,16 +209,23 @@ def set_var_attr(self, var, attr, val): See gurobi documentation for acceptable values. """ if attr in {'LB', 'UB', 'VType', 'VarName'}: - raise ValueError('Var attr {0} cannot be set with' + - ' the set_var_attr method. Please use' + - ' the update_var method.'.format(attr)) + raise ValueError( + 'Var attr {0} cannot be set with' + + ' the set_var_attr method. Please use' + + ' the update_var method.'.format(attr) + ) if attr == 'Obj': - raise ValueError('Var attr Obj cannot be set with' + - ' the set_var_attr method. Please use' + - ' the set_objective method.') + raise ValueError( + 'Var attr Obj cannot be set with' + + ' the set_var_attr method. Please use' + + ' the set_objective method.' + ) if self._version_major < 7: - if (self._solver_model.getAttr('NumVars') == 0 or - self._solver_model.getVarByName(self._symbol_map.getSymbol(var)) is None): + if ( + self._solver_model.getAttr('NumVars') == 0 + or self._solver_model.getVarByName(self._symbol_map.getSymbol(var)) + is None + ): self._solver_model.update() self._pyomo_var_to_solver_var_map[var].setAttr(attr, val) self._needs_updated = True @@ -445,7 +469,7 @@ def get_gurobi_param_info(self, param): Parameters ---------- param: str - The gurobi parameter to get info for. See Gurobi documenation for possible options. + The gurobi parameter to get info for. See Gurobi documentation for possible options. Returns ------- @@ -456,6 +480,7 @@ def get_gurobi_param_info(self, param): def _intermediate_callback(self): def f(gurobi_model, where): self._callback_func(self._pyomo_model, self, where) + return f def set_callback(self, func=None): @@ -483,7 +508,7 @@ def set_callback(self, func=None): & y \in \mathbb{Z} \end{array} - as an MILP using exteneded cutting planes in callbacks. + as an MILP using extended cutting planes in callbacks. .. testcode:: :skipif: not gurobipy_available @@ -551,30 +576,41 @@ def cbCut(self, con): raise ValueError('cbCut expected an active constraint.') if is_fixed(con.body): - raise ValueError('cbCut expected a non-trival constraint') + raise ValueError('cbCut expected a non-trivial constraint') - gurobi_expr, referenced_vars = self._get_expr_from_pyomo_expr(con.body, self._max_constraint_degree) + gurobi_expr, referenced_vars = self._get_expr_from_pyomo_expr( + con.body, self._max_constraint_degree + ) if con.has_lb(): if con.has_ub(): raise ValueError('Range constraints are not supported in cbCut.') if not is_fixed(con.lower): - raise ValueError('Lower bound of constraint {0} is not constant.'.format(con)) + raise ValueError( + 'Lower bound of constraint {0} is not constant.'.format(con) + ) if con.has_ub(): if not is_fixed(con.upper): - raise ValueError('Upper bound of constraint {0} is not constant.'.format(con)) + raise ValueError( + 'Upper bound of constraint {0} is not constant.'.format(con) + ) if con.equality: - self._solver_model.cbCut(lhs=gurobi_expr, sense=gurobipy.GRB.EQUAL, - rhs=value(con.lower)) + self._solver_model.cbCut( + lhs=gurobi_expr, sense=gurobipy.GRB.EQUAL, rhs=value(con.lower) + ) elif con.has_lb() and (value(con.lower) > -float('inf')): - self._solver_model.cbCut(lhs=gurobi_expr, sense=gurobipy.GRB.GREATER_EQUAL, - rhs=value(con.lower)) + self._solver_model.cbCut( + lhs=gurobi_expr, sense=gurobipy.GRB.GREATER_EQUAL, rhs=value(con.lower) + ) elif con.has_ub() and (value(con.upper) < float('inf')): - self._solver_model.cbCut(lhs=gurobi_expr, sense=gurobipy.GRB.LESS_EQUAL, - rhs=value(con.upper)) + self._solver_model.cbCut( + lhs=gurobi_expr, sense=gurobipy.GRB.LESS_EQUAL, rhs=value(con.upper) + ) else: - raise ValueError('Constraint does not have a lower or an upper bound {0} \n'.format(con)) + raise ValueError( + 'Constraint does not have a lower or an upper bound {0} \n'.format(con) + ) def cbGet(self, what): return self._solver_model.cbGet(what) @@ -618,30 +654,41 @@ def cbLazy(self, con): raise ValueError('cbLazy expected an active constraint.') if is_fixed(con.body): - raise ValueError('cbLazy expected a non-trival constraint') + raise ValueError('cbLazy expected a non-trivial constraint') - gurobi_expr, referenced_vars = self._get_expr_from_pyomo_expr(con.body, self._max_constraint_degree) + gurobi_expr, referenced_vars = self._get_expr_from_pyomo_expr( + con.body, self._max_constraint_degree + ) if con.has_lb(): if con.has_ub(): raise ValueError('Range constraints are not supported in cbLazy.') if not is_fixed(con.lower): - raise ValueError('Lower bound of constraint {0} is not constant.'.format(con)) + raise ValueError( + 'Lower bound of constraint {0} is not constant.'.format(con) + ) if con.has_ub(): if not is_fixed(con.upper): - raise ValueError('Upper bound of constraint {0} is not constant.'.format(con)) + raise ValueError( + 'Upper bound of constraint {0} is not constant.'.format(con) + ) if con.equality: - self._solver_model.cbLazy(lhs=gurobi_expr, sense=gurobipy.GRB.EQUAL, - rhs=value(con.lower)) + self._solver_model.cbLazy( + lhs=gurobi_expr, sense=gurobipy.GRB.EQUAL, rhs=value(con.lower) + ) elif con.has_lb() and (value(con.lower) > -float('inf')): - self._solver_model.cbLazy(lhs=gurobi_expr, sense=gurobipy.GRB.GREATER_EQUAL, - rhs=value(con.lower)) + self._solver_model.cbLazy( + lhs=gurobi_expr, sense=gurobipy.GRB.GREATER_EQUAL, rhs=value(con.lower) + ) elif con.has_ub() and (value(con.upper) < float('inf')): - self._solver_model.cbLazy(lhs=gurobi_expr, sense=gurobipy.GRB.LESS_EQUAL, - rhs=value(con.upper)) + self._solver_model.cbLazy( + lhs=gurobi_expr, sense=gurobipy.GRB.LESS_EQUAL, rhs=value(con.upper) + ) else: - raise ValueError('Constraint does not have a lower or an upper bound {0} \n'.format(con)) + raise ValueError( + 'Constraint does not have a lower or an upper bound {0} \n'.format(con) + ) def cbSetSolution(self, vars, solution): if not isinstance(vars, Iterable): @@ -656,9 +703,9 @@ def _add_column(self, var, obj_coef, constraints, coefficients): """Add a column to the solver's model This will add the Pyomo variable var to the solver's - model, and put the coefficients on the associated + model, and put the coefficients on the associated constraints in the solver model. If the obj_coef is - not zero, it will add obj_coef*var to the objective + not zero, it will add obj_coef*var to the objective of the solver's model. Parameters @@ -674,10 +721,16 @@ def _add_column(self, var, obj_coef, constraints, coefficients): vtype = self._gurobi_vtype_from_var(var) lb, ub = self._gurobi_lb_ub_from_var(var) - gurobipy_var = self._solver_model.addVar(obj=obj_coef, lb=lb, ub=ub, vtype=vtype, name=varname, - column=gurobipy.Column(coeffs=coefficients, constrs=constraints) ) + gurobipy_var = self._solver_model.addVar( + obj=obj_coef, + lb=lb, + ub=ub, + vtype=vtype, + name=varname, + column=gurobipy.Column(coeffs=coefficients, constrs=constraints), + ) - self._pyomo_var_to_solver_var_map[var] = gurobipy_var + self._pyomo_var_to_solver_var_map[var] = gurobipy_var self._solver_var_to_pyomo_var_map[gurobipy_var] = var self._referenced_variables[var] = len(coefficients) diff --git a/pyomo/solvers/plugins/solvers/mosek_direct.py b/pyomo/solvers/plugins/solvers/mosek_direct.py index abf8f6cc1f6..6a21e0fcb9b 100644 --- a/pyomo/solvers/plugins/solvers/mosek_direct.py +++ b/pyomo/solvers/plugins/solvers/mosek_direct.py @@ -16,6 +16,7 @@ import operator import pyomo.core.base.var import pyomo.core.base.constraint +from pyomo.common.dependencies import attempt_import from pyomo.common.tempfiles import TempfileManager from pyomo.core import is_fixed, value, minimize, maximize from pyomo.core.base.suffix import Suffix @@ -23,26 +24,41 @@ from pyomo.opt.base.solvers import OptSolver from pyomo.repn import generate_standard_repn from pyomo.solvers.plugins.solvers.direct_solver import DirectSolver -from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import \ - DirectOrPersistentSolver +from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import ( + DirectOrPersistentSolver, +) from pyomo.common.collections import ComponentMap, ComponentSet, Bunch from pyomo.opt import SolverFactory -from pyomo.core.kernel.conic import (_ConicBase, quadratic, rotated_quadratic, - primal_exponential, primal_power, - dual_exponential, dual_power) +from pyomo.core.kernel.conic import ( + _ConicBase, + quadratic, + rotated_quadratic, + primal_exponential, + primal_power, + primal_geomean, + dual_exponential, + dual_power, + dual_geomean, + svec_psdcone, +) from pyomo.opt.results.results_ import SolverResults from pyomo.opt.results.solution import Solution, SolutionStatus from pyomo.opt.results.solver import TerminationCondition, SolverStatus + logger = logging.getLogger('pyomo.solvers') inf = float('inf') -from itertools import accumulate, filterfalse +mosek, mosek_available = attempt_import('mosek') class DegreeError(ValueError): pass +class UnsupportedDomainError(TypeError): + pass + + def _is_numeric(x): try: float(x) @@ -54,7 +70,9 @@ def _is_numeric(x): @SolverFactory.register('mosek', doc='The MOSEK LP/QP/SOCP/MIP solver') class MOSEK(OptSolver): """ - The MOSEK LP/QP/SOCP/MIP solver + The MOSEK solver for continuous/mixed-integer linear, quadratic and conic + (quadratic, exponential, power cones) problems. MOSEK also supports + continuous SDPs. """ def __new__(cls, *args, **kwds): @@ -62,18 +80,15 @@ def __new__(cls, *args, **kwds): if mode in {'python', 'direct'}: opt = SolverFactory('mosek_direct', **kwds) if opt is None: - logger.error( - 'MOSEK\'s Optimizer API for python is not installed.') + logger.error('MOSEK\'s Optimizer API for python is not installed.') return opt if mode == 'persistent': opt = SolverFactory('mosek_persistent', **kwds) if opt is None: - logger.error( - 'MOSEK\'s Optimizer API for python is not installed.') + logger.error('MOSEK\'s Optimizer API for python is not installed.') return opt else: - logger.error( - 'Unknown solver interface: \"{}\"'.format(mode)) + logger.error('Unknown solver interface: \"{}\"'.format(mode)) return None @@ -81,8 +96,7 @@ def __new__(cls, *args, **kwds): class MOSEKDirect(DirectSolver): """ A class to provide a direct interface between pyomo and MOSEK's Optimizer API. - Due to direct python bindings interacting with each other, there is no need for - file IO. + Direct python bindings eliminate any need for file IO. """ def __init__(self, **kwds): @@ -91,10 +105,9 @@ def __init__(self, **kwds): self._pyomo_cone_to_solver_cone_map = dict() self._solver_cone_to_pyomo_cone_map = ComponentMap() self._name = None + self._mosek_env = None try: - import mosek - self._mosek = mosek - self._mosek_env = self._mosek.Env() + self._mosek_env = mosek.Env() self._python_api_exists = True self._version = self._mosek_env.getversion() self._name = "MOSEK " + ".".join(str(i) for i in self._version) @@ -123,13 +136,12 @@ def license_is_valid(self): Runs a check for a valid MOSEK license. Returns False if MOSEK fails to run on a trivial test case. """ - try: - import mosek - except ImportError: + if not mosek_available: return False try: - mosek.Env().checkoutlicense(mosek.feature.pton) - mosek.Env().checkinlicense(mosek.feature.pton) + with mosek.Env() as env: + env.checkoutlicense(mosek.feature.pton) + env.checkinlicense(mosek.feature.pton) except mosek.Error: return False return True @@ -138,11 +150,12 @@ def _apply_solver(self): StaleFlagManager.mark_all_as_stale() if self._tee: + def _process_stream(msg): sys.stdout.write(msg) sys.stdout.flush() - self._solver_model.set_Stream( - self._mosek.streamtype.log, _process_stream) + + self._solver_model.set_Stream(mosek.streamtype.log, _process_stream) if self._keepfiles: logger.info("Solver log file: {}".format(self._log_file)) @@ -152,7 +165,7 @@ def _process_stream(msg): param = key.split('.') if param[0] == 'mosek': param.pop(0) - param = getattr(self._mosek, param[0])(param[1]) + param = getattr(mosek, param[0])(param[1]) if 'sparam' in key.split('.'): self._solver_model.putstrparam(param, option) elif 'dparam' in key.split('.'): @@ -162,15 +175,20 @@ def _process_stream(msg): option = option.split('.') if option[0] == 'mosek': option.pop('mosek') - option = getattr(self._mosek, option[0])(option[1]) + option = getattr(mosek, option[0])(option[1]) else: self._solver_model.putintparam(param, option) except (TypeError, AttributeError): raise try: self._termcode = self._solver_model.optimize() - self._solver_model.solutionsummary(self._mosek.streamtype.msg) - except self._mosek.Error as e: + self._solver_model.solutionsummary(mosek.streamtype.msg) + except mosek.Error as e: + # MOSEK is not good about releasing licenses when an + # exception is raised during optimize(). We will explicitly + # release all licenses to prevent (among other things) a + # "license leak" during testing with expected failures. + self._mosek_env.checkinall() logger.error(e) raise return Bunch(rc=None, log=None) @@ -180,51 +198,103 @@ def _set_instance(self, model, kwds={}): super(MOSEKDirect, self)._set_instance(model, kwds) self._pyomo_cone_to_solver_cone_map = dict() self._solver_cone_to_pyomo_cone_map = ComponentMap() - self._whichsol = getattr(self._mosek.soltype, kwds.pop( - 'soltype', 'bas')) + self._whichsol = getattr(mosek.soltype, kwds.pop('soltype', 'bas')) try: - self._solver_model = self._mosek.Env().Task() + self._solver_model = self._mosek_env.Task() except: err_msg = sys.exc_info()[1] - logger.error("MOSEK task creation failed. " - + "Reason: {}".format(err_msg)) + logger.error("MOSEK task creation failed. " + "Reason: {}".format(err_msg)) raise self._add_block(model) def _get_cone_data(self, con): cone_type, cone_param, cone_members = None, 0, None if isinstance(con, quadratic): - cone_type = self._mosek.conetype.quad + cone_type = mosek.conetype.quad cone_members = [con.r] + list(con.x) elif isinstance(con, rotated_quadratic): - cone_type = self._mosek.conetype.rquad + cone_type = mosek.conetype.rquad cone_members = [con.r1, con.r2] + list(con.x) - elif self._version[0] >= 9: + elif self._version[0] == 9: if isinstance(con, primal_exponential): - cone_type = self._mosek.conetype.pexp + cone_type = mosek.conetype.pexp cone_members = [con.r, con.x1, con.x2] elif isinstance(con, primal_power): - cone_type = self._mosek.conetype.ppow + cone_type = mosek.conetype.ppow cone_param = value(con.alpha) cone_members = [con.r1, con.r2] + list(con.x) elif isinstance(con, dual_exponential): - cone_type = self._mosek.conetype.dexp + cone_type = mosek.conetype.dexp cone_members = [con.r, con.x1, con.x2] elif isinstance(con, dual_power): - cone_type = self._mosek.conetype.dpow + cone_type = mosek.conetype.dpow cone_param = value(con.alpha) cone_members = [con.r1, con.r2] + list(con.x) - return(cone_type, cone_param, ComponentSet(cone_members)) + else: + raise UnsupportedDomainError( + "MOSEK version 9 does not support {}.".format(type(con)) + ) + else: + raise UnsupportedDomainError( + "MOSEK version {} does not support {}".format( + self._version[0], type(con) + ) + ) + return (cone_type, cone_param, ComponentSet(cone_members)) + + def _get_acc_domain(self, cone): + domidx, domdim, members = None, 0, None + if isinstance(cone, quadratic): + domdim = 1 + len(cone.x) + domidx = self._solver_model.appendquadraticconedomain(domdim) + members = [cone.r] + list(cone.x) + elif isinstance(cone, rotated_quadratic): + domdim = 2 + len(cone.x) + domidx = self._solver_model.appendrquadraticconedomain(domdim) + members = [cone.r1, cone.r2] + list(cone.x) + elif isinstance(cone, primal_exponential): + domdim = 3 + domidx = self._solver_model.appendprimalexpconedomain() + members = [cone.r, cone.x1, cone.x2] + elif isinstance(cone, dual_exponential): + domdim = 3 + domidx = self._solver_model.appenddualexpconedomain() + members = [cone.r, cone.x1, cone.x2] + elif isinstance(cone, primal_power): + domdim = 2 + len(cone.x) + domidx = self._solver_model.appendprimalpowerconedomain( + domdim, [value(cone.alpha), 1 - value(cone.alpha)] + ) + members = [cone.r1, cone.r2] + list(cone.x) + elif isinstance(cone, dual_power): + domdim = 2 + len(cone.x) + domidx = self._solver_model.appenddualpowerconedomain( + domdim, [value(cone.alpha), 1 - value(cone.alpha)] + ) + members = [cone.r1, cone.r2] + list(cone.x) + elif isinstance(cone, primal_geomean): + domdim = len(cone.r) + 1 + domidx = self._solver_model.appendprimalgeomeanconedomain(domdim) + members = list(cone.r) + [cone.x] + elif isinstance(cone, dual_geomean): + domdim = len(cone.r) + 1 + domidx = self._solver_model.appenddualgeomeanconedomain(domdim) + members = list(cone.r) + [cone.x] + elif isinstance(cone, svec_psdcone): + domdim = len(cone.x) + domidx = self._solver_model.appendsvecpsdconedomain(domdim) + members = list(cone.x) + return (domdim, domidx, members) def _get_expr_from_pyomo_repn(self, repn, max_degree=2): degree = repn.polynomial_degree() if (degree is None) or degree > max_degree: raise DegreeError( - 'MOSEK does not support expressions of degree {}.'.format(degree)) + 'MOSEK does not support expressions of degree {}.'.format(degree) + ) referenced_vars = ComponentSet(repn.linear_vars) - indices = tuple(self._pyomo_var_to_solver_var_map[i] - for i in repn.linear_vars) + indices = tuple(self._pyomo_var_to_solver_var_map[i] for i in repn.linear_vars) mosek_arow = (indices, tuple(repn.linear_coefs), repn.constant) if len(repn.quadratic_vars) == 0: @@ -233,20 +303,30 @@ def _get_expr_from_pyomo_repn(self, repn, max_degree=2): else: q_vars = itertools.chain.from_iterable(repn.quadratic_vars) referenced_vars.update(q_vars) - qsubi = tuple( - self._pyomo_var_to_solver_var_map[i] for i, j in repn.quadratic_vars) - qsubj = tuple( - self._pyomo_var_to_solver_var_map[j] for i, j in repn.quadratic_vars) - qvals = tuple(v * 2 if qsubi[i] is qsubj[i] else v - for i, v in enumerate(repn.quadratic_coefs)) - mosek_qexp = (qsubj, qsubi, qvals) + qsubi, qsubj = zip( + *[ + (i, j) + if self._pyomo_var_to_solver_var_map[i] + >= self._pyomo_var_to_solver_var_map[j] + else (j, i) + for i, j in repn.quadratic_vars + ] + ) + qsubi = tuple(self._pyomo_var_to_solver_var_map[i] for i in qsubi) + qsubj = tuple(self._pyomo_var_to_solver_var_map[j] for j in qsubj) + qvals = tuple( + v * 2 if qsubi[i] is qsubj[i] else v + for i, v in enumerate(repn.quadratic_coefs) + ) + mosek_qexp = (qsubi, qsubj, qvals) return mosek_arow, mosek_qexp, referenced_vars def _get_expr_from_pyomo_expr(self, expr, max_degree=2): repn = generate_standard_repn(expr, quadratic=(max_degree == 2)) try: mosek_arow, mosek_qexp, referenced_vars = self._get_expr_from_pyomo_repn( - repn, max_degree) + repn, max_degree + ) except DegreeError as e: msg = e.args[0] msg += '\nexpr: {}'.format(expr) @@ -256,20 +336,20 @@ def _get_expr_from_pyomo_expr(self, expr, max_degree=2): def _mosek_vartype_from_var(self, var): if var.is_integer(): - return self._mosek.variabletype.type_int - return self._mosek.variabletype.type_cont + return mosek.variabletype.type_int + return mosek.variabletype.type_cont def _mosek_bounds(self, lb, ub, fixed_bool): if fixed_bool: - return self._mosek.boundkey.fx + return mosek.boundkey.fx if lb == -inf: if ub == inf: - return self._mosek.boundkey.fr + return mosek.boundkey.fr else: - return self._mosek.boundkey.up + return mosek.boundkey.up elif ub == inf: - return self._mosek.boundkey.lo - return self._mosek.boundkey.ra + return mosek.boundkey.lo + return mosek.boundkey.ra def _add_var(self, var): self._add_vars((var,)) @@ -278,30 +358,79 @@ def _add_vars(self, var_seq): if not var_seq: return var_num = self._solver_model.getnumvar() - vnames = tuple(self._symbol_map.getSymbol( - v, self._labeler) for v in var_seq) + vnames = tuple(self._symbol_map.getSymbol(v, self._labeler) for v in var_seq) vtypes = tuple(map(self._mosek_vartype_from_var, var_seq)) - lbs = tuple( value(v) if v.fixed - else -inf if value(v.lb) is None - else value(v.lb) - for v in var_seq + lbs = tuple( + value(v) if v.fixed else -inf if value(v.lb) is None else value(v.lb) + for v in var_seq ) - ubs = tuple( value(v) if v.fixed - else inf if value(v.ub) is None - else value(v.ub) - for v in var_seq + ubs = tuple( + value(v) if v.fixed else inf if value(v.ub) is None else value(v.ub) + for v in var_seq ) fxs = tuple(v.is_fixed() for v in var_seq) bound_types = tuple(map(self._mosek_bounds, lbs, ubs, fxs)) self._solver_model.appendvars(len(var_seq)) - var_ids = range(var_num, - var_num + len(var_seq)) + var_ids = range(var_num, var_num + len(var_seq)) _vnames = tuple(map(self._solver_model.putvarname, var_ids, vnames)) self._solver_model.putvartypelist(var_ids, vtypes) self._solver_model.putvarboundlist(var_ids, bound_types, lbs, ubs) self._pyomo_var_to_solver_var_map.update(zip(var_seq, var_ids)) self._solver_var_to_pyomo_var_map.update(zip(var_ids, var_seq)) - self._referenced_variables.update(zip(var_seq, [0]*len(var_seq))) + self._referenced_variables.update(zip(var_seq, [0] * len(var_seq))) + + def _add_cones(self, cones, num_cones): + cone_names = tuple(self._symbol_map.getSymbol(c, self._labeler) for c in cones) + + # MOSEK v<10 : use "cones" + if self._version[0] < 10: + cone_num = self._solver_model.getnumcone() + cone_indices = range(cone_num, cone_num + num_cones) + cone_type, cone_param, cone_members = zip(*map(self._get_cone_data, cones)) + for i in range(num_cones): + members = tuple( + self._pyomo_var_to_solver_var_map[c_m] for c_m in cone_members[i] + ) + self._solver_model.appendcone(cone_type[i], cone_param[i], members) + self._solver_model.putconename(cone_indices[i], cone_names[i]) + self._pyomo_cone_to_solver_cone_map.update(zip(cones, cone_indices)) + self._solver_cone_to_pyomo_cone_map.update(zip(cone_indices, cones)) + + for i, c in enumerate(cones): + self._vars_referenced_by_con[c] = cone_members[i] + for v in cone_members[i]: + self._referenced_variables[v] += 1 + else: + # MOSEK v>=10 : use affine conic constraints (old cones are deprecated) + domain_dims, domain_indices, cone_members = zip( + *map(self._get_acc_domain, cones) + ) + total_dim = sum(domain_dims) + numafe = self._solver_model.getnumafe() + numacc = self._solver_model.getnumacc() + + members = tuple( + self._pyomo_var_to_solver_var_map[c_m] + for c_m in itertools.chain(*cone_members) + ) + afe_indices = tuple(range(numafe, numafe + total_dim)) + acc_indices = tuple(range(numacc, numacc + num_cones)) + + self._solver_model.appendafes(total_dim) + self._solver_model.putafefentrylist(afe_indices, members, [1] * total_dim) + self._solver_model.appendaccsseq( + domain_indices, total_dim, afe_indices[0], None + ) + + for name in cone_names: + self._solver_model.putaccname(numacc, name) + self._pyomo_cone_to_solver_cone_map.update(zip(cones, acc_indices)) + self._solver_cone_to_pyomo_cone_map.update(zip(acc_indices, cones)) + + for i, c in enumerate(cones): + self._vars_referenced_by_con[c] = cone_members[i] + for v in cone_members[i]: + self._referenced_variables[v] += 1 def _add_constraint(self, con): self._add_constraints((con,)) @@ -314,43 +443,53 @@ def _add_constraints(self, con_seq): logger.warning("Inactive constraints will be skipped.") con_seq = active_seq if self._skip_trivial_constraints: - con_seq = tuple(filter(is_fixed( - operator.attrgetter('body')), con_seq)) - - lq = tuple(filter(operator.attrgetter("_linear_canonical_form"), - con_seq)) - conic = tuple(filter(lambda x: isinstance(x, _ConicBase), con_seq)) - lq_ex = tuple(filterfalse(lambda x: isinstance( - x, _ConicBase) or (x._linear_canonical_form), con_seq)) + con_seq = tuple(filter(is_fixed(operator.attrgetter('body')), con_seq)) + + # Linear/Quadratic constraints + lq = tuple(filter(operator.attrgetter("_linear_canonical_form"), con_seq)) + lq_ex = tuple( + filter( + lambda x: not isinstance(x, _ConicBase) + and not (x._linear_canonical_form), + con_seq, + ) + ) lq_all = lq + lq_ex num_lq = len(lq) + len(lq_ex) - num_cones = len(conic) + if num_lq > 0: con_num = self._solver_model.getnumcon() - lq_data = [self._get_expr_from_pyomo_repn(c.canonical_form()) - for c in lq] - lq_data.extend( - self._get_expr_from_pyomo_expr(c.body) for c in lq_ex) + lq_data = [self._get_expr_from_pyomo_repn(c.canonical_form()) for c in lq] + lq_data.extend(self._get_expr_from_pyomo_expr(c.body) for c in lq_ex) arow, qexp, referenced_vars = zip(*lq_data) q_is, q_js, q_vals = zip(*qexp) l_ids, l_coefs, constants = zip(*arow) - lbs = tuple(-inf if value(lq_all[i].lower) is None else value( - lq_all[i].lower) - constants[i] for i in range(num_lq)) - ubs = tuple(inf if value(lq_all[i].upper) is None else value( - lq_all[i].upper) - constants[i] for i in range(num_lq)) + lbs = tuple( + -inf + if value(lq_all[i].lower) is None + else value(lq_all[i].lower) - constants[i] + for i in range(num_lq) + ) + ubs = tuple( + inf + if value(lq_all[i].upper) is None + else value(lq_all[i].upper) - constants[i] + for i in range(num_lq) + ) fxs = tuple(c.equality for c in lq_all) bound_types = tuple(map(self._mosek_bounds, lbs, ubs, fxs)) sub = range(con_num, con_num + num_lq) - sub_names = tuple(self._symbol_map.getSymbol(c, self._labeler) - for c in lq_all) - ptre = tuple(accumulate(list(map(len, l_ids)))) + sub_names = tuple( + self._symbol_map.getSymbol(c, self._labeler) for c in lq_all + ) + ptre = tuple(itertools.accumulate(list(map(len, l_ids)))) ptrb = (0,) + ptre[:-1] asubs = tuple(itertools.chain.from_iterable(l_ids)) avals = tuple(itertools.chain.from_iterable(l_coefs)) qcsubi = tuple(itertools.chain.from_iterable(q_is)) qcsubj = tuple(itertools.chain.from_iterable(q_js)) qcval = tuple(itertools.chain.from_iterable(q_vals)) - qcsubk = tuple(i for i in sub for j in range(len(q_is[i-con_num]))) + qcsubk = tuple(i for i in sub for j in range(len(q_is[i - con_num]))) self._solver_model.appendcons(num_lq) self._solver_model.putarowlist(sub, ptrb, ptre, asubs, avals) self._solver_model.putqcon(qcsubk, qcsubi, qcsubj, qcval) @@ -365,30 +504,11 @@ def _add_constraints(self, con_seq): for v in referenced_vars[i]: self._referenced_variables[v] += 1 + # Conic constraints + conic = tuple(filter(lambda x: isinstance(x, _ConicBase), con_seq)) + num_cones = len(conic) if num_cones > 0: - cone_num = self._solver_model.getnumcone() - cone_indices = range(cone_num, - cone_num + num_cones) - cone_names = tuple(self._symbol_map.getSymbol( - c, self._labeler) for c in conic) - cone_type, cone_param, cone_members = zip(*map( - self._get_cone_data, conic)) - for i in range(num_cones): - members = tuple(self._pyomo_var_to_solver_var_map[c_m] - for c_m in cone_members[i]) - self._solver_model.appendcone( - cone_type[i], cone_param[i], members) - self._solver_model.putconename( - cone_indices[i], cone_names[i]) - self._pyomo_cone_to_solver_cone_map.update( - zip(conic, cone_indices)) - self._solver_cone_to_pyomo_cone_map.update( - zip(cone_indices, conic)) - - for i, c in enumerate(conic): - self._vars_referenced_by_con[c] = cone_members[i] - for v in cone_members[i]: - self._referenced_variables[v] += 1 + self._add_cones(conic, num_cones) def _set_objective(self, obj): if self._objective is not None: @@ -401,14 +521,15 @@ def _set_objective(self, obj): raise ValueError('Cannot add inactive objective to solver.') if obj.sense == minimize: - self._solver_model.putobjsense(self._mosek.objsense.minimize) + self._solver_model.putobjsense(mosek.objsense.minimize) elif obj.sense == maximize: - self._solver_model.putobjsense(self._mosek.objsense.maximize) + self._solver_model.putobjsense(mosek.objsense.maximize) else: raise ValueError("Objective sense not recognized.") mosek_arow, mosek_qexp, referenced_vars = self._get_expr_from_pyomo_expr( - obj.expr, self._max_obj_degree) + obj.expr, self._max_obj_degree + ) for var in referenced_vars: self._referenced_variables[var] += 1 @@ -426,53 +547,68 @@ def _add_block(self, block): This will keep any existing model components intact. - Use this method when adding conic domains. The add_constraint method - is compatible with conic-constraints, not conic-domains. + Use this method when cones are passed as_domain. The add_constraint method + is compatible with regular cones, not when the as_domain method is used. Parameters ---------- block: Block (scalar Block or single _BlockData) """ - var_seq = tuple(block.component_data_objects( - ctype=pyomo.core.base.var.Var, - descend_into=True, active=True, - sort=True)) + var_seq = tuple( + block.component_data_objects( + ctype=pyomo.core.base.var.Var, descend_into=True, active=True, sort=True + ) + ) self._add_vars(var_seq) - for sub_block in block.block_data_objects(descend_into=True, - active=True): + for sub_block in block.block_data_objects(descend_into=True, active=True): con_list = [] for con in sub_block.component_data_objects( - ctype=pyomo.core.base.constraint.Constraint, - descend_into=False, - active=True, - sort=True): - if (not con.has_lb()) and \ - (not con.has_ub()): + ctype=pyomo.core.base.constraint.Constraint, + descend_into=False, + active=True, + sort=True, + ): + if (not con.has_lb()) and (not con.has_ub()): assert not con.equality continue # non-binding, so skip con_list.append(con) self._add_constraints(con_list) for con in sub_block.component_data_objects( - ctype=pyomo.core.base.sos.SOSConstraint, - descend_into=False, - active=True, - sort=True): + ctype=pyomo.core.base.sos.SOSConstraint, + descend_into=False, + active=True, + sort=True, + ): self._add_sos_constraint(con) obj_counter = 0 for obj in sub_block.component_data_objects( - ctype=pyomo.core.base.objective.Objective, - descend_into=False, - active=True): + ctype=pyomo.core.base.objective.Objective, + descend_into=False, + active=True, + ): obj_counter += 1 if obj_counter > 1: - raise ValueError("Solver interface does not " - "support multiple objectives.") + raise ValueError( + "Solver interface does not support multiple objectives." + ) self._set_objective(obj) - def _postsolve(self): + def _set_whichsol(self): + itr_soltypes = [ + mosek.problemtype.qo, + mosek.problemtype.qcqo, + mosek.problemtype.conic, + ] + if self._solver_model.getnumintvar() >= 1: + self._whichsol = mosek.soltype.itg + elif self._solver_model.getprobtype() in itr_soltypes: + self._whichsol = mosek.soltype.itr + elif self._solver_model.getprobtype() == mosek.problemtype.lo: + self._whichsol = mosek.soltype.bas + def _postsolve(self): extract_duals = False extract_slacks = False extract_reduced_costs = False @@ -489,25 +625,20 @@ def _postsolve(self): flag = True if not flag: raise RuntimeError( - "***MOSEK solver plugin cannot extract solution suffix = " - + suffix) + "***MOSEK solver plugin cannot extract solution suffix = " + suffix + ) msk_task = self._solver_model - msk = self._mosek - itr_soltypes = [msk.problemtype.qo, msk.problemtype.qcqo, - msk.problemtype.conic] + self._set_whichsol() - if (msk_task.getnumintvar() >= 1): - self._whichsol = msk.soltype.itg + if self._whichsol == mosek.soltype.itg: if extract_reduced_costs: logger.warning("Cannot get reduced costs for MIP.") if extract_duals: logger.warning("Cannot get duals for MIP.") extract_reduced_costs = False extract_duals = False - elif (msk_task.getprobtype() in itr_soltypes): - self._whichsol = msk.soltype.itr whichsol = self._whichsol sol_status = msk_task.getsolsta(whichsol) @@ -518,90 +649,114 @@ def _postsolve(self): self.results.solver.name = self._name self.results.solver.wallclock_time = msk_task.getdouinf( - msk.dinfitem.optimizer_time) + mosek.dinfitem.optimizer_time + ) SOLSTA_MAP = { - msk.solsta.unknown: 'unknown', - msk.solsta.optimal: 'optimal', - msk.solsta.prim_and_dual_feas: 'pd_feas', - msk.solsta.prim_feas: 'p_feas', - msk.solsta.dual_feas: 'd_feas', - msk.solsta.prim_infeas_cer: 'p_infeas', - msk.solsta.dual_infeas_cer: 'd_infeas', - msk.solsta.prim_illposed_cer: 'p_illposed', - msk.solsta.dual_illposed_cer: 'd_illposed', - msk.solsta.integer_optimal: 'optimal' + mosek.solsta.unknown: 'unknown', + mosek.solsta.optimal: 'optimal', + mosek.solsta.prim_and_dual_feas: 'pd_feas', + mosek.solsta.prim_feas: 'p_feas', + mosek.solsta.dual_feas: 'd_feas', + mosek.solsta.prim_infeas_cer: 'p_infeas', + mosek.solsta.dual_infeas_cer: 'd_infeas', + mosek.solsta.prim_illposed_cer: 'p_illposed', + mosek.solsta.dual_illposed_cer: 'd_illposed', + mosek.solsta.integer_optimal: 'optimal', } PROSTA_MAP = { - msk.prosta.unknown: 'unknown', - msk.prosta.prim_and_dual_feas: 'pd_feas', - msk.prosta.prim_feas: 'p_feas', - msk.prosta.dual_feas: 'd_feas', - msk.prosta.prim_infeas: 'p_infeas', - msk.prosta.dual_infeas: 'd_infeas', - msk.prosta.prim_and_dual_infeas: 'pd_infeas', - msk.prosta.ill_posed: 'illposed', - msk.prosta.prim_infeas_or_unbounded: 'p_inf_unb' + mosek.prosta.unknown: 'unknown', + mosek.prosta.prim_and_dual_feas: 'pd_feas', + mosek.prosta.prim_feas: 'p_feas', + mosek.prosta.dual_feas: 'd_feas', + mosek.prosta.prim_infeas: 'p_infeas', + mosek.prosta.dual_infeas: 'd_infeas', + mosek.prosta.prim_and_dual_infeas: 'pd_infeas', + mosek.prosta.ill_posed: 'illposed', + mosek.prosta.prim_infeas_or_unbounded: 'p_inf_unb', } if self._version[0] < 9: SOLSTA_OLD = { - msk.solsta.near_optimal: 'optimal', - msk.solsta.near_integer_optimal: 'optimal', - msk.solsta.near_prim_feas: 'p_feas', - msk.solsta.near_dual_feas: 'd_feas', - msk.solsta.near_prim_and_dual_feas: 'pd_feas', - msk.solsta.near_prim_infeas_cer: 'p_infeas', - msk.solsta.near_dual_infeas_cer: 'd_infeas' + mosek.solsta.near_optimal: 'optimal', + mosek.solsta.near_integer_optimal: 'optimal', + mosek.solsta.near_prim_feas: 'p_feas', + mosek.solsta.near_dual_feas: 'd_feas', + mosek.solsta.near_prim_and_dual_feas: 'pd_feas', + mosek.solsta.near_prim_infeas_cer: 'p_infeas', + mosek.solsta.near_dual_infeas_cer: 'd_infeas', } PROSTA_OLD = { - msk.prosta.near_prim_and_dual_feas: 'pd_feas', - msk.prosta.near_prim_feas: 'p_feas', - msk.prosta.near_dual_feas: 'd_feas' + mosek.prosta.near_prim_and_dual_feas: 'pd_feas', + mosek.prosta.near_prim_feas: 'p_feas', + mosek.prosta.near_dual_feas: 'd_feas', } SOLSTA_MAP.update(SOLSTA_OLD) PROSTA_MAP.update(PROSTA_OLD) - if self._termcode == msk.rescode.ok: + if self._termcode == mosek.rescode.ok: self.results.solver.status = SolverStatus.ok self.results.solver.termination_message = "" - elif self._termcode == msk.rescode.trm_max_iterations: + elif self._termcode == mosek.rescode.trm_max_iterations: self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message = "Optimizer terminated at the maximum number of iterations." - self.results.solver.termination_condition = TerminationCondition.maxIterations + self.results.solver.termination_message = ( + "Optimizer terminated at the maximum number of iterations." + ) + self.results.solver.termination_condition = ( + TerminationCondition.maxIterations + ) soln.status = SolutionStatus.stoppedByLimit - elif self._termcode == msk.rescode.trm_max_time: + elif self._termcode == mosek.rescode.trm_max_time: self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message = "Optimizer terminated at the maximum amount of time." - self.results.solver.termination_condition = TerminationCondition.maxTimeLimit + self.results.solver.termination_message = ( + "Optimizer terminated at the maximum amount of time." + ) + self.results.solver.termination_condition = ( + TerminationCondition.maxTimeLimit + ) soln.status = SolutionStatus.stoppedByLimit - elif self._termcode == msk.rescode.trm_user_callback: + elif self._termcode == mosek.rescode.trm_user_callback: self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Optimizer terminated due to the return of the "\ + self.results.solver.termination_message = ( + "Optimizer terminated due to the return of the " "user-defined callback function." - self.results.solver.termination_condition = TerminationCondition.userInterrupt + ) + self.results.solver.termination_condition = ( + TerminationCondition.userInterrupt + ) soln.status = SolutionStatus.unknown - elif self._termcode in [msk.rescode.trm_mio_num_relaxs, - msk.rescode.trm_mio_num_branches, - msk.rescode.trm_num_max_num_int_solutions]: + elif self._termcode in [ + mosek.rescode.trm_mio_num_relaxs, + mosek.rescode.trm_mio_num_branches, + mosek.rescode.trm_num_max_num_int_solutions, + ]: self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message = "The mixed-integer optimizer terminated as the maximum number "\ + self.results.solver.termination_message = ( + "The mixed-integer optimizer terminated as the maximum number " "of relaxations/branches/feasible solutions was reached." - self.results.solver.termination_condition = TerminationCondition.maxEvaluations + ) + self.results.solver.termination_condition = ( + TerminationCondition.maxEvaluations + ) soln.status = SolutionStatus.stoppedByLimit else: - self.results.solver.termination_message = " Optimization terminated with {} response code." \ + self.results.solver.termination_message = ( + " Optimization terminated with {} response code." "Check MOSEK response code documentation for more information.".format( - self._termcode) + self._termcode + ) + ) self.results.solver.termination_condition = TerminationCondition.unknown if SOLSTA_MAP[sol_status] == 'unknown': self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message += " The solution status is unknown." + self.results.solver.termination_message += ( + " The solution status is unknown." + ) self.results.solver.Message = self.results.solver.termination_message self.results.solver.termination_condition = TerminationCondition.unknown soln.status = SolutionStatus.unknown @@ -622,34 +777,46 @@ def _postsolve(self): elif PROSTA_MAP[pro_status] == 'pd_infeas': self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message += " Problem is primal and dual infeasible." + self.results.solver.termination_message += ( + " Problem is primal and dual infeasible." + ) self.results.solver.Message = self.results.solver.termination_message self.results.solver.termination_condition = TerminationCondition.infeasible soln.status = SolutionStatus.infeasible elif PROSTA_MAP[pro_status] == 'p_inf_unb': self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message += " Problem is either primal infeasible or unbounded."\ + self.results.solver.termination_message += ( + " Problem is either primal infeasible or unbounded." " This may happen for MIPs." + ) self.results.solver.Message = self.results.solver.termination_message - self.results.solver.termination_condition = TerminationCondition.infeasibleOrUnbounded + self.results.solver.termination_condition = ( + TerminationCondition.infeasibleOrUnbounded + ) soln.status = SolutionStatus.unsure if SOLSTA_MAP[sol_status] == 'optimal': self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message += " Model was solved to optimality and an optimal solution is available." + self.results.solver.termination_message += ( + " Model was solved to optimality and an optimal solution is available." + ) self.results.solver.termination_condition = TerminationCondition.optimal soln.status = SolutionStatus.optimal elif SOLSTA_MAP[sol_status] == 'pd_feas': self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message += " The solution is both primal and dual feasible." + self.results.solver.termination_message += ( + " The solution is both primal and dual feasible." + ) self.results.solver.termination_condition = TerminationCondition.feasible soln.status = SolutionStatus.feasible elif SOLSTA_MAP[sol_status] == 'p_feas': self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message += " The solution is primal feasible." + self.results.solver.termination_message += ( + " The solution is primal feasible." + ) self.results.solver.termination_condition = TerminationCondition.feasible soln.status = SolutionStatus.feasible @@ -661,74 +828,78 @@ def _postsolve(self): elif SOLSTA_MAP[sol_status] == 'd_infeas': self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message += " The solution is a certificate of dual infeasibility." + self.results.solver.termination_message += ( + " The solution is a certificate of dual infeasibility." + ) self.results.solver.Message = self.results.solver.termination_message self.results.solver.termination_condition = TerminationCondition.unbounded soln.status = SolutionStatus.infeasible elif SOLSTA_MAP[sol_status] == 'p_infeas': self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message += " The solution is a certificate of primal infeasibility." + self.results.solver.termination_message += ( + " The solution is a certificate of primal infeasibility." + ) self.results.solver.Message = self.results.solver.termination_message self.results.solver.termination_condition = TerminationCondition.infeasible soln.status = SolutionStatus.infeasible self.results.problem.name = msk_task.gettaskname() - if msk_task.getobjsense() == msk.objsense.minimize: + if msk_task.getobjsense() == mosek.objsense.minimize: self.results.problem.sense = minimize - elif msk_task.getobjsense() == msk.objsense.maximize: + elif msk_task.getobjsense() == mosek.objsense.maximize: self.results.problem.sense = maximize else: raise RuntimeError( - 'Unrecognized Mosek objective sense: {0}'.format(msk_task.getobjname())) + 'Unrecognized Mosek objective sense: {0}'.format(msk_task.getobjname()) + ) self.results.problem.upper_bound = None self.results.problem.lower_bound = None if msk_task.getnumintvar() == 0: try: - if msk_task.getobjsense() == msk.objsense.minimize: - self.results.problem.upper_bound = msk_task.getprimalobj( - whichsol) - self.results.problem.lower_bound = msk_task.getdualobj( - whichsol) - elif msk_task.getobjsense() == msk.objsense.maximize: - self.results.problem.upper_bound = msk_task.getprimalobj( - whichsol) - self.results.problem.lower_bound = msk_task.getdualobj( - whichsol) - - except (msk.MosekException, AttributeError): + if msk_task.getobjsense() == mosek.objsense.minimize: + self.results.problem.upper_bound = msk_task.getprimalobj(whichsol) + self.results.problem.lower_bound = msk_task.getdualobj(whichsol) + elif msk_task.getobjsense() == mosek.objsense.maximize: + self.results.problem.upper_bound = msk_task.getprimalobj(whichsol) + self.results.problem.lower_bound = msk_task.getdualobj(whichsol) + + except (mosek.MosekException, AttributeError): pass - elif msk_task.getobjsense() == msk.objsense.minimize: # minimizing + elif msk_task.getobjsense() == mosek.objsense.minimize: # minimizing try: - self.results.problem.upper_bound = msk_task.getprimalobj( - whichsol) - except (msk.MosekException, AttributeError): + self.results.problem.upper_bound = msk_task.getprimalobj(whichsol) + except (mosek.MosekException, AttributeError): pass try: self.results.problem.lower_bound = msk_task.getdouinf( - msk.dinfitem.mio_obj_bound) - except (msk.MosekException, AttributeError): + mosek.dinfitem.mio_obj_bound + ) + except (mosek.MosekException, AttributeError): pass - elif msk_task.getobjsense() == msk.objsense.maximize: # maximizing + elif msk_task.getobjsense() == mosek.objsense.maximize: # maximizing try: self.results.problem.upper_bound = msk_task.getdouinf( - msk.dinfitem.mio_obj_bound) - except (msk.MosekException, AttributeError): + mosek.dinfitem.mio_obj_bound + ) + except (mosek.MosekException, AttributeError): pass try: - self.results.problem.lower_bound = msk_task.getprimalobj( - whichsol) - except (msk.MosekException, AttributeError): + self.results.problem.lower_bound = msk_task.getprimalobj(whichsol) + except (mosek.MosekException, AttributeError): pass else: raise RuntimeError( - 'Unrecognized Mosek objective sense: {0}'.format(msk_task.getobjsense())) + 'Unrecognized Mosek objective sense: {0}'.format(msk_task.getobjsense()) + ) try: - soln.gap = self.results.problem.upper_bound - self.results.problem.lower_bound + soln.gap = ( + self.results.problem.upper_bound - self.results.problem.lower_bound + ) except TypeError: soln.gap = None @@ -736,14 +907,15 @@ def _postsolve(self): self.results.problem.number_of_nonzeros = msk_task.getnumanz() self.results.problem.number_of_variables = msk_task.getnumvar() self.results.problem.number_of_integer_variables = msk_task.getnumintvar() - self.results.problem.number_of_continuous_variables = msk_task.getnumvar() - \ - msk_task.getnumintvar() + self.results.problem.number_of_continuous_variables = ( + msk_task.getnumvar() - msk_task.getnumintvar() + ) self.results.problem.number_of_objectives = 1 self.results.problem.number_of_solutions = 1 if self._save_results: """ - This code in this if statement is only needed for backwards compatability. It is more efficient to set + This code in this if statement is only needed for backwards compatibility. It is more efficient to set _save_results to False and use load_vars, load_duals, etc. """ if self.results.problem.number_of_solutions > 0: @@ -751,8 +923,11 @@ def _postsolve(self): soln_constraints = soln.constraint mosek_vars = list(range(msk_task.getnumvar())) - mosek_vars = list(set(mosek_vars).intersection( - set(self._pyomo_var_to_solver_var_map.values()))) + mosek_vars = list( + set(mosek_vars).intersection( + set(self._pyomo_var_to_solver_var_map.values()) + ) + ) var_vals = [0.0] * len(mosek_vars) self._solver_model.getxx(whichsol, var_vals) names = list(map(msk_task.getvarname, mosek_vars)) @@ -763,9 +938,8 @@ def _postsolve(self): soln_variables[name] = {"Value": val} if extract_reduced_costs: - vals = [0.0]*len(mosek_vars) - msk_task.getreducedcosts( - whichsol, 0, len(mosek_vars), vals) + vals = [0.0] * len(mosek_vars) + msk_task.getreducedcosts(whichsol, 0, len(mosek_vars), vals) for mosek_var, val, name in zip(mosek_vars, vals, names): pyomo_var = self._solver_var_to_pyomo_var_map[mosek_var] if self._referenced_variables[pyomo_var] > 0: @@ -776,42 +950,83 @@ def _postsolve(self): con_names = list(map(msk_task.getconname, mosek_cons)) for name in con_names: soln_constraints[name] = {} - """TODO wrong length, needs to be getnumvars() - mosek_cones = list(range(msk_task.getnumcone())) - cone_names = [] - for cone in mosek_cones: - cone_names.append(msk_task.getconename(cone)) - for name in cone_names: - soln_constraints[name] = {} - """ + """using getnumcone, but for each cone, + pass the duals as a tuple of length = dim(cone)""" + if self._version[0] <= 9: + mosek_cones = list(range(msk_task.getnumcone())) + cone_names = [] + for cone in mosek_cones: + cone_names.append(msk_task.getconename(cone)) + for name in cone_names: + soln_constraints[name] = {} + else: + mosek_cones = list(range(msk_task.getnumacc())) + cone_names = [] + for cone in mosek_cones: + cone_names.append(msk_task.getaccname(cone)) + for name in cone_names: + soln_constraints[name] = {} if extract_duals: ncon = msk_task.getnumcon() if ncon > 0: - vals = [0.0]*ncon + vals = [0.0] * ncon msk_task.gety(whichsol, vals) for val, name in zip(vals, con_names): soln_constraints[name]["Dual"] = val - """TODO: wrong length, needs to be getnumvars() - ncone = msk_task.getnumcone() - if ncone > 0: - vals = [0.0]*ncone - msk_task.getsnx(whichsol, vals) - for val, name in zip(vals, cone_names): - soln_constraints[name]["Dual"] = val + """using getnumcone, but for each cone, + pass the duals as a tuple of length = dim(cone) """ + # MOSEK <= 9.3, i.e. variable cones + if self._version[0] <= 9: + ncone = msk_task.getnumcone() + if ncone > 0: + mosek_cones = list(range(ncone)) + cone_duals = list(range(msk_task.getnumvar())) + vals = [0] * len(cone_duals) + self._solver_model.getsnx(whichsol, vals) + for name, cone in zip(cone_names, mosek_cones): + dim = msk_task.getnumconemem(cone) + # Indices of cone members + members = [0] * dim + msk_task.getcone(cone, members) + # Save dual info + soln_constraints[name]["Dual"] = tuple( + vals[i] for i in members + ) + # MOSEK >= 10, i.e. affine conic constraints + else: + ncone = msk_task.getnumacc() + if ncone > 0: + mosek_cones = range(msk_task.getnumacc()) + cone_dims = [msk_task.getaccn(i) for i in mosek_cones] + vals = self._solver_model.getaccdotys(whichsol) + dim = 0 + for name, cone in zip(cone_names, mosek_cones): + soln_constraints[name]['Dual'] = tuple( + vals[dim : dim + cone_dims[cone]] + ) + dim += cone_dims[cone] if extract_slacks: - Ax = [0]*len(mosek_cons) + Ax = [0] * len(mosek_cons) msk_task.getxc(self._whichsol, Ax) for con, name in zip(mosek_cons, con_names): Us = Ls = 0 bk, lb, ub = msk_task.getconbound(con) - if bk in [msk.boundkey.fx, msk.boundkey.ra, msk.boundkey.up]: + if bk in [ + mosek.boundkey.fx, + mosek.boundkey.ra, + mosek.boundkey.up, + ]: Us = ub - Ax[con] - if bk in [msk.boundkey.fx, msk.boundkey.ra, msk.boundkey.lo]: + if bk in [ + mosek.boundkey.fx, + mosek.boundkey.ra, + mosek.boundkey.lo, + ]: Ls = Ax[con] - lb if Us > Ls: @@ -821,7 +1036,6 @@ def _postsolve(self): elif self._load_solutions: if self.results.problem.number_of_solutions > 0: - self.load_vars() if extract_reduced_costs: @@ -842,14 +1056,22 @@ def _postsolve(self): return DirectOrPersistentSolver._postsolve(self) def warm_start_capable(self): + # See #2613: enabling warmstart on MOSEK 10 breaks an MIQP test. + # return self.version() < (10, 0) return True def _warm_start(self): + self._set_whichsol() for pyomo_var, mosek_var in self._pyomo_var_to_solver_var_map.items(): if pyomo_var.value is not None: - for solType in self._mosek.soltype.values: - self._solver_model.putxxslice( - solType, mosek_var, mosek_var + 1, [(pyomo_var.value)]) + self._solver_model.putxxslice( + self._whichsol, mosek_var, mosek_var + 1, [(pyomo_var.value)] + ) + + if (self._version[0] > 9) & (self._whichsol == mosek.soltype.itg): + self._solver_model.putintparam( + mosek.iparam.mio_construct_sol, mosek.onoffkey.on + ) def _load_vars(self, vars_to_load=None): var_map = self._pyomo_var_to_solver_var_map @@ -875,9 +1097,10 @@ def _load_rc(self, vars_to_load=None): vars_to_load = var_map.keys() mosek_vars_to_load = [var_map[pyomo_var] for pyomo_var in vars_to_load] - vals = [0.0]*len(mosek_vars_to_load) + vals = [0.0] * len(mosek_vars_to_load) self._solver_model.getreducedcosts( - self._whichsol, 0, len(mosek_vars_to_load), vals) + self._whichsol, 0, len(mosek_vars_to_load), vals + ) for var, val in zip(vars_to_load, vals): if ref_vars[var] > 0: @@ -889,13 +1112,13 @@ def _load_duals(self, objs_to_load=None): con_map = self._pyomo_con_to_solver_con_map reverse_con_map = self._solver_con_to_pyomo_con_map cone_map = self._pyomo_cone_to_solver_cone_map - #reverse_cone_map = self._solver_cone_to_pyomo_cone_map + reverse_cone_map = self._solver_cone_to_pyomo_cone_map dual = self._pyomo_model.dual if objs_to_load is None: # constraints mosek_cons_to_load = range(self._solver_model.getnumcon()) - vals = [0.0]*len(mosek_cons_to_load) + vals = [0.0] * len(mosek_cons_to_load) self._solver_model.gety(self._whichsol, vals) for mosek_con, val in zip(mosek_cons_to_load, vals): pyomo_con = reverse_con_map[mosek_con] @@ -908,7 +1131,36 @@ def _load_duals(self, objs_to_load=None): for mosek_cone, val in zip(mosek_cones_to_load, vals): pyomo_cone = reverse_cone_map[mosek_cone] dual[pyomo_cone] = val + UPDATE: the following code gets the dual info from cones, + but each cones dual values are passed as lists """ + # cones (MOSEK <= 9) + if self._version[0] <= 9: + vals = [0.0] * self._solver_model.getnumvar() + self._solver_model.getsnx(self._whichsol, vals) + + for mosek_cone in range(self._solver_model.getnumcone()): + dim = self._solver_model.getnumconemem(mosek_cone) + # Indices of cone members + members = [0] * dim + self._solver_model.getcone(mosek_cone, members) + # Save dual info + pyomo_cone = reverse_cone_map[mosek_cone] + dual[pyomo_cone] = tuple(vals[i] for i in members) + # cones (MOSEK >= 10, i.e. affine conic constraints) + else: + mosek_cones_to_load = range(self._solver_model.getnumacc()) + mosek_cone_dims = [ + self._solver_model.getaccn(i) for i in mosek_cones_to_load + ] + vals = self._solver_model.getaccdotys(self._whichsol) + dim = 0 + for mosek_cone in mosek_cones_to_load: + pyomo_cone = reverse_cone_map[mosek_cone] + dual[pyomo_cone] = tuple( + vals[dim : dim + mosek_cone_dims[mosek_cone]] + ) + dim += mosek_cone_dims[mosek_cone] else: mosek_cons_to_load = [] mosek_cones_to_load = [] @@ -919,19 +1171,19 @@ def _load_duals(self, objs_to_load=None): # assume it is a cone mosek_cones_to_load.append(cone_map[obj]) # constraints - mosek_cons_first = min(mosek_cons_to_load) - mosek_cons_last = max(mosek_cons_to_load) - vals = [0.0]*(mosek_cons_last - mosek_cons_first + 1) - self._solver_model.getyslice(self._whichsol, - mosek_cons_first, - mosek_cons_last, - vals) - for mosek_con in mosek_cons_to_load: - slice_index = mosek_con - mosek_cons_first - val = vals[slice_index] - pyomo_con = reverse_con_map[mosek_con] - dual[pyomo_con] = val - """TODO wrong length, needs to be getnumvars() + if len(mosek_cons_to_load) > 0: + mosek_cons_first = min(mosek_cons_to_load) + mosek_cons_last = max(mosek_cons_to_load) + vals = [0.0] * (mosek_cons_last - mosek_cons_first + 1) + self._solver_model.getyslice( + self._whichsol, mosek_cons_first, mosek_cons_last, vals + ) + for mosek_con in mosek_cons_to_load: + slice_index = mosek_con - mosek_cons_first + val = vals[slice_index] + pyomo_con = reverse_con_map[mosek_con] + dual[pyomo_con] = val + """TODO wrong length, needs to be getnumvars() # cones mosek_cones_first = min(mosek_cones_to_load) mosek_cones_last = max(mosek_cones_to_load) @@ -946,6 +1198,24 @@ def _load_duals(self, objs_to_load=None): pyomo_cone = reverse_cone_map[mosek_cone] dual[pyomo_cone] = val """ + # cones (MOSEK <= 9) + if len(mosek_cones_to_load) > 0: + if self._version[0] <= 9: + vals = [0] * self._solver_model.getnumvar() + self._solver_model.getsnx(self._whichsol, vals) + for mosek_cone in mosek_cones_to_load: + dim = self._solver_model.getnumconemem(mosek_cone) + members = [0] * dim + self._solver_model.getcone(mosek_cone, members) + pyomo_cone = reverse_cone_map[mosek_cone] + dual[pyomo_cone] = tuple(vals[i] for i in members) + # cones (MOSEK >= 10, i.e. affine conic constraints) + else: + for mosek_cone in mosek_cones_to_load: + pyomo_cone = reverse_cone_map[mosek_cone] + dual[pyomo_cone] = tuple( + self._solver_model.getaccdoty(self._whichsol, mosek_cone) + ) def _load_slacks(self, cons_to_load=None): if not hasattr(self._pyomo_model, 'slack'): @@ -953,15 +1223,13 @@ def _load_slacks(self, cons_to_load=None): con_map = self._pyomo_con_to_solver_con_map reverse_con_map = self._solver_con_to_pyomo_con_map slack = self._pyomo_model.slack - msk = self._mosek if cons_to_load is None: mosek_cons_to_load = range(self._solver_model.getnumcon()) else: - mosek_cons_to_load = set([con_map[pyomo_con] - for pyomo_con in cons_to_load]) + mosek_cons_to_load = set([con_map[pyomo_con] for pyomo_con in cons_to_load]) - Ax = [0]*len(mosek_cons_to_load) + Ax = [0] * len(mosek_cons_to_load) self._solver_model.getxc(self._whichsol, Ax) for con in mosek_cons_to_load: pyomo_con = reverse_con_map[con] @@ -969,9 +1237,9 @@ def _load_slacks(self, cons_to_load=None): bk, lb, ub = self._solver_model.getconbound(con) - if bk in [msk.boundkey.fx, msk.boundkey.ra, msk.boundkey.up]: + if bk in [mosek.boundkey.fx, mosek.boundkey.ra, mosek.boundkey.up]: Us = ub - Ax[con] - if bk in [msk.boundkey.fx, msk.boundkey.ra, msk.boundkey.lo]: + if bk in [mosek.boundkey.fx, mosek.boundkey.ra, mosek.boundkey.lo]: Ls = Ax[con] - lb if Us > Ls: diff --git a/pyomo/solvers/plugins/solvers/mosek_persistent.py b/pyomo/solvers/plugins/solvers/mosek_persistent.py index b0ec512ec57..4e2aa97b379 100644 --- a/pyomo/solvers/plugins/solvers/mosek_persistent.py +++ b/pyomo/solvers/plugins/solvers/mosek_persistent.py @@ -20,8 +20,9 @@ from pyomo.solvers.plugins.solvers.mosek_direct import MOSEKDirect from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver from pyomo.solvers.plugins.solvers.direct_solver import DirectSolver -from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import \ - DirectOrPersistentSolver +from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import ( + DirectOrPersistentSolver, +) from pyomo.opt.base import SolverFactory from pyomo.core.kernel.conic import _ConicBase from pyomo.core.kernel.block import block @@ -31,11 +32,11 @@ class MOSEKPersistent(PersistentSolver, MOSEKDirect): """ This class provides a persistent interface between pyomo and MOSEK's Optimizer API. - As a child to the MOSEKDirect class, this interface does not need any file IO. - Furthermore, the persistent interface preserves the MOSEK task object, allowing + As a child to the MOSEKDirect class, this interface does not need any file IO. + Furthermore, the persistent interface preserves the MOSEK task object, allowing users to make incremental changes (such as removing variables/constraints, modifying - variables, adding columns etc.) to their models. Note that users are responsible for - informing the persistent interface of any incremental change. For instance, if a new + variables, adding columns etc.) to their models. Note that users are responsible for + informing the persistent interface of any incremental change. For instance, if a new variable is defined, then it would need to be added explicitly by calling the add_var method, before the solver knows of its existence. Keyword Arguments @@ -79,7 +80,7 @@ def add_constraints(self, con_seq): This will keep any existing model components intact. - NOTE: If this method is used to add cones, then the cones should be + NOTE: If this method is used to add cones, then the cones should be passed as constraints. Use the add_block method for conic_domains. Parameters @@ -118,16 +119,18 @@ def remove_vars(self, *solver_vars): except KeyError: v_name = self._symbol_map.getSymbol(v, self._labeler) raise ValueError( - "Variable {} needs to be added before removal.".format(v_name)) + "Variable {} needs to be added before removal.".format(v_name) + ) var_num = self._solver_model.getnumvar() for i, v in enumerate(self._pyomo_var_to_solver_var_map): self._pyomo_var_to_solver_var_map[v] = i - self._solver_var_to_pyomo_var_map = dict(zip(( - range(var_num)), self._pyomo_var_to_solver_var_map.keys())) + self._solver_var_to_pyomo_var_map = dict( + zip((range(var_num)), self._pyomo_var_to_solver_var_map.keys()) + ) def remove_constraint(self, solver_con): """ - Remove a single constraint from the model as well as the MOSEK task. + Remove a single constraint from the model as well as the MOSEK task. This will keep any other model components intact. @@ -141,7 +144,7 @@ def remove_constraint(self, solver_con): def remove_constraints(self, *solver_cons): """ Remove multiple constraints from the model as well as the MOSEK task in one - method call. + method call. This will keep any other model components intact. To remove conic-domains, use the remove_block method. @@ -150,10 +153,10 @@ def remove_constraints(self, *solver_cons): ---------- *solver_cons: Constraint (scalar Constraint or single _ConstraintData) """ - lq_cons = tuple(itertools.filterfalse( - lambda x: isinstance(x, _ConicBase), solver_cons)) - cone_cons = tuple( - filter(lambda x: isinstance(x, _ConicBase), solver_cons)) + lq_cons = tuple( + itertools.filterfalse(lambda x: isinstance(x, _ConicBase), solver_cons) + ) + cone_cons = tuple(filter(lambda x: isinstance(x, _ConicBase), solver_cons)) try: lq = [] cones = [] @@ -172,11 +175,14 @@ def remove_constraints(self, *solver_cons): except KeyError: c_name = self._symbol_map.getSymbol(c, self._labeler) raise ValueError( - "Constraint/Cone {} needs to be added before removal.".format(c_name)) - self._solver_con_to_pyomo_con_map = dict(zip( - range(lq_num), self._pyomo_con_to_solver_con_map.keys())) - self._solver_cone_to_pyomo_cone_map = dict(zip( - range(cone_num), self._pyomo_cone_to_solver_cone_map.keys())) + "Constraint/Cone {} needs to be added before removal.".format(c_name) + ) + self._solver_con_to_pyomo_con_map = dict( + zip(range(lq_num), self._pyomo_con_to_solver_con_map.keys()) + ) + self._solver_cone_to_pyomo_cone_map = dict( + zip(range(cone_num), self._pyomo_cone_to_solver_cone_map.keys()) + ) for i, c in enumerate(self._pyomo_con_to_solver_con_map): self._pyomo_con_to_solver_con_map[c] = i for i, c in enumerate(self._pyomo_cone_to_solver_cone_map): @@ -206,40 +212,46 @@ def update_vars(self, *solver_vars): for v in solver_vars: var_ids.append(self._pyomo_var_to_solver_var_map[v]) vtypes = tuple(map(self._mosek_vartype_from_var, solver_vars)) - lbs = tuple( value(v) if v.fixed - else -float('inf') if value(v.lb) is None - else value(v.lb) - for v in solver_vars + lbs = tuple( + value(v) + if v.fixed + else -float('inf') + if value(v.lb) is None + else value(v.lb) + for v in solver_vars ) - ubs = tuple( value(v) if v.fixed - else float('inf') if value(v.ub) is None - else value(v.ub) - for v in solver_vars + ubs = tuple( + value(v) + if v.fixed + else float('inf') + if value(v.ub) is None + else value(v.ub) + for v in solver_vars ) fxs = tuple(v.is_fixed() for v in solver_vars) bound_types = tuple(map(self._mosek_bounds, lbs, ubs, fxs)) self._solver_model.putvartypelist(var_ids, vtypes) self._solver_model.putvarboundlist(var_ids, bound_types, lbs, ubs) except KeyError: - print(v.name) v_name = self._symbol_map.getSymbol(v, self._labeler) raise ValueError( "Variable {} needs to be added before it can be modified.".format( - v_name)) + v_name + ) + ) def _add_column(self, var, obj_coef, constraints, coefficients): self.add_var(var) var_num = self._solver_model.getnumvar() - self._solver_model.putcj(var_num-1, obj_coef) - self._solver_model.putacol( - var_num-1, constraints, coefficients) + self._solver_model.putcj(var_num - 1, obj_coef) + self._solver_model.putacol(var_num - 1, constraints, coefficients) self._referenced_variables[var] = len(constraints) def write(self, filename): """ Write the model to a file. MOSEK can write files in various popular formats such as: lp, mps, ptf, cbf etc. - In addition to the file formats mentioned above, MOSEK can + In addition to the file formats mentioned above, MOSEK can also write files to native formats such as : opf, task and jtask. The task format is binary, and is the preferred format for sharing with the MOSEK staff in case of queries, since it saves diff --git a/pyomo/solvers/plugins/solvers/persistent_solver.py b/pyomo/solvers/plugins/solvers/persistent_solver.py index be8e8b6611b..34df4e4b454 100644 --- a/pyomo/solvers/plugins/solvers/persistent_solver.py +++ b/pyomo/solvers/plugins/solvers/persistent_solver.py @@ -9,7 +9,9 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import DirectOrPersistentSolver +from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import ( + DirectOrPersistentSolver, +) from pyomo.core.base.block import _BlockData from pyomo.core.kernel.block import IBlock from pyomo.core.base.suffix import active_import_suffix_generator @@ -28,6 +30,7 @@ logger = logging.getLogger('pyomo.solvers') + def _convert_to_const(val): if val.__class__ in native_numeric_types: return val @@ -36,6 +39,7 @@ def _convert_to_const(val): else: return value(val) + class PersistentSolver(DirectOrPersistentSolver): """ A base class for persistent solvers. Direct solver interfaces do not use any file io. @@ -100,7 +104,7 @@ def add_block(self, block): # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if block.is_indexed(): + # if block.is_indexed(): # for sub_block in block.values(): # self._add_block(block) # return @@ -116,7 +120,9 @@ def set_objective(self, obj): obj: Objective """ if self._pyomo_model is None: - raise RuntimeError('You must call set_instance before calling set_objective.') + raise RuntimeError( + 'You must call set_instance before calling set_objective.' + ) return self._set_objective(obj) def add_constraint(self, con): @@ -130,14 +136,16 @@ def add_constraint(self, con): """ if self._pyomo_model is None: - raise RuntimeError('You must call set_instance before calling add_constraint.') + raise RuntimeError( + 'You must call set_instance before calling add_constraint.' + ) # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if con.is_indexed(): + # if con.is_indexed(): # for child_con in con.values(): # self._add_constraint(child_con) - #else: + # else: self._add_constraint(con) def add_var(self, var): @@ -157,10 +165,10 @@ def add_var(self, var): # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if var.is_indexed(): + # if var.is_indexed(): # for child_var in var.values(): # self._add_var(child_var) - #else: + # else: self._add_var(var) def add_sos_constraint(self, con): @@ -174,23 +182,25 @@ def add_sos_constraint(self, con): """ if self._pyomo_model is None: - raise RuntimeError('You must call set_instance before calling add_sos_constraint.') + raise RuntimeError( + 'You must call set_instance before calling add_sos_constraint.' + ) # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if con.is_indexed(): + # if con.is_indexed(): # for child_con in con.values(): # self._add_sos_constraint(child_con) - #else: + # else: self._add_sos_constraint(con) def add_column(self, model, var, obj_coef, constraints, coefficients): """Add a column to the solver's and Pyomo model This will add the Pyomo variable var to the solver's - model, and put the coefficients on the associated + model, and put the coefficients on the associated constraints in the solver model. If the obj_coef is - not zero, it will add obj_coef*var to the objective + not zero, it will add obj_coef*var to the objective of both the Pyomo and solver's model. Parameters @@ -198,30 +208,37 @@ def add_column(self, model, var, obj_coef, constraints, coefficients): model: pyomo ConcreteModel to which the column will be added var: Var (scalar Var or single _VarData) obj_coef: float, pyo.Param - constraints: list of scalar Constraints of single _ConstraintDatas + constraints: list of scalar Constraints of single _ConstraintDatas coefficients: list of the coefficient to put on var in the associated constraint """ if self._pyomo_model is None: raise RuntimeError('You must call set_instance before calling add_column.') if id(self._pyomo_model) != id(model): - raise RuntimeError('The pyomo model which the column is being added to ' - 'must be the same as the pyomo model attached to this ' - 'PersistentSolver instance; i.e., the same pyomo model ' - 'used in set_instance.') + raise RuntimeError( + 'The pyomo model which the column is being added to ' + 'must be the same as the pyomo model attached to this ' + 'PersistentSolver instance; i.e., the same pyomo model ' + 'used in set_instance.' + ) if id(self._pyomo_model) != id(var.model()): raise RuntimeError('The pyomo var must be attached to the solver model') if var in self._pyomo_var_to_solver_var_map: - raise RuntimeError('The pyomo var must not have been already added to ' - 'the solver model') + raise RuntimeError( + 'The pyomo var must not have been already added to the solver model' + ) if len(constraints) != len(coefficients): - raise RuntimeError('The list of constraints and the list of coefficents ' - 'be of equal length') + raise RuntimeError( + 'The list of constraints and the list of coefficients ' + 'be of equal length' + ) obj_coef, constraints, coefficients = self._add_and_collect_column_data( - var, obj_coef, constraints, coefficients) + var, obj_coef, constraints, coefficients + ) self._add_column(var, obj_coef, constraints, coefficients) """ This method should be implemented by subclasses.""" + def _add_column(self, var, obj_coef, constraints, coefficients): raise NotImplementedError('This method should be implemented by subclasses.') @@ -233,10 +250,10 @@ def _add_and_collect_column_data(self, var, obj_coef, constraints, coefficients) Returns the column and objective coefficient data to pass to the solver """ ## process the objective - if obj_coef.__class__ in native_numeric_types and obj_coef == 0.: - pass ## nothing to do + if obj_coef.__class__ in native_numeric_types and obj_coef == 0.0: + pass ## nothing to do else: - self._objective.expr += obj_coef*var + self._objective.expr += obj_coef * var self._vars_referenced_by_obj.add(var) obj_coef = _convert_to_const(obj_coef) @@ -244,8 +261,8 @@ def _add_and_collect_column_data(self, var, obj_coef, constraints, coefficients) ## column information coeff_list = list() constr_list = list() - for val,c in zip(coefficients,constraints): - c._body += val*var + for val, c in zip(coefficients, constraints): + c._body += val * var self._vars_referenced_by_con[c].add(var) cval = _convert_to_const(val) @@ -255,14 +272,17 @@ def _add_and_collect_column_data(self, var, obj_coef, constraints, coefficients) return obj_coef, constr_list, coeff_list """ This method should be implemented by subclasses.""" + def _remove_constraint(self, solver_con): raise NotImplementedError('This method should be implemented by subclasses.') """ This method should be implemented by subclasses.""" + def _remove_sos_constraint(self, solver_sos_con): raise NotImplementedError('This method should be implemented by subclasses.') """ This method should be implemented by subclasses.""" + def _remove_var(self, solver_var): raise NotImplementedError('This method should be implemented by subclasses.') @@ -281,18 +301,24 @@ def remove_block(self, block): # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if block.is_indexed(): + # if block.is_indexed(): # for sub_block in block.values(): # self.remove_block(sub_block) # return for sub_block in block.block_data_objects(descend_into=True, active=True): - for con in sub_block.component_data_objects(ctype=Constraint, descend_into=False, active=True): + for con in sub_block.component_data_objects( + ctype=Constraint, descend_into=False, active=True + ): self.remove_constraint(con) - for con in sub_block.component_data_objects(ctype=SOSConstraint, descend_into=False, active=True): + for con in sub_block.component_data_objects( + ctype=SOSConstraint, descend_into=False, active=True + ): self.remove_sos_constraint(con) - for var in block.component_data_objects(ctype=Var, descend_into=True, active=True): + for var in block.component_data_objects( + ctype=Var, descend_into=True, active=True + ): self.remove_var(var) def remove_constraint(self, con): @@ -308,7 +334,7 @@ def remove_constraint(self, con): # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if con.is_indexed(): + # if con.is_indexed(): # for child_con in con.values(): # self.remove_constraint(child_con) # return @@ -334,7 +360,7 @@ def remove_sos_constraint(self, con): # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if con.is_indexed(): + # if con.is_indexed(): # for child_con in con.values(): # self.remove_sos_constraint(child_con) # return @@ -360,13 +386,17 @@ def remove_var(self, var): # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if var.is_indexed(): + # if var.is_indexed(): # for child_var in var.values(): # self.remove_var(child_var) # return if self._referenced_variables[var] != 0: - raise ValueError('Cannot remove Var {0} because it is still referenced by the '.format(var) + - 'objective or one or more constraints') + raise ValueError( + 'Cannot remove Var {0} because it is still referenced by the '.format( + var + ) + + 'objective or one or more constraints' + ) solver_var = self._pyomo_var_to_solver_var_map[var] self._remove_var(solver_var) self._symbol_map.removeSymbol(var) @@ -375,6 +405,7 @@ def remove_var(self, var): del self._solver_var_to_pyomo_var_map[solver_var] """ This method should be implemented by subclasses.""" + def update_var(self, var): """ Update a variable in the solver's model. This will update bounds, fix/unfix the variable as needed, and update @@ -393,7 +424,7 @@ def solve(self, *args, **kwds): Keyword Arguments ----------------- suffixes: list of str - The strings should represnt suffixes support by the solver. Examples include 'dual', 'slack', and 'rc'. + The strings should represent suffixes support by the solver. Examples include 'dual', 'slack', and 'rc'. options: dict Dictionary of solver options. See the solver documentation for possible solver options. warmstart: bool @@ -416,21 +447,28 @@ def solve(self, *args, **kwds): if len(args) != 0: if self._pyomo_model is not args[0]: msg = 'The problem instance provided to the solve method is not the same as the instance provided' - msg += ' to the set_instance method in the persistent solver interface. ' + msg += ( + ' to the set_instance method in the persistent solver interface. ' + ) raise ValueError(msg) self.available(exception_flag=True) # Collect suffix names to try and import from solution. if isinstance(self._pyomo_model, _BlockData): - model_suffixes = list(name for (name, comp) in active_import_suffix_generator(self._pyomo_model)) + model_suffixes = list( + name + for (name, comp) in active_import_suffix_generator(self._pyomo_model) + ) else: assert isinstance(self._pyomo_model, IBlock) - model_suffixes = list(comp.storage_key for comp in - import_suffix_generator(self._pyomo_model, - active=True, - descend_into=False)) + model_suffixes = list( + comp.storage_key + for comp in import_suffix_generator( + self._pyomo_model, active=True, descend_into=False + ) + ) if len(model_suffixes) > 0: kwds_suffixes = kwds.setdefault('suffixes', []) @@ -450,9 +488,10 @@ def solve(self, *args, **kwds): self.options = Bunch() self.options.update(orig_options) self.options.update(kwds.pop('options', {})) - self.options.update(self._options_string_to_dict(kwds.pop('options_string', ''))) + self.options.update( + self._options_string_to_dict(kwds.pop('options_string', '')) + ) try: - # we're good to go. initial_time = time.time() @@ -460,7 +499,10 @@ def solve(self, *args, **kwds): presolve_completion_time = time.time() if self._report_timing: - print(" %6.2f seconds required for presolve" % (presolve_completion_time - initial_time)) + print( + " %6.2f seconds required for presolve" + % (presolve_completion_time - initial_time) + ) if self._pyomo_model is not None: self._initialize_callbacks(self._pyomo_model) @@ -472,25 +514,28 @@ def solve(self, *args, **kwds): logger.warning( "Solver (%s) did not return a solver status code.\n" "This is indicative of an internal solver plugin error.\n" - "Please report this to the Pyomo developers.") + "Please report this to the Pyomo developers." + ) elif _status.rc: logger.error( "Solver (%s) returned non-zero return code (%s)" - % (self.name, _status.rc,)) + % (self.name, _status.rc) + ) if self._tee: - logger.error( - "See the solver log above for diagnostic information.") + logger.error("See the solver log above for diagnostic information.") elif hasattr(_status, 'log') and _status.log: logger.error("Solver log:\n" + str(_status.log)) - raise ApplicationError( - "Solver (%s) did not exit normally" % self.name) + raise ApplicationError("Solver (%s) did not exit normally" % self.name) solve_completion_time = time.time() if self._report_timing: - print(" %6.2f seconds required for solver" % (solve_completion_time - presolve_completion_time)) + print( + " %6.2f seconds required for solver" + % (solve_completion_time - presolve_completion_time) + ) result = self._postsolve() # *********************************************************** - # The following code is only needed for backwards compatability of load_solutions=False. + # The following code is only needed for backwards compatibility of load_solutions=False. # If we ever only want to support the load_vars, load_duals, etc. methods, then this can be deleted. if self._save_results: result._smap_id = self._smap_id @@ -499,10 +544,12 @@ def solve(self, *args, **kwds): if _model: if isinstance(_model, IBlock): if len(result.solution) == 1: - result.solution(0).symbol_map = \ - getattr(_model, "._symbol_maps")[result._smap_id] - result.solution(0).default_variable_value = \ - self._default_variable_value + result.solution(0).symbol_map = getattr( + _model, "._symbol_maps" + )[result._smap_id] + result.solution( + 0 + ).default_variable_value = self._default_variable_value if self._load_solutions: _model.load_solution(result.solution(0)) else: @@ -513,15 +560,15 @@ def solve(self, *args, **kwds): assert len(getattr(_model, "._symbol_maps")) == 1 delattr(_model, "._symbol_maps") del result._smap_id - if self._load_solutions and \ - (len(result.solution) == 0): + if self._load_solutions and (len(result.solution) == 0): logger.error("No solution is available") else: if self._load_solutions: _model.solutions.load_from( result, select=self._select_index, - default_variable_value=self._default_variable_value) + default_variable_value=self._default_variable_value, + ) result._smap_id = None result.solution.clear() else: @@ -531,8 +578,10 @@ def solve(self, *args, **kwds): postsolve_completion_time = time.time() if self._report_timing: - print(" %6.2f seconds required for postsolve" % (postsolve_completion_time - - solve_completion_time)) + print( + " %6.2f seconds required for postsolve" + % (postsolve_completion_time - solve_completion_time) + ) finally: # diff --git a/pyomo/solvers/plugins/solvers/pywrapper.py b/pyomo/solvers/plugins/solvers/pywrapper.py index 839cd84594b..8f72e630a3d 100644 --- a/pyomo/solvers/plugins/solvers/pywrapper.py +++ b/pyomo/solvers/plugins/solvers/pywrapper.py @@ -19,22 +19,24 @@ @SolverFactory.register('py', doc='Direct python solver interfaces') class pywrapper(OptSolver): - """Direct python solver interface - """ + """Direct python solver interface""" def __new__(cls, *args, **kwds): mode = kwds.get('solver_io', 'python') if mode is None: mode = 'python' if mode != 'python': - logging.getLogger('pyomo.solvers').error("Cannot specify IO mode '%s' for direct python solver interface" % mode) + logging.getLogger('pyomo.solvers').error( + "Cannot specify IO mode '%s' for direct python solver interface" % mode + ) return None # if not 'solver' in kwds: - logging.getLogger('pyomo.solvers').warning("No solver specified for direct python solver interface") + logging.getLogger('pyomo.solvers').warning( + "No solver specified for direct python solver interface" + ) return None kwds['solver_io'] = 'python' solver = kwds['solver'] del kwds['solver'] return SolverFactory(solver, **kwds) - diff --git a/pyomo/solvers/plugins/solvers/xpress_direct.py b/pyomo/solvers/plugins/solvers/xpress_direct.py index 94eff3ffb34..aa5a4ba1b4e 100644 --- a/pyomo/solvers/plugins/solvers/xpress_direct.py +++ b/pyomo/solvers/plugins/solvers/xpress_direct.py @@ -25,7 +25,9 @@ from pyomo.core.staleflag import StaleFlagManager from pyomo.repn import generate_standard_repn from pyomo.solvers.plugins.solvers.direct_solver import DirectSolver -from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import DirectOrPersistentSolver +from pyomo.solvers.plugins.solvers.direct_or_persistent_solver import ( + DirectOrPersistentSolver, +) from pyomo.core.kernel.objective import minimize, maximize from pyomo.opt.results.results_ import SolverResults from pyomo.opt.results.solution import Solution, SolutionStatus @@ -37,26 +39,29 @@ logger = logging.getLogger('pyomo.solvers') + class DegreeError(ValueError): pass -def _is_convertable(conv_type,x): + +def _is_convertible(conv_type, x): try: conv_type(x) except ValueError: return False return True + def _print_message(xp_prob, _, msg, *args): if msg is not None: - sys.stdout.write(msg+'\n') + sys.stdout.write(msg + '\n') sys.stdout.flush() + def _finalize_xpress_import(xpress, avail): if not avail: return - XpressDirect._version = tuple( - int(k) for k in xpress.getversion().split('.')) + XpressDirect._version = tuple(int(k) for k in xpress.getversion().split('.')) XpressDirect._name = "Xpress %s.%s.%s" % XpressDirect._version # in versions prior to 34, xpress raised a RuntimeError, but # in more recent versions it raises a @@ -70,6 +75,7 @@ def _finalize_xpress_import(xpress, avail): if not hasattr(xpress, 'rng'): xpress.rng = xpress.range + class _xpress_importer_class(object): # We want to be able to *update* the message that the deferred # import generates using the stdout recorded during the actual @@ -96,6 +102,7 @@ def __call__(self): self.import_message += OUT.getvalue() return xpress + _xpress_importer = _xpress_importer_class() xpress, xpress_available = attempt_import( 'xpress', @@ -113,7 +120,6 @@ def __call__(self): @SolverFactory.register('xpress_direct', doc='Direct python interface to XPRESS') class XpressDirect(DirectSolver): - _name = None _version = None XpressException = RuntimeError @@ -141,7 +147,7 @@ def __init__(self, **kwds): # ourselves self._opt_time = None - # Note: Undefined capabilites default to None + # Note: Undefined capabilities default to None self._capabilities.linear = True self._capabilities.quadratic_objective = True self._capabilities.quadratic_constraint = True @@ -160,8 +166,8 @@ def available(self, exception_flag=True): if exception_flag and not xpress_available: xpress.log_import_warning(logger=__name__) raise ApplicationError( - "No Python bindings available for %s solver plugin" - % (type(self),)) + "No Python bindings available for %s solver plugin" % (type(self),) + ) return bool(xpress_available) def _apply_solver(self): @@ -169,7 +175,7 @@ def _apply_solver(self): self._solver_model.setlogfile(self._log_file) if self._keepfiles: - print("Solver log file: "+self._log_file) + print("Solver log file: " + self._log_file) # Setting a log file in xpress disables all output # in xpress versions less than 36. @@ -190,7 +196,7 @@ def _apply_solver(self): # get the xpress valid controls xp_controls = xpress.controls for key, option in self.options.items(): - if key == 'mipgap': # handled above + if key == 'mipgap': # handled above continue try: self._solver_model.setControl(key, option) @@ -199,13 +205,13 @@ def _apply_solver(self): # we'll wrap this in a function to raise the # xpress error contr_type = type(getattr(xp_controls, key)) - if not _is_convertable(contr_type, option): + if not _is_convertible(contr_type, option): raise self._solver_model.setControl(key, contr_type(option)) start_time = time.time() if self._tee: - self._solver_model.solve() + self._solve_model() else: # In xpress versions greater than or equal 36, # it seems difficult to completely suppress console @@ -213,7 +219,7 @@ def _apply_solver(self): # As a work around, we capature all screen output # when tee is False. with capture_output() as OUT: - self._solver_model.solve() + self._solve_model() self._opt_time = time.time() - start_time self._solver_model.setlogfile('') @@ -223,24 +229,355 @@ def _apply_solver(self): # FIXME: can we get a return code indicating if XPRESS had a significant failure? return Bunch(rc=None, log=None) + def _get_mip_results(self, results, soln): + """Sets up `results` and `soln` and returns whether there is a solution + to query. + Returns `True` if a feasible solution is available, `False` otherwise. + """ + xprob = self._solver_model + xp = xpress + xprob_attrs = xprob.attributes + status = xprob_attrs.mipstatus + mip_sols = xprob_attrs.mipsols + if status == xp.mip_not_loaded: + results.solver.status = SolverStatus.aborted + results.solver.termination_message = ( + "Model is not loaded; no solution information is available." + ) + results.solver.termination_condition = TerminationCondition.error + soln.status = SolutionStatus.unknown + # no MIP solution, first LP did not solve, second LP did, third search started but incomplete + elif ( + status == xp.mip_lp_not_optimal + or status == xp.mip_lp_optimal + or status == xp.mip_no_sol_found + ): + results.solver.status = SolverStatus.aborted + results.solver.termination_message = ( + "Model is loaded, but no solution information is available." + ) + results.solver.termination_condition = TerminationCondition.error + soln.status = SolutionStatus.unknown + elif status == xp.mip_solution: # some solution available + results.solver.status = SolverStatus.warning + results.solver.termination_message = ( + "Unable to satisfy optimality tolerances; a sub-optimal " + "solution is available." + ) + results.solver.termination_condition = TerminationCondition.other + soln.status = SolutionStatus.feasible + elif status == xp.mip_infeas: # MIP proven infeasible + results.solver.status = SolverStatus.warning + results.solver.termination_message = "Model was proven to be infeasible" + results.solver.termination_condition = TerminationCondition.infeasible + soln.status = SolutionStatus.infeasible + elif status == xp.mip_optimal: # optimal + results.solver.status = SolverStatus.ok + results.solver.termination_message = ( + "Model was solved to optimality (subject to tolerances), " + "and an optimal solution is available." + ) + results.solver.termination_condition = TerminationCondition.optimal + soln.status = SolutionStatus.optimal + elif status == xp.mip_unbounded and mip_sols > 0: + results.solver.status = SolverStatus.warning + results.solver.termination_message = ( + "LP relaxation was proven to be unbounded, " + "but a solution is available." + ) + results.solver.termination_condition = TerminationCondition.unbounded + soln.status = SolutionStatus.unbounded + elif status == xp.mip_unbounded and mip_sols <= 0: + results.solver.status = SolverStatus.warning + results.solver.termination_message = ( + "LP relaxation was proven to be unbounded." + ) + results.solver.termination_condition = TerminationCondition.unbounded + soln.status = SolutionStatus.unbounded + else: + results.solver.status = SolverStatus.error + results.solver.termination_message = ( + "Unhandled Xpress solve status (" + str(status) + ")" + ) + results.solver.termination_condition = TerminationCondition.error + soln.status = SolutionStatus.error + + results.problem.upper_bound = None + results.problem.lower_bound = None + if xprob_attrs.objsense == 1.0: # minimizing MIP + try: + results.problem.upper_bound = xprob_attrs.mipbestobjval + except (XpressDirect.XpressException, AttributeError): + pass + try: + results.problem.lower_bound = xprob_attrs.bestbound + except (XpressDirect.XpressException, AttributeError): + pass + elif xprob_attrs.objsense == -1.0: # maximizing MIP + try: + results.problem.upper_bound = xprob_attrs.bestbound + except (XpressDirect.XpressException, AttributeError): + pass + try: + results.problem.lower_bound = xprob_attrs.mipbestobjval + except (XpressDirect.XpressException, AttributeError): + pass + + return mip_sols > 0 + + def _get_lp_results(self, results, soln): + """Sets up `results` and `soln` and returns whether there is a solution + to query. + Returns `True` if a feasible solution is available, `False` otherwise. + """ + xprob = self._solver_model + xp = xpress + xprob_attrs = xprob.attributes + status = xprob_attrs.lpstatus + if status == xp.lp_unstarted: + results.solver.status = SolverStatus.aborted + results.solver.termination_message = ( + "Model is not loaded; no solution information is available." + ) + results.solver.termination_condition = TerminationCondition.error + soln.status = SolutionStatus.unknown + elif status == xp.lp_optimal: + results.solver.status = SolverStatus.ok + results.solver.termination_message = ( + "Model was solved to optimality (subject to tolerances), " + "and an optimal solution is available." + ) + results.solver.termination_condition = TerminationCondition.optimal + soln.status = SolutionStatus.optimal + elif status == xp.lp_infeas: + results.solver.status = SolverStatus.warning + results.solver.termination_message = "Model was proven to be infeasible" + results.solver.termination_condition = TerminationCondition.infeasible + soln.status = SolutionStatus.infeasible + elif status == xp.lp_cutoff: + results.solver.status = SolverStatus.ok + results.solver.termination_message = ( + "Optimal objective for model was proven to be worse than the " + "cutoff value specified; a solution is available." + ) + results.solver.termination_condition = TerminationCondition.minFunctionValue + soln.status = SolutionStatus.optimal + elif status == xp.lp_unfinished: + results.solver.status = SolverStatus.aborted + results.solver.termination_message = ( + "Optimization was terminated by the user." + ) + results.solver.termination_condition = TerminationCondition.error + soln.status = SolutionStatus.error + elif status == xp.lp_unbounded: + results.solver.status = SolverStatus.warning + results.solver.termination_message = "Model was proven to be unbounded." + results.solver.termination_condition = TerminationCondition.unbounded + soln.status = SolutionStatus.unbounded + elif status == xp.lp_cutoff_in_dual: + results.solver.status = SolverStatus.ok + results.solver.termination_message = ( + "Xpress reported the LP was cutoff in the dual." + ) + results.solver.termination_condition = TerminationCondition.minFunctionValue + soln.status = SolutionStatus.optimal + elif status == xp.lp_unsolved: + results.solver.status = SolverStatus.error + results.solver.termination_message = ( + "Optimization was terminated due to unrecoverable numerical " + "difficulties." + ) + results.solver.termination_condition = TerminationCondition.error + soln.status = SolutionStatus.error + elif status == xp.lp_nonconvex: + results.solver.status = SolverStatus.error + results.solver.termination_message = ( + "Optimization was terminated because nonconvex quadratic data " + "were found." + ) + results.solver.termination_condition = TerminationCondition.error + soln.status = SolutionStatus.error + else: + results.solver.status = SolverStatus.error + results.solver.termination_message = ( + "Unhandled Xpress solve status (" + str(status) + ")" + ) + results.solver.termination_condition = TerminationCondition.error + soln.status = SolutionStatus.error + + results.problem.upper_bound = None + results.problem.lower_bound = None + try: + results.problem.upper_bound = xprob_attrs.lpobjval + results.problem.lower_bound = xprob_attrs.lpobjval + except (XpressDirect.XpressException, AttributeError): + pass + + # Not all solution information will be available in all cases, it is + # up to the caller/user to check the actual status and figure which + # of x, slack, duals, reduced costs are valid. + return xprob_attrs.lpstatus in [ + xp.lp_optimal, + xp.lp_cutoff, + xp.lp_cutoff_in_dual, + ] + + def _get_nlp_results(self, results, soln): + """Sets up `results` and `soln` and returns whether there is a solution + to query. + Returns `True` if a feasible solution is available, `False` otherwise. + """ + xprob = self._solver_model + xp = xpress + xprob_attrs = xprob.attributes + solver = xprob_attrs.xslp_solverselected + if solver == 2: + # Under the hood we used the Xpress optimizer, i.e., the problem + # was convex + if (xprob_attrs.originalmipents > 0) or (xprob_attrs.originalsets > 0): + return self._get_mip_results(results, soln) + elif xprob_attrs.lpstatus and not xprob_attrs.xslp_nlpstatus: + # If there is no NLP solver status, process the result + # using the LP results processor. + return self._get_lp_results(results, soln) + + # The problem was non-linear + status = xprob_attrs.xslp_nlpstatus + solstatus = xprob_attrs.xslp_solstatus + have_soln = False + optimal = False # *globally* optimal? + if status == xp.nlp_unstarted: + results.solver.status = SolverStatus.unknown + results.solver.termination_message = ( + "Non-convex model solve was not started" + ) + results.solver.termination_condition = TerminationCondition.unknown + soln.status = SolutionStatus.unknown + elif status == xp.nlp_locally_optimal: + # This is either xp.nlp_locally_optimal or xp.nlp_solution + # we must look at the solstatus to figure out which + if solstatus in [2, 3]: + results.solver.status = SolverStatus.ok + results.solver.termination_message = ( + "Non-convex model was solved to local optimality" + ) + results.solver.termination_condition = ( + TerminationCondition.locallyOptimal + ) + soln.status = SolutionStatus.locallyOptimal + else: + results.solver.status = SolverStatus.ok + results.solver.termination_message = ( + "Feasible solution found for non-convex model" + ) + results.solver.termination_condition = TerminationCondition.feasible + soln.status = SolutionStatus.feasible + have_soln = True + elif status == xp.nlp_globally_optimal: + results.solver.status = SolverStatus.ok + results.solver.termination_message = ( + "Non-convex model was solved to global optimality" + ) + results.solver.termination_condition = TerminationCondition.optimal + soln.status = SolutionStatus.optimal + have_soln = True + optimal = True + elif status == xp.nlp_locally_infeasible: + results.solver.status = SolverStatus.ok + results.solver.termination_message = ( + "Non-convex model was proven to be locally infeasible" + ) + results.solver.termination_condition = TerminationCondition.noSolution + soln.status = SolutionStatus.unknown + elif status == xp.nlp_infeasible: + results.solver.status = SolverStatus.ok + results.solver.termination_message = ( + "Non-convex model was proven to be infeasible" + ) + results.solver.termination_condition = TerminationCondition.infeasible + soln.status = SolutionStatus.infeasible + elif status == xp.nlp_unbounded: # locally unbounded! + results.solver.status = SolverStatus.ok + results.solver.termination_message = "Non-convex model is locally unbounded" + results.solver.termination_condition = TerminationCondition.unbounded + soln.status = SolutionStatus.unbounded + elif status == xp.nlp_unfinished: + results.solver.status = SolverStatus.ok + results.solver.termination_message = ( + "Non-convex solve not finished (numerical issues?)" + ) + results.solver.termination_condition = TerminationCondition.unknown + soln.status = SolutionStatus.unknown + have_soln = True + else: + results.solver.status = SolverStatus.error + results.solver.termination_message = "Error for non-convex model: " + str( + status + ) + results.solver.termination_condition = TerminationCondition.error + soln.status = SolutionStatus.error + + results.problem.upper_bound = None + results.problem.lower_bound = None + try: + if xprob_attrs.objsense > 0.0 or optimal: # minimizing + results.problem.upper_bound = xprob_attrs.xslp_objval + if xprob_attrs.objsense < 0.0 or optimal: # maximizing + results.problem.lower_bound = xprob_attrs.xslp_objval + except (XpressDirect.XpressException, AttributeError): + pass + + return have_soln + + def _solve_model(self): + xprob = self._solver_model + + is_mip = (xprob.attributes.mipents > 0) or (xprob.attributes.sets > 0) + # Check for quadratic objective or quadratic constraints. If there are + # any then we call nlpoptimize since that can handle non-convex + # quadratics as well. In case of convex quadratics it will call + # mipoptimize under the hood. + if (xprob.attributes.qelems > 0) or (xprob.attributes.qcelems > 0): + xprob.nlpoptimize("g" if is_mip else "") + self._get_results = self._get_nlp_results + elif is_mip: + xprob.mipoptimize() + self._get_results = self._get_mip_results + else: + xprob.lpoptimize() + self._get_results = self._get_lp_results + + self._solver_model.postsolve() + def _get_expr_from_pyomo_repn(self, repn, max_degree=2): referenced_vars = ComponentSet() degree = repn.polynomial_degree() if (degree is None) or (degree > max_degree): - raise DegreeError('XpressDirect does not support expressions of degree {0}.'.format(degree)) + raise DegreeError( + 'XpressDirect does not support expressions of degree {0}.'.format( + degree + ) + ) - # NOTE: xpress's python interface only allows for expresions + # NOTE: xpress's python interface only allows for expressions # with native numeric types. Others, like numpy.float64, # will cause an exception when constructing expressions if len(repn.linear_vars) > 0: referenced_vars.update(repn.linear_vars) - new_expr = xpress.Sum(float(coef)*self._pyomo_var_to_solver_var_map[var] for coef,var in zip(repn.linear_coefs, repn.linear_vars)) + new_expr = xpress.Sum( + float(coef) * self._pyomo_var_to_solver_var_map[var] + for coef, var in zip(repn.linear_coefs, repn.linear_vars) + ) else: new_expr = 0.0 - for coef,(x,y) in zip(repn.quadratic_coefs,repn.quadratic_vars): - new_expr += float(coef) * self._pyomo_var_to_solver_var_map[x] * self._pyomo_var_to_solver_var_map[y] + for coef, (x, y) in zip(repn.quadratic_coefs, repn.quadratic_vars): + new_expr += ( + float(coef) + * self._pyomo_var_to_solver_var_map[x] + * self._pyomo_var_to_solver_var_map[y] + ) referenced_vars.add(x) referenced_vars.add(y) @@ -255,7 +592,9 @@ def _get_expr_from_pyomo_expr(self, expr, max_degree=2): repn = generate_standard_repn(expr, quadratic=False) try: - xpress_expr, referenced_vars = self._get_expr_from_pyomo_repn(repn, max_degree) + xpress_expr, referenced_vars = self._get_expr_from_pyomo_repn( + repn, max_degree + ) except DegreeError as e: msg = e.args[0] msg += '\nexpr: {0}'.format(expr) @@ -291,7 +630,9 @@ def _add_var(self, var): if lb == ub: self._solver_model.chgbounds([xpress_var], ['B'], [lb]) else: - self._solver_model.chgbounds([xpress_var, xpress_var], ['L', 'U'], [lb,ub]) + self._solver_model.chgbounds( + [xpress_var, xpress_var], ['L', 'U'], [lb, ub] + ) self._pyomo_var_to_solver_var_map[var] = xpress_var self._solver_var_to_pyomo_var_map[xpress_var] = var @@ -311,10 +652,11 @@ def _set_instance(self, model, kwds={}): self._solver_model = xpress.problem() except Exception: e = sys.exc_info()[1] - msg = ("Unable to create Xpress model. " - "Have you installed the Python " - "bindings for Xpress?\n\n\t" + - "Error message: {0}".format(e)) + msg = ( + "Unable to create Xpress model. " + "Have you installed the Python " + "bindings for Xpress?\n\n\t" + "Error message: {0}".format(e) + ) raise Exception(msg) self._add_block(model) @@ -333,47 +675,50 @@ def _add_constraint(self, con): if con._linear_canonical_form: xpress_expr, referenced_vars = self._get_expr_from_pyomo_repn( - con.canonical_form(), - self._max_constraint_degree) + con.canonical_form(), self._max_constraint_degree + ) else: xpress_expr, referenced_vars = self._get_expr_from_pyomo_expr( - con.body, - self._max_constraint_degree) + con.body, self._max_constraint_degree + ) if con.has_lb(): if not is_fixed(con.lower): - raise ValueError("Lower bound of constraint {0} " - "is not constant.".format(con)) + raise ValueError( + "Lower bound of constraint {0} is not constant.".format(con) + ) if con.has_ub(): if not is_fixed(con.upper): - raise ValueError("Upper bound of constraint {0} " - "is not constant.".format(con)) + raise ValueError( + "Upper bound of constraint {0} is not constant.".format(con) + ) if con.equality: - xpress_con = xpress.constraint(body=xpress_expr, - sense=xpress.eq, - rhs=value(con.lower), - name=conname) + xpress_con = xpress.constraint( + body=xpress_expr, sense=xpress.eq, rhs=value(con.lower), name=conname + ) elif con.has_lb() and con.has_ub(): - xpress_con = xpress.constraint(body=xpress_expr, - sense=xpress.rng, - lb=value(con.lower), - ub=value(con.upper), - name=conname) + xpress_con = xpress.constraint( + body=xpress_expr, + sense=xpress.rng, + lb=value(con.lower), + ub=value(con.upper), + name=conname, + ) self._range_constraints.add(xpress_con) elif con.has_lb(): - xpress_con = xpress.constraint(body=xpress_expr, - sense=xpress.geq, - rhs=value(con.lower), - name=conname) + xpress_con = xpress.constraint( + body=xpress_expr, sense=xpress.geq, rhs=value(con.lower), name=conname + ) elif con.has_ub(): - xpress_con = xpress.constraint(body=xpress_expr, - sense=xpress.leq, - rhs=value(con.upper), - name=conname) + xpress_con = xpress.constraint( + body=xpress_expr, sense=xpress.leq, rhs=value(con.upper), name=conname + ) else: - raise ValueError("Constraint does not have a lower " - "or an upper bound: {0} \n".format(con)) + raise ValueError( + "Constraint does not have a lower " + "or an upper bound: {0} \n".format(con) + ) self._solver_model.addConstraint(xpress_con) @@ -389,9 +734,10 @@ def _add_sos_constraint(self, con): conname = self._symbol_map.getSymbol(con, self._labeler) level = con.level - if level not in [1,2]: - raise ValueError("Solver does not support SOS " - "level {0} constraints".format(level)) + if level not in [1, 2]: + raise ValueError( + "Solver does not support SOS level {0} constraints".format(level) + ) xpress_vars = [] weights = [] @@ -429,7 +775,9 @@ def _xpress_vartype_from_var(self, var): elif var.is_continuous(): vartype = xpress.continuous else: - raise ValueError('Variable domain type is not recognized for {0}'.format(var.domain)) + raise ValueError( + 'Variable domain type is not recognized for {0}'.format(var.domain) + ) return vartype def _set_objective(self, obj): @@ -449,7 +797,9 @@ def _set_objective(self, obj): else: raise ValueError('Objective sense is not recognized: {0}'.format(obj.sense)) - xpress_expr, referenced_vars = self._get_expr_from_pyomo_expr(obj.expr, self._max_obj_degree) + xpress_expr, referenced_vars = self._get_expr_from_pyomo_expr( + obj.expr, self._max_obj_degree + ) for var in referenced_vars: self._referenced_variables[var] += 1 @@ -479,7 +829,10 @@ def _postsolve(self): extract_reduced_costs = True flag = True if not flag: - raise RuntimeError("***The xpress_direct solver plugin cannot extract solution suffix="+suffix) + raise RuntimeError( + "***The xpress_direct solver plugin cannot extract solution suffix=" + + suffix + ) xprob = self._solver_model xp = xpress @@ -503,117 +856,11 @@ def _postsolve(self): self.results.solver.name = XpressDirect._name self.results.solver.wallclock_time = self._opt_time - if is_mip: - status = xprob_attrs.mipstatus - mip_sols = xprob_attrs.mipsols - if status == xp.mip_not_loaded: - self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Model is not loaded; no solution information is available." - self.results.solver.termination_condition = TerminationCondition.error - soln.status = SolutionStatus.unknown - #no MIP solution, first LP did not solve, second LP did, third search started but incomplete - elif status == xp.mip_lp_not_optimal \ - or status == xp.mip_lp_optimal \ - or status == xp.mip_no_sol_found: - self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Model is loaded, but no solution information is available." - self.results.solver.termination_condition = TerminationCondition.error - soln.status = SolutionStatus.unknown - elif status == xp.mip_solution: # some solution available - self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message = "Unable to satisfy optimality tolerances; a sub-optimal " \ - "solution is available." - self.results.solver.termination_condition = TerminationCondition.other - soln.status = SolutionStatus.feasible - elif status == xp.mip_infeas: # MIP proven infeasible - self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message = "Model was proven to be infeasible" - self.results.solver.termination_condition = TerminationCondition.infeasible - soln.status = SolutionStatus.infeasible - elif status == xp.mip_optimal: # optimal - self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message = "Model was solved to optimality (subject to tolerances), " \ - "and an optimal solution is available." - self.results.solver.termination_condition = TerminationCondition.optimal - soln.status = SolutionStatus.optimal - elif status == xp.mip_unbounded and mip_sols > 0: - self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message = "LP relaxation was proven to be unbounded, " \ - "but a solution is available." - self.results.solver.termination_condition = TerminationCondition.unbounded - soln.status = SolutionStatus.unbounded - elif status == xp.mip_unbounded and mip_sols <= 0: - self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message = "LP relaxation was proven to be unbounded." - self.results.solver.termination_condition = TerminationCondition.unbounded - soln.status = SolutionStatus.unbounded - else: - self.results.solver.status = SolverStatus.error - self.results.solver.termination_message = \ - ("Unhandled Xpress solve status " - "("+str(status)+")") - self.results.solver.termination_condition = TerminationCondition.error - soln.status = SolutionStatus.error - else: ## an LP, we'll check the lpstatus - status = xprob_attrs.lpstatus - if status == xp.lp_unstarted: - self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Model is not loaded; no solution information is available." - self.results.solver.termination_condition = TerminationCondition.error - soln.status = SolutionStatus.unknown - elif status == xp.lp_optimal: - self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message = "Model was solved to optimality (subject to tolerances), " \ - "and an optimal solution is available." - self.results.solver.termination_condition = TerminationCondition.optimal - soln.status = SolutionStatus.optimal - elif status == xp.lp_infeas: - self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message = "Model was proven to be infeasible" - self.results.solver.termination_condition = TerminationCondition.infeasible - soln.status = SolutionStatus.infeasible - elif status == xp.lp_cutoff: - self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message = "Optimal objective for model was proven to be worse than the " \ - "cutoff value specified; a solution is available." - self.results.solver.termination_condition = TerminationCondition.minFunctionValue - soln.status = SolutionStatus.optimal - elif status == xp.lp_unfinished: - self.results.solver.status = SolverStatus.aborted - self.results.solver.termination_message = "Optimization was terminated by the user." - self.results.solver.termination_condition = TerminationCondition.error - soln.status = SolutionStatus.error - elif status == xp.lp_unbounded: - self.results.solver.status = SolverStatus.warning - self.results.solver.termination_message = "Model was proven to be unbounded." - self.results.solver.termination_condition = TerminationCondition.unbounded - soln.status = SolutionStatus.unbounded - elif status == xp.lp_cutoff_in_dual: - self.results.solver.status = SolverStatus.ok - self.results.solver.termination_message = "Xpress reported the LP was cutoff in the dual." - self.results.solver.termination_condition = TerminationCondition.minFunctionValue - soln.status = SolutionStatus.optimal - elif status == xp.lp_unsolved: - self.results.solver.status = SolverStatus.error - self.results.solver.termination_message = "Optimization was terminated due to unrecoverable numerical " \ - "difficulties." - self.results.solver.termination_condition = TerminationCondition.error - soln.status = SolutionStatus.error - elif status == xp.lp_nonconvex: - self.results.solver.status = SolverStatus.error - self.results.solver.termination_message = "Optimization was terminated because nonconvex quadratic data " \ - "were found." - self.results.solver.termination_condition = TerminationCondition.error - soln.status = SolutionStatus.error - else: - self.results.solver.status = SolverStatus.error - self.results.solver.termination_message = \ - ("Unhandled Xpress solve status " - "("+str(status)+")") - self.results.solver.termination_condition = TerminationCondition.error - soln.status = SolutionStatus.error - - + if not hasattr(self, '_get_results'): + raise RuntimeError( + 'Model was solved but `_get_results` property is not set' + ) + have_soln = self._get_results(self.results, soln) self.results.problem.name = xprob_attrs.matrixname if xprob_attrs.objsense == 1.0: @@ -621,47 +868,26 @@ def _postsolve(self): elif xprob_attrs.objsense == -1.0: self.results.problem.sense = maximize else: - raise RuntimeError('Unrecognized Xpress objective sense: {0}'.format(xprob_attrs.objsense)) - - self.results.problem.upper_bound = None - self.results.problem.lower_bound = None - if not is_mip: #LP or continuous problem - try: - self.results.problem.upper_bound = xprob_attrs.lpobjval - self.results.problem.lower_bound = xprob_attrs.lpobjval - except (XpressDirect.XpressException, AttributeError): - pass - elif xprob_attrs.objsense == 1.0: # minimizing MIP - try: - self.results.problem.upper_bound = xprob_attrs.mipbestobjval - except (XpressDirect.XpressException, AttributeError): - pass - try: - self.results.problem.lower_bound = xprob_attrs.bestbound - except (XpressDirect.XpressException, AttributeError): - pass - elif xprob_attrs.objsense == -1.0: # maximizing MIP - try: - self.results.problem.upper_bound = xprob_attrs.bestbound - except (XpressDirect.XpressException, AttributeError): - pass - try: - self.results.problem.lower_bound = xprob_attrs.mipbestobjval - except (XpressDirect.XpressException, AttributeError): - pass - else: - raise RuntimeError('Unrecognized xpress objective sense: {0}'.format(xprob_attrs.objsense)) + raise RuntimeError( + 'Unrecognized Xpress objective sense: {0}'.format(xprob_attrs.objsense) + ) try: - soln.gap = self.results.problem.upper_bound - self.results.problem.lower_bound + soln.gap = ( + self.results.problem.upper_bound - self.results.problem.lower_bound + ) except TypeError: soln.gap = None - self.results.problem.number_of_constraints = xprob_attrs.rows + xprob_attrs.sets + xprob_attrs.qconstraints + self.results.problem.number_of_constraints = ( + xprob_attrs.rows + xprob_attrs.sets + xprob_attrs.qconstraints + ) self.results.problem.number_of_nonzeros = xprob_attrs.elems self.results.problem.number_of_variables = xprob_attrs.cols self.results.problem.number_of_integer_variables = xprob_attrs.mipents - self.results.problem.number_of_continuous_variables = xprob_attrs.cols - xprob_attrs.mipents + self.results.problem.number_of_continuous_variables = ( + xprob_attrs.cols - xprob_attrs.mipents + ) self.results.problem.number_of_objectives = 1 self.results.problem.number_of_solutions = xprob_attrs.mipsols if is_mip else 1 @@ -670,12 +896,10 @@ def _postsolve(self): # be the case, both in LP and MIP contexts. if self._save_results: """ - This code in this if statement is only needed for backwards compatability. It is more efficient to set + This code in this if statement is only needed for backwards compatibility. It is more efficient to set _save_results to False and use load_vars, load_duals, etc. """ - if xprob_attrs.lpstatus in \ - [xp.lp_optimal, xp.lp_cutoff, xp.lp_cutoff_in_dual] or \ - xprob_attrs.mipsols > 0: + if have_soln: soln_variables = soln.variable soln_constraints = soln.constraint @@ -712,8 +936,8 @@ def _postsolve(self): lb = con.lb ub = con.ub ub_s = val - expr_val = ub-ub_s - lb_s = lb-expr_val + expr_val = ub - ub_s + lb_s = lb - expr_val if abs(ub_s) > abs(lb_s): soln_constraints[con.name]["Slack"] = ub_s else: @@ -722,9 +946,7 @@ def _postsolve(self): soln_constraints[con.name]["Slack"] = val elif self._load_solutions: - if xprob_attrs.lpstatus == xp.lp_optimal and \ - ((not is_mip) or (xprob_attrs.mipsols > 0)): - + if have_soln: self.load_vars() if extract_reduced_costs: @@ -815,11 +1037,13 @@ def _load_slacks(self, cons_to_load=None): if xpress_con in self._range_constraints: ## for xpress, the slack on a range constraint ## is based on the upper bound + ## FIXME: This looks like a bug - there is no variable named + ## `con` - there is, however, `xpress_con` and `pyomo_con` lb = con.lb ub = con.ub ub_s = val - expr_val = ub-ub_s - lb_s = lb-expr_val + expr_val = ub - ub_s + lb_s = lb - expr_val if abs(ub_s) > abs(lb_s): slack[pyomo_con] = ub_s else: diff --git a/pyomo/solvers/plugins/solvers/xpress_persistent.py b/pyomo/solvers/plugins/solvers/xpress_persistent.py index 54191da98df..56024bc0540 100644 --- a/pyomo/solvers/plugins/solvers/xpress_persistent.py +++ b/pyomo/solvers/plugins/solvers/xpress_persistent.py @@ -13,12 +13,14 @@ from pyomo.solvers.plugins.solvers.xpress_direct import XpressDirect from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver from pyomo.core.expr.numvalue import value, is_fixed -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.opt.base import SolverFactory import collections -@SolverFactory.register('xpress_persistent', doc='Persistent python interface to Xpress') +@SolverFactory.register( + 'xpress_persistent', doc='Persistent python interface to Xpress' +) class XpressPersistent(PersistentSolver, XpressDirect): """ A class that provides a persistent interface to Xpress. Direct solver interfaces do not use any file io. @@ -30,7 +32,7 @@ class XpressPersistent(PersistentSolver, XpressDirect): Keyword Arguments ----------------- model: ConcreteModel - Passing a model to the constructor is equivalent to calling the set_instance mehtod. + Passing a model to the constructor is equivalent to calling the set_instance method. type: str String indicating the class type of the solver instance. name: str @@ -75,7 +77,9 @@ def _xpress_chgcoltype_from_var(self, var): elif var.is_continuous(): vartype = 'C' else: - raise ValueError('Variable domain type is not recognized for {0}'.format(var.domain)) + raise ValueError( + 'Variable domain type is not recognized for {0}'.format(var.domain) + ) return vartype def update_var(self, var): @@ -92,12 +96,16 @@ def update_var(self, var): # see PR #366 for discussion about handling indexed # objects and keeping compatibility with the # pyomo.kernel objects - #if var.is_indexed(): + # if var.is_indexed(): # for child_var in var.values(): # self.update_var(child_var) # return if var not in self._pyomo_var_to_solver_var_map: - raise ValueError('The Var provided to update_var needs to be added first: {0}'.format(var)) + raise ValueError( + 'The Var provided to update_var needs to be added first: {0}'.format( + var + ) + ) xpress_var = self._pyomo_var_to_solver_var_map[var] qctype = self._xpress_chgcoltype_from_var(var) lb, ub = self._xpress_lb_ub_from_var(var) @@ -109,9 +117,9 @@ def _add_column(self, var, obj_coef, constraints, coefficients): """Add a column to the solver's model This will add the Pyomo variable var to the solver's - model, and put the coefficients on the associated + model, and put the coefficients on the associated constraints in the solver model. If the obj_coef is - not zero, it will add obj_coef*var to the objective + not zero, it will add obj_coef*var to the objective of the solver's model. Parameters @@ -127,13 +135,20 @@ def _add_column(self, var, obj_coef, constraints, coefficients): vartype = self._xpress_chgcoltype_from_var(var) lb, ub = self._xpress_lb_ub_from_var(var) - self._solver_model.addcols(objx=[obj_coef], mstart=[0,len(coefficients)], - mrwind=constraints, dmatval=coefficients, - bdl=[lb], bdu=[ub], names=[varname], - types=[vartype]) + self._solver_model.addcols( + objx=[obj_coef], + mstart=[0, len(coefficients)], + mrwind=constraints, + dmatval=coefficients, + bdl=[lb], + bdu=[ub], + names=[varname], + types=[vartype], + ) xpress_var = self._solver_model.getVariable( - index=self._solver_model.getIndexFromName(type=2, name=varname)) + index=self._solver_model.getIndexFromName(type=2, name=varname) + ) self._pyomo_var_to_solver_var_map[var] = xpress_var self._solver_var_to_pyomo_var_map[xpress_var] = var @@ -141,13 +156,13 @@ def _add_column(self, var, obj_coef, constraints, coefficients): def get_xpress_attribute(self, *args): """ - Get xpress atrributes. + Get xpress attributes. Parameters ---------- control(s): str, strs, list, None The xpress attribute to get. Options include any xpress attribute. - Can also be list of xpress controls or None for every atrribute + Can also be list of xpress controls or None for every attribute Please see the Xpress documentation for options. See the Xpress documentation for xpress.problem.getAttrib for other @@ -183,7 +198,7 @@ def get_xpress_control(self, *args): ---------- control(s): str, strs, list, None The xpress control to get. Options include any xpress control. - Can also be list of xpress controls or None for every contorl + Can also be list of xpress controls or None for every control Please see the Xpress documentation for options. See the Xpress documentation for xpress.problem.getControl for other diff --git a/pyomo/solvers/tests/checks/test_BARON.py b/pyomo/solvers/tests/checks/test_BARON.py index fbae14797f4..eb58076b09c 100644 --- a/pyomo/solvers/tests/checks/test_BARON.py +++ b/pyomo/solvers/tests/checks/test_BARON.py @@ -16,25 +16,22 @@ import pyomo.common.unittest as unittest from pyomo.common.log import LoggingIntercept -from pyomo.environ import ( - ConcreteModel, Constraint, Objective, Var, log10, minimize, -) +from pyomo.environ import ConcreteModel, Constraint, Objective, Var, log10, minimize from pyomo.opt import SolverFactory, TerminationCondition # check if BARON is available from pyomo.solvers.tests.solvers import test_solver_cases + baron_available = test_solver_cases('baron', 'bar').available -@unittest.skipIf(not baron_available, - "The 'BARON' solver is not available") +@unittest.skipIf(not baron_available, "The 'BARON' solver is not available") class BaronTest(unittest.TestCase): """Test the BARON interface.""" def test_log10(self): # Tests the special transformation for log10 with SolverFactory("baron") as opt: - m = ConcreteModel() m.x = Var() m.c = Constraint(expr=log10(m.x) >= 2) @@ -42,37 +39,38 @@ def test_log10(self): results = opt.solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) def test_abs(self): # Tests the special transformation for abs with SolverFactory("baron") as opt: - m = ConcreteModel() - m.x = Var(bounds=(-100,1)) + m.x = Var(bounds=(-100, 1)) m.c = Constraint(expr=abs(m.x) >= 2) m.obj = Objective(expr=m.x, sense=minimize) results = opt.solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) def test_pow(self): # Tests the special transformation for x ^ y (both variables) with SolverFactory("baron") as opt: - m = ConcreteModel() - m.x = Var(bounds=(10,100)) - m.y = Var(bounds=(1,10)) - m.c = Constraint(expr=m.x ** m.y >= 20) + m.x = Var(bounds=(10, 100)) + m.y = Var(bounds=(1, 10)) + m.c = Constraint(expr=m.x**m.y >= 20) m.obj = Objective(expr=m.x, sense=minimize) results = opt.solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) def test_BARON_option_warnings(self): os = StringIO() @@ -82,15 +80,20 @@ def test_BARON_option_warnings(self): m.obj = Objective(expr=m.x**2) with SolverFactory("baron") as opt: - results = opt.solve(m, options={'ResName': 'results.lst', - 'TimName': 'results.tim'}) - - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) - self.assertIn('Ignoring user-specified option "ResName=results.lst"', - os.getvalue()) - self.assertIn('Ignoring user-specified option "TimName=results.tim"', - os.getvalue()) + results = opt.solve( + m, options={'ResName': 'results.lst', 'TimName': 'results.tim'} + ) + + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + self.assertIn( + 'Ignoring user-specified option "ResName=results.lst"', os.getvalue() + ) + self.assertIn( + 'Ignoring user-specified option "TimName=results.tim"', os.getvalue() + ) + if __name__ == '__main__': unittest.main() diff --git a/pyomo/solvers/tests/checks/test_CBCplugin.py b/pyomo/solvers/tests/checks/test_CBCplugin.py index f1386bd2431..fe01a89bb53 100644 --- a/pyomo/solvers/tests/checks/test_CBCplugin.py +++ b/pyomo/solvers/tests/checks/test_CBCplugin.py @@ -15,12 +15,21 @@ import pyomo.common.unittest as unittest -from pyomo.environ import (ConcreteModel, Var, Objective, RangeSet, - Constraint, Reals, NonNegativeIntegers, - NonNegativeReals, Integers, Binary, - maximize, minimize) -from pyomo.opt import (SolverFactory, ProblemSense, - TerminationCondition, SolverStatus) +from pyomo.environ import ( + ConcreteModel, + Var, + Objective, + RangeSet, + Constraint, + Reals, + NonNegativeIntegers, + NonNegativeReals, + Integers, + Binary, + maximize, + minimize, +) +from pyomo.opt import SolverFactory, ProblemSense, TerminationCondition, SolverStatus from pyomo.solvers.plugins.solvers.CBCplugin import CBCSHELL cbc_available = SolverFactory('cbc', solver_io='lp').available(exception_flag=False) @@ -54,21 +63,31 @@ def test_infeasible_lp(self): results = self.opt.solve(self.model) self.assertEqual(ProblemSense.minimize, results.problem.sense) - self.assertEqual(TerminationCondition.infeasible, results.solver.termination_condition) - self.assertEqual('Model was proven to be infeasible.', results.solver.termination_message) + self.assertEqual( + TerminationCondition.infeasible, results.solver.termination_condition + ) + self.assertEqual( + 'Model was proven to be infeasible.', results.solver.termination_message + ) self.assertEqual(SolverStatus.warning, results.solver.status) @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available") def test_unbounded_lp(self): self.model.Idx = RangeSet(2) self.model.X = Var(self.model.Idx, within=Reals) - self.model.Obj = Objective(expr=self.model.X[1] + self.model.X[2], sense=maximize) + self.model.Obj = Objective( + expr=self.model.X[1] + self.model.X[2], sense=maximize + ) results = self.opt.solve(self.model) self.assertEqual(ProblemSense.maximize, results.problem.sense) - self.assertEqual(TerminationCondition.unbounded, results.solver.termination_condition) - self.assertEqual('Model was proven to be unbounded.', results.solver.termination_message) + self.assertEqual( + TerminationCondition.unbounded, results.solver.termination_condition + ) + self.assertEqual( + 'Model was proven to be unbounded.', results.solver.termination_message + ) self.assertEqual(SolverStatus.warning, results.solver.status) @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available") @@ -81,10 +100,13 @@ def test_optimal_lp(self): self.assertEqual(0.0, results.problem.lower_bound) self.assertEqual(0.0, results.problem.upper_bound) self.assertEqual(ProblemSense.minimize, results.problem.sense) - self.assertEqual(TerminationCondition.optimal, results.solver.termination_condition) + self.assertEqual( + TerminationCondition.optimal, results.solver.termination_condition + ) self.assertEqual( 'Model was solved to optimality (subject to tolerances), and an optimal solution is available.', - results.solver.termination_message) + results.solver.termination_message, + ) self.assertEqual(SolverStatus.ok, results.solver.status) @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available") @@ -97,8 +119,12 @@ def test_infeasible_mip(self): results = self.opt.solve(self.model) self.assertEqual(ProblemSense.minimize, results.problem.sense) - self.assertEqual(TerminationCondition.infeasible, results.solver.termination_condition) - self.assertEqual('Model was proven to be infeasible.', results.solver.termination_message) + self.assertEqual( + TerminationCondition.infeasible, results.solver.termination_condition + ) + self.assertEqual( + 'Model was proven to be infeasible.', results.solver.termination_message + ) self.assertEqual(SolverStatus.warning, results.solver.status) @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available") @@ -109,8 +135,12 @@ def test_unbounded_mip(self): results = self.opt.solve(self.model) self.assertEqual(ProblemSense.minimize, results.problem.sense) - self.assertEqual(TerminationCondition.unbounded, results.solver.termination_condition) - self.assertEqual('Model was proven to be unbounded.', results.solver.termination_message) + self.assertEqual( + TerminationCondition.unbounded, results.solver.termination_condition + ) + self.assertEqual( + 'Model was proven to be unbounded.', results.solver.termination_message + ) self.assertEqual(SolverStatus.warning, results.solver.status) @unittest.skipIf(not cbc_available, "The 'cbc' solver is not available") @@ -119,8 +149,9 @@ def test_optimal_mip(self): self.model.X = Var(self.model.Idx, within=NonNegativeIntegers) self.model.Y = Var(self.model.Idx, within=Binary) self.model.C1 = Constraint(expr=self.model.X[1] == self.model.X[2] + 1) - self.model.Obj = Objective(expr=self.model.Y[1] + self.model.Y[2] - self.model.X[1], - sense=maximize) + self.model.Obj = Objective( + expr=self.model.Y[1] + self.model.Y[2] - self.model.X[1], sense=maximize + ) results = self.opt.solve(self.model) @@ -129,10 +160,13 @@ def test_optimal_mip(self): self.assertEqual(results.problem.number_of_binary_variables, 2) self.assertEqual(results.problem.number_of_integer_variables, 4) self.assertEqual(ProblemSense.maximize, results.problem.sense) - self.assertEqual(TerminationCondition.optimal, results.solver.termination_condition) + self.assertEqual( + TerminationCondition.optimal, results.solver.termination_condition + ) self.assertEqual( 'Model was solved to optimality (subject to tolerances), and an optimal solution is available.', - results.solver.termination_message) + results.solver.termination_message, + ) self.assertEqual(SolverStatus.ok, results.solver.status) @@ -212,12 +246,19 @@ def test_optimal_mip(self): self.assertEqual(SolverStatus.ok, results.solver.status) self.assertEqual(0.34, results.solver.system_time) self.assertEqual(0.72, results.solver.wallclock_time) - self.assertEqual(TerminationCondition.optimal, results.solver.termination_condition) + self.assertEqual( + TerminationCondition.optimal, results.solver.termination_condition + ) self.assertEqual( 'Model was solved to optimality (subject to tolerances), and an optimal solution is available.', - results.solver.termination_message) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 2) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 2) + results.solver.termination_message, + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 2 + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_created_subproblems, 2 + ) self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 625) def test_max_time_limit_mip(self): @@ -227,17 +268,26 @@ def test_max_time_limit_mip(self): lp_file = 'max_time_limit.out.lp' results = self.opt.solve(os.path.join(data_dir, lp_file)) - self.assertEqual(1.1084706, results.problem.lower_bound) # Note that we ignore the lower bound given at the end + self.assertEqual( + 1.1084706, results.problem.lower_bound + ) # Note that we ignore the lower bound given at the end self.assertEqual(1.35481947, results.problem.upper_bound) self.assertEqual(SolverStatus.aborted, results.solver.status) self.assertEqual(0.1, results.solver.system_time) self.assertEqual(0.11, results.solver.wallclock_time) - self.assertEqual(TerminationCondition.maxTimeLimit, results.solver.termination_condition) + self.assertEqual( + TerminationCondition.maxTimeLimit, results.solver.termination_condition + ) self.assertEqual( 'Optimization terminated because the time expended exceeded the value specified in the seconds parameter.', - results.solver.termination_message) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0) + results.solver.termination_message, + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0 + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0 + ) self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 82) def test_intermediate_non_integer_mip(self): @@ -251,12 +301,20 @@ def test_intermediate_non_integer_mip(self): self.assertEqual(SolverStatus.aborted, results.solver.status) self.assertEqual(0.02, results.solver.system_time) self.assertEqual(0.02, results.solver.wallclock_time) - self.assertEqual(TerminationCondition.intermediateNonInteger, results.solver.termination_condition) + self.assertEqual( + TerminationCondition.intermediateNonInteger, + results.solver.termination_condition, + ) self.assertEqual( 'Optimization terminated because a limit was hit, however it had not found an integer solution yet.', - results.solver.termination_message) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0) + results.solver.termination_message, + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0 + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0 + ) self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 0) def test_max_solutions(self): @@ -271,12 +329,20 @@ def test_max_solutions(self): self.assertEqual(SolverStatus.aborted, results.solver.status) self.assertEqual(0.03, results.solver.system_time) self.assertEqual(0.03, results.solver.wallclock_time) - self.assertEqual(TerminationCondition.other, results.solver.termination_condition) + self.assertEqual( + TerminationCondition.other, results.solver.termination_condition + ) self.assertEqual( 'Optimization terminated because the number of solutions found reached the value specified in the ' - 'maxSolutions parameter.', results.solver.termination_message) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0) + 'maxSolutions parameter.', + results.solver.termination_message, + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0 + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0 + ) self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 0) def test_within_gap_tolerance(self): @@ -291,12 +357,19 @@ def test_within_gap_tolerance(self): self.assertEqual(SolverStatus.ok, results.solver.status) self.assertEqual(0.07, results.solver.system_time) self.assertEqual(0.07, results.solver.wallclock_time) - self.assertEqual(TerminationCondition.optimal, results.solver.termination_condition) + self.assertEqual( + TerminationCondition.optimal, results.solver.termination_condition + ) self.assertEqual( 'Model was solved to optimality (subject to tolerances), and an optimal solution is available.', - results.solver.termination_message) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0) + results.solver.termination_message, + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0 + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0 + ) self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 0) def test_max_evaluations(self): @@ -311,12 +384,20 @@ def test_max_evaluations(self): self.assertEqual(SolverStatus.aborted, results.solver.status) self.assertEqual(0.16, results.solver.system_time) self.assertEqual(0.18, results.solver.wallclock_time) - self.assertEqual(TerminationCondition.maxEvaluations, results.solver.termination_condition) + self.assertEqual( + TerminationCondition.maxEvaluations, results.solver.termination_condition + ) self.assertEqual( 'Optimization terminated because the total number of branch-and-cut nodes explored exceeded the value ' - 'specified in the maxNodes parameter', results.solver.termination_message) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 1) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 1) + 'specified in the maxNodes parameter', + results.solver.termination_message, + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 1 + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_created_subproblems, 1 + ) self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 602) def test_fix_parsing_bug(self): @@ -336,12 +417,20 @@ def test_fix_parsing_bug(self): self.assertEqual(SolverStatus.aborted, results.solver.status) self.assertEqual(0.08, results.solver.system_time) self.assertEqual(0.09, results.solver.wallclock_time) - self.assertEqual(TerminationCondition.other, results.solver.termination_condition) + self.assertEqual( + TerminationCondition.other, results.solver.termination_condition + ) self.assertEqual( 'Optimization terminated because the number of solutions found reached the value specified in the ' - 'maxSolutions parameter.', results.solver.termination_message) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0) + 'maxSolutions parameter.', + results.solver.termination_message, + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_bounded_subproblems, 0 + ) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_created_subproblems, 0 + ) self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 0) def test_process_logfile(self): @@ -349,18 +438,28 @@ def test_process_logfile(self): cbc_shell._log_file = os.path.join(data_dir, 'test5_timeout_cbc.txt') results = cbc_shell.process_logfile() self.assertEqual(results.solution.gap, 0.01) - self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 50364) + self.assertEqual( + results.solver.statistics.black_box.number_of_iterations, 50364 + ) self.assertEqual(results.solver.system_time, 2.01) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 34776) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_created_subproblems, + 34776, + ) def test_process_logfile_gap_inf(self): cbc_shell = CBCSHELL() cbc_shell._log_file = os.path.join(data_dir, 'test5_timeout_cbc_gap.txt') results = cbc_shell.process_logfile() self.assertEqual(results.solution.gap, float('inf')) - self.assertEqual(results.solver.statistics.black_box.number_of_iterations, 50364) + self.assertEqual( + results.solver.statistics.black_box.number_of_iterations, 50364 + ) self.assertEqual(results.solver.system_time, 2.01) - self.assertEqual(results.solver.statistics.branch_and_bound.number_of_created_subproblems, 34776) + self.assertEqual( + results.solver.statistics.branch_and_bound.number_of_created_subproblems, + 34776, + ) if __name__ == "__main__": diff --git a/pyomo/solvers/tests/checks/test_CPLEXDirect.py b/pyomo/solvers/tests/checks/test_CPLEXDirect.py index 731b5080495..86e03d1024f 100644 --- a/pyomo/solvers/tests/checks/test_CPLEXDirect.py +++ b/pyomo/solvers/tests/checks/test_CPLEXDirect.py @@ -13,25 +13,39 @@ import pyomo.common.unittest as unittest -from pyomo.environ import (ConcreteModel, AbstractModel, Var, Objective, - Block, Constraint, Suffix, NonNegativeIntegers, - NonNegativeReals, Integers, Binary, is_fixed, - value) +from pyomo.environ import ( + ConcreteModel, + AbstractModel, + Var, + Objective, + Block, + Constraint, + Suffix, + NonNegativeIntegers, + NonNegativeReals, + Integers, + Binary, + is_fixed, + value, +) from pyomo.opt import SolverFactory, TerminationCondition, SolutionStatus -from pyomo.solvers.plugins.solvers.cplex_direct import (_CplexExpr, - _LinearConstraintData, - _VariableData) +from pyomo.solvers.plugins.solvers.cplex_direct import ( + _CplexExpr, + _LinearConstraintData, + _VariableData, +) try: import cplex + cplexpy_available = True except ImportError: cplexpy_available = False diff_tol = 1e-4 -class CPLEXDirectTests(unittest.TestCase): +class CPLEXDirectTests(unittest.TestCase): def setUp(self): self.stderr = sys.stderr sys.stderr = None @@ -39,64 +53,68 @@ def setUp(self): def tearDown(self): sys.stderr = self.stderr - @unittest.skipIf(not cplexpy_available, - "The 'cplex' python bindings are not available") + @unittest.skipIf( + not cplexpy_available, "The 'cplex' python bindings are not available" + ) def test_infeasible_lp(self): with SolverFactory("cplex", solver_io="python") as opt: - model = ConcreteModel() model.X = Var(within=NonNegativeReals) - model.C1 = Constraint(expr= model.X==1) - model.C2 = Constraint(expr= model.X==2) - model.O = Objective(expr= model.X) + model.C1 = Constraint(expr=model.X == 1) + model.C2 = Constraint(expr=model.X == 2) + model.O = Objective(expr=model.X) results = opt.solve(model) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.infeasible) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) - @unittest.skipIf(not cplexpy_available, - "The 'cplex' python bindings are not available") + @unittest.skipIf( + not cplexpy_available, "The 'cplex' python bindings are not available" + ) def test_unbounded_lp(self): with SolverFactory("cplex", solver_io="python") as opt: - model = ConcreteModel() model.X = Var() - model.O = Objective(expr= model.X) + model.O = Objective(expr=model.X) results = opt.solve(model) - self.assertIn(results.solver.termination_condition, - (TerminationCondition.unbounded, - TerminationCondition.infeasibleOrUnbounded)) + self.assertIn( + results.solver.termination_condition, + ( + TerminationCondition.unbounded, + TerminationCondition.infeasibleOrUnbounded, + ), + ) - @unittest.skipIf(not cplexpy_available, - "The 'cplex' python bindings are not available") + @unittest.skipIf( + not cplexpy_available, "The 'cplex' python bindings are not available" + ) def test_optimal_lp(self): with SolverFactory("cplex", solver_io="python") as opt: - model = ConcreteModel() model.X = Var(within=NonNegativeReals) - model.O = Objective(expr= model.X) + model.O = Objective(expr=model.X) results = opt.solve(model, load_solutions=False) - self.assertEqual(results.solution.status, - SolutionStatus.optimal) + self.assertEqual(results.solution.status, SolutionStatus.optimal) - @unittest.skipIf(not cplexpy_available, - "The 'cplex' python bindings are not available") + @unittest.skipIf( + not cplexpy_available, "The 'cplex' python bindings are not available" + ) def test_get_duals_lp(self): with SolverFactory("cplex", solver_io="python") as opt: - model = ConcreteModel() model.X = Var(within=NonNegativeReals) model.Y = Var(within=NonNegativeReals) - model.C1 = Constraint(expr= 2*model.X + model.Y >= 8 ) - model.C2 = Constraint(expr= model.X + 3*model.Y >= 6 ) + model.C1 = Constraint(expr=2 * model.X + model.Y >= 8) + model.C2 = Constraint(expr=model.X + 3 * model.Y >= 6) - model.O = Objective(expr= model.X + model.Y) + model.O = Objective(expr=model.X + model.Y) results = opt.solve(model, suffixes=['dual'], load_solutions=False) @@ -106,56 +124,60 @@ def test_get_duals_lp(self): self.assertAlmostEqual(model.dual[model.C1], 0.4) self.assertAlmostEqual(model.dual[model.C2], 0.2) - @unittest.skipIf(not cplexpy_available, - "The 'cplex' python bindings are not available") + @unittest.skipIf( + not cplexpy_available, "The 'cplex' python bindings are not available" + ) def test_infeasible_mip(self): with SolverFactory("cplex", solver_io="python") as opt: - model = ConcreteModel() model.X = Var(within=NonNegativeIntegers) - model.C1 = Constraint(expr= model.X==1) - model.C2 = Constraint(expr= model.X==2) - model.O = Objective(expr= model.X) + model.C1 = Constraint(expr=model.X == 1) + model.C2 = Constraint(expr=model.X == 2) + model.O = Objective(expr=model.X) results = opt.solve(model) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.infeasible) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) - @unittest.skipIf(not cplexpy_available, - "The 'cplex' python bindings are not available") + @unittest.skipIf( + not cplexpy_available, "The 'cplex' python bindings are not available" + ) def test_unbounded_mip(self): with SolverFactory("cplex", solver_io="python") as opt: - model = AbstractModel() model.X = Var(within=Integers) - model.O = Objective(expr= model.X) + model.O = Objective(expr=model.X) instance = model.create_instance() results = opt.solve(instance) - self.assertIn(results.solver.termination_condition, - (TerminationCondition.unbounded, - TerminationCondition.infeasibleOrUnbounded)) + self.assertIn( + results.solver.termination_condition, + ( + TerminationCondition.unbounded, + TerminationCondition.infeasibleOrUnbounded, + ), + ) - @unittest.skipIf(not cplexpy_available, - "The 'cplex' python bindings are not available") + @unittest.skipIf( + not cplexpy_available, "The 'cplex' python bindings are not available" + ) def test_optimal_mip(self): with SolverFactory("cplex", solver_io="python") as opt: - model = ConcreteModel() model.X = Var(within=NonNegativeIntegers) - model.O = Objective(expr= model.X) + model.O = Objective(expr=model.X) results = opt.solve(model, load_solutions=False) - self.assertEqual(results.solution.status, - SolutionStatus.optimal) + self.assertEqual(results.solution.status, SolutionStatus.optimal) @unittest.skipIf(not cplexpy_available, "The 'cplex' python bindings are not available") class TestIsFixedCallCount(unittest.TestCase): - """ Tests for PR#1402 (669e7b2b) """ + """Tests for PR#1402 (669e7b2b)""" def setup(self, skip_trivial_constraints): m = ConcreteModel() @@ -306,7 +328,7 @@ def test_constraint_data(self): @unittest.skipIf(not cplexpy_available, "The 'cplex' python bindings are not available") class TestAddVar(unittest.TestCase): def test_add_single_variable(self): - """ Test that the variable is added correctly to `solver_model`. """ + """Test that the variable is added correctly to `solver_model`.""" model = ConcreteModel() opt = SolverFactory("cplex", solver_io="python") @@ -340,7 +362,7 @@ def test_add_single_variable(self): self.assertEqual(opt._solver_model.variables.get_num_binary(), 1) def test_add_block_containing_single_variable(self): - """ Test that the variable is added correctly to `solver_model`. """ + """Test that the variable is added correctly to `solver_model`.""" model = ConcreteModel() opt = SolverFactory("cplex", solver_io="python") @@ -366,10 +388,10 @@ def test_add_block_containing_single_variable(self): self.assertEqual(opt._solver_model.variables.get_num_binary(), 1) def test_add_block_containing_multiple_variables(self): - """ Test that: - - The variable is added correctly to `solver_model` - - The CPLEX `variables` interface is called only once - - Fixed variable bounds are set correctly + """Test that: + - The variable is added correctly to `solver_model` + - The CPLEX `variables` interface is called only once + - Fixed variable bounds are set correctly """ model = ConcreteModel() diff --git a/pyomo/solvers/tests/checks/test_CPLEXPersistent.py b/pyomo/solvers/tests/checks/test_CPLEXPersistent.py index b78d3a2f825..d7f00d0f486 100644 --- a/pyomo/solvers/tests/checks/test_CPLEXPersistent.py +++ b/pyomo/solvers/tests/checks/test_CPLEXPersistent.py @@ -12,8 +12,7 @@ import pyomo.common.unittest as unittest import pyomo.environ -from pyomo.core import (ConcreteModel, Var, Objective, - Constraint, NonNegativeReals) +from pyomo.core import ConcreteModel, Var, Objective, Constraint, NonNegativeReals from pyomo.opt import SolverFactory try: @@ -30,7 +29,7 @@ def test_quadratic_objective_is_set(self): model = ConcreteModel() model.X = Var(bounds=(-2, 2)) model.Y = Var(bounds=(-2, 2)) - model.O = Objective(expr=model.X ** 2 + model.Y ** 2) + model.O = Objective(expr=model.X**2 + model.Y**2) model.C1 = Constraint(expr=model.Y >= 2 * model.X - 1) model.C2 = Constraint(expr=model.Y >= -model.X + 2) opt = SolverFactory("cplex_persistent") @@ -41,7 +40,7 @@ def test_quadratic_objective_is_set(self): self.assertAlmostEqual(model.Y.value, 1, places=3) del model.O - model.O = Objective(expr=model.X ** 2) + model.O = Objective(expr=model.X**2) opt.set_objective(model.O) opt.solve() self.assertAlmostEqual(model.X.value, 0, places=3) @@ -70,7 +69,7 @@ def test_add_column_exceptions(self): m = ConcreteModel() m.x = Var() m.c = Constraint(expr=(0, m.x, 1)) - m.ci = Constraint([1,2], rule=lambda m,i:(0,m.x,i+1)) + m.ci = Constraint([1, 2], rule=lambda m, i: (0, m.x, i + 1)) m.cd = Constraint(expr=(0, -m.x, 1)) m.cd.deactivate() m.obj = Objective(expr=-m.x) @@ -84,7 +83,7 @@ def test_add_column_exceptions(self): m2 = ConcreteModel() m2.y = Var() - m2.c = Constraint(expr=(0,m.x,1)) + m2.c = Constraint(expr=(0, m.x, 1)) # different model than attached to opt self.assertRaises(RuntimeError, opt.add_column, m2, m2.y, 0, [], []) @@ -96,8 +95,8 @@ def test_add_column_exceptions(self): self.assertRaises(RuntimeError, opt.add_column, m, z, -2, [m.c, z], [1]) m.y = Var() - # len(coefficents) == len(constraints) - self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c], [1,2]) + # len(coefficients) == len(constraints) + self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c], [1, 2]) self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c, z], [1]) # add indexed constraint diff --git a/pyomo/solvers/tests/checks/test_GAMS.py b/pyomo/solvers/tests/checks/test_GAMS.py index 202f5248afb..5260f2bd195 100644 --- a/pyomo/solvers/tests/checks/test_GAMS.py +++ b/pyomo/solvers/tests/checks/test_GAMS.py @@ -9,14 +9,21 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ - -from pyomo.environ import ConcreteModel, Var, Objective, Constraint, maximize, Expression, log10 +import pyomo.environ as pyo +from pyomo.environ import ( + ConcreteModel, + Var, + Objective, + Constraint, + maximize, + Expression, + log10, +) from pyomo.opt import SolverFactory, TerminationCondition -from pyomo.solvers.plugins.solvers.GAMS import ( - GAMSShell, GAMSDirect, gdxcc_available -) +from pyomo.solvers.plugins.solvers.GAMS import GAMSShell, GAMSDirect, gdxcc_available import pyomo.common.unittest as unittest +from pyomo.common.tempfiles import TempfileManager from pyomo.common.tee import capture_output import os, shutil from tempfile import mkdtemp @@ -31,54 +38,48 @@ class GAMSTests(unittest.TestCase): - - @unittest.skipIf(not gamspy_available, - "The 'gams' python bindings are not available") + @unittest.skipIf( + not gamspy_available, "The 'gams' python bindings are not available" + ) def test_check_expr_eval_py(self): with SolverFactory("gams", solver_io="python") as opt: - m = ConcreteModel() m.x = Var() - m.e = Expression(expr= log10(m.x) + 5) - m.c = Constraint(expr= m.x >= 10) - m.o = Objective(expr= m.e) + m.e = Expression(expr=log10(m.x) + 5) + m.c = Constraint(expr=m.x >= 10) + m.o = Objective(expr=m.e) self.assertRaises(GamsExceptionExecution, opt.solve, m) - @unittest.skipIf(not gamsgms_available, - "The 'gams' executable is not available") + @unittest.skipIf(not gamsgms_available, "The 'gams' executable is not available") def test_check_expr_eval_gms(self): with SolverFactory("gams", solver_io="gms") as opt: - m = ConcreteModel() m.x = Var() - m.e = Expression(expr= log10(m.x) + 5) - m.c = Constraint(expr= m.x >= 10) - m.o = Objective(expr= m.e) + m.e = Expression(expr=log10(m.x) + 5) + m.c = Constraint(expr=m.x >= 10) + m.o = Objective(expr=m.e) self.assertRaises(ValueError, opt.solve, m) - @unittest.skipIf(not gamspy_available, - "The 'gams' python bindings are not available") + @unittest.skipIf( + not gamspy_available, "The 'gams' python bindings are not available" + ) def test_file_removal_py(self): with SolverFactory("gams", solver_io="python") as opt: - m = ConcreteModel() m.x = Var() - m.c = Constraint(expr= m.x >= 10) - m.o = Objective(expr= m.x) + m.c = Constraint(expr=m.x >= 10) + m.o = Objective(expr=m.x) tmpdir = mkdtemp() results = opt.solve(m, tmpdir=tmpdir) self.assertTrue(os.path.exists(tmpdir)) - self.assertFalse(os.path.exists(os.path.join(tmpdir, - '_gams_py_gjo0.gms'))) - self.assertFalse(os.path.exists(os.path.join(tmpdir, - '_gams_py_gjo0.lst'))) - self.assertFalse(os.path.exists(os.path.join(tmpdir, - '_gams_py_gdb0.gdx'))) + self.assertFalse(os.path.exists(os.path.join(tmpdir, '_gams_py_gjo0.gms'))) + self.assertFalse(os.path.exists(os.path.join(tmpdir, '_gams_py_gjo0.lst'))) + self.assertFalse(os.path.exists(os.path.join(tmpdir, '_gams_py_gdb0.gdx'))) os.rmdir(tmpdir) @@ -86,29 +87,23 @@ def test_file_removal_py(self): self.assertFalse(os.path.exists(tmpdir)) - @unittest.skipIf(not gamsgms_available, - "The 'gams' executable is not available") + @unittest.skipIf(not gamsgms_available, "The 'gams' executable is not available") def test_file_removal_gms(self): with SolverFactory("gams", solver_io="gms") as opt: - m = ConcreteModel() m.x = Var() - m.c = Constraint(expr= m.x >= 10) - m.o = Objective(expr= m.x) + m.c = Constraint(expr=m.x >= 10) + m.o = Objective(expr=m.x) tmpdir = mkdtemp() results = opt.solve(m, tmpdir=tmpdir) self.assertTrue(os.path.exists(tmpdir)) - self.assertFalse(os.path.exists(os.path.join(tmpdir, - 'model.gms'))) - self.assertFalse(os.path.exists(os.path.join(tmpdir, - 'output.lst'))) - self.assertFalse(os.path.exists(os.path.join(tmpdir, - 'GAMS_MODEL_p.gdx'))) - self.assertFalse(os.path.exists(os.path.join(tmpdir, - 'GAMS_MODEL_s.gdx'))) + self.assertFalse(os.path.exists(os.path.join(tmpdir, 'model.gms'))) + self.assertFalse(os.path.exists(os.path.join(tmpdir, 'output.lst'))) + self.assertFalse(os.path.exists(os.path.join(tmpdir, 'GAMS_MODEL_p.gdx'))) + self.assertFalse(os.path.exists(os.path.join(tmpdir, 'GAMS_MODEL_s.gdx'))) os.rmdir(tmpdir) @@ -116,145 +111,147 @@ def test_file_removal_gms(self): self.assertFalse(os.path.exists(tmpdir)) - @unittest.skipIf(not gamspy_available, - "The 'gams' python bindings are not available") + @unittest.skipIf( + not gamspy_available, "The 'gams' python bindings are not available" + ) def test_keepfiles_py(self): with SolverFactory("gams", solver_io="python") as opt: - m = ConcreteModel() m.x = Var() - m.c = Constraint(expr= m.x >= 10) - m.o = Objective(expr= m.x) + m.c = Constraint(expr=m.x >= 10) + m.o = Objective(expr=m.x) tmpdir = mkdtemp() results = opt.solve(m, tmpdir=tmpdir, keepfiles=True) self.assertTrue(os.path.exists(tmpdir)) - self.assertTrue(os.path.exists(os.path.join(tmpdir, - '_gams_py_gjo0.gms'))) - self.assertTrue(os.path.exists(os.path.join(tmpdir, - '_gams_py_gjo0.lst'))) - self.assertTrue(os.path.exists(os.path.join(tmpdir, - '_gams_py_gdb0.gdx'))) - self.assertTrue(os.path.exists(os.path.join(tmpdir, - '_gams_py_gjo0.pf'))) + self.assertTrue(os.path.exists(os.path.join(tmpdir, '_gams_py_gjo0.gms'))) + self.assertTrue(os.path.exists(os.path.join(tmpdir, '_gams_py_gjo0.lst'))) + self.assertTrue(os.path.exists(os.path.join(tmpdir, '_gams_py_gdb0.gdx'))) + self.assertTrue(os.path.exists(os.path.join(tmpdir, '_gams_py_gjo0.pf'))) shutil.rmtree(tmpdir) - @unittest.skipIf(not gamsgms_available, - "The 'gams' executable is not available") + @unittest.skipIf(not gamsgms_available, "The 'gams' executable is not available") def test_keepfiles_gms(self): with SolverFactory("gams", solver_io="gms") as opt: - m = ConcreteModel() m.x = Var() - m.c = Constraint(expr= m.x >= 10) - m.o = Objective(expr= m.x) + m.c = Constraint(expr=m.x >= 10) + m.o = Objective(expr=m.x) tmpdir = mkdtemp() results = opt.solve(m, tmpdir=tmpdir, keepfiles=True) self.assertTrue(os.path.exists(tmpdir)) - self.assertTrue(os.path.exists(os.path.join(tmpdir, - 'model.gms'))) - self.assertTrue(os.path.exists(os.path.join(tmpdir, - 'output.lst'))) + self.assertTrue(os.path.exists(os.path.join(tmpdir, 'model.gms'))) + self.assertTrue(os.path.exists(os.path.join(tmpdir, 'output.lst'))) if gdxcc_available: - self.assertTrue(os.path.exists(os.path.join( - tmpdir, 'GAMS_MODEL_p.gdx'))) - self.assertTrue(os.path.exists(os.path.join( - tmpdir, 'results_s.gdx'))) + self.assertTrue( + os.path.exists(os.path.join(tmpdir, 'GAMS_MODEL_p.gdx')) + ) + self.assertTrue(os.path.exists(os.path.join(tmpdir, 'results_s.gdx'))) else: - self.assertTrue(os.path.exists(os.path.join( - tmpdir, 'results.dat'))) - self.assertTrue(os.path.exists(os.path.join( - tmpdir, 'resultsstat.dat'))) + self.assertTrue(os.path.exists(os.path.join(tmpdir, 'results.dat'))) + self.assertTrue(os.path.exists(os.path.join(tmpdir, 'resultsstat.dat'))) shutil.rmtree(tmpdir) - @unittest.skipIf(not gamspy_available, - "The 'gams' python bindings are not available") + @unittest.skipIf( + not gamspy_available, "The 'gams' python bindings are not available" + ) def test_fixed_var_sign_py(self): with SolverFactory("gams", solver_io="python") as opt: - m = ConcreteModel() m.x = Var() m.y = Var() m.z = Var() m.z.fix(-3) - m.c1 = Constraint(expr= m.x + m.y - m.z == 0) - m.c2 = Constraint(expr= m.z + m.y - m.z >= -10000) - m.c3 = Constraint(expr= -3 * m.z + m.y - m.z >= -10000) - m.c4 = Constraint(expr= -m.z + m.y - m.z >= -10000) - m.c5 = Constraint(expr= m.x <= 100) - m.o = Objective(expr= m.x, sense=maximize) + m.c1 = Constraint(expr=m.x + m.y - m.z == 0) + m.c2 = Constraint(expr=m.z + m.y - m.z >= -10000) + m.c3 = Constraint(expr=-3 * m.z + m.y - m.z >= -10000) + m.c4 = Constraint(expr=-m.z + m.y - m.z >= -10000) + m.c5 = Constraint(expr=m.x <= 100) + m.o = Objective(expr=m.x, sense=maximize) results = opt.solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) - @unittest.skipIf(not gamsgms_available, - "The 'gams' executable is not available") + @unittest.skipIf(not gamsgms_available, "The 'gams' executable is not available") def test_fixed_var_sign_gms(self): with SolverFactory("gams", solver_io="gms") as opt: - m = ConcreteModel() m.x = Var() m.y = Var() m.z = Var() m.z.fix(-3) - m.c1 = Constraint(expr= m.x + m.y - m.z == 0) - m.c2 = Constraint(expr= m.z + m.y - m.z >= -10000) - m.c3 = Constraint(expr= -3 * m.z + m.y - m.z >= -10000) - m.c4 = Constraint(expr= -m.z + m.y - m.z >= -10000) - m.c5 = Constraint(expr= m.x <= 100) - m.o = Objective(expr= m.x, sense=maximize) + m.c1 = Constraint(expr=m.x + m.y - m.z == 0) + m.c2 = Constraint(expr=m.z + m.y - m.z >= -10000) + m.c3 = Constraint(expr=-3 * m.z + m.y - m.z >= -10000) + m.c4 = Constraint(expr=-m.z + m.y - m.z >= -10000) + m.c5 = Constraint(expr=m.x <= 100) + m.o = Objective(expr=m.x, sense=maximize) results = opt.solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) - @unittest.skipIf(not gamspy_available, - "The 'gams' python bindings are not available") + @unittest.skipIf( + not gamspy_available, "The 'gams' python bindings are not available" + ) def test_long_var_py(self): with SolverFactory("gams", solver_io="python") as opt: - m = ConcreteModel() - x = m.a23456789012345678901234567890123456789012345678901234567890123 = Var() - y = m.b234567890123456789012345678901234567890123456789012345678901234 = Var() - z = m.c23456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 = Var() + x = ( + m.a23456789012345678901234567890123456789012345678901234567890123 + ) = Var() + y = ( + m.b234567890123456789012345678901234567890123456789012345678901234 + ) = Var() + z = ( + m.c23456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ) = Var() w = m.d01234567890 = Var() - m.c1 = Constraint(expr= x + y + z + w == 0) - m.c2 = Constraint(expr= x >= 10) - m.o = Objective(expr= x) + m.c1 = Constraint(expr=x + y + z + w == 0) + m.c2 = Constraint(expr=x >= 10) + m.o = Objective(expr=x) results = opt.solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) - @unittest.skipIf(not gamsgms_available, - "The 'gams' executable is not available") + @unittest.skipIf(not gamsgms_available, "The 'gams' executable is not available") def test_long_var_gms(self): with SolverFactory("gams", solver_io="gms") as opt: - m = ConcreteModel() - x = m.a23456789012345678901234567890123456789012345678901234567890123 = Var() - y = m.b234567890123456789012345678901234567890123456789012345678901234 = Var() - z = m.c23456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 = Var() + x = ( + m.a23456789012345678901234567890123456789012345678901234567890123 + ) = Var() + y = ( + m.b234567890123456789012345678901234567890123456789012345678901234 + ) = Var() + z = ( + m.c23456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + ) = Var() w = m.d01234567890 = Var() - m.c1 = Constraint(expr= x + y + z + w == 0) - m.c2 = Constraint(expr= x >= 10) - m.o = Objective(expr= x) + m.c1 = Constraint(expr=x + y + z + w == 0) + m.c2 = Constraint(expr=x >= 10) + m.o = Objective(expr=x) results = opt.solve(m) - self.assertEqual(results.solver.termination_condition, - TerminationCondition.optimal) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) def test_subsolver_notation(self): opt1 = SolverFactory("gams:ipopt", solver_io="gms") @@ -274,57 +271,78 @@ def test_subsolver_notation(self): self.assertTrue(isinstance(opt4, GAMSDirect)) self.assertEqual(opt4.options["solver"], "cbc") - @unittest.skipIf(not gamspy_available, - "The 'gams' python bindings are not available") + @unittest.skipIf( + not gamspy_available, "The 'gams' python bindings are not available" + ) def test_options_py(self): with SolverFactory("gams", solver_io="python") as opt: - m = ConcreteModel() m.x = Var() - m.c = Constraint(expr= m.x >= 10) - m.o = Objective(expr= m.x) + m.c = Constraint(expr=m.x >= 10) + m.o = Objective(expr=m.x) - opt.options["load_solutions"] = False # set option - opt.solve(m) # use option + opt.options["load_solutions"] = False # set option + opt.solve(m) # use option self.assertEqual(m.x.value, None) - opt.solve(m, load_solutions=True) # overwrite option + opt.solve(m, load_solutions=True) # overwrite option self.assertEqual(m.x.value, 10) - @unittest.skipIf(not gamsgms_available, - "The 'gams' executable is not available") + @unittest.skipIf(not gamsgms_available, "The 'gams' executable is not available") def test_options_gms(self): with SolverFactory("gams", solver_io="gms") as opt: - m = ConcreteModel() m.x = Var() - m.c = Constraint(expr= m.x >= 10) - m.o = Objective(expr= m.x) + m.c = Constraint(expr=m.x >= 10) + m.o = Objective(expr=m.x) - opt.options["load_solutions"] = False # set option - opt.solve(m) # use option + opt.options["load_solutions"] = False # set option + opt.solve(m) # use option self.assertEqual(m.x.value, None) - opt.solve(m, load_solutions=True) # overwrite option + opt.solve(m, load_solutions=True) # overwrite option self.assertEqual(m.x.value, 10) - @unittest.skipIf(not gamspy_available, - "The 'gams' python bindings are not available") + @unittest.skipIf( + not gamspy_available, "The 'gams' python bindings are not available" + ) def test_version_py(self): with SolverFactory("gams", solver_io="python") as opt: self.assertIsNotNone(opt.version()) - @unittest.skipIf(not gamsgms_available, - "The 'gams' executable is not available") + @unittest.skipIf(not gamsgms_available, "The 'gams' executable is not available") def test_version_gms(self): with SolverFactory("gams", solver_io="gms") as opt: self.assertIsNotNone(opt.version()) + @unittest.skipIf(not gamsgms_available, "The 'gams' executable is not available") + def test_dat_parser(self): + # This tests issue 2571 + m = pyo.ConcreteModel() + m.S = pyo.Set(initialize=list(range(5))) + m.a_long_var_name = pyo.Var(m.S, bounds=(0, 1), initialize=1) + m.obj = pyo.Objective( + expr=2000 * pyo.summation(m.a_long_var_name), sense=pyo.maximize + ) + solver = pyo.SolverFactory("gams:conopt") + res = solver.solve( + m, + symbolic_solver_labels=True, + load_solutions=False, + io_options={'put_results_format': 'dat'}, + ) + self.assertEqual(res.solution[0].Objective['obj']['Value'], 10000) + for i in range(5): + self.assertEqual( + res.solution[0].Variable[f'a_long_var_name_{i}_']['Value'], 1 + ) + + class GAMSLogfileTestBase(unittest.TestCase): def setUp(self): """Set up model and temporary directory.""" m = ConcreteModel() m.x = Var() - m.c = Constraint(expr= m.x >= 10) - m.o = Objective(expr= m.x) + m.c = Constraint(expr=m.x >= 10) + m.o = Objective(expr=m.x) self.m = m self.tmpdir = mkdtemp() self.logfile = os.path.join(self.tmpdir, 'logfile.log') @@ -395,6 +413,22 @@ def test_logfile(self): self._check_stdout(output.getvalue(), exists=False) self._check_logfile(exists=True) + def test_logfile_relative(self): + cwd = os.getcwd() + with TempfileManager: + tmpdir = TempfileManager.create_tempdir() + os.chdir(tmpdir) + try: + self.logfile = 'test-gams.log' + with SolverFactory("gams", solver_io="gms") as opt: + with capture_output() as output: + opt.solve(self.m, logfile=self.logfile) + self._check_stdout(output.getvalue(), exists=False) + self._check_logfile(exists=True) + self.assertTrue(os.path.exists(os.path.join(tmpdir, self.logfile))) + finally: + os.chdir(cwd) + def test_tee_and_logfile(self): with SolverFactory("gams", solver_io="gms") as opt: with capture_output() as output: @@ -433,6 +467,22 @@ def test_logfile(self): self._check_stdout(output.getvalue(), exists=False) self._check_logfile(exists=True) + def test_logfile_relative(self): + cwd = os.getcwd() + with TempfileManager: + tmpdir = TempfileManager.create_tempdir() + os.chdir(tmpdir) + try: + self.logfile = 'test-gams.log' + with SolverFactory("gams", solver_io="python") as opt: + with capture_output() as output: + opt.solve(self.m, logfile=self.logfile) + self._check_stdout(output.getvalue(), exists=False) + self._check_logfile(exists=True) + self.assertTrue(os.path.exists(os.path.join(tmpdir, self.logfile))) + finally: + os.chdir(cwd) + def test_tee_and_logfile(self): with SolverFactory("gams", solver_io="python") as opt: with capture_output() as output: @@ -441,6 +491,5 @@ def test_tee_and_logfile(self): self._check_logfile(exists=True) - if __name__ == "__main__": unittest.main() diff --git a/pyomo/solvers/tests/checks/test_MOSEKDirect.py b/pyomo/solvers/tests/checks/test_MOSEKDirect.py index b5aa813d68a..369cc08161a 100644 --- a/pyomo/solvers/tests/checks/test_MOSEKDirect.py +++ b/pyomo/solvers/tests/checks/test_MOSEKDirect.py @@ -11,9 +11,7 @@ import pyomo.common.unittest as unittest -from pyomo.opt import ( - TerminationCondition, SolutionStatus, check_available_solvers, -) +from pyomo.opt import TerminationCondition, SolutionStatus, check_available_solvers import pyomo.environ as pyo import pyomo.kernel as pmo import sys @@ -22,10 +20,9 @@ mosek_available = check_available_solvers('mosek_direct') -@unittest.skipIf(not mosek_available , - "MOSEK's python bindings are not available") -class MOSEKDirectTests(unittest.TestCase): +@unittest.skipIf(not mosek_available, "MOSEK's python bindings are not available") +class MOSEKDirectTests(unittest.TestCase): def setUp(self): self.stderr = sys.stderr sys.stderr = None @@ -34,7 +31,6 @@ def tearDown(self): sys.stderr = self.stderr def test_interface_call(self): - interface_instance = type(pyo.SolverFactory('mosek_direct')) alt_1 = pyo.SolverFactory('mosek') alt_2 = pyo.SolverFactory('mosek', solver_io='python') @@ -44,7 +40,6 @@ def test_interface_call(self): self.assertIsInstance(alt_3, interface_instance) def test_infeasible_lp(self): - model = pyo.ConcreteModel() model.X = pyo.Var(within=pyo.NonNegativeReals) model.C1 = pyo.Constraint(expr=model.X == 1) @@ -54,12 +49,15 @@ def test_infeasible_lp(self): opt = pyo.SolverFactory("mosek_direct") results = opt.solve(model) - self.assertIn(results.solver.termination_condition, - (TerminationCondition.infeasible, - TerminationCondition.infeasibleOrUnbounded)) + self.assertIn( + results.solver.termination_condition, + ( + TerminationCondition.infeasible, + TerminationCondition.infeasibleOrUnbounded, + ), + ) def test_unbounded_lp(self): - model = pyo.ConcreteModel() model.X = pyo.Var() model.O = pyo.Objective(expr=model.X) @@ -67,12 +65,15 @@ def test_unbounded_lp(self): opt = pyo.SolverFactory("mosek_direct") results = opt.solve(model) - self.assertIn(results.solver.termination_condition, - (TerminationCondition.unbounded, - TerminationCondition.infeasibleOrUnbounded)) + self.assertIn( + results.solver.termination_condition, + ( + TerminationCondition.unbounded, + TerminationCondition.infeasibleOrUnbounded, + ), + ) def test_optimal_lp(self): - model = pyo.ConcreteModel() model.X = pyo.Var(within=pyo.NonNegativeReals) model.O = pyo.Objective(expr=model.X) @@ -80,17 +81,15 @@ def test_optimal_lp(self): opt = pyo.SolverFactory("mosek_direct") results = opt.solve(model, load_solutions=False) - self.assertEqual(results.solution.status, - SolutionStatus.optimal) + self.assertEqual(results.solution.status, SolutionStatus.optimal) def test_get_duals_lp(self): - model = pyo.ConcreteModel() model.X = pyo.Var(within=pyo.NonNegativeReals) model.Y = pyo.Var(within=pyo.NonNegativeReals) - model.C1 = pyo.Constraint(expr=2*model.X + model.Y >= 8) - model.C2 = pyo.Constraint(expr=model.X + 3*model.Y >= 6) + model.C1 = pyo.Constraint(expr=2 * model.X + model.Y >= 8) + model.C2 = pyo.Constraint(expr=model.X + 3 * model.Y >= 6) model.O = pyo.Objective(expr=model.X + model.Y) @@ -104,7 +103,6 @@ def test_get_duals_lp(self): self.assertAlmostEqual(model.dual[model.C2], 0.2, 4) def test_infeasible_mip(self): - model = pyo.ConcreteModel() model.X = pyo.Var(within=pyo.NonNegativeIntegers) model.C1 = pyo.Constraint(expr=model.X == 1) @@ -114,12 +112,15 @@ def test_infeasible_mip(self): opt = pyo.SolverFactory("mosek_direct") results = opt.solve(model) - self.assertIn(results.solver.termination_condition, - (TerminationCondition.infeasibleOrUnbounded, - TerminationCondition.infeasible)) + self.assertIn( + results.solver.termination_condition, + ( + TerminationCondition.infeasibleOrUnbounded, + TerminationCondition.infeasible, + ), + ) def test_unbounded_mip(self): - model = pyo.AbstractModel() model.X = pyo.Var(within=pyo.Integers) model.O = pyo.Objective(expr=model.X) @@ -128,12 +129,15 @@ def test_unbounded_mip(self): opt = pyo.SolverFactory("mosek_direct") results = opt.solve(instance) - self.assertIn(results.solver.termination_condition, - (TerminationCondition.unbounded, - TerminationCondition.infeasibleOrUnbounded)) + self.assertIn( + results.solver.termination_condition, + ( + TerminationCondition.unbounded, + TerminationCondition.infeasibleOrUnbounded, + ), + ) def test_optimal_mip(self): - model = pyo.ConcreteModel() model.X = pyo.Var(within=pyo.NonNegativeIntegers) model.O = pyo.Objective(expr=model.X) @@ -141,60 +145,81 @@ def test_optimal_mip(self): opt = pyo.SolverFactory("mosek_direct") results = opt.solve(model, load_solutions=False) - self.assertEqual(results.solution.status, - SolutionStatus.optimal) + self.assertEqual(results.solution.status, SolutionStatus.optimal) - def test_conic(self): + def test_qcqo(self): + model = pmo.block() + model.x = pmo.variable_list() + for i in range(3): + model.x.append(pmo.variable(lb=0.0)) + + model.cons = pmo.constraint( + expr=model.x[0] + + model.x[1] + + model.x[2] + - model.x[0] ** 2 + - model.x[1] ** 2 + - 0.1 * model.x[2] ** 2 + + 0.2 * model.x[0] * model.x[2] + >= 1.0 + ) + + model.o = pmo.objective( + expr=model.x[0] ** 2 + + 0.1 * model.x[1] ** 2 + + model.x[2] ** 2 + - model.x[0] * model.x[2] + - model.x[1], + sense=pmo.minimize, + ) + opt = pmo.SolverFactory("mosek_direct") + results = opt.solve(model) + + self.assertAlmostEqual(results.problem.upper_bound, -4.9176e-01, 4) + self.assertAlmostEqual(results.problem.lower_bound, -4.9180e-01, 4) + + del model + + def test_conic(self): model = pmo.block() model.o = pmo.objective(0.0) - model.c = pmo.constraint(body=0.0, - rhs=1) + model.c = pmo.constraint(body=0.0, rhs=1) b = model.quadratic = pmo.block() - b.x = pmo.variable_tuple((pmo.variable(), - pmo.variable())) + b.x = pmo.variable_tuple((pmo.variable(), pmo.variable())) b.r = pmo.variable(lb=0) - b.c = pmo.conic.quadratic(x=b.x, - r=b.r) + b.c = pmo.conic.quadratic(x=b.x, r=b.r) model.o.expr += b.r model.c.body += b.r del b b = model.rotated_quadratic = pmo.block() - b.x = pmo.variable_tuple((pmo.variable(), - pmo.variable())) + b.x = pmo.variable_tuple((pmo.variable(), pmo.variable())) b.r1 = pmo.variable(lb=0) b.r2 = pmo.variable(lb=0) - b.c = pmo.conic.rotated_quadratic(x=b.x, - r1=b.r1, - r2=b.r2) + b.c = pmo.conic.rotated_quadratic(x=b.x, r1=b.r1, r2=b.r2) model.o.expr += b.r1 + b.r2 model.c.body += b.r1 + b.r2 del b import mosek + if mosek.Env().getversion() >= (9, 0, 0): b = model.primal_exponential = pmo.block() b.x1 = pmo.variable(lb=0) b.x2 = pmo.variable() b.r = pmo.variable(lb=0) - b.c = pmo.conic.primal_exponential(x1=b.x1, - x2=b.x2, - r=b.r) + b.c = pmo.conic.primal_exponential(x1=b.x1, x2=b.x2, r=b.r) model.o.expr += b.r model.c.body += b.r del b b = model.primal_power = pmo.block() - b.x = pmo.variable_tuple((pmo.variable(), - pmo.variable())) + b.x = pmo.variable_tuple((pmo.variable(), pmo.variable())) b.r1 = pmo.variable(lb=0) b.r2 = pmo.variable(lb=0) - b.c = pmo.conic.primal_power(x=b.x, - r1=b.r1, - r2=b.r2, - alpha=0.6) + b.c = pmo.conic.primal_power(x=b.x, r1=b.r1, r2=b.r2, alpha=0.6) model.o.expr += b.r1 + b.r2 model.c.body += b.r1 + b.r2 del b @@ -203,30 +228,90 @@ def test_conic(self): b.x1 = pmo.variable() b.x2 = pmo.variable(ub=0) b.r = pmo.variable(lb=0) - b.c = pmo.conic.dual_exponential(x1=b.x1, - x2=b.x2, - r=b.r) + b.c = pmo.conic.dual_exponential(x1=b.x1, x2=b.x2, r=b.r) model.o.expr += b.r model.c.body += b.r del b b = model.dual_power = pmo.block() - b.x = pmo.variable_tuple((pmo.variable(), - pmo.variable())) + b.x = pmo.variable_tuple((pmo.variable(), pmo.variable())) b.r1 = pmo.variable(lb=0) b.r2 = pmo.variable(lb=0) - b.c = pmo.conic.dual_power(x=b.x, - r1=b.r1, - r2=b.r2, - alpha=0.4) + b.c = pmo.conic.dual_power(x=b.x, r1=b.r1, r2=b.r2, alpha=0.4) model.o.expr += b.r1 + b.r2 model.c.body += b.r1 + b.r2 + if mosek.Env().getversion() >= (10, 0, 0): + b = model.primal_geomean = pmo.block() + b.r = pmo.variable_tuple((pmo.variable(), pmo.variable())) + b.x = pmo.variable() + b.c = pmo.conic.primal_geomean(r=b.r, x=b.x) + model.o.expr += b.r[0] + b.r[1] + model.c.body += b.r[0] + b.r[1] + del b + + b = model.dual_geomean = pmo.block() + b.r = pmo.variable_tuple((pmo.variable(), pmo.variable())) + b.x = pmo.variable() + b.c = pmo.conic.dual_geomean(r=b.r, x=b.x) + model.o.expr += b.r[0] + b.r[1] + model.c.body += b.r[0] + b.r[1] + del b + + b = model.svec_psdcone = pmo.block() + b.x = pmo.variable_tuple((pmo.variable(), pmo.variable(), pmo.variable())) + b.c = pmo.conic.svec_psdcone(x=b.x) + model.o.expr += b.x[0] + 2 * b.x[1] + b.x[2] + model.c.body += b.x[0] + 2 * b.x[1] + b.x[2] + del b + opt = pmo.SolverFactory("mosek_direct") results = opt.solve(model) - self.assertEqual(results.solution.status, - SolutionStatus.optimal) + self.assertEqual(results.solution.status, SolutionStatus.optimal) + + def _test_model(self): + model = pmo.block() + model.x0, model.x1, model.x2 = [pmo.variable() for i in range(3)] + model.obj = pmo.objective(2 * model.x0 + 3 * model.x1 - model.x2, sense=-1) + + model.con1 = pmo.constraint(model.x0 + model.x1 + model.x2 == 1) + model.quad = pmo.conic.quadratic.as_domain( + r=0.03, + x=[ + pmo.expression(1.5 * model.x0 + 0.1 * model.x1), + pmo.expression(0.3 * model.x0 + 2.1 * model.x2 + 0.1), + ], + ) + return model + + def test_conic_duals(self): + check = [-1.94296808, -0.303030303, -1.91919191] + # load_duals (without args) + with pmo.SolverFactory('mosek_direct') as solver: + model = self._test_model() + results = solver.solve(model) + model.dual = pmo.suffix(direction=pmo.suffix.IMPORT) + solver.load_duals() + for i in range(3): + self.assertAlmostEqual(model.dual[model.quad.q][i], check[i], 5) + # load_duals (with args) + with pmo.SolverFactory('mosek_direct') as solver: + model = self._test_model() + results = solver.solve(model) + model.dual = pmo.suffix(direction=pmo.suffix.IMPORT) + solver.load_duals([model.quad.q]) + for i in range(3): + self.assertAlmostEqual(model.dual[model.quad.q][i], check[i], 5) + # save_results=True (deprecated) + with pmo.SolverFactory('mosek_direct') as solver: + model = self._test_model() + model.dual = pmo.suffix(direction=pmo.suffix.IMPORT) + results = solver.solve(model, save_results=True) + for i in range(3): + self.assertAlmostEqual( + results.Solution.constraint['x11']['Dual'][i], check[i], 5 + ) if __name__ == "__main__": diff --git a/pyomo/solvers/tests/checks/test_MOSEKPersistent.py b/pyomo/solvers/tests/checks/test_MOSEKPersistent.py index 1e8f9f6f1c4..59ea930c4f0 100644 --- a/pyomo/solvers/tests/checks/test_MOSEKPersistent.py +++ b/pyomo/solvers/tests/checks/test_MOSEKPersistent.py @@ -1,7 +1,9 @@ import pyomo.common.unittest as unittest from pyomo.opt import ( - TerminationCondition, SolutionStatus, SolverStatus, + TerminationCondition, + SolutionStatus, + SolverStatus, check_available_solvers, ) import pyomo.environ as pyo @@ -11,10 +13,13 @@ diff_tol = 1e-3 mosek_available = check_available_solvers('mosek_direct') +msk_version = [0] +if mosek_available: + msk_version = pyo.SolverFactory('mosek')._version + @unittest.skipIf(not mosek_available, "MOSEK's python bindings are missing.") class MOSEKPersistentTests(unittest.TestCase): - def setUp(self): self.stderr = sys.stderr sys.stderr = None @@ -23,7 +28,6 @@ def tearDown(self): sys.stderr = self.stderr def test_interface_call(self): - interface_instance = type(pyo.SolverFactory('mosek_persistent')) alt_1 = pyo.SolverFactory('mosek', solver_io='persistent') self.assertIsInstance(alt_1, interface_instance) @@ -55,7 +59,7 @@ def test_constraint_removal_1(self): m.x = pyo.Var() m.y = pyo.Var() m.z = pyo.Var() - m.c1 = pyo.Constraint(expr=2*m.x >= m.y**2) + m.c1 = pyo.Constraint(expr=2 * m.x >= m.y**2) m.c2 = pyo.Constraint(expr=m.x**2 >= m.y**2 + m.z**2) m.c3 = pyo.Constraint(expr=m.z >= 0) m.c4 = pyo.Constraint(expr=m.x + m.y >= 0) @@ -74,6 +78,9 @@ def test_constraint_removal_1(self): self.assertEqual(opt._solver_model.getnumcon(), 2) self.assertRaises(ValueError, opt.remove_constraint, m.c2) + @unittest.skipIf( + msk_version[0] > 9, "MOSEK 10 does not (yet) have a removeacc method." + ) def test_constraint_removal_2(self): m = pmo.block() m.x = pmo.variable() @@ -112,10 +119,10 @@ def test_column_addition(self): m.x = pyo.Var(bounds=(0, None)) m.y = pyo.Var(bounds=(0, 10)) m.z = pyo.Var(bounds=(0, None)) - m.c1 = pyo.Constraint(expr=3*m.x + m.y + 2*m.z == 30) - m.c2 = pyo.Constraint(expr=2*m.x + m.y + 3*m.z >= 15) - m.c3 = pyo.Constraint(expr=2*m.y <= 25) - m.o = pyo.Objective(expr=3*m.x + m.y + 5*m.z, sense=pyo.maximize) + m.c1 = pyo.Constraint(expr=3 * m.x + m.y + 2 * m.z == 30) + m.c2 = pyo.Constraint(expr=2 * m.x + m.y + 3 * m.z >= 15) + m.c3 = pyo.Constraint(expr=2 * m.y <= 25) + m.o = pyo.Objective(expr=3 * m.x + m.y + 5 * m.z, sense=pyo.maximize) opt = pyo.SolverFactory('mosek_persistent') opt.set_instance(m) @@ -139,9 +146,9 @@ def test_variable_update(self): m = pyo.ConcreteModel() m.x = pyo.Var() m.y = pyo.Var() - m.c1 = pyo.Constraint(expr=50*m.x + 31*m.y <= 250) - m.c2 = pyo.Constraint(expr=3*m.x - 2*m.y >= -4) - m.o = pyo.Objective(expr=m.x + 0.64*m.y, sense=pyo.maximize) + m.c1 = pyo.Constraint(expr=50 * m.x + 31 * m.y <= 250) + m.c2 = pyo.Constraint(expr=3 * m.x - 2 * m.y >= -4) + m.o = pyo.Objective(expr=m.x + 0.64 * m.y, sense=pyo.maximize) opt = pyo.SolverFactory('mosek_persistent') opt.set_instance(m) opt.solve(m) diff --git a/pyomo/solvers/tests/checks/test_cbc.py b/pyomo/solvers/tests/checks/test_cbc.py index a774aa30910..0fd6e9f49a1 100644 --- a/pyomo/solvers/tests/checks/test_cbc.py +++ b/pyomo/solvers/tests/checks/test_cbc.py @@ -11,8 +11,15 @@ import os from pyomo.environ import ( - SolverFactory, ConcreteModel, Var, Constraint, Objective, - Integers, Boolean, Suffix, maximize, + SolverFactory, + ConcreteModel, + Var, + Constraint, + Objective, + Integers, + Boolean, + Suffix, + maximize, ) from pyomo.common.tee import capture_output from pyomo.common.tempfiles import TempfileManager @@ -23,11 +30,8 @@ class CBCTests(unittest.TestCase): - - @unittest.skipIf(not cbc_available, - "The CBC solver is not available") + @unittest.skipIf(not cbc_available, "The CBC solver is not available") def test_warm_start(self): - m = ConcreteModel() m.x = Var() m.z = Var(domain=Integers) @@ -39,9 +43,8 @@ def test_warm_start(self): tempdir = os.path.dirname(TempfileManager.create_tempfile()) TempfileManager.pop() - sameDrive = os.path.splitdrive(tempdir)[0] == \ - os.path.splitdrive(os.getcwd())[0] - + sameDrive = os.path.splitdrive(tempdir)[0] == os.path.splitdrive(os.getcwd())[0] + # At the moment, CBC does not cleanly handle windows-style drive # names in the MIPSTART file name (though at least 2.10.5). # @@ -58,8 +61,9 @@ def test_warm_start(self): m.w.set_value(1) with SolverFactory("cbc") as opt, capture_output() as output: - opt.solve(m, tee=True, warmstart=True, options={ - 'sloglevel': 2, 'loglevel': 2}) + opt.solve( + m, tee=True, warmstart=True, options={'sloglevel': 2, 'loglevel': 2} + ) log = output.getvalue() # Check if CBC loaded the warmstart file. @@ -73,7 +77,6 @@ def test_warm_start(self): else: self.assertNotIn('MIPStart values read', log) - # Set some initial values for warm start. m.x.set_value(10) m.z.set_value(5) @@ -83,8 +86,9 @@ def test_warm_start(self): _origDir = os.getcwd() os.chdir(tempdir) with SolverFactory("cbc") as opt, capture_output() as output: - opt.solve(m, tee=True, warmstart=True, options={ - 'sloglevel': 2, 'loglevel': 2}) + opt.solve( + m, tee=True, warmstart=True, options={'sloglevel': 2, 'loglevel': 2} + ) finally: os.chdir(_origDir) @@ -96,9 +100,7 @@ def test_warm_start(self): # m.x is ignored because it is continuous, so cost should be 5+1 self.assertIn('MIPStart provided solution with cost 6', log) - - @unittest.skipIf(not cbc_available, - "The CBC solver is not available") + @unittest.skipIf(not cbc_available, "The CBC solver is not available") def test_duals_signs(self): m = ConcreteModel() m.x = Var() @@ -116,8 +118,7 @@ def test_duals_signs(self): self.assertAlmostEqual(res.problem.upper_bound, 1) self.assertAlmostEqual(m.dual[m.c], 1) - @unittest.skipIf(not cbc_available, - "The CBC solver is not available") + @unittest.skipIf(not cbc_available, "The CBC solver is not available") def test_rc_signs(self): m = ConcreteModel() m.x = Var(bounds=(-1, 1)) diff --git a/pyomo/solvers/tests/checks/test_cplex.py b/pyomo/solvers/tests/checks/test_cplex.py index 8c9cb52c7cb..4f1d7aca99b 100644 --- a/pyomo/solvers/tests/checks/test_cplex.py +++ b/pyomo/solvers/tests/checks/test_cplex.py @@ -15,18 +15,35 @@ import pyomo.common.unittest as unittest import pyomo.kernel as pmo -from pyomo.core import Binary, ConcreteModel, Constraint, Objective, Var, Integers, RangeSet, minimize, quicksum, Suffix +from pyomo.core import ( + Binary, + ConcreteModel, + Constraint, + Objective, + Var, + Integers, + RangeSet, + minimize, + quicksum, + Suffix, +) from pyomo.opt import ProblemFormat, convert_problem, SolverFactory, BranchDirection -from pyomo.solvers.plugins.solvers.CPLEX import CPLEXSHELL, MockCPLEX, _validate_file_name +from pyomo.solvers.plugins.solvers.CPLEX import ( + CPLEXSHELL, + MockCPLEX, + _validate_file_name, +) class _mock_cplex_128(object): def version(self): - return (12,8,0) + return (12, 8, 0) + class _mock_cplex_126(object): def version(self): - return (12,6,0) + return (12, 6, 0) + class CPLEX_utils(unittest.TestCase): def test_validate_file_name(self): @@ -40,11 +57,9 @@ def test_validate_file_name(self): # Check spaces in the file fname = 'foo bar.lp' - with self.assertRaisesRegex( - ValueError, "Space detected in CPLEX xxx file"): + with self.assertRaisesRegex(ValueError, "Space detected in CPLEX xxx file"): _validate_file_name(_126, fname, 'xxx') - self.assertEqual('"%s"' % (fname,), - _validate_file_name(_128, fname, 'xxx')) + self.assertEqual('"%s"' % (fname,), _validate_file_name(_128, fname, 'xxx')) # check OK path separators fname = 'foo%sbar.lp' % (os.path.sep,) @@ -52,10 +67,37 @@ def test_validate_file_name(self): self.assertEqual(fname, _validate_file_name(_128, fname, 'xxx')) # check BAD path separators - bad_char = '/\\'.replace(os.path.sep,'') + bad_char = '/\\'.replace(os.path.sep, '') fname = 'foo%sbar.lp' % (bad_char,) msg = r'Unallowed character \(%s\) found in CPLEX xxx file' % ( - repr(bad_char)[1:-1],) + repr(bad_char)[1:-1], + ) + with self.assertRaisesRegex(ValueError, msg): + _validate_file_name(_126, fname, 'xxx') + with self.assertRaisesRegex(ValueError, msg): + _validate_file_name(_128, fname, 'xxx') + + # check allowable characters + fname = 'foo$$bar.lp' + self.assertEqual(fname, _validate_file_name(_126, fname, 'xxx')) + self.assertEqual(fname, _validate_file_name(_128, fname, 'xxx')) + fname = 'foo_bar.lp' + self.assertEqual(fname, _validate_file_name(_126, fname, 'xxx')) + self.assertEqual(fname, _validate_file_name(_128, fname, 'xxx')) + fname = 'foo&bar.lp' + self.assertEqual(fname, _validate_file_name(_126, fname, 'xxx')) + self.assertEqual(fname, _validate_file_name(_128, fname, 'xxx')) + fname = 'foo~bar.lp' + self.assertEqual(fname, _validate_file_name(_126, fname, 'xxx')) + self.assertEqual(fname, _validate_file_name(_128, fname, 'xxx')) + fname = 'foo-bar.lp' + self.assertEqual(fname, _validate_file_name(_126, fname, 'xxx')) + self.assertEqual(fname, _validate_file_name(_128, fname, 'xxx')) + + # Check unallowable character + bad_char = '^' + fname = 'foo%sbar.lp' % (bad_char,) + msg = r"Unallowed character \(\^\) found in CPLEX xxx file" with self.assertRaisesRegex(ValueError, msg): _validate_file_name(_126, fname, 'xxx') with self.assertRaisesRegex(ValueError, msg): @@ -63,7 +105,8 @@ def test_validate_file_name(self): class CPLEXShellWritePrioritiesFile(unittest.TestCase): - """ Unit test on writing of priorities via `CPLEXSHELL._write_priorities_file()` """ + """Unit test on writing of priorities via `CPLEXSHELL._write_priorities_file()`""" + suffix_cls = Suffix def setUp(self): @@ -86,11 +129,16 @@ def get_mock_model(self): def get_mock_cplex_shell(self, mock_model): solver = MockCPLEX() - solver._problem_files, solver._problem_format, solver._smap_id = convert_problem( + ( + solver._problem_files, + solver._problem_format, + solver._smap_id, + ) = convert_problem( (mock_model,), ProblemFormat.cpxlp, [ProblemFormat.cpxlp], has_capability=lambda x: True, + symbolic_solver_labels=True, ) return solver @@ -108,9 +156,13 @@ def test_write_without_priority_suffix(self): CPLEXSHELL._write_priorities_file(self.mock_cplex_shell, self.mock_model) def test_write_priority_to_priorities_file(self): - self.mock_model.priority = self.suffix_cls(direction=Suffix.EXPORT, datatype=Suffix.INT) + self.mock_model.priority = self.suffix_cls( + direction=Suffix.EXPORT, datatype=Suffix.INT + ) priority_val = 10 - self._set_suffix_value(self.mock_model.priority, self.mock_model.x, priority_val) + self._set_suffix_value( + self.mock_model.priority, self.mock_model.x, priority_val + ) CPLEXSHELL._write_priorities_file(self.mock_cplex_shell, self.mock_model) priorities_file = self.get_priorities_file_as_string(self.mock_cplex_shell) @@ -119,18 +171,26 @@ def test_write_priority_to_priorities_file(self): priorities_file, "* ENCODING=ISO-8859-1\n" "NAME Priority Order\n" - " x1 10\n" - "ENDATA\n" + " x 10\n" + "ENDATA\n", ) def test_write_priority_and_direction_to_priorities_file(self): - self.mock_model.priority = self.suffix_cls(direction=Suffix.EXPORT, datatype=Suffix.INT) + self.mock_model.priority = self.suffix_cls( + direction=Suffix.EXPORT, datatype=Suffix.INT + ) priority_val = 10 - self._set_suffix_value(self.mock_model.priority, self.mock_model.x, priority_val) + self._set_suffix_value( + self.mock_model.priority, self.mock_model.x, priority_val + ) - self.mock_model.direction = self.suffix_cls(direction=Suffix.EXPORT, datatype=Suffix.INT) + self.mock_model.direction = self.suffix_cls( + direction=Suffix.EXPORT, datatype=Suffix.INT + ) direction_val = BranchDirection.down - self._set_suffix_value(self.mock_model.direction, self.mock_model.x, direction_val) + self._set_suffix_value( + self.mock_model.direction, self.mock_model.x, direction_val + ) CPLEXSHELL._write_priorities_file(self.mock_cplex_shell, self.mock_model) priorities_file = self.get_priorities_file_as_string(self.mock_cplex_shell) @@ -139,12 +199,14 @@ def test_write_priority_and_direction_to_priorities_file(self): priorities_file, "* ENCODING=ISO-8859-1\n" "NAME Priority Order\n" - " DN x1 10\n" - "ENDATA\n" + " DN x 10\n" + "ENDATA\n", ) def test_raise_due_to_invalid_priority(self): - self.mock_model.priority = self.suffix_cls(direction=Suffix.EXPORT, datatype=Suffix.INT) + self.mock_model.priority = self.suffix_cls( + direction=Suffix.EXPORT, datatype=Suffix.INT + ) self._set_suffix_value(self.mock_model.priority, self.mock_model.x, -1) with self.assertRaises(ValueError): CPLEXSHELL._write_priorities_file(self.mock_cplex_shell, self.mock_model) @@ -154,11 +216,17 @@ def test_raise_due_to_invalid_priority(self): CPLEXSHELL._write_priorities_file(self.mock_cplex_shell, self.mock_model) def test_use_default_due_to_invalid_direction(self): - self.mock_model.priority = self.suffix_cls(direction=Suffix.EXPORT, datatype=Suffix.INT) + self.mock_model.priority = self.suffix_cls( + direction=Suffix.EXPORT, datatype=Suffix.INT + ) priority_val = 10 - self._set_suffix_value(self.mock_model.priority, self.mock_model.x, priority_val) + self._set_suffix_value( + self.mock_model.priority, self.mock_model.x, priority_val + ) - self.mock_model.direction = self.suffix_cls(direction=Suffix.EXPORT, datatype=Suffix.INT) + self.mock_model.direction = self.suffix_cls( + direction=Suffix.EXPORT, datatype=Suffix.INT + ) self._set_suffix_value( self.mock_model.direction, self.mock_model.x, "invalid_branching_direction" ) @@ -170,8 +238,8 @@ def test_use_default_due_to_invalid_direction(self): priorities_file, "* ENCODING=ISO-8859-1\n" "NAME Priority Order\n" - " x1 10\n" - "ENDATA\n" + " x 10\n" + "ENDATA\n", ) @@ -191,11 +259,12 @@ def get_mock_model(self): class CPLEXShellSolvePrioritiesFile(unittest.TestCase): - """ Integration test on the end-to-end application of priorities via the `Suffix` through a `solve()` """ + """Integration test on the end-to-end application of priorities via the `Suffix` through a `solve()`""" + def get_mock_model_with_priorities(self): m = ConcreteModel() m.x = Var(domain=Integers) - m.s = RangeSet(10) + m.s = RangeSet(0, 9) m.y = Var(m.s, domain=Integers) m.o = Objective(expr=m.x + sum(m.y), sense=minimize) m.c = Constraint(expr=m.x >= 1) @@ -210,13 +279,15 @@ def get_mock_model_with_priorities(self): m.priority.set_value(m.y, 2, expand=False) m.direction.set_value(m.y, BranchDirection.down, expand=True) - m.direction.set_value(m.y[10], BranchDirection.up) + m.direction.set_value(m.y[9], BranchDirection.up) return m def test_use_variable_priorities(self): model = self.get_mock_model_with_priorities() with SolverFactory("_mock_cplex") as opt: - opt._presolve(model, priorities=True, keepfiles=True) + opt._presolve( + model, priorities=True, keepfiles=True, symbolic_solver_labels=True + ) with open(opt._priorities_file_name, "r") as ord_file: priorities_file = ord_file.read() @@ -226,17 +297,17 @@ def test_use_variable_priorities(self): ( "* ENCODING=ISO-8859-1\n" "NAME Priority Order\n" - " x1 1\n" - " DN x2 2\n" - " DN x3 2\n" - " DN x4 2\n" - " DN x5 2\n" - " DN x6 2\n" - " DN x7 2\n" - " DN x8 2\n" - " DN x9 2\n" - " DN x10 2\n" - " UP x11 2\n" + " x 1\n" + " DN y(0) 2\n" + " DN y(1) 2\n" + " DN y(2) 2\n" + " DN y(3) 2\n" + " DN y(4) 2\n" + " DN y(5) 2\n" + " DN y(6) 2\n" + " DN y(7) 2\n" + " DN y(8) 2\n" + " UP y(9) 2\n" "ENDATA\n" ), ) @@ -251,7 +322,7 @@ def test_ignore_variable_priorities(self): self.assertNotIn(".ord", opt._command.script) def test_can_use_manual_priorities_file_with_lp_solve(self): - """ Test that we can pass an LP file (not a pyomo model) along with a priorities file to `.solve()` """ + """Test that we can pass an LP file (not a pyomo model) along with a priorities file to `.solve()`""" model = self.get_mock_model_with_priorities() with SolverFactory("_mock_cplex") as pre_opt: diff --git a/pyomo/solvers/tests/checks/test_gurobi.py b/pyomo/solvers/tests/checks/test_gurobi.py new file mode 100644 index 00000000000..f33a00ce8a2 --- /dev/null +++ b/pyomo/solvers/tests/checks/test_gurobi.py @@ -0,0 +1,46 @@ +import pyomo.common.unittest as unittest +from unittest.mock import patch, MagicMock + +try: + from pyomo.solvers.plugins.solvers.GUROBI_RUN import gurobi_run + from gurobipy import GRB + + gurobipy_available = True + has_worklimit = hasattr(GRB, "WORK_LIMIT") +except: + gurobipy_available = False + has_worklimit = False + + +@unittest.skipIf(not gurobipy_available, "gurobipy is not available") +class GurobiTest(unittest.TestCase): + @unittest.skipIf(not has_worklimit, "gurobi < 9.5") + @patch("builtins.open") + @patch("pyomo.solvers.plugins.solvers.GUROBI_RUN.read") + def test_work_limit(self, read: MagicMock, open: MagicMock): + file = MagicMock() + open.return_value = file + model = MagicMock() + read.return_value = model + + def getAttr(attr): + if attr == GRB.Attr.Status: + return GRB.WORK_LIMIT + elif attr == GRB.Attr.ModelSense: + return 1 + elif attr == GRB.Attr.ModelName: + return "" + elif attr.startswith("Num"): + return 1 + elif attr == GRB.Attr.SolCount: + return 0 + + return None + + model.getAttr = getAttr + gurobi_run(None, None, None, None, {}, []) + self.assertTrue("WorkLimit" in file.write.call_args[0][0]) + + +if __name__ == '__main__': + unittest.main() diff --git a/pyomo/solvers/tests/checks/test_gurobi_direct.py b/pyomo/solvers/tests/checks/test_gurobi_direct.py new file mode 100644 index 00000000000..8fb9526195d --- /dev/null +++ b/pyomo/solvers/tests/checks/test_gurobi_direct.py @@ -0,0 +1,415 @@ +""" +Tests for working with Gurobi environments. Some require a single-use license +and are skipped if this isn't the case. +""" + +import gc +from unittest.mock import patch + +import pyomo.environ as pyo +import pyomo.common.unittest as unittest +from pyomo.common.errors import ApplicationError +from pyomo.environ import SolverFactory, ConcreteModel +from pyomo.opt import SolverStatus, TerminationCondition +from pyomo.solvers.plugins.solvers.gurobi_direct import GurobiDirect + + +try: + import gurobipy as gp + + NO_LICENSE = gp.GRB.Error.NO_LICENSE + gurobipy_available = True +except ImportError: + gurobipy_available = False + + +def clean_up_global_state(): + # Best efforts to dispose any gurobipy objects from previous tests + # which might keep the default environment active + gc.collect() + gp.disposeDefaultEnv() + # Reset flag to sync with default env state + GurobiDirect._default_env_started = False + + +def single_use_license(): + # Return true if the current license is valid and single-use + if not gurobipy_available: + return False + clean_up_global_state() + try: + with gp.Env(): + try: + with gp.Env(): + # License allows multiple uses + return False + except gp.GurobiError: + return True + except gp.GurobiError: + # No license available + return False + + +class GurobiBase(unittest.TestCase): + # Base class ensures the global environment is cleaned up + + def setUp(self): + clean_up_global_state() + + # A simple model to solve + model = ConcreteModel() + model.x = pyo.Var([1, 2], domain=pyo.NonNegativeReals) + model.OBJ = pyo.Objective(expr=model.x[1] + model.x[2], sense=pyo.maximize) + model.Constraint1 = pyo.Constraint(expr=2 * model.x[1] + model.x[2] <= 1) + model.Constraint2 = pyo.Constraint(expr=model.x[1] + 2 * model.x[2] <= 1) + self.model = model + + def tearDown(self): + clean_up_global_state() + + +@unittest.skipIf(gurobipy_available, "gurobipy is installed, skip import test") +class GurobiImportFailedTests(unittest.TestCase): + def test_gurobipy_not_installed(self): + # ApplicationError should be thrown if gurobipy is not available + model = ConcreteModel() + with SolverFactory("gurobi_direct") as opt: + with self.assertRaisesRegex(ApplicationError, "No Python bindings"): + opt.solve(model) + + +@unittest.skipIf(not gurobipy_available, "gurobipy is not available") +class GurobiParameterTests(GurobiBase): + # Test parameter handling at the model and environment level + + def test_set_environment_parameters(self): + # Solver options should handle parameters which must be set before the + # environment is started (i.e. connection params, memory limits). This + # can only work with a managed env. + + with SolverFactory( + "gurobi_direct", manage_env=True, options={"ComputeServer": "my-cs-url"} + ) as opt: + # Check that the error comes from an attempted connection, (i.e. error + # message reports the hostname) and not from setting the parameter after + # the environment is started. + with self.assertRaisesRegex(ApplicationError, "my-cs-url"): + opt.solve(self.model) + + def test_set_once(self): + # Make sure parameters aren't set twice. If they are set on the + # environment, they shouldn't also be set on the model. This isn't an + # issue for most parameters, but some license parameters (e.g. WLS) + # will complain if set in both places. + + envparams = {} + modelparams = {} + + class TempEnv(gp.Env): + def setParam(self, param, value): + envparams[param] = value + + class TempModel(gp.Model): + def setParam(self, param, value): + modelparams[param] = value + + with patch("gurobipy.Env", new=TempEnv), patch("gurobipy.Model", new=TempModel): + with SolverFactory( + "gurobi_direct", options={"Method": 2, "MIPFocus": 1}, manage_env=True + ) as opt: + opt.solve(self.model, options={"MIPFocus": 2}) + + # Method should not be set again, but MIPFocus was changed. + # OutputFlag is explicitly set on the model. + assert envparams == {"Method": 2, "MIPFocus": 1} + assert modelparams == {"MIPFocus": 2, "OutputFlag": 0} + + # Try an erroneous parameter setting to ensure parameters go through in all + # cases. Expect an error to indicate pyomo tried to set the parameter. + + def test_param_changes_1(self): + # Default env: parameters set on model at solve time + with SolverFactory("gurobi_direct", options={"Method": -100}) as opt: + with self.assertRaisesRegex(gp.GurobiError, "Unable to set"): + opt.solve(self.model) + + def test_param_changes_2(self): + # Note that this case throws an ApplicationError instead of a + # GurobiError since the bad parameter value prevents the environment + # from starting + # Managed env: parameters set on env at solve time + with SolverFactory( + "gurobi_direct", options={"Method": -100}, manage_env=True + ) as opt: + with self.assertRaisesRegex(ApplicationError, "Unable to set"): + opt.solve(self.model) + + def test_param_changes_3(self): + # Default env: parameters passed to solve() + with SolverFactory("gurobi_direct") as opt: + with self.assertRaisesRegex(gp.GurobiError, "Unable to set"): + opt.solve(self.model, options={"Method": -100}) + + def test_param_changes_4(self): + # Managed env: parameters passed to solve() + with SolverFactory("gurobi_direct", manage_env=True) as opt: + with self.assertRaisesRegex(gp.GurobiError, "Unable to set"): + opt.solve(self.model, options={"Method": -100}) + + +@unittest.skipIf(not gurobipy_available, "gurobipy is not available") +class GurobiEnvironmentTests(GurobiBase): + # Test handling of gurobi environments + + def assert_optimal_result(self, results): + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.optimal + ) + + def test_init_default_env(self): + # available() calls with the default env shouldn't need a repeat check + with patch("gurobipy.Model") as PatchModel: + with SolverFactory("gurobi_direct") as opt: + opt.available() + opt.available() + PatchModel.assert_called_once_with() + + def test_close_global(self): + # method releases the license and syncs the flag + with patch("gurobipy.Model") as PatchModel, patch( + "gurobipy.disposeDefaultEnv" + ) as patch_dispose: + with SolverFactory("gurobi_direct") as opt: + opt.available() + opt.available() + PatchModel.assert_called_once_with() + patch_dispose.assert_not_called() + + # close default environment + opt.close_global() + patch_dispose.assert_called_once_with() + + # _default_env_started flag was correctly synced, so available() is + # checked again + with patch("gurobipy.Model") as PatchModel, patch( + "gurobipy.disposeDefaultEnv" + ) as patch_dispose: + with SolverFactory("gurobi_direct") as opt: + opt.available() + opt.available() + PatchModel.assert_called_once_with() + patch_dispose.assert_not_called() + + def test_persisted_license_failure(self): + # Gurobi error message should come through in the exception + # Failure to start an environment should not be persistent + + with patch( + "gurobipy.Model", side_effect=gp.GurobiError(NO_LICENSE, "nolicense") + ): + with SolverFactory("gurobi_direct") as opt: + with self.assertRaisesRegex(ApplicationError, "nolicense"): + opt.solve(self.model) + + with SolverFactory("gurobi_direct") as opt: + results = opt.solve(self.model) + self.assert_optimal_result(results) + + def test_persisted_license_failure_managed(self): + # Gurobi error message should come through in the exception + # Failure to start an environment should not be persistent + + with patch("gurobipy.Env", side_effect=gp.GurobiError(NO_LICENSE, "nolicense")): + with SolverFactory("gurobi_direct", manage_env=True) as opt: + with self.assertRaisesRegex(ApplicationError, "nolicense"): + opt.solve(self.model) + + with SolverFactory("gurobi_direct", manage_env=True) as opt: + results = opt.solve(self.model) + self.assert_optimal_result(results) + self.assertEqual(results.solver.status, SolverStatus.ok) + + def test_context(self): + # Context management should close the gurobi environment + + with gp.Env() as use_env: + with patch("gurobipy.Env", return_value=use_env): + with SolverFactory("gurobi_direct", manage_env=True) as opt: + results = opt.solve(self.model) + self.assert_optimal_result(results) + + # Environment was closed (cannot be restarted) + with self.assertRaises(gp.GurobiError): + use_env.start() + + def test_close(self): + # Manual close() call should close the gurobi environment + + with gp.Env() as use_env: + with patch("gurobipy.Env", return_value=use_env): + opt = SolverFactory("gurobi_direct", manage_env=True) + try: + results = opt.solve(self.model) + self.assert_optimal_result(results) + finally: + opt.close() + + # Environment was closed (cannot be restarted) + with self.assertRaises(gp.GurobiError): + use_env.start() + + @unittest.skipIf(single_use_license(), reason="test requires multi-use license") + def test_multiple_solvers_managed(self): + # Multiple managed solvers will create their own envs + + with SolverFactory("gurobi_direct", manage_env=True) as opt1, SolverFactory( + "gurobi_direct", manage_env=True + ) as opt2: + results1 = opt1.solve(self.model) + self.assert_optimal_result(results1) + results2 = opt2.solve(self.model) + self.assert_optimal_result(results2) + + def test_multiple_solvers_nonmanaged(self): + # Multiple solvers will share the default environment + + with SolverFactory("gurobi_direct") as opt1, SolverFactory( + "gurobi_direct" + ) as opt2: + results1 = opt1.solve(self.model) + self.assert_optimal_result(results1) + results2 = opt2.solve(self.model) + self.assert_optimal_result(results2) + + @unittest.skipIf(single_use_license(), reason="test requires multi-use license") + def test_managed_env(self): + # Test that manage_env=True creates its own environment + + # Set parameters on the default environment + gp.setParam("IterationLimit", 100) + + # On the patched environment, solve times out due to parameter setting + with gp.Env(params={"IterationLimit": 0, "Presolve": 0}) as use_env, patch( + "gurobipy.Env", return_value=use_env + ): + with SolverFactory("gurobi_direct", manage_env=True) as opt: + results = opt.solve(self.model) + self.assertEqual(results.solver.status, SolverStatus.aborted) + self.assertEqual( + results.solver.termination_condition, + TerminationCondition.maxIterations, + ) + + def test_nonmanaged_env(self): + # Test that manage_env=False (default) uses the default environment + + # Set parameters on the default environment + gp.setParam("IterationLimit", 0) + gp.setParam("Presolve", 0) + + # Using the default env, solve times out due to parameter setting + with SolverFactory("gurobi_direct") as opt: + results = opt.solve(self.model) + self.assertEqual(results.solver.status, SolverStatus.aborted) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.maxIterations + ) + + +@unittest.skipIf(not gurobipy_available, "gurobipy is not available") +@unittest.skipIf(not single_use_license(), reason="test needs a single use license") +class GurobiSingleUseTests(GurobiBase): + # Integration tests for Gurobi single-use licenses (useful for checking all Gurobi + # environments were correctly freed). These tests are not run in pyomo's CI. Each + # test in this class has an equivalent in GurobiEnvironmentTests which tests the + # same behaviour via monkey patching. + + def test_persisted_license_failure(self): + # Solver should allow retries to start the environment, instead of + # persisting the same failure (default env). + + with SolverFactory("gurobi_direct") as opt: + with gp.Env(): + # Expected to fail: there is another environment open so the + # default env cannot be started. + with self.assertRaises(ApplicationError): + opt.solve(self.model) + # Should not raise an error, since the other environment has been freed. + opt.solve(self.model) + + def test_persisted_license_failure_managed(self): + # Solver should allow retries to start the environment, instead of + # persisting the same failure (managed env). + + with SolverFactory("gurobi_direct", manage_env=True) as opt: + with gp.Env(): + # Expected to fail: there is another environment open so the + # default env cannot be started. + with self.assertRaises(ApplicationError): + opt.solve(self.model) + # Should not raise an error, since the other environment has been freed. + opt.solve(self.model) + + def test_context(self): + # Context management should close the gurobi environment. + with SolverFactory("gurobi_direct", manage_env=True) as opt: + opt.solve(self.model) + + # Environment closed, so another can be created + with gp.Env(): + pass + + def test_close(self): + # Manual close() call should close the gurobi environment. + opt = SolverFactory("gurobi_direct", manage_env=True) + try: + opt.solve(self.model) + finally: + opt.close() + + # Environment closed, so another can be created + with gp.Env(): + pass + + def test_multiple_solvers(self): + # One environment per solver would break this pattern. Test that + # global env is still used by default (manage_env=False) + + with SolverFactory("gurobi_direct") as opt1, SolverFactory( + "gurobi_direct" + ) as opt2: + opt1.solve(self.model) + opt2.solve(self.model) + + def test_multiple_models_leaky(self): + # Make sure all models are closed explicitly by the GurobiDirect instance. + + with SolverFactory("gurobi_direct", manage_env=True) as opt: + opt.solve(self.model) + # Leak a model reference, then create a new model. + # Pyomo should close the old model since it is no longed needed. + tmp = opt._solver_model + opt.solve(self.model) + + # Context manager properly closed all models and environments + with gp.Env(): + pass + + def test_close_global(self): + # If using the default environment, calling the close_global + # classmethod closes the environment, providing any other solvers + # have also been closed. + + opt1 = SolverFactory("gurobi_direct") + opt2 = SolverFactory("gurobi_direct") + try: + opt1.solve(self.model) + opt2.solve(self.model) + finally: + opt1.close() + opt2.close_global() + + # Context closed AND close_global called + with gp.Env(): + pass diff --git a/pyomo/solvers/tests/checks/test_gurobi_persistent.py b/pyomo/solvers/tests/checks/test_gurobi_persistent.py index f0485be4ced..9d69c1dd920 100644 --- a/pyomo/solvers/tests/checks/test_gurobi_persistent.py +++ b/pyomo/solvers/tests/checks/test_gurobi_persistent.py @@ -12,9 +12,13 @@ import pyomo.common.unittest as unittest import pyomo.environ as pyo from pyomo.core.expr.taylor_series import taylor_series_expansion + try: import gurobipy - m = gurobipy.Model() + + with gurobipy.Env(): + pass + gurobipy_available = True except: gurobipy_available = False @@ -27,7 +31,7 @@ def test_basics(self): m.x = pyo.Var(bounds=(-10, 10)) m.y = pyo.Var() m.obj = pyo.Objective(expr=m.x**2 + m.y**2) - m.c1 = pyo.Constraint(expr=m.y >= 2*m.x + 1) + m.c1 = pyo.Constraint(expr=m.y >= 2 * m.x + 1) opt = pyo.SolverFactory('gurobi_persistent') opt.set_instance(m) @@ -192,7 +196,7 @@ def test_update4(self): @unittest.skipIf(not gurobipy_available, "gurobipy is not available") def test_update5(self): m = pyo.ConcreteModel() - m.a = pyo.Set(initialize=[1,2,3], ordered=True) + m.a = pyo.Set(initialize=[1, 2, 3], ordered=True) m.x = pyo.Var(m.a, within=pyo.Binary) m.y = pyo.Var(within=pyo.Binary) m.obj = pyo.Objective(expr=m.y) @@ -214,7 +218,7 @@ def test_update5(self): @unittest.skipIf(not gurobipy_available, "gurobipy is not available") def test_update6(self): m = pyo.ConcreteModel() - m.a = pyo.Set(initialize=[1,2,3], ordered=True) + m.a = pyo.Set(initialize=[1, 2, 3], ordered=True) m.x = pyo.Var(m.a, within=pyo.Binary) m.y = pyo.Var(within=pyo.Binary) m.obj = pyo.Objective(expr=m.y) @@ -295,12 +299,12 @@ def test_callback(self): m = pyo.ConcreteModel() m.x = pyo.Var(bounds=(0, 4)) m.y = pyo.Var(within=pyo.Integers, bounds=(0, None)) - m.obj = pyo.Objective(expr=2*m.x + m.y) + m.obj = pyo.Objective(expr=2 * m.x + m.y) m.cons = pyo.ConstraintList() def _add_cut(xval): m.x.value = xval - return m.cons.add(m.y >= taylor_series_expansion((m.x - 2)**2)) + return m.cons.add(m.y >= taylor_series_expansion((m.x - 2) ** 2)) _add_cut(0) _add_cut(4) @@ -313,7 +317,7 @@ def _add_cut(xval): def _my_callback(cb_m, cb_opt, cb_where): if cb_where == gurobipy.GRB.Callback.MIPSOL: cb_opt.cbGetSolution(vars=[m.x, m.y]) - if m.y.value < (m.x.value - 2)**2 - 1e-6: + if m.y.value < (m.x.value - 2) ** 2 - 1e-6: cb_opt.cbLazy(_add_cut(m.x.value)) opt.set_callback(_my_callback) @@ -346,7 +350,7 @@ def test_add_column_exceptions(self): m = pyo.ConcreteModel() m.x = pyo.Var() m.c = pyo.Constraint(expr=(0, m.x, 1)) - m.ci = pyo.Constraint([1,2], rule=lambda m,i:(0,m.x,i+1)) + m.ci = pyo.Constraint([1, 2], rule=lambda m, i: (0, m.x, i + 1)) m.cd = pyo.Constraint(expr=(0, -m.x, 1)) m.cd.deactivate() m.obj = pyo.Objective(expr=-m.x) @@ -360,7 +364,7 @@ def test_add_column_exceptions(self): m2 = pyo.ConcreteModel() m2.y = pyo.Var() - m2.c = pyo.Constraint(expr=(0,m.x,1)) + m2.c = pyo.Constraint(expr=(0, m.x, 1)) # different model than attached to opt self.assertRaises(RuntimeError, opt.add_column, m2, m2.y, 0, [], []) @@ -372,8 +376,8 @@ def test_add_column_exceptions(self): self.assertRaises(RuntimeError, opt.add_column, m, z, -2, [m.c, z], [1]) m.y = pyo.Var() - # len(coefficents) == len(constraints) - self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c], [1,2]) + # len(coefficients) == len(constraints) + self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c], [1, 2]) self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c, z], [1]) # add indexed constraint diff --git a/pyomo/solvers/tests/checks/test_no_solution_behavior.py b/pyomo/solvers/tests/checks/test_no_solution_behavior.py index f96c8efb340..9ba8e86a013 100644 --- a/pyomo/solvers/tests/checks/test_no_solution_behavior.py +++ b/pyomo/solvers/tests/checks/test_no_solution_behavior.py @@ -11,11 +11,13 @@ import os import types + try: import new - new_available=True + + new_available = True except: - new_available=False + new_available = False import pyomo.common.unittest as unittest @@ -26,7 +28,7 @@ from io import StringIO # The test directory -thisDir = os.path.dirname(os.path.abspath( __file__ )) +thisDir = os.path.dirname(os.path.abspath(__file__)) # Cleanup Expected Failure Results Files _cleanup_expected_failures = True @@ -36,11 +38,7 @@ # A function that returns a function that gets # added to a test class. # -def create_method(model, - solver, - io, - test_case): - +def create_method(model, solver, io, test_case): is_expected_failure = test_case.status == 'expected failure' # @@ -66,11 +64,11 @@ def failed_solve_test(self): test_case.testcase.io_options, test_case.testcase.options, symbolic_labels, - load_solutions) + load_solutions, + ) model_class.post_solve_test_validation(self, results) if len(results.solution) == 0: - self.assertIn("No solution is available", - out.getvalue()) + self.assertIn("No solution is available", out.getvalue()) else: # Note ASL solvers might still return a solution # file with garbage values in it for a failed solve @@ -78,19 +76,25 @@ def failed_solve_test(self): # Skip this test if the status is 'skip' if test_case.status == 'skip': + def return_test(self): return self.skipTest(test_case.msg) + elif is_expected_failure: + @unittest.expectedFailure def return_test(self): return failed_solve_test(self) + else: # Return a normal test def return_test(self): return failed_solve_test(self) + unittest.pytest.mark.solver(solver)(return_test) return return_test + cls = None # @@ -123,7 +127,7 @@ def return_test(self): # a change in load_solutions behavior is # propagated into that framework. if "_kernel" in cls.__name__: - test_name = "test_"+solver+"_"+io + test_name = "test_" + solver + "_" + io test_method = create_method(model, solver, io, value) if test_method is not None: setattr(cls, test_name, test_method) diff --git a/pyomo/solvers/tests/checks/test_pickle.py b/pyomo/solvers/tests/checks/test_pickle.py index b5f50ba68a3..d8551b34740 100644 --- a/pyomo/solvers/tests/checks/test_pickle.py +++ b/pyomo/solvers/tests/checks/test_pickle.py @@ -11,29 +11,28 @@ import pickle import types + try: import new - new_available=True + + new_available = True except: - new_available=False + new_available = False import pyomo.common.unittest as unittest from pyomo.solvers.tests.models.base import all_models from pyomo.solvers.tests.testcases import generate_scenarios + # # A function that returns a function that gets # added to a test class. # -def create_method(model, solver, io, - test_case, - symbolic_labels): - +def create_method(model, solver, io, test_case, symbolic_labels): # Ignore expected failures? is_expected_failure = False def pickle_test(self): - # Instantiate the model class model_class = test_case.model() @@ -41,16 +40,19 @@ def pickle_test(self): model_class.generate_model(test_case.testcase.import_suffixes) model_class.warmstart_model() - load_solutions = (not model_class.solve_should_fail) and \ - (test_case.status != 'expected failure') + load_solutions = (not model_class.solve_should_fail) and ( + test_case.status != 'expected failure' + ) try: - opt, status = model_class.solve(solver, - io, - test_case.testcase.io_options, - test_case.testcase.options, - symbolic_labels, - load_solutions) + opt, status = model_class.solve( + solver, + io, + test_case.testcase.io_options, + test_case.testcase.options, + symbolic_labels, + load_solutions, + ) except: if test_case.status == 'expected failure': return @@ -62,69 +64,86 @@ def pickle_test(self): # instance1 = m.clone() model_class.model = instance1 - opt, status1 = model_class.solve(solver, - io, - test_case.testcase.io_options, - test_case.testcase.options, - symbolic_labels, - load_solutions) - inst, res = pickle.loads(pickle.dumps([instance1,status1])) + opt, status1 = model_class.solve( + solver, + io, + test_case.testcase.io_options, + test_case.testcase.options, + symbolic_labels, + load_solutions, + ) + inst, res = pickle.loads(pickle.dumps([instance1, status1])) # # operate on an unpickled model # # try to pickle then unpickle instance instance2 = pickle.loads(pickle.dumps(instance1)) - self.assertNotEqual(id(instance1),id(instance2)) + self.assertNotEqual(id(instance1), id(instance2)) model_class.model = instance2 - opt, status2 = model_class.solve(solver, - io, - test_case.testcase.io_options, - test_case.testcase.options, - symbolic_labels, - load_solutions) + opt, status2 = model_class.solve( + solver, + io, + test_case.testcase.io_options, + test_case.testcase.options, + symbolic_labels, + load_solutions, + ) # try to pickle the instance and status, # then unpickle and load status - inst, res = pickle.loads(pickle.dumps([instance2,status2])) + inst, res = pickle.loads(pickle.dumps([instance2, status2])) # # operate on a clone of an unpickled model # instance3 = instance2.clone() - self.assertNotEqual(id(instance2),id(instance3)) + self.assertNotEqual(id(instance2), id(instance3)) model_class.model = instance3 - opt, status3 = model_class.solve(solver, - io, - test_case.testcase.io_options, - test_case.testcase.options, - symbolic_labels, - load_solutions) + opt, status3 = model_class.solve( + solver, + io, + test_case.testcase.io_options, + test_case.testcase.options, + symbolic_labels, + load_solutions, + ) # try to pickle the instance and status, # then unpickle and load status - inst, res = pickle.loads(pickle.dumps([instance3,status3])) + inst, res = pickle.loads(pickle.dumps([instance3, status3])) # Skip this test if the status is 'skip' if test_case.status == 'skip': + def return_test(self): return self.skipTest(test_case.msg) + elif is_expected_failure: + @unittest.expectedFailure def return_test(self): return pickle_test(self) + else: # If this solver is in demo mode size = getattr(test_case.model, 'size', (None, None, None)) for prb, sol in zip(size, test_case.demo_limits): if prb and sol and prb > sol: + def return_test(self): - return self.skipTest("Problem is too large for unlicensed %s solver" % solver) + return self.skipTest( + "Problem is too large for unlicensed %s solver" % solver + ) + break else: + def return_test(self): return pickle_test(self) + unittest.pytest.mark.solver(solver)(return_test) return return_test + cls = None # @@ -152,14 +171,14 @@ def return_test(self): cls = driver[model] # Symbolic labels - test_name = "test_"+solver+"_"+io +"_symbolic_labels" + test_name = "test_" + solver + "_" + io + "_symbolic_labels" test_method = create_method(model, solver, io, value, True) if test_method is not None: setattr(cls, test_name, test_method) test_method = None # Non-symbolic labels - test_name = "test_"+solver+"_"+io +"_nonsymbolic_labels" + test_name = "test_" + solver + "_" + io + "_nonsymbolic_labels" test_method = create_method(model, solver, io, value, False) if test_method is not None: setattr(cls, test_name, test_method) diff --git a/pyomo/solvers/tests/checks/test_writers.py b/pyomo/solvers/tests/checks/test_writers.py index 249986da1ac..e406e07a4d6 100644 --- a/pyomo/solvers/tests/checks/test_writers.py +++ b/pyomo/solvers/tests/checks/test_writers.py @@ -12,11 +12,13 @@ import os from os.path import join, dirname, abspath import types + try: import new - new_available=True + + new_available = True except: - new_available=False + new_available = False import pyomo.common.unittest as unittest from pyomo.opt import TerminationCondition @@ -25,28 +27,23 @@ from pyomo.core.kernel.block import IBlock # The test directory -thisDir = dirname(abspath( __file__ )) +thisDir = dirname(abspath(__file__)) # Cleanup Expected Failure Results Files _cleanup_expected_failures = True + # # A function that returns a function that gets # added to a test class. # -def create_method(test_name, model, - solver, - io, - test_case, - symbolic_labels): - +def create_method(test_name, model, solver, io, test_case, symbolic_labels): is_expected_failure = test_case.status == 'expected failure' # # Create a function that executes the test # def writer_test(self): - # Create the model test class model_class = test_case.model() @@ -67,29 +64,38 @@ def writer_test(self): test_case.testcase.io_options, test_case.testcase.options, symbolic_labels, - load_solutions) + load_solutions, + ) termination_condition = results['Solver'][0]['termination condition'] model_class.post_solve_test_validation(self, results) - if termination_condition == TerminationCondition.unbounded or \ - termination_condition == TerminationCondition.infeasible or \ - termination_condition == TerminationCondition.infeasibleOrUnbounded: + if ( + termination_condition == TerminationCondition.unbounded + or termination_condition == TerminationCondition.infeasible + or termination_condition == TerminationCondition.infeasibleOrUnbounded + ): return # validate the solution returned by the solver if isinstance(model_class.model, IBlock): model_class.model.load_solution(results.Solution) else: - model_class.model.solutions.load_from(results, default_variable_value=opt.default_variable_value()) - model_class.save_current_solution(save_filename, suffixes=model_class.test_suffixes) - rc = model_class.validate_current_solution(suffixes=model_class.test_suffixes, - exclude_suffixes=test_case.exclude_suffixes) + model_class.model.solutions.load_from( + results, default_variable_value=opt.default_variable_value() + ) + model_class.save_current_solution( + save_filename, suffixes=model_class.test_suffixes + ) + rc = model_class.validate_current_solution( + suffixes=model_class.test_suffixes, + exclude_suffixes=test_case.exclude_suffixes, + ) if is_expected_failure: if rc[0]: self.fail( "\nTest model '%s' was marked as an expected " - "failure but no failure occured. The " + "failure but no failure occurred. The " "reason given for the expected failure " "is:\n\n****\n%s\n****\n\n" "Please remove this case as an expected " @@ -106,11 +112,18 @@ def writer_test(self): model_class.model.solutions.store_to(results) except ValueError: pass - self.fail("Solution mismatch for plugin "+test_name - +', '+io+ - " interface and problem type " - +model_class.description+"\n"+rc[1]+"\n" - +(str(results.Solution(0)) if len(results.solution) else "No Solution")) + self.fail( + "Solution mismatch for plugin " + + test_name + + ', ' + + io + + " interface and problem type " + + model_class.description + + "\n" + + rc[1] + + "\n" + + (str(results.Solution(0)) if len(results.solution) else "No Solution") + ) # cleanup if the test passed try: @@ -120,26 +133,37 @@ def writer_test(self): # Skip this test if the status is 'skip' if test_case.status == 'skip': + def return_test(self): return self.skipTest(test_case.msg) + elif is_expected_failure: + @unittest.expectedFailure def return_test(self): return writer_test(self) + else: # Skip if solver is in demo mode size = getattr(test_case.model, 'size', (None, None, None)) for prb, sol in zip(size, test_case.demo_limits): if (prb and sol) and prb > sol: + def return_test(self): - return self.skipTest("Problem is too large for unlicensed %s solver" % solver) + return self.skipTest( + "Problem is too large for unlicensed %s solver" % solver + ) + break else: + def return_test(self): return writer_test(self) + unittest.pytest.mark.solver(solver)(return_test) return return_test + cls = None # @@ -168,14 +192,14 @@ def return_test(self): cls = driver[model] # Symbolic labels - test_name = "test_"+solver+"_"+io +"_symbolic_labels" + test_name = "test_" + solver + "_" + io + "_symbolic_labels" test_method = create_method(test_name, model, solver, io, value, True) if test_method is not None: setattr(cls, test_name, test_method) test_method = None # Non-symbolic labels - test_name = "test_"+solver+"_"+io +"_nonsymbolic_labels" + test_name = "test_" + solver + "_" + io + "_nonsymbolic_labels" test_method = create_method(test_name, model, solver, io, value, False) if test_method is not None: setattr(cls, test_name, test_method) diff --git a/pyomo/solvers/tests/checks/test_xpress_persistent.py b/pyomo/solvers/tests/checks/test_xpress_persistent.py index 96b9cc196af..cd9c30fc73b 100644 --- a/pyomo/solvers/tests/checks/test_xpress_persistent.py +++ b/pyomo/solvers/tests/checks/test_xpress_persistent.py @@ -2,6 +2,8 @@ import pyomo.environ as pe from pyomo.core.expr.taylor_series import taylor_series_expansion from pyomo.solvers.plugins.solvers.xpress_direct import xpress_available +from pyomo.opt.results.solver import TerminationCondition, SolverStatus + class TestXpressPersistent(unittest.TestCase): @unittest.skipIf(not xpress_available, "xpress is not available") @@ -10,7 +12,7 @@ def test_basics(self): m.x = pe.Var(bounds=(-10, 10)) m.y = pe.Var() m.obj = pe.Objective(expr=m.x**2 + m.y**2) - m.c1 = pe.Constraint(expr=m.y >= 2*m.x + 1) + m.c1 = pe.Constraint(expr=m.y >= 2 * m.x + 1) opt = pe.SolverFactory('xpress_persistent') opt.set_instance(m) @@ -137,7 +139,7 @@ def test_add_remove_lconstraint(self): @unittest.skipIf(not xpress_available, "xpress is not available") def test_add_remove_sosconstraint(self): m = pe.ConcreteModel() - m.a = pe.Set(initialize=[1,2,3], ordered=True) + m.a = pe.Set(initialize=[1, 2, 3], ordered=True) m.x = pe.Var(m.a, within=pe.Binary) m.y = pe.Var(within=pe.Binary) m.obj = pe.Objective(expr=m.y) @@ -156,7 +158,7 @@ def test_add_remove_sosconstraint(self): @unittest.skipIf(not xpress_available, "xpress is not available") def test_add_remove_sosconstraint2(self): m = pe.ConcreteModel() - m.a = pe.Set(initialize=[1,2,3], ordered=True) + m.a = pe.Set(initialize=[1, 2, 3], ordered=True) m.x = pe.Var(m.a, within=pe.Binary) m.y = pe.Var(within=pe.Binary) m.obj = pe.Objective(expr=m.y) @@ -217,7 +219,7 @@ def test_add_column_exceptions(self): m = pe.ConcreteModel() m.x = pe.Var() m.c = pe.Constraint(expr=(0, m.x, 1)) - m.ci = pe.Constraint([1,2], rule=lambda m,i:(0,m.x,i+1)) + m.ci = pe.Constraint([1, 2], rule=lambda m, i: (0, m.x, i + 1)) m.cd = pe.Constraint(expr=(0, -m.x, 1)) m.cd.deactivate() m.obj = pe.Objective(expr=-m.x) @@ -231,7 +233,7 @@ def test_add_column_exceptions(self): m2 = pe.ConcreteModel() m2.y = pe.Var() - m2.c = pe.Constraint(expr=(0,m.x,1)) + m2.c = pe.Constraint(expr=(0, m.x, 1)) # different model than attached to opt self.assertRaises(RuntimeError, opt.add_column, m2, m2.y, 0, [], []) @@ -243,8 +245,8 @@ def test_add_column_exceptions(self): self.assertRaises(RuntimeError, opt.add_column, m, z, -2, [m.c, z], [1]) m.y = pe.Var() - # len(coefficents) == len(constraints) - self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c], [1,2]) + # len(coefficients) == len(constraints) + self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c], [1, 2]) self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c, z], [1]) # add indexed constraint @@ -261,3 +263,55 @@ def test_add_column_exceptions(self): opt.add_var(m.y) # var already in solver model self.assertRaises(RuntimeError, opt.add_column, m, m.y, -2, [m.c], [1]) + + @unittest.skipIf(not xpress_available, "xpress is not available") + def test_nonconvexqp_locally_optimal(self): + """Test non-convex QP for which xpress_direct should find a locally + optimal solution.""" + m = pe.ConcreteModel() + m.x1 = pe.Var() + m.x2 = pe.Var() + m.x3 = pe.Var() + + m.obj = pe.Objective(rule=lambda m: 2 * m.x1 + m.x2 + m.x3, sense=pe.minimize) + m.equ1 = pe.Constraint(rule=lambda m: m.x1 + m.x2 + m.x3 == 1) + m.cone = pe.Constraint(rule=lambda m: m.x2 * m.x2 + m.x3 * m.x3 <= m.x1 * m.x1) + m.equ2 = pe.Constraint(rule=lambda m: m.x1 >= 0) + + opt = pe.SolverFactory('xpress_direct') + opt.options['XSLP_SOLVER'] = 0 + + results = opt.solve(m) + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.locallyOptimal + ) + + # Cannot test exact values since the may be different depending on + # random effects. So just test all are non-zero. + self.assertGreater(m.x1.value, 0.0) + self.assertGreater(m.x2.value, 0.0) + self.assertGreater(m.x3.value, 0.0) + + @unittest.skipIf(not xpress_available, "xpress is not available") + def test_nonconvexqp_infeasible(self): + """Test non-convex QP which xpress_direct should prove infeasible.""" + m = pe.ConcreteModel() + m.x1 = pe.Var() + m.x2 = pe.Var() + m.x3 = pe.Var() + + m.obj = pe.Objective(rule=lambda m: 2 * m.x1 + m.x2 + m.x3, sense=pe.minimize) + m.equ1a = pe.Constraint(rule=lambda m: m.x1 + m.x2 + m.x3 == 1) + m.equ1b = pe.Constraint(rule=lambda m: m.x1 + m.x2 + m.x3 == -1) + m.cone = pe.Constraint(rule=lambda m: m.x2 * m.x2 + m.x3 * m.x3 <= m.x1 * m.x1) + m.equ2 = pe.Constraint(rule=lambda m: m.x1 >= 0) + + opt = pe.SolverFactory('xpress_direct') + opt.options['XSLP_SOLVER'] = 0 + + results = opt.solve(m) + self.assertEqual(results.solver.status, SolverStatus.ok) + self.assertEqual( + results.solver.termination_condition, TerminationCondition.infeasible + ) diff --git a/pyomo/solvers/tests/mip/model.py b/pyomo/solvers/tests/mip/model.py index 91e1ce9a427..389151160b8 100644 --- a/pyomo/solvers/tests/mip/model.py +++ b/pyomo/solvers/tests/mip/model.py @@ -13,10 +13,13 @@ model = AbstractModel() -model.A = RangeSet(1,4) +model.A = RangeSet(1, 4) model.x = Var(model.A) + def obj_rule(model): return sum_product(model.x) + + model.obj = Objective(rule=obj_rule) diff --git a/pyomo/solvers/tests/mip/test_asl.py b/pyomo/solvers/tests/mip/test_asl.py index f08b0fe68e5..1e6a9e53030 100644 --- a/pyomo/solvers/tests/mip/test_asl.py +++ b/pyomo/solvers/tests/mip/test_asl.py @@ -27,28 +27,34 @@ deleteFiles = True old_ignore_time = None + + def setUpModule(): global old_ignore_time old_ignore_time = SolverResults.default_print_options.ignore_time SolverResults.default_print_options.ignore_time = True + def tearDownModule(): SolverResults.default_print_options.ignore_time = old_ignore_time + cplexamp_available = False -class mock_all(unittest.TestCase): + +class mock_all(unittest.TestCase): @classmethod def setUpClass(cls): global cplexamp_available import pyomo.environ from pyomo.solvers.tests.solvers import test_solver_cases + cplexamp_available = test_solver_cases('cplex', 'nl').available def setUp(self): self.do_setup(False) - def do_setup(self,flag): + def do_setup(self, flag): TempfileManager.push() if flag: if not cplexamp_available: @@ -62,76 +68,74 @@ def tearDown(self): self.asl = None def test_path(self): - """ Verify that the ASL path is what is expected """ + """Verify that the ASL path is what is expected""" if type(self.asl) == 'ASL': - self.assertEqual(self.asl.executable.split(os.sep)[-1], - "ASL"+pyomo.common.executable_extension) + self.assertEqual( + self.asl.executable.split(os.sep)[-1], + "ASL" + pyomo.common.executable_extension, + ) def test_solve4(self): - """ Test ASL - test4.nl """ + """Test ASL - test4.nl""" _log = TempfileManager.create_tempfile(".test_solve4.log") _out = TempfileManager.create_tempfile(".test_solve4.txt") - results = self.asl.solve(join(currdir, "test4.nl"), - logfile=_log, - suffixes=['.*']) + results = self.asl.solve( + join(currdir, "test4.nl"), logfile=_log, suffixes=['.*'] + ) results.write(filename=_out, times=False, format='json') _baseline = join(currdir, "test4_asl.txt") with open(_out, 'r') as out, open(_baseline, 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-4, - allow_second_superset=True) + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-4, allow_second_superset=True + ) # # This test is disabled, but it's useful for interactively exercising # the option specifications of a solver # def Xtest_options(self): - """ Test ASL options behavior """ - results = self.asl.solve(currdir+"bell3a.mps", - logfile=currdir+"test_options.log", - options="sec=0.1 foo=1 bar='a=b c=d' xx_zz=yy", - suffixes=['.*']) - results.write(filename=currdir+"test_options.txt", - times=False) + """Test ASL options behavior""" + results = self.asl.solve( + currdir + "bell3a.mps", + logfile=currdir + "test_options.log", + options="sec=0.1 foo=1 bar='a=b c=d' xx_zz=yy", + suffixes=['.*'], + ) + results.write(filename=currdir + "test_options.txt", times=False) _out, _log = join(currdir, "test_options.txt"), join(currdir, "test4_asl.txt") - self.assertTrue(cmp(_out, _log), - msg="Files %s and %s differ" % (_out, _log)) - #os.remove(currdir+"test4.sol") - #os.remove(currdir+"test_solve4.log") + self.assertTrue(cmp(_out, _log), msg="Files %s and %s differ" % (_out, _log)) + # os.remove(currdir+"test4.sol") + # os.remove(currdir+"test_solve4.log") def test_error1(self): - """ Bad results format """ + """Bad results format""" try: model = ConcreteModel() - results = self.asl.solve(model, - format=ResultsFormat.sol, - suffixes=['.*']) + results = self.asl.solve(model, format=ResultsFormat.sol, suffixes=['.*']) self.fail("test_error1") except ValueError: pass def test_error2(self): - """ Bad solve option """ + """Bad solve option""" try: model = ConcreteModel() - results = self.asl.solve(model, - foo="bar") + results = self.asl.solve(model, foo="bar") self.fail("test_error2") except ValueError: pass def test_error3(self): - """ Bad solve option """ + """Bad solve option""" try: - results = self.asl.solve(currdir+"model.py", - foo="bar") + results = self.asl.solve(currdir + "model.py", foo="bar") self.fail("test_error3") except ValueError: pass -class mip_all(mock_all): +class mip_all(mock_all): def setUp(self): self.do_setup(True) diff --git a/pyomo/solvers/tests/mip/test_convert.py b/pyomo/solvers/tests/mip/test_convert.py index 4fa48399bf9..cd916da29f2 100644 --- a/pyomo/solvers/tests/mip/test_convert.py +++ b/pyomo/solvers/tests/mip/test_convert.py @@ -28,59 +28,60 @@ from pyomo.opt import ProblemFormat, ConverterError, convert_problem from pyomo.common import Executable + def filter(line): return 'Problem' in line or line.startswith('NAME') + currdir = this_file_dir() deleteFiles = True -class MockArg(object): +class MockArg(object): def __init__(self): pass def valid_problem_types(self): return [ProblemFormat.pyomo] - def write(self,filename="", format=None, solver_capability=None, io_options={}): - return (filename,None) + def write(self, filename="", format=None, solver_capability=None, io_options={}): + return (filename, None) -class MockArg2(MockArg): +class MockArg2(MockArg): def valid_problem_types(self): return [ProblemFormat.nl] - def write(self,filename="", format=None, solver_capability=None, io_options={}): - OUTPUT=open(filename,"w") - INPUT=open(join(currdir, "test4.nl")) + def write(self, filename="", format=None, solver_capability=None, io_options={}): + OUTPUT = open(filename, "w") + INPUT = open(join(currdir, "test4.nl")) for line in INPUT: OUTPUT.write(line) OUTPUT.close() INPUT.close() - return (filename,None) + return (filename, None) -class MockArg3(MockArg): +class MockArg3(MockArg): def valid_problem_types(self): return [ProblemFormat.mod] - def write(self,filename="", format=None, solver_capability=None, io_options={}): - return (filename,None) + def write(self, filename="", format=None, solver_capability=None, io_options={}): + return (filename, None) -class MockArg4(MockArg): - def write(self,filename="", format=None, solver_capability=None, io_options={}): - OUTPUT=open(filename,"w") - INPUT=open(join(currdir, "test4.nl")) +class MockArg4(MockArg): + def write(self, filename="", format=None, solver_capability=None, io_options={}): + OUTPUT = open(filename, "w") + INPUT = open(join(currdir, "test4.nl")) for line in INPUT: OUTPUT.write(line) OUTPUT.close() INPUT.close() - return (filename,None) + return (filename, None) class Test(unittest.TestCase): - @classmethod def setUpClass(cls): import pyomo.environ @@ -92,33 +93,35 @@ def tearDown(self): TempfileManager.pop(remove=deleteFiles or self.currentTestPassed()) def test_nl_nl1(self): - #""" Convert from NL to NL """ - ans = convert_problem( ("test4.nl",), None, [ProblemFormat.nl]) - self.assertEqual(ans[0],("test4.nl",)) + # """ Convert from NL to NL """ + ans = convert_problem(("test4.nl",), None, [ProblemFormat.nl]) + self.assertEqual(ans[0], ("test4.nl",)) def test_nl_nl2(self): - #""" Convert from NL to NL """ - ans = convert_problem( ("test4.nl","tmp.nl"), None, [ProblemFormat.nl]) - self.assertEqual(ans[0],("test4.nl","tmp.nl")) + # """ Convert from NL to NL """ + ans = convert_problem(("test4.nl", "tmp.nl"), None, [ProblemFormat.nl]) + self.assertEqual(ans[0], ("test4.nl", "tmp.nl")) @unittest.skipUnless( - Executable("pico_convert").available(), 'pico_convert required') + Executable("pico_convert").available(), 'pico_convert required' + ) def test_nl_lp1(self): - #""" Convert from NL to LP """ - ans = convert_problem( - (join(currdir, "test4.nl"),), None, [ProblemFormat.cpxlp]) - self.assertEqual(ans[0][0][-15:],"pico_convert.lp") + # """ Convert from NL to LP """ + ans = convert_problem((join(currdir, "test4.nl"),), None, [ProblemFormat.cpxlp]) + self.assertEqual(ans[0][0][-15:], "pico_convert.lp") _out, _log = ans[0][0], join(currdir, "test1_convert.lp") - self.assertTrue(cmp(_out, _log), - msg="Files %s and %s differ" % (_out, _log)) + self.assertTrue(cmp(_out, _log), msg="Files %s and %s differ" % (_out, _log)) @unittest.skipUnless(Executable("glpsol").available(), 'glpsol required') def test_mod_lp1(self): - #""" Convert from MOD to LP """ + # """ Convert from MOD to LP """ ans = convert_problem( - (join(currdir, "test3.mod"),), None, [ProblemFormat.cpxlp]) + (join(currdir, "test3.mod"),), None, [ProblemFormat.cpxlp] + ) self.assertTrue(ans[0][0].endswith("glpsol.lp")) - with open(ans[0][0], 'r') as f1, open(join(currdir, "test2_convert.lp"), 'r') as f2: + with open(ans[0][0], 'r') as f1, open( + join(currdir, "test2_convert.lp"), 'r' + ) as f2: for line1, line2 in zip_longest(f1, f2): if 'Problem' in line1: continue @@ -126,12 +129,16 @@ def test_mod_lp1(self): @unittest.skipUnless(Executable("glpsol").available(), 'glpsol required') def test_mod_lp2(self): - #""" Convert from MOD+DAT to LP """ + # """ Convert from MOD+DAT to LP """ ans = convert_problem( (join(currdir, "test5.mod"), join(currdir, "test5.dat")), - None, [ProblemFormat.cpxlp]) + None, + [ProblemFormat.cpxlp], + ) self.assertTrue(ans[0][0].endswith("glpsol.lp")) - with open(ans[0][0], 'r') as f1, open(join(currdir, "test3_convert.lp"), 'r') as f2: + with open(ans[0][0], 'r') as f1, open( + join(currdir, "test3_convert.lp"), 'r' + ) as f2: for line1, line2 in zip_longest(f1, f2): if 'Problem' in line1: continue @@ -139,187 +146,229 @@ def test_mod_lp2(self): @unittest.skipUnless(Executable("ampl").available(), 'ampl required') def test_mod_nl1(self): - #""" Convert from MOD to NL """ - ans = convert_problem( - (join(currdir, "test3.mod"),), None, [ProblemFormat.nl]) + # """ Convert from MOD to NL """ + ans = convert_problem((join(currdir, "test3.mod"),), None, [ProblemFormat.nl]) self.assertTrue(ans[0][0].endswith('.nl')) - #self.assertFileEqualsBinaryFile(ans[0][0], join(currdir, "test_mod_nl1.nl") + # self.assertFileEqualsBinaryFile(ans[0][0], join(currdir, "test_mod_nl1.nl") @unittest.skipUnless(Executable("ampl").available(), 'ampl required') def test_mod_nl2(self): - #""" Convert from MOD+DAT to NL """ + # """ Convert from MOD+DAT to NL """ ans = convert_problem( (join(currdir, "test5.mod"), join(currdir, "test5.dat")), - None, [ProblemFormat.nl]) + None, + [ProblemFormat.nl], + ) self.assertTrue(ans[0][0].endswith('.nl')) - #self.assertTrue(cmp(ans[0][0], join(currdir, "test_mod_nl2.nl") + # self.assertTrue(cmp(ans[0][0], join(currdir, "test_mod_nl2.nl") def test_mock_lp1(self): - #""" Convert from Pyomo to LP """ - arg=MockArg() - ans = convert_problem( (arg, ProblemFormat.cpxlp,arg), None, [ProblemFormat.cpxlp]) - self.assertNotEqual(re.match(".*tmp.*pyomo.lp$",ans[0][0]), None) + # """ Convert from Pyomo to LP """ + arg = MockArg() + ans = convert_problem( + (arg, ProblemFormat.cpxlp, arg), None, [ProblemFormat.cpxlp] + ) + self.assertNotEqual(re.match(".*tmp.*pyomo.lp$", ans[0][0]), None) def test_pyomo_lp1(self): - #""" Convert from Pyomo to LP with file""" - ans = convert_problem( (join(currdir, 'model.py'), ProblemFormat.cpxlp,), None, [ProblemFormat.cpxlp]) - self.assertNotEqual(re.match(".*tmp.*pyomo.lp$",ans[0][0]), None) + # """ Convert from Pyomo to LP with file""" + ans = convert_problem( + (join(currdir, 'model.py'), ProblemFormat.cpxlp), + None, + [ProblemFormat.cpxlp], + ) + self.assertNotEqual(re.match(".*tmp.*pyomo.lp$", ans[0][0]), None) def test_mock_lp2(self): - #""" Convert from NL to LP """ - arg=MockArg2() + # """ Convert from NL to LP """ + arg = MockArg2() try: - ans = convert_problem( (arg,), None, [ProblemFormat.cpxlp]) + ans = convert_problem((arg,), None, [ProblemFormat.cpxlp]) except ConverterError: err = sys.exc_info()[1] if not Executable("pico_convert"): return else: - self.fail("Expected ApplicationError because pico_convert " - "is not available: '%s'" % str(err)) - self.assertEqual(ans[0][0][-15:],"pico_convert.lp") + self.fail( + "Expected ApplicationError because pico_convert " + "is not available: '%s'" % str(err) + ) + self.assertEqual(ans[0][0][-15:], "pico_convert.lp") os.remove(ans[0][0]) # Note sure what to do with this test now that we # have a native MPS converter def Xtest_mock_mps1(self): - #""" Convert from Pyomo to MPS """ - arg=MockArg4() + # """ Convert from Pyomo to MPS """ + arg = MockArg4() try: - ans = convert_problem((arg, ProblemFormat.mps,arg), None, [ProblemFormat.mps]) + ans = convert_problem( + (arg, ProblemFormat.mps, arg), None, [ProblemFormat.mps] + ) except ConverterError: err = sys.exc_info()[1] if not Executable("pico_convert"): return else: - self.fail("Expected ApplicationError because pico_convert " - "is not available: '%s'" % str(err)) - self.assertEqual(ans[0][0][-16:],"pico_convert.mps") + self.fail( + "Expected ApplicationError because pico_convert " + "is not available: '%s'" % str(err) + ) + self.assertEqual(ans[0][0][-16:], "pico_convert.mps") os.remove(ans[0][0]) def test_pyomo_mps1(self): - #""" Convert from Pyomo to MPS with file""" + # """ Convert from Pyomo to MPS with file""" try: - ans = convert_problem( (join(currdir, 'model.py'), ProblemFormat.mps,), None, [ProblemFormat.mps]) + ans = convert_problem( + (join(currdir, 'model.py'), ProblemFormat.mps), + None, + [ProblemFormat.mps], + ) except ConverterError: err = sys.exc_info()[1] if not Executable("pico_convert"): return else: - self.fail("Expected ApplicationError because pico_convert " - "is not available: '%s'" % str(err)) - self.assertEqual(ans[0][0][-16:],"pico_convert.mps") + self.fail( + "Expected ApplicationError because pico_convert " + "is not available: '%s'" % str(err) + ) + self.assertEqual(ans[0][0][-16:], "pico_convert.mps") os.remove(ans[0][0]) def test_mock_nl1(self): - #""" Convert from Pyomo to NL """ + # """ Convert from Pyomo to NL """ arg = MockArg4() - ans = convert_problem( (arg, ProblemFormat.nl,arg), None, [ProblemFormat.nl]) - self.assertNotEqual(re.match(".*tmp.*pyomo.nl$",ans[0][0]), None) + ans = convert_problem((arg, ProblemFormat.nl, arg), None, [ProblemFormat.nl]) + self.assertNotEqual(re.match(".*tmp.*pyomo.nl$", ans[0][0]), None) os.remove(ans[0][0]) def test_pyomo_nl1(self): - #""" Convert from Pyomo to NL with file""" - ans = convert_problem( (join(currdir, 'model.py'), ProblemFormat.nl,), None, [ProblemFormat.nl]) - self.assertNotEqual(re.match(".*tmp.*pyomo.nl$",ans[0][0]), None) + # """ Convert from Pyomo to NL with file""" + ans = convert_problem( + (join(currdir, 'model.py'), ProblemFormat.nl), None, [ProblemFormat.nl] + ) + self.assertNotEqual(re.match(".*tmp.*pyomo.nl$", ans[0][0]), None) os.remove(ans[0][0]) def test_error1(self): - #""" No valid problem types """ + # """ No valid problem types """ try: - convert_problem( ("test4.nl","tmp.nl"), ProblemFormat.nl, []) + convert_problem(("test4.nl", "tmp.nl"), ProblemFormat.nl, []) self.fail("Expected ConverterError exception") except ConverterError: err = sys.exc_info()[1] pass def test_error2(self): - #""" Target problem type is not valid """ + # """ Target problem type is not valid """ try: - convert_problem( ("test4.nl","tmp.nl"), ProblemFormat.nl, [ProblemFormat.mps]) + convert_problem( + ("test4.nl", "tmp.nl"), ProblemFormat.nl, [ProblemFormat.mps] + ) self.fail("Expected ConverterError exception") except ConverterError: pass def test_error3(self): - #""" Empty argument list """ + # """ Empty argument list """ try: - convert_problem( (), None, [ProblemFormat.mps]) + convert_problem((), None, [ProblemFormat.mps]) self.fail("Expected ConverterError exception") - except ConverterError: + except ConverterError: pass def test_error4(self): - #""" Unknown source type """ + # """ Unknown source type """ try: - convert_problem( ("prob.foo",), None, [ProblemFormat.mps]) + convert_problem(("prob.foo",), None, [ProblemFormat.mps]) self.fail("Expected ConverterError exception") - except ConverterError: + except ConverterError: pass def test_error5(self): - #""" Unknown source type """ + # """ Unknown source type """ try: - convert_problem( ("prob.lp",), ProblemFormat.nl, [ProblemFormat.nl]) + convert_problem(("prob.lp",), ProblemFormat.nl, [ProblemFormat.nl]) self.fail("Expected ConverterError exception") - except ConverterError: + except ConverterError: pass def test_error6(self): - #""" Cannot use pico_convert with more than one file """ + # """ Cannot use pico_convert with more than one file """ try: - ans = convert_problem( (join(currdir, "test4.nl"), "foo"), None, [ProblemFormat.cpxlp]) + ans = convert_problem( + (join(currdir, "test4.nl"), "foo"), None, [ProblemFormat.cpxlp] + ) self.fail("Expected ConverterError exception") except ConverterError: pass def test_error8(self): - #""" Error when source file cannot be found """ + # """ Error when source file cannot be found """ try: - ans = convert_problem( (join(currdir, "unknown.nl"),), None, [ProblemFormat.cpxlp]) + ans = convert_problem( + (join(currdir, "unknown.nl"),), None, [ProblemFormat.cpxlp] + ) self.fail("Expected ConverterError exception") except ApplicationError: err = sys.exc_info()[1] if not Executable("pico_convert"): - self.fail("Expected ApplicationError because pico_convert " - "is not available: '%s'" % str(err)) + self.fail( + "Expected ApplicationError because pico_convert " + "is not available: '%s'" % str(err) + ) return except ConverterError: pass def test_error9(self): - #""" The Opt configuration has not been initialized """ + # """ The Opt configuration has not been initialized """ cmd = Executable("pico_convert").disable() try: - ans = convert_problem( (join(currdir, "test4.nl"),), None, [ProblemFormat.cpxlp]) + ans = convert_problem( + (join(currdir, "test4.nl"),), None, [ProblemFormat.cpxlp] + ) self.fail("This test didn't fail, but pico_convert should not be defined.") except ConverterError: pass cmd = Executable("pico_convert").rehash() def test_error10(self): - #""" GLPSOL can only convert file data """ + # """ GLPSOL can only convert file data """ try: arg = MockArg3() - ans = convert_problem( (arg, ProblemFormat.cpxlp,arg), None, [ProblemFormat.cpxlp]) + ans = convert_problem( + (arg, ProblemFormat.cpxlp, arg), None, [ProblemFormat.cpxlp] + ) self.fail("This test didn't fail, but glpsol cannot handle objects.") except ConverterError: pass def test_error11(self): - #""" Cannot convert MOD that contains data """ + # """ Cannot convert MOD that contains data """ try: - ans = convert_problem( (join(currdir, "test3.mod"),join(currdir, "test5.dat")), None, [ProblemFormat.cpxlp]) - self.fail("Expected ConverterError exception because we provided a MOD file with a 'data;' declaration") + ans = convert_problem( + (join(currdir, "test3.mod"), join(currdir, "test5.dat")), + None, + [ProblemFormat.cpxlp], + ) + self.fail( + "Expected ConverterError exception because we provided a MOD file with a 'data;' declaration" + ) except ApplicationError: err = sys.exc_info()[1] if Executable("glpsol"): - self.fail("Expected ApplicationError because glpsol " - "is not available: '%s'" % str(err)) + self.fail( + "Expected ApplicationError because glpsol " + "is not available: '%s'" % str(err) + ) return except ConverterError: pass + if __name__ == "__main__": deleteFiles = False unittest.main() diff --git a/pyomo/solvers/tests/mip/test_factory.py b/pyomo/solvers/tests/mip/test_factory.py index 654903a0f78..6960a0f8ced 100644 --- a/pyomo/solvers/tests/mip/test_factory.py +++ b/pyomo/solvers/tests/mip/test_factory.py @@ -16,35 +16,37 @@ import pyomo.common.unittest as unittest -from pyomo.opt import (AbstractProblemWriter, AbstractResultsReader, - OptSolver, ReaderFactory, - SolverFactory, WriterFactory) +from pyomo.opt import ( + AbstractProblemWriter, + AbstractResultsReader, + OptSolver, + ReaderFactory, + SolverFactory, + WriterFactory, +) from pyomo.opt.base.solvers import UnknownSolver from pyomo.opt.plugins.sol import ResultsReader_sol from pyomo.solvers.plugins.solvers.CBCplugin import MockCBC -class MockWriter(AbstractProblemWriter): +class MockWriter(AbstractProblemWriter): def __init__(self, name=None): - AbstractProblemWriter.__init__(self,name) + AbstractProblemWriter.__init__(self, name) class MockReader(AbstractResultsReader): - def __init__(self, name=None): - AbstractResultsReader.__init__(self,name) + AbstractResultsReader.__init__(self, name) class MockSolver(OptSolver): - def __init__(self, **kwds): kwds['type'] = 'stest_type' kwds['doc'] = 'MockSolver Documentation' - OptSolver.__init__(self,**kwds) + OptSolver.__init__(self, **kwds) class OptFactoryDebug(unittest.TestCase): - @classmethod def setUpClass(cls): import pyomo.environ @@ -60,9 +62,22 @@ def test_solver_factory(self): """ SolverFactory.register('stest3')(MockSolver) ans = sorted(SolverFactory) - tmp = ['_mock_asl', '_mock_cbc', '_mock_cplex', '_mock_glpk', 'cbc', 'cplex', 'glpk', 'stest3', 'asl'] + tmp = [ + '_mock_asl', + '_mock_cbc', + '_mock_cplex', + '_mock_glpk', + 'cbc', + 'cplex', + 'glpk', + 'scip', + 'stest3', + 'asl', + ] tmp.sort() - self.assertTrue(set(tmp) <= set(ans), msg="Set %s is not a subset of set %s" %(tmp,ans)) + self.assertTrue( + set(tmp) <= set(ans), msg="Set %s is not a subset of set %s" % (tmp, ans) + ) def test_solver_instance(self): """ @@ -74,7 +89,7 @@ def test_solver_instance(self): self.assertEqual(type(ans), MockCBC) ans = SolverFactory("_mock_cbc", name="mymock") self.assertEqual(type(ans), MockCBC) - self.assertEqual(ans.name, "mymock") + self.assertEqual(ans.name, "mymock") def test_solver_registration(self): """ @@ -115,15 +130,14 @@ def test_writer_registration(self): WriterFactory.register('wtest3')(MockWriter) self.assertTrue('wtest3' in WriterFactory) - def test_reader_factory(self): """ Testing the pyomo.opt reader factory """ ReaderFactory.register('rtest3')(MockReader) ans = ReaderFactory - #self.assertEqual(len(ans),4) - self.assertTrue(set(ans) >= set(["rtest3", "sol","yaml", "json"])) + # self.assertEqual(len(ans),4) + self.assertTrue(set(ans) >= set(["rtest3", "sol", "yaml", "json"])) def test_reader_instance(self): """ @@ -133,9 +147,9 @@ def test_reader_instance(self): self.assertEqual(ans, None) ans = ReaderFactory("sol") self.assertEqual(type(ans), ResultsReader_sol) - #ans = pyomo.opt.ReaderFactory("osrl", "myreader") - #self.assertEqual(type(ans), pyomo.opt.reader.OS.ResultsReader_osrl) - #self.assertEqual(ans.name, "myreader") + # ans = pyomo.opt.ReaderFactory("osrl", "myreader") + # self.assertEqual(type(ans), pyomo.opt.reader.OS.ResultsReader_osrl) + # self.assertEqual(ans.name, "myreader") def test_reader_registration(self): """ @@ -146,5 +160,6 @@ def test_reader_registration(self): ReaderFactory.register('rtest3')(MockReader) self.assertTrue('rtest3' in ReaderFactory) + if __name__ == "__main__": unittest.main() diff --git a/pyomo/solvers/tests/mip/test_ipopt.py b/pyomo/solvers/tests/mip/test_ipopt.py index 5fb0ea071d1..ca553c12447 100644 --- a/pyomo/solvers/tests/mip/test_ipopt.py +++ b/pyomo/solvers/tests/mip/test_ipopt.py @@ -21,22 +21,30 @@ import pyomo.opt from pyomo.core import ( - ConcreteModel, RangeSet, Var, Param, Objective, ConstraintList, - value, minimize, + ConcreteModel, + RangeSet, + Var, + Param, + Objective, + ConstraintList, + value, + minimize, ) currdir = this_file_dir() deleteFiles = True ipopt_available = False -class Test(unittest.TestCase): + +class Test(unittest.TestCase): @classmethod def setUpClass(cls): global ipopt_available import pyomo.environ from pyomo.solvers.tests.solvers import test_solver_cases - ipopt_available = test_solver_cases('ipopt','nl').available + + ipopt_available = test_solver_cases('ipopt', 'nl').available def setUp(self): if not ipopt_available: @@ -65,30 +73,33 @@ def setUp(self): sisser_instance = ConcreteModel() - sisser_instance.N = RangeSet(1,2) - sisser_instance.xinit = Param( - sisser_instance.N, initialize={ 1 : 1.0, 2 : 0.1}) + sisser_instance.N = RangeSet(1, 2) + sisser_instance.xinit = Param(sisser_instance.N, initialize={1: 1.0, 2: 0.1}) def fa(model, i): return value(model.xinit[i]) - sisser_instance.x = Var(sisser_instance.N,initialize=fa) + + sisser_instance.x = Var(sisser_instance.N, initialize=fa) def f(model): - return 3*model.x[1]**4 - 2*(model.x[1]*model.x[2])**2 + 3*model.x[2]**4 - sisser_instance.f = Objective(rule=f,sense=minimize) + return ( + 3 * model.x[1] ** 4 + - 2 * (model.x[1] * model.x[2]) ** 2 + + 3 * model.x[2] ** 4 + ) - self.sisser_instance = sisser_instance + sisser_instance.f = Objective(rule=f, sense=minimize) + self.sisser_instance = sisser_instance def tearDown(self): TempfileManager.pop(remove=deleteFiles or self.currentTestPassed()) def compare_json(self, file1, file2): - with open(file1, 'r') as out, \ - open(file2, 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-7, - allow_second_superset=True) + with open(file1, 'r') as out, open(file2, 'r') as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-7, allow_second_superset=True + ) def test_version_asl(self): self.assertTrue(self.asl.version() is not None) @@ -103,76 +114,62 @@ def test_version_ipopt(self): def test_asl_solve_from_nl(self): # Test ipopt solve from nl file _log = TempfileManager.create_tempfile(".test_ipopt.log") - results = self.asl.solve(join(currdir, "sisser.pyomo.nl"), - logfile=_log, - suffixes=['.*']) + results = self.asl.solve( + join(currdir, "sisser.pyomo.nl"), logfile=_log, suffixes=['.*'] + ) # We don't want the test to care about which Ipopt version we are using results.Solution(0).Message = "Ipopt" results.Solver.Message = "Ipopt" _out = TempfileManager.create_tempfile(".test_ipopt.txt") - results.write(filename=_out, - times=False, - format='json') - self.compare_json( - _out, join(currdir, "test_solve_from_nl.baseline")) + results.write(filename=_out, times=False, format='json') + self.compare_json(_out, join(currdir, "test_solve_from_nl.baseline")) def test_ipopt_solve_from_nl(self): # Test ipopt solve from nl file _log = TempfileManager.create_tempfile(".test_ipopt.log") - results = self.ipopt.solve(join(currdir, "sisser.pyomo.nl"), - logfile=_log, - suffixes=['.*']) + results = self.ipopt.solve( + join(currdir, "sisser.pyomo.nl"), logfile=_log, suffixes=['.*'] + ) # We don't want the test to care about which Ipopt version we are using results.Solution(0).Message = "Ipopt" results.Solver.Message = "Ipopt" _out = TempfileManager.create_tempfile(".test_ipopt.txt") - results.write(filename=_out, - times=False, - format='json') - self.compare_json( - _out, join(currdir, "test_solve_from_nl.baseline")) + results.write(filename=_out, times=False, format='json') + self.compare_json(_out, join(currdir, "test_solve_from_nl.baseline")) def test_asl_solve_from_instance(self): # Test ipopt solve from a pyomo instance and load the solution - results = self.asl.solve(self.sisser_instance, - suffixes=['.*']) + results = self.asl.solve(self.sisser_instance, suffixes=['.*']) # We don't want the test to care about which Ipopt version we are using self.sisser_instance.solutions.store_to(results) results.Solution(0).Message = "Ipopt" results.Solver.Message = "Ipopt" _out = TempfileManager.create_tempfile(".test_ipopt.txt") - results.write(filename=_out, - times=False, - format='json') - self.compare_json( - _out, join(currdir, "test_solve_from_instance.baseline")) - #self.sisser_instance.load_solutions(results) + results.write(filename=_out, times=False, format='json') + self.compare_json(_out, join(currdir, "test_solve_from_instance.baseline")) + # self.sisser_instance.load_solutions(results) def test_ipopt_solve_from_instance(self): # Test ipopt solve from a pyomo instance and load the solution - results = self.ipopt.solve(self.sisser_instance, - suffixes=['.*']) + results = self.ipopt.solve(self.sisser_instance, suffixes=['.*']) # We don't want the test to care about which Ipopt version we are using self.sisser_instance.solutions.store_to(results) results.Solution(0).Message = "Ipopt" results.Solver.Message = "Ipopt" _out = TempfileManager.create_tempfile(".test_ipopt.txt") - results.write(filename=_out, - times=False, - format='json') - self.compare_json( - _out, join(currdir, "test_solve_from_instance.baseline")) - #self.sisser_instance.load_solutions(results) + results.write(filename=_out, times=False, format='json') + self.compare_json(_out, join(currdir, "test_solve_from_instance.baseline")) + # self.sisser_instance.load_solutions(results) def test_ipopt_solve_from_instance_OF_options(self): - with self.assertRaises(ValueError): # using OF_ options AND option_file_name # is not allowed - self.ipopt.solve(self.sisser_instance, - suffixes=['.*'], - options={"OF_mu_init": 0.1, - "option_file_name": "junk.opt"}) + self.ipopt.solve( + self.sisser_instance, + suffixes=['.*'], + options={"OF_mu_init": 0.1, "option_file_name": "junk.opt"}, + ) # Creating a dummy ipopt.opt file in the cwd # will cover the code that prints a warning _cwd = os.getcwd() @@ -183,15 +180,16 @@ def test_ipopt_solve_from_instance_OF_options(self): open(join(tmpdir, 'ipopt.opt'), "w").close() # Test ipopt solve from a pyomo instance and load the solution with LoggingIntercept() as LOG: - results = self.ipopt.solve(self.sisser_instance, - suffixes=['.*'], - options={"OF_mu_init": 0.1}) + results = self.ipopt.solve( + self.sisser_instance, suffixes=['.*'], options={"OF_mu_init": 0.1} + ) self.assertRegex( LOG.getvalue().replace("\n", " "), r"A file named (.*) exists in the current working " r"directory, but Ipopt options file options \(i.e., " r"options that start with 'OF_'\) were provided. The " - r"options file \1 will be ignored.") + r"options file \1 will be ignored.", + ) finally: os.chdir(_cwd) @@ -200,12 +198,9 @@ def test_ipopt_solve_from_instance_OF_options(self): results.Solution(0).Message = "Ipopt" results.Solver.Message = "Ipopt" _out = TempfileManager.create_tempfile(".test_ipopt.txt") - results.write(filename=_out, - times=False, - format='json') - self.compare_json( - _out, join(currdir, "test_solve_from_instance.baseline")) - #self.sisser_instance.load_solutions(results) + results.write(filename=_out, times=False, format='json') + self.compare_json(_out, join(currdir, "test_solve_from_instance.baseline")) + # self.sisser_instance.load_solutions(results) def test_bad_dof(self): m = ConcreteModel() @@ -214,12 +209,13 @@ def test_bad_dof(self): m.c = ConstraintList() m.c.add(m.x + m.y == 1) m.c.add(m.x - m.y == 0) - m.c.add(2*m.x - 3*m.y == 1) + m.c.add(2 * m.x - 3 * m.y == 1) res = self.ipopt.solve(m) self.assertEqual(str(res.solver.status), "warning") self.assertEqual(str(res.solver.termination_condition), "other") self.assertTrue("Too few degrees of freedom" in res.solver.message) + if __name__ == "__main__": deleteFiles = False unittest.main() diff --git a/pyomo/solvers/tests/mip/test_mip.py b/pyomo/solvers/tests/mip/test_mip.py index 4e4a04ea4bd..0257e65de20 100644 --- a/pyomo/solvers/tests/mip/test_mip.py +++ b/pyomo/solvers/tests/mip/test_mip.py @@ -14,7 +14,8 @@ import os from os.path import abspath, dirname -currdir = dirname(abspath(__file__))+os.sep + +currdir = dirname(abspath(__file__)) + os.sep import pyomo.common.unittest as unittest diff --git a/pyomo/solvers/tests/mip/test_scip.py b/pyomo/solvers/tests/mip/test_scip.py index 186e2d74d9d..8a43b120a34 100644 --- a/pyomo/solvers/tests/mip/test_scip.py +++ b/pyomo/solvers/tests/mip/test_scip.py @@ -26,14 +26,16 @@ deleteFiles = True scip_available = False -class Test(unittest.TestCase): + +class Test(unittest.TestCase): @classmethod def setUpClass(cls): global scip_available import pyomo.environ from pyomo.solvers.tests.solvers import test_solver_cases - scip_available = test_solver_cases('scip','nl').available + + scip_available = test_solver_cases('scip', 'nl').available def setUp(self): if not scip_available: @@ -51,11 +53,10 @@ def tearDown(self): TempfileManager.pop(remove=deleteFiles or self.currentTestPassed()) def compare_json(self, file1, file2): - with open(file1, 'r') as out, \ - open(file2, 'r') as txt: - self.assertStructuredAlmostEqual(json.load(txt), json.load(out), - abstol=1e-7, - allow_second_superset=True) + with open(file1, 'r') as out, open(file2, 'r') as txt: + self.assertStructuredAlmostEqual( + json.load(txt), json.load(out), abstol=1e-7, allow_second_superset=True + ) def test_version_scip(self): self.assertTrue(self.scip.version() is not None) @@ -64,8 +65,7 @@ def test_version_scip(self): def test_scip_solve_from_instance(self): # Test scip solve from a pyomo instance and load the solution - results = self.scip.solve(self.model, - suffixes=['.*']) + results = self.scip.solve(self.model, suffixes=['.*']) # We don't want the test to care about which Scip version we are using self.model.solutions.store_to(results) results.Solution(0).Message = "Scip" @@ -73,11 +73,9 @@ def test_scip_solve_from_instance(self): results.Solver.Time = 0 _out = TempfileManager.create_tempfile(".txt") results.write(filename=_out, times=False, format='json') - self.compare_json( - _out, join(currdir, "test_scip_solve_from_instance.baseline")) + self.compare_json(_out, join(currdir, "test_scip_solve_from_instance.baseline")) def test_scip_solve_from_instance_options(self): - # Creating a dummy scip.set file in the cwd # will cover the code that prints a warning _cwd = os.getcwd() @@ -87,15 +85,16 @@ def test_scip_solve_from_instance_options(self): open(join(tmpdir, 'scip.set'), "w").close() # Test scip solve from a pyomo instance and load the solution with LoggingIntercept() as LOG: - results = self.scip.solve(self.model, - suffixes=['.*'], - options={"limits/softtime": 100}) + results = self.scip.solve( + self.model, suffixes=['.*'], options={"limits/softtime": 100} + ) self.assertRegex( LOG.getvalue().replace("\n", " "), r"A file named (.*) exists in the current working " r"directory, but SCIP options are being " r"set using a separate options file. The " - r"options file \1 will be ignored.") + r"options file \1 will be ignored.", + ) finally: os.chdir(_cwd) # We don't want the test to care about which Scip version we are using @@ -105,8 +104,8 @@ def test_scip_solve_from_instance_options(self): results.Solver.Time = 0 _out = TempfileManager.create_tempfile(".txt") results.write(filename=_out, times=False, format='json') - self.compare_json( - _out, join(currdir, "test_scip_solve_from_instance.baseline")) + self.compare_json(_out, join(currdir, "test_scip_solve_from_instance.baseline")) + if __name__ == "__main__": deleteFiles = False diff --git a/pyomo/solvers/tests/mip/test_scip_log_data.py b/pyomo/solvers/tests/mip/test_scip_log_data.py new file mode 100644 index 00000000000..8f756de220a --- /dev/null +++ b/pyomo/solvers/tests/mip/test_scip_log_data.py @@ -0,0 +1,252 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Created on Tue May 3 09:33:42 2022 +@author: pmlpm +""" + +import pyomo.environ as pyo +import pyomo.common.unittest as unittest +from pyomo.opt import check_available_solvers + +scip_available = bool(check_available_solvers('scip')) + +import random + +# ****************************************************************************** +# ****************************************************************************** + +# carry out optimisations + + +def optimise( + problem: pyo.ConcreteModel, + solver_timelimit, + solver_rel_mip_gap, + solver_abs_mip_gap, + print_solver_output: bool = False, +): + # config + + options_dict_format = { + 'limits/time': solver_timelimit, + 'limits/gap': solver_rel_mip_gap, + 'limits/absgap': solver_abs_mip_gap, + } + + opt = pyo.SolverFactory('scip') + + for key, value in options_dict_format.items(): + opt.options[key] = value + + # solve + + results = opt.solve(problem, tee=print_solver_output) + + # return + + return results, opt + + +# ****************************************************************************** +# ****************************************************************************** + + +def problem_lp_optimal(): + model = pyo.ConcreteModel('lp_optimal') + + model.x = pyo.Var([1, 2], domain=pyo.NonNegativeReals) + + model.OBJ = pyo.Objective(expr=2 * model.x[1] + 3 * model.x[2]) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1) + + return model + + +def problem_lp_infeasible(): + model = pyo.ConcreteModel('lp_infeasible') + + model.x = pyo.Var([1, 2], domain=pyo.NonNegativeReals) + + model.OBJ = pyo.Objective(expr=2 * model.x[1] + 3 * model.x[2]) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] <= -1) + + return model + + +def problem_lp_unbounded(): + model = pyo.ConcreteModel('lp_unbounded') + + model.x = pyo.Var([1, 2], domain=pyo.NonNegativeReals) + + model.OBJ = pyo.Objective(expr=2 * model.x[1] + 3 * model.x[2], sense=pyo.maximize) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1) + + return model + + +def problem_milp_optimal(): + model = pyo.ConcreteModel('milp_optimal') + + model.x = pyo.Var([1, 2], domain=pyo.Binary) + + model.OBJ = pyo.Objective(expr=2.15 * model.x[1] + 3.8 * model.x[2]) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1) + + return model + + +def problem_milp_infeasible(): + model = pyo.ConcreteModel('milp_infeasible') + + model.x = pyo.Var([1, 2], domain=pyo.Binary) + + model.OBJ = pyo.Objective(expr=2 * model.x[1] + 3 * model.x[2]) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] <= -1) + + return model + + +def problem_milp_unbounded(): + model = pyo.ConcreteModel('milp_unbounded') + + model.x = pyo.Var([1, 2], domain=pyo.NonNegativeReals) + + model.y = pyo.Var(domain=pyo.Binary) + + model.OBJ = pyo.Objective( + expr=2 * model.x[1] + 3 * model.x[2] + model.y, sense=pyo.maximize + ) + + model.Constraint1 = pyo.Constraint(expr=3 * model.x[1] + 4 * model.x[2] >= 1) + + return model + + +def problem_milp_feasible(): + model = pyo.ConcreteModel('milp_feasible') + + random.seed(6254) + + # a knapsack-type problem + + number_binary_variables = 20 # may need to be tweaked depending on specs + + model.Y = pyo.RangeSet(number_binary_variables) + + model.y = pyo.Var(model.Y, domain=pyo.Binary) + + model.OBJ = pyo.Objective( + expr=sum(model.y[j] * random.random() for j in model.Y), sense=pyo.maximize + ) + + model.Constraint1 = pyo.Constraint( + expr=sum(model.y[j] * random.random() for j in model.Y) + <= round(number_binary_variables / 5) + ) + + def rule_c1(m, i): + return ( + sum( + model.y[j] * (random.random() - 0.5) + for j in model.Y + if j != i + if random.randint(0, 1) + ) + <= round(number_binary_variables / 5) * model.y[i] + ) + + model.constr_c1 = pyo.Constraint(model.Y, rule=rule_c1) + + return model + + +# ****************************************************************************** +# ****************************************************************************** + + +@unittest.skipIf(not scip_available, "SCIP solver is not available.") +def test_scip_some_more(): + # list of problems + + list_concrete_models = [ + problem_lp_unbounded(), + problem_lp_infeasible(), + problem_lp_optimal(), + problem_milp_unbounded(), + problem_milp_infeasible(), + problem_milp_optimal(), + problem_milp_feasible(), # may reach optimality depending on the budget + ] + + list_extra_data_expected = [ + (), # problem_lp_unbounded(), + (), # problem_lp_infeasible(), + ('Time', 'Gap', 'Primal bound', 'Dual bound'), # problem_lp_optimal(), + (), # problem_milp_unbounded(), + (), # problem_milp_infeasible(), + ('Time', 'Gap', 'Primal bound', 'Dual bound'), # problem_milp_optimal(), + ('Time', 'Gap', 'Primal bound', 'Dual bound'), # problem_milp_feasible() + ] + + # ************************************************************************** + # ************************************************************************** + + # solver settings + + solver_timelimit = 1 + + solver_abs_mip_gap = 0 + + solver_rel_mip_gap = 1e-6 + + # ************************************************************************** + # ************************************************************************** + + for problem_index, problem in enumerate(list_concrete_models): + print('******************************') + print('******************************') + + print(problem.name) + + print('******************************') + print('******************************') + + results, opt = optimise( + problem, + solver_timelimit, + solver_rel_mip_gap, + solver_abs_mip_gap, + print_solver_output=True, + ) + + print(results) + + # check the version + + executable = opt._command.cmd[0] + + version = opt._known_versions[executable] + + if version < (8, 0, 0, 0): + # if older and untested, skip tests + + continue + + # for each new attribute expected + + for log_file_attr in list_extra_data_expected[problem_index]: + # check that it is part of the results object + + assert log_file_attr in results['Solver'][0] + + +# ****************************************************************************** +# ****************************************************************************** + +# test_scip_some_more() # uncomment to run individually diff --git a/pyomo/solvers/tests/mip/test_scip_solve_from_instance.baseline b/pyomo/solvers/tests/mip/test_scip_solve_from_instance.baseline index c59de0e01ec..a3eb9ffacec 100644 --- a/pyomo/solvers/tests/mip/test_scip_solve_from_instance.baseline +++ b/pyomo/solvers/tests/mip/test_scip_solve_from_instance.baseline @@ -1,12 +1,12 @@ { "Problem": [ { - "Lower bound": -Infinity, + "Lower bound": -Infinity, "Number of constraints": 0, "Number of objectives": 1, "Number of variables": 1, "Sense": "unknown", - "Upper bound": Infinity + "Upper bound": 1.0 } ], "Solution": [ @@ -42,4 +42,4 @@ "Time": 0 } ] -} \ No newline at end of file +} diff --git a/pyomo/solvers/tests/mip/test_scip_version.py b/pyomo/solvers/tests/mip/test_scip_version.py index 04f92df33ec..c0cc80c0316 100644 --- a/pyomo/solvers/tests/mip/test_scip_version.py +++ b/pyomo/solvers/tests/mip/test_scip_version.py @@ -27,25 +27,35 @@ currdir = this_file_dir() deleteFiles = True -@unittest.skipIf('pypy_version_info' in dir(sys), - "Skip SCIPAMPL tests on Pypy due to performance") -class Test(unittest.TestCase): +@unittest.skipIf( + 'pypy_version_info' in dir(sys), "Skip SCIPAMPL tests on Pypy due to performance" +) +class Test(unittest.TestCase): def setUp(self): scip = SolverFactory('scip', solver_io='nl') type(scip)._known_versions = {} TempfileManager.push() - self.patch_run = unittest.mock.patch('pyomo.solvers.plugins.solvers.SCIPAMPL.subprocess.run') + self.patch_run = unittest.mock.patch( + 'pyomo.solvers.plugins.solvers.SCIPAMPL.subprocess.run' + ) # Executable cannot be partially mocked since it creates a PathData object. - self.patch_path = unittest.mock.patch.object(pyomo.common.fileutils.PathData, 'path', autospec=True) - self.patch_available = unittest.mock.patch.object(pyomo.common.fileutils.PathData, 'available', autospec=True) + self.patch_path = unittest.mock.patch.object( + pyomo.common.fileutils.PathData, 'path', autospec=True + ) + self.patch_available = unittest.mock.patch.object( + pyomo.common.fileutils.PathData, 'available', autospec=True + ) self.run = self.patch_run.start() self.path = self.patch_path.start() self.available = self.patch_available.start() - self.executable_paths = {"scip": join(currdir, "scip"), "scipampl": join(currdir, "scipampl")} + self.executable_paths = { + "scip": join(currdir, "scip"), + "scipampl": join(currdir, "scipampl"), + } def tearDown(self): self.patch_run.stop() @@ -57,19 +67,23 @@ def tearDown(self): def generate_stdout(self, solver, version): if solver == "scip": # Template from SCIP 8.0.0 - stdout = "SCIP version {} [precision: 8 byte] [memory: block] [mode: optimized] [LP solver: SoPlex 6.0.0] [GitHash: d9b84b0709]\n"\ - "Copyright (C) 2002-2021 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\n"\ - "\n"\ - "External libraries:\n" \ - " SoPlex 6.0.0 Linear Programming Solver developed at Zuse Institute Berlin (soplex.zib.de) [GitHash: f5cfa86b]" + stdout = ( + "SCIP version {} [precision: 8 byte] [memory: block] [mode: optimized] [LP solver: SoPlex 6.0.0] [GitHash: d9b84b0709]\n" + "Copyright (C) 2002-2021 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\n" + "\n" + "External libraries:\n" + " SoPlex 6.0.0 Linear Programming Solver developed at Zuse Institute Berlin (soplex.zib.de) [GitHash: f5cfa86b]" + ) # Template from SCIPAMPL 7.0.3 elif solver == "scipampl": - stdout = "SCIP version {} [precision: 8 byte] [memory: block] [mode: optimized] [LP solver: SoPlex 5.0.2] [GitHash: 74c11e60cd]\n"\ - "Copyright (C) 2002-2021 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\n"\ - "\n"\ - "External libraries:\n"\ - " Readline 8.0 GNU library for command line editing (gnu.org/s/readline)" + stdout = ( + "SCIP version {} [precision: 8 byte] [memory: block] [mode: optimized] [LP solver: SoPlex 5.0.2] [GitHash: 74c11e60cd]\n" + "Copyright (C) 2002-2021 Konrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)\n" + "\n" + "External libraries:\n" + " Readline 8.0 GNU library for command line editing (gnu.org/s/readline)" + ) else: raise ValueError("Unsupported solver for stdout generation.") @@ -77,7 +91,6 @@ def generate_stdout(self, solver, version): return stdout.format(version) def set_solvers(self, scip=(8, 0, 0, 0), scipampl=(7, 0, 3, 0), fail=True): - executables = {"scip": scip, "scipampl": scipampl} def get_executable(*args, **kwargs): @@ -108,7 +121,9 @@ def run(args, **kwargs): if solver_version is None: raise FileNotFoundError() else: - return subprocess.CompletedProcess(args, 0, self.generate_stdout(solver_name, solver_version), None) + return subprocess.CompletedProcess( + args, 0, self.generate_stdout(solver_name, solver_version), None + ) if fail: self.fail("Solver creation looked up a non scip executable.") diff --git a/pyomo/solvers/tests/mip/test_solver.py b/pyomo/solvers/tests/mip/test_solver.py index 24d03b1f8a9..90a7076cbca 100644 --- a/pyomo/solvers/tests/mip/test_solver.py +++ b/pyomo/solvers/tests/mip/test_solver.py @@ -20,18 +20,17 @@ import pyomo.solvers.plugins.solvers from pyomo.solvers.plugins.solvers.CBCplugin import MockCBC -class MockSolver2(pyomo.opt.OptSolver): +class MockSolver2(pyomo.opt.OptSolver): def __init__(self, **kwds): kwds['type'] = 'stest_type' - pyomo.opt.OptSolver.__init__(self,**kwds) + pyomo.opt.OptSolver.__init__(self, **kwds) def enabled(self): return False class OptSolverDebug(unittest.TestCase): - def setUp(self): pyomo.opt.SolverFactory.register('stest2')(MockSolver2) diff --git a/pyomo/solvers/tests/models/LP_block.py b/pyomo/solvers/tests/models/LP_block.py index 9c817de168a..64c866faa9e 100644 --- a/pyomo/solvers/tests/models/LP_block.py +++ b/pyomo/solvers/tests/models/LP_block.py @@ -10,9 +10,18 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Param, Var, Objective, Constraint, Block, NonNegativeReals +from pyomo.core import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + Block, + NonNegativeReals, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_block(_BaseTestModel): """ @@ -24,7 +33,7 @@ class LP_block(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -32,16 +41,16 @@ def _generate_model(self): model._name = self.description model.b = Block() - model.B = Block([1,2,3]) + model.B = Block([1, 2, 3]) model.a = Param(initialize=1.0, mutable=True) model.b.x = Var(within=NonNegativeReals) model.B[1].x = Var(within=NonNegativeReals) - model.obj = Objective(expr=model.b.x + 3.0*model.B[1].x) + model.obj = Objective(expr=model.b.x + 3.0 * model.B[1].x) model.obj.deactivate() model.B[2].c = Constraint(expr=-model.B[1].x <= -model.a) - model.B[2].obj = Objective(expr=model.b.x + 3.0*model.B[1].x + 2) - model.B[3].c = Constraint(expr=(2.0, model.b.x/model.a - model.B[1].x, 10)) + model.B[2].obj = Objective(expr=model.b.x + 3.0 * model.B[1].x + 2) + model.B[3].c = Constraint(expr=(2.0, model.b.x / model.a - model.B[1].x, 10)) def warmstart_model(self): assert self.model is not None @@ -49,24 +58,24 @@ def warmstart_model(self): model.b.x.value = 1.0 model.B[1].x.value = 1.0 + @register_model class LP_block_kernel(LP_block): - def _generate_model(self): self.model = pmo.block() model = self.model model._name = self.description model.b = pmo.block() - model.B = pmo.block_dict((i, pmo.block()) - for i in range(1,4)) + model.B = pmo.block_dict((i, pmo.block()) for i in range(1, 4)) model.a = pmo.parameter(value=1.0) model.b.x = pmo.variable(lb=0) model.B[1].x = pmo.variable(lb=0) - model.obj = pmo.objective(expr=model.b.x + 3.0*model.B[1].x) + model.obj = pmo.objective(expr=model.b.x + 3.0 * model.B[1].x) model.obj.deactivate() model.B[2].c = pmo.constraint(expr=-model.B[1].x <= -model.a) - model.B[2].obj = pmo.objective(expr=model.b.x + 3.0*model.B[1].x + 2) - model.B[3].c = pmo.constraint(expr=(2.0, model.b.x/model.a - model.B[1].x, 10)) - + model.B[2].obj = pmo.objective(expr=model.b.x + 3.0 * model.B[1].x + 2) + model.B[3].c = pmo.constraint( + expr=(2.0, model.b.x / model.a - model.B[1].x, 10) + ) diff --git a/pyomo/solvers/tests/models/LP_compiled.py b/pyomo/solvers/tests/models/LP_compiled.py index 36011fdad1f..686406e7ec6 100644 --- a/pyomo/solvers/tests/models/LP_compiled.py +++ b/pyomo/solvers/tests/models/LP_compiled.py @@ -10,13 +10,21 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Var, Objective, Constraint, RangeSet, ConstraintList +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Constraint, + RangeSet, + ConstraintList, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model from pyomo.repn.beta.matrix import compile_block_linear_constraints has_numpy = False try: import numpy + has_numpy = True except: pass @@ -25,10 +33,12 @@ try: import scipy import scipy.sparse + has_scipy = True except: pass + @register_model class LP_compiled(_BaseTestModel): """ @@ -43,7 +53,7 @@ class LP_compiled(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") self.disable_suffix_tests = True def _generate_model(self): @@ -51,28 +61,29 @@ def _generate_model(self): model = self.model model._name = self.description - model.s = RangeSet(1,12) + model.s = RangeSet(1, 12) model.x = Var(model.s) model.x[1].setlb(-1) model.x[1].setub(1) model.x[2].setlb(-1) model.x[2].setub(1) - model.obj = Objective(expr=sum(model.x[i]*((-1)**(i+1)) - for i in model.x.index_set())) + model.obj = Objective( + expr=sum(model.x[i] * ((-1) ** (i + 1)) for i in model.x.index_set()) + ) model.c = ConstraintList() # to make the variable used in the constraint match the name model.c.add(Constraint.Skip) model.c.add(Constraint.Skip) - model.c.add(model.x[3]>=-1.) - model.c.add(model.x[4]<=1.) - model.c.add(model.x[5]==-1.) - model.c.add(model.x[6]==-1.) - model.c.add(model.x[7]==1.) - model.c.add(model.x[8]==1.) - model.c.add((-1.,model.x[9],-1.)) - model.c.add((-1.,model.x[10],-1.)) - model.c.add((1.,model.x[11],1.)) - model.c.add((1.,model.x[12],1.)) + model.c.add(model.x[3] >= -1.0) + model.c.add(model.x[4] <= 1.0) + model.c.add(model.x[5] == -1.0) + model.c.add(model.x[6] == -1.0) + model.c.add(model.x[7] == 1.0) + model.c.add(model.x[8] == 1.0) + model.c.add((-1.0, model.x[9], -1.0)) + model.c.add((-1.0, model.x[10], -1.0)) + model.c.add((1.0, model.x[11], 1.0)) + model.c.add((1.0, model.x[12], 1.0)) cdata = model.c.add((0, 1, 3)) assert cdata.lower == 0 assert cdata.upper == 3 @@ -93,7 +104,7 @@ def _generate_model(self): assert cdata.upper == 1 assert cdata.body() == 0 assert not cdata.equality - cdata = model.c.add((1,1)) + cdata = model.c.add((1, 1)) assert cdata.lower == 1 assert cdata.upper == 1 assert cdata.body() == 1 @@ -101,7 +112,7 @@ def _generate_model(self): model.fixed_var = Var() model.fixed_var.fix(1.0) - cdata = model.c.add((0, 1+model.fixed_var, 3)) + cdata = model.c.add((0, 1 + model.fixed_var, 3)) cdata = model.c.add((0, 2 + model.fixed_var, 3)) cdata = model.c.add((0, model.fixed_var, None)) cdata = model.c.add((None, model.fixed_var, 1)) @@ -111,8 +122,8 @@ def _generate_model(self): # to make the variable used in the constraint match the name model.c_inactive.add(Constraint.Skip) model.c_inactive.add(Constraint.Skip) - model.c_inactive.add(model.x[3]>=-2.) - model.c_inactive.add(model.x[4]<=2.) + model.c_inactive.add(model.x[3] >= -2.0) + model.c_inactive.add(model.x[4] <= 2.0) compile_block_linear_constraints(model, 'Amatrix') @@ -122,72 +133,112 @@ def warmstart_model(self): for i in model.s: model.x[i].value = None + if has_numpy and has_scipy: # TODO: we need to somehow label this as a skip rather # than not defining the test class @register_model class LP_compiled_dense_kernel(LP_compiled): - def _get_dense_data(self): assert has_numpy and has_scipy A = numpy.array( - [[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 1., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 1., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 1., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 1., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 1., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 1., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 1., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 1.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [1., 0., 0., 0., 0., 0., 0., 0., 0., 0.], - [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]], - dtype=float) + [ + [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + ], + dtype=float, + ) lb = numpy.array( - [-1.0, -numpy.inf, -1.0, -1.0, - 1.0, 1.0, -1.0, -1.0, - 1.0, 1.0, -1.0, -2.0, - -1.0, -numpy.inf, 0.0, -2.0, - -3.0, -1.0, -numpy.inf, 0.0, - -2.0, -numpy.inf]) - ub = numpy.array([ - numpy.inf, 1.0, -1.0, -1.0, - 1.0, 1.0, -1.0, -1.0, - 1.0, 1.0, 2.0, 1.0, - numpy.inf, 1.0, 0.0, 1.0, - 0.0, numpy.inf, 0.0, 0.0, - numpy.inf, 2.0]) - eq_index = [2,3,4,5,14,19] + [ + -1.0, + -numpy.inf, + -1.0, + -1.0, + 1.0, + 1.0, + -1.0, + -1.0, + 1.0, + 1.0, + -1.0, + -2.0, + -1.0, + -numpy.inf, + 0.0, + -2.0, + -3.0, + -1.0, + -numpy.inf, + 0.0, + -2.0, + -numpy.inf, + ] + ) + ub = numpy.array( + [ + numpy.inf, + 1.0, + -1.0, + -1.0, + 1.0, + 1.0, + -1.0, + -1.0, + 1.0, + 1.0, + 2.0, + 1.0, + numpy.inf, + 1.0, + 0.0, + 1.0, + 0.0, + numpy.inf, + 0.0, + 0.0, + numpy.inf, + 2.0, + ] + ) + eq_index = [2, 3, 4, 5, 14, 19] return A, lb, ub, eq_index def _generate_base_model(self): - self.model = pmo.block() model = self.model model._name = self.description - model.s = list(range(1,13)) - model.x = pmo.variable_dict( - ((i, pmo.variable()) for i in model.s)) + model.s = list(range(1, 13)) + model.x = pmo.variable_dict(((i, pmo.variable()) for i in model.s)) model.x[1].lb = -1 model.x[1].ub = 1 model.x[2].lb = -1 model.x[2].ub = 1 - model.obj = pmo.objective(expr=sum(model.x[i]*((-1)**(i+1)) - for i in model.s)) + model.obj = pmo.objective( + expr=sum(model.x[i] * ((-1) ** (i + 1)) for i in model.s) + ) variable_order = [ model.x[3], model.x[4], @@ -198,7 +249,8 @@ def _generate_base_model(self): model.x[9], model.x[10], model.x[11], - model.x[12]] + model.x[12], + ] return variable_order @@ -206,26 +258,18 @@ def _generate_model(self): x = self._generate_base_model() model = self.model A, lb, ub, eq_index = self._get_dense_data() - model.Amatrix = pmo.matrix_constraint( - A, lb=lb, ub=ub, x=x, sparse=False) + model.Amatrix = pmo.matrix_constraint(A, lb=lb, ub=ub, x=x, sparse=False) for i in eq_index: - assert model.Amatrix[i].lb == \ - model.Amatrix[i].ub - model.Amatrix[i].rhs = \ - model.Amatrix[i].lb + assert model.Amatrix[i].lb == model.Amatrix[i].ub + model.Amatrix[i].rhs = model.Amatrix[i].lb @register_model - class LP_compiled_sparse_kernel( - LP_compiled_dense_kernel): - + class LP_compiled_sparse_kernel(LP_compiled_dense_kernel): def _generate_model(self): x = self._generate_base_model() model = self.model A, lb, ub, eq_index = self._get_dense_data() - model.Amatrix = pmo.matrix_constraint( - A, lb=lb, ub=ub, x=x, sparse=True) + model.Amatrix = pmo.matrix_constraint(A, lb=lb, ub=ub, x=x, sparse=True) for i in eq_index: - assert model.Amatrix[i].lb == \ - model.Amatrix[i].ub - model.Amatrix[i].rhs = \ - model.Amatrix[i].lb + assert model.Amatrix[i].lb == model.Amatrix[i].ub + model.Amatrix[i].rhs = model.Amatrix[i].lb diff --git a/pyomo/solvers/tests/models/LP_constant_objective1.py b/pyomo/solvers/tests/models/LP_constant_objective1.py index 2c46ac9bc6a..306a7a867a2 100644 --- a/pyomo/solvers/tests/models/LP_constant_objective1.py +++ b/pyomo/solvers/tests/models/LP_constant_objective1.py @@ -13,6 +13,7 @@ from pyomo.core import ConcreteModel, Var, Objective, Constraint, NonNegativeReals from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_constant_objective1(_BaseTestModel): """ @@ -24,7 +25,7 @@ class LP_constant_objective1(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -40,9 +41,9 @@ def warmstart_model(self): model = self.model model.x.value = None + @register_model class LP_constant_objective1_kernel(LP_constant_objective1): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -50,5 +51,4 @@ def _generate_model(self): model.x = pmo.variable(domain=NonNegativeReals) model.obj = pmo.objective(0.0) - model.con = pmo.linear_constraint(terms=[(model.x,1.0)], - rhs=1.0) + model.con = pmo.linear_constraint(terms=[(model.x, 1.0)], rhs=1.0) diff --git a/pyomo/solvers/tests/models/LP_constant_objective2.py b/pyomo/solvers/tests/models/LP_constant_objective2.py index 4d5512547ec..17da01bf209 100644 --- a/pyomo/solvers/tests/models/LP_constant_objective2.py +++ b/pyomo/solvers/tests/models/LP_constant_objective2.py @@ -13,6 +13,7 @@ from pyomo.core import ConcreteModel, Var, Objective, Constraint, NonNegativeReals from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_constant_objective2(_BaseTestModel): """ @@ -25,7 +26,7 @@ class LP_constant_objective2(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -33,7 +34,7 @@ def _generate_model(self): model._name = self.description model.x = Var(within=NonNegativeReals) - model.obj = Objective(expr=model.x-model.x) + model.obj = Objective(expr=model.x - model.x) model.con = Constraint(expr=model.x == 1.0) def warmstart_model(self): @@ -41,14 +42,14 @@ def warmstart_model(self): model = self.model model.x.value = 1.0 + @register_model class LP_constant_objective2_kernel(LP_constant_objective2): - def _generate_model(self): self.model = pmo.block() model = self.model model._name = self.description model.x = pmo.variable(domain=NonNegativeReals) - model.obj = pmo.objective(model.x-model.x) + model.obj = pmo.objective(model.x - model.x) model.con = pmo.constraint(model.x == 1.0) diff --git a/pyomo/solvers/tests/models/LP_duals_maximize.py b/pyomo/solvers/tests/models/LP_duals_maximize.py index abc13aabb1e..61d827daa62 100644 --- a/pyomo/solvers/tests/models/LP_duals_maximize.py +++ b/pyomo/solvers/tests/models/LP_duals_maximize.py @@ -10,9 +10,19 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Param, Var, Objective, Constraint, RangeSet, maximize, ConstraintList +from pyomo.core import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + RangeSet, + maximize, + ConstraintList, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_duals_maximize(_BaseTestModel): """ @@ -28,7 +38,7 @@ class LP_duals_maximize(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -38,36 +48,37 @@ def _generate_model(self): model.neg1 = Param(initialize=-1.0, mutable=True) model.pos1 = Param(initialize=1.0, mutable=True) - model.s = RangeSet(1,12) + model.s = RangeSet(1, 12) model.x = Var(model.s) model.x[1].setlb(model.neg1) model.x[1].setub(model.pos1) model.x[2].setlb(model.neg1) model.x[2].setub(model.pos1) - model.obj = Objective(expr=sum(model.x[i]*((-1)**(i)) - for i in model.x.index_set()), - sense=maximize) + model.obj = Objective( + expr=sum(model.x[i] * ((-1) ** (i)) for i in model.x.index_set()), + sense=maximize, + ) model.c = ConstraintList() # to make the variable used in the constraint match the name model.c.add(Constraint.Skip) model.c.add(Constraint.Skip) - model.c.add(model.x[3]>=-1.) - model.c.add(model.x[4]<=1.) - model.c.add(model.x[5]==-1.) - model.c.add(model.x[6]==-1.) - model.c.add(model.x[7]==1.) - model.c.add(model.x[8]==1.) - model.c.add((model.neg1,model.x[9],model.neg1)) - model.c.add((-1.,model.x[10],-1.)) - model.c.add((1.,model.x[11],1.)) - model.c.add((1.,model.x[12],1.)) + model.c.add(model.x[3] >= -1.0) + model.c.add(model.x[4] <= 1.0) + model.c.add(model.x[5] == -1.0) + model.c.add(model.x[6] == -1.0) + model.c.add(model.x[7] == 1.0) + model.c.add(model.x[8] == 1.0) + model.c.add((model.neg1, model.x[9], model.neg1)) + model.c.add((-1.0, model.x[10], -1.0)) + model.c.add((1.0, model.x[11], 1.0)) + model.c.add((1.0, model.x[12], 1.0)) model.c_inactive = ConstraintList() # to make the variable used in the constraint match the name model.c_inactive.add(Constraint.Skip) model.c_inactive.add(Constraint.Skip) - model.c_inactive.add(model.x[3]>=-2.) - model.c_inactive.add(model.x[4]<=2.) + model.c_inactive.add(model.x[3] >= -2.0) + model.c_inactive.add(model.x[4] <= 2.0) def warmstart_model(self): assert self.model is not None @@ -75,9 +86,9 @@ def warmstart_model(self): for i in model.s: model.x[i].value = None + @register_model class LP_duals_maximize_kernel(LP_duals_maximize): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -86,29 +97,28 @@ def _generate_model(self): model.neg1 = pmo.parameter(value=-1.0) model.pos1 = pmo.parameter(value=1.0) - model.s = list(range(1,13)) - model.x = pmo.variable_dict( - (i, pmo.variable()) for i in model.s) + model.s = list(range(1, 13)) + model.x = pmo.variable_dict((i, pmo.variable()) for i in model.s) model.x[1].lb = model.neg1 model.x[1].ub = model.pos1 model.x[2].lb = model.neg1 model.x[2].ub = model.pos1 - model.obj = pmo.objective(sum(model.x[i]*((-1)**(i)) - for i in model.s), - sense=pmo.maximize) + model.obj = pmo.objective( + sum(model.x[i] * ((-1) ** (i)) for i in model.s), sense=pmo.maximize + ) model.c = pmo.constraint_dict() - model.c[3] = pmo.constraint(model.x[3]>=-1.) - model.c[4] = pmo.constraint(model.x[4]<=1.) - model.c[5] = pmo.constraint(model.x[5]==-1.) - model.c[6] = pmo.constraint(model.x[6]==-1.) - model.c[7] = pmo.constraint(model.x[7]==1.) - model.c[8] = pmo.constraint(model.x[8]==1.) - model.c[9] = pmo.constraint((model.neg1,model.x[9],model.neg1)) - model.c[10] = pmo.constraint((-1.,model.x[10],-1.)) - model.c[11] = pmo.constraint((1.,model.x[11],1.)) - model.c[12] = pmo.constraint((1.,model.x[12],1.)) + model.c[3] = pmo.constraint(model.x[3] >= -1.0) + model.c[4] = pmo.constraint(model.x[4] <= 1.0) + model.c[5] = pmo.constraint(model.x[5] == -1.0) + model.c[6] = pmo.constraint(model.x[6] == -1.0) + model.c[7] = pmo.constraint(model.x[7] == 1.0) + model.c[8] = pmo.constraint(model.x[8] == 1.0) + model.c[9] = pmo.constraint((model.neg1, model.x[9], model.neg1)) + model.c[10] = pmo.constraint((-1.0, model.x[10], -1.0)) + model.c[11] = pmo.constraint((1.0, model.x[11], 1.0)) + model.c[12] = pmo.constraint((1.0, model.x[12], 1.0)) model.c_inactive = pmo.constraint_dict() # to make the variable used in the constraint match the name - model.c_inactive[3] = pmo.constraint(model.x[3]>=-2.) - model.c_inactive[4] = pmo.constraint(model.x[4]<=2.) + model.c_inactive[3] = pmo.constraint(model.x[3] >= -2.0) + model.c_inactive[4] = pmo.constraint(model.x[4] <= 2.0) diff --git a/pyomo/solvers/tests/models/LP_duals_minimize.py b/pyomo/solvers/tests/models/LP_duals_minimize.py index b150b68bbef..77471d0182c 100644 --- a/pyomo/solvers/tests/models/LP_duals_minimize.py +++ b/pyomo/solvers/tests/models/LP_duals_minimize.py @@ -10,9 +10,17 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Var, Objective, Constraint, RangeSet, ConstraintList +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Constraint, + RangeSet, + ConstraintList, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_duals_minimize(_BaseTestModel): """ @@ -28,7 +36,7 @@ class LP_duals_minimize(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = None @@ -36,35 +44,36 @@ def _generate_model(self): model = self.model model._name = self.description - model.s = RangeSet(1,12) + model.s = RangeSet(1, 12) model.x = Var(model.s) model.x[1].setlb(-1) model.x[1].setub(1) model.x[2].setlb(-1) model.x[2].setub(1) - model.obj = Objective(expr=sum(model.x[i]*((-1)**(i+1)) - for i in model.x.index_set())) + model.obj = Objective( + expr=sum(model.x[i] * ((-1) ** (i + 1)) for i in model.x.index_set()) + ) model.c = ConstraintList() # to make the variable used in the constraint match the name model.c.add(Constraint.Skip) model.c.add(Constraint.Skip) - model.c.add(model.x[3]>=-1.) - model.c.add(model.x[4]<=1.) - model.c.add(model.x[5]==-1.) - model.c.add(model.x[6]==-1.) - model.c.add(model.x[7]==1.) - model.c.add(model.x[8]==1.) - model.c.add((-1.,model.x[9],-1.)) - model.c.add((-1.,model.x[10],-1.)) - model.c.add((1.,model.x[11],1.)) - model.c.add((1.,model.x[12],1.)) + model.c.add(model.x[3] >= -1.0) + model.c.add(model.x[4] <= 1.0) + model.c.add(model.x[5] == -1.0) + model.c.add(model.x[6] == -1.0) + model.c.add(model.x[7] == 1.0) + model.c.add(model.x[8] == 1.0) + model.c.add((-1.0, model.x[9], -1.0)) + model.c.add((-1.0, model.x[10], -1.0)) + model.c.add((1.0, model.x[11], 1.0)) + model.c.add((1.0, model.x[12], 1.0)) model.c_inactive = ConstraintList() # to make the variable used in the constraint match the name model.c_inactive.add(Constraint.Skip) model.c_inactive.add(Constraint.Skip) - model.c_inactive.add(model.x[3]>=-2.) - model.c_inactive.add(model.x[4]<=2.) + model.c_inactive.add(model.x[3] >= -2.0) + model.c_inactive.add(model.x[4] <= 2.0) def warmstart_model(self): assert self.model is not None @@ -72,38 +81,36 @@ def warmstart_model(self): for i in model.s: model.x[i].value = None + @register_model class LP_duals_minimize_kernel(LP_duals_minimize): - def _generate_model(self): self.model = None self.model = pmo.block() model = self.model model._name = self.description - model.s = list(range(1,13)) - model.x = pmo.variable_dict( - (i, pmo.variable()) for i in model.s) + model.s = list(range(1, 13)) + model.x = pmo.variable_dict((i, pmo.variable()) for i in model.s) model.x[1].lb = -1 model.x[1].ub = 1 model.x[2].lb = -1 model.x[2].ub = 1 - model.obj = pmo.objective(sum(model.x[i]*((-1)**(i+1)) - for i in model.s)) + model.obj = pmo.objective(sum(model.x[i] * ((-1) ** (i + 1)) for i in model.s)) model.c = pmo.constraint_dict() # to make the variable used in the constraint match the name - model.c[3] = pmo.constraint(model.x[3]>=-1.) - model.c[4] = pmo.constraint(model.x[4]<=1.) - model.c[5] = pmo.constraint(model.x[5]==-1.) - model.c[6] = pmo.constraint(model.x[6]==-1.) - model.c[7] = pmo.constraint(model.x[7]==1.) - model.c[8] = pmo.constraint(model.x[8]==1.) - model.c[9] = pmo.constraint((-1.,model.x[9],-1.)) - model.c[10] = pmo.constraint((-1.,model.x[10],-1.)) - model.c[11] = pmo.constraint((1.,model.x[11],1.)) - model.c[12] = pmo.constraint((1.,model.x[12],1.)) + model.c[3] = pmo.constraint(model.x[3] >= -1.0) + model.c[4] = pmo.constraint(model.x[4] <= 1.0) + model.c[5] = pmo.constraint(model.x[5] == -1.0) + model.c[6] = pmo.constraint(model.x[6] == -1.0) + model.c[7] = pmo.constraint(model.x[7] == 1.0) + model.c[8] = pmo.constraint(model.x[8] == 1.0) + model.c[9] = pmo.constraint((-1.0, model.x[9], -1.0)) + model.c[10] = pmo.constraint((-1.0, model.x[10], -1.0)) + model.c[11] = pmo.constraint((1.0, model.x[11], 1.0)) + model.c[12] = pmo.constraint((1.0, model.x[12], 1.0)) model.c_inactive = pmo.constraint_dict() # to make the variable used in the constraint match the name - model.c_inactive[3] = pmo.constraint(model.x[3]>=-2.) - model.c_inactive[4] = pmo.constraint(model.x[4]<=2.) + model.c_inactive[3] = pmo.constraint(model.x[3] >= -2.0) + model.c_inactive[4] = pmo.constraint(model.x[4] <= 2.0) diff --git a/pyomo/solvers/tests/models/LP_inactive_index.py b/pyomo/solvers/tests/models/LP_inactive_index.py index b269c4e31ff..d3fdd5b32ca 100644 --- a/pyomo/solvers/tests/models/LP_inactive_index.py +++ b/pyomo/solvers/tests/models/LP_inactive_index.py @@ -10,21 +10,32 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Var, Objective, Constraint, Set, ConstraintList, Block +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Constraint, + Set, + ConstraintList, + Block, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model -def inactive_index_LP_obj_rule(model,i): + +def inactive_index_LP_obj_rule(model, i): if i == 1: - return model.x-model.y + return model.x - model.y else: - return -model.x+model.y+model.z + return -model.x + model.y + model.z + -def inactive_index_LP_c2_rule(model,i): +def inactive_index_LP_c2_rule(model, i): if i == 1: return model.y >= -2 else: return model.x <= 2 + @register_model class LP_inactive_index(_BaseTestModel): """ @@ -36,32 +47,30 @@ class LP_inactive_index(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() model = self.model model._name = self.description - model.s = Set(initialize=[1,2]) + model.s = Set(initialize=[1, 2]) model.x = Var() model.y = Var() - model.z = Var(bounds=(0,None)) + model.z = Var(bounds=(0, None)) - model.obj = Objective(model.s, - rule=inactive_index_LP_obj_rule) - model.OBJ = Objective(expr=model.x+model.y) + model.obj = Objective(model.s, rule=inactive_index_LP_obj_rule) + model.OBJ = Objective(expr=model.x + model.y) model.obj[1].deactivate() model.OBJ.deactivate() model.c1 = ConstraintList() - model.c1.add(model.x<=1) # index=1 - model.c1.add(model.x>=-1) # index=2 - model.c1.add(model.y<=1) # index=3 - model.c1.add(model.y>=-1) # index=4 + model.c1.add(model.x <= 1) # index=1 + model.c1.add(model.x >= -1) # index=2 + model.c1.add(model.y <= 1) # index=3 + model.c1.add(model.y >= -1) # index=4 model.c1[1].deactivate() model.c1[4].deactivate() - model.c2 = Constraint(model.s, - rule=inactive_index_LP_c2_rule) + model.c2 = Constraint(model.s, rule=inactive_index_LP_c2_rule) model.b = Block() model.b.c = Constraint(expr=model.z >= 2) @@ -80,38 +89,36 @@ def warmstart_model(self): model.y.value = None model.z.value = 2.0 + @register_model class LP_inactive_index_kernel(LP_inactive_index): - def _generate_model(self): self.model = pmo.block() model = self.model model._name = self.description - model.s = [1,2] + model.s = [1, 2] model.x = pmo.variable() model.y = pmo.variable() model.z = pmo.variable(lb=0) model.obj = pmo.objective_dict() for i in model.s: - model.obj[i] = pmo.objective( - inactive_index_LP_obj_rule(model,i)) + model.obj[i] = pmo.objective(inactive_index_LP_obj_rule(model, i)) - model.OBJ = pmo.objective(model.x+model.y) + model.OBJ = pmo.objective(model.x + model.y) model.obj[1].deactivate() model.OBJ.deactivate() model.c1 = pmo.constraint_dict() - model.c1[1] = pmo.constraint(model.x<=1) - model.c1[2] = pmo.constraint(model.x>=-1) - model.c1[3] = pmo.constraint(model.y<=1) - model.c1[4] = pmo.constraint(model.y>=-1) + model.c1[1] = pmo.constraint(model.x <= 1) + model.c1[2] = pmo.constraint(model.x >= -1) + model.c1[3] = pmo.constraint(model.y <= 1) + model.c1[4] = pmo.constraint(model.y >= -1) model.c1[1].deactivate() model.c1[4].deactivate() model.c2 = pmo.constraint_dict() for i in model.s: - model.c2[i] = pmo.constraint( - inactive_index_LP_c2_rule(model, i)) + model.c2[i] = pmo.constraint(inactive_index_LP_c2_rule(model, i)) model.b = pmo.block() model.b.c = pmo.constraint(model.z >= 2) diff --git a/pyomo/solvers/tests/models/LP_infeasible1.py b/pyomo/solvers/tests/models/LP_infeasible1.py index d0801bebbc3..28243574a37 100644 --- a/pyomo/solvers/tests/models/LP_infeasible1.py +++ b/pyomo/solvers/tests/models/LP_infeasible1.py @@ -14,6 +14,7 @@ from pyomo.opt import TerminationCondition from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_infeasible1(_BaseTestModel): """ @@ -27,17 +28,17 @@ class LP_infeasible1(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) self.solve_should_fail = True - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() model = self.model model._name = self.description - model.x = Var(bounds=(1,None)) - model.y = Var(bounds=(1,None)) - model.o = Objective(expr=model.x+model.y) - model.c = Constraint(expr=model.x+model.y <= 0) + model.x = Var(bounds=(1, None)) + model.y = Var(bounds=(1, None)) + model.o = Objective(expr=model.x + model.y) + model.c = Constraint(expr=model.x + model.y <= 0) def warmstart_model(self): assert self.model is not None @@ -47,15 +48,19 @@ def warmstart_model(self): def post_solve_test_validation(self, tester, results): if tester is None: - assert results['Solver'][0]['termination condition'] == \ - TerminationCondition.infeasible + assert ( + results['Solver'][0]['termination condition'] + == TerminationCondition.infeasible + ) else: - tester.assertEqual(results['Solver'][0]['termination condition'], - TerminationCondition.infeasible) + tester.assertEqual( + results['Solver'][0]['termination condition'], + TerminationCondition.infeasible, + ) + @register_model class LP_infeasible1_kernel(LP_infeasible1): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -63,5 +68,5 @@ def _generate_model(self): model.x = pmo.variable(lb=1) model.y = pmo.variable(lb=1) - model.o = pmo.objective(model.x+model.y) - model.c = pmo.constraint(model.x+model.y <= 0) + model.o = pmo.objective(model.x + model.y) + model.c = pmo.constraint(model.x + model.y <= 0) diff --git a/pyomo/solvers/tests/models/LP_infeasible2.py b/pyomo/solvers/tests/models/LP_infeasible2.py index 9550026f1f3..383267c0e3c 100644 --- a/pyomo/solvers/tests/models/LP_infeasible2.py +++ b/pyomo/solvers/tests/models/LP_infeasible2.py @@ -14,6 +14,7 @@ from pyomo.opt import TerminationCondition from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_infeasible2(_BaseTestModel): """ @@ -27,17 +28,17 @@ class LP_infeasible2(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) self.solve_should_fail = True - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() model = self.model model._name = self.description - model.x = Var(bounds=(1,None)) - model.y = Var(bounds=(1,None)) - model.o = Objective(expr=-model.x-model.y, sense=maximize) - model.c = Constraint(expr=model.x+model.y <= 0) + model.x = Var(bounds=(1, None)) + model.y = Var(bounds=(1, None)) + model.o = Objective(expr=-model.x - model.y, sense=maximize) + model.c = Constraint(expr=model.x + model.y <= 0) def warmstart_model(self): assert self.model is not None @@ -47,15 +48,19 @@ def warmstart_model(self): def post_solve_test_validation(self, tester, results): if tester is None: - assert results['Solver'][0]['termination condition'] == \ - TerminationCondition.infeasible + assert ( + results['Solver'][0]['termination condition'] + == TerminationCondition.infeasible + ) else: - tester.assertEqual(results['Solver'][0]['termination condition'], - TerminationCondition.infeasible) + tester.assertEqual( + results['Solver'][0]['termination condition'], + TerminationCondition.infeasible, + ) + @register_model class LP_infeasible2_kernel(LP_infeasible2): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -63,5 +68,5 @@ def _generate_model(self): model.x = pmo.variable(lb=1) model.y = pmo.variable(lb=1) - model.o = pmo.objective(-model.x-model.y, sense=pmo.maximize) - model.c = pmo.constraint(model.x+model.y <= 0) + model.o = pmo.objective(-model.x - model.y, sense=pmo.maximize) + model.c = pmo.constraint(model.x + model.y <= 0) diff --git a/pyomo/solvers/tests/models/LP_piecewise.py b/pyomo/solvers/tests/models/LP_piecewise.py index ce85158b906..f6350b38591 100644 --- a/pyomo/solvers/tests/models/LP_piecewise.py +++ b/pyomo/solvers/tests/models/LP_piecewise.py @@ -12,6 +12,7 @@ from pyomo.core import ConcreteModel, Var, Objective, Piecewise from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_piecewise(_BaseTestModel): """ @@ -23,7 +24,7 @@ class LP_piecewise(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -34,12 +35,15 @@ def _generate_model(self): model.y = Var() model.obj = Objective(expr=model.y) - model.p = Piecewise(model.y, model.x, - pw_pts=[-1,0,1], - f_rule=[1,0.5,1], - pw_repn='SOS2', - pw_constr_type='LB', - unbounded_domain_var=True) + model.p = Piecewise( + model.y, + model.x, + pw_pts=[-1, 0, 1], + f_rule=[1, 0.5, 1], + pw_repn='SOS2', + pw_constr_type='LB', + unbounded_domain_var=True, + ) def warmstart_model(self): assert self.model is not None @@ -47,9 +51,9 @@ def warmstart_model(self): model.x.value = None model.y.value = 1.0 + @register_model class LP_piecewise_nosuffixes(LP_piecewise): - description = "LP_piecewise_nosuffixes" test_pickling = False diff --git a/pyomo/solvers/tests/models/LP_simple.py b/pyomo/solvers/tests/models/LP_simple.py index bbcb4e60340..3449a657f79 100644 --- a/pyomo/solvers/tests/models/LP_simple.py +++ b/pyomo/solvers/tests/models/LP_simple.py @@ -10,9 +10,18 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Param, Var, Expression, Objective, Constraint, NonNegativeReals +from pyomo.core import ( + ConcreteModel, + Param, + Var, + Expression, + Objective, + Constraint, + NonNegativeReals, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_simple(_BaseTestModel): """ @@ -24,7 +33,7 @@ class LP_simple(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -39,17 +48,18 @@ def _generate_model(self): model.y = Var(within=NonNegativeReals) model.z1 = Var(bounds=(float('-inf'), float('inf'))) model.z2 = Var() - model.dummy_expr1 = Expression(initialize=model.a1*model.a2[1]) - model.dummy_expr2 = Expression(initialize=model.y/model.a3*model.a4[1]) + model.dummy_expr1 = Expression(initialize=model.a1 * model.a2[1]) + model.dummy_expr2 = Expression(initialize=model.y / model.a3 * model.a4[1]) model.inactive_obj = Objective( - expr=model.x + 3.0*model.y + 1.0 + model.z1 - model.z2) + expr=model.x + 3.0 * model.y + 1.0 + model.z1 - model.z2 + ) model.inactive_obj.deactivate() model.p = Param(mutable=True, initialize=0.0) model.obj = Objective(expr=model.p + model.inactive_obj) model.c1 = Constraint(expr=model.dummy_expr1 <= model.dummy_expr2) - model.c2 = Constraint(expr=(2.0, model.x/model.a3 - model.y, 10)) + model.c2 = Constraint(expr=(2.0, model.x / model.a3 - model.y, 10)) model.c3 = Constraint(expr=(0, model.z1 + 1, 10)) model.c4 = Constraint(expr=(-10, model.z2 + 1, 0)) @@ -59,34 +69,33 @@ def warmstart_model(self): model.x.value = None model.y.value = 1.0 + @register_model class LP_simple_kernel(LP_simple): - def _generate_model(self): self.model = pmo.block() model = self.model model._name = self.description model.a1 = pmo.parameter(value=1.0) - model.a2 = pmo.parameter_dict( - {1: pmo.parameter(value=1.0)}) + model.a2 = pmo.parameter_dict({1: pmo.parameter(value=1.0)}) model.a3 = pmo.parameter(value=1.0) - model.a4 = pmo.parameter_dict( - {1: pmo.parameter(value=1.0)}) + model.a4 = pmo.parameter_dict({1: pmo.parameter(value=1.0)}) model.x = pmo.variable(domain=NonNegativeReals) model.y = pmo.variable(domain=NonNegativeReals) model.z1 = pmo.variable() model.z2 = pmo.variable() - model.dummy_expr1 = pmo.expression(model.a1*model.a2[1]) - model.dummy_expr2 = pmo.expression(model.y/model.a3*model.a4[1]) + model.dummy_expr1 = pmo.expression(model.a1 * model.a2[1]) + model.dummy_expr2 = pmo.expression(model.y / model.a3 * model.a4[1]) model.inactive_obj = pmo.objective( - model.x + 3.0*model.y + 1.0 + model.z1 - model.z2) + model.x + 3.0 * model.y + 1.0 + model.z1 - model.z2 + ) model.inactive_obj.deactivate() model.p = pmo.parameter(value=0.0) model.obj = pmo.objective(model.p + model.inactive_obj) model.c1 = pmo.constraint(model.dummy_expr1 <= pmo.noclone(model.dummy_expr2)) - model.c2 = pmo.constraint((2.0, model.x/model.a3 - model.y, 10)) + model.c2 = pmo.constraint((2.0, model.x / model.a3 - model.y, 10)) model.c3 = pmo.constraint((0, model.z1 + 1, 10)) model.c4 = pmo.constraint((-10, model.z2 + 1, 0)) diff --git a/pyomo/solvers/tests/models/LP_trivial_constraints.py b/pyomo/solvers/tests/models/LP_trivial_constraints.py index 19c29dc962a..096c9e71712 100644 --- a/pyomo/solvers/tests/models/LP_trivial_constraints.py +++ b/pyomo/solvers/tests/models/LP_trivial_constraints.py @@ -11,10 +11,16 @@ import pyomo.kernel as pmo from pyomo.core import ( - ConcreteModel, Var, Objective, Constraint, RangeSet, ConstraintList + ConcreteModel, + Var, + Objective, + Constraint, + RangeSet, + ConstraintList, ) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_trivial_constraints(_BaseTestModel): """ @@ -27,7 +33,7 @@ class LP_trivial_constraints(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = None @@ -61,13 +67,12 @@ def _generate_model(self): assert cdata.upper == 1 assert cdata.body() == 0 assert not cdata.equality - cdata = model.c.add((1,1)) + cdata = model.c.add((1, 1)) assert cdata.lower == 1 assert cdata.upper == 1 assert cdata.body() == 1 assert cdata.equality - model.d = Constraint( - rule=lambda m: (float('-inf'), m.x, float('inf'))) + model.d = Constraint(rule=lambda m: (float('-inf'), m.x, float('inf'))) assert not model.d.equality def warmstart_model(self): @@ -86,9 +91,9 @@ def post_solve_test_validation(self, tester, results): tester.assertIn(id(self.model.c[i]), symbol_map.byObject) tester.assertNotIn(id(self.model.d), symbol_map.byObject) + @register_model class LP_trivial_constraints_kernel(LP_trivial_constraints): - def _generate_model(self): self.model = None self.model = pmo.block() @@ -121,7 +126,7 @@ def _generate_model(self): assert cdata.ub == 1 assert cdata.body() == 0 assert not cdata.equality - cdata = model.c[7] = pmo.constraint((1,1)) + cdata = model.c[7] = pmo.constraint((1, 1)) assert cdata.lb == 1 assert cdata.ub == 1 assert cdata.body() == 1 diff --git a/pyomo/solvers/tests/models/LP_unbounded.py b/pyomo/solvers/tests/models/LP_unbounded.py index 5ac0e4e55ec..e3173e2ff07 100644 --- a/pyomo/solvers/tests/models/LP_unbounded.py +++ b/pyomo/solvers/tests/models/LP_unbounded.py @@ -14,6 +14,7 @@ from pyomo.opt import TerminationCondition from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_unbounded(_BaseTestModel): """ @@ -26,7 +27,7 @@ class LP_unbounded(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) self.solve_should_fail = True - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -36,7 +37,7 @@ def _generate_model(self): model.x = Var() model.y = Var() - model.o = Objective(expr=model.x+model.y) + model.o = Objective(expr=model.x + model.y) def warmstart_model(self): assert self.model is not None @@ -46,17 +47,22 @@ def warmstart_model(self): def post_solve_test_validation(self, tester, results): if tester is None: - assert results['Solver'][0]['termination condition'] in \ - (TerminationCondition.unbounded, - TerminationCondition.infeasibleOrUnbounded) + assert results['Solver'][0]['termination condition'] in ( + TerminationCondition.unbounded, + TerminationCondition.infeasibleOrUnbounded, + ) else: - tester.assertIn(results['Solver'][0]['termination condition'], - (TerminationCondition.unbounded, - TerminationCondition.infeasibleOrUnbounded)) + tester.assertIn( + results['Solver'][0]['termination condition'], + ( + TerminationCondition.unbounded, + TerminationCondition.infeasibleOrUnbounded, + ), + ) + @register_model class LP_unbounded_kernel(LP_unbounded): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -65,4 +71,4 @@ def _generate_model(self): model.x = pmo.variable() model.y = pmo.variable() - model.o = pmo.objective(model.x+model.y) + model.o = pmo.objective(model.x + model.y) diff --git a/pyomo/solvers/tests/models/LP_unique_duals.py b/pyomo/solvers/tests/models/LP_unique_duals.py index 4e0c4f842e1..624181eb27d 100644 --- a/pyomo/solvers/tests/models/LP_unique_duals.py +++ b/pyomo/solvers/tests/models/LP_unique_duals.py @@ -9,28 +9,42 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.core import ConcreteModel, Param, Var, Objective, Constraint, RangeSet, NonNegativeReals, Suffix, sum_product +from pyomo.core import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + RangeSet, + NonNegativeReals, + Suffix, + sum_product, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + def c_rule(model, j): - return 5 if j<5 else 9.0/2 + return 5 if j < 5 else 9.0 / 2 + def b_rule(model, i): if i == 4: i = 5 elif i == 5: i = 4 - return 5 if i<5 else 7.0/2 + return 5 if i < 5 else 7.0 / 2 + def A_rule(model, i, j): if i == 4: i = 5 elif i == 5: i = 4 - return 2 if i==j else 1 + return 2 if i == j else 1 + def primalcon_rule(model, i): - return sum(model.A[i,j]*model.x[j] for j in model.N) >= model.b[i] + return sum(model.A[i, j] * model.x[j] for j in model.N) >= model.b[i] @register_model @@ -44,7 +58,7 @@ class LP_unique_duals(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = None @@ -54,8 +68,8 @@ def _generate_model(self): n = 7 m = 7 - model.N = RangeSet(1,n) - model.M = RangeSet(1,m) + model.N = RangeSet(1, n) + model.M = RangeSet(1, m) model.c = Param(model.N, rule=c_rule) @@ -70,8 +84,8 @@ def _generate_model(self): model.primalcon = Constraint(model.M, rule=primalcon_rule) - #model.dual = Suffix(direction=Suffix.IMPORT) - #model.rc = Suffix(direction=Suffix.IMPORT) + # model.dual = Suffix(direction=Suffix.IMPORT) + # model.rc = Suffix(direction=Suffix.IMPORT) model.slack = Suffix(direction=Suffix.IMPORT) model.urc = Suffix(direction=Suffix.IMPORT) model.lrc = Suffix(direction=Suffix.IMPORT) @@ -83,4 +97,3 @@ def warmstart_model(self): model.x[i] = None for i in model.y: model.y[i] = None - diff --git a/pyomo/solvers/tests/models/LP_unused_vars.json b/pyomo/solvers/tests/models/LP_unused_vars.json index 3c8ff51a3dc..1eaa5855642 100644 --- a/pyomo/solvers/tests/models/LP_unused_vars.json +++ b/pyomo/solvers/tests/models/LP_unused_vars.json @@ -9,11 +9,11 @@ "stale": true, "value": null }, - "B[1].b.X_initialy_stale[1]": { + "B[1].b.X_initially_stale[1]": { "stale": true, "value": null }, - "B[1].b.X_initialy_stale[2]": { + "B[1].b.X_initially_stale[2]": { "stale": true, "value": null }, @@ -25,11 +25,11 @@ "stale": true, "value": null }, - "B[1].b.X_unused_initialy_stale[1]": { + "B[1].b.X_unused_initially_stale[1]": { "stale": true, "value": null }, - "B[1].b.X_unused_initialy_stale[2]": { + "B[1].b.X_unused_initially_stale[2]": { "stale": true, "value": null }, @@ -58,7 +58,7 @@ "stale": true, "value": null }, - "B[1].b.x_initialy_stale": { + "B[1].b.x_initially_stale": { "stale": true, "value": null }, @@ -66,7 +66,7 @@ "stale": true, "value": null }, - "B[1].b.x_unused_initialy_stale": { + "B[1].b.x_unused_initially_stale": { "stale": true, "value": null }, @@ -80,11 +80,11 @@ "stale": true, "value": null }, - "B[2].b.X_initialy_stale[1]": { + "B[2].b.X_initially_stale[1]": { "stale": true, "value": null }, - "B[2].b.X_initialy_stale[2]": { + "B[2].b.X_initially_stale[2]": { "stale": true, "value": null }, @@ -96,11 +96,11 @@ "stale": true, "value": null }, - "B[2].b.X_unused_initialy_stale[1]": { + "B[2].b.X_unused_initially_stale[1]": { "stale": true, "value": null }, - "B[2].b.X_unused_initialy_stale[2]": { + "B[2].b.X_unused_initially_stale[2]": { "stale": true, "value": null }, @@ -129,7 +129,7 @@ "stale": true, "value": null }, - "B[2].b.x_initialy_stale": { + "B[2].b.x_initially_stale": { "stale": true, "value": null }, @@ -137,7 +137,7 @@ "stale": true, "value": null }, - "B[2].b.x_unused_initialy_stale": { + "B[2].b.x_unused_initially_stale": { "stale": true, "value": null }, @@ -151,12 +151,12 @@ "stale": false, "value": 1.0 }, - "X_initialy_stale[1]": { + "X_initially_stale[1]": { "rc": 0.0, "stale": false, "value": 0.0 }, - "X_initialy_stale[2]": { + "X_initially_stale[2]": { "rc": 0.0, "stale": false, "value": 1.0 @@ -169,11 +169,11 @@ "stale": true, "value": -1.0 }, - "X_unused_initialy_stale[1]": { + "X_unused_initially_stale[1]": { "stale": true, "value": -1.0 }, - "X_unused_initialy_stale[2]": { + "X_unused_initially_stale[2]": { "stale": true, "value": -1.0 }, @@ -187,11 +187,11 @@ "stale": true, "value": null }, - "b.b.X_initialy_stale[1]": { + "b.b.X_initially_stale[1]": { "stale": true, "value": null }, - "b.b.X_initialy_stale[2]": { + "b.b.X_initially_stale[2]": { "stale": true, "value": null }, @@ -203,11 +203,11 @@ "stale": true, "value": null }, - "b.b.X_unused_initialy_stale[1]": { + "b.b.X_unused_initially_stale[1]": { "stale": true, "value": null }, - "b.b.X_unused_initialy_stale[2]": { + "b.b.X_unused_initially_stale[2]": { "stale": true, "value": null }, @@ -236,7 +236,7 @@ "stale": true, "value": null }, - "b.b.x_initialy_stale": { + "b.b.x_initially_stale": { "stale": true, "value": null }, @@ -244,7 +244,7 @@ "stale": true, "value": null }, - "b.b.x_unused_initialy_stale": { + "b.b.x_unused_initially_stale": { "stale": true, "value": null }, @@ -301,7 +301,7 @@ "stale": false, "value": 1.0 }, - "x_initialy_stale": { + "x_initially_stale": { "rc": 0.0, "stale": false, "value": 1.0 @@ -310,7 +310,7 @@ "stale": true, "value": -1.0 }, - "x_unused_initialy_stale": { + "x_unused_initially_stale": { "stale": true, "value": -1.0 } diff --git a/pyomo/solvers/tests/models/LP_unused_vars.py b/pyomo/solvers/tests/models/LP_unused_vars.py index 52e2a6af523..5e6b40fa4bf 100644 --- a/pyomo/solvers/tests/models/LP_unused_vars.py +++ b/pyomo/solvers/tests/models/LP_unused_vars.py @@ -10,9 +10,18 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Var, Objective, Set, ConstraintList, sum_product, Block +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Set, + ConstraintList, + sum_product, + Block, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class LP_unused_vars(_BaseTestModel): """ @@ -26,7 +35,7 @@ class LP_unused_vars(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) self.disable_suffix_tests = True - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = None @@ -34,44 +43,46 @@ def _generate_model(self): model = self.model model._name = self.description - model.s = Set(initialize=[1,2]) + model.s = Set(initialize=[1, 2]) model.x_unused = Var() model.x_unused.stale = False - model.x_unused_initialy_stale = Var() - model.x_unused_initialy_stale.stale = True + model.x_unused_initially_stale = Var() + model.x_unused_initially_stale.stale = True model.X_unused = Var(model.s) - model.X_unused_initialy_stale = Var(model.s) + model.X_unused_initially_stale = Var(model.s) for i in model.s: model.X_unused[i].stale = False - model.X_unused_initialy_stale[i].stale = True + model.X_unused_initially_stale[i].stale = True model.x = Var() model.x.stale = False - model.x_initialy_stale = Var() - model.x_initialy_stale.stale = True + model.x_initially_stale = Var() + model.x_initially_stale.stale = True model.X = Var(model.s) - model.X_initialy_stale = Var(model.s) + model.X_initially_stale = Var(model.s) for i in model.s: model.X[i].stale = False - model.X_initialy_stale[i].stale = True + model.X_initially_stale[i].stale = True - model.obj = Objective(expr= model.x + \ - model.x_initialy_stale + \ - sum_product(model.X) + \ - sum_product(model.X_initialy_stale)) + model.obj = Objective( + expr=model.x + + model.x_initially_stale + + sum_product(model.X) + + sum_product(model.X_initially_stale) + ) model.c = ConstraintList() - model.c.add( model.x >= 1 ) - model.c.add( model.x_initialy_stale >= 1 ) - model.c.add( model.X[1] >= 0 ) - model.c.add( model.X[2] >= 1 ) - model.c.add( model.X_initialy_stale[1] >= 0 ) - model.c.add( model.X_initialy_stale[2] >= 1 ) + model.c.add(model.x >= 1) + model.c.add(model.x_initially_stale >= 1) + model.c.add(model.X[1] >= 0) + model.c.add(model.X[2] >= 1) + model.c.add(model.X_initially_stale[1] >= 0) + model.c.add(model.X_initially_stale[2] >= 1) # Test that stale flags get set # on inactive blocks (where "inactive blocks" mean blocks @@ -100,72 +111,74 @@ def warmstart_model(self): assert self.model is not None model = self.model model.x_unused.value = -1.0 - model.x_unused_initialy_stale.value = -1.0 - model.x_unused_initialy_stale.stale = True + model.x_unused_initially_stale.value = -1.0 + model.x_unused_initially_stale.stale = True for i in model.s: model.X_unused[i].value = -1.0 - model.X_unused_initialy_stale[i].value = -1.0 - model.X_unused_initialy_stale[i].stale = True + model.X_unused_initially_stale[i].value = -1.0 + model.X_unused_initially_stale[i].stale = True model.x.value = -1.0 - model.x_initialy_stale.value = -1.0 - model.x_initialy_stale.stale = True + model.x_initially_stale.value = -1.0 + model.x_initially_stale.stale = True for i in model.s: model.X[i].value = -1.0 - model.X_initialy_stale[i].value = -1.0 - model.X_initialy_stale[i].stale = True + model.X_initially_stale[i].value = -1.0 + model.X_initially_stale[i].stale = True + @register_model class LP_unused_vars_kernel(LP_unused_vars): - def _generate_model(self): self.model = None self.model = pmo.block() model = self.model model._name = self.description - model.s = [1,2] + model.s = [1, 2] model.x_unused = pmo.variable() model.x_unused.stale = False - model.x_unused_initialy_stale = pmo.variable() - model.x_unused_initialy_stale.stale = True + model.x_unused_initially_stale = pmo.variable() + model.x_unused_initially_stale.stale = True - model.X_unused = pmo.variable_dict( - (i, pmo.variable()) for i in model.s) - model.X_unused_initialy_stale = pmo.variable_dict( - (i, pmo.variable()) for i in model.s) + model.X_unused = pmo.variable_dict((i, pmo.variable()) for i in model.s) + model.X_unused_initially_stale = pmo.variable_dict( + (i, pmo.variable()) for i in model.s + ) for i in model.X_unused: model.X_unused[i].stale = False - model.X_unused_initialy_stale[i].stale = True + model.X_unused_initially_stale[i].stale = True model.x = pmo.variable() model.x.stale = False - model.x_initialy_stale = pmo.variable() - model.x_initialy_stale.stale = True + model.x_initially_stale = pmo.variable() + model.x_initially_stale.stale = True - model.X = pmo.variable_dict( - (i, pmo.variable()) for i in model.s) - model.X_initialy_stale = pmo.variable_dict( - (i, pmo.variable()) for i in model.s) + model.X = pmo.variable_dict((i, pmo.variable()) for i in model.s) + model.X_initially_stale = pmo.variable_dict( + (i, pmo.variable()) for i in model.s + ) for i in model.X: model.X[i].stale = False - model.X_initialy_stale[i].stale = True + model.X_initially_stale[i].stale = True - model.obj = pmo.objective(model.x + \ - model.x_initialy_stale + \ - sum(model.X.values()) + \ - sum(model.X_initialy_stale.values())) + model.obj = pmo.objective( + model.x + + model.x_initially_stale + + sum(model.X.values()) + + sum(model.X_initially_stale.values()) + ) model.c = pmo.constraint_dict() - model.c[1] = pmo.constraint(model.x >= 1) - model.c[2] = pmo.constraint(model.x_initialy_stale >= 1) - model.c[3] = pmo.constraint(model.X[1] >= 0) - model.c[4] = pmo.constraint(model.X[2] >= 1) - model.c[5] = pmo.constraint(model.X_initialy_stale[1] >= 0) - model.c[6] = pmo.constraint(model.X_initialy_stale[2] >= 1) + model.c[1] = pmo.constraint(model.x >= 1) + model.c[2] = pmo.constraint(model.x_initially_stale >= 1) + model.c[3] = pmo.constraint(model.X[1] >= 0) + model.c[4] = pmo.constraint(model.X[2] >= 1) + model.c[5] = pmo.constraint(model.X_initially_stale[1] >= 0) + model.c[6] = pmo.constraint(model.X_initially_stale[2] >= 1) # Test that stale flags do not get updated # on inactive blocks (where "inactive blocks" mean blocks diff --git a/pyomo/solvers/tests/models/MILP_discrete_var_bounds.py b/pyomo/solvers/tests/models/MILP_discrete_var_bounds.py index 75d7e675581..8fef69ef76a 100644 --- a/pyomo/solvers/tests/models/MILP_discrete_var_bounds.py +++ b/pyomo/solvers/tests/models/MILP_discrete_var_bounds.py @@ -13,6 +13,7 @@ from pyomo.core import ConcreteModel, Var, Objective, Constraint, Binary, Integers from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class MILP_discrete_var_bounds(_BaseTestModel): """ @@ -25,7 +26,7 @@ class MILP_discrete_var_bounds(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) self.disable_suffix_tests = True - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -34,15 +35,14 @@ def _generate_model(self): model.w2 = Var(within=Binary) model.x2 = Var(within=Binary) - model.yb = Var(within=Binary, bounds=(1,1)) - model.zb = Var(within=Binary, bounds=(0,0)) - model.yi = Var(within=Integers, bounds=(-1,None)) - model.zi = Var(within=Integers, bounds=(None,1)) + model.yb = Var(within=Binary, bounds=(1, 1)) + model.zb = Var(within=Binary, bounds=(0, 0)) + model.yi = Var(within=Integers, bounds=(-1, None)) + model.zi = Var(within=Integers, bounds=(None, 1)) - model.obj = Objective(expr=\ - model.w2 - model.x2 +\ - model.yb - model.zb +\ - model.yi - model.zi) + model.obj = Objective( + expr=model.w2 - model.x2 + model.yb - model.zb + model.yi - model.zi + ) model.c3 = Constraint(expr=model.w2 >= 0) model.c4 = Constraint(expr=model.x2 <= 1) @@ -57,27 +57,24 @@ def warmstart_model(self): model.yi.value = None model.zi.value = 0 + @register_model class MILP_discrete_var_bounds_kernel(MILP_discrete_var_bounds): - def _generate_model(self): self.model = pmo.block() model = self.model model._name = self.description model.w2 = pmo.variable(domain=pmo.BooleanSet) - model.x2 = pmo.variable(domain_type=pmo.IntegerSet, - lb=0, ub=1) - model.yb = pmo.variable(domain_type=pmo.IntegerSet, - lb=1, ub=1) - model.zb = pmo.variable(domain_type=pmo.IntegerSet, - lb=0, ub=0) + model.x2 = pmo.variable(domain_type=pmo.IntegerSet, lb=0, ub=1) + model.yb = pmo.variable(domain_type=pmo.IntegerSet, lb=1, ub=1) + model.zb = pmo.variable(domain_type=pmo.IntegerSet, lb=0, ub=0) model.yi = pmo.variable(domain=pmo.IntegerSet, lb=-1) model.zi = pmo.variable(domain=pmo.IntegerSet, ub=1) - model.obj = pmo.objective(model.w2 - model.x2 +\ - model.yb - model.zb +\ - model.yi - model.zi) + model.obj = pmo.objective( + model.w2 - model.x2 + model.yb - model.zb + model.yi - model.zi + ) model.c3 = pmo.constraint(model.w2 >= 0) model.c4 = pmo.constraint(model.x2 <= 1) diff --git a/pyomo/solvers/tests/models/MILP_infeasible1.py b/pyomo/solvers/tests/models/MILP_infeasible1.py index 9b09cf4c586..2a0bf1bd188 100644 --- a/pyomo/solvers/tests/models/MILP_infeasible1.py +++ b/pyomo/solvers/tests/models/MILP_infeasible1.py @@ -14,6 +14,7 @@ from pyomo.opt import TerminationCondition from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class MILP_infeasible1(_BaseTestModel): """ @@ -26,7 +27,7 @@ class MILP_infeasible1(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) self.solve_should_fail = True - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -37,12 +38,12 @@ def _generate_model(self): model.y = Var(within=Binary) model.z = Var(within=Binary) - model.o = Objective(expr=-model.x-model.y-model.z) + model.o = Objective(expr=-model.x - model.y - model.z) - model.c1 = Constraint(expr=model.x+model.y <= 1) - model.c2 = Constraint(expr=model.x+model.z <= 1) - model.c3 = Constraint(expr=model.y+model.z <= 1) - model.c4 = Constraint(expr=model.x+model.y+model.z >= 1.5) + model.c1 = Constraint(expr=model.x + model.y <= 1) + model.c2 = Constraint(expr=model.x + model.z <= 1) + model.c3 = Constraint(expr=model.y + model.z <= 1) + model.c4 = Constraint(expr=model.x + model.y + model.z >= 1.5) def warmstart_model(self): assert self.model is not None @@ -53,15 +54,19 @@ def warmstart_model(self): def post_solve_test_validation(self, tester, results): if tester is None: - assert results['Solver'][0]['termination condition'] == \ - TerminationCondition.infeasible + assert ( + results['Solver'][0]['termination condition'] + == TerminationCondition.infeasible + ) else: - tester.assertEqual(results['Solver'][0]['termination condition'], - TerminationCondition.infeasible) + tester.assertEqual( + results['Solver'][0]['termination condition'], + TerminationCondition.infeasible, + ) + @register_model class MILP_infeasible1_kernel(MILP_infeasible1): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -71,9 +76,9 @@ def _generate_model(self): model.y = pmo.variable(domain=Binary) model.z = pmo.variable(domain=Binary) - model.o = pmo.objective(-model.x-model.y-model.z) + model.o = pmo.objective(-model.x - model.y - model.z) - model.c1 = pmo.constraint(model.x+model.y <= 1) - model.c2 = pmo.constraint(model.x+model.z <= 1) - model.c3 = pmo.constraint(model.y+model.z <= 1) - model.c4 = pmo.constraint(model.x+model.y+model.z >= 1.5) + model.c1 = pmo.constraint(model.x + model.y <= 1) + model.c2 = pmo.constraint(model.x + model.z <= 1) + model.c3 = pmo.constraint(model.y + model.z <= 1) + model.c4 = pmo.constraint(model.x + model.y + model.z >= 1.5) diff --git a/pyomo/solvers/tests/models/MILP_simple.py b/pyomo/solvers/tests/models/MILP_simple.py index 62e7f0fa5d4..fb157ea6555 100644 --- a/pyomo/solvers/tests/models/MILP_simple.py +++ b/pyomo/solvers/tests/models/MILP_simple.py @@ -10,9 +10,18 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Param, Var, Objective, Constraint, Binary, NonNegativeReals +from pyomo.core import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + Binary, + NonNegativeReals, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class MILP_simple(_BaseTestModel): """ @@ -25,7 +34,7 @@ class MILP_simple(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -36,9 +45,9 @@ def _generate_model(self): model.x = Var(within=NonNegativeReals) model.y = Var(within=Binary) - model.obj = Objective(expr=model.x + 3.0*model.y) + model.obj = Objective(expr=model.x + 3.0 * model.y) model.c1 = Constraint(expr=model.a <= model.y) - model.c2 = Constraint(expr=(2.0, model.x/model.a - model.y, 10)) + model.c2 = Constraint(expr=(2.0, model.x / model.a - model.y, 10)) def warmstart_model(self): assert self.model is not None @@ -46,9 +55,9 @@ def warmstart_model(self): model.x.value = 0.1 model.y.value = 0 + @register_model class MILP_simple_kernel(MILP_simple): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -58,6 +67,6 @@ def _generate_model(self): model.x = pmo.variable(domain=NonNegativeReals) model.y = pmo.variable(domain=Binary) - model.obj = pmo.objective(model.x + 3.0*model.y) + model.obj = pmo.objective(model.x + 3.0 * model.y) model.c1 = pmo.constraint(model.a <= model.y) - model.c2 = pmo.constraint((2.0, model.x/model.a - model.y, 10)) + model.c2 = pmo.constraint((2.0, model.x / model.a - model.y, 10)) diff --git a/pyomo/solvers/tests/models/MILP_unbounded.py b/pyomo/solvers/tests/models/MILP_unbounded.py index 8acc548ab1e..364f3ffeb86 100644 --- a/pyomo/solvers/tests/models/MILP_unbounded.py +++ b/pyomo/solvers/tests/models/MILP_unbounded.py @@ -14,6 +14,7 @@ from pyomo.opt import TerminationCondition from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class MILP_unbounded(_BaseTestModel): """ @@ -26,7 +27,7 @@ class MILP_unbounded(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) self.solve_should_fail = True - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -36,7 +37,7 @@ def _generate_model(self): model.x = Var(within=Integers) model.y = Var(within=Integers) - model.o = Objective(expr=model.x+model.y) + model.o = Objective(expr=model.x + model.y) def warmstart_model(self): assert self.model is not None @@ -46,17 +47,22 @@ def warmstart_model(self): def post_solve_test_validation(self, tester, results): if tester is None: - assert results['Solver'][0]['termination condition'] in \ - (TerminationCondition.unbounded, - TerminationCondition.infeasibleOrUnbounded) + assert results['Solver'][0]['termination condition'] in ( + TerminationCondition.unbounded, + TerminationCondition.infeasibleOrUnbounded, + ) else: - tester.assertIn(results['Solver'][0]['termination condition'], - (TerminationCondition.unbounded, - TerminationCondition.infeasibleOrUnbounded)) + tester.assertIn( + results['Solver'][0]['termination condition'], + ( + TerminationCondition.unbounded, + TerminationCondition.infeasibleOrUnbounded, + ), + ) + @register_model class MILP_unbounded_kernel(MILP_unbounded): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -65,4 +71,4 @@ def _generate_model(self): model.x = pmo.variable(domain=pmo.IntegerSet) model.y = pmo.variable(domain=pmo.IntegerSet) - model.o = pmo.objective(model.x+model.y) + model.o = pmo.objective(model.x + model.y) diff --git a/pyomo/solvers/tests/models/MILP_unused_vars.json b/pyomo/solvers/tests/models/MILP_unused_vars.json index 3c8ff51a3dc..1eaa5855642 100644 --- a/pyomo/solvers/tests/models/MILP_unused_vars.json +++ b/pyomo/solvers/tests/models/MILP_unused_vars.json @@ -9,11 +9,11 @@ "stale": true, "value": null }, - "B[1].b.X_initialy_stale[1]": { + "B[1].b.X_initially_stale[1]": { "stale": true, "value": null }, - "B[1].b.X_initialy_stale[2]": { + "B[1].b.X_initially_stale[2]": { "stale": true, "value": null }, @@ -25,11 +25,11 @@ "stale": true, "value": null }, - "B[1].b.X_unused_initialy_stale[1]": { + "B[1].b.X_unused_initially_stale[1]": { "stale": true, "value": null }, - "B[1].b.X_unused_initialy_stale[2]": { + "B[1].b.X_unused_initially_stale[2]": { "stale": true, "value": null }, @@ -58,7 +58,7 @@ "stale": true, "value": null }, - "B[1].b.x_initialy_stale": { + "B[1].b.x_initially_stale": { "stale": true, "value": null }, @@ -66,7 +66,7 @@ "stale": true, "value": null }, - "B[1].b.x_unused_initialy_stale": { + "B[1].b.x_unused_initially_stale": { "stale": true, "value": null }, @@ -80,11 +80,11 @@ "stale": true, "value": null }, - "B[2].b.X_initialy_stale[1]": { + "B[2].b.X_initially_stale[1]": { "stale": true, "value": null }, - "B[2].b.X_initialy_stale[2]": { + "B[2].b.X_initially_stale[2]": { "stale": true, "value": null }, @@ -96,11 +96,11 @@ "stale": true, "value": null }, - "B[2].b.X_unused_initialy_stale[1]": { + "B[2].b.X_unused_initially_stale[1]": { "stale": true, "value": null }, - "B[2].b.X_unused_initialy_stale[2]": { + "B[2].b.X_unused_initially_stale[2]": { "stale": true, "value": null }, @@ -129,7 +129,7 @@ "stale": true, "value": null }, - "B[2].b.x_initialy_stale": { + "B[2].b.x_initially_stale": { "stale": true, "value": null }, @@ -137,7 +137,7 @@ "stale": true, "value": null }, - "B[2].b.x_unused_initialy_stale": { + "B[2].b.x_unused_initially_stale": { "stale": true, "value": null }, @@ -151,12 +151,12 @@ "stale": false, "value": 1.0 }, - "X_initialy_stale[1]": { + "X_initially_stale[1]": { "rc": 0.0, "stale": false, "value": 0.0 }, - "X_initialy_stale[2]": { + "X_initially_stale[2]": { "rc": 0.0, "stale": false, "value": 1.0 @@ -169,11 +169,11 @@ "stale": true, "value": -1.0 }, - "X_unused_initialy_stale[1]": { + "X_unused_initially_stale[1]": { "stale": true, "value": -1.0 }, - "X_unused_initialy_stale[2]": { + "X_unused_initially_stale[2]": { "stale": true, "value": -1.0 }, @@ -187,11 +187,11 @@ "stale": true, "value": null }, - "b.b.X_initialy_stale[1]": { + "b.b.X_initially_stale[1]": { "stale": true, "value": null }, - "b.b.X_initialy_stale[2]": { + "b.b.X_initially_stale[2]": { "stale": true, "value": null }, @@ -203,11 +203,11 @@ "stale": true, "value": null }, - "b.b.X_unused_initialy_stale[1]": { + "b.b.X_unused_initially_stale[1]": { "stale": true, "value": null }, - "b.b.X_unused_initialy_stale[2]": { + "b.b.X_unused_initially_stale[2]": { "stale": true, "value": null }, @@ -236,7 +236,7 @@ "stale": true, "value": null }, - "b.b.x_initialy_stale": { + "b.b.x_initially_stale": { "stale": true, "value": null }, @@ -244,7 +244,7 @@ "stale": true, "value": null }, - "b.b.x_unused_initialy_stale": { + "b.b.x_unused_initially_stale": { "stale": true, "value": null }, @@ -301,7 +301,7 @@ "stale": false, "value": 1.0 }, - "x_initialy_stale": { + "x_initially_stale": { "rc": 0.0, "stale": false, "value": 1.0 @@ -310,7 +310,7 @@ "stale": true, "value": -1.0 }, - "x_unused_initialy_stale": { + "x_unused_initially_stale": { "stale": true, "value": -1.0 } diff --git a/pyomo/solvers/tests/models/MILP_unused_vars.py b/pyomo/solvers/tests/models/MILP_unused_vars.py index 36b021224d5..742d0f951a8 100644 --- a/pyomo/solvers/tests/models/MILP_unused_vars.py +++ b/pyomo/solvers/tests/models/MILP_unused_vars.py @@ -10,9 +10,20 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Var, Objective, ConstraintList, Set, Integers, RangeSet, sum_product, Block +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + ConstraintList, + Set, + Integers, + RangeSet, + sum_product, + Block, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class MILP_unused_vars(_BaseTestModel): """ @@ -26,51 +37,53 @@ class MILP_unused_vars(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) self.disable_suffix_tests = True - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() model = self.model model._name = self.description - model.s = Set(initialize=[1,2]) + model.s = Set(initialize=[1, 2]) model.x_unused = Var(within=Integers) model.x_unused.stale = False - model.x_unused_initialy_stale = Var(within=Integers) - model.x_unused_initialy_stale.stale = True + model.x_unused_initially_stale = Var(within=Integers) + model.x_unused_initially_stale.stale = True model.X_unused = Var(model.s, within=Integers) - model.X_unused_initialy_stale = Var(model.s, within=Integers) + model.X_unused_initially_stale = Var(model.s, within=Integers) for i in model.s: model.X_unused[i].stale = False - model.X_unused_initialy_stale[i].stale = True + model.X_unused_initially_stale[i].stale = True - model.x = Var(within=RangeSet(None,None)) + model.x = Var(within=RangeSet(None, None)) model.x.stale = False - model.x_initialy_stale = Var(within=Integers) - model.x_initialy_stale.stale = True + model.x_initially_stale = Var(within=Integers) + model.x_initially_stale.stale = True model.X = Var(model.s, within=Integers) - model.X_initialy_stale = Var(model.s, within=Integers) + model.X_initially_stale = Var(model.s, within=Integers) for i in model.s: model.X[i].stale = False - model.X_initialy_stale[i].stale = True + model.X_initially_stale[i].stale = True - model.obj = Objective(expr= model.x + \ - model.x_initialy_stale + \ - sum_product(model.X) + \ - sum_product(model.X_initialy_stale)) + model.obj = Objective( + expr=model.x + + model.x_initially_stale + + sum_product(model.X) + + sum_product(model.X_initially_stale) + ) model.c = ConstraintList() - model.c.add( model.x >= 1 ) - model.c.add( model.x_initialy_stale >= 1 ) - model.c.add( model.X[1] >= 0 ) - model.c.add( model.X[2] >= 1 ) - model.c.add( model.X_initialy_stale[1] >= 0 ) - model.c.add( model.X_initialy_stale[2] >= 1 ) + model.c.add(model.x >= 1) + model.c.add(model.x_initially_stale >= 1) + model.c.add(model.X[1] >= 0) + model.c.add(model.X[2] >= 1) + model.c.add(model.X_initially_stale[1] >= 0) + model.c.add(model.X_initially_stale[2] >= 1) # Test that stale flags get set # on inactive blocks (where "inactive blocks" mean blocks @@ -99,71 +112,77 @@ def warmstart_model(self): assert self.model is not None model = self.model model.x_unused.value = -1 - model.x_unused_initialy_stale.value = -1 - model.x_unused_initialy_stale.stale = True + model.x_unused_initially_stale.value = -1 + model.x_unused_initially_stale.stale = True for i in model.s: model.X_unused[i].value = -1 - model.X_unused_initialy_stale[i].value = -1 - model.X_unused_initialy_stale[i].stale = True + model.X_unused_initially_stale[i].value = -1 + model.X_unused_initially_stale[i].stale = True model.x.value = -1 - model.x_initialy_stale.value = -1 - model.x_initialy_stale.stale = True + model.x_initially_stale.value = -1 + model.x_initially_stale.stale = True for i in model.s: model.X[i].value = -1 - model.X_initialy_stale[i].value = -1 - model.X_initialy_stale[i].stale = True + model.X_initially_stale[i].value = -1 + model.X_initially_stale[i].stale = True + @register_model class MILP_unused_vars_kernel(MILP_unused_vars): - def _generate_model(self): self.model = pmo.block() model = self.model model._name = self.description - model.s = [1,2] + model.s = [1, 2] model.x_unused = pmo.variable(domain=pmo.IntegerSet) model.x_unused.stale = False - model.x_unused_initialy_stale = pmo.variable(domain=pmo.IntegerSet) - model.x_unused_initialy_stale.stale = True + model.x_unused_initially_stale = pmo.variable(domain=pmo.IntegerSet) + model.x_unused_initially_stale.stale = True model.X_unused = pmo.variable_dict( - (i, pmo.variable(domain=pmo.IntegerSet)) for i in model.s) - model.X_unused_initialy_stale = pmo.variable_dict( - (i, pmo.variable(domain=pmo.IntegerSet)) for i in model.s) + (i, pmo.variable(domain=pmo.IntegerSet)) for i in model.s + ) + model.X_unused_initially_stale = pmo.variable_dict( + (i, pmo.variable(domain=pmo.IntegerSet)) for i in model.s + ) for i in model.s: model.X_unused[i].stale = False - model.X_unused_initialy_stale[i].stale = True + model.X_unused_initially_stale[i].stale = True - model.x = pmo.variable(domain=RangeSet(None,None)) + model.x = pmo.variable(domain=RangeSet(None, None)) model.x.stale = False - model.x_initialy_stale = pmo.variable(domain=pmo.IntegerSet) - model.x_initialy_stale.stale = True + model.x_initially_stale = pmo.variable(domain=pmo.IntegerSet) + model.x_initially_stale.stale = True model.X = pmo.variable_dict( - (i, pmo.variable(domain=pmo.IntegerSet)) for i in model.s) - model.X_initialy_stale = pmo.variable_dict( - (i, pmo.variable(domain=pmo.IntegerSet)) for i in model.s) + (i, pmo.variable(domain=pmo.IntegerSet)) for i in model.s + ) + model.X_initially_stale = pmo.variable_dict( + (i, pmo.variable(domain=pmo.IntegerSet)) for i in model.s + ) for i in model.s: model.X[i].stale = False - model.X_initialy_stale[i].stale = True + model.X_initially_stale[i].stale = True - model.obj = pmo.objective(model.x + \ - model.x_initialy_stale + \ - sum(model.X.values()) + \ - sum(model.X_initialy_stale.values())) + model.obj = pmo.objective( + model.x + + model.x_initially_stale + + sum(model.X.values()) + + sum(model.X_initially_stale.values()) + ) model.c = pmo.constraint_dict() - model.c[1] = pmo.constraint(model.x >= 1) - model.c[2] = pmo.constraint(model.x_initialy_stale >= 1) - model.c[3] = pmo.constraint(model.X[1] >= 0) - model.c[4] = pmo.constraint(model.X[2] >= 1) - model.c[5] = pmo.constraint(model.X_initialy_stale[1] >= 0) - model.c[6] = pmo.constraint(model.X_initialy_stale[2] >= 1) + model.c[1] = pmo.constraint(model.x >= 1) + model.c[2] = pmo.constraint(model.x_initially_stale >= 1) + model.c[3] = pmo.constraint(model.X[1] >= 0) + model.c[4] = pmo.constraint(model.X[2] >= 1) + model.c[5] = pmo.constraint(model.X_initially_stale[1] >= 0) + model.c[6] = pmo.constraint(model.X_initially_stale[2] >= 1) # Test that stale flags do not get updated # on inactive blocks (where "inactive blocks" mean blocks diff --git a/pyomo/solvers/tests/models/MIQCP_simple.py b/pyomo/solvers/tests/models/MIQCP_simple.py index 3c08db40823..46c1293b23c 100644 --- a/pyomo/solvers/tests/models/MIQCP_simple.py +++ b/pyomo/solvers/tests/models/MIQCP_simple.py @@ -13,6 +13,7 @@ from pyomo.core import ConcreteModel, Var, Objective, Constraint, Binary, maximize from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class MIQCP_simple(_BaseTestModel): """ @@ -25,7 +26,7 @@ class MIQCP_simple(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -36,10 +37,10 @@ def _generate_model(self): model.y = Var(within=Binary) model.z = Var(within=Binary) - model.obj = Objective(expr=model.x,sense=maximize) - model.c0 = Constraint(expr=model.x+model.y+model.z == 1) + model.obj = Objective(expr=model.x, sense=maximize) + model.c0 = Constraint(expr=model.x + model.y + model.z == 1) model.qc0 = Constraint(expr=model.x**2 + model.y**2 <= model.z**2) - model.qc1 = Constraint(expr=model.x**2 <= model.y*model.z) + model.qc1 = Constraint(expr=model.x**2 <= model.y * model.z) def warmstart_model(self): assert self.model is not None @@ -48,9 +49,9 @@ def warmstart_model(self): model.y.value = None model.z.value = None + @register_model class MIQCP_simple_kernel(MIQCP_simple): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -60,7 +61,7 @@ def _generate_model(self): model.y = pmo.variable(domain=Binary) model.z = pmo.variable(domain=Binary) - model.obj = pmo.objective(model.x,sense=maximize) - model.c0 = pmo.constraint(model.x+model.y+model.z == 1) + model.obj = pmo.objective(model.x, sense=maximize) + model.c0 = pmo.constraint(model.x + model.y + model.z == 1) model.qc0 = pmo.constraint(model.x**2 + model.y**2 <= model.z**2) - model.qc1 = pmo.constraint(model.x**2 <= model.y*model.z) + model.qc1 = pmo.constraint(model.x**2 <= model.y * model.z) diff --git a/pyomo/solvers/tests/models/MIQP_simple.py b/pyomo/solvers/tests/models/MIQP_simple.py index 8001c5c056d..1d43d96ab8b 100644 --- a/pyomo/solvers/tests/models/MIQP_simple.py +++ b/pyomo/solvers/tests/models/MIQP_simple.py @@ -10,10 +10,19 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Param, Var, Objective, Constraint, NonNegativeReals, Binary +from pyomo.core import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + NonNegativeReals, + Binary, +) from pyomo.opt import TerminationCondition from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class MIQP_simple(_BaseTestModel): """ @@ -26,7 +35,7 @@ class MIQP_simple(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -37,9 +46,9 @@ def _generate_model(self): model.x = Var(within=NonNegativeReals) model.y = Var(within=Binary) - model.obj = Objective(expr=model.x**2 + 3.0*model.y**2) + model.obj = Objective(expr=model.x**2 + 3.0 * model.y**2) model.c1 = Constraint(expr=model.a <= model.y) - model.c2 = Constraint(expr=(2.0, model.x/model.a - model.y, 10)) + model.c2 = Constraint(expr=(2.0, model.x / model.a - model.y, 10)) def warmstart_model(self): assert self.model is not None @@ -49,17 +58,19 @@ def warmstart_model(self): def post_solve_test_validation(self, tester, results): if tester is None: - assert results['Solver'][0]['termination condition'] in \ - (TerminationCondition.optimal, - TerminationCondition.locallyOptimal) + assert results['Solver'][0]['termination condition'] in ( + TerminationCondition.optimal, + TerminationCondition.locallyOptimal, + ) else: - tester.assertIn(results['Solver'][0]['termination condition'], - (TerminationCondition.optimal, - TerminationCondition.locallyOptimal)) + tester.assertIn( + results['Solver'][0]['termination condition'], + (TerminationCondition.optimal, TerminationCondition.locallyOptimal), + ) + @register_model class MIQP_simple_kernel(MIQP_simple): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -69,6 +80,6 @@ def _generate_model(self): model.x = pmo.variable(domain=NonNegativeReals) model.y = pmo.variable(domain=Binary) - model.obj = pmo.objective(model.x**2 + 3.0*model.y**2) + model.obj = pmo.objective(model.x**2 + 3.0 * model.y**2) model.c1 = pmo.constraint(model.a <= model.y) - model.c2 = pmo.constraint((2.0, model.x/model.a - model.y, 10)) + model.c2 = pmo.constraint((2.0, model.x / model.a - model.y, 10)) diff --git a/pyomo/solvers/tests/models/QCP_simple.py b/pyomo/solvers/tests/models/QCP_simple.py index ef7d3569406..5f8405f1f00 100644 --- a/pyomo/solvers/tests/models/QCP_simple.py +++ b/pyomo/solvers/tests/models/QCP_simple.py @@ -10,10 +10,19 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Var, Objective, Constraint, NonNegativeReals, maximize, ConstraintList +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Constraint, + NonNegativeReals, + maximize, + ConstraintList, +) from pyomo.opt import TerminationCondition from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class QCP_simple(_BaseTestModel): """ @@ -26,7 +35,7 @@ class QCP_simple(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -39,11 +48,13 @@ def _generate_model(self): model.fixed_var = Var() model.fixed_var.fix(0.2) model.q1 = Var(bounds=(None, 0.2)) - model.q2 = Var(bounds=(-2,None)) - model.obj = Objective(expr=model.x+model.q1-model.q2,sense=maximize) - model.c0 = Constraint(expr=model.x+model.y+model.z == 1) - model.qc0 = Constraint(expr=model.x**2 + model.y**2 + model.fixed_var <= model.z**2) - model.qc1 = Constraint(expr=model.x**2 <= model.y*model.z) + model.q2 = Var(bounds=(-2, None)) + model.obj = Objective(expr=model.x + model.q1 - model.q2, sense=maximize) + model.c0 = Constraint(expr=model.x + model.y + model.z == 1) + model.qc0 = Constraint( + expr=model.x**2 + model.y**2 + model.fixed_var <= model.z**2 + ) + model.qc1 = Constraint(expr=model.x**2 <= model.y * model.z) model.c = ConstraintList() model.c.add((0, -model.q1**2 + model.fixed_var, None)) model.c.add((None, model.q2**2 + model.fixed_var, 5)) @@ -57,17 +68,19 @@ def warmstart_model(self): def post_solve_test_validation(self, tester, results): if tester is None: - assert results['Solver'][0]['termination condition'] in \ - (TerminationCondition.optimal, - TerminationCondition.locallyOptimal) + assert results['Solver'][0]['termination condition'] in ( + TerminationCondition.optimal, + TerminationCondition.locallyOptimal, + ) else: - tester.assertIn(results['Solver'][0]['termination condition'], - (TerminationCondition.optimal, - TerminationCondition.locallyOptimal)) + tester.assertIn( + results['Solver'][0]['termination condition'], + (TerminationCondition.optimal, TerminationCondition.locallyOptimal), + ) + @register_model class QCP_simple_nosuffixes(QCP_simple): - description = "QCP_simple_nosuffixes" test_pickling = False @@ -76,9 +89,9 @@ def __init__(self): self.disable_suffix_tests = True self.add_results("QCP_simple.json") + @register_model class QCP_simple_kernel(QCP_simple): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -91,17 +104,19 @@ def _generate_model(self): model.fixed_var.fix(0.2) model.q1 = pmo.variable(ub=0.2) model.q2 = pmo.variable(lb=-2) - model.obj = pmo.objective(model.x+model.q1-model.q2,sense=maximize) - model.c0 = pmo.constraint(model.x+model.y+model.z == 1) - model.qc0 = pmo.constraint(model.x**2 + model.y**2 + model.fixed_var <= model.z**2) - model.qc1 = pmo.constraint(model.x**2 <= model.y*model.z) + model.obj = pmo.objective(model.x + model.q1 - model.q2, sense=maximize) + model.c0 = pmo.constraint(model.x + model.y + model.z == 1) + model.qc0 = pmo.constraint( + model.x**2 + model.y**2 + model.fixed_var <= model.z**2 + ) + model.qc1 = pmo.constraint(model.x**2 <= model.y * model.z) model.c = pmo.constraint_dict() model.c[1] = pmo.constraint(lb=0, body=-model.q1**2 + model.fixed_var) model.c[2] = pmo.constraint(body=model.q2**2 + model.fixed_var, ub=5) + @register_model class QCP_simple_nosuffixes_kernel(QCP_simple_kernel): - description = "QCP_simple_nosuffixes" test_pickling = False diff --git a/pyomo/solvers/tests/models/QP_constant_objective.py b/pyomo/solvers/tests/models/QP_constant_objective.py index 0a87f6e7403..2769fe07556 100644 --- a/pyomo/solvers/tests/models/QP_constant_objective.py +++ b/pyomo/solvers/tests/models/QP_constant_objective.py @@ -16,6 +16,7 @@ # linear objectives IF we could get some proper preprocessing # in place for the canonical_repn + @register_model class QP_constant_objective(_BaseTestModel): """ @@ -28,7 +29,7 @@ class QP_constant_objective(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -36,7 +37,7 @@ def _generate_model(self): model._name = self.description model.x = Var(within=NonNegativeReals) - model.obj = Objective(expr=model.x**2-model.x**2) + model.obj = Objective(expr=model.x**2 - model.x**2) model.con = Constraint(expr=model.x == 1.0) def warmstart_model(self): @@ -44,14 +45,14 @@ def warmstart_model(self): model = self.model model.x.value = 1.0 + @register_model class QP_constant_objective_kernel(QP_constant_objective): - def _generate_model(self): self.model = ConcreteModel() model = self.model model._name = self.description model.x = Var(within=NonNegativeReals) - model.obj = Objective(expr=model.x**2-model.x**2) + model.obj = Objective(expr=model.x**2 - model.x**2) model.con = Constraint(expr=model.x == 1.0) diff --git a/pyomo/solvers/tests/models/QP_simple.py b/pyomo/solvers/tests/models/QP_simple.py index ec9d6cc1a77..5959cf1d8b1 100644 --- a/pyomo/solvers/tests/models/QP_simple.py +++ b/pyomo/solvers/tests/models/QP_simple.py @@ -10,10 +10,18 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Param, Var, Objective, Constraint, NonNegativeReals +from pyomo.core import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + NonNegativeReals, +) from pyomo.opt import TerminationCondition from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class QP_simple(_BaseTestModel): """ @@ -26,7 +34,7 @@ class QP_simple(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = None @@ -40,9 +48,9 @@ def _generate_model(self): model.inactive_obj = Objective(expr=model.y) model.inactive_obj.deactivate() - model.obj = Objective(expr=model.x**2 + 3.0*model.inactive_obj**2 + 1.0) + model.obj = Objective(expr=model.x**2 + 3.0 * model.inactive_obj**2 + 1.0) model.c1 = Constraint(expr=model.a <= model.y) - model.c2 = Constraint(expr=(2.0, model.x/model.a - model.y, 10)) + model.c2 = Constraint(expr=(2.0, model.x / model.a - model.y, 10)) def warmstart_model(self): assert self.model is not None @@ -52,17 +60,19 @@ def warmstart_model(self): def post_solve_test_validation(self, tester, results): if tester is None: - assert results['Solver'][0]['termination condition'] in \ - (TerminationCondition.optimal, - TerminationCondition.locallyOptimal) + assert results['Solver'][0]['termination condition'] in ( + TerminationCondition.optimal, + TerminationCondition.locallyOptimal, + ) else: - tester.assertIn(results['Solver'][0]['termination condition'], - (TerminationCondition.optimal, - TerminationCondition.locallyOptimal)) + tester.assertIn( + results['Solver'][0]['termination condition'], + (TerminationCondition.optimal, TerminationCondition.locallyOptimal), + ) + @register_model class QP_simple_nosuffixes(QP_simple): - description = "QP_simple_nosuffixes" test_pickling = False @@ -71,9 +81,9 @@ def __init__(self): self.disable_suffix_tests = True self.add_results("QP_simple.json") + @register_model class QP_simple_kernel(QP_simple): - def _generate_model(self): self.model = None self.model = pmo.block() @@ -86,13 +96,13 @@ def _generate_model(self): model.inactive_obj = pmo.objective(model.y) model.inactive_obj.deactivate() - model.obj = pmo.objective(model.x**2 + 3.0*model.inactive_obj**2 + 1.0) + model.obj = pmo.objective(model.x**2 + 3.0 * model.inactive_obj**2 + 1.0) model.c1 = pmo.constraint(model.a <= model.y) - model.c2 = pmo.constraint((2.0, model.x/model.a - model.y, 10)) + model.c2 = pmo.constraint((2.0, model.x / model.a - model.y, 10)) + @register_model class QP_simple_nosuffixes_kernel(QP_simple_kernel): - description = "QP_simple_nosuffixes" test_pickling = False diff --git a/pyomo/solvers/tests/models/SOS1_simple.py b/pyomo/solvers/tests/models/SOS1_simple.py index dc40e0e2425..e6156ad5c32 100644 --- a/pyomo/solvers/tests/models/SOS1_simple.py +++ b/pyomo/solvers/tests/models/SOS1_simple.py @@ -10,9 +10,19 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Param, Var, Objective, Constraint, NonNegativeReals, SOSConstraint, sum_product +from pyomo.core import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + NonNegativeReals, + SOSConstraint, + sum_product, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class SOS1_simple(_BaseTestModel): """ @@ -25,7 +35,7 @@ class SOS1_simple(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -34,17 +44,17 @@ def _generate_model(self): model.a = Param(initialize=0.1) model.x = Var(within=NonNegativeReals) - model.y = Var([1,2],within=NonNegativeReals) + model.y = Var([1, 2], within=NonNegativeReals) - model.obj = Objective(expr=model.x + model.y[1]+2*model.y[2]) + model.obj = Objective(expr=model.x + model.y[1] + 2 * model.y[2]) model.c1 = Constraint(expr=model.a <= model.y[2]) model.c2 = Constraint(expr=(2.0, model.x, 10.0)) - model.c3 = SOSConstraint(var=model.y, index=[1,2], sos=1) + model.c3 = SOSConstraint(var=model.y, index=[1, 2], sos=1) model.c4 = Constraint(expr=sum_product(model.y) == 1) # Make an empty SOSConstraint - model.c5 = SOSConstraint(var=model.y, index=[1,2], sos=1) - model.c5.set_items([],[]) + model.c5 = SOSConstraint(var=model.y, index=[1, 2], sos=1) + model.c5.set_items([], []) assert len(list(model.c5.get_items())) == 0 def warmstart_model(self): @@ -54,9 +64,9 @@ def warmstart_model(self): model.y[1].value = 1 model.y[2].value = None + @register_model class SOS1_simple_kernel(SOS1_simple): - def _generate_model(self): self.model = pmo.block() model = self.model @@ -68,7 +78,7 @@ def _generate_model(self): model.y[1] = pmo.variable(domain=NonNegativeReals) model.y[2] = pmo.variable(domain=NonNegativeReals) - model.obj = pmo.objective(model.x + model.y[1]+2*model.y[2]) + model.obj = pmo.objective(model.x + model.y[1] + 2 * model.y[2]) model.c1 = pmo.constraint(model.a <= model.y[2]) model.c2 = pmo.constraint((2.0, model.x, 10.0)) model.c3 = pmo.sos1(model.y.values()) diff --git a/pyomo/solvers/tests/models/SOS2_simple.py b/pyomo/solvers/tests/models/SOS2_simple.py index c077e2b251d..4f192773ca4 100644 --- a/pyomo/solvers/tests/models/SOS2_simple.py +++ b/pyomo/solvers/tests/models/SOS2_simple.py @@ -10,9 +10,20 @@ # ___________________________________________________________________________ import pyomo.kernel as pmo -from pyomo.core import ConcreteModel, Param, Var, Objective, Constraint, SOSConstraint, NonNegativeReals, ConstraintList, sum_product +from pyomo.core import ( + ConcreteModel, + Param, + Var, + Objective, + Constraint, + SOSConstraint, + NonNegativeReals, + ConstraintList, + sum_product, +) from pyomo.solvers.tests.models.base import _BaseTestModel, register_model + @register_model class SOS2_simple(_BaseTestModel): """ @@ -25,7 +36,7 @@ class SOS2_simple(_BaseTestModel): def __init__(self): _BaseTestModel.__init__(self) - self.add_results(self.description+".json") + self.add_results(self.description + ".json") def _generate_model(self): self.model = ConcreteModel() @@ -33,56 +44,56 @@ def _generate_model(self): model._name = self.description model.f = Var() - model.x = Var(bounds=(1,3)) - model.fi = Param([1,2,3],mutable=True) + model.x = Var(bounds=(1, 3)) + model.fi = Param([1, 2, 3], mutable=True) model.fi[1] = 1.0 model.fi[2] = 2.0 model.fi[3] = 0.0 - model.xi = Param([1,2,3],mutable=True) + model.xi = Param([1, 2, 3], mutable=True) model.xi[1] = 1.0 model.xi[2] = 2.0 model.xi[3] = 3.0 model.p = Var(within=NonNegativeReals) model.n = Var(within=NonNegativeReals) - model.lmbda = Var([1,2,3]) - model.obj = Objective(expr=model.p+model.n) + model.lmbda = Var([1, 2, 3]) + model.obj = Objective(expr=model.p + model.n) model.c1 = ConstraintList() model.c1.add((0.0, model.lmbda[1], 1.0)) model.c1.add((0.0, model.lmbda[2], 1.0)) model.c1.add(0.0 <= model.lmbda[3]) - model.c2 = SOSConstraint(var=model.lmbda, index=[1,2,3], sos=2) + model.c2 = SOSConstraint(var=model.lmbda, index=[1, 2, 3], sos=2) model.c3 = Constraint(expr=sum_product(model.lmbda) == 1) - model.c4 = Constraint(expr=model.f==sum_product(model.fi,model.lmbda)) - model.c5 = Constraint(expr=model.x==sum_product(model.xi,model.lmbda)) + model.c4 = Constraint(expr=model.f == sum_product(model.fi, model.lmbda)) + model.c5 = Constraint(expr=model.x == sum_product(model.xi, model.lmbda)) model.x = 2.75 model.x.fixed = True # Make an empty SOSConstraint - model.c6 = SOSConstraint(var=model.lmbda, index=[1,2,3], sos=2) - model.c6.set_items([],[]) + model.c6 = SOSConstraint(var=model.lmbda, index=[1, 2, 3], sos=2) + model.c6.set_items([], []) assert len(list(model.c6.get_items())) == 0 def warmstart_model(self): assert self.model is not None model = self.model model.f.value = 0 - assert model.x.value == 2.75 # Fixed + assert model.x.value == 2.75 # Fixed model.p.value = 1 model.n.value = 0 model.lmbda[1].value = None model.lmbda[2].value = None model.lmbda[3].value = 1 + @register_model class SOS2_simple_kernel(SOS2_simple): - def _generate_model(self): self.model = pmo.block() model = self.model model._name = self.description model.f = pmo.variable() - model.x = pmo.variable(lb=1,ub=3) + model.x = pmo.variable(lb=1, ub=3) model.fi = pmo.parameter_dict() model.fi[1] = pmo.parameter(value=1.0) model.fi[2] = pmo.parameter(value=2.0) @@ -93,19 +104,20 @@ def _generate_model(self): model.xi[3] = pmo.parameter(value=3.0) model.p = pmo.variable(domain=NonNegativeReals) model.n = pmo.variable(domain=NonNegativeReals) - model.lmbda = pmo.variable_dict( - (i, pmo.variable()) for i in range(1,4)) - model.obj = pmo.objective(model.p+model.n) + model.lmbda = pmo.variable_dict((i, pmo.variable()) for i in range(1, 4)) + model.obj = pmo.objective(model.p + model.n) model.c1 = pmo.constraint_dict() model.c1[1] = pmo.constraint((0.0, model.lmbda[1], 1.0)) model.c1[2] = pmo.constraint((0.0, model.lmbda[2], 1.0)) model.c1[3] = pmo.constraint(0.0 <= model.lmbda[3]) model.c2 = pmo.sos2(model.lmbda.values()) model.c3 = pmo.constraint(sum(model.lmbda.values()) == 1) - model.c4 = pmo.constraint(model.f==sum(model.fi[i]*model.lmbda[i] - for i in model.lmbda)) - model.c5 = pmo.constraint(model.x==sum(model.xi[i]*model.lmbda[i] - for i in model.lmbda)) + model.c4 = pmo.constraint( + model.f == sum(model.fi[i] * model.lmbda[i] for i in model.lmbda) + ) + model.c5 = pmo.constraint( + model.x == sum(model.xi[i] * model.lmbda[i] for i in model.lmbda) + ) model.x.fix(2.75) # Make an empty SOS constraint diff --git a/pyomo/solvers/tests/models/__init__.py b/pyomo/solvers/tests/models/__init__.py index 157b7512887..c6a550397d5 100644 --- a/pyomo/solvers/tests/models/__init__.py +++ b/pyomo/solvers/tests/models/__init__.py @@ -25,8 +25,9 @@ import pyomo.solvers.tests.models.LP_trivial_constraints import pyomo.solvers.tests.models.LP_unbounded import pyomo.solvers.tests.models.LP_unused_vars + # WEH - Omitting this for because it's not reliably solved by ipopt -#import pyomo.solvers.tests.models.LP_unique_duals +# import pyomo.solvers.tests.models.LP_unique_duals import pyomo.solvers.tests.models.MILP_discrete_var_bounds import pyomo.solvers.tests.models.MILP_infeasible1 diff --git a/pyomo/solvers/tests/models/base.py b/pyomo/solvers/tests/models/base.py index d7115b378af..5adfae8c729 100644 --- a/pyomo/solvers/tests/models/base.py +++ b/pyomo/solvers/tests/models/base.py @@ -20,7 +20,7 @@ from pyomo.opt import ProblemFormat, SolverFactory, TerminationCondition from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver -thisDir = dirname(abspath( __file__ )) +thisDir = dirname(abspath(__file__)) _test_models = {} @@ -36,7 +36,7 @@ def all_models(arg=None): def register_model(cls): - """ Decorator for test model classes """ + """Decorator for test model classes""" global _test_models assert cls.__name__ not in _test_models _test_models[cls.__name__] = cls @@ -62,15 +62,14 @@ def __init__(self): self.solve_should_fail = False def add_results(self, filename): - """ Add results file """ + """Add results file""" self.results_file = join(thisDir, filename) def generate_model(self, import_suffixes=[]): - """ Generate the model """ + """Generate the model""" self._generate_model() # Add suffixes - self.test_suffixes = [] if self.disable_suffix_tests else \ - import_suffixes + self.test_suffixes = [] if self.disable_suffix_tests else import_suffixes if isinstance(self.model, IBlock): for suffix in self.test_suffixes: setattr(self.model, suffix, pmo.suffix(direction=pmo.suffix.IMPORT)) @@ -78,14 +77,10 @@ def generate_model(self, import_suffixes=[]): for suffix in self.test_suffixes: setattr(self.model, suffix, Suffix(direction=Suffix.IMPORT)) - def solve(self, - solver, - io, - io_options, - solver_options, - symbolic_labels, - load_solutions): - """ Optimize the model """ + def solve( + self, solver, io, io_options, solver_options, symbolic_labels, load_solutions + ): + """Optimize the model""" assert self.model is not None if not io_options: @@ -102,7 +97,7 @@ def solve(self, assert opt.problem_format() == ProblemFormat.cpxlp elif io == 'mps': assert opt.problem_format() == ProblemFormat.mps - #elif io == 'python': + # elif io == 'python': # print opt.problem_format() # assert opt.problem_format() is None @@ -110,12 +105,11 @@ def solve(self, if isinstance(opt, PersistentSolver): opt.set_instance(self.model, symbolic_solver_labels=symbolic_labels) if opt.warm_start_capable(): - results = opt.solve(warmstart=True, - load_solutions=load_solutions, - **io_options) + results = opt.solve( + warmstart=True, load_solutions=load_solutions, **io_options + ) else: - results = opt.solve(load_solutions=load_solutions, - **io_options) + results = opt.solve(load_solutions=load_solutions, **io_options) else: if opt.warm_start_capable(): results = opt.solve( @@ -123,36 +117,39 @@ def solve(self, symbolic_solver_labels=symbolic_labels, warmstart=True, load_solutions=load_solutions, - **io_options) + **io_options + ) else: results = opt.solve( self.model, symbolic_solver_labels=symbolic_labels, load_solutions=load_solutions, - **io_options) + **io_options + ) return opt, results finally: pass - #opt.deactivate() + # opt.deactivate() del opt return None, None def save_current_solution(self, filename, **kwds): - """ Save the solution in a specified file name """ + """Save the solution in a specified file name""" assert self.model is not None model = self.model - suffixes = dict((suffix, getattr(model,suffix)) - for suffix in kwds.pop('suffixes',[])) + suffixes = dict( + (suffix, getattr(model, suffix)) for suffix in kwds.pop('suffixes', []) + ) for suf in suffixes.values(): if isinstance(self.model, IBlock): - assert isinstance(suf,pmo.suffix) + assert isinstance(suf, pmo.suffix) assert suf.import_enabled else: - assert isinstance(suf,Suffix) + assert isinstance(suf, Suffix) assert suf.import_enabled() - with open(filename,'w') as f: + with open(filename, 'w') as f: # # Collect Block, Variable, Constraint, Objective and Suffix data # @@ -195,231 +192,287 @@ def validate_current_solution(self, **kwds): assert self.model is not None assert self.results_file is not None model = self.model - suffixes = dict((suffix, getattr(model,suffix)) - for suffix in kwds.pop('suffixes',[])) - exclude = kwds.pop('exclude_suffixes',set()) + suffixes = dict( + (suffix, getattr(model, suffix)) for suffix in kwds.pop('suffixes', []) + ) + exclude = kwds.pop('exclude_suffixes', set()) for suf in suffixes.values(): if isinstance(self.model, IBlock): - assert isinstance(suf,pmo.suffix) + assert isinstance(suf, pmo.suffix) assert suf.import_enabled else: - assert isinstance(suf,Suffix) + assert isinstance(suf, Suffix) assert suf.import_enabled() solution = None - error_str = ("Difference in solution for {0}.{1}:\n\tBaseline " - "- {2}\n\tCurrent - {3}") + error_str = ( + "Difference in solution for {0}.{1}:\n\tBaseline - {2}\n\tCurrent - {3}" + ) - with open(self.results_file,'r') as f: + with open(self.results_file, 'r') as f: try: solution = json.load(f) except: - return (False,"Problem reading file "+self.results_file) + return (False, "Problem reading file " + self.results_file) for var in model.component_data_objects(Var): var_value_sol = solution[var.name]['value'] var_value = var.value if not ((var_value is None) and (var_value_sol is None)): - if ((var_value is None) ^ (var_value_sol is None)) or \ - (abs(var_value_sol - var_value) > self.diff_tol): - return (False, - error_str.format(var.name, - 'value', - var_value_sol, - var_value)) + if ((var_value is None) ^ (var_value_sol is None)) or ( + abs(var_value_sol - var_value) > self.diff_tol + ): + return ( + False, + error_str.format(var.name, 'value', var_value_sol, var_value), + ) if not (solution[var.name]['stale'] is var.stale): - return (False, - error_str.format(var.name, - 'stale', - solution[var.name]['stale'], - var.stale)) + return ( + False, + error_str.format( + var.name, 'stale', solution[var.name]['stale'], var.stale + ), + ) for suffix_name, suffix in suffixes.items(): _ex = exclude.get(suffix_name, None) if suffix_name in solution[var.name]: if suffix.get(var) is None: - if _ex is not None and ( - not _ex[1] or var.name in _ex[1] ): + if _ex is not None and (not _ex[1] or var.name in _ex[1]): continue - if not(solution[var.name][suffix_name] in \ - solution["suffix defaults"][suffix_name]): - return (False, - error_str.format( - var.name, - suffix, - solution[var.name][suffix_name], - "none defined")) - elif _ex is not None and _ex[0] and ( - not _ex[1] or var.name in _ex[1] ): - return ( - False, - "Expected solution to be missing suffix %s" - % suffix_name) - elif not abs(solution[var.name][suffix_name] - \ - suffix.get(var)) < self.diff_tol: - return (False, + if not ( + solution[var.name][suffix_name] + in solution["suffix defaults"][suffix_name] + ): + return ( + False, error_str.format( var.name, suffix, solution[var.name][suffix_name], - suffix.get(var))) + "none defined", + ), + ) + elif ( + _ex is not None + and _ex[0] + and (not _ex[1] or var.name in _ex[1]) + ): + return ( + False, + "Expected solution to be missing suffix %s" % suffix_name, + ) + elif ( + not abs(solution[var.name][suffix_name] - suffix.get(var)) + < self.diff_tol + ): + return ( + False, + error_str.format( + var.name, + suffix, + solution[var.name][suffix_name], + suffix.get(var), + ), + ) for con in model.component_data_objects(Constraint): con_value_sol = solution[con.name]['value'] con_value = con(exception=False) if not ((con_value is None) and (con_value_sol is None)): - if ((con_value is None) ^ (con_value_sol is None)) or \ - (abs(con_value_sol - con_value) > self.diff_tol): - return (False, - error_str.format(con.name, - 'value', - con_value_sol, - con_value)) + if ((con_value is None) ^ (con_value_sol is None)) or ( + abs(con_value_sol - con_value) > self.diff_tol + ): + return ( + False, + error_str.format(con.name, 'value', con_value_sol, con_value), + ) for suffix_name, suffix in suffixes.items(): _ex = exclude.get(suffix_name, None) if suffix_name in solution[con.name]: if suffix.get(con) is None: - if _ex is not None and ( - not _ex[1] or con.name in _ex[1] ): + if _ex is not None and (not _ex[1] or con.name in _ex[1]): continue - if not (solution[con.name][suffix_name] in \ - solution["suffix defaults"][suffix_name]): - return (False, - error_str.format( - con.name, - suffix, - solution[con.name][suffix_name], - "none defined")) - elif _ex is not None and _ex[0] and ( - not _ex[1] or con.name in _ex[1] ): - return ( - False, - "Expected solution to be missing suffix %s" - % suffix_name) - elif not abs(solution[con.name][suffix_name] - \ - suffix.get(con)) < self.diff_tol: - return (False, + if not ( + solution[con.name][suffix_name] + in solution["suffix defaults"][suffix_name] + ): + return ( + False, error_str.format( con.name, suffix, solution[con.name][suffix_name], - suffix.get(con))) + "none defined", + ), + ) + elif ( + _ex is not None + and _ex[0] + and (not _ex[1] or con.name in _ex[1]) + ): + return ( + False, + "Expected solution to be missing suffix %s" % suffix_name, + ) + elif ( + not abs(solution[con.name][suffix_name] - suffix.get(con)) + < self.diff_tol + ): + return ( + False, + error_str.format( + con.name, + suffix, + solution[con.name][suffix_name], + suffix.get(con), + ), + ) for obj in model.component_data_objects(Objective): obj_value_sol = solution[obj.name]['value'] obj_value = obj(exception=False) if not ((obj_value is None) and (obj_value_sol is None)): - if ((obj_value is None) ^ (obj_value_sol is None)) or \ - (abs(obj_value_sol - obj_value) > self.diff_tol): - return (False, - error_str.format(obj.name, - 'value', - obj_value_sol, - obj_value)) + if ((obj_value is None) ^ (obj_value_sol is None)) or ( + abs(obj_value_sol - obj_value) > self.diff_tol + ): + return ( + False, + error_str.format(obj.name, 'value', obj_value_sol, obj_value), + ) for suffix_name, suffix in suffixes.items(): _ex = exclude.get(suffix_name, None) if suffix_name in solution[obj.name]: if suffix.get(obj) is None: - if _ex is not None and ( - not _ex[1] or obj.name in _ex[1] ): + if _ex is not None and (not _ex[1] or obj.name in _ex[1]): continue - if not(solution[obj.name][suffix_name] in \ - solution["suffix defaults"][suffix_name]): - return (False, - error_str.format( - obj.name, - suffix, - solution[obj.name][suffix_name], - "none defined")) - elif _ex is not None and _ex[0] and ( - not _ex[1] or obj.name in _ex[1] ): - return ( - False, - "Expected solution to be missing suffix %s" - % suffix_name) - elif not abs(solution[obj.name][suffix_name] - \ - suffix.get(obj)) < self.diff_tol: - return (False, + if not ( + solution[obj.name][suffix_name] + in solution["suffix defaults"][suffix_name] + ): + return ( + False, error_str.format( obj.name, suffix, solution[obj.name][suffix_name], - suffix.get(obj))) - - first=True + "none defined", + ), + ) + elif ( + _ex is not None + and _ex[0] + and (not _ex[1] or obj.name in _ex[1]) + ): + return ( + False, + "Expected solution to be missing suffix %s" % suffix_name, + ) + elif ( + not abs(solution[obj.name][suffix_name] - suffix.get(obj)) + < self.diff_tol + ): + return ( + False, + error_str.format( + obj.name, + suffix, + solution[obj.name][suffix_name], + suffix.get(obj), + ), + ) + + first = True for block in model.block_data_objects(): if first: - first=False + first = False continue for suffix_name, suffix in suffixes.items(): _ex = exclude.get(suffix_name, None) - if (solution[block.name] is not None) and \ - (suffix_name in solution[block.name]): + if (solution[block.name] is not None) and ( + suffix_name in solution[block.name] + ): if suffix.get(block) is None: - if _ex is not None and ( - not _ex[1] or block.name in _ex[1] ): + if _ex is not None and (not _ex[1] or block.name in _ex[1]): continue - if not(solution[block.name][suffix_name] in \ - solution["suffix defaults"][suffix_name]): - return (False, - error_str.format( - block.name, - suffix, - solution[block.name][suffix_name], - "none defined")) - elif _ex is not None and _ex[0] and ( - not _ex[1] or block.name in _ex[1] ): - return ( - False, - "Expected solution to be missing suffix %s" - % suffix_name) - elif not abs(solution[block.name][suffix_name] - \ - suffix.get(block)) < self.diff_tol: - return (False, + if not ( + solution[block.name][suffix_name] + in solution["suffix defaults"][suffix_name] + ): + return ( + False, error_str.format( block.name, suffix, solution[block.name][suffix_name], - suffix.get(block))) - return (True,"") + "none defined", + ), + ) + elif ( + _ex is not None + and _ex[0] + and (not _ex[1] or block.name in _ex[1]) + ): + return ( + False, + "Expected solution to be missing suffix %s" % suffix_name, + ) + elif ( + not abs(solution[block.name][suffix_name] - suffix.get(block)) + < self.diff_tol + ): + return ( + False, + error_str.format( + block.name, + suffix, + solution[block.name][suffix_name], + suffix.get(block), + ), + ) + return (True, "") def validate_capabilities(self, opt): - """ Validate the capabilites of the optimizer """ - if (self.linear is True) and \ - (not opt.has_capability('linear') is True): + """Validate the capabilities of the optimizer""" + if (self.linear is True) and (not opt.has_capability('linear') is True): return False - if (self.integer is True) and \ - (not opt.has_capability('integer') is True): + if (self.integer is True) and (not opt.has_capability('integer') is True): return False - if (self.quadratic_objective is True) and \ - (not opt.has_capability('quadratic_objective') is True): + if (self.quadratic_objective is True) and ( + not opt.has_capability('quadratic_objective') is True + ): return False - if (self.quadratic_constraint is True) and \ - (not opt.has_capability('quadratic_constraint') is True): + if (self.quadratic_constraint is True) and ( + not opt.has_capability('quadratic_constraint') is True + ): return False - if (self.sos1 is True) and \ - (not opt.has_capability('sos1') is True): + if (self.sos1 is True) and (not opt.has_capability('sos1') is True): return False - if (self.sos2 is True) and \ - (not opt.has_capability('sos2') is True): + if (self.sos2 is True) and (not opt.has_capability('sos2') is True): return False return True def post_solve_test_validation(self, tester, results): - """ Perform post-solve validation tests """ + """Perform post-solve validation tests""" if tester is None: - assert results['Solver'][0]['termination condition'] == TerminationCondition.optimal + assert ( + results['Solver'][0]['termination condition'] + == TerminationCondition.optimal + ) else: - tester.assertEqual(results['Solver'][0]['termination condition'], TerminationCondition.optimal) + tester.assertEqual( + results['Solver'][0]['termination condition'], + TerminationCondition.optimal, + ) def warmstart_model(self): - """ Initialize model parameters """ + """Initialize model parameters""" pass if __name__ == "__main__": import pyomo.solvers.tests.models + for key, value in _test_models.items(): print(key) obj = value() obj.generate_model() obj.warmstart_model() - diff --git a/pyomo/solvers/tests/piecewise_linear/convex.lp b/pyomo/solvers/tests/piecewise_linear/convex.lp index 39d9ae9c26b..7db4a1ee7df 100644 --- a/pyomo/solvers/tests/piecewise_linear/convex.lp +++ b/pyomo/solvers/tests/piecewise_linear/convex.lp @@ -7,19 +7,16 @@ obj: s.t. c_u_con_simplified_piecewise_constraint(1)_: --1 X -1 Z -<= -2 +-1.0 X +<= -2.0 c_u_con_simplified_piecewise_constraint(2)_: -+1 X -1 Z -<= 0 - -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 ++1 X +<= 0.0 bounds + -inf <= Z <= +inf -5 <= X <= 5 - -inf <= Z <= +inf end diff --git a/pyomo/solvers/tests/piecewise_linear/indexed.lp b/pyomo/solvers/tests/piecewise_linear/indexed.lp index dd4205ec74d..32e9a0161fc 100644 --- a/pyomo/solvers/tests/piecewise_linear/indexed.lp +++ b/pyomo/solvers/tests/piecewise_linear/indexed.lp @@ -9,27 +9,27 @@ s.t. c_e_linearized_constraint(0_1)_LOG_constraint1_: +1 X(0_1) -+2 linearized_constraint(0_1)_LOG_lambda(1) ++2.0 linearized_constraint(0_1)_LOG_lambda(1) +1.5 linearized_constraint(0_1)_LOG_lambda(2) -+1 linearized_constraint(0_1)_LOG_lambda(3) ++1.0 linearized_constraint(0_1)_LOG_lambda(3) +0.5 linearized_constraint(0_1)_LOG_lambda(4) -0.5 linearized_constraint(0_1)_LOG_lambda(6) -1 linearized_constraint(0_1)_LOG_lambda(7) -1.5 linearized_constraint(0_1)_LOG_lambda(8) --2 linearized_constraint(0_1)_LOG_lambda(9) +-2.0 linearized_constraint(0_1)_LOG_lambda(9) = 0 c_e_linearized_constraint(0_1)_LOG_constraint2_: +1 Z(0_1) --0.63907152907645237 linearized_constraint(0_1)_LOG_lambda(1) +-0.6390715290764524 linearized_constraint(0_1)_LOG_lambda(1) +0.49663531783502585 linearized_constraint(0_1)_LOG_lambda(2) -+0.38366218546322628 linearized_constraint(0_1)_LOG_lambda(3) --0.75114361554693365 linearized_constraint(0_1)_LOG_lambda(4) -+1 linearized_constraint(0_1)_LOG_lambda(5) --0.85114361554693374 linearized_constraint(0_1)_LOG_lambda(6) -+0.1836621854632263 linearized_constraint(0_1)_LOG_lambda(7) ++0.3836621854632263 linearized_constraint(0_1)_LOG_lambda(3) +-0.7511436155469337 linearized_constraint(0_1)_LOG_lambda(4) +-0.8511436155469337 linearized_constraint(0_1)_LOG_lambda(6) ++0.18366218546322624 linearized_constraint(0_1)_LOG_lambda(7) +0.1966353178350258 linearized_constraint(0_1)_LOG_lambda(8) -1.0390715290764525 linearized_constraint(0_1)_LOG_lambda(9) ++1.0 linearized_constraint(0_1)_LOG_lambda(5) = 0 c_e_linearized_constraint(0_1)_LOG_constraint3_: @@ -37,80 +37,80 @@ c_e_linearized_constraint(0_1)_LOG_constraint3_: +1 linearized_constraint(0_1)_LOG_lambda(2) +1 linearized_constraint(0_1)_LOG_lambda(3) +1 linearized_constraint(0_1)_LOG_lambda(4) -+1 linearized_constraint(0_1)_LOG_lambda(5) +1 linearized_constraint(0_1)_LOG_lambda(6) +1 linearized_constraint(0_1)_LOG_lambda(7) +1 linearized_constraint(0_1)_LOG_lambda(8) +1 linearized_constraint(0_1)_LOG_lambda(9) ++1 linearized_constraint(0_1)_LOG_lambda(5) = 1 c_u_linearized_constraint(0_1)_LOG_constraint4(1)_: --1 linearized_constraint(0_1)_LOG_bin_y(1) +1 linearized_constraint(0_1)_LOG_lambda(6) +1 linearized_constraint(0_1)_LOG_lambda(7) +1 linearized_constraint(0_1)_LOG_lambda(8) +1 linearized_constraint(0_1)_LOG_lambda(9) +-1 linearized_constraint(0_1)_LOG_bin_y(1) <= 0 c_u_linearized_constraint(0_1)_LOG_constraint4(2)_: --1 linearized_constraint(0_1)_LOG_bin_y(2) +1 linearized_constraint(0_1)_LOG_lambda(4) -+1 linearized_constraint(0_1)_LOG_lambda(5) +1 linearized_constraint(0_1)_LOG_lambda(6) ++1 linearized_constraint(0_1)_LOG_lambda(5) +-1 linearized_constraint(0_1)_LOG_bin_y(2) <= 0 c_u_linearized_constraint(0_1)_LOG_constraint4(3)_: --1 linearized_constraint(0_1)_LOG_bin_y(3) +1 linearized_constraint(0_1)_LOG_lambda(3) +1 linearized_constraint(0_1)_LOG_lambda(7) +-1 linearized_constraint(0_1)_LOG_bin_y(3) <= 0 c_u_linearized_constraint(0_1)_LOG_constraint5(1)_: -+1 linearized_constraint(0_1)_LOG_bin_y(1) +1 linearized_constraint(0_1)_LOG_lambda(1) +1 linearized_constraint(0_1)_LOG_lambda(2) +1 linearized_constraint(0_1)_LOG_lambda(3) +1 linearized_constraint(0_1)_LOG_lambda(4) ++1 linearized_constraint(0_1)_LOG_bin_y(1) <= 1 c_u_linearized_constraint(0_1)_LOG_constraint5(2)_: -+1 linearized_constraint(0_1)_LOG_bin_y(2) +1 linearized_constraint(0_1)_LOG_lambda(1) +1 linearized_constraint(0_1)_LOG_lambda(2) +1 linearized_constraint(0_1)_LOG_lambda(8) +1 linearized_constraint(0_1)_LOG_lambda(9) ++1 linearized_constraint(0_1)_LOG_bin_y(2) <= 1 c_u_linearized_constraint(0_1)_LOG_constraint5(3)_: -+1 linearized_constraint(0_1)_LOG_bin_y(3) +1 linearized_constraint(0_1)_LOG_lambda(1) -+1 linearized_constraint(0_1)_LOG_lambda(5) +1 linearized_constraint(0_1)_LOG_lambda(9) ++1 linearized_constraint(0_1)_LOG_lambda(5) ++1 linearized_constraint(0_1)_LOG_bin_y(3) <= 1 c_e_linearized_constraint(8_3)_LOG_constraint1_: +1 X(8_3) -+2 linearized_constraint(8_3)_LOG_lambda(1) ++2.0 linearized_constraint(8_3)_LOG_lambda(1) +1.5 linearized_constraint(8_3)_LOG_lambda(2) -+1 linearized_constraint(8_3)_LOG_lambda(3) ++1.0 linearized_constraint(8_3)_LOG_lambda(3) +0.5 linearized_constraint(8_3)_LOG_lambda(4) -0.5 linearized_constraint(8_3)_LOG_lambda(6) -1 linearized_constraint(8_3)_LOG_lambda(7) -1.5 linearized_constraint(8_3)_LOG_lambda(8) --2 linearized_constraint(8_3)_LOG_lambda(9) +-2.0 linearized_constraint(8_3)_LOG_lambda(9) = 0 c_e_linearized_constraint(8_3)_LOG_constraint2_: +1 Z(8_3) --0.63907152907645237 linearized_constraint(8_3)_LOG_lambda(1) +-0.6390715290764524 linearized_constraint(8_3)_LOG_lambda(1) +0.49663531783502585 linearized_constraint(8_3)_LOG_lambda(2) -+0.38366218546322628 linearized_constraint(8_3)_LOG_lambda(3) --0.75114361554693365 linearized_constraint(8_3)_LOG_lambda(4) -+1 linearized_constraint(8_3)_LOG_lambda(5) --0.85114361554693374 linearized_constraint(8_3)_LOG_lambda(6) -+0.1836621854632263 linearized_constraint(8_3)_LOG_lambda(7) ++0.3836621854632263 linearized_constraint(8_3)_LOG_lambda(3) +-0.7511436155469337 linearized_constraint(8_3)_LOG_lambda(4) +-0.8511436155469337 linearized_constraint(8_3)_LOG_lambda(6) ++0.18366218546322624 linearized_constraint(8_3)_LOG_lambda(7) +0.1966353178350258 linearized_constraint(8_3)_LOG_lambda(8) -1.0390715290764525 linearized_constraint(8_3)_LOG_lambda(9) ++1.0 linearized_constraint(8_3)_LOG_lambda(5) = 0 c_e_linearized_constraint(8_3)_LOG_constraint3_: @@ -118,86 +118,83 @@ c_e_linearized_constraint(8_3)_LOG_constraint3_: +1 linearized_constraint(8_3)_LOG_lambda(2) +1 linearized_constraint(8_3)_LOG_lambda(3) +1 linearized_constraint(8_3)_LOG_lambda(4) -+1 linearized_constraint(8_3)_LOG_lambda(5) +1 linearized_constraint(8_3)_LOG_lambda(6) +1 linearized_constraint(8_3)_LOG_lambda(7) +1 linearized_constraint(8_3)_LOG_lambda(8) +1 linearized_constraint(8_3)_LOG_lambda(9) ++1 linearized_constraint(8_3)_LOG_lambda(5) = 1 c_u_linearized_constraint(8_3)_LOG_constraint4(1)_: --1 linearized_constraint(8_3)_LOG_bin_y(1) +1 linearized_constraint(8_3)_LOG_lambda(6) +1 linearized_constraint(8_3)_LOG_lambda(7) +1 linearized_constraint(8_3)_LOG_lambda(8) +1 linearized_constraint(8_3)_LOG_lambda(9) +-1 linearized_constraint(8_3)_LOG_bin_y(1) <= 0 c_u_linearized_constraint(8_3)_LOG_constraint4(2)_: --1 linearized_constraint(8_3)_LOG_bin_y(2) +1 linearized_constraint(8_3)_LOG_lambda(4) -+1 linearized_constraint(8_3)_LOG_lambda(5) +1 linearized_constraint(8_3)_LOG_lambda(6) ++1 linearized_constraint(8_3)_LOG_lambda(5) +-1 linearized_constraint(8_3)_LOG_bin_y(2) <= 0 c_u_linearized_constraint(8_3)_LOG_constraint4(3)_: --1 linearized_constraint(8_3)_LOG_bin_y(3) +1 linearized_constraint(8_3)_LOG_lambda(3) +1 linearized_constraint(8_3)_LOG_lambda(7) +-1 linearized_constraint(8_3)_LOG_bin_y(3) <= 0 c_u_linearized_constraint(8_3)_LOG_constraint5(1)_: -+1 linearized_constraint(8_3)_LOG_bin_y(1) +1 linearized_constraint(8_3)_LOG_lambda(1) +1 linearized_constraint(8_3)_LOG_lambda(2) +1 linearized_constraint(8_3)_LOG_lambda(3) +1 linearized_constraint(8_3)_LOG_lambda(4) ++1 linearized_constraint(8_3)_LOG_bin_y(1) <= 1 c_u_linearized_constraint(8_3)_LOG_constraint5(2)_: -+1 linearized_constraint(8_3)_LOG_bin_y(2) +1 linearized_constraint(8_3)_LOG_lambda(1) +1 linearized_constraint(8_3)_LOG_lambda(2) +1 linearized_constraint(8_3)_LOG_lambda(8) +1 linearized_constraint(8_3)_LOG_lambda(9) ++1 linearized_constraint(8_3)_LOG_bin_y(2) <= 1 c_u_linearized_constraint(8_3)_LOG_constraint5(3)_: -+1 linearized_constraint(8_3)_LOG_bin_y(3) +1 linearized_constraint(8_3)_LOG_lambda(1) -+1 linearized_constraint(8_3)_LOG_lambda(5) +1 linearized_constraint(8_3)_LOG_lambda(9) ++1 linearized_constraint(8_3)_LOG_lambda(5) ++1 linearized_constraint(8_3)_LOG_bin_y(3) <= 1 -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 - bounds + -inf <= Z(0_1) <= +inf + -inf <= Z(8_3) <= +inf -2 <= X(0_1) <= 2 - -2 <= X(8_3) <= 2 - -inf <= Z(0_1) <= +inf - -inf <= Z(8_3) <= +inf 0 <= linearized_constraint(0_1)_LOG_lambda(1) <= +inf 0 <= linearized_constraint(0_1)_LOG_lambda(2) <= +inf 0 <= linearized_constraint(0_1)_LOG_lambda(3) <= +inf 0 <= linearized_constraint(0_1)_LOG_lambda(4) <= +inf - 0 <= linearized_constraint(0_1)_LOG_lambda(5) <= +inf 0 <= linearized_constraint(0_1)_LOG_lambda(6) <= +inf 0 <= linearized_constraint(0_1)_LOG_lambda(7) <= +inf 0 <= linearized_constraint(0_1)_LOG_lambda(8) <= +inf 0 <= linearized_constraint(0_1)_LOG_lambda(9) <= +inf + 0 <= linearized_constraint(0_1)_LOG_lambda(5) <= +inf 0 <= linearized_constraint(0_1)_LOG_bin_y(1) <= 1 0 <= linearized_constraint(0_1)_LOG_bin_y(2) <= 1 0 <= linearized_constraint(0_1)_LOG_bin_y(3) <= 1 + -2 <= X(8_3) <= 2 0 <= linearized_constraint(8_3)_LOG_lambda(1) <= +inf 0 <= linearized_constraint(8_3)_LOG_lambda(2) <= +inf 0 <= linearized_constraint(8_3)_LOG_lambda(3) <= +inf 0 <= linearized_constraint(8_3)_LOG_lambda(4) <= +inf - 0 <= linearized_constraint(8_3)_LOG_lambda(5) <= +inf 0 <= linearized_constraint(8_3)_LOG_lambda(6) <= +inf 0 <= linearized_constraint(8_3)_LOG_lambda(7) <= +inf 0 <= linearized_constraint(8_3)_LOG_lambda(8) <= +inf 0 <= linearized_constraint(8_3)_LOG_lambda(9) <= +inf + 0 <= linearized_constraint(8_3)_LOG_lambda(5) <= +inf 0 <= linearized_constraint(8_3)_LOG_bin_y(1) <= 1 0 <= linearized_constraint(8_3)_LOG_bin_y(2) <= 1 0 <= linearized_constraint(8_3)_LOG_bin_y(3) <= 1 diff --git a/pyomo/solvers/tests/piecewise_linear/kernel_problems/concave_var.py b/pyomo/solvers/tests/piecewise_linear/kernel_problems/concave_var.py index ab548b09060..38c840f9ed9 100644 --- a/pyomo/solvers/tests/piecewise_linear/kernel_problems/concave_var.py +++ b/pyomo/solvers/tests/piecewise_linear/kernel_problems/concave_var.py @@ -9,13 +9,22 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.kernel import block, variable, variable_list, block_list, piecewise, objective, constraint, constraint_list +from pyomo.kernel import ( + block, + variable, + variable_list, + block_list, + piecewise, + objective, + constraint, + constraint_list, +) -breakpoints = list(range(-5,0))+list(range(1,5)) -values = [-x**2 for x in breakpoints] +breakpoints = list(range(-5, 0)) + list(range(1, 5)) +values = [-(x**2) for x in breakpoints] -def define_model(**kwds): +def define_model(**kwds): sense = kwds.pop("sense") m = block() @@ -27,13 +36,10 @@ def define_model(**kwds): m.x.append(variable(lb=-5, ub=4)) m.Fx.append(variable()) m.piecewise.append( - piecewise(breakpoints, values, - input=m.x[i], - output=m.Fx[i], - **kwds)) + piecewise(breakpoints, values, input=m.x[i], output=m.Fx[i], **kwds) + ) - m.obj = objective(expr=sum(m.Fx), - sense=sense) + m.obj = objective(expr=sum(m.Fx), sense=sense) # fix the answer for testing purposes m.set_answer = constraint_list() diff --git a/pyomo/solvers/tests/piecewise_linear/kernel_problems/convex_var.py b/pyomo/solvers/tests/piecewise_linear/kernel_problems/convex_var.py index 9de3a614b06..3aef735965e 100644 --- a/pyomo/solvers/tests/piecewise_linear/kernel_problems/convex_var.py +++ b/pyomo/solvers/tests/piecewise_linear/kernel_problems/convex_var.py @@ -9,13 +9,22 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.kernel import block, variable, variable_list, block_list, piecewise, objective, constraint, constraint_list - -breakpoints = list(range(-5,0))+list(range(1,5)) +from pyomo.kernel import ( + block, + variable, + variable_list, + block_list, + piecewise, + objective, + constraint, + constraint_list, +) + +breakpoints = list(range(-5, 0)) + list(range(1, 5)) values = [x**2 for x in breakpoints] -def define_model(**kwds): +def define_model(**kwds): sense = kwds.pop("sense") m = block() @@ -27,13 +36,10 @@ def define_model(**kwds): m.x.append(variable(lb=-5, ub=4)) m.Fx.append(variable()) m.piecewise.append( - piecewise(breakpoints, values, - input=m.x[i], - output=m.Fx[i], - **kwds)) + piecewise(breakpoints, values, input=m.x[i], output=m.Fx[i], **kwds) + ) - m.obj = objective(expr=sum(m.Fx), - sense=sense) + m.obj = objective(expr=sum(m.Fx), sense=sense) # fix the answer for testing purposes m.set_answer = constraint_list() diff --git a/pyomo/solvers/tests/piecewise_linear/kernel_problems/piecewise_var.py b/pyomo/solvers/tests/piecewise_linear/kernel_problems/piecewise_var.py index 37c6e585268..b77566e9d2d 100644 --- a/pyomo/solvers/tests/piecewise_linear/kernel_problems/piecewise_var.py +++ b/pyomo/solvers/tests/piecewise_linear/kernel_problems/piecewise_var.py @@ -9,13 +9,22 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.kernel import block, variable, variable_list, block_list, piecewise, objective, constraint, constraint_list +from pyomo.kernel import ( + block, + variable, + variable_list, + block_list, + piecewise, + objective, + constraint, + constraint_list, +) -breakpoints = [0,1,3,5,6] -values = [0,2,3,-3,-1] +breakpoints = [0, 1, 3, 5, 6] +values = [0, 2, 3, -3, -1] -def define_model(**kwds): +def define_model(**kwds): sense = kwds.pop("sense") m = block() @@ -27,13 +36,10 @@ def define_model(**kwds): m.x.append(variable(lb=0, ub=6)) m.Fx.append(variable()) m.piecewise.append( - piecewise(breakpoints, values, - input=m.x[i], - output=m.Fx[i], - **kwds)) + piecewise(breakpoints, values, input=m.x[i], output=m.Fx[i], **kwds) + ) - m.obj = objective(expr=sum(m.Fx), - sense=sense) + m.obj = objective(expr=sum(m.Fx), sense=sense) # fix the answer for testing purposes m.set_answer = constraint_list() diff --git a/pyomo/solvers/tests/piecewise_linear/kernel_problems/step_var.py b/pyomo/solvers/tests/piecewise_linear/kernel_problems/step_var.py index b237130eeb9..642181deb7d 100644 --- a/pyomo/solvers/tests/piecewise_linear/kernel_problems/step_var.py +++ b/pyomo/solvers/tests/piecewise_linear/kernel_problems/step_var.py @@ -9,13 +9,22 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -from pyomo.kernel import block, variable, variable_list, block_list, piecewise, objective, constraint, constraint_list +from pyomo.kernel import ( + block, + variable, + variable_list, + block_list, + piecewise, + objective, + constraint, + constraint_list, +) breakpoints = [0, 1, 1, 2, 3] values = [0, 0, 1, 1, 2] -def define_model(**kwds): +def define_model(**kwds): sense = kwds.pop("sense") m = block() @@ -27,22 +36,19 @@ def define_model(**kwds): m.x.append(variable(lb=0, ub=3)) m.Fx.append(variable()) m.piecewise.append( - piecewise(breakpoints, values, - input=m.x[i], - output=m.Fx[i], - **kwds)) - m.obj = objective(expr=sum(m.Fx) + sum(m.x), - sense=sense) + piecewise(breakpoints, values, input=m.x[i], output=m.Fx[i], **kwds) + ) + m.obj = objective(expr=sum(m.Fx) + sum(m.x), sense=sense) # fix the answer for testing purposes m.set_answer = constraint_list() # Fx1 should solve to 0 - m.set_answer.append(constraint(expr= m.x[0] == 0.5)) - m.set_answer.append(constraint(expr= m.x[1] == 1.0)) - m.set_answer.append(constraint(expr= m.Fx[1] == 0.5)) + m.set_answer.append(constraint(expr=m.x[0] == 0.5)) + m.set_answer.append(constraint(expr=m.x[1] == 1.0)) + m.set_answer.append(constraint(expr=m.Fx[1] == 0.5)) # Fx[2] should solve to 1 - m.set_answer.append(constraint(expr= m.x[2] == 1.5)) + m.set_answer.append(constraint(expr=m.x[2] == 1.5)) # Fx[3] should solve to 1.5 - m.set_answer.append(constraint(expr= m.x[3] == 2.5)) + m.set_answer.append(constraint(expr=m.x[3] == 2.5)) return m diff --git a/pyomo/solvers/tests/piecewise_linear/nonconvex.lp b/pyomo/solvers/tests/piecewise_linear/nonconvex.lp index c886aef1f83..8265d8efd60 100644 --- a/pyomo/solvers/tests/piecewise_linear/nonconvex.lp +++ b/pyomo/solvers/tests/piecewise_linear/nonconvex.lp @@ -2,51 +2,51 @@ min obj: -+1 n +1 p ++1 n s.t. c_e_pn_con_: -+1 Z -+1 n -1 p -= 7 ++1 n ++1 Z += 7.0 c_e_con_DCC_constraint1_: +1 X -+1 con_DCC_lambda(1_1) --2 con_DCC_lambda(1_2) --2 con_DCC_lambda(2_2) --6 con_DCC_lambda(2_3) --6 con_DCC_lambda(3_3) --10 con_DCC_lambda(3_4) ++1.0 con_DCC_lambda(1_1) +-2.0 con_DCC_lambda(1_2) +-2.0 con_DCC_lambda(2_2) +-6.0 con_DCC_lambda(2_3) +-6.0 con_DCC_lambda(3_3) +-10.0 con_DCC_lambda(3_4) = 0 c_e_con_DCC_constraint2_: +1 Z -+1 con_DCC_lambda(1_1) -+8 con_DCC_lambda(2_3) -+8 con_DCC_lambda(3_3) --12 con_DCC_lambda(3_4) ++1.0 con_DCC_lambda(1_1) ++8.0 con_DCC_lambda(2_3) ++8.0 con_DCC_lambda(3_3) +-12.0 con_DCC_lambda(3_4) = 0 c_e_con_DCC_constraint3(1)_: -+1 con_DCC_bin_y(1) -1 con_DCC_lambda(1_1) -1 con_DCC_lambda(1_2) ++1 con_DCC_bin_y(1) = 0 c_e_con_DCC_constraint3(2)_: -+1 con_DCC_bin_y(2) -1 con_DCC_lambda(2_2) -1 con_DCC_lambda(2_3) ++1 con_DCC_bin_y(2) = 0 c_e_con_DCC_constraint3(3)_: -+1 con_DCC_bin_y(3) -1 con_DCC_lambda(3_3) -1 con_DCC_lambda(3_4) ++1 con_DCC_bin_y(3) = 0 c_e_con_DCC_constraint4_: @@ -55,14 +55,11 @@ c_e_con_DCC_constraint4_: +1 con_DCC_bin_y(3) = 1 -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 - bounds - -1 <= X <= 10 - -inf <= Z <= +inf 0 <= p <= +inf 0 <= n <= +inf + -inf <= Z <= +inf + -1.0 <= X <= 10.0 0 <= con_DCC_lambda(1_1) <= +inf 0 <= con_DCC_lambda(1_2) <= +inf 0 <= con_DCC_lambda(2_2) <= +inf diff --git a/pyomo/solvers/tests/piecewise_linear/problems/concave_multi_vararray1.py b/pyomo/solvers/tests/piecewise_linear/problems/concave_multi_vararray1.py index 587cd650243..b24f7e1bd72 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/concave_multi_vararray1.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/concave_multi_vararray1.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -""" +r""" / 9x+20 , -5 <= x <= -4 | 7x+12 , -4 <= x <= -3 | 5x+6 , -3 <= x <= -2 @@ -20,43 +20,59 @@ \ -7x+12, 3 <= x <= 4 """ -from pyomo.core import ConcreteModel, Var, Objective, Param, Piecewise, Constraint, maximize, sum_product +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Param, + Piecewise, + Constraint, + maximize, + sum_product, +) -INDEX_SET1 = range(1,8) # There will be two copies of this function -INDEX_SET2 = range(0,2) -DOMAIN_PTS = dict([((t1,t2),[float(i) for i in (list(range(-5,0))+list(range(1,5)))]) for t1 in INDEX_SET1 for t2 in INDEX_SET2]) +INDEX_SET1 = range(1, 8) # There will be two copies of this function +INDEX_SET2 = range(0, 2) +DOMAIN_PTS = dict( + [ + ((t1, t2), [float(i) for i in (list(range(-5, 0)) + list(range(1, 5)))]) + for t1 in INDEX_SET1 + for t2 in INDEX_SET2 + ] +) -def F(model,t1,t2,x): - return -(x**2)*model.p[t1,t2] -def define_model(**kwds): +def F(model, t1, t2, x): + return -(x**2) * model.p[t1, t2] + +def define_model(**kwds): model = ConcreteModel() - model.x = Var(INDEX_SET1, INDEX_SET2, bounds=(-5,4)) # domain variable - model.Fx = Var(INDEX_SET1, INDEX_SET2) # range variable + model.x = Var(INDEX_SET1, INDEX_SET2, bounds=(-5, 4)) # domain variable + model.Fx = Var(INDEX_SET1, INDEX_SET2) # range variable model.p = Param(INDEX_SET1, INDEX_SET2, initialize=1.0) - model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense',maximize)) - - model.piecewise = Piecewise(INDEX_SET1,INDEX_SET2,model.Fx,model.x, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - - #Fix the answer for testing purpose - model.set_answer_constraint1 = Constraint(expr= model.x[1,0] == -5.0) - model.set_answer_constraint2 = Constraint(expr= model.x[2,0] == -3.0) - model.set_answer_constraint3 = Constraint(expr= model.x[3,0] == -2.5) - model.set_answer_constraint4 = Constraint(expr= model.x[4,0] == -1.5) - model.set_answer_constraint5 = Constraint(expr= model.x[5,0] == 2.0) - model.set_answer_constraint6 = Constraint(expr= model.x[6,0] == 3.5) - model.set_answer_constraint7 = Constraint(expr= model.x[7,0] == 4.0) - model.set_answer_constraint8 = Constraint(expr= model.x[1,1] == -5.0) - model.set_answer_constraint9 = Constraint(expr= model.x[2,1] == -3.0) - model.set_answer_constraint10 = Constraint(expr= model.x[3,1] == -2.5) - model.set_answer_constraint11 = Constraint(expr= model.x[4,1] == -1.5) - model.set_answer_constraint12 = Constraint(expr= model.x[5,1] == 2.0) - model.set_answer_constraint13 = Constraint(expr= model.x[6,1] == 3.5) - model.set_answer_constraint14 = Constraint(expr= model.x[7,1] == 4.0) + model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense', maximize)) + + model.piecewise = Piecewise( + INDEX_SET1, INDEX_SET2, model.Fx, model.x, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + + # Fix the answer for testing purpose + model.set_answer_constraint1 = Constraint(expr=model.x[1, 0] == -5.0) + model.set_answer_constraint2 = Constraint(expr=model.x[2, 0] == -3.0) + model.set_answer_constraint3 = Constraint(expr=model.x[3, 0] == -2.5) + model.set_answer_constraint4 = Constraint(expr=model.x[4, 0] == -1.5) + model.set_answer_constraint5 = Constraint(expr=model.x[5, 0] == 2.0) + model.set_answer_constraint6 = Constraint(expr=model.x[6, 0] == 3.5) + model.set_answer_constraint7 = Constraint(expr=model.x[7, 0] == 4.0) + model.set_answer_constraint8 = Constraint(expr=model.x[1, 1] == -5.0) + model.set_answer_constraint9 = Constraint(expr=model.x[2, 1] == -3.0) + model.set_answer_constraint10 = Constraint(expr=model.x[3, 1] == -2.5) + model.set_answer_constraint11 = Constraint(expr=model.x[4, 1] == -1.5) + model.set_answer_constraint12 = Constraint(expr=model.x[5, 1] == 2.0) + model.set_answer_constraint13 = Constraint(expr=model.x[6, 1] == 3.5) + model.set_answer_constraint14 = Constraint(expr=model.x[7, 1] == 4.0) return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/concave_multi_vararray2.py b/pyomo/solvers/tests/piecewise_linear/problems/concave_multi_vararray2.py index 9610cf9e414..24c8beeba34 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/concave_multi_vararray2.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/concave_multi_vararray2.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -""" +r""" / 9x+20 , -5 <= x <= -4 | 7x+12 , -4 <= x <= -3 | 5x+6 , -3 <= x <= -2 @@ -20,43 +20,58 @@ \ -7x+12, 3 <= x <= 4 """ -from pyomo.core import ConcreteModel, Var, Objective, Param, Piecewise, Constraint, maximize, sum_product +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Param, + Piecewise, + Constraint, + maximize, + sum_product, +) -INDEX_SET = [(t1,t2) for t1 in range(1,8) for t2 in range(0,2)] -DOMAIN_PTS = dict([(t,[float(i) for i in (list(range(-5,0))+list(range(1,5)))]) for t in INDEX_SET]) +INDEX_SET = [(t1, t2) for t1 in range(1, 8) for t2 in range(0, 2)] +DOMAIN_PTS = dict( + [ + (t, [float(i) for i in (list(range(-5, 0)) + list(range(1, 5)))]) + for t in INDEX_SET + ] +) -def F(model,t1,t2,x): - return -(x**2)*model.p[t1,t2] -def define_model(**kwds): +def F(model, t1, t2, x): + return -(x**2) * model.p[t1, t2] + +def define_model(**kwds): model = ConcreteModel() - model.x = Var(INDEX_SET,bounds=(-5,4)) # domain variable - model.Fx = Var(INDEX_SET) # range variable + model.x = Var(INDEX_SET, bounds=(-5, 4)) # domain variable + model.Fx = Var(INDEX_SET) # range variable model.p = Param(INDEX_SET, initialize=1.0) - model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense',maximize)) - - model.piecewise = Piecewise(INDEX_SET,model.Fx,model.x, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - - #Fix the answer for testing purpose - model.set_answer_constraint1 = Constraint(expr= model.x[1,0] == -5.0) - model.set_answer_constraint2 = Constraint(expr= model.x[2,0] == -3.0) - model.set_answer_constraint3 = Constraint(expr= model.x[3,0] == -2.5) - model.set_answer_constraint4 = Constraint(expr= model.x[4,0] == -1.5) - model.set_answer_constraint5 = Constraint(expr= model.x[5,0] == 2.0) - model.set_answer_constraint6 = Constraint(expr= model.x[6,0] == 3.5) - model.set_answer_constraint7 = Constraint(expr= model.x[7,0] == 4.0) - model.set_answer_constraint8 = Constraint(expr= model.x[1,1] == -5.0) - model.set_answer_constraint9 = Constraint(expr= model.x[2,1] == -3.0) - model.set_answer_constraint10 = Constraint(expr= model.x[3,1] == -2.5) - model.set_answer_constraint11 = Constraint(expr= model.x[4,1] == -1.5) - model.set_answer_constraint12 = Constraint(expr= model.x[5,1] == 2.0) - model.set_answer_constraint13 = Constraint(expr= model.x[6,1] == 3.5) - model.set_answer_constraint14 = Constraint(expr= model.x[7,1] == 4.0) + model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense', maximize)) + + model.piecewise = Piecewise( + INDEX_SET, model.Fx, model.x, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + + # Fix the answer for testing purpose + model.set_answer_constraint1 = Constraint(expr=model.x[1, 0] == -5.0) + model.set_answer_constraint2 = Constraint(expr=model.x[2, 0] == -3.0) + model.set_answer_constraint3 = Constraint(expr=model.x[3, 0] == -2.5) + model.set_answer_constraint4 = Constraint(expr=model.x[4, 0] == -1.5) + model.set_answer_constraint5 = Constraint(expr=model.x[5, 0] == 2.0) + model.set_answer_constraint6 = Constraint(expr=model.x[6, 0] == 3.5) + model.set_answer_constraint7 = Constraint(expr=model.x[7, 0] == 4.0) + model.set_answer_constraint8 = Constraint(expr=model.x[1, 1] == -5.0) + model.set_answer_constraint9 = Constraint(expr=model.x[2, 1] == -3.0) + model.set_answer_constraint10 = Constraint(expr=model.x[3, 1] == -2.5) + model.set_answer_constraint11 = Constraint(expr=model.x[4, 1] == -1.5) + model.set_answer_constraint12 = Constraint(expr=model.x[5, 1] == 2.0) + model.set_answer_constraint13 = Constraint(expr=model.x[6, 1] == 3.5) + model.set_answer_constraint14 = Constraint(expr=model.x[7, 1] == 4.0) return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/concave_var.py b/pyomo/solvers/tests/piecewise_linear/problems/concave_var.py index c1b64577287..4eedf7bdeb9 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/concave_var.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/concave_var.py @@ -20,64 +20,74 @@ \ -7x+12, 3 <= x <= 4 """ -from pyomo.core import ConcreteModel, Var, Objective, Piecewise, Constraint, maximize +from pyomo.core import ConcreteModel, Var, Objective, Piecewise, Constraint, maximize -DOMAIN_PTS = [float(i) for i in (list(range(-5,0))+list(range(1,5)))] +DOMAIN_PTS = [float(i) for i in (list(range(-5, 0)) + list(range(1, 5)))] -def F(model,x): + +def F(model, x): return -(x**2) -def define_model(**kwds): +def define_model(**kwds): model = ConcreteModel() - model.x1 = Var(bounds=(-5,4)) # domain variable - model.x2 = Var(bounds=(-5,4)) # domain variable - model.x3 = Var(bounds=(-5,4)) # domain variable - model.x4 = Var(bounds=(-5,4)) # domain variable - model.x5 = Var(bounds=(-5,4)) # domain variable - model.x6 = Var(bounds=(-5,4)) # domain variable - model.x7 = Var(bounds=(-5,4)) # domain variable + model.x1 = Var(bounds=(-5, 4)) # domain variable + model.x2 = Var(bounds=(-5, 4)) # domain variable + model.x3 = Var(bounds=(-5, 4)) # domain variable + model.x4 = Var(bounds=(-5, 4)) # domain variable + model.x5 = Var(bounds=(-5, 4)) # domain variable + model.x6 = Var(bounds=(-5, 4)) # domain variable + model.x7 = Var(bounds=(-5, 4)) # domain variable + + model.Fx1 = Var() # range variable + model.Fx2 = Var() # range variable + model.Fx3 = Var() # range variable + model.Fx4 = Var() # range variable + model.Fx5 = Var() # range variable + model.Fx6 = Var() # range variable + model.Fx7 = Var() # range variable - model.Fx1 = Var() # range variable - model.Fx2 = Var() # range variable - model.Fx3 = Var() # range variable - model.Fx4 = Var() # range variable - model.Fx5 = Var() # range variable - model.Fx6 = Var() # range variable - model.Fx7 = Var() # range variable + model.obj = Objective( + expr=model.Fx1 + + model.Fx2 + + model.Fx3 + + model.Fx4 + + model.Fx5 + + model.Fx6 + + model.Fx7, + sense=kwds.pop('sense', maximize), + ) - model.obj = Objective(expr=model.Fx1+model.Fx2+model.Fx3+model.Fx4+model.Fx5+model.Fx6+model.Fx7, sense=kwds.pop('sense',maximize)) + model.piecewise1 = Piecewise( + model.Fx1, model.x1, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise2 = Piecewise( + model.Fx2, model.x2, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise3 = Piecewise( + model.Fx3, model.x3, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise4 = Piecewise( + model.Fx4, model.x4, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise5 = Piecewise( + model.Fx5, model.x5, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise6 = Piecewise( + model.Fx6, model.x6, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise7 = Piecewise( + model.Fx7, model.x7, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - model.piecewise1 = Piecewise(model.Fx1,model.x1, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise2 = Piecewise(model.Fx2,model.x2, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise3 = Piecewise(model.Fx3,model.x3, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise4 = Piecewise(model.Fx4,model.x4, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise5 = Piecewise(model.Fx5,model.x5, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise6 = Piecewise(model.Fx6,model.x6, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise7 = Piecewise(model.Fx7,model.x7, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint(expr=model.x1 == -5.0) + model.set_answer_constraint2 = Constraint(expr=model.x2 == -3.0) + model.set_answer_constraint3 = Constraint(expr=model.x3 == -2.5) + model.set_answer_constraint4 = Constraint(expr=model.x4 == -1.5) + model.set_answer_constraint5 = Constraint(expr=model.x5 == 2.0) + model.set_answer_constraint6 = Constraint(expr=model.x6 == 3.5) + model.set_answer_constraint7 = Constraint(expr=model.x7 == 4.0) - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(expr= model.x1 == -5.0) - model.set_answer_constraint2 = Constraint(expr= model.x2 == -3.0) - model.set_answer_constraint3 = Constraint(expr= model.x3 == -2.5) - model.set_answer_constraint4 = Constraint(expr= model.x4 == -1.5) - model.set_answer_constraint5 = Constraint(expr= model.x5 == 2.0) - model.set_answer_constraint6 = Constraint(expr= model.x6 == 3.5) - model.set_answer_constraint7 = Constraint(expr= model.x7 == 4.0) - return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/concave_vararray.py b/pyomo/solvers/tests/piecewise_linear/problems/concave_vararray.py index 5f682410897..be013b62309 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/concave_vararray.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/concave_vararray.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -""" +r""" / 9x+20 , -5 <= x <= -4 | 7x+12 , -4 <= x <= -3 | 5x+6 , -3 <= x <= -2 @@ -20,35 +20,50 @@ \ -7x+12, 3 <= x <= 4 """ -from pyomo.core import ConcreteModel, Var, Objective, Param, Piecewise, Constraint, maximize, sum_product +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Param, + Piecewise, + Constraint, + maximize, + sum_product, +) -INDEX_SET = range(1,8) # There will be two copies of this function -DOMAIN_PTS = dict([(t,[float(i) for i in (list(range(-5,0))+list(range(1,5)))]) for t in INDEX_SET]) +INDEX_SET = range(1, 8) # There will be two copies of this function +DOMAIN_PTS = dict( + [ + (t, [float(i) for i in (list(range(-5, 0)) + list(range(1, 5)))]) + for t in INDEX_SET + ] +) -def F(model,t,x): - return -(x**2)*model.p[t] -def define_model(**kwds): +def F(model, t, x): + return -(x**2) * model.p[t] + +def define_model(**kwds): model = ConcreteModel() - model.x = Var(INDEX_SET, bounds=(-5,4)) # domain variable - model.Fx = Var(INDEX_SET) # range variable + model.x = Var(INDEX_SET, bounds=(-5, 4)) # domain variable + model.Fx = Var(INDEX_SET) # range variable model.p = Param(INDEX_SET, initialize=1.0) - model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense',maximize)) - - model.piecewise = Piecewise(INDEX_SET,model.Fx,model.x, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(expr= model.x[1] == -5.0) - model.set_answer_constraint2 = Constraint(expr= model.x[2] == -3.0) - model.set_answer_constraint3 = Constraint(expr= model.x[3] == -2.5) - model.set_answer_constraint4 = Constraint(expr= model.x[4] == -1.5) - model.set_answer_constraint5 = Constraint(expr= model.x[5] == 2.0) - model.set_answer_constraint6 = Constraint(expr= model.x[6] == 3.5) - model.set_answer_constraint7 = Constraint(expr= model.x[7] == 4.0) - + model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense', maximize)) + + model.piecewise = Piecewise( + INDEX_SET, model.Fx, model.x, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint(expr=model.x[1] == -5.0) + model.set_answer_constraint2 = Constraint(expr=model.x[2] == -3.0) + model.set_answer_constraint3 = Constraint(expr=model.x[3] == -2.5) + model.set_answer_constraint4 = Constraint(expr=model.x[4] == -1.5) + model.set_answer_constraint5 = Constraint(expr=model.x[5] == 2.0) + model.set_answer_constraint6 = Constraint(expr=model.x[6] == 3.5) + model.set_answer_constraint7 = Constraint(expr=model.x[7] == 4.0) + return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/convex_multi_vararray1.py b/pyomo/solvers/tests/piecewise_linear/problems/convex_multi_vararray1.py index b4dba4f2b43..8d00a99d49d 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/convex_multi_vararray1.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/convex_multi_vararray1.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -""" +r""" / -9x-20, -5 <= x <= -4 | -7x-12, -4 <= x <= -3 | -5x-6 , -3 <= x <= -2 @@ -21,44 +21,59 @@ """ -from pyomo.core import ConcreteModel, Var, Objective, Param, Piecewise, Constraint, maximize, sum_product +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Param, + Piecewise, + Constraint, + maximize, + sum_product, +) -INDEX_SET1 = range(1,8) -INDEX_SET2 = range(0,2) -DOMAIN_PTS = dict([((t1,t2),[float(i) for i in (list(range(-5,0))+list(range(1,5)))]) for t1 in INDEX_SET1 for t2 in INDEX_SET2]) +INDEX_SET1 = range(1, 8) +INDEX_SET2 = range(0, 2) +DOMAIN_PTS = dict( + [ + ((t1, t2), [float(i) for i in (list(range(-5, 0)) + list(range(1, 5)))]) + for t1 in INDEX_SET1 + for t2 in INDEX_SET2 + ] +) - -def F(model,t1,t2,x): - return (x**2)*model.p[t1,t2] -def define_model(**kwds): +def F(model, t1, t2, x): + return (x**2) * model.p[t1, t2] + +def define_model(**kwds): model = ConcreteModel() - model.x = Var(INDEX_SET1, INDEX_SET2, bounds=(-5,4)) # domain variable - model.Fx = Var(INDEX_SET1, INDEX_SET2) # range variable + model.x = Var(INDEX_SET1, INDEX_SET2, bounds=(-5, 4)) # domain variable + model.Fx = Var(INDEX_SET1, INDEX_SET2) # range variable model.p = Param(INDEX_SET1, INDEX_SET2, initialize=1.0) - model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense',maximize)) + model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense', maximize)) + + model.piecewise = Piecewise( + INDEX_SET1, INDEX_SET2, model.Fx, model.x, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - model.piecewise = Piecewise(INDEX_SET1,INDEX_SET2,model.Fx,model.x, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint(expr=model.x[1, 0] == -5.0) + model.set_answer_constraint2 = Constraint(expr=model.x[2, 0] == -3.0) + model.set_answer_constraint3 = Constraint(expr=model.x[3, 0] == -2.5) + model.set_answer_constraint4 = Constraint(expr=model.x[4, 0] == -1.5) + model.set_answer_constraint5 = Constraint(expr=model.x[5, 0] == 2.0) + model.set_answer_constraint6 = Constraint(expr=model.x[6, 0] == 3.5) + model.set_answer_constraint7 = Constraint(expr=model.x[7, 0] == 4.0) + model.set_answer_constraint8 = Constraint(expr=model.x[1, 1] == -5.0) + model.set_answer_constraint9 = Constraint(expr=model.x[2, 1] == -3.0) + model.set_answer_constraint10 = Constraint(expr=model.x[3, 1] == -2.5) + model.set_answer_constraint11 = Constraint(expr=model.x[4, 1] == -1.5) + model.set_answer_constraint12 = Constraint(expr=model.x[5, 1] == 2.0) + model.set_answer_constraint13 = Constraint(expr=model.x[6, 1] == 3.5) + model.set_answer_constraint14 = Constraint(expr=model.x[7, 1] == 4.0) - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(expr= model.x[1,0] == -5.0) - model.set_answer_constraint2 = Constraint(expr= model.x[2,0] == -3.0) - model.set_answer_constraint3 = Constraint(expr= model.x[3,0] == -2.5) - model.set_answer_constraint4 = Constraint(expr= model.x[4,0] == -1.5) - model.set_answer_constraint5 = Constraint(expr= model.x[5,0] == 2.0) - model.set_answer_constraint6 = Constraint(expr= model.x[6,0] == 3.5) - model.set_answer_constraint7 = Constraint(expr= model.x[7,0] == 4.0) - model.set_answer_constraint8 = Constraint(expr= model.x[1,1] == -5.0) - model.set_answer_constraint9 = Constraint(expr= model.x[2,1] == -3.0) - model.set_answer_constraint10 = Constraint(expr= model.x[3,1] == -2.5) - model.set_answer_constraint11 = Constraint(expr= model.x[4,1] == -1.5) - model.set_answer_constraint12 = Constraint(expr= model.x[5,1] == 2.0) - model.set_answer_constraint13 = Constraint(expr= model.x[6,1] == 3.5) - model.set_answer_constraint14 = Constraint(expr= model.x[7,1] == 4.0) - return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/convex_multi_vararray2.py b/pyomo/solvers/tests/piecewise_linear/problems/convex_multi_vararray2.py index 6c031f3078e..2892b759a65 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/convex_multi_vararray2.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/convex_multi_vararray2.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -""" +r""" / -9x-20, -5 <= x <= -4 | -7x-12, -4 <= x <= -3 | -5x-6 , -3 <= x <= -2 @@ -21,42 +21,57 @@ """ -from pyomo.core import ConcreteModel, Var, Objective, Param, Piecewise, Constraint, maximize, sum_product +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Param, + Piecewise, + Constraint, + maximize, + sum_product, +) -INDEX_SET = [(t1,t2) for t1 in range(1,8) for t2 in range(0,2)] -DOMAIN_PTS = dict([(t,[float(i) for i in (list(range(-5,0))+list(range(1,5)))]) for t in INDEX_SET]) - -def F(model,t1,t2,x): - return (x**2)*model.p[t1,t2] +INDEX_SET = [(t1, t2) for t1 in range(1, 8) for t2 in range(0, 2)] +DOMAIN_PTS = dict( + [ + (t, [float(i) for i in (list(range(-5, 0)) + list(range(1, 5)))]) + for t in INDEX_SET + ] +) + + +def F(model, t1, t2, x): + return (x**2) * model.p[t1, t2] -def define_model(**kwds): +def define_model(**kwds): model = ConcreteModel() - model.x = Var(INDEX_SET, bounds=(-5,4)) # domain variable - model.Fx = Var(INDEX_SET) # range variable + model.x = Var(INDEX_SET, bounds=(-5, 4)) # domain variable + model.Fx = Var(INDEX_SET) # range variable model.p = Param(INDEX_SET, initialize=1.0) - model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense',maximize)) - - model.piecewise = Piecewise(INDEX_SET,model.Fx,model.x, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(expr= model.x[1,0] == -5.0) - model.set_answer_constraint2 = Constraint(expr= model.x[2,0] == -3.0) - model.set_answer_constraint3 = Constraint(expr= model.x[3,0] == -2.5) - model.set_answer_constraint4 = Constraint(expr= model.x[4,0] == -1.5) - model.set_answer_constraint5 = Constraint(expr= model.x[5,0] == 2.0) - model.set_answer_constraint6 = Constraint(expr= model.x[6,0] == 3.5) - model.set_answer_constraint7 = Constraint(expr= model.x[7,0] == 4.0) - model.set_answer_constraint8 = Constraint(expr= model.x[1,1] == -5.0) - model.set_answer_constraint9 = Constraint(expr= model.x[2,1] == -3.0) - model.set_answer_constraint10 = Constraint(expr= model.x[3,1] == -2.5) - model.set_answer_constraint11 = Constraint(expr= model.x[4,1] == -1.5) - model.set_answer_constraint12 = Constraint(expr= model.x[5,1] == 2.0) - model.set_answer_constraint13 = Constraint(expr= model.x[6,1] == 3.5) - model.set_answer_constraint14 = Constraint(expr= model.x[7,1] == 4.0) - + model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense', maximize)) + + model.piecewise = Piecewise( + INDEX_SET, model.Fx, model.x, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint(expr=model.x[1, 0] == -5.0) + model.set_answer_constraint2 = Constraint(expr=model.x[2, 0] == -3.0) + model.set_answer_constraint3 = Constraint(expr=model.x[3, 0] == -2.5) + model.set_answer_constraint4 = Constraint(expr=model.x[4, 0] == -1.5) + model.set_answer_constraint5 = Constraint(expr=model.x[5, 0] == 2.0) + model.set_answer_constraint6 = Constraint(expr=model.x[6, 0] == 3.5) + model.set_answer_constraint7 = Constraint(expr=model.x[7, 0] == 4.0) + model.set_answer_constraint8 = Constraint(expr=model.x[1, 1] == -5.0) + model.set_answer_constraint9 = Constraint(expr=model.x[2, 1] == -3.0) + model.set_answer_constraint10 = Constraint(expr=model.x[3, 1] == -2.5) + model.set_answer_constraint11 = Constraint(expr=model.x[4, 1] == -1.5) + model.set_answer_constraint12 = Constraint(expr=model.x[5, 1] == 2.0) + model.set_answer_constraint13 = Constraint(expr=model.x[6, 1] == 3.5) + model.set_answer_constraint14 = Constraint(expr=model.x[7, 1] == 4.0) + return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/convex_var.py b/pyomo/solvers/tests/piecewise_linear/problems/convex_var.py index 13ac8040446..bb4609be7c9 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/convex_var.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/convex_var.py @@ -21,64 +21,74 @@ """ -from pyomo.core import ConcreteModel, Var, Objective, Piecewise, Constraint, maximize +from pyomo.core import ConcreteModel, Var, Objective, Piecewise, Constraint, maximize -DOMAIN_PTS = [float(i) for i in (list(range(-5,0))+list(range(1,5)))] +DOMAIN_PTS = [float(i) for i in (list(range(-5, 0)) + list(range(1, 5)))] -def F(model,x): + +def F(model, x): return x**2 -def define_model(**kwds): +def define_model(**kwds): model = ConcreteModel() - model.x1 = Var(bounds=(-5,4)) # domain variable - model.x2 = Var(bounds=(-5,4)) # domain variable - model.x3 = Var(bounds=(-5,4)) # domain variable - model.x4 = Var(bounds=(-5,4)) # domain variable - model.x5 = Var(bounds=(-5,4)) # domain variable - model.x6 = Var(bounds=(-5,4)) # domain variable - model.x7 = Var(bounds=(-5,4)) # domain variable + model.x1 = Var(bounds=(-5, 4)) # domain variable + model.x2 = Var(bounds=(-5, 4)) # domain variable + model.x3 = Var(bounds=(-5, 4)) # domain variable + model.x4 = Var(bounds=(-5, 4)) # domain variable + model.x5 = Var(bounds=(-5, 4)) # domain variable + model.x6 = Var(bounds=(-5, 4)) # domain variable + model.x7 = Var(bounds=(-5, 4)) # domain variable + + model.Fx1 = Var() # range variable + model.Fx2 = Var() # range variable + model.Fx3 = Var() # range variable + model.Fx4 = Var() # range variable + model.Fx5 = Var() # range variable + model.Fx6 = Var() # range variable + model.Fx7 = Var() # range variable - model.Fx1 = Var() # range variable - model.Fx2 = Var() # range variable - model.Fx3 = Var() # range variable - model.Fx4 = Var() # range variable - model.Fx5 = Var() # range variable - model.Fx6 = Var() # range variable - model.Fx7 = Var() # range variable + model.obj = Objective( + expr=model.Fx1 + + model.Fx2 + + model.Fx3 + + model.Fx4 + + model.Fx5 + + model.Fx6 + + model.Fx7, + sense=kwds.pop('sense', maximize), + ) - model.obj = Objective(expr=model.Fx1+model.Fx2+model.Fx3+model.Fx4+model.Fx5+model.Fx6+model.Fx7, sense=kwds.pop('sense',maximize)) + model.piecewise1 = Piecewise( + model.Fx1, model.x1, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise2 = Piecewise( + model.Fx2, model.x2, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise3 = Piecewise( + model.Fx3, model.x3, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise4 = Piecewise( + model.Fx4, model.x4, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise5 = Piecewise( + model.Fx5, model.x5, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise6 = Piecewise( + model.Fx6, model.x6, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise7 = Piecewise( + model.Fx7, model.x7, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - model.piecewise1 = Piecewise(model.Fx1,model.x1, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise2 = Piecewise(model.Fx2,model.x2, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise3 = Piecewise(model.Fx3,model.x3, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise4 = Piecewise(model.Fx4,model.x4, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise5 = Piecewise(model.Fx5,model.x5, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise6 = Piecewise(model.Fx6,model.x6, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise7 = Piecewise(model.Fx7,model.x7, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint(expr=model.x1 == -5.0) + model.set_answer_constraint2 = Constraint(expr=model.x2 == -3.0) + model.set_answer_constraint3 = Constraint(expr=model.x3 == -2.5) + model.set_answer_constraint4 = Constraint(expr=model.x4 == -1.5) + model.set_answer_constraint5 = Constraint(expr=model.x5 == 2.0) + model.set_answer_constraint6 = Constraint(expr=model.x6 == 3.5) + model.set_answer_constraint7 = Constraint(expr=model.x7 == 4.0) - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(expr= model.x1 == -5.0) - model.set_answer_constraint2 = Constraint(expr= model.x2 == -3.0) - model.set_answer_constraint3 = Constraint(expr= model.x3 == -2.5) - model.set_answer_constraint4 = Constraint(expr= model.x4 == -1.5) - model.set_answer_constraint5 = Constraint(expr= model.x5 == 2.0) - model.set_answer_constraint6 = Constraint(expr= model.x6 == 3.5) - model.set_answer_constraint7 = Constraint(expr= model.x7 == 4.0) - return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/convex_vararray.py b/pyomo/solvers/tests/piecewise_linear/problems/convex_vararray.py index 132d73ded5c..140d69dcb1a 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/convex_vararray.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/convex_vararray.py @@ -21,36 +21,50 @@ """ -from pyomo.environ import ConcreteModel, Var, Param, Piecewise, Constraint, Objective, sum_product, maximize +from pyomo.environ import ( + ConcreteModel, + Var, + Param, + Piecewise, + Constraint, + Objective, + sum_product, + maximize, +) -INDEX_SET = range(1,8) # There will be two copies of this function -DOMAIN_PTS = dict([(t,[float(i) for i in (list(range(-5,0))+list(range(1,5)))]) for t in INDEX_SET]) +INDEX_SET = range(1, 8) # There will be two copies of this function +DOMAIN_PTS = dict( + [ + (t, [float(i) for i in (list(range(-5, 0)) + list(range(1, 5)))]) + for t in INDEX_SET + ] +) - -def F(model,t,x): - return (x**2)*model.p[t] -def define_model(**kwds): +def F(model, t, x): + return (x**2) * model.p[t] + +def define_model(**kwds): model = ConcreteModel() - model.x = Var(INDEX_SET, bounds=(-5,4)) # domain variable - model.Fx = Var(INDEX_SET) # range variable - model.p = Param(INDEX_SET,initialize=1.0) + model.x = Var(INDEX_SET, bounds=(-5, 4)) # domain variable + model.Fx = Var(INDEX_SET) # range variable + model.p = Param(INDEX_SET, initialize=1.0) - model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense',maximize)) + model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense', maximize)) - model.piecewise = Piecewise(INDEX_SET,model.Fx,model.x, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + model.piecewise = Piecewise( + INDEX_SET, model.Fx, model.x, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(expr= model.x[1] == -5.0) - model.set_answer_constraint2 = Constraint(expr= model.x[2] == -3.0) - model.set_answer_constraint3 = Constraint(expr= model.x[3] == -2.5) - model.set_answer_constraint4 = Constraint(expr= model.x[4] == -1.5) - model.set_answer_constraint5 = Constraint(expr= model.x[5] == 2.0) - model.set_answer_constraint6 = Constraint(expr= model.x[6] == 3.5) - model.set_answer_constraint7 = Constraint(expr= model.x[7] == 4.0) + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint(expr=model.x[1] == -5.0) + model.set_answer_constraint2 = Constraint(expr=model.x[2] == -3.0) + model.set_answer_constraint3 = Constraint(expr=model.x[3] == -2.5) + model.set_answer_constraint4 = Constraint(expr=model.x[4] == -1.5) + model.set_answer_constraint5 = Constraint(expr=model.x[5] == 2.0) + model.set_answer_constraint6 = Constraint(expr=model.x[6] == 3.5) + model.set_answer_constraint7 = Constraint(expr=model.x[7] == 4.0) return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/piecewise_multi_vararray.py b/pyomo/solvers/tests/piecewise_linear/problems/piecewise_multi_vararray.py index ed6edc59d80..3c587d694e1 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/piecewise_multi_vararray.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/piecewise_multi_vararray.py @@ -9,41 +9,65 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -""" +r""" / 2x , 0 <= x <= 1 | (1/2)x+(3/2), 1 <= x <= 3 f(x) = | -3x+12 , 3 <= x <= 5 \ 2x-13 , 5 <= x <= 6 """ -from pyomo.core import ConcreteModel, Var, Objective, Param, Piecewise, Constraint, maximize, sum_product +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Param, + Piecewise, + Constraint, + maximize, + sum_product, +) -INDEX_SET1 = ['1','2','3','40'] # There will be two copies of this function -INDEX_SET2 = [(t1,t2) for t1 in range(1,4) for t2 in range(1,5)] -DOMAIN_PTS = dict([((t1,t2,t3),[float(i) for i in [0,1,3,5,6]]) for t1 in INDEX_SET1 for (t2,t3) in INDEX_SET2]) -RANGE_PTS = {0.0:0.0, 1.0:2.0, 3.0:3.0, 5.0:-3.0, 6.0:-1.0} +INDEX_SET1 = ['1', '2', '3', '40'] # There will be two copies of this function +INDEX_SET2 = [(t1, t2) for t1 in range(1, 4) for t2 in range(1, 5)] +DOMAIN_PTS = dict( + [ + ((t1, t2, t3), [float(i) for i in [0, 1, 3, 5, 6]]) + for t1 in INDEX_SET1 + for (t2, t3) in INDEX_SET2 + ] +) +RANGE_PTS = {0.0: 0.0, 1.0: 2.0, 3.0: 3.0, 5.0: -3.0, 6.0: -1.0} -def F(model,t1,t2,t3,x): - return RANGE_PTS[x]*model.p[t1,t2,t3] -def define_model(**kwds): +def F(model, t1, t2, t3, x): + return RANGE_PTS[x] * model.p[t1, t2, t3] + +def define_model(**kwds): model = ConcreteModel() - model.x = Var(INDEX_SET1, INDEX_SET2, bounds=(0,6)) # domain variable - model.Fx = Var(INDEX_SET1, INDEX_SET2) # range variable + model.x = Var(INDEX_SET1, INDEX_SET2, bounds=(0, 6)) # domain variable + model.Fx = Var(INDEX_SET1, INDEX_SET2) # range variable model.p = Param(INDEX_SET1, INDEX_SET2, initialize=1.0) - model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense',maximize)) + model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense', maximize)) - model.piecewise = Piecewise(INDEX_SET1,INDEX_SET2,model.Fx,model.x, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + model.piecewise = Piecewise( + INDEX_SET1, INDEX_SET2, model.Fx, model.x, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(INDEX_SET2,rule= lambda model,t2,t3: model.x['1',t2,t3] == 0.0) - model.set_answer_constraint2 = Constraint(INDEX_SET2,rule= lambda model,t2,t3: model.x['2',t2,t3] == 3.0) - model.set_answer_constraint3 = Constraint(INDEX_SET2,rule= lambda model,t2,t3: model.x['3',t2,t3] == 5.5) - model.set_answer_constraint4 = Constraint(INDEX_SET2,rule= lambda model,t2,t3: model.x['40',t2,t3] == 6.0) + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint( + INDEX_SET2, rule=lambda model, t2, t3: model.x['1', t2, t3] == 0.0 + ) + model.set_answer_constraint2 = Constraint( + INDEX_SET2, rule=lambda model, t2, t3: model.x['2', t2, t3] == 3.0 + ) + model.set_answer_constraint3 = Constraint( + INDEX_SET2, rule=lambda model, t2, t3: model.x['3', t2, t3] == 5.5 + ) + model.set_answer_constraint4 = Constraint( + INDEX_SET2, rule=lambda model, t2, t3: model.x['40', t2, t3] == 6.0 + ) return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/piecewise_var.py b/pyomo/solvers/tests/piecewise_linear/problems/piecewise_var.py index 52be784a164..5b18842f81d 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/piecewise_var.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/piecewise_var.py @@ -16,50 +16,62 @@ \ 2x-13 , 5 <= x <= 6 """ -from pyomo.core import ConcreteModel, Var, Objective, Param, Piecewise, Constraint, maximize +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Param, + Piecewise, + Constraint, + maximize, +) -DOMAIN_PTS = [float(i) for i in [0,1,3,5,6]] -RANGE_PTS = {0.0:0.0, 1.0:2.0, 3.0:3.0, 5.0:-3.0, 6.0:-1.0} +DOMAIN_PTS = [float(i) for i in [0, 1, 3, 5, 6]] +RANGE_PTS = {0.0: 0.0, 1.0: 2.0, 3.0: 3.0, 5.0: -3.0, 6.0: -1.0} -def F(model,x): - return RANGE_PTS[x]*model.p -def define_model(**kwds): +def F(model, x): + return RANGE_PTS[x] * model.p + +def define_model(**kwds): model = ConcreteModel() - model.x1 = Var(bounds=(0,6)) # domain variable - model.x2 = Var(bounds=(0,6)) # domain variable - model.x3 = Var(bounds=(0,6)) # domain variable - model.x4 = Var(bounds=(0,6)) # domain variable - - model.Fx1 = Var() # range variable - model.Fx2 = Var() # range variable - model.Fx3 = Var() # range variable - model.Fx4 = Var() # range variable + model.x1 = Var(bounds=(0, 6)) # domain variable + model.x2 = Var(bounds=(0, 6)) # domain variable + model.x3 = Var(bounds=(0, 6)) # domain variable + model.x4 = Var(bounds=(0, 6)) # domain variable + + model.Fx1 = Var() # range variable + model.Fx2 = Var() # range variable + model.Fx3 = Var() # range variable + model.Fx4 = Var() # range variable model.p = Param(initialize=1.0) - model.obj = Objective(expr=model.Fx1+model.Fx2+model.Fx3+model.Fx4, sense=kwds.pop('sense',maximize)) + model.obj = Objective( + expr=model.Fx1 + model.Fx2 + model.Fx3 + model.Fx4, + sense=kwds.pop('sense', maximize), + ) + + model.piecewise1 = Piecewise( + model.Fx1, model.x1, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise2 = Piecewise( + model.Fx2, model.x2, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - model.piecewise1 = Piecewise(model.Fx1,model.x1, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise2 = Piecewise(model.Fx2,model.x2, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + model.piecewise3 = Piecewise( + model.Fx3, model.x3, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - model.piecewise3 = Piecewise(model.Fx3,model.x3, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + model.piecewise4 = Piecewise( + model.Fx4, model.x4, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - model.piecewise4 = Piecewise(model.Fx4,model.x4, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint(expr=model.x1 == 0.0) + model.set_answer_constraint2 = Constraint(expr=model.x2 == 3.0) + model.set_answer_constraint3 = Constraint(expr=model.x3 == 5.5) + model.set_answer_constraint4 = Constraint(expr=model.x4 == 6.0) - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(expr= model.x1 == 0.0) - model.set_answer_constraint2 = Constraint(expr= model.x2 == 3.0) - model.set_answer_constraint3 = Constraint(expr= model.x3 == 5.5) - model.set_answer_constraint4 = Constraint(expr= model.x4 == 6.0) - return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/piecewise_vararray.py b/pyomo/solvers/tests/piecewise_linear/problems/piecewise_vararray.py index 0e87fbb9082..d35c308e172 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/piecewise_vararray.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/piecewise_vararray.py @@ -16,33 +16,43 @@ \ 2x-13 , 5 <= x <= 6 """ -from pyomo.core import ConcreteModel, Var, Objective, Param, Piecewise, Constraint, maximize, sum_product +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Param, + Piecewise, + Constraint, + maximize, + sum_product, +) -INDEX_SET = [1,2,3,4] # There will be two copies of this function -DOMAIN_PTS = dict([(t,[float(i) for i in [0,1,3,5,6]]) for t in INDEX_SET]) -RANGE_PTS = {0.0:0.0, 1.0:2.0, 3.0:3.0, 5.0:-3.0, 6.0:-1.0} +INDEX_SET = [1, 2, 3, 4] # There will be two copies of this function +DOMAIN_PTS = dict([(t, [float(i) for i in [0, 1, 3, 5, 6]]) for t in INDEX_SET]) +RANGE_PTS = {0.0: 0.0, 1.0: 2.0, 3.0: 3.0, 5.0: -3.0, 6.0: -1.0} -def F(model,t,x): - return RANGE_PTS[x]*model.p[t] -def define_model(**kwds): +def F(model, t, x): + return RANGE_PTS[x] * model.p[t] + +def define_model(**kwds): model = ConcreteModel() - model.x = Var(INDEX_SET, bounds=(0,6)) # domain variable - model.Fx = Var(INDEX_SET) # range variable + model.x = Var(INDEX_SET, bounds=(0, 6)) # domain variable + model.Fx = Var(INDEX_SET) # range variable model.p = Param(INDEX_SET, initialize=1.0, mutable=True) - model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense',maximize)) + model.obj = Objective(expr=sum_product(model.Fx), sense=kwds.pop('sense', maximize)) + + model.piecewise = Piecewise( + INDEX_SET, model.Fx, model.x, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - model.piecewise = Piecewise(INDEX_SET,model.Fx,model.x, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint(expr=model.x[1] == 0.0) + model.set_answer_constraint2 = Constraint(expr=model.x[2] == 3.0) + model.set_answer_constraint3 = Constraint(expr=model.x[3] == 5.5) + model.set_answer_constraint4 = Constraint(expr=model.x[4] == 6.0) - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(expr= model.x[1] == 0.0) - model.set_answer_constraint2 = Constraint(expr= model.x[2] == 3.0) - model.set_answer_constraint3 = Constraint(expr= model.x[3] == 5.5) - model.set_answer_constraint4 = Constraint(expr= model.x[4] == 6.0) - return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/step_var.py b/pyomo/solvers/tests/piecewise_linear/problems/step_var.py index 5f6be710b08..a0c1062c9d6 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/step_var.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/step_var.py @@ -17,48 +17,72 @@ \ x-1 , 2 < x <= 3 """ -from pyomo.core import ConcreteModel, Var, Objective, Param, Piecewise, Constraint, maximize +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Param, + Piecewise, + Constraint, + maximize, +) DOMAIN_PTS = [0, 1, 1, 2, 3] F = [0, 0, 1, 1, 2] -def define_model(**kwds): +def define_model(**kwds): model = ConcreteModel() - model.x1 = Var(bounds=(0,3)) # domain variable - model.x2 = Var(bounds=(0,3)) # domain variable - model.x3 = Var(bounds=(0,3)) # domain variable - model.x4 = Var(bounds=(0,3)) # domain variable - - model.Fx1 = Var() # range variable - model.Fx2 = Var() # range variable - model.Fx3 = Var() # range variable - model.Fx4 = Var() # range variable + model.x1 = Var(bounds=(0, 3)) # domain variable + model.x2 = Var(bounds=(0, 3)) # domain variable + model.x3 = Var(bounds=(0, 3)) # domain variable + model.x4 = Var(bounds=(0, 3)) # domain variable + + model.Fx1 = Var() # range variable + model.Fx2 = Var() # range variable + model.Fx3 = Var() # range variable + model.Fx4 = Var() # range variable model.p = Param(initialize=1.0) - model.obj = Objective(expr=model.Fx1+model.Fx2+model.Fx3+model.Fx4+model.x1+model.x2+model.x3+model.x4, sense=kwds.pop('sense',maximize)) + model.obj = Objective( + expr=model.Fx1 + + model.Fx2 + + model.Fx3 + + model.Fx4 + + model.x1 + + model.x2 + + model.x3 + + model.x4, + sense=kwds.pop('sense', maximize), + ) + + model.piecewise1 = Piecewise( + model.Fx1, model.x1, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) + model.piecewise2 = Piecewise( + model.Fx2, model.x2, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - model.piecewise1 = Piecewise(model.Fx1,model.x1, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) - model.piecewise2 = Piecewise(model.Fx2,model.x2, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + model.piecewise3 = Piecewise( + model.Fx3, model.x3, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - model.piecewise3 = Piecewise(model.Fx3,model.x3, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + model.piecewise4 = Piecewise( + model.Fx4, model.x4, pw_pts=DOMAIN_PTS, f_rule=F, **kwds + ) - model.piecewise4 = Piecewise(model.Fx4,model.x4, - pw_pts=DOMAIN_PTS, - f_rule=F, **kwds) + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint( + expr=model.x1 == 0.5 + ) # Fx1 should solve to 0 + model.set_answer_constraint2 = Constraint(expr=model.x2 == 1.0) # + model.set_answer_constraint3 = Constraint(expr=model.Fx2 == 0.5) # + model.set_answer_constraint4 = Constraint( + expr=model.x3 == 1.5 + ) # Fx3 should solve to 1 + model.set_answer_constraint5 = Constraint( + expr=model.x4 == 2.5 + ) # Fx4 should solve to 1.5 - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(expr= model.x1 == 0.5) # Fx1 should solve to 0 - model.set_answer_constraint2 = Constraint(expr= model.x2 == 1.0) # - model.set_answer_constraint3 = Constraint(expr= model.Fx2 == 0.5) # - model.set_answer_constraint4 = Constraint(expr= model.x3 == 1.5) # Fx3 should solve to 1 - model.set_answer_constraint5 = Constraint(expr= model.x4 == 2.5) # Fx4 should solve to 1.5 - return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/step_vararray.py b/pyomo/solvers/tests/piecewise_linear/problems/step_vararray.py index 418f20293a1..749df3b6d7f 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/step_vararray.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/step_vararray.py @@ -17,33 +17,54 @@ \ x-1 , 2 < x <= 3 """ -from pyomo.core import ConcreteModel, Var, Objective, Piecewise, Constraint, maximize, sum_product +from pyomo.core import ( + ConcreteModel, + Var, + Objective, + Piecewise, + Constraint, + maximize, + sum_product, +) -INDEX = [1,2,3,4] +INDEX = [1, 2, 3, 4] DOMAIN_PTS = [0, 1, 1, 2, 3] F = [0, 0, 1, 1, 2] -def define_model(**kwds): +def define_model(**kwds): model = ConcreteModel() - model.x = Var(INDEX) # domain variable - - model.Fx = Var(INDEX) # range variable - - model.obj = Objective(expr=sum_product(model.Fx)+sum_product(model.x), sense=kwds.pop('sense',maximize)) - - model.piecewise = Piecewise(INDEX,model.Fx,model.x, - pw_pts=DOMAIN_PTS, - f_rule=F, - unbounded_domain_var=True, - **kwds) - - #Fix the answer for testing purposes - model.set_answer_constraint1 = Constraint(expr= model.x[1] == 0.5) # Fx1 should solve to 0 - model.set_answer_constraint2 = Constraint(expr= model.x[2] == 1.0) # - model.set_answer_constraint3 = Constraint(expr= model.Fx[2] == 0.5) # - model.set_answer_constraint4 = Constraint(expr= model.x[3] == 1.5) # Fx3 should solve to 1 - model.set_answer_constraint5 = Constraint(expr= model.x[4] == 2.5) # Fx4 should solve to 1.5 - + model.x = Var(INDEX) # domain variable + + model.Fx = Var(INDEX) # range variable + + model.obj = Objective( + expr=sum_product(model.Fx) + sum_product(model.x), + sense=kwds.pop('sense', maximize), + ) + + model.piecewise = Piecewise( + INDEX, + model.Fx, + model.x, + pw_pts=DOMAIN_PTS, + f_rule=F, + unbounded_domain_var=True, + **kwds + ) + + # Fix the answer for testing purposes + model.set_answer_constraint1 = Constraint( + expr=model.x[1] == 0.5 + ) # Fx1 should solve to 0 + model.set_answer_constraint2 = Constraint(expr=model.x[2] == 1.0) # + model.set_answer_constraint3 = Constraint(expr=model.Fx[2] == 0.5) # + model.set_answer_constraint4 = Constraint( + expr=model.x[3] == 1.5 + ) # Fx3 should solve to 1 + model.set_answer_constraint5 = Constraint( + expr=model.x[4] == 2.5 + ) # Fx4 should solve to 1.5 + return model diff --git a/pyomo/solvers/tests/piecewise_linear/problems/tester.py b/pyomo/solvers/tests/piecewise_linear/problems/tester.py index 4563fde2630..02e04f5052e 100644 --- a/pyomo/solvers/tests/piecewise_linear/problems/tester.py +++ b/pyomo/solvers/tests/piecewise_linear/problems/tester.py @@ -9,12 +9,16 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +import os + +from pyomo.common.fileutils import import_file, this_file_dir from pyomo.environ import Var, maximize, value from pyomo.opt import SolverFactory -opt = SolverFactory('cplexamp',solve_io='nl') +solver_name = 'cplexamp' +opt = SolverFactory(solver_name, solve_io='nl') -kwds = {'pw_constr_type':'UB','pw_repn':'DCC','sense':maximize,'force_pw':True} +kwds = {'pw_constr_type': 'UB', 'pw_repn': 'DCC', 'sense': maximize, 'force_pw': True} problem_names = [] problem_names.append("piecewise_multi_vararray") @@ -33,21 +37,20 @@ problem_names = ['convex_var'] -for problem_name in problem_names: - p = __import__(problem_name) - - model = p.define_model(**kwds) - inst = model.create() +if __name__ == '__main__': + for problem_name in problem_names: + p = import_file(os.path.join(this_file_dir(), problem_name) + '.py') - results = opt.solve(inst,tee=True) + model = p.define_model(**kwds) + inst = model.create_instance() - inst.load(results) + results = opt.solve(inst, tee=True) - res = dict() - for block in inst.block_data_objects(active=True): - for variable in block.component_map(Var, active=True).values(): - for var in variable.values(): - name = var.name - if (name[:2] == 'Fx') or (name[:1] == 'x'): - res[name] = value(var) - print(res) + res = dict() + for block in inst.block_data_objects(active=True): + for variable in block.component_map(Var, active=True).values(): + for var in variable.values(): + name = var.name + if (name[:2] == 'Fx') or (name[:1] == 'x'): + res[name] = value(var) + print(res) diff --git a/pyomo/solvers/tests/piecewise_linear/step.lp b/pyomo/solvers/tests/piecewise_linear/step.lp index 6091906ce08..7ecd9e7e34e 100644 --- a/pyomo/solvers/tests/piecewise_linear/step.lp +++ b/pyomo/solvers/tests/piecewise_linear/step.lp @@ -2,8 +2,8 @@ max obj: -+1 X +1 Z ++1 X s.t. @@ -16,61 +16,58 @@ c_e_con_INC_constraint1_: c_e_con_INC_constraint2_: +1 Z --2 con_INC_delta(2) +-2.0 con_INC_delta(2) +1.5 con_INC_delta(4) = 0 c_u_con_INC_constraint3(1)_: --1 con_INC_bin_y(1) +1 con_INC_delta(2) +-1 con_INC_bin_y(1) <= 0 c_u_con_INC_constraint3(2)_: --1 con_INC_bin_y(2) +1 con_INC_delta(3) +-1 con_INC_bin_y(2) <= 0 c_u_con_INC_constraint3(3)_: --1 con_INC_bin_y(3) +1 con_INC_delta(4) +-1 con_INC_bin_y(3) <= 0 c_u_con_INC_constraint3(4)_: --1 con_INC_bin_y(4) +1 con_INC_delta(5) +-1 con_INC_bin_y(4) <= 0 c_u_con_INC_constraint4(1)_: -+1 con_INC_bin_y(1) -1 con_INC_delta(1) ++1 con_INC_bin_y(1) <= 0 c_u_con_INC_constraint4(2)_: -+1 con_INC_bin_y(2) -1 con_INC_delta(2) ++1 con_INC_bin_y(2) <= 0 c_u_con_INC_constraint4(3)_: -+1 con_INC_bin_y(3) -1 con_INC_delta(3) ++1 con_INC_bin_y(3) <= 0 c_u_con_INC_constraint4(4)_: -+1 con_INC_bin_y(4) -1 con_INC_delta(4) ++1 con_INC_bin_y(4) <= 0 -c_e_ONE_VAR_CONSTANT: -ONE_VAR_CONSTANT = 1.0 - bounds + -inf <= Z <= +inf 0 <= X <= 3 - -inf <= Z <= +inf - -inf <= con_INC_delta(1) <= 1 - -inf <= con_INC_delta(2) <= +inf - -inf <= con_INC_delta(3) <= +inf - -inf <= con_INC_delta(4) <= +inf + -inf <= con_INC_delta(1) <= 1 + -inf <= con_INC_delta(3) <= +inf 0 <= con_INC_delta(5) <= +inf + -inf <= con_INC_delta(2) <= +inf + -inf <= con_INC_delta(4) <= +inf 0 <= con_INC_bin_y(1) <= 1 0 <= con_INC_bin_y(2) <= 1 0 <= con_INC_bin_y(3) <= 1 diff --git a/pyomo/solvers/tests/piecewise_linear/test_examples.py b/pyomo/solvers/tests/piecewise_linear/test_examples.py index 69ed1975f2e..b151ffd2c0e 100644 --- a/pyomo/solvers/tests/piecewise_linear/test_examples.py +++ b/pyomo/solvers/tests/piecewise_linear/test_examples.py @@ -14,107 +14,103 @@ # from itertools import zip_longest -from filecmp import cmp import os from os.path import abspath, dirname, join -currdir = dirname(abspath(__file__))+os.sep -scriptdir = dirname(dirname(dirname(dirname(dirname(abspath(__file__))))))+os.sep -scriptdir = join(scriptdir,'examples','pyomo','piecewise') import pyomo.common.unittest as unittest - import pyomo.scripting.convert as convert -from pyomo.repn.tests.ampl.nl_diff import load_and_compare_nl_baseline + +from pyomo.common.fileutils import this_file_dir, PYOMO_ROOT_DIR +from pyomo.common.tempfiles import TempfileManager +from pyomo.repn.tests.nl_diff import load_and_compare_nl_baseline +from pyomo.repn.tests.lp_diff import load_and_compare_lp_baseline + +currdir = this_file_dir() +scriptdir = join(PYOMO_ROOT_DIR, 'examples', 'pyomo', 'piecewise') _NL_diff_tol = 1e-9 _LP_diff_tol = 1e-9 + class Test(unittest.TestCase): + def setUp(self): + self.cwd = os.getcwd() + TempfileManager.push() + self.tmpdir = TempfileManager.create_tempdir() + os.chdir(self.tmpdir) + + def tearDown(self): + os.chdir(self.cwd) + TempfileManager.pop() def run_convert2nl(self, name): - os.chdir(currdir) - return convert.pyomo2nl(['--symbolic-solver-labels' - ,join(scriptdir,name)]) + return convert.pyomo2nl( + [ + '--symbolic-solver-labels', + '--file-determinism', + '1', + join(scriptdir, name), + ] + ) def run_convert2lp(self, name): - os.chdir(currdir) - return convert.pyomo2lp(['--symbolic-solver-labels',join(scriptdir,name)]) + return convert.pyomo2lp(['--symbolic-solver-labels', join(scriptdir, name)]) def test_step_lp(self): """Test examples/pyomo/piecewise/step.py""" self.run_convert2lp('step.py') - _out, _log = join(currdir,'unknown.lp'), join(currdir, 'step.lp') - self.assertTrue(cmp(_out, _log), - msg="Files %s and %s differ" % (_out, _log)) + _test, _base = join(self.tmpdir, 'unknown.lp'), join(currdir, 'step.lp') + self.assertEqual(*load_and_compare_lp_baseline(_base, _test)) def test_step_nl(self): """Test examples/pyomo/piecewise/step.py""" self.run_convert2nl('step.py') - _test, _base = join(currdir,'unknown.nl'), join(currdir, 'step.nl') + _test, _base = join(self.tmpdir, 'unknown.nl'), join(currdir, 'step.nl') self.assertEqual(*load_and_compare_nl_baseline(_base, _test)) - os.remove(join(currdir,'unknown.row')) - os.remove(join(currdir,'unknown.col')) def test_nonconvex_lp(self): """Test examples/pyomo/piecewise/nonconvex.py""" self.run_convert2lp('nonconvex.py') - _out, _log = join(currdir,'unknown.lp'), join(currdir, 'nonconvex.lp') - self.assertTrue(cmp(_out, _log), - msg="Files %s and %s differ" % (_out, _log)) + _test, _base = join(self.tmpdir, 'unknown.lp'), join(currdir, 'nonconvex.lp') + self.assertEqual(*load_and_compare_lp_baseline(_base, _test)) def test_nonconvex_nl(self): """Test examples/pyomo/piecewise/nonconvex.py""" self.run_convert2nl('nonconvex.py') - _test, _base = join(currdir,'unknown.nl'), join(currdir, 'nonconvex.nl') + _test, _base = join(self.tmpdir, 'unknown.nl'), join(currdir, 'nonconvex.nl') self.assertEqual(*load_and_compare_nl_baseline(_base, _test)) - os.remove(join(currdir,'unknown.row')) - os.remove(join(currdir,'unknown.col')) def test_convex_lp(self): """Test examples/pyomo/piecewise/convex.py""" self.run_convert2lp('convex.py') - _out, _log = join(currdir,'unknown.lp'), join(currdir, 'convex.lp') - self.assertTrue(cmp(_out, _log), - msg="Files %s and %s differ" % (_out, _log)) + _test, _base = join(self.tmpdir, 'unknown.lp'), join(currdir, 'convex.lp') + self.assertEqual(*load_and_compare_lp_baseline(_base, _test)) def test_convex_nl(self): """Test examples/pyomo/piecewise/convex.py""" self.run_convert2nl('convex.py') - _test, _base = join(currdir,'unknown.nl'), join(currdir, 'convex.nl') + _test, _base = join(self.tmpdir, 'unknown.nl'), join(currdir, 'convex.nl') self.assertEqual(*load_and_compare_nl_baseline(_base, _test)) - os.remove(join(currdir,'unknown.row')) - os.remove(join(currdir,'unknown.col')) def test_indexed_lp(self): """Test examples/pyomo/piecewise/indexed.py""" self.run_convert2lp('indexed.py') - with open(join(currdir,'unknown.lp'), 'r') as f1, \ - open(join(currdir, 'indexed.lp'), 'r') as f2: - f1_contents = list(filter(None, f1.read().split())) - f2_contents = list(filter(None, f2.read().split())) - for item1, item2 in zip_longest(f1_contents, f2_contents): - try: - self.assertAlmostEqual(float(item1), float(item2)) - except: - self.assertEqual(item1, item2) + _test, _base = join(self.tmpdir, 'unknown.lp'), join(currdir, 'indexed.lp') + self.assertEqual(*load_and_compare_lp_baseline(_base, _test)) def test_indexed_nl(self): """Test examples/pyomo/piecewise/indexed.py""" self.run_convert2nl('indexed.py') _base = join(currdir, 'indexed.nl') - _test = join(currdir,'unknown.nl') + _test = join(self.tmpdir, 'unknown.nl') self.assertEqual(*load_and_compare_nl_baseline(_base, _test)) - os.remove(join(currdir,'unknown.row')) - os.remove(join(currdir,'unknown.col')) def test_indexed_nonlinear_nl(self): """Test examples/pyomo/piecewise/indexed_nonlinear.py""" self.run_convert2nl('indexed_nonlinear.py') - _test = join(currdir,'unknown.nl') + _test = join(self.tmpdir, 'unknown.nl') _base = join(currdir, 'indexed_nonlinear.nl') self.assertEqual(*load_and_compare_nl_baseline(_base, _test)) - os.remove(join(currdir,'unknown.row')) - os.remove(join(currdir,'unknown.col')) if __name__ == "__main__": diff --git a/pyomo/solvers/tests/piecewise_linear/test_piecewise_linear.py b/pyomo/solvers/tests/piecewise_linear/test_piecewise_linear.py index 6bb76ca36f5..adf1a000fb4 100644 --- a/pyomo/solvers/tests/piecewise_linear/test_piecewise_linear.py +++ b/pyomo/solvers/tests/piecewise_linear/test_piecewise_linear.py @@ -11,7 +11,8 @@ import os from os.path import dirname, abspath, join -thisDir = dirname( abspath(__file__) ) + +thisDir = dirname(abspath(__file__)) import pyomo.common.unittest as unittest @@ -23,116 +24,181 @@ from pyomo.core.base.piecewise import Bound, PWRepn from pyomo.solvers.tests.solvers import test_solver_cases -smoke_problems = ['convex_var','step_var','step_vararray'] +smoke_problems = ['convex_var', 'step_var', 'step_vararray'] -nightly_problems = ['convex_vararray', 'concave_vararray', \ - 'concave_var','piecewise_var', 'piecewise_vararray'] +nightly_problems = [ + 'convex_vararray', + 'concave_vararray', + 'concave_var', + 'piecewise_var', + 'piecewise_vararray', +] -expensive_problems = ['piecewise_multi_vararray', \ - 'convex_multi_vararray1','concave_multi_vararray1', \ - 'convex_multi_vararray2','concave_multi_vararray2'] +expensive_problems = [ + 'piecewise_multi_vararray', + 'convex_multi_vararray1', + 'concave_multi_vararray1', + 'convex_multi_vararray2', + 'concave_multi_vararray2', +] testing_solvers = {} -testing_solvers['gurobi','lp'] = False -#testing_solvers['cplex','lp'] = False -#testing_solvers['cplex','nl'] = False -#testing_solvers['ipopt','nl'] = False -#testing_solvers['cplex','python'] = False -#testing_solvers['_cplex_persistent','python'] = False +testing_solvers['gurobi', 'lp'] = False +# testing_solvers['cplex','lp'] = False +# testing_solvers['cplex','nl'] = False +# testing_solvers['ipopt','nl'] = False +# testing_solvers['cplex','python'] = False +# testing_solvers['_cplex_persistent','python'] = False for _solver, _io in test_solver_cases(): - if (_solver, _io) in testing_solvers and \ - test_solver_cases(_solver, _io).available: + if (_solver, _io) in testing_solvers and test_solver_cases(_solver, _io).available: testing_solvers[_solver, _io] = True - def createMethod(pName, problem, solver, writer, kwds): - def Method(obj): - if not testing_solvers[solver, writer]: - obj.skipTest("Solver %s (interface=%s) is not available" - % (solver, writer)) + obj.skipTest("Solver %s (interface=%s) is not available" % (solver, writer)) - m = import_file(os.path.join(thisDir, 'problems', problem + '.py'), - clear_cache=True) + m = import_file( + os.path.join(thisDir, 'problems', problem + '.py'), clear_cache=True + ) model = m.define_model(**kwds) - opt = pyomo.opt.SolverFactory(solver,solver_io=writer) + opt = pyomo.opt.SolverFactory(solver, solver_io=writer) results = opt.solve(model) # non-recursive - new_results = ((var.name, var.value) - for var in model.component_data_objects(Var, - active=True, - descend_into=False)) - baseline_results = getattr(obj,problem+'_results') + new_results = ( + (var.name, var.value) + for var in model.component_data_objects( + Var, active=True, descend_into=False + ) + ) + baseline_results = getattr(obj, problem + '_results') for name, value in new_results: - if abs(baseline_results[name]-value) > 0.0001: - raise IOError("Difference in baseline solution values and " - "current solution values using:\n" + \ - "Solver: "+solver+"\n" + \ - "Writer: "+writer+"\n" + \ - "Variable: "+name+"\n" + \ - "Solution: "+str(value)+"\n" + \ - "Baseline: "+str(baseline_results[name])+"\n") + if abs(baseline_results[name] - value) > 0.0001: + raise IOError( + "Difference in baseline solution values and " + "current solution values using:\n" + + "Solver: " + + solver + + "\n" + + "Writer: " + + writer + + "\n" + + "Variable: " + + name + + "\n" + + "Solution: " + + str(value) + + "\n" + + "Baseline: " + + str(baseline_results[name]) + + "\n" + ) return Method + def assignProblems(cls, problem_list): - for solver,writer in testing_solvers: + for solver, writer in testing_solvers: for PROBLEM in problem_list: - aux_list = ['','force_pw'] + aux_list = ['', 'force_pw'] for AUX in aux_list: for REPN in PWRepn: for BOUND_TYPE in Bound: - for SENSE in [maximize,minimize]: - if not( ((BOUND_TYPE == Bound.Lower) and (SENSE == maximize)) or \ - ((BOUND_TYPE == Bound.Upper) and (SENSE == minimize)) or \ - ((REPN in [PWRepn.BIGM_BIN,PWRepn.BIGM_SOS1,PWRepn.MC]) and ('step' in PROBLEM)) ): + for SENSE in [maximize, minimize]: + if not ( + ((BOUND_TYPE == Bound.Lower) and (SENSE == maximize)) + or ((BOUND_TYPE == Bound.Upper) and (SENSE == minimize)) + or ( + ( + REPN + in [ + PWRepn.BIGM_BIN, + PWRepn.BIGM_SOS1, + PWRepn.MC, + ] + ) + and ('step' in PROBLEM) + ) + ): kwds = {} kwds['sense'] = SENSE kwds['pw_repn'] = REPN kwds['pw_constr_type'] = BOUND_TYPE if SENSE == maximize: - attrName = "test_{0}_{1}_{2}_{3}_{4}_{5}".format(PROBLEM,REPN,BOUND_TYPE,'maximize',solver,writer) + attrName = "test_{0}_{1}_{2}_{3}_{4}_{5}".format( + PROBLEM, + REPN, + BOUND_TYPE, + 'maximize', + solver, + writer, + ) elif SENSE == minimize: - attrName = "test_{0}_{1}_{2}_{3}_{4}_{5}".format(PROBLEM,REPN,BOUND_TYPE,'minimize',solver,writer) + attrName = "test_{0}_{1}_{2}_{3}_{4}_{5}".format( + PROBLEM, + REPN, + BOUND_TYPE, + 'minimize', + solver, + writer, + ) if AUX != '': kwds[AUX] = True - attrName += '_'+AUX - setattr(cls, - attrName, - createMethod(attrName, - PROBLEM, - solver, - writer, - kwds)) + attrName += '_' + AUX + setattr( + cls, + attrName, + createMethod( + attrName, PROBLEM, solver, writer, kwds + ), + ) if yaml_available: - with open(join(thisDir, - 'baselines', - PROBLEM+'_baseline_results.yml'),'r') as f: - baseline_results = yaml.load(f, **yaml_load_args) - setattr(cls,PROBLEM+'_results',baseline_results) + with open( + join( + thisDir, + 'baselines', + PROBLEM + '_baseline_results.yml', + ), + 'r', + ) as f: + baseline_results = yaml.load( + f, **yaml_load_args + ) + setattr( + cls, PROBLEM + '_results', baseline_results + ) + @unittest.skipUnless(yaml_available, "PyYAML module is not available.") -class PW_Tests(unittest.TestCase): pass +class PW_Tests(unittest.TestCase): + pass + class PiecewiseLinearTest_Smoke(PW_Tests): pass + + assignProblems(PiecewiseLinearTest_Smoke, smoke_problems) + class PiecewiseLinearTest_Nightly(PW_Tests): pass + + assignProblems(PiecewiseLinearTest_Nightly, nightly_problems) + @unittest.pytest.mark.expensive class PiecewiseLinearTest_Expensive(PW_Tests): pass + + assignProblems(PiecewiseLinearTest_Expensive, expensive_problems) if __name__ == "__main__": unittest.main() - diff --git a/pyomo/solvers/tests/piecewise_linear/test_piecewise_linear_kernel.py b/pyomo/solvers/tests/piecewise_linear/test_piecewise_linear_kernel.py index 6ab777da855..516ee25ffa3 100644 --- a/pyomo/solvers/tests/piecewise_linear/test_piecewise_linear_kernel.py +++ b/pyomo/solvers/tests/piecewise_linear/test_piecewise_linear_kernel.py @@ -12,7 +12,8 @@ import json import os from os.path import dirname, abspath, join -thisDir = dirname( abspath(__file__) ) + +thisDir = dirname(abspath(__file__)) import pyomo.common.unittest as unittest @@ -20,33 +21,28 @@ from pyomo.kernel import SolverFactory, variable, maximize, minimize from pyomo.solvers.tests.solvers import test_solver_cases -problems = ['convex_var', - 'concave_var', - 'piecewise_var', - 'step_var'] +problems = ['convex_var', 'concave_var', 'piecewise_var', 'step_var'] testing_solvers = {} -testing_solvers['gurobi','nl'] = False -#testing_solvers['cplex','lp'] = False -#testing_solvers['cplex','nl'] = False -#testing_solvers['ipopt','nl'] = False -#testing_solvers['cplex','python'] = False -#testing_solvers['_cplex_persistent','python'] = False +testing_solvers['gurobi', 'nl'] = False +# testing_solvers['cplex','lp'] = False +# testing_solvers['cplex','nl'] = False +# testing_solvers['ipopt','nl'] = False +# testing_solvers['cplex','python'] = False +# testing_solvers['_cplex_persistent','python'] = False for _solver, _io in test_solver_cases(): - if (_solver, _io) in testing_solvers and \ - test_solver_cases(_solver, _io).available: + if (_solver, _io) in testing_solvers and test_solver_cases(_solver, _io).available: testing_solvers[_solver, _io] = True -def createTestMethod(pName,problem,solver,writer,kwds): +def createTestMethod(pName, problem, solver, writer, kwds): def testMethod(obj): - if not testing_solvers[solver, writer]: - obj.skipTest("Solver %s (interface=%s) is not available" - % (solver, writer)) + obj.skipTest("Solver %s (interface=%s) is not available" % (solver, writer)) - m = import_file(os.path.join(thisDir, 'kernel_problems', problem + '.py'), - clear_cache=True) + m = import_file( + os.path.join(thisDir, 'kernel_problems', problem + '.py'), clear_cache=True + ) model = m.define_model(**kwds) @@ -54,54 +50,100 @@ def testMethod(obj): results = opt.solve(model) # non-recursive - new_results = ((var.name, var.value) - for var in model.components(ctype=variable.ctype, - active=True, - descend_into=False)) - baseline_results = getattr(obj,problem+'_results') + new_results = ( + (var.name, var.value) + for var in model.components( + ctype=variable.ctype, active=True, descend_into=False + ) + ) + baseline_results = getattr(obj, problem + '_results') for name, value in new_results: - if abs(baseline_results[name]-value) > 0.00001: - raise IOError("Difference in baseline solution values and " - "current solution values using:\n" + \ - "Solver: "+solver+"\n" + \ - "Writer: "+writer+"\n" + \ - "Variable: "+name+"\n" + \ - "Solution: "+str(value)+"\n" + \ - "Baseline: "+str(baseline_results[name])+"\n") + if abs(baseline_results[name] - value) > 0.00001: + raise IOError( + "Difference in baseline solution values and " + "current solution values using:\n" + + "Solver: " + + solver + + "\n" + + "Writer: " + + writer + + "\n" + + "Variable: " + + name + + "\n" + + "Solution: " + + str(value) + + "\n" + + "Baseline: " + + str(baseline_results[name]) + + "\n" + ) return testMethod + def assignTests(cls, problem_list): - for solver,writer in testing_solvers: + for solver, writer in testing_solvers: for PROBLEM in problem_list: - aux_list = [{'simplify': True}, - {'simplify': False}] + aux_list = [{'simplify': True}, {'simplify': False}] for AUX in aux_list: - for REPN in ['sos2','mc','inc','cc','dcc','dlog','log']: - for BOUND_TYPE in ['lb','ub','eq']: + for REPN in ['sos2', 'mc', 'inc', 'cc', 'dcc', 'dlog', 'log']: + for BOUND_TYPE in ['lb', 'ub', 'eq']: for SENSE in [maximize, minimize]: - if not( ((BOUND_TYPE == 'lb') and (SENSE == maximize)) or \ - ((BOUND_TYPE == 'ub') and (SENSE == minimize)) or \ - ((REPN == 'mc') and ('step' in PROBLEM)) ): + if not ( + ((BOUND_TYPE == 'lb') and (SENSE == maximize)) + or ((BOUND_TYPE == 'ub') and (SENSE == minimize)) + or ((REPN == 'mc') and ('step' in PROBLEM)) + ): kwds = {} kwds['sense'] = SENSE kwds['repn'] = REPN kwds['bound'] = BOUND_TYPE if SENSE == maximize: - attrName = "test_{0}_{1}_{2}_{3}_{4}_{5}".format(PROBLEM,REPN,BOUND_TYPE,'maximize',solver,writer) + attrName = "test_{0}_{1}_{2}_{3}_{4}_{5}".format( + PROBLEM, + REPN, + BOUND_TYPE, + 'maximize', + solver, + writer, + ) else: assert SENSE == minimize - attrName = "test_{0}_{1}_{2}_{3}_{4}_{5}".format(PROBLEM,REPN,BOUND_TYPE,'minimize',solver,writer) + attrName = "test_{0}_{1}_{2}_{3}_{4}_{5}".format( + PROBLEM, + REPN, + BOUND_TYPE, + 'minimize', + solver, + writer, + ) assert len(AUX) == 1 kwds.update(AUX) - attrName += '_simplify_'+str(AUX['simplify']) - setattr(cls,attrName,createTestMethod(attrName,PROBLEM,solver,writer,kwds)) - with open(join(thisDir,'kernel_baselines',PROBLEM+'_baseline_results.json'),'r') as f: + attrName += '_simplify_' + str(AUX['simplify']) + setattr( + cls, + attrName, + createTestMethod( + attrName, PROBLEM, solver, writer, kwds + ), + ) + with open( + join( + thisDir, + 'kernel_baselines', + PROBLEM + '_baseline_results.json', + ), + 'r', + ) as f: baseline_results = json.load(f) - setattr(cls,PROBLEM+'_results',baseline_results) + setattr(cls, PROBLEM + '_results', baseline_results) + class PiecewiseLinearKernelTest(unittest.TestCase): pass + + assignTests(PiecewiseLinearKernelTest, problems) if __name__ == "__main__": diff --git a/pyomo/solvers/tests/solvers.py b/pyomo/solvers/tests/solvers.py index 191d548927c..6bbfe08c7c7 100644 --- a/pyomo/solvers/tests/solvers.py +++ b/pyomo/solvers/tests/solvers.py @@ -9,7 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -__all__ = ['test_solver_cases', 'available_solvers'] +__all__ = ['test_solver_cases'] import logging @@ -29,7 +29,8 @@ # ---------------------------------------------------------------- -licensed_solvers_with_demo_mode = {'baron',} +licensed_solvers_with_demo_mode = {'baron'} + # # NOTE: we initialize the test case, since different @@ -51,8 +52,7 @@ def initialize(**kwds): obj.available = False elif hasattr(opt, 'executable') and opt.executable() is None: obj.available = False - elif not opt.license_is_valid() \ - and obj.name not in licensed_solvers_with_demo_mode: + elif not opt.license_is_valid() and obj.name not in licensed_solvers_with_demo_mode: obj.available = False else: obj.available = True @@ -69,7 +69,9 @@ def initialize(**kwds): if not (opt is None or isinstance(opt, UnknownSolver)): for _c in obj.capabilities: if not _c in opt._capabilities: - raise ValueError("Solver %s does not support capability %s!" % (obj.name, _c)) + raise ValueError( + "Solver %s does not support capability %s!" % (obj.name, _c) + ) # # Get version # @@ -85,66 +87,79 @@ def test_solver_cases(*args): if len(_test_solver_cases) == 0: logging.disable(logging.WARNING) - # # MOSEK # - _mosek_capabilities = set(['linear', - 'integer', - 'quadratic_objective', - 'quadratic_constraint', - 'conic_constraints']) - + _mosek_capabilities = set( + [ + 'linear', + 'integer', + 'quadratic_objective', + 'quadratic_constraint', + 'conic_constraints', + ] + ) + _test_solver_cases['mosek', 'python'] = initialize( name='mosek', io='python', capabilities=_mosek_capabilities, - import_suffixes=['dual', 'rc', 'slack']) + import_suffixes=['dual', 'rc', 'slack'], + ) # # MOSEK Persistent # - _test_solver_cases['mosek','persistent'] = initialize( - name = 'mosek', - io = 'persistent', - capabilities=_mosek_capabilities, - import_suffixes=['dual','rc','slack']) + _test_solver_cases['mosek', 'persistent'] = initialize( + name='mosek', + io='persistent', + capabilities=_mosek_capabilities, + import_suffixes=['dual', 'rc', 'slack'], + ) # # CPLEX # - _cplex_capabilities= set(['linear', - 'integer', - 'quadratic_objective', - 'quadratic_constraint', - 'sos1', - 'sos2']) + _cplex_capabilities = set( + [ + 'linear', + 'integer', + 'quadratic_objective', + 'quadratic_constraint', + 'sos1', + 'sos2', + ] + ) _test_solver_cases['cplex', 'lp'] = initialize( name='cplex', io='lp', capabilities=_cplex_capabilities, - import_suffixes=['slack','dual','rc']) + import_suffixes=['slack', 'dual', 'rc'], + ) _test_solver_cases['cplex', 'mps'] = initialize( name='cplex', io='mps', capabilities=_cplex_capabilities, - import_suffixes=['slack','dual','rc']) + import_suffixes=['slack', 'dual', 'rc'], + ) _test_solver_cases['cplex', 'nl'] = initialize( name='cplex', io='nl', capabilities=_cplex_capabilities, - import_suffixes=['dual']) + import_suffixes=['dual'], + ) _test_solver_cases['cplex', 'python'] = initialize( name='cplex', io='python', capabilities=_cplex_capabilities, - import_suffixes=['slack','dual','rc']) + import_suffixes=['slack', 'dual', 'rc'], + ) # # CPLEX PERSISTENT @@ -154,28 +169,30 @@ def test_solver_cases(*args): name='cplex_persistent', io='python', capabilities=_cplex_capabilities, - import_suffixes=['slack','dual','rc']) + import_suffixes=['slack', 'dual', 'rc'], + ) # # GAMS # - _gams_capabilities= set(['linear', - 'integer', - 'quadratic_objective', - 'quadratic_constraint']) + _gams_capabilities = set( + ['linear', 'integer', 'quadratic_objective', 'quadratic_constraint'] + ) _test_solver_cases['gams', 'gms'] = initialize( name='gams', io='gms', capabilities=_gams_capabilities, - import_suffixes=['dual','rc']) + import_suffixes=['dual', 'rc'], + ) _test_solver_cases['gams', 'python'] = initialize( name='gams', io='python', capabilities=_gams_capabilities, - import_suffixes=['dual','rc']) + import_suffixes=['dual', 'rc'], + ) # # GUROBI @@ -183,37 +200,45 @@ def test_solver_cases(*args): # **NOTE: Gurobi does not handle quadratic constraints before # Major Version 5 # - _gurobi_capabilities= set(['linear', - 'integer', - 'quadratic_objective', - 'quadratic_constraint', - 'sos1', - 'sos2']) + _gurobi_capabilities = set( + [ + 'linear', + 'integer', + 'quadratic_objective', + 'quadratic_constraint', + 'sos1', + 'sos2', + ] + ) _test_solver_cases['gurobi', 'lp'] = initialize( name='gurobi', io='lp', capabilities=_gurobi_capabilities, - import_suffixes=['slack','dual','rc']) + import_suffixes=['slack', 'dual', 'rc'], + ) _test_solver_cases['gurobi', 'mps'] = initialize( name='gurobi', io='mps', capabilities=_gurobi_capabilities, - import_suffixes=['slack','dual','rc']) + import_suffixes=['slack', 'dual', 'rc'], + ) _test_solver_cases['gurobi', 'nl'] = initialize( name='gurobi', io='nl', capabilities=_gurobi_capabilities, - options={'qcpdual':1,'simplex':1}, - import_suffixes=['dual']) + options={'qcpdual': 1, 'simplex': 1}, + import_suffixes=['dual'], + ) _test_solver_cases['gurobi', 'python'] = initialize( name='gurobi', io='python', capabilities=_gurobi_capabilities, - import_suffixes=['slack','dual','rc']) + import_suffixes=['slack', 'dual', 'rc'], + ) # # Gurobi PERSISTENT @@ -223,37 +248,40 @@ def test_solver_cases(*args): name='gurobi_persistent', io='python', capabilities=_gurobi_capabilities, - import_suffixes=['slack', 'dual', 'rc']) + import_suffixes=['slack', 'dual', 'rc'], + ) # # GLPK # - _glpk_capabilities= set(['linear', - 'integer']) + _glpk_capabilities = set(['linear', 'integer']) if 'GLPKSHELL_old' in str(pyomo.solvers.plugins.solvers.GLPK.GLPK().__class__): glpk_import_suffixes = ['dual'] else: - glpk_import_suffixes = ['rc','dual'] + glpk_import_suffixes = ['rc', 'dual'] _test_solver_cases['glpk', 'lp'] = initialize( name='glpk', io='lp', capabilities=_glpk_capabilities, - import_suffixes=glpk_import_suffixes) + import_suffixes=glpk_import_suffixes, + ) _test_solver_cases['glpk', 'mps'] = initialize( name='glpk', io='mps', capabilities=_glpk_capabilities, import_suffixes=glpk_import_suffixes, - io_options={"skip_objective_sense": True}) + io_options={"skip_objective_sense": True}, + ) _test_solver_cases['glpk', 'python'] = initialize( name='glpk', io='python', capabilities=_glpk_capabilities, - import_suffixes=[]) + import_suffixes=[], + ) # # CBC @@ -264,7 +292,8 @@ def test_solver_cases(*args): name='cbc', io='lp', capabilities=_cbc_lp_capabilities, - import_suffixes=['dual','rc']) + import_suffixes=['dual', 'rc'], + ) _cbc_nl_capabilities = set(['linear', 'integer', 'sos1', 'sos2']) @@ -272,35 +301,41 @@ def test_solver_cases(*args): name='cbc', io='nl', capabilities=_cbc_nl_capabilities, - import_suffixes=['dual']) + import_suffixes=['dual'], + ) - #_cbc_mps_capabilities = set(['linear', 'integer', 'sos1', 'sos2']) + # _cbc_mps_capabilities = set(['linear', 'integer', 'sos1', 'sos2']) - #_test_solver_cases['cbc', 'mps'] = initialize( - #name='cbc', - #io='mps', - #capabilities=_cbc_mps_capabilities, - #import_suffixes=['dual', 'rc']) + # _test_solver_cases['cbc', 'mps'] = initialize( + # name='cbc', + # io='mps', + # capabilities=_cbc_mps_capabilities, + # import_suffixes=['dual', 'rc']) # # XPRESS # - _xpress_capabilities= set(['linear', - 'integer', - 'quadratic_objective', - 'quadratic_constraint', - 'sos1', - 'sos2']) + _xpress_capabilities = set( + [ + 'linear', + 'integer', + 'quadratic_objective', + 'quadratic_constraint', + 'sos1', + 'sos2', + ] + ) _test_solver_cases['xpress', 'python'] = initialize( name='xpress_direct', io='python', capabilities=_xpress_capabilities, import_suffixes=['dual', 'rc', 'slack'], - options={'bargapstop':1e-9,}) + options={'bargapstop': 1e-9}, + ) # - # XPRESS PERSISTENT + # XPRESS PERSISTENT # _test_solver_cases['xpress_persistent', 'python'] = initialize( @@ -308,80 +343,88 @@ def test_solver_cases(*args): io='python', capabilities=_xpress_capabilities, import_suffixes=['slack', 'dual', 'rc'], - options={'bargapstop':1e-9,}) + options={'bargapstop': 1e-9}, + ) # # IPOPT # - _ipopt_capabilities= set(['linear', - 'quadratic_objective', - 'quadratic_constraint']) + _ipopt_capabilities = set( + ['linear', 'quadratic_objective', 'quadratic_constraint'] + ) _test_solver_cases['ipopt', 'nl'] = initialize( name='ipopt', io='nl', capabilities=_ipopt_capabilities, - import_suffixes=['dual']) + import_suffixes=['dual'], + ) # # SCIP # - _scip_capabilities= set(['linear', - 'integer', - 'quadratic_objective', - 'quadratic_constraint', - 'sos1', - 'sos2']) + _scip_capabilities = set( + [ + 'linear', + 'integer', + 'quadratic_objective', + 'quadratic_constraint', + 'sos1', + 'sos2', + ] + ) _test_solver_cases['scip', 'nl'] = initialize( - name='scip', - io='nl', - capabilities=_scip_capabilities, - import_suffixes=[]) + name='scip', io='nl', capabilities=_scip_capabilities, import_suffixes=[] + ) # # CONOPT # - _conopt_capabilities= set(['linear', - 'integer', - 'quadratic_objective', - 'quadratic_constraint', - 'sos1', - 'sos2']) + _conopt_capabilities = set( + [ + 'linear', + 'integer', + 'quadratic_objective', + 'quadratic_constraint', + 'sos1', + 'sos2', + ] + ) _test_solver_cases['conopt', 'nl'] = initialize( name='conopt', io='nl', capabilities=_conopt_capabilities, - import_suffixes=[]) + import_suffixes=[], + ) # # BARON # - _baron_capabilities= set(['linear', - 'integer', - 'quadratic_objective', - 'quadratic_constraint']) + _baron_capabilities = set( + ['linear', 'integer', 'quadratic_objective', 'quadratic_constraint'] + ) _test_solver_cases['baron', 'bar'] = initialize( name='baron', io='bar', capabilities=_baron_capabilities, - import_suffixes=['rc','dual']) + import_suffixes=['rc', 'dual'], + ) # # KNITROAMPL # - _knitroampl_capabilities= set(['linear', - 'integer', - 'quadratic_objective', - 'quadratic_constraint']) + _knitroampl_capabilities = set( + ['linear', 'integer', 'quadratic_objective', 'quadratic_constraint'] + ) _test_solver_cases['knitroampl', 'nl'] = initialize( name='knitroampl', io='nl', capabilities=_knitroampl_capabilities, - import_suffixes=['dual']) - + import_suffixes=['dual'], + ) logging.disable(logging.NOTSET) @@ -400,8 +443,8 @@ def test_solver_cases(*args): if sc.io_options is None: sc.io_options = {} assert (sc.io is not None) and (type(sc.io) is str) - assert type(sc.export_suffixes) in [list,tuple] - assert type(sc.import_suffixes) in [list,tuple] + assert type(sc.export_suffixes) in [list, tuple] + assert type(sc.import_suffixes) in [list, tuple] assert type(sc.options) is dict for tag in sc.export_suffixes: assert type(tag) is str @@ -414,4 +457,3 @@ def test_solver_cases(*args): if len(args) == 0: return _test_solver_cases.keys() return _test_solver_cases[args] - diff --git a/pyomo/solvers/tests/testcases.py b/pyomo/solvers/tests/testcases.py index 5a365bf4602..eaebbcd9003 100644 --- a/pyomo/solvers/tests/testcases.py +++ b/pyomo/solvers/tests/testcases.py @@ -19,7 +19,7 @@ from pyomo.core.kernel.block import IBlock # For expected failures that appear in all known version -_trunk_version = (float('inf'), float('inf'), float('inf'), float('inf')) +_trunk_version = (float('inf'), float('inf'), float('inf'), float('inf')) # These are usually due to a bug in the latest version of the # thirdparty solver. Tests will be expected to fail. If they do not, @@ -53,7 +53,14 @@ for _test in ('QCP_simple', 'QCP_simple_nosuffixes', 'MIQCP_simple'): ExpectedFailures['mosek', _io, _test] = ( lambda v: True, - "Mosek does not handle nonconvex quadratic constraints") + "Mosek does not handle nonconvex quadratic constraints", + ) + + for _test in ('MIQP_simple',): + SkipTests['mosek', _io, _test] = ( + lambda v: v[0] == 10 and v < (10, 0, 30), + "Mosek 10 fails on assertion warmstarting MIQP models; see #2613", + ) # # CPLEX @@ -61,28 +68,39 @@ MissingSuffixFailures['cplex', 'lp', 'QCP_simple'] = ( lambda v: v <= _trunk_version, - {'dual': (True, {'qc0','qc1'})}, - "Cplex does not report duals of quadratic constraints.") + {'dual': (True, {'qc0', 'qc1'})}, + "Cplex does not report duals of quadratic constraints.", +) MissingSuffixFailures['cplex', 'mps', 'QCP_simple'] = ( lambda v: v <= _trunk_version, - {'dual': (True, {'qc0','qc1'})}, - "Cplex does not report duals of quadratic constraints.") + {'dual': (True, {'qc0', 'qc1'})}, + "Cplex does not report duals of quadratic constraints.", +) MissingSuffixFailures['cplex', 'python', 'QCP_simple'] = ( lambda v: v <= _trunk_version, - {'dual': (True, {'qc0','qc1'})}, - "Cplex does not report duals of quadratic constraints.") + {'dual': (True, {'qc0', 'qc1'})}, + "Cplex does not report duals of quadratic constraints.", +) MissingSuffixFailures['cplex_persistent', 'python', 'QCP_simple'] = ( lambda v: v <= _trunk_version, - {'dual': (True, {'qc0','qc1'})}, - "Cplex does not report duals of quadratic constraints.") + {'dual': (True, {'qc0', 'qc1'})}, + "Cplex does not report duals of quadratic constraints.", +) MissingSuffixFailures['cplex', 'nl', 'QCP_simple'] = ( - lambda v: v <= (12,5,9,9), - {'dual': (True, {'qc0','qc1'})}, - "Cplex does not report duals of quadratic constraints.") + lambda v: v < (12, 6, 0, 0), + {'dual': (True, {'qc0', 'qc1'})}, + "Cplex does not report duals of quadratic constraints.", +) + +SkipTests['cplex', 'nl', 'QCP_simple'] = ( + lambda v: v == (12, 6, 0, 0), + "Cplex 12.6.0.0 produces inconsistent dual values based on " + "NL variable ordering (which changes between the NLv1 and NLv2 writers", +) # # GUROBI @@ -90,45 +108,74 @@ # NO EXPECTED FAILURES # +# +# GAMS +# + +ExpectedFailures['gams', 'gms', 'MILP_unbounded'] = ( + lambda v: v <= _trunk_version, + "GAMS requires finite bounds for integer variables. 1.0E100 is as extreme" + "as GAMS will define, and should be enough to appear unbounded. If the" + "solver cannot handle this bound, explicitly set a smaller bound on" + "the pyomo model, or try a different GAMS solver.", +) + +ExpectedFailures['gams', 'python', 'MILP_unbounded'] = ( + lambda v: v <= _trunk_version, + "GAMS requires finite bounds for integer variables. 1.0E100 is as extreme" + "as GAMS will define, and should be enough to appear unbounded. If the" + "solver cannot handle this bound, explicitly set a smaller bound on" + "the pyomo model, or try a different GAMS solver.", +) + # # GLPK # -ExpectedFailures['glpk', 'lp', 'MILP_discrete_var_bounds'] = \ - (lambda v: v <= (4,52,0,0), +ExpectedFailures['glpk', 'lp', 'MILP_discrete_var_bounds'] = ( + lambda v: v <= (4, 52, 0, 0), "Glpk ignores bounds on Binary variables through the " - "LP file interface. A ticket has been filed.") + "LP file interface. A ticket has been filed.", +) -ExpectedFailures['glpk', 'mps', 'LP_duals_maximize'] = \ - (lambda v: v <= _trunk_version, +ExpectedFailures['glpk', 'mps', 'LP_duals_maximize'] = ( + lambda v: v <= _trunk_version, "Glpk does not accept the OBJSENSE section of the Free MPS format. " - "Therefore maximization models are not explicitly handled.") + "Therefore maximization models are not explicitly handled.", +) # # CBC # -ExpectedFailures['cbc', 'nl', 'MILP_unbounded'] = \ - (lambda v: v <= _trunk_version, - "Cbc fails to report an unbounded MILP model as unbounded through " - "the NL interface (through 2.9.x), and fails with invalid free() " - "(in 2.10.x).") +ExpectedFailures['cbc', 'nl', 'MILP_unbounded'] = ( + lambda v: v <= _trunk_version, + "Cbc fails to report an unbounded MILP model as unbounded through " + "the NL interface (through 2.9.x), and fails with invalid free() " + "(in 2.10.x).", +) -ExpectedFailures['cbc', 'nl', 'LP_unbounded'] = \ - (lambda v: v[:2] == (2, 10), - "Cbc fails (invalid free()) for unbounded LP models through " - "the NL interface in 2.10.x versions " - "(reported upstream as coin-or/Cbc#389)") +# The following is due to a bug introduced into Clp as part of CBC 2.10 +# and was resolved by Clp commit 130dd199 (13 Feb 2021), and included +# in the CBC 2.10.6 release +ExpectedFailures['cbc', 'nl', 'LP_unbounded'] = ( + lambda v: v > (2, 10) and v < (2, 10, 6), + "Cbc fails (invalid free()) for unbounded LP models through " + "the NL interface in 2.10.x versions through 2.10.5 " + "(reported upstream as coin-or/Cbc#389)", +) -ExpectedFailures['cbc', 'nl', 'SOS1_simple'] = \ - (lambda v: v[:2] == (2, 10), - "Cbc segfaults for SOS constraints in the NL interface " - "(reported upstream as coin-or/Cbc#388)") +ExpectedFailures['cbc', 'nl', 'SOS1_simple'] = ( + lambda v: v[:2] == (2, 10), + "Cbc segfaults for SOS constraints in the NL interface " + "(reported upstream as coin-or/Cbc#388)", +) -ExpectedFailures['cbc', 'nl', 'SOS2_simple'] = \ - (lambda v: v[:2] == (2, 10), - "Cbc segfaults for SOS constraints in the NL interface " - "(reported upstream as coin-or/Cbc#388)") +ExpectedFailures['cbc', 'nl', 'SOS2_simple'] = ( + lambda v: v[:2] == (2, 10), + "Cbc segfaults for SOS constraints in the NL interface " + "(reported upstream as coin-or/Cbc#388)", +) # # XPRESS @@ -140,169 +187,100 @@ # IPOPT # -ExpectedFailures['ipopt', 'nl', 'LP_duals_maximize'] = \ - (lambda v: v == (3,10,3,0), - "Ipopt returns duals with a different sign convention. " - "Fixed in Ipopt 3.10.4") - -ExpectedFailures['ipopt', 'nl', 'QCP_simple'] = \ - (lambda v: v == (3,10,3,0), - "Ipopt returns duals with a different sign convention. " - "Fixed in Ipopt 3.10.4") - -ExpectedFailures['ipopt', 'nl', 'LP_block'] = \ - (lambda v: v <= (3,10,2,0), - "Ipopt returns duals with a different sign convention. " - "Fixed in Ipopt 3.10.3") - -ExpectedFailures['ipopt', 'nl', 'LP_duals_minimize'] = \ - (lambda v: v <= (3,10,2,0), - "Ipopt returns duals with a different sign convention. " - "Fixed in Ipopt 3.10.3") - -ExpectedFailures['ipopt', 'nl', 'LP_inactive_index'] = \ - (lambda v: v <= (3,10,2,0), - "Ipopt returns duals with a different sign convention. " - "Fixed in Ipopt 3.10.3") - -ExpectedFailures['ipopt', 'nl', 'LP_piecewise'] = \ - (lambda v: v <= (3,10,2,0), - "Ipopt returns duals with a different sign convention. " - "Fixed in Ipopt 3.10.3") - -ExpectedFailures['ipopt', 'nl', 'LP_simple'] = \ - (lambda v: v <= (3,10,2,0), - "Ipopt returns duals with a different sign convention. " - "Fixed in Ipopt 3.10.3") - -ExpectedFailures['ipopt', 'nl', 'QP_simple'] = \ - (lambda v: v <= (3,10,2,0), - "Ipopt returns duals with a different sign convention. " - "Fixed in Ipopt 3.10.3") - -ExpectedFailures['ipopt', 'nl', 'LP_trivial_constraints'] = \ - (lambda v: v <= (3,10,2,0), - "Ipopt returns duals with a different sign convention. " - "Fixed in Ipopt 3.10.3") +ExpectedFailures['ipopt', 'nl', 'LP_duals_maximize'] = ( + lambda v: v == (3, 10, 3, 0), + "Ipopt returns duals with a different sign convention. Fixed in Ipopt 3.10.4", +) + +ExpectedFailures['ipopt', 'nl', 'QCP_simple'] = ( + lambda v: v == (3, 10, 3, 0), + "Ipopt returns duals with a different sign convention. Fixed in Ipopt 3.10.4", +) + +ExpectedFailures['ipopt', 'nl', 'LP_block'] = ( + lambda v: v <= (3, 10, 2, 0), + "Ipopt returns duals with a different sign convention. Fixed in Ipopt 3.10.3", +) + +ExpectedFailures['ipopt', 'nl', 'LP_duals_minimize'] = ( + lambda v: v <= (3, 10, 2, 0), + "Ipopt returns duals with a different sign convention. Fixed in Ipopt 3.10.3", +) + +ExpectedFailures['ipopt', 'nl', 'LP_inactive_index'] = ( + lambda v: v <= (3, 10, 2, 0), + "Ipopt returns duals with a different sign convention. Fixed in Ipopt 3.10.3", +) + +ExpectedFailures['ipopt', 'nl', 'LP_piecewise'] = ( + lambda v: v <= (3, 10, 2, 0), + "Ipopt returns duals with a different sign convention. Fixed in Ipopt 3.10.3", +) + +ExpectedFailures['ipopt', 'nl', 'LP_simple'] = ( + lambda v: v <= (3, 10, 2, 0), + "Ipopt returns duals with a different sign convention. Fixed in Ipopt 3.10.3", +) + +ExpectedFailures['ipopt', 'nl', 'QP_simple'] = ( + lambda v: v <= (3, 10, 2, 0), + "Ipopt returns duals with a different sign convention. Fixed in Ipopt 3.10.3", +) + +ExpectedFailures['ipopt', 'nl', 'LP_trivial_constraints'] = ( + lambda v: v <= (3, 10, 2, 0), + "Ipopt returns duals with a different sign convention. Fixed in Ipopt 3.10.3", +) # # SCIP # -ExpectedFailures['scip', 'nl', 'SOS2_simple'] = \ - (lambda v: v <= (3, 1, 0, 9), - "SCIP (scipampl) does not recognize sos2 constraints " - "inside NL files. A ticket has been filed.") +ExpectedFailures['scip', 'nl', 'SOS2_simple'] = ( + lambda v: v <= (3, 1, 0, 9), + "SCIP (scip) does not recognize sos2 constraints " + "inside NL files. A ticket has been filed.", +) -ExpectedFailures['scip', 'nl', 'SOS1_simple'] = \ - (lambda v: v <= (3, 1, 0, 9), - "SCIP (scipampl) does not recognize sos1 constraints " - "inside NL files. A ticket has been filed.") +ExpectedFailures['scip', 'nl', 'SOS1_simple'] = ( + lambda v: v <= (3, 1, 0, 9), + "SCIP (scip) does not recognize sos1 constraints " + "inside NL files. A ticket has been filed.", +) # # BARON # SkipTests['baron', 'bar', 'LP_trivial_constraints'] = ( lambda v: v[:3] == (22, 1, 19), - 'BARON 22.1.19 hits an infinite loop for this test case' + 'BARON 22.1.19 hits an infinite loop for this test case', ) -for prob in ('QP_simple_nosuffixes', 'QP_simple_nosuffixes_kernel', - 'QP_simple', 'QP_simple_kernel', - 'MIQP_simple', 'MIQP_simple_kernel', - 'MILP_simple', 'MILP_simple_kernel', - 'LP_simple', 'LP_simple_kernel', - 'LP_block', 'LP_block_kernel'): +for prob in ( + 'QP_simple_nosuffixes', + 'QP_simple_nosuffixes_kernel', + 'QP_simple', + 'QP_simple_kernel', + 'MIQP_simple', + 'MIQP_simple_kernel', + 'MILP_simple', + 'MILP_simple_kernel', + 'LP_simple', + 'LP_simple_kernel', + 'LP_block', + 'LP_block_kernel', +): ExpectedFailures['baron', 'bar', prob] = ( lambda v: v[:3] == (22, 1, 19), - 'BARON 22.1.19 reports model as infeasible' + 'BARON 22.1.19 reports model as infeasible', ) for prob in ('LP_unbounded', 'LP_unbounded_kernel'): ExpectedFailures['baron', 'bar', prob] = ( lambda v: v[:3] == (22, 1, 19), - 'BARON 22.1.19 reports model as optimal' + 'BARON 22.1.19 reports model as optimal', ) -# -# The following were necessary before we started adding the 'WantDual' -# option when a user explicitly defines a 'dual' or 'rc' suffix to -# "force" Baron to run a local solve (if necessary) and always return -# dual nformation. -# - -# # Known to fail through 18.11.15, but was resolved by 19.12.7 -# ExpectedFailures['baron', 'bar', 'MILP_unbounded'] = ( -# lambda v: v <= (18,11,15), -# ['dual'], -# "Baron fails to report a MILP model as unbounded") - -# # Known to work through 18.11.15, and fail in 19.12.7 -# MissingSuffixFailures['baron', 'bar', 'LP_piecewise'] = ( -# lambda v: v <= (15,0,0,0) or v > (18,11,15), -# ['dual'], -# "Baron will not return dual solution when a solution is " -# "found during preprocessing.") - -# # Marking this test suffixes as fragile: Baron 20.4.14 will -# # intermittently return suffixes. -# MissingSuffixFailures['baron', 'bar', 'QP_simple'] = ( -# lambda v: v <= (15,2,0,0) or v > (18,11,15), -# {'dual': (False, {}), 'rc': (False, {})}, -# "Baron will intermittently return dual solution when " -# "a solution is found during preprocessing.") - -# # Known to fail through 17.4.1, but was resolved by 18.5.9 -# MissingSuffixFailures['baron', 'bar', 'QCP_simple'] = ( -# lambda v: v <= (17,4,1) or v > (18,11,15), -# ['dual','rc'], -# "Baron will not return dual solution when a solution is " -# "found during preprocessing.") - -# # Known to work through 18.11.15, and fail in 19.12.7 -# MissingSuffixFailures['baron', 'bar', 'LP_block'] = ( -# lambda v: v > (18,11,15), -# ['dual'], -# "Baron will not return dual solution when a solution is " -# "found during preprocessing.") - -# # Known to work through 18.11.15, and fail in 19.12.7 -# MissingSuffixFailures['baron', 'bar', 'LP_inactive_index'] = ( -# lambda v: v > (18,11,15), -# ['dual'], -# "Baron will not return dual solution when a solution is " -# "found during preprocessing.") - -# # Known to work through 18.11.15, and fail in 19.12.7 -# MissingSuffixFailures['baron', 'bar', 'LP_simple'] = ( -# lambda v: v > (18,11,15), -# ['dual'], -# "Baron will not return dual solution when a solution is " -# "found during preprocessing.") - -# # Known to work through 18.11.15, and fail in 19.12.7 -# MissingSuffixFailures['baron', 'bar', 'LP_trivial_constraints'] = ( -# lambda v: v > (18,11,15), -# ['dual'], -# "Baron will not return dual solution when a solution is " -# "found during preprocessing.") - -# # Known to work through 19.12.7, and fail in 20.4.14 -# MissingSuffixFailures['baron', 'bar', 'LP_duals_minimize'] = ( -# lambda v: v > (19,12,7), -# ['dual','rc'], -# "Baron will not return dual solution when a solution is " -# "found during preprocessing.") - -# # Known to work through 19.12.7, and fail in 20.4.14 -# MissingSuffixFailures['baron', 'bar', 'LP_duals_maximize'] = ( -# lambda v: v > (19,12,7), -# ['dual','rc'], -# "Baron will not return dual solution when a solution is " -# "found during preprocessing.") - - - # # KNITROAMPL @@ -325,7 +303,7 @@ def generate_scenarios(arg=None): # Skip this test case if the solver doesn't support the # capabilities required by the model - if not _model.capabilities.issubset( _solver_case.capabilities ): + if not _model.capabilities.issubset(_solver_case.capabilities): continue # Set status values for expected failures @@ -334,23 +312,22 @@ def generate_scenarios(arg=None): msg = "" case_skip = SkipTests.get((solver, io, _model.description), None) case_suffix = MissingSuffixFailures.get( - (solver, io, _model.description), None) - case_fail = ExpectedFailures.get( - (solver, io, _model.description), None) + (solver, io, _model.description), None + ) + case_fail = ExpectedFailures.get((solver, io, _model.description), None) if not _solver_case.available: status = 'skip' - msg = ("Skipping test because solver %s (%s) is unavailable" - % (solver, io)) - elif (case_skip is not None and - _ver is not None and case_skip[0](_ver)): + msg = "Skipping test because solver %s (%s) is unavailable" % ( + solver, + io, + ) + elif case_skip is not None and _ver is not None and case_skip[0](_ver): status = 'skip' msg = case_skip[1] - elif (case_fail is not None and - _ver is not None and case_fail[0](_ver)): + elif case_fail is not None and _ver is not None and case_fail[0](_ver): status = 'expected failure' msg = case_fail[1] - elif (case_suffix is not None and - _ver is not None and case_suffix[0](_ver)): + elif case_suffix is not None and _ver is not None and case_suffix[0](_ver): if type(case_suffix[1]) is dict: exclude_suffixes.update(case_suffix[1]) else: @@ -360,9 +337,14 @@ def generate_scenarios(arg=None): # Return scenario dimensions and scenario information yield (model, solver, io), Bunch( - status=status, msg=msg, model=_model, solver=None, - testcase=_solver_case, demo_limits=_solver_case.demo_limits, - exclude_suffixes=exclude_suffixes) + status=status, + msg=msg, + model=_model, + solver=None, + testcase=_solver_case, + demo_limits=_solver_case.demo_limits, + exclude_suffixes=exclude_suffixes, + ) def run_scenarios(options): @@ -392,7 +374,8 @@ def run_scenarios(options): test_case.testcase.io_options, {}, symbolic_labels, - load_solutions) + load_solutions, + ) termination_condition = results['Solver'][0]['termination condition'] # Validate solution status @@ -402,10 +385,15 @@ def run_scenarios(options): if test_case.status == 'expected failure': stat[key] = (True, "Expected failure") else: - stat[key] = (False, "Unexpected termination condition: %s" % str(termination_condition)) + stat[key] = ( + False, + "Unexpected termination condition: %s" % str(termination_condition), + ) continue - if termination_condition == TerminationCondition.unbounded or \ - termination_condition == TerminationCondition.infeasible: + if ( + termination_condition == TerminationCondition.unbounded + or termination_condition == TerminationCondition.infeasible + ): # Unbounded or Infeasible stat[key] = (True, "") else: @@ -414,9 +402,11 @@ def run_scenarios(options): model_class.model.load_solution(results.solution) else: model_class.model.solutions.load_from( - results, - default_variable_value=opt.default_variable_value()) - rc = model_class.validate_current_solution(suffixes=model_class.test_suffixes) + results, default_variable_value=opt.default_variable_value() + ) + rc = model_class.validate_current_solution( + suffixes=model_class.test_suffixes + ) if test_case.status == 'expected failure': if rc[0] is True: @@ -442,8 +432,7 @@ def run_scenarios(options): for key in stat: model, solver, io = key if not solver in summary: - summary[solver] = Bunch(NumEPass=0, NumEFail=0, - NumUPass=0, NumUFail=0) + summary[solver] = Bunch(NumEPass=0, NumEFail=0, NumUPass=0, NumUFail=0) _pass, _str = stat[key] if _pass: if _str == "Expected failure": @@ -455,11 +444,15 @@ def run_scenarios(options): if _str == "Unexpected failure": summary[solver].NumUFail += 1 if options.verbose: - print("- Unexpected Test Failure: "+", ".join((model, solver, io))) + print( + "- Unexpected Test Failure: " + ", ".join((model, solver, io)) + ) else: summary[solver].NumUPass += 1 if options.verbose: - print("- Unexpected Test Success: "+", ".join((model, solver, io))) + print( + "- Unexpected Test Success: " + ", ".join((model, solver, io)) + ) if options.verbose: if nfail == 0: print("- NONE") @@ -467,12 +460,18 @@ def run_scenarios(options): stream = sys.stdout maxSolverNameLen = max([max(len(name) for name in summary), len("Solver")]) - fmtStr = "{{0:<{0}}}| {{1:>8}} | {{2:>8}} | {{3:>10}} | {{4:>10}} | {{5:>13}}\n".format(maxSolverNameLen + 2) + fmtStr = ( + "{{0:<{0}}}| {{1:>8}} | {{2:>8}} | {{3:>10}} | {{4:>10}} | {{5:>13}}\n".format( + maxSolverNameLen + 2 + ) + ) # stream.write("\n") stream.write("Solver Test Summary\n") stream.write("=" * (maxSolverNameLen + 66) + "\n") - stream.write(fmtStr.format("Solver", "# Pass", "# Fail", "# OK Fail", "# Bad Pass", "% OK")) + stream.write( + fmtStr.format("Solver", "# Pass", "# Fail", "# OK Fail", "# Bad Pass", "% OK") + ) stream.write("=" * (maxSolverNameLen + 66) + "\n") # for _solver in sorted(summary): @@ -481,10 +480,45 @@ def run_scenarios(options): total.NumEFail += ans.NumEFail total.NumUPass += ans.NumUPass total.NumUFail += ans.NumUFail - stream.write(fmtStr.format(_solver, str(ans.NumEPass), str(ans.NumUFail), str(ans.NumEFail), str(ans.NumUPass), str(int(100.0*(ans.NumEPass+ans.NumEFail)/(ans.NumEPass+ans.NumEFail+ans.NumUFail+ans.NumUPass))))) + stream.write( + fmtStr.format( + _solver, + str(ans.NumEPass), + str(ans.NumUFail), + str(ans.NumEFail), + str(ans.NumUPass), + str( + int( + 100.0 + * (ans.NumEPass + ans.NumEFail) + / (ans.NumEPass + ans.NumEFail + ans.NumUFail + ans.NumUPass) + ) + ), + ) + ) # stream.write("=" * (maxSolverNameLen + 66) + "\n") - stream.write(fmtStr.format("TOTALS", str(total.NumEPass), str(total.NumUFail), str(total.NumEFail), str(total.NumUPass), str(int(100.0*(total.NumEPass+total.NumEFail)/(total.NumEPass+total.NumEFail+total.NumUFail+total.NumUPass))))) + stream.write( + fmtStr.format( + "TOTALS", + str(total.NumEPass), + str(total.NumUFail), + str(total.NumEFail), + str(total.NumUPass), + str( + int( + 100.0 + * (total.NumEPass + total.NumEFail) + / ( + total.NumEPass + + total.NumEFail + + total.NumUFail + + total.NumUPass + ) + ) + ), + ) + ) stream.write("=" * (maxSolverNameLen + 66) + "\n") logging.disable(logging.NOTSET) @@ -493,7 +527,7 @@ def run_scenarios(options): if __name__ == "__main__": print("") print("Testing model generation") - print("-"*30) + print("-" * 30) for key in sorted(all_models()): print(key) obj = all_models(key)() @@ -502,8 +536,7 @@ def run_scenarios(options): print("") print("Testing scenario generation") - print("-"*30) + print("-" * 30) for key, value in generate_scenarios(): print(", ".join(key)) print(" %s: %s" % (value.status, value.msg)) - diff --git a/pyomo/solvers/wrappers.py b/pyomo/solvers/wrappers.py index 29d07e64b6d..3b083f7a14f 100644 --- a/pyomo/solvers/wrappers.py +++ b/pyomo/solvers/wrappers.py @@ -9,9 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ -class MIPSolverWrapper(object): - def add(self, constraint): #pragma:nocover +class MIPSolverWrapper(object): + def add(self, constraint): # pragma:nocover pass - - diff --git a/pyomo/util/blockutil.py b/pyomo/util/blockutil.py index b487f05f686..52befea6ed5 100644 --- a/pyomo/util/blockutil.py +++ b/pyomo/util/blockutil.py @@ -31,9 +31,11 @@ def has_discrete_variables(block): def log_model_constraints(m, logger=logger, active=True): """Prints the model constraints in the model.""" for constr in m.component_data_objects( - ctype=Constraint, active=active, descend_into=True, - descent_order=TraversalStrategy.PrefixDepthFirstSearch): - logger.info("%s %s" % ( - constr.name, - ("active" if constr.active else "deactivated") - )) + ctype=Constraint, + active=active, + descend_into=True, + descent_order=TraversalStrategy.PrefixDepthFirstSearch, + ): + logger.info( + "%s %s" % (constr.name, ("active" if constr.active else "deactivated")) + ) diff --git a/pyomo/util/calc_var_value.py b/pyomo/util/calc_var_value.py index 8a151ba10d0..81bbd285dd2 100644 --- a/pyomo/util/calc_var_value.py +++ b/pyomo/util/calc_var_value.py @@ -9,16 +9,32 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +from pyomo.common.errors import IterationLimitError from pyomo.core.expr.numvalue import native_numeric_types, value, is_fixed from pyomo.core.expr.calculus.derivatives import differentiate from pyomo.core.base.constraint import Constraint, _ConstraintData import logging + logger = logging.getLogger(__name__) -def calculate_variable_from_constraint(variable, constraint, - eps=1e-8, iterlim=1000, - linesearch=True, alpha_min=1e-8): +_default_differentiation_mode = differentiate.Modes.sympy +_symbolic_modes = { + None, + differentiate.Modes.sympy, + differentiate.Modes.reverse_symbolic, +} + + +def calculate_variable_from_constraint( + variable, + constraint, + eps=1e-8, + iterlim=1000, + linesearch=True, + alpha_min=1e-8, + diff_mode=None, +): """Calculate the variable value given a specified equality constraint This function calculates the value of the specified variable @@ -55,6 +71,9 @@ def calculate_variable_from_constraint(variable, constraint, [default=True] alpha_min: `float` The minimum fractional step to use in the linesearch [default=1e-8]. + diff_mode: :py:enum:`pyomo.core.expr.calculus.derivatives.Modes` + The mode to use to differentiate the expression. If + unspecified, defaults to `Modes.sympy` Returns: -------- @@ -71,7 +90,7 @@ def calculate_variable_from_constraint(variable, constraint, upper = constraint.ub if lower != upper: - raise ValueError("Constraint must be an equality constraint") + raise ValueError(f"Constraint '{constraint}' must be an equality constraint") if variable.value is None: # Note that we use "skip_validation=True" here as well, as the @@ -98,7 +117,8 @@ def calculate_variable_from_constraint(variable, constraint, else: # set the initial value to the midpoint of the bounds variable.set_value( - (variable.lb+variable.ub)/2.0, skip_validation=True) + (variable.lb + variable.ub) / 2.0, skip_validation=True + ) # store the initial value to use later if necessary orig_initial_value = variable.value @@ -115,18 +135,21 @@ def calculate_variable_from_constraint(variable, constraint, except: logger.error( "Encountered an error evaluating the expression at the " - "initial guess.\n\tPlease provide a different initial guess.") + "initial guess.\n\tPlease provide a different initial guess." + ) raise - variable.set_value(x1 - (residual_1 - upper), skip_validation=True) - residual_2 = value(body, exception=False) - - # If we encounter an error while evaluating the expression at the - # linear intercept calculated assuming the derivative was 1. This - # is most commonly due to nonlinear expressions (like sqrt()) - # becoming invalid/complex. We will skip the rest of the - # "shortcuts" that assume the expression is linear and move directly - # to using Newton's method. + try: + variable.set_value(x1 - (residual_1 - upper), skip_validation=True) + residual_2 = value(body, exception=False) + except OverflowError: + # If we encounter an error while evaluating the expression at the + # linear intercept calculated assuming the derivative was 1. This + # is most commonly due to nonlinear expressions (like sqrt()) + # becoming invalid/complex. We will skip the rest of the + # "shortcuts" that assume the expression is linear and move directly + # to using Newton's method. + residual_2 = None if residual_2 is not None and type(residual_2) is not complex: # if the variable appears linearly with a coefficient of 1, then we @@ -140,11 +163,15 @@ def calculate_variable_from_constraint(variable, constraint, # Assume the variable appears linearly and calculate the coefficient x2 = value(variable) slope = float(residual_1 - residual_2) / (x1 - x2) - intercept = (residual_1 - upper) - slope*x1 + intercept = (residual_1 - upper) - slope * x1 if slope: - variable.set_value(-intercept/slope, skip_validation=True) + variable.set_value(-intercept / slope, skip_validation=True) body_val = value(body, exception=False) - if body_val is not None and abs(body_val - upper) < eps: + if ( + body_val is not None + and body_val.__class__ is not complex + and abs(body_val - upper) < eps + ): # Re-set the variable value to trigger any warnings WRT # the final variable state variable.set_value(variable.value) @@ -155,55 +182,92 @@ def calculate_variable_from_constraint(variable, constraint, # restore initial value variable.set_value(orig_initial_value, skip_validation=True) expr = body - upper - expr_deriv = differentiate(expr, wrt=variable, - mode=differentiate.Modes.sympy) - if type(expr_deriv) in native_numeric_types and expr_deriv == 0: - raise ValueError("Variable derivative == 0, cannot solve for variable") + expr_deriv = None + if diff_mode in _symbolic_modes: + try: + expr_deriv = differentiate( + expr, wrt=variable, mode=diff_mode or _default_differentiation_mode + ) + except: + if diff_mode is None: + # If the user didn't care how we differentiate, try to + # (mostly silently) revert to numeric differentiation. + logger.debug( + 'Calculating symbolic derivative of expression failed. ' + 'Reverting to numeric differentiation' + ) + diff_mode = differentiate.Modes.reverse_numeric + else: + raise + + if type(expr_deriv) in native_numeric_types and expr_deriv == 0: + raise ValueError( + f"Variable '{variable}' derivative == 0 in constraint " + f"'{constraint}', cannot solve for variable" + ) - if abs(value(expr_deriv)) < 1e-12: - raise RuntimeError( - 'Initial value for variable results in a derivative value that is ' - 'very close to zero.\n\tPlease provide a different initial guess.') + if expr_deriv is None: + fp0 = differentiate(expr, wrt=variable, mode=diff_mode) + else: + fp0 = value(expr_deriv) + + if abs(value(fp0)) < 1e-12: + raise ValueError( + f"Initial value for variable '{variable}' results in a derivative " + f"value for constraint '{constraint}' that is very close to zero.\n" + "\tPlease provide a different initial guess." + ) iter_left = iterlim fk = residual_1 - upper while abs(fk) > eps and iter_left: iter_left -= 1 if not iter_left: - raise RuntimeError( - "Iteration limit (%s) reached; remaining residual = %s" - % (iterlim, value(expr)) ) + raise IterationLimitError( + f"Iteration limit (%s) reached solving for variable '{variable}' " + f"using constraint '{constraint}'; remaining residual = %s" + % (iterlim, value(expr)) + ) # compute step xk = value(variable) try: fk = value(expr) if type(fk) is complex: - raise ValueError( - "Complex numbers are not allowed in Newton's method.") + raise ValueError("Complex numbers are not allowed in Newton's method.") except: # We hit numerical problems with the last step (possible if # the line search is turned off) logger.error( "Newton's method encountered an error evaluating the " - "expression.\n\tPlease provide a different initial guess " - "or enable the linesearch if you have not.") + f"expression for constraint '{constraint}'.\n\tPlease provide a " + "different initial guess or enable the linesearch if you have not." + ) raise - fpk = value(expr_deriv) + + if expr_deriv is None: + fpk = differentiate(expr, wrt=variable, mode=diff_mode) + else: + fpk = value(expr_deriv) + if abs(fpk) < 1e-12: + # TODO: should this raise a ValueError or a new + # DerivativeError (subclassing ArithmeticError)? raise RuntimeError( - "Newton's method encountered a derivative that was too " + "Newton's method encountered a derivative of constraint " + f"'{constraint}' with respect to variable '{variable}' that was too " "close to zero.\n\tPlease provide a different initial guess " - "or enable the linesearch if you have not.") - pk = -fk/fpk + "or enable the linesearch if you have not." + ) + pk = -fk / fpk alpha = 1.0 xkp1 = xk + alpha * pk variable.set_value(xkp1, skip_validation=True) # perform line search if linesearch: - c1 = 0.999 # ensure sufficient progress + c1 = 0.999 # ensure sufficient progress while alpha > alpha_min: # check if the value at xkp1 has sufficient reduction in # the residual @@ -214,7 +278,7 @@ def calculate_variable_from_constraint(variable, constraint, if type(fkp1) is complex: # We cannot perform computations on complex numbers fkp1 = None - if fkp1 is not None and fkp1**2 < c1*fk**2: + if fkp1 is not None and fkp1**2 < c1 * fk**2: # found an alpha value with sufficient reduction # continue to the next step fk = fkp1 @@ -227,9 +291,11 @@ def calculate_variable_from_constraint(variable, constraint, residual = value(expr, exception=False) if residual is None or type(residual) is complex: residual = "{function evaluation error}" - raise RuntimeError( - "Linesearch iteration limit reached; remaining " - "residual = %s." % (residual,)) + raise IterationLimitError( + f"Linesearch iteration limit reached solving for " + f"variable '{variable}' using constraint '{constraint}'; " + f"remaining residual = {residual}." + ) # # Re-set the variable value to trigger any warnings WRT the final # variable state diff --git a/pyomo/util/check_units.py b/pyomo/util/check_units.py index 5ac7c037cc0..be72493af3f 100644 --- a/pyomo/util/check_units.py +++ b/pyomo/util/check_units.py @@ -14,11 +14,26 @@ This module has some helpful methods to support checking units on Pyomo module objects. """ +import logging + from pyomo.core.base.units_container import units, UnitsError -from pyomo.core.base import (Objective, Constraint, Var, Param, - Suffix, Set, SetOf, RangeSet, Block, - ExternalFunction, Expression, - value, BooleanVar, BuildAction, BuildCheck) +from pyomo.core.base import ( + Objective, + Constraint, + Var, + Param, + Suffix, + Set, + SetOf, + RangeSet, + Block, + ExternalFunction, + Expression, + value, + BooleanVar, + BuildAction, + BuildCheck, +) from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.network import Port, Arc from pyomo.mpec import Complementarity @@ -26,6 +41,10 @@ from pyomo.core.expr.template_expr import IndexTemplate from pyomo.core.expr.numvalue import native_types from pyomo.util.components import iter_component +from pyomo.common.collections import ComponentSet + +logger = logging.getLogger(__name__) + def check_units_equivalent(*args): """ @@ -43,7 +62,7 @@ def check_units_equivalent(*args): Returns ------- - bool : True if all the expressions passed as argments have the same units + bool : True if all the expressions passed as arguments have the same units """ try: assert_units_equivalent(*args) @@ -51,6 +70,7 @@ def check_units_equivalent(*args): except UnitsError: return False + def assert_units_equivalent(*args): """ Raise an exception if the units are inconsistent within an @@ -73,7 +93,10 @@ def assert_units_equivalent(*args): if not units._equivalent_pint_units(pint_unit_compare, pint_unit): raise UnitsError( "Units between {} and {} are not consistent.".format( - str(pint_unit_compare), str(pint_unit))) + str(pint_unit_compare), str(pint_unit) + ) + ) + def _assert_units_consistent_constraint_data(condata): """ @@ -99,6 +122,7 @@ def _assert_units_consistent_constraint_data(condata): else: assert_units_equivalent(*args) + def _assert_units_consistent_arc_data(arcdata): """ Raise an exception if the any units do not match for the connected ports @@ -123,6 +147,7 @@ def _assert_units_consistent_arc_data(arcdata): else: assert_units_equivalent(svar, dvar) + def _assert_units_consistent_property_expr(obj): """ Check the .expr property of the object and raise @@ -130,6 +155,7 @@ def _assert_units_consistent_property_expr(obj): """ _assert_units_consistent_expression(obj.expr) + def _assert_units_consistent_expression(expr): """ Raise an exception if any units in expr are inconsistent. @@ -139,11 +165,12 @@ def _assert_units_consistent_expression(expr): pint_unit = units._get_pint_units(expr) # pyomo_unit = units.get_units(expr) + # Complementarities that are not in standard form do not # current work with the checking code. The Units container # should be modified to allow sum and relationals with zero # terms (e.g., unitless). Then this code can be enabled. -#def _assert_units_complementarity(cdata): +# def _assert_units_complementarity(cdata): # """ # Raise an exception if any units in either of the complementarity # expressions are inconsistent, and also check the standard block @@ -155,6 +182,7 @@ def _assert_units_consistent_expression(expr): # pyomo_unit, pint_unit = units._get_units_tuple(cdata._args[1]) # _assert_units_consistent_block(cdata) + def _assert_units_consistent_block(obj): """ This method gets all the components from the block @@ -164,9 +192,10 @@ def _assert_units_consistent_block(obj): for component in obj.component_objects(descend_into=False, active=True): assert_units_consistent(component) + _component_data_handlers = { Objective: _assert_units_consistent_property_expr, - Constraint: _assert_units_consistent_constraint_data, + Constraint: _assert_units_consistent_constraint_data, Var: _assert_units_consistent_expression, DerivativeVar: _assert_units_consistent_expression, Port: None, @@ -186,13 +215,14 @@ def _assert_units_consistent_block(obj): RangeSet: None, # TODO: Piecewise: _assert_units_consistent_piecewise, # TODO: SOSConstraint: _assert_units_consistent_sos, - # TODO: LogicalConstriant: _assert_units_consistent_logical, + # TODO: LogicalConstraint: _assert_units_consistent_logical, BuildAction: None, BuildCheck: None, # complementarities that are not in normal form are not working yet # see comment in test_check_units # TODO: Complementarity: _assert_units_complementarity - } +} + def assert_units_consistent(obj): """ @@ -218,13 +248,15 @@ def assert_units_consistent(obj): try: _assert_units_consistent_expression(obj) except UnitsError: - print('Units problem with expression {}'.format(obj)) + logger.error('Units problem with expression {}'.format(obj)) raise return # if object is not in our component handler, raise an exception if obj.ctype not in _component_data_handlers: - raise TypeError("Units checking not supported for object of type {}.".format(obj.ctype)) + raise TypeError( + "Units checking not supported for object of type {}.".format(obj.ctype) + ) # get the function form the list of handlers handler = _component_data_handlers[obj.ctype] @@ -237,12 +269,37 @@ def assert_units_consistent(obj): try: handler(cdata) except UnitsError: - print('Error in units when checking {}'.format(cdata)) + logger.error('Error in units when checking {}'.format(cdata)) raise else: + handler(obj) + + +def identify_inconsistent_units(block): + """ + This function generates a ComponentSet of all Constraints, Expressions, and Objectives + in a Block or model which have inconsistent units. + + Parameters + ---------- + block : Pyomo Block or Model to test + + Returns + ------ + ComponentSet : contains all Constraints, Expressions or Objectives which were + identified as having unit consistency issues + """ + # It would be nice (and more efficient) if there were a method that would check + # unit consistency and return a bool for success or failure. + # However, the underlying methods (at least as deep as I looked) all raise exceptions + # so we need to iterate over the block here and do a try/except for each component + + inconsistent_units = ComponentSet() + for obj in block.component_data_objects( + [Constraint, Expression, Objective], descend_into=True + ): try: - handler(obj) + assert_units_consistent(obj) except UnitsError: - print('Error in units when checking {}'.format(obj)) - raise - + inconsistent_units.add(obj) + return inconsistent_units diff --git a/pyomo/util/components.py b/pyomo/util/components.py index 73df9d9cc87..02ef8a30f64 100644 --- a/pyomo/util/components.py +++ b/pyomo/util/components.py @@ -9,6 +9,7 @@ # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ +from pyomo.core.base.reference import Reference from pyomo.common.collections import ComponentMap from pyomo.common.modeling import unique_component_name @@ -42,16 +43,57 @@ def rename_components(model, component_list, prefix): - need to add a check to see if someone accidentally passes a generator since this can lead to an infinite loop """ + # Need to collect any Reference first so that we can record the old mapping of data objects before renaming + refs = {} + for c in component_list: + if c.is_reference(): + refs[c] = {} + for k, v in c._data.items(): + refs[c][k] = (v.parent_block(), v.local_name) + + # Now rename all the non-Reference components name_map = ComponentMap() for c in component_list: # get the parent block - we will use this to ensure new names are # unique and to add the new "scaled" components + if not c.is_reference(): + # Skip References for now + parent = c.parent_block() + old_name = c.name + new_name = unique_component_name(parent, prefix + c.local_name) + parent.del_component(c) + parent.add_component(new_name, c) + name_map[c] = old_name + + # Finally, replace all the References with remapped equivalents + for c in refs: + # Get mapping of new component names + new_map = ComponentMap() + for k, v in refs[c].items(): + # Assume the data was renamed + new_data = v[0].find_component(prefix + v[1]) + + if new_data is None: + # If we couldn't find the new name, try the old one + # This might happen if the reference pointed to something outside + # the scope of the renaming + new_data = v[0].find_component(v[1]) + + if new_data is None: + # If we still haven't found it, give up + raise RuntimeError( + f"Unable to remap Reference {c.name} whilst renaming components." + ) + + new_map[k] = new_data + parent = c.parent_block() old_name = c.name new_name = unique_component_name(parent, prefix + c.local_name) parent.del_component(c) - parent.add_component(new_name, c) - name_map[c] = old_name + cnew = Reference(new_map) + parent.add_component(new_name, cnew) + name_map[cnew] = old_name return name_map diff --git a/pyomo/util/diagnostics.py b/pyomo/util/diagnostics.py index cf3775de4de..d4b7974b9da 100644 --- a/pyomo/util/diagnostics.py +++ b/pyomo/util/diagnostics.py @@ -12,8 +12,16 @@ def log_disjunct_values(m): """Prints the values of the disjunct indicator variables.""" for disj in m.component_data_objects( - ctype=Disjunct, active=True, descend_into=(Block, Disjunct), - descent_order=TraversalStrategy.PrefixDepthFirstSearch + ctype=Disjunct, + active=True, + descend_into=(Block, Disjunct), + descent_order=TraversalStrategy.PrefixDepthFirstSearch, ): - logger.info("%s %s%s" % (disj.name, disj.indicator_var.value, - " fixed" if disj.indicator_var.fixed else "")) + logger.info( + "%s %s%s" + % ( + disj.name, + disj.indicator_var.value, + " fixed" if disj.indicator_var.fixed else "", + ) + ) diff --git a/pyomo/util/infeasible.py b/pyomo/util/infeasible.py index 81b80ad3b7c..9c8196d1ff4 100644 --- a/pyomo/util/infeasible.py +++ b/pyomo/util/infeasible.py @@ -21,169 +21,360 @@ logger = logging.getLogger(__name__) -def log_infeasible_constraints( - m, tol=1E-6, logger=logger, - log_expression=False, log_variables=False -): - """Print the infeasible constraints in the model. - Uses the current model state. Uses pyomo.util.infeasible logger unless one - is provided. +def _check_infeasible(obj, val, tol): + if val is None: + # Undefined value due to missing variable value or evaluation error + return 4 + # Check for infeasibilities + infeasible = 0 + if obj.has_lb(): + lb = value(obj.lower, exception=False) + if lb is None: + infeasible |= 4 | 1 + elif lb - val > tol: + infeasible |= 1 + if obj.has_ub(): + ub = value(obj.upper, exception=False) + if ub is None: + infeasible |= 4 | 2 + elif val - ub > tol: + infeasible |= 2 + return infeasible + + +def find_infeasible_constraints(m, tol=1e-6): + """Find the infeasible constraints in the model. + + Uses the current model state. + + Parameters + ---------- + m: Block + Pyomo block or model to check + + tol: float + absolute feasibility tolerance + + Yields + ------ + constr: ConstraintData + The infeasible constraint object + + body_value: float or None + The numeric value of the constraint body (or None if there was an + error evaluating the expression) - Args: - m (Block): Pyomo block or model to check - tol (float): feasibility tolerance - log_expression (bool): If true, prints the constraint expression - log_variables (bool): If true, prints the constraint variable names and values + infeasible: int + A bitmask indicating which bound was infeasible (1 for the lower + bound, 2 for the upper bound, or 4 if the body or bound was + undefined) """ # Iterate through all active constraints on the model for constr in m.component_data_objects( - ctype=Constraint, active=True, descend_into=True): - constr_body_value = value(constr.body, exception=False) - constr_lb_value = value(constr.lower, exception=False) - constr_ub_value = value(constr.upper, exception=False) - - constr_undefined = False - equality_violated = False - lb_violated = False - ub_violated = False - - if constr_body_value is None: - # Undefined constraint body value due to missing variable value - constr_undefined = True - pass - else: - # Check for infeasibilities - if constr.equality: - if fabs(constr_lb_value - constr_body_value) >= tol: - equality_violated = True - else: - if constr.has_lb() and constr_lb_value - constr_body_value >= tol: - lb_violated = True - if constr.has_ub() and constr_body_value - constr_ub_value >= tol: - ub_violated = True + ctype=Constraint, active=True, descend_into=True + ): + body_value = value(constr.body, exception=False) + infeasible = _check_infeasible(constr, body_value, tol) + if infeasible: + yield constr, body_value, infeasible - if not any((constr_undefined, equality_violated, lb_violated, ub_violated)): - # constraint is fine. skip to next constraint - continue - output_dict = dict(name=constr.name) +def log_infeasible_constraints( + m, tol=1e-6, logger=logger, log_expression=False, log_variables=False +): + """Logs the infeasible constraints in the model. - log_template = "CONSTR {name}: {lb_value}{lb_operator}{body_value}{ub_operator}{ub_value}" - if log_expression: - log_template += "\n - EXPR: {lb_expr}{lb_operator}{body_expr}{ub_operator}{ub_expr}" - if log_variables: - vars_template = "\n - VAR {name}: {value}" - log_template += "{var_printout}" - constraint_vars = identify_variables(constr.body, include_fixed=True) - output_dict['var_printout'] = ''.join( - vars_template.format(name=v.name, value=v.value) for v in constraint_vars) - - output_dict['body_value'] = "missing variable value" if constr_undefined else constr_body_value - output_dict['body_expr'] = constr.body + Uses the current model state. Messages are logged at the INFO level. + + Parameters + ---------- + m: Block + Pyomo block or model to check + + tol: float + absolute feasibility tolerance + + logger: logging.Logger + Logger to output to; defaults to `pyomo.util.infeasible`. + + log_expression: bool + If true, prints the constraint expression + + log_variables: bool + If true, prints the constraint variable names and values + + """ + if logger.getEffectiveLevel() > logging.INFO: + logger.warning( + 'log_infeasible_constraints() called with a logger whose ' + 'effective level is higher than logging.INFO: no output ' + 'will be logged regardless of constraint feasibility' + ) + + for constr, body, infeas in find_infeasible_constraints(m, tol): if constr.equality: - output_dict['lb_value'] = output_dict['lb_expr'] = output_dict['lb_operator'] = "" - output_dict['ub_value'] = constr_ub_value - output_dict['ub_expr'] = constr.upper - if equality_violated: - output_dict['ub_operator'] = " =/= " - elif constr_undefined: - output_dict['ub_operator'] = " =?= " + lb = lb_expr = lb_op = "" + ub_expr = constr.upper + ub = value(ub_expr, exception=False) + if body is None: + ub_op = " =?= " + else: + ub_op = " =/= " else: if constr.has_lb(): - output_dict['lb_value'] = constr_lb_value - output_dict['lb_expr'] = constr.lower - if lb_violated: - output_dict['lb_operator'] = " logging.INFO: + logger.warning( + 'log_infeasible_bounds() called with a logger whose ' + 'effective level is higher than logging.INFO: no output ' + 'will be logged regardless of bound feasibility' + ) + + for var, infeas in find_infeasible_bounds(m, tol): + if infeas & 4: + logger.info(f"VAR {var.name}: {_evaluation_errors[infeas]}.") continue - if var.has_lb() and value(var.lb - var) >= tol: - logger.info('VAR {}: {} >/= LB {}'.format( - var.name, value(var), value(var.lb))) - if var.has_ub() and value(var - var.ub) >= tol: - logger.info('VAR {}: {} logging.INFO: + logger.warning( + 'log_close_to_bounds() called with a logger whose ' + 'effective level is higher than logging.INFO: no output ' + 'will be logged regardless of bound status' + ) + + for obj, val, close in find_close_to_bounds(m, tol): + if not close: + if obj.ctype is Var: + logger.debug(f"Skipping VAR {obj.name} with no assigned value.") + elif obj.ctype is Constraint: + logger.info(f"Skipping CONSTR {obj.name}: evaluation error.") + else: + logger.error(f"Object {obj.name} was neither a Var nor Constraint") + continue + if close & 1: + logger.info(f'{obj.name} near LB of {obj.lb}') + if close & 2: + logger.info(f'{obj.name} near UB of {obj.ub}') + return + + +@deprecated( + "log_active_constraints is deprecated. " + "Please use pyomo.util.blockutil.log_model_constraints()", + version="5.7.3", +) def log_active_constraints(m, logger=logger): log_model_constraints(m, logger) diff --git a/pyomo/util/model_size.py b/pyomo/util/model_size.py index 27ed22ade36..9575e327a74 100644 --- a/pyomo/util/model_size.py +++ b/pyomo/util/model_size.py @@ -14,7 +14,7 @@ from pyomo.common.collections import ComponentSet, Bunch from pyomo.core import Block, Constraint, Var -from pyomo.core.expr import current as EXPR +import pyomo.core.expr as EXPR from pyomo.gdp import Disjunct, Disjunction @@ -42,6 +42,7 @@ class ModelSizeReport(Bunch): Activated disjunctions follow the same rules as activated constraints. """ + pass @@ -62,18 +63,20 @@ def build_model_size_report(model): new_activated_constraints = ComponentSet() for container in new_containers: - (next_activated_disjunctions, - next_fixed_true_disjuncts, - next_activated_disjuncts, - next_activated_constraints - ) = _process_activated_container(container) + ( + next_activated_disjunctions, + next_fixed_true_disjuncts, + next_activated_disjuncts, + next_activated_constraints, + ) = _process_activated_container(container) new_activated_disjunctions.update(next_activated_disjunctions) new_activated_disjuncts.update(next_activated_disjuncts) new_fixed_true_disjuncts.update(next_fixed_true_disjuncts) new_activated_constraints.update(next_activated_constraints) - new_containers = ((new_activated_disjuncts - activated_disjuncts) | - (new_fixed_true_disjuncts - fixed_true_disjuncts)) + new_containers = (new_activated_disjuncts - activated_disjuncts) | ( + new_fixed_true_disjuncts - fixed_true_disjuncts + ) activated_disjunctions.update(new_activated_disjunctions) activated_disjuncts.update(new_activated_disjuncts) @@ -81,57 +84,60 @@ def build_model_size_report(model): activated_constraints.update(new_activated_constraints) activated_vars.update( - var for constr in activated_constraints - for var in EXPR.identify_variables( - constr.body, include_fixed=False)) + var + for constr in activated_constraints + for var in EXPR.identify_variables(constr.body, include_fixed=False) + ) activated_vars.update( - disj.indicator_var.get_associated_binary() - for disj in activated_disjuncts) + disj.indicator_var.get_associated_binary() for disj in activated_disjuncts + ) report.activated = Bunch() report.activated.variables = len(activated_vars) - report.activated.binary_variables = sum( - 1 for v in activated_vars if v.is_binary()) + report.activated.binary_variables = sum(1 for v in activated_vars if v.is_binary()) report.activated.integer_variables = sum( - 1 for v in activated_vars if v.is_integer() and not v.is_binary()) + 1 for v in activated_vars if v.is_integer() and not v.is_binary() + ) report.activated.continuous_variables = sum( - 1 for v in activated_vars if v.is_continuous()) + 1 for v in activated_vars if v.is_continuous() + ) report.activated.disjunctions = len(activated_disjunctions) report.activated.disjuncts = len(activated_disjuncts) report.activated.constraints = len(activated_constraints) report.activated.nonlinear_constraints = sum( - 1 for c in activated_constraints - if c.body.polynomial_degree() not in (1, 0)) + 1 for c in activated_constraints if c.body.polynomial_degree() not in (1, 0) + ) report.overall = Bunch() block_like = (Block, Disjunct) - all_vars = ComponentSet( - model.component_data_objects(Var, descend_into=block_like)) + all_vars = ComponentSet(model.component_data_objects(Var, descend_into=block_like)) report.overall.variables = len(all_vars) report.overall.binary_variables = sum(1 for v in all_vars if v.is_binary()) report.overall.integer_variables = sum( - 1 for v in all_vars if v.is_integer() and not v.is_binary()) - report.overall.continuous_variables = sum( - 1 for v in all_vars if v.is_continuous()) + 1 for v in all_vars if v.is_integer() and not v.is_binary() + ) + report.overall.continuous_variables = sum(1 for v in all_vars if v.is_continuous()) report.overall.disjunctions = sum( - 1 for d in model.component_data_objects( - Disjunction, descend_into=block_like)) + 1 for d in model.component_data_objects(Disjunction, descend_into=block_like) + ) report.overall.disjuncts = sum( - 1 for d in model.component_data_objects( - Disjunct, descend_into=block_like)) + 1 for d in model.component_data_objects(Disjunct, descend_into=block_like) + ) report.overall.constraints = sum( - 1 for c in model.component_data_objects( - Constraint, descend_into=block_like)) + 1 for c in model.component_data_objects(Constraint, descend_into=block_like) + ) report.overall.nonlinear_constraints = sum( - 1 for c in model.component_data_objects( - Constraint, descend_into=block_like) - if c.body.polynomial_degree() not in (1, 0)) + 1 + for c in model.component_data_objects(Constraint, descend_into=block_like) + if c.body.polynomial_degree() not in (1, 0) + ) report.warning = Bunch() report.warning.unassociated_disjuncts = sum( - 1 for d in model.component_data_objects( - Disjunct, descend_into=block_like) - if not d.indicator_var.fixed and d not in activated_disjuncts) + 1 + for d in model.component_data_objects(Disjunct, descend_into=block_like) + if not d.indicator_var.fixed and d not in activated_disjuncts + ) return report @@ -144,24 +150,33 @@ def log_model_size_report(model, logger=default_logger): def _process_activated_container(blk): """Process a container object, returning the new components found.""" new_fixed_true_disjuncts = ComponentSet( - disj for disj in blk.component_data_objects(Disjunct, active=True) - if disj.indicator_var.value and disj.indicator_var.fixed) + disj + for disj in blk.component_data_objects(Disjunct, active=True) + if disj.indicator_var.value and disj.indicator_var.fixed + ) new_activated_disjunctions = ComponentSet( - blk.component_data_objects(Disjunction, active=True)) + blk.component_data_objects(Disjunction, active=True) + ) new_activated_disjuncts = ComponentSet( - disj for disjtn in new_activated_disjunctions - for disj in _activated_disjuncts_in_disjunction(disjtn)) + disj + for disjtn in new_activated_disjunctions + for disj in _activated_disjuncts_in_disjunction(disjtn) + ) new_activated_constraints = ComponentSet( - blk.component_data_objects(Constraint, active=True)) + blk.component_data_objects(Constraint, active=True) + ) return ( new_activated_disjunctions, new_fixed_true_disjuncts, new_activated_disjuncts, - new_activated_constraints + new_activated_constraints, ) def _activated_disjuncts_in_disjunction(disjtn): """Retrieve generator of activated disjuncts on disjunction.""" - return (disj for disj in disjtn.disjuncts - if disj.active and not disj.indicator_var.fixed) + return ( + disj + for disj in disjtn.disjuncts + if disj.active and not disj.indicator_var.fixed + ) diff --git a/pyomo/util/report_scaling.py b/pyomo/util/report_scaling.py index 76a0ee9fc03..5b4a4df7c84 100644 --- a/pyomo/util/report_scaling.py +++ b/pyomo/util/report_scaling.py @@ -68,7 +68,9 @@ def _print_coefficients(comp_map): return s -def _check_coefficents(comp, expr, too_large, too_small, largs_coef_map, small_coef_map): +def _check_coefficients( + comp, expr, too_large, too_small, largs_coef_map, small_coef_map +): ders = reverse_sd(expr) for _v, _der in ders.items(): if isinstance(_v, _GeneralVarData): @@ -87,7 +89,9 @@ def _check_coefficents(comp, expr, too_large, too_small, largs_coef_map, small_c small_coef_map[comp].append((_v, der_lb, der_ub)) -def report_scaling(m: _BlockData, too_large: float = 5e4, too_small: float = 1e-6) -> bool: +def report_scaling( + m: _BlockData, too_large: float = 5e4, too_small: float = 1e-6 +) -> bool: """ This function logs potentially poorly scaled parts of the model. It requires that all variables be bounded. @@ -125,7 +129,14 @@ def report_scaling(m: _BlockData, too_large: float = 5e4, too_small: float = 1e- objs_with_small_coefficients = pyo.ComponentMap() for c in m.component_data_objects(pyo.Constraint, active=True, descend_into=True): - _check_coefficents(c, c.body, too_large, too_small, cons_with_large_coefficients, cons_with_small_coefficients) + _check_coefficients( + c, + c.body, + too_large, + too_small, + cons_with_large_coefficients, + cons_with_small_coefficients, + ) for c in m.component_data_objects(pyo.Constraint, active=True, descend_into=True): c_lb, c_ub = compute_bounds_on_expr(c.body) @@ -134,7 +145,14 @@ def report_scaling(m: _BlockData, too_large: float = 5e4, too_small: float = 1e- cons_with_large_bounds[c] = (c_lb, c_ub) for c in m.component_data_objects(pyo.Objective, active=True, descend_into=True): - _check_coefficents(c, c.expr, too_large, too_small, objs_with_large_coefficients, objs_with_small_coefficients) + _check_coefficients( + c, + c.expr, + too_large, + too_small, + objs_with_large_coefficients, + objs_with_small_coefficients, + ) s = '\n\n' @@ -168,13 +186,15 @@ def report_scaling(m: _BlockData, too_large: float = 5e4, too_small: float = 1e- for c, (c_lb, c_ub) in cons_with_large_bounds.items(): s += f'{c_lb:>12.2e}{c_ub:>12.2e} {str(c)}\n' - if (len(vars_without_bounds) > 0 - or len(vars_with_large_bounds) > 0 - or len(cons_with_large_coefficients) > 0 - or len(cons_with_small_coefficients) > 0 - or len(objs_with_small_coefficients) > 0 - or len(objs_with_large_coefficients) > 0 - or len(cons_with_large_bounds) > 0): + if ( + len(vars_without_bounds) > 0 + or len(vars_with_large_bounds) > 0 + or len(cons_with_large_coefficients) > 0 + or len(cons_with_small_coefficients) > 0 + or len(objs_with_small_coefficients) > 0 + or len(objs_with_large_coefficients) > 0 + or len(cons_with_large_bounds) > 0 + ): logger.info(s) return False return True diff --git a/pyomo/util/slices.py b/pyomo/util/slices.py index 441a480e046..0449acb3f2f 100644 --- a/pyomo/util/slices.py +++ b/pyomo/util/slices.py @@ -14,6 +14,7 @@ from pyomo.core.base.indexed_component_slice import IndexedComponent_slice from pyomo.core.base.global_set import UnindexedComponent_set + def _to_iterable(source): iterable_scalars = (str, bytes) if hasattr(source, '__iter__'): @@ -25,10 +26,11 @@ def _to_iterable(source): else: yield source + def get_component_call_stack(comp, context=None): """Get the call stack necessary to locate a `Component` - The call stack is a `list` of `tuple`s where the first entry is a + The call stack is a `list` of `tuple`s where the first entry is a code for `__getattr__` or `__getitem__`, using the same convention as `IndexedComponent_slice`. The second entry is the argument of the corresponding function. Following this sequence of calls from @@ -54,7 +56,7 @@ def get_component_call_stack(comp, context=None): # A component is not said to exist in "the context of" itself. call_stack = [] while comp.parent_block() is not None: - # If parent_block is None, comp is the root + # If parent_block is None, comp is the root # of the model, so we don't add anything else # to the call stack. if comp is context: @@ -72,25 +74,25 @@ def get_component_call_stack(comp, context=None): break # Add (get_attribute, name) to the call stack - call_stack.append(( - IndexedComponent_slice.get_attribute, - parent_component.local_name - )) + call_stack.append( + (IndexedComponent_slice.get_attribute, parent_component.local_name) + ) comp = comp.parent_block() return call_stack + def slice_component_along_sets(comp, sets, context=None): """Slice a component along the indices corresponding to some sets, wherever they appear in the component's block hierarchy. Given a component or component data object, for all parent components and parent blocks between the object and the `context` block, replace - any index corresponding to a set in `sets` with slices or an + any index corresponding to a set in `sets` with slices or an ellipsis. Parameters: ----------- - comp: `pyomo.core.base.component.Component` or + comp: `pyomo.core.base.component.Component` or `pyomo.core.base.component.ComponentData` Component whose parent structure to search and replace sets: `pyomo.common.collections.ComponentSet` @@ -102,7 +104,7 @@ def slice_component_along_sets(comp, sets, context=None): Returns: -------- `pyomo.core.base.indexed_component_slice.IndexedComponent_slice`: - Slice of `comp` with wildcards replacing the indices of `sets` + Slice of `comp` with wildcards replacing the indices of `sets` """ # Cast to ComponentSet so a tuple or list of sets is an appropriate @@ -136,6 +138,7 @@ def slice_component_along_sets(comp, sets, context=None): return sliced_comp + def replace_indices(index, location_set_map, sets): """Use `location_set_map` to replace values in `index` with slices or an Ellipsis. @@ -183,6 +186,7 @@ def replace_indices(index, location_set_map, sets): loc += 1 return tuple(new_index) + def get_location_set_map(index, index_set): """Map each value in an index to the set from which it originates @@ -226,7 +230,7 @@ def get_location_set_map(index, index_set): # Although in this case, the location of an index should # just be its position in the subsets list, so maybe # the info we need is actually more simple to obtain. - ) + ) subsets = list(index_set.subsets()) @@ -244,8 +248,8 @@ def get_location_set_map(index, index_set): dimen_none_set_coord = sub_coord break for i in range(dimen): - location_set_map[location+i] = sub - locations_left.remove(location+i) + location_set_map[location + i] = sub + locations_left.remove(location + i) location += dimen # We are either done or have encountered a set of dimen None @@ -263,20 +267,20 @@ def get_location_set_map(index, index_set): # Make sure this set is the same one we encountered # earlier. It is sufficient to check the coordinate. raise RuntimeError( - 'Cannot get locations when multiple sets of dimen==None ' - 'are present.' - '\nFound %s at position %s and %s at position %s.' - '\nLocation is ambiguous in this case.' - % (dimen_none_set, dimen_none_set_coord, sub, sub_coord) + 'Cannot get locations when multiple sets of dimen==None ' + 'are present.' + '\nFound %s at position %s and %s at position %s.' + '\nLocation is ambiguous in this case.' + % (dimen_none_set, dimen_none_set_coord, sub, sub_coord) ) break for i in range(dimen): - location_set_map[location-i] = sub - locations_left.remove(location-i) + location_set_map[location - i] = sub + locations_left.remove(location - i) location -= sub.dimen for loc in locations_left: - # All remaining locations, that cannot be accessed from some + # All remaining locations, that cannot be accessed from some # constant offset from the beginning or end of the tuple, # must belong to the dimen-None set. location_set_map[loc] = dimen_none_set diff --git a/pyomo/util/subsystems.py b/pyomo/util/subsystems.py index 079b7f48e62..673781def17 100644 --- a/pyomo/util/subsystems.py +++ b/pyomo/util/subsystems.py @@ -24,7 +24,6 @@ class _ExternalFunctionVisitor(StreamBasedExpressionVisitor): - def initializeWalker(self, expr): self._functions = [] self._seen = set() @@ -57,9 +56,7 @@ def identify_external_functions(expr): def add_local_external_functions(block): ef_exprs = [] - for comp in block.component_data_objects( - (Constraint, Expression), active=True - ): + for comp in block.component_data_objects((Constraint, Expression), active=True): ef_exprs.extend(identify_external_functions(comp.expr)) unique_functions = [] fcn_set = set() @@ -79,7 +76,7 @@ def add_local_external_functions(block): def create_subsystem_block(constraints, variables=None, include_fixed=False): - """ This function creates a block to serve as a subsystem with the + """This function creates a block to serve as a subsystem with the specified variables and constraints. To satisfy certain writers, other variables that appear in the constraints must be added to the block as well. We call these the "input vars." They may be thought of as @@ -120,7 +117,7 @@ def create_subsystem_block(constraints, variables=None, include_fixed=False): def generate_subsystem_blocks(subsystems, include_fixed=False): - """ Generates blocks that contain subsystems of variables and constraints. + """Generates blocks that contain subsystems of variables and constraints. Arguments --------- @@ -144,14 +141,14 @@ def generate_subsystem_blocks(subsystems, include_fixed=False): class TemporarySubsystemManager(object): - """ This class is a context manager for cases when we want to + """This class is a context manager for cases when we want to temporarily fix or deactivate certain variables or constraints in order to perform some solve or calculation with the resulting subsystem. """ - def __init__(self, to_fix=None, to_deactivate=None, to_reset=None): + def __init__(self, to_fix=None, to_deactivate=None, to_reset=None, to_unfix=None): """ Arguments --------- @@ -167,6 +164,10 @@ def __init__(self, to_fix=None, to_deactivate=None, to_reset=None): List of var data objects that should be reset to their original values on exit from this object's context context manager. + to_unfix: List + List of var data objects to be temporarily unfixed. These are + restored to their original status on exit from this object's + context manager. """ if to_fix is None: @@ -175,18 +176,31 @@ def __init__(self, to_fix=None, to_deactivate=None, to_reset=None): to_deactivate = [] if to_reset is None: to_reset = [] + if to_unfix is None: + to_unfix = [] + if not ComponentSet(to_fix).isdisjoint(ComponentSet(to_unfix)): + to_unfix_set = ComponentSet(to_unfix) + both = [var for var in to_fix if var in to_unfix_set] + var_names = "\n" + "\n".join([var.name for var in both]) + raise RuntimeError( + f"Conflicting instructions: The following variables are present" + " in both to_fix and to_unfix lists: {var_names}" + ) self._vars_to_fix = to_fix self._cons_to_deactivate = to_deactivate self._comps_to_set = to_reset + self._vars_to_unfix = to_unfix self._var_was_fixed = None self._con_was_active = None self._comp_original_value = None + self._var_was_unfixed = None def __enter__(self): to_fix = self._vars_to_fix to_deactivate = self._cons_to_deactivate to_set = self._comps_to_set - self._var_was_fixed = [(var, var.fixed) for var in to_fix] + to_unfix = self._vars_to_unfix + self._var_was_fixed = [(var, var.fixed) for var in to_fix + to_unfix] self._con_was_active = [(con, con.active) for con in to_deactivate] self._comp_original_value = [(comp, comp.value) for comp in to_set] @@ -196,11 +210,18 @@ def __enter__(self): for con in self._cons_to_deactivate: con.deactivate() + for var in self._vars_to_unfix: + # As of Pyomo 6.5, attempting to unfix an already unfixed var + # does not raise an exception. Here we rely on this behavior. + var.unfix() + return self def __exit__(self, ex_type, ex_val, ex_bt): for var, was_fixed in self._var_was_fixed: - if not was_fixed: + if was_fixed: + var.fix() + else: var.unfix() for con, was_active in self._con_was_active: if was_active: @@ -210,7 +231,7 @@ def __exit__(self, ex_type, ex_val, ex_bt): class ParamSweeper(TemporarySubsystemManager): - """ This class enables setting values of variables/parameters + """This class enables setting values of variables/parameters according to a provided sequence. Iterating over this object sets values to the next in the sequence, at which point a calculation may be performed and output values compared. @@ -243,14 +264,15 @@ class ParamSweeper(TemporarySubsystemManager): """ - def __init__(self, - n_scenario, - input_values, - output_values=None, - to_fix=None, - to_deactivate=None, - to_reset=None, - ): + def __init__( + self, + n_scenario, + input_values, + output_values=None, + to_fix=None, + to_deactivate=None, + to_reset=None, + ): """ Parameters ---------- @@ -276,7 +298,7 @@ def __init__(self, self.output_values = output self.n_scenario = n_scenario self.initial_state_values = None - self._ip = -1 # Index pointer for iteration + self._ip = -1 # Index pointer for iteration if to_reset is None: # Input values will be set repeatedly by iterating over this @@ -290,10 +312,8 @@ def __init__(self, to_reset.extend(var for var in output) super(ParamSweeper, self).__init__( - to_fix=to_fix, - to_deactivate=to_deactivate, - to_reset=to_reset, - ) + to_fix=to_fix, to_deactivate=to_deactivate, to_reset=to_reset + ) def __iter__(self): return self @@ -317,8 +337,8 @@ def __next__(self): var.set_value(val) inputs[var] = val - outputs = ComponentMap([ - (var, values[i]) for var, values in output_values.items() - ]) + outputs = ComponentMap( + [(var, values[i]) for var, values in output_values.items()] + ) return inputs, outputs diff --git a/pyomo/util/tests/test_blockutil.py b/pyomo/util/tests/test_blockutil.py index fa494a9056d..06b75bd6b68 100644 --- a/pyomo/util/tests/test_blockutil.py +++ b/pyomo/util/tests/test_blockutil.py @@ -18,9 +18,8 @@ from pyomo.common.log import LoggingIntercept from pyomo.environ import ConcreteModel, Constraint, Var, inequality -from pyomo.util.blockutil import ( - log_model_constraints, -) +from pyomo.util.blockutil import log_model_constraints + class TestBlockutil(unittest.TestCase): """Tests block utilities.""" @@ -42,8 +41,8 @@ def test_log_model_constraints(self): m.c10 = Constraint(expr=m.y >= 3, doc="Inactive") m.c10.deactivate() m.c11 = Constraint(expr=m.y <= m.y.value) - m.yy = Var(bounds=(0, 1), initialize=1E-7, doc="Close to lower bound") - m.y3 = Var(bounds=(0, 1E-7), initialize=0, doc="Bounds too close") + m.yy = Var(bounds=(0, 1), initialize=1e-7, doc="Close to lower bound") + m.y3 = Var(bounds=(0, 1e-7), initialize=0, doc="Bounds too close") m.y4 = Var(bounds=(0, 1), initialize=2, doc="Fixed out of bounds.") m.y4.fix() @@ -51,8 +50,15 @@ def test_log_model_constraints(self): with LoggingIntercept(output, 'pyomo.util', logging.INFO): log_model_constraints(m) expected_output = [ - "c1 active", "c2 active", "c3 active", "c4 active", - "c5 active", "c6 active", "c7 active", "c8 active", - "c9 active", "c11 active" + "c1 active", + "c2 active", + "c3 active", + "c4 active", + "c5 active", + "c6 active", + "c7 active", + "c8 active", + "c9 active", + "c11 active", ] self.assertEqual(expected_output, output.getvalue().splitlines()) diff --git a/pyomo/util/tests/test_calc_var_value.py b/pyomo/util/tests/test_calc_var_value.py index f7bdf863f7c..91f23dd5a5d 100644 --- a/pyomo/util/tests/test_calc_var_value.py +++ b/pyomo/util/tests/test_calc_var_value.py @@ -14,13 +14,38 @@ import pyomo.common.unittest as unittest +from pyomo.common.errors import IterationLimitError from pyomo.common.log import LoggingIntercept from pyomo.environ import ( - ConcreteModel, Var, Constraint, Param, value, exp, NonNegativeReals, + ConcreteModel, + Var, + Constraint, + Param, + ExternalFunction, + value, + exp, + NonNegativeReals, Binary, ) from pyomo.util.calc_var_value import calculate_variable_from_constraint from pyomo.core.expr.calculus.diff_with_sympy import differentiate_available +from pyomo.core.expr.calculus.derivatives import differentiate +from pyomo.core.expr.sympy_tools import sympy_available + + +all_diff_modes = [ + differentiate.Modes.sympy, + differentiate.Modes.reverse_symbolic, + differentiate.Modes.reverse_numeric, +] + + +def sum_sq(args, fixed, fgh): + f = sum(arg**2 for arg in args) + g = [2 * arg for arg in args] + h = None + return f, g, h + class Test_calc_var(unittest.TestCase): def test_initialize_value(self): @@ -29,69 +54,85 @@ def test_initialize_value(self): m.y = Var(initialize=0) m.c = Constraint(expr=m.x == 5) - m.x.set_value(None) - calculate_variable_from_constraint(m.x, m.c) - self.assertEqual(value(m.x), 5) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, m.c, diff_mode=mode) + self.assertEqual(value(m.x), 5) - m.x.set_value(None) m.x.setlb(3) - calculate_variable_from_constraint(m.x, m.c) - self.assertEqual(value(m.x), 5) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, m.c, diff_mode=mode) + self.assertEqual(value(m.x), 5) - m.x.set_value(None) m.x.setlb(-10) - calculate_variable_from_constraint(m.x, m.c) - self.assertEqual(value(m.x), 5) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, m.c, diff_mode=mode) + self.assertEqual(value(m.x), 5) - m.x.set_value(None) m.x.setub(10) - calculate_variable_from_constraint(m.x, m.c) - self.assertEqual(value(m.x), 5) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, m.c, diff_mode=mode) + self.assertEqual(value(m.x), 5) - m.x.set_value(None) m.x.setlb(3) - calculate_variable_from_constraint(m.x, m.c) - self.assertEqual(value(m.x), 5) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, m.c, diff_mode=mode) + self.assertEqual(value(m.x), 5) - m.x.set_value(None) m.x.setlb(None) - calculate_variable_from_constraint(m.x, m.c) - self.assertEqual(value(m.x), 5) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, m.c, diff_mode=mode) + self.assertEqual(value(m.x), 5) - m.x.set_value(None) m.x.setub(-10) - calculate_variable_from_constraint(m.x, m.c) - self.assertEqual(value(m.x), 5) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, m.c, diff_mode=mode) + self.assertEqual(value(m.x), 5) m.lt = Constraint(expr=m.x <= m.y) with self.assertRaisesRegex( - ValueError, "Constraint must be an equality constraint"): + ValueError, "Constraint 'lt' must be an equality constraint" + ): calculate_variable_from_constraint(m.x, m.lt) def test_linear(self): m = ConcreteModel() m.x = Var() - m.c = Constraint(expr=5*m.x == 10) + m.c = Constraint(expr=5 * m.x == 10) - calculate_variable_from_constraint(m.x, m.c) - self.assertEqual(value(m.x), 2) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, m.c, diff_mode=mode) + self.assertEqual(value(m.x), 2) def test_constraint_as_tuple(self): m = ConcreteModel() m.x = Var() m.p = Param(initialize=15, mutable=True) - calculate_variable_from_constraint(m.x, 5*m.x == 5) - self.assertEqual(value(m.x), 1) - calculate_variable_from_constraint(m.x, (5*m.x, 10)) - self.assertEqual(value(m.x), 2) - calculate_variable_from_constraint(m.x, (15, 5*m.x, m.p)) - self.assertEqual(value(m.x), 3) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, 5 * m.x == 5, diff_mode=mode) + self.assertEqual(value(m.x), 1) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, (5 * m.x, 10), diff_mode=mode) + self.assertEqual(value(m.x), 2) + for mode in all_diff_modes: + m.x.set_value(None) + calculate_variable_from_constraint(m.x, (15, 5 * m.x, m.p), diff_mode=mode) + self.assertEqual(value(m.x), 3) with self.assertRaisesRegex( - ValueError, "Constraint 'tuple' is a Ranged Inequality " - "with a variable upper bound."): - calculate_variable_from_constraint(m.x, (15, 5*m.x, m.x)) - + ValueError, + "Constraint 'tuple' is a Ranged Inequality with a variable upper bound.", + ): + calculate_variable_from_constraint(m.x, (15, 5 * m.x, m.x)) @unittest.skipIf(not differentiate_available, "this test requires sympy") def test_nonlinear(self): @@ -100,62 +141,98 @@ def test_nonlinear(self): m.y = Var(initialize=0) m.c = Constraint(expr=m.x**2 == 16) - m.x.set_value(1.0) # set an initial value - calculate_variable_from_constraint(m.x, m.c, linesearch=False) - self.assertAlmostEqual(value(m.x), 4) + for mode in all_diff_modes: + m.x.set_value(1.0) # set an initial value + calculate_variable_from_constraint( + m.x, m.c, linesearch=False, diff_mode=mode + ) + self.assertEqual(value(m.x), 4) # test that infeasible constraint throws error m.d = Constraint(expr=m.x**2 == -1) - m.x.set_value(1.25) # set the initial value - with self.assertRaisesRegex( - RuntimeError, r'Iteration limit \(10\) reached'): - calculate_variable_from_constraint( - m.x, m.d, iterlim=10, linesearch=False) + for mode in all_diff_modes: + m.x.set_value(1.25) # set the initial value + with self.assertRaisesRegex( + IterationLimitError, r'Iteration limit \(10\) reached' + ): + calculate_variable_from_constraint( + m.x, m.d, iterlim=10, linesearch=False, diff_mode=mode + ) # same problem should throw a linesearch error if linesearch is on - m.x.set_value(1.25) # set the initial value - with self.assertRaisesRegex( - RuntimeError, "Linesearch iteration limit reached"): - calculate_variable_from_constraint( - m.x, m.d, iterlim=10, linesearch=True) + for mode in all_diff_modes: + m.x.set_value(1.25) # set the initial value + with self.assertRaisesRegex( + IterationLimitError, "Linesearch iteration limit reached" + ): + calculate_variable_from_constraint( + m.x, m.d, iterlim=10, linesearch=True, diff_mode=mode + ) # same problem should raise an error if initialized at 0 - m.x = 0 - with self.assertRaisesRegex( - RuntimeError, "Initial value for variable results in a " - "derivative value that is very close to zero."): - calculate_variable_from_constraint(m.x, m.c) + for mode in all_diff_modes: + m.x = 0 + with self.assertRaisesRegex( + ValueError, + "Initial value for variable 'x' results in a " + "derivative value for constraint 'c' that is very close to zero.", + ): + calculate_variable_from_constraint(m.x, m.c, diff_mode=mode) # same problem should raise a value error if we are asked to # solve for a variable that is not present - with self.assertRaisesRegex( - ValueError, "Variable derivative == 0"): - calculate_variable_from_constraint(m.y, m.c) - + for mode in all_diff_modes: + if mode == differentiate.Modes.reverse_numeric: + # numeric differentiation should not be used to check if a + # derivative is always zero + with self.assertRaisesRegex( + ValueError, + "Initial value for variable 'y' results in a " + "derivative value for constraint 'c' that is very close to zero.", + ): + calculate_variable_from_constraint(m.y, m.c, diff_mode=mode) + else: + with self.assertRaisesRegex( + ValueError, "Variable 'y' derivative == 0 in constraint 'c'" + ): + calculate_variable_from_constraint(m.y, m.c, diff_mode=mode) # should succeed with or without a linesearch - m.e = Constraint(expr=(m.x - 2.0)**2 - 1 == 0) - m.x.set_value(3.1) - calculate_variable_from_constraint(m.x, m.e, linesearch=False) - self.assertAlmostEqual(value(m.x), 3) - - m.x.set_value(3.1) - calculate_variable_from_constraint(m.x, m.e, linesearch=True) - self.assertAlmostEqual(value(m.x), 3) - + m.e = Constraint(expr=(m.x - 2.0) ** 2 - 1 == 0) + for mode in all_diff_modes: + m.x.set_value(3.1) + calculate_variable_from_constraint( + m.x, m.e, linesearch=False, diff_mode=mode + ) + self.assertAlmostEqual(value(m.x), 3) + + for mode in all_diff_modes: + m.x.set_value(3.1) + calculate_variable_from_constraint( + m.x, m.e, linesearch=True, diff_mode=mode + ) + self.assertAlmostEqual(value(m.x), 3) # we expect this to succeed with the linesearch - m.f = Constraint(expr=1.0/(1.0+exp(-m.x))-0.5 == 0) - m.x.set_value(3.0) - calculate_variable_from_constraint(m.x, m.f, linesearch=True) - self.assertAlmostEqual(value(m.x), 0) + m.f = Constraint(expr=1.0 / (1.0 + exp(-m.x)) - 0.5 == 0) + for mode in all_diff_modes: + m.x.set_value(3.0) + calculate_variable_from_constraint( + m.x, m.f, linesearch=True, diff_mode=mode + ) + self.assertAlmostEqual(value(m.x), 0) # we expect this to fail without a linesearch - m.x.set_value(3.0) - with self.assertRaisesRegex( - RuntimeError, "Newton's method encountered a derivative " - "that was too close to zero"): - calculate_variable_from_constraint(m.x, m.f, linesearch=False) + for mode in all_diff_modes: + m.x.set_value(3.0) + with self.assertRaisesRegex( + RuntimeError, + "Newton's method encountered a derivative of constraint 'f' " + "with respect to variable 'x' that was too close to zero", + ): + calculate_variable_from_constraint( + m.x, m.f, linesearch=False, diff_mode=mode + ) # Calculate the bubble point of Benzene. THe first step # computed by calculate_variable_from_constraint will make the @@ -165,20 +242,26 @@ def test_nonlinear(self): m.x = Var() m.pc = 48.9e5 m.tc = 562.2 - m.psc = {'A': -6.98273, - 'B': 1.33213, - 'C': -2.62863, - 'D': -3.33399, - } + m.psc = {'A': -6.98273, 'B': 1.33213, 'C': -2.62863, 'D': -3.33399} m.p = 101325 + @m.Constraint() def f(m): - return m.pc * \ - exp((m.psc['A'] * (1 - m.x / m.tc) + - m.psc['B'] * (1 - m.x / m.tc)**1.5 + - m.psc['C'] * (1 - m.x / m.tc)**3 + - m.psc['D'] * (1 - m.x / m.tc)**6 - ) / (1 - (1 - m.x / m.tc))) - m.p == 0 + return ( + m.pc + * exp( + ( + m.psc['A'] * (1 - m.x / m.tc) + + m.psc['B'] * (1 - m.x / m.tc) ** 1.5 + + m.psc['C'] * (1 - m.x / m.tc) ** 3 + + m.psc['D'] * (1 - m.x / m.tc) ** 6 + ) + / (1 - (1 - m.x / m.tc)) + ) + - m.p + == 0 + ) + m.x.set_value(298.15) calculate_variable_from_constraint(m.x, m.f, linesearch=False) self.assertAlmostEqual(value(m.x), 353.31855602) @@ -193,19 +276,21 @@ def f(m): with LoggingIntercept(output, 'pyomo', logging.WARNING): with self.assertRaises(TypeError): calculate_variable_from_constraint(m.x, m.f, linesearch=False) - self.assertIn('Encountered an error evaluating the expression ' - 'at the initial guess', output.getvalue()) + self.assertIn( + 'Encountered an error evaluating the expression at the initial guess', + output.getvalue(), + ) # This example triggers an expression evaluation error if the # linesearch is turned off because the first step in Newton's # method will cause the LHS to become complex m = ConcreteModel() m.x = Var() - m.c = Constraint(expr=(1/m.x**3)**0.5 == 100) - m.x = .1 + m.c = Constraint(expr=(1 / m.x**3) ** 0.5 == 100) + m.x = 0.1 calculate_variable_from_constraint(m.x, m.c, linesearch=True) self.assertAlmostEqual(value(m.x), 0.046415888) - m.x = .1 + m.x = 0.1 output = StringIO() with LoggingIntercept(output, 'pyomo', logging.WARNING): with self.assertRaises(ValueError): @@ -216,8 +301,11 @@ def f(m): # raising a generic ValueError in # calculate_variable_from_constraint calculate_variable_from_constraint(m.x, m.c, linesearch=False) - self.assertIn("Newton's method encountered an error evaluating " - "the expression.", output.getvalue()) + self.assertIn( + "Newton's method encountered an error evaluating the expression for " + "constraint 'c'.", + output.getvalue(), + ) # This is a completely contrived example where the linesearch # hits the iteration limit before Newton's method ever finds a @@ -225,12 +313,13 @@ def f(m): m = ConcreteModel() m.x = Var() m.c = Constraint(expr=m.x**0.5 == -1e-8) - m.x = 1e-8#197.932807183 + m.x = 1e-8 # 197.932807183 with self.assertRaisesRegex( - RuntimeError, "Linesearch iteration limit reached; " - "remaining residual = {function evaluation error}"): - calculate_variable_from_constraint(m.x, m.c, linesearch=True, - alpha_min=.5) + IterationLimitError, + "Linesearch iteration limit reached solving for variable 'x' using " + "constraint 'c'; remaining residual = {function evaluation error}", + ): + calculate_variable_from_constraint(m.x, m.c, linesearch=True, alpha_min=0.5) def test_bound_violation(self): # Test Issue #2176: solving a constraint where the intermediate @@ -240,20 +329,24 @@ def test_bound_violation(self): m.c1 = Constraint(expr=m.v1 == 0) # Calculate value of v1 using constraint c1 - calculate_variable_from_constraint(m.v1, m.c1) - self.assertEqual(value(m.v1), 0) + for mode in all_diff_modes: + m.v1.set_value(None) + calculate_variable_from_constraint(m.v1, m.c1, diff_mode=mode) + self.assertEqual(value(m.v1), 0) # Calculate value of v1 using a scaled constraint c2 - m.c2 = Constraint(expr=m.v1*10 == 0) - m.v1.set_value(1) - calculate_variable_from_constraint(m.v1, m.c2) - self.assertEqual(value(m.v1), 0) + m.c2 = Constraint(expr=m.v1 * 10 == 0) + for mode in all_diff_modes: + m.v1.set_value(1) + calculate_variable_from_constraint(m.v1, m.c2, diff_mode=mode) + self.assertEqual(value(m.v1), 0) # Test linear solution falling outside bounds - m.c3 = Constraint(expr=m.v1*10 == -1) - m.v1.set_value(1) - calculate_variable_from_constraint(m.v1, m.c3) - self.assertEqual(value(m.v1), -0.1) + m.c3 = Constraint(expr=m.v1 * 10 == -1) + for mode in all_diff_modes: + m.v1.set_value(1) + calculate_variable_from_constraint(m.v1, m.c3, diff_mode=mode) + self.assertEqual(value(m.v1), -0.1) @unittest.skipUnless(differentiate_available, "this test requires sympy") def test_nonlinear_bound_violation(self): @@ -263,44 +356,46 @@ def test_nonlinear_bound_violation(self): # Test nonlinear solution falling outside bounds m.c4 = Constraint(expr=m.v1**3 == -8) - m.v1.set_value(1) - calculate_variable_from_constraint(m.v1, m.c4) - self.assertEqual(value(m.v1), -2) + for mode in all_diff_modes: + m.v1.set_value(1) + calculate_variable_from_constraint(m.v1, m.c4, diff_mode=mode) + self.assertEqual(value(m.v1), -2) def test_warn_final_value_linear(self): m = ConcreteModel() - m.x = Var(bounds=(0,1)) + m.x = Var(bounds=(0, 1)) m.c1 = Constraint(expr=m.x == 10) - m.c2 = Constraint(expr=5*m.x == 10) + m.c2 = Constraint(expr=5 * m.x == 10) with LoggingIntercept() as LOG: calculate_variable_from_constraint(m.x, m.c1) self.assertEqual( LOG.getvalue().strip(), - "Setting Var 'x' to a numeric value `10` outside the " - "bounds (0, 1).") + "Setting Var 'x' to a numeric value `10` outside the bounds (0, 1).", + ) self.assertEqual(value(m.x), 10) with LoggingIntercept() as LOG: calculate_variable_from_constraint(m.x, m.c2) self.assertEqual( LOG.getvalue().strip(), - "Setting Var 'x' to a numeric value `2.0` outside the " - "bounds (0, 1).") + "Setting Var 'x' to a numeric value `2.0` outside the bounds (0, 1).", + ) self.assertEqual(value(m.x), 2) @unittest.skipUnless(differentiate_available, "this test requires sympy") def test_warn_final_value_nonlinear(self): m = ConcreteModel() - m.x = Var(bounds=(0,1)) - m.c3 = Constraint(expr=(m.x - 3.5)**2 == 0) + m.x = Var(bounds=(0, 1)) + m.c3 = Constraint(expr=(m.x - 3.5) ** 2 == 0) with LoggingIntercept() as LOG: calculate_variable_from_constraint(m.x, m.c3) self.assertRegex( LOG.getvalue().strip(), r"Setting Var 'x' to a numeric value `[0-9\.]+` outside the " - r"bounds \(0, 1\).") + r"bounds \(0, 1\).", + ) self.assertAlmostEqual(value(m.x), 3.5, 3) m.x.domain = Binary @@ -308,6 +403,50 @@ def test_warn_final_value_nonlinear(self): calculate_variable_from_constraint(m.x, m.c3) self.assertRegex( LOG.getvalue().strip(), - r"Setting Var 'x' to a value `[0-9\.]+` \(float\) not in " - "domain Binary.") + r"Setting Var 'x' to a value `[0-9\.]+` \(float\) not in domain Binary.", + ) self.assertAlmostEqual(value(m.x), 3.5, 3) + + @unittest.skipUnless(differentiate_available, "this test requires sympy") + def test_nonlinear_overflow(self): + # Regression check to make sure calculate_variable_from_constraint + # can handle extreme non-linear cases where assuming linear behaviour + # results in OverflowErrors + m = ConcreteModel() + m.x = Var(initialize=1) + m.c = Constraint(expr=exp(1e2 * m.x**2) == 100) + + calculate_variable_from_constraint(m.x, m.c) + + self.assertAlmostEqual(value(m.x), 0.214597, 5) + + def test_external_function(self): + m = ConcreteModel() + m.x = Var() + m.sq = ExternalFunction(fgh=sum_sq) + m.c = Constraint(expr=m.sq(m.x - 3) == 0) + + with LoggingIntercept(level=logging.DEBUG) as LOG: + calculate_variable_from_constraint(m.x, m.c) + self.assertAlmostEqual(value(m.x), 3, 3) + self.assertEqual( + LOG.getvalue(), + "Calculating symbolic derivative of expression failed. " + "Reverting to numeric differentiation\n", + ) + + @unittest.skipUnless(sympy_available, 'test expects that sympy is available') + def test_external_function_explicit_sympy(self): + m = ConcreteModel() + m.x = Var() + m.sq = ExternalFunction(fgh=sum_sq) + m.c = Constraint(expr=m.sq(m.x - 3) == 0) + + with self.assertRaisesRegex( + TypeError, + r"Expressions containing external functions are not convertible " + r"to sympy expressions \(found 'f\(x0 - 3", + ): + calculate_variable_from_constraint( + m.x, m.c, diff_mode=differentiate.Modes.sympy + ) diff --git a/pyomo/util/tests/test_check_units.py b/pyomo/util/tests/test_check_units.py index 4637c995782..d2fb35c4f3b 100644 --- a/pyomo/util/tests/test_check_units.py +++ b/pyomo/util/tests/test_check_units.py @@ -14,73 +14,104 @@ import pyomo.common.unittest as unittest from pyomo.environ import ( - ConcreteModel, Var, Param, Set, Constraint, Objective, Expression, - Suffix, RangeSet, ExternalFunction, units, maximize, sin, cos, sqrt, + ConcreteModel, + Var, + Param, + Set, + Constraint, + Objective, + Expression, + Suffix, + RangeSet, + ExternalFunction, + units, + maximize, + sin, + cos, + sqrt, ) from pyomo.network import Port, Arc from pyomo.dae import ContinuousSet, DerivativeVar from pyomo.gdp import Disjunct, Disjunction -from pyomo.core.base.units_container import ( - pint_available, UnitsError, +from pyomo.core.base.units_container import pint_available, UnitsError +from pyomo.util.check_units import ( + assert_units_consistent, + assert_units_equivalent, + check_units_equivalent, + identify_inconsistent_units, ) -from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent, check_units_equivalent + def python_callback_function(arg1, arg2): return 42.0 -@unittest.skipIf(not pint_available, 'Testing units requires pint') + +@unittest.skipIf(not pint_available, "Testing units requires pint") class TestUnitsChecking(unittest.TestCase): def _create_model_and_vars(self): u = units m = ConcreteModel() m.dx = Var(units=u.m, initialize=0.10188943773836046) m.dy = Var(units=u.m, initialize=0.0) - m.vx = Var(units=u.m/u.s, initialize=0.7071067769802851) - m.vy = Var(units=u.m/u.s, initialize=0.7071067769802851) - m.t = Var(units=u.s, bounds=(1e-5,10.0), initialize=0.0024015570927624456) - m.theta = Var(bounds=(0, 0.49*3.14), initialize=0.7853981693583533, units=u.radians) - m.a = Param(initialize=-32.2, units=u.ft/u.s**2) + m.vx = Var(units=u.m / u.s, initialize=0.7071067769802851) + m.vy = Var(units=u.m / u.s, initialize=0.7071067769802851) + m.t = Var(units=u.s, bounds=(1e-5, 10.0), initialize=0.0024015570927624456) + m.theta = Var( + bounds=(0, 0.49 * 3.14), initialize=0.7853981693583533, units=u.radians + ) + m.a = Param(initialize=-32.2, units=u.ft / u.s**2) m.x_unitless = Var() return m - + def test_assert_units_consistent_equivalent(self): u = units m = ConcreteModel() m.dx = Var(units=u.m, initialize=0.10188943773836046) m.dy = Var(units=u.m, initialize=0.0) - m.vx = Var(units=u.m/u.s, initialize=0.7071067769802851) - m.vy = Var(units=u.m/u.s, initialize=0.7071067769802851) - m.t = Var(units=u.min, bounds=(1e-5,10.0), initialize=0.0024015570927624456) - m.theta = Var(bounds=(0, 0.49*3.14), initialize=0.7853981693583533, units=u.radians) - m.a = Param(initialize=-32.2, units=u.ft/u.s**2) + m.vx = Var(units=u.m / u.s, initialize=0.7071067769802851) + m.vy = Var(units=u.m / u.s, initialize=0.7071067769802851) + m.t = Var(units=u.min, bounds=(1e-5, 10.0), initialize=0.0024015570927624456) + m.theta = Var( + bounds=(0, 0.49 * 3.14), initialize=0.7853981693583533, units=u.radians + ) + m.a = Param(initialize=-32.2, units=u.ft / u.s**2) m.x_unitless = Var() - m.obj = Objective(expr = m.dx, sense=maximize) - m.vx_con = Constraint(expr = m.vx == 1.0*u.m/u.s*cos(m.theta)) - m.vy_con = Constraint(expr = m.vy == 1.0*u.m/u.s*sin(m.theta)) - m.dx_con = Constraint(expr = m.dx == m.vx*u.convert(m.t, to_units=u.s)) - m.dy_con = Constraint(expr = m.dy == m.vy*u.convert(m.t, to_units=u.s) - + 0.5*(u.convert(m.a, to_units=u.m/u.s**2))*(u.convert(m.t, to_units=u.s))**2) - m.ground = Constraint(expr = m.dy == 0) - m.unitless_con = Constraint(expr = m.x_unitless == 5.0) - - assert_units_consistent(m) # check model - assert_units_consistent(m.dx) # check var - this should never fail - assert_units_consistent(m.x_unitless) # check unitless var - this should never fail - assert_units_consistent(m.vx_con) # check constraint - assert_units_consistent(m.unitless_con) # check unitless constraint - - assert_units_equivalent(m.dx, m.dy) # check var - assert_units_equivalent(m.x_unitless, u.dimensionless) # check unitless var - assert_units_equivalent(m.x_unitless, None) # check unitless var - assert_units_equivalent(m.vx_con.body, u.m/u.s) # check constraint - assert_units_equivalent(m.unitless_con.body, u.dimensionless) # check unitless constraint - assert_units_equivalent(m.dx, m.dy) # check var - assert_units_equivalent(m.x_unitless, u.dimensionless) # check unitless var - assert_units_equivalent(m.x_unitless, None) # check unitless var - assert_units_equivalent(m.vx_con.body, u.m/u.s) # check constraint - - m.broken = Constraint(expr = m.dy == 42.0*u.kg) + m.obj = Objective(expr=m.dx, sense=maximize) + m.vx_con = Constraint(expr=m.vx == 1.0 * u.m / u.s * cos(m.theta)) + m.vy_con = Constraint(expr=m.vy == 1.0 * u.m / u.s * sin(m.theta)) + m.dx_con = Constraint(expr=m.dx == m.vx * u.convert(m.t, to_units=u.s)) + m.dy_con = Constraint( + expr=m.dy + == m.vy * u.convert(m.t, to_units=u.s) + + 0.5 + * (u.convert(m.a, to_units=u.m / u.s**2)) + * (u.convert(m.t, to_units=u.s)) ** 2 + ) + m.ground = Constraint(expr=m.dy == 0) + m.unitless_con = Constraint(expr=m.x_unitless == 5.0) + + assert_units_consistent(m) # check model + assert_units_consistent(m.dx) # check var - this should never fail + assert_units_consistent( + m.x_unitless + ) # check unitless var - this should never fail + assert_units_consistent(m.vx_con) # check constraint + assert_units_consistent(m.unitless_con) # check unitless constraint + + assert_units_equivalent(m.dx, m.dy) # check var + assert_units_equivalent(m.x_unitless, u.dimensionless) # check unitless var + assert_units_equivalent(m.x_unitless, None) # check unitless var + assert_units_equivalent(m.vx_con.body, u.m / u.s) # check constraint + assert_units_equivalent( + m.unitless_con.body, u.dimensionless + ) # check unitless constraint + assert_units_equivalent(m.dx, m.dy) # check var + assert_units_equivalent(m.x_unitless, u.dimensionless) # check unitless var + assert_units_equivalent(m.x_unitless, None) # check unitless var + assert_units_equivalent(m.vx_con.body, u.m / u.s) # check constraint + + m.broken = Constraint(expr=m.dy == 42.0 * u.kg) with self.assertRaises(UnitsError): assert_units_consistent(m) assert_units_consistent(m.dx) @@ -94,48 +125,55 @@ def test_assert_units_consistent_equivalent(self): def test_assert_units_consistent_on_datas(self): u = units m = ConcreteModel() - m.S = Set(initialize=[1,2,3]) + m.S = Set(initialize=[1, 2, 3]) m.x = Var(m.S, units=u.m) m.t = Var(m.S, units=u.s) - m.v = Var(m.S, units=u.m/u.s) + m.v = Var(m.S, units=u.m / u.s) m.unitless = Var(m.S) @m.Constraint(m.S) - def vel_con(m,i): - return m.v[i] == m.x[i]/m.t[i] + def vel_con(m, i): + return m.v[i] == m.x[i] / m.t[i] + @m.Constraint(m.S) - def unitless_con(m,i): + def unitless_con(m, i): return m.unitless[i] == 42.0 + @m.Constraint(m.S) - def sqrt_con(m,i): - return sqrt(m.v[i]) == sqrt(m.x[i]/m.t[i]) + def sqrt_con(m, i): + return sqrt(m.v[i]) == sqrt(m.x[i] / m.t[i]) assert_units_consistent(m) # check model assert_units_consistent(m.x) # check var assert_units_consistent(m.t) # check var assert_units_consistent(m.v) # check var assert_units_consistent(m.unitless) # check var - assert_units_consistent(m.vel_con) # check constraint - assert_units_consistent(m.unitless_con) # check unitless constraint + assert_units_consistent(m.vel_con) # check constraint + assert_units_consistent(m.unitless_con) # check unitless constraint assert_units_consistent(m.x[2]) # check var data assert_units_consistent(m.t[2]) # check var data assert_units_consistent(m.v[2]) # check var data assert_units_consistent(m.unitless[2]) # check var - assert_units_consistent(m.vel_con[2]) # check constraint data - assert_units_consistent(m.unitless_con[2]) # check unitless constraint data + assert_units_consistent(m.vel_con[2]) # check constraint data + assert_units_consistent(m.unitless_con[2]) # check unitless constraint data assert_units_equivalent(m.x[2], m.x[1]) # check var data assert_units_equivalent(m.t[2], u.s) # check var data - assert_units_equivalent(m.v[2], u.m/u.s) # check var data - assert_units_equivalent(m.unitless[2], u.dimensionless) # check var data unitless + assert_units_equivalent(m.v[2], u.m / u.s) # check var data + assert_units_equivalent( + m.unitless[2], u.dimensionless + ) # check var data unitless assert_units_equivalent(m.unitless[2], None) # check var - assert_units_equivalent(m.vel_con[2].body, u.m/u.s) # check constraint data - assert_units_equivalent(m.unitless_con[2].body, u.dimensionless) # check unitless constraint data + assert_units_equivalent(m.vel_con[2].body, u.m / u.s) # check constraint data + assert_units_equivalent( + m.unitless_con[2].body, u.dimensionless + ) # check unitless constraint data @m.Constraint(m.S) - def broken(m,i): - return m.x[i] == 42.0*m.v[i] + def broken(m, i): + return m.x[i] == 42.0 * m.v[i] + with self.assertRaises(UnitsError): assert_units_consistent(m) with self.assertRaises(UnitsError): @@ -148,52 +186,59 @@ def broken(m,i): assert_units_consistent(m.t) # check var assert_units_consistent(m.v) # check var assert_units_consistent(m.unitless) # check var - assert_units_consistent(m.vel_con) # check constraint - assert_units_consistent(m.unitless_con) # check unitless constraint + assert_units_consistent(m.vel_con) # check constraint + assert_units_consistent(m.unitless_con) # check unitless constraint assert_units_consistent(m.x[2]) # check var data assert_units_consistent(m.t[2]) # check var data assert_units_consistent(m.v[2]) # check var data assert_units_consistent(m.unitless[2]) # check var - assert_units_consistent(m.vel_con[2]) # check constraint data - assert_units_consistent(m.unitless_con[2]) # check unitless constraint data + assert_units_consistent(m.vel_con[2]) # check constraint data + assert_units_consistent(m.unitless_con[2]) # check unitless constraint data def test_assert_units_consistent_all_components(self): # test all scalar components consistent u = units m = self._create_model_and_vars() - m.obj = Objective(expr=m.dx/m.t - m.vx) - m.con = Constraint(expr=m.dx/m.t == m.vx) + m.obj = Objective(expr=m.dx / m.t - m.vx) + m.con = Constraint(expr=m.dx / m.t == m.vx) # vars already added - m.exp = Expression(expr=m.dx/m.t - m.vx) + m.exp = Expression(expr=m.dx / m.t - m.vx) m.suff = Suffix(direction=Suffix.LOCAL) # params already added # sets already added m.rs = RangeSet(5) m.disj1 = Disjunct() - m.disj1.constraint = Constraint(expr=m.dx/m.t <= m.vx) + m.disj1.constraint = Constraint(expr=m.dx / m.t <= m.vx) m.disj2 = Disjunct() - m.disj2.constraint = Constraint(expr=m.dx/m.t <= m.vx) + m.disj2.constraint = Constraint(expr=m.dx / m.t <= m.vx) m.disjn = Disjunction(expr=[m.disj1, m.disj2]) # block tested as part of model - m.extfn = ExternalFunction(python_callback_function, units=u.m/u.s, arg_units=[u.m, u.s]) - m.conext = Constraint(expr=m.extfn(m.dx, m.t) - m.vx==0) - m.cset = ContinuousSet(bounds=(0,1)) + m.extfn = ExternalFunction( + python_callback_function, units=u.m / u.s, arg_units=[u.m, u.s] + ) + m.conext = Constraint(expr=m.extfn(m.dx, m.t) - m.vx == 0) + m.cset = ContinuousSet(bounds=(0, 1)) m.svar = Var(m.cset, units=u.m) - m.dvar = DerivativeVar(sVar=m.svar, units=u.m/u.s) + m.dvar = DerivativeVar(sVar=m.svar, units=u.m / u.s) + def prt1_rule(m): - return {'avar': m.dx} + return {"avar": m.dx} + def prt2_rule(m): - return {'avar': m.dy} + return {"avar": m.dy} + m.prt1 = Port(rule=prt1_rule) m.prt2 = Port(rule=prt2_rule) + def arcrule(m): return dict(source=m.prt1, destination=m.prt2) + m.arc = Arc(rule=arcrule) # complementarities do not work yet # The expression system removes the u.m since it is multiplied by zero. - # We need to change the units_container to allow 0 when comparing units + # We need to change the units_container to allow 0 when comparing units # m.compl = Complementarity(expr=complements(m.dx/m.t >= m.vx, m.dx == 0*u.m)) assert_units_consistent(m) @@ -201,27 +246,75 @@ def arcrule(m): def test_units_roundoff_error(self): # Issue 2393: this example resulted in roundoff error where the # computed units for var_1 were - #(0.9999999999999986, + # (0.9999999999999986, # - #) + # ) m = ConcreteModel() m.var_1 = Var( initialize=400, - units=((units.J**0.4) * - (units.kg**0.2) * - (units.W**0.6) / - units.K / - (units.m**2.2) / - (units.Pa**0.2) / - (units.s**0.8)) + units=( + (units.J**0.4) + * (units.kg**0.2) + * (units.W**0.6) + / units.K + / (units.m**2.2) + / (units.Pa**0.2) + / (units.s**0.8) + ), ) m.var_1.fix() - m.var_2 = Var( - initialize=400, - units=units.kg/units.s**3/units.K - ) + m.var_2 = Var(initialize=400, units=units.kg / units.s**3 / units.K) assert_units_equivalent(m.var_1, m.var_2) + def test_identify_inconsistent_units(self): + u = units + m = ConcreteModel() + m.S = Set(initialize=[1, 2, 3]) + m.x = Var(units=u.m) + m.t = Var(units=u.s) + + # Scalar constraints + m.c1 = Constraint(expr=m.x == 10 * u.m) # Units consistent + m.c2 = Constraint(expr=m.x == m.t) # Units inconsistent + + # Indexed Constraint + @m.Constraint(m.S) + def c3(blk, i): + if i == 1: + return m.t == 10 * u.m # Inconsistent units + return m.t == 10 * u.s # Consistent units + + # Scalar Expressions + m.e1 = Expression(expr=m.x + 10 * u.m) # Units consistent + m.e2 = Expression(expr=m.x + m.t) # Units inconsistent + + # Indexed Expression + @m.Expression(m.S) + def e3(blk, i): + if i == 1: + return m.t + 10 * u.m # Inconsistent units + return m.t + 10 * u.s # Consistent units + + # Scalar Objectives + m.o1 = Objective(expr=m.x + 10 * u.m) # Units consistent + m.o2 = Objective(expr=m.x + m.t) # Units inconsistent + + # Indexed Objective + @m.Objective(m.S) + def o3(blk, i): + if i == 1: + return m.t + 10 * u.m # Inconsistent units + return m.t + 10 * u.s # Consistent units + + failures = identify_inconsistent_units(m) + + assert len(failures) == 6 + assert m.c2 in failures + assert m.c3[1] in failures + assert m.e2 in failures + assert m.e3[1] in failures + assert m.o2 in failures + assert m.o3[1] in failures if __name__ == "__main__": diff --git a/pyomo/util/tests/test_components.py b/pyomo/util/tests/test_components.py index 63d06cb08b6..92eb7dd5ef1 100644 --- a/pyomo/util/tests/test_components.py +++ b/pyomo/util/tests/test_components.py @@ -17,8 +17,8 @@ import pyomo.kernel as pmo from pyomo.util.components import iter_component, rename_components -class TestUtilComponents(unittest.TestCase): +class TestUtilComponents(unittest.TestCase): def test_rename_components(self): model = pyo.ConcreteModel() model.x = pyo.Var([1, 2, 3], bounds=(-10, 10), initialize=5.0) @@ -27,22 +27,32 @@ def test_rename_components(self): def con_rule(m, i): return m.x[i] + m.z == i + model.con = pyo.Constraint([1, 2, 3], rule=con_rule) model.zcon = pyo.Constraint(expr=model.z >= model.x[2]) model.b = pyo.Block() - model.b.bx = pyo.Var([1,2,3], initialize=42) + model.b.bx = pyo.Var([1, 2, 3], initialize=42) model.b.bz = pyo.Var(initialize=42) - c_list = list(model.component_objects(ctype=[pyo.Var,pyo.Constraint,pyo.Objective])) - name_map = rename_components(model=model, - component_list=c_list, - prefix='scaled_') + model.x_ref = pyo.Reference(model.x) + model.zcon_ref = pyo.Reference(model.zcon) + model.b.bx_ref = pyo.Reference(model.b.bx[2]) + + c_list = list( + model.component_objects(ctype=[pyo.Var, pyo.Constraint, pyo.Objective]) + ) + name_map = rename_components( + model=model, component_list=c_list, prefix='scaled_' + ) self.assertEqual(name_map[model.scaled_obj], 'obj') self.assertEqual(name_map[model.scaled_x], 'x') self.assertEqual(name_map[model.scaled_con], 'con') self.assertEqual(name_map[model.scaled_zcon], 'zcon') self.assertEqual(name_map[model.b.scaled_bz], 'b.bz') + self.assertEqual(name_map[model.scaled_x_ref], 'x_ref') + self.assertEqual(name_map[model.scaled_zcon_ref], 'zcon_ref') + self.assertEqual(name_map[model.b.scaled_bx_ref], 'b.bx_ref') self.assertEqual(model.scaled_obj.name, 'scaled_obj') self.assertEqual(model.scaled_x.name, 'scaled_x') @@ -51,6 +61,14 @@ def con_rule(m, i): self.assertEqual(model.b.name, 'b') self.assertEqual(model.b.scaled_bz.name, 'b.scaled_bz') + assert hasattr(model, "scaled_x_ref") + for i in model.scaled_x_ref: + assert model.scaled_x_ref[i] is model.scaled_x[i] + assert hasattr(model, "scaled_zcon_ref") + assert model.scaled_zcon_ref[None] is model.scaled_zcon + assert hasattr(model.b, "scaled_bx_ref") + assert model.b.scaled_bx_ref[None] is model.b.scaled_bx[2] + def assertSameComponents(self, obj, other_obj): for i, j in zip_longest(obj, other_obj): self.assertEqual(id(i), id(j)) diff --git a/pyomo/util/tests/test_infeasible.py b/pyomo/util/tests/test_infeasible.py index 3c9676736e9..cefc129b41e 100644 --- a/pyomo/util/tests/test_infeasible.py +++ b/pyomo/util/tests/test_infeasible.py @@ -17,9 +17,12 @@ import pyomo.common.unittest as unittest from pyomo.common.log import LoggingIntercept from pyomo.environ import ConcreteModel, Constraint, Var, inequality -from pyomo.util.infeasible import (log_active_constraints, log_close_to_bounds, - log_infeasible_bounds, - log_infeasible_constraints) +from pyomo.util.infeasible import ( + log_active_constraints, + log_close_to_bounds, + log_infeasible_bounds, + log_infeasible_constraints, +) class TestInfeasible(unittest.TestCase): @@ -42,8 +45,8 @@ def build_model(self): m.c10 = Constraint(expr=m.y >= 3, doc="Inactive") m.c10.deactivate() m.c11 = Constraint(expr=m.y <= m.y.value) - m.yy = Var(bounds=(0, 1), initialize=1E-7, doc="Close to lower bound") - m.y3 = Var(bounds=(0, 1E-7), initialize=0, doc="Bounds too close") + m.yy = Var(bounds=(0, 1), initialize=1e-7, doc="Close to lower bound") + m.y3 = Var(bounds=(0, 1e-7), initialize=0, doc="Bounds too close") m.y4 = Var(bounds=(0, 1), initialize=2, doc="Fixed out of bounds.") m.y4.fix() return m @@ -51,6 +54,16 @@ def build_model(self): def test_log_infeasible_constraints(self): """Test for logging of infeasible constraints.""" m = self.build_model() + + with LoggingIntercept(None, 'pyomo.util.infeasible') as LOG: + log_infeasible_constraints(m) + self.assertEqual( + 'log_infeasible_constraints() called with a logger whose ' + 'effective level is higher than logging.INFO: no output ' + 'will be logged regardless of constraint feasibility', + LOG.getvalue().strip(), + ) + output = StringIO() with LoggingIntercept(output, 'pyomo.util.infeasible', logging.INFO): log_infeasible_constraints(m) @@ -58,8 +71,8 @@ def test_log_infeasible_constraints(self): "CONSTR c1: 2.0 /= LB 2", "VAR x: 1 2: diff --git a/scripts/get_pyomo.py b/scripts/get_pyomo.py index 969346afb0b..a97c0ba3a00 100644 --- a/scripts/get_pyomo.py +++ b/scripts/get_pyomo.py @@ -13,10 +13,12 @@ # import sys + try: import pip + pip_version = pip.__version__.split('.') - for i,s in enumerate(pip_version): + for i, s in enumerate(pip_version): try: pip_version[i] = int(s) except: @@ -29,7 +31,7 @@ print("Installing Pyomo ...") -cmd = ['install',] +cmd = ['install'] # Disable the PIP download cache if pip_version[0] >= 6: cmd.append('--no-cache-dir') @@ -37,7 +39,7 @@ cmd.append('--download-cache') cmd.append('') # Allow the user to provide extra options -cmd.extend( sys.argv[1:] ) +cmd.extend(sys.argv[1:]) # install Pyomo cmd.append('Pyomo') diff --git a/scripts/get_pyomo_extras.py b/scripts/get_pyomo_extras.py index 7a2f2bd2299..d2aa097154a 100644 --- a/scripts/get_pyomo_extras.py +++ b/scripts/get_pyomo_extras.py @@ -27,7 +27,10 @@ callerFrame = inspect.stack()[0] _dir = os.path.join( dirname(dirname(abspath(inspect.getfile(callerFrame[0])))), - 'pyomo','scripting','plugins') + 'pyomo', + 'scripting', + 'plugins', + ) sys.path.insert(0, _dir) extras = __import__('extras') extras.install_extras() diff --git a/scripts/performance/compare.py b/scripts/performance/compare.py index 4c4e55052c0..5edef9bfadd 100755 --- a/scripts/performance/compare.py +++ b/scripts/performance/compare.py @@ -19,7 +19,8 @@ from math import sqrt, log10, floor from statistics import stdev, mean -#import scipy.stats as st + +# import scipy.stats as st # scipy.stats.norm.ppf(0.9) # 0.95 = 1.6448536269514722 # 0.90 = 1.2815515655446004 @@ -29,11 +30,12 @@ # Z-score: (mean(x) - mean(y)) / sqrt( # stdev(x)**2 / card(x) + stdev(y)**2 / card(y) ) + class Result(object): z_threshold = 1.6448536269514722 # 95% - #z_threshold = 1.2815515655446004 # 90% - #z_threshold = 0.8416212335729143 # 80% - #z_threshold = 0.6744897501960817 # 75% + # z_threshold = 1.2815515655446004 # 90% + # z_threshold = 0.8416212335729143 # 80% + # z_threshold = 0.6744897501960817 # 75% def __init__(self, test, base=None, relative=False, precision=None): self.test = test @@ -83,7 +85,7 @@ def test_base_value(self): def __float__(self): val, dev = self.value() if isinstance(val, str): - return 0. + return 0.0 return float(val) def __lt__(self, other): @@ -107,12 +109,16 @@ def tostr(self, width=0): precision = self.precision if width: - precision = max(0, min( - precision, - width - (2 if val >= 0 else 3) - ( - 1 if not val else floor(log10(abs(val)))) - )) - val_str = ('%%%d.%df' % (width, precision,)) % val + precision = max( + 0, + min( + precision, + width + - (2 if val >= 0 else 3) + - (1 if not val else floor(log10(abs(val)))), + ), + ) + val_str = ('%%%d.%df' % (width, precision)) % val if z > Result.z_threshold: if val < 0: return '\033[92m' + val_str + '\033[0m' @@ -136,6 +142,12 @@ def combine(*results): for result in results: for dataset in result[1:]: for test, result in dataset.items(): + if "::" not in test: + # Convert nosetests results in to pytest format + path, test = test.split(':') + test = ( + '/'.join(path.split('.')) + '.py::' + '::'.join(test.split('.')) + ) if test not in ans: ans[test] = {} testdata = ans[test] @@ -143,38 +155,79 @@ def combine(*results): if type(value) is dict: continue testdata.setdefault(metric, []).append(value) + # Nosetests and pytest would give different test names (based on + # where they started including path elements). We will assume that + # tests should be uniquely declared by the test file, class, and + # test name. So, any two tests where one name ends with the + # complete name of the other will be assumed to be the same test and + # combined. + for base in list(ans): + for test in ans: + if test == base: + break + if test.endswith(base): + testdata = ans[test] + otherdata = ans.pop(base) + for metric, value in otherdata.items(): + testdata.setdefault(metric, []).append(value) + break return ans + def compare(base_data, test_data): """Compare two data sets (generated by compare())""" + # Nosetests and pytest would give different test names (based on + # where they started including path elements). We will assume that + # tests should be uniquely declared by the test file, class, and + # test name. So, any two tests where one name ends with the + # complete name of the other will be assumed to be the same test. + # We will make both to the "more specific" (longer) name before + # comparing. + for base in list(base_data): + for test in test_data: + if base == test: + break + if test.endswith(base): + base_data[test] = base_data.pop(base) + break + if base.endswith(test): + test_data[base] = test_data.pop(test) + break + fields = set() for testname, base in base_data.items(): if testname not in test_data: continue fields.update(set(base).intersection(test_data[testname])) - fields = sorted(fields - {'test_time',}) + fields = sorted(fields - {'test_time'}) lines = [] for testname, base in base_data.items(): if testname not in test_data: continue test = test_data[testname] - lines.append([ - [ Result(testname) ], - [ Result(test['test_time']) ], - [ Result(test['test_time'], base['test_time']), - Result(test['test_time'], base['test_time'], relative=True)], - [ Result(test.get(field, None), base.get(field,None)) - for field in fields ], - [ Result(test.get(field, None)) for field in fields ] - ]) + lines.append( + [ + [Result(testname)], + [Result(test['test_time'])], + [ + Result(test['test_time'], base['test_time']), + Result(test['test_time'], base['test_time'], relative=True), + ], + [ + Result(test.get(field, None), base.get(field, None)) + for field in fields + ], + [Result(test.get(field, None)) for field in fields], + ] + ) lines.sort() return ( - [['test_name'], ['test_time'], ['time(\u0394)', 'time(%)'], - fields, fields], + [['test_name'], ['test_time'], ['time(\u0394)', 'time(%)'], fields, fields], lines, ) + def print_comparison(os, data): """Print the 'comparison' table from the data to os @@ -188,6 +241,7 @@ def print_comparison(os, data): """ _printer([2, 1, 3, 0], os, data) + def print_test_result(os, data): """Print the 'test result' table from the data to os @@ -201,28 +255,37 @@ def print_test_result(os, data): """ _printer([1, 4, 0], os, data) + def _printer(arglist, os, data): fields = sum((data[0][i] for i in arglist), []) - lines = [ sum((line[i] for i in arglist), []) for line in data[1] ] + lines = [sum((line[i] for i in arglist), []) for line in data[1]] field_w = [max(len(field), 7) for field in fields] - os.write(' '.join(('%%%ds' % w) % fields[i] - for i, w in enumerate(field_w)) + '\n') - os.write('-'*(len(field_w) + sum(field_w) - 1) + '\n') - cumul = [Result(0, 0, relative=v.relative) for i,v in enumerate(lines[0])] + os.write(' '.join(('%%%ds' % w) % fields[i] for i, w in enumerate(field_w)) + '\n') + os.write('-' * (len(field_w) + sum(field_w) - 1) + '\n') + cumul = [Result(0, 0, relative=v.relative) for i, v in enumerate(lines[0])] for line in sorted(lines): - os.write(' '.join(('%%%ds' % width) % line[i].tostr(width) - for i, width in enumerate(field_w)) + '\n') - for i,v in enumerate(line): + os.write( + ' '.join( + ('%%%ds' % width) % line[i].tostr(width) + for i, width in enumerate(field_w) + ) + + '\n' + ) + for i, v in enumerate(line): _test, _base = v.test_base_value() if isinstance(_test, str): continue cumul[i].test += _test cumul[i].base += _base - os.write('-'*(len(field_w) + sum(field_w) - 1) + '\n') + os.write('-' * (len(field_w) + sum(field_w) - 1) + '\n') cumul[-1].test = "[ TOTAL ]" - os.write(' '.join(('%%%ds' % width) % cumul[i].tostr(width) - for i, width in enumerate(field_w)) + '\n') + os.write( + ' '.join( + ('%%%ds' % width) % cumul[i].tostr(width) for i, width in enumerate(field_w) + ) + + '\n' + ) if not any(c.base for c in cumul): return for c in cumul[:-1]: @@ -230,8 +293,13 @@ def _printer(arglist, os, data): c.base = 0 c.relative = True cumul[-1].test = "[ %diff ]" - os.write(' '.join(('%%%ds' % width) % cumul[i].tostr(width) - for i, width in enumerate(field_w)) + '\n') + os.write( + ' '.join( + ('%%%ds' % width) % cumul[i].tostr(width) for i, width in enumerate(field_w) + ) + + '\n' + ) + if __name__ == '__main__': clean = '--clean' in sys.argv @@ -259,9 +327,8 @@ def _printer(arglist, os, data): for line in data[1]: name = line[0][0].test if 'nonpublic' in name: - line[0][0].test = ( - name[:name.find('.', name.find('nonpublic'))] - + (".%s" % n) + line[0][0].test = name[: name.find('.', name.find('nonpublic'))] + ( + ".%s" % n ) n += 1 print_test_result(sys.stdout, data) diff --git a/scripts/performance/compare_components.py b/scripts/performance/compare_components.py index ddd07f3dc42..f390fad8454 100644 --- a/scripts/performance/compare_components.py +++ b/scripts/performance/compare_components.py @@ -16,17 +16,19 @@ import time import pickle -from pyomo.kernel import (block, - block_list, - variable, - variable_list, - variable_dict, - constraint, - linear_constraint, - constraint_dict, - constraint_list, - matrix_constraint, - objective) +from pyomo.kernel import ( + block, + block_list, + variable, + variable_list, + variable_dict, + constraint, + linear_constraint, + constraint_dict, + constraint_list, + matrix_constraint, + objective, +) from pyomo.core.kernel.variable import IVariable from pyomo.core.base import Integers, RangeSet, Objective @@ -47,16 +49,18 @@ pympler_kwds = {} + def _fmt(num, suffix='B'): """format memory output""" if num is None: return "" - for unit in ['','K','M','G','T','P','E','Z']: + for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(num) < 1000.0: return "%3.1f %s%s" % (num, unit, suffix) num /= 1000.0 return "%.1f %s%s" % (num, 'Yi', suffix) + def measure(f, n=25): """measure average execution time over n trials""" gc.collect() @@ -78,6 +82,7 @@ def measure(f, n=25): gc.collect() return mem_bytes, time_seconds + def summarize(results): """neatly summarize output for comparison of several tests""" line = "%9s %50s %12s %7s %12s %7s" @@ -89,16 +94,12 @@ def summarize(results): time_factor = "" if i > 0: if initial_mem_b is not None: - mem_factor = "(%4.2fx)" % (float(mem_b)/initial_mem_b) + mem_factor = "(%4.2fx)" % (float(mem_b) / initial_mem_b) else: mem_factory = "" - time_factor = "(%4.2fx)" % (float(time_s)/initial_time_s) - print(line % (libname, - label, - _fmt(mem_b), - mem_factor, - time_s, - time_factor)) + time_factor = "(%4.2fx)" % (float(time_s) / initial_time_s) + print(line % (libname, label, _fmt(mem_b), mem_factor, time_s, time_factor)) + def build_Var(): """Build a Var and delete any references to external @@ -108,6 +109,7 @@ def build_Var(): obj._domain = None return obj + def build_GeneralVarData(): """Build a _GeneralVarData and delete any references to external objects so its size can be computed.""" @@ -115,122 +117,151 @@ def build_GeneralVarData(): obj._domain = None return obj + def build_variable(): """Build a variable with no references to external objects so its size can be computed.""" - return variable(domain_type=None, - lb=None, - ub=None) + return variable(domain_type=None, lb=None, ub=None) + class _staticvariable(IVariable): """An _example_ of a more lightweight variable.""" + _ctype = IVariable domain_type = None lb = None ub = None fixed = False stale = False - __slots__ = ("value","_parent","_storage_key","_active") + __slots__ = ("value", "_parent", "_storage_key", "_active") + def __init__(self): self.value = None self._parent = None self._storage_key = None self._active = True + def build_staticvariable(): """Build a static variable with no references to external objects so its size can be computed.""" return _staticvariable() + def build_Constraint(): """Build a Constraint and delete any references to external objects so its size can be computed.""" - expr = sum(x*c for x,c in zip(build_Constraint.xlist, - build_Constraint.clist)) + expr = sum(x * c for x, c in zip(build_Constraint.xlist, build_Constraint.clist)) obj = Constraint(expr=(0, expr, 1)) obj._parent = build_Constraint.dummy_parent obj.construct() obj._parent = None return obj + + build_Constraint.xlist = [build_GeneralVarData() for i in range(5)] build_Constraint.clist = [1.1 for i in range(5)] build_Constraint.dummy_parent = lambda: None + def build_GeneralConstraintData(): """Build a _GeneralConstraintData and delete any references to external objects so its size can be computed.""" - expr = sum(x*c for x,c in zip(build_Constraint.xlist, - build_Constraint.clist)) + expr = sum(x * c for x, c in zip(build_Constraint.xlist, build_Constraint.clist)) return _GeneralConstraintData(expr=(0, expr, 1)) + + build_Constraint.xlist = [build_GeneralVarData() for i in range(5)] build_Constraint.clist = [1.1 for i in range(5)] + def build_constraint(): """Build a constraint with no references to external objects so its size can be computed.""" - expr = sum(x*c for x,c in zip(build_constraint.xlist, - build_constraint.clist)) + expr = sum(x * c for x, c in zip(build_constraint.xlist, build_constraint.clist)) return constraint(lb=0, body=expr, ub=1) + + build_constraint.xlist = [build_variable() for i in range(5)] build_constraint.clist = [1.1 for i in range(5)] + def build_linear_constraint(): """Build a linear_constraint with no references to external objects so its size can be computed.""" - return linear_constraint(variables=build_linear_constraint.xlist, - coefficients=build_linear_constraint.clist, - lb=0, ub=1) + return linear_constraint( + variables=build_linear_constraint.xlist, + coefficients=build_linear_constraint.clist, + lb=0, + ub=1, + ) + + build_linear_constraint.xlist = [build_variable() for i in range(5)] build_linear_constraint.clist = [1.1 for i in range(5)] + def _bounds_rule(m, i): return (None, None) + + def _initialize_rule(m, i): return None + + def _reset(): build_indexed_Var.model = Block(concrete=True) - build_indexed_Var.model.ndx = RangeSet(0, N-1) + build_indexed_Var.model.ndx = RangeSet(0, N - 1) build_indexed_Var.bounds_rule = _bounds_rule build_indexed_Var.initialize_rule = _initialize_rule + + def build_indexed_Var(): """Build an indexed Var with no references to external objects so its size can be computed.""" model = build_indexed_Var.model - model.indexed_Var = Var(model.ndx, - domain=Integers, - bounds=build_indexed_Var.bounds_rule, - initialize=build_indexed_Var.initialize_rule) + model.indexed_Var = Var( + model.ndx, + domain=Integers, + bounds=build_indexed_Var.bounds_rule, + initialize=build_indexed_Var.initialize_rule, + ) model.indexed_Var._domain = None model.indexed_Var._component = None return model.indexed_Var + + build_indexed_Var.reset_for_test = _reset build_indexed_Var.reset_for_test() + def build_variable_dict(): """Build a variable_dict with no references to external objects so its size can be computed.""" return variable_dict( - ((i, variable(domain_type=None, lb=None, ub=None, value=None)) - for i in range(N))) + ( + (i, variable(domain_type=None, lb=None, ub=None, value=None)) + for i in range(N) + ) + ) + def build_variable_list(): """Build a variable_list with no references to external objects so its size can be computed.""" return variable_list( - variable(domain_type=None, lb=None, ub=None, value=None) - for i in range(N)) + variable(domain_type=None, lb=None, ub=None, value=None) for i in range(N) + ) + def build_staticvariable_list(): """Build a variable_list of static variables with no references to external objects so its size can be computed.""" - return variable_list(_staticvariable() - for i in range(N)) + return variable_list(_staticvariable() for i in range(N)) -A = scipy.sparse.random(N, N, - density=0.2, - format='csr', - dtype=float) + +A = scipy.sparse.random(N, N, density=0.2, format='csr', dtype=float) b = numpy.ones(N) # as lists A_data = A.data.tolist() @@ -240,75 +271,118 @@ def build_staticvariable_list(): X_aml = [build_GeneralVarData() for i in range(N)] X_kernel = [build_variable() for i in range(N)] + def _con_rule(m, i): # expr == rhs - return (sum(A_data[p]*X_aml[A_indices[p]] - for p in range(A_indptr[i], A_indptr[i+1])), 1) + return ( + sum( + A_data[p] * X_aml[A_indices[p]] for p in range(A_indptr[i], A_indptr[i + 1]) + ), + 1, + ) + + def _reset(): build_indexed_Constraint.model = Block(concrete=True) - build_indexed_Constraint.model.ndx = RangeSet(0, N-1) + build_indexed_Constraint.model.ndx = RangeSet(0, N - 1) build_indexed_Constraint.rule = _con_rule + + def build_indexed_Constraint(): """Build an indexed Constraint with no references to external objects so its size can be computed.""" model = build_indexed_Constraint.model - model.indexed_Constraint = Constraint(model.ndx, - rule=build_indexed_Constraint.rule) + model.indexed_Constraint = Constraint(model.ndx, rule=build_indexed_Constraint.rule) model.indexed_Constraint._component = None return model.indexed_Constraint + + build_indexed_Constraint.reset_for_test = _reset build_indexed_Constraint.reset_for_test() + def build_constraint_dict(): """Build a constraint_dict with no references to external objects so its size can be computed.""" return constraint_dict( - ((i, constraint(rhs=1, body=sum(A_data[p]*X_kernel[A_indices[p]] - for p in range(A_indptr[i], A_indptr[i+1])))) - for i in range(N))) + ( + ( + i, + constraint( + rhs=1, + body=sum( + A_data[p] * X_kernel[A_indices[p]] + for p in range(A_indptr[i], A_indptr[i + 1]) + ), + ), + ) + for i in range(N) + ) + ) + def build_constraint_list(): """Build a constraint_list with no references to external objects so its size can be computed.""" return constraint_list( - constraint(rhs=1, body=sum(A_data[p]*X_kernel[A_indices[p]] - for p in range(A_indptr[i], A_indptr[i+1]))) - for i in range(N)) + constraint( + rhs=1, + body=sum( + A_data[p] * X_kernel[A_indices[p]] + for p in range(A_indptr[i], A_indptr[i + 1]) + ), + ) + for i in range(N) + ) + def build_linear_constraint_list(): """Build a constraint_list of linear_constraints with no references to external objects so its size can be computed.""" return constraint_list( - linear_constraint(variables=(X_kernel[A_indices[p]] for p in range(A_indptr[i], A_indptr[i+1])), - coefficients=(A_data[p] for p in range(A_indptr[i], A_indptr[i+1])), - rhs=1) - for i in range(N)) + linear_constraint( + variables=( + X_kernel[A_indices[p]] for p in range(A_indptr[i], A_indptr[i + 1]) + ), + coefficients=(A_data[p] for p in range(A_indptr[i], A_indptr[i + 1])), + rhs=1, + ) + for i in range(N) + ) + def build_matrix_constraint(): """Build a constraint_list with no references to external objects so its size can be computed.""" return matrix_constraint(A, rhs=b, x=X_kernel) + def build_Block(): """Build a Block with a few components.""" obj = Block(concrete=True) obj.construct() return obj + def build_BlockData(): """Build a _BlockData with a few components.""" obj = _BlockData(build_BlockData.owner) obj._component = None return obj + + build_BlockData.owner = Block() + def build_block(): b = block() b._activate_large_storage_mode() return b + build_small_block = block + def build_Block_with_objects(): """Build an empty Block""" obj = Block(concrete=True) @@ -319,6 +393,7 @@ def build_Block_with_objects(): obj.o = Objective() return obj + def build_BlockData_with_objects(): """Build an empty _BlockData""" obj = _BlockData(build_BlockData_with_objects.owner) @@ -328,8 +403,11 @@ def build_BlockData_with_objects(): obj.o = Objective() obj._component = None return obj + + build_BlockData_with_objects.owner = Block() + def build_block_with_objects(): """Build an empty block.""" b = block() @@ -339,6 +417,7 @@ def build_block_with_objects(): b.o = objective() return b + def build_small_block_with_objects(): """Build an empty block.""" b = block() @@ -347,6 +426,7 @@ def build_small_block_with_objects(): b.o = objective() return b + def _indexed_Block_rule(b, i): b.x1 = Var() b.x1._domain = None @@ -359,18 +439,26 @@ def _indexed_Block_rule(b, i): b.x5 = Var() b.x5._domain = None return b + + def _reset(): build_indexed_BlockWVars.model = Block(concrete=True) - build_indexed_BlockWVars.model.ndx = RangeSet(0, N-1) + build_indexed_BlockWVars.model.ndx = RangeSet(0, N - 1) build_indexed_BlockWVars.indexed_Block_rule = _indexed_Block_rule + + def build_indexed_BlockWVars(): model = build_indexed_BlockWVars.model - model.indexed_Block = Block(model.ndx, - rule=build_indexed_BlockWVars.indexed_Block_rule) + model.indexed_Block = Block( + model.ndx, rule=build_indexed_BlockWVars.indexed_Block_rule + ) return model.indexed_Block + + build_indexed_BlockWVars.reset_for_test = _reset build_indexed_BlockWVars.reset_for_test() + def build_block_list_with_variables(): blist = block_list() for i in range(N): @@ -384,6 +472,7 @@ def build_block_list_with_variables(): blist.append(b) return blist + def _get_small_block(): b = block() b.x1 = variable(domain_type=None, lb=None, ub=None) @@ -393,12 +482,14 @@ def _get_small_block(): b.x5 = variable(domain_type=None, lb=None, ub=None) return b + def build_small_block_list_with_variables(): - return block_list( - build_small_block_list_with_variables.myblock() - for i in range(N)) + return block_list(build_small_block_list_with_variables.myblock() for i in range(N)) + + build_small_block_list_with_variables.myblock = _get_small_block + def _get_small_block_wstaticvars(): myvar = _staticvariable b = block() @@ -409,15 +500,17 @@ def _get_small_block_wstaticvars(): b.x5 = myvar() return b + def build_small_block_list_with_staticvariables(): return block_list( - build_small_block_list_with_staticvariables.myblock() - for i in range(N)) + build_small_block_list_with_staticvariables.myblock() for i in range(N) + ) + + build_small_block_list_with_staticvariables.myblock = _get_small_block_wstaticvars if __name__ == "__main__": - # # Compare construction time of different variable # implementations @@ -438,7 +531,9 @@ def build_small_block_list_with_staticvariables(): results.append(("AML", "Indexed Var (%s)" % N, measure(build_indexed_Var))) results.append(("Kernel", "variable_dict (%s)" % N, measure(build_variable_dict))) results.append(("Kernel", "variable_list (%s)" % N, measure(build_variable_list))) - results.append(("", "staticvariable_list (%s)" % N, measure(build_staticvariable_list))) + results.append( + ("", "staticvariable_list (%s)" % N, measure(build_staticvariable_list)) + ) summarize(results) print("") @@ -447,9 +542,19 @@ def build_small_block_list_with_staticvariables(): # implementations # results = [] - results.append(("AML", "Constraint()", measure(build_Constraint))) - results.append(("AML", "_GeneralConstraintData()", measure(build_GeneralConstraintData))) - results.append(("Kernel", "constraint()", measure(build_constraint))) + results.append( + ("AML", "Constraint()", measure(build_Constraint)) + ) + results.append( + ( + "AML", + "_GeneralConstraintData()", + measure(build_GeneralConstraintData), + ) + ) + results.append( + ("Kernel", "constraint()", measure(build_constraint)) + ) results.append(("Kernel", "linear_constraint", measure(build_linear_constraint))) summarize(results) print("") @@ -459,11 +564,25 @@ def build_small_block_list_with_staticvariables(): # container implementations # results = [] - results.append(("AML", "Indexed Constraint (%s)" % N, measure(build_indexed_Constraint))) - results.append(("Kernel", "constraint_dict (%s)" % N, measure(build_constraint_dict))) - results.append(("Kernel", "constraint_list (%s)" % N, measure(build_constraint_list))) - results.append(("Kernel", "linear_constraint_list (%s)" % N, measure(build_linear_constraint_list))) - results.append(("Kernel", "matrix_constraint (%s)" % N, measure(build_matrix_constraint))) + results.append( + ("AML", "Indexed Constraint (%s)" % N, measure(build_indexed_Constraint)) + ) + results.append( + ("Kernel", "constraint_dict (%s)" % N, measure(build_constraint_dict)) + ) + results.append( + ("Kernel", "constraint_list (%s)" % N, measure(build_constraint_list)) + ) + results.append( + ( + "Kernel", + "linear_constraint_list (%s)" % N, + measure(build_linear_constraint_list), + ) + ) + results.append( + ("Kernel", "matrix_constraint (%s)" % N, measure(build_matrix_constraint)) + ) summarize(results) print("") @@ -481,9 +600,19 @@ def build_small_block_list_with_staticvariables(): results = [] results.append(("AML", "Block w/ 3 components", measure(build_Block_with_objects))) - results.append(("AML", "_BlockData w/ 3 components", measure(build_BlockData_with_objects))) - results.append(("Kernel", "block w/ 3 components", measure(build_block_with_objects))) - results.append(("Kernel", "small_block w/ 3 components", measure(build_small_block_with_objects))) + results.append( + ("AML", "_BlockData w/ 3 components", measure(build_BlockData_with_objects)) + ) + results.append( + ("Kernel", "block w/ 3 components", measure(build_block_with_objects)) + ) + results.append( + ( + "Kernel", + "small_block w/ 3 components", + measure(build_small_block_with_objects), + ) + ) summarize(results) print("") @@ -492,12 +621,28 @@ def build_small_block_list_with_staticvariables(): # container implementations # results = [] - results.append(("AML", "Indexed Block (%s) w/ Vars (5)" % N, - measure(build_indexed_BlockWVars))) - results.append(("Kernel", "block_list (%s) w/ variables (5)" % N, - measure(build_block_list_with_variables))) - results.append(("Kernel", "small_block_list (%s) w/ variables (5)" % N, - measure(build_small_block_list_with_variables))) - results.append(("", "small_block_list (%s) w/ staticvariables (5)" % N, - measure(build_small_block_list_with_staticvariables))) + results.append( + ("AML", "Indexed Block (%s) w/ Vars (5)" % N, measure(build_indexed_BlockWVars)) + ) + results.append( + ( + "Kernel", + "block_list (%s) w/ variables (5)" % N, + measure(build_block_list_with_variables), + ) + ) + results.append( + ( + "Kernel", + "small_block_list (%s) w/ variables (5)" % N, + measure(build_small_block_list_with_variables), + ) + ) + results.append( + ( + "", + "small_block_list (%s) w/ staticvariables (5)" % N, + measure(build_small_block_list_with_staticvariables), + ) + ) summarize(results) diff --git a/scripts/performance/expr_perf.py b/scripts/performance/expr_perf.py index 3cd19994be5..6566431b9f3 100644 --- a/scripts/performance/expr_perf.py +++ b/scripts/performance/expr_perf.py @@ -5,17 +5,19 @@ from pyomo.environ import * import pyomo.version from pyomo.core.base.expr_common import _clear_expression_pool -from pyomo.core.base import expr as EXPR +from pyomo.core.base import expr as EXPR import pprint as pp import gc import time + try: import pympler - pympler_available=True + + pympler_available = True pympler_kwds = {} except: - pympler_available=False + pympler_available = False import sys import argparse @@ -35,36 +37,59 @@ # Dummy Sum() function used for Coopr3 tests # if coopr3_or_pyomo4: + def Sum(*args): return sum(*args) + class TimeoutError(Exception): pass + class timeout: def __init__(self, seconds=10, error_message='Timeout'): self.seconds = seconds self.error_message = error_message + def handle_timeout(self, signum, frame): raise TimeoutError(self.error_message) + def __enter__(self): signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(self.seconds) + def __exit__(self, type, value, traceback): signal.alarm(0) - _timeout = 20 -#NTerms = 100 -#N = 1 +# NTerms = 100 +# N = 1 NTerms = 100000 -N = 30 +N = 30 parser = argparse.ArgumentParser() -parser.add_argument("-o", "--output", help="Save results to the specified file", action="store", default=None) -parser.add_argument("--nterms", help="The number of terms in test expressions", action="store", type=int, default=None) -parser.add_argument("--ntrials", help="The number of test trials", action="store", type=int, default=None) +parser.add_argument( + "-o", + "--output", + help="Save results to the specified file", + action="store", + default=None, +) +parser.add_argument( + "--nterms", + help="The number of terms in test expressions", + action="store", + type=int, + default=None, +) +parser.add_argument( + "--ntrials", + help="The number of test trials", + action="store", + type=int, + default=None, +) args = parser.parse_args() if args.nterms: @@ -74,7 +99,6 @@ def __exit__(self, type, value, traceback): print("NTerms %d NTrials %d\n\n" % (NTerms, N)) - # # Execute a function 'n' times, collecting performance statistics and # averaging them @@ -92,11 +116,12 @@ def measure(f, n=25): for key in data[0]: d_ = [] for i in range(n): - d_.append( data[i][key] ) - ans[key] = {"mean": sum(d_)/float(n), "data": d_} + d_.append(data[i][key]) + ans[key] = {"mean": sum(d_) / float(n), "data": d_} # return ans + # # Evaluate standard operations on an expression # @@ -117,7 +142,7 @@ def evaluate(expr, seconds): start = time.time() expr = EXPR.compress_expression(expr, verbose=False) stop = time.time() - seconds['compress'] = stop-start + seconds['compress'] = stop - start seconds['compressed_size'] = expr.size() except TimeoutError: print("TIMEOUT") @@ -134,7 +159,7 @@ def evaluate(expr, seconds): start = time.time() expr_ = expr.clone() stop = time.time() - seconds['clone'] = stop-start + seconds['clone'] = stop - start except RecursionError: seconds['clone'] = -888.0 except TimeoutError: @@ -148,7 +173,7 @@ def evaluate(expr, seconds): start = time.time() d_ = expr.polynomial_degree() stop = time.time() - seconds['polynomial_degree'] = stop-start + seconds['polynomial_degree'] = stop - start except RecursionError: seconds['polynomial_degree'] = -888.0 except TimeoutError: @@ -162,7 +187,7 @@ def evaluate(expr, seconds): start = time.time() s_ = expr.is_constant() stop = time.time() - seconds['is_constant'] = stop-start + seconds['is_constant'] = stop - start except RecursionError: seconds['is_constant'] = -888.0 except TimeoutError: @@ -176,7 +201,7 @@ def evaluate(expr, seconds): start = time.time() s_ = expr.is_fixed() stop = time.time() - seconds['is_fixed'] = stop-start + seconds['is_fixed'] = stop - start except RecursionError: seconds['is_fixed'] = -888.0 except TimeoutError: @@ -188,11 +213,12 @@ def evaluate(expr, seconds): _clear_expression_pool() try: from pyomo.repn import generate_standard_repn + with timeout(seconds=_timeout): start = time.time() r_ = generate_standard_repn(expr, quadratic=False) stop = time.time() - seconds['generate_repn'] = stop-start + seconds['generate_repn'] = stop - start except RecursionError: seconds['generate_repn'] = -888.0 except ImportError: @@ -208,7 +234,7 @@ def evaluate(expr, seconds): start = time.time() s_ = expr.is_constant() stop = time.time() - seconds['is_constant'] = stop-start + seconds['is_constant'] = stop - start except RecursionError: seconds['is_constant'] = -888.0 except TimeoutError: @@ -222,7 +248,7 @@ def evaluate(expr, seconds): start = time.time() s_ = expr.is_fixed() stop = time.time() - seconds['is_fixed'] = stop-start + seconds['is_fixed'] = stop - start except RecursionError: seconds['is_fixed'] = -888.0 except TimeoutError: @@ -234,11 +260,12 @@ def evaluate(expr, seconds): _clear_expression_pool() try: from pyomo.repn import generate_ampl_repn + with timeout(seconds=_timeout): start = time.time() r_ = generate_ampl_repn(expr) stop = time.time() - seconds['generate_repn'] = stop-start + seconds['generate_repn'] = stop - start except RecursionError: seconds['generate_repn'] = -888.0 except ImportError: @@ -249,6 +276,7 @@ def evaluate(expr, seconds): return seconds + # # Evaluate standard operations on an expression # @@ -266,7 +294,7 @@ def evaluate_all(expr, seconds): for e in expr: e.clone() stop = time.time() - seconds['clone'] = stop-start + seconds['clone'] = stop - start except RecursionError: seconds['clone'] = -888.0 except TimeoutError: @@ -281,7 +309,7 @@ def evaluate_all(expr, seconds): for e in expr: e.polynomial_degree() stop = time.time() - seconds['polynomial_degree'] = stop-start + seconds['polynomial_degree'] = stop - start except RecursionError: seconds['polynomial_degree'] = -888.0 except TimeoutError: @@ -296,7 +324,7 @@ def evaluate_all(expr, seconds): for e in expr: e.is_constant() stop = time.time() - seconds['is_constant'] = stop-start + seconds['is_constant'] = stop - start except RecursionError: seconds['is_constant'] = -888.0 except TimeoutError: @@ -311,7 +339,7 @@ def evaluate_all(expr, seconds): for e in expr: e.is_fixed() stop = time.time() - seconds['is_fixed'] = stop-start + seconds['is_fixed'] = stop - start except RecursionError: seconds['is_fixed'] = -888.0 except TimeoutError: @@ -323,20 +351,22 @@ def evaluate_all(expr, seconds): _clear_expression_pool() if True: from pyomo.repn import generate_standard_repn + with timeout(seconds=_timeout): start = time.time() for e in expr: generate_standard_repn(e, quadratic=False) stop = time.time() - seconds['generate_repn'] = stop-start + seconds['generate_repn'] = stop - start try: from pyomo.repn import generate_standard_repn + with timeout(seconds=_timeout): start = time.time() for e in expr: generate_standard_repn(e, quadratic=False) stop = time.time() - seconds['generate_repn'] = stop-start + seconds['generate_repn'] = stop - start except RecursionError: seconds['generate_repn'] = -888.0 except ImportError: @@ -350,12 +380,13 @@ def evaluate_all(expr, seconds): _clear_expression_pool() try: from pyomo.repn import generate_ampl_repn + with timeout(seconds=_timeout): start = time.time() for e in expr: generate_ampl_repn(e) stop = time.time() - seconds['generate_repn'] = stop-start + seconds['generate_repn'] = stop - start except RecursionError: seconds['generate_repn'] = -888.0 except ImportError: @@ -366,11 +397,11 @@ def evaluate_all(expr, seconds): return seconds + # # Create a linear expression # def linear(N, flag): - def f(): seconds = {} @@ -397,52 +428,52 @@ def f(): stop = time.time() elif flag == 2: start = time.time() - expr=sum(model.p[i]*model.x[i] for i in model.A) + expr = sum(model.p[i] * model.x[i] for i in model.A) stop = time.time() elif flag == 3: start = time.time() - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.x[i] stop = time.time() elif flag == 4: start = time.time() - expr=0 + expr = 0 for i in model.A: expr = expr + model.p[i] * model.x[i] stop = time.time() elif flag == 5: start = time.time() - expr=0 + expr = 0 for i in model.A: expr = model.p[i] * model.x[i] + expr stop = time.time() elif flag == 6: start = time.time() - expr=Sum(model.p[i]*model.x[i] for i in model.A) + expr = Sum(model.p[i] * model.x[i] for i in model.A) stop = time.time() elif flag == 7: start = time.time() - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * (1 + model.x[i]) stop = time.time() elif flag == 8: start = time.time() - expr=0 + expr = 0 for i in model.A: - expr += (model.x[i]+model.x[i]) + expr += model.x[i] + model.x[i] stop = time.time() elif flag == 9: start = time.time() - expr=0 + expr = 0 for i in model.A: - expr += model.p[i]*(model.x[i]+model.x[i]) + expr += model.p[i] * (model.x[i] + model.x[i]) stop = time.time() elif flag == 12: start = time.time() with EXPR.linear_expression as expr: - expr=sum((model.p[i]*model.x[i] for i in model.A), expr) + expr = sum((model.p[i] * model.x[i] for i in model.A), expr) stop = time.time() elif flag == 13: start = time.time() @@ -469,7 +500,7 @@ def f(): expr += model.p[i] * (1 + model.x[i]) stop = time.time() # - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -481,11 +512,11 @@ def f(): return f + # # Create a linear expression # def simple_linear(N, flag): - def f(): seconds = {} @@ -508,52 +539,52 @@ def f(): stop = time.time() elif flag == 2: start = time.time() - expr=sum(model.p[i]*model.x[i] for i in model.A) + expr = sum(model.p[i] * model.x[i] for i in model.A) stop = time.time() elif flag == 3: start = time.time() - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.x[i] stop = time.time() elif flag == 4: start = time.time() - expr=0 + expr = 0 for i in model.A: expr = expr + model.p[i] * model.x[i] stop = time.time() elif flag == 5: start = time.time() - expr=0 + expr = 0 for i in model.A: expr = model.p[i] * model.x[i] + expr stop = time.time() elif flag == 6: start = time.time() - expr=Sum(model.p[i]*model.x[i] for i in model.A) + expr = Sum(model.p[i] * model.x[i] for i in model.A) stop = time.time() elif flag == 7: start = time.time() - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * (1 + model.x[i]) stop = time.time() elif flag == 8: start = time.time() - expr=0 + expr = 0 for i in model.A: - expr += (model.x[i]+model.x[i]) + expr += model.x[i] + model.x[i] stop = time.time() elif flag == 9: start = time.time() - expr=0 + expr = 0 for i in model.A: - expr += model.p[i]*(model.x[i]+model.x[i]) + expr += model.p[i] * (model.x[i] + model.x[i]) stop = time.time() elif flag == 12: start = time.time() with EXPR.linear_expression as expr: - expr=sum((model.p[i]*model.x[i] for i in model.A), expr) + expr = sum((model.p[i] * model.x[i] for i in model.A), expr) stop = time.time() elif flag == 13: start = time.time() @@ -580,7 +611,7 @@ def f(): expr += model.p[i] * (1 + model.x[i]) stop = time.time() # - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -592,11 +623,11 @@ def f(): return f + # # Create a nested linear expression # def nested_linear(N, flag): - def f(): seconds = {} @@ -618,29 +649,31 @@ def f(): start = time.time() # if flag == 1: - expr = 2* sum_product(model.p, model.x) + expr = 2 * sum_product(model.p, model.x) elif flag == 2: - expr= 2 * sum(model.p[i]*model.x[i] for i in model.A) + expr = 2 * sum(model.p[i] * model.x[i] for i in model.A) elif flag == 3: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.x[i] expr *= 2 elif flag == 4: - expr=0 + expr = 0 for i in model.A: expr = expr + model.p[i] * model.x[i] expr *= 2 elif flag == 5: - expr=0 + expr = 0 for i in model.A: expr = model.p[i] * model.x[i] + expr expr *= 2 elif flag == 6: - expr= 2 * Sum(model.p[i]*model.x[i] for i in model.A) + expr = 2 * Sum(model.p[i] * model.x[i] for i in model.A) elif flag == 12: with EXPR.linear_expression as expr: - expr= 2 * sum((model.p[i]*model.x[i] for i in model.A), expr) + expr = 2 * sum( + (model.p[i] * model.x[i] for i in model.A), expr + ) elif flag == 13: with EXPR.linear_expression as expr: for i in model.A: @@ -658,7 +691,7 @@ def f(): expr *= 2 # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -675,7 +708,6 @@ def f(): # Create a constant expression from mutable parameters # def constant(N, flag): - def f(): seconds = {} @@ -696,23 +728,23 @@ def f(): if flag == 1: expr = sum_product(model.p, model.q, index=model.A) elif flag == 2: - expr=sum(model.p[i]*model.q[i] for i in model.A) + expr = sum(model.p[i] * model.q[i] for i in model.A) elif flag == 3: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.q[i] elif flag == 4: - expr=Sum(model.p[i]*model.q[i] for i in model.A) + expr = Sum(model.p[i] * model.q[i] for i in model.A) elif flag == 12: with EXPR.linear_expression as expr: - expr=sum((model.p[i]*model.q[i] for i in model.A), expr) + expr = sum((model.p[i] * model.q[i] for i in model.A), expr) elif flag == 13: with EXPR.linear_expression as expr: for i in model.A: expr += model.p[i] * model.q[i] # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -729,7 +761,6 @@ def f(): # Create a bilinear expression # def bilinear(N, flag): - def f(): seconds = {} @@ -756,23 +787,30 @@ def f(): if flag == 1: expr = sum_product(model.p, model.x, model.y) elif flag == 2: - expr=sum(model.p[i]*model.x[i]*model.y[i] for i in model.A) + expr = sum( + model.p[i] * model.x[i] * model.y[i] for i in model.A + ) elif flag == 3: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.x[i] * model.y[i] elif flag == 4: - expr=Sum(model.p[i]*model.x[i]*model.y[i] for i in model.A) + expr = Sum( + model.p[i] * model.x[i] * model.y[i] for i in model.A + ) elif flag == 12: with EXPR.quadratic_expression as expr: - expr=sum((model.p[i]*model.x[i]*model.y[i] for i in model.A), expr) + expr = sum( + (model.p[i] * model.x[i] * model.y[i] for i in model.A), + expr, + ) elif flag == 13: with EXPR.quadratic_expression as expr: for i in model.A: expr += model.p[i] * model.x[i] * model.y[i] # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -789,7 +827,6 @@ def f(): # Create a simple nonlinear expression # def nonlinear(N, flag): - def f(): seconds = {} @@ -812,23 +849,25 @@ def f(): start = time.time() # if flag == 2: - expr=sum(model.p[i]*tan(model.x[i]) for i in model.A) + expr = sum(model.p[i] * tan(model.x[i]) for i in model.A) elif flag == 3: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * tan(model.x[i]) elif flag == 4: - expr=Sum(model.p[i]*tan(model.x[i]) for i in model.A) + expr = Sum(model.p[i] * tan(model.x[i]) for i in model.A) if flag == 12: with EXPR.nonlinear_expression as expr: - expr=sum((model.p[i]*tan(model.x[i]) for i in model.A), expr) + expr = sum( + (model.p[i] * tan(model.x[i]) for i in model.A), expr + ) elif flag == 13: with EXPR.nonlinear_expression as expr: for i in model.A: expr += model.p[i] * tan(model.x[i]) # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -845,7 +884,6 @@ def f(): # Create an expression that is a complex polynomial # def polynomial(N, flag): - def f(): seconds = {} @@ -864,12 +902,12 @@ def f(): start = time.time() # if True: - expr=0 + expr = 0 for i in model.A: expr = model.x[i] * (1 + expr) # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -886,7 +924,6 @@ def f(): # Create an expression that is a large product # def product(N, flag): - def f(): seconds = {} @@ -905,18 +942,18 @@ def f(): start = time.time() # if flag == 1: - expr=model.x+model.x + expr = model.x + model.x for i in model.A: - expr = model.p[i]*expr + expr = model.p[i] * expr elif flag == 2: - expr=model.x+model.x + expr = model.x + model.x for i in model.A: expr *= model.p[i] elif flag == 3: - expr=(model.x+model.x) * prod(model.p[i] for i in model.A) + expr = (model.x + model.x) * prod(model.p[i] for i in model.A) # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate(expr, seconds) except RecursionError: @@ -933,7 +970,6 @@ def f(): # Create many small linear expressions # def many_linear(N, flag): - def f(): seconds = {} @@ -954,10 +990,10 @@ def f(): expr = [] if flag == 2: for i in model.A: - expr.append( model.x[1] + model.x[i] ) + expr.append(model.x[1] + model.x[i]) # stop = time.time() - seconds['construction'] = stop-start + seconds['construction'] = stop - start seconds['nclones'] = ctr.count - nclones seconds = evaluate_all(expr, seconds) try: @@ -971,6 +1007,7 @@ def f(): return f + # # Utility function used by runall() # @@ -982,277 +1019,272 @@ def print_results(factors_, ans_, output): # -# Run the experiments and populate the dictionary 'res' +# Run the experiments and populate the dictionary 'res' # with the mapping: factors -> performance results # # Performance results are a mapping: name -> seconds # def runall(factors, res, output=True): - if True: - factors_ = tuple(factors+['ManyLinear','Loop 2']) + factors_ = tuple(factors + ['ManyLinear', 'Loop 2']) ans_ = res[factors_] = measure(many_linear(NTerms, 2), n=N) print_results(factors_, ans_, output) if True: - factors_ = tuple(factors+['Constant','Loop 1']) + factors_ = tuple(factors + ['Constant', 'Loop 1']) ans_ = res[factors_] = measure(constant(NTerms, 1), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Constant','Loop 2']) + factors_ = tuple(factors + ['Constant', 'Loop 2']) ans_ = res[factors_] = measure(constant(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Constant','Loop 12']) + factors_ = tuple(factors + ['Constant', 'Loop 12']) ans_ = res[factors_] = measure(constant(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Constant','Loop 3']) + factors_ = tuple(factors + ['Constant', 'Loop 3']) ans_ = res[factors_] = measure(constant(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Constant','Loop 13']) + factors_ = tuple(factors + ['Constant', 'Loop 13']) ans_ = res[factors_] = measure(constant(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Constant','Loop 4']) + factors_ = tuple(factors + ['Constant', 'Loop 4']) ans_ = res[factors_] = measure(constant(NTerms, 4), n=N) print_results(factors_, ans_, output) if True: - factors_ = tuple(factors+['Linear','Loop 1']) + factors_ = tuple(factors + ['Linear', 'Loop 1']) ans_ = res[factors_] = measure(linear(NTerms, 1), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 2']) + factors_ = tuple(factors + ['Linear', 'Loop 2']) ans_ = res[factors_] = measure(linear(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 12']) + factors_ = tuple(factors + ['Linear', 'Loop 12']) ans_ = res[factors_] = measure(linear(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 3']) + factors_ = tuple(factors + ['Linear', 'Loop 3']) ans_ = res[factors_] = measure(linear(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 13']) + factors_ = tuple(factors + ['Linear', 'Loop 13']) ans_ = res[factors_] = measure(linear(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 4']) + factors_ = tuple(factors + ['Linear', 'Loop 4']) ans_ = res[factors_] = measure(linear(NTerms, 4), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 14']) + factors_ = tuple(factors + ['Linear', 'Loop 14']) ans_ = res[factors_] = measure(linear(NTerms, 14), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 5']) + factors_ = tuple(factors + ['Linear', 'Loop 5']) ans_ = res[factors_] = measure(linear(NTerms, 5), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 15']) + factors_ = tuple(factors + ['Linear', 'Loop 15']) ans_ = res[factors_] = measure(linear(NTerms, 15), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 6']) + factors_ = tuple(factors + ['Linear', 'Loop 6']) ans_ = res[factors_] = measure(linear(NTerms, 6), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 7']) + factors_ = tuple(factors + ['Linear', 'Loop 7']) ans_ = res[factors_] = measure(linear(NTerms, 7), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 17']) + factors_ = tuple(factors + ['Linear', 'Loop 17']) ans_ = res[factors_] = measure(linear(NTerms, 17), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 8']) + factors_ = tuple(factors + ['Linear', 'Loop 8']) ans_ = res[factors_] = measure(linear(NTerms, 8), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Linear','Loop 9']) + factors_ = tuple(factors + ['Linear', 'Loop 9']) ans_ = res[factors_] = measure(linear(NTerms, 9), n=N) print_results(factors_, ans_, output) if True: - factors_ = tuple(factors+['SimpleLinear','Loop 1']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 1']) ans_ = res[factors_] = measure(simple_linear(NTerms, 1), n=N) print_results(factors_, ans_, output) if True: - factors_ = tuple(factors+['SimpleLinear','Loop 2']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 2']) ans_ = res[factors_] = measure(simple_linear(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 12']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 12']) ans_ = res[factors_] = measure(simple_linear(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 3']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 3']) ans_ = res[factors_] = measure(simple_linear(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 13']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 13']) ans_ = res[factors_] = measure(simple_linear(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 4']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 4']) ans_ = res[factors_] = measure(simple_linear(NTerms, 4), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 14']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 14']) ans_ = res[factors_] = measure(simple_linear(NTerms, 14), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 5']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 5']) ans_ = res[factors_] = measure(simple_linear(NTerms, 5), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 15']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 15']) ans_ = res[factors_] = measure(simple_linear(NTerms, 15), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 7']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 7']) ans_ = res[factors_] = measure(simple_linear(NTerms, 7), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 17']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 17']) ans_ = res[factors_] = measure(simple_linear(NTerms, 17), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 8']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 8']) ans_ = res[factors_] = measure(simple_linear(NTerms, 8), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['SimpleLinear','Loop 9']) + factors_ = tuple(factors + ['SimpleLinear', 'Loop 9']) ans_ = res[factors_] = measure(simple_linear(NTerms, 9), n=N) print_results(factors_, ans_, output) - if True: - factors_ = tuple(factors+['NestedLinear','Loop 1']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 1']) ans_ = res[factors_] = measure(nested_linear(NTerms, 1), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 2']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 2']) ans_ = res[factors_] = measure(nested_linear(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 12']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 12']) ans_ = res[factors_] = measure(nested_linear(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 3']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 3']) ans_ = res[factors_] = measure(nested_linear(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 13']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 13']) ans_ = res[factors_] = measure(nested_linear(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 4']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 4']) ans_ = res[factors_] = measure(nested_linear(NTerms, 4), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 14']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 14']) ans_ = res[factors_] = measure(nested_linear(NTerms, 14), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 5']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 5']) ans_ = res[factors_] = measure(nested_linear(NTerms, 5), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 15']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 15']) ans_ = res[factors_] = measure(nested_linear(NTerms, 15), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['NestedLinear','Loop 6']) + factors_ = tuple(factors + ['NestedLinear', 'Loop 6']) ans_ = res[factors_] = measure(nested_linear(NTerms, 6), n=N) print_results(factors_, ans_, output) - if True: - factors_ = tuple(factors+['Bilinear','Loop 1']) + factors_ = tuple(factors + ['Bilinear', 'Loop 1']) ans_ = res[factors_] = measure(bilinear(NTerms, 1), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Bilinear','Loop 2']) + factors_ = tuple(factors + ['Bilinear', 'Loop 2']) ans_ = res[factors_] = measure(bilinear(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Bilinear','Loop 12']) + factors_ = tuple(factors + ['Bilinear', 'Loop 12']) ans_ = res[factors_] = measure(bilinear(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Bilinear','Loop 3']) + factors_ = tuple(factors + ['Bilinear', 'Loop 3']) ans_ = res[factors_] = measure(bilinear(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Bilinear','Loop 13']) + factors_ = tuple(factors + ['Bilinear', 'Loop 13']) ans_ = res[factors_] = measure(bilinear(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Bilinear','Loop 4']) + factors_ = tuple(factors + ['Bilinear', 'Loop 4']) ans_ = res[factors_] = measure(bilinear(NTerms, 4), n=N) print_results(factors_, ans_, output) - if True: - factors_ = tuple(factors+['Nonlinear','Loop 2']) + factors_ = tuple(factors + ['Nonlinear', 'Loop 2']) ans_ = res[factors_] = measure(nonlinear(NTerms, 2), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Nonlinear','Loop 12']) + factors_ = tuple(factors + ['Nonlinear', 'Loop 12']) ans_ = res[factors_] = measure(nonlinear(NTerms, 12), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Nonlinear','Loop 3']) + factors_ = tuple(factors + ['Nonlinear', 'Loop 3']) ans_ = res[factors_] = measure(nonlinear(NTerms, 3), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Nonlinear','Loop 13']) + factors_ = tuple(factors + ['Nonlinear', 'Loop 13']) ans_ = res[factors_] = measure(nonlinear(NTerms, 13), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Nonlinear','Loop 4']) + factors_ = tuple(factors + ['Nonlinear', 'Loop 4']) ans_ = res[factors_] = measure(nonlinear(NTerms, 4), n=N) print_results(factors_, ans_, output) - if True: - factors_ = tuple(factors+['Polynomial','Loop 3']) + factors_ = tuple(factors + ['Polynomial', 'Loop 3']) ans_ = res[factors_] = measure(polynomial(NTerms, 3), n=N) print_results(factors_, ans_, output) - if True: - factors_ = tuple(factors+['Product','Loop 1']) + factors_ = tuple(factors + ['Product', 'Loop 1']) ans_ = res[factors_] = measure(polynomial(NTerms, 1), n=N) print_results(factors_, ans_, output) - factors_ = tuple(factors+['Product','Loop 2']) + factors_ = tuple(factors + ['Product', 'Loop 2']) ans_ = res[factors_] = measure(polynomial(NTerms, 2), n=N) print_results(factors_, ans_, output) def remap_keys(mapping): - return [{'factors':k, 'performance': v} for k, v in mapping.items()] + return [{'factors': k, 'performance': v} for k, v in mapping.items()] + # # MAIN # res = {} -#runall(["COOPR3"], res) +# runall(["COOPR3"], res) -#EXPR.set_expression_tree_format(EXPR.common.Mode.pyomo4_trees) -#runall(["PYOMO4"], res) +# EXPR.set_expression_tree_format(EXPR.common.Mode.pyomo4_trees) +# runall(["PYOMO4"], res) -#EXPR.set_expression_tree_format(EXPR.common.Mode.pyomo5_trees) -#import cProfile -#cProfile.run('runall(["PYOMO5"], res)', 'restats4') +# EXPR.set_expression_tree_format(EXPR.common.Mode.pyomo5_trees) +# import cProfile +# cProfile.run('runall(["PYOMO5"], res)', 'restats4') runall(["PYOMO5"], res) if args.output: @@ -1261,21 +1293,34 @@ def remap_keys(mapping): # Write csv file # perf_types = sorted(next(iter(res.values())).keys()) - res_ = [ list(key) + [res.get(key,{}).get(k,{}).get('mean',-777) for k in perf_types] for key in sorted(res.keys())] + res_ = [ + list(key) + + [res.get(key, {}).get(k, {}).get('mean', -777) for k in perf_types] + for key in sorted(res.keys()) + ] with open(args.output, 'w') as OUTPUT: import csv + writer = csv.writer(OUTPUT) writer.writerow(['Version', 'ExprType', 'ExprNum'] + perf_types) for line in res_: writer.writerow(line) elif args.output.endswith(".json"): - res_ = {'script': sys.argv[0], 'NTerms':NTerms, 'NTrials':N, 'data': remap_keys(res), 'pyomo_version':pyomo.version.version, 'pyomo_versioninfo':pyomo.version.version_info[:3]} + res_ = { + 'script': sys.argv[0], + 'NTerms': NTerms, + 'NTrials': N, + 'data': remap_keys(res), + 'pyomo_version': pyomo.version.version, + 'pyomo_versioninfo': pyomo.version.version_info[:3], + } # # Write json file # with open(args.output, 'w') as OUTPUT: import json + json.dump(res_, OUTPUT) else: diff --git a/scripts/performance/main.py b/scripts/performance/main.py index a81d850c887..10349c0eb73 100755 --- a/scripts/performance/main.py +++ b/scripts/performance/main.py @@ -18,6 +18,7 @@ import platform import sys import time + try: import ujson as json except ImportError: @@ -39,6 +40,7 @@ class TimingHandler(logging.Handler): information and adds it to the test data recorder. """ + def __init__(self): super(TimingHandler, self).__init__() self._testRecord = None @@ -62,7 +64,7 @@ def emit(self, record): name = record.msg.name val = record.msg.timer except AttributeError: - name = None + name = '' val = str(record.msg) if name in cat_data: try: @@ -81,6 +83,7 @@ class DataRecorder(object): report. """ + def __init__(self, data): self._data = data self._timer = TicTocTimer() @@ -130,12 +133,7 @@ def getProjectInfo(project): version = _module.__version__ finally: os.chdir(cwd) - return { - 'branch': branch, - 'sha': sha, - 'diffs': diffs, - 'version': version, - } + return {'branch': branch, 'sha': sha, 'diffs': diffs, 'version': version} def getRunInfo(options): @@ -151,6 +149,7 @@ def getRunInfo(options): info['pypy_version'] = tuple(sys.pypy_version_info) if options.cython: import Cython + info['cython'] = tuple(int(x) for x in Cython.__version__.split('.')) for project in options.projects: info[project] = getProjectInfo(project) @@ -160,7 +159,7 @@ def getRunInfo(options): def run_tests(options, argv): gc.collect() gc.collect() - results = ( getRunInfo(options), OrderedDict() ) + results = (getRunInfo(options), OrderedDict()) recorder = DataRecorder(results[1]) unittest.pytest.main(argv, plugins=[recorder]) gc.collect() @@ -169,55 +168,59 @@ def run_tests(options, argv): def main(argv): - parser = argparse.ArgumentParser( - epilog="Remaining arguments are passed to pytest" - ) + parser = argparse.ArgumentParser(epilog="Remaining arguments are passed to pytest") parser.add_argument( - '-o', '--output', + '-o', + '--output', action='store', dest='output', default=None, - help='Store the test results to the specified file.' + help='Store the test results to the specified file.', ) parser.add_argument( - '-d', '--dir', + '-d', + '--dir', action='store', dest='output_dir', default=None, help='Store the test results in the specified directory. If -o ' 'is not specified, then a file name is automatically generated ' - 'based on the first "main project" git branch and hash.' + 'based on the first "main project" git branch and hash.', ) parser.add_argument( - '-p', '--project', + '-p', + '--project', action='append', dest='projects', default=[], help='Main project (used for generating and recording SHA and ' - 'DIFF information)' + 'DIFF information)', ) parser.add_argument( - '-n', '--replicates', + '-n', + '--replicates', action='store', dest='replicates', type=int, default=1, - help='Number of replicates to run.' + help='Number of replicates to run.', ) parser.add_argument( '--with-cython', action='store_true', dest='cython', - help='Cythonization enabled.' + help='Cythonization enabled.', ) options, argv = parser.parse_known_args(argv) - if not options.projects: + if options.projects: + # Pytest really, really wants the initial script to belong to the + # "main" project being tested. Just re-assign it to the main + # project module (which seems to be enough for pytest) + argv[0] = options.projects[0] + else: options.projects.append('pyomo') - # Pytest really, really wants the initial script to belong to the - # "main" project being tested. Just re-assign it to the main - # project module (which seems to be enough for pytest) - argv[0] = options.projects[0] + argv.append('-W ignore::Warning') results = tuple(run_tests(options, argv) for i in range(options.replicates)) @@ -227,11 +230,11 @@ def main(argv): if not options.output: options.output = 'perf-%s-%s-%s-%s.json' % ( results[0][options.projects[0]]['branch'], - results[0][options.projects[0]]['sha'][:7] + ( - '_mod' if results[0][options.projects[0]]['diffs'] else ''), - results[0]['python_implementation'].lower() + ( - '.'.join(str(i) for i in results[0]['python_version'][:3])), - time.strftime('%y%m%d_%H%M', time.localtime()) + results[0][options.projects[0]]['sha'][:7] + + ('_mod' if results[0][options.projects[0]]['diffs'] else ''), + results[0]['python_implementation'].lower() + + ('.'.join(str(i) for i in results[0]['python_version'][:3])), + time.strftime('%y%m%d_%H%M', time.localtime()), ) options.output = os.path.join(options.output_dir, options.output) if options.output: @@ -249,6 +252,8 @@ def main(argv): if close_ostream: ostream.close() print("Performance run complete.") + return results + if __name__ == '__main__': - main(sys.argv) + results = main(sys.argv) diff --git a/scripts/performance/simple.py b/scripts/performance/simple.py index 9ac5a8b0a37..2990f13f413 100644 --- a/scripts/performance/simple.py +++ b/scripts/performance/simple.py @@ -11,18 +11,23 @@ else: from pyomo.repn import generate_standard_repn + class TimeoutError(Exception): pass + class timeout: def __init__(self, seconds=10, error_message='Timeout'): self.seconds = seconds self.error_message = error_message + def handle_timeout(self, signum, frame): raise TimeoutError(self.error_message) + def __enter__(self): signal.signal(signal.SIGALRM, self.handle_timeout) signal.alarm(self.seconds) + def __exit__(self, type, value, traceback): signal.alarm(0) @@ -38,51 +43,51 @@ def __exit__(self, type, value, traceback): def linear(flag): if flag == 0: - expr=sum(model.x[i] for i in model.A) + expr = sum(model.x[i] for i in model.A) elif flag == 10: with EXPR.linear_expression as expr: - expr=sum((model.x[i] for i in model.A), expr) + expr = sum((model.x[i] for i in model.A), expr) elif flag == 20: - expr=quicksum(model.x[i] for i in model.A) + expr = quicksum(model.x[i] for i in model.A) elif flag == 1: expr = sum_product(model.p, model.x) elif flag == 6: - expr=quicksum((model.p[i]*model.x[i] for i in model.A), linear=False) + expr = quicksum((model.p[i] * model.x[i] for i in model.A), linear=False) elif flag == 16: - expr=quicksum((model.p[i]*model.x[i] for i in model.A), linear=True) + expr = quicksum((model.p[i] * model.x[i] for i in model.A), linear=True) elif flag == 26: - expr=quicksum(model.p[i]*model.x[i] for i in model.A) + expr = quicksum(model.p[i] * model.x[i] for i in model.A) elif flag == 2: - expr=sum(model.p[i]*model.x[i] for i in model.A) + expr = sum(model.p[i] * model.x[i] for i in model.A) elif flag == 3: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * model.x[i] elif flag == 4: try: with timeout(10): - expr=0 + expr = 0 for i in model.A: expr = expr + model.p[i] * model.x[i] except: - expr = model.x[1] # BOGUS + expr = model.x[1] # BOGUS elif flag == 5: try: with timeout(10): - expr=0 + expr = 0 for i in model.A: expr = model.p[i] * model.x[i] + expr except: - expr = model.x[1] # BOGUS + expr = model.x[1] # BOGUS elif flag == 12: with EXPR.linear_expression as expr: - expr=sum((model.p[i]*model.x[i] for i in model.A), expr) + expr = sum((model.p[i] * model.x[i] for i in model.A), expr) elif flag == 22: with EXPR.nonlinear_expression as expr: - expr=sum((model.p[i]*model.x[i] for i in model.A), expr) + expr = sum((model.p[i] * model.x[i] for i in model.A), expr) elif flag == 13: with EXPR.linear_expression as expr: for i in model.A: @@ -97,7 +102,7 @@ def linear(flag): expr = model.p[i] * model.x[i] + expr elif flag == 7: - expr=0 + expr = 0 for i in model.A: expr += model.p[i] * (1 + model.x[i]) elif flag == 17: @@ -105,60 +110,110 @@ def linear(flag): for i in model.A: expr += model.p[i] * (1 + model.x[i]) elif flag == 27: - expr = quicksum(model.p[i]*(1 + model.x[i]) for i in model.A) + expr = quicksum(model.p[i] * (1 + model.x[i]) for i in model.A) elif flag == 8: - expr=0 + expr = 0 for i in model.A: - expr += (model.x[i]+model.x[i]) + expr += model.x[i] + model.x[i] elif flag == 18: # This will assume a nonlinear sum expr = quicksum((model.x[i] + model.x[i]) for i in model.A) elif flag == 9: - expr=0 + expr = 0 for i in model.A: - expr += model.p[i]*(model.x[i]+model.x[i]) + expr += model.p[i] * (model.x[i] + model.x[i]) elif flag == 19: # This will assume a nonlinear sum - expr = quicksum(model.p[i]*(model.x[i] + model.x[i]) for i in model.A) + expr = quicksum(model.p[i] * (model.x[i] + model.x[i]) for i in model.A) elif flag == -9: expr = quicksum(sin(model.x[i]) for i in model.A) elif flag == 30: - expr=0 + expr = 0 for i in model.A: - expr += model.x[i]*model.y[i] + expr += model.x[i] * model.y[i] elif flag == -30: - expr= quicksum(model.x[i]*model.y[i] for i in model.A) + expr = quicksum(model.x[i] * model.y[i] for i in model.A) if coopr3 or pyomo4: repn = generate_ampl_repn(expr) else: repn = generate_standard_repn(EXPR.compress_expression(expr), quadratic=False) + if coopr3: import pyomo.core.kernel.expr_coopr3 as COOPR3 - print("REFCOUNT: "+str(COOPR3._getrefcount_available)) - for i in (0,2,3,6,7,8,9): - print((i,timeit.timeit('linear(%d)' % i, "from __main__ import linear", number=1))) + + print("REFCOUNT: " + str(COOPR3._getrefcount_available)) + for i in (0, 2, 3, 6, 7, 8, 9): + print( + ( + i, + timeit.timeit( + 'linear(%d)' % i, "from __main__ import linear", number=1 + ), + ) + ) if pyomo4: import pyomo.core.kernel.expr_pyomo4 as PYOMO4 + EXPR.set_expression_tree_format(EXPR.common.Mode.pyomo4_trees) - print("REFCOUNT: "+str(PYOMO4._getrefcount_available)) - for i in (0,2,3,6,7,8,9): - print((i,timeit.timeit('linear(%d)' % i, "from __main__ import linear", number=1))) + print("REFCOUNT: " + str(PYOMO4._getrefcount_available)) + for i in (0, 2, 3, 6, 7, 8, 9): + print( + ( + i, + timeit.timeit( + 'linear(%d)' % i, "from __main__ import linear", number=1 + ), + ) + ) if not (coopr3 or pyomo4): import pyomo.core.expr.expr_pyomo5 as PYOMO5 - #print("REFCOUNT: "+str(PYOMO5._getrefcount_available)) - #import cProfile - #cProfile.run("linear(7)", "stats.7") - for i in (0,10,20,2,12,22,3,13,4,14,5,15,6,16,26,7,17,27,8,18,9,19,-9,30,-30): - #for i in (6,16,26): - print((i,timeit.timeit('linear(%d)' % i, "from __main__ import linear", number=1))) + # print("REFCOUNT: "+str(PYOMO5._getrefcount_available)) + # import cProfile + # cProfile.run("linear(7)", "stats.7") + for i in ( + 0, + 10, + 20, + 2, + 12, + 22, + 3, + 13, + 4, + 14, + 5, + 15, + 6, + 16, + 26, + 7, + 17, + 27, + 8, + 18, + 9, + 19, + -9, + 30, + -30, + ): + # for i in (6,16,26): + print( + ( + i, + timeit.timeit( + 'linear(%d)' % i, "from __main__ import linear", number=1 + ), + ) + ) diff --git a/setup.cfg b/setup.cfg index 870e602a337..b606138f38c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [metadata] -license_files = LICENSE.txt +license_files = LICENSE.md [bdist_wheel] universal=1 @@ -12,6 +12,7 @@ markers = expensive: marks tests as expensive mpi: marks tests that require MPI neos: marks tests that require NEOS server connections + importtest: marks tests that checks for warnings when importing modules book: marks tests from the Pyomo book performance: marks performance tests long: marks long performance tests diff --git a/setup.py b/setup.py index 7b47b46a94f..071893a7ad3 100644 --- a/setup.py +++ b/setup.py @@ -17,11 +17,13 @@ import platform import sys from setuptools import setup, find_packages, Command + try: from setuptools import DistutilsOptionError except ImportError: from distutils.errors import DistutilsOptionError + def read(*rnames): with open(os.path.join(os.path.dirname(__file__), *rnames)) as README: # Strip all leading badges up to, but not including the COIN-OR @@ -34,6 +36,7 @@ def read(*rnames): break return line + README.read() + def import_pyomo_module(*path): _module_globals = dict(globals()) _module_globals['__name__'] = None @@ -42,13 +45,16 @@ def import_pyomo_module(*path): exec(_FILE.read(), _module_globals) return _module_globals + def get_version(): # Source pyomo/version/info.py to get the version number - return import_pyomo_module('pyomo','version','info.py')['__version__'] + return import_pyomo_module('pyomo', 'version', 'info.py')['__version__'] + CYTHON_REQUIRED = "required" -if not any(arg.startswith(cmd) - for cmd in ('build','install','bdist') for arg in sys.argv): +if not any( + arg.startswith(cmd) for cmd in ('build', 'install', 'bdist') for arg in sys.argv +): using_cython = False else: using_cython = "automatic" @@ -66,16 +72,18 @@ def get_version(): # break out of this try-except (disable Cython) raise RuntimeError("Cython is only supported under CPython") from Cython.Build import cythonize + # # Note: The Cython developers recommend that you distribute C source # files to users. But this is fine for evaluating the utility of Cython # import shutil + files = [ "pyomo/core/expr/numvalue.pyx", "pyomo/core/expr/numeric_expr.pyx", "pyomo/core/expr/logical_expr.pyx", - #"pyomo/core/expr/visitor.pyx", + # "pyomo/core/expr/visitor.pyx", "pyomo/core/util.pyx", "pyomo/repn/standard_repn.pyx", "pyomo/repn/plugins/cpxlp.pyx", @@ -85,20 +93,22 @@ def get_version(): ] for f in files: shutil.copyfile(f[:-1], f) - ext_modules = cythonize(files, - compiler_directives={"language_level": 3}) + ext_modules = cythonize(files, compiler_directives={"language_level": 3}) except: if using_cython == CYTHON_REQUIRED: - print(""" + print( + """ ERROR: Cython was explicitly requested with --with-cython, but cythonization of core Pyomo modules failed. -""") +""" + ) raise using_cython = False -if (('--with-distributable-extensions' in sys.argv) - or (os.getenv('PYOMO_SETUP_ARGS') is not None and - '--with-distributable-extensions' in os.getenv('PYOMO_SETUP_ARGS'))): +if ('--with-distributable-extensions' in sys.argv) or ( + os.getenv('PYOMO_SETUP_ARGS') is not None + and '--with-distributable-extensions' in os.getenv('PYOMO_SETUP_ARGS') +): try: sys.argv.remove('--with-distributable-extensions') except: @@ -108,10 +118,14 @@ def get_version(): # NOTE: There is inconsistent behavior in Windows for APPSI. # As a result, we will NOT include these extensions in Windows. if not sys.platform.startswith('win'): - appsi_extension = import_pyomo_module( - 'pyomo', 'contrib', 'appsi', 'build.py')['get_appsi_extension']( - in_setup=True, appsi_root=os.path.join( - os.path.dirname(__file__), 'pyomo', 'contrib', 'appsi')) + appsi_extension = import_pyomo_module('pyomo', 'contrib', 'appsi', 'build.py')[ + 'get_appsi_extension' + ]( + in_setup=True, + appsi_root=os.path.join( + os.path.dirname(__file__), 'pyomo', 'contrib', 'appsi' + ), + ) ext_modules.append(appsi_extension) @@ -127,24 +141,22 @@ class DependenciesCommand(Command): `extras_require`). """ + description = "list the dependencies for this package" - user_options = [ - ('extras=', None, 'extra targets to include'), - ] + user_options = [('extras=', None, 'extra targets to include')] def initialize_options(self): self.extras = None def finalize_options(self): if self.extras is not None: - self.extras = [ - e for e in (_.strip() for _ in self.extras.split(',')) if e - ] + self.extras = [e for e in (_.strip() for _ in self.extras.split(',')) if e] for e in self.extras: if e not in setup_kwargs['extras_require']: raise DistutilsOptionError( "extras can only include {%s}" - % (', '.join(setup_kwargs['extras_require']))) + % (', '.join(setup_kwargs['extras_require'])) + ) def run(self): deps = list(self._print_deps(setup_kwargs['install_requires'])) @@ -165,26 +177,26 @@ def _print_deps(self, deplist): setup_kwargs = dict( - name = 'Pyomo', + name='Pyomo', # # Note: the release number is set in pyomo/version/info.py # - cmdclass = {'dependencies': DependenciesCommand}, - version = get_version(), - maintainer = 'Pyomo Developer Team', - maintainer_email = 'pyomo-developers@googlegroups.com', - url = 'http://pyomo.org', - project_urls = { + cmdclass={'dependencies': DependenciesCommand}, + version=get_version(), + maintainer='Pyomo Developer Team', + maintainer_email='pyomo-developers@googlegroups.com', + url='http://pyomo.org', + project_urls={ 'Documentation': 'https://pyomo.readthedocs.io/', 'Source': 'https://github.com/Pyomo/pyomo', }, - license = 'BSD', - platforms = ["any"], - description = 'Pyomo: Python Optimization Modeling Objects', - long_description = read('README.md'), - long_description_content_type = 'text/markdown', - keywords = ['optimization'], - classifiers = [ + license='BSD', + platforms=["any"], + description='Pyomo: Python Optimization Modeling Objects', + long_description=read('README.md'), + long_description_content_type='text/markdown', + keywords=['optimization'], + classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: End Users/Desktop', 'Intended Audience :: Science/Research', @@ -199,15 +211,15 @@ def _print_deps(self, deplist): 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', + 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Scientific/Engineering :: Mathematics', - 'Topic :: Software Development :: Libraries :: Python Modules' ], - python_requires = '>=3.7', - install_requires = [ - 'ply', + 'Topic :: Software Development :: Libraries :: Python Modules', ], - extras_require = { + python_requires='>=3.7', + install_requires=['ply'], + extras_require={ 'tests': [ #'codecov', # useful for testing infrastructures, but not required 'coverage', @@ -222,24 +234,27 @@ def _print_deps(self, deplist): 'sphinx_rtd_theme>0.5', 'sphinxcontrib-jsmath', 'sphinxcontrib-napoleon', - 'numpy', # Needed by autodoc for pynumero - 'scipy', # Needed by autodoc for pynumero + 'numpy', # Needed by autodoc for pynumero + 'scipy', # Needed by autodoc for pynumero ], 'optional': [ - 'dill', # No direct use, but improves lambda pickle - 'ipython', # contrib.viewer - 'matplotlib', + 'dill', # No direct use, but improves lambda pickle + 'ipython', # contrib.viewer + # Note: matplotlib 3.6.1 has bug #24127, which breaks + # seaborn's histplot (triggering parmest failures) + 'matplotlib!=3.6.1', 'networkx', # network, incidence_analysis, community_detection 'numpy', 'openpyxl', # dataportals #'pathos', # requested for #963, but PR currently closed - 'pint', # units - 'python-louvain', # community_detection - 'pyyaml', # core + 'pint', # units + 'plotly', # incidence_analysis + 'python-louvain', # community_detection + 'pyyaml', # core 'scipy', - 'sympy', # differentiation - 'xlrd', # dataportals - 'z3-solver', # community_detection + 'sympy', # differentiation + 'xlrd', # dataportals + 'z3-solver', # community_detection # # subprocess output is merged more reliably if # 'PeekNamedPipe' is available from pywin32 @@ -248,29 +263,31 @@ def _print_deps(self, deplist): # The following optional dependencies are difficult to # install on PyPy (binary wheels are not available), so we # will only "require" them on other (CPython) platforms: - 'casadi; implementation_name!="pypy"', # dae - 'numdifftools; implementation_name!="pypy"', # pynumero + # + # DAE can use casadi + 'casadi; implementation_name!="pypy"', + 'numdifftools; implementation_name!="pypy"', # pynumero 'pandas; implementation_name!="pypy"', - 'seaborn; implementation_name!="pypy"', # parmest.graphics + 'seaborn; implementation_name!="pypy"', # parmest.graphics ], }, - packages = find_packages(exclude=("scripts",)), - package_data = { + packages=find_packages(exclude=("scripts",)), + package_data={ "pyomo.contrib.ampl_function_demo": ["src/*"], "pyomo.contrib.appsi.cmodel": ["src/*"], "pyomo.contrib.mcpp": ["*.cpp"], "pyomo.contrib.pynumero": ['src/*', 'src/tests/*'], "pyomo.contrib.viewer": ["*.ui"], }, - ext_modules = ext_modules, - entry_points = """ + ext_modules=ext_modules, + entry_points=""" [console_scripts] pyomo = pyomo.scripting.pyomo_main:main_console_script [pyomo.command] pyomo.help = pyomo.scripting.driver_help pyomo.viewer=pyomo.contrib.viewer.pyomo_viewer - """ + """, ) @@ -284,23 +301,31 @@ def _print_deps(self, deplist): if 'Microsoft Visual C++' not in str(e_info): raise elif using_cython == CYTHON_REQUIRED: - print(""" + print( + """ ERROR: Cython was explicitly requested with --with-cython, but cythonization of core Pyomo modules failed. -""") +""" + ) raise else: - print(""" + print( + """ ERROR: setup() failed: %s Re-running setup() without the Cython modules -""" % (str(e_info),)) +""" + % (str(e_info),) + ) setup_kwargs['ext_modules'] = [] setup(**setup_kwargs) - print(""" + print( + """ WARNING: Installation completed successfully, but the attempt to cythonize core Pyomo modules failed. Cython provides performance optimizations and is not required for any Pyomo functionality. Cython returned the following error: "%s" -""" % (str(e_info),)) +""" + % (str(e_info),) + )